Column                 Type            Range / Values
id_within_dataset      int64           1 to 55.5k
snippet                stringlengths   19 to 14.2k
tokens                 listlengths     6 to 1.63k
nl                     stringlengths   6 to 352
split_within_dataset   stringclasses   1 value
is_duplicated          bool            2 classes
7,827
def test_output_quiet():
    with AssertNotPrints('2'):
        ip.run_cell('1+1;', store_history=True)
    with AssertNotPrints('2'):
        ip.run_cell('1+1; # comment with a semicolon', store_history=True)
    with AssertNotPrints('2'):
        ip.run_cell('1+1;\n#commented_out_function()', store_history=True)
[ "def", "test_output_quiet", "(", ")", ":", "with", "AssertNotPrints", "(", "'2'", ")", ":", "ip", ".", "run_cell", "(", "'1+1;'", ",", "store_history", "=", "True", ")", "with", "AssertNotPrints", "(", "'2'", ")", ":", "ip", ".", "run_cell", "(", "'1+1; # comment with a semicolon'", ",", "store_history", "=", "True", ")", "with", "AssertNotPrints", "(", "'2'", ")", ":", "ip", ".", "run_cell", "(", "'1+1;\\n#commented_out_function()'", ",", "store_history", "=", "True", ")" ]
checking to make sure that output is quiet .
train
false
7,828
@register.filter
def rest_json(value, arg=None):
    if (not value):
        return mark_safe(u'{}')
    if (not isinstance(value, (dict, OrderedDict, list, tuple))):
        msg = u'Given value must be of type dict, OrderedDict, list or tuple but it is {}.'
        raise ValueError(msg.format(value.__class__.__name__))
    data = JSONRenderer().render(value)
    return mark_safe(data)
[ "@", "register", ".", "filter", "def", "rest_json", "(", "value", ",", "arg", "=", "None", ")", ":", "if", "(", "not", "value", ")", ":", "return", "mark_safe", "(", "u'{}'", ")", "if", "(", "not", "isinstance", "(", "value", ",", "(", "dict", ",", "OrderedDict", ",", "list", ",", "tuple", ")", ")", ")", ":", "msg", "=", "u'Given value must be of type dict, OrderedDict, list or tuple but it is {}.'", "raise", "ValueError", "(", "msg", ".", "format", "(", "value", ".", "__class__", ".", "__name__", ")", ")", "data", "=", "JSONRenderer", "(", ")", ".", "render", "(", "value", ")", "return", "mark_safe", "(", "data", ")" ]
renders a returndict as used by the rest framework into a safe json string .
train
false
7,829
def paste_server(app, gcfg=None, host='127.0.0.1', port=None, *args, **kwargs):
    util.warn('This command is deprecated.\n\n You should now use the `--paste` option. Ex.:\n\n gunicorn --paste development.ini\n ')
    from gunicorn.app.pasterapp import PasterServerApplication
    PasterServerApplication(app, gcfg=gcfg, host=host, port=port, *args, **kwargs).run()
[ "def", "paste_server", "(", "app", ",", "gcfg", "=", "None", ",", "host", "=", "'127.0.0.1'", ",", "port", "=", "None", ",", "*", "args", ",", "**", "kwargs", ")", ":", "util", ".", "warn", "(", "'This command is deprecated.\\n\\n You should now use the `--paste` option. Ex.:\\n\\n gunicorn --paste development.ini\\n '", ")", "from", "gunicorn", ".", "app", ".", "pasterapp", "import", "PasterServerApplication", "PasterServerApplication", "(", "app", ",", "gcfg", "=", "gcfg", ",", "host", "=", "host", ",", "port", "=", "port", ",", "*", "args", ",", "**", "kwargs", ")", ".", "run", "(", ")" ]
a paster server .
train
false
7,830
def flatatt(attrs):
    key_value_attrs = []
    boolean_attrs = []
    for (attr, value) in attrs.items():
        if isinstance(value, bool):
            if value:
                boolean_attrs.append((attr,))
        else:
            try:
                value = value.format(**attrs)
            except KeyError:
                pass
            key_value_attrs.append((attr, value))
    return (format_html_join(u'', u' {}="{}"', sorted(key_value_attrs)) + format_html_join(u'', u' {}', sorted(boolean_attrs)))
[ "def", "flatatt", "(", "attrs", ")", ":", "key_value_attrs", "=", "[", "]", "boolean_attrs", "=", "[", "]", "for", "(", "attr", ",", "value", ")", "in", "attrs", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "bool", ")", ":", "if", "value", ":", "boolean_attrs", ".", "append", "(", "(", "attr", ",", ")", ")", "else", ":", "try", ":", "value", "=", "value", ".", "format", "(", "**", "attrs", ")", "except", "KeyError", ":", "pass", "key_value_attrs", ".", "append", "(", "(", "attr", ",", "value", ")", ")", "return", "(", "format_html_join", "(", "u''", ",", "u' {}=\"{}\"'", ",", "sorted", "(", "key_value_attrs", ")", ")", "+", "format_html_join", "(", "u''", ",", "u' {}'", ",", "sorted", "(", "boolean_attrs", ")", ")", ")" ]
convert a dictionary of attributes to a single string .
train
true
7,832
def cosine_proximity(y_true, y_pred):
    y_true = tf.nn.l2_normalize(y_true, (len(y_true.get_shape()) - 1))
    y_pred = tf.nn.l2_normalize(y_pred, (len(y_pred.get_shape()) - 1))
    return tf.reduce_sum((y_true * y_pred))
[ "def", "cosine_proximity", "(", "y_true", ",", "y_pred", ")", ":", "y_true", "=", "tf", ".", "nn", ".", "l2_normalize", "(", "y_true", ",", "(", "len", "(", "y_true", ".", "get_shape", "(", ")", ")", "-", "1", ")", ")", "y_pred", "=", "tf", ".", "nn", ".", "l2_normalize", "(", "y_pred", ",", "(", "len", "(", "y_pred", ".", "get_shape", "(", ")", ")", "-", "1", ")", ")", "return", "tf", ".", "reduce_sum", "(", "(", "y_true", "*", "y_pred", ")", ")" ]
cosine similarity of two vectors .
train
false
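The row above computes a summed cosine similarity with TensorFlow ops. As a point of reference, a NumPy sketch of the same computation (this port is mine, not part of the dataset):

import numpy as np

def cosine_proximity_np(y_true, y_pred):
    # Same idea as the TF snippet above: L2-normalize along the last axis,
    # then sum the elementwise products.
    y_true = y_true / np.linalg.norm(y_true, axis=-1, keepdims=True)
    y_pred = y_pred / np.linalg.norm(y_pred, axis=-1, keepdims=True)
    return np.sum(y_true * y_pred)

a = np.array([[1.0, 0.0]])
b = np.array([[1.0, 1.0]])
print(cosine_proximity_np(a, b))  # ~0.7071, i.e. cos(45 degrees)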
7,833
def all_valid_collectors():
    now = int(time.time())
    for col in all_collectors():
        if ((not col.dead) or ((now - col.lastspawn) > 3600)):
            (yield col)
[ "def", "all_valid_collectors", "(", ")", ":", "now", "=", "int", "(", "time", ".", "time", "(", ")", ")", "for", "col", "in", "all_collectors", "(", ")", ":", "if", "(", "(", "not", "col", ".", "dead", ")", "or", "(", "(", "now", "-", "col", ".", "lastspawn", ")", ">", "3600", ")", ")", ":", "(", "yield", "col", ")" ]
generator to return all defined collectors that havent been marked dead in the past hour .
train
false
7,835
def canonicalize_stderr(stderr):
    stderr = stderr.strip().split('\n')[(-1)]
    substitutions = [
        ("NameError: global name '", "NameError: name '"),
        ("AttributeError: '(\\w+)' object attribute '(\\w+)' is read-only", 'AttributeError: \\2'),
        ('TypeError: object.__new__\\(\\) takes no parameters', 'TypeError: object() takes no parameters'),
        ('IndexError: list assignment index out of range', 'IndexError: list index out of range'),
        ("unqualified exec is not allowed in function '(\\w+)' it (.*)", "unqualified exec is not allowed in function '\\1' because it \\2")
    ]
    for (pattern, subst_with) in substitutions:
        stderr = re.sub(pattern, subst_with, stderr)
    return stderr
[ "def", "canonicalize_stderr", "(", "stderr", ")", ":", "stderr", "=", "stderr", ".", "strip", "(", ")", ".", "split", "(", "'\\n'", ")", "[", "(", "-", "1", ")", "]", "substitutions", "=", "[", "(", "\"NameError: global name '\"", ",", "\"NameError: name '\"", ")", ",", "(", "\"AttributeError: '(\\\\w+)' object attribute '(\\\\w+)' is read-only\"", ",", "'AttributeError: \\\\2'", ")", ",", "(", "'TypeError: object.__new__\\\\(\\\\) takes no parameters'", ",", "'TypeError: object() takes no parameters'", ")", ",", "(", "'IndexError: list assignment index out of range'", ",", "'IndexError: list index out of range'", ")", ",", "(", "\"unqualified exec is not allowed in function '(\\\\w+)' it (.*)\"", ",", "\"unqualified exec is not allowed in function '\\\\1' because it \\\\2\"", ")", "]", "for", "(", "pattern", ",", "subst_with", ")", "in", "substitutions", ":", "stderr", "=", "re", ".", "sub", "(", "pattern", ",", "subst_with", ",", "stderr", ")", "return", "stderr" ]
for a while we were trying to maintain *exact* stderr compatibility with cpython .
train
false
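canonicalize_stderr applies each (pattern, replacement) pair with re.sub to the last stderr line. A quick standalone check of one of the rewrites above:

import re

line = "NameError: global name 'foo' is not defined"
print(re.sub("NameError: global name '", "NameError: name '", line))
# NameError: name 'foo' is not defined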
7,838
def send_email_after_account_create(form):
    send_email(to=form['email'],
               action=USER_REGISTER,
               subject=MAILS[USER_REGISTER]['subject'].format(app_name=get_settings()['app_name']),
               html=MAILS[USER_REGISTER]['message'].format(email=form['email']))
[ "def", "send_email_after_account_create", "(", "form", ")", ":", "send_email", "(", "to", "=", "form", "[", "'email'", "]", ",", "action", "=", "USER_REGISTER", ",", "subject", "=", "MAILS", "[", "USER_REGISTER", "]", "[", "'subject'", "]", ".", "format", "(", "app_name", "=", "get_settings", "(", ")", "[", "'app_name'", "]", ")", ",", "html", "=", "MAILS", "[", "USER_REGISTER", "]", "[", "'message'", "]", ".", "format", "(", "email", "=", "form", "[", "'email'", "]", ")", ")" ]
send email after account create .
train
false
7,840
def log_list_volumes(function):
    counter = itertools.count(1)

    def _count_calls(*args, **kwargs):
        """
        Run given function with count.
        """
        CALL_LIST_VOLUMES(function=function.__name__, count=next(counter)).write()
        return function(*args, **kwargs)

    return _count_calls
[ "def", "log_list_volumes", "(", "function", ")", ":", "counter", "=", "itertools", ".", "count", "(", "1", ")", "def", "_count_calls", "(", "*", "args", ",", "**", "kwargs", ")", ":", "CALL_LIST_VOLUMES", "(", "function", "=", "function", ".", "__name__", ",", "count", "=", "next", "(", "counter", ")", ")", ".", "write", "(", ")", "return", "function", "(", "*", "args", ",", "**", "kwargs", ")", "return", "_count_calls" ]
decorator to count calls to list_volumes .
train
false
7,841
def detect_text_cloud_storage(uri):
    vision_client = vision.Client()
    image = vision_client.image(source_uri=uri)
    texts = image.detect_text()
    print 'Texts:'
    for text in texts:
        print text.description
[ "def", "detect_text_cloud_storage", "(", "uri", ")", ":", "vision_client", "=", "vision", ".", "Client", "(", ")", "image", "=", "vision_client", ".", "image", "(", "source_uri", "=", "uri", ")", "texts", "=", "image", ".", "detect_text", "(", ")", "print", "'Texts:'", "for", "text", "in", "texts", ":", "print", "text", ".", "description" ]
detects text in the file located in google cloud storage .
train
false
7,843
def get_email_addresses(survey, startdate, enddate):
    token = settings.SURVEYGIZMO_API_TOKEN
    secret = settings.SURVEYGIZMO_API_TOKEN_SECRET
    emails = []
    page = 1
    more_pages = True
    survey_id = SURVEYS[survey]['email_collection_survey_id']
    if ((token is None) or (secret is None)):
        return emails
    while more_pages:
        response = requests.get(
            'https://restapi.surveygizmo.com/v2/survey/{survey}/surveyresponse?filter[field][0]=datesubmitted&filter[operator][0]=>=&filter[value][0]={start}+0:0:0filter[field][1]=datesubmitted&filter[operator][1]=<&filter[value][1]={end}+0:0:0&filter[field][2]=status&filter[operator][2]==&filter[value][2]=Complete&resultsperpage=500&page={page}&api_token={token}&api_token_secret={secret}'.format(
                survey=survey_id, start=startdate, end=enddate, page=page, token=token, secret=secret),
            timeout=300)
        results = json.loads(response.content)
        total_pages = results['total_pages']
        more_pages = (page < total_pages)
        emails = (emails + [r['[question(13)]'] for r in results['data']])
        page += 1
    return emails
[ "def", "get_email_addresses", "(", "survey", ",", "startdate", ",", "enddate", ")", ":", "token", "=", "settings", ".", "SURVEYGIZMO_API_TOKEN", "secret", "=", "settings", ".", "SURVEYGIZMO_API_TOKEN_SECRET", "emails", "=", "[", "]", "page", "=", "1", "more_pages", "=", "True", "survey_id", "=", "SURVEYS", "[", "survey", "]", "[", "'email_collection_survey_id'", "]", "if", "(", "(", "token", "is", "None", ")", "or", "(", "secret", "is", "None", ")", ")", ":", "return", "emails", "while", "more_pages", ":", "response", "=", "requests", ".", "get", "(", "'https://restapi.surveygizmo.com/v2/survey/{survey}/surveyresponse?filter[field][0]=datesubmitted&filter[operator][0]=>=&filter[value][0]={start}+0:0:0filter[field][1]=datesubmitted&filter[operator][1]=<&filter[value][1]={end}+0:0:0&filter[field][2]=status&filter[operator][2]==&filter[value][2]=Complete&resultsperpage=500&page={page}&api_token={token}&api_token_secret={secret}'", ".", "format", "(", "survey", "=", "survey_id", ",", "start", "=", "startdate", ",", "end", "=", "enddate", ",", "page", "=", "page", ",", "token", "=", "token", ",", "secret", "=", "secret", ")", ",", "timeout", "=", "300", ")", "results", "=", "json", ".", "loads", "(", "response", ".", "content", ")", "total_pages", "=", "results", "[", "'total_pages'", "]", "more_pages", "=", "(", "page", "<", "total_pages", ")", "emails", "=", "(", "emails", "+", "[", "r", "[", "'[question(13)]'", "]", "for", "r", "in", "results", "[", "'data'", "]", "]", ")", "page", "+=", "1", "return", "emails" ]
get the email addresses collected between startdate and enddate .
train
false
7,844
def findfile(file, here=__file__, subdir=None):
    if os.path.isabs(file):
        return file
    if (subdir is not None):
        file = os.path.join(subdir, file)
    path = sys.path
    path = ([os.path.dirname(here)] + path)
    for dn in path:
        fn = os.path.join(dn, file)
        if os.path.exists(fn):
            return fn
    return file
[ "def", "findfile", "(", "file", ",", "here", "=", "__file__", ",", "subdir", "=", "None", ")", ":", "if", "os", ".", "path", ".", "isabs", "(", "file", ")", ":", "return", "file", "if", "(", "subdir", "is", "not", "None", ")", ":", "file", "=", "os", ".", "path", ".", "join", "(", "subdir", ",", "file", ")", "path", "=", "sys", ".", "path", "path", "=", "(", "[", "os", ".", "path", ".", "dirname", "(", "here", ")", "]", "+", "path", ")", "for", "dn", "in", "path", ":", "fn", "=", "os", ".", "path", ".", "join", "(", "dn", ",", "file", ")", "if", "os", ".", "path", ".", "exists", "(", "fn", ")", ":", "return", "fn", "return", "file" ]
try to find a file next to here or on sys.path .
train
false
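findfile is self-contained apart from os/sys. Copied below with a small demo (the demo paths are illustrative and assume a regular CPython install):

import os
import sys

# Copied verbatim from the row above so the demo runs standalone.
def findfile(file, here=__file__, subdir=None):
    if os.path.isabs(file):
        return file
    if (subdir is not None):
        file = os.path.join(subdir, file)
    path = sys.path
    path = ([os.path.dirname(here)] + path)
    for dn in path:
        fn = os.path.join(dn, file)
        if os.path.exists(fn):
            return fn
    return file

print(findfile('/etc/hosts'))               # absolute paths are returned as-is
print(findfile('os.py', here=os.__file__))  # resolved next to the stdlib os module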
7,846
def brick_get_encryptor(connection_info, *args, **kwargs):
    root_helper = get_root_helper()
    key_manager = keymgr.API(CONF)
    return encryptors.get_volume_encryptor(root_helper=root_helper,
                                           connection_info=connection_info,
                                           keymgr=key_manager,
                                           *args, **kwargs)
[ "def", "brick_get_encryptor", "(", "connection_info", ",", "*", "args", ",", "**", "kwargs", ")", ":", "root_helper", "=", "get_root_helper", "(", ")", "key_manager", "=", "keymgr", ".", "API", "(", "CONF", ")", "return", "encryptors", ".", "get_volume_encryptor", "(", "root_helper", "=", "root_helper", ",", "connection_info", "=", "connection_info", ",", "keymgr", "=", "key_manager", ",", "*", "args", ",", "**", "kwargs", ")" ]
wrapper to get a brick encryptor object .
train
false
7,847
def region_code_for_country_code(country_code):
    regions = COUNTRY_CODE_TO_REGION_CODE.get(country_code, None)
    if (regions is None):
        return UNKNOWN_REGION
    else:
        return regions[0]
[ "def", "region_code_for_country_code", "(", "country_code", ")", ":", "regions", "=", "COUNTRY_CODE_TO_REGION_CODE", ".", "get", "(", "country_code", ",", "None", ")", "if", "(", "regions", "is", "None", ")", ":", "return", "UNKNOWN_REGION", "else", ":", "return", "regions", "[", "0", "]" ]
returns the region code that matches a specific country calling code .
train
false
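COUNTRY_CODE_TO_REGION_CODE and UNKNOWN_REGION come from the surrounding phone-number metadata module. A minimal sketch with both stubbed (the stub values below are assumptions, not the library's real tables):

# Stubbed metadata; the real mapping is much larger.
COUNTRY_CODE_TO_REGION_CODE = {1: ('US', 'CA'), 44: ('GB',)}
UNKNOWN_REGION = 'ZZ'

def region_code_for_country_code(country_code):
    regions = COUNTRY_CODE_TO_REGION_CODE.get(country_code, None)
    if (regions is None):
        return UNKNOWN_REGION
    else:
        return regions[0]

print(region_code_for_country_code(44))   # GB
print(region_code_for_country_code(999))  # ZZ (unknown calling code)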
7,848
def get_free_space_in_dir(path):
    if sys.platform.startswith(u'win'):
        import ctypes
        free_bytes = ctypes.c_ulonglong(0)
        retval = ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(path), None, None, ctypes.pointer(free_bytes))
        if (retval == 0):
            raise IOError(u'Checking free space on {!r} failed unexpectedly.'.format(path))
        return free_bytes.value
    else:
        stat = os.statvfs(path)
        return (stat.f_bavail * stat.f_frsize)
[ "def", "get_free_space_in_dir", "(", "path", ")", ":", "if", "sys", ".", "platform", ".", "startswith", "(", "u'win'", ")", ":", "import", "ctypes", "free_bytes", "=", "ctypes", ".", "c_ulonglong", "(", "0", ")", "retval", "=", "ctypes", ".", "windll", ".", "kernel32", ".", "GetDiskFreeSpaceExW", "(", "ctypes", ".", "c_wchar_p", "(", "path", ")", ",", "None", ",", "None", ",", "ctypes", ".", "pointer", "(", "free_bytes", ")", ")", "if", "(", "retval", "==", "0", ")", ":", "raise", "IOError", "(", "u'Checking free space on {!r} failed unexpectedly.'", ".", "format", "(", "path", ")", ")", "return", "free_bytes", ".", "value", "else", ":", "stat", "=", "os", ".", "statvfs", "(", "path", ")", "return", "(", "stat", ".", "f_bavail", "*", "stat", ".", "f_frsize", ")" ]
given a path to a directory, return the free space on that filesystem in bytes .
train
false
7,849
def doc_html_cache_key(locale, slug, mobile, minimal):
    cache_key = DOC_HTML_CACHE_KEY.format(locale=locale, slug=slug, mobile=str(mobile), minimal=str(minimal))
    return hashlib.sha1(smart_str(cache_key)).hexdigest()
[ "def", "doc_html_cache_key", "(", "locale", ",", "slug", ",", "mobile", ",", "minimal", ")", ":", "cache_key", "=", "DOC_HTML_CACHE_KEY", ".", "format", "(", "locale", "=", "locale", ",", "slug", "=", "slug", ",", "mobile", "=", "str", "(", "mobile", ")", ",", "minimal", "=", "str", "(", "minimal", ")", ")", "return", "hashlib", ".", "sha1", "(", "smart_str", "(", "cache_key", ")", ")", ".", "hexdigest", "(", ")" ]
returns the cache key for the document html .
train
false
7,850
@pytest.mark.django_db
def test_toggle_quality_check(rf, admin):
    qc_filter = dict(false_positive=False,
                     unit__state=TRANSLATED,
                     unit__store__translation_project__project__disabled=False)
    qc = QualityCheck.objects.filter(**qc_filter).first()
    unit = qc.unit
    data = 'mute='
    request = create_api_request(rf, method='post', user=admin, data=data, encode_as_json=False)
    response = toggle_qualitycheck(request, unit.id, qc.id)
    assert (response.status_code == 200)
    assert (QualityCheck.objects.get(id=qc.id).false_positive is True)
    request = create_api_request(rf, method='post', user=admin)
    response = toggle_qualitycheck(request, unit.id, qc.id)
    assert (response.status_code == 200)
    assert (QualityCheck.objects.get(id=qc.id).false_positive is False)
[ "@", "pytest", ".", "mark", ".", "django_db", "def", "test_toggle_quality_check", "(", "rf", ",", "admin", ")", ":", "qc_filter", "=", "dict", "(", "false_positive", "=", "False", ",", "unit__state", "=", "TRANSLATED", ",", "unit__store__translation_project__project__disabled", "=", "False", ")", "qc", "=", "QualityCheck", ".", "objects", ".", "filter", "(", "**", "qc_filter", ")", ".", "first", "(", ")", "unit", "=", "qc", ".", "unit", "data", "=", "'mute='", "request", "=", "create_api_request", "(", "rf", ",", "method", "=", "'post'", ",", "user", "=", "admin", ",", "data", "=", "data", ",", "encode_as_json", "=", "False", ")", "response", "=", "toggle_qualitycheck", "(", "request", ",", "unit", ".", "id", ",", "qc", ".", "id", ")", "assert", "(", "response", ".", "status_code", "==", "200", ")", "assert", "(", "QualityCheck", ".", "objects", ".", "get", "(", "id", "=", "qc", ".", "id", ")", ".", "false_positive", "is", "True", ")", "request", "=", "create_api_request", "(", "rf", ",", "method", "=", "'post'", ",", "user", "=", "admin", ")", "response", "=", "toggle_qualitycheck", "(", "request", ",", "unit", ".", "id", ",", "qc", ".", "id", ")", "assert", "(", "response", ".", "status_code", "==", "200", ")", "assert", "(", "QualityCheck", ".", "objects", ".", "get", "(", "id", "=", "qc", ".", "id", ")", ".", "false_positive", "is", "False", ")" ]
tests the view that mutes/unmutes quality checks .
train
false
7,851
def blob_to_file(filename_hint_propertyname=None, directory_hint=''):
    directory = []

    def transform_function(value, bulkload_state):
        if (not directory):
            parent_dir = os.path.dirname(bulkload_state.filename)
            directory.append(os.path.join(parent_dir, directory_hint))
            if (directory[0] and (not os.path.exists(directory[0]))):
                os.makedirs(directory[0])
        filename_hint = 'blob_'
        suffix = ''
        filename = ''
        if filename_hint_propertyname:
            filename_hint = bulkload_state.current_entity[filename_hint_propertyname]
            filename = os.path.join(directory[0], filename_hint)
            if os.path.exists(filename):
                filename = ''
                (filename_hint, suffix) = os.path.splitext(filename_hint)
        if (not filename):
            filename = tempfile.mktemp(suffix, filename_hint, directory[0])
        f = open(filename, 'wb')
        f.write(value)
        f.close()
        return filename

    return transform_function
[ "def", "blob_to_file", "(", "filename_hint_propertyname", "=", "None", ",", "directory_hint", "=", "''", ")", ":", "directory", "=", "[", "]", "def", "transform_function", "(", "value", ",", "bulkload_state", ")", ":", "if", "(", "not", "directory", ")", ":", "parent_dir", "=", "os", ".", "path", ".", "dirname", "(", "bulkload_state", ".", "filename", ")", "directory", ".", "append", "(", "os", ".", "path", ".", "join", "(", "parent_dir", ",", "directory_hint", ")", ")", "if", "(", "directory", "[", "0", "]", "and", "(", "not", "os", ".", "path", ".", "exists", "(", "directory", "[", "0", "]", ")", ")", ")", ":", "os", ".", "makedirs", "(", "directory", "[", "0", "]", ")", "filename_hint", "=", "'blob_'", "suffix", "=", "''", "filename", "=", "''", "if", "filename_hint_propertyname", ":", "filename_hint", "=", "bulkload_state", ".", "current_entity", "[", "filename_hint_propertyname", "]", "filename", "=", "os", ".", "path", ".", "join", "(", "directory", "[", "0", "]", ",", "filename_hint", ")", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "filename", "=", "''", "(", "filename_hint", ",", "suffix", ")", "=", "os", ".", "path", ".", "splitext", "(", "filename_hint", ")", "if", "(", "not", "filename", ")", ":", "filename", "=", "tempfile", ".", "mktemp", "(", "suffix", ",", "filename_hint", ",", "directory", "[", "0", "]", ")", "f", "=", "open", "(", "filename", ",", "'wb'", ")", "f", ".", "write", "(", "value", ")", "f", ".", "close", "(", ")", "return", "filename", "return", "transform_function" ]
write the blob contents to a file .
train
false
7,852
def avail(search=None, verbose=False):
    ret = {}
    imgadm = _check_imgadm()
    cmd = '{0} avail -j'.format(imgadm)
    res = __salt__['cmd.run_all'](cmd)
    retcode = res['retcode']
    result = {}
    if (retcode != 0):
        ret['Error'] = _exit_status(retcode)
        return ret
    for image in json.loads(res['stdout']):
        if (image['manifest']['disabled'] or (not image['manifest']['public'])):
            continue
        if (search and (search not in image['manifest']['name'])):
            continue
        result[image['manifest']['uuid']] = _parse_image_meta(image, verbose)
    return result
[ "def", "avail", "(", "search", "=", "None", ",", "verbose", "=", "False", ")", ":", "ret", "=", "{", "}", "imgadm", "=", "_check_imgadm", "(", ")", "cmd", "=", "'{0} avail -j'", ".", "format", "(", "imgadm", ")", "res", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ")", "retcode", "=", "res", "[", "'retcode'", "]", "result", "=", "{", "}", "if", "(", "retcode", "!=", "0", ")", ":", "ret", "[", "'Error'", "]", "=", "_exit_status", "(", "retcode", ")", "return", "ret", "for", "image", "in", "json", ".", "loads", "(", "res", "[", "'stdout'", "]", ")", ":", "if", "(", "image", "[", "'manifest'", "]", "[", "'disabled'", "]", "or", "(", "not", "image", "[", "'manifest'", "]", "[", "'public'", "]", ")", ")", ":", "continue", "if", "(", "search", "and", "(", "search", "not", "in", "image", "[", "'manifest'", "]", "[", "'name'", "]", ")", ")", ":", "continue", "result", "[", "image", "[", "'manifest'", "]", "[", "'uuid'", "]", "]", "=", "_parse_image_meta", "(", "image", ",", "verbose", ")", "return", "result" ]
return a list of available images .
train
false
7,853
def _add_inventory_to_provider(conn, rp, inv_list, to_add):
    for rc_id in to_add:
        rc_str = _RC_CACHE.string_from_id(rc_id)
        inv_record = inv_list.find(rc_str)
        if (inv_record.capacity <= 0):
            raise exception.InvalidInventoryCapacity(resource_class=rc_str, resource_provider=rp.uuid)
        ins_stmt = _INV_TBL.insert().values(resource_provider_id=rp.id,
                                            resource_class_id=rc_id,
                                            total=inv_record.total,
                                            reserved=inv_record.reserved,
                                            min_unit=inv_record.min_unit,
                                            max_unit=inv_record.max_unit,
                                            step_size=inv_record.step_size,
                                            allocation_ratio=inv_record.allocation_ratio)
        conn.execute(ins_stmt)
[ "def", "_add_inventory_to_provider", "(", "conn", ",", "rp", ",", "inv_list", ",", "to_add", ")", ":", "for", "rc_id", "in", "to_add", ":", "rc_str", "=", "_RC_CACHE", ".", "string_from_id", "(", "rc_id", ")", "inv_record", "=", "inv_list", ".", "find", "(", "rc_str", ")", "if", "(", "inv_record", ".", "capacity", "<=", "0", ")", ":", "raise", "exception", ".", "InvalidInventoryCapacity", "(", "resource_class", "=", "rc_str", ",", "resource_provider", "=", "rp", ".", "uuid", ")", "ins_stmt", "=", "_INV_TBL", ".", "insert", "(", ")", ".", "values", "(", "resource_provider_id", "=", "rp", ".", "id", ",", "resource_class_id", "=", "rc_id", ",", "total", "=", "inv_record", ".", "total", ",", "reserved", "=", "inv_record", ".", "reserved", ",", "min_unit", "=", "inv_record", ".", "min_unit", ",", "max_unit", "=", "inv_record", ".", "max_unit", ",", "step_size", "=", "inv_record", ".", "step_size", ",", "allocation_ratio", "=", "inv_record", ".", "allocation_ratio", ")", "conn", ".", "execute", "(", "ins_stmt", ")" ]
inserts new inventory records for the supplied resource provider .
train
false
7,854
def contrasting_color_generator():

    def rgb_to_hex(rgb):
        return (u'#%02x%02x%02x' % tuple(rgb))

    triples = [(1, 0, 0), (0, 1, 0), (0, 0, 1), (1, 1, 0), (0, 1, 1), (1, 0, 1), (1, 1, 1)]
    n = (1 << 7)
    so_far = [[0, 0, 0]]
    while True:
        if (n == 0):
            (yield u'#000000')
        copy_so_far = list(so_far)
        for triple in triples:
            for previous in copy_so_far:
                rgb = [((n * triple[i]) + previous[i]) for i in range(3)]
                so_far.append(rgb)
                (yield rgb_to_hex(rgb))
        n >>= 1
[ "def", "contrasting_color_generator", "(", ")", ":", "def", "rgb_to_hex", "(", "rgb", ")", ":", "return", "(", "u'#%02x%02x%02x'", "%", "tuple", "(", "rgb", ")", ")", "triples", "=", "[", "(", "1", ",", "0", ",", "0", ")", ",", "(", "0", ",", "1", ",", "0", ")", ",", "(", "0", ",", "0", ",", "1", ")", ",", "(", "1", ",", "1", ",", "0", ")", ",", "(", "0", ",", "1", ",", "1", ")", ",", "(", "1", ",", "0", ",", "1", ")", ",", "(", "1", ",", "1", ",", "1", ")", "]", "n", "=", "(", "1", "<<", "7", ")", "so_far", "=", "[", "[", "0", ",", "0", ",", "0", "]", "]", "while", "True", ":", "if", "(", "n", "==", "0", ")", ":", "(", "yield", "u'#000000'", ")", "copy_so_far", "=", "list", "(", "so_far", ")", "for", "triple", "in", "triples", ":", "for", "previous", "in", "copy_so_far", ":", "rgb", "=", "[", "(", "(", "n", "*", "triple", "[", "i", "]", ")", "+", "previous", "[", "i", "]", ")", "for", "i", "in", "range", "(", "3", ")", "]", "so_far", ".", "append", "(", "rgb", ")", "(", "yield", "rgb_to_hex", "(", "rgb", ")", ")", "n", ">>=", "1" ]
generate contrasting colors by varying most significant bit of rgb first .
train
false
7,857
def real_scan_files(site, cache=None):
    task_fnames = set([])
    real_fnames = set([])
    output_folder = site.config['OUTPUT_FOLDER']
    for fname in _call_nikola_list(site, cache)[0]:
        fname = fname.strip()
        if fname.startswith(output_folder):
            task_fnames.add(fname)
    for (root, dirs, files) in os.walk(output_folder, followlinks=True):
        for src_name in files:
            fname = os.path.join(root, src_name)
            real_fnames.add(fname)
    only_on_output = list((real_fnames - task_fnames))
    only_on_input = list((task_fnames - real_fnames))
    return (only_on_output, only_on_input)
[ "def", "real_scan_files", "(", "site", ",", "cache", "=", "None", ")", ":", "task_fnames", "=", "set", "(", "[", "]", ")", "real_fnames", "=", "set", "(", "[", "]", ")", "output_folder", "=", "site", ".", "config", "[", "'OUTPUT_FOLDER'", "]", "for", "fname", "in", "_call_nikola_list", "(", "site", ",", "cache", ")", "[", "0", "]", ":", "fname", "=", "fname", ".", "strip", "(", ")", "if", "fname", ".", "startswith", "(", "output_folder", ")", ":", "task_fnames", ".", "add", "(", "fname", ")", "for", "(", "root", ",", "dirs", ",", "files", ")", "in", "os", ".", "walk", "(", "output_folder", ",", "followlinks", "=", "True", ")", ":", "for", "src_name", "in", "files", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "root", ",", "src_name", ")", "real_fnames", ".", "add", "(", "fname", ")", "only_on_output", "=", "list", "(", "(", "real_fnames", "-", "task_fnames", ")", ")", "only_on_input", "=", "list", "(", "(", "task_fnames", "-", "real_fnames", ")", ")", "return", "(", "only_on_output", ",", "only_on_input", ")" ]
scan for files .
train
false
7,858
def directional_variance_i(x_i, w):
    return (dot(x_i, direction(w)) ** 2)
[ "def", "directional_variance_i", "(", "x_i", ",", "w", ")", ":", "return", "(", "dot", "(", "x_i", ",", "direction", "(", "w", ")", ")", "**", "2", ")" ]
the variance of the row x_i in the direction w .
train
false
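dot and direction are outside helpers. A runnable sketch with plausible list-vector definitions filled in (the two helper bodies below are assumptions, not part of the row):

import math

# Assumed helpers (not in the row above): plain list-vector versions.
def dot(v, w):
    return sum((v_i * w_i) for (v_i, w_i) in zip(v, w))

def direction(w):
    mag = math.sqrt(dot(w, w))
    return [(w_i / mag) for w_i in w]

def directional_variance_i(x_i, w):
    return (dot(x_i, direction(w)) ** 2)

print(directional_variance_i([3, 4], [1, 0]))  # 9.0: squared component of [3, 4] along x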
7,859
def assert_grammar_validation(grammar_errors, test_obj):
    for (attr, value, error, message) in grammar_errors:
        with nt.assert_raises(error) as err:
            setattr(test_obj, attr, value)
        nt.assert_equal(err.exception.args[0], message)
[ "def", "assert_grammar_validation", "(", "grammar_errors", ",", "test_obj", ")", ":", "for", "(", "attr", ",", "value", ",", "error", ",", "message", ")", "in", "grammar_errors", ":", "with", "nt", ".", "assert_raises", "(", "error", ")", "as", "err", ":", "setattr", "(", "test_obj", ",", "attr", ",", "value", ")", "nt", ".", "assert_equal", "(", "err", ".", "exception", ".", "args", "[", "0", "]", ",", "message", ")" ]
check grammar methods for validation errors .
train
false
7,860
@pytest.mark.parametrize('parallel', [True, False])
def test_many_columns(parallel, read_basic):
    text = ' '.join([str(i) for i in range(500)])
    text += ((('\n' + text) + '\n') + text)
    table = read_basic(text, parallel=parallel)
    expected = Table([[i, i] for i in range(500)], names=[str(i) for i in range(500)])
    assert_table_equal(table, expected)
[ "@", "pytest", ".", "mark", ".", "parametrize", "(", "'parallel'", ",", "[", "True", ",", "False", "]", ")", "def", "test_many_columns", "(", "parallel", ",", "read_basic", ")", ":", "text", "=", "' '", ".", "join", "(", "[", "str", "(", "i", ")", "for", "i", "in", "range", "(", "500", ")", "]", ")", "text", "+=", "(", "(", "(", "'\\n'", "+", "text", ")", "+", "'\\n'", ")", "+", "text", ")", "table", "=", "read_basic", "(", "text", ",", "parallel", "=", "parallel", ")", "expected", "=", "Table", "(", "[", "[", "i", ",", "i", "]", "for", "i", "in", "range", "(", "500", ")", "]", ",", "names", "=", "[", "str", "(", "i", ")", "for", "i", "in", "range", "(", "500", ")", "]", ")", "assert_table_equal", "(", "table", ",", "expected", ")" ]
make sure memory reallocation works okay when the number of columns is large .
train
false
7,861
def get_matching_docs(dirname, suffixes, exclude_matchers=()):
    suffixpatterns = [('*' + s) for s in suffixes]
    for filename in get_matching_files(dirname, exclude_matchers):
        for suffixpattern in suffixpatterns:
            if fnmatch.fnmatch(filename, suffixpattern):
                (yield filename[:((- len(suffixpattern)) + 1)])
                break
[ "def", "get_matching_docs", "(", "dirname", ",", "suffixes", ",", "exclude_matchers", "=", "(", ")", ")", ":", "suffixpatterns", "=", "[", "(", "'*'", "+", "s", ")", "for", "s", "in", "suffixes", "]", "for", "filename", "in", "get_matching_files", "(", "dirname", ",", "exclude_matchers", ")", ":", "for", "suffixpattern", "in", "suffixpatterns", ":", "if", "fnmatch", ".", "fnmatch", "(", "filename", ",", "suffixpattern", ")", ":", "(", "yield", "filename", "[", ":", "(", "(", "-", "len", "(", "suffixpattern", ")", ")", "+", "1", ")", "]", ")", "break" ]
get all file names matching a suffix in a directory .
train
false
7,862
def _create_event():
    return windll.kernel32.CreateEventA(pointer(SECURITY_ATTRIBUTES()), BOOL(True), BOOL(False), None)
[ "def", "_create_event", "(", ")", ":", "return", "windll", ".", "kernel32", ".", "CreateEventA", "(", "pointer", "(", "SECURITY_ATTRIBUTES", "(", ")", ")", ",", "BOOL", "(", "True", ")", ",", "BOOL", "(", "False", ")", ",", "None", ")" ]
creates a win32 unnamed event .
train
true
7,863
def check_host_port(host, port):
    hostport = ('%s:%d' % (host, port))
    for regexp in conf.WHITELIST.get():
        if regexp.match(hostport):
            return True
    return False
[ "def", "check_host_port", "(", "host", ",", "port", ")", ":", "hostport", "=", "(", "'%s:%d'", "%", "(", "host", ",", "port", ")", ")", "for", "regexp", "in", "conf", ".", "WHITELIST", ".", "get", "(", ")", ":", "if", "regexp", ".", "match", "(", "hostport", ")", ":", "return", "True", "return", "False" ]
return true if this host:port pair is allowed to be proxied .
train
false
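conf.WHITELIST.get() supplies compiled host:port regexes from configuration. A sketch with that dependency stubbed (the stub patterns below are mine):

import re

# Stub for conf.WHITELIST.get(): a list of compiled host:port patterns.
WHITELIST = [re.compile(r'localhost:\d+'), re.compile(r'127\.0\.0\.1:8080')]

def check_host_port(host, port):
    hostport = ('%s:%d' % (host, port))
    for regexp in WHITELIST:
        if regexp.match(hostport):
            return True
    return False

print(check_host_port('localhost', 9000))  # True
print(check_host_port('example.com', 80))  # False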
7,864
def _build_cluster_status_fsm_table():
    S = ClusterStatusStates
    I = ClusterStatusInputs
    O = ClusterStatusOutputs
    table = TransitionTable()
    table = table.addTransitions(S.DISCONNECTED, {
        I.CONNECTED_TO_CONTROL_SERVICE: ([O.STORE_CLIENT], S.IGNORANT),
        I.SHUTDOWN: ([], S.SHUTDOWN)})
    table = table.addTransitions(S.IGNORANT, {
        I.DISCONNECTED_FROM_CONTROL_SERVICE: ([], S.DISCONNECTED),
        I.STATUS_UPDATE: ([O.UPDATE_STATUS], S.KNOWLEDGEABLE),
        I.SHUTDOWN: ([O.DISCONNECT], S.SHUTDOWN)})
    table = table.addTransitions(S.KNOWLEDGEABLE, {
        I.STATUS_UPDATE: ([O.UPDATE_STATUS], S.KNOWLEDGEABLE),
        I.DISCONNECTED_FROM_CONTROL_SERVICE: ([O.STOP], S.DISCONNECTED),
        I.SHUTDOWN: ([O.STOP, O.DISCONNECT], S.SHUTDOWN)})
    table = table.addTransitions(S.SHUTDOWN, {
        I.DISCONNECTED_FROM_CONTROL_SERVICE: ([], S.SHUTDOWN),
        I.STATUS_UPDATE: ([], S.SHUTDOWN)})
    return table
[ "def", "_build_cluster_status_fsm_table", "(", ")", ":", "S", "=", "ClusterStatusStates", "I", "=", "ClusterStatusInputs", "O", "=", "ClusterStatusOutputs", "table", "=", "TransitionTable", "(", ")", "table", "=", "table", ".", "addTransitions", "(", "S", ".", "DISCONNECTED", ",", "{", "I", ".", "CONNECTED_TO_CONTROL_SERVICE", ":", "(", "[", "O", ".", "STORE_CLIENT", "]", ",", "S", ".", "IGNORANT", ")", ",", "I", ".", "SHUTDOWN", ":", "(", "[", "]", ",", "S", ".", "SHUTDOWN", ")", "}", ")", "table", "=", "table", ".", "addTransitions", "(", "S", ".", "IGNORANT", ",", "{", "I", ".", "DISCONNECTED_FROM_CONTROL_SERVICE", ":", "(", "[", "]", ",", "S", ".", "DISCONNECTED", ")", ",", "I", ".", "STATUS_UPDATE", ":", "(", "[", "O", ".", "UPDATE_STATUS", "]", ",", "S", ".", "KNOWLEDGEABLE", ")", ",", "I", ".", "SHUTDOWN", ":", "(", "[", "O", ".", "DISCONNECT", "]", ",", "S", ".", "SHUTDOWN", ")", "}", ")", "table", "=", "table", ".", "addTransitions", "(", "S", ".", "KNOWLEDGEABLE", ",", "{", "I", ".", "STATUS_UPDATE", ":", "(", "[", "O", ".", "UPDATE_STATUS", "]", ",", "S", ".", "KNOWLEDGEABLE", ")", ",", "I", ".", "DISCONNECTED_FROM_CONTROL_SERVICE", ":", "(", "[", "O", ".", "STOP", "]", ",", "S", ".", "DISCONNECTED", ")", ",", "I", ".", "SHUTDOWN", ":", "(", "[", "O", ".", "STOP", ",", "O", ".", "DISCONNECT", "]", ",", "S", ".", "SHUTDOWN", ")", "}", ")", "table", "=", "table", ".", "addTransitions", "(", "S", ".", "SHUTDOWN", ",", "{", "I", ".", "DISCONNECTED_FROM_CONTROL_SERVICE", ":", "(", "[", "]", ",", "S", ".", "SHUTDOWN", ")", ",", "I", ".", "STATUS_UPDATE", ":", "(", "[", "]", ",", "S", ".", "SHUTDOWN", ")", "}", ")", "return", "table" ]
create the transitiontable needed by the cluster status fsm .
train
false
7,865
def restore_trash(cookie, tokens, fidlist):
    url = ''.join([const.PAN_API_URL,
                   'recycle/restore?channel=chunlei&clienttype=0&web=1',
                   '&t=', util.timestamp(),
                   '&bdstoken=', tokens['bdstoken']])
    data = ('fidlist=' + encoder.encode_uri_component(json.dumps(fidlist)))
    req = net.urlopen(url, headers={'Cookie': cookie.header_output(),
                                    'Content-type': const.CONTENT_FORM_UTF8},
                      data=data.encode())
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
[ "def", "restore_trash", "(", "cookie", ",", "tokens", ",", "fidlist", ")", ":", "url", "=", "''", ".", "join", "(", "[", "const", ".", "PAN_API_URL", ",", "'recycle/restore?channel=chunlei&clienttype=0&web=1'", ",", "'&t='", ",", "util", ".", "timestamp", "(", ")", ",", "'&bdstoken='", ",", "tokens", "[", "'bdstoken'", "]", "]", ")", "data", "=", "(", "'fidlist='", "+", "encoder", ".", "encode_uri_component", "(", "json", ".", "dumps", "(", "fidlist", ")", ")", ")", "req", "=", "net", ".", "urlopen", "(", "url", ",", "headers", "=", "{", "'Cookie'", ":", "cookie", ".", "header_output", "(", ")", ",", "'Content-type'", ":", "const", ".", "CONTENT_FORM_UTF8", "}", ",", "data", "=", "data", ".", "encode", "(", ")", ")", "if", "req", ":", "content", "=", "req", ".", "data", "return", "json", ".", "loads", "(", "content", ".", "decode", "(", ")", ")", "else", ":", "return", "None" ]
fidlist - the list of files/directories to restore .
train
true
7,867
def test_extract_array_1d_odd():
    assert np.all((extract_array(np.arange(4), (3,), ((-1),), fill_value=(-99)) == np.array([(-99), (-99), 0])))
    assert np.all((extract_array(np.arange(4), (3,), (0,), fill_value=(-99)) == np.array([(-99), 0, 1])))
    for i in [1, 2]:
        assert np.all((extract_array(np.arange(4), (3,), (i,)) == np.array([(i - 1), i, (i + 1)])))
    assert np.all((extract_array(np.arange(4), (3,), (3,), fill_value=(-99)) == np.array([2, 3, (-99)])))
    arrayin = np.arange(4.0)
    extracted = extract_array(arrayin, (3,), (4,))
    assert (extracted[0] == 3)
    assert np.isnan(extracted[1])
    assert (extracted.dtype == arrayin.dtype)
[ "def", "test_extract_array_1d_odd", "(", ")", ":", "assert", "np", ".", "all", "(", "(", "extract_array", "(", "np", ".", "arange", "(", "4", ")", ",", "(", "3", ",", ")", ",", "(", "(", "-", "1", ")", ",", ")", ",", "fill_value", "=", "(", "-", "99", ")", ")", "==", "np", ".", "array", "(", "[", "(", "-", "99", ")", ",", "(", "-", "99", ")", ",", "0", "]", ")", ")", ")", "assert", "np", ".", "all", "(", "(", "extract_array", "(", "np", ".", "arange", "(", "4", ")", ",", "(", "3", ",", ")", ",", "(", "0", ",", ")", ",", "fill_value", "=", "(", "-", "99", ")", ")", "==", "np", ".", "array", "(", "[", "(", "-", "99", ")", ",", "0", ",", "1", "]", ")", ")", ")", "for", "i", "in", "[", "1", ",", "2", "]", ":", "assert", "np", ".", "all", "(", "(", "extract_array", "(", "np", ".", "arange", "(", "4", ")", ",", "(", "3", ",", ")", ",", "(", "i", ",", ")", ")", "==", "np", ".", "array", "(", "[", "(", "i", "-", "1", ")", ",", "i", ",", "(", "i", "+", "1", ")", "]", ")", ")", ")", "assert", "np", ".", "all", "(", "(", "extract_array", "(", "np", ".", "arange", "(", "4", ")", ",", "(", "3", ",", ")", ",", "(", "3", ",", ")", ",", "fill_value", "=", "(", "-", "99", ")", ")", "==", "np", ".", "array", "(", "[", "2", ",", "3", ",", "(", "-", "99", ")", "]", ")", ")", ")", "arrayin", "=", "np", ".", "arange", "(", "4.0", ")", "extracted", "=", "extract_array", "(", "arrayin", ",", "(", "3", ",", ")", ",", "(", "4", ",", ")", ")", "assert", "(", "extracted", "[", "0", "]", "==", "3", ")", "assert", "np", ".", "isnan", "(", "extracted", "[", "1", "]", ")", "assert", "(", "extracted", ".", "dtype", "==", "arrayin", ".", "dtype", ")" ]
extract 1 d arrays .
train
false
7,868
def add_translation(key, translation):
    if (not hasattr(_to_save, 'translations')):
        _to_save.translations = {}
    _to_save.translations.setdefault(key, [])
    _to_save.translations[key].append(translation)
[ "def", "add_translation", "(", "key", ",", "translation", ")", ":", "if", "(", "not", "hasattr", "(", "_to_save", ",", "'translations'", ")", ")", ":", "_to_save", ".", "translations", "=", "{", "}", "_to_save", ".", "translations", ".", "setdefault", "(", "key", ",", "[", "]", ")", "_to_save", ".", "translations", "[", "key", "]", ".", "append", "(", "translation", ")" ]
queue a translation that needs to be saved for a particular object .
train
false
7,869
def _read_password_file(b_path):
    content = None
    if os.path.exists(b_path):
        with open(b_path, 'rb') as f:
            b_content = f.read().rstrip()
        content = to_text(b_content, errors='surrogate_or_strict')
    return content
[ "def", "_read_password_file", "(", "b_path", ")", ":", "content", "=", "None", "if", "os", ".", "path", ".", "exists", "(", "b_path", ")", ":", "with", "open", "(", "b_path", ",", "'rb'", ")", "as", "f", ":", "b_content", "=", "f", ".", "read", "(", ")", ".", "rstrip", "(", ")", "content", "=", "to_text", "(", "b_content", ",", "errors", "=", "'surrogate_or_strict'", ")", "return", "content" ]
read the contents of a password file and return it :arg b_path: a byte string containing the path to the password file :returns: a text string containing the contents of the password file or none if no password file was present .
train
false
7,870
def make_argument_parser():
    parser = argparse.ArgumentParser(description='Launch a prediction from a pkl file')
    parser.add_argument('model_filename', help='Specifies the pkl model file')
    parser.add_argument('test_filename', help='Specifies the csv file with the values to predict')
    parser.add_argument('output_filename', help='Specifies the predictions output file')
    parser.add_argument('--prediction_type', '-P', default='classification', help='Prediction type (classification/regression)')
    parser.add_argument('--output_type', '-T', default='int', help='Output variable type (int/float)')
    parser.add_argument('--has-headers', '-H', dest='has_headers', action='store_true', help='Indicates the first row in the input file is feature labels')
    parser.add_argument('--has-row-label', '-L', dest='has_row_label', action='store_true', help='Indicates the first column in the input file is row labels')
    parser.add_argument('--delimiter', '-D', default=',', help="Specifies the CSV delimiter for the test file. Usual values are comma (default) ',' semicolon ';' colon ':' tabulation '\\t' and space ' '")
    return parser
[ "def", "make_argument_parser", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Launch a prediction from a pkl file'", ")", "parser", ".", "add_argument", "(", "'model_filename'", ",", "help", "=", "'Specifies the pkl model file'", ")", "parser", ".", "add_argument", "(", "'test_filename'", ",", "help", "=", "'Specifies the csv file with the values to predict'", ")", "parser", ".", "add_argument", "(", "'output_filename'", ",", "help", "=", "'Specifies the predictions output file'", ")", "parser", ".", "add_argument", "(", "'--prediction_type'", ",", "'-P'", ",", "default", "=", "'classification'", ",", "help", "=", "'Prediction type (classification/regression)'", ")", "parser", ".", "add_argument", "(", "'--output_type'", ",", "'-T'", ",", "default", "=", "'int'", ",", "help", "=", "'Output variable type (int/float)'", ")", "parser", ".", "add_argument", "(", "'--has-headers'", ",", "'-H'", ",", "dest", "=", "'has_headers'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Indicates the first row in the input file is feature labels'", ")", "parser", ".", "add_argument", "(", "'--has-row-label'", ",", "'-L'", ",", "dest", "=", "'has_row_label'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Indicates the first column in the input file is row labels'", ")", "parser", ".", "add_argument", "(", "'--delimiter'", ",", "'-D'", ",", "default", "=", "','", ",", "help", "=", "\"Specifies the CSV delimiter for the test file. Usual values are comma (default) ',' semicolon ';' colon ':' tabulation '\\\\t' and space ' '\"", ")", "return", "parser" ]
creates an argumentparser to read the options for this script from sys.argv .
train
false
7,871
@before.each_scenario
def reset_data(scenario):
    LOGGER.debug('Flushing the test database...')
    call_command('flush', interactive=False, verbosity=0)
    world.absorb({}, 'scenario_dict')
[ "@", "before", ".", "each_scenario", "def", "reset_data", "(", "scenario", ")", ":", "LOGGER", ".", "debug", "(", "'Flushing the test database...'", ")", "call_command", "(", "'flush'", ",", "interactive", "=", "False", ",", "verbosity", "=", "0", ")", "world", ".", "absorb", "(", "{", "}", ",", "'scenario_dict'", ")" ]
clean out the django test database defined in the envs/acceptance .
train
false
7,872
def ZigZagEncode(value):
    if (value >= 0):
        return (value << 1)
    return ((value << 1) ^ (~ 0))
[ "def", "ZigZagEncode", "(", "value", ")", ":", "if", "(", "value", ">=", "0", ")", ":", "return", "(", "value", "<<", "1", ")", "return", "(", "(", "value", "<<", "1", ")", "^", "(", "~", "0", ")", ")" ]
zigzag transform: encodes signed integers so that they can be effectively used with varint encoding .
train
false
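The zigzag transform above interleaves signed values onto the non-negative integers (0, -1, 1, -2, 2 map to 0, 1, 2, 3, 4), which keeps small magnitudes small for varint encoding. A self-contained check, with the function repeated from the row above:

def ZigZagEncode(value):
    if (value >= 0):
        return (value << 1)
    return ((value << 1) ^ (~ 0))

# Non-negative n maps to 2n; negative n maps to -2n - 1.
assert [ZigZagEncode(v) for v in (0, -1, 1, -2, 2)] == [0, 1, 2, 3, 4]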
7,873
@hook.regex('vimeo.com/([0-9]+)')
def vimeo_url(match):
    info = http.get_json(('http://vimeo.com/api/v2/video/%s.json' % match.group(1)))
    if info:
        info[0]['duration'] = timeformat.format_time(info[0]['duration'])
        info[0]['stats_number_of_likes'] = format(info[0]['stats_number_of_likes'], ',d')
        info[0]['stats_number_of_plays'] = format(info[0]['stats_number_of_plays'], ',d')
        return ('\x02%(title)s\x02 - length \x02%(duration)s\x02 - \x02%(stats_number_of_likes)s\x02 likes - \x02%(stats_number_of_plays)s\x02 plays - \x02%(user_name)s\x02 on \x02%(upload_date)s\x02' % info[0])
[ "@", "hook", ".", "regex", "(", "'vimeo.com/([0-9]+)'", ")", "def", "vimeo_url", "(", "match", ")", ":", "info", "=", "http", ".", "get_json", "(", "(", "'http://vimeo.com/api/v2/video/%s.json'", "%", "match", ".", "group", "(", "1", ")", ")", ")", "if", "info", ":", "info", "[", "0", "]", "[", "'duration'", "]", "=", "timeformat", ".", "format_time", "(", "info", "[", "0", "]", "[", "'duration'", "]", ")", "info", "[", "0", "]", "[", "'stats_number_of_likes'", "]", "=", "format", "(", "info", "[", "0", "]", "[", "'stats_number_of_likes'", "]", ",", "',d'", ")", "info", "[", "0", "]", "[", "'stats_number_of_plays'", "]", "=", "format", "(", "info", "[", "0", "]", "[", "'stats_number_of_plays'", "]", ",", "',d'", ")", "return", "(", "'\\x02%(title)s\\x02 - length \\x02%(duration)s\\x02 - \\x02%(stats_number_of_likes)s\\x02 likes - \\x02%(stats_number_of_plays)s\\x02 plays - \\x02%(user_name)s\\x02 on \\x02%(upload_date)s\\x02'", "%", "info", "[", "0", "]", ")" ]
vimeo <url> -- returns information on the vimeo video at <url> .
train
false
7,874
def ValidateToken(token, targets):

    def GetSubjectForError():
        if (len(targets) == 1):
            return list(targets)[0]
        else:
            return None

    if (not token):
        raise access_control.UnauthorizedAccess(('Must give an authorization token for %s' % targets), subject=GetSubjectForError())
    token.CheckExpiry()
    if (not token.username):
        raise access_control.UnauthorizedAccess(('Must specify a username for access to %s.' % targets), subject=GetSubjectForError())
    return True
[ "def", "ValidateToken", "(", "token", ",", "targets", ")", ":", "def", "GetSubjectForError", "(", ")", ":", "if", "(", "len", "(", "targets", ")", "==", "1", ")", ":", "return", "list", "(", "targets", ")", "[", "0", "]", "else", ":", "return", "None", "if", "(", "not", "token", ")", ":", "raise", "access_control", ".", "UnauthorizedAccess", "(", "(", "'Must give an authorization token for %s'", "%", "targets", ")", ",", "subject", "=", "GetSubjectForError", "(", ")", ")", "token", ".", "CheckExpiry", "(", ")", "if", "(", "not", "token", ".", "username", ")", ":", "raise", "access_control", ".", "UnauthorizedAccess", "(", "(", "'Must specify a username for access to %s.'", "%", "targets", ")", ",", "subject", "=", "GetSubjectForError", "(", ")", ")", "return", "True" ]
does basic token validation .
train
true
7,875
def byte_to_int(b):
    if (sys.version_info >= (3, 0)):
        return b
    return ord(b)
[ "def", "byte_to_int", "(", "b", ")", ":", "if", "(", "sys", ".", "version_info", ">=", "(", "3", ",", "0", ")", ")", ":", "return", "b", "return", "ord", "(", "b", ")" ]
given an element in a binary buffer, return its integer value .
train
false
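Indexing a bytes object yields an int on Python 3 but a one-character str on Python 2; the helper above papers over that. A self-contained check, with the function repeated from the row above:

import sys

def byte_to_int(b):
    if (sys.version_info >= (3, 0)):
        return b   # bytes indexing already yields an int on Python 3
    return ord(b)  # Python 2: indexing a str yields a 1-char string

buf = b'\x00\x7f\xff'
assert [byte_to_int(buf[i]) for i in range(len(buf))] == [0, 127, 255]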
7,876
def console_write(string, params=None, strip=True, indent=None, prefix=True):
    string = text.format(str_cls(string), params, strip=strip, indent=indent)
    if (sys.version_info < (3,)):
        if isinstance(string, str_cls):
            string = string.encode('UTF-8')
    if prefix:
        sys.stdout.write('Package Control: ')
    print string
[ "def", "console_write", "(", "string", ",", "params", "=", "None", ",", "strip", "=", "True", ",", "indent", "=", "None", ",", "prefix", "=", "True", ")", ":", "string", "=", "text", ".", "format", "(", "str_cls", "(", "string", ")", ",", "params", ",", "strip", "=", "strip", ",", "indent", "=", "indent", ")", "if", "(", "sys", ".", "version_info", "<", "(", "3", ",", ")", ")", ":", "if", "isinstance", "(", "string", ",", "str_cls", ")", ":", "string", "=", "string", ".", "encode", "(", "'UTF-8'", ")", "if", "prefix", ":", "sys", ".", "stdout", ".", "write", "(", "'Package Control: '", ")", "print", "string" ]
writes a value to the sublime text console .
train
false
7,877
def send_ned_velocity(velocity_x, velocity_y, velocity_z, duration):
    msg = vehicle.message_factory.set_position_target_local_ned_encode(
        0, 0, 0, mavutil.mavlink.MAV_FRAME_LOCAL_NED, 4039,
        0, 0, 0, velocity_x, velocity_y, velocity_z, 0, 0, 0, 0, 0)
    for x in range(0, duration):
        vehicle.send_mavlink(msg)
        time.sleep(1)
[ "def", "send_ned_velocity", "(", "velocity_x", ",", "velocity_y", ",", "velocity_z", ",", "duration", ")", ":", "msg", "=", "vehicle", ".", "message_factory", ".", "set_position_target_local_ned_encode", "(", "0", ",", "0", ",", "0", ",", "mavutil", ".", "mavlink", ".", "MAV_FRAME_LOCAL_NED", ",", "4039", ",", "0", ",", "0", ",", "0", ",", "velocity_x", ",", "velocity_y", ",", "velocity_z", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ")", "for", "x", "in", "range", "(", "0", ",", "duration", ")", ":", "vehicle", ".", "send_mavlink", "(", "msg", ")", "time", ".", "sleep", "(", "1", ")" ]
move vehicle in direction based on specified velocity vectors and for the specified duration .
train
true
7,879
def store_token(mx_id, token):
    AUTH_TOKENS[mx_id] = token
    with open(SESSION_FILE, 'w') as handle:
        handle.write(json.dumps(AUTH_TOKENS))
[ "def", "store_token", "(", "mx_id", ",", "token", ")", ":", "AUTH_TOKENS", "[", "mx_id", "]", "=", "token", "with", "open", "(", "SESSION_FILE", ",", "'w'", ")", "as", "handle", ":", "handle", ".", "write", "(", "json", ".", "dumps", "(", "AUTH_TOKENS", ")", ")" ]
store authentication token to session and persistent storage .
train
false
7,881
def git_am_patch_split(f, encoding=None):
    encoding = (encoding or getattr(f, 'encoding', 'ascii'))
    contents = f.read()
    if ((type(contents) is bytes) and getattr(email.parser, 'BytesParser', None)):
        parser = email.parser.BytesParser()
        msg = parser.parsebytes(contents)
    else:
        parser = email.parser.Parser()
        msg = parser.parsestr(contents)
    return parse_patch_message(msg, encoding)
[ "def", "git_am_patch_split", "(", "f", ",", "encoding", "=", "None", ")", ":", "encoding", "=", "(", "encoding", "or", "getattr", "(", "f", ",", "'encoding'", ",", "'ascii'", ")", ")", "contents", "=", "f", ".", "read", "(", ")", "if", "(", "(", "type", "(", "contents", ")", "is", "bytes", ")", "and", "getattr", "(", "email", ".", "parser", ",", "'BytesParser'", ",", "None", ")", ")", ":", "parser", "=", "email", ".", "parser", ".", "BytesParser", "(", ")", "msg", "=", "parser", ".", "parsebytes", "(", "contents", ")", "else", ":", "parser", "=", "email", ".", "parser", ".", "Parser", "(", ")", "msg", "=", "parser", ".", "parsestr", "(", "contents", ")", "return", "parse_patch_message", "(", "msg", ",", "encoding", ")" ]
parse a git-am-style patch and split it up into bits .
train
false
7,883
def read_mm_stamp(fh, byteorder, dtype, count):
    return fh.read_array((byteorder + 'f8'), 8)
[ "def", "read_mm_stamp", "(", "fh", ",", "byteorder", ",", "dtype", ",", "count", ")", ":", "return", "fh", ".", "read_array", "(", "(", "byteorder", "+", "'f8'", ")", ",", "8", ")" ]
read mm_stamp tag from file and return as numpy array .
train
false
7,884
def p_and_expression_2(t):
    pass
[ "def", "p_and_expression_2", "(", "t", ")", ":", "pass" ]
and_expression : and_expression and equality_expression .
train
false
7,885
def create_vdir(name, site, sourcepath, app='/'):
    ret = {'name': name, 'changes': {}, 'comment': str(), 'result': None}
    current_vdirs = __salt__['win_iis.list_vdirs'](site, app)
    if (name in current_vdirs):
        ret['comment'] = 'Virtual directory already present: {0}'.format(name)
        ret['result'] = True
    elif __opts__['test']:
        ret['comment'] = 'Virtual directory will be created: {0}'.format(name)
        ret['changes'] = {'old': None, 'new': name}
    else:
        ret['comment'] = 'Created virtual directory: {0}'.format(name)
        ret['changes'] = {'old': None, 'new': name}
        ret['result'] = __salt__['win_iis.create_vdir'](name, site, sourcepath, app)
    return ret
[ "def", "create_vdir", "(", "name", ",", "site", ",", "sourcepath", ",", "app", "=", "'/'", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "str", "(", ")", ",", "'result'", ":", "None", "}", "current_vdirs", "=", "__salt__", "[", "'win_iis.list_vdirs'", "]", "(", "site", ",", "app", ")", "if", "(", "name", "in", "current_vdirs", ")", ":", "ret", "[", "'comment'", "]", "=", "'Virtual directory already present: {0}'", ".", "format", "(", "name", ")", "ret", "[", "'result'", "]", "=", "True", "elif", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'Virtual directory will be created: {0}'", ".", "format", "(", "name", ")", "ret", "[", "'changes'", "]", "=", "{", "'old'", ":", "None", ",", "'new'", ":", "name", "}", "else", ":", "ret", "[", "'comment'", "]", "=", "'Created virtual directory: {0}'", ".", "format", "(", "name", ")", "ret", "[", "'changes'", "]", "=", "{", "'old'", ":", "None", ",", "'new'", ":", "name", "}", "ret", "[", "'result'", "]", "=", "__salt__", "[", "'win_iis.create_vdir'", "]", "(", "name", ",", "site", ",", "sourcepath", ",", "app", ")", "return", "ret" ]
create an iis virtual directory .
train
true
7,886
@_get_client
def image_member_find(client, image_id=None, member=None, status=None, include_deleted=False):
    return client.image_member_find(image_id=image_id, member=member, status=status, include_deleted=include_deleted)
[ "@", "_get_client", "def", "image_member_find", "(", "client", ",", "image_id", "=", "None", ",", "member", "=", "None", ",", "status", "=", "None", ",", "include_deleted", "=", "False", ")", ":", "return", "client", ".", "image_member_find", "(", "image_id", "=", "image_id", ",", "member", "=", "member", ",", "status", "=", "status", ",", "include_deleted", "=", "include_deleted", ")" ]
find all members that meet the given criteria .
train
false
7,887
def search_all(*args, **kwargs):
    from .. import conf
    all_results = {}
    catalog_db = kwargs.get(u'catalog_db', None)
    if (u'catalog_db' in kwargs):
        kwargs.pop(u'catalog_db')
    cache = kwargs.get(u'cache', True)
    verbose = kwargs.get(u'verbose', True)
    catalogs = vos_catalog._get_catalogs(conf.conesearch_dbname, catalog_db, cache=cache, verbose=verbose)
    for (name, catalog) in catalogs:
        try:
            result = conesearch(catalog_db=catalog, *args, **kwargs)
        except VOSError:
            pass
        else:
            all_results[result.url] = result
    return all_results
[ "def", "search_all", "(", "*", "args", ",", "**", "kwargs", ")", ":", "from", ".", ".", "import", "conf", "all_results", "=", "{", "}", "catalog_db", "=", "kwargs", ".", "get", "(", "u'catalog_db'", ",", "None", ")", "if", "(", "u'catalog_db'", "in", "kwargs", ")", ":", "kwargs", ".", "pop", "(", "u'catalog_db'", ")", "cache", "=", "kwargs", ".", "get", "(", "u'cache'", ",", "True", ")", "verbose", "=", "kwargs", ".", "get", "(", "u'verbose'", ",", "True", ")", "catalogs", "=", "vos_catalog", ".", "_get_catalogs", "(", "conf", ".", "conesearch_dbname", ",", "catalog_db", ",", "cache", "=", "cache", ",", "verbose", "=", "verbose", ")", "for", "(", "name", ",", "catalog", ")", "in", "catalogs", ":", "try", ":", "result", "=", "conesearch", "(", "catalog_db", "=", "catalog", ",", "*", "args", ",", "**", "kwargs", ")", "except", "VOSError", ":", "pass", "else", ":", "all_results", "[", "result", ".", "url", "]", "=", "result", "return", "all_results" ]
perform cone search and returns the results of all successful queries .
train
false
7,888
def add_trigger_models(trigger_types):
    [r for r in (_validate_trigger_type(trigger_type) for trigger_type in trigger_types) if (r is not None)]
    result = []
    for trigger_type in trigger_types:
        item = _add_trigger_models(trigger_type=trigger_type)
        if item:
            result.append(item)
    return result
[ "def", "add_trigger_models", "(", "trigger_types", ")", ":", "[", "r", "for", "r", "in", "(", "_validate_trigger_type", "(", "trigger_type", ")", "for", "trigger_type", "in", "trigger_types", ")", "if", "(", "r", "is", "not", "None", ")", "]", "result", "=", "[", "]", "for", "trigger_type", "in", "trigger_types", ":", "item", "=", "_add_trigger_models", "(", "trigger_type", "=", "trigger_type", ")", "if", "item", ":", "result", ".", "append", "(", "item", ")", "return", "result" ]
register trigger types .
train
false
7,889
@curry
def apply(f, *args, **kwargs):
    return f(*args, **kwargs)
[ "@", "curry", "def", "apply", "(", "f", ",", "*", "args", ",", "**", "kwargs", ")", ":", "return", "f", "(", "*", "args", ",", "**", "kwargs", ")" ]
apply a function to the given positional and keyword arguments .
train
false
7,890
def UnpackLongList(data):
    if ((len(data) > 17) and (data[:8] == '\xff\xff\xff\xff\xff\xff\xff\xff')):
        data = zlib.decompress(data[8:])
    return list(struct.unpack(('<' + ('q' * (len(data) // 8))), data))
[ "def", "UnpackLongList", "(", "data", ")", ":", "if", "(", "(", "len", "(", "data", ")", ">", "17", ")", "and", "(", "data", "[", ":", "8", "]", "==", "'\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff'", ")", ")", ":", "data", "=", "zlib", ".", "decompress", "(", "data", "[", "8", ":", "]", ")", "return", "list", "(", "struct", ".", "unpack", "(", "(", "'<'", "+", "(", "'q'", "*", "(", "len", "(", "data", ")", "//", "8", ")", ")", ")", ",", "data", ")", ")" ]
unpack a list of longs previously packed using packlonglist .
train
false
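A round-trip check of the row above. The function is repeated with the 8-byte compression sentinel written as a bytes literal so it runs on Python 3 (that tweak is mine):

import struct
import zlib

def UnpackLongList(data):
    # Sentinel as a bytes literal for Python 3; otherwise as in the row above.
    if ((len(data) > 17) and (data[:8] == (b'\xff' * 8))):
        data = zlib.decompress(data[8:])
    return list(struct.unpack(('<' + ('q' * (len(data) // 8))), data))

packed = struct.pack('<3q', 1, -2, 3)  # what a matching PackLongList would emit
assert UnpackLongList(packed) == [1, -2, 3]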
7,891
def check_barcodes(bc_to_sid, barcode_len, barcode_type):
    barcode_len_in_map = len(bc_to_sid.keys()[0])
    if (barcode_len_in_map != barcode_len):
        raise BarcodeLenMismatchError(('Barcodes in mapping file are of length %d, but expected length is %d.' % (barcode_len_in_map, barcode_len)))
    if (barcode_type == 'golay_12'):
        invalid_golay_barcodes = get_invalid_golay_barcodes(bc_to_sid.keys())
        if invalid_golay_barcodes:
            raise InvalidGolayBarcodeError(('Some or all barcodes in the mapping file are not valid golay codes. Do they need to be reverse complemented? If these are not golay barcodes either pass --barcode_type 12 to disable barcode error correction, or pass --barcode_type # if the barcodes are not 12 base pairs, where # is the size of the barcodes.\n\nInvalid barcodes: %s' % ' '.join(invalid_golay_barcodes)))
[ "def", "check_barcodes", "(", "bc_to_sid", ",", "barcode_len", ",", "barcode_type", ")", ":", "barcode_len_in_map", "=", "len", "(", "bc_to_sid", ".", "keys", "(", ")", "[", "0", "]", ")", "if", "(", "barcode_len_in_map", "!=", "barcode_len", ")", ":", "raise", "BarcodeLenMismatchError", "(", "(", "'Barcodes in mapping file are of length %d, but expected length is %d.'", "%", "(", "barcode_len_in_map", ",", "barcode_len", ")", ")", ")", "if", "(", "barcode_type", "==", "'golay_12'", ")", ":", "invalid_golay_barcodes", "=", "get_invalid_golay_barcodes", "(", "bc_to_sid", ".", "keys", "(", ")", ")", "if", "invalid_golay_barcodes", ":", "raise", "InvalidGolayBarcodeError", "(", "(", "'Some or all barcodes in the mapping file are not valid golay codes. Do they need to be reverse complemented? If these are not golay barcodes either pass --barcode_type 12 to disable barcode error correction, or pass --barcode_type # if the barcodes are not 12 base pairs, where # is the size of the barcodes.\\n\\nInvalid barcodes: %s'", "%", "' '", ".", "join", "(", "invalid_golay_barcodes", ")", ")", ")" ]
make sure that barcodes are of the length the user specified and, for golay_12, are valid golay codes .
train
false
7,892
def _to_player(accessing_obj): if utils.inherits_from(accessing_obj, 'evennia.objects.objects.DefaultObject'): accessing_obj = accessing_obj.player return accessing_obj
[ "def", "_to_player", "(", "accessing_obj", ")", ":", "if", "utils", ".", "inherits_from", "(", "accessing_obj", ",", "'evennia.objects.objects.DefaultObject'", ")", ":", "accessing_obj", "=", "accessing_obj", ".", "player", "return", "accessing_obj" ]
helper function that resolves an accessing object to its player .
train
false
7,893
def copula_bv_ev(u, v, transform, args=()): return np.exp((np.log((u * v)) * transform((np.log(v) / np.log((u * v))), *args)))
[ "def", "copula_bv_ev", "(", "u", ",", "v", ",", "transform", ",", "args", "=", "(", ")", ")", ":", "return", "np", ".", "exp", "(", "(", "np", ".", "log", "(", "(", "u", "*", "v", ")", ")", "*", "transform", "(", "(", "np", ".", "log", "(", "v", ")", "/", "np", ".", "log", "(", "(", "u", "*", "v", ")", ")", ")", ",", "*", "args", ")", ")", ")" ]
generic bivariate extreme value copula .
train
false
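A quick sanity check of the copula_bv_ev entry above: with the trivial Pickands dependence function A(t) = 1, the extreme value copula reduces to the independence copula C(u, v) = u*v:

import numpy as np

independence = lambda t: 1.0    # trivial dependence function
u, v = 0.3, 0.7
assert np.isclose(copula_bv_ev(u, v, independence), u * v)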
7,894
def add_team_repo(repo_name, team_name, profile='github', permission=None): team = get_team(team_name, profile=profile) if (not team): log.error('Team {0} does not exist'.format(team_name)) return False try: client = _get_client(profile) organization = client.get_organization(_get_config_value(profile, 'org_name')) team = organization.get_team(team['id']) repo = organization.get_repo(repo_name) except UnknownObjectException as e: log.exception('Resource not found: {0}'.format(team['id'])) return False params = None if (permission is not None): params = {'permission': permission} (headers, data) = team._requester.requestJsonAndCheck('PUT', ((team.url + '/repos/') + repo._identity), input=params) list_team_repos(team_name, profile=profile, ignore_cache=True) return True
[ "def", "add_team_repo", "(", "repo_name", ",", "team_name", ",", "profile", "=", "'github'", ",", "permission", "=", "None", ")", ":", "team", "=", "get_team", "(", "team_name", ",", "profile", "=", "profile", ")", "if", "(", "not", "team", ")", ":", "log", ".", "error", "(", "'Team {0} does not exist'", ".", "format", "(", "team_name", ")", ")", "return", "False", "try", ":", "client", "=", "_get_client", "(", "profile", ")", "organization", "=", "client", ".", "get_organization", "(", "_get_config_value", "(", "profile", ",", "'org_name'", ")", ")", "team", "=", "organization", ".", "get_team", "(", "team", "[", "'id'", "]", ")", "repo", "=", "organization", ".", "get_repo", "(", "repo_name", ")", "except", "UnknownObjectException", "as", "e", ":", "log", ".", "exception", "(", "'Resource not found: {0}'", ".", "format", "(", "team", "[", "'id'", "]", ")", ")", "return", "False", "params", "=", "None", "if", "(", "permission", "is", "not", "None", ")", ":", "params", "=", "{", "'permission'", ":", "permission", "}", "(", "headers", ",", "data", ")", "=", "team", ".", "_requester", ".", "requestJsonAndCheck", "(", "'PUT'", ",", "(", "(", "team", ".", "url", "+", "'/repos/'", ")", "+", "repo", ".", "_identity", ")", ",", "input", "=", "params", ")", "list_team_repos", "(", "team_name", ",", "profile", "=", "profile", ",", "ignore_cache", "=", "True", ")", "return", "True" ]
adds a repository to a team with team_name .
train
true
7,895
@login_required @ensure_csrf_cookie @require_http_methods(('POST', 'PUT', 'DELETE')) def signatory_detail_handler(request, course_key_string, certificate_id, signatory_id): course_key = CourseKey.from_string(course_key_string) store = modulestore() with store.bulk_operations(course_key): course = _get_course_and_check_access(course_key, request.user) certificates_list = course.certificates['certificates'] match_cert = None for (index, cert) in enumerate(certificates_list): if (certificate_id is not None): if (int(cert['id']) == int(certificate_id)): match_cert = cert if (request.method == 'DELETE'): if (not match_cert): return JsonResponse(status=404) CertificateManager.remove_signatory(request=request, store=store, course=course, certificate_id=certificate_id, signatory_id=signatory_id) return JsonResponse(status=204)
[ "@", "login_required", "@", "ensure_csrf_cookie", "@", "require_http_methods", "(", "(", "'POST'", ",", "'PUT'", ",", "'DELETE'", ")", ")", "def", "signatory_detail_handler", "(", "request", ",", "course_key_string", ",", "certificate_id", ",", "signatory_id", ")", ":", "course_key", "=", "CourseKey", ".", "from_string", "(", "course_key_string", ")", "store", "=", "modulestore", "(", ")", "with", "store", ".", "bulk_operations", "(", "course_key", ")", ":", "course", "=", "_get_course_and_check_access", "(", "course_key", ",", "request", ".", "user", ")", "certificates_list", "=", "course", ".", "certificates", "[", "'certificates'", "]", "match_cert", "=", "None", "for", "(", "index", ",", "cert", ")", "in", "enumerate", "(", "certificates_list", ")", ":", "if", "(", "certificate_id", "is", "not", "None", ")", ":", "if", "(", "int", "(", "cert", "[", "'id'", "]", ")", "==", "int", "(", "certificate_id", ")", ")", ":", "match_cert", "=", "cert", "if", "(", "request", ".", "method", "==", "'DELETE'", ")", ":", "if", "(", "not", "match_cert", ")", ":", "return", "JsonResponse", "(", "status", "=", "404", ")", "CertificateManager", ".", "remove_signatory", "(", "request", "=", "request", ",", "store", "=", "store", ",", "course", "=", "course", ",", "certificate_id", "=", "certificate_id", ",", "signatory_id", "=", "signatory_id", ")", "return", "JsonResponse", "(", "status", "=", "204", ")" ]
json api endpoint for manipulating a specific course certificate signatory via its internal identifier .
train
false
7,896
def bfs_identity_search(gate_list, nqubits, max_depth=None, identity_only=False): if ((max_depth is None) or (max_depth <= 0)): max_depth = len(gate_list) id_only = identity_only queue = deque([()]) ids = set() while (len(queue) > 0): current_circuit = queue.popleft() for next_gate in gate_list: new_circuit = (current_circuit + (next_gate,)) circuit_reducible = is_reducible(new_circuit, nqubits, 1, len(new_circuit)) if (is_scalar_matrix(new_circuit, nqubits, id_only) and (not is_degenerate(ids, new_circuit)) and (not circuit_reducible)): ids.add(GateIdentity(*new_circuit)) elif ((len(new_circuit) < max_depth) and (not circuit_reducible)): queue.append(new_circuit) return ids
[ "def", "bfs_identity_search", "(", "gate_list", ",", "nqubits", ",", "max_depth", "=", "None", ",", "identity_only", "=", "False", ")", ":", "if", "(", "(", "max_depth", "is", "None", ")", "or", "(", "max_depth", "<=", "0", ")", ")", ":", "max_depth", "=", "len", "(", "gate_list", ")", "id_only", "=", "identity_only", "queue", "=", "deque", "(", "[", "(", ")", "]", ")", "ids", "=", "set", "(", ")", "while", "(", "len", "(", "queue", ")", ">", "0", ")", ":", "current_circuit", "=", "queue", ".", "popleft", "(", ")", "for", "next_gate", "in", "gate_list", ":", "new_circuit", "=", "(", "current_circuit", "+", "(", "next_gate", ",", ")", ")", "circuit_reducible", "=", "is_reducible", "(", "new_circuit", ",", "nqubits", ",", "1", ",", "len", "(", "new_circuit", ")", ")", "if", "(", "is_scalar_matrix", "(", "new_circuit", ",", "nqubits", ",", "id_only", ")", "and", "(", "not", "is_degenerate", "(", "ids", ",", "new_circuit", ")", ")", "and", "(", "not", "circuit_reducible", ")", ")", ":", "ids", ".", "add", "(", "GateIdentity", "(", "*", "new_circuit", ")", ")", "elif", "(", "(", "len", "(", "new_circuit", ")", "<", "max_depth", ")", "and", "(", "not", "circuit_reducible", ")", ")", ":", "queue", ".", "append", "(", "new_circuit", ")", "return", "ids" ]
constructs a set of gate identities from the list of possible gates .
train
false
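A usage sketch for bfs_identity_search in the sympy style the snippet appears to come from; the gate import path is an assumption:

from sympy.physics.quantum.gate import X, Y, Z

x, y, z = X(0), Y(0), Z(0)
ids = bfs_identity_search([x, y, z], 1, max_depth=4)
# each element is a GateIdentity, e.g. (X(0), X(0)) or (X(0), Y(0), Z(0))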
7,897
def confirm_email(token): (expired, invalid, user) = confirm_email_token_status(token) if ((not user) or invalid): invalid = True do_flash(*get_message('INVALID_CONFIRMATION_TOKEN')) if expired: send_confirmation_instructions(user) do_flash(*get_message('CONFIRMATION_EXPIRED', email=user.email, within=_security.confirm_email_within)) if (invalid or expired): return redirect((get_url(_security.confirm_error_view) or url_for('send_confirmation'))) if (user != current_user): logout_user() login_user(user) if confirm_user(user): after_this_request(_commit) msg = 'EMAIL_CONFIRMED' else: msg = 'ALREADY_CONFIRMED' do_flash(*get_message(msg)) return redirect((get_url(_security.post_confirm_view) or get_url(_security.post_login_view)))
[ "def", "confirm_email", "(", "token", ")", ":", "(", "expired", ",", "invalid", ",", "user", ")", "=", "confirm_email_token_status", "(", "token", ")", "if", "(", "(", "not", "user", ")", "or", "invalid", ")", ":", "invalid", "=", "True", "do_flash", "(", "*", "get_message", "(", "'INVALID_CONFIRMATION_TOKEN'", ")", ")", "if", "expired", ":", "send_confirmation_instructions", "(", "user", ")", "do_flash", "(", "*", "get_message", "(", "'CONFIRMATION_EXPIRED'", ",", "email", "=", "user", ".", "email", ",", "within", "=", "_security", ".", "confirm_email_within", ")", ")", "if", "(", "invalid", "or", "expired", ")", ":", "return", "redirect", "(", "(", "get_url", "(", "_security", ".", "confirm_error_view", ")", "or", "url_for", "(", "'send_confirmation'", ")", ")", ")", "if", "(", "user", "!=", "current_user", ")", ":", "logout_user", "(", ")", "login_user", "(", "user", ")", "if", "confirm_user", "(", "user", ")", ":", "after_this_request", "(", "_commit", ")", "msg", "=", "'EMAIL_CONFIRMED'", "else", ":", "msg", "=", "'ALREADY_CONFIRMED'", "do_flash", "(", "*", "get_message", "(", "msg", ")", ")", "return", "redirect", "(", "(", "get_url", "(", "_security", ".", "post_confirm_view", ")", "or", "get_url", "(", "_security", ".", "post_login_view", ")", ")", ")" ]
view function which handles an email confirmation request .
train
true
7,898
@environmentfilter def syntax(env, value, lexer=None, filename=None): try: import pygments from pygments import lexers from pygments import formatters except ImportError: logger.error(u'pygments library is required to use syntax highlighting tags.') raise TemplateError('Cannot load pygments') pyg = (lexers.get_lexer_by_name(lexer) if lexer else lexers.guess_lexer(value)) settings = {} if hasattr(env.config, 'syntax'): settings = getattr(env.config.syntax, 'options', Expando({})).to_dict() formatter = formatters.HtmlFormatter(**settings) code = pygments.highlight(value, pyg, formatter) code = code.replace('\n\n', '\n&nbsp;\n').replace('\n', '<br />') caption = (filename if filename else pyg.name) if hasattr(env.config, 'syntax'): if (not getattr(env.config.syntax, 'use_figure', True)): return Markup(code) return Markup(('<div class="codebox"><figure class="code">%s<figcaption>%s</figcaption></figure></div>\n\n' % (code, caption)))
[ "@", "environmentfilter", "def", "syntax", "(", "env", ",", "value", ",", "lexer", "=", "None", ",", "filename", "=", "None", ")", ":", "try", ":", "import", "pygments", "from", "pygments", "import", "lexers", "from", "pygments", "import", "formatters", "except", "ImportError", ":", "logger", ".", "error", "(", "u'pygments library is required to use syntax highlighting tags.'", ")", "raise", "TemplateError", "(", "'Cannot load pygments'", ")", "pyg", "=", "(", "lexers", ".", "get_lexer_by_name", "(", "lexer", ")", "if", "lexer", "else", "lexers", ".", "guess_lexer", "(", "value", ")", ")", "settings", "=", "{", "}", "if", "hasattr", "(", "env", ".", "config", ",", "'syntax'", ")", ":", "settings", "=", "getattr", "(", "env", ".", "config", ".", "syntax", ",", "'options'", ",", "Expando", "(", "{", "}", ")", ")", ".", "to_dict", "(", ")", "formatter", "=", "formatters", ".", "HtmlFormatter", "(", "**", "settings", ")", "code", "=", "pygments", ".", "highlight", "(", "value", ",", "pyg", ",", "formatter", ")", "code", "=", "code", ".", "replace", "(", "'\\n\\n'", ",", "'\\n&nbsp;\\n'", ")", ".", "replace", "(", "'\\n'", ",", "'<br />'", ")", "caption", "=", "(", "filename", "if", "filename", "else", "pyg", ".", "name", ")", "if", "hasattr", "(", "env", ".", "config", ",", "'syntax'", ")", ":", "if", "(", "not", "getattr", "(", "env", ".", "config", ".", "syntax", ",", "'use_figure'", ",", "True", ")", ")", ":", "return", "Markup", "(", "code", ")", "return", "Markup", "(", "(", "'<div class=\"codebox\"><figure class=\"code\">%s<figcaption>%s</figcaption></figure></div>\\n\\n'", "%", "(", "code", ",", "caption", ")", ")", ")" ]
processes the contained block using pygments .
train
false
7,899
def fast_logdet(A): (sign, ld) = np.linalg.slogdet(A) if (not (sign > 0)): return (- np.inf) return ld
[ "def", "fast_logdet", "(", "A", ")", ":", "(", "sign", ",", "ld", ")", "=", "np", ".", "linalg", ".", "slogdet", "(", "A", ")", "if", "(", "not", "(", "sign", ">", "0", ")", ")", ":", "return", "(", "-", "np", ".", "inf", ")", "return", "ld" ]
compute log(det(a)) for symmetric a; equivalent to np.log(np.linalg.det(a)) but more robust, since it returns -inf when the determinant is non-positive .
train
false
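A minimal check of fast_logdet against the naive formula, assuming only NumPy:

import numpy as np

A = np.array([[2.0, 0.5], [0.5, 1.0]])     # symmetric, det = 1.75
assert np.isclose(fast_logdet(A), np.log(np.linalg.det(A)))
assert fast_logdet(np.array([[1.0, 2.0], [2.0, 1.0]])) == -np.inf   # det = -3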
7,900
def is_windows_path(uri): log.warning('is_windows_path() is deprecated and will be removed in v0.6.0') if _WINPATH_RE.match(uri): return True else: return False
[ "def", "is_windows_path", "(", "uri", ")", ":", "log", ".", "warning", "(", "'is_windows_path() is deprecated and will be removed in v0.6.0'", ")", "if", "_WINPATH_RE", ".", "match", "(", "uri", ")", ":", "return", "True", "else", ":", "return", "False" ]
return true if *uri* is a windows path .
train
false
7,901
def summarize_pcoas(master_pcoa, support_pcoas, method='IQR', apply_procrustes=True): if apply_procrustes: support_pcoas = [list(sp) for sp in support_pcoas] master_pcoa = list(master_pcoa) for (i, pcoa) in enumerate(support_pcoas): (master_std, pcoa_std, m_squared) = procrustes(master_pcoa[1], pcoa[1]) support_pcoas[i][1] = pcoa_std master_pcoa[1] = master_std m_matrix = master_pcoa[1] m_eigvals = master_pcoa[2] m_names = master_pcoa[0] jn_flipped_matrices = [] all_eigvals = [] for rep in support_pcoas: matrix = rep[1] eigvals = rep[2] all_eigvals.append(eigvals) jn_flipped_matrices.append(_flip_vectors(matrix, m_matrix)) (matrix_average, matrix_low, matrix_high) = _compute_jn_pcoa_avg_ranges(jn_flipped_matrices, method) all_eigvals_stack = vstack(all_eigvals) eigval_sum = np_sum(all_eigvals_stack, axis=0) eigval_average = (eigval_sum / float(len(all_eigvals))) return (matrix_average, matrix_low, matrix_high, eigval_average, m_names)
[ "def", "summarize_pcoas", "(", "master_pcoa", ",", "support_pcoas", ",", "method", "=", "'IQR'", ",", "apply_procrustes", "=", "True", ")", ":", "if", "apply_procrustes", ":", "support_pcoas", "=", "[", "list", "(", "sp", ")", "for", "sp", "in", "support_pcoas", "]", "master_pcoa", "=", "list", "(", "master_pcoa", ")", "for", "(", "i", ",", "pcoa", ")", "in", "enumerate", "(", "support_pcoas", ")", ":", "(", "master_std", ",", "pcoa_std", ",", "m_squared", ")", "=", "procrustes", "(", "master_pcoa", "[", "1", "]", ",", "pcoa", "[", "1", "]", ")", "support_pcoas", "[", "i", "]", "[", "1", "]", "=", "pcoa_std", "master_pcoa", "[", "1", "]", "=", "master_std", "m_matrix", "=", "master_pcoa", "[", "1", "]", "m_eigvals", "=", "master_pcoa", "[", "2", "]", "m_names", "=", "master_pcoa", "[", "0", "]", "jn_flipped_matrices", "=", "[", "]", "all_eigvals", "=", "[", "]", "for", "rep", "in", "support_pcoas", ":", "matrix", "=", "rep", "[", "1", "]", "eigvals", "=", "rep", "[", "2", "]", "all_eigvals", ".", "append", "(", "eigvals", ")", "jn_flipped_matrices", ".", "append", "(", "_flip_vectors", "(", "matrix", ",", "m_matrix", ")", ")", "(", "matrix_average", ",", "matrix_low", ",", "matrix_high", ")", "=", "_compute_jn_pcoa_avg_ranges", "(", "jn_flipped_matrices", ",", "method", ")", "all_eigvals_stack", "=", "vstack", "(", "all_eigvals", ")", "eigval_sum", "=", "np_sum", "(", "all_eigvals_stack", ",", "axis", "=", "0", ")", "eigval_average", "=", "(", "eigval_sum", "/", "float", "(", "len", "(", "all_eigvals", ")", ")", ")", "return", "(", "matrix_average", ",", "matrix_low", ",", "matrix_high", ",", "eigval_average", ",", "m_names", ")" ]
returns the average pcoa vector values for the support pcoas; also returns the ranges as calculated with the specified method .
train
false
7,902
@conf.commands.register def srp(x, promisc=None, iface=None, iface_hint=None, filter=None, nofilter=0, type=ETH_P_ALL, *args, **kargs): if (not kargs.has_key('timeout')): kargs['timeout'] = (-1) if ((iface is None) and (iface_hint is not None)): iface = conf.route.route(iface_hint)[0] s = conf.L2socket(promisc=promisc, iface=iface, filter=filter, nofilter=nofilter, type=type) (a, b) = sndrcv(s, x, *args, **kargs) s.close() return (a, b)
[ "@", "conf", ".", "commands", ".", "register", "def", "srp", "(", "x", ",", "promisc", "=", "None", ",", "iface", "=", "None", ",", "iface_hint", "=", "None", ",", "filter", "=", "None", ",", "nofilter", "=", "0", ",", "type", "=", "ETH_P_ALL", ",", "*", "args", ",", "**", "kargs", ")", ":", "if", "(", "not", "kargs", ".", "has_key", "(", "'timeout'", ")", ")", ":", "kargs", "[", "'timeout'", "]", "=", "(", "-", "1", ")", "if", "(", "(", "iface", "is", "None", ")", "and", "(", "iface_hint", "is", "not", "None", ")", ")", ":", "iface", "=", "conf", ".", "route", ".", "route", "(", "iface_hint", ")", "[", "0", "]", "s", "=", "conf", ".", "L2socket", "(", "promisc", "=", "promisc", ",", "iface", "=", "iface", ",", "filter", "=", "filter", ",", "nofilter", "=", "nofilter", ",", "type", "=", "type", ")", "(", "a", ",", "b", ")", "=", "sndrcv", "(", "s", ",", "x", ",", "*", "args", ",", "**", "kargs", ")", "s", ".", "close", "(", ")", "return", "(", "a", ",", "b", ")" ]
send and receive packets at layer 2 . nofilter: put 1 to avoid use of bpf filters . retry: if positive, resend unanswered packets that many times .
train
false
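A classic scapy-style usage sketch for srp (requires raw-socket privileges; the addresses are placeholders):

from scapy.all import Ether, ARP   # assumed scapy import path

ans, unans = srp(Ether(dst='ff:ff:ff:ff:ff:ff') / ARP(pdst='192.168.1.0/24'),
                 timeout=2)
for sent, received in ans:
    print('%s is at %s' % (received.psrc, received.hwsrc))   # responder IP and MAC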
7,903
def _check_guts_toc(attr, old, toc, last_build, pyc=0): return (_check_guts_eq(attr, old, toc, last_build) or _check_guts_toc_mtime(attr, old, toc, last_build, pyc=pyc))
[ "def", "_check_guts_toc", "(", "attr", ",", "old", ",", "toc", ",", "last_build", ",", "pyc", "=", "0", ")", ":", "return", "(", "_check_guts_eq", "(", "attr", ",", "old", ",", "toc", ",", "last_build", ")", "or", "_check_guts_toc_mtime", "(", "attr", ",", "old", ",", "toc", ",", "last_build", ",", "pyc", "=", "pyc", ")", ")" ]
rebuild is required if either the toc content changed or the mtimes of files listed in the old toc are newer than last_build (the pyc flag is forwarded to the mtime check) .
train
true
7,905
def budget(): def prep(r): if ((r.method == 'timeplot') and (r.get_vars.get('component') == 'allocation')): query = (FS('allocation.start_date') != None) r.resource.add_component_filter('allocation', query) return True s3.prep = prep return s3_rest_controller(rheader=s3db.budget_rheader)
[ "def", "budget", "(", ")", ":", "def", "prep", "(", "r", ")", ":", "if", "(", "(", "r", ".", "method", "==", "'timeplot'", ")", "and", "(", "r", ".", "get_vars", ".", "get", "(", "'component'", ")", "==", "'allocation'", ")", ")", ":", "query", "=", "(", "FS", "(", "'allocation.start_date'", ")", "!=", "None", ")", "r", ".", "resource", ".", "add_component_filter", "(", "'allocation'", ",", "query", ")", "return", "True", "s3", ".", "prep", "=", "prep", "return", "s3_rest_controller", "(", "rheader", "=", "s3db", ".", "budget_rheader", ")" ]
restful crud controller .
train
false
7,907
@pytest.fixture def setup_command(request, tempdir): marker = request.node.get_marker('setup_command') args = (marker.args if marker else []) pkgrootdir = (tempdir / 'root') root.copytree(pkgrootdir) with cd(pkgrootdir): pythonpath = os.path.dirname(os.path.dirname(sphinx.__file__)) if os.getenv('PYTHONPATH'): pythonpath = ((os.getenv('PYTHONPATH') + os.pathsep) + pythonpath) command = [sys.executable, 'setup.py', 'build_sphinx'] command.extend(args) proc = subprocess.Popen(command, env=dict(os.environ, PYTHONPATH=pythonpath), stdout=subprocess.PIPE, stderr=subprocess.PIPE) (yield namedtuple('setup', 'pkgroot,proc')(pkgrootdir, proc))
[ "@", "pytest", ".", "fixture", "def", "setup_command", "(", "request", ",", "tempdir", ")", ":", "marker", "=", "request", ".", "node", ".", "get_marker", "(", "'setup_command'", ")", "args", "=", "(", "marker", ".", "args", "if", "marker", "else", "[", "]", ")", "pkgrootdir", "=", "(", "tempdir", "/", "'root'", ")", "root", ".", "copytree", "(", "pkgrootdir", ")", "with", "cd", "(", "pkgrootdir", ")", ":", "pythonpath", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "dirname", "(", "sphinx", ".", "__file__", ")", ")", "if", "os", ".", "getenv", "(", "'PYTHONPATH'", ")", ":", "pythonpath", "=", "(", "(", "os", ".", "getenv", "(", "'PYTHONPATH'", ")", "+", "os", ".", "pathsep", ")", "+", "pythonpath", ")", "command", "=", "[", "sys", ".", "executable", ",", "'setup.py'", ",", "'build_sphinx'", "]", "command", ".", "extend", "(", "args", ")", "proc", "=", "subprocess", ".", "Popen", "(", "command", ",", "env", "=", "dict", "(", "os", ".", "environ", ",", "PYTHONPATH", "=", "pythonpath", ")", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "(", "yield", "namedtuple", "(", "'setup'", ",", "'pkgroot,proc'", ")", "(", "pkgrootdir", ",", "proc", ")", ")" ]
run setup.py build_sphinx in a copy of the test package root .
train
false
7,908
def do_truncate(s, length=255, killwords=False, end='...'): if (len(s) <= length): return s elif killwords: return (s[:length] + end) words = s.split(' ') result = [] m = 0 for word in words: m += (len(word) + 1) if (m > length): break result.append(word) result.append(end) return u' '.join(result)
[ "def", "do_truncate", "(", "s", ",", "length", "=", "255", ",", "killwords", "=", "False", ",", "end", "=", "'...'", ")", ":", "if", "(", "len", "(", "s", ")", "<=", "length", ")", ":", "return", "s", "elif", "killwords", ":", "return", "(", "s", "[", ":", "length", "]", "+", "end", ")", "words", "=", "s", ".", "split", "(", "' '", ")", "result", "=", "[", "]", "m", "=", "0", "for", "word", "in", "words", ":", "m", "+=", "(", "len", "(", "word", ")", "+", "1", ")", "if", "(", "m", ">", "length", ")", ":", "break", "result", ".", "append", "(", "word", ")", "result", ".", "append", "(", "end", ")", "return", "u' '", ".", "join", "(", "result", ")" ]
return a truncated copy of the string .
train
true
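The behavior of do_truncate is easiest to see directly:

assert do_truncate('foo bar baz', 9) == 'foo bar ...'                   # word-aware cut
assert do_truncate('foo bar baz', 9, killwords=True) == 'foo bar b...'  # hard cut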
7,909
def git_dag(model, args=None, settings=None, existing_view=None): branch = model.currentbranch branch_doubledash = ((branch and (branch + u' --')) or u'') ctx = dag.DAG(branch_doubledash, 1000) ctx.set_arguments(args) if (existing_view is None): view = GitDAG(model, ctx, settings=settings) else: view = existing_view view.set_context(ctx) if ctx.ref: view.display() return view
[ "def", "git_dag", "(", "model", ",", "args", "=", "None", ",", "settings", "=", "None", ",", "existing_view", "=", "None", ")", ":", "branch", "=", "model", ".", "currentbranch", "branch_doubledash", "=", "(", "(", "branch", "and", "(", "branch", "+", "u' --'", ")", ")", "or", "u''", ")", "ctx", "=", "dag", ".", "DAG", "(", "branch_doubledash", ",", "1000", ")", "ctx", ".", "set_arguments", "(", "args", ")", "if", "(", "existing_view", "is", "None", ")", ":", "view", "=", "GitDAG", "(", "model", ",", "ctx", ",", "settings", "=", "settings", ")", "else", ":", "view", "=", "existing_view", "view", ".", "set_context", "(", "ctx", ")", "if", "ctx", ".", "ref", ":", "view", ".", "display", "(", ")", "return", "view" ]
return a pre-populated git dag widget .
train
false
7,910
@pytest.mark.models def test_parser_sbd_serialization_projective(EN): text = u"I bought a couch from IKEA It wasn't very comfortable." transition = [u'L-nsubj', u'S', u'L-det', u'R-dobj', u'D', u'R-prep', u'R-pobj', u'B-ROOT', u'L-nsubj', u'R-neg', u'D', u'S', u'L-advmod', u'R-acomp', u'D', u'R-punct'] doc = EN.tokenizer(text) apply_transition_sequence(EN.parser, doc, transition) doc_serialized = Doc(EN.vocab).from_bytes(doc.to_bytes()) assert (doc.is_parsed == True) assert (doc_serialized.is_parsed == True) assert (doc.to_bytes() == doc_serialized.to_bytes()) assert ([s.text for s in doc.sents] == [s.text for s in doc_serialized.sents])
[ "@", "pytest", ".", "mark", ".", "models", "def", "test_parser_sbd_serialization_projective", "(", "EN", ")", ":", "text", "=", "u\"I bought a couch from IKEA It wasn't very comfortable.\"", "transition", "=", "[", "u'L-nsubj'", ",", "u'S'", ",", "u'L-det'", ",", "u'R-dobj'", ",", "u'D'", ",", "u'R-prep'", ",", "u'R-pobj'", ",", "u'B-ROOT'", ",", "u'L-nsubj'", ",", "u'R-neg'", ",", "u'D'", ",", "u'S'", ",", "u'L-advmod'", ",", "u'R-acomp'", ",", "u'D'", ",", "u'R-punct'", "]", "doc", "=", "EN", ".", "tokenizer", "(", "text", ")", "apply_transition_sequence", "(", "EN", ".", "parser", ",", "doc", ",", "transition", ")", "doc_serialized", "=", "Doc", "(", "EN", ".", "vocab", ")", ".", "from_bytes", "(", "doc", ".", "to_bytes", "(", ")", ")", "assert", "(", "doc", ".", "is_parsed", "==", "True", ")", "assert", "(", "doc_serialized", ".", "is_parsed", "==", "True", ")", "assert", "(", "doc", ".", "to_bytes", "(", ")", "==", "doc_serialized", ".", "to_bytes", "(", ")", ")", "assert", "(", "[", "s", ".", "text", "for", "s", "in", "doc", ".", "sents", "]", "==", "[", "s", ".", "text", "for", "s", "in", "doc_serialized", ".", "sents", "]", ")" ]
test that the parse state and sentence boundaries match before and after serialization .
train
false
7,911
def check_dependencies(): if (not HAS_VIRTUALENV): raise Exception(('Virtualenv not found. ' + 'Try installing python-virtualenv')) print 'done.'
[ "def", "check_dependencies", "(", ")", ":", "if", "(", "not", "HAS_VIRTUALENV", ")", ":", "raise", "Exception", "(", "(", "'Virtualenv not found. '", "+", "'Try installing python-virtualenv'", ")", ")", "print", "'done.'" ]
ensure that virtualenv is available .
train
false
7,912
@depends(skin=None) def make_multi_button(name, channel, number, midi_message_type, skin=None, default_states=None, **k): is_momentary = True return MultiButtonElement(USER_MODE_CHANNELS, is_momentary, midi_message_type, channel, number, name=name, skin=skin, default_states=default_states, **k)
[ "@", "depends", "(", "skin", "=", "None", ")", "def", "make_multi_button", "(", "name", ",", "channel", ",", "number", ",", "midi_message_type", ",", "skin", "=", "None", ",", "default_states", "=", "None", ",", "**", "k", ")", ":", "is_momentary", "=", "True", "return", "MultiButtonElement", "(", "USER_MODE_CHANNELS", ",", "is_momentary", ",", "midi_message_type", ",", "channel", ",", "number", ",", "name", "=", "name", ",", "skin", "=", "skin", ",", "default_states", "=", "default_states", ",", "**", "k", ")" ]
creates a special button element that is actually multiple buttons; one for the default channel (1) and one for each of the channels used in user layouts .
train
false
7,913
@manager.command def dropall(): if prompt_bool('Are you sure ? You will lose all your data !'): db.drop_all()
[ "@", "manager", ".", "command", "def", "dropall", "(", ")", ":", "if", "prompt_bool", "(", "'Are you sure ? You will lose all your data !'", ")", ":", "db", ".", "drop_all", "(", ")" ]
drops all database tables .
train
false
7,915
def patch_sessions(): from openedx.core.djangoapps.safe_sessions.testing import safe_cookie_test_session_patch safe_cookie_test_session_patch()
[ "def", "patch_sessions", "(", ")", ":", "from", "openedx", ".", "core", ".", "djangoapps", ".", "safe_sessions", ".", "testing", "import", "safe_cookie_test_session_patch", "safe_cookie_test_session_patch", "(", ")" ]
override the test client's session and login to support safe cookies .
train
false
7,916
def test_blacklist(keyhint, config_stub, key_config_stub): config_stub.set('ui', 'keyhint-blacklist', ['ab*']) key_config_stub.set_bindings_for('normal', OrderedDict([('aa', 'cmd-aa'), ('ab', 'cmd-ab'), ('aba', 'cmd-aba'), ('abb', 'cmd-abb'), ('xd', 'cmd-xd'), ('xe', 'cmd-xe')])) keyhint.update_keyhint('normal', 'a') assert (keyhint.text() == expected_text(('a', 'yellow', 'a', 'cmd-aa')))
[ "def", "test_blacklist", "(", "keyhint", ",", "config_stub", ",", "key_config_stub", ")", ":", "config_stub", ".", "set", "(", "'ui'", ",", "'keyhint-blacklist'", ",", "[", "'ab*'", "]", ")", "key_config_stub", ".", "set_bindings_for", "(", "'normal'", ",", "OrderedDict", "(", "[", "(", "'aa'", ",", "'cmd-aa'", ")", ",", "(", "'ab'", ",", "'cmd-ab'", ")", ",", "(", "'aba'", ",", "'cmd-aba'", ")", ",", "(", "'abb'", ",", "'cmd-abb'", ")", ",", "(", "'xd'", ",", "'cmd-xd'", ")", ",", "(", "'xe'", ",", "'cmd-xe'", ")", "]", ")", ")", "keyhint", ".", "update_keyhint", "(", "'normal'", ",", "'a'", ")", "assert", "(", "keyhint", ".", "text", "(", ")", "==", "expected_text", "(", "(", "'a'", ",", "'yellow'", ",", "'a'", ",", "'cmd-aa'", ")", ")", ")" ]
test that blacklisted keychains aren't hinted .
train
false
7,917
def getConvertedName(name): if (name == 'def __init__'): return 'def !__init__' if (name == 'def main'): return 'def |main' return name.lower()
[ "def", "getConvertedName", "(", "name", ")", ":", "if", "(", "name", "==", "'def __init__'", ")", ":", "return", "'def !__init__'", "if", "(", "name", "==", "'def main'", ")", ":", "return", "'def |main'", "return", "name", ".", "lower", "(", ")" ]
get the converted name, with __init__ forced to the beginning and main to the end, for comparing function names .
train
false
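Worked examples for getConvertedName; the '!' and '|' prefixes force sort order ('!' sorts before letters, '|' after them):

assert getConvertedName('def __init__') == 'def !__init__'
assert getConvertedName('def main') == 'def |main'
assert getConvertedName('def Foo') == 'def foo'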
7,918
def PrintUpdate(msg): if (verbosity > 0): timestamp = datetime.datetime.now() print >>sys.stderr, ('%s' % datetime.datetime.now().strftime('%I:%M %p')), print >>sys.stderr, msg
[ "def", "PrintUpdate", "(", "msg", ")", ":", "if", "(", "verbosity", ">", "0", ")", ":", "timestamp", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "print", ">>", "sys", ".", "stderr", ",", "(", "'%s'", "%", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "'%I:%M %p'", ")", ")", ",", "print", ">>", "sys", ".", "stderr", ",", "msg" ]
print a timestamped message to stderr .
train
false
7,920
def loaded_api(): if ('PyQt4.QtCore' in sys.modules): if (qtapi_version() == 2): return QT_API_PYQT else: return QT_API_PYQTv1 elif ('PySide.QtCore' in sys.modules): return QT_API_PYSIDE elif ('PySide2.QtCore' in sys.modules): return QT_API_PYSIDE2 elif ('PyQt5.QtCore' in sys.modules): return QT_API_PYQT5 return None
[ "def", "loaded_api", "(", ")", ":", "if", "(", "'PyQt4.QtCore'", "in", "sys", ".", "modules", ")", ":", "if", "(", "qtapi_version", "(", ")", "==", "2", ")", ":", "return", "QT_API_PYQT", "else", ":", "return", "QT_API_PYQTv1", "elif", "(", "'PySide.QtCore'", "in", "sys", ".", "modules", ")", ":", "return", "QT_API_PYSIDE", "elif", "(", "'PySide2.QtCore'", "in", "sys", ".", "modules", ")", ":", "return", "QT_API_PYSIDE2", "elif", "(", "'PyQt5.QtCore'", "in", "sys", ".", "modules", ")", ":", "return", "QT_API_PYQT5", "return", "None" ]
return which api is loaded .
train
true
7,922
def get_next_disk_info(mapping, disk_bus, device_type='disk', boot_index=None, assigned_devices=None): disk_dev = find_disk_dev_for_disk_bus(mapping, disk_bus, assigned_devices) info = {'bus': disk_bus, 'dev': disk_dev, 'type': device_type} if ((boot_index is not None) and (boot_index >= 0)): info['boot_index'] = str(boot_index) return info
[ "def", "get_next_disk_info", "(", "mapping", ",", "disk_bus", ",", "device_type", "=", "'disk'", ",", "boot_index", "=", "None", ",", "assigned_devices", "=", "None", ")", ":", "disk_dev", "=", "find_disk_dev_for_disk_bus", "(", "mapping", ",", "disk_bus", ",", "assigned_devices", ")", "info", "=", "{", "'bus'", ":", "disk_bus", ",", "'dev'", ":", "disk_dev", ",", "'type'", ":", "device_type", "}", "if", "(", "(", "boot_index", "is", "not", "None", ")", "and", "(", "boot_index", ">=", "0", ")", ")", ":", "info", "[", "'boot_index'", "]", "=", "str", "(", "boot_index", ")", "return", "info" ]
determine the disk info for the next device on disk_bus .
train
false
7,923
def removeNtpd(vm, prompt=Prompt, ntpPackage='ntp'): log('* Removing ntpd') vm.sendline(('sudo -n apt-get -qy remove ' + ntpPackage)) vm.expect(prompt) vm.sendline('sudo -n pkill ntpd') vm.expect(prompt) log('* Getting seconds since epoch from this server') seconds = int(run('date +%s')) log('* Setting VM clock') vm.sendline(('sudo -n date -s @%d' % seconds))
[ "def", "removeNtpd", "(", "vm", ",", "prompt", "=", "Prompt", ",", "ntpPackage", "=", "'ntp'", ")", ":", "log", "(", "'* Removing ntpd'", ")", "vm", ".", "sendline", "(", "(", "'sudo -n apt-get -qy remove '", "+", "ntpPackage", ")", ")", "vm", ".", "expect", "(", "prompt", ")", "vm", ".", "sendline", "(", "'sudo -n pkill ntpd'", ")", "vm", ".", "expect", "(", "prompt", ")", "log", "(", "'* Getting seconds since epoch from this server'", ")", "seconds", "=", "int", "(", "run", "(", "'date +%s'", ")", ")", "log", "(", "'* Setting VM clock'", ")", "vm", ".", "sendline", "(", "(", "'sudo -n date -s @%d'", "%", "seconds", ")", ")" ]
remove ntpd and set clock immediately .
train
false
7,924
def get_lms_link_for_item(location, preview=False): assert isinstance(location, UsageKey) lms_base = SiteConfiguration.get_value_for_org(location.org, 'LMS_BASE', settings.LMS_BASE) if (lms_base is None): return None if preview: lms_base = SiteConfiguration.get_value_for_org(location.org, 'PREVIEW_LMS_BASE', settings.FEATURES.get('PREVIEW_LMS_BASE')) return u'//{lms_base}/courses/{course_key}/jump_to/{location}'.format(lms_base=lms_base, course_key=location.course_key.to_deprecated_string(), location=location.to_deprecated_string())
[ "def", "get_lms_link_for_item", "(", "location", ",", "preview", "=", "False", ")", ":", "assert", "isinstance", "(", "location", ",", "UsageKey", ")", "lms_base", "=", "SiteConfiguration", ".", "get_value_for_org", "(", "location", ".", "org", ",", "'LMS_BASE'", ",", "settings", ".", "LMS_BASE", ")", "if", "(", "lms_base", "is", "None", ")", ":", "return", "None", "if", "preview", ":", "lms_base", "=", "SiteConfiguration", ".", "get_value_for_org", "(", "location", ".", "org", ",", "'PREVIEW_LMS_BASE'", ",", "settings", ".", "FEATURES", ".", "get", "(", "'PREVIEW_LMS_BASE'", ")", ")", "return", "u'//{lms_base}/courses/{course_key}/jump_to/{location}'", ".", "format", "(", "lms_base", "=", "lms_base", ",", "course_key", "=", "location", ".", "course_key", ".", "to_deprecated_string", "(", ")", ",", "location", "=", "location", ".", "to_deprecated_string", "(", ")", ")" ]
returns an lms link to the course with a jump_to to the provided location .
train
false
7,925
def test_add_rel_nofollow(): eq_('<a href="http://yy.com" rel="nofollow">http://yy.com</a>', linkify('<a href="http://yy.com">http://yy.com</a>'))
[ "def", "test_add_rel_nofollow", "(", ")", ":", "eq_", "(", "'<a href=\"http://yy.com\" rel=\"nofollow\">http://yy.com</a>'", ",", "linkify", "(", "'<a href=\"http://yy.com\">http://yy.com</a>'", ")", ")" ]
verify that rel="nofollow" is added to an existing link .
train
false
7,926
def filter_parsedate(val): return datetime.fromtimestamp(mktime(parsedate(val)))
[ "def", "filter_parsedate", "(", "val", ")", ":", "return", "datetime", ".", "fromtimestamp", "(", "mktime", "(", "parsedate", "(", "val", ")", ")", ")" ]
attempts to parse a date according to the rules in rfc 2822 .
train
false
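A usage sketch for filter_parsedate; note the result is a naive datetime in local time:

dt = filter_parsedate('Fri, 09 Nov 2001 01:08:47 -0000')
assert (dt.year, dt.month, dt.day) == (2001, 11, 9)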
7,927
def _enclosure(post, lang): enclosure = post.meta(u'enclosure', lang) if enclosure: try: length = int((post.meta(u'enclosure_length', lang) or 0)) except KeyError: length = 0 except ValueError: utils.LOGGER.warn(u'Invalid enclosure length for post {0}'.format(post.source_path)) length = 0 url = enclosure mime = mimetypes.guess_type(url)[0] return (url, length, mime)
[ "def", "_enclosure", "(", "post", ",", "lang", ")", ":", "enclosure", "=", "post", ".", "meta", "(", "u'enclosure'", ",", "lang", ")", "if", "enclosure", ":", "try", ":", "length", "=", "int", "(", "(", "post", ".", "meta", "(", "u'enclosure_length'", ",", "lang", ")", "or", "0", ")", ")", "except", "KeyError", ":", "length", "=", "0", "except", "ValueError", ":", "utils", ".", "LOGGER", ".", "warn", "(", "u'Invalid enclosure length for post {0}'", ".", "format", "(", "post", ".", "source_path", ")", ")", "length", "=", "0", "url", "=", "enclosure", "mime", "=", "mimetypes", ".", "guess_type", "(", "url", ")", "[", "0", "]", "return", "(", "url", ",", "length", ",", "mime", ")" ]
add an enclosure to rss .
train
false
7,928
def create_keyspace_network_topology(name, dc_replication_map, durable_writes=True, connections=None): _create_keyspace(name, durable_writes, 'NetworkTopologyStrategy', dc_replication_map, connections=connections)
[ "def", "create_keyspace_network_topology", "(", "name", ",", "dc_replication_map", ",", "durable_writes", "=", "True", ",", "connections", "=", "None", ")", ":", "_create_keyspace", "(", "name", ",", "durable_writes", ",", "'NetworkTopologyStrategy'", ",", "dc_replication_map", ",", "connections", "=", "connections", ")" ]
creates a keyspace with networktopologystrategy for replica placement; if the keyspace already exists, it will not be modified .
train
true
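A cqlengine-style call sketch for create_keyspace_network_topology; the keyspace and datacenter names are placeholders:

# three replicas in DC1, two in DC2
create_keyspace_network_topology('analytics', {'DC1': 3, 'DC2': 2})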
7,929
def impulse(system, X0=None, T=None, N=None): if isinstance(system, lti): sys = system._as_ss() elif isinstance(system, dlti): raise AttributeError('impulse can only be used with continuous-time systems.') else: sys = lti(*system)._as_ss() if (X0 is None): X = squeeze(sys.B) else: X = squeeze((sys.B + X0)) if (N is None): N = 100 if (T is None): T = _default_response_times(sys.A, N) else: T = asarray(T) (_, h, _) = lsim(sys, 0.0, T, X, interp=False) return (T, h)
[ "def", "impulse", "(", "system", ",", "X0", "=", "None", ",", "T", "=", "None", ",", "N", "=", "None", ")", ":", "if", "isinstance", "(", "system", ",", "lti", ")", ":", "sys", "=", "system", ".", "_as_ss", "(", ")", "elif", "isinstance", "(", "system", ",", "dlti", ")", ":", "raise", "AttributeError", "(", "'impulse can only be used with continuous-time systems.'", ")", "else", ":", "sys", "=", "lti", "(", "*", "system", ")", ".", "_as_ss", "(", ")", "if", "(", "X0", "is", "None", ")", ":", "X", "=", "squeeze", "(", "sys", ".", "B", ")", "else", ":", "X", "=", "squeeze", "(", "(", "sys", ".", "B", "+", "X0", ")", ")", "if", "(", "N", "is", "None", ")", ":", "N", "=", "100", "if", "(", "T", "is", "None", ")", ":", "T", "=", "_default_response_times", "(", "sys", ".", "A", ",", "N", ")", "else", ":", "T", "=", "asarray", "(", "T", ")", "(", "_", ",", "h", ",", "_", ")", "=", "lsim", "(", "sys", ",", "0.0", ",", "T", ",", "X", ",", "interp", "=", "False", ")", "return", "(", "T", ",", "h", ")" ]
impulse response of continuous-time system .
train
false
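A worked check for impulse with the first-order lag H(s) = 1/(s + 1), whose impulse response is exp(-t):

import numpy as np

T, h = impulse(([1.0], [1.0, 1.0]))    # transfer function given as (num, den)
assert np.allclose(h, np.exp(-T), atol=1e-6)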
7,930
def compute_node_get_by_host_and_nodename(context, host, nodename): return IMPL.compute_node_get_by_host_and_nodename(context, host, nodename)
[ "def", "compute_node_get_by_host_and_nodename", "(", "context", ",", "host", ",", "nodename", ")", ":", "return", "IMPL", ".", "compute_node_get_by_host_and_nodename", "(", "context", ",", "host", ",", "nodename", ")" ]
get a compute node by its associated host and nodename .
train
false
7,932
def getParameterSequence(functionName): parameterDictionary = {} parameterSequence = [] parameterText = functionName[(functionName.find('(') + 1):].replace('xmlElement', 'elementNode') snippet = Snippet(0, parameterText) strippedParameters = [] for parameter in snippet.parameters: strippedParameter = parameter.strip() if (strippedParameter != 'self'): strippedParameters.append(strippedParameter) for (parameterIndex, parameter) in enumerate(strippedParameters): parameterDictionary[parameter] = parameterIndex sortedParameters = strippedParameters[:] sortedParameters.sort() for sortedParameter in sortedParameters: parameterSequence.append(parameterDictionary[sortedParameter]) return parameterSequence
[ "def", "getParameterSequence", "(", "functionName", ")", ":", "parameterDictionary", "=", "{", "}", "parameterSequence", "=", "[", "]", "parameterText", "=", "functionName", "[", "(", "functionName", ".", "find", "(", "'('", ")", "+", "1", ")", ":", "]", ".", "replace", "(", "'xmlElement'", ",", "'elementNode'", ")", "snippet", "=", "Snippet", "(", "0", ",", "parameterText", ")", "strippedParameters", "=", "[", "]", "for", "parameter", "in", "snippet", ".", "parameters", ":", "strippedParameter", "=", "parameter", ".", "strip", "(", ")", "if", "(", "strippedParameter", "!=", "'self'", ")", ":", "strippedParameters", ".", "append", "(", "strippedParameter", ")", "for", "(", "parameterIndex", ",", "parameter", ")", "in", "enumerate", "(", "strippedParameters", ")", ":", "parameterDictionary", "[", "parameter", "]", "=", "parameterIndex", "sortedParameters", "=", "strippedParameters", "[", ":", "]", "sortedParameters", ".", "sort", "(", ")", "for", "sortedParameter", "in", "sortedParameters", ":", "parameterSequence", ".", "append", "(", "parameterDictionary", "[", "sortedParameter", "]", ")", "return", "parameterSequence" ]
get the sequence of parameter indices, ordered by parameter name .
train
false
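A worked example for getParameterSequence, assuming the external Snippet helper splits 'b, a, c' into the three names: alphabetical order a, b, c maps back to the original indices 1, 0, 2:

assert getParameterSequence('def f(b, a, c)') == [1, 0, 2]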
7,933
def sign_url_v2(url_to_sign, expiry): return sign_url_base_v2(bucket=url_to_sign.bucket(), object=url_to_sign.object(), expiry=expiry)
[ "def", "sign_url_v2", "(", "url_to_sign", ",", "expiry", ")", ":", "return", "sign_url_base_v2", "(", "bucket", "=", "url_to_sign", ".", "bucket", "(", ")", ",", "object", "=", "url_to_sign", ".", "object", "(", ")", ",", "expiry", "=", "expiry", ")" ]
sign a url in s3://bucket/object form with the given expiry time .
train
false
7,934
def add_or_replace_jacket(container): name = find_existing_jacket(container) found = True if (name is None): jacket_item = container.generate_item(u'jacket.xhtml', id_prefix=u'jacket') name = container.href_to_name(jacket_item.get(u'href'), container.opf_name) found = False if found: remove_jacket_images(container, name) replace_jacket(container, name) if (not found): index = 0 sp = container.abspath_to_name(container.spine_items.next()) if (sp == find_cover_page(container)): index = 1 itemref = container.opf.makeelement(OPF(u'itemref'), idref=jacket_item.get(u'id')) container.insert_into_xml(container.opf_xpath(u'//opf:spine')[0], itemref, index=index) return found
[ "def", "add_or_replace_jacket", "(", "container", ")", ":", "name", "=", "find_existing_jacket", "(", "container", ")", "found", "=", "True", "if", "(", "name", "is", "None", ")", ":", "jacket_item", "=", "container", ".", "generate_item", "(", "u'jacket.xhtml'", ",", "id_prefix", "=", "u'jacket'", ")", "name", "=", "container", ".", "href_to_name", "(", "jacket_item", ".", "get", "(", "u'href'", ")", ",", "container", ".", "opf_name", ")", "found", "=", "False", "if", "found", ":", "remove_jacket_images", "(", "container", ",", "name", ")", "replace_jacket", "(", "container", ",", "name", ")", "if", "(", "not", "found", ")", ":", "index", "=", "0", "sp", "=", "container", ".", "abspath_to_name", "(", "container", ".", "spine_items", ".", "next", "(", ")", ")", "if", "(", "sp", "==", "find_cover_page", "(", "container", ")", ")", ":", "index", "=", "1", "itemref", "=", "container", ".", "opf", ".", "makeelement", "(", "OPF", "(", "u'itemref'", ")", ",", "idref", "=", "jacket_item", ".", "get", "(", "u'id'", ")", ")", "container", ".", "insert_into_xml", "(", "container", ".", "opf_xpath", "(", "u'//opf:spine'", ")", "[", "0", "]", ",", "itemref", ",", "index", "=", "index", ")", "return", "found" ]
either create a new jacket from the book's metadata or replace an existing jacket .
train
false
7,935
def billboard_matrix(): m = get_model_matrix() m[0] = 1 m[1] = 0 m[2] = 0 m[4] = 0 m[5] = 1 m[6] = 0 m[8] = 0 m[9] = 0 m[10] = 1 glLoadMatrixf(m)
[ "def", "billboard_matrix", "(", ")", ":", "m", "=", "get_model_matrix", "(", ")", "m", "[", "0", "]", "=", "1", "m", "[", "1", "]", "=", "0", "m", "[", "2", "]", "=", "0", "m", "[", "4", "]", "=", "0", "m", "[", "5", "]", "=", "1", "m", "[", "6", "]", "=", "0", "m", "[", "8", "]", "=", "0", "m", "[", "9", "]", "=", "0", "m", "[", "10", "]", "=", "1", "glLoadMatrixf", "(", "m", ")" ]
removes rotational components of current matrix so that primitives are always drawn facing the viewer .
train
false
7,936
def test_iterable_types(Chart): chart = Chart(no_prefix=True) chart.add('A', [1, 2]) chart.add('B', []) if (not chart._dual): chart.x_labels = ('red', 'green', 'blue') chart1 = chart.render() chart = Chart(no_prefix=True) chart.add('A', (1, 2)) chart.add('B', tuple()) if (not chart._dual): chart.x_labels = ('red', 'green', 'blue') chart2 = chart.render() assert (chart1 == chart2)
[ "def", "test_iterable_types", "(", "Chart", ")", ":", "chart", "=", "Chart", "(", "no_prefix", "=", "True", ")", "chart", ".", "add", "(", "'A'", ",", "[", "1", ",", "2", "]", ")", "chart", ".", "add", "(", "'B'", ",", "[", "]", ")", "if", "(", "not", "chart", ".", "_dual", ")", ":", "chart", ".", "x_labels", "=", "(", "'red'", ",", "'green'", ",", "'blue'", ")", "chart1", "=", "chart", ".", "render", "(", ")", "chart", "=", "Chart", "(", "no_prefix", "=", "True", ")", "chart", ".", "add", "(", "'A'", ",", "(", "1", ",", "2", ")", ")", "chart", ".", "add", "(", "'B'", ",", "tuple", "(", ")", ")", "if", "(", "not", "chart", ".", "_dual", ")", ":", "chart", ".", "x_labels", "=", "(", "'red'", ",", "'green'", ",", "'blue'", ")", "chart2", "=", "chart", ".", "render", "(", ")", "assert", "(", "chart1", "==", "chart2", ")" ]
test adding a series as various iterable types .
train
false
7,938
def _lstsq(X, y, indices, fit_intercept): fit_intercept = int(fit_intercept) n_features = (X.shape[1] + fit_intercept) n_subsamples = indices.shape[1] weights = np.empty((indices.shape[0], n_features)) X_subpopulation = np.ones((n_subsamples, n_features)) y_subpopulation = np.zeros(max(n_subsamples, n_features)) (lstsq,) = get_lapack_funcs(('gelss',), (X_subpopulation, y_subpopulation)) for (index, subset) in enumerate(indices): X_subpopulation[:, fit_intercept:] = X[subset, :] y_subpopulation[:n_subsamples] = y[subset] weights[index] = lstsq(X_subpopulation, y_subpopulation)[1][:n_features] return weights
[ "def", "_lstsq", "(", "X", ",", "y", ",", "indices", ",", "fit_intercept", ")", ":", "fit_intercept", "=", "int", "(", "fit_intercept", ")", "n_features", "=", "(", "X", ".", "shape", "[", "1", "]", "+", "fit_intercept", ")", "n_subsamples", "=", "indices", ".", "shape", "[", "1", "]", "weights", "=", "np", ".", "empty", "(", "(", "indices", ".", "shape", "[", "0", "]", ",", "n_features", ")", ")", "X_subpopulation", "=", "np", ".", "ones", "(", "(", "n_subsamples", ",", "n_features", ")", ")", "y_subpopulation", "=", "np", ".", "zeros", "(", "max", "(", "n_subsamples", ",", "n_features", ")", ")", "(", "lstsq", ",", ")", "=", "get_lapack_funcs", "(", "(", "'gelss'", ",", ")", ",", "(", "X_subpopulation", ",", "y_subpopulation", ")", ")", "for", "(", "index", ",", "subset", ")", "in", "enumerate", "(", "indices", ")", ":", "X_subpopulation", "[", ":", ",", "fit_intercept", ":", "]", "=", "X", "[", "subset", ",", ":", "]", "y_subpopulation", "[", ":", "n_subsamples", "]", "=", "y", "[", "subset", "]", "weights", "[", "index", "]", "=", "lstsq", "(", "X_subpopulation", ",", "y_subpopulation", ")", "[", "1", "]", "[", ":", "n_features", "]", "return", "weights" ]
least squares estimator for theilsenregressor class .
train
false
7,939
def OpenHelpFile(fileName, helpCmd=None, helpArg=None): win32ui.DoWaitCursor(1) try: if (helpCmd is None): helpCmd = win32con.HELP_CONTENTS ext = os.path.splitext(fileName)[1].lower() if (ext == '.hlp'): win32api.WinHelp(win32ui.GetMainFrame().GetSafeHwnd(), fileName, helpCmd, helpArg) elif (0 and (ext == '.chm')): import win32help global htmlhelp_handle helpCmd = html_help_command_translators.get(helpCmd, helpCmd) frame = 0 if (htmlhelp_handle is None): (htmlhelp_hwnd, htmlhelp_handle) = win32help.HtmlHelp(frame, None, win32help.HH_INITIALIZE) win32help.HtmlHelp(frame, fileName, helpCmd, helpArg) else: win32api.ShellExecute(0, 'open', fileName, None, '', win32con.SW_SHOW) return fileName finally: win32ui.DoWaitCursor((-1))
[ "def", "OpenHelpFile", "(", "fileName", ",", "helpCmd", "=", "None", ",", "helpArg", "=", "None", ")", ":", "win32ui", ".", "DoWaitCursor", "(", "1", ")", "try", ":", "if", "(", "helpCmd", "is", "None", ")", ":", "helpCmd", "=", "win32con", ".", "HELP_CONTENTS", "ext", "=", "os", ".", "path", ".", "splitext", "(", "fileName", ")", "[", "1", "]", ".", "lower", "(", ")", "if", "(", "ext", "==", "'.hlp'", ")", ":", "win32api", ".", "WinHelp", "(", "win32ui", ".", "GetMainFrame", "(", ")", ".", "GetSafeHwnd", "(", ")", ",", "fileName", ",", "helpCmd", ",", "helpArg", ")", "elif", "(", "0", "and", "(", "ext", "==", "'.chm'", ")", ")", ":", "import", "win32help", "global", "htmlhelp_handle", "helpCmd", "=", "html_help_command_translators", ".", "get", "(", "helpCmd", ",", "helpCmd", ")", "frame", "=", "0", "if", "(", "htmlhelp_handle", "is", "None", ")", ":", "(", "htmlhelp_hwnd", ",", "htmlhelp_handle", ")", "=", "win32help", ".", "HtmlHelp", "(", "frame", ",", "None", ",", "win32help", ".", "HH_INITIALIZE", ")", "win32help", ".", "HtmlHelp", "(", "frame", ",", "fileName", ",", "helpCmd", ",", "helpArg", ")", "else", ":", "win32api", ".", "ShellExecute", "(", "0", ",", "'open'", ",", "fileName", ",", "None", ",", "''", ",", "win32con", ".", "SW_SHOW", ")", "return", "fileName", "finally", ":", "win32ui", ".", "DoWaitCursor", "(", "(", "-", "1", ")", ")" ]
open a help file .
train
false
7,940
@hook.command('spotify', 'sptrack') def spotify(text): params = {'q': text.strip()} request = requests.get('http://ws.spotify.com/search/1/track.json', params=params) if (request.status_code != requests.codes.ok): return 'Could not get track information: {}'.format(request.status_code) data = request.json() try: (_type, _id) = data['tracks'][0]['href'].split(':')[1:] except IndexError: return 'Could not find track.' url = web.try_shorten(gateway.format(_type, _id)) return '\x02{}\x02 by \x02{}\x02 - {}'.format(data['tracks'][0]['name'], data['tracks'][0]['artists'][0]['name'], url)
[ "@", "hook", ".", "command", "(", "'spotify'", ",", "'sptrack'", ")", "def", "spotify", "(", "text", ")", ":", "params", "=", "{", "'q'", ":", "text", ".", "strip", "(", ")", "}", "request", "=", "requests", ".", "get", "(", "'http://ws.spotify.com/search/1/track.json'", ",", "params", "=", "params", ")", "if", "(", "request", ".", "status_code", "!=", "requests", ".", "codes", ".", "ok", ")", ":", "return", "'Could not get track information: {}'", ".", "format", "(", "request", ".", "status_code", ")", "data", "=", "request", ".", "json", "(", ")", "try", ":", "(", "_type", ",", "_id", ")", "=", "data", "[", "'tracks'", "]", "[", "0", "]", "[", "'href'", "]", ".", "split", "(", "':'", ")", "[", "1", ":", "]", "except", "IndexError", ":", "return", "'Could not find track.'", "url", "=", "web", ".", "try_shorten", "(", "gateway", ".", "format", "(", "_type", ",", "_id", ")", ")", "return", "'\\x02{}\\x02 by \\x02{}\\x02 - {}'", ".", "format", "(", "data", "[", "'tracks'", "]", "[", "0", "]", "[", "'name'", "]", ",", "data", "[", "'tracks'", "]", "[", "0", "]", "[", "'artists'", "]", "[", "0", "]", "[", "'name'", "]", ",", "url", ")" ]
spotify <song> -- search spotify for <song> .
train
false
7,941
def _check_children(node): for child in node.get_children(): ok = False if (child is None): print(('Hm, child of %s is None' % node)) continue if (not hasattr(child, 'parent')): print((' ERROR: %s has child %s %x with no parent' % (node, child, id(child)))) elif (not child.parent): print((' ERROR: %s has child %s %x with parent %r' % (node, child, id(child), child.parent))) elif (child.parent is not node): print((' ERROR: %s %x has child %s %x with wrong parent %s' % (node, id(node), child, id(child), child.parent))) else: ok = True if (not ok): print('lines;', node.lineno, child.lineno) print('of module', node.root(), node.root().name) raise AstroidBuildingException _check_children(child)
[ "def", "_check_children", "(", "node", ")", ":", "for", "child", "in", "node", ".", "get_children", "(", ")", ":", "ok", "=", "False", "if", "(", "child", "is", "None", ")", ":", "print", "(", "(", "'Hm, child of %s is None'", "%", "node", ")", ")", "continue", "if", "(", "not", "hasattr", "(", "child", ",", "'parent'", ")", ")", ":", "print", "(", "(", "' ERROR: %s has child %s %x with no parent'", "%", "(", "node", ",", "child", ",", "id", "(", "child", ")", ")", ")", ")", "elif", "(", "not", "child", ".", "parent", ")", ":", "print", "(", "(", "' ERROR: %s has child %s %x with parent %r'", "%", "(", "node", ",", "child", ",", "id", "(", "child", ")", ",", "child", ".", "parent", ")", ")", ")", "elif", "(", "child", ".", "parent", "is", "not", "node", ")", ":", "print", "(", "(", "' ERROR: %s %x has child %s %x with wrong parent %s'", "%", "(", "node", ",", "id", "(", "node", ")", ",", "child", ",", "id", "(", "child", ")", ",", "child", ".", "parent", ")", ")", ")", "else", ":", "ok", "=", "True", "if", "(", "not", "ok", ")", ":", "print", "(", "'lines;'", ",", "node", ".", "lineno", ",", "child", ".", "lineno", ")", "print", "(", "'of module'", ",", "node", ".", "root", "(", ")", ",", "node", ".", "root", "(", ")", ".", "name", ")", "raise", "AstroidBuildingException", "_check_children", "(", "child", ")" ]
a helper function to check child-parent relations .
train
false
7,942
def ParseMultipleIndexDefinitions(document): return yaml_object.BuildObjects(IndexDefinitions, document)
[ "def", "ParseMultipleIndexDefinitions", "(", "document", ")", ":", "return", "yaml_object", ".", "BuildObjects", "(", "IndexDefinitions", ",", "document", ")" ]
parse multiple index definition documents from a string or stream .
train
false
7,943
def _kb_detail(request, readout_slug, readouts, main_view_name, main_dash_title, locale=None, product=None): return render(request, 'dashboards/kb_detail.html', {'readout': _kb_readout(request, readout_slug, readouts, locale, product=product), 'locale': locale, 'main_dash_view': main_view_name, 'main_dash_title': main_dash_title, 'product': product, 'products': Product.objects.filter(visible=True)})
[ "def", "_kb_detail", "(", "request", ",", "readout_slug", ",", "readouts", ",", "main_view_name", ",", "main_dash_title", ",", "locale", "=", "None", ",", "product", "=", "None", ")", ":", "return", "render", "(", "request", ",", "'dashboards/kb_detail.html'", ",", "{", "'readout'", ":", "_kb_readout", "(", "request", ",", "readout_slug", ",", "readouts", ",", "locale", ",", "product", "=", "product", ")", ",", "'locale'", ":", "locale", ",", "'main_dash_view'", ":", "main_view_name", ",", "'main_dash_title'", ":", "main_dash_title", ",", "'product'", ":", "product", ",", "'products'", ":", "Product", ".", "objects", ".", "filter", "(", "visible", "=", "True", ")", "}", ")" ]
show all the rows for the given kb article statistics table .
train
false
7,945
def demo__google_result_open_in_new_tab(raw_text, content_mime): def hexlify_to_json(ascii_str): _buff = '' for char in ascii_str: if (char in '\'"<>&='): _buff += ('\\x' + hex(ord(char))[2:]) else: _buff += char _buff = _buff.replace('\\', '\\\\') _buff = _buff.replace('/', '\\/') return _buff if (content_mime == 'application/json'): raw_text = raw_text.replace(hexlify_to_json('<h3 class="r"><a href="'), hexlify_to_json('<h3 class="r"><a target="_blank" href="')) raw_text = raw_text.replace(hexlify_to_json('<h3 class="r"><a class="l" href="'), hexlify_to_json('<h3 class="r"><a target="_blank" class="l" href="')) else: raw_text = raw_text.replace('<h3 class="r"><a href="', '<h3 class="r"><a target="_blank" href="') raw_text = raw_text.replace('<h3 class="r"><a class="l" href="', '<h3 class="r"><a target="_blank" class="l" href="') return raw_text
[ "def", "demo__google_result_open_in_new_tab", "(", "raw_text", ",", "content_mime", ")", ":", "def", "hexlify_to_json", "(", "ascii_str", ")", ":", "_buff", "=", "''", "for", "char", "in", "ascii_str", ":", "if", "(", "char", "in", "'\\'\"<>&='", ")", ":", "_buff", "+=", "(", "'\\\\x'", "+", "hex", "(", "ord", "(", "char", ")", ")", "[", "2", ":", "]", ")", "else", ":", "_buff", "+=", "char", "_buff", "=", "_buff", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")", "_buff", "=", "_buff", ".", "replace", "(", "'/'", ",", "'\\\\/'", ")", "return", "_buff", "if", "(", "content_mime", "==", "'application/json'", ")", ":", "raw_text", "=", "raw_text", ".", "replace", "(", "hexlify_to_json", "(", "'<h3 class=\"r\"><a href=\"'", ")", ",", "hexlify_to_json", "(", "'<h3 class=\"r\"><a target=\"_blank\" href=\"'", ")", ")", "raw_text", "=", "raw_text", ".", "replace", "(", "hexlify_to_json", "(", "'<h3 class=\"r\"><a class=\"l\" href=\"'", ")", ",", "hexlify_to_json", "(", "'<h3 class=\"r\"><a target=\"_blank\" class=\"l\" href=\"'", ")", ")", "else", ":", "raw_text", "=", "raw_text", ".", "replace", "(", "'<h3 class=\"r\"><a href=\"'", ",", "'<h3 class=\"r\"><a target=\"_blank\" href=\"'", ")", "raw_text", "=", "raw_text", ".", "replace", "(", "'<h3 class=\"r\"><a class=\"l\" href=\"'", ",", "'<h3 class=\"r\"><a target=\"_blank\" class=\"l\" href=\"'", ")", "return", "raw_text" ]
force google search results to open in a new tab .
train
false
7,947
def apply_gaussian(X, sigma): return np.array([ndimage.gaussian_filter(x, sigma) for x in X])
[ "def", "apply_gaussian", "(", "X", ",", "sigma", ")", ":", "return", "np", ".", "array", "(", "[", "ndimage", ".", "gaussian_filter", "(", "x", ",", "sigma", ")", "for", "x", "in", "X", "]", ")" ]
a simple function to apply a gaussian blur on each image in x .
train
false
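A shape-preserving usage sketch for apply_gaussian, assuming scipy.ndimage is available:

import numpy as np

X = np.random.rand(5, 32, 32)    # a batch of five 32x32 images
assert apply_gaussian(X, sigma=1.5).shape == X.shape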
7,948
def S_ISFIFO(mode): return (S_IFMT(mode) == S_IFIFO)
[ "def", "S_ISFIFO", "(", "mode", ")", ":", "return", "(", "S_IFMT", "(", "mode", ")", "==", "S_IFIFO", ")" ]
return true if mode is from a fifo .
train
false
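A usage sketch for S_ISFIFO mirroring the stdlib stat module (POSIX only; the path is a placeholder):

import os

os.mkfifo('demo_fifo')
assert S_ISFIFO(os.stat('demo_fifo').st_mode)
os.remove('demo_fifo')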