id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
24,174
def _get_placeholder_cache_version(placeholder, lang, site_id):
    """Return ``(version, vary_on_list)`` for a placeholder cache entry.

    Looks up the stored version tuple; when none is cached, a fresh
    microsecond-timestamp version with an empty vary-on list is created
    and written back via ``_set_placeholder_cache_version``.
    """
    from django.core.cache import cache
    cache_key = _get_placeholder_cache_version_key(placeholder, lang, site_id)
    stored = cache.get(cache_key)
    if stored:
        version, vary_on_list = stored
    else:
        # Microsecond resolution keeps versions effectively unique.
        version = int(time.time() * 1000000)
        vary_on_list = []
        _set_placeholder_cache_version(placeholder, lang, site_id, version, vary_on_list)
    return (version, vary_on_list)
[ "def", "_get_placeholder_cache_version", "(", "placeholder", ",", "lang", ",", "site_id", ")", ":", "from", "django", ".", "core", ".", "cache", "import", "cache", "key", "=", "_get_placeholder_cache_version_key", "(", "placeholder", ",", "lang", ",", "site_id", ")", "cached", "=", "cache", ".", "get", "(", "key", ")", "if", "cached", ":", "(", "version", ",", "vary_on_list", ")", "=", "cached", "else", ":", "version", "=", "int", "(", "(", "time", ".", "time", "(", ")", "*", "1000000", ")", ")", "vary_on_list", "=", "[", "]", "_set_placeholder_cache_version", "(", "placeholder", ",", "lang", ",", "site_id", ",", "version", ",", "vary_on_list", ")", "return", "(", "version", ",", "vary_on_list", ")" ]
gets the placeholder's current version and vary-on header-names list .
train
false
24,175
def _get_custom_cache_region(expiration_time=WEEK, backend=None, url=None):
    """Create and configure an oslo.cache region from legacy-style settings.

    Only the ``oslo_cache.dict`` and ``dogpile.cache.memcached`` backends
    are accepted; any other backend raises ``RuntimeError``.
    """
    region = cache.create_region()
    params = {}
    if expiration_time != 0:
        params['expiration_time'] = expiration_time
    if backend == 'oslo_cache.dict':
        params['arguments'] = {'expiration_time': expiration_time}
    elif backend == 'dogpile.cache.memcached':
        params['arguments'] = {'url': url}
    else:
        raise RuntimeError(_('old style configuration can use only dictionary or memcached backends'))
    region.configure(backend, **params)
    return region
[ "def", "_get_custom_cache_region", "(", "expiration_time", "=", "WEEK", ",", "backend", "=", "None", ",", "url", "=", "None", ")", ":", "region", "=", "cache", ".", "create_region", "(", ")", "region_params", "=", "{", "}", "if", "(", "expiration_time", "!=", "0", ")", ":", "region_params", "[", "'expiration_time'", "]", "=", "expiration_time", "if", "(", "backend", "==", "'oslo_cache.dict'", ")", ":", "region_params", "[", "'arguments'", "]", "=", "{", "'expiration_time'", ":", "expiration_time", "}", "elif", "(", "backend", "==", "'dogpile.cache.memcached'", ")", ":", "region_params", "[", "'arguments'", "]", "=", "{", "'url'", ":", "url", "}", "else", ":", "raise", "RuntimeError", "(", "_", "(", "'old style configuration can use only dictionary or memcached backends'", ")", ")", "region", ".", "configure", "(", "backend", ",", "**", "region_params", ")", "return", "region" ]
create instance of oslo_cache client .
train
false
24,176
def hex_to_rgb(value):
    """Convert a hex colour string (e.g. ``'#ff00aa'``) to an RGB tuple.

    The leading ``#`` is stripped and the remainder is cut into three
    equal-width sections, each parsed as a base-16 integer.
    """
    digits = value.lstrip('#')
    total = len(digits)
    width = total // 3
    return tuple(int(digits[start:start + width], 16)
                 for start in range(0, total, width))
[ "def", "hex_to_rgb", "(", "value", ")", ":", "value", "=", "value", ".", "lstrip", "(", "'#'", ")", "hex_total_length", "=", "len", "(", "value", ")", "rgb_section_length", "=", "(", "hex_total_length", "//", "3", ")", "return", "tuple", "(", "(", "int", "(", "value", "[", "i", ":", "(", "i", "+", "rgb_section_length", ")", "]", ",", "16", ")", "for", "i", "in", "range", "(", "0", ",", "hex_total_length", ",", "rgb_section_length", ")", ")", ")" ]
calculates rgb values from a hex color code .
train
false
24,177
def _sort_candidates(candidates): return sorted(candidates, key=(lambda match: match.distance))
[ "def", "_sort_candidates", "(", "candidates", ")", ":", "return", "sorted", "(", "candidates", ",", "key", "=", "(", "lambda", "match", ":", "match", ".", "distance", ")", ")" ]
sort candidates by distance .
train
false
24,180
def hough_line(img, theta=None):
    """Perform a straight-line Hough transform on ``img``.

    When ``theta`` is omitted, 180 evenly spaced angles spanning
    ``[-pi/2, pi/2]`` are used.
    """
    if theta is None:
        theta = np.linspace(-np.pi / 2, np.pi / 2, 180)
    return _hough_line(img, theta)
[ "def", "hough_line", "(", "img", ",", "theta", "=", "None", ")", ":", "if", "(", "theta", "is", "None", ")", ":", "theta", "=", "np", ".", "linspace", "(", "(", "(", "-", "np", ".", "pi", ")", "/", "2", ")", ",", "(", "np", ".", "pi", "/", "2", ")", ",", "180", ")", "return", "_hough_line", "(", "img", ",", "theta", ")" ]
perform a straight line hough transform .
train
false
24,181
def _update_dataset_primary(deployment, dataset_id, primary):
    """Move the manifestation for ``dataset_id`` to the node at ``primary``.

    The manifestation is discarded from the node that currently holds it
    and re-attached to the node addressed by ``primary``; the updated
    deployment is returned.
    """
    manifestation, source_node = _find_manifestation_and_node(deployment, dataset_id)
    # Remove the manifestation from its current node.
    source_node = source_node.transform(('manifestations', manifestation.dataset_id), discard)
    deployment = deployment.update_node(source_node)
    # Attach it to the requested primary node.
    target_node = deployment.get_node(primary)
    target_node = target_node.transform(('manifestations', dataset_id), manifestation)
    return deployment.update_node(target_node)
[ "def", "_update_dataset_primary", "(", "deployment", ",", "dataset_id", ",", "primary", ")", ":", "(", "primary_manifestation", ",", "old_primary_node", ")", "=", "_find_manifestation_and_node", "(", "deployment", ",", "dataset_id", ")", "old_primary_node", "=", "old_primary_node", ".", "transform", "(", "(", "'manifestations'", ",", "primary_manifestation", ".", "dataset_id", ")", ",", "discard", ")", "deployment", "=", "deployment", ".", "update_node", "(", "old_primary_node", ")", "new_primary_node", "=", "deployment", ".", "get_node", "(", "primary", ")", "new_primary_node", "=", "new_primary_node", ".", "transform", "(", "(", "'manifestations'", ",", "dataset_id", ")", ",", "primary_manifestation", ")", "deployment", "=", "deployment", ".", "update_node", "(", "new_primary_node", ")", "return", "deployment" ]
update the deployment so that the dataset with the supplied dataset_id is on the node with the supplied primary address .
train
false
24,182
def param_set(name, value, retries=3):
    """Set a MAV parameter (name is upper-cased), retrying on failure."""
    upper_name = name.upper()
    return mpstate.mav_param.mavset(mpstate.master(), upper_name, value, retries=retries)
[ "def", "param_set", "(", "name", ",", "value", ",", "retries", "=", "3", ")", ":", "name", "=", "name", ".", "upper", "(", ")", "return", "mpstate", ".", "mav_param", ".", "mavset", "(", "mpstate", ".", "master", "(", ")", ",", "name", ",", "value", ",", "retries", "=", "retries", ")" ]
set a param in varnish cache cli example: .
train
true
24,185
def copystream(src, dest, size, chunk_size=(10 ** 5)):
    """Copy up to ``size`` bytes from ``src`` to ``dest``, then rewind ``dest``.

    Reads in ``chunk_size`` pieces, stops early on an empty or short read,
    and clamps any over-delivery so no more than ``size`` bytes are written.
    """
    remaining = size
    while remaining > 0:
        data = src.read(remaining if remaining < chunk_size else chunk_size)
        length = len(data)
        if length > remaining:
            # Defensive clamp: some sources return more than was requested.
            data = data[:remaining]
            length = remaining
        remaining -= length
        if length == 0:
            break
        dest.write(data)
        if length < chunk_size:
            # A short read means the source is exhausted.
            break
    dest.seek(0)
    return
[ "def", "copystream", "(", "src", ",", "dest", ",", "size", ",", "chunk_size", "=", "(", "10", "**", "5", ")", ")", ":", "while", "(", "size", ">", "0", ")", ":", "if", "(", "size", "<", "chunk_size", ")", ":", "data", "=", "src", ".", "read", "(", "size", ")", "else", ":", "data", "=", "src", ".", "read", "(", "chunk_size", ")", "length", "=", "len", "(", "data", ")", "if", "(", "length", ">", "size", ")", ":", "(", "data", ",", "length", ")", "=", "(", "data", "[", ":", "size", "]", ",", "size", ")", "size", "-=", "length", "if", "(", "length", "==", "0", ")", ":", "break", "dest", ".", "write", "(", "data", ")", "if", "(", "length", "<", "chunk_size", ")", ":", "break", "dest", ".", "seek", "(", "0", ")", "return" ]
this is here because i think there is a bug in shutil .
train
false
24,187
def get_default_company(user=None):
    """Return the user's default company, falling back to Global Defaults."""
    from frappe.defaults import get_user_default_as_list
    if not user:
        user = frappe.session.user
    companies = get_user_default_as_list(user, u'company')
    if companies:
        return companies[0]
    # No per-user default: use the site-wide setting.
    return frappe.db.get_single_value(u'Global Defaults', u'default_company')
[ "def", "get_default_company", "(", "user", "=", "None", ")", ":", "from", "frappe", ".", "defaults", "import", "get_user_default_as_list", "if", "(", "not", "user", ")", ":", "user", "=", "frappe", ".", "session", ".", "user", "companies", "=", "get_user_default_as_list", "(", "user", ",", "u'company'", ")", "if", "companies", ":", "default_company", "=", "companies", "[", "0", "]", "else", ":", "default_company", "=", "frappe", ".", "db", ".", "get_single_value", "(", "u'Global Defaults'", ",", "u'default_company'", ")", "return", "default_company" ]
get default company for user .
train
false
24,188
def items_overlap_with_suffix(left, lsuffix, right, rsuffix):
    """Suffix overlapping labels so a join produces unique columns.

    Returns the (possibly renamed) left and right indexes; raises
    ``ValueError`` when labels overlap but neither suffix is supplied.
    """
    overlap = left.intersection(right)
    if len(overlap) == 0:
        return (left, right)
    if (not lsuffix) and (not rsuffix):
        raise ValueError('columns overlap but no suffix specified: %s' % overlap)

    def make_renamer(suffix):
        # Only labels present in both indexes receive the suffix.
        def renamer(label):
            if label in overlap:
                return '%s%s' % (label, suffix)
            return label
        return renamer

    return (_transform_index(left, make_renamer(lsuffix)),
            _transform_index(right, make_renamer(rsuffix)))
[ "def", "items_overlap_with_suffix", "(", "left", ",", "lsuffix", ",", "right", ",", "rsuffix", ")", ":", "to_rename", "=", "left", ".", "intersection", "(", "right", ")", "if", "(", "len", "(", "to_rename", ")", "==", "0", ")", ":", "return", "(", "left", ",", "right", ")", "else", ":", "if", "(", "(", "not", "lsuffix", ")", "and", "(", "not", "rsuffix", ")", ")", ":", "raise", "ValueError", "(", "(", "'columns overlap but no suffix specified: %s'", "%", "to_rename", ")", ")", "def", "lrenamer", "(", "x", ")", ":", "if", "(", "x", "in", "to_rename", ")", ":", "return", "(", "'%s%s'", "%", "(", "x", ",", "lsuffix", ")", ")", "return", "x", "def", "rrenamer", "(", "x", ")", ":", "if", "(", "x", "in", "to_rename", ")", ":", "return", "(", "'%s%s'", "%", "(", "x", ",", "rsuffix", ")", ")", "return", "x", "return", "(", "_transform_index", "(", "left", ",", "lrenamer", ")", ",", "_transform_index", "(", "right", ",", "rrenamer", ")", ")" ]
if two indices overlap .
train
false
24,189
def XMPPClientFactory(jid, password):
    """Build an ``XmlStreamFactory`` wired to an XMPP authenticator for ``jid``."""
    authenticator = XMPPAuthenticator(jid, password)
    return xmlstream.XmlStreamFactory(authenticator)
[ "def", "XMPPClientFactory", "(", "jid", ",", "password", ")", ":", "a", "=", "XMPPAuthenticator", "(", "jid", ",", "password", ")", "return", "xmlstream", ".", "XmlStreamFactory", "(", "a", ")" ]
client factory for xmpp 1 .
train
false
24,190
def dup_multi_deflate(polys, K):
    """Map ``x**G`` to ``y`` across a set of dense polynomials in K[x].

    Computes the largest exponent GCD ``G`` shared by all polynomials and
    returns ``(G, deflated_polys)``; bails out with ``(1, polys)`` as soon
    as deflation is seen to be impossible.
    """
    overall = 0
    for poly in polys:
        if dup_degree(poly) <= 0:
            # Constant (or zero) polynomials cannot be deflated.
            return (1, polys)
        exp_gcd = 0
        for offset in range(len(poly)):
            # poly[-offset-1] is the coefficient of x**offset.
            if not poly[(-offset) - 1]:
                continue
            exp_gcd = igcd(exp_gcd, offset)
            if exp_gcd == 1:
                return (1, polys)
        overall = igcd(overall, exp_gcd)
    return (overall, tuple(poly[::overall] for poly in polys))
[ "def", "dup_multi_deflate", "(", "polys", ",", "K", ")", ":", "G", "=", "0", "for", "p", "in", "polys", ":", "if", "(", "dup_degree", "(", "p", ")", "<=", "0", ")", ":", "return", "(", "1", ",", "polys", ")", "g", "=", "0", "for", "i", "in", "range", "(", "len", "(", "p", ")", ")", ":", "if", "(", "not", "p", "[", "(", "(", "-", "i", ")", "-", "1", ")", "]", ")", ":", "continue", "g", "=", "igcd", "(", "g", ",", "i", ")", "if", "(", "g", "==", "1", ")", ":", "return", "(", "1", ",", "polys", ")", "G", "=", "igcd", "(", "G", ",", "g", ")", "return", "(", "G", ",", "tuple", "(", "[", "p", "[", ":", ":", "G", "]", "for", "p", "in", "polys", "]", ")", ")" ]
map x**m to y in a set of polynomials in k[x] .
train
false
24,191
def ensure_context_attribute_exists(context, name, default_value=None):
    """Set ``context.<name>`` to ``default_value`` unless it already exists."""
    if hasattr(context, name):
        return
    setattr(context, name, default_value)
[ "def", "ensure_context_attribute_exists", "(", "context", ",", "name", ",", "default_value", "=", "None", ")", ":", "if", "(", "not", "hasattr", "(", "context", ",", "name", ")", ")", ":", "setattr", "(", "context", ",", "name", ",", "default_value", ")" ]
ensure a behave resource exists as attribute in the behave context .
train
true
24,192
def get_from_user(title, prompt, hidden=False, value=None):
    """Pop up an AppleScript dialog (via Alfred 2) asking the user for input.

    title: dialog window title.
    prompt: text shown above the input field (a ':' is appended).
    hidden: when True the input is masked (password-style answer).
    value: default text pre-filled in the input field.
    Returns the entered text with the trailing newline stripped.
    """
    if (value is None): value = ''
    # AppleScript template; argv carries title / prompt / hidden flag and the
    # default value is substituted via str.format below.
    script = '\n    on run argv\n        tell application "Alfred 2"\n            activate\n            set alfredPath to (path to application "Alfred 2")\n            set alfredIcon to path to resource "appicon.icns" in bundle \xc2\xac\n                (alfredPath as alias)\n\n            set dlgTitle to (item 1 of argv)\n            set dlgPrompt to (item 2 of argv)\n\n            if (count of argv) is 3\n                set dlgHidden to (item 3 of argv as boolean)\n            else\n                set dlgHidden to false\n            end if\n\n            if dlgHidden\n                display dialog dlgPrompt & ":" with title dlgTitle \xc2\xac\n                    default answer "{v}" with icon alfredIcon with hidden answer\n            else\n                display dialog dlgPrompt & ":" with title dlgTitle \xc2\xac\n                    default answer "{v}" with icon alfredIcon\n            end if\n\n            set answer to text returned of result\n        end tell\n    end run'.format(v=value)
    from subprocess import Popen, PIPE
    # ``osascript -`` reads the script from stdin; extra args become argv.
    cmd = ['osascript', '-', title, prompt]
    if hidden: cmd.append('true')
    p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    # stderr is captured but deliberately ignored.
    (stdout, stderr) = p.communicate(script)
    return stdout.rstrip('\n')
[ "def", "get_from_user", "(", "title", ",", "prompt", ",", "hidden", "=", "False", ",", "value", "=", "None", ")", ":", "if", "(", "value", "is", "None", ")", ":", "value", "=", "''", "script", "=", "'\\n on run argv\\n tell application \"Alfred 2\"\\n activate\\n set alfredPath to (path to application \"Alfred 2\")\\n set alfredIcon to path to resource \"appicon.icns\" in bundle \\xc2\\xac\\n (alfredPath as alias)\\n\\n set dlgTitle to (item 1 of argv)\\n set dlgPrompt to (item 2 of argv)\\n\\n if (count of argv) is 3\\n set dlgHidden to (item 3 of argv as boolean)\\n else\\n set dlgHidden to false\\n end if\\n\\n if dlgHidden\\n display dialog dlgPrompt & \":\" with title dlgTitle \\xc2\\xac\\n default answer \"{v}\" with icon alfredIcon with hidden answer\\n else\\n display dialog dlgPrompt & \":\" with title dlgTitle \\xc2\\xac\\n default answer \"{v}\" with icon alfredIcon\\n end if\\n\\n set answer to text returned of result\\n end tell\\n end run'", ".", "format", "(", "v", "=", "value", ")", "from", "subprocess", "import", "Popen", ",", "PIPE", "cmd", "=", "[", "'osascript'", ",", "'-'", ",", "title", ",", "prompt", "]", "if", "hidden", ":", "cmd", ".", "append", "(", "'true'", ")", "p", "=", "Popen", "(", "cmd", ",", "stdin", "=", "PIPE", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "(", "stdout", ",", "stderr", ")", "=", "p", ".", "communicate", "(", "script", ")", "return", "stdout", ".", "rstrip", "(", "'\\n'", ")" ]
popup a dialog to request some piece of information .
train
false
24,193
def install_as_MySQLdb():
    """Alias the ``pymysql`` module as ``MySQLdb`` and ``_mysql`` in ``sys.modules``."""
    pymysql_module = sys.modules['pymysql']
    sys.modules['MySQLdb'] = pymysql_module
    sys.modules['_mysql'] = pymysql_module
[ "def", "install_as_MySQLdb", "(", ")", ":", "sys", ".", "modules", "[", "'MySQLdb'", "]", "=", "sys", ".", "modules", "[", "'_mysql'", "]", "=", "sys", ".", "modules", "[", "'pymysql'", "]" ]
after this function is called .
train
false
24,195
def run_hive_cmd(hivecmd, check_return_code=True):
    """Execute ``hivecmd`` via ``hive -e`` and return its stdout."""
    args = ['-e', hivecmd]
    return run_hive(args, check_return_code)
[ "def", "run_hive_cmd", "(", "hivecmd", ",", "check_return_code", "=", "True", ")", ":", "return", "run_hive", "(", "[", "'-e'", ",", "hivecmd", "]", ",", "check_return_code", ")" ]
runs the given hive query and returns stdout .
train
false
24,196
def _resize_event(event, params):
    """Persist the new figure size to the MNE config and re-layout the figure."""
    inches = params['fig'].get_size_inches()
    size = ','.join(str(dim) for dim in inches)
    set_config('MNE_BROWSE_RAW_SIZE', size, set_env=False)
    _layout_figure(params)
[ "def", "_resize_event", "(", "event", ",", "params", ")", ":", "size", "=", "','", ".", "join", "(", "[", "str", "(", "s", ")", "for", "s", "in", "params", "[", "'fig'", "]", ".", "get_size_inches", "(", ")", "]", ")", "set_config", "(", "'MNE_BROWSE_RAW_SIZE'", ",", "size", ",", "set_env", "=", "False", ")", "_layout_figure", "(", "params", ")" ]
handle resize event .
train
false
24,197
def _ofport_result_pending(result): try: int(result) return False except (ValueError, TypeError): return True
[ "def", "_ofport_result_pending", "(", "result", ")", ":", "try", ":", "int", "(", "result", ")", "return", "False", "except", "(", "ValueError", ",", "TypeError", ")", ":", "return", "True" ]
return true if ovs-vsctl indicates the result is still pending .
train
false
24,198
def _describe_source(d): path = (d.get('path') or '') if (('num_lines' in d) and ('start_line' in d)): if (d['num_lines'] == 1): return ('line %d of %s' % ((d['start_line'] + 1), path)) else: return ('lines %d-%d of %s' % ((d['start_line'] + 1), (d['start_line'] + d['num_lines']), path)) else: return path
[ "def", "_describe_source", "(", "d", ")", ":", "path", "=", "(", "d", ".", "get", "(", "'path'", ")", "or", "''", ")", "if", "(", "(", "'num_lines'", "in", "d", ")", "and", "(", "'start_line'", "in", "d", ")", ")", ":", "if", "(", "d", "[", "'num_lines'", "]", "==", "1", ")", ":", "return", "(", "'line %d of %s'", "%", "(", "(", "d", "[", "'start_line'", "]", "+", "1", ")", ",", "path", ")", ")", "else", ":", "return", "(", "'lines %d-%d of %s'", "%", "(", "(", "d", "[", "'start_line'", "]", "+", "1", ")", ",", "(", "d", "[", "'start_line'", "]", "+", "d", "[", "'num_lines'", "]", ")", ",", "path", ")", ")", "else", ":", "return", "path" ]
return either <path> or line n of <path> or lines m-n of <path> .
train
false
24,200
def pp_to_opts(pp):
    """Map a numeric post-processing level to ``(repair, unpack, delete)`` flags."""
    level = sabnzbd.interface.int_conv(pp)
    # Each level enables one more stage than the previous one; anything
    # above 2 enables all three.
    known = {0: (False, False, False),
             1: (True, False, False),
             2: (True, True, False)}
    return known.get(level, (True, True, True))
[ "def", "pp_to_opts", "(", "pp", ")", ":", "pp", "=", "sabnzbd", ".", "interface", ".", "int_conv", "(", "pp", ")", "if", "(", "pp", "==", "0", ")", ":", "return", "(", "False", ",", "False", ",", "False", ")", "if", "(", "pp", "==", "1", ")", ":", "return", "(", "True", ",", "False", ",", "False", ")", "if", "(", "pp", "==", "2", ")", ":", "return", "(", "True", ",", "True", ",", "False", ")", "return", "(", "True", ",", "True", ",", "True", ")" ]
convert numeric processing options to .
train
false
24,201
def extract_crypto_meta(value):
    """Split trailing ``; swift_meta=...`` crypto metadata off ``value``.

    Returns ``(value, crypto_meta)`` where ``crypto_meta`` is the
    deserialized metadata, or ``None`` when none is attached.  Values with
    other than exactly one ``;`` are returned unchanged.
    """
    crypto_meta = None
    parts = value.split(';')
    if len(parts) == 2:
        value, param = parts
        tag = 'swift_meta='
        stripped = param.strip()
        if stripped.startswith(tag):
            crypto_meta = load_crypto_meta(stripped[len(tag):])
    return (value, crypto_meta)
[ "def", "extract_crypto_meta", "(", "value", ")", ":", "crypto_meta", "=", "None", "parts", "=", "value", ".", "split", "(", "';'", ")", "if", "(", "len", "(", "parts", ")", "==", "2", ")", ":", "(", "value", ",", "param", ")", "=", "parts", "crypto_meta_tag", "=", "'swift_meta='", "if", "param", ".", "strip", "(", ")", ".", "startswith", "(", "crypto_meta_tag", ")", ":", "param", "=", "param", ".", "strip", "(", ")", "[", "len", "(", "crypto_meta_tag", ")", ":", "]", "crypto_meta", "=", "load_crypto_meta", "(", "param", ")", "return", "(", "value", ",", "crypto_meta", ")" ]
extract and deserialize any crypto meta from the end of a value .
train
false
24,203
def ddt(cls):
    """Class decorator for unittest.TestCase subclasses: expand data-driven tests.

    Every method carrying DATA_ATTR spawns one concrete test per data
    value (with UNPACK_ATTR, sequences are splatted positionally and
    mappings as keywords); methods carrying FILE_ATTR are expanded from an
    external data file.  The original template methods are removed.
    Returns the mutated class.
    """
    # Snapshot items() in a list: we mutate the class while iterating.
    for (name, func) in list(cls.__dict__.items()):
        if hasattr(func, DATA_ATTR):
            for (i, v) in enumerate(getattr(func, DATA_ATTR)):
                # Derive a unique, readable test name from the value.
                test_name = mk_test_name(name, getattr(v, '__name__', v), i)
                if hasattr(func, UNPACK_ATTR):
                    if (isinstance(v, tuple) or isinstance(v, list)):
                        # Positional unpacking for sequences.
                        add_test(cls, test_name, func, *v)
                    else:
                        # Keyword unpacking for mappings.
                        add_test(cls, test_name, func, **v)
                else:
                    add_test(cls, test_name, func, v)
            # Drop the template so the runner never sees it directly.
            delattr(cls, name)
        elif hasattr(func, FILE_ATTR):
            file_attr = getattr(func, FILE_ATTR)
            process_file_data(cls, name, func, file_attr)
            delattr(cls, name)
    return cls
[ "def", "ddt", "(", "cls", ")", ":", "for", "(", "name", ",", "func", ")", "in", "list", "(", "cls", ".", "__dict__", ".", "items", "(", ")", ")", ":", "if", "hasattr", "(", "func", ",", "DATA_ATTR", ")", ":", "for", "(", "i", ",", "v", ")", "in", "enumerate", "(", "getattr", "(", "func", ",", "DATA_ATTR", ")", ")", ":", "test_name", "=", "mk_test_name", "(", "name", ",", "getattr", "(", "v", ",", "'__name__'", ",", "v", ")", ",", "i", ")", "if", "hasattr", "(", "func", ",", "UNPACK_ATTR", ")", ":", "if", "(", "isinstance", "(", "v", ",", "tuple", ")", "or", "isinstance", "(", "v", ",", "list", ")", ")", ":", "add_test", "(", "cls", ",", "test_name", ",", "func", ",", "*", "v", ")", "else", ":", "add_test", "(", "cls", ",", "test_name", ",", "func", ",", "**", "v", ")", "else", ":", "add_test", "(", "cls", ",", "test_name", ",", "func", ",", "v", ")", "delattr", "(", "cls", ",", "name", ")", "elif", "hasattr", "(", "func", ",", "FILE_ATTR", ")", ":", "file_attr", "=", "getattr", "(", "func", ",", "FILE_ATTR", ")", "process_file_data", "(", "cls", ",", "name", ",", "func", ",", "file_attr", ")", "delattr", "(", "cls", ",", "name", ")", "return", "cls" ]
class decorator for subclasses of unittest .
train
false
24,204
def get_date_format(format='medium', locale=LC_TIME):
    """Return the locale's date-format pattern for the given format width."""
    parsed_locale = Locale.parse(locale)
    return parsed_locale.date_formats[format]
[ "def", "get_date_format", "(", "format", "=", "'medium'", ",", "locale", "=", "LC_TIME", ")", ":", "return", "Locale", ".", "parse", "(", "locale", ")", ".", "date_formats", "[", "format", "]" ]
return the date formatting patterns used by the locale for the specified format .
train
false
24,206
def s_one_one(topics):
    """S_one_one segmentation: all ordered pairs of distinct top words per topic.

    For each topic (a list of top words) produces every ``(w', w*)`` pair
    with ``w'`` and ``w*`` at different positions, in outer/inner index
    order.
    """
    return [[(w_prime, w_star)
             for i, w_prime in enumerate(top_words)
             for j, w_star in enumerate(top_words)
             if i != j]
            for top_words in topics]
[ "def", "s_one_one", "(", "topics", ")", ":", "s_one_one", "=", "[", "]", "for", "top_words", "in", "topics", ":", "s_one_one_t", "=", "[", "]", "for", "(", "w_prime_index", ",", "w_prime", ")", "in", "enumerate", "(", "top_words", ")", ":", "for", "(", "w_star_index", ",", "w_star", ")", "in", "enumerate", "(", "top_words", ")", ":", "if", "(", "w_prime_index", "==", "w_star_index", ")", ":", "continue", "else", ":", "s_one_one_t", ".", "append", "(", "(", "w_prime", ",", "w_star", ")", ")", "s_one_one", ".", "append", "(", "s_one_one_t", ")", "return", "s_one_one" ]
this function performs s_one_one segmentation on a list of topics .
train
false
24,207
def create_new_course_in_store(store, user, org, number, run, fields):
    """Create a course in the given modulestore and grant instructor access.

    NOTE: ``fields`` is mutated in place — language and certificate HTML
    view defaults are injected before creation.
    """
    fields.update({
        'language': getattr(settings, 'DEFAULT_COURSE_LANGUAGE', 'en'),
        'cert_html_view_enabled': True,
    })
    with modulestore().default_store(store):
        new_course = modulestore().create_course(org, number, run, user.id, fields=fields)
    # The creator becomes the course instructor and gets initial permissions.
    add_instructor(new_course.id, user, user)
    initialize_permissions(new_course.id, user)
    return new_course
[ "def", "create_new_course_in_store", "(", "store", ",", "user", ",", "org", ",", "number", ",", "run", ",", "fields", ")", ":", "fields", ".", "update", "(", "{", "'language'", ":", "getattr", "(", "settings", ",", "'DEFAULT_COURSE_LANGUAGE'", ",", "'en'", ")", ",", "'cert_html_view_enabled'", ":", "True", "}", ")", "with", "modulestore", "(", ")", ".", "default_store", "(", "store", ")", ":", "new_course", "=", "modulestore", "(", ")", ".", "create_course", "(", "org", ",", "number", ",", "run", ",", "user", ".", "id", ",", "fields", "=", "fields", ")", "add_instructor", "(", "new_course", ".", "id", ",", "user", ",", "user", ")", "initialize_permissions", "(", "new_course", ".", "id", ",", "user", ")", "return", "new_course" ]
create course in store w/ handling instructor enrollment .
train
false
24,210
def pportInBusy():
    """Read the parallel-port BUSY pin: 0 when status bit 0x80 is set, else 1."""
    status = port.DlPortReadPortUchar(statusRegAdrs)
    return 0 if (status & 128) else 1
[ "def", "pportInBusy", "(", ")", ":", "if", "(", "port", ".", "DlPortReadPortUchar", "(", "statusRegAdrs", ")", "&", "128", ")", ":", "return", "0", "else", ":", "return", "1" ]
input from busy pin .
train
false
24,211
def validate_and_update_entities(db_access, zookeeper, log_postfix, total_entities):
    """Validate/update all entities, walking the table in BATCH_SIZE batches.

    db_access: datastore access object used to fetch entity batches.
    zookeeper: handle passed through to process_entity for each entity.
    log_postfix: suffix identifying the JSON progress file.
    total_entities: optional grand total used in "X/Y" progress strings.

    Every LOG_PROGRESS_FREQUENCY seconds, progress is logged and an
    in-progress status is written to a JSON file.
    """
    last_key = ''
    entities_checked = 0
    last_logged = time.time()
    while True:
        logging.debug('Fetching {} entities'.format(BATCH_SIZE))
        entities = get_entity_batch(last_key, db_access, BATCH_SIZE)
        if (not entities):
            # Empty batch: the whole table has been walked.
            break
        for entity in entities:
            process_entity(entity, db_access, zookeeper)
        # Resume the next batch after the last entity seen.
        last_key = entities[(-1)].keys()[0]
        entities_checked += len(entities)
        if (time.time() > (last_logged + LOG_PROGRESS_FREQUENCY)):
            progress = str(entities_checked)
            if (total_entities is not None):
                progress += '/{}'.format(total_entities)
            message = 'Processed {} entities'.format(progress)
            logging.info(message)
            write_to_json_file({'status': 'inProgress', 'message': message}, log_postfix)
            last_logged = time.time()
[ "def", "validate_and_update_entities", "(", "db_access", ",", "zookeeper", ",", "log_postfix", ",", "total_entities", ")", ":", "last_key", "=", "''", "entities_checked", "=", "0", "last_logged", "=", "time", ".", "time", "(", ")", "while", "True", ":", "logging", ".", "debug", "(", "'Fetching {} entities'", ".", "format", "(", "BATCH_SIZE", ")", ")", "entities", "=", "get_entity_batch", "(", "last_key", ",", "db_access", ",", "BATCH_SIZE", ")", "if", "(", "not", "entities", ")", ":", "break", "for", "entity", "in", "entities", ":", "process_entity", "(", "entity", ",", "db_access", ",", "zookeeper", ")", "last_key", "=", "entities", "[", "(", "-", "1", ")", "]", ".", "keys", "(", ")", "[", "0", "]", "entities_checked", "+=", "len", "(", "entities", ")", "if", "(", "time", ".", "time", "(", ")", ">", "(", "last_logged", "+", "LOG_PROGRESS_FREQUENCY", ")", ")", ":", "progress", "=", "str", "(", "entities_checked", ")", "if", "(", "total_entities", "is", "not", "None", ")", ":", "progress", "+=", "'/{}'", ".", "format", "(", "total_entities", ")", "message", "=", "'Processed {} entities'", ".", "format", "(", "progress", ")", "logging", ".", "info", "(", "message", ")", "write_to_json_file", "(", "{", "'status'", ":", "'inProgress'", ",", "'message'", ":", "message", "}", ",", "log_postfix", ")", "last_logged", "=", "time", ".", "time", "(", ")" ]
validates entities in batches of batch_size .
train
false
24,212
def libvlc_vlm_set_output(p_instance, psz_name, psz_output):
    """Set the output MRL for a VLM media (ctypes binding to libvlc).

    p_instance: the libvlc Instance.
    psz_name: name of the VLM media to configure.
    psz_output: the new output MRL string.
    Returns an int status code (restype is c_int; presumably 0 on
    success, -1 on error per the libvlc convention — confirm).
    """
    # Resolve and cache the C function on first call; _Cfunctions memoises.
    f = (_Cfunctions.get('libvlc_vlm_set_output', None) or
         _Cfunction('libvlc_vlm_set_output', ((1,), (1,), (1,)), None,
                    ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p))
    return f(p_instance, psz_name, psz_output)
[ "def", "libvlc_vlm_set_output", "(", "p_instance", ",", "psz_name", ",", "psz_output", ")", ":", "f", "=", "(", "_Cfunctions", ".", "get", "(", "'libvlc_vlm_set_output'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_vlm_set_output'", ",", "(", "(", "1", ",", ")", ",", "(", "1", ",", ")", ",", "(", "1", ",", ")", ")", ",", "None", ",", "ctypes", ".", "c_int", ",", "Instance", ",", "ctypes", ".", "c_char_p", ",", "ctypes", ".", "c_char_p", ")", ")", "return", "f", "(", "p_instance", ",", "psz_name", ",", "psz_output", ")" ]
set the output for a media .
train
true
24,214
def reorient(image, orientation):
    """Return a reoriented view of ``image`` for a TIFF orientation.

    ``orientation`` may be a numeric TIFF code (translated through
    TIFF_ORIENTATIONS) or a canonical orientation name.  Unknown
    orientations yield ``None``, as in the original implementation.
    """
    transforms = {
        'top_left': lambda im: im,
        'top_right': lambda im: im[..., ::(-1), :],
        'bottom_left': lambda im: im[..., ::(-1), :, :],
        'bottom_right': lambda im: im[..., ::(-1), ::(-1), :],
        'left_top': lambda im: numpy.swapaxes(im, (-3), (-2)),
        'right_top': lambda im: numpy.swapaxes(im, (-3), (-2))[..., ::(-1), :],
        'left_bottom': lambda im: numpy.swapaxes(im, (-3), (-2))[..., ::(-1), :, :],
        'right_bottom': lambda im: numpy.swapaxes(im, (-3), (-2))[..., ::(-1), ::(-1), :],
    }
    key = TIFF_ORIENTATIONS.get(orientation, orientation)
    transform = transforms.get(key)
    if transform is None:
        # Unrecognised orientation: original fell off the if/elif chain.
        return None
    return transform(image)
[ "def", "reorient", "(", "image", ",", "orientation", ")", ":", "o", "=", "TIFF_ORIENTATIONS", ".", "get", "(", "orientation", ",", "orientation", ")", "if", "(", "o", "==", "'top_left'", ")", ":", "return", "image", "elif", "(", "o", "==", "'top_right'", ")", ":", "return", "image", "[", "...", ",", ":", ":", "(", "-", "1", ")", ",", ":", "]", "elif", "(", "o", "==", "'bottom_left'", ")", ":", "return", "image", "[", "...", ",", ":", ":", "(", "-", "1", ")", ",", ":", ",", ":", "]", "elif", "(", "o", "==", "'bottom_right'", ")", ":", "return", "image", "[", "...", ",", ":", ":", "(", "-", "1", ")", ",", ":", ":", "(", "-", "1", ")", ",", ":", "]", "elif", "(", "o", "==", "'left_top'", ")", ":", "return", "numpy", ".", "swapaxes", "(", "image", ",", "(", "-", "3", ")", ",", "(", "-", "2", ")", ")", "elif", "(", "o", "==", "'right_top'", ")", ":", "return", "numpy", ".", "swapaxes", "(", "image", ",", "(", "-", "3", ")", ",", "(", "-", "2", ")", ")", "[", "...", ",", ":", ":", "(", "-", "1", ")", ",", ":", "]", "elif", "(", "o", "==", "'left_bottom'", ")", ":", "return", "numpy", ".", "swapaxes", "(", "image", ",", "(", "-", "3", ")", ",", "(", "-", "2", ")", ")", "[", "...", ",", ":", ":", "(", "-", "1", ")", ",", ":", ",", ":", "]", "elif", "(", "o", "==", "'right_bottom'", ")", ":", "return", "numpy", ".", "swapaxes", "(", "image", ",", "(", "-", "3", ")", ",", "(", "-", "2", ")", ")", "[", "...", ",", ":", ":", "(", "-", "1", ")", ",", ":", ":", "(", "-", "1", ")", ",", ":", "]" ]
return reoriented view of image array .
train
true
24,215
def from_xfr(xfr, zone_factory=Zone, relativize=True, check_origin=True):
    """Convert the output of a zone transfer generator into a zone object.

    xfr: iterable of DNS messages produced by a zone transfer.
    zone_factory: class used to instantiate the zone.
    relativize: store names relative to the zone origin.
    check_origin: verify the zone's origin data before returning.
    Returns the assembled zone.
    """
    z = None
    for r in xfr:
        if (z is None):
            # First message: derive origin and class, then create the zone.
            if relativize:
                origin = r.origin
            else:
                origin = r.answer[0].name
            rdclass = r.answer[0].rdclass
            z = zone_factory(origin, rdclass, relativize=relativize)
        for rrset in r.answer:
            znode = z.nodes.get(rrset.name)
            if (not znode):
                # First rdata for this name: create its node.
                znode = z.node_factory()
                z.nodes[rrset.name] = znode
            # Final True = create the rdataset if the node lacks one.
            zrds = znode.find_rdataset(rrset.rdclass, rrset.rdtype, rrset.covers, True)
            zrds.update_ttl(rrset.ttl)
            for rd in rrset:
                rd.choose_relativity(z.origin, relativize)
                zrds.add(rd)
    if check_origin:
        z.check_origin()
    return z
[ "def", "from_xfr", "(", "xfr", ",", "zone_factory", "=", "Zone", ",", "relativize", "=", "True", ",", "check_origin", "=", "True", ")", ":", "z", "=", "None", "for", "r", "in", "xfr", ":", "if", "(", "z", "is", "None", ")", ":", "if", "relativize", ":", "origin", "=", "r", ".", "origin", "else", ":", "origin", "=", "r", ".", "answer", "[", "0", "]", ".", "name", "rdclass", "=", "r", ".", "answer", "[", "0", "]", ".", "rdclass", "z", "=", "zone_factory", "(", "origin", ",", "rdclass", ",", "relativize", "=", "relativize", ")", "for", "rrset", "in", "r", ".", "answer", ":", "znode", "=", "z", ".", "nodes", ".", "get", "(", "rrset", ".", "name", ")", "if", "(", "not", "znode", ")", ":", "znode", "=", "z", ".", "node_factory", "(", ")", "z", ".", "nodes", "[", "rrset", ".", "name", "]", "=", "znode", "zrds", "=", "znode", ".", "find_rdataset", "(", "rrset", ".", "rdclass", ",", "rrset", ".", "rdtype", ",", "rrset", ".", "covers", ",", "True", ")", "zrds", ".", "update_ttl", "(", "rrset", ".", "ttl", ")", "for", "rd", "in", "rrset", ":", "rd", ".", "choose_relativity", "(", "z", ".", "origin", ",", "relativize", ")", "zrds", ".", "add", "(", "rd", ")", "if", "check_origin", ":", "z", ".", "check_origin", "(", ")", "return", "z" ]
convert the output of a zone transfer generator into a zone object .
train
true
24,216
@nottest
def slow_test(f):
    """Decorator marking ``f`` as a slow test (sets ``f.slow_test = True``)."""
    setattr(f, 'slow_test', True)
    return f
[ "@", "nottest", "def", "slow_test", "(", "f", ")", ":", "f", ".", "slow_test", "=", "True", "return", "f" ]
decorator for slow tests .
train
false
24,218
def auth_email_logout(token, user): redirect_url = cas.get_logout_url(service_url=cas.get_login_url(service_url=web_url_for('index', _absolute=True))) try: unconfirmed_email = user.get_unconfirmed_email_for_token(token) except InvalidTokenError: raise HTTPError(http.BAD_REQUEST, data={'message_short': 'Bad token', 'message_long': 'The provided token is invalid.'}) except ExpiredTokenError: status.push_status_message('The private link you used is expired.') raise HTTPError(http.BAD_REQUEST, data={'message_short': 'Expired link', 'message_long': 'The private link you used is expired.'}) try: user_merge = User.find_one(Q('emails', 'eq', unconfirmed_email)) except NoResultsFound: user_merge = False if user_merge: remove_sessions_for_user(user_merge) user.email_verifications[token]['confirmed'] = True user.save() remove_sessions_for_user(user) resp = redirect(redirect_url) resp.delete_cookie(settings.COOKIE_NAME, domain=settings.OSF_COOKIE_DOMAIN) return resp
[ "def", "auth_email_logout", "(", "token", ",", "user", ")", ":", "redirect_url", "=", "cas", ".", "get_logout_url", "(", "service_url", "=", "cas", ".", "get_login_url", "(", "service_url", "=", "web_url_for", "(", "'index'", ",", "_absolute", "=", "True", ")", ")", ")", "try", ":", "unconfirmed_email", "=", "user", ".", "get_unconfirmed_email_for_token", "(", "token", ")", "except", "InvalidTokenError", ":", "raise", "HTTPError", "(", "http", ".", "BAD_REQUEST", ",", "data", "=", "{", "'message_short'", ":", "'Bad token'", ",", "'message_long'", ":", "'The provided token is invalid.'", "}", ")", "except", "ExpiredTokenError", ":", "status", ".", "push_status_message", "(", "'The private link you used is expired.'", ")", "raise", "HTTPError", "(", "http", ".", "BAD_REQUEST", ",", "data", "=", "{", "'message_short'", ":", "'Expired link'", ",", "'message_long'", ":", "'The private link you used is expired.'", "}", ")", "try", ":", "user_merge", "=", "User", ".", "find_one", "(", "Q", "(", "'emails'", ",", "'eq'", ",", "unconfirmed_email", ")", ")", "except", "NoResultsFound", ":", "user_merge", "=", "False", "if", "user_merge", ":", "remove_sessions_for_user", "(", "user_merge", ")", "user", ".", "email_verifications", "[", "token", "]", "[", "'confirmed'", "]", "=", "True", "user", ".", "save", "(", ")", "remove_sessions_for_user", "(", "user", ")", "resp", "=", "redirect", "(", "redirect_url", ")", "resp", ".", "delete_cookie", "(", "settings", ".", "COOKIE_NAME", ",", "domain", "=", "settings", ".", "OSF_COOKIE_DOMAIN", ")", "return", "resp" ]
when a user is adding an email or merging an account .
train
false
24,219
def get_columns(pkt): columns = [] num_columns = endian_int(pkt[5:7]) pkt = pkt[7:] ctmp = '' for column in xrange(num_columns): cnt = 0 while True: tmp = pkt[cnt] if (tmp == '00'): columns.append(ctmp) ctmp = '' pkt = pkt[(cnt + 19):] break ctmp += tmp.decode('hex') cnt += 1 return (columns, pkt)
[ "def", "get_columns", "(", "pkt", ")", ":", "columns", "=", "[", "]", "num_columns", "=", "endian_int", "(", "pkt", "[", "5", ":", "7", "]", ")", "pkt", "=", "pkt", "[", "7", ":", "]", "ctmp", "=", "''", "for", "column", "in", "xrange", "(", "num_columns", ")", ":", "cnt", "=", "0", "while", "True", ":", "tmp", "=", "pkt", "[", "cnt", "]", "if", "(", "tmp", "==", "'00'", ")", ":", "columns", ".", "append", "(", "ctmp", ")", "ctmp", "=", "''", "pkt", "=", "pkt", "[", "(", "cnt", "+", "19", ")", ":", "]", "break", "ctmp", "+=", "tmp", ".", "decode", "(", "'hex'", ")", "cnt", "+=", "1", "return", "(", "columns", ",", "pkt", ")" ]
returns list of columns for given table .
train
false
24,220
def lease_response(lease, now): if (lease.expiration is None): expires = None else: expires = (lease.expiration - now).total_seconds() return {u'dataset_id': unicode(lease.dataset_id), u'node_uuid': unicode(lease.node_id), u'expires': expires}
[ "def", "lease_response", "(", "lease", ",", "now", ")", ":", "if", "(", "lease", ".", "expiration", "is", "None", ")", ":", "expires", "=", "None", "else", ":", "expires", "=", "(", "lease", ".", "expiration", "-", "now", ")", ".", "total_seconds", "(", ")", "return", "{", "u'dataset_id'", ":", "unicode", "(", "lease", ".", "dataset_id", ")", ",", "u'node_uuid'", ":", "unicode", "(", "lease", ".", "node_id", ")", ",", "u'expires'", ":", "expires", "}" ]
convert a lease to the corresponding objects for json serialization .
train
false
24,222
@cache_permission def can_manage_acl(user, project): return check_permission(user, project, 'trans.manage_acl')
[ "@", "cache_permission", "def", "can_manage_acl", "(", "user", ",", "project", ")", ":", "return", "check_permission", "(", "user", ",", "project", ",", "'trans.manage_acl'", ")" ]
checks whether user can manage acl on given project .
train
false
24,226
def unit_impulse(shape, idx=None, dtype=float): out = zeros(shape, dtype) shape = np.atleast_1d(shape) if (idx is None): idx = ((0,) * len(shape)) elif (idx == 'mid'): idx = tuple((shape // 2)) elif (not hasattr(idx, '__iter__')): idx = ((idx,) * len(shape)) out[idx] = 1 return out
[ "def", "unit_impulse", "(", "shape", ",", "idx", "=", "None", ",", "dtype", "=", "float", ")", ":", "out", "=", "zeros", "(", "shape", ",", "dtype", ")", "shape", "=", "np", ".", "atleast_1d", "(", "shape", ")", "if", "(", "idx", "is", "None", ")", ":", "idx", "=", "(", "(", "0", ",", ")", "*", "len", "(", "shape", ")", ")", "elif", "(", "idx", "==", "'mid'", ")", ":", "idx", "=", "tuple", "(", "(", "shape", "//", "2", ")", ")", "elif", "(", "not", "hasattr", "(", "idx", ",", "'__iter__'", ")", ")", ":", "idx", "=", "(", "(", "idx", ",", ")", "*", "len", "(", "shape", ")", ")", "out", "[", "idx", "]", "=", "1", "return", "out" ]
unit impulse signal or unit basis vector .
train
false
24,228
def _removeSafely(path): if (not path.child('_trial_marker').exists()): raise _NoTrialMarker(('%r is not a trial temporary path, refusing to remove it' % (path,))) try: path.remove() except OSError as e: print ('could not remove %r, caught OSError [Errno %s]: %s' % (path, e.errno, e.strerror)) try: newPath = FilePath(('_trial_temp_old%s' % (randrange(1000000),))) path.moveTo(newPath) except OSError as e: print ('could not rename path, caught OSError [Errno %s]: %s' % (e.errno, e.strerror)) raise
[ "def", "_removeSafely", "(", "path", ")", ":", "if", "(", "not", "path", ".", "child", "(", "'_trial_marker'", ")", ".", "exists", "(", ")", ")", ":", "raise", "_NoTrialMarker", "(", "(", "'%r is not a trial temporary path, refusing to remove it'", "%", "(", "path", ",", ")", ")", ")", "try", ":", "path", ".", "remove", "(", ")", "except", "OSError", "as", "e", ":", "print", "(", "'could not remove %r, caught OSError [Errno %s]: %s'", "%", "(", "path", ",", "e", ".", "errno", ",", "e", ".", "strerror", ")", ")", "try", ":", "newPath", "=", "FilePath", "(", "(", "'_trial_temp_old%s'", "%", "(", "randrange", "(", "1000000", ")", ",", ")", ")", ")", "path", ".", "moveTo", "(", "newPath", ")", "except", "OSError", "as", "e", ":", "print", "(", "'could not rename path, caught OSError [Errno %s]: %s'", "%", "(", "e", ".", "errno", ",", "e", ".", "strerror", ")", ")", "raise" ]
safely remove a path .
train
false
24,229
@cronjobs.register def cleanup_validation_results(): all = FileValidation.objects.all() log.info(('Removing %s old validation results.' % all.count())) all.delete()
[ "@", "cronjobs", ".", "register", "def", "cleanup_validation_results", "(", ")", ":", "all", "=", "FileValidation", ".", "objects", ".", "all", "(", ")", "log", ".", "info", "(", "(", "'Removing %s old validation results.'", "%", "all", ".", "count", "(", ")", ")", ")", "all", ".", "delete", "(", ")" ]
will remove all validation results .
train
false
24,230
def _NormalizedSource(source): normalized = os.path.normpath(source) if (source.count('$') == normalized.count('$')): source = normalized return source
[ "def", "_NormalizedSource", "(", "source", ")", ":", "normalized", "=", "os", ".", "path", ".", "normpath", "(", "source", ")", "if", "(", "source", ".", "count", "(", "'$'", ")", "==", "normalized", ".", "count", "(", "'$'", ")", ")", ":", "source", "=", "normalized", "return", "source" ]
normalize the path .
train
false
24,231
def getLeftPoint(points): leftmost = 999999999.0 leftPointComplex = None for pointComplex in points: if (pointComplex.real < leftmost): leftmost = pointComplex.real leftPointComplex = pointComplex return leftPointComplex
[ "def", "getLeftPoint", "(", "points", ")", ":", "leftmost", "=", "999999999.0", "leftPointComplex", "=", "None", "for", "pointComplex", "in", "points", ":", "if", "(", "pointComplex", ".", "real", "<", "leftmost", ")", ":", "leftmost", "=", "pointComplex", ".", "real", "leftPointComplex", "=", "pointComplex", "return", "leftPointComplex" ]
get the leftmost complex point in the points .
train
false
24,232
def target_update(target, deps, cmd): if target_outdated(target, deps): system(cmd)
[ "def", "target_update", "(", "target", ",", "deps", ",", "cmd", ")", ":", "if", "target_outdated", "(", "target", ",", "deps", ")", ":", "system", "(", "cmd", ")" ]
update a target with a given command given a list of dependencies .
train
false
24,233
def dictCombinations(listdict): listdict = listdict.copy() if (len(listdict) == 0): return [{}] (k, vs) = listdict.popitem() res = dictCombinations(listdict) if (isinstance(vs, list) or isinstance(vs, tuple)): res = [dict(d, **{k: v}) for d in res for v in sorted(set(vs))] else: res = [dict(d, **{k: vs}) for d in res] return res
[ "def", "dictCombinations", "(", "listdict", ")", ":", "listdict", "=", "listdict", ".", "copy", "(", ")", "if", "(", "len", "(", "listdict", ")", "==", "0", ")", ":", "return", "[", "{", "}", "]", "(", "k", ",", "vs", ")", "=", "listdict", ".", "popitem", "(", ")", "res", "=", "dictCombinations", "(", "listdict", ")", "if", "(", "isinstance", "(", "vs", ",", "list", ")", "or", "isinstance", "(", "vs", ",", "tuple", ")", ")", ":", "res", "=", "[", "dict", "(", "d", ",", "**", "{", "k", ":", "v", "}", ")", "for", "d", "in", "res", "for", "v", "in", "sorted", "(", "set", "(", "vs", ")", ")", "]", "else", ":", "res", "=", "[", "dict", "(", "d", ",", "**", "{", "k", ":", "vs", "}", ")", "for", "d", "in", "res", "]", "return", "res" ]
iterates over dictionaries that go through every possible combination of key-value pairs as specified in the lists of values for each key in listdict .
train
false
24,234
def get_group_by_args(): group_by = request.args.get('group_by') if (not group_by): group_by = '' return group_by
[ "def", "get_group_by_args", "(", ")", ":", "group_by", "=", "request", ".", "args", ".", "get", "(", "'group_by'", ")", "if", "(", "not", "group_by", ")", ":", "group_by", "=", "''", "return", "group_by" ]
get page arguments for group by .
train
false
24,235
def p_field_seq(p): _parse_seq(p)
[ "def", "p_field_seq", "(", "p", ")", ":", "_parse_seq", "(", "p", ")" ]
field_seq : field sep field_seq | field field_seq .
train
false
24,236
def name_for_scalar_relationship(base, local_cls, referred_cls, constraint): return referred_cls.__name__.lower()
[ "def", "name_for_scalar_relationship", "(", "base", ",", "local_cls", ",", "referred_cls", ",", "constraint", ")", ":", "return", "referred_cls", ".", "__name__", ".", "lower", "(", ")" ]
return the attribute name that should be used to refer from one class to another .
train
true
24,237
def get_flow(db_api, image_service_api, availability_zones, create_what, scheduler_rpcapi=None, volume_rpcapi=None): flow_name = (ACTION.replace(':', '_') + '_api') api_flow = linear_flow.Flow(flow_name) api_flow.add(ExtractVolumeRequestTask(image_service_api, availability_zones, rebind={'size': 'raw_size', 'availability_zone': 'raw_availability_zone', 'volume_type': 'raw_volume_type'})) api_flow.add(QuotaReserveTask(), EntryCreateTask(), QuotaCommitTask()) if (scheduler_rpcapi and volume_rpcapi): api_flow.add(VolumeCastTask(scheduler_rpcapi, volume_rpcapi, db_api)) return taskflow.engines.load(api_flow, store=create_what)
[ "def", "get_flow", "(", "db_api", ",", "image_service_api", ",", "availability_zones", ",", "create_what", ",", "scheduler_rpcapi", "=", "None", ",", "volume_rpcapi", "=", "None", ")", ":", "flow_name", "=", "(", "ACTION", ".", "replace", "(", "':'", ",", "'_'", ")", "+", "'_api'", ")", "api_flow", "=", "linear_flow", ".", "Flow", "(", "flow_name", ")", "api_flow", ".", "add", "(", "ExtractVolumeRequestTask", "(", "image_service_api", ",", "availability_zones", ",", "rebind", "=", "{", "'size'", ":", "'raw_size'", ",", "'availability_zone'", ":", "'raw_availability_zone'", ",", "'volume_type'", ":", "'raw_volume_type'", "}", ")", ")", "api_flow", ".", "add", "(", "QuotaReserveTask", "(", ")", ",", "EntryCreateTask", "(", ")", ",", "QuotaCommitTask", "(", ")", ")", "if", "(", "scheduler_rpcapi", "and", "volume_rpcapi", ")", ":", "api_flow", ".", "add", "(", "VolumeCastTask", "(", "scheduler_rpcapi", ",", "volume_rpcapi", ",", "db_api", ")", ")", "return", "taskflow", ".", "engines", ".", "load", "(", "api_flow", ",", "store", "=", "create_what", ")" ]
constructs and returns the api entrypoint flow .
train
false
24,238
def firstmethod(method, on_call=None): def _matcher(it, *args, **kwargs): for obj in it: try: meth = getattr(maybe_evaluate(obj), method) reply = (on_call(meth, *args, **kwargs) if on_call else meth(*args, **kwargs)) except AttributeError: pass else: if (reply is not None): return reply return _matcher
[ "def", "firstmethod", "(", "method", ",", "on_call", "=", "None", ")", ":", "def", "_matcher", "(", "it", ",", "*", "args", ",", "**", "kwargs", ")", ":", "for", "obj", "in", "it", ":", "try", ":", "meth", "=", "getattr", "(", "maybe_evaluate", "(", "obj", ")", ",", "method", ")", "reply", "=", "(", "on_call", "(", "meth", ",", "*", "args", ",", "**", "kwargs", ")", "if", "on_call", "else", "meth", "(", "*", "args", ",", "**", "kwargs", ")", ")", "except", "AttributeError", ":", "pass", "else", ":", "if", "(", "reply", "is", "not", "None", ")", ":", "return", "reply", "return", "_matcher" ]
returns a functions that with a list of instances .
train
false
24,239
def get_permission_object_from_string(permission_string): (app_label, codename) = permission_string.split('.') return Permission.objects.get(content_type__app_label=app_label, codename=codename)
[ "def", "get_permission_object_from_string", "(", "permission_string", ")", ":", "(", "app_label", ",", "codename", ")", "=", "permission_string", ".", "split", "(", "'.'", ")", "return", "Permission", ".", "objects", ".", "get", "(", "content_type__app_label", "=", "app_label", ",", "codename", "=", "codename", ")" ]
given a permission string of the form app_label .
train
false
24,240
def annotate_content_models_by_youtube_id(channel='khan', language='en', youtube_ids=None): annotate_content_models(channel=channel, language=language, ids=youtube_ids, iterator_content_items=iterator_content_items_by_youtube_id)
[ "def", "annotate_content_models_by_youtube_id", "(", "channel", "=", "'khan'", ",", "language", "=", "'en'", ",", "youtube_ids", "=", "None", ")", ":", "annotate_content_models", "(", "channel", "=", "channel", ",", "language", "=", "language", ",", "ids", "=", "youtube_ids", ",", "iterator_content_items", "=", "iterator_content_items_by_youtube_id", ")" ]
annotate content models that have the youtube ids specified in a list .
train
false
24,242
def filter_raising_callables(it, exception, *args, **kwargs): for elem in it: try: (yield elem(*args, **kwargs)) except exception: pass
[ "def", "filter_raising_callables", "(", "it", ",", "exception", ",", "*", "args", ",", "**", "kwargs", ")", ":", "for", "elem", "in", "it", ":", "try", ":", "(", "yield", "elem", "(", "*", "args", ",", "**", "kwargs", ")", ")", "except", "exception", ":", "pass" ]
filters all callable items inside the given iterator that raise the given exceptions .
train
false
24,243
def backwords(environ, realm, username): password = list(username) password.reverse() password = ''.join(password) return digest_password(realm, username, password)
[ "def", "backwords", "(", "environ", ",", "realm", ",", "username", ")", ":", "password", "=", "list", "(", "username", ")", "password", ".", "reverse", "(", ")", "password", "=", "''", ".", "join", "(", "password", ")", "return", "digest_password", "(", "realm", ",", "username", ",", "password", ")" ]
dummy password hash .
train
false
24,244
def course_starts_within(start_date, look_ahead_days): return ((datetime.now(utc) + timedelta(days=look_ahead_days)) > start_date)
[ "def", "course_starts_within", "(", "start_date", ",", "look_ahead_days", ")", ":", "return", "(", "(", "datetime", ".", "now", "(", "utc", ")", "+", "timedelta", "(", "days", "=", "look_ahead_days", ")", ")", ">", "start_date", ")" ]
given a courses start datetime and look ahead days .
train
false
24,245
def getIsInsetPointInsideLoops(inside, loops, pointBegin, pointCenter, pointEnd, radius): centerMinusBegin = euclidean.getNormalized((pointCenter - pointBegin)) centerMinusBeginWiddershins = complex((- centerMinusBegin.imag), centerMinusBegin.real) endMinusCenter = euclidean.getNormalized((pointEnd - pointCenter)) endMinusCenterWiddershins = complex((- endMinusCenter.imag), endMinusCenter.real) widdershinsNormalized = (euclidean.getNormalized((centerMinusBeginWiddershins + endMinusCenterWiddershins)) * radius) return (euclidean.getIsInFilledRegion(loops, (pointCenter + widdershinsNormalized)) == inside)
[ "def", "getIsInsetPointInsideLoops", "(", "inside", ",", "loops", ",", "pointBegin", ",", "pointCenter", ",", "pointEnd", ",", "radius", ")", ":", "centerMinusBegin", "=", "euclidean", ".", "getNormalized", "(", "(", "pointCenter", "-", "pointBegin", ")", ")", "centerMinusBeginWiddershins", "=", "complex", "(", "(", "-", "centerMinusBegin", ".", "imag", ")", ",", "centerMinusBegin", ".", "real", ")", "endMinusCenter", "=", "euclidean", ".", "getNormalized", "(", "(", "pointEnd", "-", "pointCenter", ")", ")", "endMinusCenterWiddershins", "=", "complex", "(", "(", "-", "endMinusCenter", ".", "imag", ")", ",", "endMinusCenter", ".", "real", ")", "widdershinsNormalized", "=", "(", "euclidean", ".", "getNormalized", "(", "(", "centerMinusBeginWiddershins", "+", "endMinusCenterWiddershins", ")", ")", "*", "radius", ")", "return", "(", "euclidean", ".", "getIsInFilledRegion", "(", "loops", ",", "(", "pointCenter", "+", "widdershinsNormalized", ")", ")", "==", "inside", ")" ]
determine if the inset point is inside the loops .
train
false
24,246
def infer_ass(self, context=None): stmt = self.statement() if isinstance(stmt, nodes.AugAssign): return stmt.infer(context) stmts = list(self.assigned_stmts(context=context)) return _infer_stmts(stmts, context)
[ "def", "infer_ass", "(", "self", ",", "context", "=", "None", ")", ":", "stmt", "=", "self", ".", "statement", "(", ")", "if", "isinstance", "(", "stmt", ",", "nodes", ".", "AugAssign", ")", ":", "return", "stmt", ".", "infer", "(", "context", ")", "stmts", "=", "list", "(", "self", ".", "assigned_stmts", "(", "context", "=", "context", ")", ")", "return", "_infer_stmts", "(", "stmts", ",", "context", ")" ]
infer a assname/assattr: need to inspect the rhs part of the assign node .
train
true
24,247
def mixin_user_query(cls): name = ('Appender' + cls.__name__) return type(name, (AppenderMixin, cls), {'query_class': cls})
[ "def", "mixin_user_query", "(", "cls", ")", ":", "name", "=", "(", "'Appender'", "+", "cls", ".", "__name__", ")", "return", "type", "(", "name", ",", "(", "AppenderMixin", ",", "cls", ")", ",", "{", "'query_class'", ":", "cls", "}", ")" ]
return a new class with appenderquery functionality layered over .
train
false
24,249
def clear_finalizers(clear_all=False): ThreadSafeFinalizer.clear_finalizers(clear_all) Finalizer.clear_finalizers(clear_all)
[ "def", "clear_finalizers", "(", "clear_all", "=", "False", ")", ":", "ThreadSafeFinalizer", ".", "clear_finalizers", "(", "clear_all", ")", "Finalizer", ".", "clear_finalizers", "(", "clear_all", ")" ]
removes all registered finalizers in :class:threadsafefinalizer and :class:finalizer .
train
false
24,250
def get_current_version_name(): return os.environ['CURRENT_VERSION_ID'].split('.')[0]
[ "def", "get_current_version_name", "(", ")", ":", "return", "os", ".", "environ", "[", "'CURRENT_VERSION_ID'", "]", ".", "split", "(", "'.'", ")", "[", "0", "]" ]
returns the version of the current instance .
train
false
24,251
def is_broken_link(path): path = os.readlink(path) return (not os.path.exists(path))
[ "def", "is_broken_link", "(", "path", ")", ":", "path", "=", "os", ".", "readlink", "(", "path", ")", "return", "(", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ")" ]
returns true if the path given as is a broken symlink .
train
false
24,252
def do_boolean_sum(field): cond = {field: True} return Sum(Case(When(then=1, **cond), default=0, output_field=IntegerField()))
[ "def", "do_boolean_sum", "(", "field", ")", ":", "cond", "=", "{", "field", ":", "True", "}", "return", "Sum", "(", "Case", "(", "When", "(", "then", "=", "1", ",", "**", "cond", ")", ",", "default", "=", "0", ",", "output_field", "=", "IntegerField", "(", ")", ")", ")" ]
wrapper to generate sum on boolean values .
train
false
24,253
def finalize_path_expansion(paths_expansion_list): path_seen = set() merged_paths = [] for paths_expansion in paths_expansion_list: for p in paths_expansion.paths.dependencies: if (p not in path_seen): merged_paths.append(p) path_seen.add(p) return Paths(tuple(merged_paths))
[ "def", "finalize_path_expansion", "(", "paths_expansion_list", ")", ":", "path_seen", "=", "set", "(", ")", "merged_paths", "=", "[", "]", "for", "paths_expansion", "in", "paths_expansion_list", ":", "for", "p", "in", "paths_expansion", ".", "paths", ".", "dependencies", ":", "if", "(", "p", "not", "in", "path_seen", ")", ":", "merged_paths", ".", "append", "(", "p", ")", "path_seen", ".", "add", "(", "p", ")", "return", "Paths", "(", "tuple", "(", "merged_paths", ")", ")" ]
finalize and merge pathexpansion lists into paths .
train
false
24,255
def DumpNodeToString(node): if isinstance(node, pytree.Leaf): fmt = '{name}({value}) [lineno={lineno}, column={column}, prefix={prefix}]' return fmt.format(name=NodeName(node), value=_PytreeNodeRepr(node), lineno=node.lineno, column=node.column, prefix=repr(node.prefix)) else: fmt = '{node} [{len} children] [child_indent="{indent}"]' return fmt.format(node=NodeName(node), len=len(node.children), indent=GetNodeAnnotation(node, Annotation.CHILD_INDENT))
[ "def", "DumpNodeToString", "(", "node", ")", ":", "if", "isinstance", "(", "node", ",", "pytree", ".", "Leaf", ")", ":", "fmt", "=", "'{name}({value}) [lineno={lineno}, column={column}, prefix={prefix}]'", "return", "fmt", ".", "format", "(", "name", "=", "NodeName", "(", "node", ")", ",", "value", "=", "_PytreeNodeRepr", "(", "node", ")", ",", "lineno", "=", "node", ".", "lineno", ",", "column", "=", "node", ".", "column", ",", "prefix", "=", "repr", "(", "node", ".", "prefix", ")", ")", "else", ":", "fmt", "=", "'{node} [{len} children] [child_indent=\"{indent}\"]'", "return", "fmt", ".", "format", "(", "node", "=", "NodeName", "(", "node", ")", ",", "len", "=", "len", "(", "node", ".", "children", ")", ",", "indent", "=", "GetNodeAnnotation", "(", "node", ",", "Annotation", ".", "CHILD_INDENT", ")", ")" ]
dump a string representation of the given node .
train
false
24,256
def string_to_bool(string): return STRING_BOOLS[string.strip().lower()]
[ "def", "string_to_bool", "(", "string", ")", ":", "return", "STRING_BOOLS", "[", "string", ".", "strip", "(", ")", ".", "lower", "(", ")", "]" ]
convert a string to a bool based .
train
false
24,257
def func_is_3d(in_file): if isinstance(in_file, list): return func_is_3d(in_file[0]) else: img = load(in_file) shape = img.shape if ((len(shape) == 3) or ((len(shape) == 4) and (shape[3] == 1))): return True else: return False
[ "def", "func_is_3d", "(", "in_file", ")", ":", "if", "isinstance", "(", "in_file", ",", "list", ")", ":", "return", "func_is_3d", "(", "in_file", "[", "0", "]", ")", "else", ":", "img", "=", "load", "(", "in_file", ")", "shape", "=", "img", ".", "shape", "if", "(", "(", "len", "(", "shape", ")", "==", "3", ")", "or", "(", "(", "len", "(", "shape", ")", "==", "4", ")", "and", "(", "shape", "[", "3", "]", "==", "1", ")", ")", ")", ":", "return", "True", "else", ":", "return", "False" ]
checks if input functional files are 3d .
train
false
24,258
def set_stream_logger(name='boto3', level=logging.DEBUG, format_string=None): if (format_string is None): format_string = '%(asctime)s %(name)s [%(levelname)s] %(message)s' logger = logging.getLogger(name) logger.setLevel(level) handler = logging.StreamHandler() handler.setLevel(level) formatter = logging.Formatter(format_string) handler.setFormatter(formatter) logger.addHandler(handler)
[ "def", "set_stream_logger", "(", "name", "=", "'boto3'", ",", "level", "=", "logging", ".", "DEBUG", ",", "format_string", "=", "None", ")", ":", "if", "(", "format_string", "is", "None", ")", ":", "format_string", "=", "'%(asctime)s %(name)s [%(levelname)s] %(message)s'", "logger", "=", "logging", ".", "getLogger", "(", "name", ")", "logger", ".", "setLevel", "(", "level", ")", "handler", "=", "logging", ".", "StreamHandler", "(", ")", "handler", ".", "setLevel", "(", "level", ")", "formatter", "=", "logging", ".", "Formatter", "(", "format_string", ")", "handler", ".", "setFormatter", "(", "formatter", ")", "logger", ".", "addHandler", "(", "handler", ")" ]
add a stream handler for the given name and level to the logging module .
train
true
24,260
def humanBitRate(size): return ''.join((humanBitSize(size), '/sec'))
[ "def", "humanBitRate", "(", "size", ")", ":", "return", "''", ".", "join", "(", "(", "humanBitSize", "(", "size", ")", ",", "'/sec'", ")", ")" ]
convert a bit rate to human classic representation .
train
false
24,261
def _find_set_members(set): cmd = '{0} list {1}'.format(_ipset_cmd(), set) out = __salt__['cmd.run_all'](cmd, python_shell=False) if (out['retcode'] > 0): return False _tmp = out['stdout'].split('\n') members = [] startMembers = False for i in _tmp: if startMembers: members.append(i) if ('Members:' in i): startMembers = True return members
[ "def", "_find_set_members", "(", "set", ")", ":", "cmd", "=", "'{0} list {1}'", ".", "format", "(", "_ipset_cmd", "(", ")", ",", "set", ")", "out", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "if", "(", "out", "[", "'retcode'", "]", ">", "0", ")", ":", "return", "False", "_tmp", "=", "out", "[", "'stdout'", "]", ".", "split", "(", "'\\n'", ")", "members", "=", "[", "]", "startMembers", "=", "False", "for", "i", "in", "_tmp", ":", "if", "startMembers", ":", "members", ".", "append", "(", "i", ")", "if", "(", "'Members:'", "in", "i", ")", ":", "startMembers", "=", "True", "return", "members" ]
return list of members for a set .
train
true
24,262
def deactivate_mfa_device(user_name, serial, region=None, key=None, keyid=None, profile=None): user = get_user(user_name, region, key, keyid, profile) if (not user): msg = 'Username {0} does not exist' log.error(msg.format(user_name)) return False conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.deactivate_mfa_device(user_name, serial) log.info('Deactivated MFA device {1} for user {0}.'.format(user_name, serial)) return True except boto.exception.BotoServerError as e: log.debug(e) if ('Not Found' in e): log.info('MFA device {1} not associated with user {0}.'.format(user_name, serial)) return True msg = 'Failed to deactivate MFA device {1} for user {0}.' log.error(msg.format(user_name, serial)) return False
[ "def", "deactivate_mfa_device", "(", "user_name", ",", "serial", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "user", "=", "get_user", "(", "user_name", ",", "region", ",", "key", ",", "keyid", ",", "profile", ")", "if", "(", "not", "user", ")", ":", "msg", "=", "'Username {0} does not exist'", "log", ".", "error", "(", "msg", ".", "format", "(", "user_name", ")", ")", "return", "False", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "try", ":", "conn", ".", "deactivate_mfa_device", "(", "user_name", ",", "serial", ")", "log", ".", "info", "(", "'Deactivated MFA device {1} for user {0}.'", ".", "format", "(", "user_name", ",", "serial", ")", ")", "return", "True", "except", "boto", ".", "exception", ".", "BotoServerError", "as", "e", ":", "log", ".", "debug", "(", "e", ")", "if", "(", "'Not Found'", "in", "e", ")", ":", "log", ".", "info", "(", "'MFA device {1} not associated with user {0}.'", ".", "format", "(", "user_name", ",", "serial", ")", ")", "return", "True", "msg", "=", "'Failed to deactivate MFA device {1} for user {0}.'", "log", ".", "error", "(", "msg", ".", "format", "(", "user_name", ",", "serial", ")", ")", "return", "False" ]
deactivates the specified mfa device and removes it from association with the user .
train
true
24,263
def label_folder_absent(name, node=None, apiserver=None): ret = __salt__['k8s.folder_absent'](name, node, apiserver) return ret
[ "def", "label_folder_absent", "(", "name", ",", "node", "=", "None", ",", "apiserver", "=", "None", ")", ":", "ret", "=", "__salt__", "[", "'k8s.folder_absent'", "]", "(", "name", ",", "node", ",", "apiserver", ")", "return", "ret" ]
ensure the label folder doesnt exist on the kube node .
train
false
24,265
def convert_nb(src, dst, to='html', template_file='basic'): from nbconvert import HTMLExporter, RSTExporter dispatch = {'rst': RSTExporter, 'html': HTMLExporter} exporter = dispatch[to.lower()](template_file=template_file) (body, resources) = exporter.from_filename(src) with io.open(dst, 'wt', encoding='utf-8') as f: f.write(body) return dst
[ "def", "convert_nb", "(", "src", ",", "dst", ",", "to", "=", "'html'", ",", "template_file", "=", "'basic'", ")", ":", "from", "nbconvert", "import", "HTMLExporter", ",", "RSTExporter", "dispatch", "=", "{", "'rst'", ":", "RSTExporter", ",", "'html'", ":", "HTMLExporter", "}", "exporter", "=", "dispatch", "[", "to", ".", "lower", "(", ")", "]", "(", "template_file", "=", "template_file", ")", "(", "body", ",", "resources", ")", "=", "exporter", ".", "from_filename", "(", "src", ")", "with", "io", ".", "open", "(", "dst", ",", "'wt'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "f", ".", "write", "(", "body", ")", "return", "dst" ]
convert a notebook src .
train
false
24,266
def test_cd_prefix(): some_path = '~/somepath' with cd(some_path): command_out = _prefix_commands('foo', 'remote') eq_(command_out, ('cd %s >/dev/null && foo' % some_path))
[ "def", "test_cd_prefix", "(", ")", ":", "some_path", "=", "'~/somepath'", "with", "cd", "(", "some_path", ")", ":", "command_out", "=", "_prefix_commands", "(", "'foo'", ",", "'remote'", ")", "eq_", "(", "command_out", ",", "(", "'cd %s >/dev/null && foo'", "%", "some_path", ")", ")" ]
cd prefix should direct output to /dev/null in case of cdpath .
train
false
24,267
@pytest.mark.parametrize('text, deleted, rest', [('delete this| test', 'delete this', '| test'), fixme(('delete <this> test', 'delete this', '| test')), ('delete <this> test', 'delete ', '|this test'), fixme(('f<oo>bar', 'foo', '|bar')), ('f<oo>bar', 'f', '|oobar')]) def test_rl_unix_line_discard(lineedit, bridge, text, deleted, rest): _validate_deletion(lineedit, bridge, bridge.rl_unix_line_discard, text, deleted, rest)
[ "@", "pytest", ".", "mark", ".", "parametrize", "(", "'text, deleted, rest'", ",", "[", "(", "'delete this| test'", ",", "'delete this'", ",", "'| test'", ")", ",", "fixme", "(", "(", "'delete <this> test'", ",", "'delete this'", ",", "'| test'", ")", ")", ",", "(", "'delete <this> test'", ",", "'delete '", ",", "'|this test'", ")", ",", "fixme", "(", "(", "'f<oo>bar'", ",", "'foo'", ",", "'|bar'", ")", ")", ",", "(", "'f<oo>bar'", ",", "'f'", ",", "'|oobar'", ")", "]", ")", "def", "test_rl_unix_line_discard", "(", "lineedit", ",", "bridge", ",", "text", ",", "deleted", ",", "rest", ")", ":", "_validate_deletion", "(", "lineedit", ",", "bridge", ",", "bridge", ".", "rl_unix_line_discard", ",", "text", ",", "deleted", ",", "rest", ")" ]
delete from the cursor to the beginning of the line and yank back .
train
false
24,268
def _writeFlattenedData(state, write, result): while True: try: element = next(state) except StopIteration: result.callback(None) except: result.errback() else: def cby(original): _writeFlattenedData(state, write, result) return original element.addCallbacks(cby, result.errback) break
[ "def", "_writeFlattenedData", "(", "state", ",", "write", ",", "result", ")", ":", "while", "True", ":", "try", ":", "element", "=", "next", "(", "state", ")", "except", "StopIteration", ":", "result", ".", "callback", "(", "None", ")", "except", ":", "result", ".", "errback", "(", ")", "else", ":", "def", "cby", "(", "original", ")", ":", "_writeFlattenedData", "(", "state", ",", "write", ",", "result", ")", "return", "original", "element", ".", "addCallbacks", "(", "cby", ",", "result", ".", "errback", ")", "break" ]
take strings from an iterator and pass them to a writer function .
train
false
24,269
def _write_string(f, s): binmode = fileobj_is_binary(f) if (binmode and isinstance(s, text_type)): s = encode_ascii(s) elif ((not binmode) and (not isinstance(f, text_type))): s = decode_ascii(s) elif (isinstance(f, StringIO) and isinstance(s, np.ndarray)): s = s.data f.write(s)
[ "def", "_write_string", "(", "f", ",", "s", ")", ":", "binmode", "=", "fileobj_is_binary", "(", "f", ")", "if", "(", "binmode", "and", "isinstance", "(", "s", ",", "text_type", ")", ")", ":", "s", "=", "encode_ascii", "(", "s", ")", "elif", "(", "(", "not", "binmode", ")", "and", "(", "not", "isinstance", "(", "f", ",", "text_type", ")", ")", ")", ":", "s", "=", "decode_ascii", "(", "s", ")", "elif", "(", "isinstance", "(", "f", ",", "StringIO", ")", "and", "isinstance", "(", "s", ",", "np", ".", "ndarray", ")", ")", ":", "s", "=", "s", ".", "data", "f", ".", "write", "(", "s", ")" ]
write a string to a file .
train
false
24,270
def libvlc_media_new_fd(p_instance, fd): f = (_Cfunctions.get('libvlc_media_new_fd', None) or _Cfunction('libvlc_media_new_fd', ((1,), (1,)), class_result(Media), ctypes.c_void_p, Instance, ctypes.c_int)) return f(p_instance, fd)
[ "def", "libvlc_media_new_fd", "(", "p_instance", ",", "fd", ")", ":", "f", "=", "(", "_Cfunctions", ".", "get", "(", "'libvlc_media_new_fd'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_media_new_fd'", ",", "(", "(", "1", ",", ")", ",", "(", "1", ",", ")", ")", ",", "class_result", "(", "Media", ")", ",", "ctypes", ".", "c_void_p", ",", "Instance", ",", "ctypes", ".", "c_int", ")", ")", "return", "f", "(", "p_instance", ",", "fd", ")" ]
create a media for an already open file descriptor .
train
true
24,271
def _get_metadata_for_region(region_code): country_calling_code = country_code_for_region(region_code) main_country = region_code_for_country_code(country_calling_code) return PhoneMetadata.metadata_for_region(main_country, _EMPTY_METADATA)
[ "def", "_get_metadata_for_region", "(", "region_code", ")", ":", "country_calling_code", "=", "country_code_for_region", "(", "region_code", ")", "main_country", "=", "region_code_for_country_code", "(", "country_calling_code", ")", "return", "PhoneMetadata", ".", "metadata_for_region", "(", "main_country", ",", "_EMPTY_METADATA", ")" ]
the metadata needed by this class is the same for all regions sharing the same country calling code .
train
true
24,273
def fetch_things_with_retry(query, chunk_size=100, batch_fn=None, chunks=False, retry_min_wait_ms=500, max_retries=0): assert query._sort, 'you must specify the sort order in your query!' retrier = functools.partial(exponential_retrier, retry_min_wait_ms=retry_min_wait_ms, max_retries=max_retries) orig_rules = deepcopy(query._rules) query._limit = chunk_size items = retrier((lambda : list(query))) done = False while (items and (not done)): if (len(items) < chunk_size): done = True after = items[(-1)] if batch_fn: items = batch_fn(items) if chunks: (yield items) else: for i in items: (yield i) if (not done): query._rules = deepcopy(orig_rules) query._after(after) items = retrier((lambda : list(query)))
[ "def", "fetch_things_with_retry", "(", "query", ",", "chunk_size", "=", "100", ",", "batch_fn", "=", "None", ",", "chunks", "=", "False", ",", "retry_min_wait_ms", "=", "500", ",", "max_retries", "=", "0", ")", ":", "assert", "query", ".", "_sort", ",", "'you must specify the sort order in your query!'", "retrier", "=", "functools", ".", "partial", "(", "exponential_retrier", ",", "retry_min_wait_ms", "=", "retry_min_wait_ms", ",", "max_retries", "=", "max_retries", ")", "orig_rules", "=", "deepcopy", "(", "query", ".", "_rules", ")", "query", ".", "_limit", "=", "chunk_size", "items", "=", "retrier", "(", "(", "lambda", ":", "list", "(", "query", ")", ")", ")", "done", "=", "False", "while", "(", "items", "and", "(", "not", "done", ")", ")", ":", "if", "(", "len", "(", "items", ")", "<", "chunk_size", ")", ":", "done", "=", "True", "after", "=", "items", "[", "(", "-", "1", ")", "]", "if", "batch_fn", ":", "items", "=", "batch_fn", "(", "items", ")", "if", "chunks", ":", "(", "yield", "items", ")", "else", ":", "for", "i", "in", "items", ":", "(", "yield", "i", ")", "if", "(", "not", "done", ")", ":", "query", ".", "_rules", "=", "deepcopy", "(", "orig_rules", ")", "query", ".", "_after", "(", "after", ")", "items", "=", "retrier", "(", "(", "lambda", ":", "list", "(", "query", ")", ")", ")" ]
incrementally run query with a limit of chunk_size until there are no results left .
train
false
24,274
def create_assigned_tax_class(name, rates_to_assign): tax_class = TaxClass.objects.create(name=(u'TC-%s' % name)) for (n, tax_rate) in enumerate(rates_to_assign, 1): tax_name = ((u'Tax-%s' % name) if (len(rates_to_assign) == 1) else (u'Tax-%s-%d' % (name, n))) tax = Tax.objects.create(rate=tax_rate, name=tax_name) TaxRule.objects.create(tax=tax).tax_classes.add(tax_class) return tax_class
[ "def", "create_assigned_tax_class", "(", "name", ",", "rates_to_assign", ")", ":", "tax_class", "=", "TaxClass", ".", "objects", ".", "create", "(", "name", "=", "(", "u'TC-%s'", "%", "name", ")", ")", "for", "(", "n", ",", "tax_rate", ")", "in", "enumerate", "(", "rates_to_assign", ",", "1", ")", ":", "tax_name", "=", "(", "(", "u'Tax-%s'", "%", "name", ")", "if", "(", "len", "(", "rates_to_assign", ")", "==", "1", ")", "else", "(", "u'Tax-%s-%d'", "%", "(", "name", ",", "n", ")", ")", ")", "tax", "=", "Tax", ".", "objects", ".", "create", "(", "rate", "=", "tax_rate", ",", "name", "=", "tax_name", ")", "TaxRule", ".", "objects", ".", "create", "(", "tax", "=", "tax", ")", ".", "tax_classes", ".", "add", "(", "tax_class", ")", "return", "tax_class" ]
create a tax class and assign taxes for it with tax rules .
train
false
24,275
def _parse_btrfs_info(data): ret = {} for line in [line for line in data.split('\n') if line][:(-1)]: if line.startswith('Label:'): line = re.sub('Label:\\s+', '', line) (label, uuid_) = [tkn.strip() for tkn in line.split('uuid:')] ret['label'] = (((label != 'none') and label) or None) ret['uuid'] = uuid_ continue if line.startswith(' DCTB devid'): dev_data = re.split('\\s+', line.strip()) dev_id = dev_data[(-1)] ret[dev_id] = {'device_id': dev_data[1], 'size': dev_data[3], 'used': dev_data[5]} return ret
[ "def", "_parse_btrfs_info", "(", "data", ")", ":", "ret", "=", "{", "}", "for", "line", "in", "[", "line", "for", "line", "in", "data", ".", "split", "(", "'\\n'", ")", "if", "line", "]", "[", ":", "(", "-", "1", ")", "]", ":", "if", "line", ".", "startswith", "(", "'Label:'", ")", ":", "line", "=", "re", ".", "sub", "(", "'Label:\\\\s+'", ",", "''", ",", "line", ")", "(", "label", ",", "uuid_", ")", "=", "[", "tkn", ".", "strip", "(", ")", "for", "tkn", "in", "line", ".", "split", "(", "'uuid:'", ")", "]", "ret", "[", "'label'", "]", "=", "(", "(", "(", "label", "!=", "'none'", ")", "and", "label", ")", "or", "None", ")", "ret", "[", "'uuid'", "]", "=", "uuid_", "continue", "if", "line", ".", "startswith", "(", "' DCTB devid'", ")", ":", "dev_data", "=", "re", ".", "split", "(", "'\\\\s+'", ",", "line", ".", "strip", "(", ")", ")", "dev_id", "=", "dev_data", "[", "(", "-", "1", ")", "]", "ret", "[", "dev_id", "]", "=", "{", "'device_id'", ":", "dev_data", "[", "1", "]", ",", "'size'", ":", "dev_data", "[", "3", "]", ",", "'used'", ":", "dev_data", "[", "5", "]", "}", "return", "ret" ]
parse btrfs device info data .
train
true
24,276
def _eventlet_serve(sock, handle, concurrency): pool = eventlet.greenpool.GreenPool(concurrency) server_gt = eventlet.greenthread.getcurrent() while True: try: (conn, addr) = sock.accept() gt = pool.spawn(handle, conn, addr) gt.link(_eventlet_stop, server_gt, conn) (conn, addr, gt) = (None, None, None) except eventlet.StopServe: sock.close() pool.waitall() return
[ "def", "_eventlet_serve", "(", "sock", ",", "handle", ",", "concurrency", ")", ":", "pool", "=", "eventlet", ".", "greenpool", ".", "GreenPool", "(", "concurrency", ")", "server_gt", "=", "eventlet", ".", "greenthread", ".", "getcurrent", "(", ")", "while", "True", ":", "try", ":", "(", "conn", ",", "addr", ")", "=", "sock", ".", "accept", "(", ")", "gt", "=", "pool", ".", "spawn", "(", "handle", ",", "conn", ",", "addr", ")", "gt", ".", "link", "(", "_eventlet_stop", ",", "server_gt", ",", "conn", ")", "(", "conn", ",", "addr", ",", "gt", ")", "=", "(", "None", ",", "None", ",", "None", ")", "except", "eventlet", ".", "StopServe", ":", "sock", ".", "close", "(", ")", "pool", ".", "waitall", "(", ")", "return" ]
serve requests forever .
train
false
24,277
def check_random_state(seed): if ((seed is None) or (seed is np.random)): return np.random.mtrand._rand if isinstance(seed, (int, np.integer)): return np.random.RandomState(seed) if isinstance(seed, np.random.RandomState): return seed raise ValueError(('%r cannot be used to seed a numpy.random.RandomState instance' % seed))
[ "def", "check_random_state", "(", "seed", ")", ":", "if", "(", "(", "seed", "is", "None", ")", "or", "(", "seed", "is", "np", ".", "random", ")", ")", ":", "return", "np", ".", "random", ".", "mtrand", ".", "_rand", "if", "isinstance", "(", "seed", ",", "(", "int", ",", "np", ".", "integer", ")", ")", ":", "return", "np", ".", "random", ".", "RandomState", "(", "seed", ")", "if", "isinstance", "(", "seed", ",", "np", ".", "random", ".", "RandomState", ")", ":", "return", "seed", "raise", "ValueError", "(", "(", "'%r cannot be used to seed a numpy.random.RandomState instance'", "%", "seed", ")", ")" ]
turn seed into a np .
train
true
24,278
def track_distance(item, info): from beets.autotag.hooks import Distance dist = Distance() for plugin in find_plugins(): dist.update(plugin.track_distance(item, info)) return dist
[ "def", "track_distance", "(", "item", ",", "info", ")", ":", "from", "beets", ".", "autotag", ".", "hooks", "import", "Distance", "dist", "=", "Distance", "(", ")", "for", "plugin", "in", "find_plugins", "(", ")", ":", "dist", ".", "update", "(", "plugin", ".", "track_distance", "(", "item", ",", "info", ")", ")", "return", "dist" ]
determines the significance of a track metadata change .
train
false
24,279
def flatten_scalar(e): if isinstance(e, Matrix): if (e.shape == (1, 1)): e = e[0] if isinstance(e, (numpy_ndarray, scipy_sparse_matrix)): if (e.shape == (1, 1)): e = complex(e[(0, 0)]) return e
[ "def", "flatten_scalar", "(", "e", ")", ":", "if", "isinstance", "(", "e", ",", "Matrix", ")", ":", "if", "(", "e", ".", "shape", "==", "(", "1", ",", "1", ")", ")", ":", "e", "=", "e", "[", "0", "]", "if", "isinstance", "(", "e", ",", "(", "numpy_ndarray", ",", "scipy_sparse_matrix", ")", ")", ":", "if", "(", "e", ".", "shape", "==", "(", "1", ",", "1", ")", ")", ":", "e", "=", "complex", "(", "e", "[", "(", "0", ",", "0", ")", "]", ")", "return", "e" ]
flatten a 1x1 matrix to a scalar .
train
false
24,281
def test_basics(): import_file_to_module('basic', 'tests/resources/importer/basic.hy')
[ "def", "test_basics", "(", ")", ":", "import_file_to_module", "(", "'basic'", ",", "'tests/resources/importer/basic.hy'", ")" ]
make sure the basics of the importer work .
train
false
24,283
def is_api_view(callback): cls = getattr(callback, 'cls', None) return ((cls is not None) and issubclass(cls, APIView))
[ "def", "is_api_view", "(", "callback", ")", ":", "cls", "=", "getattr", "(", "callback", ",", "'cls'", ",", "None", ")", "return", "(", "(", "cls", "is", "not", "None", ")", "and", "issubclass", "(", "cls", ",", "APIView", ")", ")" ]
return true if the given view callback is a rest framework view/viewset .
train
false
24,284
def binary_concept(label, closures, subj, obj, records): if ((not (label == u'border')) and (not (label == u'contain'))): label = (label + u'_of') c = Concept(label, arity=2, closures=closures, extension=set()) for record in records: c.augment((record[subj], record[obj])) c.close() return c
[ "def", "binary_concept", "(", "label", ",", "closures", ",", "subj", ",", "obj", ",", "records", ")", ":", "if", "(", "(", "not", "(", "label", "==", "u'border'", ")", ")", "and", "(", "not", "(", "label", "==", "u'contain'", ")", ")", ")", ":", "label", "=", "(", "label", "+", "u'_of'", ")", "c", "=", "Concept", "(", "label", ",", "arity", "=", "2", ",", "closures", "=", "closures", ",", "extension", "=", "set", "(", ")", ")", "for", "record", "in", "records", ":", "c", ".", "augment", "(", "(", "record", "[", "subj", "]", ",", "record", "[", "obj", "]", ")", ")", "c", ".", "close", "(", ")", "return", "c" ]
make a binary concept out of the primary key and another field in a record .
train
false
24,287
@utils.arg('server', metavar='<server>', help=_('Name or ID of server.')) @utils.arg('network_id', metavar='<network-id>', help=_('Network ID.')) def do_add_fixed_ip(cs, args): server = _find_server(cs, args.server) server.add_fixed_ip(args.network_id)
[ "@", "utils", ".", "arg", "(", "'server'", ",", "metavar", "=", "'<server>'", ",", "help", "=", "_", "(", "'Name or ID of server.'", ")", ")", "@", "utils", ".", "arg", "(", "'network_id'", ",", "metavar", "=", "'<network-id>'", ",", "help", "=", "_", "(", "'Network ID.'", ")", ")", "def", "do_add_fixed_ip", "(", "cs", ",", "args", ")", ":", "server", "=", "_find_server", "(", "cs", ",", "args", ".", "server", ")", "server", ".", "add_fixed_ip", "(", "args", ".", "network_id", ")" ]
add new ip address on a network to server .
train
false
24,288
def get_vm_ref_from_name(session, vm_name): vms = session._call_method(vim_util, 'get_objects', 'VirtualMachine', ['name']) for vm in vms: if (vm.propSet[0].val == vm_name): return vm.obj return None
[ "def", "get_vm_ref_from_name", "(", "session", ",", "vm_name", ")", ":", "vms", "=", "session", ".", "_call_method", "(", "vim_util", ",", "'get_objects'", ",", "'VirtualMachine'", ",", "[", "'name'", "]", ")", "for", "vm", "in", "vms", ":", "if", "(", "vm", ".", "propSet", "[", "0", "]", ".", "val", "==", "vm_name", ")", ":", "return", "vm", ".", "obj", "return", "None" ]
get reference to the vm with the name specified .
train
false
24,291
def get_affected_files(allow_limited=True): diff_base = None if in_travis(): if in_travis_pr(): diff_base = travis_branch() else: diff_base = local_diff_branch() if ((diff_base is not None) and allow_limited): result = subprocess.check_output(['git', 'diff', '--name-only', diff_base]) print(('Using files changed relative to %s:' % (diff_base,))) print(('-' * 60)) print(result.rstrip('\n')) print(('-' * 60)) else: print('Diff base not specified, listing all files in repository.') result = subprocess.check_output(['git', 'ls-files']) filenames = [filename for filename in result.rstrip('\n').split('\n') if os.path.exists(filename)] return (filenames, diff_base)
[ "def", "get_affected_files", "(", "allow_limited", "=", "True", ")", ":", "diff_base", "=", "None", "if", "in_travis", "(", ")", ":", "if", "in_travis_pr", "(", ")", ":", "diff_base", "=", "travis_branch", "(", ")", "else", ":", "diff_base", "=", "local_diff_branch", "(", ")", "if", "(", "(", "diff_base", "is", "not", "None", ")", "and", "allow_limited", ")", ":", "result", "=", "subprocess", ".", "check_output", "(", "[", "'git'", ",", "'diff'", ",", "'--name-only'", ",", "diff_base", "]", ")", "print", "(", "(", "'Using files changed relative to %s:'", "%", "(", "diff_base", ",", ")", ")", ")", "print", "(", "(", "'-'", "*", "60", ")", ")", "print", "(", "result", ".", "rstrip", "(", "'\\n'", ")", ")", "print", "(", "(", "'-'", "*", "60", ")", ")", "else", ":", "print", "(", "'Diff base not specified, listing all files in repository.'", ")", "result", "=", "subprocess", ".", "check_output", "(", "[", "'git'", ",", "'ls-files'", "]", ")", "filenames", "=", "[", "filename", "for", "filename", "in", "result", ".", "rstrip", "(", "'\\n'", ")", ".", "split", "(", "'\\n'", ")", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", "]", "return", "(", "filenames", ",", "diff_base", ")" ]
gets a list of files in the repository .
train
false
24,292
def send_messages(connection, topic, input): while True: try: body = pickle.load(input) except EOFError: break print ('%s: %s' % (body.get('timestamp'), body.get('event_type', 'unknown event'))) connection.topic_send(topic, body)
[ "def", "send_messages", "(", "connection", ",", "topic", ",", "input", ")", ":", "while", "True", ":", "try", ":", "body", "=", "pickle", ".", "load", "(", "input", ")", "except", "EOFError", ":", "break", "print", "(", "'%s: %s'", "%", "(", "body", ".", "get", "(", "'timestamp'", ")", ",", "body", ".", "get", "(", "'event_type'", ",", "'unknown event'", ")", ")", ")", "connection", ".", "topic_send", "(", "topic", ",", "body", ")" ]
sends a a bunch of emailmessages .
train
false
24,293
def phrase_text_for_head(tokens, text, head_index): (begin, end) = phrase_extent_for_head(tokens, head_index) return text[begin:end]
[ "def", "phrase_text_for_head", "(", "tokens", ",", "text", ",", "head_index", ")", ":", "(", "begin", ",", "end", ")", "=", "phrase_extent_for_head", "(", "tokens", ",", "head_index", ")", "return", "text", "[", "begin", ":", "end", "]" ]
returns the entire phrase containing the head token and its dependents .
train
false
24,294
def add_task(name, timespec, user, command, environment=None): if (environment is None): environment = {} lines = [] for (key, value) in environment.iteritems(): lines.append(('%(key)s=%(value)s\n' % locals())) lines.append(('%(timespec)s %(user)s %(command)s\n' % locals())) from fabtools.require.files import file as require_file require_file(path=('/etc/cron.d/%(name)s' % locals()), contents=''.join(lines), owner='root', mode='0644', use_sudo=True)
[ "def", "add_task", "(", "name", ",", "timespec", ",", "user", ",", "command", ",", "environment", "=", "None", ")", ":", "if", "(", "environment", "is", "None", ")", ":", "environment", "=", "{", "}", "lines", "=", "[", "]", "for", "(", "key", ",", "value", ")", "in", "environment", ".", "iteritems", "(", ")", ":", "lines", ".", "append", "(", "(", "'%(key)s=%(value)s\\n'", "%", "locals", "(", ")", ")", ")", "lines", ".", "append", "(", "(", "'%(timespec)s %(user)s %(command)s\\n'", "%", "locals", "(", ")", ")", ")", "from", "fabtools", ".", "require", ".", "files", "import", "file", "as", "require_file", "require_file", "(", "path", "=", "(", "'/etc/cron.d/%(name)s'", "%", "locals", "(", ")", ")", ",", "contents", "=", "''", ".", "join", "(", "lines", ")", ",", "owner", "=", "'root'", ",", "mode", "=", "'0644'", ",", "use_sudo", "=", "True", ")" ]
add a cron task .
train
false
24,295
def parse_rgb(args, alpha): types = [arg.type for arg in args] if (types == [u'INTEGER', u'INTEGER', u'INTEGER']): (r, g, b) = [(arg.value / 255) for arg in args[:3]] return RGBA(r, g, b, alpha) elif (types == [u'PERCENTAGE', u'PERCENTAGE', u'PERCENTAGE']): (r, g, b) = [(arg.value / 100) for arg in args[:3]] return RGBA(r, g, b, alpha)
[ "def", "parse_rgb", "(", "args", ",", "alpha", ")", ":", "types", "=", "[", "arg", ".", "type", "for", "arg", "in", "args", "]", "if", "(", "types", "==", "[", "u'INTEGER'", ",", "u'INTEGER'", ",", "u'INTEGER'", "]", ")", ":", "(", "r", ",", "g", ",", "b", ")", "=", "[", "(", "arg", ".", "value", "/", "255", ")", "for", "arg", "in", "args", "[", ":", "3", "]", "]", "return", "RGBA", "(", "r", ",", "g", ",", "b", ",", "alpha", ")", "elif", "(", "types", "==", "[", "u'PERCENTAGE'", ",", "u'PERCENTAGE'", ",", "u'PERCENTAGE'", "]", ")", ":", "(", "r", ",", "g", ",", "b", ")", "=", "[", "(", "arg", ".", "value", "/", "100", ")", "for", "arg", "in", "args", "[", ":", "3", "]", "]", "return", "RGBA", "(", "r", ",", "g", ",", "b", ",", "alpha", ")" ]
if args is a list of 3 integer tokens or 3 percentage tokens .
train
false
24,296
def range2nets(rng): (start, stop) = rng if (type(start) is str): start = ip2int(start) if (type(stop) is str): stop = ip2int(stop) if (stop < start): raise ValueError() res = [] cur = start maskint = 32 mask = int2mask(maskint) while True: while (((cur & mask) == cur) and ((cur | ((~ mask) & 4294967295)) <= stop)): maskint -= 1 mask = int2mask(maskint) res.append(('%s/%d' % (int2ip(cur), (maskint + 1)))) mask = int2mask((maskint + 1)) if ((stop & mask) == cur): return res cur = ((cur | ((~ mask) & 4294967295)) + 1) maskint = 32 mask = int2mask(maskint)
[ "def", "range2nets", "(", "rng", ")", ":", "(", "start", ",", "stop", ")", "=", "rng", "if", "(", "type", "(", "start", ")", "is", "str", ")", ":", "start", "=", "ip2int", "(", "start", ")", "if", "(", "type", "(", "stop", ")", "is", "str", ")", ":", "stop", "=", "ip2int", "(", "stop", ")", "if", "(", "stop", "<", "start", ")", ":", "raise", "ValueError", "(", ")", "res", "=", "[", "]", "cur", "=", "start", "maskint", "=", "32", "mask", "=", "int2mask", "(", "maskint", ")", "while", "True", ":", "while", "(", "(", "(", "cur", "&", "mask", ")", "==", "cur", ")", "and", "(", "(", "cur", "|", "(", "(", "~", "mask", ")", "&", "4294967295", ")", ")", "<=", "stop", ")", ")", ":", "maskint", "-=", "1", "mask", "=", "int2mask", "(", "maskint", ")", "res", ".", "append", "(", "(", "'%s/%d'", "%", "(", "int2ip", "(", "cur", ")", ",", "(", "maskint", "+", "1", ")", ")", ")", ")", "mask", "=", "int2mask", "(", "(", "maskint", "+", "1", ")", ")", "if", "(", "(", "stop", "&", "mask", ")", "==", "cur", ")", ":", "return", "res", "cur", "=", "(", "(", "cur", "|", "(", "(", "~", "mask", ")", "&", "4294967295", ")", ")", "+", "1", ")", "maskint", "=", "32", "mask", "=", "int2mask", "(", "maskint", ")" ]
converts a tuple to a list of networks .
train
false
24,297
@register.tag(name=u'form_field') def do_form_field(parser, token): parts = token.contents.split(u' ', 2) if (len(parts) < 2): raise template.TemplateSyntaxError((u'%r tag must have the form field name as the first value, followed by optional key="value" attributes.' % parts[0])) html_attrs = {} if (len(parts) == 3): raw_args = list(args_split(parts[2])) if ((len(raw_args) % 2) != 0): raise template.TemplateSyntaxError((u'%r tag received the incorrect number of key=value arguments.' % parts[0])) for x in range(0, len(raw_args), 2): html_attrs[str(raw_args[x])] = Variable(raw_args[(x + 1)]) return FormFieldNode(parts[1], html_attrs)
[ "@", "register", ".", "tag", "(", "name", "=", "u'form_field'", ")", "def", "do_form_field", "(", "parser", ",", "token", ")", ":", "parts", "=", "token", ".", "contents", ".", "split", "(", "u' '", ",", "2", ")", "if", "(", "len", "(", "parts", ")", "<", "2", ")", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "(", "u'%r tag must have the form field name as the first value, followed by optional key=\"value\" attributes.'", "%", "parts", "[", "0", "]", ")", ")", "html_attrs", "=", "{", "}", "if", "(", "len", "(", "parts", ")", "==", "3", ")", ":", "raw_args", "=", "list", "(", "args_split", "(", "parts", "[", "2", "]", ")", ")", "if", "(", "(", "len", "(", "raw_args", ")", "%", "2", ")", "!=", "0", ")", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "(", "u'%r tag received the incorrect number of key=value arguments.'", "%", "parts", "[", "0", "]", ")", ")", "for", "x", "in", "range", "(", "0", ",", "len", "(", "raw_args", ")", ",", "2", ")", ":", "html_attrs", "[", "str", "(", "raw_args", "[", "x", "]", ")", "]", "=", "Variable", "(", "raw_args", "[", "(", "x", "+", "1", ")", "]", ")", "return", "FormFieldNode", "(", "parts", "[", "1", "]", ",", "html_attrs", ")" ]
render a wtforms form field allowing optional html attributes .
train
false
24,299
def _admx_policy_parent_walk(path, policy_namespace, parent_category, policy_nsmap, admx_policy_definitions, return_full_policy_names, adml_policy_resources): category_xpath_string = '/policyDefinitions/categories/{0}:category[@name="{1}"]' using_xpath_string = '/policyDefinitions/policyNamespaces/{0}:using' if (parent_category.find(':') >= 0): policy_namespace = parent_category.split(':')[0] parent_category = parent_category.split(':')[1] using_xpath_string = using_xpath_string.format(policy_namespace) policy_nsmap = dictupdate.update(policy_nsmap, _buildElementNsmap(admx_policy_definitions.xpath(using_xpath_string, namespaces=policy_nsmap))) category_xpath_string = category_xpath_string.format(policy_namespace, parent_category) if admx_policy_definitions.xpath(category_xpath_string, namespaces=policy_nsmap): tparent_category = admx_policy_definitions.xpath(category_xpath_string, namespaces=policy_nsmap)[0] this_parent_name = _getFullPolicyName(tparent_category, tparent_category.attrib['name'], return_full_policy_names, adml_policy_resources) path.append(this_parent_name) if tparent_category.xpath('{0}:parentCategory/@ref'.format(policy_namespace), namespaces=policy_nsmap): path = _admx_policy_parent_walk(path, policy_namespace, tparent_category.xpath('{0}:parentCategory/@ref'.format(policy_namespace), namespaces=policy_nsmap)[0], policy_nsmap, admx_policy_definitions, return_full_policy_names, adml_policy_resources) return path
[ "def", "_admx_policy_parent_walk", "(", "path", ",", "policy_namespace", ",", "parent_category", ",", "policy_nsmap", ",", "admx_policy_definitions", ",", "return_full_policy_names", ",", "adml_policy_resources", ")", ":", "category_xpath_string", "=", "'/policyDefinitions/categories/{0}:category[@name=\"{1}\"]'", "using_xpath_string", "=", "'/policyDefinitions/policyNamespaces/{0}:using'", "if", "(", "parent_category", ".", "find", "(", "':'", ")", ">=", "0", ")", ":", "policy_namespace", "=", "parent_category", ".", "split", "(", "':'", ")", "[", "0", "]", "parent_category", "=", "parent_category", ".", "split", "(", "':'", ")", "[", "1", "]", "using_xpath_string", "=", "using_xpath_string", ".", "format", "(", "policy_namespace", ")", "policy_nsmap", "=", "dictupdate", ".", "update", "(", "policy_nsmap", ",", "_buildElementNsmap", "(", "admx_policy_definitions", ".", "xpath", "(", "using_xpath_string", ",", "namespaces", "=", "policy_nsmap", ")", ")", ")", "category_xpath_string", "=", "category_xpath_string", ".", "format", "(", "policy_namespace", ",", "parent_category", ")", "if", "admx_policy_definitions", ".", "xpath", "(", "category_xpath_string", ",", "namespaces", "=", "policy_nsmap", ")", ":", "tparent_category", "=", "admx_policy_definitions", ".", "xpath", "(", "category_xpath_string", ",", "namespaces", "=", "policy_nsmap", ")", "[", "0", "]", "this_parent_name", "=", "_getFullPolicyName", "(", "tparent_category", ",", "tparent_category", ".", "attrib", "[", "'name'", "]", ",", "return_full_policy_names", ",", "adml_policy_resources", ")", "path", ".", "append", "(", "this_parent_name", ")", "if", "tparent_category", ".", "xpath", "(", "'{0}:parentCategory/@ref'", ".", "format", "(", "policy_namespace", ")", ",", "namespaces", "=", "policy_nsmap", ")", ":", "path", "=", "_admx_policy_parent_walk", "(", "path", ",", "policy_namespace", ",", "tparent_category", ".", "xpath", "(", "'{0}:parentCategory/@ref'", ".", "format", "(", "policy_namespace", 
")", ",", "namespaces", "=", "policy_nsmap", ")", "[", "0", "]", ",", "policy_nsmap", ",", "admx_policy_definitions", ",", "return_full_policy_names", ",", "adml_policy_resources", ")", "return", "path" ]
helper function to recursively walk up the admx namespaces and build the hierarchy for the policy .
train
false
24,300
def is_column_based(fname, sep=' DCTB ', skip=0, is_multi_byte=False): headers = get_headers(fname, sep, is_multi_byte=is_multi_byte) count = 0 if (not headers): return False for hdr in headers[skip:]: if (hdr and hdr[0] and (not hdr[0].startswith('#'))): if (len(hdr) > 1): count = len(hdr) break if (count < 2): return False for hdr in headers[skip:]: if (hdr and hdr[0] and (not hdr[0].startswith('#'))): if (len(hdr) != count): return False return True
[ "def", "is_column_based", "(", "fname", ",", "sep", "=", "' DCTB '", ",", "skip", "=", "0", ",", "is_multi_byte", "=", "False", ")", ":", "headers", "=", "get_headers", "(", "fname", ",", "sep", ",", "is_multi_byte", "=", "is_multi_byte", ")", "count", "=", "0", "if", "(", "not", "headers", ")", ":", "return", "False", "for", "hdr", "in", "headers", "[", "skip", ":", "]", ":", "if", "(", "hdr", "and", "hdr", "[", "0", "]", "and", "(", "not", "hdr", "[", "0", "]", ".", "startswith", "(", "'#'", ")", ")", ")", ":", "if", "(", "len", "(", "hdr", ")", ">", "1", ")", ":", "count", "=", "len", "(", "hdr", ")", "break", "if", "(", "count", "<", "2", ")", ":", "return", "False", "for", "hdr", "in", "headers", "[", "skip", ":", "]", ":", "if", "(", "hdr", "and", "hdr", "[", "0", "]", "and", "(", "not", "hdr", "[", "0", "]", ".", "startswith", "(", "'#'", ")", ")", ")", ":", "if", "(", "len", "(", "hdr", ")", "!=", "count", ")", ":", "return", "False", "return", "True" ]
see if the file is column based with respect to a separator .
train
false
24,302
@register.simple_tag def bootstrap_icon(icon, **kwargs): return render_icon(icon, **kwargs)
[ "@", "register", ".", "simple_tag", "def", "bootstrap_icon", "(", "icon", ",", "**", "kwargs", ")", ":", "return", "render_icon", "(", "icon", ",", "**", "kwargs", ")" ]
render an icon **tag name**:: bootstrap_icon **parameters**: icon icon name .
train
false
24,303
def _get_child_as(parent, tag, construct): child = parent.find(_ns(tag)) if (child is not None): return construct(child)
[ "def", "_get_child_as", "(", "parent", ",", "tag", ",", "construct", ")", ":", "child", "=", "parent", ".", "find", "(", "_ns", "(", "tag", ")", ")", "if", "(", "child", "is", "not", "None", ")", ":", "return", "construct", "(", "child", ")" ]
find a child node by tag .
train
false
24,304
def _get_default_annual_spacing(nyears): if (nyears < 11): (min_spacing, maj_spacing) = (1, 1) elif (nyears < 20): (min_spacing, maj_spacing) = (1, 2) elif (nyears < 50): (min_spacing, maj_spacing) = (1, 5) elif (nyears < 100): (min_spacing, maj_spacing) = (5, 10) elif (nyears < 200): (min_spacing, maj_spacing) = (5, 25) elif (nyears < 600): (min_spacing, maj_spacing) = (10, 50) else: factor = ((nyears // 1000) + 1) (min_spacing, maj_spacing) = ((factor * 20), (factor * 100)) return (min_spacing, maj_spacing)
[ "def", "_get_default_annual_spacing", "(", "nyears", ")", ":", "if", "(", "nyears", "<", "11", ")", ":", "(", "min_spacing", ",", "maj_spacing", ")", "=", "(", "1", ",", "1", ")", "elif", "(", "nyears", "<", "20", ")", ":", "(", "min_spacing", ",", "maj_spacing", ")", "=", "(", "1", ",", "2", ")", "elif", "(", "nyears", "<", "50", ")", ":", "(", "min_spacing", ",", "maj_spacing", ")", "=", "(", "1", ",", "5", ")", "elif", "(", "nyears", "<", "100", ")", ":", "(", "min_spacing", ",", "maj_spacing", ")", "=", "(", "5", ",", "10", ")", "elif", "(", "nyears", "<", "200", ")", ":", "(", "min_spacing", ",", "maj_spacing", ")", "=", "(", "5", ",", "25", ")", "elif", "(", "nyears", "<", "600", ")", ":", "(", "min_spacing", ",", "maj_spacing", ")", "=", "(", "10", ",", "50", ")", "else", ":", "factor", "=", "(", "(", "nyears", "//", "1000", ")", "+", "1", ")", "(", "min_spacing", ",", "maj_spacing", ")", "=", "(", "(", "factor", "*", "20", ")", ",", "(", "factor", "*", "100", ")", ")", "return", "(", "min_spacing", ",", "maj_spacing", ")" ]
returns a default spacing between consecutive ticks for annual data .
train
true
24,305
def get_snippet_model_from_url_params(app_name, model_name):
    """Retrieve a model from an app_label / model_name combo.

    Raises ``Http404`` when the model does not exist or is not a
    registered snippet model.
    """
    try:
        snippet_model = apps.get_model(app_name, model_name)
    except LookupError:
        # Unknown app label or model name -> treat as a missing page.
        raise Http404
    # Only models registered as snippets may be addressed this way.
    if snippet_model not in get_snippet_models():
        raise Http404
    return snippet_model
[ "def", "get_snippet_model_from_url_params", "(", "app_name", ",", "model_name", ")", ":", "try", ":", "model", "=", "apps", ".", "get_model", "(", "app_name", ",", "model_name", ")", "except", "LookupError", ":", "raise", "Http404", "if", "(", "model", "not", "in", "get_snippet_models", "(", ")", ")", ":", "raise", "Http404", "return", "model" ]
retrieve a model from an app_label / model_name combo .
train
false