Dataset columns:
- id_within_dataset: int64, values 1 to 55.5k
- snippet: string, lengths 19 to 14.2k characters
- tokens: list, lengths 6 to 1.63k tokens
- nl: string, lengths 6 to 352 characters
- split_within_dataset: string, 1 class
- is_duplicated: bool, 2 classes

Each record below lists these six fields in order: id_within_dataset, snippet, tokens, nl, split_within_dataset, is_duplicated.
7,003
def reformat_node(item=None, full=False):
    desired_keys = ['id', 'name', 'state', 'public_ips', 'private_ips', 'size', 'image', 'location']
    item['private_ips'] = []
    item['public_ips'] = []
    if ('ips' in item):
        for ip in item['ips']:
            if is_public_ip(ip):
                item['public_ips'].append(ip)
            else:
                item['private_ips'].append(ip)
    for key in desired_keys:
        if (key not in item):
            item[key] = None
    to_del = []
    if (not full):
        for key in six.iterkeys(item):
            if (key not in desired_keys):
                to_del.append(key)
        for key in to_del:
            del item[key]
    if ('state' in item):
        item['state'] = joyent_node_state(item['state'])
    return item
[ "def", "reformat_node", "(", "item", "=", "None", ",", "full", "=", "False", ")", ":", "desired_keys", "=", "[", "'id'", ",", "'name'", ",", "'state'", ",", "'public_ips'", ",", "'private_ips'", ",", "'size'", ",", "'image'", ",", "'location'", "]", "item", "[", "'private_ips'", "]", "=", "[", "]", "item", "[", "'public_ips'", "]", "=", "[", "]", "if", "(", "'ips'", "in", "item", ")", ":", "for", "ip", "in", "item", "[", "'ips'", "]", ":", "if", "is_public_ip", "(", "ip", ")", ":", "item", "[", "'public_ips'", "]", ".", "append", "(", "ip", ")", "else", ":", "item", "[", "'private_ips'", "]", ".", "append", "(", "ip", ")", "for", "key", "in", "desired_keys", ":", "if", "(", "key", "not", "in", "item", ")", ":", "item", "[", "key", "]", "=", "None", "to_del", "=", "[", "]", "if", "(", "not", "full", ")", ":", "for", "key", "in", "six", ".", "iterkeys", "(", "item", ")", ":", "if", "(", "key", "not", "in", "desired_keys", ")", ":", "to_del", ".", "append", "(", "key", ")", "for", "key", "in", "to_del", ":", "del", "item", "[", "key", "]", "if", "(", "'state'", "in", "item", ")", ":", "item", "[", "'state'", "]", "=", "joyent_node_state", "(", "item", "[", "'state'", "]", ")", "return", "item" ]
reformat the returned data from joyent .
train
true
7,004
@task
def get_healthz():
    url = 'https://localhost:8443/healthz'
    ret = 'FAIL'
    with settings(warn_only=True):
        ret = run(('curl -k %s' % url))
    fprint(('Healthz status: %s' % ret))
    return (ret == 'OK')
[ "@", "task", "def", "get_healthz", "(", ")", ":", "url", "=", "'https://localhost:8443/healthz'", "ret", "=", "'FAIL'", "with", "settings", "(", "warn_only", "=", "True", ")", ":", "ret", "=", "run", "(", "(", "'curl -k %s'", "%", "url", ")", ")", "fprint", "(", "(", "'Healthz status: %s'", "%", "ret", ")", ")", "return", "(", "ret", "==", "'OK'", ")" ]
fetch healthz status for the local instance .
train
false
7,005
def apply_penalty(tensor_or_tensors, penalty, **kwargs):
    try:
        return sum((penalty(x, **kwargs) for x in tensor_or_tensors))
    except (TypeError, ValueError):
        return penalty(tensor_or_tensors, **kwargs)
[ "def", "apply_penalty", "(", "tensor_or_tensors", ",", "penalty", ",", "**", "kwargs", ")", ":", "try", ":", "return", "sum", "(", "(", "penalty", "(", "x", ",", "**", "kwargs", ")", "for", "x", "in", "tensor_or_tensors", ")", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "return", "penalty", "(", "tensor_or_tensors", ",", "**", "kwargs", ")" ]
computes the total cost for applying a specified penalty to a tensor or group of tensors .
train
false
7,006
def dmp_sub_mul(f, g, h, u, K):
    return dmp_sub(f, dmp_mul(g, h, u, K), u, K)
[ "def", "dmp_sub_mul", "(", "f", ",", "g", ",", "h", ",", "u", ",", "K", ")", ":", "return", "dmp_sub", "(", "f", ",", "dmp_mul", "(", "g", ",", "h", ",", "u", ",", "K", ")", ",", "u", ",", "K", ")" ]
returns f - g*h where f, g and h are polynomials .
train
false
7,007
def _convert_range_to_list(tgt, range_server):
    r = seco.range.Range(range_server)
    try:
        return r.expand(tgt)
    except seco.range.RangeException as err:
        log.error('Range server exception: {0}'.format(err))
        return []
[ "def", "_convert_range_to_list", "(", "tgt", ",", "range_server", ")", ":", "r", "=", "seco", ".", "range", ".", "Range", "(", "range_server", ")", "try", ":", "return", "r", ".", "expand", "(", "tgt", ")", "except", "seco", ".", "range", ".", "RangeException", "as", "err", ":", "log", ".", "error", "(", "'Range server exception: {0}'", ".", "format", "(", "err", ")", ")", "return", "[", "]" ]
convert a seco.range range expression into a list of targets .
train
true
7,008
def peakInterp(mX, pX, ploc):
    val = mX[ploc]
    lval = mX[(ploc - 1)]
    rval = mX[(ploc + 1)]
    iploc = (ploc + ((0.5 * (lval - rval)) / ((lval - (2 * val)) + rval)))
    ipmag = (val - ((0.25 * (lval - rval)) * (iploc - ploc)))
    ipphase = np.interp(iploc, np.arange(0, pX.size), pX)
    return (iploc, ipmag, ipphase)
[ "def", "peakInterp", "(", "mX", ",", "pX", ",", "ploc", ")", ":", "val", "=", "mX", "[", "ploc", "]", "lval", "=", "mX", "[", "(", "ploc", "-", "1", ")", "]", "rval", "=", "mX", "[", "(", "ploc", "+", "1", ")", "]", "iploc", "=", "(", "ploc", "+", "(", "(", "0.5", "*", "(", "lval", "-", "rval", ")", ")", "/", "(", "(", "lval", "-", "(", "2", "*", "val", ")", ")", "+", "rval", ")", ")", ")", "ipmag", "=", "(", "val", "-", "(", "(", "0.25", "*", "(", "lval", "-", "rval", ")", ")", "*", "(", "iploc", "-", "ploc", ")", ")", ")", "ipphase", "=", "np", ".", "interp", "(", "iploc", ",", "np", ".", "arange", "(", "0", ",", "pX", ".", "size", ")", ",", "pX", ")", "return", "(", "iploc", ",", "ipmag", ",", "ipphase", ")" ]
interpolate peak values using parabolic interpolation .
train
false
7,009
def get_override_for_user(user, block, name, default=None):
    if (not hasattr(block, '_student_overrides')):
        block._student_overrides = {}
    overrides = block._student_overrides.get(user.id)
    if (overrides is None):
        overrides = _get_overrides_for_user(user, block)
        block._student_overrides[user.id] = overrides
    return overrides.get(name, default)
[ "def", "get_override_for_user", "(", "user", ",", "block", ",", "name", ",", "default", "=", "None", ")", ":", "if", "(", "not", "hasattr", "(", "block", ",", "'_student_overrides'", ")", ")", ":", "block", ".", "_student_overrides", "=", "{", "}", "overrides", "=", "block", ".", "_student_overrides", ".", "get", "(", "user", ".", "id", ")", "if", "(", "overrides", "is", "None", ")", ":", "overrides", "=", "_get_overrides_for_user", "(", "user", ",", "block", ")", "block", ".", "_student_overrides", "[", "user", ".", "id", "]", "=", "overrides", "return", "overrides", ".", "get", "(", "name", ",", "default", ")" ]
gets the value of the overridden field for the user .
train
false
7,010
def get_pull_request_files(project, num, auth=False):
    url = 'https://api.github.com/repos/{project}/pulls/{num}/files'.format(project=project, num=num)
    if auth:
        header = make_auth_header()
    else:
        header = None
    return get_paged_request(url, headers=header)
[ "def", "get_pull_request_files", "(", "project", ",", "num", ",", "auth", "=", "False", ")", ":", "url", "=", "'https://api.github.com/repos/{project}/pulls/{num}/files'", ".", "format", "(", "project", "=", "project", ",", "num", "=", "num", ")", "if", "auth", ":", "header", "=", "make_auth_header", "(", ")", "else", ":", "header", "=", "None", "return", "get_paged_request", "(", "url", ",", "headers", "=", "header", ")" ]
get list of files in a pull request .
train
true
7,012
def ex_literal(val):
    if (val is None):
        return ast.Name('None', ast.Load())
    elif isinstance(val, six.integer_types):
        return ast.Num(val)
    elif isinstance(val, bool):
        return ast.Name(bytes(val), ast.Load())
    elif isinstance(val, six.string_types):
        return ast.Str(val)
    raise TypeError(u'no literal for {0}'.format(type(val)))
[ "def", "ex_literal", "(", "val", ")", ":", "if", "(", "val", "is", "None", ")", ":", "return", "ast", ".", "Name", "(", "'None'", ",", "ast", ".", "Load", "(", ")", ")", "elif", "isinstance", "(", "val", ",", "six", ".", "integer_types", ")", ":", "return", "ast", ".", "Num", "(", "val", ")", "elif", "isinstance", "(", "val", ",", "bool", ")", ":", "return", "ast", ".", "Name", "(", "bytes", "(", "val", ")", ",", "ast", ".", "Load", "(", ")", ")", "elif", "isinstance", "(", "val", ",", "six", ".", "string_types", ")", ":", "return", "ast", ".", "Str", "(", "val", ")", "raise", "TypeError", "(", "u'no literal for {0}'", ".", "format", "(", "type", "(", "val", ")", ")", ")" ]
build an ast expression for a literal value (none, an int, a bool, or a string) .
train
false
7,015
def split_seq(curr_seq, barcode_len, primer_seq_len):
    curr_barcode = curr_seq[0:barcode_len]
    rest_of_seq = curr_seq[barcode_len:]
    primer_seq = rest_of_seq[0:primer_seq_len]
    rest_of_seq = rest_of_seq[primer_seq_len:]
    return (curr_barcode, primer_seq, rest_of_seq)
[ "def", "split_seq", "(", "curr_seq", ",", "barcode_len", ",", "primer_seq_len", ")", ":", "curr_barcode", "=", "curr_seq", "[", "0", ":", "barcode_len", "]", "rest_of_seq", "=", "curr_seq", "[", "barcode_len", ":", "]", "primer_seq", "=", "rest_of_seq", "[", "0", ":", "primer_seq_len", "]", "rest_of_seq", "=", "rest_of_seq", "[", "primer_seq_len", ":", "]", "return", "(", "curr_barcode", ",", "primer_seq", ",", "rest_of_seq", ")" ]
split sequence into parts: barcode, primer, and remaining sequence .
train
false
7,016
def OpenDocumentSpreadsheet():
    doc = OpenDocument('application/vnd.oasis.opendocument.spreadsheet')
    doc.spreadsheet = Spreadsheet()
    doc.body.addElement(doc.spreadsheet)
    return doc
[ "def", "OpenDocumentSpreadsheet", "(", ")", ":", "doc", "=", "OpenDocument", "(", "'application/vnd.oasis.opendocument.spreadsheet'", ")", "doc", ".", "spreadsheet", "=", "Spreadsheet", "(", ")", "doc", ".", "body", ".", "addElement", "(", "doc", ".", "spreadsheet", ")", "return", "doc" ]
creates a spreadsheet document .
train
false
7,017
def display_page(request, virtual_path):
    page = None
    for page_model in AbstractPage.__subclasses__():
        try:
            page = page_model.objects.live(request.user).get(virtual_path=virtual_path)
        except ObjectDoesNotExist:
            pass
    if (page is None):
        raise Http404
    if page.url:
        return redirect(page.url)
    template_name = 'staticpages/page_display.html'
    if request.is_ajax():
        template_name = 'staticpages/_body.html'
    ctx = {'page': page}
    return render(request, template_name, ctx)
[ "def", "display_page", "(", "request", ",", "virtual_path", ")", ":", "page", "=", "None", "for", "page_model", "in", "AbstractPage", ".", "__subclasses__", "(", ")", ":", "try", ":", "page", "=", "page_model", ".", "objects", ".", "live", "(", "request", ".", "user", ")", ".", "get", "(", "virtual_path", "=", "virtual_path", ")", "except", "ObjectDoesNotExist", ":", "pass", "if", "(", "page", "is", "None", ")", ":", "raise", "Http404", "if", "page", ".", "url", ":", "return", "redirect", "(", "page", ".", "url", ")", "template_name", "=", "'staticpages/page_display.html'", "if", "request", ".", "is_ajax", "(", ")", ":", "template_name", "=", "'staticpages/_body.html'", "ctx", "=", "{", "'page'", ":", "page", "}", "return", "render", "(", "request", ",", "template_name", ",", "ctx", ")" ]
displays an active page defined in virtual_path .
train
false
7,018
def upcaseTokens(s, l, t):
    return [tt.upper() for tt in map(_ustr, t)]
[ "def", "upcaseTokens", "(", "s", ",", "l", ",", "t", ")", ":", "return", "[", "tt", ".", "upper", "(", ")", "for", "tt", "in", "map", "(", "_ustr", ",", "t", ")", "]" ]
helper parse action to convert tokens to upper case .
train
true
7,020
def fire_exception(exc, opts, job=None, node='minion'):
    if (job is None):
        job = {}
    event = salt.utils.event.SaltEvent(node, opts=opts, listen=False)
    event.fire_event(pack_exception(exc), '_salt_error')
[ "def", "fire_exception", "(", "exc", ",", "opts", ",", "job", "=", "None", ",", "node", "=", "'minion'", ")", ":", "if", "(", "job", "is", "None", ")", ":", "job", "=", "{", "}", "event", "=", "salt", ".", "utils", ".", "event", ".", "SaltEvent", "(", "node", ",", "opts", "=", "opts", ",", "listen", "=", "False", ")", "event", ".", "fire_event", "(", "pack_exception", "(", "exc", ")", ",", "'_salt_error'", ")" ]
fire raw exception across the event bus .
train
true
7,021
def forum_is_unread(forum, forumsread, user):
    if (not user.is_authenticated):
        return False
    read_cutoff = (time_utcnow() - timedelta(days=flaskbb_config['TRACKER_LENGTH']))
    if (flaskbb_config['TRACKER_LENGTH'] == 0):
        return False
    if (forum and (forum.topic_count == 0)):
        return False
    if (forum.last_post_created < read_cutoff):
        return False
    if (forum and (not forumsread)):
        return (forum.last_post_created > read_cutoff)
    try:
        if (forum.last_post_created > forumsread.cleared):
            if (forum.last_post_created < forumsread.last_read):
                return False
    except TypeError:
        pass
    return (forum.last_post_created > forumsread.last_read)
[ "def", "forum_is_unread", "(", "forum", ",", "forumsread", ",", "user", ")", ":", "if", "(", "not", "user", ".", "is_authenticated", ")", ":", "return", "False", "read_cutoff", "=", "(", "time_utcnow", "(", ")", "-", "timedelta", "(", "days", "=", "flaskbb_config", "[", "'TRACKER_LENGTH'", "]", ")", ")", "if", "(", "flaskbb_config", "[", "'TRACKER_LENGTH'", "]", "==", "0", ")", ":", "return", "False", "if", "(", "forum", "and", "(", "forum", ".", "topic_count", "==", "0", ")", ")", ":", "return", "False", "if", "(", "forum", ".", "last_post_created", "<", "read_cutoff", ")", ":", "return", "False", "if", "(", "forum", "and", "(", "not", "forumsread", ")", ")", ":", "return", "(", "forum", ".", "last_post_created", ">", "read_cutoff", ")", "try", ":", "if", "(", "forum", ".", "last_post_created", ">", "forumsread", ".", "cleared", ")", ":", "if", "(", "forum", ".", "last_post_created", "<", "forumsread", ".", "last_read", ")", ":", "return", "False", "except", "TypeError", ":", "pass", "return", "(", "forum", ".", "last_post_created", ">", "forumsread", ".", "last_read", ")" ]
checks if a forum is unread .
train
false
7,022
def get_monitor_adapter():
    tmp = init_app('iwconfig', True)
    for line in tmp.split('\n'):
        if line.startswith(' '):
            continue
        elif (len(line.split(' ')[0]) > 1):
            if ('Mode:Monitor' in line):
                return line.split(' ')[0]
    return None
[ "def", "get_monitor_adapter", "(", ")", ":", "tmp", "=", "init_app", "(", "'iwconfig'", ",", "True", ")", "for", "line", "in", "tmp", ".", "split", "(", "'\\n'", ")", ":", "if", "line", ".", "startswith", "(", "' '", ")", ":", "continue", "elif", "(", "len", "(", "line", ".", "split", "(", "' '", ")", "[", "0", "]", ")", ">", "1", ")", ":", "if", "(", "'Mode:Monitor'", "in", "line", ")", ":", "return", "line", ".", "split", "(", "' '", ")", "[", "0", "]", "return", "None" ]
try and automatically detect which adapter is in monitor mode .
train
false
7,023
def stub_out_glanceclient_create(stubs, sent_to_glance):
    orig_add_image = glanceclient.v1.images.ImageManager.create

    def fake_create(context, metadata, data=None):
        sent_to_glance['metadata'] = metadata
        sent_to_glance['data'] = data
        return orig_add_image(metadata, data)

    stubs.Set(glanceclient.v1.images.ImageManager, 'create', fake_create)
[ "def", "stub_out_glanceclient_create", "(", "stubs", ",", "sent_to_glance", ")", ":", "orig_add_image", "=", "glanceclient", ".", "v1", ".", "images", ".", "ImageManager", ".", "create", "def", "fake_create", "(", "context", ",", "metadata", ",", "data", "=", "None", ")", ":", "sent_to_glance", "[", "'metadata'", "]", "=", "metadata", "sent_to_glance", "[", "'data'", "]", "=", "data", "return", "orig_add_image", "(", "metadata", ",", "data", ")", "stubs", ".", "Set", "(", "glanceclient", ".", "v1", ".", "images", ".", "ImageManager", ",", "'create'", ",", "fake_create", ")" ]
we return the metadata sent to glance by modifying the sent_to_glance dict in place .
train
false
7,024
def extract_param_list(params, prefix=''):
    key_re = re.compile(('%s\\.member\\.([0-9]+)\\.(.*)' % prefix))

    def get_param_data(params):
        for (param_name, value) in params.items():
            match = key_re.match(param_name)
            if match:
                try:
                    index = int(match.group(1))
                except ValueError:
                    pass
                else:
                    key = match.group(2)
                    (yield (index, (key, value)))

    def key_func(d):
        return d[0]

    data = sorted(get_param_data(params), key=key_func)
    members = itertools.groupby(data, key_func)
    return [dict((kv for (di, kv) in m)) for (mi, m) in members]
[ "def", "extract_param_list", "(", "params", ",", "prefix", "=", "''", ")", ":", "key_re", "=", "re", ".", "compile", "(", "(", "'%s\\\\.member\\\\.([0-9]+)\\\\.(.*)'", "%", "prefix", ")", ")", "def", "get_param_data", "(", "params", ")", ":", "for", "(", "param_name", ",", "value", ")", "in", "params", ".", "items", "(", ")", ":", "match", "=", "key_re", ".", "match", "(", "param_name", ")", "if", "match", ":", "try", ":", "index", "=", "int", "(", "match", ".", "group", "(", "1", ")", ")", "except", "ValueError", ":", "pass", "else", ":", "key", "=", "match", ".", "group", "(", "2", ")", "(", "yield", "(", "index", ",", "(", "key", ",", "value", ")", ")", ")", "def", "key_func", "(", "d", ")", ":", "return", "d", "[", "0", "]", "data", "=", "sorted", "(", "get_param_data", "(", "params", ")", ",", "key", "=", "key_func", ")", "members", "=", "itertools", ".", "groupby", "(", "data", ",", "key_func", ")", "return", "[", "dict", "(", "(", "kv", "for", "(", "di", ",", "kv", ")", "in", "m", ")", ")", "for", "(", "mi", ",", "m", ")", "in", "members", "]" ]
extract a list-of-dicts based on parameters containing aws style list .
train
false
7,026
def gps_newpos(lat, lon, bearing, distance):
    lat1 = math.radians(lat)
    lon1 = math.radians(lon)
    brng = math.radians(bearing)
    dr = (distance / radius_of_earth)
    lat2 = math.asin(((math.sin(lat1) * math.cos(dr)) + ((math.cos(lat1) * math.sin(dr)) * math.cos(brng))))
    lon2 = (lon1 + math.atan2(((math.sin(brng) * math.sin(dr)) * math.cos(lat1)), (math.cos(dr) - (math.sin(lat1) * math.sin(lat2)))))
    return (math.degrees(lat2), wrap_valid_longitude(math.degrees(lon2)))
[ "def", "gps_newpos", "(", "lat", ",", "lon", ",", "bearing", ",", "distance", ")", ":", "lat1", "=", "math", ".", "radians", "(", "lat", ")", "lon1", "=", "math", ".", "radians", "(", "lon", ")", "brng", "=", "math", ".", "radians", "(", "bearing", ")", "dr", "=", "(", "distance", "/", "radius_of_earth", ")", "lat2", "=", "math", ".", "asin", "(", "(", "(", "math", ".", "sin", "(", "lat1", ")", "*", "math", ".", "cos", "(", "dr", ")", ")", "+", "(", "(", "math", ".", "cos", "(", "lat1", ")", "*", "math", ".", "sin", "(", "dr", ")", ")", "*", "math", ".", "cos", "(", "brng", ")", ")", ")", ")", "lon2", "=", "(", "lon1", "+", "math", ".", "atan2", "(", "(", "(", "math", ".", "sin", "(", "brng", ")", "*", "math", ".", "sin", "(", "dr", ")", ")", "*", "math", ".", "cos", "(", "lat1", ")", ")", ",", "(", "math", ".", "cos", "(", "dr", ")", "-", "(", "math", ".", "sin", "(", "lat1", ")", "*", "math", ".", "sin", "(", "lat2", ")", ")", ")", ")", ")", "return", "(", "math", ".", "degrees", "(", "lat2", ")", ",", "wrap_valid_longitude", "(", "math", ".", "degrees", "(", "lon2", ")", ")", ")" ]
extrapolate latitude/longitude given a heading and distance thanks to URL .
train
true
7,027
def _FindDirectXInstallation():
    if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
        return _FindDirectXInstallation.dxsdk_dir
    dxsdk_dir = os.environ.get('DXSDK_DIR')
    if (not dxsdk_dir):
        cmd = ['reg.exe', 'query', 'HKLM\\Software\\Microsoft\\DirectX', '/s']
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        for line in p.communicate()[0].splitlines():
            if ('InstallPath' in line):
                dxsdk_dir = (line.split(' ')[3] + '\\')
    _FindDirectXInstallation.dxsdk_dir = dxsdk_dir
    return dxsdk_dir
[ "def", "_FindDirectXInstallation", "(", ")", ":", "if", "hasattr", "(", "_FindDirectXInstallation", ",", "'dxsdk_dir'", ")", ":", "return", "_FindDirectXInstallation", ".", "dxsdk_dir", "dxsdk_dir", "=", "os", ".", "environ", ".", "get", "(", "'DXSDK_DIR'", ")", "if", "(", "not", "dxsdk_dir", ")", ":", "cmd", "=", "[", "'reg.exe'", ",", "'query'", ",", "'HKLM\\\\Software\\\\Microsoft\\\\DirectX'", ",", "'/s'", "]", "p", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "for", "line", "in", "p", ".", "communicate", "(", ")", "[", "0", "]", ".", "splitlines", "(", ")", ":", "if", "(", "'InstallPath'", "in", "line", ")", ":", "dxsdk_dir", "=", "(", "line", ".", "split", "(", "' '", ")", "[", "3", "]", "+", "'\\\\'", ")", "_FindDirectXInstallation", ".", "dxsdk_dir", "=", "dxsdk_dir", "return", "dxsdk_dir" ]
try to find an installation location for the directx sdk .
train
false
7,028
def _get_rabbitmq_plugin():
    rabbitmq = __context__.get('rabbitmq-plugins')
    if (rabbitmq is None):
        version = __salt__['pkg.version']('rabbitmq-server').split('-')[0]
        rabbitmq = '/usr/lib/rabbitmq/lib/rabbitmq_server-{0}/sbin/rabbitmq-plugins'.format(version)
        __context__['rabbitmq-plugins'] = rabbitmq
    return rabbitmq
[ "def", "_get_rabbitmq_plugin", "(", ")", ":", "rabbitmq", "=", "__context__", ".", "get", "(", "'rabbitmq-plugins'", ")", "if", "(", "rabbitmq", "is", "None", ")", ":", "version", "=", "__salt__", "[", "'pkg.version'", "]", "(", "'rabbitmq-server'", ")", ".", "split", "(", "'-'", ")", "[", "0", "]", "rabbitmq", "=", "'/usr/lib/rabbitmq/lib/rabbitmq_server-{0}/sbin/rabbitmq-plugins'", ".", "format", "(", "version", ")", "__context__", "[", "'rabbitmq-plugins'", "]", "=", "rabbitmq", "return", "rabbitmq" ]
returns the rabbitmq-plugin command path if we're running an os that doesn't put it in the standard /usr/bin or /usr/local/bin . this works by taking the rabbitmq-server version and looking for where it seems to be hidden in /usr/lib .
train
false
7,030
def rs_square(p1, x, prec):
    R = p1.ring
    p = R.zero
    iv = R.gens.index(x)
    get = p.get
    items = list(p1.items())
    items.sort(key=(lambda e: e[0][iv]))
    monomial_mul = R.monomial_mul
    for i in range(len(items)):
        (exp1, v1) = items[i]
        for j in range(i):
            (exp2, v2) = items[j]
            if ((exp1[iv] + exp2[iv]) < prec):
                exp = monomial_mul(exp1, exp2)
                p[exp] = (get(exp, 0) + (v1 * v2))
            else:
                break
    p = p.imul_num(2)
    get = p.get
    for (expv, v) in p1.items():
        if ((2 * expv[iv]) < prec):
            e2 = monomial_mul(expv, expv)
            p[e2] = (get(e2, 0) + (v ** 2))
    p.strip_zero()
    return p
[ "def", "rs_square", "(", "p1", ",", "x", ",", "prec", ")", ":", "R", "=", "p1", ".", "ring", "p", "=", "R", ".", "zero", "iv", "=", "R", ".", "gens", ".", "index", "(", "x", ")", "get", "=", "p", ".", "get", "items", "=", "list", "(", "p1", ".", "items", "(", ")", ")", "items", ".", "sort", "(", "key", "=", "(", "lambda", "e", ":", "e", "[", "0", "]", "[", "iv", "]", ")", ")", "monomial_mul", "=", "R", ".", "monomial_mul", "for", "i", "in", "range", "(", "len", "(", "items", ")", ")", ":", "(", "exp1", ",", "v1", ")", "=", "items", "[", "i", "]", "for", "j", "in", "range", "(", "i", ")", ":", "(", "exp2", ",", "v2", ")", "=", "items", "[", "j", "]", "if", "(", "(", "exp1", "[", "iv", "]", "+", "exp2", "[", "iv", "]", ")", "<", "prec", ")", ":", "exp", "=", "monomial_mul", "(", "exp1", ",", "exp2", ")", "p", "[", "exp", "]", "=", "(", "get", "(", "exp", ",", "0", ")", "+", "(", "v1", "*", "v2", ")", ")", "else", ":", "break", "p", "=", "p", ".", "imul_num", "(", "2", ")", "get", "=", "p", ".", "get", "for", "(", "expv", ",", "v", ")", "in", "p1", ".", "items", "(", ")", ":", "if", "(", "(", "2", "*", "expv", "[", "iv", "]", ")", "<", "prec", ")", ":", "e2", "=", "monomial_mul", "(", "expv", ",", "expv", ")", "p", "[", "e2", "]", "=", "(", "get", "(", "e2", ",", "0", ")", "+", "(", "v", "**", "2", ")", ")", "p", ".", "strip_zero", "(", ")", "return", "p" ]
square the series modulo o(x**prec) .
train
false
7,031
def ParseLDIF(f, ignore_attrs=None, maxentries=0):
    ldif_parser = LDIFRecordList(f, ignored_attr_types=ignore_attrs, max_entries=maxentries, process_url_schemes=0)
    ldif_parser.parse()
    return ldif_parser.all_records
[ "def", "ParseLDIF", "(", "f", ",", "ignore_attrs", "=", "None", ",", "maxentries", "=", "0", ")", ":", "ldif_parser", "=", "LDIFRecordList", "(", "f", ",", "ignored_attr_types", "=", "ignore_attrs", ",", "max_entries", "=", "maxentries", ",", "process_url_schemes", "=", "0", ")", "ldif_parser", ".", "parse", "(", ")", "return", "ldif_parser", ".", "all_records" ]
parse ldif records read from file .
train
false
7,035
def build_nodegraph_args(descr=None, epilog=None, parser=None, citations=None):
    parser = build_graph_args(descr=descr, epilog=epilog, parser=parser, citations=citations)
    return parser
[ "def", "build_nodegraph_args", "(", "descr", "=", "None", ",", "epilog", "=", "None", ",", "parser", "=", "None", ",", "citations", "=", "None", ")", ":", "parser", "=", "build_graph_args", "(", "descr", "=", "descr", ",", "epilog", "=", "epilog", ",", "parser", "=", "parser", ",", "citations", "=", "citations", ")", "return", "parser" ]
build an argumentparser with args for nodegraph based scripts .
train
false
7,036
def test_set_interval_overflow():
    t = usertypes.Timer()
    with pytest.raises(OverflowError):
        t.setInterval((2 ** 64))
[ "def", "test_set_interval_overflow", "(", ")", ":", "t", "=", "usertypes", ".", "Timer", "(", ")", "with", "pytest", ".", "raises", "(", "OverflowError", ")", ":", "t", ".", "setInterval", "(", "(", "2", "**", "64", ")", ")" ]
make sure setinterval raises overflowerror with very big numbers .
train
false
7,038
@pytest.yield_fixture(params=[None, tdata])
def temp_server_with_excfmt(request):
    data = request.param
    log_format_exc = (lambda tb: 'CUSTOM TRACEBACK')
    stream = StringIO()
    s = Server(copy(data), formats=all_formats, allow_add=True, logfile=stream, log_exception_formatter=log_format_exc)
    s.app.testing = True
    with s.app.test_client() as client:
        (yield (client, stream))
[ "@", "pytest", ".", "yield_fixture", "(", "params", "=", "[", "None", ",", "tdata", "]", ")", "def", "temp_server_with_excfmt", "(", "request", ")", ":", "data", "=", "request", ".", "param", "log_format_exc", "=", "(", "lambda", "tb", ":", "'CUSTOM TRACEBACK'", ")", "stream", "=", "StringIO", "(", ")", "s", "=", "Server", "(", "copy", "(", "data", ")", ",", "formats", "=", "all_formats", ",", "allow_add", "=", "True", ",", "logfile", "=", "stream", ",", "log_exception_formatter", "=", "log_format_exc", ")", "s", ".", "app", ".", "testing", "=", "True", "with", "s", ".", "app", ".", "test_client", "(", ")", "as", "client", ":", "(", "yield", "(", "client", ",", "stream", ")", ")" ]
a test server fixture with a custom log exception formatter .
train
false
7,039
def get_mapview_config():
    namespace = 'ckanext.spatial.common_map.'
    return dict([(k.replace(namespace, ''), v) for (k, v) in config.iteritems() if k.startswith(namespace)])
[ "def", "get_mapview_config", "(", ")", ":", "namespace", "=", "'ckanext.spatial.common_map.'", "return", "dict", "(", "[", "(", "k", ".", "replace", "(", "namespace", ",", "''", ")", ",", "v", ")", "for", "(", "k", ",", "v", ")", "in", "config", ".", "iteritems", "(", ")", "if", "k", ".", "startswith", "(", "namespace", ")", "]", ")" ]
extracts and returns map view configuration of the reclineview extension .
train
false
7,040
@_call_aside
def _initialize_master_working_set():
    working_set = WorkingSet._build_master()
    _declare_state('object', working_set=working_set)
    require = working_set.require
    iter_entry_points = working_set.iter_entry_points
    add_activation_listener = working_set.subscribe
    run_script = working_set.run_script
    run_main = run_script
    dist = None
    for dist in working_set:
        dist.activate(replace=False)
    del dist
    add_activation_listener((lambda dist: dist.activate(replace=True)), existing=False)
    working_set.entries = []
    list(map(working_set.add_entry, sys.path))
    globals().update(locals())
[ "@", "_call_aside", "def", "_initialize_master_working_set", "(", ")", ":", "working_set", "=", "WorkingSet", ".", "_build_master", "(", ")", "_declare_state", "(", "'object'", ",", "working_set", "=", "working_set", ")", "require", "=", "working_set", ".", "require", "iter_entry_points", "=", "working_set", ".", "iter_entry_points", "add_activation_listener", "=", "working_set", ".", "subscribe", "run_script", "=", "working_set", ".", "run_script", "run_main", "=", "run_script", "dist", "=", "None", "for", "dist", "in", "working_set", ":", "dist", ".", "activate", "(", "replace", "=", "False", ")", "del", "dist", "add_activation_listener", "(", "(", "lambda", "dist", ":", "dist", ".", "activate", "(", "replace", "=", "True", ")", ")", ",", "existing", "=", "False", ")", "working_set", ".", "entries", "=", "[", "]", "list", "(", "map", "(", "working_set", ".", "add_entry", ",", "sys", ".", "path", ")", ")", "globals", "(", ")", ".", "update", "(", "locals", "(", ")", ")" ]
prepare the master working set and make the require() api available .
train
true
7,041
def _regexp_path(name, *names):
    return os.path.join(name, *names).replace('\\', '\\\\')
[ "def", "_regexp_path", "(", "name", ",", "*", "names", ")", ":", "return", "os", ".", "path", ".", "join", "(", "name", ",", "*", "names", ")", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")" ]
join two or more path components and create a regexp that will match that path .
train
false
7,044
def getGeometryOutputByManipulation(geometryOutput, xmlElement):
    xmlProcessor = xmlElement.getXMLProcessor()
    matchingPlugins = evaluate.getFromCreationEvaluatorPlugins(xmlProcessor.manipulationEvaluatorDictionary, xmlElement)
    matchingPlugins += evaluate.getMatchingPlugins(xmlProcessor.manipulationShapeDictionary, xmlElement)
    matchingPlugins.sort(evaluate.compareExecutionOrderAscending)
    for matchingPlugin in matchingPlugins:
        geometryOutput = matchingPlugin.getManipulatedGeometryOutput(geometryOutput, xmlElement)
    return geometryOutput
[ "def", "getGeometryOutputByManipulation", "(", "geometryOutput", ",", "xmlElement", ")", ":", "xmlProcessor", "=", "xmlElement", ".", "getXMLProcessor", "(", ")", "matchingPlugins", "=", "evaluate", ".", "getFromCreationEvaluatorPlugins", "(", "xmlProcessor", ".", "manipulationEvaluatorDictionary", ",", "xmlElement", ")", "matchingPlugins", "+=", "evaluate", ".", "getMatchingPlugins", "(", "xmlProcessor", ".", "manipulationShapeDictionary", ",", "xmlElement", ")", "matchingPlugins", ".", "sort", "(", "evaluate", ".", "compareExecutionOrderAscending", ")", "for", "matchingPlugin", "in", "matchingPlugins", ":", "geometryOutput", "=", "matchingPlugin", ".", "getManipulatedGeometryOutput", "(", "geometryOutput", ",", "xmlElement", ")", "return", "geometryOutput" ]
get geometry output by manipulation .
train
false
7,045
def apply_momentum(updates, params=None, momentum=0.9):
    if (params is None):
        params = updates.keys()
    updates = OrderedDict(updates)
    for param in params:
        value = param.get_value(borrow=True)
        velocity = theano.shared(np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable)
        x = ((momentum * velocity) + updates[param])
        updates[velocity] = (x - param)
        updates[param] = x
    return updates
[ "def", "apply_momentum", "(", "updates", ",", "params", "=", "None", ",", "momentum", "=", "0.9", ")", ":", "if", "(", "params", "is", "None", ")", ":", "params", "=", "updates", ".", "keys", "(", ")", "updates", "=", "OrderedDict", "(", "updates", ")", "for", "param", "in", "params", ":", "value", "=", "param", ".", "get_value", "(", "borrow", "=", "True", ")", "velocity", "=", "theano", ".", "shared", "(", "np", ".", "zeros", "(", "value", ".", "shape", ",", "dtype", "=", "value", ".", "dtype", ")", ",", "broadcastable", "=", "param", ".", "broadcastable", ")", "x", "=", "(", "(", "momentum", "*", "velocity", ")", "+", "updates", "[", "param", "]", ")", "updates", "[", "velocity", "]", "=", "(", "x", "-", "param", ")", "updates", "[", "param", "]", "=", "x", "return", "updates" ]
returns a modified update dictionary including momentum . generates update expressions of the form: velocity := momentum * velocity + updates[param] - param ; param := param + velocity . parameters: updates (ordereddict), a dictionary mapping parameters to update expressions ; params (iterable of shared variables) .
train
false
7,046
def SetupSimulator(ios):
    simulator_cmd = ('xcodebuild -workspace %s/clients/ios/ViewfinderWorkspace.xcworkspace -arch i386 -scheme Viewfinder -sdk iphonesimulator7.0' % os.environ['VF_HOME'])
    call(simulator_cmd, shell=True)
[ "def", "SetupSimulator", "(", "ios", ")", ":", "simulator_cmd", "=", "(", "'xcodebuild -workspace %s/clients/ios/ViewfinderWorkspace.xcworkspace -arch i386 -scheme Viewfinder -sdk iphonesimulator7.0'", "%", "os", ".", "environ", "[", "'VF_HOME'", "]", ")", "call", "(", "simulator_cmd", ",", "shell", "=", "True", ")" ]
build simulator app if necessary .
train
false
7,048
def transform_key(key, seed, rounds):
    cipher = AES.new(seed, AES.MODE_ECB)
    for n in range(0, rounds):
        key = cipher.encrypt(key)
    return sha256(key)
[ "def", "transform_key", "(", "key", ",", "seed", ",", "rounds", ")", ":", "cipher", "=", "AES", ".", "new", "(", "seed", ",", "AES", ".", "MODE_ECB", ")", "for", "n", "in", "range", "(", "0", ",", "rounds", ")", ":", "key", "=", "cipher", ".", "encrypt", "(", "key", ")", "return", "sha256", "(", "key", ")" ]
transform key with seed rounds times using aes ecb .
train
false
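For context, a minimal usage sketch of the transform_key snippet above. The key material, the round count, and the pycryptodome-style AES import are illustrative assumptions, and sha256 is assumed to be a helper returning a raw digest:

from Crypto.Cipher import AES  # assumed pycryptodome-compatible API
import hashlib

def sha256(data):
    # assumed helper: returns the raw 32-byte digest used by transform_key
    return hashlib.sha256(data).digest()

master = b'\x00' * 32  # hypothetical 32-byte composite key (two ECB blocks)
seed = b'\x11' * 32    # hypothetical 32-byte transform seed (AES-256 key)
derived = transform_key(master, seed, 6000)  # KeePass-style key stretching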
7,049
@flake8ext
def check_python3_no_filter(logical_line):
    msg = 'N343: Use list comprehension instead of filter(lambda obj: test(obj), data) on python3.'
    if filter_match.match(logical_line):
        (yield (0, msg))
[ "@", "flake8ext", "def", "check_python3_no_filter", "(", "logical_line", ")", ":", "msg", "=", "'N343: Use list comprehension instead of filter(lambda obj: test(obj), data) on python3.'", "if", "filter_match", ".", "match", "(", "logical_line", ")", ":", "(", "yield", "(", "0", ",", "msg", ")", ")" ]
n344 - use list comprehension instead of filter .
train
false
7,052
def parse_xmlsec_output(output):
    for line in output.split('\n'):
        if (line == 'OK'):
            return True
        elif (line == 'FAIL'):
            raise XmlsecError(output)
    raise XmlsecError(output)
[ "def", "parse_xmlsec_output", "(", "output", ")", ":", "for", "line", "in", "output", ".", "split", "(", "'\\n'", ")", ":", "if", "(", "line", "==", "'OK'", ")", ":", "return", "True", "elif", "(", "line", "==", "'FAIL'", ")", ":", "raise", "XmlsecError", "(", "output", ")", "raise", "XmlsecError", "(", "output", ")" ]
parse the output from xmlsec to try to find out if the command was successful or not .
train
false
7,053
def get_user_role(user, course_key):
    role = get_masquerade_role(user, course_key)
    if role:
        return role
    elif has_access(user, 'instructor', course_key):
        return 'instructor'
    elif has_access(user, 'staff', course_key):
        return 'staff'
    else:
        return 'student'
[ "def", "get_user_role", "(", "user", ",", "course_key", ")", ":", "role", "=", "get_masquerade_role", "(", "user", ",", "course_key", ")", "if", "role", ":", "return", "role", "elif", "has_access", "(", "user", ",", "'instructor'", ",", "course_key", ")", ":", "return", "'instructor'", "elif", "has_access", "(", "user", ",", "'staff'", ",", "course_key", ")", ":", "return", "'staff'", "else", ":", "return", "'student'" ]
what type of access: staff or instructor does this user have in studio? no code should use this for access control .
train
false
7,054
def _DoOneTry(function, args, kwargs):
    try:
        result = function(*args, **kwargs)
    except:
        original_exception = sys.exc_info()
        try:
            _GetConnection().rollback()
        except Exception:
            logging.exception('Exception sending Rollback:')
        (type, value, trace) = original_exception
        if isinstance(value, datastore_errors.Rollback):
            return (True, None)
        else:
            raise type, value, trace
    else:
        if _GetConnection().commit():
            return (True, result)
        else:
            logging.warning('Transaction collision. Retrying... %s', '')
            return (False, None)
[ "def", "_DoOneTry", "(", "function", ",", "args", ",", "kwargs", ")", ":", "try", ":", "result", "=", "function", "(", "*", "args", ",", "**", "kwargs", ")", "except", ":", "original_exception", "=", "sys", ".", "exc_info", "(", ")", "try", ":", "_GetConnection", "(", ")", ".", "rollback", "(", ")", "except", "Exception", ":", "logging", ".", "exception", "(", "'Exception sending Rollback:'", ")", "(", "type", ",", "value", ",", "trace", ")", "=", "original_exception", "if", "isinstance", "(", "value", ",", "datastore_errors", ".", "Rollback", ")", ":", "return", "(", "True", ",", "None", ")", "else", ":", "raise", "type", ",", "value", ",", "trace", "else", ":", "if", "_GetConnection", "(", ")", ".", "commit", "(", ")", ":", "return", "(", "True", ",", "result", ")", "else", ":", "logging", ".", "warning", "(", "'Transaction collision. Retrying... %s'", ",", "''", ")", "return", "(", "False", ",", "None", ")" ]
helper to call a function in a transaction .
train
false
7,055
def dirents(buf):
    d = []
    while buf:
        try:
            ent = linux_dirent(buf)
        except ValueError:
            break
        d.append(ent.d_name)
        buf = buf[len(ent):]
    return sorted(d)
[ "def", "dirents", "(", "buf", ")", ":", "d", "=", "[", "]", "while", "buf", ":", "try", ":", "ent", "=", "linux_dirent", "(", "buf", ")", "except", "ValueError", ":", "break", "d", ".", "append", "(", "ent", ".", "d_name", ")", "buf", "=", "buf", "[", "len", "(", "ent", ")", ":", "]", "return", "sorted", "(", "d", ")" ]
unpack_dents(buf) -> list . extracts data from a buffer emitted by getdents() . arguments: buf, a byte array . returns: a list of filenames .
train
false
7,057
def encode_multipart(boundary, data):
    lines = []
    for (key, value) in data.items():
        if isinstance(value, file):
            lines.extend([('--' + boundary), ('Content-Disposition: form-data; name="%s"' % key), '', ('--' + boundary), ('Content-Disposition: form-data; name="%s_file"; filename="%s"' % (key, value.name)), 'Content-Type: application/octet-stream', '', value.read()])
        elif hasattr(value, '__iter__'):
            for item in value:
                lines.extend([('--' + boundary), ('Content-Disposition: form-data; name="%s"' % key), '', str(item)])
        else:
            lines.extend([('--' + boundary), ('Content-Disposition: form-data; name="%s"' % key), '', str(value)])
    lines.extend([(('--' + boundary) + '--'), ''])
    return '\r\n'.join(lines)
[ "def", "encode_multipart", "(", "boundary", ",", "data", ")", ":", "lines", "=", "[", "]", "for", "(", "key", ",", "value", ")", "in", "data", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "file", ")", ":", "lines", ".", "extend", "(", "[", "(", "'--'", "+", "boundary", ")", ",", "(", "'Content-Disposition: form-data; name=\"%s\"'", "%", "key", ")", ",", "''", ",", "(", "'--'", "+", "boundary", ")", ",", "(", "'Content-Disposition: form-data; name=\"%s_file\"; filename=\"%s\"'", "%", "(", "key", ",", "value", ".", "name", ")", ")", ",", "'Content-Type: application/octet-stream'", ",", "''", ",", "value", ".", "read", "(", ")", "]", ")", "elif", "hasattr", "(", "value", ",", "'__iter__'", ")", ":", "for", "item", "in", "value", ":", "lines", ".", "extend", "(", "[", "(", "'--'", "+", "boundary", ")", ",", "(", "'Content-Disposition: form-data; name=\"%s\"'", "%", "key", ")", ",", "''", ",", "str", "(", "item", ")", "]", ")", "else", ":", "lines", ".", "extend", "(", "[", "(", "'--'", "+", "boundary", ")", ",", "(", "'Content-Disposition: form-data; name=\"%s\"'", "%", "key", ")", ",", "''", ",", "str", "(", "value", ")", "]", ")", "lines", ".", "extend", "(", "[", "(", "(", "'--'", "+", "boundary", ")", "+", "'--'", ")", ",", "''", "]", ")", "return", "'\\r\\n'", ".", "join", "(", "lines", ")" ]
like stream_encode_multipart but returns a tuple in the form where data is a bytestring .
train
false
7,058
def page_list(request, slug, template_name='groups/pages/page_list.html'):
    group = get_object_or_404(Group, slug=slug)
    return render(request, template_name, {'group': group, 'page_list': group.pages.all()})
[ "def", "page_list", "(", "request", ",", "slug", ",", "template_name", "=", "'groups/pages/page_list.html'", ")", ":", "group", "=", "get_object_or_404", "(", "Group", ",", "slug", "=", "slug", ")", "return", "render", "(", "request", ",", "template_name", ",", "{", "'group'", ":", "group", ",", "'page_list'", ":", "group", ".", "pages", ".", "all", "(", ")", "}", ")" ]
returns a list of pages for a group .
train
false
7,059
def test_launch_about_app():
    client = logged_in_client()
    client.click(jquery='("img.hue-swoosh")[0]')
    client.waits.forElement(classname='Hue-ABOUT', timeout='2000')
[ "def", "test_launch_about_app", "(", ")", ":", "client", "=", "logged_in_client", "(", ")", "client", ".", "click", "(", "jquery", "=", "'(\"img.hue-swoosh\")[0]'", ")", "client", ".", "waits", ".", "forElement", "(", "classname", "=", "'Hue-ABOUT'", ",", "timeout", "=", "'2000'", ")" ]
launches the about app .
train
false
7,060
def flattop(M, sym=True):
    if _len_guards(M):
        return np.ones(M)
    (M, needs_trunc) = _extend(M, sym)
    a = [0.21557895, 0.41663158, 0.277263158, 0.083578947, 0.006947368]
    w = _cos_win(M, a)
    return _truncate(w, needs_trunc)
[ "def", "flattop", "(", "M", ",", "sym", "=", "True", ")", ":", "if", "_len_guards", "(", "M", ")", ":", "return", "np", ".", "ones", "(", "M", ")", "(", "M", ",", "needs_trunc", ")", "=", "_extend", "(", "M", ",", "sym", ")", "a", "=", "[", "0.21557895", ",", "0.41663158", ",", "0.277263158", ",", "0.083578947", ",", "0.006947368", "]", "w", "=", "_cos_win", "(", "M", ",", "a", ")", "return", "_truncate", "(", "w", ",", "needs_trunc", ")" ]
return a flat top window .
train
false
7,061
def _is_xfce():
    try:
        return _readfrom((_get_x11_vars() + 'xprop -root _DT_SAVE_MODE'), shell=1).strip().endswith(' = "xfce4"')
    except OSError:
        return 0
[ "def", "_is_xfce", "(", ")", ":", "try", ":", "return", "_readfrom", "(", "(", "_get_x11_vars", "(", ")", "+", "'xprop -root _DT_SAVE_MODE'", ")", ",", "shell", "=", "1", ")", ".", "strip", "(", ")", ".", "endswith", "(", "' = \"xfce4\"'", ")", "except", "OSError", ":", "return", "0" ]
return whether xfce is in use .
train
false
7,063
def prctile(x, p=(0.0, 25.0, 50.0, 75.0, 100.0)):

    def _interpolate(a, b, fraction):
        u"Returns the point at the given fraction between a and b, where\n 'fraction' must be between 0 and 1.\n "
        return (a + ((b - a) * fraction))

    per = np.array(p)
    values = np.sort(x, axis=None)
    idxs = ((per / 100) * (values.shape[0] - 1))
    ai = idxs.astype(int)
    bi = (ai + 1)
    frac = (idxs % 1)
    cond = (bi >= len(values))
    if per.ndim:
        ai[cond] -= 1
        bi[cond] -= 1
        frac[cond] += 1
    elif cond:
        ai -= 1
        bi -= 1
        frac += 1
    return _interpolate(values[ai], values[bi], frac)
[ "def", "prctile", "(", "x", ",", "p", "=", "(", "0.0", ",", "25.0", ",", "50.0", ",", "75.0", ",", "100.0", ")", ")", ":", "def", "_interpolate", "(", "a", ",", "b", ",", "fraction", ")", ":", "return", "(", "a", "+", "(", "(", "b", "-", "a", ")", "*", "fraction", ")", ")", "per", "=", "np", ".", "array", "(", "p", ")", "values", "=", "np", ".", "sort", "(", "x", ",", "axis", "=", "None", ")", "idxs", "=", "(", "(", "per", "/", "100", ")", "*", "(", "values", ".", "shape", "[", "0", "]", "-", "1", ")", ")", "ai", "=", "idxs", ".", "astype", "(", "int", ")", "bi", "=", "(", "ai", "+", "1", ")", "frac", "=", "(", "idxs", "%", "1", ")", "cond", "=", "(", "bi", ">=", "len", "(", "values", ")", ")", "if", "per", ".", "ndim", ":", "ai", "[", "cond", "]", "-=", "1", "bi", "[", "cond", "]", "-=", "1", "frac", "[", "cond", "]", "+=", "1", "elif", "cond", ":", "ai", "-=", "1", "bi", "-=", "1", "frac", "+=", "1", "return", "_interpolate", "(", "values", "[", "ai", "]", ",", "values", "[", "bi", "]", ",", "frac", ")" ]
return the percentiles of *x* .
train
false
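A quick worked check of the prctile snippet above; it assumes only numpy, which the snippet already uses as np:

import numpy as np

x = np.arange(1, 101)  # the integers 1..100
# idxs = p/100 * 99 -> [24.75, 49.5, 74.25]; prctile linearly interpolates
# between the bracketing order statistics.
print(prctile(x, p=(25.0, 50.0, 75.0)))  # [25.75  50.5   75.25]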
7,065
def parse_synonyms(text):
    errors = []
    synonyms = set()
    for (i, line) in enumerate(text.split('\n'), 1):
        line = line.strip()
        if (not line):
            continue
        count = line.count('=>')
        if (count < 1):
            errors.append(('Syntax error on line %d: No => found.' % i))
        elif (count > 1):
            errors.append(('Syntax error on line %d: Too many => found.' % i))
        else:
            (from_words, to_words) = [s.strip() for s in line.split('=>')]
            synonyms.add((from_words, to_words))
    if errors:
        raise SynonymParseError(errors)
    else:
        return synonyms
[ "def", "parse_synonyms", "(", "text", ")", ":", "errors", "=", "[", "]", "synonyms", "=", "set", "(", ")", "for", "(", "i", ",", "line", ")", "in", "enumerate", "(", "text", ".", "split", "(", "'\\n'", ")", ",", "1", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "(", "not", "line", ")", ":", "continue", "count", "=", "line", ".", "count", "(", "'=>'", ")", "if", "(", "count", "<", "1", ")", ":", "errors", ".", "append", "(", "(", "'Syntax error on line %d: No => found.'", "%", "i", ")", ")", "elif", "(", "count", ">", "1", ")", ":", "errors", ".", "append", "(", "(", "'Syntax error on line %d: Too many => found.'", "%", "i", ")", ")", "else", ":", "(", "from_words", ",", "to_words", ")", "=", "[", "s", ".", "strip", "(", ")", "for", "s", "in", "line", ".", "split", "(", "'=>'", ")", "]", "synonyms", ".", "add", "(", "(", "from_words", ",", "to_words", ")", ")", "if", "errors", ":", "raise", "SynonymParseError", "(", "errors", ")", "else", ":", "return", "synonyms" ]
parse synonyms from user entered text .
train
false
7,066
def build_product_order(arg, gens):
    gens2idx = {}
    for (i, g) in enumerate(gens):
        gens2idx[g] = i
    order = []
    for expr in arg:
        name = expr[0]
        var = expr[1:]

        def makelambda(var):
            return _ItemGetter((gens2idx[g] for g in var))

        order.append((monomial_key(name), makelambda(var)))
    return ProductOrder(*order)
[ "def", "build_product_order", "(", "arg", ",", "gens", ")", ":", "gens2idx", "=", "{", "}", "for", "(", "i", ",", "g", ")", "in", "enumerate", "(", "gens", ")", ":", "gens2idx", "[", "g", "]", "=", "i", "order", "=", "[", "]", "for", "expr", "in", "arg", ":", "name", "=", "expr", "[", "0", "]", "var", "=", "expr", "[", "1", ":", "]", "def", "makelambda", "(", "var", ")", ":", "return", "_ItemGetter", "(", "(", "gens2idx", "[", "g", "]", "for", "g", "in", "var", ")", ")", "order", ".", "append", "(", "(", "monomial_key", "(", "name", ")", ",", "makelambda", "(", "var", ")", ")", ")", "return", "ProductOrder", "(", "*", "order", ")" ]
build a monomial order on gens .
train
false
7,068
def rws(t):
    for c in ['\n', ' DCTB ', ' ']:
        t = t.replace(c, '')
    return t
[ "def", "rws", "(", "t", ")", ":", "for", "c", "in", "[", "'\\n'", ",", "' DCTB '", ",", "' '", "]", ":", "t", "=", "t", ".", "replace", "(", "c", ",", "''", ")", "return", "t" ]
remove white spaces .
train
false
7,070
def discretize_integrate_2D(model, x_range, y_range):
    from scipy.integrate import dblquad
    x = np.arange((x_range[0] - 0.5), (x_range[1] + 0.5))
    y = np.arange((y_range[0] - 0.5), (y_range[1] + 0.5))
    values = np.empty(((y.size - 1), (x.size - 1)))
    for i in range((x.size - 1)):
        for j in range((y.size - 1)):
            values[(j, i)] = dblquad(model, x[i], x[(i + 1)], (lambda x: y[j]), (lambda x: y[(j + 1)]))[0]
    return values
[ "def", "discretize_integrate_2D", "(", "model", ",", "x_range", ",", "y_range", ")", ":", "from", "scipy", ".", "integrate", "import", "dblquad", "x", "=", "np", ".", "arange", "(", "(", "x_range", "[", "0", "]", "-", "0.5", ")", ",", "(", "x_range", "[", "1", "]", "+", "0.5", ")", ")", "y", "=", "np", ".", "arange", "(", "(", "y_range", "[", "0", "]", "-", "0.5", ")", ",", "(", "y_range", "[", "1", "]", "+", "0.5", ")", ")", "values", "=", "np", ".", "empty", "(", "(", "(", "y", ".", "size", "-", "1", ")", ",", "(", "x", ".", "size", "-", "1", ")", ")", ")", "for", "i", "in", "range", "(", "(", "x", ".", "size", "-", "1", ")", ")", ":", "for", "j", "in", "range", "(", "(", "y", ".", "size", "-", "1", ")", ")", ":", "values", "[", "(", "j", ",", "i", ")", "]", "=", "dblquad", "(", "model", ",", "x", "[", "i", "]", ",", "x", "[", "(", "i", "+", "1", ")", "]", ",", "(", "lambda", "x", ":", "y", "[", "j", "]", ")", ",", "(", "lambda", "x", ":", "y", "[", "(", "j", "+", "1", ")", "]", ")", ")", "[", "0", "]", "return", "values" ]
discretize model by integrating the model over the pixel .
train
false
7,071
def is_ancestor_name(frame, node):
    try:
        bases = frame.bases
    except AttributeError:
        return False
    for base in bases:
        if (node in base.nodes_of_class(astroid.Name)):
            return True
    return False
[ "def", "is_ancestor_name", "(", "frame", ",", "node", ")", ":", "try", ":", "bases", "=", "frame", ".", "bases", "except", "AttributeError", ":", "return", "False", "for", "base", "in", "bases", ":", "if", "(", "node", "in", "base", ".", "nodes_of_class", "(", "astroid", ".", "Name", ")", ")", ":", "return", "True", "return", "False" ]
return true if frame is an astroid class node with node used in its bases .
train
false
7,072
@set_database
def setup_content_paths(context, db):
    context.unavailable_content_path = 'khan/foo/bar/unavail'
    context.available_content_path = get_random_content(kinds=['Exercise'], available=True)[0]['path']

    def iterator_content_items(ids=None, channel='khan', language='en'):
        return [(context.available_content_path, {'available': True})]

    annotate_content_models(db=db, iterator_content_items=iterator_content_items)
    with Using(db, [Item], with_transaction=False):
        context._unavailable_item = Item.create(title='Unavailable item', description='baz', available=False, kind='Video', id='3', slug='unavail', path=context.unavailable_content_path)
[ "@", "set_database", "def", "setup_content_paths", "(", "context", ",", "db", ")", ":", "context", ".", "unavailable_content_path", "=", "'khan/foo/bar/unavail'", "context", ".", "available_content_path", "=", "get_random_content", "(", "kinds", "=", "[", "'Exercise'", "]", ",", "available", "=", "True", ")", "[", "0", "]", "[", "'path'", "]", "def", "iterator_content_items", "(", "ids", "=", "None", ",", "channel", "=", "'khan'", ",", "language", "=", "'en'", ")", ":", "return", "[", "(", "context", ".", "available_content_path", ",", "{", "'available'", ":", "True", "}", ")", "]", "annotate_content_models", "(", "db", "=", "db", ",", "iterator_content_items", "=", "iterator_content_items", ")", "with", "Using", "(", "db", ",", "[", "Item", "]", ",", "with_transaction", "=", "False", ")", ":", "context", ".", "_unavailable_item", "=", "Item", ".", "create", "(", "title", "=", "'Unavailable item'", ",", "description", "=", "'baz'", ",", "available", "=", "False", ",", "kind", "=", "'Video'", ",", "id", "=", "'3'", ",", "slug", "=", "'unavail'", ",", "path", "=", "context", ".", "unavailable_content_path", ")" ]
creates available content items and adds their urls to the context object .
train
false
7,073
def is_peering_connection_pending(conn_id=None, conn_name=None, region=None, key=None, keyid=None, profile=None):
    if (not _exactly_one((conn_id, conn_name))):
        raise SaltInvocationError('Exactly one of conn_id or conn_name must be provided.')
    conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)
    if conn_id:
        vpcs = conn.describe_vpc_peering_connections(VpcPeeringConnectionIds=[conn_id]).get('VpcPeeringConnections', [])
    else:
        filters = [{'Name': 'tag:Name', 'Values': [conn_name]}, {'Name': 'status-code', 'Values': [ACTIVE, PENDING_ACCEPTANCE, PROVISIONING]}]
        vpcs = conn.describe_vpc_peering_connections(Filters=filters).get('VpcPeeringConnections', [])
    if (not vpcs):
        return False
    elif (len(vpcs) > 1):
        raise SaltInvocationError('Found more than one ID for the VPC peering connection ({0}). Please call this function with an ID instead.'.format((conn_id or conn_name)))
    else:
        status = vpcs[0]['Status']['Code']
        return (status == PENDING_ACCEPTANCE)
[ "def", "is_peering_connection_pending", "(", "conn_id", "=", "None", ",", "conn_name", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "if", "(", "not", "_exactly_one", "(", "(", "conn_id", ",", "conn_name", ")", ")", ")", ":", "raise", "SaltInvocationError", "(", "'Exactly one of conn_id or conn_name must be provided.'", ")", "conn", "=", "_get_conn3", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "conn_id", ":", "vpcs", "=", "conn", ".", "describe_vpc_peering_connections", "(", "VpcPeeringConnectionIds", "=", "[", "conn_id", "]", ")", ".", "get", "(", "'VpcPeeringConnections'", ",", "[", "]", ")", "else", ":", "filters", "=", "[", "{", "'Name'", ":", "'tag:Name'", ",", "'Values'", ":", "[", "conn_name", "]", "}", ",", "{", "'Name'", ":", "'status-code'", ",", "'Values'", ":", "[", "ACTIVE", ",", "PENDING_ACCEPTANCE", ",", "PROVISIONING", "]", "}", "]", "vpcs", "=", "conn", ".", "describe_vpc_peering_connections", "(", "Filters", "=", "filters", ")", ".", "get", "(", "'VpcPeeringConnections'", ",", "[", "]", ")", "if", "(", "not", "vpcs", ")", ":", "return", "False", "elif", "(", "len", "(", "vpcs", ")", ">", "1", ")", ":", "raise", "SaltInvocationError", "(", "'Found more than one ID for the VPC peering connection ({0}). Please call this function with an ID instead.'", ".", "format", "(", "(", "conn_id", "or", "conn_name", ")", ")", ")", "else", ":", "status", "=", "vpcs", "[", "0", "]", "[", "'Status'", "]", "[", "'Code'", "]", "return", "(", "status", "==", "PENDING_ACCEPTANCE", ")" ]
check if a vpc peering connection is in the pending state .
train
true
7,074
def pythagoras(zcontext, url):
    zsock = zcontext.socket(zmq.REP)
    zsock.bind(url)
    while True:
        numbers = zsock.recv_json()
        zsock.send_json(sum(((n * n) for n in numbers)))
[ "def", "pythagoras", "(", "zcontext", ",", "url", ")", ":", "zsock", "=", "zcontext", ".", "socket", "(", "zmq", ".", "REP", ")", "zsock", ".", "bind", "(", "url", ")", "while", "True", ":", "numbers", "=", "zsock", ".", "recv_json", "(", ")", "zsock", ".", "send_json", "(", "sum", "(", "(", "(", "n", "*", "n", ")", "for", "n", "in", "numbers", ")", ")", ")" ]
return the sum-of-squares of number sequences .
train
false
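A minimal REQ-side client sketch for the pythagoras REP service above; the TCP endpoint is a placeholder and pyzmq is assumed:

import zmq

ctx = zmq.Context()
zsock = ctx.socket(zmq.REQ)
zsock.connect('tcp://127.0.0.1:5555')  # must match the url the service bound
zsock.send_json([3, 4])
print(zsock.recv_json())  # 25, i.e. 3*3 + 4*4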
7,075
def test_steradian():
    assert u.sr.is_equivalent((u.rad * u.rad))
    results = u.sr.compose(units=u.cgs.bases)
    assert (results[0].bases[0] is u.rad)
    results = u.sr.compose(units=u.cgs.__dict__)
    assert (results[0].bases[0] is u.sr)
[ "def", "test_steradian", "(", ")", ":", "assert", "u", ".", "sr", ".", "is_equivalent", "(", "(", "u", ".", "rad", "*", "u", ".", "rad", ")", ")", "results", "=", "u", ".", "sr", ".", "compose", "(", "units", "=", "u", ".", "cgs", ".", "bases", ")", "assert", "(", "results", "[", "0", "]", ".", "bases", "[", "0", "]", "is", "u", ".", "rad", ")", "results", "=", "u", ".", "sr", ".", "compose", "(", "units", "=", "u", ".", "cgs", ".", "__dict__", ")", "assert", "(", "results", "[", "0", "]", ".", "bases", "[", "0", "]", "is", "u", ".", "sr", ")" ]
issue #599 .
train
false
7,076
def load_from_library(library, label, names):
    subset = Library()
    for name in names:
        found = False
        if (name in library.tags):
            found = True
            subset.tags[name] = library.tags[name]
        if (name in library.filters):
            found = True
            subset.filters[name] = library.filters[name]
        if (found is False):
            raise TemplateSyntaxError(("'%s' is not a valid tag or filter in tag library '%s'" % (name, label)))
    return subset
[ "def", "load_from_library", "(", "library", ",", "label", ",", "names", ")", ":", "subset", "=", "Library", "(", ")", "for", "name", "in", "names", ":", "found", "=", "False", "if", "(", "name", "in", "library", ".", "tags", ")", ":", "found", "=", "True", "subset", ".", "tags", "[", "name", "]", "=", "library", ".", "tags", "[", "name", "]", "if", "(", "name", "in", "library", ".", "filters", ")", ":", "found", "=", "True", "subset", ".", "filters", "[", "name", "]", "=", "library", ".", "filters", "[", "name", "]", "if", "(", "found", "is", "False", ")", ":", "raise", "TemplateSyntaxError", "(", "(", "\"'%s' is not a valid tag or filter in tag library '%s'\"", "%", "(", "name", ",", "label", ")", ")", ")", "return", "subset" ]
return a subset of tags and filters from a library .
train
false
7,077
def join_rows(rows, joiner=' '):
    rows = list(rows)
    fixed_row = rows[0][:]
    for row in rows[1:]:
        if (len(row) == 0):
            row = ['']
        fixed_row[(-1)] += ('%s%s' % (joiner, row[0]))
        fixed_row.extend(row[1:])
    return fixed_row
[ "def", "join_rows", "(", "rows", ",", "joiner", "=", "' '", ")", ":", "rows", "=", "list", "(", "rows", ")", "fixed_row", "=", "rows", "[", "0", "]", "[", ":", "]", "for", "row", "in", "rows", "[", "1", ":", "]", ":", "if", "(", "len", "(", "row", ")", "==", "0", ")", ":", "row", "=", "[", "''", "]", "fixed_row", "[", "(", "-", "1", ")", "]", "+=", "(", "'%s%s'", "%", "(", "joiner", ",", "row", "[", "0", "]", ")", ")", "fixed_row", ".", "extend", "(", "row", "[", "1", ":", "]", ")", "return", "fixed_row" ]
given a series of rows, merge them into a single row, joining the boundary cells with joiner .
train
false
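A short trace of the join_rows snippet above, showing how continuation rows fold into the first row (values are illustrative):

rows = [['a', 'b'], ['c', 'd'], []]
# 'c' is joined onto 'b' with the default joiner, 'd' is appended,
# and the empty row contributes an empty string to the last cell.
print(join_rows(rows))  # ['a', 'b c', 'd ']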
7,078
def if_current_page(context, link_url, positive=True, negative=False):
    page = context['__CACTUS_CURRENT_PAGE__']
    return (positive if (page.link_url == link_url) else negative)
[ "def", "if_current_page", "(", "context", ",", "link_url", ",", "positive", "=", "True", ",", "negative", "=", "False", ")", ":", "page", "=", "context", "[", "'__CACTUS_CURRENT_PAGE__'", "]", "return", "(", "positive", "if", "(", "page", ".", "link_url", "==", "link_url", ")", "else", "negative", ")" ]
return one of the passed parameters if the url passed is the current one .
train
false
7,079
def _parse_jid(data):
    match = JID_PATTERN.match(data)
    if (not match):
        raise InvalidJID(u'JID could not be parsed')
    (node, domain, resource) = match.groups()
    node = _validate_node(node)
    domain = _validate_domain(domain)
    resource = _validate_resource(resource)
    return (node, domain, resource)
[ "def", "_parse_jid", "(", "data", ")", ":", "match", "=", "JID_PATTERN", ".", "match", "(", "data", ")", "if", "(", "not", "match", ")", ":", "raise", "InvalidJID", "(", "u'JID could not be parsed'", ")", "(", "node", ",", "domain", ",", "resource", ")", "=", "match", ".", "groups", "(", ")", "node", "=", "_validate_node", "(", "node", ")", "domain", "=", "_validate_domain", "(", "domain", ")", "resource", "=", "_validate_resource", "(", "resource", ")", "return", "(", "node", ",", "domain", ",", "resource", ")" ]
parse string data into the node, domain, and resource components of a jid .
train
false
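A hedged usage sketch for _parse_jid above (assumes its module-level JID_PATTERN and _validate_* helpers are in scope; the sample JID is an assumption):

# split a full JID into node, domain and resource
node, domain, resource = _parse_jid('alice@example.com/laptop')
# -> ('alice', 'example.com', 'laptop'), subject to the _validate_* rules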
7,080
def trainExtraTrees(features, n_estimators): [X, Y] = listOfFeatures2Matrix(features) et = sklearn.ensemble.ExtraTreesClassifier(n_estimators=n_estimators) et.fit(X, Y) return et
[ "def", "trainExtraTrees", "(", "features", ",", "n_estimators", ")", ":", "[", "X", ",", "Y", "]", "=", "listOfFeatures2Matrix", "(", "features", ")", "et", "=", "sklearn", ".", "ensemble", ".", "ExtraTreesClassifier", "(", "n_estimators", "=", "n_estimators", ")", "et", ".", "fit", "(", "X", ",", "Y", ")", "return", "et" ]
train an extra trees classifier. note: this function is simply a wrapper to the sklearn functionality for extra trees classifiers; see function trainsvm_feature() to use a wrapper on both the feature extraction and the svm training processes .
train
false
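A usage sketch for trainExtraTrees above, under stated assumptions: features is a list of per-class feature matrices as expected by listOfFeatures2Matrix, and both helpers from the enclosing module are in scope.

import numpy as np
# two classes of 2-D feature vectors (rows are samples)
features = [np.random.rand(50, 2), np.random.rand(50, 2) + 1.0]
et = trainExtraTrees(features, n_estimators=100)
print(et.predict([[0.5, 0.5]]))  # predicts a class label for a new point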
7,081
@pytest.mark.django_db def test_multiselect_inactive_users_and_contacts(rf, regular_user): view = MultiselectAjaxView.as_view() assert ('joe' in regular_user.username) results = _get_search_results(rf, view, 'auth.User', 'joe') assert (len(results) == 1) assert (results[0].get('id') == regular_user.id) assert (results[0].get('name') == regular_user.username) contact = PersonContact.objects.create(first_name='Joe', last_name='Somebody') results = _get_search_results(rf, view, 'shuup.PersonContact', 'joe') assert (len(results) == 1) assert (results[0].get('id') == contact.id) assert (results[0].get('name') == contact.name) contact.is_active = False contact.save() results = _get_search_results(rf, view, 'shuup.PersonContact', 'joe') assert (len(results) == 0)
[ "@", "pytest", ".", "mark", ".", "django_db", "def", "test_multiselect_inactive_users_and_contacts", "(", "rf", ",", "regular_user", ")", ":", "view", "=", "MultiselectAjaxView", ".", "as_view", "(", ")", "assert", "(", "'joe'", "in", "regular_user", ".", "username", ")", "results", "=", "_get_search_results", "(", "rf", ",", "view", ",", "'auth.User'", ",", "'joe'", ")", "assert", "(", "len", "(", "results", ")", "==", "1", ")", "assert", "(", "results", "[", "0", "]", ".", "get", "(", "'id'", ")", "==", "regular_user", ".", "id", ")", "assert", "(", "results", "[", "0", "]", ".", "get", "(", "'name'", ")", "==", "regular_user", ".", "username", ")", "contact", "=", "PersonContact", ".", "objects", ".", "create", "(", "first_name", "=", "'Joe'", ",", "last_name", "=", "'Somebody'", ")", "results", "=", "_get_search_results", "(", "rf", ",", "view", ",", "'shuup.PersonContact'", ",", "'joe'", ")", "assert", "(", "len", "(", "results", ")", "==", "1", ")", "assert", "(", "results", "[", "0", "]", ".", "get", "(", "'id'", ")", "==", "contact", ".", "id", ")", "assert", "(", "results", "[", "0", "]", ".", "get", "(", "'name'", ")", "==", "contact", ".", "name", ")", "contact", ".", "is_active", "=", "False", "contact", ".", "save", "(", ")", "results", "=", "_get_search_results", "(", "rf", ",", "view", ",", "'shuup.PersonContact'", ",", "'joe'", ")", "assert", "(", "len", "(", "results", ")", "==", "0", ")" ]
make sure inactive users and contacts are filtered from search results .
train
false
7,082
def get_user_settings(user_id, strict=False): user_settings = get_users_settings([user_id])[0] if (strict and (user_settings is None)): logging.error(('Could not find user with id %s' % user_id)) raise Exception('User not found.') return user_settings
[ "def", "get_user_settings", "(", "user_id", ",", "strict", "=", "False", ")", ":", "user_settings", "=", "get_users_settings", "(", "[", "user_id", "]", ")", "[", "0", "]", "if", "(", "strict", "and", "(", "user_settings", "is", "None", ")", ")", ":", "logging", ".", "error", "(", "(", "'Could not find user with id %s'", "%", "user_id", ")", ")", "raise", "Exception", "(", "'User not found.'", ")", "return", "user_settings" ]
return the user settings for a single user .
train
false
7,083
def in_test_phase(x, alt): if (learning_phase() is 1): return alt elif (learning_phase() is 0): return x x = switch(learning_phase(), alt, x) x._uses_learning_phase = True return x
[ "def", "in_test_phase", "(", "x", ",", "alt", ")", ":", "if", "(", "learning_phase", "(", ")", "is", "1", ")", ":", "return", "alt", "elif", "(", "learning_phase", "(", ")", "is", "0", ")", ":", "return", "x", "x", "=", "switch", "(", "learning_phase", "(", ")", ",", "alt", ",", "x", ")", "x", ".", "_uses_learning_phase", "=", "True", "return", "x" ]
selects x in test phase, and alt otherwise .
train
false
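A hedged sketch of in_test_phase above, assuming it behaves like the keras backend helper of the same name (the dropout rate is an assumption):

from keras import backend as K
x = K.placeholder(shape=(None, 10))
# dropped-out activations while training, the raw tensor at test time
y = in_test_phase(x, K.dropout(x, 0.5))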
7,084
def get_path_if_valid(pathstr, cwd=None, relative=False, check_exists=False): pathstr = pathstr.strip() log.url.debug('Checking if {!r} is a path'.format(pathstr)) expanded = os.path.expanduser(pathstr) if os.path.isabs(expanded): path = expanded elif (relative and cwd): path = os.path.join(cwd, expanded) elif relative: try: path = os.path.abspath(expanded) except OSError: path = None else: path = None if check_exists: if ((path is not None) and os.path.exists(path)): log.url.debug('URL is a local file') else: path = None return path
[ "def", "get_path_if_valid", "(", "pathstr", ",", "cwd", "=", "None", ",", "relative", "=", "False", ",", "check_exists", "=", "False", ")", ":", "pathstr", "=", "pathstr", ".", "strip", "(", ")", "log", ".", "url", ".", "debug", "(", "'Checking if {!r} is a path'", ".", "format", "(", "pathstr", ")", ")", "expanded", "=", "os", ".", "path", ".", "expanduser", "(", "pathstr", ")", "if", "os", ".", "path", ".", "isabs", "(", "expanded", ")", ":", "path", "=", "expanded", "elif", "(", "relative", "and", "cwd", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "cwd", ",", "expanded", ")", "elif", "relative", ":", "try", ":", "path", "=", "os", ".", "path", ".", "abspath", "(", "expanded", ")", "except", "OSError", ":", "path", "=", "None", "else", ":", "path", "=", "None", "if", "check_exists", ":", "if", "(", "(", "path", "is", "not", "None", ")", "and", "os", ".", "path", ".", "exists", "(", "path", ")", ")", ":", "log", ".", "url", ".", "debug", "(", "'URL is a local file'", ")", "else", ":", "path", "=", "None", "return", "path" ]
check whether pathstr is a valid local file path, optionally resolving it relative to cwd and verifying that it exists .
train
false
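A brief sketch of get_path_if_valid above (paths are assumptions; the snippet's log helper is assumed importable):

# absolute path, only returned if it actually exists on disk
get_path_if_valid('/etc/hosts', check_exists=True)
# relative path resolved against an explicit working directory
get_path_if_valid('notes.txt', cwd='/tmp', relative=True)  # -> '/tmp/notes.txt'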
7,086
def add_masquerade(zone=None, permanent=True): if zone: cmd = '--zone={0} --add-masquerade'.format(zone) else: cmd = '--add-masquerade' if permanent: cmd += ' --permanent' return __firewall_cmd(cmd)
[ "def", "add_masquerade", "(", "zone", "=", "None", ",", "permanent", "=", "True", ")", ":", "if", "zone", ":", "cmd", "=", "'--zone={0} --add-masquerade'", ".", "format", "(", "zone", ")", "else", ":", "cmd", "=", "'--add-masquerade'", "if", "permanent", ":", "cmd", "+=", "' --permanent'", "return", "__firewall_cmd", "(", "cmd", ")" ]
enable masquerade on a zone .
train
true
7,087
def fmatch_iter(needle, haystack, min_ratio=0.6): for key in haystack: ratio = SequenceMatcher(None, needle, key).ratio() if (ratio >= min_ratio): (yield (ratio, key))
[ "def", "fmatch_iter", "(", "needle", ",", "haystack", ",", "min_ratio", "=", "0.6", ")", ":", "for", "key", "in", "haystack", ":", "ratio", "=", "SequenceMatcher", "(", "None", ",", "needle", ",", "key", ")", ".", "ratio", "(", ")", "if", "(", "ratio", ">=", "min_ratio", ")", ":", "(", "yield", "(", "ratio", ",", "key", ")", ")" ]
fuzzy match: iteratively yield (ratio, key) pairs for entries in haystack whose similarity to needle is at least min_ratio .
train
false
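A small sketch of the fmatch_iter generator above (sample strings are assumptions; SequenceMatcher comes from difflib in the enclosing module):

# 'apple' scores 0.8 against 'appel'; 'banana' falls below the 0.6 cutoff
list(fmatch_iter('appel', ['apple', 'banana']))  # -> [(0.8, 'apple')]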
7,088
def decode_smtp_header(smtp_header): if smtp_header: text = decode_header(smtp_header.replace('\r', '')) return ' '.join([ustr(x[0], x[1]) for x in text]) return u''
[ "def", "decode_smtp_header", "(", "smtp_header", ")", ":", "if", "smtp_header", ":", "text", "=", "decode_header", "(", "smtp_header", ".", "replace", "(", "'\\r'", ",", "''", ")", ")", "return", "' '", ".", "join", "(", "[", "ustr", "(", "x", "[", "0", "]", ",", "x", "[", "1", "]", ")", "for", "x", "in", "text", "]", ")", "return", "u''" ]
returns unicode() string conversion of the given encoded smtp header text .
train
false
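A hedged sketch of decode_smtp_header above (assumes its odoo context, where decode_header comes from email.header and ustr is odoo's unicode helper):

# MIME encoded-word headers come back as readable unicode
decode_smtp_header('=?utf-8?q?Caf=C3=A9?=')  # -> u'Café'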
7,089
def revert_property(committer_id, name): config_property = config_domain.Registry.get_config_property(name) if (config_property is None): raise Exception(('No config property with name %s found.' % name)) set_property(committer_id, name, config_property.default_value)
[ "def", "revert_property", "(", "committer_id", ",", "name", ")", ":", "config_property", "=", "config_domain", ".", "Registry", ".", "get_config_property", "(", "name", ")", "if", "(", "config_property", "is", "None", ")", ":", "raise", "Exception", "(", "(", "'No config property with name %s found.'", "%", "name", ")", ")", "set_property", "(", "committer_id", ",", "name", ",", "config_property", ".", "default_value", ")" ]
reverts a property value to the default value .
train
false
7,090
def _find_all_simple(path): results = (os.path.join(base, file) for (base, dirs, files) in os.walk(path, followlinks=True) for file in files) return filter(os.path.isfile, results)
[ "def", "_find_all_simple", "(", "path", ")", ":", "results", "=", "(", "os", ".", "path", ".", "join", "(", "base", ",", "file", ")", "for", "(", "base", ",", "dirs", ",", "files", ")", "in", "os", ".", "walk", "(", "path", ",", "followlinks", "=", "True", ")", "for", "file", "in", "files", ")", "return", "filter", "(", "os", ".", "path", ".", "isfile", ",", "results", ")" ]
find all files under path .
train
true
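A quick sketch of _find_all_simple above (the directory is an assumption):

# lazily yields every file under the tree, following symlinked directories
for path in _find_all_simple('/tmp'):
    print(path)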
7,091
def mocked_exception(*args, **kwargs): raise OSError
[ "def", "mocked_exception", "(", "*", "args", ",", "**", "kwargs", ")", ":", "raise", "OSError" ]
mock exception thrown by requests .
train
false
7,092
def test_template_complex(): schema = vol.Schema(cv.template_complex) for value in (None, '{{ partial_print }', '{% if True %}Hello'): with pytest.raises(vol.MultipleInvalid): schema(value) options = (1, 'Hello', '{{ beer }}', '{% if 1 == 1 %}Hello{% else %}World{% endif %}', {'test': 1, 'test2': '{{ beer }}'}, ['{{ beer }}', 1]) for value in options: schema(value)
[ "def", "test_template_complex", "(", ")", ":", "schema", "=", "vol", ".", "Schema", "(", "cv", ".", "template_complex", ")", "for", "value", "in", "(", "None", ",", "'{{ partial_print }'", ",", "'{% if True %}Hello'", ")", ":", "with", "pytest", ".", "raises", "(", "vol", ".", "MultipleInvalid", ")", ":", "schema", "(", "value", ")", "options", "=", "(", "1", ",", "'Hello'", ",", "'{{ beer }}'", ",", "'{% if 1 == 1 %}Hello{% else %}World{% endif %}'", ",", "{", "'test'", ":", "1", ",", "'test2'", ":", "'{{ beer }}'", "}", ",", "[", "'{{ beer }}'", ",", "1", "]", ")", "for", "value", "in", "options", ":", "schema", "(", "value", ")" ]
test template_complex validator .
train
false
7,094
def validate_url_path(val): if (not validate_str()(val)): return False return (VALIDATE_PATH_RE.match(val).end() == len(val))
[ "def", "validate_url_path", "(", "val", ")", ":", "if", "(", "not", "validate_str", "(", ")", "(", "val", ")", ")", ":", "return", "False", "return", "(", "VALIDATE_PATH_RE", ".", "match", "(", "val", ")", ".", "end", "(", ")", "==", "len", "(", "val", ")", ")" ]
true if val is matched by the path component grammar in rfc3986 .
train
false
7,095
def require_target_existed(targets): if (not targets['list']): msg = output_log(MSG.NO_CONNECTED_TARGET) raise exception.VSPError(msg)
[ "def", "require_target_existed", "(", "targets", ")", ":", "if", "(", "not", "targets", "[", "'list'", "]", ")", ":", "msg", "=", "output_log", "(", "MSG", ".", "NO_CONNECTED_TARGET", ")", "raise", "exception", ".", "VSPError", "(", "msg", ")" ]
check if the target list includes one or more members .
train
false
7,096
def cinder_except_format_assert(logical_line): if logical_line.startswith('self.assertRaises(Exception'): (yield (1, 'CINDER N202: assertRaises Exception too broad'))
[ "def", "cinder_except_format_assert", "(", "logical_line", ")", ":", "if", "logical_line", ".", "startswith", "(", "'self.assertRaises(Exception'", ")", ":", "(", "yield", "(", "1", ",", "'CINDER N202: assertRaises Exception too broad'", ")", ")" ]
check for assertraises(exception) usage that is too broad (cinder n202) .
train
false
7,099
def sorting_dates(start, advertised_start, announcement): try: start = dateutil.parser.parse(advertised_start) if (start.tzinfo is None): start = start.replace(tzinfo=utc) except (ValueError, AttributeError): start = start now = datetime.now(utc) return (announcement, start, now)
[ "def", "sorting_dates", "(", "start", ",", "advertised_start", ",", "announcement", ")", ":", "try", ":", "start", "=", "dateutil", ".", "parser", ".", "parse", "(", "advertised_start", ")", "if", "(", "start", ".", "tzinfo", "is", "None", ")", ":", "start", "=", "start", ".", "replace", "(", "tzinfo", "=", "utc", ")", "except", "(", "ValueError", ",", "AttributeError", ")", ":", "start", "=", "start", "now", "=", "datetime", ".", "now", "(", "utc", ")", "return", "(", "announcement", ",", "start", ",", "now", ")" ]
utility function to get datetime objects for dates used to compute the is_new flag and the sorting_score .
train
false
7,100
def squared_clustering_errors(inputs, k): clusterer = KMeans(k) clusterer.train(inputs) means = clusterer.means assignments = map(clusterer.classify, inputs) return sum((squared_distance(input, means[cluster]) for (input, cluster) in zip(inputs, assignments)))
[ "def", "squared_clustering_errors", "(", "inputs", ",", "k", ")", ":", "clusterer", "=", "KMeans", "(", "k", ")", "clusterer", ".", "train", "(", "inputs", ")", "means", "=", "clusterer", ".", "means", "assignments", "=", "map", "(", "clusterer", ".", "classify", ",", "inputs", ")", "return", "sum", "(", "(", "squared_distance", "(", "input", ",", "means", "[", "cluster", "]", ")", "for", "(", "input", ",", "cluster", ")", "in", "zip", "(", "inputs", ",", "assignments", ")", ")", ")" ]
finds the total squared error from k-means clustering the inputs .
train
false
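A sketch of the usual elbow-curve usage of squared_clustering_errors above (assumes the KMeans and squared_distance helpers from the enclosing codebase are in scope; inputs is an assumed list of points):

# total squared error shrinks as k grows; look for the 'elbow'
errors = [squared_clustering_errors(inputs, k) for k in range(1, 10)]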
7,102
def int_validator(value, context): if (value is None): return None if (hasattr(value, 'strip') and (not value.strip())): return None try: (whole, part) = divmod(value, 1) except TypeError: try: return int(value) except ValueError: pass else: if (not part): try: return int(whole) except TypeError: pass raise Invalid(_('Invalid integer'))
[ "def", "int_validator", "(", "value", ",", "context", ")", ":", "if", "(", "value", "is", "None", ")", ":", "return", "None", "if", "(", "hasattr", "(", "value", ",", "'strip'", ")", "and", "(", "not", "value", ".", "strip", "(", ")", ")", ")", ":", "return", "None", "try", ":", "(", "whole", ",", "part", ")", "=", "divmod", "(", "value", ",", "1", ")", "except", "TypeError", ":", "try", ":", "return", "int", "(", "value", ")", "except", "ValueError", ":", "pass", "else", ":", "if", "(", "not", "part", ")", ":", "try", ":", "return", "int", "(", "whole", ")", "except", "TypeError", ":", "pass", "raise", "Invalid", "(", "_", "(", "'Invalid integer'", ")", ")" ]
return an integer for value: empty or none values return none, integral numbers and numeric strings are coerced to int, and anything else raises invalid .
train
false
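A behavior sketch for int_validator above (context is unused here and passed as None; assumes ckan's Invalid and _ helpers are in scope):

assert int_validator('42', None) == 42    # numeric strings are coerced
assert int_validator(42.0, None) == 42    # whole floats pass
assert int_validator('  ', None) is None  # blank input means no value
int_validator(42.5, None)                 # raises Invalid: fractional part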
7,103
def traverse_depthfirst(obj, opts, visitors): return traverse_using(iterate_depthfirst(obj, opts), obj, visitors)
[ "def", "traverse_depthfirst", "(", "obj", ",", "opts", ",", "visitors", ")", ":", "return", "traverse_using", "(", "iterate_depthfirst", "(", "obj", ",", "opts", ")", ",", "obj", ",", "visitors", ")" ]
traverse and visit the given expression structure using the depth-first iterator .
train
false
7,104
def _insert_object_resp(bucket=None, name=None, data=None): assert (type(data) is bytes) hasher = hashlib.md5() hasher.update(data) md5_hex_hash = hasher.hexdigest() return {u'bucket': bucket, u'name': name, u'md5Hash': _hex_to_base64(md5_hex_hash), u'timeCreated': _datetime_to_gcptime(), u'size': str(len(data)), u'_data': data}
[ "def", "_insert_object_resp", "(", "bucket", "=", "None", ",", "name", "=", "None", ",", "data", "=", "None", ")", ":", "assert", "(", "type", "(", "data", ")", "is", "bytes", ")", "hasher", "=", "hashlib", ".", "md5", "(", ")", "hasher", ".", "update", "(", "data", ")", "md5_hex_hash", "=", "hasher", ".", "hexdigest", "(", ")", "return", "{", "u'bucket'", ":", "bucket", ",", "u'name'", ":", "name", ",", "u'md5Hash'", ":", "_hex_to_base64", "(", "md5_hex_hash", ")", ",", "u'timeCreated'", ":", "_datetime_to_gcptime", "(", ")", ",", "u'size'", ":", "str", "(", "len", "(", "data", ")", ")", ",", "u'_data'", ":", "data", "}" ]
fake gcs object metadata .
train
false
7,105
def create_xml_attributes(module, xml): xml_attrs = {} for (attr, val) in xml.attrib.iteritems(): if (attr not in module.fields): if (attr == 'parent_sequential_url'): attr = 'parent_url' xml_attrs[attr] = val module.xml_attributes = xml_attrs
[ "def", "create_xml_attributes", "(", "module", ",", "xml", ")", ":", "xml_attrs", "=", "{", "}", "for", "(", "attr", ",", "val", ")", "in", "xml", ".", "attrib", ".", "iteritems", "(", ")", ":", "if", "(", "attr", "not", "in", "module", ".", "fields", ")", ":", "if", "(", "attr", "==", "'parent_sequential_url'", ")", ":", "attr", "=", "'parent_url'", "xml_attrs", "[", "attr", "]", "=", "val", "module", ".", "xml_attributes", "=", "xml_attrs" ]
make up for modules which don't define xml_attributes by creating them here and populating them from the xml attributes .
train
false
7,106
def to_numpy_code(code): if (code is None): return native_code code = code.lower() if (code in aliases['little']): return '<' elif (code in aliases['big']): return '>' elif (code in aliases['native']): return native_code elif (code in aliases['swapped']): return swapped_code else: raise ValueError(('We cannot handle byte order %s' % code))
[ "def", "to_numpy_code", "(", "code", ")", ":", "if", "(", "code", "is", "None", ")", ":", "return", "native_code", "code", "=", "code", ".", "lower", "(", ")", "if", "(", "code", "in", "aliases", "[", "'little'", "]", ")", ":", "return", "'<'", "elif", "(", "code", "in", "aliases", "[", "'big'", "]", ")", ":", "return", "'>'", "elif", "(", "code", "in", "aliases", "[", "'native'", "]", ")", ":", "return", "native_code", "elif", "(", "code", "in", "aliases", "[", "'swapped'", "]", ")", ":", "return", "swapped_code", "else", ":", "raise", "ValueError", "(", "(", "'We cannot handle byte order %s'", "%", "code", ")", ")" ]
convert various byte order codings to numpy format ('<' or '>') .
train
false
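A few illustrative calls for to_numpy_code above (assumes the module-level aliases table registers these spellings, along with native_code and swapped_code):

to_numpy_code('little')  # -> '<'
to_numpy_code('BIG')     # lowered first, -> '>'
to_numpy_code('native')  # -> native_code, '<' or '>' depending on the platform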
7,107
def get_qiime_library_version(): qiime_dir = get_qiime_project_dir() qiime_version = qiime_library_version sha_cmd = ('git --git-dir %s/.git rev-parse HEAD' % qiime_dir) (sha_o, sha_e, sha_r) = qiime_system_call(sha_cmd) git_sha = sha_o.strip() branch_cmd = ('git --git-dir %s/.git rev-parse --abbrev-ref HEAD' % qiime_dir) (branch_o, branch_e, branch_r) = qiime_system_call(branch_cmd) git_branch = branch_o.strip() if (is_valid_git_refname(git_branch) and is_valid_git_sha1(git_sha)): return ('%s, %s@%s' % (__version__, git_branch, git_sha[0:7])) else: return ('%s' % __version__)
[ "def", "get_qiime_library_version", "(", ")", ":", "qiime_dir", "=", "get_qiime_project_dir", "(", ")", "qiime_version", "=", "qiime_library_version", "sha_cmd", "=", "(", "'git --git-dir %s/.git rev-parse HEAD'", "%", "qiime_dir", ")", "(", "sha_o", ",", "sha_e", ",", "sha_r", ")", "=", "qiime_system_call", "(", "sha_cmd", ")", "git_sha", "=", "sha_o", ".", "strip", "(", ")", "branch_cmd", "=", "(", "'git --git-dir %s/.git rev-parse --abbrev-ref HEAD'", "%", "qiime_dir", ")", "(", "branch_o", ",", "branch_e", ",", "branch_r", ")", "=", "qiime_system_call", "(", "branch_cmd", ")", "git_branch", "=", "branch_o", ".", "strip", "(", ")", "if", "(", "is_valid_git_refname", "(", "git_branch", ")", "and", "is_valid_git_sha1", "(", "git_sha", ")", ")", ":", "return", "(", "'%s, %s@%s'", "%", "(", "__version__", ",", "git_branch", ",", "git_sha", "[", "0", ":", "7", "]", ")", ")", "else", ":", "return", "(", "'%s'", "%", "__version__", ")" ]
get qiime version and the git sha + current branch .
train
false
7,108
def rar_sort(a, b): aext = a.split('.')[(-1)] bext = b.split('.')[(-1)] if ((aext == 'rar') and (bext == 'rar')): return cmp(a, b) elif (aext == 'rar'): return (-1) elif (bext == 'rar'): return 1 else: return cmp(a, b)
[ "def", "rar_sort", "(", "a", ",", "b", ")", ":", "aext", "=", "a", ".", "split", "(", "'.'", ")", "[", "(", "-", "1", ")", "]", "bext", "=", "b", ".", "split", "(", "'.'", ")", "[", "(", "-", "1", ")", "]", "if", "(", "(", "aext", "==", "'rar'", ")", "and", "(", "bext", "==", "'rar'", ")", ")", ":", "return", "cmp", "(", "a", ",", "b", ")", "elif", "(", "aext", "==", "'rar'", ")", ":", "return", "(", "-", "1", ")", "elif", "(", "bext", "==", "'rar'", ")", ":", "return", "1", "else", ":", "return", "cmp", "(", "a", ",", "b", ")" ]
define sort method for rar file names .
train
false
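A sketch of the rar_sort comparator above (python 2, since it relies on cmp; file names are assumptions):

names = ['a.r00', 'b.rar', 'a.rar']
# .rar volumes sort first; everything else keeps plain string order
sorted(names, cmp=rar_sort)  # -> ['a.rar', 'b.rar', 'a.r00']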
7,109
def date_format(date=None, format='%Y-%m-%d'): return date_cast(date).strftime(format)
[ "def", "date_format", "(", "date", "=", "None", ",", "format", "=", "'%Y-%m-%d'", ")", ":", "return", "date_cast", "(", "date", ")", ".", "strftime", "(", "format", ")" ]
format a date using a standard strftime format string .
train
false
7,112
def test_pickling(): el = EarthLocation((0.0 * u.m), (6000 * u.km), (6000 * u.km)) s = pickle.dumps(el) el2 = pickle.loads(s) assert (el == el2)
[ "def", "test_pickling", "(", ")", ":", "el", "=", "EarthLocation", "(", "(", "0.0", "*", "u", ".", "m", ")", ",", "(", "6000", "*", "u", ".", "km", ")", ",", "(", "6000", "*", "u", ".", "km", ")", ")", "s", "=", "pickle", ".", "dumps", "(", "el", ")", "el2", "=", "pickle", ".", "loads", "(", "s", ")", "assert", "(", "el", "==", "el2", ")" ]
regression test against #4304 .
train
false
7,113
def dmp_zz_mignotte_bound(f, u, K): a = dmp_max_norm(f, u, K) b = abs(dmp_ground_LC(f, u, K)) n = sum(dmp_degree_list(f, u)) return (((K.sqrt(K((n + 1))) * (2 ** n)) * a) * b)
[ "def", "dmp_zz_mignotte_bound", "(", "f", ",", "u", ",", "K", ")", ":", "a", "=", "dmp_max_norm", "(", "f", ",", "u", ",", "K", ")", "b", "=", "abs", "(", "dmp_ground_LC", "(", "f", ",", "u", ",", "K", ")", ")", "n", "=", "sum", "(", "dmp_degree_list", "(", "f", ",", "u", ")", ")", "return", "(", "(", "(", "K", ".", "sqrt", "(", "K", "(", "(", "n", "+", "1", ")", ")", ")", "*", "(", "2", "**", "n", ")", ")", "*", "a", ")", "*", "b", ")" ]
mignotte bound for multivariate polynomials in k[x] .
train
false
7,117
@utils.arg('server', metavar='<server>', help=_('Name or ID of server.')) @utils.arg('address', metavar='<address>', help=_('IP Address.')) def do_floating_ip_disassociate(cs, args): _disassociate_floating_ip(cs, args)
[ "@", "utils", ".", "arg", "(", "'server'", ",", "metavar", "=", "'<server>'", ",", "help", "=", "_", "(", "'Name or ID of server.'", ")", ")", "@", "utils", ".", "arg", "(", "'address'", ",", "metavar", "=", "'<address>'", ",", "help", "=", "_", "(", "'IP Address.'", ")", ")", "def", "do_floating_ip_disassociate", "(", "cs", ",", "args", ")", ":", "_disassociate_floating_ip", "(", "cs", ",", "args", ")" ]
disassociate a floating ip address from a server .
train
false
7,118
@register.filter def format_name(user, format='first_last'): last_name = (getattr(user, 'last_name', None) or user.get('last_name', None)) first_name = (getattr(user, 'first_name', None) or user.get('first_name', None)) username = (getattr(user, 'username', None) or user.get('username', None)) if (format == 'first_last'): if (last_name and first_name): return ('%s %s' % (first_name, last_name)) else: return (first_name or last_name or username) elif (format == 'last_first'): if (last_name and first_name): return ('%s, %s' % (last_name, first_name)) else: return (last_name or first_name or username) else: raise NotImplementedError(('Unrecognized format string: %s' % format))
[ "@", "register", ".", "filter", "def", "format_name", "(", "user", ",", "format", "=", "'first_last'", ")", ":", "last_name", "=", "(", "getattr", "(", "user", ",", "'last_name'", ",", "None", ")", "or", "user", ".", "get", "(", "'last_name'", ",", "None", ")", ")", "first_name", "=", "(", "getattr", "(", "user", ",", "'first_name'", ",", "None", ")", "or", "user", ".", "get", "(", "'first_name'", ",", "None", ")", ")", "username", "=", "(", "getattr", "(", "user", ",", "'username'", ",", "None", ")", "or", "user", ".", "get", "(", "'username'", ",", "None", ")", ")", "if", "(", "format", "==", "'first_last'", ")", ":", "if", "(", "last_name", "and", "first_name", ")", ":", "return", "(", "'%s %s'", "%", "(", "first_name", ",", "last_name", ")", ")", "else", ":", "return", "(", "first_name", "or", "last_name", "or", "username", ")", "elif", "(", "format", "==", "'last_first'", ")", ":", "if", "(", "last_name", "and", "first_name", ")", ":", "return", "(", "'%s, %s'", "%", "(", "last_name", ",", "first_name", ")", ")", "else", ":", "return", "(", "last_name", "or", "first_name", "or", "username", ")", "else", ":", "raise", "NotImplementedError", "(", "(", "'Unrecognized format string: %s'", "%", "format", ")", ")" ]
format a user's name as 'first_last' or 'last_first'; can be used for objects or dictionaries .
train
false
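A dictionary-based sketch of the format_name filter above (sample names are assumptions):

user = {'first_name': 'Ada', 'last_name': 'Lovelace', 'username': 'ada'}
format_name(user)                # -> 'Ada Lovelace'
format_name(user, 'last_first')  # -> 'Lovelace, Ada'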
7,119
def trim_emerge_default_opts(value): return trim_var('EMERGE_DEFAULT_OPTS', value)
[ "def", "trim_emerge_default_opts", "(", "value", ")", ":", "return", "trim_var", "(", "'EMERGE_DEFAULT_OPTS'", ",", "value", ")" ]
remove a value from the emerge_default_opts variable in the make.conf file .
train
false
7,121
def test_choices(context_file, default_context, extra_context): expected_context = {u'choices_template': OrderedDict([(u'full_name', u'Raphael Pierzina'), (u'github_username', u'hackebrot'), (u'project_name', u'Kivy Project'), (u'repo_name', u'{{cookiecutter.project_name|lower}}'), (u'orientation', [u'landscape', u'all', u'portrait'])])} generated_context = generate.generate_context(context_file, default_context, extra_context) assert (generated_context == expected_context)
[ "def", "test_choices", "(", "context_file", ",", "default_context", ",", "extra_context", ")", ":", "expected_context", "=", "{", "u'choices_template'", ":", "OrderedDict", "(", "[", "(", "u'full_name'", ",", "u'Raphael Pierzina'", ")", ",", "(", "u'github_username'", ",", "u'hackebrot'", ")", ",", "(", "u'project_name'", ",", "u'Kivy Project'", ")", ",", "(", "u'repo_name'", ",", "u'{{cookiecutter.project_name|lower}}'", ")", ",", "(", "u'orientation'", ",", "[", "u'landscape'", ",", "u'all'", ",", "u'portrait'", "]", ")", "]", ")", "}", "generated_context", "=", "generate", ".", "generate_context", "(", "context_file", ",", "default_context", ",", "extra_context", ")", "assert", "(", "generated_context", "==", "expected_context", ")" ]
make sure that the default for list variables is based on the user config and the list as such is not changed to a single value .
train
false
7,122
def create_download_tasks(subtitles_by_video, languages, multi): tasks = [] for (video, subtitles) in subtitles_by_video.iteritems(): if (not subtitles): continue if (not multi): task = DownloadTask(video, list(subtitles)) logger.debug((u'Created task %r' % task)) tasks.append(task) continue for (_, by_language) in groupby(subtitles, (lambda s: languages.index(s.language))): task = DownloadTask(video, list(by_language)) logger.debug((u'Created task %r' % task)) tasks.append(task) return tasks
[ "def", "create_download_tasks", "(", "subtitles_by_video", ",", "languages", ",", "multi", ")", ":", "tasks", "=", "[", "]", "for", "(", "video", ",", "subtitles", ")", "in", "subtitles_by_video", ".", "iteritems", "(", ")", ":", "if", "(", "not", "subtitles", ")", ":", "continue", "if", "(", "not", "multi", ")", ":", "task", "=", "DownloadTask", "(", "video", ",", "list", "(", "subtitles", ")", ")", "logger", ".", "debug", "(", "(", "u'Created task %r'", "%", "task", ")", ")", "tasks", ".", "append", "(", "task", ")", "continue", "for", "(", "_", ",", "by_language", ")", "in", "groupby", "(", "subtitles", ",", "(", "lambda", "s", ":", "languages", ".", "index", "(", "s", ".", "language", ")", ")", ")", ":", "task", "=", "DownloadTask", "(", "video", ",", "list", "(", "by_language", ")", ")", "logger", ".", "debug", "(", "(", "u'Created task %r'", "%", "task", ")", ")", "tasks", ".", "append", "(", "task", ")", "return", "tasks" ]
create a list of downloadtask objects from the found subtitles: one task per video, or one per language group when multi is set .
train
false
7,124
def _finish_auth_url(params): return u'{}?{}'.format(reverse('finish_auth'), urllib.urlencode(params))
[ "def", "_finish_auth_url", "(", "params", ")", ":", "return", "u'{}?{}'", ".", "format", "(", "reverse", "(", "'finish_auth'", ")", ",", "urllib", ".", "urlencode", "(", "params", ")", ")" ]
construct the url that follows login/registration if we are doing auto-enrollment .
train
false
7,125
def HttpServer(): return GlobalProcess().HttpServer()
[ "def", "HttpServer", "(", ")", ":", "return", "GlobalProcess", "(", ")", ".", "HttpServer", "(", ")" ]
returns the httpserver used by this process .
train
false
7,126
def list_privileges(name, **client_args): client = _client(**client_args) res = {} for item in client.get_list_privileges(name): res[item['database']] = item['privilege'].split()[0].lower() return res
[ "def", "list_privileges", "(", "name", ",", "**", "client_args", ")", ":", "client", "=", "_client", "(", "**", "client_args", ")", "res", "=", "{", "}", "for", "item", "in", "client", ".", "get_list_privileges", "(", "name", ")", ":", "res", "[", "item", "[", "'database'", "]", "]", "=", "item", "[", "'privilege'", "]", ".", "split", "(", ")", "[", "0", "]", ".", "lower", "(", ")", "return", "res" ]
list privileges for a given user .
train
true
7,127
def check_lapack_return(context, builder, res): with builder.if_then(cgutils.is_not_null(builder, res), likely=False): pyapi = context.get_python_api(builder) pyapi.gil_ensure() pyapi.fatal_error('LAPACK wrapper returned with an error')
[ "def", "check_lapack_return", "(", "context", ",", "builder", ",", "res", ")", ":", "with", "builder", ".", "if_then", "(", "cgutils", ".", "is_not_null", "(", "builder", ",", "res", ")", ",", "likely", "=", "False", ")", ":", "pyapi", "=", "context", ".", "get_python_api", "(", "builder", ")", "pyapi", ".", "gil_ensure", "(", ")", "pyapi", ".", "fatal_error", "(", "'LAPACK wrapper returned with an error'", ")" ]
check the integer error return from one of the lapack wrappers in _helperlib .
train
false
7,130
def resolve_environment(service_dict, environment=None): env = {} for env_file in service_dict.get(u'env_file', []): env.update(env_vars_from_file(env_file)) env.update(parse_environment(service_dict.get(u'environment'))) return dict((resolve_env_var(k, v, environment) for (k, v) in six.iteritems(env)))
[ "def", "resolve_environment", "(", "service_dict", ",", "environment", "=", "None", ")", ":", "env", "=", "{", "}", "for", "env_file", "in", "service_dict", ".", "get", "(", "u'env_file'", ",", "[", "]", ")", ":", "env", ".", "update", "(", "env_vars_from_file", "(", "env_file", ")", ")", "env", ".", "update", "(", "parse_environment", "(", "service_dict", ".", "get", "(", "u'environment'", ")", ")", ")", "return", "dict", "(", "(", "resolve_env_var", "(", "k", ",", "v", ",", "environment", ")", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "env", ")", ")", ")" ]
unpack any environment variables from env_file entries, merge in the service's environment mapping, and resolve each variable against the given environment .
train
false
7,131
def empty_asset_trashcan(course_locs): store = contentstore('trashcan') for course_loc in course_locs: thumbs = store.get_all_content_thumbnails_for_course(course_loc) for thumb in thumbs: print 'Deleting {0}...'.format(thumb) store.delete(thumb['_id']) (assets, __) = store.get_all_content_for_course(course_loc) for asset in assets: print 'Deleting {0}...'.format(asset) store.delete(asset['_id'])
[ "def", "empty_asset_trashcan", "(", "course_locs", ")", ":", "store", "=", "contentstore", "(", "'trashcan'", ")", "for", "course_loc", "in", "course_locs", ":", "thumbs", "=", "store", ".", "get_all_content_thumbnails_for_course", "(", "course_loc", ")", "for", "thumb", "in", "thumbs", ":", "print", "'Deleting {0}...'", ".", "format", "(", "thumb", ")", "store", ".", "delete", "(", "thumb", "[", "'_id'", "]", ")", "(", "assets", ",", "__", ")", "=", "store", ".", "get_all_content_for_course", "(", "course_loc", ")", "for", "asset", "in", "assets", ":", "print", "'Deleting {0}...'", ".", "format", "(", "asset", ")", "store", ".", "delete", "(", "asset", "[", "'_id'", "]", ")" ]
this method will hard delete all assets from the trashcan .
train
false
7,132
def create_base_network(input_dim): seq = Sequential() seq.add(Dense(128, input_shape=(input_dim,), activation='relu')) seq.add(Dropout(0.1)) seq.add(Dense(128, activation='relu')) seq.add(Dropout(0.1)) seq.add(Dense(128, activation='relu')) return seq
[ "def", "create_base_network", "(", "input_dim", ")", ":", "seq", "=", "Sequential", "(", ")", "seq", ".", "add", "(", "Dense", "(", "128", ",", "input_shape", "=", "(", "input_dim", ",", ")", ",", "activation", "=", "'relu'", ")", ")", "seq", ".", "add", "(", "Dropout", "(", "0.1", ")", ")", "seq", ".", "add", "(", "Dense", "(", "128", ",", "activation", "=", "'relu'", ")", ")", "seq", ".", "add", "(", "Dropout", "(", "0.1", ")", ")", "seq", ".", "add", "(", "Dense", "(", "128", ",", "activation", "=", "'relu'", ")", ")", "return", "seq" ]
base network to be shared (equivalent to feature extraction) .
train
false
7,133
def find_files_with_cmd(dirname='.'): try: proc = subprocess.Popen(['hg', 'locate', '-I', os.path.abspath(dirname)], stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE, cwd=dirname) (stdout, stderr) = proc.communicate() except: return [] return stdout.splitlines()
[ "def", "find_files_with_cmd", "(", "dirname", "=", "'.'", ")", ":", "try", ":", "proc", "=", "subprocess", ".", "Popen", "(", "[", "'hg'", ",", "'locate'", ",", "'-I'", ",", "os", ".", "path", ".", "abspath", "(", "dirname", ")", "]", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "cwd", "=", "dirname", ")", "(", "stdout", ",", "stderr", ")", "=", "proc", ".", "communicate", "(", ")", "except", ":", "return", "[", "]", "return", "stdout", ".", "splitlines", "(", ")" ]
use the hg command to recursively find versioned files in dirname .
train
false
7,134
@protocol.commands.add(u'consume', state=protocol.BOOL) def consume(context, state): context.core.tracklist.set_consume(state)
[ "@", "protocol", ".", "commands", ".", "add", "(", "u'consume'", ",", "state", "=", "protocol", ".", "BOOL", ")", "def", "consume", "(", "context", ",", "state", ")", ":", "context", ".", "core", ".", "tracklist", ".", "set_consume", "(", "state", ")" ]
set the consume state: when consume is on, each played track is removed from the tracklist .
train
false
7,136
def get_ignored_traceback(tb): tb_list = [] while tb: tb_list.append(tb) tb = tb.tb_next ignored_tracebacks = [] for tb in reversed(tb_list): if ('__unittest' in tb.tb_frame.f_globals): ignored_tracebacks.append(tb) else: break if ignored_tracebacks: return ignored_tracebacks[(-1)]
[ "def", "get_ignored_traceback", "(", "tb", ")", ":", "tb_list", "=", "[", "]", "while", "tb", ":", "tb_list", ".", "append", "(", "tb", ")", "tb", "=", "tb", ".", "tb_next", "ignored_tracebacks", "=", "[", "]", "for", "tb", "in", "reversed", "(", "tb_list", ")", ":", "if", "(", "'__unittest'", "in", "tb", ".", "tb_frame", ".", "f_globals", ")", ":", "ignored_tracebacks", ".", "append", "(", "tb", ")", "else", ":", "break", "if", "ignored_tracebacks", ":", "return", "ignored_tracebacks", "[", "(", "-", "1", ")", "]" ]
retrieve the first traceback of an ignored trailing chain .
train
false
7,137
def locale_identifiers(): return [stem for (stem, extension) in [os.path.splitext(filename) for filename in os.listdir(_dirname)] if ((extension == '.dat') and (stem != 'root'))]
[ "def", "locale_identifiers", "(", ")", ":", "return", "[", "stem", "for", "(", "stem", ",", "extension", ")", "in", "[", "os", ".", "path", ".", "splitext", "(", "filename", ")", "for", "filename", "in", "os", ".", "listdir", "(", "_dirname", ")", "]", "if", "(", "(", "extension", "==", "'.dat'", ")", "and", "(", "stem", "!=", "'root'", ")", ")", "]" ]
return a list of all locale identifiers for which locale data is available .
train
false
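A brief sketch of locale_identifiers above (assumes babel's locale data directory backs the module-level _dirname):

# check whether CLDR data ships for a given locale
'en_US' in locale_identifiers()  # -> True on a standard babel install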