Dataset schema (per-record columns, with observed value ranges):
- id_within_dataset: int64, values 1 to 55.5k
- snippet: string, 19 to 14.2k characters
- tokens: list of strings, 6 to 1.63k items
- nl: string, 6 to 352 characters
- split_within_dataset: string, 1 distinct value ('train')
- is_duplicated: bool, 2 distinct values
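For reference, a minimal sketch (not part of the dataset itself) of what one record looks like when materialized as a plain Python dict; the snippet and tokens values are abbreviated excerpts of the first record below, shown only to illustrate the shape of a row.

# Minimal sketch of one record from this dump as a plain Python dict.
# Field names come from the schema above; the "snippet" and "tokens"
# values are abbreviated excerpts of record 3221 (full values appear below).
record = {
    'id_within_dataset': 3221,
    'snippet': 'def use_bulk_ops(view_func): ...',  # full source text of one function
    'tokens': ['def', 'use_bulk_ops', '(', 'view_func', ')', ':'],  # snippet split into lexer tokens
    'nl': 'wraps internal request handling inside a modulestore bulk op .',
    'split_within_dataset': 'train',
    'is_duplicated': False,
}

# The tokens column is a tokenization of the snippet, so the first
# token of every record is the first lexeme of its code:
assert record['tokens'][0] == 'def'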
3,221
def use_bulk_ops(view_func):
    @wraps(view_func)
    def wrapped_view(request, course_id, *args, **kwargs):
        course_key = CourseKey.from_string(course_id)
        with modulestore().bulk_operations(course_key):
            return view_func(request, course_key, *args, **kwargs)
    return wrapped_view
[ "def", "use_bulk_ops", "(", "view_func", ")", ":", "@", "wraps", "(", "view_func", ")", "def", "wrapped_view", "(", "request", ",", "course_id", ",", "*", "args", ",", "**", "kwargs", ")", ":", "course_key", "=", "CourseKey", ".", "from_string", "(", "course_id", ")", "with", "modulestore", "(", ")", ".", "bulk_operations", "(", "course_key", ")", ":", "return", "view_func", "(", "request", ",", "course_key", ",", "*", "args", ",", "**", "kwargs", ")", "return", "wrapped_view" ]
wraps internal request handling inside a modulestore bulk op .
train
false
3,222
def cbranch_or_continue(builder, cond, bbtrue):
    bbcont = builder.append_basic_block('.continue')
    builder.cbranch(cond, bbtrue, bbcont)
    builder.position_at_end(bbcont)
    return bbcont
[ "def", "cbranch_or_continue", "(", "builder", ",", "cond", ",", "bbtrue", ")", ":", "bbcont", "=", "builder", ".", "append_basic_block", "(", "'.continue'", ")", "builder", ".", "cbranch", "(", "cond", ",", "bbtrue", ",", "bbcont", ")", "builder", ".", "position_at_end", "(", "bbcont", ")", "return", "bbcont" ]
branch conditionally or continue .
train
false
3,223
def track_info(recording, index=None, medium=None, medium_index=None, medium_total=None):
    info = beets.autotag.hooks.TrackInfo(recording['title'], recording['id'], index=index, medium=medium, medium_index=medium_index, medium_total=medium_total, data_source=u'MusicBrainz', data_url=track_url(recording['id']))
    if recording.get('artist-credit'):
        (info.artist, info.artist_sort, info.artist_credit) = _flatten_artist_credit(recording['artist-credit'])
        artist = recording['artist-credit'][0]['artist']
        info.artist_id = artist['id']
    if recording.get('length'):
        info.length = (int(recording['length']) / 1000.0)
    lyricist = []
    composer = []
    for work_relation in recording.get('work-relation-list', ()):
        if (work_relation['type'] != 'performance'):
            continue
        for artist_relation in work_relation['work'].get('artist-relation-list', ()):
            if ('type' in artist_relation):
                type = artist_relation['type']
                if (type == 'lyricist'):
                    lyricist.append(artist_relation['artist']['name'])
                elif (type == 'composer'):
                    composer.append(artist_relation['artist']['name'])
    if lyricist:
        info.lyricist = u', '.join(lyricist)
    if composer:
        info.composer = u', '.join(composer)
    arranger = []
    for artist_relation in recording.get('artist-relation-list', ()):
        if ('type' in artist_relation):
            type = artist_relation['type']
            if (type == 'arranger'):
                arranger.append(artist_relation['artist']['name'])
    if arranger:
        info.arranger = u', '.join(arranger)
    info.decode()
    return info
[ "def", "track_info", "(", "recording", ",", "index", "=", "None", ",", "medium", "=", "None", ",", "medium_index", "=", "None", ",", "medium_total", "=", "None", ")", ":", "info", "=", "beets", ".", "autotag", ".", "hooks", ".", "TrackInfo", "(", "recording", "[", "'title'", "]", ",", "recording", "[", "'id'", "]", ",", "index", "=", "index", ",", "medium", "=", "medium", ",", "medium_index", "=", "medium_index", ",", "medium_total", "=", "medium_total", ",", "data_source", "=", "u'MusicBrainz'", ",", "data_url", "=", "track_url", "(", "recording", "[", "'id'", "]", ")", ")", "if", "recording", ".", "get", "(", "'artist-credit'", ")", ":", "(", "info", ".", "artist", ",", "info", ".", "artist_sort", ",", "info", ".", "artist_credit", ")", "=", "_flatten_artist_credit", "(", "recording", "[", "'artist-credit'", "]", ")", "artist", "=", "recording", "[", "'artist-credit'", "]", "[", "0", "]", "[", "'artist'", "]", "info", ".", "artist_id", "=", "artist", "[", "'id'", "]", "if", "recording", ".", "get", "(", "'length'", ")", ":", "info", ".", "length", "=", "(", "int", "(", "recording", "[", "'length'", "]", ")", "/", "1000.0", ")", "lyricist", "=", "[", "]", "composer", "=", "[", "]", "for", "work_relation", "in", "recording", ".", "get", "(", "'work-relation-list'", ",", "(", ")", ")", ":", "if", "(", "work_relation", "[", "'type'", "]", "!=", "'performance'", ")", ":", "continue", "for", "artist_relation", "in", "work_relation", "[", "'work'", "]", ".", "get", "(", "'artist-relation-list'", ",", "(", ")", ")", ":", "if", "(", "'type'", "in", "artist_relation", ")", ":", "type", "=", "artist_relation", "[", "'type'", "]", "if", "(", "type", "==", "'lyricist'", ")", ":", "lyricist", ".", "append", "(", "artist_relation", "[", "'artist'", "]", "[", "'name'", "]", ")", "elif", "(", "type", "==", "'composer'", ")", ":", "composer", ".", "append", "(", "artist_relation", "[", "'artist'", "]", "[", "'name'", "]", ")", "if", "lyricist", ":", "info", ".", "lyricist", "=", "u', '", ".", "join", "(", "lyricist", ")", "if", "composer", ":", "info", ".", "composer", "=", "u', '", ".", "join", "(", "composer", ")", "arranger", "=", "[", "]", "for", "artist_relation", "in", "recording", ".", "get", "(", "'artist-relation-list'", ",", "(", ")", ")", ":", "if", "(", "'type'", "in", "artist_relation", ")", ":", "type", "=", "artist_relation", "[", "'type'", "]", "if", "(", "type", "==", "'arranger'", ")", ":", "arranger", ".", "append", "(", "artist_relation", "[", "'artist'", "]", "[", "'name'", "]", ")", "if", "arranger", ":", "info", ".", "arranger", "=", "u', '", ".", "join", "(", "arranger", ")", "info", ".", "decode", "(", ")", "return", "info" ]
translates a musicbrainz recording result dictionary into a beets trackinfo object .
train
false
3,224
def user_num_documents(user):
    return Document.objects.filter(revisions__creator=user).exclude(html__startswith='<p>REDIRECT <a').distinct().count()
[ "def", "user_num_documents", "(", "user", ")", ":", "return", "Document", ".", "objects", ".", "filter", "(", "revisions__creator", "=", "user", ")", ".", "exclude", "(", "html__startswith", "=", "'<p>REDIRECT <a'", ")", ".", "distinct", "(", ")", ".", "count", "(", ")" ]
count the number of documents a user has contributed to .
train
false
3,225
def remote_interpreter(conn, namespace=None):
    if (namespace is None):
        namespace = {'conn': conn}
    std = RedirectedStd(conn)
    try:
        std.redirect()
        conn.modules[__name__]._remote_interpreter_server_side(**namespace)
    finally:
        std.restore()
[ "def", "remote_interpreter", "(", "conn", ",", "namespace", "=", "None", ")", ":", "if", "(", "namespace", "is", "None", ")", ":", "namespace", "=", "{", "'conn'", ":", "conn", "}", "std", "=", "RedirectedStd", "(", "conn", ")", "try", ":", "std", ".", "redirect", "(", ")", "conn", ".", "modules", "[", "__name__", "]", ".", "_remote_interpreter_server_side", "(", "**", "namespace", ")", "finally", ":", "std", ".", "restore", "(", ")" ]
starts an interactive interpreter on the server .
train
false
3,226
def demo_learning_curve():
    postag(incremental_stats=True, separate_baseline_data=True, learning_curve_output='learningcurve.png')
[ "def", "demo_learning_curve", "(", ")", ":", "postag", "(", "incremental_stats", "=", "True", ",", "separate_baseline_data", "=", "True", ",", "learning_curve_output", "=", "'learningcurve.png'", ")" ]
plot a learning curve -- the contribution on tagging accuracy of the individual rules .
train
false
3,227
def get_chassis_datacenter(host=None, admin_username=None, admin_password=None):
    return get_general('cfgLocation', 'cfgLocationDatacenter', host=host, admin_username=admin_username, admin_password=admin_password)
[ "def", "get_chassis_datacenter", "(", "host", "=", "None", ",", "admin_username", "=", "None", ",", "admin_password", "=", "None", ")", ":", "return", "get_general", "(", "'cfgLocation'", ",", "'cfgLocationDatacenter'", ",", "host", "=", "host", ",", "admin_username", "=", "admin_username", ",", "admin_password", "=", "admin_password", ")" ]
get the datacenter of the chassis .
train
true
3,228
def dmp_max_norm(f, u, K):
    if (not u):
        return dup_max_norm(f, K)
    v = (u - 1)
    return max([dmp_max_norm(c, v, K) for c in f])
[ "def", "dmp_max_norm", "(", "f", ",", "u", ",", "K", ")", ":", "if", "(", "not", "u", ")", ":", "return", "dup_max_norm", "(", "f", ",", "K", ")", "v", "=", "(", "u", "-", "1", ")", "return", "max", "(", "[", "dmp_max_norm", "(", "c", ",", "v", ",", "K", ")", "for", "c", "in", "f", "]", ")" ]
returns maximum norm of a polynomial in k[x] .
train
false
3,229
def load_crate():
    return np.load(load_data_file('orig/crate.npz'))['crate']
[ "def", "load_crate", "(", ")", ":", "return", "np", ".", "load", "(", "load_data_file", "(", "'orig/crate.npz'", ")", ")", "[", "'crate'", "]" ]
load an image of a crate . returns crate : array 256x256x3 crate image .
train
false
3,230
def _is_cmyk(filename):
    blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG', 'n02447366_23489.JPEG', 'n02492035_15739.JPEG', 'n02747177_10752.JPEG', 'n03018349_4028.JPEG', 'n03062245_4620.JPEG', 'n03347037_9675.JPEG', 'n03467068_12171.JPEG', 'n03529860_11437.JPEG', 'n03544143_17228.JPEG', 'n03633091_5218.JPEG', 'n03710637_5125.JPEG', 'n03961711_5286.JPEG', 'n04033995_2932.JPEG', 'n04258138_17003.JPEG', 'n04264628_27969.JPEG', 'n04336792_7448.JPEG', 'n04371774_5854.JPEG', 'n04596742_4225.JPEG', 'n07583066_647.JPEG', 'n13037406_4650.JPEG']
    return (filename.split('/')[(-1)] in blacklist)
[ "def", "_is_cmyk", "(", "filename", ")", ":", "blacklist", "=", "[", "'n01739381_1309.JPEG'", ",", "'n02077923_14822.JPEG'", ",", "'n02447366_23489.JPEG'", ",", "'n02492035_15739.JPEG'", ",", "'n02747177_10752.JPEG'", ",", "'n03018349_4028.JPEG'", ",", "'n03062245_4620.JPEG'", ",", "'n03347037_9675.JPEG'", ",", "'n03467068_12171.JPEG'", ",", "'n03529860_11437.JPEG'", ",", "'n03544143_17228.JPEG'", ",", "'n03633091_5218.JPEG'", ",", "'n03710637_5125.JPEG'", ",", "'n03961711_5286.JPEG'", ",", "'n04033995_2932.JPEG'", ",", "'n04258138_17003.JPEG'", ",", "'n04264628_27969.JPEG'", ",", "'n04336792_7448.JPEG'", ",", "'n04371774_5854.JPEG'", ",", "'n04596742_4225.JPEG'", ",", "'n07583066_647.JPEG'", ",", "'n13037406_4650.JPEG'", "]", "return", "(", "filename", ".", "split", "(", "'/'", ")", "[", "(", "-", "1", ")", "]", "in", "blacklist", ")" ]
determine if file contains a cmyk jpeg format image .
train
false
3,233
def degree_dist(graph, limits=(0, 0), bin_num=10, mode='out'):
    deg = []
    if (mode == 'inc'):
        get_deg = graph.inc_degree
    else:
        get_deg = graph.out_degree
    for node in graph:
        deg.append(get_deg(node))
    if (not deg):
        return []
    results = _binning(values=deg, limits=limits, bin_num=bin_num)
    return results
[ "def", "degree_dist", "(", "graph", ",", "limits", "=", "(", "0", ",", "0", ")", ",", "bin_num", "=", "10", ",", "mode", "=", "'out'", ")", ":", "deg", "=", "[", "]", "if", "(", "mode", "==", "'inc'", ")", ":", "get_deg", "=", "graph", ".", "inc_degree", "else", ":", "get_deg", "=", "graph", ".", "out_degree", "for", "node", "in", "graph", ":", "deg", ".", "append", "(", "get_deg", "(", "node", ")", ")", "if", "(", "not", "deg", ")", ":", "return", "[", "]", "results", "=", "_binning", "(", "values", "=", "deg", ",", "limits", "=", "limits", ",", "bin_num", "=", "bin_num", ")", "return", "results" ]
computes the degree distribution for a graph .
train
true
3,236
def _sc_encode(gain, peak):
    peak *= 32768.0
    g1 = int(min(round(((10 ** (gain / (-10))) * 1000)), 65534))
    g2 = int(min(round(((10 ** (gain / (-10))) * 2500)), 65534))
    uk = 0
    values = (g1, g1, g2, g2, uk, uk, int(peak), int(peak), uk, uk)
    return ((u' %08X' * 10) % values)
[ "def", "_sc_encode", "(", "gain", ",", "peak", ")", ":", "peak", "*=", "32768.0", "g1", "=", "int", "(", "min", "(", "round", "(", "(", "(", "10", "**", "(", "gain", "/", "(", "-", "10", ")", ")", ")", "*", "1000", ")", ")", ",", "65534", ")", ")", "g2", "=", "int", "(", "min", "(", "round", "(", "(", "(", "10", "**", "(", "gain", "/", "(", "-", "10", ")", ")", ")", "*", "2500", ")", ")", ",", "65534", ")", ")", "uk", "=", "0", "values", "=", "(", "g1", ",", "g1", ",", "g2", ",", "g2", ",", "uk", ",", "uk", ",", "int", "(", "peak", ")", ",", "int", "(", "peak", ")", ",", "uk", ",", "uk", ")", "return", "(", "(", "u' %08X'", "*", "10", ")", "%", "values", ")" ]
encode replaygain gain/peak values as a sound check string .
train
true
3,237
@require_POST
@permission_required('kbforums.lock_thread')
def lock_thread(request, document_slug, thread_id):
    doc = get_document(document_slug, request)
    thread = get_object_or_404(Thread, pk=thread_id, document=doc)
    thread.is_locked = (not thread.is_locked)
    log.info(('User %s set is_locked=%s on KB thread with id=%s ' % (request.user, thread.is_locked, thread.id)))
    thread.save()
    return HttpResponseRedirect(reverse('wiki.discuss.posts', args=[document_slug, thread_id]))
[ "@", "require_POST", "@", "permission_required", "(", "'kbforums.lock_thread'", ")", "def", "lock_thread", "(", "request", ",", "document_slug", ",", "thread_id", ")", ":", "doc", "=", "get_document", "(", "document_slug", ",", "request", ")", "thread", "=", "get_object_or_404", "(", "Thread", ",", "pk", "=", "thread_id", ",", "document", "=", "doc", ")", "thread", ".", "is_locked", "=", "(", "not", "thread", ".", "is_locked", ")", "log", ".", "info", "(", "(", "'User %s set is_locked=%s on KB thread with id=%s '", "%", "(", "request", ".", "user", ",", "thread", ".", "is_locked", ",", "thread", ".", "id", ")", ")", ")", "thread", ".", "save", "(", ")", "return", "HttpResponseRedirect", "(", "reverse", "(", "'wiki.discuss.posts'", ",", "args", "=", "[", "document_slug", ",", "thread_id", "]", ")", ")" ]
lock/unlock a thread .
train
false
3,238
def transitivity(G):
    triangles = sum((t for (v, d, t, _) in _triangles_and_degree_iter(G)))
    contri = sum(((d * (d - 1)) for (v, d, t, _) in _triangles_and_degree_iter(G)))
    return (0 if (triangles == 0) else (triangles / contri))
[ "def", "transitivity", "(", "G", ")", ":", "triangles", "=", "sum", "(", "(", "t", "for", "(", "v", ",", "d", ",", "t", ",", "_", ")", "in", "_triangles_and_degree_iter", "(", "G", ")", ")", ")", "contri", "=", "sum", "(", "(", "(", "d", "*", "(", "d", "-", "1", ")", ")", "for", "(", "v", ",", "d", ",", "t", ",", "_", ")", "in", "_triangles_and_degree_iter", "(", "G", ")", ")", ")", "return", "(", "0", "if", "(", "triangles", "==", "0", ")", "else", "(", "triangles", "/", "contri", ")", ")" ]
compute graph transitivity .
train
false
3,239
def gui_ebook_edit(path=None, notify=None):
    init_dbus()
    from calibre.gui2.tweak_book.main import gui_main
    gui_main(path, notify)
[ "def", "gui_ebook_edit", "(", "path", "=", "None", ",", "notify", "=", "None", ")", ":", "init_dbus", "(", ")", "from", "calibre", ".", "gui2", ".", "tweak_book", ".", "main", "import", "gui_main", "gui_main", "(", "path", ",", "notify", ")" ]
for launching the editor from inside calibre .
train
false
3,240
def sample_distribution(distribution):
    r = random.uniform(0, 1)
    s = 0
    for i in range(len(distribution)):
        s += distribution[i]
        if (s >= r):
            return i
    return (len(distribution) - 1)
[ "def", "sample_distribution", "(", "distribution", ")", ":", "r", "=", "random", ".", "uniform", "(", "0", ",", "1", ")", "s", "=", "0", "for", "i", "in", "range", "(", "len", "(", "distribution", ")", ")", ":", "s", "+=", "distribution", "[", "i", "]", "if", "(", "s", ">=", "r", ")", ":", "return", "i", "return", "(", "len", "(", "distribution", ")", "-", "1", ")" ]
sample one element from a distribution assumed to be an array of normalized probabilities .
train
false
3,241
def unicode_or_json_validator(value, context):
    try:
        if (value is None):
            return value
        v = json_validator(value, context)
        if (not isinstance(v, dict)):
            return unicode(value)
        else:
            return v
    except df.Invalid:
        return unicode(value)
[ "def", "unicode_or_json_validator", "(", "value", ",", "context", ")", ":", "try", ":", "if", "(", "value", "is", "None", ")", ":", "return", "value", "v", "=", "json_validator", "(", "value", ",", "context", ")", "if", "(", "not", "isinstance", "(", "v", ",", "dict", ")", ")", ":", "return", "unicode", "(", "value", ")", "else", ":", "return", "v", "except", "df", ".", "Invalid", ":", "return", "unicode", "(", "value", ")" ]
return a parsed json object when applicable .
train
false
3,242
def _get_repo_options(**kwargs):
    fromrepo = kwargs.get('fromrepo', '')
    repo = kwargs.get('repo', '')
    disablerepo = kwargs.get('disablerepo', '')
    enablerepo = kwargs.get('enablerepo', '')
    if (repo and (not fromrepo)):
        fromrepo = repo
    ret = []
    if fromrepo:
        log.info("Restricting to repo '%s'", fromrepo)
        ret.extend(['--disablerepo=*', ('--enablerepo=' + fromrepo)])
    else:
        if disablerepo:
            targets = ([disablerepo] if (not isinstance(disablerepo, list)) else disablerepo)
            log.info('Disabling repo(s): %s', ', '.join(targets))
            ret.extend(['--disablerepo={0}'.format(x) for x in targets])
        if enablerepo:
            targets = ([enablerepo] if (not isinstance(enablerepo, list)) else enablerepo)
            log.info('Enabling repo(s): %s', ', '.join(targets))
            ret.extend(['--enablerepo={0}'.format(x) for x in targets])
    return ret
[ "def", "_get_repo_options", "(", "**", "kwargs", ")", ":", "fromrepo", "=", "kwargs", ".", "get", "(", "'fromrepo'", ",", "''", ")", "repo", "=", "kwargs", ".", "get", "(", "'repo'", ",", "''", ")", "disablerepo", "=", "kwargs", ".", "get", "(", "'disablerepo'", ",", "''", ")", "enablerepo", "=", "kwargs", ".", "get", "(", "'enablerepo'", ",", "''", ")", "if", "(", "repo", "and", "(", "not", "fromrepo", ")", ")", ":", "fromrepo", "=", "repo", "ret", "=", "[", "]", "if", "fromrepo", ":", "log", ".", "info", "(", "\"Restricting to repo '%s'\"", ",", "fromrepo", ")", "ret", ".", "extend", "(", "[", "'--disablerepo=*'", ",", "(", "'--enablerepo='", "+", "fromrepo", ")", "]", ")", "else", ":", "if", "disablerepo", ":", "targets", "=", "(", "[", "disablerepo", "]", "if", "(", "not", "isinstance", "(", "disablerepo", ",", "list", ")", ")", "else", "disablerepo", ")", "log", ".", "info", "(", "'Disabling repo(s): %s'", ",", "', '", ".", "join", "(", "targets", ")", ")", "ret", ".", "extend", "(", "[", "'--disablerepo={0}'", ".", "format", "(", "x", ")", "for", "x", "in", "targets", "]", ")", "if", "enablerepo", ":", "targets", "=", "(", "[", "enablerepo", "]", "if", "(", "not", "isinstance", "(", "enablerepo", ",", "list", ")", ")", "else", "enablerepo", ")", "log", ".", "info", "(", "'Enabling repo(s): %s'", ",", "', '", ".", "join", "(", "targets", ")", ")", "ret", ".", "extend", "(", "[", "'--enablerepo={0}'", ".", "format", "(", "x", ")", "for", "x", "in", "targets", "]", ")", "return", "ret" ]
return a list of --disablerepo/--enablerepo options built from the fromrepo , repo , disablerepo , and enablerepo kwargs .
train
false
3,244
def dh_public_key(key):
    (p, g, a) = key
    return (p, g, pow(g, a, p))
[ "def", "dh_public_key", "(", "key", ")", ":", "(", "p", ",", "g", ",", "a", ")", "=", "key", "return", "(", "p", ",", "g", ",", "pow", "(", "g", ",", "a", ",", "p", ")", ")" ]
return three number tuple as public key .
train
false
3,245
def guess_encoding(data):
    successful_encoding = None
    encodings = ['utf-8']
    try:
        encodings.append(locale.nl_langinfo(locale.CODESET))
    except AttributeError:
        pass
    try:
        encodings.append(locale.getlocale()[1])
    except (AttributeError, IndexError):
        pass
    try:
        encodings.append(locale.getdefaultlocale()[1])
    except (AttributeError, IndexError):
        pass
    encodings.append('latin-1')
    for enc in encodings:
        if (not enc):
            continue
        try:
            decoded = text_type(data, enc)
            successful_encoding = enc
        except (UnicodeError, LookupError):
            pass
        else:
            break
    if (not successful_encoding):
        raise UnicodeError(('Unable to decode input data. Tried the following encodings: %s.' % ', '.join([repr(enc) for enc in encodings if enc])))
    else:
        return (decoded, successful_encoding)
[ "def", "guess_encoding", "(", "data", ")", ":", "successful_encoding", "=", "None", "encodings", "=", "[", "'utf-8'", "]", "try", ":", "encodings", ".", "append", "(", "locale", ".", "nl_langinfo", "(", "locale", ".", "CODESET", ")", ")", "except", "AttributeError", ":", "pass", "try", ":", "encodings", ".", "append", "(", "locale", ".", "getlocale", "(", ")", "[", "1", "]", ")", "except", "(", "AttributeError", ",", "IndexError", ")", ":", "pass", "try", ":", "encodings", ".", "append", "(", "locale", ".", "getdefaultlocale", "(", ")", "[", "1", "]", ")", "except", "(", "AttributeError", ",", "IndexError", ")", ":", "pass", "encodings", ".", "append", "(", "'latin-1'", ")", "for", "enc", "in", "encodings", ":", "if", "(", "not", "enc", ")", ":", "continue", "try", ":", "decoded", "=", "text_type", "(", "data", ",", "enc", ")", "successful_encoding", "=", "enc", "except", "(", "UnicodeError", ",", "LookupError", ")", ":", "pass", "else", ":", "break", "if", "(", "not", "successful_encoding", ")", ":", "raise", "UnicodeError", "(", "(", "'Unable to decode input data. Tried the following encodings: %s.'", "%", "', '", ".", "join", "(", "[", "repr", "(", "enc", ")", "for", "enc", "in", "encodings", "if", "enc", "]", ")", ")", ")", "else", ":", "return", "(", "decoded", ",", "successful_encoding", ")" ]
given a byte string , try a list of likely encodings and return the decoded text together with the encoding that succeeded .
train
false
3,246
def create_txn(asset, dt, price, amount):
    if (not isinstance(asset, Asset)):
        raise ValueError('pass an asset to create_txn')
    mock_order = Order(dt, asset, amount, id=None)
    return create_transaction(mock_order, dt, price, amount)
[ "def", "create_txn", "(", "asset", ",", "dt", ",", "price", ",", "amount", ")", ":", "if", "(", "not", "isinstance", "(", "asset", ",", "Asset", ")", ")", ":", "raise", "ValueError", "(", "'pass an asset to create_txn'", ")", "mock_order", "=", "Order", "(", "dt", ",", "asset", ",", "amount", ",", "id", "=", "None", ")", "return", "create_transaction", "(", "mock_order", ",", "dt", ",", "price", ",", "amount", ")" ]
create a fake transaction to be filled and processed prior to the execution of a given trade event .
train
false
3,247
def metadef_object_count(context, namespace_name, session=None):
    session = (session or get_session())
    return metadef_object_api.count(context, namespace_name, session)
[ "def", "metadef_object_count", "(", "context", ",", "namespace_name", ",", "session", "=", "None", ")", ":", "session", "=", "(", "session", "or", "get_session", "(", ")", ")", "return", "metadef_object_api", ".", "count", "(", "context", ",", "namespace_name", ",", "session", ")" ]
get metadef object count in a namespace .
train
false
3,248
def _get_proc_name(proc):
    try:
        return (proc.name() if PSUTIL2 else proc.name)
    except (psutil.NoSuchProcess, psutil.AccessDenied):
        return []
[ "def", "_get_proc_name", "(", "proc", ")", ":", "try", ":", "return", "(", "proc", ".", "name", "(", ")", "if", "PSUTIL2", "else", "proc", ".", "name", ")", "except", "(", "psutil", ".", "NoSuchProcess", ",", "psutil", ".", "AccessDenied", ")", ":", "return", "[", "]" ]
returns the name of a process instance .
train
false
3,253
def detect_distro_type():
    if os.path.exists('/etc/redhat-release'):
        return 'redhat'
    elif os.path.exists('/etc/debian_version'):
        return 'debian'
    elif os.path.exists('/etc/SuSE-release'):
        return 'suse'
    else:
        return None
[ "def", "detect_distro_type", "(", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "'/etc/redhat-release'", ")", ":", "return", "'redhat'", "elif", "os", ".", "path", ".", "exists", "(", "'/etc/debian_version'", ")", ":", "return", "'debian'", "elif", "os", ".", "path", ".", "exists", "(", "'/etc/SuSE-release'", ")", ":", "return", "'suse'", "else", ":", "return", "None" ]
simple distro detection based on release/version files .
train
false
3,255
@pytest.mark.parametrize('url, expected', (('http://192.168.0.1:5000/', True), ('http://192.168.0.1/', True), ('http://172.16.1.1/', True), ('http://172.16.1.1:5000/', True), ('http://localhost.localdomain:5000/v1.0/', True), ('http://172.16.1.12/', False), ('http://172.16.1.12:5000/', False), ('http://google.com:5000/v1.0/', False)))
def test_should_bypass_proxies(url, expected, monkeypatch):
    monkeypatch.setenv('no_proxy', '192.168.0.0/24,127.0.0.1,localhost.localdomain,172.16.1.1')
    monkeypatch.setenv('NO_PROXY', '192.168.0.0/24,127.0.0.1,localhost.localdomain,172.16.1.1')
    assert (should_bypass_proxies(url) == expected)
[ "@", "pytest", ".", "mark", ".", "parametrize", "(", "'url, expected'", ",", "(", "(", "'http://192.168.0.1:5000/'", ",", "True", ")", ",", "(", "'http://192.168.0.1/'", ",", "True", ")", ",", "(", "'http://172.16.1.1/'", ",", "True", ")", ",", "(", "'http://172.16.1.1:5000/'", ",", "True", ")", ",", "(", "'http://localhost.localdomain:5000/v1.0/'", ",", "True", ")", ",", "(", "'http://172.16.1.12/'", ",", "False", ")", ",", "(", "'http://172.16.1.12:5000/'", ",", "False", ")", ",", "(", "'http://google.com:5000/v1.0/'", ",", "False", ")", ")", ")", "def", "test_should_bypass_proxies", "(", "url", ",", "expected", ",", "monkeypatch", ")", ":", "monkeypatch", ".", "setenv", "(", "'no_proxy'", ",", "'192.168.0.0/24,127.0.0.1,localhost.localdomain,172.16.1.1'", ")", "monkeypatch", ".", "setenv", "(", "'NO_PROXY'", ",", "'192.168.0.0/24,127.0.0.1,localhost.localdomain,172.16.1.1'", ")", "assert", "(", "should_bypass_proxies", "(", "url", ")", "==", "expected", ")" ]
tests for function should_bypass_proxies to check if proxy can be bypassed or not .
train
false
3,257
def generate_test_files(src, dest, nseconds, fmts=['.mp3', '.wav'], padding=10):
    for directory in [src, dest]:
        try:
            os.stat(directory)
        except:
            os.mkdir(directory)
    for fmt in fmts:
        testsources = get_files_recursive(src, fmt)
        for audiosource in testsources:
            print 'audiosource:', audiosource
            (filename, extension) = os.path.splitext(os.path.basename(audiosource))
            length = get_length_audio(audiosource, extension)
            starttime = get_starttime(length, nseconds, padding)
            test_file_name = ('%s_%s_%ssec.%s' % (os.path.join(dest, filename), starttime, nseconds, extension.replace('.', '')))
            subprocess.check_output(['ffmpeg', '-y', '-ss', ('%d' % starttime), '-t', ('%d' % nseconds), '-i', audiosource, test_file_name])
[ "def", "generate_test_files", "(", "src", ",", "dest", ",", "nseconds", ",", "fmts", "=", "[", "'.mp3'", ",", "'.wav'", "]", ",", "padding", "=", "10", ")", ":", "for", "directory", "in", "[", "src", ",", "dest", "]", ":", "try", ":", "os", ".", "stat", "(", "directory", ")", "except", ":", "os", ".", "mkdir", "(", "directory", ")", "for", "fmt", "in", "fmts", ":", "testsources", "=", "get_files_recursive", "(", "src", ",", "fmt", ")", "for", "audiosource", "in", "testsources", ":", "print", "'audiosource:'", ",", "audiosource", "(", "filename", ",", "extension", ")", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "audiosource", ")", ")", "length", "=", "get_length_audio", "(", "audiosource", ",", "extension", ")", "starttime", "=", "get_starttime", "(", "length", ",", "nseconds", ",", "padding", ")", "test_file_name", "=", "(", "'%s_%s_%ssec.%s'", "%", "(", "os", ".", "path", ".", "join", "(", "dest", ",", "filename", ")", ",", "starttime", ",", "nseconds", ",", "extension", ".", "replace", "(", "'.'", ",", "''", ")", ")", ")", "subprocess", ".", "check_output", "(", "[", "'ffmpeg'", ",", "'-y'", ",", "'-ss'", ",", "(", "'%d'", "%", "starttime", ")", ",", "'-t'", ",", "(", "'%d'", "%", "nseconds", ")", ",", "'-i'", ",", "audiosource", ",", "test_file_name", "]", ")" ]
generates a test file for each file recursively in src directory of given format using nseconds sampled from the audio file .
train
false
3,261
def attr_as_boolean(val_attr):
    return strutils.bool_from_string(val_attr, default=True)
[ "def", "attr_as_boolean", "(", "val_attr", ")", ":", "return", "strutils", ".", "bool_from_string", "(", "val_attr", ",", "default", "=", "True", ")" ]
return the boolean value .
train
false
3,263
def base_repr(number, base=2, padding=0):
    chars = u'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if (number < base):
        return (((padding - 1) * chars[0]) + chars[int(number)])
    max_exponent = int((math.log(number) / math.log(base)))
    max_power = (long(base) ** max_exponent)
    lead_digit = int((number / max_power))
    return (chars[lead_digit] + base_repr((number - (max_power * lead_digit)), base, max((padding - 1), max_exponent)))
[ "def", "base_repr", "(", "number", ",", "base", "=", "2", ",", "padding", "=", "0", ")", ":", "chars", "=", "u'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'", "if", "(", "number", "<", "base", ")", ":", "return", "(", "(", "(", "padding", "-", "1", ")", "*", "chars", "[", "0", "]", ")", "+", "chars", "[", "int", "(", "number", ")", "]", ")", "max_exponent", "=", "int", "(", "(", "math", ".", "log", "(", "number", ")", "/", "math", ".", "log", "(", "base", ")", ")", ")", "max_power", "=", "(", "long", "(", "base", ")", "**", "max_exponent", ")", "lead_digit", "=", "int", "(", "(", "number", "/", "max_power", ")", ")", "return", "(", "chars", "[", "lead_digit", "]", "+", "base_repr", "(", "(", "number", "-", "(", "max_power", "*", "lead_digit", ")", ")", ",", "base", ",", "max", "(", "(", "padding", "-", "1", ")", ",", "max_exponent", ")", ")", ")" ]
return the representation of a *number* in any given *base* .
train
false
3,265
def var_quadratic_sum(A, C, H, beta, x0):
    (A, C, H) = list(map(np.atleast_2d, (A, C, H)))
    x0 = np.atleast_1d(x0)
    Q = scipy.linalg.solve_discrete_lyapunov((sqrt(beta) * A.T), H)
    cq = dot(dot(C.T, Q), C)
    v = ((np.trace(cq) * beta) / (1 - beta))
    q0 = (dot(dot(x0.T, Q), x0) + v)
    return q0
[ "def", "var_quadratic_sum", "(", "A", ",", "C", ",", "H", ",", "beta", ",", "x0", ")", ":", "(", "A", ",", "C", ",", "H", ")", "=", "list", "(", "map", "(", "np", ".", "atleast_2d", ",", "(", "A", ",", "C", ",", "H", ")", ")", ")", "x0", "=", "np", ".", "atleast_1d", "(", "x0", ")", "Q", "=", "scipy", ".", "linalg", ".", "solve_discrete_lyapunov", "(", "(", "sqrt", "(", "beta", ")", "*", "A", ".", "T", ")", ",", "H", ")", "cq", "=", "dot", "(", "dot", "(", "C", ".", "T", ",", "Q", ")", ",", "C", ")", "v", "=", "(", "(", "np", ".", "trace", "(", "cq", ")", "*", "beta", ")", "/", "(", "1", "-", "beta", ")", ")", "q0", "=", "(", "dot", "(", "dot", "(", "x0", ".", "T", ",", "Q", ")", ",", "x0", ")", "+", "v", ")", "return", "q0" ]
computes the expected discounted quadratic sum .
train
true
3,266
def isValidVariableName(name):
    if (not name):
        return (False, "Variables cannot be missing, None, or ''")
    if (not (type(name) in (str, unicode, numpy.string_, numpy.unicode_))):
        return (False, 'Variables must be string-like')
    try:
        name = str(name)
    except Exception:
        if (type(name) in [unicode, numpy.unicode_]):
            msg = 'name %s (type %s) contains non-ASCII characters (e.g. accents)'
            raise AttributeError((msg % (name, type(name))))
        else:
            msg = 'name %s (type %s) could not be converted to a string'
            raise AttributeError((msg % (name, type(name))))
    if name[0].isdigit():
        return (False, 'Variables cannot begin with numeric character')
    if _nonalphanumeric_re.search(name):
        return (False, 'Variables cannot contain punctuation or spaces')
    return (True, '')
[ "def", "isValidVariableName", "(", "name", ")", ":", "if", "(", "not", "name", ")", ":", "return", "(", "False", ",", "\"Variables cannot be missing, None, or ''\"", ")", "if", "(", "not", "(", "type", "(", "name", ")", "in", "(", "str", ",", "unicode", ",", "numpy", ".", "string_", ",", "numpy", ".", "unicode_", ")", ")", ")", ":", "return", "(", "False", ",", "'Variables must be string-like'", ")", "try", ":", "name", "=", "str", "(", "name", ")", "except", "Exception", ":", "if", "(", "type", "(", "name", ")", "in", "[", "unicode", ",", "numpy", ".", "unicode_", "]", ")", ":", "msg", "=", "'name %s (type %s) contains non-ASCII characters (e.g. accents)'", "raise", "AttributeError", "(", "(", "msg", "%", "(", "name", ",", "type", "(", "name", ")", ")", ")", ")", "else", ":", "msg", "=", "'name %s (type %s) could not be converted to a string'", "raise", "AttributeError", "(", "(", "msg", "%", "(", "name", ",", "type", "(", "name", ")", ")", ")", ")", "if", "name", "[", "0", "]", ".", "isdigit", "(", ")", ":", "return", "(", "False", ",", "'Variables cannot begin with numeric character'", ")", "if", "_nonalphanumeric_re", ".", "search", "(", "name", ")", ":", "return", "(", "False", ",", "'Variables cannot contain punctuation or spaces'", ")", "return", "(", "True", ",", "''", ")" ]
checks whether a certain string could be used as a valid variable .
train
false
3,267
def get_func_kwargs(func):
    return compat.inspect_getargspec(func)[0]
[ "def", "get_func_kwargs", "(", "func", ")", ":", "return", "compat", ".", "inspect_getargspec", "(", "func", ")", "[", "0", "]" ]
return the set of legal kwargs for the given func .
train
false
3,268
def output():
    return s3_rest_controller()
[ "def", "output", "(", ")", ":", "return", "s3_rest_controller", "(", ")" ]
restful crud controller .
train
false
3,269
def resolve_authconfig(authconfig, registry=None):
    if ('credsStore' in authconfig):
        log.debug('Using credentials store "{0}"'.format(authconfig['credsStore']))
        return _resolve_authconfig_credstore(authconfig, registry, authconfig['credsStore'])
    registry = (resolve_index_name(registry) if registry else INDEX_NAME)
    log.debug('Looking for auth entry for {0}'.format(repr(registry)))
    if (registry in authconfig):
        log.debug('Found {0}'.format(repr(registry)))
        return authconfig[registry]
    for (key, config) in six.iteritems(authconfig):
        if (resolve_index_name(key) == registry):
            log.debug('Found {0}'.format(repr(key)))
            return config
    log.debug('No entry found')
    return None
[ "def", "resolve_authconfig", "(", "authconfig", ",", "registry", "=", "None", ")", ":", "if", "(", "'credsStore'", "in", "authconfig", ")", ":", "log", ".", "debug", "(", "'Using credentials store \"{0}\"'", ".", "format", "(", "authconfig", "[", "'credsStore'", "]", ")", ")", "return", "_resolve_authconfig_credstore", "(", "authconfig", ",", "registry", ",", "authconfig", "[", "'credsStore'", "]", ")", "registry", "=", "(", "resolve_index_name", "(", "registry", ")", "if", "registry", "else", "INDEX_NAME", ")", "log", ".", "debug", "(", "'Looking for auth entry for {0}'", ".", "format", "(", "repr", "(", "registry", ")", ")", ")", "if", "(", "registry", "in", "authconfig", ")", ":", "log", ".", "debug", "(", "'Found {0}'", ".", "format", "(", "repr", "(", "registry", ")", ")", ")", "return", "authconfig", "[", "registry", "]", "for", "(", "key", ",", "config", ")", "in", "six", ".", "iteritems", "(", "authconfig", ")", ":", "if", "(", "resolve_index_name", "(", "key", ")", "==", "registry", ")", ":", "log", ".", "debug", "(", "'Found {0}'", ".", "format", "(", "repr", "(", "key", ")", ")", ")", "return", "config", "log", ".", "debug", "(", "'No entry found'", ")", "return", "None" ]
returns the authentication data from the given auth configuration for a specific registry .
train
false
3,270
def ParsePropertyQuery(query, filters, orders):
    Check((not query.has_transaction()), 'transactional queries on __property__ not allowed')
    key_range = ParseKeyFilteredQuery(filters, orders)
    key_range.Remap((lambda x: _PropertyKeyToString(x, '')))
    if query.has_ancestor():
        ancestor = datastore_types.Key._FromPb(query.ancestor())
        (ancestor_kind, ancestor_property) = _PropertyKeyToString(ancestor, None)
        if (ancestor_property is not None):
            key_range.Update(datastore_pb.Query_Filter.EQUAL, (ancestor_kind, ancestor_property))
        else:
            key_range.Update(datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL, (ancestor_kind, ''))
            key_range.Update(datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL, ((ancestor_kind + '\x00'), ''))
        query.clear_ancestor()
    return key_range
[ "def", "ParsePropertyQuery", "(", "query", ",", "filters", ",", "orders", ")", ":", "Check", "(", "(", "not", "query", ".", "has_transaction", "(", ")", ")", ",", "'transactional queries on __property__ not allowed'", ")", "key_range", "=", "ParseKeyFilteredQuery", "(", "filters", ",", "orders", ")", "key_range", ".", "Remap", "(", "(", "lambda", "x", ":", "_PropertyKeyToString", "(", "x", ",", "''", ")", ")", ")", "if", "query", ".", "has_ancestor", "(", ")", ":", "ancestor", "=", "datastore_types", ".", "Key", ".", "_FromPb", "(", "query", ".", "ancestor", "(", ")", ")", "(", "ancestor_kind", ",", "ancestor_property", ")", "=", "_PropertyKeyToString", "(", "ancestor", ",", "None", ")", "if", "(", "ancestor_property", "is", "not", "None", ")", ":", "key_range", ".", "Update", "(", "datastore_pb", ".", "Query_Filter", ".", "EQUAL", ",", "(", "ancestor_kind", ",", "ancestor_property", ")", ")", "else", ":", "key_range", ".", "Update", "(", "datastore_pb", ".", "Query_Filter", ".", "GREATER_THAN_OR_EQUAL", ",", "(", "ancestor_kind", ",", "''", ")", ")", "key_range", ".", "Update", "(", "datastore_pb", ".", "Query_Filter", ".", "LESS_THAN_OR_EQUAL", ",", "(", "(", "ancestor_kind", "+", "'\\x00'", ")", ",", "''", ")", ")", "query", ".", "clear_ancestor", "(", ")", "return", "key_range" ]
parse __property__ queries .
train
false
3,271
def group_order(tlist):
    (tidx, token) = tlist.token_next_by(t=T.Keyword.Order)
    while token:
        (pidx, prev_) = tlist.token_prev(tidx)
        if imt(prev_, i=sql.Identifier, t=T.Number):
            tlist.group_tokens(sql.Identifier, pidx, tidx)
            tidx = pidx
        (tidx, token) = tlist.token_next_by(t=T.Keyword.Order, idx=tidx)
[ "def", "group_order", "(", "tlist", ")", ":", "(", "tidx", ",", "token", ")", "=", "tlist", ".", "token_next_by", "(", "t", "=", "T", ".", "Keyword", ".", "Order", ")", "while", "token", ":", "(", "pidx", ",", "prev_", ")", "=", "tlist", ".", "token_prev", "(", "tidx", ")", "if", "imt", "(", "prev_", ",", "i", "=", "sql", ".", "Identifier", ",", "t", "=", "T", ".", "Number", ")", ":", "tlist", ".", "group_tokens", "(", "sql", ".", "Identifier", ",", "pidx", ",", "tidx", ")", "tidx", "=", "pidx", "(", "tidx", ",", "token", ")", "=", "tlist", ".", "token_next_by", "(", "t", "=", "T", ".", "Keyword", ".", "Order", ",", "idx", "=", "tidx", ")" ]
group together identifier and asc/desc token .
train
true
3,273
def print_routes(api, verbose=False):
    traverse(api._router._roots, verbose=verbose)
[ "def", "print_routes", "(", "api", ",", "verbose", "=", "False", ")", ":", "traverse", "(", "api", ".", "_router", ".", "_roots", ",", "verbose", "=", "verbose", ")" ]
initial call .
train
false
3,274
def arbitrary_string(size=4, base_text=None):
    if (not base_text):
        base_text = 'test'
    return ''.join(itertools.islice(itertools.cycle(base_text), size))
[ "def", "arbitrary_string", "(", "size", "=", "4", ",", "base_text", "=", "None", ")", ":", "if", "(", "not", "base_text", ")", ":", "base_text", "=", "'test'", "return", "''", ".", "join", "(", "itertools", ".", "islice", "(", "itertools", ".", "cycle", "(", "base_text", ")", ",", "size", ")", ")" ]
return size characters from base_text . this generates a string with an arbitrary number of characters .
train
false
3,275
def get_cache():
    return requests.Session().cache
[ "def", "get_cache", "(", ")", ":", "return", "requests", ".", "Session", "(", ")", ".", "cache" ]
function to load a cache backend dynamically .
train
false
3,276
def urlnormalize(href):
    parts = urlparse(href)
    if ((not parts.scheme) or (parts.scheme == 'file')):
        (path, frag) = urldefrag(href)
        parts = ('', '', path, '', '', frag)
    parts = (part.replace('\\', '/') for part in parts)
    parts = (urlunquote(part) for part in parts)
    parts = (urlquote(part) for part in parts)
    return urlunparse(parts)
[ "def", "urlnormalize", "(", "href", ")", ":", "parts", "=", "urlparse", "(", "href", ")", "if", "(", "(", "not", "parts", ".", "scheme", ")", "or", "(", "parts", ".", "scheme", "==", "'file'", ")", ")", ":", "(", "path", ",", "frag", ")", "=", "urldefrag", "(", "href", ")", "parts", "=", "(", "''", ",", "''", ",", "path", ",", "''", ",", "''", ",", "frag", ")", "parts", "=", "(", "part", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "for", "part", "in", "parts", ")", "parts", "=", "(", "urlunquote", "(", "part", ")", "for", "part", "in", "parts", ")", "parts", "=", "(", "urlquote", "(", "part", ")", "for", "part", "in", "parts", ")", "return", "urlunparse", "(", "parts", ")" ]
convert a url into normalized form .
train
false
3,277
def FindCheckMacro(line):
    for macro in _CHECK_MACROS:
        i = line.find(macro)
        if (i >= 0):
            matched = Match((('^(.*\\b' + macro) + '\\s*)\\('), line)
            if (not matched):
                continue
            return (macro, len(matched.group(1)))
    return (None, (-1))
[ "def", "FindCheckMacro", "(", "line", ")", ":", "for", "macro", "in", "_CHECK_MACROS", ":", "i", "=", "line", ".", "find", "(", "macro", ")", "if", "(", "i", ">=", "0", ")", ":", "matched", "=", "Match", "(", "(", "(", "'^(.*\\\\b'", "+", "macro", ")", "+", "'\\\\s*)\\\\('", ")", ",", "line", ")", "if", "(", "not", "matched", ")", ":", "continue", "return", "(", "macro", ",", "len", "(", "matched", ".", "group", "(", "1", ")", ")", ")", "return", "(", "None", ",", "(", "-", "1", ")", ")" ]
find a replaceable check-like macro .
train
true
3,278
def evalRnnOnSeqDataset(net, DS, verbose=False, silent=False):
    r = 0.0
    samples = 0.0
    for seq in DS:
        net.reset()
        for (i, t) in seq:
            res = net.activate(i)
            if verbose:
                print(t, res)
            r += sum(((t - res) ** 2))
            samples += 1
        if verbose:
            print(('-' * 20))
    r /= samples
    if (not silent):
        print('MSE:', r)
    return r
[ "def", "evalRnnOnSeqDataset", "(", "net", ",", "DS", ",", "verbose", "=", "False", ",", "silent", "=", "False", ")", ":", "r", "=", "0.0", "samples", "=", "0.0", "for", "seq", "in", "DS", ":", "net", ".", "reset", "(", ")", "for", "(", "i", ",", "t", ")", "in", "seq", ":", "res", "=", "net", ".", "activate", "(", "i", ")", "if", "verbose", ":", "print", "(", "t", ",", "res", ")", "r", "+=", "sum", "(", "(", "(", "t", "-", "res", ")", "**", "2", ")", ")", "samples", "+=", "1", "if", "verbose", ":", "print", "(", "(", "'-'", "*", "20", ")", ")", "r", "/=", "samples", "if", "(", "not", "silent", ")", ":", "print", "(", "'MSE:'", ",", "r", ")", "return", "r" ]
evaluate the network on all the sequences of a dataset .
train
false
3,280
@bdd.then(bdd.parsers.parse('The unordered requests should be:\n{pages}'))
def list_of_requests_unordered(httpbin, pages):
    expected_requests = [httpbin.ExpectedRequest('GET', ('/' + path.strip())) for path in pages.split('\n')]
    actual_requests = httpbin.get_requests()
    actual_requests = [httpbin.ExpectedRequest.from_request(req) for req in actual_requests]
    assert (collections.Counter(actual_requests) == collections.Counter(expected_requests))
[ "@", "bdd", ".", "then", "(", "bdd", ".", "parsers", ".", "parse", "(", "'The unordered requests should be:\\n{pages}'", ")", ")", "def", "list_of_requests_unordered", "(", "httpbin", ",", "pages", ")", ":", "expected_requests", "=", "[", "httpbin", ".", "ExpectedRequest", "(", "'GET'", ",", "(", "'/'", "+", "path", ".", "strip", "(", ")", ")", ")", "for", "path", "in", "pages", ".", "split", "(", "'\\n'", ")", "]", "actual_requests", "=", "httpbin", ".", "get_requests", "(", ")", "actual_requests", "=", "[", "httpbin", ".", "ExpectedRequest", ".", "from_request", "(", "req", ")", "for", "req", "in", "actual_requests", "]", "assert", "(", "collections", ".", "Counter", "(", "actual_requests", ")", "==", "collections", ".", "Counter", "(", "expected_requests", ")", ")" ]
make sure the given requests were done .
train
false
3,281
def get_user_from_identifier(identifier):
    identifier = identifier.strip()
    if ('@' in identifier):
        user = User.objects.get(email=identifier)
    else:
        user = User.objects.get(username=identifier)
    return user
[ "def", "get_user_from_identifier", "(", "identifier", ")", ":", "identifier", "=", "identifier", ".", "strip", "(", ")", "if", "(", "'@'", "in", "identifier", ")", ":", "user", "=", "User", ".", "objects", ".", "get", "(", "email", "=", "identifier", ")", "else", ":", "user", "=", "User", ".", "objects", ".", "get", "(", "username", "=", "identifier", ")", "return", "user" ]
this function takes the string identifier and fetches the relevant user object from the database .
train
false
3,282
def get_pre_requisite_courses_not_completed(user, enrolled_courses):
    if (not is_prerequisite_courses_enabled()):
        return {}
    pre_requisite_courses = {}
    for course_key in enrolled_courses:
        required_courses = []
        fulfillment_paths = milestones_api.get_course_milestones_fulfillment_paths(course_key, {'id': user.id})
        for (__, milestone_value) in fulfillment_paths.items():
            for (key, value) in milestone_value.items():
                if ((key == 'courses') and value):
                    for required_course in value:
                        required_course_key = CourseKey.from_string(required_course)
                        required_course_overview = CourseOverview.get_from_id(required_course_key)
                        required_courses.append({'key': required_course_key, 'display': get_course_display_string(required_course_overview)})
        if required_courses:
            pre_requisite_courses[course_key] = {'courses': required_courses}
    return pre_requisite_courses
[ "def", "get_pre_requisite_courses_not_completed", "(", "user", ",", "enrolled_courses", ")", ":", "if", "(", "not", "is_prerequisite_courses_enabled", "(", ")", ")", ":", "return", "{", "}", "pre_requisite_courses", "=", "{", "}", "for", "course_key", "in", "enrolled_courses", ":", "required_courses", "=", "[", "]", "fulfillment_paths", "=", "milestones_api", ".", "get_course_milestones_fulfillment_paths", "(", "course_key", ",", "{", "'id'", ":", "user", ".", "id", "}", ")", "for", "(", "__", ",", "milestone_value", ")", "in", "fulfillment_paths", ".", "items", "(", ")", ":", "for", "(", "key", ",", "value", ")", "in", "milestone_value", ".", "items", "(", ")", ":", "if", "(", "(", "key", "==", "'courses'", ")", "and", "value", ")", ":", "for", "required_course", "in", "value", ":", "required_course_key", "=", "CourseKey", ".", "from_string", "(", "required_course", ")", "required_course_overview", "=", "CourseOverview", ".", "get_from_id", "(", "required_course_key", ")", "required_courses", ".", "append", "(", "{", "'key'", ":", "required_course_key", ",", "'display'", ":", "get_course_display_string", "(", "required_course_overview", ")", "}", ")", "if", "required_courses", ":", "pre_requisite_courses", "[", "course_key", "]", "=", "{", "'courses'", ":", "required_courses", "}", "return", "pre_requisite_courses" ]
makes a dict mapping courses to their unfulfilled milestones using the fulfillment api of the milestones app .
train
false
3,283
def get_eulerian_tour():
    global graph
    tour = get_a_tour()
    if graph:
        loop = enumerate(tour[:(-1)])
        l = loop.__next__()
        i = l[0]
        node = l[1]
        try:
            while True:
                if ((node in list(zip(*graph))[0]) or (node in list(zip(*graph))[1])):
                    t = get_a_tour()
                    j = t.index(node)
                    tour = (((tour[:i] + t[j:(-1)]) + t[:(j + 1)]) + tour[(i + 1):])
                    if (not graph):
                        return tour
                    loop = enumerate(tour[:(-1)])
                l = loop.__next__()
                i = l[0]
                node = l[1]
        except StopIteration:
            print "Your graph doesn't seem to be connected"
            exit()
    else:
        return tour
[ "def", "get_eulerian_tour", "(", ")", ":", "global", "graph", "tour", "=", "get_a_tour", "(", ")", "if", "graph", ":", "loop", "=", "enumerate", "(", "tour", "[", ":", "(", "-", "1", ")", "]", ")", "l", "=", "loop", ".", "__next__", "(", ")", "i", "=", "l", "[", "0", "]", "node", "=", "l", "[", "1", "]", "try", ":", "while", "True", ":", "if", "(", "(", "node", "in", "list", "(", "zip", "(", "*", "graph", ")", ")", "[", "0", "]", ")", "or", "(", "node", "in", "list", "(", "zip", "(", "*", "graph", ")", ")", "[", "1", "]", ")", ")", ":", "t", "=", "get_a_tour", "(", ")", "j", "=", "t", ".", "index", "(", "node", ")", "tour", "=", "(", "(", "(", "tour", "[", ":", "i", "]", "+", "t", "[", "j", ":", "(", "-", "1", ")", "]", ")", "+", "t", "[", ":", "(", "j", "+", "1", ")", "]", ")", "+", "tour", "[", "(", "i", "+", "1", ")", ":", "]", ")", "if", "(", "not", "graph", ")", ":", "return", "tour", "loop", "=", "enumerate", "(", "tour", "[", ":", "(", "-", "1", ")", "]", ")", "l", "=", "loop", ".", "__next__", "(", ")", "i", "=", "l", "[", "0", "]", "node", "=", "l", "[", "1", "]", "except", "StopIteration", ":", "print", "\"Your graph doesn't seem to be connected\"", "exit", "(", ")", "else", ":", "return", "tour" ]
this function returns an eulerian tour for the input graph .
train
false
3,285
def earned_exp(base_exp, level):
    return ((base_exp * level) // 7)
[ "def", "earned_exp", "(", "base_exp", ",", "level", ")", ":", "return", "(", "(", "base_exp", "*", "level", ")", "//", "7", ")" ]
returns the amount of exp earned when defeating a pokémon at the given level .
train
false
3,287
def add_to_deleted_document(doc):
    if ((doc.doctype != u'Deleted Document') and (frappe.flags.in_install != u'frappe')):
        frappe.get_doc(dict(doctype=u'Deleted Document', deleted_doctype=doc.doctype, deleted_name=doc.name, data=doc.as_json())).db_insert()
[ "def", "add_to_deleted_document", "(", "doc", ")", ":", "if", "(", "(", "doc", ".", "doctype", "!=", "u'Deleted Document'", ")", "and", "(", "frappe", ".", "flags", ".", "in_install", "!=", "u'frappe'", ")", ")", ":", "frappe", ".", "get_doc", "(", "dict", "(", "doctype", "=", "u'Deleted Document'", ",", "deleted_doctype", "=", "doc", ".", "doctype", ",", "deleted_name", "=", "doc", ".", "name", ",", "data", "=", "doc", ".", "as_json", "(", ")", ")", ")", ".", "db_insert", "(", ")" ]
add this document to deleted document table .
train
false
3,288
def test_values(enum):
    assert (enum.one.value == 1)
    assert (enum.two.value == 2)
[ "def", "test_values", "(", "enum", ")", ":", "assert", "(", "enum", ".", "one", ".", "value", "==", "1", ")", "assert", "(", "enum", ".", "two", ".", "value", "==", "2", ")" ]
test if enum members resolve to the right values .
train
false
3,290
def issued_certificates(course_key, features):
    report_run_date = datetime.date.today().strftime('%B %d, %Y')
    certificate_features = [x for x in CERTIFICATE_FEATURES if (x in features)]
    generated_certificates = list(GeneratedCertificate.eligible_certificates.filter(course_id=course_key, status=CertificateStatuses.downloadable).values(*certificate_features).annotate(total_issued_certificate=Count('mode')))
    for data in generated_certificates:
        data['report_run_date'] = report_run_date
    return generated_certificates
[ "def", "issued_certificates", "(", "course_key", ",", "features", ")", ":", "report_run_date", "=", "datetime", ".", "date", ".", "today", "(", ")", ".", "strftime", "(", "'%B %d, %Y'", ")", "certificate_features", "=", "[", "x", "for", "x", "in", "CERTIFICATE_FEATURES", "if", "(", "x", "in", "features", ")", "]", "generated_certificates", "=", "list", "(", "GeneratedCertificate", ".", "eligible_certificates", ".", "filter", "(", "course_id", "=", "course_key", ",", "status", "=", "CertificateStatuses", ".", "downloadable", ")", ".", "values", "(", "*", "certificate_features", ")", ".", "annotate", "(", "total_issued_certificate", "=", "Count", "(", "'mode'", ")", ")", ")", "for", "data", "in", "generated_certificates", ":", "data", "[", "'report_run_date'", "]", "=", "report_run_date", "return", "generated_certificates" ]
return list of issued certificates as dictionaries against the given course key .
train
false
3,292
def writes_models(model):
    def decorated(func):
        '''
        Decorator for the creation function.
        '''
        _WRITE_MODEL[model] = func
        return func
    return decorated
[ "def", "writes_models", "(", "model", ")", ":", "def", "decorated", "(", "func", ")", ":", "_WRITE_MODEL", "[", "model", "]", "=", "func", "return", "func", "return", "decorated" ]
register a model-specific create and update function .
train
false
3,293
@task(aliases=['elastic'])
def elasticsearch(ctx):
    import platform
    if (platform.linux_distribution()[0] == 'Ubuntu'):
        ctx.run('sudo service elasticsearch start')
    elif (platform.system() == 'Darwin'):
        ctx.run('elasticsearch')
    else:
        print 'Your system is not recognized, you will have to start elasticsearch manually'
[ "@", "task", "(", "aliases", "=", "[", "'elastic'", "]", ")", "def", "elasticsearch", "(", "ctx", ")", ":", "import", "platform", "if", "(", "platform", ".", "linux_distribution", "(", ")", "[", "0", "]", "==", "'Ubuntu'", ")", ":", "ctx", ".", "run", "(", "'sudo service elasticsearch start'", ")", "elif", "(", "platform", ".", "system", "(", ")", "==", "'Darwin'", ")", ":", "ctx", ".", "run", "(", "'elasticsearch'", ")", "else", ":", "print", "'Your system is not recognized, you will have to start elasticsearch manually'" ]
start a local elasticsearch server . note : requires that elasticsearch is installed .
train
false
3,294
def test_blank_image_nans():
    detectors = [corner_moravec, corner_harris, corner_shi_tomasi, corner_kitchen_rosenfeld, corner_foerstner]
    constant_image = np.zeros((20, 20))
    for det in detectors:
        response = det(constant_image)
        assert np.all(np.isfinite(response))
[ "def", "test_blank_image_nans", "(", ")", ":", "detectors", "=", "[", "corner_moravec", ",", "corner_harris", ",", "corner_shi_tomasi", ",", "corner_kitchen_rosenfeld", ",", "corner_foerstner", "]", "constant_image", "=", "np", ".", "zeros", "(", "(", "20", ",", "20", ")", ")", "for", "det", "in", "detectors", ":", "response", "=", "det", "(", "constant_image", ")", "assert", "np", ".", "all", "(", "np", ".", "isfinite", "(", "response", ")", ")" ]
some of the corner detectors had a weakness in terms of returning nan when presented with regions of constant intensity .
train
false
3,297
def plot_ccpr_grid(results, exog_idx=None, grid=None, fig=None):
    fig = utils.create_mpl_fig(fig)
    (exog_name, exog_idx) = utils.maybe_name_or_idx(exog_idx, results.model)
    if (grid is not None):
        (nrows, ncols) = grid
    elif (len(exog_idx) > 2):
        nrows = int(np.ceil((len(exog_idx) / 2.0)))
        ncols = 2
    else:
        nrows = len(exog_idx)
        ncols = 1
    seen_constant = 0
    for (i, idx) in enumerate(exog_idx):
        if (results.model.exog[:, idx].var() == 0):
            seen_constant = 1
            continue
        ax = fig.add_subplot(nrows, ncols, ((i + 1) - seen_constant))
        fig = plot_ccpr(results, exog_idx=idx, ax=ax)
        ax.set_title('')
    fig.suptitle('Component-Component Plus Residual Plot', fontsize='large')
    fig.tight_layout()
    fig.subplots_adjust(top=0.95)
    return fig
[ "def", "plot_ccpr_grid", "(", "results", ",", "exog_idx", "=", "None", ",", "grid", "=", "None", ",", "fig", "=", "None", ")", ":", "fig", "=", "utils", ".", "create_mpl_fig", "(", "fig", ")", "(", "exog_name", ",", "exog_idx", ")", "=", "utils", ".", "maybe_name_or_idx", "(", "exog_idx", ",", "results", ".", "model", ")", "if", "(", "grid", "is", "not", "None", ")", ":", "(", "nrows", ",", "ncols", ")", "=", "grid", "elif", "(", "len", "(", "exog_idx", ")", ">", "2", ")", ":", "nrows", "=", "int", "(", "np", ".", "ceil", "(", "(", "len", "(", "exog_idx", ")", "/", "2.0", ")", ")", ")", "ncols", "=", "2", "else", ":", "nrows", "=", "len", "(", "exog_idx", ")", "ncols", "=", "1", "seen_constant", "=", "0", "for", "(", "i", ",", "idx", ")", "in", "enumerate", "(", "exog_idx", ")", ":", "if", "(", "results", ".", "model", ".", "exog", "[", ":", ",", "idx", "]", ".", "var", "(", ")", "==", "0", ")", ":", "seen_constant", "=", "1", "continue", "ax", "=", "fig", ".", "add_subplot", "(", "nrows", ",", "ncols", ",", "(", "(", "i", "+", "1", ")", "-", "seen_constant", ")", ")", "fig", "=", "plot_ccpr", "(", "results", ",", "exog_idx", "=", "idx", ",", "ax", "=", "ax", ")", "ax", ".", "set_title", "(", "''", ")", "fig", ".", "suptitle", "(", "'Component-Component Plus Residual Plot'", ",", "fontsize", "=", "'large'", ")", "fig", ".", "tight_layout", "(", ")", "fig", ".", "subplots_adjust", "(", "top", "=", "0.95", ")", "return", "fig" ]
generate ccpr plots against a set of regressors .
train
false
3,298
def num_active_contributors(from_date, to_date=None, locale=None, product=None):
    return len(_active_contributors_id(from_date, to_date, locale, product))
[ "def", "num_active_contributors", "(", "from_date", ",", "to_date", "=", "None", ",", "locale", "=", "None", ",", "product", "=", "None", ")", ":", "return", "len", "(", "_active_contributors_id", "(", "from_date", ",", "to_date", ",", "locale", ",", "product", ")", ")" ]
return number of active kb contributors for the specified parameters .
train
false
3,299
@pytest.fixture(params=[(amo.STATUS_NOMINATED, amo.STATUS_AWAITING_REVIEW, 'full'), (amo.STATUS_PUBLIC, amo.STATUS_AWAITING_REVIEW, 'full')], ids=id_function)
def use_case(request, db):
    (addon_status, file_status, review_type) = request.param
    addon = addon_factory(status=addon_status, guid='foo')
    version = addon.find_latest_version(amo.RELEASE_CHANNEL_LISTED)
    file1 = version.files.get()
    file1.update(status=file_status)
    file2 = amo.tests.file_factory(version=version, status=file_status)
    addon.update(status=addon_status)
    assert (addon.reload().status == addon_status)
    assert (file1.reload().status == file_status)
    assert (file2.reload().status == file_status)
    return (addon, file1, file2, review_type)
[ "@", "pytest", ".", "fixture", "(", "params", "=", "[", "(", "amo", ".", "STATUS_NOMINATED", ",", "amo", ".", "STATUS_AWAITING_REVIEW", ",", "'full'", ")", ",", "(", "amo", ".", "STATUS_PUBLIC", ",", "amo", ".", "STATUS_AWAITING_REVIEW", ",", "'full'", ")", "]", ",", "ids", "=", "id_function", ")", "def", "use_case", "(", "request", ",", "db", ")", ":", "(", "addon_status", ",", "file_status", ",", "review_type", ")", "=", "request", ".", "param", "addon", "=", "addon_factory", "(", "status", "=", "addon_status", ",", "guid", "=", "'foo'", ")", "version", "=", "addon", ".", "find_latest_version", "(", "amo", ".", "RELEASE_CHANNEL_LISTED", ")", "file1", "=", "version", ".", "files", ".", "get", "(", ")", "file1", ".", "update", "(", "status", "=", "file_status", ")", "file2", "=", "amo", ".", "tests", ".", "file_factory", "(", "version", "=", "version", ",", "status", "=", "file_status", ")", "addon", ".", "update", "(", "status", "=", "addon_status", ")", "assert", "(", "addon", ".", "reload", "(", ")", ".", "status", "==", "addon_status", ")", "assert", "(", "file1", ".", "reload", "(", ")", ".", "status", "==", "file_status", ")", "assert", "(", "file2", ".", "reload", "(", ")", ".", "status", "==", "file_status", ")", "return", "(", "addon", ",", "file1", ",", "file2", ",", "review_type", ")" ]
this fixture will return quadruples for different use cases .
train
false
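A hypothetical pytest consumer of the fixture above; the test name and assertions are illustrative, not from the source:

def test_addon_awaiting_review(use_case):
    addon, file1, file2, review_type = use_case
    assert review_type == 'full'
    assert file1.status == file2.status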
3,302
def GetImages(region, owner_ids=None): ec2 = _Connect(region) if (not owner_ids): return None return ec2.get_all_images(owners=owner_ids)
[ "def", "GetImages", "(", "region", ",", "owner_ids", "=", "None", ")", ":", "ec2", "=", "_Connect", "(", "region", ")", "if", "(", "not", "owner_ids", ")", ":", "return", "None", "return", "ec2", ".", "get_all_images", "(", "owners", "=", "owner_ids", ")" ]
return the list of images owned by the account ids in owner_ids .
train
false
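A usage sketch with made-up identifiers; the region name and owner id are placeholders, and _Connect is assumed to return a boto EC2 connection as in the snippet:

images = GetImages('us-east-1', owner_ids=['123456789012'])
if images is not None:          # None is returned when no owner ids are given
    for image in images:
        print(image.id)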
3,303
def authenticate_cookie(controller, cookie_path, suppress_ctl_errors=True): cookie_data = _read_cookie(cookie_path, False) try: auth_token_hex = binascii.b2a_hex(stem.util.str_tools._to_bytes(cookie_data)) msg = ('AUTHENTICATE %s' % stem.util.str_tools._to_unicode(auth_token_hex)) auth_response = _msg(controller, msg) if (str(auth_response) != 'OK'): try: controller.connect() except: pass if (('*or* authentication cookie.' in str(auth_response)) or ('Authentication cookie did not match expected value.' in str(auth_response))): raise IncorrectCookieValue(str(auth_response), cookie_path, False, auth_response) else: raise CookieAuthRejected(str(auth_response), cookie_path, False, auth_response) except stem.ControllerError as exc: try: controller.connect() except: pass if (not suppress_ctl_errors): raise exc else: raise CookieAuthRejected(('Socket failed (%s)' % exc), cookie_path, False)
[ "def", "authenticate_cookie", "(", "controller", ",", "cookie_path", ",", "suppress_ctl_errors", "=", "True", ")", ":", "cookie_data", "=", "_read_cookie", "(", "cookie_path", ",", "False", ")", "try", ":", "auth_token_hex", "=", "binascii", ".", "b2a_hex", "(", "stem", ".", "util", ".", "str_tools", ".", "_to_bytes", "(", "cookie_data", ")", ")", "msg", "=", "(", "'AUTHENTICATE %s'", "%", "stem", ".", "util", ".", "str_tools", ".", "_to_unicode", "(", "auth_token_hex", ")", ")", "auth_response", "=", "_msg", "(", "controller", ",", "msg", ")", "if", "(", "str", "(", "auth_response", ")", "!=", "'OK'", ")", ":", "try", ":", "controller", ".", "connect", "(", ")", "except", ":", "pass", "if", "(", "(", "'*or* authentication cookie.'", "in", "str", "(", "auth_response", ")", ")", "or", "(", "'Authentication cookie did not match expected value.'", "in", "str", "(", "auth_response", ")", ")", ")", ":", "raise", "IncorrectCookieValue", "(", "str", "(", "auth_response", ")", ",", "cookie_path", ",", "False", ",", "auth_response", ")", "else", ":", "raise", "CookieAuthRejected", "(", "str", "(", "auth_response", ")", ",", "cookie_path", ",", "False", ",", "auth_response", ")", "except", "stem", ".", "ControllerError", "as", "exc", ":", "try", ":", "controller", ".", "connect", "(", ")", "except", ":", "pass", "if", "(", "not", "suppress_ctl_errors", ")", ":", "raise", "exc", "else", ":", "raise", "CookieAuthRejected", "(", "(", "'Socket failed (%s)'", "%", "exc", ")", ",", "cookie_path", ",", "False", ")" ]
authenticates to a control socket that uses the contents of an authentication cookie .
train
false
3,306
@lru_cache() def time_to_days(value): if (value.tzinfo is not None): value = value.astimezone(UTC) return (((((value.hour * 3600) + (value.minute * 60)) + value.second) + (value.microsecond / (10 ** 6))) / SECS_PER_DAY)
[ "@", "lru_cache", "(", ")", "def", "time_to_days", "(", "value", ")", ":", "if", "(", "value", ".", "tzinfo", "is", "not", "None", ")", ":", "value", "=", "value", ".", "astimezone", "(", "UTC", ")", "return", "(", "(", "(", "(", "(", "value", ".", "hour", "*", "3600", ")", "+", "(", "value", ".", "minute", "*", "60", ")", ")", "+", "value", ".", "second", ")", "+", "(", "value", ".", "microsecond", "/", "(", "10", "**", "6", ")", ")", ")", "/", "SECS_PER_DAY", ")" ]
convert a time value to a fraction of a day .
train
false
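A self-contained check of the same arithmetic, assuming SECS_PER_DAY is 86400:

from datetime import time

SECS_PER_DAY = 24 * 60 * 60
t = time(18, 0)  # 18:00, no tzinfo
frac = (t.hour * 3600 + t.minute * 60 + t.second + t.microsecond / 10**6) / SECS_PER_DAY
assert frac == 0.75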
3,308
def randn(*size, **kwarg): dtype = kwarg.pop('dtype', float) if kwarg: raise TypeError(('randn() got unexpected keyword arguments %s' % ', '.join(kwarg.keys()))) return distributions.normal(size=size, dtype=dtype)
[ "def", "randn", "(", "*", "size", ",", "**", "kwarg", ")", ":", "dtype", "=", "kwarg", ".", "pop", "(", "'dtype'", ",", "float", ")", "if", "kwarg", ":", "raise", "TypeError", "(", "(", "'randn() got unexpected keyword arguments %s'", "%", "', '", ".", "join", "(", "kwarg", ".", "keys", "(", ")", ")", ")", ")", "return", "distributions", ".", "normal", "(", "size", "=", "size", ",", "dtype", "=", "dtype", ")" ]
returns an array of standard normal random values .
train
false
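A usage sketch assuming this is CuPy's randn; the dtype keyword is the point of the wrapper, since numpy.random.randn accepts no dtype argument:

import cupy
x = cupy.random.randn(2, 3, dtype=cupy.float32)
assert x.shape == (2, 3) and x.dtype == cupy.float32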
3,309
def S_IMODE(mode): return (mode & 4095)
[ "def", "S_IMODE", "(", "mode", ")", ":", "return", "(", "mode", "&", "4095", ")" ]
return the portion of the file's mode that can be set by os.chmod() .
train
false
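A runnable stdlib check: S_IMODE masks off the file-type bits, keeping only the permission bits (4095 is 0o7777, covering the setuid/setgid/sticky and rwx bits):

import stat
mode = stat.S_IFREG | 0o644   # regular file, rw-r--r--
assert stat.S_IMODE(mode) == 0o644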
3,311
def assert_mock_called_once_with_partial(mock, *args, **kwargs): assert (len(mock.mock_calls) == 1) (m_args, m_kwargs) = mock.call_args for (i, arg) in enumerate(args): assert (m_args[i] == arg) for kwarg in kwargs: assert (m_kwargs[kwarg] == kwargs[kwarg])
[ "def", "assert_mock_called_once_with_partial", "(", "mock", ",", "*", "args", ",", "**", "kwargs", ")", ":", "assert", "(", "len", "(", "mock", ".", "mock_calls", ")", "==", "1", ")", "(", "m_args", ",", "m_kwargs", ")", "=", "mock", ".", "call_args", "for", "(", "i", ",", "arg", ")", "in", "enumerate", "(", "args", ")", ":", "assert", "(", "m_args", "[", "i", "]", "==", "arg", ")", "for", "kwarg", "in", "kwargs", ":", "assert", "(", "m_kwargs", "[", "kwarg", "]", "==", "kwargs", "[", "kwarg", "]", ")" ]
similar to mock.assert_called_once_with , but only the supplied positional and keyword arguments are checked .
train
false
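A usage sketch, given the helper above: unlike mock.assert_called_once_with, extra arguments on the recorded call do not fail the assertion:

from unittest import mock
m = mock.Mock()
m('a', 'b', key=1, extra=2)
assert_mock_called_once_with_partial(m, 'a', key=1)   # passes; 'b' and extra are ignored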
3,312
def needs_g77_abi_wrapper(info): if (uses_accelerate(info) or uses_veclib(info)): return True elif uses_mkl(info): return True else: return False
[ "def", "needs_g77_abi_wrapper", "(", "info", ")", ":", "if", "(", "uses_accelerate", "(", "info", ")", "or", "uses_veclib", "(", "info", ")", ")", ":", "return", "True", "elif", "uses_mkl", "(", "info", ")", ":", "return", "True", "else", ":", "return", "False" ]
returns true if g77 abi wrapper must be used .
train
false
3,314
@pytest.mark.network def test_download_setuptools(script): result = script.pip('download', 'setuptools') setuptools_prefix = str((Path('scratch') / 'setuptools')) assert any((path.startswith(setuptools_prefix) for path in result.files_created))
[ "@", "pytest", ".", "mark", ".", "network", "def", "test_download_setuptools", "(", "script", ")", ":", "result", "=", "script", ".", "pip", "(", "'download'", ",", "'setuptools'", ")", "setuptools_prefix", "=", "str", "(", "(", "Path", "(", "'scratch'", ")", "/", "'setuptools'", ")", ")", "assert", "any", "(", "(", "path", ".", "startswith", "(", "setuptools_prefix", ")", "for", "path", "in", "result", ".", "files_created", ")", ")" ]
it should download and not install if requested .
train
false
3,315
def is_image_extendable(image): LOG.debug('Checking if we can extend filesystem inside %(image)s.', {'image': image}) if ((not isinstance(image, imgmodel.LocalImage)) or (image.format != imgmodel.FORMAT_RAW)): fs = None try: fs = vfs.VFS.instance_for_image(image, None) fs.setup(mount=False) if (fs.get_image_fs() in SUPPORTED_FS_TO_EXTEND): return True except exception.NovaException as e: LOG.warning(_LW('Unable to mount image %(image)s with error %(error)s. Cannot resize.'), {'image': image, 'error': e}) finally: if (fs is not None): fs.teardown() return False else: try: utils.execute('e2label', image.path) except processutils.ProcessExecutionError as e: LOG.debug('Unable to determine label for image %(image)s with error %(error)s. Cannot resize.', {'image': image, 'error': e}) return False return True
[ "def", "is_image_extendable", "(", "image", ")", ":", "LOG", ".", "debug", "(", "'Checking if we can extend filesystem inside %(image)s.'", ",", "{", "'image'", ":", "image", "}", ")", "if", "(", "(", "not", "isinstance", "(", "image", ",", "imgmodel", ".", "LocalImage", ")", ")", "or", "(", "image", ".", "format", "!=", "imgmodel", ".", "FORMAT_RAW", ")", ")", ":", "fs", "=", "None", "try", ":", "fs", "=", "vfs", ".", "VFS", ".", "instance_for_image", "(", "image", ",", "None", ")", "fs", ".", "setup", "(", "mount", "=", "False", ")", "if", "(", "fs", ".", "get_image_fs", "(", ")", "in", "SUPPORTED_FS_TO_EXTEND", ")", ":", "return", "True", "except", "exception", ".", "NovaException", "as", "e", ":", "LOG", ".", "warning", "(", "_LW", "(", "'Unable to mount image %(image)s with error %(error)s. Cannot resize.'", ")", ",", "{", "'image'", ":", "image", ",", "'error'", ":", "e", "}", ")", "finally", ":", "if", "(", "fs", "is", "not", "None", ")", ":", "fs", ".", "teardown", "(", ")", "return", "False", "else", ":", "try", ":", "utils", ".", "execute", "(", "'e2label'", ",", "image", ".", "path", ")", "except", "processutils", ".", "ProcessExecutionError", "as", "e", ":", "LOG", ".", "debug", "(", "'Unable to determine label for image %(image)s with error %(error)s. Cannot resize.'", ",", "{", "'image'", ":", "image", ",", "'error'", ":", "e", "}", ")", "return", "False", "return", "True" ]
check whether we can extend the image .
train
false
3,317
@login_required def edit_answer(request, question_id, answer_id): answer = get_object_or_404(Answer, pk=answer_id, question=question_id) answer_preview = None if (not answer.allows_edit(request.user)): raise PermissionDenied upload_imageattachment(request, answer) if (request.method == 'GET'): form = AnswerForm({'content': answer.content}) return render(request, 'questions/edit_answer.html', {'form': form, 'answer': answer}) form = AnswerForm(request.POST) if form.is_valid(): answer.content = form.cleaned_data['content'] answer.updated_by = request.user if ('preview' in request.POST): answer.updated = datetime.now() answer_preview = answer else: log.warning(('User %s is editing answer with id=%s' % (request.user, answer.id))) answer.save() return HttpResponseRedirect(answer.get_absolute_url()) return render(request, 'questions/edit_answer.html', {'form': form, 'answer': answer, 'answer_preview': answer_preview})
[ "@", "login_required", "def", "edit_answer", "(", "request", ",", "question_id", ",", "answer_id", ")", ":", "answer", "=", "get_object_or_404", "(", "Answer", ",", "pk", "=", "answer_id", ",", "question", "=", "question_id", ")", "answer_preview", "=", "None", "if", "(", "not", "answer", ".", "allows_edit", "(", "request", ".", "user", ")", ")", ":", "raise", "PermissionDenied", "upload_imageattachment", "(", "request", ",", "answer", ")", "if", "(", "request", ".", "method", "==", "'GET'", ")", ":", "form", "=", "AnswerForm", "(", "{", "'content'", ":", "answer", ".", "content", "}", ")", "return", "render", "(", "request", ",", "'questions/edit_answer.html'", ",", "{", "'form'", ":", "form", ",", "'answer'", ":", "answer", "}", ")", "form", "=", "AnswerForm", "(", "request", ".", "POST", ")", "if", "form", ".", "is_valid", "(", ")", ":", "answer", ".", "content", "=", "form", ".", "cleaned_data", "[", "'content'", "]", "answer", ".", "updated_by", "=", "request", ".", "user", "if", "(", "'preview'", "in", "request", ".", "POST", ")", ":", "answer", ".", "updated", "=", "datetime", ".", "now", "(", ")", "answer_preview", "=", "answer", "else", ":", "log", ".", "warning", "(", "(", "'User %s is editing answer with id=%s'", "%", "(", "request", ".", "user", ",", "answer", ".", "id", ")", ")", ")", "answer", ".", "save", "(", ")", "return", "HttpResponseRedirect", "(", "answer", ".", "get_absolute_url", "(", ")", ")", "return", "render", "(", "request", ",", "'questions/edit_answer.html'", ",", "{", "'form'", ":", "form", ",", "'answer'", ":", "answer", ",", "'answer_preview'", ":", "answer_preview", "}", ")" ]
edit an answer .
train
false
3,318
def main_check_all(): for (test_file, src_file) in PERFECT_FILES: if (test_file is None): continue subprocess.check_call([sys.executable, '-m', 'pytest', '--cov', 'qutebrowser', '--cov-report', 'xml', test_file]) with open('coverage.xml', encoding='utf-8') as f: messages = check(f, [(test_file, src_file)]) os.remove('coverage.xml') messages = [msg for msg in messages if (msg.typ == MsgType.insufficent_coverage)] if messages: for msg in messages: print msg.text return 1 else: print 'Check ok!' return 0
[ "def", "main_check_all", "(", ")", ":", "for", "(", "test_file", ",", "src_file", ")", "in", "PERFECT_FILES", ":", "if", "(", "test_file", "is", "None", ")", ":", "continue", "subprocess", ".", "check_call", "(", "[", "sys", ".", "executable", ",", "'-m'", ",", "'pytest'", ",", "'--cov'", ",", "'qutebrowser'", ",", "'--cov-report'", ",", "'xml'", ",", "test_file", "]", ")", "with", "open", "(", "'coverage.xml'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "messages", "=", "check", "(", "f", ",", "[", "(", "test_file", ",", "src_file", ")", "]", ")", "os", ".", "remove", "(", "'coverage.xml'", ")", "messages", "=", "[", "msg", "for", "msg", "in", "messages", "if", "(", "msg", ".", "typ", "==", "MsgType", ".", "insufficent_coverage", ")", "]", "if", "messages", ":", "for", "msg", "in", "messages", ":", "print", "msg", ".", "text", "return", "1", "else", ":", "print", "'Check ok!'", "return", "0" ]
check the coverage for all files individually .
train
false
3,319
def DirectProduct(*groups): degrees = [] gens_count = [] total_degree = 0 total_gens = 0 for group in groups: current_deg = group.degree current_num_gens = len(group.generators) degrees.append(current_deg) total_degree += current_deg gens_count.append(current_num_gens) total_gens += current_num_gens array_gens = [] for i in range(total_gens): array_gens.append(list(range(total_degree))) current_gen = 0 current_deg = 0 for i in range(len(gens_count)): for j in range(current_gen, (current_gen + gens_count[i])): gen = groups[i].generators[(j - current_gen)].array_form array_gens[j][current_deg:(current_deg + degrees[i])] = [(x + current_deg) for x in gen] current_gen += gens_count[i] current_deg += degrees[i] perm_gens = list(uniq([_af_new(list(a)) for a in array_gens])) return PermutationGroup(perm_gens, dups=False)
[ "def", "DirectProduct", "(", "*", "groups", ")", ":", "degrees", "=", "[", "]", "gens_count", "=", "[", "]", "total_degree", "=", "0", "total_gens", "=", "0", "for", "group", "in", "groups", ":", "current_deg", "=", "group", ".", "degree", "current_num_gens", "=", "len", "(", "group", ".", "generators", ")", "degrees", ".", "append", "(", "current_deg", ")", "total_degree", "+=", "current_deg", "gens_count", ".", "append", "(", "current_num_gens", ")", "total_gens", "+=", "current_num_gens", "array_gens", "=", "[", "]", "for", "i", "in", "range", "(", "total_gens", ")", ":", "array_gens", ".", "append", "(", "list", "(", "range", "(", "total_degree", ")", ")", ")", "current_gen", "=", "0", "current_deg", "=", "0", "for", "i", "in", "range", "(", "len", "(", "gens_count", ")", ")", ":", "for", "j", "in", "range", "(", "current_gen", ",", "(", "current_gen", "+", "gens_count", "[", "i", "]", ")", ")", ":", "gen", "=", "groups", "[", "i", "]", ".", "generators", "[", "(", "j", "-", "current_gen", ")", "]", ".", "array_form", "array_gens", "[", "j", "]", "[", "current_deg", ":", "(", "current_deg", "+", "degrees", "[", "i", "]", ")", "]", "=", "[", "(", "x", "+", "current_deg", ")", "for", "x", "in", "gen", "]", "current_gen", "+=", "gens_count", "[", "i", "]", "current_deg", "+=", "degrees", "[", "i", "]", "perm_gens", "=", "list", "(", "uniq", "(", "[", "_af_new", "(", "list", "(", "a", ")", ")", "for", "a", "in", "array_gens", "]", ")", ")", "return", "PermutationGroup", "(", "perm_gens", ",", "dups", "=", "False", ")" ]
returns the direct product of several groups as a permutation group .
train
false
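A runnable sympy example: the direct product of two copies of C_2 is the Klein four-group, acting on 2 + 2 = 4 points:

from sympy.combinatorics.named_groups import CyclicGroup
from sympy.combinatorics.group_constructs import DirectProduct

G = DirectProduct(CyclicGroup(2), CyclicGroup(2))
assert G.degree == 4 and G.order() == 4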
3,321
@contextlib.contextmanager def no_internet(verbose=False): already_disabled = INTERNET_OFF turn_off_internet(verbose=verbose) try: (yield) finally: if (not already_disabled): turn_on_internet(verbose=verbose)
[ "@", "contextlib", ".", "contextmanager", "def", "no_internet", "(", "verbose", "=", "False", ")", ":", "already_disabled", "=", "INTERNET_OFF", "turn_off_internet", "(", "verbose", "=", "verbose", ")", "try", ":", "(", "yield", ")", "finally", ":", "if", "(", "not", "already_disabled", ")", ":", "turn_on_internet", "(", "verbose", "=", "verbose", ")" ]
context manager to temporarily disable internet access .
train
false
3,322
def clean_node(node): (host, port) = partition_node(node) return (host.lower(), port)
[ "def", "clean_node", "(", "node", ")", ":", "(", "host", ",", "port", ")", "=", "partition_node", "(", "node", ")", "return", "(", "host", ".", "lower", "(", ")", ",", "port", ")" ]
split and normalize a node name from an ismaster response .
train
false
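A sketch assuming partition_node behaves like pymongo's helper, splitting 'host:port' and defaulting the port to 27017:

assert clean_node('ExampleHost.local:27018') == ('examplehost.local', 27018)
assert clean_node('ExampleHost.local') == ('examplehost.local', 27017)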
3,325
def get_sandbox_python_binary_path(pack=None): system_base_path = cfg.CONF.system.base_path virtualenv_path = os.path.join(system_base_path, 'virtualenvs', pack) if (pack in SYSTEM_PACK_NAMES): python_path = sys.executable else: python_path = os.path.join(virtualenv_path, 'bin/python') return python_path
[ "def", "get_sandbox_python_binary_path", "(", "pack", "=", "None", ")", ":", "system_base_path", "=", "cfg", ".", "CONF", ".", "system", ".", "base_path", "virtualenv_path", "=", "os", ".", "path", ".", "join", "(", "system_base_path", ",", "'virtualenvs'", ",", "pack", ")", "if", "(", "pack", "in", "SYSTEM_PACK_NAMES", ")", ":", "python_path", "=", "sys", ".", "executable", "else", ":", "python_path", "=", "os", ".", "path", ".", "join", "(", "virtualenv_path", ",", "'bin/python'", ")", "return", "python_path" ]
return path to the python binary for the provided pack .
train
false
3,326
def _should_skip_elem(elem, type=None, dest=None): if (('draft' in elem.attrib) or ('alt' in elem.attrib)): if ((dest is None) or (type in dest)): return True
[ "def", "_should_skip_elem", "(", "elem", ",", "type", "=", "None", ",", "dest", "=", "None", ")", ":", "if", "(", "(", "'draft'", "in", "elem", ".", "attrib", ")", "or", "(", "'alt'", "in", "elem", ".", "attrib", ")", ")", ":", "if", "(", "(", "dest", "is", "None", ")", "or", "(", "type", "in", "dest", ")", ")", ":", "return", "True" ]
check whether the given element should be skipped .
train
false
3,327
def gf_shoup(f, p, K): factors = [] for (factor, n) in gf_ddf_shoup(f, p, K): factors += gf_edf_shoup(factor, n, p, K) return _sort_factors(factors, multiple=False)
[ "def", "gf_shoup", "(", "f", ",", "p", ",", "K", ")", ":", "factors", "=", "[", "]", "for", "(", "factor", ",", "n", ")", "in", "gf_ddf_shoup", "(", "f", ",", "p", ",", "K", ")", ":", "factors", "+=", "gf_edf_shoup", "(", "factor", ",", "n", ",", "p", ",", "K", ")", "return", "_sort_factors", "(", "factors", ",", "multiple", "=", "False", ")" ]
factor a square-free f in gf(p)[x] for large p .
train
false
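A runnable sympy example: x**2 + 4*x + 3 factors as (x + 1)*(x + 3) over GF(5):

from sympy.polys.domains import ZZ
from sympy.polys.galoistools import gf_shoup

assert gf_shoup([1, 4, 3], 5, ZZ) == [[1, 1], [1, 3]]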
3,328
def get_volume_extra_specs(volume): ctxt = context.get_admin_context() type_id = volume.get('volume_type_id') if (type_id is None): return {} volume_type = volume_types.get_volume_type(ctxt, type_id) if (volume_type is None): return {} extra_specs = volume_type.get('extra_specs', {}) log_extra_spec_warnings(extra_specs) return extra_specs
[ "def", "get_volume_extra_specs", "(", "volume", ")", ":", "ctxt", "=", "context", ".", "get_admin_context", "(", ")", "type_id", "=", "volume", ".", "get", "(", "'volume_type_id'", ")", "if", "(", "type_id", "is", "None", ")", ":", "return", "{", "}", "volume_type", "=", "volume_types", ".", "get_volume_type", "(", "ctxt", ",", "type_id", ")", "if", "(", "volume_type", "is", "None", ")", ":", "return", "{", "}", "extra_specs", "=", "volume_type", ".", "get", "(", "'extra_specs'", ",", "{", "}", ")", "log_extra_spec_warnings", "(", "extra_specs", ")", "return", "extra_specs" ]
provides the extra specs associated with the volume .
train
false
3,329
def _netbsd_gpu_data(): known_vendors = ['nvidia', 'amd', 'ati', 'intel', 'cirrus logic', 'vmware'] gpus = [] try: pcictl_out = __salt__['cmd.run']('pcictl pci0 list') for line in pcictl_out.splitlines(): for vendor in known_vendors: vendor_match = re.match('[0-9:]+ ({0}) (.+) \\(VGA .+\\)'.format(vendor), line, re.IGNORECASE) if vendor_match: gpus.append({'vendor': vendor_match.group(1), 'model': vendor_match.group(2)}) except OSError: pass grains = {} grains['num_gpus'] = len(gpus) grains['gpus'] = gpus return grains
[ "def", "_netbsd_gpu_data", "(", ")", ":", "known_vendors", "=", "[", "'nvidia'", ",", "'amd'", ",", "'ati'", ",", "'intel'", ",", "'cirrus logic'", ",", "'vmware'", "]", "gpus", "=", "[", "]", "try", ":", "pcictl_out", "=", "__salt__", "[", "'cmd.run'", "]", "(", "'pcictl pci0 list'", ")", "for", "line", "in", "pcictl_out", ".", "splitlines", "(", ")", ":", "for", "vendor", "in", "known_vendors", ":", "vendor_match", "=", "re", ".", "match", "(", "'[0-9:]+ ({0}) (.+) \\\\(VGA .+\\\\)'", ".", "format", "(", "vendor", ")", ",", "line", ",", "re", ".", "IGNORECASE", ")", "if", "vendor_match", ":", "gpus", ".", "append", "(", "{", "'vendor'", ":", "vendor_match", ".", "group", "(", "1", ")", ",", "'model'", ":", "vendor_match", ".", "group", "(", "2", ")", "}", ")", "except", "OSError", ":", "pass", "grains", "=", "{", "}", "grains", "[", "'num_gpus'", "]", "=", "len", "(", "gpus", ")", "grains", "[", "'gpus'", "]", "=", "gpus", "return", "grains" ]
return netbsd gpu grains: num_gpus (int) and gpus (a list of dicts with vendor and model keys , vendor being one of nvidia|amd|ati|...) .
train
true
3,330
def copy_over(source, dest): if (os.path.exists(dest) and os.path.isdir(dest)): shutil.rmtree(dest) shutil.copytree(source, dest) os.chmod(dest, ((((stat.S_IRWXU | stat.S_IRGRP) | stat.S_IXGRP) | stat.S_IROTH) | stat.S_IXOTH)) shutil.rmtree(source)
[ "def", "copy_over", "(", "source", ",", "dest", ")", ":", "if", "(", "os", ".", "path", ".", "exists", "(", "dest", ")", "and", "os", ".", "path", ".", "isdir", "(", "dest", ")", ")", ":", "shutil", ".", "rmtree", "(", "dest", ")", "shutil", ".", "copytree", "(", "source", ",", "dest", ")", "os", ".", "chmod", "(", "dest", ",", "(", "(", "(", "(", "stat", ".", "S_IRWXU", "|", "stat", ".", "S_IRGRP", ")", "|", "stat", ".", "S_IXGRP", ")", "|", "stat", ".", "S_IROTH", ")", "|", "stat", ".", "S_IXOTH", ")", ")", "shutil", ".", "rmtree", "(", "source", ")" ]
copies from the source to the destination .
train
false
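A self-contained check of the move semantics, given the function above (which needs os, shutil and stat imported): the tree lands at dest and the source directory is removed:

import os, tempfile

src = tempfile.mkdtemp()
dst = tempfile.mkdtemp()
open(os.path.join(src, 'f.txt'), 'w').close()
copy_over(src, dst)
assert os.path.exists(os.path.join(dst, 'f.txt')) and not os.path.exists(src)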
3,332
def download_attachments(output_path, urls): locations = [] for url in urls: path = urlparse(url).path path = path.split(u'/') filename = path.pop((-1)) localpath = u'' for item in path: if ((sys.platform != u'win32') or (u':' not in item)): localpath = os.path.join(localpath, item) full_path = os.path.join(output_path, localpath) if (not os.path.exists(full_path)): os.makedirs(full_path) print(u'downloading {}'.format(filename)) try: urlretrieve(url, os.path.join(full_path, filename)) locations.append(os.path.join(localpath, filename)) except (URLError, IOError) as e: logger.warning(u'No file could be downloaded from %s\n%s', url, e) return locations
[ "def", "download_attachments", "(", "output_path", ",", "urls", ")", ":", "locations", "=", "[", "]", "for", "url", "in", "urls", ":", "path", "=", "urlparse", "(", "url", ")", ".", "path", "path", "=", "path", ".", "split", "(", "u'/'", ")", "filename", "=", "path", ".", "pop", "(", "(", "-", "1", ")", ")", "localpath", "=", "u''", "for", "item", "in", "path", ":", "if", "(", "(", "sys", ".", "platform", "!=", "u'win32'", ")", "or", "(", "u':'", "not", "in", "item", ")", ")", ":", "localpath", "=", "os", ".", "path", ".", "join", "(", "localpath", ",", "item", ")", "full_path", "=", "os", ".", "path", ".", "join", "(", "output_path", ",", "localpath", ")", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "full_path", ")", ")", ":", "os", ".", "makedirs", "(", "full_path", ")", "print", "(", "u'downloading {}'", ".", "format", "(", "filename", ")", ")", "try", ":", "urlretrieve", "(", "url", ",", "os", ".", "path", ".", "join", "(", "full_path", ",", "filename", ")", ")", "locations", ".", "append", "(", "os", ".", "path", ".", "join", "(", "localpath", ",", "filename", ")", ")", "except", "(", "URLError", ",", "IOError", ")", "as", "e", ":", "logger", ".", "warning", "(", "u'No file could be downloaded from %s\\n%s'", ",", "url", ",", "e", ")", "return", "locations" ]
downloads wordpress attachments and returns a list of paths to attachments that can be associated with a post .
train
false
3,333
def lookup_es_key(lookup_dict, term): (value_dict, value_key) = _find_es_dict_by_key(lookup_dict, term) return (None if (value_key is None) else value_dict[value_key])
[ "def", "lookup_es_key", "(", "lookup_dict", ",", "term", ")", ":", "(", "value_dict", ",", "value_key", ")", "=", "_find_es_dict_by_key", "(", "lookup_dict", ",", "term", ")", "return", "(", "None", "if", "(", "value_key", "is", "None", ")", "else", "value_dict", "[", "value_key", "]", ")" ]
performs iterative dictionary search for the given term .
train
false
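A sketch assuming _find_es_dict_by_key resolves dotted paths, as ElastAlert's helper does:

doc = {'user': {'name': 'alice'}}
assert lookup_es_key(doc, 'user.name') == 'alice'
assert lookup_es_key(doc, 'user.missing') is None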
3,334
def processor(): return uname()[5]
[ "def", "processor", "(", ")", ":", "return", "uname", "(", ")", "[", "5", "]" ]
returns the processor name .
train
false
3,335
def _identify_user_default(): g.user = request.environ.get(u'REMOTE_USER', u'') if g.user: g.user = g.user.decode(u'utf8') g.userobj = model.User.by_name(g.user) if ((g.userobj is None) or (not g.userobj.is_active())): ev = request.environ if (u'repoze.who.plugins' in ev): pth = getattr(ev[u'repoze.who.plugins'][u'friendlyform'], u'logout_handler_path') redirect(pth) else: g.userobj = _get_user_for_apikey() if (g.userobj is not None): g.user = g.userobj.name
[ "def", "_identify_user_default", "(", ")", ":", "g", ".", "user", "=", "request", ".", "environ", ".", "get", "(", "u'REMOTE_USER'", ",", "u''", ")", "if", "g", ".", "user", ":", "g", ".", "user", "=", "g", ".", "user", ".", "decode", "(", "u'utf8'", ")", "g", ".", "userobj", "=", "model", ".", "User", ".", "by_name", "(", "g", ".", "user", ")", "if", "(", "(", "g", ".", "userobj", "is", "None", ")", "or", "(", "not", "g", ".", "userobj", ".", "is_active", "(", ")", ")", ")", ":", "ev", "=", "request", ".", "environ", "if", "(", "u'repoze.who.plugins'", "in", "ev", ")", ":", "pth", "=", "getattr", "(", "ev", "[", "u'repoze.who.plugins'", "]", "[", "u'friendlyform'", "]", ",", "u'logout_handler_path'", ")", "redirect", "(", "pth", ")", "else", ":", "g", ".", "userobj", "=", "_get_user_for_apikey", "(", ")", "if", "(", "g", ".", "userobj", "is", "not", "None", ")", ":", "g", ".", "user", "=", "g", ".", "userobj", ".", "name" ]
identifies the user using two methods: a) if they logged into the web interface then repoze.who will have set remote_user; b) otherwise the api key supplied with the request is checked .
train
false
3,336
def most_general_unification(a, b, bindings=None): if (bindings is None): bindings = BindingDict() if (a == b): return bindings elif isinstance(a, IndividualVariableExpression): return _mgu_var(a, b, bindings) elif isinstance(b, IndividualVariableExpression): return _mgu_var(b, a, bindings) elif (isinstance(a, ApplicationExpression) and isinstance(b, ApplicationExpression)): return (most_general_unification(a.function, b.function, bindings) + most_general_unification(a.argument, b.argument, bindings)) raise BindingException((a, b))
[ "def", "most_general_unification", "(", "a", ",", "b", ",", "bindings", "=", "None", ")", ":", "if", "(", "bindings", "is", "None", ")", ":", "bindings", "=", "BindingDict", "(", ")", "if", "(", "a", "==", "b", ")", ":", "return", "bindings", "elif", "isinstance", "(", "a", ",", "IndividualVariableExpression", ")", ":", "return", "_mgu_var", "(", "a", ",", "b", ",", "bindings", ")", "elif", "isinstance", "(", "b", ",", "IndividualVariableExpression", ")", ":", "return", "_mgu_var", "(", "b", ",", "a", ",", "bindings", ")", "elif", "(", "isinstance", "(", "a", ",", "ApplicationExpression", ")", "and", "isinstance", "(", "b", ",", "ApplicationExpression", ")", ")", ":", "return", "(", "most_general_unification", "(", "a", ".", "function", ",", "b", ".", "function", ",", "bindings", ")", "+", "most_general_unification", "(", "a", ".", "argument", ",", "b", ".", "argument", ",", "bindings", ")", ")", "raise", "BindingException", "(", "(", "a", ",", "b", ")", ")" ]
find the most general unification of the two given expressions .
train
false
3,337
def _round(a): return int(np.floor((a + 0.5)))
[ "def", "_round", "(", "a", ")", ":", "return", "int", "(", "np", ".", "floor", "(", "(", "a", "+", "0.5", ")", ")", ")" ]
round to the nearest integer , with halves always rounded up .
train
false
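A self-contained check: halves always move up, unlike Python 3's built-in round(), which rounds halves to even:

import numpy as np

def _round(a):
    return int(np.floor(a + 0.5))

assert _round(2.5) == 3 and round(2.5) == 2
assert _round(-2.5) == -2   # -2.5 + 0.5 = -2.0, and floor keeps it at -2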
3,338
def _FilterLine(uwline): return [ft for ft in uwline.tokens if (ft.name not in pytree_utils.NONSEMANTIC_TOKENS)]
[ "def", "_FilterLine", "(", "uwline", ")", ":", "return", "[", "ft", "for", "ft", "in", "uwline", ".", "tokens", "if", "(", "ft", ".", "name", "not", "in", "pytree_utils", ".", "NONSEMANTIC_TOKENS", ")", "]" ]
filter out nonsemantic tokens from the unwrappedlines .
train
false
3,339
def compute_nodes_get_by_service_id(context, service_id): return IMPL.compute_nodes_get_by_service_id(context, service_id)
[ "def", "compute_nodes_get_by_service_id", "(", "context", ",", "service_id", ")", ":", "return", "IMPL", ".", "compute_nodes_get_by_service_id", "(", "context", ",", "service_id", ")" ]
get a list of compute nodes by their associated service id .
train
false
3,341
def delayed_fail(): time.sleep(5) raise ValueError('Expected failure.')
[ "def", "delayed_fail", "(", ")", ":", "time", ".", "sleep", "(", "5", ")", "raise", "ValueError", "(", "'Expected failure.'", ")" ]
delayed failure to make sure that processes are running before the error is raised .
train
false
3,342
def image_schema(profile=None): return schema_get('image', profile)
[ "def", "image_schema", "(", "profile", "=", "None", ")", ":", "return", "schema_get", "(", "'image'", ",", "profile", ")" ]
returns names and descriptions of the "image" schema's properties for this profile's instance of glance . cli example: salt '*' glance.image_schema
train
false
3,343
def __routes_doctest(): pass
[ "def", "__routes_doctest", "(", ")", ":", "pass" ]
dummy function for doctesting autoroutes .
train
false
3,344
def _change_source_state(name, state): choc_path = _find_chocolatey(__context__, __salt__) cmd = [choc_path, 'source', state, '--name', name] result = __salt__['cmd.run_all'](cmd, python_shell=False) if (result['retcode'] != 0): err = 'Running chocolatey failed: {0}'.format(result['stdout']) raise CommandExecutionError(err) return result['stdout']
[ "def", "_change_source_state", "(", "name", ",", "state", ")", ":", "choc_path", "=", "_find_chocolatey", "(", "__context__", ",", "__salt__", ")", "cmd", "=", "[", "choc_path", ",", "'source'", ",", "state", ",", "'--name'", ",", "name", "]", "result", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "if", "(", "result", "[", "'retcode'", "]", "!=", "0", ")", ":", "err", "=", "'Running chocolatey failed: {0}'", ".", "format", "(", "result", "[", "'stdout'", "]", ")", "raise", "CommandExecutionError", "(", "err", ")", "return", "result", "[", "'stdout'", "]" ]
instructs chocolatey to change the state of a source .
train
true
3,345
def libvlc_media_list_player_release(p_mlp): f = (_Cfunctions.get('libvlc_media_list_player_release', None) or _Cfunction('libvlc_media_list_player_release', ((1,),), None, None, MediaListPlayer)) return f(p_mlp)
[ "def", "libvlc_media_list_player_release", "(", "p_mlp", ")", ":", "f", "=", "(", "_Cfunctions", ".", "get", "(", "'libvlc_media_list_player_release'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_media_list_player_release'", ",", "(", "(", "1", ",", ")", ",", ")", ",", "None", ",", "None", ",", "MediaListPlayer", ")", ")", "return", "f", "(", "p_mlp", ")" ]
release a media_list_player after use ; decrements the reference count of a media player object .
train
false
3,346
def libvlc_media_list_release(p_ml): f = (_Cfunctions.get('libvlc_media_list_release', None) or _Cfunction('libvlc_media_list_release', ((1,),), None, None, MediaList)) return f(p_ml)
[ "def", "libvlc_media_list_release", "(", "p_ml", ")", ":", "f", "=", "(", "_Cfunctions", ".", "get", "(", "'libvlc_media_list_release'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_media_list_release'", ",", "(", "(", "1", ",", ")", ",", ")", ",", "None", ",", "None", ",", "MediaList", ")", ")", "return", "f", "(", "p_ml", ")" ]
release media list created with l{libvlc_media_list_new}() .
train
false
3,348
def getLayerHeight(elementNode): if (elementNode == None): return 0.4 preferences = skeinforge_craft.getCraftPreferences('carve') return getCascadeFloatWithoutSelf(skeinforge_craft.getCraftValue('Layer Height', preferences), elementNode, 'layerHeight')
[ "def", "getLayerHeight", "(", "elementNode", ")", ":", "if", "(", "elementNode", "==", "None", ")", ":", "return", "0.4", "preferences", "=", "skeinforge_craft", ".", "getCraftPreferences", "(", "'carve'", ")", "return", "getCascadeFloatWithoutSelf", "(", "skeinforge_craft", ".", "getCraftValue", "(", "'Layer Height'", ",", "preferences", ")", ",", "elementNode", ",", "'layerHeight'", ")" ]
get the layer height .
train
false
3,349
def get_default_secret_key(): secret_access_key_script = AWS_ACCOUNTS['default'].SECRET_ACCESS_KEY_SCRIPT.get() return (secret_access_key_script or get_s3a_secret_key())
[ "def", "get_default_secret_key", "(", ")", ":", "secret_access_key_script", "=", "AWS_ACCOUNTS", "[", "'default'", "]", ".", "SECRET_ACCESS_KEY_SCRIPT", ".", "get", "(", ")", "return", "(", "secret_access_key_script", "or", "get_s3a_secret_key", "(", ")", ")" ]
attempt to set aws secret key from script .
train
false
3,351
@sync_performer def perform_list_s3_keys(dispatcher, intent): s3 = boto.connect_s3() bucket = s3.get_bucket(intent.bucket) return {key.name[len(intent.prefix):] for key in bucket.list(intent.prefix)}
[ "@", "sync_performer", "def", "perform_list_s3_keys", "(", "dispatcher", ",", "intent", ")", ":", "s3", "=", "boto", ".", "connect_s3", "(", ")", "bucket", "=", "s3", ".", "get_bucket", "(", "intent", ".", "bucket", ")", "return", "{", "key", ".", "name", "[", "len", "(", "intent", ".", "prefix", ")", ":", "]", "for", "key", "in", "bucket", ".", "list", "(", "intent", ".", "prefix", ")", "}" ]
see :class:`ListS3Keys` .
train
false
3,352
def getLocationFromSplitLine(oldLocation, splitLine): if (oldLocation == None): oldLocation = Vector3() return Vector3(getDoubleFromCharacterSplitLineValue('X', splitLine, oldLocation.x), getDoubleFromCharacterSplitLineValue('Y', splitLine, oldLocation.y), getDoubleFromCharacterSplitLineValue('Z', splitLine, oldLocation.z))
[ "def", "getLocationFromSplitLine", "(", "oldLocation", ",", "splitLine", ")", ":", "if", "(", "oldLocation", "==", "None", ")", ":", "oldLocation", "=", "Vector3", "(", ")", "return", "Vector3", "(", "getDoubleFromCharacterSplitLineValue", "(", "'X'", ",", "splitLine", ",", "oldLocation", ".", "x", ")", ",", "getDoubleFromCharacterSplitLineValue", "(", "'Y'", ",", "splitLine", ",", "oldLocation", ".", "y", ")", ",", "getDoubleFromCharacterSplitLineValue", "(", "'Z'", ",", "splitLine", ",", "oldLocation", ".", "z", ")", ")" ]
get the location from the split line .
train
false
3,353
def getTetragridTimesOther(firstTetragrid, otherTetragrid): tetragridTimesOther = [] for row in xrange(4): matrixRow = firstTetragrid[row] tetragridTimesOtherRow = [] tetragridTimesOther.append(tetragridTimesOtherRow) for column in xrange(4): dotProduct = 0 for elementIndex in xrange(4): dotProduct += (matrixRow[elementIndex] * otherTetragrid[elementIndex][column]) tetragridTimesOtherRow.append(dotProduct) return tetragridTimesOther
[ "def", "getTetragridTimesOther", "(", "firstTetragrid", ",", "otherTetragrid", ")", ":", "tetragridTimesOther", "=", "[", "]", "for", "row", "in", "xrange", "(", "4", ")", ":", "matrixRow", "=", "firstTetragrid", "[", "row", "]", "tetragridTimesOtherRow", "=", "[", "]", "tetragridTimesOther", ".", "append", "(", "tetragridTimesOtherRow", ")", "for", "column", "in", "xrange", "(", "4", ")", ":", "dotProduct", "=", "0", "for", "elementIndex", "in", "xrange", "(", "4", ")", ":", "dotProduct", "+=", "(", "matrixRow", "[", "elementIndex", "]", "*", "otherTetragrid", "[", "elementIndex", "]", "[", "column", "]", ")", "tetragridTimesOtherRow", ".", "append", "(", "dotProduct", ")", "return", "tetragridTimesOther" ]
get this matrix multiplied by the other matrix .
train
false
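A Python 3 sketch of the same row-by-column product (range instead of xrange), checked against multiplication by the identity:

def tetragrid_times_other(first, other):
    return [[sum(first[r][k] * other[k][c] for k in range(4)) for c in range(4)]
            for r in range(4)]

identity = [[1 if r == c else 0 for c in range(4)] for r in range(4)]
m = [[r * 4 + c for c in range(4)] for r in range(4)]
assert tetragrid_times_other(identity, m) == m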
3,356
def randrange_fmt(mode, char, obj): x = randrange(*fmtdict[mode][char]) if (char == 'c'): x = bytes(chr(x), 'latin1') if (char == '?'): x = bool(x) if ((char == 'f') or (char == 'd')): x = struct.pack(char, x) x = struct.unpack(char, x)[0] if ((obj == 'numpy') and (x == '\x00')): x = '\x01' return x
[ "def", "randrange_fmt", "(", "mode", ",", "char", ",", "obj", ")", ":", "x", "=", "randrange", "(", "*", "fmtdict", "[", "mode", "]", "[", "char", "]", ")", "if", "(", "char", "==", "'c'", ")", ":", "x", "=", "bytes", "(", "chr", "(", "x", ")", ",", "'latin1'", ")", "if", "(", "char", "==", "'?'", ")", ":", "x", "=", "bool", "(", "x", ")", "if", "(", "(", "char", "==", "'f'", ")", "or", "(", "char", "==", "'d'", ")", ")", ":", "x", "=", "struct", ".", "pack", "(", "char", ",", "x", ")", "x", "=", "struct", ".", "unpack", "(", "char", ",", "x", ")", "[", "0", "]", "if", "(", "(", "obj", "==", "'numpy'", ")", "and", "(", "x", "==", "'\\x00'", ")", ")", ":", "x", "=", "'\\x01'", "return", "x" ]
return random item for a type specified by a mode and a single format character .
train
false
3,357
def get_json(environ): content_type = environ.get('CONTENT_TYPE', '') if (content_type != 'application/json'): raise HTTPError(406, 'JSON required') try: return json.loads(read_body(environ)) except ValueError as exc: raise HTTPError(400, exc)
[ "def", "get_json", "(", "environ", ")", ":", "content_type", "=", "environ", ".", "get", "(", "'CONTENT_TYPE'", ",", "''", ")", "if", "(", "content_type", "!=", "'application/json'", ")", ":", "raise", "HTTPError", "(", "406", ",", "'JSON required'", ")", "try", ":", "return", "json", ".", "loads", "(", "read_body", "(", "environ", ")", ")", "except", "ValueError", "as", "exc", ":", "raise", "HTTPError", "(", "400", ",", "exc", ")" ]
read the json request body from the wsgi environ and return it as a python object .
train
true
3,358
def _unquote_domain(domain): return urllib.parse.unquote(domain).replace('%2E', '.')
[ "def", "_unquote_domain", "(", "domain", ")", ":", "return", "urllib", ".", "parse", ".", "unquote", "(", "domain", ")", ".", "replace", "(", "'%2E'", ",", "'.'", ")" ]
unquoting function for receiving a domain name in a url .
train
false
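A runnable Python 3 check: the trailing replace catches the double-encoded dot ('%252E' unquotes to '%2E'), which a single unquote pass would leave behind:

import urllib.parse

def _unquote_domain(domain):
    return urllib.parse.unquote(domain).replace('%2E', '.')

assert _unquote_domain('sub%2Eexample.com') == 'sub.example.com'
assert _unquote_domain('sub%252Eexample.com') == 'sub.example.com'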
3,359
def mutUniform(individual, expr, pset): index = random.randrange(len(individual)) slice_ = individual.searchSubtree(index) type_ = individual[index].ret individual[slice_] = expr(pset=pset, type_=type_) return (individual,)
[ "def", "mutUniform", "(", "individual", ",", "expr", ",", "pset", ")", ":", "index", "=", "random", ".", "randrange", "(", "len", "(", "individual", ")", ")", "slice_", "=", "individual", ".", "searchSubtree", "(", "index", ")", "type_", "=", "individual", "[", "index", "]", ".", "ret", "individual", "[", "slice_", "]", "=", "expr", "(", "pset", "=", "pset", ",", "type_", "=", "type_", ")", "return", "(", "individual", ",", ")" ]
randomly select a point in the tree *individual* .
train
false
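A hedged DEAP sketch: regrow a random subtree with genFull; the primitive set and depth bounds are illustrative choices, not from the source:

import operator
from deap import gp

pset = gp.PrimitiveSet('MAIN', 1)
pset.addPrimitive(operator.add, 2)
pset.addPrimitive(operator.mul, 2)
ind = gp.PrimitiveTree(gp.genFull(pset, min_=2, max_=3))
mutant, = gp.mutUniform(ind, expr=lambda pset, type_: gp.genFull(pset, min_=0, max_=1, type_=type_), pset=pset)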
3,360
def update_all(autotest_dir, add_noncompliant, add_experimental): for path in ['server/tests', 'server/site_tests', 'client/tests', 'client/site_tests', 'client/samples']: test_path = os.path.join(autotest_dir, path) if (not os.path.exists(test_path)): continue logging.info('Scanning %s', test_path) tests = [] tests = get_tests_from_fs(test_path, '^control.*', add_noncompliant=add_noncompliant) update_tests_in_db(tests, add_experimental=add_experimental, add_noncompliant=add_noncompliant, autotest_dir=autotest_dir) test_suite_path = os.path.join(autotest_dir, 'test_suites') if os.path.exists(test_suite_path): logging.info('Scanning %s', test_suite_path) tests = get_tests_from_fs(test_suite_path, '.*', add_noncompliant=add_noncompliant) update_tests_in_db(tests, add_experimental=add_experimental, add_noncompliant=add_noncompliant, autotest_dir=autotest_dir) profilers_path = os.path.join(autotest_dir, 'client/profilers') if os.path.exists(profilers_path): logging.info('Scanning %s', profilers_path) profilers = get_tests_from_fs(profilers_path, '.*py$') update_profilers_in_db(profilers, add_noncompliant=add_noncompliant, description='NA') db_clean_broken(autotest_dir)
[ "def", "update_all", "(", "autotest_dir", ",", "add_noncompliant", ",", "add_experimental", ")", ":", "for", "path", "in", "[", "'server/tests'", ",", "'server/site_tests'", ",", "'client/tests'", ",", "'client/site_tests'", ",", "'client/samples'", "]", ":", "test_path", "=", "os", ".", "path", ".", "join", "(", "autotest_dir", ",", "path", ")", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "test_path", ")", ")", ":", "continue", "logging", ".", "info", "(", "'Scanning %s'", ",", "test_path", ")", "tests", "=", "[", "]", "tests", "=", "get_tests_from_fs", "(", "test_path", ",", "'^control.*'", ",", "add_noncompliant", "=", "add_noncompliant", ")", "update_tests_in_db", "(", "tests", ",", "add_experimental", "=", "add_experimental", ",", "add_noncompliant", "=", "add_noncompliant", ",", "autotest_dir", "=", "autotest_dir", ")", "test_suite_path", "=", "os", ".", "path", ".", "join", "(", "autotest_dir", ",", "'test_suites'", ")", "if", "os", ".", "path", ".", "exists", "(", "test_suite_path", ")", ":", "logging", ".", "info", "(", "'Scanning %s'", ",", "test_suite_path", ")", "tests", "=", "get_tests_from_fs", "(", "test_suite_path", ",", "'.*'", ",", "add_noncompliant", "=", "add_noncompliant", ")", "update_tests_in_db", "(", "tests", ",", "add_experimental", "=", "add_experimental", ",", "add_noncompliant", "=", "add_noncompliant", ",", "autotest_dir", "=", "autotest_dir", ")", "profilers_path", "=", "os", ".", "path", ".", "join", "(", "autotest_dir", ",", "'client/profilers'", ")", "if", "os", ".", "path", ".", "exists", "(", "profilers_path", ")", ":", "logging", ".", "info", "(", "'Scanning %s'", ",", "profilers_path", ")", "profilers", "=", "get_tests_from_fs", "(", "profilers_path", ",", "'.*py$'", ")", "update_profilers_in_db", "(", "profilers", ",", "add_noncompliant", "=", "add_noncompliant", ",", "description", "=", "'NA'", ")", "db_clean_broken", "(", "autotest_dir", ")" ]
scan all test , test suite and profiler directories and update their entries in the database .
train
false
3,361
def bootstrap_url(postfix): return (get_bootstrap_setting(u'base_url') + postfix)
[ "def", "bootstrap_url", "(", "postfix", ")", ":", "return", "(", "get_bootstrap_setting", "(", "u'base_url'", ")", "+", "postfix", ")" ]
prefix a relative url with the bootstrap base url .
train
false