id_within_dataset: int64 (values 1 to 55.5k)
snippet: string (lengths 19 to 14.2k)
tokens: list (lengths 6 to 1.63k)
nl: string (lengths 6 to 352)
split_within_dataset: string (1 distinct value)
is_duplicated: bool (2 classes)
40,798
def prepend_timestamp(msg, format):
    if (type(format) is str):
        timestamp = time.strftime(format, time.localtime())
    elif callable(format):
        timestamp = str(format())
    else:
        raise InvalidTimestampFormat
    return ('%s\t%s' % (timestamp, msg))
[ "def", "prepend_timestamp", "(", "msg", ",", "format", ")", ":", "if", "(", "type", "(", "format", ")", "is", "str", ")", ":", "timestamp", "=", "time", ".", "strftime", "(", "format", ",", "time", ".", "localtime", "(", ")", ")", "elif", "callable", "(", "format", ")", ":", "timestamp", "=", "str", "(", "format", "(", ")", ")", "else", ":", "raise", "InvalidTimestampFormat", "return", "(", "'%s\\t%s'", "%", "(", "timestamp", ",", "msg", ")", ")" ]
prepend timestamp to a message in a standard way .
train
false
40,800
def run_callback_threadsafe(loop, callback, *args):
    ident = loop.__dict__.get('_thread_ident')
    if ((ident is not None) and (ident == threading.get_ident())):
        raise RuntimeError('Cannot be called from within the event loop')
    future = concurrent.futures.Future()

    def run_callback():
        'Run callback and store result.'
        try:
            future.set_result(callback(*args))
        except Exception as exc:
            if future.set_running_or_notify_cancel():
                future.set_exception(exc)
            else:
                _LOGGER.warning('Exception on lost future: ', exc_info=True)
    loop.call_soon_threadsafe(run_callback)
    return future
[ "def", "run_callback_threadsafe", "(", "loop", ",", "callback", ",", "*", "args", ")", ":", "ident", "=", "loop", ".", "__dict__", ".", "get", "(", "'_thread_ident'", ")", "if", "(", "(", "ident", "is", "not", "None", ")", "and", "(", "ident", "==", "threading", ".", "get_ident", "(", ")", ")", ")", ":", "raise", "RuntimeError", "(", "'Cannot be called from within the event loop'", ")", "future", "=", "concurrent", ".", "futures", ".", "Future", "(", ")", "def", "run_callback", "(", ")", ":", "try", ":", "future", ".", "set_result", "(", "callback", "(", "*", "args", ")", ")", "except", "Exception", "as", "exc", ":", "if", "future", ".", "set_running_or_notify_cancel", "(", ")", ":", "future", ".", "set_exception", "(", "exc", ")", "else", ":", "_LOGGER", ".", "warning", "(", "'Exception on lost future: '", ",", "exc_info", "=", "True", ")", "loop", ".", "call_soon_threadsafe", "(", "run_callback", ")", "return", "future" ]
submit a callback object to a given event loop .
train
false
40,801
def get_repository_categories(app, id):
    sa_session = app.model.context.current
    return sa_session.query(app.model.RepositoryCategoryAssociation).filter(
        (app.model.RepositoryCategoryAssociation.table.c.repository_id == app.security.decode_id(id)))
[ "def", "get_repository_categories", "(", "app", ",", "id", ")", ":", "sa_session", "=", "app", ".", "model", ".", "context", ".", "current", "return", "sa_session", ".", "query", "(", "app", ".", "model", ".", "RepositoryCategoryAssociation", ")", ".", "filter", "(", "(", "app", ".", "model", ".", "RepositoryCategoryAssociation", ".", "table", ".", "c", ".", "repository_id", "==", "app", ".", "security", ".", "decode_id", "(", "id", ")", ")", ")" ]
get categories of a repository on the tool shed side from the database via id .
train
false
40,804
def TimestampFromTicks(ticks):
    return datetime(*localtime(ticks)[:6])
[ "def", "TimestampFromTicks", "(", "ticks", ")", ":", "return", "datetime", "(", "*", "localtime", "(", "ticks", ")", "[", ":", "6", "]", ")" ]
construct an object holding a timestamp value from the given ticks value .
train
false
40,805
def getPublicTypeMembers(type_, onlyValues=False):
    for (name, value) in inspect.getmembers(type_):
        if (not name.startswith('__')):
            if (not onlyValues):
                (yield (name, value))
            else:
                (yield value)
[ "def", "getPublicTypeMembers", "(", "type_", ",", "onlyValues", "=", "False", ")", ":", "for", "(", "name", ",", "value", ")", "in", "inspect", ".", "getmembers", "(", "type_", ")", ":", "if", "(", "not", "name", ".", "startswith", "(", "'__'", ")", ")", ":", "if", "(", "not", "onlyValues", ")", ":", "(", "yield", "(", "name", ",", "value", ")", ")", "else", ":", "(", "yield", "value", ")" ]
useful for getting members from types .
train
false
40,806
def replaced(fmri):
    return _fmadm_action_fmri('replaced', fmri)
[ "def", "replaced", "(", "fmri", ")", ":", "return", "_fmadm_action_fmri", "(", "'replaced'", ",", "fmri", ")" ]
notify fault manager that resource has been replaced fmri: string fmri cli example: .
train
false
40,807
def strip_all(string):
    return strip_irc(strip(strip_irc(string)))
[ "def", "strip_all", "(", "string", ")", ":", "return", "strip_irc", "(", "strip", "(", "strip_irc", "(", "string", ")", ")", ")" ]
removes all $() syntax and mirc formatting codes from the input string and returns it .
train
false
40,808
@scope.define_info(o_len=2)
def ap_filter_trials(o_idxs, o_vals, l_idxs, l_vals, gamma, gamma_cap=DEFAULT_LF):
    (o_idxs, o_vals, l_idxs, l_vals) = list(map(np.asarray, [o_idxs, o_vals, l_idxs, l_vals]))
    n_below = min(int(np.ceil((gamma * np.sqrt(len(l_vals))))), gamma_cap)
    l_order = np.argsort(l_vals)
    keep_idxs = set(l_idxs[l_order[:n_below]])
    below = [v for (i, v) in zip(o_idxs, o_vals) if (i in keep_idxs)]
    if 0:
        print('DEBUG: thresh', l_vals[l_order[:n_below]])
    keep_idxs = set(l_idxs[l_order[n_below:]])
    above = [v for (i, v) in zip(o_idxs, o_vals) if (i in keep_idxs)]
    return (np.asarray(below), np.asarray(above))
[ "@", "scope", ".", "define_info", "(", "o_len", "=", "2", ")", "def", "ap_filter_trials", "(", "o_idxs", ",", "o_vals", ",", "l_idxs", ",", "l_vals", ",", "gamma", ",", "gamma_cap", "=", "DEFAULT_LF", ")", ":", "(", "o_idxs", ",", "o_vals", ",", "l_idxs", ",", "l_vals", ")", "=", "list", "(", "map", "(", "np", ".", "asarray", ",", "[", "o_idxs", ",", "o_vals", ",", "l_idxs", ",", "l_vals", "]", ")", ")", "n_below", "=", "min", "(", "int", "(", "np", ".", "ceil", "(", "(", "gamma", "*", "np", ".", "sqrt", "(", "len", "(", "l_vals", ")", ")", ")", ")", ")", ",", "gamma_cap", ")", "l_order", "=", "np", ".", "argsort", "(", "l_vals", ")", "keep_idxs", "=", "set", "(", "l_idxs", "[", "l_order", "[", ":", "n_below", "]", "]", ")", "below", "=", "[", "v", "for", "(", "i", ",", "v", ")", "in", "zip", "(", "o_idxs", ",", "o_vals", ")", "if", "(", "i", "in", "keep_idxs", ")", "]", "if", "0", ":", "print", "(", "'DEBUG: thresh'", ",", "l_vals", "[", "l_order", "[", ":", "n_below", "]", "]", ")", "keep_idxs", "=", "set", "(", "l_idxs", "[", "l_order", "[", "n_below", ":", "]", "]", ")", "above", "=", "[", "v", "for", "(", "i", ",", "v", ")", "in", "zip", "(", "o_idxs", ",", "o_vals", ")", "if", "(", "i", "in", "keep_idxs", ")", "]", "return", "(", "np", ".", "asarray", "(", "below", ")", ",", "np", ".", "asarray", "(", "above", ")", ")" ]
return the elements of o_vals that correspond to trials whose losses were above gamma .
train
false
40,809
def hamming_loss(y_true, y_pred, labels=None, sample_weight=None, classes=None):
    if (classes is not None):
        warnings.warn("'classes' was renamed to 'labels' in version 0.18 and will be removed in 0.20.", DeprecationWarning)
        labels = classes
    (y_type, y_true, y_pred) = _check_targets(y_true, y_pred)
    if (labels is None):
        labels = unique_labels(y_true, y_pred)
    else:
        labels = np.asarray(labels)
    if (sample_weight is None):
        weight_average = 1.0
    else:
        weight_average = np.mean(sample_weight)
    if y_type.startswith('multilabel'):
        n_differences = count_nonzero((y_true - y_pred), sample_weight=sample_weight)
        return (n_differences / ((y_true.shape[0] * len(labels)) * weight_average))
    elif (y_type in ['binary', 'multiclass']):
        return _weighted_sum((y_true != y_pred), sample_weight, normalize=True)
    else:
        raise ValueError('{0} is not supported'.format(y_type))
[ "def", "hamming_loss", "(", "y_true", ",", "y_pred", ",", "labels", "=", "None", ",", "sample_weight", "=", "None", ",", "classes", "=", "None", ")", ":", "if", "(", "classes", "is", "not", "None", ")", ":", "warnings", ".", "warn", "(", "\"'classes' was renamed to 'labels' in version 0.18 and will be removed in 0.20.\"", ",", "DeprecationWarning", ")", "labels", "=", "classes", "(", "y_type", ",", "y_true", ",", "y_pred", ")", "=", "_check_targets", "(", "y_true", ",", "y_pred", ")", "if", "(", "labels", "is", "None", ")", ":", "labels", "=", "unique_labels", "(", "y_true", ",", "y_pred", ")", "else", ":", "labels", "=", "np", ".", "asarray", "(", "labels", ")", "if", "(", "sample_weight", "is", "None", ")", ":", "weight_average", "=", "1.0", "else", ":", "weight_average", "=", "np", ".", "mean", "(", "sample_weight", ")", "if", "y_type", ".", "startswith", "(", "'multilabel'", ")", ":", "n_differences", "=", "count_nonzero", "(", "(", "y_true", "-", "y_pred", ")", ",", "sample_weight", "=", "sample_weight", ")", "return", "(", "n_differences", "/", "(", "(", "y_true", ".", "shape", "[", "0", "]", "*", "len", "(", "labels", ")", ")", "*", "weight_average", ")", ")", "elif", "(", "y_type", "in", "[", "'binary'", ",", "'multiclass'", "]", ")", ":", "return", "_weighted_sum", "(", "(", "y_true", "!=", "y_pred", ")", ",", "sample_weight", ",", "normalize", "=", "True", ")", "else", ":", "raise", "ValueError", "(", "'{0} is not supported'", ".", "format", "(", "y_type", ")", ")" ]
compute the average hamming loss .
train
false
40,810
def euler_from_quaternion(quaternion, axes='sxyz'):
    return euler_from_matrix(quaternion_matrix(quaternion), axes)
[ "def", "euler_from_quaternion", "(", "quaternion", ",", "axes", "=", "'sxyz'", ")", ":", "return", "euler_from_matrix", "(", "quaternion_matrix", "(", "quaternion", ")", ",", "axes", ")" ]
return euler angles from quaternion for specified axis sequence .
train
false
40,811
def show_ipsecpolicy(ipsecpolicy, profile=None):
    conn = _auth(profile)
    return conn.show_ipsecpolicy(ipsecpolicy)
[ "def", "show_ipsecpolicy", "(", "ipsecpolicy", ",", "profile", "=", "None", ")", ":", "conn", "=", "_auth", "(", "profile", ")", "return", "conn", ".", "show_ipsecpolicy", "(", "ipsecpolicy", ")" ]
fetches information of a specific ipsecpolicy cli example: .
train
false
40,812
@domain_constructor(loss_target=0)
def q1_lognormal():
    return {'loss': scope.min((0.1 * ((hp.lognormal('x', 0, 2) - 10) ** 2)), 10),
            'status': base.STATUS_OK}
[ "@", "domain_constructor", "(", "loss_target", "=", "0", ")", "def", "q1_lognormal", "(", ")", ":", "return", "{", "'loss'", ":", "scope", ".", "min", "(", "(", "0.1", "*", "(", "(", "hp", ".", "lognormal", "(", "'x'", ",", "0", ",", "2", ")", "-", "10", ")", "**", "2", ")", ")", ",", "10", ")", ",", "'status'", ":", "base", ".", "STATUS_OK", "}" ]
about the simplest problem you could ask for: optimize a one-variable quadratic function .
train
false
40,815
def _has_db_updated_with_new_score(scored_block_usage_key, **kwargs):
    if (kwargs['score_db_table'] == ScoreDatabaseTableEnum.courseware_student_module):
        score = get_score(kwargs['user_id'], scored_block_usage_key)
        found_modified_time = (score.modified if (score is not None) else None)
    else:
        assert (kwargs['score_db_table'] == ScoreDatabaseTableEnum.submissions)
        score = sub_api.get_score({'student_id': kwargs['anonymous_user_id'],
                                   'course_id': unicode(scored_block_usage_key.course_key),
                                   'item_id': unicode(scored_block_usage_key),
                                   'item_type': scored_block_usage_key.block_type})
        found_modified_time = (score['created_at'] if (score is not None) else None)
    if (score is None):
        return kwargs['score_deleted']
    return (found_modified_time >= from_timestamp(kwargs['expected_modified_time']))
[ "def", "_has_db_updated_with_new_score", "(", "scored_block_usage_key", ",", "**", "kwargs", ")", ":", "if", "(", "kwargs", "[", "'score_db_table'", "]", "==", "ScoreDatabaseTableEnum", ".", "courseware_student_module", ")", ":", "score", "=", "get_score", "(", "kwargs", "[", "'user_id'", "]", ",", "scored_block_usage_key", ")", "found_modified_time", "=", "(", "score", ".", "modified", "if", "(", "score", "is", "not", "None", ")", "else", "None", ")", "else", ":", "assert", "(", "kwargs", "[", "'score_db_table'", "]", "==", "ScoreDatabaseTableEnum", ".", "submissions", ")", "score", "=", "sub_api", ".", "get_score", "(", "{", "'student_id'", ":", "kwargs", "[", "'anonymous_user_id'", "]", ",", "'course_id'", ":", "unicode", "(", "scored_block_usage_key", ".", "course_key", ")", ",", "'item_id'", ":", "unicode", "(", "scored_block_usage_key", ")", ",", "'item_type'", ":", "scored_block_usage_key", ".", "block_type", "}", ")", "found_modified_time", "=", "(", "score", "[", "'created_at'", "]", "if", "(", "score", "is", "not", "None", ")", "else", "None", ")", "if", "(", "score", "is", "None", ")", ":", "return", "kwargs", "[", "'score_deleted'", "]", "return", "(", "found_modified_time", ">=", "from_timestamp", "(", "kwargs", "[", "'expected_modified_time'", "]", ")", ")" ]
returns whether the database has been updated with the expected new score values for the given problem and user .
train
false
40,816
def get_or_set_hash(name, length=8, chars='abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'):
    ret = get(name, None)
    if (ret is None):
        val = ''.join([random.SystemRandom().choice(chars) for _ in range(length)])
        if (DEFAULT_TARGET_DELIM in name):
            (root, rest) = name.split(DEFAULT_TARGET_DELIM, 1)
            curr = get(root, _infinitedict())
            val = _dict_from_path(rest, val)
            curr.update(val)
            setval(root, curr)
        else:
            setval(name, val)
    return get(name)
[ "def", "get_or_set_hash", "(", "name", ",", "length", "=", "8", ",", "chars", "=", "'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'", ")", ":", "ret", "=", "get", "(", "name", ",", "None", ")", "if", "(", "ret", "is", "None", ")", ":", "val", "=", "''", ".", "join", "(", "[", "random", ".", "SystemRandom", "(", ")", ".", "choice", "(", "chars", ")", "for", "_", "in", "range", "(", "length", ")", "]", ")", "if", "(", "DEFAULT_TARGET_DELIM", "in", "name", ")", ":", "(", "root", ",", "rest", ")", "=", "name", ".", "split", "(", "DEFAULT_TARGET_DELIM", ",", "1", ")", "curr", "=", "get", "(", "root", ",", "_infinitedict", "(", ")", ")", "val", "=", "_dict_from_path", "(", "rest", ",", "val", ")", "curr", ".", "update", "(", "val", ")", "setval", "(", "root", ",", "curr", ")", "else", ":", "setval", "(", "name", ",", "val", ")", "return", "get", "(", "name", ")" ]
perform a one-time generation of a hash and write it to the local grains .
train
true
40,818
def get_native_encoding_type():
    if (sys.maxunicode == 65535):
        return 'UTF16'
    else:
        return 'UTF32'
[ "def", "get_native_encoding_type", "(", ")", ":", "if", "(", "sys", ".", "maxunicode", "==", "65535", ")", ":", "return", "'UTF16'", "else", ":", "return", "'UTF32'" ]
returns the encoding type that matches pythons native strings .
train
false
40,819
def write_NetCDF_georeference(origin, outfile):
    geo_ref = ensure_geo_reference(origin)
    geo_ref.write_NetCDF(outfile)
    return geo_ref
[ "def", "write_NetCDF_georeference", "(", "origin", ",", "outfile", ")", ":", "geo_ref", "=", "ensure_geo_reference", "(", "origin", ")", "geo_ref", ".", "write_NetCDF", "(", "outfile", ")", "return", "geo_ref" ]
write georeference info to a netcdf file .
train
true
40,820
def _compute_rc(G):
    deghist = nx.degree_histogram(G)
    total = sum(deghist)
    nks = ((total - cs) for cs in accumulate(deghist) if ((total - cs) > 1))
    edge_degrees = sorted((sorted(map(G.degree, e)) for e in G.edges()), reverse=True)
    ek = G.number_of_edges()
    (k1, k2) = edge_degrees.pop()
    rc = {}
    for (d, nk) in enumerate(nks):
        while (k1 <= d):
            if (len(edge_degrees) == 0):
                ek = 0
                break
            (k1, k2) = edge_degrees.pop()
            ek -= 1
        rc[d] = ((2 * ek) / (nk * (nk - 1)))
    return rc
[ "def", "_compute_rc", "(", "G", ")", ":", "deghist", "=", "nx", ".", "degree_histogram", "(", "G", ")", "total", "=", "sum", "(", "deghist", ")", "nks", "=", "(", "(", "total", "-", "cs", ")", "for", "cs", "in", "accumulate", "(", "deghist", ")", "if", "(", "(", "total", "-", "cs", ")", ">", "1", ")", ")", "edge_degrees", "=", "sorted", "(", "(", "sorted", "(", "map", "(", "G", ".", "degree", ",", "e", ")", ")", "for", "e", "in", "G", ".", "edges", "(", ")", ")", ",", "reverse", "=", "True", ")", "ek", "=", "G", ".", "number_of_edges", "(", ")", "(", "k1", ",", "k2", ")", "=", "edge_degrees", ".", "pop", "(", ")", "rc", "=", "{", "}", "for", "(", "d", ",", "nk", ")", "in", "enumerate", "(", "nks", ")", ":", "while", "(", "k1", "<=", "d", ")", ":", "if", "(", "len", "(", "edge_degrees", ")", "==", "0", ")", ":", "ek", "=", "0", "break", "(", "k1", ",", "k2", ")", "=", "edge_degrees", ".", "pop", "(", ")", "ek", "-=", "1", "rc", "[", "d", "]", "=", "(", "(", "2", "*", "ek", ")", "/", "(", "nk", "*", "(", "nk", "-", "1", ")", ")", ")", "return", "rc" ]
returns the rich-club coefficient for each degree in the graph g .
train
false
40,821
def countArrayElements(array):
    elements = []
    counters = []
    for element in array:
        if (element in elements):
            indx = elements.index(element)
            counters[indx] += 1
        else:
            elements.append(element)
            counters.append(1)
    return (elements, counters)
[ "def", "countArrayElements", "(", "array", ")", ":", "elements", "=", "[", "]", "counters", "=", "[", "]", "for", "element", "in", "array", ":", "if", "(", "element", "in", "elements", ")", ":", "indx", "=", "elements", ".", "index", "(", "element", ")", "counters", "[", "indx", "]", "+=", "1", "else", ":", "elements", ".", "append", "(", "element", ")", "counters", ".", "append", "(", "1", ")", "return", "(", "elements", ",", "counters", ")" ]
simple method to count the repetitions of elements in an array .
train
false
40,822
@mock_ec2
def test_create_dhcp_options_invalid_options():
    conn = boto.connect_vpc(u'the_key', u'the_secret')
    servers = [u'f', u'f', u'f', u'f', u'f']
    with assert_raises(EC2ResponseError) as cm:
        conn.create_dhcp_options(ntp_servers=servers)
    cm.exception.code.should.equal(u'InvalidParameterValue')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none
    with assert_raises(EC2ResponseError) as cm:
        conn.create_dhcp_options(netbios_node_type=u'0')
    cm.exception.code.should.equal(u'InvalidParameterValue')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none
[ "@", "mock_ec2", "def", "test_create_dhcp_options_invalid_options", "(", ")", ":", "conn", "=", "boto", ".", "connect_vpc", "(", "u'the_key'", ",", "u'the_secret'", ")", "servers", "=", "[", "u'f'", ",", "u'f'", ",", "u'f'", ",", "u'f'", ",", "u'f'", "]", "with", "assert_raises", "(", "EC2ResponseError", ")", "as", "cm", ":", "conn", ".", "create_dhcp_options", "(", "ntp_servers", "=", "servers", ")", "cm", ".", "exception", ".", "code", ".", "should", ".", "equal", "(", "u'InvalidParameterValue'", ")", "cm", ".", "exception", ".", "status", ".", "should", ".", "equal", "(", "400", ")", "cm", ".", "exception", ".", "request_id", ".", "should_not", ".", "be", ".", "none", "with", "assert_raises", "(", "EC2ResponseError", ")", "as", "cm", ":", "conn", ".", "create_dhcp_options", "(", "netbios_node_type", "=", "u'0'", ")", "cm", ".", "exception", ".", "code", ".", "should", ".", "equal", "(", "u'InvalidParameterValue'", ")", "cm", ".", "exception", ".", "status", ".", "should", ".", "equal", "(", "400", ")", "cm", ".", "exception", ".", "request_id", ".", "should_not", ".", "be", ".", "none" ]
create invalid dhcp options .
train
false
40,823
def generate_user(email):
    username = slugify(email)
    (user, _) = UserProfile.objects.get_or_create(email=email, defaults={'username': username})
    return user
[ "def", "generate_user", "(", "email", ")", ":", "username", "=", "slugify", "(", "email", ")", "(", "user", ",", "_", ")", "=", "UserProfile", ".", "objects", ".", "get_or_create", "(", "email", "=", "email", ",", "defaults", "=", "{", "'username'", ":", "username", "}", ")", "return", "user" ]
generate a userprofile given the email provided .
train
false
40,825
def configure_template_filters(app):
    app.jinja_env.filters['pretty_date'] = pretty_date
    app.jinja_env.filters['format_date'] = format_date
    app.jinja_env.filters['nl2br'] = nl2br
[ "def", "configure_template_filters", "(", "app", ")", ":", "app", ".", "jinja_env", ".", "filters", "[", "'pretty_date'", "]", "=", "pretty_date", "app", ".", "jinja_env", ".", "filters", "[", "'format_date'", "]", "=", "format_date", "app", ".", "jinja_env", ".", "filters", "[", "'nl2br'", "]", "=", "nl2br" ]
configure filters .
train
false
40,828
def full_like(array, fill_value, stream=None):
    warnings.warn('chainer.cuda.full_like is deprecated. Use cupy.full_like instead.', DeprecationWarning)
    check_cuda_available()
    assert (stream is None)
    if isinstance(array, cupy.ndarray):
        return cupy.full_like(array, fill_value)
    return cupy.full(array.shape, fill_value, dtype=array.dtype)
[ "def", "full_like", "(", "array", ",", "fill_value", ",", "stream", "=", "None", ")", ":", "warnings", ".", "warn", "(", "'chainer.cuda.full_like is deprecated. Use cupy.full_like instead.'", ",", "DeprecationWarning", ")", "check_cuda_available", "(", ")", "assert", "(", "stream", "is", "None", ")", "if", "isinstance", "(", "array", ",", "cupy", ".", "ndarray", ")", ":", "return", "cupy", ".", "full_like", "(", "array", ",", "fill_value", ")", "return", "cupy", ".", "full", "(", "array", ".", "shape", ",", "fill_value", ",", "dtype", "=", "array", ".", "dtype", ")" ]
compatibility for numpy<1 .
train
false
40,829
def tags_eq(tagged_object, tag_names):
    eq_(sorted([t.name for t in tagged_object.tags.all()]), sorted(tag_names))
[ "def", "tags_eq", "(", "tagged_object", ",", "tag_names", ")", ":", "eq_", "(", "sorted", "(", "[", "t", ".", "name", "for", "t", "in", "tagged_object", ".", "tags", ".", "all", "(", ")", "]", ")", ",", "sorted", "(", "tag_names", ")", ")" ]
assert that the names of the tags on tagged_object are tag_names .
train
false
40,830
def expr_from_dict(rep, *gens):
    result = []
    for (monom, coeff) in rep.items():
        term = [coeff]
        for (g, m) in zip(gens, monom):
            if m:
                term.append(Pow(g, m))
        result.append(Mul(*term))
    return Add(*result)
[ "def", "expr_from_dict", "(", "rep", ",", "*", "gens", ")", ":", "result", "=", "[", "]", "for", "(", "monom", ",", "coeff", ")", "in", "rep", ".", "items", "(", ")", ":", "term", "=", "[", "coeff", "]", "for", "(", "g", ",", "m", ")", "in", "zip", "(", "gens", ",", "monom", ")", ":", "if", "m", ":", "term", ".", "append", "(", "Pow", "(", "g", ",", "m", ")", ")", "result", ".", "append", "(", "Mul", "(", "*", "term", ")", ")", "return", "Add", "(", "*", "result", ")" ]
convert a multinomial form into an expression .
train
false
40,831
def start_filter(app, conn, filter, limit=None, timeout=1.0, ack_messages=False,
                 tasks=None, queues=None, callback=None, forever=False,
                 on_declare_queue=None, consume_from=None, state=None, accept=None,
                 **kwargs):
    return Filterer(app, conn, filter, limit=limit, timeout=timeout,
                    ack_messages=ack_messages, tasks=tasks, queues=queues,
                    callback=callback, forever=forever,
                    on_declare_queue=on_declare_queue, consume_from=consume_from,
                    state=state, accept=accept, **kwargs).start()
[ "def", "start_filter", "(", "app", ",", "conn", ",", "filter", ",", "limit", "=", "None", ",", "timeout", "=", "1.0", ",", "ack_messages", "=", "False", ",", "tasks", "=", "None", ",", "queues", "=", "None", ",", "callback", "=", "None", ",", "forever", "=", "False", ",", "on_declare_queue", "=", "None", ",", "consume_from", "=", "None", ",", "state", "=", "None", ",", "accept", "=", "None", ",", "**", "kwargs", ")", ":", "return", "Filterer", "(", "app", ",", "conn", ",", "filter", ",", "limit", "=", "limit", ",", "timeout", "=", "timeout", ",", "ack_messages", "=", "ack_messages", ",", "tasks", "=", "tasks", ",", "queues", "=", "queues", ",", "callback", "=", "callback", ",", "forever", "=", "forever", ",", "on_declare_queue", "=", "on_declare_queue", ",", "consume_from", "=", "consume_from", ",", "state", "=", "state", ",", "accept", "=", "accept", ",", "**", "kwargs", ")", ".", "start", "(", ")" ]
filter tasks .
train
false
40,832
def clean_tables():
    for f in TABLES:
        if os.path.isfile(f):
            os.remove(f)
            print((u'Removed ' + f))
[ "def", "clean_tables", "(", ")", ":", "for", "f", "in", "TABLES", ":", "if", "os", ".", "path", ".", "isfile", "(", "f", ")", ":", "os", ".", "remove", "(", "f", ")", "print", "(", "(", "u'Removed '", "+", "f", ")", ")" ]
remove the lexer/parser modules that are dynamically created .
train
false
40,833
def validate_twitter(username):
    if username:
        username = re.sub('https?://(www\\.)?twitter\\.com/|@', '', username)
        if (len(username) > 15):
            raise ValidationError(_('Twitter usernames cannot be longer than 15 characters.'))
        if (not re.match('^\\w+$', username)):
            raise ValidationError(_('Twitter usernames must contain only alphanumeric characters and the underscore.'))
    return username
[ "def", "validate_twitter", "(", "username", ")", ":", "if", "username", ":", "username", "=", "re", ".", "sub", "(", "'https?://(www\\\\.)?twitter\\\\.com/|@'", ",", "''", ",", "username", ")", "if", "(", "len", "(", "username", ")", ">", "15", ")", ":", "raise", "ValidationError", "(", "_", "(", "'Twitter usernames cannot be longer than 15 characters.'", ")", ")", "if", "(", "not", "re", ".", "match", "(", "'^\\\\w+$'", ",", "username", ")", ")", ":", "raise", "ValidationError", "(", "_", "(", "'Twitter usernames must contain only alphanumeric characters and the underscore.'", ")", ")", "return", "username" ]
return a twitter username given @ or http(s) strings .
train
false
40,834
def _find_method(f, version_float):
    qualified_name = _fully_qualified_name(f)
    method_list = VERSIONED_METHODS.get(qualified_name, [])
    for (min_version, max_version, func) in method_list:
        if (min_version <= version_float <= max_version):
            return func
    raise webob.exc.HTTPNotFound()
[ "def", "_find_method", "(", "f", ",", "version_float", ")", ":", "qualified_name", "=", "_fully_qualified_name", "(", "f", ")", "method_list", "=", "VERSIONED_METHODS", ".", "get", "(", "qualified_name", ",", "[", "]", ")", "for", "(", "min_version", ",", "max_version", ",", "func", ")", "in", "method_list", ":", "if", "(", "min_version", "<=", "version_float", "<=", "max_version", ")", ":", "return", "func", "raise", "webob", ".", "exc", ".", "HTTPNotFound", "(", ")" ]
look in versioned_methods for method with right name matching version .
train
false
40,835
def _updateIndices(db):
    db.executescript('\n-- syncing\ncreate index if not exists ix_notes_usn on notes (usn);\ncreate index if not exists ix_cards_usn on cards (usn);\ncreate index if not exists ix_revlog_usn on revlog (usn);\n-- card spacing, etc\ncreate index if not exists ix_cards_nid on cards (nid);\n-- scheduling and deck limiting\ncreate index if not exists ix_cards_sched on cards (did, queue, due);\n-- revlog by card\ncreate index if not exists ix_revlog_cid on revlog (cid);\n-- field uniqueness\ncreate index if not exists ix_notes_csum on notes (csum);\n')
[ "def", "_updateIndices", "(", "db", ")", ":", "db", ".", "executescript", "(", "'\\n-- syncing\\ncreate index if not exists ix_notes_usn on notes (usn);\\ncreate index if not exists ix_cards_usn on cards (usn);\\ncreate index if not exists ix_revlog_usn on revlog (usn);\\n-- card spacing, etc\\ncreate index if not exists ix_cards_nid on cards (nid);\\n-- scheduling and deck limiting\\ncreate index if not exists ix_cards_sched on cards (did, queue, due);\\n-- revlog by card\\ncreate index if not exists ix_revlog_cid on revlog (cid);\\n-- field uniqueness\\ncreate index if not exists ix_notes_csum on notes (csum);\\n'", ")" ]
add indices to the db .
train
false
40,836
def _items(mappingorseq):
    if hasattr(mappingorseq, 'items'):
        return iteritems(mappingorseq)
    return mappingorseq
[ "def", "_items", "(", "mappingorseq", ")", ":", "if", "hasattr", "(", "mappingorseq", ",", "'items'", ")", ":", "return", "iteritems", "(", "mappingorseq", ")", "return", "mappingorseq" ]
wrapper for efficient iteration over mappings represented by dicts or sequences:: .
train
false
40,837
def hostgroup_get(name=None, groupids=None, hostids=None, **connection_args):
    conn_args = _login(**connection_args)
    try:
        if conn_args:
            method = 'hostgroup.get'
            params = {'output': 'extend'}
            if ((not groupids) and (not name) and (not hostids)):
                return False
            if name:
                name_dict = {'name': name}
                params.setdefault('filter', name_dict)
            if groupids:
                params.setdefault('groupids', groupids)
            if hostids:
                params.setdefault('hostids', hostids)
            params = _params_extend(params, **connection_args)
            ret = _query(method, params, conn_args['url'], conn_args['auth'])
            return (ret['result'] if (len(ret['result']) > 0) else False)
        else:
            raise KeyError
    except KeyError:
        return False
[ "def", "hostgroup_get", "(", "name", "=", "None", ",", "groupids", "=", "None", ",", "hostids", "=", "None", ",", "**", "connection_args", ")", ":", "conn_args", "=", "_login", "(", "**", "connection_args", ")", "try", ":", "if", "conn_args", ":", "method", "=", "'hostgroup.get'", "params", "=", "{", "'output'", ":", "'extend'", "}", "if", "(", "(", "not", "groupids", ")", "and", "(", "not", "name", ")", "and", "(", "not", "hostids", ")", ")", ":", "return", "False", "if", "name", ":", "name_dict", "=", "{", "'name'", ":", "name", "}", "params", ".", "setdefault", "(", "'filter'", ",", "name_dict", ")", "if", "groupids", ":", "params", ".", "setdefault", "(", "'groupids'", ",", "groupids", ")", "if", "hostids", ":", "params", ".", "setdefault", "(", "'hostids'", ",", "hostids", ")", "params", "=", "_params_extend", "(", "params", ",", "**", "connection_args", ")", "ret", "=", "_query", "(", "method", ",", "params", ",", "conn_args", "[", "'url'", "]", ",", "conn_args", "[", "'auth'", "]", ")", "return", "(", "ret", "[", "'result'", "]", "if", "(", "len", "(", "ret", "[", "'result'", "]", ")", ">", "0", ")", "else", "False", ")", "else", ":", "raise", "KeyError", "except", "KeyError", ":", "return", "False" ]
retrieve host groups according to the given parameters .
train
true
40,838
def create_initial_config_file(args, extensions_data):
    config_file = args.config_files[(-1)]
    if os.path.exists(path.expand_path(config_file)):
        return
    try:
        default = config_lib.format_initial(extensions_data)
        path.get_or_create_file(config_file, mkdir=False, content=default)
        logger.info(u'Initialized %s with default config', config_file)
    except IOError as error:
        logger.warning(u'Unable to initialize %s with default config: %s',
                       config_file, encoding.locale_decode(error))
[ "def", "create_initial_config_file", "(", "args", ",", "extensions_data", ")", ":", "config_file", "=", "args", ".", "config_files", "[", "(", "-", "1", ")", "]", "if", "os", ".", "path", ".", "exists", "(", "path", ".", "expand_path", "(", "config_file", ")", ")", ":", "return", "try", ":", "default", "=", "config_lib", ".", "format_initial", "(", "extensions_data", ")", "path", ".", "get_or_create_file", "(", "config_file", ",", "mkdir", "=", "False", ",", "content", "=", "default", ")", "logger", ".", "info", "(", "u'Initialized %s with default config'", ",", "config_file", ")", "except", "IOError", "as", "error", ":", "logger", ".", "warning", "(", "u'Unable to initialize %s with default config: %s'", ",", "config_file", ",", "encoding", ".", "locale_decode", "(", "error", ")", ")" ]
initialize whatever the last config file is with defaults .
train
false
40,839
def criteria(course_key):
    about_path = reverse('about_course', kwargs={'course_id': unicode(course_key)})
    return u'{}{}'.format(site_prefix(), about_path)
[ "def", "criteria", "(", "course_key", ")", ":", "about_path", "=", "reverse", "(", "'about_course'", ",", "kwargs", "=", "{", "'course_id'", ":", "unicode", "(", "course_key", ")", "}", ")", "return", "u'{}{}'", ".", "format", "(", "site_prefix", "(", ")", ",", "about_path", ")" ]
constructs the criteria url from the course about page .
train
false
40,840
def dup_zz_cyclotomic_poly(n, K):
    h = [K.one, (- K.one)]
    for (p, k) in factorint(n).items():
        h = dup_quo(dup_inflate(h, p, K), h, K)
        h = dup_inflate(h, (p ** (k - 1)), K)
    return h
[ "def", "dup_zz_cyclotomic_poly", "(", "n", ",", "K", ")", ":", "h", "=", "[", "K", ".", "one", ",", "(", "-", "K", ".", "one", ")", "]", "for", "(", "p", ",", "k", ")", "in", "factorint", "(", "n", ")", ".", "items", "(", ")", ":", "h", "=", "dup_quo", "(", "dup_inflate", "(", "h", ",", "p", ",", "K", ")", ",", "h", ",", "K", ")", "h", "=", "dup_inflate", "(", "h", ",", "(", "p", "**", "(", "k", "-", "1", ")", ")", ",", "K", ")", "return", "h" ]
efficiently generate n-th cyclotomic polnomial .
train
false
40,841
def timestamp_from_record_tuple(record):
    return record[0]['timestamp']
[ "def", "timestamp_from_record_tuple", "(", "record", ")", ":", "return", "record", "[", "0", "]", "[", "'timestamp'", "]" ]
extract timestamp from hbase tuple record .
train
false
40,843
def create_image_bdm(image_ref, boot_index=0):
    return BlockDeviceDict({'source_type': 'image',
                            'image_id': image_ref,
                            'delete_on_termination': True,
                            'boot_index': boot_index,
                            'device_type': 'disk',
                            'destination_type': 'local'})
[ "def", "create_image_bdm", "(", "image_ref", ",", "boot_index", "=", "0", ")", ":", "return", "BlockDeviceDict", "(", "{", "'source_type'", ":", "'image'", ",", "'image_id'", ":", "image_ref", ",", "'delete_on_termination'", ":", "True", ",", "'boot_index'", ":", "boot_index", ",", "'device_type'", ":", "'disk'", ",", "'destination_type'", ":", "'local'", "}", ")" ]
create a block device dict based on the image_ref .
train
false
40,844
@pytest.fixture(autouse=True)
def reset_cache_backend_state(celery_app):
    (yield)
    backend = celery_app.__dict__.get(u'backend')
    if (backend is not None):
        if isinstance(backend, CacheBackend):
            if isinstance(backend.client, DummyClient):
                backend.client.cache.clear()
            backend._cache.clear()
[ "@", "pytest", ".", "fixture", "(", "autouse", "=", "True", ")", "def", "reset_cache_backend_state", "(", "celery_app", ")", ":", "(", "yield", ")", "backend", "=", "celery_app", ".", "__dict__", ".", "get", "(", "u'backend'", ")", "if", "(", "backend", "is", "not", "None", ")", ":", "if", "isinstance", "(", "backend", ",", "CacheBackend", ")", ":", "if", "isinstance", "(", "backend", ".", "client", ",", "DummyClient", ")", ":", "backend", ".", "client", ".", "cache", ".", "clear", "(", ")", "backend", ".", "_cache", ".", "clear", "(", ")" ]
fixture that resets the internal state of the cache result backend .
train
false
40,845
@register(u'operate-and-get-next')
def operate_and_get_next(event):
    buff = event.current_buffer
    new_index = (buff.working_index + 1)
    buff.accept_action.validate_and_handle(event.cli, buff)

    def set_working_index():
        buff.working_index = new_index
    event.cli.pre_run_callables.append(set_working_index)
[ "@", "register", "(", "u'operate-and-get-next'", ")", "def", "operate_and_get_next", "(", "event", ")", ":", "buff", "=", "event", ".", "current_buffer", "new_index", "=", "(", "buff", ".", "working_index", "+", "1", ")", "buff", ".", "accept_action", ".", "validate_and_handle", "(", "event", ".", "cli", ",", "buff", ")", "def", "set_working_index", "(", ")", ":", "buff", ".", "working_index", "=", "new_index", "event", ".", "cli", ".", "pre_run_callables", ".", "append", "(", "set_working_index", ")" ]
accept the current line for execution and fetch the next line relative to the current line from the history for editing .
train
false
40,846
def _delete_cookie(response):
    response.set_cookie(settings.SESSION_COOKIE_NAME, max_age=0,
                        expires='Thu, 01-Jan-1970 00:00:00 GMT',
                        domain=settings.SESSION_COOKIE_DOMAIN,
                        secure=(settings.SESSION_COOKIE_SECURE or None),
                        httponly=(settings.SESSION_COOKIE_HTTPONLY or None))
[ "def", "_delete_cookie", "(", "response", ")", ":", "response", ".", "set_cookie", "(", "settings", ".", "SESSION_COOKIE_NAME", ",", "max_age", "=", "0", ",", "expires", "=", "'Thu, 01-Jan-1970 00:00:00 GMT'", ",", "domain", "=", "settings", ".", "SESSION_COOKIE_DOMAIN", ",", "secure", "=", "(", "settings", ".", "SESSION_COOKIE_SECURE", "or", "None", ")", ",", "httponly", "=", "(", "settings", ".", "SESSION_COOKIE_HTTPONLY", "or", "None", ")", ")" ]
delete the cookie by setting the expiration to a date in the past .
train
false
40,847
def mutGaussian(individual, mu, sigma, indpb):
    size = len(individual)
    if (not isinstance(mu, Sequence)):
        mu = repeat(mu, size)
    elif (len(mu) < size):
        raise IndexError(('mu must be at least the size of individual: %d < %d' % (len(mu), size)))
    if (not isinstance(sigma, Sequence)):
        sigma = repeat(sigma, size)
    elif (len(sigma) < size):
        raise IndexError(('sigma must be at least the size of individual: %d < %d' % (len(sigma), size)))
    for (i, m, s) in zip(xrange(size), mu, sigma):
        if (random.random() < indpb):
            individual[i] += random.gauss(m, s)
    return (individual,)
[ "def", "mutGaussian", "(", "individual", ",", "mu", ",", "sigma", ",", "indpb", ")", ":", "size", "=", "len", "(", "individual", ")", "if", "(", "not", "isinstance", "(", "mu", ",", "Sequence", ")", ")", ":", "mu", "=", "repeat", "(", "mu", ",", "size", ")", "elif", "(", "len", "(", "mu", ")", "<", "size", ")", ":", "raise", "IndexError", "(", "(", "'mu must be at least the size of individual: %d < %d'", "%", "(", "len", "(", "mu", ")", ",", "size", ")", ")", ")", "if", "(", "not", "isinstance", "(", "sigma", ",", "Sequence", ")", ")", ":", "sigma", "=", "repeat", "(", "sigma", ",", "size", ")", "elif", "(", "len", "(", "sigma", ")", "<", "size", ")", ":", "raise", "IndexError", "(", "(", "'sigma must be at least the size of individual: %d < %d'", "%", "(", "len", "(", "sigma", ")", ",", "size", ")", ")", ")", "for", "(", "i", ",", "m", ",", "s", ")", "in", "zip", "(", "xrange", "(", "size", ")", ",", "mu", ",", "sigma", ")", ":", "if", "(", "random", ".", "random", "(", ")", "<", "indpb", ")", ":", "individual", "[", "i", "]", "+=", "random", ".", "gauss", "(", "m", ",", "s", ")", "return", "(", "individual", ",", ")" ]
this function applies a gaussian mutation of mean *mu* and standard deviation *sigma* on the input individual .
train
false
40,849
def ColorScaleRule(start_type=None, start_value=None, start_color=None,
                   mid_type=None, mid_value=None, mid_color=None,
                   end_type=None, end_value=None, end_color=None):
    formats = []
    if (start_type is not None):
        formats.append(FormatObject(type=start_type, val=start_value))
    if (mid_type is not None):
        formats.append(FormatObject(type=mid_type, val=mid_value))
    if (end_type is not None):
        formats.append(FormatObject(type=end_type, val=end_value))
    colors = []
    for v in (start_color, mid_color, end_color):
        if (v is not None):
            if (not isinstance(v, Color)):
                v = Color(v)
            colors.append(v)
    cs = ColorScale(cfvo=formats, color=colors)
    rule = Rule(type='colorScale', colorScale=cs)
    return rule
[ "def", "ColorScaleRule", "(", "start_type", "=", "None", ",", "start_value", "=", "None", ",", "start_color", "=", "None", ",", "mid_type", "=", "None", ",", "mid_value", "=", "None", ",", "mid_color", "=", "None", ",", "end_type", "=", "None", ",", "end_value", "=", "None", ",", "end_color", "=", "None", ")", ":", "formats", "=", "[", "]", "if", "(", "start_type", "is", "not", "None", ")", ":", "formats", ".", "append", "(", "FormatObject", "(", "type", "=", "start_type", ",", "val", "=", "start_value", ")", ")", "if", "(", "mid_type", "is", "not", "None", ")", ":", "formats", ".", "append", "(", "FormatObject", "(", "type", "=", "mid_type", ",", "val", "=", "mid_value", ")", ")", "if", "(", "end_type", "is", "not", "None", ")", ":", "formats", ".", "append", "(", "FormatObject", "(", "type", "=", "end_type", ",", "val", "=", "end_value", ")", ")", "colors", "=", "[", "]", "for", "v", "in", "(", "start_color", ",", "mid_color", ",", "end_color", ")", ":", "if", "(", "v", "is", "not", "None", ")", ":", "if", "(", "not", "isinstance", "(", "v", ",", "Color", ")", ")", ":", "v", "=", "Color", "(", "v", ")", "colors", ".", "append", "(", "v", ")", "cs", "=", "ColorScale", "(", "cfvo", "=", "formats", ",", "color", "=", "colors", ")", "rule", "=", "Rule", "(", "type", "=", "'colorScale'", ",", "colorScale", "=", "cs", ")", "return", "rule" ]
backwards compatibility .
train
false
40,851
def dump_privatekey(type, pkey, cipher=None, passphrase=None):
    bio = _new_mem_buf()
    if (cipher is not None):
        if (passphrase is None):
            raise TypeError('if a value is given for cipher one must also be given for passphrase')
        cipher_obj = _lib.EVP_get_cipherbyname(_byte_string(cipher))
        if (cipher_obj == _ffi.NULL):
            raise ValueError('Invalid cipher name')
    else:
        cipher_obj = _ffi.NULL
    helper = _PassphraseHelper(type, passphrase)
    if (type == FILETYPE_PEM):
        result_code = _lib.PEM_write_bio_PrivateKey(bio, pkey._pkey, cipher_obj, _ffi.NULL, 0,
                                                    helper.callback, helper.callback_args)
        helper.raise_if_problem()
    elif (type == FILETYPE_ASN1):
        result_code = _lib.i2d_PrivateKey_bio(bio, pkey._pkey)
    elif (type == FILETYPE_TEXT):
        rsa = _lib.EVP_PKEY_get1_RSA(pkey._pkey)
        result_code = _lib.RSA_print(bio, rsa, 0)
    else:
        raise ValueError('type argument must be FILETYPE_PEM, FILETYPE_ASN1, or FILETYPE_TEXT')
    if (result_code == 0):
        _raise_current_error()
    return _bio_to_string(bio)
[ "def", "dump_privatekey", "(", "type", ",", "pkey", ",", "cipher", "=", "None", ",", "passphrase", "=", "None", ")", ":", "bio", "=", "_new_mem_buf", "(", ")", "if", "(", "cipher", "is", "not", "None", ")", ":", "if", "(", "passphrase", "is", "None", ")", ":", "raise", "TypeError", "(", "'if a value is given for cipher one must also be given for passphrase'", ")", "cipher_obj", "=", "_lib", ".", "EVP_get_cipherbyname", "(", "_byte_string", "(", "cipher", ")", ")", "if", "(", "cipher_obj", "==", "_ffi", ".", "NULL", ")", ":", "raise", "ValueError", "(", "'Invalid cipher name'", ")", "else", ":", "cipher_obj", "=", "_ffi", ".", "NULL", "helper", "=", "_PassphraseHelper", "(", "type", ",", "passphrase", ")", "if", "(", "type", "==", "FILETYPE_PEM", ")", ":", "result_code", "=", "_lib", ".", "PEM_write_bio_PrivateKey", "(", "bio", ",", "pkey", ".", "_pkey", ",", "cipher_obj", ",", "_ffi", ".", "NULL", ",", "0", ",", "helper", ".", "callback", ",", "helper", ".", "callback_args", ")", "helper", ".", "raise_if_problem", "(", ")", "elif", "(", "type", "==", "FILETYPE_ASN1", ")", ":", "result_code", "=", "_lib", ".", "i2d_PrivateKey_bio", "(", "bio", ",", "pkey", ".", "_pkey", ")", "elif", "(", "type", "==", "FILETYPE_TEXT", ")", ":", "rsa", "=", "_lib", ".", "EVP_PKEY_get1_RSA", "(", "pkey", ".", "_pkey", ")", "result_code", "=", "_lib", ".", "RSA_print", "(", "bio", ",", "rsa", ",", "0", ")", "else", ":", "raise", "ValueError", "(", "'type argument must be FILETYPE_PEM, FILETYPE_ASN1, or FILETYPE_TEXT'", ")", "if", "(", "result_code", "==", "0", ")", ":", "_raise_current_error", "(", ")", "return", "_bio_to_string", "(", "bio", ")" ]
dump a private key to a buffer .
train
false
40,852
def pearson_chi_squared_test(observed=[], expected=[], df=None, tail=UPPER):
    o = list(observed)
    e = (list(expected) or _expected(o))
    n = len(o)
    m = (len(o[0]) if o else 0)
    df = (df or ((n - 1) * (m - 1)))
    df = (df or (((m == 1) and (n - 1)) or (m - 1)))
    x2 = 0.0
    for i in xrange(n):
        for j in xrange(m):
            if ((o[i][j] != 0) and (e[i][j] != 0)):
                x2 += (((o[i][j] - e[i][j]) ** 2.0) / e[i][j])
    p = gammai((df * 0.5), (x2 * 0.5), tail)
    return (x2, p)
[ "def", "pearson_chi_squared_test", "(", "observed", "=", "[", "]", ",", "expected", "=", "[", "]", ",", "df", "=", "None", ",", "tail", "=", "UPPER", ")", ":", "o", "=", "list", "(", "observed", ")", "e", "=", "(", "list", "(", "expected", ")", "or", "_expected", "(", "o", ")", ")", "n", "=", "len", "(", "o", ")", "m", "=", "(", "len", "(", "o", "[", "0", "]", ")", "if", "o", "else", "0", ")", "df", "=", "(", "df", "or", "(", "(", "n", "-", "1", ")", "*", "(", "m", "-", "1", ")", ")", ")", "df", "=", "(", "df", "or", "(", "(", "(", "m", "==", "1", ")", "and", "(", "n", "-", "1", ")", ")", "or", "(", "m", "-", "1", ")", ")", ")", "x2", "=", "0.0", "for", "i", "in", "xrange", "(", "n", ")", ":", "for", "j", "in", "xrange", "(", "m", ")", ":", "if", "(", "(", "o", "[", "i", "]", "[", "j", "]", "!=", "0", ")", "and", "(", "e", "[", "i", "]", "[", "j", "]", "!=", "0", ")", ")", ":", "x2", "+=", "(", "(", "(", "o", "[", "i", "]", "[", "j", "]", "-", "e", "[", "i", "]", "[", "j", "]", ")", "**", "2.0", ")", "/", "e", "[", "i", "]", "[", "j", "]", ")", "p", "=", "gammai", "(", "(", "df", "*", "0.5", ")", ",", "(", "x2", "*", "0.5", ")", ",", "tail", ")", "return", "(", "x2", ",", "p", ")" ]
returns for the n x m observed and expected data .
train
false
40,853
def task_configure_flocker_agent(control_node, dataset_backend,
                                 dataset_backend_configuration, logging_config=None):
    dataset_backend_configuration = dataset_backend_configuration.copy()
    dataset_backend_configuration.update({u'backend': dataset_backend.name})
    content = {'version': 1,
               'control-service': {'hostname': control_node, 'port': 4524},
               'dataset': dataset_backend_configuration}
    if (logging_config is not None):
        content['logging'] = logging_config
    put_config_file = put(path='/etc/flocker/agent.yml',
                          content=yaml.safe_dump(content),
                          log_content_filter=_remove_dataset_fields)
    return sequence([put_config_file])
[ "def", "task_configure_flocker_agent", "(", "control_node", ",", "dataset_backend", ",", "dataset_backend_configuration", ",", "logging_config", "=", "None", ")", ":", "dataset_backend_configuration", "=", "dataset_backend_configuration", ".", "copy", "(", ")", "dataset_backend_configuration", ".", "update", "(", "{", "u'backend'", ":", "dataset_backend", ".", "name", "}", ")", "content", "=", "{", "'version'", ":", "1", ",", "'control-service'", ":", "{", "'hostname'", ":", "control_node", ",", "'port'", ":", "4524", "}", ",", "'dataset'", ":", "dataset_backend_configuration", "}", "if", "(", "logging_config", "is", "not", "None", ")", ":", "content", "[", "'logging'", "]", "=", "logging_config", "put_config_file", "=", "put", "(", "path", "=", "'/etc/flocker/agent.yml'", ",", "content", "=", "yaml", ".", "safe_dump", "(", "content", ")", ",", "log_content_filter", "=", "_remove_dataset_fields", ")", "return", "sequence", "(", "[", "put_config_file", "]", ")" ]
configure the flocker agents by writing out the configuration file .
train
false
40,855
def test_get_words_cplusplus():
    expected_words = ['Consider', 'Create', 'Implement', 'Obj', 'ObjContainer',
                      'Postfix', 'Prefix', 'Return', 'SmartPointer', 'Static',
                      'Zero', 'a', 'above', 'access', 'actual', 'add', 'an',
                      'back', 'bool', 'call', 'class', 'const', 'container',
                      'cout', 'definitions', 'do', 'end', 'endl', 'f', 'false',
                      'for', 'friend', 'g', 'i', 'if', 'implement', 'include',
                      'index', 'indicates', 'int', 'iostream', 'iterator', 'j',
                      'list', 'main', 'member', 'method', 'namespace', 'o',
                      'obj', 'objc', 'oc', 'of', 'operator', 'overload',
                      'pointer', 'public', 'push', 'return', 's', 'size',
                      'smart', 'sp', 'standard', 'static', 'std', 'sz', 'the',
                      'to', 'true', 'using', 'value', 'vector', 'version',
                      'void', 'while']
    assert (sorted(expected_words) == sorted(get_words_by_filename('example.cpp')))
    assert (sorted(expected_words) == sorted(get_words_by_content('example.cpp')))
[ "def", "test_get_words_cplusplus", "(", ")", ":", "expected_words", "=", "[", "'Consider'", ",", "'Create'", ",", "'Implement'", ",", "'Obj'", ",", "'ObjContainer'", ",", "'Postfix'", ",", "'Prefix'", ",", "'Return'", ",", "'SmartPointer'", ",", "'Static'", ",", "'Zero'", ",", "'a'", ",", "'above'", ",", "'access'", ",", "'actual'", ",", "'add'", ",", "'an'", ",", "'back'", ",", "'bool'", ",", "'call'", ",", "'class'", ",", "'const'", ",", "'container'", ",", "'cout'", ",", "'definitions'", ",", "'do'", ",", "'end'", ",", "'endl'", ",", "'f'", ",", "'false'", ",", "'for'", ",", "'friend'", ",", "'g'", ",", "'i'", ",", "'if'", ",", "'implement'", ",", "'include'", ",", "'index'", ",", "'indicates'", ",", "'int'", ",", "'iostream'", ",", "'iterator'", ",", "'j'", ",", "'list'", ",", "'main'", ",", "'member'", ",", "'method'", ",", "'namespace'", ",", "'o'", ",", "'obj'", ",", "'objc'", ",", "'oc'", ",", "'of'", ",", "'operator'", ",", "'overload'", ",", "'pointer'", ",", "'public'", ",", "'push'", ",", "'return'", ",", "'s'", ",", "'size'", ",", "'smart'", ",", "'sp'", ",", "'standard'", ",", "'static'", ",", "'std'", ",", "'sz'", ",", "'the'", ",", "'to'", ",", "'true'", ",", "'using'", ",", "'value'", ",", "'vector'", ",", "'version'", ",", "'void'", ",", "'while'", "]", "assert", "(", "sorted", "(", "expected_words", ")", "==", "sorted", "(", "get_words_by_filename", "(", "'example.cpp'", ")", ")", ")", "assert", "(", "sorted", "(", "expected_words", ")", "==", "sorted", "(", "get_words_by_content", "(", "'example.cpp'", ")", ")", ")" ]
test for get word from c++ file syntax .
train
false
40,856
def generate_dataset(number_items=1000):
    data = []
    names = get_names()
    totalnames = len(names)
    random.seed()
    for i in range(number_items):
        data.append({'name': names[random.randint(0, (totalnames - 1))],
                     'age': random.randint(1, 100),
                     'description': li_words(50, False)})
    return data
[ "def", "generate_dataset", "(", "number_items", "=", "1000", ")", ":", "data", "=", "[", "]", "names", "=", "get_names", "(", ")", "totalnames", "=", "len", "(", "names", ")", "random", ".", "seed", "(", ")", "for", "i", "in", "range", "(", "number_items", ")", ":", "data", ".", "append", "(", "{", "'name'", ":", "names", "[", "random", ".", "randint", "(", "0", ",", "(", "totalnames", "-", "1", ")", ")", "]", ",", "'age'", ":", "random", ".", "randint", "(", "1", ",", "100", ")", ",", "'description'", ":", "li_words", "(", "50", ",", "False", ")", "}", ")", "return", "data" ]
generate a dataset with number_items elements .
train
true
40,859
def test_embed_mixture():
    em = EmbedMixture(10, 2, 5, dropout_ratio=0.0)
    doc_ids = Variable(np.arange(1, dtype='int32'))
    doc_vector = em(doc_ids).data
    weights = softmax(em.weights.W.data[0, :])
    un_weights = softmax(em.unnormalized_weights(doc_ids).data[0, :])
    test = np.sum((weights * em.factors.W.data.T), axis=1)
    assert np.allclose(doc_vector, test)
    assert np.allclose(un_weights, weights)
[ "def", "test_embed_mixture", "(", ")", ":", "em", "=", "EmbedMixture", "(", "10", ",", "2", ",", "5", ",", "dropout_ratio", "=", "0.0", ")", "doc_ids", "=", "Variable", "(", "np", ".", "arange", "(", "1", ",", "dtype", "=", "'int32'", ")", ")", "doc_vector", "=", "em", "(", "doc_ids", ")", ".", "data", "weights", "=", "softmax", "(", "em", ".", "weights", ".", "W", ".", "data", "[", "0", ",", ":", "]", ")", "un_weights", "=", "softmax", "(", "em", ".", "unnormalized_weights", "(", "doc_ids", ")", ".", "data", "[", "0", ",", ":", "]", ")", "test", "=", "np", ".", "sum", "(", "(", "weights", "*", "em", ".", "factors", ".", "W", ".", "data", ".", "T", ")", ",", "axis", "=", "1", ")", "assert", "np", ".", "allclose", "(", "doc_vector", ",", "test", ")", "assert", "np", ".", "allclose", "(", "un_weights", ",", "weights", ")" ]
manually test the projection logic between topic weights and vectors .
train
false
40,860
def token_kwargs(bits, parser, support_legacy=False):
    if (not bits):
        return {}
    match = kwarg_re.match(bits[0])
    kwarg_format = (match and match.group(1))
    if (not kwarg_format):
        if (not support_legacy):
            return {}
        if ((len(bits) < 3) or (bits[1] != 'as')):
            return {}
    kwargs = {}
    while bits:
        if kwarg_format:
            match = kwarg_re.match(bits[0])
            if ((not match) or (not match.group(1))):
                return kwargs
            (key, value) = match.groups()
            del bits[:1]
        else:
            if ((len(bits) < 3) or (bits[1] != 'as')):
                return kwargs
            (key, value) = (bits[2], bits[0])
            del bits[:3]
        kwargs[key] = parser.compile_filter(value)
        if (bits and (not kwarg_format)):
            if (bits[0] != 'and'):
                return kwargs
            del bits[:1]
    return kwargs
[ "def", "token_kwargs", "(", "bits", ",", "parser", ",", "support_legacy", "=", "False", ")", ":", "if", "(", "not", "bits", ")", ":", "return", "{", "}", "match", "=", "kwarg_re", ".", "match", "(", "bits", "[", "0", "]", ")", "kwarg_format", "=", "(", "match", "and", "match", ".", "group", "(", "1", ")", ")", "if", "(", "not", "kwarg_format", ")", ":", "if", "(", "not", "support_legacy", ")", ":", "return", "{", "}", "if", "(", "(", "len", "(", "bits", ")", "<", "3", ")", "or", "(", "bits", "[", "1", "]", "!=", "'as'", ")", ")", ":", "return", "{", "}", "kwargs", "=", "{", "}", "while", "bits", ":", "if", "kwarg_format", ":", "match", "=", "kwarg_re", ".", "match", "(", "bits", "[", "0", "]", ")", "if", "(", "(", "not", "match", ")", "or", "(", "not", "match", ".", "group", "(", "1", ")", ")", ")", ":", "return", "kwargs", "(", "key", ",", "value", ")", "=", "match", ".", "groups", "(", ")", "del", "bits", "[", ":", "1", "]", "else", ":", "if", "(", "(", "len", "(", "bits", ")", "<", "3", ")", "or", "(", "bits", "[", "1", "]", "!=", "'as'", ")", ")", ":", "return", "kwargs", "(", "key", ",", "value", ")", "=", "(", "bits", "[", "2", "]", ",", "bits", "[", "0", "]", ")", "del", "bits", "[", ":", "3", "]", "kwargs", "[", "key", "]", "=", "parser", ".", "compile_filter", "(", "value", ")", "if", "(", "bits", "and", "(", "not", "kwarg_format", ")", ")", ":", "if", "(", "bits", "[", "0", "]", "!=", "'and'", ")", ":", "return", "kwargs", "del", "bits", "[", ":", "1", "]", "return", "kwargs" ]
a utility method for parsing token keyword arguments .
train
false
40,861
def find_position(string, index, last_index, last_pos):
    lines = string.count('\n', last_index, index)
    if (lines > 0):
        column = (index - string.rfind('\n', last_index, index))
    else:
        column = (last_pos[1] + (index - last_index))
    return ((last_pos[0] + lines), column)
[ "def", "find_position", "(", "string", ",", "index", ",", "last_index", ",", "last_pos", ")", ":", "lines", "=", "string", ".", "count", "(", "'\\n'", ",", "last_index", ",", "index", ")", "if", "(", "lines", ">", "0", ")", ":", "column", "=", "(", "index", "-", "string", ".", "rfind", "(", "'\\n'", ",", "last_index", ",", "index", ")", ")", "else", ":", "column", "=", "(", "last_pos", "[", "1", "]", "+", "(", "index", "-", "last_index", ")", ")", "return", "(", "(", "last_pos", "[", "0", "]", "+", "lines", ")", ",", "column", ")" ]
given a string and index .
train
false
40,863
def make_qual(sff_fp, output_fp, use_sfftools=False, no_trim=False):
    if use_sfftools:
        _fail_on_gzipped_sff(sff_fp)
        check_sffinfo()
        if no_trim:
            _check_call(['sffinfo', '-notrim', '-q', sff_fp], stdout=open(output_fp, 'w'))
        else:
            _check_call(['sffinfo', '-q', sff_fp], stdout=open(output_fp, 'w'))
    else:
        try:
            format_binary_sff_as_fna(qiime_open(sff_fp, 'rb'), open(output_fp, 'w'), qual=True)
        except:
            raise IOError(('Could not parse SFF %s' % sff_fp))
[ "def", "make_qual", "(", "sff_fp", ",", "output_fp", ",", "use_sfftools", "=", "False", ",", "no_trim", "=", "False", ")", ":", "if", "use_sfftools", ":", "_fail_on_gzipped_sff", "(", "sff_fp", ")", "check_sffinfo", "(", ")", "if", "no_trim", ":", "_check_call", "(", "[", "'sffinfo'", ",", "'-notrim'", ",", "'-q'", ",", "sff_fp", "]", ",", "stdout", "=", "open", "(", "output_fp", ",", "'w'", ")", ")", "else", ":", "_check_call", "(", "[", "'sffinfo'", ",", "'-q'", ",", "sff_fp", "]", ",", "stdout", "=", "open", "(", "output_fp", ",", "'w'", ")", ")", "else", ":", "try", ":", "format_binary_sff_as_fna", "(", "qiime_open", "(", "sff_fp", ",", "'rb'", ")", ",", "open", "(", "output_fp", ",", "'w'", ")", ",", "qual", "=", "True", ")", "except", ":", "raise", "IOError", "(", "(", "'Could not parse SFF %s'", "%", "sff_fp", ")", ")" ]
makes qual file from sff file .
train
false
40,864
def strip_microseconds(delta):
    return timedelta(delta.days, delta.seconds)
[ "def", "strip_microseconds", "(", "delta", ")", ":", "return", "timedelta", "(", "delta", ".", "days", ",", "delta", ".", "seconds", ")" ]
return the given :py:class:datetime .
train
false
40,865
def p_command_for_bad_step(p):
    p[0] = 'MALFORMED STEP IN FOR STATEMENT'
[ "def", "p_command_for_bad_step", "(", "p", ")", ":", "p", "[", "0", "]", "=", "'MALFORMED STEP IN FOR STATEMENT'" ]
command : for id equals expr to expr step error .
train
false
40,866
def try_printout(data, out, opts, **kwargs):
    try:
        printout = get_printout(out, opts)(data, **kwargs)
        if (printout is not None):
            return printout.rstrip()
    except (KeyError, AttributeError, TypeError):
        log.debug(traceback.format_exc())
        try:
            printout = get_printout('nested', opts)(data, **kwargs)
            if (printout is not None):
                return printout.rstrip()
        except (KeyError, AttributeError, TypeError):
            log.error('Nested output failed: ', exc_info=True)
            printout = get_printout('raw', opts)(data, **kwargs)
            if (printout is not None):
                return printout.rstrip()
[ "def", "try_printout", "(", "data", ",", "out", ",", "opts", ",", "**", "kwargs", ")", ":", "try", ":", "printout", "=", "get_printout", "(", "out", ",", "opts", ")", "(", "data", ",", "**", "kwargs", ")", "if", "(", "printout", "is", "not", "None", ")", ":", "return", "printout", ".", "rstrip", "(", ")", "except", "(", "KeyError", ",", "AttributeError", ",", "TypeError", ")", ":", "log", ".", "debug", "(", "traceback", ".", "format_exc", "(", ")", ")", "try", ":", "printout", "=", "get_printout", "(", "'nested'", ",", "opts", ")", "(", "data", ",", "**", "kwargs", ")", "if", "(", "printout", "is", "not", "None", ")", ":", "return", "printout", ".", "rstrip", "(", ")", "except", "(", "KeyError", ",", "AttributeError", ",", "TypeError", ")", ":", "log", ".", "error", "(", "'Nested output failed: '", ",", "exc_info", "=", "True", ")", "printout", "=", "get_printout", "(", "'raw'", ",", "opts", ")", "(", "data", ",", "**", "kwargs", ")", "if", "(", "printout", "is", "not", "None", ")", ":", "return", "printout", ".", "rstrip", "(", ")" ]
safely get the string to print out .
train
true
40,867
@utils.arg('ip_proto', metavar='<ip-proto>', help=_('IP protocol (icmp, tcp, udp).')) @utils.arg('from_port', metavar='<from-port>', help=_('Port at start of range.')) @utils.arg('to_port', metavar='<to-port>', help=_('Port at end of range.')) @utils.arg('cidr', metavar='<cidr>', help=_('CIDR for address range.')) @deprecated_network def do_secgroup_delete_default_rule(cs, args): for rule in cs.security_group_default_rules.list(): if (rule.ip_protocol and (rule.ip_protocol.upper() == args.ip_proto.upper()) and (rule.from_port == int(args.from_port)) and (rule.to_port == int(args.to_port)) and (rule.ip_range['cidr'] == args.cidr)): _print_secgroup_rules([rule], show_source_group=False) return cs.security_group_default_rules.delete(rule.id) raise exceptions.CommandError(_('Rule not found'))
[ "@", "utils", ".", "arg", "(", "'ip_proto'", ",", "metavar", "=", "'<ip-proto>'", ",", "help", "=", "_", "(", "'IP protocol (icmp, tcp, udp).'", ")", ")", "@", "utils", ".", "arg", "(", "'from_port'", ",", "metavar", "=", "'<from-port>'", ",", "help", "=", "_", "(", "'Port at start of range.'", ")", ")", "@", "utils", ".", "arg", "(", "'to_port'", ",", "metavar", "=", "'<to-port>'", ",", "help", "=", "_", "(", "'Port at end of range.'", ")", ")", "@", "utils", ".", "arg", "(", "'cidr'", ",", "metavar", "=", "'<cidr>'", ",", "help", "=", "_", "(", "'CIDR for address range.'", ")", ")", "@", "deprecated_network", "def", "do_secgroup_delete_default_rule", "(", "cs", ",", "args", ")", ":", "for", "rule", "in", "cs", ".", "security_group_default_rules", ".", "list", "(", ")", ":", "if", "(", "rule", ".", "ip_protocol", "and", "(", "rule", ".", "ip_protocol", ".", "upper", "(", ")", "==", "args", ".", "ip_proto", ".", "upper", "(", ")", ")", "and", "(", "rule", ".", "from_port", "==", "int", "(", "args", ".", "from_port", ")", ")", "and", "(", "rule", ".", "to_port", "==", "int", "(", "args", ".", "to_port", ")", ")", "and", "(", "rule", ".", "ip_range", "[", "'cidr'", "]", "==", "args", ".", "cidr", ")", ")", ":", "_print_secgroup_rules", "(", "[", "rule", "]", ",", "show_source_group", "=", "False", ")", "return", "cs", ".", "security_group_default_rules", ".", "delete", "(", "rule", ".", "id", ")", "raise", "exceptions", ".", "CommandError", "(", "_", "(", "'Rule not found'", ")", ")" ]
delete a rule from the set of rules that will be added to the default security group for new tenants .
train
false
40,868
def instance_type_access_get_by_flavor_id(context, flavor_id): return IMPL.instance_type_access_get_by_flavor_id(context, flavor_id)
[ "def", "instance_type_access_get_by_flavor_id", "(", "context", ",", "flavor_id", ")", ":", "return", "IMPL", ".", "instance_type_access_get_by_flavor_id", "(", "context", ",", "flavor_id", ")" ]
get flavor access by flavor id .
train
false
40,870
def output(): return s3_rest_controller()
[ "def", "output", "(", ")", ":", "return", "s3_rest_controller", "(", ")" ]
restful crud controller for the output resource .
train
false
40,871
def apply_setup(input_dim, broadcastable, conserve_memory, mean_only, learn_scale, learn_shift): bn = BatchNormalization(input_dim, broadcastable, conserve_memory, epsilon=0.0001, mean_only=mean_only, learn_scale=learn_scale, learn_shift=learn_shift) bn.initialize() b_len = (len(input_dim) if isinstance(input_dim, collections.Sequence) else 1) x = tensor.TensorType(theano.config.floatX, ([False] * (b_len + 1)))() return (bn, x)
[ "def", "apply_setup", "(", "input_dim", ",", "broadcastable", ",", "conserve_memory", ",", "mean_only", ",", "learn_scale", ",", "learn_shift", ")", ":", "bn", "=", "BatchNormalization", "(", "input_dim", ",", "broadcastable", ",", "conserve_memory", ",", "epsilon", "=", "0.0001", ",", "mean_only", "=", "mean_only", ",", "learn_scale", "=", "learn_scale", ",", "learn_shift", "=", "learn_shift", ")", "bn", ".", "initialize", "(", ")", "b_len", "=", "(", "len", "(", "input_dim", ")", "if", "isinstance", "(", "input_dim", ",", "collections", ".", "Sequence", ")", "else", "1", ")", "x", "=", "tensor", ".", "TensorType", "(", "theano", ".", "config", ".", "floatX", ",", "(", "[", "False", "]", "*", "(", "b_len", "+", "1", ")", ")", ")", "(", ")", "return", "(", "bn", ",", "x", ")" ]
common setup code .
train
false
40,872
@manager.command def createdb(): from june.models import db db.create_all()
[ "@", "manager", ".", "command", "def", "createdb", "(", ")", ":", "from", "june", ".", "models", "import", "db", "db", ".", "create_all", "(", ")" ]
create database for june .
train
false
40,874
def send_email_sns(sender, subject, message, topic_ARN, image_png): from boto3 import resource as boto3_resource sns = boto3_resource('sns') topic = sns.Topic(topic_ARN[0]) if (len(subject) > 100): subject = ((subject[0:48] + '...') + subject[(-49):]) response = topic.publish(Subject=subject, Message=message) logger.debug('Message sent to SNS.\nMessageId: {},\nRequestId: {},\nHTTPSStatusCode: {}'.format(response['MessageId'], response['ResponseMetadata']['RequestId'], response['ResponseMetadata']['HTTPStatusCode']))
[ "def", "send_email_sns", "(", "sender", ",", "subject", ",", "message", ",", "topic_ARN", ",", "image_png", ")", ":", "from", "boto3", "import", "resource", "as", "boto3_resource", "sns", "=", "boto3_resource", "(", "'sns'", ")", "topic", "=", "sns", ".", "Topic", "(", "topic_ARN", "[", "0", "]", ")", "if", "(", "len", "(", "subject", ")", ">", "100", ")", ":", "subject", "=", "(", "(", "subject", "[", "0", ":", "48", "]", "+", "'...'", ")", "+", "subject", "[", "(", "-", "49", ")", ":", "]", ")", "response", "=", "topic", ".", "publish", "(", "Subject", "=", "subject", ",", "Message", "=", "message", ")", "logger", ".", "debug", "(", "'Message sent to SNS.\\nMessageId: {},\\nRequestId: {},\\nHTTPSStatusCode: {}'", ".", "format", "(", "response", "[", "'MessageId'", "]", ",", "response", "[", "'ResponseMetadata'", "]", "[", "'RequestId'", "]", ",", "response", "[", "'ResponseMetadata'", "]", "[", "'HTTPStatusCode'", "]", ")", ")" ]
sends notification through aws sns .
train
true
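A hypothetical call, assuming boto3 is installed and valid AWS credentials are configured; the ARN below is a placeholder, and note that topic_ARN is indexed inside the function, so it must be a one-element sequence.

send_email_sns(sender=None, subject='Job finished', message='All done.',
               topic_ARN=['arn:aws:sns:us-east-1:123456789012:alerts'],
               image_png=None)  # sender and image_png are unused by the body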
40,877
def getGearProfileAnnulus(derivation, pitchRadius, teeth, toothProfile): gearProfileCylinder = getGearProfileCylinder(teeth, toothProfile) annulusRadius = ((derivation.dedendum + derivation.rimWidth) - pitchRadius) return [euclidean.getComplexPolygon(complex(), annulusRadius, (- teeth), (0.5 * math.pi)), gearProfileCylinder]
[ "def", "getGearProfileAnnulus", "(", "derivation", ",", "pitchRadius", ",", "teeth", ",", "toothProfile", ")", ":", "gearProfileCylinder", "=", "getGearProfileCylinder", "(", "teeth", ",", "toothProfile", ")", "annulusRadius", "=", "(", "(", "derivation", ".", "dedendum", "+", "derivation", ".", "rimWidth", ")", "-", "pitchRadius", ")", "return", "[", "euclidean", ".", "getComplexPolygon", "(", "complex", "(", ")", ",", "annulusRadius", ",", "(", "-", "teeth", ")", ",", "(", "0.5", "*", "math", ".", "pi", ")", ")", ",", "gearProfileCylinder", "]" ]
get gear profile for an annulus gear .
train
false
40,878
def addFacesByConvexBottomTopLoop(faces, indexedLoopBottom, indexedLoopTop): if ((len(indexedLoopBottom) == 0) or (len(indexedLoopTop) == 0)): return for indexedPointIndex in xrange(max(len(indexedLoopBottom), len(indexedLoopTop))): indexedConvex = [] if (len(indexedLoopBottom) > 1): indexedConvex.append(indexedLoopBottom[indexedPointIndex]) indexedConvex.append(indexedLoopBottom[((indexedPointIndex + 1) % len(indexedLoopBottom))]) else: indexedConvex.append(indexedLoopBottom[0]) if (len(indexedLoopTop) > 1): indexedConvex.append(indexedLoopTop[((indexedPointIndex + 1) % len(indexedLoopTop))]) indexedConvex.append(indexedLoopTop[indexedPointIndex]) else: indexedConvex.append(indexedLoopTop[0]) addFacesByConvex(faces, indexedConvex)
[ "def", "addFacesByConvexBottomTopLoop", "(", "faces", ",", "indexedLoopBottom", ",", "indexedLoopTop", ")", ":", "if", "(", "(", "len", "(", "indexedLoopBottom", ")", "==", "0", ")", "or", "(", "len", "(", "indexedLoopTop", ")", "==", "0", ")", ")", ":", "return", "for", "indexedPointIndex", "in", "xrange", "(", "max", "(", "len", "(", "indexedLoopBottom", ")", ",", "len", "(", "indexedLoopTop", ")", ")", ")", ":", "indexedConvex", "=", "[", "]", "if", "(", "len", "(", "indexedLoopBottom", ")", ">", "1", ")", ":", "indexedConvex", ".", "append", "(", "indexedLoopBottom", "[", "indexedPointIndex", "]", ")", "indexedConvex", ".", "append", "(", "indexedLoopBottom", "[", "(", "(", "indexedPointIndex", "+", "1", ")", "%", "len", "(", "indexedLoopBottom", ")", ")", "]", ")", "else", ":", "indexedConvex", ".", "append", "(", "indexedLoopBottom", "[", "0", "]", ")", "if", "(", "len", "(", "indexedLoopTop", ")", ">", "1", ")", ":", "indexedConvex", ".", "append", "(", "indexedLoopTop", "[", "(", "(", "indexedPointIndex", "+", "1", ")", "%", "len", "(", "indexedLoopTop", ")", ")", "]", ")", "indexedConvex", ".", "append", "(", "indexedLoopTop", "[", "indexedPointIndex", "]", ")", "else", ":", "indexedConvex", ".", "append", "(", "indexedLoopTop", "[", "0", "]", ")", "addFacesByConvex", "(", "faces", ",", "indexedConvex", ")" ]
add faces from loops .
train
false
40,879
def subtest_fatal(function): def wrapped(self, *args, **kwds): self._fatal = True self.decored() result = function(self, *args, **kwds) return result wrapped.func_name = function.func_name return wrapped
[ "def", "subtest_fatal", "(", "function", ")", ":", "def", "wrapped", "(", "self", ",", "*", "args", ",", "**", "kwds", ")", ":", "self", ".", "_fatal", "=", "True", "self", ".", "decored", "(", ")", "result", "=", "function", "(", "self", ",", "*", "args", ",", "**", "kwds", ")", "return", "result", "wrapped", ".", "func_name", "=", "function", ".", "func_name", "return", "wrapped" ]
decorator which marks a test as critical (fatal) .
train
false
40,880
@register.filter(name='as_crispy_field') def as_crispy_field(field, template_pack=TEMPLATE_PACK): if ((not isinstance(field, forms.BoundField)) and settings.DEBUG): raise CrispyError('|as_crispy_field got passed an invalid or inexistent field') template = get_template(('%s/field.html' % template_pack)) c = Context({'field': field, 'form_show_errors': True, 'form_show_labels': True}).flatten() return template.render(c)
[ "@", "register", ".", "filter", "(", "name", "=", "'as_crispy_field'", ")", "def", "as_crispy_field", "(", "field", ",", "template_pack", "=", "TEMPLATE_PACK", ")", ":", "if", "(", "(", "not", "isinstance", "(", "field", ",", "forms", ".", "BoundField", ")", ")", "and", "settings", ".", "DEBUG", ")", ":", "raise", "CrispyError", "(", "'|as_crispy_field got passed an invalid or inexistent field'", ")", "template", "=", "get_template", "(", "(", "'%s/field.html'", "%", "template_pack", ")", ")", "c", "=", "Context", "(", "{", "'field'", ":", "field", ",", "'form_show_errors'", ":", "True", ",", "'form_show_labels'", ":", "True", "}", ")", ".", "flatten", "(", ")", "return", "template", ".", "render", "(", "c", ")" ]
renders a form field like a django-crispy-forms field:: {% load crispy_forms_tags %} {{ form.field|as_crispy_field }} .
train
false
40,881
def _check_constant_args_pool(ndim, ws, stride, pad, node): try: ws = tuple((tensor.get_scalar_constant_value(ws[i]) for i in range(ndim))) stride = tuple((tensor.get_scalar_constant_value(stride[i]) for i in range(ndim))) pad = tuple((tensor.get_scalar_constant_value(pad[i]) for i in range(ndim))) except tensor.NotScalarConstantError: msg = ('Pool with tensor variable for the window size, stride or padding is only supported in the new GPU backend, so this op will run on CPU. (op %s)' % node) if (config.assert_no_cpu_op == 'warn'): _logger.warning(msg) elif (config.assert_no_cpu_op == 'raise'): raise AssertionError(msg) return None return (ws, stride, pad)
[ "def", "_check_constant_args_pool", "(", "ndim", ",", "ws", ",", "stride", ",", "pad", ",", "node", ")", ":", "try", ":", "ws", "=", "tuple", "(", "(", "tensor", ".", "get_scalar_constant_value", "(", "ws", "[", "i", "]", ")", "for", "i", "in", "range", "(", "ndim", ")", ")", ")", "stride", "=", "tuple", "(", "(", "tensor", ".", "get_scalar_constant_value", "(", "stride", "[", "i", "]", ")", "for", "i", "in", "range", "(", "ndim", ")", ")", ")", "pad", "=", "tuple", "(", "(", "tensor", ".", "get_scalar_constant_value", "(", "pad", "[", "i", "]", ")", "for", "i", "in", "range", "(", "ndim", ")", ")", ")", "except", "tensor", ".", "NotScalarConstantError", ":", "msg", "=", "(", "'Pool with tensor variable for the window size, stride or padding is only supported in the new GPU backend, so this op will run on CPU. (op %s)'", "%", "node", ")", "if", "(", "config", ".", "assert_no_cpu_op", "==", "'warn'", ")", ":", "_logger", ".", "warning", "(", "msg", ")", "elif", "(", "config", ".", "assert_no_cpu_op", "==", "'raise'", ")", ":", "raise", "AssertionError", "(", "msg", ")", "return", "None", "return", "(", "ws", ",", "stride", ",", "pad", ")" ]
check if the args of pool are constants .
train
false
40,883
def test_constant_data(): shape = (10, 10) data = np.ones(shape) interval = MinMaxInterval() limits = interval.get_limits(data) values = interval(data) np.testing.assert_allclose(limits, (1.0, 1.0)) np.testing.assert_allclose(values, np.zeros(shape))
[ "def", "test_constant_data", "(", ")", ":", "shape", "=", "(", "10", ",", "10", ")", "data", "=", "np", ".", "ones", "(", "shape", ")", "interval", "=", "MinMaxInterval", "(", ")", "limits", "=", "interval", ".", "get_limits", "(", "data", ")", "values", "=", "interval", "(", "data", ")", "np", ".", "testing", ".", "assert_allclose", "(", "limits", ",", "(", "1.0", ",", "1.0", ")", ")", "np", ".", "testing", ".", "assert_allclose", "(", "values", ",", "np", ".", "zeros", "(", "shape", ")", ")" ]
test intervals with constant data .
train
false
40,884
def iso9660(path): IMPLEMENTATIONS = [('isoinfo', has_isoinfo, Iso9660IsoInfo), ('iso-read', has_isoread, Iso9660IsoRead), ('mount', can_mount, Iso9660Mount)] for (name, check, klass) in IMPLEMENTATIONS: if check(): logging.debug('Automatically chosen class for iso9660: %s', name) return klass(path) return None
[ "def", "iso9660", "(", "path", ")", ":", "IMPLEMENTATIONS", "=", "[", "(", "'isoinfo'", ",", "has_isoinfo", ",", "Iso9660IsoInfo", ")", ",", "(", "'iso-read'", ",", "has_isoread", ",", "Iso9660IsoRead", ")", ",", "(", "'mount'", ",", "can_mount", ",", "Iso9660Mount", ")", "]", "for", "(", "name", ",", "check", ",", "klass", ")", "in", "IMPLEMENTATIONS", ":", "if", "check", "(", ")", ":", "logging", ".", "debug", "(", "'Automatically chosen class for iso9660: %s'", ",", "name", ")", "return", "klass", "(", "path", ")", "return", "None" ]
checks the available tools on a system and chooses the iso9660 implementation class accordingly; this is a convenience function .
train
false
40,885
def test_random_sample_different_definitions(): a = db.from_sequence(range(50), npartitions=5) assert (list(a.random_sample(0.5)) != list(a.random_sample(0.5))) assert (a.random_sample(0.5).name != a.random_sample(0.5).name)
[ "def", "test_random_sample_different_definitions", "(", ")", ":", "a", "=", "db", ".", "from_sequence", "(", "range", "(", "50", ")", ",", "npartitions", "=", "5", ")", "assert", "(", "list", "(", "a", ".", "random_sample", "(", "0.5", ")", ")", "!=", "list", "(", "a", ".", "random_sample", "(", "0.5", ")", ")", ")", "assert", "(", "a", ".", "random_sample", "(", "0.5", ")", ".", "name", "!=", "a", ".", "random_sample", "(", "0.5", ")", ".", "name", ")" ]
repeatedly defining a random sampling operation yields different results upon computation if no random seed is specified .
train
false
40,886
def ensure_dir_is_templated(dirname): if ((u'{{' in dirname) and (u'}}' in dirname)): return True else: raise NonTemplatedInputDirException
[ "def", "ensure_dir_is_templated", "(", "dirname", ")", ":", "if", "(", "(", "u'{{'", "in", "dirname", ")", "and", "(", "u'}}'", "in", "dirname", ")", ")", ":", "return", "True", "else", ":", "raise", "NonTemplatedInputDirException" ]
ensure that dirname is a templated directory name .
train
false
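A small sketch of the check, cookiecutter-style; the directory names are illustrative.

ensure_dir_is_templated('{{cookiecutter.repo_name}}')  # returns True
ensure_dir_is_templated('plain_dir')  # raises NonTemplatedInputDirException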
40,887
def merge(file, names, config, coord): inputs = get_tiles(names, config, coord) output = {'type': 'Topology', 'transform': inputs[0]['transform'], 'objects': dict(), 'arcs': list()} for (name, input) in zip(names, inputs): for (index, object) in enumerate(input['objects'].values()): if (len(input['objects']) > 1): output['objects'][('%(name)s-%(index)d' % locals())] = object else: output['objects'][name] = object for geometry in object['geometries']: update_arc_indexes(geometry, output['arcs'], input['arcs']) file.write(json.dumps(output, separators=(',', ':')).encode('utf8'))
[ "def", "merge", "(", "file", ",", "names", ",", "config", ",", "coord", ")", ":", "inputs", "=", "get_tiles", "(", "names", ",", "config", ",", "coord", ")", "output", "=", "{", "'type'", ":", "'Topology'", ",", "'transform'", ":", "inputs", "[", "0", "]", "[", "'transform'", "]", ",", "'objects'", ":", "dict", "(", ")", ",", "'arcs'", ":", "list", "(", ")", "}", "for", "(", "name", ",", "input", ")", "in", "zip", "(", "names", ",", "inputs", ")", ":", "for", "(", "index", ",", "object", ")", "in", "enumerate", "(", "input", "[", "'objects'", "]", ".", "values", "(", ")", ")", ":", "if", "(", "len", "(", "input", "[", "'objects'", "]", ")", ">", "1", ")", ":", "output", "[", "'objects'", "]", "[", "(", "'%(name)s-%(index)d'", "%", "locals", "(", ")", ")", "]", "=", "object", "else", ":", "output", "[", "'objects'", "]", "[", "name", "]", "=", "object", "for", "geometry", "in", "object", "[", "'geometries'", "]", ":", "update_arc_indexes", "(", "geometry", ",", "output", "[", "'arcs'", "]", ",", "input", "[", "'arcs'", "]", ")", "file", ".", "write", "(", "json", ".", "dumps", "(", "output", ",", "separators", "=", "(", "','", ",", "':'", ")", ")", ".", "encode", "(", "'utf8'", ")", ")" ]
merges all named topojson tile inputs into a single topology and writes it to the given file .
train
false
40,888
def remove_property_value(prop, predicate): removed_vals = [] removed_vals = filter(predicate, prop.propertyValue) if (len(removed_vals) == len(prop.propertyValue)): prop.parent.removeProperty(prop.name) else: x = prop.propertyValue.cssText for v in removed_vals: x = x.replace(v.cssText, u'').strip() prop.propertyValue.cssText = x return bool(removed_vals)
[ "def", "remove_property_value", "(", "prop", ",", "predicate", ")", ":", "removed_vals", "=", "[", "]", "removed_vals", "=", "filter", "(", "predicate", ",", "prop", ".", "propertyValue", ")", "if", "(", "len", "(", "removed_vals", ")", "==", "len", "(", "prop", ".", "propertyValue", ")", ")", ":", "prop", ".", "parent", ".", "removeProperty", "(", "prop", ".", "name", ")", "else", ":", "x", "=", "prop", ".", "propertyValue", ".", "cssText", "for", "v", "in", "removed_vals", ":", "x", "=", "x", ".", "replace", "(", "v", ".", "cssText", ",", "u''", ")", ".", "strip", "(", ")", "prop", ".", "propertyValue", ".", "cssText", "=", "x", "return", "bool", "(", "removed_vals", ")" ]
remove the values that match the predicate from this property .
train
false
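A hedged cssutils sketch (Python 2, where the snippet's filter() call returns a list); the stylesheet and predicate are illustrative, and the normalized output value is an assumption about cssutils re-parsing.

import cssutils

sheet = cssutils.parseString('div { margin: 1px 2px 3px 4px }')
prop = sheet.cssRules[0].style.getProperty('margin')
removed = remove_property_value(prop, lambda v: v.cssText == '2px')
print(removed, prop.propertyValue.cssText)  # True; '2px' removed from the value list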
40,889
@frappe.whitelist() def get_default_address_template(): return ((((((u'{{ address_line1 }}<br>{% if address_line2 %}{{ address_line2 }}<br>{% endif -%}{{ city }}<br>\n{% if state %}{{ state }}<br>{% endif -%}\n{% if pincode %}{{ pincode }}<br>{% endif -%}\n{{ country }}<br>\n{% if phone %}' + _(u'Phone')) + u': {{ phone }}<br>{% endif -%}\n{% if fax %}') + _(u'Fax')) + u': {{ fax }}<br>{% endif -%}\n{% if email_id %}') + _(u'Email')) + u': {{ email_id }}<br>{% endif -%}')
[ "@", "frappe", ".", "whitelist", "(", ")", "def", "get_default_address_template", "(", ")", ":", "return", "(", "(", "(", "(", "(", "(", "u'{{ address_line1 }}<br>{% if address_line2 %}{{ address_line2 }}<br>{% endif -%}{{ city }}<br>\\n{% if state %}{{ state }}<br>{% endif -%}\\n{% if pincode %}{{ pincode }}<br>{% endif -%}\\n{{ country }}<br>\\n{% if phone %}'", "+", "_", "(", "u'Phone'", ")", ")", "+", "u': {{ phone }}<br>{% endif -%}\\n{% if fax %}'", ")", "+", "_", "(", "u'Fax'", ")", ")", "+", "u': {{ fax }}<br>{% endif -%}\\n{% if email_id %}'", ")", "+", "_", "(", "u'Email'", ")", ")", "+", "u': {{ email_id }}<br>{% endif -%}'", ")" ]
get default address template .
train
false
40,890
def test_dynamically_emptied_directories(tmpdir): adir = tmpdir.join('adir').ensure(dir=True) bdir = adir.join('bdir').ensure(dir=True) some_file = bdir.join('afile') some_file.write('1234567890') base_dir = adir.strpath (spec, parts) = tar_partition.partition(base_dir) tar_paths = [] for part in parts: for tar_info in part: rel_path = os.path.relpath(tar_info.submitted_path, base_dir) tar_paths.append(rel_path) assert ('bdir' in tar_paths)
[ "def", "test_dynamically_emptied_directories", "(", "tmpdir", ")", ":", "adir", "=", "tmpdir", ".", "join", "(", "'adir'", ")", ".", "ensure", "(", "dir", "=", "True", ")", "bdir", "=", "adir", ".", "join", "(", "'bdir'", ")", ".", "ensure", "(", "dir", "=", "True", ")", "some_file", "=", "bdir", ".", "join", "(", "'afile'", ")", "some_file", ".", "write", "(", "'1234567890'", ")", "base_dir", "=", "adir", ".", "strpath", "(", "spec", ",", "parts", ")", "=", "tar_partition", ".", "partition", "(", "base_dir", ")", "tar_paths", "=", "[", "]", "for", "part", "in", "parts", ":", "for", "tar_info", "in", "part", ":", "rel_path", "=", "os", ".", "path", ".", "relpath", "(", "tar_info", ".", "submitted_path", ",", "base_dir", ")", "tar_paths", ".", "append", "(", "rel_path", ")", "assert", "(", "'bdir'", "in", "tar_paths", ")" ]
ensure empty directories in the base backup are created, particularly in the case where postgresql empties the files in those directories in parallel .
train
false
40,891
def clone_bench_repo(args): if os.path.exists(tmp_bench_repo): return 0 elif args.without_bench_setup: clone_path = os.path.join(os.path.expanduser('~'), 'bench') else: clone_path = tmp_bench_repo branch = (args.bench_branch or 'master') repo_url = (args.repo_url or 'https://github.com/frappe/bench') success = run_os_command({'git': 'git clone {repo_url} {bench_repo} --depth 1 --branch {branch}'.format(repo_url=repo_url, bench_repo=clone_path, branch=branch)}) return success
[ "def", "clone_bench_repo", "(", "args", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "tmp_bench_repo", ")", ":", "return", "0", "elif", "args", ".", "without_bench_setup", ":", "clone_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", ",", "'bench'", ")", "else", ":", "clone_path", "=", "tmp_bench_repo", "branch", "=", "(", "args", ".", "bench_branch", "or", "'master'", ")", "repo_url", "=", "(", "args", ".", "repo_url", "or", "'https://github.com/frappe/bench'", ")", "success", "=", "run_os_command", "(", "{", "'git'", ":", "'git clone {repo_url} {bench_repo} --depth 1 --branch {branch}'", ".", "format", "(", "repo_url", "=", "repo_url", ",", "bench_repo", "=", "clone_path", ",", "branch", "=", "branch", ")", "}", ")", "return", "success" ]
clones the bench repository in the user folder .
train
false
40,892
def legendre(n, monic=False): if (n < 0): raise ValueError('n must be nonnegative.') if (n == 0): n1 = (n + 1) else: n1 = n (x, w, mu0) = roots_legendre(n1, mu=True) if (n == 0): (x, w) = ([], []) hn = (2.0 / ((2 * n) + 1)) kn = ((_gam(((2 * n) + 1)) / (_gam((n + 1)) ** 2)) / (2.0 ** n)) p = orthopoly1d(x, w, hn, kn, wfunc=(lambda x: 1.0), limits=((-1), 1), monic=monic, eval_func=(lambda x: eval_legendre(n, x))) return p
[ "def", "legendre", "(", "n", ",", "monic", "=", "False", ")", ":", "if", "(", "n", "<", "0", ")", ":", "raise", "ValueError", "(", "'n must be nonnegative.'", ")", "if", "(", "n", "==", "0", ")", ":", "n1", "=", "(", "n", "+", "1", ")", "else", ":", "n1", "=", "n", "(", "x", ",", "w", ",", "mu0", ")", "=", "roots_legendre", "(", "n1", ",", "mu", "=", "True", ")", "if", "(", "n", "==", "0", ")", ":", "(", "x", ",", "w", ")", "=", "(", "[", "]", ",", "[", "]", ")", "hn", "=", "(", "2.0", "/", "(", "(", "2", "*", "n", ")", "+", "1", ")", ")", "kn", "=", "(", "(", "_gam", "(", "(", "(", "2", "*", "n", ")", "+", "1", ")", ")", "/", "(", "_gam", "(", "(", "n", "+", "1", ")", ")", "**", "2", ")", ")", "/", "(", "2.0", "**", "n", ")", ")", "p", "=", "orthopoly1d", "(", "x", ",", "w", ",", "hn", ",", "kn", ",", "wfunc", "=", "(", "lambda", "x", ":", "1.0", ")", ",", "limits", "=", "(", "(", "-", "1", ")", ",", "1", ")", ",", "monic", "=", "monic", ",", "eval_func", "=", "(", "lambda", "x", ":", "eval_legendre", "(", "n", ",", "x", ")", ")", ")", "return", "p" ]
legendre polynomial .
train
false
40,894
def register_context(name, cls, *args, **kwargs): instance = cls(*args, **kwargs) proxy = ProxyContext(instance) _contexts[name] = {'cls': cls, 'args': args, 'kwargs': kwargs, 'proxy': proxy} _default_context[name] = instance return proxy
[ "def", "register_context", "(", "name", ",", "cls", ",", "*", "args", ",", "**", "kwargs", ")", ":", "instance", "=", "cls", "(", "*", "args", ",", "**", "kwargs", ")", "proxy", "=", "ProxyContext", "(", "instance", ")", "_contexts", "[", "name", "]", "=", "{", "'cls'", ":", "cls", ",", "'args'", ":", "args", ",", "'kwargs'", ":", "kwargs", ",", "'proxy'", ":", "proxy", "}", "_default_context", "[", "name", "]", "=", "instance", "return", "proxy" ]
register a new context .
train
false
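A hypothetical registration, assuming ProxyContext forwards attribute access to the active instance; the class and name below are invented for illustration.

class DrawContext(object):
    def __init__(self, color='black'):
        self.color = color

ctx = register_context('draw', DrawContext, color='red')
print(ctx.color)  # 'red', if the proxy forwards attribute lookups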
40,896
def _strip_once(value): s = MLStripper() try: s.feed(value) except HTMLParseError: return value try: s.close() except HTMLParseError: return (s.get_data() + s.rawdata) else: return s.get_data()
[ "def", "_strip_once", "(", "value", ")", ":", "s", "=", "MLStripper", "(", ")", "try", ":", "s", ".", "feed", "(", "value", ")", "except", "HTMLParseError", ":", "return", "value", "try", ":", "s", ".", "close", "(", ")", "except", "HTMLParseError", ":", "return", "(", "s", ".", "get_data", "(", ")", "+", "s", ".", "rawdata", ")", "else", ":", "return", "s", ".", "get_data", "(", ")" ]
internal tag stripping utility used by strip_tags .
train
false
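A minimal sketch; a single pass suffices for well-formed markup, which is why the caller (Django's strip_tags) loops _strip_once until the value stops changing.

print(_strip_once('<p>Hello <b>world</b></p>'))  # 'Hello world'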
40,898
def print_query(results): print('Query Parameters:') query = results.get('query') for (key, value) in query.iteritems(): print(('%s = %s' % (key, value))) print()
[ "def", "print_query", "(", "results", ")", ":", "print", "(", "'Query Parameters:'", ")", "query", "=", "results", ".", "get", "(", "'query'", ")", "for", "(", "key", ",", "value", ")", "in", "query", ".", "iteritems", "(", ")", ":", "print", "(", "(", "'%s = %s'", "%", "(", "key", ",", "value", ")", ")", ")", "print", "(", ")" ]
print the query parameters; the results dict carries the original report query as a dict under the 'query' key .
train
false
40,899
def filter_css(container, properties, names=()): properties = normalize_filter_css(properties) return transform_css(container, transform_sheet=partial(filter_sheet, properties=properties), transform_style=partial(filter_declaration, properties=properties), names=names)
[ "def", "filter_css", "(", "container", ",", "properties", ",", "names", "=", "(", ")", ")", ":", "properties", "=", "normalize_filter_css", "(", "properties", ")", "return", "transform_css", "(", "container", ",", "transform_sheet", "=", "partial", "(", "filter_sheet", ",", "properties", "=", "properties", ")", ",", "transform_style", "=", "partial", "(", "filter_declaration", ",", "properties", "=", "properties", ")", ",", "names", "=", "names", ")" ]
remove the specified css properties from all css rules in the book .
train
false
40,900
def test_video(): gif_path = os.path.join(BASE_DIRECTORY, 'artwork', 'example.gif') assert hasattr(hug.output_format.mp4_video(gif_path, hug.Response()), 'read') with open(gif_path, 'rb') as image_file: assert hasattr(hug.output_format.mp4_video(image_file, hug.Response()), 'read') assert (hug.output_format.mp4_video('Not Existent', hug.Response()) is None) class FakeVideoWithSave: def save(self, to, format): to.write('test') assert hasattr(hug.output_format.mp4_video(FakeVideoWithSave(), hug.Response()), 'read') class FakeVideoWithSave: def render(self): return 'test' assert (hug.output_format.avi_video(FakeVideoWithSave(), hug.Response()) == 'test')
[ "def", "test_video", "(", ")", ":", "gif_path", "=", "os", ".", "path", ".", "join", "(", "BASE_DIRECTORY", ",", "'artwork'", ",", "'example.gif'", ")", "assert", "hasattr", "(", "hug", ".", "output_format", ".", "mp4_video", "(", "gif_path", ",", "hug", ".", "Response", "(", ")", ")", ",", "'read'", ")", "with", "open", "(", "gif_path", ",", "'rb'", ")", "as", "image_file", ":", "assert", "hasattr", "(", "hug", ".", "output_format", ".", "mp4_video", "(", "image_file", ",", "hug", ".", "Response", "(", ")", ")", ",", "'read'", ")", "assert", "(", "hug", ".", "output_format", ".", "mp4_video", "(", "'Not Existent'", ",", "hug", ".", "Response", "(", ")", ")", "is", "None", ")", "class", "FakeVideoWithSave", ":", "def", "save", "(", "self", ",", "to", ",", "format", ")", ":", "to", ".", "write", "(", "'test'", ")", "assert", "hasattr", "(", "hug", ".", "output_format", ".", "mp4_video", "(", "FakeVideoWithSave", "(", ")", ",", "hug", ".", "Response", "(", ")", ")", ",", "'read'", ")", "class", "FakeVideoWithSave", ":", "def", "render", "(", "self", ")", ":", "return", "'test'", "assert", "(", "hug", ".", "output_format", ".", "avi_video", "(", "FakeVideoWithSave", "(", ")", ",", "hug", ".", "Response", "(", ")", ")", "==", "'test'", ")" ]
ensure that it's possible to output videos with hug .
train
false
40,901
def get_continuous_computations_info(cc_classes): cc_models = job_models.ContinuousComputationModel.get_multi([cc_class.__name__ for cc_class in cc_classes]) result = [] for (ind, model) in enumerate(cc_models): if (model is None): cc_dict = {'computation_type': cc_classes[ind].__name__, 'status_code': 'never_started', 'last_started_msec': None, 'last_finished_msec': None, 'last_stopped_msec': None, 'active_realtime_layer_index': None, 'is_startable': True, 'is_stoppable': False} else: cc_dict = {'computation_type': cc_classes[ind].__name__, 'status_code': model.status_code, 'last_started_msec': model.last_started_msec, 'last_finished_msec': model.last_finished_msec, 'last_stopped_msec': model.last_stopped_msec, 'active_realtime_layer_index': model.active_realtime_layer_index, 'is_startable': (model.status_code == job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_IDLE), 'is_stoppable': (model.status_code == job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_RUNNING)} result.append(cc_dict) return result
[ "def", "get_continuous_computations_info", "(", "cc_classes", ")", ":", "cc_models", "=", "job_models", ".", "ContinuousComputationModel", ".", "get_multi", "(", "[", "cc_class", ".", "__name__", "for", "cc_class", "in", "cc_classes", "]", ")", "result", "=", "[", "]", "for", "(", "ind", ",", "model", ")", "in", "enumerate", "(", "cc_models", ")", ":", "if", "(", "model", "is", "None", ")", ":", "cc_dict", "=", "{", "'computation_type'", ":", "cc_classes", "[", "ind", "]", ".", "__name__", ",", "'status_code'", ":", "'never_started'", ",", "'last_started_msec'", ":", "None", ",", "'last_finished_msec'", ":", "None", ",", "'last_stopped_msec'", ":", "None", ",", "'active_realtime_layer_index'", ":", "None", ",", "'is_startable'", ":", "True", ",", "'is_stoppable'", ":", "False", "}", "else", ":", "cc_dict", "=", "{", "'computation_type'", ":", "cc_classes", "[", "ind", "]", ".", "__name__", ",", "'status_code'", ":", "model", ".", "status_code", ",", "'last_started_msec'", ":", "model", ".", "last_started_msec", ",", "'last_finished_msec'", ":", "model", ".", "last_finished_msec", ",", "'last_stopped_msec'", ":", "model", ".", "last_stopped_msec", ",", "'active_realtime_layer_index'", ":", "model", ".", "active_realtime_layer_index", ",", "'is_startable'", ":", "(", "model", ".", "status_code", "==", "job_models", ".", "CONTINUOUS_COMPUTATION_STATUS_CODE_IDLE", ")", ",", "'is_stoppable'", ":", "(", "model", ".", "status_code", "==", "job_models", ".", "CONTINUOUS_COMPUTATION_STATUS_CODE_RUNNING", ")", "}", "result", ".", "append", "(", "cc_dict", ")", "return", "result" ]
returns data about the given computations .
train
false
40,902
def _send_decision_email(instance): context = {'name': instance.user.username, 'api_management_url': urlunsplit((('https' if (settings.HTTPS == 'on') else 'http'), instance.site.domain, reverse('api_admin:api-status'), '', '')), 'authentication_docs_url': settings.AUTH_DOCUMENTATION_URL, 'api_docs_url': settings.API_DOCUMENTATION_URL, 'support_email_address': settings.API_ACCESS_FROM_EMAIL, 'platform_name': configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)} message = render_to_string('api_admin/api_access_request_email_{status}.txt'.format(status=instance.status), context) try: send_mail(_('API access request'), message, settings.API_ACCESS_FROM_EMAIL, [instance.user.email], fail_silently=False) instance.contacted = True except SMTPException: log.exception('Error sending API user notification email for request [%s].', instance.id)
[ "def", "_send_decision_email", "(", "instance", ")", ":", "context", "=", "{", "'name'", ":", "instance", ".", "user", ".", "username", ",", "'api_management_url'", ":", "urlunsplit", "(", "(", "(", "'https'", "if", "(", "settings", ".", "HTTPS", "==", "'on'", ")", "else", "'http'", ")", ",", "instance", ".", "site", ".", "domain", ",", "reverse", "(", "'api_admin:api-status'", ")", ",", "''", ",", "''", ")", ")", ",", "'authentication_docs_url'", ":", "settings", ".", "AUTH_DOCUMENTATION_URL", ",", "'api_docs_url'", ":", "settings", ".", "API_DOCUMENTATION_URL", ",", "'support_email_address'", ":", "settings", ".", "API_ACCESS_FROM_EMAIL", ",", "'platform_name'", ":", "configuration_helpers", ".", "get_value", "(", "'PLATFORM_NAME'", ",", "settings", ".", "PLATFORM_NAME", ")", "}", "message", "=", "render_to_string", "(", "'api_admin/api_access_request_email_{status}.txt'", ".", "format", "(", "status", "=", "instance", ".", "status", ")", ",", "context", ")", "try", ":", "send_mail", "(", "_", "(", "'API access request'", ")", ",", "message", ",", "settings", ".", "API_ACCESS_FROM_EMAIL", ",", "[", "instance", ".", "user", ".", "email", "]", ",", "fail_silently", "=", "False", ")", "instance", ".", "contacted", "=", "True", "except", "SMTPException", ":", "log", ".", "exception", "(", "'Error sending API user notification email for request [%s].'", ",", "instance", ".", "id", ")" ]
send an email to requesting user with the decision made about their request .
train
false
40,904
def pil_to_nparray(pil_image): pil_image.load() return np.asarray(pil_image, dtype='float32')
[ "def", "pil_to_nparray", "(", "pil_image", ")", ":", "pil_image", ".", "load", "(", ")", "return", "np", ".", "asarray", "(", "pil_image", ",", "dtype", "=", "'float32'", ")" ]
convert a pil image to a numpy array .
train
false
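A short sketch; 'example.png' is a placeholder path, not a file from the original project.

from PIL import Image

img = Image.open('example.png')  # placeholder image path
arr = pil_to_nparray(img)
print(arr.shape, arr.dtype)      # e.g. (height, width, 3) float32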
40,905
def Step(data=None, x=None, y=None, **kws): kws['x'] = x kws['y'] = y return create_and_build(StepBuilder, data, **kws)
[ "def", "Step", "(", "data", "=", "None", ",", "x", "=", "None", ",", "y", "=", "None", ",", "**", "kws", ")", ":", "kws", "[", "'x'", "]", "=", "x", "kws", "[", "'y'", "]", "=", "y", "return", "create_and_build", "(", "StepBuilder", ",", "data", ",", "**", "kws", ")" ]
create a step chart using :class:`StepBuilder <bokeh.charts.builders.step_builder.StepBuilder>` to render the step geometry from the given values .
train
false
40,908
def mr(n, bases): from sympy.ntheory.factor_ import trailing from sympy.polys.domains import ZZ n = as_int(n) if (n < 2): return False s = trailing((n - 1)) t = (n >> s) for base in bases: if (base >= n): base %= n if (base >= 2): base = ZZ(base) if (not _test(n, base, s, t)): return False return True
[ "def", "mr", "(", "n", ",", "bases", ")", ":", "from", "sympy", ".", "ntheory", ".", "factor_", "import", "trailing", "from", "sympy", ".", "polys", ".", "domains", "import", "ZZ", "n", "=", "as_int", "(", "n", ")", "if", "(", "n", "<", "2", ")", ":", "return", "False", "s", "=", "trailing", "(", "(", "n", "-", "1", ")", ")", "t", "=", "(", "n", ">>", "s", ")", "for", "base", "in", "bases", ":", "if", "(", "base", ">=", "n", ")", ":", "base", "%=", "n", "if", "(", "base", ">=", "2", ")", ":", "base", "=", "ZZ", "(", "base", ")", "if", "(", "not", "_test", "(", "n", ",", "base", ",", "s", ",", "t", ")", ")", ":", "return", "False", "return", "True" ]
perform a miller-rabin strong pseudoprime test on n using a given list of bases/witnesses .
train
false
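A few checks with well-known values: 2047 = 23 * 89 is the smallest strong pseudoprime to base 2, so it slips past that single witness.

print(mr(7, [2]))     # True: 7 is prime
print(mr(9, [2]))     # False: base 2 witnesses that 9 is composite
print(mr(2047, [2]))  # True despite 2047 being composite (23 * 89)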
40,909
@task def haproxy_install(): fprint('Rsync thirdparty/haproxy ~/haproxy') rsync_project(local_dir='third_party/haproxy/', remote_dir='~/haproxy/', ssh_opts='-o StrictHostKeyChecking=no') fprint('Building haproxy') run('haproxy/build.sh ~/') fprint('Generating viewfinder.pem for haproxy') vf_passphrase = load_passphrase_from_file() local(('scripts/generate_haproxy_certificate.sh viewfinder.co %s viewfinder.pem' % vf_passphrase)) run('mkdir -p ~/conf') run('rm -f ~/conf/viewfinder.pem') put('viewfinder.pem', '~/conf/viewfinder.pem') run('chmod 400 ~/conf/viewfinder.pem') local('rm -f viewfinder.pem') fprint('Pushing haproxy configs') assert env.nodetype, 'no nodetype specified' run('ln -f -s ~/viewfinder/scripts/haproxy.conf ~/conf/haproxy.conf') run(('ln -f -s ~/viewfinder/scripts/haproxy.redirect.%s.conf ~/conf/haproxy.redirect.conf' % env.nodetype.lower()))
[ "@", "task", "def", "haproxy_install", "(", ")", ":", "fprint", "(", "'Rsync thirdparty/haproxy ~/haproxy'", ")", "rsync_project", "(", "local_dir", "=", "'third_party/haproxy/'", ",", "remote_dir", "=", "'~/haproxy/'", ",", "ssh_opts", "=", "'-o StrictHostKeyChecking=no'", ")", "fprint", "(", "'Building haproxy'", ")", "run", "(", "'haproxy/build.sh ~/'", ")", "fprint", "(", "'Generating viewfinder.pem for haproxy'", ")", "vf_passphrase", "=", "load_passphrase_from_file", "(", ")", "local", "(", "(", "'scripts/generate_haproxy_certificate.sh viewfinder.co %s viewfinder.pem'", "%", "vf_passphrase", ")", ")", "run", "(", "'mkdir -p ~/conf'", ")", "run", "(", "'rm -f ~/conf/viewfinder.pem'", ")", "put", "(", "'viewfinder.pem'", ",", "'~/conf/viewfinder.pem'", ")", "run", "(", "'chmod 400 ~/conf/viewfinder.pem'", ")", "local", "(", "'rm -f viewfinder.pem'", ")", "fprint", "(", "'Pushing haproxy configs'", ")", "assert", "env", ".", "nodetype", ",", "'no nodetype specified'", "run", "(", "'ln -f -s ~/viewfinder/scripts/haproxy.conf ~/conf/haproxy.conf'", ")", "run", "(", "(", "'ln -f -s ~/viewfinder/scripts/haproxy.redirect.%s.conf ~/conf/haproxy.redirect.conf'", "%", "env", ".", "nodetype", ".", "lower", "(", ")", ")", ")" ]
install and configure haproxy .
train
false
40,912
def AnalyticsDataFeedFromString(xml_string): feed = atom.CreateClassFromXMLString(AnalyticsDataFeed, xml_string) if feed.entry: for entry in feed.entry: for met in entry.metric: entry.__dict__[met.name.replace('ga:', '')] = met if (entry.dimension is not None): for dim in entry.dimension: entry.__dict__[dim.name.replace('ga:', '')] = dim return feed
[ "def", "AnalyticsDataFeedFromString", "(", "xml_string", ")", ":", "feed", "=", "atom", ".", "CreateClassFromXMLString", "(", "AnalyticsDataFeed", ",", "xml_string", ")", "if", "feed", ".", "entry", ":", "for", "entry", "in", "feed", ".", "entry", ":", "for", "met", "in", "entry", ".", "metric", ":", "entry", ".", "__dict__", "[", "met", ".", "name", ".", "replace", "(", "'ga:'", ",", "''", ")", "]", "=", "met", "if", "(", "entry", ".", "dimension", "is", "not", "None", ")", ":", "for", "dim", "in", "entry", ".", "dimension", ":", "entry", ".", "__dict__", "[", "dim", ".", "name", ".", "replace", "(", "'ga:'", ",", "''", ")", "]", "=", "dim", "return", "feed" ]
converts an xml string into an analyticsdatafeed object .
train
false
40,913
def convert_config_string_to_dict(config_string): resultant_dict = {} try: st = config_string.replace('=', ':') st = st.replace(' ', ', ') resultant_dict = ast.literal_eval(st) except Exception: LOG.warning(_LW('Error encountered translating config_string: %(config_string)s to dict'), {'config_string': config_string}) return resultant_dict
[ "def", "convert_config_string_to_dict", "(", "config_string", ")", ":", "resultant_dict", "=", "{", "}", "try", ":", "st", "=", "config_string", ".", "replace", "(", "'='", ",", "':'", ")", "st", "=", "st", ".", "replace", "(", "' '", ",", "', '", ")", "resultant_dict", "=", "ast", ".", "literal_eval", "(", "st", ")", "except", "Exception", ":", "LOG", ".", "warning", "(", "_LW", "(", "'Error encountered translating config_string: %(config_string)s to dict'", ")", ",", "{", "'config_string'", ":", "config_string", "}", ")", "return", "resultant_dict" ]
convert config file replication string to a dict .
train
false
40,914
def offline_sync_database_to_version(version=None): global USE_TRIGGERS USE_TRIGGERS = False if version: _sync_common_repo(version) else: expand_schema() migrate_data() contract_schema()
[ "def", "offline_sync_database_to_version", "(", "version", "=", "None", ")", ":", "global", "USE_TRIGGERS", "USE_TRIGGERS", "=", "False", "if", "version", ":", "_sync_common_repo", "(", "version", ")", "else", ":", "expand_schema", "(", ")", "migrate_data", "(", ")", "contract_schema", "(", ")" ]
perform an off-line sync of the database .
train
false
40,915
def assert_calculated_changes(case, node_state, node_config, nonmanifest_datasets, expected_changes, additional_node_states=frozenset(), leases=Leases(), discovered_datasets=None): api = UnusableAPI() deployer = BlockDeviceDeployer(node_uuid=node_state.uuid, hostname=node_state.hostname, block_device_api=api) cluster_state = compute_cluster_state(node_state, additional_node_states, nonmanifest_datasets) if (discovered_datasets is None): local_state = local_state_from_shared_state(node_state=node_state, nonmanifest_datasets=cluster_state.nonmanifest_datasets) else: local_state = BlockDeviceDeployerLocalState(node_uuid=node_state.uuid, hostname=node_state.hostname, datasets=dataset_map_from_iterable(discovered_datasets)) case.assertEqual(local_state.shared_state_changes(), (node_state.set('applications', None), NonManifestDatasets(datasets=cluster_state.nonmanifest_datasets)), 'Inconsistent test data.') return assert_calculated_changes_for_deployer(case, deployer, node_state, node_config, nonmanifest_datasets, additional_node_states, set(), expected_changes, local_state, leases=leases)
[ "def", "assert_calculated_changes", "(", "case", ",", "node_state", ",", "node_config", ",", "nonmanifest_datasets", ",", "expected_changes", ",", "additional_node_states", "=", "frozenset", "(", ")", ",", "leases", "=", "Leases", "(", ")", ",", "discovered_datasets", "=", "None", ")", ":", "api", "=", "UnusableAPI", "(", ")", "deployer", "=", "BlockDeviceDeployer", "(", "node_uuid", "=", "node_state", ".", "uuid", ",", "hostname", "=", "node_state", ".", "hostname", ",", "block_device_api", "=", "api", ")", "cluster_state", "=", "compute_cluster_state", "(", "node_state", ",", "additional_node_states", ",", "nonmanifest_datasets", ")", "if", "(", "discovered_datasets", "is", "None", ")", ":", "local_state", "=", "local_state_from_shared_state", "(", "node_state", "=", "node_state", ",", "nonmanifest_datasets", "=", "cluster_state", ".", "nonmanifest_datasets", ")", "else", ":", "local_state", "=", "BlockDeviceDeployerLocalState", "(", "node_uuid", "=", "node_state", ".", "uuid", ",", "hostname", "=", "node_state", ".", "hostname", ",", "datasets", "=", "dataset_map_from_iterable", "(", "discovered_datasets", ")", ")", "case", ".", "assertEqual", "(", "local_state", ".", "shared_state_changes", "(", ")", ",", "(", "node_state", ".", "set", "(", "'applications'", ",", "None", ")", ",", "NonManifestDatasets", "(", "datasets", "=", "cluster_state", ".", "nonmanifest_datasets", ")", ")", ",", "'Inconsistent test data.'", ")", "return", "assert_calculated_changes_for_deployer", "(", "case", ",", "deployer", ",", "node_state", ",", "node_config", ",", "nonmanifest_datasets", ",", "additional_node_states", ",", "set", "(", ")", ",", "expected_changes", ",", "local_state", ",", "leases", "=", "leases", ")" ]
assert that blockdevicedeployer calculates certain changes in a certain circumstance .
train
false
40,916
def builtin_lookup(name): builtin_astroid = MANAGER.ast_from_module(builtins) if (name == '__dict__'): return (builtin_astroid, ()) try: stmts = builtin_astroid.locals[name] except KeyError: stmts = () return (builtin_astroid, stmts)
[ "def", "builtin_lookup", "(", "name", ")", ":", "builtin_astroid", "=", "MANAGER", ".", "ast_from_module", "(", "builtins", ")", "if", "(", "name", "==", "'__dict__'", ")", ":", "return", "(", "builtin_astroid", ",", "(", ")", ")", "try", ":", "stmts", "=", "builtin_astroid", ".", "locals", "[", "name", "]", "except", "KeyError", ":", "stmts", "=", "(", ")", "return", "(", "builtin_astroid", ",", "stmts", ")" ]
look up a name in the builtin module; return the list of matching statements and the astroid node for the builtin module .
train
true
40,917
def ErrCheckHandle(result, func, args): if (not result): raise WinError() return AutoHANDLE(result)
[ "def", "ErrCheckHandle", "(", "result", ",", "func", ",", "args", ")", ":", "if", "(", "not", "result", ")", ":", "raise", "WinError", "(", ")", "return", "AutoHANDLE", "(", "result", ")" ]
errcheck function for windows functions that return a handle .
train
false
40,918
def show_quick_panel(window, list, on_done): flags = 0 if (int(sublime.version()) >= 3070): flags = sublime.KEEP_OPEN_ON_FOCUS_LOST return window.show_quick_panel(list, on_done, flags)
[ "def", "show_quick_panel", "(", "window", ",", "list", ",", "on_done", ")", ":", "flags", "=", "0", "if", "(", "int", "(", "sublime", ".", "version", "(", ")", ")", ">=", "3070", ")", ":", "flags", "=", "sublime", ".", "KEEP_OPEN_ON_FOCUS_LOST", "return", "window", ".", "show_quick_panel", "(", "list", ",", "on_done", ",", "flags", ")" ]
wrapper for the window.show_quick_panel api; keeps the panel open on focus loss when the sublime text build supports it .
train
false
40,919
def iter_all(class_name): for (cls, wdict) in six.iteritems(live_refs): if (cls.__name__ == class_name): return six.iterkeys(wdict)
[ "def", "iter_all", "(", "class_name", ")", ":", "for", "(", "cls", ",", "wdict", ")", "in", "six", ".", "iteritems", "(", "live_refs", ")", ":", "if", "(", "cls", ".", "__name__", "==", "class_name", ")", ":", "return", "six", ".", "iterkeys", "(", "wdict", ")" ]
iterate over all objects of the same class by its class name .
train
false
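A hedged scrapy.utils.trackref-style sketch; note the function falls through and returns None (not an empty iterator) when no class matches, hence the guard. 'Request' is illustrative.

for obj in (iter_all('Request') or ()):
    print(obj)  # each live Request tracked in live_refs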
40,921
def fig_to_vincent(fig): renderer = VincentRenderer() exporter = Exporter(renderer) exporter.run(fig) return renderer.chart
[ "def", "fig_to_vincent", "(", "fig", ")", ":", "renderer", "=", "VincentRenderer", "(", ")", "exporter", "=", "Exporter", "(", "renderer", ")", "exporter", ".", "run", "(", "fig", ")", "return", "renderer", ".", "chart" ]
convert a matplotlib figure to a vincent object .
train
false
40,924
def make_region(*arg, **kw): return CacheRegion(*arg, **kw)
[ "def", "make_region", "(", "*", "arg", ",", "**", "kw", ")", ":", "return", "CacheRegion", "(", "*", "arg", ",", "**", "kw", ")" ]
instantiate a new :class:`.CacheRegion` .
train
false
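A minimal dogpile.cache-style sketch; the in-memory backend name is the conventional one, but treat the exact configure arguments as an assumption.

region = make_region().configure('dogpile.cache.memory')
region.set('answer', 42)
print(region.get('answer'))  # 42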
40,925
def distill_naming(dictionary): d = {key: value for (key, value) in dictionary.items() if (key in NAMING_DEFAULTS)} return Naming(d)
[ "def", "distill_naming", "(", "dictionary", ")", ":", "d", "=", "{", "key", ":", "value", "for", "(", "key", ",", "value", ")", "in", "dictionary", ".", "items", "(", ")", "if", "(", "key", "in", "NAMING_DEFAULTS", ")", "}", "return", "Naming", "(", "d", ")" ]
distill only keys and values related to the naming conventions .
train
false
40,926
def allocate_lock(): return LockType()
[ "def", "allocate_lock", "(", ")", ":", "return", "LockType", "(", ")" ]
dummy implementation of thread.allocate_lock() .
train
false
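The returned LockType mimics the real thread-lock interface, so the usual acquire/release pattern still works under the dummy module:

lock = allocate_lock()
if lock.acquire():
    try:
        pass  # critical section; no real contention under the dummy module
    finally:
        lock.release()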
40,927
def safe_filter(error_output=u''): def inner(f): @wraps(f) def wrapper(*args, **kwargs): try: return f(*args, **kwargs) except Exception as err: if sorl_settings.THUMBNAIL_DEBUG: raise logger.error((u'Thumbnail filter failed: %s' % err.message), exc_info=sys.exc_info()) return error_output return wrapper return inner
[ "def", "safe_filter", "(", "error_output", "=", "u''", ")", ":", "def", "inner", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "**", "kwargs", ")", ":", "try", ":", "return", "f", "(", "*", "args", ",", "**", "kwargs", ")", "except", "Exception", "as", "err", ":", "if", "sorl_settings", ".", "THUMBNAIL_DEBUG", ":", "raise", "logger", ".", "error", "(", "(", "u'Thumbnail filter failed: %s'", "%", "err", ".", "message", ")", ",", "exc_info", "=", "sys", ".", "exc_info", "(", ")", ")", "return", "error_output", "return", "wrapper", "return", "inner" ]
a safe filter decorator, only raising errors when thumbnail_debug is true and otherwise returning error_output .
train
true
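A hypothetical sorl-thumbnail-style filter wrapped by the decorator (Python 2, matching the err.message access above); the function body is invented, and the fallback fires only when THUMBNAIL_DEBUG is off.

@safe_filter(error_output='missing')
def thumbnail_url(source):
    return source.url  # raises AttributeError for e.g. None input

print(thumbnail_url(None))  # 'missing' (error swallowed and logged)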
40,928
def getVertexGivenBinary(byteIndex, stlData): return Vector3(getFloatGivenBinary(byteIndex, stlData), getFloatGivenBinary((byteIndex + 4), stlData), getFloatGivenBinary((byteIndex + 8), stlData))
[ "def", "getVertexGivenBinary", "(", "byteIndex", ",", "stlData", ")", ":", "return", "Vector3", "(", "getFloatGivenBinary", "(", "byteIndex", ",", "stlData", ")", ",", "getFloatGivenBinary", "(", "(", "byteIndex", "+", "4", ")", ",", "stlData", ")", ",", "getFloatGivenBinary", "(", "(", "byteIndex", "+", "8", ")", ",", "stlData", ")", ")" ]
get vertex from stl binary data at the given byte index .
train
false
40,930
def equalsIgnoreCase(a, b): return ((a == b) or (string.lower(a) == string.lower(b)))
[ "def", "equalsIgnoreCase", "(", "a", ",", "b", ")", ":", "return", "(", "(", "a", "==", "b", ")", "or", "(", "string", ".", "lower", "(", "a", ")", "==", "string", ".", "lower", "(", "b", ")", ")", ")" ]
return true iff a and b have the same lowercase representation .
train
false