Dataset schema (column, dtype, observed min/max):

    id_within_dataset       int64           1 to 55.5k
    snippet                 stringlengths   19 to 14.2k
    tokens                  listlengths     6 to 1.63k
    nl                      stringlengths   6 to 352
    split_within_dataset    stringclasses   1 value
    is_duplicated           bool            2 classes

Each record below lists these fields in order: id_within_dataset, snippet (Python source), tokens, nl, split_within_dataset, is_duplicated.
12,196
def check_pack_directory_exists(pack):
    packs_base_paths = get_packs_base_paths()
    for base_dir in packs_base_paths:
        pack_path = os.path.join(base_dir, pack)
        if os.path.exists(pack_path):
            return True
    return False
[ "def", "check_pack_directory_exists", "(", "pack", ")", ":", "packs_base_paths", "=", "get_packs_base_paths", "(", ")", "for", "base_dir", "in", "packs_base_paths", ":", "pack_path", "=", "os", ".", "path", ".", "join", "(", "base_dir", ",", "pack", ")", "if", "os", ".", "path", ".", "exists", "(", "pack_path", ")", ":", "return", "True", "return", "False" ]
check if a provided pack exists in one of the pack paths .
train
false
12,197
def dfs_tree(G, source=None):
    T = nx.DiGraph()
    if (source is None):
        T.add_nodes_from(G)
    else:
        T.add_node(source)
    T.add_edges_from(dfs_edges(G, source))
    return T
[ "def", "dfs_tree", "(", "G", ",", "source", "=", "None", ")", ":", "T", "=", "nx", ".", "DiGraph", "(", ")", "if", "(", "source", "is", "None", ")", ":", "T", ".", "add_nodes_from", "(", "G", ")", "else", ":", "T", ".", "add_node", "(", "source", ")", "T", ".", "add_edges_from", "(", "dfs_edges", "(", "G", ",", "source", ")", ")", "return", "T" ]
return oriented tree constructed from a depth-first-search from source .
train
false
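A quick usage sketch for the snippet above (illustration only, not dataset content); it assumes NetworkX, where this function ships as nx.dfs_tree:

import networkx as nx

G = nx.Graph([(0, 1), (1, 2), (2, 0)])
T = nx.dfs_tree(G, source=0)  # oriented (directed) tree rooted at the source
print(sorted(T.edges()))      # [(0, 1), (1, 2)]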
12,198
def test_abort_message_only_printed_once():
    with quiet():
        result = local('fab -f tests/support/aborts.py kaboom', capture=True)
        eq_(result.stderr, 'Fatal error: It burns!\n\nAborting.')
[ "def", "test_abort_message_only_printed_once", "(", ")", ":", "with", "quiet", "(", ")", ":", "result", "=", "local", "(", "'fab -f tests/support/aborts.py kaboom'", ",", "capture", "=", "True", ")", "eq_", "(", "result", ".", "stderr", ",", "'Fatal error: It burns!\\n\\nAborting.'", ")" ]
abort()s systemexit should not cause a reprint of the error message .
train
false
12,201
def FiniteRV(name, density): return rv(name, FiniteDistributionHandmade, density)
[ "def", "FiniteRV", "(", "name", ",", "density", ")", ":", "return", "rv", "(", "name", ",", "FiniteDistributionHandmade", ",", "density", ")" ]
create a finite random variable given a dict representing the density .
train
false
12,202
def parse_constraint(source, constraints, ch):
    if (ch not in 'deis'):
        raise ParseError()
    if (ch in constraints):
        raise ParseError()
    return ch
[ "def", "parse_constraint", "(", "source", ",", "constraints", ",", "ch", ")", ":", "if", "(", "ch", "not", "in", "'deis'", ")", ":", "raise", "ParseError", "(", ")", "if", "(", "ch", "in", "constraints", ")", ":", "raise", "ParseError", "(", ")", "return", "ch" ]
parses a constraint .
train
false
12,203
def make_cost_matrix(profit_matrix, inversion_function):
    cost_matrix = []
    for row in profit_matrix:
        cost_matrix.append([inversion_function(value) for value in row])
    return cost_matrix
[ "def", "make_cost_matrix", "(", "profit_matrix", ",", "inversion_function", ")", ":", "cost_matrix", "=", "[", "]", "for", "row", "in", "profit_matrix", ":", "cost_matrix", ".", "append", "(", "[", "inversion_function", "(", "value", ")", "for", "value", "in", "row", "]", ")", "return", "cost_matrix" ]
create a cost matrix from a profit matrix by calling inversion_function to invert each value .
train
true
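A minimal usage sketch for make_cost_matrix above (illustration only, not dataset content), using a lambda as the inversion function:

profit = [[1, 2], [3, 4]]
cost = make_cost_matrix(profit, lambda v: 10 - v)
print(cost)  # [[9, 8], [7, 6]]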
12,204
def generic_ospf_parser(pattern, ospf_data):
    a_match = re.search(pattern, ospf_data)
    if a_match:
        return a_match.group(1)
    return None
[ "def", "generic_ospf_parser", "(", "pattern", ",", "ospf_data", ")", ":", "a_match", "=", "re", ".", "search", "(", "pattern", ",", "ospf_data", ")", "if", "a_match", ":", "return", "a_match", ".", "group", "(", "1", ")", "return", "None" ]
takes a generic regular expression pattern that has a group(1) match pattern and returns this else returns none .
train
false
12,205
def should_embed(dep_name):
    path = dep_abspath(dep_name)
    defkey = 'EMBED'
    key = ((dep_name.replace('-', '').upper() + '_') + defkey)
    return _get_config_value(key, defkey, path)
[ "def", "should_embed", "(", "dep_name", ")", ":", "path", "=", "dep_abspath", "(", "dep_name", ")", "defkey", "=", "'EMBED'", "key", "=", "(", "(", "dep_name", ".", "replace", "(", "'-'", ",", "''", ")", ".", "upper", "(", ")", "+", "'_'", ")", "+", "defkey", ")", "return", "_get_config_value", "(", "key", ",", "defkey", ",", "path", ")" ]
check the configuration for the dep_name and see if it should be embedded .
train
false
12,206
@contextlib.contextmanager
def record_time(times, enabled, *args):
    if (not enabled):
        (yield)
    else:
        start = time.time()
        (yield)
        end = time.time()
        times.append((' '.join(args), start, end))
[ "@", "contextlib", ".", "contextmanager", "def", "record_time", "(", "times", ",", "enabled", ",", "*", "args", ")", ":", "if", "(", "not", "enabled", ")", ":", "(", "yield", ")", "else", ":", "start", "=", "time", ".", "time", "(", ")", "(", "yield", ")", "end", "=", "time", ".", "time", "(", ")", "times", ".", "append", "(", "(", "' '", ".", "join", "(", "args", ")", ",", "start", ",", "end", ")", ")" ]
record the time of a specific action .
train
false
12,208
def check_pidfile(pidfile): return os.path.isfile(pidfile)
[ "def", "check_pidfile", "(", "pidfile", ")", ":", "return", "os", ".", "path", ".", "isfile", "(", "pidfile", ")" ]
determine if a pidfile has been written out .
train
false
12,209
def getNewRepository(): return ExportRepository()
[ "def", "getNewRepository", "(", ")", ":", "return", "ExportRepository", "(", ")" ]
get the repository constructor .
train
false
12,211
def get_password_reset_url(user, token_generator=default_token_generator):
    kwargs = {'token': token_generator.make_token(user),
              'uidb64': urlsafe_base64_encode(force_bytes(user.id))}
    return reverse('password-reset-confirm', kwargs=kwargs)
[ "def", "get_password_reset_url", "(", "user", ",", "token_generator", "=", "default_token_generator", ")", ":", "kwargs", "=", "{", "'token'", ":", "token_generator", ".", "make_token", "(", "user", ")", ",", "'uidb64'", ":", "urlsafe_base64_encode", "(", "force_bytes", "(", "user", ".", "id", ")", ")", "}", "return", "reverse", "(", "'password-reset-confirm'", ",", "kwargs", "=", "kwargs", ")" ]
generate a password-reset url for a given user .
train
false
12,212
def _ecdf(x):
    nobs = len(x)
    return (np.arange(1, (nobs + 1)) / float(nobs))
[ "def", "_ecdf", "(", "x", ")", ":", "nobs", "=", "len", "(", "x", ")", "return", "(", "np", ".", "arange", "(", "1", ",", "(", "nobs", "+", "1", ")", ")", "/", "float", "(", "nobs", ")", ")" ]
no frills empirical cdf used in fdrcorrection .
train
true
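A usage sketch for _ecdf above (illustration only, not dataset content). Note it only uses len(x), so it assumes x is already sorted and returns the plotting positions i/n:

import numpy as np

print(_ecdf(np.array([0.1, 0.5, 0.9, 2.0])))  # [0.25 0.5  0.75 1.  ]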
12,213
def is_valid_cidr(string_network):
    if (string_network.count('/') == 1):
        try:
            mask = int(string_network.split('/')[1])
        except ValueError:
            return False
        if ((mask < 1) or (mask > 32)):
            return False
        try:
            socket.inet_aton(string_network.split('/')[0])
        except socket.error:
            return False
    else:
        return False
    return True
[ "def", "is_valid_cidr", "(", "string_network", ")", ":", "if", "(", "string_network", ".", "count", "(", "'/'", ")", "==", "1", ")", ":", "try", ":", "mask", "=", "int", "(", "string_network", ".", "split", "(", "'/'", ")", "[", "1", "]", ")", "except", "ValueError", ":", "return", "False", "if", "(", "(", "mask", "<", "1", ")", "or", "(", "mask", ">", "32", ")", ")", ":", "return", "False", "try", ":", "socket", ".", "inet_aton", "(", "string_network", ".", "split", "(", "'/'", ")", "[", "0", "]", ")", "except", "socket", ".", "error", ":", "return", "False", "else", ":", "return", "False", "return", "True" ]
check if address is valid the provided address can be a ipv6 or a ipv4 cidr address .
train
true
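A usage sketch for is_valid_cidr above (illustration only, not dataset content). Although the nl summary mentions IPv6, the 1-32 mask range and socket.inet_aton restrict this function to IPv4 CIDR strings:

import socket

print(is_valid_cidr('192.168.0.0/24'))  # True
print(is_valid_cidr('192.168.0.0/33'))  # False: mask out of range
print(is_valid_cidr('192.168.0.0'))     # False: no '/'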
12,214
def multivariate_rbf(x, y=0.0, sigma=1.0, l=1.0):
    x = tf.convert_to_tensor(x)
    y = tf.convert_to_tensor(y)
    sigma = tf.convert_to_tensor(sigma)
    l = tf.convert_to_tensor(l)
    dependencies = [tf.verify_tensor_all_finite(x, msg=''),
                    tf.verify_tensor_all_finite(y, msg=''),
                    tf.assert_positive(sigma),
                    tf.assert_positive(l)]
    x = control_flow_ops.with_dependencies(dependencies, x)
    y = control_flow_ops.with_dependencies(dependencies, y)
    sigma = control_flow_ops.with_dependencies(dependencies, sigma)
    l = control_flow_ops.with_dependencies(dependencies, l)
    return (tf.pow(sigma, 2.0) * tf.exp((((-1.0) / (2.0 * tf.pow(l, 2.0))) * tf.reduce_sum(tf.pow((x - y), 2.0)))))
[ "def", "multivariate_rbf", "(", "x", ",", "y", "=", "0.0", ",", "sigma", "=", "1.0", ",", "l", "=", "1.0", ")", ":", "x", "=", "tf", ".", "convert_to_tensor", "(", "x", ")", "y", "=", "tf", ".", "convert_to_tensor", "(", "y", ")", "sigma", "=", "tf", ".", "convert_to_tensor", "(", "sigma", ")", "l", "=", "tf", ".", "convert_to_tensor", "(", "l", ")", "dependencies", "=", "[", "tf", ".", "verify_tensor_all_finite", "(", "x", ",", "msg", "=", "''", ")", ",", "tf", ".", "verify_tensor_all_finite", "(", "y", ",", "msg", "=", "''", ")", ",", "tf", ".", "assert_positive", "(", "sigma", ")", ",", "tf", ".", "assert_positive", "(", "l", ")", "]", "x", "=", "control_flow_ops", ".", "with_dependencies", "(", "dependencies", ",", "x", ")", "y", "=", "control_flow_ops", ".", "with_dependencies", "(", "dependencies", ",", "y", ")", "sigma", "=", "control_flow_ops", ".", "with_dependencies", "(", "dependencies", ",", "sigma", ")", "l", "=", "control_flow_ops", ".", "with_dependencies", "(", "dependencies", ",", "l", ")", "return", "(", "tf", ".", "pow", "(", "sigma", ",", "2.0", ")", "*", "tf", ".", "exp", "(", "(", "(", "(", "-", "1.0", ")", "/", "(", "2.0", "*", "tf", ".", "pow", "(", "l", ",", "2.0", ")", ")", ")", "*", "tf", ".", "reduce_sum", "(", "tf", ".", "pow", "(", "(", "x", "-", "y", ")", ",", "2.0", ")", ")", ")", ")", ")" ]
squared-exponential kernel .
train
false
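For reference, the snippet above computes the squared-exponential kernel k(x, y) = sigma^2 * exp(-||x - y||^2 / (2 * l^2)). A NumPy sketch of the same math (illustration only; the original depends on TF1-era ops such as tf.verify_tensor_all_finite):

import numpy as np

def rbf_np(x, y=0.0, sigma=1.0, l=1.0):
    diff = np.asarray(x, dtype=float) - np.asarray(y, dtype=float)
    return sigma ** 2 * np.exp(-np.sum(diff ** 2) / (2.0 * l ** 2))

print(rbf_np([1.0, 2.0], [1.0, 0.0]))  # exp(-2.0), about 0.1353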
12,215
def set_gl_entries_by_account(company, from_date, to_date, root_lft, root_rgt, filters, gl_entries_by_account, ignore_closing_entries=False):
    additional_conditions = get_additional_conditions(from_date, ignore_closing_entries, filters)
    gl_entries = frappe.db.sql(
        u'select posting_date, account, debit, credit, is_opening, fiscal_year from `tabGL Entry`\n\t\twhere company=%(company)s\n\t\t{additional_conditions}\n\t\tand posting_date <= %(to_date)s\n\t\tand account in (select name from `tabAccount`\n\t\t\twhere lft >= %(lft)s and rgt <= %(rgt)s)\n\t\torder by account, posting_date'.format(additional_conditions=additional_conditions),
        {u'company': company, u'from_date': from_date, u'to_date': to_date, u'lft': root_lft, u'rgt': root_rgt}, as_dict=True)
    for entry in gl_entries:
        gl_entries_by_account.setdefault(entry.account, []).append(entry)
    return gl_entries_by_account
[ "def", "set_gl_entries_by_account", "(", "company", ",", "from_date", ",", "to_date", ",", "root_lft", ",", "root_rgt", ",", "filters", ",", "gl_entries_by_account", ",", "ignore_closing_entries", "=", "False", ")", ":", "additional_conditions", "=", "get_additional_conditions", "(", "from_date", ",", "ignore_closing_entries", ",", "filters", ")", "gl_entries", "=", "frappe", ".", "db", ".", "sql", "(", "u'select posting_date, account, debit, credit, is_opening, fiscal_year from `tabGL Entry`\\n DCTB DCTB where company=%(company)s\\n DCTB DCTB {additional_conditions}\\n DCTB DCTB and posting_date <= %(to_date)s\\n DCTB DCTB and account in (select name from `tabAccount`\\n DCTB DCTB DCTB where lft >= %(lft)s and rgt <= %(rgt)s)\\n DCTB DCTB order by account, posting_date'", ".", "format", "(", "additional_conditions", "=", "additional_conditions", ")", ",", "{", "u'company'", ":", "company", ",", "u'from_date'", ":", "from_date", ",", "u'to_date'", ":", "to_date", ",", "u'lft'", ":", "root_lft", ",", "u'rgt'", ":", "root_rgt", "}", ",", "as_dict", "=", "True", ")", "for", "entry", "in", "gl_entries", ":", "gl_entries_by_account", ".", "setdefault", "(", "entry", ".", "account", ",", "[", "]", ")", ".", "append", "(", "entry", ")", "return", "gl_entries_by_account" ]
returns a dict like { "account": [gl entries] .
train
false
12,217
def asBinary(i):
    if (i > 1):
        if ((i % 2) == 1):
            return (asBinary((i >> 1)) + '1')
        else:
            return (asBinary((i >> 1)) + '0')
    else:
        return str(i)
[ "def", "asBinary", "(", "i", ")", ":", "if", "(", "i", ">", "1", ")", ":", "if", "(", "(", "i", "%", "2", ")", "==", "1", ")", ":", "return", "(", "asBinary", "(", "(", "i", ">>", "1", ")", ")", "+", "'1'", ")", "else", ":", "return", "(", "asBinary", "(", "(", "i", ">>", "1", ")", ")", "+", "'0'", ")", "else", ":", "return", "str", "(", "i", ")" ]
produces a string from an integers binary representation .
train
false
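A usage sketch for asBinary above (illustration only, not dataset content); the recursion processes the low bit of each shift and appends it, yielding a most-significant-bit-first string:

print(asBinary(10))  # '1010'
print(asBinary(1))   # '1'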
12,219
def _connect_volume(*args, **kwargs): return {'path': u'/dev/disk/by-path/xxxx', 'type': 'block'}
[ "def", "_connect_volume", "(", "*", "args", ",", "**", "kwargs", ")", ":", "return", "{", "'path'", ":", "u'/dev/disk/by-path/xxxx'", ",", "'type'", ":", "'block'", "}" ]
return predefined volume info .
train
false
12,220
def _diff_cache_cluster(current, desired):
    if (current.get('SecurityGroups') is not None):
        current['SecurityGroupIds'] = [s['SecurityGroupId'] for s in current['SecurityGroups']]
    if (current.get('CacheSecurityGroups') is not None):
        current['CacheSecurityGroupNames'] = [c['CacheSecurityGroupName'] for c in current['CacheSecurityGroups']]
    if (current.get('NotificationConfiguration') is not None):
        current['NotificationTopicArn'] = current['NotificationConfiguration']['TopicArn']
        current['NotificationTopicStatus'] = current['NotificationConfiguration']['TopicStatus']
    if (current.get('CacheParameterGroup') is not None):
        current['CacheParameterGroupName'] = current['CacheParameterGroup']['CacheParameterGroupName']
    modifiable = {'AutoMinorVersionUpgrade': 'AutoMinorVersionUpgrade',
                  'AZMode': 'AZMode',
                  'CacheNodeType': 'CacheNodeType',
                  'CacheNodeIdsToRemove': None,
                  'CacheParameterGroupName': 'CacheParameterGroupName',
                  'CacheSecurityGroupNames': 'CacheSecurityGroupNames',
                  'EngineVersion': 'EngineVersion',
                  'NewAvailabilityZones': None,
                  'NotificationTopicArn': 'NotificationTopicArn',
                  'NotificationTopicStatus': 'NotificationTopicStatus',
                  'NumCacheNodes': 'NumCacheNodes',
                  'PreferredMaintenanceWindow': 'PreferredMaintenanceWindow',
                  'SecurityGroupIds': 'SecurityGroupIds',
                  'SnapshotRetentionLimit': 'SnapshotRetentionLimit',
                  'SnapshotWindow': 'SnapshotWindow'}
    need_update = {}
    for (m, o) in modifiable.items():
        if (m in desired):
            if (not o):
                need_update[m] = desired[m]
            elif (m in current):
                if (current[m] != desired[m]):
                    need_update[m] = desired[m]
    return need_update
[ "def", "_diff_cache_cluster", "(", "current", ",", "desired", ")", ":", "if", "(", "current", ".", "get", "(", "'SecurityGroups'", ")", "is", "not", "None", ")", ":", "current", "[", "'SecurityGroupIds'", "]", "=", "[", "s", "[", "'SecurityGroupId'", "]", "for", "s", "in", "current", "[", "'SecurityGroups'", "]", "]", "if", "(", "current", ".", "get", "(", "'CacheSecurityGroups'", ")", "is", "not", "None", ")", ":", "current", "[", "'CacheSecurityGroupNames'", "]", "=", "[", "c", "[", "'CacheSecurityGroupName'", "]", "for", "c", "in", "current", "[", "'CacheSecurityGroups'", "]", "]", "if", "(", "current", ".", "get", "(", "'NotificationConfiguration'", ")", "is", "not", "None", ")", ":", "current", "[", "'NotificationTopicArn'", "]", "=", "current", "[", "'NotificationConfiguration'", "]", "[", "'TopicArn'", "]", "current", "[", "'NotificationTopicStatus'", "]", "=", "current", "[", "'NotificationConfiguration'", "]", "[", "'TopicStatus'", "]", "if", "(", "current", ".", "get", "(", "'CacheParameterGroup'", ")", "is", "not", "None", ")", ":", "current", "[", "'CacheParameterGroupName'", "]", "=", "current", "[", "'CacheParameterGroup'", "]", "[", "'CacheParameterGroupName'", "]", "modifiable", "=", "{", "'AutoMinorVersionUpgrade'", ":", "'AutoMinorVersionUpgrade'", ",", "'AZMode'", ":", "'AZMode'", ",", "'CacheNodeType'", ":", "'CacheNodeType'", ",", "'CacheNodeIdsToRemove'", ":", "None", ",", "'CacheParameterGroupName'", ":", "'CacheParameterGroupName'", ",", "'CacheSecurityGroupNames'", ":", "'CacheSecurityGroupNames'", ",", "'EngineVersion'", ":", "'EngineVersion'", ",", "'NewAvailabilityZones'", ":", "None", ",", "'NotificationTopicArn'", ":", "'NotificationTopicArn'", ",", "'NotificationTopicStatus'", ":", "'NotificationTopicStatus'", ",", "'NumCacheNodes'", ":", "'NumCacheNodes'", ",", "'PreferredMaintenanceWindow'", ":", "'PreferredMaintenanceWindow'", ",", "'SecurityGroupIds'", ":", "'SecurityGroupIds'", ",", "'SnapshotRetentionLimit'", ":", "'SnapshotRetentionLimit'", ",", "'SnapshotWindow'", ":", "'SnapshotWindow'", "}", "need_update", "=", "{", "}", "for", "(", "m", ",", "o", ")", "in", "modifiable", ".", "items", "(", ")", ":", "if", "(", "m", "in", "desired", ")", ":", "if", "(", "not", "o", ")", ":", "need_update", "[", "m", "]", "=", "desired", "[", "m", "]", "elif", "(", "m", "in", "current", ")", ":", "if", "(", "current", "[", "m", "]", "!=", "desired", "[", "m", "]", ")", ":", "need_update", "[", "m", "]", "=", "desired", "[", "m", "]", "return", "need_update" ]
if you need to enhance what modify_cache_cluster() considers when deciding what is to be updated .
train
true
12,221
def _dyad_div(one, other):
    if (isinstance(one, Dyadic) and isinstance(other, Dyadic)):
        raise TypeError('Cannot divide two dyadics')
    elif isinstance(one, Dyadic):
        return DyadicMul(one, Pow(other, S.NegativeOne))
    else:
        raise TypeError('Cannot divide by a dyadic')
[ "def", "_dyad_div", "(", "one", ",", "other", ")", ":", "if", "(", "isinstance", "(", "one", ",", "Dyadic", ")", "and", "isinstance", "(", "other", ",", "Dyadic", ")", ")", ":", "raise", "TypeError", "(", "'Cannot divide two dyadics'", ")", "elif", "isinstance", "(", "one", ",", "Dyadic", ")", ":", "return", "DyadicMul", "(", "one", ",", "Pow", "(", "other", ",", "S", ".", "NegativeOne", ")", ")", "else", ":", "raise", "TypeError", "(", "'Cannot divide by a dyadic'", ")" ]
helper for division involving dyadics .
train
false
12,222
@transaction.non_atomic_requests
@require_POST
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
def get_proctored_exam_results(request, course_id):
    query_features = ['user_email', 'exam_name', 'attempt_code', 'allowed_time_limit_mins', 'is_sample_attempt', 'started_at', 'completed_at', 'status']
    course_key = CourseKey.from_string(course_id)
    try:
        lms.djangoapps.instructor_task.api.submit_proctored_exam_results_report(request, course_key, query_features)
        status_response = _('The proctored exam results report is being created. To view the status of the report, see Pending Tasks below.')
    except AlreadyRunningError:
        status_response = _('The proctored exam results report is currently being created. To view the status of the report, see Pending Tasks below. You will be able to download the report when it is complete.')
    return JsonResponse({'status': status_response})
[ "@", "transaction", ".", "non_atomic_requests", "@", "require_POST", "@", "ensure_csrf_cookie", "@", "cache_control", "(", "no_cache", "=", "True", ",", "no_store", "=", "True", ",", "must_revalidate", "=", "True", ")", "@", "require_level", "(", "'staff'", ")", "def", "get_proctored_exam_results", "(", "request", ",", "course_id", ")", ":", "query_features", "=", "[", "'user_email'", ",", "'exam_name'", ",", "'attempt_code'", ",", "'allowed_time_limit_mins'", ",", "'is_sample_attempt'", ",", "'started_at'", ",", "'completed_at'", ",", "'status'", "]", "course_key", "=", "CourseKey", ".", "from_string", "(", "course_id", ")", "try", ":", "lms", ".", "djangoapps", ".", "instructor_task", ".", "api", ".", "submit_proctored_exam_results_report", "(", "request", ",", "course_key", ",", "query_features", ")", "status_response", "=", "_", "(", "'The proctored exam results report is being created. To view the status of the report, see Pending Tasks below.'", ")", "except", "AlreadyRunningError", ":", "status_response", "=", "_", "(", "'The proctored exam results report is currently being created. To view the status of the report, see Pending Tasks below. You will be able to download the report when it is complete.'", ")", "return", "JsonResponse", "(", "{", "'status'", ":", "status_response", "}", ")" ]
return info about proctored exam results in a course as a dict .
train
false
12,223
def within_tempdir(callable):
    proxy = with_tempdir(in_tempdir(callable))
    proxy.__name__ = callable.__name__
    return proxy
[ "def", "within_tempdir", "(", "callable", ")", ":", "proxy", "=", "with_tempdir", "(", "in_tempdir", "(", "callable", ")", ")", "proxy", ".", "__name__", "=", "callable", ".", "__name__", "return", "proxy" ]
a decorator run the enclosed function inside a tmpdir removed after execution .
train
false
12,224
def test_docx(name): return absjoin(thisdir, 'test_files', ('%s.docx' % name))
[ "def", "test_docx", "(", "name", ")", ":", "return", "absjoin", "(", "thisdir", ",", "'test_files'", ",", "(", "'%s.docx'", "%", "name", ")", ")" ]
return the absolute path to test .
train
false
12,225
def from_base85(text):
    acc = 0
    for c in text:
        acc = ((acc * 85) + b85dec[c])
    return acc
[ "def", "from_base85", "(", "text", ")", ":", "acc", "=", "0", "for", "c", "in", "text", ":", "acc", "=", "(", "(", "acc", "*", "85", ")", "+", "b85dec", "[", "c", "]", ")", "return", "acc" ]
decodes the given base 85 text into an integer .
train
false
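A usage sketch for from_base85 above (illustration only, not dataset content). The snippet depends on a module-level b85dec table that is not shown; the stand-in below uses the RFC 1924 base-85 alphabet purely for illustration:

b85dec = {c: i for (i, c) in enumerate(
    '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
    '!#$%&()*+-;<=>?@^_`{|}~')}
print(from_base85('10'))  # 85, i.e. 1*85 + 0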
12,226
@task
def get_previous_version_tag():
    shortversion = get_sympy_short_version()
    curcommit = 'HEAD'
    with cd('/home/vagrant/repos/sympy'):
        while True:
            curtag = run(('git describe --abbrev=0 --tags ' + curcommit)).strip()
            if (shortversion in curtag):
                parents = local(('git rev-list --parents -n 1 ' + curtag), capture=True).strip().split()
                assert (len(parents) == 2), curtag
                curcommit = (curtag + '^')
            else:
                print(blue('Using {tag} as the tag for the previous release.'.format(tag=curtag), bold=True))
                return curtag
    error('Could not find the tag for the previous release.')
[ "@", "task", "def", "get_previous_version_tag", "(", ")", ":", "shortversion", "=", "get_sympy_short_version", "(", ")", "curcommit", "=", "'HEAD'", "with", "cd", "(", "'/home/vagrant/repos/sympy'", ")", ":", "while", "True", ":", "curtag", "=", "run", "(", "(", "'git describe --abbrev=0 --tags '", "+", "curcommit", ")", ")", ".", "strip", "(", ")", "if", "(", "shortversion", "in", "curtag", ")", ":", "parents", "=", "local", "(", "(", "'git rev-list --parents -n 1 '", "+", "curtag", ")", ",", "capture", "=", "True", ")", ".", "strip", "(", ")", ".", "split", "(", ")", "assert", "(", "len", "(", "parents", ")", "==", "2", ")", ",", "curtag", "curcommit", "=", "(", "curtag", "+", "'^'", ")", "else", ":", "print", "(", "blue", "(", "'Using {tag} as the tag for the previous release.'", ".", "format", "(", "tag", "=", "curtag", ")", ",", "bold", "=", "True", ")", ")", "return", "curtag", "error", "(", "'Could not find the tag for the previous release.'", ")" ]
get the version of the previous release .
train
false
12,228
def fast_isin(X, Y):
    if (len(Y) > 0):
        T = Y.copy()
        T.sort()
        D = T.searchsorted(X)
        T = np.append(T, np.array([0]))
        W = (T[D] == X)
        if isinstance(W, bool):
            return np.zeros((len(X),), bool)
        else:
            return (T[D] == X)
    else:
        return np.zeros((len(X),), bool)
[ "def", "fast_isin", "(", "X", ",", "Y", ")", ":", "if", "(", "len", "(", "Y", ")", ">", "0", ")", ":", "T", "=", "Y", ".", "copy", "(", ")", "T", ".", "sort", "(", ")", "D", "=", "T", ".", "searchsorted", "(", "X", ")", "T", "=", "np", ".", "append", "(", "T", ",", "np", ".", "array", "(", "[", "0", "]", ")", ")", "W", "=", "(", "T", "[", "D", "]", "==", "X", ")", "if", "isinstance", "(", "W", ",", "bool", ")", ":", "return", "np", ".", "zeros", "(", "(", "len", "(", "X", ")", ",", ")", ",", "bool", ")", "else", ":", "return", "(", "T", "[", "D", "]", "==", "X", ")", "else", ":", "return", "np", ".", "zeros", "(", "(", "len", "(", "X", ")", ",", ")", ",", "bool", ")" ]
indices of elements in a numpy array that appear in another .
train
true
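A usage sketch for fast_isin above (illustration only, not dataset content). Despite the nl summary saying "indices", it returns a boolean mask, much like np.isin:

import numpy as np

X = np.array([1, 3, 5])
Y = np.array([5, 1])
print(fast_isin(X, Y))  # [ True False  True]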
12,229
def create_wsgi_app(path_to_library=None, prefix='', virtual_library=None):
    from calibre.library import db
    cherrypy.config.update({'environment': 'embedded'})
    db = db(path_to_library)
    parser = option_parser()
    (opts, args) = parser.parse_args(['calibre-server'])
    opts.url_prefix = prefix
    opts.restriction = virtual_library
    server = LibraryServer(db, opts, wsgi=True, show_tracebacks=True)
    return cherrypy.Application(server, script_name=None, config=server.config)
[ "def", "create_wsgi_app", "(", "path_to_library", "=", "None", ",", "prefix", "=", "''", ",", "virtual_library", "=", "None", ")", ":", "from", "calibre", ".", "library", "import", "db", "cherrypy", ".", "config", ".", "update", "(", "{", "'environment'", ":", "'embedded'", "}", ")", "db", "=", "db", "(", "path_to_library", ")", "parser", "=", "option_parser", "(", ")", "(", "opts", ",", "args", ")", "=", "parser", ".", "parse_args", "(", "[", "'calibre-server'", "]", ")", "opts", ".", "url_prefix", "=", "prefix", "opts", ".", "restriction", "=", "virtual_library", "server", "=", "LibraryServer", "(", "db", ",", "opts", ",", "wsgi", "=", "True", ",", "show_tracebacks", "=", "True", ")", "return", "cherrypy", ".", "Application", "(", "server", ",", "script_name", "=", "None", ",", "config", "=", "server", ".", "config", ")" ]
wsgi entry point .
train
false
12,233
def check_qt():
    qt_infos = dict(pyqt5=('PyQt5', '5.2'), pyqt=('PyQt4', '4.6'))
    try:
        import qtpy
        (package_name, required_ver) = qt_infos[qtpy.API]
        actual_ver = qtpy.PYQT_VERSION
        if (LooseVersion(actual_ver) < LooseVersion(required_ver)):
            show_warning(('Please check Spyder installation requirements:\n%s %s+ is required (found v%s).' % (package_name, required_ver, actual_ver)))
    except ImportError:
        show_warning(('Failed to import qtpy.\nPlease check Spyder installation requirements:\n\nqtpy 1.1.0+ and either\n%s %s+ or\n%s %s+\n\nare required to run Spyder.' % (qt_infos['pyqt5'] + qt_infos['pyqt'])))
[ "def", "check_qt", "(", ")", ":", "qt_infos", "=", "dict", "(", "pyqt5", "=", "(", "'PyQt5'", ",", "'5.2'", ")", ",", "pyqt", "=", "(", "'PyQt4'", ",", "'4.6'", ")", ")", "try", ":", "import", "qtpy", "(", "package_name", ",", "required_ver", ")", "=", "qt_infos", "[", "qtpy", ".", "API", "]", "actual_ver", "=", "qtpy", ".", "PYQT_VERSION", "if", "(", "LooseVersion", "(", "actual_ver", ")", "<", "LooseVersion", "(", "required_ver", ")", ")", ":", "show_warning", "(", "(", "'Please check Spyder installation requirements:\\n%s %s+ is required (found v%s).'", "%", "(", "package_name", ",", "required_ver", ",", "actual_ver", ")", ")", ")", "except", "ImportError", ":", "show_warning", "(", "(", "'Failed to import qtpy.\\nPlease check Spyder installation requirements:\\n\\nqtpy 1.1.0+ and either\\n%s %s+ or\\n%s %s+\\n\\nare required to run Spyder.'", "%", "(", "qt_infos", "[", "'pyqt5'", "]", "+", "qt_infos", "[", "'pyqt'", "]", ")", ")", ")" ]
check qt binding requirements .
train
true
12,235
def parse_lsmod_for_module(l_raw, module_name, escape=True):
    if escape:
        module_search = re.escape(module_name)
    else:
        module_search = module_name
    lsmod = re.search(('^(?P<name>%s)\\s+(?P<size>\\d+)\\s+(?P<used>\\d+)\\s*(?P<submodules>\\S+)?$' % module_search), l_raw, re.M)
    if lsmod:
        module_info = lsmod.groupdict([])
        module_info['size'] = int(module_info['size'])
        module_info['used'] = int(module_info['used'])
        if module_info['submodules']:
            module_info['submodules'] = module_info['submodules'].split(',')
        return module_info
    else:
        return {}
[ "def", "parse_lsmod_for_module", "(", "l_raw", ",", "module_name", ",", "escape", "=", "True", ")", ":", "if", "escape", ":", "module_search", "=", "re", ".", "escape", "(", "module_name", ")", "else", ":", "module_search", "=", "module_name", "lsmod", "=", "re", ".", "search", "(", "(", "'^(?P<name>%s)\\\\s+(?P<size>\\\\d+)\\\\s+(?P<used>\\\\d+)\\\\s*(?P<submodules>\\\\S+)?$'", "%", "module_search", ")", ",", "l_raw", ",", "re", ".", "M", ")", "if", "lsmod", ":", "module_info", "=", "lsmod", ".", "groupdict", "(", "[", "]", ")", "module_info", "[", "'size'", "]", "=", "int", "(", "module_info", "[", "'size'", "]", ")", "module_info", "[", "'used'", "]", "=", "int", "(", "module_info", "[", "'used'", "]", ")", "if", "module_info", "[", "'submodules'", "]", ":", "module_info", "[", "'submodules'", "]", "=", "module_info", "[", "'submodules'", "]", ".", "split", "(", "','", ")", "return", "module_info", "else", ":", "return", "{", "}" ]
use a regexp to parse raw lsmod output and get module information .
train
false
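A usage sketch for parse_lsmod_for_module above, fed one fabricated lsmod-style line (illustration only, not dataset content):

import re

line = 'kvm_intel 143109 3 kvm'
print(parse_lsmod_for_module(line, 'kvm_intel'))
# {'name': 'kvm_intel', 'size': 143109, 'used': 3, 'submodules': ['kvm']}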
12,236
def get_symbols_handler(file_extension):
    global SYMBOLS_HANDLER
    return SYMBOLS_HANDLER.get(file_extension, None)
[ "def", "get_symbols_handler", "(", "file_extension", ")", ":", "global", "SYMBOLS_HANDLER", "return", "SYMBOLS_HANDLER", ".", "get", "(", "file_extension", ",", "None", ")" ]
returns the symbol handler for the given file_extension .
train
false
12,237
def update_alias(FunctionName, Name, FunctionVersion=None, Description=None, region=None, key=None, keyid=None, profile=None):
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        args = {}
        if FunctionVersion:
            args['FunctionVersion'] = FunctionVersion
        if Description:
            args['Description'] = Description
        r = conn.update_alias(FunctionName=FunctionName, Name=Name, **args)
        if r:
            keys = ('Name', 'FunctionVersion', 'Description')
            return {'updated': True, 'alias': dict([(k, r.get(k)) for k in keys])}
        else:
            log.warning('Alias was not updated')
            return {'updated': False}
    except ClientError as e:
        return {'created': False, 'error': salt.utils.boto3.get_error(e)}
[ "def", "update_alias", "(", "FunctionName", ",", "Name", ",", "FunctionVersion", "=", "None", ",", "Description", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "args", "=", "{", "}", "if", "FunctionVersion", ":", "args", "[", "'FunctionVersion'", "]", "=", "FunctionVersion", "if", "Description", ":", "args", "[", "'Description'", "]", "=", "Description", "r", "=", "conn", ".", "update_alias", "(", "FunctionName", "=", "FunctionName", ",", "Name", "=", "Name", ",", "**", "args", ")", "if", "r", ":", "keys", "=", "(", "'Name'", ",", "'FunctionVersion'", ",", "'Description'", ")", "return", "{", "'updated'", ":", "True", ",", "'alias'", ":", "dict", "(", "[", "(", "k", ",", "r", ".", "get", "(", "k", ")", ")", "for", "k", "in", "keys", "]", ")", "}", "else", ":", "log", ".", "warning", "(", "'Alias was not updated'", ")", "return", "{", "'updated'", ":", "False", "}", "except", "ClientError", "as", "e", ":", "return", "{", "'created'", ":", "False", ",", "'error'", ":", "salt", ".", "utils", ".", "boto3", ".", "get_error", "(", "e", ")", "}" ]
update the named alias to the configuration .
train
true
12,238
def _docstring_from_node(node):
    CLIP_LENGTH = 2000
    s = node.value
    s = s.lstrip('urb')
    if s.startswith('"""'):
        s = s[3:(-3)]
    elif s.startswith("'''"):
        s = s[3:(-3)]
    else:
        assert (s[0] in ('"', "'"))
        s = s[1:(-1)]
    return s[:CLIP_LENGTH]
[ "def", "_docstring_from_node", "(", "node", ")", ":", "CLIP_LENGTH", "=", "2000", "s", "=", "node", ".", "value", "s", "=", "s", ".", "lstrip", "(", "'urb'", ")", "if", "s", ".", "startswith", "(", "'\"\"\"'", ")", ":", "s", "=", "s", "[", "3", ":", "(", "-", "3", ")", "]", "elif", "s", ".", "startswith", "(", "\"'''\"", ")", ":", "s", "=", "s", "[", "3", ":", "(", "-", "3", ")", "]", "else", ":", "assert", "(", "s", "[", "0", "]", "in", "(", "'\"'", ",", "\"'\"", ")", ")", "s", "=", "s", "[", "1", ":", "(", "-", "1", ")", "]", "return", "s", "[", ":", "CLIP_LENGTH", "]" ]
return the docstring content for the given docstring node .
train
false
12,239
def test_resize_icon_same():
    resize_size = [339]
    final_size = [(339, 128)]
    _uploader(resize_size, final_size)
[ "def", "test_resize_icon_same", "(", ")", ":", "resize_size", "=", "[", "339", "]", "final_size", "=", "[", "(", "339", ",", "128", ")", "]", "_uploader", "(", "resize_size", ",", "final_size", ")" ]
image stays the same .
train
false
12,240
def SetSi(si):
    global _si
    _si = si
[ "def", "SetSi", "(", "si", ")", ":", "global", "_si", "_si", "=", "si" ]
set the saved service instance .
train
false
12,241
def cmd_log(cmd, cwd):
    output = subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)
    log.debug('Command was: {0!r}. Working directory was: {1!r}'.format(' '.join(cmd), cwd))
    log.debug('Command output was: {0!r}'.format(output))
    return output
[ "def", "cmd_log", "(", "cmd", ",", "cwd", ")", ":", "output", "=", "subprocess", ".", "check_output", "(", "cmd", ",", "cwd", "=", "cwd", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "log", ".", "debug", "(", "'Command was: {0!r}. Working directory was: {1!r}'", ".", "format", "(", "' '", ".", "join", "(", "cmd", ")", ",", "cwd", ")", ")", "log", ".", "debug", "(", "'Command output was: {0!r}'", ".", "format", "(", "output", ")", ")", "return", "output" ]
helper function to redirect stderr to stdout and log the command used along with the output .
train
false
12,242
def _ToImagesError(error, blob_key=None):
    error_map = {images_service_pb.ImagesServiceError.NOT_IMAGE: NotImageError,
                 images_service_pb.ImagesServiceError.BAD_IMAGE_DATA: BadImageError,
                 images_service_pb.ImagesServiceError.IMAGE_TOO_LARGE: LargeImageError,
                 images_service_pb.ImagesServiceError.INVALID_BLOB_KEY: InvalidBlobKeyError,
                 images_service_pb.ImagesServiceError.ACCESS_DENIED: AccessDeniedError,
                 images_service_pb.ImagesServiceError.OBJECT_NOT_FOUND: ObjectNotFoundError,
                 images_service_pb.ImagesServiceError.UNSPECIFIED_ERROR: TransformationError,
                 images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA: BadRequestError}
    error_code = error.application_error
    if (error_code == images_service_pb.ImagesServiceError.INVALID_BLOB_KEY):
        return InvalidBlobKeyError(blob_key)
    desired_exc = error_map.get(error_code, Error)
    return desired_exc(error.error_detail)
[ "def", "_ToImagesError", "(", "error", ",", "blob_key", "=", "None", ")", ":", "error_map", "=", "{", "images_service_pb", ".", "ImagesServiceError", ".", "NOT_IMAGE", ":", "NotImageError", ",", "images_service_pb", ".", "ImagesServiceError", ".", "BAD_IMAGE_DATA", ":", "BadImageError", ",", "images_service_pb", ".", "ImagesServiceError", ".", "IMAGE_TOO_LARGE", ":", "LargeImageError", ",", "images_service_pb", ".", "ImagesServiceError", ".", "INVALID_BLOB_KEY", ":", "InvalidBlobKeyError", ",", "images_service_pb", ".", "ImagesServiceError", ".", "ACCESS_DENIED", ":", "AccessDeniedError", ",", "images_service_pb", ".", "ImagesServiceError", ".", "OBJECT_NOT_FOUND", ":", "ObjectNotFoundError", ",", "images_service_pb", ".", "ImagesServiceError", ".", "UNSPECIFIED_ERROR", ":", "TransformationError", ",", "images_service_pb", ".", "ImagesServiceError", ".", "BAD_TRANSFORM_DATA", ":", "BadRequestError", "}", "error_code", "=", "error", ".", "application_error", "if", "(", "error_code", "==", "images_service_pb", ".", "ImagesServiceError", ".", "INVALID_BLOB_KEY", ")", ":", "return", "InvalidBlobKeyError", "(", "blob_key", ")", "desired_exc", "=", "error_map", ".", "get", "(", "error_code", ",", "Error", ")", "return", "desired_exc", "(", "error", ".", "error_detail", ")" ]
translate an application error to an images error .
train
false
12,243
def qenum_key(base, value, add_base=False, klass=None):
    if (klass is None):
        klass = value.__class__
        if (klass == int):
            raise TypeError("Can't guess enum class of an int!")
    try:
        idx = base.staticMetaObject.indexOfEnumerator(klass.__name__)
        ret = base.staticMetaObject.enumerator(idx).valueToKey(value)
    except AttributeError:
        ret = None
    if (ret is None):
        for (name, obj) in vars(base).items():
            if (isinstance(obj, klass) and (obj == value)):
                ret = name
                break
        else:
            ret = '0x{:04x}'.format(int(value))
    if (add_base and hasattr(base, '__name__')):
        return '.'.join([base.__name__, ret])
    else:
        return ret
[ "def", "qenum_key", "(", "base", ",", "value", ",", "add_base", "=", "False", ",", "klass", "=", "None", ")", ":", "if", "(", "klass", "is", "None", ")", ":", "klass", "=", "value", ".", "__class__", "if", "(", "klass", "==", "int", ")", ":", "raise", "TypeError", "(", "\"Can't guess enum class of an int!\"", ")", "try", ":", "idx", "=", "base", ".", "staticMetaObject", ".", "indexOfEnumerator", "(", "klass", ".", "__name__", ")", "ret", "=", "base", ".", "staticMetaObject", ".", "enumerator", "(", "idx", ")", ".", "valueToKey", "(", "value", ")", "except", "AttributeError", ":", "ret", "=", "None", "if", "(", "ret", "is", "None", ")", ":", "for", "(", "name", ",", "obj", ")", "in", "vars", "(", "base", ")", ".", "items", "(", ")", ":", "if", "(", "isinstance", "(", "obj", ",", "klass", ")", "and", "(", "obj", "==", "value", ")", ")", ":", "ret", "=", "name", "break", "else", ":", "ret", "=", "'0x{:04x}'", ".", "format", "(", "int", "(", "value", ")", ")", "if", "(", "add_base", "and", "hasattr", "(", "base", ",", "'__name__'", ")", ")", ":", "return", "'.'", ".", "join", "(", "[", "base", ".", "__name__", ",", "ret", "]", ")", "else", ":", "return", "ret" ]
convert a qt enum value to its key as a string .
train
false
12,244
def dedent(text):
    margin = None
    text = _whitespace_only_re.sub('', text)
    indents = _leading_whitespace_re.findall(text)
    for indent in indents:
        if (margin is None):
            margin = indent
        elif indent.startswith(margin):
            pass
        elif margin.startswith(indent):
            margin = indent
        else:
            margin = ''
            break
    if (0 and margin):
        for line in text.split('\n'):
            assert ((not line) or line.startswith(margin)), ('line = %r, margin = %r' % (line, margin))
    if margin:
        text = re.sub(('(?m)^' + margin), '', text)
    return text
[ "def", "dedent", "(", "text", ")", ":", "margin", "=", "None", "text", "=", "_whitespace_only_re", ".", "sub", "(", "''", ",", "text", ")", "indents", "=", "_leading_whitespace_re", ".", "findall", "(", "text", ")", "for", "indent", "in", "indents", ":", "if", "(", "margin", "is", "None", ")", ":", "margin", "=", "indent", "elif", "indent", ".", "startswith", "(", "margin", ")", ":", "pass", "elif", "margin", ".", "startswith", "(", "indent", ")", ":", "margin", "=", "indent", "else", ":", "margin", "=", "''", "break", "if", "(", "0", "and", "margin", ")", ":", "for", "line", "in", "text", ".", "split", "(", "'\\n'", ")", ":", "assert", "(", "(", "not", "line", ")", "or", "line", ".", "startswith", "(", "margin", ")", ")", ",", "(", "'line = %r, margin = %r'", "%", "(", "line", ",", "margin", ")", ")", "if", "margin", ":", "text", "=", "re", ".", "sub", "(", "(", "'(?m)^'", "+", "margin", ")", ",", "''", ",", "text", ")", "return", "text" ]
remove excess indentation from docstring *s* .
train
true
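A usage sketch for dedent above (illustration only, not dataset content). Assuming the module-level regexes match those in the standard textwrap module, it strips the longest common leading whitespace:

import re
_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)

print(dedent('    hello\n      world\n'))  # 'hello\n  world\n'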
12,246
def _get_secrets(namespace, name, apiserver_url):
    url = '{0}/api/v1/namespaces/{1}/secrets/{2}'.format(apiserver_url, namespace, name)
    ret = http.query(url)
    if ret.get('body'):
        return json.loads(ret.get('body'))
    else:
        return None
[ "def", "_get_secrets", "(", "namespace", ",", "name", ",", "apiserver_url", ")", ":", "url", "=", "'{0}/api/v1/namespaces/{1}/secrets/{2}'", ".", "format", "(", "apiserver_url", ",", "namespace", ",", "name", ")", "ret", "=", "http", ".", "query", "(", "url", ")", "if", "ret", ".", "get", "(", "'body'", ")", ":", "return", "json", ".", "loads", "(", "ret", ".", "get", "(", "'body'", ")", ")", "else", ":", "return", "None" ]
get secrets of the namespace .
train
false
12,247
def is_event_loop_running_wx(app=None):
    ip = get_ipython()
    if (ip is not None):
        if (ip.active_eventloop and (ip.active_eventloop == 'wx')):
            return True
    if (app is None):
        app = get_app_wx()
    if hasattr(app, '_in_event_loop'):
        return app._in_event_loop
    else:
        return app.IsMainLoopRunning()
[ "def", "is_event_loop_running_wx", "(", "app", "=", "None", ")", ":", "ip", "=", "get_ipython", "(", ")", "if", "(", "ip", "is", "not", "None", ")", ":", "if", "(", "ip", ".", "active_eventloop", "and", "(", "ip", ".", "active_eventloop", "==", "'wx'", ")", ")", ":", "return", "True", "if", "(", "app", "is", "None", ")", ":", "app", "=", "get_app_wx", "(", ")", "if", "hasattr", "(", "app", ",", "'_in_event_loop'", ")", ":", "return", "app", ".", "_in_event_loop", "else", ":", "return", "app", ".", "IsMainLoopRunning", "(", ")" ]
is the wx event loop running .
train
false
12,248
def temporal_padding(x, padding=1):
    input_shape = x.shape
    output_shape = (input_shape[0], (input_shape[1] + (2 * padding)), input_shape[2])
    output = T.zeros(output_shape)
    return T.set_subtensor(output[:, padding:(x.shape[1] + padding), :], x)
[ "def", "temporal_padding", "(", "x", ",", "padding", "=", "1", ")", ":", "input_shape", "=", "x", ".", "shape", "output_shape", "=", "(", "input_shape", "[", "0", "]", ",", "(", "input_shape", "[", "1", "]", "+", "(", "2", "*", "padding", ")", ")", ",", "input_shape", "[", "2", "]", ")", "output", "=", "T", ".", "zeros", "(", "output_shape", ")", "return", "T", ".", "set_subtensor", "(", "output", "[", ":", ",", "padding", ":", "(", "x", ".", "shape", "[", "1", "]", "+", "padding", ")", ",", ":", "]", ",", "x", ")" ]
pads the middle dimension of a 3d tensor with "padding" zeros left and right .
train
false
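The snippet above targets Theano tensors; a NumPy sketch of the same axis-1 zero padding (illustration only, not dataset content):

import numpy as np

x = np.ones((2, 3, 4))
padded = np.pad(x, ((0, 0), (1, 1), (0, 0)), mode='constant')
print(padded.shape)  # (2, 5, 4)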
12,249
def _test_nested_change(case, outer_factory, inner_factory):
    inner_action = ControllableAction(result=succeed(None))
    subchanges = [ControllableAction(result=succeed(None)),
                  inner_factory(changes=[inner_action]),
                  ControllableAction(result=succeed(None))]
    change = outer_factory(changes=subchanges)
    run_state_change(change, DEPLOYER, InMemoryStatePersister())
    case.assertEqual((True, DEPLOYER), (inner_action.called, inner_action.deployer))
[ "def", "_test_nested_change", "(", "case", ",", "outer_factory", ",", "inner_factory", ")", ":", "inner_action", "=", "ControllableAction", "(", "result", "=", "succeed", "(", "None", ")", ")", "subchanges", "=", "[", "ControllableAction", "(", "result", "=", "succeed", "(", "None", ")", ")", ",", "inner_factory", "(", "changes", "=", "[", "inner_action", "]", ")", ",", "ControllableAction", "(", "result", "=", "succeed", "(", "None", ")", ")", "]", "change", "=", "outer_factory", "(", "changes", "=", "subchanges", ")", "run_state_change", "(", "change", ",", "DEPLOYER", ",", "InMemoryStatePersister", "(", ")", ")", "case", ".", "assertEqual", "(", "(", "True", ",", "DEPLOYER", ")", ",", "(", "inner_action", ".", "called", ",", "inner_action", ".", "deployer", ")", ")" ]
assert that ichangestate providers wrapped inside inner_factory wrapped inside outer_factory are run with the same deployer argument as is passed to run_state_change .
train
false
12,250
def random_rotation_matrix(rand=None): return quaternion_matrix(random_quaternion(rand))
[ "def", "random_rotation_matrix", "(", "rand", "=", "None", ")", ":", "return", "quaternion_matrix", "(", "random_quaternion", "(", "rand", ")", ")" ]
return uniform random rotation matrix .
train
false
12,251
def GetLogUrls(opener, api_host):
    (start_timestamp, end_timestamp) = _GetTimestamps()
    logging.info(('time range %s => %s' % (datetime.datetime.fromtimestamp(start_timestamp).strftime('%Y-%m-%d %H:%M:%S'),
                                           datetime.datetime.fromtimestamp(end_timestamp).strftime('%Y-%m-%d %H:%M:%S'))))
    request_dict = {'user_id': options.options.user_id, 'start_timestamp': start_timestamp, 'end_timestamp': end_timestamp}
    if options.options.filter:
        request_dict['filter'] = options.options.filter
    response_dict = admin_api.ServiceRequest(opener, api_host, 'list_client_logs', request_dict)
    return response_dict['log_urls']
[ "def", "GetLogUrls", "(", "opener", ",", "api_host", ")", ":", "(", "start_timestamp", ",", "end_timestamp", ")", "=", "_GetTimestamps", "(", ")", "logging", ".", "info", "(", "(", "'time range %s => %s'", "%", "(", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "start_timestamp", ")", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S'", ")", ",", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "end_timestamp", ")", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S'", ")", ")", ")", ")", "request_dict", "=", "{", "'user_id'", ":", "options", ".", "options", ".", "user_id", ",", "'start_timestamp'", ":", "start_timestamp", ",", "'end_timestamp'", ":", "end_timestamp", "}", "if", "options", ".", "options", ".", "filter", ":", "request_dict", "[", "'filter'", "]", "=", "options", ".", "options", ".", "filter", "response_dict", "=", "admin_api", ".", "ServiceRequest", "(", "opener", ",", "api_host", ",", "'list_client_logs'", ",", "request_dict", ")", "return", "response_dict", "[", "'log_urls'", "]" ]
calls into the admin api to get log urls for the user .
train
false
12,252
def is_orm_value(obj): return isinstance(obj, (sqlalchemy.orm.attributes.InstrumentedAttribute, sqlalchemy.sql.expression.ColumnElement))
[ "def", "is_orm_value", "(", "obj", ")", ":", "return", "isinstance", "(", "obj", ",", "(", "sqlalchemy", ".", "orm", ".", "attributes", ".", "InstrumentedAttribute", ",", "sqlalchemy", ".", "sql", ".", "expression", ".", "ColumnElement", ")", ")" ]
check if object is an orm field .
train
false
12,253
def _loc_to_coil_trans(loc):
    loc = loc.astype(np.float64)
    coil_trans = np.concatenate([loc.reshape(4, 3).T[:, [1, 2, 3, 0]],
                                 np.array([0, 0, 0, 1]).reshape(1, 4)])
    return coil_trans
[ "def", "_loc_to_coil_trans", "(", "loc", ")", ":", "loc", "=", "loc", ".", "astype", "(", "np", ".", "float64", ")", "coil_trans", "=", "np", ".", "concatenate", "(", "[", "loc", ".", "reshape", "(", "4", ",", "3", ")", ".", "T", "[", ":", ",", "[", "1", ",", "2", ",", "3", ",", "0", "]", "]", ",", "np", ".", "array", "(", "[", "0", ",", "0", ",", "0", ",", "1", "]", ")", ".", "reshape", "(", "1", ",", "4", ")", "]", ")", "return", "coil_trans" ]
convert loc vector to coil_trans .
train
false
12,254
def _set_msg_type(msg_type):
    def _set_cls_msg_type(cls):
        cls.cls_msg_type = msg_type
        return cls
    return _set_cls_msg_type
[ "def", "_set_msg_type", "(", "msg_type", ")", ":", "def", "_set_cls_msg_type", "(", "cls", ")", ":", "cls", ".", "cls_msg_type", "=", "msg_type", "return", "cls", "return", "_set_cls_msg_type" ]
annotate corresponding ofp message type .
train
false
12,255
def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None):
    if (not issubclass(type(hostvars), dict)):
        raise errors.AnsibleFilterError('|failed expects hostvars is a dict')
    if ((persistent_volume_claims is not None) and (not issubclass(type(persistent_volume_claims), list))):
        raise errors.AnsibleFilterError('|failed expects persistent_volume_claims is a list')
    if (persistent_volume_claims is None):
        persistent_volume_claims = []
    if ('hosted' in hostvars['openshift']):
        for component in hostvars['openshift']['hosted']:
            if ('storage' in hostvars['openshift']['hosted'][component]):
                params = hostvars['openshift']['hosted'][component]['storage']
                kind = params['kind']
                create_pv = params['create_pv']
                create_pvc = params['create_pvc']
                if ((kind not in [None, 'object']) and create_pv and create_pvc):
                    volume = params['volume']['name']
                    size = params['volume']['size']
                    access_modes = params['access']['modes']
                    persistent_volume_claim = dict(name='{0}-claim'.format(volume), capacity=size, access_modes=access_modes)
                    persistent_volume_claims.append(persistent_volume_claim)
    return persistent_volume_claims
[ "def", "oo_persistent_volume_claims", "(", "hostvars", ",", "persistent_volume_claims", "=", "None", ")", ":", "if", "(", "not", "issubclass", "(", "type", "(", "hostvars", ")", ",", "dict", ")", ")", ":", "raise", "errors", ".", "AnsibleFilterError", "(", "'|failed expects hostvars is a dict'", ")", "if", "(", "(", "persistent_volume_claims", "is", "not", "None", ")", "and", "(", "not", "issubclass", "(", "type", "(", "persistent_volume_claims", ")", ",", "list", ")", ")", ")", ":", "raise", "errors", ".", "AnsibleFilterError", "(", "'|failed expects persistent_volume_claims is a list'", ")", "if", "(", "persistent_volume_claims", "is", "None", ")", ":", "persistent_volume_claims", "=", "[", "]", "if", "(", "'hosted'", "in", "hostvars", "[", "'openshift'", "]", ")", ":", "for", "component", "in", "hostvars", "[", "'openshift'", "]", "[", "'hosted'", "]", ":", "if", "(", "'storage'", "in", "hostvars", "[", "'openshift'", "]", "[", "'hosted'", "]", "[", "component", "]", ")", ":", "params", "=", "hostvars", "[", "'openshift'", "]", "[", "'hosted'", "]", "[", "component", "]", "[", "'storage'", "]", "kind", "=", "params", "[", "'kind'", "]", "create_pv", "=", "params", "[", "'create_pv'", "]", "create_pvc", "=", "params", "[", "'create_pvc'", "]", "if", "(", "(", "kind", "not", "in", "[", "None", ",", "'object'", "]", ")", "and", "create_pv", "and", "create_pvc", ")", ":", "volume", "=", "params", "[", "'volume'", "]", "[", "'name'", "]", "size", "=", "params", "[", "'volume'", "]", "[", "'size'", "]", "access_modes", "=", "params", "[", "'access'", "]", "[", "'modes'", "]", "persistent_volume_claim", "=", "dict", "(", "name", "=", "'{0}-claim'", ".", "format", "(", "volume", ")", ",", "capacity", "=", "size", ",", "access_modes", "=", "access_modes", ")", "persistent_volume_claims", ".", "append", "(", "persistent_volume_claim", ")", "return", "persistent_volume_claims" ]
generate list of persistent volume claims based on oo_openshift_env storage options set in host variables .
train
false
12,256
def common_exceptions_400(func):
    def wrapped(request, *args, **kwargs):
        use_json = (request.is_ajax() or request.META.get('HTTP_ACCEPT', '').startswith('application/json'))
        try:
            return func(request, *args, **kwargs)
        except User.DoesNotExist:
            message = _('User does not exist.')
            if use_json:
                return JsonResponse({'error': message}, 400)
            else:
                return HttpResponseBadRequest(message)
        except AlreadyRunningError:
            message = _('Task is already running.')
            if use_json:
                return JsonResponse({'error': message}, 400)
            else:
                return HttpResponseBadRequest(message)
    return wrapped
[ "def", "common_exceptions_400", "(", "func", ")", ":", "def", "wrapped", "(", "request", ",", "*", "args", ",", "**", "kwargs", ")", ":", "use_json", "=", "(", "request", ".", "is_ajax", "(", ")", "or", "request", ".", "META", ".", "get", "(", "'HTTP_ACCEPT'", ",", "''", ")", ".", "startswith", "(", "'application/json'", ")", ")", "try", ":", "return", "func", "(", "request", ",", "*", "args", ",", "**", "kwargs", ")", "except", "User", ".", "DoesNotExist", ":", "message", "=", "_", "(", "'User does not exist.'", ")", "if", "use_json", ":", "return", "JsonResponse", "(", "{", "'error'", ":", "message", "}", ",", "400", ")", "else", ":", "return", "HttpResponseBadRequest", "(", "message", ")", "except", "AlreadyRunningError", ":", "message", "=", "_", "(", "'Task is already running.'", ")", "if", "use_json", ":", "return", "JsonResponse", "(", "{", "'error'", ":", "message", "}", ",", "400", ")", "else", ":", "return", "HttpResponseBadRequest", "(", "message", ")", "return", "wrapped" ]
catches common exceptions and renders matching 400 errors .
train
false
12,257
def scans_for_fname(fname):
    if isinstance(fname, list):
        scans = np.zeros((len(fname),), dtype=object)
        for (sno, f) in enumerate(fname):
            scans[sno] = (u'%s,1' % f)
        return scans
    img = load(fname)
    if (len(img.shape) == 3):
        return np.array(((u'%s,1' % fname),), dtype=object)
    else:
        n_scans = img.shape[3]
        scans = np.zeros((n_scans,), dtype=object)
        for sno in range(n_scans):
            scans[sno] = (u'%s,%d' % (fname, (sno + 1)))
        return scans
[ "def", "scans_for_fname", "(", "fname", ")", ":", "if", "isinstance", "(", "fname", ",", "list", ")", ":", "scans", "=", "np", ".", "zeros", "(", "(", "len", "(", "fname", ")", ",", ")", ",", "dtype", "=", "object", ")", "for", "(", "sno", ",", "f", ")", "in", "enumerate", "(", "fname", ")", ":", "scans", "[", "sno", "]", "=", "(", "u'%s,1'", "%", "f", ")", "return", "scans", "img", "=", "load", "(", "fname", ")", "if", "(", "len", "(", "img", ".", "shape", ")", "==", "3", ")", ":", "return", "np", ".", "array", "(", "(", "(", "u'%s,1'", "%", "fname", ")", ",", ")", ",", "dtype", "=", "object", ")", "else", ":", "n_scans", "=", "img", ".", "shape", "[", "3", "]", "scans", "=", "np", ".", "zeros", "(", "(", "n_scans", ",", ")", ",", "dtype", "=", "object", ")", "for", "sno", "in", "range", "(", "n_scans", ")", ":", "scans", "[", "sno", "]", "=", "(", "u'%s,%d'", "%", "(", "fname", ",", "(", "sno", "+", "1", ")", ")", ")", "return", "scans" ]
reads a nifti file and converts it to a numpy array storing individual nifti volumes .
train
false
12,258
def _load_arg_defaults(kwargs):
    if current_app:
        kwargs.setdefault('cls', current_app.json_decoder)
    else:
        kwargs.setdefault('cls', JSONDecoder)
[ "def", "_load_arg_defaults", "(", "kwargs", ")", ":", "if", "current_app", ":", "kwargs", ".", "setdefault", "(", "'cls'", ",", "current_app", ".", "json_decoder", ")", "else", ":", "kwargs", ".", "setdefault", "(", "'cls'", ",", "JSONDecoder", ")" ]
inject default arguments for load functions .
train
true
12,259
def is_mysql_running():
    with open(os.devnull, 'w') as os_devnull:
        returncode = subprocess.call('pgrep mysqld', stdout=os_devnull, shell=True)
    return (returncode == 0)
[ "def", "is_mysql_running", "(", ")", ":", "with", "open", "(", "os", ".", "devnull", ",", "'w'", ")", "as", "os_devnull", ":", "returncode", "=", "subprocess", ".", "call", "(", "'pgrep mysqld'", ",", "stdout", "=", "os_devnull", ",", "shell", "=", "True", ")", "return", "(", "returncode", "==", "0", ")" ]
returns true if mysql is running .
train
false
12,260
def setting(name, default=None): return getattr(settings, name, default)
[ "def", "setting", "(", "name", ",", "default", "=", "None", ")", ":", "return", "getattr", "(", "settings", ",", "name", ",", "default", ")" ]
helper function to get a django setting by name .
train
false
12,261
def xisabs(filename):
    if filename.startswith('/'):
        return True
    elif filename.startswith('\\'):
        return True
    elif re.match('\\w:[\\\\/]', filename):
        return True
    return False
[ "def", "xisabs", "(", "filename", ")", ":", "if", "filename", ".", "startswith", "(", "'/'", ")", ":", "return", "True", "elif", "filename", ".", "startswith", "(", "'\\\\'", ")", ":", "return", "True", "elif", "re", ".", "match", "(", "'\\\\w:[\\\\\\\\/]'", ",", "filename", ")", ":", "return", "True", "return", "False" ]
cross-platform version of os .
train
false
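A usage sketch for xisabs above (illustration only, not dataset content); it tests absolute paths in both POSIX and Windows forms:

import re

print(xisabs('/usr/bin'))       # True
print(xisabs('C:\\Windows'))    # True, via the drive-letter regex
print(xisabs('relative/path'))  # False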
12,264
def test_ee_init():
    ratio = 1.0
    ee = EasyEnsemble(ratio=ratio, random_state=RND_SEED)
    assert_equal(ee.ratio, ratio)
    assert_equal(ee.replacement, False)
    assert_equal(ee.n_subsets, 10)
    assert_equal(ee.random_state, RND_SEED)
[ "def", "test_ee_init", "(", ")", ":", "ratio", "=", "1.0", "ee", "=", "EasyEnsemble", "(", "ratio", "=", "ratio", ",", "random_state", "=", "RND_SEED", ")", "assert_equal", "(", "ee", ".", "ratio", ",", "ratio", ")", "assert_equal", "(", "ee", ".", "replacement", ",", "False", ")", "assert_equal", "(", "ee", ".", "n_subsets", ",", "10", ")", "assert_equal", "(", "ee", ".", "random_state", ",", "RND_SEED", ")" ]
test the initialisation of the object .
train
false
12,265
def track_from_file(file_object, filetype, timeout=DEFAULT_ASYNC_TIMEOUT, force_upload=False):
    if (not force_upload):
        try:
            md5 = hashlib.md5(file_object.read()).hexdigest()
            return track_from_md5(md5)
        except util.EchoNestAPIError:
            pass
    file_object.seek(0)
    return _track_from_data(file_object.read(), filetype, timeout)
[ "def", "track_from_file", "(", "file_object", ",", "filetype", ",", "timeout", "=", "DEFAULT_ASYNC_TIMEOUT", ",", "force_upload", "=", "False", ")", ":", "if", "(", "not", "force_upload", ")", ":", "try", ":", "md5", "=", "hashlib", ".", "md5", "(", "file_object", ".", "read", "(", ")", ")", ".", "hexdigest", "(", ")", "return", "track_from_md5", "(", "md5", ")", "except", "util", ".", "EchoNestAPIError", ":", "pass", "file_object", ".", "seek", "(", "0", ")", "return", "_track_from_data", "(", "file_object", ".", "read", "(", ")", ",", "filetype", ",", "timeout", ")" ]
create a track object from a file-like object .
train
true
12,267
def decipher_vigenere(msg, key, symbols=None):
    (msg, key, A) = _prep(msg, key, symbols)
    map = {c: i for (i, c) in enumerate(A)}
    N = len(A)
    K = [map[c] for c in key]
    n = len(K)
    C = [map[c] for c in msg]
    rv = ''.join([A[(((- K[(i % n)]) + c) % N)] for (i, c) in enumerate(C)])
    return rv
[ "def", "decipher_vigenere", "(", "msg", ",", "key", ",", "symbols", "=", "None", ")", ":", "(", "msg", ",", "key", ",", "A", ")", "=", "_prep", "(", "msg", ",", "key", ",", "symbols", ")", "map", "=", "{", "c", ":", "i", "for", "(", "i", ",", "c", ")", "in", "enumerate", "(", "A", ")", "}", "N", "=", "len", "(", "A", ")", "K", "=", "[", "map", "[", "c", "]", "for", "c", "in", "key", "]", "n", "=", "len", "(", "K", ")", "C", "=", "[", "map", "[", "c", "]", "for", "c", "in", "msg", "]", "rv", "=", "''", ".", "join", "(", "[", "A", "[", "(", "(", "(", "-", "K", "[", "(", "i", "%", "n", ")", "]", ")", "+", "c", ")", "%", "N", ")", "]", "for", "(", "i", ",", "c", ")", "in", "enumerate", "(", "C", ")", "]", ")", "return", "rv" ]
decode using the vigenère cipher .
train
false
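a hedged round-trip sketch for decipher_vigenere above; encipher_vigenere is assumed to be the matching encoder in sympy.crypto.crypto .

    from sympy.crypto.crypto import encipher_vigenere, decipher_vigenere
    # encode with the default uppercase alphabet, then invert
    ct = encipher_vigenere('MEETMEATNOON', 'KEY')
    assert decipher_vigenere(ct, 'KEY') == 'MEETMEATNOON'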
12,268
def getNewRepository(): return ExportRepository()
[ "def", "getNewRepository", "(", ")", ":", "return", "ExportRepository", "(", ")" ]
get the repository constructor .
train
false
12,270
def update_conn_info(conn_info, connector, lookup_service): init_targ_map = build_initiator_target_map(connector, conn_info['data']['target_wwn'], lookup_service) if init_targ_map: conn_info['data']['initiator_target_map'] = init_targ_map
[ "def", "update_conn_info", "(", "conn_info", ",", "connector", ",", "lookup_service", ")", ":", "init_targ_map", "=", "build_initiator_target_map", "(", "connector", ",", "conn_info", "[", "'data'", "]", "[", "'target_wwn'", "]", ",", "lookup_service", ")", "if", "init_targ_map", ":", "conn_info", "[", "'data'", "]", "[", "'initiator_target_map'", "]", "=", "init_targ_map" ]
set the wwn mapping list on the connection info .
train
false
12,272
@receiver(post_save, sender=Release) def update_download_supernav(sender, instance, **kwargs): if kwargs.get('raw', False): return if instance.is_published: update_supernav() update_homepage_download_box()
[ "@", "receiver", "(", "post_save", ",", "sender", "=", "Release", ")", "def", "update_download_supernav", "(", "sender", ",", "instance", ",", "**", "kwargs", ")", ":", "if", "kwargs", ".", "get", "(", "'raw'", ",", "False", ")", ":", "return", "if", "instance", ".", "is_published", ":", "update_supernav", "(", ")", "update_homepage_download_box", "(", ")" ]
update download supernav .
train
false
12,274
def download_book_crossings(target_dir): archive_path = os.path.join(target_dir, ARCHIVE_NAME) if (not os.path.exists(target_dir)): os.makedirs(target_dir) if (not os.path.exists(archive_path)): logger.warn('Downloading dataset from %s (77 MB)', URL) opener = urllib.urlopen(URL) open(archive_path, 'wb').write(opener.read()) logger.info('Decompressing %s', archive_path) source_zip = zipfile.ZipFile(archive_path, 'r') archives = [] for name in source_zip.namelist(): if (name.find('.csv') != (-1)): source_zip.extract(name, target_dir) archives.append(name) source_zip.close() os.remove(archive_path) return archives
[ "def", "download_book_crossings", "(", "target_dir", ")", ":", "archive_path", "=", "os", ".", "path", ".", "join", "(", "target_dir", ",", "ARCHIVE_NAME", ")", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "target_dir", ")", ")", ":", "os", ".", "makedirs", "(", "target_dir", ")", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "archive_path", ")", ")", ":", "logger", ".", "warn", "(", "'Downloading dataset from %s (77 MB)'", ",", "URL", ")", "opener", "=", "urllib", ".", "urlopen", "(", "URL", ")", "open", "(", "archive_path", ",", "'wb'", ")", ".", "write", "(", "opener", ".", "read", "(", ")", ")", "logger", ".", "info", "(", "'Decompressing %s'", ",", "archive_path", ")", "source_zip", "=", "zipfile", ".", "ZipFile", "(", "archive_path", ",", "'r'", ")", "archives", "=", "[", "]", "for", "name", "in", "source_zip", ".", "namelist", "(", ")", ":", "if", "(", "name", ".", "find", "(", "'.csv'", ")", "!=", "(", "-", "1", ")", ")", ":", "source_zip", ".", "extract", "(", "name", ",", "target_dir", ")", "archives", ".", "append", "(", "name", ")", "source_zip", ".", "close", "(", ")", "os", ".", "remove", "(", "archive_path", ")", "return", "archives" ]
download the book-crossing data and unzip it .
train
false
12,275
def reveal(file): finder = _getfinder() fsr = Carbon.File.FSRef(file) file_alias = fsr.FSNewAliasMinimal() return finder.reveal(file_alias)
[ "def", "reveal", "(", "file", ")", ":", "finder", "=", "_getfinder", "(", ")", "fsr", "=", "Carbon", ".", "File", ".", "FSRef", "(", "file", ")", "file_alias", "=", "fsr", ".", "FSNewAliasMinimal", "(", ")", "return", "finder", ".", "reveal", "(", "file_alias", ")" ]
reveal a file in the finder .
train
false
12,276
def enforce(context, action, target): init() match_list = (('rule:%s' % action),) credentials = context.to_dict() policy.enforce(match_list, target, credentials, exception.PolicyNotAuthorized, action=action)
[ "def", "enforce", "(", "context", ",", "action", ",", "target", ")", ":", "init", "(", ")", "match_list", "=", "(", "(", "'rule:%s'", "%", "action", ")", ",", ")", "credentials", "=", "context", ".", "to_dict", "(", ")", "policy", ".", "enforce", "(", "match_list", ",", "target", ",", "credentials", ",", "exception", ".", "PolicyNotAuthorized", ",", "action", "=", "action", ")" ]
verifies that the action is valid on the target in this context .
train
false
12,277
def disabled_account(request, username, template_name, extra_context=None): user = get_object_or_404(get_user_model(), username__iexact=username) if user.is_active: raise Http404 if (not extra_context): extra_context = dict() extra_context['viewed_user'] = user extra_context['profile'] = get_user_profile(user=user) return ExtraContextTemplateView.as_view(template_name=template_name, extra_context=extra_context)(request)
[ "def", "disabled_account", "(", "request", ",", "username", ",", "template_name", ",", "extra_context", "=", "None", ")", ":", "user", "=", "get_object_or_404", "(", "get_user_model", "(", ")", ",", "username__iexact", "=", "username", ")", "if", "user", ".", "is_active", ":", "raise", "Http404", "if", "(", "not", "extra_context", ")", ":", "extra_context", "=", "dict", "(", ")", "extra_context", "[", "'viewed_user'", "]", "=", "user", "extra_context", "[", "'profile'", "]", "=", "get_user_profile", "(", "user", "=", "user", ")", "return", "ExtraContextTemplateView", ".", "as_view", "(", "template_name", "=", "template_name", ",", "extra_context", "=", "extra_context", ")", "(", "request", ")" ]
checks if the account is disabled .
train
true
12,278
def revert_ccx_staff_to_coaches(apps, schema_editor): CustomCourseForEdX = apps.get_model(u'ccx', u'CustomCourseForEdX') db_alias = schema_editor.connection.alias if (not (db_alias == u'default')): return list_ccx = CustomCourseForEdX.objects.using(db_alias).all() for ccx in list_ccx: ccx_locator = CCXLocator.from_course_locator(ccx.course_id, unicode(ccx.id)) try: course = get_course_by_id(ccx_locator) except Http404: log.error(u'Could not migrate access for CCX course: %s', unicode(ccx_locator)) else: coach = User.objects.get(id=ccx.coach.id) allow_access(course, coach, u'ccx_coach', send_email=False) revoke_access(course, coach, u'staff', send_email=False) log.info(u'The CCX coach of CCX %s has been switched from "Staff" to "CCX Coach".', unicode(ccx_locator))
[ "def", "revert_ccx_staff_to_coaches", "(", "apps", ",", "schema_editor", ")", ":", "CustomCourseForEdX", "=", "apps", ".", "get_model", "(", "u'ccx'", ",", "u'CustomCourseForEdX'", ")", "db_alias", "=", "schema_editor", ".", "connection", ".", "alias", "if", "(", "not", "(", "db_alias", "==", "u'default'", ")", ")", ":", "return", "list_ccx", "=", "CustomCourseForEdX", ".", "objects", ".", "using", "(", "db_alias", ")", ".", "all", "(", ")", "for", "ccx", "in", "list_ccx", ":", "ccx_locator", "=", "CCXLocator", ".", "from_course_locator", "(", "ccx", ".", "course_id", ",", "unicode", "(", "ccx", ".", "id", ")", ")", "try", ":", "course", "=", "get_course_by_id", "(", "ccx_locator", ")", "except", "Http404", ":", "log", ".", "error", "(", "u'Could not migrate access for CCX course: %s'", ",", "unicode", "(", "ccx_locator", ")", ")", "else", ":", "coach", "=", "User", ".", "objects", ".", "get", "(", "id", "=", "ccx", ".", "coach", ".", "id", ")", "allow_access", "(", "course", ",", "coach", ",", "u'ccx_coach'", ",", "send_email", "=", "False", ")", "revoke_access", "(", "course", ",", "coach", ",", "u'staff'", ",", "send_email", "=", "False", ")", "log", ".", "info", "(", "u'The CCX coach of CCX %s has been switched from \"Staff\" to \"CCX Coach\".'", ",", "unicode", "(", "ccx_locator", ")", ")" ]
modify all staff on ccx courses so that they no longer have the staff role on the course that they coach .
train
false
12,279
def test_jpeg(h, f): if (h[6:10] == 'JFIF'): return 'jpeg'
[ "def", "test_jpeg", "(", "h", ",", "f", ")", ":", "if", "(", "h", "[", "6", ":", "10", "]", "==", "'JFIF'", ")", ":", "return", "'jpeg'" ]
jpeg data in jfif format .
train
false
12,280
@contextlib.contextmanager def batch_normalization(*bricks): from blocks.bricks import BatchNormalization bn = find_bricks(bricks, (lambda b: isinstance(b, BatchNormalization))) try: for brick in bn: brick.__enter__() (yield) finally: for brick in bn[::(-1)]: brick.__exit__()
[ "@", "contextlib", ".", "contextmanager", "def", "batch_normalization", "(", "*", "bricks", ")", ":", "from", "blocks", ".", "bricks", "import", "BatchNormalization", "bn", "=", "find_bricks", "(", "bricks", ",", "(", "lambda", "b", ":", "isinstance", "(", "b", ",", "BatchNormalization", ")", ")", ")", "try", ":", "for", "brick", "in", "bn", ":", "brick", ".", "__enter__", "(", ")", "(", "yield", ")", "finally", ":", "for", "brick", "in", "bn", "[", ":", ":", "(", "-", "1", ")", "]", ":", "brick", ".", "__exit__", "(", ")" ]
context manager that applies batch normalization to all batchnormalization bricks found in the given bricks .
train
false
12,281
def _get_n_epochs(epochs, n): epochs_out = [] for e in epochs: if (not isinstance(e, (list, tuple))): e = (e,) epochs_out.append(e) if (len(epochs_out) >= n): (yield epochs_out) epochs_out = [] (yield epochs_out)
[ "def", "_get_n_epochs", "(", "epochs", ",", "n", ")", ":", "epochs_out", "=", "[", "]", "for", "e", "in", "epochs", ":", "if", "(", "not", "isinstance", "(", "e", ",", "(", "list", ",", "tuple", ")", ")", ")", ":", "e", "=", "(", "e", ",", ")", "epochs_out", ".", "append", "(", "e", ")", "if", "(", "len", "(", "epochs_out", ")", ">=", "n", ")", ":", "(", "yield", "epochs_out", ")", "epochs_out", "=", "[", "]", "(", "yield", "epochs_out", ")" ]
generator that returns lists with at most n epochs .
train
false
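a worked example of the chunking done by _get_n_epochs above; scalars are wrapped into 1-tuples, and a trailing partial (possibly empty) chunk is always yielded last .

    chunks = list(_get_n_epochs([1, 2, 3, 4, 5], 2))
    # -> [[(1,), (2,)], [(3,), (4,)], [(5,)]]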
12,282
def percentiles_to_weights(qs, vals, length): if (length == 0): return () diff = np.ediff1d(qs, 0.0, 0.0) weights = ((0.5 * length) * (diff[1:] + diff[:(-1)])) return (vals.tolist(), weights.tolist())
[ "def", "percentiles_to_weights", "(", "qs", ",", "vals", ",", "length", ")", ":", "if", "(", "length", "==", "0", ")", ":", "return", "(", ")", "diff", "=", "np", ".", "ediff1d", "(", "qs", ",", "0.0", ",", "0.0", ")", "weights", "=", "(", "(", "0.5", "*", "length", ")", "*", "(", "diff", "[", "1", ":", "]", "+", "diff", "[", ":", "(", "-", "1", ")", "]", ")", ")", "return", "(", "vals", ".", "tolist", "(", ")", ",", "weights", ".", "tolist", "(", ")", ")" ]
weigh percentile values by length and the difference between percentiles .
train
false
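a worked example for percentiles_to_weights above: with evenly spaced quantiles the interior value receives twice the endpoint weight, and the weights sum to length .

    import numpy as np
    vals, weights = percentiles_to_weights(np.array([0.0, 0.5, 1.0]),
                                           np.array([1.0, 5.0, 9.0]), 100)
    # vals -> [1.0, 5.0, 9.0]; weights -> [25.0, 50.0, 25.0]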
12,283
def new_node(category, title, user, description='', parent=None): Node = apps.get_model('osf.Node') category = category title = strip_html(title.strip()) if description: description = strip_html(description.strip()) node = Node(title=title, category=category, creator=user, description=description, parent=parent) node.save() return node
[ "def", "new_node", "(", "category", ",", "title", ",", "user", ",", "description", "=", "''", ",", "parent", "=", "None", ")", ":", "Node", "=", "apps", ".", "get_model", "(", "'osf.Node'", ")", "category", "=", "category", "title", "=", "strip_html", "(", "title", ".", "strip", "(", ")", ")", "if", "description", ":", "description", "=", "strip_html", "(", "description", ".", "strip", "(", ")", ")", "node", "=", "Node", "(", "title", "=", "title", ",", "category", "=", "category", ",", "creator", "=", "user", ",", "description", "=", "description", ",", "parent", "=", "parent", ")", "node", ".", "save", "(", ")", "return", "node" ]
create a new project or component .
train
false
12,284
def fixParam(line): result = re.sub('(\\w+):', '@param \\1', line) result = re.sub(' @', '@', result) return result
[ "def", "fixParam", "(", "line", ")", ":", "result", "=", "re", ".", "sub", "(", "'(\\\\w+):'", ",", "'@param \\\\1'", ",", "line", ")", "result", "=", "re", ".", "sub", "(", "' @'", ",", "'@'", ",", "result", ")", "return", "result" ]
change foo: bar to @foo bar .
train
false
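a quick illustration of the rewrite performed by fixParam above :

    fixParam('x: the input value')
    # -> '@param x the input value'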
12,286
def example_number_for_non_geo_entity(country_calling_code): metadata = PhoneMetadata.metadata_for_nongeo_region(country_calling_code, None) if (metadata is not None): desc = metadata.general_desc try: if (desc.example_number is not None): return parse(((_PLUS_SIGN + unicod(country_calling_code)) + desc.example_number), UNKNOWN_REGION) except NumberParseException: pass return None
[ "def", "example_number_for_non_geo_entity", "(", "country_calling_code", ")", ":", "metadata", "=", "PhoneMetadata", ".", "metadata_for_nongeo_region", "(", "country_calling_code", ",", "None", ")", "if", "(", "metadata", "is", "not", "None", ")", ":", "desc", "=", "metadata", ".", "general_desc", "try", ":", "if", "(", "desc", ".", "example_number", "is", "not", "None", ")", ":", "return", "parse", "(", "(", "(", "_PLUS_SIGN", "+", "unicod", "(", "country_calling_code", ")", ")", "+", "desc", ".", "example_number", ")", ",", "UNKNOWN_REGION", ")", "except", "NumberParseException", ":", "pass", "return", "None" ]
gets a valid number for the specified country calling code for a non-geographical entity .
train
false
12,287
def im_proposals(net, im): blobs = {} (blobs['data'], blobs['im_info']) = _get_image_blob(im) net.blobs['data'].reshape(*blobs['data'].shape) net.blobs['im_info'].reshape(*blobs['im_info'].shape) blobs_out = net.forward(data=blobs['data'].astype(np.float32, copy=False), im_info=blobs['im_info'].astype(np.float32, copy=False)) scale = blobs['im_info'][(0, 2)] boxes = (blobs_out['rois'][:, 1:].copy() / scale) scores = blobs_out['scores'].copy() return (boxes, scores)
[ "def", "im_proposals", "(", "net", ",", "im", ")", ":", "blobs", "=", "{", "}", "(", "blobs", "[", "'data'", "]", ",", "blobs", "[", "'im_info'", "]", ")", "=", "_get_image_blob", "(", "im", ")", "net", ".", "blobs", "[", "'data'", "]", ".", "reshape", "(", "*", "blobs", "[", "'data'", "]", ".", "shape", ")", "net", ".", "blobs", "[", "'im_info'", "]", ".", "reshape", "(", "*", "blobs", "[", "'im_info'", "]", ".", "shape", ")", "blobs_out", "=", "net", ".", "forward", "(", "data", "=", "blobs", "[", "'data'", "]", ".", "astype", "(", "np", ".", "float32", ",", "copy", "=", "False", ")", ",", "im_info", "=", "blobs", "[", "'im_info'", "]", ".", "astype", "(", "np", ".", "float32", ",", "copy", "=", "False", ")", ")", "scale", "=", "blobs", "[", "'im_info'", "]", "[", "(", "0", ",", "2", ")", "]", "boxes", "=", "(", "blobs_out", "[", "'rois'", "]", "[", ":", ",", "1", ":", "]", ".", "copy", "(", ")", "/", "scale", ")", "scores", "=", "blobs_out", "[", "'scores'", "]", ".", "copy", "(", ")", "return", "(", "boxes", ",", "scores", ")" ]
generate rpn proposals on a single image .
train
false
12,288
def db_drop(): logger.info('dropping capublic...') try: connection = MySQLdb.connect(user=MYSQL_USER, passwd=MYSQL_PASSWORD, db='capublic') except _mysql_exceptions.OperationalError: logger.info('...no such database. Bailing.') return connection.autocommit(True) cursor = connection.cursor() cursor.execute('DROP DATABASE IF EXISTS capublic;') connection.close() logger.info('...done.')
[ "def", "db_drop", "(", ")", ":", "logger", ".", "info", "(", "'dropping capublic...'", ")", "try", ":", "connection", "=", "MySQLdb", ".", "connect", "(", "user", "=", "MYSQL_USER", ",", "passwd", "=", "MYSQL_PASSWORD", ",", "db", "=", "'capublic'", ")", "except", "_mysql_exceptions", ".", "OperationalError", ":", "logger", ".", "info", "(", "'...no such database. Bailing.'", ")", "return", "connection", ".", "autocommit", "(", "True", ")", "cursor", "=", "connection", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "'DROP DATABASE IF EXISTS capublic;'", ")", "connection", ".", "close", "(", ")", "logger", ".", "info", "(", "'...done.'", ")" ]
drop the old database .
train
false
12,289
def get_old_style_versioned_asset_url(asset_path): try: locator = StaticContent.get_location_from_path(asset_path) content = AssetManager.find(locator, as_stream=True) return u'{}/{}{}'.format(VERSIONED_ASSETS_PREFIX, content.content_digest, asset_path) except (InvalidKeyError, ItemNotFoundError): pass return asset_path
[ "def", "get_old_style_versioned_asset_url", "(", "asset_path", ")", ":", "try", ":", "locator", "=", "StaticContent", ".", "get_location_from_path", "(", "asset_path", ")", "content", "=", "AssetManager", ".", "find", "(", "locator", ",", "as_stream", "=", "True", ")", "return", "u'{}/{}{}'", ".", "format", "(", "VERSIONED_ASSETS_PREFIX", ",", "content", ".", "content_digest", ",", "asset_path", ")", "except", "(", "InvalidKeyError", ",", "ItemNotFoundError", ")", ":", "pass", "return", "asset_path" ]
creates an old-style versioned asset url .
train
false
12,292
def WinChmod(filename, acl_list, user=None): if (user is None): user = win32api.GetUserName() if (not os.path.exists(filename)): raise RuntimeError(('filename %s does not exist' % filename)) acl_bitmask = 0 for acl in acl_list: acl_bitmask |= getattr(ntsecuritycon, acl) dacl = win32security.ACL() (win_user, _, _) = win32security.LookupAccountName('', user) dacl.AddAccessAllowedAce(win32security.ACL_REVISION, acl_bitmask, win_user) security_descriptor = win32security.GetFileSecurity(filename, win32security.DACL_SECURITY_INFORMATION) security_descriptor.SetSecurityDescriptorDacl(DACL_PRESENT, dacl, DACL_DEFAULT) win32security.SetFileSecurity(filename, win32security.DACL_SECURITY_INFORMATION, security_descriptor)
[ "def", "WinChmod", "(", "filename", ",", "acl_list", ",", "user", "=", "None", ")", ":", "if", "(", "user", "is", "None", ")", ":", "user", "=", "win32api", ".", "GetUserName", "(", ")", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ")", ":", "raise", "RuntimeError", "(", "(", "'filename %s does not exist'", "%", "filename", ")", ")", "acl_bitmask", "=", "0", "for", "acl", "in", "acl_list", ":", "acl_bitmask", "|=", "getattr", "(", "ntsecuritycon", ",", "acl", ")", "dacl", "=", "win32security", ".", "ACL", "(", ")", "(", "win_user", ",", "_", ",", "_", ")", "=", "win32security", ".", "LookupAccountName", "(", "''", ",", "user", ")", "dacl", ".", "AddAccessAllowedAce", "(", "win32security", ".", "ACL_REVISION", ",", "acl_bitmask", ",", "win_user", ")", "security_descriptor", "=", "win32security", ".", "GetFileSecurity", "(", "filename", ",", "win32security", ".", "DACL_SECURITY_INFORMATION", ")", "security_descriptor", ".", "SetSecurityDescriptorDacl", "(", "DACL_PRESENT", ",", "dacl", ",", "DACL_DEFAULT", ")", "win32security", ".", "SetFileSecurity", "(", "filename", ",", "win32security", ".", "DACL_SECURITY_INFORMATION", ",", "security_descriptor", ")" ]
provide chmod-like functionality for windows .
train
true
12,293
def make_pipeline(*steps): return Pipeline(_name_estimators(steps))
[ "def", "make_pipeline", "(", "*", "steps", ")", ":", "return", "Pipeline", "(", "_name_estimators", "(", "steps", ")", ")" ]
construct a pipeline from the given estimators .
train
false
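a hedged usage sketch for make_pipeline above, assuming scikit-learn estimators; step names come from the lowercased class names via _name_estimators .

    from sklearn.preprocessing import StandardScaler
    from sklearn.svm import SVC
    pipe = make_pipeline(StandardScaler(), SVC())
    # pipe.steps -> [('standardscaler', StandardScaler()), ('svc', SVC())]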
12,294
def splitdoc(doc): lines = split(strip(doc), '\n') if (len(lines) == 1): return (lines[0], '') elif ((len(lines) >= 2) and (not rstrip(lines[1]))): return (lines[0], join(lines[2:], '\n')) return ('', join(lines, '\n'))
[ "def", "splitdoc", "(", "doc", ")", ":", "lines", "=", "split", "(", "strip", "(", "doc", ")", ",", "'\\n'", ")", "if", "(", "len", "(", "lines", ")", "==", "1", ")", ":", "return", "(", "lines", "[", "0", "]", ",", "''", ")", "elif", "(", "(", "len", "(", "lines", ")", ">=", "2", ")", "and", "(", "not", "rstrip", "(", "lines", "[", "1", "]", ")", ")", ")", ":", "return", "(", "lines", "[", "0", "]", ",", "join", "(", "lines", "[", "2", ":", "]", ",", "'\\n'", ")", ")", "return", "(", "''", ",", "join", "(", "lines", ",", "'\\n'", ")", ")" ]
split a doc string into a synopsis line and the rest .
train
false
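two illustrative calls to splitdoc above: a one-line docstring yields an empty body, while a blank second line separates synopsis from body .

    splitdoc('Frobnicate widgets.')
    # -> ('Frobnicate widgets.', '')
    splitdoc('Frobnicate widgets.\n\nLonger description.')
    # -> ('Frobnicate widgets.', 'Longer description.')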
12,295
def conditional(sentence, predictive=True, **kwargs): S = sentence if (not (hasattr(S, 'words') and hasattr(S, 'parse_token'))): raise TypeError(('%s object is not a parsed Sentence' % repr(S.__class__.__name__))) if question(S): return False i = find((lambda w: (s(w) == 'were')), S) i = ((i and i.index) or 0) if ((i > 0) and ((s(S[(i - 1)]) in ('i', 'it', 'he', 'she')) or (S[(i - 1)].type == 'NN'))): return False for (i, w) in enumerate(S): if (w.type == 'MD'): if ((s(w) == 'ought') and (i < len(S)) and (s(S[(i + 1)]) == 'to')): return True if (s(w) in ('would', 'should', "'d", 'could', 'might')): return True if ((s(w) in ('will', 'shall', "'ll")) and (i > 0) and (s(S[(i - 1)]) == 'you') and (not verbs(S, 0, i))): return False if ((s(w) in ('will', 'shall', "'ll")) and predictive): return True if (s(w) in ('will', 'shall', "'ll", 'can', 'may')): r = s(S).rstrip(' .!') for cc in ('if', 'when', 'once', 'as soon as', 'assuming', 'provided that', 'given that'): if ((cc + ' ') in r): return True return False
[ "def", "conditional", "(", "sentence", ",", "predictive", "=", "True", ",", "**", "kwargs", ")", ":", "S", "=", "sentence", "if", "(", "not", "(", "hasattr", "(", "S", ",", "'words'", ")", "and", "hasattr", "(", "S", ",", "'parse_token'", ")", ")", ")", ":", "raise", "TypeError", "(", "(", "'%s object is not a parsed Sentence'", "%", "repr", "(", "S", ".", "__class__", ".", "__name__", ")", ")", ")", "if", "question", "(", "S", ")", ":", "return", "False", "i", "=", "find", "(", "(", "lambda", "w", ":", "(", "s", "(", "w", ")", "==", "'were'", ")", ")", ",", "S", ")", "i", "=", "(", "(", "i", "and", "i", ".", "index", ")", "or", "0", ")", "if", "(", "(", "i", ">", "0", ")", "and", "(", "(", "s", "(", "S", "[", "(", "i", "-", "1", ")", "]", ")", "in", "(", "'i'", ",", "'it'", ",", "'he'", ",", "'she'", ")", ")", "or", "(", "S", "[", "(", "i", "-", "1", ")", "]", ".", "type", "==", "'NN'", ")", ")", ")", ":", "return", "False", "for", "(", "i", ",", "w", ")", "in", "enumerate", "(", "S", ")", ":", "if", "(", "w", ".", "type", "==", "'MD'", ")", ":", "if", "(", "(", "s", "(", "w", ")", "==", "'ought'", ")", "and", "(", "i", "<", "len", "(", "S", ")", ")", "and", "(", "s", "(", "S", "[", "(", "i", "+", "1", ")", "]", ")", "==", "'to'", ")", ")", ":", "return", "True", "if", "(", "s", "(", "w", ")", "in", "(", "'would'", ",", "'should'", ",", "\"'d\"", ",", "'could'", ",", "'might'", ")", ")", ":", "return", "True", "if", "(", "(", "s", "(", "w", ")", "in", "(", "'will'", ",", "'shall'", ",", "\"'ll\"", ")", ")", "and", "(", "i", ">", "0", ")", "and", "(", "s", "(", "S", "[", "(", "i", "-", "1", ")", "]", ")", "==", "'you'", ")", "and", "(", "not", "verbs", "(", "S", ",", "0", ",", "i", ")", ")", ")", ":", "return", "False", "if", "(", "(", "s", "(", "w", ")", "in", "(", "'will'", ",", "'shall'", ",", "\"'ll\"", ")", ")", "and", "predictive", ")", ":", "return", "True", "if", "(", "s", "(", "w", ")", "in", "(", "'will'", ",", "'shall'", ",", "\"'ll\"", ",", "'can'", ",", "'may'", ")", ")", ":", "r", "=", "s", "(", "S", ")", ".", "rstrip", "(", "' .!'", ")", "for", "cc", "in", "(", "'if'", ",", "'when'", ",", "'once'", ",", "'as soon as'", ",", "'assuming'", ",", "'provided that'", ",", "'given that'", ")", ":", "if", "(", "(", "cc", "+", "' '", ")", "in", "r", ")", ":", "return", "True", "return", "False" ]
return true if the parsed sentence is in the conditional mood .
train
false
12,296
@verbose def corpusreader_demo(): from nltk.corpus import twitter_samples as tweets print() print('Complete tweet documents') print(SPACER) for tweet in tweets.docs('tweets.20150430-223406.json')[:1]: print(json.dumps(tweet, indent=1, sort_keys=True)) print() print('Raw tweet strings:') print(SPACER) for text in tweets.strings('tweets.20150430-223406.json')[:15]: print(text) print() print('Tokenized tweet strings:') print(SPACER) for toks in tweets.tokenized('tweets.20150430-223406.json')[:15]: print(toks)
[ "@", "verbose", "def", "corpusreader_demo", "(", ")", ":", "from", "nltk", ".", "corpus", "import", "twitter_samples", "as", "tweets", "print", "(", ")", "print", "(", "'Complete tweet documents'", ")", "print", "(", "SPACER", ")", "for", "tweet", "in", "tweets", ".", "docs", "(", "'tweets.20150430-223406.json'", ")", "[", ":", "1", "]", ":", "print", "(", "json", ".", "dumps", "(", "tweet", ",", "indent", "=", "1", ",", "sort_keys", "=", "True", ")", ")", "print", "(", ")", "print", "(", "'Raw tweet strings:'", ")", "print", "(", "SPACER", ")", "for", "text", "in", "tweets", ".", "strings", "(", "'tweets.20150430-223406.json'", ")", "[", ":", "15", "]", ":", "print", "(", "text", ")", "print", "(", ")", "print", "(", "'Tokenized tweet strings:'", ")", "print", "(", "SPACER", ")", "for", "toks", "in", "tweets", ".", "tokenized", "(", "'tweets.20150430-223406.json'", ")", "[", ":", "15", "]", ":", "print", "(", "toks", ")" ]
use :module:twittercorpusreader to read a file of tweets .
train
false
12,297
def getstatusoutput(cmd): import os pipe = os.popen((('{ ' + cmd) + '; } 2>&1'), 'r') text = pipe.read() sts = pipe.close() if (sts is None): sts = 0 if (text[(-1):] == '\n'): text = text[:(-1)] return (sts, text)
[ "def", "getstatusoutput", "(", "cmd", ")", ":", "import", "os", "pipe", "=", "os", ".", "popen", "(", "(", "(", "'{ '", "+", "cmd", ")", "+", "'; } 2>&1'", ")", ",", "'r'", ")", "text", "=", "pipe", ".", "read", "(", ")", "sts", "=", "pipe", ".", "close", "(", ")", "if", "(", "sts", "is", "None", ")", ":", "sts", "=", "0", "if", "(", "text", "[", "(", "-", "1", ")", ":", "]", "==", "'\\n'", ")", ":", "text", "=", "text", "[", ":", "(", "-", "1", ")", "]", "return", "(", "sts", ",", "text", ")" ]
return (status, output) of executing cmd in a shell .
train
false
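a usage sketch for getstatusoutput above; stderr is merged into the captured text by the 2>&1 redirection, and sts is the raw wait status returned by os.popen().close() .

    sts, text = getstatusoutput('ls /nonexistent')
    # sts != 0; text holds the shell's error message with the trailing newline stripped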
12,298
def wheel_translation(event): if PYQT4: tx = event.delta() ty = 0.0 if (event.orientation() == Qt.Vertical): (tx, ty) = (ty, tx) else: angle = event.angleDelta() tx = angle.x() ty = angle.y() return (tx, ty)
[ "def", "wheel_translation", "(", "event", ")", ":", "if", "PYQT4", ":", "tx", "=", "event", ".", "delta", "(", ")", "ty", "=", "0.0", "if", "(", "event", ".", "orientation", "(", ")", "==", "Qt", ".", "Vertical", ")", ":", "(", "tx", ",", "ty", ")", "=", "(", "ty", ",", "tx", ")", "else", ":", "angle", "=", "event", ".", "angleDelta", "(", ")", "tx", "=", "angle", ".", "x", "(", ")", "ty", "=", "angle", ".", "y", "(", ")", "return", "(", "tx", ",", "ty", ")" ]
return the tx ty translation delta for a pan .
train
false
12,299
def mark_resources_dirty(f): @six.wraps(f) def wrapper(_self, context, *args, **kwargs): ret_val = f(_self, context, *args, **kwargs) set_resources_dirty(context) return ret_val return wrapper
[ "def", "mark_resources_dirty", "(", "f", ")", ":", "@", "six", ".", "wraps", "(", "f", ")", "def", "wrapper", "(", "_self", ",", "context", ",", "*", "args", ",", "**", "kwargs", ")", ":", "ret_val", "=", "f", "(", "_self", ",", "context", ",", "*", "args", ",", "**", "kwargs", ")", "set_resources_dirty", "(", "context", ")", "return", "ret_val", "return", "wrapper" ]
decorator for functions which alter resource usage .
train
false
12,300
def VimExpressionToPythonType(vim_expression): result = vim.eval(vim_expression) if (not (isinstance(result, str) or isinstance(result, bytes))): return result try: return int(result) except ValueError: return ToUnicode(result)
[ "def", "VimExpressionToPythonType", "(", "vim_expression", ")", ":", "result", "=", "vim", ".", "eval", "(", "vim_expression", ")", "if", "(", "not", "(", "isinstance", "(", "result", ",", "str", ")", "or", "isinstance", "(", "result", ",", "bytes", ")", ")", ")", ":", "return", "result", "try", ":", "return", "int", "(", "result", ")", "except", "ValueError", ":", "return", "ToUnicode", "(", "result", ")" ]
returns a python type from the return value of the supplied vim expression .
train
false
12,301
def top_down(brule, fns=basic_fns): return chain(do_one(brule, identity), (lambda expr: sall(top_down(brule, fns), fns)(expr)))
[ "def", "top_down", "(", "brule", ",", "fns", "=", "basic_fns", ")", ":", "return", "chain", "(", "do_one", "(", "brule", ",", "identity", ")", ",", "(", "lambda", "expr", ":", "sall", "(", "top_down", "(", "brule", ",", "fns", ")", ",", "fns", ")", "(", "expr", ")", ")", ")" ]
apply a rule down a tree running it on the top nodes first .
train
false
12,302
def bridge_list(): cmd = 'ovs-vsctl list-br' result = __salt__['cmd.run_all'](cmd) retcode = result['retcode'] stdout = result['stdout'] return _stdout_list_split(retcode, stdout)
[ "def", "bridge_list", "(", ")", ":", "cmd", "=", "'ovs-vsctl list-br'", "result", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ")", "retcode", "=", "result", "[", "'retcode'", "]", "stdout", "=", "result", "[", "'stdout'", "]", "return", "_stdout_list_split", "(", "retcode", ",", "stdout", ")" ]
lists all existing real and fake bridges .
train
true
12,303
def message_from_bytes(s, *args, **kws): from email.parser import BytesParser return BytesParser(*args, **kws).parsebytes(s)
[ "def", "message_from_bytes", "(", "s", ",", "*", "args", ",", "**", "kws", ")", ":", "from", "email", ".", "parser", "import", "BytesParser", "return", "BytesParser", "(", "*", "args", ",", "**", "kws", ")", ".", "parsebytes", "(", "s", ")" ]
parse a bytes string into a message object model .
train
true
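a minimal round trip through message_from_bytes above, using the standard-library email parser :

    msg = message_from_bytes(b'Subject: hi\r\n\r\nbody\r\n')
    msg['Subject']     # -> 'hi'
    msg.get_payload()  # -> 'body\r\n'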
12,304
def is_negatively_weighted(G, edge=None, weight='weight'): if (edge is not None): data = G.get_edge_data(*edge) if (data is None): msg = 'Edge {!r} does not exist.'.format(edge) raise nx.NetworkXError(msg) return ((weight in data) and (data[weight] < 0)) return any((((weight in data) and (data[weight] < 0)) for (u, v, data) in G.edges(data=True)))
[ "def", "is_negatively_weighted", "(", "G", ",", "edge", "=", "None", ",", "weight", "=", "'weight'", ")", ":", "if", "(", "edge", "is", "not", "None", ")", ":", "data", "=", "G", ".", "get_edge_data", "(", "*", "edge", ")", "if", "(", "data", "is", "None", ")", ":", "msg", "=", "'Edge {!r} does not exist.'", ".", "format", "(", "edge", ")", "raise", "nx", ".", "NetworkXError", "(", "msg", ")", "return", "(", "(", "weight", "in", "data", ")", "and", "(", "data", "[", "weight", "]", "<", "0", ")", ")", "return", "any", "(", "(", "(", "(", "weight", "in", "data", ")", "and", "(", "data", "[", "weight", "]", "<", "0", ")", ")", "for", "(", "u", ",", "v", ",", "data", ")", "in", "G", ".", "edges", "(", "data", "=", "True", ")", ")", ")" ]
returns true if g has negatively weighted edges .
train
false
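a short networkx example exercising both the whole-graph and single-edge forms of is_negatively_weighted above :

    import networkx as nx
    G = nx.Graph()
    G.add_edge('a', 'b', weight=-2)
    G.add_edge('b', 'c', weight=3)
    is_negatively_weighted(G)              # True: at least one edge is negative
    is_negatively_weighted(G, ('b', 'c'))  # False: this edge's weight is positive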
12,305
def validate_server(value): global email_endjob, email_full, email_rss if ((value == '') and (email_endjob() or email_full() or email_rss())): return (T('Server address required'), None) else: return (None, value)
[ "def", "validate_server", "(", "value", ")", ":", "global", "email_endjob", ",", "email_full", ",", "email_rss", "if", "(", "(", "value", "==", "''", ")", "and", "(", "email_endjob", "(", ")", "or", "email_full", "(", ")", "or", "email_rss", "(", ")", ")", ")", ":", "return", "(", "T", "(", "'Server address required'", ")", ",", "None", ")", "else", ":", "return", "(", "None", ",", "value", ")" ]
check if the server address is non-empty .
train
false
12,307
def save_resized_background(background_image_file, event_id, size, image_sizes): width_ = 1300 height_ = 500 basewidth = image_sizes.full_width aspect = image_sizes.full_aspect height_size = image_sizes.full_height if (size == 'large'): width_ = 1300 height_ = 500 aspect = image_sizes.full_aspect basewidth = image_sizes.full_width height_size = image_sizes.full_height elif (size == 'thumbnail'): width_ = 500 height_ = 200 aspect = image_sizes.full_aspect basewidth = image_sizes.thumbnail_width height_size = image_sizes.thumbnail_height elif (size == 'icon'): width_ = 75 height_ = 30 aspect = image_sizes.icon_aspect basewidth = image_sizes.icon_width height_size = image_sizes.icon_height upload_path = UPLOAD_PATHS['event'][size].format(event_id=int(event_id)) return save_resized_image(background_image_file, width_, height_, basewidth, aspect, height_size, upload_path)
[ "def", "save_resized_background", "(", "background_image_file", ",", "event_id", ",", "size", ",", "image_sizes", ")", ":", "width_", "=", "1300", "height_", "=", "500", "basewidth", "=", "image_sizes", ".", "full_width", "aspect", "=", "image_sizes", ".", "full_aspect", "height_size", "=", "image_sizes", ".", "full_height", "if", "(", "size", "==", "'large'", ")", ":", "width_", "=", "1300", "height_", "=", "500", "aspect", "=", "image_sizes", ".", "full_aspect", "basewidth", "=", "image_sizes", ".", "full_width", "height_size", "=", "image_sizes", ".", "full_height", "elif", "(", "size", "==", "'thumbnail'", ")", ":", "width_", "=", "500", "height_", "=", "200", "aspect", "=", "image_sizes", ".", "full_aspect", "basewidth", "=", "image_sizes", ".", "thumbnail_width", "height_size", "=", "image_sizes", ".", "thumbnail_height", "elif", "(", "size", "==", "'icon'", ")", ":", "width_", "=", "75", "height_", "=", "30", "aspect", "=", "image_sizes", ".", "icon_aspect", "basewidth", "=", "image_sizes", ".", "icon_width", "height_size", "=", "image_sizes", ".", "icon_height", "upload_path", "=", "UPLOAD_PATHS", "[", "'event'", "]", "[", "size", "]", ".", "format", "(", "event_id", "=", "int", "(", "event_id", ")", ")", "return", "save_resized_image", "(", "background_image_file", ",", "width_", ",", "height_", ",", "basewidth", ",", "aspect", ",", "height_size", ",", "upload_path", ")" ]
save the resized version of the background image .
train
false
12,308
@when(u'we delete from table') def step_delete_from_table(context): context.cli.sendline(u"delete from a where x = 'yyy';")
[ "@", "when", "(", "u'we delete from table'", ")", "def", "step_delete_from_table", "(", "context", ")", ":", "context", ".", "cli", ".", "sendline", "(", "u\"delete from a where x = 'yyy';\"", ")" ]
send delete from table .
train
false
12,309
def eliot_to_stdout(message_formats, action_formats, stdout=sys.stdout): def eliot_output(message): message_type = message.get('message_type') action_type = message.get('action_type') action_status = message.get('action_status') message_format = '' if (message_type is not None): if ((message_type == 'twisted:log') and message.get('error')): message_format = '%(message)s' else: message_format = message_formats.get(message_type, '') elif (action_type is not None): if (action_status == 'started'): message_format = action_formats.get('action_type', '') stdout.write((message_format % message)) stdout.flush() add_destination(eliot_output)
[ "def", "eliot_to_stdout", "(", "message_formats", ",", "action_formats", ",", "stdout", "=", "sys", ".", "stdout", ")", ":", "def", "eliot_output", "(", "message", ")", ":", "message_type", "=", "message", ".", "get", "(", "'message_type'", ")", "action_type", "=", "message", ".", "get", "(", "'action_type'", ")", "action_status", "=", "message", ".", "get", "(", "'action_status'", ")", "message_format", "=", "''", "if", "(", "message_type", "is", "not", "None", ")", ":", "if", "(", "(", "message_type", "==", "'twisted:log'", ")", "and", "message", ".", "get", "(", "'error'", ")", ")", ":", "message_format", "=", "'%(message)s'", "else", ":", "message_format", "=", "message_formats", ".", "get", "(", "message_type", ",", "''", ")", "elif", "(", "action_type", "is", "not", "None", ")", ":", "if", "(", "action_status", "==", "'started'", ")", ":", "message_format", "=", "action_formats", ".", "get", "(", "'action_type'", ",", "''", ")", "stdout", ".", "write", "(", "(", "message_format", "%", "message", ")", ")", "stdout", ".", "flush", "(", ")", "add_destination", "(", "eliot_output", ")" ]
write pretty versions of eliot log messages to stdout .
train
false
12,311
def make_bar(**props): return {'bar': props['mplobj'], 'x0': get_rect_xmin(props['data']), 'y0': get_rect_ymin(props['data']), 'x1': get_rect_xmax(props['data']), 'y1': get_rect_ymax(props['data']), 'alpha': props['style']['alpha'], 'edgecolor': props['style']['edgecolor'], 'facecolor': props['style']['facecolor'], 'edgewidth': props['style']['edgewidth'], 'dasharray': props['style']['dasharray'], 'zorder': props['style']['zorder']}
[ "def", "make_bar", "(", "**", "props", ")", ":", "return", "{", "'bar'", ":", "props", "[", "'mplobj'", "]", ",", "'x0'", ":", "get_rect_xmin", "(", "props", "[", "'data'", "]", ")", ",", "'y0'", ":", "get_rect_ymin", "(", "props", "[", "'data'", "]", ")", ",", "'x1'", ":", "get_rect_xmax", "(", "props", "[", "'data'", "]", ")", ",", "'y1'", ":", "get_rect_ymax", "(", "props", "[", "'data'", "]", ")", ",", "'alpha'", ":", "props", "[", "'style'", "]", "[", "'alpha'", "]", ",", "'edgecolor'", ":", "props", "[", "'style'", "]", "[", "'edgecolor'", "]", ",", "'facecolor'", ":", "props", "[", "'style'", "]", "[", "'facecolor'", "]", ",", "'edgewidth'", ":", "props", "[", "'style'", "]", "[", "'edgewidth'", "]", ",", "'dasharray'", ":", "props", "[", "'style'", "]", "[", "'dasharray'", "]", ",", "'zorder'", ":", "props", "[", "'style'", "]", "[", "'zorder'", "]", "}" ]
make an intermediate bar dictionary .
train
false
12,312
def create_zone(module, gcdns, zone): description = module.params['description'] extra = dict(description=description) zone_name = module.params['zone'] if (zone_name[(-1)] != '.'): zone_name = (zone_name + '.') if (zone is not None): return False try: if (not module.check_mode): gcdns.create_zone(domain=zone_name, extra=extra) return True except ResourceExistsError: return False except InvalidRequestError as error: if (error.code == 'invalid'): module.fail_json(msg=('zone name is not a valid DNS name: %s' % zone_name), changed=False) elif (error.code == 'managedZoneDnsNameNotAvailable'): module.fail_json(msg=('zone name is reserved or already in use: %s' % zone_name), changed=False) elif (error.code == 'verifyManagedZoneDnsNameOwnership'): module.fail_json(msg=('ownership of zone %s needs to be verified at %s' % (zone_name, ZONE_VERIFICATION_URL)), changed=False) else: raise
[ "def", "create_zone", "(", "module", ",", "gcdns", ",", "zone", ")", ":", "description", "=", "module", ".", "params", "[", "'description'", "]", "extra", "=", "dict", "(", "description", "=", "description", ")", "zone_name", "=", "module", ".", "params", "[", "'zone'", "]", "if", "(", "zone_name", "[", "(", "-", "1", ")", "]", "!=", "'.'", ")", ":", "zone_name", "=", "(", "zone_name", "+", "'.'", ")", "if", "(", "zone", "is", "not", "None", ")", ":", "return", "False", "try", ":", "if", "(", "not", "module", ".", "check_mode", ")", ":", "gcdns", ".", "create_zone", "(", "domain", "=", "zone_name", ",", "extra", "=", "extra", ")", "return", "True", "except", "ResourceExistsError", ":", "return", "False", "except", "InvalidRequestError", "as", "error", ":", "if", "(", "error", ".", "code", "==", "'invalid'", ")", ":", "module", ".", "fail_json", "(", "msg", "=", "(", "'zone name is not a valid DNS name: %s'", "%", "zone_name", ")", ",", "changed", "=", "False", ")", "elif", "(", "error", ".", "code", "==", "'managedZoneDnsNameNotAvailable'", ")", ":", "module", ".", "fail_json", "(", "msg", "=", "(", "'zone name is reserved or already in use: %s'", "%", "zone_name", ")", ",", "changed", "=", "False", ")", "elif", "(", "error", ".", "code", "==", "'verifyManagedZoneDnsNameOwnership'", ")", ":", "module", ".", "fail_json", "(", "msg", "=", "(", "'ownership of zone %s needs to be verified at %s'", "%", "(", "zone_name", ",", "ZONE_VERIFICATION_URL", ")", ")", ",", "changed", "=", "False", ")", "else", ":", "raise" ]
create a new zone .
train
false
12,313
def _get_split_zone(zone, _conn, private_zone): for _zone in _conn.get_zones(): if (_zone.name == zone): _private_zone = (True if (_zone.config['PrivateZone'].lower() == 'true') else False) if (_private_zone == private_zone): return _zone return False
[ "def", "_get_split_zone", "(", "zone", ",", "_conn", ",", "private_zone", ")", ":", "for", "_zone", "in", "_conn", ".", "get_zones", "(", ")", ":", "if", "(", "_zone", ".", "name", "==", "zone", ")", ":", "_private_zone", "=", "(", "True", "if", "(", "_zone", ".", "config", "[", "'PrivateZone'", "]", ".", "lower", "(", ")", "==", "'true'", ")", "else", "False", ")", "if", "(", "_private_zone", "==", "private_zone", ")", ":", "return", "_zone", "return", "False" ]
find the zone whose name and privacy setting both match, since with boto route53 public and private zones in a split-dns setup share the same name .
train
true
12,316
def logarithmic(): return [(dimensionless_unscaled, function_units.dex, np.log10, (lambda x: (10.0 ** x)))]
[ "def", "logarithmic", "(", ")", ":", "return", "[", "(", "dimensionless_unscaled", ",", "function_units", ".", "dex", ",", "np", ".", "log10", ",", "(", "lambda", "x", ":", "(", "10.0", "**", "x", ")", ")", ")", "]" ]
allow logarithmic units to be converted to dimensionless fractions .
train
false
12,318
def _find_cmd(cmd): paths = System.Environment.GetEnvironmentVariable('PATH').Split(os.pathsep) for path in paths: filename = os.path.join(path, cmd) if System.IO.File.Exists(filename): return py3compat.bytes_to_str(filename) raise OSError(('command %r not found' % cmd))
[ "def", "_find_cmd", "(", "cmd", ")", ":", "paths", "=", "System", ".", "Environment", ".", "GetEnvironmentVariable", "(", "'PATH'", ")", ".", "Split", "(", "os", ".", "pathsep", ")", "for", "path", "in", "paths", ":", "filename", "=", "os", ".", "path", ".", "join", "(", "path", ",", "cmd", ")", "if", "System", ".", "IO", ".", "File", ".", "Exists", "(", "filename", ")", ":", "return", "py3compat", ".", "bytes_to_str", "(", "filename", ")", "raise", "OSError", "(", "(", "'command %r not found'", "%", "cmd", ")", ")" ]
find the full path to a command using which .
train
false
12,319
def nonzero(a): return a.nonzero()
[ "def", "nonzero", "(", "a", ")", ":", "return", "a", ".", "nonzero", "(", ")" ]
returns one of the following: if return_matrix is false : a tuple of vector arrays such that the ith element of the jth array is the index of the ith non-zero element of the input array in the jth dimension .
train
false
12,320
def test_dead(): jobs = bg.BackgroundJobManager() j = jobs.new(crasher) j.join() nt.assert_equal(len(jobs.completed), 0) nt.assert_equal(len(jobs.dead), 1) jobs.flush() nt.assert_equal(len(jobs.dead), 0)
[ "def", "test_dead", "(", ")", ":", "jobs", "=", "bg", ".", "BackgroundJobManager", "(", ")", "j", "=", "jobs", ".", "new", "(", "crasher", ")", "j", ".", "join", "(", ")", "nt", ".", "assert_equal", "(", "len", "(", "jobs", ".", "completed", ")", ",", "0", ")", "nt", ".", "assert_equal", "(", "len", "(", "jobs", ".", "dead", ")", ",", "1", ")", "jobs", ".", "flush", "(", ")", "nt", ".", "assert_equal", "(", "len", "(", "jobs", ".", "dead", ")", ",", "0", ")" ]
test control of dead jobs .
train
false
12,323
def splitPrefix(name): if (isinstance(name, basestring) and (':' in name)): return tuple(name.split(':', 1)) else: return (None, name)
[ "def", "splitPrefix", "(", "name", ")", ":", "if", "(", "isinstance", "(", "name", ",", "basestring", ")", "and", "(", "':'", "in", "name", ")", ")", ":", "return", "tuple", "(", "name", ".", "split", "(", "':'", ",", "1", ")", ")", "else", ":", "return", "(", "None", ",", "name", ")" ]
split the name into a (prefix, name) tuple .
train
true
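two illustrative calls showing the (prefix, name) split performed by splitPrefix above :

    splitPrefix('soap:Envelope')  # -> ('soap', 'Envelope')
    splitPrefix('Envelope')       # -> (None, 'Envelope')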