id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
33,893
def join_path(a, *p):
    """Join path components with '/', similar to os.path.join.

    Empty components are skipped. A component that starts with '/' is
    appended with its leading slash stripped and WITHOUT inserting a
    separator (so join_path('a', '/b') == 'ab', matching the original).
    """
    result = a
    for segment in p:
        if not segment:
            continue
        if segment.startswith('/'):
            result += segment[1:]
        elif result == '' or result.endswith('/'):
            result += segment
        else:
            result += '/' + segment
    return result
[ "def", "join_path", "(", "a", ",", "*", "p", ")", ":", "path", "=", "a", "for", "b", "in", "p", ":", "if", "(", "len", "(", "b", ")", "==", "0", ")", ":", "continue", "if", "b", ".", "startswith", "(", "'/'", ")", ":", "path", "+=", "b", ...
join path tokens together similar to osp .
train
true
33,895
# Clip a (masked) array: entries below `threshmin` or above `threshmax` are
# replaced with `newval`; the input is copied, not modified in place.
# NOTE(review): wrapped in np.deprecate (mstats.threshold was deprecated in
# scipy 0.17.0); np.deprecate itself was removed in NumPy 2.0 -- confirm the
# pinned NumPy version before reusing this.
@np.deprecate(message='mstats.threshold is deprecated in scipy 0.17.0') def threshold(a, threshmin=None, threshmax=None, newval=0): a = ma.array(a, copy=True) mask = np.zeros(a.shape, dtype=bool) if (threshmin is not None): mask |= (a < threshmin).filled(False) if (threshmax is not None): mask |= (a > threshmax).filled(False) a[mask] = newval return a
[ "@", "np", ".", "deprecate", "(", "message", "=", "'mstats.threshold is deprecated in scipy 0.17.0'", ")", "def", "threshold", "(", "a", ",", "threshmin", "=", "None", ",", "threshmax", "=", "None", ",", "newval", "=", "0", ")", ":", "a", "=", "ma", ".", ...
local threshold of an image .
train
false
33,897
def GetCookieFromResponse(response):
    """Extract the 'user' cookie value from an HTTP response.

    Scans the response's Set-Cookie headers for ones starting with
    'user=' and returns the (optionally quoted) value from the last such
    header, or None if no user cookie is present.
    """
    user_headers = [header
                    for header in response.headers.get_list('Set-Cookie')
                    if header.startswith('user=')]
    if not user_headers:
        return None
    return re.match('user="?([^";]*)', user_headers[-1]).group(1)
[ "def", "GetCookieFromResponse", "(", "response", ")", ":", "user_cookie_header_list", "=", "[", "h", "for", "h", "in", "response", ".", "headers", ".", "get_list", "(", "'Set-Cookie'", ")", "if", "h", ".", "startswith", "(", "'user='", ")", "]", "if", "(",...
extracts the user cookie from an http response and returns it if it exists .
train
false
33,898
def _normalize_vectors(rr): size = np.sqrt(np.sum((rr * rr), axis=1)) size[(size == 0)] = 1.0 rr /= size[:, np.newaxis]
[ "def", "_normalize_vectors", "(", "rr", ")", ":", "size", "=", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "(", "rr", "*", "rr", ")", ",", "axis", "=", "1", ")", ")", "size", "[", "(", "size", "==", "0", ")", "]", "=", "1.0", "rr", "/=",...
normalize surface vertices .
train
false
33,900
def args_to_list(arg_string):
    """Parse a bracketed, comma-separated argument string into a list.

    Outer whitespace and surrounding '[' / ']' are stripped, then each
    comma-separated token is stripped of whitespace. An empty string
    yields [''], matching str.split(',') semantics.
    """
    inner = arg_string.strip().strip('[]')
    return [token.strip() for token in inner.split(',')]
[ "def", "args_to_list", "(", "arg_string", ")", ":", "arg_list", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "arg_string", ".", "strip", "(", ")", ".", "strip", "(", "'[]'", ")", ".", "split", "(", "','", ")", "]", "return", "arg_list" ...
parses argument-string to a list .
train
false
33,901
# Matplotlib image-comparison test: plots a fixed zig-zag line and asks for
# automatic legend placement (loc=0 == 'best'); the rendered figure is
# compared against the 'legend_auto3' baseline image by the decorator.
@image_comparison(baseline_images=[u'legend_auto3']) def test_legend_auto3(): fig = plt.figure() ax = fig.add_subplot(111) x = [0.9, 0.1, 0.1, 0.9, 0.9, 0.5] y = [0.95, 0.95, 0.05, 0.05, 0.5, 0.5] ax.plot(x, y, u'o-', label=u'line') ax.set_xlim(0.0, 1.0) ax.set_ylim(0.0, 1.0) ax.legend(loc=0)
[ "@", "image_comparison", "(", "baseline_images", "=", "[", "u'legend_auto3'", "]", ")", "def", "test_legend_auto3", "(", ")", ":", "fig", "=", "plt", ".", "figure", "(", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ")", "x", "=", "[", "0.9", ...
test automatic legend placement .
train
false
33,903
# Division helper for sympy-style Vector objects: only vector/scalar is
# allowed (implemented as one * other**-1); vector/vector raises TypeError,
# vector/0 raises ValueError, scalar/vector raises TypeError.
def _vect_div(one, other): if (isinstance(one, Vector) and isinstance(other, Vector)): raise TypeError('Cannot divide two vectors') elif isinstance(one, Vector): if (other == S.Zero): raise ValueError('Cannot divide a vector by zero') return VectorMul(one, Pow(other, S.NegativeOne)) else: raise TypeError('Invalid division involving a vector')
[ "def", "_vect_div", "(", "one", ",", "other", ")", ":", "if", "(", "isinstance", "(", "one", ",", "Vector", ")", "and", "isinstance", "(", "other", ",", "Vector", ")", ")", ":", "raise", "TypeError", "(", "'Cannot divide two vectors'", ")", "elif", "isin...
helper for division involving vectors .
train
false
33,904
def create_generate(kind, project, resource, offset):
    """Factory for Generate* objects.

    ``kind`` selects the class by name: e.g. 'variable' maps to
    ``GenerateVariable``. The class is looked up in module globals
    instead of using ``eval`` on a string built from input, which
    avoids arbitrary-code execution and is faster.

    Raises:
        KeyError: if no matching Generate* class is defined
            (the eval-based original raised NameError here).
    """
    generate_class = globals()['Generate' + kind.title()]
    return generate_class(project, resource, offset)
[ "def", "create_generate", "(", "kind", ",", "project", ",", "resource", ",", "offset", ")", ":", "generate", "=", "eval", "(", "(", "'Generate'", "+", "kind", ".", "title", "(", ")", ")", ")", "return", "generate", "(", "project", ",", "resource", ",",...
a factory for creating generate objects kind can be variable .
train
true
33,905
def MeanVar(xs, ddof=0):
    """Compute the mean and variance of a sequence.

    Returns a (mean, variance) tuple; the variance is delegated to the
    module-level ``Var`` helper with the given delta degrees of freedom.
    """
    data = np.asarray(xs)
    mu = data.mean()
    variance = Var(data, mu, ddof)
    return (mu, variance)
[ "def", "MeanVar", "(", "xs", ",", "ddof", "=", "0", ")", ":", "xs", "=", "np", ".", "asarray", "(", "xs", ")", "mean", "=", "xs", ".", "mean", "(", ")", "s2", "=", "Var", "(", "xs", ",", "mean", ",", "ddof", ")", "return", "(", "mean", ",",...
computes mean and variance .
train
false
33,908
# Thin wrapper over the module's _rvm helper: copy all gems from one RVM
# gemset to another, optionally running as a different user.
def gemset_copy(source, destination, runas=None): return _rvm(['gemset', 'copy', source, destination], runas=runas)
[ "def", "gemset_copy", "(", "source", ",", "destination", ",", "runas", "=", "None", ")", ":", "return", "_rvm", "(", "[", "'gemset'", ",", "'copy'", ",", "source", ",", "destination", "]", ",", "runas", "=", "runas", ")" ]
copy all gems from one gemset to another .
train
false
33,909
def _lstsq_solution(b, bcpy, n): raise NotImplementedError
[ "def", "_lstsq_solution", "(", "b", ",", "bcpy", ",", "n", ")", ":", "raise", "NotImplementedError" ]
extract x from the bcpy scratch space .
train
false
33,910
# Import every module found under `base_path` (a list of directories) as
# `base_name.<module>`, reusing entries already in sys.modules, and return
# the module objects.
# NOTE(review): importer.find_module(...).load_module(...) is the legacy
# import API, deprecated since Python 3.4 and removed in 3.12 -- confirm the
# target Python version.
def load_modules(base_name, base_path): modules = [] for (importer, module_name, _) in pkgutil.iter_modules(base_path): full_module_name = '{}.{}'.format(base_name, module_name) if (full_module_name not in sys.modules): module = importer.find_module(module_name).load_module(full_module_name) else: module = sys.modules[full_module_name] modules.append(module) return modules
[ "def", "load_modules", "(", "base_name", ",", "base_path", ")", ":", "modules", "=", "[", "]", "for", "(", "importer", ",", "module_name", ",", "_", ")", "in", "pkgutil", ".", "iter_modules", "(", "base_path", ")", ":", "full_module_name", "=", "'{}.{}'", ...
load all modules .
train
false
33,911
# Django 1.4 backwards-compatibility shim (#16288): if the 'mail_admins'
# handler has no 'filters' key, warn and attach an implicit
# RequireDebugFalse filter so admin emails are only sent when DEBUG=False.
# The while-loop picks a filter name that does not collide with any
# user-defined filter by appending underscores. Mutates logging_config.
def compat_patch_logging_config(logging_config): if ('filters' not in logging_config.get('handlers', {}).get('mail_admins', {'filters': []})): warnings.warn("You have no filters defined on the 'mail_admins' logging handler: adding implicit debug-false-only filter. See http://docs.djangoproject.com/en/dev/releases/1.4/#request-exceptions-are-now-always-logged", PendingDeprecationWarning) filter_name = 'require_debug_false' filters = logging_config.setdefault('filters', {}) while (filter_name in filters): filter_name = (filter_name + '_') filters[filter_name] = {'()': 'django.utils.log.RequireDebugFalse'} logging_config['handlers']['mail_admins']['filters'] = [filter_name]
[ "def", "compat_patch_logging_config", "(", "logging_config", ")", ":", "if", "(", "'filters'", "not", "in", "logging_config", ".", "get", "(", "'handlers'", ",", "{", "}", ")", ".", "get", "(", "'mail_admins'", ",", "{", "'filters'", ":", "[", "]", "}", ...
backwards-compatibility shim for #16288 fix .
train
false
33,913
# Show an error dialog via Tkinter if available (silently skipped when the
# import fails), then ALWAYS raise RuntimeError with the message -- the
# dialog is informational only; the call never returns normally.
# NOTE: Python 2 only -- uses the Tkinter / tkMessageBox module names
# (renamed to tkinter / tkinter.messagebox in Python 3).
def show_warning(message): try: import Tkinter, tkMessageBox root = Tkinter.Tk() root.withdraw() tkMessageBox.showerror('Spyder', message) except ImportError: pass raise RuntimeError(message)
[ "def", "show_warning", "(", "message", ")", ":", "try", ":", "import", "Tkinter", ",", "tkMessageBox", "root", "=", "Tkinter", ".", "Tk", "(", ")", "root", ".", "withdraw", "(", ")", "tkMessageBox", ".", "showerror", "(", "'Spyder'", ",", "message", ")",...
show warning using tkinter if available .
train
true
33,915
def permission_name(name):
    """Decorator factory that overrides a view's permission name.

    The returned decorator stores ``name`` on the wrapped function as
    ``_permission_name`` and returns the function itself unchanged.
    """
    def decorator(func):
        func._permission_name = name
        return func
    return decorator
[ "def", "permission_name", "(", "name", ")", ":", "def", "wraps", "(", "f", ")", ":", "f", ".", "_permission_name", "=", "name", "return", "f", "return", "wraps" ]
use this decorator to override the name of the permission .
train
false
33,916
# For each dummy-variable column index in `dummy_ind`, compute the discrete
# effect as prediction(x_i=1) - prediction(x_i=0) (log-predictions when the
# method name contains 'ey'), writing the result into column i of `effects`.
# Works on a copy of `exog`; returns the mutated `effects` array.
def _get_dummy_effects(effects, exog, dummy_ind, method, model, params): for i in dummy_ind: exog0 = exog.copy() exog0[:, i] = 0 effect0 = model.predict(params, exog0) exog0[:, i] = 1 effect1 = model.predict(params, exog0) if ('ey' in method): effect0 = np.log(effect0) effect1 = np.log(effect1) effects[:, i] = (effect1 - effect0) return effects
[ "def", "_get_dummy_effects", "(", "effects", ",", "exog", ",", "dummy_ind", ",", "method", ",", "model", ",", "params", ")", ":", "for", "i", "in", "dummy_ind", ":", "exog0", "=", "exog", ".", "copy", "(", ")", "exog0", "[", ":", ",", "i", "]", "="...
if theres a dummy variable .
train
false
33,917
# Resolve `name` against the module-level test-file directory using the
# module's absjoin helper, returning an absolute path.
def test_file(name): return absjoin(test_file_dir, name)
[ "def", "test_file", "(", "name", ")", ":", "return", "absjoin", "(", "test_file_dir", ",", "name", ")" ]
ensure that its possible to easily output files .
train
false
33,918
def getKeyMap():
    """Return the tuple of wsrep status variable names to collect.

    Any key displayed by ``SHOW STATUS LIKE '%wsrep%'`` on the DB host
    could be added here.
    """
    return (
        'wsrep_last_committed',
        'wsrep_replicated',
        'wsrep_repl_keys',
        'wsrep_local_commits',
        'wsrep_received',
        'wsrep_local_send_queue_avg',
        'wsrep_local_recv_queue_avg',
    )
[ "def", "getKeyMap", "(", ")", ":", "myMap", "=", "(", "'wsrep_last_committed'", ",", "'wsrep_replicated'", ",", "'wsrep_repl_keys'", ",", "'wsrep_local_commits'", ",", "'wsrep_received'", ",", "'wsrep_local_send_queue_avg'", ",", "'wsrep_local_recv_queue_avg'", ")", "retu...
you can use everything that is displayed when you call "show status like %wsrep%" on your db-host .
train
false
33,922
# Compose two coordinate-frame transforms (t_first then t_second) into one
# Transform from `fro` to `to`. Validates that t_first starts at `fro`,
# t_first's destination matches t_second's source, and t_second ends at `to`,
# raising RuntimeError with both numeric codes and human-readable frame names
# on any mismatch. The combined matrix is t_second @ t_first.
def combine_transforms(t_first, t_second, fro, to): fro = _to_const(fro) to = _to_const(to) if (t_first['from'] != fro): raise RuntimeError(('From mismatch: %s ("%s") != %s ("%s")' % (t_first['from'], _coord_frame_name(t_first['from']), fro, _coord_frame_name(fro)))) if (t_first['to'] != t_second['from']): raise RuntimeError(('Transform mismatch: t1["to"] = %s ("%s"), t2["from"] = %s ("%s")' % (t_first['to'], _coord_frame_name(t_first['to']), t_second['from'], _coord_frame_name(t_second['from'])))) if (t_second['to'] != to): raise RuntimeError(('To mismatch: %s ("%s") != %s ("%s")' % (t_second['to'], _coord_frame_name(t_second['to']), to, _coord_frame_name(to)))) return Transform(fro, to, np.dot(t_second['trans'], t_first['trans']))
[ "def", "combine_transforms", "(", "t_first", ",", "t_second", ",", "fro", ",", "to", ")", ":", "fro", "=", "_to_const", "(", "fro", ")", "to", "=", "_to_const", "(", "to", ")", "if", "(", "t_first", "[", "'from'", "]", "!=", "fro", ")", ":", "raise...
combine two transforms .
train
false
33,923
# Documentation snippet for google-cloud-logging: creates a uniquely named
# logger, writes a text entry and a struct entry, lists entries in
# descending order, then deletes the logger with not-found backoff and
# removes it from the cleanup list.
@snippet def logger_usage(client, to_delete): LOG_NAME = ('logger_usage_%d' % _millis()) logger = client.logger(LOG_NAME) to_delete.append(logger) logger.log_text('A simple entry') logger.log_struct({'message': 'My second entry', 'weather': 'partly cloudy'}) from google.cloud.logging import DESCENDING for entry in logger.list_entries(order_by=DESCENDING): do_something_with(entry) def _logger_delete(): logger.delete() _backoff_not_found(_logger_delete) to_delete.remove(logger)
[ "@", "snippet", "def", "logger_usage", "(", "client", ",", "to_delete", ")", ":", "LOG_NAME", "=", "(", "'logger_usage_%d'", "%", "_millis", "(", ")", ")", "logger", "=", "client", ".", "logger", "(", "LOG_NAME", ")", "to_delete", ".", "append", "(", "lo...
logger usage .
train
false
33,924
# Prepare the process for gevent-based tests by monkey-patching the standard
# library (sockets, threading, etc.); must run before other imports take
# references to the unpatched modules.
def run_gevent(): from gevent import monkey monkey.patch_all()
[ "def", "run_gevent", "(", ")", ":", "from", "gevent", "import", "monkey", "monkey", ".", "patch_all", "(", ")" ]
prepare to run tests with gevent .
train
false
33,927
# Nox session: run the shared sample test suite under Python 2.7 for every
# non-GAE-standard sample directory.
@nox.parametrize('sample', NON_GAE_STANDARD_SAMPLES) def session_py27(session, sample): session.interpreter = 'python2.7' _session_tests(session, sample)
[ "@", "nox", ".", "parametrize", "(", "'sample'", ",", "NON_GAE_STANDARD_SAMPLES", ")", "def", "session_py27", "(", "session", ",", "sample", ")", ":", "session", ".", "interpreter", "=", "'python2.7'", "_session_tests", "(", "session", ",", "sample", ")" ]
runs py .
train
false
33,928
# Build a lib2to3 STRING leaf node for the given literal text, with an
# optional whitespace/comment prefix.
def String(string, prefix=None): return Leaf(token.STRING, string, prefix=prefix)
[ "def", "String", "(", "string", ",", "prefix", "=", "None", ")", ":", "return", "Leaf", "(", "token", ".", "STRING", ",", "string", ",", "prefix", "=", "prefix", ")" ]
a string leaf .
train
false
33,932
def parse_version_string(version_string):
    """Turn a 'major.minor' version string into a Version object.

    The special value 'latest' is resolved via max_version_string().
    Raises TypeError when the string cannot be parsed into integers.
    """
    if version_string == 'latest':
        version_string = max_version_string()
    try:
        numeric_parts = (int(part) for part in version_string.split('.', 1))
        return Version(*numeric_parts)
    except (ValueError, TypeError) as exc:
        raise TypeError(('invalid version string: %s; %s' % (version_string, exc)))
[ "def", "parse_version_string", "(", "version_string", ")", ":", "if", "(", "version_string", "==", "'latest'", ")", ":", "version_string", "=", "max_version_string", "(", ")", "try", ":", "return", "Version", "(", "*", "(", "int", "(", "value", ")", "for", ...
turn a version string into a version .
train
false
33,934
# Read `num_images` images from a gzipped MNIST-style file: skip the 16-byte
# header, read the raw uint8 pixel payload, and reshape it to
# (num_images, _IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS) using the
# module-level size constants.
def _extract_images(filename, num_images): print('Extracting images from: ', filename) with gzip.open(filename) as bytestream: bytestream.read(16) buf = bytestream.read((((_IMAGE_SIZE * _IMAGE_SIZE) * num_images) * _NUM_CHANNELS)) data = np.frombuffer(buf, dtype=np.uint8) data = data.reshape(num_images, _IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS) return data
[ "def", "_extract_images", "(", "filename", ",", "num_images", ")", ":", "print", "(", "'Extracting images from: '", ",", "filename", ")", "with", "gzip", ".", "open", "(", "filename", ")", "as", "bytestream", ":", "bytestream", ".", "read", "(", "16", ")", ...
extract the images into a numpy array .
train
false
33,935
# Register `callback` as the producer for `resource_type` with the
# module-level resource manager singleton.
def provide(callback, resource_type): _get_manager().register(callback, resource_type)
[ "def", "provide", "(", "callback", ",", "resource_type", ")", ":", "_get_manager", "(", ")", ".", "register", "(", "callback", ",", "resource_type", ")" ]
register a callback as a producer for the resource type .
train
false
33,936
# URL handler for qute:pyeval -- renders the stored pyeval output into the
# 'pre.html' Jinja template and returns a (mimetype, body) tuple. The URL
# argument is unused (required by the handler signature).
@add_handler('pyeval') def qute_pyeval(_url): html = jinja.render('pre.html', title='pyeval', content=pyeval_output) return ('text/html', html)
[ "@", "add_handler", "(", "'pyeval'", ")", "def", "qute_pyeval", "(", "_url", ")", ":", "html", "=", "jinja", ".", "render", "(", "'pre.html'", ",", "title", "=", "'pyeval'", ",", "content", "=", "pyeval_output", ")", "return", "(", "'text/html'", ",", "h...
handler for qute:pyeval .
train
false
33,937
# Assign context.route for a list item: web forms get '<pathname>?name=<doc>',
# documents that carry their own `route` attribute use it directly, and
# everything else falls back to '<pathname-or-doctype>/<docname>' with
# URL-quoted parts.
def set_route(context): if context.web_form_name: context.route = u'{0}?name={1}'.format(context.pathname, quoted(context.doc.name)) elif (context.doc and getattr(context.doc, u'route', None)): context.route = context.doc.route else: context.route = u'{0}/{1}'.format((context.pathname or quoted(context.doc.doctype)), quoted(context.doc.name))
[ "def", "set_route", "(", "context", ")", ":", "if", "context", ".", "web_form_name", ":", "context", ".", "route", "=", "u'{0}?name={1}'", ".", "format", "(", "context", ".", "pathname", ",", "quoted", "(", "context", ".", "doc", ".", "name", ")", ")", ...
set link for the list item .
train
false
33,938
# Locate the x12/x13 ARIMA binary. Resolution order: a directory derived from
# the given path, then the X12PATH/X13PATH environment variables (tried in an
# order controlled by prefer_x13). Each candidate binary name is probed by
# actually invoking it; returns the working executable path or False.
# CAUTION: mutates the module-global _binary_names (reverses it when
# prefer_x13 is False), so repeated calls are order-sensitive.
def _find_x12(x12path=None, prefer_x13=True): global _binary_names if ((x12path is not None) and x12path.endswith(_binary_names)): x12path = os.path.dirname(x12path) if (not prefer_x13): _binary_names = _binary_names[::(-1)] if (x12path is None): x12path = os.getenv('X12PATH', '') if (not x12path): x12path = os.getenv('X13PATH', '') elif (x12path is None): x12path = os.getenv('X13PATH', '') if (not x12path): x12path = os.getenv('X12PATH', '') for binary in _binary_names: x12 = os.path.join(x12path, binary) try: subprocess.check_call(x12, stdout=subprocess.PIPE, stderr=subprocess.PIPE) return x12 except OSError: pass else: return False
[ "def", "_find_x12", "(", "x12path", "=", "None", ",", "prefer_x13", "=", "True", ")", ":", "global", "_binary_names", "if", "(", "(", "x12path", "is", "not", "None", ")", "and", "x12path", ".", "endswith", "(", "_binary_names", ")", ")", ":", "x12path", ...
if x12path is not given .
train
false
33,939
def path_split_all(path):
    """Split ``path`` at every separator.

    Repeatedly applies os.path.split until the tail component is empty,
    then returns the components in root-first order (as a reversed
    iterator). Note a leading root ('/') is dropped, as in the original.
    """
    components = []
    remainder = path
    while True:
        remainder, tail = os.path.split(remainder)
        if not tail:
            break
        components.append(tail)
    return reversed(components)
[ "def", "path_split_all", "(", "path", ")", ":", "parts", "=", "[", "]", "while", "True", ":", "(", "path", ",", "fn", ")", "=", "os", ".", "path", ".", "split", "(", "path", ")", "if", "(", "len", "(", "fn", ")", "==", "0", ")", ":", "break",...
split a path at all path separaters .
train
false
33,940
def _encode_minkey(name, dummy0, dummy1, dummy2): return ('\xff' + name)
[ "def", "_encode_minkey", "(", "name", ",", "dummy0", ",", "dummy1", ",", "dummy2", ")", ":", "return", "(", "'\\xff'", "+", "name", ")" ]
encode bson .
train
false
33,941
# List all machines known to the VirtualBox hypervisor, converting each
# XPCOM IMachine object into a plain attribute dict (extra kwargs are passed
# through to the conversion helper, e.g. to select attributes).
def vb_list_machines(**kwargs): manager = vb_get_manager() machines = manager.getArray(vb_get_box(), 'machines') return [vb_xpcom_to_attribute_dict(machine, 'IMachine', **kwargs) for machine in machines]
[ "def", "vb_list_machines", "(", "**", "kwargs", ")", ":", "manager", "=", "vb_get_manager", "(", ")", "machines", "=", "manager", ".", "getArray", "(", "vb_get_box", "(", ")", ",", "'machines'", ")", "return", "[", "vb_xpcom_to_attribute_dict", "(", "machine",...
which machines does the hypervisor have .
train
true
33,942
# Prefer OP-identifier (OpenID 2.0 IdP) services over plain user services:
# returns the OP services when any exist, otherwise the remaining OpenID
# services, each list re-ordered by type preference via arrangeByType.
def getOPOrUserServices(openid_services): op_services = arrangeByType(openid_services, [OPENID_IDP_2_0_TYPE]) openid_services = arrangeByType(openid_services, OpenIDServiceEndpoint.openid_type_uris) return (op_services or openid_services)
[ "def", "getOPOrUserServices", "(", "openid_services", ")", ":", "op_services", "=", "arrangeByType", "(", "openid_services", ",", "[", "OPENID_IDP_2_0_TYPE", "]", ")", "openid_services", "=", "arrangeByType", "(", "openid_services", ",", "OpenIDServiceEndpoint", ".", ...
extract op identifier services .
train
true
33,943
def _lcm(a, b): if ((a == 0) or (b == 0)): return 0 else: return (abs((a * b)) / gcd(a, b))
[ "def", "_lcm", "(", "a", ",", "b", ")", ":", "if", "(", "(", "a", "==", "0", ")", "or", "(", "b", "==", "0", ")", ")", ":", "return", "0", "else", ":", "return", "(", "abs", "(", "(", "a", "*", "b", ")", ")", "/", "gcd", "(", "a", ","...
least common multiple between 2 integers .
train
true
33,944
# Parse `fields` length-prefixed, hex-encoded text values from a response
# layer (a sequence of hex-digit strings): skip the 4-element header, then
# for each field read a one-element length, join and hex-decode that many
# elements, and advance past them.
# NOTE: Python 2 only -- uses xrange and str.decode('hex') (use
# bytes.fromhex on Python 3).
def parse_response_data(layer, fields): layer = layer[4:] response = [] for i in xrange(fields): ltxt = int(layer[0], 16) text = ''.join(layer[1:(ltxt + 1)]).decode('hex') response.append(text) layer = layer[(ltxt + 1):] return response
[ "def", "parse_response_data", "(", "layer", ",", "fields", ")", ":", "layer", "=", "layer", "[", "4", ":", "]", "response", "=", "[", "]", "for", "i", "in", "xrange", "(", "fields", ")", ":", "ltxt", "=", "int", "(", "layer", "[", "0", "]", ",", ...
parse the data from a response .
train
false
33,945
# Test context manager: get-or-create a Django Site and an enabled
# SiteConfiguration for `domain` (updating the values when the config
# already existed), then patch the site/configuration lookup helpers so code
# under test sees that site and configuration for the duration of the block.
@contextlib.contextmanager def with_site_configuration_context(domain='test.localhost', configuration=None): (site, __) = Site.objects.get_or_create(domain=domain, name=domain) (site_configuration, created) = SiteConfiguration.objects.get_or_create(site=site, defaults={'enabled': True, 'values': configuration}) if (not created): site_configuration.values = configuration site_configuration.save() with patch('openedx.core.djangoapps.site_configuration.helpers.get_current_site_configuration', return_value=site_configuration): with patch('openedx.core.djangoapps.theming.helpers.get_current_site', return_value=site): with patch('django.contrib.sites.models.SiteManager.get_current', return_value=site): (yield)
[ "@", "contextlib", ".", "contextmanager", "def", "with_site_configuration_context", "(", "domain", "=", "'test.localhost'", ",", "configuration", "=", "None", ")", ":", "(", "site", ",", "__", ")", "=", "Site", ".", "objects", ".", "get_or_create", "(", "domai...
a function to get a context manger to run a test with a configuration enabled .
train
false
33,946
def signWrangler(poi):
    """Apply the JSON text conversion to each of a sign POI's text fields.

    Mutates and returns ``poi``, running ``jsonText`` over the four
    Text1..Text4 entries.
    """
    for key in ('Text1', 'Text2', 'Text3', 'Text4'):
        poi[key] = jsonText(poi[key])
    return poi
[ "def", "signWrangler", "(", "poi", ")", ":", "for", "field", "in", "[", "'Text1'", ",", "'Text2'", ",", "'Text3'", ",", "'Text4'", "]", ":", "poi", "[", "field", "]", "=", "jsonText", "(", "poi", "[", "field", "]", ")", "return", "poi" ]
just does the json things for signs .
train
false
33,948
# Write a key/value pair to the Android system property store by spawning
# `setprop` on the attached device (via the @with_device decorator) and
# returning its stripped output.
@with_device def setprop(name, value): return process(['setprop', name, value]).recvall().strip()
[ "@", "with_device", "def", "setprop", "(", "name", ",", "value", ")", ":", "return", "process", "(", "[", "'setprop'", ",", "name", ",", "value", "]", ")", ".", "recvall", "(", ")", ".", "strip", "(", ")" ]
writes a property to the system property store .
train
false
33,949
# Test context manager simulating an embargoed course: clears caches, wipes
# existing country rules, blacklists country 'IR' for the course, mocks GeoIP
# so every request appears to come from IR, and yields the URL users should
# be redirected to for the given access point.
@contextlib.contextmanager def restrict_course(course_key, access_point='enrollment', disable_access_check=False): cache.clear() with mock.patch.object(pygeoip.GeoIP, 'country_code_by_addr') as mock_ip: CountryAccessRule.objects.all().delete() (country, __) = Country.objects.get_or_create(country='IR') (restricted_course, __) = RestrictedCourse.objects.get_or_create(course_key=course_key) restricted_course.enroll_msg_key = 'default' restricted_course.access_msg_key = 'default' restricted_course.disable_access_check = disable_access_check restricted_course.save() CountryAccessRule.objects.get_or_create(restricted_course=restricted_course, country=country, rule_type='blacklist') mock_ip.return_value = 'IR' redirect_url = reverse('embargo_blocked_message', kwargs={'access_point': access_point, 'message_key': 'default'}) (yield redirect_url)
[ "@", "contextlib", ".", "contextmanager", "def", "restrict_course", "(", "course_key", ",", "access_point", "=", "'enrollment'", ",", "disable_access_check", "=", "False", ")", ":", "cache", ".", "clear", "(", ")", "with", "mock", ".", "patch", ".", "object", ...
simulate that a course is restricted .
train
false
33,951
# Light wrapper accepting either a path string or a file-like object:
# strings are opened with the module's _open helper; file-like objects are
# duck-checked for the read/write attributes the requested mode needs and
# wrapped in EmptyContextManager so callers can uniformly use `with` without
# the caller's file being closed. Raises ValueError for anything else.
def get_file_obj(fname, mode='r', encoding=None): if _is_string_like(fname): return _open(fname, mode, encoding) try: if ('r' in mode): fname.read if (('w' in mode) or ('a' in mode)): fname.write except AttributeError: raise ValueError('fname must be a string or a file-like object') return EmptyContextManager(fname)
[ "def", "get_file_obj", "(", "fname", ",", "mode", "=", "'r'", ",", "encoding", "=", "None", ")", ":", "if", "_is_string_like", "(", "fname", ")", ":", "return", "_open", "(", "fname", ",", "mode", ",", "encoding", ")", "try", ":", "if", "(", "'r'", ...
light wrapper to handle strings and let files pass through .
train
false
33,952
# RESTful CRUD controller for HRM training events; all behavior is delegated
# to the shared s3db controller.
def training_event(): return s3db.hrm_training_event_controller()
[ "def", "training_event", "(", ")", ":", "return", "s3db", ".", "hrm_training_event_controller", "(", ")" ]
training events controller .
train
false
33,953
def create_annotation(module):
    """Build an annotation dict from an Ansible module's params.

    Defaults: ``duration`` falls back to 0, ``start`` to the current
    time, and ``stop`` to now + duration (note: now, not start).
    Returns a dict with integer 'start'/'stop' plus the 'category',
    'description' and 'title' params passed through unchanged.

    Fixes the original's ``!= None`` comparisons (PEP 8: use
    ``is not None``) and removes the repeated params lookups.
    """
    params = module.params
    duration = params['duration'] if params['duration'] is not None else 0
    start = params['start'] if params['start'] is not None else int(time.time())
    stop = params['stop'] if params['stop'] is not None else int(time.time()) + duration
    return {
        'start': int(start),
        'stop': int(stop),
        'category': params['category'],
        'description': params['description'],
        'title': params['title'],
    }
[ "def", "create_annotation", "(", "module", ")", ":", "annotation", "=", "{", "}", "if", "(", "module", ".", "params", "[", "'duration'", "]", "!=", "None", ")", ":", "duration", "=", "module", ".", "params", "[", "'duration'", "]", "else", ":", "durati...
takes ansible module object .
train
false
33,954
def cross_list(*sequences):
    """Return the Cartesian product of the sequences as a list of lists.

    Equivalent to itertools.product, but each combination is a list and
    the whole result is materialized eagerly.
    """
    combos = [[]]
    for sequence in sequences:
        combos = [prefix + [element]
                  for prefix in combos
                  for element in sequence]
    return combos
[ "def", "cross_list", "(", "*", "sequences", ")", ":", "result", "=", "[", "[", "]", "]", "for", "seq", "in", "sequences", ":", "result", "=", "[", "(", "sublist", "+", "[", "item", "]", ")", "for", "sublist", "in", "result", "for", "item", "in", ...
from: URL .
train
true
33,957
def meta_command(name, bases, attrs):
    """Metaclass callable that collects command methods on a new type.

    Attributes with a truthy ``__command__`` flag are gathered into
    ``__commands__`` (name -> method) and ``__docs__`` (name -> stripped
    docstring, or a default 'perform the X command' description). A
    default ``run(command, *args, **kwargs)`` dispatcher is installed
    unless the class defines its own.
    """
    commands = {}
    docs = {}
    for attr_name, attr_value in attrs.items():
        if not getattr(attr_value, '__command__', False):
            continue
        commands[attr_name] = attr_value
        doc = getattr(attr_value, '__doc__', None)
        docs[attr_name] = (doc or ('perform the %s command' % attr_name)).strip()
    attrs['__commands__'] = commands
    attrs['__docs__'] = docs

    def run(self, command, *args, **kwargs):
        # Dispatch by name through the collected command table.
        return self.__commands__[command](self, *args, **kwargs)

    attrs.setdefault('run', run)
    return type(name, bases, attrs)
[ "def", "meta_command", "(", "name", ",", "bases", ",", "attrs", ")", ":", "commands", "=", "{", "}", "docs", "=", "{", "}", "for", "(", "attr", ",", "value", ")", "in", "attrs", ".", "items", "(", ")", ":", "if", "getattr", "(", "value", ",", "...
look for attrs with a truthy attribute __command__ and add them to an attribute __commands__ on the type that maps names to decorated methods .
train
false
33,958
# Reload a document's definition from its on-disk model files; thin
# pass-through to frappe.modules.reload_doc (imported lazily to avoid a
# circular import at module load time).
def reload_doc(module, dt=None, dn=None, force=False, reset_permissions=False): import frappe.modules return frappe.modules.reload_doc(module, dt, dn, force=force, reset_permissions=reset_permissions)
[ "def", "reload_doc", "(", "module", ",", "dt", "=", "None", ",", "dn", "=", "None", ",", "force", "=", "False", ",", "reset_permissions", "=", "False", ")", ":", "import", "frappe", ".", "modules", "return", "frappe", ".", "modules", ".", "reload_doc", ...
reload document from model files .
train
false
33,960
# Check whether the installed pandas is at least `min_version`, comparing
# version strings with distutils' LooseVersion.
# NOTE(review): distutils (and LooseVersion) was removed in Python 3.12;
# packaging.version.Version is the modern replacement -- confirm target
# Python before reuse.
def is_pandas_min_version(min_version): from pandas import __version__ as pversion return (LooseVersion(pversion) >= min_version)
[ "def", "is_pandas_min_version", "(", "min_version", ")", ":", "from", "pandas", "import", "__version__", "as", "pversion", "return", "(", "LooseVersion", "(", "pversion", ")", ">=", "min_version", ")" ]
check whether pandas is at least min_version .
train
false
33,963
def bare_except(logical_line, noqa):
    r"""Yield an E722 offense when a bare ``except:`` clause is used.

    Skips checking entirely when the line carries a noqa marker.
    Yields (offset, message) tuples in pycodestyle's checker style.

    Fixes the original's garbled message string ("bare except'" with a
    stray trailing quote) to pycodestyle's actual E722 text, and drops
    the redundant per-call re.compile (re.match caches patterns).
    """
    if noqa:
        return
    match = re.match(r'except\s*:', logical_line)
    if match:
        yield match.start(), "E722 do not use bare 'except'"
[ "def", "bare_except", "(", "logical_line", ",", "noqa", ")", ":", "if", "noqa", ":", "return", "regex", "=", "re", ".", "compile", "(", "'except\\\\s*:'", ")", "match", "=", "regex", ".", "match", "(", "logical_line", ")", "if", "match", ":", "(", "yie...
when catching exceptions .
train
true
33,966
# Fabric task: deploy a web2py app without taking a backup. Zips the app's
# Python/HTML/static files locally (removing any stale _update.zip first),
# uploads the archive, unzips it into the remote app folder, fixes ownership
# to www-data, stamps DATE_DEPLOYMENT, and always removes the remote temp
# zip. Usage: fab -H user@host deploynobackup:appname (defaults to the
# current directory's name).
def deploynobackup(appname=None): appname = (appname or os.path.split(os.getcwd())[(-1)]) appfolder = ((applications + '/') + appname) zipfile = os.path.join(appfolder, '_update.zip') if os.path.exists(zipfile): os.unlink(zipfile) local('zip -r _update.zip */*.py */*/*.py views/*.html views/*/*.html static/*') put('_update.zip', '/tmp/_update.zip') try: with cd(appfolder): sudo('unzip -o /tmp/_update.zip') sudo('chown -R www-data:www-data *') sudo(('echo "%s" > DATE_DEPLOYMENT' % now)) finally: sudo('rm /tmp/_update.zip')
[ "def", "deploynobackup", "(", "appname", "=", "None", ")", ":", "appname", "=", "(", "appname", "or", "os", ".", "path", ".", "split", "(", "os", ".", "getcwd", "(", ")", ")", "[", "(", "-", "1", ")", "]", ")", "appfolder", "=", "(", "(", "appl...
fab -h username@host deploy:appname .
train
false
33,967
# Encoding kludge: call `function`, encoding string arguments via the
# module's ss() helper on non-Windows platforms, then normalize the result
# (lists/tuples re-encoded element-wise, byte strings decoded to unicode).
# NOTE: Python 2 only -- relies on `unicode`, `str`-as-bytes and the
# module-level `name` (os.name) binding.
def ek(function, *args, **kwargs): if (name == 'nt'): result = function(*args, **kwargs) else: result = function(*[(ss(x) if isinstance(x, (str, unicode)) else x) for x in args], **kwargs) if isinstance(result, (list, tuple)): return _fix_list_encoding(result) if isinstance(result, str): return _to_unicode(result) return result
[ "def", "ek", "(", "function", ",", "*", "args", ",", "**", "kwargs", ")", ":", "if", "(", "name", "==", "'nt'", ")", ":", "result", "=", "function", "(", "*", "args", ",", "**", "kwargs", ")", "else", ":", "result", "=", "function", "(", "*", "...
encoding kludge: call function with arguments and unicode-encode output .
train
false
33,968
def _serialize_agent(controlamp): return str(controlamp.transport.getPeer())
[ "def", "_serialize_agent", "(", "controlamp", ")", ":", "return", "str", "(", "controlamp", ".", "transport", ".", "getPeer", "(", ")", ")" ]
serialize a connected controlamp to the address of its peer .
train
false
33,970
# Return True if the character matches the module-level body-quopri regex
# (bqre), i.e. it must be escaped when quoted-printable encoding a body.
def body_quopri_check(c): return bool(bqre.match(c))
[ "def", "body_quopri_check", "(", "c", ")", ":", "return", "bool", "(", "bqre", ".", "match", "(", "c", ")", ")" ]
return true if the character should be escaped with body quopri .
train
false
33,971
# Parse Yahoo Finance historical data from file handle `fh` in
# open-close-high-low column order; thin wrapper over the shared
# _parse_yahoo_historical with ochl=True.
def parse_yahoo_historical_ochl(fh, adjusted=True, asobject=False): return _parse_yahoo_historical(fh, adjusted=adjusted, asobject=asobject, ochl=True)
[ "def", "parse_yahoo_historical_ochl", "(", "fh", ",", "adjusted", "=", "True", ",", "asobject", "=", "False", ")", ":", "return", "_parse_yahoo_historical", "(", "fh", ",", "adjusted", "=", "adjusted", ",", "asobject", "=", "asobject", ",", "ochl", "=", "Tru...
parse the historical data in file handle fh from yahoo finance .
train
false
33,972
# Capability probe for the OpenStack Swift storage backend: returns
# (True, None) when django-storage-swift's SwiftStorage is importable,
# otherwise (False, localized reason) -- covering both the missing-package
# and misconfigured cases.
def get_can_use_openstack_swift(): try: if has_module(u'swift.storage', members=[u'SwiftStorage']): return (True, None) else: return (False, _(u'OpenStack Swift depends on django-storage-swift, which is not installed')) except ImproperlyConfigured as e: return (False, (_(u'OpenStack Swift backend failed to load: %s') % e))
[ "def", "get_can_use_openstack_swift", "(", ")", ":", "try", ":", "if", "has_module", "(", "u'swift.storage'", ",", "members", "=", "[", "u'SwiftStorage'", "]", ")", ":", "return", "(", "True", ",", "None", ")", "else", ":", "return", "(", "False", ",", "...
check whether django-storage-swift is installed .
train
false
33,973
# RESTful CRUD controller for supply item entities; delegates to the shared
# s3db controller.
def item_entity(): return s3db.supply_item_entity_controller()
[ "def", "item_entity", "(", ")", ":", "return", "s3db", ".", "supply_item_entity_controller", "(", ")" ]
restful crud controller .
train
false
33,974
# Django view (login required): list and add subprojects for a project the
# current user administers. On a valid POST, saves the SubprojectForm,
# broadcasts the symlink task to app servers, and redirects back to this
# page (POST-redirect-GET); otherwise renders the form plus existing
# subprojects.
@login_required def project_subprojects(request, project_slug): project = get_object_or_404(Project.objects.for_admin_user(request.user), slug=project_slug) form_kwargs = {'parent': project, 'user': request.user} if (request.method == 'POST'): form = SubprojectForm(request.POST, **form_kwargs) if form.is_valid(): form.save() broadcast(type='app', task=tasks.symlink_subproject, args=[project.pk]) project_dashboard = reverse('projects_subprojects', args=[project.slug]) return HttpResponseRedirect(project_dashboard) else: form = SubprojectForm(**form_kwargs) subprojects = project.subprojects.all() return render_to_response('projects/project_subprojects.html', {'form': form, 'project': project, 'subprojects': subprojects}, context_instance=RequestContext(request))
[ "@", "login_required", "def", "project_subprojects", "(", "request", ",", "project_slug", ")", ":", "project", "=", "get_object_or_404", "(", "Project", ".", "objects", ".", "for_admin_user", "(", "request", ".", "user", ")", ",", "slug", "=", "project_slug", ...
project subprojects view and form view .
train
false
33,976
def get_module_source_file(module_name):
    """Return the path of a module's ``.py`` source file.

    Imports ``module_name`` (fromlist=[''] forces the leaf submodule to
    be returned for dotted names) and maps a compiled ``.pyc`` path back
    to the corresponding ``.py`` file by dropping the trailing 'c'.

    Fix: removed the original's dead ``module_name.split`` expression
    (an attribute access whose result was discarded -- a no-op).
    """
    source_file = __import__(module_name, fromlist=['']).__file__
    if source_file.endswith('.pyc'):
        return source_file[:-1]
    return source_file
[ "def", "get_module_source_file", "(", "module_name", ")", ":", "module_name", ".", "split", "source_file", "=", "__import__", "(", "module_name", ",", "fromlist", "=", "[", "''", "]", ")", ".", "__file__", "if", "source_file", ".", "endswith", "(", "'.pyc'", ...
URL URL .
train
false
33,977
# Naive reference upfirdn: upsample x by zero-stuffing (factor `up`),
# FIR-filter with h via full convolution, downsample by `down`, and trim to
# the module-level _output_len(...) sample count.
# Note: argument order (x, h) deliberately differs from
# scipy.signal.upfirdn(h, x) to ease np.apply_along_axis use.
def upfirdn_naive(x, h, up=1, down=1): h = np.asarray(h) out = np.zeros((len(x) * up), x.dtype) out[::up] = x out = np.convolve(h, out)[::down][:_output_len(len(h), len(x), up, down)] return out
[ "def", "upfirdn_naive", "(", "x", ",", "h", ",", "up", "=", "1", ",", "down", "=", "1", ")", ":", "h", "=", "np", ".", "asarray", "(", "h", ")", "out", "=", "np", ".", "zeros", "(", "(", "len", "(", "x", ")", "*", "up", ")", ",", "x", "...
naive upfirdn processing in python note: arg order differs to facilitate apply_along_axis use .
train
false
33,978
def instance_get_all_hung_in_rebooting(context, reboot_window): return IMPL.instance_get_all_hung_in_rebooting(context, reboot_window)
[ "def", "instance_get_all_hung_in_rebooting", "(", "context", ",", "reboot_window", ")", ":", "return", "IMPL", ".", "instance_get_all_hung_in_rebooting", "(", "context", ",", "reboot_window", ")" ]
get all instances stuck in a rebooting state .
train
false
33,980
def get_results(arg): bookmarks = tower.get_bookmarks(BOOKMARKS_FILE) if (arg == ''): results = sorted(bookmarks, key=(lambda x: x.sort_order)) else: titles = alp.fuzzy_search(arg, bookmarks, key=(lambda x: x.title)) paths = alp.fuzzy_search(arg, bookmarks, key=(lambda x: x.path)) results = set(titles).union(set(paths)) return results
[ "def", "get_results", "(", "arg", ")", ":", "bookmarks", "=", "tower", ".", "get_bookmarks", "(", "BOOKMARKS_FILE", ")", "if", "(", "arg", "==", "''", ")", ":", "results", "=", "sorted", "(", "bookmarks", ",", "key", "=", "(", "lambda", "x", ":", "x"...
returns a list of bookmark objects after a title search based on the given arg .
train
false
33,981
def _click_edit(page_object, button_css, view_css, bounded_selector=(lambda x: x)): page_object.q(css=bounded_selector(button_css)).first.click() EmptyPromise((lambda : page_object.q(css=view_css).present), 'Wait for the Studio editor to be present').fulfill() return page_object
[ "def", "_click_edit", "(", "page_object", ",", "button_css", ",", "view_css", ",", "bounded_selector", "=", "(", "lambda", "x", ":", "x", ")", ")", ":", "page_object", ".", "q", "(", "css", "=", "bounded_selector", "(", "button_css", ")", ")", ".", "firs...
click on the first editing button found and wait for the studio editor to be present .
train
false
33,982
def randomNumbers(b=6): random_number = (int(''.join((random.choice(string.digits) for x in range(b)))) + 10000) if (random_number < 100000): random_number = (random_number + 100000) return random_number
[ "def", "randomNumbers", "(", "b", "=", "6", ")", ":", "random_number", "=", "(", "int", "(", "''", ".", "join", "(", "(", "random", ".", "choice", "(", "string", ".", "digits", ")", "for", "x", "in", "range", "(", "b", ")", ")", ")", ")", "+", ...
returns a random string/key of "b" characters in length .
train
false
33,983
def p_compound_statement_2(t): pass
[ "def", "p_compound_statement_2", "(", "t", ")", ":", "pass" ]
compound_statement : lbrace statement_list rbrace .
train
false
33,984
def ThrottleLayout(bandwidth_limit, http_limit, rps_limit): bulkloader_limits = dict(remote_api_throttle.NO_LIMITS) bulkloader_limits.update({remote_api_throttle.BANDWIDTH_UP: bandwidth_limit, remote_api_throttle.BANDWIDTH_DOWN: bandwidth_limit, remote_api_throttle.REQUESTS: http_limit, remote_api_throttle.HTTPS_BANDWIDTH_UP: bandwidth_limit, remote_api_throttle.HTTPS_BANDWIDTH_DOWN: bandwidth_limit, remote_api_throttle.HTTPS_REQUESTS: http_limit, remote_api_throttle.ENTITIES_FETCHED: rps_limit, remote_api_throttle.ENTITIES_MODIFIED: rps_limit}) return bulkloader_limits
[ "def", "ThrottleLayout", "(", "bandwidth_limit", ",", "http_limit", ",", "rps_limit", ")", ":", "bulkloader_limits", "=", "dict", "(", "remote_api_throttle", ".", "NO_LIMITS", ")", "bulkloader_limits", ".", "update", "(", "{", "remote_api_throttle", ".", "BANDWIDTH_...
return a dictionary indicating the throttle options .
train
false
33,985
def parse_tadm_weights(paramfile): weights = [] for line in paramfile: weights.append(float(line.strip())) return numpy.array(weights, u'd')
[ "def", "parse_tadm_weights", "(", "paramfile", ")", ":", "weights", "=", "[", "]", "for", "line", "in", "paramfile", ":", "weights", ".", "append", "(", "float", "(", "line", ".", "strip", "(", ")", ")", ")", "return", "numpy", ".", "array", "(", "we...
given the stdout output generated by tadm when training a model .
train
false
33,989
def pow_high(p, max_denom=1024): assert (p > 1) p = Fraction((1 / Fraction(p))).limit_denominator(max_denom) if ((1 / p) == int((1 / p))): return (int((1 / p)), (p, (1 - p))) return ((1 / p), (p, (1 - p)))
[ "def", "pow_high", "(", "p", ",", "max_denom", "=", "1024", ")", ":", "assert", "(", "p", ">", "1", ")", "p", "=", "Fraction", "(", "(", "1", "/", "Fraction", "(", "p", ")", ")", ")", ".", "limit_denominator", "(", "max_denom", ")", "if", "(", ...
return power tuple x <= t^ 1^ user wants the epigraph variable t .
train
false
33,990
@require_context @pick_context_manager_writer def ec2_instance_create(context, instance_uuid, id=None): ec2_instance_ref = models.InstanceIdMapping() ec2_instance_ref.update({'uuid': instance_uuid}) if (id is not None): ec2_instance_ref.update({'id': id}) ec2_instance_ref.save(context.session) return ec2_instance_ref
[ "@", "require_context", "@", "pick_context_manager_writer", "def", "ec2_instance_create", "(", "context", ",", "instance_uuid", ",", "id", "=", "None", ")", ":", "ec2_instance_ref", "=", "models", ".", "InstanceIdMapping", "(", ")", "ec2_instance_ref", ".", "update"...
create ec2 compatible instance by provided uuid .
train
false
33,991
@magic_arguments() def magic_foo2(self, args): return parse_argstring(magic_foo2, args)
[ "@", "magic_arguments", "(", ")", "def", "magic_foo2", "(", "self", ",", "args", ")", ":", "return", "parse_argstring", "(", "magic_foo2", ",", "args", ")" ]
a docstring .
train
false
33,992
def test_label_consistency(): label_1 = np.arange(5).reshape(1, (-1)) label_2 = np.array([0, 1]) colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1), (1, 1, 0), (1, 0, 1)] rgb_1 = label2rgb(label_1, colors=colors) rgb_2 = label2rgb(label_2, colors=colors) for label_id in label_2.flat: assert_close(rgb_1[(label_1 == label_id)], rgb_2[(label_2 == label_id)])
[ "def", "test_label_consistency", "(", ")", ":", "label_1", "=", "np", ".", "arange", "(", "5", ")", ".", "reshape", "(", "1", ",", "(", "-", "1", ")", ")", "label_2", "=", "np", ".", "array", "(", "[", "0", ",", "1", "]", ")", "colors", "=", ...
assert that the same labels map to the same colors .
train
false
33,994
def test_parse_ave(): conditions = parse_config(ave_fname) assert_true((len(conditions) == 4))
[ "def", "test_parse_ave", "(", ")", ":", "conditions", "=", "parse_config", "(", "ave_fname", ")", "assert_true", "(", "(", "len", "(", "conditions", ")", "==", "4", ")", ")" ]
test parsing of .
train
false
33,996
def get_git_version(): if DEVELOP: match = '--match=*.*.*build*' else: match = '--match=*.*.*' try: version = subprocess.check_output(('git describe --abbrev=4 --tags'.split() + [match])).strip() except: version = 'unknown' fout = open('RELEASE-VERSION', 'wb') fout.write(version) fout.write('\n') fout.close() fout = open('GIT-COMMIT', 'wb') try: commit = subprocess.check_output('git rev-parse HEAD'.split()).strip() except: commit = 'unknown' fout.write(commit) fout.write('\n') fout.close() return version
[ "def", "get_git_version", "(", ")", ":", "if", "DEVELOP", ":", "match", "=", "'--match=*.*.*build*'", "else", ":", "match", "=", "'--match=*.*.*'", "try", ":", "version", "=", "subprocess", ".", "check_output", "(", "(", "'git describe --abbrev=4 --tags'", ".", ...
get the version from git .
train
false
33,997
@control_command(args=[(u'n', int)], signature=u'[N=1]') def pool_shrink(state, n=1, **kwargs): if state.consumer.controller.autoscaler: state.consumer.controller.autoscaler.force_scale_down(n) else: state.consumer.pool.shrink(n) state.consumer._update_prefetch_count((- n)) return ok(u'pool will shrink')
[ "@", "control_command", "(", "args", "=", "[", "(", "u'n'", ",", "int", ")", "]", ",", "signature", "=", "u'[N=1]'", ")", "def", "pool_shrink", "(", "state", ",", "n", "=", "1", ",", "**", "kwargs", ")", ":", "if", "state", ".", "consumer", ".", ...
shrink pool by n processes/threads .
train
false
33,998
def _get_vispy_font_filename(face, bold, italic): name = (face + '-') name += ('Regular' if ((not bold) and (not italic)) else '') name += ('Bold' if bold else '') name += ('Italic' if italic else '') name += '.ttf' return load_data_file(('fonts/%s' % name))
[ "def", "_get_vispy_font_filename", "(", "face", ",", "bold", ",", "italic", ")", ":", "name", "=", "(", "face", "+", "'-'", ")", "name", "+=", "(", "'Regular'", "if", "(", "(", "not", "bold", ")", "and", "(", "not", "italic", ")", ")", "else", "''"...
fetch a remote vispy font .
train
true
34,000
def unlink_all(rc_object): for kind in ALL_FOUR: os.unlink(getattr(rc_object, kind))
[ "def", "unlink_all", "(", "rc_object", ")", ":", "for", "kind", "in", "ALL_FOUR", ":", "os", ".", "unlink", "(", "getattr", "(", "rc_object", ",", "kind", ")", ")" ]
unlink all four items associated with this renewablecert .
train
false
34,001
def _check_for_cores(): return (len(__salt__['config.option']('solr.cores')) > 0)
[ "def", "_check_for_cores", "(", ")", ":", "return", "(", "len", "(", "__salt__", "[", "'config.option'", "]", "(", "'solr.cores'", ")", ")", ">", "0", ")" ]
private method checks to see if using_cores has been set or not .
train
false
34,002
def _chunks(l, n): for i in range(0, len(l), n): (yield l[i:(i + n)])
[ "def", "_chunks", "(", "l", ",", "n", ")", ":", "for", "i", "in", "range", "(", "0", ",", "len", "(", "l", ")", ",", "n", ")", ":", "(", "yield", "l", "[", "i", ":", "(", "i", "+", "n", ")", "]", ")" ]
yield successive chunks from list  l with a minimum size  n .
train
true
34,003
def test_no_translation(): old_lang = translation.get_language() try: translation.activate('pt-br') with no_translation(): assert (translation.get_language().lower() == settings.LANGUAGE_CODE.lower()) assert (translation.get_language() == 'pt-br') with no_translation('es'): assert (translation.get_language() == 'es') assert (translation.get_language() == 'pt-br') finally: translation.activate(old_lang)
[ "def", "test_no_translation", "(", ")", ":", "old_lang", "=", "translation", ".", "get_language", "(", ")", "try", ":", "translation", ".", "activate", "(", "'pt-br'", ")", "with", "no_translation", "(", ")", ":", "assert", "(", "translation", ".", "get_lang...
no_translation provides a context where only the default language is active .
train
false
34,005
def feed(request, url, feed_dict=None): from django.contrib.syndication.feeds import Feed as LegacyFeed import warnings warnings.warn('The syndication feed() view is deprecated. Please use the new class based view API.', category=DeprecationWarning) if (not feed_dict): raise Http404('No feeds are registered.') try: (slug, param) = url.split('/', 1) except ValueError: (slug, param) = (url, '') try: f = feed_dict[slug] except KeyError: raise Http404(("Slug %r isn't registered." % slug)) if (not issubclass(f, LegacyFeed)): instance = f() instance.feed_url = (getattr(f, 'feed_url', None) or request.path) instance.title_template = (f.title_template or ('feeds/%s_title.html' % slug)) instance.description_template = (f.description_template or ('feeds/%s_description.html' % slug)) return instance(request) try: feedgen = f(slug, request).get_feed(param) except FeedDoesNotExist: raise Http404(('Invalid feed parameters. Slug %r is valid, but other parameters, or lack thereof, are not.' % slug)) response = HttpResponse(mimetype=feedgen.mime_type) feedgen.write(response, 'utf-8') return response
[ "def", "feed", "(", "request", ",", "url", ",", "feed_dict", "=", "None", ")", ":", "from", "django", ".", "contrib", ".", "syndication", ".", "feeds", "import", "Feed", "as", "LegacyFeed", "import", "warnings", "warnings", ".", "warn", "(", "'The syndicat...
provided for backwards compatibility .
train
false
34,006
def test_cli_roles_override_decorator_roles(): @roles('r1') def command(): pass eq_effective_roles(command, ['r2'], cli_roles=['r2'], env={'roledefs': fake_roles})
[ "def", "test_cli_roles_override_decorator_roles", "(", ")", ":", "@", "roles", "(", "'r1'", ")", "def", "command", "(", ")", ":", "pass", "eq_effective_roles", "(", "command", ",", "[", "'r2'", "]", ",", "cli_roles", "=", "[", "'r2'", "]", ",", "env", "=...
if cli roles are provided they replace roles defined in @roles .
train
false
34,007
def _get_locale_dirs(resources, include_core=True): contrib_dir = os.path.join(os.getcwd(), 'django', 'contrib') dirs = [] for contrib_name in os.listdir(contrib_dir): path = os.path.join(contrib_dir, contrib_name, 'locale') if os.path.isdir(path): dirs.append((contrib_name, path)) if (contrib_name in HAVE_JS): dirs.append((('%s-js' % contrib_name), path)) if include_core: dirs.insert(0, ('core', os.path.join(os.getcwd(), 'django', 'conf', 'locale'))) if (resources is not None): res_names = [d[0] for d in dirs] dirs = [ld for ld in dirs if (ld[0] in resources)] if (len(resources) > len(dirs)): print ('You have specified some unknown resources. Available resource names are: %s' % (', '.join(res_names),)) exit(1) return dirs
[ "def", "_get_locale_dirs", "(", "resources", ",", "include_core", "=", "True", ")", ":", "contrib_dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "'django'", ",", "'contrib'", ")", "dirs", "=", "[", "]", "for", "co...
return a tuple for all locale directories .
train
false
34,008
def assert_request_user_has_resource_db_permission(request, resource_db, permission_type): has_permission = request_user_has_resource_db_permission(request=request, resource_db=resource_db, permission_type=permission_type) if (not has_permission): user_db = get_user_db_from_request(request=request) raise ResourceAccessDeniedError(user_db=user_db, resource_db=resource_db, permission_type=permission_type)
[ "def", "assert_request_user_has_resource_db_permission", "(", "request", ",", "resource_db", ",", "permission_type", ")", ":", "has_permission", "=", "request_user_has_resource_db_permission", "(", "request", "=", "request", ",", "resource_db", "=", "resource_db", ",", "p...
check that currently logged-in user has specified permission on the provied resource .
train
false
34,010
def test_export(): tpot_obj = TPOTClassifier() try: tpot_obj.export('test_export.py') assert False except ValueError: pass
[ "def", "test_export", "(", ")", ":", "tpot_obj", "=", "TPOTClassifier", "(", ")", "try", ":", "tpot_obj", ".", "export", "(", "'test_export.py'", ")", "assert", "False", "except", "ValueError", ":", "pass" ]
assert that tpots export function throws a valueerror when no optimized pipeline exists .
train
false
34,011
def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed): if (len(new_directory_list) > 0): working_dir = os.path.join(pre_existing_dir, new_directory_list.pop(0)) directory_args['path'] = working_dir changed = module.set_fs_attributes_if_different(directory_args, changed) changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed) return changed
[ "def", "adjust_recursive_directory_permissions", "(", "pre_existing_dir", ",", "new_directory_list", ",", "module", ",", "directory_args", ",", "changed", ")", ":", "if", "(", "len", "(", "new_directory_list", ")", ">", "0", ")", ":", "working_dir", "=", "os", "...
walk the new directories list and make sure that permissions are as we would expect .
train
false
34,012
def souptest_fragment(fragment): souptest_doctype = getattr(souptest_fragment, 'souptest_doctype', None) if (souptest_doctype is None): with open(ENTITY_DTD_PATH, 'r') as ent_file: souptest_doctype = (SOUPTEST_DOCTYPE_FMT % ent_file.read()) souptest_fragment.souptest_doctype = souptest_doctype if ('<![CDATA' in fragment): raise SoupUnexpectedCDataSectionError(fragment) documentized_fragment = ('%s<div>%s</div>' % (souptest_doctype, fragment)) s = StringIO(documentized_fragment) try: parser = lxml.etree.XMLParser() for node in lxml.etree.parse(s, parser).iter(): souptest_sniff_node(node) except lxml.etree.XMLSyntaxError: (type_, value, trace) = sys.exc_info() if value.msg.startswith('xmlParseCharRef: invalid xmlChar '): raise SoupUnsupportedEntityError, (value,), trace undef_ent = re.match(UNDEFINED_ENTITY_RE, value.msg) if undef_ent: raise SoupUnsupportedEntityError, (value, undef_ent.group(1)), trace raise SoupSyntaxError, (value,), trace
[ "def", "souptest_fragment", "(", "fragment", ")", ":", "souptest_doctype", "=", "getattr", "(", "souptest_fragment", ",", "'souptest_doctype'", ",", "None", ")", "if", "(", "souptest_doctype", "is", "None", ")", ":", "with", "open", "(", "ENTITY_DTD_PATH", ",", ...
check if an html fragment is sane and safe to embed .
train
false
34,013
def fileinfo(path): cmd = ('mfsfileinfo ' + path) ret = {} chunknum = '' out = __salt__['cmd.run_all'](cmd, python_shell=False) output = out['stdout'].splitlines() for line in output: if (not line): continue if ('/' in line): comps = line.split('/') chunknum = comps[0].strip().split(':') meta = comps[1].strip().split(' ') chunk = chunknum[0].replace('chunk ', '') loc = chunknum[1].strip() id_ = meta[0].replace('(id:', '') ver = meta[1].replace(')', '').replace('ver:', '') ret[chunknum[0]] = {'chunk': chunk, 'loc': loc, 'id': id_, 'ver': ver} if ('copy' in line): copyinfo = line.strip().split(':') ret[chunknum[0]][copyinfo[0]] = {'copy': copyinfo[0].replace('copy ', ''), 'ip': copyinfo[1].strip(), 'port': copyinfo[2]} return ret
[ "def", "fileinfo", "(", "path", ")", ":", "cmd", "=", "(", "'mfsfileinfo '", "+", "path", ")", "ret", "=", "{", "}", "chunknum", "=", "''", "out", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "output...
return information on a file located on the moose cli example: .
train
true
34,014
@instrumented_task(name='sentry.tasks.store.save_event', queue='events.save_event') def save_event(cache_key=None, data=None, start_time=None, **kwargs): from sentry.event_manager import EventManager if cache_key: data = default_cache.get(cache_key) if (data is None): metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'post'}) return project = data.pop('project') Raven.tags_context({'project': project}) try: manager = EventManager(data) manager.save(project) finally: if cache_key: default_cache.delete(cache_key) if start_time: metrics.timing('events.time-to-process', (time() - start_time), instance=data['platform'])
[ "@", "instrumented_task", "(", "name", "=", "'sentry.tasks.store.save_event'", ",", "queue", "=", "'events.save_event'", ")", "def", "save_event", "(", "cache_key", "=", "None", ",", "data", "=", "None", ",", "start_time", "=", "None", ",", "**", "kwargs", ")"...
saves an event to the database .
train
false
34,015
def apply_android_specific_fixes(font): font_data.delete_from_cmap(font, [8419, 8593, 8595]) for table in ['LTSH', 'hdmx', 'VDMX', 'gasp']: if (table in font): del font[table] glyph_set = font.getGlyphSet() ellipsis = glyph_set['ellipsis']._glyph for component in ellipsis.components: component.flags &= (~ (1 << 2))
[ "def", "apply_android_specific_fixes", "(", "font", ")", ":", "font_data", ".", "delete_from_cmap", "(", "font", ",", "[", "8419", ",", "8593", ",", "8595", "]", ")", "for", "table", "in", "[", "'LTSH'", ",", "'hdmx'", ",", "'VDMX'", ",", "'gasp'", "]", ...
apply fixes needed for android .
train
false
34,020
def set_selinux_context(path, user=None, role=None, type=None, range=None): if (not any((user, role, type, range))): return False cmd = ['chcon'] if user: cmd.extend(['-u', user]) if role: cmd.extend(['-r', role]) if type: cmd.extend(['-t', type]) if range: cmd.extend(['-l', range]) cmd.append(path) ret = (not __salt__['cmd.retcode'](cmd, python_shell=False)) if ret: return get_selinux_context(path) else: return ret
[ "def", "set_selinux_context", "(", "path", ",", "user", "=", "None", ",", "role", "=", "None", ",", "type", "=", "None", ",", "range", "=", "None", ")", ":", "if", "(", "not", "any", "(", "(", "user", ",", "role", ",", "type", ",", "range", ")", ...
set a specific selinux label on a given path cli example: .
train
false
34,021
def test_font_list(): f = list_fonts() assert_true((len(f) > 0)) for font in _vispy_fonts: assert_in(font, f)
[ "def", "test_font_list", "(", ")", ":", "f", "=", "list_fonts", "(", ")", "assert_true", "(", "(", "len", "(", "f", ")", ">", "0", ")", ")", "for", "font", "in", "_vispy_fonts", ":", "assert_in", "(", "font", ",", "f", ")" ]
test font listing .
train
false
34,024
def mapk(actual, predicted, k=10): return np.mean([apk(a, p, k) for (a, p) in zip(actual, predicted)])
[ "def", "mapk", "(", "actual", ",", "predicted", ",", "k", "=", "10", ")", ":", "return", "np", ".", "mean", "(", "[", "apk", "(", "a", ",", "p", ",", "k", ")", "for", "(", "a", ",", "p", ")", "in", "zip", "(", "actual", ",", "predicted", ")...
computes the mean average precision at k .
train
true
34,025
def get_vmdk_attach_config_spec(client_factory, disk_type=constants.DEFAULT_DISK_TYPE, file_path=None, disk_size=None, linked_clone=False, controller_key=None, unit_number=None, device_name=None, disk_io_limits=None): config_spec = client_factory.create('ns0:VirtualMachineConfigSpec') device_config_spec = [] virtual_device_config_spec = _create_virtual_disk_spec(client_factory, controller_key, disk_type, file_path, disk_size, linked_clone, unit_number, device_name, disk_io_limits) device_config_spec.append(virtual_device_config_spec) config_spec.deviceChange = device_config_spec return config_spec
[ "def", "get_vmdk_attach_config_spec", "(", "client_factory", ",", "disk_type", "=", "constants", ".", "DEFAULT_DISK_TYPE", ",", "file_path", "=", "None", ",", "disk_size", "=", "None", ",", "linked_clone", "=", "False", ",", "controller_key", "=", "None", ",", "...
builds the vmdk attach config spec .
train
false
34,027
def checkT1s(T1_files, cw256=False): import sys import nibabel as nib from nipype.utils.filemanip import filename_to_list T1_files = filename_to_list(T1_files) if (len(T1_files) == 0): print "ERROR: No T1's Given" sys.exit((-1)) shape = nib.load(T1_files[0]).shape for t1 in T1_files[1:]: if (nib.load(t1).shape != shape): print 'ERROR: T1s not the same size. Cannot process {0} and {1} together'.format(T1_files[0], t1) sys.exit((-1)) origvol_names = ['{0:03d}.mgz'.format((i + 1)) for i in range(len(T1_files))] if ((not cw256) and any(((dim > 256) for dim in shape))): print 'Setting MRI Convert to crop images to 256 FOV' cw256 = True resample_type = ('cubic' if (len(T1_files) > 1) else 'interpolate') return (T1_files, cw256, resample_type, origvol_names)
[ "def", "checkT1s", "(", "T1_files", ",", "cw256", "=", "False", ")", ":", "import", "sys", "import", "nibabel", "as", "nib", "from", "nipype", ".", "utils", ".", "filemanip", "import", "filename_to_list", "T1_files", "=", "filename_to_list", "(", "T1_files", ...
verifying size of inputs and setting workflow parameters .
train
false
34,028
def _override_setuptools(req): if (req.project_name == 'setuptools'): if (not len(req.specs)): return True for (comparator, version) in req.specs: if (comparator in ['==', '>=', '>']): if ('0.7' in version): return False return True return False
[ "def", "_override_setuptools", "(", "req", ")", ":", "if", "(", "req", ".", "project_name", "==", "'setuptools'", ")", ":", "if", "(", "not", "len", "(", "req", ".", "specs", ")", ")", ":", "return", "True", "for", "(", "comparator", ",", "version", ...
return true when distribute wants to override a setuptools dependency .
train
true
34,030
def write_sequence(name, seq, qual, seq_fh, qual_fh): if (len(seq) == 0): return if (qual_fh is None): seq_fh.write(format_as_fastq(name, seq, qual)) else: (seqstring, qualstring) = format_as_fasta(name, seq, qual) seq_fh.write(seqstring) qual_fh.write(qualstring) return
[ "def", "write_sequence", "(", "name", ",", "seq", ",", "qual", ",", "seq_fh", ",", "qual_fh", ")", ":", "if", "(", "len", "(", "seq", ")", "==", "0", ")", ":", "return", "if", "(", "qual_fh", "is", "None", ")", ":", "seq_fh", ".", "write", "(", ...
write sequence and quality fasta and fasta qual filehandles if sequence length is 0 .
train
false
34,031
def compareAreaAscending(loopArea, otherLoopArea): if (loopArea.area < otherLoopArea.area): return (-1) return int((loopArea.area > otherLoopArea.area))
[ "def", "compareAreaAscending", "(", "loopArea", ",", "otherLoopArea", ")", ":", "if", "(", "loopArea", ".", "area", "<", "otherLoopArea", ".", "area", ")", ":", "return", "(", "-", "1", ")", "return", "int", "(", "(", "loopArea", ".", "area", ">", "oth...
get comparison in order to sort loop areas in ascending order of area .
train
false
34,032
def _get_default_entrance_exam_minimum_pct(): entrance_exam_minimum_score_pct = float(settings.ENTRANCE_EXAM_MIN_SCORE_PCT) if entrance_exam_minimum_score_pct.is_integer(): entrance_exam_minimum_score_pct = (entrance_exam_minimum_score_pct / 100) return entrance_exam_minimum_score_pct
[ "def", "_get_default_entrance_exam_minimum_pct", "(", ")", ":", "entrance_exam_minimum_score_pct", "=", "float", "(", "settings", ".", "ENTRANCE_EXAM_MIN_SCORE_PCT", ")", "if", "entrance_exam_minimum_score_pct", ".", "is_integer", "(", ")", ":", "entrance_exam_minimum_score_p...
helper method to return the default value from configuration converts integer values to decimals .
train
false
34,033
def rm_stored_dir(dir_path, storage=default_storage): empty_dirs = [] for (root, dirs, files) in walk_storage(dir_path): for fn in files: storage.delete(('%s/%s' % (root, fn))) empty_dirs.insert(0, root) empty_dirs.append(dir_path) for dn in empty_dirs: storage.delete(dn)
[ "def", "rm_stored_dir", "(", "dir_path", ",", "storage", "=", "default_storage", ")", ":", "empty_dirs", "=", "[", "]", "for", "(", "root", ",", "dirs", ",", "files", ")", "in", "walk_storage", "(", "dir_path", ")", ":", "for", "fn", "in", "files", ":"...
removes a stored directory and all files stored beneath that path .
train
false
34,034
@register.tag def showifcached(parser, token): tokens = token.contents.split() if (len(tokens) != 2): raise TemplateSyntaxError((u"'%r' tag requires 1 argument." % tokens[0])) return ShowIfCachedNode(tokens[1])
[ "@", "register", ".", "tag", "def", "showifcached", "(", "parser", ",", "token", ")", ":", "tokens", "=", "token", ".", "contents", ".", "split", "(", ")", "if", "(", "len", "(", "tokens", ")", "!=", "2", ")", ":", "raise", "TemplateSyntaxError", "("...
show content if it exists in the cache .
train
false
34,036
def _on_error(function, path, excinfo): try: if os.access(path, os.W_OK): raise OSError() os.chmod(path, stat.S_IWUSR) function(path) except OSError: try: if ((not os.path.isdir(path)) and (path[(-20):] != '.package-control-old')): os.rename(path, (path + '.package-control-old')) except OSError: pass
[ "def", "_on_error", "(", "function", ",", "path", ",", "excinfo", ")", ":", "try", ":", "if", "os", ".", "access", "(", "path", ",", "os", ".", "W_OK", ")", ":", "raise", "OSError", "(", ")", "os", ".", "chmod", "(", "path", ",", "stat", ".", "...
error handler for shutil .
train
false
34,037
def mapping_keys(s): mapping_keys_re = re.compile('\\%\\([^\\)]*\\)\\w') return sorted(mapping_keys_re.findall(s))
[ "def", "mapping_keys", "(", "s", ")", ":", "mapping_keys_re", "=", "re", ".", "compile", "(", "'\\\\%\\\\([^\\\\)]*\\\\)\\\\w'", ")", "return", "sorted", "(", "mapping_keys_re", ".", "findall", "(", "s", ")", ")" ]
return a sorted list of the mapping keys in the string s .
train
false
34,038
def format_exception_cut_frames(x=1): (typ, val, tb) = sys.exc_info() res = [] tbres = traceback.format_tb(tb) res += tbres[(- x):] res += traceback.format_exception_only(typ, val) return ''.join(res)
[ "def", "format_exception_cut_frames", "(", "x", "=", "1", ")", ":", "(", "typ", ",", "val", ",", "tb", ")", "=", "sys", ".", "exc_info", "(", ")", "res", "=", "[", "]", "tbres", "=", "traceback", ".", "format_tb", "(", "tb", ")", "res", "+=", "tb...
format an exception with traceback .
train
false