id_within_dataset int64 1 55.5k | snippet stringlengths 19 14.2k | tokens listlengths 6 1.63k | nl stringlengths 6 352 | split_within_dataset stringclasses 1 value | is_duplicated bool 2 classes |
|---|---|---|---|---|---|
27,714 | @testing.requires_testing_data
def test_crop():
raw = concatenate_raws([read_raw_fif(f) for f in [fif_fname, fif_fname]])
split_size = 10.0
sfreq = raw.info['sfreq']
nsamp = ((raw.last_samp - raw.first_samp) + 1)
tmins = np.r_[(1.0, np.round(np.arange(0.0, (nsamp - 1), (split_size * sfreq))))]
tmins = np.sort(tmins)
tmaxs = np.concatenate(((tmins[1:] - 1), [(nsamp - 1)]))
tmaxs /= sfreq
tmins /= sfreq
raws = ([None] * len(tmins))
for (ri, (tmin, tmax)) in enumerate(zip(tmins, tmaxs)):
raws[ri] = raw.copy().crop(tmin, tmax)
all_raw_2 = concatenate_raws(raws, preload=False)
assert_equal(raw.first_samp, all_raw_2.first_samp)
assert_equal(raw.last_samp, all_raw_2.last_samp)
assert_array_equal(raw[:, :][0], all_raw_2[:, :][0])
tmins = np.round(np.arange(0.0, (nsamp - 1), (split_size * sfreq)))
tmaxs = np.concatenate(((tmins[1:] - 1), [(nsamp - 1)]))
tmaxs /= sfreq
tmins /= sfreq
raws = ([None] * len(tmins))
for (ri, (tmin, tmax)) in enumerate(zip(tmins, tmaxs)):
raws[ri] = raw.copy().crop(tmin, tmax)
all_raw_1 = concatenate_raws(raws, preload=False)
all_raw_2 = raw.copy().crop(0, None)
for ar in [all_raw_1, all_raw_2]:
assert_equal(raw.first_samp, ar.first_samp)
assert_equal(raw.last_samp, ar.last_samp)
assert_array_equal(raw[:, :][0], ar[:, :][0])
data = np.zeros((1, 1002001))
info = create_info(1, 1000)
raw = RawArray(data, info)
for tmin in range(0, 1001, 100):
raw1 = raw.copy().crop(tmin=tmin, tmax=(tmin + 2))
assert_equal(raw1[:][0].shape, (1, 2001))
| [
"@",
"testing",
".",
"requires_testing_data",
"def",
"test_crop",
"(",
")",
":",
"raw",
"=",
"concatenate_raws",
"(",
"[",
"read_raw_fif",
"(",
"f",
")",
"for",
"f",
"in",
"[",
"fif_fname",
",",
"fif_fname",
"]",
"]",
")",
"split_size",
"=",
"10.0",
"sfr... | test tfr cropping . | train | false |
27,715 | def validate_groupby_func(name, args, kwargs, allowed=None):
if (allowed is None):
allowed = []
kwargs = (set(kwargs) - set(allowed))
if ((len(args) + len(kwargs)) > 0):
raise UnsupportedFunctionCall('numpy operations are not valid with groupby. Use .groupby(...).{func}() instead'.format(func=name))
| [
"def",
"validate_groupby_func",
"(",
"name",
",",
"args",
",",
"kwargs",
",",
"allowed",
"=",
"None",
")",
":",
"if",
"(",
"allowed",
"is",
"None",
")",
":",
"allowed",
"=",
"[",
"]",
"kwargs",
"=",
"(",
"set",
"(",
"kwargs",
")",
"-",
"set",
"(",
... | args and kwargs should be empty . | train | true |
27,716 | def extract_operands(source):
n = abs(source)
i = int(n)
if isinstance(n, float):
if (i == n):
n = i
else:
n = decimal.Decimal(str(n))
if isinstance(n, decimal.Decimal):
dec_tuple = n.as_tuple()
exp = dec_tuple.exponent
fraction_digits = (dec_tuple.digits[exp:] if (exp < 0) else ())
trailing = ''.join((str(d) for d in fraction_digits))
no_trailing = trailing.rstrip('0')
v = len(trailing)
w = len(no_trailing)
f = int((trailing or 0))
t = int((no_trailing or 0))
else:
v = w = f = t = 0
return (n, i, v, w, f, t)
| [
"def",
"extract_operands",
"(",
"source",
")",
":",
"n",
"=",
"abs",
"(",
"source",
")",
"i",
"=",
"int",
"(",
"n",
")",
"if",
"isinstance",
"(",
"n",
",",
"float",
")",
":",
"if",
"(",
"i",
"==",
"n",
")",
":",
"n",
"=",
"i",
"else",
":",
... | extract operands from a decimal . | train | false |
27,717 | def _os_info():
lines = []
releaseinfo = None
if (sys.platform == 'linux'):
osver = ''
releaseinfo = _release_info()
elif (sys.platform == 'win32'):
osver = ', '.join(platform.win32_ver())
elif (sys.platform == 'darwin'):
(release, versioninfo, machine) = platform.mac_ver()
if all(((not e) for e in versioninfo)):
versioninfo = ''
else:
versioninfo = '.'.join(versioninfo)
osver = ', '.join([e for e in [release, versioninfo, machine] if e])
else:
osver = '?'
lines.append('OS Version: {}'.format(osver))
if (releaseinfo is not None):
for (fn, data) in releaseinfo:
lines += ['', '--- {} ---'.format(fn), data]
return lines
| [
"def",
"_os_info",
"(",
")",
":",
"lines",
"=",
"[",
"]",
"releaseinfo",
"=",
"None",
"if",
"(",
"sys",
".",
"platform",
"==",
"'linux'",
")",
":",
"osver",
"=",
"''",
"releaseinfo",
"=",
"_release_info",
"(",
")",
"elif",
"(",
"sys",
".",
"platform"... | get operating system info . | train | false |
27,718 | def unregister_admin_widget(widget_cls):
widget_id = widget_cls.widget_id
try:
primary_widgets.remove(widget_cls)
except ValueError:
try:
secondary_widgets.remove(widget_cls)
except ValueError:
logging.error(u'Failed to unregister unknown administration widget "%s".', widget_id)
raise KeyError((u'"%s" is not a registered administration widget' % widget_id))
| [
"def",
"unregister_admin_widget",
"(",
"widget_cls",
")",
":",
"widget_id",
"=",
"widget_cls",
".",
"widget_id",
"try",
":",
"primary_widgets",
".",
"remove",
"(",
"widget_cls",
")",
"except",
"ValueError",
":",
"try",
":",
"secondary_widgets",
".",
"remove",
"(... | unregister a previously registered administration widget . | train | false |
27,719 | def add_custom_doctypes(data, doctype_info):
add_section(data, _(u'Documents'), u'fa fa-star', [d for d in doctype_info if (d.custom and (d.document_type in (u'Document', u'Transaction')))])
add_section(data, _(u'Setup'), u'fa fa-cog', [d for d in doctype_info if (d.custom and (d.document_type in (u'Setup', u'Master', u'')))])
| [
"def",
"add_custom_doctypes",
"(",
"data",
",",
"doctype_info",
")",
":",
"add_section",
"(",
"data",
",",
"_",
"(",
"u'Documents'",
")",
",",
"u'fa fa-star'",
",",
"[",
"d",
"for",
"d",
"in",
"doctype_info",
"if",
"(",
"d",
".",
"custom",
"and",
"(",
... | adds custom doctypes to modules setup via config/desktop . | train | false |
27,720 | def resolve_ref(uri):
parsed = urlparse(uri)
if (parsed.path in schema_paths):
schema = schema_paths[parsed.path]
if callable(schema):
return schema(**dict(parse_qsl(parsed.query)))
return schema
raise jsonschema.RefResolutionError((u'%s could not be resolved' % uri))
| [
"def",
"resolve_ref",
"(",
"uri",
")",
":",
"parsed",
"=",
"urlparse",
"(",
"uri",
")",
"if",
"(",
"parsed",
".",
"path",
"in",
"schema_paths",
")",
":",
"schema",
"=",
"schema_paths",
"[",
"parsed",
".",
"path",
"]",
"if",
"callable",
"(",
"schema",
... | finds and returns a schema pointed to by uri that has been registered in the register_schema function . | train | false |
27,722 | def GetChild(node, tag):
for child in node.getchildren():
if (GetTag(child) == tag):
return child
| [
"def",
"GetChild",
"(",
"node",
",",
"tag",
")",
":",
"for",
"child",
"in",
"node",
".",
"getchildren",
"(",
")",
":",
"if",
"(",
"GetTag",
"(",
"child",
")",
"==",
"tag",
")",
":",
"return",
"child"
] | returns first child of node with tag . | train | false |
27,723 | def MergeStandardOptions(options, params):
pass
| [
"def",
"MergeStandardOptions",
"(",
"options",
",",
"params",
")",
":",
"pass"
] | take an options object generated by the command line and merge the values into the iisparameters object . | train | false |
27,724 | def _slashappend_or_add_error(p, caller):
try:
st = os.lstat(p)
except OSError as e:
add_error(('%s: %s' % (caller, e)))
return None
else:
if stat.S_ISDIR(st.st_mode):
return slashappend(p)
return p
| [
"def",
"_slashappend_or_add_error",
"(",
"p",
",",
"caller",
")",
":",
"try",
":",
"st",
"=",
"os",
".",
"lstat",
"(",
"p",
")",
"except",
"OSError",
"as",
"e",
":",
"add_error",
"(",
"(",
"'%s: %s'",
"%",
"(",
"caller",
",",
"e",
")",
")",
")",
... | return p . | train | false |
27,725 | def migrate_guid_log(log):
for key in ['project', 'node']:
if (key in log.params):
value = (log.params[key] or '')
record = models.Node.load(value.lower())
if (record is not None):
log.params[key] = record._primary_key
if ('contributor' in log.params):
if isinstance(log.params['contributor'], basestring):
record = models.User.load(log.params['contributor'].lower())
if record:
log.params['contributor'] = record._primary_key
if ('contributors' in log.params):
for (idx, uid) in enumerate(log.params['contributors']):
if isinstance(uid, basestring):
record = models.User.load(uid.lower())
if record:
log.params['contributors'][idx] = record._primary_key
data = log.to_storage()
if data['user']:
record = models.User.load(data['user'].lower())
if record:
log.user = record
log.save()
| [
"def",
"migrate_guid_log",
"(",
"log",
")",
":",
"for",
"key",
"in",
"[",
"'project'",
",",
"'node'",
"]",
":",
"if",
"(",
"key",
"in",
"log",
".",
"params",
")",
":",
"value",
"=",
"(",
"log",
".",
"params",
"[",
"key",
"]",
"or",
"''",
")",
"... | migrate non-reference fields containing primary keys on logs . | train | false |
27,726 | def recursive_rm(*patterns):
for (root, subdirs, subfiles) in os.walk('.'):
root = os.path.normpath(root)
if root.startswith('.git/'):
continue
for file in subfiles:
for pattern in patterns:
if fnmatch.fnmatch(file, pattern):
safe_remove(os.path.join(root, file))
for dir in subdirs:
for pattern in patterns:
if fnmatch.fnmatch(dir, pattern):
safe_rmtree(os.path.join(root, dir))
| [
"def",
"recursive_rm",
"(",
"*",
"patterns",
")",
":",
"for",
"(",
"root",
",",
"subdirs",
",",
"subfiles",
")",
"in",
"os",
".",
"walk",
"(",
"'.'",
")",
":",
"root",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"root",
")",
"if",
"root",
".",
... | recursively remove a file or matching a list of patterns . | train | false |
27,728 | def is_exp(var):
neg = False
neg_info = is_neg(var)
if (neg_info is not None):
neg = True
var = neg_info
if (var.owner and (var.owner.op == tensor.exp)):
return (neg, var.owner.inputs[0])
| [
"def",
"is_exp",
"(",
"var",
")",
":",
"neg",
"=",
"False",
"neg_info",
"=",
"is_neg",
"(",
"var",
")",
"if",
"(",
"neg_info",
"is",
"not",
"None",
")",
":",
"neg",
"=",
"True",
"var",
"=",
"neg_info",
"if",
"(",
"var",
".",
"owner",
"and",
"(",
... | match a variable with either of the exp(x) or -exp(x) patterns . | train | false |
27,729 | @task
@use_master
@set_modified_on
def resize_promo_imgs(src, dst, sizes, **kw):
log.info(('[1@None] Resizing promo imgs: %s' % dst))
try:
for s in sizes:
size_dst = ('%s-%s.png' % (dst, s))
resize_image(src, size_dst, (s, 0), remove_src=False)
pngcrush_image.delay(size_dst, **kw)
with private_storage.open(src) as fd:
promo_img_hash = _hash_file(fd)
private_storage.delete(src)
log.info(('Promo img hash resizing completed for: %s' % dst))
return {'promo_img_hash': promo_img_hash}
except Exception as e:
log.error(('Error resizing promo img hash: %s; %s' % (e, dst)))
| [
"@",
"task",
"@",
"use_master",
"@",
"set_modified_on",
"def",
"resize_promo_imgs",
"(",
"src",
",",
"dst",
",",
"sizes",
",",
"**",
"kw",
")",
":",
"log",
".",
"info",
"(",
"(",
"'[1@None] Resizing promo imgs: %s'",
"%",
"dst",
")",
")",
"try",
":",
"fo... | resizes webapp/website promo imgs . | train | false |
27,730 | def joyent_node_state(id_):
states = {'running': 0, 'stopped': 2, 'stopping': 2, 'provisioning': 3, 'deleted': 2, 'unknown': 4}
if (id_ not in states):
id_ = 'unknown'
return node_state(states[id_])
| [
"def",
"joyent_node_state",
"(",
"id_",
")",
":",
"states",
"=",
"{",
"'running'",
":",
"0",
",",
"'stopped'",
":",
"2",
",",
"'stopping'",
":",
"2",
",",
"'provisioning'",
":",
"3",
",",
"'deleted'",
":",
"2",
",",
"'unknown'",
":",
"4",
"}",
"if",
... | convert joyent returned state to state common to other data center return values for consistency . | train | false |
27,731 | def testmakeelement():
testelement = makeelement('testname', attributes={'testattribute': 'testvalue'}, tagtext='testtagtext')
assert (testelement.tag == '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}testname')
assert (testelement.attrib == {'{http://schemas.openxmlformats.org/wordprocessingml/2006/main}testattribute': 'testvalue'})
assert (testelement.text == 'testtagtext')
| [
"def",
"testmakeelement",
"(",
")",
":",
"testelement",
"=",
"makeelement",
"(",
"'testname'",
",",
"attributes",
"=",
"{",
"'testattribute'",
":",
"'testvalue'",
"}",
",",
"tagtext",
"=",
"'testtagtext'",
")",
"assert",
"(",
"testelement",
".",
"tag",
"==",
... | ensure custom elements get created . | train | false |
27,733 | def delete_option_group(name, region=None, key=None, keyid=None, profile=None):
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if (not conn):
return {'deleted': bool(conn)}
res = conn.delete_option_group(OptionGroupName=name)
if (not res):
return {'deleted': bool(res), 'message': 'Failed to delete RDS option group {0}.'.format(name)}
return {'deleted': bool(res), 'message': 'Deleted RDS option group {0}.'.format(name)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
| [
"def",
"delete_option_group",
"(",
"name",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"try",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"... | delete an rds option group . | train | false |
27,734 | @command('(rm|add)\\s(?:\\*|all)')
def add_rm_all(action):
if (action == 'rm'):
g.model.songs.clear()
msg = ((c.b + 'Cleared all songs') + c.w)
g.content = content.generate_songlist_display(zeromsg=msg)
elif (action == 'add'):
size = len(g.model)
songlist_rm_add('add', ('-' + str(size)))
| [
"@",
"command",
"(",
"'(rm|add)\\\\s(?:\\\\*|all)'",
")",
"def",
"add_rm_all",
"(",
"action",
")",
":",
"if",
"(",
"action",
"==",
"'rm'",
")",
":",
"g",
".",
"model",
".",
"songs",
".",
"clear",
"(",
")",
"msg",
"=",
"(",
"(",
"c",
".",
"b",
"+",
... | add all displayed songs to current playlist . | train | false |
27,735 | def next_week_day(base_date, weekday):
day_of_week = base_date.weekday()
end_of_this_week = (base_date + timedelta(days=(6 - day_of_week)))
day = (end_of_this_week + timedelta(days=1))
while (day.weekday() != weekday):
day = (day + timedelta(days=1))
return day
| [
"def",
"next_week_day",
"(",
"base_date",
",",
"weekday",
")",
":",
"day_of_week",
"=",
"base_date",
".",
"weekday",
"(",
")",
"end_of_this_week",
"=",
"(",
"base_date",
"+",
"timedelta",
"(",
"days",
"=",
"(",
"6",
"-",
"day_of_week",
")",
")",
")",
"da... | finds next weekday . | train | true |
27,737 | def _get_cron_date_time(**kwargs):
range_max = {'minute': list(list(range(60))), 'hour': list(list(range(24))), 'month': list(list(range(1, 13))), 'dayweek': list(list(range(7)))}
ret = {}
for param in ('minute', 'hour', 'month', 'dayweek'):
value = str(kwargs.get(param, '1')).lower()
if (value == 'random'):
ret[param] = str(random.sample(range_max[param], 1)[0])
else:
ret[param] = value
if (ret['month'] in '1 3 5 7 8 10 12'.split()):
daymonth_max = 31
elif (ret['month'] in '4 6 9 11'.split()):
daymonth_max = 30
else:
daymonth_max = 28
daymonth = str(kwargs.get('daymonth', '1')).lower()
if (daymonth == 'random'):
ret['daymonth'] = str(random.sample(list(list(range(1, (daymonth_max + 1)))), 1)[0])
else:
ret['daymonth'] = daymonth
return ret
| [
"def",
"_get_cron_date_time",
"(",
"**",
"kwargs",
")",
":",
"range_max",
"=",
"{",
"'minute'",
":",
"list",
"(",
"list",
"(",
"range",
"(",
"60",
")",
")",
")",
",",
"'hour'",
":",
"list",
"(",
"list",
"(",
"range",
"(",
"24",
")",
")",
")",
","... | returns a dict of date/time values to be used in a cron entry . | train | false |
27,738 | def rate_limit(resp):
if ('X-RateLimit-Remaining' not in resp.headers):
log.info('No rate limit detected. Hum along...')
return
remain = int(resp.headers['X-RateLimit-Remaining'])
limit = int(resp.headers['X-RateLimit-Limit'])
log.info('Rate limiter: %s allowed out of %d', remain, limit)
if (remain > 1):
return
reset = int(resp.headers['X-RateLimit-Reset'])
ts = datetime.fromtimestamp(reset)
delay = (ts - datetime.now()).total_seconds()
log.info('Hit rate limit. Have to wait for %d seconds', delay)
if (delay < 0):
delay = 2
time.sleep(delay)
| [
"def",
"rate_limit",
"(",
"resp",
")",
":",
"if",
"(",
"'X-RateLimit-Remaining'",
"not",
"in",
"resp",
".",
"headers",
")",
":",
"log",
".",
"info",
"(",
"'No rate limit detected. Hum along...'",
")",
"return",
"remain",
"=",
"int",
"(",
"resp",
".",
"header... | rate limiting decorator . | train | false |
27,739 | def _SkipGroup(buffer, pos, end):
while 1:
(tag_bytes, pos) = ReadTag(buffer, pos)
new_pos = SkipField(buffer, pos, end, tag_bytes)
if (new_pos == (-1)):
return pos
pos = new_pos
| [
"def",
"_SkipGroup",
"(",
"buffer",
",",
"pos",
",",
"end",
")",
":",
"while",
"1",
":",
"(",
"tag_bytes",
",",
"pos",
")",
"=",
"ReadTag",
"(",
"buffer",
",",
"pos",
")",
"new_pos",
"=",
"SkipField",
"(",
"buffer",
",",
"pos",
",",
"end",
",",
"... | skip sub-group . | train | true |
27,742 | def run_threads(collection, target):
threads = []
for i in range(NTHREADS):
bound_target = partial(target, collection, i)
threads.append(threading.Thread(target=bound_target))
for t in threads:
t.start()
for t in threads:
t.join(30)
assert (not t.isAlive())
| [
"def",
"run_threads",
"(",
"collection",
",",
"target",
")",
":",
"threads",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"NTHREADS",
")",
":",
"bound_target",
"=",
"partial",
"(",
"target",
",",
"collection",
",",
"i",
")",
"threads",
".",
"append",... | run a target function in many threads . | train | false |
27,743 | def removeElementFromListTable(element, key, listTable):
if (key not in listTable):
return
elementList = listTable[key]
if (len(elementList) < 2):
del listTable[key]
return
if (element in elementList):
elementList.remove(element)
| [
"def",
"removeElementFromListTable",
"(",
"element",
",",
"key",
",",
"listTable",
")",
":",
"if",
"(",
"key",
"not",
"in",
"listTable",
")",
":",
"return",
"elementList",
"=",
"listTable",
"[",
"key",
"]",
"if",
"(",
"len",
"(",
"elementList",
")",
"<",... | remove an element from the list table . | train | false |
27,745 | def n_nonzero_columns(X):
return len(np.unique(X.nonzero()[1]))
| [
"def",
"n_nonzero_columns",
"(",
"X",
")",
":",
"return",
"len",
"(",
"np",
".",
"unique",
"(",
"X",
".",
"nonzero",
"(",
")",
"[",
"1",
"]",
")",
")"
] | returns the number of non-zero columns in a csr matrix x . | train | false |
27,746 | def run_docstring_examples(f, globs, verbose=False, name='NoName', compileflags=None, optionflags=0):
finder = DocTestFinder(verbose=verbose, recurse=False)
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(f, name, globs=globs):
runner.run(test, compileflags=compileflags)
| [
"def",
"run_docstring_examples",
"(",
"f",
",",
"globs",
",",
"verbose",
"=",
"False",
",",
"name",
"=",
"'NoName'",
",",
"compileflags",
"=",
"None",
",",
"optionflags",
"=",
"0",
")",
":",
"finder",
"=",
"DocTestFinder",
"(",
"verbose",
"=",
"verbose",
... | test examples in the given objects docstring (f) . | train | true |
27,748 | def geoserver_urls(request):
defaults = dict(GEOSERVER_BASE_URL=ogc_server_settings.public_url, UPLOADER_URL=(reverse('data_upload') if (getattr(settings, 'UPLOADER', dict()).get('BACKEND', 'geonode.rest') == 'geonode.importer') else reverse('layer_upload')), MAPFISH_PRINT_ENABLED=getattr(ogc_server_settings, 'MAPFISH_PRINT_ENABLED', False), PRINT_NG_ENABLED=getattr(ogc_server_settings, 'PRINT_NG_ENABLED', False), GEONODE_SECURITY_ENABLED=getattr(ogc_server_settings, 'GEONODE_SECURITY_ENABLED', False), GEOGIG_ENABLED=getattr(ogc_server_settings, 'GEOGIG_ENABLED', False), TIME_ENABLED=getattr(settings, 'UPLOADER', dict()).get('OPTIONS', dict()).get('TIME_ENABLED', False), MOSAIC_ENABLED=getattr(settings, 'UPLOADER', dict()).get('OPTIONS', dict()).get('MOSAIC_ENABLED', False))
return defaults
| [
"def",
"geoserver_urls",
"(",
"request",
")",
":",
"defaults",
"=",
"dict",
"(",
"GEOSERVER_BASE_URL",
"=",
"ogc_server_settings",
".",
"public_url",
",",
"UPLOADER_URL",
"=",
"(",
"reverse",
"(",
"'data_upload'",
")",
"if",
"(",
"getattr",
"(",
"settings",
",... | global values to pass to templates . | train | false |
27,749 | def make_scalar():
raise NotImplementedError('TODO: implement this function.')
| [
"def",
"make_scalar",
"(",
")",
":",
"raise",
"NotImplementedError",
"(",
"'TODO: implement this function.'",
")"
] | returns a new theano scalar . | train | false |
27,750 | def manual_seed(seed):
return default_generator.manual_seed(seed)
| [
"def",
"manual_seed",
"(",
"seed",
")",
":",
"return",
"default_generator",
".",
"manual_seed",
"(",
"seed",
")"
] | sets the seed for generating random numbers . | train | false |
27,751 | def verifyConstructorArgument(testCase, cls, argName, defaultVal, altVal, attrName=None):
if (attrName is None):
attrName = argName
actual = {}
expected = {'defaultVal': defaultVal, 'altVal': altVal}
o = cls()
actual['defaultVal'] = getattr(o, attrName)
o = cls(**{argName: altVal})
actual['altVal'] = getattr(o, attrName)
testCase.assertEqual(expected, actual)
| [
"def",
"verifyConstructorArgument",
"(",
"testCase",
",",
"cls",
",",
"argName",
",",
"defaultVal",
",",
"altVal",
",",
"attrName",
"=",
"None",
")",
":",
"if",
"(",
"attrName",
"is",
"None",
")",
":",
"attrName",
"=",
"argName",
"actual",
"=",
"{",
"}",... | verify that an attribute has the expected default value and that a corresponding argument passed to a constructor is assigned to that attribute . | train | false |
27,752 | def InterpolatePath(path, client, users=None, path_args=None, depth=0):
sys_formatters = {'systemroot': 'c:\\Windows'}
if path_args:
sys_formatters.update(path_args)
if users:
results = []
for user in users:
user = GetUserInfo(client, user)
if user:
formatters = dict(((x.name, y) for (x, y) in user.ListSetFields()))
formatters.update(sys_formatters)
try:
results.append(path.format(**formatters))
except KeyError:
pass
return results
else:
try:
path = path.format(**sys_formatters)
except KeyError:
logging.warn('Failed path interpolation on %s', path)
return ''
if (('{' in path) and (depth < 10)):
path = InterpolatePath(path, client=client, users=users, path_args=path_args, depth=(depth + 1))
return path
| [
"def",
"InterpolatePath",
"(",
"path",
",",
"client",
",",
"users",
"=",
"None",
",",
"path_args",
"=",
"None",
",",
"depth",
"=",
"0",
")",
":",
"sys_formatters",
"=",
"{",
"'systemroot'",
":",
"'c:\\\\Windows'",
"}",
"if",
"path_args",
":",
"sys_formatte... | take a string as a path on a client and interpolate with client data . | train | true |
27,753 | def id_to_path(pk):
pk = unicode(pk)
path = [pk[(-1)]]
if (len(pk) >= 2):
path.append(pk[(-2):])
else:
path.append(pk)
path.append(pk)
return os.path.join(*path)
| [
"def",
"id_to_path",
"(",
"pk",
")",
":",
"pk",
"=",
"unicode",
"(",
"pk",
")",
"path",
"=",
"[",
"pk",
"[",
"(",
"-",
"1",
")",
"]",
"]",
"if",
"(",
"len",
"(",
"pk",
")",
">=",
"2",
")",
":",
"path",
".",
"append",
"(",
"pk",
"[",
"(",
... | generate a path from an id . | train | false |
27,756 | def _build_poll_environ(token):
return _build_environ('/_ah/channel/dev', {'command': 'poll', 'channel': token})
| [
"def",
"_build_poll_environ",
"(",
"token",
")",
":",
"return",
"_build_environ",
"(",
"'/_ah/channel/dev'",
",",
"{",
"'command'",
":",
"'poll'",
",",
"'channel'",
":",
"token",
"}",
")"
] | build an environ for a wsgi request that performs a channel poll . | train | false |
27,758 | def _get_single_trace_by_component(**component_filter):
traces = Trace.query(**component_filter)
if (len(traces) == 0):
return None
elif (len(traces) > 1):
raise UniqueTraceNotFoundException(('More than 1 trace matching %s found.' % component_filter))
return traces[0]
| [
"def",
"_get_single_trace_by_component",
"(",
"**",
"component_filter",
")",
":",
"traces",
"=",
"Trace",
".",
"query",
"(",
"**",
"component_filter",
")",
"if",
"(",
"len",
"(",
"traces",
")",
"==",
"0",
")",
":",
"return",
"None",
"elif",
"(",
"len",
"... | tries to return a single trace mathing component_filter . | train | false |
27,760 | @pytest.fixture()
def execute_task(manager):
def execute(task_name, abort=False, options=None):
u'\n Use to execute one test task from config.\n\n :param abort: If `True` expect (and require) this task to abort.\n '
log.info((u'********** Running task: %s ********** ' % task_name))
config = manager.config[u'tasks'][task_name]
task = Task(manager, task_name, config=config, options=options)
try:
if abort:
with pytest.raises(TaskAbort):
task.execute()
else:
task.execute()
finally:
try:
task.session.close()
except Exception:
pass
return task
return execute
| [
"@",
"pytest",
".",
"fixture",
"(",
")",
"def",
"execute_task",
"(",
"manager",
")",
":",
"def",
"execute",
"(",
"task_name",
",",
"abort",
"=",
"False",
",",
"options",
"=",
"None",
")",
":",
"log",
".",
"info",
"(",
"(",
"u'********** Running task: %s ... | a function that can be used to execute and return a named task in config argument . | train | false |
27,761 | def found(url):
return redirect(url, '302 Found')
| [
"def",
"found",
"(",
"url",
")",
":",
"return",
"redirect",
"(",
"url",
",",
"'302 Found'",
")"
] | redirects to the specified location using http 302 status code . | train | false |
27,762 | def gf_degree(f):
return (len(f) - 1)
| [
"def",
"gf_degree",
"(",
"f",
")",
":",
"return",
"(",
"len",
"(",
"f",
")",
"-",
"1",
")"
] | return the leading degree of f . | train | false |
27,764 | def line(name, content=None, match=None, mode=None, location=None, before=None, after=None, show_changes=True, backup=False, quiet=False, indent=True, create=False, user=None, group=None, file_mode=None):
name = os.path.expanduser(name)
ret = {'name': name, 'changes': {}, 'pchanges': {}, 'result': True, 'comment': ''}
if (not name):
return _error(ret, 'Must provide name to file.line')
if (create and (not os.path.isfile(name))):
managed(name, create=create, user=user, group=group, mode=file_mode)
(check_res, check_msg) = _check_file(name)
if (not check_res):
return _error(ret, check_msg)
mode = ((mode and mode.lower()) or mode)
if (mode is None):
return _error(ret, 'Mode was not defined. How to process the file?')
modeswithemptycontent = ['delete']
if ((mode not in modeswithemptycontent) and (content is None)):
return _error(ret, 'Content can only be empty if mode is {0}'.format(modeswithemptycontent))
del modeswithemptycontent
changes = __salt__['file.line'](name, content, match=match, mode=mode, location=location, before=before, after=after, show_changes=show_changes, backup=backup, quiet=quiet, indent=indent)
if changes:
ret['pchanges']['diff'] = changes
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Changes would be made:\ndiff:\n{0}'.format(changes)
else:
ret['result'] = True
ret['comment'] = 'Changes were made'
ret['changes'] = {'diff': changes}
else:
ret['result'] = True
ret['comment'] = 'No changes needed to be made'
return ret
| [
"def",
"line",
"(",
"name",
",",
"content",
"=",
"None",
",",
"match",
"=",
"None",
",",
"mode",
"=",
"None",
",",
"location",
"=",
"None",
",",
"before",
"=",
"None",
",",
"after",
"=",
"None",
",",
"show_changes",
"=",
"True",
",",
"backup",
"=",... | draws a line from to using the given stroke color and stroke width . | train | true |
27,767 | def test_optional_args():
print 'TODO'
| [
"def",
"test_optional_args",
"(",
")",
":",
"print",
"'TODO'"
] | URL existing use of optional args tests look woefully insufficient . | train | false |
27,768 | def create_layer(number):
file_list = glob.glob(('%s*.shp' % shp_path))
random_index = randint(0, (len(file_list) - 1))
file_uri = file_list[random_index]
layer = file_upload(file_uri)
assign_keywords(layer)
resource = layer.get_self_resource()
set_resource(resource)
| [
"def",
"create_layer",
"(",
"number",
")",
":",
"file_list",
"=",
"glob",
".",
"glob",
"(",
"(",
"'%s*.shp'",
"%",
"shp_path",
")",
")",
"random_index",
"=",
"randint",
"(",
"0",
",",
"(",
"len",
"(",
"file_list",
")",
"-",
"1",
")",
")",
"file_uri",... | creates a new layer . | train | false |
27,770 | def php_str_noquotes(data):
encoded = ''
for char in data:
encoded += 'chr({0}).'.format(ord(char))
return encoded[:(-1)]
| [
"def",
"php_str_noquotes",
"(",
"data",
")",
":",
"encoded",
"=",
"''",
"for",
"char",
"in",
"data",
":",
"encoded",
"+=",
"'chr({0}).'",
".",
"format",
"(",
"ord",
"(",
"char",
")",
")",
"return",
"encoded",
"[",
":",
"(",
"-",
"1",
")",
"]"
] | convert string to chr . | train | false |
27,773 | def validate_deprecated(validator, message, instance, schema):
log.warning(message)
| [
"def",
"validate_deprecated",
"(",
"validator",
",",
"message",
",",
"instance",
",",
"schema",
")",
":",
"log",
".",
"warning",
"(",
"message",
")"
] | not really a validator . | train | false |
27,774 | def get_zones(permanent=True):
cmd = '--get-zones'
if permanent:
cmd += ' --permanent'
return __firewall_cmd(cmd).split()
| [
"def",
"get_zones",
"(",
"permanent",
"=",
"True",
")",
":",
"cmd",
"=",
"'--get-zones'",
"if",
"permanent",
":",
"cmd",
"+=",
"' --permanent'",
"return",
"__firewall_cmd",
"(",
"cmd",
")",
".",
"split",
"(",
")"
] | get a list of azs for the configured region . | train | false |
27,775 | def delete_bucket(bucket_name):
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucket_name)
bucket.delete()
print 'Bucket {} deleted'.format(bucket.name)
| [
"def",
"delete_bucket",
"(",
"bucket_name",
")",
":",
"storage_client",
"=",
"storage",
".",
"Client",
"(",
")",
"bucket",
"=",
"storage_client",
".",
"get_bucket",
"(",
"bucket_name",
")",
"bucket",
".",
"delete",
"(",
")",
"print",
"'Bucket {} deleted'",
"."... | deletes a bucket . | train | false |
27,776 | def matrix_rep(op, basis):
a = zeros(len(basis))
for i in range(len(basis)):
for j in range(len(basis)):
a[(i, j)] = apply_operators(((Dagger(basis[i]) * op) * basis[j]))
return a
| [
"def",
"matrix_rep",
"(",
"op",
",",
"basis",
")",
":",
"a",
"=",
"zeros",
"(",
"len",
"(",
"basis",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"basis",
")",
")",
":",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"basis",
")",
")",
... | find the representation of an operator in a basis . | train | false |
27,777 | def CreateCustomizerFeedItems(client, adgroup_ids, ad_customizer_feed):
feed_item_service = client.GetService('FeedItemService')
now = datetime.now()
mars_date = datetime(now.year, now.month, 1, 0, 0)
venus_date = datetime(now.year, now.month, 15, 0, 0)
time_format = '%Y%m%d %H%M%S'
feed_item_operations = [CreateFeedItemAddOperation('Mars', '$1234.56', mars_date.strftime(time_format), adgroup_ids[0], ad_customizer_feed), CreateFeedItemAddOperation('Venus', '$1450.00', venus_date.strftime(time_format), adgroup_ids[1], ad_customizer_feed)]
response = feed_item_service.mutate(feed_item_operations)
if ('value' in response):
for feed_item in response['value']:
print ('Added FeedItem with ID %d.' % feed_item['feedItemId'])
else:
raise errors.GoogleAdsError('No FeedItems were added.')
| [
"def",
"CreateCustomizerFeedItems",
"(",
"client",
",",
"adgroup_ids",
",",
"ad_customizer_feed",
")",
":",
"feed_item_service",
"=",
"client",
".",
"GetService",
"(",
"'FeedItemService'",
")",
"now",
"=",
"datetime",
".",
"now",
"(",
")",
"mars_date",
"=",
"dat... | creates feeditems for the specified adgroups . | train | true |
27,779 | def idz_frm(n, w, x):
return _id.idz_frm(n, w, x)
| [
"def",
"idz_frm",
"(",
"n",
",",
"w",
",",
"x",
")",
":",
"return",
"_id",
".",
"idz_frm",
"(",
"n",
",",
"w",
",",
"x",
")"
] | transform complex vector via a composition of rokhlins random transform . | train | false |
27,780 | def hpfilter(X, lamb=1600):
_pandas_wrapper = _maybe_get_pandas_wrapper(X)
X = np.asarray(X, float)
if (X.ndim > 1):
X = X.squeeze()
nobs = len(X)
I = sparse.eye(nobs, nobs)
offsets = np.array([0, 1, 2])
data = np.repeat([[1.0], [(-2.0)], [1.0]], nobs, axis=1)
K = sparse.dia_matrix((data, offsets), shape=((nobs - 2), nobs))
use_umfpack = True
trend = spsolve((I + (lamb * K.T.dot(K))), X, use_umfpack=use_umfpack)
cycle = (X - trend)
if (_pandas_wrapper is not None):
return (_pandas_wrapper(cycle), _pandas_wrapper(trend))
return (cycle, trend)
| [
"def",
"hpfilter",
"(",
"X",
",",
"lamb",
"=",
"1600",
")",
":",
"_pandas_wrapper",
"=",
"_maybe_get_pandas_wrapper",
"(",
"X",
")",
"X",
"=",
"np",
".",
"asarray",
"(",
"X",
",",
"float",
")",
"if",
"(",
"X",
".",
"ndim",
">",
"1",
")",
":",
"X"... | hodrick-prescott filter parameters x : array-like the 1d ndarray timeseries to filter of length or lamb : float the hodrick-prescott smoothing parameter . | train | false |
27,781 | @frappe.whitelist()
def clear_error_logs():
frappe.only_for(u'System Manager')
frappe.db.sql(u'delete from `tabError Log`')
| [
"@",
"frappe",
".",
"whitelist",
"(",
")",
"def",
"clear_error_logs",
"(",
")",
":",
"frappe",
".",
"only_for",
"(",
"u'System Manager'",
")",
"frappe",
".",
"db",
".",
"sql",
"(",
"u'delete from `tabError Log`'",
")"
] | flush all error logs . | train | false |
27,783 | def _pow_int(inter, power):
power = int(power)
if (power & 1):
return interval((inter.start ** power), (inter.end ** power))
elif ((inter.start < 0) and (inter.end > 0)):
start = 0
end = max((inter.start ** power), (inter.end ** power))
return interval(start, end)
else:
return interval((inter.start ** power), (inter.end ** power))
| [
"def",
"_pow_int",
"(",
"inter",
",",
"power",
")",
":",
"power",
"=",
"int",
"(",
"power",
")",
"if",
"(",
"power",
"&",
"1",
")",
":",
"return",
"interval",
"(",
"(",
"inter",
".",
"start",
"**",
"power",
")",
",",
"(",
"inter",
".",
"end",
"... | evaluates an interval raised to an integer power . | train | false |
27,784 | @treeio_login_required
@handle_response_format
@_process_mass_form
def charts_index(request, response_format='html'):
charts = Object.filter_by_request(request, Chart.objects)
context = _get_default_context(request)
context.update({'charts': charts})
return render_to_response('reports/chart_index', context, context_instance=RequestContext(request), response_format=response_format)
| [
"@",
"treeio_login_required",
"@",
"handle_response_format",
"@",
"_process_mass_form",
"def",
"charts_index",
"(",
"request",
",",
"response_format",
"=",
"'html'",
")",
":",
"charts",
"=",
"Object",
".",
"filter_by_request",
"(",
"request",
",",
"Chart",
".",
"o... | charts index page . | train | false |
27,786 | def sshclient_from_instance(instance, ssh_key_file, host_key_file='~/.ssh/known_hosts', user_name='root', ssh_pwd=None):
s = FakeServer(instance, ssh_key_file)
return SSHClient(s, host_key_file, user_name, ssh_pwd)
| [
"def",
"sshclient_from_instance",
"(",
"instance",
",",
"ssh_key_file",
",",
"host_key_file",
"=",
"'~/.ssh/known_hosts'",
",",
"user_name",
"=",
"'root'",
",",
"ssh_pwd",
"=",
"None",
")",
":",
"s",
"=",
"FakeServer",
"(",
"instance",
",",
"ssh_key_file",
")",
... | create and return an sshclient object given an instance object . | train | true |
27,788 | def CalculateBlankLines(tree):
blank_line_calculator = _BlankLineCalculator()
blank_line_calculator.Visit(tree)
| [
"def",
"CalculateBlankLines",
"(",
"tree",
")",
":",
"blank_line_calculator",
"=",
"_BlankLineCalculator",
"(",
")",
"blank_line_calculator",
".",
"Visit",
"(",
"tree",
")"
] | run the blank line calculator visitor over the tree . | train | false |
27,789 | @contextfunction
def services_service_list(context, services, skip_group=False):
request = context['request']
response_format = 'html'
if ('response_format' in context):
response_format = context['response_format']
return Markup(render_to_string('services/tags/service_list', {'services': services, 'skip_group': skip_group}, context_instance=RequestContext(request), response_format=response_format))
| [
"@",
"contextfunction",
"def",
"services_service_list",
"(",
"context",
",",
"services",
",",
"skip_group",
"=",
"False",
")",
":",
"request",
"=",
"context",
"[",
"'request'",
"]",
"response_format",
"=",
"'html'",
"if",
"(",
"'response_format'",
"in",
"context... | print a list of services . | train | false |
27,790 | def _handleBulletWidth(bulletText, style, maxWidths):
if bulletText:
if isinstance(bulletText, basestring):
bulletWidth = stringWidth(bulletText, style.bulletFontName, style.bulletFontSize)
else:
bulletWidth = 0
for f in bulletText:
bulletWidth = (bulletWidth + stringWidth(f.text, f.fontName, f.fontSize))
bulletRight = ((style.bulletIndent + bulletWidth) + (0.6 * style.bulletFontSize))
indent = (style.leftIndent + style.firstLineIndent)
if (bulletRight > indent):
maxWidths[0] -= (bulletRight - indent)
| [
"def",
"_handleBulletWidth",
"(",
"bulletText",
",",
"style",
",",
"maxWidths",
")",
":",
"if",
"bulletText",
":",
"if",
"isinstance",
"(",
"bulletText",
",",
"basestring",
")",
":",
"bulletWidth",
"=",
"stringWidth",
"(",
"bulletText",
",",
"style",
".",
"b... | work out bullet width and adjust maxwidths[0] if neccessary . | train | true |
27,791 | def cython(pyx_files, working_path=''):
if ((len(sys.argv) >= 2) and (sys.argv[1] == 'clean')):
return
try:
from Cython import __version__
if (LooseVersion(__version__) < '0.23'):
raise RuntimeError('Cython >= 0.23 needed to build scikit-image')
from Cython.Build import cythonize
except ImportError:
print ('Cython not found; falling back to pre-built %s' % ' '.join([f.replace('.pyx.in', 'c').replace('.pyx', '.c') for f in pyx_files]))
else:
for pyxfile in [os.path.join(working_path, f) for f in pyx_files]:
if (not _changed(pyxfile)):
continue
if pyxfile.endswith('.pyx.in'):
process_tempita_pyx(pyxfile)
pyxfile = pyxfile.replace('.pyx.in', '.pyx')
cythonize(pyxfile)
| [
"def",
"cython",
"(",
"pyx_files",
",",
"working_path",
"=",
"''",
")",
":",
"if",
"(",
"(",
"len",
"(",
"sys",
".",
"argv",
")",
">=",
"2",
")",
"and",
"(",
"sys",
".",
"argv",
"[",
"1",
"]",
"==",
"'clean'",
")",
")",
":",
"return",
"try",
... | use cython to convert the given files to c . | train | false |
27,792 | def _cmp_by_med(path1, path2):
def get_path_med(path):
med = path.get_pattr(BGP_ATTR_TYPE_MULTI_EXIT_DISC)
if (not med):
return 0
return med.value
med1 = get_path_med(path1)
med2 = get_path_med(path2)
if (med1 == med2):
return None
elif (med1 < med2):
return path1
return path2
| [
"def",
"_cmp_by_med",
"(",
"path1",
",",
"path2",
")",
":",
"def",
"get_path_med",
"(",
"path",
")",
":",
"med",
"=",
"path",
".",
"get_pattr",
"(",
"BGP_ATTR_TYPE_MULTI_EXIT_DISC",
")",
"if",
"(",
"not",
"med",
")",
":",
"return",
"0",
"return",
"med",
... | select the path based with lowest med value . | train | true |
27,794 | @cli.command('blur')
@click.option('-r', '--radius', default=2, show_default=True, help='The blur radius.')
@processor
def blur_cmd(images, radius):
blur = ImageFilter.GaussianBlur(radius)
for image in images:
click.echo(('Blurring "%s" by %dpx' % (image.filename, radius)))
(yield copy_filename(image.filter(blur), image))
| [
"@",
"cli",
".",
"command",
"(",
"'blur'",
")",
"@",
"click",
".",
"option",
"(",
"'-r'",
",",
"'--radius'",
",",
"default",
"=",
"2",
",",
"show_default",
"=",
"True",
",",
"help",
"=",
"'The blur radius.'",
")",
"@",
"processor",
"def",
"blur_cmd",
"... | applies gaussian blur . | train | false |
27,795 | def unconstrain_stationary_multivariate(constrained, error_variance):
from scipy import linalg
use_list = (type(constrained) == list)
if (not use_list):
(k_endog, order) = constrained.shape
order //= k_endog
constrained = [constrained[:k_endog, (i * k_endog):((i + 1) * k_endog)] for i in range(order)]
else:
order = len(constrained)
k_endog = constrained[0].shape[0]
partial_autocorrelations = _compute_multivariate_pacf_from_coefficients(constrained, error_variance, order, k_endog)
unconstrained = _unconstrain_sv_less_than_one(partial_autocorrelations, order, k_endog)
if (not use_list):
unconstrained = np.concatenate(unconstrained, axis=1)
return (unconstrained, error_variance)
| [
"def",
"unconstrain_stationary_multivariate",
"(",
"constrained",
",",
"error_variance",
")",
":",
"from",
"scipy",
"import",
"linalg",
"use_list",
"=",
"(",
"type",
"(",
"constrained",
")",
"==",
"list",
")",
"if",
"(",
"not",
"use_list",
")",
":",
"(",
"k_... | transform constrained parameters used in likelihood evaluation to unconstrained parameters used by the optimizer parameters constrained : array or list constrained parameters of . | train | false |
27,797 | def get_watcher_dirs(theme_dirs=None, themes=None):
dirs = []
dirs.extend(COMMON_LOOKUP_PATHS)
if (theme_dirs and themes):
themes = get_theme_paths(themes=themes, theme_dirs=theme_dirs)
for theme in themes:
for _dir in (get_sass_directories('lms', theme) + get_sass_directories('cms', theme)):
dirs.append(_dir['sass_source_dir'])
dirs.extend(_dir['lookup_paths'])
for _dir in ((get_sass_directories('lms') + get_sass_directories('cms')) + get_common_sass_directories()):
dirs.append(_dir['sass_source_dir'])
dirs.extend(_dir['lookup_paths'])
dirs = list(set(dirs))
return dirs
| [
"def",
"get_watcher_dirs",
"(",
"theme_dirs",
"=",
"None",
",",
"themes",
"=",
"None",
")",
":",
"dirs",
"=",
"[",
"]",
"dirs",
".",
"extend",
"(",
"COMMON_LOOKUP_PATHS",
")",
"if",
"(",
"theme_dirs",
"and",
"themes",
")",
":",
"themes",
"=",
"get_theme_... | return sass directories that need to be added to sass watcher . | train | false |
27,798 | @cronjobs.register
def purge_tweets():
pin_this_thread()
for locale in settings.SUMO_LANGUAGES:
locale = settings.LOCALES[locale].iso639_1
if (not locale):
continue
oldest = _get_oldest_tweet(locale, settings.CC_MAX_TWEETS)
if oldest:
log.debug(('Truncating tweet list: Removing tweets older than %s, for [%s].' % (oldest.created, locale)))
Tweet.objects.filter(locale=locale, created__lte=oldest.created).delete()
| [
"@",
"cronjobs",
".",
"register",
"def",
"purge_tweets",
"(",
")",
":",
"pin_this_thread",
"(",
")",
"for",
"locale",
"in",
"settings",
".",
"SUMO_LANGUAGES",
":",
"locale",
"=",
"settings",
".",
"LOCALES",
"[",
"locale",
"]",
".",
"iso639_1",
"if",
"(",
... | periodically purge old tweets for each locale . | train | false |
27,799 | def getProfileDescription(profile):
try:
if (not isinstance(profile, ImageCmsProfile)):
profile = ImageCmsProfile(profile)
return (profile.profile.product_description + '\n')
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
| [
"def",
"getProfileDescription",
"(",
"profile",
")",
":",
"try",
":",
"if",
"(",
"not",
"isinstance",
"(",
"profile",
",",
"ImageCmsProfile",
")",
")",
":",
"profile",
"=",
"ImageCmsProfile",
"(",
"profile",
")",
"return",
"(",
"profile",
".",
"profile",
"... | gets the description for the given profile . | train | false |
27,800 | def remove_qos(tenant_id, qos_id):
session = db.get_session()
try:
qos = session.query(l2network_models.QoS).filter_by(tenant_id=tenant_id).filter_by(qos_id=qos_id).one()
session.delete(qos)
session.flush()
return qos
except exc.NoResultFound:
pass
| [
"def",
"remove_qos",
"(",
"tenant_id",
",",
"qos_id",
")",
":",
"session",
"=",
"db",
".",
"get_session",
"(",
")",
"try",
":",
"qos",
"=",
"session",
".",
"query",
"(",
"l2network_models",
".",
"QoS",
")",
".",
"filter_by",
"(",
"tenant_id",
"=",
"ten... | removes a qos to tenant association . | train | false |
27,801 | def mock_object(**params):
return type('Mock', (), params)()
| [
"def",
"mock_object",
"(",
"**",
"params",
")",
":",
"return",
"type",
"(",
"'Mock'",
",",
"(",
")",
",",
"params",
")",
"(",
")"
] | creates an object using params to set attributes . | train | false |
27,804 | def floor(x):
return Floor()(x)
| [
"def",
"floor",
"(",
"x",
")",
":",
"return",
"Floor",
"(",
")",
"(",
"x",
")"
] | apply the floor function to each element of the matrix mat . | train | false |
27,805 | def find_function_info(func, spec, args):
module = getattr(func, '__module__', None)
name = getattr(func, '__name__', None)
self = getattr(func, '__self__', None)
cname = None
if self:
cname = self.__name__
elif (len(spec.args) and (spec.args[0] == 'self')):
cname = args[0].__class__.__name__
elif (len(spec.args) and (spec.args[0] == 'cls')):
cname = args[0].__name__
if name:
qname = []
if (module and (module != '__main__')):
qname.append(module)
qname.append('.')
if cname:
qname.append(cname)
qname.append('.')
qname.append(name)
name = ''.join(qname)
return (name, None)
| [
"def",
"find_function_info",
"(",
"func",
",",
"spec",
",",
"args",
")",
":",
"module",
"=",
"getattr",
"(",
"func",
",",
"'__module__'",
",",
"None",
")",
"name",
"=",
"getattr",
"(",
"func",
",",
"'__name__'",
",",
"None",
")",
"self",
"=",
"getattr"... | return function meta-data in a tuple . | train | false |
27,806 | def get_param_value(params, key):
try:
return params[key]
except KeyError:
LOG.error(_LE('Request does not contain %s parameter!'), key)
raise exception.HeatMissingParameterError(key)
| [
"def",
"get_param_value",
"(",
"params",
",",
"key",
")",
":",
"try",
":",
"return",
"params",
"[",
"key",
"]",
"except",
"KeyError",
":",
"LOG",
".",
"error",
"(",
"_LE",
"(",
"'Request does not contain %s parameter!'",
")",
",",
"key",
")",
"raise",
"exc... | looks up an expected parameter in a parsed params dict . | train | false |
27,808 | def create_cookie(name, value, **kwargs):
result = dict(version=0, name=name, value=value, port=None, domain='', path='/', secure=False, expires=None, discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False)
badargs = (set(kwargs) - set(result))
if badargs:
err = 'create_cookie() got unexpected keyword arguments: %s'
raise TypeError((err % list(badargs)))
result.update(kwargs)
result['port_specified'] = bool(result['port'])
result['domain_specified'] = bool(result['domain'])
result['domain_initial_dot'] = result['domain'].startswith('.')
result['path_specified'] = bool(result['path'])
return cookielib.Cookie(**result)
| [
"def",
"create_cookie",
"(",
"name",
",",
"value",
",",
"**",
"kwargs",
")",
":",
"result",
"=",
"dict",
"(",
"version",
"=",
"0",
",",
"name",
"=",
"name",
",",
"value",
"=",
"value",
",",
"port",
"=",
"None",
",",
"domain",
"=",
"''",
",",
"pat... | make a cookie from underspecified parameters . | train | true |
27,809 | def convert_path(pathname):
if (os.sep == '/'):
return pathname
if (not pathname):
return pathname
if (pathname[0] == '/'):
raise ValueError(("path '%s' cannot be absolute" % pathname))
if (pathname[(-1)] == '/'):
raise ValueError(("path '%s' cannot end with '/'" % pathname))
paths = pathname.split('/')
while ('.' in paths):
paths.remove('.')
if (not paths):
return os.curdir
return os.path.join(*paths)
| [
"def",
"convert_path",
"(",
"pathname",
")",
":",
"if",
"(",
"os",
".",
"sep",
"==",
"'/'",
")",
":",
"return",
"pathname",
"if",
"(",
"not",
"pathname",
")",
":",
"return",
"pathname",
"if",
"(",
"pathname",
"[",
"0",
"]",
"==",
"'/'",
")",
":",
... | return pathname as a name that will work on the native filesystem . | train | true |
27,812 | def get_default_fscluster_config():
return conf.HDFS_CLUSTERS[FS_DEFAULT_NAME]
| [
"def",
"get_default_fscluster_config",
"(",
")",
":",
"return",
"conf",
".",
"HDFS_CLUSTERS",
"[",
"FS_DEFAULT_NAME",
"]"
] | get the default fs config . | train | false |
27,814 | def expln(x):
def f(val):
if (val < 0):
return exp(val)
else:
return (log((val + 1.0)) + 1)
try:
result = array(list(map(f, x)))
except TypeError:
result = array(f(x))
return result
| [
"def",
"expln",
"(",
"x",
")",
":",
"def",
"f",
"(",
"val",
")",
":",
"if",
"(",
"val",
"<",
"0",
")",
":",
"return",
"exp",
"(",
"val",
")",
"else",
":",
"return",
"(",
"log",
"(",
"(",
"val",
"+",
"1.0",
")",
")",
"+",
"1",
")",
"try",
... | this continuous function ensures that the values of the array are always positive . | train | false |
27,815 | def ExtractToken(url, scopes_included_in_next=True):
parsed = urlparse.urlparse(url)
token = gdata.auth.AuthSubTokenFromUrl(parsed[4])
scopes = ''
if scopes_included_in_next:
for pair in parsed[4].split('&'):
if pair.startswith(('%s=' % SCOPE_URL_PARAM_NAME)):
scopes = urllib.unquote_plus(pair.split('=')[1])
return (token, scopes.split(' '))
| [
"def",
"ExtractToken",
"(",
"url",
",",
"scopes_included_in_next",
"=",
"True",
")",
":",
"parsed",
"=",
"urlparse",
".",
"urlparse",
"(",
"url",
")",
"token",
"=",
"gdata",
".",
"auth",
".",
"AuthSubTokenFromUrl",
"(",
"parsed",
"[",
"4",
"]",
")",
"sco... | gets the authsub token from the current pages url . | train | false |
27,817 | def squared_exponential(theta, d):
theta = np.asarray(theta, dtype=np.float64)
d = np.asarray(d, dtype=np.float64)
if (d.ndim > 1):
n_features = d.shape[1]
else:
n_features = 1
if (theta.size == 1):
return np.exp(((- theta[0]) * np.sum((d ** 2), axis=1)))
elif (theta.size != n_features):
raise ValueError(('Length of theta must be 1 or %s' % n_features))
else:
return np.exp((- np.sum((theta.reshape(1, n_features) * (d ** 2)), axis=1)))
| [
"def",
"squared_exponential",
"(",
"theta",
",",
"d",
")",
":",
"theta",
"=",
"np",
".",
"asarray",
"(",
"theta",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"d",
"=",
"np",
".",
"asarray",
"(",
"d",
",",
"dtype",
"=",
"np",
".",
"float64",
")"... | squared exponential correlation model . | train | false |
27,818 | @allow_jsonp
def register_public_key_server_auto(request):
public_key = urllib.unquote(request.GET.get('device_key', ''))
if RegisteredDevicePublicKey.objects.filter(public_key=public_key):
return HttpResponseForbidden('Device is already registered.')
zone = Zone(name=('Zone for public key %s' % public_key[:50]))
zone.save()
RegisteredDevicePublicKey(zone=zone, public_key=public_key).save()
return JsonResponse({})
| [
"@",
"allow_jsonp",
"def",
"register_public_key_server_auto",
"(",
"request",
")",
":",
"public_key",
"=",
"urllib",
".",
"unquote",
"(",
"request",
".",
"GET",
".",
"get",
"(",
"'device_key'",
",",
"''",
")",
")",
"if",
"RegisteredDevicePublicKey",
".",
"obje... | this function allows an anonymous client to request a device key to be associated with a new zone . | train | false |
27,820 | def cellname(rowx, colx):
return ('%s%d' % (colname(colx), (rowx + 1)))
| [
"def",
"cellname",
"(",
"rowx",
",",
"colx",
")",
":",
"return",
"(",
"'%s%d'",
"%",
"(",
"colname",
"(",
"colx",
")",
",",
"(",
"rowx",
"+",
"1",
")",
")",
")"
] | translate a cell coordinate to a fancy cell name (e . | train | false |
27,821 | @verbose
def fetch_hcp_mmp_parcellation(subjects_dir=None, verbose=None):
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
destination = op.join(subjects_dir, 'fsaverage', 'label')
fnames = [op.join(destination, 'lh.HCPMMP1.annot'), op.join(destination, 'rh.HCPMMP1.annot')]
if all((op.isfile(fname) for fname in fnames)):
return
if ('--accept-hcpmmp-license' in sys.argv):
answer = 'y'
else:
answer = input(('%s\nAgree (y/[n])? ' % _hcp_mmp_license_text))
if (answer.lower() != 'y'):
raise RuntimeError('You must agree to the license to use this dataset')
_fetch_file('https://ndownloader.figshare.com/files/5528816', fnames[0], hash_='46a102b59b2fb1bb4bd62d51bf02e975')
_fetch_file('https://ndownloader.figshare.com/files/5528819', fnames[1], hash_='75e96b331940227bbcb07c1c791c2463')
| [
"@",
"verbose",
"def",
"fetch_hcp_mmp_parcellation",
"(",
"subjects_dir",
"=",
"None",
",",
"verbose",
"=",
"None",
")",
":",
"subjects_dir",
"=",
"get_subjects_dir",
"(",
"subjects_dir",
",",
"raise_error",
"=",
"True",
")",
"destination",
"=",
"op",
".",
"jo... | fetch the hcp-mmp parcellation . | train | false |
27,823 | def in6_or(a1, a2):
return _in6_bitops(a1, a2, 0)
| [
"def",
"in6_or",
"(",
"a1",
",",
"a2",
")",
":",
"return",
"_in6_bitops",
"(",
"a1",
",",
"a2",
",",
"0",
")"
] | provides a bit to bit or of provided addresses . | train | false |
27,824 | def requirejs_xmodule(request):
return render_to_response('xmodule.js', {'urls': get_xmodule_urls()}, content_type='text/javascript')
| [
"def",
"requirejs_xmodule",
"(",
"request",
")",
":",
"return",
"render_to_response",
"(",
"'xmodule.js'",
",",
"{",
"'urls'",
":",
"get_xmodule_urls",
"(",
")",
"}",
",",
"content_type",
"=",
"'text/javascript'",
")"
] | view function that returns a requirejs-wrapped javascript file that loads all the xmodule urls; meant to be loaded via requirejs . | train | false |
27,825 | def search_autocomplete(request):
if ('term' in request.GET):
term = request.GET['term']
else:
raise Http404
queryset = Project.objects.public(request.user).filter(name__icontains=term)[:20]
ret_list = []
for project in queryset:
ret_list.append({'label': project.name, 'value': project.slug})
json_response = json.dumps(ret_list)
return HttpResponse(json_response, content_type='text/javascript')
| [
"def",
"search_autocomplete",
"(",
"request",
")",
":",
"if",
"(",
"'term'",
"in",
"request",
".",
"GET",
")",
":",
"term",
"=",
"request",
".",
"GET",
"[",
"'term'",
"]",
"else",
":",
"raise",
"Http404",
"queryset",
"=",
"Project",
".",
"objects",
"."... | return a json list of project names . | train | false |
27,827 | def append_makeopts(value):
return append_var('MAKEOPTS', value)
| [
"def",
"append_makeopts",
"(",
"value",
")",
":",
"return",
"append_var",
"(",
"'MAKEOPTS'",
",",
"value",
")"
] | add to or create a new makeopts in the make . | train | false |
27,828 | def rankedFitness(R):
res = zeros_like(R)
l = list(zip(R, list(range(len(R)))))
l.sort()
for (i, (_, j)) in enumerate(l):
res[j] = i
return res
| [
"def",
"rankedFitness",
"(",
"R",
")",
":",
"res",
"=",
"zeros_like",
"(",
"R",
")",
"l",
"=",
"list",
"(",
"zip",
"(",
"R",
",",
"list",
"(",
"range",
"(",
"len",
"(",
"R",
")",
")",
")",
")",
")",
"l",
".",
"sort",
"(",
")",
"for",
"(",
... | produce a linear ranking of the fitnesses in r . | train | false |
27,830 | def submit_course_survey_report(request, course_key):
task_type = 'course_survey_report'
task_class = course_survey_report_csv
task_input = {}
task_key = ''
return submit_task(request, task_type, task_class, course_key, task_input, task_key)
| [
"def",
"submit_course_survey_report",
"(",
"request",
",",
"course_key",
")",
":",
"task_type",
"=",
"'course_survey_report'",
"task_class",
"=",
"course_survey_report_csv",
"task_input",
"=",
"{",
"}",
"task_key",
"=",
"''",
"return",
"submit_task",
"(",
"request",
... | submits a task to generate a html file containing the executive summary report . | train | false |
27,831 | def ReadVFS(pathspec, offset, length, progress_callback=None):
fd = VFSOpen(pathspec, progress_callback=progress_callback)
fd.Seek(offset)
return fd.Read(length)
| [
"def",
"ReadVFS",
"(",
"pathspec",
",",
"offset",
",",
"length",
",",
"progress_callback",
"=",
"None",
")",
":",
"fd",
"=",
"VFSOpen",
"(",
"pathspec",
",",
"progress_callback",
"=",
"progress_callback",
")",
"fd",
".",
"Seek",
"(",
"offset",
")",
"return... | read from the vfs and return the contents . | train | true |
27,833 | def get_all_lexers():
for item in LEXERS.itervalues():
(yield item[1:])
for lexer in find_plugin_lexers():
(yield (lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes))
| [
"def",
"get_all_lexers",
"(",
")",
":",
"for",
"item",
"in",
"LEXERS",
".",
"itervalues",
"(",
")",
":",
"(",
"yield",
"item",
"[",
"1",
":",
"]",
")",
"for",
"lexer",
"in",
"find_plugin_lexers",
"(",
")",
":",
"(",
"yield",
"(",
"lexer",
".",
"nam... | return a generator of tuples in the form of all know lexers . | train | true |
27,834 | def extract_lazy_object(lo):
if (not hasattr(lo, '_wrapped')):
return lo
if (lo._wrapped is empty):
lo._setup()
return lo._wrapped
| [
"def",
"extract_lazy_object",
"(",
"lo",
")",
":",
"if",
"(",
"not",
"hasattr",
"(",
"lo",
",",
"'_wrapped'",
")",
")",
":",
"return",
"lo",
"if",
"(",
"lo",
".",
"_wrapped",
"is",
"empty",
")",
":",
"lo",
".",
"_setup",
"(",
")",
"return",
"lo",
... | unwrap a lazyobject and return the inner object . | train | false |
27,836 | @contextmanager
def ensure_clean(filename=None, return_filelike=False):
filename = (filename or '')
fd = None
if return_filelike:
f = tempfile.TemporaryFile(suffix=filename)
try:
(yield f)
finally:
f.close()
else:
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
(fd, filename) = tempfile.mkstemp(suffix=filename)
except UnicodeEncodeError:
import nose
raise nose.SkipTest('no unicode file names on this system')
try:
(yield filename)
finally:
try:
os.close(fd)
except Exception as e:
print ("Couldn't close file descriptor: %d (file: %s)" % (fd, filename))
try:
if os.path.exists(filename):
os.remove(filename)
except Exception as e:
print ('Exception on removing file: %s' % e)
| [
"@",
"contextmanager",
"def",
"ensure_clean",
"(",
"filename",
"=",
"None",
",",
"return_filelike",
"=",
"False",
")",
":",
"filename",
"=",
"(",
"filename",
"or",
"''",
")",
"fd",
"=",
"None",
"if",
"return_filelike",
":",
"f",
"=",
"tempfile",
".",
"Te... | gets a temporary path and agrees to remove on close . | train | false |
27,837 | def test_positive_integer_2():
assert (1 == positive_integer('1'))
| [
"def",
"test_positive_integer_2",
"(",
")",
":",
"assert",
"(",
"1",
"==",
"positive_integer",
"(",
"'1'",
")",
")"
] | assert that the tpot cli interfaces integer parsing returns the integer value of a string encoded integer when n > 0 . | train | false |
27,838 | @contextmanager
def override_provides(category, spec_list):
old_provides = _provide_specs[category]
_uncache(category)
_provide_specs[category] = spec_list
try:
(yield)
finally:
_uncache(category)
_provide_specs[category] = old_provides
| [
"@",
"contextmanager",
"def",
"override_provides",
"(",
"category",
",",
"spec_list",
")",
":",
"old_provides",
"=",
"_provide_specs",
"[",
"category",
"]",
"_uncache",
"(",
"category",
")",
"_provide_specs",
"[",
"category",
"]",
"=",
"spec_list",
"try",
":",
... | context manager to override provides for a given category . | train | false |
27,839 | def _count_diff_hashable(actual, expected):
(s, t) = (_ordered_count(actual), _ordered_count(expected))
result = []
for (elem, cnt_s) in s.items():
cnt_t = t.get(elem, 0)
if (cnt_s != cnt_t):
diff = _Mismatch(cnt_s, cnt_t, elem)
result.append(diff)
for (elem, cnt_t) in t.items():
if (elem not in s):
diff = _Mismatch(0, cnt_t, elem)
result.append(diff)
return result
| [
"def",
"_count_diff_hashable",
"(",
"actual",
",",
"expected",
")",
":",
"(",
"s",
",",
"t",
")",
"=",
"(",
"_ordered_count",
"(",
"actual",
")",
",",
"_ordered_count",
"(",
"expected",
")",
")",
"result",
"=",
"[",
"]",
"for",
"(",
"elem",
",",
"cnt... | returns list of triples where the counts differ . | train | false |
27,840 | def p_program_error(p):
p[0] = None
p.parser.error = 1
| [
"def",
"p_program_error",
"(",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"None",
"p",
".",
"parser",
".",
"error",
"=",
"1"
] | program : error . | train | false |
27,841 | def _list_snapshots(reactor, filesystem):
d = zfs_command(reactor, _list_snapshots_command(filesystem))
d.addCallback(_parse_snapshots, filesystem)
return d
| [
"def",
"_list_snapshots",
"(",
"reactor",
",",
"filesystem",
")",
":",
"d",
"=",
"zfs_command",
"(",
"reactor",
",",
"_list_snapshots_command",
"(",
"filesystem",
")",
")",
"d",
".",
"addCallback",
"(",
"_parse_snapshots",
",",
"filesystem",
")",
"return",
"d"... | list the snapshots of the given filesystem . | train | false |
27,842 | def test_globject():
objects = [GLObject() for i in range(10)]
ids = [ob.id for ob in objects]
assert (len(set(ids)) == len(objects))
commands = []
for ob in objects:
commands.extend(ob._glir.clear())
assert (len(commands) == len(objects))
for cmd in commands:
assert (cmd[0] == 'CREATE')
ob = objects[(-1)]
q = ob._glir
ob.delete()
cmd = q.clear()[(-1)]
assert (cmd[0] == 'DELETE')
| [
"def",
"test_globject",
"(",
")",
":",
"objects",
"=",
"[",
"GLObject",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"10",
")",
"]",
"ids",
"=",
"[",
"ob",
".",
"id",
"for",
"ob",
"in",
"objects",
"]",
"assert",
"(",
"len",
"(",
"set",
"(",
"ids",... | test gl object uinique id and glir create command . | train | false |
27,845 | def for_float_dtypes(name='dtype', no_float16=False):
if no_float16:
return for_dtypes(_regular_float_dtypes, name=name)
else:
return for_dtypes(_float_dtypes, name=name)
| [
"def",
"for_float_dtypes",
"(",
"name",
"=",
"'dtype'",
",",
"no_float16",
"=",
"False",
")",
":",
"if",
"no_float16",
":",
"return",
"for_dtypes",
"(",
"_regular_float_dtypes",
",",
"name",
"=",
"name",
")",
"else",
":",
"return",
"for_dtypes",
"(",
"_float... | decorator that checks the fixture with all float dtypes . | train | false |
27,847 | def clear_memoized_methods(obj, *method_names):
for key in list(getattr(obj, '_memoized_results', {}).keys()):
if (key[0] in method_names):
del obj._memoized_results[key]
property_dict = obj._cache_
for prop in method_names:
inner_attname = ('__%s' % prop)
if (inner_attname in property_dict):
del property_dict[inner_attname]
| [
"def",
"clear_memoized_methods",
"(",
"obj",
",",
"*",
"method_names",
")",
":",
"for",
"key",
"in",
"list",
"(",
"getattr",
"(",
"obj",
",",
"'_memoized_results'",
",",
"{",
"}",
")",
".",
"keys",
"(",
")",
")",
":",
"if",
"(",
"key",
"[",
"0",
"]... | clear the memoized method or @memoizeproperty results for the given method names from the given object . | train | true |
27,851 | def stream_response_to_file(response, path=None):
pre_opened = False
fd = None
filename = None
if path:
if isinstance(getattr(path, 'write', None), collections.Callable):
pre_opened = True
fd = path
filename = getattr(fd, 'name', None)
else:
fd = open(path, 'wb')
filename = path
else:
header = response.headers['content-disposition']
i = (header.find('filename=') + len('filename='))
filename = header[i:]
fd = open(filename, 'wb')
for chunk in response.iter_content(chunk_size=512):
fd.write(chunk)
if (not pre_opened):
fd.close()
return filename
| [
"def",
"stream_response_to_file",
"(",
"response",
",",
"path",
"=",
"None",
")",
":",
"pre_opened",
"=",
"False",
"fd",
"=",
"None",
"filename",
"=",
"None",
"if",
"path",
":",
"if",
"isinstance",
"(",
"getattr",
"(",
"path",
",",
"'write'",
",",
"None"... | stream a response body to the specified file . | train | true |
27,852 | def _plot_topo_onpick(event, show_func):
    # Matplotlib click callback for a topo plot: re-plot the picked channel
    # alone in a new figure via show_func(ax, ch_idx).
    orig_ax = event.inaxes
    # Ignore clicks outside any axes, or on axes carrying neither a single
    # channel index (_mne_ch_idx) nor a list of sub-axes (_mne_axs).
    if ((event.inaxes is None) or ((not hasattr(orig_ax, '_mne_ch_idx')) and (not hasattr(orig_ax, '_mne_axs')))):
        return
    import matplotlib.pyplot as plt
    try:
        if hasattr(orig_ax, '_mne_axs'):
            # Composite axes: hit-test the click coordinates against each
            # sub-axes' position rectangle -- pos appears to be
            # (x, y, width, height); TODO confirm against the plot builder.
            (x, y) = (event.xdata, event.ydata)
            for ax in orig_ax._mne_axs:
                if ((x >= ax.pos[0]) and (y >= ax.pos[1]) and (x <= (ax.pos[0] + ax.pos[2])) and (y <= (ax.pos[1] + ax.pos[3]))):
                    orig_ax = ax
                    break
            else:
                # Click landed between sub-axes; nothing to show.
                return
        ch_idx = orig_ax._mne_ch_idx
        face_color = orig_ax._mne_ax_face_color
        (fig, ax) = plt.subplots(1)
        plt.title(orig_ax._mne_ch_name)
        # NOTE(review): set_axis_bgcolor was removed in matplotlib 2.2 in
        # favor of set_facecolor -- confirm the supported matplotlib range.
        ax.set_axis_bgcolor(face_color)
        show_func(ax, ch_idx)
    except Exception as err:
        # Matplotlib swallows exceptions raised inside GUI callbacks, so
        # print before re-raising to make failures visible to the user.
        print(err)
        raise
| [
"def",
"_plot_topo_onpick",
"(",
"event",
",",
"show_func",
")",
":",
"orig_ax",
"=",
"event",
".",
"inaxes",
"if",
"(",
"(",
"event",
".",
"inaxes",
"is",
"None",
")",
"or",
"(",
"(",
"not",
"hasattr",
"(",
"orig_ax",
",",
"'_mne_ch_idx'",
")",
")",
... | onpick callback that shows a single channel in a new figure . | train | false |
27,853 | def export_usermessages_batch(input_path, output_path):
    # One batch of a parallel export: read a partial export file (messages
    # plus the user-profile ids it concerns), attach the matching
    # UserMessage rows, write the completed batch, and delete the input.
    with open(input_path, 'r') as input_file:
        output = ujson.loads(input_file.read())
    message_ids = [item['id'] for item in output['zerver_message']]
    user_profile_ids = set(output['zerver_userprofile_ids'])
    # These two keys are bookkeeping for this step only; strip them so they
    # do not leak into the final export file.
    del output['zerver_userprofile_ids']
    realm = Realm.objects.get(id=output['realm_id'])
    del output['realm_id']
    output['zerver_usermessage'] = fetch_usermessages(realm, set(message_ids), user_profile_ids, output_path)
    write_message_export(output_path, output)
    # Input file fully consumed; remove it so reruns don't reprocess it.
    os.unlink(input_path)
| [
"def",
"export_usermessages_batch",
"(",
"input_path",
",",
"output_path",
")",
":",
"with",
"open",
"(",
"input_path",
",",
"'r'",
")",
"as",
"input_file",
":",
"output",
"=",
"ujson",
".",
"loads",
"(",
"input_file",
".",
"read",
"(",
")",
")",
"message_... | as part of the system for doing parallel exports . | train | false |
def wraptext(text, width=70, initial_indent='', subsequent_indent=''):
    """Wrap *text* into a list of lines at most *width* characters wide.

    Thin convenience wrapper around :class:`textwrap.TextWrapper` with
    ``break_long_words`` disabled, so over-long words are kept intact.
    """
    return TextWrapper(
        width=width,
        initial_indent=initial_indent,
        subsequent_indent=subsequent_indent,
        break_long_words=False,
    ).wrap(text)
| [
"def",
"wraptext",
"(",
"text",
",",
"width",
"=",
"70",
",",
"initial_indent",
"=",
"''",
",",
"subsequent_indent",
"=",
"''",
")",
":",
"wrapper",
"=",
"TextWrapper",
"(",
"width",
"=",
"width",
",",
"initial_indent",
"=",
"initial_indent",
",",
"subsequ... | simple wrapper around the textwrap . | train | false |
def make_pidlockfile(path, acquire_timeout):
    """Make a TimeoutPIDLockFile instance for the given filesystem path.

    :param path: absolute filesystem path string for the PID lock file.
    :param acquire_timeout: timeout passed through to the lock file.
    :raises ValueError: if *path* is not a string or not absolute.
    """
    # Guard clauses: validate the path before constructing the lock file.
    if not isinstance(path, basestring):
        raise ValueError('Not a filesystem path: %(path)r' % vars())
    if not os.path.isabs(path):
        raise ValueError('Not an absolute path: %(path)r' % vars())
    return pidlockfile.TimeoutPIDLockFile(path, acquire_timeout)
| [
"def",
"make_pidlockfile",
"(",
"path",
",",
"acquire_timeout",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"path",
",",
"basestring",
")",
")",
":",
"error",
"=",
"ValueError",
"(",
"(",
"'Not a filesystem path: %(path)r'",
"%",
"vars",
"(",
")",
")",
... | make a pidlockfile instance with the given filesystem path . | train | false |
def build_addon_button(text, action, title=''):
    """Assemble an action-button dict for rendering in HGrid.

    When *title* is non-empty, tooltip markup is attached under the
    ``'attributes'`` key.
    """
    button = dict(text=text, action=action)
    if title:
        tooltip = 'title="{title}" data-toggle="tooltip" data-placement="right" '
        button['attributes'] = tooltip.format(title=title)
    return button
| [
"def",
"build_addon_button",
"(",
"text",
",",
"action",
",",
"title",
"=",
"''",
")",
":",
"button",
"=",
"{",
"'text'",
":",
"text",
",",
"'action'",
":",
"action",
"}",
"if",
"title",
":",
"button",
"[",
"'attributes'",
"]",
"=",
"'title=\"{title}\" d... | builds am action button to be rendered in hgrid . | train | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.