id_within_dataset int64 1 55.5k | snippet stringlengths 19 14.2k | tokens listlengths 6 1.63k | nl stringlengths 6 352 | split_within_dataset stringclasses 1 value | is_duplicated bool 2 classes |
|---|---|---|---|---|---|
33,762 | def in_nested_list(nested_list, obj):
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif (elmt == obj):
return True
return False
| [
"def",
"in_nested_list",
"(",
"nested_list",
",",
"obj",
")",
":",
"for",
"elmt",
"in",
"nested_list",
":",
"if",
"isinstance",
"(",
"elmt",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"if",
"in_nested_list",
"(",
"elmt",
",",
"obj",
")",
":",
"re... | return true if the object is an element of <nested_list> or of a nested list . | train | true |
33,764 | def ld_svhn(extended=False, test_only=False):
file_urls = ['http://ufldl.stanford.edu/housenumbers/train_32x32.mat', 'http://ufldl.stanford.edu/housenumbers/test_32x32.mat', 'http://ufldl.stanford.edu/housenumbers/extra_32x32.mat']
local_urls = maybe_download(file_urls, FLAGS.data_dir)
if (not test_only):
(train_data, train_labels) = extract_svhn(local_urls[0])
train_data = image_whitening(train_data)
(ext_data, ext_labels) = extract_svhn(local_urls[2])
ext_data = image_whitening(ext_data)
(test_data, test_labels) = extract_svhn(local_urls[1])
test_data = image_whitening(test_data)
if test_only:
return (test_data, test_labels)
elif extended:
train_data = np.vstack((train_data, ext_data))
train_labels = np.hstack((train_labels, ext_labels))
return (train_data, train_labels, test_data, test_labels)
else:
return (train_data, train_labels, test_data, test_labels, ext_data, ext_labels)
| [
"def",
"ld_svhn",
"(",
"extended",
"=",
"False",
",",
"test_only",
"=",
"False",
")",
":",
"file_urls",
"=",
"[",
"'http://ufldl.stanford.edu/housenumbers/train_32x32.mat'",
",",
"'http://ufldl.stanford.edu/housenumbers/test_32x32.mat'",
",",
"'http://ufldl.stanford.edu/housenu... | load the original svhn data . | train | false |
33,765 | def get_user_info(http_cookie, cookie_name=_COOKIE_NAME):
try:
cookie = Cookie.SimpleCookie(http_cookie)
except Cookie.CookieError:
return ('', False, '')
cookie_dict = dict(((k, v.value) for (k, v) in cookie.iteritems()))
return _get_user_info_from_dict(cookie_dict, cookie_name)
| [
"def",
"get_user_info",
"(",
"http_cookie",
",",
"cookie_name",
"=",
"_COOKIE_NAME",
")",
":",
"try",
":",
"cookie",
"=",
"Cookie",
".",
"SimpleCookie",
"(",
"http_cookie",
")",
"except",
"Cookie",
".",
"CookieError",
":",
"return",
"(",
"''",
",",
"False",
... | returns an s3 user with . | train | false |
33,766 | def safe_get_host(request):
if (isinstance(settings.ALLOWED_HOSTS, (list, tuple)) and ('*' not in settings.ALLOWED_HOSTS)):
return request.get_host()
else:
return configuration_helpers.get_value('site_domain', settings.SITE_NAME)
| [
"def",
"safe_get_host",
"(",
"request",
")",
":",
"if",
"(",
"isinstance",
"(",
"settings",
".",
"ALLOWED_HOSTS",
",",
"(",
"list",
",",
"tuple",
")",
")",
"and",
"(",
"'*'",
"not",
"in",
"settings",
".",
"ALLOWED_HOSTS",
")",
")",
":",
"return",
"requ... | get the host name for this request . | train | false |
33,769 | def generate_refund_notification_body(student, refund_ids):
msg = _(u'A refund request has been initiated for {username} ({email}). To process this request, please visit the link(s) below.').format(username=student.username, email=student.email)
ecommerce_url_root = configuration_helpers.get_value(u'ECOMMERCE_PUBLIC_URL_ROOT', settings.ECOMMERCE_PUBLIC_URL_ROOT)
refund_urls = [urljoin(ecommerce_url_root, u'/dashboard/refunds/{}/'.format(refund_id)) for refund_id in refund_ids]
return u'{msg}\n\n{urls}'.format(msg=msg, urls=u'\n'.join(refund_urls))
| [
"def",
"generate_refund_notification_body",
"(",
"student",
",",
"refund_ids",
")",
":",
"msg",
"=",
"_",
"(",
"u'A refund request has been initiated for {username} ({email}). To process this request, please visit the link(s) below.'",
")",
".",
"format",
"(",
"username",
"=",
... | returns a refund notification message body . | train | false |
33,772 | def finger_master(hash_type=None):
if (hash_type is None):
hash_type = __opts__['hash_type']
return salt.utils.pem_finger(os.path.join(__opts__['pki_dir'], 'minion_master.pub'), sum_type=hash_type)
| [
"def",
"finger_master",
"(",
"hash_type",
"=",
"None",
")",
":",
"if",
"(",
"hash_type",
"is",
"None",
")",
":",
"hash_type",
"=",
"__opts__",
"[",
"'hash_type'",
"]",
"return",
"salt",
".",
"utils",
".",
"pem_finger",
"(",
"os",
".",
"path",
".",
"joi... | return the fingerprint of the masters public key on the minion . | train | false |
33,773 | def execute_cleanup_tasks(ctx, cleanup_tasks, dry_run=False):
executor = Executor(cleanup_tasks, ctx.config)
for cleanup_task in cleanup_tasks.tasks:
print ('CLEANUP TASK: %s' % cleanup_task)
executor.execute((cleanup_task, dict(dry_run=dry_run)))
| [
"def",
"execute_cleanup_tasks",
"(",
"ctx",
",",
"cleanup_tasks",
",",
"dry_run",
"=",
"False",
")",
":",
"executor",
"=",
"Executor",
"(",
"cleanup_tasks",
",",
"ctx",
".",
"config",
")",
"for",
"cleanup_task",
"in",
"cleanup_tasks",
".",
"tasks",
":",
"pri... | execute several cleanup tasks as part of the cleanup . | train | true |
33,774 | @receiver(COURSE_CERT_AWARDED, sender=GeneratedCertificate)
def create_completion_badge(sender, user, course_key, status, **kwargs):
completion_check(user)
| [
"@",
"receiver",
"(",
"COURSE_CERT_AWARDED",
",",
"sender",
"=",
"GeneratedCertificate",
")",
"def",
"create_completion_badge",
"(",
"sender",
",",
"user",
",",
"course_key",
",",
"status",
",",
"**",
"kwargs",
")",
":",
"completion_check",
"(",
"user",
")"
] | standard signal hook to create x courses completed badges when a certificate has been generated . | train | false |
33,776 | def MEDPRICE(barDs, count):
return call_talib_with_hl(barDs, count, talib.MEDPRICE)
| [
"def",
"MEDPRICE",
"(",
"barDs",
",",
"count",
")",
":",
"return",
"call_talib_with_hl",
"(",
"barDs",
",",
"count",
",",
"talib",
".",
"MEDPRICE",
")"
] | median price . | train | false |
33,778 | def c_digs(client):
roomname = (ROOM_TEMPLATE % client.counter())
exitname1 = (EXIT_TEMPLATE % client.counter())
exitname2 = (EXIT_TEMPLATE % client.counter())
client.exits.extend([exitname1, exitname2])
return ('@dig/tel %s = %s, %s' % (roomname, exitname1, exitname2))
| [
"def",
"c_digs",
"(",
"client",
")",
":",
"roomname",
"=",
"(",
"ROOM_TEMPLATE",
"%",
"client",
".",
"counter",
"(",
")",
")",
"exitname1",
"=",
"(",
"EXIT_TEMPLATE",
"%",
"client",
".",
"counter",
"(",
")",
")",
"exitname2",
"=",
"(",
"EXIT_TEMPLATE",
... | digs a new room . | train | false |
33,779 | @not_implemented_for('directed')
@not_implemented_for('multigraph')
def communicability_betweenness_centrality(G, normalized=True):
import scipy
import scipy.linalg
nodelist = list(G)
n = len(nodelist)
A = nx.to_numpy_matrix(G, nodelist)
A[(A != 0.0)] = 1
expA = scipy.linalg.expm(A.A)
mapping = dict(zip(nodelist, range(n)))
cbc = {}
for v in G:
i = mapping[v]
row = A[i, :].copy()
col = A[:, i].copy()
A[i, :] = 0
A[:, i] = 0
B = ((expA - scipy.linalg.expm(A)) / expA)
B[i, :] = 0
B[:, i] = 0
B -= scipy.diag(scipy.diag(B))
cbc[v] = float(B.sum())
A[i, :] = row
A[:, i] = col
cbc = _rescale(cbc, normalized=normalized)
return cbc
| [
"@",
"not_implemented_for",
"(",
"'directed'",
")",
"@",
"not_implemented_for",
"(",
"'multigraph'",
")",
"def",
"communicability_betweenness_centrality",
"(",
"G",
",",
"normalized",
"=",
"True",
")",
":",
"import",
"scipy",
"import",
"scipy",
".",
"linalg",
"nod... | return subgraph communicability for all pairs of nodes in g . | train | false |
33,780 | def send_ip_addr_adv_notif(ns_name, iface_name, address, count=3, log_exception=True):
def arping():
_arping(ns_name, iface_name, address, count, log_exception)
if ((count > 0) and (netaddr.IPAddress(address).version == 4)):
eventlet.spawn_n(arping)
| [
"def",
"send_ip_addr_adv_notif",
"(",
"ns_name",
",",
"iface_name",
",",
"address",
",",
"count",
"=",
"3",
",",
"log_exception",
"=",
"True",
")",
":",
"def",
"arping",
"(",
")",
":",
"_arping",
"(",
"ns_name",
",",
"iface_name",
",",
"address",
",",
"c... | send advance notification of an ip address assignment . | train | false |
33,781 | def _selection_key_press(event, params):
if (event.key == 'down'):
_change_channel_group((-1), params)
elif (event.key == 'up'):
_change_channel_group(1, params)
elif (event.key == 'escape'):
_close_event(event, params)
| [
"def",
"_selection_key_press",
"(",
"event",
",",
"params",
")",
":",
"if",
"(",
"event",
".",
"key",
"==",
"'down'",
")",
":",
"_change_channel_group",
"(",
"(",
"-",
"1",
")",
",",
"params",
")",
"elif",
"(",
"event",
".",
"key",
"==",
"'up'",
")",... | callback for keys in selection dialog . | train | false |
33,783 | def _update_subsection_grades(course_key, scored_block_usage_key, only_if_higher, user_id):
student = User.objects.get(id=user_id)
store = modulestore()
with store.bulk_operations(course_key):
course_structure = get_course_blocks(student, store.make_course_usage_key(course_key))
subsections_to_update = course_structure.get_transformer_block_field(scored_block_usage_key, GradesTransformer, 'subsections', set())
course = store.get_course(course_key, depth=0)
subsection_grade_factory = SubsectionGradeFactory(student, course, course_structure)
for subsection_usage_key in subsections_to_update:
if (subsection_usage_key in course_structure):
subsection_grade = subsection_grade_factory.update(course_structure[subsection_usage_key], only_if_higher)
SUBSECTION_SCORE_CHANGED.send(sender=None, course=course, course_structure=course_structure, user=student, subsection_grade=subsection_grade)
| [
"def",
"_update_subsection_grades",
"(",
"course_key",
",",
"scored_block_usage_key",
",",
"only_if_higher",
",",
"user_id",
")",
":",
"student",
"=",
"User",
".",
"objects",
".",
"get",
"(",
"id",
"=",
"user_id",
")",
"store",
"=",
"modulestore",
"(",
")",
... | a helper function to update subsection grades in the database for each subsection containing the given block . | train | false |
33,785 | def mock_get(status_code=200):
def _(url=None, *args, **kwargs):
'\n mock method for requests.get, this method will read xml file, form a Response object from the\n contents of this file, set status code and return the Response object.\n '
url = (url.split('/')[(-1)] if url else 'testshib-providers.xml')
file_path = (os.path.dirname(os.path.realpath(__file__)) + '/test_data/{}'.format(url))
with open(file_path) as providers:
xml = providers.read()
response = Response()
response._content = xml
response.status_code = status_code
return response
return _
| [
"def",
"mock_get",
"(",
"status_code",
"=",
"200",
")",
":",
"def",
"_",
"(",
"url",
"=",
"None",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"url",
"=",
"(",
"url",
".",
"split",
"(",
"'/'",
")",
"[",
"(",
"-",
"1",
")",
"]",
"if",
"u... | args: status_code : integer showing the status code for the response object . | train | false |
33,787 | def flip_index(i, n):
return ((n - i) - 1)
| [
"def",
"flip_index",
"(",
"i",
",",
"n",
")",
":",
"return",
"(",
"(",
"n",
"-",
"i",
")",
"-",
"1",
")"
] | reorder qubit indices from largest to smallest . | train | false |
33,788 | def del_none(d):
for (key, value) in d.items():
if (value is None):
del d[key]
elif isinstance(value, dict):
del_none(value)
elif isinstance(value, list):
for v in value:
if isinstance(v, dict):
del_none(v)
if (not v):
d[key] = []
return d
| [
"def",
"del_none",
"(",
"d",
")",
":",
"for",
"(",
"key",
",",
"value",
")",
"in",
"d",
".",
"items",
"(",
")",
":",
"if",
"(",
"value",
"is",
"None",
")",
":",
"del",
"d",
"[",
"key",
"]",
"elif",
"isinstance",
"(",
"value",
",",
"dict",
")"... | delete keys with the value none in a dictionary . | train | false |
33,789 | def rect_to_tuple(rect):
return (rect.x(), rect.y(), rect.width(), rect.height())
| [
"def",
"rect_to_tuple",
"(",
"rect",
")",
":",
"return",
"(",
"rect",
".",
"x",
"(",
")",
",",
"rect",
".",
"y",
"(",
")",
",",
"rect",
".",
"width",
"(",
")",
",",
"rect",
".",
"height",
"(",
")",
")"
] | convert a qrectf into a tuple . | train | false |
33,790 | def QuoteShellArgument(arg, flavor):
if re.match('^[a-zA-Z0-9_=.\\\\/-]+$', arg):
return arg
if (flavor == 'win'):
return gyp.msvs_emulation.QuoteForRspFile(arg)
return (("'" + arg.replace("'", (("'" + '"\'"') + "'"))) + "'")
| [
"def",
"QuoteShellArgument",
"(",
"arg",
",",
"flavor",
")",
":",
"if",
"re",
".",
"match",
"(",
"'^[a-zA-Z0-9_=.\\\\\\\\/-]+$'",
",",
"arg",
")",
":",
"return",
"arg",
"if",
"(",
"flavor",
"==",
"'win'",
")",
":",
"return",
"gyp",
".",
"msvs_emulation",
... | quote a string such that it will be interpreted as a single argument by the shell . | train | false |
33,791 | def inspect(subject, raiseerr=True):
type_ = type(subject)
for cls in type_.__mro__:
if (cls in _registrars):
reg = _registrars[cls]
if (reg is True):
return subject
ret = reg(subject)
if (ret is not None):
break
else:
reg = ret = None
if (raiseerr and ((reg is None) or (ret is None))):
raise exc.NoInspectionAvailable(('No inspection system is available for object of type %s' % type_))
return ret
| [
"def",
"inspect",
"(",
"subject",
",",
"raiseerr",
"=",
"True",
")",
":",
"type_",
"=",
"type",
"(",
"subject",
")",
"for",
"cls",
"in",
"type_",
".",
"__mro__",
":",
"if",
"(",
"cls",
"in",
"_registrars",
")",
":",
"reg",
"=",
"_registrars",
"[",
... | produce an inspection object for the given target . | train | false |
33,793 | @unbox(types.Buffer)
def unbox_buffer(typ, obj, c):
buf = c.pyapi.alloca_buffer()
res = c.pyapi.get_buffer(obj, buf)
is_error = cgutils.is_not_null(c.builder, res)
nativearycls = c.context.make_array(typ)
nativeary = nativearycls(c.context, c.builder)
aryptr = nativeary._getpointer()
with cgutils.if_likely(c.builder, c.builder.not_(is_error)):
ptr = c.builder.bitcast(aryptr, c.pyapi.voidptr)
if c.context.enable_nrt:
c.pyapi.nrt_adapt_buffer_from_python(buf, ptr)
else:
c.pyapi.numba_buffer_adaptor(buf, ptr)
def cleanup():
c.pyapi.release_buffer(buf)
return NativeValue(c.builder.load(aryptr), is_error=is_error, cleanup=cleanup)
| [
"@",
"unbox",
"(",
"types",
".",
"Buffer",
")",
"def",
"unbox_buffer",
"(",
"typ",
",",
"obj",
",",
"c",
")",
":",
"buf",
"=",
"c",
".",
"pyapi",
".",
"alloca_buffer",
"(",
")",
"res",
"=",
"c",
".",
"pyapi",
".",
"get_buffer",
"(",
"obj",
",",
... | convert a py_buffer-providing object to a native array structure . | train | false |
33,796 | def dup_diff(f, m, K):
if (m <= 0):
return f
n = dup_degree(f)
if (n < m):
return []
deriv = []
if (m == 1):
for coeff in f[:(- m)]:
deriv.append((K(n) * coeff))
n -= 1
else:
for coeff in f[:(- m)]:
k = n
for i in range((n - 1), (n - m), (-1)):
k *= i
deriv.append((K(k) * coeff))
n -= 1
return dup_strip(deriv)
| [
"def",
"dup_diff",
"(",
"f",
",",
"m",
",",
"K",
")",
":",
"if",
"(",
"m",
"<=",
"0",
")",
":",
"return",
"f",
"n",
"=",
"dup_degree",
"(",
"f",
")",
"if",
"(",
"n",
"<",
"m",
")",
":",
"return",
"[",
"]",
"deriv",
"=",
"[",
"]",
"if",
... | m-th order derivative of a polynomial in k[x] . | train | false |
33,797 | def _get_exog_re_names(self, exog_re):
if (self.k_re == 0):
return []
if isinstance(exog_re, pd.DataFrame):
return exog_re.columns.tolist()
elif (isinstance(exog_re, pd.Series) and (exog_re.name is not None)):
return [exog_re.name]
elif isinstance(exog_re, list):
return exog_re
defnames = ['x_re{0:1d}'.format((k + 1)) for k in range(exog_re.shape[1])]
return defnames
| [
"def",
"_get_exog_re_names",
"(",
"self",
",",
"exog_re",
")",
":",
"if",
"(",
"self",
".",
"k_re",
"==",
"0",
")",
":",
"return",
"[",
"]",
"if",
"isinstance",
"(",
"exog_re",
",",
"pd",
".",
"DataFrame",
")",
":",
"return",
"exog_re",
".",
"columns... | passes through if given a list of names . | train | false |
33,798 | def ptmsiReallocationCommand(PTmsiSignature_presence=0):
a = TpPd(pd=3)
b = MessageType(mesType=16)
c = MobileId()
d = RoutingAreaIdentification()
e = ForceToStandbyAndSpareHalfOctets()
packet = ((((a / b) / c) / d) / e)
if (PTmsiSignature_presence is 1):
g = PTmsiSignature(ieiPTS=25)
packet = (packet / g)
return packet
| [
"def",
"ptmsiReallocationCommand",
"(",
"PTmsiSignature_presence",
"=",
"0",
")",
":",
"a",
"=",
"TpPd",
"(",
"pd",
"=",
"3",
")",
"b",
"=",
"MessageType",
"(",
"mesType",
"=",
"16",
")",
"c",
"=",
"MobileId",
"(",
")",
"d",
"=",
"RoutingAreaIdentificati... | p-tmsi reallocation command section 9 . | train | true |
33,799 | def _DropCommonSuffixes(filename):
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc', 'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and (len(filename) > len(suffix)) and (filename[((- len(suffix)) - 1)] in ('-', '_'))):
return filename[:((- len(suffix)) - 1)]
return os.path.splitext(filename)[0]
| [
"def",
"_DropCommonSuffixes",
"(",
"filename",
")",
":",
"for",
"suffix",
"in",
"(",
"'test.cc'",
",",
"'regtest.cc'",
",",
"'unittest.cc'",
",",
"'inl.h'",
",",
"'impl.h'",
",",
"'internal.h'",
")",
":",
"if",
"(",
"filename",
".",
"endswith",
"(",
"suffix"... | drops common suffixes like _test . | train | true |
33,800 | def is_legal_resource_base_name(name):
if (name is None):
return False
m = BASE_NAME_LEGAL_CHARS_P.match(name)
return ((m is not None) and (m.group(0) == name))
| [
"def",
"is_legal_resource_base_name",
"(",
"name",
")",
":",
"if",
"(",
"name",
"is",
"None",
")",
":",
"return",
"False",
"m",
"=",
"BASE_NAME_LEGAL_CHARS_P",
".",
"match",
"(",
"name",
")",
"return",
"(",
"(",
"m",
"is",
"not",
"None",
")",
"and",
"(... | validates that name is a legal resource base name . | train | false |
33,801 | def load_base_library():
library = dict()
library.update(read_style_directory(BASE_LIBRARY_PATH))
return library
| [
"def",
"load_base_library",
"(",
")",
":",
"library",
"=",
"dict",
"(",
")",
"library",
".",
"update",
"(",
"read_style_directory",
"(",
"BASE_LIBRARY_PATH",
")",
")",
"return",
"library"
] | load style library defined in this package . | train | false |
33,802 | def nova_is_not(logical_line):
split_line = logical_line.split()
if ((len(split_line) == 5) and (split_line[0] == 'if') and (split_line[1] == 'not') and (split_line[3] == 'is')):
(yield (logical_line.find('not'), "N901: Use the 'is not' operator for when testing for unequal identities"))
| [
"def",
"nova_is_not",
"(",
"logical_line",
")",
":",
"split_line",
"=",
"logical_line",
".",
"split",
"(",
")",
"if",
"(",
"(",
"len",
"(",
"split_line",
")",
"==",
"5",
")",
"and",
"(",
"split_line",
"[",
"0",
"]",
"==",
"'if'",
")",
"and",
"(",
"... | check localization in line . | train | false |
33,804 | def autoassign(self, locals):
for (k, v) in locals.iteritems():
if (k == 'self'):
continue
setattr(self, k, v)
| [
"def",
"autoassign",
"(",
"self",
",",
"locals",
")",
":",
"for",
"(",
"k",
",",
"v",
")",
"in",
"locals",
".",
"iteritems",
"(",
")",
":",
"if",
"(",
"k",
"==",
"'self'",
")",
":",
"continue",
"setattr",
"(",
"self",
",",
"k",
",",
"v",
")"
] | automatically assigns local variables to self . | train | false |
33,805 | def _dt_to_epoch_ns(dt_series):
index = pd.to_datetime(dt_series.values)
if (index.tzinfo is None):
index = index.tz_localize('UTC')
else:
index = index.tz_convert('UTC')
return index.view(np.int64)
| [
"def",
"_dt_to_epoch_ns",
"(",
"dt_series",
")",
":",
"index",
"=",
"pd",
".",
"to_datetime",
"(",
"dt_series",
".",
"values",
")",
"if",
"(",
"index",
".",
"tzinfo",
"is",
"None",
")",
":",
"index",
"=",
"index",
".",
"tz_localize",
"(",
"'UTC'",
")",... | convert a timeseries into an int64index of nanoseconds since the epoch . | train | true |
33,806 | def is_current_user_capable(api_name):
user = get_current_user()
if (not user):
sys.stderr.write((('user is not logged in - cannot use api ' + api_name) + '\n'))
return False
email = user.email()
return is_user_capable(email, api_name)
| [
"def",
"is_current_user_capable",
"(",
"api_name",
")",
":",
"user",
"=",
"get_current_user",
"(",
")",
"if",
"(",
"not",
"user",
")",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"(",
"(",
"'user is not logged in - cannot use api '",
"+",
"api_name",
")",
... | checks to see if the current user is capable to run a certain api args: api_name: a string of the api to check for returns: true if capable . | train | false |
33,807 | def download_output(project_id, cluster_id, output_bucket, job_id):
print 'Downloading output file'
client = storage.Client(project=project_id)
bucket = client.get_bucket(output_bucket)
output_blob = 'google-cloud-dataproc-metainfo/{}/jobs/{}/driveroutput.000000000'.format(cluster_id, job_id)
return bucket.blob(output_blob).download_as_string()
| [
"def",
"download_output",
"(",
"project_id",
",",
"cluster_id",
",",
"output_bucket",
",",
"job_id",
")",
":",
"print",
"'Downloading output file'",
"client",
"=",
"storage",
".",
"Client",
"(",
"project",
"=",
"project_id",
")",
"bucket",
"=",
"client",
".",
... | downloads the output file from cloud storage and returns it as a string . | train | false |
33,808 | def parse_cols_arg(cols):
if cols:
if cols.endswith(','):
cols += '0'
col_list = [(int(x) - 1) for x in cols.split(',')]
return col_list
else:
return BED_DEFAULT_COLS
| [
"def",
"parse_cols_arg",
"(",
"cols",
")",
":",
"if",
"cols",
":",
"if",
"cols",
".",
"endswith",
"(",
"','",
")",
":",
"cols",
"+=",
"'0'",
"col_list",
"=",
"[",
"(",
"int",
"(",
"x",
")",
"-",
"1",
")",
"for",
"x",
"in",
"cols",
".",
"split",... | parse a columns command line argument into a four-tuple . | train | false |
33,809 | def _format(seconds, *divisors):
results = []
seconds = int(seconds)
for divisor in divisors:
results.append((seconds // divisor))
seconds %= divisor
results.append(seconds)
return tuple(results)
| [
"def",
"_format",
"(",
"seconds",
",",
"*",
"divisors",
")",
":",
"results",
"=",
"[",
"]",
"seconds",
"=",
"int",
"(",
"seconds",
")",
"for",
"divisor",
"in",
"divisors",
":",
"results",
".",
"append",
"(",
"(",
"seconds",
"//",
"divisor",
")",
")",... | helper function . | train | false |
33,812 | def selRoulette(individuals, k):
s_inds = sorted(individuals, key=attrgetter('fitness'), reverse=True)
sum_fits = sum((ind.fitness.values[0] for ind in individuals))
chosen = []
for i in xrange(k):
u = (random.random() * sum_fits)
sum_ = 0
for ind in s_inds:
sum_ += ind.fitness.values[0]
if (sum_ > u):
chosen.append(ind)
break
return chosen
| [
"def",
"selRoulette",
"(",
"individuals",
",",
"k",
")",
":",
"s_inds",
"=",
"sorted",
"(",
"individuals",
",",
"key",
"=",
"attrgetter",
"(",
"'fitness'",
")",
",",
"reverse",
"=",
"True",
")",
"sum_fits",
"=",
"sum",
"(",
"(",
"ind",
".",
"fitness",
... | select *k* individuals from the input *individuals* using *k* spins of a roulette . | train | false |
33,813 | def _get_derivative(coordinates):
try:
return (coordinates[1] / coordinates[0])
except ZeroDivisionError:
return float('inf')
| [
"def",
"_get_derivative",
"(",
"coordinates",
")",
":",
"try",
":",
"return",
"(",
"coordinates",
"[",
"1",
"]",
"/",
"coordinates",
"[",
"0",
"]",
")",
"except",
"ZeroDivisionError",
":",
"return",
"float",
"(",
"'inf'",
")"
] | get derivative of the line from to given coordinates . | train | false |
33,815 | def sdm_monomial_divides(A, B):
return ((A[0] == B[0]) and all(((a <= b) for (a, b) in zip(A[1:], B[1:]))))
| [
"def",
"sdm_monomial_divides",
"(",
"A",
",",
"B",
")",
":",
"return",
"(",
"(",
"A",
"[",
"0",
"]",
"==",
"B",
"[",
"0",
"]",
")",
"and",
"all",
"(",
"(",
"(",
"a",
"<=",
"b",
")",
"for",
"(",
"a",
",",
"b",
")",
"in",
"zip",
"(",
"A",
... | does there exist a monomial x such that xa = b? examples positive examples: in the following examples . | train | false |
33,816 | def oldgc(phenny, input):
query = input.group(2)
if (not query):
return phenny.reply('.gc what?')
query = query.encode('utf-8')
num = formatnumber(google_count(query))
phenny.say(((query + ': ') + num))
| [
"def",
"oldgc",
"(",
"phenny",
",",
"input",
")",
":",
"query",
"=",
"input",
".",
"group",
"(",
"2",
")",
"if",
"(",
"not",
"query",
")",
":",
"return",
"phenny",
".",
"reply",
"(",
"'.gc what?'",
")",
"query",
"=",
"query",
".",
"encode",
"(",
... | returns the number of google results for the specified input . | train | false |
33,817 | def get_oauth_client(server_token_url, client_id, client_secret):
if (not is_valid_url(server_token_url)):
return
client = BackendApplicationClient(client_id=client_id)
oauth_ccxcon = OAuth2Session(client=client)
oauth_ccxcon.fetch_token(token_url=server_token_url, client_id=client_id, client_secret=client_secret, timeout=CCXCON_REQUEST_TIMEOUT)
return oauth_ccxcon
| [
"def",
"get_oauth_client",
"(",
"server_token_url",
",",
"client_id",
",",
"client_secret",
")",
":",
"if",
"(",
"not",
"is_valid_url",
"(",
"server_token_url",
")",
")",
":",
"return",
"client",
"=",
"BackendApplicationClient",
"(",
"client_id",
"=",
"client_id",... | function that creates an oauth client and fetches a token . | train | false |
33,819 | def mean_grad(x, beta):
return x
| [
"def",
"mean_grad",
"(",
"x",
",",
"beta",
")",
":",
"return",
"x"
] | gradient/jacobian for d / d beta . | train | false |
33,820 | def find_highest_files(directory, unversioned, regex):
files = find_matching_files(directory, regex)
get_numbers = re.compile('(\\d+).(\\d+).(\\d+)')
def max2(a, b):
'Returns the larger of the two values'
av = get_numbers.search(os.path.basename(a)).groups()
bv = get_numbers.search(os.path.basename(b)).groups()
ret = (cmp(av[0], bv[0]) or cmp(av[1], bv[1]) or cmp(av[2], bv[2]))
if (ret < 0):
return b
return a
if (len(files) > 0):
return reduce(max2, files)
last_chance = os.path.join(directory, unversioned)
if os.path.exists(last_chance):
return last_chance
return None
| [
"def",
"find_highest_files",
"(",
"directory",
",",
"unversioned",
",",
"regex",
")",
":",
"files",
"=",
"find_matching_files",
"(",
"directory",
",",
"regex",
")",
"get_numbers",
"=",
"re",
".",
"compile",
"(",
"'(\\\\d+).(\\\\d+).(\\\\d+)'",
")",
"def",
"max2"... | find the highest numbered file in a given directory that matches a given pattern . | train | false |
33,821 | def mixed_store_config(data_dir, mappings, store_order=None):
if (store_order is None):
store_order = [StoreConstructors.draft, StoreConstructors.split]
store_constructors = {StoreConstructors.split: split_mongo_store_config(data_dir)['default'], StoreConstructors.draft: draft_mongo_store_config(data_dir)['default']}
store = {'default': {'ENGINE': 'xmodule.modulestore.mixed.MixedModuleStore', 'OPTIONS': {'mappings': mappings, 'stores': [store_constructors[store] for store in store_order]}}}
return store
| [
"def",
"mixed_store_config",
"(",
"data_dir",
",",
"mappings",
",",
"store_order",
"=",
"None",
")",
":",
"if",
"(",
"store_order",
"is",
"None",
")",
":",
"store_order",
"=",
"[",
"StoreConstructors",
".",
"draft",
",",
"StoreConstructors",
".",
"split",
"]... | return a mixedmodulestore configuration . | train | false |
33,822 | def console_auth_token_destroy_all_by_instance(context, instance_uuid):
return IMPL.console_auth_token_destroy_all_by_instance(context, instance_uuid)
| [
"def",
"console_auth_token_destroy_all_by_instance",
"(",
"context",
",",
"instance_uuid",
")",
":",
"return",
"IMPL",
".",
"console_auth_token_destroy_all_by_instance",
"(",
"context",
",",
"instance_uuid",
")"
] | delete all console authorizations belonging to the instance . | train | false |
33,823 | def format_read_as_fna(read, qual=False):
out = StringIO()
out.write(('>%s' % read['Name']))
start_idx = (read['clip_qual_left'] - 1)
end_idx = read['clip_qual_right']
out.write((' length=%d' % (end_idx - start_idx)))
(timestamp, _, region, location) = decode_accession(read['Name'])
out.write((' xy=%04d_%04d' % location))
out.write((' region=%d' % region))
out.write((' run=R_%d_%02d_%02d_%02d_%02d_%02d_' % timestamp))
out.write('\n')
if qual:
scores = read['quality_scores'][start_idx:end_idx]
out.write(' '.join([('%d' % s) for s in scores]))
else:
bases = read['Bases'][start_idx:end_idx]
out.write(bases)
out.write('\n')
return out.getvalue()
| [
"def",
"format_read_as_fna",
"(",
"read",
",",
"qual",
"=",
"False",
")",
":",
"out",
"=",
"StringIO",
"(",
")",
"out",
".",
"write",
"(",
"(",
"'>%s'",
"%",
"read",
"[",
"'Name'",
"]",
")",
")",
"start_idx",
"=",
"(",
"read",
"[",
"'clip_qual_left'"... | format a single read from a binary sff file as a fasta record . | train | false |
33,824 | def require_content(content_type):
def decorator(f):
@functools.wraps(f)
def decorated_function(req):
if (req.content_type != content_type):
raise webob.exc.HTTPUnsupportedMediaType((_('The media type %(bad_type)s is not supported, use %(good_type)s') % {'bad_type': (req.content_type or 'None'), 'good_type': content_type}), json_formatter=json_error_formatter)
else:
return f(req)
return decorated_function
return decorator
| [
"def",
"require_content",
"(",
"content_type",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"decorated_function",
"(",
"req",
")",
":",
"if",
"(",
"req",
".",
"content_type",
"!=",
"content_type"... | decorator to require a content type in a handler . | train | false |
33,825 | def test_clone_should_abort_if_user_does_not_want_to_reclone(mocker, tmpdir):
mocker.patch('cookiecutter.vcs.is_vcs_installed', autospec=True, return_value=True)
mocker.patch('cookiecutter.vcs.prompt_and_delete_repo', side_effect=SystemExit, autospec=True)
mock_subprocess = mocker.patch('cookiecutter.vcs.subprocess.check_output', autospec=True)
clone_to_dir = tmpdir.mkdir('clone')
clone_to_dir.mkdir('cookiecutter-pytest-plugin')
repo_url = 'https://github.com/pytest-dev/cookiecutter-pytest-plugin.git'
with pytest.raises(SystemExit):
vcs.clone(repo_url, clone_to_dir=str(clone_to_dir))
assert (not mock_subprocess.called)
| [
"def",
"test_clone_should_abort_if_user_does_not_want_to_reclone",
"(",
"mocker",
",",
"tmpdir",
")",
":",
"mocker",
".",
"patch",
"(",
"'cookiecutter.vcs.is_vcs_installed'",
",",
"autospec",
"=",
"True",
",",
"return_value",
"=",
"True",
")",
"mocker",
".",
"patch",
... | in clone() . | train | false |
33,827 | def get_updated_changeset_revisions_from_tool_shed(app, tool_shed_url, name, owner, changeset_revision):
tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry(app, tool_shed_url)
if ((tool_shed_url is None) or (name is None) or (owner is None) or (changeset_revision is None)):
message = 'Unable to get updated changeset revisions from the Tool Shed because one or more of the following '
message += ('required parameters is None: tool_shed_url: %s, name: %s, owner: %s, changeset_revision: %s ' % (str(tool_shed_url), str(name), str(owner), str(changeset_revision)))
raise Exception(message)
params = dict(name=name, owner=owner, changeset_revision=changeset_revision)
pathspec = ['repository', 'updated_changeset_revisions']
text = util.url_get(tool_shed_url, password_mgr=app.tool_shed_registry.url_auth(tool_shed_url), pathspec=pathspec, params=params)
return text
| [
"def",
"get_updated_changeset_revisions_from_tool_shed",
"(",
"app",
",",
"tool_shed_url",
",",
"name",
",",
"owner",
",",
"changeset_revision",
")",
":",
"tool_shed_url",
"=",
"common_util",
".",
"get_tool_shed_url_from_tool_shed_registry",
"(",
"app",
",",
"tool_shed_ur... | get all appropriate newer changeset revisions for the repository defined by the received tool_shed_url / name / owner combination . | train | false |
33,829 | def _devpath(dev):
return os.path.join('/dev', _devbase(dev))
| [
"def",
"_devpath",
"(",
"dev",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"'/dev'",
",",
"_devbase",
"(",
"dev",
")",
")"
] | return /dev name of just about any dev :return: /dev/devicename . | train | false |
33,830 | def set_time_override(override_time=datetime.datetime.utcnow()):
utcnow.override_time = override_time
| [
"def",
"set_time_override",
"(",
"override_time",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
")",
":",
"utcnow",
".",
"override_time",
"=",
"override_time"
] | override utils . | train | false |
33,831 | def promote_buttons(request):
return render(request, 'landing/promote_buttons.html')
| [
"def",
"promote_buttons",
"(",
"request",
")",
":",
"return",
"render",
"(",
"request",
",",
"'landing/promote_buttons.html'",
")"
] | bug 646192: mdn affiliate buttons . | train | false |
33,834 | def axify(expr, axis, keepdims=False):
return type(expr)(expr._child, axis=axis, keepdims=keepdims)
| [
"def",
"axify",
"(",
"expr",
",",
"axis",
",",
"keepdims",
"=",
"False",
")",
":",
"return",
"type",
"(",
"expr",
")",
"(",
"expr",
".",
"_child",
",",
"axis",
"=",
"axis",
",",
"keepdims",
"=",
"keepdims",
")"
] | inject axis argument into expression helper function for compute_up . | train | false |
33,835 | def gevent_un_patch_all():
restore_saved_module('os')
restore_saved_module('time')
restore_saved_module('thread')
restore_saved_module('threading')
restore_saved_module('_threading_local')
restore_saved_module('stdin')
restore_saved_module('stdout')
restore_saved_module('socket')
restore_saved_module('select')
restore_saved_module('ssl')
restore_saved_module('subprocess')
| [
"def",
"gevent_un_patch_all",
"(",
")",
":",
"restore_saved_module",
"(",
"'os'",
")",
"restore_saved_module",
"(",
"'time'",
")",
"restore_saved_module",
"(",
"'thread'",
")",
"restore_saved_module",
"(",
"'threading'",
")",
"restore_saved_module",
"(",
"'_threading_lo... | a method to unpatch gevent libraries . | train | false |
33,836 | def bulk_recover(workers, lbn, profile='default'):
ret = {}
if isinstance(workers, str):
workers = workers.split(',')
for worker in workers:
try:
ret[worker] = worker_recover(worker, lbn, profile)
except Exception:
ret[worker] = False
return ret
| [
"def",
"bulk_recover",
"(",
"workers",
",",
"lbn",
",",
"profile",
"=",
"'default'",
")",
":",
"ret",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"workers",
",",
"str",
")",
":",
"workers",
"=",
"workers",
".",
"split",
"(",
"','",
")",
"for",
"worker",
... | recover all the given workers in the specific load balancer cli examples: . | train | false |
33,837 | def dup_sign_variations(f, K):
(prev, k) = (K.zero, 0)
for coeff in f:
if K.is_negative((coeff * prev)):
k += 1
if coeff:
prev = coeff
return k
| [
"def",
"dup_sign_variations",
"(",
"f",
",",
"K",
")",
":",
"(",
"prev",
",",
"k",
")",
"=",
"(",
"K",
".",
"zero",
",",
"0",
")",
"for",
"coeff",
"in",
"f",
":",
"if",
"K",
".",
"is_negative",
"(",
"(",
"coeff",
"*",
"prev",
")",
")",
":",
... | compute the number of sign variations of f in k[x] . | train | false |
33,838 | def getFileTextInFileDirectory(fileInDirectory, fileName, readMode='r'):
absoluteFilePathInFileDirectory = os.path.join(os.path.dirname(fileInDirectory), fileName)
return getFileText(absoluteFilePathInFileDirectory, readMode)
| [
"def",
"getFileTextInFileDirectory",
"(",
"fileInDirectory",
",",
"fileName",
",",
"readMode",
"=",
"'r'",
")",
":",
"absoluteFilePathInFileDirectory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"fileInDirectory",
")",
",... | get the entire text of a file in the directory of the file in directory . | train | false |
33,840 | def find_channel(caller, channelname, silent=False, noaliases=False):
channels = ChannelDB.objects.channel_search(channelname)
if (not channels):
if (not noaliases):
channels = [chan for chan in ChannelDB.objects.get_all_channels() if (channelname in chan.aliases.all())]
if channels:
return channels[0]
if (not silent):
caller.msg(("Channel '%s' not found." % channelname))
return None
elif (len(channels) > 1):
matches = ', '.join([('%s(%s)' % (chan.key, chan.id)) for chan in channels])
if (not silent):
caller.msg(('Multiple channels match (be more specific): \n%s' % matches))
return None
return channels[0]
| [
"def",
"find_channel",
"(",
"caller",
",",
"channelname",
",",
"silent",
"=",
"False",
",",
"noaliases",
"=",
"False",
")",
":",
"channels",
"=",
"ChannelDB",
".",
"objects",
".",
"channel_search",
"(",
"channelname",
")",
"if",
"(",
"not",
"channels",
")"... | helper function for searching for a single channel with some error handling . | train | false |
33,841 | def _add_constant(name, container=None):
c = getattr(constants, name, _UNDEFINED)
if (c == _UNDEFINED):
return
globals()[name] = c
__all__.append(name)
if (container is not None):
container.add(c)
return c
| [
"def",
"_add_constant",
"(",
"name",
",",
"container",
"=",
"None",
")",
":",
"c",
"=",
"getattr",
"(",
"constants",
",",
"name",
",",
"_UNDEFINED",
")",
"if",
"(",
"c",
"==",
"_UNDEFINED",
")",
":",
"return",
"globals",
"(",
")",
"[",
"name",
"]",
... | add a constant to be defined optionally add it to one of the sets for use in get/setopt checkers . | train | false |
33,842 | def html_parts(input_string, source_path=None, destination_path=None, input_encoding='unicode', doctitle=1, initial_header_level=1):
overrides = {'input_encoding': input_encoding, 'doctitle_xform': doctitle, 'initial_header_level': initial_header_level}
parts = core.publish_parts(source=input_string, source_path=source_path, destination_path=destination_path, writer_name='html', settings_overrides=overrides)
return parts
| [
"def",
"html_parts",
"(",
"input_string",
",",
"source_path",
"=",
"None",
",",
"destination_path",
"=",
"None",
",",
"input_encoding",
"=",
"'unicode'",
",",
"doctitle",
"=",
"1",
",",
"initial_header_level",
"=",
"1",
")",
":",
"overrides",
"=",
"{",
"'inp... | given an input string . | train | true |
33,843 | def getFirstAncestorWithSectionHeader(entry):
for a in domhelpers.getParents(entry)[1:]:
headers = domhelpers.findNodesNamed(a, 'h2')
if (len(headers) > 0):
return headers
return []
| [
"def",
"getFirstAncestorWithSectionHeader",
"(",
"entry",
")",
":",
"for",
"a",
"in",
"domhelpers",
".",
"getParents",
"(",
"entry",
")",
"[",
"1",
":",
"]",
":",
"headers",
"=",
"domhelpers",
".",
"findNodesNamed",
"(",
"a",
",",
"'h2'",
")",
"if",
"(",... | visit the ancestors of c{entry} until one with at least one c{h2} child node is found . | train | false |
33,844 | def timer_helper(num_handlers=1, sleep_time=5, event_handlers=[], aTimer=None, error_margin=0.25):
global COUNT
global TIMER_HELPER_FINISHED
COUNT = 0
TIMER_HELPER_FINISHED = False
try:
if (aTimer == None):
aTimer = System.Timers.Timer()
for i in xrange(num_handlers):
aTimer.Elapsed += System.Timers.ElapsedEventHandler(onTimedEvent)
for handler in event_handlers:
aTimer.Elapsed += System.Timers.ElapsedEventHandler(handler)
aTimer.Interval = 100
aTimer.Enabled = True
sleep((sleep_time / 10.0))
aTimer.Enabled = False
sleep((sleep_time / 10.0))
Assert((COUNT >= (int((sleep_time - (sleep_time * error_margin))) * (num_handlers + len(event_handlers)))), str(COUNT))
Assert((COUNT <= (int((sleep_time + (sleep_time * error_margin))) * (num_handlers + len(event_handlers)))), str(COUNT))
finally:
TIMER_HELPER_FINISHED = True
| [
"def",
"timer_helper",
"(",
"num_handlers",
"=",
"1",
",",
"sleep_time",
"=",
"5",
",",
"event_handlers",
"=",
"[",
"]",
",",
"aTimer",
"=",
"None",
",",
"error_margin",
"=",
"0.25",
")",
":",
"global",
"COUNT",
"global",
"TIMER_HELPER_FINISHED",
"COUNT",
... | helper function used to test a single timer object under various conditions . | train | false |
33,845 | def factorized(A):
if useUmfpack:
if noScikit:
raise RuntimeError('Scikits.umfpack not installed.')
if (not isspmatrix_csc(A)):
A = csc_matrix(A)
warn('splu requires CSC matrix format', SparseEfficiencyWarning)
A.sort_indices()
A = A.asfptype()
if (A.dtype.char not in 'dD'):
raise ValueError('convert matrix data to double, please, using .astype(), or set linsolve.useUmfpack = False')
umf = umfpack.UmfpackContext(_get_umf_family(A))
umf.numeric(A)
def solve(b):
return umf.solve(umfpack.UMFPACK_A, A, b, autoTranspose=True)
return solve
else:
return splu(A).solve
| [
"def",
"factorized",
"(",
"A",
")",
":",
"if",
"useUmfpack",
":",
"if",
"noScikit",
":",
"raise",
"RuntimeError",
"(",
"'Scikits.umfpack not installed.'",
")",
"if",
"(",
"not",
"isspmatrix_csc",
"(",
"A",
")",
")",
":",
"A",
"=",
"csc_matrix",
"(",
"A",
... | return a function for solving a sparse linear system . | train | false |
33,846 | def test_NORBSmall():
skip_if_no_data()
data = NORBSmall('train')
assert (data.X.shape == (24300, 18432))
assert (data.X.dtype == 'float32')
assert (data.y.shape == (24300,))
assert (data.y_labels == 5)
assert (data.get_topological_view().shape == (24300, 96, 96, 2))
data = NORBSmall('test')
assert (data.X.shape == (24300, 18432))
assert (data.X.dtype == 'float32')
assert (data.y.shape == (24300,))
assert (data.y_labels == 5)
assert (data.get_topological_view().shape == (24300, 96, 96, 2))
| [
"def",
"test_NORBSmall",
"(",
")",
":",
"skip_if_no_data",
"(",
")",
"data",
"=",
"NORBSmall",
"(",
"'train'",
")",
"assert",
"(",
"data",
".",
"X",
".",
"shape",
"==",
"(",
"24300",
",",
"18432",
")",
")",
"assert",
"(",
"data",
".",
"X",
".",
"dt... | this function tests the norbsmall class . | train | false |
33,847 | def remove_hosts_file():
known_hosts_file = ('%s/.ssh/known_hosts' % os.getenv('HOME'))
if os.path.isfile(known_hosts_file):
logging.debug('Deleting known hosts file %s', known_hosts_file)
os.remove(known_hosts_file)
| [
"def",
"remove_hosts_file",
"(",
")",
":",
"known_hosts_file",
"=",
"(",
"'%s/.ssh/known_hosts'",
"%",
"os",
".",
"getenv",
"(",
"'HOME'",
")",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"known_hosts_file",
")",
":",
"logging",
".",
"debug",
"(",
... | remove the ssh known hosts file for a machine . | train | false |
33,848 | def operation_looks_paginated(operation_model):
has_input_param = _shape_has_pagination_param(operation_model.input_shape)
has_output_param = _shape_has_pagination_param(operation_model.output_shape)
return (has_input_param and has_output_param)
| [
"def",
"operation_looks_paginated",
"(",
"operation_model",
")",
":",
"has_input_param",
"=",
"_shape_has_pagination_param",
"(",
"operation_model",
".",
"input_shape",
")",
"has_output_param",
"=",
"_shape_has_pagination_param",
"(",
"operation_model",
".",
"output_shape",
... | checks whether an operation looks like it can be paginated :type operation_model: botocore . | train | false |
33,849 | @mock_ec2
def test_decorater_wrapped_gets_set():
test_decorater_wrapped_gets_set.__wrapped__.__name__.should.equal(u'test_decorater_wrapped_gets_set')
| [
"@",
"mock_ec2",
"def",
"test_decorater_wrapped_gets_set",
"(",
")",
":",
"test_decorater_wrapped_gets_set",
".",
"__wrapped__",
".",
"__name__",
".",
"should",
".",
"equal",
"(",
"u'test_decorater_wrapped_gets_set'",
")"
] | moto decorators __wrapped__ should get set to the tests function . | train | false |
33,850 | def register_arguments(func, args=None):
if (args is None):
args = func.args.args
if func.args.vararg:
func.set_local(func.args.vararg, func.args)
if func.args.kwarg:
func.set_local(func.args.kwarg, func.args)
for arg in args:
if isinstance(arg, Name):
func.set_local(arg.name, arg)
else:
register_arguments(func, arg.elts)
| [
"def",
"register_arguments",
"(",
"func",
",",
"args",
"=",
"None",
")",
":",
"if",
"(",
"args",
"is",
"None",
")",
":",
"args",
"=",
"func",
".",
"args",
".",
"args",
"if",
"func",
".",
"args",
".",
"vararg",
":",
"func",
".",
"set_local",
"(",
... | add given arguments to local args is a list that may contains nested lists (i . | train | true |
33,851 | def create_random_person(locale=None, minimum_name_comp_len=0):
fake = get_faker([u'person', u'internet', u'address'], locale=locale)
while True:
first_name = fake.first_name()
last_name = fake.last_name()
name = (u'%s %s' % (first_name, last_name))
if ((len(first_name) > minimum_name_comp_len) and (len(last_name) > minimum_name_comp_len)):
break
email = get_random_email(fake)
phone = fake.phone_number()
prefix = u''
suffix = u''
address = create_random_address(fake=fake, name=name, prefix=prefix, suffix=suffix, email=email, phone=phone)
return PersonContact.objects.create(email=email, phone=phone, name=name, first_name=first_name, last_name=last_name, prefix=prefix, suffix=suffix, default_shipping_address=address, default_billing_address=address, gender=random.choice(u'mfuo'), language=fake.language)
| [
"def",
"create_random_person",
"(",
"locale",
"=",
"None",
",",
"minimum_name_comp_len",
"=",
"0",
")",
":",
"fake",
"=",
"get_faker",
"(",
"[",
"u'person'",
",",
"u'internet'",
",",
"u'address'",
"]",
",",
"locale",
"=",
"locale",
")",
"while",
"True",
":... | create a random personcontact from the given locale . | train | false |
33,852 | def bounce_local(drain=False):
if _TRAFFICCTL:
cmd = _traffic_ctl('server', 'restart')
else:
cmd = _traffic_line('-b')
if drain:
cmd = '{0} {1}'.format(cmd, '--drain')
log.debug('Running: %s', cmd)
return _subprocess(cmd)
| [
"def",
"bounce_local",
"(",
"drain",
"=",
"False",
")",
":",
"if",
"_TRAFFICCTL",
":",
"cmd",
"=",
"_traffic_ctl",
"(",
"'server'",
",",
"'restart'",
")",
"else",
":",
"cmd",
"=",
"_traffic_line",
"(",
"'-b'",
")",
"if",
"drain",
":",
"cmd",
"=",
"'{0}... | bounce traffic server on the local node . | train | false |
33,853 | def _get_credentials():
ret = {'user': False, 'passwd': False}
for item in ret:
for struct in [__opts__, __grains__, __pillar__]:
for config_key in __valid_configs[item]:
value = salt.utils.traverse_dict_and_list(struct, config_key, None)
if value:
ret[item] = value
break
return (ret['user'], ret['passwd'])
| [
"def",
"_get_credentials",
"(",
")",
":",
"ret",
"=",
"{",
"'user'",
":",
"False",
",",
"'passwd'",
":",
"False",
"}",
"for",
"item",
"in",
"ret",
":",
"for",
"struct",
"in",
"[",
"__opts__",
",",
"__grains__",
",",
"__pillar__",
"]",
":",
"for",
"co... | get the username and password from opts . | train | true |
33,856 | def check_c_int(context, builder, n):
_maxint = ((2 ** 31) - 1)
def impl(n):
if (n > _maxint):
raise OverflowError('array size too large to fit in C int')
context.compile_internal(builder, impl, signature(types.none, types.intp), (n,))
| [
"def",
"check_c_int",
"(",
"context",
",",
"builder",
",",
"n",
")",
":",
"_maxint",
"=",
"(",
"(",
"2",
"**",
"31",
")",
"-",
"1",
")",
"def",
"impl",
"(",
"n",
")",
":",
"if",
"(",
"n",
">",
"_maxint",
")",
":",
"raise",
"OverflowError",
"(",... | check whether *n* fits in a c int . | train | false |
33,857 | def read_global(name):
cache_path = _global_cache_path()
return _read(cache_path, name)
| [
"def",
"read_global",
"(",
"name",
")",
":",
"cache_path",
"=",
"_global_cache_path",
"(",
")",
"return",
"_read",
"(",
"cache_path",
",",
"name",
")"
] | reads the object from the global sublime cache path using pickle arguments: name -- the relative file name to read the object returns: the object at the location with the name . | train | false |
33,858 | def aggregate_scores(scores):
total_correct_graded = float_sum((score.earned for score in scores if score.graded))
total_possible_graded = float_sum((score.possible for score in scores if score.graded))
any_attempted_graded = any((score.attempted for score in scores if score.graded))
total_correct = float_sum((score.earned for score in scores))
total_possible = float_sum((score.possible for score in scores))
any_attempted = any((score.attempted for score in scores))
all_total = AggregatedScore(total_correct, total_possible, False, any_attempted)
graded_total = AggregatedScore(total_correct_graded, total_possible_graded, True, any_attempted_graded)
return (all_total, graded_total)
| [
"def",
"aggregate_scores",
"(",
"scores",
")",
":",
"total_correct_graded",
"=",
"float_sum",
"(",
"(",
"score",
".",
"earned",
"for",
"score",
"in",
"scores",
"if",
"score",
".",
"graded",
")",
")",
"total_possible_graded",
"=",
"float_sum",
"(",
"(",
"scor... | scores: a list of scorebase objects returns: a tuple . | train | false |
33,859 | def test_pycuda_only():
from pycuda.compiler import SourceModule
mod = SourceModule('\n__global__ void multiply_them(float *dest, float *a, float *b)\n{\n const int i = threadIdx.x;\n dest[i] = a[i] * b[i];\n}\n')
multiply_them = mod.get_function('multiply_them')
a = numpy.random.randn(100).astype(numpy.float32)
b = numpy.random.randn(100).astype(numpy.float32)
dest = numpy.zeros_like(a)
multiply_them(drv.Out(dest), drv.In(a), drv.In(b), block=(400, 1, 1), grid=(1, 1))
assert (dest == (a * b)).all()
| [
"def",
"test_pycuda_only",
"(",
")",
":",
"from",
"pycuda",
".",
"compiler",
"import",
"SourceModule",
"mod",
"=",
"SourceModule",
"(",
"'\\n__global__ void multiply_them(float *dest, float *a, float *b)\\n{\\n const int i = threadIdx.x;\\n dest[i] = a[i] * b[i];\\n}\\n'",
")",
"... | run pycuda only example to test that pycuda works . | train | false |
33,860 | def create_attached_volume(dataset_id, mountpoint, maximum_size=None, metadata=pmap()):
return AttachedVolume(manifestation=Manifestation(dataset=Dataset(dataset_id=dataset_id, maximum_size=maximum_size, metadata=metadata), primary=True), mountpoint=FilePath(mountpoint))
| [
"def",
"create_attached_volume",
"(",
"dataset_id",
",",
"mountpoint",
",",
"maximum_size",
"=",
"None",
",",
"metadata",
"=",
"pmap",
"(",
")",
")",
":",
"return",
"AttachedVolume",
"(",
"manifestation",
"=",
"Manifestation",
"(",
"dataset",
"=",
"Dataset",
"... | create an attachedvolume instance with the supplied parameters and return it . | train | false |
33,861 | def load_ipython_extension(ip):
warnings.warn('The rmagic extension in IPython has moved to `rpy2.ipython`, please see `rpy2` documentation.')
| [
"def",
"load_ipython_extension",
"(",
"ip",
")",
":",
"warnings",
".",
"warn",
"(",
"'The rmagic extension in IPython has moved to `rpy2.ipython`, please see `rpy2` documentation.'",
")"
] | this is called to load the module as an ipython extension . | train | false |
33,862 | def my_import(name):
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
| [
"def",
"my_import",
"(",
"name",
")",
":",
"mod",
"=",
"__import__",
"(",
"name",
")",
"components",
"=",
"name",
".",
"split",
"(",
"'.'",
")",
"for",
"comp",
"in",
"components",
"[",
"1",
":",
"]",
":",
"mod",
"=",
"getattr",
"(",
"mod",
",",
"... | module importer - taken from the python documentation . | train | true |
33,863 | def create_string_buffer(init, size=None):
if isinstance(init, (str, unicode)):
if (size is None):
size = (len(init) + 1)
buftype = (c_char * size)
buf = buftype()
buf.value = init
return buf
elif isinstance(init, (int, long)):
buftype = (c_char * init)
buf = buftype()
return buf
raise TypeError(init)
| [
"def",
"create_string_buffer",
"(",
"init",
",",
"size",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"init",
",",
"(",
"str",
",",
"unicode",
")",
")",
":",
"if",
"(",
"size",
"is",
"None",
")",
":",
"size",
"=",
"(",
"len",
"(",
"init",
")",... | create_string_buffer -> character array create_string_buffer -> character array create_string_buffer -> character array . | train | true |
33,864 | def _maybeFindSourceLine(testThing):
method = getattr(testThing, '_testMethodName', None)
if (method is not None):
testThing = getattr(testThing, method)
code = getattr(testThing, '__code__', None)
if (code is not None):
return code.co_firstlineno
try:
return inspect.getsourcelines(testThing)[1]
except (IOError, TypeError):
return (-1)
| [
"def",
"_maybeFindSourceLine",
"(",
"testThing",
")",
":",
"method",
"=",
"getattr",
"(",
"testThing",
",",
"'_testMethodName'",
",",
"None",
")",
"if",
"(",
"method",
"is",
"not",
"None",
")",
":",
"testThing",
"=",
"getattr",
"(",
"testThing",
",",
"meth... | try to find the source line of the given test thing . | train | false |
33,865 | def hobj(symb, width):
return ''.join(xobj(symb, width))
| [
"def",
"hobj",
"(",
"symb",
",",
"width",
")",
":",
"return",
"''",
".",
"join",
"(",
"xobj",
"(",
"symb",
",",
"width",
")",
")"
] | construct horizontal object of a given width see: xobj . | train | false |
33,866 | def request_user_has_permission(request, permission_type):
user_db = get_user_db_from_request(request=request)
return user_has_permission(user_db=user_db, permission_type=permission_type)
| [
"def",
"request_user_has_permission",
"(",
"request",
",",
"permission_type",
")",
":",
"user_db",
"=",
"get_user_db_from_request",
"(",
"request",
"=",
"request",
")",
"return",
"user_has_permission",
"(",
"user_db",
"=",
"user_db",
",",
"permission_type",
"=",
"pe... | check that currently logged-in user has specified permission . | train | false |
33,867 | def write_fine_calibration(fname, calibration):
_check_fname(fname, overwrite=True)
check_fname(fname, 'cal', ('.dat',))
with open(fname, 'wb') as cal_file:
for (ci, chan) in enumerate(calibration['ch_names']):
cal_line = np.concatenate([calibration['locs'][ci], calibration['imb_cals'][ci]]).round(6)
cal_str = ((str(chan) + ' ') + ' '.join(map((lambda x: ('%.6f' % x)), cal_line)))
cal_file.write((cal_str + '\n').encode('ASCII'))
| [
"def",
"write_fine_calibration",
"(",
"fname",
",",
"calibration",
")",
":",
"_check_fname",
"(",
"fname",
",",
"overwrite",
"=",
"True",
")",
"check_fname",
"(",
"fname",
",",
"'cal'",
",",
"(",
"'.dat'",
",",
")",
")",
"with",
"open",
"(",
"fname",
","... | write fine calibration information to a . | train | false |
33,868 | def get_formats():
FORMAT_SETTINGS = ('DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT', 'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT', 'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR', 'THOUSAND_SEPARATOR', 'NUMBER_GROUPING', 'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS')
result = {}
for module in ([settings] + get_format_modules(reverse=True)):
for attr in FORMAT_SETTINGS:
result[attr] = get_format(attr)
src = []
for (k, v) in result.items():
if isinstance(v, (basestring, int)):
src.append(("formats['%s'] = '%s';\n" % (javascript_quote(k), javascript_quote(smart_unicode(v)))))
elif isinstance(v, (tuple, list)):
v = [javascript_quote(smart_unicode(value)) for value in v]
src.append(("formats['%s'] = ['%s'];\n" % (javascript_quote(k), "', '".join(v))))
return ''.join(src)
| [
"def",
"get_formats",
"(",
")",
":",
"FORMAT_SETTINGS",
"=",
"(",
"'DATE_FORMAT'",
",",
"'DATETIME_FORMAT'",
",",
"'TIME_FORMAT'",
",",
"'YEAR_MONTH_FORMAT'",
",",
"'MONTH_DAY_FORMAT'",
",",
"'SHORT_DATE_FORMAT'",
",",
"'SHORT_DATETIME_FORMAT'",
",",
"'FIRST_DAY_OF_WEEK'"... | returns all formats strings required for i18n to work . | train | false |
33,869 | @with_setup(step_runner_environ)
def test_feature_can_run_only_specified_scenarios():
feature = Feature.from_string(FEATURE7)
scenarios_ran = []
@after.each_scenario
def just_register(scenario):
scenarios_ran.append(scenario.name)
feature.run(scenarios=(2, 5))
assert_equals(scenarios_ran, ['2nd one', '5th one'])
| [
"@",
"with_setup",
"(",
"step_runner_environ",
")",
"def",
"test_feature_can_run_only_specified_scenarios",
"(",
")",
":",
"feature",
"=",
"Feature",
".",
"from_string",
"(",
"FEATURE7",
")",
"scenarios_ran",
"=",
"[",
"]",
"@",
"after",
".",
"each_scenario",
"def... | features can run only specified scenarios . | train | false |
33,870 | def htons(integer):
return ntohs(integer)
| [
"def",
"htons",
"(",
"integer",
")",
":",
"return",
"ntohs",
"(",
"integer",
")"
] | htons -> integer convert a 16-bit integer from host to network byte order . | train | false |
33,871 | def get_pip_requirements(fname=os.path.join(jasperpath.LIB_PATH, 'requirements.txt')):
logger = logging.getLogger(__name__)
if os.access(fname, os.R_OK):
reqs = list(pip.req.parse_requirements(fname))
logger.debug("Found %d PIP requirements in file '%s'", len(reqs), fname)
return reqs
else:
logger.debug("PIP requirements file '%s' not found or not readable", fname)
| [
"def",
"get_pip_requirements",
"(",
"fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"jasperpath",
".",
"LIB_PATH",
",",
"'requirements.txt'",
")",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"if",
"os",
".",
"access",
"... | gets the pip requirements from a text file . | train | false |
33,872 | def _print_keys(query):
for row in query:
print_(((u' ' * 2) + row['key']))
| [
"def",
"_print_keys",
"(",
"query",
")",
":",
"for",
"row",
"in",
"query",
":",
"print_",
"(",
"(",
"(",
"u' '",
"*",
"2",
")",
"+",
"row",
"[",
"'key'",
"]",
")",
")"
] | given a sqlite query result . | train | false |
33,873 | def _get_feature_importances(estimator, norm_order=1):
importances = getattr(estimator, 'feature_importances_', None)
if ((importances is None) and hasattr(estimator, 'coef_')):
if (estimator.coef_.ndim == 1):
importances = np.abs(estimator.coef_)
else:
importances = norm(estimator.coef_, axis=0, ord=norm_order)
elif (importances is None):
raise ValueError(('The underlying estimator %s has no `coef_` or `feature_importances_` attribute. Either pass a fitted estimator to SelectFromModel or call fit before calling transform.' % estimator.__class__.__name__))
return importances
| [
"def",
"_get_feature_importances",
"(",
"estimator",
",",
"norm_order",
"=",
"1",
")",
":",
"importances",
"=",
"getattr",
"(",
"estimator",
",",
"'feature_importances_'",
",",
"None",
")",
"if",
"(",
"(",
"importances",
"is",
"None",
")",
"and",
"hasattr",
... | retrieve or aggregate feature importances from estimator . | train | false |
33,876 | def cluster_status(runas=None):
if ((runas is None) and (not salt.utils.is_windows())):
runas = salt.utils.get_user()
res = __salt__['cmd.run_all']([__context__['rabbitmqctl'], 'cluster_status'], runas=runas, python_shell=False)
_check_response(res)
return res['stdout']
| [
"def",
"cluster_status",
"(",
"runas",
"=",
"None",
")",
":",
"if",
"(",
"(",
"runas",
"is",
"None",
")",
"and",
"(",
"not",
"salt",
".",
"utils",
".",
"is_windows",
"(",
")",
")",
")",
":",
"runas",
"=",
"salt",
".",
"utils",
".",
"get_user",
"(... | return rabbitmq cluster_status cli example: . | train | false |
33,877 | def user_is_system_admin(user_db):
return user_has_role(user_db=user_db, role=SystemRole.SYSTEM_ADMIN)
| [
"def",
"user_is_system_admin",
"(",
"user_db",
")",
":",
"return",
"user_has_role",
"(",
"user_db",
"=",
"user_db",
",",
"role",
"=",
"SystemRole",
".",
"SYSTEM_ADMIN",
")"
] | return true if the provided user has system admin rule . | train | false |
33,879 | def getNewRepository():
return ExportRepository()
| [
"def",
"getNewRepository",
"(",
")",
":",
"return",
"ExportRepository",
"(",
")"
] | get the repository constructor . | train | false |
def reset_multiprocessing_logger():
    """Reset billiard's multiprocessing logger, if billiard is importable.

    A missing billiard package is silently tolerated.
    """
    try:
        from billiard import util
    except ImportError:
        return
    # Only clear the logger when the attribute actually exists.
    if hasattr(util, u'_logger'):
        util._logger = None
| [
"def",
"reset_multiprocessing_logger",
"(",
")",
":",
"try",
":",
"from",
"billiard",
"import",
"util",
"except",
"ImportError",
":",
"pass",
"else",
":",
"if",
"hasattr",
"(",
"util",
",",
"u'_logger'",
")",
":",
"util",
".",
"_logger",
"=",
"None"
] | reset multiprocessing logging setup . | train | false |
def test_quteproc_error_message_did_fail(qtbot, quteproc, request_mock):
    """Make sure the test does not fail on teardown if the main test failed.

    If the call phase already failed (``rep_call.failed``), an expected
    error message must not produce an additional failure in after_test().
    """
    # Mark the current test node as having failed its call phase.
    request_mock.node.rep_call.failed = True
    # Emit an error message and block until the process signals it.
    with qtbot.waitSignal(quteproc.got_error):
        quteproc.send_cmd(':message-error test')
    # Teardown must tolerate the pending error because the test failed anyway.
    quteproc.after_test()
| [
"def",
"test_quteproc_error_message_did_fail",
"(",
"qtbot",
",",
"quteproc",
",",
"request_mock",
")",
":",
"request_mock",
".",
"node",
".",
"rep_call",
".",
"failed",
"=",
"True",
"with",
"qtbot",
".",
"waitSignal",
"(",
"quteproc",
".",
"got_error",
")",
"... | make sure the test does not fail on teardown if the main test failed . | train | false |
def write_hostnames(save_path, hostnames_ips):
    """Write the hostname -> IP mapping to ``openstack_hostnames_ips.yml``.

    The content is serialized as pretty-printed, key-sorted JSON (which is
    also valid YAML) in the directory resolved by ``dir_find(save_path)``.

    :param save_path: base path handed to ``dir_find`` to locate the
        output directory.
    :param hostnames_ips: mapping of hostnames to their IP addresses.
    """
    file_path = dir_find(save_path)
    hostnames_ip_file = os.path.join(file_path, 'openstack_hostnames_ips.yml')
    # Open in text mode: json.dumps returns ``str``, which raises TypeError
    # when written to a binary-mode ('wb') file on Python 3.
    with open(hostnames_ip_file, 'w') as f:
        f.write(json.dumps(hostnames_ips, indent=4, sort_keys=True))
| [
"def",
"write_hostnames",
"(",
"save_path",
",",
"hostnames_ips",
")",
":",
"file_path",
"=",
"dir_find",
"(",
"save_path",
")",
"hostnames_ip_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"file_path",
",",
"'openstack_hostnames_ips.yml'",
")",
"with",
"open"... | write a list of all hosts and their given ip addresses note: the file is saved in json format to a file with the name openstack_hostnames_ips . | train | false |
def read_element_float(stream, size):
    """Read a big-endian IEEE-754 float of ``size`` bytes (4 or 8).

    Raises SizeError for any other size.
    """
    if size == 4:
        fmt = '>f'
    elif size == 8:
        fmt = '>d'
    else:
        raise SizeError(size)
    (value,) = unpack(fmt, _read(stream, size))
    return value
| [
"def",
"read_element_float",
"(",
"stream",
",",
"size",
")",
":",
"if",
"(",
"size",
"==",
"4",
")",
":",
"return",
"unpack",
"(",
"'>f'",
",",
"_read",
"(",
"stream",
",",
"4",
")",
")",
"[",
"0",
"]",
"elif",
"(",
"size",
"==",
"8",
")",
":"... | read the element data of type :data:float . | train | false |
33,884 | def _is_filename_excluded_for_bad_patterns_check(pattern, filename):
return (any((filename.startswith(bad_pattern) for bad_pattern in BAD_PATTERNS[pattern]['excluded_dirs'])) or (filename in BAD_PATTERNS[pattern]['excluded_files']))
| [
"def",
"_is_filename_excluded_for_bad_patterns_check",
"(",
"pattern",
",",
"filename",
")",
":",
"return",
"(",
"any",
"(",
"(",
"filename",
".",
"startswith",
"(",
"bad_pattern",
")",
"for",
"bad_pattern",
"in",
"BAD_PATTERNS",
"[",
"pattern",
"]",
"[",
"'excl... | checks if file is excluded from the bad patterns check . | train | false |
33,885 | def _to_dict(objects):
try:
if isinstance(objects, string_types):
objects = json.loads(objects)
except ValueError as err:
log.error('Could not parse objects: %s', err)
raise err
return objects
| [
"def",
"_to_dict",
"(",
"objects",
")",
":",
"try",
":",
"if",
"isinstance",
"(",
"objects",
",",
"string_types",
")",
":",
"objects",
"=",
"json",
".",
"loads",
"(",
"objects",
")",
"except",
"ValueError",
"as",
"err",
":",
"log",
".",
"error",
"(",
... | potentially interprets a string as json for usage with mongo . | train | false |
def log_installed_python_prereqs():
    """Dump ``pip freeze`` output to the generated-logs directory for debugging."""
    freeze_log = Env.GEN_LOG_DIR + '/pip_freeze.log'
    sh('pip freeze > {}'.format(freeze_log))
    return
| [
"def",
"log_installed_python_prereqs",
"(",
")",
":",
"sh",
"(",
"'pip freeze > {}'",
".",
"format",
"(",
"(",
"Env",
".",
"GEN_LOG_DIR",
"+",
"'/pip_freeze.log'",
")",
")",
")",
"return"
] | logs output of pip freeze for debugging . | train | false |
def build_info():
    '''
    Return server version info and build arguments parsed from ``<binary> -V``.

    CLI Example:

    .. code-block:: bash

        salt '*' apache.build_info
    '''
    ret = {'info': []}
    output = __salt__['cmd.run']('{0} -V'.format(__detect_os()))
    for line in output.splitlines():
        if line.startswith('configure argument'):
            # Tokenize the configure line (quoted arguments stay intact)
            # and drop the leading "configure arguments:" words.
            ret['build arguments'] = re.findall("(?:[^\\s]*'.*')|(?:[^\\s]+)", line)[2:]
        else:
            ret['info'].append(line)
    return ret
| [
"def",
"build_info",
"(",
")",
":",
"ret",
"=",
"{",
"'info'",
":",
"[",
"]",
"}",
"out",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'{0} -V'",
".",
"format",
"(",
"__detect_os",
"(",
")",
")",
")",
"for",
"i",
"in",
"out",
".",
"splitlines",
... | return server and build arguments cli example: . | train | true |
33,889 | def _log_entry_mapping_to_pb(mapping):
entry_pb = LogEntry()
if ('timestamp' in mapping):
mapping['timestamp'] = _datetime_to_rfc3339(mapping['timestamp'])
ParseDict(mapping, entry_pb)
return entry_pb
| [
"def",
"_log_entry_mapping_to_pb",
"(",
"mapping",
")",
":",
"entry_pb",
"=",
"LogEntry",
"(",
")",
"if",
"(",
"'timestamp'",
"in",
"mapping",
")",
":",
"mapping",
"[",
"'timestamp'",
"]",
"=",
"_datetime_to_rfc3339",
"(",
"mapping",
"[",
"'timestamp'",
"]",
... | helper for :meth:write_entries . | train | false |
33,891 | def _serialize_row(row):
new_row = [(_escape_tex_reserved_symbols(unicode(item)) if item else '') for item in row]
return (((6 * ' ') + ' & '.join(new_row)) + ' \\\\')
| [
"def",
"_serialize_row",
"(",
"row",
")",
":",
"new_row",
"=",
"[",
"(",
"_escape_tex_reserved_symbols",
"(",
"unicode",
"(",
"item",
")",
")",
"if",
"item",
"else",
"''",
")",
"for",
"item",
"in",
"row",
"]",
"return",
"(",
"(",
"(",
"6",
"*",
"' '"... | returns string representation of a single row . | train | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.