| id_within_dataset (int64, 1–55.5k) | snippet (string, lengths 19–14.2k) | tokens (list, lengths 6–1.63k) | nl (string, lengths 6–352) | split_within_dataset (1 class) | is_duplicated (bool, 2 classes) |
|---|---|---|---|---|---|
10,587
|
def get_case(d, t):
if (not d.has(t)):
if d.is_one:
return 'base'
return 'primitive'
if d.rem(Poly(t, t)).is_zero:
return 'exp'
if d.rem(Poly((1 + (t ** 2)), t)).is_zero:
return 'tan'
if (d.degree(t) > 1):
return 'other_nonlinear'
return 'other_linear'
|
[
"def",
"get_case",
"(",
"d",
",",
"t",
")",
":",
"if",
"(",
"not",
"d",
".",
"has",
"(",
"t",
")",
")",
":",
"if",
"d",
".",
"is_one",
":",
"return",
"'base'",
"return",
"'primitive'",
"if",
"d",
".",
"rem",
"(",
"Poly",
"(",
"t",
",",
"t",
")",
")",
".",
"is_zero",
":",
"return",
"'exp'",
"if",
"d",
".",
"rem",
"(",
"Poly",
"(",
"(",
"1",
"+",
"(",
"t",
"**",
"2",
")",
")",
",",
"t",
")",
")",
".",
"is_zero",
":",
"return",
"'tan'",
"if",
"(",
"d",
".",
"degree",
"(",
"t",
")",
">",
"1",
")",
":",
"return",
"'other_nonlinear'",
"return",
"'other_linear'"
] |
returns the type of the derivation d .
|
train
| false
|
10,588
|
def newDerOctetString(binstring):
if isinstance(binstring, DerObject):
der = DerOctetString(binstring.encode())
else:
der = DerOctetString(binstring)
return der
|
[
"def",
"newDerOctetString",
"(",
"binstring",
")",
":",
"if",
"isinstance",
"(",
"binstring",
",",
"DerObject",
")",
":",
"der",
"=",
"DerOctetString",
"(",
"binstring",
".",
"encode",
"(",
")",
")",
"else",
":",
"der",
"=",
"DerOctetString",
"(",
"binstring",
")",
"return",
"der"
] |
create a deroctetstring object .
|
train
| false
|
10,590
|
def get_recently_published_exp_summaries(limit):
return _get_exploration_summaries_from_models(exp_models.ExpSummaryModel.get_recently_published(limit))
|
[
"def",
"get_recently_published_exp_summaries",
"(",
"limit",
")",
":",
"return",
"_get_exploration_summaries_from_models",
"(",
"exp_models",
".",
"ExpSummaryModel",
".",
"get_recently_published",
"(",
"limit",
")",
")"
] |
returns a dict with all featured exploration summary domain objects .
|
train
| false
|
10,591
|
def _async_wait_until_running(reactor, instance):
action = start_action(action_type=u'flocker:provision:aws:wait_until_running', instance_id=instance.id)
def check_final_state(ignored):
if (instance.state != u'running'):
raise FailedToRun(instance.state_reason)
action.add_success_fields(instance_state=instance.state, instance_state_reason=instance.state_reason)
return instance
def finished_booting():
d = maybeDeferred(_node_is_booting, instance)
d.addCallback((lambda x: (not x)))
return d
with action.context():
d = loop_until(reactor, finished_booting, repeat(5, INSTANCE_TIMEOUT))
d = DeferredContext(d)
d.addCallback(check_final_state)
d.addActionFinish()
return d.result
|
[
"def",
"_async_wait_until_running",
"(",
"reactor",
",",
"instance",
")",
":",
"action",
"=",
"start_action",
"(",
"action_type",
"=",
"u'flocker:provision:aws:wait_until_running'",
",",
"instance_id",
"=",
"instance",
".",
"id",
")",
"def",
"check_final_state",
"(",
"ignored",
")",
":",
"if",
"(",
"instance",
".",
"state",
"!=",
"u'running'",
")",
":",
"raise",
"FailedToRun",
"(",
"instance",
".",
"state_reason",
")",
"action",
".",
"add_success_fields",
"(",
"instance_state",
"=",
"instance",
".",
"state",
",",
"instance_state_reason",
"=",
"instance",
".",
"state_reason",
")",
"return",
"instance",
"def",
"finished_booting",
"(",
")",
":",
"d",
"=",
"maybeDeferred",
"(",
"_node_is_booting",
",",
"instance",
")",
"d",
".",
"addCallback",
"(",
"(",
"lambda",
"x",
":",
"(",
"not",
"x",
")",
")",
")",
"return",
"d",
"with",
"action",
".",
"context",
"(",
")",
":",
"d",
"=",
"loop_until",
"(",
"reactor",
",",
"finished_booting",
",",
"repeat",
"(",
"5",
",",
"INSTANCE_TIMEOUT",
")",
")",
"d",
"=",
"DeferredContext",
"(",
"d",
")",
"d",
".",
"addCallback",
"(",
"check_final_state",
")",
"d",
".",
"addActionFinish",
"(",
")",
"return",
"d",
".",
"result"
] |
wait until an instance is running .
|
train
| false
|
10,592
|
def get_run_usr():
if ('SUDO_USER' in os.environ):
usr = os.environ['SUDO_USER']
else:
usr = init_app("who -m | awk '{print $1;}'")
try:
getpwnam(usr)
except:
usr = None
return usr
|
[
"def",
"get_run_usr",
"(",
")",
":",
"if",
"(",
"'SUDO_USER'",
"in",
"os",
".",
"environ",
")",
":",
"usr",
"=",
"os",
".",
"environ",
"[",
"'SUDO_USER'",
"]",
"else",
":",
"usr",
"=",
"init_app",
"(",
"\"who -m | awk '{print $1;}'\"",
")",
"try",
":",
"getpwnam",
"(",
"usr",
")",
"except",
":",
"usr",
"=",
"None",
"return",
"usr"
] |
fetch the user that launched zarp .
|
train
| false
|
10,593
|
def standard_normal_sample(shape):
return tf.random_normal(shape)
|
[
"def",
"standard_normal_sample",
"(",
"shape",
")",
":",
"return",
"tf",
".",
"random_normal",
"(",
"shape",
")"
] |
samples from standard gaussian distribution .
|
train
| false
|
10,594
|
def _CopySortExpressionToProtocolBuffer(sort_expression, pb):
pb.set_sort_expression(sort_expression.expression.encode('utf-8'))
if (sort_expression.direction == SortExpression.ASCENDING):
pb.set_sort_descending(False)
if (sort_expression.default_value is not None):
if isinstance(sort_expression.default_value, basestring):
pb.set_default_value_text(sort_expression.default_value.encode('utf-8'))
elif (isinstance(sort_expression.default_value, datetime.datetime) or isinstance(sort_expression.default_value, datetime.date)):
pb.set_default_value_text(str(search_util.EpochTime(sort_expression.default_value)))
else:
pb.set_default_value_numeric(sort_expression.default_value)
return pb
|
[
"def",
"_CopySortExpressionToProtocolBuffer",
"(",
"sort_expression",
",",
"pb",
")",
":",
"pb",
".",
"set_sort_expression",
"(",
"sort_expression",
".",
"expression",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"if",
"(",
"sort_expression",
".",
"direction",
"==",
"SortExpression",
".",
"ASCENDING",
")",
":",
"pb",
".",
"set_sort_descending",
"(",
"False",
")",
"if",
"(",
"sort_expression",
".",
"default_value",
"is",
"not",
"None",
")",
":",
"if",
"isinstance",
"(",
"sort_expression",
".",
"default_value",
",",
"basestring",
")",
":",
"pb",
".",
"set_default_value_text",
"(",
"sort_expression",
".",
"default_value",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"elif",
"(",
"isinstance",
"(",
"sort_expression",
".",
"default_value",
",",
"datetime",
".",
"datetime",
")",
"or",
"isinstance",
"(",
"sort_expression",
".",
"default_value",
",",
"datetime",
".",
"date",
")",
")",
":",
"pb",
".",
"set_default_value_text",
"(",
"str",
"(",
"search_util",
".",
"EpochTime",
"(",
"sort_expression",
".",
"default_value",
")",
")",
")",
"else",
":",
"pb",
".",
"set_default_value_numeric",
"(",
"sort_expression",
".",
"default_value",
")",
"return",
"pb"
] |
copies a sortexpression to a search_service_pb .
|
train
| false
|
10,595
|
def getNewRepository():
return ExportRepository()
|
[
"def",
"getNewRepository",
"(",
")",
":",
"return",
"ExportRepository",
"(",
")"
] |
get new repository .
|
train
| false
|
10,597
|
@rule(u'$nick(?i)(help|doc) +([A-Za-z]+)(?:\\?+)?$')
@example(u'.help tell')
@commands(u'help', u'commands')
@priority(u'low')
def help(bot, trigger):
if trigger.group(2):
name = trigger.group(2)
name = name.lower()
threshold = 3
if (name in bot.doc):
if ((len(bot.doc[name][0]) + (1 if bot.doc[name][1] else 0)) > threshold):
if (trigger.nick != trigger.sender):
bot.reply(u"The documentation for this command is too long; I'm sending it to you in a private message.")
msgfun = (lambda l: bot.msg(trigger.nick, l))
else:
msgfun = bot.reply
for line in bot.doc[name][0]:
msgfun(line)
if bot.doc[name][1]:
msgfun((u'e.g. ' + bot.doc[name][1]))
else:
if ((u'command-gist' in bot.memory) and (bot.memory[u'command-gist'][0] == len(bot.command_groups))):
url = bot.memory[u'command-gist'][1]
else:
bot.say(u"Hang on, I'm creating a list.")
msgs = []
name_length = max(6, max((len(k) for k in bot.command_groups.keys())))
for (category, cmds) in collections.OrderedDict(sorted(bot.command_groups.items())).items():
category = category.upper().ljust(name_length)
cmds = u' '.join(cmds)
msg = ((category + u' ') + cmds)
indent = (u' ' * (name_length + 2))
msgs.append(u'\n'.join(textwrap.wrap(msg, subsequent_indent=indent)))
url = create_gist(bot, u'\n\n'.join(msgs))
if (not url):
return
bot.memory[u'command-gist'] = (len(bot.command_groups), url)
bot.say(u"I've posted a list of my commands at {} - You can see more info about any of these commands by doing .help <command> (e.g. .help time)".format(url))
|
[
"@",
"rule",
"(",
"u'$nick(?i)(help|doc) +([A-Za-z]+)(?:\\\\?+)?$'",
")",
"@",
"example",
"(",
"u'.help tell'",
")",
"@",
"commands",
"(",
"u'help'",
",",
"u'commands'",
")",
"@",
"priority",
"(",
"u'low'",
")",
"def",
"help",
"(",
"bot",
",",
"trigger",
")",
":",
"if",
"trigger",
".",
"group",
"(",
"2",
")",
":",
"name",
"=",
"trigger",
".",
"group",
"(",
"2",
")",
"name",
"=",
"name",
".",
"lower",
"(",
")",
"threshold",
"=",
"3",
"if",
"(",
"name",
"in",
"bot",
".",
"doc",
")",
":",
"if",
"(",
"(",
"len",
"(",
"bot",
".",
"doc",
"[",
"name",
"]",
"[",
"0",
"]",
")",
"+",
"(",
"1",
"if",
"bot",
".",
"doc",
"[",
"name",
"]",
"[",
"1",
"]",
"else",
"0",
")",
")",
">",
"threshold",
")",
":",
"if",
"(",
"trigger",
".",
"nick",
"!=",
"trigger",
".",
"sender",
")",
":",
"bot",
".",
"reply",
"(",
"u\"The documentation for this command is too long; I'm sending it to you in a private message.\"",
")",
"msgfun",
"=",
"(",
"lambda",
"l",
":",
"bot",
".",
"msg",
"(",
"trigger",
".",
"nick",
",",
"l",
")",
")",
"else",
":",
"msgfun",
"=",
"bot",
".",
"reply",
"for",
"line",
"in",
"bot",
".",
"doc",
"[",
"name",
"]",
"[",
"0",
"]",
":",
"msgfun",
"(",
"line",
")",
"if",
"bot",
".",
"doc",
"[",
"name",
"]",
"[",
"1",
"]",
":",
"msgfun",
"(",
"(",
"u'e.g. '",
"+",
"bot",
".",
"doc",
"[",
"name",
"]",
"[",
"1",
"]",
")",
")",
"else",
":",
"if",
"(",
"(",
"u'command-gist'",
"in",
"bot",
".",
"memory",
")",
"and",
"(",
"bot",
".",
"memory",
"[",
"u'command-gist'",
"]",
"[",
"0",
"]",
"==",
"len",
"(",
"bot",
".",
"command_groups",
")",
")",
")",
":",
"url",
"=",
"bot",
".",
"memory",
"[",
"u'command-gist'",
"]",
"[",
"1",
"]",
"else",
":",
"bot",
".",
"say",
"(",
"u\"Hang on, I'm creating a list.\"",
")",
"msgs",
"=",
"[",
"]",
"name_length",
"=",
"max",
"(",
"6",
",",
"max",
"(",
"(",
"len",
"(",
"k",
")",
"for",
"k",
"in",
"bot",
".",
"command_groups",
".",
"keys",
"(",
")",
")",
")",
")",
"for",
"(",
"category",
",",
"cmds",
")",
"in",
"collections",
".",
"OrderedDict",
"(",
"sorted",
"(",
"bot",
".",
"command_groups",
".",
"items",
"(",
")",
")",
")",
".",
"items",
"(",
")",
":",
"category",
"=",
"category",
".",
"upper",
"(",
")",
".",
"ljust",
"(",
"name_length",
")",
"cmds",
"=",
"u' '",
".",
"join",
"(",
"cmds",
")",
"msg",
"=",
"(",
"(",
"category",
"+",
"u' '",
")",
"+",
"cmds",
")",
"indent",
"=",
"(",
"u' '",
"*",
"(",
"name_length",
"+",
"2",
")",
")",
"msgs",
".",
"append",
"(",
"u'\\n'",
".",
"join",
"(",
"textwrap",
".",
"wrap",
"(",
"msg",
",",
"subsequent_indent",
"=",
"indent",
")",
")",
")",
"url",
"=",
"create_gist",
"(",
"bot",
",",
"u'\\n\\n'",
".",
"join",
"(",
"msgs",
")",
")",
"if",
"(",
"not",
"url",
")",
":",
"return",
"bot",
".",
"memory",
"[",
"u'command-gist'",
"]",
"=",
"(",
"len",
"(",
"bot",
".",
"command_groups",
")",
",",
"url",
")",
"bot",
".",
"say",
"(",
"u\"I've posted a list of my commands at {} - You can see more info about any of these commands by doing .help <command> (e.g. .help time)\"",
".",
"format",
"(",
"url",
")",
")"
] |
adds a charthelp object to the help attribute of the function .
|
train
| false
|
10,598
|
def flash_size_bytes(size):
if ('MB' in size):
return ((int(size[:size.index('MB')]) * 1024) * 1024)
elif ('KB' in size):
return (int(size[:size.index('KB')]) * 1024)
else:
raise FatalError(('Unknown size %s' % size))
|
[
"def",
"flash_size_bytes",
"(",
"size",
")",
":",
"if",
"(",
"'MB'",
"in",
"size",
")",
":",
"return",
"(",
"(",
"int",
"(",
"size",
"[",
":",
"size",
".",
"index",
"(",
"'MB'",
")",
"]",
")",
"*",
"1024",
")",
"*",
"1024",
")",
"elif",
"(",
"'KB'",
"in",
"size",
")",
":",
"return",
"(",
"int",
"(",
"size",
"[",
":",
"size",
".",
"index",
"(",
"'KB'",
")",
"]",
")",
"*",
"1024",
")",
"else",
":",
"raise",
"FatalError",
"(",
"(",
"'Unknown size %s'",
"%",
"size",
")",
")"
] |
given a flash size of the type passed in args .
|
train
| true
|
10,599
|
def _patch():
import sys
from OpenGL import GL
if (sys.version_info > (3,)):
buffersubdatafunc = GL.glBufferSubData
if hasattr(buffersubdatafunc, 'wrapperFunction'):
buffersubdatafunc = buffersubdatafunc.wrapperFunction
_m = sys.modules[buffersubdatafunc.__module__]
_m.long = int
try:
from OpenGL.GL.VERSION import GL_2_0
GL_2_0.GL_OBJECT_SHADER_SOURCE_LENGTH = GL_2_0.GL_SHADER_SOURCE_LENGTH
except Exception:
pass
|
[
"def",
"_patch",
"(",
")",
":",
"import",
"sys",
"from",
"OpenGL",
"import",
"GL",
"if",
"(",
"sys",
".",
"version_info",
">",
"(",
"3",
",",
")",
")",
":",
"buffersubdatafunc",
"=",
"GL",
".",
"glBufferSubData",
"if",
"hasattr",
"(",
"buffersubdatafunc",
",",
"'wrapperFunction'",
")",
":",
"buffersubdatafunc",
"=",
"buffersubdatafunc",
".",
"wrapperFunction",
"_m",
"=",
"sys",
".",
"modules",
"[",
"buffersubdatafunc",
".",
"__module__",
"]",
"_m",
".",
"long",
"=",
"int",
"try",
":",
"from",
"OpenGL",
".",
"GL",
".",
"VERSION",
"import",
"GL_2_0",
"GL_2_0",
".",
"GL_OBJECT_SHADER_SOURCE_LENGTH",
"=",
"GL_2_0",
".",
"GL_SHADER_SOURCE_LENGTH",
"except",
"Exception",
":",
"pass"
] |
monkey-patch pyopengl to fix a bug in glbuffersubdata .
|
train
| true
|
10,601
|
def cell_delete(context, cell_name):
return IMPL.cell_delete(context, cell_name)
|
[
"def",
"cell_delete",
"(",
"context",
",",
"cell_name",
")",
":",
"return",
"IMPL",
".",
"cell_delete",
"(",
"context",
",",
"cell_name",
")"
] |
delete a child cell .
|
train
| false
|
10,602
|
def test_scamp_sip_distortion_parameters():
header = get_pkg_data_contents(u'data/validate.fits', encoding=u'binary')
w = wcs.WCS(header)
w.all_pix2world(0, 0, 0)
|
[
"def",
"test_scamp_sip_distortion_parameters",
"(",
")",
":",
"header",
"=",
"get_pkg_data_contents",
"(",
"u'data/validate.fits'",
",",
"encoding",
"=",
"u'binary'",
")",
"w",
"=",
"wcs",
".",
"WCS",
"(",
"header",
")",
"w",
".",
"all_pix2world",
"(",
"0",
",",
"0",
",",
"0",
")"
] |
test parsing of wcs parameters with redundant sip and scamp distortion parameters .
|
train
| false
|
10,604
|
def test_install_from_wheel_with_legacy(script, data):
result = script.pip('install', 'script.wheel2a==0.1', '--no-index', ('--find-links=' + data.find_links), expect_error=False)
legacy_file1 = (script.bin / 'testscript1.bat')
legacy_file2 = (script.bin / 'testscript2')
assert (legacy_file1 in result.files_created)
assert (legacy_file2 in result.files_created)
|
[
"def",
"test_install_from_wheel_with_legacy",
"(",
"script",
",",
"data",
")",
":",
"result",
"=",
"script",
".",
"pip",
"(",
"'install'",
",",
"'script.wheel2a==0.1'",
",",
"'--no-index'",
",",
"(",
"'--find-links='",
"+",
"data",
".",
"find_links",
")",
",",
"expect_error",
"=",
"False",
")",
"legacy_file1",
"=",
"(",
"script",
".",
"bin",
"/",
"'testscript1.bat'",
")",
"legacy_file2",
"=",
"(",
"script",
".",
"bin",
"/",
"'testscript2'",
")",
"assert",
"(",
"legacy_file1",
"in",
"result",
".",
"files_created",
")",
"assert",
"(",
"legacy_file2",
"in",
"result",
".",
"files_created",
")"
] |
test installing scripts .
|
train
| false
|
10,605
|
def setup_services(hass, track_new_found_calendars, calendar_service):
def _found_calendar(call):
'Check if we know about a calendar and generate PLATFORM_DISCOVER.'
calendar = get_calendar_info(hass, call.data)
if (hass.data[DATA_INDEX].get(calendar[CONF_CAL_ID], None) is not None):
return
hass.data[DATA_INDEX].update({calendar[CONF_CAL_ID]: calendar})
update_config(hass.config.path(YAML_DEVICES), hass.data[DATA_INDEX][calendar[CONF_CAL_ID]])
discovery.load_platform(hass, 'calendar', DOMAIN, hass.data[DATA_INDEX][calendar[CONF_CAL_ID]])
hass.services.register(DOMAIN, SERVICE_FOUND_CALENDARS, _found_calendar, None, schema=None)
def _scan_for_calendars(service):
'Scan for new calendars.'
service = calendar_service.get()
cal_list = service.calendarList()
calendars = cal_list.list().execute()['items']
for calendar in calendars:
calendar['track'] = track_new_found_calendars
hass.services.call(DOMAIN, SERVICE_FOUND_CALENDARS, calendar)
hass.services.register(DOMAIN, SERVICE_SCAN_CALENDARS, _scan_for_calendars, None, schema=None)
return True
|
[
"def",
"setup_services",
"(",
"hass",
",",
"track_new_found_calendars",
",",
"calendar_service",
")",
":",
"def",
"_found_calendar",
"(",
"call",
")",
":",
"calendar",
"=",
"get_calendar_info",
"(",
"hass",
",",
"call",
".",
"data",
")",
"if",
"(",
"hass",
".",
"data",
"[",
"DATA_INDEX",
"]",
".",
"get",
"(",
"calendar",
"[",
"CONF_CAL_ID",
"]",
",",
"None",
")",
"is",
"not",
"None",
")",
":",
"return",
"hass",
".",
"data",
"[",
"DATA_INDEX",
"]",
".",
"update",
"(",
"{",
"calendar",
"[",
"CONF_CAL_ID",
"]",
":",
"calendar",
"}",
")",
"update_config",
"(",
"hass",
".",
"config",
".",
"path",
"(",
"YAML_DEVICES",
")",
",",
"hass",
".",
"data",
"[",
"DATA_INDEX",
"]",
"[",
"calendar",
"[",
"CONF_CAL_ID",
"]",
"]",
")",
"discovery",
".",
"load_platform",
"(",
"hass",
",",
"'calendar'",
",",
"DOMAIN",
",",
"hass",
".",
"data",
"[",
"DATA_INDEX",
"]",
"[",
"calendar",
"[",
"CONF_CAL_ID",
"]",
"]",
")",
"hass",
".",
"services",
".",
"register",
"(",
"DOMAIN",
",",
"SERVICE_FOUND_CALENDARS",
",",
"_found_calendar",
",",
"None",
",",
"schema",
"=",
"None",
")",
"def",
"_scan_for_calendars",
"(",
"service",
")",
":",
"service",
"=",
"calendar_service",
".",
"get",
"(",
")",
"cal_list",
"=",
"service",
".",
"calendarList",
"(",
")",
"calendars",
"=",
"cal_list",
".",
"list",
"(",
")",
".",
"execute",
"(",
")",
"[",
"'items'",
"]",
"for",
"calendar",
"in",
"calendars",
":",
"calendar",
"[",
"'track'",
"]",
"=",
"track_new_found_calendars",
"hass",
".",
"services",
".",
"call",
"(",
"DOMAIN",
",",
"SERVICE_FOUND_CALENDARS",
",",
"calendar",
")",
"hass",
".",
"services",
".",
"register",
"(",
"DOMAIN",
",",
"SERVICE_SCAN_CALENDARS",
",",
"_scan_for_calendars",
",",
"None",
",",
"schema",
"=",
"None",
")",
"return",
"True"
] |
setup service listeners .
|
train
| false
|
10,606
|
def LoadConfig(config_file_name, exit_fn=sys.exit):
if config_file_name:
config_file = open(config_file_name, 'r')
try:
bulkloader_config = imp.load_module('bulkloader_config', config_file, config_file_name, ('', 'r', imp.PY_SOURCE))
sys.modules['bulkloader_config'] = bulkloader_config
if hasattr(bulkloader_config, 'loaders'):
for cls in bulkloader_config.loaders:
Loader.RegisterLoader(cls())
if hasattr(bulkloader_config, 'exporters'):
for cls in bulkloader_config.exporters:
Exporter.RegisterExporter(cls())
if hasattr(bulkloader_config, 'mappers'):
for cls in bulkloader_config.mappers:
Mapper.RegisterMapper(cls())
except NameError as e:
m = re.search("[^']*'([^']*)'.*", str(e))
if (m.groups() and (m.group(1) == 'Loader')):
print >>sys.stderr, '\nThe config file format has changed and you appear to be using an old-style\nconfig file. Please make the following changes:\n\n1. At the top of the file, add this:\n\nfrom google.appengine.tools.bulkloader import Loader\n\n2. For each of your Loader subclasses add the following at the end of the\n __init__ definitioion:\n\nself.alias_old_names()\n\n3. At the bottom of the file, add this:\n\nloaders = [MyLoader1,...,MyLoaderN]\n\nWhere MyLoader1,...,MyLoaderN are the Loader subclasses you want the bulkloader\nto have access to.\n'
exit_fn(1)
else:
raise
except Exception as e:
if (isinstance(e, NameClashError) or (('bulkloader_config' in vars()) and (hasattr(bulkloader_config, 'bulkloader') and isinstance(e, bulkloader_config.bulkloader.NameClashError)))):
print >>sys.stderr, ('Found both %s and %s while aliasing old names on %s.' % (e.old_name, e.new_name, e.klass))
exit_fn(1)
else:
raise
|
[
"def",
"LoadConfig",
"(",
"config_file_name",
",",
"exit_fn",
"=",
"sys",
".",
"exit",
")",
":",
"if",
"config_file_name",
":",
"config_file",
"=",
"open",
"(",
"config_file_name",
",",
"'r'",
")",
"try",
":",
"bulkloader_config",
"=",
"imp",
".",
"load_module",
"(",
"'bulkloader_config'",
",",
"config_file",
",",
"config_file_name",
",",
"(",
"''",
",",
"'r'",
",",
"imp",
".",
"PY_SOURCE",
")",
")",
"sys",
".",
"modules",
"[",
"'bulkloader_config'",
"]",
"=",
"bulkloader_config",
"if",
"hasattr",
"(",
"bulkloader_config",
",",
"'loaders'",
")",
":",
"for",
"cls",
"in",
"bulkloader_config",
".",
"loaders",
":",
"Loader",
".",
"RegisterLoader",
"(",
"cls",
"(",
")",
")",
"if",
"hasattr",
"(",
"bulkloader_config",
",",
"'exporters'",
")",
":",
"for",
"cls",
"in",
"bulkloader_config",
".",
"exporters",
":",
"Exporter",
".",
"RegisterExporter",
"(",
"cls",
"(",
")",
")",
"if",
"hasattr",
"(",
"bulkloader_config",
",",
"'mappers'",
")",
":",
"for",
"cls",
"in",
"bulkloader_config",
".",
"mappers",
":",
"Mapper",
".",
"RegisterMapper",
"(",
"cls",
"(",
")",
")",
"except",
"NameError",
"as",
"e",
":",
"m",
"=",
"re",
".",
"search",
"(",
"\"[^']*'([^']*)'.*\"",
",",
"str",
"(",
"e",
")",
")",
"if",
"(",
"m",
".",
"groups",
"(",
")",
"and",
"(",
"m",
".",
"group",
"(",
"1",
")",
"==",
"'Loader'",
")",
")",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"'\\nThe config file format has changed and you appear to be using an old-style\\nconfig file. Please make the following changes:\\n\\n1. At the top of the file, add this:\\n\\nfrom google.appengine.tools.bulkloader import Loader\\n\\n2. For each of your Loader subclasses add the following at the end of the\\n __init__ definitioion:\\n\\nself.alias_old_names()\\n\\n3. At the bottom of the file, add this:\\n\\nloaders = [MyLoader1,...,MyLoaderN]\\n\\nWhere MyLoader1,...,MyLoaderN are the Loader subclasses you want the bulkloader\\nto have access to.\\n'",
"exit_fn",
"(",
"1",
")",
"else",
":",
"raise",
"except",
"Exception",
"as",
"e",
":",
"if",
"(",
"isinstance",
"(",
"e",
",",
"NameClashError",
")",
"or",
"(",
"(",
"'bulkloader_config'",
"in",
"vars",
"(",
")",
")",
"and",
"(",
"hasattr",
"(",
"bulkloader_config",
",",
"'bulkloader'",
")",
"and",
"isinstance",
"(",
"e",
",",
"bulkloader_config",
".",
"bulkloader",
".",
"NameClashError",
")",
")",
")",
")",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"(",
"'Found both %s and %s while aliasing old names on %s.'",
"%",
"(",
"e",
".",
"old_name",
",",
"e",
".",
"new_name",
",",
"e",
".",
"klass",
")",
")",
"exit_fn",
"(",
"1",
")",
"else",
":",
"raise"
] |
initialize a configmanager with the specified options .
|
train
| false
|
10,607
|
def add_host(zone, name, ttl, ip, keyname, keyfile, nameserver, timeout, port=53, keyalgorithm='hmac-md5'):
res = []
if (zone in name):
name = name.replace(zone, '').rstrip('.')
fqdn = '{0}.{1}'.format(name, zone)
ret = create(zone, name, ttl, 'A', ip, keyname, keyfile, nameserver, timeout, port, keyalgorithm)
res.append(ret[fqdn])
parts = ip.split('.')[::(-1)]
i = len(parts)
popped = []
while (i > 1):
p = parts.pop(0)
i -= 1
popped.append(p)
zone = '{0}.{1}'.format('.'.join(parts), 'in-addr.arpa.')
name = '.'.join(popped)
rev_fqdn = '{0}.{1}'.format(name, zone)
ret = create(zone, name, ttl, 'PTR', '{0}.'.format(fqdn), keyname, keyfile, nameserver, timeout, port, keyalgorithm)
if ('Created' in ret[rev_fqdn]):
res.append(ret[rev_fqdn])
return {fqdn: res}
res.append(ret[rev_fqdn])
return {fqdn: res}
|
[
"def",
"add_host",
"(",
"zone",
",",
"name",
",",
"ttl",
",",
"ip",
",",
"keyname",
",",
"keyfile",
",",
"nameserver",
",",
"timeout",
",",
"port",
"=",
"53",
",",
"keyalgorithm",
"=",
"'hmac-md5'",
")",
":",
"res",
"=",
"[",
"]",
"if",
"(",
"zone",
"in",
"name",
")",
":",
"name",
"=",
"name",
".",
"replace",
"(",
"zone",
",",
"''",
")",
".",
"rstrip",
"(",
"'.'",
")",
"fqdn",
"=",
"'{0}.{1}'",
".",
"format",
"(",
"name",
",",
"zone",
")",
"ret",
"=",
"create",
"(",
"zone",
",",
"name",
",",
"ttl",
",",
"'A'",
",",
"ip",
",",
"keyname",
",",
"keyfile",
",",
"nameserver",
",",
"timeout",
",",
"port",
",",
"keyalgorithm",
")",
"res",
".",
"append",
"(",
"ret",
"[",
"fqdn",
"]",
")",
"parts",
"=",
"ip",
".",
"split",
"(",
"'.'",
")",
"[",
":",
":",
"(",
"-",
"1",
")",
"]",
"i",
"=",
"len",
"(",
"parts",
")",
"popped",
"=",
"[",
"]",
"while",
"(",
"i",
">",
"1",
")",
":",
"p",
"=",
"parts",
".",
"pop",
"(",
"0",
")",
"i",
"-=",
"1",
"popped",
".",
"append",
"(",
"p",
")",
"zone",
"=",
"'{0}.{1}'",
".",
"format",
"(",
"'.'",
".",
"join",
"(",
"parts",
")",
",",
"'in-addr.arpa.'",
")",
"name",
"=",
"'.'",
".",
"join",
"(",
"popped",
")",
"rev_fqdn",
"=",
"'{0}.{1}'",
".",
"format",
"(",
"name",
",",
"zone",
")",
"ret",
"=",
"create",
"(",
"zone",
",",
"name",
",",
"ttl",
",",
"'PTR'",
",",
"'{0}.'",
".",
"format",
"(",
"fqdn",
")",
",",
"keyname",
",",
"keyfile",
",",
"nameserver",
",",
"timeout",
",",
"port",
",",
"keyalgorithm",
")",
"if",
"(",
"'Created'",
"in",
"ret",
"[",
"rev_fqdn",
"]",
")",
":",
"res",
".",
"append",
"(",
"ret",
"[",
"rev_fqdn",
"]",
")",
"return",
"{",
"fqdn",
":",
"res",
"}",
"res",
".",
"append",
"(",
"ret",
"[",
"rev_fqdn",
"]",
")",
"return",
"{",
"fqdn",
":",
"res",
"}"
] |
add host .
|
train
| true
|
10,608
|
def EvalNormalCdf(x, mu=0, sigma=1):
return stats.norm.cdf(x, loc=mu, scale=sigma)
|
[
"def",
"EvalNormalCdf",
"(",
"x",
",",
"mu",
"=",
"0",
",",
"sigma",
"=",
"1",
")",
":",
"return",
"stats",
".",
"norm",
".",
"cdf",
"(",
"x",
",",
"loc",
"=",
"mu",
",",
"scale",
"=",
"sigma",
")"
] |
evaluates the cdf of the normal distribution .
|
train
| false
|
10,609
|
def NDP_Attack_DAD_DoS_via_NS(iface=None, mac_src_filter=None, tgt_filter=None, reply_mac=None):
def ns_reply_callback(req, reply_mac, iface):
'\n Callback that reply to a NS by sending a similar NS\n '
mac = req[Ether].src
dst = req[IPv6].dst
tgt = req[ICMPv6ND_NS].tgt
rep = ((Ether(src=reply_mac) / IPv6(src='::', dst=dst)) / ICMPv6ND_NS(tgt=tgt))
sendp(rep, iface=iface, verbose=0)
print ('Reply NS for target address %s (received from %s)' % (tgt, mac))
_NDP_Attack_DAD_DoS(ns_reply_callback, iface, mac_src_filter, tgt_filter, reply_mac)
|
[
"def",
"NDP_Attack_DAD_DoS_via_NS",
"(",
"iface",
"=",
"None",
",",
"mac_src_filter",
"=",
"None",
",",
"tgt_filter",
"=",
"None",
",",
"reply_mac",
"=",
"None",
")",
":",
"def",
"ns_reply_callback",
"(",
"req",
",",
"reply_mac",
",",
"iface",
")",
":",
"mac",
"=",
"req",
"[",
"Ether",
"]",
".",
"src",
"dst",
"=",
"req",
"[",
"IPv6",
"]",
".",
"dst",
"tgt",
"=",
"req",
"[",
"ICMPv6ND_NS",
"]",
".",
"tgt",
"rep",
"=",
"(",
"(",
"Ether",
"(",
"src",
"=",
"reply_mac",
")",
"/",
"IPv6",
"(",
"src",
"=",
"'::'",
",",
"dst",
"=",
"dst",
")",
")",
"/",
"ICMPv6ND_NS",
"(",
"tgt",
"=",
"tgt",
")",
")",
"sendp",
"(",
"rep",
",",
"iface",
"=",
"iface",
",",
"verbose",
"=",
"0",
")",
"print",
"(",
"'Reply NS for target address %s (received from %s)'",
"%",
"(",
"tgt",
",",
"mac",
")",
")",
"_NDP_Attack_DAD_DoS",
"(",
"ns_reply_callback",
",",
"iface",
",",
"mac_src_filter",
",",
"tgt_filter",
",",
"reply_mac",
")"
] |
perform the dad dos attack using ns described in section 4 .
|
train
| true
|
10,610
|
def path_isdir(path):
return os.path.isdir(path)
|
[
"def",
"path_isdir",
"(",
"path",
")",
":",
"return",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")"
] |
check to see whether the file already exists .
|
train
| false
|
10,611
|
def obj_dict_dictize(obj_dict, context, sort_key=(lambda x: x)):
result_list = []
for (key, obj) in obj_dict.items():
result_list.append(table_dictize(obj, context))
return sorted(result_list, key=sort_key)
|
[
"def",
"obj_dict_dictize",
"(",
"obj_dict",
",",
"context",
",",
"sort_key",
"=",
"(",
"lambda",
"x",
":",
"x",
")",
")",
":",
"result_list",
"=",
"[",
"]",
"for",
"(",
"key",
",",
"obj",
")",
"in",
"obj_dict",
".",
"items",
"(",
")",
":",
"result_list",
".",
"append",
"(",
"table_dictize",
"(",
"obj",
",",
"context",
")",
")",
"return",
"sorted",
"(",
"result_list",
",",
"key",
"=",
"sort_key",
")"
] |
get a dict whose values are model objects and represent it as a list of dicts .
|
train
| false
|
10,612
|
def get_database_engine_options(kwargs):
conversions = {'convert_unicode': string_as_bool, 'pool_timeout': int, 'echo': string_as_bool, 'echo_pool': string_as_bool, 'pool_recycle': int, 'pool_size': int, 'max_overflow': int, 'pool_threadlocal': string_as_bool}
prefix = 'database_engine_option_'
prefix_len = len(prefix)
rval = {}
for (key, value) in kwargs.items():
if key.startswith(prefix):
key = key[prefix_len:]
if (key in conversions):
value = conversions[key](value)
rval[key] = value
return rval
|
[
"def",
"get_database_engine_options",
"(",
"kwargs",
")",
":",
"conversions",
"=",
"{",
"'convert_unicode'",
":",
"string_as_bool",
",",
"'pool_timeout'",
":",
"int",
",",
"'echo'",
":",
"string_as_bool",
",",
"'echo_pool'",
":",
"string_as_bool",
",",
"'pool_recycle'",
":",
"int",
",",
"'pool_size'",
":",
"int",
",",
"'max_overflow'",
":",
"int",
",",
"'pool_threadlocal'",
":",
"string_as_bool",
"}",
"prefix",
"=",
"'database_engine_option_'",
"prefix_len",
"=",
"len",
"(",
"prefix",
")",
"rval",
"=",
"{",
"}",
"for",
"(",
"key",
",",
"value",
")",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"key",
".",
"startswith",
"(",
"prefix",
")",
":",
"key",
"=",
"key",
"[",
"prefix_len",
":",
"]",
"if",
"(",
"key",
"in",
"conversions",
")",
":",
"value",
"=",
"conversions",
"[",
"key",
"]",
"(",
"value",
")",
"rval",
"[",
"key",
"]",
"=",
"value",
"return",
"rval"
] |
allow options for the sqlalchemy database engine to be passed by using the prefix "database_engine_option" .
|
train
| false
|
10,613
|
def split_into_keywords(string, to_filter_stopwords=False):
if to_filter_stopwords:
return [kw for kw in RE_KEYWORD_SPLIT.split(string.lower()) if ((len(kw) > 0) and (kw not in DIALOG_STOPWORDS))]
else:
return [kw for kw in RE_KEYWORD_SPLIT.split(string.lower()) if (len(kw) > 0)]
|
[
"def",
"split_into_keywords",
"(",
"string",
",",
"to_filter_stopwords",
"=",
"False",
")",
":",
"if",
"to_filter_stopwords",
":",
"return",
"[",
"kw",
"for",
"kw",
"in",
"RE_KEYWORD_SPLIT",
".",
"split",
"(",
"string",
".",
"lower",
"(",
")",
")",
"if",
"(",
"(",
"len",
"(",
"kw",
")",
">",
"0",
")",
"and",
"(",
"kw",
"not",
"in",
"DIALOG_STOPWORDS",
")",
")",
"]",
"else",
":",
"return",
"[",
"kw",
"for",
"kw",
"in",
"RE_KEYWORD_SPLIT",
".",
"split",
"(",
"string",
".",
"lower",
"(",
")",
")",
"if",
"(",
"len",
"(",
"kw",
")",
">",
"0",
")",
"]"
] |
takes a string and returns a list of lowercase strings .
|
train
| false
|
10,614
|
def missing(name, limit=''):
if (limit == 'upstart'):
return (not _service_is_upstart(name))
elif (limit == 'sysvinit'):
return (not _service_is_sysv(name))
elif (_service_is_upstart(name) or _service_is_sysv(name)):
return False
else:
return True
|
[
"def",
"missing",
"(",
"name",
",",
"limit",
"=",
"''",
")",
":",
"if",
"(",
"limit",
"==",
"'upstart'",
")",
":",
"return",
"(",
"not",
"_service_is_upstart",
"(",
"name",
")",
")",
"elif",
"(",
"limit",
"==",
"'sysvinit'",
")",
":",
"return",
"(",
"not",
"_service_is_sysv",
"(",
"name",
")",
")",
"elif",
"(",
"_service_is_upstart",
"(",
"name",
")",
"or",
"_service_is_sysv",
"(",
"name",
")",
")",
":",
"return",
"False",
"else",
":",
"return",
"True"
] |
the inverse of service .
|
train
| true
|
10,615
|
def globber(path, pattern=u'*'):
if os.path.exists(path):
return [f for f in os.listdir(path) if fnmatch.fnmatch(f, pattern)]
else:
return []
|
[
"def",
"globber",
"(",
"path",
",",
"pattern",
"=",
"u'*'",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"return",
"[",
"f",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"path",
")",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"f",
",",
"pattern",
")",
"]",
"else",
":",
"return",
"[",
"]"
] |
return matching base file/folder names in folder path .
|
train
| false
|
10,616
|
def central_server_only(handler):
def central_server_only_wrapper_fn(*args, **kwargs):
if (not settings.CENTRAL_SERVER):
raise Http404(_('This path is only available on the central server.'))
return handler(*args, **kwargs)
return central_server_only_wrapper_fn
|
[
"def",
"central_server_only",
"(",
"handler",
")",
":",
"def",
"central_server_only_wrapper_fn",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"if",
"(",
"not",
"settings",
".",
"CENTRAL_SERVER",
")",
":",
"raise",
"Http404",
"(",
"_",
"(",
"'This path is only available on the central server.'",
")",
")",
"return",
"handler",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"return",
"central_server_only_wrapper_fn"
] |
assert-like decorator that marks a function for use only on the central server .
|
train
| false
|
10,617
|
@docfiller
def mat_reader_factory(file_name, appendmat=True, **kwargs):
byte_stream = _open_file(file_name, appendmat)
(mjv, mnv) = get_matfile_version(byte_stream)
if (mjv == 0):
return MatFile4Reader(byte_stream, **kwargs)
elif (mjv == 1):
return MatFile5Reader(byte_stream, **kwargs)
elif (mjv == 2):
raise NotImplementedError('Please use HDF reader for matlab v7.3 files')
else:
raise TypeError(('Did not recognize version %s' % mjv))
|
[
"@",
"docfiller",
"def",
"mat_reader_factory",
"(",
"file_name",
",",
"appendmat",
"=",
"True",
",",
"**",
"kwargs",
")",
":",
"byte_stream",
"=",
"_open_file",
"(",
"file_name",
",",
"appendmat",
")",
"(",
"mjv",
",",
"mnv",
")",
"=",
"get_matfile_version",
"(",
"byte_stream",
")",
"if",
"(",
"mjv",
"==",
"0",
")",
":",
"return",
"MatFile4Reader",
"(",
"byte_stream",
",",
"**",
"kwargs",
")",
"elif",
"(",
"mjv",
"==",
"1",
")",
":",
"return",
"MatFile5Reader",
"(",
"byte_stream",
",",
"**",
"kwargs",
")",
"elif",
"(",
"mjv",
"==",
"2",
")",
":",
"raise",
"NotImplementedError",
"(",
"'Please use HDF reader for matlab v7.3 files'",
")",
"else",
":",
"raise",
"TypeError",
"(",
"(",
"'Did not recognize version %s'",
"%",
"mjv",
")",
")"
] |
create reader for matlab .
|
train
| false
|
10,618
|
def determineDefaultFunctionName():
try:
(1 / 0)
except:
return traceback.extract_stack()[(-2)][2]
|
[
"def",
"determineDefaultFunctionName",
"(",
")",
":",
"try",
":",
"(",
"1",
"/",
"0",
")",
"except",
":",
"return",
"traceback",
".",
"extract_stack",
"(",
")",
"[",
"(",
"-",
"2",
")",
"]",
"[",
"2",
"]"
] |
return the string used by python as the name for code objects which are compiled from interactive input or at the top-level of modules .
|
train
| false
|
10,619
|
def subreddit_messages_nocache(sr):
from r2.lib.db import queries
inbox = queries.get_subreddit_messages(sr)
messages = _load_messages(inbox)
return compute_message_trees(messages)
|
[
"def",
"subreddit_messages_nocache",
"(",
"sr",
")",
":",
"from",
"r2",
".",
"lib",
".",
"db",
"import",
"queries",
"inbox",
"=",
"queries",
".",
"get_subreddit_messages",
"(",
"sr",
")",
"messages",
"=",
"_load_messages",
"(",
"inbox",
")",
"return",
"compute_message_trees",
"(",
"messages",
")"
] |
just like user_messages .
|
train
| false
|
10,620
|
def _block_diag(A, n):
if sparse.issparse(A):
raise NotImplemented('sparse reversal not implemented yet')
(ma, na) = A.shape
bdn = (na // int(n))
if ((na % n) > 0):
raise ValueError('Width of matrix must be a multiple of n')
tmp = np.arange((ma * bdn), dtype=np.int).reshape(bdn, ma)
tmp = np.tile(tmp, (1, n))
ii = tmp.ravel()
jj = np.arange(na, dtype=np.int)[None, :]
jj = (jj * np.ones(ma, dtype=np.int)[:, None])
jj = jj.T.ravel()
bd = sparse.coo_matrix((A.T.ravel(), np.c_[(ii, jj)].T)).tocsc()
return bd
|
[
"def",
"_block_diag",
"(",
"A",
",",
"n",
")",
":",
"if",
"sparse",
".",
"issparse",
"(",
"A",
")",
":",
"raise",
"NotImplemented",
"(",
"'sparse reversal not implemented yet'",
")",
"(",
"ma",
",",
"na",
")",
"=",
"A",
".",
"shape",
"bdn",
"=",
"(",
"na",
"//",
"int",
"(",
"n",
")",
")",
"if",
"(",
"(",
"na",
"%",
"n",
")",
">",
"0",
")",
":",
"raise",
"ValueError",
"(",
"'Width of matrix must be a multiple of n'",
")",
"tmp",
"=",
"np",
".",
"arange",
"(",
"(",
"ma",
"*",
"bdn",
")",
",",
"dtype",
"=",
"np",
".",
"int",
")",
".",
"reshape",
"(",
"bdn",
",",
"ma",
")",
"tmp",
"=",
"np",
".",
"tile",
"(",
"tmp",
",",
"(",
"1",
",",
"n",
")",
")",
"ii",
"=",
"tmp",
".",
"ravel",
"(",
")",
"jj",
"=",
"np",
".",
"arange",
"(",
"na",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"[",
"None",
",",
":",
"]",
"jj",
"=",
"(",
"jj",
"*",
"np",
".",
"ones",
"(",
"ma",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"[",
":",
",",
"None",
"]",
")",
"jj",
"=",
"jj",
".",
"T",
".",
"ravel",
"(",
")",
"bd",
"=",
"sparse",
".",
"coo_matrix",
"(",
"(",
"A",
".",
"T",
".",
"ravel",
"(",
")",
",",
"np",
".",
"c_",
"[",
"(",
"ii",
",",
"jj",
")",
"]",
".",
"T",
")",
")",
".",
"tocsc",
"(",
")",
"return",
"bd"
] |
construct a block diagonal from a packed structure .
|
train
| false
|
10,621
|
@require_POST
@login_required
@permitted
def vote_for_thread(request, course_id, thread_id, value):
thread = cc.Thread.find(thread_id)
result = _vote_or_unvote(request, course_id, thread, value)
thread_voted.send(sender=None, user=request.user, post=thread)
return result
|
[
"@",
"require_POST",
"@",
"login_required",
"@",
"permitted",
"def",
"vote_for_thread",
"(",
"request",
",",
"course_id",
",",
"thread_id",
",",
"value",
")",
":",
"thread",
"=",
"cc",
".",
"Thread",
".",
"find",
"(",
"thread_id",
")",
"result",
"=",
"_vote_or_unvote",
"(",
"request",
",",
"course_id",
",",
"thread",
",",
"value",
")",
"thread_voted",
".",
"send",
"(",
"sender",
"=",
"None",
",",
"user",
"=",
"request",
".",
"user",
",",
"post",
"=",
"thread",
")",
"return",
"result"
] |
given a course id and thread id , vote for this thread ; ajax only .
|
train
| false
|
10,622
|
def device_ctypes_pointer(obj):
if (obj is None):
return c_void_p(0)
require_device_memory(obj)
return obj.device_ctypes_pointer
|
[
"def",
"device_ctypes_pointer",
"(",
"obj",
")",
":",
"if",
"(",
"obj",
"is",
"None",
")",
":",
"return",
"c_void_p",
"(",
"0",
")",
"require_device_memory",
"(",
"obj",
")",
"return",
"obj",
".",
"device_ctypes_pointer"
] |
get the ctypes object for the device pointer .
|
train
| false
|
10,625
|
def parsehttpdate(string_):
try:
t = time.strptime(string_, '%a, %d %b %Y %H:%M:%S %Z')
except ValueError:
return None
return datetime.datetime(*t[:6])
|
[
"def",
"parsehttpdate",
"(",
"string_",
")",
":",
"try",
":",
"t",
"=",
"time",
".",
"strptime",
"(",
"string_",
",",
"'%a, %d %b %Y %H:%M:%S %Z'",
")",
"except",
"ValueError",
":",
"return",
"None",
"return",
"datetime",
".",
"datetime",
"(",
"*",
"t",
"[",
":",
"6",
"]",
")"
] |
parses an http date into a datetime object .
|
train
| true
|
10,626
|
def cache_headers_valid(policy_name, headers):
policy_headers = CACHE_POLICY_DIRECTIVES[policy_name]
for (header_name, expected_vals) in policy_headers.items():
found_vals = set(headers.get(header_name, []))
if (header_name == 'cache-control'):
parsed_cache_control = set()
for cache_header in found_vals:
for split_header in cache_header.split(','):
cache_directive = split_header.strip().lower()
parsed_cache_control.add(cache_directive)
if (parsed_cache_control != expected_vals):
return False
elif (found_vals != expected_vals):
return False
return True
|
[
"def",
"cache_headers_valid",
"(",
"policy_name",
",",
"headers",
")",
":",
"policy_headers",
"=",
"CACHE_POLICY_DIRECTIVES",
"[",
"policy_name",
"]",
"for",
"(",
"header_name",
",",
"expected_vals",
")",
"in",
"policy_headers",
".",
"items",
"(",
")",
":",
"found_vals",
"=",
"set",
"(",
"headers",
".",
"get",
"(",
"header_name",
",",
"[",
"]",
")",
")",
"if",
"(",
"header_name",
"==",
"'cache-control'",
")",
":",
"parsed_cache_control",
"=",
"set",
"(",
")",
"for",
"cache_header",
"in",
"found_vals",
":",
"for",
"split_header",
"in",
"cache_header",
".",
"split",
"(",
"','",
")",
":",
"cache_directive",
"=",
"split_header",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"parsed_cache_control",
".",
"add",
"(",
"cache_directive",
")",
"if",
"(",
"parsed_cache_control",
"!=",
"expected_vals",
")",
":",
"return",
"False",
"elif",
"(",
"found_vals",
"!=",
"expected_vals",
")",
":",
"return",
"False",
"return",
"True"
] |
check if a response's headers make sense given a cache policy .
|
train
| false
|
10,628
|
def test_iba_geo_mean_binary():
(y_true, y_pred, _) = make_prediction(binary=True)
iba_gmean = make_index_balanced_accuracy(alpha=0.5, squared=True)(geometric_mean_score)
iba = iba_gmean(y_true, y_pred)
assert_allclose(iba, 0.5948, rtol=R_TOL)
|
[
"def",
"test_iba_geo_mean_binary",
"(",
")",
":",
"(",
"y_true",
",",
"y_pred",
",",
"_",
")",
"=",
"make_prediction",
"(",
"binary",
"=",
"True",
")",
"iba_gmean",
"=",
"make_index_balanced_accuracy",
"(",
"alpha",
"=",
"0.5",
",",
"squared",
"=",
"True",
")",
"(",
"geometric_mean_score",
")",
"iba",
"=",
"iba_gmean",
"(",
"y_true",
",",
"y_pred",
")",
"assert_allclose",
"(",
"iba",
",",
"0.5948",
",",
"rtol",
"=",
"R_TOL",
")"
] |
test to test the iba using the geometric mean .
|
train
| false
|
10,629
|
@public
def factor(f, *gens, **args):
f = sympify(f)
if args.pop('deep', False):
partials = {}
muladd = f.atoms(Mul, Add)
for p in muladd:
fac = factor(p, *gens, **args)
if ((fac.is_Mul or fac.is_Pow) and (fac != p)):
partials[p] = fac
return f.xreplace(partials)
try:
return _generic_factor(f, gens, args, method='factor')
except PolynomialError as msg:
if (not f.is_commutative):
from sympy.core.exprtools import factor_nc
return factor_nc(f)
else:
raise PolynomialError(msg)
|
[
"@",
"public",
"def",
"factor",
"(",
"f",
",",
"*",
"gens",
",",
"**",
"args",
")",
":",
"f",
"=",
"sympify",
"(",
"f",
")",
"if",
"args",
".",
"pop",
"(",
"'deep'",
",",
"False",
")",
":",
"partials",
"=",
"{",
"}",
"muladd",
"=",
"f",
".",
"atoms",
"(",
"Mul",
",",
"Add",
")",
"for",
"p",
"in",
"muladd",
":",
"fac",
"=",
"factor",
"(",
"p",
",",
"*",
"gens",
",",
"**",
"args",
")",
"if",
"(",
"(",
"fac",
".",
"is_Mul",
"or",
"fac",
".",
"is_Pow",
")",
"and",
"(",
"fac",
"!=",
"p",
")",
")",
":",
"partials",
"[",
"p",
"]",
"=",
"fac",
"return",
"f",
".",
"xreplace",
"(",
"partials",
")",
"try",
":",
"return",
"_generic_factor",
"(",
"f",
",",
"gens",
",",
"args",
",",
"method",
"=",
"'factor'",
")",
"except",
"PolynomialError",
"as",
"msg",
":",
"if",
"(",
"not",
"f",
".",
"is_commutative",
")",
":",
"from",
"sympy",
".",
"core",
".",
"exprtools",
"import",
"factor_nc",
"return",
"factor_nc",
"(",
"f",
")",
"else",
":",
"raise",
"PolynomialError",
"(",
"msg",
")"
] |
compute the factorization of expression .
|
train
| false
|
10,630
|
def is_command(text):
return text.startswith('/')
|
[
"def",
"is_command",
"(",
"text",
")",
":",
"return",
"text",
".",
"startswith",
"(",
"'/'",
")"
] |
checks if text is a command .
|
train
| false
|
10,631
|
def _find_parametrized_scope(argnames, arg2fixturedefs, indirect):
from _pytest.fixtures import scopes
indirect_as_list = isinstance(indirect, (list, tuple))
all_arguments_are_fixtures = ((indirect is True) or (indirect_as_list and (len(indirect) == argnames)))
if all_arguments_are_fixtures:
fixturedefs = (arg2fixturedefs or {})
used_scopes = [fixturedef[0].scope for (name, fixturedef) in fixturedefs.items()]
if used_scopes:
for scope in reversed(scopes):
if (scope in used_scopes):
return scope
return 'function'
|
[
"def",
"_find_parametrized_scope",
"(",
"argnames",
",",
"arg2fixturedefs",
",",
"indirect",
")",
":",
"from",
"_pytest",
".",
"fixtures",
"import",
"scopes",
"indirect_as_list",
"=",
"isinstance",
"(",
"indirect",
",",
"(",
"list",
",",
"tuple",
")",
")",
"all_arguments_are_fixtures",
"=",
"(",
"(",
"indirect",
"is",
"True",
")",
"or",
"(",
"indirect_as_list",
"and",
"(",
"len",
"(",
"indirect",
")",
"==",
"argnames",
")",
")",
")",
"if",
"all_arguments_are_fixtures",
":",
"fixturedefs",
"=",
"(",
"arg2fixturedefs",
"or",
"{",
"}",
")",
"used_scopes",
"=",
"[",
"fixturedef",
"[",
"0",
"]",
".",
"scope",
"for",
"(",
"name",
",",
"fixturedef",
")",
"in",
"fixturedefs",
".",
"items",
"(",
")",
"]",
"if",
"used_scopes",
":",
"for",
"scope",
"in",
"reversed",
"(",
"scopes",
")",
":",
"if",
"(",
"scope",
"in",
"used_scopes",
")",
":",
"return",
"scope",
"return",
"'function'"
] |
find the most appropriate scope for a parametrized call based on its arguments .
|
train
| false
|
10,632
|
def _maybe_align_partitions(args):
dfs = [df for df in args if isinstance(df, _Frame)]
if (not dfs):
return args
divisions = dfs[0].divisions
if (not all(((df.divisions == divisions) for df in dfs))):
dfs2 = iter(align_partitions(*dfs)[0])
return [(a if (not isinstance(a, _Frame)) else next(dfs2)) for a in args]
return args
|
[
"def",
"_maybe_align_partitions",
"(",
"args",
")",
":",
"dfs",
"=",
"[",
"df",
"for",
"df",
"in",
"args",
"if",
"isinstance",
"(",
"df",
",",
"_Frame",
")",
"]",
"if",
"(",
"not",
"dfs",
")",
":",
"return",
"args",
"divisions",
"=",
"dfs",
"[",
"0",
"]",
".",
"divisions",
"if",
"(",
"not",
"all",
"(",
"(",
"(",
"df",
".",
"divisions",
"==",
"divisions",
")",
"for",
"df",
"in",
"dfs",
")",
")",
")",
":",
"dfs2",
"=",
"iter",
"(",
"align_partitions",
"(",
"*",
"dfs",
")",
"[",
"0",
"]",
")",
"return",
"[",
"(",
"a",
"if",
"(",
"not",
"isinstance",
"(",
"a",
",",
"_Frame",
")",
")",
"else",
"next",
"(",
"dfs2",
")",
")",
"for",
"a",
"in",
"args",
"]",
"return",
"args"
] |
align dataframe blocks if divisions are different .
|
train
| false
|
10,633
|
def get_start_date():
today = datetime.today()
date = datetime((today.year - 1), today.month, today.day, 12)
weekday = datetime.weekday(date)
while (weekday < 6):
date = (date + timedelta(1))
weekday = datetime.weekday(date)
return date
|
[
"def",
"get_start_date",
"(",
")",
":",
"today",
"=",
"datetime",
".",
"today",
"(",
")",
"date",
"=",
"datetime",
"(",
"(",
"today",
".",
"year",
"-",
"1",
")",
",",
"today",
".",
"month",
",",
"today",
".",
"day",
",",
"12",
")",
"weekday",
"=",
"datetime",
".",
"weekday",
"(",
"date",
")",
"while",
"(",
"weekday",
"<",
"6",
")",
":",
"date",
"=",
"(",
"date",
"+",
"timedelta",
"(",
"1",
")",
")",
"weekday",
"=",
"datetime",
".",
"weekday",
"(",
"date",
")",
"return",
"date"
] |
returns a datetime object for the first sunday after one year ago today at 12:00 noon .
|
train
| false
|
10,635
|
def ranking(R):
l = sorted(list(enumerate(R)), cmp=(lambda a, b: cmp(a[1], b[1])))
l = sorted(list(enumerate(l)), cmp=(lambda a, b: cmp(a[1], b[1])))
return array([kv[0] for kv in l])
|
[
"def",
"ranking",
"(",
"R",
")",
":",
"l",
"=",
"sorted",
"(",
"list",
"(",
"enumerate",
"(",
"R",
")",
")",
",",
"cmp",
"=",
"(",
"lambda",
"a",
",",
"b",
":",
"cmp",
"(",
"a",
"[",
"1",
"]",
",",
"b",
"[",
"1",
"]",
")",
")",
")",
"l",
"=",
"sorted",
"(",
"list",
"(",
"enumerate",
"(",
"l",
")",
")",
",",
"cmp",
"=",
"(",
"lambda",
"a",
",",
"b",
":",
"cmp",
"(",
"a",
"[",
"1",
"]",
",",
"b",
"[",
"1",
"]",
")",
")",
")",
"return",
"array",
"(",
"[",
"kv",
"[",
"0",
"]",
"for",
"kv",
"in",
"l",
"]",
")"
] |
produces a linear ranking of the values in r .
|
train
| false
|
10,636
|
def dmp_compose(f, g, u, K):
if (not u):
return dup_compose(f, g, K)
if dmp_zero_p(f, u):
return f
h = [f[0]]
for c in f[1:]:
h = dmp_mul(h, g, u, K)
h = dmp_add_term(h, c, 0, u, K)
return h
|
[
"def",
"dmp_compose",
"(",
"f",
",",
"g",
",",
"u",
",",
"K",
")",
":",
"if",
"(",
"not",
"u",
")",
":",
"return",
"dup_compose",
"(",
"f",
",",
"g",
",",
"K",
")",
"if",
"dmp_zero_p",
"(",
"f",
",",
"u",
")",
":",
"return",
"f",
"h",
"=",
"[",
"f",
"[",
"0",
"]",
"]",
"for",
"c",
"in",
"f",
"[",
"1",
":",
"]",
":",
"h",
"=",
"dmp_mul",
"(",
"h",
",",
"g",
",",
"u",
",",
"K",
")",
"h",
"=",
"dmp_add_term",
"(",
"h",
",",
"c",
",",
"0",
",",
"u",
",",
"K",
")",
"return",
"h"
] |
evaluate functional composition f(g) in k[x] .
|
train
| false
|
10,637
|
@pytest.mark.parametrize('error_message', ["error: pathspec 'unknown_branch' did not match any file(s) known to git.", "hg: abort: unknown revision 'unknown_branch'!"])
def test_clone_handles_branch_typo(mocker, clone_dir, error_message):
mocker.patch('cookiecutter.vcs.subprocess.check_output', autospec=True, side_effect=[subprocess.CalledProcessError((-1), 'cmd', output=error_message)])
repository_url = 'https://github.com/pytest-dev/cookiecutter-pytest-plugin'
with pytest.raises(exceptions.RepositoryCloneFailed) as err:
vcs.clone(repository_url, clone_to_dir=clone_dir, checkout='unknown_branch', no_input=True)
assert (str(err.value) == 'The unknown_branch branch of repository {} could not found, have you made a typo?'.format(repository_url))
|
[
"@",
"pytest",
".",
"mark",
".",
"parametrize",
"(",
"'error_message'",
",",
"[",
"\"error: pathspec 'unknown_branch' did not match any file(s) known to git.\"",
",",
"\"hg: abort: unknown revision 'unknown_branch'!\"",
"]",
")",
"def",
"test_clone_handles_branch_typo",
"(",
"mocker",
",",
"clone_dir",
",",
"error_message",
")",
":",
"mocker",
".",
"patch",
"(",
"'cookiecutter.vcs.subprocess.check_output'",
",",
"autospec",
"=",
"True",
",",
"side_effect",
"=",
"[",
"subprocess",
".",
"CalledProcessError",
"(",
"(",
"-",
"1",
")",
",",
"'cmd'",
",",
"output",
"=",
"error_message",
")",
"]",
")",
"repository_url",
"=",
"'https://github.com/pytest-dev/cookiecutter-pytest-plugin'",
"with",
"pytest",
".",
"raises",
"(",
"exceptions",
".",
"RepositoryCloneFailed",
")",
"as",
"err",
":",
"vcs",
".",
"clone",
"(",
"repository_url",
",",
"clone_to_dir",
"=",
"clone_dir",
",",
"checkout",
"=",
"'unknown_branch'",
",",
"no_input",
"=",
"True",
")",
"assert",
"(",
"str",
"(",
"err",
".",
"value",
")",
"==",
"'The unknown_branch branch of repository {} could not found, have you made a typo?'",
".",
"format",
"(",
"repository_url",
")",
")"
] |
in clone() .
|
train
| false
|
10,638
|
def result_to_country_region_city(result):
country = result_to_country(result)
if country:
region = result_to_region(result, country)
city = result_to_city(result, country, region)
else:
region = city = None
return (country, region, city)
|
[
"def",
"result_to_country_region_city",
"(",
"result",
")",
":",
"country",
"=",
"result_to_country",
"(",
"result",
")",
"if",
"country",
":",
"region",
"=",
"result_to_region",
"(",
"result",
",",
"country",
")",
"city",
"=",
"result_to_city",
"(",
"result",
",",
"country",
",",
"region",
")",
"else",
":",
"region",
"=",
"city",
"=",
"None",
"return",
"(",
"country",
",",
"region",
",",
"city",
")"
] |
given one result from mapbox .
|
train
| false
|
10,639
|
def get_default_model_permissions(model):
permissions = set()
for default in model._meta.default_permissions:
permissions.add(('%s.%s_%s' % (model._meta.app_label, default, model._meta.model_name)))
return permissions
|
[
"def",
"get_default_model_permissions",
"(",
"model",
")",
":",
"permissions",
"=",
"set",
"(",
")",
"for",
"default",
"in",
"model",
".",
"_meta",
".",
"default_permissions",
":",
"permissions",
".",
"add",
"(",
"(",
"'%s.%s_%s'",
"%",
"(",
"model",
".",
"_meta",
".",
"app_label",
",",
"default",
",",
"model",
".",
"_meta",
".",
"model_name",
")",
")",
")",
"return",
"permissions"
] |
return a set of all default permissions for a given model .
|
train
| false
|
10,642
|
def extra_oauth2_scope(*scopes):
def extra_oauth2_wrapper(fn):
@functools.wraps(fn)
def wrapper_fn(*a, **kw):
if (not c.oauth_user):
return fn(*a, **kw)
elif c.oauth_scope.has_access(c.site.name, set(scopes)):
return fn(*a, **kw)
else:
raise OAuth2Scope.InsufficientScopeError(scopes)
return wrapper_fn
return extra_oauth2_wrapper
|
[
"def",
"extra_oauth2_scope",
"(",
"*",
"scopes",
")",
":",
"def",
"extra_oauth2_wrapper",
"(",
"fn",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"fn",
")",
"def",
"wrapper_fn",
"(",
"*",
"a",
",",
"**",
"kw",
")",
":",
"if",
"(",
"not",
"c",
".",
"oauth_user",
")",
":",
"return",
"fn",
"(",
"*",
"a",
",",
"**",
"kw",
")",
"elif",
"c",
".",
"oauth_scope",
".",
"has_access",
"(",
"c",
".",
"site",
".",
"name",
",",
"set",
"(",
"scopes",
")",
")",
":",
"return",
"fn",
"(",
"*",
"a",
",",
"**",
"kw",
")",
"else",
":",
"raise",
"OAuth2Scope",
".",
"InsufficientScopeError",
"(",
"scopes",
")",
"return",
"wrapper_fn",
"return",
"extra_oauth2_wrapper"
] |
wrap a function so that it only returns data if user has all scopes when not in an oauth2 context .
|
train
| false
|
10,644
|
def aggregationToMonthsSeconds(interval):
seconds = (interval.get('microseconds', 0) * 1e-06)
seconds += (interval.get('milliseconds', 0) * 0.001)
seconds += interval.get('seconds', 0)
seconds += (interval.get('minutes', 0) * 60)
seconds += ((interval.get('hours', 0) * 60) * 60)
seconds += (((interval.get('days', 0) * 24) * 60) * 60)
seconds += ((((interval.get('weeks', 0) * 7) * 24) * 60) * 60)
months = interval.get('months', 0)
months += (12 * interval.get('years', 0))
return {'months': months, 'seconds': seconds}
|
[
"def",
"aggregationToMonthsSeconds",
"(",
"interval",
")",
":",
"seconds",
"=",
"(",
"interval",
".",
"get",
"(",
"'microseconds'",
",",
"0",
")",
"*",
"1e-06",
")",
"seconds",
"+=",
"(",
"interval",
".",
"get",
"(",
"'milliseconds'",
",",
"0",
")",
"*",
"0.001",
")",
"seconds",
"+=",
"interval",
".",
"get",
"(",
"'seconds'",
",",
"0",
")",
"seconds",
"+=",
"(",
"interval",
".",
"get",
"(",
"'minutes'",
",",
"0",
")",
"*",
"60",
")",
"seconds",
"+=",
"(",
"(",
"interval",
".",
"get",
"(",
"'hours'",
",",
"0",
")",
"*",
"60",
")",
"*",
"60",
")",
"seconds",
"+=",
"(",
"(",
"(",
"interval",
".",
"get",
"(",
"'days'",
",",
"0",
")",
"*",
"24",
")",
"*",
"60",
")",
"*",
"60",
")",
"seconds",
"+=",
"(",
"(",
"(",
"(",
"interval",
".",
"get",
"(",
"'weeks'",
",",
"0",
")",
"*",
"7",
")",
"*",
"24",
")",
"*",
"60",
")",
"*",
"60",
")",
"months",
"=",
"interval",
".",
"get",
"(",
"'months'",
",",
"0",
")",
"months",
"+=",
"(",
"12",
"*",
"interval",
".",
"get",
"(",
"'years'",
",",
"0",
")",
")",
"return",
"{",
"'months'",
":",
"months",
",",
"'seconds'",
":",
"seconds",
"}"
] |
return the number of months and seconds from an aggregation dict that represents a date and time .
|
train
| true
|
10,645
|
def _AddPrefix(element, prefix):
if (element is None):
return element
if (isinstance(element, list) or isinstance(element, tuple)):
return [(prefix + e) for e in element]
else:
return (prefix + element)
|
[
"def",
"_AddPrefix",
"(",
"element",
",",
"prefix",
")",
":",
"if",
"(",
"element",
"is",
"None",
")",
":",
"return",
"element",
"if",
"(",
"isinstance",
"(",
"element",
",",
"list",
")",
"or",
"isinstance",
"(",
"element",
",",
"tuple",
")",
")",
":",
"return",
"[",
"(",
"prefix",
"+",
"e",
")",
"for",
"e",
"in",
"element",
"]",
"else",
":",
"return",
"(",
"prefix",
"+",
"element",
")"
] |
add |prefix| to |element| or each subelement if element is iterable .
|
train
| false
|
10,646
|
def update_context(name, ctx=None):
if (ctx is None):
ctx = builtins.__xonsh_ctx__
if (not hasattr(update_context, 'bad_imports')):
update_context.bad_imports = []
modctx = xontrib_context(name)
if (modctx is None):
update_context.bad_imports.append(name)
return ctx
return ctx.update(modctx)
|
[
"def",
"update_context",
"(",
"name",
",",
"ctx",
"=",
"None",
")",
":",
"if",
"(",
"ctx",
"is",
"None",
")",
":",
"ctx",
"=",
"builtins",
".",
"__xonsh_ctx__",
"if",
"(",
"not",
"hasattr",
"(",
"update_context",
",",
"'bad_imports'",
")",
")",
":",
"update_context",
".",
"bad_imports",
"=",
"[",
"]",
"modctx",
"=",
"xontrib_context",
"(",
"name",
")",
"if",
"(",
"modctx",
"is",
"None",
")",
":",
"update_context",
".",
"bad_imports",
".",
"append",
"(",
"name",
")",
"return",
"ctx",
"return",
"ctx",
".",
"update",
"(",
"modctx",
")"
] |
updates a context in place from a xontrib .
|
train
| false
|
10,648
|
def S_nw_panel(xw, weights, groupidx):
nlags = (len(weights) - 1)
S = (weights[0] * np.dot(xw.T, xw))
for lag in range(1, (nlags + 1)):
(xw0, xwlag) = lagged_groups(xw, lag, groupidx)
s = np.dot(xw0.T, xwlag)
S += (weights[lag] * (s + s.T))
return S
|
[
"def",
"S_nw_panel",
"(",
"xw",
",",
"weights",
",",
"groupidx",
")",
":",
"nlags",
"=",
"(",
"len",
"(",
"weights",
")",
"-",
"1",
")",
"S",
"=",
"(",
"weights",
"[",
"0",
"]",
"*",
"np",
".",
"dot",
"(",
"xw",
".",
"T",
",",
"xw",
")",
")",
"for",
"lag",
"in",
"range",
"(",
"1",
",",
"(",
"nlags",
"+",
"1",
")",
")",
":",
"(",
"xw0",
",",
"xwlag",
")",
"=",
"lagged_groups",
"(",
"xw",
",",
"lag",
",",
"groupidx",
")",
"s",
"=",
"np",
".",
"dot",
"(",
"xw0",
".",
"T",
",",
"xwlag",
")",
"S",
"+=",
"(",
"weights",
"[",
"lag",
"]",
"*",
"(",
"s",
"+",
"s",
".",
"T",
")",
")",
"return",
"S"
] |
inner covariance matrix for hac for panel data ; no denominator nobs used ; no reference for this .
|
train
| false
|
10,649
|
def test_url_req_case_mismatch_no_index(script, data):
Upper = os.path.join(data.find_links, 'Upper-1.0.tar.gz')
result = script.pip('install', '--no-index', '-f', data.find_links, Upper, 'requiresupper')
egg_folder = ((script.site_packages / 'Upper-1.0-py%s.egg-info') % pyversion)
assert (egg_folder in result.files_created), str(result)
egg_folder = ((script.site_packages / 'Upper-2.0-py%s.egg-info') % pyversion)
assert (egg_folder not in result.files_created), str(result)
|
[
"def",
"test_url_req_case_mismatch_no_index",
"(",
"script",
",",
"data",
")",
":",
"Upper",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"find_links",
",",
"'Upper-1.0.tar.gz'",
")",
"result",
"=",
"script",
".",
"pip",
"(",
"'install'",
",",
"'--no-index'",
",",
"'-f'",
",",
"data",
".",
"find_links",
",",
"Upper",
",",
"'requiresupper'",
")",
"egg_folder",
"=",
"(",
"(",
"script",
".",
"site_packages",
"/",
"'Upper-1.0-py%s.egg-info'",
")",
"%",
"pyversion",
")",
"assert",
"(",
"egg_folder",
"in",
"result",
".",
"files_created",
")",
",",
"str",
"(",
"result",
")",
"egg_folder",
"=",
"(",
"(",
"script",
".",
"site_packages",
"/",
"'Upper-2.0-py%s.egg-info'",
")",
"%",
"pyversion",
")",
"assert",
"(",
"egg_folder",
"not",
"in",
"result",
".",
"files_created",
")",
",",
"str",
"(",
"result",
")"
] |
tar ball url requirements .
|
train
| false
|
10,650
|
def _ReconstructPath(initial_state, current):
path = collections.deque()
while current.previous:
path.appendleft(current)
current = current.previous
for node in path:
initial_state.AddTokenToState(newline=node.newline, dry_run=False)
|
[
"def",
"_ReconstructPath",
"(",
"initial_state",
",",
"current",
")",
":",
"path",
"=",
"collections",
".",
"deque",
"(",
")",
"while",
"current",
".",
"previous",
":",
"path",
".",
"appendleft",
"(",
"current",
")",
"current",
"=",
"current",
".",
"previous",
"for",
"node",
"in",
"path",
":",
"initial_state",
".",
"AddTokenToState",
"(",
"newline",
"=",
"node",
".",
"newline",
",",
"dry_run",
"=",
"False",
")"
] |
reconstruct the path through the queue with lowest penalty .
|
train
| false
|
10,651
|
def setup_transaction_hook(config):
def _notify_resource_events_before(request):
'Notify the accumulated resource events before end of transaction.\n '
for event in request.get_resource_events():
request.registry.notify(event)
def _notify_resource_events_after(success, request):
'Notify the accumulated resource events if transaction succeeds.\n '
if success:
for event in request.get_resource_events(after_commit=True):
try:
request.registry.notify(event)
except Exception:
logger.error('Unable to notify', exc_info=True)
def on_new_request(event):
'When a new request comes in, hook on transaction commit.\n '
if hasattr(event.request, 'parent'):
return
current = transaction.get()
current.addBeforeCommitHook(_notify_resource_events_before, args=(event.request,))
current.addAfterCommitHook(_notify_resource_events_after, args=(event.request,))
config.add_subscriber(on_new_request, NewRequest)
|
[
"def",
"setup_transaction_hook",
"(",
"config",
")",
":",
"def",
"_notify_resource_events_before",
"(",
"request",
")",
":",
"for",
"event",
"in",
"request",
".",
"get_resource_events",
"(",
")",
":",
"request",
".",
"registry",
".",
"notify",
"(",
"event",
")",
"def",
"_notify_resource_events_after",
"(",
"success",
",",
"request",
")",
":",
"if",
"success",
":",
"for",
"event",
"in",
"request",
".",
"get_resource_events",
"(",
"after_commit",
"=",
"True",
")",
":",
"try",
":",
"request",
".",
"registry",
".",
"notify",
"(",
"event",
")",
"except",
"Exception",
":",
"logger",
".",
"error",
"(",
"'Unable to notify'",
",",
"exc_info",
"=",
"True",
")",
"def",
"on_new_request",
"(",
"event",
")",
":",
"if",
"hasattr",
"(",
"event",
".",
"request",
",",
"'parent'",
")",
":",
"return",
"current",
"=",
"transaction",
".",
"get",
"(",
")",
"current",
".",
"addBeforeCommitHook",
"(",
"_notify_resource_events_before",
",",
"args",
"=",
"(",
"event",
".",
"request",
",",
")",
")",
"current",
".",
"addAfterCommitHook",
"(",
"_notify_resource_events_after",
",",
"args",
"=",
"(",
"event",
".",
"request",
",",
")",
")",
"config",
".",
"add_subscriber",
"(",
"on_new_request",
",",
"NewRequest",
")"
] |
resource events are hooked into the transactions of pyramid_tm .
|
train
| false
|
10,652
|
@mock_streams('stdout')
def test_puts_with_encoding_type_none_output():
s = u'string!'
output.user = True
sys.stdout.encoding = None
puts(s, show_prefix=False)
eq_(sys.stdout.getvalue(), (s + '\n'))
|
[
"@",
"mock_streams",
"(",
"'stdout'",
")",
"def",
"test_puts_with_encoding_type_none_output",
"(",
")",
":",
"s",
"=",
"u'string!'",
"output",
".",
"user",
"=",
"True",
"sys",
".",
"stdout",
".",
"encoding",
"=",
"None",
"puts",
"(",
"s",
",",
"show_prefix",
"=",
"False",
")",
"eq_",
"(",
"sys",
".",
"stdout",
".",
"getvalue",
"(",
")",
",",
"(",
"s",
"+",
"'\\n'",
")",
")"
] |
puts() should print unicode output without a stream encoding .
|
train
| false
|
10,654
|
def getSegmentPathDefault():
return [Vector3(), Vector3(0.0, 1.0)]
|
[
"def",
"getSegmentPathDefault",
"(",
")",
":",
"return",
"[",
"Vector3",
"(",
")",
",",
"Vector3",
"(",
"0.0",
",",
"1.0",
")",
"]"
] |
get segment path default .
|
train
| false
|
10,655
|
def get_br_trunk_port_name(prefix, port_id):
return ('%st-%s' % (prefix, port_id))[:constants.DEVICE_NAME_MAX_LEN]
|
[
"def",
"get_br_trunk_port_name",
"(",
"prefix",
",",
"port_id",
")",
":",
"return",
"(",
"'%st-%s'",
"%",
"(",
"prefix",
",",
"port_id",
")",
")",
"[",
":",
"constants",
".",
"DEVICE_NAME_MAX_LEN",
"]"
] |
return the ovs port name for the given port id .
|
train
| false
|
10,656
|
def load_diabetes(return_X_y=False):
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
if return_X_y:
return (data, target)
return Bunch(data=data, target=target, feature_names=['age', 'sex', 'bmi', 'bp', 's1', 's2', 's3', 's4', 's5', 's6'])
|
[
"def",
"load_diabetes",
"(",
"return_X_y",
"=",
"False",
")",
":",
"base_dir",
"=",
"join",
"(",
"dirname",
"(",
"__file__",
")",
",",
"'data'",
")",
"data",
"=",
"np",
".",
"loadtxt",
"(",
"join",
"(",
"base_dir",
",",
"'diabetes_data.csv.gz'",
")",
")",
"target",
"=",
"np",
".",
"loadtxt",
"(",
"join",
"(",
"base_dir",
",",
"'diabetes_target.csv.gz'",
")",
")",
"if",
"return_X_y",
":",
"return",
"(",
"data",
",",
"target",
")",
"return",
"Bunch",
"(",
"data",
"=",
"data",
",",
"target",
"=",
"target",
",",
"feature_names",
"=",
"[",
"'age'",
",",
"'sex'",
",",
"'bmi'",
",",
"'bp'",
",",
"'s1'",
",",
"'s2'",
",",
"'s3'",
",",
"'s4'",
",",
"'s5'",
",",
"'s6'",
"]",
")"
] |
load and return the diabetes dataset .
|
train
| false
|
10,657
|
def init_bh_match():
global bh_match
bh_match = BhCore().match
debug('Match object loaded.')
|
[
"def",
"init_bh_match",
"(",
")",
":",
"global",
"bh_match",
"bh_match",
"=",
"BhCore",
"(",
")",
".",
"match",
"debug",
"(",
"'Match object loaded.'",
")"
] |
initialize the match object .
|
train
| false
|
10,658
|
def _resource_path_list(resource, *elements):
path = [(loc.__name__ or '') for loc in lineage(resource)]
path.reverse()
path.extend(elements)
return path
|
[
"def",
"_resource_path_list",
"(",
"resource",
",",
"*",
"elements",
")",
":",
"path",
"=",
"[",
"(",
"loc",
".",
"__name__",
"or",
"''",
")",
"for",
"loc",
"in",
"lineage",
"(",
"resource",
")",
"]",
"path",
".",
"reverse",
"(",
")",
"path",
".",
"extend",
"(",
"elements",
")",
"return",
"path"
] |
implementation detail shared by resource_path and resource_path_tuple .
|
train
| false
|
10,659
|
def inf_loop_multiple_back_edge(rec):
while True:
rec.mark('yield')
(yield)
p = rec('p')
if p:
rec.mark('bra')
pass
|
[
"def",
"inf_loop_multiple_back_edge",
"(",
"rec",
")",
":",
"while",
"True",
":",
"rec",
".",
"mark",
"(",
"'yield'",
")",
"(",
"yield",
")",
"p",
"=",
"rec",
"(",
"'p'",
")",
"if",
"p",
":",
"rec",
".",
"mark",
"(",
"'bra'",
")",
"pass"
] |
test to reveal a bug of invalid liveness when an infinite loop has multiple back edges .
|
train
| false
|
10,660
|
def substitute_variables(input_data, substitutions):
def f(value):
if (not isinstance(value, basestring)):
return value
new_value = Template(value).safe_substitute(substitutions)
if (new_value != value):
LOG.debug(('Substituted %s -> %s' % (repr(value), repr(new_value))))
return new_value
return recursive_walk(f, input_data)
|
[
"def",
"substitute_variables",
"(",
"input_data",
",",
"substitutions",
")",
":",
"def",
"f",
"(",
"value",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"value",
",",
"basestring",
")",
")",
":",
"return",
"value",
"new_value",
"=",
"Template",
"(",
"value",
")",
".",
"safe_substitute",
"(",
"substitutions",
")",
"if",
"(",
"new_value",
"!=",
"value",
")",
":",
"LOG",
".",
"debug",
"(",
"(",
"'Substituted %s -> %s'",
"%",
"(",
"repr",
"(",
"value",
")",
",",
"repr",
"(",
"new_value",
")",
")",
")",
")",
"return",
"new_value",
"return",
"recursive_walk",
"(",
"f",
",",
"input_data",
")"
] |
replaces variables with values from substitutions .
|
train
| false
|
10,661
|
def get_comment_app():
comments_app = get_comment_app_name()
if (comments_app not in settings.INSTALLED_APPS):
raise ImproperlyConfigured(('The COMMENTS_APP (%r) must be in INSTALLED_APPS' % settings.COMMENTS_APP))
try:
package = import_module(comments_app)
except ImportError:
raise ImproperlyConfigured('The COMMENTS_APP setting refers to a non-existing package.')
return package
|
[
"def",
"get_comment_app",
"(",
")",
":",
"comments_app",
"=",
"get_comment_app_name",
"(",
")",
"if",
"(",
"comments_app",
"not",
"in",
"settings",
".",
"INSTALLED_APPS",
")",
":",
"raise",
"ImproperlyConfigured",
"(",
"(",
"'The COMMENTS_APP (%r) must be in INSTALLED_APPS'",
"%",
"settings",
".",
"COMMENTS_APP",
")",
")",
"try",
":",
"package",
"=",
"import_module",
"(",
"comments_app",
")",
"except",
"ImportError",
":",
"raise",
"ImproperlyConfigured",
"(",
"'The COMMENTS_APP setting refers to a non-existing package.'",
")",
"return",
"package"
] |
get the comment app as defined in the settings .
|
train
| false
|
10,662
|
def tensor_product_simp(e, **hints):
if isinstance(e, Add):
return Add(*[tensor_product_simp(arg) for arg in e.args])
elif isinstance(e, Pow):
return (tensor_product_simp(e.base) ** e.exp)
elif isinstance(e, Mul):
return tensor_product_simp_Mul(e)
elif isinstance(e, Commutator):
return Commutator(*[tensor_product_simp(arg) for arg in e.args])
elif isinstance(e, AntiCommutator):
return AntiCommutator(*[tensor_product_simp(arg) for arg in e.args])
else:
return e
|
[
"def",
"tensor_product_simp",
"(",
"e",
",",
"**",
"hints",
")",
":",
"if",
"isinstance",
"(",
"e",
",",
"Add",
")",
":",
"return",
"Add",
"(",
"*",
"[",
"tensor_product_simp",
"(",
"arg",
")",
"for",
"arg",
"in",
"e",
".",
"args",
"]",
")",
"elif",
"isinstance",
"(",
"e",
",",
"Pow",
")",
":",
"return",
"(",
"tensor_product_simp",
"(",
"e",
".",
"base",
")",
"**",
"e",
".",
"exp",
")",
"elif",
"isinstance",
"(",
"e",
",",
"Mul",
")",
":",
"return",
"tensor_product_simp_Mul",
"(",
"e",
")",
"elif",
"isinstance",
"(",
"e",
",",
"Commutator",
")",
":",
"return",
"Commutator",
"(",
"*",
"[",
"tensor_product_simp",
"(",
"arg",
")",
"for",
"arg",
"in",
"e",
".",
"args",
"]",
")",
"elif",
"isinstance",
"(",
"e",
",",
"AntiCommutator",
")",
":",
"return",
"AntiCommutator",
"(",
"*",
"[",
"tensor_product_simp",
"(",
"arg",
")",
"for",
"arg",
"in",
"e",
".",
"args",
"]",
")",
"else",
":",
"return",
"e"
] |
try to simplify and combine tensorproducts .
|
train
| false
|
10,663
|
def get_credentials_file(*args):
if check_file_permissions():
ensure_local_plotly_files()
return utils.load_json_dict(CREDENTIALS_FILE, *args)
else:
return FILE_CONTENT[CREDENTIALS_FILE]
|
[
"def",
"get_credentials_file",
"(",
"*",
"args",
")",
":",
"if",
"check_file_permissions",
"(",
")",
":",
"ensure_local_plotly_files",
"(",
")",
"return",
"utils",
".",
"load_json_dict",
"(",
"CREDENTIALS_FILE",
",",
"*",
"args",
")",
"else",
":",
"return",
"FILE_CONTENT",
"[",
"CREDENTIALS_FILE",
"]"
] |
return specified args from ~/ .
|
train
| false
|
10,664
|
def unicode_join(seq):
return concat(imap(text_type, seq))
|
[
"def",
"unicode_join",
"(",
"seq",
")",
":",
"return",
"concat",
"(",
"imap",
"(",
"text_type",
",",
"seq",
")",
")"
] |
simple args to unicode conversion and concatenation .
|
train
| false
|
10,666
|
def TSF(ds, count, timeperiod=(- (2 ** 31))):
return call_talib_with_ds(ds, count, talib.TSF, timeperiod)
|
[
"def",
"TSF",
"(",
"ds",
",",
"count",
",",
"timeperiod",
"=",
"(",
"-",
"(",
"2",
"**",
"31",
")",
")",
")",
":",
"return",
"call_talib_with_ds",
"(",
"ds",
",",
"count",
",",
"talib",
".",
"TSF",
",",
"timeperiod",
")"
] |
time series forecast .
|
train
| false
|
10,667
|
def assert_opened_help_link_is_correct(test, url):
test.browser.switch_to_window(test.browser.window_handles[(-1)])
test.assertEqual(url, test.browser.current_url)
test.assertNotIn('Maze Found', test.browser.title)
|
[
"def",
"assert_opened_help_link_is_correct",
"(",
"test",
",",
"url",
")",
":",
"test",
".",
"browser",
".",
"switch_to_window",
"(",
"test",
".",
"browser",
".",
"window_handles",
"[",
"(",
"-",
"1",
")",
"]",
")",
"test",
".",
"assertEqual",
"(",
"url",
",",
"test",
".",
"browser",
".",
"current_url",
")",
"test",
".",
"assertNotIn",
"(",
"'Maze Found'",
",",
"test",
".",
"browser",
".",
"title",
")"
] |
asserts that the url of the browser is correct when the help link is clicked .
|
train
| false
|
10,668
|
def get_subnetid(vm_):
subnetid = config.get_cloud_config_value('subnetid', vm_, __opts__, search_global=False)
if subnetid:
return subnetid
subnetname = config.get_cloud_config_value('subnetname', vm_, __opts__, search_global=False)
if subnetname:
return _get_subnetname_id(subnetname)
return None
|
[
"def",
"get_subnetid",
"(",
"vm_",
")",
":",
"subnetid",
"=",
"config",
".",
"get_cloud_config_value",
"(",
"'subnetid'",
",",
"vm_",
",",
"__opts__",
",",
"search_global",
"=",
"False",
")",
"if",
"subnetid",
":",
"return",
"subnetid",
"subnetname",
"=",
"config",
".",
"get_cloud_config_value",
"(",
"'subnetname'",
",",
"vm_",
",",
"__opts__",
",",
"search_global",
"=",
"False",
")",
"if",
"subnetname",
":",
"return",
"_get_subnetname_id",
"(",
"subnetname",
")",
"return",
"None"
] |
returns the subnetid to use .
|
train
| true
|
10,669
|
def test_ast_bad_get():
cant_compile(u'(get)')
cant_compile(u'(get 1)')
|
[
"def",
"test_ast_bad_get",
"(",
")",
":",
"cant_compile",
"(",
"u'(get)'",
")",
"cant_compile",
"(",
"u'(get 1)'",
")"
] |
make sure ast can't compile an invalid get .
|
train
| false
|
10,671
|
def perform_all_actions(all_actions):
for action_group in all_actions:
to_destroy = getattr(action_group, 'destroy', [])
for resource in to_destroy:
destroy_resource(resource)
|
[
"def",
"perform_all_actions",
"(",
"all_actions",
")",
":",
"for",
"action_group",
"in",
"all_actions",
":",
"to_destroy",
"=",
"getattr",
"(",
"action_group",
",",
"'destroy'",
",",
"[",
"]",
")",
"for",
"resource",
"in",
"to_destroy",
":",
"destroy_resource",
"(",
"resource",
")"
] |
loop through all the actions and destroy the resources that need to be destroyed .
|
train
| false
|
10,672
|
def test_parse_annotation():
annot = '+180\x14Lights off\x14Close door\x14\x00\x00\x00\x00\x00+180\x14Lights off\x14\x00\x00\x00\x00\x00\x00\x00\x00+180\x14Close door\x14\x00\x00\x00\x00\x00\x00\x00\x00+3.14\x1504.20\x14nothing\x14\x00\x00\x00\x00+1800.2\x1525.5\x14Apnea\x14\x00\x00\x00\x00\x00\x00\x00+123\x14\x14\x00\x00\x00\x00\x00\x00\x00'
annot = [a for a in iterbytes(annot)]
annot[1::2] = [(a * 256) for a in annot[1::2]]
tal_channel = map(sum, zip(annot[0::2], annot[1::2]))
assert_equal(_parse_tal_channel([tal_channel]), [[180.0, 0, 'Lights off'], [180.0, 0, 'Close door'], [180.0, 0, 'Lights off'], [180.0, 0, 'Close door'], [3.14, 4.2, 'nothing'], [1800.2, 25.5, 'Apnea']])
|
[
"def",
"test_parse_annotation",
"(",
")",
":",
"annot",
"=",
"'+180\\x14Lights off\\x14Close door\\x14\\x00\\x00\\x00\\x00\\x00+180\\x14Lights off\\x14\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00+180\\x14Close door\\x14\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00+3.14\\x1504.20\\x14nothing\\x14\\x00\\x00\\x00\\x00+1800.2\\x1525.5\\x14Apnea\\x14\\x00\\x00\\x00\\x00\\x00\\x00\\x00+123\\x14\\x14\\x00\\x00\\x00\\x00\\x00\\x00\\x00'",
"annot",
"=",
"[",
"a",
"for",
"a",
"in",
"iterbytes",
"(",
"annot",
")",
"]",
"annot",
"[",
"1",
":",
":",
"2",
"]",
"=",
"[",
"(",
"a",
"*",
"256",
")",
"for",
"a",
"in",
"annot",
"[",
"1",
":",
":",
"2",
"]",
"]",
"tal_channel",
"=",
"map",
"(",
"sum",
",",
"zip",
"(",
"annot",
"[",
"0",
":",
":",
"2",
"]",
",",
"annot",
"[",
"1",
":",
":",
"2",
"]",
")",
")",
"assert_equal",
"(",
"_parse_tal_channel",
"(",
"[",
"tal_channel",
"]",
")",
",",
"[",
"[",
"180.0",
",",
"0",
",",
"'Lights off'",
"]",
",",
"[",
"180.0",
",",
"0",
",",
"'Close door'",
"]",
",",
"[",
"180.0",
",",
"0",
",",
"'Lights off'",
"]",
",",
"[",
"180.0",
",",
"0",
",",
"'Close door'",
"]",
",",
"[",
"3.14",
",",
"4.2",
",",
"'nothing'",
"]",
",",
"[",
"1800.2",
",",
"25.5",
",",
"'Apnea'",
"]",
"]",
")"
] |
test parsing the tal channel .
|
train
| false
|
10,673
|
def splitdrive(p):
return ('', p)
|
[
"def",
"splitdrive",
"(",
"p",
")",
":",
"return",
"(",
"''",
",",
"p",
")"
] |
split a pathname into drive/unc sharepoint and relative path specifiers .
|
train
| false
|
10,674
|
def CreateProcess(appName, cmdline, procSecurity, threadSecurity, inheritHandles, newEnvironment, env, workingDir, startupInfo):
(hProcess, hThread, dwPid, dwTid) = win32process.CreateProcess(appName, cmdline, procSecurity, threadSecurity, inheritHandles, newEnvironment, env, workingDir, startupInfo)
dwPid = 42
return (hProcess, hThread, dwPid, dwTid)
|
[
"def",
"CreateProcess",
"(",
"appName",
",",
"cmdline",
",",
"procSecurity",
",",
"threadSecurity",
",",
"inheritHandles",
",",
"newEnvironment",
",",
"env",
",",
"workingDir",
",",
"startupInfo",
")",
":",
"(",
"hProcess",
",",
"hThread",
",",
"dwPid",
",",
"dwTid",
")",
"=",
"win32process",
".",
"CreateProcess",
"(",
"appName",
",",
"cmdline",
",",
"procSecurity",
",",
"threadSecurity",
",",
"inheritHandles",
",",
"newEnvironment",
",",
"env",
",",
"workingDir",
",",
"startupInfo",
")",
"dwPid",
"=",
"42",
"return",
"(",
"hProcess",
",",
"hThread",
",",
"dwPid",
",",
"dwTid",
")"
] |
this function mocks the generated pid aspect of the win32 .
|
train
| false
|
10,675
|
def load_backends_and_plugins(plugins, working_set, backends, build_configuration=None):
build_configuration = (build_configuration or BuildConfiguration())
load_build_configuration_from_source(build_configuration, backends)
load_plugins(build_configuration, (plugins or []), working_set)
return build_configuration
|
[
"def",
"load_backends_and_plugins",
"(",
"plugins",
",",
"working_set",
",",
"backends",
",",
"build_configuration",
"=",
"None",
")",
":",
"build_configuration",
"=",
"(",
"build_configuration",
"or",
"BuildConfiguration",
"(",
")",
")",
"load_build_configuration_from_source",
"(",
"build_configuration",
",",
"backends",
")",
"load_plugins",
"(",
"build_configuration",
",",
"(",
"plugins",
"or",
"[",
"]",
")",
",",
"working_set",
")",
"return",
"build_configuration"
] |
load named plugins and source backends .
|
train
| true
|
10,676
|
def get_public_key(key, passphrase=None, asObj=False):
if isinstance(key, M2Crypto.X509.X509):
rsa = key.get_pubkey().get_rsa()
text = ''
else:
text = _text_or_file(key)
text = get_pem_entry(text)
if text.startswith('-----BEGIN PUBLIC KEY-----'):
if (not asObj):
return text
bio = M2Crypto.BIO.MemoryBuffer()
bio.write(text)
rsa = M2Crypto.RSA.load_pub_key_bio(bio)
bio = M2Crypto.BIO.MemoryBuffer()
if text.startswith('-----BEGIN CERTIFICATE-----'):
cert = M2Crypto.X509.load_cert_string(text)
rsa = cert.get_pubkey().get_rsa()
if text.startswith('-----BEGIN CERTIFICATE REQUEST-----'):
csr = M2Crypto.X509.load_request_string(text)
rsa = csr.get_pubkey().get_rsa()
if (text.startswith('-----BEGIN PRIVATE KEY-----') or text.startswith('-----BEGIN RSA PRIVATE KEY-----')):
rsa = M2Crypto.RSA.load_key_string(text, callback=_passphrase_callback(passphrase))
if asObj:
evppubkey = M2Crypto.EVP.PKey()
evppubkey.assign_rsa(rsa)
return evppubkey
rsa.save_pub_key_bio(bio)
return bio.read_all()
|
[
"def",
"get_public_key",
"(",
"key",
",",
"passphrase",
"=",
"None",
",",
"asObj",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"M2Crypto",
".",
"X509",
".",
"X509",
")",
":",
"rsa",
"=",
"key",
".",
"get_pubkey",
"(",
")",
".",
"get_rsa",
"(",
")",
"text",
"=",
"''",
"else",
":",
"text",
"=",
"_text_or_file",
"(",
"key",
")",
"text",
"=",
"get_pem_entry",
"(",
"text",
")",
"if",
"text",
".",
"startswith",
"(",
"'-----BEGIN PUBLIC KEY-----'",
")",
":",
"if",
"(",
"not",
"asObj",
")",
":",
"return",
"text",
"bio",
"=",
"M2Crypto",
".",
"BIO",
".",
"MemoryBuffer",
"(",
")",
"bio",
".",
"write",
"(",
"text",
")",
"rsa",
"=",
"M2Crypto",
".",
"RSA",
".",
"load_pub_key_bio",
"(",
"bio",
")",
"bio",
"=",
"M2Crypto",
".",
"BIO",
".",
"MemoryBuffer",
"(",
")",
"if",
"text",
".",
"startswith",
"(",
"'-----BEGIN CERTIFICATE-----'",
")",
":",
"cert",
"=",
"M2Crypto",
".",
"X509",
".",
"load_cert_string",
"(",
"text",
")",
"rsa",
"=",
"cert",
".",
"get_pubkey",
"(",
")",
".",
"get_rsa",
"(",
")",
"if",
"text",
".",
"startswith",
"(",
"'-----BEGIN CERTIFICATE REQUEST-----'",
")",
":",
"csr",
"=",
"M2Crypto",
".",
"X509",
".",
"load_request_string",
"(",
"text",
")",
"rsa",
"=",
"csr",
".",
"get_pubkey",
"(",
")",
".",
"get_rsa",
"(",
")",
"if",
"(",
"text",
".",
"startswith",
"(",
"'-----BEGIN PRIVATE KEY-----'",
")",
"or",
"text",
".",
"startswith",
"(",
"'-----BEGIN RSA PRIVATE KEY-----'",
")",
")",
":",
"rsa",
"=",
"M2Crypto",
".",
"RSA",
".",
"load_key_string",
"(",
"text",
",",
"callback",
"=",
"_passphrase_callback",
"(",
"passphrase",
")",
")",
"if",
"asObj",
":",
"evppubkey",
"=",
"M2Crypto",
".",
"EVP",
".",
"PKey",
"(",
")",
"evppubkey",
".",
"assign_rsa",
"(",
"rsa",
")",
"return",
"evppubkey",
"rsa",
".",
"save_pub_key_bio",
"(",
"bio",
")",
"return",
"bio",
".",
"read_all",
"(",
")"
] |
create the public key object from a retrieved certificate .
|
train
| true
|
10,678
|
def escape(text):
text = text.replace('\\', '\\\\')
text = text.replace('"""', '""\\"')
text = text.replace(' \n', ' \\n\\\n')
return text
|
[
"def",
"escape",
"(",
"text",
")",
":",
"text",
"=",
"text",
".",
"replace",
"(",
"'\\\\'",
",",
"'\\\\\\\\'",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"'\"\"\"'",
",",
"'\"\"\\\\\"'",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"' \\n'",
",",
"' \\\\n\\\\\\n'",
")",
"return",
"text"
] |
escape a string in an oauth-compatible fashion .
|
train
| false
|
10,679
|
def smart_truncate(string, max_length=0, word_boundaries=False, separator=' '):
string = string.strip(separator)
if (not max_length):
return string
if (len(string) < max_length):
return string
if (not word_boundaries):
return string[:max_length].strip(separator)
if (separator not in string):
return string[:max_length]
truncated = ''
for word in string.split(separator):
if word:
next_len = ((len(truncated) + len(word)) + len(separator))
if (next_len <= max_length):
truncated += '{0}{1}'.format(word, separator)
if (not truncated):
truncated = string[:max_length]
return truncated.strip(separator)
|
[
"def",
"smart_truncate",
"(",
"string",
",",
"max_length",
"=",
"0",
",",
"word_boundaries",
"=",
"False",
",",
"separator",
"=",
"' '",
")",
":",
"string",
"=",
"string",
".",
"strip",
"(",
"separator",
")",
"if",
"(",
"not",
"max_length",
")",
":",
"return",
"string",
"if",
"(",
"len",
"(",
"string",
")",
"<",
"max_length",
")",
":",
"return",
"string",
"if",
"(",
"not",
"word_boundaries",
")",
":",
"return",
"string",
"[",
":",
"max_length",
"]",
".",
"strip",
"(",
"separator",
")",
"if",
"(",
"separator",
"not",
"in",
"string",
")",
":",
"return",
"string",
"[",
":",
"max_length",
"]",
"truncated",
"=",
"''",
"for",
"word",
"in",
"string",
".",
"split",
"(",
"separator",
")",
":",
"if",
"word",
":",
"next_len",
"=",
"(",
"(",
"len",
"(",
"truncated",
")",
"+",
"len",
"(",
"word",
")",
")",
"+",
"len",
"(",
"separator",
")",
")",
"if",
"(",
"next_len",
"<=",
"max_length",
")",
":",
"truncated",
"+=",
"'{0}{1}'",
".",
"format",
"(",
"word",
",",
"separator",
")",
"if",
"(",
"not",
"truncated",
")",
":",
"truncated",
"=",
"string",
"[",
":",
"max_length",
"]",
"return",
"truncated",
".",
"strip",
"(",
"separator",
")"
] |
truncate a string .
|
train
| true
|
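A short usage sketch for the smart_truncate row above; the sample sentence and outputs are illustrative, with the snippet's function assumed in scope. One subtlety worth noting: with word_boundaries=True the loop keeps any later word that still fits, so the result is not always a strict prefix of the input:

# Assumes smart_truncate from the snippet above is already defined.
s = 'the quick brown fox jumps over the lazy dog'
print(smart_truncate(s, max_length=15, word_boundaries=True))
# -> 'the quick fox' ('brown' no longer fits, but the shorter 'fox' does)
print(smart_truncate(s, max_length=15))
# -> 'the quick brown' (plain 15-character slice, then separator-stripped)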
10,680
|
def template_localtime(value, use_tz=None):
should_convert = (isinstance(value, datetime) and (settings.USE_TZ if (use_tz is None) else use_tz) and (not is_naive(value)) and getattr(value, 'convert_to_local_time', True))
return (localtime(value) if should_convert else value)
|
[
"def",
"template_localtime",
"(",
"value",
",",
"use_tz",
"=",
"None",
")",
":",
"should_convert",
"=",
"(",
"isinstance",
"(",
"value",
",",
"datetime",
")",
"and",
"(",
"settings",
".",
"USE_TZ",
"if",
"(",
"use_tz",
"is",
"None",
")",
"else",
"use_tz",
")",
"and",
"(",
"not",
"is_naive",
"(",
"value",
")",
")",
"and",
"getattr",
"(",
"value",
",",
"'convert_to_local_time'",
",",
"True",
")",
")",
"return",
"(",
"localtime",
"(",
"value",
")",
"if",
"should_convert",
"else",
"value",
")"
] |
checks if value is a datetime and converts it to local time if necessary .
|
train
| false
|
10,681
|
def _coerce_exceptions(function):
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except IOException as e:
e.msg = force_unicode(e.msg, errors='replace')
e.stack = force_unicode(e.stack, errors='replace')
LOG.exception(('Exception in Hadoop FS call ' + function.__name__))
if (e.clazz == HADOOP_ACCESSCONTROLEXCEPTION):
raise PermissionDeniedException(e.msg, e)
else:
raise
return wrapper
|
[
"def",
"_coerce_exceptions",
"(",
"function",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"try",
":",
"return",
"function",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"except",
"IOException",
"as",
"e",
":",
"e",
".",
"msg",
"=",
"force_unicode",
"(",
"e",
".",
"msg",
",",
"errors",
"=",
"'replace'",
")",
"e",
".",
"stack",
"=",
"force_unicode",
"(",
"e",
".",
"stack",
",",
"errors",
"=",
"'replace'",
")",
"LOG",
".",
"exception",
"(",
"(",
"'Exception in Hadoop FS call '",
"+",
"function",
".",
"__name__",
")",
")",
"if",
"(",
"e",
".",
"clazz",
"==",
"HADOOP_ACCESSCONTROLEXCEPTION",
")",
":",
"raise",
"PermissionDeniedException",
"(",
"e",
".",
"msg",
",",
"e",
")",
"else",
":",
"raise",
"return",
"wrapper"
] |
decorator that causes exceptions thrown by the decorated function to be coerced into generic exceptions from the hadoop .
|
train
| false
|
10,682
|
@task()
def enable_self_generated_certs(course_key):
course_key = CourseKey.from_string(course_key)
course = CourseOverview.get_from_id(course_key)
is_enabled_for_course = CertificateGenerationCourseSetting.is_enabled_for_course(course_key)
if (course.self_paced and (not is_enabled_for_course)):
CertificateGenerationCourseSetting.set_enabled_for_course(course_key, True)
|
[
"@",
"task",
"(",
")",
"def",
"enable_self_generated_certs",
"(",
"course_key",
")",
":",
"course_key",
"=",
"CourseKey",
".",
"from_string",
"(",
"course_key",
")",
"course",
"=",
"CourseOverview",
".",
"get_from_id",
"(",
"course_key",
")",
"is_enabled_for_course",
"=",
"CertificateGenerationCourseSetting",
".",
"is_enabled_for_course",
"(",
"course_key",
")",
"if",
"(",
"course",
".",
"self_paced",
"and",
"(",
"not",
"is_enabled_for_course",
")",
")",
":",
"CertificateGenerationCourseSetting",
".",
"set_enabled_for_course",
"(",
"course_key",
",",
"True",
")"
] |
enable the self-generated certificates by default for self-paced courses .
|
train
| false
|
10,685
|
def _unusedTestDirectory(base):
counter = 0
while True:
if counter:
testdir = base.sibling(('%s-%d' % (base.basename(), counter)))
else:
testdir = base
testDirLock = FilesystemLock((testdir.path + '.lock'))
if testDirLock.lock():
if testdir.exists():
_removeSafely(testdir)
testdir.makedirs()
testdir.child('_trial_marker').setContent('')
return (testdir, testDirLock)
elif (base.basename() == '_trial_temp'):
counter += 1
else:
raise _WorkingDirectoryBusy()
|
[
"def",
"_unusedTestDirectory",
"(",
"base",
")",
":",
"counter",
"=",
"0",
"while",
"True",
":",
"if",
"counter",
":",
"testdir",
"=",
"base",
".",
"sibling",
"(",
"(",
"'%s-%d'",
"%",
"(",
"base",
".",
"basename",
"(",
")",
",",
"counter",
")",
")",
")",
"else",
":",
"testdir",
"=",
"base",
"testDirLock",
"=",
"FilesystemLock",
"(",
"(",
"testdir",
".",
"path",
"+",
"'.lock'",
")",
")",
"if",
"testDirLock",
".",
"lock",
"(",
")",
":",
"if",
"testdir",
".",
"exists",
"(",
")",
":",
"_removeSafely",
"(",
"testdir",
")",
"testdir",
".",
"makedirs",
"(",
")",
"testdir",
".",
"child",
"(",
"'_trial_marker'",
")",
".",
"setContent",
"(",
"''",
")",
"return",
"(",
"testdir",
",",
"testDirLock",
")",
"elif",
"(",
"base",
".",
"basename",
"(",
")",
"==",
"'_trial_temp'",
")",
":",
"counter",
"+=",
"1",
"else",
":",
"raise",
"_WorkingDirectoryBusy",
"(",
")"
] |
find an unused directory named similarly to C{base} .
|
train
| false
|
10,688
|
def saveAnswers(questions, series_id, complete_id, rvars):
text = ''
table = current.s3db.survey_complete
for question in questions:
code = question['code']
if ((code in rvars) and (rvars[code] != '')):
line = ('"%s","%s"\n' % (code, rvars[code]))
text += line
if (complete_id is None):
record_id = table.insert(series_id=series_id, answer_list=text)
S3SurveyCompleteModel.completeOnAccept(record_id)
return record_id
else:
current.db((table.id == complete_id)).update(answer_list=text)
S3SurveyCompleteModel.completeOnAccept(complete_id)
return complete_id
|
[
"def",
"saveAnswers",
"(",
"questions",
",",
"series_id",
",",
"complete_id",
",",
"rvars",
")",
":",
"text",
"=",
"''",
"table",
"=",
"current",
".",
"s3db",
".",
"survey_complete",
"for",
"question",
"in",
"questions",
":",
"code",
"=",
"question",
"[",
"'code'",
"]",
"if",
"(",
"(",
"code",
"in",
"rvars",
")",
"and",
"(",
"rvars",
"[",
"code",
"]",
"!=",
"''",
")",
")",
":",
"line",
"=",
"(",
"'\"%s\",\"%s\"\\n'",
"%",
"(",
"code",
",",
"rvars",
"[",
"code",
"]",
")",
")",
"text",
"+=",
"line",
"if",
"(",
"complete_id",
"is",
"None",
")",
":",
"record_id",
"=",
"table",
".",
"insert",
"(",
"series_id",
"=",
"series_id",
",",
"answer_list",
"=",
"text",
")",
"S3SurveyCompleteModel",
".",
"completeOnAccept",
"(",
"record_id",
")",
"return",
"record_id",
"else",
":",
"current",
".",
"db",
"(",
"(",
"table",
".",
"id",
"==",
"complete_id",
")",
")",
".",
"update",
"(",
"answer_list",
"=",
"text",
")",
"S3SurveyCompleteModel",
".",
"completeOnAccept",
"(",
"complete_id",
")",
"return",
"complete_id"
] |
insert/update a record in survey_complete .
|
train
| false
|
10,689
|
def setup_cluster(num_cpus, outdir, verbose, error_profile):
server_socket = setup_server()
(workers, client_socks_and_adrs) = setup_workers(num_cpus, outdir, server_socket, verbose=verbose, error_profile=error_profile)
client_sockets = [sock for (sock, addr) in client_socks_and_adrs]
return (client_sockets, workers, server_socket)
|
[
"def",
"setup_cluster",
"(",
"num_cpus",
",",
"outdir",
",",
"verbose",
",",
"error_profile",
")",
":",
"server_socket",
"=",
"setup_server",
"(",
")",
"(",
"workers",
",",
"client_socks_and_adrs",
")",
"=",
"setup_workers",
"(",
"num_cpus",
",",
"outdir",
",",
"server_socket",
",",
"verbose",
"=",
"verbose",
",",
"error_profile",
"=",
"error_profile",
")",
"client_sockets",
"=",
"[",
"sock",
"for",
"(",
"sock",
",",
"addr",
")",
"in",
"client_socks_and_adrs",
"]",
"return",
"(",
"client_sockets",
",",
"workers",
",",
"server_socket",
")"
] |
setup server and clients .
|
train
| false
|
10,690
|
def _subsets(n):
if (n == 1):
a = [[1]]
elif (n == 2):
a = [[1, 0], [0, 1], [1, 1]]
elif (n == 3):
a = [[1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1]]
else:
b = _subsets((n - 1))
a0 = [(x + [0]) for x in b]
a1 = [(x + [1]) for x in b]
a = ((a0 + [(([0] * (n - 1)) + [1])]) + a1)
return a
|
[
"def",
"_subsets",
"(",
"n",
")",
":",
"if",
"(",
"n",
"==",
"1",
")",
":",
"a",
"=",
"[",
"[",
"1",
"]",
"]",
"elif",
"(",
"n",
"==",
"2",
")",
":",
"a",
"=",
"[",
"[",
"1",
",",
"0",
"]",
",",
"[",
"0",
",",
"1",
"]",
",",
"[",
"1",
",",
"1",
"]",
"]",
"elif",
"(",
"n",
"==",
"3",
")",
":",
"a",
"=",
"[",
"[",
"1",
",",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"1",
",",
"0",
"]",
",",
"[",
"1",
",",
"1",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
",",
"1",
"]",
",",
"[",
"1",
",",
"0",
",",
"1",
"]",
",",
"[",
"0",
",",
"1",
",",
"1",
"]",
",",
"[",
"1",
",",
"1",
",",
"1",
"]",
"]",
"else",
":",
"b",
"=",
"_subsets",
"(",
"(",
"n",
"-",
"1",
")",
")",
"a0",
"=",
"[",
"(",
"x",
"+",
"[",
"0",
"]",
")",
"for",
"x",
"in",
"b",
"]",
"a1",
"=",
"[",
"(",
"x",
"+",
"[",
"1",
"]",
")",
"for",
"x",
"in",
"b",
"]",
"a",
"=",
"(",
"(",
"a0",
"+",
"[",
"(",
"(",
"[",
"0",
"]",
"*",
"(",
"n",
"-",
"1",
")",
")",
"+",
"[",
"1",
"]",
")",
"]",
")",
"+",
"a1",
")",
"return",
"a"
] |
returns all possible subsets of the set except the empty set .
|
train
| false
|
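For the _subsets row above, a tiny sanity check; the calls are illustrative and assume the snippet's function is in scope. Each subset comes back as a 0/1 membership mask over the n positions:

# Assumes _subsets from the snippet above is already defined.
print(_subsets(2))       # [[1, 0], [0, 1], [1, 1]] -- all non-empty subsets of a 2-element set
print(len(_subsets(4)))  # 15 == 2**4 - 1, one mask per non-empty subset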
10,691
|
def _adapt(iface, obj):
for (wrapper_test, wrapper) in IMOUNTABLE_FILESYSTEM_ADAPTERS.items():
if wrapper_test(obj):
return wrapper(obj)
|
[
"def",
"_adapt",
"(",
"iface",
",",
"obj",
")",
":",
"for",
"(",
"wrapper_test",
",",
"wrapper",
")",
"in",
"IMOUNTABLE_FILESYSTEM_ADAPTERS",
".",
"items",
"(",
")",
":",
"if",
"wrapper_test",
"(",
"obj",
")",
":",
"return",
"wrapper",
"(",
"obj",
")"
] |
adapt the filesystem argument of mount to imountablefilesystem .
|
train
| false
|
10,692
|
def releaseNetToMs():
a = TpPd(pd=3)
b = MessageType(mesType=45)
c = CauseHdr(ieiC=8, eightBitC=0)
d = CauseHdr(ieiC=8, eightBitC=0)
e = FacilityHdr(ieiF=28, eightBitF=0)
f = UserUserHdr(ieiUU=126, eightBitUU=0)
packet = (((((a / b) / c) / d) / e) / f)
return packet
|
[
"def",
"releaseNetToMs",
"(",
")",
":",
"a",
"=",
"TpPd",
"(",
"pd",
"=",
"3",
")",
"b",
"=",
"MessageType",
"(",
"mesType",
"=",
"45",
")",
"c",
"=",
"CauseHdr",
"(",
"ieiC",
"=",
"8",
",",
"eightBitC",
"=",
"0",
")",
"d",
"=",
"CauseHdr",
"(",
"ieiC",
"=",
"8",
",",
"eightBitC",
"=",
"0",
")",
"e",
"=",
"FacilityHdr",
"(",
"ieiF",
"=",
"28",
",",
"eightBitF",
"=",
"0",
")",
"f",
"=",
"UserUserHdr",
"(",
"ieiUU",
"=",
"126",
",",
"eightBitUU",
"=",
"0",
")",
"packet",
"=",
"(",
"(",
"(",
"(",
"(",
"a",
"/",
"b",
")",
"/",
"c",
")",
"/",
"d",
")",
"/",
"e",
")",
"/",
"f",
")",
"return",
"packet"
] |
release section 9 .
|
train
| true
|
10,693
|
def currentTag():
cmd = 'git describe --always --tag'.split()
tag = subprocess.check_output(cmd, cwd=VERSIONSDIR).split('-')[0]
return tag
|
[
"def",
"currentTag",
"(",
")",
":",
"cmd",
"=",
"'git describe --always --tag'",
".",
"split",
"(",
")",
"tag",
"=",
"subprocess",
".",
"check_output",
"(",
"cmd",
",",
"cwd",
"=",
"VERSIONSDIR",
")",
".",
"split",
"(",
"'-'",
")",
"[",
"0",
"]",
"return",
"tag"
] |
returns the current tag name from the version repository .
|
train
| false
|
10,694
|
def get_ssh_key_fingerprint(ssh_key):
parts = ssh_key.strip().split()
if (len(parts) == 0):
return None
key_type = parts[0]
key = base64.b64decode(parts[1].encode('ascii'))
fp_plain = hashlib.md5(key).hexdigest()
key_fp = ':'.join(((a + b) for (a, b) in zip(fp_plain[::2], fp_plain[1::2]))).upper()
if (len(parts) < 3):
return ('%s (%s)' % (key_fp, key_type))
else:
user_host = parts[2]
return ('%s %s (%s)' % (key_fp, user_host, key_type))
|
[
"def",
"get_ssh_key_fingerprint",
"(",
"ssh_key",
")",
":",
"parts",
"=",
"ssh_key",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"if",
"(",
"len",
"(",
"parts",
")",
"==",
"0",
")",
":",
"return",
"None",
"key_type",
"=",
"parts",
"[",
"0",
"]",
"key",
"=",
"base64",
".",
"b64decode",
"(",
"parts",
"[",
"1",
"]",
".",
"encode",
"(",
"'ascii'",
")",
")",
"fp_plain",
"=",
"hashlib",
".",
"md5",
"(",
"key",
")",
".",
"hexdigest",
"(",
")",
"key_fp",
"=",
"':'",
".",
"join",
"(",
"(",
"(",
"a",
"+",
"b",
")",
"for",
"(",
"a",
",",
"b",
")",
"in",
"zip",
"(",
"fp_plain",
"[",
":",
":",
"2",
"]",
",",
"fp_plain",
"[",
"1",
":",
":",
"2",
"]",
")",
")",
")",
".",
"upper",
"(",
")",
"if",
"(",
"len",
"(",
"parts",
")",
"<",
"3",
")",
":",
"return",
"(",
"'%s (%s)'",
"%",
"(",
"key_fp",
",",
"key_type",
")",
")",
"else",
":",
"user_host",
"=",
"parts",
"[",
"2",
"]",
"return",
"(",
"'%s %s (%s)'",
"%",
"(",
"key_fp",
",",
"user_host",
",",
"key_type",
")",
")"
] |
return the public key fingerprint of a given public ssh key in format "fb:0c:ac:0a:07:94:5b:ce:75:6e:63:32:13:ad:ad:d7 [user@host] " .
|
train
| false
|
10,696
|
@deprecated('use cls.register(cli)')
def register_commands(commands):
for command_klass in commands:
_COMMANDS.register(command_klass)
|
[
"@",
"deprecated",
"(",
"'use cls.register(cli)'",
")",
"def",
"register_commands",
"(",
"commands",
")",
":",
"for",
"command_klass",
"in",
"commands",
":",
"_COMMANDS",
".",
"register",
"(",
"command_klass",
")"
] |
called when the emr command table is being built .
|
train
| false
|
10,699
|
def caffe_load_image(filename, color=True, as_uint=False):
with WithTimer('imread', quiet=True):
if as_uint:
img = skimage.io.imread(filename)
else:
img = skimage.img_as_float(skimage.io.imread(filename)).astype(np.float32)
if (img.ndim == 2):
img = img[:, :, np.newaxis]
if color:
img = np.tile(img, (1, 1, 3))
elif (img.shape[2] == 4):
img = img[:, :, :3]
return img
|
[
"def",
"caffe_load_image",
"(",
"filename",
",",
"color",
"=",
"True",
",",
"as_uint",
"=",
"False",
")",
":",
"with",
"WithTimer",
"(",
"'imread'",
",",
"quiet",
"=",
"True",
")",
":",
"if",
"as_uint",
":",
"img",
"=",
"skimage",
".",
"io",
".",
"imread",
"(",
"filename",
")",
"else",
":",
"img",
"=",
"skimage",
".",
"img_as_float",
"(",
"skimage",
".",
"io",
".",
"imread",
"(",
"filename",
")",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"if",
"(",
"img",
".",
"ndim",
"==",
"2",
")",
":",
"img",
"=",
"img",
"[",
":",
",",
":",
",",
"np",
".",
"newaxis",
"]",
"if",
"color",
":",
"img",
"=",
"np",
".",
"tile",
"(",
"img",
",",
"(",
"1",
",",
"1",
",",
"3",
")",
")",
"elif",
"(",
"img",
".",
"shape",
"[",
"2",
"]",
"==",
"4",
")",
":",
"img",
"=",
"img",
"[",
":",
",",
":",
",",
":",
"3",
"]",
"return",
"img"
] |
copied from caffe to simplify potential import problems .
|
train
| false
|
10,700
|
def _finalize_fairy(connection, connection_record, pool, ref, echo, fairy=None):
_refs.discard(connection_record)
if ((ref is not None) and (connection_record.fairy_ref is not ref)):
return
if (connection is not None):
if (connection_record and echo):
pool.logger.debug('Connection %r being returned to pool', connection)
try:
fairy = (fairy or _ConnectionFairy(connection, connection_record))
assert (fairy.connection is connection)
fairy._reset(pool, echo)
if (not connection_record):
pool._close_connection(connection)
except Exception as e:
if connection_record:
connection_record.invalidate(e=e)
if isinstance(e, (SystemExit, KeyboardInterrupt)):
raise
if connection_record:
connection_record.checkin()
|
[
"def",
"_finalize_fairy",
"(",
"connection",
",",
"connection_record",
",",
"pool",
",",
"ref",
",",
"echo",
",",
"fairy",
"=",
"None",
")",
":",
"_refs",
".",
"discard",
"(",
"connection_record",
")",
"if",
"(",
"(",
"ref",
"is",
"not",
"None",
")",
"and",
"(",
"connection_record",
".",
"fairy_ref",
"is",
"not",
"ref",
")",
")",
":",
"return",
"if",
"(",
"connection",
"is",
"not",
"None",
")",
":",
"if",
"(",
"connection_record",
"and",
"echo",
")",
":",
"pool",
".",
"logger",
".",
"debug",
"(",
"'Connection %r being returned to pool'",
",",
"connection",
")",
"try",
":",
"fairy",
"=",
"(",
"fairy",
"or",
"_ConnectionFairy",
"(",
"connection",
",",
"connection_record",
")",
")",
"assert",
"(",
"fairy",
".",
"connection",
"is",
"connection",
")",
"fairy",
".",
"_reset",
"(",
"pool",
",",
"echo",
")",
"if",
"(",
"not",
"connection_record",
")",
":",
"pool",
".",
"_close_connection",
"(",
"connection",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"connection_record",
":",
"connection_record",
".",
"invalidate",
"(",
"e",
"=",
"e",
")",
"if",
"isinstance",
"(",
"e",
",",
"(",
"SystemExit",
",",
"KeyboardInterrupt",
")",
")",
":",
"raise",
"if",
"connection_record",
":",
"connection_record",
".",
"checkin",
"(",
")"
] |
cleanup for a :class: .
|
train
| false
|
10,702
|
def _clean_probes(probes):
probes = _ordered_dict_to_dict(probes)
probes_copy = deepcopy(probes)
for (probe_name, probe_tests) in six.iteritems(probes_copy):
if (not probe_tests):
probes.pop(probe_name)
continue
for (test_name, test_params) in six.iteritems(probe_tests):
if (not test_params):
probes[probe_name].pop(test_name)
if (not probes.get(probe_name)):
probes.pop(probe_name)
return True
|
[
"def",
"_clean_probes",
"(",
"probes",
")",
":",
"probes",
"=",
"_ordered_dict_to_dict",
"(",
"probes",
")",
"probes_copy",
"=",
"deepcopy",
"(",
"probes",
")",
"for",
"(",
"probe_name",
",",
"probe_tests",
")",
"in",
"six",
".",
"iteritems",
"(",
"probes_copy",
")",
":",
"if",
"(",
"not",
"probe_tests",
")",
":",
"probes",
".",
"pop",
"(",
"probe_name",
")",
"continue",
"for",
"(",
"test_name",
",",
"test_params",
")",
"in",
"six",
".",
"iteritems",
"(",
"probe_tests",
")",
":",
"if",
"(",
"not",
"test_params",
")",
":",
"probes",
"[",
"probe_name",
"]",
".",
"pop",
"(",
"test_name",
")",
"if",
"(",
"not",
"probes",
".",
"get",
"(",
"probe_name",
")",
")",
":",
"probes",
".",
"pop",
"(",
"probe_name",
")",
"return",
"True"
] |
will remove empty and useless values from the probes dictionary .
|
train
| true
|
10,703
|
def clear_known_categories(x, cols=None, index=True):
if isinstance(x, (pd.Series, pd.DataFrame)):
x = x.copy()
if isinstance(x, pd.DataFrame):
mask = (x.dtypes == 'category')
if (cols is None):
cols = mask[mask].index
elif (not mask.loc[cols].all()):
raise ValueError('Not all columns are categoricals')
for c in cols:
x[c].cat.set_categories([UNKNOWN_CATEGORIES], inplace=True)
elif isinstance(x, pd.Series):
if is_categorical_dtype(x.dtype):
x.cat.set_categories([UNKNOWN_CATEGORIES], inplace=True)
if (index and isinstance(x.index, pd.CategoricalIndex)):
x.index = x.index.set_categories([UNKNOWN_CATEGORIES])
elif isinstance(x, pd.CategoricalIndex):
x = x.set_categories([UNKNOWN_CATEGORIES])
return x
|
[
"def",
"clear_known_categories",
"(",
"x",
",",
"cols",
"=",
"None",
",",
"index",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"(",
"pd",
".",
"Series",
",",
"pd",
".",
"DataFrame",
")",
")",
":",
"x",
"=",
"x",
".",
"copy",
"(",
")",
"if",
"isinstance",
"(",
"x",
",",
"pd",
".",
"DataFrame",
")",
":",
"mask",
"=",
"(",
"x",
".",
"dtypes",
"==",
"'category'",
")",
"if",
"(",
"cols",
"is",
"None",
")",
":",
"cols",
"=",
"mask",
"[",
"mask",
"]",
".",
"index",
"elif",
"(",
"not",
"mask",
".",
"loc",
"[",
"cols",
"]",
".",
"all",
"(",
")",
")",
":",
"raise",
"ValueError",
"(",
"'Not all columns are categoricals'",
")",
"for",
"c",
"in",
"cols",
":",
"x",
"[",
"c",
"]",
".",
"cat",
".",
"set_categories",
"(",
"[",
"UNKNOWN_CATEGORIES",
"]",
",",
"inplace",
"=",
"True",
")",
"elif",
"isinstance",
"(",
"x",
",",
"pd",
".",
"Series",
")",
":",
"if",
"is_categorical_dtype",
"(",
"x",
".",
"dtype",
")",
":",
"x",
".",
"cat",
".",
"set_categories",
"(",
"[",
"UNKNOWN_CATEGORIES",
"]",
",",
"inplace",
"=",
"True",
")",
"if",
"(",
"index",
"and",
"isinstance",
"(",
"x",
".",
"index",
",",
"pd",
".",
"CategoricalIndex",
")",
")",
":",
"x",
".",
"index",
"=",
"x",
".",
"index",
".",
"set_categories",
"(",
"[",
"UNKNOWN_CATEGORIES",
"]",
")",
"elif",
"isinstance",
"(",
"x",
",",
"pd",
".",
"CategoricalIndex",
")",
":",
"x",
"=",
"x",
".",
"set_categories",
"(",
"[",
"UNKNOWN_CATEGORIES",
"]",
")",
"return",
"x"
] |
set categories to be unknown .
|
train
| false
|
10,704
|
def getSPClass(spatialImp):
if (spatialImp == 'py'):
return PYSpatialPooler
elif (spatialImp == 'cpp'):
return CPPSpatialPooler
else:
raise RuntimeError(("Invalid spatialImp '%s'. Legal values are: 'py', 'cpp'" % spatialImp))
|
[
"def",
"getSPClass",
"(",
"spatialImp",
")",
":",
"if",
"(",
"spatialImp",
"==",
"'py'",
")",
":",
"return",
"PYSpatialPooler",
"elif",
"(",
"spatialImp",
"==",
"'cpp'",
")",
":",
"return",
"CPPSpatialPooler",
"else",
":",
"raise",
"RuntimeError",
"(",
"(",
"\"Invalid spatialImp '%s'. Legal values are: 'py', 'cpp'\"",
"%",
"spatialImp",
")",
")"
] |
return the class corresponding to the given spatialimp string .
|
train
| false
|
10,705
|
def escapeRegExpString(string):
toEscapeChars = ['\\', '(', ')', '.', '|', '^', '$', '*', '+', '?', '[', ']']
escapedValue = ''
for i in range(len(string)):
if (string[i] in toEscapeChars):
escapedValue += ('\\' + string[i])
else:
escapedValue += string[i]
return escapedValue
|
[
"def",
"escapeRegExpString",
"(",
"string",
")",
":",
"toEscapeChars",
"=",
"[",
"'\\\\'",
",",
"'('",
",",
"')'",
",",
"'.'",
",",
"'|'",
",",
"'^'",
",",
"'$'",
",",
"'*'",
",",
"'+'",
",",
"'?'",
",",
"'['",
",",
"']'",
"]",
"escapedValue",
"=",
"''",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"string",
")",
")",
":",
"if",
"(",
"string",
"[",
"i",
"]",
"in",
"toEscapeChars",
")",
":",
"escapedValue",
"+=",
"(",
"'\\\\'",
"+",
"string",
"[",
"i",
"]",
")",
"else",
":",
"escapedValue",
"+=",
"string",
"[",
"i",
"]",
"return",
"escapedValue"
] |
escape the given string to include it as a regular expression .
|
train
| false
|
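A minimal check for the escapeRegExpString row above; the pattern is illustrative and the snippet's function is assumed in scope:

import re
# Assumes escapeRegExpString from the snippet above is already defined.
literal = '1+1 (maybe?)'
pattern = escapeRegExpString(literal)          # -> r'1\+1 \(maybe\?\)'
print(re.match(pattern, literal) is not None)  # True -- metacharacters now match literally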
10,706
|
def get_impression_pixel_url(codename):
mac = (codename + hashlib.sha1((codename + g.tracking_secret)).hexdigest())
v_param = ('?v=%s&' % _get_encrypted_user_slug())
hash_and_id_params = urllib.urlencode({'hash': mac, 'id': codename})
return ((g.adframetracker_url + v_param) + hash_and_id_params)
|
[
"def",
"get_impression_pixel_url",
"(",
"codename",
")",
":",
"mac",
"=",
"(",
"codename",
"+",
"hashlib",
".",
"sha1",
"(",
"(",
"codename",
"+",
"g",
".",
"tracking_secret",
")",
")",
".",
"hexdigest",
"(",
")",
")",
"v_param",
"=",
"(",
"'?v=%s&'",
"%",
"_get_encrypted_user_slug",
"(",
")",
")",
"hash_and_id_params",
"=",
"urllib",
".",
"urlencode",
"(",
"{",
"'hash'",
":",
"mac",
",",
"'id'",
":",
"codename",
"}",
")",
"return",
"(",
"(",
"g",
".",
"adframetracker_url",
"+",
"v_param",
")",
"+",
"hash_and_id_params",
")"
] |
return a url to use for tracking impressions of the given advert .
|
train
| false
|
10,708
|
def getComplexDefaultByDictionary(defaultComplex, dictionary, key):
if (key in dictionary):
return complex(dictionary[key].strip().replace('(', '').replace(')', ''))
return defaultComplex
|
[
"def",
"getComplexDefaultByDictionary",
"(",
"defaultComplex",
",",
"dictionary",
",",
"key",
")",
":",
"if",
"(",
"key",
"in",
"dictionary",
")",
":",
"return",
"complex",
"(",
"dictionary",
"[",
"key",
"]",
".",
"strip",
"(",
")",
".",
"replace",
"(",
"'('",
",",
"''",
")",
".",
"replace",
"(",
"')'",
",",
"''",
")",
")",
"return",
"defaultComplex"
] |
get the value as a complex .
|
train
| false
|
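For the getComplexDefaultByDictionary row above, a small illustrative call; the dictionary contents are made up and the snippet's function is assumed in scope:

# Assumes getComplexDefaultByDictionary from the snippet above is already defined.
d = {'offset': ' (1+2j) '}
print(getComplexDefaultByDictionary(0j, d, 'offset'))   # (1+2j) -- parentheses stripped, then complex()
print(getComplexDefaultByDictionary(0j, d, 'missing'))  # 0j -- missing key falls back to the default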
10,709
|
def row_by_value(idl_, table, column, match, default=_NO_DEFAULT):
tab = idl_.tables[table]
for r in tab.rows.values():
if (getattr(r, column) == match):
return r
if (default is not _NO_DEFAULT):
return default
raise RowNotFound(table=table, col=column, match=match)
|
[
"def",
"row_by_value",
"(",
"idl_",
",",
"table",
",",
"column",
",",
"match",
",",
"default",
"=",
"_NO_DEFAULT",
")",
":",
"tab",
"=",
"idl_",
".",
"tables",
"[",
"table",
"]",
"for",
"r",
"in",
"tab",
".",
"rows",
".",
"values",
"(",
")",
":",
"if",
"(",
"getattr",
"(",
"r",
",",
"column",
")",
"==",
"match",
")",
":",
"return",
"r",
"if",
"(",
"default",
"is",
"not",
"_NO_DEFAULT",
")",
":",
"return",
"default",
"raise",
"RowNotFound",
"(",
"table",
"=",
"table",
",",
"col",
"=",
"column",
",",
"match",
"=",
"match",
")"
] |
lookup an idl row in a table by column/value .
|
train
| false
|
10,711
|
def display_timestamps_pair(time_m_2):
if (len(time_m_2) == 0):
return '(empty)'
time_m_2 = np.array(time_m_2)
return '({}, {})'.format(display_timestamps(time_m_2[:, 0]), display_timestamps(time_m_2[:, 1]))
|
[
"def",
"display_timestamps_pair",
"(",
"time_m_2",
")",
":",
"if",
"(",
"len",
"(",
"time_m_2",
")",
"==",
"0",
")",
":",
"return",
"'(empty)'",
"time_m_2",
"=",
"np",
".",
"array",
"(",
"time_m_2",
")",
"return",
"'({}, {})'",
".",
"format",
"(",
"display_timestamps",
"(",
"time_m_2",
"[",
":",
",",
"0",
"]",
")",
",",
"display_timestamps",
"(",
"time_m_2",
"[",
":",
",",
"1",
"]",
")",
")"
] |
takes a list of the following form: [ .
|
train
| true
|
10,712
|
def get_fun(fun):
serv = _get_serv(ret=None)
ret = {}
for minion in serv.smembers('minions'):
ind_str = '{0}:{1}'.format(minion, fun)
try:
jid = serv.get(ind_str)
except Exception:
continue
if (not jid):
continue
data = serv.get('{0}:{1}'.format(minion, jid))
if data:
ret[minion] = json.loads(data)
return ret
|
[
"def",
"get_fun",
"(",
"fun",
")",
":",
"serv",
"=",
"_get_serv",
"(",
"ret",
"=",
"None",
")",
"ret",
"=",
"{",
"}",
"for",
"minion",
"in",
"serv",
".",
"smembers",
"(",
"'minions'",
")",
":",
"ind_str",
"=",
"'{0}:{1}'",
".",
"format",
"(",
"minion",
",",
"fun",
")",
"try",
":",
"jid",
"=",
"serv",
".",
"get",
"(",
"ind_str",
")",
"except",
"Exception",
":",
"continue",
"if",
"(",
"not",
"jid",
")",
":",
"continue",
"data",
"=",
"serv",
".",
"get",
"(",
"'{0}:{1}'",
".",
"format",
"(",
"minion",
",",
"jid",
")",
")",
"if",
"data",
":",
"ret",
"[",
"minion",
"]",
"=",
"json",
".",
"loads",
"(",
"data",
")",
"return",
"ret"
] |
return the most recent jobs that have executed the named function .
|
train
| true
|