id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
46,255
def nics_skip(name, nics, ipv6):
    """Backward-compatible alias that delegates to ``nics_skipped``."""
    result = nics_skipped(name, nics=nics, ipv6=ipv6)
    return result
[ "def", "nics_skip", "(", "name", ",", "nics", ",", "ipv6", ")", ":", "return", "nics_skipped", "(", "name", ",", "nics", "=", "nics", ",", "ipv6", "=", "ipv6", ")" ]
alias for :func:`nics_skipped` .
train
true
46,256
@require_POST
def toggle_skill_subscription(request, url):
    """Toggle whether the requesting user's profile holds the given skill.

    Looks up the skill by its URL slug, removes it from the user's profile
    when already present, otherwise adds it, then redirects back to the
    skill page.
    """
    skill = get_object_or_404(Skill, url=url)
    profile = request.user.userprofile
    already_subscribed = profile.skills.filter(id=skill.id).exists()
    if already_subscribed:
        profile.skills.remove(skill)
    else:
        profile.skills.add(skill)
    return redirect(reverse('groups:show_skill', args=[skill.url]))
[ "@", "require_POST", "def", "toggle_skill_subscription", "(", "request", ",", "url", ")", ":", "skill", "=", "get_object_or_404", "(", "Skill", ",", "url", "=", "url", ")", "profile", "=", "request", ".", "user", ".", "userprofile", "if", "profile", ".", "skills", ".", "filter", "(", "id", "=", "skill", ".", "id", ")", ".", "exists", "(", ")", ":", "profile", ".", "skills", ".", "remove", "(", "skill", ")", "else", ":", "profile", ".", "skills", ".", "add", "(", "skill", ")", "return", "redirect", "(", "reverse", "(", "'groups:show_skill'", ",", "args", "=", "[", "skill", ".", "url", "]", ")", ")" ]
toggle the current user's subscription to a skill .
train
false
46,259
def decode_text(s):
    """Decode a PDFDocEncoding string to unicode.

    Strings prefixed with a UTF-16BE BOM are decoded as UTF-16BE
    (errors ignored); everything else is mapped byte-by-byte through the
    PDFDocEncoding table.  (Python 2 code: uses ``unicode``.)
    """
    if not s.startswith('\xfe\xff'):
        # No BOM: translate each byte through the encoding table.
        return ''.join(PDFDocEncoding[ord(c)] for c in s)
    return unicode(s[2:], 'utf-16be', 'ignore')
[ "def", "decode_text", "(", "s", ")", ":", "if", "s", ".", "startswith", "(", "'\\xfe\\xff'", ")", ":", "return", "unicode", "(", "s", "[", "2", ":", "]", ",", "'utf-16be'", ",", "'ignore'", ")", "else", ":", "return", "''", ".", "join", "(", "(", "PDFDocEncoding", "[", "ord", "(", "c", ")", "]", "for", "c", "in", "s", ")", ")" ]
decodes a pdfdocencoding string to unicode .
train
true
46,260
def attach_basic_attachments(queryset, as_field='attachments_attr'):
    """Attach basic attachments info as a JSON column to each object of the queryset.

    Adds an extra select column named *as_field* containing a JSON array of
    ``{id, attached_file}`` rows for every non-deprecated attachment pointing
    at the object, ordered by explicit order, creation date and id.
    """
    model = queryset.model
    # Renamed from `type` so the builtin is no longer shadowed.
    content_type = apps.get_model('contenttypes', 'ContentType').objects.get_for_model(model)
    sql = 'SELECT json_agg(row_to_json(t))\n FROM(\n SELECT\n attachments_attachment.id,\n attachments_attachment.attached_file\n FROM attachments_attachment\n WHERE attachments_attachment.object_id = {tbl}.id\n AND attachments_attachment.content_type_id = {type_id}\n AND attachments_attachment.is_deprecated = False\n ORDER BY attachments_attachment.order, attachments_attachment.created_date, attachments_attachment.id) t'
    sql = sql.format(tbl=model._meta.db_table, type_id=content_type.id)
    queryset = queryset.extra(select={as_field: sql})
    return queryset
[ "def", "attach_basic_attachments", "(", "queryset", ",", "as_field", "=", "'attachments_attr'", ")", ":", "model", "=", "queryset", ".", "model", "type", "=", "apps", ".", "get_model", "(", "'contenttypes'", ",", "'ContentType'", ")", ".", "objects", ".", "get_for_model", "(", "model", ")", "sql", "=", "'SELECT json_agg(row_to_json(t))\\n FROM(\\n SELECT\\n attachments_attachment.id,\\n attachments_attachment.attached_file\\n FROM attachments_attachment\\n WHERE attachments_attachment.object_id = {tbl}.id\\n AND attachments_attachment.content_type_id = {type_id}\\n AND attachments_attachment.is_deprecated = False\\n ORDER BY attachments_attachment.order, attachments_attachment.created_date, attachments_attachment.id) t'", "sql", "=", "sql", ".", "format", "(", "tbl", "=", "model", ".", "_meta", ".", "db_table", ",", "type_id", "=", "type", ".", "id", ")", "queryset", "=", "queryset", ".", "extra", "(", "select", "=", "{", "as_field", ":", "sql", "}", ")", "return", "queryset" ]
attach basic attachments info as json column to each object of the queryset .
train
false
46,261
def _dup_coil_set(coils, coord_frame, t):
    """Make a duplicate coil set, optionally transformed by *t*.

    Raises RuntimeError when *t* is given but its source frame does not
    match *coord_frame*.
    """
    if t is not None and coord_frame != t['from']:
        raise RuntimeError('transformation frame does not match the coil set')
    duplicated = deepcopy(coils)
    if t is not None:
        coord_frame = t['to']
        trans = t['trans']
        for coil in duplicated:
            # The False flag matches the original calls for direction-like
            # fields — presumably rotation-only; confirm in apply_trans.
            for axis in ('ex', 'ey', 'ez'):
                if axis in coil:
                    coil[axis] = apply_trans(trans, coil[axis], False)
            coil['r0'] = apply_trans(trans, coil['r0'])
            coil['rmag'] = apply_trans(trans, coil['rmag'])
            coil['cosmag'] = apply_trans(trans, coil['cosmag'], False)
            coil['coord_frame'] = t['to']
    return duplicated, coord_frame
[ "def", "_dup_coil_set", "(", "coils", ",", "coord_frame", ",", "t", ")", ":", "if", "(", "(", "t", "is", "not", "None", ")", "and", "(", "coord_frame", "!=", "t", "[", "'from'", "]", ")", ")", ":", "raise", "RuntimeError", "(", "'transformation frame does not match the coil set'", ")", "coils", "=", "deepcopy", "(", "coils", ")", "if", "(", "t", "is", "not", "None", ")", ":", "coord_frame", "=", "t", "[", "'to'", "]", "for", "coil", "in", "coils", ":", "for", "key", "in", "(", "'ex'", ",", "'ey'", ",", "'ez'", ")", ":", "if", "(", "key", "in", "coil", ")", ":", "coil", "[", "key", "]", "=", "apply_trans", "(", "t", "[", "'trans'", "]", ",", "coil", "[", "key", "]", ",", "False", ")", "coil", "[", "'r0'", "]", "=", "apply_trans", "(", "t", "[", "'trans'", "]", ",", "coil", "[", "'r0'", "]", ")", "coil", "[", "'rmag'", "]", "=", "apply_trans", "(", "t", "[", "'trans'", "]", ",", "coil", "[", "'rmag'", "]", ")", "coil", "[", "'cosmag'", "]", "=", "apply_trans", "(", "t", "[", "'trans'", "]", ",", "coil", "[", "'cosmag'", "]", ",", "False", ")", "coil", "[", "'coord_frame'", "]", "=", "t", "[", "'to'", "]", "return", "(", "coils", ",", "coord_frame", ")" ]
make a duplicate .
train
false
46,262
def fake_cluster_ovo(context, **updates):
    """Create a fake Cluster versioned object from a fake ORM row."""
    db_row = fake_cluster_orm(**updates)
    return objects.Cluster._from_db_object(context, objects.Cluster(), db_row)
[ "def", "fake_cluster_ovo", "(", "context", ",", "**", "updates", ")", ":", "return", "objects", ".", "Cluster", ".", "_from_db_object", "(", "context", ",", "objects", ".", "Cluster", "(", ")", ",", "fake_cluster_orm", "(", "**", "updates", ")", ")" ]
create a fake cluster versioned object .
train
false
46,263
def _get_rrd_updates(server, start_time):
    """Return the RRD updates XML as a string, or None on I/O failure.

    *server* is a (scheme, host) pair; credentials come from CONF.
    NOTE(review): embedding the password in the URL can leak it into logs —
    consider an auth header if the endpoint supports one.
    """
    url = ('%s://%s:%s@%s/rrd_updates?start=%s' %
           (server[0], CONF.xenapi_connection_username,
            CONF.xenapi_connection_password, server[1], start_time))
    try:
        xml = urllib.urlopen(url)
        return xml.read()
    except IOError:
        # Explicit mapping instead of fragile `% locals()`: only 'server'
        # is actually interpolated into the message.
        LOG.exception((_('Unable to obtain RRD XML updates with server details: %(server)s.') % {'server': server}))
        return None
[ "def", "_get_rrd_updates", "(", "server", ",", "start_time", ")", ":", "try", ":", "xml", "=", "urllib", ".", "urlopen", "(", "(", "'%s://%s:%s@%s/rrd_updates?start=%s'", "%", "(", "server", "[", "0", "]", ",", "CONF", ".", "xenapi_connection_username", ",", "CONF", ".", "xenapi_connection_password", ",", "server", "[", "1", "]", ",", "start_time", ")", ")", ")", "return", "xml", ".", "read", "(", ")", "except", "IOError", ":", "LOG", ".", "exception", "(", "(", "_", "(", "'Unable to obtain RRD XML updates with server details: %(server)s.'", ")", "%", "locals", "(", ")", ")", ")", "return", "None" ]
return the rrd updates xml as a string .
train
false
46,264
def yaml_from_dict(dictionary, width=80):
    """Return the YAML (block-style) representation of *dictionary*."""
    dumped = yaml.safe_dump(dictionary, default_flow_style=False, width=width)
    return dumped
[ "def", "yaml_from_dict", "(", "dictionary", ",", "width", "=", "80", ")", ":", "return", "yaml", ".", "safe_dump", "(", "dictionary", ",", "default_flow_style", "=", "False", ",", "width", "=", "width", ")" ]
gets the yaml representation of a dict .
train
false
46,266
def checkRun(cmd):
    """Run *cmd* (a space-separated command string) via ``check_call``.

    Returns 0 on success; raises CalledProcessError on non-zero exit.
    """
    argv = cmd.split(' ')
    return check_call(argv)
[ "def", "checkRun", "(", "cmd", ")", ":", "return", "check_call", "(", "cmd", ".", "split", "(", "' '", ")", ")" ]
simple interface to subprocess .
train
false
46,267
def get_appversions(app, min_version, max_version):
    """Return the (min, max) AppVersion rows matching the given version strings."""
    app_versions = AppVersion.objects.filter(application=app.id)
    return (app_versions.get(version=min_version),
            app_versions.get(version=max_version))
[ "def", "get_appversions", "(", "app", ",", "min_version", ",", "max_version", ")", ":", "qs", "=", "AppVersion", ".", "objects", ".", "filter", "(", "application", "=", "app", ".", "id", ")", "min_appver", "=", "qs", ".", "get", "(", "version", "=", "min_version", ")", "max_appver", "=", "qs", ".", "get", "(", "version", "=", "max_version", ")", "return", "(", "min_appver", ",", "max_appver", ")" ]
return the appversions that correspond to the given versions .
train
false
46,268
def check_python_version():
    """Exit with an error dialog/message when running on Python < 3.4.

    50593792 == 0x03040000, i.e. ``sys.hexversion`` for CPython 3.4.0.
    """
    if sys.hexversion >= 50593792:
        return
    version_str = '.'.join(map(str, sys.version_info[:3]))
    text = (('At least Python 3.4 is required to run qutebrowser, but ' + version_str) + ' is installed!\n')
    if Tk and ('--no-err-windows' not in sys.argv):
        # Show a Tk message box when Tk is importable and windows are allowed.
        root = Tk()
        root.withdraw()
        messagebox.showerror('qutebrowser: Fatal error!', text)
    else:
        sys.stderr.write(text)
        sys.stderr.flush()
    sys.exit(1)
[ "def", "check_python_version", "(", ")", ":", "if", "(", "sys", ".", "hexversion", "<", "50593792", ")", ":", "version_str", "=", "'.'", ".", "join", "(", "map", "(", "str", ",", "sys", ".", "version_info", "[", ":", "3", "]", ")", ")", "text", "=", "(", "(", "'At least Python 3.4 is required to run qutebrowser, but '", "+", "version_str", ")", "+", "' is installed!\\n'", ")", "if", "(", "Tk", "and", "(", "'--no-err-windows'", "not", "in", "sys", ".", "argv", ")", ")", ":", "root", "=", "Tk", "(", ")", "root", ".", "withdraw", "(", ")", "messagebox", ".", "showerror", "(", "'qutebrowser: Fatal error!'", ",", "text", ")", "else", ":", "sys", ".", "stderr", ".", "write", "(", "text", ")", "sys", ".", "stderr", ".", "flush", "(", ")", "sys", ".", "exit", "(", "1", ")" ]
checks if the python version is supported by qutebrowser .
train
false
46,269
def is_xen_hyper():
    """Return whether this node is a Xen Dom0 hypervisor running libvirtd.

    CLI example: ``salt '*' virt.is_xen_hyper`` (best-effort: any probe
    failure yields False).
    """
    try:
        if __grains__['virtual_subtype'] != 'Xen Dom0':
            return False
    except KeyError:
        return False
    try:
        with salt.utils.fopen('/proc/modules') as fp_:
            modules = fp_.read()
    except (OSError, IOError):
        return False
    if 'xen_' not in modules:
        return False
    return 'libvirtd' in __salt__['cmd.run'](__grains__['ps'])
[ "def", "is_xen_hyper", "(", ")", ":", "try", ":", "if", "(", "__grains__", "[", "'virtual_subtype'", "]", "!=", "'Xen Dom0'", ")", ":", "return", "False", "except", "KeyError", ":", "return", "False", "try", ":", "with", "salt", ".", "utils", ".", "fopen", "(", "'/proc/modules'", ")", "as", "fp_", ":", "if", "(", "'xen_'", "not", "in", "fp_", ".", "read", "(", ")", ")", ":", "return", "False", "except", "(", "OSError", ",", "IOError", ")", ":", "return", "False", "return", "(", "'libvirtd'", "in", "__salt__", "[", "'cmd.run'", "]", "(", "__grains__", "[", "'ps'", "]", ")", ")" ]
returns a bool whether or not this node is a xen hypervisor cli example: .
train
false
46,270
def change_disk_mode(si, vm_obj, disk_number, mode, disk_prefix_label='Hard disk '):
    """Change the disk mode on a virtual hard disk and wait for the task.

    Raises RuntimeError when no VirtualDisk with the matching label exists;
    returns True once the reconfigure task has completed.
    """
    disk_label = disk_prefix_label + str(disk_number)
    target_disk = None
    for device in vm_obj.config.hardware.device:
        if isinstance(device, vim.vm.device.VirtualDisk) and device.deviceInfo.label == disk_label:
            target_disk = device
    if not target_disk:
        raise RuntimeError('Virtual {} could not be found.'.format(disk_label))
    disk_spec = vim.vm.device.VirtualDeviceSpec()
    disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
    disk_spec.device = target_disk
    disk_spec.device.backing.diskMode = mode
    config_spec = vim.vm.ConfigSpec()
    config_spec.deviceChange = [disk_spec]
    task = vm_obj.ReconfigVM_Task(spec=config_spec)
    tasks.wait_for_tasks(si, [task])
    return True
[ "def", "change_disk_mode", "(", "si", ",", "vm_obj", ",", "disk_number", ",", "mode", ",", "disk_prefix_label", "=", "'Hard disk '", ")", ":", "disk_label", "=", "(", "disk_prefix_label", "+", "str", "(", "disk_number", ")", ")", "virtual_disk_device", "=", "None", "for", "dev", "in", "vm_obj", ".", "config", ".", "hardware", ".", "device", ":", "if", "(", "isinstance", "(", "dev", ",", "vim", ".", "vm", ".", "device", ".", "VirtualDisk", ")", "and", "(", "dev", ".", "deviceInfo", ".", "label", "==", "disk_label", ")", ")", ":", "virtual_disk_device", "=", "dev", "if", "(", "not", "virtual_disk_device", ")", ":", "raise", "RuntimeError", "(", "'Virtual {} could not be found.'", ".", "format", "(", "disk_label", ")", ")", "virtual_disk_spec", "=", "vim", ".", "vm", ".", "device", ".", "VirtualDeviceSpec", "(", ")", "virtual_disk_spec", ".", "operation", "=", "vim", ".", "vm", ".", "device", ".", "VirtualDeviceSpec", ".", "Operation", ".", "edit", "virtual_disk_spec", ".", "device", "=", "virtual_disk_device", "virtual_disk_spec", ".", "device", ".", "backing", ".", "diskMode", "=", "mode", "dev_changes", "=", "[", "]", "dev_changes", ".", "append", "(", "virtual_disk_spec", ")", "spec", "=", "vim", ".", "vm", ".", "ConfigSpec", "(", ")", "spec", ".", "deviceChange", "=", "dev_changes", "task", "=", "vm_obj", ".", "ReconfigVM_Task", "(", "spec", "=", "spec", ")", "tasks", ".", "wait_for_tasks", "(", "si", ",", "[", "task", "]", ")", "return", "True" ]
change the disk mode on a virtual hard disk .
train
false
46,271
def getDHGeneratorAndPrime(kexAlgorithm):
    """Return the (generator, prime) pair used in key exchange for *kexAlgorithm*."""
    algorithm = getKex(kexAlgorithm)
    return (algorithm.generator, algorithm.prime)
[ "def", "getDHGeneratorAndPrime", "(", "kexAlgorithm", ")", ":", "kex", "=", "getKex", "(", "kexAlgorithm", ")", "return", "(", "kex", ".", "generator", ",", "kex", ".", "prime", ")" ]
get the generator and the prime to use in key exchange .
train
false
46,273
def format_yahoo_index_url(symbol, start_date, end_date):
    """Format a URL for querying Yahoo Finance for daily index data.

    Yahoo encodes months zero-based, hence the ``- 1`` on both month fields;
    ``g=d`` selects daily granularity.
    """
    params = {
        's': symbol,
        'a': start_date.month - 1,
        'b': start_date.day,
        'c': start_date.year,
        'd': end_date.month - 1,
        'e': end_date.day,
        'f': end_date.year,
        'g': 'd',
    }
    return 'http://ichart.finance.yahoo.com/table.csv?' + urlencode(params)
[ "def", "format_yahoo_index_url", "(", "symbol", ",", "start_date", ",", "end_date", ")", ":", "return", "(", "'http://ichart.finance.yahoo.com/table.csv?'", "+", "urlencode", "(", "{", "'s'", ":", "symbol", ",", "'a'", ":", "(", "start_date", ".", "month", "-", "1", ")", ",", "'b'", ":", "start_date", ".", "day", ",", "'c'", ":", "start_date", ".", "year", ",", "'d'", ":", "(", "end_date", ".", "month", "-", "1", ")", ",", "'e'", ":", "end_date", ".", "day", ",", "'f'", ":", "end_date", ".", "year", ",", "'g'", ":", "'d'", "}", ")", ")" ]
format a url for querying yahoo finance for index data .
train
false
46,274
def _multi_blockify(tuples, dtype=None):
    """Return an array of blocks built from *tuples*, grouped by array dtype.

    The *dtype* parameter is unused but kept for interface compatibility;
    the loop variable was renamed so it no longer shadows it.
    """
    grouper = itertools.groupby(tuples, lambda x: x[2].dtype)
    new_blocks = []
    for group_dtype, tup_block in grouper:
        values, placement = _stack_arrays(list(tup_block), group_dtype)
        new_blocks.append(make_block(values, placement=placement))
    return new_blocks
[ "def", "_multi_blockify", "(", "tuples", ",", "dtype", "=", "None", ")", ":", "grouper", "=", "itertools", ".", "groupby", "(", "tuples", ",", "(", "lambda", "x", ":", "x", "[", "2", "]", ".", "dtype", ")", ")", "new_blocks", "=", "[", "]", "for", "(", "dtype", ",", "tup_block", ")", "in", "grouper", ":", "(", "values", ",", "placement", ")", "=", "_stack_arrays", "(", "list", "(", "tup_block", ")", ",", "dtype", ")", "block", "=", "make_block", "(", "values", ",", "placement", "=", "placement", ")", "new_blocks", ".", "append", "(", "block", ")", "return", "new_blocks" ]
return an array of blocks that potentially have different dtypes .
train
true
46,275
def _colors(n):
    """Yield *n* fully-saturated RGB colors evenly spaced around the hue wheel."""
    for i in xrange(n):
        hue = float(i) / n
        yield colorsys.hsv_to_rgb(hue, 1.0, 1.0)
[ "def", "_colors", "(", "n", ")", ":", "for", "i", "in", "xrange", "(", "n", ")", ":", "(", "yield", "colorsys", ".", "hsv_to_rgb", "(", "(", "float", "(", "i", ")", "/", "n", ")", ",", "1.0", ",", "1.0", ")", ")" ]
generator function for creating n colors .
train
false
46,276
def _get_fake_course_info(course_id, include_expired=False):
    """Get a course from the _COURSES array by id (None when not found).

    Unless *include_expired* is set, verified modes are stripped in place
    from courses listed in _VERIFIED_MODE_EXPIRED.
    """
    for course in _COURSES:
        if course_id != course['course_id']:
            continue
        if (course_id in _VERIFIED_MODE_EXPIRED) and not include_expired:
            course['course_modes'] = [
                mode for mode in course['course_modes']
                if mode['slug'] != 'verified'
            ]
        return course
[ "def", "_get_fake_course_info", "(", "course_id", ",", "include_expired", "=", "False", ")", ":", "for", "course", "in", "_COURSES", ":", "if", "(", "course_id", "==", "course", "[", "'course_id'", "]", ")", ":", "if", "(", "(", "course_id", "in", "_VERIFIED_MODE_EXPIRED", ")", "and", "(", "not", "include_expired", ")", ")", ":", "course", "[", "'course_modes'", "]", "=", "[", "mode", "for", "mode", "in", "course", "[", "'course_modes'", "]", "if", "(", "mode", "[", "'slug'", "]", "!=", "'verified'", ")", "]", "return", "course" ]
get a course from the courses array .
train
false
46,278
def split_input(val):
    """Take an input value and split it into a list.

    Lists pass through unchanged; strings are split on commas and each
    element is stripped; anything else is stringified first.
    """
    if isinstance(val, list):
        return val
    try:
        return [item.strip() for item in val.split(',')]
    except AttributeError:
        # Not string-like: coerce to str, then split.
        return [item.strip() for item in str(val).split(',')]
[ "def", "split_input", "(", "val", ")", ":", "if", "isinstance", "(", "val", ",", "list", ")", ":", "return", "val", "try", ":", "return", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "val", ".", "split", "(", "','", ")", "]", "except", "AttributeError", ":", "return", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "str", "(", "val", ")", ".", "split", "(", "','", ")", "]" ]
take an input value and split it into a list .
train
false
46,279
def rle_repr(l):
    """Run-length encode a list into an "eval"-able string.

    Example: ``[1, 1, 1, 2] -> '[1]*3 + [2]'``.  Nested lists/tuples are
    encoded recursively.
    """
    def add_rle(previous, runlen, result):
        # Append one run to *result*; short runs are merged into the
        # preceding literal list when that list is not a starred repetition.
        if isinstance(previous, (list, tuple)):
            previous = rle_repr(previous)
        if runlen > 1:
            result.append('[%s]*%i' % (previous, runlen))
        elif result and ('*' not in result[-1]):
            result[-1] = '[%s, %s]' % (result[-1][1:-1], previous)
        else:
            result.append('[%s]' % previous)
    iterable = iter(l)
    runlen = 1
    result = []
    try:
        # Fixed: the next() builtin instead of the Python-2-only
        # `iterable.next()` method (works on Python 2.6+ and 3.x).
        previous = next(iterable)
    except StopIteration:
        return '[]'
    for element in iterable:
        if element == previous:
            runlen = runlen + 1
            continue
        add_rle(previous, runlen, result)
        previous = element
        runlen = 1
    add_rle(previous, runlen, result)
    return ' + '.join(result)
[ "def", "rle_repr", "(", "l", ")", ":", "def", "add_rle", "(", "previous", ",", "runlen", ",", "result", ")", ":", "if", "isinstance", "(", "previous", ",", "(", "list", ",", "tuple", ")", ")", ":", "previous", "=", "rle_repr", "(", "previous", ")", "if", "(", "runlen", ">", "1", ")", ":", "result", ".", "append", "(", "(", "'[%s]*%i'", "%", "(", "previous", ",", "runlen", ")", ")", ")", "elif", "(", "result", "and", "(", "'*'", "not", "in", "result", "[", "(", "-", "1", ")", "]", ")", ")", ":", "result", "[", "(", "-", "1", ")", "]", "=", "(", "'[%s, %s]'", "%", "(", "result", "[", "(", "-", "1", ")", "]", "[", "1", ":", "(", "-", "1", ")", "]", ",", "previous", ")", ")", "else", ":", "result", ".", "append", "(", "(", "'[%s]'", "%", "previous", ")", ")", "iterable", "=", "iter", "(", "l", ")", "runlen", "=", "1", "result", "=", "[", "]", "try", ":", "previous", "=", "iterable", ".", "next", "(", ")", "except", "StopIteration", ":", "return", "'[]'", "for", "element", "in", "iterable", ":", "if", "(", "element", "==", "previous", ")", ":", "runlen", "=", "(", "runlen", "+", "1", ")", "continue", "else", ":", "add_rle", "(", "previous", ",", "runlen", ",", "result", ")", "previous", "=", "element", "runlen", "=", "1", "add_rle", "(", "previous", ",", "runlen", ",", "result", ")", "return", "' + '", ".", "join", "(", "result", ")" ]
run-length encode a list into an "eval"-able form example: .
train
false
46,281
def _next_legen_der(n, x, p0, p01, p0d, p0dd): help_ = p0 helpd = p0d p0 = ((((((2 * n) - 1) * x) * help_) - ((n - 1) * p01)) / n) p0d = ((n * help_) + (x * helpd)) p0dd = (((n + 1) * helpd) + (x * p0dd)) p01 = help_ return (p0, p0d, p0dd)
[ "def", "_next_legen_der", "(", "n", ",", "x", ",", "p0", ",", "p01", ",", "p0d", ",", "p0dd", ")", ":", "help_", "=", "p0", "helpd", "=", "p0d", "p0", "=", "(", "(", "(", "(", "(", "(", "2", "*", "n", ")", "-", "1", ")", "*", "x", ")", "*", "help_", ")", "-", "(", "(", "n", "-", "1", ")", "*", "p01", ")", ")", "/", "n", ")", "p0d", "=", "(", "(", "n", "*", "help_", ")", "+", "(", "x", "*", "helpd", ")", ")", "p0dd", "=", "(", "(", "(", "n", "+", "1", ")", "*", "helpd", ")", "+", "(", "x", "*", "p0dd", ")", ")", "p01", "=", "help_", "return", "(", "p0", ",", "p0d", ",", "p0dd", ")" ]
compute the next legendre polynomial and its derivatives .
train
false
46,283
def null_error_tracker(msg):
    """A no-op error tracker that silently ignores *msg*."""
    return None
[ "def", "null_error_tracker", "(", "msg", ")", ":", "pass" ]
a dummy error tracker that just ignores the messages .
train
false
46,284
def decode_fs_path(path):
    """Decode a filesystem *path* to unicode using the HDFS encoding (strict errors)."""
    decoded = force_unicode(path, HDFS_ENCODING, errors='strict')
    return decoded
[ "def", "decode_fs_path", "(", "path", ")", ":", "return", "force_unicode", "(", "path", ",", "HDFS_ENCODING", ",", "errors", "=", "'strict'", ")" ]
decode_fs_path -> unicode path .
train
false
46,285
def split_user_input(line, pattern=None):
    """Split user input into ``(pre_whitespace, escape, ifun, the_rest)``.

    *pattern* defaults to the module-level ``line_split`` regex; when it
    does not match, the line is split on the first run of whitespace and
    no escape character is reported.
    """
    encoding = get_stream_enc(sys.stdin, 'utf-8')
    line = py3compat.cast_unicode(line, encoding)
    pattern = line_split if pattern is None else pattern
    match = pattern.match(line)
    if match:
        (pre, esc, ifun, the_rest) = match.groups()
    else:
        try:
            (ifun, the_rest) = line.split(None, 1)
        except ValueError:
            # Nothing to split: the whole line is the "function" part.
            (ifun, the_rest) = (line, u'')
        pre = re.match('^(\\s*)(.*)', line).groups()[0]
        esc = ''
    return (pre, esc or '', ifun.strip(), the_rest.lstrip())
[ "def", "split_user_input", "(", "line", ",", "pattern", "=", "None", ")", ":", "encoding", "=", "get_stream_enc", "(", "sys", ".", "stdin", ",", "'utf-8'", ")", "line", "=", "py3compat", ".", "cast_unicode", "(", "line", ",", "encoding", ")", "if", "(", "pattern", "is", "None", ")", ":", "pattern", "=", "line_split", "match", "=", "pattern", ".", "match", "(", "line", ")", "if", "(", "not", "match", ")", ":", "try", ":", "(", "ifun", ",", "the_rest", ")", "=", "line", ".", "split", "(", "None", ",", "1", ")", "except", "ValueError", ":", "(", "ifun", ",", "the_rest", ")", "=", "(", "line", ",", "u''", ")", "pre", "=", "re", ".", "match", "(", "'^(\\\\s*)(.*)'", ",", "line", ")", ".", "groups", "(", ")", "[", "0", "]", "esc", "=", "''", "else", ":", "(", "pre", ",", "esc", ",", "ifun", ",", "the_rest", ")", "=", "match", ".", "groups", "(", ")", "return", "(", "pre", ",", "(", "esc", "or", "''", ")", ",", "ifun", ".", "strip", "(", ")", ",", "the_rest", ".", "lstrip", "(", ")", ")" ]
split user input into initial whitespace .
train
true
46,286
def safe_octal(octal_value):
    """Return *octal_value* as an octal string.

    Correctly handles octal values specified either as a string (passed
    through via ``str``) or as a numeric value (formatted with ``oct``).
    """
    try:
        return oct(octal_value)
    except TypeError:
        # Not oct()-able (e.g. already a string): stringify as-is.
        return str(octal_value)
[ "def", "safe_octal", "(", "octal_value", ")", ":", "try", ":", "return", "oct", "(", "octal_value", ")", "except", "TypeError", ":", "return", "str", "(", "octal_value", ")" ]
safe_octal -> octal value in string this correctly handles octal values specified as a string or as a numeric .
train
false
46,287
@docfiller
def loadmat(file_name, mdict=None, appendmat=True, **kwargs):
    """Load a MATLAB file into a dict of variables.

    When *mdict* is given it is updated in place and returned; otherwise a
    fresh dict is returned.  The underlying stream is closed only when we
    opened it ourselves (i.e. *file_name* was a path string).
    """
    variable_names = kwargs.pop('variable_names', None)
    MR = mat_reader_factory(file_name, appendmat, **kwargs)
    matfile_dict = MR.get_variables(variable_names)
    if mdict is None:
        mdict = matfile_dict
    else:
        mdict.update(matfile_dict)
    if isinstance(file_name, string_types):
        MR.mat_stream.close()
    return mdict
[ "@", "docfiller", "def", "loadmat", "(", "file_name", ",", "mdict", "=", "None", ",", "appendmat", "=", "True", ",", "**", "kwargs", ")", ":", "variable_names", "=", "kwargs", ".", "pop", "(", "'variable_names'", ",", "None", ")", "MR", "=", "mat_reader_factory", "(", "file_name", ",", "appendmat", ",", "**", "kwargs", ")", "matfile_dict", "=", "MR", ".", "get_variables", "(", "variable_names", ")", "if", "(", "mdict", "is", "not", "None", ")", ":", "mdict", ".", "update", "(", "matfile_dict", ")", "else", ":", "mdict", "=", "matfile_dict", "if", "isinstance", "(", "file_name", ",", "string_types", ")", ":", "MR", ".", "mat_stream", ".", "close", "(", ")", "return", "mdict" ]
load matlab file .
train
false
46,288
def loadPreferences(filename):
    """Read a configuration file into the global settings list.

    Preference settings come from the [preference] section; per-machine
    settings from consecutive [machine_0], [machine_1], ... sections.
    Parse errors abort silently (best-effort load).  (Python 2 code:
    uses ``unicode`` and the old ``ConfigParser`` module.)
    """
    global settingsList
    profileParser = ConfigParser.ConfigParser()
    try:
        profileParser.read(filename)
    except ConfigParser.ParsingError:
        return
    # Loop variable renamed from `set` so the builtin is not shadowed.
    for setting in settingsList:
        if setting.isPreference():
            if profileParser.has_option('preference', setting.getName()):
                setting.setValue(unicode(profileParser.get('preference', setting.getName()), 'utf-8', 'replace'))
    n = 0
    while profileParser.has_section('machine_%d' % n):
        for setting in settingsList:
            if setting.isMachineSetting():
                if profileParser.has_option('machine_%d' % n, setting.getName()):
                    setting.setValue(unicode(profileParser.get('machine_%d' % n, setting.getName()), 'utf-8', 'replace'), n)
        n += 1
    setActiveMachine(int(getPreferenceFloat('active_machine')))
[ "def", "loadPreferences", "(", "filename", ")", ":", "global", "settingsList", "profileParser", "=", "ConfigParser", ".", "ConfigParser", "(", ")", "try", ":", "profileParser", ".", "read", "(", "filename", ")", "except", "ConfigParser", ".", "ParsingError", ":", "return", "for", "set", "in", "settingsList", ":", "if", "set", ".", "isPreference", "(", ")", ":", "if", "profileParser", ".", "has_option", "(", "'preference'", ",", "set", ".", "getName", "(", ")", ")", ":", "set", ".", "setValue", "(", "unicode", "(", "profileParser", ".", "get", "(", "'preference'", ",", "set", ".", "getName", "(", ")", ")", ",", "'utf-8'", ",", "'replace'", ")", ")", "n", "=", "0", "while", "profileParser", ".", "has_section", "(", "(", "'machine_%d'", "%", "n", ")", ")", ":", "for", "set", "in", "settingsList", ":", "if", "set", ".", "isMachineSetting", "(", ")", ":", "if", "profileParser", ".", "has_option", "(", "(", "'machine_%d'", "%", "n", ")", ",", "set", ".", "getName", "(", ")", ")", ":", "set", ".", "setValue", "(", "unicode", "(", "profileParser", ".", "get", "(", "(", "'machine_%d'", "%", "n", ")", ",", "set", ".", "getName", "(", ")", ")", ",", "'utf-8'", ",", "'replace'", ")", ",", "n", ")", "n", "+=", "1", "setActiveMachine", "(", "int", "(", "getPreferenceFloat", "(", "'active_machine'", ")", ")", ")" ]
read a configuration file as global config .
train
false
46,289
def launch_tails(follow_paths, lastlines_dirpath=None):
    """Launch a ``tail --follow`` subprocess for each path in *follow_paths*.

    Returns ``(procs, pipes)``: *procs* maps path -> Popen object, *pipes*
    maps the (non-blocking) stdout pipe -> path.  When *lastlines_dirpath*
    is given, each tail resumes from the recorded line offset for its path.
    """
    if lastlines_dirpath and not os.path.exists(lastlines_dirpath):
        os.makedirs(lastlines_dirpath)
    base_cmd = ('/usr/bin/tail', '--retry', '--follow=name')
    procs = {}
    pipes = {}
    for path in follow_paths:
        cmd = list(base_cmd)
        if lastlines_dirpath:
            reverse_lineno = lookup_lastlines(lastlines_dirpath, path)
            if reverse_lineno is None:
                reverse_lineno = 1
            cmd.append('--lines=%d' % reverse_lineno)
        cmd.append(path)
        tail_proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        procs[path] = tail_proc
        pipes[nonblocking(tail_proc.stdout)] = path
    return (procs, pipes)
[ "def", "launch_tails", "(", "follow_paths", ",", "lastlines_dirpath", "=", "None", ")", ":", "if", "(", "lastlines_dirpath", "and", "(", "not", "os", ".", "path", ".", "exists", "(", "lastlines_dirpath", ")", ")", ")", ":", "os", ".", "makedirs", "(", "lastlines_dirpath", ")", "tail_cmd", "=", "(", "'/usr/bin/tail'", ",", "'--retry'", ",", "'--follow=name'", ")", "procs", "=", "{", "}", "pipes", "=", "{", "}", "for", "path", "in", "follow_paths", ":", "cmd", "=", "list", "(", "tail_cmd", ")", "if", "lastlines_dirpath", ":", "reverse_lineno", "=", "lookup_lastlines", "(", "lastlines_dirpath", ",", "path", ")", "if", "(", "reverse_lineno", "is", "None", ")", ":", "reverse_lineno", "=", "1", "cmd", ".", "append", "(", "(", "'--lines=%d'", "%", "reverse_lineno", ")", ")", "cmd", ".", "append", "(", "path", ")", "tail_proc", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "procs", "[", "path", "]", "=", "tail_proc", "pipes", "[", "nonblocking", "(", "tail_proc", ".", "stdout", ")", "]", "=", "path", "return", "(", "procs", ",", "pipes", ")" ]
launch a tail process for each follow_path .
train
false
46,290
def string_to_int(s):
    """Interpret a string/bytes sequence as a big-endian unsigned integer."""
    result = 0
    for c in s:
        # Iterating bytes yields ints on Python 3; characters need ord().
        byte = c if isinstance(c, int) else ord(c)
        result = result * 256 + byte
    return result
[ "def", "string_to_int", "(", "s", ")", ":", "result", "=", "0", "for", "c", "in", "s", ":", "if", "(", "not", "isinstance", "(", "c", ",", "int", ")", ")", ":", "c", "=", "ord", "(", "c", ")", "result", "=", "(", "(", "256", "*", "result", ")", "+", "c", ")", "return", "result" ]
convert a string of bytes into an integer .
train
true
46,291
def am_following_user(context, data_dict):
    """Return True if you're following the given user."""
    schema = ckan.logic.schema.default_follow_user_schema()
    follower_model = context['model'].UserFollowingUser
    return _am_following(context, data_dict, schema, follower_model)
[ "def", "am_following_user", "(", "context", ",", "data_dict", ")", ":", "return", "_am_following", "(", "context", ",", "data_dict", ",", "ckan", ".", "logic", ".", "schema", ".", "default_follow_user_schema", "(", ")", ",", "context", "[", "'model'", "]", ".", "UserFollowingUser", ")" ]
return true if youre following the given user .
train
false
46,292
def queryset_orders_for_user(user):
    """Return a queryset of all orders the given user is allowed to access.

    Staff see every order; other users only see orders containing lines
    from partners they belong to.
    """
    queryset = Order._default_manager.select_related(
        'billing_address', 'billing_address__country',
        'shipping_address', 'shipping_address__country',
        'user').prefetch_related('lines')
    if not user.is_staff:
        partners = Partner._default_manager.filter(users=user)
        queryset = queryset.filter(lines__partner__in=partners).distinct()
    return queryset
[ "def", "queryset_orders_for_user", "(", "user", ")", ":", "queryset", "=", "Order", ".", "_default_manager", ".", "select_related", "(", "'billing_address'", ",", "'billing_address__country'", ",", "'shipping_address'", ",", "'shipping_address__country'", ",", "'user'", ")", ".", "prefetch_related", "(", "'lines'", ")", "if", "user", ".", "is_staff", ":", "return", "queryset", "else", ":", "partners", "=", "Partner", ".", "_default_manager", ".", "filter", "(", "users", "=", "user", ")", "return", "queryset", ".", "filter", "(", "lines__partner__in", "=", "partners", ")", ".", "distinct", "(", ")" ]
returns a queryset of all orders that a user is allowed to access .
train
false
46,293
def get_required_dists(dists, dist):
    """Recursively collect the distributions from *dists* required by *dist*.

    Raises DistlibException when *dist* itself is not in *dists*.
    """
    if dist not in dists:
        raise DistlibException((u'given distribution %r is not a member of the list' % dist.name))
    graph = make_graph(dists)
    required = []
    # NOTE: this worklist aliases (and drains) the graph's adjacency list,
    # matching the original behaviour.
    pending = graph.adjacency_list[dist]
    while pending:
        candidate = pending.pop()[0]
        required.append(candidate)
        for dependency in graph.adjacency_list[candidate]:
            if dependency not in required:
                pending.append(dependency)
    return required
[ "def", "get_required_dists", "(", "dists", ",", "dist", ")", ":", "if", "(", "dist", "not", "in", "dists", ")", ":", "raise", "DistlibException", "(", "(", "u'given distribution %r is not a member of the list'", "%", "dist", ".", "name", ")", ")", "graph", "=", "make_graph", "(", "dists", ")", "req", "=", "[", "]", "todo", "=", "graph", ".", "adjacency_list", "[", "dist", "]", "while", "todo", ":", "d", "=", "todo", ".", "pop", "(", ")", "[", "0", "]", "req", ".", "append", "(", "d", ")", "for", "pred", "in", "graph", ".", "adjacency_list", "[", "d", "]", ":", "if", "(", "pred", "not", "in", "req", ")", ":", "todo", ".", "append", "(", "pred", ")", "return", "req" ]
recursively generate a list of distributions from *dists* that are required by *dist* .
train
true
46,294
def test_emacs_arguments(): (result, cli) = _feed_cli_with_input(u'\x1b4x\n') assert (result.text == u'xxxx') (result, cli) = _feed_cli_with_input(u'\x1b44x\n') assert (result.text == (u'x' * 44)) (result, cli) = _feed_cli_with_input(u'\x1b4\x1b4x\n') assert (result.text == (u'x' * 44)) (result, cli) = _feed_cli_with_input(u'aaaa\x1b-\x1b[Cbbbb\n') assert (result.text == u'aaabbbba') (result, cli) = _feed_cli_with_input(u'aaaa\x1b-3\x1b[Cbbbb\n') assert (result.text == u'abbbbaaa') (result, cli) = _feed_cli_with_input(u'aaaa\x1b---3\x1b[Cbbbb\n') assert (result.text == u'abbbbaaa')
[ "def", "test_emacs_arguments", "(", ")", ":", "(", "result", ",", "cli", ")", "=", "_feed_cli_with_input", "(", "u'\\x1b4x\\n'", ")", "assert", "(", "result", ".", "text", "==", "u'xxxx'", ")", "(", "result", ",", "cli", ")", "=", "_feed_cli_with_input", "(", "u'\\x1b44x\\n'", ")", "assert", "(", "result", ".", "text", "==", "(", "u'x'", "*", "44", ")", ")", "(", "result", ",", "cli", ")", "=", "_feed_cli_with_input", "(", "u'\\x1b4\\x1b4x\\n'", ")", "assert", "(", "result", ".", "text", "==", "(", "u'x'", "*", "44", ")", ")", "(", "result", ",", "cli", ")", "=", "_feed_cli_with_input", "(", "u'aaaa\\x1b-\\x1b[Cbbbb\\n'", ")", "assert", "(", "result", ".", "text", "==", "u'aaabbbba'", ")", "(", "result", ",", "cli", ")", "=", "_feed_cli_with_input", "(", "u'aaaa\\x1b-3\\x1b[Cbbbb\\n'", ")", "assert", "(", "result", ".", "text", "==", "u'abbbbaaa'", ")", "(", "result", ",", "cli", ")", "=", "_feed_cli_with_input", "(", "u'aaaa\\x1b---3\\x1b[Cbbbb\\n'", ")", "assert", "(", "result", ".", "text", "==", "u'abbbbaaa'", ")" ]
test various combinations of arguments in emacs mode .
train
false
46,296
@register.inclusion_tag(u'shop/includes/order_totals.txt', takes_context=True) def order_totals_text(context): return _order_totals(context)
[ "@", "register", ".", "inclusion_tag", "(", "u'shop/includes/order_totals.txt'", ",", "takes_context", "=", "True", ")", "def", "order_totals_text", "(", "context", ")", ":", "return", "_order_totals", "(", "context", ")" ]
text version of order_totals .
train
false
46,297
def reidemeister_presentation(fp_grp, H, elm_rounds=2, simp_rounds=2): C = coset_enumeration_r(fp_grp, H) C.compress() C.standardize() define_schreier_generators(C) reidemeister_relators(C) for i in range(20): elimination_technique_1(C) simplify_presentation(C) C.schreier_generators = tuple(C._schreier_generators) C.reidemeister_relators = tuple(C._reidemeister_relators) return (C.schreier_generators, C.reidemeister_relators)
[ "def", "reidemeister_presentation", "(", "fp_grp", ",", "H", ",", "elm_rounds", "=", "2", ",", "simp_rounds", "=", "2", ")", ":", "C", "=", "coset_enumeration_r", "(", "fp_grp", ",", "H", ")", "C", ".", "compress", "(", ")", "C", ".", "standardize", "(", ")", "define_schreier_generators", "(", "C", ")", "reidemeister_relators", "(", "C", ")", "for", "i", "in", "range", "(", "20", ")", ":", "elimination_technique_1", "(", "C", ")", "simplify_presentation", "(", "C", ")", "C", ".", "schreier_generators", "=", "tuple", "(", "C", ".", "_schreier_generators", ")", "C", ".", "reidemeister_relators", "=", "tuple", "(", "C", ".", "_reidemeister_relators", ")", "return", "(", "C", ".", "schreier_generators", ",", "C", ".", "reidemeister_relators", ")" ]
fp_group: a finitely presented group .
train
false
46,299
def chunk_str(content, length=420): def chunk(c, l): while c: out = (c + ' ')[:l].rsplit(' ', 1)[0] c = c[len(out):].strip() (yield out) return list(chunk(content, length))
[ "def", "chunk_str", "(", "content", ",", "length", "=", "420", ")", ":", "def", "chunk", "(", "c", ",", "l", ")", ":", "while", "c", ":", "out", "=", "(", "c", "+", "' '", ")", "[", ":", "l", "]", ".", "rsplit", "(", "' '", ",", "1", ")", "[", "0", "]", "c", "=", "c", "[", "len", "(", "out", ")", ":", "]", ".", "strip", "(", ")", "(", "yield", "out", ")", "return", "list", "(", "chunk", "(", "content", ",", "length", ")", ")" ]
chunks a string into smaller strings of given length .
train
false
46,300
def setLoopLayerScale(loopLayer, xyPlaneScale, zAxisScale): for loop in loopLayer.loops: for pointIndex in xrange(len(loop)): loop[pointIndex] *= xyPlaneScale loopLayer.z *= zAxisScale
[ "def", "setLoopLayerScale", "(", "loopLayer", ",", "xyPlaneScale", ",", "zAxisScale", ")", ":", "for", "loop", "in", "loopLayer", ".", "loops", ":", "for", "pointIndex", "in", "xrange", "(", "len", "(", "loop", ")", ")", ":", "loop", "[", "pointIndex", "]", "*=", "xyPlaneScale", "loopLayer", ".", "z", "*=", "zAxisScale" ]
set the slice element scale .
train
false
46,301
def _re_pattern_pprint(obj, p, cycle): p.text('re.compile(') pattern = repr(obj.pattern) if (pattern[:1] in 'uU'): pattern = pattern[1:] prefix = 'ur' else: prefix = 'r' pattern = (prefix + pattern.replace('\\\\', '\\')) p.text(pattern) if obj.flags: p.text(',') p.breakable() done_one = False for flag in ('TEMPLATE', 'IGNORECASE', 'LOCALE', 'MULTILINE', 'DOTALL', 'UNICODE', 'VERBOSE', 'DEBUG'): if (obj.flags & getattr(re, flag)): if done_one: p.text('|') p.text(('re.' + flag)) done_one = True p.text(')')
[ "def", "_re_pattern_pprint", "(", "obj", ",", "p", ",", "cycle", ")", ":", "p", ".", "text", "(", "'re.compile('", ")", "pattern", "=", "repr", "(", "obj", ".", "pattern", ")", "if", "(", "pattern", "[", ":", "1", "]", "in", "'uU'", ")", ":", "pattern", "=", "pattern", "[", "1", ":", "]", "prefix", "=", "'ur'", "else", ":", "prefix", "=", "'r'", "pattern", "=", "(", "prefix", "+", "pattern", ".", "replace", "(", "'\\\\\\\\'", ",", "'\\\\'", ")", ")", "p", ".", "text", "(", "pattern", ")", "if", "obj", ".", "flags", ":", "p", ".", "text", "(", "','", ")", "p", ".", "breakable", "(", ")", "done_one", "=", "False", "for", "flag", "in", "(", "'TEMPLATE'", ",", "'IGNORECASE'", ",", "'LOCALE'", ",", "'MULTILINE'", ",", "'DOTALL'", ",", "'UNICODE'", ",", "'VERBOSE'", ",", "'DEBUG'", ")", ":", "if", "(", "obj", ".", "flags", "&", "getattr", "(", "re", ",", "flag", ")", ")", ":", "if", "done_one", ":", "p", ".", "text", "(", "'|'", ")", "p", ".", "text", "(", "(", "'re.'", "+", "flag", ")", ")", "done_one", "=", "True", "p", ".", "text", "(", "')'", ")" ]
the pprint function for regular expression patterns .
train
true
46,302
def find_commands(management_dir): command_dir = os.path.join(management_dir, 'commands') try: return [f[:(-3)] for f in os.listdir(command_dir) if ((not f.startswith('_')) and f.endswith('.py'))] except OSError: return []
[ "def", "find_commands", "(", "management_dir", ")", ":", "command_dir", "=", "os", ".", "path", ".", "join", "(", "management_dir", ",", "'commands'", ")", "try", ":", "return", "[", "f", "[", ":", "(", "-", "3", ")", "]", "for", "f", "in", "os", ".", "listdir", "(", "command_dir", ")", "if", "(", "(", "not", "f", ".", "startswith", "(", "'_'", ")", ")", "and", "f", ".", "endswith", "(", "'.py'", ")", ")", "]", "except", "OSError", ":", "return", "[", "]" ]
given a path to a management directory .
train
true
46,303
def tree2str(tree, concat=' '): return concat.join([word for (word, tag) in tree])
[ "def", "tree2str", "(", "tree", ",", "concat", "=", "' '", ")", ":", "return", "concat", ".", "join", "(", "[", "word", "for", "(", "word", ",", "tag", ")", "in", "tree", "]", ")" ]
convert a nltk .
train
false
46,304
def test_sobel_v_zeros(): result = filters.sobel_v(np.zeros((10, 10)), np.ones((10, 10), bool)) assert_allclose(result, 0)
[ "def", "test_sobel_v_zeros", "(", ")", ":", "result", "=", "filters", ".", "sobel_v", "(", "np", ".", "zeros", "(", "(", "10", ",", "10", ")", ")", ",", "np", ".", "ones", "(", "(", "10", ",", "10", ")", ",", "bool", ")", ")", "assert_allclose", "(", "result", ",", "0", ")" ]
vertical sobel on an array of all zeros .
train
false
46,306
def _correct_offset(fid): current = fid.tell() if ((current % BTI.FILE_CURPOS) != 0): offset = (current % BTI.FILE_CURPOS) fid.seek((BTI.FILE_CURPOS - offset), 1)
[ "def", "_correct_offset", "(", "fid", ")", ":", "current", "=", "fid", ".", "tell", "(", ")", "if", "(", "(", "current", "%", "BTI", ".", "FILE_CURPOS", ")", "!=", "0", ")", ":", "offset", "=", "(", "current", "%", "BTI", ".", "FILE_CURPOS", ")", "fid", ".", "seek", "(", "(", "BTI", ".", "FILE_CURPOS", "-", "offset", ")", ",", "1", ")" ]
align fid pointer .
train
false
46,307
def get_container_root(container): default_path = os.path.join('/var/lib/docker', 'containers', _get_container_infos(container)['Id']) default_rootfs = os.path.join(default_path, 'rootfs') rootfs_re = re.compile('^lxc.rootfs\\s*=\\s*(.*)\\s*$', re.U) try: lxcconfig = os.path.join(default_path, 'config.lxc') with salt.utils.fopen(lxcconfig) as fhr: lines = fhr.readlines() rlines = lines[:] rlines.reverse() for rline in rlines: robj = rootfs_re.search(rline) if robj: rootfs = robj.groups()[0] break except Exception: rootfs = default_rootfs return rootfs
[ "def", "get_container_root", "(", "container", ")", ":", "default_path", "=", "os", ".", "path", ".", "join", "(", "'/var/lib/docker'", ",", "'containers'", ",", "_get_container_infos", "(", "container", ")", "[", "'Id'", "]", ")", "default_rootfs", "=", "os", ".", "path", ".", "join", "(", "default_path", ",", "'rootfs'", ")", "rootfs_re", "=", "re", ".", "compile", "(", "'^lxc.rootfs\\\\s*=\\\\s*(.*)\\\\s*$'", ",", "re", ".", "U", ")", "try", ":", "lxcconfig", "=", "os", ".", "path", ".", "join", "(", "default_path", ",", "'config.lxc'", ")", "with", "salt", ".", "utils", ".", "fopen", "(", "lxcconfig", ")", "as", "fhr", ":", "lines", "=", "fhr", ".", "readlines", "(", ")", "rlines", "=", "lines", "[", ":", "]", "rlines", ".", "reverse", "(", ")", "for", "rline", "in", "rlines", ":", "robj", "=", "rootfs_re", ".", "search", "(", "rline", ")", "if", "robj", ":", "rootfs", "=", "robj", ".", "groups", "(", ")", "[", "0", "]", "break", "except", "Exception", ":", "rootfs", "=", "default_rootfs", "return", "rootfs" ]
get the container rootfs path container container id or grain cli example: .
train
false
46,309
def get_http_proxy(network_service='Ethernet'): if (__grains__['os'] == 'Windows'): return _get_proxy_windows(['http']) return _get_proxy_osx('getwebproxy', network_service)
[ "def", "get_http_proxy", "(", "network_service", "=", "'Ethernet'", ")", ":", "if", "(", "__grains__", "[", "'os'", "]", "==", "'Windows'", ")", ":", "return", "_get_proxy_windows", "(", "[", "'http'", "]", ")", "return", "_get_proxy_osx", "(", "'getwebproxy'", ",", "network_service", ")" ]
returns the current http proxy settings network_service the network service to apply the changes to .
train
false
46,310
def dfs_preorder_nodes(G, source=None): pre = (v for (u, v, d) in nx.dfs_labeled_edges(G, source=source) if (d == 'forward')) return pre
[ "def", "dfs_preorder_nodes", "(", "G", ",", "source", "=", "None", ")", ":", "pre", "=", "(", "v", "for", "(", "u", ",", "v", ",", "d", ")", "in", "nx", ".", "dfs_labeled_edges", "(", "G", ",", "source", "=", "source", ")", "if", "(", "d", "==", "'forward'", ")", ")", "return", "pre" ]
produce nodes in a depth-first-search pre-ordering starting from source .
train
false
46,311
def generateVectors(numVectors=100, length=500, activity=50): vectors = [] coinc = numpy.zeros(length, dtype='int32') indexList = range(length) for i in xrange(numVectors): coinc[:] = 0 coinc[random.sample(indexList, activity)] = 1 vectors.append(coinc.copy()) return vectors
[ "def", "generateVectors", "(", "numVectors", "=", "100", ",", "length", "=", "500", ",", "activity", "=", "50", ")", ":", "vectors", "=", "[", "]", "coinc", "=", "numpy", ".", "zeros", "(", "length", ",", "dtype", "=", "'int32'", ")", "indexList", "=", "range", "(", "length", ")", "for", "i", "in", "xrange", "(", "numVectors", ")", ":", "coinc", "[", ":", "]", "=", "0", "coinc", "[", "random", ".", "sample", "(", "indexList", ",", "activity", ")", "]", "=", "1", "vectors", ".", "append", "(", "coinc", ".", "copy", "(", ")", ")", "return", "vectors" ]
generate a list of random sparse distributed vectors .
train
true
46,312
def _check_fname(fname, overwrite=False, must_exist=False): if (not isinstance(fname, string_types)): raise TypeError('file name is not a string') if (must_exist and (not op.isfile(fname))): raise IOError(('File "%s" does not exist' % fname)) if op.isfile(fname): if (not overwrite): raise IOError('Destination file exists. Please use option "overwrite=True" to force overwriting.') else: logger.info('Overwriting existing file.')
[ "def", "_check_fname", "(", "fname", ",", "overwrite", "=", "False", ",", "must_exist", "=", "False", ")", ":", "if", "(", "not", "isinstance", "(", "fname", ",", "string_types", ")", ")", ":", "raise", "TypeError", "(", "'file name is not a string'", ")", "if", "(", "must_exist", "and", "(", "not", "op", ".", "isfile", "(", "fname", ")", ")", ")", ":", "raise", "IOError", "(", "(", "'File \"%s\" does not exist'", "%", "fname", ")", ")", "if", "op", ".", "isfile", "(", "fname", ")", ":", "if", "(", "not", "overwrite", ")", ":", "raise", "IOError", "(", "'Destination file exists. Please use option \"overwrite=True\" to force overwriting.'", ")", "else", ":", "logger", ".", "info", "(", "'Overwriting existing file.'", ")" ]
check for file existence .
train
false
46,313
def disambig_string(info): disambig = [] if (info.data_source and (info.data_source != 'MusicBrainz')): disambig.append(info.data_source) if isinstance(info, hooks.AlbumInfo): if info.media: if (info.mediums and (info.mediums > 1)): disambig.append(u'{0}x{1}'.format(info.mediums, info.media)) else: disambig.append(info.media) if info.year: disambig.append(six.text_type(info.year)) if info.country: disambig.append(info.country) if info.label: disambig.append(info.label) if info.albumdisambig: disambig.append(info.albumdisambig) if disambig: return u', '.join(disambig)
[ "def", "disambig_string", "(", "info", ")", ":", "disambig", "=", "[", "]", "if", "(", "info", ".", "data_source", "and", "(", "info", ".", "data_source", "!=", "'MusicBrainz'", ")", ")", ":", "disambig", ".", "append", "(", "info", ".", "data_source", ")", "if", "isinstance", "(", "info", ",", "hooks", ".", "AlbumInfo", ")", ":", "if", "info", ".", "media", ":", "if", "(", "info", ".", "mediums", "and", "(", "info", ".", "mediums", ">", "1", ")", ")", ":", "disambig", ".", "append", "(", "u'{0}x{1}'", ".", "format", "(", "info", ".", "mediums", ",", "info", ".", "media", ")", ")", "else", ":", "disambig", ".", "append", "(", "info", ".", "media", ")", "if", "info", ".", "year", ":", "disambig", ".", "append", "(", "six", ".", "text_type", "(", "info", ".", "year", ")", ")", "if", "info", ".", "country", ":", "disambig", ".", "append", "(", "info", ".", "country", ")", "if", "info", ".", "label", ":", "disambig", ".", "append", "(", "info", ".", "label", ")", "if", "info", ".", "albumdisambig", ":", "disambig", ".", "append", "(", "info", ".", "albumdisambig", ")", "if", "disambig", ":", "return", "u', '", ".", "join", "(", "disambig", ")" ]
generate a string for an albuminfo or trackinfo object that provides context that helps disambiguate similar-looking albums and tracks .
train
false
46,314
def test_attributes(mixin_cols): m = mixin_cols['m'] m.info.name = 'a' assert (m.info.name == 'a') m.info.description = 'a' assert (m.info.description == 'a') if isinstance(m, (u.Quantity, coordinates.SkyCoord, time.Time)): with pytest.raises(AttributeError): m.info.unit = u.m else: m.info.unit = u.m assert (m.info.unit is u.m) m.info.format = 'a' assert (m.info.format == 'a') m.info.meta = {'a': 1} assert (m.info.meta == {'a': 1}) with pytest.raises(AttributeError): m.info.bad_attr = 1 with pytest.raises(AttributeError): m.info.bad_attr
[ "def", "test_attributes", "(", "mixin_cols", ")", ":", "m", "=", "mixin_cols", "[", "'m'", "]", "m", ".", "info", ".", "name", "=", "'a'", "assert", "(", "m", ".", "info", ".", "name", "==", "'a'", ")", "m", ".", "info", ".", "description", "=", "'a'", "assert", "(", "m", ".", "info", ".", "description", "==", "'a'", ")", "if", "isinstance", "(", "m", ",", "(", "u", ".", "Quantity", ",", "coordinates", ".", "SkyCoord", ",", "time", ".", "Time", ")", ")", ":", "with", "pytest", ".", "raises", "(", "AttributeError", ")", ":", "m", ".", "info", ".", "unit", "=", "u", ".", "m", "else", ":", "m", ".", "info", ".", "unit", "=", "u", ".", "m", "assert", "(", "m", ".", "info", ".", "unit", "is", "u", ".", "m", ")", "m", ".", "info", ".", "format", "=", "'a'", "assert", "(", "m", ".", "info", ".", "format", "==", "'a'", ")", "m", ".", "info", ".", "meta", "=", "{", "'a'", ":", "1", "}", "assert", "(", "m", ".", "info", ".", "meta", "==", "{", "'a'", ":", "1", "}", ")", "with", "pytest", ".", "raises", "(", "AttributeError", ")", ":", "m", ".", "info", ".", "bad_attr", "=", "1", "with", "pytest", ".", "raises", "(", "AttributeError", ")", ":", "m", ".", "info", ".", "bad_attr" ]
test if attribute checks are in place .
train
false
46,315
def deblock(B): if ((not isinstance(B, BlockMatrix)) or (not B.blocks.has(BlockMatrix))): return B wrap = (lambda x: (x if isinstance(x, BlockMatrix) else BlockMatrix([[x]]))) bb = B.blocks.applyfunc(wrap) from sympy import Matrix try: MM = Matrix(0, sum((bb[(0, i)].blocks.shape[1] for i in range(bb.shape[1]))), []) for row in range(0, bb.shape[0]): M = Matrix(bb[(row, 0)].blocks) for col in range(1, bb.shape[1]): M = M.row_join(bb[(row, col)].blocks) MM = MM.col_join(M) return BlockMatrix(MM) except ShapeError: return B
[ "def", "deblock", "(", "B", ")", ":", "if", "(", "(", "not", "isinstance", "(", "B", ",", "BlockMatrix", ")", ")", "or", "(", "not", "B", ".", "blocks", ".", "has", "(", "BlockMatrix", ")", ")", ")", ":", "return", "B", "wrap", "=", "(", "lambda", "x", ":", "(", "x", "if", "isinstance", "(", "x", ",", "BlockMatrix", ")", "else", "BlockMatrix", "(", "[", "[", "x", "]", "]", ")", ")", ")", "bb", "=", "B", ".", "blocks", ".", "applyfunc", "(", "wrap", ")", "from", "sympy", "import", "Matrix", "try", ":", "MM", "=", "Matrix", "(", "0", ",", "sum", "(", "(", "bb", "[", "(", "0", ",", "i", ")", "]", ".", "blocks", ".", "shape", "[", "1", "]", "for", "i", "in", "range", "(", "bb", ".", "shape", "[", "1", "]", ")", ")", ")", ",", "[", "]", ")", "for", "row", "in", "range", "(", "0", ",", "bb", ".", "shape", "[", "0", "]", ")", ":", "M", "=", "Matrix", "(", "bb", "[", "(", "row", ",", "0", ")", "]", ".", "blocks", ")", "for", "col", "in", "range", "(", "1", ",", "bb", ".", "shape", "[", "1", "]", ")", ":", "M", "=", "M", ".", "row_join", "(", "bb", "[", "(", "row", ",", "col", ")", "]", ".", "blocks", ")", "MM", "=", "MM", ".", "col_join", "(", "M", ")", "return", "BlockMatrix", "(", "MM", ")", "except", "ShapeError", ":", "return", "B" ]
flatten a blockmatrix of blockmatrices .
train
false
46,316
def convert_to_one_hot(integer_vector, dtype=None, max_labels=None, mode='stack', sparse=False): if (dtype is None): dtype = config.floatX if isinstance(integer_vector, list): integer_vector = np.array(integer_vector) assert (np.min(integer_vector) >= 0) assert (integer_vector.ndim <= 2) if (max_labels is None): max_labels = (max(integer_vector) + 1) return OneHotFormatter(max_labels, dtype=dtype).format(integer_vector, mode=mode, sparse=sparse)
[ "def", "convert_to_one_hot", "(", "integer_vector", ",", "dtype", "=", "None", ",", "max_labels", "=", "None", ",", "mode", "=", "'stack'", ",", "sparse", "=", "False", ")", ":", "if", "(", "dtype", "is", "None", ")", ":", "dtype", "=", "config", ".", "floatX", "if", "isinstance", "(", "integer_vector", ",", "list", ")", ":", "integer_vector", "=", "np", ".", "array", "(", "integer_vector", ")", "assert", "(", "np", ".", "min", "(", "integer_vector", ")", ">=", "0", ")", "assert", "(", "integer_vector", ".", "ndim", "<=", "2", ")", "if", "(", "max_labels", "is", "None", ")", ":", "max_labels", "=", "(", "max", "(", "integer_vector", ")", "+", "1", ")", "return", "OneHotFormatter", "(", "max_labels", ",", "dtype", "=", "dtype", ")", ".", "format", "(", "integer_vector", ",", "mode", "=", "mode", ",", "sparse", "=", "sparse", ")" ]
formats a given array of target labels into a one-hot vector .
train
false
46,317
def del_hist_job(job, del_files): if job: path = PostProcessor.do.get_path(job) if path: PostProcessor.do.delete(job, del_files=del_files) else: history_db = sabnzbd.connect_db() path = history_db.get_path(job) history_db.remove_history(job) if (path and del_files and clip_path(path).lower().startswith(cfg.download_dir.get_path().lower())): remove_all(path, recursive=True) return True
[ "def", "del_hist_job", "(", "job", ",", "del_files", ")", ":", "if", "job", ":", "path", "=", "PostProcessor", ".", "do", ".", "get_path", "(", "job", ")", "if", "path", ":", "PostProcessor", ".", "do", ".", "delete", "(", "job", ",", "del_files", "=", "del_files", ")", "else", ":", "history_db", "=", "sabnzbd", ".", "connect_db", "(", ")", "path", "=", "history_db", ".", "get_path", "(", "job", ")", "history_db", ".", "remove_history", "(", "job", ")", "if", "(", "path", "and", "del_files", "and", "clip_path", "(", "path", ")", ".", "lower", "(", ")", ".", "startswith", "(", "cfg", ".", "download_dir", ".", "get_path", "(", ")", ".", "lower", "(", ")", ")", ")", ":", "remove_all", "(", "path", ",", "recursive", "=", "True", ")", "return", "True" ]
remove history element .
train
false
46,318
def data_to_token_ids(data_path, target_path, vocabulary_path, tokenizer=None, normalize_digits=True, UNK_ID=3, _DIGIT_RE=re.compile('\\d')): if (not gfile.Exists(target_path)): print ('Tokenizing data in %s' % data_path) (vocab, _) = initialize_vocabulary(vocabulary_path) with gfile.GFile(data_path, mode='rb') as data_file: with gfile.GFile(target_path, mode='w') as tokens_file: counter = 0 for line in data_file: counter += 1 if ((counter % 100000) == 0): print (' tokenizing line %d' % counter) token_ids = sentence_to_token_ids(line, vocab, tokenizer, normalize_digits, UNK_ID=UNK_ID, _DIGIT_RE=_DIGIT_RE) tokens_file.write((' '.join([str(tok) for tok in token_ids]) + '\n')) else: print ('Target path %s exists' % target_path)
[ "def", "data_to_token_ids", "(", "data_path", ",", "target_path", ",", "vocabulary_path", ",", "tokenizer", "=", "None", ",", "normalize_digits", "=", "True", ",", "UNK_ID", "=", "3", ",", "_DIGIT_RE", "=", "re", ".", "compile", "(", "'\\\\d'", ")", ")", ":", "if", "(", "not", "gfile", ".", "Exists", "(", "target_path", ")", ")", ":", "print", "(", "'Tokenizing data in %s'", "%", "data_path", ")", "(", "vocab", ",", "_", ")", "=", "initialize_vocabulary", "(", "vocabulary_path", ")", "with", "gfile", ".", "GFile", "(", "data_path", ",", "mode", "=", "'rb'", ")", "as", "data_file", ":", "with", "gfile", ".", "GFile", "(", "target_path", ",", "mode", "=", "'w'", ")", "as", "tokens_file", ":", "counter", "=", "0", "for", "line", "in", "data_file", ":", "counter", "+=", "1", "if", "(", "(", "counter", "%", "100000", ")", "==", "0", ")", ":", "print", "(", "' tokenizing line %d'", "%", "counter", ")", "token_ids", "=", "sentence_to_token_ids", "(", "line", ",", "vocab", ",", "tokenizer", ",", "normalize_digits", ",", "UNK_ID", "=", "UNK_ID", ",", "_DIGIT_RE", "=", "_DIGIT_RE", ")", "tokens_file", ".", "write", "(", "(", "' '", ".", "join", "(", "[", "str", "(", "tok", ")", "for", "tok", "in", "token_ids", "]", ")", "+", "'\\n'", ")", ")", "else", ":", "print", "(", "'Target path %s exists'", "%", "target_path", ")" ]
tokenize data file and turn into token-ids using given vocabulary file .
train
true
46,321
def summary(): assess_tables() return s3_rest_controller()
[ "def", "summary", "(", ")", ":", "assess_tables", "(", ")", "return", "s3_rest_controller", "(", ")" ]
restful crud controller .
train
false
46,322
def json_int_dttm_ser(obj): val = base_json_conv(obj) if (val is not None): return val if isinstance(obj, datetime): obj = datetime_to_epoch(obj) elif isinstance(obj, date): obj = ((obj - EPOCH.date()).total_seconds() * 1000) else: raise TypeError(u'Unserializable object {} of type {}'.format(obj, type(obj))) return obj
[ "def", "json_int_dttm_ser", "(", "obj", ")", ":", "val", "=", "base_json_conv", "(", "obj", ")", "if", "(", "val", "is", "not", "None", ")", ":", "return", "val", "if", "isinstance", "(", "obj", ",", "datetime", ")", ":", "obj", "=", "datetime_to_epoch", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "date", ")", ":", "obj", "=", "(", "(", "obj", "-", "EPOCH", ".", "date", "(", ")", ")", ".", "total_seconds", "(", ")", "*", "1000", ")", "else", ":", "raise", "TypeError", "(", "u'Unserializable object {} of type {}'", ".", "format", "(", "obj", ",", "type", "(", "obj", ")", ")", ")", "return", "obj" ]
json serializer that deals with dates .
train
true
46,323
def with_attributes(*use_attributes): vmap = {False: 0, True: 1} values = [vmap[bool(v)] for v in use_attributes] def set_value(function): try: function.ATTRIBUTES.update(values) except AttributeError: function.ATTRIBUTES = set(values) return function return set_value
[ "def", "with_attributes", "(", "*", "use_attributes", ")", ":", "vmap", "=", "{", "False", ":", "0", ",", "True", ":", "1", "}", "values", "=", "[", "vmap", "[", "bool", "(", "v", ")", "]", "for", "v", "in", "use_attributes", "]", "def", "set_value", "(", "function", ")", ":", "try", ":", "function", ".", "ATTRIBUTES", ".", "update", "(", "values", ")", "except", "AttributeError", ":", "function", ".", "ATTRIBUTES", "=", "set", "(", "values", ")", "return", "function", "return", "set_value" ]
decorator for benchmarks that use attributes .
train
false
46,324
def auth(username, password): _cred = __get_yubico_users(username) client = Yubico(_cred['id'], _cred['key']) try: if client.verify(password): return True else: return False except yubico_exceptions.StatusCodeError as e: log.info('Unable to verify YubiKey `{0}`'.format(e)) return False
[ "def", "auth", "(", "username", ",", "password", ")", ":", "_cred", "=", "__get_yubico_users", "(", "username", ")", "client", "=", "Yubico", "(", "_cred", "[", "'id'", "]", ",", "_cred", "[", "'key'", "]", ")", "try", ":", "if", "client", ".", "verify", "(", "password", ")", ":", "return", "True", "else", ":", "return", "False", "except", "yubico_exceptions", ".", "StatusCodeError", "as", "e", ":", "log", ".", "info", "(", "'Unable to verify YubiKey `{0}`'", ".", "format", "(", "e", ")", ")", "return", "False" ]
check for account id on all non-root urls .
train
true
46,325
def get_cli_body_ssh_vrf(module, command, response): command_splitted = command.split('|') if ((len(command_splitted) > 2) or ('show run' in command)): body = response elif (('xml' in response[0]) or (response[0] == '\n')): body = [] else: body = [json.loads(response[0])] return body
[ "def", "get_cli_body_ssh_vrf", "(", "module", ",", "command", ",", "response", ")", ":", "command_splitted", "=", "command", ".", "split", "(", "'|'", ")", "if", "(", "(", "len", "(", "command_splitted", ")", ">", "2", ")", "or", "(", "'show run'", "in", "command", ")", ")", ":", "body", "=", "response", "elif", "(", "(", "'xml'", "in", "response", "[", "0", "]", ")", "or", "(", "response", "[", "0", "]", "==", "'\\n'", ")", ")", ":", "body", "=", "[", "]", "else", ":", "body", "=", "[", "json", ".", "loads", "(", "response", "[", "0", "]", ")", "]", "return", "body" ]
get response for when transport=cli .
train
false
46,326
def _fractional_power_pade(R, t, m): if ((m < 1) or (int(m) != m)): raise ValueError('expected a positive integer m') if (not ((-1) < t < 1)): raise ValueError('expected -1 < t < 1') R = np.asarray(R) if ((len(R.shape) != 2) or (R.shape[0] != R.shape[1])): raise ValueError('expected an upper triangular square matrix') (n, n) = R.shape ident = np.identity(n) Y = (R * _fractional_power_pade_constant((2 * m), t)) for j in range(((2 * m) - 1), 0, (-1)): rhs = (R * _fractional_power_pade_constant(j, t)) Y = solve_triangular((ident + Y), rhs) U = (ident + Y) if (not np.array_equal(U, np.triu(U))): raise Exception('internal inconsistency') return U
[ "def", "_fractional_power_pade", "(", "R", ",", "t", ",", "m", ")", ":", "if", "(", "(", "m", "<", "1", ")", "or", "(", "int", "(", "m", ")", "!=", "m", ")", ")", ":", "raise", "ValueError", "(", "'expected a positive integer m'", ")", "if", "(", "not", "(", "(", "-", "1", ")", "<", "t", "<", "1", ")", ")", ":", "raise", "ValueError", "(", "'expected -1 < t < 1'", ")", "R", "=", "np", ".", "asarray", "(", "R", ")", "if", "(", "(", "len", "(", "R", ".", "shape", ")", "!=", "2", ")", "or", "(", "R", ".", "shape", "[", "0", "]", "!=", "R", ".", "shape", "[", "1", "]", ")", ")", ":", "raise", "ValueError", "(", "'expected an upper triangular square matrix'", ")", "(", "n", ",", "n", ")", "=", "R", ".", "shape", "ident", "=", "np", ".", "identity", "(", "n", ")", "Y", "=", "(", "R", "*", "_fractional_power_pade_constant", "(", "(", "2", "*", "m", ")", ",", "t", ")", ")", "for", "j", "in", "range", "(", "(", "(", "2", "*", "m", ")", "-", "1", ")", ",", "0", ",", "(", "-", "1", ")", ")", ":", "rhs", "=", "(", "R", "*", "_fractional_power_pade_constant", "(", "j", ",", "t", ")", ")", "Y", "=", "solve_triangular", "(", "(", "ident", "+", "Y", ")", ",", "rhs", ")", "U", "=", "(", "ident", "+", "Y", ")", "if", "(", "not", "np", ".", "array_equal", "(", "U", ",", "np", ".", "triu", "(", "U", ")", ")", ")", ":", "raise", "Exception", "(", "'internal inconsistency'", ")", "return", "U" ]
evaluate the pade approximation of a fractional matrix power .
train
false
46,327
def clues_login(text): text = text.lower() for clue in ('username', 'password', 'invalid', 'authen', 'access denied'): if (clue in text): return True return False
[ "def", "clues_login", "(", "text", ")", ":", "text", "=", "text", ".", "lower", "(", ")", "for", "clue", "in", "(", "'username'", ",", "'password'", ",", "'invalid'", ",", "'authen'", ",", "'access denied'", ")", ":", "if", "(", "clue", "in", "text", ")", ":", "return", "True", "return", "False" ]
check for any "failed login" clues in the response code .
train
false
46,328
def calculate_compounded_added_taxes(price, tax_groups): if price.includes_tax: return _calc_compounded_added_taxes_from_taxful(price, tax_groups) else: return _calc_compounded_added_taxes_from_taxless(price, tax_groups)
[ "def", "calculate_compounded_added_taxes", "(", "price", ",", "tax_groups", ")", ":", "if", "price", ".", "includes_tax", ":", "return", "_calc_compounded_added_taxes_from_taxful", "(", "price", ",", "tax_groups", ")", "else", ":", "return", "_calc_compounded_added_taxes_from_taxless", "(", "price", ",", "tax_groups", ")" ]
calculate compounded and added taxes from given groups of taxes .
train
false
46,329
def get_ext_list(): exts = [] for ext in cfg.prio_sort_list(): ext = ext.strip() if (not ext.startswith('.')): ext = ('.' + ext) exts.append(ext) return exts
[ "def", "get_ext_list", "(", ")", ":", "exts", "=", "[", "]", "for", "ext", "in", "cfg", ".", "prio_sort_list", "(", ")", ":", "ext", "=", "ext", ".", "strip", "(", ")", "if", "(", "not", "ext", ".", "startswith", "(", "'.'", ")", ")", ":", "ext", "=", "(", "'.'", "+", "ext", ")", "exts", ".", "append", "(", "ext", ")", "return", "exts" ]
return priority extension list .
train
false
46,331
def _image_child_entry_delete_all(child_model_cls, image_id, delete_time=None, session=None): session = (session or get_session()) query = session.query(child_model_cls).filter_by(image_id=image_id).filter_by(deleted=False) delete_time = (delete_time or timeutils.utcnow()) count = query.update({'deleted': True, 'deleted_at': delete_time}) return count
[ "def", "_image_child_entry_delete_all", "(", "child_model_cls", ",", "image_id", ",", "delete_time", "=", "None", ",", "session", "=", "None", ")", ":", "session", "=", "(", "session", "or", "get_session", "(", ")", ")", "query", "=", "session", ".", "query", "(", "child_model_cls", ")", ".", "filter_by", "(", "image_id", "=", "image_id", ")", ".", "filter_by", "(", "deleted", "=", "False", ")", "delete_time", "=", "(", "delete_time", "or", "timeutils", ".", "utcnow", "(", ")", ")", "count", "=", "query", ".", "update", "(", "{", "'deleted'", ":", "True", ",", "'deleted_at'", ":", "delete_time", "}", ")", "return", "count" ]
deletes all the child entries for the given image id .
train
false
46,332
@core_helper def resource_view_full_page(resource_view): view_plugin = datapreview.get_view_plugin(resource_view['view_type']) return view_plugin.info().get('full_page_edit', False)
[ "@", "core_helper", "def", "resource_view_full_page", "(", "resource_view", ")", ":", "view_plugin", "=", "datapreview", ".", "get_view_plugin", "(", "resource_view", "[", "'view_type'", "]", ")", "return", "view_plugin", ".", "info", "(", ")", ".", "get", "(", "'full_page_edit'", ",", "False", ")" ]
returns if the edit view page should be full page .
train
false
46,333
def test_set_join_node_file_input(tmpdir): wd = str(tmpdir) os.chdir(wd) open(u'test.nii', u'w+').close() open(u'test2.nii', u'w+').close() wf = pe.Workflow(name=u'test') inputspec = pe.Node(IdentityInterface(fields=[u'n']), name=u'inputspec') inputspec.iterables = [(u'n', [os.path.join(wd, u'test.nii'), os.path.join(wd, u'test2.nii')])] pre_join1 = pe.Node(IdentityInterface(fields=[u'n']), name=u'pre_join1') wf.connect(inputspec, u'n', pre_join1, u'n') join = pe.JoinNode(PickFirst(), joinsource=u'inputspec', joinfield=u'in_files', name=u'join') wf.connect(pre_join1, u'n', join, u'in_files') wf.run()
[ "def", "test_set_join_node_file_input", "(", "tmpdir", ")", ":", "wd", "=", "str", "(", "tmpdir", ")", "os", ".", "chdir", "(", "wd", ")", "open", "(", "u'test.nii'", ",", "u'w+'", ")", ".", "close", "(", ")", "open", "(", "u'test2.nii'", ",", "u'w+'", ")", ".", "close", "(", ")", "wf", "=", "pe", ".", "Workflow", "(", "name", "=", "u'test'", ")", "inputspec", "=", "pe", ".", "Node", "(", "IdentityInterface", "(", "fields", "=", "[", "u'n'", "]", ")", ",", "name", "=", "u'inputspec'", ")", "inputspec", ".", "iterables", "=", "[", "(", "u'n'", ",", "[", "os", ".", "path", ".", "join", "(", "wd", ",", "u'test.nii'", ")", ",", "os", ".", "path", ".", "join", "(", "wd", ",", "u'test2.nii'", ")", "]", ")", "]", "pre_join1", "=", "pe", ".", "Node", "(", "IdentityInterface", "(", "fields", "=", "[", "u'n'", "]", ")", ",", "name", "=", "u'pre_join1'", ")", "wf", ".", "connect", "(", "inputspec", ",", "u'n'", ",", "pre_join1", ",", "u'n'", ")", "join", "=", "pe", ".", "JoinNode", "(", "PickFirst", "(", ")", ",", "joinsource", "=", "u'inputspec'", ",", "joinfield", "=", "u'in_files'", ",", "name", "=", "u'join'", ")", "wf", ".", "connect", "(", "pre_join1", ",", "u'n'", ",", "join", ",", "u'in_files'", ")", "wf", ".", "run", "(", ")" ]
test collecting join inputs to a set .
train
false
46,335
def handle_server_api(output, kwargs): name = kwargs.get('keyword') if (not name): name = kwargs.get('name') if name: server = config.get_config('servers', name) if server: server.set_dict(kwargs) old_name = name else: config.ConfigServer(name, kwargs) old_name = None Downloader.do.update_server(old_name, name) return name
[ "def", "handle_server_api", "(", "output", ",", "kwargs", ")", ":", "name", "=", "kwargs", ".", "get", "(", "'keyword'", ")", "if", "(", "not", "name", ")", ":", "name", "=", "kwargs", ".", "get", "(", "'name'", ")", "if", "name", ":", "server", "=", "config", ".", "get_config", "(", "'servers'", ",", "name", ")", "if", "server", ":", "server", ".", "set_dict", "(", "kwargs", ")", "old_name", "=", "name", "else", ":", "config", ".", "ConfigServer", "(", "name", ",", "kwargs", ")", "old_name", "=", "None", "Downloader", ".", "do", ".", "update_server", "(", "old_name", ",", "name", ")", "return", "name" ]
special handler for api-call set_config [servers] .
train
false
46,336
def binary_erosion(input, structure=None, iterations=1, mask=None, output=None, border_value=0, origin=0, brute_force=False): return _binary_erosion(input, structure, iterations, mask, output, border_value, origin, 0, brute_force)
[ "def", "binary_erosion", "(", "input", ",", "structure", "=", "None", ",", "iterations", "=", "1", ",", "mask", "=", "None", ",", "output", "=", "None", ",", "border_value", "=", "0", ",", "origin", "=", "0", ",", "brute_force", "=", "False", ")", ":", "return", "_binary_erosion", "(", "input", ",", "structure", ",", "iterations", ",", "mask", ",", "output", ",", "border_value", ",", "origin", ",", "0", ",", "brute_force", ")" ]
multi-dimensional binary erosion with a given structuring element .
train
false
46,337
def gce_provisioner(zone, project, ssh_public_key, gce_credentials=None): key = Key.fromString(bytes(ssh_public_key)) credentials = gce_credentials_from_config(gce_credentials) compute = discovery.build('compute', 'v1', credentials=credentials) return GCEProvisioner(instance_builder=GCEInstanceBuilder(zone=unicode(zone), project=unicode(project), compute=compute), ssh_public_key=key)
[ "def", "gce_provisioner", "(", "zone", ",", "project", ",", "ssh_public_key", ",", "gce_credentials", "=", "None", ")", ":", "key", "=", "Key", ".", "fromString", "(", "bytes", "(", "ssh_public_key", ")", ")", "credentials", "=", "gce_credentials_from_config", "(", "gce_credentials", ")", "compute", "=", "discovery", ".", "build", "(", "'compute'", ",", "'v1'", ",", "credentials", "=", "credentials", ")", "return", "GCEProvisioner", "(", "instance_builder", "=", "GCEInstanceBuilder", "(", "zone", "=", "unicode", "(", "zone", ")", ",", "project", "=", "unicode", "(", "project", ")", ",", "compute", "=", "compute", ")", ",", "ssh_public_key", "=", "key", ")" ]
create an :class:iprovisioner for provisioning nodes on gce .
train
false
46,338
def is_valid_image_name(image_name): regex = re.compile('[\\w\\(\\)\\.\\-\\/_]{3,128}$') try: return (regex.match(image_name) is not None) except TypeError: return False
[ "def", "is_valid_image_name", "(", "image_name", ")", ":", "regex", "=", "re", ".", "compile", "(", "'[\\\\w\\\\(\\\\)\\\\.\\\\-\\\\/_]{3,128}$'", ")", "try", ":", "return", "(", "regex", ".", "match", "(", "image_name", ")", "is", "not", "None", ")", "except", "TypeError", ":", "return", "False" ]
check if image_name is a valid aws image name 1 .
train
false
46,339
def _wrap(element, output, wrapper=u''): output.append(wrapper) if element.text: output.append(_collapse_whitespace(element.text)) for child in element: _element_to_text(child, output) output.append(wrapper)
[ "def", "_wrap", "(", "element", ",", "output", ",", "wrapper", "=", "u''", ")", ":", "output", ".", "append", "(", "wrapper", ")", "if", "element", ".", "text", ":", "output", ".", "append", "(", "_collapse_whitespace", "(", "element", ".", "text", ")", ")", "for", "child", "in", "element", ":", "_element_to_text", "(", "child", ",", "output", ")", "output", ".", "append", "(", "wrapper", ")" ]
recursively extracts text from element .
train
false
46,340
def timedelta_to_integral_seconds(delta): return int(delta.total_seconds())
[ "def", "timedelta_to_integral_seconds", "(", "delta", ")", ":", "return", "int", "(", "delta", ".", "total_seconds", "(", ")", ")" ]
convert a pd .
train
false
46,341
def test_posdef_symmetric3(): data = np.array([[1.0, 1], [1, 1]], dtype=theano.config.floatX) assert (mv.posdef(data) == 0)
[ "def", "test_posdef_symmetric3", "(", ")", ":", "data", "=", "np", ".", "array", "(", "[", "[", "1.0", ",", "1", "]", ",", "[", "1", ",", "1", "]", "]", ",", "dtype", "=", "theano", ".", "config", ".", "floatX", ")", "assert", "(", "mv", ".", "posdef", "(", "data", ")", "==", "0", ")" ]
the test return 0 if the matrix has 0 eigenvalue .
train
false
46,342
def WebRootCheck(path): p = re.sub('/+', '/', ('/%s/' % path))[:(-1)] if (p != CleanText(p, banned=CleanText.NONPATH).clean): raise ValueError(('Invalid web root: %s' % path)) return p
[ "def", "WebRootCheck", "(", "path", ")", ":", "p", "=", "re", ".", "sub", "(", "'/+'", ",", "'/'", ",", "(", "'/%s/'", "%", "path", ")", ")", "[", ":", "(", "-", "1", ")", "]", "if", "(", "p", "!=", "CleanText", "(", "p", ",", "banned", "=", "CleanText", ".", "NONPATH", ")", ".", "clean", ")", ":", "raise", "ValueError", "(", "(", "'Invalid web root: %s'", "%", "path", ")", ")", "return", "p" ]
verify that a string is a valid web root path .
train
false
46,343
def template_str(tem, queue=False, **kwargs): conflict = _check_queue(queue, kwargs) if (conflict is not None): return conflict try: st_ = salt.state.State(__opts__, proxy=__proxy__) except NameError: st_ = salt.state.State(__opts__) ret = st_.call_template_str(tem) _set_retcode(ret) return ret
[ "def", "template_str", "(", "tem", ",", "queue", "=", "False", ",", "**", "kwargs", ")", ":", "conflict", "=", "_check_queue", "(", "queue", ",", "kwargs", ")", "if", "(", "conflict", "is", "not", "None", ")", ":", "return", "conflict", "try", ":", "st_", "=", "salt", ".", "state", ".", "State", "(", "__opts__", ",", "proxy", "=", "__proxy__", ")", "except", "NameError", ":", "st_", "=", "salt", ".", "state", ".", "State", "(", "__opts__", ")", "ret", "=", "st_", ".", "call_template_str", "(", "tem", ")", "_set_retcode", "(", "ret", ")", "return", "ret" ]
execute the information stored in a string from an sls template cli example: .
train
false
46,345
def bad_fetch(nzo, url, msg='', content=False): if msg: msg = unicoder(msg) else: msg = '' nzo.status = Status.FAILED if url: nzo.filename = url nzo.final_name = url.strip() if content: msg = T('Unusable NZB file') else: msg = (T('URL Fetching failed; %s') % msg) nzo.fail_msg = msg notifier.send_notification((T('URL Fetching failed; %s') % ''), ('%s\n%s' % (msg, url)), 'other') if (cfg.email_endjob() > 0): emailer.badfetch_mail(msg, url) NzbQueue.do.remove(nzo.nzo_id, add_to_history=True)
[ "def", "bad_fetch", "(", "nzo", ",", "url", ",", "msg", "=", "''", ",", "content", "=", "False", ")", ":", "if", "msg", ":", "msg", "=", "unicoder", "(", "msg", ")", "else", ":", "msg", "=", "''", "nzo", ".", "status", "=", "Status", ".", "FAILED", "if", "url", ":", "nzo", ".", "filename", "=", "url", "nzo", ".", "final_name", "=", "url", ".", "strip", "(", ")", "if", "content", ":", "msg", "=", "T", "(", "'Unusable NZB file'", ")", "else", ":", "msg", "=", "(", "T", "(", "'URL Fetching failed; %s'", ")", "%", "msg", ")", "nzo", ".", "fail_msg", "=", "msg", "notifier", ".", "send_notification", "(", "(", "T", "(", "'URL Fetching failed; %s'", ")", "%", "''", ")", ",", "(", "'%s\\n%s'", "%", "(", "msg", ",", "url", ")", ")", ",", "'other'", ")", "if", "(", "cfg", ".", "email_endjob", "(", ")", ">", "0", ")", ":", "emailer", ".", "badfetch_mail", "(", "msg", ",", "url", ")", "NzbQueue", ".", "do", ".", "remove", "(", "nzo", ".", "nzo_id", ",", "add_to_history", "=", "True", ")" ]
create history entry for failed url fetch msg : message to be logged retry : make retry link in history content : report in history that cause is a bad nzb file .
train
false
46,349
def read_data_list(ofile): data = [next(ofile)] if (data[0].strip()[0] == '{'): raise ValueError('This looks like a sparse ARFF: not supported yet') data.extend([i for i in ofile]) return data
[ "def", "read_data_list", "(", "ofile", ")", ":", "data", "=", "[", "next", "(", "ofile", ")", "]", "if", "(", "data", "[", "0", "]", ".", "strip", "(", ")", "[", "0", "]", "==", "'{'", ")", ":", "raise", "ValueError", "(", "'This looks like a sparse ARFF: not supported yet'", ")", "data", ".", "extend", "(", "[", "i", "for", "i", "in", "ofile", "]", ")", "return", "data" ]
read each line of the iterable and put it in a list .
train
false
46,350
def random_crop_generator(generator, crop_size=(128, 128)): if (type(crop_size) not in (tuple, list)): crop_size = [crop_size, crop_size] elif (len(crop_size) == 2): crop_size = list(crop_size) else: raise ValueError('invalid crop_size') for (data, seg) in generator: lb_x = np.random.randint(0, (data.shape[2] - crop_size[0])) lb_y = np.random.randint(0, (data.shape[3] - crop_size[1])) data = data[:, :, lb_x:(lb_x + crop_size[0]), lb_y:(lb_y + crop_size[1])] seg = seg[:, :, lb_x:(lb_x + crop_size[0]), lb_y:(lb_y + crop_size[1])] (yield (data, seg))
[ "def", "random_crop_generator", "(", "generator", ",", "crop_size", "=", "(", "128", ",", "128", ")", ")", ":", "if", "(", "type", "(", "crop_size", ")", "not", "in", "(", "tuple", ",", "list", ")", ")", ":", "crop_size", "=", "[", "crop_size", ",", "crop_size", "]", "elif", "(", "len", "(", "crop_size", ")", "==", "2", ")", ":", "crop_size", "=", "list", "(", "crop_size", ")", "else", ":", "raise", "ValueError", "(", "'invalid crop_size'", ")", "for", "(", "data", ",", "seg", ")", "in", "generator", ":", "lb_x", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "(", "data", ".", "shape", "[", "2", "]", "-", "crop_size", "[", "0", "]", ")", ")", "lb_y", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "(", "data", ".", "shape", "[", "3", "]", "-", "crop_size", "[", "1", "]", ")", ")", "data", "=", "data", "[", ":", ",", ":", ",", "lb_x", ":", "(", "lb_x", "+", "crop_size", "[", "0", "]", ")", ",", "lb_y", ":", "(", "lb_y", "+", "crop_size", "[", "1", "]", ")", "]", "seg", "=", "seg", "[", ":", ",", ":", ",", "lb_x", ":", "(", "lb_x", "+", "crop_size", "[", "0", "]", ")", ",", "lb_y", ":", "(", "lb_y", "+", "crop_size", "[", "1", "]", ")", "]", "(", "yield", "(", "data", ",", "seg", ")", ")" ]
yields a random crop of size crop_size .
train
false
46,351
def cms_hash_token(token_id, mode='md5'): if (token_id is None): return None if (is_asn1_token(token_id) or is_pkiz(token_id)): hasher = hashlib.new(mode) if isinstance(token_id, six.text_type): token_id = token_id.encode('utf-8') hasher.update(token_id) return hasher.hexdigest() else: return token_id
[ "def", "cms_hash_token", "(", "token_id", ",", "mode", "=", "'md5'", ")", ":", "if", "(", "token_id", "is", "None", ")", ":", "return", "None", "if", "(", "is_asn1_token", "(", "token_id", ")", "or", "is_pkiz", "(", "token_id", ")", ")", ":", "hasher", "=", "hashlib", ".", "new", "(", "mode", ")", "if", "isinstance", "(", "token_id", ",", "six", ".", "text_type", ")", ":", "token_id", "=", "token_id", ".", "encode", "(", "'utf-8'", ")", "hasher", ".", "update", "(", "token_id", ")", "return", "hasher", ".", "hexdigest", "(", ")", "else", ":", "return", "token_id" ]
hash pki tokens .
train
false
46,352
def structure_to_mongo(structure, course_context=None): with TIMER.timer('structure_to_mongo', course_context) as tagger: tagger.measure('blocks', len(structure['blocks'])) check('BlockKey', structure['root']) check('dict(BlockKey: BlockData)', structure['blocks']) for block in structure['blocks'].itervalues(): if ('children' in block.fields): check('list(BlockKey)', block.fields['children']) new_structure = dict(structure) new_structure['blocks'] = [] for (block_key, block) in structure['blocks'].iteritems(): new_block = dict(block.to_storable()) new_block.setdefault('block_type', block_key.type) new_block['block_id'] = block_key.id new_structure['blocks'].append(new_block) return new_structure
[ "def", "structure_to_mongo", "(", "structure", ",", "course_context", "=", "None", ")", ":", "with", "TIMER", ".", "timer", "(", "'structure_to_mongo'", ",", "course_context", ")", "as", "tagger", ":", "tagger", ".", "measure", "(", "'blocks'", ",", "len", "(", "structure", "[", "'blocks'", "]", ")", ")", "check", "(", "'BlockKey'", ",", "structure", "[", "'root'", "]", ")", "check", "(", "'dict(BlockKey: BlockData)'", ",", "structure", "[", "'blocks'", "]", ")", "for", "block", "in", "structure", "[", "'blocks'", "]", ".", "itervalues", "(", ")", ":", "if", "(", "'children'", "in", "block", ".", "fields", ")", ":", "check", "(", "'list(BlockKey)'", ",", "block", ".", "fields", "[", "'children'", "]", ")", "new_structure", "=", "dict", "(", "structure", ")", "new_structure", "[", "'blocks'", "]", "=", "[", "]", "for", "(", "block_key", ",", "block", ")", "in", "structure", "[", "'blocks'", "]", ".", "iteritems", "(", ")", ":", "new_block", "=", "dict", "(", "block", ".", "to_storable", "(", ")", ")", "new_block", ".", "setdefault", "(", "'block_type'", ",", "block_key", ".", "type", ")", "new_block", "[", "'block_id'", "]", "=", "block_key", ".", "id", "new_structure", "[", "'blocks'", "]", ".", "append", "(", "new_block", ")", "return", "new_structure" ]
converts the blocks key from a map {blockkey: block_data} to a list [block_data] .
train
false
46,353
def gemset_delete(ruby, gemset, runas=None): return _rvm_do(ruby, ['rvm', '--force', 'gemset', 'delete', gemset], runas=runas)
[ "def", "gemset_delete", "(", "ruby", ",", "gemset", ",", "runas", "=", "None", ")", ":", "return", "_rvm_do", "(", "ruby", ",", "[", "'rvm'", ",", "'--force'", ",", "'gemset'", ",", "'delete'", ",", "gemset", "]", ",", "runas", "=", "runas", ")" ]
delete a gemset ruby the ruby version to which the gemset belongs gemset the gemset to delete runas the user under which to run rvm .
train
false
46,355
@contextlib.contextmanager def example_script(name, output='success'): tempdir = tempfile.mkdtemp(prefix='tmp-pexpect-test') try: script_path = os.path.join(tempdir, name) with open(script_path, 'w') as f: f.write(('#!/bin/sh\necho "%s"' % (output,))) try: os.chmod(script_path, 493) (yield tempdir) finally: os.remove(script_path) finally: os.rmdir(tempdir)
[ "@", "contextlib", ".", "contextmanager", "def", "example_script", "(", "name", ",", "output", "=", "'success'", ")", ":", "tempdir", "=", "tempfile", ".", "mkdtemp", "(", "prefix", "=", "'tmp-pexpect-test'", ")", "try", ":", "script_path", "=", "os", ".", "path", ".", "join", "(", "tempdir", ",", "name", ")", "with", "open", "(", "script_path", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "(", "'#!/bin/sh\\necho \"%s\"'", "%", "(", "output", ",", ")", ")", ")", "try", ":", "os", ".", "chmod", "(", "script_path", ",", "493", ")", "(", "yield", "tempdir", ")", "finally", ":", "os", ".", "remove", "(", "script_path", ")", "finally", ":", "os", ".", "rmdir", "(", "tempdir", ")" ]
helper to create a temporary shell script that tests can run .
train
false
46,356
def _appveyor_api_request(path): r = requests.get('{}/{}'.format(_appveyor_api, path), headers={'Content-Type': 'application/json'}) r.raise_for_status() return r.json()
[ "def", "_appveyor_api_request", "(", "path", ")", ":", "r", "=", "requests", ".", "get", "(", "'{}/{}'", ".", "format", "(", "_appveyor_api", ",", "path", ")", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", "}", ")", "r", ".", "raise_for_status", "(", ")", "return", "r", ".", "json", "(", ")" ]
make an appveyor api request .
train
false
46,358
def sequences_get_mask(sequences, pad_val=0): mask = np.ones_like(sequences) for (i, seq) in enumerate(sequences): for i_w in reversed(range(len(seq))): if (seq[i_w] == pad_val): mask[(i, i_w)] = 0 else: break return mask
[ "def", "sequences_get_mask", "(", "sequences", ",", "pad_val", "=", "0", ")", ":", "mask", "=", "np", ".", "ones_like", "(", "sequences", ")", "for", "(", "i", ",", "seq", ")", "in", "enumerate", "(", "sequences", ")", ":", "for", "i_w", "in", "reversed", "(", "range", "(", "len", "(", "seq", ")", ")", ")", ":", "if", "(", "seq", "[", "i_w", "]", "==", "pad_val", ")", ":", "mask", "[", "(", "i", ",", "i_w", ")", "]", "=", "0", "else", ":", "break", "return", "mask" ]
return mask for sequences .
train
true
46,359
def _wav2array(nchannels, sampwidth, data): (num_samples, remainder) = divmod(len(data), (sampwidth * nchannels)) if (remainder > 0): raise ValueError('The length of data is not a multiple of sampwidth * num_channels.') if (sampwidth > 4): raise ValueError('sampwidth must not be greater than 4.') if (sampwidth == 3): a = _np.empty((num_samples, nchannels, 4), dtype=_np.uint8) raw_bytes = _np.fromstring(data, dtype=_np.uint8) a[:, :, :sampwidth] = raw_bytes.reshape((-1), nchannels, sampwidth) a[:, :, sampwidth:] = ((a[:, :, (sampwidth - 1):sampwidth] >> 7) * 255) result = a.view('<i4').reshape(a.shape[:(-1)]) else: dt_char = ('u' if (sampwidth == 1) else 'i') a = _np.fromstring(data, dtype=('<%s%d' % (dt_char, sampwidth))) result = a.reshape((-1), nchannels) return result
[ "def", "_wav2array", "(", "nchannels", ",", "sampwidth", ",", "data", ")", ":", "(", "num_samples", ",", "remainder", ")", "=", "divmod", "(", "len", "(", "data", ")", ",", "(", "sampwidth", "*", "nchannels", ")", ")", "if", "(", "remainder", ">", "0", ")", ":", "raise", "ValueError", "(", "'The length of data is not a multiple of sampwidth * num_channels.'", ")", "if", "(", "sampwidth", ">", "4", ")", ":", "raise", "ValueError", "(", "'sampwidth must not be greater than 4.'", ")", "if", "(", "sampwidth", "==", "3", ")", ":", "a", "=", "_np", ".", "empty", "(", "(", "num_samples", ",", "nchannels", ",", "4", ")", ",", "dtype", "=", "_np", ".", "uint8", ")", "raw_bytes", "=", "_np", ".", "fromstring", "(", "data", ",", "dtype", "=", "_np", ".", "uint8", ")", "a", "[", ":", ",", ":", ",", ":", "sampwidth", "]", "=", "raw_bytes", ".", "reshape", "(", "(", "-", "1", ")", ",", "nchannels", ",", "sampwidth", ")", "a", "[", ":", ",", ":", ",", "sampwidth", ":", "]", "=", "(", "(", "a", "[", ":", ",", ":", ",", "(", "sampwidth", "-", "1", ")", ":", "sampwidth", "]", ">>", "7", ")", "*", "255", ")", "result", "=", "a", ".", "view", "(", "'<i4'", ")", ".", "reshape", "(", "a", ".", "shape", "[", ":", "(", "-", "1", ")", "]", ")", "else", ":", "dt_char", "=", "(", "'u'", "if", "(", "sampwidth", "==", "1", ")", "else", "'i'", ")", "a", "=", "_np", ".", "fromstring", "(", "data", ",", "dtype", "=", "(", "'<%s%d'", "%", "(", "dt_char", ",", "sampwidth", ")", ")", ")", "result", "=", "a", ".", "reshape", "(", "(", "-", "1", ")", ",", "nchannels", ")", "return", "result" ]
data must be the string containing the bytes from the wav file .
train
false
46,361
@pytest.mark.skipif('HAS_BEAUTIFUL_SOUP') def test_htmlinputter_no_bs4(): inputter = html.HTMLInputter() with pytest.raises(core.OptionalTableImportError): inputter.process_lines([])
[ "@", "pytest", ".", "mark", ".", "skipif", "(", "'HAS_BEAUTIFUL_SOUP'", ")", "def", "test_htmlinputter_no_bs4", "(", ")", ":", "inputter", "=", "html", ".", "HTMLInputter", "(", ")", "with", "pytest", ".", "raises", "(", "core", ".", "OptionalTableImportError", ")", ":", "inputter", ".", "process_lines", "(", "[", "]", ")" ]
this should return an optionaltableimporterror if beautifulsoup is not installed .
train
false
46,362
@newrelic.agent.function_trace() def get_content_sections(src=''): sections = [] if src: attr = '[id]' selector = ((attr + ',').join(SECTION_TAGS) + attr) try: document = pq(src) except etree.ParserError: pass else: for element in document.find(selector): sections.append({'title': element.text, 'id': element.attrib.get('id')}) return sections
[ "@", "newrelic", ".", "agent", ".", "function_trace", "(", ")", "def", "get_content_sections", "(", "src", "=", "''", ")", ":", "sections", "=", "[", "]", "if", "src", ":", "attr", "=", "'[id]'", "selector", "=", "(", "(", "attr", "+", "','", ")", ".", "join", "(", "SECTION_TAGS", ")", "+", "attr", ")", "try", ":", "document", "=", "pq", "(", "src", ")", "except", "etree", ".", "ParserError", ":", "pass", "else", ":", "for", "element", "in", "document", ".", "find", "(", "selector", ")", ":", "sections", ".", "append", "(", "{", "'title'", ":", "element", ".", "text", ",", "'id'", ":", "element", ".", "attrib", ".", "get", "(", "'id'", ")", "}", ")", "return", "sections" ]
gets sections in a document .
train
false
46,364
def redirect_stream(system, target): if (target is None): target_fd = os.open(os.devnull, os.O_RDWR) else: target_fd = target.fileno() try: os.dup2(target_fd, system.fileno()) except OSError as err: raise DaemonError('Could not redirect {0} to {1}: {2}'.format(system, target, err))
[ "def", "redirect_stream", "(", "system", ",", "target", ")", ":", "if", "(", "target", "is", "None", ")", ":", "target_fd", "=", "os", ".", "open", "(", "os", ".", "devnull", ",", "os", ".", "O_RDWR", ")", "else", ":", "target_fd", "=", "target", ".", "fileno", "(", ")", "try", ":", "os", ".", "dup2", "(", "target_fd", ",", "system", ".", "fileno", "(", ")", ")", "except", "OSError", "as", "err", ":", "raise", "DaemonError", "(", "'Could not redirect {0} to {1}: {2}'", ".", "format", "(", "system", ",", "target", ",", "err", ")", ")" ]
redirect unix streams if none .
train
true
46,365
def get_output_filepaths(output_dir, fasta_fp): fasta_extensions = ['.fa', '.fasta', '.fna'] curr_fasta_out = basename(fasta_fp) for fasta_extension in fasta_extensions: curr_fasta_out = curr_fasta_out.replace(fasta_extension, '') curr_fasta_out += '_rev_primer_truncated.fna' output_fp = join(output_dir, curr_fasta_out) log_fp = join(output_dir, 'rev_primer_truncation.log') return (output_fp, log_fp)
[ "def", "get_output_filepaths", "(", "output_dir", ",", "fasta_fp", ")", ":", "fasta_extensions", "=", "[", "'.fa'", ",", "'.fasta'", ",", "'.fna'", "]", "curr_fasta_out", "=", "basename", "(", "fasta_fp", ")", "for", "fasta_extension", "in", "fasta_extensions", ":", "curr_fasta_out", "=", "curr_fasta_out", ".", "replace", "(", "fasta_extension", ",", "''", ")", "curr_fasta_out", "+=", "'_rev_primer_truncated.fna'", "output_fp", "=", "join", "(", "output_dir", ",", "curr_fasta_out", ")", "log_fp", "=", "join", "(", "output_dir", ",", "'rev_primer_truncation.log'", ")", "return", "(", "output_fp", ",", "log_fp", ")" ]
returns output fasta filepath and log filepath fasta_fp: fasta filepath output_dir: output directory .
train
false
46,366
def varnames(func, startindex=None): cache = getattr(func, '__dict__', {}) try: return cache['_varnames'] except KeyError: pass if inspect.isclass(func): try: func = func.__init__ except AttributeError: return () startindex = 1 else: if ((not inspect.isfunction(func)) and (not inspect.ismethod(func))): try: func = getattr(func, '__call__', func) except Exception: return () if (startindex is None): startindex = int(inspect.ismethod(func)) try: rawcode = func.__code__ except AttributeError: return () try: x = rawcode.co_varnames[startindex:rawcode.co_argcount] except AttributeError: x = () else: defaults = func.__defaults__ if defaults: x = x[:(- len(defaults))] try: cache['_varnames'] = x except TypeError: pass return x
[ "def", "varnames", "(", "func", ",", "startindex", "=", "None", ")", ":", "cache", "=", "getattr", "(", "func", ",", "'__dict__'", ",", "{", "}", ")", "try", ":", "return", "cache", "[", "'_varnames'", "]", "except", "KeyError", ":", "pass", "if", "inspect", ".", "isclass", "(", "func", ")", ":", "try", ":", "func", "=", "func", ".", "__init__", "except", "AttributeError", ":", "return", "(", ")", "startindex", "=", "1", "else", ":", "if", "(", "(", "not", "inspect", ".", "isfunction", "(", "func", ")", ")", "and", "(", "not", "inspect", ".", "ismethod", "(", "func", ")", ")", ")", ":", "try", ":", "func", "=", "getattr", "(", "func", ",", "'__call__'", ",", "func", ")", "except", "Exception", ":", "return", "(", ")", "if", "(", "startindex", "is", "None", ")", ":", "startindex", "=", "int", "(", "inspect", ".", "ismethod", "(", "func", ")", ")", "try", ":", "rawcode", "=", "func", ".", "__code__", "except", "AttributeError", ":", "return", "(", ")", "try", ":", "x", "=", "rawcode", ".", "co_varnames", "[", "startindex", ":", "rawcode", ".", "co_argcount", "]", "except", "AttributeError", ":", "x", "=", "(", ")", "else", ":", "defaults", "=", "func", ".", "__defaults__", "if", "defaults", ":", "x", "=", "x", "[", ":", "(", "-", "len", "(", "defaults", ")", ")", "]", "try", ":", "cache", "[", "'_varnames'", "]", "=", "x", "except", "TypeError", ":", "pass", "return", "x" ]
return argument name tuple for a function .
train
false
46,367
def is_cffi_func(obj): try: return (ffi.typeof(obj).kind == 'function') except TypeError: try: return (obj in _ool_func_types) except: return False
[ "def", "is_cffi_func", "(", "obj", ")", ":", "try", ":", "return", "(", "ffi", ".", "typeof", "(", "obj", ")", ".", "kind", "==", "'function'", ")", "except", "TypeError", ":", "try", ":", "return", "(", "obj", "in", "_ool_func_types", ")", "except", ":", "return", "False" ]
check whether the obj is a cffi function .
train
false
46,368
def get_pack_from_index(pack): if (not pack): raise ValueError('Pack name must be specified.') (index, _) = fetch_pack_index() return index.get(pack)
[ "def", "get_pack_from_index", "(", "pack", ")", ":", "if", "(", "not", "pack", ")", ":", "raise", "ValueError", "(", "'Pack name must be specified.'", ")", "(", "index", ",", "_", ")", "=", "fetch_pack_index", "(", ")", "return", "index", ".", "get", "(", "pack", ")" ]
search index by pack name .
train
false
46,369
def in_a_while(days=0, seconds=0, microseconds=0, milliseconds=0, minutes=0, hours=0, weeks=0, format=TIME_FORMAT): if (format is None): format = TIME_FORMAT return time_in_a_while(days, seconds, microseconds, milliseconds, minutes, hours, weeks).strftime(format)
[ "def", "in_a_while", "(", "days", "=", "0", ",", "seconds", "=", "0", ",", "microseconds", "=", "0", ",", "milliseconds", "=", "0", ",", "minutes", "=", "0", ",", "hours", "=", "0", ",", "weeks", "=", "0", ",", "format", "=", "TIME_FORMAT", ")", ":", "if", "(", "format", "is", "None", ")", ":", "format", "=", "TIME_FORMAT", "return", "time_in_a_while", "(", "days", ",", "seconds", ",", "microseconds", ",", "milliseconds", ",", "minutes", ",", "hours", ",", "weeks", ")", ".", "strftime", "(", "format", ")" ]
format of timedelta: timedelta .
train
true
46,371
def bswap(value): (hi, lo) = struct.unpack('>II', struct.pack('<Q', value)) return ((hi << 32) | lo)
[ "def", "bswap", "(", "value", ")", ":", "(", "hi", ",", "lo", ")", "=", "struct", ".", "unpack", "(", "'>II'", ",", "struct", ".", "pack", "(", "'<Q'", ",", "value", ")", ")", "return", "(", "(", "hi", "<<", "32", ")", "|", "lo", ")" ]
a byte-swap instruction in python .
train
false
46,372
def chisquare_effectsize(probs0, probs1, correction=None, cohen=True, axis=0): probs0 = np.asarray(probs0, float) probs1 = np.asarray(probs1, float) probs0 = (probs0 / probs0.sum(axis)) probs1 = (probs1 / probs1.sum(axis)) d2 = (((probs1 - probs0) ** 2) / probs0).sum(axis) if (correction is not None): (nobs, df) = correction diff = ((probs1 - probs0) / probs0).sum(axis) d2 = np.maximum(((((d2 * nobs) - diff) - df) / (nobs - 1.0)), 0) if cohen: return np.sqrt(d2) else: return d2
[ "def", "chisquare_effectsize", "(", "probs0", ",", "probs1", ",", "correction", "=", "None", ",", "cohen", "=", "True", ",", "axis", "=", "0", ")", ":", "probs0", "=", "np", ".", "asarray", "(", "probs0", ",", "float", ")", "probs1", "=", "np", ".", "asarray", "(", "probs1", ",", "float", ")", "probs0", "=", "(", "probs0", "/", "probs0", ".", "sum", "(", "axis", ")", ")", "probs1", "=", "(", "probs1", "/", "probs1", ".", "sum", "(", "axis", ")", ")", "d2", "=", "(", "(", "(", "probs1", "-", "probs0", ")", "**", "2", ")", "/", "probs0", ")", ".", "sum", "(", "axis", ")", "if", "(", "correction", "is", "not", "None", ")", ":", "(", "nobs", ",", "df", ")", "=", "correction", "diff", "=", "(", "(", "probs1", "-", "probs0", ")", "/", "probs0", ")", ".", "sum", "(", "axis", ")", "d2", "=", "np", ".", "maximum", "(", "(", "(", "(", "(", "d2", "*", "nobs", ")", "-", "diff", ")", "-", "df", ")", "/", "(", "nobs", "-", "1.0", ")", ")", ",", "0", ")", "if", "cohen", ":", "return", "np", ".", "sqrt", "(", "d2", ")", "else", ":", "return", "d2" ]
effect size for a chisquare goodness-of-fit test parameters probs0 : array_like probabilities or cell frequencies under the null hypothesis probs1 : array_like probabilities or cell frequencies under the alternative hypothesis probs0 and probs1 need to have the same length in the axis dimension .
train
false
46,375
def try_send_telemetry(request=None, max_age_hours=24, raise_on_error=False): force_send = bool(((not get_last_submission_time()) or (not settings.DEBUG))) try: return _send_telemetry(request=request, max_age_hours=max_age_hours, force_send=force_send) except TelemetryNotSent: if raise_on_error: raise return False
[ "def", "try_send_telemetry", "(", "request", "=", "None", ",", "max_age_hours", "=", "24", ",", "raise_on_error", "=", "False", ")", ":", "force_send", "=", "bool", "(", "(", "(", "not", "get_last_submission_time", "(", ")", ")", "or", "(", "not", "settings", ".", "DEBUG", ")", ")", ")", "try", ":", "return", "_send_telemetry", "(", "request", "=", "request", ",", "max_age_hours", "=", "max_age_hours", ",", "force_send", "=", "force_send", ")", "except", "TelemetryNotSent", ":", "if", "raise_on_error", ":", "raise", "return", "False" ]
send telemetry information .
train
false
46,376
def water_filling(n, a, sum_x=1): x = cvx.Variable(n) alpha = cvx.Parameter(n, sign='positive') alpha.value = a obj = cvx.Maximize(cvx.sum_entries(cvx.log((alpha + x)))) constraints = [(x >= 0), ((cvx.sum_entries(x) - sum_x) == 0)] prob = cvx.Problem(obj, constraints) prob.solve() if (prob.status == 'optimal'): return (prob.status, prob.value, x.value) else: return (prob.status, np.nan, np.nan)
[ "def", "water_filling", "(", "n", ",", "a", ",", "sum_x", "=", "1", ")", ":", "x", "=", "cvx", ".", "Variable", "(", "n", ")", "alpha", "=", "cvx", ".", "Parameter", "(", "n", ",", "sign", "=", "'positive'", ")", "alpha", ".", "value", "=", "a", "obj", "=", "cvx", ".", "Maximize", "(", "cvx", ".", "sum_entries", "(", "cvx", ".", "log", "(", "(", "alpha", "+", "x", ")", ")", ")", ")", "constraints", "=", "[", "(", "x", ">=", "0", ")", ",", "(", "(", "cvx", ".", "sum_entries", "(", "x", ")", "-", "sum_x", ")", "==", "0", ")", "]", "prob", "=", "cvx", ".", "Problem", "(", "obj", ",", "constraints", ")", "prob", ".", "solve", "(", ")", "if", "(", "prob", ".", "status", "==", "'optimal'", ")", ":", "return", "(", "prob", ".", "status", ",", "prob", ".", "value", ",", "x", ".", "value", ")", "else", ":", "return", "(", "prob", ".", "status", ",", "np", ".", "nan", ",", "np", ".", "nan", ")" ]
boyd and vandenberghe .
train
false
46,377
def shows_by_name(normalized_name, session=None): return session.query(Series).filter(Series._name_normalized.contains(normalized_name)).order_by(func.char_length(Series.name)).all()
[ "def", "shows_by_name", "(", "normalized_name", ",", "session", "=", "None", ")", ":", "return", "session", ".", "query", "(", "Series", ")", ".", "filter", "(", "Series", ".", "_name_normalized", ".", "contains", "(", "normalized_name", ")", ")", ".", "order_by", "(", "func", ".", "char_length", "(", "Series", ".", "name", ")", ")", ".", "all", "(", ")" ]
returns all series matching normalized_name .
train
false
46,378
def maximal_matching(G): matching = set() nodes = set() for (u, v) in G.edges(): if ((u not in nodes) and (v not in nodes) and (u != v)): matching.add((u, v)) nodes.add(u) nodes.add(v) return matching
[ "def", "maximal_matching", "(", "G", ")", ":", "matching", "=", "set", "(", ")", "nodes", "=", "set", "(", ")", "for", "(", "u", ",", "v", ")", "in", "G", ".", "edges", "(", ")", ":", "if", "(", "(", "u", "not", "in", "nodes", ")", "and", "(", "v", "not", "in", "nodes", ")", "and", "(", "u", "!=", "v", ")", ")", ":", "matching", ".", "add", "(", "(", "u", ",", "v", ")", ")", "nodes", ".", "add", "(", "u", ")", "nodes", ".", "add", "(", "v", ")", "return", "matching" ]
find a maximal matching in the graph .
train
false
46,379
def cmpToKey(mycmp): class K(object, ): def __init__(self, obj, *args): self.obj = obj def __lt__(self, other): return (mycmp(self.obj, other.obj) < 0) def __gt__(self, other): return (mycmp(self.obj, other.obj) > 0) def __eq__(self, other): return (mycmp(self.obj, other.obj) == 0) def __le__(self, other): return (mycmp(self.obj, other.obj) <= 0) def __ge__(self, other): return (mycmp(self.obj, other.obj) >= 0) def __ne__(self, other): return (mycmp(self.obj, other.obj) != 0) return K
[ "def", "cmpToKey", "(", "mycmp", ")", ":", "class", "K", "(", "object", ",", ")", ":", "def", "__init__", "(", "self", ",", "obj", ",", "*", "args", ")", ":", "self", ".", "obj", "=", "obj", "def", "__lt__", "(", "self", ",", "other", ")", ":", "return", "(", "mycmp", "(", "self", ".", "obj", ",", "other", ".", "obj", ")", "<", "0", ")", "def", "__gt__", "(", "self", ",", "other", ")", ":", "return", "(", "mycmp", "(", "self", ".", "obj", ",", "other", ".", "obj", ")", ">", "0", ")", "def", "__eq__", "(", "self", ",", "other", ")", ":", "return", "(", "mycmp", "(", "self", ".", "obj", ",", "other", ".", "obj", ")", "==", "0", ")", "def", "__le__", "(", "self", ",", "other", ")", ":", "return", "(", "mycmp", "(", "self", ".", "obj", ",", "other", ".", "obj", ")", "<=", "0", ")", "def", "__ge__", "(", "self", ",", "other", ")", ":", "return", "(", "mycmp", "(", "self", ".", "obj", ",", "other", ".", "obj", ")", ">=", "0", ")", "def", "__ne__", "(", "self", ",", "other", ")", ":", "return", "(", "mycmp", "(", "self", ".", "obj", ",", "other", ".", "obj", ")", "!=", "0", ")", "return", "K" ]
convert a cmp= function into a key= function .
train
false
46,380
@memoize def get_brew_path_prefix(): try: return subprocess.check_output(['brew', '--prefix'], universal_newlines=True).strip() except: return None
[ "@", "memoize", "def", "get_brew_path_prefix", "(", ")", ":", "try", ":", "return", "subprocess", ".", "check_output", "(", "[", "'brew'", ",", "'--prefix'", "]", ",", "universal_newlines", "=", "True", ")", ".", "strip", "(", ")", "except", ":", "return", "None" ]
to get brew path .
train
false