| query | document | negatives | metadata |
|---|---|---|---|
Return node's language tag. Look iteratively in self and parents for a class argument starting with ``language-`` and return the remainder of it (which should be a `BCP47` language tag) or the `fallback`. | def get_language_code(self, fallback=''):
for cls in self.get('classes', []):
if cls.startswith('language-'):
return cls[9:]
try:
return self.parent.get_language(fallback)
except AttributeError:
return fallback | [
"def language(element: ET.Element) -> Optional[str]:\n classes = element.get('class', '').split()\n # Return the first one that matches.\n for css_class in classes:\n match = re.match(r'(lang|language)-(.*)$', css_class)\n if match is not None:\n prefix, language = match.groups()\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
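The `get_language_code` row above extracts a BCP 47 tag from a `language-` class. A minimal stand-alone sketch of that lookup (illustrative names, not the docutils API itself):

```python
# Hypothetical helper mirroring the "language-" class lookup described above.
def language_from_classes(classes, fallback=''):
    for cls in classes:
        if cls.startswith('language-'):
            return cls[len('language-'):]  # the remainder is the BCP 47 tag
    return fallback

print(language_from_classes(['admonition', 'language-fr']))  # -> 'fr'
print(language_from_classes([], fallback='en'))              # -> 'en'
```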
Update basic attributes ('ids', 'names', 'classes', 'dupnames', but not 'source') from node or dictionary `dict_`. | def update_basic_atts(self, dict_):
if isinstance(dict_, Node):
dict_ = dict_.attributes
for att in self.basic_attributes:
self.append_attr_list(att, dict_.get(att, [])) | [
"def update_all_atts(self, dict_, update_fun = copy_attr_consistent,\r\n replace = True, and_source = False):\r\n if isinstance(dict_, Node):\r\n dict_ = dict_.attributes\r\n\r\n # Include the source attribute when copying?\r\n if and_source:\r\n fil... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
For each element in values, if it does not exist in self[attr], append it. | def append_attr_list(self, attr, values):
# List Concatenation
for value in values:
if not value in self[attr]:
self[attr].append(value) | [
"def copy_attr_concatenate(self, attr, value, replace):\r\n if self.get(attr) is not value:\r\n if isinstance(self.get(attr), list) and \\\r\n isinstance(value, list):\r\n self.append_attr_list(attr, value)\r\n else:\r\n self.replace_attr(attr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
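The `append_attr_list` row above appends only values that are not already present. A stand-alone sketch with a plain dict of lists (hypothetical names) standing in for a Node:

```python
# Append-if-missing, as in the append_attr_list snippet above.
def append_values(attrs, attr, values):
    for value in values:
        if value not in attrs[attr]:
            attrs[attr].append(value)

attrs = {'classes': ['a']}
append_values(attrs, 'classes', ['a', 'b'])
print(attrs['classes'])  # -> ['a', 'b'] (the duplicate 'a' is skipped)
```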
First, convert both self[attr] and value to a non-string sequence type; if either is not already a sequence, convert it to a list of one element. Then call append_attr_list. | def coerce_append_attr_list(self, attr, value):
# List Concatenation
if not isinstance(self.get(attr), list):
self[attr] = [self[attr]]
if not isinstance(value, list):
value = [value]
self.append_attr_list(attr, value) | [
"def copy_attr_concatenate(self, attr, value, replace):\r\n if self.get(attr) is not value:\r\n if isinstance(self.get(attr), list) and \\\r\n isinstance(value, list):\r\n self.append_attr_list(attr, value)\r\n else:\r\n self.replace_attr(attr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If self[attr] does not exist or force is True or omitted, set self[attr] to value, otherwise do nothing. | def replace_attr(self, attr, value, force = True):
# One or the other
if force or self.get(attr) is None:
self[attr] = value | [
"def set(self, attr, val):\n if not hasattr(self, attr):\n logger.error('model: set: The attribute \"{0}\" is undefined'.format(attr))\n sys.exit(1)\n setattr(self, attr, val)",
"def set_attribute(self,att,val):\r\n self.attributes[att] = val",
"def set(self, attr, val, strict=True):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If attr is an attribute of self and both self[attr] and value are lists, concatenate the two sequences, setting the result to self[attr]. If either self[attr] or value are non-sequences and replace is True or self[attr] is None, replace self[attr] with value. Otherwise, do nothing. | def copy_attr_concatenate(self, attr, value, replace):
if self.get(attr) is not value:
if isinstance(self.get(attr), list) and \
isinstance(value, list):
self.append_attr_list(attr, value)
else:
self.replace_attr(attr, value, replace) | [
"def replace_attr(self, attr, value, force = True):\r\n # One or the other\r\n if force or self.get(attr) is None:\r\n self[attr] = value",
"def append_attr_list(self, attr, values):\r\n # List Concatenation\r\n for value in values:\r\n if not value in self[attr]:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates all attributes from node or dictionary `dict_`. Appends the basic attributes ('ids', 'names', 'classes', 'dupnames', but not 'source') and then, for all other attributes in dict_, updates the same attribute in self. When attributes with the same identifier appear in both self and dict_, the two values are merge... | def update_all_atts(self, dict_, update_fun = copy_attr_consistent,
replace = True, and_source = False):
if isinstance(dict_, Node):
dict_ = dict_.attributes
# Include the source attribute when copying?
if and_source:
filter_fun = self.is_n... | [
"def update_basic_atts(self, dict_):\r\n if isinstance(dict_, Node):\r\n dict_ = dict_.attributes\r\n for att in self.basic_attributes:\r\n self.append_attr_list(att, dict_.get(att, []))",
"def update(self, dict):\n self.attr.update(dict)\n return self",
"def co... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Replace one child `Node` with another child or children. | def replace(self, old, new):
index = self.index(old)
if isinstance(new, Node):
self.setup_child(new)
self[index] = new
elif new is not None:
self[index:index+1] = new | [
"def _replace_child(self, node, old, new):\n if node is None:\n self.tree = new\n elif node._left == old:\n node._left = new\n node._left._parent = node\n elif node._right == old:\n node._right = new\n node._left._parent = node\n els... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
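The `replace` row above swaps one child for one or several children via list slicing. The same splice on a plain list, for illustration only:

```python
# One-for-many replacement with slice assignment, as used by replace() above.
children = ['a', 'old', 'c']
index = children.index('old')
children[index:index + 1] = ['x', 'y']  # splice two new children in place of one
print(children)  # -> ['a', 'x', 'y', 'c']
```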
Return the index of the first child whose class exactly matches. | def first_child_matching_class(self, childclass, start=0, end=sys.maxint):
if not isinstance(childclass, tuple):
childclass = (childclass,)
for index in range(start, min(len(self), end)):
for c in childclass:
if isinstance(self[index], c):
... | [
"def index(self):\n if self.parent:\n return self.parent.children.index(self)\n else:\n return 0",
"def _get_class_index(prediction: np.ndarray, order_number_minus_one: int) -> int:\n return np.where(\n prediction\n == np.partition(prediction.flatten(), -2)[-order_number_minus_one... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the index of the first child whose class does not match. | def first_child_not_matching_class(self, childclass, start=0,
end=sys.maxint):
if not isinstance(childclass, tuple):
childclass = (childclass,)
for index in range(start, min(len(self), end)):
for c in childclass:
if isi... | [
"def index(self):\n if self.parent:\n return self.parent.children.index(self)\n else:\n return 0",
"def getMinChildIndex(self, index):\n\t\tleftChild = self.data[self.leftChildIndexOf(index)]\n\t\t\n\t\tif self.rightChildIndexOf(index) < self.numElements:\n\t\t\trightChild = self.data[self.rightCh... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Note that this Element has been referenced by its name `name` or id `id`. | def note_referenced_by(self, name=None, id=None):
self.referenced = 1
# Element.expect_referenced_by_* dictionaries map names or ids
# to nodes whose ``referenced`` attribute is set to true as
# soon as this node is referenced by the given name or id.
# Needed for target pro... | [
"def element_name(self, element_name: str):\n\n self._element_name = element_name",
"def element_name(self) -> str:\n return self._element_name",
"def add_name(self):\n self.curr_iden = self.curr_word\n self.curr_obj.insert_attr_name(self.curr_word)",
"def _reference(self):\n\t\tpa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns True if and only if the given attribute is NOT one of the basic list attributes defined for all Elements. | def is_not_list_attribute(cls, attr):
return attr not in cls.list_attributes | [
"def is_not_known_attribute(cls, attr):\r\n return attr not in cls.known_attributes",
"def has_attr(product):\n if len(product.attribute_value_ids) > 0:\n return True\n return False",
"def _attr_ne(self, name, value):\n self._attr_present(name)\n self.filters.append... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns True if and only if the given attribute is NOT recognized by this class. | def is_not_known_attribute(cls, attr):
return attr not in cls.known_attributes | [
"def _attr_exists(self, attr):\n\n if self.metadata and attr not in self.metadata:\n self._warn(\"Attribute [{attr}] does not exist. \" +\n \"Check for a typo or disable validation \" +\n \"by .set_validation(False) \".format(attr=attr))\n\n #... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a DOM representation of this document. | def asdom(self, dom=None):
if dom is None:
import xml.dom.minidom as dom
domroot = dom.Document()
domroot.appendChild(self._dom_node(domroot))
return domroot | [
"def generate_document(self):\n\n resp = requests.get(self.link)\n return BeautifulSoup(resp.text, 'xml')",
"def getImplementation(self):\n return DOMImplementation()",
"def toDomElement(self):\n dom = parseString('<%s></%s>' % (self.elementType, self.elementType))\n domElemen... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Call self."``visit_`` + node class name" with `node` as parameter. If the ``visit_...`` method does not exist, call self.unknown_visit. | def dispatch_visit(self, node):
node_name = node.__class__.__name__
method = getattr(self, 'visit_' + node_name, self.unknown_visit)
self.document.reporter.debug(
'docutils.nodes.NodeVisitor.dispatch_visit calling %s for %s'
% (method.__name__, node_name))
r... | [
"def Visit(self, node):\n mapping = self._mapping\n\n # Build a visitor that performs the old_class -> new_class mapping:\n class Visitor(visitors.Visitor):\n visits_all_node_types = True\n name_to_class = mapping\n for name, new_cls in mapping.iteritems():\n\n def Visit(self, node):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Call self."``depart_`` + node class name" with `node` as parameter. If the ``depart_...`` method does not exist, call self.unknown_departure. | def dispatch_departure(self, node):
node_name = node.__class__.__name__
method = getattr(self, 'depart_' + node_name, self.unknown_departure)
self.document.reporter.debug(
'docutils.nodes.NodeVisitor.dispatch_departure calling %s for %s'
% (method.__name__, node_name... | [
"def connect_directive_node(self, name, f_visit, f_depart):\r\n self.builder._function_node.append((name, f_visit, f_depart))",
"def _depart(self, data, sock, forward=True):\n if forward:\n self.send_replicas_forward()\n time.sleep(1)\n self.send_data_forward()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Called when entering unknown `Node` types. Raise an exception unless overridden. | def unknown_visit(self, node):
if (self.document.settings.strict_visitor
or node.__class__.__name__ not in self.optional):
raise NotImplementedError(
'%s visiting unknown node type: %s'
% (self.__class__, node.__class__.__name__)) | [
"def currentNodeHasUnknownType(*args, **kwargs):\n \n pass",
"def unknownNode(plugin=bool, realClassName=bool, realClassTag=bool):\n pass",
"def generic_visit(self, node):\n raise NotImplementedError('Unsupported AST node %s' % node)",
"def register_for_new_hierarchy_nodes(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Quote attributes for pseudo-xml | def pseudo_quoteattr(value):
return '"%s"' % value | [
"def quoteAttr(self, value):\n ret = quoteattr(\"'\"+value+\"'\")\n return ret[2:len(ret)-2]",
"def quoteattr(data, entities={}):\r\n data = escape(data, entities)\r\n if '\"' in data:\r\n if \"'\" in data:\r\n data = '\"%s\"' % data.replace('\"', \""\")\r\n else:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get CSV data from the directive content, from an external file, or from a URL reference. | def get_csv_data(self):
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
error_handler = self.state.document.settings.input_encoding_error_handler
if self.content:
# CSV data is from directive content.
if 'file' in se... | [
"def get_csv_data(self):\r\n encoding = self.options.get(\r\n 'encoding', self.state.document.settings.input_encoding)\r\n error_handler = self.state.document.settings.input_encoding_error_handler\r\n if self.content:\r\n # CSV data is from directive content.\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Locate and return a role function from its language-dependent name, along with a list of system messages. If the role is not found in the current | def role(role_name, language_module, lineno, reporter):
normname = role_name.lower()
messages = []
msg_text = []
if normname in _roles:
return _roles[normname], messages
if role_name:
canonicalname = None
try:
canonicalname = language_module.roles[nor... | [
"def role(self, name: str) -> RoleFunction | None:\n if name in self._role_cache:\n return self._role_cache[name]\n if name not in self.roles:\n return None\n fullname = f'{self.name}:{name}'\n\n def role_adapter(typ: str, rawtext: str, text: str, lineno: int,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Register an interpreted text role by its canonical name. | def register_canonical_role(name, role_fn):
set_implicit_options(role_fn)
_role_registry[name] = role_fn | [
"def add_argument(self, arg_text):\n arg_index = len(self.args)\n self.args.append(arg_text)\n self.roles_dict[arg_index] = arg_text # Note: This ignores all internal modifications\n self.template += '{A' + str(arg_index) + '} '",
"def register_token(cls, a_name, a_re, a_type):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Register an interpreted text role by its local or language-dependent name. | def register_local_role(name, role_fn):
set_implicit_options(role_fn)
_roles[name] = role_fn | [
"def add_argument(self, arg_text):\n arg_index = len(self.args)\n self.args.append(arg_text)\n self.roles_dict[arg_index] = arg_text # Note: This ignores all internal modifications\n self.template += '{A' + str(arg_index) + '} '",
"def addtemplate(self, name, text):\n\t\tself.context[... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add customization options to role functions, unless explicitly set or disabled. | def set_implicit_options(role_fn):
if not hasattr(role_fn, 'options') or role_fn.options is None:
role_fn.options = {'class': directives.class_option}
elif 'class' not in role_fn.options:
role_fn.options['class'] = directives.class_option | [
"def experimental_options(self):\n ...",
"def add_experimental_option(self, name, value):\n ...",
"async def role(self, context, *text):\n \n if text[0] in config[\"roles\"].keys():\n subrole = \" \".join(text[1:])\n if subrole in config[\"roles\"].keys():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
For roles which simply wrap a given `node_class` around the text. | def register_generic_role(canonical_name, node_class):
role = GenericRole(canonical_name, node_class)
register_canonical_role(canonical_name, role) | [
"def add_class_to_node(node, classname):\n\n if 'class' in node.attrib:\n node.attrib['class'] += ' ' + classname\n else:\n node.attrib['class'] = classname",
"def node_roles(node):\n return \"_\".join(sorted(node[\"roles\"]))",
"def add_child_classes(node):\n for para in node.trav... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Auxiliary function to set options['classes'] and delete options['class']. | def set_classes(options):
if 'class' in options:
assert 'classes' not in options
options['classes'] = options['class']
del options['class'] | [
"def reset_class(self, classes):\n self._clear_cached_op()\n self.classes = classes\n self.num_class = len(classes)",
"def remove_class(class_id):\r\n return 200",
"def __init__ (self, options=[]):\r\n for opt in options:\r\n setattr(self, opt, None)",
"def remove_cla... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse `input_lines` and modify the `document` node in place. | def run(self, input_lines, document, input_offset=0, match_titles=True,
inliner=None):
self.language = languages.get_language(
document.settings.language_code)
self.match_titles = match_titles
if inliner is None:
inliner = Inliner()
inliner.init... | [
"def process(self, lines):\n for line in lines:\n self._process_line(line)",
"def parse_lines(self, lines):\n raise NotImplementedError(self.__class__)",
"def updateLineParsing(self):\n self.titleLine = self.parseLine(self.getTitleLine())\n self.outputLines = [self.parseLi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Jump to input line `abs_line_offset`, ignoring jumps past the end. | def goto_line(self, abs_line_offset):
try:
self.state_machine.goto_line(abs_line_offset)
except EOFError:
pass | [
"def jump_to_line(self, lineno=None):\r\n if lineno is not None:\r\n self.emit(SIGNAL(\"addBackItemNavigation()\"))\r\n self.go_to_line(lineno)\r\n return\r\n\r\n maximum = self.blockCount()\r\n line = QInputDialog.getInt(self, self.tr(\"Jump to Line\"),\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new StateMachine rooted at `node` and run it over the input `block`. | def nested_parse(self, block, input_offset, node, match_titles=False,
state_machine_class=None, state_machine_kwargs=None):
use_default = 0
if state_machine_class is None:
state_machine_class = self.nested_sm
use_default += 1
if state_machine_kw... | [
"async def mine_new_block():\n block = await self.create_block_async_func(Address.create_empty_account())\n if not block:\n self.input_q.put((None, {}))\n return\n mining_params = self.get_mining_param_func()\n mining_params[\"consensus_type\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build, compile and return a regular expression based on `definition`. | def build_regexp(definition, compile=True):
name, prefix, suffix, parts = definition
part_strings = []
for part in parts:
if type(part) is tuple:
part_strings.append(build_regexp(part, None))
else:
part_strings.append(part)
or_group = '|'.join(part_strings... | [
"def compile(self):\n return re.compile(self.pattern, self.flags)",
"def get_compiled(self, name: str) -> re.compile:\n rx = re.compile(self.regexp)\n if self.flag_multiline:\n rx.flags ^= re.MULTILINE\n if self.flag_dotall:\n rx.flags ^= re.DOTALL\n return... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Setting-based customizations; run when parsing begins. | def init_customizations(self, settings):
if settings.pep_references:
self.implicit_dispatch.append((self.patterns.pep,
self.pep_reference))
if settings.rfc_references:
self.implicit_dispatch.append((self.patterns.rfc,
... | [
"def setup(self):\n # Call the base class setup first so that all of the variables are fully initialized and formatted.\n super().setup()\n\n # Write out the custom config\n self.writeCustomConfig()",
"def __call__(self, iperf):\n self.validate()\n for key, value in self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test if inline markup start-string is 'quoted'. 'Quoted' in this context means the start-string is enclosed in a pair of matching opening/closing delimiters (not necessarily quotes) or at the end of the match. | def quoted_start(self, match):
string = match.string
start = match.start()
if start == 0: # start-string at beginning of text
return False
prestart = string[start - 1]
try:
poststart = string[match.end()]
except IndexError:... | [
"def quotedstart(self, match):\n string = match.string\n start = match.start()\n end = match.end()\n if start == 0: # start-string at beginning of text\n return 0\n prestart = string[start - 1]\n try:\n poststart = string[end]\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check each of the patterns in `self.implicit_dispatch` for a match, and dispatch to the stored method for the pattern. Recursively check the text before and after the match. Return a list of `nodes.Text` and inline element nodes. | def implicit_inline(self, text, lineno):
if not text:
return []
for pattern, method in self.implicit_dispatch:
match = pattern.search(text)
if match:
try:
# Must recurse on strings before *and* after the match;
... | [
"def _handleInline(self, line):\r\n\r\n if not(line):\r\n return [self.doc.createTextNode(' ')]\r\n\r\n for pattern in self.inlinePatterns:\r\n list = self._applyPattern( line, pattern)\r\n if list: return list\r\n\r\n return [self.doc.createTextNode(line)]",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check attribution shape. Return the index past the end of the attribution, and the indent. | def check_attribution(self, indented, attribution_start):
indent = None
i = attribution_start + 1
for i in range(attribution_start + 1, len(indented)):
line = indented[i].rstrip()
if not line:
break
if indent is None:
in... | [
"def indirection_level(self):\n return self.ty.count(\"*\") + self.ty.count(\"[\")",
"def getMarkPosition(self, i: int) -> int:\n ...",
"def _indents(self, line) -> Tuple[int, int]:\n import re\n\n indent = len(re.match(r'( *)', line).group(1))\n list_match = re.match(r'( *)((... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Construct and return the next enumerated list item marker, and an auto-enumerator ("#" instead of the regular enumerator). Return ``None`` for invalid (out of range) ordinals. | def make_enumerator(self, ordinal, sequence, format): #"
if sequence == '#':
enumerator = '#'
elif sequence == 'arabic':
enumerator = str(ordinal)
else:
if sequence.endswith('alpha'):
if ordinal > 26:
return None
... | [
"def get_next_item(self):\n return # osid.assessment.Item",
"def test_incorrect_start(start):\n with raises(TypeError):\n next(ienumerate([21], start))",
"def get_next_id(self):\n try:\n next_item = next(self)\n except StopIteration:\n raise IllegalState('no... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
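The `make_enumerator` row above returns `None` for out-of-range ordinals. A hedged sketch of just the lower-alpha branch (the real method also handles '#', arabic, and roman sequences):

```python
# Lower-alpha enumerators are only defined for ordinals 1..26.
def alpha_enumerator(ordinal):
    if not 1 <= ordinal <= 26:
        return None  # invalid (out of range) ordinal
    return chr(ord('a') + ordinal - 1)

print(alpha_enumerator(3))   # -> 'c'
print(alpha_enumerator(27))  # -> None
```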
Extract & return field name from a field marker match. | def parse_field_marker(self, match):
field = match.group()[1:] # strip off leading ':'
field = field[:field.rfind(':')] # strip off trailing ':' etc.
return field | [
"def parse_field_marker(self, match):\n field = match.string[1:] # strip off leading ':'\n field = field[:field.find(':')] # strip off trailing ':' etc.\n tokens = field.split()\n return tokens[0], tokens[1:] # first == name, others == args",
"def _get_field_name(cls, rule_co... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
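The `parse_field_marker` row above strips the surrounding colons from a marker such as `:author:`. The same two-step stripping on a plain string (the real code operates on a regex match object):

```python
# Strip the leading ':' and everything from the trailing ':' onwards.
marker = ':author: some field body'
field = marker[1:]
field = field[:field.rfind(':')]
print(field)  # -> 'author'
```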
Return a list of `node.option` and `node.option_argument` objects, parsed from an option marker match. | def parse_option_marker(self, match):
optlist = []
optionstrings = match.group().rstrip().split(', ')
for optionstring in optionstrings:
tokens = optionstring.split()
delimiter = ' '
firstopt = tokens[0].split('=', 1)
if len(firstopt) > 1:
... | [
"def argv(self):\n optlist = []\n for n in range(self.count):\n optlist.append(self.flag)\n if self.values is not None:\n optlist.append(self.values[n])\n return optlist",
"def ParseOptions():\n parser = optparse.OptionParser()\n parser.add_option('--ver... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return one line element of a line_block. | def line_block_line(self, match, lineno):
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
until_blank=True)
text = u'\n'.join(indented)
text_nodes, messages = self.in... | [
"def getLine(self) -> \"SbLine const &\":\n return _coin.SbLineProjector_getLine(self)",
"def getLine(self, line_id: int) -> Line:\n return self.pool[line_id]",
"def get_line(self, line_name):\n for line in self.line_list:\n if line.name == line_name:\n return line... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse a directive then run its directive function. | def run_directive(self, directive, match, type_name, option_presets):
if isinstance(directive, (FunctionType, MethodType)):
from docutils.parsers.rst import convert_directive_function
directive = convert_directive_function(directive)
lineno = self.state_machine.abs_line_numbe... | [
"def _parse_directive(directive_ast: dict) -> \"DirectiveNode\":\n return DirectiveNode(\n name=_parse_name(directive_ast[\"name\"]),\n arguments=_parse_arguments(directive_ast[\"arguments\"]),\n location=_parse_location(directive_ast[\"loc\"]),\n )",
"async def on_directive(self, direc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse `datalines` for a field list containing extension options matching `option_spec`. | def parse_extension_options(self, option_spec, datalines):
node = nodes.field_list()
newline_offset, blank_finish = self.nested_list_parse(
datalines, 0, node, initial_state='ExtensionOptions',
blank_finish=True)
if newline_offset != len(datalines): # incomplete ... | [
"def parse_extension_attributes(self, attribute_spec, datalines, blankfinish):\n node = nodes.field_list()\n newlineoffset, blankfinish = self.nestedlistparse(\n datalines, 0, node, initialstate='FieldList',\n blankfinish=blankfinish)\n if newlineoffset != len(dataline... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determine which explicit construct this is, parse & return it. | def explicit_construct(self, match):
errors = []
for method, pattern in self.explicit.constructs:
expmatch = pattern.match(match.string)
if expmatch:
try:
return method(self, expmatch)
except MarkupError, error:
... | [
"def explicit_construct(self, match):\n errors = []\n for method, pattern in self.explicit.constructs:\n expmatch = pattern.match(match.string)\n if expmatch:\n try:\n return method(self, expmatch)\n except MarkupError, detail: # n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
RFC2822-style field list item. | def rfc2822(self, match, context, next_state):
fieldlist = nodes.field_list(classes=['rfc2822'])
self.parent += fieldlist
field, blank_finish = self.rfc2822_field(match)
fieldlist += field
offset = self.state_machine.line_offset + 1 # next line
newline_offset, bla... | [
"def __getitem__(self, i: 'int const') -> \"SoField *\":\n return _coin.SoFieldList___getitem__(self, i)",
"def parseField(f):\n k = f.id\n if f.has_value('alternate_name'):\n k = f.get_value('alternate_name') or f.id\n v = getattr(request, k, MARKER)\n if hasattr(v, 'edit'):\n # This is an encap... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Analyze the text `block` and return a table data structure. Given a plaintext-graphic table in `block` (list of lines of text; no whitespace padding), parse the table, construct and return the data necessary to construct a CALS table or equivalent. Raise `TableMarkupError` if there is any problem with the markup. | def parse(self, block):
self.setup(block)
self.find_head_body_sep()
self.parse_table()
structure = self.structure_from_cells()
return structure | [
"def render_table(self, block):\n before = '<table>\\n<tr>\\n<td>'\n end = '</td>\\n</tr>\\n</table>'\n content = [\"</td>\\n<td>\".join(row) for row in block.data]\n content = \"</td>\\n</tr>\\n<tr>\\n<td>\".join(content)\n block.data = before + content + end\n return None... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Look for a head/body row separator line; store the line index. | def find_head_body_sep(self):
for i in range(len(self.block)):
line = self.block[i]
if self.head_body_separator_pat.match(line):
if self.head_body_sep:
raise TableMarkupError(
'Multiple head/body row separators '
... | [
"def parse_special_header(self, linenum, info):\n if linenum + 1 < len(self.lines) and \\\n self.lines[linenum].startswith(\"Index: \") and \\\n self.lines[linenum + 1] == self.INDEX_SEP:\n # This is an Index: header, which is common in CVS and Subversion,\n # amongs... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Start with a queue of upper-left corners, containing the upper-left corner of the table itself. Trace out one rectangular cell, remember it, and add its upper-right and lower-left corners to the queue of potential upper-left corners of further cells. Process the queue in top-to-bottom order, keeping track of how much of each ... | def parse_table(self):
corners = [(0, 0)]
while corners:
top, left = corners.pop(0)
if top == self.bottom or left == self.right \
or top <= self.done[left]:
continue
result = self.scan_cell(top, left)
if not result... | [
"def iter_cells_greater_than(self, row_zb: int, col_zb: int) \\\n -> Generator[Tuple[int, int], None, None]:\n # Cell above?\n if (row_zb > 0 and\n self.inequalities_down[row_zb - 1][col_zb] == BOTTOM_LT_TOP):\n other = row_zb - 1, col_zb\n yield other\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
First determine the column boundaries from the top border, then process rows. Each row may consist of multiple lines; accumulate lines until a row is complete. Call `self.parse_row` to finish the job. | def parse_table(self):
# Top border must fully describe all table columns.
self.columns = self.parse_columns(self.block[0], 0)
self.border_end = self.columns[-1][1]
firststart, firstend = self.columns[0]
offset = 1 # skip top border
start = 1
... | [
"def __readGrid(self, textLines):\n\t\tcolsIndex = None\n\t\tfor line in textLines:\n\t\t\tline = line.split(\"#\",1)[0].rstrip() # We don't take in account the comments and whitespaces at the end\n\t\t\tif len(line) == 0: continue # If the line is empty, we can skip it\n\n\t\t\t\"\"\"Parse the first line\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given the text `lines` of a row, parse it and append to `self.table`. The row is parsed according to the current column spec (either `spanline` if provided or `self.columns`). For each column, extract text from each line, and check for text in column margins. Finally, adjust for insignificant whitespace. | def parse_row(self, lines, start, spanline=None):
if not (lines or spanline):
# No new row, just blank lines.
return
if spanline:
columns = self.parse_columns(*spanline)
span_offset = spanline[1]
else:
columns = self.columns[:]
... | [
"def __readGrid(self, textLines):\n\t\tcolsIndex = None\n\t\tfor line in textLines:\n\t\t\tline = line.split(\"#\",1)[0].rstrip() # We don't take in account the comments and whitespaces at the end\n\t\t\tif len(line) == 0: continue # If the line is empty, we can skip it\n\n\t\t\t\"\"\"Parse the first line\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check for text in column margins and text overflow in the last column. Raise TableMarkupError if anything but whitespace is in column margins. Adjust the end value for the last column if there is text overflow. | def check_columns(self, lines, first_line, columns):
# "Infinite" value for a dummy last column's beginning, used to
# check for text overflow:
columns.append((sys.maxint, None))
lastcol = len(columns) - 2
# combining characters do not contribute to the column width
... | [
"def text_error_cols(text): \n po = ParseOptions(min_null_count=0, max_null_count=999)\n en_dir = Dictionary() # open the dictionary only once\n sent = Sentence(text, en_dir, po)\n linkages = sent.parse()\n if sent.null_count() == 0 :\n return []\n else:\n error_cols=[]\n iws=... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extend the list values of `master` with those from `newdata`. Both parameters must be dictionaries containing list values. | def update_dict_of_lists(master, newdata):
for key, values in newdata.items():
master.setdefault(key, []).extend(values) | [
"def update_dictargs( list_of_dicts, master_dict, issuer = 'alberta_treasury' ):\n key, default_dict = create_default_dictargs( issuer = issuer )\n if master_dict.get( key, None ) is None:\n master_dict[ key ] = list()\n for append_dict in list_of_dicts:\n d = dict( default_dict.items() + app... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
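A quick usage example of the `update_dict_of_lists` helper shown above:

```python
def update_dict_of_lists(master, newdata):
    for key, values in newdata.items():
        master.setdefault(key, []).extend(values)

master = {'a': [1]}
update_dict_of_lists(master, {'a': [2], 'b': [3]})
print(master)  # -> {'a': [1, 2], 'b': [3]}
```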
Run the state machine on `input_lines`. Return results (a list). Reset `self.line_offset` and `self.current_state`. Run the beginning-of-file transition. Input one line at a time and check for a matching transition. If a match is found, call the transition method and possibly change the state. Store the context returned ... | def run(self, input_lines, input_offset=0, context=None,
input_source=None, initial_state=None):
self.runtime_init()
if isinstance(input_lines, StringList):
self.input_lines = input_lines
else:
self.input_lines = StringList(input_lines, source=input_sour... | [
"def check_line(self, context, state, transitions=None):\r\n if transitions is None:\r\n transitions = state.transition_order\r\n state_correction = None\r\n if self.debug:\r\n print((\r\n '\\nStateMachine.check_line: state=\"%s\", transitions=%r.'\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return current state object; set it first if `next_state` given. | def get_state(self, next_state=None):
if next_state:
if self.debug and next_state != self.current_state:
print >>self._stderr, (
'\nStateMachine.get_state: Changing state from '
'"%s" to "%s" (input line %s).'
% (self.... | [
"def go_to_state(self, next_state):\n for t in self.transitions:\n if t.next_state == None:\n t.next_state = next_state\n return self.root",
"def next_state(self):\r\n s = max(self.states)\r\n self.states.remove(s)\r\n return s[1]",
"def get_next_stat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load `self.line` with the `n`'th next line and return it. | def next_line(self, n=1):
try:
try:
self.line_offset += n
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
... | [
"def read_line(file_path, n):\n return linecache.getline(file_path, n)",
"def NthLineOfFile( fname, n = 0 ):\n with open( fname ) as f:\n while n > 0:\n f.readline()\n n -= 1\n return f.readline().strip()",
"def _next_line(self):\n self.current_line += 1\n return next... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return 1 if the next line is blank or nonexistent. | def is_next_line_blank(self):
try:
return not self.input_lines[self.line_offset + 1].strip()
except IndexError:
return 1 | [
"def non_blank_lines(thing):\n \n count = 0\n for line in thing:\n if line.strip():\n count += 1\n return count",
"def _next_nonempty_line(self):\n line = \"\"\n while not line:\n line = self._next_line()\n return line",
"def _not_empty_line(line):\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load `self.line` with the `n`'th previous line and return it. | def previous_line(self, n=1):
self.line_offset -= n
if self.line_offset < 0:
self.line = None
else:
self.line = self.input_lines[self.line_offset]
self.notify_observers()
return self.line | [
"def previous(self, n = 1):\n return NonStandardInteger(self.non_st_part, self.st_part - n, self.non_st_ring)",
"def prev_line(rule):\n return shift_line(-1, rule)",
"def read_line(file_path, n):\n return linecache.getline(file_path, n)",
"def NthLineOfFile( fname, n = 0 ):\n with open( fname ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Jump to absolute line offset `line_offset`, load and return it. | def goto_line(self, line_offset):
try:
try:
self.line_offset = line_offset - self.input_offset
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self... | [
"def _parse_from_offset(self, max_lines, offset_line):\n total_lines = 0\n output_lines = 0\n console_output = []\n\n with open(self.path, 'r', encoding='utf-8', errors='replace') as f:\n # Iterate up to the index offset_line\n for i in range(0, offset_line):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return source of line at absolute line offset `line_offset`. | def get_source(self, line_offset):
return self.input_lines.source(line_offset - self.input_offset) | [
"def source_line(self) -> str:\n if not self.__source_line:\n self.__source_line = util.get_line(self.file_path, self.line)\n\n return self.__source_line",
"def raise_source_exception(\n source: str,\n rel_path: Path,\n source_lineno: int,\n file_lineno: int,\n source_offse... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return (source, line) tuple for current or given line number. Looks up the source and line number in the `self.input_lines` StringList instance to account for included source files. If the optional argument `lineno` is given, convert it from an absolute line number to the corresponding (source, line) pair. | def get_source_and_line(self, lineno=None):
if lineno is None:
offset = self.line_offset
else:
offset = lineno - self.input_offset - 1
try:
src, srcoffset = self.input_lines.info(offset)
srcline = srcoffset + 1
except (TypeError):
... | [
"def get_corresponding_lineno(self, lineno):\r\n for template_line, code_line in reversed(self.debug_info):\r\n if code_line <= lineno:\r\n return template_line\r\n return 1",
"def _resolve_lineno(self, lineno):\n if lineno is None:\n return self.line_numb... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Examine one line of input for a transition match & execute its method. | def check_line(self, context, state, transitions=None):
if transitions is None:
transitions = state.transition_order
state_correction = None
if self.debug:
print >>self._stderr, (
'\nStateMachine.check_line: state="%s", transitions=%r.'
... | [
"def check_line(self, context, state, transitions=None):\r\n if transitions is None:\r\n transitions = state.transition_order\r\n state_correction = None\r\n if self.debug:\r\n print((\r\n '\\nStateMachine.check_line: state=\"%s\", transitions=%r.'\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize & add a `state_class` (`State` subclass) object. | def add_state(self, state_class):
statename = state_class.__name__
if statename in self.states:
raise DuplicateStateError(statename)
self.states[statename] = state_class(self, self.debug) | [
"def setup_class(cls):\n cls.state = State()\n cls.state.name = \"Oregon\"",
"def set_classy_state(self, state: Dict[str, Any]) -> None:\n raise NotImplementedError",
"def fsm_factory(name, states):\n className = name.capitalize() + \"State\"\n attribs = dict(\n __mapper_args__... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add `state_classes` (a list of `State` subclasses). | def add_states(self, state_classes):
for state_class in state_classes:
self.add_state(state_class) | [
"def add_css_classes(self, *css_classes):\n for cls in css_classes:\n self._css_classes.add(cls)",
"def add_class(self, class_):\n self.classes.append(class_)",
"def addState(self, state):\n id = len(self.states)\n self.states.append(state)\n return id",
"def _reg... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make and add transitions listed in `self.initial_transitions`. | def add_initial_transitions(self):
if self.initial_transitions:
names, transitions = self.make_transitions(
self.initial_transitions)
self.add_transitions(names, transitions) | [
"def getAutomaticTransitions():",
"def set_transitions(self, cell_transition, orientation, new_transitions):\n raise NotImplementedError()",
"def setup_transition_list(self):\n \n # Create an empty transition list\n xn_list = []\n \n # Append four transitions to the lis... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove a transition by `name`. | def remove_transition(self, name):
try:
del self.transitions[name]
self.transition_order.remove(name)
except:
raise UnknownTransitionError(name) | [
"def remove(self, name):\n if self.states[name]:\n del self.states[name]",
"def removeTransition(self, transition: 'ScXMLTransitionElt') -> \"void\":\n return _coin.ScXMLStateElt_removeTransition(self, transition)",
"def removeScene(self, name: str) -> None:\r\n\r\n for scene in ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of transition names and a transition mapping. | def make_transitions(self, name_list):
stringtype = type('')
names = []
transitions = {}
for namestate in name_list:
if type(namestate) is stringtype:
transitions[namestate] = self.make_transition(namestate)
names.append(namestate)
... | [
"def create_transition_dict(self):\n out = {}\n for state in self.states:\n to_states, probas = self.transition_from(state)\n out[state] = {s: p for s, p in zip(to_states, probas)}\n return out",
"def getAutomaticTransitions():",
"def transitions(self) -> list:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A "do nothing" transition method. Return unchanged `context` & `next_state`, empty result. Useful for simple state changes (actionless transitions). | def nop(self, match, context, next_state):
return context, next_state, [] | [
"def noop_context():\n yield",
"def noop(value, state = None):\n return value, None",
"def strip_state(e: Expression) -> None:\n if hasattr(e, \"state\"):\n e.state = None\n for c in e.children():\n strip_state(c)",
"def reset(self):\n self.env.reset()\n\n repeat_noop_t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize a `StateSM` object; extends `State.__init__()`. Check for indent state machine attributes, set defaults if not set. | def __init__(self, state_machine, debug=False):
State.__init__(self, state_machine, debug)
if self.indent_sm is None:
self.indent_sm = self.nested_sm
if self.indent_sm_kwargs is None:
self.indent_sm_kwargs = self.nested_sm_kwargs
if self.known_indent_sm is N... | [
"def __init__(self, token, state, extra):\n self.state = state\n self.token = token\n self.extra = extra\n pass",
"def init_state(self) -> ESILState:\n\n self.state_manager = ESILStateManager([], lazy=self.lazy)\n state = self.state_manager.entry_state(self.r2api, **self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle an indented text block. Extend or override in subclasses. Recursively run the registered state machine for indented blocks (`self.indent_sm`). | def indent(self, match, context, next_state):
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
sm = self.indent_sm(debug=self.debug, **self.indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_stat... | [
"def _indentblock(self, text, level):\n if not self.prefs.lineSeparator:\n return text\n return self.prefs.lineSeparator.join(\n ['%s%s' % (level * self.prefs.indent, line)\n for line in text.split(self.prefs.lineSeparator)]\n )",
"def update_indent(self) ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle a known-indent text block. Extend or override in subclasses. Recursively run the registered state machine for known-indent indented blocks (`self.known_indent_sm`). The indent is the length of the match, ``match.end()``. | def known_indent(self, match, context, next_state):
indented, line_offset, blank_finish = \
self.state_machine.get_known_indented(match.end())
sm = self.known_indent_sm(debug=self.debug,
**self.known_indent_sm_kwargs)
results = sm.run(indented,... | [
"def line_block_line(self, match, lineno):\r\n indented, indent, line_offset, blank_finish = \\\r\n self.state_machine.get_first_known_indented(match.end(),\r\n until_blank=True)\r\n text = u'\\n'.join(indented)\r\n text_nodes, m... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove items from the start of the list, without touching the parent. | def trim_start(self, n=1):
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[:n]
... | [
"def removeFirst(self):\n\t\tself.head = self.head.after",
"def delete_beg(self):\n\n if self.head != None:\n\n # grab the node that comes after the head.\n aft_head = self.head.next_node\n\n # have the last node now point to that node\n self.end.next_node = aft_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return source for index `i`. | def source(self, i):
return self.info(i)[0] | [
"def source(self, index=0):\n if not self._sources:\n self.get_data()\n try:\n sitename, url = self._sources[index]\n except TypeError:\n return self._sources[index]\n except IndexError:\n raise NotFoundError(\"No episode sources found.\")\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return offset for index `i`. | def offset(self, i):
return self.info(i)[1] | [
"def getOffset(self, index: int) -> int:\n ...",
"def getMarkPosition(self, i: int) -> int:\n ...",
"def getEndPosition(self, i: int) -> int:\n ...",
"def get(self, i=1):\n temp = self.s[self.ofs:self.ofs+i]\n self.ofs += i\n return temp",
"def offset_at_position(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Trim `length` characters off the beginning of each item, in-place, from index `start` to `end`. No whitespace-checking is done on the trimmed text. Does not affect slice parent. | def trim_left(self, length, start=0, end=sys.maxint):
self.data[start:end] = [line[length:]
for line in self.data[start:end]] | [
"def trim(self, start, end):\r\n self.ltrim(start, end)",
"def truncate(self, length: 'int const') -> \"void\":\n return _coin.SoBaseList_truncate(self, length)",
"def truncate(self, length: 'int const') -> \"void\":\n return _coin.SoChildList_truncate(self, length)",
"def trim(self, star... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
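The `trim_left` row above trims a fixed number of leading characters from each line in a slice. The same in-place list assignment on a plain list, with illustrative values:

```python
# Trim `length` leading characters from lines start..end.
data = ['    one', '    two', 'three']
length, start, end = 4, 0, 2
data[start:end] = [line[length:] for line in data[start:end]]
print(data)  # -> ['one', 'two', 'three']
```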
Return a contiguous block of text. If `flush_left` is true, raise `UnexpectedIndentationError` if an indented line is encountered before the text block ends (with a blank line). | def get_text_block(self, start, flush_left=False):
end = start
last = len(self.data)
while end < last:
line = self.data[end]
if not line.strip():
break
if flush_left and (line[0] == ' '):
source, offset = self.info(end)
... | [
"def _extract_block_from_next_pos(self, marker):\n block = ''\n if not self.oom.find_text(marker):\n return block\n\n line = self.oom.current()\n block += \"{}\\n\".format(line)\n for line in self.oom:\n if not line.startswith(' '):\n self.oom.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Pad all double-width characters in self by appending `pad_char` to each. For East Asian language support. | def pad_double_width(self, pad_char):
if hasattr(unicodedata, 'east_asian_width'):
east_asian_width = unicodedata.east_asian_width
else:
return # new in Python 2.4
for i in range(len(self.data)):
line = self.data[i]
if i... | [
"def padIt(str: unicode, padlen: int, endchar: int, padded: bool) -> unicode:\n ...",
"def _pad(string, length, char=None):\r\n \r\n if char == None:\r\n addchar = ' '\r\n else:\r\n addchar = char\r\n while len(string) < length:\r\n string += addchar\r\n return string",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
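The `pad_double_width` row above relies on `unicodedata.east_asian_width`. A stand-alone sketch that pads after each wide ('W') or fullwidth ('F') character:

```python
import unicodedata

def pad_double_width(text, pad_char=' '):
    out = []
    for char in text:
        out.append(char)
        if unicodedata.east_asian_width(char) in ('W', 'F'):
            out.append(pad_char)
    return ''.join(out)

print(pad_double_width('漢字 abc'))  # pads only after the two wide characters
```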
Find and return the promotion candidate and its index. Return (None, None) if no valid candidate was found. | def candidate_index(self, node):
index = node.first_child_not_matching_class(
nodes.PreBibliographic)
if index is None or len(node) > (index + 1) or \
not isinstance(node[index], nodes.section):
return None, None
else:
return node[index],... | [
"def get_tag(self, candidate):\n return self.ordered.index(candidate)",
"def __index__(self, product, options=[]):\n cart_items = self.cart_serializable\n for i in range(len(cart_items)):\n if cart_items[i]['product_pk'] == product.pk:\n if sorted(cart_items[i]['opti... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set document['title'] metadata title from the following | def set_metadata(self):
if not self.document.hasattr('title'):
if self.document.settings.title is not None:
self.document['title'] = self.document.settings.title
elif len(self.document) and isinstance(self.document[0], nodes.title):
self.document['tit... | [
"def title(self, title):\r\n doc.title = title",
"def set_title(self, value):\n return self._set_one_attribute(self.AttributeNames.TITLE, value)",
"def _write_title(self) -> None:\n self.doc.preamble.append(Command('title', self.json_summary[\"title\"]))\n self.doc.preamble.append(Co... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Mask the email address in `ref` and return a replacement node. `ref` is returned unchanged if it contains no email address. For email addresses such as "user@host", mask the address as "user at host" (text) to thwart simple email address harvesters (except for those listed in `non_masked_addresses`). If a PEP number (`pepno... | def mask_email(ref, pepno=None):
if ref.hasattr('refuri') and ref['refuri'].startswith('mailto:'):
if ref['refuri'][8:] in non_masked_addresses:
replacement = ref[0]
else:
replacement_text = ref.astext().replace('@', ' at ')
replacement = nodes.r... | [
"def mask_email(email: str) -> str:\n if email.count(\"@\") != 1:\n raise ValueError(\"Invalid email address, should have exactly one @\")\n address, domain = email.split(\"@\")\n if not address:\n raise ValueError(\"Invalid email address, address should not be empty\")\n if not domain:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
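The text part of the masking described in the `mask_email` row above is a plain replacement (the real transform wraps the result in docutils nodes):

```python
address = 'someone@example.org'       # illustrative address
print(address.replace('@', ' at '))   # -> 'someone at example.org'
```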
Assign numbers to autonumbered footnotes. For labeled autonumbered footnotes, copy the number over to corresponding footnote references. | def number_footnotes(self, startnum):
for footnote in self.document.autofootnotes:
while True:
label = str(startnum)
startnum += 1
if label not in self.document.nameids:
break
footnote.insert(0, nodes.label('', la... | [
"def extract_footnotes(kanji: dict):\n n = 0\n fn = {}\n for field in FN_FIELDS:\n notes = re.findall(FOOTNOTE, kanji[field])\n for note in notes:\n fn[note] = n\n kanji[field] = re.sub(note, f\"{n}\", kanji[field])\n n += 1\n\n kanji[\"footnotes\"] = fn",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assign numbers to autonumbered footnote references. | def number_footnote_references(self, startnum):
i = 0
for ref in self.document.autofootnote_refs:
if ref.resolved or ref.hasattr('refid'):
continue
try:
label = self.autofootnote_labels[i]
except IndexError:
msg ... | [
"def Footnote(self, footnotes):\n xbl = .05 # bottom left in inches\n ybl = .05 # bottom left in inches\n lsp = .20 # Line spacing in inches.\n x = xbl/self.width\n y = (ybl + len(footnotes)*lsp)/self.height\n delta = lsp/self.height\n for footnote in footnotes:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add symbol indexes to "[*]"-style footnotes and references. | def symbolize_footnotes(self):
labels = []
for footnote in self.document.symbol_footnotes:
reps, index = divmod(self.document.symbol_footnote_start,
len(self.symbols))
labeltext = self.symbols[index] * (reps + 1)
labels.append(la... | [
"def prepare_symbols(self):",
"def extract_footnotes(kanji: dict):\n n = 0\n fn = {}\n for field in FN_FIELDS:\n notes = re.findall(FOOTNOTE, kanji[field])\n for note in notes:\n fn[note] = n\n kanji[field] = re.sub(note, f\"{n}\", kanji[field])\n n += 1\n\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
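The `symbolize_footnotes` row above cycles through a symbol list with `divmod`, repeating the symbol once per completed pass. A sketch with an assumed, shortened symbol set:

```python
symbols = ['*', '\u2020', '\u2021']  # *, dagger, double dagger (assumed subset)
for start in range(5):
    reps, index = divmod(start, len(symbols))
    print(symbols[index] * (reps + 1))
# prints: *  †  ‡  **  ††
```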
Link manually-labeled footnotes and citations to/from their references. | def resolve_footnotes_and_citations(self):
for footnote in self.document.footnotes:
for label in footnote['names']:
if label in self.document.footnote_refs:
reflist = self.document.footnote_refs[label]
self.resolve_references(footnote, ref... | [
"def _generate_biblio_ref_content(self, doc, out_buffer):\n out_buffer.write(\"\\nDocument contains the following Bibliography References:\\n\")\n\n for biblio_ref in doc.get_biblio_refs():\n out_buffer.write(\"- Reference to [{}]\\n\".format(biblio_ref.get_name()))",
"def result_nodes(\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set up a lexical analyzer for `code` in `language`. | def __init__(self, code, language, tokennames='short'):
self.code = code
self.language = language
self.tokennames = tokennames
self.lexer = None
# get lexical analyzer for `language`:
if language in ('', 'text') or tokennames == 'none':
return
... | [
"def get_lexems(code):\n\n g.clear()\n lexer()\n g.lexer.input(code.lower())\n result = list(g.lexer)\n return g.error_list, result",
"def __init__(self, lang='sl', type='standard'):\n if lang not in ['sl', 'hr', 'sr', 'bg', 'mk']:\n raise Exception(\"Reldi tokenizer is currently ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Merge subsequent tokens of same token-type. Also strip the final newline (added by pygments). | def merge(self, tokens):
tokens = iter(tokens)
(lasttype, lastval) = tokens.next()
for ttype, value in tokens:
if ttype is lasttype:
lastval += value
else:
yield(lasttype, lastval)
(lasttype, lastval) = (ttype, value... | [
"def _MergeOrAddToken(self, text, token_type):\n if not text:\n return\n if (not self._tokens or\n self._tokens[-1][self.TOKEN_TYPE_INDEX] != token_type):\n self._tokens.append((token_type, text))\n elif self._tokens[-1][self.TOKEN_TYPE_INDEX] == Token.Markdown.Section:\n # A section ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
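The `merge` row above is Python 2 (`tokens.next()`). A Python 3 rewrite of just the type-merging part, without the final-newline stripping:

```python
def merge(tokens):
    tokens = iter(tokens)
    lasttype, lastval = next(tokens)
    for ttype, value in tokens:
        if ttype is lasttype:              # token types are shared singletons
            lastval += value
        else:
            yield lasttype, lastval
            lasttype, lastval = ttype, value
    yield lasttype, lastval

NAME, OP = 'Name', 'Operator'              # stand-ins for pygments token types
print(list(merge([(NAME, 'foo'), (NAME, 'bar'), (OP, '+')])))
# -> [('Name', 'foobar'), ('Operator', '+')]
```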
Parse self.code and yield "classified" tokens. | def __iter__(self):
if self.lexer is None:
yield ([], self.code)
return
tokens = pygments.lex(self.code, self.lexer)
for tokentype, value in self.merge(tokens):
if self.tokennames == 'long': # long CSS class args
classes = str(tokentype)... | [
"def itercodelines(self):\r\n codeline = CodeLine(0)\r\n for token in self.itertokens():\r\n codeline.append(token)\r\n if codeline.complete:\r\n codeline.string = '\\n'.join(s.rstrip(' ') \r\n for s in codeline.string.split('\\n'... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
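The "long CSS class" branch in the row above relies on the string form of pygments token types: `str(Token.Keyword.Namespace)` is `'Token.Keyword.Namespace'`, so splitting on `'.'` and dropping the shared root yields per-token class names. A small check (requires pygments):

```python
from pygments.token import Token

def long_css_classes(tokentype):
    # Drop the common 'Token' root; the remaining parts become CSS classes.
    return str(tokentype).split('.')[1:]

print(long_css_classes(Token.Keyword.Namespace))  # ['Keyword', 'Namespace']
print(long_css_classes(Token.Comment.Single))     # ['Comment', 'Single']
```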
Return unicode representation of `self.data`. Try ``unicode(self.data)``, catch `UnicodeError` and if `self.data` is an Exception instance, work around | def __unicode__(self):
try:
u = unicode(self.data)
if isinstance(self.data, EnvironmentError):
u = u.replace(": u'", ": '") # normalize filename quoting
return u
except UnicodeError, error: # catch ..Encode.. and ..Decode.. errors
if... | [
"def get_unicode(self,data, force=False):\n if isinstance(data, binary_type):\n return data.decode('utf-8')\n elif data is None:\n return ''\n elif force:\n return str(data)\n else:\n return data",
"def __unicode__(self):\n s = StringI... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
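The `__unicode__` row above is Python 2 code (`unicode`, `UnicodeError`). A rough Python 3 analogue of the same idea, offered as a sketch rather than a drop-in replacement:

```python
def safe_text(data):
    # str() of an exception no longer raises the Python 2 decoding errors the
    # original guards against; only the filename-quoting normalisation remains.
    text = str(data)
    if isinstance(data, EnvironmentError):      # alias of OSError on Python 3
        text = text.replace(": u'", ": '")
    return text

print(safe_text(OSError(2, 'No such file or directory', 'missing.txt')))
```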
Write `data` to self.stream. Ignore, if self.stream is False. `data` can be a `string`, `unicode`, or `Exception` instance. | def write(self, data):
if self.stream is False:
return
if isinstance(data, Exception):
data = unicode(SafeString(data, self.encoding,
self.encoding_errors, self.decoding_errors))
try:
self.stream.write(data)
ex... | [
"def write(self, data):\n if self.finished:\n raise SinkException(\"The AudioData is already finished writing.\")\n try:\n self.file.write(data)\n except ValueError:\n pass",
"def write(self, data):\n try:\n with open (self.filename, 'w') as ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Close the error-output stream. Ignored if the stream is `sys.stderr` or `sys.stdout` or has no close() method. | def close(self):
if self.stream in (sys.stdout, sys.stderr):
return
try:
self.stream.close()
except AttributeError:
pass | [
"def close_stream(self, input_stream):\n # type: (object) -> None\n # see https://docs.oracle.com/javase/7/docs/api/java/io/FilterInputStream.html#close()\n input_stream.close()",
"def close(self):\n self._output_fh.close()",
"def close(self):\n \n self.stream.close()",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
append(child) -> element. Appends child and returns self if self is not full or first non-full parent. | def append(self, child):
assert not self.full()
self.children.append(child)
child.parent = self
node = self
while node.full():
node = node.parent
return node | [
"def add_child(self, e):\n if self.children == None:\n raise TypeError('this element cannot have a child: %r' % self)\n if isinstance(e, basestring) and self.children and isinstance(self.children[-1], basestring):\n self.children[-1] += e\n else:\n self.children.append(e)\n if isinstance(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
delete_child() -> child. Delete last child and return it. | def delete_child(self):
child = self.children[-1]
del self.children[-1]
return child | [
"def delete_child(child_id):\n Child.objects(id=child_id).delete()\n return {'success': True, 'data': \"Data Deleted\"}, 200",
"def _delete(self, node):\n if self.num_children(node) == 2:\n raise ValueError('Position has two children')\n child = node._left if node._left else node._r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
close() -> parent. Close element and return first non-full element. | def close(self):
parent = self.parent
while parent.full():
parent = parent.parent
return parent | [
"def end_child(self):\n if self.cur_child is not None and not self.cur_child.closed:\n self.cur_child.end()\n self.cur_child = None",
"def lastDescendant(self, inclClosed=False):\n item = self\n while True:\n if item.childList and (item.open or inclClosed):\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
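The three rows above (`append`, `delete_child`, `close`) belong to one small bottom-up tree builder. A toy stand-in that assumes `full()` simply means "has reached a fixed number of children" (the real class is more nuanced):

```python
class Node:
    def __init__(self, arity=None):       # arity=None: never full (tree root)
        self.arity = arity
        self.children = []
        self.parent = self

    def full(self):
        return self.arity is not None and len(self.children) >= self.arity

    def append(self, child):
        assert not self.full()
        self.children.append(child)
        child.parent = self
        node = self
        while node.full():                 # climb to the first non-full ancestor
            node = node.parent
        return node

root = Node()
child = Node(arity=1)
root.append(child)                         # returns root: root is never full
print(child.append(Node()) is root)        # True: child filled up, so we climb
```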
parse_latex_math(string [,inline]) -> MathML-tree. Returns a MathML-tree parsed from string. inline=True is for inline math and inline=False is for displayed math. tree is the whole tree and node is the current element. | def parse_latex_math(string, inline=True):
# Normalize white-space:
string = ' '.join(string.split())
if inline:
node = mrow()
tree = math(node, inline=True)
else:
node = mtd()
tree = math(mtable(mtr(node)), inline=False)
while len(string) > 0:
... | [
"def parse_mathml(s):\n import xml.dom.minidom\n x = xml.dom.minidom.parseString(s)\n return parse_mathml_rhs(dom_child(x))",
"def compile_math(math):\n if isinstance(math, str):\n math = (\n math\n .replace('&&', 'and')\n .replace('||', 'or')\n .repl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
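The converter above builds its own MathML node classes; the inline/display wrapping it describes can be sketched with the standard library's ElementTree instead (attribute choices like `display='block'` are assumptions, not the converter's exact output):

```python
import xml.etree.ElementTree as ET

def math_skeleton(inline=True):
    math = ET.Element('math', xmlns='http://www.w3.org/1998/Math/MathML')
    if inline:
        node = ET.SubElement(math, 'mrow')                 # content goes here
    else:
        math.set('display', 'block')
        row = ET.SubElement(ET.SubElement(math, 'mtable'), 'mtr')
        node = ET.SubElement(row, 'mtd')                   # content goes here
    return math, node          # (whole tree, current insertion point)

tree, node = math_skeleton(inline=False)
ET.SubElement(node, 'mi').text = 'x'
print(ET.tostring(tree, encoding='unicode'))
```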
Return dictionary of Unicode character lists. For each of the `categories`, an item contains a list with all Unicode characters with `cp_min` <= codepoint <= `cp_max` that belong to the category. (The default values check every codepoint supported by Python.) | def unicode_charlists(categories, cp_min=0, cp_max=None):
# Determine highest code point with one of the given categories
# (may shorten the search time considerably if there are many
# categories with not too high characters):
if cp_max is None:
cp_max = max(x for x in xrange(sys.maxunicod... | [
"def unicode_charlists(categories, cp_min=0, cp_max=None):\r\n # Determine highest code point with one of the given categories\r\n # (may shorten the search time considerably if there are many\r\n # categories with not too high characters):\r\n if cp_max is None:\r\n cp_max = max(x for x in range... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
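A runnable cousin of `unicode_charlists`, built on `unicodedata.category` and restricted to the Basic Multilingual Plane for speed (the original scans every codepoint Python supports):

```python
import unicodedata
from collections import defaultdict

def unicode_charlists(categories, cp_min=0, cp_max=0xFFFF):
    charlists = defaultdict(list)
    for cp in range(cp_min, cp_max + 1):
        char = chr(cp)
        cat = unicodedata.category(char)
        if cat in categories:
            charlists[cat].append(char)
    return charlists

openers = unicode_charlists({'Ps'})       # 'Ps' = opening punctuation
print(openers['Ps'][:5])                  # ['(', '[', '{', ...]
```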
Docutils punctuation category sample strings. Return list of sample strings for the categories "Open", "Close", "Delimiters" and "Closing-Delimiters" used in the `inline markup recognition rules`_. | def punctuation_samples():
# Lists with characters in Unicode punctuation character categories
cp_min = 160 # ASCII chars have special rules for backwards compatibility
ucharlists = unicode_charlists(unicode_punctuation_categories, cp_min)
# match opening/closing characters
# --------------... | [
"def _run_split_on_punctuation(self, text):\n\n chars = list(text)\n i = 0\n start_new_word = True\n output = []\n while i < len(chars):\n char = chars[i]\n if self._is_punctuation(char):\n output.append([char])\n start_new_word ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Append the separator for table head. | def append_separator(self, separator):
self._rows.append([separator]) | [
"def add_divider(self):\n self.page += '<hr style=\"clear:both;\"/>\\n'",
"def set_separator(self) -> None:\n self.separator = len(self.lines)",
"def __writeSeparator(self, indent):\n self.__dev.write(\" \" * indent)\n self.__dev.write(\"<HR>\\n\")",
"def _Header(numCols):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return commented version of the passed text. | def comment(self, text):
return self.comment_begin(text)+'.\n' | [
"def get_comment_text():\n first = comment_start + len(lang.comment_start)\n return line[first:]",
"def _get_comment_text():\n comment_samples = [\n \"Malesu mauris nas lum rfusce vehicula bibend. Morbi.\",\n \"Nuncsed quamal felis donec rutrum class ipsumnam teger. Sedin me... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ensure the last line in body is terminated by new line. | def ensure_eol(self):
if len(self.body) > 0 and self.body[-1][-1] != '\n':
self.body.append('\n') | [
"def have_trailing_newline(line):\n\treturn line[-1] == '\\n' or line[-1] == '\\r' or line[-2:] == '\\r\\n'",
"def ensure_newline(self, n):\n assert n >= 0\n text = self._output.getvalue().rstrip('\\n')\n if not text:\n return\n self._output = StringIO()\n self._outpu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
append header with .TH and .SH NAME | def append_header(self):
# NOTE before everything
# .TH title_upper section date source manual
if self.header_written:
return
self.head.append(self.header())
self.head.append(MACRO_DEF)
self.header_written = 1 | [
"def make_header(args):\n header = os.path.join(args.output_dir,'header.sam')\n args.header = header\n header_handle = open(header,'w')\n header_handle.write('@HD\\tVN:1.4\\n')\n file_sam = open(os.path.join(args.output_dir,'watsonAligned.out.sam'))\n print(file_sam)\n for line in file_sam:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
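The header described in the row above is plain roff: a `.TH` line followed by the `NAME` section heading. What a minimal version of that output might look like (field values are placeholders, not what docutils emits):

```python
def man_header(title, section='1', date='', source='', manual=''):
    # .TH title_upper section date source manual, then the NAME section.
    th = '.TH "{}" {} "{}" "{}" "{}"\n'.format(title.upper(), section,
                                               date, source, manual)
    return th + '.SH NAME\n'

print(man_header('mytool'), end='')
# .TH "MYTOOL" 1 "" "" ""
# .SH NAME
```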
Create a fragment that has the given positional arguments as child nodes. | def __call__(self, *args):
return Fragment()(*args) | [
"def createDocumentFragment(*args):\n return DocumentFragment(*args)",
"def create_from_args(self, name, content=None, attributes=None, children=None):\n node = self.node_class(name, content, attributes, children)\n return [node]",
"def create_node(**kwargs):",
"def create_arguments(prima... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a new factory that is bound to the specified namespace. | def __getitem__(self, namespace):
return ElementFactory(namespace) | [
"def __getitem__(self, namespace):\n return ElementFactory(namespace)",
"def convert_namespace_to_factory(class_input):\r\n return decorate_class_methods(class_input, to_factory)",
"def get_factory(package):\r\n return functools.partial(get, package)",
"def __new__ (cls, *args, **kw):\n (u... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
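The two factory rows above (`__call__` building a fragment, `__getitem__` binding a namespace) follow a small pattern that is easy to show from scratch. This sketch is purely illustrative and deliberately not the genshi API:

```python
class ElementFactory:
    def __init__(self, namespace=None):
        self.namespace = namespace

    def __getattr__(self, name):            # factory.div -> an element
        qname = f'{{{self.namespace}}}{name}' if self.namespace else name
        return {'tag': qname, 'children': []}

    def __call__(self, *args):              # factory('a', 'b') -> a fragment
        return {'tag': None, 'children': list(args)}

    def __getitem__(self, namespace):       # factory['urn:x'] -> bound factory
        return ElementFactory(namespace)

tag = ElementFactory()
print(tag.div)                     # {'tag': 'div', 'children': []}
print(tag['urn:example'].item)     # {'tag': '{urn:example}item', 'children': []}
print(tag('a', 'b'))               # {'tag': None, 'children': ['a', 'b']}
```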
Ensure that every item on the stream is actually a markup event. | def _ensure(stream):
stream = iter(stream)
event = stream.next()
# Check whether the iterable is a real markup event stream by examining the
# first item it yields; if it's not we'll need to do some conversion
if type(event) is not tuple or len(event) != 3:
for event in chain([event]... | [
"def _ensure(stream):\r\n stream = iter(stream)\r\n event = next(stream)\r\n\r\n # Check whether the iterable is a real markup event stream by examining the\r\n # first item it yields; if it's not we'll need to do some conversion\r\n if type(event) is not tuple or len(event) != 3:\r\n for even... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
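The `_ensure` row above uses a "peek at the first item, then put it back" pattern. A generic, self-contained version of the same trick with `itertools.chain`:

```python
from itertools import chain

def ensure_triples(stream):
    stream = iter(stream)
    try:
        first = next(stream)
    except StopIteration:
        return iter(())                       # empty stream: nothing to check
    whole = chain([first], stream)            # put the peeked item back
    if isinstance(first, tuple) and len(first) == 3:
        return whole                          # already 3-tuples: pass through
    return ((item, None, None) for item in whole)   # otherwise wrap each item

print(list(ensure_triples(['a', 'b'])))   # [('a', None, None), ('b', None, None)]
print(list(ensure_triples([(1, 2, 3)])))  # [(1, 2, 3)]
```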
Return an item or slice of the attributes list. >>> attrs = Attrs([('href', ''), ('title', 'Foo')]) >>> attrs[1] ('title', 'Foo') | def __getitem__(self, i):
items = tuple.__getitem__(self, i)
if type(i) is slice:
return Attrs(items)
return items | [
"def __getitem__(self, index):\n return self.attribute_values[index]",
"def __getslice__(self, i, j):\r\n return Attrs(tuple.__getslice__(self, i, j))",
"def attribute_get(self, attr):\n attributes_struct = self.single_query_get('Attributes')\n attribute_struct = [x for x in attribut... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a slice of the attributes list. >>> attrs = Attrs([('href', ''), ('title', 'Foo')]) | def __getslice__(self, i, j):
return Attrs(tuple.__getslice__(self, i, j)) | [
"def attrsToList(self, attrs):\n return [g.Bunch(name=name, val=attrs.getValue(name))\n for name in attrs.getNames()]",
"def attributes(self):\n # \"\"\" Returns a List of an element's attributes \"\"\"\n # try:\n # return [Attr(key.lstrip('_'), value) for key, value in ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
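Both `Attrs` rows above are about keeping the subclass type when slicing a tuple; `__getslice__` only matters on Python 2, while on Python 3 handling `slice` objects inside `__getitem__` is enough. A minimal sketch:

```python
class Attrs(tuple):
    def __getitem__(self, i):
        items = tuple.__getitem__(self, i)
        if isinstance(i, slice):
            return Attrs(items)     # slices stay Attrs, single items stay tuples
        return items

attrs = Attrs([('href', ''), ('title', 'Foo')])
print(attrs[1])          # ('title', 'Foo')
print(type(attrs[:1]))   # <class '__main__.Attrs'>
```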