query
stringlengths
9
9.05k
document
stringlengths
10
222k
negatives
listlengths
19
20
metadata
dict
Return a new instance that contains the attributes in `attrs` in addition to any already existing attributes. Any attributes in the new set that have a value of `None` are removed.
def __or__(self, attrs): remove = set([an for an, av in attrs if av is None]) replace = dict([(an, av) for an, av in attrs if an in self and av is not None]) return Attrs([(sn, replace.get(sn, sv)) for sn, sv in self if sn not in remove] + ...
[ "def copy(self):\n\t\tnewAttr = AtomAttributes(None, None, None, None, None, None)\n\t\tnewAttr.__dict__ = self.__dict__.copy()\n\t\treturn newAttr", "def copy(self,**kwds):\n new_ds = copy.copy(self)\n def _find_set(kwd):\n val = kwds.get(kwd)\n if val is not None:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a new instance with all attributes with a name in `names` are removed.
def __sub__(self, names): if isinstance(names, basestring): names = (names,) return Attrs([(name, val) for name, val in self if name not in names])
[ "def __sub__(self, names):\r\n if isinstance(names, str):\r\n names = (names,)\r\n return Attrs([(name, val) for name, val in self if name not in names])", "def remove_attr(self, name):\n del self.attributes_dict[name]", "def strip_attributes(self):\r\n original_attributes...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a `Markup` object which is the concatenation of the strings in the given sequence, where this `Markup` object is the separator between the joined elements. Any element in the sequence that is not a `Markup` instance is automatically escaped.
def join(self, seq, escape_quotes=True): return Markup(unicode.join(self, [escape(item, quotes=escape_quotes) for item in seq]))
[ "def html_join(sep, sequence):\n sep_safe = conditional_escape(sep)\n return mark_safe(sep_safe.join(conditional_escape(e) for e in sequence))", "def join(self, iterable):\r\n result = ANSIString('')\r\n last_item = None\r\n for item in iterable:\r\n if last_item is not None:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a Markup instance from a string and escape special characters it may contain (, & and \"). >>> escape('"1 If the `quotes` parameter is set to `False`, the \" character is left as is. Escaping quotes is generally only required for strings that are to be used in attribute values. >>> escape('"1
def escape(cls, text, quotes=True): if not text: return cls() if type(text) is cls: return text if hasattr(text, '__html__'): return cls(text.__html__()) text = text.replace('&', '&amp;') \ .replace('<', '&lt;') \ ...
[ "def escape_quotes(self, str): \n return str.replace(\"\\\"\", \"\\\\\\\"\")", "def escape(t):\n return (t\n .replace(\"&quot;\", '@quot;')\n .replace(\"&amp;\", \"@amp;\").replace(\"&lt;\", \"@lt;\").replace(\"&gt;\", \"@gt;\")\n\n .replace(\"&\", \"&amp;\").replace(\"<\", \"&lt;\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reverseescapes &, , and \" and returns a `unicode` object. >>> Markup('1 &lt; 2').unescape() u'1 < 2'
def unescape(self): if not self: return '' return unicode(self).replace('&#34;', '"') \ .replace('&gt;', '>') \ .replace('&lt;', '<') \ .replace('&amp;', '&')
[ "def htmlunescape(value):\n\n retVal = value\n if value and isinstance(value, basestring):\n codes = ((\"&lt;\", '<'), (\"&gt;\", '>'), (\"&quot;\", '\"'), (\"&nbsp;\", ' '), (\"&amp;\", '&'), (\"&apos;\", \"'\"))\n retVal = reduce(lambda x, y: x.replace(y[0], y[1]), codes, retVal)\n try:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the `QName` instance.
def __new__(cls, qname): if type(qname) is cls: return qname qname = qname.lstrip('{') parts = qname.split('}', 1) if len(parts) > 1: self = unicode.__new__(cls, '{%s' % qname) self.namespace, self.localname = map(unicode, parts) else...
[ "def __new__(cls, qname):\r\n if type(qname) is cls:\r\n return qname\r\n\r\n qname = qname.lstrip('{')\r\n parts = qname.split('}', 1)\r\n if len(parts) > 1:\r\n self = str.__new__(cls, '{%s' % qname)\r\n self.namespace, self.localname = list(map(str, pa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine whether the given css property declaration is to be considered safe for inclusion in the output.
def is_safe_css(self, propname, value): if propname not in self.safe_css: return False if propname.startswith('margin') and '-' in value: # Negative margins can be used for phishing return False return True
[ "def is_property_allowed(prop):\n return self.allowed_styles is None or \\\n prop.lower() in self.allowed_styles", "def _isprop(self, attr: str) -> bool:\n\n return isinstance(attr, property)", "def _is_property(self,key):\n return bool(re.match(database.RE_KIMID, key))",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine whether the given URI is to be considered safe for inclusion in the output. The default implementation checks whether the scheme of the URI is in the set of allowed URIs (`safe_schemes`). >>> sanitizer = HTMLSanitizer()
def is_safe_uri(self, uri): if '#' in uri: uri = uri.split('#', 1)[0] # Strip out the fragment identifier if ':' not in uri: return True # This is a relative URI chars = [char for char in uri.split(':', 1)[0] if char.isalnum()] return ''.join(chars).lower() ...
[ "def safe_uri(uri):\n path, query, frag = split_path(uri)\n safe = True\n for part in (path, query, frag):\n safe = safe and safe_chars_regex.search(part)\n return safe", "def _ManifestUrlHasSecureScheme(self):\n secure_schemes = (\n \"file\",\n \"https\",\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove potentially dangerous property declarations from CSS code. In particular, properties using the CSS ``url()`` function with a scheme
def sanitize_css(self, text): decls = [] text = self._strip_css_comments(self._replace_unicode_escapes(text)) for decl in text.split(';'): decl = decl.strip() if not decl: continue try: propname, value = decl.split(':', ...
[ "def minify_properties(src):\n min_re = re.compile(r\"(^|[^\\\\](?:\\\\\\\\)*)#.*$\", re.M)\n src = min_re.sub(r\"\\1\", src)\n src = re.sub(r\"\\n+\", r\"\\n\", src)\n return src", "def remove_urls(document):\n return re.sub(r'https?://(www\\.)?[-\\w@:%.\\+~#=]{2,256}\\.[a-z]{2,6}\\b([-\\w@:%_\\+....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Translate any localizable strings in the given stream. This function shouldn't be called directly. Instead, an instance of the `Translator` class should be registered as a filter with the `Template` or the `TemplateLoader`, or applied as a regular stream filter. If used as a template filter, it should be inserted in fr...
def __call__(self, stream, ctxt=None, translate_text=True, translate_attrs=True): ignore_tags = self.ignore_tags include_attrs = self.include_attrs skip = 0 xml_lang = XML_NAMESPACE['lang'] if not self.extract_text: translate_text = False ...
[ "def __call__(self, stream, ctxt=None, translate_text=True,\r\n translate_attrs=True):\r\n ignore_tags = self.ignore_tags\r\n include_attrs = self.include_attrs\r\n skip = 0\r\n xml_lang = XML_NAMESPACE['lang']\r\n if not self.extract_text:\r\n translate...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract localizable strings from the given template stream. For every string found, this function yields a ``(lineno, function,
def extract(self, stream, gettext_functions=GETTEXT_FUNCTIONS, search_text=True, comment_stack=None): if not self.extract_text: search_text = False if comment_stack is None: comment_stack = [] skip = 0 xml_lang = XML_NAMESPACE['lang'] ...
[ "def extract(fileobj, keywords, comment_tags, options):\n encoding = options.get('encoding', 'utf-8')\n\n original_position = fileobj.tell()\n\n text = fileobj.read().decode(encoding)\n\n if django.VERSION[:2] >= (1, 9):\n tokens = Lexer(text).tokenize()\n else:\n tokens = Lexer(text, N...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convenience function to register the `Translator` filter and the related directives with the given template.
def setup(self, template): template.filters.insert(0, self) if hasattr(template, 'add_directives'): template.add_directives(Translator.NAMESPACE, self)
[ "def on_template_loaded(cls, template):\n translator = Translator(ugettext)\n template.filters.insert(0, translator)\n\n if hasattr(template, 'add_directives'):\n template.add_directives(Translator.NAMESPACE, translator)", "def templateFilter(func):\n jinja2_env.filters[func...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Interpolate the given message translation with the events in the buffer and return the translated stream.
def translate(self, string, regex=re.compile(r'%\((\w+)\)s')): substream = None def yield_parts(string): for idx, part in enumerate(regex.split(string)): if idx % 2: yield self.values[part] elif part: yield (TEX...
[ "def interpolate(stream):\n\n current_index = None\n values_buffer = []\n\n for (t,v) in stream:\n if t:\n if current_index is not None and values_buffer:\n delta = (v - current_index) / len(values_buffer)\n for v2 in values_buffer:\n yield...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract strings from Python bytecode. >>> from genshi.template.eval import Expression >>> expr = Expression('_("Hello")') >>> list(extract_from_code(expr, GETTEXT_FUNCTIONS)) [('_', u'Hello')] >>> expr = Expression('ngettext("You have %(num)s item", ' ... '"You have %(num)s items", num)') >>> list(extract_from_code(exp...
def extract_from_code(code, gettext_functions): def _walk(node): if isinstance(node, _ast.Call) and isinstance(node.func, _ast.Name) \ and node.func.id in gettext_functions: strings = [] def _add(arg): if isinstance(arg, _ast.Str) and isinstance(...
[ "def extract_from_code(code, gettext_functions):\r\n def _walk(node):\r\n if isinstance(node, _ast.Call) and isinstance(node.func, _ast.Name) \\\r\n and node.func.id in gettext_functions:\r\n strings = []\r\n def _add(arg):\r\n if isinstance(arg, _ast.St...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simplify a marked stream.
def _simplify(stream, with_attrs=False): def _generate(): for mark, (kind, data, pos) in stream: if kind is START: if with_attrs: data = (unicode(data[0]), dict((unicode(k), v) for k, v in data[1])) ...
[ "def _simplify(stream, with_attrs=False):\r\n def _generate():\r\n for mark, (kind, data, pos) in stream:\r\n if kind is START:\r\n if with_attrs:\r\n data = (str(data[0]), dict((str(k), v)\r\n for k, v in data[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply the transform filter to the marked stream.
def __call__(self, stream, keep_marks=False): transforms = self._mark(stream) for link in self.transforms: transforms = link(transforms) if not keep_marks: transforms = self._unmark(transforms) return Stream(transforms, serializer=geta...
[ "def apply_transform(self, transform, include_scatter=False):\n self._transformed_events = self._transform(transform, include_scatter=include_scatter)\n self._include_scatter_option = include_scatter\n self.transform = transform", "def flatland_filter(stream, context):\n return Stream(Flat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Invert selection so that marked events become unmarked, and vice versa. Specificaly, all marks are converted to null marks, and all null marks are converted to OUTSIDE marks. >>> html = HTML('Some test text', encoding='utf8') >>> print(html | Transformer('//em').invert().trace()) ('OUTSIDE', ('START', (QName('body'), A...
def invert(self): return self.apply(InvertTransformation())
[ "def inverse(transformer, inverse='identity', inverse_dropped='nan'):\n if isinstance(transformer, TransformerExtensions):\n transformer.inverse = inverse\n return transformer\n\n return TransformerExtensions(\n transformer,\n inverse=inverse,\n inverse_dropped=inverse_dropp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wrap selection in an element. >>> html = HTML('Some Title' ... 'Some body text.', ... encoding='utf8') >>> print(html | Transformer('.//em').wrap('strong')) Some TitleSome body text.
def wrap(self, element): return self.apply(WrapTransformation(element))
[ "def wrap(text, open_tag, close_tag):\n return ''.join((open_tag, text, close_tag, ))", "def wrap(self, text: str) -> str:\n return \"\\n\".join(textwrap.wrap(\n text, self.width,\n initial_indent=self.prefix + self.initial_indent,\n subsequent_indent=self.prefix + self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copy selection into buffer. The buffer is replaced by each contiguous selection before being passed to the next transformation. If accumulate=True, further selections will be appended to the buffer rather than replacing it. >>> from genshi.builder import tag >>> buffer = StreamBuffer() >>> html = HTML('Some Title' ... ...
def copy(self, buffer, accumulate=False): return self.apply(CopyTransformation(buffer, accumulate))
[ "def simpleCopySelection():\n # ideas / tests / original:\n # push into current group..\n\n App = FreeCAD\n Gui = FreeCADGui\n\n selection = FreeCADGui.Selection.getSelection()\n\n for obj in selection:\n obj_new = object_create_copy(obj)\n obj_new.ViewObject.Visibility = True\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copy selection into buffer and remove the selection from the stream. >>> from genshi.builder import tag >>> buffer = StreamBuffer() >>> html = HTML('Some Title' ... 'Some body text.', ... encoding='utf8') >>> print(html | Transformer('.//em/text()').cut(buffer) ... .end().select('.//em').after(tag.h1(buffer))) Some Tit...
def cut(self, buffer, accumulate=False): return self.apply(CutTransformation(buffer, accumulate))
[ "def cut(self):\n self.focus()\n self.dispatch('Cut')\n return self", "def editCut(self):\n widget = QtGui.QApplication.focusWidget()\n try:\n if widget.hasSelectedText():\n widget.cut()\n return\n except AttributeError:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies a function to the ``data`` element of events of ``kind`` in the selection. >>> html = HTML('Some Title' ... 'Some body text.', ... encoding='utf8') >>> print(html | Transformer('head/title').map(unicode.upper, TEXT)) SOME TITLESome body text.
def map(self, function, kind): return self.apply(MapTransformation(function, kind))
[ "def transform(self, node):\n try:\n handler = getattr(self, 'transform_%s' % node.kind.name.lower())\n return handler(node)\n except AttributeError:\n print(\n \"Ignoring node of type %s (%s)\" % (\n node.k...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generator that parses the HTML source, yielding markup events.
def parse(self): def _generate(): if self.encoding: reader = codecs.getreader(self.encoding) source = reader(self.source) else: source = self.source try: bufsize = 4 * 1024 # 4K done = Fa...
[ "def parse(self):\r\n def _generate():\r\n if self.encoding:\r\n reader = codecs.getreader(self.encoding)\r\n source = reader(self.source)\r\n else:\r\n source = self.source\r\n try:\r\n bufsize = 4 * 1024 # 4K\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a serializer object for the given method.
def get_serializer(method='xml', **kwargs): if isinstance(method, basestring): method = {'xml': XMLSerializer, 'xhtml': XHTMLSerializer, 'html': HTMLSerializer, 'text': TextSerializer}[method.lower()] return method(**kwargs)
[ "def get_serializer(method='xml', **kwargs):\r\n if isinstance(method, str):\r\n method = {'xml': XMLSerializer,\r\n 'xhtml': XHTMLSerializer,\r\n 'html': HTMLSerializer,\r\n 'text': TextSerializer}[method.lower()]\r\n return method(**kwargs)", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the HTML serializer.
def __init__(self, doctype=None, strip_whitespace=True, cache=True): super(HTMLSerializer, self).__init__(doctype, False) self.filters = [EmptyTagFilter()] if strip_whitespace: self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE, ...
[ "def __init__(self, encoding= 'latin-1'):\n html.parser.HTMLParser.__init__(self)\n self._reset()\n self.encoding = encoding", "def init_renderers(cls):", "def __init__(self, template):\n self.template = template\n \n with open(template) as f:\n logging.info(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests if two node tests are equal
def nodes_equal(node1, node2): if type(node1) is not type(node2): return False if type(node1) == LocalNameTest: return node1.name == node2.name return True
[ "def test_node_eq(self):\n node1 = ts.Node('a', 2)\n assert node1 == copy.copy(node1)\n assert node1 != ts.Node('b', 2)\n assert node1 != ts.Node('a', 3)\n assert node1 != ts.Node('a', 2, ts.Node('a', 1))\n assert ts.Node('a', 2, ts.Node('a', 1)) != node1\n node1.lef...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert value to a scalar. If a single element Attrs() object is passed the value of the single attribute will be returned.
def as_scalar(value): if isinstance(value, Attrs): assert len(value) == 1 return value[0][1] else: return value
[ "def to_scalar(self, v):\n if v is None:\n return v\n else:\n return v.asnumpy().item()", "def extractValue(self, model, item):\n return getattr(item, self.attribute.attrname)", "def Value(self):\n if self.IsNull:\n return None\n elif self.IsBool:\n ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply the given directives to the stream.
def _apply_directives(stream, directives, ctxt, vars): if directives: stream = directives[0](iter(stream), directives[1:], ctxt, **vars) return stream
[ "def directives(self, directives):\n\n self._directives = directives", "def ProcessDirectives(self, input):\n temp = input\n for directive in self.data.split('\\n'):\n directive = directive.split(',')\n temp = linesub(directive[0], directive[1], temp)\n return tem...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Execute the given `Suite` object.
def _exec_suite(suite, ctxt, vars=None): if vars: ctxt.push(vars) ctxt.push({}) suite.execute(ctxt) if vars: top = ctxt.pop() ctxt.pop() ctxt.frames[0].update(top)
[ "def run_suite(self, suite, **kwargs):\n return PyunitConsumer(\n verbosity=self.verbosity,\n failfast=self.failfast,\n ).run(suite)", "def run_suite(self, suite, **kwargs):\n options = {\n 'verbosity': getattr(settings, 'TEST_OUTPUT_VERBOSE', False),\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a key for the given directive class that should be used to sort it among other directives on the same `SUB` event. The default implementation simply returns the index of the directive in the `directives` list.
def get_directive_index(self, dir_cls): if dir_cls in self._dir_order: return self._dir_order.index(dir_cls) return len(self._dir_order)
[ "def key_from_class_name(class_name):\n assert class_name in Transaction._class_names\n return Transaction._class_names[class_name]", "def class_to_idx(self):\n\n return {cat: i for i, cat in enumerate(self.CLASSES)}", "def match_class(self, key):\n self._keystrokes.append(key)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Internal stream filter that performs inclusion of external template files.
def _include(self, stream, ctxt, **vars): from genshi.template.loader import TemplateNotFound for event in stream: if event[0] is INCLUDE: href, cls, fallback = event[1] if not isinstance(href, basestring): parts = [] ...
[ "def _include(self, stream, ctxt, **vars):\r\n from genshi.template.loader import TemplateNotFound\r\n\r\n for event in stream:\r\n if event[0] is INCLUDE:\r\n href, cls, fallback = event[1]\r\n if not isinstance(href, str):\r\n parts = []\r\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses the given expression, raising a useful error message when a syntax error is encountered.
def _parse_expr(cls, expr, template, lineno=-1, offset=-1): try: return expr and Expression(expr, template.filepath, lineno, lookup=template.lookup) or None except SyntaxError, err: err.msg += ' in expression "%s" of "%s" directive' % (...
[ "def _parse_expr(cls, expr, template, lineno=-1, offset=-1):\r\n try:\r\n return expr and Expression(expr, template.filepath, lineno,\r\n lookup=template.lookup) or None\r\n except SyntaxError as err:\r\n err.msg += ' in expression \"%s\" of ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes the AST representation of an assignment, and returns a function that applies the assignment of a given value to a dictionary.
def _assignment(ast): def _names(node): if isinstance(node, _ast.Tuple): return tuple([_names(child) for child in node.elts]) elif isinstance(node, _ast.Name): return node.id def _assign(data, value, names=_names(ast)): if type(names) is tuple: ...
[ "def assignment(self, symbol_table):\n symbol_table[self.key] = self.value.evaluate(self.value, symbol_table)", "def eval_assignment(assignment, caller_parameters, caller_arguments, motif_node_dict, local_dict):\n\tif type(assignment.rvalue).__name__ == 'FuncCall':\n\t\tmotif_node, tree_node = eval_function_ca...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Evaluate the expression against the given data dictionary.
def evaluate(self, data): __traceback_hide__ = 'before_and_this' _globals = self._globals(data) return eval(self.code, _globals, {'__data__': data})
[ "def evaluate(expr, locals):", "def do_eval(expr, context):\n return eval(expr, context.vals)", "def evaluate(self):\n self.arithmeticInorder()\n return eval(self._expression)", "def evaluate(self, expr_object, eval_context_object):\n\n from exprs.evaluation import evaluate_expression_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Raise an ``UndefinedError`` immediately.
def undefined(cls, key, owner=UNDEFINED): __traceback_hide__ = True raise UndefinedError(key, owner=owner)
[ "def failure(self):\n raise RuntimeError, \"This function always raises an error.\"", "def test_undefined_rule(self):\n tree = rule_grammar.parse('boy = howdy\\n')\n assert_raises(UndefinedLabel, RuleVisitor().visit, tree)", "def assert_undefined(actual: Any, msg: str = '') -> None:\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Factory for a load function that delegates to other loaders depending on the prefix of the requested template path. The prefix is stripped from the filename when passing on the load request to the delegate. >>> load = prefixed(
def prefixed(**delegates): def _dispatch_by_prefix(filename): for prefix, delegate in delegates.items(): if filename.startswith(prefix): if isinstance(delegate, basestring): delegate = directory(delegate) filepath,...
[ "def prefixed(**delegates):\r\n def _dispatch_by_prefix(filename):\r\n for prefix, delegate in list(delegates.items()):\r\n if filename.startswith(prefix):\r\n if isinstance(delegate, str):\r\n delegate = directory(delegate)\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register a custom `DirectiveFactory` for a given namespace.
def add_directives(self, namespace, factory): assert not self._prepared, 'Too late for adding directives, ' \ 'template already prepared' self._stream = self._extract_directives(self._stream, namespace, factory)
[ "def register(self, service, factory=..., instance=..., scope=..., **kwargs):\n ...", "def add_factory(self, node_name, factory):\n self.factories[node_name] = factory", "def convert_namespace_to_factory(class_input):\r\n return decorate_class_methods(class_input, to_factory)", "def register_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find a template specified in python 'dot' notation, or load one from a string.
def load_template(self, templatename, template_string=None): if template_string is not None: return self.template_class(template_string) if self.use_package_naming: divider = templatename.rfind('.') if divider >= 0: from pkg_resources import re...
[ "def _find_template(self, filename, start=0):\n\n filename = filename.lstrip(\"/\").replace(\"/\", os.sep)\n cachename = \":@@{0}@@:{1}\".format(start, filename)\n\n if not self._path:\n raise RestrictedError(\n \"Attempt to load template from empty search path: {0}\"....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Render the template to a string using the provided info.
def render(self, info, format=None, fragment=False, template=None): kwargs = self._get_render_options(format=format, fragment=fragment) return self.transform(info, template).render(**kwargs)
[ "def render_str(self, template, **params):\n t = jinja_env.get_template(template)\n return t.render(params)", "def render_template(self, *args, **kwargs):\n return self.renderer.render(*args, **kwargs)", "def render(self, template, **kw):\n t = jinja_env.get_template(template) \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify more complex nesting using otherwise.
def test_complex_nesting_otherwise(self): tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/"> <div py:choose="1"> <div py:when="1" py:choose="2"> <span py:when="1">FAIL</span> <span py:otherwise="">OK</span> </div> ...
[ "def is_nested(self, ):\n\t\tpass", "def IsNestedFamORAssem(self) -> bool:", "def IsNestedFamANDAssem(self) -> bool:", "def testTryExceptElse(self):\n token = self.parser.parse(filename='evo/TryExceptElse.evo')\n trytoken = token.content[0]\n res = trytoken.siblings()\n self.assert...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that a when directive with a strip directive actually strips of the outer element.
def test_when_with_strip(self): tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/"> <div py:choose="" py:strip=""> <span py:otherwise="">foo</span> </div> </doc>""") self.assertEqual("""<doc> <span>foo</span> </doc>"""...
[ "def test_when_without_test(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <py:when>foo</py:when>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.gen...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that a `when` directive outside of a `choose` directive is reported as an error.
def test_when_outside_choose(self): tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/"> <div py:when="xy" /> </doc>""") self.assertRaises(TemplateRuntimeError, str, tmpl.generate())
[ "def test_when_without_test(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <py:when>foo</py:when>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.gen...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that an `otherwise` directive outside of a `choose` directive is reported as an error.
def test_otherwise_outside_choose(self): tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/"> <div py:otherwise="" /> </doc>""") self.assertRaises(TemplateRuntimeError, str, tmpl.generate())
[ "def test_otherwise_without_test(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <py:otherwise>foo</py:otherwise>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertEqual(\"\"\"<doc>\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that an `when` directive that doesn't have a `test` attribute is reported as an error.
def test_when_without_test(self): tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/"> <div py:choose="" py:strip=""> <py:when>foo</py:when> </div> </doc>""") self.assertRaises(TemplateRuntimeError, str, tmpl.generate())
[ "def test_when_outside_choose(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:when=\"xy\" />\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())", "def test_models_edx_problem_check_fail_with_valid_stat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that an `otherwise` directive can be used without a `test` attribute.
def test_otherwise_without_test(self): tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/"> <div py:choose="" py:strip=""> <py:otherwise>foo</py:otherwise> </div> </doc>""") self.assertEqual("""<doc> foo </doc>""", tmpl...
[ "def test_otherwise_outside_choose(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:otherwise=\"\" />\r\n </doc>\"\"\")\r\n self.assertRaises(TemplateRuntimeError, str, tmpl.generate())", "def test_when_without_test(self):\r\n t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that a named template function with a strip directive actually strips of the outer element.
def test_function_with_strip(self): tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/"> <div py:def="echo(what)" py:strip=""> <b>${what}</b> </div> ${echo('foo')} </doc>""") self.assertEqual("""<doc> <b>foo</b> ...
[ "def test_when_with_strip(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <span py:otherwise=\"\">foo</span>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertEqual(\"\"\"<doc>\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that the directive works as expected in a text template.
def test_in_text_template(self): tmpl = TextTemplate(""" #def echo(greeting, name='world') ${greeting}, ${name}! #end ${echo('Hi', name='you')} """) self.assertEqual(""" Hi, you! """, tmpl.generate().render(encodi...
[ "def testBasicTagAbsence(self):\n template = '{{ ifpresent [tag] }} hello {{ endif }}'\n self.assertFalse(self.parse(template))", "def testTemplateInline(self):\n example = 'Hello [location]'\n template = '{{ inline example }}'\n self.parser['example'] = self.tmpl(example)\n self.assertEqual(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that a named template function using "star arguments" works as expected.
def test_function_with_star_args(self): tmpl = MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/"> <div py:def="f(*args, **kwargs)"> ${repr(args)} ${repr(sorted(kwargs.items()))} </div> ${f(1, 2, a=3, b=4)} </doc>""") self....
[ "def test_variable_arguments(self):\n def foo(*args):\n return tuple(args)\n provider = FunctionProvider(foo)\n wrapped_function = provider()\n self.assertSequenceEqual(wrapped_function(1, 2), (1, 2))\n self.assertSequenceEqual(wrapped_function(1), (1,))", "def foo4(_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify an empty 'for' value is an error
def test_for_with_empty_value(self): try: MarkupTemplate("""<doc xmlns:py="http://genshi.edgewall.org/"> <py:for each=""> empty </py:for> </doc>""", filename='test.html').generate() self.fail('ExpectedTemplateSyntaxError') ...
[ "def test_for_with_empty_value(self):\r\n try:\r\n MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <py:for each=\"\">\r\n empty\r\n </py:for>\r\n </doc>\"\"\", filename='test.html').generate()\r\n self.fail('Exp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that outputting context data in attributes escapes quotes.
def test_attr_escape_quotes(self): tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/"> <elem class="$myvar"/> </div>""") self.assertEqual("""<div> <elem class="&#34;foo&#34;"/> </div>""", str(tmpl.generate(myvar='"foo"')))
[ "def test_quotes(self):\n node1 = Attribute(wraptext(\"id\"), wraptext(\"foo\"), None)\n node2 = Attribute(wraptext(\"id\"), wraptext(\"bar\"))\n node3 = Attribute(wraptext(\"id\"), wraptext(\"foo bar baz\"))\n self.assertIs(None, node1.quotes)\n self.assertEqual('\"', node2.quote...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that a namespace declaration on an element that is removed from the generated stream does not get pushed up to the next nonstripped element (see ticket 107).
def test_namespace_on_removed_elem(self): tmpl = MarkupTemplate("""<?xml version="1.0"?> <Test xmlns:py="http://genshi.edgewall.org/"> <Size py:if="0" xmlns:t="test">Size</Size> <Item/> </Test>""") self.assertEqual("""<?xml version="1.0"?>\n<Test> ...
[ "def check_namespace(self):\n if not self.tree:\n self.xml_validate()\n \n root = self.tree.getroot()\n self.namespace = root.get(\"targetNamespace\")\n if self.namespace is None:\n self.namespace = \"\"", "def remove_namespace(doc, namespace):\n ns =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the text with all entities and tags removed. >>> plaintext('1 &lt; 2') u'1 < 2' The `keeplinebreaks` parameter can be set to ``False`` to replace any line breaks by simple spaces.
def plaintext(text, keeplinebreaks=True): text = stripentities(striptags(text)) if not keeplinebreaks: text = text.replace('\n', ' ') return text
[ "def stripentities(text, keepxmlentities=False):\r\n def _replace_entity(match):\r\n if match.group(1): # numeric entity\r\n ref = match.group(1)\r\n if ref.startswith('x'):\r\n ref = int(ref[1:], 16)\r\n else:\r\n ref = int(ref, 10)\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a copy of the given text with any character or numeric entities replaced by the equivalent UTF8 characters. >>> stripentities('1 &lt; 2') u'1 >> stripentities('more &hellip;') u'more \u2026' >>> stripentities('&8230;') u'\u2026' >>> stripentities('&x2026;') u'\u2026' If the `keepxmlentities` parameter is provide...
def stripentities(text, keepxmlentities=False): def _replace_entity(match): if match.group(1): # numeric entity ref = match.group(1) if ref.startswith('x'): ref = int(ref[1:], 16) else: ref = int(ref, 10) return unichr(r...
[ "def stripentities(text, keepxmlentities=False):\r\n def _replace_entity(match):\r\n if match.group(1): # numeric entity\r\n ref = match.group(1)\r\n if ref.startswith('x'):\r\n ref = int(ref[1:], 16)\r\n else:\r\n ref = int(ref, 10)\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a copy of the text with any XML/HTML tags removed. >>> striptags('<span>Foo</span> bar') 'Foo bar' >>> striptags('<span class="bar">Foo</span>') 'Foo' >>> striptags('Foo<br />') 'Foo'
def striptags(text): return _STRIPTAGS_RE.sub('', text)
[ "def remove_tags(text):\n pattern = re.compile('<.*?>')\n return pattern.sub(r'', text)", "def strip_html_tags(string):\n return re.sub('<[^<]+?>', '', string)", "def strip_tags(html):\n\n s = HTMLStripper()\n s.feed(html)\n stripped = s.get_data()\n # Remove extra s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use this method to apply an iterable of filters to a stream. If lexer is given it's forwarded to the filter, otherwise the filter receives `None`.
def apply_filters(stream, filters, lexer=None): def _apply(filter_, stream): for token in filter_.filter(lexer, stream): yield token for filter_ in filters: stream = _apply(filter_, stream) return stream
[ "def apply(filters: List[Callable], q: Queryable) -> Queryable:\n for f in filters:\n q = f(q)\n return q", "def set_filters(filter_list):", "def filters(self, filters):\n\n for f in filters:\n self.filter(f[\"attribute_name\"], f[\"value\"], f[\"operator\"])", "def apply_filter...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` tuples and write it into ``outfile``.
def format(self, tokensource, outfile): if self.encoding: # wrap the outfile in a StreamWriter outfile = codecs.lookup(self.encoding)[3](outfile) return self.format_unencoded(tokensource, outfile)
[ "def write_file(tokens, f):\n for t in tokens:\n f.write(\"%s:\\n\" % t[0])\n for entry in t[1:]:\n f.write(\"\\t%s\\n\" % entry)", "def write_token(self, token):\n\n type = token.type\n value = token.value\n\n if type == 'keyword': # check for keyword\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the css class of this token type prefixed with the classprefix option.
def _get_css_class(self, ttype): ttypeclass = _get_ttype_class(ttype) if ttypeclass: return self.classprefix + ttypeclass return ''
[ "def _get_class_string(self):\n\n classes = self.attrs.get(\"class\", None)\n\n # No classes were set in the attributes\n if not classes:\n return \" \".join(self.classes)\n\n classes = classes.value\n\n # Make room for the classes set in the tag\n if self.classes:\n classes += \" \"\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return CSS style definitions for the classes produced by the current highlighting style. ``arg`` can be a string or list of selectors to insert before the token type classes.
def get_style_defs(self, arg=None): if arg is None: arg = ('cssclass' in self.options and '.'+self.cssclass or '') if isinstance(arg, basestring): args = [arg] else: args = list(arg) def prefix(cls): if cls: cls =...
[ "def get_style_defs(self, arg=None):\r\n if arg is None:\r\n arg = ('cssclass' in self.options and '.'+self.cssclass or '')\r\n if isinstance(arg, str):\r\n args = [arg]\r\n else:\r\n args = list(arg)\r\n\r\n def prefix(cls):\r\n if cls:\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Just format the tokens, without any wrapping tags. Yield individual lines.
def _format_lines(self, tokensource): nocls = self.noclasses lsep = self.lineseparator # for <span style=""> lookup only getcls = self.ttype2class.get c2s = self.class2style escape_table = _escape_html_table tagsfile = self.tagsfile lspan = '' ...
[ "def tokens(self):\n for t in self._ast.tokens:\n yield t", "def token_layout_generator(self):\n tfs_space, tfs_newline = ' ', os.linesep\n yield (Token.Prompt, self.config['IPYSH_TERMINAL_PROMPT'])\n\n layout = self.config['IPYSH_TOKEN_LAYOUT']\n if layout not in sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Highlight the lines specified in the `hl_lines` option by post-processing the token stream coming from `_format_lines`.
def _highlight_lines(self, tokensource): hls = self.hl_lines for i, (t, value) in enumerate(tokensource): if t != 1: yield t, value if i + 1 in hls: # i + 1 because Python indexes start at 0 if self.noclasses: style = '...
[ "def _rehighlight_lines(self, lines):\r\n if self.document() is None:\r\n return\r\n for line in lines:\r\n block = self.document().findBlockByNumber(line)\r\n self.rehighlightBlock(block)", "def _highlit_line(content, offsets, markup, markdown, encoding):\n def c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the character size.
def get_char_size(self): return self.fonts['NORMAL'].getsize('M')
[ "def getTextLen(self):\r\n return self.TextLen", "def getTextSize(self):\n return self.textSize", "def string_length(self):\n return type_get_string_length(self)", "def getLength(self) -> \"int\":\n return _coin.SbName_getLength(self)", "def characters_count(self) -> int:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the font based on bold and italic flags.
def get_font(self, bold, oblique): if bold and oblique: return self.fonts['BOLDITALIC'] elif bold: return self.fonts['BOLD'] elif oblique: return self.fonts['ITALIC'] else: return self.fonts['NORMAL']
[ "def Font(self, attr=None):\n if attr is None:\n self._font = 0\n else:\n mask = 1 << attr\n self._font ^= mask\n font = self._font & ((1 << renderer.BOLD) |\n (1 << renderer.CODE) |\n (1 << renderer.ITALIC))\n if font & (1 << renderer.CODE)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the height of a line.
def _get_line_height(self): return self.fonth + self.line_pad
[ "def get_height(self):\n\t\treturn self.y[1] - self.y[0]", "def lineHeight(scr, lineNode):\n if lineNode is None:\n return 0\n manyLines = (len(lineNode.value)+1)//scr.getmaxyx()[1]+1\n # above solution doesn't account for tabs\n return manyLines", "def _get_height(self) -> \"int\" :\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the Y coordinate of a line number.
def _get_line_y(self, lineno): return lineno * self._get_line_height() + self.image_pad
[ "def ycoord(pt):\n return pt.y", "def get_ycoord(self, y):\n return (y - self.ylimits[0]) / self.dy", "def get_pos_y(self):\n return self._position[1]", "def getline(self, bno):\r\n return self.breakpt[bno]['line']", "def get_y(self):\n\n return math.floor(self.position.y)", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the width of a character.
def _get_char_width(self): return self.fontw
[ "def get_width( o ):\n \"\"\"获取该字符在屏幕上的显示的长度\"\"\"\n global widths\n if o == 0xe or o == 0xf:\n return 0\n for num, wid in widths:\n if o <= chr(num):\n return wid\n return 1", "def fontwidth(word):\n return sum([lookup.ASCIIPIXELS[letter] + 1\n if...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the correct font for the style.
def _get_style_font(self, style): return self.fonts.get_font(style['bold'], style['italic'])
[ "def getFont(self):\r\n return self.font", "def _get_font(self):\n return self._control.document().defaultFont()", "def font(self, font_name):\n return self._font[font_name]", "def getFont(self):\n from pagebot.fonttoolbox.objects.font import getFont\n from pagebot.contexts....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create drawables for the token content.
def _create_drawables(self, tokensource): lineno = charno = maxcharno = 0 for ttype, value in tokensource: while ttype not in self.styles: ttype = ttype.parent style = self.styles[ttype] # TODO: make sure tab expansion happens earlier in the chai...
[ "def DrawTokensBlue():\r\n for i in range(4):\r\n Tokens(TokenBlue,BlueChips[i][0],BlueChips[i][1])", "def DrawTokensRed():\r\n for i in range(4):\r\n Tokens(TokenRed,RedChips[i][0],RedChips[i][1])", "def make_drawable(self):\n drawable_env = []\n for i in range(self.Y):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Paint the line number background on the image.
def _paint_line_number_bg(self, im): if not self.line_numbers: return if self.line_number_fg is None: return draw = ImageDraw.Draw(im) recth = im.size[-1] rectw = self.image_pad + self.line_number_width - self.line_number_pad draw.rectangle...
[ "def draw_horizontal_lines(img):\n row, col = img.shape\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n interval = row / 10\n for i in range(1, 10):\n (x0, y0) = map(int, [0, i * interval])\n (x1, y1) = map(int, [col, i * interval])\n img = cv2.line(img, (x0, y0), (x1, y1), (0, 255,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` tuples and write it into ``outfile``. This implementation calculates where it should draw each token on the pixmap, then calculates the required pixmap size and draws the items.
def format(self, tokensource, outfile): self._create_drawables(tokensource) self._draw_line_numbers() im = Image.new( 'RGB', self._get_image_size(self.maxcharno, self.maxlineno), self.background_color ) self._paint_line_number_bg(im) ...
[ "def display_tokens(tokens, image):\n new_image = image.convert('RGBA')\n dr = ImageDraw.Draw(new_image)\n fnt = ImageFont.load_default()\n font_size = 28\n try:\n # Pretty font, pretty sure I'm the only one to have it though - Andy\n fnt = ImageFont.truetype(\"DejaVuSans.ttf\", font_si...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an iterable of (tokentype, value) pairs generated from `text`. If `unfiltered` is set to `True`, the filtering mechanism is bypassed even if filters are defined. Also preprocess the text, i.e. expand tabs and strip it if wanted and applies registered filters.
def get_tokens(self, text, unfiltered=False): if not isinstance(text, unicode): if self.encoding == 'guess': try: text = text.decode('utf-8') if text.startswith(u'\ufeff'): text = text[len(u'\ufeff'):] ...
[ "def get_tokens(self, text, unfiltered=False):\r\n if not isinstance(text, str):\r\n if self.encoding == 'guess':\r\n try:\r\n text = text.decode('utf-8')\r\n if text.startswith('\\ufeff'):\r\n text = text[len('\\ufeff'):]...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Callback that yields multiple actions for each group in the match.
def bygroups(*args): def callback(lexer, match, ctx=None): for i, action in enumerate(args): if action is None: continue elif type(action) is _TokenType: data = match.group(i + 1) if data: yield match.start(i...
[ "def _performManyAct(self, action, args, messages, D):\n readCount = 0\n i = -1\n\n for message in messages:\n if message.read:\n readCount += 1\n yield action(message, **args)\n i += 1\n D.callback((readCount, i+1-readCount))", "def grou...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Instantiate cls after preprocessing its token definitions.
def __call__(cls, *args, **kwds): if '_tokens' not in cls.__dict__: cls._all_tokens = {} cls._tmpname = 0 if hasattr(cls, 'token_variants') and cls.token_variants: # don't process yet pass else: cls._tokens =...
[ "def __init__(self):\n self.__parser = SpaCyParser()\n self.__word_substitutor = WordSubstitutor()", "def compileClass(self):\n self.current_compile = \"compileClass\"\n self.eat(\"class\")\n self.class_name = self.eatTag(\"identifier\")\n self.eat(\"{\")\n\n while...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper for lexers which must combine the results of several sublexers. ``insertions`` is a list of ``(index, itokens)`` pairs. Each ``itokens`` iterable should be inserted at position ``index`` into the token stream given by the ``tokens`` argument. The result is a combined token stream.
def do_insertions(insertions, tokens): insertions = iter(insertions) try: index, itokens = insertions.next() except StopIteration: # no insertions for item in tokens: yield item return realpos = None insleft = True # iterate over the tok...
[ "def do_insertions(insertions, tokens):\r\n insertions = iter(insertions)\r\n try:\r\n index, itokens = next(insertions)\r\n except StopIteration:\r\n # no insertions\r\n for item in tokens:\r\n yield item\r\n return\r\n\r\n realpos = None\r\n insleft = True\r\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a subclass of baselexer that accepts the ObjectiveC syntax extensions.
def objective(baselexer): # Have to be careful not to accidentally match JavaDoc/Doxygen syntax here, # since that's quite common in ordinary C/C++ files. It's OK to match # JavaDoc/Doxygen keywords that only apply to Objective-C, mind. # # The upshot of this is that we CANNOT match @class o...
[ "def create_lexer(self):\n raise NotImplementedError()", "def test_lexer():\n generator_stream = cStringIO.StringIO()\n generator_stream.write(\"\"\"\n[[:newline:]] NEWLINE\n[[:whitespace:]] IGNORE\n'namespace'[[:whitespace:]]* NAMESPACE\n[a-z][a-z0-9_?!]* ID\n':...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set an explicit indentation level for a block scalar.
def set_block_scalar_indent(token_class): def callback(lexer, match, context): text = match.group() context.block_scalar_indent = None if not text: return increment = match.group(1) if increment: current_indent =...
[ "def indent_level(self, indent_level):\n\n self.container['indent_level'] = indent_level", "def indent(self, lvl=1):\n self.current_level += lvl\n assert self.current_level >= 0, \"Level of indentation cannot become negative\"\"\"", "def update_indent(self) -> None:\n self.indent = s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process indentation spaces in a block scalar.
def parse_block_scalar_indent(token_class): def callback(lexer, match, context): text = match.group() if context.block_scalar_indent is None: if len(text) <= max(context.indent, 0): context.stack.pop() context.stack.pop() ...
[ "def codeblock(self, blk):\n lines = blk.splitlines()\n for l in lines:\n # Adds indentation on non empty lines\n if re.match(\"^\\s*$\", l) is None:\n self.current_code += self.current_level * self.indent_size * ' '\n self.current_code += l\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process indentation spaces in a plain scalar.
def parse_plain_scalar_indent(token_class): def callback(lexer, match, context): text = match.group() if len(text) <= context.indent: context.stack.pop() context.stack.pop() return if text: yield match.st...
[ "def get_spaces(self):\n pass", "def testCurrentIndent(self):\n\n self.controller.tabUsesSpaces = True\n self.assert_(self.controller._indent_for_block(\"\"\"a=3\"\"\") == None)\n self.assert_(self.controller._indent_for_block(\"\") == None)\n block = \"\"\"def test():\\n a=3...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Scan the text for the given pattern and update pos/match and related fields. The return value is a boolen that indicates if the pattern matched. The matched value is stored on the instance as ``match``, the last value is stored as ``last``. ``start_pos`` is the position of the pointer before the pattern was matched, ``...
def scan(self, pattern): if self.eos: raise EndOfText() if pattern not in self._re_cache: self._re_cache[pattern] = re.compile(pattern, self.flags) self.last = self.match m = self._re_cache[pattern].match(self.data, self.pos) if m is None: ...
[ "def match(self, text, pos=0):\n error = ParseError(text)\n node = self.match_core(text, pos, defaultdict(dict), error)\n if node is None:\n raise error\n return node", "def update_matches(self, begin, end):\n if self.entry != None:\n self.__get_matches(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if ``ttype`` is a subtype of ``other``. Exists for backwards compatibility; use ``ttype in other`` now.
def is_token_subtype(ttype, other): return ttype in other
[ "def is_subtype_of(self, other):\n # pylint: disable=protected-access\n if type(self) is not type(\n other) or self._callable_params != other._callable_params:\n return False\n\n try:\n tf.nest.assert_same_structure(self._comparable[:-1],\n other._compara...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a static text analysation function that returns float values.
def make_analysator(f): def text_analyse(text): try: rv = f(text) except Exception: return 0.0 if not rv: return 0.0 try: return min(1.0, max(0.0, float(rv))) except (ValueError, TypeError): return 0.0 ...
[ "def eval_texts_calc_wrapper(line):\n # These signals are needed to run the functions as written in eval texts.\n signals = {\n 'signal0': np.array([1, 1, 1, 0, 0]),\n 'signal1': np.array([1, 1, 1, 0, 0]),\n 'time': np.array([0, 1, 2, 3, 4])}\n\n # These variable names are needed to ru...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the given regular expression matches the last part of the shebang if one exists. >>> from pygments.util import shebang_matches >>> shebang_matches('!/usr/bin/env python', r'python(2\.\d)?') True >>> shebang_matches('!/usr/bin/python2.4', r'python(2\.\d)?') True >>> shebang_matches('!/usr/bin/pythonruby', r'pyt...
def shebang_matches(text, regex): index = text.find('\n') if index >= 0: first_line = text[:index].lower() else: first_line = text.lower() if first_line.startswith('#!'): try: found = [x for x in split_path_re.split(first_line[2:].strip()) ...
[ "def rewrite_shebang(data, target, prefix):\n shebang_match = re.match(SHEBANG_REGEX, data, re.MULTILINE)\n prefix_b = prefix.encode('utf-8')\n\n if shebang_match:\n if data.count(prefix_b) > 1:\n # More than one occurrence of prefix, can't fully cleanup.\n return data, False\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the doctype matches a regular expression (if present). Note that this method only checks the first part of a DOCTYPE.
def doctype_matches(text, regex): m = doctype_lookup_re.match(text) if m is None: return False doctype = m.group(2) return re.compile(regex).match(doctype.strip()) is not None
[ "def parse_doctype(self):\n if self.seen_doctype == 1:\n xmlproc.XMLProcessor.parse_doctype(self)\n else:\n arizonareport.send_out(4, str(\"Ignoring DOCTYPE (%s,%d)\" % (self.get_current_sysid(), self.get_line())) )\n self.scan_to(\"]>\")\n self.seen_doctype = 1", "def do...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse block attributes. >>> t = Textile() >>> t.pba(r'\3') '' >>> t.pba(r'\\3', element='td') ' colspan="3"' >>> t.pba(r'/4', element='td') ' rowspan="4"' >>> t.pba(r'\\3/4', element='td') ' colspan="3" rowspan="4"' >>> t.pba('^', element='td')
def pba(self, block_attributes, element=None): style = [] aclass = '' lang = '' colspan = '' rowspan = '' block_id = '' if not block_attributes: return '' matched = block_attributes if element == 'td': m = re....
[ "def parse_block(block: str) -> str:\n try:\n match = pattern.search(block)\n charset, encoding, raw_text = match.groups()\n except AttributeError:\n # match is None so .groups fails\n raise ValueError(f\"Could not recognise format of: {block}\") from None\n\n if str.lower(encod...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks whether the text has text not already enclosed by a block tag. >>> t = Textile() >>> t.hasRawText('<p>foo bar biz baz</p>') False >>> t.hasRawText(' why yes, yes it does') True
def hasRawText(self, text): r = re.compile(r'<(p|blockquote|div|form|table|ul|ol|pre|h\d)[^>]*?>.*</\1>', re.S).sub('', text.strip()).strip() r = re.compile(r'<(hr|br)[^>]*?/>').sub('', r) return '' != r
[ "def has_text(self):", "def is_html_like(text):\n if isinstance(text, str):\n text = text.strip()\n if text.startswith(\"<\"):\n return True\n return False\n return False", "def has_text_content(element):\n return element.string is not None", "def is_html(text):\n i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" >>> t = Textile() >>> t.table('(rowclass). |one|two|three|\n|a|b|c|') '\t\n\t\t\n\t\t\tone\n\t\t\ttwo\n\t\t\tthree\n\t\t\n\t\t\n\t\t\ta\n\t\t\tb\n\t\t\tc\n\t\t\n\t\n\n'
def table(self, text): text = text + "\n\n" pattern = re.compile(r'^(?:table(_?%(s)s%(a)s%(c)s)\. ?\n)?^(%(a)s%(c)s\.? ?\|.*\|)\n\n' % {'s': self.table_span_re, 'a': self.align_re, 'c': self.c}, ...
[ "def test_multi_line(style):\n row = ['Row One\\nColumn One', 'Two', 'Three']\n table = BaseTable([row])\n actual = [tuple(i) for i in table.gen_row_lines(row, style, [10, 3, 5], 2)]\n expected = [\n ('|', ' Row One ', '|', ' Two ', '|', ' Three ', '|'),\n ('|', ' Column One ', '|', ' ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> t = Textile() >>> t.fBlock("bq", "", None, "", "Hello BlockQuote") ('\\t<blockquote>\\n', '\\t\\t<p>', 'Hello BlockQuote', '</p>', '\\n\\t</blockquote>')
def fBlock(self, tag, atts, ext, cite, content): atts = self.pba(atts) o1 = o2 = c2 = c1 = '' m = re.search(r'fn(\d+)', tag) if m: tag = 'p' if m.group(1) in self.fn: fnid = self.fn[m.group(1)] else: fnid = m....
[ "def makeNewBlock(self):\n\n block = textlayout.Block(\n width=self._propertyToPoints(\"width\"),\n lineHeight=self._propertyToPoints(\"line_height\"),\n marginTop=self._propertyToPoints(\"margin_top\"),\n marginBottom=self._propertyToPoints(\"margin_bottom\"),\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> t = Textile() >>> t.glyphs("apostrophe's") 'apostrophe&#8217;s' >>> t.glyphs("back in '88") 'back in &#8217;88' >>> t.glyphs('foo ...') 'foo &#8230;' >>> t.glyphs('--') '&#8212;' >>> t.glyphs('FooBar[tm]') 'FooBar&#8482;' >>> t.glyphs("Cat's Cradle by Vonnegut") 'Cat&#8217;s Cradle by Vonnegut'
def glyphs(self, text): # fix: hackish text = re.sub(r'"\Z', '\" ', text) glyph_search = ( # apostrophe's re.compile(r"(\w)\'(\w)"), # back in '88 re.compile(r'(\s)\'(\d+\w?)\b(?!\')'), # single closing re.compil...
[ "def get_glyphs(self, text):\n glyph_renderer = None\n glyphs = [] # glyphs that are committed.\n for c in get_grapheme_clusters(str(text)):\n # Get the glyph for 'c'. Hide tabs (Windows and Linux render\n # boxes)\n if c == '\\t':\n c = ' '\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Capture and store URL references in self.urlrefs. >>> t = Textile()
def getRefs(self, text): pattern = re.compile(r'(?:(?<=^)|(?<=\s))\[(.+)\]((?:http(?:s?):\/\/|\/)\S+)(?=\s|$)', re.U) text = pattern.sub(self.refs, text) return text
[ "def RefExtract(self):\n Regex = r\"\\\\ref\\{.*?\\}\"\n self.RefRegex = re.compile(Regex, re.VERBOSE|re.DOTALL)\n\n RefExtracted = self.RefRegex.findall(self.ParsedText)\n\n for Reference in RefExtracted:\n ThisUID = self.GenerateUID()\n self.ParsedRef[ThisUID] = R...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> t = Textile() >>> t.span(r"hello %(bob)span *strong* and **bold**% goodbye") 'hello <span class="bob">span <strong>strong</strong> and <b>bold</b></span> goodbye'
def span(self, text): qtags = (r'\*\*', r'\*', r'\?\?', r'\-', r'__', r'_', r'%', r'\+', r'~', r'\^') pnct = ".,\"'?!;:(" for qtag in qtags: pattern = re.compile(r""" (?:^|(?<=[\s>%(pnct)s])|([\[{])) (%(qtag)s)(?!%(qtag)s) ...
[ "def _span_word(tag: Callable, text: Callable, word: str, score: float,\n colormap: Callable):\n bg = colormap(score)\n style = \"color:\" + _get_rgb(bg) + \";font-weight:bold;background-color: \" \\\n \"#ffffff\"\n with tag(\"span\", style=style):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply Textile to a block of text.
def textile(text, head_offset=0, html_type='xhtml', auto_link=False, encoding=None, output=None): return Textile(auto_link=auto_link).textile(text, head_offset=head_offset, html_type=html_type)
[ "def __call__(self, text):\n for unit in self.units:\n text = unit.transform(text)\n return text", "def highlightBlock(self, text):\r\n self.highlight_function(text)", "def apply_to_fig_text(fig: mpl.figure.Figure, fn: Callable[[str], str]):\n for text in fig.findobj(match=plt.T...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attempts to determine an image's width and height, and returns a string suitable for use in an <img> tag, or an empty string in case of failure. Requires that PIL is installed.
def getimagesize(url): try: from PIL import ImageFile import urllib2 except ImportError: return '' try: p = ImageFile.Parser() f = urllib2.urlopen(url) while True: s = f.read(1024) if not s: break ...
[ "def info(img):\n if hasattr(aq_base(img), 'meta_type') and img.meta_type == 'Image':\n ct, w, h = img.content_type, img.width, img.height\n # Zope Image object can be buggy (tiff)\n if isinstance(w, int) and isinstance(h, int) and ct.startswith('image/'):\n return ct, w, h\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a string of wiki markup and outputs a list of genshi Fragments (Elements and strings). This recursive function, with help from the WikiElement objects, does almost all the parsing. When no WikiElement objects are supplied, escapes are removed from ``text`` (except if remove_escapes=True) and it is returned asis. ...
def fragmentize(text,wiki_elements, element_store, environ, remove_escapes=True): while wiki_elements: # If the first supplied wiki_element is actually a list of elements, \ # search for all of them and match the closest one only. if isinstance(wiki_elements[0],(list,tuple)): ...
[ "def parse_elements(text):\n \n \n # sanitise and split using BeautifulSoup\n soup = BeautifulSoup(parse(text))\n elements = [e for e in soup.contents if type(e) == Tag]\n \n # wrap blocks in <div>\n format = u\"<div class='doccomment-block' id='DE-%d'>\\n%s\\n</div>\"\n for seq,txt in enu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This re_string is for finding generic block elements like lists (ordered, unordered, and definition) that start with a single token.
def re_string(self): leading_whitespace = r'^([ \t]*' only_one_token = re.escape(self.token)+ '(?!' + re.escape(self.token) + ')' rest_of_list = r'.*?(?:\n|\Z))' only_one_stop_token = '([' + re.escape(self.stop_tokens) + r'])(?!\3)' look_ahead = '(?=([ \t]*' + only_o...
[ "def begin_token(self) -> str:", "def __find_block_start(self):\n try:\n return self.__find_token(self.__block_head)\n except RouteParserError:\n raise StartTokenNotFoundError(_('No match for entry block start'))", "def test_parse_token_single_element_name(self):\n\n #...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set `self.reader` by name.
def set_reader(self, reader_name, parser, parser_name): reader_class = readers.get_reader_class(reader_name) self.reader = reader_class(parser, parser_name) self.parser = self.reader.parser
[ "def set_reader(self, fd, on_readable):\n raise NotImplementedError", "def setName(self, name):\n self.content = name", "def setScanner(self, scannerName):\n self.scanner = self.sourceManager.OpenSource(scannerName)", "def set_name(self, name):\n if self._status == \"lock\":\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set `self.writer` by name.
def set_writer(self, writer_name): writer_class = writers.get_writer_class(writer_name) self.writer = writer_class()
[ "def get_writer(self, name=None):\n self._create_working_folder()\n name = self.clean_name(name)\n if name not in self.writers:\n self.writers[name] = open(os.path.join(self.working_folder, name), 'wb')\n return self.writers[name]", "def set_writer(self, fd, on_writable):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set and return default settings (overrides in `defaults` dict). Set components first (`self.set_reader` & `self.set_writer`). Explicitly setting `self.settings` disables command line option processing from `self.publish()`.
def get_settings(self, usage=None, description=None, settings_spec=None, config_section=None, **defaults): option_parser = self.setup_option_parser( usage, description, settings_spec, config_section, **defaults) self.settings = option_parser.get_default_values() ...
[ "def __init__(self, tool_name=None, file_name=None, path=None, defaults={},\n logger=None):\n self._settings = {}\n self._logger = logger or (lambda x: None)\n SettingsIO.__init__(self, tool_name, file_name, path)\n self.update(defaults, save=False)\n self._default...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process command line options and arguments (if `self.settings` not already set), run `self.reader` and then `self.writer`. Return `self.writer`'s output.
def publish(self, argv=None, usage=None, description=None, settings_spec=None, settings_overrides=None, config_section=None, enable_exit_status=False): exit = None try: if self.settings is None: self.process_command_line( ...
[ "def publish(self, argv=None, usage=None, description=None,\r\n settings_spec=None, settings_overrides=None,\r\n config_section=None, enable_exit_status=False):\r\n exit = None\r\n try:\r\n if self.settings is None:\r\n self.process_command_line(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set up & run a `Publisher` for commandlinebased file I/O (input and output file paths taken automatically from the command line). Return the encoded string output also. This is just like publish_cmdline, except that it uses io.BinaryFileOutput instead of io.FileOutput.
def publish_cmdline_to_binary(reader=None, reader_name='standalone', parser=None, parser_name='restructuredtext', writer=None, writer_name='pseudoxml', settings=None, settings_spec=None, settings_overrides=None, config_section=None, ...
[ "def publish(self, argv=None, usage=None, description=None,\r\n settings_spec=None, settings_overrides=None,\r\n config_section=None, enable_exit_status=False):\r\n exit = None\r\n try:\r\n if self.settings is None:\r\n self.process_command_line(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an input string, returns a dictionary of HTML document parts. Dictionary keys are the names of parts, and values are Unicode strings; encoding is up to the client.
def html_parts(input_string, source_path=None, destination_path=None, input_encoding='unicode', doctitle=True, initial_header_level=1): overrides = {'input_encoding': input_encoding, 'doctitle_xform': doctitle, 'initial_header_level': initial_head...
[ "def direct_from_string(text: str) -> dict:\n return MarkdownTextObject(text=text).to_dict()", "def _parse_fragment(fragment_string: str) -> Dict[str, str]:\n fragment_string = fragment_string.lstrip('#')\n\n try:\n return dict(\n cast(Tuple[str, str], tuple(key_value_string.split('...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an input string, returns an HTML fragment as a string. The return value is the contents of the element.
def html_body(input_string, source_path=None, destination_path=None, input_encoding='unicode', output_encoding='unicode', doctitle=True, initial_header_level=1): parts = html_parts( input_string=input_string, source_path=source_path, destination_path=destination_path...
[ "def fragment_fromstring(html, create_parent=False,\n guess_charset=False, parser=None):\n if not isinstance(html, _strings):\n raise TypeError('string required')\n\n accept_leading_text = bool(create_parent)\n\n elements = fragments_fromstring(\n html, guess_charset=gu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Store multiple values in `parser.values`. (Option callback.) Store `None` for each attribute named in `args`, and store the value for each key (attribute name) in `kwargs`.
def store_multiple(option, opt, value, parser, *args, **kwargs): for attribute in args: setattr(parser.values, attribute, None) for key, value in list(kwargs.items()): setattr(parser.values, key, value)
[ "def _parse_args(self):\n self._verify(self.args + list(self.kwargs))\n\n self.name = self.args[0]\n self.nodes = self.args[1:1+self.num_nodes]\n self.value = self._parse_values(self.args[1+self.num_nodes:])\n self.kwargs = self._parse_pairs(self.kwargs)\n # for key, value ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }