Columns: query (string, lengths 9 to 9.05k), document (string, lengths 10 to 222k), negatives (list, lengths 19 to 20), metadata (dict).
Return a list of windows for the current desktop. If the optional 'desktop' parameter is specified then attempt to use that particular desktop environment's mechanisms to look for windows.
def list(desktop=None):
    root_window = root(desktop)
    window_list = [window for window in root_window.descendants() if window.displayed()]
    window_list.insert(0, root_window)
    return window_list
[ "def user32_EnumDesktopWindows(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hDesktop\", \"lpfn\", \"lParam\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def user32_EnumDesktops(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdca...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the root window for the current desktop. If the optional 'desktop' parameter is specified then attempt to use that particular desktop environment's mechanisms to look for windows.
def root(desktop=None):
    # NOTE: The desktop parameter is currently ignored and X11 is tested for
    # NOTE: directly.
    if _is_x11():
        return Window(None)
    else:
        raise OSError("Desktop '%s' not supported" % use_desktop(desktop))
[ "def current_desktop(self) -> int:\n result = xlib.get_window_property(\n display=self.dpy, window=self.root, property=self.atom[\"_NET_CURRENT_DESKTOP\"]\n )\n return cast(List[int], result)[0]", "def _desktopwidget(self):\r\n if self.__desktopwidget is None:\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find and return windows using the given 'callable' for the current desktop. If the optional 'desktop' parameter is specified then attempt to use that particular desktop environment's mechanisms to look for windows.
def find(callable, desktop=None):
    return root(desktop).find(callable)
[ "def user32_EnumDesktopWindows(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hDesktop\", \"lpfn\", \"lParam\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def user32_OpenDesktop(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcal...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a decorator that attaches a callback to a hook. Three hooks
def hook(self, name):
    def wrapper(func):
        self.hooks.add(name, func)
        return func
    return wrapper
[ "def hook(func: Callable):\n parameters, return_annotation = _extract_params(func, extract_return=True)\n return Hook(str(func), parameters, return_annotation)", "def set_hook(f: Callable[[Any], Any]) -> Callable[[Any], Any]:\n\n @wraps(f)\n def set_hook_wrapper(self, **kwargs):\n f(self, **kwa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A list of all IPs that were involved in this request, starting with the client IP and followed by zero or more proxies. This only works if all proxies support the ``X-Forwarded-For`` header. Note that this information can be forged by malicious clients.
def remote_route(self):
    proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
    if proxy:
        return [ip.strip() for ip in proxy.split(',')]
    remote = self.environ.get('REMOTE_ADDR')
    return [remote] if remote else []
[ "def get_ips(self):\n \n # Get the IP from each interface\n output = [i.interface_ip for i in self.interfaces if is_ip(i)]\n \n # Add any other IP's it has\n output.extend(self.other_ips)\n \n return output", "def proxies(self):\r\n url = \"%s/sha...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse an RFC 2617 HTTP authentication header string (basic) and return a (user, pass) tuple or None
def parse_auth(header):
    try:
        method, data = header.split(None, 1)
        if method.lower() == 'basic':
            user, pwd = touni(base64.b64decode(tob(data))).split(':',1)
            return user, pwd
    except (KeyError, ValueError):
        return None
[ "def parse_basic_auth(header):\n b64value = header[len(basic_prefix):]\n value = b64decode(b64value).decode()\n return value.split(':', 1)", "def parse_basic_auth(header_value):\n\n if not header_value:\n return None\n\n parts = header_value.split(\" \")\n if len(parts) != 2 or parts[0].l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a generator for routes that match the signature (name, args) of the func parameter. This may yield more than one route if the function
def yieldroutes(func):
    import inspect # Expensive module. Only import if necessary.
    path = '/' + func.__name__.replace('__','/').lstrip('/')
    spec = inspect.getargspec(func)
    argc = len(spec[0]) - len(spec[3] or [])
    path += ('/:%s' * argc) % tuple(spec[0][:argc])
    yield path
    for arg in s...
[ "def _get_generator(self, path: str) -> Union[_GEN_FUNCTION_TYPE, None]:\n callback = self._routes.get(path)\n if callback:\n return callback\n\n items = reversed(tuple(self._re_routes.items())) # Thread safety\n for pattern, callback in items:\n if pattern.fullmat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load a bottle application from a module and make sure that the import does not affect the current default application, but returns a separate
def load_app(target):
    global NORUN; NORUN, nr_old = True, NORUN
    try:
        tmp = default_app.push() # Create a new "default application"
        rv = load(target) # Import the target module
        return rv if callable(rv) else tmp
    finally:
        default_app.remove(tmp) # Remove the temporary ad...
[ "def load_app(module_name, objects=None):\n from importlib import import_module\n if objects:\n return import_module(module_name + '.' + objects)\n else:\n return import_module(module_name)", "def app_from_config(config: RunConfig) -> Bottle:\n # The _app module instantiates a Bottle ins...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new template. If the source parameter (str or buffer) is missing, the name argument is used to guess a template filename. Subclasses can assume that self.source and/or self.filename are set. Both are strings. The lookup, encoding and settings parameters are stored as instance variables. The lookup parameter st...
def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
    self.name = name
    self.source = source.read() if hasattr(source, 'read') else source
    self.filename = source.filename if hasattr(source, 'filename') else None
    self.lookup = [os.path.abspath(x) for x in ...
[ "def load(self,\n template_source,\n template_filename='',\n template_identifier='',\n template_encoding='utf-8',\n template_standard='xhtml',\n parser_parameters={}):\n assert template_standard in ('xml', 'xhtml')\n \n sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search name in all directories specified in lookup. First without, then with common extensions. Return first hit.
def search(cls, name, lookup=[]):
    if not lookup:
        depr('The template lookup path list should not be empty.')
        lookup = ['.']
    if os.path.isabs(name) and os.path.isfile(name):
        depr('Absolute template path names are deprecated.')
        return os.path.abspath(...
[ "def compare(self):\n\n # search result storage\n results_paths = self.result_paths\n to_search = []\n\n # for each search path, get results\n # this now is one list of all results from each search directory\n search_files = self.dir_search(self.ext_1)\n\n # drop ext...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wrap and return the given socket, plus WSGI environ entries.
def wrap(self, sock):
    return sock, self._environ.copy()
[ "def wrap_socket(self, socket):\n return ssl.wrap_socket(socket,\n ca_certs=self.cert_file,\n cert_reqs=ssl.CERT_REQUIRED,\n ssl_version=ssl.PROTOCOL_TLSv1)", "def _xwrap(sock):\n return sock.sock if isinstance(soc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an SSL.Context from self attributes.
def get_context(self):
    # See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473
    c = SSL.Context(SSL.SSLv23_METHOD)
    c.use_privatekey_file(self.private_key)
    if self.certificate_chain:
        c.load_verify_locations(self.certificate_chain)
    c.use_certificate_fi...
[ "def create_ssl_context(self):\n ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n ssl_context.options |= ssl.OP_NO_TLSv1\n ssl_context.options |= ssl.OP_NO_TLSv1_1\n ssl_context.options |= ssl.OP_NO_COMPRESSION\n ssl_context.set_ciphers(self.tls_ciphers)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return error numbers for all errors in errnames on this platform. The 'errno' module contains different global constants depending on the specific platform (OS). This function will return the list of numeric values for a given list of potential names.
def plat_specific_errors(*errnames):
    errno_names = dir(errno)
    nums = [getattr(errno, k) for k in errnames if k in errno_names]
    # de-dupe the list
    return list(dict.fromkeys(nums).keys())
[ "def listErrorCodes(errno=None):\n\n\tif errno is None:\n\t\tfor i in range(MinErrorNo, (MaxErrorNo+1)):\n\t\t\tlistErrorCodes(errno=i)\n\telse:\n\t\tif errno == 1:\n\t\t\tprint \"1: End of file encountered during filehandle read\"\n\t\telif errno == 2:\n\t\t\tprint \"2: End of file encountered during numpy.fromfil...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read headers from the given stream into the given header dict. If hdict is None, a new header dict is created. Returns the populated header dict. Headers which are repeated are folded together using a comma if their specification so dictates. This function raises ValueError when the read bytes violate the HTTP spec. Yo...
def read_headers(rfile, hdict=None):
    if hdict is None:
        hdict = {}
    while True:
        line = rfile.readline()
        if not line:
            # No more data--illegal end of headers
            raise ValueError("Illegal end of headers.")
        if line == CRLF:
            # Normal end of ...
[ "def read_header(header):\n request = str(header, \"ASCII\").split(\"\\r\\n\", 1)[0]\n request_headers = str(header, \"ASCII\").split(\"\\r\\n\", 1)[1]\n request_headers = message_from_string(request_headers)\n request_headers = dict(request_headers)\n request_headers[\"Method\"] = request.split(\" \...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the next HTTP request start-line and message-headers.
def parse_request(self):
    self.rfile = SizeCheckWrapper(self.conn.rfile, self.server.max_request_header_size)
    try:
        success = self.read_request_line()
    except MaxSizeExceeded:
        self.simple_response("414 Request-URI Too Long", ...
[ "def parse_request(self):\n self.method, self.location, self.http_version = \\\n self.request_line.decode(\"utf-8\").split()", "def parse_request(self):\r\n self.command = None # set in case of error on the first line\r\n self.request_version = version = \"HTTP/0.9\" # Default\r\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a Request-URI into (scheme, authority, path).
def parse_request_uri(self, uri):
    if uri == ASTERISK:
        return None, None, uri
    i = uri.find('://')
    if i > 0 and QUESTION_MARK not in uri[:i]:
        # An absoluteURI.
        # If there's a scheme (and it must be http or https), then:
        # http_URL = "http:" ...
[ "def _url_parse(uri):\n host = \"\"\n path = \"\"\n\n p_uri = urlparse(uri)\n host = p_uri.netloc\n path = p_uri.path.rstrip('/').strip('/')\n\n return (host,path)", "def split_path(uri):\n parsed = urlparse(uri)\n return parsed.path, parsed.query, parsed.fragment", "def parseURI(self, u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assert, process, and send the HTTP response message-headers. You must set self.status and self.outheaders before calling this.
def send_headers(self):
    hkeys = [key.lower() for key, value in self.outheaders]
    status = int(self.status[:3])
    if status == 413:
        # Request Entity Too Large. Close conn to avoid garbage.
        self.close_connection = True
    elif "content-length" not in hkeys:
        ...
[ "def _sendResponseHeaders(self):\n code, message = self.status.split(None, 1)\n code = int(code)\n self.request.setResponseCode(code, _wsgiStringToBytes(message))\n\n for name, value in self.headers:\n # Don't allow the application to control these required headers.\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sendall for non-blocking sockets.
def sendall(self, data):
    while data:
        try:
            bytes_sent = self.send(data)
            data = data[bytes_sent:]
        except socket.error, e:
            if e.args[0] not in socket_errors_nonblocking:
                raise
[ "def _flush(self):\n\t\t\n\t\tfor element in self._writequeue:\n\t\t\tsize = len(element)\n\t\t\twhile size > 0:\n\t\t\t\ttry:\n\t\t\t\t\tsent = self._socket.send(element)\n\t\t\t\t\telement = element[sent:]\n\t\t\t\t\tsize -= sent\n\t\t\t\texcept socket.error, e:\n\t\t\t\t\tif e.errno == errno.EAGAIN:\n\t\t\t\t\t\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mark the given socket fd as non-inheritable (Windows).
def prevent_socket_inheritance(sock):
    if not _SetHandleInformation(sock.fileno(), 1, 0):
        raise WinError()
[ "def _set_non_blocking(fd):\n import fcntl\n flags = fcntl.fcntl(fd, fcntl.F_GETFL)\n flags = flags | os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)", "def make_nonblocking(fp):\n if isinstance(fp, socket.socket):\n fp.setblocking(0)\n elif fcntl:\n flags = fcntl.fcntl(fp, fcntl.F_G...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mark the given socket fd as non-inheritable (POSIX).
def prevent_socket_inheritance(sock):
    fd = sock.fileno()
    old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
[ "def prevent_socket_inheritance(sock):\r\n if not _SetHandleInformation(sock.fileno(), 1, 0):\r\n raise WinError()", "def _set_non_blocking(fd):\n import fcntl\n flags = fcntl.fcntl(fd, fcntl.F_GETFL)\n flags = flags | os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)", "def ma...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an SSL adapter class for the given name.
def get_ssl_adapter_class(name='pyopenssl'):
    adapter = ssl_adapters[name.lower()]
    if isinstance(adapter, basestring):
        last_dot = adapter.rfind(".")
        attr_name = adapter[last_dot + 1:]
        mod_path = adapter[:last_dot]
        try:
            mod = sys.modules[mod_path]
            ...
[ "def get_ssl_adapter_class(name='builtin'):\r\n adapter = ssl_adapters[name.lower()]\r\n if isinstance(adapter, basestring):\r\n last_dot = adapter.rfind(\".\")\r\n attr_name = adapter[last_dot + 1:]\r\n mod_path = adapter[:last_dot]\r\n\r\n try:\r\n mod = sys.modules[mo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return error numbers for all errors in errnames on this platform. The 'errno' module contains different global constants depending on the specific platform (OS). This function will return the list of numeric values for a given list of potential names.
def plat_specific_errors(*errnames):
    errno_names = dir(errno)
    nums = [getattr(errno, k) for k in errnames if k in errno_names]
    # de-dupe the list
    return list(dict.fromkeys(nums).keys())
[ "def listErrorCodes(errno=None):\n\n\tif errno is None:\n\t\tfor i in range(MinErrorNo, (MaxErrorNo+1)):\n\t\t\tlistErrorCodes(errno=i)\n\telse:\n\t\tif errno == 1:\n\t\t\tprint \"1: End of file encountered during filehandle read\"\n\t\telif errno == 2:\n\t\t\tprint \"2: End of file encountered during numpy.fromfil...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a Request-URI into (scheme, authority, path).
def parse_request_uri(self, uri):
    if uri == ASTERISK:
        return None, None, uri
    scheme, sep, remainder = uri.partition(b'://')
    if sep and QUESTION_MARK not in scheme:
        # An absoluteURI.
        # If there's a scheme (and it must be http or https), then:
        ...
[ "def _url_parse(uri):\n host = \"\"\n path = \"\"\n\n p_uri = urlparse(uri)\n host = p_uri.netloc\n path = p_uri.path.rstrip('/').strip('/')\n\n return (host,path)", "def split_path(uri):\n parsed = urlparse(uri)\n return parsed.path, parsed.query, parsed.fragment", "def parseURI(self, u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mark the given socket fd as non-inheritable (Windows).
def prevent_socket_inheritance(sock):
    if not _SetHandleInformation(sock.fileno(), 1, 0):
        raise WinError()
[ "def _set_non_blocking(fd):\n import fcntl\n flags = fcntl.fcntl(fd, fcntl.F_GETFL)\n flags = flags | os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)", "def make_nonblocking(fp):\n if isinstance(fp, socket.socket):\n fp.setblocking(0)\n elif fcntl:\n flags = fcntl.fcntl(fp, fcntl.F_G...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mark the given socket fd as non-inheritable (POSIX).
def prevent_socket_inheritance(sock):
    fd = sock.fileno()
    old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
[ "def prevent_socket_inheritance(sock):\r\n if not _SetHandleInformation(sock.fileno(), 1, 0):\r\n raise WinError()", "def _set_non_blocking(fd):\n import fcntl\n flags = fcntl.fcntl(fd, fcntl.F_GETFL)\n flags = flags | os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)", "def ma...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an SSL adapter class for the given name.
def get_ssl_adapter_class(name='builtin'):
    adapter = ssl_adapters[name.lower()]
    if isinstance(adapter, basestring):
        last_dot = adapter.rfind(".")
        attr_name = adapter[last_dot + 1:]
        mod_path = adapter[:last_dot]
        try:
            mod = sys.modules[mod_path]
            if...
[ "def get_adapter(self, name):\n if isinstance(name, Adapter):\n if name.name in self._adapters:\n if self._adapters[name.name] == name:\n return name\n if name in self._adapters:\n return self._adapters[name]", "def get_driver_adapter(driver_na...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a new environ dict targeting the given wsgi.version
def get_environ(self):
    req = self.req
    env_10 = WSGIGateway_10.get_environ(self)
    env = env_10.copy()
    env['wsgi.version'] = ('u', 0)
    # Request-URI
    env.setdefault('wsgi.url_encoding', 'utf-8')
    try:
        # SCRIPT_NAME is the empty string, who cares wh...
[ "def make_environ(extra=None, **kwds):\n environ = {}\n if extra is not None:\n environ.update(extra)\n environ[\"wsgi.version\"] = (1, 0)\n environ[\"wsgi.url_scheme\"] = \"http\"\n environ[\"SERVER_NAME\"] = \"localhost\"\n environ[\"SERVER_PORT\"] = \"80\"\n environ[\"REQUEST_METHOD\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Query a valid view by buffer id
def query_valid_view(buffer_id):
    for window in sublime.windows():
        for view in window.views():
            if view.buffer_id() == buffer_id:
                return view
    return None
[ "def view(view_name, key=None):\n kwargs = {'key': key} if key is not None else {}\n print(settings.DB.view(view_name,**kwargs)).rows", "def get_view(window, vid):\r\n\r\n for view in window.views():\r\n if view.id() == vid:\r\n return view", "def get_by_id(self, view_id):\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a markdown document into an ElementTree. Given a list of lines, an ElementTree object (not just a parent Element) is created and the root element is passed to the parser as the parent. The ElementTree object is returned. This should only be called on an entire document, not pieces.
def parseDocument(self, lines):
    # Create a ElementTree from the lines
    self.root = util.etree.Element(self.markdown.doc_tag)
    self.parseChunk(self.root, '\n'.join(lines))
    return util.etree.ElementTree(self.root)
[ "def parseDocument(self, lines):\r\n # Create a ElementTree from the lines\r\n root = markdown.etree.Element(\"div\")\r\n self.parseChunk(root, '\\n'.join(lines))\r\n return markdown.etree.ElementTree(root)", "def create_tree(markdown):\n global blocks, pos\n # parse markdown\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a chunk of markdown text and attach to given etree node. While the ``text`` argument is generally assumed to contain multiple blocks which will be split on blank lines, it could contain only one block. Generally, this method would be called by extensions when block parsing is required. The ``parent`` etree Elemen...
def parseChunk(self, parent, text):
    self.parseBlocks(parent, text.split('\n\n'))
[ "def parse(text):\n md_extensions = getattr(settings, \"DOCCOMMENT_MARKDOWN_EXTENSIONS\", DEFAULT_EXTENSIONS)\n md_safemode = getattr(settings, \"DOCCOMMENT_MARKDOWN_SAFEMODE\", DEFAULT_SAFEMODE)\n return markdown(text, md_extensions, safe_mode=md_safemode)", "def parseText(self, node):\n self.a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process blocks of markdown text and attach to given etree node. Given a list of ``blocks``, each blockprocessor is stepped through until there are no blocks left. While an extension could potentially call this method directly, it's generally expected to be used internally. This is a public method as an extension may ne...
def parseBlocks(self, parent, blocks):
    while blocks:
        for processor in self.blockprocessors.values():
            if processor.test(parent, blocks[0]):
                if processor.run(parent, blocks) is not False:
                    # run returns True or None
                    ...
[ "def _parse_blocks(self, definition):\n block_matches = re.finditer(self.__class__.block_regex, definition)\n\n for match in block_matches:\n name = match.group('name')\n args = match.group('args')\n defs = match.group('defs')\n\n nodes = re.findall(self.__c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build the default block parser used by Markdown.
def build_block_parser(md_instance, **kwargs):
    parser = BlockParser(md_instance)
    parser.blockprocessors['empty'] = EmptyBlockProcessor(parser)
    parser.blockprocessors['indent'] = ListIndentProcessor(parser)
    parser.blockprocessors['code'] = CodeBlockProcessor(parser)
    parser.blockprocessors['hashh...
[ "def create_block_parser(self) -> BlockParser:\n parser = BlockParser()\n for processor in self.get_block_processors():\n parser.add_processor(processor(parser))\n return parser", "def get_parser():\n parser = (\n MarkdownIt(\"commonmark\")\n .enable(\"table\")\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove a tab from the front of each line of the given text.
def detab(self, text):
    newtext = []
    lines = text.split('\n')
    for line in lines:
        if line.startswith(' '*self.tab_length):
            newtext.append(line[self.tab_length:])
        elif not line.strip():
            newtext.append('')
        else:
            ...
[ "def filter_spaces_tabs(text):\n\n return re.sub(\" |\\t\", \"\", text)", "def remove_next_tab_chars(docs):\n return list(map(lambda s: s.replace('\\n', ' ').replace('\\t', ' '), docs))", "def remove_indent(self) -> None:\n w = abs(self.tab_width)\n if self.result:\n s = self.resu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new li and parse the block with it as the parent.
def create_item(self, parent, block):
    li = util.etree.SubElement(parent, 'li')
    self.parser.parseBlocks(li, [block])
[ "def create_item(parent, block):\r\n li = markdown.etree.SubElement(parent, 'li')\r\n self.parser.parseBlocks(li, [block])", "def ul(self, elem, theme, width):\n block, indent = [], ansi.length(theme.margin.head)\n\n for child in self.children(elem, {u('li')}):\n block += [u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get level of indent based on list level.
def get_level(self, parent, block):
    # Get indent level
    m = self.INDENT_RE.match(block)
    if m:
        indent_level = len(m.group(1))/self.tab_length
    else:
        indent_level = 0
    if self.parser.state.isstate('list'):
        # We're in a tightlist - so we alre...
[ "def get_level(list_, level):\n level = deepcopy(level)\n if len(level) > 0:\n i = level.pop(0)\n return get_level(list_[i], level)\n else:\n return list_", "def get_indent(node, level=0):\n if node.parent:\n level += 1\n return get_indent(node.parent, level)\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove ``>`` from beginning of a line.
def clean(self, line):
    m = self.RE.match(line)
    if line.strip() == ">":
        return ""
    elif m:
        return m.group(2)
    else:
        return line
[ "def _remove_leading_chars(self, line):\n return line[len(self.leading_chars):]", "def rm_first_line(text):\n return '\\n'.join(text.split('\\n')[1:])", "def delete_till_beginning_of_line(text):\n if text.rfind(\"\\n\") == -1:\n return ''\n return text[0:text.rfind(\"\\n\") + 1]", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Break a block into list items.
def get_items(self, block):
    items = []
    for line in block.split('\n'):
        m = self.CHILD_RE.match(line)
        if m:
            # This is a new list item
            # Check first item for the start index
            if not items and self.TAG=='ol':
                ...
[ "def blocks(content):\n\n for group in content.split(DIV):\n yield group", "def cut_to_block_n(items,block=3):\n num_show = int(len(items) / block)\n if num_show > 0:\n results = items[:block * num_show]\n else:\n results = items\n return results", "def split_list_into_sublis...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Insert AbbrPreprocessor before ReferencePreprocessor.
def extendMarkdown(self, md, md_globals):
    md.preprocessors.add('abbr', AbbrPreprocessor(md), '<reference')
[ "def extendMarkdown(self, md, md_globals):\r\n md.parser = MarkdownParser()\r\n md.preprocessors.add (\"header\", HeaderPreprocessor(self), \"<reference\")\r\n md.preprocessors.add(\"line\", LinePreprocessor(self), \"<reference\")", "def AddPreprocesorSymbol(self, symbol):\n \n assert(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add Admonition to Markdown instance.
def extendMarkdown(self, md, md_globals):
    md.registerExtension(self)
    md.parser.blockprocessors.add('admonition', AdmonitionProcessor(md.parser), '_begin')
[ "def extendMarkdown(self, md, md_globals):\n\n adicon = AdmonitionIconTreeprocessor(md)\n md.treeprocessors.add(\"admonitionicon\", adicon, \">inline\")\n md.registerExtension(self)", "def visit_title(self, node):\n # The titles of various admonitions are not commentable as titles.\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assign attrs to element.
def assign_attrs(self, elem, attrs):
    for k, v in get_attrs(attrs):
        if k == '.':
            # add to class
            cls = elem.get('class')
            if cls:
                elem.set('class', '%s %s' % (cls, v))
            else:
                elem.set('class',...
[ "def setattrs(self, attrs):\n for k, v in attrs:\n self.setattr(k, v)", "def put_elem_attr(self, elem_blk_id, elem_attrs):\n self.__ex_put_elem_attr(elem_blk_id, elem_attrs)", "def set_attribute(self,att,val):\r\n self.attributes[att] = val", "def attributes(self, attributes: \...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add HilitePostprocessor to Markdown instance.
def extendMarkdown(self, md, md_globals):
    hiliter = HiliteTreeprocessor(md)
    hiliter.config = self.getConfigs()
    md.treeprocessors.add("hilite", hiliter, "<inline")
    md.registerExtension(self)
[ "def extendMarkdown(self, md):\r\n # md.registerExtension(self)\r\n md.preprocessors.register(SpoilerblockPreprocessor(md), 'spoiler_block', 29) # Must be < 30\r", "def extendMarkdown(self, md, md_globals):\r\n md.parser = MarkdownParser()\r\n md.preprocessors.add (\"header\", HeaderP...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new dd and parse the block with it as the parent.
def create_item(self, parent, block):
    dd = etree.SubElement(parent, 'dd')
    self.parser.parseBlocks(dd, [block])
[ "def create_item(parent, block):\r\n li = markdown.etree.SubElement(parent, 'li')\r\n self.parser.parseBlocks(li, [block])", "def _make_record(self, parent, gline):\n\n if parent and gline.tag in (\"CONT\", \"CONC\"):\n # concatenate, only for non-BLOBs\n if parent.tag !...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add FencedBlockPreprocessor to the Markdown instance.
def extendMarkdown(self, md, md_globals):
    md.registerExtension(self)
    md.preprocessors.add('fenced_code_block', FencedBlockPreprocessor(md), ">normalize_whitespace")
[ "def extendMarkdown(self, md):\r\n # md.registerExtension(self)\r\n md.preprocessors.register(SpoilerblockPreprocessor(md), 'spoiler_block', 29) # Must be < 30\r", "def extendMarkdown(self, md, md_globals):\n md.registerExtension(self)\n\n md.parser.blockprocessors.add('details', Deta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add pieces to Markdown.
def extendMarkdown(self, md, md_globals):
    md.registerExtension(self)
    self.parser = md.parser
    self.md = md
    # Insert a preprocessor before ReferencePreprocessor
    md.preprocessors.add("footnote", FootnotePreprocessor(self), "<reference")
    # ...
[ "def extendMarkdown(self, md):\n # Insert del pattern into markdown parser\n md.inlinePatterns.register(ChordPDFPattern(CHORD_RE), \"chord\", 175)", "def extendMarkdown(self, md):\r\n # md.registerExtension(self)\r\n md.preprocessors.register(SpoilerblockPreprocessor(md), 'spoiler_bloc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clear the footnotes on reset, and prepare for a distinct document.
def reset(self):
    self.footnotes = OrderedDict()
    self.unique_prefix += 1
[ "def reset(self):\r\n self.footnotes = markdown.odict.OrderedDict()", "def finalize(self):\n # we could not fill out links while parsing (referenced sections where not known),\n # so try to set them now, where the document is complete\n for sec in self.itersections(recursive=True):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return ElementTree Element that contains Footnote placeholder.
def findFootnotesPlaceholder(self, root):
    def finder(element):
        for child in element:
            if child.text:
                if child.text.find(self.getConfig("PLACE_MARKER")) > -1:
                    return child, element, True
            if child.tail:
                ...
[ "def makeFootnotesDiv (self, doc) :\r\n\r\n if not self.footnotes.keys() :\r\n return None\r\n\r\n div = doc.createElement(\"div\")\r\n div.setAttribute('class', 'footnote')\r\n hr = doc.createElement(\"hr\")\r\n div.appendChild(hr)\r\n ol = doc.createElement(\"o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Store a footnote for later retrieval.
def setFootnote(self, id, text):
    self.footnotes[id] = text
[ "def pdf_footnote(self, pdf_footnote):\n\n self._pdf_footnote = pdf_footnote", "def Footnote(self, footnotes):\n xbl = .05 # bottom left in inches\n ybl = .05 # bottom left in inches\n lsp = .20 # Line spacing in inches.\n x = xbl/self.width\n y = (ybl + len(footnotes)*l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return div of footnotes as et Element.
def makeFootnotesDiv(self, root):
    if not list(self.footnotes.keys()):
        return None
    div = etree.Element("div")
    div.set('class', 'footnote')
    etree.SubElement(div, "hr")
    ol = etree.SubElement(div, "ol")
    for id in self.footnotes.keys():
        li...
[ "def makeFootnotesDiv (self, doc) :\r\n\r\n if not self.footnotes.keys() :\r\n return None\r\n\r\n div = doc.createElement(\"div\")\r\n div.setAttribute('class', 'footnote')\r\n hr = doc.createElement(\"hr\")\r\n div.appendChild(hr)\r\n ol = doc.createElement(\"o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an instance of the FootnoteExtension
def makeExtension(configs=[]):
    return FootnoteExtension(configs=configs)
[ "def makeFootnotesDiv (self, doc) :\r\n\r\n if not self.footnotes.keys() :\r\n return None\r\n\r\n div = doc.createElement(\"div\")\r\n div.setAttribute('class', 'footnote')\r\n hr = doc.createElement(\"hr\")\r\n div.appendChild(hr)\r\n ol = doc.createElement(\"o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure id is unique in set of ids. Append '_1', '_2'... if not
def unique(id, ids):
    while id in ids or not id:
        m = IDCOUNT_RE.match(id)
        if m:
            id = '%s_%d'% (m.group(1), int(m.group(2))+1)
        else:
            id = '%s_%d'% (id, 1)
    ids.add(id)
    return id
[ "def test_unique_based_on_id(self):\n unique = misc.unique_based_on_id\n self.assertSequenceEqual(unique([]), [])\n self.assertSequenceEqual(unique([1, 2, 3]), [1, 2, 3])\n self.assertSequenceEqual(unique([1, 1, 3]), [1, 3])\n self.assertSequenceEqual(unique([[], [], 3]), [[], [],...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add MetaPreprocessor to Markdown instance.
def extendMarkdown(self, md, md_globals):
    md.preprocessors.add("meta", MetaPreprocessor(md), "_begin")
[ "def extendMarkdown(self, md, md_globals):\r\n md.parser = MarkdownParser()\r\n md.preprocessors.add (\"header\", HeaderPreprocessor(self), \"<reference\")\r\n md.preprocessors.add(\"line\", LinePreprocessor(self), \"<reference\")", "def extendMarkdown(self, md):\r\n # md.registerExte...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build a url from the label, a base, and an end.
def build_url(label, base, end):
    clean_label = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
    return '%s%s%s'% (base, clean_label, end)
[ "def make_url(base_url,start_record, per_page,page):\n final_url = base_url+f'from={start_record}&count={per_page}&page={page}'\n return final_url", "def construct_url(context, request):", "def __url_builder(self, endpoint: str, **kwargs: dict) -> str:\n\n endpoint = self.__clean_endpoints_string(e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build the default set of inline patterns for Markdown.
def build_inlinepatterns(md_instance, **kwargs):
    inlinePatterns = odict.OrderedDict()
    inlinePatterns["backtick"] = BacktickPattern(BACKTICK_RE)
    inlinePatterns["escape"] = EscapePattern(ESCAPE_RE, md_instance)
    inlinePatterns["reference"] = ReferencePattern(REFERENCE_RE, md_instance)
    inlinePatter...
[ "def extendMarkdown(self, md, md_globals):\n md.inlinePatterns['autolink'] = UrlizePattern(URLIZE_RE, md)", "def extendMarkdown(self, md, md_globals):\n md.inlinePatterns[\"mention_link\"] = TwitterMentionPattern(MENTION_RE, md)", "def extendMarkdown(self, md):\n # Insert del pattern into m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return unescaped text given text with an inline placeholder.
def unescape(self, text):
    try:
        stash = self.markdown.treeprocessors['inline'].stashed_nodes
    except KeyError:
        return text
    def itertext(el):
        ' Reimplement Element.itertext for older python versions '
        tag = el.tag
        if not isinstance...
[ "def templatize(self, text, context):\n return Template(\"{% autoescape off %}\" + text + \"{% endautoescape %}\").render(context)", "def get_inline_expression(self, text):\n text = text.strip()\n if not text.startswith(self.inline_tags[0]) or not text.endswith(\n self.inline_tags[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return unescaped text given text with an inline placeholder.
def unescape(self, text):
    try:
        stash = self.markdown.treeprocessors['inline'].stashed_nodes
    except KeyError:
        return text
    def get_stash(m):
        id = m.group(1)
        value = stash.get(id)
        if value is not None:
            try:
                ...
[ "def templatize(self, text, context):\n return Template(\"{% autoescape off %}\" + text + \"{% endautoescape %}\").render(context)", "def get_inline_expression(self, text):\n text = text.strip()\n if not text.startswith(self.inline_tags[0]) or not text.endswith(\n self.inline_tags[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sanitize a url against xss attacks in "safe_mode".
def sanitize_url(self, url):
    url = url.replace(' ', '%20')
    if not self.markdown.safeMode:
        # Return immediately, bypassing parsing.
        return url
    try:
        scheme, netloc, path, params, query, fragment = url = urlparse(url)
    except ValueError:
        ...
[ "def _sanitizeURL(self, couchURL):\n return couchURL", "def safe_uri(uri):\n path, query, frag = split_path(uri)\n safe = True\n for part in (path, query, frag):\n safe = safe and safe_chars_regex.search(part)\n return safe", "def make_valid_url(self, url):\n\n # Replace spaces ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the value of the item at the given zero-based index.
def value_for_index(self, index):
    return self[self.keyOrder[index]]
[ "def __getitem__( self, index ) :\n\n return( self.zas[index] )", "def value_at(self, index):\n index = np.where(self.indices == index)[0]\n return self.data[index] if index.size != 0 else 0", "def __getitem__(self, index=0):\n if index < 0:\n index = len(self) + index\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inserts the key, value pair before the item with the given index.
def insert(self, index, key, value):
    if key in self.keyOrder:
        n = self.keyOrder.index(key)
        del self.keyOrder[n]
        if n < index:
            index -= 1
    self.keyOrder.insert(index, key)
    super(OrderedDict, self).__setitem__(key, value)
[ "def insert_before_element(self, item, element):\n if item is not None and element is not None:\n element.insert_before(item)\n else:\n raise IndexError", "def insert_at(self, index, item):\n ptr = self.head\n if ptr is None:\n self.head = SinglyLinkedL...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return index or None for a given location.
def index_for_location(self, location):
    if location == '_begin':
        i = 0
    elif location == '_end':
        i = None
    elif location.startswith('<') or location.startswith('>'):
        i = self.index(location[1:])
        if location.startswith('>'):
            if...
[ "def index_or_none(l, item, *args):\n\n try:\n idx = l.index(item, *args)\n except ValueError:\n idx = None\n return idx", "def find_index_corresponding_store(job, route):\n store_id_to_find = job.store['id']\n index_to_return = None\n for i in range(len(route.tour)):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build the default postprocessors for Markdown.
def build_postprocessors(md_instance, **kwargs):
    postprocessors = odict.OrderedDict()
    postprocessors["raw_html"] = RawHtmlPostprocessor(md_instance)
    postprocessors["amp_substitute"] = AndSubstitutePostprocessor()
    postprocessors["unescape"] = UnescapePostprocessor()
    return postprocessors
[ "def extendMarkdown(self, md, md_globals):\r\n md.parser = MarkdownParser()\r\n md.preprocessors.add (\"header\", HeaderPreprocessor(self), \"<reference\")\r\n md.preprocessors.add(\"line\", LinePreprocessor(self), \"<reference\")", "def _build_post_processor_list_from_args(self) -> typing.L...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterate over html stash and restore "safe" html.
def run(self, text):
    for i in range(self.markdown.htmlStash.html_counter):
        html, safe = self.markdown.htmlStash.rawHtmlBlocks[i]
        if self.markdown.safeMode and not safe:
            if str(self.markdown.safeMode).lower() == 'escape':
                html = self.escape(html)
            ...
[ "def _clear_from_html(self, elem):\n if type(elem) == str:\n return html.unescape(elem)\n elif type(elem) == dict:\n return {self._clear_from_html(k): self._clear_from_html(v) for k, v in elem.items()}\n elif type(elem) == list:\n return [self._clear_from_html(e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build the default set of preprocessors used by Markdown.
def build_preprocessors(md_instance, **kwargs):
    preprocessors = odict.OrderedDict()
    preprocessors['normalize_whitespace'] = NormalizeWhitespace(md_instance)
    if md_instance.safeMode != 'escape':
        preprocessors["html_block"] = HtmlBlockPreprocessor(md_instance)
    preprocessors["reference"] = Ref...
[ "def extendMarkdown(self, md, md_globals):\r\n md.parser = MarkdownParser()\r\n md.preprocessors.add (\"header\", HeaderPreprocessor(self), \"<reference\")\r\n md.preprocessors.add(\"line\", LinePreprocessor(self), \"<reference\")", "def preprocessing():", "def default_processors(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Same effect as concatenating the strings in items, finding the character to which stringindex refers in that string, and returning the item in which that character resides.
def _stringindex_to_listindex(self, stringindex, items):
    items.append('dummy')
    i, count = 0, 0
    while count <= stringindex:
        count += len(items[i])
        i += 1
    return i - 1
[ "def __getitem__(self, item):\r\n if isinstance(item, slice):\r\n # Slices must be handled specially.\r\n return self._slice(item)\r\n try:\r\n self._char_indexes[item]\r\n except IndexError:\r\n raise IndexError(\"ANSIString Index out of range\")\r\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build the default treeprocessors for Markdown.
def build_treeprocessors(md_instance, **kwargs):
    treeprocessors = odict.OrderedDict()
    treeprocessors["inline"] = InlineProcessor(md_instance)
    treeprocessors["prettify"] = PrettifyTreeprocessor(md_instance)
    return treeprocessors
[ "def extendMarkdown(self, md, md_globals):\r\n md.parser = MarkdownParser()\r\n md.preprocessors.add (\"header\", HeaderPreprocessor(self), \"<reference\")\r\n md.preprocessors.add(\"line\", LinePreprocessor(self), \"<reference\")", "def create_tree(markdown):\n global blocks, pos\n # ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add node to stash
def __stashNode(self, node, type):
    placeholder, id = self.__makePlaceholder(type)
    self.stashed_nodes[id] = node
    return placeholder
[ "def stash(self):", "def add_to_tree(self, node):\n # print(node.state)\n # if(node.parent_node is not None):\n # print(\"parent:\", node.parent_node.state)\n # print()\n self.tree.append(node)\n return", "def create_stash(self, payload, path=None):\n if path:\n self._req...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply inline patterns to a parsed Markdown tree. Iterate over ElementTree, find elements with inline tag, apply inline patterns and append newly created Elements to tree. If you don't want to process your data with inline patterns, instead of normal string,
def run(self, tree):
    self.stashed_nodes = {}
    stack = [tree]
    while stack:
        currElement = stack.pop()
        insertQueue = []
        for child in currElement.getchildren():
            if child.text and not isinstance(child.text, util.AtomicString):
                ...
[ "def parseinline(registry:Registry,\n element:Union[Element,str], text:str, parent=None):\n if text == '': return ['']\n\n block = registry[element] if isinstance(element, str) else element\n subinline = list(registry.inline_subscriptions(block.subinline, parent))\n\n # a map of regexes to parsin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the tag is a block level HTML tag.
def isBlockLevel(tag):
    if isinstance(tag, string_type):
        return BLOCK_LEVEL_ELEMENTS.match(tag)
    # Some ElementTree tags are not strings, so return False.
    return False
[ "def is_block_tag(tag):\n return getattr(tag, \"tag_display\", None) == \"block\"", "def IsBlock(self) -> bool:", "def is_block(self):\n if self.get_level() == 1:\n return True\n else:\n return False", "def IsBlock(block_name):\n idef = scriptcontext.doc.InstanceDefin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses a string representing bool value. If parsing was successful, returns True or False. If parsing was not successful, raises ValueError, or, if fail_on_errors=False, returns None.
def parseBoolValue(value, fail_on_errors=True):
    if not isinstance(value, string_type):
        return bool(value)
    elif value.lower() in ('true', 'yes', 'y', 'on', '1'):
        return True
    elif value.lower() in ('false', 'no', 'n', 'off', '0'):
        return False
    elif fail_on_errors:
        r...
[ "def parse_bool(value):\n return bool({\n 'True': True,\n 'False': False\n }.get(value, value))", "def str2bool(text: str) -> bool:\n text = text.lower()\n if text == \"true\":\n return True\n elif text == \"false\":\n return False\n else:\n raise ValueError(f\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Saves an HTML segment for later reinsertion. Returns a placeholder string that needs to be inserted into the document.
def store(self, html, safe=False):
    self.rawHtmlBlocks.append((html, safe))
    placeholder = self.get_placeholder(self.html_counter)
    self.html_counter += 1
    return placeholder
[ "def save(self, destination):\n if not self.html:\n self.html = file_html(self.current_plot, CDN, None)\n with open(destination, 'w') as output_file:\n output_file.write(self.html)\n self.filename = destination\n return self.filename", "def save(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Store tag data and return a placeholder.
def store_tag(self, tag, attrs, left_index, right_index):
    self.tag_data.append({'tag': tag, 'attrs': attrs, 'left_index': left_index, 'right_index': right_index})
    placeholder = TAG_PLACEHOLDER % str(self.tag_counter)
    self.tag_count...
[ "def add_new(self, tag, VR, value):\n data_element = DataElement(tag, VR, value)\n self[data_element.tag] = data_element # use data_element.tag since DataElement verified it", "def _save(self, data, etag):\n cache.set(self.ETAG_KEY, etag, self._cache_timeout)\n cache.set(self.DATA_KE...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run Markdown from the command line.
def run():
    # Parse options and adjust logging level if necessary
    options, logging_level = parse_options()
    if not options:
        sys.exit(2)
    logger.setLevel(logging_level)
    logger.addHandler(logging.StreamHandler())
    # Run
    markdown.markdownFromFile(**options)
[ "def run_markdown(cpp_filename):\n basename, ext = os.path.splitext(cpp_filename)\n # https://stackoverflow.com/questions/4760215/running-shell-command-from-python-and-capturing-the-output\n # https://stackoverflow.com/questions/35160256/how-do-i-output-lists-as-a-table-in-jupyter-notebook\n\n # Run exe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encode codec that converts Unicode characters into named entities (where the names are known), or failing that, numerical entities.
def named_entities_codec(text):
    if isinstance(text, (UnicodeEncodeError, UnicodeTranslateError)):
        s = []
        for c in text.object[text.start:text.end]:
            if ord(c) in codepoint2name:
                s.append(u'&%s;' % codepoint2name[ord(c)])
            else:
                s.app...
[ "def named_entities_codec(text):\r\n \r\n if isinstance(text, (UnicodeEncodeError, UnicodeTranslateError)):\r\n s = []\r\n for c in text.object[text.start:text.end]:\r\n if ord(c) in codepoint2name:\r\n s.append('&{};'.format(codepoint2name[ord(c)]))\r\n else...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encode ampersands into &amp;
def encode_ampersands(text):
    text = re.sub('&(?!([a-zA-Z0-9]+|#[0-9]+|#x[0-9a-fA-F]+);)', '&amp;', text)
    return text
[ "def fix_ampersands(qs):\n parts = []\n for p in qs.split('='):\n if p.count('&') > 1:\n l = p.split('&')\n last = l.pop()\n p = '%26'.join(l) + '&' + last\n parts.append(p)\n\n # an & in the last part definitely needs encoding\n parts[-1] = parts[-1].repla...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encode codec that converts Unicode characters into named entities (where the names are known), or failing that, numerical entities.
def named_entities_codec(text):
    if isinstance(text, (UnicodeEncodeError, UnicodeTranslateError)):
        s = []
        for c in text.object[text.start:text.end]:
            if ord(c) in codepoint2name:
                s.append('&{};'.format(codepoint2name[ord(c)]))
            else:
                ...
[ "def named_entities_codec(text):\r\n \r\n if isinstance(text, (UnicodeEncodeError, UnicodeTranslateError)):\r\n s = []\r\n for c in text.object[text.start:text.end]:\r\n if ord(c) in codepoint2name:\r\n s.append(u'&%s;' % codepoint2name[ord(c)])\r\n else:\r\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encode ampersands into &amp;
def encode_ampersands(text):
    text = re.sub('&(?!([a-zA-Z0-9]+|#[0-9]+|#x[0-9a-fA-F]+);)', '&amp;', text)
    return text
[ "def fix_ampersands(qs):\n parts = []\n for p in qs.split('='):\n if p.count('&') > 1:\n l = p.split('&')\n last = l.pop()\n p = '%26'.join(l) + '&' + last\n parts.append(p)\n\n # an & in the last part definitely needs encoding\n parts[-1] = parts[-1].repla...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a string of wiki markup and outputs a list of genshi Fragments (Elements and strings). This recursive function, with help from the WikiElement objects, does almost all the parsing. When no WikiElement objects are supplied, escapes are removed from ``text`` (except if remove_escapes=True) and it is returned as-is. ...
def fragmentize(text,wiki_elements, element_store, environ, remove_escapes=True):
    while wiki_elements:
        # If the first supplied wiki_element is actually a list of elements, \
        # search for all of them and match the closest one only.
        if isinstance(wiki_elements[0],(list,tuple)):
            ...
[ "def parse_elements(text):\n \n \n # sanitise and split using BeautifulSoup\n soup = BeautifulSoup(parse(text))\n elements = [e for e in soup.contents if type(e) == Tag]\n \n # wrap blocks in <div>\n format = u\"<div class='doccomment-block' id='DE-%d'>\\n%s\\n</div>\"\n for seq,txt in enu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This re_string is for finding generic block elements like lists (ordered, unordered, and definition) that start with a single token.
def re_string(self):
    leading_whitespace = r'^([ \t]*'
    only_one_token = re.escape(self.token)+ '(?!' + re.escape(self.token) + ')'
    rest_of_list = r'.*?(?:\n|\Z))'
    only_one_stop_token = '([' + re.escape(self.stop_tokens) + r'])(?!\3)'
    look_ahead = '(?=([ \t]*' + only_o...
[ "def begin_token(self) -> str:", "def __find_block_start(self):\n try:\n return self.__find_token(self.__block_head)\n except RouteParserError:\n raise StartTokenNotFoundError(_('No match for entry block start'))", "def test_parse_token_single_element_name(self):\n\n #...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set `self.reader` by name.
def set_reader(self, reader_name, parser, parser_name):
    reader_class = readers.get_reader_class(reader_name)
    self.reader = reader_class(parser, parser_name)
    self.parser = self.reader.parser
[ "def set_reader(self, fd, on_readable):\n raise NotImplementedError", "def setName(self, name):\n self.content = name", "def setScanner(self, scannerName):\n self.scanner = self.sourceManager.OpenSource(scannerName)", "def set_name(self, name):\n if self._status == \"lock\":\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set `self.writer` by name.
def set_writer(self, writer_name):
    writer_class = writers.get_writer_class(writer_name)
    self.writer = writer_class()
[ "def get_writer(self, name=None):\n self._create_working_folder()\n name = self.clean_name(name)\n if name not in self.writers:\n self.writers[name] = open(os.path.join(self.working_folder, name), 'wb')\n return self.writers[name]", "def set_writer(self, fd, on_writable):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process command line options and arguments (if `self.settings` not already set), run `self.reader` and then `self.writer`. Return `self.writer`'s output.
def publish(self, argv=None, usage=None, description=None,
            settings_spec=None, settings_overrides=None,
            config_section=None, enable_exit_status=False):
    exit = None
    try:
        if self.settings is None:
            self.process_command_line(
                ...
[ "def main(cmdlineargs=None, trimmed_outfile=sys.stdout):\n\tparser = get_option_parser()\n\tif cmdlineargs is None:\n\t\tcmdlineargs = sys.argv[1:]\n\toptions, args = parser.parse_args(args=cmdlineargs)\n\n\tif len(args) == 0:\n\t\tparser.error(\"At least one parameter needed: name of a FASTA or FASTQ file.\")\n\te...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set up & run a `Publisher` for commandlinebased file I/O (input and output file paths taken automatically from the command line). Return the encoded string output also. This is just like publish_cmdline, except that it uses io.BinaryFileOutput instead of io.FileOutput.
def publish_cmdline_to_binary(reader=None, reader_name='standalone',
                              parser=None, parser_name='restructuredtext',
                              writer=None, writer_name='pseudoxml',
                              settings=None, settings_spec=None,
                              settings_overrides=None, config_section=None,
                              ...
[ "def publish(self, argv=None, usage=None, description=None,\r\n settings_spec=None, settings_overrides=None,\r\n config_section=None, enable_exit_status=False):\r\n exit = None\r\n try:\r\n if self.settings is None:\r\n self.process_command_line(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an input string, returns a dictionary of HTML document parts. Dictionary keys are the names of parts, and values are Unicode strings; encoding is up to the client.
def html_parts(input_string, source_path=None, destination_path=None,
               input_encoding='unicode', doctitle=True,
               initial_header_level=1):
    overrides = {'input_encoding': input_encoding,
                 'doctitle_xform': doctitle,
                 'initial_header_level': initial_head...
[ "def direct_from_string(text: str) -> dict:\n return MarkdownTextObject(text=text).to_dict()", "def _parse_fragment(fragment_string: str) -> Dict[str, str]:\n fragment_string = fragment_string.lstrip('#')\n\n try:\n return dict(\n cast(Tuple[str, str], tuple(key_value_string.split('...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an input string, returns an HTML fragment as a string. The return value is the contents of the <body> element.
def html_body(input_string, source_path=None, destination_path=None,
              input_encoding='unicode', output_encoding='unicode',
              doctitle=True, initial_header_level=1):
    parts = html_parts(
        input_string=input_string, source_path=source_path,
        destination_path=destination_path...
[ "def fragment_fromstring(html, create_parent=False,\n guess_charset=False, parser=None):\n if not isinstance(html, _strings):\n raise TypeError('string required')\n\n accept_leading_text = bool(create_parent)\n\n elements = fragments_fromstring(\n html, guess_charset=gu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Store multiple values in `parser.values`. (Option callback.) Store `None` for each attribute named in `args`, and store the value for each key (attribute name) in `kwargs`.
def store_multiple(option, opt, value, parser, *args, **kwargs):
    for attribute in args:
        setattr(parser.values, attribute, None)
    for key, value in kwargs.items():
        setattr(parser.values, key, value)
[ "def _parse_args(self):\n self._verify(self.args + list(self.kwargs))\n\n self.name = self.args[0]\n self.nodes = self.args[1:1+self.num_nodes]\n self.value = self._parse_values(self.args[1+self.num_nodes:])\n self.kwargs = self._parse_pairs(self.kwargs)\n # for key, value ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a configuration file during option processing. (Option callback.)
def read_config_file(option, opt, value, parser): try: new_settings = parser.get_config_file_settings(value) except ValueError, error: parser.error(error) parser.values.update(new_settings, parser)
[ "def read(self):\n\n # Add options from config file.\n print self._config.get_all()\n for id, (val, type) in self._config.get_all().items():\n if type == 'src' and not self.check(id, val): # Don't use wrong paths\n log.warning(_('idg.options.not.valid.use.default') + ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Interpret filesystem path settings relative to the `base_path` given. Paths are values in `pathdict` whose keys are in `keys`. Get `keys` from `OptionParser.relative_path_settings`.
def make_paths_absolute(pathdict, keys, base_path=None): if base_path is None: base_path = os.getcwdu() # type(base_path) == unicode # to allow combining non-ASCII cwd with unicode values in `pathdict` for key in keys: if key in pathdict: value = pathdict[key] ...
[ "def process_path_key(self, dirpath, filename, key_path, dictionary, keys, level, must_exist, can_have_subdict, default_val):\n # found the key_path, process values\n if level == len(keys) - 1:\n key = keys[level]\n # if a wildcard is specified at this level, that means we\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a copy of `settings_spec` excluding/replacing some settings. `settings_spec` is a tuple of configuration settings with a structure described for docutils.SettingsSpec.settings_spec. Optional positional arguments are names of to-be-excluded settings. Keyword arguments are option specification replacements. (See the...
def filter_settings_spec(settings_spec, *exclude, **replace): settings = list(settings_spec) # every third item is a sequence of option tuples for i in range(2, len(settings), 3): newopts = [] for opt_spec in settings[i]: # opt_spec is ("<help>", [<option strings>], {<keywo...
[ "def _prepare_settings(cls, settings):\n opt_params = cls.get_optional_params()\n for setting_name, description in opt_params.items():\n if setting_name not in settings:\n settings[setting_name] = description[2]", "def get_settings_model(self):\n igno...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Call the validator function on applicable settings and evaluate the 'overrides' option. Extends `optparse.Option.process`.
def process(self, opt, value, values, parser): result = optparse.Option.process(self, opt, value, values, parser) setting = self.dest if setting: if self.validator: value = getattr(values, setting) try: new_value = self.valid...
[ "def ValidateOptions(self, opt, args):", "def option_override(options):\n if not options.config_file:\n _logger.warning('config file {0} not found'.format(options.config_file))\n return\n\n config = configparser.RawConfigParser()\n config.read(options.config_file)\n\n section = 'system'\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For each component, first populate from the `SettingsSpec.settings_spec` structure, then from the `SettingsSpec.settings_defaults` dictionary. After all components have been processed, check for and populate from each component's `SettingsSpec.settings_default_overrides` dictionary.
def populate_from_components(self, components): for component in components: if component is None: continue settings_spec = component.settings_spec self.relative_path_settings.extend( component.relative_path_settings) for i i...
[ "def propagate_defaults(self, requiredvars, config, defaultsection=None):\n for option, infodic in requiredvars.items():\n if 'section' in infodic:\n section = infodic['section']\n else:\n section = defaultsection\n\n default = infodic['default']...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return list of config files, from environment or standard.
def get_standard_config_files(self): try: config_files = os.environ['DOCUTILSCONFIG'].split(os.pathsep) except KeyError: config_files = self.standard_config_files # If 'HOME' is not set, expandvars() requires the 'pwd' module which is # not available under...
[ "def get_config_files(self):\n if package.backend.FORMAT == \"rpm\":\n return [\"sysconfig/clamd.amavisd\", \"tmpfiles.d/clamd.amavisd.conf\"]\n return []", "def in_cwd():\n configs = []\n\n for filename in os.listdir(os.getcwd()):\n if filename.startswith('.tmuxp') and is_co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get an option by its dest. If you're supplying a dest which is shared by several options, it is undefined which option of those is returned. A KeyError is raised if there is no option with the supplied dest.
def get_option_by_dest(self, dest): for group in self.option_groups + [self]: for option in group.option_list: if option.dest == dest: return option raise KeyError('No option with dest == %r.' % dest)
[ "def get(self, opt, index=0):\n\t\ti = 0\n\t\tfor n, d in self.options:\n\t\t\tif n == opt:\n\t\t\t\tif i == index:\n\t\t\t\t\treturn d\n\t\t\t\ti += 1\n\t\treturn None", "def get_option(cfg, base, opt):\n if cfg.has_option(base, opt):\n return cfg.get(base, opt)\n else:\n return None", "def get_option(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transform '-' to '_' so the cmdline form of option names can be used.
def optionxform(self, optionstr): return optionstr.lower().replace('-', '_')
[ "def to_option(attr):\n return '--%s' % attr.lower().replace('_', '-')", "def __set_opt(option):\n return \"--\" + option", "def option_prefix(self, option):\n return \"--\"", "def attr_to_arg(attr):\n return '--{}'.format(attr.replace('_', '-'))", "def option_strings(self) -> List[s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a given section as a dictionary (empty if the section doesn't exist).
def get_section(self, section): section_dict = {} if self.has_section(section): for option in self.options(section): section_dict[option] = self.get(section, option) return section_dict
[ "def get_section(self, section):\n output = {}\n for option in self.__config[section]:\n output[option] = self.__config[section][option]\n return output", "def get_section(section):", "def _extract_section(section_content):\n lines = section_content.split(\"\\n\")\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test whether the encoding of `stream` matches `encoding`. Returns ``True`` if the encodings match, ``False`` if they differ, and ``None`` if the comparison is not possible (e.g. a missing or invalid encoding).
def check_encoding(stream, encoding): try: return codecs.lookup(stream.encoding) == codecs.lookup(encoding) except (LookupError, AttributeError, TypeError): return None
[ "def test_encoding_detection():\n \n url = 'http://lavr.github.io/python-emails/tests/requests/some-utf8-text.html'\n expected_content = u'我需要单间。' # Chinese is for example only. Any other non-european encodings broken too.\n\n r =\trequests.get(url)\n\n # Response.apparent_encoding is good\n assert r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decode a string, `data`, heuristically. Raise UnicodeError if unsuccessful. The client application should call ``locale.setlocale`` at the beginning of processing.
def decode(self, data): if self.encoding and self.encoding.lower() == 'unicode': assert isinstance(data, unicode), ( 'input encoding is "unicode" ' 'but input is not a unicode object') if isinstance(data, unicode): # Accept unicode even if se...
[ "def smart_decode(data, charset):\n try:\n if isinstance(data, str):\n # It's already unicode so just return it\n return data\n else:\n return data.decode(charset, errors='strict')\n\n except UnicodeDecodeError: # PY3\n # Looks like the charset lies, try ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try to determine the encoding of `data` by looking in `data`. Check for a byte order mark (BOM) or an encoding declaration.
def determine_encoding_from_data(self, data): # check for a byte order mark: for start_bytes, encoding in self.byte_order_marks: if data.startswith(start_bytes): return encoding # check for an encoding declaration pattern in first 2 lines of file: for li...
[ "def get_data_encoding():", "def strip_byte_order_mark(cls, data):\n encoding = None\n if isinstance(data, str):\n return (\n data, encoding)\n else:\n if len(data) >= 4:\n if data[:2] == b'\\xfe\\xff':\n if data[2:4] != '\\x...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encode `data`, write it to a single file, and return it. With Python 3 or binary output mode, `data` is returned unchanged, except when specified encoding and output encoding differ.
def write(self, data): if not self.opened: self.open() if ('b' not in self.mode and sys.version_info < (3,0) or check_encoding(self.destination, self.encoding) is False ): if sys.version_info >= (3,0) and os.linesep != '\n': data = da...
[ "def write(self, data):\r\n if not self.opened:\r\n self.open()\r\n if ('b' not in self.mode and sys.version_info < (3,0)\r\n or check_encoding(self.destination, self.encoding) is False\r\n ):\r\n if sys.version_info >= (3,0) and os.linesep != '\\n':\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encode `data`, store it in `self.destination`, and return it.
def write(self, data): self.destination = self.encode(data) return self.destination
[ "def encode(self, data):\n\t\traise NotImplementedError()", "def write(self, data):\r\n if not self.opened:\r\n self.open()\r\n if ('b' not in self.mode and sys.version_info < (3,0)\r\n or check_encoding(self.destination, self.encoding) is False\r\n ):\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an iterable containing: self (if include_self is true); all descendants in tree traversal order (if descend is true); all siblings (if siblings is true) and their descendants (if also descend is true); the siblings of the parent (if ascend is true) and their descendants (if also descend is true); and so on. If `condi...
def traverse(self, condition=None, include_self=True, descend=True, siblings=False, ascend=False): if ascend: siblings=True # Check for special argument combinations that allow using an # optimized version of traverse() if include_self and descend and n...
[ "def traverse(self, condition=None, include_self=True, descend=True,\r\n siblings=False, ascend=False):\r\n if ascend:\r\n siblings=True\r\n # Check for special argument combinations that allow using an\r\n # optimized version of traverse()\r\n if include_self ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the first node in the iterable returned by traverse(), or None if the iterable is empty. The parameter list is the same as that of traverse(); note, however, that include_self defaults to 0.
def next_node(self, condition=None, include_self=False, descend=True, siblings=False, ascend=False): iterable = self.traverse(condition=condition, include_self=include_self, descend=descend, siblings=siblings, ascend=ascend)...
[ "def start_node(self):\n if len(self._nodes) == 0:\n return None\n return self._nodes[0]", "def get_first_node(self):\n return self._nodes[0]", "def suggested_node(self):\n for _ in range(0, len(self.node.children)):\n if self.current_idx == len(self.node.childr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }