def document_frequencies(self, hashes): '''Get document frequencies for a list of hashes. This will return all zeros unless the index was written with `hash_frequencies` set. If :data:`DOCUMENT_HASH_KEY` is included in `hashes`, that value will be returned with the total number of documents indexed. If you are looking for documents with that hash, pass :data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead. :param hashes: hashes to query :paramtype hashes: list of :class:`int` :return: map from hash to document frequency ''' result = {} for (k, v) in self.client.get(HASH_FREQUENCY_TABLE, *[(h,) for h in hashes]): if v is None: v = 0 result[k[0]] = v return result
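The doc-frequency lookup above pairs naturally with the corpus-wide document count stored under DOCUMENT_HASH_KEY. The following is a minimal sketch, not part of the library, of turning those counts into smoothed IDF weights; `index` stands for an instance of the class defining document_frequencies(), and DOCUMENT_HASH_KEY is assumed to be importable from the same module.

import math

def idf_weights(index, hashes):
    # Ask for the per-hash counts plus the corpus-wide document count.
    freqs = index.document_frequencies(list(hashes) + [DOCUMENT_HASH_KEY])
    total_docs = max(freqs.get(DOCUMENT_HASH_KEY, 0), 1)
    # Smoothed inverse document frequency; zero counts stay finite.
    return dict((h, math.log((1.0 + total_docs) / (1.0 + freqs.get(h, 0))))
                for h in hashes)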
def lookup(self, h): '''Get stream IDs for a single hash. This yields strings that can be retrieved using :func:`streamcorpus_pipeline._kvlayer.get_kvlayer_stream_item`, or fed back into :mod:`coordinate` or other job queue systems. Note that for common terms this can return a large number of stream IDs! This is a scan over a dense region of a :mod:`kvlayer` table so it should be reasonably efficient, but be prepared for it to return many documents in a large corpus. Blindly storing the results in a :class:`list` may be inadvisable. This will return nothing unless the index was written with :attr:`hash_docs` set. No document will correspond to :data:`DOCUMENT_HASH_KEY`; use :data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead. :param int h: Murmur hash to look up ''' for (_, k1, k2) in self.client.scan_keys(HASH_TF_INDEX_TABLE, ((h,), (h,))): yield kvlayer_key_to_stream_id((k1, k2))
def lookup_tf(self, h): '''Get stream IDs and term frequencies for a single hash. This yields pairs of (stream ID, term frequency); the stream ID can be retrieved using :func:`streamcorpus_pipeline._kvlayer.get_kvlayer_stream_item`. .. seealso:: :meth:`lookup` ''' for ((_, k1, k2), v) in self.client.scan(HASH_TF_INDEX_TABLE, ((h,), (h,))): yield (kvlayer_key_to_stream_id((k1, k2)), v)
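Because lookup() and lookup_tf() can yield very many stream IDs for a common hash, it is usually better to consume them incrementally than to build a list. A small usage sketch (assuming `index` exposes lookup_tf() as above) that keeps only the N highest term frequencies:

import heapq

def top_stream_ids(index, h, n=10):
    # heapq.nlargest consumes the generator one pair at a time, so memory
    # use stays bounded even when the hash is very common.
    return heapq.nlargest(n, index.lookup_tf(h), key=lambda pair: pair[1])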
def _make_stream_items(f): """Given a spinn3r feed, produce a sequence of valid StreamItems. Because of goopy Python interactions, you probably need to call this and re-yield its results, as >>> with open(filename, 'rb') as f: ... for si in _make_stream_items(f): ... yield si """ reader = ProtoStreamReader(f) return itertools.ifilter( lambda x: x is not None, itertools.imap(_make_stream_item, reader))
def _make_stream_item(entry): """Given a single spinn3r feed entry, produce a single StreamItem. Returns 'None' if a complete item can't be constructed. """ # get standard metadata, assuming it's present... if not hasattr(entry, 'permalink_entry'): return None pe = entry.permalink_entry # ...and create a streamitem... si = streamcorpus.make_stream_item( pe.date_found[:-1] + '.0Z', pe.canonical_link.href.encode('utf8')) if not si.stream_time: logger.debug('failed to generate stream_time from {0!r}' .format(pe.date_found)) return None if not si.abs_url: logger.debug('failed to generate abs_url from {0!r}' .format(pe.canonical_link.href)) return None # ...filling in the actual data si.body = _make_content_item( pe.content, alternate_data=entry.feed_entry.content.data) if not si.body: return None if not si.body.raw: return None if pe.content_extract.data: si.other_content['extract'] = _make_content_item(pe.content_extract) si.other_content['title'] = streamcorpus.ContentItem( raw=pe.title.encode('utf8'), media_type=pe.content_extract.mime_type, encoding='UTF-8') si.other_content['feed_entry_title'] = streamcorpus.ContentItem( raw=entry.feed_entry.title.encode('utf8'), media_type=entry.feed_entry.content.mime_type, encoding='UTF-8') if entry.feed_entry.content.data: si.other_content['feed_entry'] = _make_content_item( entry.feed_entry.content) si.source_metadata['lang'] = pe.lang[0].code si.source_metadata['author'] = json.dumps( dict( name=pe.author[0].name, email=pe.author[0].email, link=pe.author[0].link[0].href, ) ) si.source = entry.source.publisher_type return si
def _make_content_item(node, mime_type=None, alternate_data=None): """Create a ContentItem from a node in the spinn3r data tree. The ContentItem is created with raw data set to ``node.data``, decompressed if the node's encoding is 'zlib', and UTF-8 normalized, with a MIME type from ``node.mime_type``. ``node`` the actual node from the spinn3r protobuf data ``mime_type`` string MIME type to use (defaults to ``node.mime_type``) ``alternate_data`` alternate (compressed) data to use, if ``node.data`` is missing or can't be decompressed """ raw = node.data if getattr(node, 'encoding', None) == 'zlib': try: raw = zlib.decompress(node.data) except Exception, exc: if alternate_data is not None: try: raw = zlib.decompress(alternate_data) except Exception: raise exc # the original exception else: raise if mime_type is None: mime_type = node.mime_type raw = raw.decode('utf8').encode('utf8') return streamcorpus.ContentItem(raw=raw, media_type=mime_type)
def _read(self, n): """Read (up to) 'n' bytes from the underlying file. If any bytes have been pushed in with _unread() those are returned first.""" if n <= len(self._prefix): # the read can be fulfilled entirely from the prefix result = self._prefix[:n] self._prefix = self._prefix[n:] return result # otherwise we need to read some n -= len(self._prefix) result = self._prefix + self.f.read(n) self._prefix = "" return result
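The _read()/_unread() pair implements a simple pushback buffer: bytes read too eagerly are stashed in a prefix and handed back before anything new is read from the file. A self-contained sketch of the same pattern (the class and names here are illustrative, not the module's own):

import io

class PushbackReader(object):
    def __init__(self, f):
        self.f = f
        self._prefix = b""

    def _unread(self, data):
        # The next _read() call sees `data` before anything from the file.
        self._prefix = data + self._prefix

    def _read(self, n):
        if n <= len(self._prefix):
            result, self._prefix = self._prefix[:n], self._prefix[n:]
            return result
        n -= len(self._prefix)
        result = self._prefix + self.f.read(n)
        self._prefix = b""
        return result

reader = PushbackReader(io.BytesIO(b"hello world"))
chunk = reader._read(5)       # b"hello"
reader._unread(chunk[3:])     # push b"lo" back
print(reader._read(4))        # b"lo w"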
def _read_varint(self): """Read exactly a varint out of the underlying file.""" buf = self._read(8) (n, l) = _DecodeVarint(buf, 0) self._unread(buf[l:]) return n
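The varint read above over-reads up to 8 bytes, decodes one varint with protobuf's _DecodeVarint, and pushes the unused tail back. For reference, a standalone decoder showing the underlying encoding (7 payload bits per byte, high bit set while more bytes follow); it returns (value, new_position) like the protobuf helper does when starting at offset 0:

def decode_varint(buf, pos=0):
    result, shift = 0, 0
    while True:
        b = buf[pos] if isinstance(buf[pos], int) else ord(buf[pos])
        result |= (b & 0x7f) << shift
        pos += 1
        if not b & 0x80:       # high bit clear: this was the last byte
            return result, pos
        shift += 7

print(decode_varint(b'\xac\x02'))   # (300, 2)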
def _read_a(self, cls): """Read some protobuf-encoded object stored in a single block out of the file.""" o = cls() o.ParseFromString(self._read_block()) return o
def parse_keys_and_ranges(i_str, keyfunc, rangefunc): '''Parse the :class:`from_kvlayer` input string. This accepts two formats. In the textual format, it accepts any number of stream IDs in timestamp-docid format, separated by ``,`` or ``;``, and processes those as individual stream IDs. In the binary format, it accepts 20-byte key blobs (16 bytes md5 hash, 4 bytes timestamp) split by ``;`` or ``<``; e.g., ``a<f;x`` scans keys `a` through `f` and loads the single key `x`. `keyfunc` and `rangefunc` are run as generators and their yields are yielded from this function. ''' while i_str: m = _STREAM_ID_RE.match(i_str) if m: # old style text stream_id for retval in keyfunc(stream_id_to_kvlayer_key(m.group())): yield retval i_str = i_str[m.end():] while i_str and ((i_str[0] == ',') or (i_str[0] == ';')): i_str = i_str[1:] continue if len(i_str) == SI_KEY_LENGTH: # one key, get it. key = parse_si_key(i_str) for retval in keyfunc(key): yield retval return keya = i_str[:SI_KEY_LENGTH] splitc = i_str[SI_KEY_LENGTH] if splitc == '<': # range keyb = i_str[SI_KEY_LENGTH+1:SI_KEY_LENGTH+1+SI_KEY_LENGTH] i_str = i_str[SI_KEY_LENGTH+1+SI_KEY_LENGTH:] keya = parse_si_key(keya) keyb = parse_si_key(keyb) for retval in rangefunc(keya, keyb): yield retval elif splitc == ';': # keya is single key to load keya = parse_si_key(keya) for retval in keyfunc(keya): yield retval i_str = i_str[SI_KEY_LENGTH+1+1:] else: logger.error('bogus key splitter %s, %r', splitc, i_str) return
def get_kvlayer_stream_item(client, stream_id): '''Retrieve a :class:`streamcorpus.StreamItem` from :mod:`kvlayer`. This function requires that `client` already be set up properly:: client = kvlayer.client() client.setup_namespace(STREAM_ITEM_TABLE_DEFS, STREAM_ITEM_VALUE_DEFS) si = get_kvlayer_stream_item(client, stream_id) `stream_id` is in the form of :data:`streamcorpus.StreamItem.stream_id` and contains the ``epoch_ticks``, a hyphen, and the ``doc_id``. :param client: kvlayer client object :type client: :class:`kvlayer.AbstractStorage` :param str stream_id: stream Id to retrieve :return: corresponding :class:`streamcorpus.StreamItem` :raise exceptions.KeyError: if `stream_id` is malformed or does not correspond to anything in the database ''' if client is None: client = kvlayer.client() client.setup_namespace(STREAM_ITEM_TABLE_DEFS, STREAM_ITEM_VALUE_DEFS) key = stream_id_to_kvlayer_key(stream_id) for k, v in client.get(STREAM_ITEMS_TABLE, key): if v is not None: errors, bytestr = streamcorpus.decrypt_and_uncompress(v) return streamcorpus.deserialize(bytestr) raise KeyError(stream_id)
def make_doc_id_range(doc_id): '''Construct a tuple(begin, end) of one-tuple kvlayer keys from a hexdigest doc_id. ''' assert len(doc_id) == 32, 'expecting 32 hex string, not: %r' % doc_id bin_docid = base64.b16decode(doc_id.upper()) doc_id_range = ((bin_docid,), (bin_docid,)) return doc_id_range
def get_kvlayer_stream_item_by_doc_id(client, doc_id): '''Retrieve :class:`streamcorpus.StreamItem`s from :mod:`kvlayer`. Namely, it returns an iterator over all documents with the given docid. The docid should be an md5 hash of the document's abs_url. :param client: kvlayer client object :type client: :class:`kvlayer.AbstractStorage` :param str doc_id: doc id of documents to retrieve :return: generator of :class:`streamcorpus.StreamItem` ''' if client is None: client = kvlayer.client() client.setup_namespace(STREAM_ITEM_TABLE_DEFS, STREAM_ITEM_VALUE_DEFS) doc_id_range = make_doc_id_range(doc_id) for k, v in client.scan(STREAM_ITEMS_TABLE, doc_id_range): if v is not None: errors, bytestr = streamcorpus.decrypt_and_uncompress(v) yield streamcorpus.deserialize(bytestr)
def get_kvlayer_stream_ids_by_doc_id(client, doc_id): '''Retrieve stream ids from :mod:`kvlayer`. Namely, it returns an iterator over all stream ids with the given docid. The docid should be an md5 hash of the document's abs_url. :param client: kvlayer client object :type client: :class:`kvlayer.AbstractStorage` :param str doc_id: doc id of documents to retrieve :return: generator of str ''' if client is None: client = kvlayer.client() client.setup_namespace(STREAM_ITEM_TABLE_DEFS, STREAM_ITEM_VALUE_DEFS) doc_id_range = make_doc_id_range(doc_id) for k in client.scan_keys(STREAM_ITEMS_TABLE, doc_id_range): yield kvlayer_key_to_stream_id(k)
def serialize_si_key(si_key): ''' Return packed bytes representation of StreamItem kvlayer key. The result is 20 bytes, 16 of md5 hash, 4 of int timestamp. ''' if len(si_key[0]) != 16: raise ValueError('bad StreamItem key, expected 16 byte ' 'md5 hash binary digest, got: {0!r}'.format(si_key)) return struct.pack('>16si', si_key[0], si_key[1])
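The '>16si' format packs the 16-byte md5 digest followed by a big-endian signed 32-bit timestamp, 20 bytes total (SI_KEY_LENGTH elsewhere in this module); parse_si_key presumably performs the inverse. A quick illustration of the round trip using struct alone:

import struct

packed = struct.pack('>16si', b'0123456789abcdef', 1419861600)
digest, epoch_ticks = struct.unpack('>16si', packed)
print(len(packed))             # 20
print(digest, epoch_ticks)     # b'0123456789abcdef' 1419861600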
def streamitem_to_key_data(si): ''' extract the parts of a StreamItem that go into a kvlayer key, convert StreamItem to blob for storage. return (kvlayer key tuple), data blob ''' key = key_for_stream_item(si) data = streamcorpus.serialize(si) errors, data = streamcorpus.compress_and_encrypt(data) assert not errors, errors return key, data
def working_directory(path): """Change working directory and restore the previous on exit""" prev_dir = os.getcwd() os.chdir(str(path)) try: yield finally: os.chdir(prev_dir)
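working_directory() is written as a generator and is presumably wrapped with contextlib.contextmanager in its source module (the decorator is not visible in this listing), so it can be used as below; the previous working directory is restored even if the block raises.

import contextlib
import os
import tempfile

# Wrap explicitly here only because the decorator is not shown above.
working_directory_cm = contextlib.contextmanager(working_directory)

tmp = tempfile.mkdtemp()
with working_directory_cm(tmp):
    print(os.getcwd())    # inside tmp
print(os.getcwd())        # back where we started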
def strip_prefix(s, prefix, strict=False): """Removes the prefix, if it's there, otherwise returns input string unchanged. If strict is True, also ensures the prefix was present""" if s.startswith(prefix): return s[len(prefix) :] elif strict: raise WimpyError("string doesn't start with prefix") return s
def strip_suffix(s, suffix, strict=False): """Removes the suffix, if it's there, otherwise returns input string unchanged. If strict is True, also ensures the suffix was present""" if s.endswith(suffix): return s[: len(s) - len(suffix)] elif strict: raise WimpyError("string doesn't end with suffix") return s
def is_subsequence(needle, haystack): """Are all the elements of needle contained in haystack, and in the same order? There may be other elements interspersed throughout""" it = iter(haystack) for element in needle: if element not in it: return False return True
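The `element not in it` test advances the shared iterator, so later needle elements can only match haystack elements that come after earlier matches; ordering is enforced without any index bookkeeping. For example:

print(is_subsequence([1, 3, 5], [1, 2, 3, 4, 5]))   # True
print(is_subsequence([3, 1], [1, 2, 3]))            # False: order matters
print(is_subsequence('ace', 'abcde'))               # True: works on strings too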
def cube(): """Return an Ice application with a default home page. Create :class:`Ice` object, add a route to return the default page when a client requests the server root, i.e. /, using HTTP GET method, add an error handler to return HTTP error pages when an error occurs and return this object. The returned object can be used as a WSGI application. Returns: Ice: WSGI application. """ app = Ice() @app.get('/') def default_home_page(): """Return a default home page.""" return simple_html('It works!', '<h1>It works!</h1>\n' '<p>This is the default ice web page.</p>') @app.error() def generic_error_page(): """Return a simple and generic error page.""" return simple_html(app.response.status_line, '<h1>{title}</h1>\n' '<p>{description}</p>\n' '<hr>\n' '<address>Ice/{version}</address>'.format( title=app.response.status_line, description=app.response.status_detail, version=__version__)) def simple_html(title, body): """Return a simple HTML page.""" return ( '<!DOCTYPE html>\n' '<html>\n<head><title>{title}</title></head>\n' '<body>\n{body}\n</body>\n</html>\n' ).format(title=title, body=body) return app
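A minimal usage sketch: the object returned by cube() is a WSGI application, so it can be served directly with the built-in development server described below (defaults: 127.0.0.1:8080).

app = cube()

if __name__ == '__main__':
    # Serve http://127.0.0.1:8080/ with the default "It works!" page.
    app.run()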
def run(self, host='127.0.0.1', port=8080): """Run the application using a simple WSGI server. Arguments: host (str, optional): Host on which to listen. port (int, optional): Port number on which to listen. """ from wsgiref import simple_server self._server = simple_server.make_server(host, port, self) self._server.serve_forever()
def exit(self): """Stop the simple WSGI server running the application.""" if self._server is not None: self._server.shutdown() self._server.server_close() self._server = None
def route(self, method, pattern): """Decorator to add route for a request with any HTTP method. Arguments: method (str): HTTP method name, e.g. GET, POST, etc. pattern (str): Routing pattern the path must match. Returns: function: Decorator function to add route. """ def decorator(callback): self._router.add(method, pattern, callback) return callback return decorator
def error(self, status=None): """Decorator to add a callback that generates error page. The *status* parameter specifies the HTTP response status code for which the decorated callback should be invoked. If the *status* argument is not specified, then the decorated callable is considered to be a fallback callback. A fallback callback, when defined, is invoked to generate the error page for any HTTP response representing an error when there is no error handler defined explicitly for the response code of the HTTP response. Arguments: status(int, optional): HTTP response status code. Returns: function: Decorator function to add error handler. """ def decorator(callback): self._error_handlers[status] = callback return callback return decorator
def static(self, root, path, media_type=None, charset='UTF-8'): """Send content of a static file as response. The path to the document root directory should be specified as the root argument. This is very important to prevent directory traversal attack. This method guarantees that only files within the document root directory are served and no files outside this directory can be accessed by a client. The path to the actual file to be returned should be specified as the path argument. This path must be relative to the document directory. The *media_type* and *charset* arguments are used to set the Content-Type header of the HTTP response. If *media_type* is not specified or specified as ``None`` (the default), then it is guessed from the filename of the file to be returned. Arguments: root (str): Path to document root directory. path (str): Path to file relative to document root directory. media_type (str, optional): Media type of file. charset (str, optional): Character set of file. Returns: bytes: Content of file to be returned in the HTTP response. """ root = os.path.abspath(os.path.join(root, '')) path = os.path.abspath(os.path.join(root, path.lstrip('/\\'))) # Save the filename from the path in the response state, so that # a following download() call can default to this filename for # downloadable file when filename is not explicitly specified. self.response.state['filename'] = os.path.basename(path) if not path.startswith(root): return 403 elif not os.path.isfile(path): return 404 if media_type is not None: self.response.media_type = media_type else: self.response.media_type = mimetypes.guess_type(path)[0] self.response.charset = charset with open(path, 'rb') as f: return f.read()
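The traversal guard in static() works by re-anchoring the requested path under the document root and rejecting anything whose absolute form escapes it. A standalone illustration of just that check (the helper name is ours, not the framework's):

import os

def is_within_root(root, requested):
    # Mirror static(): absolutize the root, join the stripped request path,
    # and require that the result still lies under the root.
    root = os.path.abspath(os.path.join(root, ''))
    full = os.path.abspath(os.path.join(root, requested.lstrip('/\\')))
    return full.startswith(root)

print(is_within_root('/var/www', 'css/site.css'))       # True
print(is_within_root('/var/www', '../../etc/passwd'))   # False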
def download(self, content, filename=None, media_type=None, charset='UTF-8'): """Send content as attachment (downloadable file). The *content* is sent after setting Content-Disposition header such that the client prompts the user to save the content locally as a file. An HTTP response status code may be specified as *content*. If the status code is not ``200``, then this method does nothing and returns the status code. The filename used for the download is determined according to the following rules. The rules are followed in the specified order. 1. If *filename* is specified, then the base name from this argument, i.e. ``os.path.basename(filename)``, is used as the filename for the download. 2. If *filename* is not specified or specified as ``None`` (the default), then the base name from the file path specified to a previous :meth:`static` call made while handling the current request is used. 3. If *filename* is not specified and there was no :meth:`static` call made previously for the current request, then the base name from the current HTTP request path is used. 4. As a result of the above steps, if the resultant *filename* turns out to be empty, then :exc:`ice.LogicError` is raised. The *media_type* and *charset* arguments are used in the same manner as they are used in :meth:`static`. Arguments: content (str, bytes or int): Content to be sent as download or HTTP status code of the response to be returned. filename (str): Filename to use for saving the content media_type (str, optional): Media type of file. charset (str, optional): Character set of file. Returns: content, i.e. the first argument passed to this method. Raises: LogicError: When filename cannot be determined. """ if isinstance(content, int) and content != 200: return content if filename is not None: filename = os.path.basename(filename) elif 'filename' in self.response.state: filename = self.response.state['filename'] else: filename = os.path.basename(self.request.path) if filename == '': raise LogicError('Cannot determine filename for download') if media_type is not None: self.response.media_type = media_type else: self.response.media_type = mimetypes.guess_type(filename)[0] self.response.charset = charset self.response.add_header('Content-Disposition', 'attachment; ' 'filename="{}"'.format(filename)) return content
def _get_error_page_callback(self): """Return an error page callback for the current response status.""" if self.response.status in self._error_handlers: return self._error_handlers[self.response.status] elif None in self._error_handlers: return self._error_handlers[None] else: # Rudimentary error handler if no error handler was found self.response.media_type = 'text/plain' return lambda: self.response.status_line
def add(self, method, pattern, callback): """Add a route. Arguments: method (str): HTTP method, e.g. GET, POST, etc. pattern (str): Pattern that request paths must match. callback (callable): Route handler that is invoked when a request path matches the *pattern*. """ pat_type, pat = self._normalize_pattern(pattern) if pat_type == 'literal': self._literal[method][pat] = callback elif pat_type == 'wildcard': self._wildcard[method].append(WildcardRoute(pat, callback)) else: self._regex[method].append(RegexRoute(pat, callback))
def contains_method(self, method): """Check if there is at least one handler for *method*. Arguments: method (str): HTTP method name, e.g. GET, POST, etc. Returns: ``True`` if there is at least one route defined for *method*, ``False`` otherwise """ return method in itertools.chain(self._literal, self._wildcard, self._regex)
def resolve(self, method, path): """Resolve a request to a route handler. Arguments: method (str): HTTP method, e.g. GET, POST, etc. path (str): Request path Returns: tuple or None: A tuple of three items: 1. Route handler (callable) 2. Positional arguments (list) 3. Keyword arguments (dict) ``None`` if no route matches the request. """ if method in self._literal and path in self._literal[method]: return self._literal[method][path], [], {} else: return self._resolve_non_literal_route(method, path)
def _resolve_non_literal_route(self, method, path): """Resolve a request to a wildcard or regex route handler. Arguments: method (str): HTTP method name, e.g. GET, POST, etc. path (str): Request path Returns: tuple or None: A tuple of three items: 1. Route handler (callable) 2. Positional arguments (list) 3. Keyword arguments (dict) ``None`` if no route matches the request. """ for route_dict in (self._wildcard, self._regex): if method in route_dict: for route in reversed(route_dict[method]): callback_data = route.match(path) if callback_data is not None: return callback_data return None
def _normalize_pattern(pattern): """Return a normalized form of the pattern. Normalize the pattern by removing pattern type prefix if it exists in the pattern. Then return the pattern type and the pattern as a tuple of two strings. Arguments: pattern (str): Route pattern to match request paths Returns: tuple: Tuple of pattern type (str) and pattern (str) """ if pattern.startswith('regex:'): pattern_type = 'regex' pattern = pattern[len('regex:'):] elif pattern.startswith('wildcard:'): pattern_type = 'wildcard' pattern = pattern[len('wildcard:'):] elif pattern.startswith('literal:'): pattern_type = 'literal' pattern = pattern[len('literal:'):] elif RegexRoute.like(pattern): pattern_type = 'regex' elif WildcardRoute.like(pattern): pattern_type = 'wildcard' else: pattern_type = 'literal' return pattern_type, pattern
def match(self, path): """Return route handler with arguments if path matches this route. Arguments: path (str): Request path Returns: tuple or None: A tuple of three items: 1. Route handler (callable) 2. Positional arguments (list) 3. Keyword arguments (dict) ``None`` if the route does not match the path. """ match = self._re.search(path) if match is None: return None args = [] kwargs = {} for i, wildcard in enumerate(self._wildcards): if wildcard.name == '!': continue value = wildcard.value(match.groups()[i]) if not wildcard.name: args.append(value) else: kwargs[wildcard.name] = value return self._callback, args, kwargs
def match(self, path): """Return route handler with arguments if path matches this route. Arguments: path (str): Request path Returns: tuple or None: A tuple of three items: 1. Route handler (callable) 2. Positional arguments (list) 3. Keyword arguments (dict) ``None`` if the route does not match the path. """ match = self._re.search(path) if match is None: return None kwargs_indexes = match.re.groupindex.values() args_indexes = [i for i in range(1, match.re.groups + 1) if i not in kwargs_indexes] args = [match.group(i) for i in args_indexes] kwargs = {} for name, index in match.re.groupindex.items(): kwargs[name] = match.group(index) return self._callback, args, kwargs
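The split between positional and keyword arguments above comes straight from the compiled pattern: unnamed groups become positional arguments and named groups become keyword arguments. A standalone demonstration of the same bookkeeping:

import re

match = re.search(r'/(\w+)/(?P<year>\d{4})/(\w+)', '/blog/2024/intro')
kwargs_indexes = match.re.groupindex.values()
args = [match.group(i) for i in range(1, match.re.groups + 1)
        if i not in kwargs_indexes]
kwargs = dict((name, match.group(i))
              for name, i in match.re.groupindex.items())
print(args)     # ['blog', 'intro']
print(kwargs)   # {'year': '2024'}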
def response(self): """Return the HTTP response body. Returns: bytes: HTTP response body as a sequence of bytes """ if isinstance(self.body, bytes): out = self.body elif isinstance(self.body, str): out = self.body.encode(self.charset) else: out = b'' self.add_header('Content-Type', self.content_type) self.add_header('Content-Length', str(len(out))) self.start(self.status_line, self._headers) return [out]
def add_header(self, name, value): """Add an HTTP header to response object. Arguments: name (str): HTTP header field name value (str): HTTP header field value """ if value is not None: self._headers.append((name, value))
def set_cookie(self, name, value, attrs={}): """Add a Set-Cookie header to response object. For a description about cookie attribute values, see https://docs.python.org/3/library/http.cookies.html#http.cookies.Morsel. Arguments: name (str): Name of the cookie value (str): Value of the cookie attrs (dict): Dictionary with cookie attribute keys and values. """ cookie = http.cookies.SimpleCookie() cookie[name] = value for key, value in attrs.items(): cookie[name][key] = value self.add_header('Set-Cookie', cookie[name].OutputString())
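For reference, the Set-Cookie value produced by the SimpleCookie machinery above looks roughly like this (attribute ordering is determined by http.cookies):

import http.cookies

cookie = http.cookies.SimpleCookie()
cookie['session'] = 'abc123'
cookie['session']['path'] = '/'
cookie['session']['httponly'] = True
print(cookie['session'].OutputString())
# roughly: session=abc123; Path=/; HttpOnly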
def status_line(self): """Return the HTTP response status line. The status line is determined from :attr:`status` code. For example, if the status code is 200, then '200 OK' is returned. Returns: str: Status line """ return (str(self.status) + ' ' + Response._responses[self.status].phrase)
def content_type(self): """Return the value of Content-Type header field. The value for the Content-Type header field is determined from the :attr:`media_type` and :attr:`charset` data attributes. Returns: str: Value of Content-Type header field """ if (self.media_type is not None and self.media_type.startswith('text/') and self.charset is not None): return self.media_type + '; charset=' + self.charset else: return self.media_type
def getall(self, key, default=[]): """Return the list of all values for the specified key. Arguments: key (object): Key default (list): Default value to return if the key does not exist, defaults to ``[]``, i.e. an empty list. Returns: list: List of all values for the specified key if the key exists, ``default`` otherwise. """ return self.data[key] if key in self.data else default
def rmtree(path, use_shutil=True, followlinks=False, retries=10): '''remove all files and directories below path, including path itself; works even when shutil.rmtree fails because of read-only files in NFS and Windows. Follows symlinks. `use_shutil` defaults to True; useful for testing `followlinks` defaults to False; if set to True, shutil.rmtree is not used. ''' if use_shutil and not followlinks: try: shutil.rmtree(path) return except Exception, exc: logger.info('shutil.rmtree(%s) failed, so resorting to recursive delete', path) logger.debug('\ntrapped:\n%s', traceback.format_exc(exc)) if not os.path.isdir(path): os.remove(path) return ## bottom up traversal removing files and then removing directories for root, dir_names, file_names in os.walk(path, topdown=False, followlinks=followlinks): for fname in file_names: fpath = os.path.join(root, fname) tries = 0 while tries < retries: tries += 1 try: os.remove(fpath) break except Exception, exc: time.sleep(0.1) if os.path.exists(fpath): logger.critical('os.remove(%s) failed, so leaving data behind!!!', fpath) logger.critical('\ntrapped:\n%s', traceback.format_exc(exc)) #logger.critical(get_open_fds()) for dname in dir_names: full_path = os.path.join(root, dname) if os.path.islink(full_path): real_path = os.path.realpath(full_path) os.remove(full_path) full_path = real_path os.rmdir(full_path) if os.path.exists(path): os.rmdir(path)
def get_open_fds(verbose=False): '''return list of open files for current process .. warning: will only work on UNIX-like os-es. ''' pid = os.getpid() procs = subprocess.check_output( [ "lsof", '-w', '-Ff', "-p", str( pid ) ] ) if verbose: oprocs = subprocess.check_output( [ "lsof", '-w', "-p", str( pid ) ] ) logger.info(oprocs) open_files = filter( lambda s: s and s[ 0 ] == 'f' and s[1: ].isdigit(), procs.split( '\n' ) ) return open_files
def file_type_stats(config): ''' returns a kba.pipeline "transform" function that generates file type stats from the stream_items that it sees. Currently, these stats are just the first five non-whitespace characters. ''' ## make a closure around config def _file_type_stats(stream_item, context): if stream_item.body and stream_item.body.raw: #print repr(stream_item.body.raw[:250]) #sys.stdout.flush() #doctype_m = doctype_re.match(stream_item.body.raw[:250]) #if doctype_m: #print 'DOCTYPE: %s' % repr(doctype_m.group('doctype').lower()) if 'doctype html' in stream_item.body.raw[:250].lower(): print 'DOCTYPE: html' else: #if probably_html.search(stream_item.body.raw): if has_tags(stream_item.body.raw[:400]): print 'PROBABLY_HTML' else: xml = xml_ish.search(stream_item.body.raw) if xml: print 'XML: %s' % repr(xml.group('intro')) else: pdf = pdf_start.search(stream_item.body.raw) if pdf: print 'PDF %s' % repr(pdf.group('version')) else: ext = stream_item.abs_url.split('.')[-1] if len(ext) < 6: print 'UNK ext: %s' % repr(ext) else: first = first_letters.match(stream_item.body.raw) if first and False: print 'UNK letters: %s' % repr(first.group('first_letters')) else: print 'UNK first bytes: %s' % repr(stream_item.body.raw[:50]) #m = first_three_letters.search(stream_item.body.raw) #if m: # print repr(m.group('first_three_letters')).lower().strip() #else: # print repr(stream_item.body.raw[:50]).lower().strip() return stream_item return _file_type_stats
def rejester_run(work_unit): '''get a rejester.WorkUnit with KBA s3 path, fetch it, and save some counts about it. ''' #fname = 'verify-chunks-%d-%d' % (os.getpid(), time.time()) fname = work_unit.key.strip().split('/')[-1] output_dir_path = work_unit.data.get('output_dir_path', '/mnt') u = uuid.uuid3(uuid.UUID(int=0), work_unit.key.strip()) path1 = u.hex[0] path2 = u.hex[1] fpath = os.path.join(output_dir_path, path1, path2, fname) if not os.path.exists(os.path.dirname(fpath)): os.makedirs(os.path.dirname(fpath)) output = gzip.open(fpath + '-out.gz', 'wb') expected_si_count = int(fname.split('-')[1]) max_tries = 20 tries = 0 while tries < max_tries: try: exc, si_count, serif_count, clean_visible_bytes, clean_visible_count, stream_ids = \ attempt_fetch(work_unit, fpath) if si_count != expected_si_count: print 'retrying because si_count = %d != %d expected_si_count' % (si_count, expected_si_count) sys.stdout.flush() tries += 1 continue else: print 'succeeded in reading si_count = %d' % (si_count,) sys.stdout.flush() output.write( '%s\t%d\t%d\t%d\t%d\t%s\t%s\n' % ( exc, si_count, serif_count, clean_visible_bytes, clean_visible_count, work_unit.key.strip(), ','.join(['%s|%s' % tup for tup in stream_ids])) ) break except Exception, exc: print 'broken?' print traceback.format_exc(exc) sys.stdout.flush() tries += 1 output.write(traceback.format_exc(exc)) output.close()
def attempt_fetch(work_unit, fpath): '''attempt a fetch and iteration over a work_unit.key path in s3 ''' url = 'http://s3.amazonaws.com/aws-publicdatasets/' + work_unit.key.strip() ## cheapest way to iterate over the corpus is a few stages of ## streamed child processes. Note that stderr needs to go ## separately to a file so that reading the stdin doesn't get ## blocked: cmd = '(wget -O - %s | gpg --no-permission-warning --trust-model always --output - --decrypt - | xz --decompress) 2> %s-err' % (url, fpath) print cmd child = Popen(cmd, stdout=PIPE, shell=True) print 'child launched' sys.stdout.flush() si_count = 0 serif_count = 0 exc = '' stream_ids = list() clean_visible_bytes = 0 clean_visible_count = 0 try: for si in Chunk(file_obj=child.stdout): print si.stream_id, si.abs_url if si.body.language: lang = si.body.language.code else: lang = '' stream_ids.append((lang, si.stream_id)) if si.body.clean_visible: clean_visible_count += 1 clean_visible_bytes += len(si.body.clean_visible) si_count += 1 if 'serif' in si.body.sentences: serif_count += 1 except Exception, exc: exc = re.sub('\s+', ' ', str(exc)).strip() child.terminate() child.wait() child.stdout.close() return exc, si_count, serif_count, clean_visible_bytes, clean_visible_count, stream_ids
def get_file_lines(file_name): """Return a list of non-empty lines from `file_name`, resolved relative to this module's directory.""" file_path = path.join(path.dirname(path.abspath(__file__)), file_name) with open(file_path) as file_obj: return [line for line in file_obj.read().splitlines() if line]
def get_describers(): """ Return a list of describer tuples in the form `(name, position)`, where position is either 'prefix' or 'suffix'. """ adjectives = map(lambda x: (x, 'prefix'), get_file_lines('adjectives.txt')) animal_nouns = map(lambda x: (x, 'suffix'), get_file_lines('nouns.txt')) return list(chain(adjectives, animal_nouns))
def _random_adjspecies_pair(): """Return an ordered 2-tuple containing a species and a describer.""" describer, desc_position = random_describer() if desc_position == 'prefix': return (describer, random_species()) elif desc_position == 'suffix': return (random_species(), describer)
def random_adjspecies_pair(maxlen=None, prevent_stutter=True): """ Return an ordered 2-tuple containing a species and a describer. The letter-count of the pair is guaranteed not to exceed `maxlen` if it is given. If `prevent_stutter` is True, the last letter of the first item of the pair will be different from the first letter of the second item. """ while True: pair = _random_adjspecies_pair() if maxlen and len(''.join(pair)) > maxlen: continue if prevent_stutter and pair[0][-1] == pair[1][0]: continue return pair
def random_adjspecies(sep='', maxlen=8, prevent_stutter=True): """ Return a random adjective/species, separated by `sep`. The keyword arguments `maxlen` and `prevent_stutter` are the same as for `random_adjspecies_pair`, but note that the maximum length argument is not affected by the separator. """ pair = random_adjspecies_pair(maxlen, prevent_stutter) return pair[0] + sep + pair[1]
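Usage sketch, assuming the functions above are importable from the package (outputs are random; the comments show examples only):

print(random_adjspecies())                     # e.g. 'wryotter'
print(random_adjspecies(sep='-', maxlen=12))   # e.g. 'manic-gibbon'
print(random_adjspecies_pair(maxlen=10))       # e.g. ('odd', 'beaver')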
def morph(ctx, app_id, sentence_file, json_flag, sentence, info_filter, pos_filter, request_id): # type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode, unicode) -> None # NOQA """ Morphological analysis for Japanese.""" app_id = clean_app_id(app_id) sentence = clean_sentence(sentence, sentence_file) if info_filter: info_filter = info_filter.replace(',', '|') if pos_filter: pos_filter = pos_filter.replace(',', '|') api = GoolabsAPI(app_id) ret = api.morph( sentence=sentence, info_filter=info_filter, pos_filter=pos_filter, request_id=request_id, ) if json_flag: click.echo(format_json(api.response.json())) return for words in ret['word_list']: for word in words: click.echo(','.join(word))
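The same analysis is available without the CLI by calling the client library the command wraps. A hedged sketch (a valid goo labs application id is required; each word_list entry is typically a [surface, part-of-speech, reading] triple):

from goolabs import GoolabsAPI

api = GoolabsAPI('your-app-id')                # placeholder app id
ret = api.morph(sentence=u'日本語を解析する')
for words in ret['word_list']:
    for word in words:
        print(u','.join(word))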
def similarity(ctx, app_id, json_flag, query_pair, request_id): # type: (Context, unicode, bool, List[unicode], unicode) -> None """ Scoring the similarity of two words. """ app_id = clean_app_id(app_id) api = GoolabsAPI(app_id) ret = api.similarity( query_pair=query_pair, request_id=request_id ) if json_flag: click.echo(format_json(api.response.json())) return click.echo('{0:.16f}'.format(ret['score']))
def hiragana(ctx, app_id, sentence_file, json_flag, sentence, output_type, request_id): # type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode) -> None # NOQA """ Convert the Japanese to Hiragana or Katakana. """ app_id = clean_app_id(app_id) sentence = clean_sentence(sentence, sentence_file) api = GoolabsAPI(app_id) ret = api.hiragana( sentence=sentence, output_type=output_type, request_id=request_id ) if json_flag: click.echo(format_json(api.response.json())) return click.echo(ret['converted'])
def entity(ctx, app_id, sentence_file, json_flag, sentence, class_filter, request_id): # type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode) -> None # NOQA """ Extract unique representation from sentence. """ app_id = clean_app_id(app_id) sentence = clean_sentence(sentence, sentence_file) if class_filter: class_filter = class_filter.replace(',', '|') api = GoolabsAPI(app_id) ret = api.entity( sentence=sentence, class_filter=class_filter, request_id=request_id ) if json_flag: click.echo(format_json(api.response.json())) return for ne in ret['ne_list']: click.echo(','.join(ne))
def shortsum(ctx, app_id, review_file, json_flag, review, length, request_id): # type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode) -> None # NOQA """Summarize reviews into a short summary.""" app_id = clean_app_id(app_id) review_list = clean_review(review, review_file) length_int = clean_length(length) # type: Optional[int] api = GoolabsAPI(app_id) ret = api.shortsum( review_list=review_list, length=length_int, request_id=request_id, ) if json_flag: click.echo(format_json(api.response.json())) return click.echo(ret['summary'])
def keyword(ctx, app_id, body_file, json_flag, title, body, max_num, forcus, request_id): # type: (Context, unicode, Optional[IO], bool, unicode, unicode, int, unicode, unicode) -> None # NOQA """Extract "keywords" from an input document. """ app_id = clean_app_id(app_id) body = clean_body(body, body_file) api = GoolabsAPI(app_id) ret = api.keyword( title=title, body=body, max_num=max_num, forcus=forcus, request_id=request_id, ) if json_flag: click.echo(format_json(api.response.json())) return for k in ret['keywords']: k = dict((key.encode('utf-8'), k[key]) for key in k.keys()) for keyword, score in six.iteritems(k): click.echo(u'{0},{1}'.format(text(keyword), score))
def chrono(ctx, app_id, sentence_file, json_flag, sentence, doc_time, request_id): # type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode) -> None # NOQA """Extract expression expressing date and time and normalize its value """ app_id = clean_app_id(app_id) sentence = clean_sentence(sentence, sentence_file) api = GoolabsAPI(app_id) ret = api.chrono( sentence=sentence, doc_time=doc_time, request_id=request_id, ) if json_flag: click.echo(format_json(api.response.json())) return for pair in ret['datetime_list']: click.echo(u'{0}: {1}'.format(text(pair[0]), pair[1]))
def create(self, stage, scp_config, config=None): '''Create a pipeline stage. Instantiates `stage` with `config`. This essentially translates to ``stage(config)``, except that two keys from `scp_config` are injected into the configuration: ``tmp_dir_path`` is an execution-specific directory from combining the top-level ``tmp_dir_path`` configuration with :attr:`tmp_dir_suffix`; and ``third_dir_path`` is the same path from the top-level configuration. `stage` may be either a callable returning the stage (e.g. its class), or its name in the configuration. `scp_config` is the configuration for the pipeline as a whole, and is required. `config` is the configuration for the stage; if it is :const:`None` then it is extracted from `scp_config`. If you already have a fully formed configuration block and want to create a stage, you can call .. code-block:: python factory.registry[stage](stage_config) In most cases if you have a stage class object and want to instantiate it with its defaults you can call .. code-block:: python stage = stage_cls(stage_cls.default_config) .. note:: This mirrors :meth:`yakonfig.factory.AutoFactory.create`, with some thought that this factory class might migrate to using that as a base in the future. :param stage: pipeline stage class, or its name in the registry :param dict scp_config: configuration block for the pipeline :param dict config: configuration block for the stage, or :const:`None` to get it from `scp_config` ''' # Figure out what we have for a stage and its name if isinstance(stage, basestring): stage_name = stage stage_obj = self.registry[stage_name] else: stage_name = getattr(stage, 'config_name', stage.__name__) stage_obj = stage # Find the configuration; get a copy we can mutate if config is None: config = scp_config.get(stage_name, None) if config is None: config = getattr(stage_obj, 'default_config', {}) config = dict(config) # Fill in more values if self.tmp_dir_suffix is None: config['tmp_dir_path'] = scp_config['tmp_dir_path'] else: config['tmp_dir_path'] = os.path.join(scp_config['tmp_dir_path'], self.tmp_dir_suffix) config['third_dir_path'] = scp_config['third_dir_path'] return stage_obj(config)
def _init_stages(self, config, name): '''Create a list of indirect stages. `name` should be the name of a config item that holds a list of names of stages, for instance, ``writers``. This looks up the names of those stages, then creates and returns the corresponding list of stage objects. For instance, if the config says .. code-block:: yaml incremental_transforms: [clean_html, clean_visible] then calling ``self._init_stages(scp_config, 'incremental_transforms')`` will return a list of the two named stage instances. :param dict config: `streamcorpus_pipeline` configuration block :param str name: name of the stage name list entry :return: list of new stage instances ''' if name not in config: return [] return [self.create(stage, config) for stage in config[name]]
def _init_all_stages(self, config): '''Create stages that are used for the pipeline. :param dict config: `streamcorpus_pipeline` configuration :return: tuple of (reader, incremental transforms, batch transforms, post-batch incremental transforms, writers, temporary directory) ''' reader = self._init_stage(config, 'reader') incremental_transforms = self._init_stages( config, 'incremental_transforms') batch_transforms = self._init_stages(config, 'batch_transforms') post_batch_incremental_transforms = self._init_stages( config, 'post_batch_incremental_transforms') writers = self._init_stages(config, 'writers') tmp_dir_path = os.path.join(config['tmp_dir_path'], self.tmp_dir_suffix) return (reader, incremental_transforms, batch_transforms, post_batch_incremental_transforms, writers, tmp_dir_path)
def _process_task(self, work_unit): '''Process a :class:`coordinate.WorkUnit`. The work unit's key is taken as the input file name. The data should have ``start_count`` and ``start_chunk_time`` values, which are passed on to :meth:`run`. :param work_unit: work unit to process :paramtype work_unit: :class:`coordinate.WorkUnit` :return: number of stream items processed ''' self.work_unit = work_unit i_str = work_unit.key start_count = work_unit.data['start_count'] start_chunk_time = work_unit.data['start_chunk_time'] self.run(i_str, start_count, start_chunk_time)
def run(self, i_str, start_count=0, start_chunk_time=None): '''Run the pipeline. This runs all of the steps described in the pipeline constructor, reading from some input and writing to some output. :param str i_str: name of the input file, or other reader-specific description of where to get input :param int start_count: index of the first stream item :param int start_chunk_time: timestamp for the first stream item ''' try: if not os.path.exists(self.tmp_dir_path): os.makedirs(self.tmp_dir_path) if start_chunk_time is None: start_chunk_time = time.time() ## the reader returns generators of StreamItems i_chunk = self.reader(i_str) ## t_path points to the currently in-progress temp chunk t_path = None ## loop over all docs in the chunk processing and cutting ## smaller chunks if needed len_clean_visible = 0 sources = set() next_idx = 0 ## how many have we input and actually done processing on? input_item_count = 0 for si in i_chunk: # TODO: break out a _process_stream_item function? next_idx += 1 ## yield to the gevent hub to allow other things to run if gevent: gevent.sleep(0) ## skip forward until we reach start_count if next_idx <= start_count: continue if next_idx % self.rate_log_interval == 0: ## indexing is zero-based, so next_idx corresponds ## to length of list of SIs processed so far elapsed = time.time() - start_chunk_time if elapsed > 0: rate = float(next_idx) / elapsed logger.info('%d in %.1f --> %.1f per sec on ' '(pre-partial_commit) %s', next_idx - start_count, elapsed, rate, i_str) if not self.t_chunk: ## make a temporary chunk at a temporary path # (Lazy allocation after we've read an item that might get processed out to the new chunk file) # TODO: make this EVEN LAZIER by not opening the t_chunk until inside _run_incremental_transforms whe the first output si is ready t_path = os.path.join(self.tmp_dir_path, 't_chunk-%s' % uuid.uuid4().hex) self.t_chunk = streamcorpus.Chunk(path=t_path, mode='wb') assert self.t_chunk.message == streamcorpus.StreamItem_v0_3_0, self.t_chunk.message # TODO: a set of incremental transforms is equivalent # to a batch transform. Make the pipeline explicitly # configurable as such: # # batch_transforms: [[incr set 1], batch op, [incr set 2], ...] 
# # OR: for some list of transforms (mixed incremental # and batch) pipeline can detect and batchify as needed ## incremental transforms populate t_chunk ## let the incremental transforms destroy the si by ## returning None si = self._run_incremental_transforms( si, self.incremental_transforms) ## insist that every chunk has only one source string if si: sources.add(si.source) if self.assert_single_source and len(sources) != 1: raise InvalidStreamItem( 'stream item %r had source %r, not %r ' '(set assert_single_source: false to suppress)' % (si.stream_id, si.source, sources)) if si and si.body and si.body.clean_visible: len_clean_visible += len(si.body.clean_visible) ## log binned clean_visible lengths, for quick stats estimates #logger.debug('len(si.body.clean_visible)=%d' % int(10 * int(math.floor(float(len(si.body.clean_visible)) / 2**10)/10))) #logger.debug('len(si.body.clean_visible)=%d' % len(si.body.clean_visible)) if ((self.output_chunk_max_count is not None and len(self.t_chunk) == self.output_chunk_max_count)): logger.info('reached output_chunk_max_count (%d) at: %d', len(self.t_chunk), next_idx) self._process_output_chunk( start_count, next_idx, sources, i_str, t_path) start_count = next_idx elif (self.output_max_clean_visible_bytes is not None and len_clean_visible >= self.output_chunk_max_clean_visible_bytes): logger.info( 'reached output_chunk_max_clean_visible_bytes ' '(%d) at: %d', self.output_chunk_max_clean_visible_bytes, len_clean_visible) len_clean_visible = 0 self._process_output_chunk( start_count, next_idx, sources, i_str, t_path) start_count = next_idx input_item_count += 1 if (((self.input_item_limit is not None) and (input_item_count > self.input_item_limit))): break if self.t_chunk is not None: self._process_output_chunk( start_count, next_idx, sources, i_str, t_path) ## return how many stream items we processed return next_idx finally: if self.t_chunk is not None: self.t_chunk.close() for transform in self.batch_transforms: transform.shutdown() if self.cleanup_tmp_files: rmtree(self.tmp_dir_path)
def _process_output_chunk(self, start_count, next_idx, sources, i_str, t_path): ''' for the current output chunk (which should be open): 1. run batch transforms 2. run post-batch incremental transforms 3. run 'writers' to load-out the data to files or other storage return list of paths that writers wrote to ''' if not self.t_chunk: # nothing to do return [] self.t_chunk.close() # gather the paths as the writers run o_paths = None if len(self.t_chunk) > 0: # only batch transform and load if the chunk # isn't empty, which can happen when filtering # with stages like "find" # batch transforms act on the whole chunk in-place logger.info('running batch transforms on %d StreamItems', len(self.t_chunk)) self._run_batch_transforms(t_path) self._maybe_run_post_batch_incremental_transforms(t_path) # only proceed if above transforms left us with something if (self.t_chunk) and (len(self.t_chunk) >= 0): o_paths = self._run_writers(start_count, next_idx, sources, i_str, t_path) # we're now officially done with the chunk self.t_chunk = None # If we wrote some paths, update the data dictionary of outputs if self.work_unit and o_paths: old_o_paths = self.work_unit.data.get('output', []) o_paths = old_o_paths + o_paths self.work_unit.data['start_count'] = next_idx self.work_unit.data['output'] = o_paths self.work_unit.update()
def _run_writers(self, start_count, next_idx, sources, i_str, t_path): '''Run all of the writers over some intermediate chunk. :param int start_count: index of the first item :param int next_idx: index of the next item (after the last item in this chunk) :param list sources: source strings included in this chunk (usually only one source) :param str i_str: name of input file or other input :param str t_path: location of intermediate chunk on disk :return: list of output file paths or other outputs ''' # writers put the chunk somewhere, and could delete it name_info = dict( first=start_count, # num and md5 computed in each writers source=sources.pop(), ) all_o_paths = [] for writer in self.writers: logger.debug('running %r on %r: %r', writer, i_str, name_info) o_paths = writer(t_path, name_info, i_str) logger.debug('loaded (%d, %d) of %r into %r', start_count, next_idx - 1, i_str, o_paths) all_o_paths += o_paths return all_o_paths
def _run_incremental_transforms(self, si, transforms): ''' Run transforms on stream item. Item may be discarded by some transform. Writes successful items out to current self.t_chunk Returns transformed item or None. ''' ## operate each transform on this one StreamItem for transform in transforms: try: stream_id = si.stream_id si_new = transform(si, context=self.context) if si_new is None: logger.warn('transform %r deleted %s abs_url=%r', transform, stream_id, si and si.abs_url) return None si = si_new except TransformGivingUp: ## do nothing logger.info('transform %r giving up on %r', transform, si.stream_id) except Exception, exc: logger.critical( 'transform %r failed on %r from i_str=%r abs_url=%r', transform, si and si.stream_id, self.context.get('i_str'), si and si.abs_url, exc_info=True) assert si is not None ## expect to always have a stream_time if not si.stream_time: raise InvalidStreamItem('empty stream_time: %s' % si) if si.stream_id is None: raise InvalidStreamItem('empty stream_id: %r' % si) ## put the StreamItem into the output if type(si) != streamcorpus.StreamItem_v0_3_0: raise InvalidStreamItem('incorrect stream item object %r' % type(si)) self.t_chunk.add(si) return si
def get_name_info(chunk_path, assert_one_date_hour=False, i_str=None, chunk_type=Chunk): ''' takes a chunk blob and obtains the date_hour, md5, num makes fields: i_str input_fname input_md5 - parsed from input filename if it contains '-%(md5)s-' md5 num epoch_ticks target_names doc_ids_8 date_hour rand8 date_now time_now date_time_now ''' assert i_str is not None, 'must provide i_str as keyword arg' name_info = dict() if i_str: name_info['i_str'] = i_str else: name_info['i_str'] = '' i_fname = i_str.split('/')[-1] i_fname = i_fname.split('.')[0] ## strip off .sc[.xz[.gpg]] name_info['input_fname'] = i_fname input_md5s = [] for part in i_fname.split('-'): if len(part) == 32 and is_hex_32.match(part): input_md5s.append(part) name_info['input_md5'] = '-'.join(input_md5s) # TODO: return a dict-like object that does the expensive # calculation lazily, the name format might not even need that # value. ch = chunk_type(path=chunk_path, mode='rb') date_hours = set() target_names = set() doc_ids = set() epoch_ticks = None count = 0 try: for si in ch: if chunk_type is Chunk: if epoch_ticks is None: epoch_ticks = si.stream_time.epoch_ticks date_hours.add( si.stream_time.zulu_timestamp[:13] ) doc_ids.add( si.doc_id ) for annotator_id, ratings in si.ratings.items(): for rating in ratings: target_name = rating.target.target_id.split('/')[-1] target_names.add( target_name ) count += 1 except Exception, exc: logger.critical('failed to iter over chunk', exc_info=True) ## create the md5 property, so we can use it in the filename if hasattr(ch, 'md5_hexdigest'): name_info['md5'] = ch.md5_hexdigest else: try: data = open(chunk_path).read() name_info['md5'] = hashlib.md5(data).hexdigest() except Exception, exc: logger.critical('failed to compute md5', exc_info=True) name_info['md5'] = 'broken' name_info['num'] = count name_info['epoch_ticks'] = epoch_ticks name_info['target_names'] = '-'.join( target_names ) name_info['doc_ids_8'] = '-'.join( [di[:8] for di in doc_ids] ) if chunk_type is Chunk: if assert_one_date_hour: assert len(date_hours) == 1, \ 'got a chunk with other than one data_hour! ' + \ repr(date_hours) if len(date_hours) > 0: date_hour = list(date_hours)[0] date_hour = date_hour.replace('T', '-') else: assert count == 0, (date_hours, count) date_hour = None name_info['date_hour'] = date_hour else: name_info['date_hour'] = 'NO-DATE-HOUR-FOR-FC' # TODO: in future lazy evaluation world, rand8 should return a # different value every time it is accessed so that a format could # be 'foo-{rand8}{rand8}' name_info['rand8'] = '%08x' % (random.randint(0, 0x7fffffff),) name_info['date_now'] = datetime.datetime.utcnow().strftime('%Y-%m-%d') name_info['time_now'] = datetime.datetime.utcnow().strftime('%H-%M-%S') name_info['date_time_now'] = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S') return name_info
def replace_config(config, name): '''Replace the top-level pipeline configurable object. This investigates a number of sources, including `external_stages_path` and `external_stages_modules` configuration and `streamcorpus_pipeline.stages` entry points, and uses these to find the actual :data:`sub_modules` for :mod:`streamcorpus_pipeline`. ''' global static_stages if static_stages is None: static_stages = PipelineStages() stages = static_stages if 'external_stages_path' in config: path = config['external_stages_path'] if not os.path.isabs(path) and config.get('root_path'): path = os.path.join(config['root_path'], path) try: stages.load_external_stages(config['external_stages_path']) except IOError: return streamcorpus_pipeline # let check_config re-raise this if 'external_stages_modules' in config: for mod in config['external_stages_modules']: try: stages.load_module_stages(mod) except ImportError: return streamcorpus_pipeline # let check_config re-raise this else: stages = static_stages new_sub_modules = set(stage for stage in stages.itervalues() if hasattr(stage, 'config_name')) return NewSubModules(streamcorpus_pipeline, new_sub_modules)
def make_app(): """Make a WSGI app that has all the HTTPie pieces baked in.""" env = Environment() # STDIN is ignored because HTTPony runs a server that doesn't care. # Additionally, it is needed or else pytest blows up. args = parser.parse_args(args=['/', '--ignore-stdin'], env=env) args.output_options = 'HB' # Output only requests. server = 'HTTPony/{0}'.format(__version__) def application(environ, start_response): # The WSGI server puts content length and type in the environment # even when not provided with the request. Drop them if they are empty. if environ.get('CONTENT_LENGTH') == '': del environ['CONTENT_LENGTH'] if environ.get('CONTENT_TYPE') == '': del environ['CONTENT_TYPE'] wrequest = WerkzeugRequest(environ) data = wrequest.get_data() request = Request( method=wrequest.method, url=wrequest.url, headers=wrequest.headers, data=data, ) prepared = request.prepare() stream = streams.build_output_stream( args, env, prepared, response=None, output_options=args.output_options) streams.write_stream(stream, env.stdout, env.stdout_isatty) # When there is data in the request, give the next one breathing room. if data: print("\n", file=env.stdout) # Make dreams come true. response = Response(headers={'Server': server}) return response(environ, start_response) return application
def make_chains_with_names(sentences): ''' assemble in-doc coref chains by mapping equiv_id to tokens and their cleansed name strings :param sentences: iterator over token generators :returns dict: keys are equiv_ids, values are tuple(set of cleansed name strings, set of tokens) ''' ## if an equiv_id is -1, then the token is classified into some ## entity_type but has no other tokens in its chain. We don't ## want these all lumped together, so we give them distinct "fake" ## equiv_id other than -1 -- counting negatively to avoid ## collisions with "real" equiv_ids fake_equiv_ids = -2 ## use a default dictionary equiv_ids = collections.defaultdict(lambda: (set(), set())) for tagger_id, sents in sentences.items(): for sent in sents: for tok in sent.tokens: if tok.entity_type is not None: ## get an appropriate equiv_id if tok.equiv_id == -1: eqid = fake_equiv_ids fake_equiv_ids -= 1 else: eqid = tok.equiv_id ## store the name parts initially as a set equiv_ids[eqid][0].add(cleanse(tok.token.decode('utf8'))) ## carry a *reference* to the entire Token object equiv_ids[eqid][1].add(tok) return equiv_ids
def ALL_mentions(target_mentions, chain_mentions): ''' For each name string in the target_mentions list, searches through all chain_mentions looking for any cleansed Token.token that contains the name. Returns True only if all of the target_mention strings appeared as substrings of at least one cleansed Token.token. Otherwise, returns False. :type target_mentions: list of basestring :type chain_mentions: list of basestring :returns bool: ''' found_all = True for name in target_mentions: found_one = False for chain_ment in chain_mentions: if name in chain_ment: found_one = True break if not found_one: found_all = False break return found_all
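The same ALL test can be written as a pair of comprehensions; shown here only to make the semantics explicit (every target name must be a substring of at least one chain mention):

def ALL_mentions_equiv(target_mentions, chain_mentions):
    return all(any(name in chain_ment for chain_ment in chain_mentions)
               for name in target_mentions)

print(ALL_mentions_equiv(['john', 'smith'], ['johnsmith', 'jane']))   # True
print(ALL_mentions_equiv(['john', 'doe'], ['johnsmith', 'jane']))     # False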
def ANY_MULTI_TOKEN_mentions(multi_token_target_mentions, chain_mentions): ''' For each name string (potentially consisting of multiple tokens) in the target_mentions list, searches through all chain_mentions looking for cleansed Token.token strings that contain the tokens in the name. Returns True if, for any one of the target_mention strings, every one of its tokens appears as a substring of at least one cleansed Token.token. Otherwise, returns False. :type target_mentions: list of basestring :type chain_mentions: list of basestring :returns bool: ''' for multi_token_name in multi_token_target_mentions: if ALL_mentions(multi_token_name.split(), chain_mentions): return True return False
def ANY_mentions(target_mentions, chain_mentions): ''' For each name string in the target_mentions list, searches through all chain_mentions looking for any cleansed Token.token that contains the name. Returns True if any of the target_mention strings appeared as substrings of any cleansed Token.token. Otherwise, returns False. :type target_mentions: list of basestring :type chain_mentions: list of basestring :returns bool: ''' for name in target_mentions: for chain_ment in chain_mentions: if name in chain_ment: return True return False
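## Illustrative sketch (not part of the original module): shows how the
## three chain-selector predicates above behave on a toy coref chain.
## Assumes ALL_mentions, ANY_mentions, and ANY_MULTI_TOKEN_mentions are
## in scope and that both inputs have already been cleansed.
def _example_chain_selectors():
    '''tiny demo of the chain selector predicates on hand-made data'''
    chain_mentions = [u'john smith', u'smith', u'he']
    ## every target string must appear inside some chain mention
    assert ALL_mentions([u'john', u'smith'], chain_mentions)
    assert not ALL_mentions([u'john', u'doe'], chain_mentions)
    ## any single target string appearing anywhere is enough
    assert ANY_mentions([u'doe', u'he'], chain_mentions)
    ## a multi-token target matches if all of its tokens appear somewhere
    assert ANY_MULTI_TOKEN_mentions([u'john smith'], chain_mentions)
    assert not ANY_MULTI_TOKEN_mentions([u'john doe'], chain_mentions)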
def names_in_chains(stream_item, aligner_data):
    '''
    Convert doc-level Rating objects into Labels, and add those Labels
    to all Tokens in all coref chains selected by
    aligner_data["chain_selector"]

    :param stream_item: document that has a doc-level Rating to
    translate into token-level Labels.

    :param aligner_data: dict containing:
      chain_selector: ALL, ANY, or ANY_MULTI_TOKEN
      annotator_id: string to find at
        stream_item.ratings[i].annotator.annotator_id

    If chain_selector==ALL, then only apply Label to chains in which
    all of the Rating.mentions strings appear as substrings within at
    least one of the Token.token strings.

    If chain_selector==ANY, then apply Label to chains in which any of
    the Rating.mentions strings appear as a substring within at least
    one of the Token.token strings.

    If chain_selector==ANY_MULTI_TOKEN, then apply Label to chains in
    which all of the tokens of any one of the Rating.mentions strings
    appear as substrings within at least one of the Token.token
    strings.
    '''
    chain_selector = aligner_data.get('chain_selector', '')
    assert chain_selector in _CHAIN_SELECTORS, \
        'chain_selector: %r not in %r' % (chain_selector, _CHAIN_SELECTORS.keys())

    ## convert chain_selector to a function
    chain_selector = _CHAIN_SELECTORS[chain_selector]

    ## make inverted index equiv_id --> (names, tokens)
    equiv_ids = make_chains_with_names( stream_item.body.sentences )

    required_annotator_id = aligner_data.get('annotator_id')
    for annotator_id, ratings in stream_item.ratings.items():
        if (required_annotator_id is not None) and (annotator_id != required_annotator_id):
            continue
        else:
            for rating in ratings:
                label = Label(annotator=rating.annotator,
                              target=rating.target)

                for eqid, (chain_mentions, chain_tokens) in equiv_ids.items():
                    if chain_selector(rating.mentions, chain_mentions):
                        ## apply the label
                        for tok in chain_tokens:
                            add_annotation(tok, label)
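## Illustrative sketch (not in the original module): the shape of the
## aligner_data dict that names_in_chains expects.  The annotator_id
## value here is hypothetical, and chain_selector must be one of the
## keys registered in _CHAIN_SELECTORS (the docstring above names ALL,
## ANY, and ANY_MULTI_TOKEN).
example_aligner_data = {
    'chain_selector': 'ALL',
    ## restrict labeling to ratings from this annotator; omit or set to
    ## None to label ratings from every annotator
    'annotator_id': 'example-annotator',
}
## typical call, assuming stream_item carries doc-level ratings and
## in-doc coref chains produced by a tagger:
##     names_in_chains(stream_item, example_aligner_data)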
def look_ahead_match(rating, tokens):
    '''iterate through all tokens looking for matches of cleansed
    tokens or token regexes, skipping tokens left empty by cleansing
    and coping with Token objects that produce multiple
    space-separated strings when cleansed.  Yields tokens that match.
    '''
    ## build one list of compiled regexes per mention; mentions that
    ## cleanse down to nothing are skipped, so every list stored in
    ## all_mregexes is non-empty
    all_mregexes = []
    for m in rating.mentions:
        mregexes = []
        mpatterns = m.decode('utf8').split(' ')
        for mpat in mpatterns:
            if mpat.startswith('ur"^') and mpat.endswith('$"'):
                ## mention is already expressed as a regex literal, so
                ## chop out the meat of the regex so we can
                ## reconstitute it below
                mpat = mpat[4:-2]
            else:
                mpat = cleanse(mpat)
            if mpat:
                ## make a unicode raw string
                ## https://docs.python.org/2/reference/lexical_analysis.html#string-literals
                mpat = ur'^%s$' % mpat
                logger.debug('look_ahead_match compiling regex: %s', mpat)
                mregexes.append(re.compile(mpat, re.UNICODE | re.IGNORECASE))
        if not mregexes:
            logger.warn('got empty cleansed mention: %r\nrating=%r' % (m, rating))
            ## skip it, so we never index into an empty regex list below
            continue
        all_mregexes.append(mregexes)
    ## now that we have all_mregexes, go through all the tokens
    for i in range(len(tokens)):
        for mregexes in all_mregexes:
            if mregexes[0].match(tokens[i][0][0]):
                ## found the start of a possible match, so iterate
                ## through the tuples of cleansed strings for each
                ## Token while stepping through the cleansed strings
                ## for this mention.
                m_j = 1
                i_j = 0
                last_token_matched = 0
                matched = True
                while m_j < len(mregexes):
                    i_j += 1
                    if i_j == len(tokens[i + last_token_matched][0]):
                        i_j = 0
                        last_token_matched += 1
                        if i + last_token_matched == len(tokens):
                            matched = False
                            break
                    target_token = tokens[i + last_token_matched][0][i_j]
                    ## this next line is the actual string comparison
                    if mregexes[m_j].match(target_token):
                        m_j += 1
                    elif target_token == '':
                        continue
                    else:
                        matched = False
                        break
                if matched:
                    ## yield each matched token only once
                    toks = set()
                    for j in xrange(last_token_matched + 1):
                        toks.add(tokens[i + j][1])
                    for tok in toks:
                        yield tok
def multi_token_match(stream_item, aligner_data): ''' iterate through tokens looking for near-exact matches to strings in si.ratings...mentions ''' tagger_id = _get_tagger_id(stream_item, aligner_data) sentences = stream_item.body.sentences.get(tagger_id) if not sentences: return ## construct a list of tuples, where the first part of each tuple ## is a tuple of cleansed strings, and the second part is the ## Token object from which it came. tokens = map(lambda tok: (cleanse(tok.token.decode('utf8')).split(' '), tok), itertools.chain(*[sent.tokens for sent in sentences])) required_annotator_id = aligner_data['annotator_id'] for annotator_id, ratings in stream_item.ratings.items(): if (required_annotator_id is None) or (annotator_id == required_annotator_id): for rating in ratings: label = Label(annotator=rating.annotator, target=rating.target) num_tokens_matched = 0 for tok in look_ahead_match(rating, tokens): if aligner_data.get('update_labels'): tok.labels.pop(annotator_id, None) add_annotation(tok, label) num_tokens_matched += 1 if num_tokens_matched == 0: logger.warning('multi_token_match didn\'t actually match ' 'entity %r in stream_id %r', rating.target.target_id, stream_item.stream_id) else: logger.debug('matched %d tokens for %r in %r', num_tokens_matched, rating.target.target_id, stream_item.stream_id)
def make_ner_file(self, clean_visible_path, ner_xml_path):
    '''run the tagger as a child process to get XML output'''
    if self.template is None:
        raise exceptions.NotImplementedError('''
Subclasses must specify a class property "template" that provides
command string format for running a tagger.  It should take
%(tagger_root_path)s as the path from the config file,
%(clean_visible_path)s as the input XML file, and
%(ner_xml_path)s as the output path to create.
''')
    tagger_config = dict(
        tagger_root_path=self.config['tagger_root_path'],
        clean_visible_path=clean_visible_path,
        ner_xml_path=ner_xml_path)
    ## get a java_heap_size or default to the empty string
    tagger_config['java_heap_size'] = self.config.get('java_heap_size', '')
    cmd = self.template % tagger_config
    start_time = time.time()
    ## make sure we are using as little memory as possible
    gc.collect()
    try:
        self._child = subprocess.Popen(cmd, stderr=subprocess.PIPE, shell=True)
    except OSError, exc:
        msg = traceback.format_exc(exc)
        msg += make_memory_info_msg(clean_visible_path, ner_xml_path)
        raise PipelineOutOfMemory(msg)

    s_out, errors = self._child.communicate()
    if not self._child.returncode == 0:
        if 'java.lang.OutOfMemoryError' in errors:
            msg = errors + make_memory_info_msg(clean_visible_path, ner_xml_path)
            raise PipelineOutOfMemory(msg)
        elif self._child.returncode == 137:
            msg = 'tagger returncode = 137\n' + errors
            msg += make_memory_info_msg(clean_visible_path, ner_xml_path)
            # maybe get a tail of /var/log/messages
            raise PipelineOutOfMemory(msg)
        elif 'Exception' in errors:
            raise PipelineBaseException(errors)
        else:
            raise PipelineBaseException('tagger exited with %r' % self._child.returncode)

    elapsed = time.time() - start_time
    logger.info('finished tagging in %.1f seconds' % elapsed)
    return elapsed
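## Illustrative sketch (not from the original source): what a subclass's
## "template" command string might look like.  The jar name is
## hypothetical; the %(...)s keys are the ones make_ner_file above fills
## in from the config and its arguments.
example_tagger_template = (
    'java -Xmx%(java_heap_size)s '
    '-jar %(tagger_root_path)s/example-tagger.jar '
    '--input %(clean_visible_path)s '
    '--output %(ner_xml_path)s'
)
## a subclass would set this as a class attribute:
##     class ExampleTaggerStage(TaggerBatchTransform):
##         template = example_tagger_template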
def align_chunk_with_ner(self, ner_xml_path, i_chunk, o_chunk): ''' iterate through ner_xml_path to fuse with i_chunk into o_chunk ''' ## prepare to iterate over the input chunk input_iter = i_chunk.__iter__() all_ner = xml.dom.minidom.parse(open(ner_xml_path)) ## this converts our UTF-8 data into unicode strings, so when ## we want to compute byte offsets or construct tokens, we ## must .encode('utf8') for ner_dom in all_ner.getElementsByTagName('FILENAME'): #for stream_id, raw_ner in files(open(ner_xml_path).read().decode('utf8')): stream_item = input_iter.next() ## get stream_id out of the XML stream_id = ner_dom.attributes.get('stream_id').value if stream_item.stream_id is None: assert not stream_id, 'out of sync: None != %r' % stream_id logger.critical('si.stream_id is None... ignoring') continue assert stream_id and stream_id == stream_item.stream_id, \ '%s != %s' % (stream_id, stream_item.stream_id) if not stream_item.body: ## the XML better have had an empty clean_visible too... #assert not ner_dom....something continue tagging = Tagging() tagging.tagger_id = self.tagger_id # pylint: disable=E1101 ''' ## get this one file out of its FILENAME tags tagged_doc_parts = list(files(ner_dom.toxml())) if not tagged_doc_parts: continue tagged_doc = tagged_doc_parts[0][1] ## hack hope_original = make_clean_visible(tagged_doc, '') open(ner_xml_path + '-clean', 'wb').write(hope_original.encode('utf-8')) print ner_xml_path + '-clean' ''' #tagging.raw_tagging = tagged_doc tagging.generation_time = streamcorpus.make_stream_time() stream_item.body.taggings[self.tagger_id] = tagging # pylint: disable=E1101 ## could consume lots of memory here by instantiating everything sentences, relations, attributes = self.get_sentences(ner_dom) stream_item.body.sentences[self.tagger_id] = sentences # pylint: disable=E1101 stream_item.body.relations[self.tagger_id] = relations # pylint: disable=E1101 stream_item.body.attributes[self.tagger_id] = attributes # pylint: disable=E1101 logger.debug('finished aligning tokens %s' % stream_item.stream_id) ''' for num, sent in enumerate(sentences): for tok in sent.tokens: print '%d\t%d\t%s' % (num, tok.offsets[OffsetType.LINES].first, repr(tok.token)) ''' if 'align_labels_by' in self.config and self.config['align_labels_by']: assert 'aligner_data' in self.config, 'config missing "aligner_data"' aligner = AlignmentStrategies[ self.config['align_labels_by'] ] aligner( stream_item, self.config['aligner_data'] ) ## forcibly collect dereferenced objects gc.collect() try: o_chunk.add(stream_item) except MemoryError, exc: msg = traceback.format_exc(exc) msg += make_memory_info_msg() logger.critical(msg) raise PipelineOutOfMemory(msg) ## all done, so close the o_chunk try: o_chunk.close() logger.info('finished chunk for %r' % ner_xml_path) except MemoryError, exc: msg = traceback.format_exc(exc) msg += make_memory_info_msg() logger.critical(msg) raise PipelineOutOfMemory(msg)
def shutdown(self): ''' send SIGTERM to the tagger child process ''' if self._child: try: self._child.terminate() except OSError, exc: if exc.errno == 3: ## child is already gone, possibly because it ran ## out of memory and caused us to shutdown pass
def mult(p, n): """Returns a Pattern that matches exactly n repetitions of Pattern p. """ np = P() while n >= 1: if n % 2: np = np + p p = p + p n = n // 2 return np
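## Illustrative sketch (not in the original module): the doubling trick
## in mult() works for anything where "+" means concatenation and P()
## is the empty pattern.  Substituting plain strings (with '' standing
## in for P()) shows that the loop builds exactly n copies of p while
## performing only O(log n) concatenations.
def _mult_with_strings(p, n):
    '''same loop as mult() above, but over ordinary strings'''
    np = ''
    while n >= 1:
        if n % 2:
            np = np + p
        p = p + p
        n = n // 2
    return np

assert _mult_with_strings('ab', 5) == 'ab' * 5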
def fix_emails(text):
    '''Escape angle-bracketed email addresses (e.g. <user@example.com>)
    so that later tag-stripping does not mistake them for markup.'''
    emails = bracket_emails.findall(text)
    for email in emails:
        _email = email.replace("<", "&lt;").replace(">", "&gt;")
        text = text.replace(email, _email)
    return text
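## Illustrative usage (not in the original module), assuming the
## module-level bracket_emails regex matches angle-bracketed addresses
## such as <bob@example.com>:
##
##     fix_emails('mail me at <bob@example.com> soon')
##     ==> 'mail me at &lt;bob@example.com&gt; soon'
##
## Note that the escaping lengthens the string, so callers that care
## about offsets apply it before computing any offsets.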
def _sentences(self, clean_visible):
    'generate (start, end, sentence string) tuples for the sentences in clean_visible'
    previous_end = 0
    clean_visible = clean_visible.decode('utf8')
    for start, end in self.sentence_tokenizer.span_tokenize(clean_visible):
        # no need to check start, because the first byte of text
        # is always first byte of first sentence, and we will
        # have already made the previous sentence longer on the
        # end if there was an overlap.
        if start < previous_end:
            start = previous_end
            if start > end:
                # skip this sentence... because it was eaten by
                # an earlier sentence with a label
                continue
        try:
            label = self.label_index.find_le(end)
        except ValueError:
            label = None
        if label:
            ## avoid splitting a label
            off = label.offsets[OffsetType.CHARS]
            end = max(off.first + off.length, end)
        previous_end = end
        sent_str = clean_visible[start:end]
        yield start, end, sent_str
def make_label_index(self, stream_item): 'make a sortedcollection on body.labels' labels = stream_item.body.labels.get(self.annotator_id) if not labels: labels = [] self.label_index = SortedCollection( [l for l in labels if OffsetType.CHARS in l.offsets], key=lambda label: label.offsets[OffsetType.CHARS].first)
def make_sentences(self, stream_item): 'assemble Sentence and Token objects' self.make_label_index(stream_item) sentences = [] token_num = 0 new_mention_id = 0 for sent_start, sent_end, sent_str in self._sentences( stream_item.body.clean_visible): assert isinstance(sent_str, unicode) sent = Sentence() sentence_pos = 0 for start, end in self.word_tokenizer.span_tokenize(sent_str): token_str = sent_str[start:end].encode('utf8') tok = Token( token_num=token_num, token=token_str, sentence_pos=sentence_pos, ) tok.offsets[OffsetType.CHARS] = Offset( type=OffsetType.CHARS, first=sent_start + start, length=end - start, ) # whitespace tokenizer will never get a token # boundary in the middle of an 'author' label try: label = self.label_index.find_le(sent_start + start) except ValueError: label = None if label: off = label.offsets[OffsetType.CHARS] if off.first + off.length > sent_start + start: streamcorpus.add_annotation(tok, label) logger.debug('adding label to tok: %r has %r', tok.token, label.target.target_id) if label in self.label_to_mention_id: mention_id = self.label_to_mention_id[label] else: mention_id = new_mention_id new_mention_id += 1 self.label_to_mention_id[label] = mention_id tok.mention_id = mention_id token_num += 1 sentence_pos += 1 sent.tokens.append(tok) sentences.append(sent) return sentences
def html_entities_to_unicode(text, space_padding=False, safe_only=False):
    '''
    Convert any HTML, XML, or numeric entities in the text into their
    unicode equivalents.  For example '&amp;' becomes '&'.

    This is adapted from BeautifulSoup, which should be able to do the
    same thing when called like this --- but that fails to convert
    everything for some bug.

    text = unicode(BeautifulStoneSoup(text,
               convertEntities=BeautifulStoneSoup.XML_ENTITIES))
    '''
    def convert_entities(match):
        '''
        comes from BeautifulSoup.Tag._convertEntities
        '''
        x = match.group(1)
        if safe_only and x not in ENTITIES_THAT_ARE_SAFE_TO_STRING_PAD:
            return u'&%s;' % x
        if x in name2codepoint:
            ## handles most cases
            return unichr(name2codepoint[x])
        elif x in XML_ENTITIES_TO_SPECIAL_CHARS:
            return XML_ENTITIES_TO_SPECIAL_CHARS[x]
        elif len(x) > 0 and x[0] == '#':
            # Handle numeric entities
            if len(x) > 1 and x[1] == 'x':
                return unichr(int(x[2:], 16))
            else:
                return unichr(int(x[1:]))
        else:
            ## uh oh, failed to convert anything, so emit the original entity
            return u'&%s;' % x

    def convert_to_padded_entities(match):
        converted_string = convert_entities(match)
        num_spaces_needed = len(match.group(0)) - len(converted_string)
        assert num_spaces_needed >= 0, \
            'len(%r) !<= len(%r)' % (converted_string, match.group(0))
        ## Where to put the spaces?  Before, after, symmetric?
        # Let's do symmetric.
        ## cast to int in prep for python3
        num_left = int(num_spaces_needed / 2)
        num_right = num_spaces_needed - num_left
        return (' ' * num_left) + converted_string + (' ' * num_right)

    ## brute force regex through all the characters...
    if space_padding:
        return tags.sub(convert_to_padded_entities, text)
    else:
        return tags.sub(convert_entities, text)
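## Illustrative sketch (not in the original module): typical calls,
## assuming the module-level `tags` regex and entity tables match named
## entities like &amp; and &lt; as described above.
def _example_entity_conversion():
    '''demonstrate plain vs. space-padded entity conversion'''
    text = u'Tom &amp; Jerry &lt;3'
    assert html_entities_to_unicode(text) == u'Tom & Jerry <3'
    ## with space_padding=True the output keeps the original length,
    ## which preserves character offsets computed on the raw text
    padded = html_entities_to_unicode(text, space_padding=True)
    assert len(padded) == len(text)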
def tps(text, min_token_len=2, quant_rate=0.01): ''' :param text: tag-free UTF-8 string of free text :returns string: 32-character md5 hash in hexadecimal Python implementation of the TextProfileSignature provided in SOLR. Unlike most other locality sensitive hashes, a TPS can be indexed as a searchable property of each document that does not require n^2 comparisons to find duplicates. http://wiki.apache.org/solr/TextProfileSignature ''' counts = Counter( ifilter(lambda x: len(x) >= min_token_len, imap(cleanse, text.split()))) max_freq = counts.most_common(1)[0][1] if max_freq <= 1: quant = 1 else: quant = max(2, round(max_freq * quant_rate)) to_hash = [] for word, count in counts.most_common(): if count <= quant: break to_hash += [word, str(int(math.floor(count / quant)))] to_hash = u' '.join(to_hash) return hashlib.md5(to_hash.encode('utf8')).hexdigest()
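## Illustrative sketch (not in the original module): near-duplicate
## texts tend to collapse onto the same TextProfileSignature because
## only the quantized counts of frequent tokens are hashed, whereas an
## md5 of the raw bytes would always differ.  Assumes cleanse() is in
## scope as in this module.
def _example_tps():
    a = u'The quick brown fox jumps over the lazy dog. ' * 20
    b = a + u'One extra sentence tacked onto the end!'
    sig_a = tps(a)
    sig_b = tps(b)
    ## both are 32-character hex md5 strings
    assert len(sig_a) == 32 and len(sig_b) == 32
    ## the rare extra tokens fall below the quantization threshold, so
    ## the two signatures usually come out equal
    return sig_a == sig_b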
def make_cleansed_file(i_chunk, tmp_cleansed_path): '''make a temp file of cleansed text''' tmp_cleansed = open(tmp_cleansed_path, 'wb') for idx, si in enumerate(i_chunk): tmp_cleansed.write('<FILENAME docid="%s">\n' % si.stream_id) tmp_cleansed.write(si.body.cleansed) ## how to deal with other_content? tmp_cleansed.write('</FILENAME>\n') tmp_cleansed.close() ## replace this with log.info() print 'created %s' % tmp_cleansed_path
def make_ner_file(tagger_id, tmp_cleansed_path, tmp_ner_path, pipeline_root): '''run child process to get OWPL output''' params = dict(INPUT_FILE=tmp_cleansed_path, #RAW_OUTPUT_FILE=tmp_ner_raw_path, OUTPUT_FILE=tmp_ner_path, PIPELINE_ROOT=pipeline_root) pipeline_cmd = pipeline_cmd_templates[tagger_id] % params print pipeline_cmd ## replace this with log.info() print 'creating %s' % tmp_ner_path start_time = time.time() gpg_child = subprocess.Popen( pipeline_cmd, stderr=subprocess.PIPE, shell=True) s_out, errors = gpg_child.communicate() assert gpg_child.returncode == 0 and 'Exception' not in errors, errors elapsed = time.time() - start_time ## replace this with log.info() print 'created %s in %.1f sec' % (tmp_ner_path, elapsed) ''' postproc_cmd = postproc_cmd_templates[tagger_id] % params print postproc_cmd ## replace this with log.info() print 'creating %s' % tmp_ner_raw_path start_time = time.time() gpg_child = subprocess.Popen( postproc_cmd, stderr=subprocess.PIPE, shell=True) s_out, errors = gpg_child.communicate() assert gpg_child.returncode == 0 and 'Exception' not in errors, errors elapsed = time.time() - start_time ## replace this with log.info() print 'created %s in %.1f sec' % (tmp_ner_path, elapsed) '''
def cleanse(span): '''Convert a string of text into a lowercase string with no punctuation and only spaces for whitespace. :param span: string ''' try: ## attempt to force it to utf8, which might fail span = span.encode('utf8', 'ignore') except: pass ## lowercase, strip punctuation, and shrink all whitespace span = span.lower() span = span.translate(strip_punctuation) span = whitespace.sub(' ', span) ## trim any leading or trailing whitespace return span.strip()
def align_chunk_with_ner(tmp_ner_path, i_chunk, tmp_done_path): ''' iterate through the i_chunk and tmp_ner_path to generate a new Chunk with body.ner ''' o_chunk = Chunk() input_iter = i_chunk.__iter__() ner = '' stream_id = None all_ner = xml.dom.minidom.parse(open(tmp_ner_path)) for raw_ner in all_ner.getElementsByTagName('FILENAME'): stream_item = input_iter.next() ## get stream_id out of the XML stream_id = raw_ner.attributes.get('docid').value assert stream_id and stream_id == stream_item.stream_id, \ '%s != %s\nner=%r' % (stream_id, stream_item.stream_id, ner) tagger_id = 'lingpipe' tagging = Tagging() tagging.tagger_id = tagger_id ## get this one file out of its FILENAME tags tagged_doc = list(lingpipe.files(raw_ner.toxml()))[0][1] tagging.raw_tagging = tagged_doc tagging.generation_time = streamcorpus.make_stream_time() stream_item.body.taggings[tagger_id] = tagging sentences = list(lingpipe.sentences(tagged_doc)) ## make JS labels on individual tokens assert stream_item.ratings[0].mentions, stream_item.stream_id john_smith_label = Label() john_smith_label.annotator = stream_item.ratings[0].annotator john_smith_label.target_id = stream_item.ratings[0].target_id # first map all corefchains to their words equiv_ids = collections.defaultdict(lambda: set()) for sent in sentences: for tok in sent.tokens: if tok.entity_type is not None: equiv_ids[tok.equiv_id].add(cleanse(tok.token)) ## find all the chains that are John Smith johnsmiths = set() for equiv_id, names in equiv_ids.items(): ## detect 'smith' in 'smithye' _names = cleanse(' '.join(names)) if 'john' in _names and 'smith' in _names: johnsmiths.add(equiv_id) print len(johnsmiths) ## now apply the label for sent in sentences: for tok in sent.tokens: if tok.equiv_id in johnsmiths: tok.labels = [john_smith_label] stream_item.body.sentences[tagger_id] = sentences o_chunk.add(stream_item) ## put the o_chunk bytes into the specified file open(tmp_done_path, 'wb').write(str(o_chunk)) ## replace this with log.info() print 'created %s' % tmp_done_path
def make_absolute_paths(config): '''given a config dict with streamcorpus_pipeline as a key, find all keys under streamcorpus_pipeline that end with "_path" and if the value of that key is a relative path, convert it to an absolute path using the value provided by root_path ''' if not 'streamcorpus_pipeline' in config: logger.critical('bad config: %r', config) raise ConfigurationError('missing "streamcorpus_pipeline" from config') ## remove the root_path, so it does not get extended itself root_path = config['streamcorpus_pipeline'].pop('root_path', None) if not root_path: root_path = os.getcwd() if not root_path.startswith('/'): root_path = os.path.join( os.getcwd(), root_path ) def recursive_abs_path( sub_config, root_path ): for key, val in sub_config.items(): if isinstance(val, basestring): if key.endswith('path'): ## ignore URLs in *_path parameters if re.match('^http.?://', val): continue ## we have a path... is it already absolute? if not val.startswith('/'): ## make the path absolute sub_config[key] = os.path.join(root_path, val) elif isinstance(val, dict): recursive_abs_path( val, root_path ) recursive_abs_path( config, root_path ) ## put the root_path back config['root_path'] = root_path
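## Illustrative sketch (not in the original module): keys ending in
## "path" under the streamcorpus_pipeline block are rewritten relative
## to root_path, while URLs and already-absolute paths are left alone.
## All paths shown here are hypothetical.
def _example_make_absolute_paths():
    config = {
        'streamcorpus_pipeline': {
            'root_path': '/opt/pipeline',
            'tmp_dir_path': 'tmp',
            'third_dir_path': '/data/third',
            'schema_path': 'http://example.com/schema.json',
        }
    }
    make_absolute_paths(config)
    sc = config['streamcorpus_pipeline']
    assert sc['tmp_dir_path'] == '/opt/pipeline/tmp'       # made absolute
    assert sc['third_dir_path'] == '/data/third'           # untouched
    assert sc['schema_path'] == 'http://example.com/schema.json'  # URL ignored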
def make_hash(obj): ''' Makes a hash from a dictionary, list, tuple or set to any level, that contains only other hashable types (including any lists, tuples, sets, and dictionaries). See second answer (not the accepted answer): http://stackoverflow.com/questions/5884066/hashing-a-python-dictionary ''' if isinstance(obj, (set, tuple, list)): return tuple([make_hash(e) for e in obj]) elif not isinstance(obj, dict): return hash(obj) new_obj = copy.deepcopy(obj) for k, v in new_obj.items(): ## call self recursively new_obj[k] = make_hash(v) return hash(tuple(frozenset(new_obj.items())))
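## Illustrative sketch (not in the original module): make_hash gives the
## same value for structurally equal nested containers regardless of
## dict key order, which is what lets a whole pipeline config be
## fingerprinted reproducibly.
def _example_make_hash():
    a = {'b': [1, 2, {'c': (3, 4)}], 'a': 'x'}
    b = {'a': 'x', 'b': [1, 2, {'c': (3, 4)}]}
    assert make_hash(a) == make_hash(b)
    ## changing any nested value changes the hash
    assert make_hash(a) != make_hash({'a': 'x', 'b': [1, 2, {'c': (3, 5)}]})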
def instantiate_config(config): '''setup the config and load external modules This updates 'config' as follows: * All paths are replaced with absolute paths * A hash and JSON dump of the config are stored in the config * If 'pythonpath' is in the config, it is added to sys.path * If 'setup_modules' is in the config, all modules named in it are loaded ''' make_absolute_paths(config) pipeline_config = config['streamcorpus_pipeline'] pipeline_config['config_hash'] = make_hash(config) pipeline_config['config_json'] = json.dumps(config) logger.debug('running config: {0} = {1!r}' .format(pipeline_config['config_hash'], config)) ## Load modules # This is a method of using settings in yaml configs to load plugins. die = False for pathstr in pipeline_config.get('pythonpath', {}).itervalues(): if pathstr not in sys.path: sys.path.append(pathstr) for modname in pipeline_config.get('setup_modules', {}).itervalues(): try: m = importlib.import_module(modname) if not m: logger.critical('could not load module %r', modname) die = True continue if hasattr(m, 'setup'): m.setup() logger.debug('loaded and setup %r', modname) else: logger.debug('loaded %r', modname) except Exception: logger.critical('error loading and initting module %r', modname, exc_info=True) die = True if die: sys.exit(1)
def generate_john_smith_chunk(path_to_original):
    '''
    This _looks_ like a Chunk only in that it generates StreamItem
    instances when iterated upon.
    '''
    ## Every StreamItem has a stream_time property.  It usually comes
    ## from the document creation time.  Here, we assume the JS corpus
    ## was created at one moment at the end of 1998:
    creation_time = '1998-12-31T23:59:59.999999Z'
    correct_time = 915148799

    if not os.path.isabs(path_to_original):
        path_to_original = os.path.join(os.getcwd(), path_to_original)

    ## iterate over the files in the 35 input directories
    for label_id in range(35):
        dir_path = os.path.join(path_to_original, str(label_id))
        fnames = os.listdir(dir_path)
        fnames.sort()
        for fname in fnames:
            stream_item = streamcorpus.make_stream_item(
                creation_time,
                ## make up an abs_url
                os.path.join(
                    'john-smith-corpus', str(label_id), fname))
            if int(stream_item.stream_time.epoch_ticks) != correct_time:
                raise PipelineBaseException('wrong stream_time construction: %r-->%r != %r'\
                    % (creation_time, stream_item.stream_time.epoch_ticks, correct_time))

            ## These docs came from the authors of the paper cited above.
            stream_item.source = 'bagga-and-baldwin'

            ## build a ContentItem for the body
            body = streamcorpus.ContentItem()
            raw_string = open(os.path.join(dir_path, fname)).read()
            ## We know that this is already clean and has nothing
            ## tricky in it, because we manually cleansed it.  To
            ## illustrate how we stick all strings into thrift, we
            ## convert this to unicode (which introduces no changes)
            ## and then encode it as utf-8, which also introduces no
            ## changes.  Thrift stores strings as 8-bit character
            ## strings.
            # http://www.mail-archive.com/thrift-user@incubator.apache.org/msg00210.html
            body.clean_visible = unicode(raw_string).encode('utf8')

            ## attach the content_item to the stream_item
            stream_item.body = body

            stream_item.body.language = streamcorpus.Language(code='en', name='ENGLISH')

            ## The authors also annotated the corpus
            anno = streamcorpus.Annotator()
            anno.annotator_id = 'bagga-and-baldwin'
            anno.annotation_time = stream_item.stream_time

            ## build a Rating for the doc-level annotation
            rating = streamcorpus.Rating()
            rating.annotator = anno
            rating.target = streamcorpus.Target(target_id = str(label_id)) # must be string
            rating.contains_mention = True
            rating.mentions = ['john', 'smith']

            ## attach this one Rating to the stream_item
            streamcorpus.add_annotation(stream_item, rating)

            ## provide this stream_item to the pipeline
            yield stream_item
def re_based_make_clean_visible(html):
    '''
    Takes an HTML-like binary string as input and returns a binary
    string of the same length with all tags replaced by whitespace.
    This also detects script and style tags, and replaces the text
    between them with whitespace.

    Pre-existing whitespace of any kind (newlines, tabs) is converted
    to single spaces ' ', which has the same byte length (and
    character length).

    Note: this does not change any characters like &rsquo; and &nbsp;,
    so taggers operating on this text must cope with such symbols.
    Converting them to some other character would change their byte
    length, even if equivalent from a character perspective.

    This is regex based, which can occasionally just hang...
    '''
    text = ''

    # Fix emails
    html = fix_emails(html)

    for m in invisible.finditer(html):
        text += m.group('before')
        text += ' ' * len(m.group('invisible'))

    # text better be >= original
    assert len(html) >= len(text), '%d !>= %d' % (len(html), len(text))

    # capture any characters after the last tag... such as newlines;
    # guard against tail == 0, because html[-0:] would duplicate the
    # whole document
    tail = len(html) - len(text)
    if tail > 0:
        text += html[-tail:]

    # now they must be equal
    assert len(html) == len(text), '%d != %d' % (len(html), len(text))

    return text
def make_clean_visible(_html, tag_replacement_char=' '):
    '''
    Takes an HTML-like Unicode string as input and returns a UTF-8
    encoded string with all tags replaced by whitespace.  In
    particular, all Unicode characters inside tags are replaced with a
    single whitespace character.

    This does not detect comments, style, script, link.  It also does
    not do anything with HTML-escaped characters.  All of these are
    handled by the clean_html pre-cursor step.

    Pre-existing whitespace of any kind (newlines, tabs) is converted
    to single spaces ' ', which has the same byte length (and
    character length).

    This is a simple state machine iterator without regexes
    '''
    def non_tag_chars(html):
        n = 0
        while n < len(html):
            angle = html.find('<', n)
            if angle == -1:
                yield html[n:]
                n = len(html)
                break
            yield html[n:angle]
            n = angle

            while n < len(html):
                nl = html.find('\n', n)
                angle = html.find('>', n)
                if angle == -1:
                    yield ' ' * (len(html) - n)
                    n = len(html)
                    break
                elif nl == -1 or angle < nl:
                    yield ' ' * (angle + 1 - n)
                    n = angle + 1
                    break
                else:
                    yield ' ' * (nl - n) + '\n'
                    n = nl + 1
                    # do not break

    if not isinstance(_html, unicode):
        _html = unicode(_html, 'utf-8')

    # Escape angle-bracketed emails so they are not treated as tags
    _html = fix_emails(_html)

    ## strip tags with the state machine above
    non_tag = ''.join(non_tag_chars(_html))

    return non_tag.encode('utf-8')
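## Illustrative sketch (not in the original module): tags become runs of
## spaces of the same length, so character offsets computed against the
## raw HTML remain valid in clean_visible (assuming no angle-bracketed
## emails get lengthened by fix_emails along the way).
def _example_make_clean_visible():
    html = u'<p>Hello <b>world</b>!</p>\n'
    visible = make_clean_visible(html).decode('utf8')
    assert len(visible) == len(html)
    assert visible[3:8] == u'Hello'      # text keeps its original offsets
    assert visible.split() == [u'Hello', u'world', u'!']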
def non_tag_chars_from_raw(html): '''generator that yields clean visible as it transitions through states in the raw `html` ''' n = 0 while n < len(html): # find start of tag angle = html.find('<', n) if angle == -1: yield html[n:] n = len(html) break yield html[n:angle] n = angle # find the end of the tag string space = html.find(' ', n, n + longest_extended_tag + 2) angle = html.find('>', n, n + longest_extended_tag + 2) nl = html.find('\n', n, n + longest_extended_tag + 2) tab = html.find('\t', n, n + longest_extended_tag + 2) ends = filter(lambda end: end > -1, [tab, nl, space, angle]) if ends: tag = html[n + 1 : min(ends)] if tag == '!--': # whiteout comment except newlines end = html.find('-->', n) while n < end: nl = html.find('\n', n, end) if nl != -1: yield ' ' * (nl - n) + '\n' n = nl + 1 else: yield ' ' * (end - n + 3) break n = end + 3 continue is_extended = tag.lower() in extended_tags else: is_extended = False # find end of tag even if on a lower line while n < len(html): squote = html.find("'", n) dquote = html.find('"', n) nl = html.find('\n', n) angle = html.find('>', n) if angle == -1: # hits end of doc before end of tag yield ' ' * (len(html) - n) n = len(html) break elif -1 < squote < angle or -1 < dquote < angle: if squote != -1 and dquote != -1: if squote < dquote: open_quote = squote quote = "'" else: open_quote = dquote quote = '"' elif dquote != -1: open_quote = dquote quote = '"' else: open_quote = squote quote = "'" close_quote = html.find(quote, open_quote + 1) while n < close_quote: nl = html.find('\n', n, close_quote) if nl == -1: break yield ' ' * (nl - n) + '\n' n = nl + 1 yield ' ' * (close_quote + 1 - n) n = close_quote + 1 continue elif nl == -1 or angle < nl: # found close before either newline or end of doc yield ' ' * (angle + 1 - n) n = angle + 1 if is_extended and html[angle - 1] != '/': # find matching closing tag. JavaScript can # include HTML *strings* within it, and in # principle, that HTML could contain a closing # script tag in it; ignoring for now. while n < len(html): nl = html.find('\n', n) close = html.find('</', n) close2 = html.find('</', close + 2) angle = html.find('>', close + 2) if nl != -1 and nl < close: yield ' ' * (nl - n) + '\n' n = nl + 1 elif close == -1 or angle == -1: # end of doc before matching close tag yield ' ' * (len(html) - n) n = len(html) break elif close2 != -1 and close2 < angle: # broken tag inside current tag yield ' ' * (close + 2 - n) n = close + 2 elif html[close + 2:angle].lower() == tag.lower(): yield ' ' * (angle + 1 - n) n = angle + 1 break else: yield ' ' * (angle + 1 - n) n = angle + 1 # do not break # finished with tag break else: # found a newline within the current tag yield ' ' * (nl - n) + '\n' n = nl + 1
def make_clean_visible_from_raw(_html, tag_replacement_char=' '):
    '''Takes an HTML-like Unicode (or UTF-8 encoded) string as input
    and returns a UTF-8 encoded string with all tags replaced by
    whitespace.  In particular, all Unicode characters inside tags are
    replaced with a single whitespace character.

    This *does* detect comments, style, script, link tags and replaces
    them with whitespace.  This is subtle because these tags can be
    self-closing or not.

    It does not do anything with HTML-escaped characters.

    Pre-existing whitespace of any kind *except* newlines (\n) and
    linefeeds (\r\n) is converted to single spaces ' ', which has the
    same byte length (and character length).  Newlines and linefeeds
    are left unchanged.

    This is a simple state machine iterator without regexes

    '''
    if not isinstance(_html, unicode):
        _html = unicode(_html, 'utf-8')

    ## strip tags with the state machine above
    non_tag = ''.join(non_tag_chars_from_raw(_html))

    return non_tag.encode('utf-8')
def make_clean_visible_file(i_chunk, clean_visible_path): '''make a temp file of clean_visible text''' _clean = open(clean_visible_path, 'wb') _clean.write('<?xml version="1.0" encoding="UTF-8"?>') _clean.write('<root>') for idx, si in enumerate(i_chunk): if si.stream_id is None: # create the FILENAME element anyway, so the ordering # remains the same as the i_chunk and can be aligned. stream_id = '' else: stream_id = si.stream_id doc = lxml.etree.Element("FILENAME", stream_id=stream_id) if si.body and si.body.clean_visible: try: # is UTF-8, and etree wants .text to be unicode doc.text = si.body.clean_visible.decode('utf8') except ValueError: doc.text = drop_invalid_and_upper_utf8_chars( si.body.clean_visible.decode('utf8')) except Exception, exc: # this should never ever fail, because if it does, # then it means that clean_visible (or more likely # clean_html) is not what it is supposed to be. # Therefore, do not take it lightly: logger.critical(traceback.format_exc(exc)) logger.critical('failed on stream_id=%s to follow:', si.stream_id) logger.critical(repr(si.body.clean_visible)) logger.critical('above was stream_id=%s', si.stream_id) # [I don't know who calls this, but note that this # will *always* fail if clean_visible isn't valid UTF-8.] raise else: doc.text = '' _clean.write(lxml.etree.tostring(doc, encoding='UTF-8')) _clean.write('</root>') _clean.close() logger.info(clean_visible_path) ''' ## hack to capture html for inspection _html = open(clean_visible_path + '-html', 'wb') for idx, si in enumerate(i_chunk): _html.write('<FILENAME docid="%s">' % si.stream_id) if si.body and si.body.clean_html: _html.write(si.body.clean_html) _html.write('</FILENAME>\n') _html.close() ## replace this with log.info() print clean_visible_path + '-html' '''
def cleanse(span, lower=True):
    '''Convert a unicode string into a lowercased (by default) string
    with no punctuation and only spaces for whitespace.

    Replace PennTreebank escaped brackets with ' ':
      -LRB- -RRB- -LSB- -RSB- -LCB- -RCB-
    (The acronyms stand for (Left|Right) (Round|Square|Curly) Bracket.)
    http://www.cis.upenn.edu/~treebank/tokenization.html

    :param span: string
    '''
    assert isinstance(span, unicode), \
        'got non-unicode string %r' % span
    # lowercase, strip punctuation, and shrink all whitespace
    span = penn_treebank_brackets.sub(' ', span)
    if lower:
        span = span.lower()
    span = span.translate(strip_punctuation)
    span = whitespace.sub(' ', span)
    # trim any leading or trailing whitespace
    return span.strip()
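## Illustrative usage (not in the original module), assuming the
## module-level penn_treebank_brackets, strip_punctuation, and
## whitespace tables behave as described in the docstring above:
##
##     cleanse(u'-LRB- John  SMITH! -RRB-')  ==>  u'john smith'
##     cleanse(u'FooBar', lower=False)       ==>  u'FooBar'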