code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
app.sitemap_links.append(pagename + ".html")
def add_html_link(app, pagename, templatename, context, doctree)
As each page is built, collect page names for the sitemap
14.309126
7.677018
1.863891
site_url = app.builder.config.site_url or app.builder.config.html_baseurl
if not site_url:
    print("sphinx-sitemap error: neither html_baseurl nor site_url "
          "are set in conf.py. Sitemap not built.")
    return
if not app.sitemap_links:
    print("sphinx-sitemap warning: No pages generated for sitemap.xml")
    return

ET.register_namespace('xhtml', "http://www.w3.org/1999/xhtml")
root = ET.Element("urlset")
root.set("xmlns", "http://www.sitemaps.org/schemas/sitemap/0.9")

get_locales(app, exception)

if app.builder.config.version:
    version = app.builder.config.version + '/'
else:
    version = app.builder.config.version

for link in app.sitemap_links:
    url = ET.SubElement(root, "url")
    if app.builder.config.language is not None:
        ET.SubElement(url, "loc").text = site_url + \
            app.builder.config.language + '/' + version + link
        if len(app.locales) > 0:
            for lang in app.locales:
                linktag = ET.SubElement(
                    url,
                    "{http://www.w3.org/1999/xhtml}link"
                )
                linktag.set("rel", "alternate")
                linktag.set("hreflang", lang)
                linktag.set("href", site_url + lang + '/' + version + link)
    elif app.builder.config.version:
        ET.SubElement(url, "loc").text = site_url + version + link
    else:
        ET.SubElement(url, "loc").text = site_url + link

filename = app.outdir + "/sitemap.xml"
ET.ElementTree(root).write(filename,
                           xml_declaration=True,
                           encoding='utf-8',
                           method="xml")
print("sitemap.xml was generated for URL %s in %s" % (site_url, filename))
def create_sitemap(app, exception)
Generates the sitemap.xml from the collected HTML page links
2.312497
2.273273
1.017255
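These two handlers only take effect once Sphinx calls them at the right build events. Below is a minimal wiring sketch for an extension's setup() hook; the event names and add_config_value are real Sphinx APIs, while the site_url config value and the sitemap_links/locales initialization are assumptions about how this extension tracks its state:

def setup(app):
    # Hypothetical wiring; assumes the extension stores its state on `app`.
    app.add_config_value('site_url', default=None, rebuild='')
    app.sitemap_links = []
    app.locales = []
    app.connect('html-page-context', add_html_link)  # fires once per rendered page
    app.connect('build-finished', create_sitemap)    # fires once when the build ends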
ideal = [float("inf")] * mo_prob.nobj nadir = [float("-inf")] * mo_prob.nobj for i in range(mo_prob.nobj): meth = opt_meth_cls(SelectedOptimizationProblem(mo_prob, i)) _decis, obj = meth.search() for j, obj_val in enumerate(obj): if obj_val < ideal[j]: ideal[j] = obj_val if obj_val > nadir[j]: nadir[j] = obj_val return ideal, nadir
def estimate_payoff_table( opt_meth_cls: Type[OptimizationMethod], mo_prob: MOProblem ) -> Tuple[List[float], List[float]]
Estimates the ideal and nadir by using a payoff table. This should give a good estimate for the ideal, but can be very inaccurate for the nadir. For an explanation of why, see [DEB2010]_.

References
----------
.. [DEB2010] Deb, K., Miettinen, K., & Chaudhuri, S. (2010). Toward an
   estimation of nadir objective vector using a hybrid of evolutionary and
   local search approaches. IEEE Transactions on Evolutionary Computation,
   14(6), 821-841.
3.054627
3.145763
0.971029
ideal, nadir = idnad
ideal_arr = np.array(ideal)
nadir_arr = np.array(nadir)
idnad_range = nadir_arr - ideal_arr
nadir_arr += pad_nadir * idnad_range
ideal_arr -= pad_ideal * idnad_range
return list(ideal_arr), list(nadir_arr)
def pad(idnad: Tuple[List[float], List[float]], pad_nadir=0.05, pad_ideal=0.0)
Pad an ideal/nadir estimate. This is mainly useful for padding the nadir estimated by a payoff table for safety purposes.
2.11663
2.04443
1.035315
ideal, nadir = idnad
mult = np.power(10, dp)
ideal_arr = np.floor(np.array(ideal) * mult) / mult
nadir_arr = np.ceil(np.array(nadir) * mult) / mult
return list(ideal_arr), list(nadir_arr)
def round_off(idnad: Tuple[List[float], List[float]], dp: int = 2)
Round off an ideal/nadir estimate, e.g. so that it looks nicer when plotted. This function is careful to round so as to only ever move the values away from the contained range.
2.831025
2.435779
1.162267
return round_off(estimate_payoff_table(opt_meth, mo_prob), dp)
def default_estimate( opt_meth: Type[OptimizationMethod], mo_prob: MOProblem, dp: int = 2 ) -> Tuple[List[float], List[float]]
The recommended nadir/ideal estimator - use a payoff table and then round off the result.
12.076679
4.030322
2.996455
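A sketch of how the estimators above compose; MyMethod and my_problem are hypothetical stand-ins for a concrete OptimizationMethod subclass and MOProblem instance:

# Hypothetical usage: payoff table, then rounding (what default_estimate does) ...
ideal, nadir = default_estimate(MyMethod, my_problem, dp=2)
# ... or pad the raw payoff-table estimate instead, for a safer nadir.
ideal, nadir = pad(estimate_payoff_table(MyMethod, my_problem), pad_nadir=0.05)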
self._max = max
if max:
    self._coeff = -1.0
else:
    self._coeff = 1.0
return self._search(**params)
def search(self, max=False, **params) -> Tuple[np.ndarray, List[float]]
Search for the optimal solution.

This sets up the search for the optimization and calls the _search method.

Parameters
----------
max : bool (default False)
    If true, find the maximum of the objective function instead of the minimum
**params : dict [optional]
    Parameters for the single objective optimization method
4.692195
5.382724
0.871714
if preference:
    self.preference = preference
    print("Given preference: %s" % self.preference.pref_input)
self._update_fh()
# tmpzh = list(self.zh)
self._update_zh(self.zh, self.fh)
# self.zh = list(np.array(self.zh) / 2. + np.array(self.zh_prev) / 2.)
# self.zh_prev = tmpzh
if self.current_iter != 1:
    self.fh_lo = list(self.lower_bounds_factory.result(self.zh_prev))
self.current_iter -= 1
return self.fh_lo, self.zh
def next_iteration(self, preference=None)
Return next iteration bounds
6.364892
6.244725
1.019243
if bounds:
    self.problem.points = reachable_points(
        self.problem.points, self.problem.ideal, bounds
    )
if not utils.isin(self.fh, self.problem.points) or ref_point != self.ref_point:
    self.ref_point = list(ref_point)
    self._update_fh()
self._update_zh(self.zh, self.fh)
self.fh_lo = list(self.lower_bounds_factory.result(self.zh))
self.fh_up = list(self.upper_bounds_factory.result(self.zh))
logging.debug(f"Updated upper boundary: {self.fh_up}")
logging.debug(f"Updated lower boundary: {self.fh_lo}")
if not np.all(np.array(self.fh_up) > np.array(self.fh_lo)):
    warn(self.NegativeIntervalWarning())
assert utils.isin(self.fh_up, self.problem.points)
assert utils.isin(self.fh_lo, self.problem.points)
dist = self.distance(self.zh, self.fh)
# Reachable points
self.update_points()
lP = len(self.problem.points)
self.current_iter -= 1
return dist, self.fh, self.zh, self.fh_lo, self.fh_up, lP
def next_iteration(self, ref_point, bounds=None)
Calculate the next iteration point to be shown to the DM

Parameters
----------
ref_point : list of float
    Reference point given by the DM
4.112988
4.380352
0.938963
for paragraph in paragraphs:
    if paragraph.class_type == 'good':
        if paragraph.heading:
            tag = 'h'
        else:
            tag = 'p'
    elif no_boilerplate:
        continue
    else:
        tag = 'b'
    print('<%s> %s' % (tag, cgi.escape(paragraph.text)), file=fp)
def output_default(paragraphs, fp=sys.stdout, no_boilerplate=True)
Outputs the paragraphs as: <tag> text of the first paragraph <tag> text of the second paragraph ... where <tag> is <p>, <h> or <b> which indicates standard paragraph, heading or boilerplate respectively.
3.409804
3.253717
1.047972
for paragraph in paragraphs:
    output = '<p class="%s" cfclass="%s" heading="%i" xpath="%s"> %s' % (
        paragraph.class_type,
        paragraph.cf_class,
        int(paragraph.heading),
        paragraph.xpath,
        cgi.escape(paragraph.text)
    )
    print(output, file=fp)
def output_detailed(paragraphs, fp=sys.stdout)
Same as output_default, but only <p> tags are used and the following attributes are added: class, cfclass and heading.
4.876016
3.121975
1.561837
for paragraph in paragraphs:
    if paragraph.class_type in ('good', 'neargood'):
        if paragraph.heading:
            cls = 2
        else:
            cls = 3
    else:
        cls = 1
    for text_node in paragraph.text_nodes:
        print('%i\t%s' % (cls, text_node.strip()), file=fp)
def output_krdwrd(paragraphs, fp=sys.stdout)
Outputs the paragraphs in a KrdWrd compatible format: class<TAB>first text node class<TAB>second text node ... where class is 1, 2 or 3 which means boilerplate, undecided or good respectively. Headings are output as undecided.
4.551466
3.517621
1.293904
if isinstance(html, unicode):
    decoded_html = html
    # encode HTML for case it's XML with encoding declaration
    forced_encoding = encoding if encoding else default_encoding
    html = html.encode(forced_encoding, errors)
else:
    decoded_html = decode_html(html, default_encoding, encoding, errors)

try:
    dom = lxml.html.fromstring(decoded_html, parser=lxml.html.HTMLParser())
except ValueError:
    # Unicode strings with encoding declaration are not supported.
    # For XHTML files with encoding declaration, use the declared encoding.
    dom = lxml.html.fromstring(html, parser=lxml.html.HTMLParser())

return dom
def html_to_dom(html, default_encoding=DEFAULT_ENCODING, encoding=None, errors=DEFAULT_ENC_ERRORS)
Converts HTML to DOM.
3.709207
3.669468
1.01083
if isinstance(html, unicode):
    return html

if encoding:
    return html.decode(encoding, errors)

match = CHARSET_META_TAG_PATTERN.search(html)
if match:
    declared_encoding = match.group(1).decode("ASCII")
    # proceed unknown encoding as if it wasn't found at all
    with ignored(LookupError):
        return html.decode(declared_encoding, errors)

# unknown encoding
try:
    # try UTF-8 first
    return html.decode("utf8")
except UnicodeDecodeError:
    # try lucky with default encoding
    try:
        return html.decode(default_encoding, errors)
    except UnicodeDecodeError as e:
        raise JustextError("Unable to decode the HTML to Unicode: " + unicode(e))
def decode_html(html, default_encoding=DEFAULT_ENCODING, encoding=None, errors=DEFAULT_ENC_ERRORS)
Converts `html` containing an HTML page into Unicode. Tries to guess the character encoding from the meta tag.
4.410426
4.173917
1.056663
"Removes unwanted parts of DOM." options = { "processing_instructions": False, "remove_unknown_tags": False, "safe_attrs_only": False, "page_structure": False, "annoying_tags": False, "frames": False, "meta": False, "links": False, "javascript": False, "scripts": True, "comments": True, "style": True, "embedded": True, "forms": True, "kill_tags": ("head",), } cleaner = Cleaner(**options) return cleaner.clean_html(dom)
def preprocessor(dom)
Removes unwanted parts of DOM.
4.175952
3.782643
1.103977
"Context-free paragraph classification." stoplist = frozenset(w.lower() for w in stoplist) for paragraph in paragraphs: length = len(paragraph) stopword_density = paragraph.stopwords_density(stoplist) link_density = paragraph.links_density() paragraph.heading = bool(not no_headings and paragraph.is_heading) if link_density > max_link_density: paragraph.cf_class = 'bad' elif ('\xa9' in paragraph.text) or ('&copy' in paragraph.text): paragraph.cf_class = 'bad' elif re.search('^select|\.select', paragraph.dom_path): paragraph.cf_class = 'bad' elif length < length_low: if paragraph.chars_count_in_links > 0: paragraph.cf_class = 'bad' else: paragraph.cf_class = 'short' elif stopword_density >= stopwords_high: if length > length_high: paragraph.cf_class = 'good' else: paragraph.cf_class = 'neargood' elif stopword_density >= stopwords_low: paragraph.cf_class = 'neargood' else: paragraph.cf_class = 'bad'
def classify_paragraphs(paragraphs, stoplist, length_low=LENGTH_LOW_DEFAULT, length_high=LENGTH_HIGH_DEFAULT, stopwords_low=STOPWORDS_LOW_DEFAULT, stopwords_high=STOPWORDS_HIGH_DEFAULT, max_link_density=MAX_LINK_DENSITY_DEFAULT, no_headings=NO_HEADINGS_DEFAULT)
Context-free paragraph classification.
3.026245
2.999795
1.008817
return _get_neighbour(i, paragraphs, ignore_neargood, 1, len(paragraphs))
def get_next_neighbour(i, paragraphs, ignore_neargood)
Return the class of the paragraph at the bottom end of the short/neargood paragraphs block. If ignore_neargood is True, then only 'bad' or 'good' can be returned, otherwise 'neargood' can be returned, too.
3.937885
4.386343
0.89776
# copy classes
for paragraph in paragraphs:
    paragraph.class_type = paragraph.cf_class

# good headings
for i, paragraph in enumerate(paragraphs):
    if not (paragraph.heading and paragraph.class_type == 'short'):
        continue
    j = i + 1
    distance = 0
    while j < len(paragraphs) and distance <= max_heading_distance:
        if paragraphs[j].class_type == 'good':
            paragraph.class_type = 'neargood'
            break
        distance += len(paragraphs[j].text)
        j += 1

# classify short
new_classes = {}
for i, paragraph in enumerate(paragraphs):
    if paragraph.class_type != 'short':
        continue
    prev_neighbour = get_prev_neighbour(i, paragraphs, ignore_neargood=True)
    next_neighbour = get_next_neighbour(i, paragraphs, ignore_neargood=True)
    neighbours = set((prev_neighbour, next_neighbour))
    if neighbours == set(['good']):
        new_classes[i] = 'good'
    elif neighbours == set(['bad']):
        new_classes[i] = 'bad'
    # it must be set(['good', 'bad'])
    elif (prev_neighbour == 'bad' and get_prev_neighbour(i, paragraphs, ignore_neargood=False) == 'neargood') or \
            (next_neighbour == 'bad' and get_next_neighbour(i, paragraphs, ignore_neargood=False) == 'neargood'):
        new_classes[i] = 'good'
    else:
        new_classes[i] = 'bad'

for i, c in new_classes.items():
    paragraphs[i].class_type = c

# revise neargood
for i, paragraph in enumerate(paragraphs):
    if paragraph.class_type != 'neargood':
        continue
    prev_neighbour = get_prev_neighbour(i, paragraphs, ignore_neargood=True)
    next_neighbour = get_next_neighbour(i, paragraphs, ignore_neargood=True)
    if (prev_neighbour, next_neighbour) == ('bad', 'bad'):
        paragraph.class_type = 'bad'
    else:
        paragraph.class_type = 'good'

# more good headings
for i, paragraph in enumerate(paragraphs):
    if not (paragraph.heading and paragraph.class_type == 'bad' and paragraph.cf_class != 'bad'):
        continue
    j = i + 1
    distance = 0
    while j < len(paragraphs) and distance <= max_heading_distance:
        if paragraphs[j].class_type == 'good':
            paragraph.class_type = 'good'
            break
        distance += len(paragraphs[j].text)
        j += 1
def revise_paragraph_classification(paragraphs, max_heading_distance=MAX_HEADING_DISTANCE_DEFAULT)
Context-sensitive paragraph classification. Assumes that classify_paragraphs has already been called.
1.942046
1.944821
0.998573
dom = html_to_dom(html_text, default_encoding, encoding, enc_errors)
dom = preprocessor(dom)

paragraphs = ParagraphMaker.make_paragraphs(dom)

classify_paragraphs(paragraphs, stoplist, length_low, length_high,
                    stopwords_low, stopwords_high, max_link_density, no_headings)
revise_paragraph_classification(paragraphs, max_heading_distance)

return paragraphs
def justext(html_text, stoplist, length_low=LENGTH_LOW_DEFAULT, length_high=LENGTH_HIGH_DEFAULT, stopwords_low=STOPWORDS_LOW_DEFAULT, stopwords_high=STOPWORDS_HIGH_DEFAULT, max_link_density=MAX_LINK_DENSITY_DEFAULT, max_heading_distance=MAX_HEADING_DISTANCE_DEFAULT, no_headings=NO_HEADINGS_DEFAULT, encoding=None, default_encoding=DEFAULT_ENCODING, enc_errors=DEFAULT_ENC_ERRORS, preprocessor=preprocessor)
Converts an HTML page into a list of classified paragraphs. Each paragraph is represented as an instance of class ``justext.paragraph.Paragraph``.
3.017656
2.919611
1.033582
handler = cls()
lxml.sax.saxify(root, handler)
return handler.paragraphs
def make_paragraphs(cls, root)
Converts DOM into paragraphs.
6.814499
6.08469
1.119942
path_to_stoplists = os.path.dirname(sys.modules["justext"].__file__)
path_to_stoplists = os.path.join(path_to_stoplists, "stoplists")

stoplist_names = []
for filename in os.listdir(path_to_stoplists):
    name, extension = os.path.splitext(filename)
    if extension == ".txt":
        stoplist_names.append(name)

return frozenset(stoplist_names)
def get_stoplists()
Returns a collection of built-in stop-lists.
2.097574
2.005067
1.046137
file_path = os.path.join("stoplists", "%s.txt" % language)
try:
    stopwords = pkgutil.get_data("justext", file_path)
except IOError:
    raise ValueError(
        "Stoplist for language '%s' is missing. "
        "Please use function 'get_stoplists' for a complete list of stoplists "
        "and feel free to contribute your own stoplist." % language
    )

return frozenset(w.decode("utf8").lower() for w in stopwords.splitlines())
def get_stoplist(language)
Returns a built-in stop-list for the language as a set of words.
4.281308
4.166738
1.027496
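End-to-end, the pieces above are meant to be chained roughly like this (a sketch; the URL is a placeholder, and it assumes an "English" stoplist ships with the package):

import requests

html = requests.get("http://example.com").content  # placeholder URL
paragraphs = justext(html, get_stoplist("English"))
for paragraph in paragraphs:
    if paragraph.class_type == 'good':  # keep only main-content paragraphs
        print(paragraph.text)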
cache_key = '{0}:{1}:{2}:{3}'.format(
    client, region, aws_access_key_id, endpoint_url or ''
)
if not aws_session_token:
    if cache_key in CLIENT_CACHE:
        return CLIENT_CACHE[cache_key]
session = get_boto_session(
    region,
    aws_access_key_id,
    aws_secret_access_key,
    aws_session_token
)
if not session:
    logging.error("Failed to get {0} client.".format(client))
    return None

CLIENT_CACHE[cache_key] = session.client(
    client,
    endpoint_url=endpoint_url
)
return CLIENT_CACHE[cache_key]
def get_boto_client( client, region=None, aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None, endpoint_url=None )
Get a boto3 client connection.
2.08926
2.099365
0.995186
cache_key = '{0}:{1}:{2}:{3}'.format(
    resource, region, aws_access_key_id, endpoint_url or ''
)
if not aws_session_token:
    if cache_key in RESOURCE_CACHE:
        return RESOURCE_CACHE[cache_key]
session = get_boto_session(
    region,
    aws_access_key_id,
    aws_secret_access_key,
    aws_session_token
)
if not session:
    logging.error("Failed to get {0} resource.".format(resource))
    return None

RESOURCE_CACHE[cache_key] = session.resource(
    resource,
    endpoint_url=endpoint_url
)
return RESOURCE_CACHE[cache_key]
def get_boto_resource( resource, region=None, aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None, endpoint_url=None )
Get a boto resource connection.
2.068955
2.094429
0.987837
return boto3.session.Session(
    region_name=region,
    aws_secret_access_key=aws_secret_access_key,
    aws_access_key_id=aws_access_key_id,
    aws_session_token=aws_session_token
)
def get_boto_session( region, aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None )
Get a boto3 session.
1.558026
1.555988
1.00131
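A minimal usage sketch for the three boto helpers above; the service names and region are illustrative:

# Illustrative only: repeated calls with the same arguments (and no
# session token) are served from CLIENT_CACHE / RESOURCE_CACHE.
kms = get_boto_client('kms', region='us-east-1')
dynamodb = get_boto_resource('dynamodb', region='us-east-1')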
if not isinstance(str_or_bytes, six.text_type):
    return str_or_bytes.decode(encoding)
return str_or_bytes
def ensure_text(str_or_bytes, encoding='utf-8')
Ensures an input is a string, decoding if it is bytes.
2.033696
1.869413
1.087879
if isinstance(str_or_bytes, six.text_type):
    return str_or_bytes.encode(encoding, errors)
return str_or_bytes
def ensure_bytes(str_or_bytes, encoding='utf-8', errors='strict')
Ensures an input is bytes, encoding if it is a string.
1.869762
1.744552
1.071772
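A quick round-trip sketch of the two coercion helpers (on Python 3, six.text_type is str):

assert ensure_text(b'caf\xc3\xa9') == 'café'   # bytes are decoded
assert ensure_bytes('café') == b'caf\xc3\xa9'  # text is encoded
assert ensure_bytes(b'abc') == b'abc'          # bytes pass through unchanged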
'''
Find a key's alias by looking up its key_arn in the KEY_METADATA cache.
This function will only work after a key has been looked up by its alias
and is meant as a convenience function for turning an ARN that's already
been looked up back into its alias.
'''
for alias in self.KEY_METADATA:
    if self.KEY_METADATA[alias]['KeyMetadata']['Arn'] == key_arn:
        return alias
return None
def _get_key_alias_from_cache(self, key_arn)
Find a key's alias by looking up its key_arn in the KEY_METADATA cache. This function will only work after a key has been looked up by its alias and is meant as a convenience function for turning an ARN that's already been looked up back into its alias.
7.482182
1.687894
4.43285
''' Decrypt a token. '''
version, user_type, _from = self._parse_username(username)

if (version > self.maximum_token_version or
        version < self.minimum_token_version):
    raise TokenValidationError('Unacceptable token version.')

try:
    token_key = '{0}{1}{2}{3}'.format(
        hashlib.sha256(ensure_bytes(token)).hexdigest(),
        _from,
        self.to_auth_context,
        user_type
    )
except Exception:
    raise TokenValidationError('Authentication error.')

if token_key not in self.TOKENS:
    try:
        token = base64.b64decode(token)
        # Ensure normal context fields override whatever is in
        # extra_context.
        context = copy.deepcopy(self.extra_context)
        context['to'] = self.to_auth_context
        context['from'] = _from
        if version > 1:
            context['user_type'] = user_type
        data = self.kms_client.decrypt(
            CiphertextBlob=token,
            EncryptionContext=context
        )
        # Decrypt doesn't take KeyId as an argument. We need to verify
        # the correct key was used to do the decryption.
        # Annoyingly, the KeyId from the data is actually an arn.
        key_arn = data['KeyId']
        if user_type == 'service':
            if not self._valid_service_auth_key(key_arn):
                raise TokenValidationError(
                    'Authentication error (wrong KMS key).'
                )
        elif user_type == 'user':
            if not self._valid_user_auth_key(key_arn):
                raise TokenValidationError(
                    'Authentication error (wrong KMS key).'
                )
        else:
            raise TokenValidationError(
                'Authentication error. Unsupported user_type.'
            )
        plaintext = data['Plaintext']
        payload = json.loads(plaintext)
        key_alias = self._get_key_alias_from_cache(key_arn)
        ret = {'payload': payload, 'key_alias': key_alias}
    except TokenValidationError:
        raise
    except (ConnectionError, EndpointConnectionError):
        logging.exception('Failure connecting to AWS endpoint.')
        raise TokenValidationError(
            'Authentication error. Failure connecting to AWS endpoint.'
        )
    # We don't care what exception is thrown. For paranoia's sake, fail
    # here.
    except Exception:
        logging.exception('Failed to validate token.')
        raise TokenValidationError(
            'Authentication error. General error.'
        )
else:
    ret = self.TOKENS[token_key]

now = datetime.datetime.utcnow()
try:
    not_before = datetime.datetime.strptime(
        ret['payload']['not_before'],
        TIME_FORMAT
    )
    not_after = datetime.datetime.strptime(
        ret['payload']['not_after'],
        TIME_FORMAT
    )
except Exception:
    logging.exception(
        'Failed to get not_before and not_after from token payload.'
    )
    raise TokenValidationError(
        'Authentication error. Missing validity.'
    )
delta = (not_after - not_before).seconds / 60
if delta > self.auth_token_max_lifetime:
    logging.warning('Token used which exceeds max token lifetime.')
    raise TokenValidationError(
        'Authentication error. Token lifetime exceeded.'
    )
if (now < not_before) or (now > not_after):
    logging.warning('Invalid time validity for token.')
    raise TokenValidationError(
        'Authentication error. Invalid time validity for token.'
    )
self.TOKENS[token_key] = ret
return self.TOKENS[token_key]
def decrypt_token(self, username, token)
Decrypt a token.
2.979894
2.967023
1.004338
_from = self.auth_context['from']
if self.token_version == 1:
    return '{0}'.format(_from)
elif self.token_version == 2:
    _user_type = self.auth_context['user_type']
    return '{0}/{1}/{2}'.format(
        self.token_version,
        _user_type,
        _from
    )
def get_username(self)
Get a username formatted for a specific token version.
3.808836
2.987789
1.274801
# Generate string formatted timestamps for not_before and not_after,
# for the lifetime specified in minutes.
now = datetime.datetime.utcnow()
# Start the not_before time x minutes in the past, to avoid clock skew
# issues.
_not_before = now - datetime.timedelta(minutes=TOKEN_SKEW)
not_before = _not_before.strftime(TIME_FORMAT)
# Set the not_after time in the future, by the lifetime, but ensure the
# skew we applied to not_before is taken into account.
_not_after = now + datetime.timedelta(
    minutes=self.token_lifetime - TOKEN_SKEW
)
not_after = _not_after.strftime(TIME_FORMAT)
# Generate a json string for the encryption payload contents.
payload = json.dumps({
    'not_before': not_before,
    'not_after': not_after
})
token = self._get_cached_token()
if token:
    return token
# Generate a base64 encoded KMS encrypted token to use for
# authentication. We encrypt the token lifetime information as the
# payload for verification in Confidant.
try:
    token = self.kms_client.encrypt(
        KeyId=self.auth_key,
        Plaintext=payload,
        EncryptionContext=self.auth_context
    )['CiphertextBlob']
    token = base64.b64encode(ensure_bytes(token))
except (ConnectionError, EndpointConnectionError) as e:
    logging.exception('Failure connecting to AWS: {}'.format(str(e)))
    raise ServiceConnectionError()
except Exception:
    logging.exception('Failed to create auth token.')
    raise TokenGenerationError()
self._cache_token(token, not_after)
return token
def get_token(self)
Get an authentication token.
3.816371
3.765392
1.013539
key, value = arg.split("=")
if value.lower() in ("true", "false"):
    # bool("false") would be True, so compare against the literal string.
    value = value.lower() == "true"
elif INT_RE.match(value):
    value = int(value)
elif FLOAT_RE.match(value):
    value = float(value)
return (key, value)
def dictstr(arg)
Parse a key=value string as a tuple (key, value) that can be provided as an argument to dict()
2.145576
1.994171
1.075924
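One plausible home for dictstr is as an argparse type, collecting repeated key=value flags into a dict (a sketch; the --opt flag name is made up):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--opt', action='append', type=dictstr, default=[])
args = parser.parse_args(['--opt', 'depth=3', '--opt', 'verbose=true'])
options = dict(args.opt)  # {'depth': 3, 'verbose': True}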
words = [
    dict(v, **{'sentence': i})
    for i, s in enumerate(matches['sentences'])
    for k, v in s.items()
    if k != 'length'
]
return words
def regex_matches_to_indexed_words(matches)
Transforms tokensregex and semgrex matches to indexed words. :param matches: unprocessed regex matches :return: flat array of indexed words
5.988665
8.027411
0.746027
self.ensure_alive()
try:
    input_format = properties.get("inputFormat", "text")
    if input_format == "text":
        ctype = "text/plain; charset=utf-8"
    elif input_format == "serialized":
        ctype = "application/x-protobuf"
    else:
        raise ValueError("Unrecognized inputFormat " + input_format)

    if date:
        params = {'properties': str(properties), 'date': str(date)}
    else:
        params = {'properties': str(properties)}

    r = requests.post(self.endpoint,
                      params=params,
                      data=buf,
                      headers={'content-type': ctype},
                      timeout=(self.timeout * 2) / 1000)
    r.raise_for_status()
    return r
except requests.HTTPError as e:
    if r.text == "CoreNLP request timed out. Your document may be too long.":
        raise TimeoutException(r.text)
    else:
        raise AnnotationException(r.text)
def _request(self, buf, properties, date=None)
Send a request to the CoreNLP server.

:param (str | bytes) buf: encoded text for the CoreNLPServer to parse
:param (dict) properties: properties that the server expects
:param (str) date: reference date of document, used by server to set docDate - expects YYYY-MM-DD
:return: request result
3.174983
2.917809
1.08814
# set properties for server call
if properties is None:
    properties = self.default_properties
    properties.update({
        'annotators': ','.join(annotators or self.default_annotators),
        'inputFormat': 'text',
        'outputFormat': self.default_output_format,
        'serializer': 'edu.stanford.nlp.pipeline.ProtobufAnnotationSerializer'
    })
elif "annotators" not in properties:
    properties.update({'annotators': ','.join(annotators or self.default_annotators)})
# if an output_format is specified, use that to override
if output_format is not None:
    properties["outputFormat"] = output_format
# make the request
r = self._request(text.encode('utf-8'), properties, date)
# customize what is returned based on outputFormat
if properties["outputFormat"] == "serialized":
    doc = Document()
    parseFromDelimitedString(doc, r.content)
    return doc
elif properties["outputFormat"] == "json":
    return r.json()
elif properties["outputFormat"] in ["text", "conllu", "conll", "xml"]:
    return r.text
else:
    return r
def annotate(self, text, annotators=None, output_format=None, properties=None, date=None)
Send a request to the CoreNLP server.

:param (str | unicode) text: raw text for the CoreNLPServer to parse
:param (list | string) annotators: list of annotators to use
:param (str) output_format: output type from server: serialized, json, text, conll, conllu, or xml
:param (dict) properties: properties that the server expects
:param (str) date: reference date of document, used by server to set docDate - expects YYYY-MM-DD
:return: request result
3.004758
2.799878
1.073175
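A usage sketch for annotate(); `client` stands in for an instance of the class these methods belong to, and the annotator list is illustrative:

# Hypothetical call: request JSON output for a few basic annotators.
ann = client.annotate(
    "Chris wrote a simple sentence.",
    annotators=["tokenize", "ssplit", "pos"],
    output_format="json",
)
tokens = ann["sentences"][0]["tokens"]  # JSON mirrors the server's document structure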
self.ensure_alive()
if properties is None:
    properties = self.default_properties
    properties.update({
        'annotators': ','.join(annotators or self.default_annotators),
        'inputFormat': 'text',
        'outputFormat': self.default_output_format,
        'serializer': 'edu.stanford.nlp.pipeline.ProtobufAnnotationSerializer'
    })
elif "annotators" not in properties:
    properties.update({'annotators': ','.join(annotators or self.default_annotators)})

# HACK: For some stupid reason, CoreNLPServer will timeout if we
# need to annotate something from scratch. So, we need to call
# this to ensure that the _regex call doesn't timeout.
self.annotate(text, properties=properties)

try:
    # An error occurs unless properties are put in params.
    input_format = properties.get("inputFormat", "text")
    if input_format == "text":
        ctype = "text/plain; charset=utf-8"
    elif input_format == "serialized":
        ctype = "application/x-protobuf"
    else:
        raise ValueError("Unrecognized inputFormat " + input_format)
    # change request method from `get` to `post` as required by CoreNLP
    r = requests.post(
        self.endpoint + path,
        params={
            'pattern': pattern,
            'filter': filter,
            'properties': str(properties)
        },
        data=text,
        headers={'content-type': ctype},
        timeout=(self.timeout * 2) / 1000,
    )
    r.raise_for_status()
    return json.loads(r.text)
except requests.HTTPError as e:
    if r.text.startswith("Timeout"):
        raise TimeoutException(r.text)
    else:
        raise AnnotationException(r.text)
except json.JSONDecodeError:
    raise AnnotationException(r.text)
def __regex(self, path, text, pattern, filter, annotators=None, properties=None)
Send a regex-related request to the CoreNLP server.

:param (str | unicode) path: the path for the regex endpoint
:param text: raw text for the CoreNLPServer to apply the regex
:param (str | unicode) pattern: regex pattern
:param (bool) filter: option to filter sentences that contain matches, if false returns matches
:param properties: properties that the server expects
:return: request result
3.469643
3.41029
1.017404
return {
    "customAnnotatorClass.{}".format(self.name): "edu.stanford.nlp.pipeline.GenericWebServiceAnnotator",
    "generic.endpoint": "http://{}:{}".format(self.host, self.port),
    "generic.requires": ",".join(self.requires),
    "generic.provides": ",".join(self.provides),
}
def properties(self)
Defines a Java property to define this annotator to CoreNLP.
5.163545
3.933751
1.312626
httpd = HTTPServer((self.host, self.port), self._Handler)
sa = httpd.socket.getsockname()
serve_message = "Serving HTTP on {host} port {port} (http://{host}:{port}/) ..."
print(serve_message.format(host=sa[0], port=sa[1]))
try:
    httpd.serve_forever()
except KeyboardInterrupt:
    print("\nKeyboard interrupt received, exiting.")
    httpd.shutdown()
def run(self)
Runs the server using Python's simple HTTPServer. TODO: make this multithreaded.
1.934364
1.79222
1.079312
flags = _flag_transform(flags)
return _wcparse.translate(_wcparse.split(patterns, flags), flags)
def translate(patterns, *, flags=0)
Translate `fnmatch` pattern.
12.921545
10.298353
1.25472
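Judging by the translate()/compile() pair later in this section, the return value is a pair of lists of regex strings (positive patterns, negative patterns); a sketch under that assumption:

import re

positive, negative = translate('*.txt')    # assumption: (positive, negative) lists
assert re.match(positive[0], 'notes.txt')  # each entry is an anchored regex string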
patterns = None
flags = self.flags
if pathname:
    flags |= _wcparse.PATHNAME
if pattern:
    patterns = _wcparse.WcSplit(pattern, flags=flags).split()
return _wcparse.compile(patterns, flags) if patterns else patterns
def _compile_wildcard(self, pattern, pathname=False)
Compile or format the wildcard inclusion/exclusion pattern.
6.102576
5.809637
1.050423
if not isinstance(file_pattern, _wcparse.WcRegexp):
    file_pattern = self._compile_wildcard(file_pattern, self.file_pathname)
if not isinstance(folder_exclude_pattern, _wcparse.WcRegexp):
    folder_exclude_pattern = self._compile_wildcard(folder_exclude_pattern, self.dir_pathname)
return file_pattern, folder_exclude_pattern
def _compile(self, file_pattern, folder_exclude_pattern)
Compile patterns.
3.122582
2.900066
1.076728
valid = False
fullpath = os.path.join(base, name)
if self.file_check is not None and self.compare_file(fullpath[self._base_len:] if self.file_pathname else name):
    valid = True
if valid and (not self.show_hidden and util.is_hidden(fullpath)):
    valid = False
return self.on_validate_file(base, name) if valid else valid
def _valid_file(self, base, name)
Return whether a file can be searched.
5.508471
5.25042
1.049149
valid = True
fullpath = os.path.join(base, name)
if (
    not self.recursive or
    (
        self.folder_exclude_check is not None and
        not self.compare_directory(fullpath[self._base_len:] if self.dir_pathname else name)
    )
):
    valid = False
if valid and (not self.show_hidden and util.is_hidden(fullpath)):
    valid = False
return self.on_validate_directory(base, name) if valid else valid
def _valid_folder(self, base, name)
Return whether a folder can be searched.
6.154159
5.759128
1.068592
return not self.folder_exclude_check.match(directory + self.sep if self.dir_pathname else directory)
def compare_directory(self, directory)
Compare folder.
30.606281
26.354551
1.161328
self._base_len = len(self.base)

for base, dirs, files in os.walk(self.base, followlinks=self.follow_links):
    # Remove child folders based on exclude rules
    for name in dirs[:]:
        try:
            if not self._valid_folder(base, name):
                dirs.remove(name)
        except Exception:
            dirs.remove(name)
            value = self.on_error(base, name)
            if value is not None:  # pragma: no cover
                yield value

        if self._abort:
            break

    # Search files if they were found
    if len(files):
        # Only search files that are in the include rules
        for name in files:
            try:
                valid = self._valid_file(base, name)
            except Exception:
                valid = False
                value = self.on_error(base, name)
                if value is not None:
                    yield value

            if valid:
                yield self.on_match(base, name)
            else:
                self._skipped += 1
                value = self.on_skip(base, name)
                if value is not None:
                    yield value

            if self._abort:
                break

    if self._abort:
        break
def _walk(self)
Start search for valid files.
2.896122
2.619707
1.105514
if flags & MINUSNEGATE:
    return flags & NEGATE and pattern[0:1] in MINUS_NEGATIVE_SYM
else:
    return flags & NEGATE and pattern[0:1] in NEGATIVE_SYM
def is_negative(pattern, flags)
Check if negative pattern.
5.440042
5.001845
1.087607
if flags & BRACE:
    for p in ([patterns] if isinstance(patterns, (str, bytes)) else patterns):
        try:
            yield from bracex.iexpand(p, keep_escapes=True)
        except Exception:  # pragma: no cover
            # We will probably never hit this as `bracex`
            # doesn't throw any specific exceptions and
            # should normally always parse, but just in case.
            yield p
else:
    for p in ([patterns] if isinstance(patterns, (str, bytes)) else patterns):
        yield p
def expand_braces(patterns, flags)
Expand braces.
5.318059
5.107006
1.041326
if not bool(flags & CASE_FLAGS):
    case_sensitive = util.is_case_sensitive()
elif flags & FORCECASE:
    case_sensitive = True
else:
    case_sensitive = False
return case_sensitive
def get_case(flags)
Parse flags for case sensitivity settings.
5.614908
4.215503
1.331966
return (util.platform() != "windows" or (not bool(flags & REALPATH) and get_case(flags))) and not flags & _FORCEWIN
def is_unix_style(flags)
Check if we should use Unix style.
30.55295
23.076723
1.323973
positive = []
negative = []

if isinstance(patterns, (str, bytes)):
    patterns = [patterns]

flags |= _TRANSLATE
for pattern in patterns:
    for expanded in expand_braces(pattern, flags):
        (negative if is_negative(expanded, flags) else positive).append(
            WcParse(expanded, flags & FLAG_MASK).parse()
        )

if patterns and flags & REALPATH and negative and not positive:
    positive.append(_compile(b'**' if isinstance(patterns[0], bytes) else '**', flags))

return positive, negative
def translate(patterns, flags)
Translate patterns.
6.642194
6.539932
1.015636
if flags & SPLIT:
    splitted = []
    for pattern in ([patterns] if isinstance(patterns, (str, bytes)) else patterns):
        splitted.extend(WcSplit(pattern, flags).split())
    return splitted
else:
    return patterns
def split(patterns, flags)
Split patterns.
4.57996
4.499238
1.017941
positive = []
negative = []

if isinstance(patterns, (str, bytes)):
    patterns = [patterns]

for pattern in patterns:
    for expanded in expand_braces(pattern, flags):
        (negative if is_negative(expanded, flags) else positive).append(_compile(expanded, flags))

if patterns and flags & REALPATH and negative and not positive:
    positive.append(_compile(b'**' if isinstance(patterns[0], bytes) else '**', flags))

return WcRegexp(tuple(positive), tuple(negative), flags & REALPATH, flags & PATHNAME, flags & FOLLOW)
def compile(patterns, flags)
Compile patterns.
5.882987
5.565105
1.057121
return re.compile(WcParse(pattern, flags & FLAG_MASK).parse())
def _compile(pattern, flags)
Compile the pattern to regex.
44.276672
32.899807
1.345803
matched = False
base = None

m = pattern.fullmatch(filename)
if m:
    matched = True
    # Lets look at the captured `globstar` groups and see if that part of the path
    # contains symlinks.
    if not follow:
        groups = m.groups()
        last = len(groups)
        for i, star in enumerate(m.groups(), 1):
            if star:
                parts = star.strip(sep).split(sep)
                if base is None:
                    base = filename[:m.start(i)]
                for part in parts:
                    base = os.path.join(base, part)
                    is_link = symlinks.get(base, None)
                    if is_link is not None:
                        matched = not is_link
                    elif i != last or os.path.isdir(base):
                        is_link = os.path.islink(base)
                        symlinks[base] = is_link
                        matched = not is_link
                    if not matched:
                        break
                if not matched:
                    break
return matched
def _fs_match(pattern, filename, sep, follow, symlinks)
Match path against the pattern. Since `globstar` doesn't match symlinks (unless `FOLLOW` is enabled), we must look for symlinks. If we identify a symlink in a `globstar` match, we know this result should not actually match.
3.598555
3.383675
1.063505
sep = '\\' if util.platform() == "windows" else '/'
if isinstance(filename, bytes):
    sep = os.fsencode(sep)

if not filename.endswith(sep) and os.path.isdir(filename):
    filename += sep

matched = False
for pattern in include:
    if _fs_match(pattern, filename, sep, follow, symlinks):
        matched = True
        break

if matched:
    matched = True
    if exclude:
        for pattern in exclude:
            if _fs_match(pattern, filename, sep, follow, symlinks):
                matched = False
                break

return matched
def _match_real(filename, include, exclude, follow, symlinks)
Match real filename includes and excludes.
2.660722
2.614005
1.017872
if real:
    symlinks = {}
    if isinstance(filename, bytes):
        curdir = os.fsencode(os.curdir)
        mount = RE_BWIN_MOUNT if util.platform() == "windows" else RE_BMOUNT
    else:
        curdir = os.curdir
        mount = RE_WIN_MOUNT if util.platform() == "windows" else RE_MOUNT

    if not mount.match(filename):
        exists = os.path.lexists(os.path.join(curdir, filename))
    else:
        exists = os.path.lexists(filename)

    if not exists:
        return False

    if path:
        return _match_real(filename, include, exclude, follow, symlinks)

matched = False
for pattern in include:
    if pattern.fullmatch(filename):
        matched = True
        break

if not include and exclude:
    matched = True

if matched:
    matched = True
    if exclude:
        for pattern in exclude:
            if not pattern.fullmatch(filename):
                matched = False
                break

return matched
def _match_pattern(filename, include, exclude, real, path, follow)
Match includes and excludes.
3.34347
3.318676
1.007471
c = next(i)
if c == '!':
    c = next(i)
if c in ('^', '-', '['):
    c = next(i)

while c != ']':
    if c == '\\':
        # Handle escapes
        subindex = i.index
        try:
            self._references(i, True)
        except PathNameException:
            raise StopIteration
        except StopIteration:
            i.rewind(i.index - subindex)
    elif c == '/':
        raise StopIteration
    c = next(i)
def _sequence(self, i)
Handle character group.
5.677447
5.226519
1.086277
value = ''
c = next(i)
if c == '\\':
    # \\
    if sequence and self.bslash_abort:
        raise PathNameException
    value = c
elif c == '/':
    # \/
    if sequence:
        raise PathNameException
    i.rewind(1)
else:
    # \a, \b, \c, etc.
    pass
return value
def _references(self, i, sequence=False)
Handle references.
7.032797
6.733582
1.044436
# Start list parsing
success = True
index = i.index
list_type = c

try:
    c = next(i)
    if c != '(':
        raise StopIteration
    while c != ')':
        c = next(i)

        if self.extend and c in EXT_TYPES and self.parse_extend(c, i):
            continue

        if c == '\\':
            try:
                self._references(i)
            except StopIteration:
                pass
        elif c == '[':
            index = i.index
            try:
                self._sequence(i)
            except StopIteration:
                i.rewind(i.index - index)

except StopIteration:
    success = False
    c = list_type
    i.rewind(i.index - index)

return success
def parse_extend(self, c, i)
Parse extended pattern lists.
4.053314
3.890528
1.041842
if l and value in (b'', ''):
    return

globstar = value in (b'**', '**') and self.globstar
magic = self.is_magic(value)
if magic:
    value = compile(value, self.flags)
l.append(WcGlob(value, magic, globstar, dir_only, False))
def store(self, value, l, dir_only)
Group patterns by literals and potential magic patterns.
8.501374
6.928762
1.226969
split_index = []
parts = []
start = -1

pattern = self.pattern.decode('latin-1') if self.is_bytes else self.pattern

i = util.StringIter(pattern)
iter(i)

# Detect and store away windows drive as a literal
if self.win_drive_detect:
    m = RE_WIN_PATH.match(pattern)
    if m:
        drive = m.group(0).replace('\\\\', '\\')
        if self.is_bytes:
            drive = drive.encode('latin-1')
        parts.append(WcGlob(drive, False, False, True, True))
        start = m.end(0) - 1
        i.advance(start + 1)
    elif pattern.startswith('\\\\'):
        parts.append(WcGlob(b'\\' if self.is_bytes else '\\', False, False, True, True))
        start = 1
        i.advance(2)
elif not self.win_drive_detect and pattern.startswith('/'):
    parts.append(WcGlob(b'/' if self.is_bytes else '/', False, False, True, True))
    start = 0
    i.advance(1)

for c in i:

    if self.extend and c in EXT_TYPES and self.parse_extend(c, i):
        continue

    if c == '\\':
        index = i.index
        value = ''
        try:
            value = self._references(i)
            if self.bslash_abort and value == '\\':
                split_index.append((i.index - 2, 1))
        except StopIteration:
            i.rewind(i.index - index)
            if self.bslash_abort:
                split_index.append((i.index - 1, 0))
    elif c == '/':
        split_index.append((i.index - 1, 0))
    elif c == '[':
        index = i.index
        try:
            self._sequence(i)
        except StopIteration:
            i.rewind(i.index - index)

for split, offset in split_index:
    if self.is_bytes:
        value = pattern[start + 1:split].encode('latin-1')
    else:
        value = pattern[start + 1:split]
    self.store(value, parts, True)
    start = split + offset

if start < len(pattern):
    if self.is_bytes:
        value = pattern[start + 1:].encode('latin-1')
    else:
        value = pattern[start + 1:]
    if value:
        self.store(value, parts, False)

if len(pattern) == 0:
    parts.append(WcGlob(pattern.encode('latin-1') if self.is_bytes else pattern, False, False, False, False))

return parts
def split(self)
Start parsing the pattern.
2.958503
2.853337
1.036857
c = next(i)
if c == '\\':
    # \\
    if sequence and self.bslash_abort:
        raise PathNameException
elif c == '/':
    # \/
    if sequence and self.pathname:
        raise PathNameException
    elif self.pathname:
        i.rewind(1)
else:
    # \a, \b, \c, etc.
    pass
def _references(self, i, sequence=False)
Handle references.
7.148799
6.761359
1.057302
split_index = []
parts = []

pattern = self.pattern.decode('latin-1') if self.is_bytes else self.pattern

i = util.StringIter(pattern)
iter(i)
for c in i:

    if self.extend and c in EXT_TYPES and self.parse_extend(c, i):
        continue

    if c == '|':
        split_index.append(i.index - 1)
    elif c == '\\':
        index = i.index
        try:
            self._references(i)
        except StopIteration:
            i.rewind(i.index - index)
    elif c == '[':
        index = i.index
        try:
            self._sequence(i)
        except StopIteration:
            i.rewind(i.index - index)

start = -1
for split in split_index:
    p = pattern[start + 1:split]
    parts.append(p.encode('latin-1') if self.is_bytes else p)
    start = split

if start < len(pattern):
    p = pattern[start + 1:]
    parts.append(p.encode('latin-1') if self.is_bytes else p)

return tuple(parts)
def split(self)
Start parsing the pattern.
3.248627
3.001119
1.082472
if self.dir_start and not self.after_start:
    self.set_after_start()
elif not self.dir_start and self.after_start:
    self.reset_dir_track()
def update_dir_state(self)
Update the directory state. If we are at the directory start, update to after start state (the character right after). If at after start, reset state.
4.867629
3.184394
1.528589
if self.pathname:
    value = self.seq_path_dot if self.after_start and not self.dot else self.seq_path
    if self.after_start:
        value = self.no_dir + value
else:
    value = _NO_DOT if self.after_start and not self.dot else ""
self.reset_dir_track()
return value
def _restrict_sequence(self)
Restrict sequence.
9.823457
8.185353
1.200126
removed = False
first = result[-2]
v1 = ord(first[1:2] if len(first) > 1 else first)
v2 = ord(last[1:2] if len(last) > 1 else last)
if v2 < v1:
    result.pop()
    result.pop()
    removed = True
else:
    result.append(last)
return removed
def _sequence_range_check(self, result, last)
If range backwards, remove it. A bad range will cause the regular expression to fail, so we need to remove it, but return that we removed it so the caller can know the sequence wasn't empty. Caller will have to craft a sequence that makes sense if empty at the end with either an impossible sequence for inclusive sequences or a sequence that matches everything for an exclusive sequence.
3.353859
2.997635
1.118835
last_posix = False
m = i.match(RE_POSIX)
if m:
    last_posix = True
    # Cannot do range with posix class,
    # so escape last `-` if we think this
    # is the end of a range.
    if end_range and i.index - 1 >= end_range:
        result[-1] = '\\' + result[-1]
    posix_type = uniprops.POSIX_BYTES if self.is_bytes else uniprops.POSIX
    result.append(uniprops.get_posix_property(m.group(1), posix_type))
return last_posix
def _handle_posix(self, i, result, end_range)
Handle posix classes.
6.829758
6.399475
1.067237
result = ['[']
end_range = 0
escape_hyphen = -1
removed = False
last_posix = False

c = next(i)
if c in ('!', '^'):
    # Handle negate char
    result.append('^')
    c = next(i)
if c == '[':
    last_posix = self._handle_posix(i, result, 0)
    if not last_posix:
        result.append(re.escape(c))
    c = next(i)
elif c in ('-', ']'):
    result.append(re.escape(c))
    c = next(i)

while c != ']':
    if c == '-':
        if last_posix:
            result.append('\\' + c)
            last_posix = False
        elif i.index - 1 > escape_hyphen:
            # Found a range delimiter.
            # Mark the next two characters as needing to be escaped if hyphens.
            # The next character would be the end char range (s-e),
            # and the one after that would be the potential start char range
            # of a new range (s-es-e), so neither can be legitimate range delimiters.
            result.append(c)
            escape_hyphen = i.index + 1
            end_range = i.index
        elif end_range and i.index - 1 >= end_range:
            if self._sequence_range_check(result, '\\' + c):
                removed = True
            end_range = 0
        else:
            result.append('\\' + c)
        c = next(i)
        continue
    last_posix = False

    if c == '[':
        last_posix = self._handle_posix(i, result, end_range)
        if last_posix:
            c = next(i)
            continue

    if c == '\\':
        # Handle escapes
        subindex = i.index
        try:
            value = self._references(i, True)
        except PathNameException:
            raise StopIteration
        except StopIteration:
            i.rewind(i.index - subindex)
            value = r'\\'
    elif c == '/':
        if self.pathname:
            raise StopIteration
        value = c
    elif c in SET_OPERATORS:
        # Escape &, |, and ~ to avoid &&, ||, and ~~
        value = '\\' + c
    else:
        # Anything else
        value = c

    if end_range and i.index - 1 >= end_range:
        if self._sequence_range_check(result, value):
            removed = True
        end_range = 0
    else:
        result.append(value)

    c = next(i)

result.append(']')
# Bad range removed.
if removed:
    value = "".join(result)
    if value == '[]':
        # We specified some ranges, but they are all
        # out of reach. Create an impossible sequence to match.
        result = ['[^%s]' % ('\x00-\xff' if self.is_bytes else uniprops.UNICODE_RANGE)]
    elif value == '[^]':
        # We specified some ranges, but they are all
        # out of reach. Since this is exclusive,
        # that means we can match *anything*.
        result = ['[%s]' % ('\x00-\xff' if self.is_bytes else uniprops.UNICODE_RANGE)]
    else:
        result = [value]

if self.pathname or self.after_start:
    return self._restrict_sequence() + ''.join(result)

return ''.join(result)
def _sequence(self, i)
Handle character group.
4.255488
4.151525
1.025042
value = ''
c = next(i)
if c == '\\':
    # \\
    if sequence and self.bslash_abort:
        raise PathNameException
    value = r'\\'
    if self.bslash_abort:
        if not self.in_list:
            value = self.get_path_sep() + _ONE_OR_MORE
            self.set_start_dir()
        else:
            value = self._restrict_extended_slash() + value
elif c == '/':
    # \/
    if sequence and self.pathname:
        raise PathNameException
    if self.pathname:
        value = r'\\'
        if self.in_list:
            value = self._restrict_extended_slash() + value
        i.rewind(1)
    else:
        value = re.escape(c)
else:
    # \a, \b, \c, etc.
    value = re.escape(c)
    if c == '.' and self.after_start and self.in_list:
        self.allow_special_dir = True
self.reset_dir_track()
return value
def _references(self, i, sequence=False)
Handle references.
5.324526
5.238581
1.016406
if self.pathname:
    if self.after_start and not self.dot:
        star = self.path_star_dot2
        globstar = self.path_gstar_dot2
    elif self.after_start:
        star = self.path_star_dot1
        globstar = self.path_gstar_dot1
    else:
        star = self.path_star
        globstar = self.path_gstar_dot1
    if self.globstar_capture:
        globstar = '({})'.format(globstar)
else:
    if self.after_start and not self.dot:
        star = _NO_DOT + _STAR
    else:
        star = _STAR
    globstar = ''
value = star

if self.after_start and self.globstar and not self.in_list:
    skip = False
    try:
        c = next(i)
        if c != '*':
            i.rewind(1)
            raise StopIteration
    except StopIteration:
        # Could not acquire a second star, so assume single star pattern
        skip = True

    if not skip:
        try:
            index = i.index
            c = next(i)
            if c == '\\':
                try:
                    self._references(i, True)
                    # Was not what we expected
                    # Assume two single stars
                except PathNameException:
                    # Looks like escape was a valid slash
                    # Store pattern accordingly
                    value = globstar
                except StopIteration:
                    # Ran out of characters so assume backslash
                    # count as a double star
                    if self.sep == '\\':
                        value = globstar
            elif c == '/' and not self.bslash_abort:
                value = globstar

            if value != globstar:
                i.rewind(i.index - index)
        except StopIteration:
            # Could not acquire directory slash due to no more characters
            # Use double star
            value = globstar

if self.after_start and value != globstar:
    value = _NEED_CHAR + value

# Consume duplicate stars
try:
    c = next(i)
    while c == '*':
        c = next(i)
    i.rewind(1)
except StopIteration:
    pass

self.reset_dir_track()
if value == globstar:
    sep = _GLOBSTAR_DIV % self.get_path_sep()
    # Check if the last entry was a `globstar`
    # If so, don't bother adding another.
    if current[-1] != sep:
        if current[-1] == '':
            # At the beginning of the pattern
            current[-1] = value
        else:
            # Replace the last path separator
            current[-1] = _NEED_SEP % self.get_path_sep()
            current.append(value)
        self.consume_path_sep(i)
        current.append(sep)
    self.set_start_dir()
else:
    current.append(value)
def _handle_star(self, i, current)
Handle star.
4.814002
4.790131
1.004983
if not self.inv_ext:
    return

index = len(current) - 1
while index >= 0:
    if isinstance(current[index], InvPlaceholder):
        content = current[index + 1:]
        content.append(_EOP if not self.pathname else self.path_eop)
        current[index] = (''.join(content)) + (_EXCLA_GROUP_CLOSE % str(current[index]))
    index -= 1
self.inv_ext = 0
def clean_up_inverse(self, current)
Clean up current.

Python doesn't have variable lookbehinds, so we have to do negative lookaheads.
!(...) when converted to regular expression is atomic, so once it matches, that's it.
So we use the pattern `(?:(?!(?:stuff|to|exclude)<x>))[^/]*?)` where <x> is everything
that comes after the negative group. `!(this|that)other` --> `(?:(?!(?:this|that)other))[^/]*?)`.

We have to update the list before `|` in nested cases: `*(!(...)|stuff)`.
Before we close a parent `extmatch`: `*(!(...))`.
And of course on path separators (when path mode is on): `!(...)/stuff`.
Lastly we make sure all is accounted for when finishing the pattern at the end.
If there is nothing to store, we store `$`: `(?:(?!(?:this|that)$))[^/]*?)`.
8.4157
7.619852
1.104444
# Save state
temp_dir_start = self.dir_start
temp_after_start = self.after_start
temp_in_list = self.in_list
temp_inv_ext = self.inv_ext
self.in_list = True
if reset_dot:
    self.allow_special_dir = False

# Start list parsing
success = True
index = i.index
list_type = c
extended = []

try:
    c = next(i)
    if c != '(':
        raise StopIteration

    while c != ')':
        c = next(i)

        if self.extend and c in EXT_TYPES and self.parse_extend(c, i, extended):
            # Nothing more to do
            pass
        elif c == '*':
            self._handle_star(i, extended)
        elif c == '.' and self.after_start:
            extended.append(re.escape(c))
            self.allow_special_dir = True
            self.reset_dir_track()
        elif c == '?':
            extended.append(self._restrict_sequence() + _QMARK)
        elif c == '/':
            if self.pathname:
                extended.append(self._restrict_extended_slash())
            extended.append(re.escape(c))
        elif c == "|":
            self.clean_up_inverse(extended)
            extended.append(c)
            if temp_after_start:
                self.set_start_dir()
        elif c == '\\':
            try:
                extended.append(self._references(i))
            except StopIteration:
                # We've reached the end.
                # Do nothing because this is going to abort the `extmatch` anyways.
                pass
        elif c == '[':
            subindex = i.index
            try:
                extended.append(self._sequence(i))
            except StopIteration:
                i.rewind(i.index - subindex)
                extended.append(r'\[')
        elif c != ')':
            extended.append(re.escape(c))

        self.update_dir_state()

    self.clean_up_inverse(extended)

    if list_type == '?':
        current.append(_QMARK_GROUP % ''.join(extended))
    elif list_type == '*':
        current.append(_STAR_GROUP % ''.join(extended))
    elif list_type == '+':
        current.append(_PLUS_GROUP % ''.join(extended))
    elif list_type == '@':
        current.append(_GROUP % ''.join(extended))
    elif list_type == '!':
        self.inv_ext += 1
        # If pattern is at the end, anchor the match to the end.
        current.append(_EXCLA_GROUP % ''.join(extended))
        if self.pathname:
            if not temp_after_start or self.allow_special_dir:
                star = self.path_star
            elif temp_after_start and not self.dot:
                star = self.path_star_dot2
            else:
                star = self.path_star_dot1
        else:
            if not temp_after_start or self.dot:
                star = _STAR
            else:
                star = _NO_DOT + _STAR

        if temp_after_start:
            star = _NEED_CHAR + star
        # Place holder for closing, but store the proper star
        # so we know which one to use
        current.append(InvPlaceholder(star))

except StopIteration:
    success = False
    self.inv_ext = temp_inv_ext
    i.rewind(i.index - index)

# Either restore if extend parsing failed, or reset if it worked
if not temp_in_list:
    self.in_list = False
if success:
    self.reset_dir_track()
else:
    self.dir_start = temp_dir_start
    self.after_start = temp_after_start

return success
def parse_extend(self, c, i, current, reset_dot=False)
Parse extended pattern lists.
3.900231
3.865463
1.008994
try:
    if self.bslash_abort:
        count = -1
        c = '\\'
        while c == '\\':
            count += 1
            c = next(i)
        i.rewind(1)
        # Rewind one more if we have an odd number (escape): \\\*
        if count > 0 and count % 2:
            i.rewind(1)
    else:
        c = '/'
        while c == '/':
            c = next(i)
        i.rewind(1)
except StopIteration:
    pass
def consume_path_sep(self, i)
Consume any consecutive path separators, as they count as one.
5.305905
5.142587
1.031758
self.set_after_start()
i = util.StringIter(pattern)
iter(i)
root_specified = False
if self.win_drive_detect:
    m = RE_WIN_PATH.match(pattern)
    if m:
        drive = m.group(0).replace('\\\\', '\\')
        slash = False  # initialize so the check below is always defined
        if drive.endswith('\\'):
            slash = True
            drive = drive[:-1]
        current.append(re.escape(drive))
        if slash:
            current.append(self.get_path_sep() + _ONE_OR_MORE)
        i.advance(m.end(0))
        self.consume_path_sep(i)
        root_specified = True
    elif pattern.startswith('\\\\'):
        root_specified = True
elif not self.win_drive_detect and self.pathname and pattern.startswith('/'):
    root_specified = True

if not root_specified and self.realpath:
    current.append(_NO_WIN_ROOT if self.win_drive_detect else _NO_ROOT)
    current.append('')

for c in i:

    index = i.index
    if self.extend and c in EXT_TYPES and self.parse_extend(c, i, current, True):
        # Nothing to do
        pass
    elif c == '*':
        self._handle_star(i, current)
    elif c == '?':
        current.append(self._restrict_sequence() + _QMARK)
    elif c == '/':
        if self.pathname:
            self.set_start_dir()
            self.clean_up_inverse(current)
            current.append(self.get_path_sep() + _ONE_OR_MORE)
            self.consume_path_sep(i)
        else:
            current.append(re.escape(c))
    elif c == '\\':
        index = i.index
        try:
            value = self._references(i)
            if self.dir_start:
                self.clean_up_inverse(current)
                self.consume_path_sep(i)
            current.append(value)
        except StopIteration:
            i.rewind(i.index - index)
            current.append(re.escape(c))
    elif c == '[':
        index = i.index
        try:
            current.append(self._sequence(i))
        except StopIteration:
            i.rewind(i.index - index)
            current.append(re.escape(c))
    else:
        current.append(re.escape(c))

    self.update_dir_state()

self.clean_up_inverse(current)

if self.pathname:
    current.append(_PATH_TRAIL % self.get_path_sep())
def root(self, pattern, current)
Start parsing the pattern.
3.51934
3.44413
1.021837
result = ['']
negative = False

p = util.norm_pattern(self.pattern, not self.unix, self.raw_chars)
p = p.decode('latin-1') if self.is_bytes else p

if is_negative(p, self.flags):
    negative = True
    p = p[1:]

self.root(p, result)

case_flag = 'i' if not self.case_sensitive else ''
if util.PY36:
    pattern = (
        r'^(?!(?s%s:%s)$).*?$' if negative and not self.globstar_capture else r'^(?s%s:%s)$'
    ) % (case_flag, ''.join(result))
else:
    pattern = (
        r'(?s%s)^(?!(?:%s)$).*?$' if negative and not self.globstar_capture else r'(?s%s)^(?:%s)$'
    ) % (case_flag, ''.join(result))

if self.is_bytes:
    pattern = pattern.encode('latin-1')

return pattern
def parse(self)
Parse pattern list.
4.514182
4.127253
1.09375
return _match_pattern(filename, self._include, self._exclude, self._real, self._path, self._follow)
def match(self, filename)
Match filename.
12.308674
9.828152
1.252389
# Here we force `PATHNAME`.
flags = (flags & FLAG_MASK) | _wcparse.PATHNAME
if flags & _wcparse.REALPATH and util.platform() == "windows":
    flags |= _wcparse._FORCEWIN
if flags & _wcparse.FORCECASE:
    flags ^= _wcparse.FORCECASE
return flags
def _flag_transform(flags)
Transform flags to glob defaults.
9.986062
8.83708
1.130018
return list(iglob(util.to_tuple(patterns), flags=flags))
def glob(patterns, *, flags=0)
Glob.
14.344366
13.912487
1.031043
flags = _flag_transform(flags)
if not _wcparse.is_unix_style(flags):
    filename = util.norm_slash(filename)
return _wcparse.compile(_wcparse.split(patterns, flags), flags).match(filename)
def globmatch(filename, patterns, *, flags=0)
Check if filename matches pattern. By default, case sensitivity is determined by the file system, but if `case_sensitive` is set, that is respected instead.
8.296803
8.478667
0.97855
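A sketch of globmatch() in action; GLOBSTAR is assumed to be one of this module's flag constants enabling `**`:

# Assumed flag constant; with it, `**` can span directory separators.
assert globmatch('src/deep/file.py', '**/*.py', flags=GLOBSTAR)
assert not globmatch('src/deep/file.py', '*.py', flags=GLOBSTAR)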
matches = []

flags = _flag_transform(flags)
unix = _wcparse.is_unix_style(flags)
obj = _wcparse.compile(_wcparse.split(patterns, flags), flags)

for filename in filenames:
    if not unix:
        filename = util.norm_slash(filename)
    if obj.match(filename):
        matches.append(filename)
return matches
def globfilter(filenames, patterns, *, flags=0)
Filter names using pattern.
5.327823
5.100619
1.044544
pattern = util.norm_pattern(pattern, False, True)
return escape(pattern, unix)
def raw_escape(pattern, unix=False)
Apply raw character transform before applying escape.
10.062846
9.096643
1.106215
is_bytes = isinstance(pattern, bytes)
replace = br'\\\1' if is_bytes else r'\\\1'
win = util.platform() == "windows"
if win and not unix:
    magic = _wcparse.RE_BWIN_MAGIC if is_bytes else _wcparse.RE_WIN_MAGIC
else:
    magic = _wcparse.RE_BMAGIC if is_bytes else _wcparse.RE_MAGIC

# Windows drives are handled specially internally,
# so we shouldn't escape them; we'd just have to
# detect and undo it later.
drive = b'' if is_bytes else ''
if win and not unix:
    m = (_wcparse.RE_BWIN_PATH if is_bytes else _wcparse.RE_WIN_PATH).match(pattern)
    if m:
        drive = m.group(0)
        pattern = pattern[len(drive):]

return drive + magic.sub(replace, pattern)
def escape(pattern, unix=False)
Escape.
4.596177
4.521028
1.016622
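`escape` makes untrusted text safe to embed in a larger pattern; a sketch assuming the wcmatch glob module:

from wcmatch import glob

name = 'report[final].txt'
pattern = glob.escape(name)  # magic characters are backslash-escaped
print(glob.globmatch(name, pattern))  # True: matches itself literally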
self.pattern = []
self.npatterns = None
npattern = []
for p in pattern:
    if _wcparse.is_negative(p, self.flags):
        # Treat the inverse pattern as a normal pattern; if it matches, we will exclude.
        # This is faster as compiled patterns usually compare the include patterns first,
        # and then the exclude, but glob will already know it wants to include the file.
        npattern.append(p[1:])
    else:
        self.pattern.extend(
            [_wcparse.WcPathSplit(x, self.flags).split() for x in _wcparse.expand_braces(p, self.flags)]
        )

if npattern:
    self.npatterns = _wcparse.compile(npattern, self.flags ^ (_wcparse.NEGATE | _wcparse.REALPATH))

if not self.pattern and self.npatterns is not None:
    self.pattern.append(_wcparse.WcPathSplit((b'**' if self.is_bytes else '**'), self.flags).split())
def _parse_patterns(self, pattern)
Parse patterns.
6.566076
6.361019
1.032237
return _wcparse._match_real(
    filename,
    patterns._include,
    patterns._exclude,
    patterns._follow,
    self.symlinks
)
def _match_excluded(self, filename, patterns)
Call match real directly to skip unnecessary `exists` check.
21.344664
13.511268
1.579768
return self.npatterns and self._match_excluded(path, self.npatterns)
def _is_excluded(self, path, dir_only)
Check if file is excluded.
17.370626
11.891631
1.460744
return a.lower() == b if not self.case_sensitive else a == b
def _match_literal(self, a, b=None)
Match two names.
8.418046
6.645384
1.266751
if target is None:
    matcher = None
elif isinstance(target, (str, bytes)):
    # Plain text match
    if not self.case_sensitive:
        match = target.lower()
    else:
        match = target
    matcher = functools.partial(self._match_literal, b=match)
else:
    # File match pattern
    matcher = target.match
return matcher
def _get_matcher(self, target)
Get the matcher for the given target: `None`, a literal name, or a compiled pattern.
4.418445
4.1176
1.073063
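A self-contained sketch of the same literal-vs-compiled dispatch (the names here are illustrative, not from the source):

import functools
import re

def match_literal(a, b=None, case_sensitive=False):
    # Compare two names, lowering one side when case is ignored.
    return a == b if case_sensitive else a.lower() == b

def get_matcher(target, case_sensitive=False):
    # None -> match anything; str -> literal compare; else assume compiled pattern.
    if target is None:
        return None
    if isinstance(target, str):
        b = target if case_sensitive else target.lower()
        return functools.partial(match_literal, b=b, case_sensitive=case_sensitive)
    return target.match

matcher = get_matcher('README.md')
print(matcher('readme.MD'))              # True: case-insensitive literal
matcher = get_matcher(re.compile(r'.*\.py$'))
print(bool(matcher('setup.py')))         # True: regex match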
scandir = self.current if not curdir else curdir

# Python will never return . or .., so fake it.
if os.path.isdir(scandir) and matcher is not None:
    for special in self.specials:
        if matcher(special):
            yield os.path.join(curdir, special)

try:
    if NO_SCANDIR_WORKAROUND:
        # Our current directory can be empty if the path starts with magic,
        # but we don't want to return paths with '.', so just use it to list
        # files, but use '' when constructing the path.
        with os.scandir(scandir) as scan:
            for f in scan:
                try:
                    # Quicker to just test this way than to run through `fnmatch`.
                    if deep and self._is_hidden(f.name):
                        continue
                    path = os.path.join(curdir, f.name)
                    is_dir = f.is_dir()
                    if is_dir:
                        is_link = f.is_symlink()
                        self.symlinks[path] = is_link
                    else:
                        # We don't care if a file is a link
                        is_link = False
                    if deep and not self.follow_links and is_link:
                        continue
                    if (not dir_only or is_dir) and (matcher is None or matcher(f.name)):
                        yield path
                    if deep and is_dir:
                        yield from self._glob_dir(path, matcher, dir_only, deep)
                except OSError:  # pragma: no cover
                    pass
    else:
        for f in os.listdir(scandir):
            # Quicker to just test this way than to run through `fnmatch`.
            if deep and self._is_hidden(f):
                continue
            path = os.path.join(curdir, f)
            is_dir = os.path.isdir(path)
            if is_dir:
                is_link = os.path.islink(path)
                self.symlinks[path] = is_link
            else:
                is_link = False
            if deep and not self.follow_links and is_link:
                continue
            if (not dir_only or is_dir) and (matcher is None or matcher(f)):
                yield path
            if deep and is_dir:
                yield from self._glob_dir(path, matcher, dir_only, deep)
except OSError:  # pragma: no cover
    pass
def _glob_dir(self, curdir, matcher, dir_only=False, deep=False)
Non recursive directory glob.
2.731781
2.734186
0.99912
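A stripped-down sketch of the same `os.scandir` traversal, keeping only the dot-file and symlink guards (everything else here is illustrative):

import os

def iter_dir(curdir, deep=False, follow_links=False):
    # Yield paths under curdir; skip hidden entries and, unless following
    # links, symlinked directories; recurse when deep is requested.
    with os.scandir(curdir or '.') as scan:
        for entry in scan:
            if deep and entry.name.startswith('.'):
                continue
            path = os.path.join(curdir, entry.name)
            is_dir = entry.is_dir()
            if deep and not follow_links and is_dir and entry.is_symlink():
                continue
            yield path
            if deep and is_dir:
                yield from iter_dir(path, deep, follow_links)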
is_magic = this.is_magic
dir_only = this.dir_only
target = this.pattern
is_globstar = this.is_globstar

if is_magic and is_globstar:
    # Glob star directory `**`.

    # Throw away multiple consecutive `globstars`
    # and acquire the pattern after the `globstars` if available.
    this = rest.pop(0) if rest else None
    globstar_end = this is None
    while this and not globstar_end:
        if this:
            dir_only = this.dir_only
            target = this.pattern
        if this and this.is_globstar:
            this = rest.pop(0) if rest else None
            if this is None:
                globstar_end = True
        else:
            break

    if globstar_end:
        target = None

    # We match `**/next` during a deep glob, so whatever comes back,
    # we will send back through `_glob` with pattern after `next` (`**/next/after`).
    # So grab `after` if available.
    this = rest.pop(0) if rest else None

    # Deep searching is the unique case where we
    # might feed in a `None` for the next pattern to match.
    # Deep glob will account for this.
    matcher = self._get_matcher(target)

    # If our pattern ends with `curdir/**`, but does not start with `**` it matches zero or more,
    # so it should return `curdir/`, signifying `curdir` + no match.
    # If a pattern follows `**/something`, we always get the appropriate
    # return already, so this isn't needed in that case.
    # There is one quirk though with Bash, if `curdir` had magic before `**`, Bash
    # omits the trailing `/`. We don't worry about that.
    if globstar_end and curdir:
        yield os.path.join(curdir, self.empty)

    # Search
    for path in self._glob_dir(curdir, matcher, dir_only, deep=True):
        if this:
            yield from self._glob(path, this, rest[:])
        else:
            yield path
elif not dir_only:
    # Files: no need to recursively search at this point as we are done.
    matcher = self._get_matcher(target)
    yield from self._glob_dir(curdir, matcher)
else:
    # Directory: search current directory against pattern
    # and feed the results back through with the next pattern.
    this = rest.pop(0) if rest else None
    matcher = self._get_matcher(target)
    for path in self._glob_dir(curdir, matcher, True):
        if this:
            yield from self._glob(path, this, rest[:])
        else:
            yield path
def _glob(self, curdir, this, rest)
Handle glob flow.

There are really only a couple of cases:

- File name.
- File name pattern (magic).
- Directory.
- Directory name pattern (magic).
- Extra slashes `////`.
- `globstar` `**`.
6.211942
6.137068
1.0122
results = [curdir]

if not self._is_parent(curdir) and not self._is_this(curdir):
    fullpath = os.path.abspath(curdir)
    basename = os.path.basename(fullpath)
    dirname = os.path.dirname(fullpath)
    if basename:
        matcher = self._get_matcher(basename)
        results = [os.path.basename(name) for name in self._glob_dir(dirname, matcher, self)]

return results
def _get_starting_paths(self, curdir)
Get the starting location. For case-sensitive paths, we have to "glob" for the path first, as Python doesn't want its users to think about case. By scanning for it, we can get the actual casing and then compare.
3.643091
3.671759
0.992192
# Cached symlinks
self.symlinks = {}

if self.is_bytes:
    curdir = os.fsencode(os.curdir)
else:
    curdir = os.curdir

for pattern in self.pattern:
    # If the pattern ends with `/` we return the files ending with `/`.
    dir_only = pattern[-1].dir_only if pattern else False

    if pattern:
        if not pattern[0].is_magic:
            # Path starts with normal plain text.
            # Let's verify the case of the starting directory (if possible).
            this = pattern[0]

            curdir = this[0]

            if not os.path.lexists(curdir):
                return

            # Make sure case matches, but running case insensitive
            # on a case sensitive file system may return more than
            # one starting location.
            results = [curdir] if this.is_drive else self._get_starting_paths(curdir)
            if not results:
                if not dir_only:
                    # There is no directory with this name,
                    # but we have a file and no directory restriction
                    yield curdir
                return

            if this.dir_only:
                # Glob these directories if they exist
                for start in results:
                    if os.path.isdir(start):
                        rest = pattern[1:]
                        if rest:
                            this = rest.pop(0)
                            for match in self._glob(curdir, this, rest):
                                if not self._is_excluded(match, dir_only):
                                    yield os.path.join(match, self.empty) if dir_only else match
                        elif not self._is_excluded(curdir, dir_only):
                            yield os.path.join(curdir, self.empty) if dir_only else curdir
            else:
                # Return the file(s) and finish.
                for start in results:
                    if os.path.lexists(start) and not self._is_excluded(start, dir_only):
                        yield os.path.join(start, self.empty) if dir_only else start
        else:
            # Path starts with a magic pattern, let's get globbing
            rest = pattern[:]
            this = rest.pop(0)
            for match in self._glob(curdir if not curdir == self.current else self.empty, this, rest):
                if not self._is_excluded(match, dir_only):
                    yield os.path.join(match, self.empty) if dir_only else match
def glob(self)
Starts off the glob iterator.
4.408545
4.326491
1.018966
if isinstance(name, str):
    return name.replace('/', "\\") if not is_case_sensitive() else name
else:
    return name.replace(b'/', b"\\") if not is_case_sensitive() else name
def norm_slash(name)
Normalize path slashes.
4.162213
3.750817
1.109682
is_bytes = isinstance(pattern, bytes)

if not normalize and not is_raw_chars:
    return pattern

def norm_char(token):
    if normalize and token in ('/', b'/'):
        token = br'\\' if is_bytes else r'\\'
    return token

def norm(m):
    if m.group(1):
        char = m.group(1)
        if normalize:
            char = br'\\\\' if is_bytes else r'\\\\' if len(char) > 1 else norm_char(char)
    elif m.group(2):
        char = norm_char(BACK_SLASH_TRANSLATION[m.group(2)] if is_raw_chars else m.group(2))
    elif is_raw_chars and m.group(4):
        char = norm_char(bytes([int(m.group(4), 8) & 0xFF]) if is_bytes else chr(int(m.group(4), 8)))
    elif is_raw_chars and m.group(3):
        char = norm_char(bytes([int(m.group(3)[2:], 16)]) if is_bytes else chr(int(m.group(3)[2:], 16)))
    elif is_raw_chars and not is_bytes and m.group(5):
        char = norm_char(unicodedata.lookup(m.group(5)[3:-1]))
    elif not is_raw_chars:
        char = m.group(0)
    else:
        value = m.group(5) if is_bytes else m.group(6)
        pos = m.start(5) if is_bytes else m.start(6)
        raise SyntaxError("Could not convert character value %s at position %d" % (value, pos))
    return char

return (RE_BNORM if is_bytes else RE_NORM).sub(norm, pattern)
def norm_pattern(pattern, normalize, is_raw_chars)
r""" Normalize pattern. - For windows systems we want to normalize slashes to \. - If raw string chars is enabled, we want to also convert encoded string chars to literal characters. - If `normalize` is enabled, take care to convert \/ to \\\\.
2.6941
2.680076
1.005233
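A toy sketch of the core normalization idea, namely rewriting forward slashes to escaped backslashes while leaving existing escape sequences alone (this is a simplification, not the full translation table above):

import re

RE_SLASH = re.compile(r'(\\.)|(/)')

def toy_norm(pattern):
    def repl(m):
        # Pass escape sequences through; turn `/` into an escaped backslash.
        return m.group(1) if m.group(1) else r'\\'
    return RE_SLASH.sub(repl, pattern)

print(toy_norm(r'docs/*.md'))      # docs\\*.md
print(toy_norm(r'keep\/escaped'))  # keep\/escaped (untouched)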
hidden = False
f = os.path.basename(path)

if f[:1] in ('.', b'.'):
    # Count dot file as hidden on all systems
    hidden = True
elif _PLATFORM == 'windows':
    # On Windows, look for `FILE_ATTRIBUTE_HIDDEN`
    FILE_ATTRIBUTE_HIDDEN = 0x2
    if PY35:
        results = os.lstat(path)
        hidden = bool(results.st_file_attributes & FILE_ATTRIBUTE_HIDDEN)
    else:
        if isinstance(path, bytes):
            attrs = ctypes.windll.kernel32.GetFileAttributesA(path)
        else:
            attrs = ctypes.windll.kernel32.GetFileAttributesW(path)
        hidden = attrs != -1 and attrs & FILE_ATTRIBUTE_HIDDEN
elif _PLATFORM == "osx":  # pragma: no cover
    # On macOS, look for `UF_HIDDEN`
    results = os.lstat(path)
    hidden = bool(results.st_flags & stat.UF_HIDDEN)

return hidden
def is_hidden(path)
Check if file is hidden.
3.30984
3.132251
1.056697
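Only the leading-dot branch above is portable; a minimal sketch of just that part:

import os

def is_dotfile(path):
    # A leading dot marks a file hidden on every platform checked above.
    name = os.path.basename(path)
    return name[:1] in ('.', b'.')

print(is_dotfile('/home/user/.bashrc'))  # True
print(is_dotfile('notes.txt'))           # False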
m = pattern.match(self._string, self._index)
if m:
    self._index = m.end()
return m
def match(self, pattern)
Perform regex match at index.
4.449226
3.44909
1.289971
try:
    char = self._string[self._index]
    self._index += 1
except IndexError:  # pragma: no cover
    raise StopIteration

return char
def iternext(self)
Iterate through characters of the string.
4.335464
3.360385
1.290169
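The two methods above belong to a string-iterator helper; a minimal self-contained sketch of such a class (illustrative, not the original):

import re

class MiniStringIter:
    # Iterate a string character by character, with anchored regex matching.
    def __init__(self, string):
        self._string = string
        self._index = 0

    def __iter__(self):
        return self

    def __next__(self):
        try:
            char = self._string[self._index]
        except IndexError:
            raise StopIteration
        self._index += 1
        return char

    def match(self, pattern):
        # Match at the current position and consume the matched span.
        m = pattern.match(self._string, self._index)
        if m:
            self._index = m.end()
        return m

it = MiniStringIter('abc123')
print(next(it))               # 'a'
it.match(re.compile(r'bc'))   # consumes 'bc'
print(next(it))               # '1'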
from wagtail.core.models import Page

# Empty slugs are ugly (e.g. '-1' may be generated) so force non-empty
if not text:
    text = 'no-title'

# Use Django's slugify filter to slugify
slug = cautious_slugify(text)[:255]

values_list = Page.objects.filter(
    slug__startswith=slug
).values_list('id', 'slug')

# Find the highest numerical suffix (avoid shadowing the `max` builtin)
max_suffix = -1
for tu in values_list:
    if tu[1] == slug:
        if max_suffix == -1:
            # Set to indicate a collision
            max_suffix = 0

    # Update if this suffix is greater
    match = RE_NUMERICAL_SUFFIX.match(tu[1])
    if match is not None:
        i = int(match.group(1))
        if i > max_suffix:
            max_suffix = i

if max_suffix >= 0:
    # There were collisions
    return "%s-%s" % (slug, max_suffix + 1)
else:
    # No collisions
    return slug
def generate_slug(text, tail_number=0)
Returns a new unique slug. Object must provide a SlugField called slug.
URL-friendly slugs are generated using django.template.defaultfilters'
slugify. Numbers are added to the end of slugs for uniqueness.

Based on the implementation in jmbo.utils:
https://github.com/praekelt/jmbo/blob/develop/jmbo/utils/__init__.py
5.053955
5.05488
0.999817
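The collision logic is independent of Wagtail; a self-contained sketch with the database query replaced by an in-memory list (the suffix regex is an assumption, since RE_NUMERICAL_SUFFIX is not shown):

import re

RE_SUFFIX = re.compile(r'^.*-(\d+)$')  # assumed shape of RE_NUMERICAL_SUFFIX

def next_unique_slug(slug, existing):
    highest = -1
    for other in existing:
        if other == slug and highest == -1:
            highest = 0  # exact collision with no suffix yet
        m = RE_SUFFIX.match(other)
        if m and int(m.group(1)) > highest:
            highest = int(m.group(1))
    return slug if highest < 0 else "%s-%d" % (slug, highest + 1)

print(next_unique_slug('post', ['post', 'post-1', 'post-2']))  # post-3
print(next_unique_slug('post', []))                            # post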
temp_directory = tempfile.mkdtemp()
temp_file = tempfile.TemporaryFile()

# Assumes the zip file contains a directory called media
temp_media_file = os.path.join(temp_directory, 'media')
try:
    for chunk in upload_file.chunks():
        temp_file.write(chunk)

    with zipfile.ZipFile(temp_file, 'r') as z:
        z.extractall(temp_directory)

    if os.path.exists(temp_media_file):
        return distutils.dir_util.copy_tree(
            temp_media_file, settings.MEDIA_ROOT)
    else:
        raise Exception("Error: There is no directory called "
                        "'media' in the root of the zipped file")
finally:
    temp_file.close()
    if os.path.exists(temp_directory):
        shutil.rmtree(temp_directory)
def update_media_file(upload_file)
Update the Current Media Folder. Returns list of files copied across or raises an exception.
3.05987
2.386584
1.282113
md5 = hashlib.md5()
try:
    for chunk in image.file.chunks():
        md5.update(chunk)
    return md5.hexdigest()
# This should only occur in tests
except ValueError:
    # See the link below for why we try not to use .open()
    # https://docs.djangoproject.com/en/1.9/ref/files/uploads/#django.core.files.uploadedfile.UploadedFile.chunks  # noqa
    image.file.open()
    for chunk in image.file.chunks():
        md5.update(chunk)
    return md5.hexdigest()
finally:
    image.file.close()
def get_image_hash(image)
Returns an MD5 hash of the image file Handles images stored locally and on AWS I know this code is ugly. Please don't ask. The rabbit hole is deep.
4.26914
2.514291
1.69795
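The same chunked-hashing technique applied to a plain file on disk, without the Django file wrapper:

import hashlib

def file_md5(path, chunk_size=64 * 1024):
    # Hash in fixed-size chunks so large files never load fully into memory.
    md5 = hashlib.md5()
    with open(path, 'rb') as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b''):
            md5.update(chunk)
    return md5.hexdigest()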
flat_fields = {}
nested_fields = {}

# Exclude "id" and "meta" elements
for k, v in fields.items():
    # TODO: remove dependence on KEYS_TO_EXCLUDE
    if k not in KEYS_TO_EXCLUDE:
        if not isinstance(v, (dict, list)):
            flat_fields.update({k: v})
        else:
            nested_fields.update({k: v})

return flat_fields, nested_fields
def separate_fields(fields)
Non-foreign-key fields can be mapped to new article instances directly. Foreign-key fields require a bit more work. This method returns a tuple of the form: (flat fields, nested fields).
3.80811
3.537179
1.076595
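A quick illustration of the split, with a stand-in for the unshown KEYS_TO_EXCLUDE constant:

KEYS_TO_EXCLUDE = ('id', 'meta')  # stand-in; the real constant is not shown

def separate(fields):
    flat, nested = {}, {}
    for k, v in fields.items():
        if k in KEYS_TO_EXCLUDE:
            continue
        (nested if isinstance(v, (dict, list)) else flat)[k] = v
    return flat, nested

print(separate({'id': 1, 'title': 'Hello', 'tags': ['a', 'b']}))
# ({'title': 'Hello'}, {'tags': ['a', 'b']})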