Dataset schema:
- _id: string (length 2-7)
- title: string (length 1-88)
- partition: string (3 classes)
- text: string (length 75-19.8k)
- language: string (1 class)
- meta_information: dict
q17000
NNAUTILUS.next_iteration
train
def next_iteration(self, ref_point, bounds=None):
    """
    Calculate the next iteration point to be shown to the DM

    Parameters
    ----------
    ref_point : list of float
        Reference point given by the DM
    """
    if bounds:
        self.problem.points = reachable_points(
            self.problem.points, self.problem.ideal, bounds
        )
    if not utils.isin(self.fh, self.problem.points) or ref_point != self.ref_point:
        self.ref_point = list(ref_point)
        self._update_fh()

    self._update_zh(self.zh, self.fh)

    self.fh_lo = list(self.lower_bounds_factory.result(self.zh))
    self.fh_up = list(self.upper_bounds_factory.result(self.zh))

    logging.debug(f"Updated upper boundary: {self.fh_up}")
    logging.debug(f"Updated lower boundary: {self.fh_lo}")

    if not np.all(np.array(self.fh_up) > np.array(self.fh_lo)):
        warn(self.NegativeIntervalWarning())

    assert utils.isin(self.fh_up, self.problem.points)
    assert utils.isin(self.fh_lo, self.problem.points)

    dist = self.distance(self.zh, self.fh)

    # Reachable points
    self.update_points()

    lP = len(self.problem.points)

    self.current_iter -= 1
    return dist, self.fh, self.zh, self.fh_lo, self.fh_up, lP
python
{ "resource": "" }
q17001
html_to_dom
train
def html_to_dom(html, default_encoding=DEFAULT_ENCODING, encoding=None, errors=DEFAULT_ENC_ERRORS):
    """Converts HTML to DOM."""
    if isinstance(html, unicode):
        decoded_html = html
        # encode HTML in case it's XML with an encoding declaration
        forced_encoding = encoding if encoding else default_encoding
        html = html.encode(forced_encoding, errors)
    else:
        decoded_html = decode_html(html, default_encoding, encoding, errors)

    try:
        dom = lxml.html.fromstring(decoded_html, parser=lxml.html.HTMLParser())
    except ValueError:
        # Unicode strings with encoding declaration are not supported.
        # For XHTML files with an encoding declaration, use the declared encoding.
        dom = lxml.html.fromstring(html, parser=lxml.html.HTMLParser())

    return dom
python
{ "resource": "" }
q17002
decode_html
train
def decode_html(html, default_encoding=DEFAULT_ENCODING, encoding=None, errors=DEFAULT_ENC_ERRORS):
    """
    Converts a `html` containing an HTML page into Unicode.
    Tries to guess character encoding from meta tag.
    """
    if isinstance(html, unicode):
        return html

    if encoding:
        return html.decode(encoding, errors)

    match = CHARSET_META_TAG_PATTERN.search(html)
    if match:
        declared_encoding = match.group(1).decode("ASCII")
        # treat an unknown declared encoding as if it wasn't found at all
        with ignored(LookupError):
            return html.decode(declared_encoding, errors)

    # unknown encoding
    try:
        # try UTF-8 first
        return html.decode("utf8")
    except UnicodeDecodeError:
        # try our luck with the default encoding
        try:
            return html.decode(default_encoding, errors)
        except UnicodeDecodeError as e:
            raise JustextError("Unable to decode the HTML to Unicode: " + unicode(e))
python
{ "resource": "" }
q17003
preprocessor
train
def preprocessor(dom):
    "Removes unwanted parts of DOM."
    options = {
        "processing_instructions": False,
        "remove_unknown_tags": False,
        "safe_attrs_only": False,
        "page_structure": False,
        "annoying_tags": False,
        "frames": False,
        "meta": False,
        "links": False,
        "javascript": False,
        "scripts": True,
        "comments": True,
        "style": True,
        "embedded": True,
        "forms": True,
        "kill_tags": ("head",),
    }
    cleaner = Cleaner(**options)
    return cleaner.clean_html(dom)
python
{ "resource": "" }
q17004
classify_paragraphs
train
def classify_paragraphs(paragraphs, stoplist, length_low=LENGTH_LOW_DEFAULT,
        length_high=LENGTH_HIGH_DEFAULT, stopwords_low=STOPWORDS_LOW_DEFAULT,
        stopwords_high=STOPWORDS_HIGH_DEFAULT,
        max_link_density=MAX_LINK_DENSITY_DEFAULT, no_headings=NO_HEADINGS_DEFAULT):
    "Context-free paragraph classification."
    stoplist = frozenset(w.lower() for w in stoplist)

    for paragraph in paragraphs:
        length = len(paragraph)
        stopword_density = paragraph.stopwords_density(stoplist)
        link_density = paragraph.links_density()
        paragraph.heading = bool(not no_headings and paragraph.is_heading)

        if link_density > max_link_density:
            paragraph.cf_class = 'bad'
        elif ('\xa9' in paragraph.text) or ('&copy' in paragraph.text):
            paragraph.cf_class = 'bad'
        elif re.search('^select|\.select', paragraph.dom_path):
            paragraph.cf_class = 'bad'
        elif length < length_low:
            if paragraph.chars_count_in_links > 0:
                paragraph.cf_class = 'bad'
            else:
                paragraph.cf_class = 'short'
        elif stopword_density >= stopwords_high:
            if length > length_high:
                paragraph.cf_class = 'good'
            else:
                paragraph.cf_class = 'neargood'
        elif stopword_density >= stopwords_low:
            paragraph.cf_class = 'neargood'
        else:
            paragraph.cf_class = 'bad'
python
{ "resource": "" }
q17005
revise_paragraph_classification
train
def revise_paragraph_classification(paragraphs, max_heading_distance=MAX_HEADING_DISTANCE_DEFAULT):
    """
    Context-sensitive paragraph classification.
    Assumes that classify_paragraphs has already been called.
    """
    # copy classes
    for paragraph in paragraphs:
        paragraph.class_type = paragraph.cf_class

    # good headings
    for i, paragraph in enumerate(paragraphs):
        if not (paragraph.heading and paragraph.class_type == 'short'):
            continue
        j = i + 1
        distance = 0
        while j < len(paragraphs) and distance <= max_heading_distance:
            if paragraphs[j].class_type == 'good':
                paragraph.class_type = 'neargood'
                break
            distance += len(paragraphs[j].text)
            j += 1

    # classify short
    new_classes = {}
    for i, paragraph in enumerate(paragraphs):
        if paragraph.class_type != 'short':
            continue
        prev_neighbour = get_prev_neighbour(i, paragraphs, ignore_neargood=True)
        next_neighbour = get_next_neighbour(i, paragraphs, ignore_neargood=True)
        neighbours = set((prev_neighbour, next_neighbour))
        if neighbours == set(['good']):
            new_classes[i] = 'good'
        elif neighbours == set(['bad']):
            new_classes[i] = 'bad'
        # it must be set(['good', 'bad'])
        elif (prev_neighbour == 'bad' and get_prev_neighbour(i, paragraphs, ignore_neargood=False) == 'neargood') or \
                (next_neighbour == 'bad' and get_next_neighbour(i, paragraphs, ignore_neargood=False) == 'neargood'):
            new_classes[i] = 'good'
        else:
            new_classes[i] = 'bad'

    for i, c in new_classes.items():
        paragraphs[i].class_type = c

    # revise neargood
    for i, paragraph in enumerate(paragraphs):
        if paragraph.class_type != 'neargood':
            continue
        prev_neighbour = get_prev_neighbour(i, paragraphs, ignore_neargood=True)
        next_neighbour = get_next_neighbour(i, paragraphs, ignore_neargood=True)
        if (prev_neighbour, next_neighbour) == ('bad', 'bad'):
            paragraph.class_type = 'bad'
        else:
            paragraph.class_type = 'good'

    # more good headings
    for i, paragraph in enumerate(paragraphs):
        if not (paragraph.heading and paragraph.class_type == 'bad' and paragraph.cf_class != 'bad'):
            continue
        j = i + 1
        distance = 0
        while j < len(paragraphs) and distance <= max_heading_distance:
            if paragraphs[j].class_type == 'good':
                paragraph.class_type = 'good'
                break
            distance += len(paragraphs[j].text)
            j += 1
python
{ "resource": "" }
q17006
get_stoplists
train
def get_stoplists():
    """Returns a collection of built-in stop-lists."""
    path_to_stoplists = os.path.dirname(sys.modules["justext"].__file__)
    path_to_stoplists = os.path.join(path_to_stoplists, "stoplists")

    stoplist_names = []
    for filename in os.listdir(path_to_stoplists):
        name, extension = os.path.splitext(filename)
        if extension == ".txt":
            stoplist_names.append(name)

    return frozenset(stoplist_names)
python
{ "resource": "" }
q17007
get_stoplist
train
def get_stoplist(language):
    """Returns a built-in stop-list for the language as a set of words."""
    file_path = os.path.join("stoplists", "%s.txt" % language)
    try:
        stopwords = pkgutil.get_data("justext", file_path)
    except IOError:
        raise ValueError(
            "Stoplist for language '%s' is missing. "
            "Please use function 'get_stoplists' for a complete list of stoplists, "
            "and feel free to contribute your own stoplist." % language
        )

    return frozenset(w.decode("utf8").lower() for w in stopwords.splitlines())
python
{ "resource": "" }
q17008
get_boto_client
train
def get_boto_client(
        client,
        region=None,
        aws_access_key_id=None,
        aws_secret_access_key=None,
        aws_session_token=None,
        endpoint_url=None
):
    """Get a boto3 client connection."""
    cache_key = '{0}:{1}:{2}:{3}'.format(
        client,
        region,
        aws_access_key_id,
        endpoint_url or ''
    )
    if not aws_session_token:
        if cache_key in CLIENT_CACHE:
            return CLIENT_CACHE[cache_key]
    session = get_boto_session(
        region,
        aws_access_key_id,
        aws_secret_access_key,
        aws_session_token
    )
    if not session:
        logging.error("Failed to get {0} client.".format(client))
        return None

    CLIENT_CACHE[cache_key] = session.client(
        client,
        endpoint_url=endpoint_url
    )
    return CLIENT_CACHE[cache_key]
python
{ "resource": "" }
q17009
get_boto_resource
train
def get_boto_resource(
        resource,
        region=None,
        aws_access_key_id=None,
        aws_secret_access_key=None,
        aws_session_token=None,
        endpoint_url=None
):
    """Get a boto resource connection."""
    cache_key = '{0}:{1}:{2}:{3}'.format(
        resource,
        region,
        aws_access_key_id,
        endpoint_url or ''
    )
    if not aws_session_token:
        if cache_key in RESOURCE_CACHE:
            return RESOURCE_CACHE[cache_key]
    session = get_boto_session(
        region,
        aws_access_key_id,
        aws_secret_access_key,
        aws_session_token
    )
    if not session:
        logging.error("Failed to get {0} resource.".format(resource))
        return None

    RESOURCE_CACHE[cache_key] = session.resource(
        resource,
        endpoint_url=endpoint_url
    )
    return RESOURCE_CACHE[cache_key]
python
{ "resource": "" }
q17010
get_boto_session
train
def get_boto_session(
        region,
        aws_access_key_id=None,
        aws_secret_access_key=None,
        aws_session_token=None
):
    """Get a boto3 session."""
    return boto3.session.Session(
        region_name=region,
        aws_secret_access_key=aws_secret_access_key,
        aws_access_key_id=aws_access_key_id,
        aws_session_token=aws_session_token
    )
python
{ "resource": "" }
q17011
ensure_text
train
def ensure_text(str_or_bytes, encoding='utf-8'):
    """Ensures an input is a string, decoding if it is bytes."""
    if not isinstance(str_or_bytes, six.text_type):
        return str_or_bytes.decode(encoding)
    return str_or_bytes
python
{ "resource": "" }
q17012
ensure_bytes
train
def ensure_bytes(str_or_bytes, encoding='utf-8', errors='strict'):
    """Ensures an input is bytes, encoding if it is a string."""
    if isinstance(str_or_bytes, six.text_type):
        return str_or_bytes.encode(encoding, errors)
    return str_or_bytes
python
{ "resource": "" }
q17013
KMSTokenValidator._get_key_alias_from_cache
train
def _get_key_alias_from_cache(self, key_arn):
    '''
    Find a key's alias by looking up its key_arn in the KEY_METADATA
    cache. This function will only work after a key has been looked up
    by its alias, and is meant as a convenience function for turning an
    ARN that's already been looked up back into its alias.
    '''
    for alias in self.KEY_METADATA:
        if self.KEY_METADATA[alias]['KeyMetadata']['Arn'] == key_arn:
            return alias
    return None
python
{ "resource": "" }
q17014
KMSTokenValidator.decrypt_token
train
def decrypt_token(self, username, token):
    '''
    Decrypt a token.
    '''
    version, user_type, _from = self._parse_username(username)
    if (version > self.maximum_token_version
            or version < self.minimum_token_version):
        raise TokenValidationError('Unacceptable token version.')
    try:
        token_key = '{0}{1}{2}{3}'.format(
            hashlib.sha256(ensure_bytes(token)).hexdigest(),
            _from,
            self.to_auth_context,
            user_type
        )
    except Exception:
        raise TokenValidationError('Authentication error.')
    if token_key not in self.TOKENS:
        try:
            token = base64.b64decode(token)
            # Ensure normal context fields override whatever is in
            # extra_context.
            context = copy.deepcopy(self.extra_context)
            context['to'] = self.to_auth_context
            context['from'] = _from
            if version > 1:
                context['user_type'] = user_type
            data = self.kms_client.decrypt(
                CiphertextBlob=token,
                EncryptionContext=context
            )
            # Decrypt doesn't take KeyId as an argument. We need to verify
            # the correct key was used to do the decryption.
            # Annoyingly, the KeyId from the data is actually an arn.
            key_arn = data['KeyId']
            if user_type == 'service':
                if not self._valid_service_auth_key(key_arn):
                    raise TokenValidationError(
                        'Authentication error (wrong KMS key).'
                    )
            elif user_type == 'user':
                if not self._valid_user_auth_key(key_arn):
                    raise TokenValidationError(
                        'Authentication error (wrong KMS key).'
                    )
            else:
                raise TokenValidationError(
                    'Authentication error. Unsupported user_type.'
                )
            plaintext = data['Plaintext']
            payload = json.loads(plaintext)
            key_alias = self._get_key_alias_from_cache(key_arn)
            ret = {'payload': payload, 'key_alias': key_alias}
        except TokenValidationError:
            raise
        except (ConnectionError, EndpointConnectionError):
            logging.exception('Failure connecting to AWS endpoint.')
            raise TokenValidationError(
                'Authentication error. Failure connecting to AWS endpoint.'
            )
        # We don't care what exception is thrown. For paranoia's sake, fail
        # here.
        except Exception:
            logging.exception('Failed to validate token.')
            raise TokenValidationError(
                'Authentication error. General error.'
            )
    else:
        ret = self.TOKENS[token_key]
    now = datetime.datetime.utcnow()
    try:
        not_before = datetime.datetime.strptime(
            ret['payload']['not_before'],
            TIME_FORMAT
        )
        not_after = datetime.datetime.strptime(
            ret['payload']['not_after'],
            TIME_FORMAT
        )
    except Exception:
        logging.exception(
            'Failed to get not_before and not_after from token payload.'
        )
        raise TokenValidationError(
            'Authentication error. Missing validity.'
        )
    delta = (not_after - not_before).seconds / 60
    if delta > self.auth_token_max_lifetime:
        logging.warning('Token used which exceeds max token lifetime.')
        raise TokenValidationError(
            'Authentication error. Token lifetime exceeded.'
        )
    if (now < not_before) or (now > not_after):
        logging.warning('Invalid time validity for token.')
        raise TokenValidationError(
            'Authentication error. Invalid time validity for token.'
        )
    self.TOKENS[token_key] = ret
    return self.TOKENS[token_key]
python
{ "resource": "" }
q17015
KMSTokenGenerator.get_username
train
def get_username(self):
    """Get a username formatted for a specific token version."""
    _from = self.auth_context['from']
    if self.token_version == 1:
        return '{0}'.format(_from)
    elif self.token_version == 2:
        _user_type = self.auth_context['user_type']
        return '{0}/{1}/{2}'.format(
            self.token_version,
            _user_type,
            _from
        )
python
{ "resource": "" }
q17016
Annotator.properties
train
def properties(self):
    """
    Defines the Java properties that register this annotator with CoreNLP.
    """
    return {
        "customAnnotatorClass.{}".format(self.name):
            "edu.stanford.nlp.pipeline.GenericWebServiceAnnotator",
        "generic.endpoint": "http://{}:{}".format(self.host, self.port),
        "generic.requires": ",".join(self.requires),
        "generic.provides": ",".join(self.provides),
    }
python
{ "resource": "" }
q17017
translate
train
def translate(patterns, *, flags=0):
    """Translate `fnmatch` pattern."""
    flags = _flag_transform(flags)
    return _wcparse.translate(_wcparse.split(patterns, flags), flags)
python
{ "resource": "" }
q17018
WcMatch._valid_file
train
def _valid_file(self, base, name):
    """Return whether a file can be searched."""
    valid = False
    fullpath = os.path.join(base, name)
    if self.file_check is not None and self.compare_file(
            fullpath[self._base_len:] if self.file_pathname else name):
        valid = True
    if valid and (not self.show_hidden and util.is_hidden(fullpath)):
        valid = False
    return self.on_validate_file(base, name) if valid else valid
python
{ "resource": "" }
q17019
WcMatch._valid_folder
train
def _valid_folder(self, base, name):
    """Return whether a folder can be searched."""
    valid = True
    fullpath = os.path.join(base, name)
    if (
        not self.recursive or
        (
            self.folder_exclude_check is not None and
            not self.compare_directory(fullpath[self._base_len:] if self.dir_pathname else name)
        )
    ):
        valid = False
    if valid and (not self.show_hidden and util.is_hidden(fullpath)):
        valid = False
    return self.on_validate_directory(base, name) if valid else valid
python
{ "resource": "" }
q17020
WcMatch.compare_directory
train
def compare_directory(self, directory):
    """Compare folder."""
    return not self.folder_exclude_check.match(
        directory + self.sep if self.dir_pathname else directory)
python
{ "resource": "" }
q17021
WcMatch._walk
train
def _walk(self):
    """Start search for valid files."""
    self._base_len = len(self.base)

    for base, dirs, files in os.walk(self.base, followlinks=self.follow_links):
        # Remove child folders based on exclude rules
        for name in dirs[:]:
            try:
                if not self._valid_folder(base, name):
                    dirs.remove(name)
            except Exception:
                dirs.remove(name)
                value = self.on_error(base, name)
                if value is not None:  # pragma: no cover
                    yield value

            if self._abort:
                break

        # Search files if they were found
        if len(files):
            # Only search files that are in the include rules
            for name in files:
                try:
                    valid = self._valid_file(base, name)
                except Exception:
                    valid = False
                    value = self.on_error(base, name)
                    if value is not None:
                        yield value

                if valid:
                    yield self.on_match(base, name)
                else:
                    self._skipped += 1
                    value = self.on_skip(base, name)
                    if value is not None:
                        yield value

                if self._abort:
                    break

        if self._abort:
            break
python
{ "resource": "" }
q17022
is_negative
train
def is_negative(pattern, flags):
    """Check if negative pattern."""
    if flags & MINUSNEGATE:
        return flags & NEGATE and pattern[0:1] in MINUS_NEGATIVE_SYM
    else:
        return flags & NEGATE and pattern[0:1] in NEGATIVE_SYM
python
{ "resource": "" }
q17023
expand_braces
train
def expand_braces(patterns, flags):
    """Expand braces."""
    if flags & BRACE:
        for p in ([patterns] if isinstance(patterns, (str, bytes)) else patterns):
            try:
                yield from bracex.iexpand(p, keep_escapes=True)
            except Exception:  # pragma: no cover
                # We will probably never hit this as `bracex`
                # doesn't throw any specific exceptions and
                # should normally always parse, but just in case.
                yield p
    else:
        for p in ([patterns] if isinstance(patterns, (str, bytes)) else patterns):
            yield p
python
{ "resource": "" }
q17024
get_case
train
def get_case(flags):
    """Parse flags for case sensitivity settings."""
    if not bool(flags & CASE_FLAGS):
        case_sensitive = util.is_case_sensitive()
    elif flags & FORCECASE:
        case_sensitive = True
    else:
        case_sensitive = False
    return case_sensitive
python
{ "resource": "" }
q17025
is_unix_style
train
def is_unix_style(flags):
    """Check if we should use Unix style."""
    return (
        util.platform() != "windows" or
        (not bool(flags & REALPATH) and get_case(flags))
    ) and not flags & _FORCEWIN
python
{ "resource": "" }
q17026
translate
train
def translate(patterns, flags):
    """Translate patterns."""
    positive = []
    negative = []
    if isinstance(patterns, (str, bytes)):
        patterns = [patterns]

    flags |= _TRANSLATE
    for pattern in patterns:
        for expanded in expand_braces(pattern, flags):
            (negative if is_negative(expanded, flags) else positive).append(
                WcParse(expanded, flags & FLAG_MASK).parse()
            )

    if patterns and flags & REALPATH and negative and not positive:
        positive.append(_compile(b'**' if isinstance(patterns[0], bytes) else '**', flags))

    return positive, negative
python
{ "resource": "" }
q17027
split
train
def split(patterns, flags):
    """Split patterns."""
    if flags & SPLIT:
        splitted = []
        for pattern in ([patterns] if isinstance(patterns, (str, bytes)) else patterns):
            splitted.extend(WcSplit(pattern, flags).split())
        return splitted
    else:
        return patterns
python
{ "resource": "" }
q17028
_compile
train
def _compile(pattern, flags):
    """Compile the pattern to regex."""
    return re.compile(WcParse(pattern, flags & FLAG_MASK).parse())
python
{ "resource": "" }
q17029
_fs_match
train
def _fs_match(pattern, filename, sep, follow, symlinks):
    """
    Match path against the pattern.

    Since `globstar` doesn't match symlinks (unless `FOLLOW` is enabled),
    we must look for symlinks. If we identify a symlink in a `globstar`
    match, we know this result should not actually match.
    """
    matched = False

    base = None
    m = pattern.fullmatch(filename)
    if m:
        matched = True
        # Let's look at the captured `globstar` groups and see if that part of the path
        # contains symlinks.
        if not follow:
            groups = m.groups()
            last = len(groups)
            for i, star in enumerate(m.groups(), 1):
                if star:
                    parts = star.strip(sep).split(sep)
                    if base is None:
                        base = filename[:m.start(i)]
                    for part in parts:
                        base = os.path.join(base, part)
                        is_link = symlinks.get(base, None)
                        if is_link is not None:
                            matched = not is_link
                        elif i != last or os.path.isdir(base):
                            is_link = os.path.islink(base)
                            symlinks[base] = is_link
                            matched = not is_link
                        if not matched:
                            break
                if not matched:
                    break
    return matched
python
{ "resource": "" }
q17030
_match_real
train
def _match_real(filename, include, exclude, follow, symlinks):
    """Match real filename includes and excludes."""
    sep = '\\' if util.platform() == "windows" else '/'
    if isinstance(filename, bytes):
        sep = os.fsencode(sep)

    if not filename.endswith(sep) and os.path.isdir(filename):
        filename += sep

    matched = False
    for pattern in include:
        if _fs_match(pattern, filename, sep, follow, symlinks):
            matched = True
            break

    if matched:
        matched = True
        if exclude:
            for pattern in exclude:
                if _fs_match(pattern, filename, sep, follow, symlinks):
                    matched = False
                    break

    return matched
python
{ "resource": "" }
q17031
_match_pattern
train
def _match_pattern(filename, include, exclude, real, path, follow):
    """Match includes and excludes."""
    if real:
        symlinks = {}
        if isinstance(filename, bytes):
            curdir = os.fsencode(os.curdir)
            mount = RE_BWIN_MOUNT if util.platform() == "windows" else RE_BMOUNT
        else:
            curdir = os.curdir
            mount = RE_WIN_MOUNT if util.platform() == "windows" else RE_MOUNT

        if not mount.match(filename):
            exists = os.path.lexists(os.path.join(curdir, filename))
        else:
            exists = os.path.lexists(filename)

        if not exists:
            return False

        if path:
            return _match_real(filename, include, exclude, follow, symlinks)

    matched = False
    for pattern in include:
        if pattern.fullmatch(filename):
            matched = True
            break

    if not include and exclude:
        matched = True

    if matched:
        matched = True
        if exclude:
            for pattern in exclude:
                if not pattern.fullmatch(filename):
                    matched = False
                    break

    return matched
python
{ "resource": "" }
q17032
WcPathSplit.store
train
def store(self, value, l, dir_only):
    """Group patterns by literals and potential magic patterns."""
    if l and value in (b'', ''):
        return

    globstar = value in (b'**', '**') and self.globstar
    magic = self.is_magic(value)
    if magic:
        value = compile(value, self.flags)
    l.append(WcGlob(value, magic, globstar, dir_only, False))
python
{ "resource": "" }
q17033
WcParse.update_dir_state
train
def update_dir_state(self):
    """
    Update the directory state.

    If we are at the directory start,
    update to after start state (the character right after).
    If at after start, reset state.
    """
    if self.dir_start and not self.after_start:
        self.set_after_start()
    elif not self.dir_start and self.after_start:
        self.reset_dir_track()
python
{ "resource": "" }
q17034
WcParse._restrict_sequence
train
def _restrict_sequence(self):
    """Restrict sequence."""
    if self.pathname:
        value = self.seq_path_dot if self.after_start and not self.dot else self.seq_path
        if self.after_start:
            value = self.no_dir + value
    else:
        value = _NO_DOT if self.after_start and not self.dot else ""
    self.reset_dir_track()

    return value
python
{ "resource": "" }
q17035
WcParse._sequence_range_check
train
def _sequence_range_check(self, result, last):
    """
    If range backwards, remove it.

    A bad range will cause the regular expression to fail,
    so we need to remove it, but return that we removed it
    so the caller can know the sequence wasn't empty.
    Caller will have to craft a sequence that makes sense
    if empty at the end with either an impossible sequence
    for inclusive sequences or a sequence that matches
    everything for an exclusive sequence.
    """
    removed = False
    first = result[-2]
    v1 = ord(first[1:2] if len(first) > 1 else first)
    v2 = ord(last[1:2] if len(last) > 1 else last)
    if v2 < v1:
        result.pop()
        result.pop()
        removed = True
    else:
        result.append(last)
    return removed
python
{ "resource": "" }
q17036
WcParse._handle_posix
train
def _handle_posix(self, i, result, end_range):
    """Handle posix classes."""
    last_posix = False
    m = i.match(RE_POSIX)
    if m:
        last_posix = True
        # Cannot do range with posix class,
        # so escape last `-` if we think this
        # is the end of a range.
        if end_range and i.index - 1 >= end_range:
            result[-1] = '\\' + result[-1]
        posix_type = uniprops.POSIX_BYTES if self.is_bytes else uniprops.POSIX
        result.append(uniprops.get_posix_property(m.group(1), posix_type))
    return last_posix
python
{ "resource": "" }
q17037
WcParse._handle_star
train
def _handle_star(self, i, current):
    """Handle star."""
    if self.pathname:
        if self.after_start and not self.dot:
            star = self.path_star_dot2
            globstar = self.path_gstar_dot2
        elif self.after_start:
            star = self.path_star_dot1
            globstar = self.path_gstar_dot1
        else:
            star = self.path_star
            globstar = self.path_gstar_dot1
        if self.globstar_capture:
            globstar = '({})'.format(globstar)
    else:
        if self.after_start and not self.dot:
            star = _NO_DOT + _STAR
        else:
            star = _STAR
        globstar = ''
    value = star
    if self.after_start and self.globstar and not self.in_list:
        skip = False
        try:
            c = next(i)
            if c != '*':
                i.rewind(1)
                raise StopIteration
        except StopIteration:
            # Could not acquire a second star, so assume single star pattern
            skip = True
        if not skip:
            try:
                index = i.index
                c = next(i)
                if c == '\\':
                    try:
                        self._references(i, True)
                        # Was not what we expected
                        # Assume two single stars
                    except PathNameException:
                        # Looks like escape was a valid slash
                        # Store pattern accordingly
                        value = globstar
                    except StopIteration:
                        # Ran out of characters so assume backslash
                        # count as a double star
                        if self.sep == '\\':
                            value = globstar
                elif c == '/' and not self.bslash_abort:
                    value = globstar
                if value != globstar:
                    i.rewind(i.index - index)
            except StopIteration:
                # Could not acquire directory slash due to no more characters
                # Use double star
                value = globstar

    if self.after_start and value != globstar:
        value = _NEED_CHAR + value

    # Consume duplicate stars
    try:
        c = next(i)
        while c == '*':
            c = next(i)
        i.rewind(1)
    except StopIteration:
        pass

    self.reset_dir_track()
    if value == globstar:
        sep = _GLOBSTAR_DIV % self.get_path_sep()
        # Check if the last entry was a `globstar`
        # If so, don't bother adding another.
        if current[-1] != sep:
            if current[-1] == '':
                # At the beginning of the pattern
                current[-1] = value
            else:
                # Replace the last path separator
                current[-1] = _NEED_SEP % self.get_path_sep()
                current.append(value)
            self.consume_path_sep(i)
            current.append(sep)
        self.set_start_dir()
    else:
        current.append(value)
python
{ "resource": "" }
q17038
WcParse.clean_up_inverse
train
def clean_up_inverse(self, current):
    """
    Clean up current.

    Python doesn't have variable lookbehinds, so we have to do negative lookaheads.
    !(...) when converted to regular expression is atomic, so once it matches, that's it.
    So we use the pattern `(?:(?!(?:stuff|to|exclude)<x>))[^/]*?)` where <x>
    is everything that comes after the negative group.
    `!(this|that)other` --> `(?:(?!(?:this|that)other))[^/]*?)`.

    We have to update the list before | in nested cases: *(!(...)|stuff).
    Before we close a parent `extmatch`: `*(!(...))`.
    And of course on path separators (when path mode is on): `!(...)/stuff`.
    Lastly we make sure all is accounted for when finishing the pattern at the end.
    If there is nothing to store, we store `$`: `(?:(?!(?:this|that)$))[^/]*?)`.
    """
    if not self.inv_ext:
        return

    index = len(current) - 1
    while index >= 0:
        if isinstance(current[index], InvPlaceholder):
            content = current[index + 1:]
            content.append(_EOP if not self.pathname else self.path_eop)
            current[index] = (''.join(content)) + (_EXCLA_GROUP_CLOSE % str(current[index]))
        index -= 1
    self.inv_ext = 0
python
{ "resource": "" }
q17039
WcParse.consume_path_sep
train
def consume_path_sep(self, i):
    """Consume any consecutive path separators, as they count as one."""
    try:
        if self.bslash_abort:
            count = -1
            c = '\\'
            while c == '\\':
                count += 1
                c = next(i)
            i.rewind(1)
            # Rewind one more if we have an odd number (escape): \\\*
            if count > 0 and count % 2:
                i.rewind(1)
        else:
            c = '/'
            while c == '/':
                c = next(i)
            i.rewind(1)
    except StopIteration:
        pass
python
{ "resource": "" }
q17040
WcParse.parse
train
def parse(self):
    """Parse pattern list."""
    result = ['']
    negative = False

    p = util.norm_pattern(self.pattern, not self.unix, self.raw_chars)
    p = p.decode('latin-1') if self.is_bytes else p

    if is_negative(p, self.flags):
        negative = True
        p = p[1:]

    self.root(p, result)

    case_flag = 'i' if not self.case_sensitive else ''
    if util.PY36:
        pattern = (
            r'^(?!(?s%s:%s)$).*?$' if negative and not self.globstar_capture else r'^(?s%s:%s)$'
        ) % (case_flag, ''.join(result))
    else:
        pattern = (
            r'(?s%s)^(?!(?:%s)$).*?$' if negative and not self.globstar_capture else r'(?s%s)^(?:%s)$'
        ) % (case_flag, ''.join(result))

    if self.is_bytes:
        pattern = pattern.encode('latin-1')

    return pattern
python
{ "resource": "" }
q17041
WcRegexp.match
train
def match(self, filename):
    """Match filename."""
    return _match_pattern(
        filename, self._include, self._exclude, self._real, self._path, self._follow)
python
{ "resource": "" }
q17042
_flag_transform
train
def _flag_transform(flags):
    """Transform flags to glob defaults."""
    # Here we force `PATHNAME`.
    flags = (flags & FLAG_MASK) | _wcparse.PATHNAME
    if flags & _wcparse.REALPATH and util.platform() == "windows":
        flags |= _wcparse._FORCEWIN
    if flags & _wcparse.FORCECASE:
        flags ^= _wcparse.FORCECASE
    return flags
python
{ "resource": "" }
q17043
globmatch
train
def globmatch(filename, patterns, *, flags=0):
    """
    Check if filename matches pattern.

    By default case sensitivity is determined by the file system,
    but if `case_sensitive` is set, respect that instead.
    """
    flags = _flag_transform(flags)
    if not _wcparse.is_unix_style(flags):
        filename = util.norm_slash(filename)
    return _wcparse.compile(_wcparse.split(patterns, flags), flags).match(filename)
python
{ "resource": "" }
q17044
globfilter
train
def globfilter(filenames, patterns, *, flags=0):
    """Filter names using pattern."""
    matches = []

    flags = _flag_transform(flags)
    unix = _wcparse.is_unix_style(flags)
    obj = _wcparse.compile(_wcparse.split(patterns, flags), flags)

    for filename in filenames:
        if not unix:
            filename = util.norm_slash(filename)
        if obj.match(filename):
            matches.append(filename)
    return matches
python
{ "resource": "" }
q17045
raw_escape
train
def raw_escape(pattern, unix=False):
    """Apply raw character transform before applying escape."""
    pattern = util.norm_pattern(pattern, False, True)
    return escape(pattern, unix)
python
{ "resource": "" }
q17046
Glob._parse_patterns
train
def _parse_patterns(self, pattern):
    """Parse patterns."""
    self.pattern = []
    self.npatterns = None
    npattern = []
    for p in pattern:
        if _wcparse.is_negative(p, self.flags):
            # Treat the inverse pattern as a normal pattern if it matches, we will exclude.
            # This is faster as compiled patterns usually compare the include patterns first,
            # and then the exclude, but glob will already know it wants to include the file.
            npattern.append(p[1:])
        else:
            self.pattern.extend(
                [_wcparse.WcPathSplit(x, self.flags).split()
                 for x in _wcparse.expand_braces(p, self.flags)]
            )

    if npattern:
        self.npatterns = _wcparse.compile(npattern, self.flags ^ (_wcparse.NEGATE | _wcparse.REALPATH))

    if not self.pattern and self.npatterns is not None:
        self.pattern.append(_wcparse.WcPathSplit((b'**' if self.is_bytes else '**'), self.flags).split())
python
{ "resource": "" }
q17047
Glob._match_excluded
train
def _match_excluded(self, filename, patterns):
    """Call match real directly to skip unnecessary `exists` check."""
    return _wcparse._match_real(
        filename,
        patterns._include,
        patterns._exclude,
        patterns._follow,
        self.symlinks
    )
python
{ "resource": "" }
q17048
Glob._is_excluded
train
def _is_excluded(self, path, dir_only):
    """Check if file is excluded."""
    return self.npatterns and self._match_excluded(path, self.npatterns)
python
{ "resource": "" }
q17049
Glob._match_literal
train
def _match_literal(self, a, b=None):
    """Match two names."""
    return a.lower() == b if not self.case_sensitive else a == b
python
{ "resource": "" }
q17050
Glob._get_matcher
train
def _get_matcher(self, target):
    """Get deep match."""
    if target is None:
        matcher = None
    elif isinstance(target, (str, bytes)):
        # Plain text match
        if not self.case_sensitive:
            match = target.lower()
        else:
            match = target
        matcher = functools.partial(self._match_literal, b=match)
    else:
        # File match pattern
        matcher = target.match
    return matcher
python
{ "resource": "" }
q17051
Glob._glob_dir
train
def _glob_dir(self, curdir, matcher, dir_only=False, deep=False):
    """Non recursive directory glob."""
    scandir = self.current if not curdir else curdir

    # Python will never return . or .., so fake it.
    if os.path.isdir(scandir) and matcher is not None:
        for special in self.specials:
            if matcher(special):
                yield os.path.join(curdir, special)

    try:
        if NO_SCANDIR_WORKAROUND:
            # Our current directory can be empty if the path starts with magic,
            # But we don't want to return paths with '.', so just use it to list
            # files, but use '' when constructing the path.
            with os.scandir(scandir) as scan:
                for f in scan:
                    try:
                        # Quicker to just test this way than to run through `fnmatch`.
                        if deep and self._is_hidden(f.name):
                            continue
                        path = os.path.join(curdir, f.name)
                        is_dir = f.is_dir()
                        if is_dir:
                            is_link = f.is_symlink()
                            self.symlinks[path] = is_link
                        else:
                            # We don't care if a file is a link
                            is_link = False
                        if deep and not self.follow_links and is_link:
                            continue
                        if (not dir_only or is_dir) and (matcher is None or matcher(f.name)):
                            yield path
                        if deep and is_dir:
                            yield from self._glob_dir(path, matcher, dir_only, deep)
                    except OSError:  # pragma: no cover
                        pass
        else:
            for f in os.listdir(scandir):
                # Quicker to just test this way than to run through `fnmatch`.
                if deep and self._is_hidden(f):
                    continue
                path = os.path.join(curdir, f)
                is_dir = os.path.isdir(path)
                if is_dir:
                    is_link = os.path.islink(path)
                    self.symlinks[path] = is_link
                else:
                    is_link = False
                if deep and not self.follow_links and is_link:
                    continue
                if (not dir_only or is_dir) and (matcher is None or matcher(f)):
                    yield path
                if deep and is_dir:
                    yield from self._glob_dir(path, matcher, dir_only, deep)
    except OSError:  # pragma: no cover
        pass
python
{ "resource": "" }
q17052
Glob._glob
train
def _glob(self, curdir, this, rest):
    """
    Handle glob flow.

    There are really only a couple of cases:

    - File name.
    - File name pattern (magic).
    - Directory.
    - Directory name pattern (magic).
    - Extra slashes `////`.
    - `globstar` `**`.
    """
    is_magic = this.is_magic
    dir_only = this.dir_only
    target = this.pattern
    is_globstar = this.is_globstar

    if is_magic and is_globstar:
        # Glob star directory `**`.

        # Throw away multiple consecutive `globstars`
        # and acquire the pattern after the `globstars` if available.
        this = rest.pop(0) if rest else None
        globstar_end = this is None
        while this and not globstar_end:
            if this:
                dir_only = this.dir_only
                target = this.pattern
            if this and this.is_globstar:
                this = rest.pop(0) if rest else None
                if this is None:
                    globstar_end = True
            else:
                break

        if globstar_end:
            target = None

        # We match `**/next` during a deep glob, so what ever comes back,
        # we will send back through `_glob` with pattern after `next` (`**/next/after`).
        # So grab `after` if available.
        this = rest.pop(0) if rest else None

        # Deep searching is the unique case where we
        # might feed in a `None` for the next pattern to match.
        # Deep glob will account for this.
        matcher = self._get_matcher(target)

        # If our pattern ends with `curdir/**`, but does not start with `**` it matches zero or more,
        # so it should return `curdir/`, signifying `curdir` + no match.
        # If a pattern follows `**/something`, we always get the appropriate
        # return already, so this isn't needed in that case.
        # There is one quirk though with Bash, if `curdir` had magic before `**`, Bash
        # omits the trailing `/`. We don't worry about that.
        if globstar_end and curdir:
            yield os.path.join(curdir, self.empty)

        # Search
        for path in self._glob_dir(curdir, matcher, dir_only, deep=True):
            if this:
                yield from self._glob(path, this, rest[:])
            else:
                yield path

    elif not dir_only:
        # Files: no need to recursively search at this point as we are done.
        matcher = self._get_matcher(target)
        yield from self._glob_dir(curdir, matcher)

    else:
        # Directory: search current directory against pattern
        # and feed the results back through with the next pattern.
        this = rest.pop(0) if rest else None
        matcher = self._get_matcher(target)
        for path in self._glob_dir(curdir, matcher, True):
            if this:
                yield from self._glob(path, this, rest[:])
            else:
                yield path
python
{ "resource": "" }
q17053
Glob._get_starting_paths
train
def _get_starting_paths(self, curdir):
    """
    Get the starting location.

    For case sensitive paths, we have to "glob" for
    it first as Python doesn't like for its users to
    think about case. By scanning for it, we can get
    the actual casing and then compare.
    """
    results = [curdir]

    if not self._is_parent(curdir) and not self._is_this(curdir):
        fullpath = os.path.abspath(curdir)
        basename = os.path.basename(fullpath)
        dirname = os.path.dirname(fullpath)
        if basename:
            matcher = self._get_matcher(basename)
            results = [os.path.basename(name) for name in self._glob_dir(dirname, matcher, self)]

    return results
python
{ "resource": "" }
q17054
Glob.glob
train
def glob(self):
    """Starts off the glob iterator."""
    # Cached symlinks
    self.symlinks = {}

    if self.is_bytes:
        curdir = os.fsencode(os.curdir)
    else:
        curdir = os.curdir

    for pattern in self.pattern:
        # If the pattern ends with `/` we return the files ending with `/`.
        dir_only = pattern[-1].dir_only if pattern else False

        if pattern:
            if not pattern[0].is_magic:
                # Path starts with normal plain text.
                # Let's verify the case of the starting directory (if possible).
                this = pattern[0]

                curdir = this[0]

                if not os.path.lexists(curdir):
                    return

                # Make sure case matches, but running case insensitive
                # on a case sensitive file system may return more than
                # one starting location.
                results = [curdir] if this.is_drive else self._get_starting_paths(curdir)
                if not results:
                    if not dir_only:
                        # There is no directory with this name,
                        # but we have a file and no directory restriction
                        yield curdir
                    return

                if this.dir_only:
                    # Glob these directories if they exist
                    for start in results:
                        if os.path.isdir(start):
                            rest = pattern[1:]
                            if rest:
                                this = rest.pop(0)
                                for match in self._glob(curdir, this, rest):
                                    if not self._is_excluded(match, dir_only):
                                        yield os.path.join(match, self.empty) if dir_only else match
                            elif not self._is_excluded(curdir, dir_only):
                                yield os.path.join(curdir, self.empty) if dir_only else curdir
                else:
                    # Return the file(s) and finish.
                    for start in results:
                        if os.path.lexists(start) and not self._is_excluded(start, dir_only):
                            yield os.path.join(start, self.empty) if dir_only else start
            else:
                # Path starts with a magic pattern, let's get globbing
                rest = pattern[:]
                this = rest.pop(0)
                for match in self._glob(curdir if not curdir == self.current else self.empty, this, rest):
                    if not self._is_excluded(match, dir_only):
                        yield os.path.join(match, self.empty) if dir_only else match
python
{ "resource": "" }
q17055
norm_slash
train
def norm_slash(name):
    """Normalize path slashes."""
    if isinstance(name, str):
        return name.replace('/', "\\") if not is_case_sensitive() else name
    else:
        return name.replace(b'/', b"\\") if not is_case_sensitive() else name
python
{ "resource": "" }
q17056
norm_pattern
train
def norm_pattern(pattern, normalize, is_raw_chars):
    r"""
    Normalize pattern.

    - For Windows systems we want to normalize slashes to \.
    - If raw string chars is enabled, we want to also convert
      encoded string chars to literal characters.
    - If `normalize` is enabled, take care to convert \/ to \\\\.
    """
    is_bytes = isinstance(pattern, bytes)

    if not normalize and not is_raw_chars:
        return pattern

    def norm_char(token):
        """Normalize slash."""
        if normalize and token in ('/', b'/'):
            token = br'\\' if is_bytes else r'\\'
        return token

    def norm(m):
        """Normalize the pattern."""
        if m.group(1):
            char = m.group(1)
            if normalize:
                char = (br'\\\\' if is_bytes else r'\\\\') if len(char) > 1 else norm_char(char)
        elif m.group(2):
            char = norm_char(BACK_SLASH_TRANSLATION[m.group(2)] if is_raw_chars else m.group(2))
        elif is_raw_chars and m.group(4):
            char = norm_char(bytes([int(m.group(4), 8) & 0xFF]) if is_bytes else chr(int(m.group(4), 8)))
        elif is_raw_chars and m.group(3):
            char = norm_char(bytes([int(m.group(3)[2:], 16)]) if is_bytes else chr(int(m.group(3)[2:], 16)))
        elif is_raw_chars and not is_bytes and m.group(5):
            char = norm_char(unicodedata.lookup(m.group(5)[3:-1]))
        elif not is_raw_chars:
            char = m.group(0)
        else:
            value = m.group(5) if is_bytes else m.group(6)
            pos = m.start(5) if is_bytes else m.start(6)
            raise SyntaxError("Could not convert character value %s at position %d" % (value, pos))
        return char

    return (RE_BNORM if is_bytes else RE_NORM).sub(norm, pattern)
python
{ "resource": "" }
q17057
is_hidden
train
def is_hidden(path):
    """Check if file is hidden."""
    hidden = False
    f = os.path.basename(path)

    if f[:1] in ('.', b'.'):
        # Count dot file as hidden on all systems
        hidden = True
    elif _PLATFORM == 'windows':
        # On Windows, look for `FILE_ATTRIBUTE_HIDDEN`
        FILE_ATTRIBUTE_HIDDEN = 0x2
        if PY35:
            results = os.lstat(path)
            hidden = bool(results.st_file_attributes & FILE_ATTRIBUTE_HIDDEN)
        else:
            if isinstance(path, bytes):
                attrs = ctypes.windll.kernel32.GetFileAttributesA(path)
            else:
                attrs = ctypes.windll.kernel32.GetFileAttributesW(path)
            hidden = attrs != -1 and attrs & FILE_ATTRIBUTE_HIDDEN
    elif _PLATFORM == "osx":  # pragma: no cover
        # On macOS, look for `UF_HIDDEN`
        results = os.lstat(path)
        hidden = bool(results.st_flags & stat.UF_HIDDEN)

    return hidden
python
{ "resource": "" }
q17058
StringIter.match
train
def match(self, pattern):
    """Perform regex match at index."""
    m = pattern.match(self._string, self._index)
    if m:
        self._index = m.end()
    return m
python
{ "resource": "" }
q17059
StringIter.iternext
train
def iternext(self):
    """Iterate through characters of the string."""
    try:
        char = self._string[self._index]
        self._index += 1
    except IndexError:  # pragma: no cover
        raise StopIteration

    return char
python
{ "resource": "" }
q17060
update_media_file
train
def update_media_file(upload_file):
    '''
    Update the Current Media Folder.

    Returns list of files copied across or raises an exception.
    '''
    temp_directory = tempfile.mkdtemp()
    temp_file = tempfile.TemporaryFile()
    # assumes the zip file contains a directory called media
    temp_media_file = os.path.join(temp_directory, 'media')
    try:
        for chunk in upload_file.chunks():
            temp_file.write(chunk)
        with zipfile.ZipFile(temp_file, 'r') as z:
            z.extractall(temp_directory)
        if os.path.exists(temp_media_file):
            return distutils.dir_util.copy_tree(
                temp_media_file,
                settings.MEDIA_ROOT)
        else:
            raise Exception("Error: There is no directory called "
                            "'media' in the root of the zipped file")
    except Exception as e:
        raise e
    finally:
        temp_file.close()
        if os.path.exists(temp_directory):
            shutil.rmtree(temp_directory)
python
{ "resource": "" }
q17061
get_image_hash
train
def get_image_hash(image):
    '''
    Returns an MD5 hash of the image file.

    Handles images stored locally and on AWS.

    I know this code is ugly. Please don't ask. The rabbit hole is deep.
    '''
    md5 = hashlib.md5()
    try:
        for chunk in image.file.chunks():
            md5.update(chunk)
        return md5.hexdigest()
    # this should only occur in tests
    except ValueError:
        # see link below for why we try not to use .open()
        # https://docs.djangoproject.com/en/1.9/ref/files/uploads/#django.core.files.uploadedfile.UploadedFile.chunks  # noqa
        image.file.open()
        for chunk in image.file.chunks():
            md5.update(chunk)
        return md5.hexdigest()
    finally:
        image.file.close()
python
{ "resource": "" }
q17062
attach_image
train
def attach_image(field, nested_fields, page, record_keeper=None):
    '''
    Attaches an image to the page if one is referenced in nested_fields.

    Currently assumes that images have already been imported and their
    info has been stored in record_keeper.
    '''
    if (field in nested_fields) and nested_fields[field]:
        foreign_image_id = nested_fields[field]["id"]

        # Handle the following:
        # record keeper may not exist
        # record keeper may not have image ref
        if record_keeper:
            try:
                local_image_id = record_keeper.get_local_image(
                    foreign_image_id)
                local_image = Image.objects.get(id=local_image_id)
                setattr(page, field, local_image)
            except ObjectDoesNotExist:
                raise ObjectDoesNotExist(
                    ("executing attach_image: local image referenced "
                     "in record_keeper does not actually exist."),
                    None)
            except Exception:
                raise
        else:
            raise Exception(
                ("Attempted to attach image without record_keeper. "
                 "This functionality is not yet implemented"))
python
{ "resource": "" }
q17063
rotate_content
train
def rotate_content(day=None):
    """
    This method gets the parameters that are needed for the rotate_latest
    and rotate_featured_in_homepage methods, and calls them both.
    """
    # getting the content rotation settings from site settings
    for main in Main.objects.all():
        site = main.sites_rooted_here.all().first()
        main_lang = Languages.for_site(site).languages.filter(
            is_main_language=True).first()
        index = SectionIndexPage.objects.live().child_of(main).first()
        site_settings = SiteSettings.for_site(site)
        if day is None:
            day = timezone.now().weekday()
        # calls the two rotate methods with the necessary params
        if main and index:
            rotate_latest(main_lang, index, main, site_settings, day)
            rotate_featured_in_homepage(main_lang, day, main)
python
{ "resource": "" }
q17064
run_wagtail_migration_before_core_34
train
def run_wagtail_migration_before_core_34(apps, schema_editor):
    """
    Molo core migration 34 needs migration 0040 from wagtail core,
    so this migration runs the wagtail migration before molo core
    migration 34.
    """
    db_alias = schema_editor.connection.alias
    emit_pre_migrate_signal(verbosity=2, interactive=False, db=db_alias)
python
{ "resource": "" }
q17065
load_child_articles_for_section
train
def load_child_articles_for_section(
        context, section, featured_in_section=None, count=5):
    """
    Returns all child articles.

    If the `locale_code` in the context is not the main language, it will
    return the translations of the live articles.
    """
    request = context.get('request')
    locale = context.get('locale_code')
    main_language_page = section.get_main_language_page()
    settings = SiteSettings.for_site(request.site) \
        if request else None

    # TODO: Consider caching the pks of these articles using a timestamp on
    # section as the key so that we don't always do these joins
    article_ordering = settings and settings.article_ordering_within_section
    order_by = ArticleOrderingChoices.\
        get(settings.article_ordering_within_section).name.lower() \
        if article_ordering \
        and settings.article_ordering_within_section !=\
        ArticleOrderingChoices.CMS_DEFAULT_SORTING\
        else '-first_published_at'

    order_by = order_by if order_by.find('_desc') == -1 \
        else '-{}'.format(order_by.replace('_desc', ''))

    child_articles = ArticlePage.objects.child_of(
        main_language_page).filter(
        language__is_main_language=True).order_by(order_by)

    if featured_in_section is not None:
        child_articles = child_articles.filter(
            featured_in_section=featured_in_section)\
            .order_by('-featured_in_section_start_date')

    related_articles = ArticlePage.objects.filter(
        related_sections__section__slug=main_language_page.slug)

    qs = list(chain(
        get_pages(context, child_articles, locale),
        get_pages(context, related_articles, locale)))

    # Pagination
    if count:
        p = context.get('p', 1)
        paginator = Paginator(qs, count)
        try:
            articles = paginator.page(p)
        except PageNotAnInteger:
            articles = paginator.page(1)
        except EmptyPage:
            articles = paginator.page(paginator.num_pages)
    else:
        articles = qs

    if not locale:
        return articles

    context.update({'articles_paginated': articles})
    return articles
python
{ "resource": "" }
q17066
load_child_sections_for_section
train
def load_child_sections_for_section(context, section, count=None):
    '''
    Returns all child sections.

    If the `locale_code` in the context is not the main language, it will
    return the translations of the live articles.
    '''
    page = section.get_main_language_page()
    locale = context.get('locale_code')

    qs = SectionPage.objects.child_of(page).filter(
        language__is_main_language=True)

    if not locale:
        return qs[:count]

    return get_pages(context, qs, locale)
python
{ "resource": "" }
q17067
load_sibling_sections
train
def load_sibling_sections(context, section, count=None):
    '''
    Returns all sibling sections.

    If the `locale_code` in the context is not the main language, it will
    return the translations of the live articles.
    '''
    page = section.get_main_language_page()
    locale = context.get('locale_code')

    qs = SectionPage.objects.sibling_of(page).filter(
        language__is_main_language=True)

    if not locale:
        return qs[:count]

    return get_pages(context, qs, locale)
python
{ "resource": "" }
q17068
ImportableMixin.create_page
train
def create_page(self, content, class_, record_keeper=None, logger=None):
    '''
    As robust as possible.

    Attempts to create the page. If any of the functions used to attach
    content to the page fail, keep going, keep a record of those errors
    in a context dict, and return the page and the context dict in a tuple.
    '''
    fields, nested_fields = separate_fields(content)

    foreign_id = content.pop('id')

    # remove unwanted fields
    if 'latest_revision_created_at' in content:
        content.pop('latest_revision_created_at')

    page = class_(**fields)

    # create functions to attach attributes
    function_args_mapping = (
        # add_section_time
        (add_json_dump, ("time", nested_fields, page)),
        # add_tags
        (add_list_of_things, ("tags", nested_fields, page)),
        # add_metadata_tags
        (add_list_of_things, ("metadata_tags", nested_fields, page)),
        # attach_image
        (attach_image, ("image", nested_fields, page, record_keeper)),
        # attach_social_media_image
        (attach_image, ("social_media_image", nested_fields, page, record_keeper)),
        # attach_banner_image
        (attach_image, ("banner", nested_fields, page, record_keeper)),
    )

    for mapping in function_args_mapping:
        function = mapping[0]
        _args = mapping[1]
        try:
            function(*_args)
        except Exception as e:
            if logger:
                logger.log(
                    ERROR,
                    "Failed to create page content",
                    {
                        "foreign_page_id": foreign_id,
                        "exception": e,
                        "function": function.__name__,
                    })

    # Handle content in nested_fields
    body = add_stream_fields(nested_fields, page)

    # body has not been added as it contains reference to pages
    if body:
        record_keeper.article_bodies[foreign_id] = body

    # Handle relationships in nested_fields
    if record_keeper:
        record_relation_functions = [
            record_keeper.record_nav_tags,
            record_keeper.record_recommended_articles,
            record_keeper.record_reaction_questions,
            record_keeper.record_related_sections,
            record_keeper.record_section_tags,
            record_keeper.record_banner_page_link,
        ]
        for function in record_relation_functions:
            try:
                function(nested_fields, foreign_id)
            except Exception as e:
                if logger:
                    logger.log(
                        ERROR,
                        "Failed to record content",
                        {
                            "foreign_page_id": foreign_id,
                            "exception": e,
                            "function": function.__name__,
                        })

    return page
python
{ "resource": "" }
q17069
upload_file
train
def upload_file(request):
    '''Upload a Zip File Containing a single file containing media.'''
    if request.method == 'POST':
        form = MediaForm(request.POST, request.FILES)
        if form.is_valid():
            context_dict = {}
            try:
                context_dict['copied_files'] = update_media_file(
                    request.FILES['zip_file'])
            except Exception as e:
                context_dict['error_message'] = e.message
            return render(request,
                          'django_admin/transfer_media_message.html',
                          context_dict)
    else:
        form = MediaForm()
    return render(request, 'django_admin/upload_media.html', {'form': form})
python
{ "resource": "" }
q17070
download_file
train
def download_file(request):
    '''Create and download a zip file containing the media file.'''
    if request.method == "GET":
        if path.exists(settings.MEDIA_ROOT):
            zipfile_name = 'media_%s.zip' % settings.SITE_NAME
            in_memory_file = BytesIO()
            media_zipfile = zipfile.ZipFile(in_memory_file, 'w',
                                            zipfile.ZIP_DEFLATED)
            directory_name = path.split(settings.MEDIA_ROOT)[-1]
            for root, dirs, files in walk(directory_name):
                for file in files:
                    media_zipfile.write(path.join(root, file))
            media_zipfile.close()
            resp = HttpResponse(in_memory_file.getvalue(),
                                content_type="application/x-zip-compressed")
            resp['Content-Disposition'] = (
                'attachment; filename=%s' % zipfile_name)
        else:
            resp = render(request,
                          'django_admin/transfer_media_message.html',
                          {'error_message': 'media file does not exist'})
    else:
        resp = HttpResponseNotAllowed(permitted_methods=['GET'])
    return resp
python
{ "resource": "" }
q17071
update_permissions_for_group
train
def update_permissions_for_group(apps, schema_editor):
    '''
    Update permissions for some users.

    Give bulk-delete permissions to moderators. Give edit permission to
    moderators and editors in order to display 'Main' page in the explorer.
    '''
    db_alias = schema_editor.connection.alias
    try:
        # Django 1.9
        emit_post_migrate_signal(2, False, db_alias)
    except TypeError:
        # Django < 1.9
        try:
            # Django 1.8
            emit_post_migrate_signal(2, False, 'default', db_alias)
        except TypeError:
            # Django < 1.8
            emit_post_migrate_signal([], 2, False, 'default', db_alias)

    Group = apps.get_model('auth.Group')
    Permission = apps.get_model('auth.Permission')
    GroupPagePermission = apps.get_model('wagtailcore.GroupPagePermission')
    SectionIndexPage = apps.get_model('core.SectionIndexPage')
    MainPage = apps.get_model('core.Main')

    moderator_group = Group.objects.filter(name='Moderators').first()
    editor_group = Group.objects.filter(name='Editors').first()
    if moderator_group:
        sections = SectionIndexPage.objects.first()
        GroupPagePermission.objects.get_or_create(
            group_id=moderator_group.id,
            page_id=sections.id,
            permission_type='bulk_delete'
        )
        main = MainPage.objects.first()
        GroupPagePermission.objects.get_or_create(
            group_id=moderator_group.id,
            page_id=main.id,
            permission_type='edit'
        )
    if editor_group:
        main = MainPage.objects.first()
        GroupPagePermission.objects.get_or_create(
            group_id=editor_group.id,
            page_id=main.id,
            permission_type='edit'
        )
python
{ "resource": "" }
q17072
MainLanguageFilter.filter_queryset
train
def filter_queryset(self, request, queryset, view):
    """
    Returns only pages in the main language for a site.
    """
    if 'is_main_language' in request.GET:
        # TODO: investigate possible error cases where page
        # does not have language
        return queryset.filter(languages__language__is_main_language=True)
    else:
        return queryset
python
{ "resource": "" }
q17073
create_recomended_articles
train
def create_recomended_articles(main_article, article_list):
    '''
    Creates recommended article objects from article_list
    and _prepends_ to existing recommended articles.
    '''
    # store existing recommended articles
    existing_recommended_articles = [
        ra.recommended_article.specific
        for ra in main_article.recommended_articles.all()]
    # delete existing recommended articles
    ArticlePageRecommendedSections.objects.filter(page=main_article).delete()

    for hyperlinked_article in article_list:
        ArticlePageRecommendedSections(
            page=main_article,
            recommended_article=hyperlinked_article).save()

    # re-create existing recommended articles
    for article in existing_recommended_articles:
        if article not in article_list:
            ArticlePageRecommendedSections(
                page=main_article,
                recommended_article=article).save()
python
{ "resource": "" }
q17074
seperate_end_page_links
train
def seperate_end_page_links(stream_data):
    '''
    Separate out page blocks at the end of a StreamField.

    Accepts: List of streamfield blocks
    Returns: Tuple of 2 lists of blocks - (remaining body, final page links)
    '''
    stream_data_copy = list(stream_data)

    end_page_links = []
    for block in stream_data_copy[::-1]:
        if block['type'] == 'page':
            end_page_links.insert(0, block)
            stream_data_copy.pop()
        else:
            break
    return (stream_data_copy, end_page_links)
python
{ "resource": "" }
q17075
cookiecutter
train
def cookiecutter(template, checkout=None, no_input=False, extra_context=None):
    """
    Replacement for cookiecutter's own cookiecutter.

    The difference with cookiecutter's cookiecutter function is that this
    one doesn't automatically str() all the values passed along to the
    template.

    :param template: A directory containing a project template directory,
        or a URL to a git repository.
    :param checkout: The branch, tag or commit ID to checkout after clone.
    :param no_input: Prompt the user at command line for manual
        configuration?
    :param extra_context: A dictionary of context that overrides default
        and user configuration.
    """
    # Get user config from ~/.cookiecutterrc or equivalent
    # If no config file, sensible defaults from config.DEFAULT_CONFIG are used
    config_dict = get_user_config()

    template = expand_abbreviations(template, config_dict)

    # TODO: find a better way to tell if it's a repo URL
    if 'git@' in template or 'https://' in template:
        repo_dir = clone(
            repo_url=template,
            checkout=checkout,
            clone_to_dir=config_dict['cookiecutters_dir'],
            no_input=no_input
        )
    else:
        # If it's a local repo, no need to clone or copy to your
        # cookiecutters_dir
        repo_dir = template

    context_file = os.path.join(repo_dir, 'cookiecutter.json')
    logging.debug('context_file is {0}'.format(context_file))

    context = generate_context(
        context_file=context_file,
        default_context=config_dict['default_context'],
        extra_context=extra_context,
    )

    # Create project from local context and project template.
    generate_files(
        repo_dir=repo_dir,
        context=context
    )
python
{ "resource": "" }
q17076
MoloPagesEndpoint.get_queryset
train
def get_queryset(self):
    '''
    This is overwritten in order to not exclude drafts
    and pages submitted for moderation.
    '''
    request = self.request

    # Allow pages to be filtered to a specific type
    if 'type' not in request.GET:
        model = Page
    else:
        model_name = request.GET['type']
        try:
            model = resolve_model_string(model_name)
        except LookupError:
            raise BadRequestError("type doesn't exist")
        if not issubclass(model, Page):
            raise BadRequestError("type doesn't exist")

    # This is the overwritten line
    queryset = model.objects.public()  # exclude .live()

    # Filter by site
    queryset = queryset.descendant_of(
        request.site.root_page, inclusive=True)

    return queryset
python
{ "resource": "" }
q17077
LanguagesAPIEndpoint.get_queryset
train
def get_queryset(self):
    '''
    Only serve site-specific languages
    '''
    request = self.request
    return (Languages.for_site(request.site)
            .languages.all().order_by('pk'))
python
{ "resource": "" }
q17078
list_of_objects_from_api
train
def list_of_objects_from_api(url):
    '''
    The API only serves 20 items per page by default.
    This fetches info on all items and returns them as a list.

    Assumption: the API page size is not less than 20.
    '''
    response = requests.get(url)

    content = json.loads(response.content)
    count = content["meta"]["total_count"]

    if count <= 20:
        return content["items"]
    else:
        items = [] + content["items"]
        # Use true division: count // 20 is already floored, so wrapping
        # it in ceil() would never round up
        num_pages = int(math.ceil(count / 20.0))
        # The first page was fetched above, so request offsets 20, 40, ...
        for i in range(1, num_pages):
            paginated_url = "{}?limit=20&offset={}".format(
                url, str(i * 20))
            paginated_response = requests.get(paginated_url)
            items = items + json.loads(paginated_response.content)["items"]
        return items
python
{ "resource": "" }
q17079
PageImporter.get_content_from_url
train
def get_content_from_url(self, base_url): """ Sections can have SectionPage and ArticlePage child objects. These have different fields, and thus have to be treated differently. """ # assemble url base_url = base_url.rstrip("/") url = base_url + API_PAGES_ENDPOINT + "?type=" + self._content_type + \ "&fields=" + ",".join(self._fields) + \ "&order=latest_revision_created_at" # make request try: response = requests.get(url) self._base_url = base_url self._content = response.json() self._content = self._content["items"] return self._content except requests.exceptions.ConnectionError: return "No content could be found from {}. " \ "Are you sure this is the correct URL?".format(base_url) except requests.exceptions.RequestException: return "Content could not be imported at this time. " \ "Please try again later."
python
{ "resource": "" }
q17080
SectionPageImporter.save
train
def save(self, indexes, parent_id):
    """
    Save the selected section.

    This will save the selected section as well as its direct child
    pages obtained through the ?child_of query parameter. The
    ?descendant_of query parameter is probably better suited because
    it returns all pages under that part of the tree. The problem,
    however, is that it requires being able to traverse the tree and
    recreate parent-child relationships after the pages are imported.
    """
    if self.content():
        parent = Page.objects.get(id=parent_id)

        # Save the selected section page
        response = requests.get(
            self._base_url + API_PAGES_ENDPOINT + str(indexes[0]) + "/"
        )
        section_page = response.json()
        self.process_child_section(section_page["id"], parent)
python
{ "resource": "" }
q17081
ImageImporter.get_image_details
train
def get_image_details(self): ''' Create a reference of site images by hash If there are duplicate images, only store the first and create warnings for other images ''' if Image.objects.count() == 0: return None total = Image.objects.count() count = 1 for local_image in Image.objects.all(): if not hasattr(local_image, 'image_info'): ImageInfo.objects.create(image=local_image) local_image.refresh_from_db() hash_ = local_image.image_info.image_hash if hash_ in self.image_hashes: self.log(WARNING, "Image found with matching hash", context={ "composite hash": hash_, "hashed image ID": self.image_hashes[hash_].id, "matching image ID": local_image.id, }) else: self.image_hashes[hash_] = local_image self.log(ACTION, "{}/{} images processed".format(count, total)) count += 1
python
{ "resource": "" }
q17082
ImageImporter.fetch_and_create_image
train
def fetch_and_create_image(self, url, image_title):
    '''
    Fetches and creates an image object.

    Returns a tuple with the Image object and a context dictionary
    containing the request URL.
    '''
    context = {
        "file_url": url,
        "foreign_title": image_title,
    }
    try:
        image_file = requests.get(url)
        local_image = Image(
            title=image_title,
            file=ImageFile(
                BytesIO(image_file.content),
                name=image_title
            )
        )
        local_image.save()
        return (local_image, context)
    except Exception as e:
        context.update({
            "exception": e,
        })
        raise ImageCreationFailed(context, None)
python
{ "resource": "" }
q17083
ImageImporter.import_image
train
def import_image(self, image_id): ''' Imports and returns tuple with image and context dict Input: foreign image ID Output: (Image: imported image, Dict: info about import) Side effects: If Importer object has a record_keeper, it will update the record of foreign to local images. Attempts to avoid duplicates by matching image dimensions and hashes. If a match is found it refers to local instance instead. If it is not, the image is fetched, created and referenced. ''' image_detail_url = "{}{}/".format(self.image_url, image_id) try: img_response = requests.get(image_detail_url) img_info = json.loads(img_response.content) except Exception as e: error_context = { "image detail url": image_detail_url, "exception": e, } raise ImageInfoFetchFailed(error_context) if img_info["image_hash"] is None: raise ValueError('image hash should not be none') # check if a replica exists local_image = self.get_replica_image( img_info["image_hash"]) file_url = img_info['image_url'] # handle when image_url is relative # assumes that image import means local storage if img_info['image_url'][0] == '/': file_url = "{}{}".format( self.base_url, img_info['image_url']) if local_image: context = { "local_version_existed": True, "file_url": file_url, "image_detail_url": image_detail_url, "foreign_title": img_info["title"], } # update record keeper if self.record_keeper: self.record_keeper.record_image_relation( image_id, local_image.id) return (local_image, context) else: new_image, context = self.fetch_and_create_image( file_url, img_info["title"]) # update record keeper if self.record_keeper: self.record_keeper.record_image_relation( image_id, new_image.id) context.update({ "local_version_existed": False, }) return (new_image, context)
python
{ "resource": "" }
q17084
ImageImporter.import_images
train
def import_images(self):
    '''
    Fetches all images from the source site.

    Handles errors in the creation process, updates the record_keeper,
    and logs the result of each attempt to create an image.
    '''
    self.log(ACTION, "Importing Images")
    try:
        images = list_of_objects_from_api(self.image_url)
    except Exception as e:
        raise ImageInfoFetchFailed(
            "Something went wrong fetching the list of images: {}".format(e))

    if not images:
        return None

    # iterate through foreign images
    for image_summary in images:
        self.log(ACTION, "Importing Image", depth=1)
        try:
            (image, context) = self.import_image(image_summary["id"])
            # log success
            self.log(SUCCESS, "Importing Image", context=context, depth=1)
        except ImageInfoFetchFailed as e:
            self.log(ERROR, "Importing Images", e, depth=1)
        except ImageCreationFailed as e:
            self.log(ERROR, "Importing Images", e.message, depth=1)
        except Exception as e:
            context = {
                "exception": e,
                "foreign_image_id": image_summary["id"],
            }
            self.log(ERROR, "Importing Images", context, depth=1)
python
{ "resource": "" }
q17085
LanguageImporter.get_language_ids
train
def get_language_ids(self): ''' Return list of foreign language IDs from API language endpoint TODO: add in validation before creating languages ''' languages = list_of_objects_from_api(self.language_url) language_ids = [] for language in languages: language_ids.append(language["id"]) return language_ids
python
{ "resource": "" }
q17086
ContentImporter.recreate_relationships
train
def recreate_relationships(self, class_, attribute_name, key):
    '''
    Recreates one-to-many relationships
    '''
    iterable = self.record_keeper.foreign_to_many_foreign_map[key]
    for foreign_page_id, foreign_page_id_list in iteritems(iterable):
        # Assumption: local page has been indexed and exists
        # TODO: handle case where it doesn't exist
        local_page_id = self.record_keeper.get_local_page(foreign_page_id)
        local_page = Page.objects.get(id=local_page_id).specific

        for _foreign_page_id in foreign_page_id_list:
            try:
                local_version_page_id = (self.record_keeper
                                         .get_local_page(_foreign_page_id))
                foreign_page = Page.objects.get(
                    id=local_version_page_id).specific
                relationship_object = class_(page=local_page)
                setattr(relationship_object, attribute_name, foreign_page)
                relationship_object.save()
            except Exception as e:
                context = {
                    "exception": e,
                    "function_schema": ("recreate_relationships"
                                        "(class, attribute_name, key)"),
                    "attribute_name": str(attribute_name),
                    "key": str(key),
                    "class": str(class_),
                    "foreign_page_id": str(foreign_page_id),
                }
                self.log(ERROR, "recreating relationships", context=context)
python
{ "resource": "" }
q17087
ContentImporter.recreate_relationship
train
def recreate_relationship(self, attribute_name, key):
    '''
    Recreates a one-to-one relationship
    '''
    # Look relationships up under the key passed in, rather than
    # hard-coding "banner_link_page", so the method works for any
    # recorded one-to-one mapping
    iterable = self.record_keeper.foreign_to_foreign_map[key]
    for foreign_page_id, linked_page_foreign_id in iteritems(iterable):
        # get the local page
        local_page_id = self.record_keeper.get_local_page(foreign_page_id)
        local_page = Page.objects.get(id=local_page_id).specific

        # get the local linked page
        local_id = self.record_keeper.get_local_page(
            linked_page_foreign_id)
        linked_page = Page.objects.get(id=local_id).specific

        # link the two together
        setattr(local_page, attribute_name, linked_page)
        # TODO: review publishing and saving revisions
        local_page.save_revision().publish()
python
{ "resource": "" }
q17088
ContentImporter.recreate_article_body
train
def recreate_article_body(self): ''' Handles case where article body contained page or image. Assumes all articles and images have been created. ''' for foreign_id, body in iteritems(self.record_keeper.article_bodies): try: local_page_id = self.record_keeper.get_local_page(foreign_id) page = Page.objects.get(id=local_page_id).specific # iterate through the body new_body = [] for item in body: if not item['value']: continue if item['type'] == 'page': new_page_id = self.record_keeper.get_local_page( item['value']) item['value'] = new_page_id elif item['type'] == 'image': new_image_id = self.record_keeper.get_local_image( item['value']) item['value'] = new_image_id new_body.append(item) setattr(page, 'body', json.dumps(new_body)) page.save_revision().publish() except Exception as e: self.log(ERROR, "recreating article body", { "exception": e, "foreign_id": foreign_id, "body": body, }, depth=1)
python
{ "resource": "" }
q17089
ContentImporter.get_foreign_page_id_from_type
train
def get_foreign_page_id_from_type(self, page_type): ''' Get the foreign page id based on type Only works for index pages ''' # TODO: log this response = requests.get("{}pages/?type={}".format( self.api_url, page_type)) content = json.loads(response.content) return content["items"][0]["id"]
python
{ "resource": "" }
q17090
ContentImporter.attach_translated_content
train
def attach_translated_content(self, local_main_lang_page, content, locale):
    '''
    Wrapper for attach_page

    Creates the content, then attaches a language relation from the
    main language page to the newly created page.

    Note: we get the parent from the main language page
    '''
    try:
        page = self.attach_page(
            local_main_lang_page.get_parent(),
            content)
    except Exception:
        # TODO: log this
        return None

    try:
        # create the translation object for page
        language = SiteLanguageRelation.objects.get(
            language_setting=self.language_setting,
            locale=locale)

        page.language = language
        page.translated_pages.add(local_main_lang_page)
        local_main_lang_page.translated_pages.add(page)
        page.save()
        local_main_lang_page.save()
    except Exception:
        # TODO: log that creating translation failed
        # TODO: log that page is now being deleted
        page.delete()
        # the page was deleted, so don't hand back a stale instance
        return None

    return page
python
{ "resource": "" }
q17091
ContentImporter.copy_page_and_children
train
def copy_page_and_children(self, foreign_id, parent_id, depth=0):
    '''
    Recursively copies over pages, their translations, and child pages
    '''
    url = "{}/api/v2/pages/{}/".format(self.base_url, foreign_id)
    self.log(ACTION, "Requesting Data", {"url": url}, depth)
    try:
        # TODO: create a robust wrapper around this functionality
        response = requests.get(url)
        content = json.loads(response.content)
    except Exception as e:
        self.log(ERROR, "Requesting Data - abandoning copy",
                 {"url": url, "exception": e},
                 depth)
        return None

    parent = Page.objects.get(id=parent_id).specific

    page = None
    try:
        self.log(ACTION, "Create Page", {"url": url}, depth)
        page = self.attach_page(parent, content)
        if page:
            self.log(SUCCESS, "Create Page",
                     {"url": url,
                      "page title": page.title.encode('utf-8')},
                     depth)
    except PageNotImportable as e:
        # pop the message once; popping the same key twice would raise
        # a KeyError, so pass the remaining dict as context
        message = e.message.pop("message")
        self.log(WARNING, message, e.message, depth)
        return None

    if page:
        # create translations
        if content["meta"]["translations"]:
            for translation_obj in content["meta"]["translations"]:
                _url = "{}/api/v2/pages/{}/".format(self.base_url,
                                                    translation_obj["id"])
                # TODO: create a robust wrapper around this functionality
                _response = requests.get(_url)
                self.log(
                    ACTION, "Getting translated content", {"url": _url},
                    depth)

                if _response.content:
                    _content = json.loads(_response.content)
                    if ("locale" in translation_obj and
                            translation_obj["locale"]):
                        self.attach_translated_content(
                            page, _content, translation_obj["locale"])
                    else:
                        self.log(
                            ERROR,
                            "locale is null",
                            {"url": _url, },
                            depth)
                else:
                    self.log(
                        ERROR, "Getting translated content",
                        {"url": _url}, depth)

        main_language_child_ids = content["meta"]["main_language_children"]

        # recursively iterate through child nodes
        if main_language_child_ids:
            for main_language_child_id in main_language_child_ids:
                try:
                    self.copy_page_and_children(
                        foreign_id=main_language_child_id,
                        parent_id=page.id,
                        depth=depth + 1)
                except Exception as e:
                    self.log(ERROR, "Copying Children",
                             {"url": url, "exception": e})
python
{ "resource": "" }
q17092
ContentImporter.copy_children
train
def copy_children(self, foreign_id, existing_node): ''' Initiates copying of tree, with existing_node acting as root ''' url = "{}/api/v2/pages/{}/".format(self.base_url, foreign_id) self.log( ACTION, "Copying Children", {"existing node type": str(type(existing_node))}) # TODO: create a robust wrapper around this functionality try: self.log(ACTION, "Requesting Data", {"url": url}) response = requests.get(url) content = json.loads(response.content) self.log(SUCCESS, "Data Fetched Successfully", {"url": url}) main_language_child_ids = content["meta"]["main_language_children"] if main_language_child_ids: for main_language_child_id in main_language_child_ids: self.copy_page_and_children( foreign_id=main_language_child_id, parent_id=existing_node.id, depth=1) else: self.log(SUCCESS, "No children to copy") except Exception as e: self.log(ERROR, "Copying Children", {"url": url, "exception": e})
python
{ "resource": "" }
q17093
Logger.get_email_logs
train
def get_email_logs(self): ''' Returns a string representation of logs. Only displays errors and warnings in the email logs to avoid being verbose ''' message = "" for log in self.record: if log["log_type"] in [ERROR, WARNING]: message += self.format_message(**log) return message
python
{ "resource": "" }
q17094
GitRun.run
train
def run(self, cmd):
    """Execute git command in bash"""
    cmd = ['git', '--git-dir=%s' % self.path] + cmd
    print("cmd list", cmd)
    print("cmd", ' '.join(cmd))
    res = None
    try:
        res = subprocess.check_output(cmd)
    except (subprocess.CalledProcessError, OSError):
        # swallow git failures (and a missing git binary) and return
        # None, matching the original intent of ignoring errors here
        pass
    if res:
        # check_output returns bytes; decode() already defaults to
        # UTF-8, so retrying with 'utf-8' was a no-op - replace
        # undecodable bytes instead
        res = res.decode('utf-8', errors='replace')
    return res
python
{ "resource": "" }
q17095
GitRun.update
train
def update(self): """Get a repository git or update it""" if not os.path.isdir(os.path.join(self.path)): os.makedirs(self.path) if not os.path.isdir(os.path.join(self.path, 'refs')): subprocess.check_output([ 'git', 'clone', '--bare', self.repo_git, self.path ]) self.run(['gc', '--auto', '--prune=all']) self.run(['fetch', '-p', 'origin', '+refs/heads/*:refs/heads/*']) # github support self.run(['fetch', 'origin', '+refs/pull/*/head:refs/pull/*']) # gitlab support self.run([ 'fetch', 'origin', '+refs/merge-requests/*/head:refs/pull/*'])
python
{ "resource": "" }
q17096
oauth_session
train
def oauth_session(request, state=None, token=None):
    """ Constructs the OAuth2 session object. """
    if settings.DISCORD_REDIRECT_URI is not None:
        redirect_uri = settings.DISCORD_REDIRECT_URI
    else:
        redirect_uri = request.build_absolute_uri(
            reverse('discord_bind_callback'))
    # Discord's OAuth2 scope for basic account access is 'identify',
    # not 'identity'
    scope = (['email', 'guilds.join'] if settings.DISCORD_EMAIL_SCOPE
             else ['identify', 'guilds.join'])
    return OAuth2Session(settings.DISCORD_CLIENT_ID,
                         redirect_uri=redirect_uri,
                         scope=scope,
                         token=token,
                         state=state)
python
{ "resource": "" }
q17097
create_audio_mp3_profile
train
def create_audio_mp3_profile(apps, schema_editor): """ Create audio_mp3 profile """ Profile = apps.get_model('edxval', 'Profile') Profile.objects.get_or_create(profile_name=AUDIO_MP3_PROFILE)
python
{ "resource": "" }
q17098
delete_audio_mp3_profile
train
def delete_audio_mp3_profile(apps, schema_editor): """ Delete audio_mp3 profile """ Profile = apps.get_model('edxval', 'Profile') Profile.objects.filter(profile_name=AUDIO_MP3_PROFILE).delete()
python
{ "resource": "" }
q17099
Transcript.convert
train
def convert(cls, content, input_format, output_format): """ Convert transcript `content` from `input_format` to `output_format`. Arguments: content: Transcript content byte-stream. input_format: Input transcript format. output_format: Output transcript format. Accepted input formats: sjson, srt. Accepted output format: srt, sjson. Raises: TranscriptsGenerationException: On parsing the invalid srt content during conversion from srt to sjson. """ assert input_format in ('srt', 'sjson') assert output_format in ('srt', 'sjson') # Decode the content with utf-8-sig which will also # skip byte order mark(BOM) character if found. content = content.decode('utf-8-sig') if input_format == output_format: return content if input_format == 'srt': if output_format == 'sjson': try: # With error handling (set to 'ERROR_RAISE'), we will be getting # the exception if something went wrong in parsing the transcript. srt_subs = SubRipFile.from_string(content, error_handling=SubRipFile.ERROR_RAISE) except Error as ex: # Base exception from pysrt raise TranscriptsGenerationException(text_type(ex)) return json.dumps(cls.generate_sjson_from_srt(srt_subs)) if input_format == 'sjson': if output_format == 'srt': return cls.generate_srt_from_sjson(json.loads(content))
python
{ "resource": "" }