_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q30400
SubsCenterProvider._search_url_titles
train
def _search_url_titles(self, title): """Search the URL titles by kind for the given `title`. :param str title: title to search for. :return: the URL titles by kind. :rtype: collections.defaultdict """ # make the search logger.info('Searching title name for %r', title) r = self.session.get(self.server_url + 'subtitle/search/', params={'q': title}, timeout=10) r.raise_for_status() # check for redirections if r.history and all([h.status_code == 302 for h in r.history]): logger.debug('Redirected to the subtitles page') links = [r.url] else: # get the suggestions (if needed) soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser']) links = [link.attrs['href'] for link in soup.select('#processes div.generalWindowTop a')] logger.debug('Found %d suggestions', len(links)) url_titles = defaultdict(list) for link in links: parts = link.split('/') url_titles[parts[-3]].append(parts[-2]) return url_titles
python
{ "resource": "" }
q30401
Video.age
train
def age(self): """Age of the video""" if self.exists: return datetime.utcnow() - datetime.utcfromtimestamp(os.path.getmtime(self.name)) return timedelta()
python
{ "resource": "" }
q30402
RegistrableExtensionManager.register
train
def register(self, entry_point): """Register an extension :param str entry_point: extension to register (entry point syntax). :raise: ValueError if already registered. """ if entry_point in self.registered_extensions: raise ValueError('Extension already registered') ep = EntryPoint.parse(entry_point) if ep.name in self.names(): raise ValueError('An extension with the same name already exist') ext = self._load_one_plugin(ep, False, (), {}, False) self.extensions.append(ext) if self._extensions_by_name is not None: self._extensions_by_name[ext.name] = ext self.registered_extensions.insert(0, entry_point)
python
{ "resource": "" }
q30403
RegistrableExtensionManager.unregister
train
def unregister(self, entry_point): """Unregister a provider :param str entry_point: provider to unregister (entry point syntax). """ if entry_point not in self.registered_extensions: raise ValueError('Extension not registered') ep = EntryPoint.parse(entry_point) self.registered_extensions.remove(entry_point) if self._extensions_by_name is not None: del self._extensions_by_name[ep.name] for i, ext in enumerate(self.extensions): if ext.name == ep.name: del self.extensions[i] break
python
{ "resource": "" }
q30404
hash_opensubtitles
train
def hash_opensubtitles(video_path): """Compute a hash using OpenSubtitles' algorithm. :param str video_path: path of the video. :return: the hash. :rtype: str """ bytesize = struct.calcsize(b'<q') with open(video_path, 'rb') as f: filesize = os.path.getsize(video_path) filehash = filesize if filesize < 65536 * 2: return for _ in range(65536 // bytesize): filebuffer = f.read(bytesize) (l_value,) = struct.unpack(b'<q', filebuffer) filehash += l_value filehash &= 0xFFFFFFFFFFFFFFFF # to remain as 64bit number f.seek(max(0, filesize - 65536), 0) for _ in range(65536 // bytesize): filebuffer = f.read(bytesize) (l_value,) = struct.unpack(b'<q', filebuffer) filehash += l_value filehash &= 0xFFFFFFFFFFFFFFFF returnedhash = '%016x' % filehash return returnedhash
python
{ "resource": "" }
q30405
hash_thesubdb
train
def hash_thesubdb(video_path): """Compute a hash using TheSubDB's algorithm. :param str video_path: path of the video. :return: the hash. :rtype: str """ readsize = 64 * 1024 if os.path.getsize(video_path) < readsize: return with open(video_path, 'rb') as f: data = f.read(readsize) f.seek(-readsize, os.SEEK_END) data += f.read(readsize) return hashlib.md5(data).hexdigest()
python
{ "resource": "" }
q30406
hash_napiprojekt
train
def hash_napiprojekt(video_path): """Compute a hash using NapiProjekt's algorithm. :param str video_path: path of the video. :return: the hash. :rtype: str """ readsize = 1024 * 1024 * 10 with open(video_path, 'rb') as f: data = f.read(readsize) return hashlib.md5(data).hexdigest()
python
{ "resource": "" }
q30407
hash_shooter
train
def hash_shooter(video_path): """Compute a hash using Shooter's algorithm :param string video_path: path of the video :return: the hash :rtype: string """ filesize = os.path.getsize(video_path) readsize = 4096 if os.path.getsize(video_path) < readsize * 2: return None offsets = (readsize, filesize // 3 * 2, filesize // 3, filesize - readsize * 2) filehash = [] with open(video_path, 'rb') as f: for offset in offsets: f.seek(offset) filehash.append(hashlib.md5(f.read(readsize)).hexdigest()) return ';'.join(filehash)
python
{ "resource": "" }
q30408
sanitize
train
def sanitize(string, ignore_characters=None): """Sanitize a string to strip special characters. :param str string: the string to sanitize. :param set ignore_characters: characters to ignore. :return: the sanitized string. :rtype: str """ # only deal with strings if string is None: return ignore_characters = ignore_characters or set() # replace some characters with one space characters = {'-', ':', '(', ')', '.'} - ignore_characters if characters: string = re.sub(r'[%s]' % re.escape(''.join(characters)), ' ', string) # remove some characters characters = {'\''} - ignore_characters if characters: string = re.sub(r'[%s]' % re.escape(''.join(characters)), '', string) # replace multiple spaces with one string = re.sub(r'\s+', ' ', string) # strip and lower case return string.strip().lower()
python
{ "resource": "" }
q30409
sanitize_release_group
train
def sanitize_release_group(string): """Sanitize a `release_group` string to remove content in square brackets. :param str string: the release group to sanitize. :return: the sanitized release group. :rtype: str """ # only deal with strings if string is None: return # remove content in square brackets string = re.sub(r'\[\w+\]', '', string) # strip and upper case return string.strip().upper()
python
{ "resource": "" }
q30410
subliminal
train
def subliminal(ctx, addic7ed, legendastv, opensubtitles, subscenter, cache_dir, debug): """Subtitles, faster than your thoughts.""" # create cache directory try: os.makedirs(cache_dir) except OSError: if not os.path.isdir(cache_dir): raise # configure cache region.configure('dogpile.cache.dbm', expiration_time=timedelta(days=30), arguments={'filename': os.path.join(cache_dir, cache_file), 'lock_factory': MutexLock}) # configure logging if debug: handler = logging.StreamHandler() handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT)) logging.getLogger('subliminal').addHandler(handler) logging.getLogger('subliminal').setLevel(logging.DEBUG) # provider configs ctx.obj = {'provider_configs': {}} if addic7ed: ctx.obj['provider_configs']['addic7ed'] = {'username': addic7ed[0], 'password': addic7ed[1]} if legendastv: ctx.obj['provider_configs']['legendastv'] = {'username': legendastv[0], 'password': legendastv[1]} if opensubtitles: ctx.obj['provider_configs']['opensubtitles'] = {'username': opensubtitles[0], 'password': opensubtitles[1]} if subscenter: ctx.obj['provider_configs']['subscenter'] = {'username': subscenter[0], 'password': subscenter[1]}
python
{ "resource": "" }
q30411
cache
train
def cache(ctx, clear_subliminal): """Cache management.""" if clear_subliminal: for file in glob.glob(os.path.join(ctx.parent.params['cache_dir'], cache_file) + '*'): os.remove(file) click.echo('Subliminal\'s cache cleared.') else: click.echo('Nothing done.')
python
{ "resource": "" }
q30412
LegendasTVProvider.search_titles
train
def search_titles(self, title): """Search for titles matching the `title`. :param str title: the title to search for. :return: found titles. :rtype: dict """ # make the query logger.info('Searching title %r', title) r = self.session.get(self.server_url + 'legenda/sugestao/{}'.format(title), timeout=10) r.raise_for_status() results = json.loads(r.text) # loop over results titles = {} for result in results: source = result['_source'] # extract id title_id = int(source['id_filme']) # extract type and title title = {'type': type_map[source['tipo']], 'title': source['dsc_nome']} # extract year if source['dsc_data_lancamento'] and source['dsc_data_lancamento'].isdigit(): title['year'] = int(source['dsc_data_lancamento']) # extract imdb_id if source['id_imdb'] != '0': if not source['id_imdb'].startswith('tt'): title['imdb_id'] = 'tt' + source['id_imdb'].zfill(7) else: title['imdb_id'] = source['id_imdb'] # extract season if title['type'] == 'episode': if source['temporada'] and source['temporada'].isdigit(): title['season'] = int(source['temporada']) else: match = season_re.search(source['dsc_nome_br']) if match: title['season'] = int(match.group('season')) else: logger.warning('No season detected for title %d', title_id) # add title titles[title_id] = title logger.debug('Found %d titles', len(titles)) return titles
python
{ "resource": "" }
q30413
LegendasTVProvider.get_archives
train
def get_archives(self, title_id, language_code): """Get the archive list from a given `title_id` and `language_code`. :param int title_id: title id. :param int language_code: language code. :return: the archives. :rtype: list of :class:`LegendasTVArchive` """ logger.info('Getting archives for title %d and language %d', title_id, language_code) archives = [] page = 1 while True: # get the archive page url = self.server_url + 'util/carrega_legendas_busca_filme/{title}/{language}/-/{page}'.format( title=title_id, language=language_code, page=page) r = self.session.get(url) r.raise_for_status() # parse the results soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser']) for archive_soup in soup.select('div.list_element > article > div'): # create archive archive = LegendasTVArchive(archive_soup.a['href'].split('/')[2], archive_soup.a.text, 'pack' in archive_soup['class'], 'destaque' in archive_soup['class'], self.server_url + archive_soup.a['href'][1:]) # extract text containing downloads, rating and timestamp data_text = archive_soup.find('p', class_='data').text # match downloads archive.downloads = int(downloads_re.search(data_text).group('downloads')) # match rating match = rating_re.search(data_text) if match: archive.rating = int(match.group('rating')) # match timestamp and validate it time_data = {k: int(v) for k, v in timestamp_re.search(data_text).groupdict().items()} archive.timestamp = pytz.timezone('America/Sao_Paulo').localize(datetime(**time_data)) if archive.timestamp > datetime.utcnow().replace(tzinfo=pytz.utc): raise ProviderError('Archive timestamp is in the future') # add archive archives.append(archive) # stop on last page if soup.find('a', attrs={'class': 'load_more'}, string='carregar mais') is None: break # increment page count page += 1 logger.debug('Found %d archives', len(archives)) return archives
python
{ "resource": "" }
q30414
check_video
train
def check_video(video, languages=None, age=None, undefined=False): """Perform some checks on the `video`. All the checks are optional. Return `False` if any of this check fails: * `languages` already exist in `video`'s :attr:`~subliminal.video.Video.subtitle_languages`. * `video` is older than `age`. * `video` has an `undefined` language in :attr:`~subliminal.video.Video.subtitle_languages`. :param video: video to check. :type video: :class:`~subliminal.video.Video` :param languages: desired languages. :type languages: set of :class:`~babelfish.language.Language` :param datetime.timedelta age: maximum age of the video. :param bool undefined: fail on existing undefined language. :return: `True` if the video passes the checks, `False` otherwise. :rtype: bool """ # language test if languages and not (languages - video.subtitle_languages): logger.debug('All languages %r exist', languages) return False # age test if age and video.age > age: logger.debug('Video is older than %r', age) return False # undefined test if undefined and Language('und') in video.subtitle_languages: logger.debug('Undefined language found') return False return True
python
{ "resource": "" }
q30415
search_external_subtitles
train
def search_external_subtitles(path, directory=None): """Search for external subtitles from a video `path` and their associated language. Unless `directory` is provided, search will be made in the same directory as the video file. :param str path: path to the video. :param str directory: directory to search for subtitles. :return: found subtitles with their languages. :rtype: dict """ # split path dirpath, filename = os.path.split(path) dirpath = dirpath or '.' fileroot, fileext = os.path.splitext(filename) # search for subtitles subtitles = {} for p in os.listdir(directory or dirpath): # keep only valid subtitle filenames if not p.startswith(fileroot) or not p.endswith(SUBTITLE_EXTENSIONS): continue # extract the potential language code language = Language('und') language_code = p[len(fileroot):-len(os.path.splitext(p)[1])].replace(fileext, '').replace('_', '-')[1:] if language_code: try: language = Language.fromietf(language_code) except (ValueError, LanguageReverseError): logger.error('Cannot parse language code %r', language_code) subtitles[p] = language logger.debug('Found subtitles %r', subtitles) return subtitles
python
{ "resource": "" }
q30416
scan_video
train
def scan_video(path): """Scan a video from a `path`. :param str path: existing path to the video. :return: the scanned video. :rtype: :class:`~subliminal.video.Video` """ # check for non-existing path if not os.path.exists(path): raise ValueError('Path does not exist') # check video extension if not path.endswith(VIDEO_EXTENSIONS): raise ValueError('%r is not a valid video extension' % os.path.splitext(path)[1]) dirpath, filename = os.path.split(path) logger.info('Scanning video %r in %r', filename, dirpath) # guess video = Video.fromguess(path, guessit(path)) # size and hashes video.size = os.path.getsize(path) if video.size > 10485760: logger.debug('Size is %d', video.size) video.hashes['opensubtitles'] = hash_opensubtitles(path) video.hashes['shooter'] = hash_shooter(path) video.hashes['thesubdb'] = hash_thesubdb(path) video.hashes['napiprojekt'] = hash_napiprojekt(path) logger.debug('Computed hashes %r', video.hashes) else: logger.warning('Size is lower than 10MB: hashes not computed') return video
python
{ "resource": "" }
q30417
scan_archive
train
def scan_archive(path): """Scan an archive from a `path`. :param str path: existing path to the archive. :return: the scanned video. :rtype: :class:`~subliminal.video.Video` """ # check for non-existing path if not os.path.exists(path): raise ValueError('Path does not exist') # check video extension if not path.endswith(ARCHIVE_EXTENSIONS): raise ValueError('%r is not a valid archive extension' % os.path.splitext(path)[1]) dirpath, filename = os.path.split(path) logger.info('Scanning archive %r in %r', filename, dirpath) # rar extension if filename.endswith('.rar'): rar = RarFile(path) # filter on video extensions rar_filenames = [f for f in rar.namelist() if f.endswith(VIDEO_EXTENSIONS)] # no video found if not rar_filenames: raise ValueError('No video in archive') # more than one video found if len(rar_filenames) > 1: raise ValueError('More than one video in archive') # guess rar_filename = rar_filenames[0] rar_filepath = os.path.join(dirpath, rar_filename) video = Video.fromguess(rar_filepath, guessit(rar_filepath)) # size video.size = rar.getinfo(rar_filename).file_size else: raise ValueError('Unsupported extension %r' % os.path.splitext(path)[1]) return video
python
{ "resource": "" }
q30418
scan_videos
train
def scan_videos(path, age=None, archives=True): """Scan `path` for videos and their subtitles. See :func:`refine` to find additional information for the video. :param str path: existing directory path to scan. :param datetime.timedelta age: maximum age of the video or archive. :param bool archives: scan videos in archives. :return: the scanned videos. :rtype: list of :class:`~subliminal.video.Video` """ # check for non-existing path if not os.path.exists(path): raise ValueError('Path does not exist') # check for non-directory path if not os.path.isdir(path): raise ValueError('Path is not a directory') # walk the path videos = [] for dirpath, dirnames, filenames in os.walk(path): logger.debug('Walking directory %r', dirpath) # remove badly encoded and hidden dirnames for dirname in list(dirnames): if dirname.startswith('.'): logger.debug('Skipping hidden dirname %r in %r', dirname, dirpath) dirnames.remove(dirname) # scan for videos for filename in filenames: # filter on videos and archives if not (filename.endswith(VIDEO_EXTENSIONS) or archives and filename.endswith(ARCHIVE_EXTENSIONS)): continue # skip hidden files if filename.startswith('.'): logger.debug('Skipping hidden filename %r in %r', filename, dirpath) continue # reconstruct the file path filepath = os.path.join(dirpath, filename) # skip links if os.path.islink(filepath): logger.debug('Skipping link %r in %r', filename, dirpath) continue # skip old files if age and datetime.utcnow() - datetime.utcfromtimestamp(os.path.getmtime(filepath)) > age: logger.debug('Skipping old file %r in %r', filename, dirpath) continue # scan if filename.endswith(VIDEO_EXTENSIONS): # video try: video = scan_video(filepath) except ValueError: # pragma: no cover logger.exception('Error scanning video') continue elif archives and filename.endswith(ARCHIVE_EXTENSIONS): # archive try: video = scan_archive(filepath) except (NotRarFile, RarCannotExec, ValueError): # pragma: no cover logger.exception('Error scanning archive') continue 
else: # pragma: no cover raise ValueError('Unsupported file %r' % filename) videos.append(video) return videos
python
{ "resource": "" }
q30419
download_best_subtitles
train
def download_best_subtitles(videos, languages, min_score=0, hearing_impaired=False, only_one=False, compute_score=None, pool_class=ProviderPool, **kwargs): """List and download the best matching subtitles. The `videos` must pass the `languages` and `undefined` (`only_one`) checks of :func:`check_video`. :param videos: videos to download subtitles for. :type videos: set of :class:`~subliminal.video.Video` :param languages: languages to download. :type languages: set of :class:`~babelfish.language.Language` :param int min_score: minimum score for a subtitle to be downloaded. :param bool hearing_impaired: hearing impaired preference. :param bool only_one: download only one subtitle, not one per language. :param compute_score: function that takes `subtitle` and `video` as positional arguments, `hearing_impaired` as keyword argument and returns the score. :param pool_class: class to use as provider pool. :type pool_class: :class:`ProviderPool`, :class:`AsyncProviderPool` or similar :param \*\*kwargs: additional parameters for the provided `pool_class` constructor. :return: downloaded subtitles per video. 
:rtype: dict of :class:`~subliminal.video.Video` to list of :class:`~subliminal.subtitle.Subtitle` """ downloaded_subtitles = defaultdict(list) # check videos checked_videos = [] for video in videos: if not check_video(video, languages=languages, undefined=only_one): logger.info('Skipping video %r', video) continue checked_videos.append(video) # return immediately if no video passed the checks if not checked_videos: return downloaded_subtitles # download best subtitles with pool_class(**kwargs) as pool: for video in checked_videos: logger.info('Downloading best subtitles for %r', video) subtitles = pool.download_best_subtitles(pool.list_subtitles(video, languages - video.subtitle_languages), video, languages, min_score=min_score, hearing_impaired=hearing_impaired, only_one=only_one, compute_score=compute_score) logger.info('Downloaded %d subtitle(s)', len(subtitles)) downloaded_subtitles[video].extend(subtitles) return downloaded_subtitles
python
{ "resource": "" }
q30420
save_subtitles
train
def save_subtitles(video, subtitles, single=False, directory=None, encoding=None): """Save subtitles on filesystem. Subtitles are saved in the order of the list. If a subtitle with a language has already been saved, other subtitles with the same language are silently ignored. The extension used is `.lang.srt` by default or `.srt` is `single` is `True`, with `lang` being the IETF code for the :attr:`~subliminal.subtitle.Subtitle.language` of the subtitle. :param video: video of the subtitles. :type video: :class:`~subliminal.video.Video` :param subtitles: subtitles to save. :type subtitles: list of :class:`~subliminal.subtitle.Subtitle` :param bool single: save a single subtitle, default is to save one subtitle per language. :param str directory: path to directory where to save the subtitles, default is next to the video. :param str encoding: encoding in which to save the subtitles, default is to keep original encoding. :return: the saved subtitles :rtype: list of :class:`~subliminal.subtitle.Subtitle` """ saved_subtitles = [] for subtitle in subtitles: # check content if subtitle.content is None: logger.error('Skipping subtitle %r: no content', subtitle) continue # check language if subtitle.language in set(s.language for s in saved_subtitles): logger.debug('Skipping subtitle %r: language already saved', subtitle) continue # create subtitle path subtitle_path = get_subtitle_path(video.name, None if single else subtitle.language) if directory is not None: subtitle_path = os.path.join(directory, os.path.split(subtitle_path)[1]) # save content as is or in the specified encoding logger.info('Saving %r to %r', subtitle, subtitle_path) if encoding is None: with io.open(subtitle_path, 'wb') as f: f.write(subtitle.content) else: with io.open(subtitle_path, 'w', encoding=encoding) as f: f.write(subtitle.text) saved_subtitles.append(subtitle) # check single if single: break return saved_subtitles
python
{ "resource": "" }
q30421
ProviderPool.list_subtitles_provider
train
def list_subtitles_provider(self, provider, video, languages): """List subtitles with a single provider. The video and languages are checked against the provider. :param str provider: name of the provider. :param video: video to list subtitles for. :type video: :class:`~subliminal.video.Video` :param languages: languages to search for. :type languages: set of :class:`~babelfish.language.Language` :return: found subtitles. :rtype: list of :class:`~subliminal.subtitle.Subtitle` or None """ # check video validity if not provider_manager[provider].plugin.check(video): logger.info('Skipping provider %r: not a valid video', provider) return [] # check supported languages provider_languages = provider_manager[provider].plugin.languages & languages if not provider_languages: logger.info('Skipping provider %r: no language to search for', provider) return [] # list subtitles logger.info('Listing subtitles with provider %r and languages %r', provider, provider_languages) try: return self[provider].list_subtitles(video, provider_languages) except (requests.Timeout, socket.timeout): logger.error('Provider %r timed out', provider) except: logger.exception('Unexpected error in provider %r', provider)
python
{ "resource": "" }
q30422
ProviderPool.download_best_subtitles
train
def download_best_subtitles(self, subtitles, video, languages, min_score=0, hearing_impaired=False, only_one=False, compute_score=None): """Download the best matching subtitles. :param subtitles: the subtitles to use. :type subtitles: list of :class:`~subliminal.subtitle.Subtitle` :param video: video to download subtitles for. :type video: :class:`~subliminal.video.Video` :param languages: languages to download. :type languages: set of :class:`~babelfish.language.Language` :param int min_score: minimum score for a subtitle to be downloaded. :param bool hearing_impaired: hearing impaired preference. :param bool only_one: download only one subtitle, not one per language. :param compute_score: function that takes `subtitle` and `video` as positional arguments, `hearing_impaired` as keyword argument and returns the score. :return: downloaded subtitles. :rtype: list of :class:`~subliminal.subtitle.Subtitle` """ compute_score = compute_score or default_compute_score # sort subtitles by score scored_subtitles = sorted([(s, compute_score(s, video, hearing_impaired=hearing_impaired)) for s in subtitles], key=operator.itemgetter(1), reverse=True) # download best subtitles, falling back on the next on error downloaded_subtitles = [] for subtitle, score in scored_subtitles: # check score if score < min_score: logger.info('Score %d is below min_score (%d)', score, min_score) break # check downloaded languages if subtitle.language in set(s.language for s in downloaded_subtitles): logger.debug('Skipping subtitle: %r already downloaded', subtitle.language) continue # download if self.download_subtitle(subtitle): downloaded_subtitles.append(subtitle) # stop when all languages are downloaded if set(s.language for s in downloaded_subtitles) == languages: logger.debug('All languages downloaded') break # stop if only one subtitle is requested if only_one: logger.debug('Only one subtitle downloaded') break return downloaded_subtitles
python
{ "resource": "" }
q30423
get_scores
train
def get_scores(video): """Get the scores dict for the given `video`. This will return either :data:`episode_scores` or :data:`movie_scores` based on the type of the `video`. :param video: the video to compute the score against. :type video: :class:`~subliminal.video.Video` :return: the scores dict. :rtype: dict """ if isinstance(video, Episode): return episode_scores elif isinstance(video, Movie): return movie_scores raise ValueError('video must be an instance of Episode or Movie')
python
{ "resource": "" }
q30424
compute_score
train
def compute_score(subtitle, video, hearing_impaired=None): """Compute the score of the `subtitle` against the `video` with `hearing_impaired` preference. :func:`compute_score` uses the :meth:`Subtitle.get_matches <subliminal.subtitle.Subtitle.get_matches>` method and applies the scores (either from :data:`episode_scores` or :data:`movie_scores`) after some processing. :param subtitle: the subtitle to compute the score of. :type subtitle: :class:`~subliminal.subtitle.Subtitle` :param video: the video to compute the score against. :type video: :class:`~subliminal.video.Video` :param bool hearing_impaired: hearing impaired preference. :return: score of the subtitle. :rtype: int """ logger.info('Computing score of %r for video %r with %r', subtitle, video, dict(hearing_impaired=hearing_impaired)) # get the scores dict scores = get_scores(video) logger.debug('Using scores %r', scores) # get the matches matches = subtitle.get_matches(video) logger.debug('Found matches %r', matches) # on hash match, discard everything else if 'hash' in matches: logger.debug('Keeping only hash match') matches &= {'hash'} # handle equivalent matches if isinstance(video, Episode): if 'title' in matches: logger.debug('Adding title match equivalent') matches.add('episode') if 'series_imdb_id' in matches: logger.debug('Adding series_imdb_id match equivalent') matches |= {'series', 'year'} if 'imdb_id' in matches: logger.debug('Adding imdb_id match equivalents') matches |= {'series', 'year', 'season', 'episode'} if 'tvdb_id' in matches: logger.debug('Adding tvdb_id match equivalents') matches |= {'series', 'year', 'season', 'episode'} if 'series_tvdb_id' in matches: logger.debug('Adding series_tvdb_id match equivalents') matches |= {'series', 'year'} elif isinstance(video, Movie): if 'imdb_id' in matches: logger.debug('Adding imdb_id match equivalents') matches |= {'title', 'year'} # handle hearing impaired if hearing_impaired is not None and subtitle.hearing_impaired == hearing_impaired: 
logger.debug('Matched hearing_impaired') matches.add('hearing_impaired') # compute the score score = sum((scores.get(match, 0) for match in matches)) logger.info('Computed score %r with final matches %r', score, matches) # ensure score is within valid bounds assert 0 <= score <= scores['hash'] + scores['hearing_impaired'] return score
python
{ "resource": "" }
q30425
Provider.check
train
def check(cls, video): """Check if the `video` can be processed. The `video` is considered invalid if not an instance of :attr:`video_types` or if the :attr:`required_hash` is not present in :attr:`~subliminal.video.Video.hashes` attribute of the `video`. :param video: the video to check. :type video: :class:`~subliminal.video.Video` :return: `True` if the `video` is valid, `False` otherwise. :rtype: bool """ if not isinstance(video, cls.video_types): return False if cls.required_hash is not None and cls.required_hash not in video.hashes: return False return True
python
{ "resource": "" }
q30426
Addic7edProvider._get_show_ids
train
def _get_show_ids(self): """Get the ``dict`` of show ids per series by querying the `shows.php` page. :return: show id per series, lower case and without quotes. :rtype: dict """ # get the show page logger.info('Getting show ids') r = self.session.get(self.server_url + 'shows.php', timeout=10) r.raise_for_status() soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser']) # populate the show ids show_ids = {} for show in soup.select('td.version > h3 > a[href^="/show/"]'): show_ids[sanitize(show.text)] = int(show['href'][6:]) logger.debug('Found %d show ids', len(show_ids)) return show_ids
python
{ "resource": "" }
q30427
Addic7edProvider.get_show_id
train
def get_show_id(self, series, year=None, country_code=None): """Get the best matching show id for `series`, `year` and `country_code`. First search in the result of :meth:`_get_show_ids` and fallback on a search with :meth:`_search_show_id`. :param str series: series of the episode. :param year: year of the series, if any. :type year: int :param country_code: country code of the series, if any. :type country_code: str :return: the show id, if found. :rtype: int """ series_sanitized = sanitize(series).lower() show_ids = self._get_show_ids() show_id = None # attempt with country if not show_id and country_code: logger.debug('Getting show id with country') show_id = show_ids.get('%s %s' % (series_sanitized, country_code.lower())) # attempt with year if not show_id and year: logger.debug('Getting show id with year') show_id = show_ids.get('%s %d' % (series_sanitized, year)) # attempt clean if not show_id: logger.debug('Getting show id') show_id = show_ids.get(series_sanitized) # search as last resort if not show_id: logger.warning('Series not found in show ids') show_id = self._search_show_id(series) return show_id
python
{ "resource": "" }
q30428
TVsubtitlesProvider.get_episode_ids
train
def get_episode_ids(self, show_id, season): """Get episode ids from the show id and the season. :param int show_id: show id. :param int season: season of the episode. :return: episode ids per episode number. :rtype: dict """ # get the page of the season of the show logger.info('Getting the page of show id %d, season %d', show_id, season) r = self.session.get(self.server_url + 'tvshow-%d-%d.html' % (show_id, season), timeout=10) soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser']) # loop over episode rows episode_ids = {} for row in soup.select('table#table5 tr'): # skip rows that do not have a link to the episode page if not row('a', href=episode_id_re): continue # extract data from the cells cells = row('td') episode = int(cells[0].text.split('x')[1]) episode_id = int(cells[1].a['href'][8:-5]) episode_ids[episode] = episode_id if episode_ids: logger.debug('Found episode ids %r', episode_ids) else: logger.warning('No episode ids found') return episode_ids
python
{ "resource": "" }
q30429
refine
train
def refine(video, embedded_subtitles=True, **kwargs):
    """Refine a video by searching its metadata.

    Several :class:`~subliminal.video.Video` attributes can be found:

      * :attr:`~subliminal.video.Video.resolution`
      * :attr:`~subliminal.video.Video.video_codec`
      * :attr:`~subliminal.video.Video.audio_codec`
      * :attr:`~subliminal.video.Video.subtitle_languages`

    Only Matroska (``.mkv``) containers are supported; other extensions
    are left untouched.

    :param bool embedded_subtitles: search for embedded subtitles.
    """
    # skip non existing videos
    if not video.exists:
        return

    # check extensions
    extension = os.path.splitext(video.name)[1]
    if extension == '.mkv':
        with open(video.name, 'rb') as f:
            mkv = MKV(f)

        # main video track
        if mkv.video_tracks:
            video_track = mkv.video_tracks[0]

            # resolution (only standard heights are recognized)
            if video_track.height in (480, 720, 1080):
                if video_track.interlaced:
                    video.resolution = '%di' % video_track.height
                else:
                    video.resolution = '%dp' % video_track.height
                logger.debug('Found resolution %s', video.resolution)

            # video codec, mapped from the Matroska codec id
            if video_track.codec_id == 'V_MPEG4/ISO/AVC':
                video.video_codec = 'h264'
                logger.debug('Found video_codec %s', video.video_codec)
            elif video_track.codec_id == 'V_MPEG4/ISO/SP':
                video.video_codec = 'DivX'
                logger.debug('Found video_codec %s', video.video_codec)
            elif video_track.codec_id == 'V_MPEG4/ISO/ASP':
                video.video_codec = 'XviD'
                logger.debug('Found video_codec %s', video.video_codec)
        else:
            logger.warning('MKV has no video track')

        # main audio track
        if mkv.audio_tracks:
            audio_track = mkv.audio_tracks[0]

            # audio codec, mapped from the Matroska codec id
            if audio_track.codec_id == 'A_AC3':
                video.audio_codec = 'AC3'
                logger.debug('Found audio_codec %s', video.audio_codec)
            elif audio_track.codec_id == 'A_DTS':
                video.audio_codec = 'DTS'
                logger.debug('Found audio_codec %s', video.audio_codec)
            elif audio_track.codec_id == 'A_AAC':
                video.audio_codec = 'AAC'
                logger.debug('Found audio_codec %s', video.audio_codec)
        else:
            logger.warning('MKV has no audio track')

        # subtitle tracks
        if mkv.subtitle_tracks:
            if embedded_subtitles:
                embedded_subtitle_languages = set()
                for st in mkv.subtitle_tracks:
                    # Prefer the track's language tag, fall back on the
                    # track name, and default to 'und' (undetermined).
                    if st.language:
                        try:
                            embedded_subtitle_languages.add(Language.fromalpha3b(st.language))
                        except BabelfishError:
                            logger.error('Embedded subtitle track language %r is not a valid language', st.language)
                            embedded_subtitle_languages.add(Language('und'))
                    elif st.name:
                        try:
                            embedded_subtitle_languages.add(Language.fromname(st.name))
                        except BabelfishError:
                            logger.debug('Embedded subtitle track name %r is not a valid language', st.name)
                            embedded_subtitle_languages.add(Language('und'))
                    else:
                        embedded_subtitle_languages.add(Language('und'))
                logger.debug('Found embedded subtitle %r', embedded_subtitle_languages)
                video.subtitle_languages |= embedded_subtitle_languages
        else:
            logger.debug('MKV has no subtitle track')
    else:
        logger.debug('Unsupported video extension %s', extension)
python
{ "resource": "" }
q30430
get_series_episode
train
def get_series_episode(series_id, season, episode):
    """Get an episode of a series.

    :param int series_id: id of the series.
    :param int season: season number of the episode.
    :param int episode: episode number of the episode.
    :return: the episode data, or None when the episode is not found.
    :rtype: dict
    """
    found = tvdb_client.query_series_episodes(
        series_id, aired_season=season, aired_episode=episode
    )
    if not found:
        return None
    return tvdb_client.get_episode(found['data'][0]['id'])
python
{ "resource": "" }
q30431
TVDBClient.get_series_episodes
train
def get_series_episodes(self, id, page=1):
    """Get one page of episodes for a series.

    Returns the decoded JSON payload, or None when the series has no
    episodes (HTTP 404).
    """
    response = self.session.get(
        self.base_url + '/series/{}/episodes'.format(id),
        params={'page': page},
    )
    if response.status_code == 404:
        return None
    response.raise_for_status()
    return response.json()
python
{ "resource": "" }
q30432
TVDBClient.query_series_episodes
train
def query_series_episodes(self, id, absolute_number=None, aired_season=None, aired_episode=None,
                          dvd_season=None, dvd_episode=None, imdb_id=None, page=1):
    """Query the episodes of a series by any combination of criteria.

    Returns the decoded JSON payload, or None when nothing matches
    (HTTP 404).
    """
    query = {
        'absoluteNumber': absolute_number,
        'airedSeason': aired_season,
        'airedEpisode': aired_episode,
        'dvdSeason': dvd_season,
        'dvdEpisode': dvd_episode,
        'imdbId': imdb_id,
        'page': page,
    }
    response = self.session.get(
        self.base_url + '/series/{}/episodes/query'.format(id), params=query
    )
    if response.status_code == 404:
        return None
    response.raise_for_status()
    return response.json()
python
{ "resource": "" }
q30433
checked
train
def checked(response):
    """Check a response status before returning it.

    :param response: a response from a XMLRPC call to OpenSubtitles.
    :return: the response.
    :raise: :class:`OpenSubtitlesError` (or a more specific subclass)
        when the embedded status code is not 200.
    """
    code = int(response['status'][:3])
    if code == 200:
        return response
    if code == 401:
        raise Unauthorized
    elif code == 406:
        raise NoSession
    elif code == 407:
        raise DownloadLimitReached
    elif code == 413:
        raise InvalidImdbid
    elif code == 414:
        raise UnknownUserAgent
    elif code == 415:
        raise DisabledUserAgent
    elif code == 503:
        raise ServiceUnavailable
    # any other non-200 status is a generic error
    raise OpenSubtitlesError(response['status'])
python
{ "resource": "" }
q30434
retry_until_ok
train
def retry_until_ok(func, *args, **kwargs):
    """Retry code block until it succeeds.

    If it does not succeed in 120 attempts, the function re-raises any
    error the function raised on its last attempt.
    """
    max_tries = 120
    attempt = 0
    while True:
        attempt += 1
        final_attempt = attempt >= max_tries
        try:
            return func(*args, **kwargs)
        except RuntimeError:
            # Do not mask RuntimeError.
            raise
        except errors.OperationFailure as exc:
            # Do not mask authorization failures: MongoDB >= 2.6 sets the
            # error code, MongoDB 2.4 only sets errmsg.
            if exc.code == 13 or ("unauthorized" == exc.details.get("errmsg")):
                raise
            if final_attempt:
                LOG.exception(
                    "Call to %s failed too many times in retry_until_ok", func
                )
                raise
        except Exception:
            if final_attempt:
                LOG.exception(
                    "Call to %s failed too many times in retry_until_ok", func
                )
                raise
        # back off briefly before the next attempt
        time.sleep(1)
python
{ "resource": "" }
q30435
DocManagerBase.apply_update
train
def apply_update(self, doc, update_spec):
    """Apply an update operation to a document.

    ``update_spec`` is either a whole replacement document (no ``$set``
    or ``$unset`` keys) or a MongoDB update spec containing ``$set``
    and/or ``$unset``.  Returns the updated document; raises
    ``UpdateDoesNotApply`` when the spec cannot be applied to ``doc``.
    """
    # Helper to cast a key for a list or dict, or raise ValueError
    def _convert_or_raise(container, key):
        if isinstance(container, dict):
            return key
        elif isinstance(container, list):
            return int(key)
        else:
            raise ValueError

    # Helper to retrieve (and/or create)
    # a dot-separated path within a document.
    def _retrieve_path(container, path, create=False):
        looking_at = container
        for part in path:
            if isinstance(looking_at, dict):
                if create and part not in looking_at:
                    looking_at[part] = {}
                looking_at = looking_at[part]
            elif isinstance(looking_at, list):
                index = int(part)
                # Do we need to create additional space in the array?
                if create and len(looking_at) <= index:
                    # Fill buckets with None up to the index we need.
                    looking_at.extend([None] * (index - len(looking_at)))
                    # Bucket we need gets the empty dictionary.
                    looking_at.append({})
                looking_at = looking_at[index]
            else:
                raise ValueError
        return looking_at

    # Set a (possibly dotted) field, growing intermediate lists/dicts.
    def _set_field(doc, to_set, value):
        if "." in to_set:
            path = to_set.split(".")
            where = _retrieve_path(doc, path[:-1], create=True)
            index = _convert_or_raise(where, path[-1])
            wl = len(where)
            # Pad a too-short list so the target index exists.
            if isinstance(where, list) and index >= wl:
                where.extend([None] * (index + 1 - wl))
            where[index] = value
        else:
            doc[to_set] = value

    # Remove a (possibly dotted) field; list elements become None.
    def _unset_field(doc, to_unset):
        try:
            if "." in to_unset:
                path = to_unset.split(".")
                where = _retrieve_path(doc, path[:-1])
                index_or_key = _convert_or_raise(where, path[-1])
                if isinstance(where, list):
                    # Unset an array element sets it to null.
                    where[index_or_key] = None
                else:
                    # Unset field removes it entirely.
                    del where[index_or_key]
            else:
                del doc[to_unset]
        except (KeyError, IndexError, ValueError):
            source_version = get_mininum_mongodb_version()
            if source_version is None or source_version.at_least(2, 6):
                raise
            # Ignore unset errors since MongoDB 2.4 records invalid
            # $unsets in the oplog.
            LOG.warning(
                "Could not unset field %r from document %r. "
                "This may be normal when replicating from "
                "MongoDB 2.4 or the destination could be out of "
                "sync." % (to_unset, doc)
            )

    # wholesale document replacement
    if "$set" not in update_spec and "$unset" not in update_spec:
        # update spec contains the new document in its entirety
        return update_spec
    else:
        try:
            # $set
            for to_set in update_spec.get("$set", []):
                value = update_spec["$set"][to_set]
                _set_field(doc, to_set, value)
            # $unset
            for to_unset in update_spec.get("$unset", []):
                _unset_field(doc, to_unset)
        except (KeyError, ValueError, AttributeError, IndexError):
            # Preserve the original traceback on the wrapped exception.
            exc_t, exc_v, exc_tb = sys.exc_info()
            msg = "Cannot apply update %r to %r" % (update_spec, doc)
            raise UpdateDoesNotApply(msg).with_traceback(exc_tb)
        return doc
python
{ "resource": "" }
q30436
DocManagerBase.bulk_upsert
train
def bulk_upsert(self, docs, namespace, timestamp):
    """Upsert each document in a set of documents.

    Subclasses may override this to batch many documents at once; the
    default simply calls :meth:`upsert` once per document.
    """
    for document in docs:
        self.upsert(document, namespace, timestamp)
python
{ "resource": "" }
q30437
log_startup_info
train
def log_startup_info():
    """Log info about the current environment."""
    LOG.always("Starting mongo-connector version: %s", __version__)
    if "dev" in __version__:
        LOG.warning(
            "This is a development version (%s) of mongo-connector", __version__
        )
    LOG.always("Python version: %s", sys.version)
    LOG.always("Platform: %s", platform.platform())

    # Older pymongo releases exposed the version as `version`.
    driver_version = (
        pymongo.__version__ if hasattr(pymongo, "__version__") else pymongo.version
    )
    LOG.always("pymongo version: %s", driver_version)
    if not pymongo.has_c():
        LOG.warning(
            "pymongo version %s was installed without the C extensions. "
            '"InvalidBSON: Date value out of range" errors may occur if '
            "there are documents with BSON Datetimes that represent times "
            "outside of Python's datetime limit.",
            pymongo.__version__,
        )
python
{ "resource": "" }
q30438
Connector.from_config
train
def from_config(cls, config):
    """Create a new Connector instance from a Config object.

    The auth key is taken from ``authentication.passwordFile`` (with all
    whitespace stripped) and overridden by ``authentication.password``
    when the latter is set.  Exits the process when the password file
    cannot be read.
    """
    auth_key = None
    password_file = config["authentication.passwordFile"]
    if password_file is not None:
        try:
            # BUG FIX: use a context manager so the file handle is closed;
            # the original leaked the handle returned by open().
            with open(password_file) as key_file:
                auth_key = re.sub(r"\s", "", key_file.read())
        except IOError:
            LOG.error("Could not load password file!")
            sys.exit(1)
    password = config["authentication.password"]
    if password is not None:
        auth_key = password
    connector = Connector(
        mongo_address=config["mainAddress"],
        doc_managers=config["docManagers"],
        oplog_checkpoint=os.path.abspath(config["oplogFile"]),
        collection_dump=config["onlyDump"] or not config["noDump"],
        only_dump=config["onlyDump"],
        batch_size=config["batchSize"],
        continue_on_error=config["continueOnError"],
        auth_username=config["authentication.adminUsername"],
        auth_key=auth_key,
        fields=config["fields"],
        exclude_fields=config["exclude_fields"],
        ns_set=config["namespaces.include"],
        ex_ns_set=config["namespaces.exclude"],
        dest_mapping=config["namespaces.mapping"],
        namespace_options=config["namespaces.namespace_options"],
        gridfs_set=config["namespaces.gridfs"],
        ssl_certfile=config["ssl.sslCertfile"],
        ssl_keyfile=config["ssl.sslKeyfile"],
        ssl_ca_certs=config["ssl.sslCACerts"],
        ssl_cert_reqs=config["ssl.sslCertificatePolicy"],
        tz_aware=config["timezoneAware"],
    )
    return connector
python
{ "resource": "" }
q30439
Connector.join
train
def join(self):
    """Joins thread, stops it from running, and stops every doc manager."""
    self.can_run = False
    super(Connector, self).join()
    for manager in self.doc_managers:
        manager.stop()
python
{ "resource": "" }
q30440
Connector.write_oplog_progress
train
def write_oplog_progress(self):
    """Write replication progress (oplog timestamps) to the checkpoint
    file provided by the user.

    The previous checkpoint is kept as ``<file>.backup`` while the new
    one is written, and restored if the write fails.
    """
    if self.oplog_checkpoint is None:
        return None

    with self.oplog_progress as oplog_prog:
        oplog_dict = oplog_prog.get_dict()
    items = [[name, util.bson_ts_to_long(oplog_dict[name])] for name in oplog_dict]
    if not items:
        return

    # Move the current checkpoint aside so it can be restored on failure.
    backup_file = self.oplog_checkpoint + ".backup"
    os.rename(self.oplog_checkpoint, backup_file)

    # for each of the threads write to file
    with open(self.oplog_checkpoint, "w") as dest:
        if len(items) == 1:
            # Write 1-dimensional array, as in previous versions.
            json_str = json.dumps(items[0])
        else:
            # Write a 2d array to support sharded clusters.
            json_str = json.dumps(items)
        try:
            dest.write(json_str)
        except IOError:
            # BUG FIX: the original called shutil.copyfile(backup, dest)
            # with file objects, but copyfile() requires paths and would
            # raise TypeError; copyfileobj() works on file objects.  Also
            # truncate from offset 0 (truncate() alone cuts at the current
            # position, leaving the partial write in place).
            dest.truncate(0)
            dest.seek(0)
            with open(backup_file, "r") as backup:
                shutil.copyfileobj(backup, dest)

    os.remove(backup_file)
python
{ "resource": "" }
q30441
Connector.read_oplog_progress
train
def read_oplog_progress(self):
    """Reads oplog progress from file provided by user.

    This method is only called once before any threads are spawned.
    Populates ``self.oplog_progress`` in place; returns None in every
    case (missing, empty, or unreadable file included).
    """
    if self.oplog_checkpoint is None:
        return None

    # Check for empty file
    try:
        if os.stat(self.oplog_checkpoint).st_size == 0:
            LOG.info("MongoConnector: Empty oplog progress file.")
            return None
    except OSError:
        # File does not exist (or cannot be stat'ed): nothing to read.
        return None

    with open(self.oplog_checkpoint, "r") as progress_file:
        try:
            data = json.load(progress_file)
        except ValueError:
            LOG.exception(
                'Cannot read oplog progress file "%s". '
                "It may be corrupt after Mongo Connector was shut down"
                "uncleanly. You can try to recover from a backup file "
                '(may be called "%s.backup") or create a new progress file '
                "starting at the current moment in time by running "
                "mongo-connector --no-dump <other options>. "
                "You may also be trying to read an oplog progress file "
                "created with the old format for sharded clusters. "
                "See https://github.com/10gen-labs/mongo-connector/wiki"
                "/Oplog-Progress-File for complete documentation."
                % (self.oplog_checkpoint, self.oplog_checkpoint)
            )
            return

    # data format:
    # [name, timestamp] = replica set
    # [[name, timestamp], [name, timestamp], ...] = sharded cluster
    if not isinstance(data[0], list):
        data = [data]

    with self.oplog_progress:
        self.oplog_progress.dict = dict(
            (name, util.long_to_bson_ts(timestamp)) for name, timestamp in data
        )
python
{ "resource": "" }
q30442
Connector.copy_uri_options
train
def copy_uri_options(hosts, mongodb_uri):
    """Return a MongoDB URI pointing at `hosts` that carries over any
    options present in `mongodb_uri`.
    """
    options = mongodb_uri.split("?", 1)[1] if "?" in mongodb_uri else None
    result = "mongodb://" + hosts
    if options:
        result += "/?" + options
    return result
python
{ "resource": "" }
q30443
Connector.oplog_thread_join
train
def oplog_thread_join(self):
    """Stop and join every OplogThread, one shard at a time."""
    LOG.info("MongoConnector: Stopping all OplogThreads")
    for oplog_thread in self.shard_set.values():
        oplog_thread.join()
python
{ "resource": "" }
q30444
_character_matches
train
def _character_matches(name1, name2): """Yield the number of characters that match the beginning of each string. """ if name1[0] == "*": for i in range(len(name2) + 1): yield 1, i if name2[0] == "*": for i in range(len(name1) + 1): yield i, 1 if name1[0] == name2[0]: yield 1, 1
python
{ "resource": "" }
q30445
wildcards_overlap
train
def wildcards_overlap(name1, name2):
    """Return true if two wildcard patterns can match the same string."""
    if not name1 and not name2:
        # Both patterns exhausted simultaneously: a common string exists.
        return True
    if not name1 or not name2:
        return False
    # Try every way a single step can consume characters from each side.
    return any(
        wildcards_overlap(name1[used1:], name2[used2:])
        for used1, used2 in _character_matches(name1, name2)
    )
python
{ "resource": "" }
q30446
_validate_namespaces
train
def _validate_namespaces(namespaces):
    """Validate wildcards and renaming in namespaces.

    Target namespaces should have the same number of wildcards as the
    source. No target namespaces overlap exactly with each other. Logs a
    warning when wildcard namespaces have a chance of overlapping.

    Raises ``errors.InvalidConfiguration`` on any hard violation.
    """
    for source, namespace in namespaces.items():
        target = namespace.dest_name
        _validate_namespace(source)
        _validate_namespace(target)
        # At most one '*' is supported per side of a mapping.
        if source.count("*") > 1 or target.count("*") > 1:
            raise errors.InvalidConfiguration(
                "The namespace mapping from '%s' to '%s' cannot contain more "
                "than one '*' character." % (source, target)
            )
        if source.count("*") != target.count("*"):
            raise errors.InvalidConfiguration(
                "The namespace mapping from '%s' to '%s' must contain the "
                "same number of '*' characters." % (source, target)
            )
        if "*" not in source:
            continue
        # Make sure that wildcards are not moved from database name to
        # collection name or vice versa, eg "db*.foo" => "db.foo_*"
        if (
            wildcard_in_db(source)
            and not wildcard_in_db(target)
            or (not wildcard_in_db(source) and wildcard_in_db(target))
        ):
            raise errors.InvalidConfiguration(
                "The namespace mapping from '%s' to '%s' is invalid. A '*' "
                "that appears in the source database name must also appear"
                "in the target database name. A '*' that appears in the "
                "source collection name must also appear in the target "
                "collection name" % (source, target)
            )
    # Pairwise checks: sources that can match the same namespace must not
    # map to the same (or potentially overlapping) target.
    for source1, source2 in combinations(namespaces.keys(), 2):
        if wildcards_overlap(source1, source2):
            LOG.warning(
                'Namespaces "%s" and "%s" may match the '
                "same source namespace.",
                source1,
                source2,
            )
            target1 = namespaces[source1].dest_name
            target2 = namespaces[source2].dest_name
            if target1 == target2:
                raise errors.InvalidConfiguration(
                    "Multiple namespaces cannot be combined into one target "
                    "namespace. Trying to map '%s' to '%s' but '%s' already "
                    "corresponds to '%s' in the target system."
                    % (source2, target2, source1, target1)
                )
            if wildcards_overlap(target1, target2):
                LOG.warning(
                    "Multiple namespaces cannot be combined into one target "
                    "namespace. Mapping from '%s' to '%s' might overlap "
                    "with mapping from '%s' to '%s'."
                    % (source2, target2, source1, target1)
                )
python
{ "resource": "" }
q30447
_merge_namespace_options
train
def _merge_namespace_options(
    namespace_set=None,
    ex_namespace_set=None,
    gridfs_set=None,
    dest_mapping=None,
    namespace_options=None,
    include_fields=None,
    exclude_fields=None,
):
    """Merges namespaces options together.

    Returns a 2-tuple: the first element is the set of excluded
    namespaces and the second is a mapping from source namespace to
    Namespace instances.
    """
    namespace_set = set(namespace_set or [])
    ex_namespace_set = set(ex_namespace_set or [])
    gridfs_set = set(gridfs_set or [])
    dest_mapping = dest_mapping or {}
    namespace_options = namespace_options or {}
    include_fields = set(include_fields or [])
    exclude_fields = set(exclude_fields or [])
    namespaces = {}
    # namespace_options values may be: a dict of options, a rename string,
    # or a boolean (True = include, False = exclude).
    for source_name, options_or_str in namespace_options.items():
        if isinstance(options_or_str, dict):
            namespace_set.add(source_name)
            if options_or_str.get("gridfs"):
                gridfs_set.add(source_name)
            namespaces[source_name] = Namespace(
                dest_name=options_or_str.get("rename"),
                include_fields=options_or_str.get("includeFields"),
                exclude_fields=options_or_str.get("excludeFields"),
                gridfs=options_or_str.get("gridfs", False),
            )
        elif isinstance(options_or_str, str):
            namespace_set.add(source_name)
            namespaces[source_name] = Namespace(dest_name=options_or_str)
        elif options_or_str:
            namespace_set.add(source_name)
        else:
            ex_namespace_set.add(source_name)
    # Add namespaces that are renamed but not in namespace_options
    for source_name, target_name in dest_mapping.items():
        namespaces[source_name] = namespaces.get(source_name, Namespace()).with_options(
            dest_name=target_name
        )
    # Add namespaces that are included but not in namespace_options
    for included_name in namespace_set:
        if included_name not in namespaces:
            namespaces[included_name] = Namespace()
    # Add namespaces that are gridfs but not in namespace_options
    for gridfs_name in gridfs_set:
        namespaces[gridfs_name] = namespaces.get(gridfs_name, Namespace()).with_options(
            gridfs=True
        )
    # Add source, destination name, and globally included and excluded fields
    for included_name in namespaces:
        namespace = namespaces[included_name]
        namespace = namespace.with_options(
            source_name=included_name,
            include_fields=validate_include_fields(
                include_fields, namespace.include_fields
            ),
            exclude_fields=validate_exclude_fields(
                exclude_fields, namespace.exclude_fields
            ),
        )
        # The default destination name is the same as the source.
        if not namespace.dest_name:
            namespace = namespace.with_options(dest_name=included_name)
        namespaces[included_name] = namespace
    return ex_namespace_set, namespaces
python
{ "resource": "" }
q30448
match_replace_regex
train
def match_replace_regex(regex, src_namespace, dest_namespace):
    """Return `dest_namespace` with its '*' replaced by the text captured
    by `regex` from `src_namespace`, or None when there is no match.
    """
    matched = regex.match(src_namespace)
    if not matched:
        return None
    return dest_namespace.replace("*", matched.group(1))
python
{ "resource": "" }
q30449
namespace_to_regex
train
def namespace_to_regex(namespace):
    """Create a compiled regex matching the wildcard `namespace` exactly."""
    db_part, coll_part = namespace.split(".", 1)
    # A database name cannot contain a '.' character, so its wildcard must
    # not cross the first dot; a collection wildcard may match anything.
    pattern = (
        r"\A"
        + re.escape(db_part).replace(r"\*", "([^.]*)")
        + r"\."
        + re.escape(coll_part).replace(r"\*", "(.*)")
        + r"\Z"
    )
    return re.compile(pattern)
python
{ "resource": "" }
q30450
NamespaceConfig._register_namespace_and_command
train
def _register_namespace_and_command(self, namespace):
    """Add a Namespace along with the matching '<db>.$cmd' namespace.

    Commands for a database arrive on "<db>.$cmd", so a parallel mapping
    is registered to keep command renaming consistent.
    """
    self._add_namespace(namespace)
    source_db = namespace.source_name.split(".", 1)[0]
    dest_db = namespace.dest_name.split(".", 1)[0]
    self._add_namespace(
        Namespace(dest_name=dest_db + ".$cmd", source_name=source_db + ".$cmd")
    )
python
{ "resource": "" }
q30451
NamespaceConfig._add_namespace
train
def _add_namespace(self, namespace): """Add an included and possibly renamed Namespace.""" src_name = namespace.source_name if "*" in src_name: self._regex_map.append((namespace_to_regex(src_name), namespace)) else: self._add_plain_namespace(namespace)
python
{ "resource": "" }
q30452
NamespaceConfig._add_plain_namespace
train
def _add_plain_namespace(self, namespace): """Add an included and possibly renamed non-wildcard Namespace.""" src_name = namespace.source_name target_name = namespace.dest_name src_names = self._reverse_plain.setdefault(target_name, set()) src_names.add(src_name) if len(src_names) > 1: # Another source namespace is already mapped to this target existing_src = (src_names - set([src_name])).pop() raise errors.InvalidConfiguration( "Multiple namespaces cannot be combined into one target " "namespace. Trying to map '%s' to '%s' but there already " "exists a mapping from '%s' to '%s'" % (src_name, target_name, existing_src, target_name) ) self._plain[src_name] = namespace src_db, _ = src_name.split(".", 1) target_db, _ = target_name.split(".", 1) self._plain_db.setdefault(src_db, set()).add(target_db)
python
{ "resource": "" }
q30453
NamespaceConfig.lookup
train
def lookup(self, plain_src_ns): """Given a plain source namespace, return the corresponding Namespace object, or None if it is not included. """ # Ignore the namespace if it is excluded. if plain_src_ns in self._ex_namespace_set: return None # Include all namespaces if there are no included namespaces. if not self._regex_map and not self._plain: return Namespace( dest_name=plain_src_ns, source_name=plain_src_ns, include_fields=self._include_fields, exclude_fields=self._exclude_fields, ) # First, search for the namespace in the plain namespaces. try: return self._plain[plain_src_ns] except KeyError: # Search for the namespace in the wildcard namespaces. for regex, namespace in self._regex_map: new_name = match_replace_regex(regex, plain_src_ns, namespace.dest_name) if not new_name: continue # Save the new target Namespace in the plain namespaces so # future lookups are fast. new_namespace = namespace.with_options( dest_name=new_name, source_name=plain_src_ns ) self._add_plain_namespace(new_namespace) return new_namespace # Save the not included namespace to the excluded namespaces so # that future lookups of the same namespace are fast. self._ex_namespace_set.add(plain_src_ns) return None
python
{ "resource": "" }
q30454
NamespaceConfig.map_namespace
train
def map_namespace(self, plain_src_ns):
    """Return the plain target namespace for `plain_src_ns`, or None
    when the namespace is not included.
    """
    mapped = self.lookup(plain_src_ns)
    return mapped.dest_name if mapped else None
python
{ "resource": "" }
q30455
NamespaceConfig.gridfs_namespace
train
def gridfs_namespace(self, plain_src_ns):
    """Return the target namespace when `plain_src_ns` is an included
    GridFS collection, otherwise None.
    """
    mapped = self.lookup(plain_src_ns)
    if mapped and mapped.gridfs:
        return mapped.dest_name
    return None
python
{ "resource": "" }
q30456
NamespaceConfig.unmap_namespace
train
def unmap_namespace(self, plain_target_ns):
    """Return the source namespace that maps to `plain_target_ns`, or
    None when no mapping produces it.
    """
    # With no explicit mappings every namespace maps to itself.
    if not self._regex_map and not self._plain:
        return plain_target_ns
    sources = self._reverse_plain.get(plain_target_ns)
    if sources:
        # The set holds at most one source; return it.
        return next(iter(sources))
    # Fall back to reversing the wildcard mappings.
    for _, namespace in self._regex_map:
        original_name = match_replace_regex(
            namespace_to_regex(namespace.dest_name),
            plain_target_ns,
            namespace.source_name,
        )
        if original_name:
            return original_name
    return None
python
{ "resource": "" }
q30457
NamespaceConfig.map_db
train
def map_db(self, plain_src_db):
    """Return every target database that collections in `plain_src_db`
    may map to.

    Individual collections of one database can be renamed into different
    target databases, so more than one name can come back. Used by the
    CommandHelper for the dropDatabase command.
    """
    if not self._regex_map and not self._plain:
        return [plain_src_db]
    # Looking up the command namespace seeds self._plain_db with any
    # wildcard mappings that touch this database.
    self.lookup(plain_src_db + ".$cmd")
    return list(self._plain_db.get(plain_src_db, set()))
python
{ "resource": "" }
q30458
NamespaceConfig.projection
train
def projection(self, plain_src_name):
    """Return a find() projection dict for the namespace, or None when
    no field filtering is configured (or the namespace is excluded).
    """
    mapped = self.lookup(plain_src_name)
    if not mapped:
        return None
    if mapped.include_fields:
        return dict((field, 1) for field in mapped.include_fields)
    if mapped.exclude_fields:
        return dict((field, 0) for field in mapped.exclude_fields)
    return None
python
{ "resource": "" }
q30459
NamespaceConfig.get_included_databases
train
def get_included_databases(self):
    """Return the explicitly included database names, or an empty list
    when every database is included (e.g. a wildcard database mapping).
    """
    included = set(self._plain_db)
    for _, namespace in self._regex_map:
        db_name, _ = namespace.source_name.split(".", 1)
        if "*" in db_name:
            # A wildcard database means we cannot enumerate databases.
            return []
        included.add(db_name)
    return list(included)
python
{ "resource": "" }
q30460
DocManager._meta_collections
train
def _meta_collections(self): """Provides the meta collections currently being used """ if self.use_single_meta_collection: yield self.meta_collection_name else: for name in self.meta_database.collection_names( include_system_collections=False ): yield name
python
{ "resource": "" }
q30461
DocManager.upsert
train
def upsert(self, doc, namespace, timestamp):
    """Insert or replace a document in MongoDB, recording its oplog
    timestamp in the metadata collection.
    """
    database, coll = self._db_and_collection(namespace)
    meta_collection_name = self._get_meta_collection(namespace)
    selector = {self.id_field: doc["_id"], "ns": namespace}
    meta_doc = {self.id_field: doc["_id"], "_ts": timestamp, "ns": namespace}
    self.meta_database[meta_collection_name].replace_one(
        selector, meta_doc, upsert=True
    )
    self.mongo[database][coll].replace_one({"_id": doc["_id"]}, doc, upsert=True)
python
{ "resource": "" }
q30462
DocManager.remove
train
def remove(self, document_id, namespace, timestamp):
    """Remove a document (or GridFS file) from MongoDB along with its
    metadata entry.
    """
    database, coll = self._db_and_collection(namespace)
    meta_collection = self._get_meta_collection(namespace)
    meta_doc = self.meta_database[meta_collection].find_one_and_delete(
        {self.id_field: document_id}
    )
    if meta_doc and meta_doc.get("gridfs_id"):
        # The metadata marks this as a GridFS file: delete via GridFS.
        GridFS(self.mongo[database], coll).delete(meta_doc["gridfs_id"])
    else:
        self.mongo[database][coll].delete_one({"_id": document_id})
python
{ "resource": "" }
q30463
DocManager.search
train
def search(self, start_ts, end_ts):
    """Yield metadata documents whose timestamps fall within the given
    inclusive range.
    """
    query = {"_ts": {"$lte": end_ts, "$gte": start_ts}}
    for meta_collection_name in self._meta_collections():
        for meta_doc in self.meta_database[meta_collection_name].find(query):
            yield meta_doc
python
{ "resource": "" }
q30464
DocManager.get_last_doc
train
def get_last_doc(self):
    """Return the metadata document with the greatest timestamp across
    all metadata collections.
    """
    def newest_candidates():
        # Each collection contributes its docs sorted by _ts descending.
        for meta_collection_name in self._meta_collections():
            meta_coll = self.meta_database[meta_collection_name]
            for meta_doc in meta_coll.find(limit=-1).sort("_ts", -1):
                yield meta_doc

    return max(newest_candidates(), key=lambda entry: entry["_ts"])
python
{ "resource": "" }
q30465
DocManager.upsert
train
def upsert(self, doc, namespace, timestamp):
    """Add or replace a document in the in-memory store."""
    # Allow exceptions to be triggered (for testing purposes)
    if doc.get("_upsert_exception"):
        raise Exception("upsert exception")
    self.doc_dict[doc["_id"]] = Entry(doc=doc, ns=namespace, ts=timestamp)
python
{ "resource": "" }
q30466
DocManager.insert_file
train
def insert_file(self, f, namespace, timestamp):
    """Store a GridFS file's metadata together with its content."""
    metadata = f.get_metadata()
    metadata["content"] = f.read()
    self.doc_dict[f._id] = Entry(doc=metadata, ns=namespace, ts=timestamp)
python
{ "resource": "" }
q30467
DocManager.remove
train
def remove(self, document_id, namespace, timestamp):
    """Mark a document as deleted, keeping a tombstone with the
    namespace and timestamp of the delete.
    """
    try:
        tombstone = self.doc_dict[document_id]
        tombstone.doc = None
        tombstone.update(namespace, timestamp)
    except KeyError:
        raise OperationFailed("Document does not exist: %s" % document_id)
python
{ "resource": "" }
q30468
DocManager.search
train
def search(self, start_ts, end_ts):
    """Searches through all documents and finds all documents that were
    modified or deleted within the range.

    Since we have very few documents in the doc dict when this is
    called, linear search is fine. This method is only used by rollbacks
    to query all the documents in the target engine within a certain
    timestamp window. The input will be two longs (converted from Bson
    timestamp) which specify the time range. The start_ts refers to the
    timestamp of the last oplog entry after a rollback. The end_ts is
    the timestamp of the last document committed to the backend.
    """
    for document_id in self.doc_dict:
        entry = self.doc_dict[document_id]
        # BUG FIX: the original tested `entry.ts <= end_ts or
        # entry.ts >= start_ts`, which is true for every entry whenever
        # start_ts <= end_ts. "Within the range" requires both bounds.
        if start_ts <= entry.ts <= end_ts:
            yield entry.meta_dict
python
{ "resource": "" }
q30469
DocManager.get_last_doc
train
def get_last_doc(self):
    """Return the metadata of the most recently modified or deleted
    document in the store.
    """
    latest = max(self.doc_dict.values(), key=lambda entry: entry.ts)
    return latest.meta_dict
python
{ "resource": "" }
q30470
DocManager._search
train
def _search(self): """Returns all documents in the doc dict. This function is not a part of the DocManager API, and is only used to simulate searching all documents from a backend. """ results = [] for _id in self.doc_dict: entry = self.doc_dict[_id] if entry.doc is not None: results.append(entry.merged_dict) return results
python
{ "resource": "" }
q30471
OplogThread._should_skip_entry
train
def _should_skip_entry(self, entry): """Determine if this oplog entry should be skipped. This has the possible side effect of modifying the entry's namespace and filtering fields from updates and inserts. """ # Don't replicate entries resulting from chunk moves if entry.get("fromMigrate"): return True, False # Ignore no-ops if entry["op"] == "n": return True, False ns = entry["ns"] if "." not in ns: return True, False coll = ns.split(".", 1)[1] # Ignore system collections if coll.startswith("system."): return True, False # Ignore GridFS chunks if coll.endswith(".chunks"): return True, False is_gridfs_file = False if coll.endswith(".files"): ns = ns[: -len(".files")] if self.namespace_config.gridfs_namespace(ns): is_gridfs_file = True else: return True, False # Commands should not be ignored, filtered, or renamed. Renaming is # handled by the DocManagers via the CommandHelper class. if coll == "$cmd": return False, False # Rename or filter out namespaces that are ignored keeping # included gridfs namespaces. namespace = self.namespace_config.lookup(ns) if namespace is None: LOG.debug( "OplogThread: Skipping oplog entry: " "'%s' is not in the namespace configuration." % (ns,) ) return True, False # Update the namespace. entry["ns"] = namespace.dest_name # Take fields out of the oplog entry that shouldn't be replicated. # This may nullify the document if there's nothing to do. if not self.filter_oplog_entry( entry, include_fields=namespace.include_fields, exclude_fields=namespace.exclude_fields, ): return True, False return False, is_gridfs_file
python
{ "resource": "" }
q30472
OplogThread.join
train
def join(self):
    """Stop this thread from managing the oplog.

    Clears the ``running`` flag (checked by the thread's main loop) and
    then blocks until the thread actually exits.
    """
    LOG.debug("OplogThread: exiting due to join call.")
    # Ask the run loop to stop at its next iteration.
    self.running = False
    # Block until the thread terminates.
    threading.Thread.join(self)
python
{ "resource": "" }
q30473
OplogThread._find_field
train
def _find_field(cls, field, doc): """Find the field in the document which matches the given field. The field may be in dot notation, eg "a.b.c". Returns a list with a single tuple (path, field_value) or the empty list if the field is not present. """ path = field.split(".") try: for key in path: doc = doc[key] return [(path, doc)] except (KeyError, TypeError): return []
python
{ "resource": "" }
q30474
OplogThread._find_update_fields
train
def _find_update_fields(cls, field, doc):
    """Find the fields in the update document which match the given field.

    Both the field and the top level keys in the doc may be in dot
    notation, eg "a.b.c".

    Returns a list of tuples (path, field_value) or the empty list if
    the field is not present. The exact-match fast path is tried first;
    partial (prefix) matches are only searched when it fails.
    """

    def find_partial_matches():
        for key in doc:
            if len(key) > len(field):
                # Handle case where field is a prefix of key, eg field is
                # 'a' and key is 'a.b'.
                if key.startswith(field) and key[len(field)] == ".":
                    yield [key], doc[key]
                    # Continue searching, there may be multiple matches.
                    # For example, field 'a' should match 'a.b' and 'a.c'.
            elif len(key) < len(field):
                # Handle case where key is a prefix of field, eg field is
                # 'a.b' and key is 'a'.
                if field.startswith(key) and field[len(key)] == ".":
                    # Search for the remaining part of the field
                    matched = cls._find_field(field[len(key) + 1 :], doc[key])
                    if matched:
                        # Add the top level key to the path.
                        match = matched[0]
                        match[0].insert(0, key)
                        yield match
                    # Stop searching, it's not possible for any other
                    # keys in the update doc to match this field.
                    return

    try:
        # Fast path: field exactly matches a top-level key.
        return [([field], doc[field])]
    except KeyError:
        # Field does not exactly match any key in the update doc.
        return list(find_partial_matches())
python
{ "resource": "" }
q30475
OplogThread.filter_oplog_entry
train
def filter_oplog_entry(self, entry, include_fields=None, exclude_fields=None):
    """Remove fields from an oplog entry that should not be replicated.

    NOTE: this does not support array indexing, for example 'a.b.2'

    Mutates ``entry`` in place and returns it, or returns None when an
    update's spec is emptied by the filtering (nothing left to apply).
    Exactly one of include_fields/exclude_fields is expected to be set
    when filtering is wanted.
    """
    if not include_fields and not exclude_fields:
        return entry
    elif include_fields:
        filter_fields = self._copy_included_fields
    else:
        filter_fields = self._pop_excluded_fields

    fields = include_fields or exclude_fields
    entry_o = entry["o"]
    # Version 3.6 of mongodb includes a $v,
    # see https://jira.mongodb.org/browse/SERVER-32240
    if "$v" in entry_o:
        entry_o.pop("$v")

    # 'i' indicates an insert. 'o' field is the doc to be inserted.
    if entry["op"] == "i":
        entry["o"] = filter_fields(entry_o, fields)
    # 'u' indicates an update. The 'o' field describes an update spec
    # if '$set' or '$unset' are present.
    elif entry["op"] == "u" and ("$set" in entry_o or "$unset" in entry_o):
        if "$set" in entry_o:
            entry["o"]["$set"] = filter_fields(entry_o["$set"], fields, update=True)
        if "$unset" in entry_o:
            entry["o"]["$unset"] = filter_fields(
                entry_o["$unset"], fields, update=True
            )
        # not allowed to have empty $set/$unset, so remove if empty
        if "$set" in entry_o and not entry_o["$set"]:
            entry_o.pop("$set")
        if "$unset" in entry_o and not entry_o["$unset"]:
            entry_o.pop("$unset")
        # If both specs were removed the update is a no-op: drop it.
        if not entry_o:
            return None
    # 'u' indicates an update. The 'o' field is the replacement document
    # if no '$set' or '$unset' are present.
    elif entry["op"] == "u":
        entry["o"] = filter_fields(entry_o, fields)

    return entry
python
{ "resource": "" }
q30476
OplogThread.get_oplog_cursor
train
def get_oplog_cursor(self, timestamp=None):
    """Open a tailable cursor on the oplog, excluding no-op entries.

    With a ``timestamp``, the cursor starts at entries with ts >= that
    value (using oplog_replay for an efficient seek); otherwise it covers
    the entire oplog.
    """
    query = {"op": {"$ne": "n"}}
    if timestamp is not None:
        query["ts"] = {"$gte": timestamp}
        return self.oplog.find(
            query, cursor_type=CursorType.TAILABLE_AWAIT, oplog_replay=True
        )
    return self.oplog.find(query, cursor_type=CursorType.TAILABLE_AWAIT)
python
{ "resource": "" }
q30477
OplogThread.get_collection
train
def get_collection(self, namespace):
    """Resolve a dotted "db.collection" namespace to a pymongo collection."""
    db_name, coll_name = namespace.split(".", 1)
    return self.primary_client[db_name][coll_name]
python
{ "resource": "" }
q30478
OplogThread._get_oplog_timestamp
train
def _get_oplog_timestamp(self, newest_entry):
    """Return the timestamp of the newest (or oldest) non-noop oplog entry.

    Returns None when the oplog contains no such entries.
    """
    direction = pymongo.DESCENDING if newest_entry else pymongo.ASCENDING
    cursor = (
        self.oplog.find({"op": {"$ne": "n"}})
        .sort("$natural", direction)
        .limit(-1)
    )
    try:
        ts = next(cursor)["ts"]
    except StopIteration:
        LOG.debug("OplogThread: oplog is empty.")
        return None
    LOG.debug(
        "OplogThread: %s oplog entry has timestamp %s."
        % ("Newest" if newest_entry else "Oldest", ts)
    )
    return ts
python
{ "resource": "" }
q30479
OplogThread.init_cursor
train
def init_cursor(self):
    """Position the cursor appropriately.

    The cursor is set to either the beginning of the oplog, or
    wherever it was last left off.

    Returns a ``(cursor, cursor_empty)`` tuple; ``cursor`` may be None
    when there is nothing to tail (dump-only mode, or the checkpoint has
    fallen off the oplog). May recurse after a rollback to retry.
    """
    timestamp = self.read_last_checkpoint()

    if timestamp is None or self.only_dump:
        if self.collection_dump:
            # dump collection and update checkpoint
            timestamp = self.dump_collection()
            if self.only_dump:
                LOG.info("Finished dump. Exiting.")
                timestamp = None
                self.running = False
            self.update_checkpoint(timestamp)
            if timestamp is None:
                return None, True
        else:
            # Collection dump disabled:
            # Return cursor to beginning of oplog but do not set the
            # checkpoint. The checkpoint will be set after an operation
            # has been applied.
            cursor = self.get_oplog_cursor()
            return cursor, self._cursor_empty(cursor)

    cursor = self.get_oplog_cursor(timestamp)
    cursor_empty = self._cursor_empty(cursor)
    if cursor_empty:
        # rollback, update checkpoint, and retry
        LOG.debug("OplogThread: Initiating rollback from " "get_oplog_cursor")
        self.update_checkpoint(self.rollback())
        return self.init_cursor()

    first_oplog_entry = next(cursor)

    oldest_ts_long = util.bson_ts_to_long(self.get_oldest_oplog_timestamp())
    checkpoint_ts_long = util.bson_ts_to_long(timestamp)
    if checkpoint_ts_long < oldest_ts_long:
        # We've fallen behind, the checkpoint has fallen off the oplog
        return None, True

    cursor_ts_long = util.bson_ts_to_long(first_oplog_entry["ts"])
    if cursor_ts_long > checkpoint_ts_long:
        # The checkpoint is not present in this oplog and the oplog
        # did not rollover. This means that we connected to a new
        # primary which did not replicate the checkpoint and which has
        # new changes in its oplog for us to process.
        # rollback, update checkpoint, and retry
        LOG.debug(
            "OplogThread: Initiating rollback from "
            "get_oplog_cursor: new oplog entries found but "
            "checkpoint is not present"
        )
        self.update_checkpoint(self.rollback())
        return self.init_cursor()

    # first entry has been consumed
    return cursor, cursor_empty
python
{ "resource": "" }
q30480
OplogThread.update_checkpoint
train
def update_checkpoint(self, checkpoint):
    """Persist ``checkpoint`` into the oplog progress dictionary.

    Does nothing (beyond a debug log) when ``checkpoint`` is None or
    unchanged from the last stored value.
    """
    if checkpoint is None or checkpoint == self.checkpoint:
        LOG.debug("OplogThread: no checkpoint to update.")
        return
    self.checkpoint = checkpoint
    with self.oplog_progress as oplog_prog:
        progress = oplog_prog.get_dict()
        # Drop the legacy key (mongo-connector <= 2.3 keyed progress by the
        # repr of the oplog collection) and store under the replica set
        # name instead -- see the comment in read_last_checkpoint for the
        # format-change rationale.
        progress.pop(str(self.oplog), None)
        progress[self.replset_name] = checkpoint
    LOG.debug("OplogThread: oplog checkpoint updated to %s", checkpoint)
python
{ "resource": "" }
q30481
OplogThread.read_last_checkpoint
train
def read_last_checkpoint(self):
    """Read the last checkpoint from the oplog progress dictionary.

    mongo-connector <= 2.3 keyed checkpoints by the repr of the oplog
    collection; later versions key by replica set name. Both keys are
    checked (new format first) for backwards compatibility.
    """
    legacy_key = str(self.oplog)
    ret_val = None
    with self.oplog_progress as oplog_prog:
        progress = oplog_prog.get_dict()
        for key in (self.replset_name, legacy_key):
            if key in progress:
                ret_val = progress[key]
                break
    LOG.debug("OplogThread: reading last checkpoint as %s " % str(ret_val))
    self.checkpoint = ret_val
    return ret_val
python
{ "resource": "" }
q30482
ClusteredWeningerFeatures.fit
train
def fit(self, blocks, y=None):
    """Fit the k-means clustering model on an ordered sequence of blocks.

    After fitting, cluster centers are column-sorted and the first center
    is pinned to the origin (0.0, 0.0).
    """
    features = make_weninger_features(blocks)
    self.kmeans.fit(features)
    centers = self.kmeans.cluster_centers_
    # sort each coordinate column, then force the center closest to the
    # origin to be exactly the origin
    centers.sort(axis=0)
    centers[0, :] = np.zeros(2)
    return self
python
{ "resource": "" }
q30483
evaluate_model_predictions
train
def evaluate_model_predictions(y_true, y_pred, weights=None):
    """Score binary content/not-content predictions, typically block-level.

    Args:
        y_true (``np.ndarray``): true labels, or a list of per-document arrays
        y_pred (``np.ndarray``): predicted labels, same shape convention
        weights (``np.ndarray``): optional sample weights

    Returns:
        Dict[str, float]: accuracy, precision, recall and f1
    """
    # flatten per-document arrays into single label vectors
    if isinstance(y_pred[0], np.ndarray):
        y_pred = np.concatenate(y_pred)
    if isinstance(y_true[0], np.ndarray):
        y_true = np.concatenate(y_true)
    if weights is not None and isinstance(weights[0], np.ndarray):
        weights = np.concatenate(weights)
    return {
        'accuracy': accuracy_score(
            y_true, y_pred, normalize=True, sample_weight=weights),
        'precision': precision_score(
            y_true, y_pred, average='binary', pos_label=1, sample_weight=weights),
        'recall': recall_score(
            y_true, y_pred, average='binary', pos_label=1, sample_weight=weights),
        'f1': f1_score(
            y_true, y_pred, average='binary', pos_label=1, sample_weight=weights),
    }
python
{ "resource": "" }
q30484
evaluate_extracted_tokens
train
def evaluate_extracted_tokens(gold_content, extr_content):
    """Compare gold-standard and extracted content for one document.

    Either argument may be a raw string (tokenized here) or an
    already-tokenized sequence of tokens.

    Returns:
        Dict[str, float]: token-set Jaccard similarity and sequence
        Damerau-Levenshtein distance
    """
    if isinstance(gold_content, string_):
        gold_content = simple_tokenizer(gold_content)
    if isinstance(extr_content, string_):
        extr_content = simple_tokenizer(extr_content)
    gold_set = set(gold_content)
    extr_set = set(extr_content)
    shared = len(gold_set & extr_set)
    combined = len(gold_set | extr_set)
    return {
        'jaccard': shared / combined,
        'levenshtein': dameraulevenshtein(gold_content, extr_content),
    }
python
{ "resource": "" }
q30485
extract_gold_standard_blocks
train
def extract_gold_standard_blocks(data_dir, fileroot, encoding=None,
                                 tokenizer=simple_tokenizer, cetr=False):
    """
    Extract the gold standard block-level content and comments for a single
    observation identified by ``fileroot``, and write the results to file.

    Args:
        data_dir (str): Root directory containing sub-directories for raw
            HTML, gold standard extracted content, and gold standard blocks.
        fileroot (str): Unique identifier for a single observation of
            training data.
        encoding (str)
        tokenizer (Callable): Takes a string, returns a list of token strings.
        cetr (bool): If True, parse the gold standard in clean eval format.

    Notes:
        Results are written to a text file in the block-level gold standard
        dir below ``data_dir``, one line per block::

            content_frac\tcomments_frac\tall_tokens\tcontent_tokens\tcomments_tokens
    """
    # read the raw html, split it into blocks, and tokenize each block
    raw_html = read_html_file(data_dir, fileroot, encoding=encoding)  # unicode
    from dragnet.blocks import BlockifyError
    try:
        blocks = [b.text for b in Blockifier.blockify(raw_html)]
    except BlockifyError:
        print('BlockifyError for file "{}"'.format(fileroot))
        return

    blocks_tokens = [tokenizer(block) for block in blocks]
    num_blocks_tokens = [len(block_tokens) for block_tokens in blocks_tokens]

    # Flatten all tokens across blocks, keeping a parallel list of block ids
    # so that tokens found in the gold standard can be attributed back to
    # the block they came from.
    all_blocks_tokens = []
    all_blocks_tokens_block_id = []
    for i, block_tokens in enumerate(blocks_tokens):
        all_blocks_tokens.extend(block_tokens)
        all_blocks_tokens_block_id.extend([i] * len(block_tokens))

    def get_frac_and_str_tokens_in_gs(gs_txt):
        """
        For each block, determine which and what fraction of its tokens are
        also present in the gold standard text ``gs_txt``.

        Returns:
            List[float]: per-block fraction of tokens found in ``gs_txt``
            List[str]: per-block space-joined matched tokens
        """
        gs_tokens = tokenizer(gs_txt)
        tokens_in_gs = check_inclusion(all_blocks_tokens, gs_tokens)
        num_blocks_tokens_in_gs = [0 for _ in range(len(blocks))]
        blocks_tokens_in_gs_tokens = [[] for _ in range(len(blocks))]
        for token, token_in_gs, block_id in zip(
                all_blocks_tokens, tokens_in_gs, all_blocks_tokens_block_id):
            if token_in_gs is True:
                num_blocks_tokens_in_gs[block_id] += 1
                blocks_tokens_in_gs_tokens[block_id].append(token)
        blocks_tokens_strs_in_gs = [
            ' '.join(block_tokens_in_gs_tokens)
            for block_tokens_in_gs_tokens in blocks_tokens_in_gs_tokens]
        # NOTE(review): assumes every block has at least one token; a block
        # with zero tokens would raise ZeroDivisionError here -- confirm
        # Blockifier never produces empty-text blocks.
        frac_blocks_tokens_in_gs = [
            num_block_tokens_in_gs / num_block_tokens
            for num_block_tokens_in_gs, num_block_tokens
            in zip(num_blocks_tokens_in_gs, num_blocks_tokens)]
        return (frac_blocks_tokens_in_gs, blocks_tokens_strs_in_gs)

    # BUG FIX: ``cetr`` was previously passed positionally, which bound it
    # to read_gold_standard_file's ``encoding`` parameter (so cetr=True
    # crashed io.open and the cetr parsing branch was never taken).
    gs_content, gs_comments = read_gold_standard_file(
        data_dir, fileroot, cetr=cetr)
    frac_blocks_tokens_in_gs_content, blocks_tokens_strs_in_gs_content = \
        get_frac_and_str_tokens_in_gs(gs_content)
    frac_blocks_tokens_in_gs_comments, blocks_tokens_strs_in_gs_comments = \
        get_frac_and_str_tokens_in_gs(gs_comments)

    output_fname = os.path.join(
        data_dir, GOLD_STANDARD_BLOCKS_DIRNAME, fileroot + GOLD_STANDARD_BLOCKS_EXT)
    line_fmt = u'{frac_content}\t{frac_comments}\t{block_tokens}\t{content_tokens}\t{comment_tokens}\n'
    with io.open(output_fname, mode='w') as f:
        for block_id, block_tokens in enumerate(blocks_tokens):
            line = line_fmt.format(
                frac_content=frac_blocks_tokens_in_gs_content[block_id],
                frac_comments=frac_blocks_tokens_in_gs_comments[block_id],
                block_tokens=' '.join(block_tokens),
                content_tokens=blocks_tokens_strs_in_gs_content[block_id],
                comment_tokens=blocks_tokens_strs_in_gs_comments[block_id])
            f.write(line)
python
{ "resource": "" }
q30486
get_filenames
train
def get_filenames(dirname, full_path=False, match_regex=None, extension=None):
    """
    Yield filenames under ``dirname``, in sorted order, optionally filtered
    by a regex pattern and/or a file extension, and optionally joined with
    the directory path.

    Args:
        dirname (str): directory on disk whose files are listed
        full_path (bool): if True, yield ``os.path.join(dirname, fname)``
        match_regex (str): only yield names matching this regex
        extension (str): only yield names with this extension (e.g. ".txt")

    Yields:
        str: next matching filename

    Raises:
        OSError: if ``dirname`` does not exist
    """
    if not os.path.exists(dirname):
        raise OSError('directory "{}" does not exist'.format(dirname))
    # compile once, outside the loop
    pattern = re.compile(match_regex) if match_regex else None
    for fname in sorted(os.listdir(dirname)):
        if extension and os.path.splitext(fname)[-1] != extension:
            continue
        if pattern and not pattern.search(fname):
            continue
        yield os.path.join(dirname, fname) if full_path is True else fname
python
{ "resource": "" }
q30487
read_html_file
train
def read_html_file(data_dir, fileroot, encoding=None):
    """
    Read the raw HTML file for ``fileroot`` under ``data_dir``'s raw HTML
    directory, trying a sequence of encodings, and return the text with
    mangled encodings repaired and whitespace stripped.

    Args:
        data_dir (str)
        fileroot (str)
        encoding (str): if given, only this encoding is attempted

    Returns:
        str
    """
    fname = os.path.join(
        data_dir, RAW_HTML_DIRNAME, fileroot + RAW_HTML_EXT)
    if encoding:
        candidate_encodings = (encoding,)
    else:
        candidate_encodings = ('utf-8', 'iso-8859-1')  # 'utf-16'
    for candidate in candidate_encodings:
        try:
            with io.open(fname, mode='rt', encoding=candidate) as f:
                raw_html = f.read()
                break
        except (UnicodeDecodeError, UnicodeError):
            # NOTE(review): if every candidate fails, raw_html is None and
            # ftfy.fix_encoding(None) below will raise -- confirm intended.
            raw_html = None
    return ftfy.fix_encoding(raw_html).strip()
python
{ "resource": "" }
q30488
read_gold_standard_file
train
def read_gold_standard_file(data_dir, fileroot, encoding=None, cetr=False):
    """
    Read the gold standard content file corresponding to identifier
    ``fileroot`` in the gold standard directory below the root ``data_dir``.

    Args:
        data_dir (str)
        fileroot (str)
        encoding (str): if given, only this encoding is attempted
        cetr (bool): if True, assume no comments and parse the gold
            standard to remove tags

    Returns:
        List[str, str]: contents string and comments string, respectively
        (two empty strings when the file could not be decoded)
    """
    fname = os.path.join(
        data_dir, GOLD_STANDARD_DIRNAME, fileroot + GOLD_STANDARD_EXT)
    encodings = (encoding,) if encoding else ('utf-8', 'utf-16', 'iso-8859-1')
    # NOTE: the loop variable deliberately shadows the ``encoding``
    # parameter; the original tuple was already captured above.
    for encoding in encodings:
        try:
            with io.open(fname, mode='rt', encoding=encoding) as f:
                gold_standard = f.read()
                break
        except (UnicodeDecodeError, UnicodeError):
            gold_standard = None
    if not gold_standard:
        return [u'', u'']
    if not cetr:
        # content and comments are separated by a delimiter pattern
        content_comments = RE_COMMENTS_DELIM.split(gold_standard, maxsplit=1)
        # if no comments delimiter found, append empty comments string
        if len(content_comments) == 1:
            content_comments = [content_comments[0], u'']
    else:
        # clean-eval format: strip tags, no comments section
        tree = etree.fromstring(gold_standard, parser=etree.HTMLParser())
        content_comments = [u' '.join(text_from_subtree(tree)), u'']
    # fix text in case of mangled encodings
    content_comments = [ftfy.fix_encoding(content_comments[0]).strip(),
                        ftfy.fix_encoding(content_comments[1]).strip()]
    return content_comments
python
{ "resource": "" }
q30489
read_gold_standard_blocks_file
train
def read_gold_standard_blocks_file(data_dir, fileroot, split_blocks=True):
    """
    Read the gold standard blocks file for ``fileroot`` under ``data_dir``.

    Args:
        data_dir (str)
        fileroot (str)
        split_blocks (bool): if True, split the file content into per-block
            lines

    Returns:
        Iterable[str]: non-empty block lines (or non-empty characters when
        ``split_blocks`` is False)
    """
    fname = os.path.join(
        data_dir, GOLD_STANDARD_BLOCKS_DIRNAME, fileroot + GOLD_STANDARD_BLOCKS_EXT)
    with io.open(fname, mode='r') as f:
        data = f.read()
    # drop the trailing newline before splitting into one line per block
    content = data[:-1].split('\n') if split_blocks else data
    return filter(None, content)
python
{ "resource": "" }
q30490
prepare_data
train
def prepare_data(data_dir, fileroot, block_pct_tokens_thresh=0.1):
    """
    Prepare data for a single HTML + gold standard blocks example,
    uniquely identified by ``fileroot``.

    Args:
        data_dir (str)
        fileroot (str)
        block_pct_tokens_thresh (float): must be in [0.0, 1.0]

    Returns:
        Tuple: ``(html, parsed_content_blocks, parsed_comments_blocks)``
        where ``html`` is the raw document string and each parsed element
        is the 3-tuple produced by ``_parse_content_or_comments_blocks``
        (binary labels, per-block token counts, concatenated tokens).

    See Also:
        :func:`prepare_all_data`
    """
    if not 0.0 <= block_pct_tokens_thresh <= 1.0:
        raise ValueError('block_pct_tokens_thresh must be in the range [0.0, 1.0]')

    html = read_html_file(data_dir, fileroot)
    raw_blocks = read_gold_standard_blocks_file(data_dir, fileroot, split_blocks=True)

    content_blocks = []
    comments_blocks = []
    for raw_block in raw_blocks:
        fields = raw_block.split('\t')
        # total token count of the block serves as its weight
        weight = len(fields[2].split())
        content_blocks.append((float(fields[0]), weight, fields[3].split()))
        comments_blocks.append((float(fields[1]), weight, fields[4].split()))

    return (
        html,
        _parse_content_or_comments_blocks(content_blocks, block_pct_tokens_thresh),
        _parse_content_or_comments_blocks(comments_blocks, block_pct_tokens_thresh),
    )
python
{ "resource": "" }
q30491
prepare_all_data
train
def prepare_all_data(data_dir, block_pct_tokens_thresh=0.1):
    """
    Prepare data for every HTML + gold standard blocks example found under
    ``data_dir``.

    Args:
        data_dir (str)
        block_pct_tokens_thresh (float): must be in [0.0, 1.0]

    Returns:
        List: one :func:`prepare_data` result per gold-standard-blocks file
    """
    blocks_dir = os.path.join(data_dir, GOLD_STANDARD_BLOCKS_DIRNAME)
    ext_pattern = re.escape(GOLD_STANDARD_BLOCKS_EXT)
    prepared = []
    for filename in get_filenames(blocks_dir, full_path=False,
                                  match_regex=ext_pattern):
        # strip the blocks-file extension to recover the fileroot
        fileroot = re.search(r'(.+)' + ext_pattern, filename).group(1)
        prepared.append(prepare_data(data_dir, fileroot, block_pct_tokens_thresh))
    return prepared
python
{ "resource": "" }
q30492
str_cast
train
def str_cast(maybe_bytes, encoding='utf-8'):
    """
    Return a string version of ``maybe_bytes``: bytes-like input is decoded
    with ``encoding``; anything else is returned unchanged.
    """
    if not isinstance(maybe_bytes, bytes_):
        return maybe_bytes
    return maybe_bytes.decode(encoding)
python
{ "resource": "" }
q30493
bytes_cast
train
def bytes_cast(maybe_str, encoding='utf-8'):
    """
    Return a bytes version of ``maybe_str``: string-like input is encoded
    with ``encoding``; anything else is returned unchanged.
    """
    if not isinstance(maybe_str, unicode_):
        return maybe_str
    return maybe_str.encode(encoding)
python
{ "resource": "" }
q30494
str_dict_cast
train
def str_dict_cast(dict_, include_keys=True, include_vals=True, **kwargs):
    """
    Return a new dict in which bytes-like keys and/or values of ``dict_``
    have been converted to strings.

    Args:
        dict_ (dict)
        include_keys (bool): if True, cast keys to strings
        include_vals (bool): if True, cast values to strings
        kwargs: forwarded to the list-cast helper (e.g. ``encoding``)
    """
    keys = dict_.keys()
    vals = dict_.values()
    if include_keys:
        keys = str_list_cast(keys, **kwargs)
    if include_vals:
        vals = str_list_cast(vals, **kwargs)
    return dict(zip_(keys, vals))
python
{ "resource": "" }
q30495
bytes_dict_cast
train
def bytes_dict_cast(dict_, include_keys=True, include_vals=True, **kwargs):
    """
    Return a new dict in which string-like keys and/or values of ``dict_``
    have been converted to bytes.

    Args:
        dict_ (dict)
        include_keys (bool): if True, cast keys to bytes
        include_vals (bool): if True, cast values to bytes
        kwargs: forwarded to the list-cast helper (e.g. ``encoding``)
    """
    keys = dict_.keys()
    vals = dict_.values()
    if include_keys:
        keys = bytes_list_cast(keys, **kwargs)
    if include_vals:
        vals = bytes_list_cast(vals, **kwargs)
    return dict(zip_(keys, vals))
python
{ "resource": "" }
q30496
str_block_cast
train
def str_block_cast(block, include_text=True, include_link_tokens=True,
                   include_css=True, include_features=True, **kwargs):
    """
    Convert the bytes-like attributes of a ``blocks.Block`` (text,
    link_tokens, css, features) to strings in place, returning the block.

    Each ``include_*`` flag controls whether the corresponding attribute
    is converted; ``kwargs`` (e.g. ``encoding``) are forwarded to the
    underlying cast helpers.
    """
    conversions = (
        (include_text, 'text', str_cast),
        (include_link_tokens, 'link_tokens', str_list_cast),
        (include_css, 'css', str_dict_cast),
        (include_features, 'features', str_dict_cast),
    )
    for enabled, attr, cast in conversions:
        if enabled:
            setattr(block, attr, cast(getattr(block, attr), **kwargs))
    return block
python
{ "resource": "" }
q30497
bytes_block_cast
train
def bytes_block_cast(block, include_text=True, include_link_tokens=True,
                     include_css=True, include_features=True, **kwargs):
    """
    Convert the string-like attributes of a ``blocks.Block`` (text,
    link_tokens, css, features) to bytes in place, returning the block.

    Each ``include_*`` flag controls whether the corresponding attribute
    is converted; ``kwargs`` (e.g. ``encoding``) are forwarded to the
    underlying cast helpers.
    """
    conversions = (
        (include_text, 'text', bytes_cast),
        (include_link_tokens, 'link_tokens', bytes_list_cast),
        (include_css, 'css', bytes_dict_cast),
        (include_features, 'features', bytes_dict_cast),
    )
    for enabled, attr, cast in conversions:
        if enabled:
            setattr(block, attr, cast(getattr(block, attr), **kwargs))
    return block
python
{ "resource": "" }
q30498
dameraulevenshtein
train
def dameraulevenshtein(seq1, seq2): """Calculate the Damerau-Levenshtein distance between sequences. This distance is the number of additions, deletions, substitutions, and transpositions needed to transform the first sequence into the second. Although generally used with strings, any sequences of comparable objects will work. Transpositions are exchanges of *consecutive* characters; all other operations are self-explanatory. This implementation is O(N*M) time and O(M) space, for N and M the lengths of the two sequences. >>> dameraulevenshtein('ba', 'abc') 2 >>> dameraulevenshtein('fee', 'deed') 2 It works with arbitrary sequences too: >>> dameraulevenshtein('abcd', ['b', 'a', 'c', 'd', 'e']) 2 """ # codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix. # However, only the current and two previous rows are needed at once, # so we only store those. oneago = None thisrow = list(range_(1, len(seq2) + 1)) + [0] for x in range_(len(seq1)): # Python lists wrap around for negative indices, so put the # leftmost column at the *end* of the list. This matches with # the zero-indexed strings and saves extra calculation. twoago, oneago, thisrow = oneago, thisrow, [0] * len(seq2) + [x + 1] for y in range_(len(seq2)): delcost = oneago[y] + 1 addcost = thisrow[y - 1] + 1 subcost = oneago[y - 1] + (seq1[x] != seq2[y]) thisrow[y] = min(delcost, addcost, subcost) # This block deals with transpositions if (x > 0 and y > 0 and seq1[x] == seq2[y - 1] and seq1[x - 1] == seq2[y] and seq1[x] != seq2[y]): thisrow[y] = min(thisrow[y], twoago[y - 2] + 1) return thisrow[len(seq2) - 1]
python
{ "resource": "" }
q30499
load_pickled_model
train
def load_pickled_model(filename, dirname=None):
    """
    Load a pickled ``Extractor`` model from disk.

    Args:
        filename (str): Name of the pickled model file under ``dirname``.
        dirname (str): Directory containing the pickled model. If None,
            dragnet's default pickled-model directory is used:
            /path/to/dragnet/pickled_models/[PY_VERSION]_[SKLEARN_VERSION]

    Returns:
        :class:`dragnet.extractor.Extractor`
    """
    if dirname is None:
        pkg_filename = pkgutil.get_loader('dragnet').get_filename('dragnet')
        dirname = os.path.join(
            os.path.dirname(pkg_filename), 'pickled_models', model_path)
    return joblib.load(os.path.join(dirname, filename))
python
{ "resource": "" }