_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q34400
BlockMatcher.load_settings
train
def load_settings(self, settings):
    """Load block matcher settings from a JSON file and apply them."""
    with open(settings) as settings_file:
        # Each key in the file becomes an attribute on this matcher.
        for key, value in simplejson.load(settings_file).items():
            setattr(self, key, value)
python
{ "resource": "" }
q34401
BlockMatcher.save_settings
train
def save_settings(self, settings_file):
    """Save block matcher settings to a file object.

    Only the parameters listed in ``self.parameter_maxima`` are
    persisted, serialized as a JSON object.

    :param settings_file: path of the file to write settings to
    """
    settings = {}
    for parameter in self.parameter_maxima:
        settings[parameter] = self.__getattribute__(parameter)
    # Use a distinct name for the open handle: the original rebound the
    # ``settings_file`` path argument inside the ``with``, shadowing it.
    with open(settings_file, "w") as outfile:
        simplejson.dump(settings, outfile)
python
{ "resource": "" }
q34402
StereoBM.search_range
train
def search_range(self, value):
    """Set private ``_search_range`` and reset ``_block_matcher``."""
    # Reject anything that is neither zero nor a multiple of 16.
    if value != 0 and value % 16:
        raise InvalidSearchRangeError("Search range must be a multiple of "
                                      "16.")
    self._search_range = value
    self._replace_bm()
python
{ "resource": "" }
q34403
StereoBM.window_size
train
def window_size(self, value):
    """Set private ``_window_size`` and reset ``_block_matcher``."""
    is_odd = bool(value % 2)
    in_range = 4 < value < self.parameter_maxima["window_size"]
    if in_range and is_odd:
        self._window_size = value
    else:
        # NOTE(review): the message says "between 0 and ..." while the
        # check requires value > 4 — confirm the intended lower bound.
        raise InvalidWindowSizeError("Window size must be an odd number "
                                     "between 0 and {}.".format(
                                     self.parameter_maxima["window_size"] + 1))
    self._replace_bm()
python
{ "resource": "" }
q34404
StereoBM.stereo_bm_preset
train
def stereo_bm_preset(self, value):
    """Set private ``_bm_preset`` and reset ``_block_matcher``."""
    valid_presets = (cv2.STEREO_BM_BASIC_PRESET,
                     cv2.STEREO_BM_FISH_EYE_PRESET,
                     cv2.STEREO_BM_NARROW_PRESET)
    if value not in valid_presets:
        raise InvalidBMPresetError("Stereo BM preset must be defined as "
                                   "cv2.STEREO_BM_*_PRESET.")
    self._bm_preset = value
    self._replace_bm()
python
{ "resource": "" }
q34405
StereoSGBM.numDisparities
train
def numDisparities(self, value):
    """Set private ``_num_disp`` and reset ``_block_matcher``."""
    divisible_by_16 = (value % 16 == 0)
    if value > 0 and divisible_by_16:
        self._num_disp = value
    else:
        raise InvalidNumDisparitiesError("numDisparities must be a "
                                         "positive integer evenly "
                                         "divisible by 16.")
    self._replace_bm()
python
{ "resource": "" }
q34406
StereoSGBM.SADWindowSize
train
def SADWindowSize(self, value):
    """Set private ``_sad_window_size`` and reset ``_block_matcher``."""
    # Must be an odd value in the closed interval [1, 11].
    if 1 <= value <= 11 and value % 2:
        self._sad_window_size = value
    else:
        raise InvalidSADWindowSizeError("SADWindowSize must be odd and "
                                        "between 1 and 11.")
    self._replace_bm()
python
{ "resource": "" }
q34407
StereoSGBM.uniquenessRatio
train
def uniquenessRatio(self, value):
    """Set private ``_uniqueness`` and reset ``_block_matcher``."""
    if 5 <= value <= 15:
        self._uniqueness = value
    else:
        raise InvalidUniquenessRatioError("Uniqueness ratio must be "
                                          "between 5 and 15.")
    self._replace_bm()
python
{ "resource": "" }
q34408
StereoSGBM.speckleWindowSize
train
def speckleWindowSize(self, value):
    """Set private ``_speckle_window_size`` and reset ``_block_matcher``."""
    # NOTE(review): the check accepts the full [0, 200] range while the
    # message recommends 0 or 50-200 — confirm the intended contract.
    if 0 <= value <= 200:
        self._speckle_window_size = value
    else:
        raise InvalidSpeckleWindowSizeError("Speckle window size must be 0 "
                                            "for disabled checks or "
                                            "between 50 and 200.")
    self._replace_bm()
python
{ "resource": "" }
q34409
StereoSGBM.speckleRange
train
def speckleRange(self, value):
    """Set private ``_speckle_range`` and reset ``_block_matcher``."""
    if value < 0:
        raise InvalidSpeckleRangeError("Speckle range cannot be negative.")
    self._speckle_range = value
    self._replace_bm()
python
{ "resource": "" }
q34410
StereoSGBM.P1
train
def P1(self, value):
    """Set private ``_P1`` and reset ``_block_matcher``."""
    # P1 must stay strictly below P2.
    if not value < self.P2:
        raise InvalidFirstDisparityChangePenaltyError("P1 must be less "
                                                      "than P2.")
    self._P1 = value
    self._replace_bm()
python
{ "resource": "" }
q34411
StereoSGBM.P2
train
def P2(self, value):
    """Set private ``_P2`` and reset ``_block_matcher``."""
    # P2 must stay strictly above P1.
    if not value > self.P1:
        raise InvalidSecondDisparityChangePenaltyError("P2 must be greater "
                                                       "than P1.")
    self._P2 = value
    self._replace_bm()
python
{ "resource": "" }
q34412
StereoCalibration._copy_calibration
train
def _copy_calibration(self, calibration): """Copy another ``StereoCalibration`` object's values.""" for key, item in calibration.__dict__.items(): self.__dict__[key] = item
python
{ "resource": "" }
q34413
StereoCalibrator._get_corners
train
def _get_corners(self, image):
    """Find subpixel chessboard corners in image."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    found, corners = cv2.findChessboardCorners(gray,
                                               (self.rows, self.columns))
    if not found:
        raise ChessboardNotFoundError("No chessboard could be found.")
    # Refine the detected corners to subpixel accuracy.
    criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS,
                30, 0.01)
    cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
    return corners
python
{ "resource": "" }
q34414
StereoCalibrator._show_corners
train
def _show_corners(self, image, corners):
    """Show chessboard corners found in image."""
    window_name = "Chessboard"
    # The original aliased ``image`` before drawing; drawing on the
    # argument directly is the same operation (no copy was made).
    cv2.drawChessboardCorners(image, (self.rows, self.columns), corners,
                              True)
    cv2.imshow(window_name, image)
    if cv2.waitKey(0):
        cv2.destroyWindow(window_name)
python
{ "resource": "" }
q34415
StereoCalibrator.add_corners
train
def add_corners(self, image_pair, show_results=False):
    """
    Record chessboard corners found in an image pair.

    The image pair should be an iterable composed of two CvMats ordered
    (left, right).
    """
    self.object_points.append(self.corner_coordinates)
    # Left image first, right image second.
    for side, image in zip(("left", "right"), image_pair):
        corners = self._get_corners(image)
        if show_results:
            self._show_corners(image, corners)
        self.image_points[side].append(corners.reshape(-1, 2))
    self.image_count += 1
python
{ "resource": "" }
q34416
StereoCalibrator.calibrate_cameras
train
def calibrate_cameras(self):
    """Calibrate cameras based on found chessboard corners."""
    # Termination criteria and flags used by cv2.stereoCalibrate.
    criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS,
                100, 1e-5)
    flags = (cv2.CALIB_FIX_ASPECT_RATIO + cv2.CALIB_ZERO_TANGENT_DIST +
             cv2.CALIB_SAME_FOCAL_LENGTH)
    calib = StereoCalibration()
    # Jointly calibrate both cameras from the accumulated object/image
    # point correspondences; [1:] drops the returned RMS error term.
    (calib.cam_mats["left"], calib.dist_coefs["left"],
     calib.cam_mats["right"], calib.dist_coefs["right"],
     calib.rot_mat, calib.trans_vec, calib.e_mat,
     calib.f_mat) = cv2.stereoCalibrate(self.object_points,
                                        self.image_points["left"],
                                        self.image_points["right"],
                                        self.image_size,
                                        calib.cam_mats["left"],
                                        calib.dist_coefs["left"],
                                        calib.cam_mats["right"],
                                        calib.dist_coefs["right"],
                                        calib.rot_mat,
                                        calib.trans_vec,
                                        calib.e_mat,
                                        calib.f_mat,
                                        criteria=criteria,
                                        flags=flags)[1:]
    # Compute rectification transforms and projection matrices.
    (calib.rect_trans["left"], calib.rect_trans["right"],
     calib.proj_mats["left"], calib.proj_mats["right"],
     calib.disp_to_depth_mat, calib.valid_boxes["left"],
     calib.valid_boxes["right"]) = cv2.stereoRectify(calib.cam_mats["left"],
                                                     calib.dist_coefs["left"],
                                                     calib.cam_mats["right"],
                                                     calib.dist_coefs["right"],
                                                     self.image_size,
                                                     calib.rot_mat,
                                                     calib.trans_vec,
                                                     flags=0)
    # Build per-side undistortion/rectification lookup maps.
    for side in ("left", "right"):
        (calib.undistortion_map[side],
         calib.rectification_map[side]) = cv2.initUndistortRectifyMap(
                                                  calib.cam_mats[side],
                                                  calib.dist_coefs[side],
                                                  calib.rect_trans[side],
                                                  calib.proj_mats[side],
                                                  self.image_size,
                                                  cv2.CV_32FC1)
    # This is replaced because my results were always bad. Estimates are
    # taken from the OpenCV samples.
    width, height = self.image_size
    focal_length = 0.8 * width
    calib.disp_to_depth_mat = np.float32([[1, 0, 0, -0.5 * width],
                                          [0, -1, 0, 0.5 * height],
                                          [0, 0, 0, -focal_length],
                                          [0, 0, 1, 0]])
    return calib
python
{ "resource": "" }
q34417
StereoCalibrator.check_calibration
train
def check_calibration(self, calibration):
    """
    Check calibration quality by computing average reprojection error.

    First, undistort detected points and compute epilines for each side.
    Then compute the error between the computed epipolar lines and the
    position of the points detected on the other side for each point
    and return the average error.
    """
    sides = "left", "right"
    which_image = {sides[0]: 1, sides[1]: 2}
    undistorted, lines = {}, {}
    for side in sides:
        undistorted[side] = cv2.undistortPoints(
            np.concatenate(self.image_points[side]).reshape(-1, 1, 2),
            calibration.cam_mats[side],
            calibration.dist_coefs[side],
            P=calibration.cam_mats[side])
        lines[side] = cv2.computeCorrespondEpilines(undistorted[side],
                                                    which_image[side],
                                                    calibration.f_mat)
    total_error = 0
    this_side, other_side = sides
    # Accumulate |a*x + b*y + c| for each point against the epiline from
    # the opposite image; the sides are swapped after the first pass so
    # both directions contribute to the total.
    for side in sides:
        for i in range(len(undistorted[side])):
            total_error += abs(undistorted[this_side][i][0][0] *
                               lines[other_side][i][0][0] +
                               undistorted[this_side][i][0][1] *
                               lines[other_side][i][0][1] +
                               lines[other_side][i][0][2])
        other_side, this_side = sides
    # NOTE(review): assumes len(self.object_points) matches the number of
    # points contributed per image — confirm against add_corners usage.
    total_points = self.image_count * len(self.object_points)
    return total_error / total_points
python
{ "resource": "" }
q34418
find_files
train
def find_files(folder):
    """Discover stereo photos and return them as a pairwise sorted list."""
    lefts = sorted(name for name in os.listdir(folder)
                   if name.startswith("left"))
    # Interleave a matching "right..." name after each left image.
    paired = []
    for name in lefts:
        paired.append(name)
        paired.append("right{}".format(name[4:]))
    return [os.path.join(folder, filename) for filename in paired]
python
{ "resource": "" }
q34419
calibrate_folder
train
def calibrate_folder(args):
    """
    Calibrate camera based on chessboard images, write results to output
    folder.

    All images are read from disk. Chessboard points are found and used to
    calibrate the stereo pair. Finally, the calibration is written to the
    folder specified in ``args``.

    ``args`` needs to contain the following fields:
        input_files: List of paths to input files
        rows: Number of rows in chessboard
        columns: Number of columns in chessboard
        square_size: Size of chessboard squares in cm
        output_folder: Folder to write calibration to
    """
    # Image size is derived from the first input image (width, height).
    height, width = cv2.imread(args.input_files[0]).shape[:2]
    calibrator = StereoCalibrator(args.rows, args.columns, args.square_size,
                                  (width, height))
    progress = ProgressBar(maxval=len(args.input_files),
                           widgets=[Bar("=", "[", "]"), " ", Percentage()])
    print("Reading input files...")
    progress.start()
    # Consume input files two at a time as (left, right) pairs; the list
    # is shortened in place so the loop terminates when it is empty.
    while args.input_files:
        left, right = args.input_files[:2]
        img_left, im_right = cv2.imread(left), cv2.imread(right)
        calibrator.add_corners((img_left, im_right),
                               show_results=args.show_chessboards)
        args.input_files = args.input_files[2:]
        progress.update(progress.maxval - len(args.input_files))
    progress.finish()
    print("Calibrating cameras. This can take a while.")
    calibration = calibrator.calibrate_cameras()
    avg_error = calibrator.check_calibration(calibration)
    print("The average error between chessboard points and their epipolar "
          "lines is \n"
          "{} pixels. This should be as small as possible.".format(avg_error))
    calibration.export(args.output_folder)
python
{ "resource": "" }
q34420
BMTuner._set_value
train
def _set_value(self, parameter, new_value): """Try setting new parameter on ``block_matcher`` and update map.""" try: self.block_matcher.__setattr__(parameter, new_value) except BadBlockMatcherArgumentError: return self.update_disparity_map()
python
{ "resource": "" }
q34421
BMTuner._initialize_trackbars
train
def _initialize_trackbars(self):
    """
    Initialize trackbars by discovering ``block_matcher``'s parameters.
    """
    for parameter, maximum in self.block_matcher.parameter_maxima.items():
        if not maximum:
            # Unbounded parameters are capped at the shortest image side.
            maximum = self.shortest_dimension
        current = self.block_matcher.__getattribute__(parameter)
        cv2.createTrackbar(parameter, self.window_name, current, maximum,
                           partial(self._set_value, parameter))
python
{ "resource": "" }
q34422
BMTuner._save_bm_state
train
def _save_bm_state(self): """Save current state of ``block_matcher``.""" for parameter in self.block_matcher.parameter_maxima.keys(): self.bm_settings[parameter].append( self.block_matcher.__getattribute__(parameter))
python
{ "resource": "" }
q34423
BMTuner.update_disparity_map
train
def update_disparity_map(self):
    """
    Update disparity map in GUI.

    The disparity image is normalized to the range 0-255 and then divided by
    255, because OpenCV multiplies it by 255 when displaying. This is because
    the pixels are stored as floating points.
    """
    pair_disparity = self.block_matcher.get_disparity(self.pair)
    normalization_coefficient = 255 / pair_disparity.max()
    rendered = pair_disparity * normalization_coefficient / 255
    cv2.imshow(self.window_name, rendered)
    cv2.waitKey()
python
{ "resource": "" }
q34424
BMTuner.tune_pair
train
def tune_pair(self, pair):
    """Tune a pair of images.

    :param pair: image pair the disparity map will be computed from
    """
    # Record the current block matcher settings before switching pairs.
    self._save_bm_state()
    self.pair = pair
    self.update_disparity_map()
python
{ "resource": "" }
q34425
BMTuner.report_settings
train
def report_settings(self, parameter):
    """
    Report chosen settings for ``parameter`` in ``block_matcher``.

    ``bm_settings`` is updated to include the latest state before work is
    begun. This state is removed at the end so that the method has no side
    effects.

    All settings are reported except for the first one on record, which is
    ``block_matcher``'s default setting.

    :param str parameter: name of the parameter to report on
    :return: a plain-text table of values and selection frequencies
    :rtype: str
    """
    self._save_bm_state()
    report = []
    settings_list = self.bm_settings[parameter][1:]
    unique_values = list(set(settings_list))
    value_frequency = {}
    for value in unique_values:
        # NOTE(review): keyed by frequency, so values sharing a frequency
        # overwrite each other and only one is reported — confirm intent.
        value_frequency[settings_list.count(value)] = value
    # ``dict.keys()`` returns a view in Python 3 which has no ``sort``
    # method; ``sorted`` works on both Python 2 and 3.
    frequencies = sorted(value_frequency.keys(), reverse=True)
    header = "{} value | Selection frequency".format(parameter)
    left_column_width = len(header[:-21])
    right_column_width = 21
    report.append(header)
    report.append("{}|{}".format("-" * left_column_width,
                                 "-" * right_column_width))
    for frequency in frequencies:
        left_column = str(value_frequency[frequency]).center(
                                                        left_column_width)
        right_column = str(frequency).center(right_column_width)
        report.append("{}|{}".format(left_column, right_column))
    # Remove newest settings
    for param in self.block_matcher.parameter_maxima.keys():
        self.bm_settings[param].pop(-1)
    return "\n".join(report)
python
{ "resource": "" }
q34426
PointCloud.write_ply
train
def write_ply(self, output_file):
    """Export ``PointCloud`` to PLY file for viewing in MeshLab."""
    # One row per vertex: x y z followed by r g b.
    vertex_rows = np.hstack([self.coordinates, self.colors])
    with open(output_file, 'w') as outfile:
        header = self.ply_header.format(vertex_count=len(self.coordinates))
        outfile.write(header)
        np.savetxt(outfile, vertex_rows, '%f %f %f %d %d %d')
python
{ "resource": "" }
q34427
PointCloud.filter_infinity
train
def filter_infinity(self):
    """Filter infinite distances from ``PointCloud.``"""
    # Drop every point sitting at the minimum depth (treated as infinity).
    depths = self.coordinates[:, 2]
    keep = depths > depths.min()
    return PointCloud(self.coordinates[keep], self.colors[keep])
python
{ "resource": "" }
q34428
BlockLoader._get_loader_for_url
train
def _get_loader_for_url(self, url):
    """
    Determine loading method based on uri
    """
    # Default to the 'file' scheme when url has no '<scheme>://' prefix.
    parts = url.split('://', 1)
    if len(parts) < 2:
        type_ = 'file'
    else:
        type_ = parts[0]
    # A '<profile>+<scheme>://' prefix selects a named profile; the
    # profile component is stripped from the url handed to the loader.
    if '+' in type_:
        profile_name, scheme = type_.split('+', 1)
        if len(parts) == 2:
            url = scheme + '://' + parts[1]
    else:
        profile_name = ''
        scheme = type_
    # Loaders are cached per full type string (including the profile).
    loader = self.cached.get(type_)
    if loader:
        return loader, url
    loader_cls = self._get_loader_class_for_type(scheme)
    if not loader_cls:
        raise IOError('No Loader for type: ' + scheme)
    # Loader construction kwargs come from the profile loader when one
    # is configured, otherwise from the defaults in self.kwargs.
    profile = self.kwargs
    if self.profile_loader:
        profile = self.profile_loader(profile_name, scheme)
    loader = loader_cls(**profile)
    self.cached[type_] = loader
    return loader, url
python
{ "resource": "" }
q34429
LocalFileLoader.load
train
def load(self, url, offset=0, length=-1): """ Load a file-like reader from the local file system """ # if starting with . or /, can only be a file path.. file_only = url.startswith(('/', '.')) # convert to filename filename = from_file_url(url) if filename != url: file_only = True url = filename try: # first, try as file afile = open(url, 'rb') except IOError: if file_only: raise return super(LocalFileLoader, self).load(url, offset, length) if offset > 0: afile.seek(offset) if length >= 0: return LimitReader(afile, length) else: return afile
python
{ "resource": "" }
q34430
HttpLoader.load
train
def load(self, url, offset, length):
    """
    Load a file-like reader over http using range requests
    and an optional cookie created via a cookie_maker
    """
    headers = {}
    needs_range = (offset != 0 or length != -1)
    if needs_range:
        headers['Range'] = BlockLoader._make_range_header(offset, length)
    cookie_maker = self.cookie_maker
    if cookie_maker:
        # A plain string is used verbatim; otherwise delegate to make().
        if isinstance(cookie_maker, six.string_types):
            headers['Cookie'] = cookie_maker
        else:
            headers['Cookie'] = cookie_maker.make()
    if not self.session:
        self.session = requests.Session()
    response = self.session.get(url, headers=headers, stream=True)
    response.raise_for_status()
    return response.raw
python
{ "resource": "" }
q34431
BaseLoader.raise_on_self_redirect
train
def raise_on_self_redirect(self, params, cdx, status_code, location_url):
    """
    Check if response is a 3xx redirect to the same url
    If so, reject this capture to avoid causing redirect loop

    :param dict params: request params; must contain 'url'
    :param cdx: cdx record for the capture being checked
    :param str status_code: HTTP status code as a string
    :param str location_url: value of the Location response header
    :raises LiveResourceException: if the capture redirects to itself
    """
    # Live captures are exempt from the self-redirect check.
    if cdx.get('is_live'):
        return
    # Only non-304 3xx responses can be self-redirects.
    if not status_code.startswith('3') or status_code == '304':
        return
    request_url = params['url'].lower()
    if not location_url:
        return
    location_url = location_url.lower()
    if location_url.startswith('/'):
        # Relative redirect: resolve against the capture's own host.
        host = urlsplit(cdx['url']).netloc
        location_url = host + location_url
    # Compare both urls with scheme and trailing slash stripped.
    location_url = location_url.split('://', 1)[-1].rstrip('/')
    request_url = request_url.split('://', 1)[-1].rstrip('/')
    self_redir = False
    if request_url == location_url:
        self_redir = True
    elif params.get('sr-urlkey'):
        # if new location canonicalized matches old key, also self-redirect
        if canonicalize(location_url) == params.get('sr-urlkey'):
            self_redir = True
    if self_redir:
        msg = 'Self Redirect {0} -> {1}'
        msg = msg.format(request_url, location_url)
        params['sr-urlkey'] = cdx['urlkey']
        raise LiveResourceException(msg)
python
{ "resource": "" }
q34432
BlockArcWarcRecordLoader.load
train
def load(self, url, offset, length, no_record_parse=False):
    """
    Load a single record from given url at offset with length
    and parse as either warc or arc record

    :param url: source to load the record bytes from
    :param offset: byte offset of the record within the source
    :param length: record length; anything not convertible to int is
        treated as unknown (-1)
    :param bool no_record_parse: if True, skip parsing record headers
    :return: the parsed record
    """
    try:
        length = int(length)
    except (ValueError, TypeError):
        # Narrowed from a bare ``except``: only conversion failures
        # should fall back to "unknown length".
        length = -1
    stream = self.loader.load(url, int(offset), length)
    decomp_type = 'gzip'
    # Create decompressing stream
    stream = DecompressingBufferedReader(stream=stream,
                                         decomp_type=decomp_type,
                                         block_size=self.block_size)
    return self.parse_record_stream(stream,
                                    no_record_parse=no_record_parse)
python
{ "resource": "" }
q34433
CDXObject.conv_to_json
train
def conv_to_json(obj, fields=None):
    """
    return cdx as json dictionary string
    if ``fields`` is ``None``, output will include all fields
    in order stored, otherwise only specified fields will be included

    :param fields: list of field names to output
    """
    if fields is None:
        # All fields except private ones (leading underscore).
        visible = OrderedDict((key, obj[key]) for key in obj
                              if not key.startswith('_'))
        return json_encode(visible) + '\n'
    selected = OrderedDict([(key, obj[key]) for key in fields if key in obj])
    return json_encode(selected) + '\n'
python
{ "resource": "" }
q34434
StreamingRewriter.rewrite_text_stream_to_gen
train
def rewrite_text_stream_to_gen(self, stream, rwinfo):
    """
    Convert stream to generator using applying rewriting func
    to each portion of the stream.
    Align to line boundaries if needed.
    """
    try:
        buff = self.first_buff
        # for html rewriting:
        # if charset is utf-8, use that, otherwise default to encode to
        # ascii-compatible encoding
        # encoding only used for url rewriting, encoding back to bytes
        # after rewriting
        if rwinfo.charset == 'utf-8' and rwinfo.text_type == 'html':
            charset = 'utf-8'
        else:
            charset = 'iso-8859-1'
        if buff:
            yield buff.encode(charset)
        decoder = codecs.getincrementaldecoder(charset)()
        while True:
            buff = stream.read(BUFF_SIZE)
            if not buff:
                break
            if self.align_to_line:
                # Extend the chunk through the next newline so rewriting
                # never splits a line across chunks.
                buff += stream.readline()
            try:
                buff = decoder.decode(buff)
            except UnicodeDecodeError:
                if charset == 'utf-8':
                    # Fall back to iso-8859-1, which can decode any byte,
                    # and restart the incremental decoder.
                    rwinfo.charset = 'iso-8859-1'
                    charset = rwinfo.charset
                    decoder = codecs.getincrementaldecoder(charset)()
                    buff = decoder.decode(buff)
            buff = self.rewrite(buff)
            yield buff.encode(charset)
        # For adding a tail/handling final buffer
        buff = self.final_read()
        # ensure decoder is marked as finished (final buffer already decoded)
        decoder.decode(b'', final=True)
        if buff:
            yield buff.encode(charset)
    finally:
        stream.close()
python
{ "resource": "" }
q34435
cdx_load
train
def cdx_load(sources, query, process=True):
    """
    merge text CDX lines from sources, return an iterator for
    filtered and access-checked sequence of CDX objects.

    :param sources: iterable for text CDX sources.
    :param process: bool, perform processing sorting/filtering/grouping ops
    """
    cdx_iter = create_merged_cdx_gen(sources, query)
    # page count is a special case, no further processing
    if query.page_count:
        return cdx_iter
    cdx_iter = make_obj_iter(cdx_iter, query)
    if process and not query.secondary_index_only:
        cdx_iter = process_cdx(cdx_iter, query)
    # Chain any caller-supplied pipeline operations, in order.
    custom_ops = query.custom_ops
    for op in custom_ops:
        cdx_iter = op(cdx_iter, query)
    # Optional final serialization of the CDX objects.
    if query.output == 'text':
        cdx_iter = cdx_to_text(cdx_iter, query.fields)
    elif query.output == 'json':
        cdx_iter = cdx_to_json(cdx_iter, query.fields)
    return cdx_iter
python
{ "resource": "" }
q34436
create_merged_cdx_gen
train
def create_merged_cdx_gen(sources, query):
    """
    create a generator which loads and merges cdx streams
    ensures cdxs are lazy loaded
    """
    # Optimize: a single source needs no merge step.
    if len(sources) == 1:
        merged = sources[0].load_cdx(query)
    else:
        merged = merge(*[source.load_cdx(query) for source in sources])
    for record in merged:
        yield record
python
{ "resource": "" }
q34437
cdx_limit
train
def cdx_limit(cdx_iter, limit):
    """
    limit cdx to at most `limit`.
    """
    def limited():
        # Pull one record, then check the budget — this matches the
        # consumption pattern of zip(cdx_iter, range(limit)).
        remaining = limit
        for record in cdx_iter:
            if remaining <= 0:
                return
            remaining -= 1
            yield record
    return limited()
python
{ "resource": "" }
q34438
cdx_reverse
train
def cdx_reverse(cdx_iter, limit):
    """
    return cdx records in reverse order.
    """
    # optimize for single last
    if limit == 1:
        final = None
        for record in cdx_iter:
            final = record
        if not final:
            return
        yield final
        return
    # A bounded deque keeps only the newest ``limit`` records,
    # prepended so iteration yields them newest-first.
    buffered = deque(maxlen=limit)
    for record in cdx_iter:
        buffered.appendleft(record)
    for record in buffered:
        yield record
python
{ "resource": "" }
q34439
cdx_clamp
train
def cdx_clamp(cdx_iter, from_ts, to_ts):
    """
    Clamp by start and end ts
    """
    # Short timestamps are padded to the full 14-digit form.
    if from_ts and len(from_ts) < 14:
        from_ts = pad_timestamp(from_ts, PAD_14_DOWN)
    if to_ts and len(to_ts) < 14:
        to_ts = pad_timestamp(to_ts, PAD_14_UP)
    for cdx in cdx_iter:
        ts = cdx[TIMESTAMP]
        too_early = from_ts and ts < from_ts
        too_late = to_ts and ts > to_ts
        if too_early or too_late:
            continue
        yield cdx
python
{ "resource": "" }
q34440
cdx_collapse_time_status
train
def cdx_collapse_time_status(cdx_iter, timelen=10):
    """
    collapse by timestamp and status code.
    """
    timelen = int(timelen)
    prev_token = None
    for cdx in cdx_iter:
        token = (cdx[TIMESTAMP][:timelen], cdx.get(STATUSCODE, ''))
        # Skip records whose truncated timestamp + status repeat the
        # immediately preceding record.
        if token == prev_token:
            continue
        prev_token = token
        yield cdx
python
{ "resource": "" }
q34441
cdx_sort_closest
train
def cdx_sort_closest(closest, cdx_iter, limit=10):
    """
    sort CDXCaptureResult by closest to timestamp.
    """
    # Parallel sorted lists: keys holds |distance| in seconds, cdx the
    # corresponding records at matching indices.
    closest_cdx = []
    closest_keys = []
    closest_sec = timestamp_to_sec(closest)
    for cdx in cdx_iter:
        sec = timestamp_to_sec(cdx[TIMESTAMP])
        key = abs(closest_sec - sec)
        # create tuple to sort by key
        #bisect.insort(closest_cdx, (key, cdx))
        i = bisect.bisect_right(closest_keys, key)
        closest_keys.insert(i, key)
        closest_cdx.insert(i, cdx)
        if len(closest_cdx) == limit:
            # assuming cdx in ascending order and keys have started increasing
            if key > closest_keys[-1]:
                break
        # Keep at most ``limit`` records; the farthest one is dropped.
        if len(closest_cdx) > limit:
            closest_cdx.pop()
    for cdx in closest_cdx:
        yield cdx
python
{ "resource": "" }
q34442
cdx_resolve_revisits
train
def cdx_resolve_revisits(cdx_iter):
    """
    resolve revisits.

    this filter adds three fields to CDX: ``orig.length``, ``orig.offset``,
    and ``orig.filename``. for revisit records, these fields have
    corresponding field values in previous non-revisit (original) CDX
    record. They are all ``"-"`` for non-revisit records.
    """
    # Maps digest -> first non-revisit capture seen with that digest.
    originals = {}
    for cdx in cdx_iter:
        is_revisit = cdx.is_revisit()
        digest = cdx.get(DIGEST)
        original_cdx = None
        # only set if digest is valid, otherwise no way to resolve
        if digest:
            original_cdx = originals.get(digest)
            if not original_cdx and not is_revisit:
                originals[digest] = cdx
        if original_cdx and is_revisit:
            fill_orig = lambda field: original_cdx.get(field, '-')
            # Transfer mimetype and statuscode
            if MIMETYPE in cdx:
                cdx[MIMETYPE] = original_cdx.get(MIMETYPE, '')
            if STATUSCODE in cdx:
                cdx[STATUSCODE] = original_cdx.get(STATUSCODE, '')
        else:
            fill_orig = lambda field: '-'
        # Always add either the original or empty '- - -'
        for field in ORIG_TUPLE:
            cdx['orig.' + field] = fill_orig(field)
        yield cdx
python
{ "resource": "" }
q34443
BaseCli.load
train
def load(self):
    """This method is called to load the application. Subclasses must
    return a application that can be used by used by
    pywb.utils.geventserver.GeventServer."""
    options = self.r
    # Translate command-line flags into extra_config entries.
    if options.live:
        self.extra_config['collections'] = {'live': {'index': '$live'}}
    if options.debug:
        self.extra_config['debug'] = True
    if options.record:
        self.extra_config['recorder'] = 'live'
python
{ "resource": "" }
q34444
BaseCli.run_gevent
train
def run_gevent(self):
    """Created the server that runs the application supplied a subclass"""
    from pywb.utils.geventserver import GeventServer, RequestURIWSGIHandler
    logging.info('Starting Gevent Server on ' + str(self.r.port))
    # Keep a local reference so the server object outlives construction.
    server = GeventServer(self.application,
                          port=self.r.port,
                          hostname=self.r.bind,
                          handler_class=RequestURIWSGIHandler,
                          direct=True)
python
{ "resource": "" }
q34445
JinjaEnv._make_loaders
train
def _make_loaders(self, paths, packages):
    """Initialize the template loaders based on the supplied paths and
    packages.

    :param list[str] paths: List of paths to search for templates
    :param list[str] packages: List of assets package names
    :return: A list of loaders to be used for loading the template assets
    :rtype: list[FileSystemLoader|PackageLoader]
    """
    # Path loaders first, then package loaders, preserving input order.
    path_loaders = [FileSystemLoader(path) for path in paths]
    package_loaders = [PackageLoader(package) for package in packages]
    return path_loaders + package_loaders
python
{ "resource": "" }
q34446
JinjaEnv.template_filter
train
def template_filter(self, param=None):
    """Returns a decorator that adds the wrapped function to dictionary of
    template filters.

    The wrapped function is keyed by either the supplied param (if
    supplied) or by the wrapped functions name.

    :param param: Optional name to use instead of the name of the function
        to be wrapped
    :return: A decorator to wrap a template filter function
    :rtype: callable
    """
    def register(func):
        filter_name = param if param else func.__name__
        self.filters[filter_name] = func
        return func
    return register
python
{ "resource": "" }
q34447
JinjaEnv._init_filters
train
def _init_filters(self):
    """Initialize the default pywb provided Jninja filters available
    during template rendering"""
    # Filters are registered through the template_filter decorator.
    self.filters = {}

    @self.template_filter()
    def format_ts(value, format_='%a, %b %d %Y %H:%M:%S'):
        """Formats the supplied timestamp using format_

        :param str value: The timestamp to be formatted
        :param str format_: The format string
        :return: The correctly formatted timestamp as determined by format_
        :rtype: str
        """
        if format_ == '%s':
            return timestamp_to_sec(value)
        else:
            value = timestamp_to_datetime(value)
            return value.strftime(format_)

    @self.template_filter('urlsplit')
    def get_urlsplit(url):
        """Splits the supplied URL

        :param str url: The url to be split
        :return: The split url
        :rtype: urllib.parse.SplitResult
        """
        split = urlsplit(url)
        return split

    @self.template_filter()
    def tojson(obj):
        """Converts the supplied object/array/any to a JSON string if it
        can be JSONified

        :param any obj: The value to be converted to a JSON string
        :return: The JSON string representation of the supplied value
        :rtype: str
        """
        return json.dumps(obj)

    @self.template_filter()
    def tobool(bool_val):
        """Converts a python boolean to a JS "true" or "false" string

        :param any obj: A value to be evaluated as a boolean
        :return: The string "true" or "false" to be inserted into JS
        """
        return 'true' if bool_val else 'false'
python
{ "resource": "" }
q34448
BaseInsertView.render_to_string
train
def render_to_string(self, env, **kwargs):
    """Render this template.

    :param dict env: The WSGI environment associated with the request
        causing this template to be rendered
    :param any kwargs: The keyword arguments to be supplied to the Jninja
        template render method
    :return: The rendered template
    :rtype: str
    """
    template = None
    override_dir = env.get(self.jenv.env_template_dir_key)
    if override_dir:
        # jinja paths are not os paths, always use '/' as separator
        # https://github.com/pallets/jinja/issues/411
        override_path = override_dir + '/' + self.insert_file
        try:
            template = self.jenv.jinja_env.get_template(override_path)
        except TemplateNotFound:
            pass
    if not template:
        # No per-request override: fall back to the default template.
        template = self.jenv.jinja_env.get_template(self.insert_file)
    extra_params = env.get(self.jenv.env_template_params_key)
    if extra_params:
        kwargs.update(extra_params)
    kwargs['env'] = env
    kwargs['static_prefix'] = (env.get('pywb.host_prefix', '') +
                               env.get('pywb.app_prefix', '') + '/static')
    return template.render(**kwargs)
python
{ "resource": "" }
q34449
HeadInsertView.create_insert_func
train
def create_insert_func(self, wb_url,
                       wb_prefix,
                       host_prefix,
                       top_url,
                       env,
                       is_framed,
                       coll='',
                       include_ts=True,
                       **kwargs):
    """Create the function used to render the header insert template for
    the current request.

    :param rewrite.wburl.WbUrl wb_url: The WbUrl for the request this
        template is being rendered for
    :param str wb_prefix: The URL prefix pywb is serving the content using
        (e.g. http://localhost:8080/live/)
    :param str host_prefix: The host URL prefix pywb is running on
        (e.g. http://localhost:8080)
    :param str top_url: The full URL for this request
        (e.g. http://localhost:8080/live/http://example.com)
    :param dict env: The WSGI environment dictionary for this request
    :param bool is_framed: Is pywb or a specific collection running in
        framed mode
    :param str coll: The name of the collection this request is
        associated with
    :param bool include_ts: Should a timestamp be included in the
        rendered template
    :param kwargs: Additional keyword arguments to be supplied to the
        Jninja template render method
    :return: A function to be used to render the header insert for the
        request this template is being rendered for
    :rtype: callable
    """
    # The request-level values are fixed here; per-capture values are
    # filled into the same shared dict on every make_head_insert call.
    params = kwargs
    params['host_prefix'] = host_prefix
    params['wb_prefix'] = wb_prefix
    params['wb_url'] = wb_url
    params['top_url'] = top_url
    params['coll'] = coll
    params['is_framed'] = is_framed

    def make_head_insert(rule, cdx):
        params['wombat_ts'] = cdx['timestamp'] if include_ts else ''
        params['wombat_sec'] = timestamp_to_sec(cdx['timestamp'])
        params['is_live'] = cdx.get('is_live')
        if self.banner_view:
            banner_html = self.banner_view.render_to_string(env, cdx=cdx,
                                                            **params)
            params['banner_html'] = banner_html
        return self.render_to_string(env, cdx=cdx, **params)
    return make_head_insert
python
{ "resource": "" }
q34450
PkgResResolver.get_pkg_path
train
def get_pkg_path(self, item):
    """Get the package path for the

    :param str item: A resources full package path
    :return: The netloc and path from the items package path
    :rtype: tuple[str, str]
    """
    # Non-strings can never be package paths.
    if not isinstance(item, str):
        return None
    split_result = urlsplit(item)
    is_pkg_url = (split_result.scheme == 'pkg' and split_result.netloc)
    if is_pkg_url:
        return (split_result.netloc, split_result.path)
    return None
python
{ "resource": "" }
q34451
WbResponse.text_stream
train
def text_stream(stream, content_type='text/plain; charset=utf-8',
                status='200 OK'):
    """Utility method for constructing a streaming text response.

    :param Any stream: The response body stream
    :param str content_type: The content-type of the response
    :param str status: The HTTP status line
    :return: WbResponse that is a text stream
    :rtype WbResponse:
    """
    # Guarantee an explicit charset on the content type.
    if 'charset' not in content_type:
        content_type = content_type + '; charset=utf-8'
    encoded = WbResponse.encode_stream(stream)
    return WbResponse.bin_stream(encoded, content_type, status)
python
{ "resource": "" }
q34452
WbResponse.bin_stream
train
def bin_stream(stream, content_type, status='200 OK', headers=None):
    """Build a binary streaming response.

    :param Any stream: response body stream
    :param str content_type: content-type of the response
    :param str status: HTTP status line
    :param list[tuple[str, str]] headers: extra headers to append
    :return: WbResponse wrapping the binary stream
    :rtype: WbResponse
    """
    all_headers = [('Content-Type', content_type)]
    if headers:
        all_headers.extend(headers)

    return WbResponse(StatusAndHeaders(status, all_headers), value=stream)
python
{ "resource": "" }
q34453
WbResponse.text_response
train
def text_response(text, status='200 OK', content_type='text/plain; charset=utf-8'):
    """Build a plain text response.

    :param str text: response body text
    :param str status: HTTP status line
    :param str content_type: content-type of the response
    :return: WbResponse with the utf-8 encoded body and Content-Length set
    :rtype: WbResponse
    """
    body = text.encode('utf-8')
    headers = [('Content-Type', content_type),
               ('Content-Length', str(len(body)))]

    return WbResponse(StatusAndHeaders(status, headers), value=[body])
python
{ "resource": "" }
q34454
WbResponse.json_response
train
def json_response(obj, status='200 OK', content_type='application/json; charset=utf-8'):
    """Build a JSON response.

    :param dict obj: object to serialize as the JSON body
    :param str status: HTTP status line
    :param str content_type: content-type of the response
    :return: WbResponse carrying the serialized JSON
    :rtype: WbResponse
    """
    body = json.dumps(obj)
    return WbResponse.text_response(body, status, content_type)
python
{ "resource": "" }
q34455
WbResponse.redir_response
train
def redir_response(location, status='302 Redirect', headers=None):
    """Build a redirection response.

    :param str location: target URL of the redirect
    :param str status: HTTP status line
    :param list[tuple[str, str]] headers: extra headers to append
    :return: WbResponse performing the redirect
    :rtype: WbResponse
    """
    redir_headers = [('Location', location), ('Content-Length', '0')]
    if headers:
        redir_headers.extend(headers)

    return WbResponse(StatusAndHeaders(status, redir_headers))
python
{ "resource": "" }
q34456
WbResponse.options_response
train
def options_response(env):
    """Build the response for an HTTP OPTIONS (CORS preflight) request.

    :param dict env: WSGI environment dictionary
    :return: empty 200 response with access-control headers added
    :rtype: WbResponse
    """
    headers = [('Content-Type', 'text/plain'),
               ('Content-Length', '0')]

    response = WbResponse(StatusAndHeaders('200 Ok', headers))
    response.add_access_control_headers(env=env)
    return response
python
{ "resource": "" }
q34457
canonicalize
train
def canonicalize(url, surt_ordered=True):
    """
    Canonicalize url and convert to surt
    If not in surt ordered mode, convert back to url form
    as surt conversion is currently part of canonicalization

    >>> canonicalize('http://example.com/path/file.html', surt_ordered=True)
    'com,example)/path/file.html'

    >>> canonicalize('http://example.com/path/file.html', surt_ordered=False)
    'example.com/path/file.html'

    >>> canonicalize('urn:some:id')
    'urn:some:id'
    """
    try:
        key = surt.surt(url)
    except Exception:  # pragma: no cover
        # not seen with surt >= 0.3b; urns are already canonical, pass through
        if url.startswith('urn:'):
            return url

        raise UrlCanonicalizeException('Invalid Url: ' + url)

    if surt_ordered:
        return key

    # non-surt mode: unsurt back to a plain canonicalized url
    return unsurt(key)
python
{ "resource": "" }
q34458
FuzzyMatcher.parse_fuzzy_rule
train
def parse_fuzzy_rule(self, rule):
    """Parse one fuzzy-match rule config entry into a FuzzyRule.

    Supports both the shorthand form (``fuzzy_lookup`` is a bare regex /
    list) and the full dict form with ``match``/``replace``/``filter``/
    ``type``/``find_all`` keys.

    :return: the parsed rule, or None when the entry has no fuzzy_lookup
    :rtype: FuzzyRule or None
    """
    config = rule.get('fuzzy_lookup')
    if not config:
        return

    url_prefix = rule.get('url_prefix')
    if not isinstance(url_prefix, list):
        url_prefix = [url_prefix]

    if isinstance(config, dict):
        regex = self.make_regex(config.get('match'))
        replace_after = config.get('replace', self.DEFAULT_REPLACE_AFTER)
        filter_str = config.get('filter', self.DEFAULT_FILTER)
        match_type = config.get('type', self.DEFAULT_MATCH_TYPE)
        find_all = config.get('find_all', False)
    else:
        # shorthand: config is the match expression, defaults for the rest
        regex = self.make_regex(config)
        replace_after = self.DEFAULT_REPLACE_AFTER
        filter_str = self.DEFAULT_FILTER
        match_type = self.DEFAULT_MATCH_TYPE
        find_all = False

    return FuzzyRule(url_prefix, regex, replace_after, filter_str,
                     match_type, find_all)
python
{ "resource": "" }
q34459
ResolvingLoader.load_headers_and_payload
train
def load_headers_and_payload(self, cdx, failed_files, cdx_loader):
    """
    Resolve headers and payload for a given capture
    In the simple case, headers and payload are in the same record.
    In the case of revisit records, the payload and headers
    may be in different records.

    If the original has already been found, lookup original using
    orig. fields in cdx dict. Otherwise, call _load_different_url_payload()
    to get cdx index from a different url to find the original record.

    :param dict cdx: the cdx entry being resolved
    :param failed_files: accumulator of archive paths that failed to load
    :param cdx_loader: callable used for secondary cdx lookups (may be None)
    :return: (headers_record, payload_record) tuple
    """
    has_curr = (cdx['filename'] != '-')
    #has_orig = (cdx.get('orig.filename', '-') != '-')
    orig_f = cdx.get('orig.filename')
    has_orig = orig_f and orig_f != '-'

    # load headers record from cdx['filename'] unless it is '-' (rare)
    headers_record = None
    if has_curr:
        headers_record = self._resolve_path_load(cdx, False, failed_files)

    # two index lookups
    # Case 1: if mimetype is still warc/revisit
    if cdx.get('mime') == 'warc/revisit' and headers_record:
        payload_record = self._load_different_url_payload(cdx,
                                                          headers_record,
                                                          failed_files,
                                                          cdx_loader)

    # single lookup cases
    # case 2: non-revisit
    elif (has_curr and not has_orig):
        payload_record = headers_record

    # case 3: identical url revisit, load payload from orig.filename
    elif (has_orig):
        payload_record = self._resolve_path_load(cdx, True, failed_files)

    # NOTE(review): if none of the branches above match (e.g. a revisit with
    # no current filename and no orig.filename), payload_record is unbound
    # and the return below raises UnboundLocalError -- confirm callers can
    # never produce such a cdx entry
    return headers_record, payload_record
python
{ "resource": "" }
q34460
ResolvingLoader._load_different_url_payload
train
def _load_different_url_payload(self, cdx, headers_record,
                                failed_files, cdx_loader):
    """
    Handle the case where a duplicate of a capture with same digest
    exists at a different url.

    If a cdx_server is provided, a query is made for matching
    url, timestamp and digest.

    Raise exception if no matches found.

    :param dict cdx: cdx entry for the revisit record
    :param headers_record: the already-loaded revisit (headers) record
    :param failed_files: accumulator of archive paths that failed to load
    :param cdx_loader: callable used to query the cdx index (may be None)
    :return: the payload record of the original capture
    :raises ArchiveLoadFailed: when no original capture can be located
    """
    ref_target_uri = (headers_record.rec_headers.
                      get_header('WARC-Refers-To-Target-URI'))

    target_uri = headers_record.rec_headers.get_header('WARC-Target-URI')

    # if no target uri, no way to find the original
    if not ref_target_uri:
        raise ArchiveLoadFailed(self.MISSING_REVISIT_MSG)

    ref_target_date = (headers_record.rec_headers.
                       get_header('WARC-Refers-To-Date'))

    # fall back to the revisit's own timestamp when no refers-to date given
    if not ref_target_date:
        ref_target_date = cdx['timestamp']
    else:
        ref_target_date = iso_date_to_timestamp(ref_target_date)

    digest = cdx.get('digest', '-')

    try:
        orig_cdx_lines = self.load_cdx_for_dupe(ref_target_uri,
                                                ref_target_date,
                                                digest,
                                                cdx_loader)
    except NotFoundException:
        raise ArchiveLoadFailed(self.MISSING_REVISIT_MSG)

    # try each candidate original until one loads successfully
    for orig_cdx in orig_cdx_lines:
        try:
            payload_record = self._resolve_path_load(orig_cdx, False,
                                                     failed_files)
            return payload_record

        except ArchiveLoadFailed as e:
            # candidate failed to load; keep trying the rest
            pass

    raise ArchiveLoadFailed(self.MISSING_REVISIT_MSG)
python
{ "resource": "" }
q34461
ResolvingLoader.load_cdx_for_dupe
train
def load_cdx_for_dupe(self, url, timestamp, digest, cdx_loader):
    """Query the cdx source for an original capture matching a revisit.

    :param str url: url of the original capture
    :param str timestamp: timestamp to sort results closest to
    :param str digest: payload digest to filter on ('-' means unknown)
    :param cdx_loader: callable accepting a params dict, or None
    :return: iterator of matching cdx entries (empty when no loader)
    """
    if not cdx_loader:
        # no cdx source configured -- nothing to look up
        return iter([])

    filters = ['!mime:warc/revisit']

    if digest and digest != '-':
        filters.append('digest:' + digest)

    params = {'url': url, 'closest': timestamp, 'filter': filters}
    return cdx_loader(params)
python
{ "resource": "" }
q34462
binsearch_offset
train
def binsearch_offset(reader, key, compare_func=cmp, block_size=8192): """ Find offset of the line which matches a given 'key' using binary search If key is not found, the offset is of the line after the key File is subdivided into block_size (default 8192) sized blocks Optional compare_func may be specified """ min_ = 0 reader.seek(0, 2) max_ = int(reader.tell() / block_size) while max_ - min_ > 1: mid = int(min_ + ((max_ - min_) / 2)) reader.seek(mid * block_size) if mid > 0: reader.readline() # skip partial line line = reader.readline() if compare_func(key, line) > 0: min_ = mid else: max_ = mid return min_ * block_size
python
{ "resource": "" }
q34463
linearsearch
train
def linearsearch(iter_, key, prev_size=0, compare_func=cmp): """ Perform a linear search over iterator until current_line >= key optionally also tracking upto N previous lines, which are returned before the first matched line. if end of stream is reached before a match is found, nothing is returned (prev lines discarded also) """ prev_deque = deque(maxlen=prev_size + 1) matched = False for line in iter_: prev_deque.append(line) if compare_func(line, key) >= 0: matched = True break # no matches, so return empty iterator if not matched: return iter([]) return itertools.chain(prev_deque, iter_)
python
{ "resource": "" }
q34464
iter_prefix
train
def iter_prefix(reader, key):
    """
    Creates an iterator which iterates over lines that start with prefix
    'key' in a sorted text file.
    """
    def has_prefix(line):
        return line.startswith(key)

    return itertools.takewhile(has_prefix, search(reader, key))
python
{ "resource": "" }
q34465
FrontEndApp.get_upstream_paths
train
def get_upstream_paths(self, port):
    """Return the full URLs of the upstream apps.

    :param int port: port used by the replay and cdx servers
    :return: mapping of upstream paths (replay, cdx-server,
        record [only when recording is enabled])
    :rtype: dict[str, str]
    """
    paths = {
        'replay': self.REPLAY_API % port,
        'cdx-server': self.CDX_API % port,
    }

    if self.recorder_path:
        paths['record'] = self.recorder_path

    return paths
python
{ "resource": "" }
q34466
FrontEndApp.init_recorder
train
def init_recorder(self, recorder_config):
    """Initialize the recording functionality of pywb.
    If recording_config is None this function is a no op.

    Accepts either a bare collection name (str) or a config dict with
    'source_coll' plus optional rollover/filter/filename settings.
    Sets self.recorder and self.recorder_path as side effects.

    :param recorder_config: recorder settings (str, dict, or None)
    """
    if not recorder_config:
        self.recorder = None
        self.recorder_path = None
        return

    if isinstance(recorder_config, str):
        # shorthand form: just the source collection name, default settings
        recorder_coll = recorder_config
        recorder_config = {}
    else:
        recorder_coll = recorder_config['source_coll']

    # TODO: support dedup
    dedup_index = None
    warc_writer = MultiFileWARCWriter(self.warcserver.archive_paths,
                                      max_size=int(recorder_config.get('rollover_size', 1000000000)),
                                      max_idle_secs=int(recorder_config.get('rollover_idle_secs', 600)),
                                      filename_template=recorder_config.get('filename_template'),
                                      dedup_index=dedup_index)

    self.recorder = RecorderApp(self.RECORD_SERVER % str(self.warcserver_server.port), warc_writer,
                                accept_colls=recorder_config.get('source_filter'))

    # port=0 lets the OS pick a free port; the actual port is read back below
    recorder_server = GeventServer(self.recorder, port=0)

    self.recorder_path = self.RECORD_API % (recorder_server.port, recorder_coll)
python
{ "resource": "" }
q34467
FrontEndApp.init_autoindex
train
def init_autoindex(self, auto_interval):
    """Initialize and start the auto-indexing of the collections.
    If auto_interval is None this is a no op.

    Exits the process (status 2) when the managed collections directory
    does not exist.

    :param str|int auto_interval: auto-indexing interval in seconds, from
        the configuration file or CLI argument
    """
    if not auto_interval:
        return

    # imported lazily so the dependency is only needed when enabled
    from pywb.manager.autoindex import AutoIndexer

    colls_dir = self.warcserver.root_dir if self.warcserver.root_dir else None

    indexer = AutoIndexer(colls_dir=colls_dir, interval=int(auto_interval))

    if not os.path.isdir(indexer.root_path):
        msg = 'No managed directory "{0}" for auto-indexing'
        logging.error(msg.format(indexer.root_path))
        import sys
        sys.exit(2)

    msg = 'Auto-Indexing Enabled on "{0}", checking every {1} secs'
    logging.info(msg.format(indexer.root_path, auto_interval))
    # runs in a background thread managed by the indexer
    indexer.start()
python
{ "resource": "" }
q34468
FrontEndApp.serve_static
train
def serve_static(self, environ, coll='', filepath=''):
    """Serve a static file associated with a specific collection or one of
    pywb's own static assets.

    :param dict environ: WSGI environment dictionary for the request
    :param str coll: collection the static file is associated with
    :param str filepath: file path (relative to the collection) for the
        static asset
    :return: WbResponse for the static asset
    :rtype: WbResponse
    :raises NotFound: (via raise_not_found) when the asset cannot be served
    """
    proxy_enabled = self.is_proxy_enabled(environ)
    if proxy_enabled and environ.get('REQUEST_METHOD') == 'OPTIONS':
        return WbResponse.options_response(environ)

    if coll:
        path = os.path.join(self.warcserver.root_dir, coll, self.static_dir)
    else:
        path = self.static_dir

    environ['pywb.static_dir'] = path

    try:
        response = self.static_handler(environ, filepath)
        if proxy_enabled:
            response.add_access_control_headers(env=environ)
        return response
    except Exception:
        # fix: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; any handler failure maps to a 404
        self.raise_not_found(environ, 'Static File Not Found: {0}'.format(filepath))
python
{ "resource": "" }
q34469
FrontEndApp.get_metadata
train
def get_metadata(self, coll):
    """Retrieve the metadata associated with a collection.

    :param str coll: name of the collection
    :return: the collection's metadata, always including 'coll' and 'type'
    :rtype: dict
    """
    metadata = {'coll': coll, 'type': 'replay'}

    if coll in self.warcserver.list_fixed_routes():
        extra = self.warcserver.get_coll_config(coll)
    else:
        extra = self.metadata_cache.load(coll)

    metadata.update(extra)
    return metadata
python
{ "resource": "" }
q34470
FrontEndApp.serve_cdx
train
def serve_cdx(self, environ, coll='$root'):
    """Proxy a CDX query for a collection to the upstream cdx server and
    stream back the results.

    :param dict environ: WSGI environment dictionary for the request
    :param str coll: collection this CDX query is for
    :return: WbResponse containing the CDX query results, or a 400 error
    :rtype: WbResponse
    """
    cdx_url = self.rewriterapp.paths['cdx-server'].format(coll=coll)

    query = environ.get('QUERY_STRING')
    if query:
        sep = '&' if '?' in cdx_url else '?'
        cdx_url = cdx_url + sep + query

    try:
        res = requests.get(cdx_url, stream=True)
        content_type = res.headers.get('Content-Type')
        return WbResponse.bin_stream(StreamIter(res.raw),
                                     content_type=content_type)

    except Exception as e:
        return WbResponse.text_response('Error: ' + str(e),
                                        status='400 Bad Request')
python
{ "resource": "" }
q34471
FrontEndApp.setup_paths
train
def setup_paths(self, environ, coll, record=False):
    """Populate the WSGI environment with the path info needed to serve
    content or record for a collection.

    Consumes leading path segments (collection, and the record segment in
    record mode) and sets 'pywb.templates_dir'.

    :param dict environ: WSGI environment dictionary for the request
    :param str coll: collection name ('$root' for the root collection)
    :param bool record: whether the content is being recorded
    """
    if not coll or not self.warcserver.root_dir:
        return

    if coll != '$root':
        pop_path_info(environ)
        if record:
            pop_path_info(environ)

    paths = [self.warcserver.root_dir]
    if coll != '$root':
        paths.append(coll)
    paths.append(self.templates_dir)

    # jinja2 template paths always use '/' as separator
    environ['pywb.templates_dir'] = '/'.join(paths)
python
{ "resource": "" }
q34472
FrontEndApp.raise_not_found
train
def raise_not_found(self, environ, msg):
    """Raise a werkzeug NotFound carrying a rendered error page.

    :param dict environ: WSGI environment dictionary for the request
    :param str msg: error message to render
    :raises NotFound: always
    """
    error_response = self.rewriterapp._error_response(environ, msg)
    raise NotFound(response=error_response)
python
{ "resource": "" }
q34473
FrontEndApp._check_refer_redirect
train
def _check_refer_redirect(self, environ): """Returns a WbResponse for a HTTP 307 redirection if the HTTP referer header is the same as the HTTP host header :param dict environ: The WSGI environment dictionary for the request :return: WbResponse HTTP 307 redirection :rtype: WbResponse """ referer = environ.get('HTTP_REFERER') if not referer: return host = environ.get('HTTP_HOST') if host not in referer: return inx = referer[1:].find('http') if not inx: inx = referer[1:].find('///') if inx > 0: inx + 1 if inx < 0: return url = referer[inx + 1:] host = referer[:inx + 1] orig_url = environ['PATH_INFO'] if environ.get('QUERY_STRING'): orig_url += '?' + environ['QUERY_STRING'] full_url = host + urljoin(url, orig_url) return WbResponse.redir_response(full_url, '307 Redirect')
python
{ "resource": "" }
q34474
FrontEndApp.handle_request
train
def handle_request(self, environ, start_response):
    """Look up the route handler for the request and invoke it, returning
    the handler's response.

    On a routing failure (HTTPException) first tries a referer-based
    redirect before surfacing the error; any other exception becomes a
    500 response (with a traceback printed in debug mode).

    :param dict environ: WSGI environment dictionary for the request
    :param start_response: WSGI start_response callable
    :return: WSGI response iterable
    """
    urls = self.url_map.bind_to_environ(environ)
    try:
        endpoint, args = urls.match()

        # store original script_name (original prefix) before modifications are made
        environ['pywb.app_prefix'] = environ.get('SCRIPT_NAME')

        response = endpoint(environ, **args)
        return response(environ, start_response)

    except HTTPException as e:
        # request may have escaped the replay prefix; try to bounce it back
        redir = self._check_refer_redirect(environ)
        if redir:
            return redir(environ, start_response)

        return e(environ, start_response)

    except Exception as e:
        if self.debug:
            traceback.print_exc()

        response = self.rewriterapp._error_response(environ, 'Internal Error: ' + str(e), '500 Server Error')
        return response(environ, start_response)
python
{ "resource": "" }
q34475
FrontEndApp.create_app
train
def create_app(cls, port):
    """Create a new FrontEndApp listening on *port* on all interfaces.

    :param int port: port FrontEndApp is to listen on
    :return: the new FrontEndApp wrapped in a GeventServer
    :rtype: GeventServer
    """
    return GeventServer(FrontEndApp(), port=port, hostname='0.0.0.0')
python
{ "resource": "" }
q34476
FrontEndApp.init_proxy
train
def init_proxy(self, config):
    """Initialize and start proxy mode. If proxy configuration entry is not
    contained in the config this is a no op. Causes handler to become an
    instance of WSGIProxMiddleware.

    Side effects: sets self.proxy_prefix, self.proxy_default_timestamp,
    self.proxy_coll and replaces self.handler; may enable the 'live'
    recorder when proxy recording is requested.

    :param dict config: configuration object for this FrontEndApp instance
    :raises Exception: on an invalid proxy collection name, recording into
        a fixed collection, or an invalid default timestamp
    """
    proxy_config = config.get('proxy')
    if not proxy_config:
        return

    if isinstance(proxy_config, str):
        # shorthand form: just the collection name, default settings
        proxy_coll = proxy_config
        proxy_config = {}
    else:
        proxy_coll = proxy_config['coll']

    if '/' in proxy_coll:
        raise Exception('Proxy collection can not contain "/"')

    proxy_config['ca_name'] = proxy_config.get('ca_name', self.PROXY_CA_NAME)
    proxy_config['ca_file_cache'] = proxy_config.get('ca_file_cache', self.PROXY_CA_PATH)

    if proxy_config.get('recording'):
        logging.info('Proxy recording into collection "{0}"'.format(proxy_coll))
        if proxy_coll in self.warcserver.list_fixed_routes():
            raise Exception('Can not record into fixed collection')

        proxy_coll += self.RECORD_ROUTE
        if not config.get('recorder'):
            config['recorder'] = 'live'

    else:
        logging.info('Proxy enabled for collection "{0}"'.format(proxy_coll))

    # 'bn_' = banner-only rewriting, 'id_' = identity (no rewriting)
    if proxy_config.get('enable_content_rewrite', True):
        self.proxy_prefix = '/{0}/bn_/'.format(proxy_coll)
    else:
        self.proxy_prefix = '/{0}/id_/'.format(proxy_coll)

    self.proxy_default_timestamp = proxy_config.get('default_timestamp')
    if self.proxy_default_timestamp:
        # accept an all-digit timestamp as-is, otherwise parse as ISO date
        if not self.ALL_DIGITS.match(self.proxy_default_timestamp):
            try:
                self.proxy_default_timestamp = iso_date_to_timestamp(self.proxy_default_timestamp)
            except:
                raise Exception('Invalid Proxy Timestamp: Must Be All-Digit Timestamp or ISO Date Format')

    self.proxy_coll = proxy_coll

    self.handler = WSGIProxMiddleware(self.handle_request,
                                      self.proxy_route_request,
                                      proxy_host=proxy_config.get('host', 'pywb.proxy'),
                                      proxy_options=proxy_config)
python
{ "resource": "" }
q34477
FrontEndApp.proxy_route_request
train
def proxy_route_request(self, url, environ):
    """Return the full url this proxy request will be routed to.

    The caller rewrites the environ's PATH_INFO and REQUEST_URI based on
    the returned url; by default the url is prefixed with the proxy
    collection's replay prefix.

    :param str url: the requested url
    :param dict environ: WSGI environment dictionary (may be annotated
        with the configured default timestamp)
    :return: url prefixed for the proxy collection
    :rtype: str
    """
    default_ts = self.proxy_default_timestamp
    if default_ts:
        environ['pywb_proxy_default_timestamp'] = default_ts

    return self.proxy_prefix + url
python
{ "resource": "" }
q34478
FrontEndApp.proxy_fetch
train
def proxy_fetch(self, env, url):
    """Proxy mode only endpoint that handles OPTIONS requests and COR
    fetches for Preservation Worker.

    Due to normal cross-origin browser restrictions in proxy mode, auto
    fetch worker cannot access the CSS rules of cross-origin style sheets
    and must re-fetch them in a manner that is CORS safe. This endpoint
    facilitates that by fetching the stylesheets for the auto fetch worker
    and then responds with its contents.

    :param dict env: WSGI environment dictionary
    :param str url: URL of the resource to be fetched
    :return: WbResponse that is either response to an Options request or
        the results of fetching url
    :rtype: WbResponse
    """
    if not self.is_proxy_enabled(env):
        # we are not in proxy mode so just respond with forbidden
        return WbResponse.text_response('proxy mode must be enabled to use this endpoint',
                                        status='403 Forbidden')

    if env.get('REQUEST_METHOD') == 'OPTIONS':
        return WbResponse.options_response(env)

    # ensure full URL
    request_url = env['REQUEST_URI']
    # replace with /id_ so we do not get rewritten
    url = request_url.replace('/proxy-fetch', '/id_')
    # update WSGI environment object so the replay app sees the rewritten path
    env['REQUEST_URI'] = self.proxy_coll + url
    env['PATH_INFO'] = env['PATH_INFO'].replace('/proxy-fetch',
                                                self.proxy_coll + '/id_')
    # make request using normal serve_content
    response = self.serve_content(env, self.proxy_coll, url)
    # for WR: allow the cross-origin worker to read the response
    if isinstance(response, WbResponse):
        response.add_access_control_headers(env=env)
    return response
python
{ "resource": "" }
q34479
MetadataCache.load
train
def load(self, coll):
    """Return the parsed metadata for *coll*, using an mtime-based cache.

    On first access (or when the metadata file changed on disk) the file
    is re-read via store_new(); otherwise the cached dict is returned.
    A missing/unreadable metadata file yields an empty dict.

    :param str coll: name of the collection
    :return: the collection's metadata
    :rtype: dict
    """
    path = self.template_str.format(coll=coll)

    try:
        mtime = os.path.getmtime(path)
        # fix: store_new() caches under the collection name, so look the
        # entry up by coll too (the original read self.cache.get(path),
        # which never matched and defeated the cache entirely)
        obj = self.cache.get(coll)
    except OSError:
        # metadata file does not exist / is unreadable
        return {}

    if not obj:
        return self.store_new(coll, path, mtime)

    cached_mtime, data = obj
    if cached_mtime == mtime:
        # file unchanged: return the cached metadata dict
        # (fix: original returned the (mtime, data) tuple itself, and
        # compared 'mtime == cached_mtime == mtime' redundantly)
        return data

    return self.store_new(coll, path, mtime)
python
{ "resource": "" }
q34480
MetadataCache.store_new
train
def store_new(self, coll, path, mtime):
    """Parse a collection's metadata file and cache it under the
    collection name.

    :param str coll: collection the metadata belongs to
    :param str path: path of the collection's metadata file
    :param float mtime: current mtime of the metadata file
    :return: the parsed metadata
    :rtype: dict
    """
    parsed = load_yaml_config(path)
    self.cache[coll] = (mtime, parsed)
    return parsed
python
{ "resource": "" }
q34481
ZipNumIndexSource.load_blocks
train
def load_blocks(self, location, blocks, ranges, query):
    """
    Load one or more blocks of compressed cdx lines, return
    a line iterator which decompresses and returns one line at a time,
    bounded by query.key and query.end_key

    :param str location: path/url of the compressed cdx source
    :param blocks: aggregate block info (offset, length, count)
    :param ranges: per-block compressed lengths, read sequentially
    :param query: provides the start key and exclusive end key bounds
    :return: iterator of cdx lines within [query.key, query.end_key)
    """
    if (logging.getLogger().getEffectiveLevel() <= logging.DEBUG):
        msg = 'Loading {b.count} blocks from {loc}:{b.offset}+{b.length}'
        logging.debug(msg.format(b=blocks, loc=location))

    # single ranged read covering all requested blocks
    reader = self.blk_loader.load(location, blocks.offset, blocks.length)

    def decompress_block(range_):
        # each block is an independent gzip member of range_ bytes
        decomp = gzip_decompressor()
        buff = decomp.decompress(reader.read(range_))
        for line in BytesIO(buff):
            yield line

    def iter_blocks(reader):
        try:
            for r in ranges:
                yield decompress_block(r)
        finally:
            # ensure the underlying reader is closed even on early exit
            reader.close()

    # iterate over all blocks
    iter_ = itertools.chain.from_iterable(iter_blocks(reader))

    # start bound
    iter_ = linearsearch(iter_, query.key)

    # end bound
    iter_ = itertools.takewhile(lambda line: line < query.end_key, iter_)
    return iter_
python
{ "resource": "" }
q34482
ArchiveIndexEntryMixin.extract_mime
train
def extract_mime(self, mime, def_mime='unk'):
    """Store only the media type from a full content-type header,
    dropping charset and other parameters.

    Sets self['mime'] (and self['_content_type'] when a mime was given).

    :param str mime: full content-type header value (may be falsy)
    :param str def_mime: fallback mime when none is given
    """
    self['mime'] = def_mime
    if not mime:
        return

    # keep only the part before the first separator
    self['mime'] = self.MIME_RE.split(mime, 1)[0]
    self['_content_type'] = mime
python
{ "resource": "" }
q34483
ArchiveIndexEntryMixin.extract_status
train
def extract_status(self, status_headers):
    """Store just the numeric status code from a status line in
    self['status'].

    An empty code, or the '204 Error' placeholder emitted for failed
    fetches, is normalized to '-'.

    :param status_headers: object exposing get_statuscode() and statusline
    """
    code = status_headers.get_statuscode()

    if not code:
        code = '-'
    elif code == '204' and 'Error' in status_headers.statusline:
        code = '-'

    self['status'] = code
python
{ "resource": "" }
q34484
DefaultRecordParser.parse_warc_record
train
def parse_warc_record(self, record):
    """Parse a warc record into a cdx index entry.

    :param record: a parsed WARC record (headers + raw stream)
    :return: index entry populated with url, timestamp, mime, status,
        digest and optional metadata fields
    """

    entry = self._create_index_entry(record.rec_type)

    if record.rec_type == 'warcinfo':
        # warcinfo records are keyed by their filename, not a url
        entry['url'] = record.rec_headers.get_header('WARC-Filename')
        entry['urlkey'] = entry['url']
        entry['_warcinfo'] = record.raw_stream.read(record.length)
        return entry

    entry['url'] = record.rec_headers.get_header('WARC-Target-Uri')

    # timestamp
    entry['timestamp'] = iso_date_to_timestamp(record.rec_headers.
                                               get_header('WARC-Date'))

    # mime
    if record.rec_type == 'revisit':
        entry['mime'] = 'warc/revisit'
    elif self.options.get('minimal'):
        entry['mime'] = '-'
    else:
        # request records have no meaningful content-type default
        def_mime = '-' if record.rec_type == 'request' else 'unk'
        entry.extract_mime(record.http_headers.
                           get_header('Content-Type'),
                           def_mime)

    # detected mime from WARC-Identified-Payload-Type
    entry['mime-detected'] = record.rec_headers.get_header(
        'WARC-Identified-Payload-Type')

    # status -- only for response records (by convention):
    if record.rec_type == 'response' and not self.options.get('minimal'):
        entry.extract_status(record.http_headers)
    else:
        entry['status'] = '-'

    # digest
    digest = record.rec_headers.get_header('WARC-Payload-Digest')
    entry['digest'] = digest
    if digest and digest.startswith('sha1:'):
        # strip the algorithm prefix; cdx stores the bare sha1 value
        entry['digest'] = digest[len('sha1:'):]

    elif not entry.get('digest'):
        entry['digest'] = '-'

    # optional json metadata, if present
    metadata = record.rec_headers.get_header('WARC-Json-Metadata')
    if metadata:
        entry['metadata'] = metadata

    return entry
python
{ "resource": "" }
q34485
DefaultRecordParser.parse_arc_record
train
def parse_arc_record(self, record):
    """Parse a (legacy) arc record into a cdx index entry.

    :param record: a parsed ARC record
    :return: index entry populated with url, timestamp and (unless
        minimal mode) mime and status; arc records carry no digest
    """
    url = record.rec_headers.get_header('uri')
    # percent-encode characters that would corrupt the line-oriented cdx
    url = url.replace('\r', '%0D')
    url = url.replace('\n', '%0A')
    # replace formfeed
    url = url.replace('\x0c', '%0C')
    # replace nulls
    url = url.replace('\x00', '%00')

    entry = self._create_index_entry(record.rec_type)
    entry['url'] = url

    # timestamp -- truncate to the standard 14-digit form if longer
    entry['timestamp'] = record.rec_headers.get_header('archive-date')
    if len(entry['timestamp']) > 14:
        entry['timestamp'] = entry['timestamp'][:14]

    if not self.options.get('minimal'):
        # mime
        entry.extract_mime(record.rec_headers.get_header('content-type'))

        # status
        entry.extract_status(record.http_headers)

    # digest -- not available in arc format
    entry['digest'] = '-'

    return entry
python
{ "resource": "" }
q34486
render_field
train
def render_field(parser, token):
    """
    Render a form field using given attribute-value pairs

    Takes form field as first argument and list of attribute-value pairs for
    all other arguments.  Attribute-value pairs should be in the form of
    attribute=value or attribute="a value" for assignment and attribute+=value
    or attribute+="value" for appending.

    :param parser: django template parser
    :param token: the template tag token being compiled
    :return: node rendering the field with the attributes applied
    :rtype: FieldAttributeNode
    :raises TemplateSyntaxError: on a malformed tag or attribute pair
    """
    error_msg = '%r tag requires a form field followed by a list of attributes and values in the form attr="value"' % token.split_contents()[0]
    try:
        bits = token.split_contents()
        tag_name = bits[0]  # noqa (kept for clarity; tag name itself unused)
        form_field = bits[1]
        attr_list = bits[2:]
    except ValueError:
        raise TemplateSyntaxError(error_msg)

    form_field = parser.compile_filter(form_field)

    # split pairs into plain assignments ('=') and appends ('+=')
    set_attrs = []
    append_attrs = []
    for pair in attr_list:
        match = ATTRIBUTE_RE.match(pair)
        if not match:
            raise TemplateSyntaxError(error_msg + ": %s" % pair)
        dct = match.groupdict()
        attr, sign, value = \
            dct['attr'], dct['sign'], parser.compile_filter(dct['value'])
        if sign == "=":
            set_attrs.append((attr, value))
        else:
            append_attrs.append((attr, value))

    return FieldAttributeNode(form_field, set_attrs, append_attrs)
python
{ "resource": "" }
q34487
TokenIntrospectionEndpoint.response
train
def response(cls, dic, status=200):
    """Create a JSON response with caching disabled.

    :param dict dic: payload to serialize
    :param int status: HTTP status code
    :return: JsonResponse with no-store/no-cache headers
    """
    resp = JsonResponse(dic, status=status)
    resp['Cache-Control'] = 'no-store'
    resp['Pragma'] = 'no-cache'

    return resp
python
{ "resource": "" }
q34488
ScopeClaims.create_response_dic
train
def create_response_dic(self):
    """Build the claims dict to be serialized as JSON.

    Only scopes that are both requested and registered on the class
    contribute claims; empty/None values are stripped afterwards.

    :return: merged and cleaned claims
    :rtype: dict
    """
    registered = self._scopes_registered()

    dic = {}
    for scope in self.scopes:
        if scope in registered:
            claim_method = getattr(self, 'scope_' + scope)
            dic.update(claim_method())

    return self._clean_dic(dic)
python
{ "resource": "" }
q34489
ScopeClaims._scopes_registered
train
def _scopes_registered(self): """ Return a list that contains all the scopes registered in the class. """ scopes = [] for name in dir(self.__class__): if name.startswith('scope_'): scope = name.split('scope_')[1] scopes.append(scope) return scopes
python
{ "resource": "" }
q34490
ScopeClaims._clean_dic
train
def _clean_dic(self, dic): """ Clean recursively all empty or None values inside a dict. """ aux_dic = dic.copy() for key, value in iter(dic.items()): if value is None or value == '': del aux_dic[key] elif type(value) is dict: cleaned_dict = self._clean_dic(value) if not cleaned_dict: del aux_dic[key] continue aux_dic[key] = cleaned_dict return aux_dic
python
{ "resource": "" }
q34491
AuthorizeEndpoint.set_client_user_consent
train
def set_client_user_consent(self):
    """
    Save the user consent given to a specific client.

    Creates or refreshes the UserConsent row for (user, client); the
    scope, date_given and expires_at are always updated to reflect the
    current request.

    Return None.
    """
    date_given = timezone.now()
    expires_at = date_given + timedelta(
        days=settings.get('OIDC_SKIP_CONSENT_EXPIRE'))

    uc, created = UserConsent.objects.get_or_create(
        user=self.request.user,
        client=self.client,
        defaults={
            'expires_at': expires_at,
            'date_given': date_given,
        }
    )
    uc.scope = self.params['scope']

    # Rewrite expires_at and date_given if object already exists.
    if not created:
        uc.expires_at = expires_at
        uc.date_given = date_given

    uc.save()
python
{ "resource": "" }
q34492
AuthorizeEndpoint.client_has_user_consent
train
def client_has_user_consent(self):
    """Check whether a still-valid user consent exists for this client
    covering all requested scopes.

    :return: True when consent exists, covers the scopes and has not
        expired
    :rtype: bool
    """
    try:
        uc = UserConsent.objects.get(user=self.request.user,
                                     client=self.client)
    except UserConsent.DoesNotExist:
        return False

    scopes_covered = set(self.params['scope']).issubset(uc.scope)
    return scopes_covered and not uc.has_expired()
python
{ "resource": "" }
q34493
AuthorizeEndpoint.get_scopes_information
train
def get_scopes_information(self):
    """
    Return a list with the description of all the scopes requested.

    Extra (custom) scope claims take precedence: a standard scope entry
    is dropped when an extra entry with the same scope name exists.
    """
    scopes = StandardScopeClaims.get_scopes_info(self.params['scope'])

    if settings.get('OIDC_EXTRA_SCOPE_CLAIMS'):
        scopes_extra = settings.get(
            'OIDC_EXTRA_SCOPE_CLAIMS',
            import_str=True).get_scopes_info(self.params['scope'])

        # fix: the original deleted from 'scopes' by stale indices while
        # enumerating a copy, so after the first deletion subsequent
        # deletions removed the wrong entries; rebuild the list instead
        extra_names = {extra['scope'] for extra in scopes_extra}
        scopes = [scope for scope in scopes
                  if scope['scope'] not in extra_names]
    else:
        scopes_extra = []

    return scopes + scopes_extra
python
{ "resource": "" }
q34494
get
train
def get(name, import_str=False):
    """
    Helper function to use inside the package.

    Resolves a setting value: user setting if present, otherwise the
    package default. Dict settings are merged (user values override
    defaults). Raises when a required setting is missing.

    :param str name: setting attribute name
    :param bool import_str: import the value as a dotted path when True
    """
    value = None
    default_value = getattr(default_settings, name)

    try:
        value = getattr(settings, name)
    except AttributeError:
        if name in default_settings.required_attrs:
            raise Exception('You must set ' + name + ' in your settings.')

    if isinstance(default_value, dict) and value:
        # fix: merge into a copy; the original update()d the shared
        # default_settings dict in place, so user overrides leaked into
        # the defaults for every later call
        merged = dict(default_value)
        merged.update(value)
        value = merged
    elif value is None:
        value = default_value

    value = import_from_str(value) if import_str else value

    return value
python
{ "resource": "" }
q34495
DefaultSettings.OIDC_UNAUTHENTICATED_SESSION_MANAGEMENT_KEY
train
def OIDC_UNAUTHENTICATED_SESSION_MANAGEMENT_KEY(self):
    """
    OPTIONAL. Fixed browser-state key used for unauthenticated clients.

    Generated once and memoized on the instance: 100 characters drawn
    from uppercase letters and digits.
    """
    # Memoize generated value
    if not self._unauthenticated_session_management_key:
        # Use a CSPRNG instead of ``random``: this key feeds the OIDC
        # session-management browser state, so it must not be
        # predictable.
        import secrets
        alphabet = string.ascii_uppercase + string.digits
        self._unauthenticated_session_management_key = ''.join(
            secrets.choice(alphabet) for _ in range(100))
    return self._unauthenticated_session_management_key
python
{ "resource": "" }
q34496
strip_prompt_login
train
def strip_prompt_login(path):
    """
    Remove the 'login' value from the 'prompt' query parameter of *path*.

    If 'prompt' becomes empty it is dropped entirely; other query
    parameters are preserved. A path without a 'prompt' parameter is
    returned unchanged (the original raised IndexError on ``''[0]``).
    """
    uri = urlsplit(path)
    query_params = parse_qs(uri.query)
    # parse_qs omits absent keys; default to [''] so the [0] indexing
    # cannot raise IndexError when 'prompt' is missing.
    prompt_list = query_params.get('prompt', [''])[0].split()
    if 'login' in prompt_list:
        prompt_list.remove('login')
        query_params['prompt'] = ' '.join(prompt_list)
        # parse_qs drops blank values, so 'prompt' can only be empty
        # here, right after the removal above.
        if not query_params['prompt']:
            del query_params['prompt']
    uri = uri._replace(query=urlencode(query_params, doseq=True))
    return urlunsplit(uri)
python
{ "resource": "" }
q34497
get_site_url
train
def get_site_url(site_url=None, request=None):
    """
    Resolve the site url.

    Precedence: explicit ``site_url`` argument, then the ``SITE_URL``
    setting, then scheme/host taken from the ``request`` object.
    Raises Exception when none of the three is available.
    """
    url = site_url or settings.get('SITE_URL')
    if url:
        return url
    if request:
        return '{}://{}'.format(request.scheme, request.get_host())
    raise Exception('Either pass `site_url`, '
                    'or set `SITE_URL` in settings, '
                    'or pass `request` object.')
python
{ "resource": "" }
q34498
get_issuer
train
def get_issuer(site_url=None, request=None):
    """
    Build the issuer full url: the site url plus the provider path
    prefix (the discovery route minus its well-known suffix).
    """
    base = get_site_url(site_url=site_url, request=request)
    prefix = reverse('oidc_provider:provider-info').split(
        '/.well-known/openid-configuration')[0]
    return str(base + prefix)
python
{ "resource": "" }
q34499
get_browser_state_or_default
train
def get_browser_state_or_default(request):
    """
    Compute the browser-state value for OIDC session management.

    Uses the request's session key when a session exists, otherwise the
    shared key for unauthenticated clients; the result is hashed with
    SHA-224.
    """
    state_key = request.session.session_key
    if not state_key:
        state_key = settings.get(
            'OIDC_UNAUTHENTICATED_SESSION_MANAGEMENT_KEY')
    return sha224(state_key.encode('utf-8')).hexdigest()
python
{ "resource": "" }