Dataset schema:
  _id              : string (length 2 to 7)
  title            : string (length 1 to 88)
  partition        : string (3 values)
  text             : string (length 75 to 19.8k)
  language         : string (1 value)
  meta_information : dict
q14400
setup_decoder
train
def setup_decoder(codec, dparams):
    """Wraps openjp2 library function opj_setup_decoder.

    Setup the decoder with decompression parameters.

    Parameters
    ----------
    codec : CODEC_TYPE
        Codec initialized by create_decompress function.
    dparams : DecompressionParametersType
        Decompression parameters.

    Raises
    ------
    RuntimeError
        If the OpenJPEG library routine opj_setup_decoder fails.
    """
    ARGTYPES = [CODEC_TYPE, ctypes.POINTER(DecompressionParametersType)]
    OPENJP2.opj_setup_decoder.argtypes = ARGTYPES
    OPENJP2.opj_setup_decoder.restype = check_error
    OPENJP2.opj_setup_decoder(codec, ctypes.byref(dparams))
python
{ "resource": "" }
q14401
setup_encoder
train
def setup_encoder(codec, cparams, image):
    """Wraps openjp2 library function opj_setup_encoder.

    Setup the encoder parameters using the current image and using user
    parameters.

    Parameters
    ----------
    codec : CODEC_TYPE
        codec initialized by create_compress function
    cparams : CompressionParametersType
        compression parameters
    image : ImageType
        input-filled image

    Raises
    ------
    RuntimeError
        If the OpenJPEG library routine opj_setup_encoder fails.
    """
    ARGTYPES = [CODEC_TYPE,
                ctypes.POINTER(CompressionParametersType),
                ctypes.POINTER(ImageType)]
    OPENJP2.opj_setup_encoder.argtypes = ARGTYPES
    OPENJP2.opj_setup_encoder.restype = check_error
    OPENJP2.opj_setup_encoder(codec, ctypes.byref(cparams), image)
python
{ "resource": "" }
q14402
start_compress
train
def start_compress(codec, image, stream):
    """Wraps openjp2 library function opj_start_compress.

    Start to compress the current image.

    Parameters
    ----------
    codec : CODEC_TYPE
        Compressor handle.
    image : pointer to ImageType
        Input filled image.
    stream : STREAM_TYPE_P
        Input stream.

    Raises
    ------
    RuntimeError
        If the OpenJPEG library routine opj_start_compress fails.
    """
    OPENJP2.opj_start_compress.argtypes = [CODEC_TYPE,
                                           ctypes.POINTER(ImageType),
                                           STREAM_TYPE_P]
    OPENJP2.opj_start_compress.restype = check_error
    OPENJP2.opj_start_compress(codec, image, stream)
python
{ "resource": "" }
q14403
stream_create_default_file_stream
train
def stream_create_default_file_stream(fname, isa_read_stream):
    """Wraps openjp2 library function opj_stream_create_default_file_stream.

    Sets the stream to be a file stream.  This function is only valid for
    the 2.1 version of the openjp2 library.

    Parameters
    ----------
    fname : str
        Specifies a file.
    isa_read_stream : bool
        True (read) or False (write)

    Returns
    -------
    stream : stream_t
        An OpenJPEG file stream.
    """
    ARGTYPES = [ctypes.c_char_p, ctypes.c_int32]
    OPENJP2.opj_stream_create_default_file_stream.argtypes = ARGTYPES
    OPENJP2.opj_stream_create_default_file_stream.restype = STREAM_TYPE_P
    read_stream = 1 if isa_read_stream else 0
    file_argument = ctypes.c_char_p(fname.encode())
    stream = OPENJP2.opj_stream_create_default_file_stream(file_argument,
                                                           read_stream)
    return stream
python
{ "resource": "" }
q14404
stream_destroy
train
def stream_destroy(stream):
    """Wraps openjp2 library function opj_stream_destroy.

    Destroys the stream created by create_stream.

    Parameters
    ----------
    stream : STREAM_TYPE_P
        The file stream.
    """
    OPENJP2.opj_stream_destroy.argtypes = [STREAM_TYPE_P]
    OPENJP2.opj_stream_destroy.restype = ctypes.c_void_p
    OPENJP2.opj_stream_destroy(stream)
python
{ "resource": "" }
q14405
write_tile
train
def write_tile(codec, tile_index, data, data_size, stream):
    """Wraps openjp2 library function opj_write_tile.

    Write a tile into an image.

    Parameters
    ----------
    codec : CODEC_TYPE
        The jpeg2000 codec
    tile_index : int
        The index of the tile to write, zero-indexing assumed
    data : array
        Image data arranged in usual C-order
    data_size : int
        Size of a tile in bytes
    stream : STREAM_TYPE_P
        The stream to write data to

    Raises
    ------
    RuntimeError
        If the OpenJPEG library routine opj_write_tile fails.
    """
    OPENJP2.opj_write_tile.argtypes = [CODEC_TYPE,
                                       ctypes.c_uint32,
                                       ctypes.POINTER(ctypes.c_uint8),
                                       ctypes.c_uint32,
                                       STREAM_TYPE_P]
    OPENJP2.opj_write_tile.restype = check_error
    datap = data.ctypes.data_as(ctypes.POINTER(ctypes.c_uint8))
    OPENJP2.opj_write_tile(codec,
                           ctypes.c_uint32(int(tile_index)),
                           datap,
                           ctypes.c_uint32(int(data_size)),
                           stream)
python
{ "resource": "" }
q14406
removeZeroLenPadding
train
def removeZeroLenPadding(str, blocksize=AES_blocksize):
    'Remove Padding with zeroes + last byte equal to the number of padding bytes'
    try:
        pad_len = ord(str[-1])  # last byte contains number of padding bytes
    except TypeError:
        pad_len = str[-1]
    assert pad_len < blocksize, 'padding error'
    assert pad_len < len(str), 'padding error'
    return str[:-pad_len]
python
{ "resource": "" }
q14407
appendNullPadding
train
def appendNullPadding(str, blocksize=AES_blocksize):
    'Pad with null bytes'
    pad_len = paddingLength(len(str), blocksize)
    padding = '\0' * pad_len
    return str + padding
python
{ "resource": "" }
q14408
removeNullPadding
train
def removeNullPadding(str, blocksize=AES_blocksize):
    'Remove padding with null bytes'
    pad_len = 0
    for char in str[::-1]:  # str[::-1] reverses string
        if char == '\0':
            pad_len += 1
        else:
            break
    if pad_len:  # str[:-0] would return '', so only slice when padding exists
        str = str[:-pad_len]
    return str
python
{ "resource": "" }
q14409
appendSpacePadding
train
def appendSpacePadding(str, blocksize=AES_blocksize):
    'Pad with spaces'
    pad_len = paddingLength(len(str), blocksize)
    padding = ' ' * pad_len  # was '\0' * pad_len, which padded with nulls, not spaces
    return str + padding
python
{ "resource": "" }
q14410
removeSpacePadding
train
def removeSpacePadding(str, blocksize=AES_blocksize):
    'Remove padding with spaces'
    pad_len = 0
    for char in str[::-1]:  # str[::-1] reverses string
        if char == ' ':
            pad_len += 1
        else:
            break
    if pad_len:  # avoid str[:-0], which would return an empty string
        str = str[:-pad_len]
    return str
python
{ "resource": "" }
q14411
_default_error_handler
train
def _default_error_handler(msg, _):
    """Default error handler callback for libopenjp2."""
    msg = "OpenJPEG library error: {0}".format(msg.decode('utf-8').rstrip())
    opj2.set_error_message(msg)
python
{ "resource": "" }
q14412
_default_warning_handler
train
def _default_warning_handler(library_msg, _):
    """Default warning handler callback."""
    library_msg = library_msg.decode('utf-8').rstrip()
    msg = "OpenJPEG library warning: {0}".format(library_msg)
    warnings.warn(msg, UserWarning)
python
{ "resource": "" }
q14413
Jp2k.parse
train
def parse(self):
    """Parses the JPEG 2000 file.

    Raises
    ------
    IOError
        The file was not JPEG 2000.
    """
    self.length = os.path.getsize(self.filename)

    with open(self.filename, 'rb') as fptr:
        # Make sure we have a JPEG2000 file.  It could be either JP2 or
        # J2C.  Check for J2C first, single box in that case.
        read_buffer = fptr.read(2)
        signature, = struct.unpack('>H', read_buffer)
        if signature == 0xff4f:
            self._codec_format = opj2.CODEC_J2K
            # That's it, we're done.  The codestream object is only
            # produced upon explicit request.
            return

        self._codec_format = opj2.CODEC_JP2

        # Should be JP2.
        # First 4 bytes should be 12, the length of the 'jP  ' box.
        # 2nd 4 bytes should be the box ID ('jP  ').
        # 3rd 4 bytes should be the box signature (13, 10, 135, 10).
        fptr.seek(0)
        read_buffer = fptr.read(12)
        values = struct.unpack('>I4s4B', read_buffer)
        box_length = values[0]
        box_id = values[1]
        signature = values[2:]

        if (box_length != 12) or (box_id != b'jP  ') \
                or (signature != (13, 10, 135, 10)):
            msg = '{filename} is not a JPEG 2000 file.'
            msg = msg.format(filename=self.filename)
            raise IOError(msg)

        # Back up and start again, we know we have a superbox (box of
        # boxes) here.
        fptr.seek(0)
        self.box = self.parse_superbox(fptr)
        self._validate()
python
{ "resource": "" }
q14414
Jp2k._validate
train
def _validate(self):
    """Validate the JPEG 2000 outermost superbox.  These checks must be
    done at a file level.
    """
    # A JP2 file must contain certain boxes.  The 2nd box must be a file
    # type box.
    if not isinstance(self.box[1], FileTypeBox):
        msg = "{filename} does not contain a valid File Type box."
        msg = msg.format(filename=self.filename)
        raise IOError(msg)

    # A jp2-branded file cannot contain an "any ICC" profile.
    ftyp = self.box[1]
    if ftyp.brand == 'jp2 ':
        jp2h = [box for box in self.box if box.box_id == 'jp2h'][0]
        colrs = [box for box in jp2h.box if box.box_id == 'colr']
        for colr in colrs:
            if colr.method not in (core.ENUMERATED_COLORSPACE,
                                   core.RESTRICTED_ICC_PROFILE):
                msg = ("Color Specification box method must specify "
                       "either an enumerated colorspace or a restricted "
                       "ICC profile if the file type box brand is 'jp2 '.")
                warnings.warn(msg, UserWarning)
python
{ "resource": "" }
q14415
Jp2k._set_cinema_params
train
def _set_cinema_params(self, cinema_mode, fps):
    """Populate compression parameters structure for cinema2K.

    Parameters
    ----------
    cinema_mode : {'cinema2k', 'cinema4k'}
        Use either Cinema2K or Cinema4K profile.
    fps : {24, 48}
        Frames per second.
    """
    if re.match(r'1\.5|2\.0', version.openjpeg_version) is not None:
        msg = ("Writing Cinema2K or Cinema4K files is not supported with "
               "OpenJPEG library versions less than 2.1.0.  The installed "
               "version of OpenJPEG is {version}.")
        msg = msg.format(version=version.openjpeg_version)
        raise IOError(msg)

    # Cinema modes imply MCT.
    self._cparams.tcp_mct = 1

    if cinema_mode == 'cinema2k':
        if fps not in [24, 48]:
            msg = 'Cinema2K frame rate must be either 24 or 48.'
            raise IOError(msg)
        if fps == 24:
            self._cparams.rsiz = core.OPJ_PROFILE_CINEMA_2K
            self._cparams.max_comp_size = core.OPJ_CINEMA_24_COMP
            self._cparams.max_cs_size = core.OPJ_CINEMA_24_CS
        else:
            self._cparams.rsiz = core.OPJ_PROFILE_CINEMA_2K
            self._cparams.max_comp_size = core.OPJ_CINEMA_48_COMP
            self._cparams.max_cs_size = core.OPJ_CINEMA_48_CS
    else:
        # cinema4k
        self._cparams.rsiz = core.OPJ_PROFILE_CINEMA_4K
python
{ "resource": "" }
q14416
Jp2k._write_openjpeg
train
def _write_openjpeg(self, img_array, verbose=False):
    """
    Write JPEG 2000 file using OpenJPEG 1.5 interface.
    """
    if img_array.ndim == 2:
        # Force the image to be 3D.  Just makes things easier later on.
        img_array = img_array.reshape(img_array.shape[0],
                                      img_array.shape[1],
                                      1)

    self._populate_comptparms(img_array)

    with ExitStack() as stack:
        image = opj.image_create(self._comptparms, self._colorspace)
        stack.callback(opj.image_destroy, image)

        numrows, numcols, numlayers = img_array.shape

        # set image offset and reference grid
        image.contents.x0 = self._cparams.image_offset_x0
        image.contents.y0 = self._cparams.image_offset_y0
        image.contents.x1 = (image.contents.x0 +
                             (numcols - 1) * self._cparams.subsampling_dx + 1)
        image.contents.y1 = (image.contents.y0 +
                             (numrows - 1) * self._cparams.subsampling_dy + 1)

        # Stage the image data to the openjpeg data structure.
        for k in range(0, numlayers):
            layer = np.ascontiguousarray(img_array[:, :, k],
                                         dtype=np.int32)
            dest = image.contents.comps[k].data
            src = layer.ctypes.data
            ctypes.memmove(dest, src, layer.nbytes)

        cinfo = opj.create_compress(self._cparams.codec_fmt)
        stack.callback(opj.destroy_compress, cinfo)

        # Setup the info, warning, and error handlers.
        # Always use the warning and error handler.  Use of an info
        # handler is optional.
        event_mgr = opj.EventMgrType()
        _info_handler = _INFO_CALLBACK if verbose else None
        event_mgr.info_handler = _info_handler
        event_mgr.warning_handler = ctypes.cast(_WARNING_CALLBACK,
                                                ctypes.c_void_p)
        event_mgr.error_handler = ctypes.cast(_ERROR_CALLBACK,
                                              ctypes.c_void_p)

        opj.setup_encoder(cinfo, ctypes.byref(self._cparams), image)

        cio = opj.cio_open(cinfo)
        stack.callback(opj.cio_close, cio)

        if not opj.encode(cinfo, cio, image):
            raise IOError("Encode error.")

        pos = opj.cio_tell(cio)

        blob = ctypes.string_at(cio.contents.buffer, pos)
        fptr = open(self.filename, 'wb')
        stack.callback(fptr.close)
        fptr.write(blob)

    self.parse()
python
{ "resource": "" }
q14417
Jp2k._validate_j2k_colorspace
train
def _validate_j2k_colorspace(self, cparams, colorspace):
    """
    Cannot specify a colorspace with J2K.
    """
    if cparams.codec_fmt == opj2.CODEC_J2K and colorspace is not None:
        msg = 'Do not specify a colorspace when writing a raw codestream.'
        raise IOError(msg)
python
{ "resource": "" }
q14418
Jp2k._validate_codeblock_size
train
def _validate_codeblock_size(self, cparams):
    """
    Code block dimensions must satisfy certain restrictions.

    They must both be a power of 2 and the total area defined by the
    width and height cannot be either too great or too small for the
    codec.
    """
    if cparams.cblockw_init != 0 and cparams.cblockh_init != 0:
        # These fields ARE zero if uninitialized.
        width = cparams.cblockw_init
        height = cparams.cblockh_init
        if height * width > 4096 or height < 4 or width < 4:
            msg = ("The code block area is specified as "
                   "{height} x {width} = {area} square pixels.  "
                   "Code block area cannot exceed 4096 square pixels.  "
                   "Code block height and width dimensions must be at "
                   "least 4 pixels.")
            msg = msg.format(height=height, width=width,
                             area=height * width)
            raise IOError(msg)
        if ((math.log(height, 2) != math.floor(math.log(height, 2)) or
             math.log(width, 2) != math.floor(math.log(width, 2)))):
            msg = ("Bad code block size ({height} x {width}).  "
                   "The dimensions must be powers of 2.")
            msg = msg.format(height=height, width=width)
            raise IOError(msg)
python
{ "resource": "" }
q14419
Jp2k._validate_precinct_size
train
def _validate_precinct_size(self, cparams):
    """
    Precinct dimensions must satisfy certain restrictions if specified.

    They must both be a power of 2 and must both be at least twice the
    size of their codeblock size counterparts.
    """
    code_block_specified = False
    if cparams.cblockw_init != 0 and cparams.cblockh_init != 0:
        code_block_specified = True

    if cparams.res_spec != 0:
        # precinct size was not specified if this field is zero.
        for j in range(cparams.res_spec):
            prch = cparams.prch_init[j]
            prcw = cparams.prcw_init[j]
            if j == 0 and code_block_specified:
                height, width = cparams.cblockh_init, cparams.cblockw_init
                if prch < height * 2 or prcw < width * 2:
                    msg = ("The highest resolution precinct size "
                           "({prch} x {prcw}) must be at least twice that "
                           "of the code block size "
                           "({cbh} x {cbw}).")
                    msg = msg.format(prch=prch, prcw=prcw,
                                     cbh=height, cbw=width)
                    raise IOError(msg)
            if ((math.log(prch, 2) != math.floor(math.log(prch, 2)) or
                 math.log(prcw, 2) != math.floor(math.log(prcw, 2)))):
                msg = ("Bad precinct size ({height} x {width}).  "
                       "Precinct dimensions must be powers of 2.")
                msg = msg.format(height=prch, width=prcw)
                raise IOError(msg)
python
{ "resource": "" }
q14420
Jp2k._validate_image_rank
train
def _validate_image_rank(self, img_array):
    """
    Images must be either 2D or 3D.
    """
    if img_array.ndim == 1 or img_array.ndim > 3:
        msg = "{0}D imagery is not allowed.".format(img_array.ndim)
        raise IOError(msg)
python
{ "resource": "" }
q14421
Jp2k._validate_image_datatype
train
def _validate_image_datatype(self, img_array):
    """
    Only uint8 and uint16 images are currently supported.
    """
    if img_array.dtype != np.uint8 and img_array.dtype != np.uint16:
        msg = ("Only uint8 and uint16 datatypes are currently supported "
               "when writing.")
        raise RuntimeError(msg)
python
{ "resource": "" }
q14422
Jp2k._validate_compression_params
train
def _validate_compression_params(self, img_array, cparams, colorspace):
    """Check that the compression parameters are valid.

    Parameters
    ----------
    img_array : ndarray
        Image data to be written to file.
    cparams : CompressionParametersType(ctypes.Structure)
        Corresponds to cparameters_t type in openjp2 headers.
    """
    self._validate_j2k_colorspace(cparams, colorspace)
    self._validate_codeblock_size(cparams)
    self._validate_precinct_size(cparams)
    self._validate_image_rank(img_array)
    self._validate_image_datatype(img_array)
python
{ "resource": "" }
q14423
Jp2k._determine_colorspace
train
def _determine_colorspace(self, colorspace=None, **kwargs):
    """Determine the colorspace from the supplied inputs.

    Parameters
    ----------
    colorspace : str, optional
        Either 'rgb' or 'gray'.
    """
    if colorspace is None:
        # Must infer the colorspace from the image dimensions.
        if len(self.shape) < 3:
            # A single channel image is grayscale.
            self._colorspace = opj2.CLRSPC_GRAY
        elif self.shape[2] == 1 or self.shape[2] == 2:
            # A single channel image or an image with two channels is
            # going to be greyscale.
            self._colorspace = opj2.CLRSPC_GRAY
        else:
            # Anything else must be RGB, right?
            self._colorspace = opj2.CLRSPC_SRGB
    else:
        if colorspace.lower() not in ('rgb', 'grey', 'gray'):
            msg = 'Invalid colorspace "{0}".'.format(colorspace)
            raise IOError(msg)
        elif colorspace.lower() == 'rgb' and self.shape[2] < 3:
            msg = 'RGB colorspace requires at least 3 components.'
            raise IOError(msg)

        # Turn the colorspace from a string to the enumerated value that
        # the library expects.
        COLORSPACE_MAP = {'rgb': opj2.CLRSPC_SRGB,
                          'gray': opj2.CLRSPC_GRAY,
                          'grey': opj2.CLRSPC_GRAY,
                          'ycc': opj2.CLRSPC_YCC}

        self._colorspace = COLORSPACE_MAP[colorspace.lower()]
python
{ "resource": "" }
q14424
Jp2k._write_openjp2
train
def _write_openjp2(self, img_array, verbose=False):
    """
    Write JPEG 2000 file using OpenJPEG 2.x interface.
    """
    if img_array.ndim == 2:
        # Force the image to be 3D.  Just makes things easier later on.
        numrows, numcols = img_array.shape
        img_array = img_array.reshape(numrows, numcols, 1)

    self._populate_comptparms(img_array)

    with ExitStack() as stack:
        image = opj2.image_create(self._comptparms, self._colorspace)
        stack.callback(opj2.image_destroy, image)

        self._populate_image_struct(image, img_array)

        codec = opj2.create_compress(self._cparams.codec_fmt)
        stack.callback(opj2.destroy_codec, codec)

        if self._verbose or verbose:
            info_handler = _INFO_CALLBACK
        else:
            info_handler = None

        opj2.set_info_handler(codec, info_handler)
        opj2.set_warning_handler(codec, _WARNING_CALLBACK)
        opj2.set_error_handler(codec, _ERROR_CALLBACK)

        opj2.setup_encoder(codec, self._cparams, image)

        strm = opj2.stream_create_default_file_stream(self.filename, False)
        stack.callback(opj2.stream_destroy, strm)

        opj2.start_compress(codec, image, strm)
        opj2.encode(codec, strm)
        opj2.end_compress(codec, strm)

    # Refresh the metadata.
    self.parse()
python
{ "resource": "" }
q14425
Jp2k.append
train
def append(self, box):
    """Append a JP2 box to the file in-place.

    Parameters
    ----------
    box : Jp2Box
        Instance of a JP2 box.  Only UUID and XML boxes can currently be
        appended.
    """
    if self._codec_format == opj2.CODEC_J2K:
        msg = "Only JP2 files can currently have boxes appended to them."
        raise IOError(msg)

    if not ((box.box_id == 'xml ') or
            (box.box_id == 'uuid' and
             box.uuid == UUID('be7acfcb-97a9-42e8-9c71-999491e3afac'))):
        msg = ("Only XML boxes and XMP UUID boxes can currently be "
               "appended.")
        raise IOError(msg)

    # Check the last box.  If the length field is zero, then rewrite
    # the length field to reflect the true length of the box.
    with open(self.filename, 'rb') as ifile:
        offset = self.box[-1].offset
        ifile.seek(offset)
        read_buffer = ifile.read(4)
        box_length, = struct.unpack('>I', read_buffer)
        if box_length == 0:
            # Reopen the file in write mode and rewrite the length field.
            true_box_length = os.path.getsize(ifile.name) - offset
            with open(self.filename, 'r+b') as ofile:
                ofile.seek(offset)
                write_buffer = struct.pack('>I', true_box_length)
                ofile.write(write_buffer)

    # Can now safely append the box.
    with open(self.filename, 'ab') as ofile:
        box.write(ofile)

    self.parse()
python
{ "resource": "" }
q14426
Jp2k._write_wrapped_codestream
train
def _write_wrapped_codestream(self, ofile, box):
    """Write wrapped codestream."""
    # Codestreams require a bit more care.
    # Am I a raw codestream?
    if len(self.box) == 0:
        # Yes, just write the codestream box header plus all
        # of myself out to file.
        ofile.write(struct.pack('>I', self.length + 8))
        ofile.write(b'jp2c')
        with open(self.filename, 'rb') as ifile:
            ofile.write(ifile.read())
        return

    # OK, I'm a jp2/jpx file.  Need to find out where the raw codestream
    # actually starts.
    offset = box.offset
    if offset == -1:
        if self.box[1].brand == 'jpx ':
            msg = ("The codestream box must have its offset and length "
                   "attributes fully specified if the file type brand is "
                   "JPX.")
            raise IOError(msg)

        # Find the first codestream in the file.
        jp2c = [_box for _box in self.box if _box.box_id == 'jp2c']
        offset = jp2c[0].offset

    # Ready to write the codestream.
    with open(self.filename, 'rb') as ifile:
        ifile.seek(offset)

        # Verify that the specified codestream is right.
        read_buffer = ifile.read(8)
        L, T = struct.unpack_from('>I4s', read_buffer, 0)
        if T != b'jp2c':
            msg = "Unable to locate the specified codestream."
            raise IOError(msg)
        if L == 0:
            # The length of the box is presumed to last until the end of
            # the file.  Compute the effective length of the box.
            L = os.path.getsize(ifile.name) - ifile.tell() + 8

        elif L == 1:
            # The length of the box is in the XL field, a 64-bit value.
            read_buffer = ifile.read(8)
            L, = struct.unpack('>Q', read_buffer)

        ifile.seek(offset)
        read_buffer = ifile.read(L)
        ofile.write(read_buffer)
python
{ "resource": "" }
q14427
Jp2k._get_default_jp2_boxes
train
def _get_default_jp2_boxes(self):
    """Create a default set of JP2 boxes."""
    # Try to create a reasonable default.
    boxes = [JPEG2000SignatureBox(),
             FileTypeBox(),
             JP2HeaderBox(),
             ContiguousCodestreamBox()]
    height = self.codestream.segment[1].ysiz
    width = self.codestream.segment[1].xsiz
    num_components = len(self.codestream.segment[1].xrsiz)
    if num_components < 3:
        colorspace = core.GREYSCALE
    else:
        if len(self.box) == 0:
            # Best guess is SRGB
            colorspace = core.SRGB
        else:
            # Take whatever the first jp2 header / color specification
            # says.
            jp2hs = [box for box in self.box if box.box_id == 'jp2h']
            colorspace = jp2hs[0].box[1].colorspace

    boxes[2].box = [ImageHeaderBox(height=height, width=width,
                                   num_components=num_components),
                    ColourSpecificationBox(colorspace=colorspace)]

    return boxes
python
{ "resource": "" }
q14428
Jp2k._remove_ellipsis
train
def _remove_ellipsis(self, index, numrows, numcols, numbands):
    """
    resolve the first ellipsis in the index so that it references the
    image

    Parameters
    ----------
    index : tuple
        tuple of index arguments, presumably one of them is the Ellipsis
    numrows, numcols, numbands : int
        image dimensions

    Returns
    -------
    tuple
        Same as index, except that the first Ellipsis is replaced with a
        proper slice whose start and stop members are not None
    """
    # Remove the first ellipsis we find.
    rows = slice(0, numrows)
    cols = slice(0, numcols)
    bands = slice(0, numbands)
    if index[0] is Ellipsis:
        if len(index) == 2:
            # jp2k[..., other_slice]
            newindex = (rows, cols, index[1])
        else:
            # jp2k[..., cols, bands]
            newindex = (rows, index[1], index[2])
    elif index[1] is Ellipsis:
        if len(index) == 2:
            # jp2k[rows, ...]
            newindex = (index[0], cols, bands)
        else:
            # jp2k[rows, ..., bands]
            newindex = (index[0], cols, index[2])
    else:
        # Assume that we don't have 4D imagery, of course.
        newindex = (index[0], index[1], bands)

    return newindex
python
{ "resource": "" }
q14429
Jp2k._subsampling_sanity_check
train
def _subsampling_sanity_check(self):
    """Check for differing subsample factors.
    """
    dxs = np.array(self.codestream.segment[1].xrsiz)
    dys = np.array(self.codestream.segment[1].yrsiz)
    if np.any(dxs - dxs[0]) or np.any(dys - dys[0]):
        msg = ("The read_bands method should be used when the subsampling "
               "factors are different."
               "\n\n{siz_segment}")
        msg = msg.format(siz_segment=str(self.codestream.segment[1]))
        raise IOError(msg)
python
{ "resource": "" }
q14430
Jp2k._read_openjpeg
train
def _read_openjpeg(self, rlevel=0, verbose=False, area=None):
    """Read a JPEG 2000 image using libopenjpeg.

    Parameters
    ----------
    rlevel : int, optional
        Factor by which to reduce output resolution.  Use -1 to get the
        lowest resolution thumbnail.
    verbose : bool, optional
        Print informational messages produced by the OpenJPEG library.
    area : tuple, optional
        Specifies decoding image area,
        (first_row, first_col, last_row, last_col)

    Returns
    -------
    ndarray
        The image data.

    Raises
    ------
    RuntimeError
        If the image has differing subsample factors.
    """
    self._subsampling_sanity_check()

    self._populate_dparams(rlevel)

    with ExitStack() as stack:
        try:
            self._dparams.decod_format = self._codec_format
            dinfo = opj.create_decompress(self._dparams.decod_format)

            event_mgr = opj.EventMgrType()
            handler = ctypes.cast(_INFO_CALLBACK, ctypes.c_void_p)
            event_mgr.info_handler = handler if self.verbose else None
            event_mgr.warning_handler = ctypes.cast(_WARNING_CALLBACK,
                                                    ctypes.c_void_p)
            event_mgr.error_handler = ctypes.cast(_ERROR_CALLBACK,
                                                  ctypes.c_void_p)
            opj.set_event_mgr(dinfo, ctypes.byref(event_mgr))

            opj.setup_decoder(dinfo, self._dparams)

            with open(self.filename, 'rb') as fptr:
                src = fptr.read()
            cio = opj.cio_open(dinfo, src)

            raw_image = opj.decode(dinfo, cio)
            stack.callback(opj.image_destroy, raw_image)
            stack.callback(opj.destroy_decompress, dinfo)
            stack.callback(opj.cio_close, cio)

            image = self._extract_image(raw_image)
        except ValueError:
            opj2.check_error(0)

    if area is not None:
        x0, y0, x1, y1 = area
        extent = 2 ** rlevel
        area = [int(round(float(x) / extent + 2 ** -20)) for x in area]
        rows = slice(area[0], area[2], None)
        cols = slice(area[1], area[3], None)
        image = image[rows, cols]

    return image
python
{ "resource": "" }
q14431
Jp2k._populate_dparams
train
def _populate_dparams(self, rlevel, tile=None, area=None):
    """Populate decompression structure with appropriate input parameters.

    Parameters
    ----------
    rlevel : int
        Factor by which to reduce output resolution.
    area : tuple
        Specifies decoding image area,
        (first_row, first_col, last_row, last_col)
    tile : int
        Number of tile to decode.
    """
    if opj2.OPENJP2 is not None:
        dparam = opj2.set_default_decoder_parameters()
    else:
        dparam = opj.DecompressionParametersType()
        opj.set_default_decoder_parameters(ctypes.byref(dparam))

    infile = self.filename.encode()
    nelts = opj2.PATH_LEN - len(infile)
    infile += b'0' * nelts
    dparam.infile = infile

    # Return raw codestream components instead of "interpolating" the
    # colormap?
    dparam.flags |= 1 if self.ignore_pclr_cmap_cdef else 0

    dparam.decod_format = self._codec_format
    dparam.cp_layer = self._layer

    if rlevel != 0:
        # Must check the specified rlevel against the maximum.
        max_rlevel = self.codestream.segment[2].num_res
        if rlevel == -1:
            # -1 is shorthand for the largest rlevel
            rlevel = max_rlevel
        elif rlevel < -1 or rlevel > max_rlevel:
            msg = ("rlevel must be in the range [-1, {max_rlevel}] "
                   "for this image.")
            msg = msg.format(max_rlevel=max_rlevel)
            raise IOError(msg)

    dparam.cp_reduce = rlevel

    if area is not None:
        if area[0] < 0 or area[1] < 0 or area[2] <= 0 or area[3] <= 0:
            msg = ("The upper left corner coordinates must be nonnegative "
                   "and the lower right corner coordinates must be "
                   "positive.  The specified upper left and lower right "
                   "coordinates are ({y0}, {x0}) and ({y1}, {x1}).")
            msg = msg.format(x0=area[1], y0=area[0],
                             x1=area[3], y1=area[2])
            raise IOError(msg)
        dparam.DA_y0 = area[0]
        dparam.DA_x0 = area[1]
        dparam.DA_y1 = area[2]
        dparam.DA_x1 = area[3]

    if tile is not None:
        dparam.tile_index = tile
        dparam.nb_tile_to_decode = 1

    self._dparams = dparam
python
{ "resource": "" }
q14432
Jp2k._extract_image
train
def _extract_image(self, raw_image):
    """
    Extract unequally-sized image bands.

    Parameters
    ----------
    raw_image : reference to openjpeg ImageType instance
        The image structure initialized with image characteristics.

    Returns
    -------
    list or ndarray
        If the JPEG 2000 image has unequally-sized components, they are
        extracted into a list, otherwise a numpy array.
    """
    ncomps = raw_image.contents.numcomps

    # Make a pass thru the image, see if any of the band datatypes or
    # dimensions differ.
    dtypes, nrows, ncols = [], [], []
    for k in range(raw_image.contents.numcomps):
        component = raw_image.contents.comps[k]
        dtypes.append(self._component2dtype(component))
        nrows.append(component.h)
        ncols.append(component.w)
    is_cube = all(r == nrows[0] and c == ncols[0] and d == dtypes[0]
                  for r, c, d in zip(nrows, ncols, dtypes))

    if is_cube:
        image = np.zeros((nrows[0], ncols[0], ncomps), dtypes[0])
    else:
        image = []

    for k in range(raw_image.contents.numcomps):
        component = raw_image.contents.comps[k]

        self._validate_nonzero_image_size(nrows[k], ncols[k], k)

        addr = ctypes.addressof(component.data.contents)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            nelts = nrows[k] * ncols[k]
            band = np.ctypeslib.as_array(
                (ctypes.c_int32 * nelts).from_address(addr))
            if is_cube:
                image[:, :, k] = np.reshape(band.astype(dtypes[k]),
                                            (nrows[k], ncols[k]))
            else:
                image.append(np.reshape(band.astype(dtypes[k]),
                                        (nrows[k], ncols[k])))

    if is_cube and image.shape[2] == 1:
        # The third dimension has just a single layer.  Make the image
        # data 2D instead of 3D.
        image.shape = image.shape[0:2]

    return image
python
{ "resource": "" }
q14433
Jp2k._component2dtype
train
def _component2dtype(self, component):
    """Determine the appropriate numpy datatype for an OpenJPEG component.

    Parameters
    ----------
    component : ctypes pointer to ImageCompType (image_comp_t)
        single image component structure.

    Returns
    -------
    builtins.type
        numpy datatype to be used to construct an image array
    """
    if component.prec > 16:
        msg = "Unhandled precision: {0} bits.".format(component.prec)
        raise IOError(msg)

    if component.sgnd:
        dtype = np.int8 if component.prec <= 8 else np.int16
    else:
        dtype = np.uint8 if component.prec <= 8 else np.uint16

    return dtype
python
{ "resource": "" }
q14434
Jp2k.get_codestream
train
def get_codestream(self, header_only=True):
    """Retrieve codestream.

    Parameters
    ----------
    header_only : bool, optional
        If True, only marker segments in the main header are parsed.
        Supplying False may impose a large performance penalty.

    Returns
    -------
    Codestream
        Object describing the codestream syntax.

    Examples
    --------
    >>> import glymur
    >>> jfile = glymur.data.nemo()
    >>> jp2 = glymur.Jp2k(jfile)
    >>> codestream = jp2.get_codestream()
    >>> print(codestream.segment[1])
    SIZ marker segment @ (3233, 47)
        Profile:  no profile
        Reference Grid Height, Width:  (1456 x 2592)
        Vertical, Horizontal Reference Grid Offset:  (0 x 0)
        Reference Tile Height, Width:  (1456 x 2592)
        Vertical, Horizontal Reference Tile Offset:  (0 x 0)
        Bitdepth:  (8, 8, 8)
        Signed:  (False, False, False)
        Vertical, Horizontal Subsampling:  ((1, 1), (1, 1), (1, 1))
    """
    with open(self.filename, 'rb') as fptr:
        if self._codec_format == opj2.CODEC_J2K:
            codestream = Codestream(fptr, self.length,
                                    header_only=header_only)
        else:
            box = [x for x in self.box if x.box_id == 'jp2c']
            fptr.seek(box[0].offset)
            read_buffer = fptr.read(8)
            (box_length, _) = struct.unpack('>I4s', read_buffer)
            if box_length == 0:
                # The length of the box is presumed to last until the end
                # of the file.  Compute the effective length of the box.
                box_length = os.path.getsize(fptr.name) - fptr.tell() + 8
            elif box_length == 1:
                # Seek past the XL field.
                read_buffer = fptr.read(8)
                box_length, = struct.unpack('>Q', read_buffer)
            codestream = Codestream(fptr, box_length - 8,
                                    header_only=header_only)

        return codestream
python
{ "resource": "" }
q14435
Jp2k._populate_image_struct
train
def _populate_image_struct(self, image, imgdata):
    """Populates image struct needed for compression.

    Parameters
    ----------
    image : ImageType(ctypes.Structure)
        Corresponds to image_t type in openjp2 headers.
    imgdata : ndarray
        Image data to be written to file.
    """
    numrows, numcols, num_comps = imgdata.shape

    for k in range(num_comps):
        self._validate_nonzero_image_size(numrows, numcols, k)

    # set image offset and reference grid
    image.contents.x0 = self._cparams.image_offset_x0
    image.contents.y0 = self._cparams.image_offset_y0
    image.contents.x1 = (image.contents.x0 +
                         (numcols - 1) * self._cparams.subsampling_dx + 1)
    image.contents.y1 = (image.contents.y0 +
                         (numrows - 1) * self._cparams.subsampling_dy + 1)

    # Stage the image data to the openjpeg data structure.
    for k in range(0, num_comps):
        if self._cparams.rsiz in (core.OPJ_PROFILE_CINEMA_2K,
                                  core.OPJ_PROFILE_CINEMA_4K):
            image.contents.comps[k].prec = 12
            image.contents.comps[k].bpp = 12

        layer = np.ascontiguousarray(imgdata[:, :, k], dtype=np.int32)
        dest = image.contents.comps[k].data
        src = layer.ctypes.data
        ctypes.memmove(dest, src, layer.nbytes)

    return image
python
{ "resource": "" }
q14436
Jp2k._populate_comptparms
train
def _populate_comptparms(self, img_array):
    """Instantiate and populate comptparms structure.

    This structure defines the image components.

    Parameters
    ----------
    img_array : ndarray
        Image data to be written to file.
    """
    # Only two precisions are possible.
    if img_array.dtype == np.uint8:
        comp_prec = 8
    else:
        comp_prec = 16

    numrows, numcols, num_comps = img_array.shape
    if version.openjpeg_version_tuple[0] == 1:
        comptparms = (opj.ImageComptParmType * num_comps)()
    else:
        comptparms = (opj2.ImageComptParmType * num_comps)()
    for j in range(num_comps):
        comptparms[j].dx = self._cparams.subsampling_dx
        comptparms[j].dy = self._cparams.subsampling_dy
        comptparms[j].w = numcols
        comptparms[j].h = numrows
        comptparms[j].x0 = self._cparams.image_offset_x0
        comptparms[j].y0 = self._cparams.image_offset_y0
        comptparms[j].prec = comp_prec
        comptparms[j].bpp = comp_prec
        comptparms[j].sgnd = 0

    self._comptparms = comptparms
python
{ "resource": "" }
q14437
Jp2k._validate_nonzero_image_size
train
def _validate_nonzero_image_size(self, nrows, ncols, component_index):
    """The image cannot have area of zero.
    """
    if nrows == 0 or ncols == 0:
        # Letting this situation continue would segfault openjpeg.
        msg = "Component {0} has dimensions {1} x {2}"
        msg = msg.format(component_index, nrows, ncols)
        raise IOError(msg)
python
{ "resource": "" }
q14438
Jp2k._validate_jp2_box_sequence
train
def _validate_jp2_box_sequence(self, boxes):
    """Run through series of tests for JP2 box legality.

    This is non-exhaustive.
    """
    JP2_IDS = ['colr', 'cdef', 'cmap', 'jp2c', 'ftyp', 'ihdr', 'jp2h',
               'jP  ', 'pclr', 'res ', 'resc', 'resd', 'xml ', 'ulst',
               'uinf', 'url ', 'uuid']

    self._validate_signature_compatibility(boxes)
    self._validate_jp2h(boxes)
    self._validate_jp2c(boxes)
    if boxes[1].brand == 'jpx ':
        self._validate_jpx_box_sequence(boxes)
    else:
        # Validate the JP2 box IDs.
        count = self._collect_box_count(boxes)
        for box_id in count.keys():
            if box_id not in JP2_IDS:
                msg = ("The presence of a '{0}' box requires that the "
                       "file type brand be set to 'jpx '.")
                raise IOError(msg.format(box_id))

        self._validate_jp2_colr(boxes)
python
{ "resource": "" }
q14439
Jp2k._validate_jp2_colr
train
def _validate_jp2_colr(self, boxes):
    """
    Validate JP2 requirements on colour specification boxes.
    """
    lst = [box for box in boxes if box.box_id == 'jp2h']
    jp2h = lst[0]
    for colr in [box for box in jp2h.box if box.box_id == 'colr']:
        if colr.approximation != 0:
            msg = ("A JP2 colr box cannot have a non-zero approximation "
                   "field.")
            raise IOError(msg)
python
{ "resource": "" }
q14440
Jp2k._validate_jpx_box_sequence
train
def _validate_jpx_box_sequence(self, boxes):
    """Run through series of tests for JPX box legality."""
    self._validate_label(boxes)
    self._validate_jpx_compatibility(boxes, boxes[1].compatibility_list)
    self._validate_singletons(boxes)
    self._validate_top_level(boxes)
python
{ "resource": "" }
q14441
Jp2k._validate_signature_compatibility
train
def _validate_signature_compatibility(self, boxes):
    """Validate the file signature and compatibility status."""
    # Check for a bad sequence of boxes.
    # 1st two boxes must be 'jP  ' and 'ftyp'
    if boxes[0].box_id != 'jP  ' or boxes[1].box_id != 'ftyp':
        msg = ("The first box must be the signature box and the second "
               "must be the file type box.")
        raise IOError(msg)

    # The compatibility list must contain at a minimum 'jp2 '.
    if 'jp2 ' not in boxes[1].compatibility_list:
        msg = "The ftyp box must contain 'jp2 ' in the compatibility list."
        raise IOError(msg)
python
{ "resource": "" }
q14442
Jp2k._validate_jp2c
train
def _validate_jp2c(self, boxes):
    """Validate the codestream box in relation to other boxes."""
    # jp2c must be preceded by jp2h
    jp2h_lst = [idx for (idx, box) in enumerate(boxes)
                if box.box_id == 'jp2h']
    jp2h_idx = jp2h_lst[0]

    jp2c_lst = [idx for (idx, box) in enumerate(boxes)
                if box.box_id == 'jp2c']
    if len(jp2c_lst) == 0:
        msg = ("A codestream box must be defined in the outermost "
               "list of boxes.")
        raise IOError(msg)

    jp2c_idx = jp2c_lst[0]
    if jp2h_idx >= jp2c_idx:
        msg = "The codestream box must be preceded by a jp2 header box."
        raise IOError(msg)
python
{ "resource": "" }
q14443
Jp2k._validate_jp2h
train
def _validate_jp2h(self, boxes):
    """Validate the JP2 Header box."""
    self._check_jp2h_child_boxes(boxes, 'top-level')

    jp2h_lst = [box for box in boxes if box.box_id == 'jp2h']
    jp2h = jp2h_lst[0]

    # 1st jp2 header box cannot be empty.
    if len(jp2h.box) == 0:
        msg = "The JP2 header superbox cannot be empty."
        raise IOError(msg)

    # 1st jp2 header box must be ihdr
    if jp2h.box[0].box_id != 'ihdr':
        msg = ("The first box in the jp2 header box must be the image "
               "header box.")
        raise IOError(msg)

    # colr must be present in jp2 header box.
    colr_lst = [j for (j, box) in enumerate(jp2h.box)
                if box.box_id == 'colr']
    if len(colr_lst) == 0:
        msg = "The jp2 header box must contain a color definition box."
        raise IOError(msg)
    colr = jp2h.box[colr_lst[0]]

    self._validate_channel_definition(jp2h, colr)
python
{ "resource": "" }
q14444
Jp2k._validate_channel_definition
train
def _validate_channel_definition(self, jp2h, colr):
    """Validate the channel definition box."""
    cdef_lst = [j for (j, box) in enumerate(jp2h.box)
                if box.box_id == 'cdef']
    if len(cdef_lst) > 1:
        msg = ("Only one channel definition box is allowed in the "
               "JP2 header.")
        raise IOError(msg)
    elif len(cdef_lst) == 1:
        cdef = jp2h.box[cdef_lst[0]]
        if colr.colorspace == core.SRGB:
            if any([chan + 1 not in cdef.association or
                    cdef.channel_type[chan] != 0 for chan in [0, 1, 2]]):
                msg = ("All color channels must be defined in the "
                       "channel definition box.")
                raise IOError(msg)
        elif colr.colorspace == core.GREYSCALE:
            if 0 not in cdef.channel_type:
                msg = ("All color channels must be defined in the "
                       "channel definition box.")
                raise IOError(msg)
python
{ "resource": "" }
q14445
Jp2k._check_jp2h_child_boxes
train
def _check_jp2h_child_boxes(self, boxes, parent_box_name):
    """Certain boxes can only reside in the JP2 header."""
    JP2H_CHILDREN = set(['bpcc', 'cdef', 'cmap', 'ihdr', 'pclr'])

    box_ids = set([box.box_id for box in boxes])
    intersection = box_ids.intersection(JP2H_CHILDREN)
    if len(intersection) > 0 and parent_box_name not in ['jp2h', 'jpch']:
        msg = "A {0} box can only be nested in a JP2 header box."
        raise IOError(msg.format(list(intersection)[0]))

    # Recursively check any contained superboxes.
    for box in boxes:
        if hasattr(box, 'box'):
            self._check_jp2h_child_boxes(box.box, box.box_id)
python
{ "resource": "" }
q14446
Jp2k._collect_box_count
train
def _collect_box_count(self, boxes):
    """Count the occurrences of each box type."""
    count = Counter([box.box_id for box in boxes])

    # Add the counts in the superboxes.
    for box in boxes:
        if hasattr(box, 'box'):
            count.update(self._collect_box_count(box.box))

    return count
python
{ "resource": "" }
q14447
Jp2k._validate_singletons
train
def _validate_singletons(self, boxes):
    """Several boxes can only occur once."""
    count = self._collect_box_count(boxes)
    # Which boxes occur more than once?
    multiples = [box_id for box_id, bcount in count.items() if bcount > 1]
    if 'dtbl' in multiples:
        raise IOError('There can only be one dtbl box in a file.')
python
{ "resource": "" }
q14448
Jp2k._validate_jpx_compatibility
train
def _validate_jpx_compatibility(self, boxes, compatibility_list):
    """
    If there is a JPX box then the compatibility list must also contain
    'jpx '.
    """
    JPX_IDS = ['asoc', 'nlst']
    jpx_cl = set(compatibility_list)
    for box in boxes:
        if box.box_id in JPX_IDS:
            if len(set(['jpx ', 'jpxb']).intersection(jpx_cl)) == 0:
                msg = ("A JPX box requires that either 'jpx ' or 'jpxb' "
                       "be present in the ftyp compatibility list.")
                raise RuntimeError(msg)
        if hasattr(box, 'box'):
            # Same set of checks on any child boxes.
            self._validate_jpx_compatibility(box.box, compatibility_list)
python
{ "resource": "" }
q14449
Jp2k._validate_label
train
def _validate_label(self, boxes):
    """
    Label boxes can only be inside association, codestream headers, or
    compositing layer header boxes.
    """
    for box in boxes:
        if box.box_id != 'asoc':
            if hasattr(box, 'box'):
                for boxi in box.box:
                    if boxi.box_id == 'lbl ':
                        msg = ("A label box cannot be nested inside a "
                               "{0} box.")
                        msg = msg.format(box.box_id)
                        raise IOError(msg)
                # Same set of checks on any child boxes.
                self._validate_label(box.box)
python
{ "resource": "" }
q14450
KeenApi.fulfill
train
def fulfill(self, method, *args, **kwargs):
    """ Fulfill an HTTP request to Keen's API. """
    return getattr(self.session, method)(*args, **kwargs)
python
{ "resource": "" }
q14451
KeenApi._order_by_is_valid_or_none
train
def _order_by_is_valid_or_none(self, params):
    """
    Validates that a given order_by has proper syntax.

    :param params: Query params.
    :return: Returns True if either no order_by is present, or if the
             order_by is well-formed.
    """
    if "order_by" not in params or not params["order_by"]:
        return True

    def _order_by_dict_is_not_well_formed(d):
        if not isinstance(d, dict):
            # Bad type.
            return True
        if "property_name" in d and d["property_name"]:
            if "direction" in d and \
                    not direction.is_valid_direction(d["direction"]):
                # Bad direction provided.
                return True
            for k in d:
                if k != "property_name" and k != "direction":
                    # Unexpected key.
                    return True
            # Everything looks good!
            return False
        # Missing required key.
        return True

    # order_by is converted to a list before this point if it wasn't one
    # before.
    order_by_list = json.loads(params["order_by"])
    for order_by in order_by_list:
        if _order_by_dict_is_not_well_formed(order_by):
            return False

    if "group_by" not in params or not params["group_by"]:
        # We must have group_by to have order_by make sense.
        return False

    return True
python
{ "resource": "" }
q14452
KeenApi._limit_is_valid_or_none
train
def _limit_is_valid_or_none(self, params):
    """
    Validates that a given limit is not present or is well-formed.

    :param params: Query params.
    :return: Returns True if no limit is present, or if the limit is
             well-formed.
    """
    if "limit" not in params or not params["limit"]:
        return True

    if not isinstance(params["limit"], int) or params["limit"] < 1:
        return False

    if "order_by" not in params:
        return False

    return True
python
{ "resource": "" }
q14453
KeenApi.query
train
def query(self, analysis_type, params, all_keys=False):
    """
    Performs a query using the Keen IO analysis API.  A read key must be
    set first.
    """
    if not self._order_by_is_valid_or_none(params):
        raise ValueError("order_by given is invalid or is missing "
                         "required group_by.")
    if not self._limit_is_valid_or_none(params):
        raise ValueError("limit given is invalid or is missing required "
                         "order_by.")

    url = "{0}/{1}/projects/{2}/queries/{3}".format(self.base_url,
                                                    self.api_version,
                                                    self.project_id,
                                                    analysis_type)
    headers = utilities.headers(self.read_key)
    payload = params
    response = self.fulfill(HTTPMethods.GET, url, params=payload,
                            headers=headers, timeout=self.get_timeout)
    self._error_handling(response)

    response = response.json()

    if not all_keys:
        response = response["result"]

    return response
python
{ "resource": "" }
q14454
KeenApi.delete_events
train
def delete_events(self, event_collection, params):
    """
    Deletes events via the Keen IO API.  A master key must be set first.

    :param event_collection: string, the event collection from which
                             events are being deleted
    """
    url = "{0}/{1}/projects/{2}/events/{3}".format(self.base_url,
                                                   self.api_version,
                                                   self.project_id,
                                                   event_collection)
    headers = utilities.headers(self.master_key)
    response = self.fulfill(HTTPMethods.DELETE, url, params=params,
                            headers=headers, timeout=self.post_timeout)
    self._error_handling(response)
    return True
python
{ "resource": "" }
q14455
KeenApi.get_collection
train
def get_collection(self, event_collection):
    """
    Extracts info about a collection using the Keen IO API.  A read key
    must be set first.

    :param event_collection: the name of the collection to retrieve
                             info for
    """
    url = "{0}/{1}/projects/{2}/events/{3}".format(self.base_url,
                                                   self.api_version,
                                                   self.project_id,
                                                   event_collection)
    headers = utilities.headers(self.read_key)
    response = self.fulfill(HTTPMethods.GET, url, headers=headers,
                            timeout=self.get_timeout)
    self._error_handling(response)
    return response.json()
python
{ "resource": "" }
q14456
KeenApi._update_access_key_pair
train
def _update_access_key_pair(self, access_key_id, key, val):
    """
    Helper for updating access keys in a DRY fashion.
    """
    # Get current state via HTTPS.
    current_access_key = self.get_access_key(access_key_id)

    # Copy and only change the single parameter.
    payload_dict = KeenApi._build_access_key_dict(current_access_key)
    payload_dict[key] = val

    # Now just treat it like a full update.
    return self.update_access_key_full(access_key_id, **payload_dict)
python
{ "resource": "" }
q14457
KeenApi.add_access_key_permissions
train
def add_access_key_permissions(self, access_key_id, permissions):
    """
    Adds to the existing list of permissions on this key with the
    contents of this list.  Will not remove any existing permissions or
    modify the remainder of the key.

    :param access_key_id: the 'key' value of the access key to add
                          permissions to
    :param permissions: the new permissions to add to the existing list
                        of permissions
    """
    # Get current state via HTTPS.
    current_access_key = self.get_access_key(access_key_id)

    # Copy and only change the single parameter.
    payload_dict = KeenApi._build_access_key_dict(current_access_key)

    # Turn into sets to avoid duplicates.
    old_permissions = set(payload_dict["permitted"])
    new_permissions = set(permissions)
    combined_permissions = old_permissions.union(new_permissions)
    payload_dict["permitted"] = list(combined_permissions)

    # Now just treat it like a full update.
    return self.update_access_key_full(access_key_id, **payload_dict)
python
{ "resource": "" }
q14458
KeenApi.remove_access_key_permissions
train
def remove_access_key_permissions(self, access_key_id, permissions):
    """
    Removes a list of permissions from the existing list of permissions.
    Will not remove all existing permissions unless all such permissions
    are included in this list.  Not to be confused with key revocation.

    See also: revoke_access_key()

    :param access_key_id: the 'key' value of the access key to remove
                          some permissions from
    :param permissions: the permissions you wish to remove from this
                        access key
    """
    # Get current state via HTTPS.
    current_access_key = self.get_access_key(access_key_id)

    # Copy and only change the single parameter.
    payload_dict = KeenApi._build_access_key_dict(current_access_key)

    # Turn into sets to avoid duplicates.
    old_permissions = set(payload_dict["permitted"])
    removal_permissions = set(permissions)
    reduced_permissions = old_permissions.difference(removal_permissions)
    payload_dict["permitted"] = list(reduced_permissions)

    # Now just treat it like a full update.
    return self.update_access_key_full(access_key_id, **payload_dict)
python
{ "resource": "" }
q14459
KeenApi._error_handling
train
def _error_handling(self, res):
    """
    Helper function to do the error handling

    :params res: the response from a request
    """
    # Generic error handling: any status code outside the 2xx range
    # raises an error.
    if res.status_code // 100 != 2:
        error = self._get_response_json(res)
        raise exceptions.KeenApiError(error)
python
{ "resource": "" }
q14460
KeenApi._get_response_json
train
def _get_response_json(self, res):
    """
    Helper function to extract the JSON body out of a response OR throw
    an exception.

    :param res: the response from a request
    :return: the JSON body OR throws an exception
    """
    try:
        error = res.json()
    except ValueError:
        error = {
            "message": "The API did not respond with JSON, "
                       "but: {0}".format(res.text[:1000]),
            "error_code": "{0}".format(res.status_code)
        }
    return error
python
{ "resource": "" }
q14461
CachedDatasetsInterface.all
train
def all(self):
    """
    Fetch all Cached Datasets for a Project.

    Master key must be set.
    """
    return self._get_json(HTTPMethods.GET, self._cached_datasets_url,
                          self._get_master_key())
python
{ "resource": "" }
q14462
CachedDatasetsInterface.get
train
def get(self, dataset_name):
    """
    Fetch a single Cached Dataset for a Project.

    Read key must be set.

    :param dataset_name: Name of Cached Dataset (not `display_name`)
    """
    url = "{0}/{1}".format(self._cached_datasets_url, dataset_name)
    return self._get_json(HTTPMethods.GET, url, self._get_read_key())
python
{ "resource": "" }
q14463
CachedDatasetsInterface.create
train
def create(self, dataset_name, query, index_by, display_name):
    """
    Create a Cached Dataset for a Project.

    Master key must be set.
    """
    url = "{0}/{1}".format(self._cached_datasets_url, dataset_name)
    payload = {
        "query": query,
        "index_by": index_by,
        "display_name": display_name
    }

    return self._get_json(HTTPMethods.PUT, url, self._get_master_key(),
                          json=payload)
python
{ "resource": "" }
q14464
CachedDatasetsInterface.results
train
def results(self, dataset_name, index_by, timeframe):
    """
    Retrieve results from a Cached Dataset.

    Read key must be set.
    """
    url = "{0}/{1}/results".format(self._cached_datasets_url, dataset_name)
    index_by = index_by if isinstance(index_by, str) \
        else json.dumps(index_by)
    timeframe = timeframe if isinstance(timeframe, str) \
        else json.dumps(timeframe)
    query_params = {
        "index_by": index_by,
        "timeframe": timeframe
    }

    return self._get_json(
        HTTPMethods.GET, url, self._get_read_key(), params=query_params
    )
python
{ "resource": "" }
q14465
CachedDatasetsInterface.delete
train
def delete(self, dataset_name):
    """
    Delete a Cached Dataset.

    Master Key must be set.
    """
    url = "{0}/{1}".format(self._cached_datasets_url, dataset_name)
    self._get_json(HTTPMethods.DELETE, url, self._get_master_key())
    return True
python
{ "resource": "" }
q14466
switch.match
train
def match(self, *args):
    """Whether or not to enter a given case statement"""
    self.fall = self.fall or not args
    self.fall = self.fall or (self.value in args)
    return self.fall
python
{ "resource": "" }
q14467
glymurrc_fname
train
def glymurrc_fname():
    """Return the path to the configuration file.

    Search order:
        1) current working directory
        2) environ var XDG_CONFIG_HOME
        3) $HOME/.config/glymur/glymurrc
    """
    # Current directory.
    fname = os.path.join(os.getcwd(), 'glymurrc')
    if os.path.exists(fname):
        return fname

    confdir = get_configdir()
    if confdir is not None:
        fname = os.path.join(confdir, 'glymurrc')
        if os.path.exists(fname):
            return fname

    # didn't find a configuration file.
    return None
python
{ "resource": "" }
q14468
load_library_handle
train
def load_library_handle(libname, path):
    """Load the library, return the ctypes handle."""

    if path is None or path in ['None', 'none']:
        # Either could not find a library via ctypes or
        # user-configuration-file, or we could not find it in any of the
        # default locations, or possibly the user intentionally does not
        # want one of the libraries to load.
        return None

    try:
        if os.name == "nt":
            opj_lib = ctypes.windll.LoadLibrary(path)
        else:
            opj_lib = ctypes.CDLL(path)
    except (TypeError, OSError):
        msg = 'The {libname} library at {path} could not be loaded.'
        msg = msg.format(path=path, libname=libname)
        warnings.warn(msg, UserWarning)
        opj_lib = None

    return opj_lib
python
{ "resource": "" }
q14469
read_config_file
train
def read_config_file(libname):
    """
    Extract library locations from a configuration file.

    Parameters
    ----------
    libname : str
        One of either 'openjp2' or 'openjpeg'

    Returns
    -------
    path : None or str
        None if no location is specified, otherwise a path to the library
    """
    filename = glymurrc_fname()
    if filename is None:
        # There's no library file path to return in this case.
        return None

    # Read the configuration file for the library location.
    parser = ConfigParser()
    parser.read(filename)
    try:
        path = parser.get('library', libname)
    except (NoOptionError, NoSectionError):
        path = None
    return path
python
{ "resource": "" }
q14470
glymur_config
train
def glymur_config():
    """
    Try to ascertain locations of openjp2, openjpeg libraries.

    Returns
    -------
    tuple
        tuple of library handles
    """
    handles = (load_openjpeg_library(x) for x in ['openjp2', 'openjpeg'])
    handles = tuple(handles)
    if all(handle is None for handle in handles):
        msg = "Neither the openjp2 nor the openjpeg library could be loaded."
        warnings.warn(msg)
    return handles
python
{ "resource": "" }
q14471
get_configdir
train
def get_configdir():
    """Return string representing the configuration directory.

    Default is $HOME/.config/glymur.  You can override this with the
    XDG_CONFIG_HOME environment variable.
    """
    if 'XDG_CONFIG_HOME' in os.environ:
        return os.path.join(os.environ['XDG_CONFIG_HOME'], 'glymur')

    if 'HOME' in os.environ and os.name != 'nt':
        # HOME is set by WinPython to something unusual, so we don't
        # necessarily want that.
        return os.path.join(os.environ['HOME'], '.config', 'glymur')

    # Last stand.  Should handle windows... others?
    return os.path.join(os.path.expanduser('~'), 'glymur')
python
{ "resource": "" }
q14472
set_option
train
def set_option(key, value):
    """Set the value of the specified option.

    Available options:

        parse.full_codestream
        print.xml
        print.codestream
        print.short

    Parameters
    ----------
    key : str
        Name of a single option.
    value :
        New value of option.

    Option Descriptions
    -------------------
    parse.full_codestream : bool
        When False, only the codestream header is parsed for metadata.
        This can result in faster JP2/JPX parsing.  When True, the entire
        codestream is parsed.  [default: False]
    print.codestream : bool
        When False, the codestream segments are not printed.  Otherwise
        the segments are printed depending on the value of the
        parse.full_codestream option.  [default: True]
    print.short : bool
        When True, only the box ID, offset, and length are displayed.
        Useful for displaying only the basic structure or skeleton of a
        JPEG 2000 file.  [default: False]
    print.xml : bool
        When False, printing of the XML contents of any XML boxes or UUID
        XMP boxes is suppressed.  [default: True]

    See also
    --------
    get_option
    """
    if key not in _options.keys():
        raise KeyError('{key} not valid.'.format(key=key))
    _options[key] = value
python
{ "resource": "" }
q14473
reset_option
train
def reset_option(key):
    """
    Reset one or more options to their default value.

    Pass "all" as argument to reset all options.

    Available options:

        parse.full_codestream
        print.xml
        print.codestream
        print.short

    Parameters
    ----------
    key : str
        Name of a single option.
    """
    global _options
    if key == 'all':
        _options = copy.deepcopy(_original_options)
    else:
        if key not in _options.keys():
            raise KeyError('{key} not valid.'.format(key=key))
        _options[key] = _original_options[key]
python
{ "resource": "" }
q14474
set_printoptions
train
def set_printoptions(**kwargs):
    """Set printing options.

    These options determine the way JPEG 2000 boxes are displayed.

    Parameters
    ----------
    short : bool, optional
        When True, only the box ID, offset, and length are displayed.
        Useful for displaying only the basic structure or skeleton of a
        JPEG 2000 file.
    xml : bool, optional
        When False, printing of the XML contents of any XML boxes or UUID
        XMP boxes is suppressed.
    codestream : bool, optional
        When False, the codestream segments are not printed.  Otherwise
        the segments are printed depending on how set_parseoptions has
        been used.

    See also
    --------
    get_printoptions

    Examples
    --------
    To put back the default options, you can use:

    >>> import glymur
    >>> glymur.set_printoptions(short=False, xml=True, codestream=True)
    """
    warnings.warn('Use set_option instead of set_printoptions.',
                  DeprecationWarning)
    for key, value in kwargs.items():
        if key not in ['short', 'xml', 'codestream']:
            raise KeyError('"{0}" not a valid keyword parameter.'.format(key))
        set_option('print.' + key, value)
python
{ "resource": "" }
q14475
get_printoptions
train
def get_printoptions():
    """Return the current print options.

    Returns
    -------
    dict
        Dictionary of current print options with keys

          - short : bool
          - xml : bool
          - codestream : bool

        For a full description of these options, see `set_printoptions`.

    See also
    --------
    set_printoptions
    """
    warnings.warn('Use get_option instead of get_printoptions.',
                  DeprecationWarning)
    d = {}
    for key in ['short', 'xml', 'codestream']:
        d[key] = _options['print.' + key]
    return d
python
{ "resource": "" }
q14476
main
train
def main():
    """
    Entry point for console script jp2dump.
    """
    kwargs = {'description': 'Print JPEG2000 metadata.',
              'formatter_class': argparse.ArgumentDefaultsHelpFormatter}
    parser = argparse.ArgumentParser(**kwargs)

    parser.add_argument('-x', '--noxml',
                        help='suppress XML',
                        action='store_true')
    parser.add_argument('-s', '--short',
                        help='only print box id, offset, and length',
                        action='store_true')

    chelp = 'Level of codestream information.  0 suppresses all details, '
    chelp += '1 prints the main header, 2 prints the full codestream.'
    parser.add_argument('-c', '--codestream',
                        help=chelp,
                        metavar='LEVEL',
                        nargs=1,
                        type=int,
                        default=[1])

    parser.add_argument('filename')

    args = parser.parse_args()

    if args.noxml:
        set_option('print.xml', False)
    if args.short:
        set_option('print.short', True)

    codestream_level = args.codestream[0]
    if codestream_level not in [0, 1, 2]:
        raise ValueError("Invalid level of codestream information specified.")

    if codestream_level == 0:
        set_option('print.codestream', False)
    elif codestream_level == 2:
        set_option('parse.full_codestream', True)

    filename = args.filename

    # JP2 metadata can be extensive, so don't print any warnings until we
    # are done with the metadata.
    with warnings.catch_warnings(record=True) as wctx:
        jp2 = Jp2k(filename)

        if jp2._codec_format == lib.openjp2.CODEC_J2K:
            if codestream_level == 0:
                print('File: {0}'.format(os.path.basename(filename)))
            elif codestream_level == 1:
                print(jp2)
            elif codestream_level == 2:
                print('File: {0}'.format(os.path.basename(filename)))
                print(jp2.get_codestream(header_only=False))
        else:
            print(jp2)

        # Now re-emit any suppressed warnings.
        if len(wctx) > 0:
            print("\n")
            for warning in wctx:
                print("{0}:{1}: {2}: {3}".format(warning.filename,
                                                 warning.lineno,
                                                 warning.category.__name__,
                                                 warning.message))
python
{ "resource": "" }
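Because main() reads its options from sys.argv, the console script can be sketched in-process; the module path glymur.jp2dump and the filename are assumptions:

import sys

from glymur import jp2dump  # assumed module path for this entry point

# Equivalent to the shell invocation:  jp2dump -s -c 0 file.jp2
sys.argv = ['jp2dump', '-s', '-c', '0', 'file.jp2']  # placeholder filename
jp2dump.main()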
q14477
CerberusClient._set_token
train
def _set_token(self):
    """Set the Cerberus token based on auth type"""
    try:
        self.token = os.environ['CERBERUS_TOKEN']
        if self.verbose:
            print("Overriding Cerberus token with environment variable.",
                  file=sys.stderr)
        logger.info("Overriding Cerberus token with environment variable.")
        return
    except KeyError:
        pass

    if self.username:
        ua = UserAuth(self.cerberus_url, self.username, self.password)
        self.token = ua.get_token()
    else:
        awsa = AWSAuth(self.cerberus_url, region=self.region,
                       aws_session=self.aws_session, verbose=self.verbose)
        self.token = awsa.get_token()
python
{ "resource": "" }
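The environment check in _set_token suggests this sketch for bypassing user and AWS auth entirely; the token value is a placeholder:

import os

# _set_token consults CERBERUS_TOKEN before trying UserAuth or AWSAuth,
# so an exported token short-circuits both auth paths.
os.environ['CERBERUS_TOKEN'] = 'example-token'  # placeholder value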
q14478
CerberusClient.get_role
train
def get_role(self, key):
    """Return id of named role."""
    json_resp = self.get_roles()

    for item in json_resp:
        if key in item["name"]:
            return item["id"]

    raise CerberusClientException("Key '%s' not found" % key)
python
{ "resource": "" }
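A usage sketch; the import path and URL are assumptions, and note that the membership test above matches any role whose name contains the key:

from cerberus.client import CerberusClient  # assumed import path

client = CerberusClient('https://cerberus.example.com')  # placeholder URL
role_id = client.get_role('write')  # matches the first role containing 'write'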
q14479
CerberusClient.get_categories
train
def get_categories(self):
    """Return a list of categories that a safe deposit box can belong to"""
    sdb_resp = get_with_retry(self.cerberus_url + '/v1/category',
                              headers=self.HEADERS)
    throw_if_bad_response(sdb_resp)
    return sdb_resp.json()
python
{ "resource": "" }
q14480
CerberusClient.create_sdb
train
def create_sdb(self, name, category_id, owner, description="",
               user_group_permissions=None, iam_principal_permissions=None):
    """Create a safe deposit box.

    You need to refresh your token before the iam role is granted
    permission to the new safe deposit box.

    Keyword arguments:

        name (string) -- name of the safe deposit box
        category_id (string) -- category id that determines where to
                                store the sdb. (ex: shared, applications)
        owner (string) -- AD group that owns the safe deposit box
        description (string) -- Description of the safe deposit box
        user_group_permissions (list) -- list of dictionaries containing
                                         the key name and maybe role_id
        iam_principal_permissions (list) -- list of dictionaries
                                            containing the key name
                                            iam_principal_arn and role_id
    """
    # Do some sanity checking
    if user_group_permissions is None:
        user_group_permissions = []
    if iam_principal_permissions is None:
        iam_principal_permissions = []
    if list != type(user_group_permissions):
        raise TypeError('Expected list, but got ' +
                        str(type(user_group_permissions)))
    if list != type(iam_principal_permissions):
        raise TypeError('Expected list, but got ' +
                        str(type(iam_principal_permissions)))

    temp_data = {
        "name": name,
        "description": description,
        "category_id": category_id,
        "owner": owner,
    }
    if len(user_group_permissions) > 0:
        temp_data["user_group_permissions"] = user_group_permissions
    if len(iam_principal_permissions) > 0:
        temp_data["iam_principal_permissions"] = iam_principal_permissions

    data = json.encoder.JSONEncoder().encode(temp_data)

    sdb_resp = post_with_retry(self.cerberus_url + '/v2/safe-deposit-box',
                               data=str(data), headers=self.HEADERS)
    throw_if_bad_response(sdb_resp)
    return sdb_resp.json()
python
{ "resource": "" }
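A sketch of creating a box with one IAM principal permission; the URL, category id, AD group, and ARN are all placeholders, and the permission keys follow the docstring above:

from cerberus.client import CerberusClient  # assumed import path

client = CerberusClient('https://cerberus.example.com')  # placeholder URL

sdb = client.create_sdb(
    'example-sdb',                                # name
    '11111111-2222-3333-4444-555555555555',       # placeholder category id
    'app.example.admins',                         # placeholder AD owner group
    description='Example safe deposit box',
    iam_principal_permissions=[{
        'iam_principal_arn': 'arn:aws:iam::123456789012:role/example',  # placeholder
        'role_id': client.get_role('read'),
    }],
)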
q14481
CerberusClient.delete_sdb
train
def delete_sdb(self, sdb_id):
    """Delete a safe deposit box specified by id

    Keyword arguments:
        sdb_id -- this is the id of the safe deposit box, not the path.
    """
    sdb_resp = delete_with_retry(
        self.cerberus_url + '/v2/safe-deposit-box/' + sdb_id,
        headers=self.HEADERS
    )
    throw_if_bad_response(sdb_resp)
    return sdb_resp
python
{ "resource": "" }
q14482
CerberusClient.get_sdb_path
train
def get_sdb_path(self, sdb):
    """Return the path for an SDB"""
    sdb_id = self.get_sdb_id(sdb)
    sdb_resp = get_with_retry(
        self.cerberus_url + '/v1/safe-deposit-box/' + sdb_id + '/',
        headers=self.HEADERS
    )
    throw_if_bad_response(sdb_resp)
    return sdb_resp.json()['path']
python
{ "resource": "" }
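A sketch chaining the lookup helpers defined in this class; the SDB name and URL are placeholders:

from cerberus.client import CerberusClient  # assumed import path

client = CerberusClient('https://cerberus.example.com')  # placeholder URL

path = client.get_sdb_path('example-sdb')  # placeholder SDB name
details = client.get_sdb_by_id(client.get_sdb_id('example-sdb'))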
q14483
CerberusClient.get_sdb_keys
train
def get_sdb_keys(self, path):
    """Return the keys for an SDB, which are needed for the full secure data path"""
    list_resp = get_with_retry(
        self.cerberus_url + '/v1/secret/' + path + '/?list=true',
        headers=self.HEADERS
    )
    throw_if_bad_response(list_resp)
    return list_resp.json()['data']['keys']
python
{ "resource": "" }
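Combining get_sdb_path and get_sdb_keys gives the full secure data paths; everything but the two methods is a placeholder:

from cerberus.client import CerberusClient  # assumed import path

client = CerberusClient('https://cerberus.example.com')  # placeholder URL

path = client.get_sdb_path('example-sdb')  # placeholder SDB name
for key in client.get_sdb_keys(path):
    # get_sdb_path appears to return a trailing slash, so simple
    # concatenation yields the full secure data path
    print(path + key)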
q14484
CerberusClient.get_sdb_id
train
def get_sdb_id(self, sdb):
    """Return the ID for the given safe deposit box.

    Keyword arguments:
        sdb -- This is the name of the safe deposit box, not the path
    """
    json_resp = self.get_sdbs()

    for r in json_resp:
        if r['name'] == sdb:
            return str(r['id'])

    # If we haven't returned yet then we didn't find what we were
    # looking for.
    raise CerberusClientException("'%s' not found" % sdb)
python
{ "resource": "" }
q14485
CerberusClient.get_sdb_id_by_path
train
def get_sdb_id_by_path(self, sdb_path):
    """Given the path, return the ID for the given safe deposit box."""
    json_resp = self.get_sdbs()

    # Deal with the supplied path possibly missing an ending slash
    path = self._add_slash(sdb_path)

    for r in json_resp:
        if r['path'] == path:
            return str(r['id'])

    # If we haven't returned yet then we didn't find what we were
    # looking for.
    raise CerberusClientException("'%s' not found" % sdb_path)
python
{ "resource": "" }
q14486
CerberusClient.get_sdb_by_id
train
def get_sdb_by_id(self, sdb_id):
    """Return the details for the given safe deposit box id

    Keyword arguments:
        sdb_id -- this is the id of the safe deposit box, not the path.
    """
    sdb_resp = get_with_retry(
        self.cerberus_url + '/v2/safe-deposit-box/' + sdb_id,
        headers=self.HEADERS
    )
    throw_if_bad_response(sdb_resp)
    return sdb_resp.json()
python
{ "resource": "" }
q14487
CerberusClient.get_sdb_secret_version_paths
train
def get_sdb_secret_version_paths(self, sdb_id):
    """Get SDB secret version paths.  This function takes the sdb_id."""
    sdb_resp = get_with_retry(
        str.join('', [self.cerberus_url, '/v1/sdb-secret-version-paths/', sdb_id]),
        headers=self.HEADERS
    )
    throw_if_bad_response(sdb_resp)
    return sdb_resp.json()
python
{ "resource": "" }
q14488
CerberusClient.list_sdbs
train
def list_sdbs(self):
    """Return the names of all safe deposit boxes"""
    sdb_raw = self.get_sdbs()
    sdbs = []
    for s in sdb_raw:
        sdbs.append(s['name'])
    return sdbs
python
{ "resource": "" }
q14489
CerberusClient.update_sdb
train
def update_sdb(self, sdb_id, owner=None, description=None,
               user_group_permissions=None, iam_principal_permissions=None):
    """Update a safe deposit box.

    Keyword arguments:

        owner (string) -- AD group that owns the safe deposit box
        description (string) -- Description of the safe deposit box
        user_group_permissions (list) -- list of dictionaries containing
                                         the key name and maybe role_id
        iam_principal_permissions (list) -- list of dictionaries
                                            containing the key name
                                            iam_principal_arn and role_id
    """
    # Grab current data
    old_data = self.get_sdb_by_id(sdb_id)

    # Assemble information to update
    temp_data = {}
    keys = ('owner', 'description', 'iam_principal_permissions',
            'user_group_permissions')
    for k in keys:
        if k in old_data:
            temp_data[k] = old_data[k]

    if owner is not None:
        temp_data["owner"] = owner
    if description is not None:
        temp_data["description"] = description
    if user_group_permissions is not None and len(user_group_permissions) > 0:
        temp_data["user_group_permissions"] = user_group_permissions
    if iam_principal_permissions is not None and len(iam_principal_permissions) > 0:
        temp_data["iam_principal_permissions"] = iam_principal_permissions

    data = json.encoder.JSONEncoder().encode(temp_data)

    sdb_resp = put_with_retry(
        self.cerberus_url + '/v2/safe-deposit-box/' + sdb_id,
        data=str(data), headers=self.HEADERS
    )
    throw_if_bad_response(sdb_resp)
    return sdb_resp.json()
python
{ "resource": "" }
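A sketch of a partial update; since update_sdb merges the existing record first, only the changed fields need to be passed. URL and names are placeholders:

from cerberus.client import CerberusClient  # assumed import path

client = CerberusClient('https://cerberus.example.com')  # placeholder URL

sdb_id = client.get_sdb_id('example-sdb')  # placeholder SDB name
client.update_sdb(sdb_id,
                  owner='app.example.new-admins',  # placeholder AD group
                  description='Ownership rotated')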
q14490
CerberusClient.delete_file
train
def delete_file(self, secure_data_path):
    """Delete a file at the given secure data path"""
    secret_resp = delete_with_retry(
        self.cerberus_url + '/v1/secure-file/' + secure_data_path,
        headers=self.HEADERS
    )
    throw_if_bad_response(secret_resp)
    return secret_resp
python
{ "resource": "" }
q14491
CerberusClient.get_file_metadata
train
def get_file_metadata(self, secure_data_path, version=None):
    """Get just the metadata for a file, not the content"""
    if not version:
        version = "CURRENT"
    payload = {'versionId': str(version)}

    secret_resp = head_with_retry(
        str.join('', [self.cerberus_url, '/v1/secure-file/', secure_data_path]),
        params=payload, headers=self.HEADERS
    )
    throw_if_bad_response(secret_resp)
    return secret_resp.headers
python
{ "resource": "" }
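A sketch of inspecting a file without downloading it; the path is a placeholder, and the header names are assumptions about what the HEAD response carries:

from cerberus.client import CerberusClient  # assumed import path

client = CerberusClient('https://cerberus.example.com')  # placeholder URL

meta = client.get_file_metadata('app/example-sdb/config.pem')  # placeholder path
print(meta.get('Content-Length'), meta.get('Content-Disposition'))  # assumed headers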
q14492
CerberusClient._parse_metadata_filename
train
def _parse_metadata_filename(self, metadata):
    """Parse the header metadata to pull out the filename and then store
    it under the key 'filename'
    """
    index = metadata['Content-Disposition'].index('=') + 1
    metadata['filename'] = metadata['Content-Disposition'][index:].replace('"', '')
    return metadata
python
{ "resource": "" }
q14493
CerberusClient.get_file_versions
train
def get_file_versions(self, secure_data_path, limit=None, offset=None):
    """Get versions of a particular file.

    This is just a shim to get_secret_versions.

    Keyword arguments:
        secure_data_path -- full path to the file in the safe deposit box
        limit -- Default(100), limits how many records to be returned
                 from the api at once.
        offset -- Default(0), used for pagination.  Will request records
                  from the given offset.
    """
    return self.get_secret_versions(secure_data_path, limit, offset)
python
{ "resource": "" }
q14494
CerberusClient._get_all_file_versions
train
def _get_all_file_versions(self, secure_data_path, limit=None):
    """Convenience function that returns a generator yielding the
    contents of all versions of a file and its version info.

    Keyword arguments:
        secure_data_path -- full path to the file in the safe deposit box
        limit -- Default(100), limits how many records to be returned
                 from the api at once.
    """
    for secret in self._get_all_file_version_ids(secure_data_path, limit):
        yield {'secret': self.get_file_data(secure_data_path, version=secret['id']),
               'version': secret}
python
{ "resource": "" }
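A sketch of walking the generator; note this is a private helper, the path is a placeholder, and treating each 'secret' as bytes assumes get_file_data returns raw content:

from cerberus.client import CerberusClient  # assumed import path

client = CerberusClient('https://cerberus.example.com')  # placeholder URL

for item in client._get_all_file_versions('app/example-sdb/config.pem'):
    # Each item pairs the file content with its version record
    print(item['version']['id'], len(item['secret']))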
q14495
CerberusClient.list_files
train
def list_files(self, secure_data_path, limit=None, offset=None):
    """Return the list of files in the path.  May need to be paginated."""
    # Make sure that limit and offset are in range.
    # Set the normal defaults if not.
    if not limit or limit <= 0:
        limit = 100
    if not offset or offset < 0:
        offset = 0

    payload = {'limit': str(limit), 'offset': str(offset)}

    # Because of the addition of versionId and the way URLs are
    # constructed, secure_data_path should always end in a '/'.
    secure_data_path = self._add_slash(secure_data_path)

    secret_resp = get_with_retry(
        self.cerberus_url + '/v1/secure-files/' + secure_data_path,
        params=payload, headers=self.HEADERS
    )
    throw_if_bad_response(secret_resp)
    return secret_resp.json()
python
{ "resource": "" }
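A pagination sketch; the response keys (has_next, next_offset, secure_file_summaries) are assumptions about the API payload, and the path is a placeholder:

from cerberus.client import CerberusClient  # assumed import path

client = CerberusClient('https://cerberus.example.com')  # placeholder URL

offset = 0
while True:
    page = client.list_files('app/example-sdb', limit=100, offset=offset)
    for summary in page.get('secure_file_summaries', []):  # assumed key
        print(summary)
    if not page.get('has_next'):                           # assumed key
        break
    offset = page.get('next_offset', offset + 100)         # assumed key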
q14496
CerberusClient.put_file
train
def put_file(self, secure_data_path, filehandle, content_type=None):
    """Upload a file to the secure data path provided.

    Keyword arguments:
        secure_data_path -- full path in the safe deposit box that
                            contains the file key to store things under
        filehandle -- Pass an opened filehandle to the file you want to
                      upload.  Make sure that the file was opened in
                      binary mode, otherwise the size calculations can
                      be off for text files.
        content_type -- Optional.  Set the Mime type of the file you're
                        uploading.
    """
    # Parse out the filename from the path
    filename = secure_data_path.rsplit('/', 1)[-1]

    if content_type:
        data = {'file-content': (filename, filehandle, content_type)}
    else:
        data = {'file-content': (filename, filehandle)}

    headers = self.HEADERS.copy()
    if 'Content-Type' in headers:
        del headers['Content-Type']

    secret_resp = post_with_retry(
        self.cerberus_url + '/v1/secure-file/' + secure_data_path,
        files=data, headers=headers
    )
    throw_if_bad_response(secret_resp)
    return secret_resp
python
{ "resource": "" }
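A sketch of an upload; per the docstring the file must be opened in binary mode. The local filename, remote path, and MIME type are placeholders:

from cerberus.client import CerberusClient  # assumed import path

client = CerberusClient('https://cerberus.example.com')  # placeholder URL

with open('config.pem', 'rb') as fh:  # binary mode, per the docstring
    client.put_file('app/example-sdb/config.pem', fh,
                    content_type='application/x-pem-file')  # placeholders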
q14497
CerberusClient.get_secret_versions
train
def get_secret_versions(self, secure_data_path, limit=None, offset=None):
    """Get versions of a particular secret key.

    Keyword arguments:
        secure_data_path -- full path to the key in the safe deposit box
        limit -- Default(100), limits how many records to be returned
                 from the api at once.
        offset -- Default(0), used for pagination.  Will request records
                  from the given offset.
    """
    # Make sure that limit and offset are in range.
    # Set the normal defaults if not.
    if not limit or limit <= 0:
        limit = 100
    if not offset or offset < 0:
        offset = 0

    payload = {'limit': str(limit), 'offset': str(offset)}

    secret_resp = get_with_retry(
        str.join('', [self.cerberus_url, '/v1/secret-versions/', secure_data_path]),
        params=payload, headers=self.HEADERS
    )
    throw_if_bad_response(secret_resp)
    return secret_resp.json()
python
{ "resource": "" }
q14498
CerberusClient.list_secrets
train
def list_secrets(self, secure_data_path):
    """Return json secrets based on the secure_data_path; this will list
    keys in a folder.
    """
    # Because of the addition of versionId and the way URLs are
    # constructed, secure_data_path should always end in a '/'.
    secure_data_path = self._add_slash(secure_data_path)

    secret_resp = get_with_retry(
        self.cerberus_url + '/v1/secret/' + secure_data_path + '?list=true',
        headers=self.HEADERS
    )
    throw_if_bad_response(secret_resp)
    return secret_resp.json()
python
{ "resource": "" }
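A sketch of listing a folder; the 'data'/'keys' structure mirrors what get_sdb_keys above reads from the same endpoint. Path and URL are placeholders:

from cerberus.client import CerberusClient  # assumed import path

client = CerberusClient('https://cerberus.example.com')  # placeholder URL

listing = client.list_secrets('app/example-sdb')  # placeholder path
print(listing['data']['keys'])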
q14499
AWSAuth._get_v4_signed_headers
train
def _get_v4_signed_headers(self):
    """Returns V4 signed get-caller-identity request headers"""
    if self.aws_session is None:
        boto_session = session.Session()
        creds = boto_session.get_credentials()
    else:
        creds = self.aws_session.get_credentials()

    if creds is None:
        raise CerberusClientException("Unable to locate AWS credentials")

    readonly_credentials = creds.get_frozen_credentials()

    # Hardcode the get-caller-identity request
    data = OrderedDict((('Action', 'GetCallerIdentity'),
                        ('Version', '2011-06-15')))
    url = 'https://sts.{}.amazonaws.com/'.format(self.region)

    request_object = awsrequest.AWSRequest(method='POST', url=url, data=data)

    signer = auth.SigV4Auth(readonly_credentials, 'sts', self.region)
    signer.add_auth(request_object)

    return request_object.headers
python
{ "resource": "" }
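A sketch of what get_token presumably does with these headers; the import path and the /v2/auth/sts-identity endpoint are assumptions, since get_token's body is not shown here:

import requests

from cerberus.aws_auth import AWSAuth  # assumed import path

aws_auth = AWSAuth('https://cerberus.example.com', region='us-west-2')  # placeholders
signed = aws_auth._get_v4_signed_headers()

# Post the SigV4-signed headers to Cerberus, which verifies them against
# STS to authenticate the caller ('/v2/auth/sts-identity' is an assumption)
resp = requests.post('https://cerberus.example.com/v2/auth/sts-identity',
                     headers=dict(signed))
print(resp.json())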