Dataset schema:

    _id               stringlengths (2 to 7)
    title             stringlengths (1 to 88)
    partition         stringclasses (3 values)
    text              stringlengths (75 to 19.8k)
    language          stringclasses (1 value)
    meta_information  dict
q14500
AWSAuth.get_token
train
def get_token(self):
    """Returns a client token from Cerberus"""
    signed_headers = self._get_v4_signed_headers()
    for header in self.HEADERS:
        signed_headers[header] = self.HEADERS[header]
    resp = post_with_retry(self.cerberus_url + '/v2/auth/sts-identity',
                           headers=signed_headers)
    throw_if_bad_response(resp)
    token = resp.json()['client_token']
    iam_principal_arn = resp.json()['metadata']['aws_iam_principal_arn']
    if self.verbose:
        print('Successfully authenticated with Cerberus as {}'.format(iam_principal_arn),
              file=sys.stderr)
    logger.info('Successfully authenticated with Cerberus as {}'.format(iam_principal_arn))
    return token
python
{ "resource": "" }
q14501
tiff_header
train
def tiff_header(read_buffer):
    """
    Interpret the uuid raw data as a tiff header.
    """
    # First 8 should be (73, 73, 42, 8) or (77, 77, 42, 8)
    data = struct.unpack('BB', read_buffer[0:2])
    if data[0] == 73 and data[1] == 73:
        # little endian
        endian = '<'
    elif data[0] == 77 and data[1] == 77:
        # big endian
        endian = '>'
    else:
        msg = ("The byte order indication in the TIFF header ({byte_order}) "
               "is invalid.  It should be either {little_endian} or "
               "{big_endian}.")
        msg = msg.format(byte_order=read_buffer[0:2],
                         little_endian=bytes([73, 73]),
                         big_endian=bytes([77, 77]))
        raise IOError(msg)

    _, offset = struct.unpack(endian + 'HI', read_buffer[2:8])

    # This is the 'Exif Image' portion.
    exif = ExifImageIfd(endian, read_buffer, offset)
    return exif.processed_ifd
python
{ "resource": "" }
q14502
Ifd.parse_tag
train
def parse_tag(self, dtype, count, offset_buf):
    """Interpret an Exif image tag data payload.
    """
    try:
        fmt = self.datatype2fmt[dtype][0] * count
        payload_size = self.datatype2fmt[dtype][1] * count
    except KeyError:
        msg = 'Invalid TIFF tag datatype ({0}).'.format(dtype)
        raise IOError(msg)

    if payload_size <= 4:
        # Interpret the payload from the 4 bytes in the tag entry.
        target_buffer = offset_buf[:payload_size]
    else:
        # Interpret the payload at the offset specified by the 4 bytes in
        # the tag entry.
        offset, = struct.unpack(self.endian + 'I', offset_buf)
        target_buffer = self.read_buffer[offset:offset + payload_size]

    if dtype == 2:
        # ASCII
        payload = target_buffer.decode('utf-8').rstrip('\x00')
    else:
        payload = struct.unpack(self.endian + fmt, target_buffer)
        if dtype == 5 or dtype == 10:
            # Rational or Signed Rational.  Construct the list of values.
            rational_payload = []
            for j in range(count):
                value = float(payload[j * 2]) / float(payload[j * 2 + 1])
                rational_payload.append(value)
            payload = rational_payload
        if count == 1:
            # If just a single value, then return a scalar instead of a
            # tuple.
            payload = payload[0]

    return payload
python
{ "resource": "" }
q14503
Ifd.post_process
train
def post_process(self, tagnum2name):
    """Map the tag name instead of tag number to the tag value.
    """
    for tag, value in self.raw_ifd.items():
        try:
            tag_name = tagnum2name[tag]
        except KeyError:
            # Ok, we don't recognize this tag.  Just use the numeric id.
            msg = 'Unrecognized Exif tag ({tag}).'.format(tag=tag)
            warnings.warn(msg, UserWarning)
            tag_name = tag
        self.processed_ifd[tag_name] = value
python
{ "resource": "" }
q14504
SavedQueriesInterface.all
train
def all(self):
    """
    Gets all saved queries for a project from the Keen IO API.
    Master key must be set.
    """
    response = self._get_json(HTTPMethods.GET, self.saved_query_url,
                              self._get_master_key())
    return response
python
{ "resource": "" }
q14505
SavedQueriesInterface.get
train
def get(self, query_name):
    """
    Gets a single saved query for a project from the Keen IO API given a
    query name.
    Master key must be set.
    """
    url = "{0}/{1}".format(self.saved_query_url, query_name)
    response = self._get_json(HTTPMethods.GET, url, self._get_master_key())
    return response
python
{ "resource": "" }
q14506
SavedQueriesInterface.results
train
def results(self, query_name):
    """
    Gets a single saved query with a 'result' object for a project from the
    Keen IO API given a query name.
    Read or Master key must be set.
    """
    url = "{0}/{1}/result".format(self.saved_query_url, query_name)
    response = self._get_json(HTTPMethods.GET, url, self._get_read_key())
    return response
python
{ "resource": "" }
q14507
SavedQueriesInterface.create
train
def create(self, query_name, saved_query):
    """
    Creates the saved query via a PUT request to the Keen IO Saved Query
    endpoint.
    Master key must be set.
    """
    url = "{0}/{1}".format(self.saved_query_url, query_name)

    payload = saved_query
    # To support clients that may have already called dumps() to work
    # around how this used to work, make sure it's not a str.  Hopefully
    # it's some sort of mapping.  When we actually try to send the request,
    # client code will get an InvalidJSONError if payload isn't a
    # json-formatted string.
    if not isinstance(payload, str):
        payload = json.dumps(saved_query)

    response = self._get_json(HTTPMethods.PUT, url, self._get_master_key(),
                              data=payload)
    return response
python
{ "resource": "" }
q14508
SavedQueriesInterface.delete
train
def delete(self, query_name):
    """
    Deletes a saved query from a project with a query name.
    Master key must be set.
    """
    url = "{0}/{1}".format(self.saved_query_url, query_name)
    self._get_json(HTTPMethods.DELETE, url, self._get_master_key())
    return True
python
{ "resource": "" }
q14509
UserAuth.get_auth
train
def get_auth(self):
    """Returns auth response which has client token unless MFA is required"""
    auth_resp = get_with_retry(self.cerberus_url + '/v2/auth/user',
                               auth=(self.username, self.password),
                               headers=self.HEADERS)

    if auth_resp.status_code != 200:
        throw_if_bad_response(auth_resp)

    return auth_resp.json()
python
{ "resource": "" }
q14510
UserAuth.get_token
train
def get_token(self):
    """Returns client token from Cerberus"""
    auth_resp = self.get_auth()

    if auth_resp['status'] == 'mfa_req':
        token_resp = self.get_mfa(auth_resp)
    else:
        token_resp = auth_resp

    token = token_resp['data']['client_token']['client_token']
    return token
python
{ "resource": "" }
q14511
UserAuth.get_mfa
train
def get_mfa(self, auth_resp):
    """Gets MFA code from user and returns response which includes the client token"""
    devices = auth_resp['data']['devices']
    if len(devices) == 1:
        # If there's only one option, don't show selection prompt
        selection = "0"
        x = 1
    else:
        print("Found the following MFA devices")
        x = 0
        for device in devices:
            print("{0}: {1}".format(x, device['name']))
            x = x + 1
        selection = input("Enter a selection: ")

    if selection.isdigit():
        selection_num = int(str(selection))
    else:
        raise CerberusClientException(
            str.join('', ["Selection: '", selection, "' is not a number"]))
    if (selection_num >= x) or (selection_num < 0):
        raise CerberusClientException(
            str.join('', ["Selection: '", str(selection_num),
                          "' is out of range"]))

    sec_code = input('Enter ' +
                     auth_resp['data']['devices'][selection_num]['name'] +
                     ' security code: ')

    mfa_resp = post_with_retry(
        self.cerberus_url + '/v2/auth/mfa_check',
        json={'otp_token': sec_code,
              'device_id': auth_resp['data']['devices'][selection_num]['id'],
              'state_token': auth_resp['data']['state_token']},
        headers=self.HEADERS
    )

    if mfa_resp.status_code != 200:
        throw_if_bad_response(mfa_resp)

    return mfa_resp.json()
python
{ "resource": "" }
q14512
_parse_standard_flag
train
def _parse_standard_flag(read_buffer, mask_length):
    """Construct standard flag, standard mask data from the file.

    Specifically working on Reader Requirements box.

    Parameters
    ----------
    read_buffer : bytes
        Buffer of data from the file, starting at the standard flag count.
    mask_length : int
        Length of standard mask flag in bytes.
    """
    # The mask length tells us the format string to use when unpacking
    # from the buffer read from file.
    mask_format = {1: 'B', 2: 'H', 4: 'I'}[mask_length]

    num_standard_flags, = struct.unpack_from('>H', read_buffer, offset=0)

    # Read in standard flags and standard masks.  Each standard flag should
    # be two bytes, but the standard mask flag is as long as specified by
    # the mask length.
    fmt = '>' + ('H' + mask_format) * num_standard_flags
    data = struct.unpack_from(fmt, read_buffer, offset=2)

    standard_flag = data[0:num_standard_flags * 2:2]
    standard_mask = data[1:num_standard_flags * 2:2]

    return standard_flag, standard_mask
python
{ "resource": "" }
q14513
_parse_vendor_features
train
def _parse_vendor_features(read_buffer, mask_length):
    """Construct vendor features, vendor mask data from the file.

    Specifically working on Reader Requirements box.

    Parameters
    ----------
    read_buffer : bytes
        Buffer of data from the file, starting at the vendor feature count.
    mask_length : int
        Length of vendor mask flag in bytes.
    """
    # The mask length tells us the format string to use when unpacking
    # from the buffer read from file.
    mask_format = {1: 'B', 2: 'H', 4: 'I'}[mask_length]

    num_vendor_features, = struct.unpack_from('>H', read_buffer)

    # Each vendor feature consists of a 16-byte UUID plus a mask whose
    # length is specified by, you guessed it, "mask_length".
    entry_length = 16 + mask_length
    vendor_feature = []
    vendor_mask = []
    for j in range(num_vendor_features):
        uslice = slice(2 + j * entry_length, 2 + (j + 1) * entry_length)
        ubuffer = read_buffer[uslice]
        vendor_feature.append(UUID(bytes=ubuffer[0:16]))

        vmask = struct.unpack('>' + mask_format, ubuffer[16:])
        vendor_mask.append(vmask)

    return vendor_feature, vendor_mask
python
{ "resource": "" }
q14514
Jp2kBox._dispatch_validation_error
train
def _dispatch_validation_error(self, msg, writing=False):
    """Issue either a warning or an error depending on circumstance.

    If writing to file, then error out, as we do not wish to create bad
    JP2 files.  If reading, then we should be more lenient and just warn.
    """
    if writing:
        raise IOError(msg)
    else:
        warnings.warn(msg)
python
{ "resource": "" }
q14515
Jp2kBox._indent
train
def _indent(self, textstr, indent_level=4):
    """
    Indent a string.

    Textwrap's indent method only exists for 3.3 or above.  In 2.7 we have
    to fake it.

    Parameters
    ----------
    textstr : str
        String to be indented.
    indent_level : int
        Number of spaces of indentation to add.

    Returns
    -------
    str
        Possibly multi-line string indented by the specified amount.
    """
    if sys.hexversion >= 0x03030000:
        return textwrap.indent(textstr, ' ' * indent_level)
    else:
        lst = [(' ' * indent_level + x) for x in textstr.split('\n')]
        return '\n'.join(lst)
python
{ "resource": "" }
q14516
Jp2kBox._write_superbox
train
def _write_superbox(self, fptr, box_id):
    """Write a superbox.

    Parameters
    ----------
    fptr : file or file object
        File to which the superbox (box of boxes) is written.
    box_id : bytes
        4-byte sequence that identifies the superbox.
    """
    # Write the contained boxes, then come back and write the length.
    orig_pos = fptr.tell()
    fptr.write(struct.pack('>I4s', 0, box_id))
    for box in self.box:
        box.write(fptr)

    end_pos = fptr.tell()
    fptr.seek(orig_pos)
    fptr.write(struct.pack('>I', end_pos - orig_pos))
    fptr.seek(end_pos)
python
{ "resource": "" }
q14517
Jp2kBox._parse_this_box
train
def _parse_this_box(self, fptr, box_id, start, num_bytes):
    """Parse the current box.

    Parameters
    ----------
    fptr : file
        Open file object, currently points to start of box payload, not
        the start of the box.
    box_id : str
        4-letter identifier for the current box.
    start, num_bytes : int
        Byte offset and length of the current box.

    Returns
    -------
    Jp2kBox
        Object corresponding to the current box.
    """
    try:
        parser = _BOX_WITH_ID[box_id].parse
    except KeyError:
        # We don't recognize the box ID, so create an UnknownBox and be
        # done with it.
        msg = ('Unrecognized box ({box_id}) encountered at byte offset '
               '{offset}.')
        msg = msg.format(box_id=box_id, offset=fptr.tell() - 8)
        warnings.warn(msg, UserWarning)
        box = UnknownBox(box_id, offset=start, length=num_bytes,
                         longname='Unknown')
        return box

    try:
        box = parser(fptr, start, num_bytes)
    except ValueError as err:
        msg = ("Encountered an unrecoverable ValueError while parsing a "
               "{box_id} box at byte offset {offset}.  The original error "
               "message was \"{original_error_message}\".")
        msg = msg.format(box_id=_BOX_WITH_ID[box_id].longname,
                         offset=start, original_error_message=str(err))
        warnings.warn(msg, UserWarning)
        box = UnknownBox(box_id.decode('utf-8'), length=num_bytes,
                         offset=start, longname='Unknown')

    return box
python
{ "resource": "" }
q14518
Jp2kBox.parse_superbox
train
def parse_superbox(self, fptr):
    """Parse a superbox (a box consisting of nothing but other boxes).

    Parameters
    ----------
    fptr : file
        Open file object.

    Returns
    -------
    list
        List of top-level boxes in the JPEG 2000 file.
    """
    superbox = []

    start = fptr.tell()

    while True:

        # Are we at the end of the superbox?
        if start >= self.offset + self.length:
            break

        read_buffer = fptr.read(8)
        if len(read_buffer) < 8:
            msg = "Extra bytes at end of file ignored."
            warnings.warn(msg, UserWarning)
            return superbox

        (box_length, box_id) = struct.unpack('>I4s', read_buffer)
        if box_length == 0:
            # The length of the box is presumed to last until the end of
            # the file.  Compute the effective length of the box.
            num_bytes = os.path.getsize(fptr.name) - fptr.tell() + 8

        elif box_length == 1:
            # The length of the box is in the XL field, a 64-bit value.
            read_buffer = fptr.read(8)
            num_bytes, = struct.unpack('>Q', read_buffer)

        else:
            # The box_length value really is the length of the box!
            num_bytes = box_length

        box = self._parse_this_box(fptr, box_id, start, num_bytes)

        superbox.append(box)

        # Position to the start of the next box.
        if num_bytes > self.length:
            # Length of the current box goes past the end of the
            # enclosing superbox.
            msg = '{0} box has incorrect box length ({1})'
            msg = msg.format(box_id, num_bytes)
            warnings.warn(msg)
        elif fptr.tell() > start + num_bytes:
            # The box must be invalid somehow, as the file pointer is
            # positioned past the end of the box.
            msg = ('{box_id} box may be invalid, the file pointer is '
                   'positioned {num_bytes} bytes past the end of the box.')
            msg = msg.format(box_id=box_id,
                             num_bytes=fptr.tell() - (start + num_bytes))
            warnings.warn(msg, UserWarning)
        fptr.seek(start + num_bytes)

        start += num_bytes

    return superbox
python
{ "resource": "" }
q14519
ColourSpecificationBox._write_validate
train
def _write_validate(self):
    """In addition to constructor validation steps, run validation steps
    for writing."""
    if self.colorspace is None:
        msg = ("Writing colr boxes without enumerated "
               "colorspaces is not supported at this time.")
        self._dispatch_validation_error(msg, writing=True)

    if self.icc_profile is None:
        if self.colorspace not in [SRGB, GREYSCALE, YCC]:
            msg = ("Colorspace should correspond to one of SRGB, "
                   "GREYSCALE, or YCC.")
            self._dispatch_validation_error(msg, writing=True)

    self._validate(writing=True)
python
{ "resource": "" }
q14520
ColourSpecificationBox.write
train
def write(self, fptr):
    """Write a Colour Specification box to file.
    """
    self._write_validate()
    length = 15 if self.icc_profile is None else 11 + len(self.icc_profile)
    fptr.write(struct.pack('>I4s', length, b'colr'))

    write_buffer = struct.pack('>BBBI', self.method, self.precedence,
                               self.approximation, self.colorspace)
    fptr.write(write_buffer)
python
{ "resource": "" }
q14521
ColourSpecificationBox.parse
train
def parse(cls, fptr, offset, length):
    """Parse JPEG 2000 color specification box.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    ColourSpecificationBox
        Instance of the current colour specification box.
    """
    num_bytes = offset + length - fptr.tell()
    read_buffer = fptr.read(num_bytes)
    lst = struct.unpack_from('>BBB', read_buffer, offset=0)
    method, precedence, approximation = lst

    if method == 1:
        # enumerated colour space
        colorspace, = struct.unpack_from('>I', read_buffer, offset=3)
        if colorspace not in _COLORSPACE_MAP_DISPLAY.keys():
            msg = "Unrecognized colorspace ({colorspace})."
            msg = msg.format(colorspace=colorspace)
            warnings.warn(msg, UserWarning)
        icc_profile = None

    else:
        # ICC profile
        colorspace = None
        if (num_bytes - 3) < 128:
            msg = ("ICC profile header is corrupt, length is "
                   "only {length} when it should be at least 128.")
            warnings.warn(msg.format(length=num_bytes - 3), UserWarning)
            icc_profile = None
        else:
            profile = _ICCProfile(read_buffer[3:])
            icc_profile = profile.header

    return cls(method=method, precedence=precedence,
               approximation=approximation, colorspace=colorspace,
               icc_profile=icc_profile, length=length, offset=offset)
python
{ "resource": "" }
q14522
ChannelDefinitionBox.write
train
def write(self, fptr):
    """Write a channel definition box to file.
    """
    self._validate(writing=True)
    num_components = len(self.association)
    fptr.write(struct.pack('>I4s', 8 + 2 + num_components * 6, b'cdef'))
    fptr.write(struct.pack('>H', num_components))
    for j in range(num_components):
        fptr.write(struct.pack('>' + 'H' * 3,
                               self.index[j],
                               self.channel_type[j],
                               self.association[j]))
python
{ "resource": "" }
q14523
ChannelDefinitionBox.parse
train
def parse(cls, fptr, offset, length):
    """Parse component definition box.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    ComponentDefinitionBox
        Instance of the current component definition box.
    """
    num_bytes = offset + length - fptr.tell()
    read_buffer = fptr.read(num_bytes)

    # Read the number of components.
    num_components, = struct.unpack_from('>H', read_buffer)

    data = struct.unpack_from('>' + 'HHH' * num_components, read_buffer,
                              offset=2)
    index = data[0:num_components * 6:3]
    channel_type = data[1:num_components * 6:3]
    association = data[2:num_components * 6:3]

    return cls(index=tuple(index), channel_type=tuple(channel_type),
               association=tuple(association), length=length, offset=offset)
python
{ "resource": "" }
q14524
ColourGroupBox.write
train
def write(self, fptr):
    """Write a colour group box to file.
    """
    self._validate(writing=True)
    self._write_superbox(fptr, b'cgrp')
python
{ "resource": "" }
q14525
ComponentMappingBox.write
train
def write(self, fptr):
    """Write a Component Mapping box to file.
    """
    length = 8 + 4 * len(self.component_index)
    write_buffer = struct.pack('>I4s', length, b'cmap')
    fptr.write(write_buffer)

    for j in range(len(self.component_index)):
        write_buffer = struct.pack('>HBB',
                                   self.component_index[j],
                                   self.mapping_type[j],
                                   self.palette_index[j])
        fptr.write(write_buffer)
python
{ "resource": "" }
q14526
ComponentMappingBox.parse
train
def parse(cls, fptr, offset, length):
    """Parse component mapping box.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    ComponentMappingBox
        Instance of the current component mapping box.
    """
    num_bytes = offset + length - fptr.tell()
    num_components = int(num_bytes / 4)

    read_buffer = fptr.read(num_bytes)
    data = struct.unpack('>' + 'HBB' * num_components, read_buffer)

    component_index = data[0:num_bytes:3]
    mapping_type = data[1:num_bytes:3]
    palette_index = data[2:num_bytes:3]

    return cls(component_index, mapping_type, palette_index,
               length=length, offset=offset)
python
{ "resource": "" }
q14527
ContiguousCodestreamBox.parse
train
def parse(cls, fptr, offset=0, length=0):
    """Parse a codestream box.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    ContiguousCodestreamBox
        Instance of the current contiguous codestream box.
    """
    main_header_offset = fptr.tell()
    if config.get_option('parse.full_codestream'):
        codestream = Codestream(fptr, length, header_only=False)
    else:
        codestream = None
    box = cls(codestream, main_header_offset=main_header_offset,
              length=length, offset=offset)
    box._filename = fptr.name
    box._length = length
    return box
python
{ "resource": "" }
q14528
DataReferenceBox.write
train
def write(self, fptr):
    """Write a Data Reference box to file.
    """
    self._write_validate()

    # Very similar to the way a superbox is written.
    orig_pos = fptr.tell()
    fptr.write(struct.pack('>I4s', 0, b'dtbl'))

    # Write the number of data entry url boxes.
    write_buffer = struct.pack('>H', len(self.DR))
    fptr.write(write_buffer)

    for box in self.DR:
        box.write(fptr)

    end_pos = fptr.tell()
    fptr.seek(orig_pos)
    fptr.write(struct.pack('>I', end_pos - orig_pos))
    fptr.seek(end_pos)
python
{ "resource": "" }
q14529
DataReferenceBox.parse
train
def parse(cls, fptr, offset, length):
    """Parse data reference box.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    DataReferenceBox
        Instance of the current data reference box.
    """
    num_bytes = offset + length - fptr.tell()
    read_buffer = fptr.read(num_bytes)

    # Read the number of data references
    ndr, = struct.unpack_from('>H', read_buffer, offset=0)

    # Need to keep track of where the next url box starts.
    box_offset = 2

    data_entry_url_box_list = []
    for j in range(ndr):
        # Create an in-memory binary stream for each URL box.
        box_fptr = io.BytesIO(read_buffer[box_offset:])
        box_buffer = box_fptr.read(8)
        (box_length, box_id) = struct.unpack_from('>I4s', box_buffer,
                                                  offset=0)

        box = DataEntryURLBox.parse(box_fptr, 0, box_length)

        # Need to adjust the box start to that of the "real" file.
        box.offset = offset + 8 + box_offset
        data_entry_url_box_list.append(box)

        # Point to the next embedded URL box.
        box_offset += box_length

    return cls(data_entry_url_box_list, length=length, offset=offset)
python
{ "resource": "" }
q14530
FileTypeBox._validate
train
def _validate(self, writing=False):
    """
    Validate the box before writing to file.
    """
    if self.brand not in ['jp2 ', 'jpx ']:
        msg = ("The file type brand was '{brand}'.  "
               "It should be either 'jp2 ' or 'jpx '.")
        msg = msg.format(brand=self.brand)
        if writing:
            raise IOError(msg)
        else:
            warnings.warn(msg, UserWarning)
    for item in self.compatibility_list:
        if item not in self._valid_cls:
            msg = ("The file type compatibility list {items} is "
                   "not valid.  All items should be members of "
                   "{valid_entries}.")
            msg = msg.format(items=self.compatibility_list,
                             valid_entries=self._valid_cls)
            if writing:
                raise IOError(msg)
            else:
                warnings.warn(msg, UserWarning)
python
{ "resource": "" }
q14531
FileTypeBox.write
train
def write(self, fptr):
    """Write a File Type box to file.
    """
    self._validate(writing=True)
    length = 16 + 4 * len(self.compatibility_list)
    fptr.write(struct.pack('>I4s', length, b'ftyp'))
    fptr.write(self.brand.encode())
    fptr.write(struct.pack('>I', self.minor_version))

    for item in self.compatibility_list:
        fptr.write(item.encode())
python
{ "resource": "" }
q14532
FileTypeBox.parse
train
def parse(cls, fptr, offset, length):
    """Parse JPEG 2000 file type box.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    FileTypeBox
        Instance of the current file type box.
    """
    num_bytes = offset + length - fptr.tell()
    read_buffer = fptr.read(num_bytes)

    # Extract the brand, minor version.
    (brand, minor_version) = struct.unpack_from('>4sI', read_buffer, 0)
    if sys.hexversion >= 0x03000000:
        brand = brand.decode('utf-8')

    # Extract the compatibility list.  Each entry has 4 bytes.
    num_entries = int((length - 16) / 4)
    compatibility_list = []
    for j in range(num_entries):
        entry, = struct.unpack_from('>4s', read_buffer, 8 + j * 4)
        if sys.hexversion >= 0x03000000:
            try:
                entry = entry.decode('utf-8')
            except UnicodeDecodeError:
                # The entry is invalid, but we've got code to catch this
                # later on.
                pass

        compatibility_list.append(entry)

    return cls(brand=brand, minor_version=minor_version,
               compatibility_list=compatibility_list,
               length=length, offset=offset)
python
{ "resource": "" }
q14533
FragmentListBox._validate
train
def _validate(self, writing=False):
    """Validate internal correctness."""
    if ((len(self.fragment_offset) != len(self.fragment_length)) or
            (len(self.fragment_length) != len(self.data_reference))):
        msg = ("The lengths of the fragment offsets ({len_offsets}), "
               "fragment lengths ({len_fragments}), and "
               "data reference items ({len_drefs}) must be the same.")
        msg = msg.format(len_offsets=len(self.fragment_offset),
                         len_fragments=len(self.fragment_length),
                         len_drefs=len(self.data_reference))
        self._dispatch_validation_error(msg, writing=writing)
    if any([x <= 0 for x in self.fragment_offset]):
        msg = "Fragment offsets must all be positive."
        self._dispatch_validation_error(msg, writing=writing)
    if any([x <= 0 for x in self.fragment_length]):
        msg = "Fragment lengths must all be positive."
        self._dispatch_validation_error(msg, writing=writing)
python
{ "resource": "" }
q14534
FragmentListBox.write
train
def write(self, fptr):
    """Write a fragment list box to file.
    """
    self._validate(writing=True)
    num_items = len(self.fragment_offset)
    length = 8 + 2 + num_items * 14
    fptr.write(struct.pack('>I4s', length, b'flst'))
    fptr.write(struct.pack('>H', num_items))
    for j in range(num_items):
        write_buffer = struct.pack('>QIH',
                                   self.fragment_offset[j],
                                   self.fragment_length[j],
                                   self.data_reference[j])
        fptr.write(write_buffer)
python
{ "resource": "" }
q14535
FragmentTableBox._validate
train
def _validate(self, writing=False):
    """Self-validate the box before writing."""
    box_ids = [box.box_id for box in self.box]
    if len(box_ids) != 1 or box_ids[0] != 'flst':
        msg = ("Fragment table boxes must have a single fragment list "
               "box as a child box.")
        self._dispatch_validation_error(msg, writing=writing)
python
{ "resource": "" }
q14536
FragmentTableBox.write
train
def write(self, fptr):
    """Write a fragment table box to file.
    """
    self._validate(writing=True)
    self._write_superbox(fptr, b'ftbl')
python
{ "resource": "" }
q14537
ImageHeaderBox.write
train
def write(self, fptr):
    """Write an Image Header box to file.
    """
    fptr.write(struct.pack('>I4s', 22, b'ihdr'))

    # signedness and bps are stored together in a single byte
    bit_depth_signedness = 0x80 if self.signed else 0x00
    bit_depth_signedness |= self.bits_per_component - 1
    write_buffer = struct.pack('>IIHBBBB',
                               self.height, self.width,
                               self.num_components,
                               bit_depth_signedness,
                               self.compression,
                               1 if self.colorspace_unknown else 0,
                               1 if self.ip_provided else 0)
    fptr.write(write_buffer)
python
{ "resource": "" }
q14538
ImageHeaderBox.parse
train
def parse(cls, fptr, offset, length):
    """Parse JPEG 2000 image header box.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    ImageHeaderBox
        Instance of the current image header box.
    """
    # Read the box information
    read_buffer = fptr.read(14)
    params = struct.unpack('>IIHBBBB', read_buffer)
    height = params[0]
    width = params[1]
    num_components = params[2]
    bits_per_component = (params[3] & 0x7f) + 1
    signed = (params[3] & 0x80) > 0
    compression = params[4]
    colorspace_unknown = True if params[5] else False
    ip_provided = True if params[6] else False

    return cls(height, width, num_components=num_components,
               bits_per_component=bits_per_component,
               signed=signed,
               compression=compression,
               colorspace_unknown=colorspace_unknown,
               ip_provided=ip_provided,
               length=length, offset=offset)
python
{ "resource": "" }
q14539
BitsPerComponentBox.parse
train
def parse(cls, fptr, offset, length):
    """Parse bits per component box.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    BitsPerComponent
        Instance of the current bits per component box.
    """
    nbytes = length - 8
    data = fptr.read(nbytes)
    bpc = tuple(((x & 0x7f) + 1) for x in bytearray(data))
    signed = tuple(((x & 0x80) > 0) for x in bytearray(data))

    return cls(bpc, signed, length=length, offset=offset)
python
{ "resource": "" }
q14540
JP2HeaderBox.parse
train
def parse(cls, fptr, offset, length):
    """Parse JPEG 2000 header box.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    JP2HeaderBox
        Instance of the current JP2 header box.
    """
    box = cls(length=length, offset=offset)

    # The JP2 header box is a superbox, so go ahead and parse its child
    # boxes.
    box.box = box.parse_superbox(fptr)

    return box
python
{ "resource": "" }
q14541
JPEG2000SignatureBox.write
train
def write(self, fptr):
    """Write a JPEG 2000 Signature box to file.
    """
    fptr.write(struct.pack('>I4s', 12, b'jP  '))
    fptr.write(struct.pack('>BBBB', *self.signature))
python
{ "resource": "" }
q14542
JPEG2000SignatureBox.parse
train
def parse(cls, fptr, offset, length):
    """Parse JPEG 2000 signature box.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    JPEG2000SignatureBox
        Instance of the current JPEG2000 signature box.
    """
    read_buffer = fptr.read(4)
    signature = struct.unpack('>BBBB', read_buffer)

    return cls(signature=signature, length=length, offset=offset)
python
{ "resource": "" }
q14543
PaletteBox.write
train
def write(self, fptr):
    """Write a Palette box to file.
    """
    self._validate(writing=True)
    bytes_per_row = sum(self.bits_per_component) / 8
    bytes_per_palette = bytes_per_row * self.palette.shape[0]
    box_length = 8 + 3 + self.palette.shape[1] + bytes_per_palette

    # Write the usual (L, T) header.
    write_buffer = struct.pack('>I4s', int(box_length), b'pclr')
    fptr.write(write_buffer)

    # NE, NPC
    write_buffer = struct.pack('>HB',
                               self.palette.shape[0],
                               self.palette.shape[1])
    fptr.write(write_buffer)

    # Bits Per Sample.  Signed components aren't supported.
    bps_signed = [x - 1 for x in self.bits_per_component]
    write_buffer = struct.pack('>' + 'B' * self.palette.shape[1],
                               *bps_signed)
    fptr.write(write_buffer)

    # C(i,j)
    fptr.write(memoryview(self.palette))
python
{ "resource": "" }
q14544
PaletteBox.parse
train
def parse(cls, fptr, offset, length):
    """Parse palette box.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    PaletteBox
        Instance of the current palette box.
    """
    num_bytes = offset + length - fptr.tell()
    read_buffer = fptr.read(num_bytes)

    nrows, ncols = struct.unpack_from('>HB', read_buffer, offset=0)

    bps_signed = struct.unpack_from('>' + 'B' * ncols, read_buffer,
                                    offset=3)
    bps = [((x & 0x7f) + 1) for x in bps_signed]
    signed = [((x & 0x80) > 0) for x in bps_signed]

    # Are any components signed or differently sized?  We don't handle
    # that.
    if any(signed) or len(set(bps)) != 1:
        msg = ("Palettes with signed components or differently sized "
               "components are not supported.")
        raise IOError(msg)

    # The palette is unsigned and all components have the same width.
    # This should cover all but a vanishingly small share of palettes.
    b = bps[0]
    dtype = np.uint8 if b <= 8 else np.uint16 if b <= 16 else np.uint32
    palette = np.frombuffer(read_buffer[3 + ncols:], dtype=dtype)
    palette = np.reshape(palette, (nrows, ncols))

    return cls(palette, bps, signed, length=length, offset=offset)
python
{ "resource": "" }
q14545
ReaderRequirementsBox.parse
train
def parse(cls, fptr, offset, length):
    """Parse reader requirements box.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    ReaderRequirementsBox
        Instance of the current reader requirements box.
    """
    num_bytes = offset + length - fptr.tell()
    read_buffer = fptr.read(num_bytes)

    mask_length, = struct.unpack_from('>B', read_buffer, offset=0)

    # Fully Understands Aspect Mask
    # Decodes Completely Mask
    fuam = dcm = standard_flag = standard_mask = []
    vendor_feature = vendor_mask = []

    # The mask length tells us the format string to use when unpacking
    # from the buffer read from file.
    try:
        mask_format = {1: 'B', 2: 'H', 4: 'I', 8: 'Q'}[mask_length]
        fuam, dcm = struct.unpack_from('>' + mask_format * 2, read_buffer,
                                       offset=1)
        std_flg_offset = 1 + 2 * mask_length
        data = _parse_standard_flag(read_buffer[std_flg_offset:],
                                    mask_length)
        standard_flag, standard_mask = data

        nflags = len(standard_flag)
        vndr_offset = 1 + 2 * mask_length + 2 + (2 + mask_length) * nflags
        data = _parse_vendor_features(read_buffer[vndr_offset:],
                                      mask_length)
        vendor_feature, vendor_mask = data

    except KeyError:
        msg = ('The ReaderRequirements box (rreq) has a mask length of '
               '{length} bytes, but only values of 1, 2, 4, or 8 are '
               'supported.  The box contents will not be interpreted.')
        warnings.warn(msg.format(length=mask_length), UserWarning)

    return cls(fuam, dcm, standard_flag, standard_mask,
               vendor_feature, vendor_mask,
               length=length, offset=offset)
python
{ "resource": "" }
q14546
CaptureResolutionBox.parse
train
def parse(cls, fptr, offset, length):
    """Parse CaptureResolutionBox.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    CaptureResolutionBox
        Instance of the current capture resolution box.
    """
    read_buffer = fptr.read(10)
    (rn1, rd1, rn2, rd2, re1, re2) = struct.unpack('>HHHHBB', read_buffer)
    vres = rn1 / rd1 * math.pow(10, re1)
    hres = rn2 / rd2 * math.pow(10, re2)

    return cls(vres, hres, length=length, offset=offset)
python
{ "resource": "" }
q14547
LabelBox.write
train
def write(self, fptr):
    """Write a Label box to file.
    """
    length = 8 + len(self.label.encode())
    fptr.write(struct.pack('>I4s', length, b'lbl '))
    fptr.write(self.label.encode())
python
{ "resource": "" }
q14548
LabelBox.parse
train
def parse(cls, fptr, offset, length):
    """Parse Label box.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    LabelBox
        Instance of the current label box.
    """
    num_bytes = offset + length - fptr.tell()
    read_buffer = fptr.read(num_bytes)
    label = read_buffer.decode('utf-8')
    return cls(label, length=length, offset=offset)
python
{ "resource": "" }
q14549
NumberListBox.parse
train
def parse(cls, fptr, offset, length):
    """Parse number list box.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    NumberListBox
        Instance of the current number list box.
    """
    num_bytes = offset + length - fptr.tell()
    raw_data = fptr.read(num_bytes)
    num_associations = int(len(raw_data) / 4)
    lst = struct.unpack('>' + 'I' * num_associations, raw_data)
    return cls(lst, length=length, offset=offset)
python
{ "resource": "" }
q14550
NumberListBox.write
train
def write(self, fptr):
    """Write a NumberList box to file.
    """
    fptr.write(struct.pack('>I4s', len(self.associations) * 4 + 8,
                           b'nlst'))
    fmt = '>' + 'I' * len(self.associations)
    write_buffer = struct.pack(fmt, *self.associations)
    fptr.write(write_buffer)
python
{ "resource": "" }
q14551
XMLBox.write
train
def write(self, fptr):
    """
    Write an XML box to file.
    """
    write_buffer = ET.tostring(self.xml.getroot(), encoding='utf-8')
    fptr.write(struct.pack('>I4s', len(write_buffer) + 8, b'xml '))
    fptr.write(write_buffer)
python
{ "resource": "" }
q14552
XMLBox.parse
train
def parse(cls, fptr, offset, length):
    """Parse XML box.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    XMLBox
        Instance of the current XML box.
    """
    num_bytes = offset + length - fptr.tell()
    read_buffer = fptr.read(num_bytes)

    if sys.hexversion < 0x03000000 and codecs.BOM_UTF8 in read_buffer:
        # Python3 with utf-8 handles this just fine.  Actually so does
        # Python2 right here since we decode using utf-8.  The real
        # problem comes when __str__ is used on the XML box, and that
        # is where Python2 falls short because of the ascii codec.
        msg = ('A BOM (byte order marker) was detected and '
               'removed from the XML contents in the box starting at byte '
               'offset {offset:d}.')
        msg = msg.format(offset=offset)
        warnings.warn(msg, UserWarning)
        read_buffer = read_buffer.replace(codecs.BOM_UTF8, b'')

    try:
        text = read_buffer.decode('utf-8')
    except UnicodeDecodeError as err:
        # Possibly bad string of bytes to begin with.
        # Try to search for <?xml and go from there.
        decl_start = read_buffer.find(b'<?xml')
        if decl_start <= -1:
            # Nope, that's not it.  All is lost.
            msg = ('A problem was encountered while parsing an XML box:'
                   '\n\n\t"{error}"\n\nNo XML was retrieved.')
            warnings.warn(msg.format(error=str(err)), UserWarning)
            return XMLBox(xml=None, length=length, offset=offset)

        text = read_buffer[decl_start:].decode('utf-8')

        # Let the user know that the XML box was problematic.
        msg = ('A UnicodeDecodeError was encountered parsing an XML box '
               'at byte position {offset:d} ({reason}), but the XML was '
               'still recovered.')
        msg = msg.format(offset=offset, reason=err.reason)
        warnings.warn(msg, UserWarning)

    # Strip out any trailing nulls, as they can foul up XML parsing.
    text = text.rstrip(chr(0))
    bfptr = io.BytesIO(text.encode('utf-8'))

    try:
        xml = ET.parse(bfptr)
    except ET.ParseError as err:
        msg = ('A problem was encountered while parsing an XML box:'
               '\n\n\t"{reason}"\n\nNo XML was retrieved.')
        msg = msg.format(reason=str(err))
        warnings.warn(msg, UserWarning)
        xml = None

    return cls(xml=xml, length=length, offset=offset)
python
{ "resource": "" }
q14553
UUIDListBox.write
train
def write(self, fptr):
    """Write a UUID list box to file.
    """
    num_uuids = len(self.ulst)
    length = 4 + 4 + 2 + num_uuids * 16
    write_buffer = struct.pack('>I4sH', length, b'ulst', num_uuids)
    fptr.write(write_buffer)

    for j in range(num_uuids):
        fptr.write(self.ulst[j].bytes)
python
{ "resource": "" }
q14554
UUIDListBox.parse
train
def parse(cls, fptr, offset, length):
    """Parse UUIDList box.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    UUIDListBox
        Instance of the current UUID list box.
    """
    num_bytes = offset + length - fptr.tell()
    read_buffer = fptr.read(num_bytes)
    num_uuids, = struct.unpack_from('>H', read_buffer)

    ulst = []
    for j in range(num_uuids):
        uuid_buffer = read_buffer[2 + j * 16:2 + (j + 1) * 16]
        ulst.append(UUID(bytes=uuid_buffer))

    return cls(ulst, length=length, offset=offset)
python
{ "resource": "" }
q14555
DataEntryURLBox.write
train
def write(self, fptr):
    """Write a data entry url box to file.
    """
    # Make sure it is written out as null-terminated.
    url = self.url
    if self.url[-1] != chr(0):
        url = url + chr(0)
    url = url.encode()

    length = 8 + 1 + 3 + len(url)
    write_buffer = struct.pack('>I4sBBBB',
                               length, b'url ',
                               self.version,
                               self.flag[0], self.flag[1], self.flag[2])
    fptr.write(write_buffer)
    fptr.write(url)
python
{ "resource": "" }
q14556
DataEntryURLBox.parse
train
def parse(cls, fptr, offset, length):
    """Parse data entry URL box.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    DataEntryURLbox
        Instance of the current data entry URL box.
    """
    num_bytes = offset + length - fptr.tell()
    read_buffer = fptr.read(num_bytes)
    data = struct.unpack_from('>BBBB', read_buffer)
    version = data[0]
    flag = data[1:4]

    url = read_buffer[4:].decode('utf-8').rstrip(chr(0))
    return cls(version, flag, url, length=length, offset=offset)
python
{ "resource": "" }
q14557
UUIDBox._parse_raw_data
train
def _parse_raw_data(self):
    """
    Private function for parsing UUID payloads if possible.
    """
    if self.uuid == _XMP_UUID:
        txt = self.raw_data.decode('utf-8')
        elt = ET.fromstring(txt)
        self.data = ET.ElementTree(elt)
    elif self.uuid == _GEOTIFF_UUID:
        self.data = tiff_header(self.raw_data)
    elif self.uuid == _EXIF_UUID:
        # Cut off 'EXIF\0\0' part.
        self.data = tiff_header(self.raw_data[6:])
    else:
        self.data = self.raw_data
python
{ "resource": "" }
q14558
UUIDBox._print_geotiff
train
def _print_geotiff(self):
    """
    Print geotiff information.  Shamelessly ripped off from gdalinfo.py

    Returns
    -------
    str
        String representation of the degenerate geotiff.
    """
    if self.data is None:
        return "corrupt"

    in_mem_name = '/vsimem/geo.tif'

    gdal.FileFromMemBuffer(in_mem_name, self.raw_data)
    gtif = gdal.Open(in_mem_name)

    # Report projection
    proj_ref = gtif.GetProjectionRef()
    sref = osr.SpatialReference()
    sref.ImportFromWkt(proj_ref)
    psz_pretty_wkt = sref.ExportToPrettyWkt(False)

    # report geotransform
    geo_transform = gtif.GetGeoTransform(can_return_null=True)
    fmt = ('Origin = ({origin_x:.15f},{origin_y:.15f})\n'
           'Pixel Size = ({pixel_x:.15f},{pixel_y:.15f})')
    geotransform_str = fmt.format(origin_x=geo_transform[0],
                                  origin_y=geo_transform[3],
                                  pixel_x=geo_transform[1],
                                  pixel_y=geo_transform[5])

    # setup projected to lat/long transform if appropriate
    hTransform = None
    if proj_ref is not None and len(proj_ref) > 0:
        hProj = osr.SpatialReference(proj_ref)
        if hProj is not None:
            hLatLong = hProj.CloneGeogCS()
            if hLatLong is not None:
                gdal.PushErrorHandler('CPLQuietErrorHandler')
                hTransform = osr.CoordinateTransformation(hProj, hLatLong)
                gdal.PopErrorHandler()
                msg = 'Unable to load PROJ.4 library'
                if gdal.GetLastErrorMsg().find(msg) != -1:
                    hTransform = None

    # report corners
    uleft = self.GDALInfoReportCorner(gtif, hTransform, "Upper Left",
                                      0, 0)
    lleft = self.GDALInfoReportCorner(gtif, hTransform, "Lower Left",
                                      0, gtif.RasterYSize)
    uright = self.GDALInfoReportCorner(gtif, hTransform, "Upper Right",
                                       gtif.RasterXSize, 0)
    lright = self.GDALInfoReportCorner(gtif, hTransform, "Lower Right",
                                       gtif.RasterXSize, gtif.RasterYSize)
    center = self.GDALInfoReportCorner(gtif, hTransform, "Center",
                                       gtif.RasterXSize / 2.0,
                                       gtif.RasterYSize / 2.0)

    gdal.Unlink(in_mem_name)

    fmt = ("Coordinate System =\n"
           "{coordinate_system}\n"
           "{geotransform}\n"
           "Corner Coordinates:\n"
           "{upper_left}\n"
           "{lower_left}\n"
           "{upper_right}\n"
           "{lower_right}\n"
           "{center}")
    msg = fmt.format(coordinate_system=self._indent(psz_pretty_wkt),
                     geotransform=geotransform_str,
                     upper_left=uleft,
                     upper_right=uright,
                     lower_left=lleft,
                     lower_right=lright,
                     center=center)

    return msg
python
{ "resource": "" }
q14559
UUIDBox.write
train
def write(self, fptr):
    """Write a UUID box to file.
    """
    length = 4 + 4 + 16 + len(self.raw_data)
    write_buffer = struct.pack('>I4s', length, b'uuid')
    fptr.write(write_buffer)

    fptr.write(self.uuid.bytes)
    fptr.write(self.raw_data)
python
{ "resource": "" }
q14560
UUIDBox.parse
train
def parse(cls, fptr, offset, length):
    """Parse UUID box.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    UUIDBox
        Instance of the current UUID box.
    """
    num_bytes = offset + length - fptr.tell()
    read_buffer = fptr.read(num_bytes)

    the_uuid = UUID(bytes=read_buffer[0:16])
    return cls(the_uuid, read_buffer[16:], length=length, offset=offset)
python
{ "resource": "" }
q14561
_parse_precinct_size
train
def _parse_precinct_size(spcod):
    """Compute precinct size from SPcod or SPcoc."""
    spcod = np.frombuffer(spcod, dtype=np.uint8)
    precinct_size = []
    for item in spcod:
        ep2 = (item & 0xF0) >> 4
        ep1 = item & 0x0F
        precinct_size.append((2 ** ep1, 2 ** ep2))
    return tuple(precinct_size)
python
{ "resource": "" }
q14562
_context_string
train
def _context_string(context):
    """Produce a string to represent the code block context"""
    msg = 'Code block context:\n    '
    lines = ['Selective arithmetic coding bypass: {0}',
             'Reset context probabilities on coding pass boundaries: {1}',
             'Termination on each coding pass: {2}',
             'Vertically stripe causal context: {3}',
             'Predictable termination: {4}',
             'Segmentation symbols: {5}']
    msg += '\n    '.join(lines)
    msg = msg.format(((context & 0x01) > 0),
                     ((context & 0x02) > 0),
                     ((context & 0x04) > 0),
                     ((context & 0x08) > 0),
                     ((context & 0x10) > 0),
                     ((context & 0x20) > 0))
    return msg
python
{ "resource": "" }
q14563
parse_quantization
train
def parse_quantization(read_buffer, sqcd):
    """Tease out the quantization values.

    Parameters
    ----------
    read_buffer : bytes
        Sequence of bytes from the QCC and QCD segments.
    sqcd : int
        Quantization style byte.

    Returns
    -------
    tuple
        Mantissa and exponents from quantization buffer.
    """
    numbytes = len(read_buffer)

    exponent = []
    mantissa = []

    if sqcd & 0x1f == 0:  # no quantization
        data = struct.unpack('>' + 'B' * numbytes, read_buffer)
        for j in range(len(data)):
            exponent.append(data[j] >> 3)
            mantissa.append(0)
    else:
        fmt = '>' + 'H' * int(numbytes / 2)
        data = struct.unpack(fmt, read_buffer)
        for j in range(len(data)):
            exponent.append(data[j] >> 11)
            mantissa.append(data[j] & 0x07ff)

    return mantissa, exponent
python
{ "resource": "" }
q14564
_print_quantization_style
train
def _print_quantization_style(sqcc):
    """Only to be used with QCC and QCD segments."""
    msg = '\n    Quantization style:  '
    if sqcc & 0x1f == 0:
        msg += 'no quantization, '
    elif sqcc & 0x1f == 1:
        msg += 'scalar implicit, '
    elif sqcc & 0x1f == 2:
        msg += 'scalar explicit, '
    return msg
python
{ "resource": "" }
q14565
Codestream._parse_unrecognized_segment
train
def _parse_unrecognized_segment(self, fptr):
    """Looks like a valid marker, but not sure from reading the specs.
    """
    msg = ("Unrecognized codestream marker 0x{marker_id:x} encountered "
           "at byte offset {offset}.")
    msg = msg.format(marker_id=self._marker_id, offset=fptr.tell())
    warnings.warn(msg, UserWarning)
    cpos = fptr.tell()
    read_buffer = fptr.read(2)
    next_item, = struct.unpack('>H', read_buffer)
    fptr.seek(cpos)
    if ((next_item & 0xff00) >> 8) == 255:
        # No segment associated with this marker, so reset
        # to two bytes after it.
        segment = Segment(marker_id='0x{0:x}'.format(self._marker_id),
                          offset=self._offset, length=0)
    else:
        segment = self._parse_reserved_segment(fptr)
    return segment
python
{ "resource": "" }
q14566
Codestream._parse_reserved_segment
train
def _parse_reserved_segment(self, fptr):
    """Parse valid marker segment, segment description is unknown.

    Parameters
    ----------
    fptr : file object
        The file to parse.

    Returns
    -------
    Segment
        The current segment.
    """
    offset = fptr.tell() - 2

    read_buffer = fptr.read(2)
    length, = struct.unpack('>H', read_buffer)
    if length > 0:
        data = fptr.read(length - 2)
    else:
        data = None

    segment = Segment(marker_id='0x{0:x}'.format(self._marker_id),
                      offset=offset, length=length, data=data)
    return segment
python
{ "resource": "" }
q14567
Codestream._parse_tile_part_bit_stream
train
def _parse_tile_part_bit_stream(self, fptr, sod_marker, tile_length):
    """Parse the tile part bit stream for SOP, EPH marker segments."""
    read_buffer = fptr.read(tile_length)
    # The tile length could possibly be too large and extend past
    # the end of file.  We need to be a bit resilient.
    count = min(tile_length, len(read_buffer))
    packet = np.frombuffer(read_buffer, dtype=np.uint8, count=count)

    indices = np.where(packet == 0xff)
    for idx in indices[0]:
        try:
            if packet[idx + 1] == 0x91 and (idx < (len(packet) - 5)):
                offset = sod_marker.offset + 2 + idx
                length = 4
                nsop = packet[(idx + 4):(idx + 6)].view('uint16')[0]
                if sys.byteorder == 'little':
                    nsop = nsop.byteswap()
                segment = SOPsegment(nsop, length, offset)
                self.segment.append(segment)
            elif packet[idx + 1] == 0x92:
                offset = sod_marker.offset + 2 + idx
                length = 0
                segment = EPHsegment(length, offset)
                self.segment.append(segment)
        except IndexError:
            continue
python
{ "resource": "" }
q14568
Codestream._parse_cme_segment
train
def _parse_cme_segment(self, fptr):
    """Parse the CME marker segment.

    Parameters
    ----------
    fptr : file
        Open file object.

    Returns
    -------
    CMESegment
        The current CME segment.
    """
    offset = fptr.tell() - 2

    read_buffer = fptr.read(4)
    data = struct.unpack('>HH', read_buffer)
    length = data[0]
    rcme = data[1]
    ccme = fptr.read(length - 4)

    return CMEsegment(rcme, ccme, length, offset)
python
{ "resource": "" }
q14569
Codestream._parse_coc_segment
train
def _parse_coc_segment(self, fptr):
    """Parse the COC marker segment.

    Parameters
    ----------
    fptr : file
        Open file object.

    Returns
    -------
    COCSegment
        The current COC segment.
    """
    kwargs = {}
    offset = fptr.tell() - 2
    kwargs['offset'] = offset

    read_buffer = fptr.read(2)
    length, = struct.unpack('>H', read_buffer)
    kwargs['length'] = length

    fmt = '>B' if self._csiz <= 255 else '>H'
    nbytes = 1 if self._csiz <= 255 else 2
    read_buffer = fptr.read(nbytes)
    ccoc, = struct.unpack(fmt, read_buffer)

    read_buffer = fptr.read(1)
    scoc, = struct.unpack('>B', read_buffer)

    numbytes = offset + 2 + length - fptr.tell()
    read_buffer = fptr.read(numbytes)
    spcoc = np.frombuffer(read_buffer, dtype=np.uint8)

    return COCsegment(ccoc, scoc, spcoc, length, offset)
python
{ "resource": "" }
q14570
Codestream._parse_cod_segment
train
def _parse_cod_segment(cls, fptr):
    """Parse the COD segment.

    Parameters
    ----------
    fptr : file
        Open file object.

    Returns
    -------
    CODSegment
        The current COD segment.
    """
    offset = fptr.tell() - 2

    read_buffer = fptr.read(2)
    length, = struct.unpack('>H', read_buffer)

    read_buffer = fptr.read(length - 2)
    lst = struct.unpack_from('>BBHBBBBBB', read_buffer, offset=0)
    scod, prog, nlayers, mct, nr, xcb, ycb, cstyle, xform = lst

    if len(read_buffer) > 10:
        precinct_size = _parse_precinct_size(read_buffer[10:])
    else:
        precinct_size = None

    sop = (scod & 2) > 0
    eph = (scod & 4) > 0

    if sop or eph:
        cls._parse_tpart_flag = True
    else:
        cls._parse_tpart_flag = False

    pargs = (scod, prog, nlayers, mct, nr, xcb, ycb, cstyle, xform,
             precinct_size)

    return CODsegment(*pargs, length=length, offset=offset)
python
{ "resource": "" }
q14571
Codestream._parse_crg_segment
train
def _parse_crg_segment(self, fptr):
    """Parse the CRG marker segment.

    Parameters
    ----------
    fptr : file
        Open file object.

    Returns
    -------
    CRGSegment
        The current CRG segment.
    """
    offset = fptr.tell() - 2

    read_buffer = fptr.read(2)
    length, = struct.unpack('>H', read_buffer)

    read_buffer = fptr.read(4 * self._csiz)
    data = struct.unpack('>' + 'HH' * self._csiz, read_buffer)
    xcrg = data[0::2]
    ycrg = data[1::2]

    return CRGsegment(xcrg, ycrg, length, offset)
python
{ "resource": "" }
q14572
Codestream._parse_plt_segment
train
def _parse_plt_segment(self, fptr):
    """Parse the PLT segment.

    The packet headers are not parsed, i.e. they remain uninterpreted
    raw data buffers.

    Parameters
    ----------
    fptr : file
        Open file object.

    Returns
    -------
    PLTSegment
        The current PLT segment.
    """
    offset = fptr.tell() - 2

    read_buffer = fptr.read(3)
    length, zplt = struct.unpack('>HB', read_buffer)

    numbytes = length - 3
    read_buffer = fptr.read(numbytes)
    iplt = np.frombuffer(read_buffer, dtype=np.uint8)

    packet_len = []
    plen = 0
    for byte in iplt:
        plen |= (byte & 0x7f)
        if byte & 0x80:
            # Continue by or-ing in the next byte.
            plen <<= 7
        else:
            packet_len.append(plen)
            plen = 0

    iplt = packet_len

    return PLTsegment(zplt, iplt, length, offset)
python
{ "resource": "" }
q14573
Codestream._parse_pod_segment
train
def _parse_pod_segment(self, fptr):
    """Parse the POD segment.

    Parameters
    ----------
    fptr : file
        Open file object.

    Returns
    -------
    PODSegment
        The current POD segment.
    """
    offset = fptr.tell() - 2

    read_buffer = fptr.read(2)
    length, = struct.unpack('>H', read_buffer)

    n = ((length - 2) / 7) if self._csiz < 257 else ((length - 2) / 9)
    n = int(n)
    nbytes = n * 7 if self._csiz < 257 else n * 9
    read_buffer = fptr.read(nbytes)

    fmt = '>' + 'BBHBBB' * n if self._csiz < 257 else '>' + 'BHHBHB' * n
    pod_params = struct.unpack(fmt, read_buffer)

    return PODsegment(pod_params, length, offset)
python
{ "resource": "" }
q14574
Codestream._parse_ppm_segment
train
def _parse_ppm_segment(self, fptr):
    """Parse the PPM segment.

    Parameters
    ----------
    fptr : file
        Open file object.

    Returns
    -------
    PPMSegment
        The current PPM segment.
    """
    offset = fptr.tell() - 2

    read_buffer = fptr.read(3)
    length, zppm = struct.unpack('>HB', read_buffer)

    numbytes = length - 3
    read_buffer = fptr.read(numbytes)

    return PPMsegment(zppm, read_buffer, length, offset)
python
{ "resource": "" }
q14575
Codestream._parse_ppt_segment
train
def _parse_ppt_segment(self, fptr):
    """Parse the PPT segment.

    The packet headers are not parsed, i.e. they remain "uninterpreted"
    raw data buffers.

    Parameters
    ----------
    fptr : file object
        The file to parse.

    Returns
    -------
    PPTSegment
        The current PPT segment.
    """
    offset = fptr.tell() - 2

    read_buffer = fptr.read(3)
    length, zppt = struct.unpack('>HB', read_buffer)

    numbytes = length - 3
    ippt = fptr.read(numbytes)

    return PPTsegment(zppt, ippt, length, offset)
python
{ "resource": "" }
q14576
Codestream._parse_qcc_segment
train
def _parse_qcc_segment(cls, fptr):
    """Parse the QCC segment.

    Parameters
    ----------
    fptr : file object
        The file to parse.

    Returns
    -------
    QCCSegment
        The current QCC segment.
    """
    offset = fptr.tell() - 2

    read_buffer = fptr.read(2)
    length, = struct.unpack('>H', read_buffer)

    read_buffer = fptr.read(length - 2)
    fmt = '>HB' if cls._csiz > 256 else '>BB'
    mantissa_exponent_offset = 3 if cls._csiz > 256 else 2
    cqcc, sqcc = struct.unpack_from(fmt, read_buffer)
    if cqcc >= cls._csiz:
        msg = ("Invalid QCC component number ({invalid_comp_no}), "
               "the actual number of components is only "
               "{valid_comp_no}.")
        msg = msg.format(invalid_comp_no=cqcc, valid_comp_no=cls._csiz)
        warnings.warn(msg, UserWarning)

    spqcc = read_buffer[mantissa_exponent_offset:]

    return QCCsegment(cqcc, sqcc, spqcc, length, offset)
python
{ "resource": "" }
q14577
Codestream._parse_qcd_segment
train
def _parse_qcd_segment(self, fptr):
    """Parse the QCD segment.

    Parameters
    ----------
    fptr : file
        Open file object.

    Returns
    -------
    QCDSegment
        The current QCD segment.
    """
    offset = fptr.tell() - 2

    read_buffer = fptr.read(3)
    length, sqcd = struct.unpack('>HB', read_buffer)
    spqcd = fptr.read(length - 3)

    return QCDsegment(sqcd, spqcd, length, offset)
python
{ "resource": "" }
q14578
Codestream._parse_rgn_segment
train
def _parse_rgn_segment(cls, fptr):
    """Parse the RGN segment.

    Parameters
    ----------
    fptr : file
        Open file object.

    Returns
    -------
    RGNSegment
        The current RGN segment.
    """
    offset = fptr.tell() - 2

    read_buffer = fptr.read(2)
    length, = struct.unpack('>H', read_buffer)

    nbytes = 3 if cls._csiz < 257 else 4
    fmt = '>BBB' if cls._csiz < 257 else '>HBB'
    read_buffer = fptr.read(nbytes)
    data = struct.unpack(fmt, read_buffer)

    crgn = data[0]
    srgn = data[1]
    sprgn = data[2]

    return RGNsegment(crgn, srgn, sprgn, length, offset)
python
{ "resource": "" }
q14579
Codestream._parse_sot_segment
train
def _parse_sot_segment(self, fptr):
    """Parse the SOT segment.

    Parameters
    ----------
    fptr : file
        Open file object.

    Returns
    -------
    SOTSegment
        The current SOT segment.
    """
    offset = fptr.tell() - 2

    read_buffer = fptr.read(10)
    data = struct.unpack('>HHIBB', read_buffer)

    length = data[0]
    isot = data[1]
    psot = data[2]
    tpsot = data[3]
    tnsot = data[4]

    segment = SOTsegment(isot, psot, tpsot, tnsot, length, offset)

    # Need to keep easy access to tile offsets and lengths for when
    # we encounter start-of-data marker segments.
    self._tile_offset.append(segment.offset)
    if segment.psot == 0:
        tile_part_length = (self.offset + self.length -
                            segment.offset - 2)
    else:
        tile_part_length = segment.psot
    self._tile_length.append(tile_part_length)

    return segment
python
{ "resource": "" }
q14580
Codestream._parse_tlm_segment
train
def _parse_tlm_segment(self, fptr):
    """Parse the TLM segment.

    Parameters
    ----------
    fptr : file
        Open file object.

    Returns
    -------
    TLMSegment
        The current TLM segment.
    """
    offset = fptr.tell() - 2

    read_buffer = fptr.read(2)
    length, = struct.unpack('>H', read_buffer)
    read_buffer = fptr.read(length - 2)

    ztlm, stlm = struct.unpack_from('>BB', read_buffer)
    ttlm_st = (stlm >> 4) & 0x3
    ptlm_sp = (stlm >> 6) & 0x1

    nbytes = length - 4
    if ttlm_st == 0:
        ntiles = nbytes / ((ptlm_sp + 1) * 2)
    else:
        ntiles = nbytes / (ttlm_st + (ptlm_sp + 1) * 2)

    if ttlm_st == 0:
        ttlm = None
        fmt = ''
    elif ttlm_st == 1:
        fmt = 'B'
    elif ttlm_st == 2:
        fmt = 'H'

    if ptlm_sp == 0:
        fmt += 'H'
    else:
        fmt += 'I'

    data = struct.unpack_from('>' + fmt * int(ntiles), read_buffer,
                              offset=2)
    if ttlm_st == 0:
        ttlm = None
        ptlm = data
    else:
        ttlm = data[0::2]
        ptlm = data[1::2]

    return TLMsegment(ztlm, ttlm, ptlm, length, offset)
python
{ "resource": "" }
q14581
Codestream._parse_reserved_marker
train
def _parse_reserved_marker(self, fptr):
    """Marker range between 0xff30 and 0xff39.
    """
    the_id = '0x{0:x}'.format(self._marker_id)
    segment = Segment(marker_id=the_id, offset=self._offset, length=0)
    return segment
python
{ "resource": "" }
q14582
Event.to_json
train
def to_json(self):
    """ Serializes the event to JSON.

    :returns: a string
    """
    event_as_dict = copy.deepcopy(self.event_body)
    if self.timestamp:
        if "keen" in event_as_dict:
            event_as_dict["keen"]["timestamp"] = self.timestamp.isoformat()
        else:
            event_as_dict["keen"] = {"timestamp": self.timestamp.isoformat()}
    return json.dumps(event_as_dict)
python
{ "resource": "" }
q14583
KeenClient.delete_events
train
def delete_events(self, event_collection, timeframe=None, timezone=None,
                  filters=None):
    """ Deletes events.

    :param event_collection: string, the event collection from which
    events are being deleted
    :param timeframe: string or dict, the timeframe in which the events
    happened example: "previous_7_days"
    :param timezone: int, the timezone you'd like to use for the timeframe
    and interval in seconds
    :param filters: array of dict, contains the filters you'd like to
    apply to the data
    example: [{"property_name":"device", "operator":"eq",
    "property_value":"iPhone"}]
    """
    params = self.get_params(timeframe=timeframe, timezone=timezone,
                             filters=filters)
    return self.api.delete_events(event_collection, params)
python
{ "resource": "" }
q14584
KeenClient._base64_encode
train
def _base64_encode(self, string_to_encode):
    """Base64 encodes a string, with either Python 2 or 3.

    :param string_to_encode: the string to encode
    """
    try:
        # python 2
        return base64.b64encode(string_to_encode)
    except TypeError:
        # python 3
        encoding = sys.getdefaultencoding()
        base64_bytes = base64.b64encode(bytes(string_to_encode, encoding))
        return base64_bytes.decode(encoding)
python
{ "resource": "" }
q14585
KeenClient.select_unique
train
def select_unique(self, event_collection, target_property, timeframe=None,
                  timezone=None, interval=None, filters=None, group_by=None,
                  order_by=None, max_age=None, limit=None):
    """Performs a select unique query.

    Returns an array of the unique values of a target property for
    events that meet the given criteria.

    :param event_collection: string, the name of the collection to query
    :param target_property: string, the name of the event property you
        would like to use
    :param timeframe: string or dict, the timeframe in which the events
        happened, example: "previous_7_days"
    :param timezone: int, the timezone you'd like to use for the
        timeframe and interval in seconds
    :param interval: string, the time interval used for measuring data
        over time, example: "daily"
    :param filters: array of dict, contains the filters you'd like to
        apply to the data,
        example: [{"property_name":"device", "operator":"eq",
                   "property_value":"iPhone"}]
    :param group_by: string or array of strings, the name(s) of the
        properties you would like to group your results by,
        example: "customer.id" or ["browser","operating_system"]
    :param order_by: dictionary or list of dictionary objects containing
        the property_name(s) to order by and the desired direction(s) of
        sorting,
        example: {"property_name":"result",
                  "direction":keen.direction.DESCENDING}
        May not be used without a group_by specified.
    :param limit: positive integer limiting the displayed results of a
        query using order_by
    :param max_age: an integer, greater than 30 seconds, the maximum
        'staleness' you're willing to trade for increased query
        performance, in seconds
    """
    params = self.get_params(event_collection=event_collection,
                             timeframe=timeframe,
                             timezone=timezone,
                             interval=interval,
                             filters=filters,
                             group_by=group_by,
                             order_by=order_by,
                             target_property=target_property,
                             max_age=max_age,
                             limit=limit)
    return self.api.query("select_unique", params)
python
{ "resource": "" }
q14586
KeenClient.funnel
train
def funnel(self, steps, timeframe=None, timezone=None, max_age=None,
           all_keys=False):
    """Performs a Funnel query.

    Returns an object containing the results for each step of the funnel.

    :param steps: array of dictionaries, one for each step,
        example: [{"event_collection":"signup","actor_property":"user.id"},
                  {"event_collection":"purchase","actor_property":"user.id"}]
    :param timeframe: string or dict, the timeframe in which the events
        happened, example: "previous_7_days"
    :param timezone: int, the timezone you'd like to use for the
        timeframe and interval in seconds
    :param max_age: an integer, greater than 30 seconds, the maximum
        'staleness' you're willing to trade for increased query
        performance, in seconds
    :param all_keys: set to True to return all keys on the response
        (i.e. "result", "actors", "steps")
    """
    params = self.get_params(
        steps=steps,
        timeframe=timeframe,
        timezone=timezone,
        max_age=max_age,
    )
    return self.api.query("funnel", params, all_keys=all_keys)
python
{ "resource": "" }
q14587
cio_open
train
def cio_open(cinfo, src=None):
    """Wrapper for openjpeg library function opj_cio_open."""
    argtypes = [ctypes.POINTER(CommonStructType), ctypes.c_char_p,
                ctypes.c_int]
    OPENJPEG.opj_cio_open.argtypes = argtypes
    OPENJPEG.opj_cio_open.restype = ctypes.POINTER(CioType)

    if src is None:
        length = 0
    else:
        length = len(src)

    cio = OPENJPEG.opj_cio_open(
        ctypes.cast(cinfo, ctypes.POINTER(CommonStructType)),
        src, length)
    return cio
python
{ "resource": "" }
q14588
cio_close
train
def cio_close(cio):
    """Wraps openjpeg library function cio_close."""
    OPENJPEG.opj_cio_close.argtypes = [ctypes.POINTER(CioType)]
    OPENJPEG.opj_cio_close(cio)
python
{ "resource": "" }
q14589
cio_tell
train
def cio_tell(cio):
    """Get position in byte stream."""
    OPENJPEG.cio_tell.argtypes = [ctypes.POINTER(CioType)]
    OPENJPEG.cio_tell.restype = ctypes.c_int
    pos = OPENJPEG.cio_tell(cio)
    return pos
python
{ "resource": "" }
q14590
create_compress
train
def create_compress(fmt):
    """Wrapper for openjpeg library function opj_create_compress.

    Creates a J2K/JPT/JP2 compression structure.
    """
    OPENJPEG.opj_create_compress.argtypes = [ctypes.c_int]
    OPENJPEG.opj_create_compress.restype = ctypes.POINTER(CompressionInfoType)
    cinfo = OPENJPEG.opj_create_compress(fmt)
    return cinfo
python
{ "resource": "" }
q14591
create_decompress
train
def create_decompress(fmt):
    """Wraps openjpeg library function opj_create_decompress."""
    OPENJPEG.opj_create_decompress.argtypes = [ctypes.c_int]
    restype = ctypes.POINTER(DecompressionInfoType)
    OPENJPEG.opj_create_decompress.restype = restype
    dinfo = OPENJPEG.opj_create_decompress(fmt)
    return dinfo
python
{ "resource": "" }
q14592
decode
train
def decode(dinfo, cio):
    """Wrapper for opj_decode."""
    argtypes = [ctypes.POINTER(DecompressionInfoType),
                ctypes.POINTER(CioType)]
    OPENJPEG.opj_decode.argtypes = argtypes
    OPENJPEG.opj_decode.restype = ctypes.POINTER(ImageType)
    image = OPENJPEG.opj_decode(dinfo, cio)
    return image
python
{ "resource": "" }
q14593
destroy_compress
train
def destroy_compress(cinfo):
    """Wrapper for openjpeg library function opj_destroy_compress.

    Release resources for a compressor handle.
    """
    argtypes = [ctypes.POINTER(CompressionInfoType)]
    OPENJPEG.opj_destroy_compress.argtypes = argtypes
    OPENJPEG.opj_destroy_compress(cinfo)
python
{ "resource": "" }
q14594
encode
train
def encode(cinfo, cio, image):
    """Wrapper for openjpeg library function opj_encode.

    Encodes an image into a JPEG-2000 codestream.

    Parameters
    ----------
    cinfo : compression handle
    cio : output buffer stream
    image : image to encode
    """
    argtypes = [ctypes.POINTER(CompressionInfoType),
                ctypes.POINTER(CioType),
                ctypes.POINTER(ImageType)]
    OPENJPEG.opj_encode.argtypes = argtypes
    OPENJPEG.opj_encode.restype = ctypes.c_int
    status = OPENJPEG.opj_encode(cinfo, cio, image)
    return status
python
{ "resource": "" }
q14595
destroy_decompress
train
def destroy_decompress(dinfo):
    """Wraps openjpeg library function opj_destroy_decompress."""
    argtypes = [ctypes.POINTER(DecompressionInfoType)]
    OPENJPEG.opj_destroy_decompress.argtypes = argtypes
    OPENJPEG.opj_destroy_decompress(dinfo)
python
{ "resource": "" }
q14596
image_create
train
def image_create(cmptparms, cspace):
    """Wrapper for openjpeg library function opj_image_create."""
    lst = [ctypes.c_int, ctypes.POINTER(ImageComptParmType), ctypes.c_int]
    OPENJPEG.opj_image_create.argtypes = lst
    OPENJPEG.opj_image_create.restype = ctypes.POINTER(ImageType)
    image = OPENJPEG.opj_image_create(len(cmptparms), cmptparms, cspace)
    return image
python
{ "resource": "" }
q14597
image_destroy
train
def image_destroy(image):
    """Wraps openjpeg library function opj_image_destroy."""
    OPENJPEG.opj_image_destroy.argtypes = [ctypes.POINTER(ImageType)]
    OPENJPEG.opj_image_destroy(image)
python
{ "resource": "" }
q14598
set_default_encoder_parameters
train
def set_default_encoder_parameters():
    """Wrapper for openjpeg library function
    opj_set_default_encoder_parameters.
    """
    cparams = CompressionParametersType()
    argtypes = [ctypes.POINTER(CompressionParametersType)]
    OPENJPEG.opj_set_default_encoder_parameters.argtypes = argtypes
    OPENJPEG.opj_set_default_encoder_parameters(ctypes.byref(cparams))
    return cparams
python
{ "resource": "" }
q14599
set_default_decoder_parameters
train
def set_default_decoder_parameters(dparams_p):
    """Wrapper for opj_set_default_decoder_parameters."""
    argtypes = [ctypes.POINTER(DecompressionParametersType)]
    OPENJPEG.opj_set_default_decoder_parameters.argtypes = argtypes
    OPENJPEG.opj_set_default_decoder_parameters(dparams_p)
python
{ "resource": "" }