code                      string
signature                 string
docstring                 string
loss_without_docstring    float64
loss_with_docstring       float64
factor                    float64
url = "{0}/{1}".format(self._cached_datasets_url, dataset_name) payload = { "query": query, "index_by": index_by, "display_name": display_name } return self._get_json(HTTPMethods.PUT, url, self._get_master_key(), json=payload)
def create(self, dataset_name, query, index_by, display_name)
Create a Cached Dataset for a Project. Master key must be set.
3.467275
2.910133
1.191449
url = "{0}/{1}/results".format(self._cached_datasets_url, dataset_name) index_by = index_by if isinstance(index_by, str) else json.dumps(index_by) timeframe = timeframe if isinstance(timeframe, str) else json.dumps(timeframe) query_params = { "index_by": index_by, "timeframe": timeframe } return self._get_json( HTTPMethods.GET, url, self._get_read_key(), params=query_params )
def results(self, dataset_name, index_by, timeframe)
Retrieve results from a Cached Dataset. Read key must be set.
2.736756
2.354517
1.162343
url = "{0}/{1}".format(self._cached_datasets_url, dataset_name) self._get_json(HTTPMethods.DELETE, url, self._get_master_key()) return True
def delete(self, dataset_name)
Delete a Cached Dataset. Master Key must be set.
6.930872
4.334762
1.598905
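A minimal end-to-end sketch of the three Cached Dataset methods above, assuming a configured Keen IO client object that exposes this interface as client.cached_datasets; the dataset name and the query shape are illustrative assumptions, not values prescribed by the records above.

# Hypothetical wiring: `client` is a configured Keen IO client whose
# cached_datasets attribute is the interface documented above.
datasets = client.cached_datasets

# create() uses the master key and PUTs the dataset definition.
datasets.create(
    dataset_name="total-pageviews",
    query={"analysis_type": "count",
           "event_collection": "pageviews",
           "timeframe": "this_500_days",
           "interval": "daily"},
    index_by=["url"],
    display_name="Total pageviews by URL",
)

# results() uses the read key; index_by/timeframe may be strings or
# JSON-serializable objects (see the json.dumps calls above).
result = datasets.results("total-pageviews",
                          index_by={"url": "https://example.com"},
                          timeframe="this_7_days")

# delete() uses the master key and returns True on success.
datasets.delete("total-pageviews")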
self.fall = self.fall or not args
self.fall = self.fall or (self.value in args)
return self.fall
def match(self, *args)
Whether or not to enter a given case statement
10.446826
7.60417
1.373829
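The match method above only makes sense inside the classic for-loop "switch" recipe. A minimal reconstruction follows; only the body of match comes from the record above, while the rest of the class and the usage are the standard recipe and therefore an assumption about the surrounding code.

class switch:
    # Classic for-loop-based switch/case recipe; only match() is taken
    # from the record above.
    def __init__(self, value):
        self.value = value
        self.fall = False

    def __iter__(self):
        # Expose match() exactly once so "for case in switch(x)" works.
        yield self.match

    def match(self, *args):
        self.fall = self.fall or not args
        self.fall = self.fall or (self.value in args)
        return self.fall

for case in switch('two'):
    if case('one'):
        print('one')
        break
    if case('two', 'three'):
        print('two or three')
        break
    if case():  # default: empty args always match
        print('default')
        break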
# Current directory.
fname = os.path.join(os.getcwd(), 'glymurrc')
if os.path.exists(fname):
    return fname

confdir = get_configdir()
if confdir is not None:
    fname = os.path.join(confdir, 'glymurrc')
    if os.path.exists(fname):
        return fname

# didn't find a configuration file.
return None
def glymurrc_fname()
Return the path to the configuration file.

Search order:
    1) current working directory
    2) environ var XDG_CONFIG_HOME
    3) $HOME/.config/glymur/glymurrc
2.637045
2.639731
0.998982
if path is None or path in ['None', 'none']:
    # Either could not find a library via ctypes or
    # user-configuration-file, or we could not find it in any of the
    # default locations, or possibly the user intentionally does not want
    # one of the libraries to load.
    return None

try:
    if os.name == "nt":
        opj_lib = ctypes.windll.LoadLibrary(path)
    else:
        opj_lib = ctypes.CDLL(path)
except (TypeError, OSError):
    msg = 'The {libname} library at {path} could not be loaded.'
    msg = msg.format(path=path, libname=libname)
    warnings.warn(msg, UserWarning)
    opj_lib = None

return opj_lib
def load_library_handle(libname, path)
Load the library, return the ctypes handle.
4.218913
4.238745
0.995321
filename = glymurrc_fname()
if filename is None:
    # There's no library file path to return in this case.
    return None

# Read the configuration file for the library location.
parser = ConfigParser()
parser.read(filename)
try:
    path = parser.get('library', libname)
except (NoOptionError, NoSectionError):
    path = None
return path
def read_config_file(libname)
Extract library locations from a configuration file.

Parameters
----------
libname : str
    One of either 'openjp2' or 'openjpeg'

Returns
-------
path : None or str
    None if no location is specified, otherwise a path to the library
5.197144
5.538923
0.938295
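For reference, a configuration file that read_config_file would accept looks like the following. The [library] section and option names come straight from the parser.get('library', libname) call above; the paths themselves are only examples.

[library]
openjp2: /usr/local/lib/libopenjp2.so
openjpeg: /usr/local/lib/libopenjpeg.so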
handles = (load_openjpeg_library(x) for x in ['openjp2', 'openjpeg'])
handles = tuple(handles)
if all(handle is None for handle in handles):
    msg = "Neither the openjp2 nor the openjpeg library could be loaded. "
    warnings.warn(msg)
return handles
def glymur_config()
Try to ascertain locations of openjp2, openjpeg libraries.

Returns
-------
tuple
    tuple of library handles
5.744577
4.80988
1.194328
if 'XDG_CONFIG_HOME' in os.environ:
    return os.path.join(os.environ['XDG_CONFIG_HOME'], 'glymur')

if 'HOME' in os.environ and os.name != 'nt':
    # HOME is set by WinPython to something unusual, so we don't
    # necessarily want that.
    return os.path.join(os.environ['HOME'], '.config', 'glymur')

# Last stand.  Should handle windows... others?
return os.path.join(os.path.expanduser('~'), 'glymur')
def get_configdir()
Return string representing the configuration directory. Default is $HOME/.config/glymur. You can override this with the XDG_CONFIG_HOME environment variable.
4.07928
3.8756
1.052554
if key not in _options.keys():
    raise KeyError('{key} not valid.'.format(key=key))
_options[key] = value
def set_option(key, value)
Set the value of the specified option.

Available options:

    parse.full_codestream
    print.xml
    print.codestream
    print.short

Parameters
----------
key : str
    Name of a single option.
value :
    New value of option.

Option Descriptions
-------------------
parse.full_codestream : bool
    When False, only the codestream header is parsed for metadata.  This
    can result in faster JP2/JPX parsing.  When True, the entire
    codestream is parsed. [default: False]
print.codestream : bool
    When False, the codestream segments are not printed.  Otherwise the
    segments are printed depending on the value of the
    parse.full_codestream option. [default: True]
print.short : bool
    When True, only the box ID, offset, and length are displayed.  Useful
    for displaying only the basic structure or skeleton of a JPEG 2000
    file. [default: False]
print.xml : bool
    When False, printing of the XML contents of any XML boxes or UUID XMP
    boxes is suppressed. [default: True]

See also
--------
get_option
4.065984
6.262058
0.649305
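A short usage sketch, grounded entirely in the option names documented in the set_option record above:

import glymur

glymur.set_option('print.short', True)            # skeleton view only
glymur.set_option('parse.full_codestream', True)  # parse the whole codestream
glymur.set_option('print.xml', False)             # suppress XML box contents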
global _options
if key == 'all':
    _options = copy.deepcopy(_original_options)
else:
    if key not in _options.keys():
        raise KeyError('{key} not valid.'.format(key=key))
    _options[key] = _original_options[key]
def reset_option(key)
Reset one or more options to their default value.

Pass "all" as argument to reset all options.

Available options:

    parse.full_codestream
    print.xml
    print.codestream
    print.short

Parameters
----------
key : str
    Name of a single option.
3.004392
3.291315
0.912824
warnings.warn('Use set_option instead of set_printoptions.',
              DeprecationWarning)
for key, value in kwargs.items():
    if key not in ['short', 'xml', 'codestream']:
        raise KeyError('"{0}" not a valid keyword parameter.'.format(key))
    set_option('print.' + key, value)
def set_printoptions(**kwargs)
Set printing options.

These options determine the way JPEG 2000 boxes are displayed.

Parameters
----------
short : bool, optional
    When True, only the box ID, offset, and length are displayed.  Useful
    for displaying only the basic structure or skeleton of a JPEG 2000
    file.
xml : bool, optional
    When False, printing of the XML contents of any XML boxes or UUID XMP
    boxes is suppressed.
codestream : bool, optional
    When False, the codestream segments are not printed.  Otherwise the
    segments are printed depending on how set_parseoptions has been used.

See also
--------
get_printoptions

Examples
--------
To put back the default options, you can use:

>>> import glymur
>>> glymur.set_printoptions(short=False, xml=True, codestream=True)
5.882069
4.950952
1.188068
warnings.warn('Use get_option instead of get_printoptions.',
              DeprecationWarning)
d = {}
for key in ['short', 'xml', 'codestream']:
    d[key] = _options['print.' + key]
return d
def get_printoptions()
Return the current print options.

Returns
-------
dict
    Dictionary of current print options with keys

        - short : bool
        - xml : bool
        - codestream : bool

    For a full description of these options, see `set_printoptions`.

See also
--------
set_printoptions
8.112914
4.945657
1.640412
kwargs = {'description': 'Print JPEG2000 metadata.',
          'formatter_class': argparse.ArgumentDefaultsHelpFormatter}
parser = argparse.ArgumentParser(**kwargs)

parser.add_argument('-x', '--noxml',
                    help='suppress XML',
                    action='store_true')
parser.add_argument('-s', '--short',
                    help='only print box id, offset, and length',
                    action='store_true')

chelp = 'Level of codestream information. 0 suppresses all details, '
chelp += '1 prints the main header, 2 prints the full codestream.'
parser.add_argument('-c', '--codestream',
                    help=chelp,
                    metavar='LEVEL',
                    nargs=1,
                    type=int,
                    default=[1])

parser.add_argument('filename')

args = parser.parse_args()
if args.noxml:
    set_option('print.xml', False)
if args.short:
    set_option('print.short', True)

codestream_level = args.codestream[0]
if codestream_level not in [0, 1, 2]:
    raise ValueError("Invalid level of codestream information specified.")

if codestream_level == 0:
    set_option('print.codestream', False)
elif codestream_level == 2:
    set_option('parse.full_codestream', True)

filename = args.filename

# JP2 metadata can be extensive, so don't print any warnings until we
# are done with the metadata.
with warnings.catch_warnings(record=True) as wctx:
    jp2 = Jp2k(filename)

    if jp2._codec_format == lib.openjp2.CODEC_J2K:
        if codestream_level == 0:
            print('File: {0}'.format(os.path.basename(filename)))
        elif codestream_level == 1:
            print(jp2)
        elif codestream_level == 2:
            print('File: {0}'.format(os.path.basename(filename)))
            print(jp2.get_codestream(header_only=False))
    else:
        print(jp2)

    # Now re-emit any suppressed warnings.
    if len(wctx) > 0:
        print("\n")
    for warning in wctx:
        print("{0}:{1}: {2}: {3}".format(warning.filename,
                                         warning.lineno,
                                         warning.category.__name__,
                                         warning.message))
def main()
Entry point for console script jp2dump.
3.021284
2.886012
1.046872
if not str.endswith(string, '/'):
    return str.join('', [string, '/'])
return str(string)
def _add_slash(self, string=None)
If a string doesn't end in a '/', add one.
7.209511
5.727198
1.25882
try:
    self.token = os.environ['CERBERUS_TOKEN']
    if self.verbose:
        print("Overriding Cerberus token with environment variable.",
              file=sys.stderr)
    logger.info("Overriding Cerberus token with environment variable.")
    return
except KeyError:
    # CERBERUS_TOKEN is not set; fall through to the other auth types.
    pass

if self.username:
    ua = UserAuth(self.cerberus_url, self.username, self.password)
    self.token = ua.get_token()
else:
    awsa = AWSAuth(self.cerberus_url, region=self.region,
                   aws_session=self.aws_session, verbose=self.verbose)
    self.token = awsa.get_token()
def _set_token(self)
Set the Cerberus token based on auth type
2.789646
2.524008
1.105245
roles_resp = get_with_retry(self.cerberus_url + '/v1/role',
                            headers=self.HEADERS)
throw_if_bad_response(roles_resp)
return roles_resp.json()
def get_roles(self)
Return all the roles (IAM or User Groups) that can be granted to a safe deposit box. Roles are permission levels that are granted to IAM or User Groups. Associating the id for the write role would allow that IAM or User Group to write in the safe deposit box.
6.941341
7.059525
0.983259
json_resp = self.get_roles()
for item in json_resp:
    if key in item["name"]:
        return item["id"]

raise CerberusClientException("Key '%s' not found" % key)
def get_role(self, key)
Return id of named role.
4.730277
3.98864
1.185937
json_resp = self.get_roles()
temp_dict = {}
for item in json_resp:
    temp_dict[item["name"]] = item["id"]
return temp_dict
def list_roles(self)
Simplified version of get_roles that returns a dict of just name: id for the roles
4.108225
2.829653
1.451847
sdb_resp = get_with_retry(self.cerberus_url + '/v1/category',
                          headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp.json()
def get_categories(self)
Return a list of categories that a safe deposit box can belong to
7.870194
6.704108
1.173936
# Do some sanity checking
if user_group_permissions is None:
    user_group_permissions = []
if iam_principal_permissions is None:
    iam_principal_permissions = []

if list != type(user_group_permissions):
    raise(TypeError('Expected list, but got ' +
                    str(type(user_group_permissions))))
if list != type(iam_principal_permissions):
    raise(TypeError('Expected list, but got ' +
                    str(type(iam_principal_permissions))))

temp_data = {
    "name": name,
    "description": description,
    "category_id": category_id,
    "owner": owner,
}
if len(user_group_permissions) > 0:
    temp_data["user_group_permissions"] = user_group_permissions
if len(iam_principal_permissions) > 0:
    temp_data["iam_principal_permissions"] = iam_principal_permissions

data = json.encoder.JSONEncoder().encode(temp_data)

sdb_resp = post_with_retry(self.cerberus_url + '/v2/safe-deposit-box',
                           data=str(data), headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp.json()
def create_sdb(self, name, category_id, owner, description="", user_group_permissions=None, iam_principal_permissions=None)
Create a safe deposit box. You need to refresh your token before the iam role is granted permission to the new safe deposit box. Keyword arguments: name (string) -- name of the safe deposit box category_id (string) -- category id that determines where to store the sdb. (ex: shared, applications) owner (string) -- AD group that owns the safe deposit box description (string) -- Description of the safe deposit box user_group_permissions (list) -- list of dictionaries containing the key name and maybe role_id iam_principal_permissions (list) -- list of dictionaries containing the key name iam_principal_arn and role_id
2.214781
2.170387
1.020455
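A hedged usage sketch for create_sdb. The method signature is as documented above, but the import path, client construction, category-dict shape, AD group, and ARN are assumptions introduced for illustration only.

from cerberus.client import CerberusClient  # assumed import path

client = CerberusClient('https://cerberus.example.com', 'user', 'password')

# Assumes each category dict returned by get_categories() carries an 'id'.
category_id = client.get_categories()[0]['id']
write_role_id = client.get_role('write')

sdb = client.create_sdb(
    'my-app',
    category_id,
    'Lst-my-ad-group',  # hypothetical AD group that will own the box
    description='Secrets for my-app',
    iam_principal_permissions=[{
        'iam_principal_arn': 'arn:aws:iam::123456789012:role/my-app',  # example
        'role_id': write_role_id,
    }],
)
# Per the docstring above, refresh your token before relying on the new
# permissions.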
sdb_resp = delete_with_retry(self.cerberus_url + '/v2/safe-deposit-box/' + sdb_id,
                             headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp
def delete_sdb(self, sdb_id)
Delete a safe deposit box specified by id.

Keyword arguments:
sdb_id -- this is the id of the safe deposit box, not the path.
6.006131
6.092914
0.985757
sdb_id = self.get_sdb_id(sdb)
sdb_resp = get_with_retry(
    self.cerberus_url + '/v1/safe-deposit-box/' + sdb_id + '/',
    headers=self.HEADERS
)
throw_if_bad_response(sdb_resp)
return sdb_resp.json()['path']
def get_sdb_path(self, sdb)
Return the path for a SDB
4.546845
4.407008
1.031731
list_resp = get_with_retry(
    self.cerberus_url + '/v1/secret/' + path + '/?list=true',
    headers=self.HEADERS
)
throw_if_bad_response(list_resp)
return list_resp.json()['data']['keys']
def get_sdb_keys(self, path)
Return the keys for a SDB, which are needed for the full secure data path.
6.286513
5.932525
1.059669
json_resp = self.get_sdbs()
for r in json_resp:
    if r['name'] == sdb:
        return str(r['id'])

# If we haven't returned yet then we didn't find what we were
# looking for.
raise CerberusClientException("'%s' not found" % sdb)
def get_sdb_id(self, sdb)
Return the ID for the given safe deposit box.

Keyword arguments:
sdb -- This is the name of the safe deposit box, not the path
4.394613
4.791666
0.917137
json_resp = self.get_sdbs()

# Deal with the supplied path possibly missing an ending slash
path = self._add_slash(sdb_path)

for r in json_resp:
    if r['path'] == path:
        return str(r['id'])

# If we haven't returned yet then we didn't find what we were
# looking for.
raise CerberusClientException("'%s' not found" % sdb_path)
def get_sdb_id_by_path(self, sdb_path)
Given the path, return the ID for the given safe deposit box.
5.052582
4.76898
1.059468
sdb_resp = get_with_retry(self.cerberus_url + '/v2/safe-deposit-box/' + sdb_id,
                          headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp.json()
def get_sdb_by_id(self, sdb_id)
Return the details for the given safe deposit box id.

Keyword arguments:
sdb_id -- this is the id of the safe deposit box, not the path.
5.692805
5.231521
1.088174
sdb_resp = get_with_retry(
    str.join('', [self.cerberus_url, '/v1/sdb-secret-version-paths/', sdb_id]),
    headers=self.HEADERS
)
throw_if_bad_response(sdb_resp)
return sdb_resp.json()
def get_sdb_secret_version_paths(self, sdb_id)
Get SDB secret version paths. This function takes the sdb_id
5.650261
5.608257
1.00749
sdb_raw = self.get_sdbs()
sdbs = []
for s in sdb_raw:
    sdbs.append(s['name'])
return sdbs
def list_sdbs(self)
Return SDBs by name.
3.679258
3.141407
1.171213
# Grab current data
old_data = self.get_sdb_by_id(sdb_id)

# Assemble information to update
temp_data = {}
keys = ('owner', 'description', 'iam_principal_permissions',
        'user_group_permissions')
for k in keys:
    if k in old_data:
        temp_data[k] = old_data[k]

if owner is not None:
    temp_data["owner"] = owner
if description is not None:
    temp_data["description"] = description
if user_group_permissions is not None and len(user_group_permissions) > 0:
    temp_data["user_group_permissions"] = user_group_permissions
if iam_principal_permissions is not None and len(iam_principal_permissions) > 0:
    temp_data["iam_principal_permissions"] = iam_principal_permissions

data = json.encoder.JSONEncoder().encode(temp_data)

sdb_resp = put_with_retry(self.cerberus_url + '/v2/safe-deposit-box/' + sdb_id,
                          data=str(data), headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp.json()
def update_sdb(self, sdb_id, owner=None, description=None, user_group_permissions=None, iam_principal_permissions=None)
Update a safe deposit box.

Keyword arguments:
owner (string) -- AD group that owns the safe deposit box
description (string) -- Description of the safe deposit box
user_group_permissions (list) -- list of dictionaries containing the key
                                 name and maybe role_id
iam_principal_permissions (list) -- list of dictionaries containing the
                                    keys iam_principal_arn and role_id
2.328016
2.287073
1.017902
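A short update-path sketch reusing the lookup helper documented earlier; the client object is assumed to be constructed as in the earlier create_sdb sketch, and the group name is illustrative.

# `client` as constructed in the earlier sketch.
sdb_id = client.get_sdb_id('my-app')
client.update_sdb(sdb_id,
                  owner='Lst-new-ad-group',
                  description='Ownership moved to the new team')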
secret_resp = delete_with_retry(self.cerberus_url + '/v1/secure-file/' + secure_data_path,
                                headers=self.HEADERS)
throw_if_bad_response(secret_resp)
return secret_resp
def delete_file(self, secure_data_path)
Delete a file at the given secure data path
6.795835
6.837421
0.993918
if not version:
    version = "CURRENT"
payload = {'versionId': str(version)}
secret_resp = head_with_retry(
    str.join('', [self.cerberus_url, '/v1/secure-file/', secure_data_path]),
    params=payload, headers=self.HEADERS
)
throw_if_bad_response(secret_resp)
return secret_resp.headers
def get_file_metadata(self, secure_data_path, version=None)
Get just the metadata for a file, not the content
6.904153
6.372927
1.083357
index = metadata['Content-Disposition'].index('=') + 1
metadata['filename'] = metadata['Content-Disposition'][index:].replace('"', '')
return metadata
def _parse_metadata_filename(self, metadata)
Parse the header metadata to pull out the filename and then store it under the key 'filename'
4.723332
3.657367
1.291457
query = self._get_file(secure_data_path, version)

resp = query.headers.copy()
resp = self._parse_metadata_filename(resp)
resp['data'] = query.content
return resp
def get_file(self, secure_data_path, version=None)
Return a requests.structures.CaseInsensitiveDict object containing a file and the metadata/header information around it. The binary data of the file is under the key 'data'
7.011624
5.90097
1.188216
return self.get_secret_versions(secure_data_path, limit, offset)
def get_file_versions(self, secure_data_path, limit=None, offset=None)
Get versions of a particular file.

This is just a shim to get_secret_versions.

secure_data_path -- full path to the file in the safety deposit box
limit -- Default(100), limits how many records to be returned from the
         api at once.
offset -- Default(0), used for pagination.  Will request records from
          the given offset.
5.601086
3.187438
1.757237
offset = 0
# Prime the versions dictionary so that all the logic can happen in the loop
versions = {'has_next': True, 'next_offset': 0}
while versions['has_next']:
    offset = versions['next_offset']
    versions = self.get_file_versions(secure_data_path, limit, offset)
    for summary in versions['secure_data_version_summaries']:
        yield summary
def _get_all_file_version_ids(self, secure_data_path, limit=None)
Convenience function that returns a generator that will paginate over
the file version ids.

secure_data_path -- full path to the file in the safety deposit box
limit -- Default(100), limits how many records to be returned from the
         api at once.
4.751987
4.532458
1.048435
for secret in self._get_all_file_version_ids(secure_data_path, limit):
    yield {'secret': self.get_file_data(secure_data_path, version=secret['id']),
           'version': secret}
def _get_all_file_versions(self, secure_data_path, limit=None)
Convenience function that returns a generator yielding the contents of
all versions of a file and its version info.

secure_data_path -- full path to the file in the safety deposit box
limit -- Default(100), limits how many records to be returned from the
         api at once.
4.672317
4.675979
0.999217
# Make sure that limit and offset are in range.
# Set the normal defaults
if not limit or limit <= 0:
    limit = 100
if not offset or offset < 0:
    offset = 0

payload = {'limit': str(limit), 'offset': str(offset)}

# Because of the addition of versionId and the way URLs are constructed,
# secure_data_path should always end in a '/'.
secure_data_path = self._add_slash(secure_data_path)

secret_resp = get_with_retry(self.cerberus_url + '/v1/secure-files/' + secure_data_path,
                             params=payload, headers=self.HEADERS)
throw_if_bad_response(secret_resp)
return secret_resp.json()
def list_files(self, secure_data_path, limit=None, offset=None)
Return the list of files in the path. May need to be paginated
4.898777
4.864545
1.007037
# Parse out the filename from the path; rsplit returns a list, so keep
# only the final path component.
filename = secure_data_path.rsplit('/', 1)[-1]
if content_type:
    data = {'file-content': (filename, filehandle, content_type)}
else:
    data = {'file-content': (filename, filehandle)}

headers = self.HEADERS.copy()
if 'Content-Type' in headers:
    del headers['Content-Type']

secret_resp = post_with_retry(self.cerberus_url + '/v1/secure-file/' + secure_data_path,
                              files=data, headers=headers)
throw_if_bad_response(secret_resp)
return secret_resp
def put_file(self, secure_data_path, filehandle, content_type=None)
Upload a file to the provided secure data path.

Keyword arguments:
secure_data_path -- full path in the safety deposit box that contains
                    the file key to store things under
filehandle -- Pass an opened filehandle to the file you want to upload.
              Make sure that the file was opened in binary mode,
              otherwise the size calculations can be off for text files.
content_type -- Optional.  Set the Mime type of the file you're uploading.
3.558374
3.586397
0.992186
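An upload sketch; the binary-mode requirement comes from the docstring above, while the client object and file path are assumed as in the earlier sketches.

# Open in binary mode so the size calculation is correct for text files too.
with open('report.pdf', 'rb') as fh:
    client.put_file('app/my-app/files/report.pdf', fh,
                    content_type='application/pdf')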
warnings.warn( "get_secret is deprecated, use get_secrets_data instead", DeprecationWarning ) secret_resp_json = self._get_secrets(secure_data_path, version) if key in secret_resp_json['data']: return secret_resp_json['data'][key] else: raise CerberusClientException("Key '%s' not found" % key)
def get_secret(self, secure_data_path, key, version=None)
(Deprecated) Return the secret based on the secure data path and key.

This method is deprecated because it misleads users into thinking
they're only getting one value from Cerberus when in reality they're
getting all values, from which a single value is returned.

Use get_secrets_data(secure_data_path)[key] instead.
(See https://github.com/Nike-Inc/cerberus-python-client/issues/18)
3.165713
2.540372
1.246161
if not version:
    version = "CURRENT"
payload = {'versionId': str(version)}
secret_resp = get_with_retry(
    str.join('', [self.cerberus_url, '/v1/secret/', secure_data_path]),
    params=payload, headers=self.HEADERS
)
throw_if_bad_response(secret_resp)
return secret_resp.json()
def _get_secrets(self, secure_data_path, version=None)
Return full json secrets based on the secure data path.

Keyword arguments:
secure_data_path (string) -- full path in the safe deposit box that
                             contains the key, e.g. /shared/sdb-path/secret
5.999982
6.349736
0.944918
warnings.warn( "get_secrets is deprecated, use get_secrets_data instead", DeprecationWarning ) return self._get_secrets(secure_data_path, version)
def get_secrets(self, secure_data_path, version=None)
(Deprecated) Return json secrets based on the secure data path.

This method is deprecated because an additional step of reading the
value under the ['data'] key from the returned data is required to get
secrets, which contradicts the method name.

Use get_secrets_data(secure_data_path) instead.
(See https://github.com/Nike-Inc/cerberus-python-client/issues/19)
3.282076
2.576845
1.27368
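Putting the two deprecation notices together, a migration sketch; get_secrets_data is the replacement both warnings point at, and the path is illustrative.

# Deprecated calls:
password = client.get_secret('app/my-app/db', 'password')
everything = client.get_secrets('app/my-app/db')['data']

# Preferred, per the deprecation warnings above:
data = client.get_secrets_data('app/my-app/db')
password = data['password']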
# Make sure that limit and offset are in range.
# Set the normal defaults
if not limit or limit <= 0:
    limit = 100
if not offset or offset < 0:
    offset = 0

payload = {'limit': str(limit), 'offset': str(offset)}

secret_resp = get_with_retry(
    str.join('', [self.cerberus_url, '/v1/secret-versions/', secure_data_path]),
    params=payload, headers=self.HEADERS
)
throw_if_bad_response(secret_resp)
return secret_resp.json()
def get_secret_versions(self, secure_data_path, limit=None, offset=None)
Get versions of a particular secret key.

secure_data_path -- full path to the key in the safety deposit box
limit -- Default(100), limits how many records to be returned from the
         api at once.
offset -- Default(0), used for pagination.  Will request records from
          the given offset.
4.259087
4.20624
1.012564
offset = 0
# Prime the versions dictionary so that all the logic can happen in the loop
versions = {'has_next': True, 'next_offset': 0}
while versions['has_next']:
    offset = versions['next_offset']
    versions = self.get_secret_versions(secure_data_path, limit, offset)
    for summary in versions['secure_data_version_summaries']:
        yield summary
def _get_all_secret_version_ids(self, secure_data_path, limit=None)
Convenience function that returns a generator that will paginate over
the secret version ids.

secure_data_path -- full path to the key in the safety deposit box
limit -- Default(100), limits how many records to be returned from the
         api at once.
4.694694
4.540758
1.033901
for secret in self._get_all_secret_version_ids(secure_data_path, limit):
    yield {'secret': self.get_secrets_data(secure_data_path, version=secret['id']),
           'version': secret}
def _get_all_secret_versions(self, secure_data_path, limit=None)
Convenience function that returns a generator yielding the contents of
secrets and their version info.

secure_data_path -- full path to the key in the safety deposit box
limit -- Default(100), limits how many records to be returned from the
         api at once.
4.32923
3.895107
1.111454
# Because of the addition of versionId and the way URLs are constructed,
# secure_data_path should always end in a '/'.
secure_data_path = self._add_slash(secure_data_path)

secret_resp = get_with_retry(self.cerberus_url + '/v1/secret/' + secure_data_path + '?list=true',
                             headers=self.HEADERS)
throw_if_bad_response(secret_resp)
return secret_resp.json()
def list_secrets(self, secure_data_path)
Return json secrets based on the secure_data_path; this will list the keys in a folder.
6.239725
6.131362
1.017674
# json encode the input.  Cerberus is sensitive to double vs single quotes.
# an added bonus is that json encoding transforms python2 unicode strings
# into a compatible format.
data = json.encoder.JSONEncoder().encode(secret)
if merge:
    data = self.secret_merge(secure_data_path, secret)

secret_resp = post_with_retry(self.cerberus_url + '/v1/secret/' + secure_data_path,
                              data=str(data), headers=self.HEADERS)
throw_if_bad_response(secret_resp)
return secret_resp
def put_secret(self, secure_data_path, secret, merge=True)
Write secret(s) to the provided secure data path, given a dictionary of
key/values.

Keyword arguments:
secure_data_path -- full path in the safety deposit box that contains
                    the key
secret -- A dictionary containing key/values to be written at the
          secure data path
merge -- Boolean that determines if the provided secret keys should be
         merged with the values already present at the secure data path.
         If False, the keys will completely overwrite what was stored at
         the secure data path. (default True)
7.327589
7.75825
0.94449
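A write-path sketch with both merge semantics from the docstring above; the client object and paths are assumptions carried over from the earlier sketches.

# Merge new keys into whatever is already stored at the path (default):
client.put_secret('app/my-app/db', {'port': '5432'})

# Replace the stored keys entirely:
client.put_secret('app/my-app/db',
                  {'username': 'svc', 'password': 's3cr3t'},
                  merge=False)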
get_resp = get_with_retry(self.cerberus_url + '/v1/secret/' + secure_data_path,
                          headers=self.HEADERS)
temp_key = {}

# Ignore a return of 404 since it means the key might not exist
if get_resp.status_code == requests.codes.bad and get_resp.status_code not in [403, 404]:
    throw_if_bad_response(get_resp)
elif get_resp.status_code in [403, 404]:
    temp_key = {}
else:
    temp_key = get_resp.json()['data']

# Allow key to be either a string describing a dict or a dict.
if type(key) == str:
    temp_key.update(ast.literal_eval(key))
else:
    temp_key.update(key)

# This is a bit of a hack to get around python 2 treating unicode strings
# differently.  Cerberus will throw a 400 if we try to post python 2 style
# unicode strings as the payload.
combined_key = json.encoder.JSONEncoder().encode(temp_key)

return combined_key
def secret_merge(self, secure_data_path, key)
Compare key/values at secure_data_path and merge them. New values will overwrite old ones.
4.322323
4.283134
1.009149
if self.aws_session is None:
    boto_session = session.Session()
    creds = boto_session.get_credentials()
else:
    creds = self.aws_session.get_credentials()

if creds is None:
    raise CerberusClientException("Unable to locate AWS credentials")

readonly_credentials = creds.get_frozen_credentials()

# hardcode get-caller-identity request
data = OrderedDict((('Action', 'GetCallerIdentity'),
                    ('Version', '2011-06-15')))
url = 'https://sts.{}.amazonaws.com/'.format(self.region)

request_object = awsrequest.AWSRequest(method='POST', url=url, data=data)

signer = auth.SigV4Auth(readonly_credentials, 'sts', self.region)
signer.add_auth(request_object)

return request_object.headers
def _get_v4_signed_headers(self)
Returns V4 signed get-caller-identity request headers
3.28277
3.051839
1.07567
signed_headers = self._get_v4_signed_headers()
for header in self.HEADERS:
    signed_headers[header] = self.HEADERS[header]

resp = post_with_retry(self.cerberus_url + '/v2/auth/sts-identity',
                       headers=signed_headers)
throw_if_bad_response(resp)

token = resp.json()['client_token']
iam_principal_arn = resp.json()['metadata']['aws_iam_principal_arn']
if self.verbose:
    print('Successfully authenticated with Cerberus as {}'.format(iam_principal_arn),
          file=sys.stderr)
logger.info('Successfully authenticated with Cerberus as {}'.format(iam_principal_arn))

return token
def get_token(self)
Returns a client token from Cerberus
3.882142
3.568337
1.087942
# First 8 should be (73, 73, 42, 8) or (77, 77, 42, 8)
data = struct.unpack('BB', read_buffer[0:2])
if data[0] == 73 and data[1] == 73:
    # little endian
    endian = '<'
elif data[0] == 77 and data[1] == 77:
    # big endian
    endian = '>'
else:
    msg = ("The byte order indication in the TIFF header ({byte_order}) "
           "is invalid.  It should be either {little_endian} or "
           "{big_endian}.")
    # The byte order indication is the first two bytes of the buffer.
    msg = msg.format(byte_order=read_buffer[0:2],
                     little_endian=bytes([73, 73]),
                     big_endian=bytes([77, 77]))
    raise IOError(msg)

_, offset = struct.unpack(endian + 'HI', read_buffer[2:8])

# This is the 'Exif Image' portion.
exif = ExifImageIfd(endian, read_buffer, offset)
return exif.processed_ifd
def tiff_header(read_buffer)
Interpret the uuid raw data as a tiff header.
3.202355
3.179717
1.00712
try:
    fmt = self.datatype2fmt[dtype][0] * count
    payload_size = self.datatype2fmt[dtype][1] * count
except KeyError:
    msg = 'Invalid TIFF tag datatype ({0}).'.format(dtype)
    raise IOError(msg)

if payload_size <= 4:
    # Interpret the payload from the 4 bytes in the tag entry.
    target_buffer = offset_buf[:payload_size]
else:
    # Interpret the payload at the offset specified by the 4 bytes in
    # the tag entry.
    offset, = struct.unpack(self.endian + 'I', offset_buf)
    target_buffer = self.read_buffer[offset:offset + payload_size]

if dtype == 2:
    # ASCII
    payload = target_buffer.decode('utf-8').rstrip('\x00')
else:
    payload = struct.unpack(self.endian + fmt, target_buffer)
    if dtype == 5 or dtype == 10:
        # Rational or Signed Rational.  Construct the list of values.
        rational_payload = []
        for j in range(count):
            value = float(payload[j * 2]) / float(payload[j * 2 + 1])
            rational_payload.append(value)
        payload = rational_payload
    if count == 1:
        # If just a single value, then return a scalar instead of a
        # tuple.
        payload = payload[0]

return payload
def parse_tag(self, dtype, count, offset_buf)
Interpret an Exif image tag data payload.
3.182171
3.079634
1.033295
for tag, value in self.raw_ifd.items():
    try:
        tag_name = tagnum2name[tag]
    except KeyError:
        # Ok, we don't recognize this tag.  Just use the numeric id.
        msg = 'Unrecognized Exif tag ({tag}).'.format(tag=tag)
        warnings.warn(msg, UserWarning)
        tag_name = tag
    self.processed_ifd[tag_name] = value
def post_process(self, tagnum2name)
Map the tag name instead of tag number to the tag value.
4.379153
4.122682
1.06221
response = self._get_json(HTTPMethods.GET, self.saved_query_url,
                          self._get_master_key())
return response
def all(self)
Gets all saved queries for a project from the Keen IO API. Master key must be set.
16.620441
7.46865
2.225361
url = "{0}/{1}".format(self.saved_query_url, query_name) response = self._get_json(HTTPMethods.GET, url, self._get_master_key()) return response
def get(self, query_name)
Gets a single saved query for a project from the Keen IO API given a query name. Master key must be set.
6.684374
4.335765
1.541683
url = "{0}/{1}/result".format(self.saved_query_url, query_name) response = self._get_json(HTTPMethods.GET, url, self._get_read_key()) return response
def results(self, query_name)
Gets a single saved query with a 'result' object for a project from the Keen IO API given a query name. Read or Master key must be set.
7.454659
5.116804
1.456897
url = "{0}/{1}".format(self.saved_query_url, query_name) payload = saved_query # To support clients that may have already called dumps() to work around how this used to # work, make sure it's not a str. Hopefully it's some sort of mapping. When we actually # try to send the request, client code will get an InvalidJSONError if payload isn't # a json-formatted string. if not isinstance(payload, str): payload = json.dumps(saved_query) response = self._get_json(HTTPMethods.PUT, url, self._get_master_key(), data=payload) return response
def create(self, query_name, saved_query)
Creates the saved query via a PUT request to Keen IO Saved Query endpoint. Master key must be set.
8.303405
7.550087
1.099776
query_name_attr_name = "query_name"
refresh_rate_attr_name = "refresh_rate"
query_attr_name = "query"
metadata_attr_name = "metadata"

old_saved_query = self.get(query_name)

# Create a new query def to send back.  We cannot send values for attributes like 'urls',
# 'last_modified_date', 'run_information', etc.
new_saved_query = {
    query_name_attr_name: old_saved_query[query_name_attr_name],  # expected
    refresh_rate_attr_name: old_saved_query[refresh_rate_attr_name],  # expected
    query_attr_name: {}
}

# If metadata was set, preserve it.  The Explorer UI currently stores information here.
old_metadata = (old_saved_query[metadata_attr_name]
                if metadata_attr_name in old_saved_query
                else None)

if old_metadata:
    new_saved_query[metadata_attr_name] = old_metadata

# Preserve any non-empty properties of the existing query.  We get back values like None
# for 'group_by', 'interval' or 'timezone', but those aren't accepted values when updating.
old_query = old_saved_query[query_attr_name]  # expected

# Shallow copy since we want the entire object hierarchy to start with.
for (key, value) in six.iteritems(old_query):
    if value:
        new_saved_query[query_attr_name][key] = value

# Now, recursively overwrite any attributes passed in.
SavedQueriesInterface._deep_update(new_saved_query, saved_query_attributes)

return self.create(query_name, new_saved_query)
def update(self, query_name, saved_query_attributes)
Given a dict of attributes to be updated, update only those attributes
in the Saved Query at the resource given by 'query_name'.

This will perform two HTTP requests--one to fetch the query definition,
and one to set the new attributes.  This method intends to preserve any
other properties on the query.

Master key must be set.
4.175265
4.050203
1.030878
url = "{0}/{1}".format(self.saved_query_url, query_name) self._get_json(HTTPMethods.DELETE, url, self._get_master_key()) return True
def delete(self, query_name)
Deletes a saved query from a project with a query name. Master key must be set.
7.920235
4.863904
1.62837
if len(s) % AES.block_size == 0:
    return s

return Padding.appendPadding(s, blocksize=AES.block_size)
def pad_aes256(s)
Pads an input string to a given block size.

:param s: string
:returns: The padded string.
5.009244
5.399863
0.927661
if not s:
    return s

try:
    return Padding.removePadding(s, blocksize=AES.block_size)
except AssertionError:
    # if there's an error while removing padding, just return s.
    return s
def unpad_aes256(s)
Removes padding from an input string based on a given block size.

:param s: string
:returns: The unpadded string.
6.554384
6.411162
1.022339
if len(s) % OLD_BLOCK_SIZE == 0:
    return s

return Padding.appendPadding(s, blocksize=OLD_BLOCK_SIZE)
def old_pad(s)
Pads an input string to a given block size.

:param s: string
:returns: The padded string.
5.812459
6.131742
0.947929
if not s:
    return s

try:
    return Padding.removePadding(s, blocksize=OLD_BLOCK_SIZE)
except AssertionError:
    # if there's an error while removing padding, just return s.
    return s
def old_unpad(s)
Removes padding from an input string based on a given block size.

:param s: string
:returns: The unpadded string.
7.413205
7.237367
1.024296
if len(key) != 64:
    raise TypeError("encode_aes256() expects a 256 bit key "
                    "encoded as a 64 hex character string")

# generate AES.block_size cryptographically secure random bytes for our
# IV (initial value)
iv = os.urandom(AES.block_size)
# set up an AES cipher object
cipher = AES.new(binascii.unhexlify(key.encode('ascii')),
                 mode=AES.MODE_CBC, IV=iv)
# encrypt the plaintext after padding it
ciphertext = cipher.encrypt(ensure_bytes(pad_aes256(plaintext)))
# append the hexed IV and the hexed ciphertext
iv_plus_encrypted = binascii.hexlify(iv) + binascii.hexlify(ciphertext)
# return that
return iv_plus_encrypted
def encode_aes256(key, plaintext)
Utility method to encode some given plaintext with the given key.

Important thing to note: This is not a general purpose encryption
method - it has specific semantics (see below for details).

Takes the given hex string key and converts it to a 256 bit binary
blob.  Then pads the given plaintext to AES block size, which is always
16 bytes regardless of AES key size.  Then encrypts using AES-256-CBC
using a random IV.  Then converts both the IV and the ciphertext to
hex.  Finally returns the IV appended by the ciphertext.

:param key: string, 64 hex chars long
:param plaintext: string, any amount of data
3.769805
3.641837
1.035138
# grab first AES.block_size bytes (aka 2 * AES.block_size characters of
# hex) - that's the IV
iv_size = 2 * AES.block_size
hexed_iv = iv_plus_encrypted[:iv_size]
# grab everything else - that's the ciphertext (aka encrypted message)
hexed_ciphertext = iv_plus_encrypted[iv_size:]
# unhex the iv and ciphertext
iv = binascii.unhexlify(hexed_iv)
ciphertext = binascii.unhexlify(hexed_ciphertext)
# set up the correct AES cipher object
cipher = AES.new(binascii.unhexlify(key.encode('ascii')),
                 mode=AES.MODE_CBC, IV=iv)
# decrypt!
plaintext = cipher.decrypt(ciphertext)
# return the unpadded version of this
return unpad_aes256(plaintext)
def decode_aes256(key, iv_plus_encrypted)
Utility method to decode a payload consisting of the hexed IV + the
hexed ciphertext using the given key.  See above for more details.

:param key: string, 64 hex characters long
:param iv_plus_encrypted: string, a hexed IV + hexed ciphertext
3.390135
3.405683
0.995435
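A round-trip sketch for the encode/decode pair above, assuming the helpers live in a module importable as crypto_util (a hypothetical name) and that the pycryptodome/pycrypto AES and Padding dependencies used above are installed.

import binascii
import os

from crypto_util import decode_aes256, encode_aes256  # hypothetical module name

# A 256-bit key encoded as 64 hex characters, as encode_aes256 requires.
key = binascii.hexlify(os.urandom(32)).decode('ascii')

blob = encode_aes256(key, 'attack at dawn')  # hex IV + hex ciphertext
recovered = decode_aes256(key, blob)
print(recovered)  # b'attack at dawn' (AES decryption yields bytes)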
# generate 16 cryptographically secure random bytes for our IV (initial value)
iv = os.urandom(16)
# set up an AES cipher object
cipher = AES.new(ensure_bytes(old_pad(key)), mode=AES.MODE_CBC, IV=iv)
# encrypt the plaintext after padding it
ciphertext = cipher.encrypt(ensure_bytes(old_pad(plaintext)))
# append the hexed IV and the hexed ciphertext
iv_plus_encrypted = binascii.hexlify(iv) + binascii.hexlify(ciphertext)
# return that
return iv_plus_encrypted
def old_encode_aes(key, plaintext)
Utility method to encode some given plaintext with the given key.

Important thing to note: This is not a general purpose encryption
method - it has specific semantics (see below for details).

Takes the given key, pads it to 32 bytes.  Then takes the given
plaintext and pads that to a 32 byte block size.  Then encrypts using
AES-256-CBC using a random IV.  Then converts both the IV and the
ciphertext to hex.  Finally returns the IV appended by the ciphertext.

:param key: string, <= 32 bytes long
:param plaintext: string, any amount of data
3.929306
3.946139
0.995734
# grab first 16 bytes (aka 32 characters of hex) - that's the IV
hexed_iv = iv_plus_encrypted[:32]
# grab everything else - that's the ciphertext (aka encrypted message)
hexed_ciphertext = iv_plus_encrypted[32:]
# unhex the iv and ciphertext
iv = binascii.unhexlify(hexed_iv)
ciphertext = binascii.unhexlify(hexed_ciphertext)
# set up the correct AES cipher object
cipher = AES.new(ensure_bytes(old_pad(key)), mode=AES.MODE_CBC, IV=iv)
# decrypt!
plaintext = cipher.decrypt(ciphertext)
# return the unpadded version of this
return old_unpad(plaintext)
def old_decode_aes(key, iv_plus_encrypted)
Utility method to decode a payload consisting of the hexed IV + the
hexed ciphertext using the given key.  See above for more details.

:param key: string, <= 32 bytes long
:param iv_plus_encrypted: string, a hexed IV + hexed ciphertext
3.334947
3.416732
0.976063
auth_resp = get_with_retry(self.cerberus_url + '/v2/auth/user',
                           auth=(self.username, self.password),
                           headers=self.HEADERS)

if auth_resp.status_code != 200:
    throw_if_bad_response(auth_resp)

return auth_resp.json()
def get_auth(self)
Returns auth response which has client token unless MFA is required
3.902288
3.768842
1.035408
auth_resp = self.get_auth()
if auth_resp['status'] == 'mfa_req':
    token_resp = self.get_mfa(auth_resp)
else:
    token_resp = auth_resp
token = token_resp['data']['client_token']['client_token']
return token
def get_token(self)
Returns a client token from Cerberus
4.04707
3.608161
1.121643
devices = auth_resp['data']['devices']
if len(devices) == 1:
    # If there's only one option, don't show selection prompt
    selection = "0"
    x = 1
else:
    print("Found the following MFA devices")
    x = 0
    for device in devices:
        print("{0}: {1}".format(x, device['name']))
        x = x + 1
    selection = input("Enter a selection: ")

if selection.isdigit():
    selection_num = int(str(selection))
else:
    raise CerberusClientException(
        str.join('', ["Selection: '", selection, "' is not a number"]))

if (selection_num >= x) or (selection_num < 0):
    raise CerberusClientException(
        str.join('', ["Selection: '", str(selection_num), "' is out of range"]))

sec_code = input('Enter ' + auth_resp['data']['devices'][selection_num]['name'] +
                 ' security code: ')

mfa_resp = post_with_retry(
    self.cerberus_url + '/v2/auth/mfa_check',
    json={'otp_token': sec_code,
          'device_id': auth_resp['data']['devices'][selection_num]['id'],
          'state_token': auth_resp['data']['state_token']},
    headers=self.HEADERS
)

if mfa_resp.status_code != 200:
    throw_if_bad_response(mfa_resp)

return mfa_resp.json()
def get_mfa(self, auth_resp)
Gets MFA code from user and returns response which includes the client token
3.235965
3.238986
0.999067
# The mask length tells us the format string to use when unpacking
# from the buffer read from file.
mask_format = {1: 'B', 2: 'H', 4: 'I'}[mask_length]

num_standard_flags, = struct.unpack_from('>H', read_buffer, offset=0)

# Read in standard flags and standard masks.  Each standard flag should
# be two bytes, but the standard mask flag is as long as specified by
# the mask length.
fmt = '>' + ('H' + mask_format) * num_standard_flags
data = struct.unpack_from(fmt, read_buffer, offset=2)

standard_flag = data[0:num_standard_flags * 2:2]
standard_mask = data[1:num_standard_flags * 2:2]

return standard_flag, standard_mask
def _parse_standard_flag(read_buffer, mask_length)
Construct standard flag, standard mask data from the file.

Specifically working on Reader Requirements box.

Parameters
----------
read_buffer : bytes
    Data read from the JP2K file.
mask_length : int
    Length of standard mask flag
3.548497
3.591617
0.987994
# The mask length tells us the format string to use when unpacking
# from the buffer read from file.
mask_format = {1: 'B', 2: 'H', 4: 'I'}[mask_length]

num_vendor_features, = struct.unpack_from('>H', read_buffer)

# Each vendor feature consists of a 16-byte UUID plus a mask whose
# length is specified by, you guessed it, "mask_length".
entry_length = 16 + mask_length
vendor_feature = []
vendor_mask = []
for j in range(num_vendor_features):
    uslice = slice(2 + j * entry_length, 2 + (j + 1) * entry_length)
    ubuffer = read_buffer[uslice]
    vendor_feature.append(UUID(bytes=ubuffer[0:16]))

    vmask = struct.unpack('>' + mask_format, ubuffer[16:])
    vendor_mask.append(vmask)

return vendor_feature, vendor_mask
def _parse_vendor_features(read_buffer, mask_length)
Construct vendor features, vendor mask data from the file.

Specifically working on Reader Requirements box.

Parameters
----------
read_buffer : bytes
    Data read from the JP2K file.
mask_length : int
    Length of vendor mask flag
3.469813
3.673005
0.94468
if writing:
    raise IOError(msg)
else:
    warnings.warn(msg)
def _dispatch_validation_error(self, msg, writing=False)
Issue either a warning or an error depending on circumstance. If writing to file, then error out, as we do not wish to create bad JP2 files. If reading, then we should be more lenient and just warn.
5.150824
4.447803
1.15806
msg = Jp2kBox.__str__(self)
for box in self.box:
    boxstr = str(box)

    # Indent the child boxes to make the association clear.
    msg += '\n' + self._indent(boxstr)

return msg
def _str_superbox(self)
__str__ method for all superboxes.
8.371352
7.164025
1.168526
if sys.hexversion >= 0x03030000:
    return textwrap.indent(textstr, ' ' * indent_level)
else:
    lst = [(' ' * indent_level + x) for x in textstr.split('\n')]
    return '\n'.join(lst)
def _indent(self, textstr, indent_level=4)
Indent a string.

Textwrap's indent method only exists for 3.3 or above.  In 2.7 we have
to fake it.

Parameters
----------
textstr : str
    String to be indented.
indent_level : int
    Number of spaces of indentation to add.

Returns
-------
str
    Possibly multi-line string indented by the specified amount.
2.50967
2.768803
0.90641
# Write the contained boxes, then come back and write the length.
orig_pos = fptr.tell()
fptr.write(struct.pack('>I4s', 0, box_id))
for box in self.box:
    box.write(fptr)

end_pos = fptr.tell()
fptr.seek(orig_pos)
fptr.write(struct.pack('>I', end_pos - orig_pos))
fptr.seek(end_pos)
def _write_superbox(self, fptr, box_id)
Write a superbox.

Parameters
----------
fptr : file or file object
    Superbox (box of boxes) to be written to this file.
box_id : bytes
    4-byte sequence that identifies the superbox.
2.933792
2.859981
1.025808
try:
    parser = _BOX_WITH_ID[box_id].parse
except KeyError:
    # We don't recognize the box ID, so create an UnknownBox and be
    # done with it.
    msg = ('Unrecognized box ({box_id}) encountered at byte offset '
           '{offset}.')
    msg = msg.format(box_id=box_id, offset=fptr.tell() - 8)
    warnings.warn(msg, UserWarning)
    box = UnknownBox(box_id, offset=start, length=num_bytes,
                     longname='Unknown')
    return box

try:
    box = parser(fptr, start, num_bytes)
except ValueError as err:
    msg = ("Encountered an unrecoverable ValueError while parsing a "
           "{box_id} box at byte offset {offset}.  The original error "
           "message was \"{original_error_message}\".")
    msg = msg.format(box_id=_BOX_WITH_ID[box_id].longname,
                     offset=start,
                     original_error_message=str(err))
    warnings.warn(msg, UserWarning)
    box = UnknownBox(box_id.decode('utf-8'), length=num_bytes,
                     offset=start, longname='Unknown')

return box
def _parse_this_box(self, fptr, box_id, start, num_bytes)
Parse the current box.

Parameters
----------
fptr : file
    Open file object, currently points to start of box payload, not the
    start of the box.
box_id : str
    4-letter identifier for the current box.
start, num_bytes : int
    Byte offset and length of the current box.

Returns
-------
Jp2kBox
    Object corresponding to the current box.
2.935791
2.880216
1.019295
superbox = []

start = fptr.tell()

while True:

    # Are we at the end of the superbox?
    if start >= self.offset + self.length:
        break

    read_buffer = fptr.read(8)
    if len(read_buffer) < 8:
        msg = "Extra bytes at end of file ignored."
        warnings.warn(msg, UserWarning)
        return superbox

    (box_length, box_id) = struct.unpack('>I4s', read_buffer)
    if box_length == 0:
        # The length of the box is presumed to last until the end of
        # the file.  Compute the effective length of the box.
        num_bytes = os.path.getsize(fptr.name) - fptr.tell() + 8

    elif box_length == 1:
        # The length of the box is in the XL field, a 64-bit value.
        read_buffer = fptr.read(8)
        num_bytes, = struct.unpack('>Q', read_buffer)

    else:
        # The box_length value really is the length of the box!
        num_bytes = box_length

    box = self._parse_this_box(fptr, box_id, start, num_bytes)

    superbox.append(box)

    # Position to the start of the next box.
    if num_bytes > self.length:
        # Length of the current box goes past the end of the
        # enclosing superbox.
        msg = '{0} box has incorrect box length ({1})'
        msg = msg.format(box_id, num_bytes)
        warnings.warn(msg)
    elif fptr.tell() > start + num_bytes:
        # The box must be invalid somehow, as the file pointer is
        # positioned past the end of the box.
        msg = ('{box_id} box may be invalid, the file pointer is '
               'positioned {num_bytes} bytes past the end of the box.')
        msg = msg.format(box_id=box_id,
                         num_bytes=fptr.tell() - (start + num_bytes))
        warnings.warn(msg, UserWarning)
    fptr.seek(start + num_bytes)

    start += num_bytes

return superbox
def parse_superbox(self, fptr)
Parse a superbox (a box consisting of nothing but other boxes).

Parameters
----------
fptr : file
    Open file object.

Returns
-------
list
    List of top-level boxes in the JPEG 2000 file.
3.206453
3.232
0.992096
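The two special box-length encodings handled in parse_superbox above can be demonstrated with a few lines of struct, mirroring the unpack calls in that function; the box ID and size here are examples.

import struct

# A box header is a 32-bit length plus a 4-byte ID.  Length 1 means the
# real size follows in a 64-bit XL field; length 0 means "to end of file".
header = struct.pack('>I4s', 1, b'jp2c') + struct.pack('>Q', 2 ** 33)

box_length, box_id = struct.unpack_from('>I4s', header)
if box_length == 1:
    # The length of the box is in the XL field, a 64-bit value.
    num_bytes, = struct.unpack_from('>Q', header, offset=8)

print(box_id, num_bytes)  # b'jp2c' 8589934592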
if self.colorspace is not None and self.icc_profile is not None:
    msg = ("Colorspace and icc_profile cannot both be set when "
           "creating a ColourSpecificationBox.")
    self._dispatch_validation_error(msg, writing=writing)

if self.method not in _COLORSPACE_METHODS.keys():
    msg = "Invalid colorspace method value ({method})."
    msg = msg.format(method=self.method)
    if writing:
        raise IOError(msg)
    else:
        warnings.warn(msg, UserWarning)

if self.approximation not in _APPROXIMATION_MEASURES.keys():
    msg = "Invalid colr approximation value ({approx})."
    msg = msg.format(approx=self.approximation)
    if not writing:
        # Don't bother to check this for the case of writing=True
        # because it's already handled in the wrapping code.
        warnings.warn(msg, UserWarning)
def _validate(self, writing=False)
Verify that the box obeys the specifications.
4.232935
4.019423
1.05312
if self.colorspace is None:
    msg = ("Writing colr boxes without enumerated "
           "colorspaces is not supported at this time.")
    self._dispatch_validation_error(msg, writing=True)

if self.icc_profile is None:
    if self.colorspace not in [SRGB, GREYSCALE, YCC]:
        msg = ("Colorspace should correspond to one of SRGB, "
               "GREYSCALE, or YCC.")
        self._dispatch_validation_error(msg, writing=True)

self._validate(writing=True)
def _write_validate(self)
In addition to constructor validation steps, run validation steps for writing.
5.271427
5.022501
1.049562
self._write_validate()
length = 15 if self.icc_profile is None else 11 + len(self.icc_profile)
fptr.write(struct.pack('>I4s', length, b'colr'))

read_buffer = struct.pack('>BBBI',
                          self.method,
                          self.precedence,
                          self.approximation,
                          self.colorspace)
fptr.write(read_buffer)
def write(self, fptr)
Write a Colour Specification box to file.
5.818643
5.183337
1.122567
num_bytes = offset + length - fptr.tell()
read_buffer = fptr.read(num_bytes)
lst = struct.unpack_from('>BBB', read_buffer, offset=0)
method, precedence, approximation = lst

if method == 1:
    # enumerated colour space
    colorspace, = struct.unpack_from('>I', read_buffer, offset=3)
    if colorspace not in _COLORSPACE_MAP_DISPLAY.keys():
        msg = "Unrecognized colorspace ({colorspace})."
        msg = msg.format(colorspace=colorspace)
        warnings.warn(msg, UserWarning)
    icc_profile = None

else:
    # ICC profile
    colorspace = None
    if (num_bytes - 3) < 128:
        msg = ("ICC profile header is corrupt, length is "
               "only {length} when it should be at least 128.")
        warnings.warn(msg.format(length=num_bytes - 3), UserWarning)
        icc_profile = None
    else:
        profile = _ICCProfile(read_buffer[3:])
        icc_profile = profile.header

return cls(method=method,
           precedence=precedence,
           approximation=approximation,
           colorspace=colorspace,
           icc_profile=icc_profile,
           length=length,
           offset=offset)
def parse(cls, fptr, offset, length)
Parse JPEG 2000 color specification box.

Parameters
----------
fptr : file
    Open file object.
offset : int
    Start position of box in bytes.
length : int
    Length of the box in bytes.

Returns
-------
ColourSpecificationBox
    Instance of the current colour specification box.
3.419067
3.418529
1.000157
# channel type and association must be specified.
if not ((len(self.index) == len(self.channel_type)) and
        (len(self.channel_type) == len(self.association))):
    msg = ("The length of the index ({index}), channel_type "
           "({channel_type}), and association ({association}) inputs "
           "must be the same.")
    msg = msg.format(index=len(self.index),
                     channel_type=len(self.channel_type),
                     association=len(self.association))
    self._dispatch_validation_error(msg, writing=writing)

# channel types must be one of 0, 1, 2, 65535
if any(x not in [0, 1, 2, 65535] for x in self.channel_type):
    msg = ("channel_type specified as {channel_type}, but all values "
           "must be in the set of\n\n"
           "    0 - colour image data for associated color\n"
           "    1 - opacity\n"
           "    2 - premultiplied opacity\n"
           "    65535 - unspecified\n")
    msg = msg.format(channel_type=self.channel_type)
    self._dispatch_validation_error(msg, writing=writing)
def _validate(self, writing=False)
Verify that the box obeys the specifications.
2.826194
2.770115
1.020244
self._validate(writing=True)
num_components = len(self.association)

fptr.write(struct.pack('>I4s', 8 + 2 + num_components * 6, b'cdef'))
fptr.write(struct.pack('>H', num_components))
for j in range(num_components):
    fptr.write(struct.pack('>' + 'H' * 3,
                           self.index[j],
                           self.channel_type[j],
                           self.association[j]))
def write(self, fptr)
Write a channel definition box to file.
4.087894
3.38238
1.208585
num_bytes = offset + length - fptr.tell()
read_buffer = fptr.read(num_bytes)

# Read the number of components.
num_components, = struct.unpack_from('>H', read_buffer)

data = struct.unpack_from('>' + 'HHH' * num_components, read_buffer,
                          offset=2)
index = data[0:num_components * 6:3]
channel_type = data[1:num_components * 6:3]
association = data[2:num_components * 6:3]

return cls(index=tuple(index),
           channel_type=tuple(channel_type),
           association=tuple(association),
           length=length,
           offset=offset)
def parse(cls, fptr, offset, length)
Parse component definition box.

Parameters
----------
fptr : file
    Open file object.
offset : int
    Start position of box in bytes.
length : int
    Length of the box in bytes.

Returns
-------
ComponentDefinitionBox
    Instance of the current component definition box.
2.924642
3.124809
0.935943
if any([box.box_id != 'colr' for box in self.box]):
    msg = ("Colour group boxes can only contain colour specification "
           "boxes.")
    self._dispatch_validation_error(msg, writing=writing)
def _validate(self, writing=True)
Verify that the box obeys the specifications.
12.672242
10.44459
1.213283
self._validate(writing=True)
self._write_superbox(fptr, b'cgrp')
def write(self, fptr)
Write a colour group box to file.
35.251987
16.958883
2.078674
length = 8 + 4 * len(self.component_index)
write_buffer = struct.pack('>I4s', length, b'cmap')
fptr.write(write_buffer)

for j in range(len(self.component_index)):
    write_buffer = struct.pack('>HBB',
                               self.component_index[j],
                               self.mapping_type[j],
                               self.palette_index[j])
    fptr.write(write_buffer)
def write(self, fptr)
Write a Component Mapping box to file.
3.102353
2.783875
1.114401
num_bytes = offset + length - fptr.tell()
num_components = int(num_bytes / 4)

read_buffer = fptr.read(num_bytes)
data = struct.unpack('>' + 'HBB' * num_components, read_buffer)

component_index = data[0:num_bytes:3]
mapping_type = data[1:num_bytes:3]
palette_index = data[2:num_bytes:3]

return cls(component_index, mapping_type, palette_index,
           length=length, offset=offset)
def parse(cls, fptr, offset, length)
Parse component mapping box.

Parameters
----------
fptr : file
    Open file object.
offset : int
    Start position of box in bytes.
length : int
    Length of the box in bytes.

Returns
-------
ComponentMappingBox
    Instance of the current component mapping box.
3.226176
3.136496
1.028592
main_header_offset = fptr.tell()
if config.get_option('parse.full_codestream'):
    codestream = Codestream(fptr, length, header_only=False)
else:
    codestream = None
box = cls(codestream, main_header_offset=main_header_offset,
          length=length, offset=offset)
box._filename = fptr.name
box._length = length
return box
def parse(cls, fptr, offset=0, length=0)
Parse a codestream box.

Parameters
----------
fptr : file
    Open file object.
offset : int
    Start position of box in bytes.
length : int
    Length of the box in bytes.

Returns
-------
ContiguousCodestreamBox
    Instance of the current contiguous codestream box.
4.49679
4.343087
1.03539
for box in self.DR:
    if box.box_id != 'url ':
        msg = ('Child boxes of a data reference box can only be data '
               'entry URL boxes.')
        self._dispatch_validation_error(msg, writing=writing)
def _validate(self, writing=False)
Verify that the box obeys the specifications.
18.004414
14.719627
1.223157
self._write_validate()

# Very similar to the way a superbox is written.
orig_pos = fptr.tell()
fptr.write(struct.pack('>I4s', 0, b'dtbl'))

# Write the number of data entry url boxes.
write_buffer = struct.pack('>H', len(self.DR))
fptr.write(write_buffer)

for box in self.DR:
    box.write(fptr)

end_pos = fptr.tell()
fptr.seek(orig_pos)
fptr.write(struct.pack('>I', end_pos - orig_pos))
fptr.seek(end_pos)
def write(self, fptr)
Write a Data Reference box to file.
4.435321
3.910019
1.134348
num_bytes = offset + length - fptr.tell()
read_buffer = fptr.read(num_bytes)

# Read the number of data references
ndr, = struct.unpack_from('>H', read_buffer, offset=0)

# Need to keep track of where the next url box starts.
box_offset = 2

data_entry_url_box_list = []
for j in range(ndr):

    # Create an in-memory binary stream for each URL box.
    box_fptr = io.BytesIO(read_buffer[box_offset:])
    box_buffer = box_fptr.read(8)
    (box_length, box_id) = struct.unpack_from('>I4s', box_buffer,
                                              offset=0)

    box = DataEntryURLBox.parse(box_fptr, 0, box_length)

    # Need to adjust the box start to that of the "real" file.
    box.offset = offset + 8 + box_offset
    data_entry_url_box_list.append(box)

    # Point to the next embedded URL box.
    box_offset += box_length

return cls(data_entry_url_box_list, length=length, offset=offset)
def parse(cls, fptr, offset, length)
Parse data reference box.

Parameters
----------
fptr : file
    Open file object.
offset : int
    Start position of box in bytes.
length : int
    Length of the box in bytes.

Returns
-------
DataReferenceBox
    Instance of the current data reference box.
3.688187
3.646858
1.011333
if self.brand not in ['jp2 ', 'jpx ']:
    msg = ("The file type brand was '{brand}'.  "
           "It should be either 'jp2 ' or 'jpx '.")
    msg = msg.format(brand=self.brand)
    if writing:
        raise IOError(msg)
    else:
        warnings.warn(msg, UserWarning)

for item in self.compatibility_list:
    if item not in self._valid_cls:
        msg = ("The file type compatibility list {items} is "
               "not valid.  All items should be members of "
               "{valid_entries}.")
        msg = msg.format(items=self.compatibility_list,
                         valid_entries=self._valid_cls)
        if writing:
            raise IOError(msg)
        else:
            warnings.warn(msg, UserWarning)
def _validate(self, writing=False)
Validate the box before writing to file.
3.346981
3.26099
1.026369
self._validate(writing=True)
length = 16 + 4 * len(self.compatibility_list)
fptr.write(struct.pack('>I4s', length, b'ftyp'))
fptr.write(self.brand.encode())
fptr.write(struct.pack('>I', self.minor_version))

for item in self.compatibility_list:
    fptr.write(item.encode())
def write(self, fptr)
Write a File Type box to file.
3.866042
3.184641
1.213965
num_bytes = offset + length - fptr.tell()
read_buffer = fptr.read(num_bytes)

# Extract the brand, minor version.
(brand, minor_version) = struct.unpack_from('>4sI', read_buffer, 0)
if sys.hexversion >= 0x03000000:
    brand = brand.decode('utf-8')

# Extract the compatibility list.  Each entry has 4 bytes.
num_entries = int((length - 16) / 4)
compatibility_list = []
for j in range(int(num_entries)):
    entry, = struct.unpack_from('>4s', read_buffer, 8 + j * 4)
    if sys.hexversion >= 0x03000000:
        try:
            entry = entry.decode('utf-8')
        except UnicodeDecodeError:
            # The entry is invalid, but we've got code to catch this
            # later on.
            pass

    compatibility_list.append(entry)

return cls(brand=brand, minor_version=minor_version,
           compatibility_list=compatibility_list,
           length=length, offset=offset)
def parse(cls, fptr, offset, length)
Parse JPEG 2000 file type box.

Parameters
----------
fptr : file
    Open file object.
offset : int
    Start position of box in bytes.
length : int
    Length of the box in bytes.

Returns
-------
FileTypeBox
    Instance of the current file type box.
2.948169
3.105891
0.949219
if (((len(self.fragment_offset) != len(self.fragment_length)) or
     (len(self.fragment_length) != len(self.data_reference)))):
    msg = ("The lengths of the fragment offsets ({len_offsets}), "
           "fragment lengths ({len_fragments}), and "
           "data reference items ({len_drefs}) must be the same.")
    msg = msg.format(len_offsets=len(self.fragment_offset),
                     len_fragments=len(self.fragment_length),
                     len_drefs=len(self.data_reference))
    self._dispatch_validation_error(msg, writing=writing)

if any([x <= 0 for x in self.fragment_offset]):
    msg = "Fragment offsets must all be positive."
    self._dispatch_validation_error(msg, writing=writing)

if any([x <= 0 for x in self.fragment_length]):
    msg = "Fragment lengths must all be positive."
    self._dispatch_validation_error(msg, writing=writing)
def _validate(self, writing=False)
Validate internal correctness.
2.171285
2.151019
1.009422
self._validate(writing=True)
num_items = len(self.fragment_offset)
length = 8 + 2 + num_items * 14
fptr.write(struct.pack('>I4s', length, b'flst'))
fptr.write(struct.pack('>H', num_items))
for j in range(num_items):
    write_buffer = struct.pack('>QIH',
                               self.fragment_offset[j],
                               self.fragment_length[j],
                               self.data_reference[j])
    fptr.write(write_buffer)
def write(self, fptr)
Write a fragment list box to file.
3.518568
3.238704
1.086412
num_bytes = offset + length - fptr.tell()
read_buffer = fptr.read(num_bytes)

num_fragments, = struct.unpack_from('>H', read_buffer, offset=0)
lst = struct.unpack_from('>' + 'QIH' * num_fragments, read_buffer,
                         offset=2)
frag_offset = lst[0::3]
frag_len = lst[1::3]
data_reference = lst[2::3]

return cls(frag_offset, frag_len, data_reference,
           length=length, offset=offset)
def parse(cls, fptr, offset, length)
Parse JPX fragment list box.

Parameters
----------
fptr : file
    Open file object.
offset : int
    Start position of box in bytes.
length : int
    Length of the box in bytes.

Returns
-------
FragmentListBox
    Instance of the current fragment list box.
3.413697
3.70423
0.921567
box_ids = [box.box_id for box in self.box]
if len(box_ids) != 1 or box_ids[0] != 'flst':
    msg = ("Fragment table boxes must have a single fragment list "
           "box as a child box.")
    self._dispatch_validation_error(msg, writing=writing)
def _validate(self, writing=False)
Self-validate the box before writing.
7.359159
6.395505
1.150677