code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def zoom_in_pixel(self, curr_pixel):
    """Return curr_pixel ([low_frag, high_frag, level]) one level deeper.

    At level 0 the pixel is already at full resolution and is
    returned unchanged.
    """
    level = curr_pixel[2]
    if level <= 0:
        return curr_pixel
    frags = self.spec_level[str(level)]["fragments_dict"]
    low = frags[curr_pixel[0]]
    high = frags[curr_pixel[1]]
    bounds = [low["sub_low_index"], low["sub_high_index"],
            high["sub_low_index"], high["sub_high_index"]]
    return [min(bounds), max(bounds), level - 1]
return the curr_frag at a higher resolution
2.020832
1.891603
1.068317
def zoom_out_pixel(self, curr_pixel):
    """Return curr_pixel ([low_frag, high_frag, level]) one level coarser.

    At the top level (self.n_level - 1) the pixel is returned unchanged.
    """
    low_frag = curr_pixel[0]
    high_frag = curr_pixel[1]
    level = curr_pixel[2]
    str_level = str(level)
    if level < self.n_level - 1:
        frags = self.spec_level[str_level]["fragments_dict"]
        low_super = frags[low_frag]["super_index"]
        # BUG FIX: the original read "sub_index" here, a key that is not
        # used anywhere else; both ends must map through "super_index",
        # mirroring how zoom_in_pixel uses the sub_* keys for both ends.
        high_super = frags[high_frag]["super_index"]
        new_pixel = [min(low_super, high_super),
                max(low_super, high_super), level + 1]
    else:
        new_pixel = curr_pixel
    return new_pixel
return the curr_frag at a lower resolution
2.79806
2.609834
1.072122
def zoom_in_area(self, area):
    """Zoom the area (a pair [x, y] of pixels on one level) one level deeper."""
    x = area[0]
    y = area[1]
    level = x[2]
    logger.debug("x = {}".format(x))
    logger.debug("y = {}".format(y))
    logger.debug("level = {}".format(level))
    if level != y[2] or level <= 0:
        return area
    hx = self.zoom_in_pixel(x)
    hy = self.zoom_in_pixel(y)
    new_level = level - 1
    new_x = [min(hx[0], hy[0]), min(hx[1], hy[1]), new_level]
    new_y = [max(hx[0], hy[0]), max(hx[1], hy[1]), new_level]
    return [new_x, new_y]
zoom in area
2.044819
2.040016
1.002354
def load_config(args, config_path=".inlineplz.yml"):
    """Load inline-plz config from a yaml config file with reasonable defaults.

    Falls back to the same filename under ``args.config_dir`` when no
    local config was found.
    """
    config = {}
    try:
        with open(config_path) as configfile:
            config = yaml.safe_load(configfile) or {}
            if config:
                print("Loaded config from {}".format(config_path))
                pprint.pprint(config)
    # FIX: yaml.YAMLError is the documented base class; safe_load can also
    # raise ScannerError/ComposerError, which ParserError does not cover.
    except (IOError, OSError, yaml.YAMLError):
        traceback.print_exc()
    args = update_from_config(args, config)
    args.ignore_paths = args.__dict__.get("ignore_paths") or [
        "node_modules",
        ".git",
        ".tox",
        "godeps",
        "vendor",
        "site-packages",
        "venv",
        ".env",
        "spec",
        "migrate",
        "bin",
        "fixtures",
        "cassettes",
        ".cache",
        ".idea",
        ".pytest_cache",
        "__pycache__",
        "dist",
    ]
    if config_path != ".inlineplz.yml":
        return args
    # fall back to config_dir inlineplz yaml if we didn't find one locally
    if args.config_dir and not config:
        new_config_path = os.path.join(args.config_dir, config_path)
        if os.path.exists(new_config_path):
            return load_config(args, new_config_path)
    return args
Load inline-plz config from yaml config file with reasonable defaults.
3.408023
3.333425
1.022379
# don't load trusted value from config because we don't trust the config trusted = args.trusted args = load_config(args) print("Args:") pprint.pprint(args) ret_code = 0 # TODO: consider moving this git parsing stuff into the github interface url = args.url if args.repo_slug: owner = args.repo_slug.split("/")[0] repo = args.repo_slug.split("/")[1] else: owner = args.owner repo = args.repo if args.url: try: url_to_parse = args.url # giturlparse won't parse URLs that don't end in .git if not url_to_parse.endswith(".git"): url_to_parse += ".git" parsed = giturlparse.parse(str(url_to_parse)) url = parsed.resource if not url.startswith("https://"): url = "https://" + url if parsed.owner: owner = parsed.owner if parsed.name: repo = parsed.name except giturlparse.parser.ParserError: pass if not args.dryrun and args.interface not in interfaces.INTERFACES: print("Valid inline-plz config not found") return 1 print("Using interface: {0}".format(args.interface)) my_interface = None filenames = None if not args.dryrun: my_interface = interfaces.INTERFACES[args.interface]( owner, repo, args.pull_request, args.branch, args.token, url, args.commit, args.ignore_paths, args.prefix, args.autofix, args.set_status, ) if not my_interface.is_valid(): print("Invalid review. 
Exiting.") return 0 filenames = my_interface.filenames my_interface.start_review() try: linter_runner = LinterRunner( args.install, args.autorun, args.ignore_paths, args.config_dir, args.enabled_linters, args.disabled_linters, args.autofix, trusted, filenames, ) messages = linter_runner.run_linters() except Exception: # pylint: disable=broad-except print("Linting failed:\n{}".format(traceback.format_exc())) print("inline-plz version: {}".format(__version__)) print("Python version: {}".format(sys.version)) ret_code = 1 if my_interface: my_interface.finish_review(error=True) return ret_code print("{} lint messages found".format(len(messages))) print("inline-plz version: {}".format(__version__)) print("Python version: {}".format(sys.version)) # TODO: implement dryrun as an interface instead of a special case here if args.dryrun: print_messages(messages) write_messages_to_json(messages) return ret_code try: if my_interface.post_messages(messages, args.max_comments): if not args.zero_exit: ret_code = 1 if args.delete_outdated: my_interface.clear_outdated_messages() my_interface.finish_review(success=False) write_messages_to_json(messages) return ret_code if args.delete_outdated: my_interface.clear_outdated_messages() my_interface.finish_review(success=True) except KeyError: print("Interface not found: {}".format(args.interface)) traceback.print_exc() write_messages_to_json(messages) return ret_code
def inline(args)
Parse input file with the specified parser and post messages based on lint output :param args: Contains the following interface: How are we going to post comments? owner: Username of repo owner repo: Repository name pr: Pull request ID token: Authentication for repository url: Root URL of repository (not your project) Default: https://github.com dryrun: Prints instead of posting comments. zero_exit: If true: always return a 0 exit code. install: If true: install linters. max_comments: Maximum comments to write :return: Exit code. 1 if there are any comments, 0 if there are none.
3.214118
3.021246
1.063839
def cleanup():
    """Delete standard installation directories."""
    for install_dir in linters.INSTALL_DIRS:
        try:
            shutil.rmtree(install_dir, ignore_errors=True)
        except Exception:
            print("{0}\nFailed to delete {1}".format(
                    traceback.format_exc(), install_dir))
    sys.stdout.flush()
Delete standard installation directories.
3.918994
3.325258
1.178554
async def apply_json(self, data):
    """Create, update, or remove services from a JSON-style spec.

    `data` is a dict or list of dicts.  Each entry may carry: ``name``
    (required), ``ensure`` (a REMOVE value deletes the service),
    ``routes``, ``plugins``, ``host`` (LOCAL_HOST values are replaced by
    the local IP), plus arbitrary service fields.  A legacy nested
    ``config`` dict is merged into the entry for backward compatibility.
    Returns the list of resulting service data dicts.

    Raises KongError for non-dict entries or a missing name.
    """
    if not isinstance(data, list):
        data = [data]
    result = []
    for entry in data:
        if not isinstance(entry, dict):
            raise KongError('dictionary required')
        ensure = entry.pop('ensure', None)
        name = entry.pop('name', None)
        routes = entry.pop('routes', [])
        plugins = entry.pop('plugins', [])
        host = entry.pop('host', None)
        if host in LOCAL_HOST:
            host = local_ip()
        if not name:
            raise KongError('Service name is required')
        if ensure in REMOVE:
            if await self.has(name):
                await self.delete(name)
            continue
        # backward compatible with config entry
        config = entry.pop('config', None)
        if isinstance(config, dict):
            entry.update(config)
        if await self.has(name):
            srv = await self.update(name, host=host, **entry)
        else:
            srv = await self.create(name=name, host=host, **entry)
        srv.data['routes'] = await srv.routes.apply_json(routes)
        srv.data['plugins'] = await srv.plugins.apply_json(plugins)
        result.append(srv.data)
    return result
Apply a JSON data object for a service
2.729314
2.672426
1.021287
async def apply_json(self, data):
    """Create or update SNIs from `data` (a dict or a list of dicts).

    Each entry must carry a ``name``; existing SNIs are updated,
    missing ones created.  Returns the list of resulting data dicts.
    """
    specs = data if isinstance(data, list) else [data]
    result = []
    for entry in specs:
        name = entry.pop('name')
        if await self.has(name):
            sni = await self.update(name, **entry)
        else:
            sni = await self.create(name=name, **entry)
        result.append(sni.data)
    return result
Apply a JSON data object for an SNI
3.083091
2.993502
1.029928
def resize(self, size):
    """Resize the volume to the specified size (in GB)."""
    # Delegate to the owning instance, then mirror the new size locally.
    self.instance.resize_volume(size)
    self.size = size
Resize the volume to the specified size (in GB).
11.765527
6.343689
1.854682
def get(self, item):
    """Fetch an instance, wrapping its raw 'volume' dict in a CloudDatabaseVolume."""
    resource = super(CloudDatabaseManager, self).get(item)
    resource.volume = CloudDatabaseVolume(resource, resource.volume)
    return resource
This additional code is necessary to properly return the 'volume' attribute of the instance as a CloudDatabaseVolume object instead of a raw dict.
9.317046
4.101229
2.271769
if flavor is None: flavor = 1 flavor_ref = self.api._get_flavor_ref(flavor) if volume is None: volume = 1 if databases is None: databases = [] if users is None: users = [] body = {"instance": { "name": name, "flavorRef": flavor_ref, "volume": {"size": volume}, "databases": databases, "users": users, }} if type is not None or version is not None: required = (type, version) if all(required): body['instance']['datastore'] = {"type": type, "version": version} else: raise exc.MissingCloudDatabaseParameter("Specifying a datastore" " requires both the datastore type as well as the version.") return body
def _create_body(self, name, flavor=None, volume=None, databases=None, users=None, version=None, type=None)
Used to create the dict required to create a Cloud Database instance.
2.871466
2.775743
1.034486
def create_backup(self, instance, name, description=None):
    """Create a named backup of `instance`, with an optional description."""
    backup_spec = {"backup": {
        "instance": utils.get_id(instance),
        "name": name,
    }}
    if description is not None:
        backup_spec["backup"]["description"] = description
    resp, resp_body = self.api.method_post("/backups", body=backup_spec)
    mgr = self.api._backup_manager
    return CloudDatabaseBackup(mgr, backup_spec.get("backup"))
Creates a backup of the specified instance, giving it the specified name along with an optional description.
4.045638
4.064351
0.995396
def restore_backup(self, backup, name, flavor, volume):
    """Restore `backup` (ID or CloudDatabaseBackup) into a new instance.

    `name`, `flavor`, and `volume` (GB) describe the instance to create.
    """
    body = {"instance": {
        "name": name,
        "flavorRef": self.api._get_flavor_ref(flavor),
        "volume": {"size": volume},
        "restorePoint": {"backupRef": utils.get_id(backup)},
    }}
    resp, resp_body = self.api.method_post("/%s" % self.uri_base, body=body)
    return CloudDatabaseInstance(self, resp_body.get("instance", {}))
Restores a backup to a new database instance. You must supply a backup (either the ID or a CloudDatabaseBackup object), a name for the new instance, as well as a flavor and volume size (in GB) for the instance.
3.641254
3.232235
1.126544
def list_backups(self, instance=None, marker=0, limit=20):
    """Return a paginated list of backups, optionally scoped to one instance."""
    backup_mgr = self.api._backup_manager
    return backup_mgr.list(instance=instance, limit=limit, marker=marker)
Returns a paginated list of backups, or just for a particular instance.
6.508699
7.755425
0.839245
def _list_backups_for_instance(self, instance, marker=0, limit=20):
    """List backups for one instance.

    The API scopes per-instance backups under the instance resource,
    so this lives on the instance manager rather than the backup manager.
    """
    uri = "/%s/%s/backups?limit=%d&marker=%d" % (self.uri_base,
            utils.get_id(instance), int(limit), int(marker))
    resp, resp_body = self.api.method_get(uri)
    mgr = self.api._backup_manager
    return [CloudDatabaseBackup(mgr, backup)
            for backup in resp_body.get("backups")]
Instance-specific backups are handled through the instance manager, not the backup manager.
4.080294
3.719432
1.097021
def _get_db_names(self, dbs, strict=True):
    """Normalize `dbs` (one db or a list; names or objects) to a name list.

    With strict=True, raises NoSuchDatabase if any name does not match
    an existing database on this instance.
    """
    db_names = [utils.get_name(db) for db in utils.coerce_to_list(dbs)]
    if strict:
        known = [utils.get_name(good_db)
                for good_db in self.instance.list_databases()]
        missing = [nm for nm in db_names if nm not in known]
        if missing:
            raise exc.NoSuchDatabase("The following database(s) were not "
                    "found: %s" % ", ".join(missing))
    return db_names
Accepts a single db (name or object) or a list of dbs, and returns a list of database names. If any of the supplied dbs do not exist, a NoSuchDatabase exception will be raised, unless you pass strict=False.
2.263258
2.220798
1.019119
def update(self, user, name=None, password=None, host=None):
    """Change one or more of the user's username, password, or host.

    Raises MissingDBUserParameters if no new value was supplied, and
    DBUpdateUnchanged if nothing would actually change.  Returns None.
    """
    if not any((name, password, host)):
        raise exc.MissingDBUserParameters("You must supply at least one of "
                "the following: new username, new password, or new host "
                "specification.")
    if not isinstance(user, CloudDatabaseUser):
        # Must be the ID/name
        user = self.get(user)
    dct = {}
    if name and (name != user.name):
        dct["name"] = name
    if host and (host != user.host):
        dct["host"] = host
    if password:
        dct["password"] = password
    if not dct:
        raise exc.DBUpdateUnchanged("You must supply at least one changed "
                "value when updating a user.")
    uri = "/%s/%s" % (self.uri_base, user.name)
    body = {"user": dct}
    # FIX: the response tuple was unpacked but never used; just issue the PUT.
    self.api.method_put(uri, body=body)
    return None
Allows you to change one or more of the user's username, password, or host.
3.335857
3.367027
0.990742
def list_user_access(self, user):
    """Return the databases the user may access, as CloudDatabaseDatabase objects.

    Raises NoSuchDatabaseUser if the user does not exist.
    """
    user = utils.get_name(user)
    uri = "/%s/%s/databases" % (self.uri_base, user)
    try:
        # FIX: dropped the unused `resp` unpack and the unused `as e` binding.
        resp_body = self.api.method_get(uri)[1]
    except exc.NotFound:
        raise exc.NoSuchDatabaseUser("User '%s' does not exist." % user)
    dbs = resp_body.get("databases", {})
    return [CloudDatabaseDatabase(self, db) for db in dbs]
Returns a list of all database names for which the specified user has access rights.
4.030007
4.005473
1.006125
def grant_user_access(self, user, db_names, strict=True):
    """Give the user access to the database(s) in `db_names`.

    `db_names` may be a single db or a list.  Unless strict=False,
    unknown databases raise NoSuchDatabase; a missing user raises
    NoSuchDatabaseUser.
    """
    user = utils.get_name(user)
    uri = "/%s/%s/databases" % (self.uri_base, user)
    db_names = self._get_db_names(db_names, strict=strict)
    dbs = [{"name": db_name} for db_name in db_names]
    body = {"databases": dbs}
    try:
        # FIX: dropped the unused response unpack and unused `as e` binding.
        self.api.method_put(uri, body=body)
    except exc.NotFound:
        raise exc.NoSuchDatabaseUser("User '%s' does not exist." % user)
Gives access to the databases listed in `db_names` to the user. You may pass in either a single db or a list of dbs. If any of the databases do not exist, a NoSuchDatabase exception will be raised, unless you specify `strict=False` in the call.
2.99028
3.175956
0.941537
def revoke_user_access(self, user, db_names, strict=True):
    """Revoke the user's access to the database(s) in `db_names`.

    Unless strict=False, unknown databases raise NoSuchDatabase.
    """
    user = utils.get_name(user)
    db_names = self._get_db_names(db_names, strict=strict)
    # FIX: removed the dead `bad_names = []` list and the unused
    # response-tuple unpack from the original.
    for db_name in db_names:
        uri = "/%s/%s/databases/%s" % (self.uri_base, user, db_name)
        self.api.method_delete(uri)
Revokes access to the databases listed in `db_names` for the user. If any of the databases do not exist, a NoSuchDatabase exception will be raised, unless you specify `strict=False` in the call.
3.142445
3.471607
0.905184
def list(self, instance=None, limit=20, marker=0):
    """Return a paginated list of backups, optionally only for `instance`."""
    if instance is not None:
        return self.api._manager._list_backups_for_instance(instance,
                limit=limit, marker=marker)
    return super(CloudDatabaseBackupManager, self).list()
Return a paginated list of backups, or just for a particular instance.
6.446496
5.53252
1.165201
def get(self):
    """Reload this instance, wrapping the raw 'volume' dict in a CloudDatabaseVolume."""
    super(CloudDatabaseInstance, self).get()
    # Make the volume into an accessible object instead of a dict
    self.volume = CloudDatabaseVolume(self, self.volume)
Need to override the default get() behavior by making the 'volume' attribute into a CloudDatabaseVolume object instead of the raw dict.
12.698558
4.722961
2.688686
def list_databases(self, limit=None, marker=None):
    """Return the databases on this instance (delegates to the database manager)."""
    mgr = self._database_manager
    return mgr.list(limit=limit, marker=marker)
Returns a list of the names of all databases for this instance.
4.989045
4.961927
1.005465
def list_users(self, limit=None, marker=None):
    """Return the users on this instance (delegates to the user manager)."""
    mgr = self._user_manager
    return mgr.list(limit=limit, marker=marker)
Returns a list of the names of all users for this instance.
5.113441
5.080404
1.006503
def get_user(self, name):
    """Return the CloudDatabaseUser named `name`.

    Raises NoSuchDatabaseUser when no such user exists.
    """
    try:
        return self._user_manager.get(name)
    except exc.NotFound:
        raise exc.NoSuchDatabaseUser("No user by the name '%s' exists." %
                name)
Finds the user in this instance with the specified name, and returns a CloudDatabaseUser object. If no match is found, a NoSuchDatabaseUser exception is raised.
6.273529
4.554148
1.377542
def get_database(self, name):
    """Return the CloudDatabaseDatabase named `name`.

    Raises NoSuchDatabase when no such database exists.
    """
    matches = [db for db in self.list_databases() if db.name == name]
    if not matches:
        raise exc.NoSuchDatabase("No database by the name '%s' exists." %
                name)
    return matches[0]
Finds the database in this instance with the specified name, and returns a CloudDatabaseDatabase object. If no match is found, a NoSuchDatabase exception is raised.
3.745856
3.499119
1.070514
def create_database(self, name, character_set=None, collate=None):
    """Create a database named `name` on this instance.

    Defaults to utf8 / utf8_general_ci.  A duplicate name results in a
    BadRequest (400) from the API.
    """
    if character_set is None:
        character_set = "utf8"
    if collate is None:
        collate = "utf8_general_ci"
    self._database_manager.create(name=name, character_set=character_set,
            collate=collate, return_none=True)
    # The create call returns no usable info, so fetch the object manually.
    return self._database_manager.find(name=name)
Creates a database with the specified name. If a database with that name already exists, a BadRequest (400) exception will be raised.
3.214607
3.091561
1.039801
def create_user(self, name, password, database_names, host=None):
    """Create a user with access to the given database(s).

    A duplicate name results in a BadRequest (400) from the API.
    """
    if not isinstance(database_names, (list, tuple)):
        database_names = [database_names]
    # The API only accepts names, not DB objects
    database_names = [db if isinstance(db, six.string_types) else db.name
            for db in database_names]
    self._user_manager.create(name=name, password=password,
            database_names=database_names, host=host, return_none=True)
    # The API response lacks the new user's info, so look it up manually.
    return self._user_manager.find(name=name)
Creates a user with the specified name and password, and gives that user access to the specified database(s). If a user with that name already exists, a BadRequest (400) exception will be raised.
3.377184
3.530057
0.956694
def delete_database(self, name_or_obj):
    """Delete the given database; a no-op if it does not exist."""
    self._database_manager.delete(utils.get_name(name_or_obj))
Deletes the specified database. If no database by that name exists, no exception will be raised; instead, nothing at all is done.
4.997518
6.096533
0.819731
def update_user(self, user, name=None, password=None, host=None):
    """Change one or more of the user's username, password, or host."""
    mgr = self._user_manager
    return mgr.update(user, name=name, password=password, host=host)
Allows you to change one or more of the user's username, password, or host.
4.419357
5.362185
0.824171
def grant_user_access(self, user, db_names, strict=True):
    """Give `user` access to the database(s) in `db_names`."""
    mgr = self._user_manager
    return mgr.grant_user_access(user, db_names, strict=strict)
Gives access to the databases listed in `db_names` to the user.
4.706591
5.075313
0.92735
def revoke_user_access(self, user, db_names, strict=True):
    """Revoke `user`'s access to the database(s) in `db_names`."""
    mgr = self._user_manager
    return mgr.revoke_user_access(user, db_names, strict=strict)
Revokes access to the databases listed in `db_names` for the user.
4.741743
5.534401
0.856776
def delete_user(self, user):
    """Delete the given user; a no-op if no such user exists."""
    self._user_manager.delete(utils.get_name(user))
Deletes the specified user. If no user by that name exists, no exception will be raised; instead, nothing at all is done.
8.871746
9.293804
0.954587
def enable_root_user(self):
    """Enable the root user (login from any host) and return its generated password."""
    uri = "/instances/%s/root" % self.id
    resp, resp_body = self.manager.api.method_post(uri)
    return resp_body["user"]["password"]
Enables login from any host for the root user and provides the user with a generated root password.
7.158412
5.943179
1.204475
def root_user_status(self):
    """Return True if the root user has been enabled for this instance, else False."""
    uri = "/instances/%s/root" % self.id
    resp, resp_body = self.manager.api.method_get(uri)
    return resp_body["rootEnabled"]
Returns True or False, depending on whether the root user for this instance has been enabled.
7.072365
4.416426
1.601377
def resize(self, flavor):
    """Resize this instance to a different flavor."""
    # The API wants the flavor's href (flavorRef), not its ID or size.
    ref = self.manager.api._get_flavor_ref(flavor)
    self.manager.action(self, "resize", body={"flavorRef": ref})
Set the size of this instance to a different flavor.
6.483892
5.463679
1.186726
def resize_volume(self, size):
    """Grow this instance's volume to `size` GB.

    Raises InvalidVolumeResize unless `size` exceeds the current size.
    """
    curr_size = self.volume.size
    if size <= curr_size:
        raise exc.InvalidVolumeResize("The new volume size must be larger "
                "than the current volume size of '%s'." % curr_size)
    self.manager.action(self, "resize", body={"volume": {"size": size}})
Changes the size of the volume for this instance.
3.954022
3.693722
1.070471
def list_backups(self, limit=20, marker=0):
    """Return this instance's backups, paginated."""
    return self.manager._list_backups_for_instance(self, limit=limit,
            marker=marker)
Returns a paginated list of backups for this instance.
6.086068
5.874514
1.036012
def create_backup(self, name, description=None):
    """Create a named backup of this instance, with an optional description."""
    return self.manager.create_backup(self, name, description=description)
Creates a backup of this instance, giving it the specified name along with an optional description.
5.014352
5.439512
0.921839
def update(self, name=None, password=None, host=None):
    """Change one or more of this user's username, password, or host."""
    return self.manager.update(self, name=name, password=password,
            host=host)
Allows you to change one or more of the user's username, password, or host.
4.52883
5.278282
0.858012
def grant_user_access(self, db_names, strict=True):
    """Give this user access to the database(s) in `db_names`."""
    return self.manager.grant_user_access(self, db_names, strict=strict)
Gives access to the databases listed in `db_names` to the user.
4.194599
3.690971
1.136449
def revoke_user_access(self, db_names, strict=True):
    """Revoke this user's access to the database(s) in `db_names`."""
    return self.manager.revoke_user_access(self, db_names, strict=strict)
Revokes access to the databases listed in `db_names` for the user.
4.185187
3.889546
1.076009
def _configure_manager(self):
    """Set up the managers for instances, flavors, and backups."""
    self._manager = CloudDatabaseManager(self,
            resource_class=CloudDatabaseInstance, response_key="instance",
            uri_base="instances")
    self._flavor_manager = BaseManager(self,
            resource_class=CloudDatabaseFlavor, response_key="flavor",
            uri_base="flavors")
    self._backup_manager = CloudDatabaseBackupManager(self,
            resource_class=CloudDatabaseBackup, response_key="backup",
            uri_base="backups")
Creates a manager to handle the instances, and another to handle flavors.
3.23571
2.675996
1.209161
def list_databases(self, instance, limit=None, marker=None):
    """Return all databases for `instance` (delegates to the instance)."""
    return instance.list_databases(limit=limit, marker=marker)
Returns all databases for the specified instance.
4.465736
4.09058
1.091712
def create_database(self, instance, name, character_set=None, collate=None):
    """Create a database named `name` on `instance` (delegates to the instance)."""
    return instance.create_database(name, character_set=character_set,
            collate=collate)
Creates a database with the specified name on the given instance.
3.191452
3.032401
1.05245
def list_users(self, instance, limit=None, marker=None):
    """Return all users for `instance` (delegates to the instance)."""
    return instance.list_users(limit=limit, marker=marker)
Returns all users for the specified instance.
4.041876
3.842112
1.051993
def create_user(self, instance, name, password, database_names, host=None):
    """Create a user on `instance` with access to `database_names` (delegates)."""
    return instance.create_user(name=name, password=password,
            database_names=database_names, host=host)
Creates a user with the specified name and password, and gives that user access to the specified database(s).
2.91572
2.812905
1.036551
def update_user(self, instance, user, name=None, password=None, host=None):
    """Change the user's username, password, and/or host on `instance` (delegates)."""
    return instance.update_user(user, name=name, password=password,
            host=host)
Allows you to change one or more of the user's username, password, or host.
3.899651
3.922906
0.994072
def grant_user_access(self, instance, user, db_names, strict=True):
    """Give `user` access to `db_names` on `instance` (delegates)."""
    return instance.grant_user_access(user, db_names, strict=strict)
Gives access to the databases listed in `db_names` to the user on the specified instance.
3.413484
3.489885
0.978108
def revoke_user_access(self, instance, user, db_names, strict=True):
    """Revoke `user`'s access to `db_names` on `instance` (delegates)."""
    return instance.revoke_user_access(user, db_names, strict=strict)
Revokes access to the databases listed in `db_names` for the user on the specified instance.
3.340296
3.513201
0.950784
def list_flavors(self, limit=None, marker=None):
    """Return a list of all available flavors."""
    mgr = self._flavor_manager
    return mgr.list(limit=limit, marker=marker)
Returns a list of all available Flavors.
5.028592
5.025899
1.000536
def _get_flavor_ref(self, flavor):
    """Return the href link for `flavor`.

    Unlike nearly every other resource, the API expects an href, not an
    ID.  `flavor` may be a CloudDatabaseFlavor object, a flavor ID, a
    RAM size, or a flavor name.  Raises FlavorNotFound when it cannot
    be resolved.
    """
    flavor_obj = None
    if isinstance(flavor, CloudDatabaseFlavor):
        flavor_obj = flavor
    elif isinstance(flavor, int):
        # They passed an ID or a size
        try:
            flavor_obj = self.get_flavor(flavor)
        except exc.NotFound:
            # Must be either a size or bad ID, which will be handled below
            pass
    if flavor_obj is None:
        # Try flavor name
        flavors = self.list_flavors()
        try:
            flavor_obj = [flav for flav in flavors
                    if flav.name == flavor][0]
        except IndexError:
            # No such name; try matching RAM
            try:
                flavor_obj = [flav for flav in flavors
                        if flav.ram == flavor][0]
            except IndexError:
                raise exc.FlavorNotFound("Could not determine flavor from "
                        "'%s'." % flavor)
    # OK, we have a Flavor object. Get the href
    href = [link["href"] for link in flavor_obj.links
            if link["rel"] == "self"][0]
    return href
Flavors are odd in that the API expects an href link, not an ID, as with nearly every other resource. This method takes either a CloudDatabaseFlavor object, a flavor ID, a RAM size, or a flavor name, and uses that to determine the appropriate href.
3.235861
2.721227
1.189119
def create_backup(self, instance, name, description=None):
    """Create a named backup of `instance` (delegates to the instance)."""
    return instance.create_backup(name, description=description)
Creates a backup of the specified instance, giving it the specified name along with an optional description.
6.513512
6.102382
1.067372
def restore_backup(self, backup, name, flavor, volume):
    """Restore `backup` into a new named instance with the given flavor and volume size (GB)."""
    return self._manager.restore_backup(backup, name, flavor, volume)
Restores a backup to a new database instance. You must supply a backup (either the ID or a CloudDatabaseBackup object), a name for the new instance, as well as a flavor and size (in GB) for the instance.
4.346028
6.233418
0.697214
def runproc(cmd):
    """Run `cmd` through the shell and return (stdout, stderr) as bytes.

    `cmd` is a single command-line string.  Check for empty STDERR to
    determine whether the command completed successfully.

    NOTE(security): `shell=True` means `cmd` is interpreted by the
    shell — never pass untrusted input to this function.
    """
    proc = Popen([cmd], shell=True, stdin=PIPE, stdout=PIPE,
            stderr=PIPE, close_fds=True)
    stdoutdata, stderrdata = proc.communicate()
    return (stdoutdata, stderrdata)
Convenience method for executing operating system commands. Accepts a single string that would be the command as executed on the command line. Returns a 2-tuple consisting of the output of (STDOUT, STDERR). In your code you should check for an empty STDERR output to determine if your command completed successfully.
2.338272
2.647456
0.883215
def get_checksum(content, encoding="utf8", block_size=8192):
    """Return the hex MD5 checksum of `content`.

    `content` may be a file path (read in `block_size`-byte chunks), a
    file-like object (read from position 0; the original position is
    restored afterwards), or a plain string/bytes value.  Text is
    encoded with `encoding` before hashing.
    """
    md = hashlib.md5()

    def safe_update(txt):
        # md5 needs bytes, so encode any text chunk first.
        if isinstance(txt, six.text_type):
            txt = txt.encode(encoding)
        md.update(txt)

    try:
        isfile = os.path.isfile(content)
    except (TypeError, ValueError):
        # Will happen with binary content.
        isfile = False
    if isfile:
        with open(content, "rb") as ff:
            txt = ff.read(block_size)
            while txt:
                safe_update(txt)
                txt = ff.read(block_size)
    elif hasattr(content, "read"):
        pos = content.tell()
        content.seek(0)
        txt = content.read(block_size)
        while txt:
            safe_update(txt)
            txt = content.read(block_size)
        content.seek(pos)
    else:
        safe_update(content)
    return md.hexdigest()
Returns the MD5 checksum in hex for the given content. If 'content' is a file-like object, the content will be obtained from its read() method. If 'content' is a file path, that file is read and its contents used. Otherwise, 'content' is assumed to be the string whose checksum is desired. If the content is unicode, it will be encoded using the specified encoding. To conserve memory, files and file-like objects will be read in blocks, with the default block size of 8192 bytes, which is 64 * the digest block size of md5 (128). This is optimal for most cases, but you can change this by passing in a different value for `block_size`.
2.168876
2.284329
0.949459
mult = int(length / len(chars)) + 1 mult_chars = chars * mult return "".join(random.sample(mult_chars, length))
def _join_chars(chars, length)
Used by the random character functions.
3.733603
3.531556
1.057212
def random_unicode(length=20):
    """Return a random unicode string of `length` characters; useful for testing.

    Characters are drawn from code points 32 through 1000.
    """
    pool = u"".join(six.unichr(random.randint(32, 1000))
            for _ in six.moves.range(length))
    return _join_chars(pool, length)
Generates a random name; useful for testing. Returns an encoded string of the specified length containing unicode values up to code point 1000.
4.504577
4.156336
1.083785
def coerce_to_list(val):
    """Normalize `val` for parameters that accept one value or a list.

    Falsy values become []; lists/tuples are returned unchanged;
    anything else is wrapped in a single-element list.
    """
    if not val:
        return []
    if isinstance(val, (list, tuple)):
        return val
    return [val]
For parameters that can take either a single string or a list of strings, this function will ensure that the result is a list containing the passed values.
2.669168
2.684184
0.994406
def folder_size(pth, ignore=None):
    """Return the total size in bytes of all files under `pth`.

    `ignore` may be a single pattern or a list of patterns; matching
    files are excluded.  Raises FolderNotFound if `pth` is not a
    directory.
    """
    if not os.path.isdir(pth):
        raise exc.FolderNotFound
    ignore = coerce_to_list(ignore)
    total = 0
    # FIX: the original reused `pth` as its inner loop variable, shadowing
    # the parameter, and removed entries from a list while iterating a
    # reversed copy of it; this filters directly instead.
    for root, _, names in os.walk(pth):
        for name in names:
            fpath = os.path.realpath(os.path.join(root, name))
            if not os.path.exists(fpath):
                continue
            if match_pattern(fpath, ignore):
                continue
            total += os.stat(fpath).st_size
    return total
Returns the total bytes for the specified path, optionally ignoring any files which match the 'ignore' parameter. 'ignore' can either be a single string pattern, or a list of such patterns.
2.642149
2.641726
1.00016
def add_method(obj, func, name=None):
    """Attach `func` to `obj` as a bound instance method.

    The attribute name defaults to the function's own name.
    """
    method_name = func.__name__ if name is None else name
    if sys.version_info < (3,):
        bound = types.MethodType(func, obj, obj.__class__)
    else:
        bound = types.MethodType(func, obj)
    setattr(obj, method_name, bound)
Adds an instance method to an object.
1.847008
1.797226
1.0277
def wait_until(obj, att, desired, callback=None, interval=5, attempts=0,
        verbose=False, verbose_atts=None):
    """Wait until obj.att reaches one of the `desired` values.

    Without a callback this blocks in _wait_until and returns the
    reloaded object.  With a callback, the wait runs in a _WaitThread
    (returned immediately) and the callback receives the result.
    """
    if not callback:
        return _wait_until(obj=obj, att=att, desired=desired, callback=None,
                interval=interval, attempts=attempts, verbose=verbose,
                verbose_atts=verbose_atts)
    waiter = _WaitThread(obj=obj, att=att, desired=desired,
            callback=callback, interval=interval, attempts=attempts,
            verbose=verbose, verbose_atts=verbose_atts)
    waiter.start()
    return waiter
When changing the state of an object, it will commonly be in a transitional state until the change is complete. This will reload the object every `interval` seconds, and check its `att` attribute until the `desired` value is reached, or until the maximum number of attempts is reached. The updated object is returned. It is up to the calling program to check the returned object to make sure that it successfully reached the desired state. Once the desired value of the attribute is reached, the method returns. If not, it will re-try until the attribute's value matches one of the `desired` values. By default (attempts=0) it will loop infinitely until the attribute reaches the desired value. You can optionally limit the number of times that the object is reloaded by passing a positive value to `attempts`. If the attribute has not reached the desired value by then, the method will exit. If `verbose` is True, each attempt will print out the current value of the watched attribute and the time that has elapsed since the original request. Also, if `verbose_atts` is specified, the values of those attributes will also be output. If `verbose` is False, then `verbose_atts` has no effect. Note that `desired` can be a list of values; if the attribute becomes equal to any of those values, this will succeed. For example, when creating a new cloud server, it will initially have a status of 'BUILD', and you can't work with it until its status is 'ACTIVE'. However, there might be a problem with the build process, and the server will change to a status of 'ERROR'. So for this case you need to set the `desired` parameter to `['ACTIVE', 'ERROR']`. If you simply pass 'ACTIVE' as the desired state, this will loop indefinitely if a build fails, as the server will never reach a status of 'ACTIVE'. Since this process of waiting can take a potentially long time, and will block your program's execution until the desired state of the object is reached, you may specify a callback function. 
The callback can be any callable that accepts a single parameter; the parameter it receives will be either the updated object (success), or None (failure). If a callback is specified, the program will return immediately after spawning the wait process in a separate thread.
1.785668
1.920198
0.92994
def _wait_until(obj, att, desired, callback, interval, attempts, verbose,
        verbose_atts):
    """Poll until getattr(obj, att) is in `desired` or attempts run out.

    Reloads `obj` each cycle via obj.get(), falling back to
    obj.manager.get(obj.id); raises NoReloadError when neither works.
    attempts == 0 loops forever.  The (reloaded) object is returned
    whether or not the desired value was reached.  `callback` is unused
    here; the threaded path is handled by wait_until.
    """
    if not isinstance(desired, (list, tuple)):
        desired = [desired]
    if verbose_atts is None:
        verbose_atts = []
    if not isinstance(verbose_atts, (list, tuple)):
        verbose_atts = [verbose_atts]
    infinite = (attempts == 0)
    attempt = 0
    start = time.time()
    while infinite or (attempt < attempts):
        try:
            # For servers:
            obj.get()
        except AttributeError:
            try:
                # For other objects that don't support .get()
                obj = obj.manager.get(obj.id)
            except AttributeError:
                # punt
                raise exc.NoReloadError("The 'wait_until' method is not "
                        "supported for '%s' objects." % obj.__class__)
        attval = getattr(obj, att)
        if verbose:
            elapsed = time.time() - start
            msgs = ["Current value of %s: %s (elapsed: %4.1f seconds)" % (
                    att, attval, elapsed)]
            for vatt in verbose_atts:
                vattval = getattr(obj, vatt, None)
                msgs.append("%s=%s" % (vatt, vattval))
            print(" ".join(msgs))
        if attval in desired:
            return obj
        time.sleep(interval)
        attempt += 1
    return obj
Loops until either the desired value of the attribute is reached, or the number of attempts is exceeded.
2.92972
2.939631
0.996628
def wait_for_build(obj, att=None, desired=None, callback=None, interval=None,
        attempts=None, verbose=None, verbose_atts=None):
    """
    Convenience wrapper around wait_until() for the common "wait for a build
    to finish" case: watch the 'status' attribute until it becomes one of
    ACTIVE / ERROR / available / COMPLETED. Builds are slow, so polling
    defaults to every 20 seconds, with unlimited attempts and 'progress'
    reported as the extra verbose attribute.
    """
    # Fill in the build-oriented defaults for any falsy argument.
    if not att:
        att = "status"
    if not desired:
        desired = ["ACTIVE", "ERROR", "available", "COMPLETED"]
    if not interval:
        interval = 20
    if not attempts:
        attempts = 0
    if not verbose_atts:
        verbose_atts = "progress"
    return wait_until(obj, att, desired, callback=callback, interval=interval,
            attempts=attempts, verbose=verbose, verbose_atts=verbose_atts)
Designed to handle the most common use case for wait_until: an object whose 'status' attribute will end up in either 'ACTIVE' or 'ERROR' state. Since builds don't happen very quickly, the interval will default to 20 seconds to avoid excess polling.
3.182932
2.823819
1.127173
def _parse_datetime_string(val):
    """
    Attempts to parse a string representation of a date or datetime value,
    and returns a datetime if successful. If not, an InvalidDateTimeString
    exception will be raised.

    Only two exact formats are accepted, selected by string length:
    'YYYY-MM-DD HH:MM:SS' (19 chars) or 'YYYY-MM-DD' (10 chars).
    """
    # The original code also created an unused `dt = None` local; removed.
    fmt = {19: "%Y-%m-%d %H:%M:%S", 10: "%Y-%m-%d"}.get(len(val))
    if fmt is None:
        # Invalid date
        raise exc.InvalidDateTimeString("The supplied value '%s' does not "
                "match either of the formats 'YYYY-MM-DD HH:MM:SS' or "
                "'YYYY-MM-DD'." % val)
    return datetime.datetime.strptime(val, fmt)
Attempts to parse a string representation of a date or datetime value, and returns a datetime if successful. If not, a InvalidDateTimeString exception will be raised.
3.325241
3.122933
1.064781
def iso_time_string(val, show_tzinfo=False):
    """
    Takes either a date, datetime or a string, and returns the standard ISO
    formatted string for that date/time, with any fractional second portion
    removed.
    """
    if not val:
        return ""
    if isinstance(val, six.string_types):
        dt = _parse_datetime_string(val)
    else:
        dt = val
    if not isinstance(dt, datetime.datetime):
        # Plain date: promote to a midnight datetime.
        dt = datetime.datetime.fromordinal(dt.toordinal())
    has_tz = dt.tzinfo is not None
    iso = dt.isoformat()
    if show_tzinfo:
        if has_tz:
            # Need to remove the colon in the TZ portion
            return "".join(iso.rsplit(":", 1))
        # Naive value: tag it as +0000 after dropping fractional seconds.
        return "%s+0000" % iso.split(".")[0]
    if has_tz:
        # Strip the trailing '+HH:MM' offset.
        return iso[:-6]
    return iso.split(".")[0]
Takes either a date, datetime or a string, and returns the standard ISO formatted string for that date/time, with any fractional second portion removed.
2.225178
2.202662
1.010222
def rfc2822_format(val):
    """
    Takes either a date, a datetime, or a string, and returns a string that
    represents the value in RFC 2822 format. If a string is passed it is
    returned unchanged.
    """
    if isinstance(val, six.string_types):
        return val
    if isinstance(val, (datetime.datetime, datetime.date)):
        # Convert to a timestamp
        val = time.mktime(val.timetuple())
    if isinstance(val, numbers.Number):
        return email.utils.formatdate(val)
    # Unknown type; hand it back untouched.
    return val
Takes either a date, a datetime, or a string, and returns a string that represents the value in RFC 2822 format. If a string is passed it is returned unchanged.
2.848085
2.724529
1.045349
def to_timestamp(val):
    """
    Takes a value that is either a Python date, datetime, or a string
    representation of a date/datetime value. Returns a standard Unix
    timestamp corresponding to that value. Numbers are assumed to already
    be timestamps and are returned unchanged.
    """
    if isinstance(val, numbers.Number):
        # Already a timestamp.
        return val
    dt = _parse_datetime_string(val) if isinstance(val, six.string_types) \
            else val
    return time.mktime(dt.timetuple())
Takes a value that is either a Python date, datetime, or a string representation of a date/datetime value. Returns a standard Unix timestamp corresponding to that value.
3.49534
3.502659
0.99791
def get_id(id_or_obj):
    """
    Returns the 'id' attribute of 'id_or_obj' if present; if not, returns
    'id_or_obj' itself. Strings and ints are assumed to already be IDs.
    """
    if isinstance(id_or_obj, six.string_types + (int,)):
        # It's an ID
        return id_or_obj
    # getattr with a default mirrors try/except AttributeError.
    return getattr(id_or_obj, "id", id_or_obj)
Returns the 'id' attribute of 'id_or_obj' if present; if not, returns 'id_or_obj'.
2.330822
2.383815
0.97777
def get_name(name_or_obj):
    """
    Returns the 'name' attribute of 'name_or_obj' if present; if a string
    was passed, it is returned as-is. Raises exc.MissingName when the
    object has no 'name' attribute.
    """
    if isinstance(name_or_obj, six.string_types):
        # It's a name
        return name_or_obj
    try:
        return name_or_obj.name
    except AttributeError:
        raise exc.MissingName(name_or_obj)
Returns the 'name' attribute of 'name_or_obj' if present; if not, returns 'name_or_obj'.
2.646797
2.769734
0.955614
def params_to_dict(params, dct):
    """
    Updates the 'dct' dictionary in place with the 'params' dictionary,
    skipping every param whose value is None, and returns 'dct'.
    """
    dct.update((key, val) for key, val in params.items() if val is not None)
    return dct
Updates the 'dct' dictionary with the 'params' dictionary, filtering out all those whose param value is None.
2.489257
2.520733
0.987513
def dict_to_qs(dct):
    """
    Takes a dictionary and uses it to create a query string, omitting any
    entries whose value is None. NOTE: values are not URL-encoded.
    """
    pairs = []
    for key, val in dct.items():
        if val is not None:
            pairs.append("%s=%s" % (key, val))
    return "&".join(pairs)
Takes a dictionary and uses it to create a query string.
3.101865
3.106415
0.998535
def match_pattern(nm, patterns):
    """
    Compares `nm` with the supplied patterns, and returns True if it matches
    at least one. Patterns are standard file-name wildcard strings, as
    defined in the `fnmatch` module (e.g. "*.py").
    """
    return any(fnmatch.fnmatch(nm, pat)
            for pat in coerce_to_list(patterns))
Compares `nm` with the supplied patterns, and returns True if it matches at least one. Patterns are standard file-name wildcard strings, as defined in the `fnmatch` module. For example, the pattern "*.py" will match the names of all Python scripts.
2.913608
3.510831
0.829891
def update_exc(exc, msg, before=True, separator="\n"):
    """
    Adds additional text to an exception's error message and returns the
    exception. By default the new text is prepended; pass before=False to
    append instead. `separator` goes between old and new text.

    NOTE: requires the exception to expose a `.message` attribute, which is
    updated along with `.args`.
    """
    old_msg = exc.message
    if before:
        new_msg = "%s%s%s" % (msg, separator, old_msg)
    else:
        new_msg = "%s%s%s" % (old_msg, separator, msg)
    exc.message = new_msg
    exc.args = (new_msg,) + exc.args[1:]
    return exc
Adds additional text to an exception's error message. The new text will be added before the existing text by default; to append it after the original text, pass False to the `before` parameter. By default the old and new text will be separated by a newline. If you wish to use a different separator, pass that as the `separator` parameter.
2.86323
2.944069
0.972542
def case_insensitive_update(dct1, dct2):
    """
    Updates dct1 with dct2 in place, treating keys that differ only in case
    as identical: when dct2 has a key matching an existing dct1 key
    case-insensitively, the original dct1 key's casing is kept. No return
    value, mirroring dict.update().
    """
    # Map lowercased dct1 keys back to their original casing.
    existing = {key.lower(): key for key in dct1}
    for key, val in dct2.items():
        dct1[existing.get(key.lower(), key)] = val
Given two dicts, updates the first one with the second, but considers keys that are identical except for case to be the same. No return value; this function modified dct1 similar to the update() method.
2.587981
2.778147
0.93155
def env(*args, **kwargs):
    """
    Returns the value of the first environment variable in `args` that is
    set to a non-empty value. If none are, returns the `default` keyword
    argument, or "" when no default is supplied.
    """
    for name in args:
        val = os.environ.get(name)
        # Empty strings count as unset.
        if val:
            return val
    return kwargs.get("default", "")
Returns the value of the first environment variable in `args` that is set to a non-empty value; if none are, returns the `default` keyword argument (defaulting to "").
2.814291
2.351947
1.196579
def import_class(import_str):
    """
    Returns a class from a dotted string containing module and class name,
    e.g. "package.module.ClassName".
    """
    mod_name, _, cls_name = import_str.rpartition(".")
    # Import for the side effect of registering in sys.modules.
    __import__(mod_name)
    return getattr(sys.modules[mod_name], cls_name)
Returns a class from a string including module and class.
2.142703
2.057423
1.04145
def safe_decode(text, incoming=None, errors='strict'):
    """
    Decodes incoming text/bytes using `incoming` (falling back to
    sys.stdin.encoding, then the default encoding) unless the value is
    already unicode, in which case it is returned unchanged.
    Copied from novaclient.openstack.strutils.

    :param incoming: Text's current encoding
    :param errors: Errors handling policy; see the `codecs` docs
    :returns: a unicode representation of `text`
    :raises TypeError: if `text` is neither str nor bytes
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be decoded" % type(text))
    if isinstance(text, six.text_type):
        return text
    encoding = incoming or (sys.stdin.encoding or sys.getdefaultencoding())
    try:
        return text.decode(encoding, errors)
    except UnicodeDecodeError:
        # The guessed stdin/default encoding could not decode the bytes —
        # typically a misconfigured LANG leaving an ASCII/ANSI codec.
        # UTF-8 is an ASCII superset, so retry with it.
        return text.decode('utf-8', errors)
Decodes incoming text/bytes string using `incoming` if they're not already unicode. This function was copied from novaclient.openstack.strutils :param incoming: Text's current encoding :param errors: Errors handling policy. See here for valid values http://docs.python.org/2/library/codecs.html :returns: text or a unicode `incoming` encoded representation of it. :raises TypeError: If text is not an instance of str
4.169378
4.037589
1.03264
def to_slug(value, incoming=None, errors="strict"):
    """
    Normalize a string for use as a slug: convert to lowercase ASCII,
    remove non-word characters, and convert spaces to hyphens. Copied from
    novaclient.openstack.strutils; inspired by Django's `slugify` filter.

    :param value: Text to slugify
    :param incoming: Text's current encoding
    :param errors: Errors handling policy; see the `codecs` docs
    :returns: slugified unicode representation of `value`
    :raises TypeError: if `value` is neither str nor bytes
    """
    text = safe_decode(value, incoming, errors)
    # Decompose accents, then drop anything that isn't plain ASCII.
    # (Encodings here are always "ascii" with "ignore", and the types are
    # known, so safe_(en|de)code is unnecessary.)
    ascii_text = unicodedata.normalize("NFKD", text).encode(
            "ascii", "ignore").decode("ascii")
    stripped = SLUGIFY_STRIP_RE.sub("", ascii_text).strip().lower()
    return SLUGIFY_HYPHENATE_RE.sub("-", stripped)
Normalize string. Convert to lowercase, remove non-word characters, and convert spaces to hyphens. This function was copied from novaclient.openstack.strutils Inspired by Django's `slugify` filter. :param value: Text to slugify :param incoming: Text's current encoding :param errors: Errors handling policy. See here for valid values http://docs.python.org/2/library/codecs.html :returns: slugified unicode representation of `value` :raises TypeError: If text is not an instance of str
6.016115
6.131362
0.981204
def next(self):
    """
    Return the next available item. If there are no more items in the local
    'results' list, check if there is a 'next_uri' value. If so, use that to
    get the next page of results from the API, and return the first item
    from that query. Raises StopIteration when no further results exist.
    """
    try:
        return self.results.pop(0)
    except IndexError:
        if self.next_uri is None:
            # Explicitly exhausted: no further pages.
            raise StopIteration()
        else:
            if not self.next_uri:
                # Empty (but not None) next_uri: fetch via the plain listing
                # call, using the marker saved from the previous page.
                self.results = self.list_method(marker=self.marker,
                        limit=self.limit, prefix=self.prefix)
            else:
                # NOTE(review): this branch calls `_list_method` (leading
                # underscore) while the one above calls `list_method` —
                # presumably two distinct attributes on this class; confirm
                # against the class definition.
                args = self.extra_args
                self.results = self._list_method(self.next_uri, *args)
            if self.results:
                # Remember the last item's marker attribute for the next
                # page request.
                last_res = self.results[-1]
                self.marker = getattr(last_res, self.marker_att)
    # We should have more results.
    try:
        return self.results.pop(0)
    except IndexError:
        # The new page came back empty; iteration is done.
        raise StopIteration()
Return the next available item. If there are no more items in the local 'results' list, check if there is a 'next_uri' value. If so, use that to get the next page of results from the API, and return the first item from that query.
3.256224
2.934081
1.109794
def run(self):
    """
    Executed in the spawned worker thread: blocks in _wait_until() until the
    watched attribute reaches the desired state (or attempts run out), then
    invokes the stored callback with the resulting object.
    """
    result = _wait_until(obj=self.obj, att=self.att,
            desired=self.desired, callback=None,
            interval=self.interval, attempts=self.attempts,
            verbose=False, verbose_atts=None)
    self.callback(result)
Executed in the worker thread: waits for the desired state, then invokes the callback with the result.
8.750547
8.718287
1.0037
def assure_volume(fnc):
    """
    Decorator that converts a volume ID passed as the volume argument into a
    CloudBlockStorageVolume object before invoking the wrapped method.
    """
    @wraps(fnc)
    def _wrapped(self, volume, *args, **kwargs):
        if isinstance(volume, CloudBlockStorageVolume):
            vol = volume
        else:
            # Only the ID was passed; fetch the full object.
            vol = self._manager.get(volume)
        return fnc(self, vol, *args, **kwargs)
    return _wrapped
Converts a volumeID passed as the volume to a CloudBlockStorageVolume object.
3.368636
2.463762
1.367273
def assure_snapshot(fnc):
    """
    Decorator that converts a snapshot ID passed as the snapshot argument
    into a CloudBlockStorageSnapshot object before invoking the wrapped
    method.
    """
    @wraps(fnc)
    def _wrapped(self, snapshot, *args, **kwargs):
        if isinstance(snapshot, CloudBlockStorageSnapshot):
            snap = snapshot
        else:
            # Only the ID was passed; fetch the full object.
            snap = self._snapshot_manager.get(snapshot)
        return fnc(self, snap, *args, **kwargs)
    return _wrapped
Converts a snapshot ID passed as the snapshot to a CloudBlockStorageSnapshot object.
3.214031
2.415623
1.330518
def delete(self):
    """
    Adds a check to make sure that the snapshot is able to be deleted: it
    must be in 'available' or 'error' status, otherwise
    exc.SnapshotNotAvailable is raised.
    """
    if self.status not in ("available", "error"):
        raise exc.SnapshotNotAvailable("Snapshot must be in 'available' "
                "or 'error' status before deleting. Current status: %s"
                % self.status)
    # When there is more than one snapshot for a given volume, attempting to
    # delete them all will throw a 409 exception. This will help by retrying
    # such an error once after a RETRY_INTERVAL second delay.
    try:
        super(CloudBlockStorageSnapshot, self).delete()
    except exc.ClientException as e:
        if "Request conflicts with in-progress 'DELETE" in str(e):
            time.sleep(RETRY_INTERVAL)
            # Try again; if it fails, oh, well...
            super(CloudBlockStorageSnapshot, self).delete()
        else:
            # FIX: any other ClientException used to be silently swallowed;
            # re-raise so unrelated API failures are not hidden.
            raise
Adds a check to make sure that the snapshot is able to be deleted.
7.078622
6.339298
1.116625
def update(self, display_name=None, display_description=None):
    """
    Update one or more of the specified values on this snapshot via its
    manager. Passing all Nones makes the call a no-op; no exception is
    raised.
    """
    return self.manager.update(self,
            display_description=display_description,
            display_name=display_name)
Update the specified values on this snapshot. You may specify one or more values to update. If no values are specified as non-None, the call is a no-op; no exception will be raised.
3.439125
3.764158
0.913651
def attach_to_instance(self, instance, mountpoint):
    """
    Attaches this volume to the cloud server instance at the specified
    mountpoint. This requires a call to the cloud servers API; it cannot
    be done directly.

    :raises exc.VolumeAttachmentFailed: if the nova call fails for any
        reason (the original exception text is embedded in the message).
    """
    instance_id = _resolve_id(instance)
    try:
        # FIX: the response was previously bound to an unused local `resp`;
        # the return value is not needed, so don't keep it.
        self._nova_volumes.create_server_volume(instance_id, self.id,
                mountpoint)
    except Exception as e:
        raise exc.VolumeAttachmentFailed("%s" % e)
Attaches this volume to the cloud server instance at the specified mountpoint. This requires a call to the cloud servers API; it cannot be done directly.
4.811064
4.429652
1.086104
def detach(self):
    """
    Detaches this volume from any device it may be attached to. If it is
    not attached, this is a no-op. Raises exc.VolumeDetachmentFailed when
    the nova call errors.
    """
    if not self.attachments:
        # Not attached; nothing to do and no error needed.
        return
    # A volume can only be attached to one device at a time, but the API
    # reports attachments as a list; use the first (only) entry.
    attachment = self.attachments[0]
    try:
        self._nova_volumes.delete_server_volume(attachment["server_id"],
                attachment["id"])
    except Exception as e:
        raise exc.VolumeDetachmentFailed("%s" % e)
Detaches this volume from any device it may be attached to. If it is not attached, nothing happens.
4.986439
4.40902
1.130963
def delete(self, force=False):
    """
    Volumes cannot be deleted while they are attached to a device or have
    snapshots. With force=True, the volume is first detached and all of its
    snapshots are deleted before the underlying delete is attempted.
    exc.VolumeNotAvailable propagates to the caller if the delete still
    fails.
    """
    if force:
        self.detach()
        self.delete_all_snapshots()
    try:
        super(CloudBlockStorageVolume, self).delete()
    except exc.VolumeNotAvailable:
        # Placeholder for future handling (notify/record?); for now the
        # exception is simply propagated.
        raise
Volumes cannot be deleted if either a) they are attached to a device, or b) they have any snapshots. This method overrides the base delete() method to both better handle these failures, and also to offer a 'force' option. When 'force' is True, the volume is detached, and any dependent snapshots are deleted before calling the volume's delete.
9.753096
7.073998
1.378725
def create_snapshot(self, name=None, description=None, force=False):
    """
    Creates a snapshot of this volume, with an optional name and
    description. Snapshots of attached volumes normally fail; pass
    force=True to override that behavior.
    """
    # Non-None values are required so _create_body() can distinguish this
    # from a request to create an instance.
    return self.manager.create_snapshot(volume=self, name=name or "",
            description=description or "", force=force)
Creates a snapshot of this volume, with an optional name and description. Normally snapshots will not happen if the volume is attached. To override this default behavior, pass force=True.
7.120277
8.160118
0.87257
def list_snapshots(self):
    """Returns a list of all snapshots of this volume."""
    return list(filter(lambda snap: snap.volume_id == self.id,
            self.manager.list_snapshots()))
Returns a list of all snapshots of this volume.
4.612471
3.334817
1.383126
def _create_body(self, name, size=None, volume_type=None, description=None,
        metadata=None, snapshot_id=None, clone_id=None,
        availability_zone=None, image=None):
    """
    Used to create the dict required to create a new volume. Defaults:
    volume_type 'SATA', empty description, empty metadata. `image` may be
    an object or an ID; it is resolved to an ID. Raises exc.InvalidSize
    unless `size` is an integer.
    """
    if not isinstance(size, six.integer_types):
        raise exc.InvalidSize("Volume sizes must be integers")
    volume = {
        "size": size,
        "snapshot_id": snapshot_id,
        "source_volid": clone_id,
        "display_name": name,
        "display_description": "" if description is None else description,
        "volume_type": "SATA" if volume_type is None else volume_type,
        "metadata": {} if metadata is None else metadata,
        "availability_zone": availability_zone,
        "imageRef": None if image is None else utils.get_id(image),
    }
    return {"volume": volume}
Used to create the dict required to create a new volume
2.017216
1.973293
1.022259
def create(self, *args, **kwargs):
    """
    Catches errors that may be returned by the base create() call, and
    raises more informational exceptions: a BadRequest caused by cloning
    to a smaller volume becomes VolumeCloneTooSmall; every other BadRequest
    is re-raised unchanged.
    """
    try:
        return super(CloudBlockStorageManager, self).create(*args, **kwargs)
    except exc.BadRequest as e:
        # NOTE(review): relies on the exception exposing a `.message`
        # attribute (py2-style / project convention) — confirm
        # exc.BadRequest defines it; builtin py3 exceptions do not.
        msg = e.message
        if "Clones currently must be >= original volume size" in msg:
            raise exc.VolumeCloneTooSmall(msg)
        else:
            raise
Catches errors that may be returned, and raises more informational exceptions.
7.567386
7.603331
0.995272
def update(self, volume, display_name=None, display_description=None):
    """
    Update the specified values on the specified volume. You may specify
    one or more values to update. If no values are specified as non-None,
    the call is a no-op; no exception will be raised.
    """
    uri = "/%s/%s" % (self.uri_base, utils.get_id(volume))
    param_dict = {}
    if display_name:
        param_dict["display_name"] = display_name
    if display_description:
        param_dict["display_description"] = display_description
    if not param_dict:
        # Nothing to do!
        return
    body = {"volume": param_dict}
    # FIX: the response tuple was previously bound to unused locals
    # `resp, resp_body`; the return value is discarded, so don't bind it.
    self.api.method_put(uri, body=body)
Update the specified values on the specified volume. You may specify one or more values to update. If no values are specified as non-None, the call is a no-op; no exception will be raised.
2.276542
2.302286
0.988818
def create_snapshot(self, volume, name, description=None, force=False):
    """
    Pass-through method to allow the create_snapshot() call to be made
    directly on a volume's manager; delegates to the API client.
    """
    return self.api.create_snapshot(volume, name, force=force,
            description=description)
Pass-through method to allow the create_snapshot() call to be made directly on a volume.
3.835741
3.851992
0.995781
def _create_body(self, name, description=None, volume=None, force=False):
    """
    Used to create the dict required to create a new snapshot. The `force`
    flag is serialized to the lowercase string form the API expects.
    """
    snapshot = {
        "display_name": name,
        "display_description": description,
        "volume_id": volume.id,
        "force": str(force).lower(),
    }
    return {"snapshot": snapshot}
Used to create the dict required to create a new snapshot
2.82432
2.707339
1.043209
def create(self, name, volume, description=None, force=False):
    """
    Adds exception handling to the default create() call:

    - a BadRequest whose text indicates the volume is attached is raised
      as VolumeNotAvailable with a hint to detach or pass force=True;
    - a 409 ClientException caused by an in-progress snapshot is raised as
      VolumeNotAvailable with a "wait and retry" message;
    - every other error is re-raised unchanged.
    """
    try:
        snap = super(CloudBlockStorageSnapshotManager, self).create(
                name=name, volume=volume, description=description,
                force=force)
    except exc.BadRequest as e:
        msg = str(e)
        if "Invalid volume: must be available" in msg:
            # The volume for the snapshot was attached.
            raise exc.VolumeNotAvailable("Cannot create a snapshot from an "
                    "attached volume. Detach the volume before trying "
                    "again, or pass 'force=True' to the create_snapshot() "
                    "call.")
        else:
            # Some other error
            raise
    except exc.ClientException as e:
        if e.code == 409:
            if "Request conflicts with in-progress" in str(e):
                # NOTE(review): "current creating" reads like a typo for
                # "currently creating" in this user-facing message; left
                # byte-identical here.
                txt = ("The volume is current creating a snapshot. You "
                        "must wait until that completes before attempting "
                        "to create an additional snapshot.")
                raise exc.VolumeNotAvailable(txt)
            else:
                raise
        else:
            raise
    return snap
Adds exception handling to the default create() call.
4.051986
3.927481
1.031701
def _configure_manager(self):
    """
    Creates the managers used by this client: one for volumes, one for
    volume types, and one for snapshots.
    """
    self._manager = CloudBlockStorageManager(self,
            uri_base="volumes", response_key="volume",
            resource_class=CloudBlockStorageVolume)
    self._types_manager = BaseManager(self,
            uri_base="types", response_key="volume_type",
            resource_class=CloudBlockStorageVolumeType)
    self._snapshot_manager = CloudBlockStorageSnapshotManager(self,
            uri_base="snapshots", response_key="snapshot",
            resource_class=CloudBlockStorageSnapshot)
Create the manager to handle the instances, and also another to handle flavors.
3.237326
2.901532
1.11573
def update(self, volume, display_name=None, display_description=None):
    """
    Update one or more of the specified values on the specified volume by
    delegating to the volume object. Passing all Nones makes the call a
    no-op; no exception is raised.
    """
    return volume.update(display_description=display_description,
            display_name=display_name)
Update the specified values on the specified volume. You may specify one or more values to update. If no values are specified as non-None, the call is a no-op; no exception will be raised.
3.418926
3.763026
0.908558
def create_snapshot(self, volume, name=None, description=None, force=False):
    """
    Creates a snapshot of the volume, with an optional name and
    description. Snapshots of attached volumes normally fail; pass
    force=True to override.
    """
    return self._snapshot_manager.create(volume=volume, name=name,
            force=force, description=description)
Creates a snapshot of the volume, with an optional name and description. Normally snapshots will not happen if the volume is attached. To override this default behavior, pass force=True.
4.048826
5.490209
0.737463
def update_snapshot(self, snapshot, display_name=None,
        display_description=None):
    """
    Update one or more of the specified values on the specified snapshot by
    delegating to the snapshot object.
    """
    return snapshot.update(display_description=display_description,
            display_name=display_name)
Update the specified values on the specified snapshot. You may specify one or more values to update.
3.034195
4.863304
0.623896