def process_exception_message(exception):
    """
    Process an exception message.

    Args:
        exception: The exception to process.

    Returns:
        A filtered string summarizing the exception.
    """
    exception_message = str(exception)
    for replace_char in ['\t', '\n', '\\n']:
        exception_message = exception_message.replace(
            replace_char, '' if replace_char != '\t' else ' ')
    return exception_message.replace('section', 'alias')
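A quick behavioral sketch: tabs become spaces, real and literal newlines are dropped (which can fuse adjacent words), and 'section' is rewritten to 'alias':

>>> process_exception_message(ValueError("bad\tvalue in\nsection A"))
'bad value inalias A'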
def endStep(self, key):
    """
    Record the end time for the step.

    If key == None, simply record ptime as end time for class to
    represent the overall runtime since the initialization of the class.
    """
    ptime = _ptime()
    if key is not None:
        self.steps[key]['end'] = ptime
        self.steps[key]['elapsed'] = ptime[1] - self.steps[key]['start'][1]
    self.end = ptime

    print('==== Processing Step ', key, ' finished at ', ptime[0])
    print('')
def change_email(*_, user_id=None, new_email=None):
    """ Change email for a user """
    click.echo(green('\nChange email:'))
    click.echo(green('-' * 40))

    with get_app().app_context():
        user = find_user(dict(id=user_id))
        if not user:
            click.echo(red('User not found\n'))
            return

        user.email = new_email
        result = user_service.save(user)
        if not isinstance(result, User):
            print_validation_errors(result)
            return

        user.confirm_email()
        user_service.save(user)

        msg = 'Change email for user {} to {} \n'
        click.echo(green(msg.format(user.email, new_email)))
def create_widget(self):
    """ Create the underlying widget. """
    d = self.declaration
    self.widget = TabLayout(self.get_context(), None, d.style)
def getStatus(self):
    """returns a word describing the status of the collection (loaded,
    loading, deleted, unloaded, newborn) instead of a number, if you
    prefer the number it's in self.status"""
    if self.status == CONST.COLLECTION_LOADING_STATUS:
        return "loading"
    elif self.status == CONST.COLLECTION_LOADED_STATUS:
        return "loaded"
    elif self.status == CONST.COLLECTION_DELETED_STATUS:
        return "deleted"
    elif self.status == CONST.COLLECTION_UNLOADED_STATUS:
        return "unloaded"
    elif self.status == CONST.COLLECTION_NEWBORN_STATUS:
        return "newborn"
    else:
        raise ValueError("The collection has an Unknown status %s" % self.status)
def async_save_result(self):
    """
    Retrieves the result of this subject's asynchronous save.

    - Returns `True` if the subject was saved successfully.
    - Raises `concurrent.futures.CancelledError` if the save was cancelled.
    - If the save failed, raises the relevant exception.
    - Returns `False` if the subject hasn't finished saving or if the
      subject has not been queued for asynchronous save.
    """
    if hasattr(self, "_async_future") and self._async_future.done():
        self._async_future.result()
        return True
    else:
        return False
def extra_create_kwargs(self):
    """ Inject the domain of the current user in the new model instances. """
    user = self.get_agnocomplete_context()
    if user:
        _, domain = user.email.split('@')
        return {'domain': domain}
    return {}
def _request(self, service, **kw):
    """Do the actual request to Fastbill's API server.

    If successful returns the RESPONSE section of the response, in case
    of an error raises a subclass of FastbillError.
    """
    fb_request = {
        'service': service,
    }
    for key in ['limit', 'offset', 'filter', 'data']:
        fb_request[key] = kw.pop(key, None)

    if kw:
        raise _exc.FastbillRequestError("Unknown arguments: %s" %
                                        ", ".join(kw.keys()))

    data = _jsonencoder.dumps(fb_request)
    _logger.debug("Sending data: %r", data)
    self._pre_request_callback(service, fb_request)

    # TODO: Retry when we hit a 404 (api not found). Probably a deploy.
    http_resp = self.session.post(self.SERVICE_URL,
                                  auth=self.auth,
                                  headers=self.headers,
                                  timeout=self.timeout,
                                  data=data)

    self._post_request_callback(service, fb_request, http_resp)

    try:
        json_resp = http_resp.json()
    except ValueError:
        _logger.debug("Got data: %r", http_resp.content)
        _abort_http(service, http_resp)
        return  # to make PyCharm happy
    else:
        _logger.debug("Got data: %r", json_resp)

    errors = json_resp['RESPONSE'].get('ERRORS')
    if errors:
        _abort_api(service, json_resp, errors)

    # If Fastbill should ever remove the REQUEST or SERVICE section
    # from their responses, just remove the checks.
    if json_resp['REQUEST']['SERVICE'] != service:
        raise _exc.FastbillError(
            "API Error: Got response from wrong service.")

    return _response.FastbillResponse(json_resp['RESPONSE'], self)
def create(gandi, resource, domain, duration, owner, admin, tech, bill,
           nameserver, extra_parameter, background):
    """Buy a domain."""
    if domain:
        gandi.echo('/!\\ --domain option is deprecated and will be removed '
                   'upon next release.')
        gandi.echo("You should use 'gandi domain create %s' instead."
                   % domain)

    if (domain and resource) and (domain != resource):
        gandi.echo('/!\\ You specified both an option and an argument which '
                   'are different, please choose only one between: %s and %s.'
                   % (domain, resource))
        return

    _domain = domain or resource
    if not _domain:
        _domain = click.prompt('Name of the domain')

    result = gandi.domain.create(_domain, duration, owner, admin, tech, bill,
                                 nameserver, extra_parameter, background)
    if background:
        gandi.pretty_echo(result)

    return result
def headerHTML(header, fname):
    """given the bytestring ABF header, make and launch HTML."""
    html = "<html><body><code>"
    html += "<h2>%s</h2>" % (fname)
    html += pprint.pformat(header, indent=1)
    html = html.replace("\n", '<br>').replace(" ", "&nbsp;")
    html = html.replace(r"\x00", "")
    html += "</code></body></html>"
    print("saving header file:", fname)
    f = open(fname, 'w')
    f.write(html)
    f.close()
    webbrowser.open(fname)
def parse_args(spectypes):
    """
    Return arguments object formed by parsing the command line used to
    launch the program.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "-c", "--constants",
        help="emit constants instead of spec dict",
        action="store_true"
    )
    arg_parser.add_argument(
        "spectype",
        help="specifies the spec type to be generated",
        choices=spectypes
    )
    return arg_parser.parse_args()
def import_domaindump():
    """
    Parses ldapdomaindump files and stores hosts and users in elasticsearch.
    """
    parser = argparse.ArgumentParser(
        description="Imports users, groups and computers result files from "
                    "the ldapdomaindump tool; will resolve the names from "
                    "domain_computers output for IPs")
    parser.add_argument("files", nargs='+', help="The domaindump files to import")
    arguments = parser.parse_args()

    domain_users_file = ''
    domain_groups_file = ''
    computer_count = 0
    user_count = 0
    stats = {}
    for filename in arguments.files:
        if filename.endswith('domain_computers.json'):
            print_notification('Parsing domain computers')
            computer_count = parse_domain_computers(filename)
            if computer_count:
                stats['hosts'] = computer_count
                print_success("{} hosts imported".format(computer_count))
        elif filename.endswith('domain_users.json'):
            domain_users_file = filename
        elif filename.endswith('domain_groups.json'):
            domain_groups_file = filename

    if domain_users_file:
        print_notification("Parsing domain users")
        user_count = parse_domain_users(domain_users_file, domain_groups_file)
        if user_count:
            print_success("{} users imported".format(user_count))
            stats['users'] = user_count

    Logger().log(
        "import_domaindump",
        'Imported domaindump, found {} users, {} systems'.format(user_count, computer_count),
        stats)
def _tile(self, n):
    """Get the update tile surrounding particle `n` """
    pos = self._trans(self.pos[n])
    return Tile(pos, pos).pad(self.support_pad)
def perform_command(self):
    """
    Perform command and return the appropriate exit code.

    :rtype: int
    """
    # if no actual arguments, print help
    if len(self.actual_arguments) < 1:
        return self.print_help(short=True)

    # check if we have a recognized tool switch
    for cls, switches in self.TOOLS:
        if self.has_option(switches):
            arguments = [a for a in sys.argv if a not in switches]
            return cls(invoke=(self.invoke + u" %s" % switches[0])).run(arguments=arguments)

    # check if we have -h, --help, or --version
    if u"-h" in self.actual_arguments:
        return self.print_help(short=True)
    if u"--help" in self.actual_arguments:
        return self.print_help(short=False)
    if u"--version" in self.actual_arguments:
        return self.print_name_version()

    # default to run ExecuteTaskCLI
    return ExecuteTaskCLI(invoke=self.invoke).run(arguments=sys.argv)
def _query_zendesk(self, endpoint, object_type, *endpoint_args, **endpoint_kwargs):
    """
    Query Zendesk for items. If an id or list of ids are passed, attempt to
    locate these items in the relevant cache. If they cannot be found, or no
    ids are passed, execute a call to Zendesk to retrieve the items.

    :param endpoint: target endpoint.
    :param object_type: object type we are expecting.
    :param endpoint_args: args for endpoint
    :param endpoint_kwargs: kwargs for endpoint
    :return: either a ResultGenerator or a Zenpy object.
    """
    _id = endpoint_kwargs.get('id', None)
    if _id:
        item = self.cache.get(object_type, _id)
        if item:
            return item
        else:
            return self._get(url=self._build_url(endpoint(*endpoint_args, **endpoint_kwargs)))
    elif 'ids' in endpoint_kwargs:
        cached_objects = []
        # Check to see if we have all objects in the cache.
        # If we are missing even one we request them all again.
        # This could be optimized to only request the missing objects.
        for _id in endpoint_kwargs['ids']:
            obj = self.cache.get(object_type, _id)
            if not obj:
                return self._get(self._build_url(endpoint=endpoint(*endpoint_args, **endpoint_kwargs)))
            cached_objects.append(obj)
        return ZendeskResultGenerator(self, {}, response_objects=cached_objects, object_type=object_type)
    else:
        return self._get(self._build_url(endpoint=endpoint(*endpoint_args, **endpoint_kwargs)))
def propagate_defaults(config_doc):
    """
    Propagate default values to sections of the doc.
    """
    for group_name, group_doc in config_doc.items():
        if isinstance(group_doc, dict):
            defaults = group_doc.get('defaults', {})
            for item_name, item_doc in group_doc.items():
                if item_name == 'defaults':
                    continue
                if isinstance(item_doc, dict):
                    group_doc[item_name] = \
                        dict_merge_pair(copy.deepcopy(defaults), item_doc)
    return config_doc
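A quick sketch of the merge, assuming `dict_merge_pair` recursively merges two dicts with the second argument taking precedence:

>>> doc = {'servers': {'defaults': {'port': 8080, 'retries': 3},
...                    'web': {'port': 80}}}
>>> propagate_defaults(doc)
{'servers': {'defaults': {'port': 8080, 'retries': 3},
             'web': {'port': 80, 'retries': 3}}}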
def server(self):
    """Returns the size of remote files """
    try:
        tar = urllib2.urlopen(self.registry)
        meta = tar.info()
        return int(meta.getheaders("Content-Length")[0])
    except (urllib2.URLError, IndexError):
        return " "
def menucheck(self, window_name, object_name):
    """
    Check (click) a menu item.

    @param window_name: Window name to look for, either full name,
    LDTP's name convention, or a Unix glob.
    @type window_name: string
    @param object_name: Object name to look for, either full name,
    LDTP's name convention, or a Unix glob. Or menu hierarchy
    @type object_name: string

    @return: 1 on success.
    @rtype: integer
    """
    menu_handle = self._get_menu_handle(window_name, object_name)
    if not menu_handle.AXEnabled:
        raise LdtpServerException(u"Object %s state disabled" % object_name)
    try:
        if menu_handle.AXMenuItemMarkChar:
            # Already checked
            return 1
    except atomac._a11y.Error:
        pass
    menu_handle.Press()
    return 1
def makeAsn(segID, N, CA, C, O, geo):
    '''Creates an Asparagine residue'''
    ## R-Group
    CA_CB_length = geo.CA_CB_length
    C_CA_CB_angle = geo.C_CA_CB_angle
    N_C_CA_CB_diangle = geo.N_C_CA_CB_diangle

    CB_CG_length = geo.CB_CG_length
    CA_CB_CG_angle = geo.CA_CB_CG_angle
    N_CA_CB_CG_diangle = geo.N_CA_CB_CG_diangle

    CG_OD1_length = geo.CG_OD1_length
    CB_CG_OD1_angle = geo.CB_CG_OD1_angle
    CA_CB_CG_OD1_diangle = geo.CA_CB_CG_OD1_diangle

    CG_ND2_length = geo.CG_ND2_length
    CB_CG_ND2_angle = geo.CB_CG_ND2_angle
    CA_CB_CG_ND2_diangle = geo.CA_CB_CG_ND2_diangle

    carbon_b = calculateCoordinates(N, C, CA, CA_CB_length, C_CA_CB_angle, N_C_CA_CB_diangle)
    CB = Atom("CB", carbon_b, 0.0, 1.0, " ", " CB", 0, "C")
    carbon_g = calculateCoordinates(N, CA, CB, CB_CG_length, CA_CB_CG_angle, N_CA_CB_CG_diangle)
    CG = Atom("CG", carbon_g, 0.0, 1.0, " ", " CG", 0, "C")
    oxygen_d1 = calculateCoordinates(CA, CB, CG, CG_OD1_length, CB_CG_OD1_angle, CA_CB_CG_OD1_diangle)
    OD1 = Atom("OD1", oxygen_d1, 0.0, 1.0, " ", " OD1", 0, "O")
    nitrogen_d2 = calculateCoordinates(CA, CB, CG, CG_ND2_length, CB_CG_ND2_angle, CA_CB_CG_ND2_diangle)
    ND2 = Atom("ND2", nitrogen_d2, 0.0, 1.0, " ", " ND2", 0, "N")

    ## Create Residue Data Structure
    res = Residue((' ', segID, ' '), "ASN", ' ')
    res.add(N)
    res.add(CA)
    res.add(C)
    res.add(O)
    res.add(CB)
    res.add(CG)
    res.add(OD1)
    res.add(ND2)
    return res
def get_ad_via_hitid(hit_id):
    ''' Get ad via HIT id '''
    username = CONFIG.get('psiTurk Access', 'psiturk_access_key_id')
    password = CONFIG.get('psiTurk Access', 'psiturk_secret_access_id')
    try:
        req = requests.get('https://api.psiturk.org/api/ad/lookup/' + hit_id,
                           auth=(username, password))
    except Exception:
        raise ExperimentError('api_server_not_reachable')
    else:
        if req.status_code == 200:
            return req.json()['ad_id']
        else:
            return "error"
def user_remove(name, database=None, user=None, password=None, host=None, port=None):
    '''
    Remove a cluster admin or a database user.

    If a database is specified: it will remove the database user.
    If a database is not specified: it will remove the cluster admin.

    name
        User name to remove

    database
        The database to remove the user from

    user
        The user to connect as

    password
        The password of the user

    host
        The host to connect to

    port
        The port to connect to

    CLI Example:

    .. code-block:: bash

        salt '*' influxdb08.user_remove <name>

        salt '*' influxdb08.user_remove <name> <database>

        salt '*' influxdb08.user_remove <name> <database> <user> <password> <host> <port>
    '''
    if not user_exists(name, database, user, password, host, port):
        if database:
            log.info('User \'%s\' does not exist for DB \'%s\'', name, database)
        else:
            log.info('Cluster admin \'%s\' does not exist', name)
        return False

    client = _client(user=user, password=password, host=host, port=port)

    if not database:
        return client.delete_cluster_admin(name)

    client.switch_database(database)
    return client.delete_database_user(name)
def _parse_reserved_marker(self, fptr):
    """Marker range between 0xff30 and 0xff39."""
    the_id = '0x{0:x}'.format(self._marker_id)
    segment = Segment(marker_id=the_id, offset=self._offset, length=0)
    return segment
def step_undefined_step_snippets_should_not_exist_for_table(context):
    """
    Checks if undefined-step snippets are not provided.

    EXAMPLE:
        Then undefined-step snippets should not exist for:
            | Step |
            | When a known step is used |
            | Then another known step is used |
    """
    assert context.table, "REQUIRES: table"
    for row in context.table.rows:
        step = row["Step"]
        step_undefined_step_snippet_should_not_exist_for(context, step)
def _format_regular_value(self, str_in):
    # type: (Text) -> Text
    """
    we need to reformat integer strings, as there can be different strings
    for the same integer. The strategy of unification here is to first
    parse the integer string to an Integer type. Thus all of '+13', ' 13',
    '13' will be parsed to 13. We then convert the integer value to an
    unambiguous string (no whitespaces, leading '-' for negative numbers,
    no leading '+').

    :param str_in: integer string
    :return: integer string without whitespaces, leading '-' for negative
        numbers, no leading '+'
    """
    try:
        value = int(str_in, base=10)
        return str(value)
    except ValueError as e:
        msg = "Invalid integer. Read '{}'.".format(str_in)
        e_new = InvalidEntryError(msg)
        e_new.field_spec = self
        raise_from(e_new, e)
def matches(self, name):
    """Does the name match my requirements?

    To match, a name must match config.testMatch OR config.include
    and it must not match config.exclude
    """
    return ((self.match.search(name)
             or (self.include
                 and filter(None, [inc.search(name) for inc in self.include])))
            and ((not self.exclude)
                 or not filter(None, [exc.search(name) for exc in self.exclude])))
def restart(name, timeout=90, with_deps=False, with_parents=False):
    '''
    Restart the named service. This issues a stop command followed by a start.

    Args:
        name: The name of the service to restart.

            .. note::
                If the name passed is ``salt-minion`` a scheduled task is
                created and executed to restart the salt-minion service.

        timeout (int):
            The time in seconds to wait for the service to stop and start
            before returning. Default is 90 seconds

            .. note::
                The timeout is cumulative meaning it is applied to the stop
                and then to the start command. A timeout of 90 could take up
                to 180 seconds if the service is long in stopping and starting

            .. versionadded:: 2017.7.9,2018.3.4

        with_deps (bool):
            If enabled restart the given service and the services the
            current service depends on.

        with_parents (bool):
            If enabled and in case other running services depend on the to be
            restarted service, this flag indicates that those other services
            will be restarted as well.

            If disabled, the service restart will fail in case other running
            services depend on the to be restarted service.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' service.restart <service name>
    '''
    if 'salt-minion' in name:
        create_win_salt_restart_task()
        return execute_salt_restart_task()

    ret = set()
    ret.add(stop(name=name, timeout=timeout, with_deps=with_deps,
                 with_parents=with_parents))
    ret.add(start(name=name, timeout=timeout, with_deps=with_deps,
                  with_parents=with_parents))
    return False not in ret
def reset(self):
    """
    Deactivate all cells.
    """
    self.activeCells = np.empty(0, dtype="uint32")
    self.activeDeltaSegments = np.empty(0, dtype="uint32")
    self.activeFeatureLocationSegments = np.empty(0, dtype="uint32")
def get_commands_from(self, args):
    """
    We have to code the key names for each depth. This method scans
    for each level and returns a list of the command arguments.
    """
    commands = []
    for i in itertools.count(0):
        try:
            commands.append(getattr(args, self.arg_label_fmt % i))
        except AttributeError:
            break
    return commands
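A sketch of the scan; the 'command%d' label format and the Namespace values here are hypothetical:

>>> from argparse import Namespace
>>> args = Namespace(command0='server', command1='start')
>>> # With self.arg_label_fmt == 'command%d', the loop reads command0,
>>> # command1, then stops at the missing command2 and returns:
>>> # ['server', 'start']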
def create_df_file_with_query(self, query, output):
    """
    Dumps in df in chunks to avoid crashes.
    """
    chunk_size = 100000
    offset = 0
    data = defaultdict(lambda: defaultdict(list))

    with open(output, 'wb') as outfile:
        query = query.replace(';', '')
        query += """ LIMIT {chunk_size} OFFSET {offset};"""
        while True:
            print(offset)
            # format into a fresh string each pass; formatting the template
            # in place would leave no placeholders for the next offset
            chunk_query = query.format(
                chunk_size=chunk_size,
                offset=offset
            )
            df = pd.read_sql(chunk_query, self.engine)
            pickle.dump(df, outfile)
            offset += chunk_size
            if len(df) < chunk_size:
                break
def _do_identity_role_list(args):
    """Lists the current on-chain configuration values."""
    rest_client = RestClient(args.url)
    state = rest_client.list_state(subtree=IDENTITY_NAMESPACE + _ROLE_PREFIX)

    head = state['head']
    state_values = state['data']
    printable_roles = []
    for state_value in state_values:
        role_list = RoleList()
        decoded = b64decode(state_value['data'])
        role_list.ParseFromString(decoded)

        for role in role_list.roles:
            printable_roles.append(role)

    printable_roles.sort(key=lambda r: r.name)

    if args.format == 'default':
        tty_width = tty.width()
        for role in printable_roles:
            # Set value width to the available terminal space, or the min width
            width = tty_width - len(role.name) - 3
            width = width if width > _MIN_PRINT_WIDTH else _MIN_PRINT_WIDTH
            value = (role.policy_name[:width] + '...'
                     if len(role.policy_name) > width
                     else role.policy_name)
            print('{}: {}'.format(role.name, value))
    elif args.format == 'csv':
        try:
            writer = csv.writer(sys.stdout, quoting=csv.QUOTE_ALL)
            writer.writerow(['KEY', 'VALUE'])
            for role in printable_roles:
                writer.writerow([role.name, role.policy_name])
        except csv.Error:
            raise CliException('Error writing CSV')
    elif args.format == 'json' or args.format == 'yaml':
        roles_snapshot = {
            'head': head,
            'roles': {role.name: role.policy_name
                      for role in printable_roles}
        }
        if args.format == 'json':
            print(json.dumps(roles_snapshot, indent=2, sort_keys=True))
        else:
            print(yaml.dump(roles_snapshot, default_flow_style=False)[0:-1])
    else:
        raise AssertionError('Unknown format {}'.format(args.format))
def numdiff(fun, args):
    """Vectorized numerical differentiation"""

    # vectorized version
    epsilon = 1e-8
    args = list(args)
    v0 = fun(*args)
    N = v0.shape[0]
    # fun is assumed to return an (N, l_v) array; len(v0) would yield N
    # and give dv the wrong shape below
    l_v = v0.shape[1]
    dvs = []
    for i, a in enumerate(args):
        l_a = a.shape[1]
        dv = numpy.zeros((N, l_v, l_a))
        nargs = list(args)  # .copy()
        for j in range(l_a):
            xx = args[i].copy()
            xx[:, j] += epsilon
            nargs[i] = xx
            dv[:, :, j] = (fun(*nargs) - v0) / epsilon
        dvs.append(dv)
    return [v0] + dvs
def locations(self, exists=True):
    """ Return the location of the config file(s).

    A given directory will be scanned for ``*.conf`` files, in
    alphabetical order. Any duplicates will be eliminated.

    If ``exists`` is True, only existing configuration locations are
    returned.
    """
    result = []
    for config_files in self.config_paths:
        if not config_files:
            continue
        if os.path.isdir(config_files):
            config_files = [os.path.join(config_files, i)
                            for i in sorted(os.listdir(config_files))
                            if i.endswith('.conf')]
        else:
            config_files = [config_files]
        for config_file in config_files:
            if not exists or os.path.exists(config_file):
                config_file = os.path.abspath(config_file)
                if config_file in result:
                    result.remove(config_file)
                result.append(config_file)
    return result
def get_videos_for_ids(
        edx_video_ids,
        sort_field=None,
        sort_dir=SortDirection.asc
):
    """
    Returns an iterator of videos that match the given list of ids.

    Args:
        edx_video_ids (list)
        sort_field (VideoSortField)
        sort_dir (SortDirection)

    Returns:
        A generator expression that contains the videos found, sorted by
        the given field and direction, with ties broken by edx_video_id to
        ensure a total order
    """
    videos, __ = _get_videos_for_filter(
        {"edx_video_id__in": edx_video_ids},
        sort_field,
        sort_dir,
    )
    return videos
def getmembers(obj, *predicates):
    """ Return all the members of an object as a list of `(key, value)` tuples, sorted by name.

    The optional list of predicates can be used to filter the members.

    The default predicate drops members whose name starts with '_'. To disable it, pass `None` as the first predicate.

    :param obj: Object to list the members for
    :param predicates: Functions to filter the members.

        If the first value is not None, a default predicate is added that filters private members out (name starts with '_')

    :type predicates: tuple[Callable|None]
    :returns: Sorted list of (name, value) tuples
    :rtype: list[(str, *)]
    """
    # Add default
    if not predicates or predicates[0] is not None:
        predicates = (lambda key, value: not key.startswith('_'),) + predicates

    # Build composite predicate
    def predicate(key_value_tuple):
        key, value = key_value_tuple
        for p in predicates:
            if p is not None and not p(key, value):
                return False
        return True

    # Filter
    return filter(predicate, inspect.getmembers(obj))
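A usage sketch; note the result is whatever `filter` returns, so on Python 3 wrap it in `list`:

>>> class Point:
...     x = 1
...     y = 2
...     _hidden = 3
>>> list(getmembers(Point, lambda key, value: isinstance(value, int)))
[('x', 1), ('y', 2)]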
def quote_single_identifier(self, string):
    """
    Quotes a single identifier (no dot chain separation).

    :param string: The identifier name to be quoted.
    :type string: str

    :return: The quoted identifier string.
    :rtype: str
    """
    c = self.get_identifier_quote_character()

    return "%s%s%s" % (c, string.replace(c, c + c), c)
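For a platform whose quote character is `"` (a hypothetical platform instance), an embedded quote is doubled:

>>> platform.quote_single_identifier('my"column')
'"my""column"'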
def run(self, command):
    """
    Execute a command on the remote host. Return a tuple containing
    an integer status and two strings, the first containing stdout
    and the second containing stderr from the command.
    """
    boto.log.debug('running:%s on %s' % (command, self.server.instance_id))
    try:
        t = self._ssh_client.exec_command(command)
    except paramiko.SSHException:
        # the channel never opened, so there is no output to read
        return (1, '', '')
    status = 0
    std_out = t[1].read()
    std_err = t[2].read()
    t[0].close()
    t[1].close()
    t[2].close()
    boto.log.debug('stdout: %s' % std_out)
    boto.log.debug('stderr: %s' % std_err)
    return (status, std_out, std_err)
def attach_network_interface(device_index, name=None, network_interface_id=None,
                             instance_name=None, instance_id=None,
                             region=None, key=None, keyid=None, profile=None):
    '''
    Attach an Elastic Network Interface.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt myminion boto_ec2.attach_network_interface my_eni instance_name=salt-master device_index=0
    '''
    if not salt.utils.data.exactly_one((name, network_interface_id)):
        raise SaltInvocationError(
            "Exactly one (but not both) of 'name' or 'network_interface_id' "
            "must be provided."
        )

    if not salt.utils.data.exactly_one((instance_name, instance_id)):
        raise SaltInvocationError(
            "Exactly one (but not both) of 'instance_name' or 'instance_id' "
            "must be provided."
        )

    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    result = _get_network_interface(conn, name, network_interface_id)
    if 'error' in result:
        return result
    eni = result['result']
    try:
        info = _describe_network_interface(eni)
        network_interface_id = info['id']
    except KeyError:
        r['error'] = {'message': 'ID not found for this network interface.'}
        return r

    if instance_name:
        try:
            instance_id = get_id(name=instance_name, region=region, key=key,
                                 keyid=keyid, profile=profile)
        except boto.exception.BotoServerError as e:
            log.error(e)
            return False

    try:
        r['result'] = conn.attach_network_interface(
            network_interface_id,
            instance_id,
            device_index
        )
    except boto.exception.EC2ResponseError as e:
        r['error'] = __utils__['boto.get_error'](e)
    return r
def get(self, *args, **kwargs):
    """
    Get the sub interfaces for this VlanInterface

    >>> itf = engine.interface.get(3)
    >>> list(itf.vlan_interface)
    [Layer3PhysicalInterfaceVlan(name=VLAN 3.3), Layer3PhysicalInterfaceVlan(name=VLAN 3.5), Layer3PhysicalInterfaceVlan(name=VLAN 3.4)]

    :param int args: args are translated to vlan_id=args[0]
    :param kwargs: key value for sub interface
    :rtype: VlanInterface or None
    """
    if args:
        kwargs = {'vlan_id': str(args[0])}
    key, value = kwargs.popitem()
    for item in self:
        if 'vlan_id' in key and getattr(item, key, None) == value:
            return item
        for vlan in item.interfaces:
            if getattr(vlan, key, None) == value:
                return item
def flatten(repertoire, big_endian=False):
    """Flatten a repertoire, removing empty dimensions.

    By default, the flattened repertoire is returned in little-endian order.

    Args:
        repertoire (np.ndarray or None): A repertoire.

    Keyword Args:
        big_endian (boolean): If ``True``, flatten the repertoire in
            big-endian order.

    Returns:
        np.ndarray: The flattened repertoire.
    """
    if repertoire is None:
        return None
    order = 'C' if big_endian else 'F'
    # For efficiency, use `ravel` (which returns a view of the array) instead
    # of `np.flatten` (which copies the whole array).
    return repertoire.squeeze().ravel(order=order)
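A small, self-contained illustration of the two orders:

>>> import numpy as np
>>> r = np.arange(4).reshape(2, 2, 1)  # trailing singleton dimension is squeezed away
>>> flatten(r)                         # little-endian (Fortran order)
array([0, 2, 1, 3])
>>> flatten(r, big_endian=True)        # big-endian (C order)
array([0, 1, 2, 3])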
def _init_rgb(self, r: int, g: int, b: int) -> None:
    """ Initialize from red, green, blue args. """
    if self.rgb_mode:
        self.rgb = (r, g, b)
        self.hexval = rgb2hex(r, g, b)
    else:
        self.rgb = hex2rgb(rgb2termhex(r, g, b))
        self.hexval = rgb2termhex(r, g, b)
    self.code = hex2term(self.hexval)
def save(self, filename=None):
    """
    saves the configuration in the given filename, if it is none the
    filename at load time is used.

    :param filename: the file name
    :type filename: string
    :return:
    """
    content = self.data.yaml()
    # fall back to the filename recorded at load time, as documented
    filename = filename or ConfigDict.filename
    with open(Config.path_expand(filename), 'w') as f:
        f.write(content)
def delete_invalid_route(self):
    """
    Delete any invalid routes for this interface. An invalid route is
    a left over when an interface is changed to a different network.

    :return: None
    """
    try:
        routing = self._engine.routing.get(self.interface_id)
        for route in routing:
            if route.invalid or route.to_delete:
                route.delete()
    except InterfaceNotFound:  # Only VLAN identifiers, so no routing
        pass
def chgrp(path, group):
    '''
    Change the group of a file

    Under Windows, this will do nothing.

    While a file in Windows does have a 'primary group', this rarely used
    attribute generally has no bearing on permissions unless intentionally
    configured and is only used to support Unix compatibility features (e.g.
    Services For Unix, NFS services).

    Salt, therefore, remaps this function to do nothing while still being
    compatible with Unix behavior. When managing Windows systems, this
    function is superfluous and will generate an info level log entry if
    used directly.

    If you do actually want to set the 'primary group' of a file, use
    ``file.chpgrp``.

    To set group permissions use ``file.set_perms``

    Args:
        path (str): The path to the file or directory
        group (str): The group (unused)

    Returns:
        None

    CLI Example:

    .. code-block:: bash

        salt '*' file.chpgrp c:\\temp\\test.txt administrators
    '''
    func_name = '{0}.chgrp'.format(__virtualname__)
    if __opts__.get('fun', '') == func_name:
        log.info('The function %s should not be used on Windows systems; see '
                 'function docs for details.', func_name)
    log.debug('win_file.py %s Doing nothing for %s', func_name, path)

    return None
def getrawpart(self, msgid, stream=sys.stdout):
    """Get the first part from the message and print it raw.
    """
    for hdr, part in self._get(msgid):
        pl = part.get_payload(decode=True)
        if pl is not None:
            print(pl, file=stream)
            break
def stop(self):
    """Stop the Heartbeat Checker.

    :return:
    """
    self._running.clear()
    with self._lock:
        if self._timer:
            self._timer.cancel()
        self._timer = None
def on(self):
    """!
    \~english
    Open Audio output. set pin mode to ALT0
    @return a boolean value. if True means open audio output is OK otherwise failed to open.

    \~chinese
    Open the audio output. Set the pin mode to ALT0.
    @return a boolean value. True means the audio output opened successfully, otherwise it failed to open.
    """
    isOK = True
    try:
        if self.channelR is not None:
            sub.call(["gpio", "-g", "mode", "{}".format(self.channelR), self.PIN_MODE_AUDIO])
    except Exception:
        isOK = False
        print("Open audio right channel failed.")

    try:
        if self.channelL is not None:
            sub.call(["gpio", "-g", "mode", "{}".format(self.channelL), self.PIN_MODE_AUDIO])
    except Exception:
        isOK = False
        print("Open audio left channel failed.")
    return isOK
def records(self):
    """
    Access the records

    :returns: twilio.rest.api.v2010.account.usage.record.RecordList
    :rtype: twilio.rest.api.v2010.account.usage.record.RecordList
    """
    if self._records is None:
        self._records = RecordList(self._version, account_sid=self._solution['account_sid'], )
    return self._records
def _estimate_AICc(self, y, mu, weights=None):
    """
    estimate the corrected Akaike Information Criterion

    relies on the estimated degrees of freedom, which must be computed
    before.

    Parameters
    ----------
    y : array-like of shape (n_samples,)
        output data vector
    mu : array-like of shape (n_samples,)
        expected value of the targets given the model and inputs
    weights : array-like shape (n_samples,) or None, optional
        containing sample weights
        if None, defaults to array of ones

    Returns
    -------
    AICc : float
        the corrected AIC
    """
    edof = self.statistics_['edof']
    if self.statistics_['AIC'] is None:
        self.statistics_['AIC'] = self._estimate_AIC(y, mu, weights)
    return self.statistics_['AIC'] + 2*(edof + 1)*(edof + 2)/(y.shape[0] - edof - 2)
def serial(self, may_block=True):
    """ Get the YubiKey serial number (requires YubiKey 2.2). """
    if not self.capabilities.have_serial_number():
        raise yubikey_base.YubiKeyVersionError(
            "Serial number unsupported in YubiKey %s" % self.version())
    return self._read_serial(may_block)
def width(self, level):
    """
    Width at given level

    :param level:
    :return:
    """
    return self.x_at_y(level, reverse=True) - self.x_at_y(level)
def split_signature(klass, signature):
    """Return a list of the element signatures of the topmost signature tuple.

    If the signature is not a tuple, it returns one element with the entire
    signature. If the signature is an empty tuple, the result is [].

    This is useful for e. g. iterating over method parameters which are
    passed as a single Variant.
    """
    if signature == '()':
        return []

    if not signature.startswith('('):
        return [signature]

    result = []
    head = ''
    tail = signature[1:-1]  # eat the surrounding ()
    while tail:
        c = tail[0]
        head += c
        tail = tail[1:]

        if c in ('m', 'a'):
            # prefixes, keep collecting
            continue
        if c in ('(', '{'):
            # consume until corresponding )/}
            level = 1
            up = c
            if up == '(':
                down = ')'
            else:
                down = '}'
            while level > 0:
                c = tail[0]
                head += c
                tail = tail[1:]
                if c == up:
                    level += 1
                elif c == down:
                    level -= 1

        # otherwise we have a simple type
        result.append(head)
        head = ''
    return result
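A few worked D-Bus signatures, assuming this is bound as a classmethod on some class `Klass` (hypothetical, since the first parameter is `klass`):

>>> Klass.split_signature('(sia{sv})')
['s', 'i', 'a{sv}']
>>> Klass.split_signature('as')
['as']
>>> Klass.split_signature('()')
[]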
def lm_deltat(freqs, damping_times, modes):
    """Return the minimum delta_t of all the modes given, with delta_t
    given by the inverse of the frequency at which the amplitude of the
    ringdown falls to 1/1000 of the peak amplitude.
    """
    dt = {}
    for lmn in modes:
        l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2])
        for n in range(nmodes):
            dt['%d%d%d' % (l, m, n)] = 1. / qnm_freq_decay(
                freqs['%d%d%d' % (l, m, n)],
                damping_times['%d%d%d' % (l, m, n)],
                1. / 1000)

    delta_t = min(dt.values())
    if delta_t < min_dt:
        delta_t = min_dt

    return delta_t
def clone(self, **kwargs):
    """
    Clone a part.

    .. versionadded:: 2.3

    :param kwargs: (optional) additional keyword=value arguments
    :type kwargs: dict
    :return: cloned :class:`models.Part`
    :raises APIError: if the `Part` could not be cloned

    Example
    -------
    >>> bike = client.model('Bike')
    >>> bike2 = bike.clone()
    """
    parent = self.parent()
    return self._client._create_clone(parent, self, **kwargs)
def register_event(self, event_type, pattern, handler):
    """
    When ``event_type`` is observed for ``pattern``, triggers ``handler``.

    For "CHANGE" events, ``pattern`` should be a tuple of
    ``min_changed_pixels`` and the base screen state.
    """
    if event_type not in self._supported_events:
        raise ValueError("Unsupported event type {}".format(event_type))
    if event_type != "CHANGE" and not isinstance(pattern, Pattern) and not isinstance(pattern, basestring):
        raise ValueError("Expected pattern to be a Pattern or string")
    if event_type == "CHANGE" and not (len(pattern) == 2
                                       and isinstance(pattern[0], int)
                                       and isinstance(pattern[1], numpy.ndarray)):
        raise ValueError("For \"CHANGE\" events, ``pattern`` should be a tuple "
                         "of ``min_changed_pixels`` and the base screen state.")

    # Create event object
    event = {
        "pattern": pattern,
        "event_type": event_type,
        "count": 0,
        "handler": handler,
        "name": uuid.uuid4(),
        "active": True
    }
    self._events[event["name"]] = event
    return event["name"]
def add_relationship(self, rel_uri, obj):
    """
    Add a new relationship to the RELS-EXT for this object.
    Calls :meth:`API_M.addRelationship`.

    Example usage::

        isMemberOfCollection = 'info:fedora/fedora-system:def/relations-external#isMemberOfCollection'
        collection_uri = 'info:fedora/foo:456'
        object.add_relationship(isMemberOfCollection, collection_uri)

    :param rel_uri: URI for the new relationship
    :param obj: related object; can be :class:`DigitalObject` or string;
        if string begins with info:fedora/ it will be treated as a
        resource, otherwise it will be treated as a literal
    :rtype: boolean
    """
    if isinstance(rel_uri, URIRef):
        rel_uri = force_text(rel_uri)

    obj_is_literal = True
    if isinstance(obj, DigitalObject):
        obj = obj.uri
        obj_is_literal = False
    elif (isinstance(obj, str) or isinstance(obj, six.string_types)) \
            and obj.startswith('info:fedora/'):
        obj_is_literal = False

    # this call will change RELS-EXT, possibly creating it if it's
    # missing. remove any cached info we have for that datastream.
    if 'RELS-EXT' in self.dscache:
        del self.dscache['RELS-EXT']
    self._ds_list = None

    return self.api.addRelationship(self.pid, self.uri, rel_uri, obj, obj_is_literal)
def observed_data_to_xarray(self):
    """Convert observed data to xarray."""
    posterior_model = self.posterior_model
    if self.dims is None:
        dims = {}
    else:
        dims = self.dims
    observed_names = self.observed_data
    if isinstance(observed_names, str):
        observed_names = [observed_names]
    observed_data = OrderedDict()
    for key in observed_names:
        vals = np.atleast_1d(posterior_model.data[key])
        val_dims = dims.get(key)
        val_dims, coords = generate_dims_coords(
            vals.shape, key, dims=val_dims, coords=self.coords
        )
        observed_data[key] = xr.DataArray(vals, dims=val_dims, coords=coords)
    return xr.Dataset(data_vars=observed_data, attrs=make_attrs(library=self.stan))
def init(self, context):
    """Initializes sitetree to handle new request.

    :param Context|None context:
    """
    self.cache = Cache()
    self.current_page_context = context
    self.current_request = context.get('request', None) if context else None
    self.current_lang = get_language()
    self._current_app_is_admin = None
    self._current_user_permissions = _UNSET
    self._items_urls = {}  # Resolved urls are cached for a request.
    self._current_items = {}
def _Close(self):
    """Closes the file system object.

    Raises:
      IOError: if the close failed.
    """
    self._vslvm_volume_group = None
    self._vslvm_handle.close()
    self._vslvm_handle = None

    self._file_object.close()
    self._file_object = None
def init(options=None, ini_paths=None, argv=None, strict=False, **parser_kwargs):
    """Initialize singleton config and read/parse configuration.

    :keyword bool strict: when true, will error out on invalid arguments
        (default behavior is to ignore them)
    :returns: the loaded configuration.
    """
    global SINGLETON
    SINGLETON = Config(
        options=options,
        ini_paths=ini_paths,
        argv=argv,
        **parser_kwargs)
    SINGLETON.parse(argv, strict=strict)
    return SINGLETON
def preview_email_marketing_campaign(self, email_marketing_campaign):
    """Returns HTML and text previews of an EmailMarketingCampaign.
    """
    url = self.api.join('/'.join([
        self.EMAIL_MARKETING_CAMPAIGN_URL,
        str(email_marketing_campaign.constant_contact_id),
        'preview']))
    response = url.get()
    self.handle_response_status(response)
    return (response.json()['preview_email_content'],
            response.json()['preview_text_content'])
def get(self, key, def_val=None):
    """Return lowercase key value."""
    assert isinstance(key, basestring)
    return dict.get(self, key.lower(), def_val)
def dump(result):
    """Dump result into a string, useful for debugging."""
    if isinstance(result, dict):
        # Result is a search result.
        statuses = result['results']
    else:
        # Result is a lookup result.
        statuses = result
    status_str_list = []
    for status in statuses:
        status_str_list.append(textwrap.dedent(u"""
            @{screen_name} -- https://twitter.com/{screen_name}
            {text}
            """).strip().format(
                screen_name=status['actor']['preferredUsername'],
                text=status['body']))
    return u'\n\n'.join(status_str_list)
def setup(docker_mount=None, force=False):
    '''
    Prepare a vanilla server by installing docker, curl, and sshpass.
    If a file called ``dot_dockercfg`` exists in the current working
    directory, it is uploaded as ``~/.dockercfg``.

    Args:
        * docker_mount=None: Partition that will be mounted as /var/lib/docker
    '''
    if not is_ubuntu() and not is_boot2docker():
        raise Exception('Head In The Clouds Docker is only supported on Ubuntu')

    # a bit hacky
    if os.path.exists('dot_dockercfg') and not fabric.contrib.files.exists('~/.dockercfg'):
        put('dot_dockercfg', '~/.dockercfg')

    if not fabric.contrib.files.exists('~/.ssh/id_rsa'):
        fab.run('ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa')

    if docker_is_installed() and not force:
        return

    for attempt in range(3):
        sudo('wget -qO- https://get.docker.io/gpg | apt-key add -')
        sudo('sh -c "echo deb http://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list"')
        with settings(warn_only=True):
            sudo('apt-get update')
            failed = sudo('apt-get install -y lxc-docker sshpass curl').failed
        if not failed:
            break

    if docker_mount:
        create_docker_mount(docker_mount)
def new_dxworkflow(title=None, summary=None, description=None, output_folder=None, init_from=None, **kwargs):
    '''
    :param title: Workflow title (optional)
    :type title: string
    :param summary: Workflow summary (optional)
    :type summary: string
    :param description: Workflow description (optional)
    :type description: string
    :param output_folder: Default output folder of the workflow (optional)
    :type output_folder: string
    :param init_from: Another analysis workflow object handler or an analysis
        (string or handler) from which to initialize the metadata (optional)
    :type init_from: :class:`~dxpy.bindings.dxworkflow.DXWorkflow`, :class:`~dxpy.bindings.dxanalysis.DXAnalysis`, or string (for analysis IDs only)
    :rtype: :class:`DXWorkflow`

    Additional optional parameters not listed: all those under
    :func:`dxpy.bindings.DXDataObject.new`, except `details`.

    Creates a new remote workflow object with project set to *project* and
    returns the appropriate handler.

    Example:

        r = dxpy.new_dxworkflow(title="My Workflow", description="This workflow contains...")

    Note that this function is shorthand for::

        dxworkflow = DXWorkflow()
        dxworkflow.new(**kwargs)
    '''
    dxworkflow = DXWorkflow()
    dxworkflow.new(title=title, summary=summary, description=description,
                   output_folder=output_folder, init_from=init_from, **kwargs)
    return dxworkflow
def telnet_config(self, status):
    """
    status:
        false - Telnet is disabled
        true - Telnet is enabled
    """
    ret = self.command(
        'configManager.cgi?action=setConfig&Telnet.Enable={0}'.format(status)
    )
    return ret.content.decode('utf-8')
def incr(self, name, amount=1):
    """
    Increase the value at key ``name`` by ``amount``. If no key exists,
    the value will be initialized as ``amount``.

    Like **Redis.INCR**

    :param string name: the key name
    :param int amount: increments
    :return: the integer value at key ``name``
    :rtype: int

    >>> ssdb.incr('set_count', 3)
    13
    >>> ssdb.incr('set_count', 1)
    14
    >>> ssdb.incr('set_count', -2)
    12
    >>> ssdb.incr('temp_count', 42)
    42
    """
    amount = get_integer('amount', amount)
    return self.execute_command('incr', name, amount)
def ping(dest_ip=None, **kwargs):
    '''
    Send a ping RPC to a device

    dest_ip
        The IP of the device to ping

    dev_timeout : 30
        The NETCONF RPC timeout (in seconds)

    rapid : False
        When ``True``, executes ping at 100pps instead of 1pps

    ttl
        Maximum number of IP routers (IP hops) allowed between source and
        destination

    routing_instance
        Name of the routing instance to use to send the ping

    interface
        Interface used to send traffic

    count : 5
        Number of packets to send

    CLI Examples:

    .. code-block:: bash

        salt 'device_name' junos.ping '8.8.8.8' count=5
        salt 'device_name' junos.ping '8.8.8.8' ttl=1 rapid=True
    '''
    conn = __proxy__['junos.conn']()
    ret = {}
    if dest_ip is None:
        ret['message'] = 'Please specify the destination ip to ping.'
        ret['out'] = False
        return ret

    op = {'host': dest_ip}
    if '__pub_arg' in kwargs:
        if kwargs['__pub_arg']:
            if isinstance(kwargs['__pub_arg'][-1], dict):
                op.update(kwargs['__pub_arg'][-1])
    else:
        op.update(kwargs)

    op['count'] = six.text_type(op.pop('count', 5))
    if 'ttl' in op:
        op['ttl'] = six.text_type(op['ttl'])

    ret['out'] = True
    try:
        ret['message'] = jxmlease.parse(etree.tostring(conn.rpc.ping(**op)))
    except Exception as exception:
        ret['message'] = 'Execution failed due to "{0}"'.format(exception)
        ret['out'] = False
    return ret
def from_custom_template(cls, searchpath, name):
    """
    Factory function for creating a subclass of ``Styler``
    with a custom template and Jinja environment.

    Parameters
    ----------
    searchpath : str or list
        Path or paths of directories containing the templates
    name : str
        Name of your custom template to use for rendering

    Returns
    -------
    MyStyler : subclass of Styler
        Has the correct ``env`` and ``template`` class attributes set.
    """
    loader = ChoiceLoader([
        FileSystemLoader(searchpath),
        cls.loader,
    ])

    class MyStyler(cls):
        env = Environment(loader=loader)
        template = env.get_template(name)

    return MyStyler
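A usage sketch with hypothetical paths and template name:

>>> EasyStyler = Styler.from_custom_template("templates", "myhtml.tpl")
>>> EasyStyler(df).render()  # df is an existing DataFrame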
def load(self, config_data):
    """
    Loads sanitizers according to rulesets defined in given already
    parsed configuration file.

    :param config_data: Already parsed configuration data, as dictionary.
    :type config_data: dict[str,any]
    """
    if not isinstance(config_data, dict):
        raise ConfigurationError(
            "Configuration data is %s instead of dict." % (
                type(config_data),
            )
        )
    self.load_addon_packages(config_data)
    self.load_sanitizers(config_data)
Loads sanitizers according to rulesets defined in given already parsed configuration file. :param config_data: Already parsed configuration data, as dictionary. :type config_data: dict[str,any]
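A hedged sketch of how load() might be driven; the owning class instance (here called `loader`), the YAML format, and the file path are all assumptions, not taken from the source:

import yaml  # assuming a YAML config file; any parser that yields a dict works

with open('sanitizers.yml') as fh:  # 'sanitizers.yml' is a hypothetical path
    config_data = yaml.safe_load(fh)
loader.load(config_data)    # 'loader' is a hypothetical instance of the owning class
loader.load('not a dict')   # would raise ConfigurationError, per the type check above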
def next(self): """Returns the next line from this input reader as (lineinfo, line) tuple. Returns: The next input from this input reader, in the form of a 2-tuple. The first element of the tuple describes the source, it is itself a tuple (blobkey, filenumber, byteoffset). The second element of the tuple is the line found at that offset. """ if not self._filestream: if not self._zip: self._zip = zipfile.ZipFile(self._reader(self._blob_key)) # Get a list of entries, reversed so we can pop entries off in order self._entries = self._zip.infolist()[self._start_file_index: self._end_file_index] self._entries.reverse() if not self._entries: raise StopIteration() entry = self._entries.pop() value = self._zip.read(entry.filename) self._filestream = StringIO.StringIO(value) if self._initial_offset: self._filestream.seek(self._initial_offset) self._filestream.readline() start_position = self._filestream.tell() line = self._filestream.readline() if not line: # Done with this file in the zip. Move on to the next file. self._filestream.close() self._filestream = None self._start_file_index += 1 self._initial_offset = 0 return self.next() return ((self._blob_key, self._start_file_index, start_position), line.rstrip("\n"))
Returns the next line from this input reader as (lineinfo, line) tuple. Returns: The next input from this input reader, in the form of a 2-tuple. The first element of the tuple describes the source, it is itself a tuple (blobkey, filenumber, byteoffset). The second element of the tuple is the line found at that offset.
def read_ttl(path): ''' Helper function to read Document in TTL-TXT format (i.e. ${docname}_*.txt) E.g. Document.read_ttl('~/data/myfile') is the same as Document('myfile', '~/data/').read() ''' warnings.warn("Document.read_ttl() is deprecated and will be removed in near future. Use read() instead", DeprecationWarning) doc_path = os.path.dirname(path) doc_name = os.path.basename(path) return Document(doc_name, doc_path).read()
Helper function to read Document in TTL-TXT format (i.e. ${docname}_*.txt) E.g. Document.read_ttl('~/data/myfile') is the same as Document('myfile', '~/data/').read()
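A short sketch contrasting the deprecated helper with its suggested replacement, using the docstring's own example path:

# the deprecated call and its preferred equivalent
doc = Document.read_ttl('~/data/myfile')    # emits a DeprecationWarning
doc = Document('myfile', '~/data/').read()  # the replacement the warning recommends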
def create(self, fname, lname, group, type, group_api): """Create an LDAP User.""" self.__username(fname, lname) self.client.add( self.__distinguished_name(type, fname=fname, lname=lname), API.__object_class(), self.__ldap_attr(fname, lname, type, group, group_api))
Create an LDAP User.
def get_environ(self, sock): """Create WSGI environ entries to be merged into each request.""" cipher = sock.cipher() ssl_environ = { "wsgi.url_scheme": "https", "HTTPS": "on", 'SSL_PROTOCOL': cipher[1], 'SSL_CIPHER': cipher[0] ## SSL_VERSION_INTERFACE string The mod_ssl program version ## SSL_VERSION_LIBRARY string The OpenSSL program version } return ssl_environ
Create WSGI environ entries to be merged into each request.
def process_results(self): """ Process results by providers """ for result in self._results: provider = result.provider self.providers.append(provider) if result.error: self.failed_providers.append(provider) continue if not result.response: continue # set blacklisted to True if ip is detected with at least one dnsbl self.blacklisted = True provider_categories = provider.process_response(result.response) assert provider_categories.issubset(DNSBL_CATEGORIES) self.categories = self.categories.union(provider_categories) self.detected_by[provider.host] = list(provider_categories)
Process results by providers
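A hedged sketch of inspecting a processed result; `checker` and its check() method are hypothetical driver code, while the blacklisted, detected_by, and categories attributes come from the method above:

# hypothetical driver around the DNSBL result object populated above
result = checker.check('127.0.0.2')  # 'checker' is a hypothetical checker instance
if result.blacklisted:
    for host, categories in result.detected_by.items():
        print(host, categories)  # e.g. 'zen.spamhaus.org' ['spam']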
def lpc(blk, order=None):
  """
  Find the Linear Predictive Coding (LPC) coefficients as a ZFilter object,
  the analysis whitening filter. This implementation is based on the
  covariance method, assuming a zero-mean stochastic process, finding the
  coefficients iteratively and greedily like the lattice implementation in
  the Levinson-Durbin algorithm, although the lag matrix found from the
  given block doesn't have to be Toeplitz. Slow, but this strategy doesn't
  need NumPy.
  """
  # Calculate the covariance for each lag pair
  phi = lag_matrix(blk, order)
  order = len(phi) - 1

  # Inner product for filters based on above statistics
  def inner(a, b):
    return sum(phi[i][j] * ai * bj
               for i, ai in enumerate(a.numlist)
               for j, bj in enumerate(b.numlist))

  A = ZFilter(1)
  B = [z ** -1]
  beta = [inner(B[0], B[0])]
  m = 1
  while True:
    try:
      k = -inner(A, z ** -m) / beta[m - 1]  # The last one is really a PARCOR coeff
    except ZeroDivisionError:
      raise ZeroDivisionError("Can't find next coefficient")
    if k >= 1 or k <= -1:
      raise ValueError("Unstable filter")
    A += k * B[m - 1]

    if m >= order:
      A.error = inner(A, A)
      return A

    gamma = [inner(z ** -(m + 1), B[q]) / beta[q] for q in xrange(m)]
    B.append(z ** -(m + 1) - sum(gamma[q] * B[q] for q in xrange(m)))
    beta.append(inner(B[m], B[m]))
    m += 1
Find the Linear Predictive Coding (LPC) coefficients as a ZFilter object, the analysis whitening filter. This implementation is based on the covariance method, assuming a zero-mean stochastic process, finding the coefficients iteratively and greedily like the lattice implementation in the Levinson-Durbin algorithm, although the lag matrix found from the given block doesn't have to be Toeplitz. Slow, but this strategy doesn't need NumPy.
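A small usage sketch for the routine above, assuming the surrounding audiolazy-style environment where lag_matrix, ZFilter, and z are already defined:

# fit a 3rd-order whitening filter to a short block of samples
filt = lpc([1., -2., 3., -4., -3., 2., -3., 2., 1.], order=3)
print(filt)        # the whitening filter A(z) as a ZFilter
print(filt.error)  # squared prediction error attached on return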
def transpose(self): """Return the transpose of the QuantumChannel.""" kraus_l, kraus_r = self._data kraus_l = [k.T for k in kraus_l] if kraus_r is not None: kraus_r = [k.T for k in kraus_r] return Kraus((kraus_l, kraus_r), input_dims=self.output_dims(), output_dims=self.input_dims())
Return the transpose of the QuantumChannel.
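A brief sketch using Qiskit's quantum_info Kraus class, which this method appears to belong to; transpose() transposes each Kraus operator and swaps the input/output dimensions:

import numpy as np
from qiskit.quantum_info import Kraus

# a bit-flip channel with a single Kraus operator
chan = Kraus([np.array([[0, 1], [1, 0]], dtype=complex)])
chan_t = chan.transpose()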
def arc(pRA, pDecl, sRA, sDecl, mcRA, lat): """ Returns the arc of direction between a Promissor and Significator. It uses the generic proportional semi-arc method. """ pDArc, pNArc = utils.dnarcs(pDecl, lat) sDArc, sNArc = utils.dnarcs(sDecl, lat) # Select meridian and arcs to be used # Default is MC and Diurnal arcs mdRA = mcRA sArc = sDArc pArc = pDArc if not utils.isAboveHorizon(sRA, sDecl, mcRA, lat): # Use IC and Nocturnal arcs mdRA = angle.norm(mcRA + 180) sArc = sNArc pArc = pNArc # Promissor and Significator distance to meridian pDist = angle.closestdistance(mdRA, pRA) sDist = angle.closestdistance(mdRA, sRA) # Promissor should be after significator (in degrees) if pDist < sDist: pDist += 360 # Meridian distances proportional to respective semi-arcs sPropDist = sDist / (sArc / 2.0) pPropDist = pDist / (pArc / 2.0) # The arc is how much of the promissor's semi-arc is # needed to reach the significator return (pPropDist - sPropDist) * (pArc / 2.0)
Returns the arc of direction between a Promissor and Significator. It uses the generic proportional semi-arc method.
def put(self, request, bot_id, id, format=None): """ Update existing MessengerBot --- serializer: MessengerBotUpdateSerializer responseMessages: - code: 401 message: Not authenticated - code: 400 message: Not valid request """ return super(MessengerBotDetail, self).put(request, bot_id, id, format)
Update existing MessengerBot --- serializer: MessengerBotUpdateSerializer responseMessages: - code: 401 message: Not authenticated - code: 400 message: Not valid request
def editText(self, y, x, w, record=True, **kwargs):
    'Wrap global editText with `preedit` and `postedit` hooks.'
    v = self.callHook('preedit') if record else None
    if not v or v[0] is None:
        with EnableCursor():
            v = editText(self.scr, y, x, w, **kwargs)
    else:
        v = v[0]
    if kwargs.get('display', True):
        status('"%s"' % v)
    if record:
        self.callHook('postedit', v)
    return v
Wrap global editText with `preedit` and `postedit` hooks.
def DragDrop(x1: int, y1: int, x2: int, y2: int, moveSpeed: float = 1, waitTime: float = OPERATION_WAIT_TIME) -> None: """ Simulate mouse left button drag from point x1, y1 drop to point x2, y2. x1: int. y1: int. x2: int. y2: int. moveSpeed: float, 1 normal speed, < 1 move slower, > 1 move faster. waitTime: float. """ PressMouse(x1, y1, 0.05) MoveTo(x2, y2, moveSpeed, 0.05) ReleaseMouse(waitTime)
Simulate mouse left button drag from point x1, y1 drop to point x2, y2. x1: int. y1: int. x2: int. y2: int. moveSpeed: float, 1 normal speed, < 1 move slower, > 1 move faster. waitTime: float.
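A one-line usage sketch, assuming this is the Python uiautomation package for Windows:

import uiautomation as auto  # assuming the uiautomation package exposing DragDrop

auto.DragDrop(100, 100, 400, 400, moveSpeed=0.5)  # drag at half speed from (100,100) to (400,400)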
def ordered_storage(config, name=None): '''Return ordered storage system based on the specified config. The canonical example of such a storage container is ``defaultdict(list)``. Thus, the return value of this method contains keys and values. The values are ordered lists with the last added item at the end. Args: config (dict): Defines the configurations for the storage. For in-memory storage, the config ``{'type': 'dict'}`` will suffice. For Redis storage, the type should be ``'redis'`` and the configurations for the Redis database should be supplied under the key ``'redis'``. These parameters should be in a form suitable for `redis.Redis`. The parameters may alternatively contain references to environment variables, in which case literal configuration values should be replaced by dicts of the form:: {'env': 'REDIS_HOSTNAME', 'default': 'localhost'} For a full example, see :ref:`minhash_lsh_at_scale` name (bytes, optional): A reference name for this storage container. For dict-type containers, this is ignored. For Redis containers, this name is used to prefix keys pertaining to this storage container within the database. ''' tp = config['type'] if tp == 'dict': return DictListStorage(config) if tp == 'redis': return RedisListStorage(config, name=name)
Return ordered storage system based on the specified config. The canonical example of such a storage container is ``defaultdict(list)``. Thus, the return value of this method contains keys and values. The values are ordered lists with the last added item at the end. Args: config (dict): Defines the configurations for the storage. For in-memory storage, the config ``{'type': 'dict'}`` will suffice. For Redis storage, the type should be ``'redis'`` and the configurations for the Redis database should be supplied under the key ``'redis'``. These parameters should be in a form suitable for `redis.Redis`. The parameters may alternatively contain references to environment variables, in which case literal configuration values should be replaced by dicts of the form:: {'env': 'REDIS_HOSTNAME', 'default': 'localhost'} For a full example, see :ref:`minhash_lsh_at_scale` name (bytes, optional): A reference name for this storage container. For dict-type containers, this is ignored. For Redis containers, this name is used to prefix keys pertaining to this storage container within the database.
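A short sketch of both configuration shapes the docstring describes, assuming this helper lives in datasketch.storage; the Redis host/port and the name prefix are placeholder values:

from datasketch.storage import ordered_storage  # assuming datasketch's storage module

mem = ordered_storage({'type': 'dict'})
rds = ordered_storage({'type': 'redis',
                       'redis': {'host': 'localhost', 'port': 6379}},
                      name=b'lsh_bucket')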
def compact(self): """ Switch to the compact variant of the live form template. By default, this will simply create a loader for the C{self.compactFragmentName} template and compact all of this form's parameters. """ self.docFactory = webtheme.getLoader(self.compactFragmentName) for param in self.parameters: param.compact()
Switch to the compact variant of the live form template. By default, this will simply create a loader for the C{self.compactFragmentName} template and compact all of this form's parameters.
def get_memory_info(self): """Return a tuple with the process' RSS and VMS size.""" rss, vms = _psutil_osx.get_process_memory_info(self.pid)[:2] return nt_meminfo(rss, vms)
Return a tuple with the process' RSS and VMS size.
def delete(self, name): """ Deletes the named entry in the cache. :param name: the name. :return: true if it is deleted. """ if name in self._cache: del self._cache[name] self.writeCache() # TODO clean files return True return False
Deletes the named entry in the cache. :param name: the name. :return: true if it is deleted.
def update(self, portfolio, date, perfs=None):
    ''' Actualizes the portfolio universe with the algo state '''
    # Make the manager aware of current simulation
    self.portfolio = portfolio
    self.perfs = perfs
    self.date = date
Actualizes the portfolio universe with the algo state
def remove_item(self, item_id, assessment_part_id):
    """Removes an ``Item`` from an ``AssessmentPartId``.

    arg:    item_id (osid.id.Id): ``Id`` of the ``Item``
    arg:    assessment_part_id (osid.id.Id): ``Id`` of the ``AssessmentPartId``
    raise:  NotFound - ``item_id`` not found in ``assessment_part_id``
    raise:  NullArgument - ``item_id`` or ``assessment_part_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    if (not isinstance(assessment_part_id, ABCId) and
            assessment_part_id.get_identifier_namespace() != 'assessment_authoring.AssessmentPart'):
        raise errors.InvalidArgument('the argument is not a valid OSID Id')
    assessment_part_map, collection = self._get_assessment_part_collection(assessment_part_id)
    try:
        assessment_part_map['itemIds'].remove(str(item_id))
    except (KeyError, ValueError):
        raise errors.NotFound()
    collection.save(assessment_part_map)
Removes an ``Item`` from an ``AssessmentPartId``. arg: item_id (osid.id.Id): ``Id`` of the ``Item`` arg: assessment_part_id (osid.id.Id): ``Id`` of the ``AssessmentPartId`` raise: NotFound - ``item_id`` not found in ``assessment_part_id`` raise: NullArgument - ``item_id`` or ``assessment_part_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def endpoint_from_model_data(self, model_s3_location, deployment_image, initial_instance_count, instance_type, name=None, role=None, wait=True, model_environment_vars=None, model_vpc_config=None, accelerator_type=None): """Create and deploy to an ``Endpoint`` using existing model data stored in S3. Args: model_s3_location (str): S3 URI of the model artifacts to use for the endpoint. deployment_image (str): The Docker image which defines the runtime code to be used as the entry point for accepting prediction requests. initial_instance_count (int): Minimum number of EC2 instances to launch. The actual number of active instances for an endpoint at any given time varies due to autoscaling. instance_type (str): Type of EC2 instance to deploy to an endpoint for prediction, e.g. 'ml.c4.xlarge'. name (str): Name of the ``Endpoint`` to create. If not specified, uses a name generated by combining the image name with a timestamp. role (str): An AWS IAM role (either name or full ARN). The Amazon SageMaker training jobs and APIs that create Amazon SageMaker endpoints use this role to access training data and model artifacts. You must grant sufficient permissions to this role. wait (bool): Whether to wait for the endpoint deployment to complete before returning (default: True). model_environment_vars (dict[str, str]): Environment variables to set on the model container (default: None). model_vpc_config (dict[str, list[str]]): The VpcConfig set on the model (default: None) * 'Subnets' (list[str]): List of subnet ids. * 'SecurityGroupIds' (list[str]): List of security group ids. accelerator_type (str): Type of Elastic Inference accelerator to attach to the instance. For example, 'ml.eia1.medium'. For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html Returns: str: Name of the ``Endpoint`` that is created. """ model_environment_vars = model_environment_vars or {} name = name or name_from_image(deployment_image) model_vpc_config = vpc_utils.sanitize(model_vpc_config) if _deployment_entity_exists(lambda: self.sagemaker_client.describe_endpoint(EndpointName=name)): raise ValueError('Endpoint with name "{}" already exists; please pick a different name.'.format(name)) if not _deployment_entity_exists(lambda: self.sagemaker_client.describe_model(ModelName=name)): primary_container = container_def(image=deployment_image, model_data_url=model_s3_location, env=model_environment_vars) self.create_model(name=name, role=role, container_defs=primary_container, vpc_config=model_vpc_config) if not _deployment_entity_exists( lambda: self.sagemaker_client.describe_endpoint_config(EndpointConfigName=name)): self.create_endpoint_config(name=name, model_name=name, initial_instance_count=initial_instance_count, instance_type=instance_type, accelerator_type=accelerator_type) self.create_endpoint(endpoint_name=name, config_name=name, wait=wait) return name
Create and deploy to an ``Endpoint`` using existing model data stored in S3. Args: model_s3_location (str): S3 URI of the model artifacts to use for the endpoint. deployment_image (str): The Docker image which defines the runtime code to be used as the entry point for accepting prediction requests. initial_instance_count (int): Minimum number of EC2 instances to launch. The actual number of active instances for an endpoint at any given time varies due to autoscaling. instance_type (str): Type of EC2 instance to deploy to an endpoint for prediction, e.g. 'ml.c4.xlarge'. name (str): Name of the ``Endpoint`` to create. If not specified, uses a name generated by combining the image name with a timestamp. role (str): An AWS IAM role (either name or full ARN). The Amazon SageMaker training jobs and APIs that create Amazon SageMaker endpoints use this role to access training data and model artifacts. You must grant sufficient permissions to this role. wait (bool): Whether to wait for the endpoint deployment to complete before returning (default: True). model_environment_vars (dict[str, str]): Environment variables to set on the model container (default: None). model_vpc_config (dict[str, list[str]]): The VpcConfig set on the model (default: None) * 'Subnets' (list[str]): List of subnet ids. * 'SecurityGroupIds' (list[str]): List of security group ids. accelerator_type (str): Type of Elastic Inference accelerator to attach to the instance. For example, 'ml.eia1.medium'. For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html Returns: str: Name of the ``Endpoint`` that is created.
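A hedged usage sketch, assuming a SageMaker Python SDK version that exposes this Session method; the bucket, image URI, and role below are hypothetical placeholders:

import sagemaker  # assuming the SDK version exposing Session.endpoint_from_model_data

sess = sagemaker.Session()
# all values below are hypothetical placeholders
endpoint_name = sess.endpoint_from_model_data(
    model_s3_location='s3://my-bucket/model.tar.gz',
    deployment_image='123456789012.dkr.ecr.us-east-1.amazonaws.com/my-image:latest',
    initial_instance_count=1,
    instance_type='ml.c4.xlarge',
    role='MySageMakerRole')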
def check_slice_perms(self, slice_id):
    """
    Check if user can access a cached response from slice_json.

    This function takes `self` since it must have the same signature as
    the decorated method.
    """
    form_data, slc = get_form_data(slice_id, use_slice_data=True)
    datasource_type = slc.datasource.type
    datasource_id = slc.datasource.id
    viz_obj = get_viz(
        datasource_type=datasource_type,
        datasource_id=datasource_id,
        form_data=form_data,
        force=False,
    )
    security_manager.assert_datasource_permission(viz_obj.datasource)
Check if user can access a cached response from slice_json. This function takes `self` since it must have the same signature as the decorated method.
def expand_config(d, dirs):
    """
    Expand configuration XDG variables, environment variables, and tildes.

    Parameters
    ----------
    d : dict
        config information
    dirs : appdirs.AppDirs
        XDG application mapping

    Notes
    -----
    *Environment variables* are expanded via :py:func:`os.path.expandvars`.
    So ``${PWD}`` would be replaced by the current PWD in the shell,
    ``${USER}`` would be the user running the app.

    *XDG variables* are expanded via :py:meth:`str.format`. These do not
    have a dollar sign. They are:

    - ``{user_cache_dir}``
    - ``{user_config_dir}``
    - ``{user_data_dir}``
    - ``{user_log_dir}``
    - ``{site_config_dir}``
    - ``{site_data_dir}``

    See Also
    --------
    os.path.expanduser, os.path.expandvars :
        Standard library functions for expanding variables. Same concept, used inside.
    """
    context = {
        'user_cache_dir': dirs.user_cache_dir,
        'user_config_dir': dirs.user_config_dir,
        'user_data_dir': dirs.user_data_dir,
        'user_log_dir': dirs.user_log_dir,
        'site_config_dir': dirs.site_config_dir,
        'site_data_dir': dirs.site_data_dir,
    }
    for k, v in d.items():
        if isinstance(v, dict):
            expand_config(v, dirs)
        if isinstance(v, string_types):
            d[k] = os.path.expanduser(os.path.expandvars(d[k]))
            d[k] = d[k].format(**context)
Expand configuration XDG variables, environment variables, and tildes. Parameters ---------- d : dict config information dirs : appdirs.AppDirs XDG application mapping Notes ----- *Environment variables* are expanded via :py:func:`os.path.expandvars`. So ``${PWD}`` would be replaced by the current PWD in the shell, ``${USER}`` would be the user running the app. *XDG variables* are expanded via :py:meth:`str.format`. These do not have a dollar sign. They are: - ``{user_cache_dir}`` - ``{user_config_dir}`` - ``{user_data_dir}`` - ``{user_log_dir}`` - ``{site_config_dir}`` - ``{site_data_dir}`` See Also -------- os.path.expanduser, os.path.expandvars : Standard library functions for expanding variables. Same concept, used inside.
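A worked sketch of the two expansion mechanisms, using the real appdirs package; 'myapp' and the config keys are hypothetical:

from appdirs import AppDirs

dirs = AppDirs('myapp')                      # 'myapp' is a hypothetical application name
cfg = {'cache': '{user_cache_dir}/sessions',
       'home': '${HOME}/projects',
       'nested': {'log': '{user_log_dir}/out.log'}}
expand_config(cfg, dirs)                     # mutates cfg in place; returns None
print(cfg['cache'])                          # e.g. /home/me/.cache/myapp/sessions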
def next_token(self, tok, include_extra=False): """ Returns the next token after the given one. If include_extra is True, includes non-coding tokens from the tokenize module, such as NL and COMMENT. """ i = tok.index + 1 if not include_extra: while is_non_coding_token(self._tokens[i].type): i += 1 return self._tokens[i]
Returns the next token after the given one. If include_extra is True, includes non-coding tokens from the tokenize module, such as NL and COMMENT.
def write_int8(self, value, little_endian=True): """ Pack the value as a signed byte and write 1 byte to the stream. Args: value: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: the number of bytes written. """ if little_endian: endian = "<" else: endian = ">" return self.pack('%sb' % endian, value)
Pack the value as a signed byte and write 1 byte to the stream. Args: value: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: the number of bytes written.
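A small demonstration of the struct format the method relies on; since an int8 occupies a single byte, the endianness prefix cannot change the output, which is why the parameter exists only for interface symmetry with the multi-byte writers:

import struct

# '<b' and '>b' both emit exactly one byte
assert struct.pack('<b', -1) == struct.pack('>b', -1) == b'\xff'
assert struct.calcsize('b') == 1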
def rewrite_elife_authors_json(json_content, doi): """ this does the work of rewriting elife authors json """ # Convert doi from testing doi if applicable article_doi = elifetools.utils.convert_testing_doi(doi) # Edge case fix an affiliation name if article_doi == "10.7554/eLife.06956": for i, ref in enumerate(json_content): if ref.get("orcid") and ref.get("orcid") == "0000-0001-6798-0064": json_content[i]["affiliations"][0]["name"] = ["Cambridge"] # Edge case fix an ORCID if article_doi == "10.7554/eLife.09376": for i, ref in enumerate(json_content): if ref.get("orcid") and ref.get("orcid") == "000-0001-7224-925X": json_content[i]["orcid"] = "0000-0001-7224-925X" # Edge case competing interests if article_doi == "10.7554/eLife.00102": for i, ref in enumerate(json_content): if not ref.get("competingInterests"): if ref["name"]["index"].startswith("Chen,"): json_content[i]["competingInterests"] = "ZJC: Reviewing Editor, <i>eLife</i>" elif ref["name"]["index"].startswith("Li,"): json_content[i]["competingInterests"] = "The remaining authors have no competing interests to declare." if article_doi == "10.7554/eLife.00270": for i, ref in enumerate(json_content): if not ref.get("competingInterests"): if ref["name"]["index"].startswith("Patterson,"): json_content[i]["competingInterests"] = "MP: Managing Executive Editor, <i>eLife</i>" # Remainder of competing interests rewrites elife_author_competing_interests = {} elife_author_competing_interests["10.7554/eLife.00133"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.00190"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.00230"] = "The authors have declared that no competing interests exist" elife_author_competing_interests["10.7554/eLife.00288"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.00352"] = "The author declares that no competing interest exist" elife_author_competing_interests["10.7554/eLife.00362"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.00475"] = "The remaining authors have no competing interests to declare." elife_author_competing_interests["10.7554/eLife.00592"] = "The other authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.00633"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.02725"] = "The other authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.02935"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.04126"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.04878"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.05322"] = "The other authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.06011"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.06416"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.07383"] = "The other authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.08421"] = "The authors declare that no competing interests exist." 
elife_author_competing_interests["10.7554/eLife.08494"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.08648"] = "The other authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.08924"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.09083"] = "The other authors declare that no competing interests exists." elife_author_competing_interests["10.7554/eLife.09102"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.09460"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.09591"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.09600"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.10113"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.10230"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.10453"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.10635"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.11407"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.11473"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.11750"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.12217"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.12620"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.12724"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.13023"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.13732"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.14116"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.14258"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.14694"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.15085"] = "The other authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.15312"] = "The other authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.16011"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.16940"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.17023"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.17092"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.17218"] = "The authors declare that no competing interests exist." 
elife_author_competing_interests["10.7554/eLife.17267"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.17523"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.17556"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.17769"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.17834"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.18101"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.18515"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.18544"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.18648"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.19071"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.19334"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.19510"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.20183"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.20242"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.20375"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.20797"] = "The other authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.21454"] = "The authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.21491"] = "The other authors declare that no competing interests exist." elife_author_competing_interests["10.7554/eLife.22187"] = "The authors declare that no competing interests exist." if article_doi in elife_author_competing_interests: for i, ref in enumerate(json_content): if not ref.get("competingInterests"): json_content[i]["competingInterests"] = elife_author_competing_interests[article_doi] # Rewrite "other authors declare" ... competing interests statements using a string match for i, ref in enumerate(json_content): if (ref.get("competingInterests") and ( ref.get("competingInterests").startswith("The other author") or ref.get("competingInterests").startswith("The others author") or ref.get("competingInterests").startswith("The remaining authors") or ref.get("competingInterests").startswith("The remaining have declared") )): json_content[i]["competingInterests"] = "No competing interests declared." return json_content
this does the work of rewriting elife authors json
def remove_server_data(server_id): """ Remove a server from the server data Args: server_id (int): The server to remove from the server data """ logger.debug("Removing server from serverdata") # Remove the server from data data = datatools.get_data() if server_id in data["discord"]["servers"]: data["discord"]["servers"].pop(server_id) datatools.write_data(data)
Remove a server from the server data Args: server_id (int): The server to remove from the server data
def force_invalidate(self, vts): """Force invalidation of a VersionedTargetSet.""" for vt in vts.versioned_targets: self._invalidator.force_invalidate(vt.cache_key) vt.valid = False self._invalidator.force_invalidate(vts.cache_key) vts.valid = False
Force invalidation of a VersionedTargetSet.
def cmd_link(self, args):
    '''handle link commands'''
    if len(args) < 1:
        self.show_link()
    elif args[0] == "list":
        self.cmd_link_list()
    elif args[0] == "add":
        if len(args) != 2:
            print("Usage: link add LINK")
            return
        self.cmd_link_add(args[1:])
    elif args[0] == "ports":
        self.cmd_link_ports()
    elif args[0] == "remove":
        if len(args) != 2:
            print("Usage: link remove LINK")
            return
        self.cmd_link_remove(args[1:])
    else:
        print("usage: link <list|add|ports|remove>")
handle link commands
def name_variants(self): """A list of namedtuples representing variants of the affiliation name with number of documents referring to this variant. """ out = [] variant = namedtuple('Variant', 'name doc_count') for var in chained_get(self._json, ['name-variants', 'name-variant'], []): new = variant(name=var['$'], doc_count=var.get('@doc-count')) out.append(new) return out
A list of namedtuples representing variants of the affiliation name with number of documents referring to this variant.
def assign_taxonomy(dataPath, reference_sequences_fp, id_to_taxonomy_fp, read_1_seqs_fp, read_2_seqs_fp, single_ok=False, no_single_ok_generic=False, header_id_regex=None, read_id_regex = "\S+\s+(\S+)", amplicon_id_regex = "(\S+)\s+(\S+?)\/", output_fp=None, log_path=None, HALT_EXEC=False, base_tmp_dir = '/tmp'): """Assign taxonomy to each sequence in data with the RTAX classifier # data: open fasta file object or list of fasta lines dataPath: path to a fasta file output_fp: path to write output; if not provided, result will be returned in a dict of {seq_id:(taxonomy_assignment,confidence)} """ usearch_command = "usearch" if not (exists(usearch_command) or app_path(usearch_command)): raise ApplicationNotFoundError,\ "Cannot find %s. Is it installed? Is it in your path?"\ % usearch_command my_tmp_dir = get_tmp_filename(tmp_dir=base_tmp_dir,prefix='rtax_',suffix='',result_constructor=str) os.makedirs(my_tmp_dir) try: # RTAX classifier doesn't necessarily preserve identifiers # it reports back only the id extracted as $1 using header_id_regex # since rtax takes the original unclustered sequence files as input, # the usual case is that the regex extracts the amplicon ID from the second field # Use lookup table read_1_id_to_orig_id = {} readIdExtractor = re.compile(read_id_regex) # OTU clustering produces ">clusterID read_1_id" data = open(dataPath,'r') for seq_id, seq in parse_fasta(data): # apply the regex extract = readIdExtractor.match(seq_id) if extract is None: stderr.write("Matched no ID with read_id_regex " + read_id_regex +" in '" + seq_id + "' from file " + dataPath + "\n") else: read_1_id_to_orig_id[extract.group(1)] = seq_id #stderr.write(extract.group(1) + " => " + seq_id + "\n") #seq_id_lookup[seq_id.split()[1]] = seq_id data.close() # make list of amplicon IDs to pass to RTAX id_list_fp = open(my_tmp_dir+"/ampliconIdsToClassify", "w") # Establish mapping of amplicon IDs to read_1 IDs # simultaneously write the amplicon ID file for those IDs found in the input mapping above amplicon_to_read_1_id = {} ampliconIdExtractor = re.compile(amplicon_id_regex) # split_libraries produces >read_1_id ampliconID/1 ... 
read_1_data = open(read_1_seqs_fp, 'r')
        for seq_id, seq in parse_fasta(read_1_data):
            # apply the regex
            extract = ampliconIdExtractor.match(seq_id)
            if extract is None:
                stderr.write("Matched no ID with amplicon_id_regex " + amplicon_id_regex + " in '" + seq_id + "' from file " + read_1_seqs_fp + "\n")
            else:
                read_1_id = extract.group(1)
                amplicon_id = extract.group(2)
                try:
                    amplicon_to_read_1_id[amplicon_id] = read_1_id
                    bogus = read_1_id_to_orig_id[read_1_id]  # verify that the id is valid
                    id_list_fp.write('%s\n' % (amplicon_id))
                except KeyError:
                    pass
        read_1_data.close()
        id_list_fp.close()

        app = Rtax(HALT_EXEC=HALT_EXEC)

        temp_output_file = tempfile.NamedTemporaryFile(
            prefix='RtaxAssignments_', suffix='.txt')
        app.Parameters['-o'].on(temp_output_file.name)
        app.Parameters['-r'].on(reference_sequences_fp)
        app.Parameters['-t'].on(id_to_taxonomy_fp)
        # app.Parameters['-d'].on(delimiter)
        app.Parameters['-l'].on(id_list_fp.name)  # these are amplicon IDs
        app.Parameters['-a'].on(read_1_seqs_fp)
        if read_2_seqs_fp is not None:
            app.Parameters['-b'].on(read_2_seqs_fp)
        app.Parameters['-i'].on(header_id_regex)
        app.Parameters['-m'].on(my_tmp_dir)
        if single_ok:
            app.Parameters['-f'].on()
        if no_single_ok_generic:
            app.Parameters['-g'].on()
        # app.Parameters['-v'].on()

        app_result = app()

        if log_path:
            f = open(log_path, 'a')
            errString = ''.join(app_result['StdErr'].readlines()) + '\n'
            f.write(errString)
            f.close()

        assignments = {}

        # restore original sequence IDs with spaces
        for line in app_result['Assignments']:
            toks = line.strip().split('\t')
            rtax_id = toks.pop(0)
            if len(toks):
                bestpcid = toks.pop(0)  # ignored
            lineage = toks

            # RTAX does not provide a measure of confidence. We could pass one in,
            # based on the choice of primers, or even look it up on the fly in the tables
            # from the "optimal primers" paper; but it would be the same for every
            # query sequence anyway.
            # we could also return bestpcid, but that's not the same thing as confidence.
            confidence = 1.0

            read_1_id = amplicon_to_read_1_id[rtax_id]
            orig_id = read_1_id_to_orig_id[read_1_id]
            if lineage:
                assignments[orig_id] = (';'.join(lineage), confidence)
            else:
                assignments[orig_id] = ('Unclassified', 1.0)

        if output_fp:
            try:
                output_file = open(output_fp, 'w')
            except OSError:
                raise OSError("Can't open output file for writing: %s" % output_fp)
            for seq_id, assignment in assignments.items():
                lineage, confidence = assignment
                output_file.write('%s\t%s\t%1.3f\n' % (seq_id, lineage, confidence))
            output_file.close()
            return None
        else:
            return assignments
    finally:
        try:
            rmtree(my_tmp_dir)
        except OSError:
            pass
Assign taxonomy to each sequence in data with the RTAX classifier # data: open fasta file object or list of fasta lines dataPath: path to a fasta file output_fp: path to write output; if not provided, result will be returned in a dict of {seq_id:(taxonomy_assignment,confidence)}
def wait_time(departure, now=None):
    """
    Calculate the waiting time until the next departure time given in 'HH:MM' format.

    Return the delta from now until the departure, formatted as 'MM:SS'
    (or 'HH:MM:SS' when the wait is an hour or longer). Departure times
    that have already passed today yield '00:00'.
    """
    now = now or datetime.datetime.now()
    yn, mn, dn = now.year, now.month, now.day
    hour, minute = map(int, departure.split(':'))
    dt = datetime.datetime(yn, mn, dn, hour=hour, minute=minute)
    delta = (dt - now).seconds
    if (dt - now).days < 0:
        delta = 0
    if delta < 3600:
        return '%02d:%02d' % (delta // 60, delta % 60)
    else:
        delta_hh = delta // 3600
        delta_rest = delta - delta_hh * 3600
        return '%02d:%02d:%02d' % (delta_hh, delta_rest // 60, delta_rest % 60)
Calculate the waiting time until the next departure time given in 'HH:MM' format. Return the delta from now until the departure, formatted as 'MM:SS' (or 'HH:MM:SS' when the wait is an hour or longer). Departure times that have already passed today yield '00:00'.
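A worked example of the three output shapes, pinning `now` so the results are deterministic:

import datetime

now = datetime.datetime(2019, 1, 1, 15, 0, 0)
print(wait_time('15:30', now=now))  # '30:00'    (30 minutes, as MM:SS)
print(wait_time('17:30', now=now))  # '02:30:00' (an hour or more switches to HH:MM:SS)
print(wait_time('14:00', now=now))  # '00:00'    (already departed today)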
def _write_cvvr(self, f, data): ''' Write compressed "data" variable to the end of the file in a CVVR ''' f.seek(0, 2) byte_loc = f.tell() cSize = len(data) block_size = CDF.CVVR_BASE_SIZE64 + cSize section_type = CDF.CVVR_ rfuA = 0 cvvr1 = bytearray(24) cvvr1[0:8] = struct.pack('>q', block_size) cvvr1[8:12] = struct.pack('>i', section_type) cvvr1[12:16] = struct.pack('>i', rfuA) cvvr1[16:24] = struct.pack('>q', cSize) f.write(cvvr1) f.write(data) return byte_loc
Write compressed "data" variable to the end of the file in a CVVR
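A sketch of the 24-byte header layout written above, collapsed into a single struct.pack call; the values CDF.CVVR_BASE_SIZE64 == 24 and CDF.CVVR_ == 13 (the CVVR record-type code in the CDF spec) are assumptions:

import struct

cSize = 128  # hypothetical compressed-payload size
header = struct.pack('>qiiq', 24 + cSize, 13, 0, cSize)  # block size, type, rfuA, cSize
assert len(header) == 24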
def gdf_from_places(queries, gdf_name='unnamed', buffer_dist=None): """ Create a GeoDataFrame from a list of place names to query. Parameters ---------- queries : list list of query strings or structured query dicts to geocode/download, one at a time gdf_name : string name attribute metadata for GeoDataFrame (this is used to save shapefile later) buffer_dist : float distance to buffer around the place geometry, in meters Returns ------- GeoDataFrame """ # create an empty GeoDataFrame then append each result as a new row gdf = gpd.GeoDataFrame() for query in queries: gdf = gdf.append(gdf_from_place(query, buffer_dist=buffer_dist)) # reset the index, name the GeoDataFrame gdf = gdf.reset_index().drop(labels='index', axis=1) gdf.gdf_name = gdf_name # set the original CRS of the GeoDataFrame to default_crs, and return it gdf.crs = settings.default_crs log('Finished creating GeoDataFrame with {} rows from {} queries'.format(len(gdf), len(queries))) return gdf
Create a GeoDataFrame from a list of place names to query. Parameters ---------- queries : list list of query strings or structured query dicts to geocode/download, one at a time gdf_name : string name attribute metadata for GeoDataFrame (this is used to save shapefile later) buffer_dist : float distance to buffer around the place geometry, in meters Returns ------- GeoDataFrame
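A short usage sketch, assuming this is the pre-1.0 osmnx API that shipped this helper; the place names are illustrative:

import osmnx as ox  # assuming an osmnx release exposing gdf_from_places

gdf = ox.gdf_from_places(['Berkeley, California', 'Oakland, California'],
                         gdf_name='east_bay', buffer_dist=500)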