def putcellslice(self, rownr, value, blc, trc, inc=[]):
    """Put into a slice of a table cell holding an array.

    (see :func:`table.putcellslice`)
    """
    return self._table.putcellslice(self._column, rownr, value,
                                    blc, trc, inc)
def is_external_url(self, url, site_url):
    """
    Check if the URL is an external URL.
    """
    url_splitted = urlsplit(url)
    if not url_splitted.netloc:
        return False
    return url_splitted.netloc != urlsplit(site_url).netloc
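A quick behavior sketch (this mirrors the method above as a standalone function purely for illustration; urlsplit is assumed to come from urllib.parse, as the call pattern suggests):

from urllib.parse import urlsplit

def is_external_url_demo(url, site_url):
    # Same logic as the method above, minus `self`.
    parts = urlsplit(url)
    if not parts.netloc:
        return False  # relative URLs are never considered external
    return parts.netloc != urlsplit(site_url).netloc

print(is_external_url_demo('/about/', 'https://example.com'))                # False
print(is_external_url_demo('https://example.com/a', 'https://example.com'))  # False
print(is_external_url_demo('https://other.org/a', 'https://example.com'))    # True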
def insert(self, iterable, index=0, data=None, weight=1.0):
    """Insert new node into tree

    Args:
        iterable(hashable): key used to find in the future.
        data(object): data associated with the key
        index(int): an index used for insertion.
        weight(float): the weight given for the item added.
    """
    if index == len(iterable):
        self.is_terminal = True
        self.key = iterable
        self.weight = weight
        if data:
            self.data.add(data)
    else:
        if iterable[index] not in self.children:
            self.children[iterable[index]] = TrieNode()
        # propagate the weight down so the terminal node keeps it
        self.children[iterable[index]].insert(iterable, index + 1, data, weight)
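A minimal sketch of the TrieNode container this method appears to assume, inferred purely from the attributes the body touches (children, data, is_terminal, key, weight are all assumptions, not the real class):

class TrieNode:
    # Minimal stand-in inferred from the attribute usage in insert().
    def __init__(self):
        self.children = {}       # element -> child TrieNode
        self.data = set()        # payloads stored at terminal nodes
        self.is_terminal = False
        self.key = None
        self.weight = 1.0

# Attach the method above and build a tiny trie:
TrieNode.insert = insert
root = TrieNode()
root.insert("cat", data="feline")   # walks c -> a -> t, marking 't' terminal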
def wait(
    self, timeout: Union[int, float] = None, safe: bool = False
) -> List[Union[Any, Exception]]:
    """
    Call :py:meth:`~Process.wait()` on all the Processes in this list.

    :param timeout:
        Same as :py:meth:`~Process.wait()`.
        This parameter controls the timeout for all the Processes combined,
        not a single :py:meth:`~Process.wait()` call.
    :param safe:
        Suppress any errors that occur while waiting for a Process.
        The return value of failed :py:meth:`~Process.wait()` calls are
        substituted with the ``Exception`` that occurred.
    :return:
        A ``list`` containing the values returned by child Processes of
        this Context.
    """
    if safe:
        _wait = self._wait_or_catch_exc
    else:
        _wait = Process.wait

    if timeout is None:
        return [_wait(process) for process in self]
    else:
        final = time.time() + timeout
        return [_wait(process, final - time.time()) for process in self]
def generate_sources_zip(milestone_id=None, output=None):
    """ Generate a sources archive for given milestone id. """
    if not is_input_valid(milestone_id, output):
        logging.error("invalid input")
        return 1

    create_work_dir(output)
    download_sources_artifacts(milestone_id, output)
    create_zip(output)
def get_departures(self, stop_id, route, destination, api_key):
    """Get the latest data from Transport NSW."""
    self.stop_id = stop_id
    self.route = route
    self.destination = destination
    self.api_key = api_key

    # Build the URL including the STOP_ID and the API key
    url = \
        'https://api.transport.nsw.gov.au/v1/tp/departure_mon?' \
        'outputFormat=rapidJSON&coordOutputFormat=EPSG%3A4326&' \
        'mode=direct&type_dm=stop&name_dm=' \
        + self.stop_id \
        + '&departureMonitorMacro=true&TfNSWDM=true&version=10.2.1.42'
    auth = 'apikey ' + self.api_key
    header = {'Accept': 'application/json', 'Authorization': auth}

    # Send query or return error
    try:
        response = requests.get(url, headers=header, timeout=10)
    except requests.exceptions.RequestException:
        logger.warning("Network or Timeout error")
        return self.info

    # If there is no valid request
    if response.status_code != 200:
        logger.warning("Error with the request sent; check api key")
        return self.info

    # Parse the result as a JSON object
    result = response.json()

    # If there are no stop events for the query
    try:
        result['stopEvents']
    except KeyError:
        logger.warning("No stop events for this query")
        return self.info

    # Set variables
    maxresults = 1
    monitor = []

    if self.destination != '':
        # Find the next stop events for a specific destination
        for i in range(len(result['stopEvents'])):
            destination = result['stopEvents'][i]['transportation']['destination']['name']
            if destination == self.destination:
                event = self.parseEvent(result, i)
                if event is not None:
                    monitor.append(event)
                if len(monitor) >= maxresults:
                    # We found enough results, let's stop
                    break
    elif self.route != '':
        # Find the next stop events for a specific route
        for i in range(len(result['stopEvents'])):
            number = result['stopEvents'][i]['transportation']['number']
            if number == self.route:
                event = self.parseEvent(result, i)
                if event is not None:
                    monitor.append(event)
                if len(monitor) >= maxresults:
                    # We found enough results, let's stop
                    break
    else:
        # No route defined, find any route leaving next
        for i in range(0, maxresults):
            event = self.parseEvent(result, i)
            if event is not None:
                monitor.append(event)

    if monitor:
        self.info = {
            ATTR_STOP_ID: self.stop_id,
            ATTR_ROUTE: monitor[0][0],
            ATTR_DUE_IN: monitor[0][1],
            ATTR_DELAY: monitor[0][2],
            ATTR_REALTIME: monitor[0][5],
            ATTR_DESTINATION: monitor[0][6],
            ATTR_MODE: monitor[0][7],
        }
    return self.info
def freeze(self):
    """
    Freeze all settings so they cannot be altered
    """
    self.app.disable()
    self.clear.disable()
    self.nod.disable()
    self.led.disable()
    self.dummy.disable()
    self.readSpeed.disable()
    self.expose.disable()
    self.number.disable()
    self.wframe.disable(everything=True)
    self.nmult.disable()
    self.frozen = True
def appliance_device_snmp_v1_trap_destinations(self):
    """
    Gets the ApplianceDeviceSNMPv1TrapDestinations API client.

    Returns:
        ApplianceDeviceSNMPv1TrapDestinations:
    """
    if not self.__appliance_device_snmp_v1_trap_destinations:
        self.__appliance_device_snmp_v1_trap_destinations = \
            ApplianceDeviceSNMPv1TrapDestinations(self.__connection)
    return self.__appliance_device_snmp_v1_trap_destinations
def in_config(self, key):
    """Check to see if the given key (or an alias) is in the
    config file.
    """
    # if the requested key is an alias, then return the proper key
    key = self._real_key(key)
    exists = self._config.get(key)
    return exists
def _newConsole(cls, console):
    """Make a Console instance, from a console ctype"""
    self = cls.__new__(cls)
    _BaseConsole.__init__(self)
    self.console_c = console
    self.console = self
    self.width = _lib.TCOD_console_get_width(console)
    self.height = _lib.TCOD_console_get_height(console)
    return self
def on_lstCanvasExpLayers_itemSelectionChanged(self):
    """Update layer description label

    .. note:: This is an automatic Qt slot
       executed when the category selection changes.
    """
    self.parent.exposure_layer = self.selected_canvas_explayer()
    lblText = self.parent.get_layer_description_from_canvas(
        self.parent.exposure_layer, 'exposure')
    self.lblDescribeCanvasExpLayer.setText(lblText)
    self.parent.pbnNext.setEnabled(True)
def golden_images(self):
    """
    Gets the Golden Images API client.

    Returns:
        GoldenImages:
    """
    if not self.__golden_images:
        self.__golden_images = GoldenImages(self.__connection)
    return self.__golden_images
def delete_servers(self, server_id):
    """
    Requires: account ID, server ID
    Input should be server id
    Returns: list of failed deletions (if any)
    Endpoint: api.newrelic.com
    Errors: 403 Invalid API Key
    Method: Delete
    """
    endpoint = "https://api.newrelic.com"
    uri = "{endpoint}/api/v1/accounts/{account_id}/servers/{server_id}.xml".format(
        endpoint=endpoint, account_id=self.account_id, server_id=server_id)

    response = self._make_delete_request(uri)

    failed_deletions = []
    for server in response.findall('.//server'):
        if 'deleted' not in server.findall('.//result')[0].text:
            failed_deletions.append({'server_id': server.get('id')})

    return failed_deletions
def to_iter(obj):
    """Convert an object to a list if it is not already an iterable.

    Nones are returned unaltered.

    This is an awful function that proliferates an explosion of types,
    please do not use anymore.
    """
    if isinstance(obj, type(None)):
        return None
    elif isinstance(obj, six.string_types):
        return [obj]
    else:
        # Nesting here since symmetry is broken in isinstance checks.
        # Strings are iterables in python 3, so the relative order of
        # if statements is important.
        # collections.abc: the bare collections.Iterable alias was
        # removed in Python 3.10.
        if isinstance(obj, collections.abc.Iterable):
            return obj
        else:
            return [obj]
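Behavior sketch for the function above (outputs shown as comments; six must be installed since the body uses six.string_types):

print(to_iter(None))     # None -- passed through unaltered
print(to_iter('abc'))    # ['abc'] -- strings are wrapped, not iterated
print(to_iter([1, 2]))   # [1, 2] -- already iterable, returned as-is
print(to_iter(42))       # [42] -- non-iterables are wrapped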
def get_global_cache_dir(appname='default', ensure=False):
    """ Returns (usually) writable directory for an application cache """
    if appname is None or appname == 'default':
        appname = get_default_appname()
    global_cache_dir = util_cplat.get_app_resource_dir(
        appname, meta_util_constants.global_cache_dname)
    if ensure:
        util_path.ensuredir(global_cache_dir)
    return global_cache_dir
def cubehelix_pal(start=0, rot=.4, gamma=1.0, hue=0.8,
                  light=.85, dark=.15, reverse=False):
    """
    Utility for creating continuous palette from the cubehelix system.

    This produces a colormap with linearly-decreasing (or increasing)
    brightness. That means that information will be preserved if printed to
    black and white or viewed by someone who is colorblind.

    Parameters
    ----------
    start : float (0 <= start <= 3)
        The hue at the start of the helix.
    rot : float
        Rotations around the hue wheel over the range of the palette.
    gamma : float (0 <= gamma)
        Gamma factor to emphasize darker (gamma < 1) or lighter (gamma > 1)
        colors.
    hue : float (0 <= hue <= 1)
        Saturation of the colors.
    dark : float (0 <= dark <= 1)
        Intensity of the darkest color in the palette.
    light : float (0 <= light <= 1)
        Intensity of the lightest color in the palette.
    reverse : bool
        If True, the palette will go from dark to light.

    Returns
    -------
    out : function
        Continuous color palette that takes a single :class:`int` parameter
        ``n`` and returns ``n`` equally spaced colors.

    References
    ----------
    Green, D. A. (2011). "A colour scheme for the display of astronomical
    intensity images". Bulletin of the Astronomical Society of India,
    Vol. 39, p. 289-295.

    Examples
    --------
    >>> palette = cubehelix_pal()
    >>> palette(5)
    ['#edd1cb', '#d499a7', '#aa688f', '#6e4071', '#2d1e3e']
    """
    cdict = mpl._cm.cubehelix(gamma, start, rot, hue)
    cubehelix_cmap = mpl.colors.LinearSegmentedColormap('cubehelix', cdict)

    def cubehelix_palette(n):
        values = np.linspace(light, dark, n)
        if reverse:
            # honor the documented `reverse` flag, which the original
            # body silently ignored
            values = values[::-1]
        return [mcolors.rgb2hex(cubehelix_cmap(x)) for x in values]

    return cubehelix_palette
def evaluate(self, verbose=True, passes=None):
    """Evaluate the lazy Weld expression backing this frame.

    Returns:
        DataFrameWeld: the materialized result wrapped around a pandas
        DataFrame.
    """
    if self.is_pivot:
        index, pivot, columns = LazyOpResult(
            self.expr,
            self.weld_type,
            0
        ).evaluate(verbose=verbose, passes=passes)
        df_dict = {}
        for i, column_name in enumerate(columns):
            df_dict[column_name] = pivot[i]
        return DataFrameWeld(pd.DataFrame(df_dict, index=index))
    else:
        df = pd.DataFrame(columns=[])
        weldvec_type_list = []
        for col_type in self.column_types:
            weldvec_type_list.append(WeldVec(col_type))
        columns = LazyOpResult(
            grizzly_impl.unzip_columns(
                self.expr,
                self.column_types
            ),
            WeldStruct(weldvec_type_list),
            0
        ).evaluate(verbose=verbose, passes=passes)
        for i, column_name in enumerate(self.column_names):
            df[column_name] = columns[i]
        return DataFrameWeld(df)
def list_folder_content(self, folder, name=None, entity_type=None,
                        content_type=None, page_size=DEFAULT_PAGE_SIZE,
                        page=None, ordering=None):
    '''List files and folders (not recursively) contained in the folder.

    This function does not retrieve all results, pages have to be manually
    retrieved by the caller.

    Args:
        folder (str): The UUID of the requested folder.
        name (str): Optional filter on entity name.
        entity_type (str): Optional filter on entity type.
            Admitted values: ['file', 'folder'].
        content_type (str): Optional filter on entity content type (only
            files are returned).
        page_size (int): Number of elements per page.
        page (int): Number of the page.
        ordering (str): Indicate on which fields to sort the result.
            Prepend '-' to invert order. Multiple values can be provided.
            Ordering is supported on: ['name', 'created_on', 'modified_on'].
            Example: 'ordering=name,created_on'

    Returns:
        A dictionary of the results::

            {
                u'count': 1,
                u'next': None,
                u'previous': None,
                u'results': [{u'content_type': u'plain/text',
                              u'created_by': u'303447',
                              u'created_on': u'2017-03-13T10:17:01.688472Z',
                              u'description': u'',
                              u'entity_type': u'file',
                              u'modified_by': u'303447',
                              u'modified_on': u'2017-03-13T10:17:01.688632Z',
                              u'name': u'file_1',
                              u'parent': u'eac11058-4ae0-4ea9-ada8-d3ea23887509',
                              u'uuid': u'0e17eaac-cb00-4336-b9d7-657026844281'}]
            }

    Raises:
        StorageArgumentException: Invalid arguments
        StorageForbiddenException: Server response code 403
        StorageNotFoundException: Server response code 404
        StorageException: other 400-600 error codes
    '''
    if not is_valid_uuid(folder):
        raise StorageArgumentException(
            'Invalid UUID for folder: {0}'.format(folder))
    params = self._prep_params(locals())
    del params['folder']  # not a query parameter
    return self._authenticated_request \
        .to_endpoint('folder/{}/children/'.format(folder)) \
        .with_params(params) \
        .return_body() \
        .get()
def get_data_and_shared_column_widths(self, data_kwargs, width_kwargs):
    """
    :param data_kwargs:  kwargs used for converting data to strings
    :param width_kwargs: kwargs used for determining column widths
    :return: tuple(list of list of strings, list of int)
    """
    list_of_list, column_widths = self.get_data_and_column_widths(
        data_kwargs, width_kwargs)
    for table, shared_limit in self.shared_tables:
        _, widths = table.get_data_and_column_widths(
            data_kwargs, width_kwargs)
        for i, width in enumerate(widths[:len(column_widths)]):
            delta = width - column_widths[i]
            if delta > 0 and (not shared_limit or delta <= shared_limit):
                column_widths[i] = width
    return list_of_list, column_widths
def check(self, cfg, state, peek_blocks):
    """
    Check if the specified address will be executed

    :param cfg:
    :param state:
    :param int peek_blocks:
    :return:
    :rtype: bool
    """
    # Get the current CFGNode from the CFG
    node = self._get_cfg_node(cfg, state)

    if node is None:
        # Umm it doesn't exist on the control flow graph - why?
        l.error('Failed to find CFGNode for state %s on the control flow graph.', state)
        return False

    # crawl the graph to see if we can reach the target address next
    for src, dst in self._dfs_edges(cfg.graph, node, max_steps=peek_blocks):
        if src.addr == self.addr or dst.addr == self.addr:
            l.debug("State %s will reach %#x.", state, self.addr)
            return True

    l.debug('SimState %s will not reach %#x.', state, self.addr)
    return False
def close(self):
    """
    Closes the connection to the database for this connection.

    :return     <bool> closed
    """
    for pool in self.__pool.values():
        while not pool.empty():
            conn = pool.get_nowait()
            try:
                self._close(conn)
            except Exception:
                pass

    # reset the pool size after closing all connections
    self.__poolSize.clear()
def trisolve(dl, d, du, b, inplace=False):
    """
    The tridiagonal matrix (Thomas) algorithm for solving tridiagonal
    systems of equations:

        a_{i}x_{i-1} + b_{i}x_{i} + c_{i}x_{i+1} = y_{i}

    in matrix form:

        Mx = b

    TDMA is O(n), whereas standard Gaussian elimination is O(n^3).

    Arguments:
    -----------
        dl: (n - 1,) vector
            the lower diagonal of M
        d: (n,) vector
            the main diagonal of M
        du: (n - 1,) vector
            the upper diagonal of M
        b: (n,) vector
            the result of Mx
        inplace:
            if True, and if d and b are both float64 vectors, they will
            be modified in place (may be faster)

    Returns:
    -----------
        x: (n,) vector
            the solution to Mx = b

    References:
    -----------
    http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm
    http://www.netlib.org/lapack/explore-html/d1/db3/dgtsv_8f.html
    """
    if (dl.shape[0] != du.shape[0] or (d.shape[0] != dl.shape[0] + 1)
            or d.shape[0] != b.shape[0]):
        raise ValueError('Invalid diagonal shapes')

    bshape_in = b.shape
    rtype = np.result_type(dl, d, du, b)

    if not inplace:
        # force a copy
        dl = np.array(dl, dtype=rtype, copy=True, order='F')
        d = np.array(d, dtype=rtype, copy=True, order='F')
        du = np.array(du, dtype=rtype, copy=True, order='F')
        b = np.array(b, dtype=rtype, copy=True, order='F')

    # this may also force copies if arrays have inconsistent types /
    # incorrect order
    dl, d, du, b = (np.array(v, dtype=rtype, copy=False, order='F')
                    for v in (dl, d, du, b))

    # use the LAPACK implementation
    _lapack_trisolve(dl, d, du, b, rtype)

    return b.reshape(bshape_in)
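A worked example of the kind of system trisolve handles, checked here with SciPy's banded solver (the internal _lapack_trisolve helper is not shown above, so scipy.linalg.solve_banded, which wraps the same LAPACK dgtsv family, stands in for it):

import numpy as np
from scipy.linalg import solve_banded

n = 5
dl = np.full(n - 1, 1.0)   # lower diagonal of M
d = np.full(n, 4.0)        # main diagonal of M
du = np.full(n - 1, 1.0)   # upper diagonal of M
x_true = np.arange(1.0, n + 1)

# Build M densely just to manufacture a right-hand side b = M @ x_true
M = np.diag(d) + np.diag(dl, -1) + np.diag(du, 1)
b = M @ x_true

# solve_banded wants the diagonals stacked into an (l + u + 1, n) band array
ab = np.zeros((3, n))
ab[0, 1:] = du
ab[1, :] = d
ab[2, :-1] = dl
x = solve_banded((1, 1), ab, b)
print(np.allclose(x, x_true))   # True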
def estimate_bitstring_probs(results):
    """
    Given an array of single shot results estimate the probability
    distribution over all bitstrings.

    :param np.array results: A 2d array where the outer axis iterates over
        shots and the inner axis over bits.
    :return: An array with as many axes as there are qubits and normalized
        such that it sums to one. ``p[i,j,...,k]`` gives the estimated
        probability of bitstring ``ij...k``.
    :rtype: np.array
    """
    nshots, nq = np.shape(results)
    outcomes = np.array([int("".join(map(str, r)), 2) for r in results])
    probs = np.histogram(outcomes, bins=np.arange(-.5, 2 ** nq, 1))[0] / float(nshots)
    return _bitstring_probs_by_qubit(probs)
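A self-contained sketch of the core estimate (the _bitstring_probs_by_qubit reshape helper is not shown above, so this keeps the flat histogram instead of the per-qubit axes):

import numpy as np

rng = np.random.default_rng(0)
results = rng.integers(0, 2, size=(1000, 2))   # 1000 shots of 2 bits each

nshots, nq = results.shape
outcomes = np.array([int("".join(map(str, r)), 2) for r in results])
probs = np.histogram(outcomes, bins=np.arange(-.5, 2 ** nq, 1))[0] / float(nshots)
print(probs, probs.sum())   # roughly [0.25 0.25 0.25 0.25], sums to 1.0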
def apply_mask(img, mask):
    """Return the image with the given `mask` applied."""
    from .mask import apply_mask

    vol, _ = apply_mask(img, mask)
    return vector_to_volume(vol, read_img(mask).get_data().astype(bool))
def _set_rspan_access(self, v, load=False):
    """
    Setter method for rspan_access, mapped from YANG variable
    /interface/fortygigabitethernet/switchport/access/rspan_access (container)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_rspan_access is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_rspan_access() directly.

    YANG Description: The access layer characteristics of this interface.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=rspan_access.rspan_access,
            is_container='container',
            presence=False,
            yang_name="rspan-access",
            rest_name="",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions={u'tailf-common': {u'info': u'Set the Layer2 interface as Access', u'cli-drop-node-name': None, u'cli-incomplete-no': None}},
            namespace='urn:brocade.com:mgmt:brocade-interface',
            defining_module='brocade-interface',
            yang_type='container',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """rspan_access must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=rspan_access.rspan_access, is_container='container', presence=False, yang_name="rspan-access", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the Layer2 interface as Access', u'cli-drop-node-name': None, u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
        })

    self.__rspan_access = t
    if hasattr(self, '_set'):
        self._set()
def add_directory(self, *args, **kwargs):
    """
    Add directory or directories list to bundle

    :param exclusions: List of excluded paths
    :type path: str|unicode
    :type exclusions: list
    """
    exc = kwargs.get('exclusions', None)
    for path in args:
        self.files.append(DirectoryPath(path, self, exclusions=exc))
def add_region_location(self, region, locations=None, use_live=True):
    # type: (str, Optional[List[str]], bool) -> bool
    """Add all countries in a region. If a 3 digit UNStats M49 region code
    is not provided, value is parsed as a region name. If any country is
    already added, it is ignored.

    Args:
        region (str): M49 region, intermediate region or subregion to add
        locations (Optional[List[str]]): Valid locations list. Defaults to
            list downloaded from HDX.
        use_live (bool): Try to use the latest country data from the web
            rather than the file in the package. Defaults to True.

    Returns:
        bool: True if all countries in region added or False if any
        already present.
    """
    return self.add_country_locations(
        Country.get_countries_in_region(region, exception=HDXError,
                                        use_live=use_live),
        locations=locations)
def setup_actions(self):
    """ Connects slots to signals """
    self.actionOpen.triggered.connect(self.on_open)
    self.actionNew.triggered.connect(self.on_new)
    self.actionSave.triggered.connect(self.on_save)
    self.actionSave_as.triggered.connect(self.on_save_as)
    self.actionQuit.triggered.connect(
        QtWidgets.QApplication.instance().quit)
    self.tabWidget.current_changed.connect(self.on_current_tab_changed)
    self.actionAbout.triggered.connect(self.on_about)
def normalizeToTag(val):
    """Converts tags or full names to 2 character tags, case insensitive

    # Parameters

    _val_: `str`

    > A two character string giving the tag or its full name

    # Returns

    `str`

    > The short name of _val_
    """
    try:
        val = val.upper()
    except AttributeError:
        raise KeyError("{} is not a tag or name string".format(val))
    if val not in tagsAndNameSetUpper:
        raise KeyError("{} is not a tag or name string".format(val))
    else:
        try:
            return fullToTagDictUpper[val]
        except KeyError:
            return val
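A usage sketch with stand-in lookup tables (the real module-level fullToTagDictUpper and tagsAndNameSetUpper are not shown above; the entries here are illustrative assumptions only):

# Hypothetical subset of the module's tag tables, for illustration.
fullToTagDictUpper = {"YEAR PUBLISHED": "PY", "TITLE": "TI"}
tagsAndNameSetUpper = set(fullToTagDictUpper) | set(fullToTagDictUpper.values())

print(normalizeToTag("py"))               # 'PY' -- already a tag
print(normalizeToTag("Year Published"))   # 'PY' -- full name mapped to tag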
def autocorrplot(trace, vars=None, fontmap=None, max_lag=100):
    """Bar plot of the autocorrelation function for a trace"""
    try:
        # MultiTrace
        traces = trace.traces
    except AttributeError:
        # NpTrace
        traces = [trace]

    if fontmap is None:
        fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}

    if vars is None:
        vars = traces[0].varnames

    # Extract sample data
    samples = [{v: trace[v] for v in vars} for trace in traces]

    chains = len(traces)
    n = len(samples[0])
    f, ax = subplots(n, chains, squeeze=False)

    max_lag = min(len(samples[0][vars[0]]) - 1, max_lag)

    for i, v in enumerate(vars):
        for j in range(chains):
            d = np.squeeze(samples[j][v])
            ax[i, j].acorr(d, detrend=mlab.detrend_mean, maxlags=max_lag)

            if not j:
                ax[i, j].set_ylabel("correlation")
            ax[i, j].set_xlabel("lag")

            if chains > 1:
                ax[i, j].set_title("chain {0}".format(j + 1))

    # Smaller tick labels
    tlabels = gca().get_xticklabels()
    setp(tlabels, 'fontsize', fontmap[1])

    tlabels = gca().get_yticklabels()
    setp(tlabels, 'fontsize', fontmap[1])
def fromurl(url):
    """ Parse patch from an URL, return False if an error occurred.

        Note that this also can throw urlopen() exceptions.
    """
    ps = PatchSet(urllib_request.urlopen(url))
    if ps.errors == 0:
        return ps
    return False
def binarize(netin, threshold_type, threshold_level, sign='pos', axis='time'):
    """
    Binarizes a network, returning the network. General wrapper function
    for different binarization functions.

    Parameters
    ----------
    netin : array or dict
        Network (graphlet or contact representation),
    threshold_type : str
        What type of threshold to use for binarization.
        Options: 'rdp', 'percent', 'magnitude'.
    threshold_level : str
        Parameter dependent on threshold type.
        If 'rdp', it is the delta (i.e. error allowed in compression).
        If 'percent', it is the percentage to keep (e.g. 0.1 means keep
        10% of signal).
        If 'magnitude', it is the amplitude of signal to keep.
    sign : str, default='pos'
        States the sign of the thresholding. Can be 'pos', 'neg' or 'both'.
        If "neg", only negative values are thresholded and vice versa.
    axis : str
        Threshold over specified axis. Valid for percent and rdp.
        Can be time or graphlet.

    Returns
    -------
    netout : array or dict (depending on input)
        Binarized network
    """
    if threshold_type == 'percent':
        netout = binarize_percent(netin, threshold_level, sign, axis)
    elif threshold_type == 'magnitude':
        netout = binarize_magnitude(netin, threshold_level, sign)
    elif threshold_type == 'rdp':
        netout = binarize_rdp(netin, threshold_level, sign, axis)
    else:
        raise ValueError('Unknown value to parameter: threshold_type.')
    return netout
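The binarize_* helpers dispatched to above are not shown; as a rough illustration of what the 'magnitude' branch presumably does, here is a self-contained sketch on a plain array (an assumption based on the docstring, not the library's actual implementation):

import numpy as np

def binarize_magnitude_sketch(net, threshold_level, sign='pos'):
    # Keep edges whose signed amplitude clears the threshold.
    if sign == 'pos':
        return (net > threshold_level).astype(int)
    if sign == 'neg':
        return (net < -threshold_level).astype(int)
    return (np.abs(net) > threshold_level).astype(int)   # 'both'

net = np.array([[0.0, 0.8], [-0.9, 0.2]])
print(binarize_magnitude_sketch(net, 0.5))           # [[0 1] [0 0]]
print(binarize_magnitude_sketch(net, 0.5, 'both'))   # [[0 1] [1 0]]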
def count_rows(self, table_name):
    """Return the number of entries in a table by counting them."""
    self.table_must_exist(table_name)
    query = "SELECT COUNT (*) FROM `%s`" % table_name.lower()
    self.own_cursor.execute(query)
    return int(self.own_cursor.fetchone()[0])
def _create_window_info(self, window, wm_title: str, wm_class: str):
    """
    Creates a WindowInfo object from the window title and WM_CLASS.

    Also checks for the Java XFocusProxyWindow workaround and applies it
    if needed:

    Workaround for Java applications: Java AWT uses a XFocusProxyWindow
    class, so to get usable information, the parent window needs to be
    queried.

    Credits:
    https://github.com/mooz/xkeysnail/pull/32
    https://github.com/JetBrains/jdk8u_jdk/blob/master/src/solaris/classes/sun/awt/X11/XFocusProxyWindow.java#L35
    """
    if "FocusProxy" in wm_class:
        parent = window.query_tree().parent
        # Discard both the already known wm_class and window title,
        # because both are known to be wrong.
        return self._get_window_info(parent, False)
    else:
        return WindowInfo(wm_title=wm_title, wm_class=wm_class)
def retrieve_page(self, method, path, post_params={}, headers={},
                  status=200, username=None, password=None,
                  *args, **kwargs):
    """
    Makes the actual request. This will also go through and generate the
    needed steps to make the request, i.e. basic auth.

    ``method``:
        Any supported HTTP methods defined in :rfc:`2616`.
    ``path``:
        Absolute or relative path. See :meth:`_prepare_uri` for more
        detail.
    ``post_params``:
        Dictionary of key/value pairs to be added as `POST` parameters.
    ``headers``:
        Dictionary of key/value pairs to be added to the HTTP headers.
    ``status``:
        Will error out if the HTTP status code does not match this value.
        Set this to `None` to disable checking.
    ``username``, ``password``:
        Username and password for basic auth; see
        :meth:`_prepare_basicauth` for more detail.

    An important note is that when ``post_params`` is specified, its
    behavior depends on the ``method``. That is, for `PUT` and `POST`
    requests, the dictionary is multipart encoded and put into the body
    of the request. For everything else, it is added as a query string
    to the URL.
    """
    # Copy headers so that making changes here won't affect the original
    headers = headers.copy()

    # Update basic auth information
    basicauth = self._prepare_basicauth(username, password)
    if basicauth:
        headers.update([basicauth])

    # If this is a POST or PUT, we can put the data into the body as
    # form-data encoded; otherwise, it should be part of the query string.
    if method in ["PUT", "POST"]:
        datagen, form_hdrs = poster.encode.multipart_encode(post_params)
        body = "".join(datagen)
        headers.update(form_hdrs)
        uri = self._prepare_uri(path)
    else:
        body = ""
        uri = self._prepare_uri(path, post_params)

    # Make the actual request
    response = self._make_request(uri, method, body, headers)

    # Assert that the status we received was expected.
    if status:
        real_status = int(response.status_int)
        assert real_status == int(status), \
            "expected %s, received %s." % (status, real_status)

    return response
def _run(self):
    '''Continually poll TWS'''
    stop = self._stop_evt
    connected = self._connected_evt
    tws = self._tws
    fd = tws.fd()
    pollfd = [fd]
    while not stop.is_set():
        while (not connected.is_set()
               or not tws.isConnected()) and not stop.is_set():
            connected.clear()
            backoff = 0
            retries = 0
            while not connected.is_set() and not stop.is_set():
                if tws.reconnect_auto and not tws.reconnect():
                    if backoff < self.MAX_BACKOFF:
                        retries += 1
                        backoff = min(2**(retries + 1), self.MAX_BACKOFF)
                    connected.wait(backoff / 1000.)
                else:
                    connected.wait(1)
            fd = tws.fd()
            pollfd = [fd]
        if fd > 0:
            try:
                evtin, _evtout, evterr = select.select(pollfd, [], pollfd, 1)
            except select.error:
                connected.clear()
                continue
            else:
                if fd in evtin:
                    try:
                        if not tws.checkMessages():
                            tws.eDisconnect(stop_polling=False)
                            continue
                    except (SystemExit, SystemError, KeyboardInterrupt):
                        break
                    except:
                        try:
                            self._wrapper.pyError(*sys.exc_info())
                        except:
                            print_exc()
                elif fd in evterr:
                    connected.clear()
                    continue
def slice_shift(self, periods=1, axis=0):
    """
    Equivalent to `shift` without copying data. The shifted data will
    not include the dropped periods and the shifted axis will be smaller
    than the original.

    Parameters
    ----------
    periods : int
        Number of periods to move, can be positive or negative

    Returns
    -------
    shifted : same type as caller

    Notes
    -----
    While the `slice_shift` is faster than `shift`, you may pay for it
    later during alignment.
    """
    if periods == 0:
        return self

    if periods > 0:
        vslicer = slice(None, -periods)
        islicer = slice(periods, None)
    else:
        vslicer = slice(-periods, None)
        islicer = slice(None, periods)

    new_obj = self._slice(vslicer, axis=axis)
    shifted_axis = self._get_axis(axis)[islicer]
    new_obj.set_axis(shifted_axis, axis=axis, inplace=True)

    return new_obj.__finalize__(self)
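The slicing logic above is easy to see on a small pandas Series; this manual equivalent of the periods > 0 branch shows how the result loses the vacated rows instead of padding with NaN:

import pandas as pd

s = pd.Series([10, 20, 30, 40], index=list('abcd'))
periods = 1
shifted = s.iloc[:-periods].copy()   # drop the last `periods` values...
shifted.index = s.index[periods:]    # ...and move them onto the later labels
print(shifted)   # b 10, c 20, d 30 -- one row shorter, no NaN padding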
def sync(self, videoQuality, client=None, clientId=None, limit=None,
         unwatched=False, title=None):
    """ Add current video (movie, tv-show, season or episode) as sync item
        for specified device.
        See :func:`plexapi.myplex.MyPlexAccount.sync()` for possible
        exceptions.

        Parameters:
            videoQuality (int): idx of quality of the video, one of
                VIDEO_QUALITY_* values defined in :mod:`plexapi.sync`
                module.
            client (:class:`plexapi.myplex.MyPlexDevice`): sync destination,
                see :func:`plexapi.myplex.MyPlexAccount.sync`.
            clientId (str): sync destination, see
                :func:`plexapi.myplex.MyPlexAccount.sync`.
            limit (int): maximum count of items to sync, unlimited if
                `None`.
            unwatched (bool): if `True` watched videos wouldn't be synced.
            title (str): descriptive title for the new
                :class:`plexapi.sync.SyncItem`, if empty the value would be
                generated from metadata of current media.

        Returns:
            :class:`plexapi.sync.SyncItem`: an instance of created syncItem.
    """
    from plexapi.sync import SyncItem, Policy, MediaSettings

    myplex = self._server.myPlexAccount()
    sync_item = SyncItem(self._server, None)
    sync_item.title = title if title else self._defaultSyncTitle()
    sync_item.rootTitle = self.title
    sync_item.contentType = self.listType
    sync_item.metadataType = self.METADATA_TYPE
    sync_item.machineIdentifier = self._server.machineIdentifier

    section = self._server.library.sectionByID(self.librarySectionID)

    sync_item.location = 'library://%s/item/%s' % (section.uuid,
                                                   quote_plus(self.key))
    sync_item.policy = Policy.create(limit, unwatched)
    sync_item.mediaSettings = MediaSettings.createVideo(videoQuality)

    return myplex.sync(sync_item, client=client, clientId=clientId)
def rdf_catalog():
    '''Root RDF endpoint with content negotiation handling'''
    format = RDF_EXTENSIONS[negociate_content()]
    url = url_for('site.rdf_catalog_format', format=format)
    return redirect(url)
async def _formulate_body(self):
    '''
    Takes user supplied data / files and forms it / them appropriately,
    returning the content type, length, and the request body itself.

    Returns:
        The str mime type for the Content-Type header.
        The len of the body.
        The body as a str.
    '''
    c_type, body = None, ''
    multipart_ctype = 'multipart/form-data; boundary={}'.format(_BOUNDARY)

    if self.data is not None:
        if self.files or self.json is not None:
            raise TypeError('data arg cannot be used in conjunction with '
                            'files or json arg.')
        c_type = 'application/x-www-form-urlencoded'
        try:
            body = self._dict_to_query(self.data, params=False)
        except AttributeError:
            body = self.data
            c_type = self.mimetype or 'text/plain'

    elif self.files is not None:
        if self.data or self.json is not None:
            raise TypeError('files arg cannot be used in conjunction with '
                            'data or json arg.')
        c_type = multipart_ctype
        body = await self._multipart(self.files)

    elif self.json is not None:
        if self.data or self.files:
            raise TypeError('json arg cannot be used in conjunction with '
                            'data or files arg.')
        c_type = 'application/json'
        body = _json.dumps(self.json)

    return c_type, str(len(body)), body
def pipe_uniq(context=None, _INPUT=None, conf=None, **kwargs):
    """An operator that filters out non unique items according to the
    specified field. Not loopable.

    Parameters
    ----------
    context : pipe2py.Context object
    _INPUT : pipe2py.modules pipe like object (iterable of items)
    kwargs -- other inputs, e.g. to feed terminals for rule values
    conf : {'field': {'type': 'text', 'value': <field to be unique>}}

    Returns
    -------
    _OUTPUT : generator of unique items
    """
    funcs = get_splits(None, conf, **cdicts(opts, kwargs))
    pieces, _pass = funcs[0](), funcs[2]()
    _OUTPUT = _INPUT if _pass else unique_items(_INPUT, pieces.field)
    return _OUTPUT
def _set_intf_isis(self, v, load=False):
    """
    Setter method for intf_isis, mapped from YANG variable
    /routing_system/interface/ve/intf_isis (container)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_intf_isis is considered as a private method. Backends looking
    to populate this variable should do so via calling
    thisObj._set_intf_isis() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=intf_isis.intf_isis,
            is_container='container',
            presence=True,
            yang_name="intf-isis",
            rest_name="",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IsisVeInterface', u'sort-priority': u'131'}},
            namespace='urn:brocade.com:mgmt:brocade-isis',
            defining_module='brocade-isis',
            yang_type='container',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """intf_isis must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=intf_isis.intf_isis, is_container='container', presence=True, yang_name="intf-isis", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IsisVeInterface', u'sort-priority': u'131'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)""",
        })

    self.__intf_isis = t
    if hasattr(self, '_set'):
        self._set()
def create_post(self, title, body, board, category, username):
    '''create a Discourse post, given a title, body, board, and token.

       Parameters
       ==========
       title: the issue title
       body: the issue body
       board: the discourse board to post to
       category: the category to post the topic under (falls back to
                 the default if not valid)
       username: the posting user (not referenced in the request below)
    '''
    category_url = "%s/categories.json" % board
    response = requests.get(category_url)
    if response.status_code != 200:
        print('Error with retrieving %s' % category_url)
        sys.exit(1)

    # Get a list of all categories
    categories = response.json()['category_list']['categories']
    categories = {c['name']: c['id'] for c in categories}

    # And if not valid, warn the user
    if category not in categories:
        bot.warning('%s is not valid, will use default' % category)
    category_id = categories.get(category, None)

    headers = {"Content-Type": "application/json",
               "User-Api-Client-Id": self.client_id,
               "User-Api-Key": self.token}

    # First get the category ids
    data = {'title': title, 'raw': body, 'category': category_id}
    response = requests.post("%s/posts.json" % board,
                             headers=headers,
                             data=json.dumps(data))

    if response.status_code in [200, 201, 202]:
        topic = response.json()
        url = "%s/t/%s/%s" % (board, topic['topic_slug'], topic['topic_id'])
        bot.info(url)
        return url

    elif response.status_code == 404:
        bot.error('Cannot post to board, not found. Do you have permission?')
        sys.exit(1)

    else:
        bot.error('Cannot post to board %s' % board)
        bot.error(response.content)
        sys.exit(1)
def head(self, lines=10):
    """\
    Return the top lines of the file.
    """
    self.seek(0)
    for i in range(lines):
        if not self.seek_line_forward():
            break

    end_pos = self.file.tell()

    self.seek(0)
    data = self.file.read(end_pos - 1)

    if data:
        return self.splitlines(data)
    else:
        return []
def serialize_to_normalized_compact_json(py_obj):
    """Serialize a native object to normalized, compact JSON.

    The JSON string is normalized by sorting any dictionary keys. It will
    be on a single line without whitespace between elements.

    Args:
        py_obj: object
            Any object that can be represented in JSON. Some types, such as
            datetimes are automatically converted to strings.

    Returns:
        str: normalized, compact JSON string.
    """
    return json.dumps(py_obj, sort_keys=True, separators=(',', ':'),
                      cls=ToJsonCompatibleTypes)
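Usage sketch (assuming the module's ToJsonCompatibleTypes encoder referenced above is importable; plain JSON-compatible input needs nothing special):

print(serialize_to_normalized_compact_json({'b': 1, 'a': [2, 3]}))
# {"a":[2,3],"b":1}   <- keys sorted, single line, no whitespace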
def save_experiment(self, name, variants):
    """
    Persist an experiment and its variants (unless they already exist).

    :param name a unique string name for the experiment
    :param variants a list of strings, each with a unique variant name
    """
    try:
        model.Experiment(
            name=name,
            started_on=datetime.utcnow(),
            variants=[
                model.Variant(name=v, order=i)
                for i, v in enumerate(variants)
            ]
        )
        self.Session.commit()
    finally:
        self.Session.close()
def clear_dcnm_in_part(self, tenant_id, fw_dict, is_fw_virt=False):
    """Clear the DCNM in partition service information.

    Clear the In partition service node IP address in DCNM and update
    the result.
    """
    res = fw_const.DCNM_IN_PART_UPDDEL_SUCCESS
    tenant_name = fw_dict.get('tenant_name')
    ret = True
    try:
        self._update_partition_in_delete(tenant_name)
    except Exception as exc:
        LOG.error("Clear of In Partition failed for tenant %(tenant)s"
                  " , Exception %(exc)s",
                  {'tenant': tenant_id, 'exc': str(exc)})
        res = fw_const.DCNM_IN_PART_UPDDEL_FAIL
        ret = False
    self.update_fw_db_result(tenant_id, dcnm_status=res)
    LOG.info("In partition cleared off service ip addr")
    return ret
def get_firmware(self):
    """Get the current firmware version."""
    self.get_status()
    try:
        self.firmware = self.data['fw_version']
    except TypeError:
        self.firmware = 'Unknown'
    return self.firmware
def getPotential(self, columnIndex, potential):
    """
    :param columnIndex: (int) column index to get potential for.
    :param potential: (list) will be overwritten with column potentials.
           Must match the number of inputs.
    """
    assert columnIndex < self._numColumns
    potential[:] = self._potentialPools[columnIndex]
def workspace_backup_restore(ctx, choose_first, bak):
    """
    Restore backup BAK
    """
    backup_manager = WorkspaceBackupManager(Workspace(
        ctx.resolver,
        directory=ctx.directory,
        mets_basename=ctx.mets_basename,
        automatic_backup=ctx.automatic_backup))
    backup_manager.restore(bak, choose_first)
def execute(self):
    """
    execute Webhook
    :return:
    """
    if bool(self.files) is False:
        response = requests.post(self.url, json=self.json,
                                 proxies=self.proxies)
    else:
        self.files['payload_json'] = (None, json.dumps(self.json))
        response = requests.post(self.url, files=self.files,
                                 proxies=self.proxies)
    if response.status_code in [200, 204]:
        logger.debug("Webhook executed")
    else:
        logger.error('status code %s: %s' % (
            response.status_code, response.content.decode("utf-8")))
def get_correctness(self, question_id):
    """get measure of correctness for the question"""
    response = self.get_response(question_id)
    if response.is_answered():
        item = self._get_item(response.get_item_id())
        return item.get_correctness_for_response(response)
    raise errors.IllegalState()
def section_term_lengths(neurites, neurite_type=NeuriteType.all):
    '''Termination section lengths in a collection of neurites'''
    return map_sections(_section_length, neurites,
                        neurite_type=neurite_type,
                        iterator_type=Tree.ileaf)
def validate_all_values_for_key_in_obj(obj, key, validation_fun):
    """Validate value for all (nested) occurrence of `key` in `obj`
    using `validation_fun`.

    Args:
        obj (dict): dictionary object.
        key (str): key whose value is to be validated.
        validation_fun (function): function used to validate the value
            of `key`.

    Raises:
        ValidationError: `validation_fun` will raise this error on failure
    """
    for vkey, value in obj.items():
        if vkey == key:
            validation_fun(value)
        elif isinstance(value, dict):
            validate_all_values_for_key_in_obj(value, key, validation_fun)
        elif isinstance(value, list):
            validate_all_values_for_key_in_list(value, key, validation_fun)
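Usage sketch (the list branch calls a sibling helper, validate_all_values_for_key_in_list, which is not shown above, so this example sticks to nested dicts; ValueError stands in for the library's ValidationError):

def require_positive(value):
    if value <= 0:
        raise ValueError('amount must be positive: {}'.format(value))

obj = {'amount': 5, 'details': {'amount': 3}}
validate_all_values_for_key_in_obj(obj, 'amount', require_positive)   # passes

obj['details']['amount'] = -1
# validate_all_values_for_key_in_obj(obj, 'amount', require_positive)  # raises ValueError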
def absdeg(deg):
    '''Change from signed degrees to 0-180 or 0-360 ranges

    deg: ndarray
        Movement data in pitch, roll, yaw (degrees)

    Returns
    -------
    deg_abs: ndarray
        Movement translated from -180:180/-90:90 degrees to
        0:360/0:180 degrees

    Example
    -------
    deg = numpy.array([-170, -120, 0, 90])
    absdeg(deg)  # returns array([190, 240, 0, 90])
    '''
    import numpy

    d = numpy.copy(deg)
    if numpy.max(numpy.abs(deg)) > 90.0:
        d[deg < 0] = 360 + deg[deg < 0]
    else:
        d[deg < 0] = 180 + deg[deg < 0]

    return d
def match_classes(self, el, classes):
    """Match element's classes."""
    current_classes = self.get_classes(el)
    found = True
    for c in classes:
        if c not in current_classes:
            found = False
            break
    return found
def _fit_newton(self, fitcache=None, ebin=None, **kwargs):
    """Fast fitting method using newton fitter."""
    tol = kwargs.get('tol', self.config['optimizer']['tol'])
    max_iter = kwargs.get('max_iter',
                          self.config['optimizer']['max_iter'])
    init_lambda = kwargs.get('init_lambda',
                             self.config['optimizer']['init_lambda'])
    use_reduced = kwargs.get('use_reduced', True)

    free_params = self.get_params(True)
    free_norm_params = [p for p in free_params if p['is_norm'] is True]

    if len(free_params) != len(free_norm_params):
        msg = 'Executing Newton fitter with one ' + \
              'or more free shape parameters.'
        self.logger.error(msg)
        raise Exception(msg)

    verbosity = kwargs.get('verbosity', 0)
    if fitcache is None:
        fitcache = self._create_fitcache(**kwargs)

    fitcache.update(self.get_params(),
                    tol, max_iter, init_lambda, use_reduced)

    logemin = self.loge_bounds[0]
    logemax = self.loge_bounds[1]
    imin = int(utils.val_to_edge(self.log_energies, logemin)[0])
    imax = int(utils.val_to_edge(self.log_energies, logemax)[0])

    if ebin is not None:
        fitcache.fitcache.setEnergyBin(ebin)
    elif imin == 0 and imax == self.enumbins:
        fitcache.fitcache.setEnergyBin(-1)
    else:
        fitcache.fitcache.setEnergyBins(imin, imax)

    num_free = len(free_norm_params)

    o = {'fit_status': 0,
         'fit_quality': 3,
         'fit_success': True,
         'edm': 0,
         'loglike': None,
         'values': np.ones(num_free) * np.nan,
         'errors': np.ones(num_free) * np.nan,
         'indices': np.zeros(num_free, dtype=int),
         'is_norm': np.empty(num_free, dtype=bool),
         'src_names': num_free * [None],
         'par_names': num_free * [None],
         }

    if num_free == 0:
        return o

    ref_vals = np.array(fitcache.fitcache.refValues())
    free = np.array(fitcache.fitcache.currentFree())
    norm_vals = ref_vals[free]

    norm_idxs = []
    for i, p in enumerate(free_norm_params):
        norm_idxs += [p['idx']]
        o['indices'][i] = p['idx']
        o['src_names'][i] = p['src_name']
        o['par_names'][i] = p['par_name']
        o['is_norm'][i] = p['is_norm']

    o['fit_status'] = fitcache.fit(verbose=verbosity)
    o['edm'] = fitcache.fitcache.currentEDM()

    pars, errs, cov = fitcache.get_pars()
    pars *= norm_vals
    errs *= norm_vals
    cov = cov * np.outer(norm_vals, norm_vals)

    o['values'] = pars
    o['errors'] = errs
    o['covariance'] = cov
    errinv = np.zeros_like(o['errors'])
    m = o['errors'] > 0
    errinv[m] = 1. / o['errors'][m]
    o['correlation'] = o['covariance'] * np.outer(errinv, errinv)

    if o['fit_status'] in [-2, 0]:
        for idx, val, err in zip(norm_idxs, pars, errs):
            self._set_value_bounded(idx, val)
            self.like[idx].setError(err)
        self.like.syncSrcParams()
        o['fit_success'] = True
    else:
        o['fit_success'] = False

    if o['fit_status']:
        self.logger.error('Error in NEWTON fit. Fit Status: %i',
                          o['fit_status'])

    # FIXME: Figure out why currentLogLike gets out of sync
    #loglike = fitcache.fitcache.currentLogLike()
    #prior_vals, prior_errs, has_prior = gtutils.get_priors(self.like)
    #loglike -= np.sum(has_prior) * np.log(np.sqrt(2 * np.pi))
    loglike = -self.like()
    o['loglike'] = loglike

    return o
def update(self):
    """
    Stops and starts the framework, if the framework is active.

    :raise BundleException: Something wrong occurred while stopping or
                            starting the framework.
    """
    with self._lock:
        if self._state == Bundle.ACTIVE:
            self.stop()
            self.start()
def install_lib(url, replace_existing=False, fix_wprogram=True):
    """install library from web or local files system.

    :param url: web address or file path
    :param replace_existing: bool
    :rtype: None
    """
    d = tmpdir(tmpdir())
    f = download(url)
    Archive(f).extractall(d)
    clean_dir(d)
    d, src_dlib = find_lib_dir(d)
    move_examples(d, src_dlib)
    fix_examples_dir(src_dlib)
    if fix_wprogram:
        fix_wprogram_in_files(src_dlib)

    targ_dlib = libraries_dir() / src_dlib.name
    if targ_dlib.exists():
        log.debug('library already exists: %s', targ_dlib)
        if replace_existing:
            log.debug('remove %s', targ_dlib)
            targ_dlib.rmtree()
        else:
            raise ConfduinoError('library already exists:' + targ_dlib)

    log.debug('move %s -> %s', src_dlib, targ_dlib)
    src_dlib.move(targ_dlib)

    libraries_dir().copymode(targ_dlib)
    for x in targ_dlib.walk():
        libraries_dir().copymode(x)

    return targ_dlib.name
def intervals_to_fragment_list(self, text_file, time_values): """ Transform a list of at least 4 time values (corresponding to at least 3 intervals) into a sync map fragment list and store it internally. The first interval is a HEAD, the last is a TAIL. For example: time_values=[0.000, 1.000, 2.000, 3.456] => [(0.000, 1.000), (1.000, 2.000), (2.000, 3.456)] :param text_file: the text file containing the text fragments associated :type text_file: :class:`~aeneas.textfile.TextFile` :param time_values: the time values :type time_values: list of :class:`~aeneas.exacttiming.TimeValue` :raises: TypeError: if ``text_file`` is not an instance of :class:`~aeneas.textfile.TextFile` or ``time_values`` is not a list :raises: ValueError: if ``time_values`` has length less than four """ if not isinstance(text_file, TextFile): self.log_exc(u"text_file is not an instance of TextFile", None, True, TypeError) if not isinstance(time_values, list): self.log_exc(u"time_values is not a list", None, True, TypeError) if len(time_values) < 4: self.log_exc(u"time_values has length < 4", None, True, ValueError) self.log(u"Converting time values to fragment list...") begin = time_values[0] end = time_values[-1] self.log([u" Creating SyncMapFragmentList with begin %.3f and end %.3f", begin, end]) self.smflist = SyncMapFragmentList( begin=begin, end=end, rconf=self.rconf, logger=self.logger ) self.log(u" Creating HEAD fragment") self.smflist.add(SyncMapFragment( # NOTE lines and filtered lines MUST be set, # otherwise some output format might break # when adding HEAD/TAIL to output text_fragment=TextFragment(identifier=u"HEAD", lines=[], filtered_lines=[]), begin=time_values[0], end=time_values[1], fragment_type=SyncMapFragment.HEAD ), sort=False) self.log(u" Creating REGULAR fragments") # NOTE text_file.fragments() returns a list, # so we cache a copy here instead of # calling it once per loop fragments = text_file.fragments for i in range(1, len(time_values) - 2): self.log([u" Adding fragment %d ...", i]) self.smflist.add(SyncMapFragment( text_fragment=fragments[i - 1], begin=time_values[i], end=time_values[i + 1], fragment_type=SyncMapFragment.REGULAR ), sort=False) self.log([u" Adding fragment %d ... done", i]) self.log(u" Creating TAIL fragment") self.smflist.add(SyncMapFragment( # NOTE lines and filtered lines MUST be set, # otherwise some output format might break # when adding HEAD/TAIL to output text_fragment=TextFragment(identifier=u"TAIL", lines=[], filtered_lines=[]), begin=time_values[len(time_values) - 2], end=end, fragment_type=SyncMapFragment.TAIL ), sort=False) self.log(u"Converting time values to fragment list... done") self.log(u"Sorting fragment list...") self.smflist.sort() self.log(u"Sorting fragment list... done") return self.smflist
Transform a list of at least 4 time values (corresponding to at least 3 intervals) into a sync map fragment list and store it internally. The first interval is a HEAD, the last is a TAIL. For example: time_values=[0.000, 1.000, 2.000, 3.456] => [(0.000, 1.000), (1.000, 2.000), (2.000, 3.456)] :param text_file: the text file containing the text fragments associated :type text_file: :class:`~aeneas.textfile.TextFile` :param time_values: the time values :type time_values: list of :class:`~aeneas.exacttiming.TimeValue` :raises: TypeError: if ``text_file`` is not an instance of :class:`~aeneas.textfile.TextFile` or ``time_values`` is not a list :raises: ValueError: if ``time_values`` has length less than four
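A minimal usage sketch for the record above, assuming an adjuster object exposing intervals_to_fragment_list() and a TextFile instance text_file holding a single fragment (both object names are illustrative, not part of this record):

from aeneas.exacttiming import TimeValue

# four time values => three intervals: HEAD, one REGULAR, TAIL
time_values = [TimeValue("0.000"), TimeValue("1.000"),
               TimeValue("2.000"), TimeValue("3.456")]
smflist = adjuster.intervals_to_fragment_list(text_file, time_values)
for fragment in smflist:
    print(fragment.begin, fragment.end)  # (0.000, 1.000), (1.000, 2.000), (2.000, 3.456)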
def active(self, include=None): """ Return all active views. """ return self._get(self._build_url(self.endpoint.active(include=include)))
Return all active views.
def find_module(module, paths=None): """Just like 'imp.find_module()', but with package support""" parts = module.split('.') while parts: part = parts.pop(0) f, path, (suffix, mode, kind) = info = imp.find_module(part, paths) if kind == PKG_DIRECTORY: parts = parts or ['__init__'] paths = [path] elif parts: raise ImportError("Can't find %r in %s" % (parts, module)) return info
Just like 'imp.find_module()', but with package support
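For illustration, a dotted lookup through the wrapper above; imp is deprecated on Python 3, so this sketch assumes an environment where imp.find_module is still available:

# walks the package chain part by part, e.g. logging -> logging.handlers
f, path, description = find_module('logging.handlers')
print(path)  # filesystem path of handlers.py inside the logging package
if f is not None:
    f.close()  # imp.find_module returns an open file for plain modules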
def db_create(name, character_set=None, collate=None, **connection_args):
    '''
    Adds a database to the MySQL server.

    name
        The name of the database to manage

    character_set
        The character set, if left empty the MySQL default will be used

    collate
        The collation, if left empty the MySQL default will be used

    CLI Example:

    .. code-block:: bash

        salt '*' mysql.db_create 'dbname'
        salt '*' mysql.db_create 'dbname' 'utf8' 'utf8_general_ci'
    '''
    # check if db exists
    if db_exists(name, **connection_args):
        log.info('DB \'%s\' already exists', name)
        return False

    # db doesn't exist, proceed
    dbc = _connect(**connection_args)
    if dbc is None:
        return False
    cur = dbc.cursor()
    s_name = quote_identifier(name)
    # identifiers cannot be used as values
    qry = 'CREATE DATABASE IF NOT EXISTS {0}'.format(s_name)
    args = {}
    if character_set is not None:
        qry += ' CHARACTER SET %(character_set)s'
        args['character_set'] = character_set
    if collate is not None:
        qry += ' COLLATE %(collate)s'
        args['collate'] = collate
    qry += ';'

    try:
        if _execute(cur, qry, args):
            log.info('DB \'%s\' created', name)
            return True
    except MySQLdb.OperationalError as exc:
        err = 'MySQL Error {0}: {1}'.format(*exc.args)
        __context__['mysql.error'] = err
        log.error(err)
    return False
Adds a database to the MySQL server. name The name of the database to manage character_set The character set, if left empty the MySQL default will be used collate The collation, if left empty the MySQL default will be used CLI Example: .. code-block:: bash salt '*' mysql.db_create 'dbname' salt '*' mysql.db_create 'dbname' 'utf8' 'utf8_general_ci'
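Besides the CLI, the function can be invoked cross-module through the __salt__ dunder; a sketch, where the database name, charset, and the log object are purely illustrative:

# from another Salt execution module or a custom state
created = __salt__['mysql.db_create']('appdb', 'utf8', 'utf8_general_ci')
if not created:
    log.warning('appdb was not created (it may already exist)')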
def getSets(self, **kwargs): ''' A way to get different sets from a query. All parameters are optional, but you should probably use some (so that you get results) :param str query: The thing you're searching for. :param str theme: The theme of the set. :param str subtheme: The subtheme of the set. :param str setNumber: The LEGO set number. :param str year: The year in which the set came out. :param int owned: Whether or not you own the set. Only works when logged in with :meth:`login`. Set to `1` to make true. :param int wanted: Whether or not you want the set. Only works when logged in with :meth:`login`. Set to `1` to make true. :param str orderBy: How you want the set ordered. Accepts 'Number', 'YearFrom', 'Pieces', 'Minifigs', 'Rating', 'UKRetailPrice', 'USRetailPrice', 'CARetailPrice', 'EURetailPrice', 'Theme', 'Subtheme', 'Name', 'Random'. Add 'DESC' to the end to sort descending, e.g. NameDESC. Case insensitive. Defaults to 'Number'. :param int pageSize: How many results are on a page. Defaults to 20. :param int pageNumber: The number of the page you're looking at. Defaults to 1. :param str userName: The name of a user whose sets you want to search. :returns: A list of :class:`brickfront.build.Build` objects. :rtype: list ''' # Generate a dictionary to send as parameters params = { 'apiKey': self.apiKey, 'userHash': self.userHash, 'query': kwargs.get('query', ''), 'theme': kwargs.get('theme', ''), 'subtheme': kwargs.get('subtheme', ''), 'setNumber': kwargs.get('setNumber', ''), 'year': kwargs.get('year', ''), 'owned': kwargs.get('owned', ''), 'wanted': kwargs.get('wanted', ''), 'orderBy': kwargs.get('orderBy', 'Number'), 'pageSize': kwargs.get('pageSize', '20'), 'pageNumber': kwargs.get('pageNumber', '1'), 'userName': kwargs.get('userName', '') } url = Client.ENDPOINT.format('getSets') returned = get(url, params=params) self.checkResponse(returned) # Construct the build objects and return them graciously root = ET.fromstring(returned.text) return [Build(i, self) for i in root]
A way to get different sets from a query. All parameters are optional, but you should probably use some (so that you get results) :param str query: The thing you're searching for. :param str theme: The theme of the set. :param str subtheme: The subtheme of the set. :param str setNumber: The LEGO set number. :param str year: The year in which the set came out. :param int owned: Whether or not you own the set. Only works when logged in with :meth:`login`. Set to `1` to make true. :param int wanted: Whether or not you want the set. Only works when logged in with :meth:`login`. Set to `1` to make true. :param str orderBy: How you want the set ordered. Accepts 'Number', 'YearFrom', 'Pieces', 'Minifigs', 'Rating', 'UKRetailPrice', 'USRetailPrice', 'CARetailPrice', 'EURetailPrice', 'Theme', 'Subtheme', 'Name', 'Random'. Add 'DESC' to the end to sort descending, e.g. NameDESC. Case insensitive. Defaults to 'Number'. :param int pageSize: How many results are on a page. Defaults to 20. :param int pageNumber: The number of the page you're looking at. Defaults to 1. :param str userName: The name of a user whose sets you want to search. :returns: A list of :class:`brickfront.build.Build` objects. :rtype: list
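A hedged usage sketch, assuming a brickfront Client already constructed with a valid API key (client construction is not shown in this record):

# five 2017 sets matching the query, ordered by piece count
builds = client.getSets(query='Star Wars', year='2017',
                        orderBy='Pieces', pageSize=5)
for build in builds:
    print(build)  # each item is a brickfront.build.Build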
def convert_complexFaultSource(self, node):
    """
    Convert the given node into a complex fault object.

    :param node: a node with tag complexFaultSource
    :returns: a :class:`openquake.hazardlib.source.ComplexFaultSource`
              instance
    """
    geom = node.complexFaultGeometry
    edges = self.geo_lines(geom)
    mfd = self.convert_mfdist(node)
    msr = valid.SCALEREL[~node.magScaleRel]()
    with context(self.fname, node):
        cmplx = source.ComplexFaultSource(
            source_id=node['id'],
            name=node['name'],
            tectonic_region_type=node.attrib.get('tectonicRegion'),
            mfd=mfd,
            rupture_mesh_spacing=self.complex_fault_mesh_spacing,
            magnitude_scaling_relationship=msr,
            rupture_aspect_ratio=~node.ruptAspectRatio,
            edges=edges,
            rake=~node.rake,
            temporal_occurrence_model=self.get_tom(node))
    return cmplx
Convert the given node into a complex fault object. :param node: a node with tag complexFaultSource :returns: a :class:`openquake.hazardlib.source.ComplexFaultSource` instance
def get_workflow_info(func_list): """Return function info, go through lists recursively.""" funcs = [] for item in func_list: if item is None: continue if isinstance(item, list): funcs.append(get_workflow_info(item)) else: funcs.append(get_func_info(item)) return funcs
Return function info, go through lists recursively.
def instance_from_str(instance_str): """ Given an instance string in the form "app.Model:pk", returns a tuple of ``(model, instance)``. If the pk part is empty, ``instance`` will be ``None``. Raises ``ValueError`` on invalid model strings or missing instances. """ match = instance_str_re.match(instance_str) if not match: raise ValueError("Invalid instance string") model_string = match.group(1) try: model = apps.get_model(model_string) except (LookupError, ValueError): raise ValueError("Invalid instance string") pk = match.group(2) if pk: try: return model, model._default_manager.get(pk=pk) except model.DoesNotExist: raise ValueError("Invalid instance string") return model, None
Given an instance string in the form "app.Model:pk", returns a tuple of ``(model, instance)``. If the pk part is empty, ``instance`` will be ``None``. Raises ``ValueError`` on invalid model strings or missing instances.
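A usage sketch under Django with the stock auth app; the model label and pk here are illustrative:

# with a pk: looks the instance up, raising ValueError if it does not exist
model, instance = instance_from_str('auth.User:1')
assert model.__name__ == 'User'

# an empty pk part (as accepted by instance_str_re) yields (model, None)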
def delete_subtree(self, nodes): # noqa: D302 r""" Delete nodes (and their sub-trees) from the tree. :param nodes: Node(s) to delete :type nodes: :ref:`NodeName` or list of :ref:`NodeName` :raises: * RuntimeError (Argument \`nodes\` is not valid) * RuntimeError (Node *[node_name]* not in tree) Using the same example tree created in :py:meth:`ptrie.Trie.add_nodes`:: >>> from __future__ import print_function >>> import docs.support.ptrie_example >>> tobj = docs.support.ptrie_example.create_tree() >>> print(tobj) root ├branch1 (*) │├leaf1 ││└subleaf1 (*) │└leaf2 (*) │ └subleaf2 └branch2 >>> tobj.delete_subtree(['root.branch1.leaf1', 'root.branch2']) >>> print(tobj) root └branch1 (*) └leaf2 (*) └subleaf2 """ if self._validate_node_name(nodes): raise RuntimeError("Argument `nodes` is not valid") self._delete_subtree(nodes)
r""" Delete nodes (and their sub-trees) from the tree. :param nodes: Node(s) to delete :type nodes: :ref:`NodeName` or list of :ref:`NodeName` :raises: * RuntimeError (Argument \`nodes\` is not valid) * RuntimeError (Node *[node_name]* not in tree) Using the same example tree created in :py:meth:`ptrie.Trie.add_nodes`:: >>> from __future__ import print_function >>> import docs.support.ptrie_example >>> tobj = docs.support.ptrie_example.create_tree() >>> print(tobj) root ├branch1 (*) │├leaf1 ││└subleaf1 (*) │└leaf2 (*) │ └subleaf2 └branch2 >>> tobj.delete_subtree(['root.branch1.leaf1', 'root.branch2']) >>> print(tobj) root └branch1 (*) └leaf2 (*) └subleaf2
def table_repr(columns, rows, data, padding=2): """Generate a table for cli output""" padding = ' ' * padding column_lengths = [len(column) for column in columns] for row in rows: for i, column in enumerate(columns): item = str(data[row][column]) column_lengths[i] = max(len(item), column_lengths[i]) max_row_length = max(len(row) for row in rows) if len(rows) else 0 table_row = ' ' * max_row_length for i, column in enumerate(columns): table_row += padding + column.rjust(column_lengths[i]) table_rows = [table_row] for row in rows: table_row = row.rjust(max_row_length) for i, column in enumerate(columns): item = str(data[row][column]) table_row += padding + item.rjust(column_lengths[i]) table_rows.append(table_row) return '\n'.join(table_rows)
Generate a table for cli output
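The expected shapes are easy to get wrong: columns is a list of column names, rows a list of row keys, and data a dict of per-row dicts keyed by column name. A small self-contained example:

columns = ['cpu', 'mem']
rows = ['web-1', 'db-1']
data = {
    'web-1': {'cpu': '12%', 'mem': '512M'},
    'db-1': {'cpu': '48%', 'mem': '2G'},
}
print(table_repr(columns, rows, data))
#        cpu   mem
# web-1  12%  512M
#  db-1  48%    2G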
def get_object(self, view_name, view_args, view_kwargs): """ Return the object corresponding to a matched URL. Takes the matched URL conf arguments, and should return an object instance, or raise an `ObjectDoesNotExist` exception. """ lookup_value = view_kwargs.get(self.lookup_url_kwarg) parent_lookup_value = view_kwargs.get(self.parent_lookup_field) lookup_kwargs = { self.lookup_field: lookup_value, } # Try to lookup parent attr if parent_lookup_value: lookup_kwargs.update({self.parent_lookup_field: parent_lookup_value}) return self.get_queryset().get(**lookup_kwargs)
Return the object corresponding to a matched URL. Takes the matched URL conf arguments, and should return an object instance, or raise an `ObjectDoesNotExist` exception.
def BVV(value, size=None, **kwargs): """ Creates a bit-vector value (i.e., a concrete value). :param value: The value. Either an integer or a string. If it's a string, it will be interpreted as the bytes of a big-endian constant. :param size: The size (in bits) of the bit-vector. Optional if you provide a string, required for an integer. :returns: A BV object representing this value. """ if type(value) in (bytes, str): if type(value) is str: l.warning("BVV value is a unicode string, encoding as utf-8") value = value.encode('utf-8') if size is None: size = len(value)*8 elif type(size) is not int: raise TypeError("Bitvector size must be either absent (implicit) or an integer") elif size != len(value)*8: raise ClaripyValueError('string/size mismatch for BVV creation') value = int(binascii.hexlify(value), 16) if value != b"" else 0 elif size is None or (type(value) is not int and value is not None): raise TypeError('BVV() takes either an integer value and a size or a string of bytes') # ensure the 0 <= value < (1 << size) # FIXME hack to handle None which is used for an Empty Strided Interval (ESI) if value is not None: value &= (1 << size) -1 if not kwargs: try: return _bvv_cache[(value, size)] except KeyError: pass result = BV('BVV', (value, size), length=size, **kwargs) _bvv_cache[(value, size)] = result return result
Creates a bit-vector value (i.e., a concrete value). :param value: The value. Either an integer or a string. If it's a string, it will be interpreted as the bytes of a big-endian constant. :param size: The size (in bits) of the bit-vector. Optional if you provide a string, required for an integer. :returns: A BV object representing this value.
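Assuming this record is claripy's BVV constructor (the BV and _bvv_cache references suggest so), typical calls look like this sketch:

import claripy

a = claripy.BVV(0x41, 8)   # explicit size is required for integers
b = claripy.BVV(b'AB')     # size inferred from the bytes: 16 bits
assert a.length == 8 and b.length == 16
c = claripy.BVV(-1, 8)     # masked into 0 <= value < 2**size, i.e. 0xff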
def reset(self): '''Resets the stream pointer to the beginning of the file.''' if self.__row_number > self.__sample_size: self.__parser.reset() self.__extract_sample() self.__extract_headers() self.__row_number = 0
Resets the stream pointer to the beginning of the file.
def _pre_job_handling(self, job): # pylint:disable=arguments-differ """ Some pre job-processing tasks, like update progress bar. :param CFGJob job: The CFGJob instance. :return: None """ if self._low_priority: self._release_gil(len(self._nodes), 20, 0.0001) # a new entry is picked. Deregister it self._deregister_analysis_job(job.func_addr, job) if not self._inside_regions(job.addr): obj = self.project.loader.find_object_containing(job.addr) if obj is not None and isinstance(obj, self._cle_pseudo_objects): pass else: # it's outside permitted regions. skip. raise AngrSkipJobNotice() # Do not calculate progress if the user doesn't care about the progress at all if self._show_progressbar or self._progress_callback: max_percentage_stage_1 = 50.0 percentage = self._seg_list.occupied_size * max_percentage_stage_1 / self._regions_size if percentage > max_percentage_stage_1: percentage = max_percentage_stage_1 self._update_progress(percentage, cfg=self)
Some pre job-processing tasks, like update progress bar. :param CFGJob job: The CFGJob instance. :return: None
def __get_doc_block_parts_wrapper(self): """ Generates the DocBlock parts to be used by the wrapper generator. """ self.__get_doc_block_parts_source() helper = self._get_data_type_helper() parameters = list() for parameter_info in self._parameters: parameters.append( {'parameter_name': parameter_info['name'], 'python_type': helper.column_type_to_python_type(parameter_info), 'data_type_descriptor': parameter_info['data_type_descriptor'], 'description': self.__get_parameter_doc_description(parameter_info['name'])}) self._doc_block_parts_wrapper['description'] = self._doc_block_parts_source['description'] self._doc_block_parts_wrapper['parameters'] = parameters
Generates the DocBlock parts to be used by the wrapper generator.
def deprecated(function): # pylint: disable=invalid-name """Decorator to mark functions or methods as deprecated.""" def IssueDeprecationWarning(*args, **kwargs): """Issue a deprecation warning.""" warnings.simplefilter('default', DeprecationWarning) warnings.warn('Call to deprecated function: {0:s}.'.format( function.__name__), category=DeprecationWarning, stacklevel=2) return function(*args, **kwargs) IssueDeprecationWarning.__name__ = function.__name__ IssueDeprecationWarning.__doc__ = function.__doc__ IssueDeprecationWarning.__dict__.update(function.__dict__) return IssueDeprecationWarning
Decorator to mark functions or methods as deprecated.
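Usage is the ordinary decorator pattern; the wrapped call still returns normally but emits a DeprecationWarning carrying the original function name:

@deprecated
def old_api(value):
    """Old entry point kept for backwards compatibility."""
    return value * 2

result = old_api(3)  # result == 6, plus a "Call to deprecated function: old_api." warning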
def _proc_sparse(self, tarfile): """Process a GNU sparse header plus extra headers. """ # We already collected some sparse structures in frombuf(). structs, isextended, origsize = self._sparse_structs del self._sparse_structs # Collect sparse structures from extended header blocks. while isextended: buf = tarfile.fileobj.read(BLOCKSIZE) pos = 0 for i in range(21): try: offset = nti(buf[pos:pos + 12]) numbytes = nti(buf[pos + 12:pos + 24]) except ValueError: break if offset and numbytes: structs.append((offset, numbytes)) pos += 24 isextended = bool(buf[504]) self.sparse = structs self.offset_data = tarfile.fileobj.tell() tarfile.offset = self.offset_data + self._block(self.size) self.size = origsize return self
Process a GNU sparse header plus extra headers.
def get(self, artifact): """Gets the coordinate with the correct version for the given artifact coordinate. :param M2Coordinate artifact: the coordinate to lookup. :return: a coordinate which is the same as the input, but with the correct pinned version. If this artifact set does not pin a version for the input artifact, this just returns the original coordinate. :rtype: M2Coordinate """ coord = self._key(artifact) if coord in self._artifacts_to_versions: return self._artifacts_to_versions[coord] return artifact
Gets the coordinate with the correct version for the given artifact coordinate. :param M2Coordinate artifact: the coordinate to lookup. :return: a coordinate which is the same as the input, but with the correct pinned version. If this artifact set does not pin a version for the input artifact, this just returns the original coordinate. :rtype: M2Coordinate
def backdate(res, date=None, as_datetime=False, fmt='%Y-%m-%d'):
    """ get past date based on current date """
    if res is None:
        return None

    if date is None:
        date = datetime.datetime.now()
    else:
        try:
            date = parse_date(date)
        except Exception as e:
            pass

    new_date = date

    periods = int("".join([s for s in res if s.isdigit()]))

    if periods > 0:
        if "K" in res:
            new_date = date - datetime.timedelta(microseconds=periods)
        elif "S" in res:
            new_date = date - datetime.timedelta(seconds=periods)
        elif "T" in res:
            new_date = date - datetime.timedelta(minutes=periods)
        elif "H" in res or "V" in res:
            new_date = date - datetime.timedelta(hours=periods)
        elif "W" in res:
            new_date = date - datetime.timedelta(weeks=periods)
        else:  # days
            new_date = date - datetime.timedelta(days=periods)

        # not a week day:
        while new_date.weekday() > 4:  # Mon-Fri are 0-4
            new_date = backdate(res="1D", date=new_date, as_datetime=True)

    if as_datetime:
        return new_date

    return new_date.strftime(fmt)
get past date based on current date
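A sketch of the period grammar the function accepts (digits plus a unit letter: K microseconds, S seconds, T minutes, H or V hours, W weeks, anything else days); results roll back to the previous weekday:

backdate('5D')                       # five days ago, as a 'YYYY-MM-DD' string
backdate('2W')                       # two weeks ago
backdate('3H', date='2019-01-10')    # '2019-01-09' (10 Jan 00:00 minus 3 hours)
backdate('1D', as_datetime=True)     # datetime object instead of a string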
def pause_and_wait_for_user(self, timeout=None, prompt_text='Click to resume (WebDriver is paused)'): """Injects a radio button into the page and waits for the user to click it; will raise an exception if the radio to resume is never checked @return: None """ timeout = timeout if timeout is not None else self.user_wait_timeout # Set the browser state paused self.paused = True def check_user_ready(driver): """Polls for the user to be "ready" (meaning they checked the checkbox) and the driver to be unpaused. If the checkbox is not displayed (e.g. user navigates the page), it will re-insert it into the page @type driver: WebDriverWrapper @param driver: Driver to execute @return: True if user is ready, false if not """ if driver.paused: if driver.is_user_ready(): # User indicated they are ready; free the browser lock driver.paused = False return True else: if not driver.is_present(Locator('css', '#webdriver-resume-radio', 'radio to unpause webdriver')): # Display the prompt pause_html = staticreader.read_html_file('webdriverpaused.html')\ .replace('\n', '')\ .replace('PROMPT_TEXT', prompt_text) webdriver_style = staticreader.read_css_file('webdriverstyle.css').replace('\n', '') # Insert the webdriver style driver.js_executor.execute_template_and_return_result( 'injectCssTemplate.js', {'css': webdriver_style}) # Insert the paused html driver.js_executor.execute_template_and_return_result( 'injectHtmlTemplate.js', {'selector': 'body', 'html': pause_html}) return False self.wait_until( lambda: check_user_ready(self), timeout=timeout, failure_message='Webdriver actions were paused but did not receive the command to continue. ' 'You must click the on-screen message to resume.' ) # Remove all injected elements self.js_executor.execute_template_and_return_result( 'deleteElementsTemplate.js', {'selector': '.webdriver-injected'} )
Injects a radio button into the page and waits for the user to click it; will raise an exception if the radio to resume is never checked @return: None
def verify_order(self, hostname, domain, location, hourly, flavor, router=None): """Verifies an order for a dedicated host. See :func:`place_order` for a list of available options. """ create_options = self._generate_create_dict(hostname=hostname, router=router, domain=domain, flavor=flavor, datacenter=location, hourly=hourly) return self.client['Product_Order'].verifyOrder(create_options)
Verifies an order for a dedicated host. See :func:`place_order` for a list of available options.
async def pulse(self, *args, **kwargs): """ Publish a Pulse Message Publish a message on pulse with the given `routingKey`. This method takes input: ``v1/pulse-request.json#`` This method is ``experimental`` """ return await self._makeApiCall(self.funcinfo["pulse"], *args, **kwargs)
Publish a Pulse Message Publish a message on pulse with the given `routingKey`. This method takes input: ``v1/pulse-request.json#`` This method is ``experimental``
def copy(self, name=None, description=None, meta=None): """ Create a copy of the current object (may alter the container's name, description, and update the metadata if needed). """ cls = self.__class__ kwargs = self._rel(copy=True) kwargs.update(self._data(copy=True)) if name is not None: kwargs['name'] = name if description is not None: kwargs['description'] = description if meta is not None: kwargs['meta'] = meta return cls(**kwargs)
Create a copy of the current object (may alter the container's name, description, and update the metadata if needed).
def _merge_objects(tref, merged, obj): """ Merge the snapshot size information of multiple tracked objects. The tracked object `obj` is scanned for size information at time `tref`. The sizes are merged into **Asized** instance `merged`. """ size = None for (timestamp, tsize) in obj.snapshots: if timestamp == tref: size = tsize if size: _merge_asized(merged, size)
Merge the snapshot size information of multiple tracked objects. The tracked object `obj` is scanned for size information at time `tref`. The sizes are merged into **Asized** instance `merged`.
def H(self, H):
    """
    Set the enthalpy of the package to the specified value, and
    recalculate its temperature.

    :param H: The new enthalpy value. [kWh]
    """
    self._H = H
    self._T = self._calculate_T(H)
Set the enthalpy of the package to the specified value, and recalculate its temperature. :param H: The new enthalpy value. [kWh]
def dict(self, **kwargs): """ Dictionary representation """ return dict( time = self.timestamp, event_data = self.event_data, event_type = self.event_type, partition = self.partition, report_code = self.report_code, event_prefix = self.event_prefix, event_source = self.event_source, event_status = self.event_status, event_code = hex(self.event_code), event_description = self.event_description, **kwargs )
Dictionary representation
def parse_unifrac_v1_8(unifrac, file_data):
    """
    Function to parse data from older version of unifrac file obtained from
    Qiime version 1.8 and earlier.

    :type unifrac: dict
    :param unifrac: The dictionary in which to store the parsed unifrac
                    results.

    :type file_data: list
    :param file_data: Unifrac data lines after stripping whitespace characters.
    """
    for line in file_data:
        if line == "":
            break
        line = line.split("\t")
        unifrac["pcd"][line[0]] = [float(e) for e in line[1:]]

    unifrac["eigvals"] = [float(entry) for entry in file_data[-2].split("\t")[1:]]
    unifrac["varexp"] = [float(entry) for entry in file_data[-1].split("\t")[1:]]
    return unifrac
Function to parse data from older version of unifrac file obtained from Qiime version 1.8 and earlier. :type unifrac: dict :param unifrac: The dictionary in which to store the parsed unifrac results. :type file_data: list :param file_data: Unifrac data lines after stripping whitespace characters.
def login(team=None): """ Authenticate. Launches a web browser and asks the user for a token. """ _check_team_id(team) _check_team_exists(team) _check_team_login(team) login_url = "%s/login" % get_registry_url(team) print("Launching a web browser...") print("If that didn't work, please visit the following URL: %s" % login_url) _open_url(login_url) print() refresh_token = input("Enter the code from the webpage: ") login_with_token(refresh_token, team)
Authenticate. Launches a web browser and asks the user for a token.
def normalize_key_phrases (path, ranks, stopwords=None, spacy_nlp=None, skip_ner=True): """ collect keyphrases, named entities, etc., while removing stop words """ global STOPWORDS, SPACY_NLP # set up the stop words if (type(stopwords) is list) or (type(stopwords) is set): # explicit conversion to a set, for better performance stopwords = set(stopwords) else: if not STOPWORDS: STOPWORDS = load_stopwords(stopwords) stopwords = STOPWORDS # set up the spaCy NLP parser if not spacy_nlp: if not SPACY_NLP: SPACY_NLP = spacy.load("en") spacy_nlp = SPACY_NLP # collect keyphrases single_lex = {} phrase_lex = {} if isinstance(path, str): path = json_iter(path) for meta in path: sent = [w for w in map(WordNode._make, meta["graf"])] for rl in collect_keyword(sent, ranks, stopwords): id = str(rl.ids) if id not in single_lex: single_lex[id] = rl else: prev_lex = single_lex[id] single_lex[id] = rl._replace(count = prev_lex.count + 1) if not skip_ner: for rl in collect_entities(sent, ranks, stopwords, spacy_nlp): id = str(rl.ids) if id not in phrase_lex: phrase_lex[id] = rl else: prev_lex = phrase_lex[id] phrase_lex[id] = rl._replace(count = prev_lex.count + 1) for rl in collect_phrases(sent, ranks, spacy_nlp): id = str(rl.ids) if id not in phrase_lex: phrase_lex[id] = rl else: prev_lex = phrase_lex[id] phrase_lex[id] = rl._replace(count = prev_lex.count + 1) # normalize ranks across single keywords and longer phrases: # * boost the noun phrases based on their length # * penalize the noun phrases for repeated words rank_list = [rl.rank for rl in single_lex.values()] if len(rank_list) < 1: max_single_rank = 0 else: max_single_rank = max(rank_list) repeated_roots = {} for rl in sorted(phrase_lex.values(), key=lambda rl: len(rl), reverse=True): rank_list = [] for i in iter(range(0, len(rl.ids))): id = rl.ids[i] if not id in repeated_roots: repeated_roots[id] = 1.0 rank_list.append(rl.rank[i]) else: repeated_roots[id] += 1.0 rank_list.append(rl.rank[i] / repeated_roots[id]) phrase_rank = calc_rms(rank_list) single_lex[str(rl.ids)] = rl._replace(rank = phrase_rank) # scale all the ranks together, so they sum to 1.0 sum_ranks = sum([rl.rank for rl in single_lex.values()]) for rl in sorted(single_lex.values(), key=lambda rl: rl.rank, reverse=True): if sum_ranks > 0.0: rl = rl._replace(rank=rl.rank / sum_ranks) elif rl.rank == 0.0: rl = rl._replace(rank=0.1) rl = rl._replace(text=re.sub(r"\s([\.\,\-\+\:\@])\s", r"\1", rl.text)) yield rl
collect keyphrases, named entities, etc., while removing stop words
def provides_defaults_for(self, rule: 'Rule', **values: Any) -> bool:
    """Returns true if this rule provides defaults for the given rule and values."""
    defaults_match = all(
        values[key] == self.defaults[key] for key in self.defaults if key in values  # noqa: S101, E501
    )
    return self != rule and bool(self.defaults) and defaults_match
Returns true if this rule provides defaults for the given rule and values.
def mutex(func):
    """Use a thread lock around the decorated method; the owning instance must define ``self.lock``."""
    def wrapper(*args, **kwargs):
        """Decorator Wrapper"""
        lock = args[0].lock
        lock.acquire(True)
        try:
            return func(*args, **kwargs)
        except:
            raise
        finally:
            lock.release()
    return wrapper
Use a thread lock around the decorated method; the owning instance must define ``self.lock``.
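A minimal sketch of the contract: the wrapper grabs args[0].lock, so the decorated callable must be a method on an object that defines one (the Counter class here is illustrative):

import threading

class Counter(object):
    def __init__(self):
        self.lock = threading.Lock()
        self.value = 0

    @mutex
    def increment(self):
        # executes with self.lock held; the finally clause releases it
        self.value += 1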
def address_exists(name,
                   addressname=None,
                   vsys=1,
                   ipnetmask=None,
                   iprange=None,
                   fqdn=None,
                   description=None,
                   commit=False):
    '''
    Ensures that an address object exists in the configured state. If it does not exist or is not configured with the
    specified attributes, it will be adjusted to match the specified values.

    This module will only process a single address type (ip-netmask, ip-range, or fqdn). It will process the specified
    value in the following order: ip-netmask, ip-range, fqdn. For proper execution, only specify a single address type.

    name: The name of the module function to execute.

    addressname(str): The name of the address object. The name is case-sensitive and can have up to 31 characters,
    which can be letters, numbers, spaces, hyphens, and underscores. The name must be unique on a firewall and, on
    Panorama, unique within its device group and any ancestor or descendant device groups.

    vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1.

    ipnetmask(str): The IPv4 or IPv6 address or IP address range using the format ip_address/mask or ip_address where
    the mask is the number of significant binary digits used for the network portion of the address. Ideally, for
    IPv6, you specify only the network portion, not the host portion.

    iprange(str): A range of addresses using the format ip_address–ip_address where both addresses can be IPv4 or
    both can be IPv6.

    fqdn(str): A fully qualified domain name format. The FQDN initially resolves at commit time. Entries are
    subsequently refreshed when the firewall performs a check every 30 minutes; all changes in the IP address for the
    entries are picked up at the refresh cycle.

    description(str): A description for the policy (up to 255 characters).

    commit(bool): If true the firewall will commit the changes, if false do not commit changes.

    SLS Example:

    .. code-block:: yaml

        panos/address/h-10.10.10.10:
            panos.address_exists:
              - addressname: h-10.10.10.10
              - vsys: 1
              - ipnetmask: 10.10.10.10
              - commit: False

        panos/address/10.0.0.1-10.0.0.50:
            panos.address_exists:
              - addressname: r-10.0.0.1-10.0.0.50
              - vsys: 1
              - iprange: 10.0.0.1-10.0.0.50
              - commit: False

        panos/address/foo.bar.com:
            panos.address_exists:
              - addressname: foo.bar.com
              - vsys: 1
              - fqdn: foo.bar.com
              - description: My fqdn object
              - commit: False

    '''
    ret = _default_ret(name)

    if not addressname:
        ret.update({'comment': "The address name field must be provided."})
        return ret

    # Check if address object currently exists
    address = __salt__['panos.get_address'](addressname, vsys)['result']

    if address and 'entry' in address:
        address = address['entry']
    else:
        address = {}

    element = ""

    # Verify the arguments
    if ipnetmask:
        element = "<ip-netmask>{0}</ip-netmask>".format(ipnetmask)
    elif iprange:
        element = "<ip-range>{0}</ip-range>".format(iprange)
    elif fqdn:
        element = "<fqdn>{0}</fqdn>".format(fqdn)
    else:
        ret.update({'comment': "A valid address type must be specified."})
        return ret

    if description:
        element += "<description>{0}</description>".format(description)

    full_element = "<entry name='{0}'>{1}</entry>".format(addressname, element)

    new_address = xml.to_dict(ET.fromstring(full_element), True)

    if address == new_address:
        ret.update({
            'comment': 'Address object already exists. No changes required.',
            'result': True
        })
        return ret
    else:
        xpath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/address/" \
                "entry[@name=\'{1}\']".format(vsys, addressname)

        result, msg = _edit_config(xpath, full_element)

        if not result:
            ret.update({
                'comment': msg
            })
            return ret

    if commit is True:
        ret.update({
            'changes': {'before': address, 'after': new_address},
            'commit': __salt__['panos.commit'](),
            'comment': 'Address object successfully configured.',
            'result': True
        })
    else:
        ret.update({
            'changes': {'before': address, 'after': new_address},
            'comment': 'Address object successfully configured.',
            'result': True
        })

    return ret
Ensures that an address object exists in the configured state. If it does not exist or is not configured with the specified attributes, it will be adjusted to match the specified values. This module will only process a single address type (ip-netmask, ip-range, or fqdn). It will process the specified value in the following order: ip-netmask, ip-range, fqdn. For proper execution, only specify a single address type. name: The name of the module function to execute. addressname(str): The name of the address object. The name is case-sensitive and can have up to 31 characters, which can be letters, numbers, spaces, hyphens, and underscores. The name must be unique on a firewall and, on Panorama, unique within its device group and any ancestor or descendant device groups. vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1. ipnetmask(str): The IPv4 or IPv6 address or IP address range using the format ip_address/mask or ip_address where the mask is the number of significant binary digits used for the network portion of the address. Ideally, for IPv6, you specify only the network portion, not the host portion. iprange(str): A range of addresses using the format ip_address–ip_address where both addresses can be IPv4 or both can be IPv6. fqdn(str): A fully qualified domain name format. The FQDN initially resolves at commit time. Entries are subsequently refreshed when the firewall performs a check every 30 minutes; all changes in the IP address for the entries are picked up at the refresh cycle. description(str): A description for the policy (up to 255 characters). commit(bool): If true the firewall will commit the changes, if false do not commit changes. SLS Example: .. code-block:: yaml panos/address/h-10.10.10.10: panos.address_exists: - addressname: h-10.10.10.10 - vsys: 1 - ipnetmask: 10.10.10.10 - commit: False panos/address/10.0.0.1-10.0.0.50: panos.address_exists: - addressname: r-10.0.0.1-10.0.0.50 - vsys: 1 - iprange: 10.0.0.1-10.0.0.50 - commit: False panos/address/foo.bar.com: panos.address_exists: - addressname: foo.bar.com - vsys: 1 - fqdn: foo.bar.com - description: My fqdn object - commit: False
def list_cards(self, *args, **kwargs):
    """
    List the cards of the customer.

    :param page: the page number
    :type page: int|None
    :param per_page: number of cards per page. It's a good practice to increase this number if you know that you
        will need a lot of cards.
    :type per_page: int|None

    :return: The cards of the customer
    :rtype: APIResourceCollection
    """
    return payplug.Card.list(self, *args, **kwargs)
List the cards of the customer. :param page: the page number :type page: int|None :param per_page: number of cards per page. It's a good practice to increase this number if you know that you will need a lot of cards. :type per_page: int|None :return: The cards of the customer :rtype: APIResourceCollection
def get_pages(self, include_draft=False): """ Get all custom pages (supported formats, excluding other files like '.js', '.css', '.html'). :param include_draft: return draft page or not :return: an iterable of Page objects """ def pages_generator(pages_root_path): for file_path in traverse_directory(pages_root_path, yield_dir=False): rel_path = os.path.relpath(file_path, pages_root_path) rel_path, ext = os.path.splitext(rel_path) if not ext or ext == '.' or get_standard_format_name( ext[1:]) is None: continue # pragma: no cover if rel_path.endswith(os.path.sep + 'index'): rel_path = rel_path[:-len('index')] else: rel_path += '.html' page = self.get_page(rel_path.replace(os.path.sep, '/'), include_draft=include_draft) if page is not None: yield page pages_path = os.path.join(current_app.instance_path, 'pages') return list(pages_generator(pages_path))
Get all custom pages (supported formats, excluding other files like '.js', '.css', '.html'). :param include_draft: return draft page or not :return: an iterable of Page objects
def threshold(self, messy_data, recall_weight=1.5): # pragma: no cover """ Returns the threshold that maximizes the expected F score, a weighted average of precision and recall for a sample of data. Arguments: messy_data -- Dictionary of records from messy dataset, where the keys are record_ids and the values are dictionaries with the keys being field names recall_weight -- Sets the tradeoff between precision and recall. I.e. if you care twice as much about recall as you do precision, set recall_weight to 2. """ blocked_pairs = self._blockData(messy_data) return self.thresholdBlocks(blocked_pairs, recall_weight)
Returns the threshold that maximizes the expected F score, a weighted average of precision and recall for a sample of data. Arguments: messy_data -- Dictionary of records from messy dataset, where the keys are record_ids and the values are dictionaries with the keys being field names recall_weight -- Sets the tradeoff between precision and recall. I.e. if you care twice as much about recall as you do precision, set recall_weight to 2.
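A hedged sketch against dedupe's classic API, assuming deduper is an already-trained Dedupe instance (training and the match() call are not shown in this record and may differ between dedupe versions):

# messy_data maps record_ids to field dictionaries
messy_data = {
    1: {'name': 'Jane Doe', 'city': 'Berlin'},
    2: {'name': 'Jane  Doe', 'city': 'Berlin'},
}
# weight recall twice as heavily as precision
t = deduper.threshold(messy_data, recall_weight=2.0)
clusters = deduper.match(messy_data, threshold=t)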
def get_child_books(self, book_id):
    """Gets the child books of the given ``id``.

    arg:    book_id (osid.id.Id): the ``Id`` of the ``Book`` to query
    return: (osid.commenting.BookList) - the child books of the ``id``
    raise:  NotFound - a ``Book`` identified by ``Id`` is not found
    raise:  NullArgument - ``book_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.get_child_bins
    if self._catalog_session is not None:
        return self._catalog_session.get_child_catalogs(catalog_id=book_id)
    return BookLookupSession(
        self._proxy,
        self._runtime).get_books_by_ids(
            list(self.get_child_book_ids(book_id)))
Gets the child books of the given ``id``. arg: book_id (osid.id.Id): the ``Id`` of the ``Book`` to query return: (osid.commenting.BookList) - the child books of the ``id`` raise: NotFound - a ``Book`` identified by ``Id`` is not found raise: NullArgument - ``book_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def list_extensions():
    '''
    List available extensions.

    Note:
        It may not work on some platforms/environments since it depends
        on the directory structure of the namespace packages.

    Returns: list of str
        Names of available extensions.

    '''
    import nnabla_ext.cpu
    from os.path import dirname, join, realpath
    from os import listdir
    ext_dir = realpath((join(dirname(nnabla_ext.cpu.__file__), '..')))
    return listdir(ext_dir)
List available extensions. Note: It may not work on some platforms/environments since it depends on the directory structure of the namespace packages. Returns: list of str Names of available extensions.
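Calling it requires nnabla (and at least the CPU extension package) to be installed; the names returned depend on which nnabla-ext packages are present:

for name in list_extensions():
    print(name)  # e.g. 'cpu', plus 'cuda' or 'cudnn' when those packages are installed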
def ReadTriggers(self, collection_link, options=None): """Reads all triggers in a collection. :param str collection_link: The link to the document collection. :param dict options: The request options for the request. :return: Query Iterable of Triggers. :rtype: query_iterable.QueryIterable """ if options is None: options = {} return self.QueryTriggers(collection_link, None, options)
Reads all triggers in a collection. :param str collection_link: The link to the document collection. :param dict options: The request options for the request. :return: Query Iterable of Triggers. :rtype: query_iterable.QueryIterable
def _handleSmsStatusReport(self, notificationLine): """ Handler for SMS status reports """ self.log.debug('SMS status report received') cdsiMatch = self.CDSI_REGEX.match(notificationLine) if cdsiMatch: msgMemory = cdsiMatch.group(1) msgIndex = cdsiMatch.group(2) report = self.readStoredSms(msgIndex, msgMemory) self.deleteStoredSms(msgIndex) # Update sent SMS status if possible if report.reference in self.sentSms: self.sentSms[report.reference].report = report if self._smsStatusReportEvent: # A sendSms() call is waiting for this response - notify waiting thread self._smsStatusReportEvent.set() else: # Nothing is waiting for this report directly - use callback self.smsStatusReportCallback(report)
Handler for SMS status reports
def ndim(self): """Number of dimensions of the grid.""" try: return self.__ndim except AttributeError: ndim = len(self.coord_vectors) self.__ndim = ndim return ndim
Number of dimensions of the grid.
def remove(args):
    """ Remove the feed given in <args> """
    session = c.Session(args)
    if not args["name"] in session.feeds:
        sys.exit("You don't have a feed with that name.")
    inputtext = ("Are you sure you want to remove the {} "
                 "feed? (y/N) ").format(args["name"])
    reply = input(inputtext)
    if reply != "y" and reply != "Y":
        return 0
    else:
        session.feeds.remove_section(args["name"])
        with open(session.data_filename, 'w') as configfile:
            session.feeds.write(configfile)
        try:
            os.remove(os.path.join(session.data_dir, args["name"]))
        except FileNotFoundError:
            pass
Remove the feed given in <args>