code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def midi_inputs(self):
    """Return the list of physical MIDI input :class:`Ports`."""
    # Select only ports that are MIDI-capable, physical, and inputs.
    port_filter = dict(is_midi=True, is_physical=True, is_input=True)
    return self.client.get_ports(**port_filter)
:return: A list of MIDI input :class:`Ports`.
def full(self):
    """Return True if the queue is full, False otherwise (not reliable!)."""
    self.mutex.acquire()
    size_limit = self.maxsize
    # A maxsize of zero means "unbounded", so the queue is never full then.
    is_full = size_limit > 0 and size_limit == self._qsize()
    self.mutex.release()
    return is_full
Return True if the queue is full, False otherwise (not reliable!).
def partition(cls, iterable, pred):
    """Use a predicate to partition items into false and true entries."""
    # tee() lets both filters consume the same underlying iterable.
    false_branch, true_branch = itertools.tee(iterable)
    falses = itertools.filterfalse(pred, false_branch)
    trues = filter(pred, true_branch)
    return cls(falses, trues)
Use a predicate to partition items into false and true entries.
def finish_plot():
    """Helper for plotting: attach legend, grid and axis labels, then show."""
    plt.legend()
    plt.grid(color='0.7')  # light grey grid lines
    for set_label, text in ((plt.xlabel, 'x'), (plt.ylabel, 'y')):
        set_label(text)
    plt.show()
Helper for plotting.
def load_network(self, layers=1):
    """Given an Ethernet frame, determine the appropriate sub-protocol.

    If ``layers`` is greater than zero, determine the type of the payload
    and load the appropriate type of network packet. It is expected that
    the payload be a hexified string. The ``layers`` argument determines
    how many layers to descend while parsing the packet.
    """
    # Fixes: dead `ctor = ctor` self-assignment and a needless payload
    # temporary removed; the no-op else branch collapsed into a guard.
    if not layers:
        return
    ctor = payload_type(self.type)[0]
    # If no constructor is registered for this type, leave the packet
    # untouched (same behavior as the original fall-through).
    if ctor:
        self.payload = ctor(self.payload, layers - 1)
Given an Ethernet frame, determine the appropriate sub-protocol; If layers is greater than zerol determine the type of the payload and load the appropriate type of network packet. It is expected that the payload be a hexified string. The layers argument determines how many layers to descend while parsing the packet.
def remove_symbol_add_symbol(string_item, remove_symbol, add_symbol):
    """Replace every occurrence of one symbol in a string with another.

    Args:
        string_item: String that you want to replace symbols in.
        remove_symbol: Symbol to remove.
        add_symbol: Symbol to add in its place.

    Returns:
        A string with all occurrences of ``remove_symbol`` replaced by
        ``add_symbol``.
    """
    # str.replace is the idiomatic (and faster) equivalent of
    # add_symbol.join(string_item.split(remove_symbol)).
    return string_item.replace(remove_symbol, add_symbol)
Remove a symbol from a string, and replace it with a different one Args: string_item: String that you want to replace symbols in remove_symbol: Symbol to remove add_symbol: Symbol to add Returns: returns a string with symbols swapped
def GenomicRangeFromString(range_string, payload=None, dir=None):
    """Constructor for a GenomicRange object that takes a string.

    :param range_string: region in ``chrom:start-end`` form
    :param payload: optional payload forwarded to GenomicRange
    :param dir: optional direction forwarded to GenomicRange
    """
    # Raw string fixes the invalid "\d" escape-sequence warning.
    m = re.match(r'^(.+):(\d+)-(\d+)$', range_string)
    if not m:
        sys.stderr.write("ERROR bad genomic range string\n" + range_string + "\n")
        sys.exit()
    # Renamed local to avoid shadowing the builtin chr().
    chrom = m.group(1)
    start = int(m.group(2))
    end = int(m.group(3))
    return GenomicRange(chrom, start, end, payload, dir)
Constructor for a GenomicRange object that takes a string
def authAddress(val):
    """Extract author addresses from the raw C1 tag values of a WOS file.

    **Warning** the mapping of author to address is not very good and is
    given in multiple ways.

    # Parameters

    _val_: `list[str]`

    > The raw data from a WOS file

    # Returns

    `list[str]`

    > A list of addresses
    """
    ret = []
    for entry in val:
        if entry[0] == '[':
            # Drop the leading "[Author] " prefix; str.partition is the
            # idiomatic equivalent of "'] '.join(entry.split('] ')[1:])".
            ret.append(entry.partition('] ')[2])
        else:
            ret.append(entry)
    return ret
# The C1 Tag extracts the address of the authors as given by WOS. **Warning** the mapping of author to address is not very good and is given in multiple ways. # Parameters _val_: `list[str]` > The raw data from a WOS file # Returns `list[str]` > A list of addresses
def run_chunk(environ, lowstate):
    '''
    Expects a list of lowstate dictionaries that are executed and
    returned in order
    '''
    api_client = environ['SALT_APIClient']
    # Generator: each chunk is executed lazily, in order, as consumed.
    for low_chunk in lowstate:
        yield api_client.run(low_chunk)
Expects a list of lowstate dictionaries that are executed and returned in order
def parse_match_info(self, req: Request, name: str, field: Field) -> typing.Any:
    """Pull a value from the request's ``match_info``."""
    match_info = req.match_info
    return core.get_value(match_info, name, field)
Pull a value from the request's ``match_info``.
def _validate(self):
    """Run all field validators and store the collected error messages."""
    collected = {}
    for field_name, check in self._validators.items():
        field_value = getattr(self, field_name)
        try:
            check(self, field_value)
        except ValidationError as err:
            # Keep only the message text, keyed by field name.
            collected[field_name] = str(err)
    self._validate_errors = collected
Validate model data and save errors
def is_valid(cls, oid):
    """Checks if a `oid` string is valid or not.

    :Parameters:
      - `oid`: the object id to validate

    .. versionadded:: 2.3
    """
    # Falsy values (None, empty string) can never be valid ids.
    if not oid:
        return False
    try:
        ObjectId(oid)
    except (InvalidId, TypeError):
        return False
    return True
Checks if a `oid` string is valid or not. :Parameters: - `oid`: the object id to validate .. versionadded:: 2.3
def _parse_use(self, string):
    """Extracts use dependencies from the innertext of a module."""
    deps = {}
    for match in self.RE_USE.finditer(string):
        # Strip any trailing inline comment from the module name; the
        # "only" section won't pick up any comments.
        modname = match.group("name").split("!")[0].strip()
        if modname.lower() == "mpi":
            continue
        only_spec = match.group("only")
        if not only_spec:
            self._dict_increment(deps, modname)
            continue
        # Count each "only: a, b, c" member as module.member.
        for member in only_spec.split(","):
            self._dict_increment(deps, "{}.{}".format(modname, member.strip()))
    return deps
Extracts use dependencies from the innertext of a module.
def make_coursera_absolute_url(url):
    """
    If given url is relative adds coursera netloc,
    otherwise returns it without any changes.
    """
    # urlparse(...).netloc is an empty string for relative URLs; the
    # previous explicit bool() wrapper was redundant.
    if not urlparse(url).netloc:
        return urljoin(COURSERA_URL, url)
    return url
If given url is relative adds coursera netloc, otherwise returns it without any changes.
def load(self, name, *, arguments=None, validate_arguments=True, strict_dag=False):
    """ Import the workflow script and load all known objects.

    The workflow script is treated like a module and imported
    into the Python namespace. After the import, the method looks
    for instances of known classes and stores a reference for further
    use in the workflow object.

    Args:
        name (str): The name of the workflow script.
        arguments (dict): Dictionary of additional arguments that are ingested
                          into the data store prior to the execution of the workflow.
        validate_arguments (bool): Whether to check that all required arguments have
                                   been supplied.
        strict_dag (bool): If true then the loaded workflow module must contain an
                           instance of Dag.

    Raises:
        WorkflowArgumentError: If the workflow requires arguments to be set that
                               were not supplied to the workflow.
        WorkflowImportError: If the import of the workflow fails.
    """
    # Normalize the mutable default without using a mutable default argument.
    arguments = {} if arguments is None else arguments
    try:
        workflow_module = importlib.import_module(name)
        dag_present = False
        # extract objects of specific types from the workflow module
        for key, obj in workflow_module.__dict__.items():
            if isinstance(obj, Dag):
                # Each Dag found becomes a blueprint keyed by its own name.
                self._dags_blueprint[obj.name] = obj
                dag_present = True
            elif isinstance(obj, Parameters):
                self._parameters.extend(obj)
        self._name = name
        self._docstring = inspect.getdoc(workflow_module)
        # Drop the module from sys.modules so a later load() re-imports it
        # fresh rather than reusing the cached module object.
        del sys.modules[name]
        if strict_dag and not dag_present:
            raise WorkflowImportError(
                'Workflow does not include a dag {}'.format(name))
        if validate_arguments:
            missing_parameters = self._parameters.check_missing(arguments)
            if len(missing_parameters) > 0:
                raise WorkflowArgumentError(
                    'The following parameters are required ' +
                    'by the workflow, but are missing: {}'.format(
                        ', '.join(missing_parameters)))
        self._provided_arguments = arguments
    except (TypeError, ImportError):
        # Any import failure is surfaced uniformly as a WorkflowImportError.
        logger.error('Cannot import workflow {}'.format(name))
        raise WorkflowImportError('Cannot import workflow {}'.format(name))
Import the workflow script and load all known objects. The workflow script is treated like a module and imported into the Python namespace. After the import, the method looks for instances of known classes and stores a reference for further use in the workflow object. Args: name (str): The name of the workflow script. arguments (dict): Dictionary of additional arguments that are ingested into the data store prior to the execution of the workflow. validate_arguments (bool): Whether to check that all required arguments have been supplied. strict_dag (bool): If true then the loaded workflow module must contain an instance of Dag. Raises: WorkflowArgumentError: If the workflow requires arguments to be set that were not supplied to the workflow. WorkflowImportError: If the import of the workflow fails.
def request(self, message, timeout=False, *args, **kwargs):
    """Populate connection pool, send message, return BytesIO, and cleanup."""
    pool = self.connection_pool
    # Top the pool up with a fresh socket unless it is already full.
    if not pool.full():
        pool.put(self._register_socket())
    sock = pool.get()
    # The sentinel False means "leave the socket as-is"; any other value
    # is applied (None makes the socket block).
    if timeout is None or timeout:
        sock.settimeout(timeout)
    payload = self.send_and_receive(sock, message, *args, **kwargs)
    # Stream-oriented protocols need an explicit shutdown.
    if self.connection.proto in Socket.streams:
        sock.shutdown(socket.SHUT_RDWR)
    return Response(payload, None, None)
Populate connection pool, send message, return BytesIO, and cleanup
def setNonExpert(self):
    """Turns off 'expert' status whereby to allow a button to be disabled."""
    self._expert = False
    # Re-apply the enabled/disabled state now that expert mode is off.
    (self.enable if self._active else self.disable)()
Turns off 'expert' status whereby to allow a button to be disabled
def sample(self, frame):
    """Samples the given frame."""
    call_stack = self.frame_stack(frame)
    if call_stack:
        # Drop the innermost frame; it is recorded separately below.
        call_stack.pop()
    node = self.stats
    # Walk (creating as needed) the stats tree along the outer frames.
    for outer in call_stack:
        node = node.ensure_child(outer.f_code, void)
    leaf = node.ensure_child(frame.f_code, RecordingStatistics)
    leaf.own_hits += 1
Samples the given frame.
def crypto_box(message, nonce, pk, sk):
    """
    Encrypts and returns a message ``message`` using the secret key ``sk``,
    public key ``pk``, and the nonce ``nonce``.

    :param message: bytes
    :param nonce: bytes
    :param pk: bytes
    :param sk: bytes
    :rtype: bytes
    :raises exc.ValueError: if the nonce or either key has the wrong length
    :raises exc.RuntimeError: if the underlying library call fails
    """
    # Validate input sizes up front so the C call only ever sees
    # correctly-sized buffers.
    if len(nonce) != crypto_box_NONCEBYTES:
        raise exc.ValueError("Invalid nonce size")
    if len(pk) != crypto_box_PUBLICKEYBYTES:
        raise exc.ValueError("Invalid public key")
    if len(sk) != crypto_box_SECRETKEYBYTES:
        raise exc.ValueError("Invalid secret key")
    # NaCl convention: the plaintext is prefixed with ZEROBYTES zero bytes
    # before encryption.
    padded = (b"\x00" * crypto_box_ZEROBYTES) + message
    # Output buffer is the same length as the padded input.
    ciphertext = ffi.new("unsigned char[]", len(padded))
    rc = lib.crypto_box(ciphertext, padded, len(padded), nonce, pk, sk)
    ensure(rc == 0, 'Unexpected library error', raising=exc.RuntimeError)
    # Strip the leading BOXZEROBYTES zeros from the raw output buffer.
    return ffi.buffer(ciphertext, len(padded))[crypto_box_BOXZEROBYTES:]
Encrypts and returns a message ``message`` using the secret key ``sk``, public key ``pk``, and the nonce ``nonce``. :param message: bytes :param nonce: bytes :param pk: bytes :param sk: bytes :rtype: bytes
def restore_from_cluster_snapshot(self, cluster_identifier, snapshot_identifier):
    """
    Restores a cluster from its snapshot

    :param cluster_identifier: unique identifier of a cluster
    :type cluster_identifier: str
    :param snapshot_identifier: unique identifier for a snapshot of a cluster
    :type snapshot_identifier: str
    """
    conn = self.get_conn()
    response = conn.restore_from_cluster_snapshot(
        ClusterIdentifier=cluster_identifier,
        SnapshotIdentifier=snapshot_identifier,
    )
    cluster = response['Cluster']
    # Falsy cluster payloads are normalized to None.
    return cluster if cluster else None
Restores a cluster from its snapshot :param cluster_identifier: unique identifier of a cluster :type cluster_identifier: str :param snapshot_identifier: unique identifier for a snapshot of a cluster :type snapshot_identifier: str
def get_credit_card_payment_by_id(cls, credit_card_payment_id, **kwargs):
    """Find CreditCardPayment

    Return single instance of CreditCardPayment by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_credit_card_payment_by_id(credit_card_payment_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str credit_card_payment_id: ID of creditCardPayment to return (required)
    :return: CreditCardPayment
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    http_call = cls._get_credit_card_payment_by_id_with_http_info
    # Asynchronous mode hands back the request thread directly.
    if kwargs.get('async'):
        return http_call(credit_card_payment_id, **kwargs)
    data = http_call(credit_card_payment_id, **kwargs)
    return data
Find CreditCardPayment Return single instance of CreditCardPayment by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_credit_card_payment_by_id(credit_card_payment_id, async=True) >>> result = thread.get() :param async bool :param str credit_card_payment_id: ID of creditCardPayment to return (required) :return: CreditCardPayment If the method is called asynchronously, returns the request thread.
def demonstrate_colored_logging():
    """Interactively demonstrate the :mod:`coloredlogs` package."""
    # Determine the available logging levels and order them by numeric value.
    normalizer = coloredlogs.NameNormalizer()
    seen = []
    for level_name, level_no in coloredlogs.find_defined_levels().items():
        if level_name == 'NOTSET':
            continue
        entry = (level_no, normalizer.normalize_name(level_name))
        if entry not in seen:
            seen.append(entry)
    ordered_levels = sorted(seen)
    # Default to the most verbose level but let the user override it via
    # the COLOREDLOGS_LOG_LEVEL environment variable.
    coloredlogs.install(
        level=os.environ.get('COLOREDLOGS_LOG_LEVEL', ordered_levels[0][1]))
    # Emit an example message at every level, most verbose first.
    for level_no, level_name in ordered_levels:
        log_method = getattr(logger, level_name, None)
        if log_method:
            log_method("message with level %s (%i)", level_name, level_no)
            time.sleep(DEMO_DELAY)
Interactively demonstrate the :mod:`coloredlogs` package.
def row(self, *args):
    """
    Adds a list of KeyboardButton to the keyboard. This function does not
    consider row_width.

    ReplyKeyboardMarkup#row("A")#row("B", "C")#to_json() outputs
    '{keyboard: [["A"], ["B", "C"]]}'
    See https://core.telegram.org/bots/api#inlinekeyboardmarkup

    :param args: strings
    :return: self, to allow function chaining.
    """
    # Comprehension replaces the manual append loop (same order, same calls).
    self.keyboard.append([button.to_dic() for button in args])
    return self
Adds a list of KeyboardButton to the keyboard. This function does not consider row_width. ReplyKeyboardMarkup#row("A")#row("B", "C")#to_json() outputs '{keyboard: [["A"], ["B", "C"]]}' See https://core.telegram.org/bots/api#inlinekeyboardmarkup :param args: strings :return: self, to allow function chaining.
def analysis_question_report(feature, parent):
    """Retrieve the analysis question section from InaSAFE report."""
    _ = feature, parent  # NOQA
    project_scope = QgsExpressionContextUtils.projectScope()
    key = provenance_layer_analysis_impacted['provenance_key']
    # Without the provenance variable there is no report to read.
    if not project_scope.hasVariable(key):
        return None
    report_dir = dirname(project_scope.variable(key))
    full_report = get_impact_report_as_string(report_dir)
    return get_report_section(
        full_report,
        component_id=analysis_question_component['key'])
Retrieve the analysis question section from InaSAFE report.
def info():
    '''
    Return information about the license, if the license is not correctly
    activated this will return None.

    CLI Example:

    .. code-block:: bash

        salt '*' license.info
    '''
    out = __salt__['cmd.run'](r'cscript C:\Windows\System32\slmgr.vbs /dli')
    pattern = (r'Name: (.*)\r\nDescription: (.*)\r\n'
               r'Partial Product Key: (.*)\r\nLicense Status: (.*)')
    match = re.search(pattern, out, re.MULTILINE)
    if match is None:
        return None
    name, description, partial_key, status = match.groups()
    return {
        'name': name,
        'description': description,
        'partial_key': partial_key,
        'licensed': 'Licensed' in status,
    }
Return information about the license, if the license is not correctly activated this will return None. CLI Example: .. code-block:: bash salt '*' license.info
def handle_read_value(self, buff, start, end):
    '''
    handle read of the value based on the expected length

    :param buff: buffer holding the raw bytes to parse
    :param start: start offset of the value within ``buff``
    :param end: end offset of the value within ``buff``
    '''
    # segmenttype drives which parsing branch applies to this value.
    segmenttype = self._state[1].value.segmenttype
    value = None
    eventtype = None
    ftype = self._state[0]
    # parsing value
    if segmenttype <= SegmentType.VARIABLE_LENGTH_VALUE:
        # Plain (possibly variable-length) value: advance the scanner
        # state past the raw bytes, then decode them.
        self._scstate = self.next_state_afterraw()
        value = self.parse_value(self._state[0], buff, start, end)
        eventtype = EventType.VALUE
    # next we should expect length
    elif segmenttype >= SegmentType.EXT_FORMAT:
        # Extension value: decode with the ext subtype stored in
        # self._state[4] and wrap the type accordingly.
        value = self.parse_ext_value(self._state[0], self._state[4], buff, start, end)
        eventtype = EventType.EXT
        ftype = ExtType(self._state[0], self._state[4])
    else:
        # Any other segment type here means the state machine is corrupt.
        raise InvalidStateException(self._scstate, "header")
    self.events.append((self.value_event_type(eventtype), ftype, value))
handle read of the value based on the expected length :param buff: :param start: :param end:
def select_resample_op(da, op, freq="YS", **indexer):
    """Apply operation over each period that is part of the index selection.

    Parameters
    ----------
    da : xarray.DataArray
      Input data.
    op : str {'min', 'max', 'mean', 'std', 'var', 'count', 'sum', 'argmax', 'argmin'} or func
      Reduce operation. Can either be a DataArray method or a function
      that can be applied to a DataArray.
    freq : str
      Resampling frequency defining the periods defined in
      http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling.
    **indexer : {dim: indexer, }, optional
      Time attribute and values over which to subset the array. For example,
      use season='DJF' to select winter values, month=1 to select January, or
      month=[6,7,8] to select summer months. If not indexer is given, all
      values are considered.

    Returns
    -------
    xarray.DataArray
      The maximum value for each period.
    """
    selected = select_time(da, **indexer)
    resampled = selected.resample(time=freq, keep_attrs=True)
    # A string names a reduction method on the resampler; anything else is
    # treated as a callable to apply per group.
    if isinstance(op, str):
        reducer = getattr(resampled, op)
        return reducer(dim='time', keep_attrs=True)
    return resampled.apply(op)
Apply operation over each period that is part of the index selection. Parameters ---------- da : xarray.DataArray Input data. op : str {'min', 'max', 'mean', 'std', 'var', 'count', 'sum', 'argmax', 'argmin'} or func Reduce operation. Can either be a DataArray method or a function that can be applied to a DataArray. freq : str Resampling frequency defining the periods defined in http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling. **indexer : {dim: indexer, }, optional Time attribute and values over which to subset the array. For example, use season='DJF' to select winter values, month=1 to select January, or month=[6,7,8] to select summer months. If not indexer is given, all values are considered. Returns ------- xarray.DataArray The maximum value for each period.
def links(self):
    """
    list: List of all MediaWiki page links on the page

    Note:
        Not settable
    """
    # Lazily populate the cache on first access.
    if self._links is None:
        self._links = []
        self.__pull_combined_properties()
    return self._links
list: List of all MediaWiki page links on the page Note: Not settable
def setup_table(self):
    """Set up the table: header stretch, column sizing and sorting."""
    self.horizontalHeader().setStretchLastSection(True)
    self.adjust_columns()
    # Enable sorting, defaulting to ascending order on the first column.
    self.setSortingEnabled(True)
    self.sortByColumn(0, Qt.AscendingOrder)
Setup table
def run_job(self):
    """Execute a logdissect job.

    Runs the full pipeline in order: load plugin modules, parse inputs,
    merge the parsed data, filter, and produce output. A Ctrl-C exits
    with status 1.
    """
    try:
        # Load all plugin modules and resolve CLI configuration first.
        self.load_parsers()
        self.load_filters()
        self.load_outputs()
        self.config_args()
        if self.args.list_parsers:
            self.list_parsers()
        if self.args.verbosemode:
            print('Loading input files')
        self.load_inputs()
        if self.args.verbosemode:
            print('Running parsers')
        self.run_parse()
        if self.args.verbosemode:
            print('Merging data')
        # Merge every parsed log into one time-sorted data set.
        self.data_set['finalized_data'] = \
            logdissect.utils.merge_logs(
                self.data_set['data_set'], sort=True)
        if self.args.verbosemode:
            print('Running filters')
        self.run_filters()
        if self.args.verbosemode:
            print('Running output')
        self.run_output()
    except KeyboardInterrupt:
        # Treat Ctrl-C as a clean abort rather than a traceback.
        sys.exit(1)
Execute a logdissect job
def copy_selection(self, _cut=False):
    """Copy selected text and return :class:`.ClipboardData` instance."""
    remaining_document, clip_data = self.document.cut_selection()
    if _cut:
        # Cutting commits the document without the selection and clears
        # the selection state; a plain copy leaves both untouched.
        self.document = remaining_document
        self.selection_state = None
    return clip_data
Copy selected text and return :class:`.ClipboardData` instance.
def parse_arg(arg):
    """
    Parses arguments for convenience. Argument can be a csv list
    ('a,b,c'), a string, a list, a tuple.

    Returns a list.
    """
    # isinstance is the idiomatic type check; type(arg) == str would also
    # miss str subclasses.
    if isinstance(arg, str):
        arg = arg.strip()
        # parse csv as tickers and create children
        if ',' in arg:
            arg = [part.strip() for part in arg.split(',')]
        # assume single string - create single item list
        else:
            arg = [arg]
    return arg
Parses arguments for convenience. Argument can be a csv list ('a,b,c'), a string, a list, a tuple. Returns a list.
def describe_api_model(restApiId, modelName, flatten=True, region=None, key=None, keyid=None, profile=None):
    '''
    Get a model by name for a given API

    CLI Example:

    .. code-block:: bash

        salt myminion boto_apigateway.describe_api_model restApiId modelName [True]
    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        found = conn.get_model(restApiId=restApiId, modelName=modelName,
                               flatten=flatten)
        return {'model': _convert_datetime_str(found)}
    except ClientError as e:
        # Surface boto errors in the standard salt error shape.
        return {'error': __utils__['boto3.get_error'](e)}
Get a model by name for a given API CLI Example: .. code-block:: bash salt myminion boto_apigateway.describe_api_model restApiId modelName [True]
def createMappings(self, name, body, verbose=None):
    """Create a new Visual Mapping function and add it to the Visual Style
    named by `name`. Existing mappings in the Visual Style will be
    overridden by the new mappings created.

    Cytoscape supports discrete, continuous and passthrough mappings; the
    data format for each is explained in depth at
    http://manual.cytoscape.org/en/stable/Styles.html#how-mappings-work
    and in the Cytoscape JavaDoc API for DiscreteMapping,
    ContinuousMapping and PassthroughMapping. Common Visual Properties are
    listed in the BasicVisualLexicon JavaDoc API.

    :param name: Name of the Visual Style
    :param body: A list of new mappings.
    :param verbose: print more
    :returns: default: successful operation
    """
    params = set_param(['name', 'body'], [name, body])
    endpoint = self.___url + 'styles/' + str(name) + '/mappings'
    return api(url=endpoint, PARAMS=params, method="POST", verbose=verbose)
Create a new Visual Mapping function and add it to the Visual Style specified by the `name` parameter. Existing mappings in the Visual Style will be overidden by the new mappings created. The types of mapping available in Cytoscape are explained in depth [here](http://manual.cytoscape.org/en/stable/Styles.html#how-mappings-work). An example of the data format for each is included below. For additional details, such as what Visual Properties supported by each Mapping, click on the relevant JavaDoc API link. #### Discrete Mapping [JavaDoc API](http://chianti.ucsd.edu/cytoscape-3.6.1/API/org/cytoscape/view/vizmap/mappings/DiscreteMapping.html) ``` { "mappingType": "discrete", "mappingColumn": "interaction", "mappingColumnType": "String", "visualProperty": "EDGE_WIDTH", "map": [ { "key" : "pd", "value" : "20" }, { "key" : "pp", "value" : "1.5" } ] }``` #### Continuous Mapping [JavaDoc API](http://chianti.ucsd.edu/cytoscape-3.6.1/API/org/cytoscape/view/vizmap/mappings/ContinuousMapping.html) ``` { "mappingType": "continuous", "mappingColumn": "Degree", "mappingColumnType": "Integer", "visualProperty": "NODE_SIZE", "points": [ { "value" : 1, "lesser" : "20", "equal" : "20", "greater" : "20" }, { "value" : 20, "lesser" : "120", "equal" : "120", "greater" : "220" } ] }``` #### Passthrough Mapping [JavaDoc API](http://chianti.ucsd.edu/cytoscape-3.6.1/API/org/cytoscape/view/vizmap/mappings/PassthroughMapping.html) ``` { "mappingType": "passthrough", "mappingColumn": "name", "mappingColumnType": "String", "visualProperty": "EDGE_LABEL" }``` Additional details on common Visual Properties can be found in the [Basic Visual Lexicon JavaDoc API](http://chianti.ucsd.edu/cytoscape-3.6.1/API/org/cytoscape/view/presentation/property/BasicVisualLexicon.html) :param name: Name of the Visual Style :param body: A list of new mappings. :param verbose: print more :returns: default: successful operation
def _compare_across(collections, key): """Return whether all the collections return equal values when called with `key`.""" if len(collections) < 2: return True c0 = key(collections[0]) return all(c0 == key(c) for c in collections[1:])
Return whether all the collections return equal values when called with `key`.
def _Open(self, path_spec, mode='rb'):
    """Opens the file system defined by path specification.

    Args:
      path_spec (PathSpec): a path specification.
      mode (Optional[str]): file access mode. The default is 'rb' which
          represents read-only binary.

    Raises:
      AccessError: if the access to open the file was denied.
      IOError: if the file system could not be opened.
      PathSpecError: if the path specification is incorrect.
      ValueError: if the path specification is invalid.
    """
    if not path_spec.HasParent():
        raise errors.PathSpecError(
            'Unsupported path specification without parent.')
    method = getattr(path_spec, 'compression_method', None)
    if not method:
        raise errors.PathSpecError(
            'Unsupported path specification without compression method.')
    self._compression_method = method
Opens the file system defined by path specification. Args: path_spec (PathSpec): a path specification. mode (Optional[str]): file access mode. The default is 'rb' which represents read-only binary. Raises: AccessError: if the access to open the file was denied. IOError: if the file system could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid.
def get_primary_at(source_code, offset, retry=True):
    """Return Python object in *source_code* at *offset*

    Periods to the left of the cursor are carried forward
    e.g. 'functools.par^tial' would yield 'functools.partial'

    Retry prevents infinite recursion: retry only once
    """
    identifier = ''
    # Grab the dotted-name fragment ending at the cursor...
    before = re.split(r"[^0-9a-zA-Z_.]", source_code[:offset])
    if before and before[-1]:
        identifier = before[-1]
    # ...and the word fragment starting at the cursor.
    after = re.split(r"\W", source_code[offset:])
    if after and after[0]:
        identifier += after[0]
    # A leading digit cannot start an identifier.
    if identifier and identifier[0].isdigit():
        identifier = ''
    # account for opening chars with no text to the right
    if not identifier and retry and offset and source_code[offset - 1] in '([.':
        return get_primary_at(source_code, offset - 1, retry=False)
    return identifier
Return Python object in *source_code* at *offset* Periods to the left of the cursor are carried forward e.g. 'functools.par^tial' would yield 'functools.partial' Retry prevents infinite recursion: retry only once
def book(self, name):
    """Return an API wrapper for the given order book.

    :param name: Order book name (e.g. "btc_cad").
    :type name: str | unicode
    :return: Order book API wrapper.
    :rtype: quadriga.book.OrderBook
    :raise InvalidOrderBookError: If an invalid order book is given.

    **Example**:

    .. doctest::

        >>> from quadriga import QuadrigaClient
        >>>
        >>> client = QuadrigaClient()
        >>>
        >>> eth = client.book('eth_cad').get_ticker()  # doctest:+ELLIPSIS
        >>> btc = client.book('btc_cad').get_ticker()  # doctest:+ELLIPSIS
    """
    # Validation raises before any wrapper is constructed.
    self._validate_order_book(name)
    wrapper = OrderBook(name, self._rest_client, self._logger)
    return wrapper
Return an API wrapper for the given order book. :param name: Order book name (e.g. "btc_cad"). :type name: str | unicode :return: Order book API wrapper. :rtype: quadriga.book.OrderBook :raise InvalidOrderBookError: If an invalid order book is given. **Example**: .. doctest:: >>> from quadriga import QuadrigaClient >>> >>> client = QuadrigaClient() >>> >>> eth = client.book('eth_cad').get_ticker() # doctest:+ELLIPSIS >>> btc = client.book('btc_cad').get_ticker() # doctest:+ELLIPSIS
def ion_equals(a, b, timestamps_instants_only=False):
    """Tests two objects for equivalence under the Ion data model.

    There are three important cases:
        * When neither operand specifies its `ion_type` or `annotations`, this
          method will only return True when the values of both operands are
          equivalent under the Ion data model.
        * When only one of the operands specifies its `ion_type` and
          `annotations`, this method will only return True when that operand
          has no annotations and has a value equivalent to the other operand
          under the Ion data model.
        * When both operands specify `ion_type` and `annotations`, this method
          will only return True when the ion_type and annotations of both are
          the same and their values are equivalent under the Ion data model.

    Note that the order of the operands does not matter.

    Args:
        a (object): The first operand.
        b (object): The second operand.
        timestamps_instants_only (Optional[bool]): False if timestamp objects
            (datetime and its subclasses) should be compared according to the
            Ion data model (where the instant, precision, and offset must be
            equal); True if these objects should be considered equivalent if
            they simply represent the same instant.
    """
    # Dispatch to the comparison strategy matching the timestamp semantics.
    compare = (_ion_equals_timestamps_instants if timestamps_instants_only
               else _ion_equals_timestamps_data_model)
    return compare(a, b)
Tests two objects for equivalence under the Ion data model. There are three important cases: * When neither operand specifies its `ion_type` or `annotations`, this method will only return True when the values of both operands are equivalent under the Ion data model. * When only one of the operands specifies its `ion_type` and `annotations`, this method will only return True when that operand has no annotations and has a value equivalent to the other operand under the Ion data model. * When both operands specify `ion_type` and `annotations`, this method will only return True when the ion_type and annotations of both are the same and their values are equivalent under the Ion data model. Note that the order of the operands does not matter. Args: a (object): The first operand. b (object): The second operand. timestamps_instants_only (Optional[bool]): False if timestamp objects (datetime and its subclasses) should be compared according to the Ion data model (where the instant, precision, and offset must be equal); True if these objects should be considered equivalent if they simply represent the same instant.
def delete_view(self, request, object_id, extra_context=None):
    """
    Overrides the default to enable redirecting to the directory view after
    deletion of a folder.

    we need to fetch the object and find out who the parent is
    before super, because super will delete the object and make it
    impossible to find out the parent folder to redirect to.

    The delete_view breaks with polymorphic models if the cascade will
    try delete objects that are of different polymorphic types
    (AttributeError: 'File' object has no attribute 'file_ptr').
    The default implementation of the delete_view is hard to override
    without just copying the whole big thing.
    Since we've already done the overriding work on the
    delete_files_or_folders admin action, we can re-use that here instead.
    """
    # Capture the parent folder BEFORE deletion; afterwards the object is
    # gone and its parent can no longer be looked up.
    try:
        obj = self.get_queryset(request).get(pk=unquote(object_id))
        parent_folder = obj.parent
    except self.model.DoesNotExist:
        parent_folder = None
    if request.POST:
        # POST means the deletion was confirmed: perform it via the shared
        # admin action (which handles polymorphic cascades correctly) ...
        self.delete_files_or_folders(
            request,
            files_queryset=File.objects.none(),
            folders_queryset=self.get_queryset(request).filter(id=object_id)
        )
        # ... then redirect to the parent directory listing (or the root
        # listing when the deleted folder had no parent).
        if parent_folder:
            url = reverse('admin:filer-directory_listing',
                          kwargs={'folder_id': parent_folder.id})
        else:
            url = reverse('admin:filer-directory_listing-root')
        # Preserve any admin filter/query parameters across the redirect.
        url = "{0}{1}".format(
            url,
            admin_url_params_encoded(request),
        )
        return HttpResponseRedirect(url)
    # GET: delegate to the action so it can render its confirmation page.
    return self.delete_files_or_folders(
        request,
        files_queryset=File.objects.none(),
        folders_queryset=self.get_queryset(request).filter(id=object_id)
    )
Overrides the default to enable redirecting to the directory view after deletion of a folder. we need to fetch the object and find out who the parent is before super, because super will delete the object and make it impossible to find out the parent folder to redirect to. The delete_view breaks with polymorphic models if the cascade will try delete objects that are of different polymorphic types (AttributeError: 'File' object has no attribute 'file_ptr'). The default implementation of the delete_view is hard to override without just copying the whole big thing. Since we've already done the overriding work on the delete_files_or_folders admin action, we can re-use that here instead.
def transfer_from(self, spender_acct: Account, b58_from_address: str, b58_to_address: str, value: int,
                  payer_acct: Account, gas_limit: int, gas_price: int):
    """
    This interface is used to call the TransferFrom method in oep4 that allows a spender to withdraw an amount
    of oep4 tokens from a from-account to a to-account.

    :param spender_acct: an Account class that actually spends the oep4 token.
    :param b58_from_address: a base58-encoded address that actually pays the oep4 token for the spender's spending.
    :param b58_to_address: a base58-encoded address that receives the oep4 token.
    :param value: the amount of oep4 token in this transaction.
    :param payer_acct: an Account class that is used to pay for the transaction.
    :param gas_limit: an int value that indicates the gas limit.
    :param gas_price: an int value that indicates the gas price.
    :return: the hexadecimal transaction hash value.
    """
    func = InvokeFunction('transferFrom')
    # Validate the base58 addresses before decoding them.
    Oep4.__b58_address_check(b58_from_address)
    Oep4.__b58_address_check(b58_to_address)
    if not isinstance(spender_acct, Account):
        raise SDKException(ErrorCode.param_err('the data type of spender_acct should be Account.'))
    spender_address_array = spender_acct.get_address().to_bytes()
    from_address_array = Address.b58decode(b58_from_address).to_bytes()
    to_address_array = Address.b58decode(b58_to_address).to_bytes()
    if not isinstance(value, int):
        raise SDKException(ErrorCode.param_err('the data type of value should be int.'))
    func.set_params_value(spender_address_array, from_address_array, to_address_array, value)
    params = func.create_invoke_code()
    unix_time_now = int(time.time())
    # 0x67 is the opcode prefix that precedes the contract address in the
    # invoke payload -- NOTE(review): confirm against the ontology VM spec.
    params.append(0x67)
    # The contract address is stored in reversed (little-endian) byte order.
    bytearray_contract_address = bytearray.fromhex(self.__hex_contract_address)
    bytearray_contract_address.reverse()
    for i in bytearray_contract_address:
        params.append(i)
    if payer_acct is None:
        raise SDKException(ErrorCode.param_err('payer account is None.'))
    payer_address_array = payer_acct.get_address().to_bytes()
    # 0xd1 is the transaction type; gas is paid by payer_acct.
    tx = Transaction(0, 0xd1, unix_time_now, gas_price, gas_limit,
                     payer_address_array, params, bytearray(), [])
    # The spender always signs; the payer co-signs only when it is a
    # different account from the spender.
    tx.sign_transaction(spender_acct)
    if spender_acct.get_address_base58() != payer_acct.get_address_base58():
        tx.add_sign_transaction(payer_acct)
    tx_hash = self.__sdk.get_network().send_raw_transaction(tx)
    return tx_hash
This interface is used to call the TransferFrom method in oep4 that allows a spender to withdraw an amount of oep4 tokens from a from-account to a to-account. :param spender_acct: an Account class that actually spends the oep4 token. :param b58_from_address: a base58-encoded address that actually pays the oep4 token for the spender's spending. :param b58_to_address: a base58-encoded address that receives the oep4 token. :param value: the amount of oep4 token in this transaction. :param payer_acct: an Account class that is used to pay for the transaction. :param gas_limit: an int value that indicates the gas limit. :param gas_price: an int value that indicates the gas price. :return: the hexadecimal transaction hash value.
def findBinomialNsWithExpectedSampleMinimum(desiredValuesSorted, p, numSamples, nMax): """ For each desired value, find an approximate n for which the sample minimum has a expected value equal to this value. For each value, find an adjacent pair of n values whose expected sample minima are below and above the desired value, respectively, and return a linearly-interpolated n between these two values. @param p (float) The p if the binomial distribution. @param numSamples (int) The number of samples in the sample minimum distribution. @return A list of results. Each result contains (interpolated_n, lower_value, upper_value). where each lower_value and upper_value are the expected sample minimum for floor(interpolated_n) and ceil(interpolated_n) """ # mapping from n -> expected value actualValues = [ getExpectedValue( SampleMinimumDistribution(numSamples, BinomialDistribution(n, p, cache=True))) for n in xrange(nMax + 1)] results = [] n = 0 for desiredValue in desiredValuesSorted: while n + 1 <= nMax and actualValues[n + 1] < desiredValue: n += 1 if n + 1 > nMax: break interpolated = n + ((desiredValue - actualValues[n]) / (actualValues[n+1] - actualValues[n])) result = (interpolated, actualValues[n], actualValues[n + 1]) results.append(result) return results
For each desired value, find an approximate n for which the sample minimum has an expected value equal to this value. For each value, find an adjacent pair of n values whose expected sample minima are below and above the desired value, respectively, and return a linearly-interpolated n between these two values. @param p (float) The p of the binomial distribution. @param numSamples (int) The number of samples in the sample minimum distribution. @return A list of results. Each result contains (interpolated_n, lower_value, upper_value). where each lower_value and upper_value are the expected sample minimum for floor(interpolated_n) and ceil(interpolated_n)
def query_by_login(self, login_id, end_time=None, start_time=None):
    """
    Query by login.

    List authentication events for a given login.
    """
    # REQUIRED path parameter: the login ID.
    path = {"login_id": login_id}
    # No form body for this endpoint.
    data = {}
    # OPTIONAL query parameters bounding the time range of returned events.
    params = {}
    if start_time is not None:
        params["start_time"] = start_time
    if end_time is not None:
        params["end_time"] = end_time

    self.logger.debug("GET /api/v1/audit/authentication/logins/{login_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/audit/authentication/logins/{login_id}".format(**path), data=data, params=params, no_data=True)
Query by login. List authentication events for a given login.
def encode(df, encoding='utf8', verbosity=1):
    """If you try to encode each element individually with python, this would take days!

    Encode string cells of a DataFrame column-at-a-time (vectorized with
    ``np.char.encode`` where possible), mutating and returning ``df``.

    :param df: pandas DataFrame whose object/string columns should be encoded.
    :param encoding: target encoding name.
        NOTE(review): this parameter is never used below -- the code
        hard-codes 'utf8' -- confirm whether that is intentional.
    :param verbosity: > 0 shows a progress bar over the columns.
        NOTE(review): the bar is created only when ``verbosity > 0`` but
        later guarded by the truthiness test ``if verbosity:``, so a
        negative verbosity would hit an undefined ``pbar`` -- confirm.
    :return: the same DataFrame, mutated in place.
    """
    if verbosity > 0:
        # pbar_i = 0
        pbar = progressbar.ProgressBar(maxval=df.shape[1])
        pbar.start()
    # encode strings as UTF-8 so they'll work in python2 and python3
    for colnum, col in enumerate(df.columns):
        if isinstance(df[col], pd.Series):
            if verbosity:
                pbar.update(colnum)
            # Only touch columns of object/str dtype that actually contain
            # at least one string value.
            if df[col].dtype in (np.dtype('object'), np.dtype('U'), np.dtype('S')) and any(isinstance(obj, basestring) for obj in df[col]):
                # Boolean mask selecting just the string cells of the column.
                strmask = np.array([isinstance(obj, basestring) for obj in df[col]])
                series = df[col].copy()
                try:
                    # Fast path: vectorized encode of all string cells at once.
                    series[strmask] = np.char.encode(series[strmask].values.astype('U'))
                except TypeError:
                    print("Unable to convert {} elements starting at position {} in column {}".format(
                        sum(strmask), [i for i, b in enumerate(strmask) if b][:1], col))
                    raise
                except (UnicodeDecodeError, UnicodeEncodeError):
                    # Fallback 1: the cells may hold string *reprs*; eval them
                    # with empty globals/locals to recover the objects.
                    try:
                        series[strmask] = np.array([eval(s, {}, {}) for s in series[strmask]])
                    # FIXME: do something different for unicode and decode errors
                    except (SyntaxError, UnicodeDecodeError, UnicodeEncodeError):
                        # Fallback 2: encode cell by cell, transliterating any
                        # stubborn values to ASCII.
                        newseries = []
                        for s in series[strmask]:
                            try:
                                newseries += [s.encode('utf8')]
                            except:
                                print(u'Had trouble encoding {} so used repr to turn it into {}'.format(s, repr(transcode_unicode(s))))
                                # strip all unicode chars are convert to ASCII str
                                newseries += [transcode_unicode(s)]
                        # for dtype('U'): UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 207: ordinal not in r
                        series[strmask] = np.array(newseries).astype('O')
                df[col] = series
                # df[col] = np.array([x.encode('utf8') if isinstance(x, unicode) else x for x in df[col]])
                # WARNING: this takes DAYS for only 100k tweets!
                # series = df[col].copy()
                # for i, value in series.iteritems():
                #     if isinstance(value, basestring):
                #         series[i] = str(value.encode(encoding))
                # df[col] = series
    if verbosity:
        pbar.finish()
    return df
If you try to encode each element individually with python, this would take days!
def is_conditional(self, include_loop=True):
    """
    Check if the node is a conditional node
    A conditional node is either a IF or a require/assert or a RETURN bool

    :param include_loop: also treat loop conditions as conditionals
        (forwarded to ``contains_if``).
    Returns:
        bool: True if the node is a conditional node
    """
    # Direct conditionals: IF statements (optionally loops) and
    # require/assert calls.
    if self.contains_if(include_loop) or self.contains_require_or_assert():
        return True
    # Indirect conditional: the node's last IR operation returns a bool
    # (i.e. a `return <bool expression>`).
    if self.irs:
        last_ir = self.irs[-1]
        if last_ir:
            if isinstance(last_ir, Return):
                for r in last_ir.read:
                    if r.type == ElementaryType('bool'):
                        return True
    return False
Check if the node is a conditional node A conditional node is either a IF or a require/assert or a RETURN bool Returns: bool: True if the node is a conditional node
def _log(self, x):
    """Modified version of np.log that manually sets values <=0 to -inf

    Parameters
    ----------
    x: ndarray of floats
        Input to the log function

    Returns
    -------
    log_ma: ndarray of floats
        log of x, with x<=0 values replaced with -inf
    """
    # masked_log only handles 1-D data, so flatten first and restore
    # the original shape on the way out.
    original_shape = x.shape
    flat = x.flatten()
    logged = utils.masked_log(flat)
    return logged.reshape(original_shape)
Modified version of np.log that manually sets values <=0 to -inf Parameters ---------- x: ndarray of floats Input to the log function Returns ------- log_ma: ndarray of floats log of x, with x<=0 values replaced with -inf
def itemAdded(self):
    """
    Called to indicate that a new item of the type monitored by this batch
    processor is being added to the database.

    If this processor is not already scheduled to run, this will schedule
    it.  It will also start the batch process if it is not yet running and
    there are any registered remote listeners.
    """
    # Only existence matters, so cap both queries at one row.
    localCount = self.store.query(
        _ReliableListener,
        attributes.AND(_ReliableListener.processor == self,
                       _ReliableListener.style == iaxiom.LOCAL),
        limit=1).count()
    remoteCount = self.store.query(
        _ReliableListener,
        attributes.AND(_ReliableListener.processor == self,
                       _ReliableListener.style == iaxiom.REMOTE),
        limit=1).count()

    # Local listeners: schedule an in-process run now, unless one is
    # already pending (self.scheduled doubles as the pending marker).
    if localCount and self.scheduled is None:
        self.scheduled = extime.Time()
        iaxiom.IScheduler(self.store).schedule(self, self.scheduled)
    # Remote listeners: poke the batch service (if the store has one) so
    # the out-of-process batch job gets started.
    if remoteCount:
        batchService = iaxiom.IBatchService(self.store, None)
        if batchService is not None:
            batchService.start()
Called to indicate that a new item of the type monitored by this batch processor is being added to the database. If this processor is not already scheduled to run, this will schedule it. It will also start the batch process if it is not yet running and there are any registered remote listeners.
def _status_filter_to_query(clause):
    """
    Convert a clause querying for an experiment state RUNNING or DEAD.

    Queries that check for experiment state RUNNING and DEAD need to be
    replaced by the logic that decides these two states as both of them
    are stored in the Mongo Database as "RUNNING". We use querying
    by last heartbeat time.

    :param clause: A clause whose field is "status" and "value" is
    one of RUNNING, DEAD.
    :return: A MongoDB clause.
    :raises ValueError: If the clause value is neither RUNNING nor DEAD.
    """
    value = clause["value"]
    if value == "RUNNING":
        mongo_clause = MongoRunDAO.RUNNING_NOT_DEAD_CLAUSE
    elif value == "DEAD":
        mongo_clause = MongoRunDAO.RUNNING_DEAD_RUN_CLAUSE
    else:
        # Previously an unexpected value fell through and raised an
        # opaque NameError on the unbound `mongo_clause`; fail loudly
        # and descriptively instead.
        raise ValueError(
            "Unsupported status value %r; expected RUNNING or DEAD" % (value,))
    if clause["operator"] == "!=":
        mongo_clause = {"$not": mongo_clause}
    return mongo_clause
Convert a clause querying for an experiment state RUNNING or DEAD. Queries that check for experiment state RUNNING and DEAD need to be replaced by the logic that decides these two states as both of them are stored in the Mongo Database as "RUNNING". We use querying by last heartbeat time. :param clause: A clause whose field is "status" and "value" is one of RUNNING, DEAD. :return: A MongoDB clause.
def data(self):  # pragma: no cover pylint: disable=inconsistent-return-statements
    """
    Management and input of data to the table.

    :raises:
        :code:`Exception`
            When self.data_to_print is not a list.
    """

    if isinstance(self.data_to_print, list):
        # The data to print is a list.

        # We initiate the data we are going to print.
        to_print = {}
        # We initiate the size we are going to print.
        to_print_size = []

        # We initiate a variable which will list the list of
        # alone case.
        alone_cases = ["Percentage", "HTTP"]
        # we initiate a variable which will list the list of
        # template which does not need a header.
        without_header = ["FullHosts", "PlainDomain"]

        if self.template.lower() == "json":
            # The template is the json template.

            if not PyFunceble.CONFIGURATION["no_files"] and self.output:
                # * We are allowed to generate file.
                # and
                # * The given output is not empty.

                # We print the json file.
                return self._json_print()
            # We return nothing.
            return None

        if self.template not in alone_cases and self.template not in without_header:
            # * The template is not in the list of alone case.
            # and
            # * THe template is not in the list of template without header.

            # We get the template we should use.
            # Note: We basically only need the self.currently_used_header to be filled.
            self.header(True)

            # And we get the size from the header.
            to_print_size = self._size_from_header(self.currently_used_header)
        elif self.template in without_header:
            # The template is in the list of template which does not need a header.

            for data in self.data_to_print:
                # We loop through the list of data to print.

                # And we construct the (spacement) size of the data to print.
                to_print_size.append(str(len(data)))
        else:
            # We get the size from the given template name.
            to_print_size = self._size_from_header(self.headers[self.template])

        # We construct and format the data to print.
        to_print = self._data_constructor(to_print_size)

        # We print the before header section.
        self._before_header()

        for data in self._header_constructor(to_print, False):
            # We loop through the formatted data.

            if self.template.lower() in PyFunceble.STATUS["list"][
                "generic"
            ] or self.template in ["Less", "Percentage"]:
                # * The template is in the list of generic status.
                # or
                # * The template is in a specific list.

                if not self.only_on_file:
                    # We are authorized to print on screen.

                    # We colorify the data to print.
                    colorified_data = self._colorify(data)

                    # And we print the data.
                    print(colorified_data)

            if not PyFunceble.CONFIGURATION["no_files"] and self.output:
                # * We are authorized to print on any file.
                # and
                # * The output is given.

                # We write our data into the printed file.
                File(self.output).write(data + "\n")
    else:
        # This should never happens. If it's happens then there's a big issue
        # around data_to_print.
        raise Exception("Please review Prints().data()")
Management and input of data to the table. :raises: :code:`Exception` When self.data_to_print is not a list.
def magic_api(word):
    """
    This is our magic API that we're simulating.  It'll return a random
    number and a cache timer.
    """
    # Accumulate a per-character score: letter offset from 'A' plus a
    # random 1..50 component.
    total = 0
    for ch in word:
        total += ord(ch) - 65 + randint(1, 50)
    # Cache lifetime is `total` seconds from now.
    expiry = datetime.now() + timedelta(seconds=total)
    return total, expiry
This is our magic API that we're simulating. It'll return a random number and a cache timer.
def inherit_docstrings(cls):
    """Class decorator for inheriting docstrings.

    Automatically inherits base class doc-strings if not present in the
    derived class.

    :param cls: the class being decorated.
    :return: the same class, with missing method docstrings filled in
        from the first base class that documents the method.
    :raises RuntimeError: if applied to something that is not a class.
    """
    @functools.wraps(cls)
    def _inherit_docstrings(cls):
        # Works on both new-style classes and py2 old-style classes.
        if not isinstance(cls, (type, colorise.compat.ClassType)):
            raise RuntimeError("Type is not a class")

        for name, value in colorise.compat.iteritems(vars(cls)):
            # Only methods are considered; attributes are skipped.
            if isinstance(getattr(cls, name), types.MethodType):
                if not getattr(value, '__doc__', None):
                    # Copy the docstring from the first base that both
                    # defines the method and documents the *class* --
                    # NOTE(review): this checks base.__doc__ rather than
                    # basemethod.__doc__; confirm that is intentional.
                    for base in cls.__bases__:
                        basemethod = getattr(base, name, None)

                        if basemethod and getattr(base, '__doc__', None):
                            value.__doc__ = basemethod.__doc__

        return cls

    # The decorator is applied immediately rather than returned --
    # NOTE(review): the inner wrapper appears redundant; confirm.
    return _inherit_docstrings(cls)
Class decorator for inheriting docstrings. Automatically inherits base class doc-strings if not present in the derived class.
def principal_rotation_axis(gyro_data):
    """Get the principal rotation axis of angular velocity measurements.

    The axis is the dominant eigenvector of the 3x3 scatter matrix
    N = sum_k x_k x_k^T over all measurement columns x_k, with its sign
    flipped, if necessary, to point with the bulk of the measurements.

    Parameters
    -------------
    gyro_data : (3, N) ndarray
            Angular velocity measurements, one sample per column

    Returns
    -------------
    v : (3,) ndarray
            The principal rotation axis for the chosen sequence
    """
    gyro_data = np.asarray(gyro_data)

    # Scatter matrix: the sum of outer products x x^T over all sample
    # columns equals a single matrix product, so compute it vectorized
    # instead of looping over samples in Python.
    N = gyro_data.dot(gyro_data.T)

    eig_val, eig_vec = np.linalg.eig(N)
    # Eigenvector belonging to the largest eigenvalue.
    v = eig_vec[:, np.argmax(eig_val)]

    # Make sure v has correct sign: s is the summed projection of all
    # samples onto v; a negative s means v points against the data.
    # (If s is exactly 0 the sign is 0 and v is zeroed, matching the
    # original loop-based behavior.)
    s = v.dot(gyro_data).sum()
    v *= np.sign(s)

    return v
Get the principal rotation axis of angular velocity measurements. Parameters ------------- gyro_data : (3, N) ndarray Angular velocity measurements Returns ------------- v : (3,1) ndarray The principal rotation axis for the chosen sequence
def spiro_image(R, r, r_, resolution=2*PI/1000, spins=50, size=[32, 32]):
    '''Create image with given Spirograph parameters using numpy and scipy.

    NOTE(review): `R`, `resolution` and `spins` are accepted but never
    used -- give_dots is called with hard-coded 200 and spins=20.
    Confirm whether those should be give_dots(R, r, r_, spins=spins).
    NOTE(review): `size` is a mutable default argument; it is not
    mutated here, but consider `size=None` + a default inside the body.
    '''
    # Trace the spirograph curve as floating-point (x, y) dots.
    x, y = give_dots(200, r, r_, spins=20)
    # Round dot coordinates to integer pixel positions (one row per dot).
    xy = np.array([x, y]).T
    xy = np.array(np.around(xy), dtype=np.int64)
    # Keep only dots inside the 500x500 canvas centered on the origin,
    # then shift them so indices are non-negative.
    xy = xy[(xy[:, 0] >= -250) & (xy[:, 1] >= -250) &
            (xy[:, 0] < 250) & (xy[:, 1] < 250)]
    xy = xy + 250
    # White canvas (255) with black (0) curve pixels.
    img = np.ones([500, 500], dtype=np.uint8)
    img[:] = 255
    img[xy[:, 0], xy[:, 1]] = 0
    # Downsample to the requested size and normalize to [0, 1] floats.
    img = misc.imresize(img, size)
    fimg = img / 255.0
    return fimg
Create image with given Spirograph parameters using numpy and scipy.
def convert_msg(self, msg):
    """
    Takes one POEntry object and converts it (adds a dummy translation to it)
    msg is an instance of polib.POEntry
    """
    source = msg.msgid
    if not source:
        # don't translate empty string
        return

    plural_source = msg.msgid_plural
    if not plural_source:
        # Singular-only entry.
        msg.msgstr = self.final_newline(source, self.convert(source))
        return

    # Entry with a plural form: translate both and store them keyed by
    # plural index.
    msg.msgstr_plural = {
        '0': self.final_newline(source, self.convert(source)),
        '1': self.final_newline(plural_source, self.convert(plural_source)),
    }
Takes one POEntry object and converts it (adds a dummy translation to it) msg is an instance of polib.POEntry
def log(begin_time, running_num_tks, running_mlm_loss, running_nsp_loss,
        step_num, mlm_metric, nsp_metric, trainer, log_interval):
    """Log training progress.

    Parameters
    ----------
    begin_time : float
        Wall-clock time (``time.time()``) at the start of the interval.
    running_num_tks
        Tokens processed during the interval; ``.asscalar()`` is called on
        the derived throughput, so this is an NDArray-like accumulator.
    running_mlm_loss, running_nsp_loss
        Summed MLM / NSP losses over the interval (NDArray-like; averaged
        below by dividing by ``log_interval``).
    step_num : int
        Current optimization step, used only for display.
    mlm_metric, nsp_metric
        Metric objects exposing ``get() -> (name, value)`` with value in
        [0, 1]; displayed as percentages.
    trainer
        Object exposing ``learning_rate``; may be None (lr shown as 0).
    log_interval : int
        Number of steps in the interval being reported.
    """
    end_time = time.time()
    duration = end_time - begin_time
    # Throughput in thousands of tokens per second over the interval.
    throughput = running_num_tks / duration / 1000.0
    # Convert interval sums into per-step averages.
    running_mlm_loss = running_mlm_loss / log_interval
    running_nsp_loss = running_nsp_loss / log_interval
    lr = trainer.learning_rate if trainer else 0
    # pylint: disable=line-too-long
    logging.info('[step {}]\tmlm_loss={:.5f}\tmlm_acc={:.5f}\tnsp_loss={:.5f}\tnsp_acc={:.3f}\tthroughput={:.1f}K tks/s\tlr={:.7f} time={:.2f}, latency={:.1f} ms/batch'
                 .format(step_num, running_mlm_loss.asscalar(), mlm_metric.get()[1] * 100, running_nsp_loss.asscalar(),
                         nsp_metric.get()[1] * 100, throughput.asscalar(), lr, duration, duration*1000/log_interval))
Log training progress.
def displayTriples(triples, qname=qname):
    """Print the triples in sorted order, one per line, followed by ' .'.

    triples can also be an rdflib Graph instance

    Blank nodes are shown truncated to their first 5 characters; all
    other terms are rendered through ``qname``.
    """
    # A plain loop instead of the original list comprehension: the
    # comprehension was used only for its print side effects and built
    # a throwaway list of Nones.
    for t in sorted(triples):
        rendered = (e[:5] if isinstance(e, rdflib.BNode) else qname(e)
                    for e in t)
        print(*rendered, '.')
triples can also be an rdflib Graph instance
def inline(self) -> str:
    """
    Return endpoint string

    :return:
    """
    # Collect the non-empty endpoint fields, in their canonical order,
    # as strings.
    parts = []
    for info in (self.server, self.ipv4, self.ipv6, self.port, self.path):
        if info:
            parts.append(str(info))
    # Note: the API prefix and separating space are emitted even when no
    # field is set, matching the historical output format.
    return SecuredBMAEndpoint.API + " " + " ".join(parts)
Return endpoint string :return:
def get_skype(self):
    """Returns Skype window ID or None if Skype not running."""
    # Skype advertises itself through the _SKYPE_INSTANCE atom; if the
    # atom doesn't exist (only_if_exists=True), Skype has never run.
    skype_inst = x11.XInternAtom(self.disp, '_SKYPE_INSTANCE', True)
    if not skype_inst:
        return

    # Out-parameters for XGetWindowProperty.
    type_ret = Atom()
    format_ret = c_int()
    nitems_ret = c_ulong()
    bytes_after_ret = c_ulong()
    winp = pointer(Window())

    # Read one 32-bit item of the property from the root window.
    # The literal 33 is the requested property type -- presumably the
    # predefined XA_WINDOW atom (33) -- TODO confirm against Xatom.h.
    fail = x11.XGetWindowProperty(self.disp, self.win_root, skype_inst,
                                  0, 1, False, 33, byref(type_ret),
                                  byref(format_ret), byref(nitems_ret),
                                  byref(bytes_after_ret), byref(winp))
    # Success + exactly one 32-bit item means we got Skype's window ID;
    # otherwise fall through and implicitly return None.
    if not fail and format_ret.value == 32 and nitems_ret.value == 1:
        return winp.contents.value
Returns Skype window ID or None if Skype not running.
def get_scan_parameters_table_from_meta_data(meta_data_array, scan_parameters=None):
    '''Takes the meta data array and returns the scan parameter values as a view of a numpy array only containing the parameter data .

    Parameters
    ----------
    meta_data_array : numpy.ndarray
        The (structured) array with the scan parameters.
    scan_parameters : list of strings
        The name of the scan parameters to take. If none all are used.
        When None, everything after the 'error_code' column is assumed
        to be a scan parameter.

    Returns
    -------
    numpy.ndarray or None
        A structured-array *view* (no copy) onto the selected columns of
        meta_data_array, or None when no scan parameters are present.
    '''
    if scan_parameters is None:
        try:
            # 'error_code' marks the last non-parameter column of
            # interpreted meta data; everything after it is a parameter.
            last_not_parameter_column = meta_data_array.dtype.names.index('error_code')  # for interpreted meta_data
        except ValueError:
            return
        if last_not_parameter_column == len(meta_data_array.dtype.names) - 1:  # no meta_data found
            return
        # http://stackoverflow.com/questions/15182381/how-to-return-a-view-of-several-columns-in-numpy-structured-array
        scan_par_data = {name: meta_data_array.dtype.fields[name] for name in meta_data_array.dtype.names[last_not_parameter_column + 1:]}
    else:
        # Explicit selection; preserve the caller-given column order.
        scan_par_data = collections.OrderedDict()
        for name in scan_parameters:
            scan_par_data[name] = meta_data_array.dtype.fields[name]
    # Re-wrap the same buffer with a narrower dtype: the field offsets in
    # scan_par_data keep pointing into the original records, so this is a
    # zero-copy view.
    return np.ndarray(meta_data_array.shape, np.dtype(scan_par_data), meta_data_array, 0, meta_data_array.strides)
Takes the meta data array and returns the scan parameter values as a view of a numpy array only containing the parameter data . Parameters ---------- meta_data_array : numpy.ndarray The array with the scan parameters. scan_parameters : list of strings The name of the scan parameters to take. If none all are used. Returns ------- numpy.Histogram
def epiweek_to_date(ew: Epiweek) -> datetime.date:
    """
    Return date from epiweek (starts at Sunday)
    """
    # Days past the first day of the epi year: full weeks plus the
    # day-of-week offset (both 1-based in the Epiweek).
    offset_days = (ew.week - 1) * 7 + (ew.day - 1)
    return _start_date_of_year(ew.year) + datetime.timedelta(days=offset_days)
Return date from epiweek (starts at Sunday)
def make_geojson(contents):
    """
    Return a GeoJSON string from a variety of inputs.

    See the documentation for make_url for the possible contents input.

    Accepted inputs (in order of checking): an already-serialized string
    (returned unchanged), a single object implementing
    ``__geo_interface__``, or an iterable of such objects.

    Returns
    -------
    GeoJSON string

    Raises
    ------
    ValueError
        If the input (or any element of an iterable input) is not a
        string, geo-interface object, or iterable.
    """
    if isinstance(contents, six.string_types):
        # Assume the caller already serialized it.
        return contents

    if hasattr(contents, '__geo_interface__'):
        # Single geometry/feature object.
        features = [_geo_to_feature(contents)]
    else:
        try:
            feature_iter = iter(contents)
        except TypeError:
            raise ValueError('Unknown type for input')

        features = []
        for i, f in enumerate(feature_iter):
            if not hasattr(f, '__geo_interface__'):
                raise ValueError('Unknown type at index {0}'.format(i))
            features.append(_geo_to_feature(f))

    data = {'type': 'FeatureCollection',
            'features': features}

    return json.dumps(data)
Return a GeoJSON string from a variety of inputs. See the documentation for make_url for the possible contents input. Returns ------- GeoJSON string
def get_scrollbar_position_height(self):
    """Return the pixel span height of the scrollbar area in which
    the slider handle may move"""
    scrollbar = self.editor.verticalScrollBar()
    opt = QStyleOptionSlider()
    scrollbar.initStyleOption(opt)
    # The "groove" sub-control is the track the handle travels along.
    groove_rect = scrollbar.style().subControlRect(
        QStyle.CC_ScrollBar, opt, QStyle.SC_ScrollBarGroove, self)
    return float(groove_rect.height())
Return the pixel span height of the scrollbar area in which the slider handle may move
def first_or_create(
    self, _attributes=None, _joining=None, _touch=True, **attributes
):
    """
    Get the first related model record matching the attributes or create it.

    :param attributes: The attributes
    :type attributes: dict

    :rtype: Model
    """
    # Merge the positional attribute dict into the keyword attributes.
    if _attributes is not None:
        attributes.update(_attributes)

    # Return the existing match if there is one ...
    existing = self._query.where(attributes).first()
    if existing is not None:
        return existing

    # ... otherwise create a fresh related record.
    return self.create(attributes, _joining or {}, _touch)
Get the first related model record matching the attributes or create it. :param attributes: The attributes :type attributes: dict :rtype: Model
def metamodel_from_str(lang_desc, metamodel=None, **kwargs):
    """
    Creates a new metamodel from the textX description given as a string.

    Args:
        lang_desc(str): A textX language description.
        metamodel(TextXMetaModel): A metamodel that should be used.
        other params: See TextXMetaModel.
    """
    # Reuse a caller-supplied (truthy) metamodel, otherwise build a new one.
    target = metamodel if metamodel else TextXMetaModel(**kwargs)
    language_from_str(lang_desc, target)
    return target
Creates a new metamodel from the textX description given as a string. Args: lang_desc(str): A textX language description. metamodel(TextXMetaModel): A metamodel that should be used. other params: See TextXMetaModel.
def list_extra_features(self):
    '''
    Returns
    -------
    List of dicts. One dict for each document, keys are metadata, values are counts
    '''
    # Delegate the per-document aggregation to FeatureLister.
    lister = FeatureLister(self._mX,
                           self._metadata_idx_store,
                           self.get_num_docs())
    return lister.output()
Returns ------- List of dicts. One dict for each document, keys are metadata, values are counts
def updateConfig(self, eleobj, config, type='simu'):
    """ write new configuration to element

    :param eleobj: define element object
    :param config: new configuration for element, string or dict
    :param type: 'simu' by default, could be online, misc, comm, ctrl
        NOTE(review): parameter shadows the builtin ``type``; renaming
        would change the keyword interface, so it is kept as-is.
    """
    # Pure delegation: the element object applies the config itself.
    eleobj.setConf(config, type=type)
write new configuration to element :param eleobj: define element object :param config: new configuration for element, string or dict :param type: 'simu' by default, could be online, misc, comm, ctrl
def expression_statement(self):
    """
    expression_statement: assignment ';'

    Parses one expression statement: an assignment followed by a
    mandatory semicolon, returning the assignment's AST node.
    """
    node = self.assignment()
    # Consume the terminating ';' (raises if the next token differs).
    self._process(Nature.SEMI)
    return node
expression_statement: assignment ';'
def set_ram(self, ram):
    """
    Set the RAM amount for the GNS3 VM.

    This is a coroutine (uses ``yield from``); it shells out to
    VBoxManage's ``modifyvm --memory`` with a 3 second timeout.

    :param ram: amount of memory in MB
    """
    yield from self._execute("modifyvm", [self._vmname, "--memory", str(ram)], timeout=3)
    log.info("GNS3 VM RAM amount set to {}".format(ram))
Set the RAM amount for the GNS3 VM. :param ram: amount of memory
def create_floating_ip(self, droplet_id=None, region=None, **kwargs):
    r"""
    Create a new floating IP assigned to a droplet or reserved to a
    region.  Either ``droplet_id`` or ``region`` must be specified, but
    not both.

    The returned `FloatingIP` object will represent the IP at the moment
    of creation; if the IP address is supposed to be assigned to a
    droplet, the assignment may not have been completed at the time the
    object is returned.  To wait for the assignment to complete, use the
    `FloatingIP`'s :meth:`~FloatingIP.wait_for_action` method.

    :param droplet_id: the droplet to assign the floating IP to as either
        an ID or a `Droplet` object
    :type droplet_id: integer or `Droplet`
    :param region: the region to reserve the floating IP to as either a
        slug or a `Region` object
    :type region: string or `Region`
    :param kwargs: additional fields to include in the API request
    :return: the new floating IP
    :rtype: FloatingIP
    :raises TypeError: if both ``droplet_id`` & ``region`` or neither of
        them are defined
    :raises DOAPIError: if the API endpoint replies with an error
    """
    # Exactly one of the two targets must be given (XOR via equality of
    # the two None-checks).
    if (droplet_id is None) == (region is None):
        ### TODO: Is TypeError the right type of error?
        raise TypeError('Exactly one of "droplet_id" and "region" must be'
                        ' specified')
    if droplet_id is not None:
        # Accept either a raw ID or a Droplet object.
        if isinstance(droplet_id, Droplet):
            droplet_id = droplet_id.id
        data = {"droplet_id": droplet_id}
    else:
        # Accept either a region slug or a Region object.
        if isinstance(region, Region):
            region = region.slug
        data = {"region": region}
    # Caller-provided extras go into the request body verbatim.
    data.update(kwargs)
    return self._floating_ip(self.request('/v2/floating_ips',
                                          method='POST',
                                          data=data)["floating_ip"])
Create a new floating IP assigned to a droplet or reserved to a region. Either ``droplet_id`` or ``region`` must be specified, but not both. The returned `FloatingIP` object will represent the IP at the moment of creation; if the IP address is supposed to be assigned to a droplet, the assignment may not have been completed at the time the object is returned. To wait for the assignment to complete, use the `FloatingIP`'s :meth:`~FloatingIP.wait_for_action` method. :param droplet_id: the droplet to assign the floating IP to as either an ID or a `Droplet` object :type droplet_id: integer or `Droplet` :param region: the region to reserve the floating IP to as either a slug or a `Region` object :type region: string or `Region` :param kwargs: additional fields to include in the API request :return: the new floating IP :rtype: FloatingIP :raises TypeError: if both ``droplet_id`` & ``region`` or neither of them are defined :raises DOAPIError: if the API endpoint replies with an error
def _equaBreaks(self, orbit_index_period=24.):
    """Determine where breaks in an equatorial satellite orbit occur.

    Looks for negative gradients in local time (or longitude) as well
    as breaks in UT.

    Parameters
    ----------
    orbit_index_period : float
        The change in value of supplied index parameter for a single orbit
        (e.g. 24 for local time in hours, 360 for longitude in degrees).

    Notes
    -----
    Sets ``self._orbit_breaks`` (array of sample indices where each orbit
    starts, always beginning with 0) and ``self.num`` (number of orbits).
    Returns nothing.
    """
    if self.orbit_index is None:
        raise ValueError('Orbit properties must be defined at ' +
                         'pysat.Instrument object instantiation.' +
                         'See Instrument docs.')
    else:
        try:
            # probe that the configured orbit index column is present
            self.sat[self.orbit_index]
        except ValueError:
            raise ValueError('Provided orbit index does not exist in ' +
                             'loaded data')

    # get difference in orbit index around the orbit
    lt_diff = self.sat[self.orbit_index].diff()
    # universal time values, from datetime index
    ut_vals = Series(self.sat.data.index)
    # UT difference
    ut_diff = ut_vals.diff()
    # get locations where orbit index derivative is less than 0
    # then do some basic checks on these locations
    ind, = np.where((lt_diff < -0.1))
    if len(ind) > 0:
        # append the end-of-data index so the last candidate has a
        # distance to compare against
        ind = np.hstack((ind, np.array([len(self.sat[self.orbit_index])])))
        # look at distance between breaks
        dist = ind[1:] - ind[0:-1]
        # only keep orbit breaks with a distance greater than 1
        # done for robustness
        if len(ind) > 1:
            if min(dist) == 1:
                print('There are orbit breaks right next to each other')
            ind = ind[:-1][dist > 1]
        # check for large positive gradients around the break that would
        # suggest not a true orbit break, but rather bad orbit_index values
        new_ind = []
        for idx in ind:
            # look at a +/-5 sample window around the candidate break
            tidx, = np.where(lt_diff[idx - 5:idx + 6] > 0.1)
            if len(tidx) != 0:
                # there are large changes, suggests a false alarm
                # iterate over samples and check
                for tidx in tidx:
                    # look at time change vs local time change
                    if(ut_diff[idx - 5:idx + 6].iloc[tidx] <
                       lt_diff[idx - 5:idx + 6].iloc[tidx] /
                       orbit_index_period * self.orbit_period):
                        # change in ut is small compared to the change in
                        # the orbit index this is flagged as a false alarm,
                        # or dropped from consideration
                        pass
                    else:
                        # change in UT is significant, keep orbit break
                        new_ind.append(idx)
                        break
            else:
                # no large positive gradients, current orbit break passes
                # the first test
                new_ind.append(idx)
        # replace all breaks with those that are 'good'
        ind = np.array(new_ind)

    # now, assemble some orbit breaks that are not triggered by changes in
    # the orbit index
    # check if there is a UT break that is larger than orbital period, aka
    # a time gap
    ut_change_vs_period = (ut_diff > self.orbit_period)
    # characterize ut change using orbital period
    norm_ut = ut_diff / self.orbit_period
    # now, look for breaks because the length of time between samples is
    # too large, thus there is no break in slt/mlt/etc, lt_diff is small
    # but UT change is big
    norm_ut_vs_norm_lt = norm_ut.gt(np.abs(lt_diff.values /
                                           orbit_index_period))
    # indices when one or other flag is true
    ut_ind, = np.where(ut_change_vs_period |
                       (norm_ut_vs_norm_lt & (norm_ut > 0.95)))
    # combine these UT determined orbit breaks with the orbit index orbit
    # breaks
    if len(ut_ind) > 0:
        ind = np.hstack((ind, ut_ind))
        ind = np.sort(ind)
        ind = np.unique(ind)
        print('Time Gap')

    # now that most problems in orbits should have been caught, look at
    # the time difference between orbits (not individual orbits)
    orbit_ut_diff = ut_vals[ind].diff()
    orbit_lt_diff = self.sat[self.orbit_index][ind].diff()
    # look for time gaps between partial orbits. The full orbital time
    # period is not required between end of one orbit and begining of next
    # if first orbit is partial. Also provides another general test of the
    # orbital breaks determined.
    idx, = np.where((orbit_ut_diff / self.orbit_period -
                     orbit_lt_diff.values / orbit_index_period) > 0.97)
    # pull out breaks that pass the test, need to make sure the first one
    # is always included it gets dropped via the nature of diff
    if len(idx) > 0:
        if idx[0] != 0:
            idx = np.hstack((0, idx))
    else:
        idx = np.array([0])
    # only keep the good indices
    if len(ind) > 0:
        ind = ind[idx]
        # create orbitbreak index, ensure first element is always 0
        if ind[0] != 0:
            ind = np.hstack((np.array([0]), ind))
    else:
        ind = np.array([0])

    # number of orbits
    num_orbits = len(ind)
    # set index of orbit breaks
    self._orbit_breaks = ind
    # set number of orbits for the day
    self.num = num_orbits
Determine where breaks in an equatorial satellite orbit occur. Looks for negative gradients in local time (or longitude) as well as breaks in UT. Parameters ---------- orbit_index_period : float The change in value of supplied index parameter for a single orbit
def delocalization_analysis(self, defect_entry):
    """
    Do delocalization analysis. To do this, one considers:
        i) sampling region of planar averaged electrostatic potential (freysoldt approach)
        ii) sampling region of atomic site averaged potentials (kumagai approach)
        iii) structural relaxation amount outside of radius considered in kumagai approach (default is wigner seitz radius)
        iv) if defect is not a vacancy type -> track to see how much the defect has moved

    calculations that fail delocalization get "is_compatible" set to False in parameters
    also parameters receives a "delocalization_meta" with following dict:
        plnr_avg = {'is_compatible': True/False, 'metadata': metadata used for determining this}
        atomic_site = {'is_compatible': True/False, 'metadata': metadata used for determining this}
        structure_relax = {'is_compatible': True/False, 'metadata': metadata used for determining this}
        defectsite_relax = {'is_compatible': True/False, 'metadata': metadata used for determining this}

    :param defect_entry: entry whose ``parameters`` dict is inspected and
        updated in place with the compatibility verdicts
    :return: the (mutated) defect_entry
    """
    defect_entry.parameters.update({'is_compatible': True}) #this will be switched to False if delocalization is detected

    # Freysoldt (planar-averaged potential) check, if its metadata exists.
    if 'freysoldt_meta' in defect_entry.parameters.keys():
        defect_entry = self.is_freysoldt_delocalized(defect_entry)
    else:
        print('Insufficient information provided for performing Freysoldt '
              'correction delocalization analysis.\n'
              'Cannot perform planar averaged electrostatic potential '
              'compatibility analysis.')

    # Kumagai (atomic-site averaged potential) check, if its metadata exists.
    if 'kumagai_meta' in defect_entry.parameters.keys():
        defect_entry = self.is_kumagai_delocalized(defect_entry)
    else:
        print('Insufficient information provided for performing Kumagai '
              'correction delocalization analysis.\n'
              'Cannot perform atomic site averaged electrostatic '
              'potential compatibility analysis.')

    # Structure-relaxation check requires both structures and a radius.
    if ('final_defect_structure' in defect_entry.parameters.keys()) and \
            ('initial_defect_structure' in defect_entry.parameters.keys()) and \
            ('sampling_radius' in defect_entry.parameters.keys()):
        defect_entry = self.is_final_relaxed_structure_delocalized(defect_entry)
    else:
        print('Insufficient information provided in defect_entry.parameters. '
              'Cannot perform full structure site relaxation compatibility analysis.')

    return defect_entry
Do delocalization analysis. To do this, one considers: i) sampling region of planar averaged electrostatic potential (freysoldt approach) ii) sampling region of atomic site averaged potentials (kumagai approach) iii) structural relaxation amount outside of radius considered in kumagai approach (default is wigner seitz radius) iv) if defect is not a vacancy type -> track to see how much the defect has moved calculations that fail delocalization get "is_compatible" set to False in parameters also parameters receives a "delocalization_meta" with following dict: plnr_avg = {'is_compatible': True/False, 'metadata': metadata used for determining this} atomic_site = {'is_compatible': True/False, 'metadata': metadata used for determining this} structure_relax = {'is_compatible': True/False, 'metadata': metadata used for determining this} defectsite_relax = {'is_compatible': True/False, 'metadata': metadata used for determining this}
def find_executable(executable, path=None):
    """Locate 'executable' in the directories listed in 'path'.

    'path' is a string of directories separated by 'os.pathsep'; it
    defaults to os.environ['PATH'].  On Windows/OS2 an '.exe' suffix is
    appended when missing.  Returns the complete filename, or None if the
    executable cannot be found.
    """
    if path is None:
        path = os.environ['PATH']
    _, ext = os.path.splitext(executable)
    if (sys.platform == 'win32' or os.name == 'os2') and ext != '.exe':
        executable = executable + '.exe'
    # An existing file (absolute/relative path) wins without a search.
    if os.path.isfile(executable):
        return executable
    for directory in path.split(os.pathsep):
        candidate = os.path.join(directory, executable)
        if os.path.isfile(candidate):
            # the file exists, we have a shot at spawn working
            return candidate
    return None
Tries to find 'executable' in the directories listed in 'path'. A string listing directories separated by 'os.pathsep'; defaults to os.environ['PATH']. Returns the complete filename or None if not found.
def parse_posting_id(text, city):
    """
    Parse the posting ID from the Backpage ad.

    text -> The ad's HTML (or a substring containing the "Post ID:" section)
    city -> The Backpage city of the ad

    Returns the post id suffixed with the city's group code, or None when
    the marker is absent, ambiguous, or the id is empty.
    """
    pieces = text.split('Post ID: ')
    # Require exactly one occurrence of the marker.
    if len(pieces) != 2:
        return None
    post_id = pieces[1].split(' ')[0]
    if not post_id:
        return None
    return post_id + post_id_bp_groups[city]
Parse the posting ID from the Backpage ad. text -> The ad's HTML (or the a substring containing the "Post ID:" section) city -> The Backpage city of the ad
def file_list(*packages, **kwargs):
    '''
    List the files that belong to a package. Not specifying any packages
    will return a list of _every_ file on the system's package database
    (not generally recommended).

    CLI Examples:

    .. code-block:: bash

        salt '*' pkg.file_list httpd
        salt '*' pkg.file_list httpd postfix
        salt '*' pkg.file_list
    '''
    errors = []
    files = []
    cmd = ['pacman', '-Ql']

    # If the first argument is an existing path, treat it as an alternate
    # installation root passed to pacman via -r.
    if packages and os.path.exists(packages[0]):
        packages = list(packages)
        cmd.extend(('-r', packages.pop(0)))
    cmd.extend(packages)

    out = __salt__['cmd.run'](cmd,
                              output_loglevel='trace',
                              python_shell=False)
    for line in salt.utils.itertools.split(out, '\n'):
        if line.startswith('error'):
            errors.append(line)
        else:
            # pacman -Ql output is "<pkgname> <path>"; keep only the path
            files.append(' '.join(line.split()[1:]))
    return {'errors': errors, 'files': files}
List the files that belong to a package. Not specifying any packages will return a list of _every_ file on the system's package database (not generally recommended). CLI Examples: .. code-block:: bash salt '*' pkg.file_list httpd salt '*' pkg.file_list httpd postfix salt '*' pkg.file_list
def at(self, t):
    """Compute the target's position relative to the center at time ``t``.

    If ``t`` is an array of times, the returned position object specifies
    as many positions as there were times.  The kind of position returned
    depends on the value of the ``center`` attribute:

    * Solar System Barycenter: :class:`~skyfield.positionlib.Barycentric`
    * Center of the Earth: :class:`~skyfield.positionlib.Geocentric`
    * Difference: :class:`~skyfield.positionlib.Geometric`
    * Anything else: :class:`~skyfield.positionlib.ICRF`
    """
    # Guard clause: a plain number or datetime is a common caller mistake.
    if not isinstance(t, Time):
        raise ValueError('please provide the at() method with a Time'
                         ' instance as its argument, instead of the'
                         ' value {0!r}'.format(t))
    odata = ObserverData()
    odata.ephemeris = self.ephemeris
    p, v, odata.gcrs_position, message = self._at(t)
    center = self.center
    if center == 0:
        # Barycentric center: record the BCRS state vectors as well.
        odata.bcrs_position = p
        odata.bcrs_velocity = v
    self._snag_observer_data(odata, t)
    position = build_position(p, v, t, center, self.target, odata)
    position.message = message
    return position
At time ``t``, compute the target's position relative to the center. If ``t`` is an array of times, then the returned position object will specify as many positions as there were times. The kind of position returned depends on the value of the ``center`` attribute: * Solar System Barycenter: :class:`~skyfield.positionlib.Barycentric` * Center of the Earth: :class:`~skyfield.positionlib.Geocentric` * Difference: :class:`~skyfield.positionlib.Geometric` * Anything else: :class:`~skyfield.positionlib.ICRF`
def main():
    """Command-line entry point: lint the given paths, exit non-zero on errors."""
    if len(sys.argv) < 3:
        print('Usage: <project-name> <filetype> <list-of-path to traverse>')
        print('\tfiletype can be python/cpp/all')
        exit(-1)
    _HELPER.project_name = sys.argv[1]
    file_type = sys.argv[2]

    # Build the set of file suffixes to lint from the requested filetype.
    allow_type = []
    if file_type in ('python', 'all'):
        allow_type.extend(PYTHON_SUFFIX)
    if file_type in ('cpp', 'all'):
        allow_type.extend(CXX_SUFFIX)
    allow_type = set(allow_type)

    # On non-Windows platforms, make stderr tolerant of non-UTF8 output.
    if os.name != 'nt':
        sys.stderr = codecs.StreamReaderWriter(sys.stderr,
                                               codecs.getreader('utf8'),
                                               codecs.getwriter('utf8'),
                                               'replace')
    for path in sys.argv[3:]:
        if os.path.isfile(path):
            process(path, allow_type)
        else:
            for root, _, files in os.walk(path):
                for name in files:
                    process(os.path.join(root, name), allow_type)
    nerr = _HELPER.print_summary(sys.stderr)
    sys.exit(nerr > 0)
Main entry function.
def library_directories(self) -> typing.List[str]:
    """
    Cleaned absolute paths for every library location of the project.
    """
    def listify(value):
        return [value] if isinstance(value, str) else list(value)

    remote = self.is_remote_project

    # When running remotely, drop parent-relative external library folders:
    # the remote shared-libraries folder (appended below) carries all of
    # the necessary dependencies.
    folders = [
        f
        for f in listify(self.settings.fetch('library_folders', ['libs']))
        if not remote or not f.startswith('..')
    ]

    # Always include the remote shared library folder and the project
    # source directory itself.
    folders.append('../__cauldron_shared_libs')
    folders.append(self.source_directory)

    return [
        environ.paths.clean(os.path.join(self.source_directory, folder))
        for folder in folders
    ]
The list of directories to all of the library locations
def doeqdi(x, y, UP=False):
    """
    Convert digitized x,y data to dec,inc assuming an equal area projection.

    Parameters
    __________________
        x : array of digitized x from point on equal area projection
        y : array of digitized y from point on equal area projection
        UP : if True, is an upper hemisphere projection

    Output :
        dec : declination
        inc : inclination
    """
    # swap into geographic convention
    xp, yp = y, x
    r = np.sqrt(xp ** 2 + yp ** 2)
    # inclination angle from the (equal-area) radial distance
    t = np.arcsin(1. - r ** 2)
    if UP == 1:
        t = -t
    # azimuth, wrapped into [0, 360)
    p = np.arctan2(yp, xp)
    return np.degrees(p) % 360, np.degrees(t)
Takes digitized x,y, data and returns the dec,inc, assuming an equal area projection Parameters __________________ x : array of digitized x from point on equal area projection y : array of digitized y from point on equal area projection UP : if True, is an upper hemisphere projection Output : dec : declination inc : inclination
def jpath_parse(jpath):
    """
    Parse given JPath into chunks.

    Returns list of dictionaries describing all of the JPath chunks — for
    each chunk: ``m`` (the raw chunk), ``p`` (breadcrumb path so far),
    ``n`` (node name) and optionally ``i`` (node index: ``-1`` for ``#``,
    the literal ``'*'`` wildcard, or a zero-based integer).

    :param str jpath: JPath to be parsed into chunks
    :return: JPath chunks as list of dicts
    :rtype: :py:class:`list`
    :raises JPathException: in case of invalid JPath syntax
    """
    parsed = []
    crumbs = []
    # Chunks are separated by '.' characters.
    for piece in jpath.split('.'):
        match = RE_JPATH_CHUNK.match(piece)
        if not match:
            raise JPathException("Invalid JPath chunk '{}'".format(piece))
        crumbs.append(piece)
        chunk = {
            'm': piece,                 # whole match
            'p': '.'.join(crumbs),      # breadcrumb path
            'n': match.group(1),        # node name
        }
        # Node index is optional and may be omitted.
        if match.group(2):
            raw_index = match.group(3)
            if str(raw_index) == '#':
                chunk['i'] = -1
            elif str(raw_index) == '*':
                chunk['i'] = raw_index
            else:
                chunk['i'] = int(raw_index) - 1
        parsed.append(chunk)
    return parsed
Parse given JPath into chunks. Returns list of dictionaries describing all of the JPath chunks. :param str jpath: JPath to be parsed into chunks :return: JPath chunks as list of dicts :rtype: :py:class:`list` :raises JPathException: in case of invalid JPath syntax
def remove_board(board_id):
    """Remove a board from boards.txt.

    :param board_id: board id (e.g. 'diecimila')
    :rtype: None
    """
    log.debug('remove %s', board_id)
    prefix = board_id + '.'
    # Keep every line that does not configure the given board.
    kept = [ln for ln in boards_txt().lines()
            if not ln.strip().startswith(prefix)]
    boards_txt().write_lines(kept)
remove board. :param board_id: board id (e.g. 'diecimila') :rtype: None
def _load_same_codes(self, refresh=False): """Loads the Same Codes into this object""" if refresh is True: self._get_same_codes() else: self._cached_same_codes()
Loads the Same Codes into this object
def ip_check(*args, func=None):
    """Check that every positional argument is an IP address object;
    raise ``IPError`` naming the calling function otherwise."""
    # Default the reported caller name from the stack when not supplied.
    caller = func or inspect.stack()[2][3]
    for candidate in args:
        if isinstance(candidate, ipaddress._IPAddressBase):
            continue
        type_name = type(candidate).__name__
        raise IPError(
            f'Function {caller} expected IP address, {type_name} got instead.')
Check if arguments are IP addresses.
def build_pattern(body, features):
    '''Convert *body* into a pattern, i.e. a point in the feature space.

    Applies the features to the body lines and sums the per-line results
    element-wise, so each entry of the pattern indicates how many times a
    certain feature occurred in the last lines of the body.
    '''
    per_line = apply_features(body, features)
    add_elementwise = lambda left, right: [a + b for a, b in zip(left, right)]
    return reduce(add_elementwise, per_line)
Converts body into a pattern i.e. a point in the features space. Applies features to the body lines and sums up the results. Elements of the pattern indicate how many times a certain feature occurred in the last lines of the body.
def input_dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1),
                                 outputs=None, normalize=True, verbosity=1):
    """Build a dataset with an empty output/target vector.

    Identical to `dataset_from_dataframe` except that ``outputs``
    defaults to ``None``.
    """
    options = dict(delays=delays, inputs=inputs, outputs=outputs,
                   normalize=normalize, verbosity=verbosity)
    return dataset_from_dataframe(df=df, **options)
Build a dataset with an empty output/target vector Identical to `dataset_from_dataframe`, except that default values for 2 arguments: outputs: None
def weapon_cooldown(self) -> Union[int, float]:
    """ Returns some time (more than game loops) until the unit can fire again,
    returns -1 for units that can't attack.

    Usage:
    if unit.weapon_cooldown == 0:
        await self.do(unit.attack(target))
    elif unit.weapon_cooldown < 0:
        await self.do(unit.move(closest_allied_unit_because_cant_attack))
    else:
        await self.do(unit.move(retreatPosition))
    """
    # Units without any weapon report a sentinel value.
    if not self.can_attack:
        return -1
    return self._proto.weapon_cooldown
Returns some time (more than game loops) until the unit can fire again, returns -1 for units that can't attack. Usage: if unit.weapon_cooldown == 0: await self.do(unit.attack(target)) elif unit.weapon_cooldown < 0: await self.do(unit.move(closest_allied_unit_because_cant_attack)) else: await self.do(unit.move(retreatPosition))
def dist_location(dist):
    """
    Get the site-packages location of this distribution.

    Generally this is dist.location, except for develop-installed
    packages, where dist.location is the source code location and the
    egg-link file location is what we want.
    """
    link = egg_link_path(dist)
    return link if os.path.exists(link) else dist.location
Get the site-packages location of this distribution. Generally this is dist.location, except in the case of develop-installed packages, where dist.location is the source code location, and we want to know where the egg-link file is.
def bounding_box(self):
    """Bounding box (`~regions.BoundingBox`) enclosing the circle."""
    cx = self.center.x
    cy = self.center.y
    r = self.radius
    return BoundingBox.from_float(cx - r, cx + r, cy - r, cy + r)
Bounding box (`~regions.BoundingBox`).
def check_frame_id(frame_id):
    """Check that the provided frame id is valid in Rapids language.

    ``None`` is accepted; otherwise the id must be non-blank, contain only
    allowed characters ('$' only as the first character), and must not
    start with a (possibly negated) digit.

    :raises H2OValueError: if the frame id is invalid
    """
    if frame_id is None:
        return
    if frame_id.strip() == "":
        raise H2OValueError("Frame id cannot be an empty string: %r" % frame_id)
    for position, ch in enumerate(frame_id):
        # '$' character has special meaning at the beginning of the string;
        # it is prohibited anywhere else
        if ch == "$" and position == 0:
            continue
        if ch not in _id_allowed_characters:
            raise H2OValueError("Character '%s' is illegal in frame id: %s" % (ch, frame_id))
    if re.match(r"-?[0-9]", frame_id):
        raise H2OValueError("Frame id cannot start with a number: %s" % frame_id)
Check that the provided frame id is valid in Rapids language.
def namedb_get_account_diff(current, prior):
    """
    Figure out what the expenditure difference is between two accounts.
    They must be for the same token type and address.

    Calculates current - prior
    """
    same_address = current['address'] == prior['address']
    same_type = current['type'] == prior['type']
    if not (same_address and same_type):
        raise ValueError("Accounts for two different addresses and/or token types")

    # NOTE: only possible since Python doesn't overflow :P
    return namedb_get_account_balance(current) - namedb_get_account_balance(prior)
Figure out what the expenditure difference is between two accounts. They must be for the same token type and address. Calculates current - prior
def partition(self, dimension):
    """
    Partition subspace into desired dimension.

    Keeps the leftmost ``dimension`` input basis vectors of each channel.

    :type dimension: int
    :param dimension: Maximum dimension to use.
    :raises IndexError: if any channel has fewer basis vectors than requested
    """
    for idx, channel in enumerate(self.u):
        if self.v[idx].shape[1] < dimension:
            raise IndexError('Channel is max dimension %s' %
                             self.v[idx].shape[1])
        self.data[idx] = channel[:, :dimension]
    self.dimension = dimension
    return self
Partition subspace into desired dimension. :type dimension: int :param dimension: Maximum dimension to use.
def _get_permission(self, authorizer_name, authorizer_lambda_function_arn):
    """Construct the Lambda Permission resource allowing the Authorizer
    to invoke the function.

    :returns: the permission resource
    :rtype: model.lambda_.LambdaPermission
    """
    rest_api = ApiGatewayRestApi(self.logical_id,
                                 depends_on=self.depends_on,
                                 attributes=self.resource_attributes)

    # Restrict the permission to this API's authorizers via the SourceArn.
    source_arn = fnSub(
        ArnGenerator.generate_arn(
            partition=ArnGenerator.get_partition_name(),
            service='execute-api',
            resource='${__ApiId__}/authorizers/*'),
        {"__ApiId__": rest_api.get_runtime_attr('rest_api_id')})

    permission = LambdaPermission(
        self.logical_id + authorizer_name + 'AuthorizerPermission',
        attributes=self.passthrough_resource_attributes)
    permission.Action = 'lambda:invokeFunction'
    permission.FunctionName = authorizer_lambda_function_arn
    permission.Principal = 'apigateway.amazonaws.com'
    permission.SourceArn = source_arn
    return permission
Constructs and returns the Lambda Permission resource allowing the Authorizer to invoke the function. :returns: the permission resource :rtype: model.lambda_.LambdaPermission
def _fixup_cdef_enums(string, reg=re.compile(r"=\s*(\d+)\s*<<\s*(\d+)")): """Converts some common enum expressions to constants""" def repl_shift(match): shift_by = int(match.group(2)) value = int(match.group(1)) int_value = ctypes.c_int(value << shift_by).value return "= %s" % str(int_value) return reg.sub(repl_shift, string)
Converts some common enum expressions to constants
def _report_error(self, request, exp):
    """Log a fatal message for a failed request, then raise
    ``requests.RequestException`` with the same message."""
    message = "Failure to perform %s due to [ %s ]" % (request, exp)
    self.log.fatal(message)
    raise requests.RequestException(message)
When making the request, if an error happens, log it.
def getserialized(self, key, decoder_func=None, **kwargs):
    """
    Gets the setting value as a :obj:`dict` or :obj:`list` trying
    :meth:`json.loads`, followed by :meth:`yaml.load`.

    :param key: name of the setting to fetch
    :param decoder_func: optional callable used to decode the raw value
        instead of the JSON/YAML fallback chain
    :param kwargs: passed through to :meth:`get`
    :rtype: dict, list
    :raises ValueError: if the value cannot be parsed by either decoder
    """
    # Fetch the raw value with no cast applied.
    value = self.get(key, cast_func=None, **kwargs)
    # Already structured (or missing): nothing to parse.
    if isinstance(value, (dict, list, tuple)) or value is None:
        return value

    if decoder_func:
        return decoder_func(value)

    try:
        o = json.loads(value)
        return o
    except json.decoder.JSONDecodeError:
        pass
    try:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary objects from untrusted input — consider yaml.safe_load.
        # Also, only yaml.parser.ParserError is caught here; other YAML
        # errors (e.g. ScannerError) would propagate — confirm intended.
        o = yaml.load(value)
        return o
    except yaml.parser.ParserError:
        pass

    raise ValueError('Unable to parse {} setting using JSON or YAML.'.format(key))
Gets the setting value as a :obj:`dict` or :obj:`list` trying :meth:`json.loads`, followed by :meth:`yaml.load`. :rtype: dict, list
def get(self, filepath):
    """
    Write the details of the specified file to the response; respond
    with HTTP 404 when the file cannot be accessed.
    """
    try:
        details = self.fs.get_file_details(filepath).to_dict()
        self.write(details)
    except OSError:
        raise tornado.web.HTTPError(404)
Get file details for the specified file.
def size_str(size_in_bytes):
    """Returns a human readable size string.

    Falsy input (``None`` — and, as a side effect of the truthiness check,
    ``0``) yields the placeholder ``"?? GiB"``.

    For example `size_str(1.5 * tfds.units.GiB) == "1.50 GiB"`.

    Args:
      size_in_bytes: `int` or `None`, the size, in bytes, that we want to
        format as a human-readable size string.
    """
    if not size_in_bytes:
        return "?? GiB"

    remaining = float(size_in_bytes)
    # _NAME_LIST is ordered largest unit first; pick the first unit whose
    # scaled value reaches 1.0.
    for name, unit_size in _NAME_LIST:
        scaled = remaining / unit_size
        if scaled >= 1.0:
            return "{:.2f} {}".format(scaled, name)
    return "{} {}".format(int(remaining), "bytes")
Returns a human readable size string. If size_in_bytes is None, then returns "?? GiB". For example `size_str(1.5 * tfds.units.GiB) == "1.50 GiB"`. Args: size_in_bytes: `int` or `None`, the size, in bytes, that we want to format as a human-readable size string.
def json_decode(data):
    """
    Decodes the given JSON as primitives.

    :param data: JSON document as ``str`` or UTF-8 encoded ``bytes``
    :return: the decoded primitive (dict, list, str, number, bool, None)
    :raises json.JSONDecodeError: if the document is not valid JSON
    """
    # ``bytes`` covers six.binary_type on both Python 2 (bytes is str)
    # and Python 3, removing the third-party ``six`` dependency.
    if isinstance(data, bytes):
        data = data.decode('utf-8')
    return json.loads(data)
Decodes the given JSON as primitives
def _get_erred_shared_settings_module(self):
    """
    Build a LinkList dashboard module with a link for every shared
    service settings instance that is in the ERRED state.
    """
    module = modules.LinkList(title=_('Shared provider settings in erred state'))
    module.template = 'admin/dashboard/erred_link_list.html'

    erred_state = structure_models.SharedServiceSettings.States.ERRED
    queryset = structure_models.SharedServiceSettings.objects
    erred_count = queryset.filter(state=erred_state).count()

    if not erred_count:
        module.pre_content = _('Nothing found.')
        return module

    # Show the number of broken settings in the module title.
    module.title = '%s (%s)' % (module.title, erred_count)
    for service_settings in queryset.filter(state=erred_state).iterator():
        child = self._get_link_to_instance(service_settings)
        child['error'] = service_settings.error_message
        module.children.append(child)
    return module
Returns a LinkList based module which contains link to shared service setting instances in ERRED state.
def object_new(self, template=None, **kwargs):
    """Creates a new object from an IPFS template.

    By default this creates and returns a new empty merkledag node, but
    you may pass an optional template argument to create a preformatted
    node.

    .. code-block:: python

        >>> c.object_new()
        {'Hash': 'QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n'}

    Parameters
    ----------
    template : str
        Blueprints from which to construct the new object. Possible values:

         * ``"unixfs-dir"``
         * ``None``

    Returns
    -------
        dict : Object hash
    """
    # Only send the template argument when one was actually given.
    args = () if template is None else (template,)
    return self._client.request('/object/new', args, decoder='json', **kwargs)
Creates a new object from an IPFS template. By default this creates and returns a new empty merkledag node, but you may pass an optional template argument to create a preformatted node. .. code-block:: python >>> c.object_new() {'Hash': 'QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n'} Parameters ---------- template : str Blueprints from which to construct the new object. Possible values: * ``"unixfs-dir"`` * ``None`` Returns ------- dict : Object hash
def entityTriples(self, aURI):
    """
    Builds all triples for an entity.

    Note: if a triple object is a blank node (= a nested definition) we
    try to extract all relevant data recursively (does not work with
    sparql endpoints).

    :param aURI: URI of the entity to describe
    :return: list of (s, p, o) triples, including recursively gathered
        blank-node triples when possible
    """
    qres = self.rdfgraph.query(
        """CONSTRUCT {<%s> ?y ?z }
         WHERE { { <%s> ?y ?z } }
         """ % (aURI, aURI))
    lres = list(qres)

    def recurse(triples_list):
        """Uses the rdflib <triples> method to pull out all blank nodes info.

        NOTE(review): cyclic blank-node structures would recurse forever —
        assumed not to occur in practice; confirm if inputs may be cyclic.
        """
        out = []
        for tripl in triples_list:
            if isBlankNode(tripl[2]):
                temp = [x for x in self.rdfgraph.triples((tripl[2], None, None))]
                out += temp + recurse(temp)
        return out

    try:
        return lres + recurse(lres)
    except Exception:
        # fixed: was a bare ``except:`` which also swallowed SystemExit /
        # KeyboardInterrupt; best-effort fallback to the flat triple list
        printDebug("Error extracting blank nodes info", "important")
        return lres
Builds all triples for an entity Note: if a triple object is a blank node (=a nested definition) we try to extract all relevant data recursively (does not work with sparql endpoints) 2015-10-18: updated