code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def get_api_gateway_resource(name):
    """Return the id of the API Gateway resource whose path part is ``name``.

    Looks up the REST API named ``API_GATEWAY`` in ``PRIMARY_REGION`` and
    scans its resources for one whose ``pathPart`` equals ``name``.

    :param name: path part of the resource to look for
    :return: the resource id string, or None if the API or the resource
        is not found
    """
    client = boto3.client('apigateway', region_name=PRIMARY_REGION)
    matches = [x for x in client.get_rest_apis().get('items', list())
               if x['name'] == API_GATEWAY]
    if not matches:
        # No API with our name exists; the original matches.pop() would
        # raise IndexError here.
        return None
    match = matches.pop()
    resources = client.get_resources(restApiId=match.get('id'))
    resource_id = None
    for item in resources.get('items', list()):
        if item.get('pathPart', '/') != name:
            continue
        resource_id = item['id']
    return resource_id
Get the resource associated with our gateway.
def optimize(self, piter=3, pmaxf=300, ppert=0.1):
    """Run :py:obj:`pPLD` on the target to further optimize the PLD priors.

    See :py:class:`everest.detrender.pPLD`.
    """
    # Persist current state before launching the optimizer.
    self._save_npz()
    result = pPLD(self.ID, piter=piter, pmaxf=pmaxf,
                  ppert=ppert, debug=True, clobber=True)
    result.publish()
    self.reset()
Runs :py:obj:`pPLD` on the target in an attempt to further optimize the values of the PLD priors. See :py:class:`everest.detrender.pPLD`.
def length_of_associated_transcript(effect):
    """Return the spliced mRNA length of the transcript tied to ``effect``.

    Falls back to 0 when the effect has no associated transcript.
    """
    return apply_to_transcript_if_exists(
        effect=effect,
        fn=lambda transcript: len(transcript.sequence),
        default=0)
Length of spliced mRNA sequence of transcript associated with effect, if there is one (otherwise return 0).
def remove(self, line):
    """Delete every line matching ``line`` across all blocks.

    :return: total number of lines removed
    """
    return sum(block.remove(line) for block in self.blocks)
Delete all lines matching the given line.
def is_multilingual_project(site_id=None):
    """Return whether this Django project is configured for multilingual support."""
    from parler import appsettings
    if site_id is None:
        site_id = getattr(settings, 'SITE_ID', None)
    return (appsettings.PARLER_SHOW_EXCLUDED_LANGUAGE_TABS
            or site_id in appsettings.PARLER_LANGUAGES)
Whether the current Django project is configured for multilingual support.
def axis_names_without(self, axis):
    """Return the axis names with ``axis`` removed, or None if unset."""
    names = self.axis_names
    if names is None:
        return None
    return itemgetter(*self.other_axes(axis))(names)
Return axis names without axis, or None if axis_names is None
def now_heating(self):
    """Return the current heating state for this side of the device.

    :return: the device's heating flag, or None when the side is not
        'left'/'right' or the device data is unavailable
    """
    try:
        if self.side == 'left':
            return self.device.device_data['leftNowHeating']
        if self.side == 'right':
            return self.device.device_data['rightNowHeating']
        # Unknown side: the original fell through with ``heat`` unbound,
        # raising UnboundLocalError that ``except TypeError`` did not catch.
        return None
    except TypeError:
        # device_data is None (device data not loaded yet).
        return None
Return current heating state.
def get_object(self):
    """Return the object to publish.

    :raises http.Http404: if no object is found or it has no ``publish``
        attribute
    """
    obj = super(PublishView, self).get_object()
    if obj and hasattr(obj, 'publish'):
        return obj
    raise http.Http404
Get the object for publishing Raises a http404 error if the object is not found.
def update_position(self, newpos):
    """Record ``newpos`` on the trail, trimming it to ``self.count`` points."""
    now = time.time()
    # Only sample a new point once per ``timestep`` seconds.
    if now >= self.last_time + self.timestep:
        self.points.append(newpos.latlon)
        self.last_time = now
    # Drop the oldest points once the trail exceeds its maximum length.
    while len(self.points) > self.count:
        self.points.pop(0)
update trail
def printComparison(results, class_or_prop):
    """Print out the results of the comparison using a nice table."""
    Row = namedtuple('Row', [class_or_prop, 'VALIDATED'])
    rows = [Row(key, str(val))
            for key, val in sorted(results.items(), key=lambda item: item[1])]
    pprinttable(rows)
Print out the results of the comparison using a nice table.
def _make_images(self, images):
    """Convert a giphy API image dict into attributes on this instance.

    Keys of ``images`` are split on the last underscore: the prefix
    becomes the attribute name and the suffix a sub-attribute, e.g.
    ``fixed_width_downsampled`` ends up at ``self.fixed_width.downsampled``.
    Fields expected to be int (width, height, size, frames) are normalized.
    """
    # Order matters: parent attributes must exist before sub-attributes
    # are attached to them.
    ordered_keys = ('original', 'fixed_width', 'fixed_height',
                    'fixed_width_downsampled', 'fixed_width_still',
                    'fixed_height_downsampled', 'fixed_height_still',
                    'downsized')
    for key in ordered_keys:
        data = images.get(key)
        if not data:
            # Skip keys the API did not populate.
            continue
        pieces = key.split('_')
        if len(pieces) > 2:
            attr, subattr = '_'.join(pieces[:-1]), pieces[-1]
        else:
            attr, subattr = '_'.join(pieces), None
        # Normalize the raw data into attribute-style access.
        img = AttrDict(self._normalized(data))
        if subattr is None:
            setattr(self, attr, img)
        else:
            setattr(getattr(self, attr), subattr, img)
Takes an image dict from the giphy api and converts it to attributes. Any fields expected to be int (width, height, size, frames) will be attempted to be converted. Also, the keys of `data` serve as the attribute names, but with special action taken. Keys are split by the last underscore; anything prior becomes the attribute name, anything after becomes a sub-attribute. For example: fixed_width_downsampled will end up at `self.fixed_width.downsampled`
def submit(self, fn, *args, **kwargs):
    """Submit a callable for execution with the given arguments.

    Tracks the largest work-queue size ever observed in
    ``self.max_queue_size_reached``.
    """
    future = super().submit(fn, *args, **kwargs)
    queued = self._work_queue.qsize()
    # Remember the high-water mark of the queue.
    self.max_queue_size_reached = max(self.max_queue_size_reached, queued)
    return future
Submits a callable to be executed with the given arguments. Count maximum reached work queue size in ThreadPoolExecutor.max_queue_size_reached.
def parse(self, fo):
    """Convert AMD output to motifs.

    Parameters
    ----------
    fo : file-like
        File object containing AMD output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    motifs = []
    # Example data line: "160: 112 CACGTGC 7.25 chr14:32308489-32308689"
    p = re.compile(r'\d+\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)')
    wm = []
    name = ""
    for line in fo.readlines():
        if line.startswith("Motif") and line.strip().endswith(":"):
            # New motif header: flush the motif collected so far.
            if name:
                motifs.append(Motif(wm))
                motifs[-1].id = name
                name = ""
                wm = []
            name = "%s_%s" % (self.name, line.split(":")[0])
        else:
            m = p.search(line)
            if m:
                wm.append([float(m.group(x)) for x in range(1, 5)])
    # Flush the final motif. The original appended unconditionally,
    # producing an empty, unnamed Motif when the input had no motifs.
    if name:
        motifs.append(Motif(wm))
        motifs[-1].id = name
    return motifs
Convert AMD output to motifs Parameters ---------- fo : file-like File object containing AMD output. Returns ------- motifs : list List of Motif instances.
def refresh(self, thread=None):
    """Refresh thread metadata from the notmuch index."""
    if not thread:
        thread = self._dbman._get_notmuch_thread(self._id)
    self._total_messages = thread.get_total_messages()
    self._notmuch_authors_string = thread.get_authors()

    # Subject source is configurable: the thread's own subject, or the
    # subject of its oldest top-level message.
    subject_type = settings.get('thread_subject')
    if subject_type == 'notmuch':
        subject = thread.get_subject()
    elif subject_type == 'oldest':
        try:
            first_msg = list(thread.get_toplevel_messages())[0]
            subject = first_msg.get_header('subject')
        except IndexError:
            subject = ''
    self._subject = subject
    self._authors = None

    ts = thread.get_oldest_date()
    try:
        self._oldest_date = datetime.fromtimestamp(ts)
    except ValueError:
        # year is out of range
        self._oldest_date = None
    try:
        timestamp = thread.get_newest_date()
        self._newest_date = datetime.fromtimestamp(timestamp)
    except ValueError:
        # year is out of range
        self._newest_date = None

    self._tags = {t for t in thread.get_tags()}
    self._messages = {}  # maps messages to their children
    self._toplevel_messages = []
refresh thread metadata from the index
def create_partition(self, partition_spec, if_not_exists=False, async_=False, **kw):
    """Create a partition within the table.

    :param partition_spec: specification of the partition
    :param if_not_exists: do not fail when the partition already exists
    :param async_: run the operation asynchronously
    :return: partition object
    :rtype: odps.models.partition.Partition
    """
    # Honour the legacy keyword spelling ``async`` if the caller used it.
    async_ = kw.get('async', async_)
    return self.partitions.create(partition_spec,
                                  if_not_exists=if_not_exists,
                                  async_=async_)
Create a partition within the table. :param partition_spec: specification of the partition. :param if_not_exists: :param async_: :return: partition object :rtype: odps.models.partition.Partition
def parse_form_request(api_secret, request):
    """Validate a signed form request and wrap it in a Storage object.

    >>> parse_form_request("123456",{"nonce": 1451122677, "msg": "helllo", "code": 0, "sign": "DB30F4D1112C20DFA736F65458F89C64"})
    <Storage {'nonce': 1451122677, 'msg': 'helllo', 'code': 0, 'sign': 'DB30F4D1112C20DFA736F65458F89C64'}>
    """
    if not check_sign(api_secret, request):
        raise SignError(u"message sign error")
    return Storage(request)
>>> parse_form_request("123456",{"nonce": 1451122677, "msg": "helllo", "code": 0, "sign": "DB30F4D1112C20DFA736F65458F89C64"}) <Storage {'nonce': 1451122677, 'msg': 'helllo', 'code': 0, 'sign': 'DB30F4D1112C20DFA736F65458F89C64'}>
def process_common_disease_file(self, raw, unpadded_doids, limit=None):
    """Make disease-phenotype associations.

    Some identifiers need clean up:
    * DOIDs are listed as DOID-DOID: --> DOID:
    * DOIDs may be unnecessarily zero-padded; these are remapped to
      their non-padded equivalent.

    :param raw: path to the raw tab-separated file
    :param unpadded_doids: collection of known unpadded DOID numbers
    :param limit: optional maximum number of rows to process
    :return: number of associations added
    """
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    assoc_count = 0
    replace_id_flag = False
    col = self.small_files['columns']
    with open(raw, 'r', encoding="utf8") as tsvfile:
        reader = csv.reader(tsvfile, delimiter='\t', quotechar='\"')
        # BUG FIX: the original compared the raw header *string* against
        # the ``col`` *list* (never equal) and then raised via
        # ``ValueError(col - header)``, which is itself a TypeError.
        # Compare the split header fields and raise a proper message.
        header = tsvfile.readline().rstrip('\n').split('\t')
        if header != col:
            LOG.error("HEADER: has changed in %s.", raw)
            raise ValueError(
                "Expected columns %s but found %s" % (col, header))
        disease_id = None
        for row in reader:
            row = [str(x).strip() for x in row]
            did = row[col.index('Disease ID')]
            # genotype = row[col.index('Genotype')]
            phenotype_id = row[col.index('Phenotype ID')]
            age_of_onset_id = row[col.index('Age of Onset ID')]
            eid = row[col.index('Evidence ID')]
            frequency = row[col.index('Frequency')]
            negation_id = row[col.index('Negation ID')]
            description = row[col.index('Description')]
            pub_ids = row[col.index('Pub')]

            # Normalize identifier forms like "DO-", "DOID-DOID:", "MESH-".
            disease_id = re.sub(r'DO(ID)?[-\:](DOID:)?', 'DOID:', did)
            disease_id = re.sub(r'MESH-', 'MESH:', disease_id)
            if not re.search(r'(DOID\:|MESH\:\w)\d+', disease_id):
                LOG.warning("Invalid id format: %s", disease_id)

            # Figure out if the DOID should be unpadded, then use the
            # unpadded version instead.
            if re.match(r'DOID', disease_id):
                unpadded_num = re.sub(r'DOID:', '', disease_id)
                unpadded_num = unpadded_num.lstrip('0')
                if unpadded_num in unpadded_doids:
                    fixed_id = 'DOID:' + unpadded_num
                    replace_id_flag = True
                    disease_id = fixed_id.strip()

            if self.test_mode and disease_id not in self.test_ids:
                # These files are broken up disease-by-disease,
                # so just skip the whole file.
                return 0

            if negation_id != '':
                continue  # TODO add negative associations

            if disease_id != '' and phenotype_id != '':
                assoc = D2PAssoc(
                    graph, self.name, disease_id, phenotype_id.strip())
                if age_of_onset_id != '':
                    assoc.onset = age_of_onset_id
                if frequency != '':
                    assoc.frequency = frequency
                eco_id = self.localtt[eid]
                if eco_id is None:
                    eco_id = self.localtt['ITM']
                assoc.add_evidence(eco_id)
                # TODO add sex? - not in dataset yet
                if description != '':
                    assoc.set_description(description)
                if pub_ids != '':
                    for pub in pub_ids.split(';'):
                        pub = re.sub(r' *', '', pub)
                        # fixed now but just in case: there have been
                        # several malformed PMID curies
                        if pub[:4] != 'http' and \
                                graph.curie_regexp.fullmatch(pub) is None:
                            LOG.warning(
                                'Record %s has a malformed Pub %s', did, pub)
                            continue
                        if re.search(r'(DOID|MESH)', pub) or re.search(
                                r'Disease name contained', description):
                            # skip "pubs" that are derived from
                            # the classes themselves
                            continue
                        assoc.add_source(pub.strip())
                # TODO assigned by?
                assoc.add_association_to_graph()
                assoc_count += 1

            if not self.test_mode and limit is not None \
                    and reader.line_num > limit:
                break

    if replace_id_flag:
        LOG.info("replaced DOID with unpadded version")
        self.replaced_id_count += 1
    LOG.info("Added %d associations for %s.", assoc_count, disease_id)
    return assoc_count
Make disease-phenotype associations. Some identifiers need clean up: * DOIDs are listed as DOID-DOID: --> DOID: * DOIDs may be unnecessarily zero-padded. These are remapped to their non-padded equivalent. :param raw: :param unpadded_doids: :param limit: :return:
def _delete_request(self, url, headers, data=None): """ Issue a DELETE request to the specified endpoint with the data provided. :param url: str :pararm headers: dict :param data: dict """ return self._session.delete(url, headers=headers, data=data)
Issue a DELETE request to the specified endpoint with the data provided. :param url: str :param headers: dict :param data: dict
def set_config(self, key, option, value):
    """Set configuration ``option`` to ``value`` for ``key``.

    Missing config sections and keys are seeded from ``default_cfg``.
    """
    if '_config' not in self.proxy:
        # First option ever stored: build the section from the defaults.
        fresh = dict(default_cfg)
        fresh[option] = value
        self.proxy['_config'] = {key: fresh}
    elif key in self.proxy['_config']:
        self.proxy['_config'][key][option] = value
    else:
        # New key: start from the defaults.
        fresh = dict(default_cfg)
        fresh[option] = value
        self.proxy['_config'][key] = fresh
Set a configuration option for a key
def ask_int(question: str, default: int = None) -> int:
    """Ask the user for an integer, re-prompting until the input is valid.

    :param question: text shown to the user
    :param default: value returned on empty input; if None, empty input
        re-asks the question
    :return: the integer the user entered (or the default)
    """
    # BUG FIX: the original interpolated the already-bracketed default
    # hint into a second "[{1}]: " wrapper, printing doubled brackets
    # and colons ("question [ [default: 5]: ]: "). Build the prompt once.
    hint = " [default: {0}]".format(default) if default is not None else ""
    answer = input("{0}{1}: ".format(question, hint))
    if not answer:
        if default is None:
            print("No default set, try again.")
            return ask_int(question, default)
        return default
    if any(ch not in "1234567890" for ch in answer):
        print("Please enter only numbers (0-9).")
        return ask_int(question, default)
    return int(answer)
Asks for a number in a question
def extractLargestSubNetwork(cls, network_file, out_subset_network_file, river_id_field, next_down_id_field, river_magnitude_field, safe_mode=True):
    """Extract the largest sub network from the watershed based on the
    magnitude parameter.

    Parameters
    ----------
    network_file: str
        Path to the stream network shapefile.
    out_subset_network_file: str
        Path to the output subset stream network shapefile.
    river_id_field: str
        Name of the river ID field in the stream network shapefile.
    next_down_id_field: str
        Name of the field with the river ID of the next downstream
        river segment in the stream network shapefile.
    river_magnitude_field: str
        Name of the river magnitude field in the stream network shapefile.
    safe_mode: bool, optional
        If True, it will kill the simulation early before over taxing
        your computer. If you are confident your computer can handle it,
        set it to False.

    Here is an example of how to use this:

    .. code:: python

        import os
        from RAPIDpy.gis.taudem import TauDEM

        output_directory = '/path/to/output/files'
        network_shp = os.path.join(output_directory,
                                   "stream_reach_file.shp")
        out_shp = os.path.join(output_directory,
                               "stream_reach_file_subset.shp")
        TauDEM.extractLargestSubNetwork(
            network_file=network_shp,
            out_subset_network_file=out_shp,
            river_id_field="LINKNO",
            next_down_id_field="DSLINKNO",
            river_magnitude_field="Magnitude",
        )
    """
    shapefile = ogr.Open(network_file)
    layer = shapefile.GetLayer()
    feature_count = layer.GetFeatureCount()
    # Collect each feature's magnitude so we can find the outlet of the
    # largest network.
    magnitudes = np.zeros(feature_count, dtype=np.int32)
    for idx, feature in enumerate(layer):
        magnitudes[idx] = feature.GetField(river_magnitude_field)
    outlet_feature = layer.GetFeature(np.argmax(magnitudes))
    cls.extractSubNetwork(network_file,
                          out_subset_network_file,
                          [outlet_feature.GetField(river_id_field)],
                          river_id_field,
                          next_down_id_field,
                          river_magnitude_field,
                          safe_mode)
Extracts the largest sub network from the watershed based on the magnitude parameter. Parameters ---------- network_file: str Path to the stream network shapefile. out_subset_network_file: str Path to the output subset stream network shapefile. river_id_field: str Name of the river ID field in the stream network shapefile. next_down_id_field: str Name of the field with the river ID of the next downstream river segment in the stream network shapefile. river_magnitude_field: str Name of the river magnitude field in the stream network shapefile. safe_mode: bool, optional If True, it will kill the simulation early before over taxing your computer. If you are confident your computer can handle it, set it to False. Here is an example of how to use this: .. code:: python import os from RAPIDpy.gis.taudem import TauDEM output_directory = '/path/to/output/files' network_shp = os.path.join(output_directory, "stream_reach_file.shp") out_shp = os.path.join(output_directory, "stream_reach_file_subset.shp") TauDEM.extractLargestSubNetwork( network_file=network_shp, out_subset_network_file=out_shp, river_id_field="LINKNO", next_down_id_field="DSLINKNO", river_magnitude_field="Magnitude", )
def recompute(self, quiet=False, **kwargs):
    """Re-compute a previously computed model.

    Useful when the kernel parameters change and the kernel is flagged
    as ``dirty``.

    :param quiet: (optional)
        If ``True``, return False when the computation fails instead of
        raising. (default: ``False``)
    :return: True on success, False on a quiet failure
    """
    if not self.computed:
        if not (hasattr(self, "_x") and hasattr(self, "_yerr2")):
            raise RuntimeError("You need to compute the model first")
        try:
            # Update the model, keeping the original ordering of the
            # points intact.
            self.compute(self._x, np.sqrt(self._yerr2), **kwargs)
        except (ValueError, LinAlgError):
            if quiet:
                return False
            raise
    return True
Re-compute a previously computed model. You might want to do this if the kernel parameters change and the kernel is labeled as ``dirty``. :param quiet: (optional) If ``True``, return false when the computation fails. Otherwise, throw an error if something goes wrong. (default: ``False``)
def window_visu(N=51, name='hamming', **kargs):
    """Plot a window's shape and its equivalent in the Fourier domain.

    :param N: length of the window
    :param name: name of the window
    :param NFFT: padding used by the FFT
    :param mindB: the minimum frequency power in dB
    :param maxdB: the maximum frequency power in dB
    :param kargs: optional arguments passed to :func:`create_window`

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import window_visu
        window_visu(64, 'kaiser', beta=8.)
    """
    # Pull the plotting options out before forwarding kargs to Window.
    mindB = kargs.pop('mindB', -100)
    maxdB = kargs.pop('maxdB', None)
    norm = kargs.pop('norm', True)
    win = Window(N, name, **kargs)
    win.plot_time_freq(mindB=mindB, maxdB=maxdB, norm=norm)
A Window visualisation tool :param N: length of the window :param name: name of the window :param NFFT: padding used by the FFT :param mindB: the minimum frequency power in dB :param maxdB: the maximum frequency power in dB :param kargs: optional arguments passed to :func:`create_window` This function plot the window shape and its equivalent in the Fourier domain. .. plot:: :width: 80% :include-source: from spectrum import window_visu window_visu(64, 'kaiser', beta=8.)
def search(self, query):
    """Search the play store for an app.

    :param query: the search term
    :return: list of parsed protobuf result objects
    :raises LoginError: when called before logging in
    """
    if self.authSubToken is None:
        raise LoginError("You need to login before executing any request")
    path = SEARCH_URL + "?c=3&q={}".format(requests.utils.quote(query))
    # FIXME: not sure if this toc call should be here
    self.toc()
    data = self.executeRequestApi2(path)
    response = data.preFetch[0].response if utils.hasPrefetch(data) else data
    docs = response.payload.listResponse.doc
    return [utils.parseProtobufObj(item) for item in docs]
Search the play store for an app. nb_result (int): is the maximum number of result to be returned offset (int): is used to take result starting from an index.
def t_FLOAT(token):  # pylint: disable=locally-disabled,invalid-name
    r'\d+\.\d+'
    # PLY lexer rule: the docstring above is the token's regex.
    # Replace the raw lexeme with a (type, value) pair.
    token.value = (token.type, float(token.value))
    return token
r'\d+\.\d+
def htmlNodeDumpOutput(self, doc, cur, encoding):
    """Dump an HTML node, recursive behaviour: children are printed too,
    and formatting returns/spaces are added."""
    # Unwrap the optional Python wrappers to their underlying C objects.
    doc__o = None if doc is None else doc._o
    cur__o = None if cur is None else cur._o
    libxml2mod.htmlNodeDumpOutput(self._o, doc__o, cur__o, encoding)
Dump an HTML node, recursive behaviour,children are printed too, and formatting returns/spaces are added.
def fail(message=None, exit_status=None):
    """Print an error message to stderr and terminate the program.

    :param message: text reported on stderr
    :param exit_status: process exit code; falsy values become 1
    """
    print('Error:', message, file=sys.stderr)
    code = exit_status or 1
    sys.exit(code)
Prints the specified message and exits the program with the specified exit status.
def get_oa_policy(doi):
    """Get OA policy for a given DOI.

    .. note:: Uses beta.dissem.in API.

    :param doi: A canonical DOI.
    :returns: The OpenAccess policy for the associated publications, or \
            ``None`` if unknown.

    >>> tmp = get_oa_policy('10.1209/0295-5075/111/40005'); (tmp["published"], tmp["preprint"], tmp["postprint"], tmp["romeo_id"])
    ('can', 'can', 'can', '1896')
    >>> get_oa_policy('10.1215/9780822387268') is None
    True
    """
    try:
        response = requests.get("%s%s" % (DISSEMIN_API, doi))
        response.raise_for_status()
        payload = response.json()
        assert payload["status"] == "ok"
        # Pick the publication entry matching the requested DOI.
        matching = [pub for pub in payload["paper"]["publications"]
                    if pub["doi"] == doi]
        return matching[0]["policy"]
    except (AssertionError, ValueError, KeyError,
            RequestException, IndexError):
        return None
Get OA policy for a given DOI. .. note:: Uses beta.dissem.in API. :param doi: A canonical DOI. :returns: The OpenAccess policy for the associated publications, or \ ``None`` if unknown. >>> tmp = get_oa_policy('10.1209/0295-5075/111/40005'); (tmp["published"], tmp["preprint"], tmp["postprint"], tmp["romeo_id"]) ('can', 'can', 'can', '1896') >>> get_oa_policy('10.1215/9780822387268') is None True
def load(cls, file):
    """Load a :class:`~pypot.primitive.move.Move` from an opened json file."""
    return cls.create(json.load(file))
Loads a :class:`~pypot.primitive.move.Move` from a json file.
def _reset(self, index, total, percentage_step, length): """Resets to the progressbar to start a new one""" self._start_time = datetime.datetime.now() self._start_index = index self._current_index = index self._percentage_step = percentage_step self._total = float(total) self._total_minus_one = total - 1 self._length = length self._norm_factor = total * percentage_step / 100.0 self._current_interval = int((index + 1.0) / self._norm_factor)
Resets to the progressbar to start a new one
def loadVars(filename, ask=True, into=None, only=None):
    r"""Load variables pickled with `saveVars`.

    Parameters:
        - `ask`: If `True` then don't overwrite existing variables
          without asking.
        - `only`: A list to limit the variables to or `None`.
        - `into`: The dictionary the variables should be loaded into
          (defaults to global dictionary).
    """
    # Modernized from Python 2 (print statement, raw_input, dict.has_key,
    # list-returning filter) to Python 3, matching the rest of the file.
    filename = os.path.expanduser(filename)
    if into is None:
        into = magicGlobals()
    varH = loadDict(filename)
    toUnpickle = only or list(varH.keys())
    alreadyDefined = [k for k in toUnpickle if k in into]
    if alreadyDefined and ask:
        print("The following vars already exist; overwrite (yes/NO)?\n",
              "\n".join(alreadyDefined))
        if input() != "yes":
            toUnpickle = without(toUnpickle, alreadyDefined)
    if not toUnpickle:
        print("nothing to unpickle")
        return None
    print("unpickling:\n", "\n".join(sorted(toUnpickle)))
    for k in list(varH.keys()):
        if k not in toUnpickle:
            del varH[k]
    into.update(varH)
r"""Load variables pickled with `saveVars`. Parameters: - `ask`: If `True` then don't overwrite existing variables without asking. - `only`: A list to limit the variables to or `None`. - `into`: The dictionary the variables should be loaded into (defaults to global dictionary).
def check_column_id(
    problems: List,
    table: str,
    df: DataFrame,
    column: str,
    *,
    column_required: bool = True,
) -> List:
    """A specialization of :func:`check_column`.

    Record the indices of ``df`` where ``column`` has a duplicated entry
    or an invalid string, appending ``[type_, problem, table, indices]``
    items to ``problems`` for each kind of defect found. When
    ``column_required`` is False, NaN entries are ignored.

    :param problems: list of (type, message, table, rows) problem tuples
    :param table: name of a GTFS table
    :param df: the GTFS table corresponding to ``table``
    :param column: a column of ``df``
    :param column_required: whether the GTFS requires this column
    :return: the extended ``problems`` list
    """
    frame = df.copy()
    if not column_required:
        # Optional column: treat it as all-NaN when absent, then drop
        # NaNs so they are exempt from the checks below.
        if column not in frame.columns:
            frame[column] = np.nan
        frame = frame.dropna(subset=[column])

    invalid = ~frame[column].map(valid_str)
    problems = check_table(
        problems,
        table,
        frame,
        invalid,
        f"Invalid {column}; maybe has extra space characters",
    )

    duplicated = frame[column].duplicated()
    problems = check_table(problems, table, frame, duplicated,
                           f"Repeated {column}")
    return problems
A specialization of :func:`check_column`. Parameters ---------- problems : list A four-tuple containing 1. A problem type (string) equal to ``'error'`` or ``'warning'``; ``'error'`` means the GTFS is violated; ``'warning'`` means there is a problem but it is not a GTFS violation 2. A message (string) that describes the problem 3. A GTFS table name, e.g. ``'routes'``, in which the problem occurs 4. A list of rows (integers) of the table's DataFrame where the problem occurs table : string Name of a GTFS table df : DataFrame The GTFS table corresponding to ``table`` column : string A column of ``df`` column_required : boolean ``True`` if and only if ``column`` is required (and not optional) by the GTFS Returns ------- list The ``problems`` list extended as follows. Record the indices of ``df`` where the given column has duplicated entry or an invalid strings. If the list of indices is nonempty, append to the problems the item ``[type_, problem, table, indices]``; otherwise do not append anything. If not ``column_required``, then NaN entries will be ignored in the checking.
def get_commit_request(self, id):  # pylint: disable=invalid-name,redefined-builtin
    """Get a commit request for a staged import.

    :param id: Staged import ID as an int.
    :return: :class:`imports.Request <imports.Request>` object
    :rtype: imports.Request
    """
    schema = RequestSchema()
    url = self.base + str(id) + '/request/'
    response = self.service.get(url)
    return self.service.decode(schema, response)
Get a commit request for a staged import. :param id: Staged import ID as an int. :return: :class:`imports.Request <imports.Request>` object :rtype: imports.Request
def check_dns_name_availability(name, region, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Check whether a domain name in the current zone is available for use.

    :param name: The DNS name to query.
    :param region: The region to query for the DNS name in question.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_network.check_dns_name_availability testdnsname westus
    '''
    netconn = __utils__['azurearm.get_client']('network', **kwargs)
    try:
        availability = netconn.check_dns_name_availability(
            location=region,
            domain_name_label=name
        )
        return availability.as_dict()
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
        return {'error': str(exc)}
.. versionadded:: 2019.2.0 Check whether a domain name in the current zone is available for use. :param name: The DNS name to query. :param region: The region to query for the DNS name in question. CLI Example: .. code-block:: bash salt-call azurearm_network.check_dns_name_availability testdnsname westus
def reset_stats(self):
    """Return score statistics across all runners and clear them.

    Returns:
        mean, max: two stats of the runners, to be added to backend.
    """
    scores = list(itertools.chain.from_iterable(
        runner.total_scores for runner in self._runners))
    for runner in self._runners:
        runner.total_scores.clear()
    try:
        return np.mean(scores), np.max(scores)
    except Exception:
        # e.g. no scores collected yet (empty sequence).
        logger.exception("Cannot compute total scores in EnvRunner.")
        return None, None
Returns: mean, max: two stats of the runners, to be added to backend
def add_extra_chain_cert(self, certobj):
    """Add a certificate to the chain.

    :param certobj: The X509 certificate object to add to the chain
    :return: None
    :raises TypeError: if ``certobj`` is not an X509 instance
    """
    if not isinstance(certobj, X509):
        raise TypeError("certobj must be an X509 instance")
    # The context takes ownership of the copy on success.
    copy = _lib.X509_dup(certobj._x509)
    if not _lib.SSL_CTX_add_extra_chain_cert(self._context, copy):
        # TODO: This is untested.
        _lib.X509_free(copy)
        _raise_current_error()
Add certificate to chain :param certobj: The X509 certificate object to add to the chain :return: None
def run_container(name, image, command=None, environment=None, ro=None, rw=None, links=None, detach=True, volumes_from=None, port_bindings=None, log_syslog=False):
    """Wrapper for the docker create_container + start calls.

    :param log_syslog: bool flag to redirect container's logs to host's
        syslog
    :returns: container info dict or None if container couldn't be created
    :raises PortAllocatedError: if the container couldn't start on the
        requested port
    """
    binds = ro_rw_to_binds(ro, rw)
    if log_syslog:
        log_config = LogConfig(type=LogConfig.types.SYSLOG,
                               config={'syslog-tag': name})
    else:
        log_config = LogConfig(type=LogConfig.types.JSON)
    host_config = _get_docker().create_host_config(
        binds=binds,
        log_config=log_config,
        links=links,
        volumes_from=volumes_from,
        port_bindings=port_bindings)
    container = _get_docker().create_container(
        name=name,
        image=image,
        command=command,
        environment=environment,
        volumes=binds_to_volumes(binds),
        detach=detach,
        stdin_open=False,
        tty=False,
        ports=list(port_bindings) if port_bindings else None,
        host_config=host_config)
    try:
        _get_docker().start(container=container['Id'])
    except APIError as e:
        if 'address already in use' in e.explanation:
            # Clean up the half-created container before signalling the
            # port conflict to the caller.
            try:
                _get_docker().remove_container(name, force=True)
            except APIError:
                pass
            raise PortAllocatedError()
        raise
    return container
Wrapper for docker create_container, start calls :param log_syslog: bool flag to redirect container's logs to host's syslog :returns: container info dict or None if container couldn't be created Raises PortAllocatedError if container couldn't start on the requested port.
def geocode(client, address=None, components=None, bounds=None, region=None, language=None):
    """Convert an address into geographic coordinates.

    Geocoding turns addresses (like ``"1600 Amphitheatre Parkway,
    Mountain View, CA"``) into coordinates (like latitude 37.423021 and
    longitude -122.083739) usable for placing markers or positioning a map.

    :param address: The address to geocode.
    :type address: string
    :param components: A component filter for which you wish to obtain a
        geocode, e.g. ``{'administrative_area': 'TX', 'country': 'US'}``
    :type components: dict
    :param bounds: The bounding box of the viewport within which to bias
        geocode results more prominently.
    :type bounds: string or dict with northeast and southwest keys.
    :param region: The region code, specified as a ccTLD ("top-level
        domain") two-character value.
    :type region: string
    :param language: The language in which to return results.
    :type language: string
    :rtype: list of geocoding results.
    """
    params = {}
    if address:
        params["address"] = address
    if components:
        params["components"] = convert.components(components)
    if bounds:
        params["bounds"] = convert.bounds(bounds)
    if region:
        params["region"] = region
    if language:
        params["language"] = language
    response = client._request("/maps/api/geocode/json", params)
    return response.get("results", [])
Geocoding is the process of converting addresses (like ``"1600 Amphitheatre Parkway, Mountain View, CA"``) into geographic coordinates (like latitude 37.423021 and longitude -122.083739), which you can use to place markers or position the map. :param address: The address to geocode. :type address: string :param components: A component filter for which you wish to obtain a geocode, for example: ``{'administrative_area': 'TX','country': 'US'}`` :type components: dict :param bounds: The bounding box of the viewport within which to bias geocode results more prominently. :type bounds: string or dict with northeast and southwest keys. :param region: The region code, specified as a ccTLD ("top-level domain") two-character value. :type region: string :param language: The language in which to return results. :type language: string :rtype: list of geocoding results.
def _fill_naked_singles(self):
    """Look for naked singles, i.e. cells with only one possible value.

    :return: If any Naked Single has been found.
    :rtype: bool
    """
    simple_found = False
    for i in utils.range_(self.side):
        for j in utils.range_(self.side):
            # A value > 0 means the cell is already filled; skip it.
            if self[i][j] > 0:
                continue
            p = self._possibles[i][j]
            if len(p) == 1:
                # Exactly one candidate left: place it and record the step.
                self.set_cell(i, j, list(p)[0])
                self.solution_steps.append(self._format_step("NAKED", (i, j), self[i][j]))
                simple_found = True
            elif len(p) == 0:
                # No candidate at all: the board is contradictory.
                raise SudokuHasNoSolutionError("Error made! No possible value for ({0},{1})!".format(i + 1, j + 1))
    return simple_found
Look for naked singles, i.e. cells with only one possible value.

:return: If any Naked Single has been found.
:rtype: bool
def check_interactive_docker_worker(link):
    """Given a task, make sure the task was not defined as interactive.

    * ``task.payload.features.interactive`` must be absent or False.
    * ``task.payload.env.TASKCLUSTER_INTERACTIVE`` must be absent or False.

    Args:
        link (LinkOfTrust): the task link we're checking.

    Returns:
        list: the list of errors.  Success is an empty list.
    """
    errors = []
    log.info("Checking for {} {} interactive docker-worker".format(link.name, link.task_id))
    try:
        # Truthy values (not just True) count as interactive.
        if link.task['payload']['features'].get('interactive'):
            errors.append("{} is interactive: task.payload.features.interactive!".format(link.name))
        if link.task['payload']['env'].get('TASKCLUSTER_INTERACTIVE'):
            errors.append("{} is interactive: task.payload.env.TASKCLUSTER_INTERACTIVE!".format(link.name))
    except KeyError:
        # A task without payload/features/env sections is malformed by itself.
        errors.append("check_interactive_docker_worker: {} task definition is malformed!".format(link.name))
    return errors
Given a task, make sure the task was not defined as interactive.

* ``task.payload.features.interactive`` must be absent or False.
* ``task.payload.env.TASKCLUSTER_INTERACTIVE`` must be absent or False.

Args:
    link (LinkOfTrust): the task link we're checking.

Returns:
    list: the list of errors.  Success is an empty list.
def iter_rows(self):
    """Generator reading .dbf rows one by one.

    Yields a named tuple ``Row`` object for every record that is not
    flagged as deleted.

    :rtype: Row
    """
    fileobj = self._fileobj
    cls_row = self.cls_row
    fields = self.fields
    # Total payload size of one record, excluding the 1-byte deletion marker.
    record_len = sum(field.len for field in fields)
    for idx in range(self.prolog.records_count):
        data = fileobj.read(1)
        marker = struct.unpack('<1s', data)[0]
        is_deleted = marker == b'*'
        if is_deleted:
            # BUGFIX: a deleted record still occupies space in the file, so
            # its payload must be consumed; otherwise every subsequent read
            # would be misaligned by record_len bytes.
            fileobj.read(record_len)
            continue
        row_values = []
        for field in fields:
            val = field.cast(fileobj.read(field.len))
            row_values.append(val)
        yield cls_row(*row_values)
Generator reading .dbf row one by one. Yields named tuple Row object. :rtype: Row
def selects(self, code, start, end=None):
    """Slice rows for ``code`` between ``start`` and ``end`` (inclusive).

    When ``end`` is not given, the slice extends to the last timestamp.
    (Historical notes: pandas MultiIndex slicing issues, see
    https://github.com/pandas-dev/pandas/issues/21299 — resolved 2018/06/04.)

    :param code: security code (second index level)
    :param start: start timestamp (inclusive)
    :param end: end timestamp (inclusive); ``None`` means "to the end"
    :raises ValueError: when the code does not exist or the range is invalid
    """
    def _selects(code, start, end):
        # pd.Timestamp(None) is not a valid upper bound, so keep two branches.
        if end is not None:
            return self.data.loc[(slice(pd.Timestamp(start), pd.Timestamp(end)), code), :]
        else:
            return self.data.loc[(slice(pd.Timestamp(start), None), code), :]

    try:
        return self.new(_selects(code, start, end), self.type, self.if_fq)
    except Exception as error:
        # BUGFIX: narrowed from a bare ``except:`` (which also swallowed
        # SystemExit/KeyboardInterrupt); chain the cause for debuggability.
        raise ValueError(
            'QA CANNOT GET THIS CODE {}/START {}/END{} '.format(
                code,
                start,
                end
            )
        ) from error
选择code,start,end 如果end不填写,默认获取到结尾 @2018/06/03 pandas 的索引问题导致 https://github.com/pandas-dev/pandas/issues/21299 因此先用set_index去重做一次index 影响的有selects,select_time,select_month,get_bar @2018/06/04 当选择的时间越界/股票不存在,raise ValueError @2018/06/04 pandas索引问题已经解决 全部恢复
def init(self):
    """
    Adds custom calculations to orbit simulation. This routine is run once,
    and only once, upon instantiation.

    Adds quasi-dipole coordinates, velocity calculation in ECEF coords,
    adds the attitude vectors of spacecraft assuming x is ram pointing and
    z is generally nadir, adds ionospheric parameters from the
    International Reference Ionosphere (IRI), as well as simulated winds
    from the Horizontal Wind Model (HWM).

    """
    # Each registered function mutates the instrument data in-place
    # ('modify' mode).
    self.custom.add(add_quasi_dipole_coordinates, 'modify')
    self.custom.add(add_aacgm_coordinates, 'modify')
    self.custom.add(calculate_ecef_velocity, 'modify')
    self.custom.add(add_sc_attitude_vectors, 'modify')
    self.custom.add(add_iri_thermal_plasma, 'modify')
    self.custom.add(add_hwm_winds_and_ecef_vectors, 'modify')
    self.custom.add(add_igrf, 'modify')
    # project simulated vectors onto s/c basis
    # IGRF
    # create metadata to be added along with vector projection
    in_meta = {'desc':'IGRF geomagnetic field expressed in the s/c basis.',
               'units':'nT'}
    # project IGRF (runs at the 'end' so the ECEF components exist first)
    self.custom.add(project_ecef_vector_onto_sc, 'modify', 'end',
                    'B_ecef_x', 'B_ecef_y', 'B_ecef_z',
                    'B_sc_x', 'B_sc_y', 'B_sc_z',
                    meta=[in_meta.copy(), in_meta.copy(), in_meta.copy()])
    # project total wind vector
    self.custom.add(project_hwm_onto_sc, 'modify')
    # neutral parameters
    self.custom.add(add_msis, 'modify')
Adds custom calculations to orbit simulation. This routine is run once, and only once, upon instantiation.

Adds quasi-dipole coordinates, velocity calculation in ECEF coords, adds the attitude vectors of spacecraft assuming x is ram pointing and z is generally nadir, adds ionospheric parameters from the International Reference Ionosphere (IRI), as well as simulated winds from the Horizontal Wind Model (HWM).
def calc_signal_power(params):
    '''
    calculates power spectrum of sum signal for all channels

    Work is distributed round-robin across MPI ranks via ``i % SIZE == RANK``;
    one HDF5 output file per signal type is written to the analysis folder.
    '''
    for i, data_type in enumerate(['CSD','LFP','CSD_10_0', 'LFP_10_0']):
        if i % SIZE == RANK:
            # Load data
            if data_type in ['CSD','LFP']:
                fname=os.path.join(params.savefolder, data_type+'sum.h5')
            else:
                # subsampled variants live under populations_path/subsamples
                fname=os.path.join(params.populations_path,
                                   'subsamples',
                                   str.split(data_type,'_')[0] + 'sum_' +
                                   str.split(data_type,'_')[1] + '_' +
                                   str.split(data_type,'_')[2] + '.h5')
            #open file
            f = h5py.File(fname)
            # NOTE(review): ``Dataset.value`` is deprecated/removed in newer
            # h5py -- ``f['data'][()]`` is the modern equivalent; confirm the
            # pinned h5py version before changing.
            data = f['data'].value
            srate = f['srate'].value
            # time vector in ms
            tvec = np.arange(data.shape[1]) * 1000. / srate
            # slice away the startup transient
            slica = (tvec >= ana_params.transient)
            data = data[:,slica]
            # subtract mean per channel
            dataT = data.T - data.mean(axis=1)
            data = dataT.T
            f.close()
            #extract PSD
            PSD=[]
            # NOTE(review): this loop variable shadows the outer ``i``; safe
            # only because the outer value is not used again afterwards.
            for i in np.arange(len(params.electrodeParams['z'])):
                if ana_params.mlab:
                    Pxx, freqs=plt.mlab.psd(data[i], NFFT=ana_params.NFFT,
                                            Fs=srate,
                                            noverlap=ana_params.noverlap,
                                            window=ana_params.window)
                else:
                    [freqs, Pxx] = hlp.powerspec([data[i]], tbin= 1.,
                                                 Df=ana_params.Df,
                                                 pointProcess=False)
                    # keep the non-negative frequency half and normalize
                    mask = np.where(freqs >= 0.)
                    freqs = freqs[mask]
                    Pxx = Pxx.flatten()
                    Pxx = Pxx[mask]
                    Pxx = Pxx/tvec[tvec >= ana_params.transient].size**2
                PSD +=[Pxx.flatten()]
            PSD=np.array(PSD)
            # Save data
            f = h5py.File(os.path.join(params.savefolder,
                                       ana_params.analysis_folder,
                                       data_type + ana_params.fname_psd),'w')
            f['freqs']=freqs
            f['psd']=PSD
            # record the analysis parameters alongside the results
            f['transient']=ana_params.transient
            f['mlab']=ana_params.mlab
            f['NFFT']=ana_params.NFFT
            f['noverlap']=ana_params.noverlap
            f['window']=str(ana_params.window)
            f['Df']=str(ana_params.Df)
            f.close()
    return
calculates power spectrum of sum signal for all channels
def t_UFIXEDMN(t):
    r"ufixed(?P<M>256|248|240|232|224|216|208|200|192|184|176|168|160|152|144|136|128|120|112|104|96|88|80|72|64|56|48|40|32|24|16|8)x(?P<N>80|79|78|77|76|75|74|73|72|71|70|69|68|67|66|65|64|63|62|61|60|59|58|57|56|55|54|53|52|51|50|49|48|47|46|45|44|43|42|41|40|39|38|37|36|35|34|33|32|31|30|29|28|27|26|25|24|23|22|21|20|19|18|17|16|15|14|13|12|11|10|9|8|7|6|5|4|3|2|1)"
    # NOTE: the raw string above *is* the PLY token regex -- it must not be
    # reworded.  M enumerates the legal total bit widths (multiples of 8 up
    # to 256) and N the decimal counts (1..80); alternatives are listed
    # longest-first so the regex engine matches greedily.
    M = int(t.lexer.lexmatch.group('M'))
    N = int(t.lexer.lexmatch.group('N'))
    # Token value is a ("ufixed", bits, decimals) triple.
    t.value = ("ufixed", M, N)
    return t
r"ufixed(?P<M>256|248|240|232|224|216|208|200|192|184|176|168|160|152|144|136|128|120|112|104|96|88|80|72|64|56|48|40|32|24|16|8)x(?P<N>80|79|78|77|76|75|74|73|72|71|70|69|68|67|66|65|64|63|62|61|60|59|58|57|56|55|54|53|52|51|50|49|48|47|46|45|44|43|42|41|40|39|38|37|36|35|34|33|32|31|30|29|28|27|26|25|24|23|22|21|20|19|18|17|16|15|14|13|12|11|10|9|8|7|6|5|4|3|2|1)
def get_genus_type(self):
    """Gets the genus type of this object.

    return: (osid.type.Type) - the genus type of this object
    compliance: mandatory - This method must be implemented.

    """
    # Lazily fetch and memoize the genus type map on first access.
    if self._my_genus_type_map is None:
        url_path = '/handcar/services/learning/types/' + self._my_map['genusTypeId']
        self._my_genus_type_map = self._get_request(url_path)
    return Type(self._my_genus_type_map)
Gets the genus type of this object. return: (osid.type.Type) - the genus type of this object compliance: mandatory - This method must be implemented.
def disassemble_current(self, dwThreadId):
    """
    Disassemble the instruction at the
    program counter of the given thread.

    @type  dwThreadId: int
    @param dwThreadId: Global thread ID.
        The program counter for this thread will be used as the disassembly
        address.

    @rtype:  tuple( long, int, str, str )
    @return: The tuple represents an assembly instruction
        and contains:
         - Memory address of instruction.
         - Size of instruction in bytes.
         - Disassembly line of instruction.
         - Hexadecimal dump of instruction.
    """
    # Resolve the thread, then disassemble at its current program counter.
    thread = self.get_thread(dwThreadId)
    program_counter = thread.get_pc()
    return self.disassemble_instruction(program_counter)
Disassemble the instruction at the program counter of the given thread. @type dwThreadId: int @param dwThreadId: Global thread ID. The program counter for this thread will be used as the disassembly address. @rtype: tuple( long, int, str, str ) @return: The tuple represents an assembly instruction and contains: - Memory address of instruction. - Size of instruction in bytes. - Disassembly line of instruction. - Hexadecimal dump of instruction.
def remove_user_from_group(group, role, email):
    """Remove a user from a group the caller owns

    Args:
        group (str): Group name
        role (str) : Role of user for group; either 'member' or 'admin'
        email (str): Email of user or group to remove

    Swagger:
        https://api.firecloud.org/#!/Groups/removeUserFromGroup
    """
    # Build the REST resource path and issue the DELETE request.
    uri = "/".join(["groups", group, role, email])
    return __delete(uri)
Remove a user from a group the caller owns Args: group (str): Group name role (str) : Role of user for group; either 'member' or 'admin' email (str): Email of user or group to remove Swagger: https://api.firecloud.org/#!/Groups/removeUserFromGroup
def set_XY(self, X, Y):
    """
    Replace the model's input/output data in a single step.

    Useful when swapping in new observations while keeping the same model:
    model updates are suspended while both arrays are set, so the model is
    only re-fitted once at the end.

    :param X: input observations
    :type X: np.ndarray
    :param Y: output observations
    :type Y: np.ndarray or ObsAr
    """
    # Suspend updates, set Y then X (order preserved), then resume.
    self.update_model(False)
    self.set_Y(Y)
    self.set_X(X)
    self.update_model(True)
Set the input / output data of the model This is useful if we wish to change our existing data but maintain the same model :param X: input observations :type X: np.ndarray :param Y: output observations :type Y: np.ndarray or ObsAr
def merge_labeled_intervals(x_intervals, x_labels, y_intervals, y_labels):
    r"""Merge the time intervals of two sequences.

    Parameters
    ----------
    x_intervals : np.ndarray
        Array of interval times (seconds)
    x_labels : list or None
        List of labels
    y_intervals : np.ndarray
        Array of interval times (seconds)
    y_labels : list or None
        List of labels

    Returns
    -------
    new_intervals : np.ndarray
        New interval times of the merged sequences.
    new_x_labels : list
        New labels for the sequence ``x``
    new_y_labels : list
        New labels for the sequence ``y``
    """
    # Both sequences must span exactly the same overall time range.
    if (x_intervals[0, 0] != y_intervals[0, 0]
            or x_intervals[-1, 1] != y_intervals[-1, 1]):
        raise ValueError(
            "Time intervals do not align; did you mean to call "
            "'adjust_intervals()' first?")

    # Every unique boundary from either sequence becomes a cut point.
    boundaries = np.unique(
        np.concatenate([x_intervals, y_intervals], axis=0))
    merged_intervals = np.array([boundaries[:-1], boundaries[1:]]).T

    x_out, y_out = [], []
    x_positions = np.arange(len(x_labels))
    y_positions = np.arange(len(y_labels))
    for start, _ in merged_intervals:
        # The label active at ``start`` belongs to the last interval whose
        # own start time is at or before it.
        x_out.append(x_labels[x_positions[start >= x_intervals[:, 0]][-1]])
        y_out.append(y_labels[y_positions[start >= y_intervals[:, 0]][-1]])
    return merged_intervals, x_out, y_out
r"""Merge the time intervals of two sequences. Parameters ---------- x_intervals : np.ndarray Array of interval times (seconds) x_labels : list or None List of labels y_intervals : np.ndarray Array of interval times (seconds) y_labels : list or None List of labels Returns ------- new_intervals : np.ndarray New interval times of the merged sequences. new_x_labels : list New labels for the sequence ``x`` new_y_labels : list New labels for the sequence ``y``
def delete_password(self, service, username):
    """Delete the password for the username of the service.
    """
    items = self._find_passwords(service, username, deleting=True)
    if not items:
        raise PasswordDeleteError("Password not found")
    for current in items:
        result = GnomeKeyring.item_delete_sync(current.keyring,
                                               current.item_id)
        if result == GnomeKeyring.Result.OK:
            continue
        # Distinguish a user-cancelled prompt from other failures.
        if result == GnomeKeyring.Result.CANCELLED:
            raise PasswordDeleteError("Cancelled by user")
        raise PasswordDeleteError(result.value_name)
Delete the password for the username of the service.
def tc_at_grid_col(self, idx):
    """
    The ``<w:tc>`` element appearing at grid column *idx*. Raises
    |ValueError| if no ``w:tc`` element begins at that grid column.
    """
    start_col = 0
    for tc in self.tc_lst:
        if start_col == idx:
            return tc
        start_col += tc.grid_span
        # A cell spanning past *idx* means no cell *begins* there.
        if start_col > idx:
            raise ValueError('no cell on grid column %d' % idx)
    raise ValueError('index out of bounds')
The ``<w:tc>`` element appearing at grid column *idx*. Raises |ValueError| if no ``w:tc`` element begins at that grid column.
def sbd_to_steem(self, sbd=0, price=0, account=None):
    ''' 
    Uses the ticker to get the lowest 
    ask and moves the sbd at that price.

    :param sbd: amount of SBD to sell; 0 means "sell the entire balance"
    :param price: price to sell at; 0 means "derive from the DEX ticker"
    :param account: account to trade from; defaults to the main account
    :return: True on success, False otherwise
    '''
    if not account:
        account = self.mainaccount
    if self.check_balances(account):
        if sbd == 0:
            # Default to selling the whole SBD balance.
            sbd = self.sbdbal
        elif sbd > self.sbdbal:
            self.msg.error_message("INSUFFICIENT FUNDS. CURRENT SBD BAL: "
                    + str(self.sbdbal))
            return False
        if price == 0:
            # NOTE(review): price is the inverse of the lowest ask --
            # presumably converting the ticker's quote direction into
            # SBD-per-STEEM; confirm against dex_ticker()'s units.
            price = 1 / self.dex_ticker()['lowest_ask']
        try:
            self.dex.sell(sbd, "SBD", price, account=account)
        except Exception as e:
            self.msg.error_message("COULD NOT SELL SBD FOR STEEM: "
                    + str(e))
            return False
        else:
            self.msg.message("TRANSFERED " + str(sbd)
                    + " SBD TO STEEM AT THE PRICE OF: $" + str(price))
            return True
    else:
        return False
Uses the ticker to get the lowest ask and moves the sbd at that price.
def clear_widget(self):
    """
    Clears the thumbnail display widget of all thumbnails, but does
    not remove them from the thumb_dict or thumb_list.
    """
    # Nothing to clear before the GUI has been built.
    if not self.gui_up:
        return
    self.c_view.get_canvas().delete_all_objects()
    self.c_view.redraw(whence=0)
Clears the thumbnail display widget of all thumbnails, but does not remove them from the thumb_dict or thumb_list.
def success(self):
    """Return boolean indicating whether a solution was found"""
    self._check_valid()
    solution = self._problem._cp.solution
    status = solution.status
    # Any of the "optimal" status codes counts as a successful solve.
    return solution.get_status() in (
        status.optimal, status.optimal_tolerance, status.MIP_optimal)
Return boolean indicating whether a solution was found
def declare_api_routes(config):
    """Declaration of routing"""
    routes = (
        ('get-content', '/contents/{ident_hash}'),
        ('get-resource', '/resources/{hash}'),

        # User actions API
        ('license-request', '/contents/{uuid}/licensors'),
        ('roles-request', '/contents/{uuid}/roles'),
        ('acl-request', '/contents/{uuid}/permissions'),

        # Publishing API
        ('publications', '/publications'),
        ('get-publication', '/publications/{id}'),
        ('publication-license-acceptance',
         '/publications/{id}/license-acceptances/{uid}'),
        ('publication-role-acceptance',
         '/publications/{id}/role-acceptances/{uid}'),
        # TODO (8-May-12017) Remove because the term collate is being phased
        #      out.
        ('collate-content', '/contents/{ident_hash}/collate-content'),
        ('bake-content', '/contents/{ident_hash}/baked'),

        # Moderation routes
        ('moderation', '/moderations'),
        ('moderate', '/moderations/{id}'),
        ('moderation-rss', '/feeds/moderations.rss'),

        # API Key routes
        ('api-keys', '/api-keys'),
        ('api-key', '/api-keys/{id}'),
    )
    # Register every route with the Pyramid configurator.
    for name, pattern in routes:
        config.add_route(name, pattern)
Declaration of routing
def select_storage_for(cls, section_name, storage):
    """Selects the data storage for a config section within the
    :param:`storage`.  The primary config section is normally merged
    into the :param:`storage`.

    :param section_name:  Config section (name) to process.
    :param storage:   Data storage to use.
    :return: :param:`storage` or a part of it (as section storage).
    """
    storage_name = cls.get_storage_name_for(section_name)
    if not storage_name:
        # Primary section: data goes directly into the top-level storage.
        return storage
    section_storage = storage.get(storage_name, None)
    if section_storage is None:
        # Create (or replace an explicitly stored None) sub-storage.
        section_storage = storage[storage_name] = dict()
    return section_storage
Selects the data storage for a config section within the :param:`storage`. The primary config section is normally merged into the :param:`storage`. :param section_name: Config section (name) to process. :param storage: Data storage to use. :return: :param:`storage` or a part of it (as section storage).
def _parseStats(self, lines, parse_slabs = False):
    """Parse stats output from memcached and return dictionary of stats-

    @param lines:       Array of lines of input text.
    @param parse_slabs: Parse slab stats if True.
    @return:            Stats dictionary.

    """
    info_dict = {}
    info_dict['slabs'] = {}
    for line in lines:
        # Global stat line: "STAT <name> <value>"
        mobj = re.match(r'^STAT\s(\w+)\s(\S+)$', line)
        if mobj:
            info_dict[mobj.group(1)] = util.parse_value(mobj.group(2), True)
            continue
        elif parse_slabs:
            # Slab stat line: "STAT [prefix:]<slab>:<key> <value>"
            mobj = re.match(r'STAT\s(\w+:)?(\d+):(\w+)\s(\S+)$', line)
            if mobj:
                (slab, key, val) = mobj.groups()[-3:]
                # BUGFIX: dict.has_key() was removed in Python 3; the ``in``
                # operator works on both Python 2 and Python 3.
                if slab not in info_dict['slabs']:
                    info_dict['slabs'][slab] = {}
                info_dict['slabs'][slab][key] = util.parse_value(val, True)
    return info_dict
Parse stats output from memcached and return dictionary of stats- @param lines: Array of lines of input text. @param parse_slabs: Parse slab stats if True. @return: Stats dictionary.
def _quoteattr(self, attr):
    """Escape an XML attribute. Value can be unicode."""
    # Strip characters that are not legal in XML first.
    attr = xml_safe(attr)
    # NOTE(review): Python 2 idiom -- the ``unicode`` builtin does not exist
    # on Python 3.  When the build is not configured for unicode strings,
    # encode to the writer's configured byte encoding before quoting.
    if isinstance(attr, unicode) and not UNICODE_STRINGS:
        attr = attr.encode(self.encoding)
    return saxutils.quoteattr(attr)
Escape an XML attribute. Value can be unicode.
def get_action(self, parent, undo_stack: QUndoStack, sel_range, protocol, view: int):
    """Create the QAction that applies this plugin's zero-hiding command.

    :type parent: QTableView
    :type undo_stack: QUndoStack
    :param sel_range: unused here; presumably part of a uniform plugin
        interface -- TODO confirm
    :param protocol: protocol object the ZeroHideAction operates on
    :param view: view index passed through to the command
    """
    self.command = ZeroHideAction(protocol, self.following_zeros, view, self.zero_hide_offsets)
    action = QAction(self.command.text(), parent)
    # Route the trigger through this plugin so the command can be pushed
    # onto the undo stack when fired.
    action.triggered.connect(self.action_triggered)
    self.undo_stack = undo_stack
    return action
:type parent: QTableView :type undo_stack: QUndoStack
def append(self, P, closed=False, itemsize=None, **kwargs):
    """
    Append a new set of vertices to the collection.

    For kwargs argument, n is the number of vertices (local) or the
    number of item (shared)

    Parameters
    ----------

    P : np.array
        Vertices positions of the path(s) to be added

    closed: bool
        Whether path(s) is/are closed

    itemsize: int or None
        Size of an individual path

    caps : list, array or 2-tuple
        Path start /end cap

    join : list, array or float
        path segment join

    color : list, array or 4-tuple
        Path color

    miter_limit : list, array or float
        Miter limit for join

    linewidth : list, array or float
        Path linewidth

    antialias : list, array or float
        Path antialias area
    """
    itemsize = itemsize or len(P)
    # BUGFIX: use integer (floor) division -- on Python 3 ``/`` yields a
    # float, which breaks np.zeros/reshape below.
    itemcount = len(P) // itemsize

    # Computes the adjacency information
    n, p = len(P), P.shape[-1]
    Z = np.tile(P, 2).reshape(2 * len(P), p)
    V = np.empty(n, dtype=self.vtype)
    V['p0'][1:-1] = Z[0::2][:-2]
    V['p1'][:-1] = Z[1::2][:-1]
    V['p2'][:-1] = Z[1::2][+1:]
    V['p3'][:-2] = Z[0::2][+2:]

    # Apply default values on vertices
    for name in self.vtype.names:
        if name not in ['collection_index', 'p0', 'p1', 'p2', 'p3']:
            V[name] = kwargs.get(name, self._defaults[name])

    # Extract relevant segments only (integer division, see BUGFIX above)
    V = (V.reshape(n // itemsize, itemsize)[:, :-1])
    if closed:
        V['p0'][:, 0] = V['p2'][:, -1]
        V['p3'][:, -1] = V['p1'][:, 0]
    else:
        V['p0'][:, 0] = V['p1'][:, 0]
        V['p3'][:, -1] = V['p2'][:, -1]
    V = V.ravel()

    # Quadruple each point (we're using 2 triangles / segment)
    # No shared vertices between segment because of joins
    V = np.repeat(V, 4, axis=0).reshape((len(V), 4))
    V['uv'] = (-1, -1), (-1, +1), (+1, -1), (+1, +1)
    V = V.ravel()

    n = itemsize
    if closed:
        # Indices stored as uint32 (two triangles per segment, plus the
        # closing segment wrapping back to the start).
        I = np.resize(
            np.array([0, 1, 2, 1, 2, 3], dtype=np.uint32), n * 2 * 3)
        I += np.repeat(4 * np.arange(n, dtype=np.uint32), 6)
        I[-6:] = 4 * n - 6, 4 * n - 5, 0, 4 * n - 5, 0, 1
    else:
        I = np.resize(
            np.array([0, 1, 2, 1, 2, 3], dtype=np.uint32), (n - 1) * 2 * 3)
        I += np.repeat(4 * np.arange(n - 1, dtype=np.uint32), 6)
    I = I.ravel()

    # Uniforms
    if self.utype:
        U = np.zeros(itemcount, dtype=self.utype)
        for name in self.utype.names:
            if name not in ["__unused__"]:
                U[name] = kwargs.get(name, self._defaults[name])
    else:
        U = None

    Collection.append(self, vertices=V, uniforms=U,
                      indices=I, itemsize=itemsize * 4 - 4)
Append a new set of vertices to the collection. For kwargs argument, n is the number of vertices (local) or the number of item (shared) Parameters ---------- P : np.array Vertices positions of the path(s) to be added closed: bool Whether path(s) is/are closed itemsize: int or None Size of an individual path caps : list, array or 2-tuple Path start /end cap join : list, array or float path segment join color : list, array or 4-tuple Path color miter_limit : list, array or float Miter limit for join linewidth : list, array or float Path linewidth antialias : list, array or float Path antialias area
def _setup_rpc(self):
    """Setup the RPC client for the current agent."""
    self._state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
    report_interval = CONF.AGENT.report_interval
    if not report_interval:
        # Periodic state reporting is disabled by configuration.
        return
    heartbeat = loopingcall.FixedIntervalLoopingCall(self._report_state)
    heartbeat.start(interval=report_interval)
Setup the RPC client for the current agent.
def _make_package(args):  # pragma: no cover
    """Prepare transcriptiondata from the transcription sources."""
    from lingpy.sequence.sound_classes import token2class
    from lingpy.data import Model

    # Extra per-grapheme columns copied verbatim from the source rows.
    columns = ['LATEX', 'FEATURES', 'SOUND', 'IMAGE', 'COUNT', 'NOTE']
    bipa = TranscriptionSystem('bipa')
    for src, rows in args.repos.iter_sources(type='td'):
        args.log.info('TranscriptionData {0} ...'.format(src['NAME']))
        uritemplate = URITemplate(src['URITEMPLATE']) if src['URITEMPLATE'] else None
        out = [['BIPA_GRAPHEME', 'CLTS_NAME', 'GENERATED',
                'EXPLICIT', 'GRAPHEME', 'URL'] + columns]
        graphemes = set()
        for row in rows:
            if row['GRAPHEME'] in graphemes:
                # Keep only the first occurrence of each grapheme.
                args.log.warn('skipping duplicate grapheme: {0}'.format(row['GRAPHEME']))
                continue
            graphemes.add(row['GRAPHEME'])
            if not row['BIPA']:
                # No explicit BIPA mapping: look the grapheme itself up.
                bipa_sound = bipa[row['GRAPHEME']]
                explicit = ''
            else:
                bipa_sound = bipa[row['BIPA']]
                explicit = '+'
            generated = '+' if bipa_sound.generated else ''
            if is_valid_sound(bipa_sound, bipa):
                bipa_grapheme = bipa_sound.s
                bipa_name = bipa_sound.name
            else:
                bipa_grapheme, bipa_name = '<NA>', '<NA>'
            url = uritemplate.expand(**row) if uritemplate else row.get('URL', '')
            out.append(
                [bipa_grapheme, bipa_name, generated, explicit, row['GRAPHEME'], url] + [
                    row.get(c, '') for c in columns])
        # NOTE(review): ``out`` includes the header row, so the percentage
        # below counts it in the denominator -- confirm whether intended.
        found = len([o for o in out if o[0] != '<NA>'])
        args.log.info('... {0} of {1} graphemes found ({2:.0f}%)'.format(
            found, len(out), found / len(out) * 100))
        with UnicodeWriter(
            pkg_path('transcriptiondata', '{0}.tsv'.format(src['NAME'])), delimiter='\t'
        ) as writer:
            writer.writerows(out)

    # Write the lingpy sound-class mapping for all non-alias BIPA sounds.
    count = 0
    with UnicodeWriter(pkg_path('soundclasses', 'lingpy.tsv'), delimiter='\t') as writer:
        writer.writerow(['CLTS_NAME', 'BIPA_GRAPHEME'] + SOUNDCLASS_SYSTEMS)
        for grapheme, sound in sorted(bipa.sounds.items()):
            if not sound.alias:
                writer.writerow(
                    [sound.name, grapheme] + [token2class(
                        grapheme, Model(cls)) for cls in SOUNDCLASS_SYSTEMS])
                count += 1
    args.log.info('SoundClasses: {0} written to file.'.format(count))
Prepare transcriptiondata from the transcription sources.
def get_parameter_names(self, include_frozen=False):
    """
    Get a list of the parameter names

    Args:
        include_frozen (Optional[bool]): Should the frozen parameters be
            included in the returned value? (default: ``False``)
    """
    if include_frozen:
        return self.parameter_names
    # Keep only the names whose unfrozen-mask entry is truthy.
    return tuple(name
                 for name, unfrozen in zip(self.parameter_names,
                                           self.unfrozen_mask)
                 if unfrozen)
Get a list of the parameter names Args: include_frozen (Optional[bool]): Should the frozen parameters be included in the returned value? (default: ``False``)
def setup(app):
    ''' Required Sphinx extension setup function. '''
    # Register each custom Bokeh reference role with Sphinx.
    for role_name, role_fn in (('bokeh-commit', bokeh_commit),
                               ('bokeh-issue', bokeh_issue),
                               ('bokeh-pull', bokeh_pull),
                               ('bokeh-tree', bokeh_tree)):
        app.add_role(role_name, role_fn)
Required Sphinx extension setup function.
def getActionReflexRules(self, analysis, wf_action): """ This function returns a list of dictionaries with the rules to be done for the analysis service. :analysis: the analysis full object which we want to obtain the rules for. :wf_action: it is the workflow action that the analysis is doing, we have to act in consideration of the action_set 'trigger' variable :returns: [{'action': 'duplicate', ...}, {,}, ...] """ # Setting up the analyses catalog self.analyses_catalog = getToolByName(self, CATALOG_ANALYSIS_LISTING) # Getting the action sets, those that contain action rows action_sets = self.getReflexRules() rules_list = [] condition = False for action_set in action_sets: # Validate the trigger if action_set.get('trigger', '') == wf_action: # Getting the conditions resolution condition = self._areConditionsMet(action_set, analysis) if condition: actions = action_set.get('actions', []) for act in actions: # Adding the rule number inside each action row because # need to get the rule number from a row action later. # we will need to get the rule number from a row # action later. act['rulenumber'] = action_set.get('rulenumber', '0') act['rulename'] = self.Title() rules_list.append(act) return rules_list
This function returns a list of dictionaries with the rules to be done for the analysis service. :analysis: the analysis full object which we want to obtain the rules for. :wf_action: it is the workflow action that the analysis is doing, we have to act in consideration of the action_set 'trigger' variable :returns: [{'action': 'duplicate', ...}, {,}, ...]
def lam_at_index(self, lidx):
    """Compute the scaled lambda used at index lidx.
    """
    base = self.lam * self.lam_scale_
    # Without a regularization path, the scaled lambda is constant.
    if self.path_ is None:
        return base
    return base * self.path_[lidx]
Compute the scaled lambda used at index lidx.
def add_warning (self, s, tag=None):
    """
    Add a warning string.
    """
    item = (tag, s)
    # Skip duplicates and warnings whose tag is configured to be ignored.
    if item in self.warnings:
        return
    if tag in self.aggregate.config["ignorewarnings"]:
        return
    self.warnings.append(item)
Add a warning string.
def add_sms_spec_to_request(self, req, federation='', loes=None, context='', url=''): """ Add signed metadata statements to the request :param req: The request so far :param federation: If only signed metadata statements from a specific set of federations should be included this is the set. :param loes: - not used - :param context: What kind of request/response it is: 'registration', 'discovery' or 'response'. The later being registration response. :param url: Just for testing !! :return: A possibly augmented request. """ # fetch the signed metadata statement collection if federation: if not isinstance(federation, list): federation = [federation] if not url: url = "{}/getsmscol/{}/{}".format(self.mdss_endpoint, context, quote_plus(self.entity_id)) http_resp = self.httpcli(method='GET', url=url, verify=self.verify_ssl) if http_resp.status_code >= 400: raise ConnectionError('HTTP Error: {}'.format(http_resp.text)) msg = JsonWebToken().from_jwt(http_resp.text, keyjar=self.mdss_keys) if msg['iss'] != self.mdss_owner: raise KeyError('Wrong iss') if federation: _sms = dict( [(fo, _ms) for fo, _ms in msg.items() if fo in federation]) else: _sms = msg.extra() try: del _sms['kid'] except KeyError: pass req.update({'metadata_statement_uris': _sms}) return req
Add signed metadata statements to the request :param req: The request so far :param federation: If only signed metadata statements from a specific set of federations should be included this is the set. :param loes: - not used - :param context: What kind of request/response it is: 'registration', 'discovery' or 'response'. The later being registration response. :param url: Just for testing !! :return: A possibly augmented request.
def pipe(self, other_task):
    """
    Add a pipe listener to the execution of this task.  The output of
    this task is required to be an iterable.  Each item in the iterable
    will be queued as the sole argument to an execution of the listener
    task.

    Can also be written as::

        pipeline = task1 | task2
    """
    # Wire the downstream task back to this one, then register the
    # forwarding listener.
    other_task._source = self
    listener = PipeListener(other_task)
    self._listeners.append(listener)
    return other_task
Add a pipe listener to the execution of this task. The output of this task is required to be an iterable. Each item in the iterable will be queued as the sole argument to an execution of the listener task. Can also be written as:: pipeline = task1 | task2
def remove_im_params(model, im):
    """Remove parameter nodes from the influence map.

    The graph is modified in place (and also returned as ``None`` by this
    function, matching the original behavior).

    Parameters
    ----------
    model : pysb.core.Model
        PySB model.
    im : networkx.MultiDiGraph
        Influence map; parameter nodes are removed from it in place.
    """
    for param in model.parameters:
        # Membership test replaces the previous bare ``except:`` clause,
        # which also swallowed SystemExit/KeyboardInterrupt.  A parameter
        # may legitimately be absent (e.g. already removed) -- skip it.
        if param.name in im:
            im.remove_node(param.name)
Remove parameter nodes from the influence map. Parameters ---------- model : pysb.core.Model PySB model. im : networkx.MultiDiGraph Influence map. Returns ------- networkx.MultiDiGraph Influence map with the parameter nodes removed.
def size_in_bytes(self, offset, timestamp, key, value, headers=None):
    """ Actual size of message to add
    """
    assert not headers, "Headers not supported in v0/v1"
    # Fixed log overhead plus the record body for this magic version;
    # offset and timestamp do not affect the size here.
    return self.LOG_OVERHEAD + self.record_size(self._magic, key, value)
Actual size of message to add
def _check_resources(name, expr, resources): """Validate that the expression and resources passed match up. Parameters ---------- name : str The name of the argument we are checking. expr : Expr The potentially bound expr. resources The explicitly passed resources to compute expr. Raises ------ ValueError If the resources do not match for an expression. """ if expr is None: return bound = expr._resources() if not bound and resources is None: raise ValueError('no resources provided to compute %s' % name) if bound and resources: raise ValueError( 'explicit and implicit resources provided to compute %s' % name, )
Validate that the expression and resources passed match up. Parameters ---------- name : str The name of the argument we are checking. expr : Expr The potentially bound expr. resources The explicitly passed resources to compute expr. Raises ------ ValueError If the resources do not match for an expression.
async def setvolume(self, value):
    """The volume command

    Args:
        value (str): The value to set the volume to; '+' or '-' steps to the
            next multiple of 10, any other value is parsed as an absolute
            integer percentage in [0, 200].
    """
    self.logger.debug("volume command")
    # Ignore volume commands until the player has finished initialising.
    if self.state != 'ready':
        return
    logger.debug("Volume command received")
    if value == '+':
        # NOTE(review): '+' caps at 100 while absolute values allow up to
        # 200 — confirm whether that asymmetry is intended.
        if self.volume < 100:
            self.statuslog.debug("Volume up")
            # Snap down to the nearest multiple of 10, then step up by 10.
            self.volume = (10 * (self.volume // 10)) + 10
            self.volumelog.info(str(self.volume))
            try:
                self.streamer.volume = self.volume / 100
            except AttributeError:
                # No active streamer yet; the stored value applies later.
                pass
        else:
            self.statuslog.warning("Already at maximum volume")
    elif value == '-':
        if self.volume > 0:
            self.statuslog.debug("Volume down")
            # Snap up to the nearest multiple of 10, then step down by 10.
            self.volume = (10 * ((self.volume + 9) // 10)) - 10
            self.volumelog.info(str(self.volume))
            try:
                self.streamer.volume = self.volume / 100
            except AttributeError:
                pass
        else:
            self.statuslog.warning("Already at minimum volume")
    else:
        # Absolute volume: parse and range-check the argument.
        try:
            value = int(value)
        except ValueError:
            self.statuslog.error("Volume argument must be +, -, or a %")
        else:
            if 0 <= value <= 200:
                self.statuslog.debug("Setting volume")
                self.volume = value
                self.volumelog.info(str(self.volume))
                try:
                    self.streamer.volume = self.volume / 100
                except AttributeError:
                    pass
            else:
                self.statuslog.error("Volume must be between 0 and 200")
    # Persist the (possibly unchanged) volume setting.
    self.write_volume()
The volume command Args: value (str): The value to set the volume to
def units(self):
    """Tuple of the units for length, time and mass.

    Can be set in any order, and strings are not case-sensitive. See
    ipython_examples/Units.ipynb for more information. You can check the
    units' exact values and add additional units in rebound/rebound/units.py.
    Units should be set before adding particles to the simulation (will give
    error otherwise).

    Currently supported units: times ('hr', 'yr', 'jyr', 'sidereal_yr',
    'yr2pi', 'kyr', 'myr', 'gyr'), lengths ('m', 'cm', 'km', 'au') and
    masses ('kg', 'msun', 'mmercury', 'mvenus', 'mearth', 'mmars',
    'mjupiter', 'msaturn', 'muranus', 'mpluto').

    Examples
    --------
    >>> sim = rebound.Simulation()
    >>> sim.units = ('yr', 'AU', 'Msun')
    """
    length_unit = hash_to_unit(self.python_unit_l)
    mass_unit = hash_to_unit(self.python_unit_m)
    time_unit = hash_to_unit(self.python_unit_t)
    return {'length': length_unit, 'mass': mass_unit, 'time': time_unit}
Tuple of the units for length, time and mass. Can be set in any order, and strings are not case-sensitive. See ipython_examples/Units.ipynb for more information. You can check the units' exact values and add Additional units in rebound/rebound/units.py. Units should be set before adding particles to the simulation (will give error otherwise). Currently supported Units ------------------------- Times: Hr : Hours Yr : Julian years Jyr : Julian years Sidereal_yr : Sidereal year Yr2pi : Year divided by 2pi, with year defined as orbital period of planet at 1AU around 1Msun star Kyr : Kiloyears (Julian) Myr : Megayears (Julian) Gyr : Gigayears (Julian) Lengths: M : Meters Cm : Centimeters Km : Kilometers AU : Astronomical Units Masses: Kg : Kilograms Msun : Solar masses Mmercury : Mercury masses Mvenus : Venus masses Mearth : Earth masses Mmars : Mars masses Mjupiter : Jupiter masses Msaturn : Saturn masses Muranus : Neptune masses Mpluto : Pluto masses Examples -------- >>> sim = rebound.Simulation() >>> sim.units = ('yr', 'AU', 'Msun')
def getChildTimeoutValue(self): """get child timeout""" print '%s call getChildTimeoutValue' % self.port childTimeout = self.__sendCommand(WPANCTL_CMD + 'getporp -v Thread:ChildTimeout')[0] return int(childTimeout)
get child timeout
def getDarkCurrentFunction(exposuretimes, imgs, **kwargs):
    '''
    get dark current function from given images and exposure times
    '''
    # Average duplicate exposure times first, then fit a linear model.
    avg_times, avg_imgs = getDarkCurrentAverages(exposuretimes, imgs)
    return getLinearityFunction(avg_times, avg_imgs, **kwargs)
get dark current function from given images and exposure times
def show(self):
    """
    Prints the content of this method to stdout.

    This will print the method signature and the decompiled code.
    """
    # Descriptor has the form "(<args>)<ret>"; drop the leading '(' and
    # split on ')' to separate arguments from the return type.
    args, ret = self.method.get_descriptor()[1:].split(")")
    if self.code:
        # We patch the descriptor here and add the registers, if code is available
        args = args.split(" ")
        reg_len = self.code.get_registers_size()
        nb_args = len(args)
        # Argument registers occupy the last slots of the register frame.
        start_reg = reg_len - nb_args
        args = ["{} v{}".format(a, start_reg + i) for i, a in enumerate(args)]
    print("METHOD {} {} {} ({}){}".format(
        self.method.get_class_name(),
        self.method.get_access_flags_string(),
        self.method.get_name(),
        ", ".join(args),
        ret))
    # Pretty-print the decompiled body from the basic blocks.
    bytecode.PrettyShow(self, self.basic_blocks.gets(), self.method.notes)
Prints the content of this method to stdout. This will print the method signature and the decompiled code.
def isubset(self, *keys):
    # type: (*Hashable) -> ww.g
    """Yield (key, self[key]) pairs for the requested keys.

    Raises KeyError lazily, when a missing key is reached during iteration.

    Args:
        keys: Iterable containing keys

    Example:

        >>> from ww import d
        >>> list(d({1: 1, 2: 2, 3: 3}).isubset(1, 3))
        [(1, 1), (3, 3)]
    """
    pairs = ((k, self[k]) for k in keys)
    return ww.g(pairs)
Return key, self[key] as generator for key in keys. Raise KeyError if a key does not exist Args: keys: Iterable containing keys Example: >>> from ww import d >>> list(d({1: 1, 2: 2, 3: 3}).isubset(1, 3)) [(1, 1), (3, 3)]
def add_arguments(self, parser):
    """Register the optional --length and --alphabet arguments with defaults."""
    parser.add_argument(
        '--length',
        type=int,
        default=self.length,
        help=_('SECRET_KEY length default=%d' % self.length),
    )
    parser.add_argument(
        '--alphabet',
        type=str,
        default=self.allowed_chars,
        help=_('alphabet to use default=%s' % self.allowed_chars),
    )
Define optional arguments with default values
def main(client, adgroup_id):
    """Runs the example.

    Builds a shopping product-partition tree under the given ad group and
    submits all criterion operations in a single mutate call, then prints
    the resulting tree.

    Args:
        client: an initialized AdWordsClient instance.
        adgroup_id: the ID of the shopping ad group to build the tree on.
    """
    adgroup_criterion_service = client.GetService(
        'AdGroupCriterionService', version='v201809')
    helper = ProductPartitionHelper(adgroup_id)
    # The most trivial partition tree has only a unit node as the root, e.g.:
    # helper.CreateUnit(bid_amount=100000)
    root = helper.CreateSubdivision()
    new_product_canonical_condition = {
        'xsi_type': 'ProductCanonicalCondition',
        'condition': 'NEW'
    }
    used_product_canonical_condition = {
        'xsi_type': 'ProductCanonicalCondition',
        'condition': 'USED'
    }
    # A dimension with no 'condition' acts as the catch-all "other" node.
    other_product_canonical_condition = {
        'xsi_type': 'ProductCanonicalCondition',
    }
    helper.CreateUnit(root, new_product_canonical_condition, 200000)
    helper.CreateUnit(root, used_product_canonical_condition, 100000)
    other_condition = helper.CreateSubdivision(
        root, other_product_canonical_condition)
    cool_product_brand = {
        'xsi_type': 'ProductBrand',
        'value': 'CoolBrand'
    }
    cheap_product_brand = {
        'xsi_type': 'ProductBrand',
        'value': 'CheapBrand'
    }
    other_product_brand = {
        'xsi_type': 'ProductBrand',
    }
    helper.CreateUnit(other_condition, cool_product_brand, 900000)
    helper.CreateUnit(other_condition, cheap_product_brand, 10000)
    other_brand = helper.CreateSubdivision(other_condition, other_product_brand)
    # The value for the bidding category is a fixed ID for the 'Luggage & Bags'
    # category. You can retrieve IDs for categories from the ConstantDataService.
    # See the 'GetProductTaxonomy' example for more details.
    luggage_category = {
        'xsi_type': 'ProductBiddingCategory',
        'type': 'BIDDING_CATEGORY_L1',
        'value': '-5914235892932915235'
    }
    generic_category = {
        'xsi_type': 'ProductBiddingCategory',
        'type': 'BIDDING_CATEGORY_L1',
    }
    helper.CreateUnit(other_brand, luggage_category, 750000)
    helper.CreateUnit(other_brand, generic_category, 110000)
    # Make the mutate request
    result = adgroup_criterion_service.mutate(helper.GetOperations())
    children = {}
    root_node = None
    # For each criterion, make an array containing each of its children.
    # We always create the parent before the child, so we can rely on that here.
    for adgroup_criterion in result['value']:
        children[adgroup_criterion['criterion']['id']] = []
        if 'parentCriterionId' in adgroup_criterion['criterion']:
            children[adgroup_criterion['criterion']['parentCriterionId']].append(
                adgroup_criterion['criterion'])
        else:
            # The criterion with no parent is the tree root.
            root_node = adgroup_criterion['criterion']
    # Show the tree
    DisplayTree(root_node, children)
Runs the example.
def clone_name(s, ca=False):
    """Normalize a clone name by stripping its trace suffix.

    With the default ``ca=False`` the last character (e.g. a forward/reverse
    suffix letter) is dropped. With ``ca=True``, names starting with '1' have
    their two-character prefix removed; otherwise trailing 'a'/'b' characters
    are stripped.

    FIX: the first doctest previously omitted ``ca=True`` (for which the
    shown result would be wrong) and quoted the expected values incorrectly.

    >>> clone_name("120038881639", ca=True)
    '0038881639'
    >>> clone_name("GW11W6RK01DAJDWa")
    'GW11W6RK01DAJDW'
    """
    if not ca:
        return s[:-1]
    if s[0] == '1':
        return s[2:]
    return s.rstrip('ab')
>>> clone_name("120038881639") "0038881639" >>> clone_name("GW11W6RK01DAJDWa") "GW11W6RK01DAJDW"
def get_report(time='hour', threshold='significant', online=False):
    """
    Retrieves a new Report about recent earthquakes.

    :param str time: A string indicating the time range of earthquakes to
        report. Must be either "hour" (only earthquakes in the past hour),
        "day" (only earthquakes that happened today), "week" (only
        earthquakes that happened in the past 7 days), or "month" (only
        earthquakes that happened in the past 30 days).
    :param str threshold: A string indicating what kind of earthquakes to
        report. Must be either "significant" (only significant earthquakes),
        "all" (all earthquakes, regardless of significance), "4.5", "2.5",
        or "1.0". Note that for the last three, all earthquakes at and above
        that level will be reported.
    :returns: :ref:`Report`
    """
    if threshold not in THRESHOLDS:
        raise USGSException('Unknown threshold: "{}" (must be either "significant", "all", "4.5", "2.5", or "1.0")'.format(threshold))
    if time not in TIMES:
        raise USGSException('Unknown time: "{}" (must be either "hour", "day", "week", "month")'.format(time))
    try:
        result = _get_report_string(time, threshold, online)
    except HTTPError as e:
        raise USGSException("Internet error ({}): {}".format(e.code, e.reason))
    if result == "":
        # Empty response: synthesize an empty Report with a descriptive title.
        # FIX: the 'Magnitude {}+' template was never formatted with the
        # threshold, so titles contained a literal '{}+'.
        formatted_threshold = 'Magnitude {}+'.format(threshold) if threshold not in ('significant', 'all') else threshold.title()
        result = Report._from_json({'metadata': {'title': 'USGS {} Earthquakes, Past {}'.format(formatted_threshold, time.title())}})
        if _USE_CLASSES:
            return result
        else:
            return result._to_dict()
    elif result:
        try:
            json_result = _from_json(result)
        except ValueError:
            raise USGSException("The response from the server didn't make any sense.")
        if _USE_CLASSES:
            return Report._from_json(json_result)
        else:
            return Report._from_json(json_result)._to_dict()
    else:
        # No data at all: distinguish a live failure from a cache miss.
        if _CONNECTED or online:
            raise USGSException("No response from the server.")
        else:
            raise USGSException("No data was in the cache for this time and threshold ('{}', '{}').".format(time, threshold))
Retrieves a new Report about recent earthquakes. :param str time: A string indicating the time range of earthquakes to report. Must be either "hour" (only earthquakes in the past hour), "day" (only earthquakes that happened today), "week" (only earthquakes that happened in the past 7 days), or "month" (only earthquakes that happened in the past 30 days). :param str threshold: A string indicating what kind of earthquakes to report. Must be either "significant" (only significant earthquakes), "all" (all earthquakes, regardless of significance), "4.5", "2.5", or "1.0". Note that for the last three, all earthquakes at and above that level will be reported. :returns: :ref:`Report`
def rc(self):
    """Return a copy of this transcript with its strand direction flipped."""
    flipped = self.copy()
    new_direction = '-' if flipped.strand == '+' else '+'
    flipped._options = flipped._options._replace(direction=new_direction)
    return flipped
Flip the direction
def train(model, X_train=None, Y_train=None, save=False, predictions_adv=None,
          evaluate=None, args=None, rng=None, var_list=None, attack=None,
          attack_args=None):
    """
    Train a TF Eager model
    :param model: cleverhans.model.Model
    :param X_train: numpy array with training inputs
    :param Y_train: numpy array with training outputs
    :param save: boolean controlling the save operation
    :param predictions_adv: if set with the adversarial example tensor,
                            will run adversarial training
    :param evaluate: function that is run after each training iteration
                     (typically to display the test/validation accuracy).
    :param args: dict or argparse `Namespace` object.
                 Should contain `nb_epochs`, `learning_rate`,
                 `batch_size`
                 If save is True, should also contain 'train_dir'
                 and 'filename'
    :param rng: Instance of numpy.random.RandomState
    :param var_list: List of variables to train.
    :param attack: Instance of the class cleverhans.attacks.attacks_eager
    :param attack_args: Parameters required for the attack.
    :return: True if model trained
    """
    assert isinstance(model, Model)
    args = _ArgsWrapper(args or {})
    # attack and its arguments must be provided together or not at all.
    if ((attack is None) != (attack_args is None)):
        raise ValueError("attack and attack_args must be "
                         "passed together.")
    if X_train is None or Y_train is None:
        raise ValueError("X_train argument and Y_train argument "
                         "must be supplied.")
    # Check that necessary arguments were given (see doc above)
    assert args.nb_epochs, "Number of epochs was not given in args dict"
    assert args.learning_rate, "Learning rate was not given in args dict"
    assert args.batch_size, "Batch size was not given in args dict"
    if save:
        assert args.train_dir, "Directory for save was not given in args dict"
        assert args.filename, "Filename for save was not given in args dict"
    if rng is None:
        rng = np.random.RandomState()
    # Optimizer
    tfe = tf.contrib.eager
    optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
    # Reusable eager variables holding the current minibatch; reassigned
    # in-place each step to avoid re-allocating.
    batch_x = tfe.Variable(X_train[0:args.batch_size], dtype=tf.float32)
    batch_y = tfe.Variable(Y_train[0:args.batch_size], dtype=tf.float32)
    # One epoch of training.
    for epoch in xrange(args.nb_epochs):
        # Compute number of batches
        nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
        assert nb_batches * args.batch_size >= len(X_train)
        # Indices to shuffle training set
        index_shuf = list(range(len(X_train)))
        rng.shuffle(index_shuf)
        prev = time.time()
        for batch in range(nb_batches):
            # Compute batch start and end indices
            start, end = batch_indices(
                batch, len(X_train), args.batch_size)
            # Perform one training step
            tf.assign(batch_x, X_train[index_shuf[start:end]])
            tf.assign(batch_y, Y_train[index_shuf[start:end]])
            # Compute grads
            with tf.GradientTape() as tape:
                # Define loss
                loss_clean_obj = LossCrossEntropy(model, smoothing=0.)
                loss_clean = loss_clean_obj.fprop(x=batch_x, y=batch_y)
                loss = loss_clean
                # Adversarial training: average clean and adversarial losses.
                if attack is not None:
                    batch_adv_x = attack.generate(batch_x, **attack_args)
                    loss_adv_obj = LossCrossEntropy(model, smoothing=0.)
                    loss_adv = loss_adv_obj.fprop(x=batch_adv_x, y=batch_y)
                    loss = (loss_clean + loss_adv) / 2.0
            # Apply grads
            model_variables = model.get_params()
            grads = tape.gradient(loss, model_variables)
            optimizer.apply_gradients(zip(grads, model_variables))
        assert end >= len(X_train)  # Check that all examples were used
        cur = time.time()
        _logger.info("Epoch " + str(epoch) + " took " +
                     str(cur - prev) + " seconds")
        if evaluate is not None:
            evaluate()
    if save:
        save_path = os.path.join(args.train_dir, args.filename)
        saver = tf.train.Saver()
        # NOTE(review): tf.train.Saver.save normally takes (sess, save_path);
        # verify this eager-mode call signature is correct.
        saver.save(save_path, model_variables)
        _logger.info("Completed model training and saved at: " +
                     str(save_path))
    else:
        _logger.info("Completed model training.")
    return True
Train a TF Eager model :param model: cleverhans.model.Model :param X_train: numpy array with training inputs :param Y_train: numpy array with training outputs :param save: boolean controlling the save operation :param predictions_adv: if set with the adversarial example tensor, will run adversarial training :param evaluate: function that is run after each training iteration (typically to display the test/validation accuracy). :param args: dict or argparse `Namespace` object. Should contain `nb_epochs`, `learning_rate`, `batch_size` If save is True, should also contain 'train_dir' and 'filename' :param rng: Instance of numpy.random.RandomState :param var_list: List of variables to train. :param attack: Instance of the class cleverhans.attacks.attacks_eager :param attack_args: Parameters required for the attack. :return: True if model trained
def generate_protocol(self): """ Recreate the command stimulus (protocol) for the current sweep. It's not stored point by point (that's a waste of time and memory!) Instead it's stored as a few (x,y) points which can be easily graphed. TODO: THIS for segment in abf.ABFreader.read_protocol(): for analogsignal in segment.analogsignals: print(analogsignal) plt.plot(analogsignal) plt.show() plt.close('all') """ # TODO: elegantly read the protocol like this: #abf.ABFreader.read_protocol()[0].analogsignals()[sigNum] # TODO: right now this works only for the first channel # correct for weird recording/protocol misalignment #what is magic here? 64-bit data points? #1,000,000/64 = 15625 btw self.offsetX = int(self.sweepSize/64) # if there's not a header, get out of here! if not len(self.header['dictEpochInfoPerDAC']): self.log.debug("no protocol defined, so I'll make one") self.protoX,self.protoY=[0,self.sweepX[-1]],[self.holding,self.holding] self.protoSeqX,self.protoSeqY=[0],[self.holding] return # load our protocol from the header proto=self.header['dictEpochInfoPerDAC'][self.channel] # prepare our (x,y) pair arrays self.protoX,self.protoY=[] ,[] # assume our zero time point is the "holding" value self.protoX.append(0) self.protoY.append(self.holding) #TODO: what is this??? 
# now add x,y points for each change in the protocol for step in proto: dX = proto[step]['lEpochInitDuration'] Y = proto[step]['fEpochInitLevel']+proto[step]['fEpochLevelInc']*self.sweep # we have a new Y value, so add it to the last time point self.protoX.append(self.protoX[-1]) self.protoY.append(Y) # now add the same Y point after "dX" amount of time self.protoX.append(self.protoX[-1]+dX) self.protoY.append(Y) # TODO: detect ramps and warn what's up # The last point is probably holding current finalVal=self.holding #regular holding # although if it's set to "use last value", maybe that should be the last one if self.header['listDACInfo'][0]['nInterEpisodeLevel']: finalVal=self.protoY[-1] # add the shift to the final value to the list self.protoX.append(self.protoX[-1]) self.protoY.append(finalVal) # and again as the very last time point self.protoX.append(self.sweepSize) self.protoY.append(finalVal) # update the sequence of protocols now (eliminate duplicate entries) for i in range(1,len(self.protoX)-1): #correct for weird ABF offset issue. self.protoX[i]=self.protoX[i]+self.offsetX self.protoSeqY=[self.protoY[0]] self.protoSeqX=[self.protoX[0]] for i in range(1,len(self.protoY)): if not self.protoY[i]==self.protoY[i-1]: self.protoSeqY.append(self.protoY[i]) self.protoSeqX.append(self.protoX[i]) if self.protoY[0]!=self.protoY[1]: self.protoY.insert(1,self.protoY[0]) self.protoX.insert(1,self.protoX[1]) self.protoY.insert(1,self.protoY[0]) self.protoX.insert(1,self.protoX[0]+self.offsetX/2) self.protoSeqY.append(finalVal) self.protoSeqX.append(self.sweepSize) # convert lists to numpy arrays and do any final conversions self.protoX=np.array(self.protoX)/self.pointsPerSec self.protoY=np.array(self.protoY)
Recreate the command stimulus (protocol) for the current sweep. It's not stored point by point (that's a waste of time and memory!) Instead it's stored as a few (x,y) points which can be easily graphed. TODO: THIS for segment in abf.ABFreader.read_protocol(): for analogsignal in segment.analogsignals: print(analogsignal) plt.plot(analogsignal) plt.show() plt.close('all')
def _get_column(cls, name):
    """
    Based on cqlengine.models.BaseModel._get_column. But to work with 'pk'
    """
    if name != 'pk':
        return cls._columns[name]
    # 'pk' is resolved through the Django meta options rather than cqlengine.
    return cls._meta.get_field(cls._meta.pk.name)
Based on cqlengine.models.BaseModel._get_column. But to work with 'pk'
def _rebin(self, xdx, rebin):
    """
    Rebin array x with weights 1/dx**2 by factor rebin.

    Inputs:
        xdx = [x, dx]  (arrays of values and their uncertainties)
        rebin = int
    Returns np.array([x, dx]) after rebinning (the input tuple is returned
    unchanged when rebin <= 1). Note: dx is modified in place (zeros are
    replaced by np.inf to avoid division by zero).

    FIX: bin slices used ``[i:i+rebin-1]``, which silently dropped the last
    sample of every bin; they now cover all ``rebin`` samples.
    """
    x = xdx[0]
    dx = xdx[1]
    rebin = int(rebin)
    # easy end condition
    if rebin <= 1:
        return (x, dx)
    lenx = len(x)
    x_rebin = []
    dx_rebin = []
    # avoid dividing by zero
    dx[dx == 0] = np.inf
    # weighted mean over each full bin (trailing partial bin included)
    for i in np.arange(0, lenx, rebin):
        w = 1. / dx[i:i + rebin] ** 2
        wsum = np.sum(w)
        if wsum == 0:
            # all uncertainties were zero/inf: fall back to unweighted stats
            x_rebin.append(np.mean(x[i:i + rebin]))
            dx_rebin.append(np.std(x[i:i + rebin]))
        else:
            x_rebin.append(np.sum(x[i:i + rebin] * w) / wsum)
            dx_rebin.append(1. / wsum ** 0.5)
    return np.array([x_rebin, dx_rebin])
Rebin array x with weights 1/dx**2 by factor rebin. Inputs: xdx = [x,dx] rebin = int Returns [x,dx] after rebinning.
def add_nest(self, name=None, **kw):
    """A simple decorator which wraps :meth:`nestly.core.Nest.add`."""
    def decorator(func):
        # Fall back to the function's own name when no name was given.
        self.add(name or func.__name__, func, **kw)
        return func
    return decorator
A simple decorator which wraps :meth:`nestly.core.Nest.add`.
def cancel(batch_fn, cancel_fn, ops):
    """Cancel operations.

    Canceling many operations one-by-one can be slow, and the Pipelines API
    doesn't directly support a list of operations to cancel, so the requests
    are performed in batches of at most 256.

    Args:
      batch_fn: API-specific batch function.
      cancel_fn: API-specific cancel function.
      ops: A list of operations to cancel.

    Returns:
      A list of operations canceled and a list of error messages.
    """
    canceled = []
    errors = []
    max_batch = 256
    for start in range(0, len(ops), max_batch):
        chunk = ops[start:start + max_batch]
        chunk_canceled, chunk_errors = _cancel_batch(batch_fn, cancel_fn, chunk)
        canceled.extend(chunk_canceled)
        errors.extend(chunk_errors)
    return canceled, errors
Cancel operations. Args: batch_fn: API-specific batch function. cancel_fn: API-specific cancel function. ops: A list of operations to cancel. Returns: A list of operations canceled and a list of error messages.
def coverage(self,
             container: Container,
             *,
             instrument: bool = True
             ) -> TestSuiteCoverage:
    """
    Computes complete test suite coverage for a given container.

    Parameters:
        container: the container for which coverage should be computed.
        instrument: whether the program should be instrumented for coverage
            collection before the test suite is run.

    Raises:
        BugZooException: if the server reports an error for the request.
    """
    uid = container.uid
    logger.info("Fetching coverage information for container: %s", uid)
    uri = 'containers/{}/coverage'.format(uid)
    r = self.__api.post(uri, params={'instrument': 'yes' if instrument else 'no'})
    if r.status_code == 200:
        jsn = r.json()
        coverage = TestSuiteCoverage.from_dict(jsn)  # type: ignore
        logger.info("Fetched coverage information for container: %s", uid)
        return coverage
    # Non-200: convert the response into the appropriate exception and
    # re-raise after logging.
    try:
        self.__api.handle_erroneous_response(r)
    except exceptions.BugZooException as err:
        logger.exception("Failed to fetch coverage information for container %s: %s", uid, err.message)  # noqa: pycodestyle
        raise
    except Exception as err:
        logger.exception("Failed to fetch coverage information for container %s due to unexpected failure: %s", uid, err)  # noqa: pycodestyle
        raise
Computes complete test suite coverage for a given container. Parameters: container: the container for which coverage should be computed. rebuild: if set to True, the program will be rebuilt before coverage is computed.
def handle_message(received_message, control_plane_sockets, data_plane_sockets):
    """
    Handle a LISP message. The default handle method determines the type of
    message and delegates it to the more specific method.

    Unexpected errors while handling a message are logged and suppressed so
    that one bad message cannot take down the message loop.
    """
    logger.debug(u"Handling message #{0} ({1}) from {2}".format(received_message.message_nr,
                                                                received_message.message.__class__.__name__,
                                                                received_message.source[0]))
    try:
        if isinstance(received_message.message, MapRequestMessage):
            # A map-request message
            handle_map_request(received_message, control_plane_sockets, data_plane_sockets)
        elif isinstance(received_message.message, MapReplyMessage):
            # A map-reply message
            handle_map_reply(received_message, control_plane_sockets, data_plane_sockets)
        elif isinstance(received_message.message, MapNotifyMessage):
            # A map-notify message (subclass of MapRegisterMessage, so put above it!)
            handle_map_notify(received_message, control_plane_sockets, data_plane_sockets)
        elif isinstance(received_message.message, MapRegisterMessage):
            # A map-register message
            handle_map_register(received_message, control_plane_sockets, data_plane_sockets)
        elif isinstance(received_message.message, MapReferralMessage):
            # A map-referral message
            handle_map_referral(received_message, control_plane_sockets, data_plane_sockets)
        elif isinstance(received_message.message, EncapsulatedControlMessage):
            # Determine the type of ECM
            if isinstance(received_message.inner_message, MapRequestMessage):
                if received_message.message.ddt_originated:
                    # A DDT map-request message
                    handle_ddt_map_request(received_message, control_plane_sockets, data_plane_sockets)
                else:
                    # An encapsulated map-request message
                    handle_enc_map_request(received_message, control_plane_sockets, data_plane_sockets)
            else:
                logger.warning("ECM does not contain a map-request in message %d",
                               received_message.message_nr)
        elif isinstance(received_message.message, InfoMessage):
            handle_info_message(received_message, control_plane_sockets, data_plane_sockets)
        else:
            logger.warning("Unknown content in message %d",
                           received_message.message_nr)
    except Exception:
        # FIX: was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; only unexpected errors should be logged here.
        logger.exception("Unexpected exception while handling message %d",
                         received_message.message_nr)
Handle a LISP message. The default handle method determines the type of message and delegates it to the more specific method
def reset_and_halt(self, reset_type=None):
    """
    perform a reset and stop the core on the reset handler

    A delegate may implement 'set_reset_catch' itself; if it returns a truthy
    value, the built-in halt + vector-catch setup is skipped. The original
    DEMCR value is saved and restored so the vector-catch change is not
    permanent.
    """
    # Give the delegate a chance to install its own reset-catch mechanism.
    delegateResult = self.call_delegate('set_reset_catch', core=self, reset_type=reset_type)
    # halt the target
    if not delegateResult:
        self.halt()
    # Save CortexM.DEMCR
    demcr = self.read_memory(CortexM.DEMCR)
    # enable the vector catch
    if not delegateResult:
        self.write_memory(CortexM.DEMCR, demcr | CortexM.DEMCR_VC_CORERESET)
    self.reset(reset_type)
    # wait until the unit resets
    with timeout.Timeout(2.0) as t_o:
        while t_o.check():
            if self.get_state() not in (Target.TARGET_RESET, Target.TARGET_RUNNING):
                break
            sleep(0.01)
    # Make sure the thumb bit is set in XPSR in case the reset handler
    # points to an invalid address.
    xpsr = self.read_core_register('xpsr')
    if xpsr & self.XPSR_THUMB == 0:
        self.write_core_register('xpsr', xpsr | self.XPSR_THUMB)
    # Let the delegate undo whatever 'set_reset_catch' installed.
    self.call_delegate('clear_reset_catch', core=self, reset_type=reset_type)
    # restore vector catch setting
    self.write_memory(CortexM.DEMCR, demcr)
perform a reset and stop the core on the reset handler
def _tofile(self, fh, pam=False):
    """Serialize this image to *fh* as a Netpbm/PAM file (header + raster)."""
    fh.seek(0)
    fh.write(self._header(pam))
    payload = self.asarray(copy=False)
    if self.maxval == 1:
        # Bilevel images are stored bit-packed, eight pixels per byte.
        payload = numpy.packbits(payload, axis=-1)
    payload.tofile(fh)
Write Netbm file.
def __write(path, data, mode="w"):
    '''
    Write *data* to *path* as pretty-printed JSON and return the JSON text.

    path - (string) path to the file to write to.
    data - (json) data from a request.
    mode - (string) mode to open the file in. Default to 'w'. Overwrites.
    '''
    with open(path, mode) as fh:
        payload = json.dumps(data, indent=4)
        fh.write(payload)
    return payload
Writes to a File. Returns the data written. path - (string) path to the file to write to. data - (json) data from a request. mode - (string) mode to open the file in. Default to 'w'. Overwrites.
def _logmessage_transform(cls, s, by=2):
    """Preprocess/cleanup a bzr log message before parsing.

    Args:
        s (str): log message string
        by (int): number of leading characters to drop when the message is
            long enough

    Returns:
        str: preprocessed log message string with trailing newlines removed
    """
    if len(s) < by:
        return s.strip('\n')
    return s[by:].strip('\n')
Preprocess/cleanup a bzr log message before parsing Args: s (str): log message string by (int): cutoff threshold for log message length Returns: str: preprocessed log message string
def get_river_index(self, river_id):
    """Return the index of *river_id* within the netCDF dataset.

    Parameters
    ----------
    river_id: int
        The ID of the river segment.

    Returns
    -------
    int:
        The index of the river ID's in the file.

    Raises
    ------
    IndexError
        If the river ID is not present in the dataset.

    Example::

        from RAPIDpy import RAPIDDataset

        path_to_rapid_qout = '/path/to/Qout.nc'
        river_id = 53458
        with RAPIDDataset(path_to_rapid_qout) as qout_nc:
            river_index = qout_nc.get_river_index(river_id)
    """
    matches = np.where(self.get_river_id_array() == river_id)[0]
    if matches.size == 0:
        raise IndexError("ERROR: River ID {0} not found in dataset "
                         "...".format(river_id))
    return matches[0]
This method retrieves the river index in the netCDF dataset corresponding to the river ID. Parameters ---------- river_id: int The ID of the river segment. Returns ------- int: The index of the river ID's in the file. Example:: from RAPIDpy import RAPIDDataset path_to_rapid_qout = '/path/to/Qout.nc' river_id = 53458 with RAPIDDataset(path_to_rapid_qout) as qout_nc: river_index = qout_nc.get_river_index(river_id)
def remove_colormap(self, removal_type):
    """Remove a palette (colormap); if no colormap, returns a copy of this image.

    removal_type - any of lept.REMOVE_CMAP_*
    """
    with _LeptonicaErrorTrap():
        raw = lept.pixRemoveColormapGeneral(self._cdata, removal_type, lept.L_COPY)
        return Pix(raw)
Remove a palette (colormap); if no colormap, returns a copy of this image removal_type - any of lept.REMOVE_CMAP_*
def get_create_option(self, context, q):
    """Form the correct create_option to append to results.

    When a non-empty query *q* is present on the first results page and the
    user may add records, offers to generate a TransactionParty from a
    matching Location, StaffMember, or User that does not yet have one, and
    finally to create a party from the raw name.
    """
    create_option = []
    display_create_option = False
    if self.create_field and q:
        page_obj = context.get('page_obj', None)
        # Only offer creation on the first page of results.
        if page_obj is None or page_obj.number == 1:
            display_create_option = True
    if display_create_option and self.has_add_permission(self.request):
        '''
        Generate querysets of Locations, StaffMembers, and Users that
        match the query string.
        '''
        # Locations without an associated transaction party.
        for s in Location.objects.filter(
            Q(
                Q(name__istartswith=q) &
                Q(transactionparty__isnull=True)
            )
        ):
            create_option += [{
                'id': 'Location_%s' % s.id,
                'text': _('Generate from location "%(location)s"') % {'location': s.name},
                'create_id': True,
            }]
        # Staff members (matched on first or last name) without a party.
        for s in StaffMember.objects.filter(
            Q(
                (Q(firstName__istartswith=q) | Q(lastName__istartswith=q)) &
                Q(transactionparty__isnull=True)
            )
        ):
            create_option += [{
                'id': 'StaffMember_%s' % s.id,
                'text': _('Generate from staff member "%(staff_member)s"') % {'staff_member': s.fullName},
                'create_id': True,
            }]
        # Users who are not staff members and have no party.
        for s in User.objects.filter(
            Q(
                (Q(first_name__istartswith=q) | Q(last_name__istartswith=q)) &
                Q(staffmember__isnull=True) &
                Q(transactionparty__isnull=True)
            )
        ):
            create_option += [{
                'id': 'User_%s' % s.id,
                'text': _('Generate from user "%(user)s"') % {'user': s.get_full_name()},
                'create_id': True,
            }]
        # Finally, allow creation from a name only.
        create_option += [{
            'id': q,
            'text': _('Create "%(new_value)s"') % {'new_value': q},
            'create_id': True,
        }]
    return create_option
Form the correct create_option to append to results.
def remove_all_cts_records_by(file_name, crypto_idfp):
    """Remove all cts records set by player with CRYPTO_IDFP"""
    database = XonoticDB.load_path(file_name)
    database.remove_all_cts_records_by(crypto_idfp)
    # Persist the pruned database back to the same file.
    database.save(file_name)
Remove all cts records set by player with CRYPTO_IDFP