code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def process_response(self, req, resp, resource):
    """Post-process the response (after routing).

    Some fundamental errors (404 when the route isn't found, 405 when the
    method is not allowed) can't be intercepted any other way in Falcon,
    which would otherwise send its own error bodies.  Intercept them here
    and replace them with our JSON API compliant versions.

    TIP: If no route could be determined then ``resource`` will be None.
    """
    status = resp.status
    if not resource and status == falcon.HTTP_404:
        abort(exceptions.RouteNotFound)
    elif status == falcon.HTTP_405:
        abort(exceptions.MethodNotAllowed)
Post-processing of the response (after routing). Some fundamental errors can't be intercepted any other way in Falcon. These include 404 when the route isn't found & 405 when the method is bunk. Falcon will try to send its own errors. In these cases, intercept them & replace them with our JSON API compliant version. TIP: If no route could be determined then the resource will be None.
def search(searchList, matchStr, numSyllables=None, wordInitial='ok',
           wordFinal='ok', spanSyllable='ok', stressedSyllable='ok',
           multiword='ok', pos=None):
    '''
    Searches for matching words in the dictionary with regular expressions

    wordInitial, wordFinal, spanSyllable, stressSyllable, and multiword
    can take three different values: 'ok', 'only', or 'no'.

    pos: a tag in the Penn Part of Speech tagset
        see isletool.posList for the full list of possible tags

    Special search characters:
    'D' - any dental; 'F' - any fricative; 'S' - any stop
    'V' - any vowel; 'N' - any nasal; 'R' - any rhotic
    '#' - word boundary
    'B' - syllable boundary
    '.' - anything

    For advanced queries:
    Regular expression syntax applies, so if you wanted to search
    for any word ending with a vowel or rhotic, matchStr = '(?:VR)#',
    '[VR]#', etc.
    '''
    # Run search for words
    # Expand the special search characters into a real regex pattern.
    matchStr = _prepRESearchStr(matchStr, wordInitial, wordFinal,
                                spanSyllable, stressedSyllable)
    compiledRE = re.compile(matchStr)
    retList = []
    for word, pronList in searchList:
        newPronList = []
        for pron, posList in pronList:
            # Strip separators so the regex runs over a compact string.
            searchPron = pron.replace(",", "").replace(" ", "")
            # Search for pos
            if pos is not None:
                if pos not in posList:
                    continue
            # Ignore diacritics for now:
            # (unless the query itself asks for a specific diacritic)
            for diacritic in diacriticList:
                if diacritic not in matchStr:
                    searchPron = searchPron.replace(diacritic, "")
            # Syllable count = number of syllable separators ('.') + 1.
            if numSyllables is not None:
                if numSyllables != searchPron.count('.') + 1:
                    continue
            # Is this a compound word?
            # A single word carries two '#' boundary marks; compounds more.
            if multiword == 'only':
                if searchPron.count('#') == 2:
                    continue
            elif multiword == 'no':
                if searchPron.count('#') > 2:
                    continue
            matchList = compiledRE.findall(searchPron)
            if len(matchList) > 0:
                # Stress filter: u"ˈ" is the IPA primary-stress mark.
                if stressedSyllable == 'only':
                    if all([u"ˈ" not in match for match in matchList]):
                        continue
                if stressedSyllable == 'no':
                    if all([u"ˈ" in match for match in matchList]):
                        continue
                # For syllable spanning, we check if there is a syllable
                # marker inside (not at the border) of the match.
                if spanSyllable == 'only':
                    if all(["." not in txt[1:-1] for txt in matchList]):
                        continue
                if spanSyllable == 'no':
                    if all(["." in txt[1:-1] for txt in matchList]):
                        continue
                newPronList.append((pron, posList))
        if len(newPronList) > 0:
            retList.append((word, newPronList))
    retList.sort()
    return retList
Searches for matching words in the dictionary with regular expressions wordInitial, wordFinal, spanSyllable, stressSyllable, and multiword can take three different values: 'ok', 'only', or 'no'. pos: a tag in the Penn Part of Speech tagset see isletool.posList for the full list of possible tags Special search characters: 'D' - any dental; 'F' - any fricative; 'S' - any stop 'V' - any vowel; 'N' - any nasal; 'R' - any rhotic '#' - word boundary 'B' - syllable boundary '.' - anything For advanced queries: Regular expression syntax applies, so if you wanted to search for any word ending with a vowel or rhotic, matchStr = '(?:VR)#', '[VR]#', etc.
def cut_action_callback(self, *event):
    """Add a copy and cut all selected row dict value pairs to the clipboard"""
    # Only act when the event targets this view and no cell editor is open.
    if react_to_event(self.view, self.tree_view, event) and self.active_entry_widget is None:
        _, dict_paths = self.get_view_selection()
        stored_data_list = []
        for dict_path_as_list in dict_paths:
            if dict_path_as_list:
                # Walk the nested semantic-data dict down to the selected value.
                value = self.model.state.semantic_data
                for path_element in dict_path_as_list:
                    value = value[path_element]
                # Store the leaf key (last path element) with its value.
                stored_data_list.append((path_element, value))
                # Cut: remove the entry from the state after copying it.
                self.model.state.remove_semantic_data(dict_path_as_list)
        rafcon.gui.clipboard.global_clipboard.set_semantic_dictionary_list(stored_data_list)
        self.reload_tree_store_data()
Add a copy and cut all selected row dict value pairs to the clipboard
def _merge_relative_path(dst_path, rel_path):
    """Merge a relative tar file to a destination (which can be "gs://...").

    Args:
      dst_path: Destination directory (may be a "gs://..." style path).
      rel_path: Relative path of the tar member to merge under ``dst_path``.

    Returns:
      The merged path, or ``dst_path`` itself when ``rel_path`` is empty.

    Raises:
      ValueError: If ``rel_path`` escapes ``dst_path``.
    """
    # Normalize to remove ".", "..", "//", which are valid directories in
    # filesystems like "gs://".
    normalized = os.path.normpath(rel_path.lstrip("/"))
    if normalized == ".":
        return dst_path
    # Reject paths that would climb out of the destination.
    if normalized.startswith(".."):
        raise ValueError("Relative path %r is invalid." % rel_path)
    joined = os.path.join(dst_path, normalized)
    # After merging, verify the result still keeps the original dst_path.
    if not joined.startswith(dst_path):
        raise ValueError("Relative path %r is invalid. Failed to merge with %r." % (
            rel_path, dst_path))
    return joined
Merge a relative tar file to a destination (which can be "gs://...").
def groupIcon(cls, groupName, default=None):
    """
    Returns the icon for the inputed group name.

    :param      groupName | <str>
                default   | <str> || None

    :return     <str>
    """
    # Lazily create the class-level icon registry.
    if cls._groupIcons is None:
        cls._groupIcons = {}
    fallback = default or projexui.resources.find('img/settings_32.png')
    return cls._groupIcons.get(nativestring(groupName), fallback)
Returns the icon for the inputed group name. :param groupName | <str> default | <str> || None :return <str>
def _get_stddevs(self, stddev_types, pgv):
    """
    Return standard deviations as defined in equation 3.5.5-1 page 151
    """
    for stddev_type in stddev_types:
        assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
    sigma = np.zeros_like(pgv)
    low = pgv <= 25
    mid = (pgv > 25) & (pgv <= 50)
    high = pgv > 50
    sigma[low] = 0.20
    # Linear interpolation between 0.20 and 0.15 over the 25-50 range.
    sigma[mid] = 0.20 - 0.05 * (pgv[mid] - 25) / 25
    sigma[high] = 0.15
    # convert from log10 to ln
    sigma = np.log(10 ** sigma)
    return [sigma for _ in stddev_types]
Return standard deviations as defined in equation 3.5.5-1 page 151
def preview(klass, account, **kwargs):
    """
    Returns an HTML preview of a tweet, either new or existing.
    """
    params = dict(kwargs)
    # handles array to string conversion for media IDs
    media_ids = params.get('media_ids')
    if isinstance(media_ids, list):
        params['media_ids'] = ','.join(map(str, media_ids))
    # Existing tweets preview by ID; new tweets use the generic endpoint.
    if params.get('id'):
        resource = klass.TWEET_ID_PREVIEW
    else:
        resource = klass.TWEET_PREVIEW
    resource = resource.format(account_id=account.id, id=params.get('id'))
    response = Request(account.client, 'get', resource, params=params).perform()
    return response.body['data']
Returns an HTML preview of a tweet, either new or existing.
def lookup_symbols(self, symbols, as_of_date, fuzzy=False, country_code=None):
    """
    Lookup a list of equities by symbol.

    Equivalent to::

        [finder.lookup_symbol(s, as_of, fuzzy) for s in symbols]

    but potentially faster because repeated lookups are memoized.

    Parameters
    ----------
    symbols : sequence[str]
        Sequence of ticker symbols to resolve.
    as_of_date : pd.Timestamp
        Forwarded to ``lookup_symbol``.
    fuzzy : bool, optional
        Forwarded to ``lookup_symbol``.
    country_code : str or None, optional
        The country to limit searches to. If not provided, the search will
        span all countries which increases the likelihood of an ambiguous
        lookup.

    Returns
    -------
    equities : list[Equity]
    """
    if not symbols:
        return []

    multi_country = country_code is None
    # Choose the lookup function and ownership map matching the fuzzy flag.
    if fuzzy:
        f = self._lookup_symbol_fuzzy
        mapping = self._choose_fuzzy_symbol_ownership_map(country_code)
    else:
        f = self._lookup_symbol_strict
        mapping = self._choose_symbol_ownership_map(country_code)

    # No ownership map means nothing can resolve for this country code.
    if mapping is None:
        raise SymbolNotFound(symbol=symbols[0])

    # Memoize per-call so repeated symbols cost a single lookup.
    memo = {}
    out = []
    append_output = out.append  # hoist the bound method for the loop
    for sym in symbols:
        if sym in memo:
            append_output(memo[sym])
        else:
            equity = memo[sym] = f(
                mapping,
                multi_country,
                sym,
                as_of_date,
            )
            append_output(equity)
    return out
Lookup a list of equities by symbol. Equivalent to:: [finder.lookup_symbol(s, as_of, fuzzy) for s in symbols] but potentially faster because repeated lookups are memoized. Parameters ---------- symbols : sequence[str] Sequence of ticker symbols to resolve. as_of_date : pd.Timestamp Forwarded to ``lookup_symbol``. fuzzy : bool, optional Forwarded to ``lookup_symbol``. country_code : str or None, optional The country to limit searches to. If not provided, the search will span all countries which increases the likelihood of an ambiguous lookup. Returns ------- equities : list[Equity]
def convert_label_indexer(index, label, index_name='', method=None,
                          tolerance=None):
    """Given a pandas.Index and labels (e.g., from __getitem__) for one
    dimension, return an indexer suitable for indexing an ndarray along that
    dimension. If `index` is a pandas.MultiIndex and depending on `label`,
    return a new pandas.Index or pandas.MultiIndex (otherwise return None).
    """
    new_index = None

    if isinstance(label, slice):
        if method is not None or tolerance is not None:
            raise NotImplementedError(
                'cannot use ``method`` argument if any indexers are '
                'slice objects')
        indexer = index.slice_indexer(_sanitize_slice_element(label.start),
                                      _sanitize_slice_element(label.stop),
                                      _sanitize_slice_element(label.step))
        if not isinstance(indexer, slice):
            # unlike pandas, in xarray we never want to silently convert a
            # slice indexer into an array indexer
            raise KeyError('cannot represent labeled-based slice indexer for '
                           'dimension %r with a slice over integer positions; '
                           'the index is unsorted or non-unique' % index_name)
    elif is_dict_like(label):
        # Dict-like labels select by MultiIndex level name.
        is_nested_vals = _is_nested_tuple(tuple(label.values()))
        if not isinstance(index, pd.MultiIndex):
            raise ValueError('cannot use a dict-like object for selection on '
                             'a dimension that does not have a MultiIndex')
        elif len(label) == index.nlevels and not is_nested_vals:
            # All levels given as scalars: a single exact location.
            indexer = index.get_loc(tuple((label[k] for k in index.names)))
        else:
            for k, v in label.items():
                # index should be an item (i.e. Hashable) not an array-like
                if not isinstance(v, Hashable):
                    raise ValueError('Vectorized selection is not '
                                     'available along level variable: ' + k)
            indexer, new_index = index.get_loc_level(
                tuple(label.values()), level=tuple(label.keys()))

            # GH2619. Raise a KeyError if nothing is chosen
            if indexer.dtype.kind == 'b' and indexer.sum() == 0:
                raise KeyError('{} not found'.format(label))
    elif isinstance(label, tuple) and isinstance(index, pd.MultiIndex):
        if _is_nested_tuple(label):
            indexer = index.get_locs(label)
        elif len(label) == index.nlevels:
            indexer = index.get_loc(label)
        else:
            # Partial tuple: select on the leading levels only.
            indexer, new_index = index.get_loc_level(
                label, level=list(range(len(label)))
            )
    else:
        label = (label
                 if getattr(label, 'ndim', 1) > 1  # vectorized-indexing
                 else _asarray_tuplesafe(label))
        if label.ndim == 0:
            if isinstance(index, pd.MultiIndex):
                indexer, new_index = index.get_loc_level(label.item(), level=0)
            else:
                indexer = get_loc(index, label.item(), method, tolerance)
        elif label.dtype.kind == 'b':
            # Boolean arrays are used directly as masks.
            indexer = label
        else:
            if isinstance(index, pd.MultiIndex) and label.ndim > 1:
                raise ValueError('Vectorized selection is not available along '
                                 'MultiIndex variable: ' + index_name)
            indexer = get_indexer_nd(index, label, method, tolerance)
            # A negative position means the label was not found.
            if np.any(indexer < 0):
                raise KeyError('not all values found in index %r' % index_name)
    return indexer, new_index
Given a pandas.Index and labels (e.g., from __getitem__) for one dimension, return an indexer suitable for indexing an ndarray along that dimension. If `index` is a pandas.MultiIndex and depending on `label`, return a new pandas.Index or pandas.MultiIndex (otherwise return None).
def field(self):
    """
    Returns the field name that this column will have inside the database.

    :return     <str>
    """
    if self.__field:
        return self.__field
    # Derive a default from the column name; reference columns get an
    # '_id' suffix by convention.
    derived = inflection.underscore(self.__name)
    if isinstance(self, orb.ReferenceColumn):
        derived += '_id'
    self.__field = derived
    return derived
Returns the field name that this column will have inside the database. :return <str>
def sim_strcmp95(src, tar, long_strings=False):
    """Return the strcmp95 similarity of two strings.

    This is a wrapper for :py:meth:`Strcmp95.sim`.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    long_strings : bool
        Set to True to increase the probability of a match when the number
        of matched characters is large. This option allows for a little
        more tolerance when the strings are large. It is not an appropriate
        test when comparing fixed length fields such as phone and social
        security numbers.

    Returns
    -------
    float
        Strcmp95 similarity

    Examples
    --------
    >>> sim_strcmp95('cat', 'hat')
    0.7777777777777777
    >>> sim_strcmp95('Niall', 'Neil')
    0.8454999999999999
    >>> sim_strcmp95('aluminum', 'Catalan')
    0.6547619047619048
    >>> sim_strcmp95('ATCG', 'TAGC')
    0.8333333333333334

    """
    scorer = Strcmp95()
    return scorer.sim(src, tar, long_strings)
Return the strcmp95 similarity of two strings. This is a wrapper for :py:meth:`Strcmp95.sim`. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison long_strings : bool Set to True to increase the probability of a match when the number of matched characters is large. This option allows for a little more tolerance when the strings are large. It is not an appropriate test when comparing fixed length fields such as phone and social security numbers. Returns ------- float Strcmp95 similarity Examples -------- >>> sim_strcmp95('cat', 'hat') 0.7777777777777777 >>> sim_strcmp95('Niall', 'Neil') 0.8454999999999999 >>> sim_strcmp95('aluminum', 'Catalan') 0.6547619047619048 >>> sim_strcmp95('ATCG', 'TAGC') 0.8333333333333334
def render_requests_data_to_html(self, data, file_name, context={}):
    """Render to HTML file"""
    target_path = os.path.join(self.html_dir, file_name)
    logger.info('Rendering HTML file %s...' % target_path)
    payload = format_data(data)
    payload.update(context)
    payload.update(domain=self.domain)
    with open(target_path, 'w') as fp:
        fp.write(render_template('realtime.html', payload))
Render to HTML file
def ready_print(worker, output, error):  # pragma : no cover
    """Local test helper."""
    # Count invocations across calls via the module-level counter.
    global COUNTER
    COUNTER = COUNTER + 1
    print(COUNTER, output, error)
Local test helper.
def is_holiday(now=None, holidays="/etc/acct/holidays"):
    """is_holiday({now}, {holidays="/etc/acct/holidays"})

    Return 1 if *now* falls on a holiday listed in the *holidays* file,
    otherwise 0.

    :param now: value accepted by ``_Time`` (defaults to the current time).
    :param holidays: path to an ``/etc/acct/holidays``-style file.
    :raises Exception: if the holidays file does not exist.
    """
    now = _Time(now)

    # Now, parse holiday file.
    if not os.path.exists(holidays):
        raise Exception("There is no holidays file: %s" % holidays)

    # FIX: use a context manager so the file is always closed (the
    # original leaked the handle on every call and early return).
    with open(holidays, "r") as f:
        # First, read all leading comments.
        line = f.readline()
        while line[0] == '*':
            line = f.readline()

        # We just got the year line, e.g. "1990 0800 1700".
        (year, primestart, primeend) = str.split(line)

        # If not the right year, we have no idea for certain. Skip.
        if not year == now.year:
            return 0

        # FIX: advance past the year line before scanning dates (the
        # original re-parsed it and relied on an exception to move on).
        line = f.readline()

        # Now the dates. Check each against now.
        while line != '':
            # Of course, ignore comments.
            if line[0] == '*':
                line = f.readline()
                continue
            try:
                # Format: "1/1 New Years Day"
                (month, day) = str.split(str.split(line)[0], "/")
                # The _Time class has leading-zero padded day numbers.
                if len(day) == 1:
                    day = '0' + day
                # Get month number from index map (compensate for zero
                # indexing).
                month = MONTH_MAP[int(month) - 1]
                # Check the date.
                if month == now.month and day == now.day:
                    return 1
            except (ValueError, IndexError, KeyError):
                # FIX: catch only parse errors instead of a bare except,
                # which also swallowed KeyboardInterrupt/SystemExit.
                pass
            line = f.readline()

    # If no match found, we must not be in a holiday.
    return 0
is_holiday({now}, {holidays="/etc/acct/holidays"}
def ProcessResponse(self, client_id, response):
    """Actually processes the contents of the response.

    Downsamples the client stats in ``response`` and writes them to
    whichever data stores are enabled (AFF4 and/or the relational DB).

    Returns:
      The downsampled ClientStats object.
    """
    precondition.AssertType(client_id, Text)
    downsampled = rdf_client_stats.ClientStats.Downsampled(response)

    if data_store.AFF4Enabled():
        urn = rdf_client.ClientURN(client_id).Add("stats")

        with aff4.FACTORY.Create(
            urn, aff4_stats.ClientStats, token=self.token, mode="w") as stats_fd:
            # Only keep the average of all values that fall within one minute.
            stats_fd.AddAttribute(stats_fd.Schema.STATS, downsampled)

    # Both stores may be enabled during a migration period.
    if data_store.RelationalDBEnabled():
        data_store.REL_DB.WriteClientStats(client_id, downsampled)

    return downsampled
Actually processes the contents of the response.
def get_members(pkg_name, module_filter = None, member_filter = None):
    """Return all module members of the package that satisfy the filters.

    Parameters:
        pkg_name        package name
        module_filter   module-name filter: def module_filter(module_name)
        member_filter   member filter: def member_filter(module_member_object)

    Returns:
        dict mapping "<defining_module>.<member_name>" to the member object.
    """
    modules = get_modules(pkg_name, module_filter)
    ret = {}
    for m in modules:
        # Key each member by its defining module to avoid name collisions
        # between modules.
        members = dict(("{0}.{1}".format(v.__module__, k), v)
                       for k, v in getmembers(m, member_filter))
        ret.update(members)
    return ret
返回包中所有符合条件的模块成员。 参数: pkg_name 包名称 module_filter 模块名过滤器 def (module_name) member_filter 成员过滤器 def member_filter(module_member_object)
def authenticationAndCipheringRequest(AuthenticationParameterRAND_presence=0,
                                      CiphKeySeqNr_presence=0):
    """AUTHENTICATION AND CIPHERING REQUEST Section 9.4.9

    Builds the message as a stack of layers; the two optional IEs are
    appended only when their presence flag is set to 1.
    """
    a = TpPd(pd=0x3)
    b = MessageType(mesType=0x12)  # 00010010
    d = CipheringAlgorithmAndImeisvRequest()
    e = ForceToStandbyAndAcReferenceNumber()
    packet = a / b / d / e
    # FIX: use `== 1` instead of `is 1` — identity comparison against an
    # int literal is implementation-dependent (and a SyntaxWarning on
    # Python 3.8+); equality is what is meant here.
    if AuthenticationParameterRAND_presence == 1:
        g = AuthenticationParameterRAND(ieiAPR=0x21)
        packet = packet / g
    if CiphKeySeqNr_presence == 1:
        h = CiphKeySeqNrHdr(ieiCKSN=0x08, eightBitCKSN=0x0)
        packet = packet / h
    return packet
AUTHENTICATION AND CIPHERING REQUEST Section 9.4.9
def list_nodes(conn=None, call=None):
    '''
    Return a list of VMs

    CLI Example

    .. code-block:: bash

        salt-cloud -f list_nodes myopenstack

    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )
    # Reduce the full node info down to this fixed set of fields.
    wanted_keys = ('id', 'name', 'size', 'state', 'private_ips',
                   'public_ips', 'floating_ips', 'fixed_ips', 'image')
    ret = {}
    for node, info in list_nodes_full(conn=conn).items():
        entry = ret.setdefault(node, {})
        for key in wanted_keys:
            entry.setdefault(key, info.get(key))
    return ret
Return a list of VMs CLI Example .. code-block:: bash salt-cloud -f list_nodes myopenstack
def perform(self):
    """
    Performs all stored actions.
    """
    if not self._driver.w3c:
        # Legacy protocol: replay each queued action callable in order.
        for action in self._actions:
            action()
    else:
        self.w3c_actions.perform()
Performs all stored actions.
def is_json(value, schema = None, json_serializer = None, **kwargs):
    """Indicate whether ``value`` is a valid JSON object.

    .. note::

      ``schema`` supports JSON Schema Drafts 3 - 7. Unless the JSON Schema
      indicates the meta-schema using a ``$schema`` property, the schema
      will be assumed to conform to Draft 7.

    :param value: The value to evaluate.

    :param schema: An optional JSON schema against which ``value`` will be
      validated.
    :type schema: :class:`dict <python:dict>` / :class:`str <python:str>` /
      :obj:`None <python:None>`

    :returns: ``True`` if ``value`` is valid, ``False`` if it is not.
    :rtype: :class:`bool <python:bool>`

    :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters
      or duplicates keyword parameters passed to the underlying validator

    """
    try:
        value = validators.json(value,
                                schema = schema,
                                json_serializer = json_serializer,
                                **kwargs)
    except SyntaxError:
        # FIX: bare `raise` re-raises the active exception with its original
        # traceback intact (`raise error` resets the raise site).
        raise
    except Exception:
        # Any other failure simply means the value is not valid JSON.
        return False

    return True
Indicate whether ``value`` is a valid JSON object. .. note:: ``schema`` supports JSON Schema Drafts 3 - 7. Unless the JSON Schema indicates the meta-schema using a ``$schema`` property, the schema will be assumed to conform to Draft 7. :param value: The value to evaluate. :param schema: An optional JSON schema against which ``value`` will be validated. :type schema: :class:`dict <python:dict>` / :class:`str <python:str>` / :obj:`None <python:None>` :returns: ``True`` if ``value`` is valid, ``False`` if it is not. :rtype: :class:`bool <python:bool>` :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates keyword parameters passed to the underlying validator
def get_connected_components_as_subgraphs(graph):
    """Finds all connected components of the graph.

    Returns a list of graph objects, each representing a connected
    component. Returns an empty list for an empty graph.
    """
    components = get_connected_components(graph)

    list_of_graphs = []
    for c in components:
        edge_ids = set()
        nodes = [graph.get_node(node) for node in c]
        for n in nodes:
            # --Loop through the edges in each node, to determine if it should be included
            for e in n['edges']:
                # --Only add the edge to the subgraph if both ends are in the subgraph
                edge = graph.get_edge(e)
                a, b = edge['vertices']
                if a in c and b in c:
                    edge_ids.add(e)
        # --Build the subgraph and add it to the list
        list_of_edges = list(edge_ids)
        subgraph = make_subgraph(graph, c, list_of_edges)
        list_of_graphs.append(subgraph)

    return list_of_graphs
Finds all connected components of the graph. Returns a list of graph objects, each representing a connected component. Returns an empty list for an empty graph.
def equal(self, cwd):
    """
    Returns True if left and right are equal
    """
    cmd = ["diff", "-q", self.left.get_name(), self.right.get_name()]
    try:
        Process(cmd).run(cwd=cwd, suppress_output=True)
    except SubprocessError as e:
        # diff exits 1 when the files differ; anything else is a real error.
        if e.get_returncode() == 1:
            return False
        raise e
    return True
Returns True if left and right are equal
def register_form_factory(Form):
    """Factory for creating an extended user registration form."""

    class CsrfDisabledProfileForm(ProfileForm):
        """Subclass of ProfileForm to disable CSRF token in the inner form.

        This class will always be a inner form field of the parent class
        `Form`. The parent will add/remove the CSRF token in the form.
        """

        def __init__(self, *args, **kwargs):
            """Initialize the object by hardcoding CSRF token to false."""
            # Force csrf-disabled kwargs before delegating construction.
            kwargs = _update_with_csrf_disabled(kwargs)
            super(CsrfDisabledProfileForm, self).__init__(*args, **kwargs)

    class RegisterForm(Form):
        """RegisterForm extended with UserProfile details."""

        # Nested profile fields are namespaced with a '.' separator.
        profile = FormField(CsrfDisabledProfileForm, separator='.')

    return RegisterForm
Factory for creating an extended user registration form.
def merge(cls, source_blocks):
    """Merge multiple SourceBlocks together"""
    if len(source_blocks) == 1:
        return source_blocks[0]

    # Order blocks (in place) by their starting line so the earliest leads.
    source_blocks.sort(key=operator.attrgetter('start_line_number'))
    first = source_blocks[0]

    merged_source_lines = []
    for current in source_blocks:
        merged_source_lines.extend(current.source_lines)

    return cls(first.boot_lines, merged_source_lines,
               directive=first.directive,
               language=first.language,
               roles=first.roles)
Merge multiple SourceBlocks together
async def _get_entity_from_string(self, string):
    """
    Gets a full entity from the given string, which may be a phone or
    a username, and processes all the found entities on the session.
    The string may also be a user link, or a channel/chat invite link.

    This method has the side effect of adding the found users to the
    session database, so it can be queried later without API calls,
    if this option is enabled on the session.

    Returns the found entity, or raises TypeError if not found.
    """
    phone = utils.parse_phone(string)
    if phone:
        # Phone numbers can only be resolved through the contact list.
        try:
            for user in (await self(
                    functions.contacts.GetContactsRequest(0))).users:
                if user.phone == phone:
                    return user
        except errors.BotMethodInvalidError:
            raise ValueError('Cannot get entity by phone number as a '
                             'bot (try using integer IDs, not strings)')
    elif string.lower() in ('me', 'self'):
        return await self.get_me()
    else:
        username, is_join_chat = utils.parse_username(string)
        if is_join_chat:
            # Invite links: check the invite to learn about the chat.
            invite = await self(
                functions.messages.CheckChatInviteRequest(username))

            if isinstance(invite, types.ChatInvite):
                raise ValueError(
                    'Cannot get entity from a channel (or group) '
                    'that you are not part of. Join the group and retry'
                )
            elif isinstance(invite, types.ChatInviteAlready):
                return invite.chat
        elif username:
            try:
                result = await self(
                    functions.contacts.ResolveUsernameRequest(username))
            except errors.UsernameNotOccupiedError as e:
                raise ValueError('No user has "{}" as username'
                                 .format(username)) from e

            try:
                # Match the resolved peer against the returned entities.
                pid = utils.get_peer_id(result.peer, add_mark=False)
                if isinstance(result.peer, types.PeerUser):
                    return next(x for x in result.users if x.id == pid)
                else:
                    return next(x for x in result.chats if x.id == pid)
            except StopIteration:
                pass
        try:
            # Nobody with this username, maybe it's an exact name/title
            return await self.get_entity(
                self.session.get_input_entity(string))
        except ValueError:
            pass

    raise ValueError(
        'Cannot find any entity corresponding to "{}"'.format(string)
    )
Gets a full entity from the given string, which may be a phone or a username, and processes all the found entities on the session. The string may also be a user link, or a channel/chat invite link. This method has the side effect of adding the found users to the session database, so it can be queried later without API calls, if this option is enabled on the session. Returns the found entity, or raises TypeError if not found.
def new_worker_pool(self, name: str, min_workers: int = 0, max_workers: int = 1,
                    max_seconds_idle: int = DEFAULT_WORKER_POOL_MAX_SECONDS_IDLE):
    """
    Creates a new worker pool and starts it.

    Returns the Worker that schedules works to the pool.
    """
    if not self.running:
        # Scheduler already stopped: fall back to inline execution.
        return self.immediate_worker

    pool_worker = self._new_worker_pool(name, min_workers, max_workers,
                                        max_seconds_idle)
    self._start_worker_pool(pool_worker)
    return pool_worker
Creates a new worker pool and starts it. Returns the Worker that schedules works to the pool.
def send(self, envelope):
    """Sends an *envelope*."""
    if not self.is_connected:
        self._connect()
    msg = envelope.to_mime_message()
    # Build one formatted header string per recipient (to + cc + bcc).
    recipients = envelope._to + envelope._cc + envelope._bcc
    to_addrs = [envelope._addrs_to_header([addr]) for addr in recipients]
    return self._conn.sendmail(msg['From'], to_addrs, msg.as_string())
Sends an *envelope*.
def exit_if_path_exists(self):
    """
    Exit early if the output path already exists.

    FIX(doc): the original docstring said "if the path cannot be found",
    but the code exits when the path *does* exist.
    """
    if os.path.exists(self.output_path):
        ui.error(c.MESSAGES["path_exists"], self.output_path)
        sys.exit(1)
Exit early if the path cannot be found.
def parse_rules(self): """ Add a set of rules to the app, dividing between filter and other rule set """ # Load patterns: an app is removed when has no defined patterns. try: rule_options = self.config.items('rules') except configparser.NoSectionError: raise LogRaptorConfigError("the app %r has no defined rules!" % self.name) rules = [] for option, value in rule_options: pattern = value.replace('\n', '') # Strip newlines for multi-line declarations if not self.args.filters: # No filters case: substitute the filter fields with the corresponding patterns. pattern = string.Template(pattern).safe_substitute(self.fields) rules.append(AppRule(option, pattern, self.args)) continue for filter_group in self.args.filters: _pattern, filter_keys = exact_sub(pattern, filter_group) _pattern = string.Template(_pattern).safe_substitute(self.fields) if len(filter_keys) >= len(filter_group): rules.append(AppRule(option, _pattern, self.args, filter_keys)) elif self._thread: rules.append(AppRule(option, _pattern, self.args)) return rules
Add a set of rules to the app, dividing between filter and other rule set
def serial_udb_extra_f6_encode(self, sue_PITCHGAIN, sue_PITCHKD, sue_RUDDER_ELEV_MIX, sue_ROLL_ELEV_MIX, sue_ELEVATOR_BOOST):
    '''
    Backwards compatible version of SERIAL_UDB_EXTRA F6: format

    sue_PITCHGAIN             : Serial UDB Extra PITCHGAIN Proportional Control (float)
    sue_PITCHKD               : Serial UDB Extra Pitch Rate Control (float)
    sue_RUDDER_ELEV_MIX       : Serial UDB Extra Rudder to Elevator Mix (float)
    sue_ROLL_ELEV_MIX         : Serial UDB Extra Roll to Elevator Mix (float)
    sue_ELEVATOR_BOOST        : Gain For Boosting Manual Elevator control When Plane Stabilized (float)

    '''
    message = MAVLink_serial_udb_extra_f6_message(
        sue_PITCHGAIN,
        sue_PITCHKD,
        sue_RUDDER_ELEV_MIX,
        sue_ROLL_ELEV_MIX,
        sue_ELEVATOR_BOOST,
    )
    return message
Backwards compatible version of SERIAL_UDB_EXTRA F6: format sue_PITCHGAIN : Serial UDB Extra PITCHGAIN Proportional Control (float) sue_PITCHKD : Serial UDB Extra Pitch Rate Control (float) sue_RUDDER_ELEV_MIX : Serial UDB Extra Rudder to Elevator Mix (float) sue_ROLL_ELEV_MIX : Serial UDB Extra Roll to Elevator Mix (float) sue_ELEVATOR_BOOST : Gain For Boosting Manual Elevator control When Plane Stabilized (float)
def supports_string_match_type(self, string_match_type=None): """Tests if the given string match type is supported. arg: string_match_type (osid.type.Type): a string match type return: (boolean) - ``true`` if the given string match type Is supported, ``false`` otherwise raise: IllegalState - syntax is not a ``STRING`` raise: NullArgument - ``string_match_type`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.Metadata.supports_coordinate_type from .osid_errors import IllegalState, NullArgument if not string_match_type: raise NullArgument('no input Type provided') if self._kwargs['syntax'] not in ['``STRING``']: raise IllegalState('put more meaninful message here') return string_match_type in self.get_string_match_types
Tests if the given string match type is supported. arg: string_match_type (osid.type.Type): a string match type return: (boolean) - ``true`` if the given string match type Is supported, ``false`` otherwise raise: IllegalState - syntax is not a ``STRING`` raise: NullArgument - ``string_match_type`` is ``null`` *compliance: mandatory -- This method must be implemented.*
def patch():
    """
    Patch startService and stopService so that they check the previous
    state first.  (used for debugging only)
    """
    from twisted.application.service import Service

    # Keep references to the unpatched methods so the wrappers can delegate.
    original_start = Service.startService
    original_stop = Service.stopService

    def checked_start(self):
        assert not self.running, "%r already running" % (self,)
        return original_start(self)

    def checked_stop(self):
        assert self.running, "%r already stopped" % (self,)
        return original_stop(self)

    Service.startService = checked_start
    Service.stopService = checked_stop
Patch startService and stopService so that they check the previous state first. (used for debugging only)
def download_apk(self, path='.'):
    """
    Download Android .apk

    @type path:
    @param path: destination directory for 'target.apk'
    """
    # Download into a temp file first so a failed transfer never leaves a
    # partial 'target.apk' behind.
    handle, tmp_name = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.apk')
    os.close(handle)
    try:
        _download_url(self.artifact_url('apk'), tmp_name)
        shutil.copy(tmp_name, os.path.join(path, 'target.apk'))
    finally:
        os.unlink(tmp_name)
Download Android .apk @type path: @param path:
def __default(self, ast_token):
    """Handle tokens inside the list or outside the list."""
    level = self.list_level
    if level == 0:
        # Outside any list: pass the token straight through.
        self.final_ast_tokens.append(ast_token)
    elif level == 1:
        if self.list_entry is None:
            # First token at list level becomes the reference entry.
            self.list_entry = ast_token
        elif not isinstance(ast_token, type(self.list_entry)):
            # Tokens of a different type than the entry pass through.
            self.final_ast_tokens.append(ast_token)
Handle tokens inside the list or outside the list.
def head(self, rows):
    """
    Return a Series of the first N rows

    :param rows: number of rows
    :return: Series
    """
    total = len(self._index)
    keep = min(rows, total)
    # Boolean mask: True for the first `keep` rows, False for the rest.
    mask = [True] * keep + [False] * (total - keep)
    return self.get(indexes=mask)
Return a Series of the first N rows :param rows: number of rows :return: Series
def start_file(filename):
    """
    Generalized os.startfile for all platforms supported by Qt

    This function is simply wrapping QDesktopServices.openUrl

    Returns True if successfull, otherwise returns False.
    """
    from qtpy.QtCore import QUrl
    from qtpy.QtGui import QDesktopServices

    # We need to use setUrl instead of setPath because this is the only
    # cross-platform way to open external files. setPath fails completely on
    # Mac and doesn't open non-ascii files on Linux.
    # Fixes Issue 740
    target = QUrl()
    target.setUrl(filename)
    return QDesktopServices.openUrl(target)
Generalized os.startfile for all platforms supported by Qt This function is simply wrapping QDesktopServices.openUrl Returns True if successfull, otherwise returns False.
def export_to_dom(self):
    """
    Exports this model to a DOM.
    """
    ns_template = ('xmlns="http://www.neuroml.org/lems/%s" '
                   'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
                   'xsi:schemaLocation="http://www.neuroml.org/lems/%s %s"')
    namespaces = ns_template % (self.target_lems_version,
                                self.target_lems_version,
                                self.schema_location)

    # Assemble the document as a list of XML fragments, then join once.
    parts = ['<Lems %s>' % namespaces]
    parts.extend(include.toxml() for include in self.includes)
    parts.extend('<Target component="{0}"/>'.format(target)
                 for target in self.targets)
    for collection in (self.dimensions, self.units, self.constants,
                       self.component_types, self.components):
        parts.extend(item.toxml() for item in collection)
    parts.append('</Lems>')

    # Round-trip through minidom so callers receive a DOM tree.
    return minidom.parseString(''.join(parts))
Exports this model to a DOM.
def prepare_child_message(self,
                          gas: int,
                          to: Address,
                          value: int,
                          data: BytesOrView,
                          code: bytes,
                          **kwargs: Any) -> Message:
    """
    Helper method for creating a child computation.
    """
    # Unless the caller overrides it, the child message is sent from this
    # computation's storage address.
    kwargs.setdefault('sender', self.msg.storage_address)
    # Child computations run one level deeper than the current message.
    return Message(
        gas=gas,
        to=to,
        value=value,
        data=data,
        code=code,
        depth=self.msg.depth + 1,
        **kwargs
    )
Helper method for creating a child computation.
def extend(self, source, new_image_name, s2i_args=None):
    """
    extend this s2i-enabled image using provided source, raises ConuException if
    `s2i build` fails

    :param source: str, source used to extend the image, can be path or url
    :param new_image_name: str, name of the new, extended image
    :param s2i_args: list of str, additional options and arguments provided to `s2i build`
    :return: S2Image instance
    """
    s2i_args = s2i_args or []
    c = self._s2i_command(["build"] + s2i_args + [source, self.get_full_name()])
    if new_image_name:
        c.append(new_image_name)
    try:
        run_cmd(c)
    except subprocess.CalledProcessError as ex:
        # Chain the original error so the failing command's exit status and
        # output survive in the traceback.
        raise ConuException("s2i build failed: %s" % ex) from ex
    return S2IDockerImage(new_image_name)
extend this s2i-enabled image using provided source, raises ConuException if `s2i build` fails :param source: str, source used to extend the image, can be path or url :param new_image_name: str, name of the new, extended image :param s2i_args: list of str, additional options and arguments provided to `s2i build` :return: S2Image instance
def to_markdown(self):
    """Converts to markdown

    :return: item in markdown format, or None for unknown item types
    """
    kind = self.type
    if kind == "text":
        return self.text
    if kind in ("url", "image"):
        return "[" + self.text + "](" + self.attributes["ref"] + ")"
    if kind == "title":
        # Title size maps to the number of leading '#' markers.
        return "#" * int(self.attributes["size"]) + " " + self.text
    return None
Converts to markdown :return: item in markdown format
def dumpindented(self, pn, indent=0):
    """
    Dump all nodes of the current page with keys indented,
    showing how the `indent` feature works
    """
    page = self.readpage(pn)
    print(" " * indent, page)
    if page.isindex():
        # Interior page: first recurse into the preceding (leftmost)
        # child page ...
        print(" " * indent, end="")
        self.dumpindented(page.preceeding, indent + 1)
        # ... then into the child page referenced by each index entry.
        # NOTE(review): loop placement inside the isindex() branch assumed
        # from the recursion pattern -- confirm against the original source.
        for p in range(len(page.index)):
            print(" " * indent, end="")
            self.dumpindented(page.getpage(p), indent + 1)
Dump all nodes of the current page with keys indented, showing how the `indent` feature works
def authenticate(self, username, password):
    """
    Uses a Smappee username and password to request an access token,
    refresh token and expiry date.

    Parameters
    ----------
    username : str
    password : str

    Returns
    -------
    requests.Response
        access token is saved in self.access_token
        refresh token is saved in self.refresh_token
        expiration time is set in self.token_expiration_time
        as datetime.datetime
    """
    payload = {
        "grant_type": "password",
        "client_id": self.client_id,
        "client_secret": self.client_secret,
        "username": username,
        "password": password,
    }
    response = requests.post(URLS['token'], data=payload)
    response.raise_for_status()
    tokens = response.json()
    self.access_token = tokens['access_token']
    self.refresh_token = tokens['refresh_token']
    self._set_token_expiration_time(expires_in=tokens['expires_in'])
    return response
Uses a Smappee username and password to request an access token, refresh token and expiry date. Parameters ---------- username : str password : str Returns ------- requests.Response access token is saved in self.access_token refresh token is saved in self.refresh_token expiration time is set in self.token_expiration_time as datetime.datetime
def p_function_body(p):
    '''function_body : shell_command
                     | shell_command redirection_list'''
    # The docstring above is a PLY grammar rule -- its text drives the
    # parser, so do not edit it casually.
    assert p[1].kind == 'compound'
    p[0] = p[1]
    if len(p) == 3:
        # Second alternative matched: attach the redirections and widen
        # the node's span to cover the last redirect.
        p[0].redirects.extend(p[2])
        assert p[0].pos[0] < p[0].redirects[-1].pos[1]
        p[0].pos = (p[0].pos[0], p[0].redirects[-1].pos[1])
function_body : shell_command | shell_command redirection_list
def settle(ctx, symbol, amount, account):
    """ Settle the given amount of an asset

    (The previous docstring said "Fund the fee pool", but the code calls
    ``asset_settle`` -- the text was evidently copied from another command.)
    """
    tx = ctx.bitshares.asset_settle(Amount(amount, symbol), account=account)
    print_tx(tx)
Fund the fee pool of an asset
def _read_reader_macro(ctx: ReaderContext) -> LispReaderForm:
    """Return a data structure evaluated as a reader macro from the input stream."""
    start = ctx.reader.advance()
    assert start == "#"
    token = ctx.reader.peek()
    if token == "{":
        # '#{' -> set literal
        return _read_set(ctx)
    elif token == "(":
        # '#(' -> anonymous function literal
        return _read_function(ctx)
    elif token == "'":
        # "#'" -> var quote: (var sym)
        ctx.reader.advance()
        s = _read_sym(ctx)
        return llist.l(_VAR, s)
    elif token == '"':
        # '#"' -> regex literal
        return _read_regex(ctx)
    elif token == "_":
        # '#_' -> discard the next form entirely
        ctx.reader.advance()
        _read_next(ctx)  # Ignore the entire next form
        return COMMENT
    elif ns_name_chars.match(token):
        # '#tag form' -> data reader: look up the tag symbol and apply
        # its reader function to the following form.
        s = _read_sym(ctx)
        assert isinstance(s, symbol.Symbol)
        v = _read_next_consuming_comment(ctx)
        if s in ctx.data_readers:
            f = ctx.data_readers[s]
            return f(v)
        else:
            raise SyntaxError(f"No data reader found for tag #{s}")

    raise SyntaxError(f"Unexpected token '{token}' in reader macro")
Return a data structure evaluated as a reader macro from the input stream.
def create(cls, photo, title, description=''):
    """Create a new photoset.

    photo - primary photo
    """
    if not isinstance(photo, Photo):
        # Call-style raise is valid on both Python 2 and 3; the old
        # ``raise TypeError, "..."`` statement form is Python-2 only.
        raise TypeError("Photo expected")

    method = 'flickr.photosets.create'
    data = _dopost(method, auth=True, title=title,
                   description=description,
                   primary_photo_id=photo.id)

    # Renamed from ``set`` to avoid shadowing the builtin.
    photoset = Photoset(data.rsp.photoset.id, title, Photo(photo.id),
                        photos=1, description=description)
    return photoset
Create a new photoset. photo - primary photo
def check(f):
    """
    Wraps the function with a decorator that runs all of the
    pre/post conditions.
    """
    if hasattr(f, 'wrapped_fn'):
        # Already decorated -- don't wrap twice.
        return f

    @wraps(f)
    def run_with_conditions(*args, **kwargs):
        return check_conditions(f, args, kwargs)

    run_with_conditions.wrapped_fn = f
    return run_with_conditions
Wraps the function with a decorator that runs all of the pre/post conditions.
def _scalar_pattern_uniform_op_left(func):
    """Decorator for operator overloading when ScalarPatternUniform is on the left."""
    @wraps(func)
    def verif(self, patt):
        if isinstance(patt, ScalarPatternUniform):
            # Pattern-pattern operation: double-sphere shapes must match.
            if self._dsphere.shape == patt._dsphere.shape:
                return ScalarPatternUniform(func(self, self._dsphere, patt._dsphere),
                                            doublesphere=True)
            else:
                raise ValueError(err_msg['SP_sz_msmtch'] %
                                 (self.nrows, self.ncols, patt.nrows, patt.ncols))
        elif isinstance(patt, numbers.Number):
            # Pattern-scalar operation: apply the scalar to the sphere data.
            return ScalarPatternUniform(func(self, self._dsphere, patt),
                                        doublesphere=True)
        else:
            # Anything else cannot be combined with a scalar pattern.
            raise TypeError(err_msg['no_combi_SP'])
    return verif
Decorator for operator overloading when ScalarPatternUniform is on the left.
def pdf(self):
    r"""
    Generate the vector of probabilities for the Beta-binomial
    (n, a, b) distribution.

    The Beta-binomial distribution takes the form

    .. math::
        p(k \,|\, n, a, b)
        = {n \choose k} \frac{B(k + a, n - k + b)}{B(a, b)},
        \qquad k = 0, \ldots, n,

    where :math:`B` is the beta function.

    Returns
    -------
    probs : array_like(float)
        Vector of probabilities over k
    """
    n, a, b = self.n, self.a, self.b
    support = np.arange(n + 1)
    # Vectorized evaluation of C(n, k) * B(k + a, n - k + b) / B(a, b).
    return binom(n, support) * beta(support + a, n - support + b) / beta(a, b)
r""" Generate the vector of probabilities for the Beta-binomial (n, a, b) distribution. The Beta-binomial distribution takes the form .. math:: p(k \,|\, n, a, b) = {n \choose k} \frac{B(k + a, n - k + b)}{B(a, b)}, \qquad k = 0, \ldots, n, where :math:`B` is the beta function. Parameters ---------- n : scalar(int) First parameter to the Beta-binomial distribution a : scalar(float) Second parameter to the Beta-binomial distribution b : scalar(float) Third parameter to the Beta-binomial distribution Returns ------- probs: array_like(float) Vector of probabilities over k
def flatten_egginfo_json(
        pkg_names, filename=DEFAULT_JSON, dep_keys=DEP_KEYS,
        working_set=None):
    """
    A shorthand calling convention where the package name is supplied
    instead of a distribution.

    Originally written for this: generate a flattened package.json with
    packages `pkg_names` that are already installed within the current
    Python environment (defaults to the current global working_set which
    should have been set up correctly by pkg_resources).
    """
    if not working_set:
        working_set = default_working_set
    # Restrict to distributions that actually exist in the working set.
    dists = find_packages_requirements_dists(pkg_names, working_set=working_set)
    return flatten_dist_egginfo_json(
        dists, filename=filename, dep_keys=dep_keys, working_set=working_set)
A shorthand calling convention where the package name is supplied instead of a distribution. Originally written for this: Generate a flattened package.json with packages `pkg_names` that are already installed within the current Python environment (defaults to the current global working_set which should have been set up correctly by pkg_resources).
def _set_tasks_state(self, value):
    """
    Purpose: Set state of all tasks of the current stage.

    :arguments: String (must be a key of ``states.state_numbers``)
    """
    if value not in states.state_numbers.keys():
        # NOTE(review): this ValueError takes keyword arguments, so it is
        # presumably a project-defined exception class shadowing the
        # builtin -- confirm against the module's imports.
        raise ValueError(obj=self._uid,
                         attribute='set_tasks_state',
                         expected_value=states.state_numbers.keys(),
                         actual_value=value)

    # Propagate the validated state to every task in this stage.
    for task in self._tasks:
        task.state = value
Purpose: Set state of all tasks of the current stage. :arguments: String
def _collect_config(self):
    """Collects all info from three sections"""
    kwargs = {}
    # Merge the three config sections into a single keyword dict;
    # later sections overwrite duplicate keys from earlier ones.
    for section in ('storage_service', 'trajectory', 'environment'):
        kwargs.update(self._collect_section(section))
    return kwargs
Collects all info from three sections
def create_mv_rule(tensorprod_rule, dim):
    """Convert tensor product rule into a multivariate quadrature generator."""
    def mv_rule(order, sparse=False, part=None):
        """
        Multidimensional integration rule.

        Args:
            order (int, numpy.ndarray) : order of integration rule. If
                numpy.ndarray, order along each axis.
            sparse (bool) : use Smolyak sparse grid.

        Returns:
            (numpy.ndarray, numpy.ndarray) abscissas and weights.
        """
        if not sparse:
            return tensorprod_rule(order, part=part)

        # Smolyak sparse grid: expand the scalar order along every axis
        # and hand a part-fixed rule to the sparse grid constructor.
        orders = numpy.ones(dim, dtype=int) * order

        def fixed_part_rule(order, part=part):
            return tensorprod_rule(order, part=part)

        return chaospy.quad.sparse_grid(fixed_part_rule, orders)
    return mv_rule
Convert tensor product rule into a multivariate quadrature generator.
def get_item_content(self, path, project=None, file_name=None, download=None,
                     scope_path=None, recursion_level=None,
                     version_descriptor=None, include_content=None, **kwargs):
    """GetItemContent.
    Get Item Metadata and/or Content for a single item. The download
    parameter is to indicate whether the content should be available as a
    download or just sent as a stream in the response. Doesn't apply to
    zipped content which is always returned as a download.

    :param str path: Version control path of an individual item to return.
    :param str project: Project ID or project name
    :param str file_name: file name of item returned.
    :param bool download: If true, create a downloadable attachment.
    :param str scope_path: Version control path of a folder to return multiple items.
    :param str recursion_level: None (just the item), or OneLevel (contents of a folder).
    :param :class:`<TfvcVersionDescriptor> <azure.devops.v5_0.tfvc.models.TfvcVersionDescriptor>` version_descriptor: Version descriptor.  Default is null.
    :param bool include_content: Set to true to include item content when requesting json.  Default is false.
    :rtype: object
    """
    # Only the optional project segment participates in URL routing.
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    # Serialize every supplied filter into a query parameter.
    query_parameters = {}
    if path is not None:
        query_parameters['path'] = self._serialize.query('path', path, 'str')
    if file_name is not None:
        query_parameters['fileName'] = self._serialize.query('file_name', file_name, 'str')
    if download is not None:
        query_parameters['download'] = self._serialize.query('download', download, 'bool')
    if scope_path is not None:
        query_parameters['scopePath'] = self._serialize.query('scope_path', scope_path, 'str')
    if recursion_level is not None:
        query_parameters['recursionLevel'] = self._serialize.query('recursion_level', recursion_level, 'str')
    if version_descriptor is not None:
        # The version descriptor is flattened into dotted query keys.
        if version_descriptor.version_option is not None:
            query_parameters['versionDescriptor.versionOption'] = version_descriptor.version_option
        if version_descriptor.version_type is not None:
            query_parameters['versionDescriptor.versionType'] = version_descriptor.version_type
        if version_descriptor.version is not None:
            query_parameters['versionDescriptor.version'] = version_descriptor.version
    if include_content is not None:
        query_parameters['includeContent'] = self._serialize.query('include_content', include_content, 'bool')
    response = self._send(http_method='GET',
                          location_id='ba9fc436-9a38-4578-89d6-e4f3241f5040',
                          version='5.0',
                          route_values=route_values,
                          query_parameters=query_parameters,
                          accept_media_type='application/octet-stream')
    # Optional progress callback for the streaming download.
    if "callback" in kwargs:
        callback = kwargs["callback"]
    else:
        callback = None
    return self._client.stream_download(response, callback=callback)
GetItemContent. Get Item Metadata and/or Content for a single item. The download parameter is to indicate whether the content should be available as a download or just sent as a stream in the response. Doesn't apply to zipped content which is always returned as a download. :param str path: Version control path of an individual item to return. :param str project: Project ID or project name :param str file_name: file name of item returned. :param bool download: If true, create a downloadable attachment. :param str scope_path: Version control path of a folder to return multiple items. :param str recursion_level: None (just the item), or OneLevel (contents of a folder). :param :class:`<TfvcVersionDescriptor> <azure.devops.v5_0.tfvc.models.TfvcVersionDescriptor>` version_descriptor: Version descriptor. Default is null. :param bool include_content: Set to true to include item content when requesting json. Default is false. :rtype: object
def get_meta_data_editor(self, for_gaphas=True):
    """Returns the meta data for the specified editor

    This method should be used instead of accessing the meta data of an editor directly. It returns the meta
    data of the editor available (with priority to the one specified by `for_gaphas`) and converts it if needed.

    :param bool for_gaphas: True (default) if the meta data is required for gaphas, False if for OpenGL
    :return: Meta data for the editor
    :rtype: Vividict
    """
    meta_gaphas = self.meta['gui']['editor_gaphas']
    meta_opengl = self.meta['gui']['editor_opengl']
    assert isinstance(meta_gaphas, Vividict) and isinstance(meta_opengl, Vividict)
    # Use meta data of editor with more keys (typically one of the editors has zero keys)
    # TODO check if the magic length condition in the next line can be improved (consistent behavior getter/setter?)
    parental_conversion_from_opengl = self._parent and self._parent().temp['conversion_from_opengl']
    # Ties (equal key counts) resolve to the requested editor unless the
    # parent is currently converting from OpenGL.
    from_gaphas = len(meta_gaphas) > len(meta_opengl) or (len(meta_gaphas) == len(meta_opengl) and for_gaphas
                                                          and not parental_conversion_from_opengl)
    # Convert meta data if meta data target and origin differ
    if from_gaphas and not for_gaphas:
        self.meta['gui']['editor_opengl'] = self._meta_data_editor_gaphas2opengl(meta_gaphas)
    elif not from_gaphas and for_gaphas:
        self.meta['gui']['editor_gaphas'] = self._meta_data_editor_opengl2gaphas(meta_opengl)
    # only keep meta data for one editor
    del self.meta['gui']['editor_opengl' if for_gaphas else 'editor_gaphas']
    return self.meta['gui']['editor_gaphas'] if for_gaphas else self.meta['gui']['editor_opengl']
Returns the editor for the specified editor This method should be used instead of accessing the meta data of an editor directly. It return the meta data of the editor available (with priority to the one specified by `for_gaphas`) and converts it if needed. :param bool for_gaphas: True (default) if the meta data is required for gaphas, False if for OpenGL :return: Meta data for the editor :rtype: Vividict
def _compare_frame_rankings(ref, est, transitive=False):
    '''Compute the number of ranking disagreements in two lists.

    Parameters
    ----------
    ref : np.ndarray, shape=(n,)
    est : np.ndarray, shape=(n,)
        Reference and estimate ranked lists.
        `ref[i]` is the relevance score for point `i`.
    transitive : bool
        If true, all pairs of reference levels are compared.
        If false, only adjacent pairs of reference levels are compared.

    Returns
    -------
    inversions : int
        The number of pairs of indices `i, j` where
        `ref[i] < ref[j]` but `est[i] >= est[j]`.
    normalizer : float
        The total number of pairs (i, j) under consideration.
        If transitive=True, then this is |{(i,j) : ref[i] < ref[j]}|
        If transitive=False, then this is |{i,j) : ref[i] +1 = ref[j]}|
    '''
    # Sort the estimate by the reference ordering so that each reference
    # level occupies a contiguous run of est_sorted.
    idx = np.argsort(ref)
    ref_sorted = ref[idx]
    est_sorted = est[idx]

    # Find the break-points in ref_sorted
    levels, positions, counts = np.unique(ref_sorted,
                                          return_index=True,
                                          return_counts=True)

    positions = list(positions)
    positions.append(len(ref_sorted))

    # defaultdicts make missing levels harmless: an empty slice and a zero
    # count contribute nothing to the inversion count or the normalizer
    # (relevant for the non-transitive `i+1` pairing below).
    index = collections.defaultdict(lambda: slice(0))
    ref_map = collections.defaultdict(lambda: 0)

    for level, cnt, start, end in zip(levels, counts,
                                      positions[:-1], positions[1:]):
        index[level] = slice(start, end)
        ref_map[level] = cnt

    # Now that we have values sorted, apply the inversion-counter to
    # pairs of reference values
    if transitive:
        level_pairs = itertools.combinations(levels, 2)
    else:
        level_pairs = [(i, i+1) for i in levels]

    # tee so we can both normalize and count over the same pair sequence.
    level_pairs, lcounter = itertools.tee(level_pairs)

    normalizer = float(sum([ref_map[i] * ref_map[j] for (i, j) in lcounter]))

    if normalizer == 0:
        return 0, 0.0

    inversions = 0
    for level_1, level_2 in level_pairs:
        inversions += _count_inversions(est_sorted[index[level_1]],
                                        est_sorted[index[level_2]])

    return inversions, float(normalizer)
Compute the number of ranking disagreements in two lists. Parameters ---------- ref : np.ndarray, shape=(n,) est : np.ndarray, shape=(n,) Reference and estimate ranked lists. `ref[i]` is the relevance score for point `i`. transitive : bool If true, all pairs of reference levels are compared. If false, only adjacent pairs of reference levels are compared. Returns ------- inversions : int The number of pairs of indices `i, j` where `ref[i] < ref[j]` but `est[i] >= est[j]`. normalizer : float The total number of pairs (i, j) under consideration. If transitive=True, then this is |{(i,j) : ref[i] < ref[j]}| If transitive=False, then this is |{i,j) : ref[i] +1 = ref[j]}|
def follow_double_underscores(obj, field_name=None, excel_dialect=True, eval_python=False, index_error_value=None):
    '''Like getattr(obj, field_name) only follows model relationships through "__" or "." as link separators

    >>> from django.contrib.auth.models import Permission
    >>> import math
    >>> p = Permission.objects.all()[0]
    >>> follow_double_underscores(p, 'content_type__name') == p.content_type.name
    True
    >>> follow_double_underscores(p, 'math.sqrt(len(obj.content_type.name))', eval_python=True) == math.sqrt(len(p.content_type.name))
    True
    '''
    if not obj:
        return obj
    # A list means we are already recursing on pre-split path segments.
    if isinstance(field_name, list):
        split_fields = field_name
    else:
        split_fields = re_model_instance_dot.split(field_name)
    # NOTE(review): `if False and ...` permanently disables this whole-expression
    # eval path -- it looks deliberately switched off; remove or re-enable.
    if False and eval_python:
        try:
            return eval(field_name, {'datetime': datetime, 'math': math, 'collections': collections}, {'obj': obj})
        except IndexError:
            return index_error_value
        except:
            pass
    if len(split_fields) <= 1:
        # Base case: resolve the final segment, trying the plain attribute,
        # then the `<name>_id` FK column, then the `<name>_set` reverse
        # relation, then the instance dict, then (optionally) eval.
        if hasattr(obj, split_fields[0]):
            value = getattr(obj, split_fields[0])
        elif hasattr(obj, split_fields[0] + '_id'):
            value = getattr(obj, split_fields[0] + '_id')
        elif hasattr(obj, split_fields[0] + '_set'):
            value = getattr(obj, split_fields[0] + '_set')
        elif split_fields[0] in obj.__dict__:
            value = obj.__dict__.get(split_fields[0])
        elif eval_python:
            value = eval('obj.' + split_fields[0])
        else:
            return follow_double_underscores(getattr(obj, split_fields[0]), field_name=split_fields[1:],
                                             eval_python=eval_python, index_error_value=index_error_value)
        # Excel-friendly output: render datetimes as plain text.
        if value and excel_dialect and isinstance(value, datetime.datetime):
            value = value.strftime('%Y-%m-%d %H:%M:%S')
        return value
    # Recursive case: peel off the first segment and descend.
    return follow_double_underscores(getattr(obj, split_fields[0]), field_name=split_fields[1:],
                                     eval_python=eval_python, index_error_value=index_error_value)
Like getattr(obj, field_name) only follows model relationships through "__" or "." as link separators >>> from django.contrib.auth.models import Permission >>> import math >>> p = Permission.objects.all()[0] >>> follow_double_underscores(p, 'content_type__name') == p.content_type.name True >>> follow_double_underscores(p, 'math.sqrt(len(obj.content_type.name))', eval_python=True) == math.sqrt(len(p.content_type.name)) True
def _process_mrk_acc_view(self):
    """
    Use this table to create the idmap between the internal marker id
    and the public mgiid.  No triples are produced in this process.

    (The unused ``line_counter`` local from the original has been removed.)

    :return: None
    """
    # make a pass through the table first, to create the mapping between
    # the external and internal identifiers
    LOG.info("mapping markers to internal identifiers")
    raw = '/'.join((self.rawdir, 'mrk_acc_view'))
    col = ['accid', 'prefix_part', 'logicaldb_key', 'object_key',
           'preferred', 'organism_key']
    with open(raw, 'r') as fh:
        fh.readline()  # read the header row; skip
        for line in fh:
            row = line.rstrip('\n').split('\t')
            accid = row[col.index('accid')]
            prefix_part = row[col.index('prefix_part')]
            logicaldb_key = row[col.index('logicaldb_key')]
            object_key = row[col.index('object_key')]
            preferred = row[col.index('preferred')]
            # organism_key is present in the row but unused here

            # In test mode, only process whitelisted marker keys.
            if self.test_mode is True:
                if int(object_key) not in self.test_keys.get('marker'):
                    continue

            # Only the preferred MGI accession (logical db 1) maps a marker.
            if logicaldb_key == '1' and prefix_part == 'MGI:' and preferred == '1':
                self.idhash['marker'][object_key] = accid
    return
Use this table to create the idmap between the internal marker id and the public mgiid. No triples are produced in this process :return:
def map_indices(fn, iterable, indices):
    r"""
    Map a function across indices of an iterable.

    Roughly equivalent to, though more efficient than::

        lambda fn, iterable, *indices: (fn(arg) if i in indices else arg
                                        for i, arg in enumerate(iterable))

    Examples
    --------
    >>> from operator import mul
    >>> from functools import partial
    >>> list(map_indices(partial(mul, 3), [4, 6, 7, 1, 6, 8, 2], [0, 3, 5]))
    [12, 6, 7, 3, 6, 24, 2]
    """
    # Materialize the index set once for O(1) membership tests.
    chosen = set(indices)
    for position, element in enumerate(iterable):
        yield fn(element) if position in chosen else element
r""" Map a function across indices of an iterable. Notes ----- Roughly equivalent to, though more efficient than:: lambda fn, iterable, *indices: (fn(arg) if i in indices else arg for i, arg in enumerate(iterable)) Examples -------- >>> a = [4, 6, 7, 1, 6, 8, 2] >>> from operator import mul >>> list(map_indices(partial(mul, 3), a, [0, 3, 5])) [12, 6, 7, 3, 6, 24, 2] >>> b = [9., np.array([5., 6., 2.]), ... np.array([[5., 6., 2.], [2., 3., 9.]])] >>> list(map_indices(np.log, b, [0, 2])) # doctest: +NORMALIZE_WHITESPACE [2.1972245773362196, array([ 5., 6., 2.]), array([[ 1.60943791, 1.79175947, 0.69314718], [ 0.69314718, 1.09861229, 2.19722458]])] .. todo:: Floating point precision >>> list(map_indices(np.exp, list(map_indices(np.log, b, [0, 2])), [0, 2])) ... # doctest: +NORMALIZE_WHITESPACE +SKIP [9., array([5., 6., 2.]), array([[ 5., 6., 2.], [ 2., 3., 9.]])]
def process_chat(self, chat: "types.Chat"):
    """
    Generate chat data as (key, value) pairs.

    Yields nothing for a falsy chat; title/name are only included when
    content logging is enabled.

    :param chat:
    :return:
    """
    if not chat:
        return

    yield 'chat_id', chat.id
    yield 'chat_type', chat.type

    if not self.include_content:
        return

    yield 'chat_title', chat.full_name
    if chat.username:
        yield 'chat_name', f"@{chat.username}"
Generate chat data :param chat: :return:
def install_ansible_support(from_ppa=True, ppa_location='ppa:rquillo/ansible'):
    """Installs the ansible package.

    By default it is installed from the `PPA`_ linked from the ansible
    `website`_ or from a ppa specified by a charm config..

    .. _PPA: https://launchpad.net/~rquillo/+archive/ansible
    .. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu

    If from_ppa is empty, you must ensure that the package is available
    from a configured repository.
    """
    if from_ppa:
        charmhelpers.fetch.add_source(ppa_location)
        charmhelpers.fetch.apt_update(fatal=True)
    charmhelpers.fetch.apt_install('ansible')
    # Seed the inventory so ansible targets the local machine directly.
    with open(ansible_hosts_path, 'w+') as hosts_file:
        hosts_file.write('localhost ansible_connection=local '
                         'ansible_remote_tmp=/root/.ansible/tmp')
Installs the ansible package. By default it is installed from the `PPA`_ linked from the ansible `website`_ or from a ppa specified by a charm config.. .. _PPA: https://launchpad.net/~rquillo/+archive/ansible .. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu If from_ppa is empty, you must ensure that the package is available from a configured repository.
def create_model(self, parent, name, multiplicity='ZERO_MANY', **kwargs):
    """Create a new child model under a given parent.

    In order to prevent the backend from updating the frontend you may add
    `suppress_kevents=True` as additional keyword=value argument to this
    method. This will improve performance of the backend against a
    trade-off that someone looking at the frontend won't notice any
    changes unless the page is refreshed.

    :param parent: parent model
    :type parent: :class:`models.Part`
    :param name: new model name
    :type name: basestring
    :param multiplicity: choose between ZERO_ONE, ONE, ZERO_MANY, ONE_MANY or M_N
    :type multiplicity: basestring
    :param kwargs: (optional) additional keyword=value arguments
    :type kwargs: dict
    :return: :class:`models.Part` with category `MODEL`
    :raises IllegalArgumentError: When the provided arguments are incorrect
    :raises APIError: if the `Part` could not be created
    """
    # Child models can only hang under another model, never an instance.
    if parent.category != Category.MODEL:
        raise IllegalArgumentError("The parent should be of category 'MODEL'")

    payload = {
        "name": name,
        "parent": parent.id,
        "multiplicity": multiplicity,
    }
    return self._create_part(action="create_child_model", data=payload, **kwargs)
Create a new child model under a given parent. In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as additional keyword=value argument to this method. This will improve performance of the backend against a trade-off that someone looking at the frontend won't notice any changes unless the page is refreshed. :param parent: parent model :param name: new model name :param parent: parent part instance :type parent: :class:`models.Part` :param name: new part name :type name: basestring :param multiplicity: choose between ZERO_ONE, ONE, ZERO_MANY, ONE_MANY or M_N :type multiplicity: basestring :param kwargs: (optional) additional keyword=value arguments :type kwargs: dict :return: :class:`models.Part` with category `MODEL` :raises IllegalArgumentError: When the provided arguments are incorrect :raises APIError: if the `Part` could not be created
def get_image(self, filename: str = None) -> None:
    """Download the photo associated with a Savings Goal.

    The image arrives base64-encoded in the JSON payload and is decoded
    to *filename* (defaults to ``<goal name>.png``).
    """
    if filename is None:
        filename = "{0}.png".format(self.name)

    endpoint = "/account/{0}/savings-goals/{1}/photo".format(
        self._account_uid, self.uid
    )
    response = get(
        _url(endpoint, self._sandbox),
        headers=self._auth_headers
    )
    response.raise_for_status()

    encoded = response.json()['base64EncodedPhoto']
    with open(filename, 'wb') as image_file:
        image_file.write(b64decode(encoded))
Download the photo associated with a Savings Goal.
def reset():
    """ full reset of matplotlib default style and colors """
    # Classic matplotlib single-letter colors, in the order of the
    # "bgrmyck" string below: blue, green, red, magenta, yellow, cyan,
    # black.  The original listed the yellow tuple (.75, .75, 0.) twice,
    # leaving 'm' without its magenta value (.75, 0., .75).
    colors = [(0., 0., 1.), (0., .5, 0.), (1., 0., 0.), (.75, 0., .75),
              (.75, .75, 0.), (0., .75, .75), (0., 0., 0.)]
    for code, color in zip("bgrmyck", colors):
        rgb = mpl.colors.colorConverter.to_rgb(color)
        mpl.colors.colorConverter.colors[code] = rgb
        mpl.colors.colorConverter.cache[code] = rgb
    mpl.rcParams.update(mpl.rcParamsDefault)
    mpl.rcParams['figure.facecolor'] = 'white'
    mpl.rcParams['axes.facecolor'] = 'white'
full reset of matplotlib default style and colors
def local_rfcformat(dt):
    """Return the RFC822-formatted representation of a timezone-aware
    datetime with the UTC offset.
    """
    weekdays = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
    months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    # %z renders the numeric UTC offset, e.g. "+0530".
    return '%s, %02d %s %04d %02d:%02d:%02d %s' % (
        weekdays[dt.weekday()], dt.day, months[dt.month - 1], dt.year,
        dt.hour, dt.minute, dt.second, dt.strftime('%z'))
Return the RFC822-formatted representation of a timezone-aware datetime with the UTC offset.
def source_sum_err(self):
    """
    The uncertainty of `~photutils.SourceProperties.source_sum`,
    propagated from the input ``error`` array: the quadrature sum of the
    total errors over the non-masked pixels within the source segment,

    .. math:: \\Delta F = \\sqrt{\\sum_{i \\in S}
              \\sigma_{\\mathrm{tot}, i}^2}

    Pixels masked in the input ``data`` (including non-finite values)
    are also masked in the error array.  Returns None when no error
    array was supplied.
    """
    if self._error is None:
        return None
    if self._is_completely_masked:
        # table output needs unit
        return np.nan * self._error_unit
    return np.sqrt(np.sum(self._error_values ** 2))
The uncertainty of `~photutils.SourceProperties.source_sum`, propagated from the input ``error`` array. ``source_sum_err`` is the quadrature sum of the total errors over the non-masked pixels within the source segment: .. math:: \\Delta F = \\sqrt{\\sum_{i \\in S} \\sigma_{\\mathrm{tot}, i}^2} where :math:`\\Delta F` is ``source_sum_err``, :math:`\\sigma_{\\mathrm{tot, i}}` are the pixel-wise total errors, and :math:`S` are the non-masked pixels in the source segment. Pixel values that are masked in the input ``data``, including any non-finite pixel values (i.e. NaN, infs) that are automatically masked, are also masked in the error array.
def create_request_url(self, interface, method, version, parameters):
    """Create the URL to submit to the Steam Web API

    interface: Steam Web API interface containing methods.
    method: The method to call.
    version: The version of the method.
    parameters: Parameters to supply to the method.
    """
    # Work on a copy so the caller's dict is never mutated (the original
    # wrote the API key straight into the caller's `parameters`).
    params = dict(parameters) if parameters else {}
    params['key'] = self.apikey
    if 'format' not in params:
        params['format'] = self.format
    version = "v%04d" % (version,)
    url = "http://api.steampowered.com/%s/%s/%s/?%s" % (
        interface, method, version, urlencode(params))
    return url
Create the URL to submit to the Steam Web API interface: Steam Web API interface containing methods. method: The method to call. version: The version of the method. paramters: Parameters to supply to the method.
def _create_inbound_stream(self, config=None):
    """
    Creates an inbound stream from its config.

    Params:
        config:       stream configuration as read by ait.config
    Returns:
        stream:       a Stream
    Raises:
        ValueError:   if any of the required config values are missing
    """
    if config is None:
        raise ValueError('No stream config to create stream from.')

    name = self._get_stream_name(config)
    handlers = self._get_stream_handlers(config, name)
    inputs = config.get('input', None)
    if inputs is None:
        raise(cfg.AitConfigMissing('inbound stream {}\'s input'.format(name)))

    # Both stream flavors share the same ZMQ proxy wiring.
    zmq_args = {'zmq_context': self.broker.context,
                'zmq_proxy_xsub_url': self.broker.XSUB_URL,
                'zmq_proxy_xpub_url': self.broker.XPUB_URL}

    # A numeric first input means "listen on a port"; anything else is
    # treated as the name of another stream to subscribe to over ZMQ.
    stream_cls = PortInputStream if type(inputs[0]) is int else ZMQStream
    return stream_cls(name, inputs, handlers, zmq_args=zmq_args)
Creates an inbound stream from its config. Params: config: stream configuration as read by ait.config Returns: stream: a Stream Raises: ValueError: if any of the required config values are missing
def _string_width(self, s): """Get width of a string in the current font""" s = str(s) w = 0 for char in s: char = ord(char) w += self.character_widths[char] return w * self.font_size / 1000.0
Get width of a string in the current font
def all(self, archived=False, limit=None, page=None):
    """Get all adapter data.

    :param archived: when True, fetch the archived records instead
    :param limit: maximum number of records per page (passed through)
    :param page: page number to fetch (passed through)
    """
    # Fix: the previous code first built functools.partial(_path,
    # self.adapter) and then unconditionally overwrote it on the next
    # line; that dead assignment is removed.
    if archived:
        path = _path(self.adapter, 'archived')
    else:
        path = _path(self.adapter)
    return self._get(path, limit=limit, page=page)
get all adapter data.
def parse(self, vd, extent_loc):
    # type: (bytes, int) -> None
    '''
    Parse a Volume Descriptor out of a string.

    Parameters:
     vd - The string containing the Volume Descriptor.
     extent_loc - The location on the ISO of this Volume Descriptor.
    Returns:
     Nothing.
    '''
    ################ PVD VERSION ######################
    # Unpack every fixed-size field of the descriptor in one struct call.
    # The *_le/*_be pairs are the little-endian and big-endian copies that
    # ISO9660 stores for every multi-byte integer; they are cross-checked
    # against each other further below.
    (descriptor_type, identifier, self.version, self.flags,
     self.system_identifier, self.volume_identifier, unused1,
     space_size_le, space_size_be, self.escape_sequences, set_size_le,
     set_size_be, seqnum_le, seqnum_be, logical_block_size_le,
     logical_block_size_be, path_table_size_le, path_table_size_be,
     self.path_table_location_le, self.optional_path_table_location_le,
     self.path_table_location_be, self.optional_path_table_location_be,
     root_dir_record, self.volume_set_identifier, pub_ident_str,
     prepare_ident_str, app_ident_str, self.copyright_file_identifier,
     self.abstract_file_identifier, self.bibliographic_file_identifier,
     vol_create_date_str, vol_mod_date_str, vol_expire_date_str,
     vol_effective_date_str, self.file_structure_version, unused2,
     self.application_use, zero_unused) = struct.unpack_from(self.FMT, vd, 0)

    # According to Ecma-119, 8.4.1, the primary volume descriptor type
    # should be 1.
    if descriptor_type != self._vd_type:
        raise pycdlibexception.PyCdlibInvalidISO('Invalid volume descriptor')

    # According to Ecma-119, 8.4.2, the identifier should be 'CD001'.
    if identifier != b'CD001':
        raise pycdlibexception.PyCdlibInvalidISO('invalid CD isoIdentification')

    # According to Ecma-119, 8.4.3, the version should be 1 (or 2 for
    # ISO9660:1999)
    expected_versions = [1]
    if self._vd_type == VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY:
        expected_versions.append(2)
    if self.version not in expected_versions:
        raise pycdlibexception.PyCdlibInvalidISO('Invalid volume descriptor version %d' % (self.version))

    # According to Ecma-119, 8.4.4, the first flags field should be 0 for a Primary.
    if self._vd_type == VOLUME_DESCRIPTOR_TYPE_PRIMARY and self.flags != 0:
        raise pycdlibexception.PyCdlibInvalidISO('PVD flags field is not zero')

    # According to Ecma-119, 8.4.5, the first unused field (after the
    # system identifier and volume identifier) should be 0.
    if unused1 != 0:
        raise pycdlibexception.PyCdlibInvalidISO('data in 2nd unused field not zero')

    # According to Ecma-119, 8.4.9, the escape sequences for a PVD should
    # be 32 zero-bytes.  However, we have seen ISOs in the wild (Fantastic
    # Night Dreams - Cotton Original (Japan).cue from the psx redump
    # collection) that don't have this set to 0, so allow anything here.

    # According to Ecma-119, 8.4.30, the file structure version should be 1.
    # However, we have seen ISOs in the wild that that don't have this
    # properly set to one.  In those cases, forcibly set it to one and let
    # it pass.
    if self._vd_type == VOLUME_DESCRIPTOR_TYPE_PRIMARY:
        if self.file_structure_version != 1:
            self.file_structure_version = 1
    elif self._vd_type == VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY:
        if self.file_structure_version not in (1, 2):
            raise pycdlibexception.PyCdlibInvalidISO('File structure version expected to be 1')

    # According to Ecma-119, 8.4.31, the second unused field should be 0.
    if unused2 != 0:
        raise pycdlibexception.PyCdlibInvalidISO('data in 2nd unused field not zero')

    # According to Ecma-119, the last 653 bytes of the VD should be all 0.
    # However, we have seen ISOs in the wild that do not follow this, so
    # relax the check.

    # Check to make sure that the little-endian and big-endian versions
    # of the parsed data agree with each other.
    if space_size_le != utils.swab_32bit(space_size_be):
        raise pycdlibexception.PyCdlibInvalidISO('Little-endian and big-endian space size disagree')
    self.space_size = space_size_le

    if set_size_le != utils.swab_16bit(set_size_be):
        raise pycdlibexception.PyCdlibInvalidISO('Little-endian and big-endian set size disagree')
    self.set_size = set_size_le

    if seqnum_le != utils.swab_16bit(seqnum_be):
        raise pycdlibexception.PyCdlibInvalidISO('Little-endian and big-endian seqnum disagree')
    self.seqnum = seqnum_le

    if logical_block_size_le != utils.swab_16bit(logical_block_size_be):
        raise pycdlibexception.PyCdlibInvalidISO('Little-endian and big-endian logical block size disagree')
    self.log_block_size = logical_block_size_le

    if path_table_size_le != utils.swab_32bit(path_table_size_be):
        raise pycdlibexception.PyCdlibInvalidISO('Little-endian and big-endian path table size disagree')
    self.path_tbl_size = path_table_size_le
    # Extents occupied by the path table; doubled to cover both the LE
    # and BE on-disk copies.
    self.path_table_num_extents = utils.ceiling_div(self.path_tbl_size, 4096) * 2

    # Normalize the big-endian path table location to host byte order.
    self.path_table_location_be = utils.swab_32bit(self.path_table_location_be)

    # Parse the variable-format identifier and date sub-records.
    self.publisher_identifier = FileOrTextIdentifier()
    self.publisher_identifier.parse(pub_ident_str)
    self.preparer_identifier = FileOrTextIdentifier()
    self.preparer_identifier.parse(prepare_ident_str)
    self.application_identifier = FileOrTextIdentifier()
    self.application_identifier.parse(app_ident_str)
    self.volume_creation_date = dates.VolumeDescriptorDate()
    self.volume_creation_date.parse(vol_create_date_str)
    self.volume_modification_date = dates.VolumeDescriptorDate()
    self.volume_modification_date.parse(vol_mod_date_str)
    self.volume_expiration_date = dates.VolumeDescriptorDate()
    self.volume_expiration_date.parse(vol_expire_date_str)
    self.volume_effective_date = dates.VolumeDescriptorDate()
    self.volume_effective_date.parse(vol_effective_date_str)
    self.root_dir_record.parse(self, root_dir_record, None)

    self.orig_extent_loc = extent_loc

    self._initialized = True
Parse a Volume Descriptor out of a string. Parameters: vd - The string containing the Volume Descriptor. extent_loc - The location on the ISO of this Volume Descriptor. Returns: Nothing.
def random_dna(n):
    '''Generate a random DNA sequence.

    :param n: Output sequence length.
    :type n: int
    :returns: Random DNA sequence of length n.
    :rtype: coral.DNA

    '''
    bases = 'ATGC'
    sequence = ''.join(random.choice(bases) for _ in range(n))
    return coral.DNA(sequence)
Generate a random DNA sequence. :param n: Output sequence length. :type n: int :returns: Random DNA sequence of length n. :rtype: coral.DNA
def numbered_syllable_to_accented(syllable):
    """Convert a numbered pinyin syllable to an accented pinyin syllable.

    Implements the following algorithm, modified from
    https://github.com/tsroten/zhon:
        1. If the syllable has an 'a' or 'e', put the tone over that vowel.
        2. If the syllable has 'ou', place the tone over the 'o'.
        3. Otherwise, put the tone on the last vowel.

    """
    def _restore_case(lowered, vowel, replacement):
        # Re-apply an initial capital lost by the lower() normalisation.
        accented = lowered.replace(vowel, replacement)
        if syllable[0].isupper():
            return accented[0].upper() + accented[1:]
        return accented

    tone = syllable[-1]
    if tone == '5':
        # Neutral tone: no diacritic, just normalise the u-umlaut spellings.
        return re.sub('u:|v', '\u00fc', syllable[:-1])

    # Homogenise representation of u:
    syl = re.sub('u:|v', '\u00fc', syllable[:-1].lower())
    for marker in ('a', 'e'):
        if marker in syl:
            return _restore_case(syl, marker, _num_vowel_to_acc(marker, tone))
    if 'ou' in syl:
        return _restore_case(syl, 'o', _num_vowel_to_acc('o', tone))
    last_vowel = syl[max(map(syl.rfind, VOWELS))]  # Find last vowel index.
    return _restore_case(syl, last_vowel, _num_vowel_to_acc(last_vowel, tone))
Convert a numbered pinyin syllable to an accented pinyin syllable. Implements the following algorithm, modified from https://github.com/tsroten/zhon: 1. If the syllable has an 'a' or 'e', put the tone over that vowel. 2. If the syllable has 'ou', place the tone over the 'o'. 3. Otherwise, put the tone on the last vowel.
def sort_url_qsl(cls, raw_url, **kwargs):
    """Do nothing but sort the params of url.

    raw_url: the raw url to be sorted;
    kwargs: (optional) same kwargs for ``sorted``.
    """
    parts = urlparse(raw_url)
    ordered_qsl = sorted(parse_qsl(parts.query), **kwargs)
    return cls._join_url(parts, ordered_qsl)
Do nothing but sort the params of url. raw_url: the raw url to be sorted; kwargs: (optional) same kwargs for ``sorted``.
def rpc_get_name_at( self, name, block_height, **con_info ):
    """
    Get all the states the name was in at a particular block height.
    Does NOT work on expired names.
    Return {'status': true, 'records': [...]} on success.
    """
    if not check_name(name):
        return {'error': 'invalid name', 'http_status': 400}
    if not check_block(block_height):
        return self.success_response({'record': None})

    db = get_db_state(self.working_dir)
    historic_recs = db.get_name_at(name, block_height, include_expired=False)
    db.close()

    records = []
    for rec in historic_recs:
        # Older records may lack the symbolic opcode; derive it from 'op'.
        if 'opcode' not in rec:
            rec['opcode'] = op_get_opcode_name(rec['op'])
        records.append(self.sanitize_rec(rec))

    return self.success_response({'records': records})
Get all the states the name was in at a particular block height. Does NOT work on expired names. Return {'status': true, 'record': ...}
def _parse_bug(self, data):
    """
    param data: dict of data from XML-RPC server, representing a bug.
    returns: AttrDict
    """
    if 'id' in data:
        # Derive a browsable URL for this bug from the XML-RPC endpoint.
        data['weburl'] = self.url.replace('xmlrpc.cgi', str(data['id']))
    return AttrDict(data)
param data: dict of data from XML-RPC server, representing a bug. returns: AttrDict
def update_trigger(self, trigger):
    """
    Updates on the Alert API the trigger record having the ID of the specified
    Trigger object: the remote record is updated with data from the local
    Trigger object.

    :param trigger: the Trigger with updated data
    :type trigger: `pyowm.alertapi30.trigger.Trigger`
    :return: ``None`` if update is successful, an error otherwise
    """
    assert trigger is not None
    assert isinstance(trigger.id, str), "Value must be a string"
    time_period = {
        "start": {"expression": "after", "amount": trigger.start_after_millis},
        "end": {"expression": "after", "amount": trigger.end_after_millis},
    }
    conditions = [{"name": c.weather_param,
                   "expression": c.operator,
                   "amount": c.amount} for c in trigger.conditions]
    area = [a.as_dict() for a in trigger.area]
    status, _ = self.http_client.put(
        NAMED_TRIGGER_URI % trigger.id,
        params={'appid': self.API_key},
        data={"time_period": time_period,
              "conditions": conditions,
              "area": area},
        headers={'Content-Type': 'application/json'})
Updates on the Alert API the trigger record having the ID of the specified Trigger object: the remote record is updated with data from the local Trigger object. :param trigger: the Trigger with updated data :type trigger: `pyowm.alertapi30.trigger.Trigger` :return: ``None`` if update is successful, an error otherwise
def add_identity(db, source, email=None, name=None, username=None, uuid=None):
    """Add an identity to the registry.

    Adds a new identity to the registry.  By default a new unique identity
    is also created and associated to the new identity.  When 'uuid' is
    given, the new identity is instead attached to the already existing
    unique identity identified by 'uuid'; if that unique identity does not
    exist, a 'NotFoundError' is raised.

    Two identities are considered distinct when any value of the tuple
    (source, email, name, username) differs.  Inserting a tuple that is
    already registered raises 'AlreadyExistsError'.

    The function returns the identifier associated to the new registered
    identity.  When no 'uuid' is given, this id and the uuid associated
    to the new identity will be the same.

    :param db: database manager
    :param source: data source
    :param email: email of the identity
    :param name: full name of the identity
    :param username: user name used by the identity
    :param uuid: associates the new identity to the unique identity
        identified by this id

    :returns: a universal unique identifier

    :raises InvalidValueError: when source is None or empty; each one of the
        parameters is None; parameters are empty.
    :raises AlreadyExistsError: raised when the identity already exists
        in the registry.
    :raises NotFoundError: raised when the unique identity associated
        to the given 'uuid' is not in the registry.
    """
    with db.connect() as session:
        # Each identity needs a unique identifier
        try:
            identity_id = utils.uuid(source, email=email,
                                     name=name, username=username)
        except ValueError as e:
            raise InvalidValueError(e)

        if uuid:
            uidentity = find_unique_identity(session, uuid)
            if not uidentity:
                raise NotFoundError(entity=uuid)
        else:
            uidentity = add_unique_identity_db(session, identity_id)

        try:
            add_identity_db(session, uidentity, identity_id, source,
                            name=name, email=email, username=username)
        except ValueError as e:
            raise InvalidValueError(e)

        return identity_id
Add an identity to the registry. This function adds a new identity to the registry. By default, a new unique identity will be also added an associated to the new identity. When 'uuid' parameter is set, it creates a new identity that will be associated to the unique identity defined by 'uuid' that already exists on the registry. If the given unique identity does not exist, it raises a 'NotFoundError' exception. The registry considers that two identities are distinct when any value of the tuple (source, email, name, username) is different. Thus, the identities id1:('scm', 'jsmith@example.com', 'John Smith', 'jsmith') and id2:('mls', 'jsmith@example.com', 'John Smith', 'jsmith') will be registered as different identities. A 'AlreadyExistError' exception will be raised when the function tries to insert a tuple that exists in the registry. The function returns the identifier associated to the new registered identity. When no 'uuid' is given, this id and the uuid associated to the new identity will be the same. :param db: database manager :param source: data source :param email: email of the identity :param name: full name of the identity :param username: user name used by the identity :param uuid: associates the new identity to the unique identity identified by this id :returns: a universal unique identifier :raises InvalidValueError: when source is None or empty; each one of the parameters is None; parameters are empty. :raises AlreadyExistsError: raised when the identity already exists in the registry. :raises NotFoundError: raised when the unique identity associated to the given 'uuid' is not in the registry.
def is_glacier(s3_client, bucket, prefix):
    """Check if prefix is archived in Glacier, by checking the storage
    class of the first objects inside that prefix.

    Arguments:
    s3_client - boto3 S3 client (not service)
    bucket - valid extracted bucket (without protocol and prefix)
             example: snowplow-events-data
    prefix - valid S3 prefix (usually, run_id)
             example: snowplow-archive/enriched/archive/
    """
    # MaxKeys=3: sample a few keys so marker files such as _SUCCESS
    # cannot be the only object inspected.
    response = s3_client.list_objects_v2(Bucket=bucket, Prefix=prefix, MaxKeys=3)
    # Fix: 'Contents' is omitted from the response when the prefix
    # matches no keys; the old code raised KeyError in that case.
    for key in response.get('Contents', []):
        if key.get('StorageClass', 'STANDARD') == 'GLACIER':
            return True
    return False
Check if prefix is archived in Glacier, by checking the storage class of the first objects inside that prefix Arguments: s3_client - boto3 S3 client (not service) bucket - valid extracted bucket (without protocol and prefix) example: snowplow-events-data prefix - valid S3 prefix (usually, run_id) example: snowplow-archive/enriched/archive/
def make_unary(lineno, operator, operand, func=None, type_=None):
    """ Wrapper: returns a Unary node

    :param lineno: source line number the operator appears at
    :param operator: unary operator to apply
    :param operand: expression node the operator applies to
    :param func: optional value passed through to UNARY.make_node
    :param type_: optional value passed through to UNARY.make_node
    :return: whatever symbols.UNARY.make_node builds for these arguments
    """
    return symbols.UNARY.make_node(lineno, operator, operand, func, type_)
Wrapper: returns a Unary node
def axpy(x, y, a=1.0):
    """Quick level-1 call to BLAS y = a*x+y.

    Parameters
    ----------
    x : array_like
        nx1 real or complex vector
    y : array_like
        nx1 real or complex vector
    a : float
        real or complex scalar

    Returns
    -------
    y : array_like
        Input variable y is rewritten

    Notes
    -----
    The call to get_blas_funcs automatically determines the prefix for
    the blas call.
    """
    from scipy.linalg import get_blas_funcs

    fn = get_blas_funcs(['axpy'], [x, y])[0]
    # Fix: BLAS *axpy's positional signature is (x, y, n, a), so the
    # scalar must be passed by keyword; passing it positionally would be
    # interpreted as the vector length n.
    fn(x, y, a=a)
Quick level-1 call to BLAS y = a*x+y. Parameters ---------- x : array_like nx1 real or complex vector y : array_like nx1 real or complex vector a : float real or complex scalar Returns ------- y : array_like Input variable y is rewritten Notes ----- The call to get_blas_funcs automatically determines the prefix for the blas call.
def extract_ipfs_path_from_uri(value: str) -> str:
    """
    Return the path from an IPFS URI.
    Path = IPFS hash & following path.
    """
    parsed = parse.urlparse(value)
    if not parsed.netloc:
        # Bare path form (no ipfs:// authority): trim surrounding slashes.
        return parsed.path.strip("/")
    if not parsed.path:
        # Authority only: the netloc is the IPFS hash itself.
        return parsed.netloc
    return parsed.netloc + parsed.path.rstrip("/")
Return the path from an IPFS URI. Path = IPFS hash & following path.
def none_coalesce_handle(tokens):
    """Process the None-coalescing operator."""
    if len(tokens) == 1:
        return tokens[0]
    fallback = none_coalesce_handle(tokens[1:])
    if tokens[0].isalnum():
        # A bare name/number can be repeated without double evaluation.
        return "({b} if {a} is None else {a})".format(a=tokens[0], b=fallback)
    # Arbitrary expression: bind it once via a lambda parameter so it is
    # not evaluated twice.
    return "(lambda {x}: {b} if {x} is None else {x})({a})".format(
        x=none_coalesce_var, a=tokens[0], b=fallback)
Process the None-coalescing operator.
async def disconnect(self):
    """Shut down the watcher task and close websockets.

    No-op when there is no live connection.
    """
    conn = self._connection
    if conn is None:
        return
    log.debug('Closing model connection')
    await conn.close()
    self._connection = None
Shut down the watcher task and close websockets.
def _get_cpu_cores_per_run0(coreLimit, num_of_threads, allCpus, cores_of_package, siblings_of_core): """This method does the actual work of _get_cpu_cores_per_run without reading the machine architecture from the file system in order to be testable. For description, c.f. above. Note that this method might change the input parameters! Do not call it directly, call getCpuCoresPerRun()! @param allCpus: the list of all available cores @param cores_of_package: a mapping from package (CPU) ids to lists of cores that belong to this CPU @param siblings_of_core: a mapping from each core to a list of sibling cores including the core itself (a sibling is a core sharing the same physical core) """ # First, do some checks whether this algorithm has a chance to work. if coreLimit > len(allCpus): sys.exit("Cannot run benchmarks with {0} CPU cores, only {1} CPU cores available.".format(coreLimit, len(allCpus))) if coreLimit * num_of_threads > len(allCpus): sys.exit("Cannot run {0} benchmarks in parallel with {1} CPU cores each, only {2} CPU cores available. 
Please reduce the number of threads to {3}.".format(num_of_threads, coreLimit, len(allCpus), len(allCpus) // coreLimit)) package_size = None # Number of cores per package for package, cores in cores_of_package.items(): if package_size is None: package_size = len(cores) elif package_size != len(cores): sys.exit("Asymmetric machine architecture not supported: CPU package {0} has {1} cores, but other package has {2} cores.".format(package, len(cores), package_size)) core_size = None # Number of threads per core for core, siblings in siblings_of_core.items(): if core_size is None: core_size = len(siblings) elif core_size != len(siblings): sys.exit("Asymmetric machine architecture not supported: CPU core {0} has {1} siblings, but other core has {2} siblings.".format(core, len(siblings), core_size)) all_cpus_set = set(allCpus) for core, siblings in siblings_of_core.items(): siblings_set = set(siblings) if not siblings_set.issubset(all_cpus_set): sys.exit("Core assignment is unsupported because siblings {0} of core {1} are not usable. Please always make all virtual cores of a physical core available.".format(siblings_set.difference(all_cpus_set), core)) # Second, compute some values we will need. package_count = len(cores_of_package) packages = sorted(cores_of_package.keys()) coreLimit_rounded_up = int(math.ceil(coreLimit / core_size) * core_size) assert coreLimit <= coreLimit_rounded_up < (coreLimit + core_size) packages_per_run = int(math.ceil(coreLimit_rounded_up / package_size)) if packages_per_run > 1 and packages_per_run * num_of_threads > package_count: sys.exit("Cannot split runs over multiple CPUs and at the same time assign multiple runs to the same CPU. 
Please reduce the number of threads to {0}.".format(package_count // packages_per_run)) runs_per_package = int(math.ceil(num_of_threads / package_count)) assert packages_per_run == 1 or runs_per_package == 1 if packages_per_run == 1 and runs_per_package * coreLimit > package_size: sys.exit("Cannot run {} benchmarks with {} cores on {} CPUs with {} cores, because runs would need to be split across multiple CPUs. Please reduce the number of threads.".format(num_of_threads, coreLimit, package_count, package_size)) # Warn on misuse of hyper-threading need_HT = False if packages_per_run == 1: # Checking whether the total amount of usable physical cores is not enough, # there might be some cores we cannot use, e.g. when scheduling with coreLimit=3 on quad-core machines. # Thus we check per package. assert coreLimit * runs_per_package <= package_size if coreLimit_rounded_up * runs_per_package > package_size: need_HT = True logging.warning("The number of threads is too high and hyper-threading sibling cores need to be split among different runs, which makes benchmarking unreliable. Please reduce the number of threads to %s.", (package_size // coreLimit_rounded_up) * package_count) else: if coreLimit_rounded_up * num_of_threads > len(allCpus): assert coreLimit_rounded_up * runs_per_package > package_size need_HT = True logging.warning("The number of threads is too high and hyper-threading sibling cores need to be split among different runs, which makes benchmarking unreliable. Please reduce the number of threads to %s.", len(allCpus) // coreLimit_rounded_up) logging.debug("Going to assign at most %s runs per package, each one using %s cores and blocking %s cores on %s packages.", runs_per_package, coreLimit, coreLimit_rounded_up, packages_per_run) # Third, do the actual core assignment. 
result = [] used_cores = set() for run in range(num_of_threads): # this calculation ensures that runs are split evenly across packages start_package = (run * packages_per_run) % package_count cores = [] cores_with_siblings = set() for package_nr in range(start_package, start_package + packages_per_run): assert len(cores) < coreLimit # Some systems have non-contiguous package numbers, # so we take the i'th package out of the list of available packages. # On normal system this is the identity mapping. package = packages[package_nr] for core in cores_of_package[package]: if core not in cores: cores.extend(c for c in siblings_of_core[core] if not c in used_cores) if len(cores) >= coreLimit: break cores_with_siblings.update(cores) cores = cores[:coreLimit] # shrink if we got more cores than necessary # remove used cores such that we do not try to use them again cores_of_package[package] = [core for core in cores_of_package[package] if core not in cores] assert len(cores) == coreLimit, "Wrong number of cores for run {} of {} - previous results: {}, remaining cores per package: {}, current cores: {}".format(run+1, num_of_threads, result, cores_of_package, cores) blocked_cores = cores if need_HT else cores_with_siblings assert not used_cores.intersection(blocked_cores) used_cores.update(blocked_cores) result.append(sorted(cores)) assert len(result) == num_of_threads assert all(len(cores) == coreLimit for cores in result) assert len(set(itertools.chain(*result))) == num_of_threads * coreLimit, "Cores are not uniquely assigned to runs: " + result logging.debug("Final core assignment: %s.", result) return result
This method does the actual work of _get_cpu_cores_per_run without reading the machine architecture from the file system in order to be testable. For description, c.f. above. Note that this method might change the input parameters! Do not call it directly, call getCpuCoresPerRun()! @param allCpus: the list of all available cores @param cores_of_package: a mapping from package (CPU) ids to lists of cores that belong to this CPU @param siblings_of_core: a mapping from each core to a list of sibling cores including the core itself (a sibling is a core sharing the same physical core)
def get_version(mod, default="0.0.0"):
    """
    :param module|str mod: Module, or module name to find version for (pass either calling module, or its .__name__)
    :param str default: Value to return if version determination fails
    :return str: Determined version
    """
    # Accept either a module object or a plain distribution name.
    name = getattr(mod, "__name__", mod)
    try:
        import pkg_resources
        return pkg_resources.get_distribution(name).version
    except Exception as e:
        LOG.warning("Can't determine version for %s: %s", name, e, exc_info=e)
        return default
:param module|str mod: Module, or module name to find version for (pass either calling module, or its .__name__) :param str default: Value to return if version determination fails :return str: Determined version
def delete_object(self, bucket, obj, version_id):
    """Delete an existing object.

    :param bucket: The bucket (instance or id) to get the object from.
    :param obj: A :class:`invenio_files_rest.models.ObjectVersion` instance.
    :param version_id: The version ID.
    :returns: A Flask response.
    """
    if version_id is not None:
        # Permanently delete this specific object version.
        check_permission(
            current_permission_factory(bucket, 'object-delete-version'),
            hidden=False,
        )
        obj.remove()
        db.session.commit()
        if obj.file_id:
            # Remove the backing file data asynchronously once the row
            # deletion is committed.
            remove_file_data.delay(str(obj.file_id))
    else:
        # Create a delete marker.
        with db.session.begin_nested():
            ObjectVersion.delete(bucket, obj.key)
        db.session.commit()

    return self.make_response('', 204)
Delete an existing object. :param bucket: The bucket (instance or id) to get the object from. :param obj: A :class:`invenio_files_rest.models.ObjectVersion` instance. :param version_id: The version ID. :returns: A Flask response.
def find_features(feats, sequ, annotated, start_pos, cutoff):
    """
    find_features - Finds the reference sequence features in the alignments and records the positions

    :param feats: Dictionary of sequence features
    :type feats: ``dict``
    :param sequ: The sequence alignment for the input sequence
    :type sequ: ``List``
    :param annotated: dictionary of the annotated features
    :type annotated: ``dict``
    :param start_pos: Where the reference sequence starts
    :type start_pos: ``int``
    :param cutoff: The alignment cutoff
    :type cutoff: ``float``
    :rtype: ``List``
    """
    feats_a = list(feats.keys())
    j = 0      # index of the feature whose span currently contains position i
    s = 0      # 1 while inside a gap run that started at alignment position 0
    en = 0     # accumulated end-coordinate shift from gap columns
    start = 0  # accumulated start-coordinate shift from gap columns
    for i in range(0, len(sequ)):
        if j <= len(feats_a)-1:
            # Advance to the next feature once i passes the current one's end.
            if i > int(feats[feats_a[j]].location.end):
                j += 1
        if(sequ[i] == '-'):
            if i == 0:
                start += 1
                en += 1
                s = 1
            else:
                start += 1
                en += 1
                if s == 0:
                    # Internal gap (not part of a leading-gap run): extend the
                    # current feature's end by one and shift all later features.
                    start_val = feats[feats_a[j]].location.start
                    #if feats_a[j] == "five_prime_UTR":
                    #    start_val = 0
                    # NOTE(review): `st` is only bound in the s == 1 branch
                    # below; if this condition short-circuits past the first
                    # clause before any leading gap run was processed,
                    # evaluating `st < 6` raises NameError - confirm the
                    # intended inputs always take the first clause here.
                    if((annotated == 0 and start_pos == 0 and cutoff < 0.9)
                            or (annotated == 0 and start_pos == 0 and st < 6)
                            or (start_pos == 0 and len(feats) == 1 and cutoff < .9)):
                        start_val = 0
                    else:
                        if feats_a[j] == 'five_prime_UTR':
                            start_val = 0
                    feats[feats_a[j]] = SeqFeature(FeatureLocation(ExactPosition(start_val), ExactPosition(int(feats[feats_a[j]].location.end + 1)), strand=1), type=feats[feats_a[j]].type)
                    if j != len(feats_a):
                        # Shift every following feature by the single gap column.
                        for l in range(j+1, len(feats_a)):
                            feats[feats_a[l]] = SeqFeature(FeatureLocation(ExactPosition(feats[feats_a[l]].location.start+1), ExactPosition(int(feats[feats_a[l]].location.end + 1)), strand=1), type=feats[feats_a[l]].type)
        else:
            if s == 1:
                # First non-gap column after a leading gap run: shift the
                # current feature by the accumulated offsets.
                st = feats[feats_a[j]].location.start + start
                end = feats[feats_a[j]].location.end + en
                start_val = st
                if feats_a[j] != 'five_prime_UTR' and start_pos == 0:
                    # NOTE(review): the inner five_prime_UTR check below is
                    # unreachable under this guard - confirm intended logic.
                    if((annotated == 0 and start_pos == 0 and cutoff < 0.9)
                            or (annotated == 0 and start_pos == 0 and st < 6)
                            or (start_pos == 0 and len(feats) == 1 and cutoff < .9)):
                        start_val = 0
                    else:
                        if feats_a[j] == 'five_prime_UTR':
                            start_val = 0
                feats[feats_a[j]] = SeqFeature(FeatureLocation(ExactPosition(start_val), ExactPosition(end), strand=1), type=feats[feats_a[j]].type)
                if j != len(feats_a):
                    # Shift every following feature by the leading-gap offset.
                    for l in range(j+1, len(feats_a)):
                        feats[feats_a[l]] = SeqFeature(FeatureLocation(ExactPosition(feats[feats_a[l]].location.start+st), ExactPosition(int(feats[feats_a[l]].location.end + st)), strand=1), type=feats[feats_a[l]].type)
                s = 0
    return feats
find_features - Finds the reference sequence features in the alignments and records the positions :param feats: Dictionary of sequence features :type feats: ``dict`` :param sequ: The sequence alignment for the input sequence :type sequ: ``List`` :param annotated: dictionary of the annotated features :type annotated: ``dict`` :param start_pos: Where the reference sequence starts :type start_pos: ``int`` :param missing: List of the unmapped features :type missing: ``List`` :param cutoff: The alignment cutoff :type cutoff: ``float`` :param verbose: Flag for running in verbose mode. :type verbose: ``bool`` :param verbosity: Numerical value to indicate how verbose the output will be in verbose mode. :type verbosity: ``int`` :rtype: ``List``
def dirty(field, ttl=None):
    "decorator to cache the result of a function until a field changes"
    if ttl is not None:
        raise NotImplementedError('pg.dirty ttl feature')

    def decorator(f):
        @functools.wraps(f)
        def wrapper(self, *args, **kwargs):
            # warning: not reentrant
            per_field = self.dirty_cache.setdefault(field, {})
            key = f.__name__
            if key not in per_field:
                per_field[key] = f(self, *args, **kwargs)
            return per_field[key]
        return wrapper
    return decorator
decorator to cache the result of a function until a field changes
def make_figure_uhs(extractors, what):
    """
    $ oq plot 'uhs?kind=mean&site_id=0'
    """
    import matplotlib.pyplot as plt
    fig = plt.figure()
    # Collect one UHS array per (calc_id, kind) pair across extractors.
    curves = {}
    for ex in extractors:
        uhs = ex.get(what)
        for kind in uhs.kind:
            curves[ex.calc_id, kind] = uhs[kind]
    # NOTE: oqparam/site are taken from the last extractor visited.
    oq = ex.oqparam
    num_poes = len(oq.poes)
    periods = [imt.period for imt in oq.imt_periods()]
    [site] = uhs.site_id
    # One subplot per probability of exceedance.
    for idx, poe in enumerate(oq.poes):
        ax = fig.add_subplot(num_poes, 1, idx + 1)
        ax.set_xlabel('UHS on site %s, poe=%s, inv_time=%dy' %
                      (site, poe, oq.investigation_time))
        ax.set_ylabel('SA')
        for ck, arr in curves.items():
            ax.plot(periods, arr[0, :, idx], '-', label='%s_%s' % ck)
            ax.plot(periods, arr[0, :, idx], '.')
        ax.grid(True)
        ax.legend()
    return plt
$ oq plot 'uhs?kind=mean&site_id=0'
def from_vrep(config, vrep_host='127.0.0.1', vrep_port=19997, scene=None,
              tracked_objects=[], tracked_collisions=[],
              id=None, shared_vrep_io=None):
    """ Create a robot from a V-REP instance.

    :param config: robot configuration (either the path to the json or directly the dictionary)
    :type config: str or dict
    :param str vrep_host: host of the V-REP server
    :param int vrep_port: port of the V-REP server
    :param str scene: path to the V-REP scene to load and start
    :param list tracked_objects: list of V-REP dummy object to track
    :param list tracked_collisions: list of V-REP collision to track
    :param int id: robot id in simulator (useful when using a scene with multiple robots)
    :param vrep_io: use an already connected VrepIO (useful when using a scene with multiple robots)
    :type vrep_io: :class:`~pypot.vrep.io.VrepIO`

    This function tries to connect to a V-REP instance and expects to find motors with names corresponding to the ones found in the config.

    .. note:: The :class:`~pypot.robot.robot.Robot` returned will also provide a convenience reset_simulation method which resets the simulation and the robot position to its initial stance.

    .. note:: Using the same configuration, you should be able to switch from a real to a simulated robot just by switching from :func:`~pypot.robot.config.from_config` to :func:`~pypot.vrep.from_vrep`. For instance::

        import json

        with open('my_config.json') as f:
            config = json.load(f)

        from pypot.robot import from_config
        from pypot.vrep import from_vrep

        real_robot = from_config(config)
        simulated_robot = from_vrep(config, '127.0.0.1', 19997, 'poppy.ttt')

    """
    # Reuse an existing connection when provided (multi-robot scenes),
    # otherwise open a new one.
    if shared_vrep_io is None:
        vrep_io = VrepIO(vrep_host, vrep_port)
    else:
        vrep_io = shared_vrep_io

    # Patch pypot's notion of time so it follows the simulator clock.
    vreptime = vrep_time(vrep_io)
    pypot_time.time = vreptime.get_time
    pypot_time.sleep = vreptime.sleep

    # Accept either a path to a json file or an already-parsed dict.
    if isinstance(config, basestring):
        with open(config) as f:
            config = json.load(f, object_pairs_hook=OrderedDict)

    motors = [motor_from_confignode(config, name)
              for name in config['motors'].keys()]

    vc = VrepController(vrep_io, scene, motors, id=id)
    vc._init_vrep_streaming()

    sensor_controllers = []

    if tracked_objects:
        sensors = [ObjectTracker(name) for name in tracked_objects]
        vot = VrepObjectTracker(vrep_io, sensors)
        sensor_controllers.append(vot)

    if tracked_collisions:
        sensors = [VrepCollisionDetector(name) for name in tracked_collisions]
        vct = VrepCollisionTracker(vrep_io, sensors)
        sensor_controllers.append(vct)

    robot = Robot(motor_controllers=[vc],
                  sensor_controllers=sensor_controllers)

    for m in robot.motors:
        m.goto_behavior = 'minjerk'

    # Remember the initial stance so reset_simulation can restore it.
    init_pos = {m: m.goal_position for m in robot.motors}

    make_alias(config, robot)

    def start_simu():
        # Start the simulation, restore the initial stance, start the
        # controllers/trackers and wait for the simulated clock to run.
        vrep_io.start_simulation()

        for m, p in init_pos.iteritems():
            m.goal_position = p

        vc.start()

        if tracked_objects:
            vot.start()

        if tracked_collisions:
            vct.start()

        while vrep_io.get_simulation_current_time() < 1.:
            sys_time.sleep(0.1)

    def stop_simu():
        # Stop trackers and the controller before stopping the simulator.
        if tracked_objects:
            vot.stop()

        if tracked_collisions:
            vct.stop()

        vc.stop()
        vrep_io.stop_simulation()

    def reset_simu():
        stop_simu()
        sys_time.sleep(0.5)
        start_simu()

    robot.start_simulation = start_simu
    robot.stop_simulation = stop_simu
    robot.reset_simulation = reset_simu

    def current_simulation_time(robot):
        return robot._controllers[0].io.get_simulation_current_time()

    # NOTE(review): the accessors below are attached to the Robot
    # *class*, so several simulated robots would overwrite each other's
    # accessors -- confirm single-robot usage is assumed here.
    Robot.current_simulation_time = property(lambda robot: current_simulation_time(robot))

    def get_object_position(robot, object, relative_to_object=None):
        return vrep_io.get_object_position(object, relative_to_object)

    Robot.get_object_position = partial(get_object_position, robot)

    def get_object_orientation(robot, object, relative_to_object=None):
        return vrep_io.get_object_orientation(object, relative_to_object)

    Robot.get_object_orientation = partial(get_object_orientation, robot)

    return robot
Create a robot from a V-REP instance. :param config: robot configuration (either the path to the json or directly the dictionary) :type config: str or dict :param str vrep_host: host of the V-REP server :param int vrep_port: port of the V-REP server :param str scene: path to the V-REP scene to load and start :param list tracked_objects: list of V-REP dummy object to track :param list tracked_collisions: list of V-REP collision to track :param int id: robot id in simulator (useful when using a scene with multiple robots) :param vrep_io: use an already connected VrepIO (useful when using a scene with multiple robots) :type vrep_io: :class:`~pypot.vrep.io.VrepIO` This function tries to connect to a V-REP instance and expects to find motors with names corresponding to the ones found in the config. .. note:: The :class:`~pypot.robot.robot.Robot` returned will also provide a convenience reset_simulation method which resets the simulation and the robot position to its initial stance. .. note:: Using the same configuration, you should be able to switch from a real to a simulated robot just by switching from :func:`~pypot.robot.config.from_config` to :func:`~pypot.vrep.from_vrep`. For instance:: import json with open('my_config.json') as f: config = json.load(f) from pypot.robot import from_config from pypot.vrep import from_vrep real_robot = from_config(config) simulated_robot = from_vrep(config, '127.0.0.1', 19997, 'poppy.ttt')
def setup_logger(logger, logfile):  # noqa
    # type: (logging.Logger, str) -> None
    """Set up logger: attach a stream or file handler and register it.

    :param logger: logger to configure (level is forced to DEBUG)
    :param logfile: path of the log file; when None/empty, log to the
        console via a StreamHandler instead
    """
    global _REGISTERED_LOGGER_HANDLERS
    logger.setLevel(logging.DEBUG)
    if is_none_or_empty(logfile):
        handler = logging.StreamHandler()
    else:
        handler = logging.FileHandler(logfile, encoding='utf-8')
        # NOTE(review): the file handler is also attached to the root
        # logger, so records from other loggers end up in the file too;
        # confirm this duplication is intentional.
        logging.getLogger().addHandler(handler)
    formatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s')
    # use '.' (not the default ',') between seconds and milliseconds
    formatter.default_msec_format = '%s.%03d'
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    # handler attached directly; don't bubble records up to ancestors
    logger.propagate = False
    # remember the handler so it can be torn down later
    _REGISTERED_LOGGER_HANDLERS.append(handler)
Set up logger
def list_file_jobs(cls, offset=None, limit=None, api=None):
    """Query ( List ) async jobs

    :param offset: Pagination offset
    :param limit: Pagination limit
    :param api: Api instance
    :return: Collection object
    """
    # Fall back to the class-level API instance when none is supplied.
    return super(AsyncJob, cls)._query(
        api=api or cls._API,
        url=cls._URL['list_file_jobs'],
        offset=offset,
        limit=limit,
    )
Query ( List ) async jobs :param offset: Pagination offset :param limit: Pagination limit :param api: Api instance :return: Collection object
def download(self, itemID, savePath):
    """ downloads an item to local disk

    Inputs:
       itemID - unique id of item to download
       savePath - folder to save the file in
    Returns the result of the underlying GET request (the file is
    written into *savePath*).
    """
    # Ensure the destination folder exists before downloading.
    if not os.path.isdir(savePath):
        os.makedirs(savePath)
    url = self._url + "/%s/download" % itemID
    params = {}
    # No query parameters are currently used; keep the hook so future
    # parameters get appended correctly.
    if params:
        url = url + "?%s" % urlencode(params)
    return self._get(url=url,
                     param_dict=params,
                     out_folder=savePath,
                     securityHandler=self._securityHandler,
                     proxy_url=self._proxy_url,
                     proxy_port=self._proxy_port)
downloads an item to local disk Inputs: itemID - unique id of item to download savePath - folder to save the file in
def make_callback_children(self, name, *args, **kwdargs):
    """Propagate a callback through child layers, then our own layer.

    Walks ``self.objects`` (when present) from the top-most layer down
    to the bottom, invoking the *name* callback on every child that
    supports callbacks, and finally makes the callback on our 'native'
    layer via the superclass.  NOTE(review): despite the historical
    docstring, children's return values are not inspected -- every
    child is called unconditionally.
    """
    if hasattr(self, 'objects'):
        # Top of the stack is the end of the list; walk it backwards.
        for child in reversed(self.objects):
            if isinstance(child, Callbacks):
                child.make_callback(name, *args, **kwdargs)
    return super(UIMixin, self).make_callback(name, *args, **kwdargs)
Invoke callbacks on all objects (i.e. layers) from the top to the bottom, returning when the first one returns True. If none returns True, then make the callback on our 'native' layer.
def vdm_b(vdm, lat):
    """
    Converts a virtual dipole moment (VDM) or a virtual axial dipole
    moment (VADM; input in units of Am^2) to a local magnetic field
    value (output in units of tesla)

    Parameters
    ----------
    vdm : V(A)DM in units of Am^2
    lat: latitude of site in degrees

    Returns
    -------
    B: local magnetic field strength in tesla
    """
    # Plain float division replaces the Python-2 compat old_div() shim;
    # identical result for float operands.
    rad = np.pi / 180.
    # changed radius of the earth from 3.367e6 3/12/2010
    # fact = (earth radius in m)**3 * 1e7
    fact = ((6.371e6)**3) * 1e7
    colat = (90. - lat) * rad
    # dipole field: B = VDM * sqrt(1 + 3*cos^2(colatitude)) / fact
    return vdm * (np.sqrt(1 + 3 * (np.cos(colat)**2))) / fact
Converts a virtual dipole moment (VDM) or a virtual axial dipole moment (VADM; input in units of Am^2) to a local magnetic field value (output in units of tesla) Parameters ---------- vdm : V(A)DM in units of Am^2 lat: latitude of site in degrees Returns ------- B: local magnetic field strength in tesla
def load(self, providers, symbols, start, end, logger, backend, **kwargs):
    '''Load symbols data.

    :keyword providers: Dictionary of registered data providers.
    :keyword symbols: list of symbols to load.
    :keyword start: start date.
    :keyword end: end date.
    :keyword logger: instance of :class:`logging.Logger` or ``None``.
    :keyword backend: :class:`dynts.TimeSeries` backend name.

    There is no need to override this function, just use one the three
    hooks available.
    '''
    logger = logger or logging.getLogger(self.__class__.__name__)
    # Precondition the date range
    start, end = self.dates(start, end)
    data = {}
    for sym in symbols:
        # Resolve ticker, field and provider for this symbol
        symbol = self.parse_symbol(sym, providers)
        provider = symbol.provider
        if not provider:
            raise MissingDataProvider(
                'data provider for %s not available' % sym
            )
        pre = self.preprocess(symbol, start, end, logger, backend, **kwargs)
        if not pre.intervals:
            result = pre.result
        else:
            # Fetch each missing interval and merge the pieces
            result = None
            for interval_start, interval_end in pre.intervals:
                logger.info('Loading %s from %s. From %s to %s',
                            symbol.ticker, provider,
                            interval_start, interval_end)
                chunk = provider.load(symbol, interval_start, interval_end,
                                      logger, backend, **kwargs)
                if result is None:
                    result = chunk
                else:
                    result.update(chunk)
        # onresult hook
        data[sym] = self.onresult(symbol, result, logger, backend, **kwargs)
    # last hook
    return self.onfinishload(data, logger, backend, **kwargs)
Load symbols data. :keyword providers: Dictionary of registered data providers. :keyword symbols: list of symbols to load. :keyword start: start date. :keyword end: end date. :keyword logger: instance of :class:`logging.Logger` or ``None``. :keyword backend: :class:`dynts.TimeSeries` backend name. There is no need to override this function, just use one the three hooks available.
def trnIndextoCoord(self, i):
    """
    Map 1D cell index to a 2D coordinate

    :param i: integer 1D cell index
    :return: (x, y), a 2D integer coordinate
    """
    x = i % self.trnWidth
    # Floor division keeps the row an int under Python 3 (true division
    # would return a float coordinate).
    y = i // self.trnWidth
    return x, y
Map 1D cell index to a 2D coordinate :param i: integer 1D cell index :return: (x, y), a 2D coordinate
def calculate_authentication_data(self, key):
    '''
    Calculate the authentication data based on the current key-id and
    the given key.

    :param key: the shared secret to compute the HMAC with
    :returns: the HMAC digest over the serialized packet, or '' when no
        authentication is configured
    :raises ValueError: when the key-id is not recognised
    '''
    # No authentication configured: no authentication data
    if self.key_id == KEY_ID_NONE:
        return ''

    # Determine the digestmod and how long the authentication data
    # (the zero-fill placeholder) will be
    if self.key_id == KEY_ID_HMAC_SHA_1_96:
        digestmod = hashlib.sha1
        data_length = 20
    elif self.key_id == KEY_ID_HMAC_SHA_256_128:
        digestmod = hashlib.sha256
        data_length = 32
    else:
        raise ValueError('Unknown Key ID')

    # Serialize the packet with the authentication data zeroed out.
    # Restore the original value even when to_bytes() raises, so the
    # instance is never left with a clobbered authentication_data.
    current_authentication_data = self.authentication_data
    self.authentication_data = '\x00' * data_length
    try:
        msg = self.to_bytes()
    finally:
        self.authentication_data = current_authentication_data

    # Return the authentication data based on the generated packet
    # and the given key
    return hmac.new(key, msg, digestmod).digest()
Calculate the authentication data based on the current key-id and the given key.
def ordered_covering(routing_table, target_length, aliases=dict(),
                     no_raise=False):
    """Reduce the size of a routing table by merging together entries
    where possible.

    .. warning::
        The input routing table *must* also include entries which could
        be removed and replaced by default routing.

    .. warning::
        It is assumed that the input routing table is not in any
        particular order and may be reordered into ascending order of
        generality (number of don't cares/Xs in the key-mask) without
        affecting routing correctness.  It is also assumed that if this
        table is unordered it is at least orthogonal (i.e., there are
        no two entries which would match the same key) and reorderable.

    Parameters
    ----------
    routing_table : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
        Routing entries to be merged.
    target_length : int or None
        Minimisation halts once the table reaches this size or no
        further merge is possible.  If None the table is made as small
        as possible.
    aliases : {(key, mask): {(key, mask), ...}, ...}
        Which keys/masks are combinations of other (now removed)
        keys/masks; supply this when updating an already-minimised
        table.
    no_raise : bool
        If True, a table is returned regardless of its final size.

    Raises
    ------
    MinimisationFailedError
        If the smallest producible table is larger than `target_length`
        and `no_raise` is False.

    Returns
    -------
    [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
        Reduced routing table entries.
    {(key, mask): {(key, mask), ...}, ...}
        A new aliases dictionary.
    """
    # Work on a copy so the caller's aliases dict is left untouched.
    aliases = dict(aliases)

    # Sort the entries into ascending order of generality up front.
    table = sorted(
        routing_table,
        key=lambda entry: _get_generality(entry.key, entry.mask)
    )

    # Repeatedly apply the most beneficial merge until the target size
    # is reached or no worthwhile merge remains.
    while target_length is None or len(table) > target_length:
        candidate = _get_best_merge(table, aliases)
        if candidate.goodness <= 0:
            break
        table, aliases = candidate.apply(aliases)

    # Complain if the table is still too large (unless suppressed).
    too_long = target_length is not None and len(table) > target_length
    if too_long and not no_raise:
        raise MinimisationFailedError(target_length, len(table))

    return table, aliases
Reduce the size of a routing table by merging together entries where possible. .. warning:: The input routing table *must* also include entries which could be removed and replaced by default routing. .. warning:: It is assumed that the input routing table is not in any particular order and may be reordered into ascending order of generality (number of don't cares/Xs in the key-mask) without affecting routing correctness. It is also assumed that if this table is unordered it is at least orthogonal (i.e., there are no two entries which would match the same key) and reorderable. .. note:: If *all* the keys in the table are derived from a single instance of :py:class:`~rig.bitfield.BitField` then the table is guaranteed to be orthogonal and reorderable. .. note:: Use :py:meth:`~rig.routing_table.expand_entries` to generate an orthogonal table and receive warnings if the input table is not orthogonal. Parameters ---------- routing_table : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] Routing entries to be merged. target_length : int or None Target length of the routing table; the minimisation procedure will halt once either this target is reached or no further minimisation is possible. If None then the table will be made as small as possible. Other Parameters ---------------- aliases : {(key, mask): {(key, mask), ...}, ...} Dictionary of which keys and masks in the routing table are combinations of other (now removed) keys and masks; this allows us to consider only the keys and masks the user actually cares about when determining if inserting a new entry will break the correctness of the table. This should be supplied when using this method to update an already minimised table. no_raise : bool If False (the default) then an error will be raised if the table cannot be minimised to be smaller than `target_length` and `target_length` is not None. If True then a table will be returned regardless of the size of the final table. 
Raises ------ MinimisationFailedError If the smallest table that can be produced is larger than `target_length` and `no_raise` is False. Returns ------- [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] Reduced routing table entries. {(key, mask): {(key, mask), ...}, ...} A new aliases dictionary.