code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def rgb_to_hsl(r, g, b):
    """Convert a color in r, g, b to a color in h, s, l.

    :param r: red channel in 0..255 (None is treated as 0)
    :param g: green channel in 0..255 (None is treated as 0)
    :param b: blue channel in 0..255 (None is treated as 0)
    :return: tuple (h in degrees [0, 360), s in percent, l in percent),
        each passed through ``normalize_float``.
    """
    # Treat None channels as 0 so partially-specified colors still convert.
    r = r or 0
    g = g or 0
    b = b or 0
    r /= 255
    g /= 255
    b /= 255
    max_ = max((r, g, b))
    min_ = min((r, g, b))
    d = max_ - min_
    # Hue: 0 for greys; otherwise derived from the dominant channel.
    # Fixed: compare floats with == instead of the original identity
    # check (`is`), which only worked by accident of max() returning one
    # of its argument objects.
    if not d:
        h = 0
    elif r == max_:
        h = 60 * (g - b) / d
    elif g == max_:
        h = 60 * (b - r) / d + 120
    else:
        h = 60 * (r - g) / d + 240
    l = .5 * (max_ + min_)
    # Saturation is scaled relative to lightness.
    if not d:
        s = 0
    elif l < 0.5:
        s = .5 * d / l
    else:
        s = .5 * d / (1 - l)
    return tuple(map(normalize_float, (h % 360, s * 100, l * 100)))
Convert a color in r, g, b to a color in h, s, l
def str_digit_to_int(chr):
    """
    Converts a string character to a decimal number.
    Where "A"->10, "B"->11, "C"->12, ...etc

    Args:
        chr(str): A single character in the form of a string.

    Returns:
        The integer value of the input string digit.
    """
    # Plain decimal digits map directly.
    if chr in {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"}:
        return int(chr)
    code = ord(chr)
    # Code points below 91 are uppercase ('A'..'Z' -> 10..35); anything
    # at or above is treated as lowercase or higher ('a'..'z' -> 36..61).
    return code - 55 if code < 91 else code - 61
Converts a string character to a decimal number. Where "A"->10, "B"->11, "C"->12, ...etc Args: chr(str): A single character in the form of a string. Returns: The integer value of the input string digit.
def _reshape_m_vecs(self): """return list of arrays, each array represents a different n mode""" lst = [] for n in xrange(0, self.nmax + 1): mlst = [] if n <= self.mmax: nn = n else: nn = self.mmax for m in xrange(-nn, nn + 1): mlst.append(self.__getitem__((n, m))) lst.append(mlst) return lst
return list of arrays, each array represents a different n mode
def parse_args(args):
    """
    Turn command line tokens into a parsed namespace.

    :param args: command line parameters as list of strings
    :return: command line parameters as :obj:`argparse.Namespace`
    """
    p = argparse.ArgumentParser(
        description="Build html reveal.js slides from markdown in docs/ dir")
    p.add_argument(
        '-v', '--verbose',
        action='store_true',
        help='Whether to show progress messages on stdout, including HTML')
    p.add_argument(
        '--version',
        action='version',
        version='twip {ver}'.format(ver=__version__),
        help='print twip package version and exit.')
    p.add_argument(
        '-b', '--blog_path',
        default=BLOG_PATH,
        help='Path to source markdown files. Must contain an `images` subdir')
    p.add_argument(
        '-s', '--slide_path',
        default=DOCS_PATH,
        help='Path to dir for output slides (HTML). An images subdir will be added. A slides subdir should already exist.')
    p.add_argument(
        '-p', '--presentation',
        default='2015-10-27-Hacking-Oregon-Hidden-Political-Connections',
        help='Source markdown base file name (without .md extension). The HTML slides will share the same basename.')
    return p.parse_args(args)
Parse command line parameters :param args: command line parameters as list of strings :return: command line parameters as :obj:`argparse.Namespace`
def __parse_precipfc_data(data, timeframe):
    """Parse the forecasted precipitation data."""
    result = {AVERAGE: None, TOTAL: None, TIMEFRAME: None}
    log.debug("Precipitation data: %s", data)
    lines = data.splitlines()
    # One data line per 5 minutes of the requested timeframe, plus the
    # header line at index 0 which is skipped below.
    nrlines = min(len(lines), round(float(timeframe) / 5) + 1)
    totalrain = 0
    numberoflines = 0
    # The HH:MM part of each line is deliberately ignored so the result
    # stays correct when running in a different timezone.
    for line in lines[1:nrlines]:
        log.debug("__parse_precipfc_data: line: %s", line)
        # pylint: disable=unused-variable
        (val, key) = line.split("|")
        # Buienradar encodes intensity as 10^((value - 109) / 32) mm/h;
        # see https://www.buienradar.nl/overbuienradar/gratis-weerdata
        # (value 0 = dry, 255 = heavy rain; 77 corresponds to 0.1 mm/h).
        mmu = 10 ** (float((int(val) - 109)) / 32)
        totalrain = totalrain + float(mmu)
        numberoflines = numberoflines + 1
    if numberoflines > 0:
        result[AVERAGE] = round((totalrain / numberoflines), 2)
    else:
        result[AVERAGE] = 0
    # Each line covers 5 minutes, i.e. one twelfth of an hour.
    result[TOTAL] = round(totalrain / 12, 2)
    result[TIMEFRAME] = timeframe
    return result
Parse the forecasted precipitation data.
def check_api_error(api_response):
    """Check if returned API response contains an error.

    :param api_response: decoded JSON response from the PyBossa server
    :raises exceptions.HTTPError: for non-200 codes or unrecognised failures
    :raises DatabaseError: when the server reports a ProgrammingError
    :raises ProjectAlreadyExists: on a project integrity violation
    :raises ProjectNotFound: when the target project does not exist
    :raises TaskNotFound: when the target task does not exist
    """
    # Fixed: removed the stray debug print that preceded (and disabled)
    # the docstring; replaced the removed-in-Python-3 `<>` operator with
    # `!=`; use isinstance instead of `type(x) == dict`.
    if isinstance(api_response, dict) and 'code' in api_response and api_response['code'] != 200:
        print("Server response code: %s" % api_response['code'])
        print("Server response: %s" % api_response)
        raise exceptions.HTTPError('Unexpected response', response=api_response)
    if isinstance(api_response, dict) and (api_response.get('status') == 'failed'):
        if 'ProgrammingError' in api_response.get('exception_cls'):
            raise DatabaseError(message='PyBossa database error.',
                                error=api_response)
        if ('DBIntegrityError' in api_response.get('exception_cls') and
                'project' in api_response.get('target')):
            msg = 'PyBossa project already exists.'
            raise ProjectAlreadyExists(message=msg, error=api_response)
        if 'project' in api_response.get('target'):
            raise ProjectNotFound(message='PyBossa Project not found',
                                  error=api_response)
        if 'task' in api_response.get('target'):
            raise TaskNotFound(message='PyBossa Task not found',
                               error=api_response)
        else:
            print("Server response: %s" % api_response)
            raise exceptions.HTTPError('Unexpected response',
                                       response=api_response)
Check if returned API response contains an error.
def checkout(self):
    '''
    Checkout the configured branch/tag. We catch an "Exception" class here
    instead of a specific exception class because the exceptions raised by
    GitPython when running these functions vary in different versions of
    GitPython.

    Returns the result of ``self.check_root()`` on success, or None when
    the target ref does not exist or the checkout lock could not be
    obtained (a pre-existing lock re-raises as GitLockError instead).
    '''
    tgt_ref = self.get_checkout_target()
    try:
        head_sha = self.repo.rev_parse('HEAD').hexsha
    except Exception:
        # Should only happen the first time we are checking out, since
        # we fetch first before ever checking anything out.
        head_sha = None

    # 'origin/' + tgt_ref ==> matches a branch head
    # 'tags/' + tgt_ref + '@{commit}' ==> matches tag's commit
    for rev_parse_target, checkout_ref in (
            ('origin/' + tgt_ref, 'origin/' + tgt_ref),
            ('tags/' + tgt_ref, 'tags/' + tgt_ref)):
        try:
            target_sha = self.repo.rev_parse(rev_parse_target).hexsha
        except Exception:
            # ref does not exist
            continue
        else:
            if head_sha == target_sha:
                # No need to checkout, we're already up-to-date
                return self.check_root()

        try:
            # Serialize checkouts against other workers via a lock file.
            with self.gen_lock(lock_type='checkout'):
                self.repo.git.checkout(checkout_ref)
                log.debug(
                    '%s remote \'%s\' has been checked out to %s',
                    self.role,
                    self.id,
                    checkout_ref
                )
        except GitLockError as exc:
            if exc.errno == errno.EEXIST:
                # Re-raise with a different strerror containing a
                # more meaningful error message for the calling
                # function.
                raise GitLockError(
                    exc.errno,
                    'Checkout lock exists for {0} remote \'{1}\''
                    .format(self.role, self.id)
                )
            else:
                log.error(
                    'Error %d encountered obtaining checkout lock '
                    'for %s remote \'%s\'',
                    exc.errno,
                    self.role,
                    self.id
                )
                return None
        except Exception:
            # Checkout failed for another GitPython-specific reason;
            # try the next candidate ref.
            continue
        return self.check_root()
    log.error(
        'Failed to checkout %s from %s remote \'%s\': remote ref does '
        'not exist', tgt_ref, self.role, self.id
    )
    return None
Checkout the configured branch/tag. We catch an "Exception" class here instead of a specific exception class because the exceptions raised by GitPython when running these functions vary in different versions of GitPython.
def _sub16(ins):
    '''
    Pops last 2 words from the stack and subtract them.
    Then push the result onto the stack. Top of the stack is
    subtracted Top -1

    Optimizations:
      * If 2nd op is ZERO, then do NOTHING: A - 0 = A
      * If any of the operands is < 4, then DEC is used
      * If any of the operands is > 65531 (-4..-1), then INC is used

    :param ins: quad instruction; operands are taken from ins.quad[2:4].
    :return: list of Z80 assembly lines implementing the subtraction.
    '''
    op1, op2 = tuple(ins.quad[2:4])

    if is_int(op2):
        # Constant second operand: pick the cheapest encoding.
        op = int16(op2)
        output = _16bit_oper(op1)
        if op == 0:
            # Subtracting zero: value already correct.
            output.append('push hl')
            return output
        if op < 4:
            # Tiny constants: repeated DEC is shorter than a 16-bit add.
            output.extend(['dec hl'] * op)
            output.append('push hl')
            return output
        if op > 65531:
            # Constants equivalent to -1..-4 (two's complement): use INC.
            output.extend(['inc hl'] * (0x10000 - op))
            output.append('push hl')
            return output
        # General case: add the negated constant.
        output.append('ld de, -%i' % op)
        output.append('add hl, de')
        output.append('push hl')
        return output

    if op2[0] == '_':  # Optimization when 2nd operand is an id
        rev = True
        op1, op2 = op2, op1
    else:
        rev = False

    output = _16bit_oper(op1, op2, rev)
    # 'or a' clears the carry flag so SBC performs an exact subtraction.
    output.append('or a')
    output.append('sbc hl, de')
    output.append('push hl')
    return output
Pops last 2 words from the stack and subtract them. Then push the result onto the stack. Top of the stack is subtracted Top -1 Optimizations: * If 2nd op is ZERO, then do NOTHING: A - 0 = A * If any of the operands is < 4, then DEC is used * If any of the operands is > 65531 (-4..-1), then INC is used
def send(self, content=None, *, wait=False, username=None, avatar_url=None, tts=False, file=None, files=None, embed=None, embeds=None):
    """|maybecoro|

    Sends a message using the webhook. Not a coroutine when the webhook
    was constructed with a :class:`RequestsWebhookAdapter`.

    ``content`` may be any type convertible with ``str()``.  A single
    upload goes through ``file`` (a :class:`File`); multiple uploads
    through ``files``.  A single rich embed goes through ``embed``; a
    list of up to 10 through ``embeds``.  ``file``/``files`` and
    ``embed``/``embeds`` are mutually exclusive pairs.

    Parameters
    ------------
    content: :class:`str`
        The content of the message to send.
    wait: :class:`bool`
        Whether the server should wait before sending a response. When
        ``True`` the return type changes from ``None`` to :class:`Message`.
    username: :class:`str`
        Overrides the webhook's default username for this message.
    avatar_url: Union[:class:`str`, :class:`Asset`]
        Overrides the webhook's default avatar for this message.
    tts: :class:`bool`
        Indicates if the message should be sent using text-to-speech.
    file: :class:`File`
        The file to upload. Cannot be mixed with ``files``.
    files: List[:class:`File`]
        A list of files to send. Cannot be mixed with ``file``.
    embed: :class:`Embed`
        The rich embed to send. Cannot be mixed with ``embeds``.
    embeds: List[:class:`Embed`]
        Up to 10 embeds to send. Cannot be mixed with ``embed``.

    Raises
    --------
    HTTPException
        Sending the message failed.
    NotFound
        This webhook was not found.
    Forbidden
        The authorization token for the webhook is incorrect.
    InvalidArgument
        You specified both ``embed`` and ``embeds`` or the length of
        ``embeds`` was invalid.

    Returns
    ---------
    Optional[:class:`Message`]
        The message that was sent.
    """
    # Reject mutually exclusive argument combinations up front.
    if files is not None and file is not None:
        raise InvalidArgument('Cannot mix file and files keyword arguments.')
    if embeds is not None and embed is not None:
        raise InvalidArgument('Cannot mix embed and embeds keyword arguments.')

    payload = {}
    if embeds is not None:
        if len(embeds) > 10:
            raise InvalidArgument('embeds has a maximum of 10 elements.')
        payload['embeds'] = [e.to_dict() for e in embeds]
    if embed is not None:
        payload['embeds'] = [embed.to_dict()]
    if content is not None:
        payload['content'] = str(content)
    payload['tts'] = tts
    if avatar_url:
        payload['avatar_url'] = str(avatar_url)
    if username:
        payload['username'] = username

    return self._adapter.execute_webhook(wait=wait, file=file, files=files, payload=payload)
|maybecoro| Sends a message using the webhook. If the webhook is constructed with a :class:`RequestsWebhookAdapter` then this is not a coroutine. The content must be a type that can convert to a string through ``str(content)``. To upload a single file, the ``file`` parameter should be used with a single :class:`File` object. If the ``embed`` parameter is provided, it must be of type :class:`Embed` and it must be a rich embed type. You cannot mix the ``embed`` parameter with the ``embeds`` parameter, which must be a :class:`list` of :class:`Embed` objects to send. Parameters ------------ content: :class:`str` The content of the message to send. wait: :class:`bool` Whether the server should wait before sending a response. This essentially means that the return type of this function changes from ``None`` to a :class:`Message` if set to ``True``. username: :class:`str` The username to send with this message. If no username is provided then the default username for the webhook is used. avatar_url: Union[:class:`str`, :class:`Asset`] The avatar URL to send with this message. If no avatar URL is provided then the default avatar for the webhook is used. tts: :class:`bool` Indicates if the message should be sent using text-to-speech. file: :class:`File` The file to upload. This cannot be mixed with ``files`` parameter. files: List[:class:`File`] A list of files to send with the content. This cannot be mixed with the ``file`` parameter. embed: :class:`Embed` The rich embed for the content to send. This cannot be mixed with ``embeds`` parameter. embeds: List[:class:`Embed`] A list of embeds to send with the content. Maximum of 10. This cannot be mixed with the ``embed`` parameter. Raises -------- HTTPException Sending the message failed. NotFound This webhook was not found. Forbidden The authorization token for the webhook is incorrect. InvalidArgument You specified both ``embed`` and ``embeds`` or the length of ``embeds`` was invalid. 
Returns --------- Optional[:class:`Message`] The message that was sent.
def build(ctx, less=False, docs=False, js=False, force=False):
    """Build everything and collectstatic.

    Each flag (``less``, ``docs``, ``js``) limits the build to that
    artifact; when none is given, everything is built.  ``force``
    rebuilds even when the sources appear unchanged.

    :param ctx: invoke context carrying package metadata in ``ctx.pkg``.
    """
    specified = any([less, docs, js])
    buildall = not specified
    if buildall or less:
        # NOTE(review): assumes `ctx.pkg.source_less / ctx.pkg.name`
        # yields an object supporting `+ '.less'` -- a plain pathlib.Path
        # would raise TypeError here; confirm against the ctx.pkg type.
        less_fname = ctx.pkg.source_less / ctx.pkg.name + '.less'
        if less_fname.exists():
            lessc.LessRule(
                ctx,
                src='{pkg.source_less}/{pkg.name}.less',
                dst='{pkg.django_static}/{pkg.name}/css/{pkg.name}-{version}.min.css',
                force=force
            )
        elif less:
            # Only warn when --less was explicitly requested.
            print("WARNING: build --less specified, but no file at:", less_fname)
    if buildall or docs:
        if WARN_ABOUT_SETTINGS:
            warnings.warn(
                "autodoc might need a dummy settings file in the root of "
                "your package. Since it runs in a separate process you cannot"
                "use settings.configure()"
            )
        doctools.build(ctx, force=force)
    if buildall or js:
        build_js(ctx, force)
    # Collect static files only when Django settings are available and
    # something changed (or force was given).
    if HAVE_SETTINGS and (force or changed(ctx.pkg.django_static)):
        collectstatic(ctx, DJANGO_SETTINGS_MODULE)
Build everything and collectstatic.
def parse_string(self):
    """Tokenize a Fortran string.

    Handles doubled (escaped) delimiters and strings continued across a
    line break: when interrupted by a newline, the opening delimiter is
    remembered in ``self.prior_delim`` so the next call resumes the same
    string.
    """
    word = ''
    if self.prior_delim:
        # Resuming a string continued from the previous line.
        delim = self.prior_delim
        self.prior_delim = None
    else:
        # Opening delimiter is the current character; it is part of the
        # token text.
        delim = self.char
        word += self.char
        self.update_chars()

    while True:
        if self.char == delim:
            # Check for escaped delimiters
            self.update_chars()
            if self.char == delim:
                # Doubled delimiter: a literal quote inside the string.
                word += 2 * delim
                self.update_chars()
            else:
                # Genuine closing delimiter: token complete.
                word += delim
                break
        elif self.char == '\n':
            # String continues on the next line; remember the delimiter.
            self.prior_delim = delim
            break
        else:
            word += self.char
            self.update_chars()

    return word
Tokenize a Fortran string.
def query(params):
    """Query putian hospital data; *params* is a city name, optionally
    followed by a hospital short name.

    CLI:
    1. query all putian hospitals in a city:
        $ iquery -p 南京
        +------+
        | 南京 |
        +------+
        |...   |
        +------+
        |...   |
        +------+
        ...

    2. query if the hospital in the city is putian series, you can only
       input hospital's short name:
        $ iquery -p 南京 曙光
        +------------+
        |南京曙光医院|
        +------------+
        |   True     |
        +------------+
    """
    response = requests_get(QUERY_URL, verify=True)
    return HospitalCollection(response.json(), params)
`params` is a city name or a city name + hospital name. CLI: 1. query all putian hospitals in a city: $ iquery -p 南京 +------+ | 南京 | +------+ |... | +------+ |... | +------+ ... 2. query if the hospital in the city is putian series, you can only input hospital's short name: $ iquery -p 南京 曙光 +------------+ |南京曙光医院| +------------+ | True | +------------+
def get_summary_and_description(self):
    """
    Compat: drf-yasg 1.12+

    Keeps the base class's description but substitutes this view's own
    summary.
    """
    summary = self.get_summary()
    _ignored, description = super().get_summary_and_description()
    return summary, description
Compat: drf-yasg 1.12+
def simplify_recursive(typ):
    # type: (AbstractType) -> AbstractType
    """Simplify a type together with all of its component types."""
    if isinstance(typ, UnionType):
        return combine_types(typ.items)
    elif isinstance(typ, ClassType):
        simplified = ClassType(typ.name,
                               [simplify_recursive(arg) for arg in typ.args])
        args = simplified.args
        # A Dict[str, <non-optional union>] looks like a potential
        # TypedDict, which is not properly supported yet -- degrade the
        # value type to Any.
        looks_like_typed_dict = (
            simplified.name == 'Dict'
            and len(args) == 2
            and isinstance(args[0], ClassType)
            and args[0].name in ('str', 'Text')
            and isinstance(args[1], UnionType)
            and not is_optional(args[1])
        )
        if looks_like_typed_dict:
            return ClassType('Dict', [args[0], AnyType()])
        return simplified
    elif isinstance(typ, TupleType):
        return TupleType([simplify_recursive(item) for item in typ.items])
    return typ
Simplify all components of a type.
def get_port_channel_detail_output_lacp_aggr_member_interface_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_port_channel_detail = ET.Element("get_port_channel_detail") config = get_port_channel_detail output = ET.SubElement(get_port_channel_detail, "output") lacp = ET.SubElement(output, "lacp") aggr_member = ET.SubElement(lacp, "aggr-member") interface_name = ET.SubElement(aggr_member, "interface-name") interface_name.text = kwargs.pop('interface_name') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def _load_ini(path): """ Load an INI file from *path*. """ cfg = RawConfigParser() with codecs.open(path, mode="r", encoding="utf-8") as f: try: cfg.read_file(f) except AttributeError: cfg.readfp(f) return cfg
Load an INI file from *path*.
def particle_clusters(
        particle_locations, particle_weights=None,
        eps=0.5, min_particles=5, metric='euclidean',
        weighted=False, w_pow=0.5,
        quiet=True
):
    """
    Yields an iterator onto tuples ``(cluster_label, cluster_particles)``,
    where ``cluster_label`` is an `int` identifying the cluster (or
    ``NOISE`` for the particles lying outside of all clusters), and where
    ``cluster_particles`` is an array of ``dtype`` `bool` specifying the
    indices of all particles in that cluster. That is, particle ``i`` is
    in the cluster if ``cluster_particles[i] == True``.

    :param particle_locations: array of particle positions (one row each).
    :param particle_weights: per-particle weights; required iff ``weighted``.
    :param eps: DBSCAN neighborhood size.
    :param min_particles: minimum particles per cluster.
    :param metric: distance metric name understood by scikit-learn.
    :param weighted: whether to reweight the pairwise distances.
    :param w_pow: exponent used when reweighting distances.
    :param quiet: suppress the diagnostic summary printout.
    :raises ValueError: if ``weighted`` is set without weights.
    """
    if weighted and particle_weights is None:
        raise ValueError("Weights must be specified for weighted clustering.")

    # Fixed: removed two np.empty scratch arrays that were never used and
    # crashed with AttributeError whenever particle_weights was None (the
    # documented unweighted default).

    # Calculate and possibly reweight the metric.
    if weighted:
        M = sklearn.metrics.pairwise.pairwise_distances(particle_locations, metric=metric)
        M = metrics.weighted_pairwise_distances(M, particle_weights, w_pow=w_pow)

        # Create and run a SciKit-Learn DBSCAN clusterer.
        clusterer = sklearn.cluster.DBSCAN(
            min_samples=min_particles,
            eps=eps,
            metric='precomputed'
        )
        cluster_labels = clusterer.fit_predict(M)
    else:
        clusterer = sklearn.cluster.DBSCAN(
            min_samples=min_particles,
            eps=eps,
            metric=metric
        )
        cluster_labels = clusterer.fit_predict(particle_locations)

    # Find out how many clusters were identified.
    # Cluster counting logic from:
    # [http://scikit-learn.org/stable/auto_examples/cluster/plot_dbscan.html].
    is_noise = -1 in cluster_labels
    n_clusters = len(set(cluster_labels)) - (1 if is_noise else 0)

    # If more than 10% of the particles were labeled as NOISE, warn.
    # Fixed: the fraction is computed against the particle count, not
    # particle_weights (which may be None in the unweighted path).
    n_noise = np.sum(cluster_labels == -1)
    if n_noise / particle_locations.shape[0] >= 0.1:
        warnings.warn("More than 10% of the particles were classified as NOISE. Consider increasing the neighborhood size ``eps``.", ResamplerWarning)

    # Print debugging info.
    if not quiet:
        print("[Clustering] DBSCAN identified {} cluster{}. "\
            "{} particles identified as NOISE.".format(
                n_clusters, "s" if n_clusters > 1 else "", n_noise
            ))

    # Loop over clusters, calling the secondary resampler for each.
    # The loop should include -1 if noise was found.
    for idx_cluster in range(-1 if is_noise else 0, n_clusters):
        # Grab a boolean array identifying the particles in a particular
        # cluster.
        this_cluster = cluster_labels == idx_cluster
        yield idx_cluster, this_cluster
Yields an iterator onto tuples ``(cluster_label, cluster_particles)``, where ``cluster_label`` is an `int` identifying the cluster (or ``NOISE`` for the particles lying outside of all clusters), and where ``cluster_particles`` is an array of ``dtype`` `bool` specifying the indices of all particles in that cluster. That is, particle ``i`` is in the cluster if ``cluster_particles[i] == True``.
def _run_introspection(self, runtime='', whitelist=[], verbose=False): """ Figure out which objects are opened by a test binary and are matched by the white list. :param runtime: The binary to run. :type runtime: str :param whitelist: A list of regular expressions describing acceptable library names :type whitelist: [str] """ found_objects = set() try: # Retrieve list of successfully opened objects strace = subprocess.Popen(['strace', runtime], stderr=subprocess.PIPE, stdout=subprocess.PIPE) (_, stderr) = strace.communicate() opened_objects = set() for line in stderr.split('\n'): if 'open' in line and 'ENOENT' not in line: start = line.index('"') end = line.index('"', start + 1) opened_objects.add(line[start + 1:end]) # filter opened objects through white list. for obj in opened_objects: for wl in whitelist: m = re.match('.*' + wl + '[\..*]?', obj) if m: found_objects.add(obj) if verbose: print('Found whitelisted {} at path {}'.format(wl, obj)) continue except Exception as e: print e return found_objects
Figure out which objects are opened by a test binary and are matched by the white list. :param runtime: The binary to run. :type runtime: str :param whitelist: A list of regular expressions describing acceptable library names :type whitelist: [str]
def save_translations(self, *args, **kwargs): """ The method to save all translations. This can be overwritten to implement any custom additions. This method calls :func:`save_translation` for every fetched language. :param args: Any custom arguments to pass to :func:`save`. :param kwargs: Any custom arguments to pass to :func:`save`. """ # Copy cache, new objects (e.g. fallbacks) might be fetched if users override save_translation() # Not looping over the cache, but using _parler_meta so the translations are processed in the order of inheritance. local_caches = self._translations_cache.copy() for meta in self._parler_meta: local_cache = local_caches[meta.model] translations = list(local_cache.values()) # Save all translated objects which were fetched. # This also supports switching languages several times, and save everything in the end. for translation in translations: if is_missing(translation): # Skip fallback markers continue self.save_translation(translation, *args, **kwargs)
The method to save all translations. This can be overwritten to implement any custom additions. This method calls :func:`save_translation` for every fetched language. :param args: Any custom arguments to pass to :func:`save`. :param kwargs: Any custom arguments to pass to :func:`save`.
def room_members(self, stream_id):
    '''Retrieve the membership list of a room.

    :param stream_id: the room (stream) identifier
    :return: tuple of (HTTP status code, decoded response)
    '''
    endpoint = 'pod/v2/room/' + str(stream_id) + '/membership/list'
    status_code, response = self.__rest__.GET_query(endpoint, None)
    self.logger.debug('%s: %s' % (status_code, response))
    return status_code, response
get list of room members
def get_timespan(name):
    """
    Extract the time span of a Tplot variable stored in memory.

    Parameters:
        name : str
            Name of the tplot variable

    Returns:
        time_begin : float
            The beginning of the time series
        time_end : float
            The end of the time series

    Examples:
        >>> # Retrieve the time span from Variable 1
        >>> import pytplot
        >>> x_data = [1,2,3,4,5]
        >>> y_data = [1,2,3,4,5]
        >>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
        >>> time1, time2 = pytplot.get_timespan("Variable1")
    """
    if name not in data_quants.keys():
        print("That name is currently not in pytplot")
        return

    trange = data_quants[name].trange
    print("Start Time: " + tplot_utilities.int_to_str(trange[0]))
    print("End Time: " + tplot_utilities.int_to_str(trange[1]))
    return trange[0], trange[1]
This function extracts the time span from the Tplot Variables stored in memory. Parameters: name : str Name of the tplot variable Returns: time_begin : float The beginning of the time series time_end : float The end of the time series Examples: >>> # Retrieve the time span from Variable 1 >>> import pytplot >>> x_data = [1,2,3,4,5] >>> y_data = [1,2,3,4,5] >>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data}) >>> time1, time2 = pytplot.get_timespan("Variable1")
def _parse_date_time_time_zone(self, date_time_time_zone):
    """ Parses and convert to protocol timezone a dateTimeTimeZone resource

    This resource is a dict with a date time and a windows timezone
    This is a common structure on Microsoft apis so it's included here.

    :param date_time_time_zone: dict with ``dateTime``/``timeZone`` keys,
        a plain datetime string (Outlook v1.0 fallback), or None.
    :return: timezone-aware datetime in the protocol timezone, or None.
    """
    if date_time_time_zone is None:
        return None

    local_tz = self.protocol.timezone
    if isinstance(date_time_time_zone, dict):
        try:
            # Map the Windows timezone name to an IANA one; unknown
            # names fall back to the protocol timezone.
            timezone = pytz.timezone(
                get_iana_tz(date_time_time_zone.get(self._cc('timeZone'), 'UTC')))
        except pytz.UnknownTimeZoneError:
            timezone = local_tz
        date_time = date_time_time_zone.get(self._cc('dateTime'), None)
        try:
            # Localize in the resource's own timezone first.
            date_time = timezone.localize(parse(date_time)) if date_time else None
        except OverflowError as e:
            log.debug('Could not parse dateTimeTimeZone: {}. Error: {}'.format(date_time_time_zone, str(e)))
            date_time = None
        if date_time and timezone != local_tz:
            # Normalize to the protocol timezone.
            date_time = date_time.astimezone(local_tz)
    else:
        # Outlook v1.0 api compatibility (fallback to datetime string)
        try:
            date_time = local_tz.localize(parse(date_time_time_zone)) if date_time_time_zone else None
        except Exception as e:
            log.debug('Could not parse dateTimeTimeZone: {}. Error: {}'.format(date_time_time_zone, str(e)))
            date_time = None

    return date_time
Parses and convert to protocol timezone a dateTimeTimeZone resource This resource is a dict with a date time and a windows timezone This is a common structure on Microsoft apis so it's included here.
def try_get_department(department_or_code):
    """
    Try to take the first department code, or fall back to string as passed.

    Known codes are then translated through ``DEPARTMENT_MAPPING``;
    unknown values are returned unchanged.
    """
    try:
        value = take_first_department_code(department_or_code)
    except AssertionError:
        # Not a parseable code: use the raw string.
        value = department_or_code
    return DEPARTMENT_MAPPING.get(value, value)
Try to take the first department code, or fall back to string as passed
def add_subcomponent(self, name):
    """
    Create an instance of :class:`SubComponent <hl7apy.core.SubComponent>` having the given name

    :param name: the name of the subcomponent to be created (e.g. CE_1)
    :return: an instance of :class:`SubComponent <hl7apy.core.SubComponent>`

    >>> c = Component(datatype='CE')
    >>> ce_1 = c.add_subcomponent('CE_1')
    >>> print(ce_1)
    <SubComponent CE_1>
    >>> print(ce_1 in c.children)
    True
    """
    # An unknown component of a base datatype cannot have children.
    if self.is_unknown() and is_base_datatype(self.datatype):
        raise ChildNotValid(name, self)
    return self.children.create_element(name)
Create an instance of :class:`SubComponent <hl7apy.core.SubComponent>` having the given name :param name: the name of the subcomponent to be created (e.g. CE_1) :return: an instance of :class:`SubComponent <hl7apy.core.SubComponent>` >>> c = Component(datatype='CE') >>> ce_1 = c.add_subcomponent('CE_1') >>> print(ce_1) <SubComponent CE_1> >>> print(ce_1 in c.children) True
def execute(tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', kwarg=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Execute ``fun`` on all minions matched by ``tgt`` and ``tgt_type``.
    Parameter ``fun`` is the name of execution module function to call.

    This function should mainly be used as a helper for runner modules,
    in order to avoid redundant code.
    For example, when inside a runner one needs to execute a certain
    function on arbitrary groups of minions, only has to:

    .. code-block:: python

        ret1 = __salt__['salt.execute']('*', 'mod.fun')
        ret2 = __salt__['salt.execute']('my_nodegroup', 'mod2.fun2', tgt_type='nodegroup')

    It can also be used to schedule jobs directly on the master, for example:

    .. code-block:: yaml

        schedule:
            collect_bgp_stats:
                function: salt.execute
                args:
                    - edge-routers
                    - bgp.neighbors
                kwargs:
                    tgt_type: nodegroup
                days: 1
                returner: redis
    '''
    client = salt.client.get_local_client(__opts__['conf_file'])
    try:
        # Fall back to the master's configured timeout when none given.
        result = client.cmd(tgt,
                            fun,
                            arg=arg,
                            timeout=timeout or __opts__['timeout'],
                            tgt_type=tgt_type,  # no warn_until, as this is introduced only in 2017.7.0
                            ret=ret,
                            jid=jid,
                            kwarg=kwarg,
                            **kwargs)
    except SaltClientError as client_error:
        log.error('Error while executing %s on %s (%s)', fun, tgt, tgt_type)
        log.error(client_error)
        return {}
    return result
.. versionadded:: 2017.7.0 Execute ``fun`` on all minions matched by ``tgt`` and ``tgt_type``. Parameter ``fun`` is the name of execution module function to call. This function should mainly be used as a helper for runner modules, in order to avoid redundant code. For example, when inside a runner one needs to execute a certain function on arbitrary groups of minions, only has to: .. code-block:: python ret1 = __salt__['salt.execute']('*', 'mod.fun') ret2 = __salt__['salt.execute']('my_nodegroup', 'mod2.fun2', tgt_type='nodegroup') It can also be used to schedule jobs directly on the master, for example: .. code-block:: yaml schedule: collect_bgp_stats: function: salt.execute args: - edge-routers - bgp.neighbors kwargs: tgt_type: nodegroup days: 1 returner: redis
def generate_component_annotation_miriam_match(elements, component, db):
    """
    Tabulate which MIRIAM databases the element's annotation match.

    If the relevant MIRIAM identifier is not in an element's annotation it
    is ignored.

    Parameters
    ----------
    elements : list
        Elements of a model, either metabolites or reactions.
    component : {"metabolites", "reactions"}
        A string denoting a type of ``cobra.Model`` component.
    db : str
        One of the MIRIAM database identifiers.

    Returns
    -------
    list
        The components whose annotation does not match the pattern for
        the MIRIAM database.
    """
    pattern = {
        "metabolites": METABOLITE_ANNOTATIONS,
        "reactions": REACTION_ANNOTATIONS,
        "genes": GENE_PRODUCT_ANNOTATIONS
    }[component][db]

    def is_faulty(annotation, key, pattern):
        # Elements without an annotation for this database are ignored.
        if key not in annotation:
            return False
        test = annotation[key]
        if isinstance(test, native_str):
            return pattern.match(test) is None
        # Collections fail if any entry does not match the pattern.
        return any(pattern.match(elem) is None for elem in test)

    return [elem for elem in elements
            if is_faulty(elem.annotation, db, pattern)]
Tabulate which MIRIAM databases the element's annotation match. If the relevant MIRIAM identifier is not in an element's annotation it is ignored. Parameters ---------- elements : list Elements of a model, either metabolites or reactions. component : {"metabolites", "reactions"} A string denoting a type of ``cobra.Model`` component. db : str One of the MIRIAM database identifiers. Returns ------- list The components whose annotation does not match the pattern for the MIRIAM database.
def add_overlay_to_slice_file(
        self,
        filename,
        overlay,
        i_overlay,
        filename_out=None
):
    """Add an overlay into an existing DICOM slice file.

    :param filename: path of the input DICOM file (``~`` is expanded)
    :param overlay: overlay data to encode into the slice
    :param i_overlay: index of the overlay
    :param filename_out: output path; when None the input file is
        overwritten (using the path exactly as passed, without expansion)
    """
    out_path = filename if filename_out is None else filename_out
    src_path = op.expanduser(filename)
    dataset = dicom.read_file(src_path)
    dataset = self.encode_overlay_slice(dataset, overlay, i_overlay)
    dataset.save_as(out_path)
Function adds overlay to existing file.
def fetch_twitter_lists_for_user_ids_generator(twitter_app_key, twitter_app_secret, user_id_list):
    """
    Collects at most 500 Twitter lists for each user from an input list of Twitter user ids.

    Inputs: - twitter_app_key: What it says on the tin.
            - twitter_app_secret: Ditto.
            - user_id_list: A python list of Twitter user ids.

    Yields: - user_twitter_id: A Twitter user id.
            - twitter_lists_list: A python list containing Twitter lists in dictionary (json) format,
              or None when the request for that user failed.
    """
    ####################################################################################################################
    # Log into my application.
    ####################################################################################################################
    twitter = login(twitter_app_key, twitter_app_secret)

    ####################################################################################################################
    # For each user, gather at most 500 Twitter lists.
    ####################################################################################################################
    # Rate-limit bookkeeping shared across all requests in this run.
    get_list_memberships_counter = 0
    get_list_memberships_time_window_start = time.perf_counter()
    for user_twitter_id in user_id_list:
        # Make safe twitter request (handles rate limiting and retries).
        try:
            twitter_lists_list, get_list_memberships_counter, get_list_memberships_time_window_start\
                = safe_twitter_request_handler(twitter_api_func=twitter.get_list_memberships,
                                               call_rate_limit=15,
                                               call_counter=get_list_memberships_counter,
                                               time_window_start=get_list_memberships_time_window_start,
                                               max_retries=5,
                                               wait_period=2,
                                               user_id=user_twitter_id,
                                               count=500,
                                               cursor=-1)
            # If the call is successful, yield the list of Twitter lists.
            yield user_twitter_id, twitter_lists_list
        except twython.TwythonError:
            # If the call is unsuccessful, we do not have any Twitter lists to store.
            yield user_twitter_id, None
        except URLError:
            # If the call is unsuccessful, we do not have any Twitter lists to store.
            yield user_twitter_id, None
        except BadStatusLine:
            # If the call is unsuccessful, we do not have any Twitter lists to store.
            yield user_twitter_id, None
Collects at most 500 Twitter lists for each user from an input list of Twitter user ids. Inputs: - twitter_app_key: What it says on the tin. - twitter_app_secret: Ditto. - user_id_list: A python list of Twitter user ids. Yields: - user_twitter_id: A Twitter user id. - twitter_lists_list: A python list containing Twitter lists in dictionary (json) format.
def search(self, keyword):
    """Return all buildings related to the provided query.

    :param keyword: The keyword for your map search

    >>> results = n.search('Harrison')
    """
    params = {"source": "map", "description": keyword}
    data = self._request(ENDPOINTS['SEARCH'], params)
    # Drop any non-dict entries the endpoint may return.
    filtered = [entry for entry in data['result_data'] if isinstance(entry, dict)]
    data['result_data'] = filtered
    return data
Return all buildings related to the provided query. :param keyword: The keyword for your map search >>> results = n.search('Harrison')
def send_static_message(sender, message):
    """Send a static message to the listeners.

    Static messages represents a whole new message. Usually it will
    replace the previous message.

    .. versionadded:: 3.3

    :param sender: The sender.
    :type sender: object

    :param message: An instance of our rich message class.
    :type message: safe.messaging.Message
    """
    # Broadcast via pydispatch; every listener subscribed to
    # STATIC_MESSAGE_SIGNAL receives the (sender, message) pair.
    dispatcher.send(
        signal=STATIC_MESSAGE_SIGNAL, sender=sender, message=message)
Send a static message to the listeners. Static messages represents a whole new message. Usually it will replace the previous message. .. versionadded:: 3.3 :param sender: The sender. :type sender: object :param message: An instance of our rich message class. :type message: safe.messaging.Message
def _parse_cod_segment(cls, fptr):
    """Parse the COD segment.

    Parameters
    ----------
    fptr : file
        Open file object, positioned just past the COD marker bytes.

    Returns
    -------
    CODSegment
        The current COD segment.
    """
    # The two marker bytes were already consumed by the caller.
    offset = fptr.tell() - 2

    # Segment length field (includes the two length bytes themselves).
    read_buffer = fptr.read(2)
    length, = struct.unpack('>H', read_buffer)

    # Fixed-size header: Scod, progression order, number of layers, MCT,
    # decomposition levels, code-block width/height exponents, code-block
    # style, and wavelet transform.
    read_buffer = fptr.read(length - 2)
    lst = struct.unpack_from('>BBHBBBBBB', read_buffer, offset=0)
    scod, prog, nlayers, mct, nr, xcb, ycb, cstyle, xform = lst

    # Any bytes beyond the fixed header encode per-resolution precinct sizes.
    if len(read_buffer) > 10:
        precinct_size = _parse_precinct_size(read_buffer[10:])
    else:
        precinct_size = None

    # Bits 1 and 2 of Scod flag SOP and EPH marker usage respectively.
    sop = (scod & 2) > 0
    eph = (scod & 4) > 0

    # Remember on the class whether tile-part parsing needs SOP/EPH handling.
    if sop or eph:
        cls._parse_tpart_flag = True
    else:
        cls._parse_tpart_flag = False

    pargs = (scod, prog, nlayers, mct, nr, xcb, ycb, cstyle, xform,
             precinct_size)
    return CODsegment(*pargs, length=length, offset=offset)
Parse the COD segment. Parameters ---------- fptr : file Open file object. Returns ------- CODSegment The current COD segment.
def get(self, option, default=undefined, cast=undefined):
    """Return the value for *option*, or *default* when it is absent.

    Raises UndefinedValueError when the option is missing and no default
    was supplied.  When *cast* is ``bool`` a boolean-aware converter is
    used; any other callable is applied as-is; when omitted the raw value
    is returned unchanged.
    """
    value = self.repository.get(option) if option in self.repository else default

    if isinstance(value, Undefined):
        raise UndefinedValueError('%s option not found and default value was not defined.' % option)

    if cast is bool:
        cast = self._cast_boolean
    elif isinstance(cast, Undefined):
        def cast(v):
            # No conversion requested; hand the value back untouched.
            return v

    return cast(value)
Return the value for option or default if defined.
def record(self, frame_parameters: dict=None, channels_enabled: typing.List[bool]=None, timeout: float=None) -> typing.List[DataAndMetadata.DataAndMetadata]:
    """Record data and return a list of data_and_metadata objects.

    .. versionadded:: 1.0

    :param frame_parameters: The frame parameters for the record. Pass None for defaults.
    :type frame_parameters: :py:class:`FrameParameters`
    :param channels_enabled: The enabled channels for the record. Pass None for defaults.
    :type channels_enabled: List of booleans.
    :param timeout: The timeout in seconds. Pass None to use default.
    :return: The list of data and metadata items that were read.
    :rtype: list of :py:class:`DataAndMetadata`
    """
    # Apply requested frame parameters before starting the recording.
    if frame_parameters:
        self.__hardware_source.set_record_frame_parameters(self.__hardware_source.get_frame_parameters_from_dict(frame_parameters))
    # Enable/disable channels positionally from the boolean list.
    if channels_enabled is not None:
        for channel_index, channel_enabled in enumerate(channels_enabled):
            self.__hardware_source.set_channel_enabled(channel_index, channel_enabled)
    self.__hardware_source.start_recording()
    # Blocks until the recording finishes or the timeout elapses.
    return self.__hardware_source.get_next_xdatas_to_finish(timeout)
Record data and return a list of data_and_metadata objects. .. versionadded:: 1.0 :param frame_parameters: The frame parameters for the record. Pass None for defaults. :type frame_parameters: :py:class:`FrameParameters` :param channels_enabled: The enabled channels for the record. Pass None for defaults. :type channels_enabled: List of booleans. :param timeout: The timeout in seconds. Pass None to use default. :return: The list of data and metadata items that were read. :rtype: list of :py:class:`DataAndMetadata`
def do( self, params ):
    """Run the underlying experiment the configured number of times.

    The results returned will be a list of the results dicts generated
    by the repeated experiments. Each result's metadata gains an entry
    :attr:`RepeatedExperiment.REPETITIONS` for the number of repetitions
    that occurred (the length of this list) and an entry
    :attr:`RepeatedExperiment.I` for the index of the result in that
    sequence.

    :param params: the parameters to the experiment
    :returns: a list of result dicts"""
    total = self.repetitions()
    experiment = self.experiment()
    collected = []
    for rep in range(total):
        outcome = experiment.run()
        # Normalise a single result dict into a one-element list.
        if not isinstance(outcome, list):
            outcome = [ outcome ]
        # Tag every result with its repetition metadata.
        for result in outcome:
            result[Experiment.METADATA][self.I] = rep
            result[Experiment.METADATA][self.REPETITIONS] = total
        collected.extend(outcome)
    return collected
Perform the number of repetitions we want. The results returned will be a list of the results dicts generated by the repeated experiments. The metadata for each experiment will include an entry :attr:`RepeatedExperiment.REPETITIONS` for the number of repetitions that occurred (which will be the length of this list) and an entry :attr:`RepeatedExperiment.I` for the index of the result in that sequence. :param params: the parameters to the experiment :returns: a list of result dicts
def zsum(s, *args, **kwargs):
    """Sum a Series, returning 0 (not NaN) when it is empty.

    pandas 0.21.0 changes sum() behavior so that the result of applying sum
    over an empty DataFrame is NaN.

    Meant to be set as pd.Series.zsum = zsum.
    """
    if s.empty:
        return 0
    return s.sum(*args, **kwargs)
pandas 0.21.0 changes sum() behavior so that the result of applying sum over an empty DataFrame is NaN. Meant to be set as pd.Series.zsum = zsum.
def dict_contents(self, use_dict=None, as_class=dict):
    """Return the contents of an object as a dict.

    :param use_dict: existing mapping to extend; a new *as_class* instance
        is created when None.
    :param as_class: mapping type to instantiate when *use_dict* is None.
    :returns: the populated mapping.
    """
    if _debug: APDU._debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class)

    # make/extend the dictionary of content
    if use_dict is None:
        use_dict = as_class()

    # call the parent classes to fill in both the APCI and APDU portions
    self.apci_contents(use_dict=use_dict, as_class=as_class)
    self.apdu_contents(use_dict=use_dict, as_class=as_class)

    # return what we built/updated
    return use_dict
Return the contents of an object as a dict.
def backup(self, id=None, src=None, timestamp=None): """ This runs a backup job outside of the storage api, which is useful for performance testing backups """ # Set basic Logging logging.basicConfig() # Get the lunr logger log = logger.get_logger() # Output Debug level info log.logger.setLevel(logging.DEBUG) # Load the local storage configuration conf = LunrConfig.from_storage_conf() # If no time provided, use current time timestamp = timestamp or time() # Init our helpers volume = VolumeHelper(conf) backup = BackupHelper(conf) try: # Create the snapshot snapshot = volume.create_snapshot(src, id, timestamp) # For testing non-snapshot speeds # snapshot = volume.get(src) # snapshot['backup_id'] = id # snapshot['origin'] = src # snapshot['timestamp'] = 1338410885.0 # del snapshot['volume'] print("Created snap-shot: ", pprint(snapshot)) with self.timeit(snapshot['size']): # Backup the snapshot print("Starting Backup") backup.save(snapshot, id) finally: # Delete the snapshot if it was created if 'snapshot' in locals(): self._remove_volume(snapshot['path'])
This runs a backup job outside of the storage api, which is useful for performance testing backups
def set_split_extents_by_tile_shape(self):
    """
    Sets split shape :attr:`split_shape` and split extents
    (:attr:`split_begs` and :attr:`split_ends`) from the value of
    :attr:`tile_shape`.
    """
    # Ceiling division: number of tiles needed to cover each axis.
    self.split_shape = ((self.array_shape - 1) // self.tile_shape) + 1

    begs = []
    ends = []
    for axis in range(len(self.array_shape)):
        # Tile start offsets along this axis.
        axis_begs = _np.arange(0, self.array_shape[axis], self.tile_shape[axis])
        # Each tile ends where the next begins; the last tile ends at the
        # axis length.
        axis_ends = _np.zeros_like(axis_begs)
        axis_ends[0:-1] = axis_begs[1:]
        axis_ends[-1] = self.array_shape[axis]
        begs.append(axis_begs)
        ends.append(axis_ends)

    self.split_begs = begs
    self.split_ends = ends
Sets split shape :attr:`split_shape` and split extents (:attr:`split_begs` and :attr:`split_ends`) from value of :attr:`tile_shape`.
def index_table(self, axis=None, baseline=None, prune=False):
    """Return index percentages for a given axis and baseline.

    The index values represent the difference of the percentages to the
    corresponding baseline values. The baseline values are the univariate
    percentages of the corresponding variable.

    :param axis: axis along which proportions are computed.
    :param baseline: explicit baseline values; derived from *axis* when None.
    :param prune: when True, apply the pruning mask to the result.
    """
    proportions = self.proportions(axis=axis)
    baseline = (
        baseline if baseline is not None else self._prepare_index_baseline(axis)
    )

    # Fix the shape to enable correct broadcasting
    if (
        axis == 0 and
        len(baseline.shape) <= 1 and
        self.ndim == len(self.get_shape())
    ):
        baseline = baseline[:, None]

    indexes = proportions / baseline * 100
    return self._apply_pruning_mask(indexes) if prune else indexes
Return index percentages for a given axis and baseline. The index values represent the difference of the percentages to the corresponding baseline values. The baseline values are the univariate percentages of the corresponding variable.
def login(self): """ Logs the user in, returns the result Returns bool - Whether or not the user logged in successfully """ # Request index to obtain initial cookies and look more human pg = self.getPage("http://www.neopets.com") form = pg.form(action="/login.phtml") form.update({'username': self.username, 'password': self.password}) pg = form.submit() logging.getLogger("neolib.user").info("Login check", {'pg': pg}) return self.username in pg.content
Logs the user in, returns the result Returns bool - Whether or not the user logged in successfully
def _check_vpcs_version(self):
    """
    Checks that the VPCS executable version is >= 0.6.1.

    Runs ``vpcs -v``, parses the version from its banner and stores it in
    ``self._vpcs_version``.

    :raises VPCSError: if the version cannot be determined or is too old.
    """
    try:
        output = yield from subprocess_check_output(self._vpcs_path(), "-v", cwd=self.working_dir)
        match = re.search("Welcome to Virtual PC Simulator, version ([0-9a-z\.]+)", output)
        if match:
            version = match.group(1)
            self._vpcs_version = parse_version(version)
            # NOTE(review): the error message mentions 0.8 but only the
            # >= 0.6.1 bound is actually enforced here -- confirm intent.
            if self._vpcs_version < parse_version("0.6.1"):
                raise VPCSError("VPCS executable version must be >= 0.6.1 but not a 0.8")
        else:
            raise VPCSError("Could not determine the VPCS version for {}".format(self._vpcs_path()))
    except (OSError, subprocess.SubprocessError) as e:
        raise VPCSError("Error while looking for the VPCS version: {}".format(e))
Checks that the VPCS executable version is >= 0.6.1.
def get_accessibles(request, roles=None):
    """
    Returns the list of *dictionaries* for the accounts accessible
    by ``request.user``, filtered by ``roles`` if present.
    """
    accessible = []
    for role_name, organizations in six.iteritems(
            request.session.get('roles', {})):
        # Skip roles the caller did not ask for.
        if roles is not None and role_name not in roles:
            continue
        accessible += organizations
    return accessible
Returns the list of *dictionaries* for the accounts accessible by ``request.user``, filtered by ``roles`` if present.
def plot_dop(bands, int_max, dop, hund_cu, name):
    """Plot of Quasiparticle weight for N degenerate bands
    under selected doping shows transition only at half-fill
    the rest are metallic states

    :param bands: number of degenerate bands.
    :param int_max: upper bound of the interaction sweep (sampled in 0.1 steps).
    :param dop: doping values to evaluate.
    :param hund_cu: Hund coupling parameter forwarded to the solver.
    :param name: label passed through to the plotting helpers.
    """
    data = ssplt.calc_z(bands, dop, np.arange(0, int_max, 0.1), hund_cu, name)
    ssplt.plot_curves_z(data, name)
Plot of Quasiparticle weight for N degenerate bands under selected doping shows transition only at half-fill the rest are metallic states
def collapse_pair(graph, survivor: BaseEntity, victim: BaseEntity) -> None:
    """Rewire all edges from the synonymous node to the survivor node, then delete the synonymous node.

    Edges between the two nodes themselves are dropped rather than kept.

    :param pybel.BELGraph graph: A BEL graph
    :param survivor: The BEL node to collapse all edges on the synonym to
    :param victim: The BEL node to collapse into the surviving node
    """
    # Re-home the victim's incoming edges onto the survivor first.
    rewired_in = (
        (predecessor, survivor, key, data)
        for predecessor, _, key, data in graph.in_edges(victim, keys=True, data=True)
        if predecessor != survivor
    )
    graph.add_edges_from(rewired_in)

    # Then re-home its outgoing edges.
    rewired_out = (
        (survivor, successor, key, data)
        for _, successor, key, data in graph.out_edges(victim, keys=True, data=True)
        if successor != survivor
    )
    graph.add_edges_from(rewired_out)

    graph.remove_node(victim)
Rewire all edges from the synonymous node to the survivor node, then deletes the synonymous node. Does not keep edges between the two nodes. :param pybel.BELGraph graph: A BEL graph :param survivor: The BEL node to collapse all edges on the synonym to :param victim: The BEL node to collapse into the surviving node
def change_jira_status(test_key, test_status, test_comment, test_attachments):
    """Update test status in Jira

    :param test_key: test case key in Jira
    :param test_status: test case status
    :param test_comment: test case comments
    :param test_attachments: test case attachments
    """
    logger = logging.getLogger(__name__)

    if not execution_url:
        logger.warning("Test Case '%s' can not be updated: execution_url is not configured", test_key)
        return

    logger.info("Updating Test Case '%s' in Jira with status %s", test_key, test_status)
    composed_comments = comments
    if test_comment:
        composed_comments = '{}\n{}'.format(comments, test_comment) if comments else test_comment
    payload = {'jiraTestCaseId': test_key, 'jiraStatus': test_status, 'summaryPrefix': summary_prefix,
               'labels': labels, 'comments': composed_comments,
               'version': fix_version, 'build': build}
    if only_if_changes:
        payload['onlyIfStatusChanges'] = 'true'

    # Track every opened attachment so the file handles are always closed,
    # even when the request raises (the previous version leaked them).
    open_files = []
    try:
        if test_attachments and len(test_attachments) > 0:
            files = dict()
            for index in range(len(test_attachments)):
                attachment = open(test_attachments[index], 'rb')
                open_files.append(attachment)
                files['attachments{}'.format(index)] = attachment
        else:
            files = None
        response = requests.post(execution_url, data=payload, files=files)
    except Exception as e:
        logger.warning("Error updating Test Case '%s': %s", test_key, e)
        return
    finally:
        for attachment in open_files:
            attachment.close()

    if response.status_code >= 400:
        logger.warning("Error updating Test Case '%s': [%s] %s", test_key, response.status_code,
                       get_error_message(response.content))
    else:
        logger.debug("%s", response.content.decode().splitlines()[0])
Update test status in Jira :param test_key: test case key in Jira :param test_status: test case status :param test_comment: test case comments :param test_attachments: test case attachments
def get_score(self, terms):
    """Get score for a list of terms.

    :type terms: list
    :param terms: A list of terms to be analyzed.

    :returns: dict
    """
    assert isinstance(terms, (list, tuple))
    scores = np.asarray([self._get_score(term) for term in terms])
    # Positive and (negated) negative mass of the term scores.
    positive = np.sum(scores[scores > 0])
    negative = -np.sum(scores[scores < 0])
    total = positive + negative
    # EPSILON guards the divisions against an all-zero score list.
    polarity = (positive - negative) * 1.0 / (total + self.EPSILON)
    subjectivity = total * 1.0 / (len(scores) + self.EPSILON)
    return {
        self.TAG_POS: positive,
        self.TAG_NEG: negative,
        self.TAG_POL: polarity,
        self.TAG_SUB: subjectivity,
    }
Get score for a list of terms. :type terms: list :param terms: A list of terms to be analyzed. :returns: dict
def count_matches(self):
    """Set the matches_p, matches_c and rows attributes."""
    try:
        # An open file object takes priority; remember its name and rewind
        # it after reading so it can be reused.
        self.fn = self.fo.name
        rows = self.file_rows(self.fo)
        self.fo.seek(0)
    except AttributeError:
        # No usable file object -- fall back to opening self.fn.
        with open(self.fn) as handle:
            rows = self.file_rows(handle)

    per_row_p = [len(re.findall(DATPRX, row)) for row in rows]
    per_row_c = [len(re.findall(DATCRX, row)) for row in rows]

    self.rows = rows  # Is newlines in the end a problem?
    self.matches_p = per_row_p
    self.matches_c = per_row_c
Set the matches_p, matches_c and rows attributes.
def get_hook(hook_name):
    """Returns the specified hook.

    Args:
        hook_name (str)
    Returns:
        str - (the content of) the hook
    Raises:
        HookNotFoundError
    """
    if pkg_resources.resource_exists(__name__, hook_name):
        return pkg_resources.resource_string(__name__, hook_name)
    raise HookNotFoundError
Returns the specified hook. Args: hook_name (str) Returns: str - (the content of) the hook Raises: HookNotFoundError
def init_parser():
    """Build the command-line option parser and parse sys.argv.

    :returns: tuple of (parser, options, args).
    """
    usage = "usage: %prog -u user -s secret -n name [-l label] \
[-t title] [-c callback] [TEXT]"
    parser = OptionParser(usage, version="%prog " + notifo.__version__)
    # All plain string options share the same action/dest pattern.
    string_options = [
        ("-u", "--user", "user", "your notifo username"),
        ("-s", "--secret", "secret", "your notifo API secret"),
        ("-n", "--name", "name", "recipient for the notification"),
        ("-l", "--label", "label", "label for the notification"),
        ("-t", "--title", "title", "title of the notification"),
        ("-c", "--callback", "callback", "callback URL to call"),
    ]
    for short_flag, long_flag, dest, help_text in string_options:
        parser.add_option(short_flag, long_flag, action="store",
                          dest=dest, help=help_text)
    parser.add_option("-m", "--message", action="store_true", dest="message",
                      default=False, help="send message instead of notification")
    (options, args) = parser.parse_args()
    return (parser, options, args)
function to init option parser
def calculate_ellipse_description(covariance, scale = 2.0):
    """!
    @brief Calculates description of ellipse using covariance matrix.

    @param[in] covariance (numpy.array): Covariance matrix for which ellipse area should be calculated.
    @param[in] scale (float): Scale of the ellipse.

    @return (float, float, float) Return ellipse description: angle, width, height.

    """
    eigh_values, eigh_vectors = numpy.linalg.eigh(covariance)

    # Sort eigenvalues in descending order so the major axis comes first.
    order = eigh_values.argsort()[::-1]
    values = eigh_values[order]
    # Eigenvectors are the *columns* of the matrix returned by eigh, so the
    # reordering must happen along axis 1 (row indexing scrambles them).
    vectors = eigh_vectors[:, order]

    # Orientation of the major-axis eigenvector, in degrees.
    angle = numpy.degrees(numpy.arctan2(*vectors[:, 0][::-1]))

    # Degenerate (zero-variance) covariance yields no ellipse.
    if 0.0 in values:
        return 0, 0, 0

    width, height = 2.0 * scale * numpy.sqrt(values)
    return angle, width, height
! @brief Calculates description of ellipse using covariance matrix. @param[in] covariance (numpy.array): Covariance matrix for which ellipse area should be calculated. @param[in] scale (float): Scale of the ellipse. @return (float, float, float) Return ellipse description: angle, width, height.
def one(prompt, *args, **kwargs):
    """Instantiates a picker, registers custom handlers for going back,
    and starts the picker.

    :param prompt: title shown above the options.
    :param args: the options to choose from.
    :param kwargs: ``idx`` preselects an option; ``return_index`` makes the
        function return the chosen index instead of the option value.
    :raises QuestionnaireGoBack: when the user backs out of the picker.
    """
    # Fancy unicode indicator is not safe on Python 2 terminals.
    indicator = '‣'
    if sys.version_info < (3, 0):
        indicator = '>'

    def go_back(picker):
        # Sentinel result: index -1 signals "go back".
        return None, -1

    options, verbose_options = prepare_options(args)
    idx = kwargs.get('idx', 0)

    picker = Picker(verbose_options, title=prompt, indicator=indicator, default_index=idx)
    # Both 'h' and the left arrow trigger the go-back handler.
    picker.register_custom_handler(ord('h'), go_back)
    picker.register_custom_handler(curses.KEY_LEFT, go_back)
    # stdout is redirected to stderr while the picker runs.
    with stdout_redirected(sys.stderr):
        option, index = picker.start()
        if index == -1:
            raise QuestionnaireGoBack
        if kwargs.get('return_index', False):
            # `one` was called by a special client, e.g. `many`
            return index
        return options[index]
Instantiates a picker, registers custom handlers for going back, and starts the picker.
def average_gradients(tower_gradients): r''' A routine for computing each variable's average of the gradients obtained from the GPUs. Note also that this code acts as a synchronization point as it requires all GPUs to be finished with their mini-batch before it can run to completion. ''' # List of average gradients to return to the caller average_grads = [] # Run this on cpu_device to conserve GPU memory with tf.device(Config.cpu_device): # Loop over gradient/variable pairs from all towers for grad_and_vars in zip(*tower_gradients): # Introduce grads to store the gradients for the current variable grads = [] # Loop over the gradients for the current variable for g, _ in grad_and_vars: # Add 0 dimension to the gradients to represent the tower. expanded_g = tf.expand_dims(g, 0) # Append on a 'tower' dimension which we will average over below. grads.append(expanded_g) # Average over the 'tower' dimension grad = tf.concat(grads, 0) grad = tf.reduce_mean(grad, 0) # Create a gradient/variable tuple for the current variable with its average gradient grad_and_var = (grad, grad_and_vars[0][1]) # Add the current tuple to average_grads average_grads.append(grad_and_var) # Return result to caller return average_grads
r''' A routine for computing each variable's average of the gradients obtained from the GPUs. Note also that this code acts as a synchronization point as it requires all GPUs to be finished with their mini-batch before it can run to completion.
def get_attributes(var):
    """
    Given a variable, return the list of attributes that are available
    inside of a template
    """
    return [attr for attr in dir(var) if is_valid_in_template(var, attr)]
Given a variable, return the list of attributes that are available inside of a template
def list_dataset_uris(cls, base_uri, config_path):
    """Return list containing URIs in location given by base_uri.

    Each immediate subdirectory that holds admin metadata (i.e. looks like a
    dataset) contributes one URI.

    :param base_uri: URI of the directory to scan.
    :param config_path: config path handed to each storage broker instance.
    :returns: list of dataset URIs.
    """
    parsed_uri = generous_parse_uri(base_uri)

    uri_list = []

    path = parsed_uri.path
    if IS_WINDOWS:
        path = unix_to_windows_path(parsed_uri.path, parsed_uri.netloc)

    for d in os.listdir(path):
        dir_path = os.path.join(path, d)

        if not os.path.isdir(dir_path):
            continue

        storage_broker = cls(dir_path, config_path)

        # Only directories that carry admin metadata are datasets.
        if not storage_broker.has_admin_metadata():
            continue

        uri = storage_broker.generate_uri(
            name=d,
            uuid=None,
            base_uri=base_uri
        )
        uri_list.append(uri)

    return uri_list
Return list containing URIs in location given by base_uri.
def transp(I,J,c,d,M):
    """transp -- model for solving the transportation problem
    Parameters:
        I - set of customers
        J - set of facilities
        c[i,j] - unit transportation cost on arc (i,j)
        d[i] - demand at node i
        M[j] - capacity
    Returns a model, already optimized (``optimize()`` is called before
    returning); the variable dict is attached as ``model.data``.
    """

    model = Model("transportation")

    # Create variables
    x = {}

    for i in I:
        for j in J:
            x[i,j] = model.addVar(vtype="C", name="x(%s,%s)" % (i, j))

    # Demand constraints
    for i in I:
        model.addCons(quicksum(x[i,j] for j in J if (i,j) in x) == d[i], name="Demand(%s)" % i)

    # Capacity constraints
    for j in J:
        model.addCons(quicksum(x[i,j] for i in I if (i,j) in x) <= M[j], name="Capacity(%s)" % j)

    # Objective
    model.setObjective(quicksum(c[i,j]*x[i,j]  for (i,j) in x), "minimize")

    model.optimize()

    model.data = x
    return model
transp -- model for solving the transportation problem Parameters: I - set of customers J - set of facilities c[i,j] - unit transportation cost on arc (i,j) d[i] - demand at node i M[j] - capacity Returns a model, ready to be solved.
def cancel_order(self, order_id, private_key):
    """Cancel an open order on the order book in a single step.

    Convenience wrapper that chains ``create_cancellation`` and
    ``execute_cancellation``, so callers do not have to perform the two
    requests separately::

        cancel_order(order_id=order['id'], private_key=kp)
        cancel_order(order_id=order['id'], private_key=eth_private_key)

    The return value is identical to that of ``execute_cancellation``: a
    dictionary describing the transaction details and state (including the
    affected makes/fills) after the signed cancellation has been submitted
    to the blockchain.

    :param order_id: The order ID of the open transaction on the order book
        that you want to cancel.
    :type order_id: str
    :param private_key: The KeyPair that will be used to sign the transaction
        sent to the blockchain.
    :type private_key: KeyPair
    :return: Dictionary of the transaction details and state after sending
        the signed transaction to the blockchain.
    """
    cancellation = self.create_cancellation(order_id=order_id, private_key=private_key)
    return self.execute_cancellation(cancellation_params=cancellation, private_key=private_key)
This function is a wrapper function around the create and execute cancellation functions to help make this processes simpler for the end user by combining these requests in 1 step. Execution of this function is as follows:: cancel_order(order_id=order['id'], private_key=kp) cancel_order(order_id=order['id'], private_key=eth_private_key) The expected return result for this function is the same as the execute_cancellation function:: { 'id': 'b8e617d5-f5ed-4600-b8f2-7d370d837750', 'blockchain': 'neo', 'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82', 'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59', 'side': 'buy', 'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b', 'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132', 'offer_amount': '2000000', 'want_amount': '10000000000', 'transfer_amount': '0', 'priority_gas_amount': '0', 'use_native_token': True, 'native_fee_transfer_amount': 0, 'deposit_txn': None, 'created_at': '2018-08-05T11:16:47.021Z', 'status': 'processed', 'fills': [], 'makes': [ { 'id': '6b9f40de-f9bb-46b6-9434-d281f8c06b74', 'offer_hash': '6830d82dbdda566ab32e9a8d9d9d94d3297f67c10374d69bb35d6c5a86bd3e92', 'available_amount': '0', 'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b', 'offer_amount': '2000000', 'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132', 'want_amount': '10000000000', 'filled_amount': '0.0', 'txn': None, 'cancel_txn': None, 'price': '0.0002', 'status': 'cancelling', 'created_at': '2018-08-05T11:16:47.036Z', 'transaction_hash': 'e5b08c4a55c7494f1ec7dd93ac2bb2b4e84e77dec9e00e91be1d520cb818c415', 'trades': [] } ] } :param order_id: The order ID of the open transaction on the order book that you want to cancel. :type order_id: str :param private_key: The KeyPair that will be used to sign the transaction sent to the blockchain. 
:type private_key: KeyPair :return: Dictionary of the transaction details and state after sending the signed transaction to the blockchain.
def context_chunks(self, context):
    """
    Retrieves all tokens, divided into the chunks in context ``context``.

    Parameters
    ----------
    context : str
        Context name.

    Returns
    -------
    chunks : list
        Each item in ``chunks`` is a list of tokens.
    """
    # One chunk per entry registered for this context.
    return [self.context_chunk(context, j)
            for j in xrange(len(self.contexts[context]))]
Retrieves all tokens, divided into the chunks in context ``context``. Parameters ---------- context : str Context name. Returns ------- chunks : list Each item in ``chunks`` is a list of tokens.
def media_url(self, with_ssl=False):
    """
    Used to return a base media URL, without a trailing slash.

    Depending on whether we're serving media remotely or locally, this
    either hands the decision off to the backend, or just uses the local
    media URL value.

    args:
      with_ssl: (bool) If True, return an HTTPS url (depending on how
                       the backend handles it).
    """
    if self.serve_remote:
        # Hand this off to whichever backend is being used.
        base = self.remote_media_url(with_ssl)
    else:
        # Serving locally, just use the value in settings.py.
        base = self.local_media_url
    return base.rstrip('/')
Used to return a base media URL. Depending on whether we're serving media remotely or locally, this either hands the decision off to the backend, or just uses the value in settings.STATIC_URL. args: with_ssl: (bool) If True, return an HTTPS url (depending on how the backend handles it).
def download(url, dir, filename=None, expect_size=None):
    """
    Download URL to a directory.

    Will figure out the filename automatically from URL, if not given.

    Args:
        url (str): URL to fetch.
        dir (str): target directory (created if missing).
        filename (str): basename to save as; defaults to the last URL
            path component.
        expect_size (int): expected size in bytes; an existing file of
            this exact size is reused without re-downloading.

    Returns:
        str: path of the downloaded file.
    """
    mkdir_p(dir)
    if filename is None:
        filename = url.split('/')[-1]
    fpath = os.path.join(dir, filename)

    if os.path.isfile(fpath):
        if expect_size is not None and os.stat(fpath).st_size == expect_size:
            logger.info("File {} exists! Skip download.".format(filename))
            return fpath
        else:
            logger.warn("File {} exists. Will overwrite with a new download!".format(filename))

    def hook(t):
        # tqdm-compatible urlretrieve reporthook; last_b remembers the
        # previous block count so only the delta is added to the bar.
        last_b = [0]

        def inner(b, bsize, tsize=None):
            if tsize is not None:
                t.total = tsize
            t.update((b - last_b[0]) * bsize)
            last_b[0] = b
        return inner
    try:
        with tqdm.tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:
            fpath, _ = urllib.request.urlretrieve(url, fpath, reporthook=hook(t))
        statinfo = os.stat(fpath)
        size = statinfo.st_size
    except IOError:
        logger.error("Failed to download {}".format(url))
        raise
    assert size > 0, "Downloaded an empty file from {}!".format(url)

    # A size mismatch is logged but the (possibly broken) file is still returned.
    if expect_size is not None and size != expect_size:
        logger.error("File downloaded from {} does not match the expected size!".format(url))
        logger.error("You may have downloaded a broken file, or the upstream may have modified the file.")

    # TODO human-readable size
    logger.info('Succesfully downloaded ' + filename + ". " + str(size) + ' bytes.')
    return fpath
Download URL to a directory. Will figure out the filename automatically from URL, if not given.
def stop(self):
    """Stop the publisher.

    Sets the socket LINGER option to 1 before closing so close() does
    not block indefinitely on unsent messages.

    :returns: self, for chaining.
    """
    self.publish.setsockopt(zmq.LINGER, 1)
    self.publish.close()
    return self
Stop the publisher.
def date_map(doc, datemap_list, time_format=None): ''' For all the datetime fields in "datemap" find that key in doc and map the datetime object to a strftime string. This pprint and others will print out readable datetimes. ''' if datemap_list: for i in datemap_list: if isinstance(i, datetime): doc=CursorFormatter.date_map_field(doc, i, time_format=time_format) return doc
For all the datetime fields in "datemap" find that key in doc and map the datetime object to a strftime string. This pprint and others will print out readable datetimes.
def remove_send_last_message(self, connection): """Removes a send_last_message function previously registered with the Dispatcher. Args: connection (str): A locally unique identifier provided by the receiver of messages. """ if connection in self._send_last_message: del self._send_last_message[connection] LOGGER.debug("Removed send_last_message function " "for connection %s", connection) else: LOGGER.warning("Attempted to remove send_last_message " "function for connection %s, but no " "send_last_message function was registered", connection)
Removes a send_last_message function previously registered with the Dispatcher. Args: connection (str): A locally unique identifier provided by the receiver of messages.
def _expand_data(self, old_data, new_data, group): """ data expansion - uvision needs filename and path separately. """ for file in old_data: if file: extension = file.split(".")[-1].lower() if extension in self.file_types.keys(): new_data['groups'][group].append(self._expand_one_file(normpath(file), new_data, extension)) else: logger.debug("Filetype for file %s not recognized" % file) if hasattr(self, '_expand_sort_key'): new_data['groups'][group] = sorted(new_data['groups'][group], key=self._expand_sort_key)
data expansion - uvision needs filename and path separately.
def install_package_command(package_name): '''install python package from pip''' #TODO refactor python logic if sys.platform == "win32": cmds = 'python -m pip install --user {0}'.format(package_name) else: cmds = 'python3 -m pip install --user {0}'.format(package_name) call(cmds, shell=True)
install python package from pip
def add_ssh_scheme_to_git_uri(uri): # type: (S) -> S """Cleans VCS uris from pipenv.patched.notpip format""" if isinstance(uri, six.string_types): # Add scheme for parsing purposes, this is also what pip does if uri.startswith("git+") and "://" not in uri: uri = uri.replace("git+", "git+ssh://", 1) parsed = urlparse(uri) if ":" in parsed.netloc: netloc, _, path_start = parsed.netloc.rpartition(":") path = "/{0}{1}".format(path_start, parsed.path) uri = urlunparse(parsed._replace(netloc=netloc, path=path)) return uri
Cleans VCS uris from pipenv.patched.notpip format
def is_base_form(self, univ_pos, morphology=None): """ Check whether we're dealing with an uninflected paradigm, so we can avoid lemmatization entirely. """ morphology = {} if morphology is None else morphology others = [key for key in morphology if key not in (POS, 'Number', 'POS', 'VerbForm', 'Tense')] if univ_pos == 'noun' and morphology.get('Number') == 'sing': return True elif univ_pos == 'verb' and morphology.get('VerbForm') == 'inf': return True # This maps 'VBP' to base form -- probably just need 'IS_BASE' # morphology elif univ_pos == 'verb' and (morphology.get('VerbForm') == 'fin' and morphology.get('Tense') == 'pres' and morphology.get('Number') is None and not others): return True elif univ_pos == 'adj' and morphology.get('Degree') == 'pos': return True elif VerbForm_inf in morphology: return True elif VerbForm_none in morphology: return True elif Number_sing in morphology: return True elif Degree_pos in morphology: return True else: return False
Check whether we're dealing with an uninflected paradigm, so we can avoid lemmatization entirely.
def alarm_on_log(self, alarm, matcher, skip=False): """Raise (or skip) the specified alarm when a log line matches the specified regexp. :param AlarmType|list[AlarmType] alarm: Alarm. :param str|unicode matcher: Regular expression to match log line. :param bool skip: """ self.register_alarm(alarm) value = '%s %s' % ( ','.join(map(attrgetter('alias'), listify(alarm))), matcher) self._set('not-alarm-log' if skip else 'alarm-log', value) return self._section
Raise (or skip) the specified alarm when a log line matches the specified regexp. :param AlarmType|list[AlarmType] alarm: Alarm. :param str|unicode matcher: Regular expression to match log line. :param bool skip:
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ fpminfo = PHPfpmInfo(self._host, self._port, self._user, self._password, self._monpath, self._ssl) return fpminfo is not None
Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise.
def list_loadbalancers(call=None): ''' Return a list of the loadbalancers that are on the provider ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-loadbalancers option' ) ret = {} conn = get_conn() datacenter = get_datacenter(conn) for item in conn.list_loadbalancers(datacenter['id'])['items']: lb = {'id': item['id']} lb.update(item['properties']) ret[lb['name']] = lb return ret
Return a list of the loadbalancers that are on the provider
def add_circle(self, center_lat=None, center_lng=None, radius=None, **kwargs): """ Adds a circle dict to the Map.circles attribute The circle in a sphere is called "spherical cap" and is defined in the Google Maps API by at least the center coordinates and its radius, in meters. A circle has color and opacity both for the border line and the inside area. It accepts a circle dict representation as well. Args: center_lat (float): The circle center latitude center_lng (float): The circle center longitude radius (float): The circle radius, in meters .. _Circle: https://developers.google.com/maps/documen tation/javascript/reference#Circle """ kwargs.setdefault('center', {}) if center_lat: kwargs['center']['lat'] = center_lat if center_lng: kwargs['center']['lng'] = center_lng if radius: kwargs['radius'] = radius if set(('lat', 'lng')) != set(kwargs['center'].keys()): raise AttributeError('circle center coordinates required') if 'radius' not in kwargs: raise AttributeError('circle radius definition required') kwargs.setdefault('stroke_color', '#FF0000') kwargs.setdefault('stroke_opacity', .8) kwargs.setdefault('stroke_weight', 2) kwargs.setdefault('fill_color', '#FF0000') kwargs.setdefault('fill_opacity', .3) self.circles.append(kwargs)
Adds a circle dict to the Map.circles attribute The circle in a sphere is called "spherical cap" and is defined in the Google Maps API by at least the center coordinates and its radius, in meters. A circle has color and opacity both for the border line and the inside area. It accepts a circle dict representation as well. Args: center_lat (float): The circle center latitude center_lng (float): The circle center longitude radius (float): The circle radius, in meters .. _Circle: https://developers.google.com/maps/documen tation/javascript/reference#Circle
def normalize_attachment(attachment): ''' Convert attachment metadata from es to archivant format This function makes side effect on input attachment ''' res = dict() res['type'] = 'attachment' res['id'] = attachment['id'] del(attachment['id']) res['url'] = attachment['url'] del(attachment['url']) res['metadata'] = attachment return res
Convert attachment metadata from es to archivant format This function makes side effect on input attachment
def parse_headers(self, use_cookies, raw): """ analyze headers from file or raw messages :return: (url, dat) :rtype: """ if not raw: packet = helper.to_str(helper.read_file(self.fpth)) else: packet = raw dat = {} pks = [x for x in packet.split('\n') if x.replace(' ', '')] url = pks[0].split(' ')[1] for i, cnt in enumerate(pks[1:]): arr = cnt.split(':') if len(arr) < 2: continue arr = [x.replace(' ', '') for x in arr] _k, v = arr[0], ':'.join(arr[1:]) dat[_k] = v if use_cookies: try: self.fmt_cookies(dat.pop('Cookie')) except: pass self.headers = dat self.url = 'https://{}{}'.format(self.headers.get('Host'), url) return url, dat
analyze headers from file or raw messages :return: (url, dat) :rtype:
def get_variables(self, sort=None, collapse_same_ident=False): """ Get a list of variables. :param str or None sort: Sort of the variable to get. :param collapse_same_ident: Whether variables of the same identifier should be collapsed or not. :return: A list of variables. :rtype: list """ variables = [ ] if collapse_same_ident: raise NotImplementedError() for var in self._variables: if sort == 'stack' and not isinstance(var, SimStackVariable): continue if sort == 'reg' and not isinstance(var, SimRegisterVariable): continue variables.append(var) return variables
Get a list of variables. :param str or None sort: Sort of the variable to get. :param collapse_same_ident: Whether variables of the same identifier should be collapsed or not. :return: A list of variables. :rtype: list
def _dataframe_to_edge_list(df): """ Convert dataframe into list of edges, assuming that source and target ids are stored in _SRC_VID_COLUMN, and _DST_VID_COLUMN respectively. """ cols = df.columns if len(cols): assert _SRC_VID_COLUMN in cols, "Vertex DataFrame must contain column %s" % _SRC_VID_COLUMN assert _DST_VID_COLUMN in cols, "Vertex DataFrame must contain column %s" % _DST_VID_COLUMN df = df[cols].T ret = [Edge(None, None, _series=df[col]) for col in df] return ret else: return []
Convert dataframe into list of edges, assuming that source and target ids are stored in _SRC_VID_COLUMN, and _DST_VID_COLUMN respectively.
def _get_solarflux(self): """Derive the in-band solar flux from rsr over the Near IR band (3.7 or 3.9 microns) """ solar_spectrum = \ SolarIrradianceSpectrum(TOTAL_IRRADIANCE_SPECTRUM_2000ASTM, dlambda=0.0005, wavespace=self.wavespace) self.solar_flux = solar_spectrum.inband_solarflux(self.rsr[self.bandname])
Derive the in-band solar flux from rsr over the Near IR band (3.7 or 3.9 microns)
def _connect(host=None, port=None, db=None, password=None): ''' Returns an instance of the redis client ''' if not host: host = __salt__['config.option']('redis.host') if not port: port = __salt__['config.option']('redis.port') if not db: db = __salt__['config.option']('redis.db') if not password: password = __salt__['config.option']('redis.password') return redis.StrictRedis(host, port, db, password, decode_responses=True)
Returns an instance of the redis client
def bootstrap_results(self, init_state): """Returns an object with the same type as returned by `one_step`. Args: init_state: `Tensor` or Python `list` of `Tensor`s representing the initial state(s) of the Markov chain(s). Returns: kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of `Tensor`s representing internal calculations made within this function. Raises: ValueError: if `inner_kernel` results doesn't contain the member "target_log_prob". """ with tf.compat.v1.name_scope( name=mcmc_util.make_name(self.name, 'mh', 'bootstrap_results'), values=[init_state]): pkr = self.inner_kernel.bootstrap_results(init_state) if not has_target_log_prob(pkr): raise ValueError( '"target_log_prob" must be a member of `inner_kernel` results.') x = pkr.target_log_prob return MetropolisHastingsKernelResults( accepted_results=pkr, is_accepted=tf.ones_like(x, dtype=tf.bool), log_accept_ratio=tf.zeros_like(x), proposed_state=init_state, proposed_results=pkr, extra=[], )
Returns an object with the same type as returned by `one_step`. Args: init_state: `Tensor` or Python `list` of `Tensor`s representing the initial state(s) of the Markov chain(s). Returns: kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of `Tensor`s representing internal calculations made within this function. Raises: ValueError: if `inner_kernel` results doesn't contain the member "target_log_prob".
def serialize(self): '''Serialize this object as dictionary usable for conversion to JSON. :return: Dictionary representing this object. ''' return { 'type': 'event', 'id': self.uid, 'attributes': { 'start': self.start, 'end': self.end, 'uid': self.uid, 'title': self.title, 'data': self.get_data(), 'status': Status.str(self.status) } }
Serialize this object as dictionary usable for conversion to JSON. :return: Dictionary representing this object.
def isInside(self, point, tol=0.0001): """ Return True if point is inside a polydata closed surface. """ poly = self.polydata(True) points = vtk.vtkPoints() points.InsertNextPoint(point) pointsPolydata = vtk.vtkPolyData() pointsPolydata.SetPoints(points) sep = vtk.vtkSelectEnclosedPoints() sep.SetTolerance(tol) sep.CheckSurfaceOff() sep.SetInputData(pointsPolydata) sep.SetSurfaceData(poly) sep.Update() return sep.IsInside(0)
Return True if point is inside a polydata closed surface.
def retrieve_list(self, session, filters, *args, **kwargs): """ Retrieves a list of the model for this manager. It is restricted by the filters provided. :param Session session: The SQLAlchemy session to use :param dict filters: The filters to restrict the returned models on :return: A tuple of the list of dictionary representation of the models and the dictionary of meta data :rtype: list, dict """ query = self.queryset(session) translator = IntegerField('tmp') pagination_count = translator.translate( filters.pop(self.pagination_count_query_arg, self.paginate_by) ) pagination_pk = translator.translate( filters.pop(self.pagination_pk_query_arg, 1) ) pagination_pk -= 1 # logic works zero based. Pagination shouldn't be though query = query.filter_by(**filters) if pagination_pk: query = query.offset(pagination_pk * pagination_count) if pagination_count: query = query.limit(pagination_count + 1) count = query.count() next_link = None previous_link = None if count > pagination_count: next_link = {self.pagination_pk_query_arg: pagination_pk + 2, self.pagination_count_query_arg: pagination_count} if pagination_pk > 0: previous_link = {self.pagination_pk_query_arg: pagination_pk, self.pagination_count_query_arg: pagination_count} field_dict = self.dot_field_list_to_dict(self.list_fields) props = self.serialize_model(query[:pagination_count], field_dict=field_dict) meta = dict(links=dict(next=next_link, previous=previous_link)) return props, meta
Retrieves a list of the model for this manager. It is restricted by the filters provided. :param Session session: The SQLAlchemy session to use :param dict filters: The filters to restrict the returned models on :return: A tuple of the list of dictionary representation of the models and the dictionary of meta data :rtype: list, dict
def import_image(self, imported_image_name, image_name): """ Import image using `oc import-image` command. :param imported_image_name: str, short name of an image in internal registry, example: - hello-openshift:latest :param image_name: full repository name, example: - docker.io/openshift/hello-openshift:latest :return: str, short name in internal registry """ c = self._oc_command(["import-image", imported_image_name, "--from=%s" % image_name, "--confirm"]) logger.info("Importing image from: %s, as: %s", image_name, imported_image_name) try: o = run_cmd(c, return_output=True, ignore_status=True) logger.debug(o) except subprocess.CalledProcessError as ex: raise ConuException("oc import-image failed: %s" % ex) return imported_image_name
Import image using `oc import-image` command. :param imported_image_name: str, short name of an image in internal registry, example: - hello-openshift:latest :param image_name: full repository name, example: - docker.io/openshift/hello-openshift:latest :return: str, short name in internal registry
def to_long_time_string(self) -> str: """ Return the iso time string only """ hour = self.time.hour minute = self.time.minute second = self.time.second return f"{hour:02}:{minute:02}:{second:02}"
Return the iso time string only
def init_device(self): """Device constructor.""" Device.init_device(self) # Add anything here that has to be done before the device is set to # its ON state. self._set_master_state('on') self._devProxy = DeviceProxy(self.get_name())
Device constructor.
def is_attr_protected(attrname: str) -> bool: """return True if attribute name is protected (start with _ and some other details), False otherwise. """ return ( attrname[0] == "_" and attrname != "_" and not (attrname.startswith("__") and attrname.endswith("__")) )
return True if attribute name is protected (start with _ and some other details), False otherwise.
def union(self, other): """ Intersect current range with other.""" return Interval(min(self.low, other.low), max(self.high, other.high))
Intersect current range with other.
def Close(self): """Disconnects from the database. This method will create the necessary indices and commit outstanding transactions before disconnecting. """ # Build up indices for the fields specified in the args. # It will commit the inserts automatically before creating index. if not self._append: for field_name in self._fields: query = 'CREATE INDEX {0:s}_idx ON log2timeline ({0:s})'.format( field_name) self._cursor.execute(query) if self._set_status: self._set_status('Created index: {0:s}'.format(field_name)) # Get meta info and save into their tables. if self._set_status: self._set_status('Creating metadata...') for field in self._META_FIELDS: values = self._GetDistinctValues(field) self._cursor.execute('DELETE FROM l2t_{0:s}s'.format(field)) for name, frequency in iter(values.items()): self._cursor.execute(( 'INSERT INTO l2t_{0:s}s ({0:s}s, frequency) ' 'VALUES("{1:s}", {2:d}) ').format(field, name, frequency)) self._cursor.execute('DELETE FROM l2t_tags') for tag in self._ListTags(): self._cursor.execute('INSERT INTO l2t_tags (tag) VALUES (?)', [tag]) if self._set_status: self._set_status('Database created.') self._connection.commit() self._cursor.close() self._connection.close() self._cursor = None self._connection = None
Disconnects from the database. This method will create the necessary indices and commit outstanding transactions before disconnecting.
def register(id, url=None): """Register a UUID key in the global S3 bucket.""" bucket = registration_s3_bucket() key = registration_key(id) obj = bucket.Object(key) obj.put(Body=url or "missing") return _generate_s3_url(bucket, key)
Register a UUID key in the global S3 bucket.
def set_thumbnail(self, thumbnail): """ Sets the thumbnail for this OAuth Client. If thumbnail is bytes, uploads it as a png. Otherwise, assumes thumbnail is a path to the thumbnail and reads it in as bytes before uploading. """ headers = { "Authorization": "token {}".format(self._client.token), "Content-type": "image/png", } # TODO this check needs to be smarter - python2 doesn't do it right if not isinstance(thumbnail, bytes): with open(thumbnail, 'rb') as f: thumbnail = f.read() result = requests.put('{}/{}/thumbnail'.format(self._client.base_url, OAuthClient.api_endpoint.format(id=self.id)), headers=headers, data=thumbnail) if not result.status_code == 200: errors = [] j = result.json() if 'errors' in j: errors = [ e['reason'] for e in j['errors'] ] raise ApiError('{}: {}'.format(result.status_code, errors), json=j) return True
Sets the thumbnail for this OAuth Client. If thumbnail is bytes, uploads it as a png. Otherwise, assumes thumbnail is a path to the thumbnail and reads it in as bytes before uploading.
def websocket_safe_read(self): """ Returns data if available, otherwise ''. Newlines indicate multiple messages """ data = [] while True: try: data.append(self.websocket.recv()) except (SSLError, SSLWantReadError) as err: if err.errno == 2: # errno 2 occurs when trying to read or write data, but more # data needs to be received on the underlying TCP transport # before the request can be fulfilled. return data raise
Returns data if available, otherwise ''. Newlines indicate multiple messages
async def _parse_lines(lines, regex): """Parse the lines using the given regular expression. If a line can't be parsed it is logged and skipped in the output. """ results = [] if inspect.iscoroutinefunction(lines): lines = await lines for line in lines: if line: match = regex.search(line) if not match: _LOGGER.debug("Could not parse row: %s", line) continue results.append(match.groupdict()) return results
Parse the lines using the given regular expression. If a line can't be parsed it is logged and skipped in the output.
def attach_ip(self, server, family='IPv4'): """ Attach a new (random) IPAddress to the given server (object or UUID). """ body = { 'ip_address': { 'server': str(server), 'family': family } } res = self.request('POST', '/ip_address', body) return IPAddress(cloud_manager=self, **res['ip_address'])
Attach a new (random) IPAddress to the given server (object or UUID).
def answer_challenge(authzr, client, responders): """ Complete an authorization using a responder. :param ~acme.messages.AuthorizationResource auth: The authorization to complete. :param .Client client: The ACME client. :type responders: List[`~txacme.interfaces.IResponder`] :param responders: A list of responders that can be used to complete the challenge with. :return: A deferred firing when the authorization is verified. """ responder, challb = _find_supported_challenge(authzr, responders) response = challb.response(client.key) def _stop_responding(): return maybeDeferred( responder.stop_responding, authzr.body.identifier.value, challb.chall, response) return ( maybeDeferred( responder.start_responding, authzr.body.identifier.value, challb.chall, response) .addCallback(lambda _: client.answer_challenge(challb, response)) .addCallback(lambda _: _stop_responding) )
Complete an authorization using a responder. :param ~acme.messages.AuthorizationResource auth: The authorization to complete. :param .Client client: The ACME client. :type responders: List[`~txacme.interfaces.IResponder`] :param responders: A list of responders that can be used to complete the challenge with. :return: A deferred firing when the authorization is verified.
def editHook(self, repo_user, repo_name, hook_id, name, config, events=None, add_events=None, remove_events=None, active=None): """ PATCH /repos/:owner/:repo/hooks/:id :param hook_id: Id of the hook. :param name: The name of the service that is being called. :param config: A Hash containing key/value pairs to provide settings for this hook. """ post = dict( name=name, config=config, ) if events is not None: post['events'] = events if add_events is not None: post['add_events'] = add_events if remove_events is not None: post['remove_events'] = remove_events if active is not None: post['active'] = active return self.api.makeRequest( ['repos', repo_user, repo_name, 'hooks', str(hook_id)], method='PATCH', post=post, )
PATCH /repos/:owner/:repo/hooks/:id :param hook_id: Id of the hook. :param name: The name of the service that is being called. :param config: A Hash containing key/value pairs to provide settings for this hook.
def handle_trunks(self, trunks, event_type): """Trunk data model change from the server.""" LOG.debug("Trunks event received: %(event_type)s. Trunks: %(trunks)s", {'event_type': event_type, 'trunks': trunks}) if event_type == events.DELETED: # The port trunks have been deleted. Remove them from cache. for trunk in trunks: self._trunks.pop(trunk.id, None) else: for trunk in trunks: self._trunks[trunk.id] = trunk self._setup_trunk(trunk)
Trunk data model change from the server.
def select_by_index(self, val, level=0, squeeze=False, filter=False, return_mask=False): """ Select or filter elements of the Series by index values (across levels, if multi-index). The index is a property of a Series object that assigns a value to each position within the arrays stored in the records of the Series. This function returns a new Series where, within each record, only the elements indexed by a given value(s) are retained. An index where each value is a list of a fixed length is referred to as a 'multi-index', as it provides multiple labels for each index location. Each of the dimensions in these sublists is a 'level' of the multi-index. If the index of the Series is a multi-index, then the selection can proceed by first selecting one or more levels, and then selecting one or more values at each level. Parameters ---------- val : list of lists Specifies the selected index values. List must contain one list for each level of the multi-index used in the selection. For any singleton lists, the list may be replaced with just the integer. level : list of ints, optional, default=0 Specifies which levels in the multi-index to use when performing selection. If a single level is selected, the list can be replaced with an integer. Must be the same length as val. squeeze : bool, optional, default=False If True, the multi-index of the resulting Series will drop any levels that contain only a single value because of the selection. Useful if indices are used as unique identifiers. filter : bool, optional, default=False If True, selection process is reversed and all index values EXCEPT those specified are selected. return_mask : bool, optional, default=False If True, return the mask used to implement the selection. 
""" try: level[0] except: level = [level] try: val[0] except: val = [val] remove = [] if len(level) == 1: try: val[0][0] except: val = [val] if squeeze and not filter and len(val) == 1: remove.append(level[0]) else: for i in range(len(val)): try: val[i][0] except: val[i] = [val[i]] if squeeze and not filter and len(val[i]) == 1: remove.append(level[i]) if len(level) != len(val): raise ValueError("List of levels must be same length as list of corresponding values") p = product(*val) selected = set([x for x in p]) masks, ind = self._makemasks(index=self.index, level=level) nmasks = len(masks) masks = array([masks[x] for x in range(nmasks) if tuple(ind[x]) in selected]) final_mask = masks.any(axis=0) if filter: final_mask = logical_not(final_mask) indFinal = array(self.index) if len(indFinal.shape) == 1: indFinal = array(indFinal, ndmin=2).T indFinal = indFinal[final_mask] if squeeze: indFinal = delete(indFinal, remove, axis=1) if len(indFinal[0]) == 1: indFinal = ravel(indFinal) elif len(indFinal[1]) == 0: indFinal = arange(sum(final_mask)) result = self.map(lambda v: v[final_mask], index=indFinal) if return_mask: return result, final_mask else: return result
Select or filter elements of the Series by index values (across levels, if multi-index). The index is a property of a Series object that assigns a value to each position within the arrays stored in the records of the Series. This function returns a new Series where, within each record, only the elements indexed by a given value(s) are retained. An index where each value is a list of a fixed length is referred to as a 'multi-index', as it provides multiple labels for each index location. Each of the dimensions in these sublists is a 'level' of the multi-index. If the index of the Series is a multi-index, then the selection can proceed by first selecting one or more levels, and then selecting one or more values at each level. Parameters ---------- val : list of lists Specifies the selected index values. List must contain one list for each level of the multi-index used in the selection. For any singleton lists, the list may be replaced with just the integer. level : list of ints, optional, default=0 Specifies which levels in the multi-index to use when performing selection. If a single level is selected, the list can be replaced with an integer. Must be the same length as val. squeeze : bool, optional, default=False If True, the multi-index of the resulting Series will drop any levels that contain only a single value because of the selection. Useful if indices are used as unique identifiers. filter : bool, optional, default=False If True, selection process is reversed and all index values EXCEPT those specified are selected. return_mask : bool, optional, default=False If True, return the mask used to implement the selection.
def get_unused_color(self): """Returns an xlwt color index that has not been previously returned by this instance. Attempts to maximize the distance between the color and all previously used colors. """ if not self.unused_colors: # If we somehow run out of colors, reset the color matcher. self.reset() used_colors = [c for c in self.xlwt_colors if c not in self.unused_colors] result_color = max(self.unused_colors, key=lambda c: min(self.color_distance(c, c2) for c2 in used_colors)) result_index = self.xlwt_colors.index(result_color) self.unused_colors.discard(result_color) return result_index
Returns an xlwt color index that has not been previously returned by this instance. Attempts to maximize the distance between the color and all previously used colors.
def resolve_group_names(self, r, target_group_ids, groups): """Resolve any security group names to the corresponding group ids With the context of a given network attached resource. """ names = self.get_group_names(target_group_ids) if not names: return target_group_ids target_group_ids = list(target_group_ids) vpc_id = self.vpc_expr.search(r) if not vpc_id: raise PolicyExecutionError(self._format_error( "policy:{policy} non vpc attached resource used " "with modify-security-group: {resource_id}", resource_id=r[self.manager.resource_type.id])) found = False for n in names: for g in groups: if g['GroupName'] == n and g['VpcId'] == vpc_id: found = g['GroupId'] if not found: raise PolicyExecutionError(self._format_error(( "policy:{policy} could not resolve sg:{name} for " "resource:{resource_id} in vpc:{vpc}"), name=n, resource_id=r[self.manager.resource_type.id], vpc=vpc_id)) target_group_ids.remove(n) target_group_ids.append(found) return target_group_ids
Resolve any security group names to the corresponding group ids With the context of a given network attached resource.
def fill(self, paths): """ Initialise the tree. paths is a list of strings where each string is the relative path to some file. """ for path in paths: tree = self.tree parts = tuple(path.split('/')) dir_parts = parts[:-1] built = () for part in dir_parts: self.cache[built] = tree built += (part, ) parent = tree tree = parent.folders.get(part, empty) if tree is empty: tree = parent.folders[part] = TreeItem(name=built, folders={}, files=set(), parent=parent) self.cache[dir_parts] = tree tree.files.add(parts[-1])
Initialise the tree. paths is a list of strings where each string is the relative path to some file.
def installed(name,
              features=None,
              recurse=False,
              restart=False,
              source=None,
              exclude=None):
    '''
    Install the windows feature. To install a single feature, use the
    ``name`` parameter. To install multiple features, use the
    ``features`` parameter.

    .. note::
        Some features require reboot after un/installation. If so, until
        the server is restarted other features can not be installed!

    Args:

        name (str):
            Short name of the feature (the right column in
            win_servermanager.list_available). This can be a single
            feature or a string of features in a comma delimited list
            (no spaces)

            .. note::
                A list is not allowed in the name parameter of any
                state. Use the ``features`` parameter if you want to
                pass the features as a list

        features (Optional[list]):
            A list of features to install. If this is passed it will be
            used instead of the ``name`` parameter.

            .. versionadded:: 2018.3.0

        recurse (Optional[bool]):
            Install all sub-features as well. If the feature is
            installed but one of its sub-features are not installed set
            this will install additional sub-features

        source (Optional[str]):
            Path to the source files if missing from the target system.
            None means that the system will use windows update services
            to find the required files. Default is None

        restart (Optional[bool]):
            Restarts the computer when installation is complete, if
            required by the role/feature installed. Default is False

        exclude (Optional[str]):
            The name of the feature to exclude when installing the named
            feature. This can be a single feature, a string of features
            in a comma-delimited list (no spaces), or a list of
            features.

            .. warning::
                As there is no exclude option for the
                ``Add-WindowsFeature`` or ``Install-WindowsFeature``
                PowerShell commands the features named in ``exclude``
                will be installed with other sub-features and will then
                be removed. **If the feature named in ``exclude`` is not
                a sub-feature of one of the installed items it will
                still be removed.**

    Example:

        Do not use the role or feature names mentioned in the PKGMGR
        documentation. To get a list of available roles and features run
        the following command:

        .. code-block:: bash

            salt <minion_name> win_servermanager.list_available

        Use the name in the right column of the results.

        .. code-block:: yaml

            # Installs the IIS Web Server Role (Web-Server)
            IIS-WebServerRole:
              win_servermanager.installed:
                - recurse: True
                - name: Web-Server

            # Install multiple features, exclude the Web-Service
            install_multiple_features:
              win_servermanager.installed:
                - recurse: True
                - features:
                  - RemoteAccess
                  - XPS-Viewer
                  - SNMP-Service
                - exclude:
                  - Web-Server
    '''
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}

    # Check if features is not passed, use name. Split commas
    if features is None:
        features = name.split(',')

    # Make sure features is a list, split commas
    if not isinstance(features, list):
        features = features.split(',')

    # Determine if the feature is installed
    old = __salt__['win_servermanager.list_installed']()

    cur_feat = []
    for feature in features:
        if feature not in old:
            ret['changes'][feature] = \
                'Will be installed recurse={0}'.format(recurse)
        elif recurse:
            # Already present, but with recurse the install may still
            # add missing sub-features, so report it as a change.
            ret['changes'][feature] = \
                'Already installed but might install sub-features'
        else:
            cur_feat.append(feature)

    if cur_feat:
        cur_feat.insert(0, 'The following features are already installed:')
        ret['comment'] = '\n- '.join(cur_feat)

    # Nothing to do: everything requested is installed and recurse is
    # not requested.
    if not ret['changes']:
        return ret

    if __opts__['test']:
        ret['result'] = None
        return ret

    # Install the features
    status = __salt__['win_servermanager.install'](
        features, recurse=recurse, restart=restart, source=source,
        exclude=exclude)

    ret['result'] = status['Success']

    # Show items failed to install
    fail_feat = []
    new_feat = []
    rem_feat = []
    for feature in status['Features']:
        # Features that failed to install or be removed.
        # NOTE: missing 'Success' key is treated as success.
        if not status['Features'][feature].get('Success', True):
            fail_feat.append('- {0}'.format(feature))
        # Features that installed
        elif '(exclude)' not in status['Features'][feature]['Message']:
            new_feat.append('- {0}'.format(feature))
        # Show items that were removed because they were part of `exclude`
        elif '(exclude)' in status['Features'][feature]['Message']:
            rem_feat.append('- {0}'.format(feature))

    if fail_feat:
        fail_feat.insert(0, 'Failed to install the following:')
    if new_feat:
        new_feat.insert(0, 'Installed the following:')
    if rem_feat:
        rem_feat.insert(0, 'Removed the following (exclude):')

    # NOTE: this overwrites any earlier "already installed" comment set
    # above; only the install outcome is reported after an install runs.
    ret['comment'] = '\n'.join(fail_feat + new_feat + rem_feat)

    # Get the changes
    new = __salt__['win_servermanager.list_installed']()
    ret['changes'] = salt.utils.data.compare_dicts(old, new)

    return ret
Install the windows feature. To install a single feature, use the ``name`` parameter. To install multiple features, use the ``features`` parameter. .. note:: Some features require reboot after un/installation. If so, until the server is restarted other features can not be installed! Args: name (str): Short name of the feature (the right column in win_servermanager.list_available). This can be a single feature or a string of features in a comma delimited list (no spaces) .. note:: A list is not allowed in the name parameter of any state. Use the ``features`` parameter if you want to pass the features as a list features (Optional[list]): A list of features to install. If this is passed it will be used instead of the ``name`` parameter. .. versionadded:: 2018.3.0 recurse (Optional[bool]): Install all sub-features as well. If the feature is installed but one of its sub-features are not installed set this will install additional sub-features source (Optional[str]): Path to the source files if missing from the target system. None means that the system will use windows update services to find the required files. Default is None restart (Optional[bool]): Restarts the computer when installation is complete, if required by the role/feature installed. Default is False exclude (Optional[str]): The name of the feature to exclude when installing the named feature. This can be a single feature, a string of features in a comma-delimited list (no spaces), or a list of features. .. warning:: As there is no exclude option for the ``Add-WindowsFeature`` or ``Install-WindowsFeature`` PowerShell commands the features named in ``exclude`` will be installed with other sub-features and will then be removed. **If the feature named in ``exclude`` is not a sub-feature of one of the installed items it will still be removed.** Example: Do not use the role or feature names mentioned in the PKGMGR documentation. To get a list of available roles and features run the following command: .. 
code-block:: bash salt <minion_name> win_servermanager.list_available Use the name in the right column of the results. .. code-block:: yaml # Installs the IIS Web Server Role (Web-Server) IIS-WebServerRole: win_servermanager.installed: - recurse: True - name: Web-Server # Install multiple features, exclude the Web-Service install_multiple_features: win_servermanager.installed: - recurse: True - features: - RemoteAccess - XPS-Viewer - SNMP-Service - exclude: - Web-Server
def _results(self, scheduler_instance_id):
    """Fetch the results of actions executed for the given scheduler.

    Only meaningful for daemons configured as passive; this service
    should only be exposed on poller and reactionner daemons.

    :param scheduler_instance_id: instance id of the scheduler
    :type scheduler_instance_id: string
    :return: serialized list
    :rtype: str
    """
    # Hold the application lock only while collecting the results.
    with self.app.lock:
        collected = self.app.get_results_from_passive(scheduler_instance_id)
    return serialize(collected, True)
Get the results of the executed actions for the scheduler which instance id is provided Calling this method for daemons that are not configured as passive do not make sense. Indeed, this service should only be exposed on poller and reactionner daemons. :param scheduler_instance_id: instance id of the scheduler :type scheduler_instance_id: string :return: serialized list :rtype: str