code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def reports_progress(reporter):
    """Decorator factory marking functions that report progress.

    Args:
        reporter: A zero-argument callable to report progress. It should
            have the means to both retrieve and display current progress
            information.

    Returns:
        A decorator that runs the wrapped function inside
        ``progress_reporter(reporter)``.
    """
    def _decorate(func):
        @wraps(func)
        def _wrapped(*args, **kwargs):
            with progress_reporter(reporter):
                return func(*args, **kwargs)
        return _wrapped
    return _decorate
A decorator factory to mark functions which report progress. Args: reporter: A zero-argument callable to report progress. The callable provided should have the means to both retrieve and display current progress information.
juraj-google-style
def register_lookup_handler(lookup_type, handler_or_path):
    """Register a lookup handler.

    Args:
        lookup_type (str): name to register the handler under.
        handler_or_path (OneOf[func, str]): a handler or a dotted-path
            string that resolves to one.
    """
    if isinstance(handler_or_path, basestring):
        handler = load_object_from_string(handler_or_path)
    else:
        handler = handler_or_path
    LOOKUP_HANDLERS[lookup_type] = handler
    # New-style lookups are classes; anything else is a deprecated
    # function-style lookup, so warn about it.
    if type(handler) != type:
        logging.getLogger(__name__).warning(
            "Registering lookup `%s`: Please upgrade to use the "
            "new style of Lookups." % lookup_type)
        warnings.warn(
            "Lookup `%s`: Please upgrade to use the new style of Lookups"
            "." % lookup_type,
            DeprecationWarning,
            stacklevel=2,
        )
Register a lookup handler. Args: lookup_type (str): Name to register the handler under handler_or_path (OneOf[func, str]): a function or a path to a handler
juraj-google-style
def _GetRecord(self, offset, record_size):
    """Retrieve a single record from the file.

    Args:
        offset: offset from start of input_dat where header starts.
        record_size: length of the header according to file (untrusted).

    Returns:
        A dict containing a single browser history record, or None when
        the URL-offset byte marks the slot as unused (0xFF / 0xFE).
    """
    record_header = "<4sLQQL"
    # Little-endian uint32 reader over the raw buffer.
    get4 = lambda x: struct.unpack("<L", self.input_dat[x:x + 4])[0]
    url_offset = struct.unpack("B", self.input_dat[offset + 52:offset + 53])[0]
    if url_offset in [0xFF, 0xFE]:
        return None
    data_offset = get4(offset + 68)
    data_size = get4(offset + 72)
    start_pos = offset + data_offset
    data = struct.unpack("{0}s".format(data_size),
                         self.input_dat[start_pos:start_pos + data_size])[0]
    fmt = record_header
    # Pad the format out to the URL offset, then to the full (untrusted)
    # record size, so a single unpack yields header fields, padding and
    # the URL blob.
    unknown_size = url_offset - struct.calcsize(fmt)
    fmt += "{0}s".format(unknown_size)
    fmt += "{0}s".format(record_size - struct.calcsize(fmt))
    dat = struct.unpack(fmt, self.input_dat[offset:offset + record_size])
    header, blocks, mtime, ctime, ftime, _, url = dat
    # The URL is NUL-terminated inside its blob.
    url = url.split(b"\x00")[0].decode("utf-8")
    # NOTE(review): these self-assignments are no-ops -- they look like
    # leftovers of a removed timestamp conversion; confirm intent.
    if mtime:
        mtime = mtime
    if ctime:
        ctime = ctime
    return {
        "header": header,
        "blocks": blocks,
        "urloffset": url_offset,
        "data_offset": data_offset,
        "data_size": data_size,
        "data": data,
        "mtime": mtime,
        "ctime": ctime,
        "ftime": ftime,
        "url": url
    }
Retrieve a single record from the file. Args: offset: offset from start of input_dat where header starts record_size: length of the header according to file (untrusted) Returns: A dict containing a single browser history record.
juraj-google-style
def run_ops(state, serial=False, no_wait=False):
    """Runs all operations across all servers in a configurable manner.

    Args:
        state (``pyinfra.api.State`` obj): the deploy state to execute
        serial (boolean): whether to run operations host by host
        no_wait (boolean): whether to wait for all hosts between operations
    """
    state.deploying = True
    # Exactly one execution mode must run. Without these early returns the
    # serial and no-wait modes would *also* fall through and execute every
    # operation a second time via the default loop below.
    if serial:
        _run_serial_ops(state)
        return
    if no_wait:
        _run_no_wait_ops(state)
        return
    # Default mode: run each operation across all hosts, in op order.
    for op_hash in state.get_op_order():
        _run_single_op(state, op_hash)
Runs all operations across all servers in a configurable manner. Args: state (``pyinfra.api.State`` obj): the deploy state to execute serial (boolean): whether to run operations host by host no_wait (boolean): whether to wait for all hosts between operations
juraj-google-style
def is_admin(name):
    """Is the passed user a member of the Administrators group.

    Args:
        name (str): The name to check.

    Returns:
        bool: True if the user belongs to the Administrators group
        (S-1-5-32-544) or is the Local System account (S-1-5-18),
        False otherwise.
    """
    admin_sids = ('S-1-5-32-544', 'S-1-5-18')
    return any(group in admin_sids for group in get_user_groups(name, True))
Is the passed user a member of the Administrators group Args: name (str): The name to check Returns: bool: True if user is a member of the Administrators group, False otherwise
codesearchnet
def get_resource_id(prefix, *data):
    """Returns a unique ID based on the SHA256 hash of the provided data.

    The input data is flattened and sorted to ensure identical hashes are
    generated regardless of the order of the input. Values must be of types
    `str`, `int` or `float`; any other input type raises a `ValueError`.

    Args:
        prefix (`str`): Key prefix
        *data (`str`, `int`, `float`, `list`, `tuple`): Data used to
            generate a unique ID

    Returns:
        `str`

    Raises:
        ValueError: if any flattened element is not str/int/float.
    """
    parts = flatten(data)
    for part in parts:
        # isinstance instead of an exact type() comparison: also accepts
        # well-behaved subclasses (e.g. bool, which is an int).
        if not isinstance(part, (str, int, float)):
            raise ValueError(
                'Supported data types: int, float, list, tuple, str. '
                'Got: {}'.format(type(part)))
    # Sort the stringified parts so ordering of the input is irrelevant.
    return '{}-{}'.format(
        prefix, get_hash('-'.join(sorted(map(str, parts))))[-16:])
Returns a unique ID based on the SHA256 hash of the provided data. The input data is flattened and sorted to ensure identical hashes are generated regardless of the order of the input. Values must be of types `str`, `int` or `float`, any other input type will raise a `ValueError` >>> get_resource_id('ec2', 'lots', 'of', 'data') 'ec2-1d21940125214123' >>> get_resource_id('ecs', 'foo', ['more', 'data', 'here', 2, 3]) 'ecs-e536b036ea6fd463' >>> get_resource_id('ecs', ['more'], 'data', 'here', [[2], 3], 'foo') 'ecs-e536b036ea6fd463' Args: prefix (`str`): Key prefix *data (`str`, `int`, `float`, `list`, `tuple`): Data used to generate a unique ID Returns: `str`
codesearchnet
def get_sid_string(principal):
    """Converts a PySID object to a string SID.

    Args:
        principal (str): The principal to look up the SID for; normally a
            PySID object. ``None`` maps to the 'NULL SID' principal.

    Returns:
        str: A string SID.

    Raises:
        CommandExecutionError: if the principal cannot be converted.
    """
    if principal is None:
        principal = 'NULL SID'
    try:
        return win32security.ConvertSidToStringSid(principal)
    except TypeError:
        # Not a PySID object -- resolve the name to a SID first.
        principal = get_sid(principal)
    try:
        return win32security.ConvertSidToStringSid(principal)
    except pywintypes.error:
        log.exception('Invalid principal %s', principal)
        raise CommandExecutionError('Invalid principal {0}'.format(principal))
Converts a PySID object to a string SID. Args: principal(str): The principal to lookup the sid. Must be a PySID object. Returns: str: A string sid Usage: .. code-block:: python # Get a PySID object py_sid = salt.utils.win_dacl.get_sid('jsnuffy') # Get the string version of the SID salt.utils.win_dacl.get_sid_string(py_sid)
juraj-google-style
def label(self, name):
    """Set snapshot label to name.

    Args:
        name (str or None): label to assign unitary; ``None`` clears the
            label, matching the documented contract.

    Raises:
        TypeError: name is not string or None.
    """
    # The docstring promises None is accepted, but the old code raised on
    # it -- allow None explicitly.
    if name is None or isinstance(name, str):
        self._label = name
    else:
        raise TypeError('label expects a string')
Set snapshot label to name Args: name (str or None): label to assign unitary Raises: TypeError: name is not string or None.
juraj-google-style
def connect(self, container, *args, **kwargs):
    """Connect a container to this network.

    Args:
        container (str): Container to connect to this network, as either
            an ID, name, or :py:class:`~docker.models.containers.Container`
            object. Extra positional/keyword arguments are forwarded to the
            low-level API call (aliases, links, ipv4_address, ipv6_address,
            link_local_ips, ...).

    Raises:
        :py:class:`docker.errors.APIError`: If the server returns an error.
    """
    container_id = container.id if isinstance(container, Container) else container
    return self.client.api.connect_container_to_network(
        container_id, self.id, *args, **kwargs)
Connect a container to this network. Args: container (str): Container to connect to this network, as either an ID, name, or :py:class:`~docker.models.containers.Container` object. aliases (:py:class:`list`): A list of aliases for this endpoint. Names in that list can be used within the network to reach the container. Defaults to ``None``. links (:py:class:`list`): A list of links for this endpoint. Containers declared in this list will be linked to this container. Defaults to ``None``. ipv4_address (str): The IP address of this container on the network, using the IPv4 protocol. Defaults to ``None``. ipv6_address (str): The IP address of this container on the network, using the IPv6 protocol. Defaults to ``None``. link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6) addresses. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
codesearchnet
def ParseInteger(text, is_signed=False, is_long=False):
    """Parses an integer.

    Args:
        text: The text to parse.
        is_signed: True if a signed integer must be parsed.
        is_long: True if a long integer must be parsed.

    Returns:
        The integer value.

    Raises:
        ValueError: Thrown iff the text is not a valid integer.
    """
    result = _ParseAbstractInteger(text, is_long=is_long)
    # Pick the range checker from the flat table, indexed by the two flags.
    checker_index = 2 * int(is_long) + int(is_signed)
    _INTEGER_CHECKERS[checker_index].CheckValue(result)
    return result
Parses an integer. Args: text: The text to parse. is_signed: True if a signed integer must be parsed. is_long: True if a long integer must be parsed. Returns: The integer value. Raises: ValueError: Thrown Iff the text is not a valid integer.
juraj-google-style
def shift(self, time: int) -> 'TimeslotCollection':
    """Return a new TimeslotCollection shifted by `time`.

    Args:
        time: time to be shifted by.
    """
    shifted = (Timeslot(slot.interval.shift(time), slot.channel)
               for slot in self.timeslots)
    return TimeslotCollection(*shifted)
Return a new TimeslotCollection shifted by `time`. Args: time: time to be shifted by
codesearchnet
def GetCompressedStreamTypeIndicators(cls, path_spec, resolver_context=None):
    """Determines if a file contains a supported compressed stream type.

    Args:
        path_spec (PathSpec): path specification.
        resolver_context (Optional[Context]): resolver context, where None
            represents the built-in context which is not multi process safe.

    Returns:
        list[str]: supported format type indicators.
    """
    # Lazily build and cache the specification store / remainder list.
    if (cls._compressed_stream_remainder_list is None or
            cls._compressed_stream_store is None):
        specification_store, remainder_list = cls._GetSpecificationStore(
            definitions.FORMAT_CATEGORY_COMPRESSED_STREAM)
        cls._compressed_stream_remainder_list = remainder_list
        cls._compressed_stream_store = specification_store
    # Lazily build and cache the signature scanner.
    if cls._compressed_stream_scanner is None:
        cls._compressed_stream_scanner = cls._GetSignatureScanner(
            cls._compressed_stream_store)
    return cls._GetTypeIndicators(
        cls._compressed_stream_scanner,
        cls._compressed_stream_store,
        cls._compressed_stream_remainder_list,
        path_spec,
        resolver_context=resolver_context)
Determines if a file contains a supported compressed stream types. Args: path_spec (PathSpec): path specification. resolver_context (Optional[Context]): resolver context, where None represents the built-in context which is not multi process safe. Returns: list[str]: supported format type indicators.
codesearchnet
def inputs(dataset, batch_size=None, num_preprocess_threads=None):
    """Generate batches of ImageNet images for evaluation.

    Use this function as the inputs for evaluating a network. Some minimal
    image preprocessing occurs during evaluation, including central cropping
    and resizing of the image to fit the network.

    Args:
        dataset: instance of Dataset class specifying the dataset.
        batch_size: integer, number of examples in batch.
        num_preprocess_threads: integer, total number of preprocessing
            threads; None defaults to FLAGS.num_preprocess_threads.

    Returns:
        images: Images. 4D tensor of size
            [batch_size, FLAGS.image_size, image_size, 3].
        labels: 1-D integer Tensor of [FLAGS.batch_size].
    """
    batch_size = batch_size or FLAGS.batch_size
    # Force the input pipeline onto the CPU so the GPU stays free for the
    # model itself.
    with tf.device('/cpu:0'):
        images, labels = batch_inputs(
            dataset,
            batch_size,
            train=False,
            num_preprocess_threads=num_preprocess_threads,
            num_readers=1)
    return (images, labels)
Generate batches of ImageNet images for evaluation. Use this function as the inputs for evaluating a network. Note that some (minimal) image preprocessing occurs during evaluation including central cropping and resizing of the image to fit the network. Args: dataset: instance of Dataset class specifying the dataset. batch_size: integer, number of examples in batch num_preprocess_threads: integer, total number of preprocessing threads but None defaults to FLAGS.num_preprocess_threads. Returns: images: Images. 4D tensor of size [batch_size, FLAGS.image_size, image_size, 3]. labels: 1-D integer Tensor of [FLAGS.batch_size].
codesearchnet
def QA_data_ctptick_resample(tick, type_='1min'):
    """Resample CTP tick (transaction) data into bars of arbitrary period.

    Args:
        tick: tick DataFrame carrying at least ``TradingDay``,
            ``LastPrice``, ``volume`` and ``code`` columns on a time index.
        type_ (str): pandas resample rule, e.g. ``'1min'``.

    Returns:
        pandas.DataFrame: OHLC bars indexed by (datetime, code).
    """
    resx = pd.DataFrame()
    _temp = set(tick.TradingDay)
    # Process one trading day at a time.
    for item in _temp:
        _data = tick.query('TradingDay=="{}"'.format(item))
        try:
            # Zero out volume during the 20:00-21:00 break window.
            _data.loc[time(20, 0):time(21, 0), 'volume'] = 0
        except:
            pass
        # Cumulative volume -> per-tick volume.
        _data.volume = _data.volume.diff()
        _data = _data.assign(amount=_data.LastPrice * _data.volume)
        # Each trading session is resampled separately: overnight tail,
        # morning, afternoon, and the evening session.
        _data0 = _data[time(0, 0):time(2, 30)].resample(
            type_, closed='right', base=30, loffset=type_
        ).apply(
            {
                'LastPrice': 'ohlc',
                'volume': 'sum',
                'code': 'last',
                'amount': 'sum'
            }
        )
        _data1 = _data[time(9, 0):time(11, 30)].resample(
            type_, closed='right', base=30, loffset=type_
        ).apply(
            {
                'LastPrice': 'ohlc',
                'volume': 'sum',
                'code': 'last',
                'amount': 'sum'
            }
        )
        _data2 = _data[time(13, 1):time(15, 0)].resample(
            type_, closed='right', base=30, loffset=type_
        ).apply(
            {
                'LastPrice': 'ohlc',
                'volume': 'sum',
                'code': 'last',
                'amount': 'sum'
            }
        )
        _data3 = _data[time(21, 0):time(23, 59)].resample(
            type_, closed='left', loffset=type_
        ).apply(
            {
                'LastPrice': 'ohlc',
                'volume': 'sum',
                'code': 'last',
                'amount': 'sum'
            }
        )
        resx = resx.append(_data0).append(_data1).append(_data2).append(_data3)
    # Drop the top level of the (field, ohlc) MultiIndex columns.
    resx.columns = resx.columns.droplevel(0)
    return resx.reset_index().drop_duplicates().set_index(['datetime', 'code']).sort_index()
tick采样成任意级别分钟线 Arguments: tick {[type]} -- transaction Returns: [type] -- [description]
juraj-google-style
def nonoverlap(item_a, time_a, item_b, time_b, max_value):
    """Percentage of pixels in each object that do not overlap with the
    other object.

    Args:
        item_a: STObject from the first set in ObjectMatcher.
        time_a: Time integer being evaluated.
        item_b: STObject from the second set in ObjectMatcher.
        time_b: Time integer being evaluated.
        max_value: Maximum distance value used as scaling value and upper
            constraint.

    Returns:
        Distance value between 0 and 1.
    """
    miss_fraction = 1 - item_a.count_overlap(time_a, item_b, time_b)
    return np.minimum(miss_fraction, max_value) / float(max_value)
Percentage of pixels in each object that do not overlap with the other object Args: item_a: STObject from the first set in ObjectMatcher time_a: Time integer being evaluated item_b: STObject from the second set in ObjectMatcher time_b: Time integer being evaluated max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
codesearchnet
def du(*components, **kwargs):
    """Get the size of a file in bytes or as a human-readable string.

    Arguments:
        *components (str[]): Path to file.
        **kwargs: If "human_readable" is True, return a formatted string,
            e.g. "976.6 KiB" (default True).

    Returns:
        int or str: If "human_readable" kwarg is True, return str, else int.

    Raises:
        Error: if the file does not exist.
    """
    human_readable = kwargs.get("human_readable", True)
    _path = path(*components)
    if not exists(_path):
        raise Error("file '{}' not found".format(_path))
    size = os.stat(_path).st_size
    return naturalsize(size) if human_readable else size
Get the size of a file in bytes or as a human-readable string. Arguments: *components (str[]): Path to file. **kwargs: If "human_readable" is True, return a formatted string, e.g. "976.6 KiB" (default True) Returns: int or str: If "human_readble" kwarg is True, return str, else int.
juraj-google-style
def fill(self, config, section):
    """Fill data from a given configuration section.

    Args:
        config (configparser): the configuration file.
        section (str): the section to use.
    """
    # Missing section: leave the current attributes untouched.
    if not config.has_section(section):
        return
    default_url = self.DEFAULT_REPOSITORIES.get(self.name, '')
    self.url = RepositoryURL(
        config_get(config, section, 'repository', default_url))
    self.username = config_get(config, section, 'username', '')
    self.password = config_get(config, section, 'password', '')
Fill data from a given configuration section. Args: config (configparser): the configuration file section (str): the section to use
juraj-google-style
def pxbounds(self, geom, clip=False):
    """Returns the bounds of a geometry object in pixel coordinates.

    Args:
        geom: Shapely geometry object, GeoJSON as a Python dictionary, or
            a WKT string.
        clip (bool): Clip the bounds to the min/max extent of the image.

    Returns:
        tuple: bounds in pixels (min x, min y, max x, max y), clipped to
        image bounds when ``clip`` is True.

    Raises:
        TypeError: if ``geom`` cannot be interpreted as a geometry.
        ValueError: if the geometry lies entirely outside the image.
    """
    try:
        # Accept GeoJSON features/geometries, shapely geometries and WKT.
        if isinstance(geom, dict):
            if ('geometry' in geom):
                geom = shape(geom['geometry'])
            else:
                geom = shape(geom)
        elif isinstance(geom, BaseGeometry):
            # NOTE(review): shape() on an existing shapely geometry looks
            # redundant -- confirm whether a defensive copy is intended.
            geom = shape(geom)
        else:
            geom = wkt.loads(geom)
    except:
        # Bare except also masks unexpected errors; kept as-is.
        raise TypeError('Invalid geometry object')
    if geom.disjoint(shape(self)):
        raise ValueError('Geometry outside of image bounds')
    # Transform world coordinates to pixel coordinates via the inverse
    # geo-transform, then take the bounding box.
    (xmin, ymin, xmax, ymax) = ops.transform(self.__geo_transform__.rev, geom).bounds
    (_nbands, ysize, xsize) = self.shape
    if clip:
        xmin = max(xmin, 0)
        ymin = max(ymin, 0)
        xmax = min(xmax, xsize)
        ymax = min(ymax, ysize)
    return (xmin, ymin, xmax, ymax)
Returns the bounds of a geometry object in pixel coordinates Args: geom: Shapely geometry object or GeoJSON as Python dictionary or WKT string clip (bool): Clip the bounds to the min/max extent of the image Returns: list: bounds in pixels [min x, min y, max x, max y] clipped to image bounds
codesearchnet
def _run_post_configure_callbacks(self, configure_args):
    """Run all post configure callbacks we have stored.

    Callbacks receive the resulting configuration (immutable) as the first
    argument and the arguments passed to :meth:`configure` as the second.
    Return values from callbacks are ignored.

    Args:
        configure_args (list[object]): The full list of arguments passed
            to :meth:`configure`.

    Returns:
        None: Does not return anything.
    """
    frozen_config = ImmutableDict(self.config)
    repeating = copy.copy(self._post_configure_callbacks['multiple'])
    one_shot = copy.copy(self._post_configure_callbacks['single'])
    # One-shot callbacks are consumed: clear the list *before* invoking so
    # a callback that re-registers itself is preserved for next time.
    self._post_configure_callbacks['single'] = []
    for callback in repeating:
        callback(frozen_config, configure_args)
    for callback in one_shot:
        callback(frozen_config, configure_args)
Run all post configure callbacks we have stored. Functions are passed the configuration that resulted from the call to :meth:`configure` as the first argument, in an immutable form; and are given the arguments passed to :meth:`configure` for the second argument. Returns from callbacks are ignored in all fashion. Args: configure_args (list[object]): The full list of arguments passed to :meth:`configure`. Returns: None: Does not return anything.
codesearchnet
def apply_sync(processor: Processor | PartProcessor, content: Iterable[ProcessorPart]) -> list[ProcessorPart]:
    """Applies a Processor synchronously.

    When a part processor is given as input, this method will first turn it
    into a processor and then process the content concurrently.

    Args:
        processor: the Processor to apply to the content.
        content: a collection of ProcessorParts on which to apply the
            Processor.

    Returns:
        the content, with the Processor applied to each content part.
    """
    # Drive the async implementation to completion on a fresh event loop.
    return asyncio.run(apply_async(processor, content))
Applies a Processor synchronously. When a part processor is given as input, this method will first turn it into a processor and then will process the content concurrently. Args: processor: the Processor to apply to the content. content: a collection of ProcessorParts on which to apply the Processor. Returns: the content, with the Processor applied to each content part.
github-repos
def get_book_progress(self, asin):
    """Returns the progress data available for a book.

    NOTE: A summary of the two progress formats can be found in the
    docstring for `ReadingProgress`.

    Args:
        asin: The asin of the book to be queried.

    Returns:
        A `ReadingProgress` instance corresponding to the book associated
        with `asin`.
    """
    raw_progress = self._get_api_call('get_book_progress', '"%s"' % asin)
    return KindleCloudReaderAPI._kbp_to_progress(raw_progress)
Returns the progress data available for a book. NOTE: A summary of the two progress formats can be found in the docstring for `ReadingProgress`. Args: asin: The asin of the book to be queried. Returns: A `ReadingProgress` instance corresponding to the book associated with `asin`.
juraj-google-style
def queryString_required_ClassVersion(strList):
    """A decorator checking whether required queryString keys are present.

    Args:
        strList: allowed/required queryString keys.

    Raises:
        Http404: if the request is missing any of the required keys.
    """
    def _dec(function):
        @wraps(function)
        def _wrap(classInstance, request, *args, **kwargs):
            for key in strList:
                if key not in request.GET:
                    raise Http404("api does not exist")
            return function(classInstance, request, *args, **kwargs)
        return _wrap
    return _dec
A decorator checking whether the queryString keys are valid or not Args: strList: allowed queryString keys Returns: if the request contains an invalid or missing queryString key, it will raise an exception.
juraj-google-style
def get_dataset_split(tmp_dir, split, use_control_set):
    """Gives the file paths with regards to the given split.

    Args:
        tmp_dir: temp directory.
        split: dataset split.
        use_control_set: uses control dataset if true.

    Returns:
        list of file paths, or None when ``use_control_set`` is True.
    """
    if not use_control_set:
        splits = {
            problem.DatasetSplit.TRAIN: [
                f for f in tf.gfile.Glob(
                    os.path.join(tmp_dir, "train-novels*.txt"))
            ],
            problem.DatasetSplit.EVAL: [
                os.path.join(tmp_dir,
                             "lambada_control_test_data_plain_text.txt")
            ],
        }
        return splits[split]
    # NOTE(review): no paths are defined for the control set; callers get
    # None in that case -- confirm this is intentional.
Gives the file paths with regards to the given split. Args: tmp_dir: temp directory split: dataset split use_control_set: uses control dataset if true. Returns: list of file paths.
juraj-google-style
def recipe_drive_copy(config, auth_read, source, destination):
    """Copy a drive document.

    Args:
        auth_read (authentication): Credentials used for reading data.
        source (string): Name or URL of document to copy from.
        destination (string): Name of document to copy to.
    """
    task = {
        'auth': auth_read,
        'copy': {'source': source, 'destination': destination},
    }
    drive(config, task)
Copy a drive document. Args: auth_read (authentication) - Credentials used for reading data. source (string) - Name or URL of document to copy from. destination (string) - Name document to copy to.
github-repos
def find_trivial_constructor(type_):
    """Returns reference to trivial constructor.

    Args:
        type_ (declarations.class_t): the class to be searched.

    Returns:
        declarations.constructor_t: the trivial constructor, or None when
        the class has none.
    """
    assert isinstance(type_, class_declaration.class_t)
    candidates = type_.constructors(
        lambda x: is_trivial_constructor(x),
        recursive=False,
        allow_empty=True)
    return candidates[0] if candidates else None
Returns reference to trivial constructor. Args: type_ (declarations.class_t): the class to be searched. Returns: declarations.constructor_t: the trivial constructor
juraj-google-style
def flush_redis_unsafe(redis_client=None):
    """This removes some non-critical state from the primary Redis shard.

    This removes the log files as well as the event log from Redis. This can
    be used to try to address out-of-memory errors caused by the accumulation
    of metadata in Redis. However, it will only partially address the issue
    as much of the data is in the task table (and object table), which are
    not flushed.

    Args:
        redis_client: optional, if not provided then ray.init() must have
            been called.
    """
    if redis_client is None:
        ray.worker.global_worker.check_connected()
        redis_client = ray.worker.global_worker.redis_client

    def _delete_keys(pattern, description):
        # DELETE requires at least one key, so guard the empty case.
        keys = redis_client.keys(pattern)
        num_deleted = redis_client.delete(*keys) if keys else 0
        print("Deleted {} {} from Redis.".format(num_deleted, description))

    # The two deletions shared identical logic; factored into one helper.
    _delete_keys("LOGFILE:*", "log files")
    _delete_keys("event_log:*", "event logs")
This removes some non-critical state from the primary Redis shard. This removes the log files as well as the event log from Redis. This can be used to try to address out-of-memory errors caused by the accumulation of metadata in Redis. However, it will only partially address the issue as much of the data is in the task table (and object table), which are not flushed. Args: redis_client: optional, if not provided then ray.init() must have been called.
juraj-google-style
def sample(self, hashes):
    """Get the information about a sample based on its hash.

    Args:
        hashes: an enumerable of strings as hashes.

    Returns:
        An enumerable of arrays which contains the information about the
        original samples.
    """
    return self._multi_get('opendns-sample', u'sample/{0}', hashes)
Get the information about a sample based on its hash. Args: hashes: an enumerable of strings as hashes Returns: An enumerable of arrays which contains the information about the original samples
codesearchnet
def cast_to_seq(obj, alphabet=IUPAC.extended_protein):
    """Return a Seq representation of a string or SeqRecord object.

    Args:
        obj (str, Seq, SeqRecord): Sequence string or Biopython SeqRecord
            object.
        alphabet: See Biopython SeqRecord docs.

    Returns:
        Seq: Seq representation of the sequence.

    Raises:
        ValueError: if obj is not str, Seq or SeqRecord.
    """
    if isinstance(obj, Seq):
        return obj
    if isinstance(obj, SeqRecord):
        return obj.seq
    if isinstance(obj, str):
        # Sequences are normalized to upper case.
        return Seq(obj.upper(), alphabet)
    raise ValueError('Must provide a string, Seq, or SeqRecord object.')
Return a Seq representation of a string or SeqRecord object. Args: obj (str, Seq, SeqRecord): Sequence string or Biopython SeqRecord object alphabet: See Biopython SeqRecord docs Returns: Seq: Seq representation of the sequence
juraj-google-style
async def get_me(self, input_peer=False):
    """Gets "me" (the self user) which is currently authenticated, or None
    if the request fails (hence, not authenticated).

    Args:
        input_peer (`bool`, optional): Whether to return the
            :tl:`InputPeerUser` version or the normal :tl:`User`. This can
            be useful if you just need to know the ID of yourself.

    Returns:
        Your own :tl:`User`, or None when unauthorized.
    """
    # Serve from the cache when only the input peer is wanted.
    if input_peer and self._self_input_peer:
        return self._self_input_peer
    try:
        me = (await self(
            functions.users.GetUsersRequest([types.InputUserSelf()])))[0]
        self._bot = me.bot
        if not self._self_input_peer:
            self._self_input_peer = utils.get_input_peer(
                me, allow_self=False)
        return self._self_input_peer if input_peer else me
    except errors.UnauthorizedError:
        return None
Gets "me" (the self user) which is currently authenticated, or None if the request fails (hence, not authenticated). Args: input_peer (`bool`, optional): Whether to return the :tl:`InputPeerUser` version or the normal :tl:`User`. This can be useful if you just need to know the ID of yourself. Returns: Your own :tl:`User`.
juraj-google-style
def weights_to_cpu(state_dict):
    """Copy a model state_dict to CPU.

    Args:
        state_dict (OrderedDict): Model weights on GPU.

    Returns:
        OrderedDict: The same weights moved to CPU.
    """
    return OrderedDict(
        (key, tensor.cpu()) for key, tensor in state_dict.items())
Copy a model state_dict to cpu. Args: state_dict (OrderedDict): Model weights on GPU. Returns: OrderedDict: Model weights on CPU.
juraj-google-style
def global_horizontal_illuminance(self, value=999999.0):
    """Corresponds to IDD Field `global_horizontal_illuminance`.

    Will be missing if >= 999900.

    Args:
        value (float): value for IDD Field `global_horizontal_illuminance`.
            Unit: lux. Must be >= 0.0. Missing value: 999999.0. If `value`
            is None it will not be checked against the specification and is
            assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value.
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type float for field '
                '`global_horizontal_illuminance`'.format(value))
        if value < 0.0:
            raise ValueError(
                'value need to be greater or equal 0.0 for field '
                '`global_horizontal_illuminance`')
    self._global_horizontal_illuminance = value
Corresponds to IDD Field `global_horizontal_illuminance` will be missing if >= 999900 Args: value (float): value for IDD Field `global_horizontal_illuminance` Unit: lux value >= 0.0 Missing value: 999999.0 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def get_version_string(version):
    """Translate a version tuple into a string.

    Specify the __version__ as a tuple for more precise comparisons, and
    translate it to __version_string__ for when that's needed.

    Args:
        version (Tuple[int, int, int, str]): three ints and an optional
            string.

    Returns:
        version_string (str): the tuple translated into a string per
        semver.org.

    Raises:
        Exception: for tuples that are not 3 or 4 elements long.
    """
    if len(version) == 3:
        return '%d.%d.%d' % version
    if len(version) == 4:
        return '%d.%d.%d-%s' % version
    raise Exception(
        'Version tuple is non-semver-compliant {} length!'.format(len(version)))
Translate a version tuple into a string. Specify the __version__ as a tuple for more precise comparisons, and translate it to __version_string__ for when that's needed. This function exists primarily for easier unit testing. Args: version (Tuple[int, int, int, str]): three ints and an optional string. Returns: version_string (str): the tuple translated into a string per semver.org
codesearchnet
def _CheckCacheFileForMatch(self, cache_filename, scopes):
    """Checks the cache file to see if it matches the given credentials.

    Args:
        cache_filename: Cache filename to check.
        scopes: Scopes for the desired credentials.

    Returns:
        List of scopes (if cache matches) or None.
    """
    creds = {
        'scopes': sorted(list(scopes)) if scopes else None,
        'svc_acct_name': self.__service_account_name,
    }
    cache_file = _MultiProcessCacheFile(cache_filename)
    try:
        cached_creds_str = cache_file.LockedRead()
        if not cached_creds_str:
            return None
        cached_creds = json.loads(cached_creds_str)
        if creds['svc_acct_name'] == cached_creds['svc_acct_name']:
            # A None scope request matches any cached scopes.
            if creds['scopes'] in (None, cached_creds['scopes']):
                return cached_creds['scopes']
    except KeyboardInterrupt:
        raise
    except:
        # Best-effort cache read: any other failure is treated as a miss.
        pass
Checks the cache file to see if it matches the given credentials. Args: cache_filename: Cache filename to check. scopes: Scopes for the desired credentials. Returns: List of scopes (if cache matches) or None.
juraj-google-style
def set_timeout(self, network_timeout):
    """Set the timeout for existing and future Clients.

    Closes all current connections, causing future operations to create
    new Clients with the network_timeout passed through the
    socketTimeoutMS optional parameter.

    Args:
        network_timeout: The new value in milliseconds for the timeout.
    """
    # No-op when the timeout is unchanged, to avoid pointless reconnects.
    if network_timeout != self._network_timeout:
        self._network_timeout = network_timeout
        self._disconnect()
Set the timeout for existing and future Clients. Close all current connections. This will cause future operations to create new Clients with the network_timeout passed through socketTimeoutMS optional parameter. Args: network_timeout: The new value in milliseconds for the timeout.
juraj-google-style
def _any(objs, query):
    """Whether any of a collection of objects satisfies a query predicate.

    Args:
        objs (seq[Model or Document]):
        query (callable):

    Returns:
        True, if ``query(obj)`` is True for some object in ``objs``,
        else False.
    """
    for obj in objs:
        if isinstance(obj, Document):
            # Recurse into a Document's root models.
            if _any(obj.roots, query):
                return True
        elif any(query(ref) for ref in obj.references()):
            return True
    return False
Whether any of a collection of objects satisfies a given query predicate Args: objs (seq[Model or Document]) : query (callable) Returns: True, if ``query(obj)`` is True for some object in ``objs``, else False
juraj-google-style
def __init__(self, xid=None, flags=None, miss_send_len=None):
    """Create a SetConfig with the optional parameters below.

    Args:
        xid (int): xid to be used on the message header.
        flags (~pyof.v0x01.controller2switch.common.ConfigFlag):
            OFPC_* flags.
        miss_send_len (int): UBInt16 max bytes of new flow that the
            datapath should send to the controller.
    """
    super().__init__(xid, flags, miss_send_len)
    # Tag the shared header with this message's OpenFlow type.
    self.header.message_type = Type.OFPT_SET_CONFIG
Create a SetConfig with the optional parameters below. Args: xid (int): xid to be used on the message header. flags (~pyof.v0x01.controller2switch.common.ConfigFlag): OFPC_* flags. miss_send_len (int): UBInt16 max bytes of new flow that the datapath should send to the controller.
juraj-google-style
def request(self, subject, callback, msg=None):
    """Publish a message with an implicit inbox listener as the reply.

    Message is optional.

    Args:
        subject (string): a string with the subject.
        callback (function): callback to be called with the reply.
        msg (string=None): payload string.

    Returns:
        The subscription id of the inbox listener.
    """
    inbox = self._build_inbox()
    sid = self.subscribe(inbox, callback)
    # Auto-unsubscribe after one reply.
    self.unsubscribe(sid, 1)
    self.publish(subject, msg, inbox)
    return sid
Publish a message with an implicit inbox listener as the reply. Message is optional. Args: subject (string): a string with the subject callback (function): callback to be called msg (string=None): payload string
juraj-google-style
def __init__(self, labels=None, _deprecated=None, *, formatter=_formats.default_formatter):
    """Create an empty table with column labels.

    >>> tiles = Table(make_array('letter', 'count', 'points'))
    >>> tiles
    letter | count | points

    Args:
        ``labels`` (list of strings): The column labels.

        ``formatter`` (Formatter): An instance of :class:`Formatter` that
            formats the columns' values.
    """
    self._columns = collections.OrderedDict()
    self._formats = dict()
    self.formatter = formatter
    if _deprecated is not None:
        warnings.warn("Two-argument __init__ is deprecated. Use Table().with_columns(...)", FutureWarning)
        # Legacy call order was (columns, labels).
        columns, labels = labels, _deprecated
        columns = columns if columns is not None else []
        labels = labels if labels is not None else []
        assert len(labels) == len(columns), 'label/column number mismatch'
    else:
        labels = labels if labels is not None else []
        columns = [[] for _ in labels]
    # `is 0` compared object identity and only worked thanks to CPython's
    # small-int caching; numeric equality is the correct comparison.
    self._num_rows = 0 if len(columns) == 0 else len(columns[0])
    for column, label in zip(columns, labels):
        self[label] = column
    self.take = _RowTaker(self)
    self.exclude = _RowExcluder(self)
Create an empty table with column labels. >>> tiles = Table(make_array('letter', 'count', 'points')) >>> tiles letter | count | points Args: ``labels`` (list of strings): The column labels. ``formatter`` (Formatter): An instance of :class:`Formatter` that formats the columns' values.
juraj-google-style
def _create(cls, model_class, *args, **kwargs): manager = cls._get_manager(model_class) return manager.create_user(*args, **kwargs)
Create a new user instance. Args: model_class: The type of model to create an instance of. args: Positional arguments to create the instance with. kwargs: Keyword arguments to create the instance with. Returns: A new user instance of the type specified by ``model_class``.
codesearchnet
def _explode_shorthand_ip_string(self):
    """Expand a shortened IPv6 address.

    Returns:
        A string, the expanded IPv6 address; network objects get a
        trailing ``/prefixlen``.
    """
    # Networks expand their network address; plain addresses expand
    # themselves.
    if isinstance(self, _BaseNet):
        ip_str = str(self.ip)
    else:
        ip_str = str(self)
    ip_int = self._ip_int_from_string(ip_str)
    parts = []
    # Emit the 16-bit hextets least-significant first, then reverse.
    # NOTE: `xrange` implies this module targets Python 2.
    for i in xrange(self._HEXTET_COUNT):
        parts.append('%04x' % (ip_int & 0xFFFF))
        ip_int >>= 16
    parts.reverse()
    if isinstance(self, _BaseNet):
        return '%s/%d' % (':'.join(parts), self.prefixlen)
    return ':'.join(parts)
Expand a shortened IPv6 address. Args: ip_str: A string, the IPv6 address. Returns: A string, the expanded IPv6 address.
juraj-google-style
def sanitize(s, normalize_whitespace=True, normalize_unicode=True,
             form='NFKC', enforce_encoding=True, encoding='utf-8'):
    """Normalize a string.

    Args:
        s (unicode string): input unicode string.
        normalize_whitespace (bool): if True, collapse all whitespace runs
            (including newlines) to single spaces and strip at both ends.
        normalize_unicode (bool): if True, normalize unicode to 'form'.
        form (str): unicode normalization form.
        enforce_encoding (bool): if True, round-trip through the target
            encoding, dropping characters it cannot represent.
        encoding (str): target encoding for the above.

    Returns:
        str: unicode output string.
    """
    text = s
    if enforce_encoding:
        text = text.encode(encoding, errors='ignore').decode(
            encoding, errors='ignore')
    if normalize_unicode:
        text = unicodedata.normalize(form, text)
    if normalize_whitespace:
        text = re.sub(r'\s+', ' ', text).strip()
    return text
Normalize a string Args: s (unicode string): input unicode string normalize_whitespace (bool): if True, normalize all whitespace to single spaces (including newlines), strip whitespace at start/end normalize_unicode (bool): if True, normalize unicode form to 'form' form (str): unicode form enforce_encoding (bool): if True, encode string to target encoding and re-decode, ignoring errors and stripping all characters not part of the encoding encoding (str): target encoding for the above Returns: str: unicode output string
codesearchnet
def get_metrics_by_kernel(rows: list[list[str]]) -> list[dict[str, tuple[str, str]]]:
    """Converts an ncu-rep table to a dictionary of metrics by kernel.

    The table layout is: row 0 = metric names, row 1 = units, remaining
    rows = one kernel each.

    Args:
        rows: ncu-rep table rows.

    Returns:
        One dict per kernel mapping metric name -> (value, unit).
    """
    # The original built a name->index map that was never used; removed.
    names = rows[0]
    units = rows[1]
    results = []
    for kernel in rows[2:]:
        results.append({
            name: (kernel[idx], units[idx]) for idx, name in enumerate(names)
        })
    return results
Converts ncu-rep table to a dictionary of metrics by kernel. Args: rows: ncu-rep table rows Returns: dictionary of metrics by kernel
github-repos
def db020(self, value=None):
    """Corresponds to IDD Field `db020`.

    Mean coincident wet-bulb temperature to dry-bulb temperature
    corresponding to 2.0% annual cumulative frequency of occurrence
    (warm conditions).

    Args:
        value (float): value for IDD Field `db020`. Unit: C. If `value`
            is None it will not be checked against the specification and
            is assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value.
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `db020`'.format(value))
    self._db020 = value
Corresponds to IDD Field `db020` mean coincident wet-bulb temperature to Dry-bulb temperature corresponding to 2.0% annual cumulative frequency of occurrence (warm conditions) Args: value (float): value for IDD Field `db020` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def PathToComponents(path):
    """Converts a canonical path representation to a list of components.

    Args:
        path: A canonical MySQL path representation.

    Returns:
        A sequence of path components.

    Raises:
        ValueError: if a non-empty path is not absolute.
    """
    precondition.AssertType(path, Text)
    if path and not path.startswith("/"):
        raise ValueError("Path '{}' is not absolute".format(path))
    if not path:
        return ()
    # Drop the empty component before the leading slash.
    return tuple(path.split("/")[1:])
Converts a canonical path representation to a list of components. Args: path: A canonical MySQL path representation. Returns: A sequence of path components.
juraj-google-style
def forward_ad(node, wrt, preserve_result=False, check_dims=True):
    """Perform forward-mode AD on an AST.

    This function analyses the AST to determine which variables are active
    and proceeds by taking the naive derivative. Before returning the primal
    and adjoint it annotates push and pop statements as such.

    Args:
        node: A `FunctionDef` AST node.
        wrt: A tuple of argument indices with respect to which we take the
            derivative.
        preserve_result: A boolean indicating whether the original
            non-differentiated function value should be returned.
        check_dims: A boolean indicating whether the provided derivatives
            should have the same shape as their corresponding arguments.

    Returns:
        mod: A `Module` node containing the naive primal and adjoint of the
            function which can be fed to the `split` and `joint` functions.
        required: A list of tuples of functions and argument indices. These
            functions were called by the function but did not have an
            adjoint.

    Raises:
        TypeError: if `node` is not a `FunctionDef`.
    """
    if (not isinstance(node, gast.FunctionDef)):
        raise TypeError
    cfg_obj = cfg.CFG.build_cfg(node)
    # Activity analysis: all formal arguments are treated as potential
    # active sources.
    cfg.Active(range(len(node.args.args))).visit(cfg_obj.entry)
    fad = ForwardAD(wrt, preserve_result, check_dims)
    node = fad.visit(node)
    # Annotate push/pop statements so later passes can split the primal
    # from the adjoint.
    node = annotate.find_stacks(node)
    node = gast.Module([node])
    anno.clearanno(node)
    return (node, fad.required)
Perform forward-mode AD on an AST. This function analyses the AST to determine which variables are active and proceeds by taking the naive derivative. Before returning the primal and adjoint it annotates push and pop statements as such. Args: node: A `FunctionDef` AST node. wrt: A tuple of argument indices with respect to which we take the derivative. preserve_result: A boolean indicating whether the original non-differentiated function value should be returned check_dims: A boolean indicating whether the provided derivatives should have the same shape as their corresponding arguments. Returns: mod: A `Module` node containing the naive primal and adjoint of the function which can be fed to the `split` and `joint` functions. required: A list of tuples of functions and argument indices. These functions were called by the function but did not have an adjoint.
codesearchnet
def add(self, value):
    """Add the value to this histogram.

    Args:
        value (int): The value. Values outside of ``10 <= x <= 600`` will
            be raised to ``10`` or reduced to ``600``.
    """
    # Clamp into the supported bucket range before counting.
    bucket = min(max(int(value), 10), 600)
    self._data.setdefault(bucket, 0)
    self._data[bucket] += 1
    self._len += 1
Add the value to this histogram. Args: value (int): The value. Values outside of ``10 <= x <= 600`` will be raised to ``10`` or reduced to ``600``.
juraj-google-style
def select_bucket_region(custom_bucket, hook_region, stacker_bucket_region,
                         provider_region):
    """Returns the appropriate region to use when uploading functions.

    Select the appropriate region for the bucket where lambdas are
    uploaded in.

    Args:
        custom_bucket (str, None): The custom bucket name provided by the
            `bucket` kwarg of the aws_lambda hook, if provided.
        hook_region (str): The contents of the `bucket_region` argument to
            the hook.
        stacker_bucket_region (str): The contents of the
            `stacker_bucket_region` global setting.
        provider_region (str): The region being used by the provider.

    Returns:
        str: The appropriate region string.
    """
    # Custom buckets defer to the hook's own region; otherwise the global
    # stacker bucket region wins. The provider region is the fallback.
    region = hook_region if custom_bucket else stacker_bucket_region
    return region or provider_region
Returns the appropriate region to use when uploading functions. Select the appropriate region for the bucket where lambdas are uploaded in. Args: custom_bucket (str, None): The custom bucket name provided by the `bucket` kwarg of the aws_lambda hook, if provided. hook_region (str): The contents of the `bucket_region` argument to the hook. stacker_bucket_region (str): The contents of the `stacker_bucket_region` global setting. provider_region (str): The region being used by the provider. Returns: str: The appropriate region string.
codesearchnet
def with_flat_values(self, new_values):
    """Returns a copy of `self` with `flat_values` replaced by `new_value`.

    Preserves cached row-partitioning tensors such as `self.cached_nrows`
    and `self.cached_value_rowids` if they have values.

    Args:
        new_values: Potentially ragged tensor that should replace
            `self.flat_values`. Must have `rank > 0`, and must have the same
            number of rows as `self.flat_values`.

    Returns:
        A `RaggedTensor`.
        `result.rank = self.ragged_rank + new_values.rank`.
        `result.ragged_rank = self.ragged_rank + new_values.ragged_rank`.
    """
    # Recurse through nested ragged values until the innermost layer.
    if isinstance(self._values, RaggedTensor):
        return self.with_values(self.values.with_flat_values(new_values))
    return self.with_values(_convert_to_ragged_tensor_values(new_values))
Returns a copy of `self` with `flat_values` replaced by `new_value`. Preserves cached row-partitioning tensors such as `self.cached_nrows` and `self.cached_value_rowids` if they have values. Args: new_values: Potentially ragged tensor that should replace `self.flat_values`. Must have `rank > 0`, and must have the same number of rows as `self.flat_values`. Returns: A `RaggedTensor`. `result.rank = self.ragged_rank + new_values.rank`. `result.ragged_rank = self.ragged_rank + new_values.ragged_rank`.
github-repos
def _project_TH2(self, hist: Hist) -> Any:
    """Perform the actual TH2 -> TH1 projection.

    This projection can only be to 1D.

    Args:
        hist (ROOT.TH2): Histogram from which the projection should be
            performed.

    Returns:
        ROOT.TH1: The projected histogram.

    Raises:
        ValueError: if more than one projection axis is configured.
    """
    if (len(self.projection_axes) != 1):
        raise ValueError(len(self.projection_axes), 'Invalid number of axes')
    projection_func_map = {
        TH1AxisType.x_axis.value: hist.ProjectionX,
        TH1AxisType.y_axis.value: hist.ProjectionY,
    }
    try:
        axis_type = self.projection_axes[0].axis_type.value
    except ValueError:
        # Fall back to the projector-level axis type when the axis does
        # not carry its own.
        axis_type = self.axis_type
    projection_func = projection_func_map[axis_type]
    logger.info(f'Projecting onto axis range {self.projection_axes[0].name} from hist {hist.GetName()}')
    return projection_func()
Perform the actual TH2 -> TH1 projection. This projection can only be to 1D. Args: hist (ROOT.TH2): Histogram from which the projections should be performed. Returns: ROOT.TH1: The projected histogram.
codesearchnet
def __init__(self, index: int, lo_freq: float = None,
             lo_freq_range: Tuple[float, float] = (0, float("inf"))):
    """Create new drive (d) channel.

    Args:
        index (int): index of the channel.
        lo_freq (float): default frequency of the LO (local oscillator).
        lo_freq_range (tuple): feasible range of the LO frequency.
    """
    # All state handling is delegated to the parent channel class.
    super().__init__(index, lo_freq, lo_freq_range)
Create new drive (d) channel. Args: index (int): index of the channel lo_freq (float): default frequency of LO (local oscillator) lo_freq_range (tuple): feasible range of LO frequency
juraj-google-style
def DeserializeFromDB(buffer):
    """Deserialize a full AccountState object.

    Args:
        buffer (bytes, bytearray, BytesIO): (Optional) data to create the
            stream from.

    Returns:
        AccountState: the deserialized account state.
    """
    stream = StreamManager.GetStream(buffer)
    reader = BinaryReader(stream)
    state = AccountState()
    # Populate the fresh account state from the binary stream.
    state.Deserialize(reader)
    StreamManager.ReleaseStream(stream)
    return state
Deserialize full object. Args: buffer (bytes, bytearray, BytesIO): (Optional) data to create the stream from. Returns: AccountState:
juraj-google-style
def get_box_comments(self, box_key):
    """Get the comments in the box identified by `box_key`.

    Args:
        box_key: key for the box whose comments are requested.

    Returns:
        Whatever the underlying request helper returns; by convention a
        (status code, list of comment dicts) pair.
    """
    parts = [self.api_uri, self.boxes_suffix, box_key, self.comments_suffix]
    uri = '/'.join(parts)
    return self._req('get', uri)
Gets the comments in a box.

Args:
    box_key: key for the box whose comments are requested.

Returns:
    tuple: (status code, list of comment dicts).
juraj-google-style
def createDomains(tlds, nicks=None, nicksFile=None):
    """Generate the candidate domains to be checked.

    Args:
        tlds (list): list of tld dicts, each with "tld" and "type" keys.
        nicks (list): list of aliases. Takes precedence over `nicksFile`.
        nicksFile (str): path to a file with one alias per line.

    Returns:
        list: list of dicts ({"domain", "type", "tld"}) to be checked.
        Empty when neither `nicks` nor `nicksFile` is provided.
    """
    if nicks is None and nicksFile is not None:
        # Load aliases from disk, one alias per line.
        with open(nicksFile, "r") as iF:
            nicks = iF.read().splitlines()
    if nicks is None:
        # Nothing to combine with the TLDs.
        return []
    # Same nick x tld iteration order as the original duplicated loops.
    return [
        {"domain": n + t["tld"], "type": t["type"], "tld": t["tld"]}
        for n in nicks
        for t in tlds
    ]
Method that globally permits to generate the domains to be checked. Args: ----- tlds: List of tlds. nicks: List of aliases. nicksFile: The filepath to the aliases file. Returns: -------- list: list of domains to be checked.
juraj-google-style
def read_int8(self, little_endian=True):
    """Read 1 byte as a signed integer value from the stream.

    Args:
        little_endian (bool): specify the endianness. Defaults to little
            endian.

    Returns:
        int: the signed byte value.
    """
    # struct format: '<' little endian, '>' big endian, 'b' signed char.
    prefix = '<' if little_endian else '>'
    return self.unpack(prefix + 'b')
Read 1 byte as a signed integer value from the stream. Args: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int:
codesearchnet
def plot_tree(ax, tree, plane='xy', diameter_scale=_DIAMETER_SCALE,
              linewidth=_LINEWIDTH, color=None, alpha=_ALPHA):
    """Plot a 2d figure of the tree's segments.

    Args:
        ax (matplotlib axes): axes to draw on.
        tree (neurom.core.Tree or neurom.core.Neurite): plotted tree.
        plane (str): any pair of 'xyz'.
        diameter_scale (float): scale factor multiplied with segment
            diameters before plotting.
        linewidth (float): all segments are plotted with this width, but
            only if diameter_scale=None.
        color (str or None): color of plotted values; None means default.
        alpha (float): transparency of plotted values.

    Note:
        If the tree contains one single point the plot will be empty since
        no segments can be constructed.
    """
    col0, col1 = _plane2col(plane)
    # Project each 3d segment onto the requested pair of axes.
    segments = []
    for seg in iter_segments(tree):
        start, end = seg[0], seg[1]
        segments.append(((start[col0], start[col1]),
                         (end[col0], end[col1])))
    collection = LineCollection(
        segments,
        color=_get_color(color, tree.type),
        linewidth=_get_linewidth(tree, diameter_scale=diameter_scale,
                                 linewidth=linewidth),
        alpha=alpha)
    ax.add_collection(collection)
Plots a 2d figure of the tree's segments Args: ax(matplotlib axes): on what to plot tree(neurom.core.Tree or neurom.core.Neurite): plotted tree plane(str): Any pair of 'xyz' diameter_scale(float): Scale factor multiplied with segment diameters before plotting linewidth(float): all segments are plotted with this width, but only if diameter_scale=None color(str or None): Color of plotted values, None corresponds to default choice alpha(float): Transparency of plotted values Note: If the tree contains one single point the plot will be empty since no segments can be constructed.
codesearchnet
def dbmin10years(self, value=None):
    """Corresponds to IDD Field `dbmin10years`.

    10-year return period value for minimum extreme dry-bulb temperature.

    Args:
        value (float): value for IDD Field `dbmin10years` (unit: C). If
            `value` is None it will not be checked against the
            specification and is assumed to be a missing value.

    Raises:
        ValueError: if `value` cannot be converted to float.
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type float for field '
                '`dbmin10years`'.format(value))
    self._dbmin10years = value
Corresponds to IDD Field `dbmin10years` 10-year return period values for minimum extreme dry-bulb temperature Args: value (float): value for IDD Field `dbmin10years` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def summarize(self, test_arr, vectorizable_token, sentence_list, limit=5):
    """Summarize input document by selecting its most anomalous sentences.

    Sequences are scored by the encoder/decoder reconstruction error; up to
    `limit` sequences are picked (lowest error first when
    `__normal_prior_flag` is set, highest error first otherwise) and any
    sentence containing their token text is returned.

    Args:
        test_arr: `np.ndarray` of observed data points.
        vectorizable_token: is-a `VectorizableToken`.
        sentence_list: `list` of all sentences.
        limit: The number of selected abstract sentences.

    Returns:
        `list` of selected (deduplicated) abstract sentences.

    Raises:
        TypeError: if `vectorizable_token` is not a `VectorizableToken`.
    """
    if isinstance(vectorizable_token, VectorizableToken) is False:
        raise TypeError()
    # Run inference only for its side effect of populating the
    # reconstruction error inside the controller.
    _ = self.inference(test_arr)
    score_arr = self.__encoder_decoder_controller.get_reconstruction_error()
    # Collapse per-element errors into one mean score per sequence.
    score_arr = score_arr.reshape((
        score_arr.shape[0],
        -1
    )).mean(axis=1)
    score_list = score_arr.tolist()
    abstract_list = []
    for i in range(limit):
        # Lowest error = most "normal" when a normal prior is assumed;
        # otherwise pick the highest-error (most anomalous) sequence.
        if self.__normal_prior_flag is True:
            key = score_arr.argmin()
        else:
            key = score_arr.argmax()
        score = score_list.pop(key)
        score_arr = np.array(score_list)
        # NOTE(review): `key` was computed against the shrinking score list
        # but indexes the unchanged `test_arr` below; after the first pop
        # the positions no longer line up. Verify this is intended.
        seq_arr = test_arr[key]
        token_arr = vectorizable_token.tokenize(seq_arr.tolist())
        # Match sentences on both space-joined and directly-joined tokens.
        s = " ".join(token_arr.tolist())
        _s = "".join(token_arr.tolist())
        for sentence in sentence_list:
            if s in sentence or _s in sentence:
                abstract_list.append(sentence)
        abstract_list = list(set(abstract_list))
        if len(abstract_list) >= limit:
            break
    return abstract_list
Summarize input document.

Args:
    test_arr: `np.ndarray` of observed data points.
    vectorizable_token: is-a `VectorizableToken`.
    sentence_list: `list` of all sentences.
    limit: The number of selected abstract sentences.

Returns:
    `list` of selected abstract sentences.
juraj-google-style
def update_display(self, force=False):
    """Updates display on the frontend.

    Retrieves the latest execution status by querying CacheManager and
    updates the display on the frontend. The assumption is that there is
    only one pipeline in a cell, because it clears up everything in the
    cell output every update cycle.

    Args:
        force: (bool) whether to force updating when no stats change
            happens.
    """
    with self._lock:
        stats_updated = False
        for pcoll_id, stats in self._pcollection_stats.items():
            cache_label = stats['cache_label']
            version = stats['version']
            # Re-read the sample only when the cached copy is stale
            # (or when a refresh is forced).
            if force or not self._cache_manager.is_latest_version(version, 'sample', cache_label):
                pcoll_list, version = self._cache_manager.read('sample', cache_label)
                stats['sample'] = list(pcoll_list)
                stats['version'] = version
                stats_updated = True
                if pcoll_id in self._analyzer.tl_referenced_pcoll_ids():
                    # Only top-level referenced PCollections get a textual
                    # "producer produced sample" line.
                    self._text_to_print[pcoll_id] = str('%s produced %s' % (self._producers[pcoll_id], interactive_pipeline_graph.format_sample(pcoll_list, 5)))
        if force or stats_updated:
            self._pipeline_graph.update_pcollection_stats(self._pcollection_stats)
            if IPython:
                from IPython import display
                # Clear the whole cell output before re-rendering the graph.
                display.clear_output(True)
                rendered_graph = self._renderer.render_pipeline_graph(self._pipeline_graph)
                display.display(display.HTML(rendered_graph))
            _display_progress('Running...')
            for text in self._text_to_print.values():
                if text != '':
                    _display_progress(text)
Updates display on the frontend. Retrieves the latest execution status by querying CacheManager and updates display on the fronend. The assumption is that there is only one pipeline in a cell, because it clears up everything in the cell output every update cycle. Args: force: (bool) whether to force updating when no stats change happens.
github-repos
def _merge_with(self, other: 'DynamicRaggedShape.Spec') -> 'DynamicRaggedShape.Spec':
    """Merges all information between two specs.

    Specs are expected to represent the same information modulo
    num_row_partitions. If the specs are of different ranks, then fail.

    Args:
        other: another Spec of the same rank.

    Returns:
        a Spec with the union of information.
    """
    # Normalize both specs to the same number of row partitions first.
    num_rp = max(self.num_row_partitions, other.num_row_partitions)
    lhs = self._with_num_row_partitions(num_rp)
    rhs = other._with_num_row_partitions(num_rp)
    merged_partitions = [
        rp_a._merge_with(rp_b)
        for rp_a, rp_b in zip(lhs._row_partitions, rhs._row_partitions)
    ]
    merged_inner = lhs._static_inner_shape.merge_with(rhs._static_inner_shape)
    # int64 wins whenever either side uses it.
    if lhs.dtype == dtypes.int32:
        dtype = rhs.dtype
    else:
        dtype = dtypes.int64
    return DynamicRaggedShape.Spec(merged_partitions, merged_inner, dtype=dtype)
Merges all information between two specs. Specs are expected to represent the same information modulo num_row_partitons. If the specs are of different ranks, then fail. Args: other: another Spec of the same rank. Returns: a Spec with the union of information.
github-repos
def parse(cls, args):
    """Parse command line arguments to construct a dictionary of command
    parameters that can be used to create a command.

    Args:
        args: sequence of arguments

    Returns:
        Dictionary that can be used in create method, or None when option
        parsing exited (e.g. --help).

    Raises:
        ParseError: when the arguments are not correct
    """
    try:
        (options, args) = cls.optparser.parse_args(args)
        if options.latin_statements is None and options.script_location is None:
            raise ParseError("One of script or it's location"
                             " must be specified",
                             cls.optparser.format_help())
    except OptionParsingError as e:
        raise ParseError(e.msg, cls.optparser.format_help())
    except OptionParsingExit as e:
        return None

    if options.script_location is not None:
        if options.latin_statements is not None:
            raise ParseError(
                "Both script and script_location cannot be specified",
                cls.optparser.format_help())

        # NOTE(review): the original source was garbled here (everything
        # after "s3:" was stripped); this restores the intended check that
        # a non-S3 location is a local file whose content is inlined.
        if ((options.script_location.find("s3://") != 0) and
                (options.script_location.find("s3n://") != 0)):
            try:
                s = open(options.script_location).read()
            except IOError as e:
                raise ParseError("Unable to open script location: %s" % str(e),
                                 cls.optparser.format_help())
            options.script_location = None
            options.latin_statements = s

        if (args is not None) and (len(args) > 0):
            # Extra k=v arguments become pig script parameters, which are
            # only supported for scripts that remain in S3.
            if options.latin_statements is not None:
                raise ParseError(
                    "Extra arguments can only be "
                    "supplied with a script_location in S3 right now",
                    cls.optparser.format_help())
            p = {}
            for a in args:
                kv = a.split('=')
                if len(kv) != 2:
                    raise ParseError("Arguments to pig script must be of this format k1=v1 k2=v2 k3=v3...")
                p[kv[0]] = kv[1]
            setattr(options, 'parameters', p)
    else:
        if (args is not None) and (len(args) > 0):
            raise ParseError(
                "Extra arguments can only be supplied with a script_location",
                cls.optparser.format_help())

    v = vars(options)
    v["command_type"] = "PigCommand"
    return v
Parse command line arguments to construct a dictionary of command parameters that can be used to create a command Args: `args`: sequence of arguments Returns: Dictionary that can be used in create method Raises: ParseError: when the arguments are not correct
juraj-google-style
def assert_rank_at_most(x, rank, data=None, summarize=None, message=None,
                        name=None):
    """Assert `x` has rank equal to `rank` or smaller.

    Example of adding a dependency to an operation:

    ```python
    with tf.control_dependencies([tf.assert_rank_at_most(x, 2)]):
      output = tf.reduce_sum(x)
    ```

    Args:
        x: Numeric `Tensor`.
        rank: Scalar `Tensor`.
        data: The tensors to print out if the condition is False. Defaults
            to error message and first few entries of `x`.
        summarize: Print this many entries of each tensor.
        message: A string to prefix to the default message.
        name: A name for this operation (optional). Defaults to
            "assert_rank_at_most".

    Returns:
        Op raising `InvalidArgumentError` unless `x` has specified rank or
        lower. If static checks determine `x` has correct rank, a `no_op`
        is returned.

    Raises:
        ValueError: If static checks determine `x` has wrong rank.
    """
    scope_name = name or 'assert_rank_at_most'
    with tf.compat.v2.name_scope(scope_name):
        # rank(x) <= rank is exactly an assert_less_equal on the ranks.
        return tf.compat.v1.assert_less_equal(
            tf.rank(x), rank, data=data, summarize=summarize, message=message)
Assert `x` has rank equal to `rank` or smaller. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.assert_rank_at_most(x, 2)]): output = tf.reduce_sum(x) ``` Args: x: Numeric `Tensor`. rank: Scalar `Tensor`. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of `x`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_rank_at_most". Returns: Op raising `InvalidArgumentError` unless `x` has specified rank or lower. If static checks determine `x` has correct rank, a `no_op` is returned. Raises: ValueError: If static checks determine `x` has wrong rank.
codesearchnet
def find_faces(self, image, draw_box=False):
    """Use the configured haarcascade to detect faces inside an image.

    Args:
        image: The image (RGB).
        draw_box: If True, each detected face is marked on `image` with a
            rectangle.

    Returns:
        The faces as returned by OpenCV's detectMultiScale method for
        cascades.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    faces = self.cascade.detectMultiScale(
        gray,
        scaleFactor=1.3,
        minNeighbors=5,
        minSize=(50, 50),
        flags=0)
    if draw_box:
        # Green 2px rectangles around every detection, drawn in place.
        for (x, y, w, h) in faces:
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    return faces
Uses a haarcascade to detect faces inside an image. Args: image: The image. draw_box: If True, the image will be marked with a rectangle. Return: The faces as returned by OpenCV's detectMultiScale method for cascades.
juraj-google-style
def convert_unicode(value):
    """Recursively encode unicode values to UTF-8 `str` (Python 2 only).

    Resolves the Python 2 issue of json loading text as unicode instead of
    str; containers are walked recursively, everything else is returned
    unchanged.

    Args:
        value: unicode/str/list/dict value to be converted.

    Returns:
        The same structure with every unicode leaf encoded as UTF-8 str.
    """
    if isinstance(value, unicode):
        return value.encode('utf-8')
    if isinstance(value, list):
        return [convert_unicode(item) for item in value]
    if isinstance(value, dict):
        converted = {}
        for key, val in value.iteritems():
            converted[convert_unicode(key)] = convert_unicode(val)
        return converted
    return value
Resolves python 2 issue with json loading in unicode instead of string Args: value (str): Unicode value to be converted Returns: (str): converted string
juraj-google-style
def remove_phenotype(self, institute, case, user, link, phenotype_id, is_group=False):
    """Remove an existing phenotype from a case.

    The HPO term is pulled from the case document and a 'remove_phenotype'
    event is recorded for the audit trail.

    Args:
        institute (dict): An Institute object.
        case (dict): Case object.
        user (dict): A User object.
        link (str): The url to be used in the event.
        phenotype_id (str): A phenotype id.
        is_group (bool): When True, the term is also removed from the
            case's phenotype groups.

    Returns:
        updated_case(dict): the case document after the update.
    """
    LOG.info('Removing HPO term from case {0}'.format(case['display_name']))
    if is_group:
        # Pull the term from both the plain terms and the group terms.
        updated_case = self.case_collection.find_one_and_update({'_id': case['_id']}, {'$pull': {'phenotype_terms': {'phenotype_id': phenotype_id}, 'phenotype_groups': {'phenotype_id': phenotype_id}}}, return_document=pymongo.ReturnDocument.AFTER)
    else:
        updated_case = self.case_collection.find_one_and_update({'_id': case['_id']}, {'$pull': {'phenotype_terms': {'phenotype_id': phenotype_id}}}, return_document=pymongo.ReturnDocument.AFTER)
    LOG.info('Creating event for removing phenotype term {0} from case {1}'.format(phenotype_id, case['display_name']))
    # Record the removal so it shows up in the case activity log.
    self.create_event(institute=institute, case=case, user=user, link=link, category='case', verb='remove_phenotype', subject=case['display_name'])
    LOG.debug('Case updated')
    return updated_case
Remove an existing phenotype from a case

Args:
    institute (dict): A Institute object
    case (dict): Case object
    user (dict): A User object
    link (dict): The url to be used in the event
    phenotype_id (str): A phenotype id
    is_group (bool): If True, also remove the term from the case's
        phenotype groups

Returns:
    updated_case(dict)
codesearchnet
def wait_for_tuning_job(self, job, poll=5):
    """Wait for an Amazon SageMaker hyperparameter tuning job to complete.

    Args:
        job (str): Name of the tuning job to wait for.
        poll (int): Polling interval in seconds (default: 5).

    Returns:
        (dict): Return value from the ``DescribeHyperParameterTuningJob``
        API.

    Raises:
        ValueError: If the hyperparameter tuning job fails.
    """
    def _poll_status():
        return _tuning_job_status(self.sagemaker_client, job)

    desc = _wait_until(_poll_status, poll)
    # Raises when the job ended in a failed state.
    self._check_job_status(job, desc, 'HyperParameterTuningJobStatus')
    return desc
Wait for an Amazon SageMaker hyperparameter tuning job to complete. Args: job (str): Name of the tuning job to wait for. poll (int): Polling interval in seconds (default: 5). Returns: (dict): Return value from the ``DescribeHyperParameterTuningJob`` API. Raises: ValueError: If the hyperparameter tuning job fails.
juraj-google-style
def CheckInputFromValidContext(op, input_op):
    """Returns whether `input_op` can be used from `op`s context.

    Conceptually, only inputs from op's while context or any ancestor while
    context (including outside of any context) are valid. In practice, there
    are many other edge cases as well.

    Args:
        op: Operation.
        input_op: Operation.

    Raises:
        ValueError: if input_op is from an invalid context.
    """
    op_ctxt = op._get_control_flow_context()
    input_ctxt = GetOutputContext(input_op)
    valid = False
    if not input_ctxt:
        # input_op isn't in any control flow context, so it's always valid.
        valid = True
    elif op_ctxt is input_ctxt:
        # Same context: trivially valid.
        valid = True
    else:
        # Compare the nearest enclosing *while* contexts of both ops.
        while_ctxt = GetContainingWhileContext(op_ctxt)
        input_while_ctxt = GetContainingWhileContext(input_ctxt)
        if while_ctxt is None:
            if input_while_ctxt is None:
                # Neither op is inside a while loop.
                valid = True
            # Enter and Switch nodes are allowed to consume values from
            # outside/inside loop boundaries by construction.
            if IsLoopEnter(op):
                valid = True
            if IsSwitch(op):
                valid = True
        elif IsContainingContext(while_ctxt, input_while_ctxt):
            # input is from an ancestor while context of op's loop.
            valid = True
        elif while_ctxt.grad_state and IsContainingContext(while_ctxt.grad_state.forward_context, input_while_ctxt):
            # op is in a gradient loop whose forward loop contains input.
            valid = True
        elif while_ctxt.grad_state and while_ctxt.grad_state.forward_context is input_while_ctxt._outer_context:
            valid = True
        elif input_while_ctxt.grad_state and input_while_ctxt.grad_state.forward_context is while_ctxt:
            # input is in a gradient loop of op's forward loop.
            valid = True
        elif input_while_ctxt.grad_state and input_ctxt.grad_state.forward_context.grad_state and (input_ctxt.grad_state.forward_context.grad_state.forward_context is while_ctxt):
            valid = True
    if not valid:
        # Build a detailed diagnostic; the short message is raised, the
        # long one (with tracebacks) goes to the info log.
        if while_ctxt:
            error_msg = f"Cannot use '{input_op.name}' as input to '{op.name}' because they are in different while loops."
        else:
            error_msg = f"Cannot use '{input_op.name}' as input to '{op.name}' because '{input_op.name}' is in a while loop."
        log_msg = error_msg
        log_msg += '\n\n%s while context: %s' % (op.name, while_ctxt)
        log_msg += '\n%s while context: %s' % (input_op.name, input_while_ctxt)
        log_msg += '\n\nTraceback for %s:\n%s\nTraceback for %s:\n%s\n' % (op.name, ''.join(traceback.format_list(op.traceback)), input_op.name, ''.join(traceback.format_list(input_op.traceback)))
        logging.info(log_msg)
        raise ValueError(error_msg + ' See info log for more details.')
Returns whether `input_op` can be used from `op`s context. Conceptually, only inputs from op's while context or any ancestor while context (including outside of any context) are valid. In practice, there are many other edge cases as well. Args: op: Operation input_op: Operation Raises: ValueError: if input_op is from an invalid context.
github-repos
def get(self, key, value):
    """Retrieve single group record by id or name.

    Supports resource cache.

    Keyword Args:
        id (str): Full Group ID.
        name (str): Group name.

    Raises:
        ValueError: No matching group found based on provided inputs.

    Returns:
        Group: Group instance matching provided inputs.
    """
    if key == 'id':
        # Direct lookup by full group id.
        response = self._swimlane.request('get', 'groups/{}'.format(value))
        return Group(self._swimlane, response.json())
    # Name lookup returns candidates; pick the exact name match.
    response = self._swimlane.request(
        'get', 'groups/lookup?name={}'.format(value))
    for group_data in response.json():
        if group_data.get('name') == value:
            return Group(self._swimlane, group_data)
    raise ValueError('Unable to find group with name "{}"'.format(value))
Retrieve single group record by id or name Supports resource cache Keyword Args: id (str): Full Group ID name (str): Group name Raises: TypeError: Unexpected or more than one keyword argument provided ValueError: No matching group found based on provided inputs Returns: Group: Group instance matching provided inputs
codesearchnet
def taylor_approx(target, stencil, values):
    """Use taylor series to approximate up to second order derivatives.

    Solves, for each target point, the linear system whose unknowns are the
    function value, gradient and (flattened upper-triangular) Hessian at
    that point, given function values at k = triangle(n + 1) stencil points.

    Args:
        target: An array of shape (..., n), a batch of n-dimensional points
            where one wants to approximate function value and derivatives.
        stencil: An array of shape broadcastable to (..., k, n), for each
            target point a set of k = triangle(n + 1) points to use on its
            approximation.
        values: An array of shape broadcastable to (..., k), the function
            value at each of the stencil points.

    Returns:
        An array of shape (..., k), for each target point the approximated
        function value, gradient and hessian evaluated at that point
        (flattened and in the same order as returned by derivative_names).
    """
    batch_shape, ndim = (target.shape[:-1], target.shape[-1])
    stencil = np.broadcast_to(stencil, batch_shape + (triangular(ndim + 1), ndim))
    values = np.broadcast_to(values, stencil.shape[:-1])
    # Displacements of each stencil point from its target point.
    delta_x = stencil - np.expand_dims(target, axis=-2)
    # Outer products delta_x delta_x^T give the quadratic terms.
    delta_xy = np.matmul(np.expand_dims(delta_x, axis=-1), np.expand_dims(delta_x, axis=-2))
    i = np.arange(ndim)
    j, k = np.triu_indices(ndim, k=1)
    # Columns: [1, linear terms, diagonal quadratic / 2, cross terms].
    coeffs = np.concatenate([np.ones(delta_x.shape[:-1] + (1,)), delta_x, delta_xy[..., i, i] / 2, delta_xy[..., j, k]], axis=-1)
    # Solve coeffs @ derivatives = values for each batch element.
    return np.squeeze(np.matmul(np.linalg.inv(coeffs), values[..., np.newaxis]), axis=-1)
Use taylor series to approximate up to second order derivatives. Args: target: An array of shape (..., n), a batch of n-dimensional points where one wants to approximate function value and derivatives. stencil: An array of shape broadcastable to (..., k, n), for each target point a set of k = triangle(n + 1) points to use on its approximation. values: An array of shape broadcastable to (..., k), the function value at each of the stencil points. Returns: An array of shape (..., k), for each target point the approximated function value, gradient and hessian evaluated at that point (flattened and in the same order as returned by derivative_names).
github-repos
def outer(x1, x2):
    """Compute the outer product of two vectors.

    Given two vectors `x1` and `x2`, the outer product is:

    ```
    out[i, j] = x1[i] * x2[j]
    ```

    Args:
        x1: First input tensor.
        x2: Second input tensor.

    Returns:
        Outer product of `x1` and `x2`.
    """
    # Eager path: delegate straight to the backend implementation.
    if not any_symbolic_tensors((x1, x2)):
        return backend.numpy.outer(x1, x2)
    # Symbolic path: build the op lazily.
    return Outer().symbolic_call(x1, x2)
Compute the outer product of two vectors. Given two vectors `x1` and `x2`, the outer product is: ``` out[i, j] = x1[i] * x2[j] ``` Args: x1: First input tensor. x2: Second input tensor. Returns: Outer product of `x1` and `x2`.
github-repos
def __init__(self, a_file, import_resolver=None):
    """Initializes the reader, consuming the file immediately.

    Args:
        a_file: The file to read in.
        import_resolver: a function that given a path will return a stream
            for the contents.

    Raises:
        PDDMError: if there are any issues.
    """
    self._sections = []
    # Read the whole file up front; processing happens lazily later.
    self._original_content = a_file.read()
    self._import_resolver = import_resolver
    self._processed_content = None
Initializes the file reading in the file. Args: a_file: The file to read in. import_resolver: a function that given a path will return a stream for the contents. Raises: PDDMError if there are any issues.
juraj-google-style
def wait_for_disappearance(self, timeout=120):
    """Block and wait until the UI element **disappears** within the given
    timeout.

    Args:
        timeout: maximum waiting time in seconds.

    Raises:
        PocoTargetTimeout: when the element is still present after
            `timeout` seconds.
    """
    deadline = time.time() + timeout
    while self.exists():
        # Poll at the poco-configured interval until the element is gone.
        self.poco.sleep_for_polling_interval()
        if time.time() > deadline:
            raise PocoTargetTimeout('disappearance', self)
Block and wait until the UI element **disappears** within the given timeout. Args: timeout: maximum waiting time in seconds Raises: PocoTargetTimeout: when timeout
juraj-google-style
def output_summary(fqn, action, changeset, params_diff,
                   replacements_only=False):
    """Log a summary of the changeset.

    Args:
        fqn (string): fully qualified name of the stack.
        action (string): action to include in the log message.
        changeset (list): AWS changeset.
        params_diff (list): A list of dictionaries detailing the
            differences between two parameters returned by
            :func:`stacker.actions.diff.diff_dictionaries`.
        replacements_only (bool, optional): when True, replacement entries
            are listed without the "Replacements:" header.
    """
    replacements = []
    changes = []
    for change in changeset:
        resource = change['ResourceChange']
        line = '- %s %s (%s)' % (resource['Action'],
                                 resource['LogicalResourceId'],
                                 resource['ResourceType'])
        # AWS reports Replacement as the string 'True'.
        if resource.get('Replacement') == 'True':
            replacements.append(line)
        else:
            changes.append(line)
    summary = ''
    if params_diff:
        summary += summarize_params_diff(params_diff)
    if replacements:
        if not replacements_only:
            summary += 'Replacements:\n'
        summary += '\n'.join(replacements)
    if changes:
        if summary:
            summary += '\n'
        summary += 'Changes:\n%s' % '\n'.join(changes)
    logger.info('%s %s:\n%s', fqn, action, summary)
Log a summary of the changeset. Args: fqn (string): fully qualified name of the stack action (string): action to include in the log message changeset (list): AWS changeset params_diff (list): A list of dictionaries detailing the differences between two parameters returned by :func:`stacker.actions.diff.diff_dictionaries` replacements_only (bool, optional): boolean for whether or not we only want to list replacements
codesearchnet
def row_lengths(self):
    """Returns the lengths of rows in this `RowPartition`.

    Returns:
        A 1-D integer Tensor with shape `[self.nrows]`. The returned
        tensor is nonnegative.
        `tf.reduce_sum(self.row_lengths) == self.nvals()`.
    """
    cached = self._row_lengths
    if cached is not None:
        return cached
    # Derive lengths from consecutive differences of the splits vector.
    splits = self._row_splits
    return splits[1:] - splits[:-1]
Returns the lengths of rows in this `RowPartition`. Returns: A 1-D integer Tensor with shape `[self.nrows]`. The returned tensor is nonnegative. `tf.reduce_sum(self.row_lengths) == self.nvals()`.
github-repos
def sample(self, signum, frame):
    """Samples the current stack and adds the result to self._stats.

    Args:
        signum: Signal that activates handler.
        frame: Frame on top of the stack when signal is handled.
    """
    stack = []
    current = frame
    # Walk frames from the signal point up to (not including) base_frame.
    while current and current != self.base_frame:
        code = current.f_code
        stack.append((code.co_name, code.co_filename, code.co_firstlineno))
        current = current.f_back
    self._stats[tuple(stack)] += 1
    # Re-arm the profiling timer for the next sample.
    signal.setitimer(signal.ITIMER_PROF, _SAMPLE_INTERVAL)
Samples current stack and adds result in self._stats. Args: signum: Signal that activates handler. frame: Frame on top of the stack when signal is handled.
codesearchnet
def pretty_description(description, wrap_at=None, indent=0):
    """Return a pretty formatted string given some text.

    Args:
        description (str): string to format.
        wrap_at (int): maximum length of a line. None means "use the
            console width"; a negative value is added to the console width.
        indent (int): level of indentation (number of spaces).

    Returns:
        str: pretty formatted string ('' for blank-only input).
    """
    if wrap_at is None or wrap_at < 0:
        width = console_width(default=79)
        if wrap_at is None:
            wrap_at = width
        else:
            wrap_at += width

    indent = ' ' * indent
    text_wrapper = textwrap.TextWrapper(
        width=wrap_at, replace_whitespace=False,
        initial_indent=indent, subsequent_indent=indent)

    # Normalize lines and drop leading/trailing blank lines.
    lines = [line.strip() for line in description.split('\n')]
    while lines and not lines[0]:
        del lines[0]
    while lines and not lines[-1]:
        del lines[-1]
    if not lines:
        # Fix: the original raised IndexError on empty/blank-only input.
        return ''

    # Split into paragraphs on blank lines, then wrap each paragraph.
    paragraphs = []
    paragraph = []
    for line in lines:
        if line:
            paragraph.append(line)
        else:
            paragraphs.append(paragraph)
            paragraph = []
    paragraphs.append(paragraph)
    return '\n\n'.join(
        text_wrapper.fill(' '.join(p)) for p in paragraphs)
Return a pretty formatted string given some text. Args: description (str): string to format. wrap_at (int): maximum length of a line. indent (int): level of indentation. Returns: str: pretty formatted string.
juraj-google-style
def get_knowledge_base(project_id, knowledge_base_id):
    """Gets a specific Knowledge base and prints its details.

    Args:
        project_id: The GCP project linked with the agent.
        knowledge_base_id: Id of the Knowledge base.
    """
    # Imported lazily so the module loads without the dialogflow package.
    import dialogflow_v2beta1 as dialogflow

    client = dialogflow.KnowledgeBasesClient()
    kb_path = client.knowledge_base_path(project_id, knowledge_base_id)
    kb = client.get_knowledge_base(kb_path)
    print('Got Knowledge Base:')
    print(' - Display Name: {}'.format(kb.display_name))
    print(' - Knowledge ID: {}'.format(kb.name))
Gets a specific Knowledge base. Args: project_id: The GCP project linked with the agent. knowledge_base_id: Id of the Knowledge base.
juraj-google-style
def detect_overflow(var, ctx):
    """Report whether the tensor contains any `nan` or `inf` entries.

    This is useful for detecting overflows/underflows and best to call
    right after the function that did some math that modified the tensor
    in question.

    This function contains a few other helper features that you can enable
    and tweak directly if you want to track various other things (flip the
    `if 0:` guards below).

    Args:
        var: the tensor variable to check.
        ctx: the message to print as a context.

    Return:
        `True` if `inf` or `nan` was detected, `False` otherwise.
    """
    has_nan = bool(torch.isnan(var).any().item())
    if has_nan:
        print(f'{ctx} has nans')
    has_inf = bool(torch.isinf(var).any().item())
    if has_inf:
        print(f'{ctx} has infs')

    if 0:
        # Count entries exceeding various magnitude thresholds.
        for threshold in (100, 1000, 10000):
            big = var[torch.ge(var.abs(), threshold)]
            if big.numel() > 0:
                print(f'{ctx}: n{threshold}={big.numel()}')
    if 0:
        print(f'min={var.min():9.2e} max={var.max():9.2e}')
    if 0:
        print(f'min={var.min():9.2e} max={var.max():9.2e} var={var.var():9.2e} mean={var.mean():9.2e} ({ctx})')

    return has_nan or has_inf
Report whether the tensor contains any `nan` or `inf` entries. This is useful for detecting overflows/underflows and best to call right after the function that did some math that modified the tensor in question. This function contains a few other helper features that you can enable and tweak directly if you want to track various other things. Args: var: the tensor variable to check ctx: the message to print as a context Return: `True` if `inf` or `nan` was detected, `False` otherwise
github-repos
def onTagDel(self, name, func):
    """Register a callback for tag deletion.

    Args:
        name (str): The name of the tag, or a tag glob (contains '*').
        func (function): The callback func(node, tagname, tagval).
    """
    is_glob = '*' in name
    if not is_glob:
        self.ontagdels[name].append(func)
        return
    # Glob patterns go into the dedicated glob matcher.
    self.ontagdelglobs.add(name, func)
Register a callback for tag deletion. Args: name (str): The name of the tag or tag glob. func (function): The callback func(node, tagname, tagval).
codesearchnet
def bulk_write(self, metrics):
    """Write multiple metrics to kafka in one request.

    Args:
        metrics (list): metric payloads to send to `self.topic`.
    """
    producer = self.producer
    try:
        for payload in metrics:
            producer.send(self.topic, payload)
        producer.flush()
    except (KafkaTimeoutError, NoBrokersAvailable) as exc:
        # Best effort: metrics are dropped on broker errors, only logged.
        logger.warning('bulk_write metrics %r failure %r', metrics, exc)
Write multiple metrics to kafka in one request Args: metrics (list):
juraj-google-style
def downstream(self, node):
    """Returns a list of all nodes this node has edges towards.

    Args:
        node (str): The node whose downstream nodes you want to find.

    Returns:
        list: A list of nodes that are immediately downstream from the
        node.

    Raises:
        KeyError: if `node` is not in the graph.
    """
    try:
        neighbors = self.graph[node]
    except KeyError:
        # Re-raise with the same human-readable message as before.
        raise KeyError('node %s is not in graph' % node)
    return list(neighbors)
Returns a list of all nodes this node has edges towards. Args: node (str): The node whose downstream nodes you want to find. Returns: list: A list of nodes that are immediately downstream from the node.
juraj-google-style
def scan_file(path):
    """Scan `path` for viruses using the ``clamscan`` program.

    Args:
        path (str): Relative or absolute path of file/directory you need
            to scan.

    Returns:
        dict: ``{filename: ("FOUND", "virus type")}`` or blank dict.

    Raises:
        AssertionError: When the file doesn't exist.
    """
    abs_path = os.path.abspath(path)
    assert os.path.exists(abs_path), "Unreachable file '%s'." % abs_path
    # clamscan exits 1 when an infection is found, so accept 0 and 1.
    output = sh.clamscan(abs_path, no_summary=True, infected=True,
                         _ok_code=[0, 1])
    return _parse_result(output)
Scan `path` for viruses using ``clamscan`` program. Args: path (str): Relative or absolute path of file/directory you need to scan. Returns: dict: ``{filename: ("FOUND", "virus type")}`` or blank dict. Raises: AssertionError: When the internal file doesn't exists.
juraj-google-style
def ApplyParsersToResponses(parser_factory, responses, flow_obj):
    """Parse responses with applicable parsers.

    Runs all single-response, multi-response, single-file and multi-file
    parsers registered for the artifact and collects their output.

    Args:
        parser_factory: A parser factory for specific artifact.
        responses: A list of responses from the client.
        flow_obj: An artifact collection flow.

    Returns:
        A list of (possibly parsed) responses. When no parser produced any
        output, the original responses are returned unchanged.
    """
    knowledge_base = flow_obj.state.knowledge_base

    parsed_responses = []

    if parser_factory.HasSingleResponseParsers():
        for response in responses:
            for parser in parser_factory.SingleResponseParsers():
                parsed_responses.extend(
                    parser.ParseResponse(knowledge_base, response,
                                         flow_obj.args.path_type))

    for parser in parser_factory.MultiResponseParsers():
        parsed_responses.extend(parser.ParseResponses(knowledge_base, responses))

    has_single_file_parsers = parser_factory.HasSingleFileParsers()
    has_multi_file_parsers = parser_factory.HasMultiFileParsers()

    if has_single_file_parsers or has_multi_file_parsers:
        # File parsers require StatEntry responses to resolve pathspecs.
        precondition.AssertIterableType(responses, rdf_client_fs.StatEntry)
        pathspecs = [response.pathspec for response in responses]
        # Open file contents through the relational DB when enabled,
        # falling back to AFF4 otherwise.
        if data_store.RelationalDBEnabled():
            filedescs = []
            for pathspec in pathspecs:
                client_path = db.ClientPath.FromPathSpec(flow_obj.client_id, pathspec)
                filedescs.append(file_store.OpenFile(client_path))
        else:
            filedescs = MultiOpenAff4File(flow_obj, pathspecs)

        if has_single_file_parsers:
            for response, filedesc in zip(responses, filedescs):
                for parser in parser_factory.SingleFileParsers():
                    parsed_responses.extend(
                        parser.ParseFile(knowledge_base, response.pathspec, filedesc))

        if has_multi_file_parsers:
            for parser in parser_factory.MultiFileParsers():
                parsed_responses.extend(
                    parser.ParseFiles(knowledge_base, pathspecs, filedescs))

    # Fall back to the raw responses when nothing was parsed.
    return parsed_responses or responses
Parse responses with applicable parsers. Args: parser_factory: A parser factory for specific artifact. responses: A list of responses from the client. flow_obj: An artifact collection flow. Returns: A list of (possibly parsed) responses.
juraj-google-style
def ws025(self, value=None):
    """Corresponds to IDD Field `ws025`.

    Wind speed corresponding to 2.5% annual cumulative frequency of
    occurrence.

    Args:
        value (float): value for IDD Field `ws025` (unit: m/s). If `value`
            is None it will not be checked against the specification and
            is assumed to be a missing value.

    Raises:
        ValueError: if `value` cannot be converted to float.
    """
    if value is None:
        self._ws025 = None
        return
    try:
        self._ws025 = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `ws025`'.format(value))
Corresponds to IDD Field `ws025` Wind speed corresponding to 2.5% annual cumulative frequency of occurrence Args: value (float): value for IDD Field `ws025` Unit: m/s if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def _QueryProcessStatus(self, process):
    """Queries a process to determine its status.

    Args:
        process (MultiProcessBaseProcess): process to query for its status.

    Returns:
        dict[str, str]: status values received from the worker process, or
        None when the process is no longer alive.
    """
    if not process.is_alive():
        return None
    # The RPC client was registered for this pid when the worker started.
    rpc_client = self._rpc_clients_per_pid.get(process.pid, None)
    return rpc_client.CallFunction()
Queries a process to determine its status. Args: process (MultiProcessBaseProcess): process to query for its status. Returns: dict[str, str]: status values received from the worker process.
codesearchnet
def _FormatSocketInet128Token(self, token_data):
    """Formats an Internet socket token as a dictionary of values.

    Args:
        token_data (bsm_token_data_sockinet64): AUT_SOCKINET128 token data.

    Returns:
        dict[str, str]: token values.
    """
    # Unknown socket families are reported as 'UNKNOWN'.
    protocol = bsmtoken.BSM_PROTOCOLS.get(token_data.socket_family, 'UNKNOWN')
    return {
        'protocols': protocol,
        'family': token_data.socket_family,
        'port': token_data.port_number,
        'address': self._FormatPackedIPv6Address(token_data.ip_addresss)}
Formats an Internet socket token as a dictionary of values. Args: token_data (bsm_token_data_sockinet64): AUT_SOCKINET128 token data. Returns: dict[str, str]: token values.
juraj-google-style
def _default_tolerance(dtype):
    """Returns a sensible default tolerance for comparing results of a
    given type.

    Args:
        dtype: A datatype.

    Returns:
        A float tolerance, or None for unrecognized dtypes.
    """
    # Half-precision types get the loosest tolerance.
    if dtype in (dtypes_lib.bfloat16.as_numpy_dtype, np.float16):
        return 0.005
    if dtype in (np.float32, np.complex64):
        return 0.001
    if dtype in (np.float64, np.complex128):
        return 1e-05
    return None
Returns a sensible default tolerance for comparing results of a given type. Args: dtype: A datatype.
github-repos
def __init__(self, strategy, replica_id_in_sync_group):
    """Creates a ReplicaContext.

    Args:
        strategy: A `tf.distribute.Strategy`.
        replica_id_in_sync_group: An integer, a `Tensor` or None. Prefer an
            integer whenever possible to avoid issues with nested
            `tf.function`. It accepts a `Tensor` only to be compatible
            with `tpu.replicate`.
    """
    self._strategy = strategy
    # Thread-mode marker used when entering this replica context.
    self._thread_context = _InReplicaThreadMode(self)
    if not (replica_id_in_sync_group is None or tensor_util.is_tf_type(replica_id_in_sync_group) or isinstance(replica_id_in_sync_group, int)):
        raise ValueError('replica_id_in_sync_group can only be an integer, a Tensor or None.')
    self._replica_id_in_sync_group = replica_id_in_sync_group
    if strategy:
        # The strategy maps the sync-group replica id to a local replica id.
        self._local_replica_id = strategy.extended._get_local_replica_id(replica_id_in_sync_group)
    self._summary_recording_distribution_strategy = None
Creates a ReplicaContext. Args: strategy: A `tf.distribute.Strategy`. replica_id_in_sync_group: An integer, a `Tensor` or None. Prefer an integer whenever possible to avoid issues with nested `tf.function`. It accepts a `Tensor` only to be compatible with `tpu.replicate`.
github-repos
def get_learning_rate(self, iter):
    """Get learning rate with cosine decay based on current iteration.

    Anneals from `init_lr` at iter=0 down to 0 at iter=max_iter via
    lr = init_lr * (cos(pi * iter / max_iter) + 1) / 2.

    Args:
        iter (int): Current iteration (starting with 0).

    Returns:
        float: Learning rate.
    """
    progress = (iter * 1.0) / self.max_iter
    return self.init_lr * (math.cos(progress * math.pi) + 1.0) * 0.5
Get learning rate with cosine decay based on current iteration. Args: iter (int): Current iteration (starting with 0). Returns: float: Learning rate
codesearchnet
def after_run(self, run_context, run_values):
    """Called after each call to run(); logs throughput periodically.

    Args:
        run_context: A SessionRunContext object.
        run_values: A SessionRunValues object. Its `results` carries the
            global step requested in before_run.
    """
    global_step = run_values.results
    # Only log once the timer fires AND warm-up steps are past, so early
    # (slow) steps don't skew the averages.
    if self._timer.should_trigger_for_step(
        global_step) and global_step > self._warm_steps:
        elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(
            global_step)
        if elapsed_time is not None:
            # Accumulate totals for the running average.
            self._step_train_time += elapsed_time
            self._total_steps += elapsed_steps
            average_examples_per_sec = self._batch_size * (
                self._total_steps / self._step_train_time)
            current_examples_per_sec = self._batch_size * (
                elapsed_steps / elapsed_time)
            tf.logging.info('Batch [%g]: current exp/sec = %g, average exp/sec = '
                            '%g', self._total_steps, current_examples_per_sec,
                            average_examples_per_sec)
Called after each call to run(). Args: run_context: A SessionRunContext object. run_values: A SessionRunValues object.
juraj-google-style
def search(self, search_phrase, limit=None):
    """Finds datasets by search phrase.

    Dataset scores (b_score) come from the whoosh dataset index; partition
    scores (p_score) are accumulated from the separate partition index.

    Args:
        search_phrase (str or unicode): phrase to search for.
        limit (int, optional): how many results to return. None means
            without limit.

    Returns:
        list of DatasetSearchResult instances.
    """
    query_string = self._make_query_from_terms(search_phrase)
    # Kept for introspection/debugging of the last executed query.
    self._parsed_query = query_string
    schema = self._get_generic_schema()
    parser = QueryParser('doc', schema=schema)
    query = parser.parse(query_string)
    # defaultdict so hits for unseen vids create fresh result objects.
    datasets = defaultdict(DatasetSearchResult)
    logger.debug('Searching datasets using `{}` query.'.format(query))
    with self.index.searcher() as searcher:
        results = searcher.search(query, limit=limit)
        for hit in results:
            vid = hit['vid']
            datasets[vid].vid = hit['vid']
            datasets[vid].b_score += hit.score
    logger.debug('Extending datasets with partitions.')
    # Partition hits also contribute to their parent dataset's result.
    for partition in self.backend.partition_index.search(search_phrase):
        datasets[partition.dataset_vid].p_score += partition.score
        datasets[partition.dataset_vid].partitions.add(partition)
    return list(datasets.values())
Finds datasets by search phrase. Args: search_phrase (str or unicode): limit (int, optional): how many results to return. None means without limit. Returns: list of DatasetSearchResult instances.
juraj-google-style
def get_panel_info(panel_lines=None, panel_id=None, institute=None,
                   version=None, date=None, display_name=None):
    """Parse metadata for a gene panel.

    For historical reasons it is possible to include all information about
    a gene panel in the header of a panel file (lines starting with '##',
    formatted as ``##key=value``). Header values only fill in fields that
    were not already provided as arguments.

    Args:
        panel_lines (iterable(str)): lines of the panel file, or None.
        panel_id (str): panel identifier.
        institute (str): institute owning the panel.
        version (str): panel version.
        date (str): date the panel was created/updated.
        display_name (str): human readable panel name.

    Returns:
        panel_info (dict): Dictionary with panel information, with `date`
        converted via `get_date`.
    """
    panel_info = {
        'panel_id': panel_id,
        'institute': institute,
        'version': version,
        'date': date,
        'display_name': display_name,
    }

    if panel_lines:
        for line in panel_lines:
            line = line.rstrip()
            # NOTE(review): the original source was garbled here ('#'
            # onwards was stripped); this restores the intended check.
            # The header ends at the first non-'##' line.
            if not line.startswith('##'):
                break
            # ``##key=value``; maxsplit keeps '=' inside values intact.
            info = line[2:].split('=', 1)
            field = info[0]
            value = info[1]
            if not panel_info.get(field):
                panel_info[field] = value

    panel_info['date'] = get_date(panel_info['date'])
    return panel_info
Parse metadata for a gene panel For historical reasons it is possible to include all information about a gene panel in the header of a panel file. This function parses the header. Args: panel_lines(iterable(str)) Returns: panel_info(dict): Dictionary with panel information
codesearchnet
def set_bfd_ip(self, name, vrid, value=None, disable=False, default=False, run=True):
    """Set the bfd_ip property of the vrrp.

    Args:
        name (string): The interface to configure.
        vrid (integer): The vrid number for the vrrp to be managed.
        value (string): The bfd ip address to be set.
        disable (boolean): Unset bfd ip if True.
        default (boolean): Set bfd ip to default if True.
        run (boolean): True executes the command; False returns the
            formatted command string instead.

    Returns:
        If run is True, True on success or the node error on failure.
        If run is False, the formatted command string.

    Raises:
        ValueError: if a value is being assigned and is not a dotted-quad
            IP address.
    """
    # Only validate when an explicit address is being assigned.
    if not (default or disable):
        if re.match(r'^\d+\.\d+\.\d+\.\d+$', str(value)) is None:
            raise ValueError("vrrp property 'bfd_ip' must be a properly formatted IP address")

    command = self.command_builder('vrrp %d bfd ip' % vrid, value=value,
                                   default=default, disable=disable)
    if not run:
        return command

    outcome = self.configure_interface(name, command)
    return self.error if outcome is False else outcome
Set the bfd_ip property of the vrrp Args: name (string): The interface to configure. vrid (integer): The vrid number for the vrrp to be managed. value (string): The bfd ip address to be set. disable (boolean): Unset bfd ip if True. default (boolean): Set bfd ip to default if True. run (boolean): Set to True to execute the command, False to return a string with the formatted command. Returns: If run is True, returns True if the command executed successfully, error if failure. If run is False, returns the formatted command string which can be passed to the node
codesearchnet
def get_extrapolated_diffusivity(temps, diffusivities, new_temp):
    """Returns (Arrhenius) extrapolated diffusivity at new_temp.

    Args:
        temps ([float]): A sequence of temperatures. units: K
        diffusivities ([float]): A sequence of diffusivities (e.g., from
            DiffusionAnalyzer.diffusivity). units: cm^2/s
        new_temp (float): desired temperature. units: K

    Returns:
        (float) Diffusivity at the extrapolated temperature.
    """
    activation_energy, prefactor, _ = fit_arrhenius(temps, diffusivities)
    # Thermal energy kT expressed in eV (k/e converts J/K -> eV/K).
    kt_ev = const.k / const.e * new_temp
    return prefactor * np.exp(-activation_energy / kt_ev)
Returns (Arrhenius) extrapolated diffusivity at new_temp Args: temps ([float]): A sequence of temperatures. units: K diffusivities ([float]): A sequence of diffusivities (e.g., from DiffusionAnalyzer.diffusivity). units: cm^2/s new_temp (float): desired temperature. units: K Returns: (float) Diffusivity at extrapolated temp in mS/cm.
juraj-google-style
def __init__(self, dump_root, partition_graphs=None, validate=True):
    """`DebugDumpDir` constructor.

    Args:
        dump_root: (`str`) path to the dump root directory.
        partition_graphs: A repeated field of GraphDefs representing the
            partition graphs executed by the TensorFlow runtime.
        validate: (`bool`) whether the dump files are to be validated
            against the partition graphs.

    Raises:
        IOError: If dump_root does not exist as a directory.
    """
    if not gfile.IsDirectory(dump_root):
        raise IOError('Dump root directory %s does not exist' % dump_root)
    self._core_metadata = []
    self._dump_root = dump_root
    # NOTE(review): these loads appear order-dependent (core metadata and
    # fetch/feed info before the per-device dumps) — preserve this order.
    self._load_core_metadata()
    self._load_fetches_info()
    self._load_feeds_info()
    self._load_all_device_dumps(partition_graphs, validate)
    # Python-level graph object, if any; starts unset.
    self._python_graph = None
`DebugDumpDir` constructor. Args: dump_root: (`str`) path to the dump root directory. partition_graphs: A repeated field of GraphDefs representing the partition graphs executed by the TensorFlow runtime. validate: (`bool`) whether the dump files are to be validated against the partition graphs. Raises: IOError: If dump_root does not exist as a directory. ValueError: If more than one core metadata file is found under the dump root directory.
github-repos
def db_set(table, record, column, value, if_exists=False):
    """Sets a column's value for a specific record.

    Args:
        table: A string - name of the database table.
        record: A string - identifier of the record.
        column: A string - name of the column.
        value: The value to be set (JSON-encoded for ovs-vsctl).
        if_exists: A boolean - if True, it is not an error if the record
            does not exist.

    Returns:
        None on success and the stderr error message on failure.
    """
    args = ['ovs-vsctl']
    if if_exists:
        args.append('--if-exists')
    args.extend(['set', table, record,
                 '{0}={1}'.format(column, json.dumps(value))])
    outcome = __salt__['cmd.run_all'](args)
    # Non-zero retcode signals failure; surface the captured stderr.
    return outcome['stderr'] if outcome['retcode'] != 0 else None
Sets a column's value for a specific record. Args: table: A string - name of the database table. record: A string - identifier of the record. column: A string - name of the column. value: A string - the value to be set if_exists: A boolean - if True, it is not an error if the record does not exist. Returns: None on success and an error message on failure. CLI Example: .. code-block:: bash salt '*' openvswitch.db_set Interface br0 mac 02:03:04:05:06:07
codesearchnet
def _process_book(book_url):
    """Parse available informations about book from the book details page.

    Args:
        book_url (str): Absolute URL of the book.

    Returns:
        obj: :class:`structures.Publication` instance with book details.
    """
    html = DOWNER.download(book_url)
    dom = dhtmlparser.parseString(html)

    detail_divs = dom.find("div", {"id": "contentDetail"})
    assert detail_divs, "Can't find details of the book."
    detail = detail_divs[0]

    # Pull the individual fields out of the detail element.
    title = _parse_title(dom, detail)
    authors = _parse_authors(detail)
    publisher = _parse_publisher(detail)
    price = _parse_price(detail)
    pages, binding = _parse_pages_binding(detail)

    publication = Publication(title, authors, price, publisher)

    opt = publication.optionals
    opt.URL = book_url
    opt.binding = binding
    opt.pages = pages
    opt.ISBN, opt.EAN = _parse_ISBN_EAN(detail)
    opt.edition = _parse_edition(detail)
    opt.description = _parse_description(detail)

    return publication
Parse available informations about book from the book details page. Args: book_url (str): Absolute URL of the book. Returns: obj: :class:`structures.Publication` instance with book details.
juraj-google-style
def ValidateAccessAndSubjects(requested_access, subjects):
    """Does basic requested access validation.

    Args:
        requested_access: String consisting of 'r', 'w' and 'q' characters.
        subjects: A list of subjects that are about to be accessed with the
            given requested_access. Used for logging purposes only.

    Returns:
        True if requested_access is valid.

    Raises:
        access_control.UnauthorizedAccess: if requested_access is empty or
            requests query access without read access.
        ValueError: if requested_access contains an unknown character.
    """
    if not requested_access:
        raise access_control.UnauthorizedAccess(
            "Must specify requested access type for %s" % subjects)

    if any(mode not in "rwq" for mode in requested_access):
        raise ValueError(
            "Invalid access requested for %s: %s" % (subjects, requested_access))

    # Query access is only meaningful together with read access.
    if "q" in requested_access and "r" not in requested_access:
        raise access_control.UnauthorizedAccess(
            "Invalid access request: query permissions require read permissions "
            "for %s" % subjects,
            requested_access=requested_access)

    return True
Does basic requested access validation. Args: requested_access: String consisting or 'r', 'w' and 'q' characters. subjects: A list of subjects that are about to be accessed with a given requested_access. Used for logging purposes only. Returns: True if requested_access is valid. Raises: access_control.UnauthorizedAccess: if requested_access is not valid. ValueError: if subjects list is empty.
juraj-google-style
def create_pipeline(self, name, description, **kwargs):
    """Creates a pipeline with the provided attributes.

    Args:
        name: required pipeline name string.
        description: required description string.
        kwargs: further user-specifiable pipeline attributes
            (e.g. orgWide, aclEntries).

    Returns:
        (status code, pipeline_dict) for the pipeline as created; a
        bad_request status with None when name or description is missing.
    """
    if not name or not description:
        return requests.codes.bad_request, None

    kwargs['name'] = name
    kwargs['description'] = description
    pipeline = StreakPipeline(**kwargs)

    endpoint = '/'.join([self.api_uri, self.pipelines_suffix])
    return self._req('put', endpoint, pipeline.to_dict())
Creates a pipeline with the provided attributes. Args: name: required pipeline name string. description: required description string. kwargs: further user-specifiable attributes only ({orgWide, aclEntries}). Returns: (status code, pipeline_dict) for the pipeline as created.
juraj-google-style
def calculate(self, token_list_x, token_list_y):
    """Calculate the cosine similarity of the Tf-Idf vectors of two token lists.

    Args:
        token_list_x: [token, token, token, ...]
        token_list_y: [token, token, token, ...]

    Returns:
        Similarity in [0.0, 1.0]; 0.0 for empty input or zero-norm vectors.
    """
    if len(token_list_x) == 0 or len(token_list_y) == 0:
        return 0.0

    # Vocabulary is the union of both token lists.
    document_list = list(set(token_list_x + token_list_y))
    tfidf_vectorizer = TfidfVectorizer(document_list)
    vector_list_x = tfidf_vectorizer.vectorize(token_list_x)
    vector_list_y = tfidf_vectorizer.vectorize(token_list_y)

    # Zero-pad the shorter vector so both have equal length.
    if len(vector_list_x) > len(vector_list_y):
        vector_list_y.extend([0.0] * (len(vector_list_x) - len(vector_list_y)))
    elif len(vector_list_y) > len(vector_list_x):
        vector_list_x.extend([0.0] * (len(vector_list_y) - len(vector_list_x)))

    norm_x = np.linalg.norm(vector_list_x)
    norm_y = np.linalg.norm(vector_list_y)
    # Numpy float division by zero yields nan/inf (no ZeroDivisionError),
    # so guard zero-norm vectors explicitly.
    if norm_x == 0.0 or norm_y == 0.0:
        return 0.0

    result = np.dot(vector_list_x, vector_list_y) / (norm_x * norm_y)
    # Bug fix: the original `np.isnan(result) is True` always evaluated
    # False because np.isnan returns numpy.bool_, not the True singleton,
    # letting NaN leak through to callers.
    if np.isnan(result):
        return 0.0
    return float(result)
Calculate similarity with the so-called Cosine similarity of Tf-Idf vectors. Concrete method. Args: token_list_x: [token, token, token, ...] token_list_y: [token, token, token, ...] Returns: Similarity.
juraj-google-style
def __init__(self, setup):
    """Construct a NotebookTestCase.

    Args:
        setup: arbitrary JSON-serializable object specified by the test
            spec. Intentionally ignored here — no initialization is
            performed by this base constructor.
    """
    pass
Construct a NotebookTestCase. Args: setup: arbitrary JSON-serializable object specified by test spec
github-repos