code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def __decode_dictionary(self, message_type, dictionary):
    """Merge a JSON-derived dictionary into a new message instance.

    Args:
        message_type: Message class to instantiate and populate.
        dictionary: Dictionary to extract information from, as parsed
            from JSON. Nested objects are themselves dictionaries.

    Returns:
        A new instance of `message_type` populated from `dictionary`.
    """
    message = message_type()
    for key, value in six.iteritems(dictionary):
        if value is None:
            # Explicit null resets the field when it exists; keys that do
            # not map to a resettable field are silently ignored.
            try:
                message.reset(key)
            except AttributeError:
                pass
            continue
        try:
            field = message.field_by_name(key)
        except KeyError:
            # Unknown field: preserve the raw value as an unrecognized
            # field when its wire variant can be inferred.
            variant = self.__find_variant(value)
            if variant:
                message.set_unrecognized_field(key, value, variant)
            continue
        if field.repeated:
            # A single value for a repeated field is promoted to a
            # one-element list.
            if not isinstance(value, list):
                value = [value]
            valid_value = [self.decode_field(field, item) for item in value]
            setattr(message, field.name, valid_value)
            continue
        if value == []:
            # An empty list on a non-repeated field carries no information.
            continue
        try:
            setattr(message, field.name, self.decode_field(field, value))
        except messages.DecodeError:
            # Unknown enum labels are kept as unrecognized fields; all
            # other decode failures propagate.
            if not isinstance(field, messages.EnumField):
                raise
            variant = self.__find_variant(value)
            if variant:
                message.set_unrecognized_field(key, value, variant)
    return message
Merge dictionary into message. Args: message: Message to merge dictionary into. dictionary: Dictionary to extract information from. Dictionary is as parsed from JSON. Nested objects will also be dictionaries.
juraj-google-style
def sin(x):
    """Computes sin of x element-wise.

    Args:
        x: Tensor or variable.

    Returns:
        A tensor.
    """
    return math_ops.sin(x)
Computes sin of x element-wise. Args: x: Tensor or variable. Returns: A tensor.
github-repos
def Decrement(self, key):
    """Atomically decrement the count for `key` by 1, expunging it at zero.

    Missing keys have no effect. Unhashable keys are tracked in the
    parallel lists `_unhashable_items` / `_unhashable_counts`.

    Args:
        key: the key being counted.
    """
    with self._lock:
        if _IsHashable(key):
            count = self._d.get(key)
            if count is None:
                return
            if count > 1:
                self._d[key] = count - 1
            else:
                del self._d[key]
            return
        # Unhashable keys: linear search in the parallel-list store.
        try:
            i = self._unhashable_items.index(key)
        except ValueError:
            return
        if self._unhashable_counts[i] > 1:
            self._unhashable_counts[i] -= 1
        else:
            del self._unhashable_counts[i]
            del self._unhashable_items[i]
Atomically decrement a count by 1. Expunge the item if the count reaches 0. If the item is not present, this has no effect. Args: key: the key being counted.
github-repos
def random(self, shape, tf_fn, kwargs):
    """Call a random tf operation (e.g. random_uniform) on this mesh.

    The op is evaluated per-slice; slices along mesh axes that the tensor
    is not split over are zeroed except on the first processor coordinate
    and then summed back with an allreduce, so every processor ends up
    with identical random values.

    Args:
        shape: a Shape.
        tf_fn: a function such as tf.random.uniform.
        kwargs: kwargs to pass to tf_fn, except for seed.

    Returns:
        a LaidOutTensor.
    """
    slice_shape = self.slice_shape(shape)
    x = tf_fn(slice_shape, **kwargs)
    layout = self.tensor_layout(shape)
    # Mesh axes the tensor is NOT split over: replicas along these axes
    # would otherwise produce different random slices.
    mesh_axes = [i for i in xrange(self.ndims)
                 if i not in layout.tensor_axis_to_mesh_axis]
    multiplier = 1.0
    for axis in mesh_axes:
        # Keep the value only where the processor coordinate is 0.
        multiplier *= tf.cast(
            tf.equal(self.laid_out_pcoord(axis).one_slice, 0), x.dtype)
    x *= multiplier
    x = self.LaidOutTensor([x])
    x = self.allreduce(x, mesh_axes, "SUM")
    return x
Call a random tf operation (e.g. random_uniform). Args: shape: a Shape tf_fn: a function such as tf.random.uniform kwargs: kwargs to pass to tf_fn, except for seed Returns: a LaidOutTensor
juraj-google-style
# Render a Facets Overview visualization for a dict of DataFrames inside
# IPython: validates the input, serializes per-frame statistics via
# GenericFeatureStatisticsGenerator, base64-encodes the proto, and injects
# it into an HTML template keyed by a fresh element id.
# NOTE(review): the HTML_TEMPLATE string literal is truncated mid-value
# ('document.querySelector(" ...') — the remainder of the template and its
# closing quote are missing from this excerpt, so the line as shown does
# not parse; restore the full template from the original source.
def plot(self, data): import IPython if ((not isinstance(data, dict)) or (not all((isinstance(v, pd.DataFrame) for v in data.values())))): raise ValueError('Expect a dictionary where the values are all dataframes.') gfsg = GenericFeatureStatisticsGenerator() data = [{'name': k, 'table': self._remove_nonascii(v)} for (k, v) in six.iteritems(data)] data_proto = gfsg.ProtoFromDataFrames(data) protostr = base64.b64encode(data_proto.SerializeToString()).decode('utf-8') html_id = ('f' + datalab.utils.commands.Html.next_id()) HTML_TEMPLATE = '<link rel="import" href="/nbextensions/gcpdatalab/extern/facets-jupyter.html" >\n <facets-overview id="{html_id}"></facets-overview>\n <script>\n document.querySelector(" html = HTML_TEMPLATE.format(html_id=html_id, protostr=protostr) return IPython.core.display.HTML(html)
Plots an overview of a dictionary of dataframes. Args: data: a dictionary with key the name, and value the dataframe.
codesearchnet
def __call__(self, request: beam.Row, *args, **kwargs):
    """Fetch feature values for an entity-id from Vertex AI Feature Store.

    Args:
        request: the input `beam.Row` to enrich; must contain the field
            named by `self.row_key`, used as the `FeatureViewDataKey`.

    Returns:
        Tuple of the original request and a `beam.Row` with the fetched
        feature values (empty on a NotFound when exception_level is WARN).
    """
    try:
        entity_id = request._asdict()[self.row_key]
    except KeyError:
        raise KeyError('Enrichment requests to Vertex AI Feature Store should contain a field: %s in the input `beam.Row` to join the input with fetched response. This is used as the `FeatureViewDataKey` to fetch feature values corresponding to this key.' % self.row_key)
    try:
        response = self.client.fetch_feature_values(request=aiplatform.gapic.FetchFeatureValuesRequest(data_key=aiplatform.gapic.FeatureViewDataKey(key=entity_id), feature_view=self.feature_view_path, data_format=aiplatform.gapic.FeatureViewDataFormat.PROTO_STRUCT))
    except NotFound:
        if self.exception_level == ExceptionLevel.WARN:
            _LOGGER.warning(_not_found_err_message(self.feature_store_name, self.feature_view_name, entity_id))
            return (request, beam.Row())
        elif self.exception_level == ExceptionLevel.RAISE:
            raise ValueError(_not_found_err_message(self.feature_store_name, self.feature_view_name, entity_id))
    # NOTE(review): if NotFound is caught with any other exception level,
    # `response` is unbound here and a NameError follows — preserved from
    # the original; confirm intended behavior for that level.
    response_dict = dict(response.proto_struct)
    return (request, beam.Row(**response_dict))
Fetches feature value for an entity-id from Vertex AI Feature Store. Args: request: the input `beam.Row` to enrich.
github-repos
def get_cached_response(cls, key):
    """Retrieve a CachedResponse for `key`, preferring the request cache.

    Falls back to the Django cache on a request-cache miss, and primes the
    request cache with the Django cache result for later lookups.

    Args:
        key (string)

    Returns:
        A CachedResponse with is_found status and value.
    """
    request_cached_response = DEFAULT_REQUEST_CACHE.get_cached_response(key)
    if request_cached_response.is_found:
        return request_cached_response
    django_cached_response = cls._get_cached_response_from_django_cache(key)
    cls._set_request_cache_if_django_cache_hit(key, django_cached_response)
    return django_cached_response
Retrieves a CachedResponse for the provided key. Args: key (string) Returns: A CachedResponse with is_found status and value.
juraj-google-style
def print_dict(py_dict):
    """Print a dictionary as a two-column, left-aligned table.

    Args:
        py_dict: Dictionary whose key/value pairs are printed one per row.
    """
    row_format = '{:<25}{:<25}'
    for gpu, cc in py_dict.items():
        print(row_format.format(gpu, cc))
Prints dictionary with formatting (2 column table). Args: py_dict: Dictionary that is to be printed out in a table format.
github-repos
def __init__(self, project_key, conf_path=settings.ZEO_CLIENT_PATH): super(self.__class__, self).__init__( conf_path=conf_path, project_key=project_key )
Constructor. Args: project_key (str): Project key which is used for the root of DB. conf_path (str): Path to the client zeo configuration file. Default :attr:`.settings.ZEO_CLIENT_PATH`.
juraj-google-style
def load(self, filepath, file_encoding=None):
    """Read the prefix declarations at the beginning of a turtle file.

    Scans lines until the first substantial non-prefix line, registering
    each `@prefix` declaration, then refreshes the internal lookup dicts.

    Args:
        filepath: the path to the turtle file.
        file_encoding: specify a specific encoding if necessary.
    """
    with open(filepath, encoding=file_encoding) as inf:
        for line in inf:
            current_line = str(line).strip()
            if current_line.startswith("@prefix"):
                self._add_ttl_ns(current_line.replace("\n", ""))
            elif len(current_line) > 10:
                # First long non-prefix line marks the end of the header.
                break
    # NOTE(review): attribute access without a call — looks like a missing
    # `()`; confirm whether `__make_dicts__` is a property before changing.
    self.__make_dicts__
Reads the beginning of a turtle file, determines the prefixes used in that file, and stores them in the prefix attribute. Args: filepath: the path to the turtle file file_encoding: specify a specific encoding if necessary
juraj-google-style
def delete(self, timeout=-1, custom_headers=None, force=False):
    """Deletes the current resource.

    Args:
        timeout: Timeout in seconds.
        custom_headers: Allows setting custom HTTP headers.
        force: Flag to force the operation.

    Returns:
        The result of the helper's delete call.
    """
    uri = self.data['uri']
    logger.debug("Delete resource (uri = %s)" % (str(uri)))
    return self._helper.delete(uri, timeout=timeout, custom_headers=custom_headers, force=force)
Deletes current resource. Args: timeout: Timeout in seconds. custom_headers: Allows to set custom http headers. force: Flag to force the operation.
juraj-google-style
def add_tags(self, ID3=None):
    """Add an empty ID3 tag to the file.

    Args:
        ID3 (ID3): An ID3 subclass to use, or `None` to use the one used
            when loading. A custom tag reader may be used instead of the
            default `ID3` object, e.g. `mutagen.easyid3.EasyID3`.

    Raises:
        error: If an ID3 tag already exists.
    """
    if ID3 is None:
        ID3 = self.ID3
    if self.tags is None:
        self.ID3 = ID3
        self.tags = ID3()
    else:
        raise error("an ID3 tag already exists")
Add an empty ID3 tag to the file. Args: ID3 (ID3): An ID3 subclass to use or `None` to use the one that used when loading. A custom tag reader may be used in instead of the default `ID3` object, e.g. an `mutagen.easyid3.EasyID3` reader.
juraj-google-style
def assert_inbounds(num, low, high, msg='', eq=False, verbose=not util_arg.QUIET):
    r"""Assert that `num` lies within (low, high); supports numpy arrays.

    Args:
        num (scalar or ndarray): values to check.
        low (scalar or ndarray): lower bound.
        high (scalar or ndarray): upper bound.
        msg (str): extra text appended to the failure message.
        eq (bool): if True, bounds are inclusive (<=); else strict (<).
        verbose (bool): print a confirmation line when the check passes.

    Raises:
        AssertionError: if any element is out of bounds.
    """
    from utool import util_str
    if util_arg.NO_ASSERTS:
        return
    passed = util_alg.inbounds(num, low, high, eq=eq)
    if isinstance(passed, np.ndarray):
        passflag = np.all(passed)
    else:
        passflag = passed
    if not passflag:
        # Narrow the report to just the failing elements for array inputs.
        failednum = num.compress(~passed) if isinstance(num, np.ndarray) else num
        failedlow = low.compress(~passed) if isinstance(low, np.ndarray) else low
        failedhigh = high.compress(~passed) if isinstance(high, np.ndarray) else high
        msg_ = 'num=%r is out of bounds=(%r, %r)' % (failednum, failedlow, failedhigh)
        raise AssertionError(msg_ + '\n' + msg)
    else:
        if verbose:
            op = '<=' if eq else '<'
            fmtstr = 'Passed assert_inbounds: {low} {op} {num} {op} {high}'
            print(fmtstr.format(low=low, op=op, num=util_str.truncate_str(str(num)), high=high))
r""" Args: num (scalar): low (scalar): high (scalar): msg (str):
juraj-google-style
def data_received(self, data):
    """(asyncio.Protocol member) Handle newly received data.

    The bytes are forwarded to the responder at the top of the stack via
    its `on_data` method. Any exception raised while doing so is routed to
    this protocol's `handle_error` method.

    Args:
        data (bytes): Bytes from the latest data transmission.
    """
    try:
        self.responders[-1].on_data(data)
    except Exception as error:
        self.handle_error(error)
(asyncio.Protocol member) Called upon when there is new data to be passed to the protocol. The data is forwarded to the top of the responder stack (via the on_data method). If an exception occurs while this is going on, the Exception is forwarded to the protocol's handle_error method. Args: data (bytes): Bytes from the latest data transmission
juraj-google-style
def get(self, *args, **kwargs):
    """Get an item from the cache for this combination of args and kwargs.

    Args:
        *args: any arguments.
        **kwargs: any keyword arguments.

    Returns:
        object: The object found in the cache, or `None` when the cache is
        disabled, the item is missing, or the entry has expired. (Storing
        `None` itself is therefore pointless.)
    """
    if not self.enabled:
        return None
    cache_key = self.make_key(args, kwargs)
    with self._cache_lock:
        if cache_key in self._cache:
            expirytime, item = self._cache[cache_key]
            if expirytime >= time():
                return item
            # Expired entry: expunge it so it is not consulted again.
            del self._cache[cache_key]
    return None
Get an item from the cache for this combination of args and kwargs. Args: *args: any arguments. **kwargs: any keyword arguments. Returns: object: The object which has been found in the cache, or `None` if no unexpired item is found. This means that there is no point storing an item in the cache if it is `None`.
codesearchnet
def _publish_status(self, slug, data): status_topic = self.topics.prefix + 'devices/{}/data/status'.format(slug) self._logger.debug("Publishing status message: (topic=%s) (message=%s)", status_topic, str(data)) self.client.publish(status_topic, data)
Publish a status message for a device Args: slug (string): The device slug that we are publishing on behalf of data (dict): The status message data to be sent back to the caller
juraj-google-style
def get_unverified_claims(token):
    """Return the decoded claims without verification of any kind.

    Args:
        token (str): A signed JWT to decode the claims from.

    Returns:
        dict: The dict representation of the token claims.

    Raises:
        JWTError: If there is an exception decoding the token.
    """
    try:
        claims = jws.get_unverified_claims(token)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit / KeyboardInterrupt
        # are no longer swallowed.
        raise JWTError('Error decoding token claims.')
    try:
        claims = json.loads(claims.decode('utf-8'))
    except ValueError as e:
        raise JWTError('Invalid claims string: %s' % e)
    if not isinstance(claims, Mapping):
        raise JWTError('Invalid claims string: must be a json object')
    return claims
Returns the decoded claims without verification of any kind. Args: token (str): A signed JWT to decode the headers from. Returns: dict: The dict representation of the token claims. Raises: JWTError: If there is an exception decoding the token.
codesearchnet
def convert_to_eager_tensor(value, ctx, dtype=None) -> ops._EagerTensorBase:
    """Converts the given `value` to an `EagerTensor`.

    Note that this function could return cached copies of created
    constants for performance reasons.

    Args:
        value: value to convert to EagerTensor.
        ctx: value of context.context().
        dtype: optional desired dtype of the converted EagerTensor.

    Returns:
        EagerTensor created from value.

    Raises:
        TypeError: if `dtype` is not compatible with the type of value.
    """
    if isinstance(value, np.ndarray):
        # Copy first — presumably to decouple the tensor from the caller's
        # mutable array; TODO confirm against EagerTensor ctor semantics.
        value = value.copy()
    if isinstance(value, ops.EagerTensor):
        if dtype is not None and value.dtype != dtype:
            raise TypeError(f'Expected tensor {value} with dtype {dtype!r}, but got dtype {value.dtype!r}.')
        return value
    if dtype is not None:
        # Accept either a DType object or anything dtypes.as_dtype handles.
        try:
            dtype = dtype.as_datatype_enum
        except AttributeError:
            dtype = dtypes.as_dtype(dtype).as_datatype_enum
    ctx.ensure_initialized()
    return ops.EagerTensor(value, ctx.device_name, dtype)
Converts the given `value` to an `EagerTensor`. Note that this function could return cached copies of created constants for performance reasons. Args: value: value to convert to EagerTensor. ctx: value of context.context(). dtype: optional desired dtype of the converted EagerTensor. Returns: EagerTensor created from value. Raises: TypeError: if `dtype` is not compatible with the type of t.
github-repos
def get_config(self, key_name):
    """Return a configuration value.

    Args:
        key_name (str): configuration key.

    Returns:
        The value for `key_name` from this component's config, or, when
        absent, the default declared by the component's Configuration
        handler class.
    """
    if key_name in self.config:
        return self.config.get(key_name)
    return self.Configuration.default(key_name, inst=self)
Return configuration value Args: key_name (str): configuration key Returns: The value for the specified configuration key, or if not found in the config the default value specified in the Configuration Handler class specified inside this component
codesearchnet
def GetYearFromPosixTime(posix_time, timezone=pytz.UTC):
    """Gets the year from a POSIX timestamp.

    The POSIX time is the number of seconds since 1970-01-01 00:00:00 UTC.

    Args:
        posix_time: An integer containing the number of seconds since
            1970-01-01 00:00:00 UTC.
        timezone: Optional timezone of the POSIX timestamp.

    Returns:
        The year of the POSIX timestamp.

    Raises:
        ValueError: If the POSIX timestamp is out of the range of
            supported values.
    """
    datetime_object = datetime.datetime.fromtimestamp(posix_time, tz=timezone)
    return datetime_object.year
Gets the year from a POSIX timestamp. The POSIX time is the number of seconds since 1970-01-01 00:00:00 UTC. Args: posix_time: An integer containing the number of seconds since 1970-01-01 00:00:00 UTC. timezone: Optional timezone of the POSIX timestamp. Returns: The year of the POSIX timestamp. Raises: ValueError: If the posix timestamp is out of the range of supported values.
juraj-google-style
def coordination_number_delta_E(self):
    """Coordination-number dependent contribution to the change in system
    energy if this jump were accepted.

    Returns:
        (float): delta E (coordination-number).
    """
    initial_site_neighbours = [s for s in self.initial_site.p_neighbours if s.is_occupied]
    # The initial site is excluded: after the jump it is vacant.
    final_site_neighbours = [s for s in self.final_site.p_neighbours
                             if s.is_occupied and s is not self.initial_site]
    initial_cn_occupation_energy = (
        self.initial_site.cn_occupation_energy()
        + sum(site.cn_occupation_energy() for site in initial_site_neighbours)
        + sum(site.cn_occupation_energy() for site in final_site_neighbours))
    # After the jump, neighbours see one fewer occupant of the initial
    # site's label and one more of the final site's label.
    final_cn_occupation_energy = (
        self.final_site.cn_occupation_energy(delta_occupation={self.initial_site.label: -1})
        + sum(site.cn_occupation_energy(delta_occupation={self.initial_site.label: -1})
              for site in initial_site_neighbours)
        + sum(site.cn_occupation_energy(delta_occupation={self.final_site.label: +1})
              for site in final_site_neighbours))
    return final_cn_occupation_energy - initial_cn_occupation_energy
Coordination-number dependent energy contribution to the change in system energy if this jump were accepted. Args: None Returns: (Float): delta E (coordination-number)
juraj-google-style
def get_custom_modules_path() -> Path:
    """Get the path to custom channels, creating the directory if needed.

    Returns:
        The path for custom channels (`<base>/modules`).
    """
    channel_path = get_base_path() / 'modules'
    if not channel_path.exists():
        channel_path.mkdir(parents=True)
    return channel_path
Get the path to custom channels Returns: The path for custom channels.
codesearchnet
def check_absolute_refs(self, construction_table):
    """Check the first three rows of `construction_table` for linear references.

    For each index in the first three rows, tests whether the references
    are colinear. This case has to be specially treated because those
    references are not only atoms (fixing internal degrees of freedom) but
    also points in cartesian space, the absolute references (fixing
    translational and rotational degrees of freedom).

    Args:
        construction_table (pd.DataFrame):

    Returns:
        list: A list of problematic indices.
    """
    c_table = construction_table
    problem_index = [i for i in c_table.index[:3]
                     if not self._has_valid_abs_ref(i, c_table)]
    return problem_index
Checks first three rows of ``construction_table`` for linear references Checks for each index from first to third row of the ``construction_table``, if the references are colinear. This case has to be specially treated, because the references are not only atoms (to fix internal degrees of freedom) but also points in cartesian space called absolute references. (to fix translational and rotational degrees of freedom) Args: construction_table (pd.DataFrame): Returns: list: A list of problematic indices.
codesearchnet
def holiday_day(self, value=None):
    """Corresponds to IDD Field `holiday_day`.

    Args:
        value (str): value for IDD Field `holiday_day`. If `value` is
            None it will not be checked against the specification and is
            assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value.
    """
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError('value {} need to be of type str for field `holiday_day`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma for field `holiday_day`')
    self._holiday_day = value
Corresponds to IDD Field `holiday_day` Args: value (str): value for IDD Field `holiday_day` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
# Run a download job described by config section `section_id`: for each
# registered looter class, resolve its targets, optionally log in with the
# section's credentials, and download media using per-section options
# (metadata, videos, json dumps, jobs, template, quiet progress bars).
# An error on one target is logged and does not abort the remaining ones.
#
# Raises:
#     KeyError: when the section could not be found.
def run_job(self, section_id, session=None): if (not self.parser.has_section(section_id)): raise KeyError('section not found: {}'.format(section_id)) session = (session or Session()) for (name, looter_cls) in six.iteritems(self._CLS_MAP): targets = self.get_targets(self._get(section_id, name)) quiet = self._getboolean(section_id, 'quiet', self.args.get('--quiet', False)) if targets: logger.info('Launching {} job for section {}'.format(name, section_id)) for (target, directory) in six.iteritems(targets): try: logger.info('Downloading {} to {}'.format(target, directory)) looter = looter_cls(target, add_metadata=self._getboolean(section_id, 'add-metadata', False), get_videos=self._getboolean(section_id, 'get-videos', False), videos_only=self._getboolean(section_id, 'videos-only', False), jobs=self._getint(section_id, 'jobs', 16), template=self._get(section_id, 'template', '{id}'), dump_json=self._getboolean(section_id, 'dump-json', False), dump_only=self._getboolean(section_id, 'dump-only', False), extended_dump=self._getboolean(section_id, 'extended-dump', False), session=session) if self.parser.has_option(section_id, 'username'): looter.logout() username = self._get(section_id, 'username') password = (self._get(section_id, 'password') or getpass.getpass('Password for "{}": '.format(username))) looter.login(username, password) n = looter.download(directory, media_count=self._getint(section_id, 'num-to-dl'), new_only=self._getboolean(section_id, 'new', False), pgpbar_cls=(None if quiet else TqdmProgressBar), dlpbar_cls=(None if quiet else TqdmProgressBar)) logger.success('Downloaded %i medias !', n) except Exception as exception: logger.error(six.text_type(exception))
Run a job as described in the section named ``section_id``. Raises: KeyError: when the section could not be found.
codesearchnet
def skipForDeviceType(self, device_type: typing.List[str], reason: str, unless_device_count_equals_to=None):
    """Skip the test for the specified device types.

    Args:
        device_type: list of device types, each one of "CPU", "GPU", or
            "TPU".
        reason: string that describes the reason for skipping the test.
        unless_device_count_equals_to: Optional int. Only honoured when
            device_type is "TPU". If set, the test is skipped unless the
            number of TPUs equals the specified count.
    """
    physical_device_types = set([d.device_type for d in tf_config.list_physical_devices()])
    for device in device_type:
        if device == 'TPU' and is_tpu_present():
            if unless_device_count_equals_to is None:
                self.skipTest(reason)
            elif len(list_local_logical_devices(device)) != unless_device_count_equals_to:
                self.skipTest(reason)
        # Only skip for CPU when it is the sole physical device type.
        if device == 'CPU' and len(physical_device_types) == 1 and ('CPU' in physical_device_types):
            self.skipTest(reason)
        if device == 'GPU' and 'GPU' in physical_device_types:
            self.skipTest(reason)
Skip the test for the specific device_type. Args: device_type: list of device types, one of "CPU", "GPU", or "TPU". reason: string that describe the reason for skipping the test. unless_device_count_equals_to: Optional int. This parameter only works if device_type is "TPU". If set, the test will be skipped unless the number of TPUs equals to the specified count.
github-repos
def volume(self):
    """The cell volume.

    Returns:
        (float): scalar triple product of the three lattice vectors.
    """
    rows = self.matrix
    return np.dot(rows[0], np.cross(rows[1], rows[2]))
The cell volume. Args: None Returns: (float): The cell volume.
codesearchnet
# discord.py on_message event handler for this module: ignores messages
# outside servers and the bot's own messages, reads the per-server prefix
# from stored data, and handles "<prefix>help [topic]" by replying with
# help embeds — per-topic datapacks when a topic is given, the full
# command list otherwise — falling back to an http-exception embed when
# sending fails.
#
# Args:
#     message (discord.Message): Input message
async def on_message(message): server = message.server author = message.author channel = message.channel content = message.content data = datatools.get_data() if server is not None and author != channel.server.me: prefix = data["discord"]["servers"][server.id]["prefix"] if content.startswith(prefix): package = content.split(" ") command = package[0][len(prefix):] args = package[1:] arg = ' '.join(args) if command == 'help': if args: datapacks = api_help.get_help_datapacks(arg, prefix) if datapacks: await client.send_typing(channel) embed = ui_embed.success(channel, arg, datapacks) try: await embed.send() except discord.errors.HTTPException: embed = ui_embed.http_exception(channel, arg) await embed.send() else: datapacks = api_help.get_help_commands(prefix) if datapacks: await client.send_typing(channel) embed = ui_embed.success(channel, arg, datapacks) try: await embed.send() except discord.errors.HTTPException: embed = ui_embed.http_exception(channel, arg) await embed.send()
The on_message event handler for this module Args: message (discord.Message): Input message
juraj-google-style
def validate_default_element(self, value):
    """Validate a default element of an Enum field.

    Enum fields allow delayed resolution of default values when the type
    of the field has not been resolved, so the default may be a string or
    an integer. If the Enum type of the field has been resolved, the
    default value is validated against that type.

    Args:
        value: Value to validate.

    Returns:
        The validated value.

    Raises:
        ValidationError: if value is not an expected message type.
    """
    if isinstance(value, (six.string_types, six.integer_types)):
        if self.__type:
            # Resolved enum type: constructing it validates the value.
            self.__type(value)
        return value
    return super(EnumField, self).validate_default_element(value)
Validate default element of Enum field. Enum fields allow for delayed resolution of default values when the type of the field has not been resolved. The default value of a field may be a string or an integer. If the Enum type of the field has been resolved, the default value is validated against that type. Args: value: Value to validate. Raises: ValidationError if value is not expected message type.
juraj-google-style
class GraniteMoeHybridMLP(nn.Module):
    """MLP layer for shared experts.

    A gated (GLU-style) feed-forward block: one fused input projection
    produces two chunks; the activated first chunk gates the second, then
    an output projection maps back to the hidden size.

    Args:
        config: Configuration object with model hyperparameters.
    """

    def __init__(self, config: GraniteMoeHybridConfig):
        super(GraniteMoeHybridMLP, self).__init__()
        self.input_size = config.hidden_size
        self.hidden_size = config.shared_intermediate_size
        self.activation = ACT2FN[config.hidden_act]
        # Fused projection producing both the gate and the value halves.
        self.input_linear = nn.Linear(self.input_size, self.hidden_size * 2, bias=False)
        self.output_linear = nn.Linear(self.hidden_size, self.input_size, bias=False)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.input_linear(hidden_states)
        chunked_hidden_states = hidden_states.chunk(2, dim=-1)
        hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1]
        hidden_states = self.output_linear(hidden_states)
        return hidden_states
MLP layer for shared experts Args: config: Configuration object with model hyperparameters.
github-repos
def delete_volume(self, volume_name: str):
    """Removes/stops a docker volume.

    Only the manager nodes can delete a volume.

    Args:
        volume_name (string): Name of the volume.

    Raises:
        RuntimeError: if this node is not a swarm manager.
    """
    if not self._manager:
        raise RuntimeError('Volumes can only be deleted '
                           'on swarm manager nodes')
    self._api_client.remove_volume(volume_name)
Removes/stops a docker volume. Only the manager nodes can delete a volume Args: volume_name (string): Name of the volume
juraj-google-style
# Decorator for the UnionFS feature: runs the wrapped project step inside
# a unionfs mount — the read-only container image layered under a
# writable overlay (rw), unified at the project's builddir (union).
# Skips mounting when already inside a container or when a unionfs is
# already active (nesting not supported), waits for the mount to come up,
# restores project.builddir afterwards, and tears the mount down with
# SIGINT (then kill on timeout). Raises UnmountError if the mount is
# still active after teardown.
def unionfs(rw='rw', ro=None, union='union'): from functools import wraps def wrap_in_union_fs(func): '\n Function that wraps a given function inside the file system.\n\n Args:\n func: The function that needs to be wrapped inside the unions fs.\n Return:\n The file system with the function wrapped inside.\n ' @wraps(func) def wrap_in_union_fs_func(project, *args, **kwargs): '\n Wrap the func in the UnionFS mount stack.\n\n We make sure that the mount points all exist and stack up the\n directories for the unionfs. All directories outside of the default\n build environment are tracked for deletion.\n ' container = project.container if ((container is None) or in_container()): return func(project, *args, **kwargs) build_dir = local.path(project.builddir) LOG.debug('UnionFS - Project builddir: %s', project.builddir) if __unionfs_is_active(root=build_dir): LOG.debug('UnionFS already active in %s, nesting not supported.', build_dir) return func(project, *args, **kwargs) ro_dir = local.path(container.local) rw_dir = (build_dir / rw) un_dir = (build_dir / union) LOG.debug('UnionFS - RW: %s', rw_dir) unionfs_cmd = __unionfs_set_up(ro_dir, rw_dir, un_dir) project_builddir_bak = project.builddir project.builddir = un_dir proc = unionfs_cmd.popen() while ((not __unionfs_is_active(root=un_dir)) and (proc.poll() is None)): pass ret = None if (proc.poll() is None): try: with local.cwd(un_dir): ret = func(project, *args, **kwargs) finally: project.builddir = project_builddir_bak from signal import SIGINT is_running = (proc.poll() is None) while (__unionfs_is_active(root=un_dir) and is_running): try: proc.send_signal(SIGINT) proc.wait(timeout=3) except subprocess.TimeoutExpired: proc.kill() is_running = False LOG.debug('Unionfs shut down.') if __unionfs_is_active(root=un_dir): raise UnmountError() return ret return wrap_in_union_fs_func return wrap_in_union_fs
Decorator for the UnionFS feature. This configures a unionfs for projects. The given base_dir and/or image_dir are layered as follows: image_dir=RW:base_dir=RO All writes go to the image_dir, while base_dir delivers the (read-only) versions of the rest of the filesystem. The unified version will be provided in the project's builddir. Unmouting is done as soon as the function completes. Args: rw: writeable storage area for the unified fuse filesystem. ro: read-only storage area for the unified fuse filesystem. union: mountpoint of the unified fuse filesystem.
codesearchnet
# Keras-style preprocessing layer implementing auto-contrast: rescales
# each image so its per-image min/max span the configured value_range
# (NaN results fall back to the original pixels), while labels, bounding
# boxes and segmentation masks pass through unchanged. Active at both
# training and inference time.
# NOTE(review): the export split `return config` across two rows (trailing
# `return` below, leading `config` on the following row), so this text
# does not parse as-is; rejoin them when restoring the source.
class AutoContrast(BaseImagePreprocessingLayer): _USE_BASE_FACTOR = False _VALUE_RANGE_VALIDATION_ERROR = 'The `value_range` argument should be a list of two numbers. ' def __init__(self, value_range=(0, 255), **kwargs): super().__init__(**kwargs) self._set_value_range(value_range) def _set_value_range(self, value_range): if not isinstance(value_range, (tuple, list)): raise ValueError(self._VALUE_RANGE_VALIDATION_ERROR + f'Received: value_range={value_range}') if len(value_range) != 2: raise ValueError(self._VALUE_RANGE_VALIDATION_ERROR + f'Received: value_range={value_range}') self.value_range = sorted(value_range) def transform_images(self, images, transformation=None, training=True): original_images = images images = self._transform_value_range(images, original_range=self.value_range, target_range=(0, 255), dtype=self.compute_dtype) images = self.backend.cast(images, self.compute_dtype) low = self.backend.numpy.min(images, axis=(1, 2), keepdims=True) high = self.backend.numpy.max(images, axis=(1, 2), keepdims=True) scale = 255.0 / (high - low) offset = -low * scale images = images * scale + offset results = self.backend.numpy.clip(images, 0.0, 255.0) results = self._transform_value_range(results, original_range=(0, 255), target_range=self.value_range, dtype=self.compute_dtype) results = self.backend.numpy.where(self.backend.numpy.isnan(results), original_images, results) if results.dtype == images.dtype: return results if backend.is_int_dtype(images.dtype): results = self.backend.numpy.round(results) return _saturate_cast(results, images.dtype, self.backend) def transform_labels(self, labels, transformation, training=True): return labels def transform_bounding_boxes(self, bounding_boxes, transformation, training=True): return bounding_boxes def transform_segmentation_masks(self, segmentation_masks, transformation, training=True): return segmentation_masks def get_config(self): config = super().get_config() config.update({'value_range': self.value_range}) return 
config def compute_output_shape(self, input_shape): return input_shape
Performs the auto-contrast operation on an image. Auto contrast stretches the values of an image across the entire available `value_range`. This makes differences between pixels more obvious. An example of this is if an image only has values `[0, 1]` out of the range `[0, 255]`, auto contrast will change the `1` values to be `255`. This layer is active at both training and inference time. Args: value_range: Range of values the incoming images will have. Represented as a two number tuple written `(low, high)`. This is typically either `(0, 1)` or `(0, 255)` depending on how your preprocessing pipeline is set up. Defaults to `(0, 255)`.
github-repos
def get_atomic_python_constant(variable: cfg.Variable, constant_type=None):
    """Get the concrete atomic Python value stored in this variable.

    This is used for things that are stored in cfg.Variable, but where we
    need the actual data in order to proceed, e.g. function / class
    definitions.

    Args:
        variable: A cfg.Variable. It can only have one possible value.
        constant_type: Optionally, the required type of the constant.

    Returns:
        A Python constant. (Typically, a string, a tuple, or a code object.)

    Raises:
        ConversionError: If the value in this Variable is purely abstract,
            i.e. doesn't store a Python value, or if it has more than one
            possible value.
    """
    atomic = get_atomic_value(variable)
    return atomic.ctx.convert.value_to_constant(atomic, constant_type)
Get the concrete atomic Python value stored in this variable. This is used for things that are stored in cfg.Variable, but we need the actual data in order to proceed. E.g. function / class definitions. Args: variable: A cfg.Variable. It can only have one possible value. constant_type: Optionally, the required type of the constant. Returns: A Python constant. (Typically, a string, a tuple, or a code object.) Raises: ConversionError: If the value in this Variable is purely abstract, i.e. doesn't store a Python value, or if it has more than one possible value.
github-repos
def _ParseOrMerge(self, lines, message):
    """Converts a text representation of a protocol message into a message.

    Args:
        lines: Lines of a message's text representation.
        message: A protocol buffer message to merge into.

    Raises:
        ParseError: On text parsing problems.
    """
    tokenizer = _Tokenizer(lines)
    while not tokenizer.AtEnd():
        self._MergeField(tokenizer, message)
Converts an text representation of a protocol message into a message. Args: lines: Lines of a message's text representation. message: A protocol buffer message to merge into. Raises: ParseError: On text parsing problems.
juraj-google-style
def save(self, outfname):
    """Save the environment of a sv file to be used with the soniv visualiser.

    Args:
        outfname (str): full path to the file storing the environment.
    """
    # Context manager guarantees the file is closed even if writexml raises
    # (the original leaked the handle on error).
    with BZ2File(outfname, 'w') as f:
        self.doc.writexml(f, addindent=' ', newl='\n')
Save the environment of a sv file to be used with soniv visualiser Args: outfname(str): full path to the file storing the environment
juraj-google-style
def wrap(tensor, books=None, tensor_shape=None):
    """Creates an input layer representing the given tensor.

    Args:
        tensor: The tensor.
        books: The bookkeeper; usually not required unless you are
            building multiple `tf.Graphs`.
        tensor_shape: An optional shape that will be set on the Tensor or
            verified to match the tensor.

    Returns:
        A layer.
    """
    if books is None:
        books = bookkeeper.for_default_graph()
    if isinstance(tensor, PrettyTensor):
        return tensor.as_layer()
    elif isinstance(tensor, UnboundVariable):
        def set_input_from_unbound_var(data):
            """Sets the input from the given unbound_var."""
            if data is not None:
                return wrap(data, books)
            else:
                return None
        return _DeferredLayer(books, set_input_from_unbound_var, [tensor], {})
    else:
        tensor = tf.convert_to_tensor(tensor, name='input')
        if tensor_shape:
            _set_shape_on_tensor(tensor, tensor_shape)
        return Layer(books, tensor=tensor, name=tensor.name)
Creates an input layer representing the given tensor. Args: tensor: The tensor. books: The bookkeeper; this is usually not required unless you are building multiple `tf.Graphs.` tensor_shape: An optional shape that will be set on the Tensor or verified to match the tensor. Returns: A layer.
codesearchnet
# Initialize the PyLmod base client: store the client certificate,
# normalize the service urlbase to end with '/', and build a
# requests.Session configured with the cert, class-level TIMEOUT, TLS
# verification, and a retrying HTTPAdapter mounted for the urlbase.
#
# Args:
#     cert (unicode): File path to the certificate used to authenticate
#         access to the LMod Web service.
#     urlbase (str): The URL of the LMod Web service.
#
# NOTE(review): the default `urlbase='https:` is truncated in this excerpt
# (closing quote and host missing), so the line does not parse; restore
# the full default URL from the original source.
def __init__( self, cert, urlbase='https: ): self.cert = cert self.urlbase = urlbase if not urlbase.endswith('/'): self.urlbase += '/' self._session = requests.Session() self._session.cert = cert self._session.timeout = self.TIMEOUT self._session.verify = True self._session.mount(urlbase, HTTPAdapter(max_retries=self.RETRIES)) log.debug("------------------------------------------------------") log.info("[PyLmod] init urlbase=%s", urlbase)
Initialize Base instance. Args: cert (unicode): File path to the certificate used to authenticate access to LMod Web service urlbase (str): The URL of the LMod Web service. i.e. ``learning-modules.mit.edu`` or ``learning-modules-test.mit.edu``
juraj-google-style
def obtain(self, dest):
    """Install or update, in editable mode, the package represented by
    this VersionControl object.

    Args:
        dest: the repository directory in which to install or update.
    """
    url, rev_options = self.get_url_rev_options(self.url)
    if not os.path.exists(dest):
        self.fetch_new(dest, url, rev_options)
        return
    rev_display = rev_options.to_display()
    if self.is_repository_directory(dest):
        existing_url = self.get_remote_url(dest)
        if self.compare_urls(existing_url, url):
            logger.debug('%s in %s exists, and has correct URL (%s)', self.repo_name.title(), display_path(dest), url)
            if not self.is_commit_id_equal(dest, rev_options.rev):
                logger.info('Updating %s %s%s', display_path(dest), self.repo_name, rev_display)
                self.update(dest, url, rev_options)
            else:
                logger.info('Skipping because already up-to-date.')
            return
        # Same VCS, different URL: offer to switch as well.
        logger.warning('%s %s in %s exists with URL %s', self.name, self.repo_name, display_path(dest), existing_url)
        prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ', ('s', 'i', 'w', 'b'))
    else:
        logger.warning('Directory %s already exists, and is not a %s %s.', dest, self.name, self.repo_name)
        prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b'))
    logger.warning('The plan is to install the %s repository %s', self.name, url)
    response = ask_path_exists('What to do? %s' % prompt[0], prompt[1])
    if response == 'a':
        sys.exit(-1)
    if response == 'w':
        logger.warning('Deleting %s', display_path(dest))
        rmtree(dest)
        self.fetch_new(dest, url, rev_options)
        return
    if response == 'b':
        dest_dir = backup_dir(dest)
        logger.warning('Backing up %s to %s', display_path(dest), dest_dir)
        shutil.move(dest, dest_dir)
        self.fetch_new(dest, url, rev_options)
        return
    if response == 's':
        logger.info('Switching %s %s to %s%s', self.repo_name, display_path(dest), url, rev_display)
        self.switch(dest, url, rev_options)
Install or update in editable mode the package represented by this VersionControl object. Args: dest: the repository directory in which to install or update.
codesearchnet
def _GetFormatErrorLocation(self, yaml_definition, last_definition_object): name = yaml_definition.get('name', None) if name: error_location = 'in: {0:s}'.format((name or '<NAMELESS>')) elif last_definition_object: error_location = 'after: {0:s}'.format(last_definition_object.name) else: error_location = 'at start' return error_location
Retrieves a format error location. Args: yaml_definition (dict[str, object]): current YAML definition. last_definition_object (DataTypeDefinition): previous data type definition. Returns: str: format error location.
codesearchnet
def asarray(self, array_like, *, xnp: numpy_utils.NpModule, casting: Union[Casting, str]=Casting.ALL):
    """Creates an `xnp.ndarray` from the `array_like`.

    Args:
        array_like: Any array-like.
        xnp: Target numpy module.
        casting: If `Casting.NONE`, prevent casting between differing known
            dtypes; `Casting.ALL` allows any conversion.

    Returns:
        array: The xnp array.

    Raises:
        ValueError: if `casting` is NONE and the source dtype differs from
            the target dtype.
        NotImplementedError: for unsupported casting modes.
    """
    casting = Casting(casting)
    # from_dtype is None when the input's dtype cannot be determined lazily.
    from_dtype = numpy_utils.lazy.dtype_from_array(array_like, strict=False)
    to_dtype = self._get_target_dtype(from_dtype)
    if casting == casting.NONE:
        # Only reject when both dtypes are known and actually differ.
        if to_dtype is None:
            pass
        elif from_dtype is None:
            pass
        elif from_dtype != to_dtype:
            raise ValueError(f'Cannot cast {from_dtype} to {to_dtype} (casting={casting}).')
    elif casting == casting.ALL:
        pass
    else:
        raise NotImplementedError(f'Unsupported casting {casting}')
    if to_dtype is None:
        dtype_kwargs = {}
    else:
        dtype_kwargs = {'dtype': numpy_utils.lazy.as_dtype(to_dtype, xnp=xnp)}
    if isinstance(array_like, np.ndarray) and array_like.shape == ():
        # Scalar numpy arrays are unwrapped to a Python scalar, keeping the
        # original dtype explicit so the target xnp preserves it.
        if not dtype_kwargs:
            dtype_kwargs = {'dtype': numpy_utils.lazy.as_dtype(array_like.dtype, xnp=xnp)}
        array_like = array_like.item()
    arr = xnp.asarray(array_like, **dtype_kwargs)
    return arr
Creates an `xnp.ndarray` from the `array_like`. Args: array_like: Any array-like xnp: Target numpy module casting: If `NONE`, prevent casting. Returns: array: The xnp array.
github-repos
def depth_july_average_ground_temperature(self, value=None):
    """Set the value for IDD Field `depth_july_average_ground_temperature`.

    Args:
        value (float): field value in degrees C. ``None`` is treated as a
            missing value and skips validation.

    Raises:
        ValueError: if `value` cannot be converted to float.
    """
    if value is None:
        self._depth_july_average_ground_temperature = None
        return
    try:
        converted = float(value)
    except ValueError:
        raise ValueError(
            'value {} need to be of type float '
            'for field `depth_july_average_ground_temperature`'.format(value))
    self._depth_july_average_ground_temperature = converted
Corresponds to IDD Field `depth_july_average_ground_temperature` Args: value (float): value for IDD Field `depth_july_average_ground_temperature` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def LoadGDAL(filename, no_data=None):
    """Read a GDAL file into a RichDEM array.

    Opens any file GDAL can read, selects the first raster band, and loads
    it and its metadata into a RichDEM array of the appropriate data type.

    Args:
        filename (str): Name of the raster file to open.
        no_data (float): Optionally, set the NoData value to this. It should
            not be equal to any actual data value.

    Returns:
        A RichDEM array.

    Raises:
        Exception: if GDAL is unavailable, no NoData value can be
            determined, or the band's data type is unsupported.
    """
    if (not GDAL_AVAILABLE):
        raise Exception('richdem.LoadGDAL() requires GDAL.')
    # Band data types RichDEM can represent.
    allowed_types = {gdal.GDT_Byte, gdal.GDT_Int16, gdal.GDT_Int32, gdal.GDT_UInt16, gdal.GDT_UInt32, gdal.GDT_Float32, gdal.GDT_Float64}
    src_ds = gdal.Open(filename)
    srcband = src_ds.GetRasterBand(1)
    # Fall back to the file's own NoData value when none was supplied.
    if (no_data is None):
        no_data = srcband.GetNoDataValue()
    if (no_data is None):
        raise Exception('The source data did not have a NoData value. Please use the no_data argument to specify one. If should not be equal to any of the actual data values. If you are using all possible data values, then the situation is pretty hopeless - sorry.')
    srcdata = rdarray(srcband.ReadAsArray(), no_data=no_data)
    if (not (srcband.DataType in allowed_types)):
        raise Exception('This datatype is not supported. Please file a bug report on RichDEM.')
    # Carry projection, geotransform and metadata over so that saving the
    # array later round-trips the georeferencing.
    srcdata.projection = src_ds.GetProjectionRef()
    srcdata.geotransform = src_ds.GetGeoTransform()
    srcdata.metadata = dict()
    for (k, v) in src_ds.GetMetadata().items():
        srcdata.metadata[k] = v
    _AddAnalysis(srcdata, 'LoadGDAL(filename={0}, no_data={1})'.format(filename, no_data))
    return srcdata
Read a GDAL file. Opens any file GDAL can read, selects the first raster band, and loads it and its metadata into a RichDEM array of the appropriate data type. If you need to do something more complicated, look at the source of this function. Args: filename (str): Name of the raster file to open no_data (float): Optionally, set the no_data value to this. Returns: A RichDEM array
codesearchnet
def is_full(cm, nodes1, nodes2):
    """Test connectivity of one set of nodes to another.

    Args:
        cm (``np.ndarray``): The connectivity matrix.
        nodes1 (tuple[int]): The nodes whose outputs to ``nodes2`` will be
            tested.
        nodes2 (tuple[int]): The nodes whose inputs from ``nodes1`` will be
            tested.

    Returns:
        bool: ``True`` if every node in ``nodes1`` outputs to some node in
        ``nodes2`` and every node in ``nodes2`` receives input from some
        node in ``nodes1``, or if either set is empty; ``False`` otherwise.
    """
    # Vacuously connected when either side is empty.
    if not (nodes1 and nodes2):
        return True
    submatrix = cm[np.ix_(nodes1, nodes2)]
    every_column_receives = submatrix.sum(0).all()
    every_row_sends = submatrix.sum(1).all()
    return every_column_receives and every_row_sends
Test connectivity of one set of nodes to another.

Args:
    cm (``np.ndarray``): The connectivity matrix
    nodes1 (tuple[int]): The nodes whose outputs to ``nodes2`` will be
        tested.
    nodes2 (tuple[int]): The nodes whose inputs from ``nodes1`` will be
        tested.

Returns:
    bool: ``True`` if all elements in ``nodes1`` output to some element in
    ``nodes2`` and all elements in ``nodes2`` have an input from some
    element in ``nodes1``, or if either set of nodes is empty; ``False``
    otherwise.
codesearchnet
def download_and_extract(uri, name, path):
    """Download, prepare and install a compressed tar file from S3 or a local
    directory as an entry point.

    SageMaker Python SDK saves user-provided entry points as compressed tar
    files in S3.

    Args:
        uri (str): the location of the entry point.
        name (str): name of the entry point.
        path (str): The path where the script will be installed. Nothing is
            downloaded or installed if the path already contains the user
            entry point.
    """
    if not os.path.exists(path):
        os.makedirs(path)
    if not os.listdir(path):
        with tmpdir() as tmp:
            # The source record's string literal was truncated ('s3:);
            # restored to the full S3 scheme prefix.
            if uri.startswith('s3://'):
                # Fetch the tarball from S3 into a temp file, then unpack it.
                dst = os.path.join(tmp, 'tar_file')
                s3_download(uri, dst)
                with tarfile.open(name=dst, mode='r:gz') as t:
                    t.extractall(path=path)
            elif os.path.isdir(uri):
                if uri == path:
                    return
                if os.path.exists(path):
                    shutil.rmtree(path)
                shutil.move(uri, path)
            else:
                # Single local file: copy it in under the entry point name.
                shutil.copy2(uri, os.path.join(path, name))
Download, prepare and install a compressed tar file from S3 or a local
directory as an entry point.

SageMaker Python SDK saves the user-provided entry points as compressed
tar files in S3.

Args:
    uri (str): the location of the entry point.
    name (str): name of the entry point.
    path (str): The path where the script will be installed. The entry
        point is not downloaded and installed if the path already
        contains it.
codesearchnet
def Collect(
    self, knowledge_base, artifact_definition, searcher):
    """Collects values using a Windows Registry value artifact definition.

    Args:
        knowledge_base (KnowledgeBase): to fill with preprocessing
            information.
        artifact_definition (artifacts.ArtifactDefinition): artifact
            definition.
        searcher (dfwinreg.WinRegistrySearcher): Windows Registry searcher
            to preprocess the Windows Registry.

    Raises:
        PreProcessFail: if the Windows Registry key or value cannot be read.
    """
    for source in artifact_definition.sources:
        if source.type_indicator not in (
            artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY,
            artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE):
            continue
        # Key sources only carry key paths; normalize them to the same
        # key/value pair shape value sources already use.
        if source.type_indicator == (
            artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY):
            key_value_pairs = [{'key': key} for key in source.keys]
        else:
            key_value_pairs = source.key_value_pairs
        for key_value_pair in key_value_pairs:
            key_path = key_value_pair['key']
            # Expand the %%CURRENT_CONTROL_SET%% placeholder to its
            # canonical location under HKEY_LOCAL_MACHINE.
            key_path_upper = key_path.upper()
            if key_path_upper.startswith('%%CURRENT_CONTROL_SET%%'):
                key_path = '{0:s}{1:s}'.format(
                    'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet',
                    key_path[23:])
            find_spec = registry_searcher.FindSpec(key_path_glob=key_path)
            for key_path in searcher.Find(find_specs=[find_spec]):
                try:
                    registry_key = searcher.GetKeyByPath(key_path)
                except IOError as exception:
                    raise errors.PreProcessFail((
                        'Unable to retrieve Windows Registry key: {0:s} with error: '
                        '{1!s}').format(key_path, exception))
                if registry_key:
                    # Absent 'value' means the whole key should be parsed.
                    value_name = key_value_pair.get('value', None)
                    self._ParseKey(knowledge_base, registry_key, value_name)
Collects values using a Windows Registry value artifact definition. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. artifact_definition (artifacts.ArtifactDefinition): artifact definition. searcher (dfwinreg.WinRegistrySearcher): Windows Registry searcher to preprocess the Windows Registry. Raises: PreProcessFail: if the Windows Registry key or value cannot be read.
juraj-google-style
def join(self, basepath, *paths):
    """Join one or more path components onto an Azure Blob Storage base path.

    Args:
        basepath: string path of the first component of the path.
        paths: additional path components to append.

    Returns:
        The full path after combining all components with single '/'
        separators.

    Raises:
        ValueError: if `basepath` is not an Azure Blob Storage path.
    """
    if not basepath.startswith(BlobStorageFileSystem.AZURE_FILE_SYSTEM_PREFIX):
        raise ValueError('Basepath %r must be an Azure Blob Storage path.' % basepath)
    result = basepath
    for component in paths:
        # Collapse duplicate separators at the join boundary.
        result = '%s/%s' % (result.rstrip('/'), component.lstrip('/'))
    return result
Join two or more pathname components for the filesystem Args: basepath: string path of the first component of the path paths: path components to be added Returns: full path after combining all the passed components
github-repos
def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask):
    """Create a 3D band attention mask from 2D blocked masks.

    Args:
        from_blocked_mask: 2D Tensor of shape [batch_size,
            from_seq_length//from_block_size, from_block_size].
        to_blocked_mask: int32 Tensor of shape [batch_size,
            to_seq_length//to_block_size, to_block_size].

    Returns:
        float Tensor of shape [batch_size, 1,
        from_seq_length//from_block_size-4, from_block_size,
        3*to_block_size].
    """
    # Each query block attends to the previous, current and next key blocks.
    band_context = jnp.concatenate(
        [to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]],
        axis=2,
    )
    band_mask = jnp.einsum('blq,blk->blqk', from_blocked_mask[:, 2:-2], band_context)
    # Add a head axis so the mask broadcasts over attention heads.
    return jnp.expand_dims(band_mask, 1)
Create 3D attention mask from a 2D tensor mask. Args: from_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size]. to_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size]. Returns: float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4, from_block_size, 3*to_block_size].
github-repos
def get_experiment_from_id(self, experiment_id):
    """Get experiment for the provided experiment ID.

    Args:
        experiment_id: Experiment ID for which the experiment is to be
            determined.

    Returns:
        Experiment corresponding to the provided experiment ID, or None if
        the ID is not in the datafile.
    """
    experiment = self.experiment_id_map.get(experiment_id)
    if not experiment:
        # Unknown ID: report through both the logger and the error handler.
        self.logger.error('Experiment ID "%s" is not in datafile.' % experiment_id)
        self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY_ERROR))
        return None
    return experiment
Get experiment for the provided experiment ID. Args: experiment_id: Experiment ID for which experiment is to be determined. Returns: Experiment corresponding to the provided experiment ID.
juraj-google-style
def export_json_object(dict_obj, filename=None):
    """Exports a dictionary as pretty-printed JSON.

    Writes to `filename` when one is given; otherwise pretty-prints the
    JSON with syntax highlighting to stdout.

    Args:
        :dict_obj (dict): dictionary object
        :filename (str): name of file to be exported (optional)

    Returns:
        True | False, Boolean export status
    """
    try:
        if filename:
            try:
                # The context manager closes the handle; the original's
                # explicit handle.close() inside the with-block was redundant.
                with open(filename, 'w') as handle:
                    handle.write(json.dumps(dict_obj, indent=4, sort_keys=True))
                    logger.info(
                        '%s: Wrote %s to local filesystem location' %
                        (inspect.stack()[0][3], filename))
            except TypeError as e:
                # Non-serializable content is a warning, not a failure.
                logger.warning(
                    '%s: object in dict not serializable: %s' %
                    (inspect.stack()[0][3], str(e)))
        else:
            json_str = json.dumps(dict_obj, indent=4, sort_keys=True)
            print(highlight(json_str, lexers.JsonLexer(), formatters.TerminalFormatter()))
            logger.info('%s: successful export to stdout' % inspect.stack()[0][3])
        # The original's try/else clause was unreachable (this return exits
        # first); the dead clause has been removed.
        return True
    except IOError as e:
        logger.critical(
            '%s: export_file_object: error writing to %s to filesystem. Error: %s' %
            (inspect.stack()[0][3], filename, str(e)))
        return False
Summary: exports object to block filesystem object Args: :dict_obj (dict): dictionary object :filename (str): name of file to be exported (optional) Returns: True | False Boolean export status
juraj-google-style
def _append_expectation(self, expectation_config):
    """Append `expectation_config`, dropping existing expectations of its type.

    For a column expectation, only same-type expectations on the same column
    (or same-type expectations with no column) are dropped; otherwise all
    expectations of the same type are dropped before appending.

    Args:
        expectation_config (json): The JSON-serializable expectation to be
            added to the DataAsset expectations in `_expectations_config`.
    """
    expectation_type = expectation_config['expectation_type']
    # Fails fast with a TypeError if the config is not JSON-serializable;
    # the serialized string itself is intentionally discarded.
    json.dumps(expectation_config)
    if ('column' in expectation_config['kwargs']):
        column = expectation_config['kwargs']['column']
        # Keep expectations of a different type, or same-type ones that
        # explicitly target a different column.
        self._expectations_config.expectations = [f for f in filter((lambda exp: ((exp['expectation_type'] != expectation_type) or (('column' in exp['kwargs']) and (exp['kwargs']['column'] != column)))), self._expectations_config.expectations)]
    else:
        # Non-column expectation: drop every existing expectation of this type.
        self._expectations_config.expectations = [f for f in filter((lambda exp: (exp['expectation_type'] != expectation_type)), self._expectations_config.expectations)]
    self._expectations_config.expectations.append(expectation_config)
Appends an expectation to `DataAsset._expectations_config` and drops existing expectations of the same type.

If `expectation_config` is a column expectation, this drops existing expectations that are specific to \
that column and only if they are of the same expectation type as `expectation_config`. Otherwise, if it is not a \
column expectation, this drops existing expectations of the same type as `expectation_config`. \
After expectations of the same type are dropped, `expectation_config` is appended to `DataAsset._expectations_config`.

Args:
    expectation_config (json): \
        The JSON-serializable expectation to be added to the DataAsset expectations in `_expectations_config`.

Notes:
    May raise future errors once JSON-serializable tests are implemented to check for correct arg formatting.
codesearchnet
def _testExportImportAcrossScopes(self, graph_fn, use_resource):
    """Tests exporting and importing a graph across name scopes.

    Builds a graph under 'dropA/dropB/keepA', exports it with the
    'dropA/dropB' prefix stripped, re-imports it under 'importA', and checks
    the result equals a graph built directly under 'importA/keepA'.

    Args:
        graph_fn: A closure that creates a graph in the current scope.
        use_resource: A bool indicating whether or not to use
            ResourceVariables.
    """
    with ops.Graph().as_default() as original_graph:
        with variable_scope.variable_scope('dropA/dropB/keepA'):
            graph_fn(use_resource=use_resource)
    exported_meta_graph_def = meta_graph.export_scoped_meta_graph(graph=original_graph, export_scope='dropA/dropB')[0]
    with ops.Graph().as_default() as imported_graph:
        meta_graph.import_scoped_meta_graph(exported_meta_graph_def, import_scope='importA')
    # Reference graph: what the import should be indistinguishable from.
    with ops.Graph().as_default() as expected_graph:
        with variable_scope.variable_scope('importA/keepA'):
            graph_fn(use_resource=use_resource)
    result = meta_graph.export_scoped_meta_graph(graph=imported_graph)[0]
    expected = meta_graph.export_scoped_meta_graph(graph=expected_graph)[0]
    if use_resource:
        # Resource variables embed per-graph identifiers in these string
        # attrs; blank them out so the proto comparison ignores them.
        for meta_graph_def in [result, expected]:
            for node in meta_graph_def.graph_def.node:
                for attr_to_remove in ['shared_name', 'debug_name']:
                    attr_value = node.attr.get(attr_to_remove, None)
                    if attr_value and attr_value.HasField('s'):
                        if attr_value.s:
                            node.attr[attr_to_remove].s = b''
    test_util.assert_meta_graph_protos_equal(self, expected, result)
Tests export and importing a graph across scopes. Args: graph_fn: A closure that creates a graph on the current scope. use_resource: A bool indicating whether or not to use ResourceVariables.
github-repos
def log_variables(variables=None):
    """Logs variable information.

    Logs the name, shape, type, collections, and device for either all
    variables or a given iterable of variables. In the "Device" column, the
    nature of the variable (legacy or resource) is also noted.

    Args:
        variables: iterable of variables; if not provided, all global and
            local variables in the default graph are logged.
    """
    if variables is None:
        variables = tf.global_variables() + tf.local_variables()
    rows = format_variables(variables, join_lines=False)
    for line in rows:
        tf.logging.info(line)
Logs variable information. This function logs the name, shape, type, collections, and device for either all variables or a given iterable of variables. In the "Device" columns, the nature of the variable (legacy or resource (for ResourceVariables)) is also specified in parenthesis. Args: variables: iterable of variables; if not provided, then all variables (in the default graph) are logged.
codesearchnet
def mean_min_time_distance(item_a, item_b, max_value):
    """Calculate the mean time difference among the time steps in each object.

    Args:
        item_a: STObject from the first set in TrackMatcher.
        item_b: STObject from the second set in TrackMatcher.
        max_value: Maximum distance value used as scaling value and upper
            constraint.

    Returns:
        Distance value between 0 and 1.
    """
    col = item_a.times.reshape((-1, 1))
    row = item_b.times.reshape((1, -1))
    # Pairwise squared time differences between the two tracks.
    sq_diffs = (col - row) ** 2
    combined = sq_diffs.min(axis=0).mean() + sq_diffs.min(axis=1).mean()
    score = np.sqrt(combined)
    return np.minimum(score, max_value) / float(max_value)
Calculate the mean time difference among the time steps in each object. Args: item_a: STObject from the first set in TrackMatcher item_b: STObject from the second set in TrackMatcher max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
juraj-google-style
def reset_logformat_timestamped(logger: logging.Logger,
                                extraname: str = '',
                                level: int = logging.INFO) -> None:
    """Apply a simple time-stamped log format to an existing logger and set
    its log level.

    Args:
        logger: logger to modify
        extraname: additional name to append to the logger's name
        level: log level to set
    """
    name_part = '{}:'.format(extraname) if extraname else ''
    fmt = ('%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s:'
           + name_part + '%(message)s')
    reset_logformat(logger, fmt=fmt)
    logger.setLevel(level)
Apply a simple time-stamped log format to an existing logger, and set its loglevel to either ``logging.DEBUG`` or ``logging.INFO``. Args: logger: logger to modify extraname: additional name to append to the logger's name level: log level to set
codesearchnet
def GetFrequencyStartTimes(self):
    """Return a list of start times for each headway-based run.

    Returns:
        a sorted list of seconds since midnight, the start time of each run.
        Empty if this trip doesn't have headways.
    """
    runs = []
    for frequency in self.GetFrequencyTuples():
        begin, end, headway = frequency[0:3]
        # Emit one run start every `headway` seconds until the window ends.
        run_start = begin
        while run_start < end:
            runs.append(run_start)
            run_start += headway
    return runs
Return a list of start time for each headway-based run. Returns: a sorted list of seconds since midnight, the start time of each run. If this trip doesn't have headways returns an empty list.
codesearchnet
def adafactor_decay_rate_adam(beta2):
    """Second-moment decay rate like Adam, subsuming the correction factor.

    Args:
        beta2: a float between 0 and 1

    Returns:
        a scalar tensor holding the effective decay rate at the current
        global step.
    """
    # Steps are 1-indexed for the bias-correction formula.
    step = tf.cast(tf.train.get_or_create_global_step(), tf.float32) + 1.0
    return beta2 * (1.0 - tf.pow(beta2, step - 1.0)) / (1.0 - tf.pow(beta2, step))
Second-moment decay rate like Adam, subsuming the correction factor. Args: beta2: a float between 0 and 1 Returns: a scalar
juraj-google-style
def unroll_state_saver(input_layer, name, state_shapes, template, lengths=None):
    """Unrolls the given function with state taken from the state saver.

    Args:
        input_layer: The input sequence.
        name: The name of this layer.
        state_shapes: A list of shapes, one for each state variable.
        template: A template with unbound variables for input and states
            that returns a RecurrentResult.
        lengths: The length of each item in the batch. If provided, use this
            to truncate computation.

    Returns:
        A sequence from applying the given template to each item in the
        input sequence.
    """
    state_saver = input_layer.bookkeeper.recurrent_state
    state_names = [((STATE_NAME % name) + ('_%d' % i)) for i in xrange(len(state_shapes))]
    # Register zero-initialized states when the saver supports it.
    if hasattr(state_saver, 'add_state'):
        for (state_name, state_shape) in zip(state_names, state_shapes):
            initial_state = tf.zeros(state_shape[1:], dtype=input_layer.dtype)
            state_saver.add_state(state_name, initial_state=initial_state, batch_size=state_shape[0])
    if (lengths is not None):
        max_length = tf.reduce_max(lengths)
    else:
        max_length = None
    results = []
    prev_states = []
    # Load the saved states, flattening the batch dimension to -1.
    for (state_name, state_shape) in zip(state_names, state_shapes):
        my_shape = list(state_shape)
        my_shape[0] = (- 1)
        prev_states.append(tf.reshape(state_saver.state(state_name), my_shape))
    my_parameters = None
    for (i, layer) in enumerate(input_layer.sequence):
        with input_layer.g.name_scope(('unroll_%00d' % i)):
            if ((i > 0) and (max_length is not None)):
                # Past max_length, pass the previous output/state through
                # unchanged instead of applying the template.
                result = control_flow_ops.cond((i < max_length), (lambda : unwrap_all(*template(layer, *prev_states).flatten())), (lambda : unwrap_all(out, *prev_states)))
                out = result[0]
                prev_states = result[1:]
            else:
                (out, prev_states) = template(layer, *prev_states)
                # Capture the shared parameters from the first application.
                if (my_parameters is None):
                    my_parameters = out.layer_parameters
            results.append(prettytensor.unwrap(out))
    # Persist the final states; tie the save ops to the first result so
    # they run whenever the output is evaluated.
    updates = [state_saver.save_state(state_name, prettytensor.unwrap(prev_state)) for (state_name, prev_state) in zip(state_names, prev_states)]
    with tf.control_dependencies(updates):
        results[0] = tf.identity(results[0])
    return input_layer.with_sequence(results, parameters=my_parameters)
Unrolls the given function with state taken from the state saver. Args: input_layer: The input sequence. name: The name of this layer. state_shapes: A list of shapes, one for each state variable. template: A template with unbound variables for input and states that returns a RecurrentResult. lengths: The length of each item in the batch. If provided, use this to truncate computation. Returns: A sequence from applying the given template to each item in the input sequence.
codesearchnet
def to_value(original_string, corenlp_value=None):
    """Convert the string to a Value object.

    Args:
        original_string (basestring): Original string
        corenlp_value (basestring): Optional normalized value returned from
            CoreNLP

    Returns:
        Value: a NumberValue, DateValue or StringValue depending on what
        the (possibly normalized) string parses as.
    """
    # Already a Value: hand it back untouched.
    if isinstance(original_string, Value):
        return original_string
    normalized = corenlp_value if corenlp_value else original_string
    amount = NumberValue.parse(normalized)
    if amount is not None:
        return NumberValue(amount, original_string)
    ymd = DateValue.parse(normalized)
    if ymd is not None:
        # A bare year (month and day both -1) is treated as a number.
        if ymd[1] == ymd[2] == -1:
            return NumberValue(ymd[0], original_string)
        return DateValue(ymd[0], ymd[1], ymd[2], original_string)
    return StringValue(original_string)
Convert the string to Value object. Args: original_string (basestring): Original string corenlp_value (basestring): Optional value returned from CoreNLP Returns: Value
juraj-google-style
def precipitable_water(self, value=999.0):
    """Set the value for IDD Field `precipitable_water`.

    Args:
        value (float): field value in mm; 999.0 is the missing-value
            marker. ``None`` skips validation and marks the field missing.

    Raises:
        ValueError: if `value` cannot be converted to float.
    """
    if value is None:
        self._precipitable_water = None
        return
    try:
        converted = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float for field `precipitable_water`'.format(value))
    self._precipitable_water = converted
Corresponds to IDD Field `precipitable_water` Args: value (float): value for IDD Field `precipitable_water` Unit: mm Missing value: 999.0 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def read_bytes(self, length) -> bytes:
    """Read the specified number of bytes from the stream.

    Args:
        length (int): number of bytes to read.

    Returns:
        bytes: up to `length` bytes read from the stream.
    """
    return self.stream.read(length)
Read the specified number of bytes from the stream. Args: length (int): number of bytes to read. Returns: bytes: `length` number of bytes.
codesearchnet
def hash_file(path, block_size=65536):
    """Returns the SHA256 checksum of a file as a hex string.

    Args:
        path (string): Absolute file path of file to hash.
        block_size (int, optional): Number of bytes to read per block.
    """
    digest = hashlib.sha256()
    # Stream the file in fixed-size blocks so large files don't need to
    # fit in memory.
    with open(path, 'rb') as handle:
        while True:
            chunk = handle.read(block_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
Returns SHA256 checksum of a file Args: path (string): Absolute file path of file to hash block_size (int, optional): Number of bytes to read per block
codesearchnet
def search_by_user(self, screen_name, count=100):
    """Search tweets by user.

    Args:
        screen_name: screen name
        count: the number of tweets

    Returns:
        list: tweet list
    """
    return self._api.user_timeline(screen_name=screen_name, count=count)
Search tweets by user. Args: screen_name: screen name count: the number of tweets Returns: list: tweet list
juraj-google-style
def RegisterCredentials(cls, credentials):
    """Registers a path specification credentials object.

    Args:
        credentials (Credentials): credentials, keyed by type indicator.

    Raises:
        KeyError: if a credentials object is already set for the
            corresponding type indicator.
    """
    indicator = credentials.type_indicator
    if indicator in cls._credentials:
        raise KeyError(
            'Credentials object already set for type indicator: {0:s}.'.format(
                indicator))
    cls._credentials[indicator] = credentials
Registers a path specification credentials. Args: credentials (Credentials): credentials. Raises: KeyError: if credentials object is already set for the corresponding type indicator.
juraj-google-style
def build_one_definition_example(self, def_name):
    """Build the example for the given definition.

    Args:
        def_name: Name of the definition.

    Returns:
        True if the example has been created (or already existed), False if
        an error occurred or the definition is unknown.
    """
    if def_name in self.definitions_example:
        return True  # Already built.
    if def_name not in self.specification['definitions']:
        return False  # Unknown definition.

    self.definitions_example[def_name] = {}
    def_spec = self.specification['definitions'][def_name]

    # Array definition: example is a one-item list of the item example.
    if def_spec.get('type') == 'array' and 'items' in def_spec:
        self.definitions_example[def_name] = [self.get_example_from_prop_spec(def_spec['items'])]
        return True

    # No properties: the definition itself is a plain property spec.
    if 'properties' not in def_spec:
        self.definitions_example[def_name] = self.get_example_from_prop_spec(def_spec)
        return True

    # Object definition: build one example value per property.
    for prop_name, prop_spec in def_spec['properties'].items():
        example = self.get_example_from_prop_spec(prop_spec)
        if example is None:
            return False
        self.definitions_example[def_name][prop_name] = example
    return True
Build the example for the given definition. Args: def_name: Name of the definition. Returns: True if the example has been created, False if an error occured.
juraj-google-style
class AnomalyDetector(abc.ABC):
    """An abstract base class for anomaly detectors.

    Args:
        model_id: The ID of detector (model). Defaults to the value of the
            `spec_type` attribute, or 'unknown' if not set.
        features: An Iterable of strings representing the names of the input
            features in the `beam.Row`.
        target: The name of the target field in the `beam.Row`.
        threshold_criterion: An optional `ThresholdFn` to apply to the
            outlier score and yield a label.
    """

    def __init__(self, model_id: Optional[str]=None, features: Optional[Iterable[str]]=None, target: Optional[str]=None, threshold_criterion: Optional[ThresholdFn]=None, **kwargs):
        # Fall back to the subclass-provided spec_type() (when defined) for
        # the model id; otherwise label the model 'unknown'.
        self._model_id = model_id if model_id is not None else getattr(self, 'spec_type', lambda: 'unknown')()
        self._features = features
        self._target = target
        self._threshold_criterion = threshold_criterion

    @abc.abstractmethod
    def learn_one(self, x: beam.Row) -> None:
        """Updates the detector with a single data instance."""
        raise NotImplementedError

    @abc.abstractmethod
    def score_one(self, x: beam.Row) -> Optional[float]:
        """Returns an outlier score for a single data instance, or None."""
        raise NotImplementedError
An abstract base class for anomaly detectors. Args: model_id: The ID of detector (model). Defaults to the value of the `spec_type` attribute, or 'unknown' if not set. features: An Iterable of strings representing the names of the input features in the `beam.Row` target: The name of the target field in the `beam.Row`. threshold_criterion: An optional `ThresholdFn` to apply to the outlier score and yield a label.
github-repos
def make_grid_texture(num_h_lines=10, num_v_lines=10, resolution=50):
    """Makes a texture consisting of a grid of vertical and horizontal lines.

    Args:
        num_h_lines (int): the number of horizontal lines to draw
        num_v_lines (int): the number of vertical lines to draw
        resolution (int): the number of midpoints to draw on each line

    Returns:
        A texture as an (x, y) pair of concatenated coordinate arrays.
    """
    h_x, h_y = make_lines_texture(num_h_lines, resolution)
    # Vertical lines are horizontal lines with the axes swapped.
    v_y, v_x = make_lines_texture(num_v_lines, resolution)
    x = np.concatenate([h_x, v_x])
    y = np.concatenate([h_y, v_y])
    return x, y
Makes a texture consisting of a grid of vertical and horizontal lines. Args: num_h_lines (int): the number of horizontal lines to draw num_v_lines (int): the number of vertical lines to draw resolution (int): the number of midpoints to draw on each line Returns: A texture.
juraj-google-style
def RemoveTransaction(self, tx):
    """Remove a transaction from the memory pool if it is found on the
    blockchain.

    Args:
        tx (neo.Core.TX.Transaction): instance.

    Returns:
        bool: True if successfully removed. False otherwise (no blockchain
        loaded, transaction not yet on chain, or not in the pool).
    """
    blockchain = BC.Default()
    if blockchain is None:
        return False
    # Only drop pooled transactions that have actually been confirmed.
    if not blockchain.ContainsTransaction(tx.Hash):
        return False
    pool_key = tx.Hash.ToBytes()
    if pool_key in self.MemPool:
        del self.MemPool[pool_key]
        return True
    return False
Remove a transaction from the memory pool if it is found on the blockchain. Args: tx (neo.Core.TX.Transaction): instance. Returns: bool: True if successfully removed. False otherwise.
juraj-google-style
def _cumprod(l): ret = [1] for item in l: ret.append((ret[(- 1)] * item)) return ret
Cumulative product of a list. Args: l: a list of integers Returns: a list with one more element (starting with 1)
codesearchnet
def __init__(self, filename, damethod, date, ensize):
    """Initialize darun attributes and create the HDF5 group layout.

    Args:
        filename (str): Absolute path of file name as a string with `hdf5`
            extension.
        damethod (str): Name of the assimilation method used, i.e. `enkf`.
        date (str): Date of the experiment `MM-DD-YYYY:HHHH`.
        ensize (int): ensemble size.
    """
    self.filename = filename
    self.damethod = damethod
    self.date = date
    self.ensize = ensize
    # "a" opens read/write and creates the file if it does not exist.
    self.dafile = h5py.File(self.filename, "a")
    # Run-level metadata stored as file attributes.
    self.dafile.attrs['damethod'] = self.damethod
    self.dafile.attrs['date'] = self.date
    self.dafile.attrs['ensize'] = self.ensize
    # One group per assimilation artifact category.
    self.dafile.create_group("Observation")
    self.dafile.create_group("Parameter")
    self.dafile.create_group("State")
    self.dafile.create_group("StateObservation")
    self.dafile.create_group("Simulation")
    self.dafile.create_group("Inflation")
Initialize darun attributes Args: filename (str): Absolute path of file name as a string with `hdf5` extension damethod (str): Name of the assimilation method used, i.e. `enkf`. date (str): Date of the experiment `MM-DD-YYYY:HHHH` ensize (int): ensemble size
juraj-google-style
def _GetUserTypeAndPassword(username, password=None, is_admin=False):
    """Returns the user-type and password for a user.

    Args:
        username: Username for the user.
        password: Password for the user. If None, or not provided, we will
            prompt for one via the terminal.
        is_admin: Indicates whether the user should have admin privileges.
    """
    if is_admin:
        user_type = api_user.ApiGrrUser.UserType.USER_TYPE_ADMIN
    else:
        user_type = api_user.ApiGrrUser.UserType.USER_TYPE_STANDARD
    if password is None:
        # Interactive fallback: never echo the password to the terminal.
        password = getpass.getpass(prompt="Please enter password for user '%s':" % username)
    return user_type, password
Returns the user-type and password for a user. Args: username: Username for the user. password: Password for the user. If None, or not provided, we will prompt for one via the terminal. is_admin: Indicates whether the user should have admin privileges.
juraj-google-style
def __init__(self, configs):
    """Constructor of BaseTestClass.

    Takes a config_parser.TestRunConfig object with all the information
    needed to execute this test class, like log_path and controller
    configurations.

    Args:
        configs: A config_parser.TestRunConfig object.
    """
    self.tests = []
    # The class identifier names the per-class log directory; an optional
    # suffix keeps parallel runs of the same class distinguishable.
    class_identifier = self.__class__.__name__
    if configs.test_class_name_suffix:
        class_identifier = '%s_%s' % (class_identifier, configs.test_class_name_suffix)
    if self.TAG is None:
        self.TAG = class_identifier
    self.root_output_path = configs.log_path
    self.log_path = os.path.join(self.root_output_path, class_identifier)
    utils.create_dir(self.log_path)
    self.test_bed_name = configs.test_bed_name
    self.testbed_name = configs.testbed_name
    self.user_params = configs.user_params
    self.results = records.TestResult()
    self.summary_writer = configs.summary_writer
    self._generated_test_table = collections.OrderedDict()
    self._controller_manager = controller_manager.ControllerManager(class_name=self.TAG, controller_configs=configs.controller_configs)
    self.controller_configs = self._controller_manager.controller_configs
Constructor of BaseTestClass. The constructor takes a config_parser.TestRunConfig object and which has all the information needed to execute this test class, like log_path and controller configurations. For details, see the definition of class config_parser.TestRunConfig. Args: configs: A config_parser.TestRunConfig object.
github-repos
def read_lines(self, max_lines=None):
    """Reads the content of this object as text, returning a list of lines
    up to some max.

    Args:
        max_lines: max number of lines to return. If None, return all lines.

    Returns:
        The text content of the object as a list of lines.

    Raises:
        Exception if there was an error requesting the object's content.
    """
    if max_lines is None:
        return self.read_stream().split('\n')

    max_to_read = self.metadata.size
    # Heuristic initial read: assume lines average <= 100 bytes, growing
    # tenfold whenever the prefix yields too few lines.
    bytes_to_read = min(100 * max_lines, max_to_read)
    while True:
        content = self.read_stream(byte_count=bytes_to_read)
        lines = content.split('\n')
        if len(lines) > max_lines or bytes_to_read >= max_to_read:
            break
        bytes_to_read = min(bytes_to_read * 10, max_to_read)

    if bytes_to_read < max_to_read:
        # The read stopped mid-object, so the final element may be a
        # partial line; discard it.
        del lines[-1]
    elif lines and lines[-1] == '':
        # Fully read and the content ends with a newline: drop the empty
        # trailing entry. (The original unconditionally deleted the last
        # element, silently losing the final line of fully-read objects
        # that do not end with a newline.)
        del lines[-1]
    return lines[0:max_lines]
Reads the content of this object as text, and return a list of lines up to some max. Args: max_lines: max number of lines to return. If None, return all lines. Returns: The text content of the object as a list of lines. Raises: Exception if there was an error requesting the object's content.
juraj-google-style
def edit_miz(
        infile: str,
        outfile: str = None,
        metar: typing.Union[str, Metar] = None,
        time: str = None,
        min_wind: int = 0,
        max_wind: int = 40
) -> str:
    """Edit an opened MIZ file and sets the time and date and the weather.

    Args:
        infile: source file
        outfile: output file (will default to source file)
        metar: metar string, ICAO or object to apply
        time: time string to apply (YYYYMMDDHHMMSS)
        min_wind: minimum wind
        max_wind: maximum wind

    Returns:
        String containing error; empty string on success.
    """
    if outfile is None:
        LOGGER.debug('editing in place: %s', infile)
        outfile = infile
    else:
        LOGGER.debug('editing miz file: %s -> %s', infile, outfile)
    mission_weather = mission_time = None
    if metar:
        # Normalize whatever was given (raw METAR, ICAO, object) first.
        error, metar = emiz.weather.custom_metar.CustomMetar.get_metar(metar)
        if error:
            return error
        mission_weather = emiz.weather.mission_weather.MissionWeather(metar, min_wind=min_wind, max_wind=max_wind)
    if time:
        try:
            mission_time = MissionTime.from_string(time)
        except ValueError:
            return f'badly formatted time string: {time}'
    if not mission_weather and not mission_time:
        return 'nothing to do!'
    with Miz(infile) as miz:
        if mission_weather:
            LOGGER.debug('applying MissionWeather')
            if not mission_weather.apply_to_miz(miz):
                return 'error while applying METAR to mission'
        if mission_time:
            LOGGER.debug('applying MissionTime')
            if not mission_time.apply_to_miz(miz):
                return 'error while setting time on mission'
        try:
            miz.zip(outfile)
            return ''
        except OSError:
            # Typically the output file is locked by another process.
            return f'permission error: cannot edit "{outfile}"; maybe it is in use ?'
Edit an opened MIZ file and sets the time and date and the weather Args: infile: source file outfile: output file (will default to source file) metar: metar string, ICAO or object to apply time: time string to apply (YYYYMMDDHHMMSS) min_wind: minimum wind max_wind: maximum wind Returns: String containing error
juraj-google-style
class FlaubertPoolerEndLogits(nn.Module):
    """Compute SQuAD end logits from sequence hidden states.

    Args:
        config ([`FlaubertConfig`]): The config used by the model, will be
            used to grab the `hidden_size` of the model and the
            `layer_norm_eps` to use.
    """

    def __init__(self, config: FlaubertConfig):
        super().__init__()
        # Consumes the concatenation of a position's hidden state and the
        # start token's hidden state, hence hidden_size * 2.
        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
        self.activation = nn.Tanh()
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dense_1 = nn.Linear(config.hidden_size, 1)

    def forward(self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, p_mask: Optional[torch.FloatTensor]=None) -> torch.FloatTensor:
        """Returns end logits; one of start_states/start_positions required."""
        assert start_states is not None or start_positions is not None, 'One of start_states, start_positions should be not None'
        if start_positions is not None:
            slen, hsz = hidden_states.shape[-2:]
            # Gather the hidden state at each example's start position and
            # broadcast it across the sequence dimension.
            start_positions = start_positions[:, None, None].expand(-1, -1, hsz)
            start_states = hidden_states.gather(-2, start_positions)
            start_states = start_states.expand(-1, slen, -1)
        x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
        x = self.activation(x)
        x = self.LayerNorm(x)
        x = self.dense_1(x).squeeze(-1)
        if p_mask is not None:
            # Suppress masked positions with a large negative value sized
            # to the dtype's representable range.
            if p_mask.dtype == torch.float16:
                x = x * (1 - p_mask) - 65500 * p_mask
            else:
                x = x * (1 - p_mask) - 1e+30 * p_mask
        return x
Compute SQuAD end logits from sequence hidden states. Args: config ([`FlaubertConfig`]): The config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps` to use.
github-repos
def get_installed_version(vcs):
    """Get the installed version for this project.

    Args:
        vcs (easyci.vcs.base.Vcs)

    Returns:
        str - version number

    Raises:
        VersionNotInstalledError: if no version file exists.
    """
    version_path = _get_version_path(vcs)
    if not os.path.exists(version_path):
        raise VersionNotInstalledError
    with open(version_path, 'r') as version_file:
        return version_file.read().strip()
Get the installed version for this project. Args: vcs (easyci.vcs.base.Vcs) Returns: str - version number Raises: VersionNotInstalledError
juraj-google-style
def _postprocess_for_mg_tf(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7):
    """Perform NMS (Non Maximum Suppression) on the mask outputs.

    Args:
        rle_masks (`tf.Tensor`): binary masks in the RLE format
        iou_scores (`tf.Tensor` of shape (nb_masks, 1)): iou_scores
            predicted by the model
        mask_boxes (`tf.Tensor`): The bounding boxes corresponding to
            segmentation masks
        amg_crops_nms_thresh (`float`, *optional*, defaults to 0.7): NMS
            threshold.
    """
    # NOTE(review): this TF variant mixes frameworks — `mask_boxes.float()`
    # and `torch.zeros(...)` are torch-style calls on what the signature
    # documents as tf tensors, and `combined_non_max_suppression` takes no
    # `idxs` argument; confirm against the torch implementation this was
    # ported from before relying on it.
    keep_by_nms = tf.image.combined_non_max_suppression(boxes=mask_boxes.float(), scores=iou_scores, idxs=torch.zeros(mask_boxes.shape[0]), iou_threshold=amg_crops_nms_thresh)
    iou_scores = iou_scores[keep_by_nms]
    rle_masks = [rle_masks[i] for i in keep_by_nms]
    mask_boxes = mask_boxes[keep_by_nms]
    # Decode the surviving RLE masks back into dense binary masks.
    masks = [_rle_to_mask(rle) for rle in rle_masks]
    return (masks, iou_scores, rle_masks, mask_boxes)
Perform NMS (Non Maximum Suppression) on the outputs. Args: rle_masks (`tf.Tensor`): binary masks in the RLE format iou_scores (`tf.Tensor` of shape (nb_masks, 1)): iou_scores predicted by the model mask_boxes (`tf.Tensor`): The bounding boxes corresponding to segmentation masks amg_crops_nms_thresh (`float`, *optional*, defaults to 0.7): NMS threshold.
github-repos
def validate(self, corpus):
    """Perform the validation on the given corpus.

    Flags every utterance whose character rate exceeds the configured
    maximum characters per second.

    Args:
        corpus (Corpus): The corpus to test/validate.

    Returns:
        InvalidUtterancesResult: Validation result.
    """
    offenders = {}
    for utterance in corpus.utterances.values():
        label_list = utterance.label_lists[self.label_list_idx]
        transcription = ' '.join(label.value for label in label_list)
        # Spaces do not count towards the character rate.
        num_chars = len(transcription.replace(' ', ''))
        char_per_sec = num_chars / utterance.duration
        if char_per_sec > self.max_characters_per_second:
            offenders[utterance.idx] = char_per_sec

    info = {
        'Threshold max. characters per second': str(self.max_characters_per_second),
        'Label-List ID': self.label_list_idx
    }
    return base.InvalidUtterancesResult(len(offenders) <= 0, offenders, name=self.name(), info=info)
Perform the validation on the given corpus. Args: corpus (Corpus): The corpus to test/validate. Returns: InvalidUtterancesResult: Validation result.
juraj-google-style
def get_events_for_block_ids(self, block_ids, subscriptions):
    """Get a list of events associated with all the block ids.

    Args:
        block_ids (list of str): The block ids to search for events that
            match each subscription.
        subscriptions (list of EventSubscriptions): EventFilter and event
            type to filter events.

    Returns (list of Events): The Events associated with each block id.

    Raises:
        KeyError: A block id isn't found within the block store or a
            transaction is missing from the receipt store.
    """
    resolved = []
    for block_id in block_ids:
        # Raises KeyError for ids missing from the block store.
        resolved.append(self._block_store[block_id])
    return self.get_events_for_blocks(resolved, subscriptions)
Get a list of events associated with all the block ids. Args: block_ids (list of str): The block ids to search for events that match each subscription. subscriptions (list of EventSubscriptions): EventFilter and event type to filter events. Returns (list of Events): The Events associated which each block id. Raises: KeyError A block id isn't found within the block store or a transaction is missing from the receipt store.
codesearchnet
def add_evolved_transformer_hparams(hparams):
    """Add Evolved Transformer hparams.

    Note: These are for the Adam optimizer, not the Adafactor optimizer
    used in the paper.

    Args:
        hparams: Current hparams.

    Returns:
        hparams updated with Evolved Transformer values.
    """
    hparams.num_encoder_layers = 3
    hparams.num_decoder_layers = 4
    # Rescale the constant by 1/sqrt(warmup steps), matching the schedule.
    warmup_scale = hparams.learning_rate_warmup_steps ** 0.5
    hparams.learning_rate_constant /= warmup_scale
    hparams.learning_rate_schedule = 'constant*linear_warmup*single_cycle_cos_decay*rsqrt_hidden_size'
    hparams.learning_rate_decay_steps = 250000
    return hparams
Add Evolved Transformer hparams. Note: These are for the Adam optimizer, not the Adafactor optimizer used in the paper. Args: hparams: Current hparams. Returns: hparams updated with Evolved Transformer values.
codesearchnet
def device(device_name_or_function) -> ContextManager[None]:
    """Wrapper for `Graph.device()` using the default graph.

    See `tf.Graph.device` for more details.

    Args:
        device_name_or_function: The device name or function to use in the
            context.

    Returns:
        A context manager that specifies the default device to use for newly
        created ops.

    Raises:
        RuntimeError: If eager execution is enabled and a function is passed in.
    """
    if context.executing_eagerly():
        # Eager mode: only plain device names are supported; device functions
        # require a graph to rewrite op placements.
        if callable(device_name_or_function):
            raise RuntimeError('tf.device does not support functions when eager execution is enabled.')
        return context.device(device_name_or_function)
    elif executing_eagerly_outside_functions():

        @tf_contextlib.contextmanager
        def combined(device_name_or_function):
            # Building a graph under an outer eager context: enter the graph's
            # device scope, and for plain names additionally enter the eager
            # device scope so eager ops created in the block are placed too.
            with get_default_graph().device(device_name_or_function):
                if not callable(device_name_or_function):
                    with context.device(device_name_or_function):
                        yield
                else:
                    yield
        return combined(device_name_or_function)
    else:
        # Pure graph mode: defer entirely to the default graph's device scope.
        return get_default_graph().device(device_name_or_function)
Wrapper for `Graph.device()` using the default graph. See `tf.Graph.device` for more details. Args: device_name_or_function: The device name or function to use in the context. Returns: A context manager that specifies the default device to use for newly created ops. Raises: RuntimeError: If eager execution is enabled and a function is passed in.
github-repos
def _parse_parameters(val_type, val): if val_type == "logical": return val == "T" elif val_type == "int": return int(val) elif val_type == "string": return val.strip() else: return float(val)
Helper function to convert a Vasprun parameter into the proper type. Boolean, int and float types are converted. Args: val_type: Value type parsed from vasprun.xml. val: Actual string value parsed for vasprun.xml.
juraj-google-style
def get_resource_group(access_token, subscription_id, rgname):
    """Get details about the named resource group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rgname (str): Azure resource group name.

    Returns:
        HTTP response with a JSON body describing the resource group.
    """
    endpoint = (get_rm_endpoint() +
                '/subscriptions/' + subscription_id +
                '/resourceGroups/' + rgname +
                '?api-version=' + RESOURCE_API)
    return do_get(endpoint, access_token)
Get details about the named resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. Returns: HTTP response. JSON body.
codesearchnet
def end_parallel(self):
    """Ends a parallel region by merging the channels into a single stream.

    Returns:
        Stream: Stream for which subsequent transformations are no longer
        parallelized.

    .. seealso:: :py:meth:`set_parallel`, :py:meth:`parallel`
    """
    upstream = self.oport
    # A $Union$ marker cannot feed $EndParallel$ directly, so route it
    # through a pass-thru operator first.
    is_union_marker = (
        isinstance(self.oport.operator, streamsx.topology.graph.Marker)
        and self.oport.operator.kind == '$Union$')
    if is_union_marker:
        pass_thru = self.topology.graph.addPassThruOperator()
        pass_thru.addInputPort(outputPort=self.oport)
        upstream = pass_thru.addOutputPort(schema=self.oport.schema)
    end_op = self.topology.graph.addOperator('$EndParallel$')
    end_op.addInputPort(outputPort=upstream)
    merged_port = end_op.addOutputPort(schema=self.oport.schema)
    return Stream(self.topology, merged_port)
Ends a parallel region by merging the channels into a single stream. Returns: Stream: Stream for which subsequent transformations are no longer parallelized. .. seealso:: :py:meth:`set_parallel`, :py:meth:`parallel`
codesearchnet
def unindent(lines):
    """Split each line into its indent width and unindented text.

    Args:
        lines: A sequence of strings representing the lines of text in a
            docstring.

    Returns:
        A list of tuples, one per input line: the first element is the
        indent size in characters, the second is the unindented text.
    """
    return [(len(line) - len(line.lstrip()), line.lstrip()) for line in lines]
Convert an iterable of indented lines into a sequence of tuples. The first element of each tuple is the indent in number of characters, and the second element is the unindented string. Args: lines: A sequence of strings representing the lines of text in a docstring. Returns: A list of tuples where each tuple corresponds to one line of the input list. Each tuple has two entries - the first is an integer giving the size of the indent in characters, the second is the unindented text.
codesearchnet
def _set_c_attrs(self, attrs):
    """Copy `attrs` onto the underlying C function object.

    Requires that self._c_func is not None.

    Args:
        attrs: a dictionary from attribute name to attribute proto value.
    """
    for attr_name, attr_proto in attrs.items():
        # Serialize once per attribute; the C API consumes raw proto bytes.
        proto_bytes = attr_proto.SerializeToString()
        with self._c_func.get() as func:
            c_api.TF_FunctionSetAttrValueProto(
                func, compat.as_str(attr_name), proto_bytes)
Sets `attrs` as attributes of self._c_func. Requires that self._c_func is not None. Args: attrs: a dictionary from attribute name to attribute proto value
github-repos
def body(self, body):
    """Defines response body data, stored on ``self._body``.

    Raw bytes are decoded to text so the stored body is always a str.

    Arguments:
        body (str|bytes): response body to use.
    """
    if isinstance(body, bytes):
        self._body = body.decode('utf-8')
    else:
        self._body = body
Defines response body data. Arguments: body (str|bytes): response body to use. Returns: self: ``pook.Response`` current instance.
codesearchnet
def add_rec_new(self, k, val):
    """Recursively register ``val`` and bind it to the name ``k``.

    Args:
        k (str): The name of the variable to assign.
        val (LispVal): The value to be added and assigned.

    Returns:
        LispVal: The added value.
    """
    # Register the value (and its children) before exposing the binding.
    self.rec_new(val)
    self.__setitem__(k, val)
    return val
Recursively add a new value and its children to me, and assign a variable to it. Args: k (str): The name of the variable to assign. val (LispVal): The value to be added and assigned. Returns: LispVal: The added value.
juraj-google-style
def add_subscriber(self, connection_id, subscriptions, last_known_block_id):
    """Register the subscriber for the given event subscriptions.

    Args:
        connection_id: Identifier of the subscribing connection.
        subscriptions: Event subscriptions to register.
        last_known_block_id: Block id the subscriber last observed.

    Raises:
        InvalidFilterError: One of the filters in the subscriptions is
            invalid.
    """
    with self._subscribers_cv:
        subscriber = EventSubscriber(
            connection_id, subscriptions, last_known_block_id)
        self._subscribers[connection_id] = subscriber
        LOGGER.debug('Added Subscriber %s for %s',
                     connection_id, subscriptions)
Register the subscriber for the given event subscriptions. Raises: InvalidFilterError One of the filters in the subscriptions is invalid.
codesearchnet
def download_file(self, url):
    """Initiate a streaming download.

    Args:
        url (str): The url to download.

    Returns:
        A tuple of (content length, streaming response). The length is 0
        when the server sends no content-length header.
    """
    resp = requests.get(url, stream=True)
    # Fail fast on HTTP error statuses before handing back the stream.
    resp.raise_for_status()
    length = int(resp.headers.get('content-length', 0))
    return (length, resp)
Initiate a streaming download Args: url (str): The url to download Returns: A tuple of the content length and the streaming response
codesearchnet
def _resolve_if_choice_type(fhir_message: message.Message) -> Optional[message.Message]:
    """Resolve a choice type to its populated sub-field; pass others through.

    FHIR choice types wrap each alternative in a proto ``oneof`` field, so
    when the message is a choice type we return whichever alternative is
    set (or None if none is). Non-choice messages are returned unchanged.

    Args:
        fhir_message: the evaluation result which may or may not be a choice
            type.

    Returns:
        The populated sub-field for a choice type, the message itself for a
        non-choice type, or None when no alternative is populated.
    """
    if not annotation_utils.is_choice_type(fhir_message):
        return fhir_message
    set_field = fhir_message.WhichOneof('choice')
    if set_field is None:
        return None
    return cast(message.Message,
                proto_utils.get_value_at_field(fhir_message, set_field))
Resolve to the proper field if given a choice type, return as-is if not. Each value in a FHIR choice type is a different field on the protobuf representation wrapped under a proto oneof field. Therefore, if an expression points to a choice type, we should return the populated field -- while just returning the field as-is for non-choice types. This way we can simply pass nested messages through this class, and return the populated item when appropriate. Args: fhir_message: the evaluation result which may or may not be a choice type Returns: The result value, resolved to the sub-field if it is a choice type.
github-repos
def sget_voltage(self, cycle, step, set_number=None):
    """Return the voltage series for the given cycle and step.

    Convenience function; same as issuing
    dfdata[(dfdata[cycle_index_header] == cycle) &
           (dfdata[step_index_header] == step)][voltage_header]

    Args:
        cycle: cycle number.
        step: step number (should be an int; a list/tuple is tolerated by
            using its first element, with a warning).
        set_number: the dataset number (automatic selection if None).

    Returns:
        pandas.Series with the voltages, or None if the selection is empty
        or no dataset is available.
    """
    time_00 = time.time()
    set_number = self._validate_dataset_number(set_number)
    if set_number is None:
        self._report_empty_dataset()
        return
    cycle_index_header = self.headers_normal.cycle_index_txt
    voltage_header = self.headers_normal.voltage_txt
    step_index_header = self.headers_normal.step_index_txt
    test = self.datasets[set_number].dfdata
    if isinstance(step, (list, tuple)):
        # Fixed garbled warning text ("varialbe", missing spacing).
        warnings.warn(
            f'The variable step is a list. Should be an integer. {step}')
        step = step[0]
    c = test[(test[cycle_index_header] == cycle)
             & (test[step_index_header] == step)]
    self.logger.debug(f'(dt: {time.time() - time_00:4.2f}s)')
    if not self.is_empty(c):
        return c[voltage_header]
    return None
Returns voltage for cycle, step. Convenience function; same as issuing dfdata[(dfdata[cycle_index_header] == cycle) & (dfdata[step_index_header] == step)][voltage_header] Args: cycle: cycle number step: step number set_number: the dataset number (automatic selection if None) Returns: pandas.Series or None if empty
codesearchnet
def _evalDecodeJpeg(self, image_name, parallelism, num_iters, crop_during_decode=None, crop_window=None, tile=None):
    """Evaluate DecodeJpegOp for the given image and return the run time.

    Args:
        image_name: a string of image file name (without suffix).
        parallelism: the number of concurrent decode_jpeg ops to be run.
        num_iters: number of iterations for evaluation.
        crop_during_decode: If true, use fused DecodeAndCropJpeg instead of
            separate decode and crop ops. It is ignored if crop_window is
            None.
        crop_window: if not None, crop the decoded image. Depending on
            crop_during_decode, cropping could happen during or after
            decoding.
        tile: if not None, tile the image to composite a larger fake image.

    Returns:
        The duration of the run in seconds.
    """
    ops.reset_default_graph()
    # Locate the test image; the fallback path covers a different build
    # layout where the data files sit two directories up.
    image_file_path = resource_loader.get_path_to_datafile(os.path.join('core', 'lib', 'jpeg', 'testdata', image_name))
    if not os.path.exists(image_file_path):
        image_file_path = resource_loader.get_path_to_datafile(os.path.join('..', '..', 'core', 'lib', 'jpeg', 'testdata', image_name))
    if tile is None:
        # Hold the raw JPEG bytes in a variable so reads don't touch disk.
        image_content = variable_scope.get_variable('image_%s' % image_name, initializer=io_ops.read_file(image_file_path))
    else:
        # Build a larger fake image by tiling, then re-encode it to JPEG.
        single_image = image_ops.decode_jpeg(io_ops.read_file(image_file_path), channels=3, name='single_image')
        tiled_image = array_ops.tile(single_image, tile)
        image_content = variable_scope.get_variable('tiled_image_%s' % image_name, initializer=image_ops.encode_jpeg(tiled_image))
    with session.Session() as sess:
        self.evaluate(variables.global_variables_initializer())
        images = []
        for _ in range(parallelism):
            if crop_window is None:
                # No cropping requested: plain decode.
                image = image_ops.decode_jpeg(image_content, channels=3)
            elif crop_during_decode:
                # Fused decode+crop kernel.
                image = image_ops.decode_and_crop_jpeg(image_content, crop_window, channels=3)
            else:
                # Decode fully, then crop as a separate op.
                image = image_ops.decode_jpeg(image_content, channels=3)
                image = image_ops.crop_to_bounding_box(image, offset_height=crop_window[0], offset_width=crop_window[1], target_height=crop_window[2], target_width=crop_window[3])
            images.append(image)
        # Group all decodes so one run executes them concurrently.
        r = control_flow_ops.group(*images)
        # Warm up three times before timing to exclude startup costs.
        for _ in range(3):
            self.evaluate(r)
        start_time = time.time()
        for _ in range(num_iters):
            self.evaluate(r)
        end_time = time.time()
    return end_time - start_time
Evaluate DecodeJpegOp for the given image. TODO(tanmingxing): add decoding+cropping as well. Args: image_name: a string of image file name (without suffix). parallelism: the number of concurrent decode_jpeg ops to be run. num_iters: number of iterations for evaluation. crop_during_decode: If true, use fused DecodeAndCropJpeg instead of separate decode and crop ops. It is ignored if crop_window is None. crop_window: if not None, crop the decoded image. Depending on crop_during_decode, cropping could happen during or after decoding. tile: if not None, tile the image to composite a larger fake image. Returns: The duration of the run in seconds.
github-repos
def write_to(self, content, content_type):
    """Writes text content to this item.

    Args:
        content: the text content to be written.
        content_type: the type of text content.

    Raises:
        Exception if the upload of the item's content fails.
    """
    # Any error from the upload propagates to the caller unchanged; the
    # previous try/except that immediately re-raised the same exception
    # was a no-op and has been removed.
    self._api.object_upload(self._bucket, self._key, content, content_type)
Writes text content to this item. Args: content: the text content to be written. content_type: the type of text content. Raises: Exception if there was an error uploading the item's content.
codesearchnet
def stage_signature(vcs, signature):
    """Add `signature` to the list of staged signatures.

    Args:
        vcs (easyci.vcs.base.Vcs)
        signature (basestring)

    Raises:
        AlreadyStagedError: if the signature is already staged.
    """
    history_path = _get_staged_history_path(vcs)
    staged = get_staged_signatures(vcs)
    if signature in staged:
        raise AlreadyStagedError
    staged.append(signature)
    # Rewrite the whole history file with one signature per line.
    with open(history_path, 'w') as f:
        f.write('\n'.join(staged))
Add `signature` to the list of staged signatures Args: vcs (easyci.vcs.base.Vcs) signature (basestring) Raises: AlreadyStagedError
juraj-google-style
def _to_reader_home(self):
    """Navigate to the Cloud Reader library page.

    Raises:
        BrowserError: If the KCR homepage could not be loaded.
        ConnectionError: If there was a connection error.
    """
    self.switch_to_default_content()
    self.get(_KindleCloudReaderBrowser._CLOUD_READER_URL)
    # This page title indicates the browser could not reach the server.
    if (self.title == u'Problem loading page'):
        raise ConnectionError
    # Either the login form ('amzn_kcr') or the reader iframe may load first.
    login_or_reader_loaded = (lambda br: (br.find_elements_by_id('amzn_kcr') or br.find_elements_by_id('KindleLibraryIFrame')))
    self._wait(5).until(login_or_reader_loaded)
    try:
        # NOTE(review): assumes a fresh session always lands on the sign-in
        # page; a timeout waiting for it is treated as a load failure rather
        # than an already-authenticated state -- confirm against callers.
        self._wait(5).until((lambda br: (br.title == u'Amazon.com Sign In')))
    except TimeoutException:
        raise BrowserError('Failed to load Kindle Cloud Reader.')
    else:
        self._login()
Navigate to the Cloud Reader library page. Raises: BrowserError: If the KCR homepage could not be loaded. ConnectionError: If there was a connection error.
codesearchnet
def run(self, env: env_tools.PreparedEnv, verbose: bool,
        previous_failures: Set['Check']) -> CheckResult:
    """Evaluates this check.

    Args:
        env: The prepared python environment to run the check in.
        verbose: When set, more progress output is produced.
        previous_failures: Checks that have already run and failed.

    Returns:
        A CheckResult instance.
    """
    # A failed dependency makes running this check pointless.
    if previous_failures.intersection(self.dependencies):
        print(shell_tools.highlight(
            'Skipped ' + self.command_line_switch(), shell_tools.YELLOW))
        return CheckResult(
            self, False, 'Skipped due to dependency failing.', None)

    print(shell_tools.highlight(
        'Running ' + self.command_line_switch(), shell_tools.GREEN))
    try:
        success, message = self.perform_check(env, verbose=verbose)
        result = CheckResult(self, success, message, None)
    except Exception as ex:
        # A crashing check is reported as a failure, never propagated.
        result = CheckResult(self, False, 'Unexpected error.', ex)

    color = shell_tools.GREEN if result.success else shell_tools.RED
    print(shell_tools.highlight(
        'Finished ' + self.command_line_switch(), color))
    if verbose:
        print(result)
    return result
Evaluates this check. Args: env: The prepared python environment to run the check in. verbose: When set, more progress output is produced. previous_failures: Checks that have already run and failed. Returns: A CheckResult instance.
juraj-google-style
def recipe_cm360_to_dv360(config, auth_dv, auth_cm, auth_sheet, auth_bigquery, recipe_name, recipe_slug, command):
    """Allows bulk creating DV360 Insertion Orders and Line Items from CM360.

    Args:
        config: configuration object passed through to each task call.
        auth_dv (authentication): Credentials used for dv.
        auth_cm (authentication): Credentials used for cm.
        auth_sheet (authentication): Credentials used for sheet.
        auth_bigquery (authentication): Credentials used for bigquery.
        recipe_name (string): Name of Google Sheet to create.
        recipe_slug (string): Name of Google BigQuery dataset to create.
        command (choice): Action to take.
    """
    # Ensure the destination BigQuery dataset exists.
    dataset(config, {'__comment__': 'Ensure dataset exists.', 'auth': auth_bigquery, 'dataset': recipe_slug})
    # NOTE(review): the 'source' URL below is truncated (ends at 'https:')
    # and the drive() argument dict is never closed -- this statement is not
    # valid Python as-is. Recover the full template URL and closing braces
    # from version control before relying on this function.
    drive(config, {'__comment__': 'Copy the default template to sheet with the recipe name', 'auth': auth_sheet, 'copy': {'source': 'https:
    # Execute the part of the solution selected by `command`.
    cm_to_dv(config, {'__comment': 'Depending on users choice, execute a different part of the solution.', 'auth_dv': auth_dv, 'auth_cm': auth_cm, 'auth_sheets': auth_sheet, 'auth_bigquery': auth_bigquery, 'sheet': recipe_name, 'dataset': recipe_slug, 'command': command})
Allows bulk creating DV360 Insertion Orders and Line Items from CM360. Args: auth_dv (authentication) - Credentials used for dv. auth_cm (authentication) - Credentials used for dv. auth_sheet (authentication) - Credentials used for sheet. auth_bigquery (authentication) - Credentials used for bigquery. recipe_name (string) - Name of Google Sheet to create. recipe_slug (string) - Name of Google BigQuery dataset to create. command (choice) - Action to take.
github-repos
def plot_script_validate(self, script):
    """Let the script render its validation plots on both figures, then redraw.

    Args:
        script: script whose ``plot_validate`` receives both widget figures.
    """
    figures = [self.matplotlibwidget_1.figure, self.matplotlibwidget_2.figure]
    script.plot_validate(figures)
    for widget in (self.matplotlibwidget_1, self.matplotlibwidget_2):
        widget.draw()
checks the plottype of the script and plots it accordingly Args: script: script to be plotted
juraj-google-style
def request(self, method_name: str, *args: Any, trim_log_values: bool=False,
            validate_against_schema: bool=True,
            id_generator: Optional[Iterator]=None, **kwargs: Any) -> Response:
    """Send a request by passing the method and arguments.

    >>> client.request("cat", name="Yoko")
    <Response[1]>

    Args:
        method_name: The remote procedure's method name.
        args: Positional arguments passed to the remote procedure.
        kwargs: Keyword arguments passed to the remote procedure.
        trim_log_values: Abbreviate the log entries of requests and responses.
        validate_against_schema: Validate response against the JSON-RPC schema.
        id_generator: Iterable of values to use as the "id" part of the
            request.
    """
    req = Request(method_name, *args, id_generator=id_generator, **kwargs)
    return self.send(req,
                     trim_log_values=trim_log_values,
                     validate_against_schema=validate_against_schema)
Send a request by passing the method and arguments. >>> client.request("cat", name="Yoko") <Response[1]> Args: method_name: The remote procedure's method name. args: Positional arguments passed to the remote procedure. kwargs: Keyword arguments passed to the remote procedure. trim_log_values: Abbreviate the log entries of requests and responses. validate_against_schema: Validate response against the JSON-RPC schema. id_generator: Iterable of values to use as the "id" part of the request.
codesearchnet