code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def PureMultiHeadedAttention(x, params, num_heads=8, dropout=0.0, mode='train', **kwargs):
    """Pure transformer-style multi-headed attention (no Dense transforms).

    Args:
        x: inputs ((q, k, v), mask)
        params: parameters (unused; present for layer API compatibility)
        num_heads: int: number of attention heads
        dropout: float: dropout rate
        mode: str: 'train' or 'eval'
        **kwargs: other arguments, including an optional 'rng' key

    Returns:
        Attention output with the same feature depth as the query input.
    """
    del params
    rng = kwargs.get('rng', None)
    (q, k, v), mask = x
    feature_depth = q.shape[-1]
    assert feature_depth % num_heads == 0
    # Bug fix: each head owns feature_depth // num_heads features. The old
    # `head_depth = feature_depth` made the SplitHeads reshape split the
    # sequence axis instead of the feature axis.
    head_depth = feature_depth // num_heads
    nbatch = np.shape(q)[0]

    def SplitHeads(t):
        # (batch, seq, features) -> (batch, heads, seq, head_depth)
        return np.transpose(
            np.reshape(t, (nbatch, -1, num_heads, head_depth)), (0, 2, 1, 3))

    def JoinHeads(t):
        # Inverse of SplitHeads.
        return np.reshape(
            np.transpose(t, (0, 2, 1, 3)), (nbatch, -1, num_heads * head_depth))

    return JoinHeads(
        DotProductAttention(
            SplitHeads(q), SplitHeads(k), SplitHeads(v),
            mask, dropout=dropout, mode=mode, rng=rng))
Pure transformer-style multi-headed attention. Args: x: inputs ((q, k, v), mask) params: parameters (none) num_heads: int: number of attention heads dropout: float: dropout rate mode: str: 'train' or 'eval' **kwargs: other arguments, including an optional 'rng' key Returns: Pure multi-headed attention output (no Dense transforms on input).
juraj-google-style
def return_item_count_on_subpage(self, subpage=1, total_items=1):
    """Return how many items appear on the given subpage.

    Args:
        subpage: 1-based subpage number to test.
        total_items: total item count across all subpages.

    Returns:
        int: number of items shown on that subpage, capped at
        ``self.subpage_items``.
    """
    items_before = (subpage - 1) * self.subpage_items
    if total_items > items_before:
        remaining = total_items - items_before
    else:
        remaining = total_items
    # A full subpage never shows more than subpage_items entries.
    return min(remaining, self.subpage_items)
Return the number of items on a subpage. Args: * subpage = The subpage to test for * total_items = the total item count Returns: * Integer - Which represents the calculated number of items on the subpage.
juraj-google-style
def _tokens_to_subtoken(self, tokens):
    """Convert a list of token strings into a flat list of subtoken strings.

    Args:
        tokens: a list of token strings.

    Returns:
        A flat list of subtoken strings covering every input token, in order.
    """
    return [
        subtoken
        for token in tokens
        for subtoken in self._escaped_token_to_subtoken_strings(
            _escape_token(token, self._alphabet))
    ]
Converts a list of tokens to a list of subtoken strings. Args: tokens: a list of strings. Returns: a list of subtoken strings.
codesearchnet
def get_file_metadata(root):
    """Read and parse ReSpecTh XML file metadata (file author, version, etc.).

    Args:
        root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file.

    Returns:
        dict: Dictionary with the file metadata.

    Raises:
        MissingElementError: If the fileAuthor element is absent or empty.
    """
    author = getattr(root.find('fileAuthor'), 'text', False)
    if not author:
        raise MissingElementError('fileAuthor')
    return {
        'file-authors': [{'name': author}],
        'file-version': 0,
        'chemked-version': __version__,
    }
Read and parse ReSpecTh XML file metadata (file author, version, etc.) Args: root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file Returns: properties (`dict`): Dictionary with file metadata
juraj-google-style
def _retrieve_info(self, http):
    """Refresh service account info when the credentials are marked invalid.

    Args:
        http: an object to be used to make HTTP requests.
    """
    if not self.invalid:
        # Nothing to do for valid credentials.
        return
    info = _metadata.get_service_account_info(
        http, service_account=self.service_account_email or 'default')
    self.invalid = False
    self.service_account_email = info['email']
    self.scopes = info['scopes']
Retrieves service account info for invalid credentials. Args: http: an object to be used to make HTTP requests.
juraj-google-style
def with_past(cls, config: 'PretrainedConfig', task: str='default') -> 'OnnxConfigWithPast':
    """Instantiate an OnnxConfig with its `use_past` attribute set to True.

    Args:
        config: The underlying model's config to use when exporting to ONNX.
        task: The task the model is used for.

    Returns:
        An OnnxConfig instance with `.use_past = True`.
    """
    return cls(config, use_past=True, task=task)
Instantiate a OnnxConfig with `use_past` attribute set to True Args: config: The underlying model's config to use when exporting to ONNX task: The task the model is used for Returns: OnnxConfig with `.use_past = True`
github-repos
def print_fhir_to_json_string(fhir_proto: message.Message) -> str:
    """Serialize a FHIR proto to compact JSON (no spaces or newlines).

    Args:
        fhir_proto: The proto to serialize into a JSON string.

    Returns:
        A FHIR JSON representation with no spaces or newlines.
    """
    compact_printer = _json_printer.JsonPrinter.compact_printer(_PRIMITIVE_HANDLER)
    return compact_printer.print(fhir_proto)
Returns a FHIR JSON representation with no spaces or newlines. Args: fhir_proto: The proto to serialize into a JSON string. Returns: A FHIR JSON representation with no spaces or newlines.
github-repos
def _cmd(self, command, uid=None): if not uid: uid = self.uid self._client_send(json.dumps({'cmd': command, 'uid': uid})) return self._client_receive()
Send a command to the server. Args: command: str, The name of the command to execute. uid: int, the uid of the session to send the command to. Returns: The line that was written back.
github-repos
def ParseMessageRow(self, parser_mediator, query, row, **unused_kwargs):
    """Parses a message row.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        query (str): query that created the row.
        row (sqlite3.Row): row.
    """
    query_hash = hash(query)
    event_data = IMessageEventData()
    # Event attribute name -> database column it is read from.
    column_map = (
        ('attachment_location', 'attachment_location'),
        ('imessage_id', 'imessage_id'),
        ('message_type', 'message_type'),
        ('offset', 'ROWID'),
        ('read_receipt', 'read_receipt'),
        ('service', 'service'),
        ('text', 'text'))
    for attribute_name, column_name in column_map:
        setattr(event_data, attribute_name,
                self._GetRowValue(query_hash, row, column_name))
    event_data.query = query
    timestamp = self._GetRowValue(query_hash, row, 'date')
    date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_CREATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a message row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
juraj-google-style
def topological_sort(g):
    """Performs topological sort on the given TF graph (Kahn's algorithm).

    Args:
        g: the graph.

    Returns:
        A pair (has_cycle, result): when a cycle is found, has_cycle is True
        and result is the set of ops left with unsatisfied dependencies (the
        cycle); otherwise has_cycle is False and result is the topologically
        sorted list of ops.
    """

    def _is_loop_edge(op):
        # While-loop back-edges must be ignored, otherwise every loop in the
        # graph would look like a cycle.
        return op.type in ['NextIteration']

    def _in_op_degree(op):
        # Number of non-loop-edge producers feeding `op` (control + data).
        count = 0
        for op in op.control_inputs + [in_tensor.op for in_tensor in op.inputs]:
            if not _is_loop_edge(op):
                count += 1
        return count

    sorted_ops = []
    op_in_degree = {op: _in_op_degree(op) for op in g.get_operations()}
    frontier = [op for op, degree in op_in_degree.items() if degree == 0]
    frontier.sort(key=lambda op: op.name)  # deterministic traversal order
    while frontier:
        op = frontier.pop()
        sorted_ops.append(op)
        if _is_loop_edge(op):
            # Do not propagate through loop back-edges.
            continue
        consumers = list(op._control_outputs)
        for out_tensor in op.outputs:
            consumers += [consumer_op for consumer_op in out_tensor.consumers()]
        consumers.sort(key=lambda op: op.name)
        for consumer in consumers:
            op_in_degree[consumer] -= 1
            if op_in_degree[consumer] == 0:
                frontier.append(consumer)
            if op_in_degree[consumer] < 0:
                raise ValueError('consumer:%s degree mismatch' % consumer.name)
    # Any op with remaining unmet dependencies is part of a cycle.
    left_ops = set((op for op, degree in op_in_degree.items() if degree > 0))
    if left_ops:
        return (True, left_ops)
    else:
        assert len(g.get_operations()) == len(sorted_ops)
        return (False, sorted_ops)
Performs topological sort on the given graph. Args: g: the graph. Returns: A pair where the first element indicates whether a cycle was found (True if a cycle is found; False otherwise) and the second element is either the set of nodes forming the cycle or the topologically sorted list of nodes.
github-repos
def frozen(cls: _Cls) -> _Cls:
    """Class decorator which prevents mutating attributes after `__init__`.

    Args:
        cls: The class to freeze.

    Returns:
        cls: The same class, with `__init__` and `__setattr__` wrapped.

    Raises:
        TypeError: If `cls` is not a class.
    """
    if not isinstance(cls, type):
        # Use !r on the object itself: non-class inputs (ints, instances,
        # ...) have no __name__, so the old f'{cls.__name__} is not' message
        # was both truncated and raised AttributeError instead of the
        # intended TypeError.
        raise TypeError(f'{cls!r} is not a class')
    cls.__init__ = _wrap_init(cls.__init__)
    cls.__setattr__ = _wrap_setattr(cls.__setattr__)
    return cls
Class decorator which prevent mutating attributes after `__init__`. Example: ```python @epy.frozen class A: def __init__(self): self.x = 123 a = A() a.x = 456 # AttributeError ``` Supports inheritance, child classes should explicitly be marked as `@epy.frozen` if they mutate additional attributes in `__init__`. Args: cls: The class to freeze. Returns: cls: The class object
github-repos
def _TrimNode(node, index, depth, flags):
    """Internal helper method to trim a node.

    Args:
        node (MerkleTreeNode): node to trim.
        index (int): flag index.
        depth (int): node tree depth to start trim from.
        flags (bytearray): of left/right pairs. 1 byte for the left node,
            1 byte for the right node. 00 to erase, 11 to keep. Will keep
            the node if either left or right is not-0.
    """
    # Leaves, or nodes with no children, are never trimmed here.
    if depth == 1 or node.LeftChild is None:
        return
    if depth == 2:
        # Both direct children unflagged: drop the whole subtree.
        if not flags[index * 2] and not flags[index * 2 + 1]:
            node.LeftChild = None
            node.RightChild = None
    else:
        # Recurse first, then collapse this node if both subtrees were
        # fully trimmed below.
        MerkleTree._TrimNode(node.LeftChild, index * 2, depth - 1, flags)
        MerkleTree._TrimNode(node.RightChild, index * 2, depth - 1, flags)
        if node.LeftChild.LeftChild is None and node.RightChild.RightChild is None:
            node.LeftChild = None
            node.RightChild = None
Internal helper method to trim a node. Args: node (MerkleTreeNode): index (int): flag index. depth (int): node tree depth to start trim from. flags (bytearray): of left/right pairs. 1 byte for the left node, 1 byte for the right node. 00 to erase, 11 to keep. Will keep the node if either left or right is not-0
juraj-google-style
def build(self, query_shape, value_shape, key_shape=None):
    """Builds layers and variables.

    Args:
        query_shape: Shape of the `query` tensor.
        value_shape: Shape of the `value` tensor.
        key_shape: Optional shape of the `key` tensor; defaults to
            `value_shape` when omitted (self-attention).
    """
    # `key` defaults to `value` when not provided.
    key_shape = value_shape if key_shape is None else key_shape
    if value_shape[1:-1] != key_shape[1:-1]:
        raise ValueError(f'All dimensions of `value` and `key`, except the last one, must be equal. Received: value_shape={value_shape} and key_shape={key_shape}')
    query_rank = len(query_shape)
    value_rank = len(value_shape)
    key_rank = len(key_shape)
    # Query projection: collapse the last dim into (num_heads, key_dim).
    einsum_equation, bias_axes, output_rank = _build_proj_equation(query_rank - 1, bound_dims=1, output_dims=2)
    self._query_dense = EinsumDense(einsum_equation, output_shape=_get_output_shape(output_rank - 1, [self._num_heads, self._key_dim]), bias_axes=bias_axes if self._use_bias else None, name='query', **self._get_common_kwargs_for_sublayer())
    self._query_dense.build(query_shape)
    # Key projection: same per-head width as the query projection.
    einsum_equation, bias_axes, output_rank = _build_proj_equation(key_rank - 1, bound_dims=1, output_dims=2)
    self._key_dense = EinsumDense(einsum_equation, output_shape=_get_output_shape(output_rank - 1, [self._num_heads, self._key_dim]), bias_axes=bias_axes if self._use_bias else None, name='key', **self._get_common_kwargs_for_sublayer())
    self._key_dense.build(key_shape)
    # Value projection: per-head width is value_dim (may differ from key_dim).
    einsum_equation, bias_axes, output_rank = _build_proj_equation(value_rank - 1, bound_dims=1, output_dims=2)
    self._value_dense = EinsumDense(einsum_equation, output_shape=_get_output_shape(output_rank - 1, [self._num_heads, self._value_dim]), bias_axes=bias_axes if self._use_bias else None, name='value', **self._get_common_kwargs_for_sublayer())
    self._value_dense.build(value_shape)
    # Attention machinery (softmax, dropout) for the projected rank.
    self._build_attention(output_rank)
    self._output_dense = self._make_output_dense(query_shape, self._get_common_kwargs_for_sublayer(), 'attention_output')
    # The output projection consumes the attention result, whose last dim
    # is value_dim rather than key_dim.
    output_dense_input_shape = list(self._query_dense.compute_output_shape(query_shape))
    output_dense_input_shape[-1] = self._value_dim
    self._output_dense.build(tuple(output_dense_input_shape))
Builds layers and variables. Args: query_shape: Shape of the `query` tensor. value_shape: Shape of the `value` tensor. key_shape: Optional shape of the `key` tensor; defaults to `value_shape`.
github-repos
def _WaitForStartup(self, deadline): start = time.time() sleep = 0.05 def Elapsed(): return (time.time() - start) while True: try: (response, _) = self._http.request(self._host) if (response.status == 200): logging.info('emulator responded after %f seconds', Elapsed()) return True except (socket.error, httplib.ResponseNotReady): pass if (Elapsed() >= deadline): return False else: time.sleep(sleep) sleep *= 2
Waits for the emulator to start. Args: deadline: deadline in seconds Returns: True if the emulator responds within the deadline, False otherwise.
codesearchnet
def _build_statistics(self, input_batch, use_batch_stats, stat_dtype):
    """Builds the statistics part of the graph when using moving variance.

    Args:
        input_batch: Input batch Tensor.
        use_batch_stats: Boolean to indicate if batch statistics should be
            calculated, otherwise moving averages are returned.
        stat_dtype: TensorFlow datatype to use for the moving mean and
            variance.

    Returns:
        Tuple of (mean, variance), each of the same datatype as
        `input_batch`.
    """
    # Lazily install default initializers so user-supplied ones win.
    if self.MOVING_MEAN not in self._initializers:
        self._initializers[self.MOVING_MEAN] = create_mean_initializer()
    self._moving_mean = tf.get_variable(
        "moving_mean",
        dtype=stat_dtype,
        shape=(self._num_channels,),
        collections=[
            tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
            tf.GraphKeys.GLOBAL_VARIABLES,
        ],
        initializer=self._initializers[self.MOVING_MEAN],
        trainable=False)
    if self.MOVING_VARIANCE not in self._initializers:
        self._initializers[self.MOVING_VARIANCE] = create_variance_initializer()
    self._moving_variance = tf.get_variable(
        "moving_variance",
        dtype=stat_dtype,
        shape=(self._num_channels,),
        collections=[
            tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
            tf.GraphKeys.GLOBAL_VARIABLES,
        ],
        initializer=self._initializers[self.MOVING_VARIANCE],
        trainable=False)

    def build_batch_stats():
        # Moments computed over self._axis with dims kept for broadcasting.
        mean, variance = tf.nn.moments(input_batch, self._axis,
                                       keep_dims=True,
                                       name="normalize_moments")
        return mean, variance

    def build_moving_stats():
        # Cast only when the stat dtype differs from the input dtype.
        input_dtype = input_batch.dtype.base_dtype
        if stat_dtype == input_dtype:
            return (
                tf.identity(self._moving_mean),
                tf.identity(self._moving_variance),
            )
        else:
            return (
                tf.cast(self._moving_mean, input_dtype),
                tf.cast(self._moving_variance, input_dtype),
            )

    # Select batch vs moving statistics at graph time when possible.
    mean, variance = utils.smart_cond(
        use_batch_stats,
        build_batch_stats,
        build_moving_stats,
    )
    return mean, variance
Builds the statistics part of the graph when using moving variance. Args: input_batch: Input batch Tensor. use_batch_stats: Boolean to indicate if batch statistics should be calculated, otherwise moving averages are returned. stat_dtype: TensorFlow datatype to use for the moving mean and variance. Returns: Tuple of (mean, variance), each of the same datatype as `input_batch`.
juraj-google-style
def tf_step(self, time, variables, **kwargs):
    """Creates the TensorFlow operations for performing an optimization step.

    Args:
        time: Time tensor.
        variables: List of variables to optimize.
        **kwargs: Additional arguments passed on to the internal optimizer.

    Returns:
        List of delta tensors corresponding to the updates for each
        optimized variable.
    """
    deltas = self.optimizer.step(time=time, variables=variables, **kwargs)
    with tf.control_dependencies(control_inputs=deltas):
        clipped_deltas = list()
        exceeding_deltas = list()
        for delta in deltas:
            clipped_delta = tf.clip_by_value(
                t=delta,
                clip_value_min=-self.clipping_value,
                clip_value_max=self.clipping_value
            )
            clipped_deltas.append(clipped_delta)
            # Correction term: clipped minus unclipped update.
            exceeding_deltas.append(clipped_delta - delta)
    # NOTE(review): applying (clipped - delta) on top only yields the
    # clipped update if self.optimizer.step already applied `deltas` to
    # the variables — confirm against the inner optimizer's contract.
    applied = self.apply_step(variables=variables, deltas=exceeding_deltas)
    with tf.control_dependencies(control_inputs=(applied,)):
        # `+ 0.0` forces new tensors ordered after the apply op.
        return [delta + 0.0 for delta in clipped_deltas]
Creates the TensorFlow operations for performing an optimization step. Args: time: Time tensor. variables: List of variables to optimize. **kwargs: Additional arguments passed on to the internal optimizer. Returns: List of delta tensors corresponding to the updates for each optimized variable.
juraj-google-style
def add(self, X):
    """Record known pipeline scores and refit the model on all data.

    Updates ``dpp_vector`` in place with every (index, score) pair from
    ``X``, then refits using the full vector.

    Args:
        X (dict): mapping of pipeline indices to scores. Keys must
            correspond to the index of a column in ``dpp_matrix`` and
            values are the corresponding score for the pipeline on the
            dataset.
    """
    for index, score in X.items():
        self.dpp_vector[index] = score
    self.fit(self.dpp_vector.reshape(1, -1))
Add data about known pipeline and scores. Updates ``dpp_vector`` and refits model with all data. Args: X (dict): mapping of pipeline indices to scores. Keys must correspond to the index of a column in ``dpp_matrix`` and values are the corresponding score for pipeline on the dataset.
juraj-google-style
def encoder(self, inputs, n_layers=3):
    """Convnet that encodes inputs into mean and log-variance of a gaussian.

    Args:
        inputs: 5-D Tensor, shape (batch_size, num_frames, width, height,
            channels).
        n_layers: Number of conv layers.

    Returns:
        z_mu: Mean of the latent gaussians.
        z_log_var: log(var) of the latent gaussians.

    Raises:
        ValueError: If inputs is not a 5-D tensor or not float32.
    """
    latent_dims = self.hparams.z_dim
    shape_as_list = inputs.shape.as_list()
    if (len(shape_as_list) != 5):
        raise ValueError(('Expected inputs to be a 5-D, got %d' % len(shape_as_list)))
    if (inputs.dtype != tf.float32):
        raise ValueError(('Expected dtype tf.float32, got %s' % inputs.dtype))
    (batch_size, _) = shape_as_list[:2]
    # Fold the time axis into the batch so 2-D conv layers apply per frame.
    inputs = tf.reshape(inputs, ([(- 1)] + list(inputs.shape)[2:]))
    n_filters = 64
    rectified = None
    padding = [[0, 0], [1, 1], [1, 1], [0, 0]]
    for i in range(n_layers):
        with tf.variable_scope(('layer_%d' % (i + 1))):
            # Cumulative doubling: 64, 128, 512, ... filters per layer.
            n_filters *= (2 ** i)
            if i:
                padded = tf.pad(rectified, padding)
            else:
                padded = tf.pad(inputs, padding)
            convolved = tf.layers.conv2d(padded, filters=n_filters, kernel_size=4, strides=2, padding='VALID')
            normalized = tf.contrib.layers.instance_norm(convolved)
            rectified = tf.nn.leaky_relu(normalized, alpha=0.2)
    # Global average pool over the remaining spatial dims.
    pooled = tf.nn.avg_pool(rectified, (([1] + rectified.shape[1:3].as_list()) + [1]), strides=[1, 1, 1, 1], padding='VALID')
    squeezed = tf.squeeze(pooled, [1, 2])
    with tf.variable_scope('z_mu'):
        z_mu = tf.layers.dense(squeezed, latent_dims)
    with tf.variable_scope('z_log_sigma_sq'):
        z_log_var = tf.layers.dense(squeezed, latent_dims)
        # Clamp log-variance for numerical stability downstream.
        z_log_var = tf.clip_by_value(z_log_var, (- 10), 10)
    # Restore the (batch, time, latent) layout.
    z_mu = tf.reshape(z_mu, (batch_size, (- 1), latent_dims))
    z_log_var = tf.reshape(z_log_var, (batch_size, (- 1), latent_dims))
    return (z_mu, z_log_var)
Convnet that encodes inputs into mean and std of a gaussian. Args: inputs: 5-D Tensor, shape (batch_size, num_frames, width, height, channels) n_layers: Number of layers. Returns: z_mu: Mean of the latent gaussians. z_log_var: log(var) of the latent gaussians. Raises: ValueError: If inputs is not a 5-D tensor or not float32.
codesearchnet
def _dataset_merge_filestore_newresource(self, new_resource, ignore_fields, filestore_resources): new_resource.check_required_fields(ignore_fields=ignore_fields) self.resources.append(new_resource) if new_resource.get_file_to_upload(): filestore_resources.append(new_resource) new_resource['url'] = Dataset.temporary_url
Helper method to add new resource from dataset including filestore. Args: new_resource (hdx.data.Resource): New resource from dataset ignore_fields (List[str]): List of fields to ignore when checking resource filestore_resources (List[hdx.data.Resource]): List of resources that use filestore (to be appended to) Returns: None
codesearchnet
def __new__(cls, *args, **kwargs):
    """Initialize the :class:`FrameSet` object.

    Args:
        *args: forwarded to the parent ``__new__``; typically the frame
            range as a string (e.g. ``"1-100x5"``) or another FrameSet.
        **kwargs: forwarded to the parent ``__new__``.
    """
    # NOTE(review): super(cls, FrameSet) starts the MRO walk after
    # FrameSet itself rather than after cls; this only behaves as a normal
    # super() call when cls is FrameSet (super() requires the second
    # argument to be a subclass of the first) — confirm subclassing is
    # supported here.
    self = super(cls, FrameSet).__new__(cls, *args, **kwargs)
    return self
Initialize the :class:`FrameSet` object. Args: frange (str or :class:`FrameSet`): the frame range as a string (ie "1-100x5") Raises: :class:`.ParseException`: if the frame range (or a portion of it) could not be parsed. :class:`fileseq.exceptions.MaxSizeException`: if the range exceeds `fileseq.constants.MAX_FRAME_SIZE`
juraj-google-style
def scale(self, new_volume: float) -> 'Lattice':
    """Return a new Lattice scaled to the given volume.

    The lattice vectors are scaled so that length proportions and angles
    are preserved.

    Args:
        new_volume: New volume to scale to.

    Returns:
        New lattice with the desired volume.
    """
    # Unit-length lattice vectors (directions only).
    unit_vectors = self.matrix / self.abc
    # Volume of the parallelepiped spanned by the unit vectors.
    geo_factor = abs(dot(np.cross(unit_vectors[0], unit_vectors[1]), unit_vectors[2]))
    # Length ratios relative to c, preserved under scaling.
    ratios = np.array(self.abc) / self.c
    new_c = (new_volume / (geo_factor * np.prod(ratios))) ** (1 / 3.0)
    return Lattice(unit_vectors * (new_c * ratios))
Return a new Lattice with volume new_volume by performing a scaling of the lattice vectors so that length proportions and angles are preserved. Args: new_volume: New volume to scale to. Returns: New lattice with desired volume.
codesearchnet
def More(contents, out, prompt=None, check_pager=True):
    """Run a user specified pager or fall back to the internal pager.

    Args:
        contents: The entire contents of the text lines to page.
        out: The output stream.
        prompt: The page break prompt.
        check_pager: Checks the PAGER env var and uses it if True.
    """
    # Non-interactive output (e.g. piped): dump everything without paging.
    if not IsInteractive(output=True):
        out.write(contents)
        return
    if check_pager:
        pager = encoding.GetEncodedValue(os.environ, 'PAGER', None)
        if pager == '-':
            # PAGER=- explicitly requests the internal fallback pager.
            pager = None
        elif not pager:
            # No PAGER set: probe for common pager executables.
            for command in ('less', 'pager'):
                if files.FindExecutableOnPath(command):
                    pager = command
                    break
        if pager:
            # Prepend -R so less passes ANSI color codes through; restore
            # the original LESS value afterwards.
            less_orig = encoding.GetEncodedValue(os.environ, 'LESS', None)
            less = '-R' + (less_orig or '')
            encoding.SetEncodedValue(os.environ, 'LESS', less)
            # Ignore Ctrl-C in this process so the child pager handles it.
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            p = subprocess.Popen(pager, stdin=subprocess.PIPE, shell=True)
            enc = console_attr.GetConsoleAttr().GetEncoding()
            p.communicate(input=contents.encode(enc))
            p.wait()
            signal.signal(signal.SIGINT, signal.SIG_DFL)
            if less_orig is None:
                encoding.SetEncodedValue(os.environ, 'LESS', None)
            return
    # Fallback: the built-in console pager.
    console_pager.Pager(contents, out, prompt).Run()
Run a user specified pager or fall back to the internal pager. Args: contents: The entire contents of the text lines to page. out: The output stream. prompt: The page break prompt. check_pager: Checks the PAGER env var and uses it if True.
github-repos
def __init__(self, filesystem, os_module=None):
    """Init.

    Args:
        filesystem: FakeFilesystem used to provide file system information.
        os_module: (deprecated) FakeOsModule to assign to self.os.
    """
    self.filesystem = filesystem
    self._os_path = self._OS_PATH_COPY
    if os_module is None:
        # Calling without an os_module is the deprecated path.
        warnings.warn(FAKE_PATH_MODULE_DEPRECATION, DeprecationWarning,
                      stacklevel=2)
    # NOTE(review): on the deprecated path this assigns None to both
    # self.os and self._os_path.os — confirm downstream code tolerates
    # that or replaces it later.
    self._os_path.os = self.os = os_module
    self.sep = self.filesystem.path_separator
    self.altsep = self.filesystem.alternative_path_separator
Init. Args: filesystem: FakeFilesystem used to provide file system information os_module: (deprecated) FakeOsModule to assign to self.os
juraj-google-style
def _deduplicate_indexed_slices(values, indices):
    """Sums `values` associated with any non-unique `indices`.

    Args:
        values: A `Tensor` with rank >= 1.
        indices: A one-dimensional integer `Tensor`, indexing into the first
            dimension of `values` (as in an IndexedSlices object).

    Returns:
        A tuple of (`summed_values`, `unique_indices`) where
        `unique_indices` is a de-duplicated version of `indices` and
        `summed_values` contains the sum of `values` slices associated with
        each unique index.
    """
    unique_indices, new_index_positions = tf.unique(indices)
    # Segment-sum collapses all rows that shared an index into one row.
    summed_values = tf.unsorted_segment_sum(values, new_index_positions, tf.shape(unique_indices)[0])
    return (summed_values, unique_indices)
Sums `values` associated with any non-unique `indices`. Args: values: A `Tensor` with rank >= 1. indices: A one-dimensional integer `Tensor`, indexing into the first dimension of `values` (as in an IndexedSlices object). Returns: A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a de-duplicated version of `indices` and `summed_values` contains the sum of `values` slices associated with each unique index.
juraj-google-style
def get_rows_to_keep(gctoo, rid=None, row_bool=None, ridx=None, exclude_rid=None):
    """Determine from the possible row inputs which rows to keep.

    Exactly one of rid, row_bool or ridx is honored (checked in that
    order); with none given, all rows are kept.  exclude_rid is applied
    afterwards in every case.

    Args:
        gctoo (GCToo object): object whose data_df index is filtered.
        rid (list of strings): row ids to keep.
        row_bool (boolean array): one flag per row.
        ridx (list of integers): positional row indices.
        exclude_rid (list of strings): row ids to drop from the result.

    Returns:
        rows_to_keep (list of strings): row ids to be kept.
    """
    index = gctoo.data_df.index
    if rid is not None:
        assert type(rid) == list, 'rid must be a list. rid: {}'.format(rid)
        # Keep data_df order, not the order rids were requested in.
        rows_to_keep = [row_id for row_id in index if row_id in rid]
        missing_count = len(rid) - len(rows_to_keep)
        if missing_count != 0:
            logger.info('{} rids were not found in the GCT.'.format(missing_count))
    elif row_bool is not None:
        assert len(row_bool) == gctoo.data_df.shape[0], (
            'row_bool must have length equal to gctoo.data_df.shape[0]. '
            'len(row_bool): {}, gctoo.data_df.shape[0]: {}'.format(
                len(row_bool), gctoo.data_df.shape[0]))
        rows_to_keep = index[row_bool].values
    elif ridx is not None:
        assert type(ridx[0]) is int, (
            'ridx must be a list of integers. ridx[0]: {}, '
            'type(ridx[0]): {}').format(ridx[0], type(ridx[0]))
        assert max(ridx) <= gctoo.data_df.shape[0], (
            'ridx contains an integer larger than the number of rows in '
            'the GCToo. max(ridx): {}, gctoo.data_df.shape[0]: {}').format(
                max(ridx), gctoo.data_df.shape[0])
        rows_to_keep = index[ridx].values
    else:
        rows_to_keep = index.values
    if exclude_rid is not None:
        rows_to_keep = [row_id for row_id in rows_to_keep
                        if row_id not in exclude_rid]
    return rows_to_keep
Figure out based on the possible row inputs which rows to keep. Args: gctoo (GCToo object): rid (list of strings): row_bool (boolean array): ridx (list of integers): exclude_rid (list of strings): Returns: rows_to_keep (list of strings): row ids to be kept
codesearchnet
def attention_lm_attention_moe_tiny():
    """Cheap hparams set for debugging local-expert attention.

    Returns:
        an hparams object.
    """
    hparams = attention_lm_moe_small()
    # Disable MoE feed-forward layers; use expert attention instead.
    hparams.moe_layers = ''
    hparams.attention_type = AttentionType.LOCAL_EXPERTS
    hparams.attention_num_experts = 128
    hparams.filter_size = 8192
    return hparams
Cheap model for debugging. Returns: an hparams object.
codesearchnet
def _parse(json_str: str, primitive_cls: Type[DateTime], *, default_timezone: str) -> DateTime:
    """Parses the json_str into a DateTime FHIR primitive.

    Tries each supported FHIR DateTime precision in turn, from coarsest
    (year) to finest (microsecond), returning at the first match.

    Args:
        json_str: The raw JSON string to parse.
        primitive_cls: The FHIR primitive to parse into.
        default_timezone: The default timezone to use when parsing in the
            event that no timezone information is present.

    Returns:
        A FHIR primitive DateTime.

    Raises:
        fhir_errors.InvalidFhirError: In the event that no FHIR primitive
            DateTime format was able to properly parse the json_str.
    """
    # Year precision, e.g. '2018'.
    try:
        dt = datetime.datetime.strptime(json_str, '%Y')
        return _primitive_time_utils.build_date_like(dt, default_timezone, _primitive_time_utils.DateTimePrecision.YEAR, primitive_cls)
    except ValueError:
        pass
    # Month precision, e.g. '2018-06'.
    try:
        dt = datetime.datetime.strptime(json_str, '%Y-%m')
        return _primitive_time_utils.build_date_like(dt, default_timezone, _primitive_time_utils.DateTimePrecision.MONTH, primitive_cls)
    except ValueError:
        pass
    # Day precision, e.g. '2018-06-01'.
    try:
        dt = datetime.datetime.strptime(json_str, '%Y-%m-%d')
        return _primitive_time_utils.build_date_like(dt, default_timezone, _primitive_time_utils.DateTimePrecision.DAY, primitive_cls)
    except ValueError:
        pass
    # Time-carrying precisions include an explicit timezone; split it off
    # before matching the remaining formats.
    datetime_str, timezone_str = _primitive_time_utils.split_timezone(json_str)
    # Second precision.
    try:
        dt = datetime.datetime.strptime(datetime_str, '%Y-%m-%dT%H:%M:%S')
        return _primitive_time_utils.build_date_like(dt, timezone_str, _primitive_time_utils.DateTimePrecision.SECOND, primitive_cls)
    except ValueError:
        pass
    # Fractional seconds: distinguish milli- vs microsecond precision by
    # the digit pattern in the source string.
    try:
        dt = datetime.datetime.strptime(datetime_str, '%Y-%m-%dT%H:%M:%S.%f')
        if _primitive_time_utils.PRECISION_PATTERN_MILLISECOND.search(datetime_str) is not None:
            return _primitive_time_utils.build_date_like(dt, timezone_str, _primitive_time_utils.DateTimePrecision.MILLISECOND, primitive_cls)
        elif _primitive_time_utils.PRECISION_PATTERN_MICROSECOND.search(datetime_str) is not None:
            return _primitive_time_utils.build_date_like(dt, timezone_str, _primitive_time_utils.DateTimePrecision.MICROSECOND, primitive_cls)
    except ValueError:
        pass
    raise fhir_errors.InvalidFhirError('Invalid DateTime.')
Parses the json_str into a DateTime FHIR primitive. Args: json_str: The raw JSON string to parse. primitive_cls: The FHIR primitive to parse into. default_timezone: The default timezone to use when parsing in the event that no timezone information is present. Returns: A FHIR primitive DateTime. Raises: fhir_errors.InvalidFhirError: In the event that no FHIR primitive DateTime format was able to properly parse the json_str.
github-repos
def merge_level_and_latent_dist(level_dist, latent_dist, merge_std="prev_level"):
    """Merge level_dist and latent_dist into one normal distribution.

    new_dist ~ N(level_dist.mean + latent_dist.mean, std) where std is
    determined according to merge_std.

    Args:
        level_dist: instance of tfp.distributions.Normal.
        latent_dist: instance of tfp.distributions.Normal.
        merge_std: can be "prev_level", "prev_step" or "normal".

    Returns:
        merged_dist: instance of tfp.distributions.Normal.

    Raises:
        ValueError: if merge_std is not one of the supported modes.
    """
    level_mean, level_std = level_dist.loc, level_dist.scale
    latent_mean, latent_std = latent_dist.loc, latent_dist.scale
    new_mean = level_mean + latent_mean
    if merge_std == "normal":
        # Learnable (but non-trainable here) per-element scale.
        z_shape = common_layers.shape_list(latent_mean)
        log_scale = tf.get_variable(
            "merge_std", shape=z_shape, dtype=tf.float32,
            initializer=tf.zeros_initializer(), trainable=False)
        scale = tf.exp(log_scale * 3.0)
    elif merge_std == "prev_level":
        scale = level_std
    elif merge_std == "prev_step":
        scale = latent_std
    else:
        # Previously an unknown mode fell through and crashed with an
        # opaque UnboundLocalError on `scale`; fail with a clear message.
        raise ValueError(
            "merge_std must be one of 'normal', 'prev_level' or "
            "'prev_step', got %s" % merge_std)
    return tfp.distributions.Normal(loc=new_mean, scale=scale)
Merge level_dist and latent_dist. new_dist ~ N(level_dist.mean + latent_dis.mean, std) where std is determined according to merge_std. Args: level_dist: instance of tfp.distributions.Normal latent_dist: instance of tfp.distributions.Normal merge_std: can be "prev_level", "prev_step" or "normal". Returns: merged_dist: instance of tfp.distributions.Normal
juraj-google-style
def create_ingress_rule(self, app, rule):
    """Create a normalized ingress rule.

    Args:
        app (str): Application name.
        rule (dict or int): Allowed Security Group ports and protocols; a
            bare port number is shorthand for a single tcp port.

    Returns:
        dict: Contains app, start_port, end_port, protocol,
        cross_account_env and cross_account_vpc_id.
    """
    if isinstance(rule, dict):
        start_port = rule.get('start_port')
        end_port = rule.get('end_port')
        protocol = rule.get('protocol', 'tcp')
        requested_env = rule.get('env', self.env)
    else:
        # Shorthand form: a single tcp port in the current environment.
        start_port = end_port = rule
        protocol = 'tcp'
        requested_env = self.env
    if requested_env == self.env:
        cross_account_env = None
        cross_account_vpc_id = None
    else:
        cross_account_env = requested_env
        cross_account_vpc_id = get_vpc_id(cross_account_env, self.region)
    normalized = {
        'app': app,
        'start_port': start_port,
        'end_port': end_port,
        'protocol': protocol,
        'cross_account_env': cross_account_env,
        'cross_account_vpc_id': cross_account_vpc_id,
    }
    self.log.debug('Normalized ingress rule: %s', normalized)
    return normalized
Create a normalized ingress rule. Args: app (str): Application name rule (dict or int): Allowed Security Group ports and protocols. Returns: dict: Contains app, start_port, end_port, protocol, cross_account_env and cross_account_vpc_id
juraj-google-style
def get_course_id(self, course_uuid):
    """Get the numeric course id for a course uuid.

    Args:
        course_uuid (str): course uuid, i.e. /project/mitxdemosite;
            falls back to self.course_id when falsy.

    Raises:
        PyLmodUnexpectedData: No course data was returned.
        requests.RequestException: Exception connection error.

    Returns:
        int: numeric course id.
    """
    course_data = self.get(
        'courseguide/course?uuid={uuid}'.format(
            uuid=course_uuid or self.course_id),
        params=None)
    try:
        return course_data['response']['docs'][0]['id']
    except (KeyError, TypeError) as err:
        # Same messages as before, one clause for both failure shapes.
        failure_message = '{0} in get_course_id - got {1}'.format(
            type(err).__name__, course_data)
        log.exception(failure_message)
        raise PyLmodUnexpectedData(failure_message)
Get course id based on uuid. Args: uuid (str): course uuid, i.e. /project/mitxdemosite Raises: PyLmodUnexpectedData: No course data was returned. requests.RequestException: Exception connection error Returns: int: numeric course id
juraj-google-style
def backfill_previous_messages(self, reverse=False, limit=10):
    """Backfill handling of previous messages.

    Args:
        reverse (bool): When False, messages are backfilled in their
            original order (old to new); otherwise new to old.
        limit (int): Number of messages to go back.
    """
    result = self.client.api.get_room_messages(
        self.room_id, self.prev_batch, direction="b", limit=limit)
    chunk = result["chunk"]
    # The API returns newest-first; flip unless the caller wants that.
    ordered = chunk if reverse else reversed(chunk)
    for event in ordered:
        self._put_event(event)
Backfill handling of previous messages. Args: reverse (bool): When false messages will be backfilled in their original order (old to new), otherwise the order will be reversed (new to old). limit (int): Number of messages to go back.
juraj-google-style
class AqlmConfig(QuantizationConfigMixin):
    """Wrapper class for `aqlm` quantization parameters.

    Args:
        in_group_size (`int`, *optional*, defaults to 8):
            The group size along the input dimension.
        out_group_size (`int`, *optional*, defaults to 1):
            The group size along the output dimension. It's recommended to
            always use 1.
        num_codebooks (`int`, *optional*, defaults to 1):
            Number of codebooks for the Additive Quantization procedure.
        nbits_per_codebook (`int`, *optional*, defaults to 16):
            Number of bits encoding a single codebook vector. Codebook size
            is 2**nbits_per_codebook.
        linear_weights_not_to_quantize (`Optional[List[str]]`, *optional*):
            List of full paths of `nn.Linear` weight parameters that shall
            not be quantized.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional parameters from which to initialize the
            configuration object.
    """

    def __init__(self, in_group_size: int=8, out_group_size: int=1, num_codebooks: int=1, nbits_per_codebook: int=16, linear_weights_not_to_quantize: Optional[List[str]]=None, **kwargs):
        self.quant_method = QuantizationMethod.AQLM
        self.in_group_size = in_group_size
        self.out_group_size = out_group_size
        self.num_codebooks = num_codebooks
        self.nbits_per_codebook = nbits_per_codebook
        self.linear_weights_not_to_quantize = linear_weights_not_to_quantize
        self.post_init()

    def post_init(self):
        """Validate field types; called at the end of `__init__`.

        Raises:
            TypeError: if any of the integer fields is not an int.
            ValueError: if linear_weights_not_to_quantize is set but is
                not a list.
        """
        # Bug fix: the messages previously claimed these fields "must be a
        # float" while the checks require ints.
        if not isinstance(self.in_group_size, int):
            raise TypeError('in_group_size must be an int')
        if not isinstance(self.out_group_size, int):
            raise TypeError('out_group_size must be an int')
        if not isinstance(self.num_codebooks, int):
            raise TypeError('num_codebooks must be an int')
        if not isinstance(self.nbits_per_codebook, int):
            raise TypeError('nbits_per_codebook must be an int')
        if self.linear_weights_not_to_quantize is not None and (not isinstance(self.linear_weights_not_to_quantize, list)):
            raise ValueError('linear_weights_not_to_quantize must be a list of strings')
        if self.linear_weights_not_to_quantize is None:
            self.linear_weights_not_to_quantize = []
This is a wrapper class about `aqlm` parameters. Args: in_group_size (`int`, *optional*, defaults to 8): The group size along the input dimension. out_group_size (`int`, *optional*, defaults to 1): The group size along the output dimension. It's recommended to always use 1. num_codebooks (`int`, *optional*, defaults to 1): Number of codebooks for the Additive Quantization procedure. nbits_per_codebook (`int`, *optional*, defaults to 16): Number of bits encoding a single codebook vector. Codebooks size is 2**nbits_per_codebook. linear_weights_not_to_quantize (`Optional[List[str]]`, *optional*): List of full paths of `nn.Linear` weight parameters that shall not be quantized. kwargs (`Dict[str, Any]`, *optional*): Additional parameters from which to initialize the configuration object.
github-repos
def load_attributes_from_hdf5_group(group, name):
    """Loads attributes of the specified name from the HDF5 group.

    Works around HDF5's inability to store attributes larger than
    HDF5_OBJECT_HEADER_LIMIT bytes: oversized attribute lists are saved in
    numbered chunks ('name0', 'name1', ...) which are reassembled here.

    Args:
        group: A pointer to a HDF5 group.
        name: A name of the attributes to load.

    Returns:
        data: Attributes data.
    """
    def _decode(item):
        return item.decode('utf8') if hasattr(item, 'decode') else item

    if name in group.attrs:
        return [_decode(item) for item in group.attrs[name]]
    data = []
    chunk_id = 0
    while '%s%d' % (name, chunk_id) in group.attrs:
        data.extend(
            _decode(item) for item in group.attrs['%s%d' % (name, chunk_id)])
        chunk_id += 1
    return data
Loads attributes of the specified name from the HDF5 group. This method deals with an inherent problem of HDF5 file which is not able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. Args: group: A pointer to a HDF5 group. name: A name of the attributes to load. Returns: data: Attributes data.
github-repos
def replace_dimensions(cls, dimensions, overrides):
    """Replaces dimensions in a list using a dictionary of overrides.

    Args:
        dimensions: List of dimensions.
        overrides: Dictionary of dimension specs indexed by name or label.

    Returns:
        list: List of dimensions with replacements applied.
    """
    from .dimension import Dimension
    result = []
    for dim in dimensions:
        # Prefer a name match, fall back to a label match.
        if dim.name in overrides:
            spec = overrides[dim.name]
        elif dim.label in overrides:
            spec = overrides[dim.label]
        else:
            spec = None
        if spec is None:
            result.append(dim)
        elif isinstance(spec, (util.basestring, tuple)):
            result.append(dim.clone(spec))
        elif isinstance(spec, Dimension):
            result.append(spec)
        elif isinstance(spec, dict):
            attrs = {key: val for key, val in spec.items() if key != 'name'}
            result.append(dim.clone(spec.get('name', None), **attrs))
        else:
            raise ValueError('Dimension can only be overridden '
                             'with another dimension or a dictionary '
                             'of attributes')
    return result
Replaces dimensions in list with dictionary of overrides. Args: dimensions: List of dimensions overrides: Dictionary of dimension specs indexed by name Returns: list: List of dimensions with replacements applied
juraj-google-style
def load_dict(self, data, overwrite=False, auto_load_model=True):
    """Load a dictionary into the model.

    Args:
        data (dict): Dictionary to load, keyed by model name.
        overwrite (bool): Whether data present in the model should be
            overwritten by the dict. Currently unused in this
            implementation.
        auto_load_model (bool): If True, models are loaded as they are
            needed; otherwise an unloaded model raises.

    Raises:
        AttributeError: If a model is not loaded and auto_load_model is
            False.
    """
    for model_name, model_data in data.items():
        if model_name not in self._elements.keys():
            if not auto_load_model:
                raise AttributeError('Model {} is not loaded'.format(model_name))
            self._load_model(model_name)
        _load_dict(getattr(self, model_name), model_data)
Load a dictionary into the model. Args: data(dict): Dictionary to load overwrite(bool): Whether the data present in the model should be overwritten by the data in the dict or not. auto_load_model(bool): If set to true models will be loaded as they are needed Examples: >>> vlans_dict = { >>> "vlans": { "vlan": { 100: { >>> "config": { >>> "vlan_id": 100, "name": "production"}}, >>> 200: { >>> "config": { >>> "vlan_id": 200, "name": "dev"}}}}} >>> config.load_dict(vlans_dict) >>> print(config.vlans.vlan.keys()) ... [200, 100] >>> print(100, config.vlans.vlan[100].config.name) ... (100, u'production') >>> print(200, config.vlans.vlan[200].config.name) ... (200, u'dev')
codesearchnet
def plot_clicked(self, mouse_event):
    """Handle a mouse click on the plot widget.

    If a ``SelectPoints`` script is running, a left click toggles the NV
    point under the cursor and redraws.  Independently, if the currently
    selected tree item is (or belongs to) a point parameter, its x/y child
    values are updated from the click coordinates.

    Args:
        mouse_event: matplotlib mouse event carrying ``xdata``/``ydata``
            (data coordinates, None if outside the axes) and ``button``.
    """
    if isinstance(self.current_script, SelectPoints) and self.current_script.is_running:
        # Ignore clicks outside the axes (xdata is None there).
        if (not (mouse_event.xdata == None)):
            if (mouse_event.button == 1):
                pt = np.array([mouse_event.xdata, mouse_event.ydata])
                self.current_script.toggle_NV(pt)
                self.current_script.plot([self.matplotlibwidget_1.figure])
                self.matplotlibwidget_1.draw()
    item = self.tree_scripts.currentItem()
    if item is not None:
        if item.is_point():
            # Item is the point itself: child(1) holds x, child(0) holds y.
            item_x = item.child(1)
            if mouse_event.xdata is not None:
                self.tree_scripts.setCurrentItem(item_x)
                item_x.value = float(mouse_event.xdata)
                item_x.setText(1, '{:0.3f}'.format(float(mouse_event.xdata)))
            item_y = item.child(0)
            if mouse_event.ydata is not None:
                self.tree_scripts.setCurrentItem(item_y)
                item_y.value = float(mouse_event.ydata)
                item_y.setText(1, '{:0.3f}'.format(float(mouse_event.ydata)))
            # Restore the original selection after editing the children.
            self.tree_scripts.setCurrentItem(item)
        else:
            # Item may be one of the x/y children of a point parameter.
            if item.parent() is not None:
                if item.parent().is_point():
                    if item == item.parent().child(1):
                        if mouse_event.xdata is not None:
                            item.setData(1, 2, float(mouse_event.xdata))
                    if item == item.parent().child(0):
                        if mouse_event.ydata is not None:
                            item.setData(1, 2, float(mouse_event.ydata))
Handles a mouse click on the plot: toggles an NV point when a SelectPoints script is running, and updates the x/y values of the currently selected point item in the script tree. Args: mouse_event: matplotlib mouse event with ``xdata``/``ydata`` data coordinates and the pressed ``button``.
juraj-google-style
def _GetTimeElementsTuple(self, structure):
    """Retrieves a time elements tuple from a parsed log line structure.

    The log format does not record the year, so a year rollover is
    inferred when the month decreases relative to the last seen month.

    Args:
        structure (pyparsing.ParseResults): structure of tokens derived
            from a line of a text file; ``structure.date_time`` unpacks to
            (month, day, hours, minutes, seconds) with month as a
            three-letter abbreviation.

    Returns:
        tuple: (year, month, day, hours, minutes, seconds) where month 1
            is January and 0 means the abbreviation was not recognized.
    """
    month, day, hours, minutes, seconds = structure.date_time
    # Map the month abbreviation to its ordinal; 0 if unrecognized.
    month = timelib.MONTH_DICT.get(month.lower(), 0)
    # Month went backwards => assume we crossed into a new year.
    # NOTE(review): self._last_month is read here but never updated in this
    # method -- presumably the caller maintains it; confirm.
    if month != 0 and month < self._last_month:
        self._year_use += 1
    return (self._year_use, month, day, hours, minutes, seconds)
Retrieves a time elements tuple from the structure. Args: structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file. Returns: tuple: containing: year (int): year. month (int): month, where 1 represents January. day_of_month (int): day of month, where 1 is the first day of the month. hours (int): hours. minutes (int): minutes. seconds (int): seconds.
juraj-google-style
def _run_dnb_normalization(self, dnb_data, sza_data):
    """Scale the DNB data using histogram equalization per illumination regime.

    The scene is split into day, mixed (twilight), and night regions based
    on solar zenith angle cutoffs, and each region (plus each twilight
    step) is histogram-equalized independently.

    Args:
        dnb_data (ndarray): Day/Night Band data array.
        sza_data (ndarray): Solar Zenith Angle data array (same shape).

    Returns:
        ndarray: Equalized copy of the DNB data.

    Raises:
        RuntimeError: If no valid data fell into any of the masks.
    """
    # Wrap as DataArrays so invalid pixels from either input mask both.
    dnb_data = xr.DataArray(dnb_data, dims=('y', 'x'))
    sza_data = xr.DataArray(sza_data, dims=('y', 'x'))
    good_mask = (~ (dnb_data.isnull() | sza_data.isnull()))
    output_dataset = dnb_data.where(good_mask)
    # Work on a plain numpy copy; equalization writes into output_dataset.
    output_dataset = output_dataset.values.copy()
    dnb_data = dnb_data.values
    sza_data = sza_data.values
    (day_mask, mixed_mask, night_mask) = make_day_night_masks(sza_data, good_mask.values, self.high_angle_cutoff, self.low_angle_cutoff, stepsDegrees=self.mixed_degree_step)
    did_equalize = False
    if day_mask.any():
        LOG.debug('Histogram equalizing DNB day data...')
        histogram_equalization(dnb_data, day_mask, out=output_dataset)
        did_equalize = True
    if mixed_mask:
        # mixed_mask is a sequence of per-step twilight masks; each step is
        # equalized separately.
        for mask in mixed_mask:
            if mask.any():
                LOG.debug('Histogram equalizing DNB mixed data...')
                histogram_equalization(dnb_data, mask, out=output_dataset)
                did_equalize = True
    if night_mask.any():
        LOG.debug('Histogram equalizing DNB night data...')
        histogram_equalization(dnb_data, night_mask, out=output_dataset)
        did_equalize = True
    if (not did_equalize):
        raise RuntimeError('No valid data found to histogram equalize')
    return output_dataset
Scale the DNB data using a histogram equalization method. Args: dnb_data (ndarray): Day/Night Band data array sza_data (ndarray): Solar Zenith Angle data array
codesearchnet
def run(cmd: str, *paths: str, cwd: str = '.', mute: bool = False, filters: typing.Optional[typing.Union[typing.Iterable[str], str]] = None, failure_ok: bool = False, timeout: float = _DEFAULT_PROCESS_TIMEOUT, ) -> typing.Tuple[str, int]:
    """Executes an external command and returns its output and return code.

    Args:
        cmd: command line to execute (executable + arguments).
        paths: additional paths to search the executable in.
        cwd: working directory (defaults to ".").
        mute: if True, output is buffered instead of logged live.
        filters: partial strings to filter out from the captured output.
        failure_ok: if False, a non-zero return code aborts the application.
        timeout: sub-process timeout in seconds.

    Returns:
        Tuple of (process output as string, return code).
    """
    filters = _sanitize_filters(filters)
    exe_path, args_list = _parse_cmd(cmd, *paths)
    # All per-invocation state is bundled into a context object consumed by
    # the monitoring and error-checking helpers.
    context = RunContext(
        exe_path=exe_path,
        capture=sarge.Capture(),
        failure_ok=failure_ok,
        mute=mute,
        args_list=args_list,
        paths=paths,
        cwd=cwd,
        timeout=timeout,
        filters=filters,
    )
    if mute:
        # Muted runs keep the command line in the buffer for later reporting.
        context.result_buffer += f'{context.cmd_as_string}'
    else:
        _LOGGER_PROCESS.info('%s: running', context.cmd_as_string)
    context.start_process()
    monitor_running_process(context)
    # Raises (or logs, depending on failure_ok) when the return code is non-zero.
    check_error(context)
    return context.process_output_as_str, context.return_code
Executes a command and returns the result Args: cmd: command to execute paths: paths to search executable in cwd: working directory (defaults to ".") mute: if true, output will not be printed filters: gives a list of partial strings to filter out from the output (stdout or stderr) failure_ok: if False (default), a return code different than 0 will exit the application timeout: sub-process timeout Returns: command output
juraj-google-style
def load(filename, instruments=None):
    """Load a pickled script instance from disk.

    Args:
        filename: path of the pickle file written by the corresponding
            save routine.
        instruments: optional dict mapping instrument names to instrument
            instances; only needed if the script requires instruments.

    Returns:
        tuple: (script_instance, updated_instruments)

    SECURITY NOTE: ``pickle.loads`` executes arbitrary code contained in
    the file -- only load files from trusted sources.
    """
    # Pickle data is binary: opening in text mode ('r') fails on Python 3
    # and newline translation can corrupt the stream, so use 'rb'.
    with open(filename, 'rb') as infile:
        data_pickle = infile.read()
    script_as_dict = pickle.loads(data_pickle)
    script_class = script_as_dict['_script_class']
    # Instantiate a fresh script of the recorded class (wiring up any
    # required instruments), then overwrite its state with the pickled dict.
    script_instance, _, updated_instruments = Script.load_and_append({'script': script_class}, instruments=instruments)
    script_instance = script_instance['script']
    # Preserve the live instrument handles: the pickled __dict__ would
    # otherwise clobber them with stale (unpicklable/placeholder) values.
    instruments = script_instance._instruments
    script_instance.__dict__ = script_as_dict
    script_instance._instruments = instruments
    return script_instance, updated_instruments
loads an script instance using pickle Args: filename: source filename instruments: optional - only needed if script requires instruments dictionary of form instruments = { name_of_instrument_1 : instance_of_instrument_1, name_of_instrument_2 : instance_of_instrument_2, ... } Returns: script_instance updated_instruments
juraj-google-style
def gff3_verifier(entries, line=None):
    """Raise FormatError if any entry is not valid GFF3.

    Each entry is serialized and matched field-by-field against a GFF3
    column regex; the index of the first failing field is translated into
    a human-readable message.

    Args:
        entries (list): A list of GFF3Entry instances.
        line (int): Line number of the first entry (used in messages and
            incremented per entry when given).

    Raises:
        FormatError: When the GFF3 format is incorrect, with a descriptive
            message.
    """
    # Columns: seqid, source, type, start, end, score, strand, phase,
    # attributes -- tab separated, terminated by the platform line separator.
    regex = ('^[a-zA-Z0-9.:^*$@!+_?-|]+\\t.+\\t.+\\t\\d+\\t\\d+\\t' +
             '\\d*\\.?\\d*\\t[+-.]\\t[.0-2]\\t.+{0}$'.format(os.linesep))
    delimiter = '\\t'
    for entry in entries:
        try:
            entry_verifier([entry.write()], regex, delimiter)
        except FormatError as error:
            if line:
                intro = 'Line {0}'.format(str(line))
            elif (error.part == 0):
                intro = 'Entry with source {0}'.format(entry.source)
            else:
                intro = 'Entry with Sequence ID {0}'.format(entry.seqid)
            # Map the failing column index to a message.
            # NOTE(review): some messages look mismatched with the regex --
            # e.g. part 2 (type, pattern '.+') reports "non-numerical
            # characters" -- confirm the part->message mapping is intended.
            if (error.part == 0):
                msg = '{0} has no Sequence ID'.format(intro)
            elif (error.part == 1):
                msg = '{0} has no source'.format(intro)
            elif (error.part == 2):
                msg = '{0} has non-numerical characters in type'.format(intro)
            elif (error.part == 3):
                msg = '{0} has non-numerical characters in start position'.format(intro)
            elif (error.part == 4):
                msg = '{0} has non-numerical characters in end position'.format(intro)
            elif (error.part == 5):
                msg = '{0} has non-numerical characters in score'.format(intro)
            elif (error.part == 6):
                msg = '{0} strand not in [+-.]'.format(intro)
            elif (error.part == 7):
                msg = '{0} phase not in [.0-2]'.format(intro)
            elif (error.part == 8):
                msg = '{0} has no attributes'.format(intro)
            else:
                msg = 'Unknown Error: Likely a Bug'
            raise FormatError(message=msg)
        # Track the running line number across entries when one was given.
        if line:
            line += 1
Raises error if invalid GFF3 format detected Args: entries (list): A list of GFF3Entry instances line (int): Line number of first entry Raises: FormatError: Error when GFF3 format incorrect with descriptive message
codesearchnet
def __init__(self, *, picos: Union[int, float] = 0, nanos: Union[int, float] = 0) -> None:
    """Initialize a Timestamp from nanoseconds and/or picoseconds.

    The time is relative to some unspecified "time zero".  When both
    ``picos`` and ``nanos`` are given, their contributions are summed.

    Args:
        picos: Offset from time zero in picoseconds.
        nanos: Offset from time zero in nanoseconds.
    """
    # Branching mirrors the documented cases and preserves the numeric
    # type of the dominant argument when the other is zero.
    if picos and nanos:
        self._picos = picos + 1000 * nanos
    elif nanos:
        self._picos = 1000 * nanos
    else:
        self._picos = picos
Initializes a Timestamp with a time specified in ns and/or ps. The time is relative to some unspecified "time zero". If both picos and nanos are specified, their contributions away from zero are added. Args: picos: How many picoseconds away from time zero? nanos: How many nanoseconds away from time zero?
juraj-google-style
def _kl_chi_chi(a, b, name=None):
    """Batched KL divergence KL(a || b) between two Chi distributions.

    Args:
        a: instance of a Chi distribution object.
        b: instance of a Chi distribution object.
        name: optional name for created ops; defaults to "kl_chi_chi".

    Returns:
        Batchwise KL(a || b).
    """
    with tf.name_scope(name or "kl_chi_chi"):
        # KL = 0.5 * digamma(a.df/2) * (a.df - b.df)
        #      + lgamma(b.df/2) - lgamma(a.df/2)
        digamma_term = tf.math.digamma(0.5 * a.df)
        return (0.5 * digamma_term * (a.df - b.df)
                + tf.math.lgamma(0.5 * b.df)
                - tf.math.lgamma(0.5 * a.df))
Calculate the batched KL divergence KL(a || b) with a and b Chi. Args: a: instance of a Chi distribution object. b: instance of a Chi distribution object. name: (optional) Name to use for created operations. default is "kl_chi_chi". Returns: Batchwise KL(a || b)
juraj-google-style
def parse_header(line):
    """Parse a single header line.

    Args:
        line: A header line as a string, typically still carrying its line
            terminator.

    Returns:
        None if the end of the headers is found (empty string or a bare
        line terminator).
        A string giving the continuation value if a folded (continuation)
        line is found.
        A (name, value) tuple when a regular header line is found.

    Raises:
        ValueError: If the line cannot be parsed as a header (no colon).
    """
    # End of headers: empty string or a bare terminator.  Accept a lone
    # "\n" as well as "\r\n" so LF-only streams are handled instead of
    # raising ValueError on the blank separator line.
    if not line or line in ("\r\n", "\n"):
        return None
    # RFC 822-style folding: leading SP/HT continues the previous value.
    if line[0] in " \t":
        return line[1:].rstrip()
    name, value = line.split(":", 1)
    return (name.strip(), value.strip())
Parse a header line. Args: line: A header line as a string. Returns: None if end of headers is found. A string giving the continuation line if a continuation is found. A tuple of name, value when a header line is found. Raises: ValueError: If the line cannot be parsed as a header.
juraj-google-style
def _build_trial_meta(cls, expr_dir):
    """Build (or repair) the meta dict for a trial experiment directory.

    If no meta file exists, a fresh meta dict is synthesized from the
    directory layout; otherwise missing/raw fields of the existing meta
    are normalized (timestamps converted, params reloaded).

    Args:
        expr_dir (str): Directory path of the experiment.

    Returns:
        dict: Trial meta info.
    """
    meta_file = os.path.join(expr_dir, EXPR_META_FILE)
    meta = parse_json(meta_file)
    if (not meta):
        # No meta file yet: derive ids from the directory path.
        # NOTE(review): job id is taken from the second-to-last path
        # component and trial id from the last 8 chars -- assumes a
        # specific directory naming scheme; confirm.
        job_id = expr_dir.split('/')[(- 2)]
        trial_id = expr_dir[(- 8):]
        params = parse_json(os.path.join(expr_dir, EXPR_PARARM_FILE))
        meta = {'trial_id': trial_id, 'job_id': job_id, 'status': 'RUNNING', 'type': 'TUNE', 'start_time': os.path.getctime(expr_dir), 'end_time': None, 'progress_offset': 0, 'result_offset': 0, 'params': params}
    if (not meta.get('start_time', None)):
        meta['start_time'] = os.path.getctime(expr_dir)
    # Normalize numeric timestamps to formatted date strings.
    if isinstance(meta['start_time'], float):
        meta['start_time'] = timestamp2date(meta['start_time'])
    if meta.get('end_time', None):
        meta['end_time'] = timestamp2date(meta['end_time'])
    meta['params'] = parse_json(os.path.join(expr_dir, EXPR_PARARM_FILE))
    return meta
Build meta file for trial. Args: expr_dir (str): Directory path of the experiment. Return: A dict of trial meta info.
codesearchnet
def checksum(self, path):
    """Fetch checksum metadata of a file on this FileSystem.

    Args:
        path: string path of a file.

    Returns:
        string containing the checksum.

    Raises:
        BeamIOError: if the path isn't a file or doesn't exist.
    """
    try:
        io_handle = s3io.S3IO(options=self._options)
        return io_handle.checksum(path)
    except Exception as exc:
        # Wrap any underlying failure in the Beam IO error type, keyed by path.
        raise BeamIOError('Checksum operation failed', {path: exc})
Fetch checksum metadata of a file on the :class:`~apache_beam.io.filesystem.FileSystem`. Args: path: string path of a file. Returns: string containing checksum Raises: ``BeamIOError``: if path isn't a file or doesn't exist.
github-repos
def append(self, item):
    """Append one item to the list.

    A plain ``list`` argument is spliced in via ``extend``; otherwise the
    item must be an instance of the class configured at construction time.

    Args:
        item: Item to be appended (or a list of such items).

    Raises:
        exceptions.WrongListItemType: If the item's type does not match
            the one specified in the constructor.
    """
    if isinstance(item, list):
        self.extend(item)
        return
    if issubclass(item.__class__, self._pyof_class):
        list.append(self, item)
    else:
        raise exceptions.WrongListItemType(item.__class__.__name__,
                                           self._pyof_class.__name__)
Append one item to the list. Args: item: Item to be appended. Its type must match the one defined in the constructor. Raises: :exc:`~.exceptions.WrongListItemType`: If the item has a different type than the one specified in the constructor.
juraj-google-style
def with_min_memory(self, min_bytes=0, min_peak_bytes=0, min_residual_bytes=0, min_output_bytes=0):
    """Only show profiler nodes consuming at least the given memory amounts.

    Args:
        min_bytes: minimum requested allocation in bytes.
        min_peak_bytes: minimum bytes in use at peak (high watermark).
        min_residual_bytes: minimum bytes still allocated after Compute().
        min_output_bytes: minimum bytes of output (not necessarily
            allocated by the node itself).

    Returns:
        self, to allow chaining.
    """
    self._options.update({
        'min_bytes': min_bytes,
        'min_peak_bytes': min_peak_bytes,
        'min_residual_bytes': min_residual_bytes,
        'min_output_bytes': min_output_bytes,
    })
    return self
Only show profiler nodes consuming no less than 'min_bytes'. Args: min_bytes: Only show profiler nodes requested to allocate no less bytes than this. min_peak_bytes: Only show profiler nodes using no less than this bytes at peak (high watermark). For profiler nodes consist of multiple graph nodes, it sums the graph nodes' peak_bytes. min_residual_bytes: Only show profiler nodes have no less than this bytes not being de-allocated after Compute() ends. For profiler nodes consist of multiple graph nodes, it sums the graph nodes' residual_bytes. min_output_bytes: Only show profiler nodes have no less than this bytes output. The output are not necessarily allocated by this profiler nodes. Returns: self
github-repos
def generate(self, pixel_values: torch.FloatTensor, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: bool=False, **generate_kwargs) -> torch.LongTensor:
    """Overrides `generate` so the model can be used as a conditional generator.

    Args:
        pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)):
            Input images to be processed.
        input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
            The sequence used as a prompt for the generation.
        attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
            Mask to avoid performing attention on padding token indices.
        interpolate_pos_encoding (bool): whether to interpolate the vision
            positional encodings for non-default image sizes.

    Returns:
        The generated token ids.
    """
    if hasattr(self, 'hf_device_map'):
        # Big-model inference with accelerate: make sure sub-modules are on
        # compatible devices before running them.
        self._preprocess_accelerate()
    batch_size = pixel_values.shape[0]
    image_embeds = self.vision_model(pixel_values, return_dict=True, interpolate_pos_encoding=interpolate_pos_encoding).last_hidden_state
    image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
    # Q-Former: cross-attend learned query tokens to the image features.
    query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
    query_outputs = self.qformer(query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=True)
    query_output = query_outputs.last_hidden_state
    if query_output.dtype != image_embeds.dtype:
        query_output = query_output.to(image_embeds.dtype)
    # Project Q-Former output into the language model's embedding space.
    language_model_inputs = self.language_projection(query_output)
    language_attention_mask = torch.ones(language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device)
    if input_ids is None:
        # No prompt given: start from BOS, preceded by image placeholder
        # tokens when the config defines them.
        start_tokens = [self.config.text_config.bos_token_id]
        if getattr(self.config, 'image_token_id', None) is not None:
            start_tokens = [self.config.image_token_id] * self.config.num_query_tokens + start_tokens
        input_ids = torch.tensor([start_tokens], dtype=torch.long, device=image_embeds.device)
        input_ids = input_ids.repeat(batch_size, 1)
    inputs_embeds = self.get_input_embeddings()(input_ids)
    if attention_mask is None:
        attention_mask = torch.ones_like(input_ids)
    if getattr(self.config, 'image_token_id', None) is not None:
        # Scatter the projected image features into the placeholder slots.
        special_image_mask = (input_ids == self.config.image_token_id).unsqueeze(-1).expand_as(inputs_embeds)
        inputs_embeds[special_image_mask] = language_model_inputs.flatten()
    else:
        # Legacy path: prepend the image features to the text embeddings.
        # NOTE(review): warning string reconstructed from a garbled source
        # literal -- confirm exact wording/URL against upstream.
        logger.warning_once(
            'Expanding inputs for image tokens in BLIP-2 should be done in processing. '
            'Please follow instruction here (https://gist.github.com/zucchini-nlp/e9f20b054fa322f84ac9311d9ab67042) '
            'to update your BLIP-2 model. Using processors without these attributes in the config is deprecated '
            'and will throw an error in the future.'
        )
        inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)
        attention_mask = torch.cat([language_attention_mask, attention_mask.to(language_attention_mask.device)], dim=1)
        if not self.language_model.config.is_encoder_decoder:
            # Extend length limits to account for the prepended image tokens.
            generate_kwargs['max_length'] = generate_kwargs.get('max_length', 20) + language_model_inputs.shape[1] - 1
            generate_kwargs['min_length'] = generate_kwargs.get('min_length', 0) + language_model_inputs.shape[1]
    inputs = {'inputs_embeds': inputs_embeds, 'attention_mask': attention_mask}
    if not self.language_model.config.is_encoder_decoder:
        inputs['input_ids'] = input_ids
    outputs = self.language_model.generate(**inputs, **generate_kwargs)
    return outputs
Overrides `generate` function to be able to use the model as a conditional generator. Args: pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)): Input images to be processed. input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): The sequence used as a prompt for the generation. attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): Mask to avoid performing attention on padding token indices Returns: captions (list): A list of strings of length batch_size * num_captions.
github-repos
def _get_parseable_methods(cls):
    """Collect the methods of ``cls`` decorated by '@create_parser'.

    A method is "parseable" when it is callable and carries a ``parser``
    attribute.  The ``__init__`` parser is returned separately from the
    rest.  Note: classmethods are not included, as they can only be
    referenced once the class has been defined.

    Args:
        cls: the class currently being decorated.

    Returns:
        2-tuple of (init_parser or None, {'method_name': parser}).
    """
    _LOG.debug("Retrieving parseable methods for '%s'", cls.__name__)
    init_parser = None
    methods_to_parse = {}
    for attr_name, attr_value in vars(cls).items():
        if not (callable(attr_value) and hasattr(attr_value, "parser")):
            continue
        _LOG.debug("Found method '%s'", attr_name)
        if attr_name == "__init__":
            init_parser = attr_value.parser
        else:
            methods_to_parse[attr_value.__name__] = attr_value.parser
    return (init_parser, methods_to_parse)
Return all methods of cls that are parseable i.e. have been decorated by '@create_parser'. Args: cls: the class currently being decorated Note: classmethods will not be included as they can only be referenced once the class has been defined Returns: a 2-tuple with the parser of the __init__ method if any and a dict of the form {'method_name': associated_parser}
juraj-google-style
def RunStateMethod(self, method_name, request=None, responses=None):
    """Completes a request by calling the named state method on the flow.

    Args:
        method_name: The name of the state method to call.
        request: A RequestState protobuf.
        responses: A list of GrrMessages responding to the request.
    """
    # Bail out early if the flow is being terminated.
    if self._TerminationPending():
        return
    client_id = None
    try:
        self.context.current_state = method_name
        if request and responses:
            client_id = request.client_id or self.runner_args.client_id
            logging.debug("%s Running %s with %d responses from %s",
                          self.session_id, method_name, len(responses), client_id)
        else:
            logging.debug("%s Running state method %s", self.session_id, method_name)
        # Mark the flow alive so lease/heartbeat monitoring does not kill it.
        self.flow_obj.HeartBeat()
        try:
            method = getattr(self.flow_obj, method_name)
        except AttributeError:
            raise FlowRunnerError("Flow %s has no state method %s" %
                                  (self.flow_obj.__class__.__name__, method_name))
        responses = flow_responses.Responses.FromLegacyResponses(
            request=request, responses=responses)
        self.SaveResourceUsage(responses.status)
        stats_collector_instance.Get().IncrementCounter("grr_worker_states_run")
        # The "Start" state takes no responses argument; all others do.
        if method_name == "Start":
            stats_collector_instance.Get().IncrementCounter(
                "flow_starts", fields=[self.flow_obj.Name()])
            method()
        else:
            method(responses)
        # Flush any replies the state method produced to the output plugins.
        if self.sent_replies:
            self.ProcessRepliesWithOutputPlugins(self.sent_replies)
            self.sent_replies = []
    except Exception as e:
        # Catch-all boundary: record the failure in stats and transition the
        # flow into the error state rather than crashing the worker.
        stats_collector_instance.Get().IncrementCounter("grr_flow_errors")
        stats_collector_instance.Get().IncrementCounter(
            "flow_errors", fields=[self.flow_obj.Name()])
        logging.exception("Flow %s raised %s.", self.session_id, e)
        self.Error(traceback.format_exc(), client_id=client_id)
Completes the request by calling the state method. Args: method_name: The name of the state method to call. request: A RequestState protobuf. responses: A list of GrrMessages responding to the request.
juraj-google-style
def validate(filename=None, ocrd_page=None, ocrd_file=None, strictness='strict', strategy='index1'):
    """Validate a PAGE document for consistency.

    Exactly one input form is needed; resolution order is ``ocrd_page``
    (already parsed), then ``ocrd_file`` (wrapped file), then ``filename``
    (path to parse).

    Arguments:
        filename (string): Path to PAGE.
        ocrd_page (OcrdPage): OcrdPage instance.
        ocrd_file (OcrdFile): OcrdFile instance wrapping OcrdPage.
        strictness (string): 'strict', 'lax', 'fix' or 'off'.
        strategy (string): Currently only 'index1'.

    Returns:
        ValidationReport: Report on the validity.
    """
    if ocrd_page:
        page = ocrd_page
    elif ocrd_file:
        page = page_from_file(ocrd_file)
    elif filename:
        page = parse(filename, silence=True)
    else:
        raise Exception('At least one of ocrd_page, ocrd_file or filename must be set')
    return PageValidator(page, strictness, strategy)._validate()
Validates a PAGE file for consistency by filename, OcrdFile or passing OcrdPage directly. Arguments: filename (string): Path to PAGE ocrd_page (OcrdPage): OcrdPage instance ocrd_file (OcrdFile): OcrdFile instance wrapping OcrdPage strictness (string): 'strict', 'lax', 'fix' or 'off' strategy (string): Currently only 'index1' Returns: report (:class:`ValidationReport`) Report on the validity
codesearchnet
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_2_0):
    """Read the data stream and decode the Attributes structure.

    Args:
        input_stream (stream): A data stream containing encoded object
            data, supporting a read method.
        kmip_version (enum): KMIPVersion enumeration defining the KMIP
            version with which the object will be decoded. Optional,
            defaults to KMIP 2.0.

    Raises:
        AttributeNotSupported: Raised if an unsupported attribute is
            encountered while decoding.
        VersionNotSupported: Raised when a KMIP version is provided that
            does not support the Attributes object.
    """
    # The Attributes structure only exists in KMIP 2.0+.
    if (kmip_version < enums.KMIPVersion.KMIP_2_0):
        raise exceptions.VersionNotSupported('KMIP {} does not support the Attributes object.'.format(kmip_version.value))
    super(Attributes, self).read(input_stream, kmip_version=kmip_version)
    # Restrict parsing to this structure's own payload.
    local_stream = BytearrayStream(input_stream.read(self.length))
    while True:
        # A TTLV tag is 3 bytes; stop when no full tag remains.
        if (len(local_stream) < 3):
            break
        # Peek the 3-byte tag, zero-padded to 4 bytes for '!I' unpacking.
        tag = struct.unpack('!I', (b'\x00' + local_stream.peek(3)))[0]
        if enums.is_enum_value(enums.Tags, tag):
            tag = enums.Tags(tag)
            # Only tags classified as attributes for this KMIP version may
            # appear inside an Attributes structure.
            if (not enums.is_attribute(tag, kmip_version=kmip_version)):
                raise exceptions.AttributeNotSupported('Attribute {} is not supported by KMIP {}.'.format(tag.name, kmip_version.value))
            value = self._factory.create_attribute_value_by_enum(tag, None)
            value.read(local_stream, kmip_version=kmip_version)
            self._attributes.append(value)
        else:
            # Unknown tag: stop consuming rather than guessing.
            break
    self.is_oversized(local_stream)
Read the data stream and decode the Attributes structure into its parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method. kmip_version (enum): A KMIPVersion enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 2.0. Raises: AttributeNotSupported: Raised if an unsupported attribute is encountered while decoding. VersionNotSupported: Raised when a KMIP version is provided that does not support the Attributes object.
codesearchnet
def conversations_setTopic(self, *, channel: str, topic: str, **kwargs) -> SlackResponse:
    """Sets the topic for a conversation.

    Args:
        channel (str): The channel id, e.g. 'C1234567890'.
        topic (str): The new topic for the channel, e.g. 'My Topic'.

    Returns:
        SlackResponse: The API response.
    """
    # Fold the required fields into the payload (overriding any duplicates)
    # and dispatch the Web API call.
    kwargs['channel'] = channel
    kwargs['topic'] = topic
    return self.api_call('conversations.setTopic', json=kwargs)
Sets the topic for a conversation. Args: channel (str): The channel id. e.g. 'C1234567890' topic (str): The new topic for the channel. e.g. 'My Topic'
codesearchnet
def _testZeroDensity(self, alpha):
    """Check that the sampler emits (almost) no zeros.

    Zero isn't in the support of the gamma distribution, but quantized
    floating point math has its limits, so a small allowance proportional
    to the CDF mass below each dtype's smallest positive value is granted.
    TODO(bjp): Implement log-gamma sampler for small-shape distributions.

    Args:
        alpha: float shape value to test.
    """
    try:
        from scipy import stats
    except ImportError as e:
        # scipy is optional in the test environment; skip silently.
        tf_logging.warn('Cannot test zero density proportions: %s' % e)
        return
    # Expected fraction of samples that would underflow to zero per dtype:
    # the gamma CDF evaluated at the smallest positive normal of the dtype.
    allowable_zeros = {dtypes.float16: stats.gamma(alpha).cdf(np.finfo(np.float16).tiny), dtypes.float32: stats.gamma(alpha).cdf(np.finfo(np.float32).tiny), dtypes.float64: stats.gamma(alpha).cdf(np.finfo(np.float64).tiny)}
    failures = []
    for dt in (dtypes.float16, dtypes.float32, dtypes.float64):
        sampler = self._Sampler(10000, alpha, 1.0, dt, seed=12345)
        x = sampler()
        allowable = allowable_zeros[dt] * x.size
        # Allow generous slack for small expected counts (< 10).
        allowable = allowable * 2 if allowable < 10 else allowable * 1.05
        if np.sum(x <= 0) > allowable:
            failures += [dt]
    self.assertEqual([], failures)
Zero isn't in the support of the gamma distribution. But quantized floating point math has its limits. TODO(bjp): Implement log-gamma sampler for small-shape distributions. Args: alpha: float shape value to test
github-repos
def _ParseCacheEntries(self, parser_mediator, index_table, data_block_files):
    """Parses Chrome Cache file entries and produces events for them.

    Each index entry is the head of a chain of cache entries linked via
    their ``next`` cache address; the chain is followed across the data
    block files until a null address (or a safety limit) is reached.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        index_table (list[CacheAddress]): the cache addresses stored in
            the index file.
        data_block_files (dict[str: file]): look-up table of data block
            file-like object handles keyed by filename.
    """
    for cache_address in index_table:
        cache_address_chain_length = 0
        while (cache_address.value != 0):
            # Guard against cyclic/corrupt chains.
            if (cache_address_chain_length >= 64):
                parser_mediator.ProduceExtractionWarning('Maximum allowed cache address chain length reached.')
                break
            data_block_file_object = data_block_files.get(cache_address.filename, None)
            if (not data_block_file_object):
                message = 'Cache address: 0x{0:08x} missing data file.'.format(cache_address.value)
                parser_mediator.ProduceExtractionWarning(message)
                break
            try:
                cache_entry = self._data_block_file_parser.ParseCacheEntry(data_block_file_object, cache_address.block_offset)
            except (IOError, errors.ParseError) as exception:
                parser_mediator.ProduceExtractionWarning('Unable to parse cache entry with error: {0!s}'.format(exception))
                break
            event_data = ChromeCacheEntryEventData()
            event_data.original_url = cache_entry.original_url
            # Creation time is stored as a WebKit timestamp.
            date_time = dfdatetime_webkit_time.WebKitTime(timestamp=cache_entry.creation_time)
            event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
            parser_mediator.ProduceEventWithEventData(event, event_data)
            # Follow the linked list to the next entry in the chain.
            cache_address = cache_entry.next
            cache_address_chain_length += 1
Parses Chrome Cache file entries. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. index_table (list[CacheAddress]): the cache addresses which are stored in the index file. data_block_files (dict[str: file]): look up table for the data block file-like object handles.
codesearchnet
def check_valid(money):
    """Determine if an instance of `Money` is valid.

    Args:
        money: the ``sc_messages.Money`` instance to test.

    Raises:
        ValueError: if the money instance is invalid (wrong type, bad
            currency code, mismatched units/nanos signs, or nanos out of
            bounds).
    """
    if not isinstance(money, sc_messages.Money):
        raise ValueError(u'Inputs should be of type %s' % (sc_messages.Money,))
    currency = money.currencyCode
    # Currency codes must be exactly three letters (per the error message).
    if not currency or len(currency) != 3:
        raise ValueError(_MSG_3_LETTERS_LONG)
    units, nanos = money.units, money.nanos
    # units and nanos must not point in opposite directions.
    sign_mismatch = (units > 0 and nanos < 0) or (units < 0 and nanos > 0)
    if sign_mismatch:
        raise ValueError(_MSG_UNITS_NANOS_MISMATCH)
    if abs(nanos) > MAX_NANOS:
        raise ValueError(_MSG_NANOS_OOB)
Determine if an instance of `Money` is valid. Args: money (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): the instance to test Raises: ValueError: if the money instance is invalid
codesearchnet
def BindScope(self, scope_id, values):
    """Associate the given values with the given scope.

    May be called multiple times to associate multiple value sets.

    Args:
        scope_id: A scope id to bind the values to.
        values: A mapping from scope variable ids to values to bind.

    Raises:
        KeyError: If the scope, or the exact set of scope variables, is
            not specified in the pattern.
    """
    if scope_id not in self._scopes:
        raise KeyError(scope_id)
    expected = self._scopes[scope_id]
    provided = set(iterkeys(values))
    if provided != expected:
        # Report the symmetric difference: missing and unexpected variables.
        raise KeyError(provided ^ expected)
    self._scope_bindings[scope_id].append(values)
Associates given values with given scope. This can be called multiple times to associate multiple values. Args: scope_id: A scope id to bind the values to. values: A mapping from scope variable ids to values to bind in scope. Raises: KeyError: If given scope or scope variable is not specified in the pattern.
juraj-google-style
def lap(self):
    """Record a lap and return the elapsed durations.

    Returns:
        float: Lap time -- the duration since the previous ``lap()`` call
            (or since initialization on the first call).
        float: Total time -- the duration since initialization.
    """
    current = time.time()
    elapsed_since_lap = current - self.lap_time
    elapsed_total = current - self.start
    # Reset the lap reference point for the next call.
    self.lap_time = current
    return (elapsed_since_lap, elapsed_total)
Calculate lap time. Returns: float: Lap time. The duration from the previous call of ``lap()`` or initialization at first call. float: Total time. The duration from initialization.
codesearchnet
def MessageToJson(message, including_default_value_fields=False):
    """Convert a protobuf message to a JSON-formatted string.

    Args:
        message: The protocol buffers message instance to serialize.
        including_default_value_fields: If True, singular primitive fields,
            repeated fields, and map fields will always be serialized. If
            False, only non-empty fields are serialized. Singular message
            fields and oneof fields are not affected by this option.

    Returns:
        A string containing the JSON-formatted protocol buffer message.
    """
    json_object = _MessageToJsonObject(message, including_default_value_fields)
    return json.dumps(json_object, indent=2)
Converts protobuf message to JSON format. Args: message: The protocol buffers message instance to serialize. including_default_value_fields: If True, singular primitive fields, repeated fields, and map fields will always be serialized. If False, only serialize non-empty fields. Singular message fields and oneof fields are not affected by this option. Returns: A string containing the JSON formatted protocol buffer message.
codesearchnet
def range_dimension(self):
    """Dimension (in the vector-space sense) of the range of this operator.

    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns `M`.

    Returns:
        `Dimension` object (unknown when the static shape is unknown).
    """
    dims = self.shape.dims
    if not dims:
        # Static shape unknown => range dimension unknown.
        return tensor_shape.Dimension(None)
    # Second-to-last axis of [..., M, N] is the range dimension M.
    return dims[-2]
Dimension (in the sense of vector spaces) of the range of this operator. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns `M`. Returns: `Dimension` object.
github-repos
def get_loss_func(self, C=1.0, k=1):
    """Return the VAE loss function (negative ELBO).

    The returned closure computes reconstruction loss (Bernoulli NLL,
    averaged over k Monte Carlo samples of the latent) plus a KL
    regularization term scaled by C.

    Args:
        C (float): Weight of the KL term of the ELBO; usually 1.0. Can be
            changed to control regularization strength.
        k (int): Number of Monte Carlo samples used per input.

    Returns:
        callable: ``lf(x)`` computing the scalar loss for a batch ``x``.
    """
    def lf(x):
        (mu, ln_var) = self.encode(x)
        batchsize = len(mu.data)
        rec_loss = 0
        # Monte Carlo estimate of the reconstruction term with the
        # reparameterization trick (sampling via F.gaussian).
        for l in six.moves.range(k):
            z = F.gaussian(mu, ln_var)
            rec_loss += (F.bernoulli_nll(x, self.decode(z, sigmoid=False)) / (k * batchsize))
        # Stash the components on self for inspection/reporting.
        self.rec_loss = rec_loss
        self.loss = (self.rec_loss + ((C * gaussian_kl_divergence(mu, ln_var)) / batchsize))
        return self.loss
    return lf
Get loss function of VAE. The loss value is equal to ELBO (Evidence Lower Bound) multiplied by -1. Args: C (int): Usually this is 1.0. Can be changed to control the second term of ELBO bound, which works as regularization. k (int): Number of Monte Carlo samples used in encoded vector.
codesearchnet
def check_value(config, section, option, jinja_pattern=JINJA_PATTERN):
    """Return a config value unless it looks like a jinja2 template.

    Args:
        config (configparser.ConfigParser): config object to read from.
        section (str): name of the section in the config.
        option (str): name of the option in the section.
        jinja_pattern: compiled ``re`` pattern that matches unrendered
            jinja2 template values.

    Returns:
        str: the value when it is a concrete (non-template) string,
        otherwise None.

    Raises:
        KeyError: on missing section/option lookups.
    """
    candidate = config[section][option]
    # A value that still matches the template pattern was never rendered.
    return None if re.match(jinja_pattern, candidate) else candidate
Determine whether a configuration value is a concrete value or an unrendered jinja2 template. Args: config (:obj:`configparser.ConfigParser`): config object to read the key from. section (str): name of the section in the config. option (str): name of the option in the section. jinja_pattern (:obj:`_sre.SRE_Pattern`): a `re.compile()` pattern that matches jinja2 template values. Returns: str: the value if it is a concrete value, else None. Raises: KeyError, configparser.NoOptionError, configparser.NoSectionError: on missing sections or options.
codesearchnet
def _do_retrieve_scopes(self, http, token):
    """Retrieve the list of authorized scopes from the OAuth2 provider.

    On success, ``self.scopes`` is replaced with the set of scopes reported
    by the token-info endpoint.

    Args:
        http: an object to be used to make HTTP requests.
        token: A string used as the token to identify the credentials to
            the provider.

    Raises:
        Error: When the request fails, indicating the access token is
            invalid.
    """
    logger.info('Refreshing scopes')
    query_params = {'access_token': token, 'fields': 'scope'}
    token_info_uri = _helpers.update_query_params(self.token_info_uri, query_params)
    (resp, content) = transport.request(http, token_info_uri)
    content = _helpers._from_bytes(content)
    if (resp.status == http_client.OK):
        d = json.loads(content)
        # The endpoint returns scopes as a single space-separated string.
        self.scopes = set(_helpers.string_to_scopes(d.get('scope', '')))
    else:
        # Prefer the provider's error_description when the body is JSON.
        error_msg = 'Invalid response {0}.'.format(resp.status)
        try:
            d = json.loads(content)
            if ('error_description' in d):
                error_msg = d['error_description']
        except (TypeError, ValueError):
            pass
        raise Error(error_msg)
Retrieves the list of authorized scopes from the OAuth2 provider. Args: http: an object to be used to make HTTP requests. token: A string used as the token to identify the credentials to the provider. Raises: Error: When refresh fails, indicating the the access token is invalid.
codesearchnet
def update_state(self, values, sample_weight=None):
    """Accumulate statistics for computing the element-wise mean.

    Args:
        values: Per-example value tensor; its shape must match the shape
            seen on the first call (which builds the state variables).
        sample_weight: Optional weighting of each example. Defaults to 1.

    Returns:
        Update op (the count-increment assign op).

    Raises:
        ValueError: If ``values`` has a different shape than on the first
            call.
    """
    values = math_ops.cast(values, self._dtype)
    # Lazily build the per-element total/count variables on first use.
    if not self._built:
        self._build(values.shape)
    elif values.shape != self._shape:
        raise ValueError('MeanTensor input values must always have the same shape. Expected shape (set during the first call): {}. Got: {}'.format(self._shape, values.shape))
    num_values = array_ops.ones_like(values)
    if sample_weight is not None:
        sample_weight = math_ops.cast(sample_weight, self._dtype)
        # Align values/weights ranks before broadcasting.
        values, _, sample_weight = losses_utils.squeeze_or_expand_dimensions(values, sample_weight=sample_weight)
        try:
            sample_weight = weights_broadcast_ops.broadcast_weights(sample_weight, values)
        except ValueError:
            # Weights cannot broadcast to values: reduce values over the
            # trailing axes so they match the weight rank instead.
            ndim = backend.ndim(values)
            weight_ndim = backend.ndim(sample_weight)
            values = math_ops.reduce_mean(values, axis=list(range(weight_ndim, ndim)))
        # Weight both the contribution and the effective sample count.
        num_values = math_ops.multiply(num_values, sample_weight)
        values = math_ops.multiply(values, sample_weight)
    update_total_op = self._total.assign_add(values)
    # Count must be updated after the total for a consistent mean.
    with ops.control_dependencies([update_total_op]):
        return self._count.assign_add(num_values)
Accumulates statistics for computing the element-wise mean. Args: values: Per-example value. sample_weight: Optional weighting of each example. Defaults to 1. Returns: Update op.
github-repos
def add(self, key, value):
    """Add a CLI arg to the args list.

    List values are expanded into one arg per element; dict values are not
    supported.  String values of the form ``$env.NAME`` or ``$envs.NAME``
    are resolved from the environment, with ``$envs.`` values masked.

    Args:
        key (string): The CLI Args key (e.g., --name).
        value (string): The CLI Args value (e.g., bob), or a list of such
            values.
    """
    if isinstance(value, list):
        for val in value:
            self._add_arg_python(key, val)
    elif isinstance(value, dict):
        # Unsupported type: report in red and drop the arg.
        err = 'Dictionary types are not currently supported for field.'
        print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, err))
    else:
        mask = False
        # $env.NAME  -> substitute from environment, not masked.
        # $envs.NAME -> substitute from environment, masked (secret).
        env_var = re.compile('^\\$env\\.(.*)$')
        envs_var = re.compile('^\\$envs\\.(.*)$')
        if env_var.match(str(value)):
            env_key = env_var.match(str(value)).groups()[0]
            value = os.environ.get(env_key, value)
        elif envs_var.match(str(value)):
            env_key = envs_var.match(str(value)).groups()[0]
            value = os.environ.get(env_key, value)
            mask = True
        self._add_arg(key, value, mask)
Add CLI Arg to lists value. Args: key (string): The CLI Args key (e.g., --name). value (string): The CLI Args value (e.g., bob).
codesearchnet
def set(pb_or_dict, key, value):
    """Set the given key on a protobuf message or mapping.

    Dotted keys (``'a.b'``) recurse into nested messages/mappings, creating
    intermediate dict levels as needed.  Sequence and mapping values are
    copied element-by-element into repeated/message fields of a protobuf.

    NOTE(review): uses ``collections.MutableMapping``/``MutableSequence``,
    which were removed from ``collections`` in Python 3.10 (moved to
    ``collections.abc``) -- this code appears to target Python 2/early 3;
    confirm before running on modern interpreters.

    Args:
        pb_or_dict (Union[~google.protobuf.message.Message, Mapping]): the
            object to mutate.
        key (str): The key on the object in question, possibly dotted.
        value (Any): The value to set.

    Raises:
        TypeError: If pb_or_dict is not a Message or Mapping.
    """
    if not isinstance(pb_or_dict, (collections.MutableMapping, Message)):
        raise TypeError('Tried to set a key %s on an invalid object; '
                        'expected a dict or protobuf message.' % key)
    key, subkey = _resolve_subkeys(key)
    # Dotted key: descend one level and recurse on the remainder.
    if subkey is not None:
        if isinstance(pb_or_dict, collections.MutableMapping):
            pb_or_dict.setdefault(key, {})
        set(get(pb_or_dict, key), subkey, value)
        return
    if isinstance(pb_or_dict, collections.MutableMapping):
        pb_or_dict[key] = value
    elif isinstance(value, (collections.MutableSequence, tuple)):
        # Repeated field: clear the existing contents, then append each item
        # (mappings become sub-messages via add()).
        while getattr(pb_or_dict, key):
            getattr(pb_or_dict, key).pop()
        for item in value:
            if isinstance(item, collections.Mapping):
                getattr(pb_or_dict, key).add(**item)
            else:
                getattr(pb_or_dict, key).extend([item])
    elif isinstance(value, collections.Mapping):
        # Message field from a mapping: set each entry recursively.
        for item_key, item_value in value.items():
            set(getattr(pb_or_dict, key), item_key, item_value)
    elif isinstance(value, Message):
        # Message field from another message: copy the set fields over.
        for item_key, item_value in value.ListFields():
            set(getattr(pb_or_dict, key), item_key.name, item_value)
    else:
        setattr(pb_or_dict, key, value)
Set the given key on the object. Args: pb_or_dict (Union[~google.protobuf.message.Message, Mapping]): the object. key (str): The key on the object in question. value (Any): The value to set. Raises: TypeError: If pb_or_dict is not a Message or Mapping.
juraj-google-style
def write(self, output, mode='w', keep_rc=False):
    """Execute the pipeline and write the results to ``output``.

    If ``output`` is a filename and the file did not already exist before
    the write, the file is removed again when an exception is raised.

    Args:
        output (str or file-like object): name of a file to create or
            overwrite, or an already-open file-like object to write into.
        mode (str): mode used when opening ``output`` by name. Ignored if
            ``output`` is a file-like object.
        keep_rc (bool): if True, return the pipeline's return code instead
            of raising on non-zero.

    Returns:
        The pipeline return code when ``keep_rc`` is True, otherwise None.

    Raises:
        CalledProcessError: if any return code in the pipeline is nonzero
            (and ``keep_rc`` is False).
    """
    if isinstance(output, six.string_types):
        # Remember pre-existence so we only delete files we created.
        already_exists = os.path.exists(output)
        try:
            with open(output, mode) as f:
                p = self._build_pipes(f)
                rc = p.wait()
                if keep_rc:
                    return rc
                if rc:
                    raise CalledProcessError(rc, self.cmds[0], '')
        except BaseException as be:
            # Clean up a file we created, then re-raise with the original
            # traceback (six.reraise keeps py2/py3 compatibility).
            if ((not already_exists) and os.path.exists(output)):
                os.remove(output)
            six.reraise(be.__class__, be, sys.exc_info()[2])
    else:
        # Caller supplied an open file-like object; no cleanup on failure.
        p = self._build_pipes(output)
        rc = p.wait()
        if keep_rc:
            return rc
        if rc:
            raise CalledProcessError(rc, self.cmds[0], '')
Executes the pipeline and writes the results to the supplied output. If output is a filename and the file didn't already exist before trying to write, the file will be removed if an exception is raised. Args: output (str or file like object): will create a new file of this name or overwrite an existing file. If output is already a file like object, it is used. mode (str): mode to use when creating or opening the provided file name if it is a string. Ignored if output is a file like object. Returns: The final output of the pipeline. Raises: CalledProcessError if any return code in the pipeline is nonzero.
codesearchnet
def __build_completer_map(cls):
    """Build a mapping from command names to completer method names.

    One command name maps to at most one completer method; multiple command
    names may map to the same completer.  Only used by __init__() to
    initialize self._cmd_map -- MUST NOT be used elsewhere.

    Returns:
        dict: {command_name: completer_method_name}

    Raises:
        PyShellError: If a command maps to multiple completer methods.
    """
    ret = {}
    for name in dir(cls):
        obj = getattr(cls, name)
        if iscompleter(obj):
            # A completer declares the commands it serves via
            # __complete_targets__.
            for cmd in obj.__complete_targets__:
                if (cmd in ret.keys()):
                    # NOTE(review): "complter" is a typo in this runtime
                    # message; left unchanged to preserve behavior.
                    raise PyShellError("The command '{}' already has complter method '{}', cannot register a second method '{}'.".format(cmd, ret[cmd], obj.__name__))
                ret[cmd] = obj.__name__
    return ret
Build a mapping from command names to completer names. One command name maps to at most one completer method. Multiple command names can map to the same completer method. Only used by __init__() to initialize self._cmd_map. MUST NOT be used elsewhere. Raises: PyShellError: A command maps to multiple helper methods.
codesearchnet
def sysname(self):
    """The system name of the device.

    To get the descriptive device name, use :attr:`name`.

    Returns:
        str: System name of the device, decoded from the C string
        returned by libinput.
    """
    raw = self._libinput.libinput_device_get_sysname(self._handle)
    return string_at(raw).decode()
The system name of the device. To get the descriptive device name, use :attr:`name`. Returns: str: System name of the device.
codesearchnet
def diff_halfMatch(self, text1, text2):
    """Do the two texts share a substring which is at least half the
    length of the longer text?  This speedup can produce non-minimal diffs.

    Three expressions in the original were truncated (the seed slice and
    the two quarter-offset computations); they are restored here per the
    reference diff-match-patch implementation.

    Args:
        text1: First string.
        text2: Second string.

    Returns:
        Five element tuple, containing the prefix of text1, the suffix of
        text1, the prefix of text2, the suffix of text2 and the common
        middle.  Or None if there was no match.
    """
    if self.Diff_Timeout <= 0:
        # Don't risk returning a non-optimal diff if we have unlimited time.
        return None
    if len(text1) > len(text2):
        (longtext, shorttext) = (text1, text2)
    else:
        (shorttext, longtext) = (text1, text2)
    if len(longtext) < 4 or len(shorttext) * 2 < len(longtext):
        return None  # Pointless.

    def diff_halfMatchI(longtext, shorttext, i):
        """Find a half-match using a quarter-length seed starting at i.

        Args:
            longtext: Longer string.
            shorttext: Shorter string.
            i: Start index of quarter length substring within longtext.

        Returns:
            Five element tuple as above, or None if there was no match.
        """
        seed = longtext[i:i + len(longtext) // 4]
        best_common = ''
        j = shorttext.find(seed)
        while j != -1:
            prefixLength = self.diff_commonPrefix(longtext[i:], shorttext[j:])
            suffixLength = self.diff_commonSuffix(longtext[:i], shorttext[:j])
            if len(best_common) < suffixLength + prefixLength:
                best_common = (shorttext[j - suffixLength:j] +
                               shorttext[j:j + prefixLength])
                best_longtext_a = longtext[:i - suffixLength]
                best_longtext_b = longtext[i + prefixLength:]
                best_shorttext_a = shorttext[:j - suffixLength]
                best_shorttext_b = shorttext[j + prefixLength:]
            j = shorttext.find(seed, j + 1)
        if len(best_common) * 2 >= len(longtext):
            return (best_longtext_a, best_longtext_b,
                    best_shorttext_a, best_shorttext_b, best_common)
        return None

    # First check if the second quarter is the seed for a half-match.
    hm1 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 3) // 4)
    # Check again based on the third quarter.
    hm2 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 1) // 2)
    if not hm1 and not hm2:
        return None
    elif not hm2:
        hm = hm1
    elif not hm1:
        hm = hm2
    else:
        # Both matched.  Select the longest.
        hm = hm1 if len(hm1[4]) > len(hm2[4]) else hm2

    # A half-match was found; sort out the return data.
    if len(text1) > len(text2):
        (text1_a, text1_b, text2_a, text2_b, mid_common) = hm
    else:
        (text2_a, text2_b, text1_a, text1_b, mid_common) = hm
    return (text1_a, text1_b, text2_a, text2_b, mid_common)
Do the two texts share a substring which is at least half the length of the longer text? This speedup can produce non-minimal diffs. Args: text1: First string. text2: Second string. Returns: Five element Array, containing the prefix of text1, the suffix of text1, the prefix of text2, the suffix of text2 and the common middle. Or None if there was no match.
juraj-google-style
def auth(self, skypeToken):
    """Request a new registration token using a current Skype token.

    Args:
        skypeToken (str): existing Skype token

    Returns:
        (str, datetime.datetime, str, SkypeEndpoint) tuple: registration
        token, associated expiry if known, resulting endpoint hostname,
        endpoint if provided

    Raises:
        .SkypeAuthException: if the login request is rejected
        .SkypeApiException: if the login form can't be processed
    """
    token = expiry = endpoint = None
    msgsHost = SkypeConnection.API_MSGSHOST
    while not token:
        secs = int(time.time())
        hash = self.getMac256Hash(str(secs))
        headers = {'LockAndKey': 'appId=msmsgs@msnmsgr.com; time={0}; lockAndKeyResponse={1}'.format(secs, hash),
                   'Authentication': 'skypetoken=' + skypeToken,
                   'BehaviorOverride': 'redirectAs404'}
        endpointResp = self.conn('POST', '{0}/users/ME/endpoints'.format(msgsHost), codes=(200, 201, 404),
                                 headers=headers, json={'endpointFeatures': 'Agent'})
        regTokenHead = endpointResp.headers.get('Set-RegistrationToken')
        locHead = endpointResp.headers.get('Location')
        if locHead:
            # NOTE(review): the original regex literal was truncated in the
            # source ("'(https:").  This pattern is reconstructed so that
            # group 3 is the bare URL-encoded endpoint id (used as
            # locParts[2] below) and group 1 the messenger host (compared
            # against msgsHost) -- confirm against the SkPy upstream.
            locParts = re.search(r'(https://[^/]+/v1)/users/ME/endpoints(/(%7B[a-z0-9\-]+%7D))?',
                                 locHead, re.I).groups()
            if locParts[2]:
                endpoint = SkypeEndpoint(self.conn, locParts[2].replace('%7B', '{').replace('%7D', '}'))
            if not locParts[0] == msgsHost:
                # Redirected to another messenger host; retry against it.
                msgsHost = locHead.rsplit('/', 4 if locParts[2] else 3)[0]
                continue
        if regTokenHead:
            token = re.search(r'(registrationToken=[a-z0-9\+/=]+)', regTokenHead, re.I).group(1)
            regExpiry = re.search(r'expires=(\d+)', regTokenHead).group(1)
            expiry = datetime.fromtimestamp(int(regExpiry))
            regEndMatch = re.search(r'endpointId=({[a-z0-9\-]+})', regTokenHead)
            if regEndMatch:
                endpoint = SkypeEndpoint(self.conn, regEndMatch.group(1))
        if not endpoint and endpointResp.status_code == 200 and endpointResp.json():
            # No endpoint supplied in headers; fall back to the first one
            # listed by the server.
            endpoint = SkypeEndpoint(self.conn, endpointResp.json()[0]['id'])
    return (token, expiry, msgsHost, endpoint)
Request a new registration token using a current Skype token. Args: skypeToken (str): existing Skype token Returns: (str, datetime.datetime, str, SkypeEndpoint) tuple: registration token, associated expiry if known, resulting endpoint hostname, endpoint if provided Raises: .SkypeAuthException: if the login request is rejected .SkypeApiException: if the login form can't be processed
codesearchnet
def failure_packages(self, failure_index=None):
    """Get packages involved in a failure.

    Args:
        failure_index: See `failure_reason`.

    Returns:
        A list of Requirement objects, or None when the failed phase has
        no recorded failure reason.
    """
    phase, _ = self._get_failed_phase(failure_index)
    reason = phase.failure_reason
    if not reason:
        return None
    return reason.involved_requirements()
Get packages involved in a failure. Args: failure_index: See `failure_reason`. Returns: A list of Requirement objects.
juraj-google-style
def _Open(self, path_spec, mode='rb'):
    """Opens the file system object defined by path specification.

    Args:
        path_spec (PathSpec): a path specification.
        mode (Optional[str]): file access mode. The default is 'rb' which
            represents read-only binary.

    Raises:
        AccessError: if the access to open the file was denied.
        IOError: if the file system object could not be opened.
        PathSpecError: if the path specification is incorrect.
        ValueError: if the path specification is invalid.
    """
    if not path_spec.HasParent():
        raise errors.PathSpecError(
            'Unsupported path specification without parent.')

    parent_file_object = resolver.Resolver.OpenFileObject(
        path_spec.parent, resolver_context=self._resolver_context)
    try:
        image = tsk_image.TSKFileSystemImage(parent_file_object)
        volume = pytsk3.Volume_Info(image)
    except:
        # Don't leak the parent file object if TSK rejects the image;
        # re-raise whatever went wrong.
        parent_file_object.close()
        raise

    self._file_object = parent_file_object
    self._tsk_volume = volume
Opens the file system object defined by path specification. Args: path_spec (PathSpec): a path specification. mode (Optional[str]): file access mode. The default is 'rb' which represents read-only binary. Raises: AccessError: if the access to open the file was denied. IOError: if the file system object could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid.
juraj-google-style
def _GetSanitizedEventValues(self, event):
    """Sanitizes the event for use in Elasticsearch.

    Values such as the nested path specification dictionary would
    otherwise break Elasticsearch automatic indexing.

    Args:
        event (EventObject): event.

    Returns:
        dict[str, object]: sanitized event values.

    Raises:
        NoFormatterFound: if no event formatter can be found to match the
            data type in the event.
    """
    event_values = {}
    for attribute_name, attribute_value in event.GetAttributes():
        # Nested registry values are skipped entirely.
        if attribute_name == 'regvalue':
            continue
        if attribute_name == 'pathspec':
            # Serialize the path specification; drop it if it cannot be
            # serialized.
            try:
                attribute_value = JsonPathSpecSerializer.WriteSerialized(
                    attribute_value)
            except TypeError:
                continue
        event_values[attribute_name] = attribute_value

    try:
        timestamp = timelib.Timestamp.RoundToSeconds(event.timestamp)
    except TypeError as exception:
        logger.warning(
            'Unable to round timestamp {0!s}. error: {1!s}. '
            'Defaulting to 0'.format(event.timestamp, exception))
        timestamp = 0
    event_values['datetime'] = timelib.Timestamp.CopyToIsoFormat(
        timestamp, timezone=self._output_mediator.timezone)

    message, _ = self._output_mediator.GetFormattedMessages(event)
    if message is None:
        data_type = getattr(event, 'data_type', 'UNKNOWN')
        raise errors.NoFormatterFound(
            'Unable to find event formatter for: {0:s}.'.format(data_type))
    event_values['message'] = message

    # Normalize the tag attribute to a plain list of label strings.
    try:
        labels = list(event_values['tag'].labels)
    except (KeyError, AttributeError):
        labels = []
    event_values['tag'] = labels

    source_short, source = self._output_mediator.GetFormattedSources(event)
    if source is None or source_short is None:
        data_type = getattr(event, 'data_type', 'UNKNOWN')
        raise errors.NoFormatterFound(
            'Unable to find event formatter for: {0:s}.'.format(data_type))
    event_values['source_short'] = source_short
    event_values['source_long'] = source
    return event_values
Sanitizes the event for use in Elasticsearch. The event values need to be sanitized to prevent certain values from causing problems when indexing with Elasticsearch. For example the path specification is a nested dictionary which will cause problems for Elasticsearch automatic indexing. Args: event (EventObject): event. Returns: dict[str, object]: sanitized event values. Raises: NoFormatterFound: if no event formatter can be found to match the data type in the event.
codesearchnet
def add(queue_name, payload=None, content_type=None, source=None, task_id=None, build_id=None, release_id=None, run_id=None):
    """Adds a work item to a queue.

    Args:
        queue_name: Name of the queue to add the work item to.
        payload: Optional. Payload describing the work as a string. If not
            a string and content_type is not provided, the payload is
            assumed to be a JSON-able Python object.
        content_type: Optional. Content type of the payload.
        source: Optional. Who or what originally created the task.
        task_id: Optional. When supplied, only enqueue this task if a task
            with this ID does not already exist; otherwise do nothing.
        build_id: Build ID to associate with this task. May be None.
        release_id: Release ID to associate with this task. May be None.
        run_id: Run ID to associate with this task. May be None.

    Returns:
        ID of the task that was added (or of the pre-existing task).
    """
    if task_id:
        existing = WorkQueue.query.filter_by(task_id=task_id).first()
        if existing:
            # A task with this ID is already enqueued; nothing to do.
            return existing.task_id
    else:
        task_id = uuid.uuid4().hex

    if payload and not content_type and not isinstance(payload, basestring):
        # Non-string payload without an explicit type: assume JSON.
        payload = json.dumps(payload)
        content_type = 'application/json'

    task = WorkQueue(
        task_id=task_id,
        queue_name=queue_name,
        eta=datetime.datetime.utcnow(),
        source=source,
        build_id=build_id,
        release_id=release_id,
        run_id=run_id,
        payload=payload,
        content_type=content_type)
    db.session.add(task)
    return task.task_id
Adds a work item to a queue. Args: queue_name: Name of the queue to add the work item to. payload: Optional. Payload that describes the work to do as a string. If not a string and content_type is not provided, then this function assumes the payload is a JSON-able Python object. content_type: Optional. Content type of the payload. source: Optional. Who or what originally created the task. task_id: Optional. When supplied, only enqueue this task if a task with this ID does not already exist. If a task with this ID already exists, then this function will do nothing. build_id: Build ID to associate with this task. May be None. release_id: Release ID to associate with this task. May be None. run_id: Run ID to associate with this task. May be None. Returns: ID of the task that was added.
codesearchnet
def __init__(self, _args):
    """Initialize Class properties.

    Args:
        _args (namespace): The argparser args Namespace.
    """
    super(TcExPackage, self).__init__(_args)
    # App features advertised in the packaged install.json.
    self.features = ['aotExecutionEnabled', 'secureParams']
    # Collected App package paths.
    self._app_packages = []
    # Results of the packaging run, grouped by category.
    self.package_data = {
        'errors': [],
        'updates': [],
        'bundle': [],
        'package': [],
    }
    # Results of install.json validation.
    self.validation_data = {}
Initialize Class properties. Args: _args (namespace): The argparser args Namespace.
juraj-google-style
def connection_lost(self, exc):
    """(asyncio.Protocol member) Called upon when a socket closes.

    This class simply logs the disconnection.

    Args:
        exc (Exception or None): Error if connection closed unexpectedly,
            None if closed cleanly.
    """
    if not exc:
        log.info("{:d} connection_lost", id(self))
    else:
        log.error("{:d} connection_lost {}", id(self), exc)
(asyncio.Protocol member) Called upon when a socket closes. This class simply logs the disconnection Args: exc (Exception or None): Error if connection closed unexpectedly, None if closed cleanly.
juraj-google-style
def _transform_feature(self, inputs): pass
Returns intermediate representation (usually a `Tensor`). Uses `inputs` to create an intermediate representation (usually a `Tensor`) that other feature columns can use. Example usage of `inputs`: Let's say a Feature column depends on raw feature ('raw') and another `_FeatureColumn` (input_fc). To access corresponding `Tensor`s, inputs will be used as follows: ```python raw_tensor = inputs.get('raw') fc_tensor = inputs.get(input_fc) ``` Args: inputs: A `_LazyBuilder` object to access inputs. Returns: Transformed feature `Tensor`.
github-repos
def make_batched_images(images) -> List[List[ImageInput]]:
    """Accepts images in list or nested list format, and makes a list of
    images for preprocessing.

    Args:
        images (`Union[List[List[ImageInput]], List[ImageInput], ImageInput]`):
            The input image.

    Returns:
        list: A list of images.
    """
    if isinstance(images, (list, tuple)) and isinstance(images[0], (list, tuple)) and is_valid_image(images[0][0]):
        # Nested list of images: flatten one level.
        flattened = []
        for group in images:
            flattened.extend(group)
        return flattened
    if isinstance(images, (list, tuple)) and is_valid_image(images[0]):
        # Already a flat list of images.
        return images
    if is_valid_image(images):
        # Single image: wrap in a list.
        return [images]
    raise ValueError(f'Could not make batched images from {images}')
Accepts images in list or nested list format, and makes a list of images for preprocessing. Args: images (`Union[List[List[ImageInput]], List[ImageInput], ImageInput]`): The input image. Returns: list: A list of images.
github-repos
def create(self, reference, document_data):
    """Add a "change" to this batch to create a document.

    If the document given by ``reference`` already exists, then this
    batch will fail when :meth:`commit`-ed.

    Args:
        reference (~.firestore_v1beta1.document.DocumentReference): A
            document reference to be created in this batch.
        document_data (dict): Property names and values to use for
            creating a document.
    """
    pbs = _helpers.pbs_for_create(reference._document_path, document_data)
    self._add_write_pbs(pbs)
Add a "change" to this batch to create a document. If the document given by ``reference`` already exists, then this batch will fail when :meth:`commit`-ed. Args: reference (~.firestore_v1beta1.document.DocumentReference): A document reference to be created in this batch. document_data (dict): Property names and values to use for creating a document.
codesearchnet
def with_doc(fn_with_doc_to_copy):
    """Returns a decorator to copy documentation from the given function.

    Docstring is copied, including *args and **kwargs documentation.

    Args:
        fn_with_doc_to_copy: Function whose docstring, including *args and
            **kwargs documentation, is to be copied.

    Returns:
        Decorated version of `wrapper_init` with documentation copied from
        `fn_with_doc_to_copy`.
    """
    def decorator(wrapper_init):
        # wrapt transfers the wrapped function's metadata (docstring
        # included) onto the wrapper produced here.
        @wrapt.decorator
        def _wrapper(unused_wrapped, instance, args, kwargs):
            wrapper_init(instance, *args, **kwargs)
        return _wrapper(fn_with_doc_to_copy)
    return decorator
Returns a decorator to copy documentation from the given function. Docstring is copied, including *args and **kwargs documentation. Args: fn_with_doc_to_copy: Function whose docstring, including *args and **kwargs documentation, is to be copied. Returns: Decorated version of `wrapper_init` with documentation copied from `fn_with_doc_to_copy`.
juraj-google-style
def top(self, container, ps_args=None):
    """Display the running processes of a container.

    Args:
        container (str): The container to inspect
        ps_args (str): An optional arguments passed to ps (e.g. ``aux``)

    Returns:
        (str): The output of the top

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    url = self._url('/containers/{0}/top', container)
    params = {} if ps_args is None else {'ps_args': ps_args}
    return self._result(self._get(url, params=params), True)
Display the running processes of a container. Args: container (str): The container to inspect ps_args (str): An optional arguments passed to ps (e.g. ``aux``) Returns: (str): The output of the top Raises: :py:class:`docker.errors.APIError` If the server returns an error.
codesearchnet
def listen(self):
    """Starts the listen loop.

    If threading is enabled, the listen loop and the scheduler each run
    in their own daemon thread; otherwise the loop runs inline and
    blocks.

    Args:
        None

    Returns:
        None
    """
    self.listening = True
    if not self.threading:
        # No threading: run inline (blocks).
        self.listen_loop()
        return
    from threading import Thread
    self.listen_thread = Thread(target=self.listen_loop)
    self.listen_thread.daemon = True
    self.listen_thread.start()
    self.scheduler_thread = Thread(target=self.scheduler)
    self.scheduler_thread.daemon = True
    self.scheduler_thread.start()
Starts the listen loop. If threading is enabled, then the loop will be started in its own thread. Args: None Returns: None
codesearchnet
def _subdivide_nodes(nodes):
    """Subdivide a curve into two sub-curves.

    Splits the unit interval (the domain of the curve) down the middle.

    .. note::

        There is also a Fortran implementation of this function, which
        will be used if it can be built.

    Args:
        nodes (numpy.ndarray): The nodes defining a B |eacute| zier curve.

    Returns:
        Tuple[numpy.ndarray, numpy.ndarray]: The nodes for the two
        sub-curves.
    """
    _, num_nodes = np.shape(nodes)
    # Pick the precomputed subdivision matrices for low degrees; fall
    # back to computing them for higher degrees.
    if num_nodes == 2:
        left_mat, right_mat = _LINEAR_SUBDIVIDE_LEFT, _LINEAR_SUBDIVIDE_RIGHT
    elif num_nodes == 3:
        left_mat, right_mat = (
            _QUADRATIC_SUBDIVIDE_LEFT, _QUADRATIC_SUBDIVIDE_RIGHT)
    elif num_nodes == 4:
        left_mat, right_mat = _CUBIC_SUBDIVIDE_LEFT, _CUBIC_SUBDIVIDE_RIGHT
    else:
        left_mat, right_mat = make_subdivision_matrices(num_nodes - 1)
    left_nodes = _helpers.matrix_product(nodes, left_mat)
    right_nodes = _helpers.matrix_product(nodes, right_mat)
    return left_nodes, right_nodes
Subdivide a curve into two sub-curves. Does so by taking the unit interval (i.e. the domain of the surface) and splitting it into two sub-intervals by splitting down the middle. .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Args: nodes (numpy.ndarray): The nodes defining a B |eacute| zier curve. Returns: Tuple[numpy.ndarray, numpy.ndarray]: The nodes for the two sub-curves.
juraj-google-style
def truncate(text, max_len=350, end='...'):
    """Truncate the supplied text for display.

    Arguments:
        text (:py:class:`str`): The text to truncate.
        max_len (:py:class:`int`, optional): The maximum length of the
            text before truncation (defaults to 350 characters).
        end (:py:class:`str`, optional): The ending to use to show that
            the text was truncated (defaults to ``'...'``).

    Returns:
        :py:class:`str`: The truncated text.
    """
    if len(text) <= max_len:
        return text
    head = text[:max_len]
    # Cut back to the last whole word before appending the ellipsis.
    truncated = head.rsplit(' ', maxsplit=1)[0]
    return truncated + end
Truncate the supplied text for display. Arguments: text (:py:class:`str`): The text to truncate. max_len (:py:class:`int`, optional): The maximum length of the text before truncation (defaults to 350 characters). end (:py:class:`str`, optional): The ending to use to show that the text was truncated (defaults to ``'...'``). Returns: :py:class:`str`: The truncated text.
juraj-google-style
def link(target, link_to):
    """Create a link to a target file or a folder.

    For simplicity sake, both target and link_to must be absolute path and
    must include the filename of the file or folder. Do not include any
    trailing slash, e.g. ``link('/path/to/file', '/path/to/link')``.

    Args:
        target (str): file or folder the link will point to
        link_to (str): Link to create
    """
    assert isinstance(target, str)
    assert os.path.exists(target)
    assert isinstance(link_to, str)

    link_dir = os.path.dirname(os.path.abspath(link_to))
    if not os.path.isdir(link_dir):
        # Create intermediate directories for the link location.
        os.makedirs(link_dir)
    chmod(target)
    os.symlink(target, link_to)
Create a link to a target file or a folder. For simplicity sake, both target and link_to must be absolute path and must include the filename of the file or folder. Also do not include any trailing slash. e.g. link('/path/to/file', '/path/to/link') But not: link('/path/to/file', 'path/to/') or link('/path/to/folder/', '/path/to/link') Args: target (str): file or folder the link will point to link_to (str): Link to create
juraj-google-style
def import_tf_tensor(self, x, tf_x):
    """Import a tf.Tensor, producing a LaidOutTensor.

    Args:
        x: a Tensor
        tf_x: a tf.Tensor

    Returns:
        a LaidOutTensor
    """
    slices = self.make_slices(tf_x, x.shape)
    return self.LaidOutTensor(slices)
Import a tf.Tensor, producing a LaidOutTensor. Args: x: a Tensor tf_x: a tf.Tensor Returns: a LaidOutTensor
juraj-google-style
def ValidatePassword(self, password):
    """Validates if the provided password matches with the stored password.

    Args:
        password (string): a password.

    Returns:
        bool: the provided password matches with the stored password.
    """
    key = to_aes_key(password)
    stored_hash = self.LoadStoredData('PasswordHash')
    return hashlib.sha256(key).digest() == stored_hash
Validates if the provided password matches with the stored password. Args: password (string): a password. Returns: bool: the provided password matches with the stored password.
juraj-google-style
def get_num_days_required(offset, period='d', perc_required=0.9):
    """Estimates the number of days required to assume that data is OK.

    Helper function used to determine if there are enough "good" data
    days over a given period.

    Args:
        * offset (DateOffset): Offset (lookback) period.
        * period (str): Period string ('d', 'm' or 'y').
        * perc_required (float): percentage of number of days expected
            required.
    """
    anchor = pd.to_datetime('2010-01-01')
    delta = anchor - (anchor - offset)
    # 0.69 approximates the fraction of calendar days that are
    # trading days.
    days = delta.days * 0.69
    if period == 'd':
        return days * perc_required
    if period == 'm':
        # ~20 trading days per month.
        return (days / 20) * perc_required
    if period == 'y':
        # ~252 trading days per year.
        return (days / 252) * perc_required
    raise NotImplementedError(
        'period not supported. Supported periods are d, m, y')
Estimates the number of days required to assume that data is OK. Helper function used to determine if there are enough "good" data days over a given period. Args: * offset (DateOffset): Offset (lookback) period. * period (str): Period string. * perc_required (float): percentage of number of days expected required.
codesearchnet
def update_firmware(self, device, id_override=None, type_override=None):
    """Make a call to the update_firmware endpoint.

    As far as I know this is only valid for Wink hubs.

    Args:
        device (WinkDevice): The device the change is being requested for.
        id_override (String, optional): A device ID used to override the
            passed in device's ID. Used to make changes on sub-devices,
            e.g. an outlet in a powerstrip (the parent device's ID).
        type_override (String, optional): Used to override the device type
            when a device inherits from a device other than WinkDevice.

    Returns:
        response_json (Dict): The API's response in dictionary format, or
        None when the request failed.
    """
    object_id = id_override or device.object_id()
    object_type = type_override or device.object_type()
    url = '{}/{}s/{}/update_firmware'.format(
        self.BASE_URL, object_type, object_id)
    try:
        response = requests.post(url, headers=API_HEADERS)
        return response.json()
    except requests.exceptions.RequestException:
        # Network-level failure: signal with None rather than raising.
        return None
Make a call to the update_firmware endpoint. As far as I know this is only valid for Wink hubs. Args: device (WinkDevice): The device the change is being requested for. id_override (String, optional): A device ID used to override the passed in device's ID. Used to make changes on sub-devices. i.e. Outlet in a Powerstrip. The Parent device's ID. type_override (String, optional): Used to override the device type when a device inherits from a device other than WinkDevice. Returns: response_json (Dict): The API's response in dictionary format
codesearchnet
def get_predicted_structure(self, structure, ref_structure):
    """Given a structure, returns back the structure scaled to predicted
    volume.

    Args:
        structure (Structure): structure w/unknown volume
        ref_structure (Structure): A reference structure with a similar
            structure but different species.

    Returns:
        a Structure object with predicted volume
    """
    predicted = structure.copy()
    predicted.scale_lattice(self.predict(structure, ref_structure))
    return predicted
Given a structure, returns back the structure scaled to predicted volume. Args: structure (Structure): structure w/unknown volume ref_structure (Structure): A reference structure with a similar structure but different species. Returns: a Structure object with predicted volume
juraj-google-style
def _TestGradient(self, nccl_reduce, numpy_fn):
    """Tests the gradient of nccl_reduce.

    Args:
        nccl_reduce: A function taking a list of tensors and a list of
            devices, and returning a list of reduced tensors and a list
            of ops to perform the reduction.
        numpy_fn: A function taking two tensors and returning the
            gradient of the reduction of the two.
    """
    def _Gradient(tensors, devices):
        # Feed placeholders so gradients flow back to the inputs.
        placeholders = [
            array_ops.placeholder(t.dtype, t.shape) for t in tensors]
        reduced = nccl_reduce(placeholders, devices)
        losses = _DeviceTensors(tensors, [t.device for t in reduced])
        grads = gradients.gradients(
            reduced, placeholders, losses, colocate_gradients_with_ops=True)
        return [g for g in grads if g is not None]

    self._Test(_Gradient, numpy_fn)
Tests the gradient of nccl_reduce. Args: nccl_reduce: A function taking a list of tensors and a list of devices, and returns a list of reduced tensors and a list of ops to perform the reduction. numpy_fn: A function taking two tensors and returning the gradient of the reduction of the two.
github-repos
def DistFitDataset(Dat):
    """Given a data matrix, this returns the per-gene fit error for the
    Poisson, Normal, and Log-Normal distributions.

    Args:
        Dat (array): numpy array with shape (genes, cells)

    Returns:
        d (dict): 'poiss', 'norm', 'lognorm' give the fit error for each
            distribution.
    """
    num_genes, _ = Dat.shape
    poiss = np.zeros(num_genes)
    norm = np.zeros(num_genes)
    lognorm = np.zeros(num_genes)
    for gene_idx in range(num_genes):
        fit = GetDistFitError(Dat[gene_idx])
        poiss[gene_idx] = fit['poiss']
        norm[gene_idx] = fit['norm']
        lognorm[gene_idx] = fit['lognorm']
    return {'poiss': poiss, 'norm': norm, 'lognorm': lognorm}
Given a data matrix, this returns the per-gene fit error for the Poisson, Normal, and Log-Normal distributions. Args: Dat (array): numpy array with shape (genes, cells) Returns: d (dict): 'poiss', 'norm', 'lognorm' give the fit error for each distribution.
codesearchnet
def relativefrom(base, path):
    """Return a path relative from a given base path.

    Insert backrefs as appropriate to reach the path from the base.

    Arguments:
        base (str): Path to a directory.
        path (str): Path to make relative.

    Returns:
        str: the path to ``base`` from ``path``.

    >>> relativefrom("foo/bar", "baz/index.html")
    '../../baz/index.html'
    """
    base_parts = list(iteratepath(base))
    path_parts = list(iteratepath(path))

    # Count how many leading components the two paths share.
    common = 0
    for part_a, part_b in zip(base_parts, path_parts):
        if part_a != part_b:
            break
        common += 1

    backrefs = ['..'] * (len(base_parts) - common)
    return '/'.join(backrefs + path_parts[common:])
Return a path relative from a given base path. Insert backrefs as appropriate to reach the path from the base. Arguments: base (str): Path to a directory. path (str): Path to make relative. Returns: str: the path to ``base`` from ``path``. >>> relativefrom("foo/bar", "baz/index.html") '../../baz/index.html'
codesearchnet
def stop(self) -> float:
    """Stop the timer.

    Records the stop time on the instance and returns the elapsed wall
    time since start, minus any accumulated offset.

    Returns:
        The time the timer was stopped
    """
    self.stop_time = time.time()
    elapsed = self.stop_time - self.start_time
    return elapsed - self.offset
Stop the timer Returns: The time the timer was stopped
codesearchnet
def inference(self, state_arr, limit=1000):
    """Inference.

    Simulates up to ``limit`` steps, recording the agent position, the
    enemy positions, and the selected Q-value at each step.

    Args:
        state_arr: `np.ndarray` of state.
        limit: The number of inferencing.

    Returns:
        `list` of `np.ndarray` of an optimal route.
    """
    self.__inferencing_flag = True
    agent_x, agent_y = np.where(state_arr[0] == 1)
    agent_x, agent_y = agent_x[0], agent_y[0]
    self.__create_enemy(self.__map_arr)

    result_list = [(agent_x, agent_y, 0.0)]
    # Initial row: agent position, every enemy position, and a 0.0 Q-value.
    # NOTE(review): the 0.0 append is placed after the enemy loop by
    # symmetry with the per-step row built below -- confirm upstream.
    result_val_list = [agent_x, agent_y]
    for e in range(self.__enemy_num):
        result_val_list.append(self.__enemy_pos_list[e][0])
        result_val_list.append(self.__enemy_pos_list[e][1])
    result_val_list.append(0.0)
    result_list.append(tuple(result_val_list))

    self.t = 0
    while self.t < limit:
        next_action_arr = self.extract_possible_actions(state_arr)
        next_q_arr = self.function_approximator.inference_q(next_action_arr)
        action_arr, q = self.select_action(next_action_arr, next_q_arr)
        self.__move_enemy(action_arr)

        agent_x, agent_y = np.where(action_arr[0] == 1)
        agent_x, agent_y = agent_x[0], agent_y[0]

        # Per-step row: agent position, enemy positions, selected Q-value.
        result_val_list = [agent_x, agent_y]
        for e in range(self.__enemy_num):
            result_val_list.append(self.__enemy_pos_list[e][0])
            result_val_list.append(self.__enemy_pos_list[e][1])
        try:
            result_val_list.append(q[0])
        except IndexError:
            # q may be a scalar rather than an array.
            result_val_list.append(q)
        result_list.append(tuple(result_val_list))

        state_arr = self.update_state(state_arr, action_arr)
        self.t += 1
        end_flag = self.check_the_end_flag(state_arr)
        if end_flag is True:
            break

    return result_list
Infernce. Args: state_arr: `np.ndarray` of state. limit: The number of inferencing. Returns: `list of `np.ndarray` of an optimal route.
juraj-google-style
def _CheckCompositeMap(self, data_type_definition):
    """Determines if the data type definition needs a composite map.

    Args:
        data_type_definition (DataTypeDefinition): structure data type
            definition.

    Returns:
        bool: True if a composite map is needed, False otherwise.

    Raises:
        FormatError: if whether a composite map is needed cannot be
            determined from the data type definition.
    """
    if not data_type_definition:
        raise errors.FormatError('Missing data type definition')

    members = getattr(data_type_definition, 'members', None)
    if not members:
        raise errors.FormatError('Invalid data type definition missing members')

    last_byte_order = data_type_definition.byte_order
    for member_definition in members:
        # A composite member forces a composite map.
        if member_definition.IsComposite():
            return True
        # A byte-order change between consecutive non-native members also
        # forces a composite map.
        if (last_byte_order != definitions.BYTE_ORDER_NATIVE and
                member_definition.byte_order != definitions.BYTE_ORDER_NATIVE and
                last_byte_order != member_definition.byte_order):
            return True
        last_byte_order = member_definition.byte_order

    return False
Determines if the data type definition needs a composite map. Args: data_type_definition (DataTypeDefinition): structure data type definition. Returns: bool: True if a composite map is needed, False otherwise. Raises: FormatError: if a composite map is needed cannot be determined from the data type definition.
juraj-google-style
def tomof(self, maxline=MAX_MOF_LINE):
    """Return a MOF string with the declaration of this CIM qualifier type.

    The returned MOF string conforms to the ``qualifierDeclaration`` ABNF
    rule defined in :term:`DSP0004`. Qualifier flavors are included only
    when the corresponding attribute is not `None`. Because
    :term:`DSP0004` does not support instance qualifiers, the
    ``toinstance`` flavor is not emitted.

    Returns:
        :term:`unicode string`: MOF string.
    """
    mof = [u'Qualifier ', self.name, u' : ', self.type]

    if self.is_array:
        mof.append(u'[')
        if self.array_size is not None:
            mof.append(six.text_type(self.array_size))
        mof.append(u']')

    if self.value is not None:
        mof.append(u' = ')
        if isinstance(self.value, list):
            mof.append(u'{ ')
        # Track the current column so the value formatter can wrap lines.
        mof_str = u''.join(mof)
        line_pos = len(mof_str) - mof_str.rfind('\n') - 1
        val_str, line_pos = _value_tomof(
            self.value, self.type, MOF_INDENT, maxline, line_pos, 3, False)
        mof.append(val_str)
        if isinstance(self.value, list):
            mof.append(u' }')

    mof.append(u',\n')
    mof.append(_indent_str(MOF_INDENT + 1))
    mof.append(u'Scope(')
    mof_scopes = [scope.lower() for scope in self._ordered_scopes
                  if self.scopes.get(scope, False)]
    mof.append(u', '.join(mof_scopes))
    mof.append(u')')

    mof_flavors = []
    if self.overridable is True:
        mof_flavors.append('EnableOverride')
    elif self.overridable is False:
        mof_flavors.append('DisableOverride')
    if self.tosubclass is True:
        mof_flavors.append('ToSubclass')
    elif self.tosubclass is False:
        mof_flavors.append('Restricted')
    if self.translatable:
        mof_flavors.append('Translatable')
    if mof_flavors:
        mof.append(u',\n')
        mof.append(_indent_str(MOF_INDENT + 1))
        mof.append(u'Flavor(')
        mof.append(u', '.join(mof_flavors))
        mof.append(u')')

    mof.append(u';\n')
    return u''.join(mof)
Return a MOF string with the declaration of this CIM qualifier type. The returned MOF string conforms to the ``qualifierDeclaration`` ABNF rule defined in :term:`DSP0004`. Qualifier flavors are included in the returned MOF string only when the information is available (i.e. the value of the corresponding attribute is not `None`). Because :term:`DSP0004` does not support instance qualifiers, and thus does not define a flavor keyword for the :attr:`~pywbem.CIMQualifierDeclaration.toinstance` attribute, that flavor is not included in the returned MOF string. Returns: :term:`unicode string`: MOF string.
codesearchnet
def _approx_eq_iterables(val: Any, other: Any, *, atol: Union[(int, float)]) -> bool:
    """Iterates over arguments and calls approx_eq recursively.

    Types of `val` and `other` need not match; they only need to be
    iterables of the same length and structure. approx_eq() is called on
    each consecutive pair of elements.

    Args:
        val: Source for approximate comparison.
        other: Target for approximate comparison.
        atol: The minimum absolute tolerance. See np.isclose()
            documentation for details.

    Returns:
        True if objects are approximately equal, False otherwise. Returns
        NotImplemented when approximate equality is not implemented for
        given types.
    """
    def _iter_or_none(obj):
        try:
            return iter(obj)
        except TypeError:
            return None

    val_it = _iter_or_none(val)
    other_it = _iter_or_none(other)
    if val_it is None or other_it is None:
        # At least one argument is not iterable.
        return NotImplemented

    while True:
        try:
            val_next = next(val_it)
        except StopIteration:
            try:
                next(other_it)
            except StopIteration:
                # Both exhausted at the same time: equal.
                return True
            # `other` is longer.
            return False
        try:
            other_next = next(other_it)
        except StopIteration:
            # `val` is longer.
            return False
        result = approx_eq(val_next, other_next, atol=atol)
        if result is not True:
            # Propagate False or NotImplemented unchanged.
            return result
Iterates over arguments and calls approx_eq recursively. Types of `val` and `other` does not necessarily needs to match each other. They just need to be iterable of the same length and have the same structure, approx_eq() will be called on each consecutive element of `val` and `other`. Args: val: Source for approximate comparison. other: Target for approximate comparison. atol: The minimum absolute tolerance. See np.isclose() documentation for details. Returns: True if objects are approximately equal, False otherwise. Returns NotImplemented when approximate equality is not implemented for given types.
codesearchnet