code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def __init__(self, output_mediator):
    """Initializes a linear output module.

    Args:
        output_mediator (OutputMediator): mediates interactions between
            output modules and other components, such as storage and dfvfs.
    """
    super(LinearOutputModule, self).__init__(output_mediator)
    # The writer is attached later; None until then.
    self._output_writer = None
Initializes a linear output module. Args: output_mediator (OutputMediator): mediates interactions between output modules and other components, such as storage and dfvfs. Raises: ValueError: if the output writer is missing.
juraj-google-style
def housekeeping(self, **kwargs):
    """Start the housekeeping task.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo).

    Raises:
        GitlabAuthenticationError: If authentication is not correct.
        GitlabHousekeepingError: If the server failed to perform the request.
    """
    endpoint = '/projects/%s/housekeeping' % self.get_id()
    self.manager.gitlab.http_post(endpoint, **kwargs)
Start the housekeeping task. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabHousekeepingError: If the server failed to perform the request
juraj-google-style
def save_forensic_reports_to_splunk(self, forensic_reports):
    """Saves forensic DMARC reports to Splunk.

    Args:
        forensic_reports (list): A list of forensic report dictionaries
            to save in Splunk. A single dict is accepted and wrapped.

    Raises:
        SplunkError: If the HEC request fails or Splunk returns a
            non-zero response code.
    """
    logger.debug('Saving forensic reports to Splunk')
    # Accept a single report dict for convenience.
    if isinstance(forensic_reports, dict):
        forensic_reports = [forensic_reports]
    if not forensic_reports:
        return
    # Splunk HEC accepts newline-delimited JSON events in one POST.
    json_str = ''
    for report in forensic_reports:
        data = self._common_data.copy()
        data['sourcetype'] = 'dmarc:forensic'
        timestamp = human_timestamp_to_timestamp(report['arrival_date_utc'])
        data['time'] = timestamp
        data['event'] = report.copy()
        json_str += '{0}\n'.format(json.dumps(data))
    if not self.session.verify:
        logger.debug('Skipping certificate verification for Splunk HEC')
    try:
        response = self.session.post(self.url, data=json_str, timeout=self.timeout)
        response = response.json()
    except Exception as e:
        raise SplunkError(str(e))
    if response['code'] != 0:
        raise SplunkError(response['text'])
Saves forensic DMARC reports to Splunk Args: forensic_reports (list): A list of forensic report dictionaries to save in Splunk
codesearchnet
def main(args=None):
    """Main command-line interface entrypoint.

    Runs the given subcommand. If ``args`` is not provided, the arguments
    are read from the command line.

    Args:
        args (list): list of command-line arguments.

    Returns:
        int: zero on success, non-zero otherwise.
    """
    if args is None:
        args = sys.argv[1:]
    parser = create_parser()
    args = parser.parse_args(args)
    # Map -v count to a logging level: -vv => DEBUG, -v => INFO.
    if args.verbose >= 2:
        log_level = logging.DEBUG
    elif args.verbose >= 1:
        log_level = logging.INFO
    else:
        log_level = logging.WARNING
    logging.basicConfig(level=log_level)
    try:
        args.command(args)
    except pylink.JLinkException as e:
        sys.stderr.write('Error: %s%s' % (str(e), os.linesep))
        return 1
    return 0
Main command-line interface entrypoint. Runs the given subcommand or argument that were specified. If not given a ``args`` parameter, assumes the arguments are passed on the command-line. Args: args (list): list of command-line arguments Returns: Zero on success, non-zero otherwise.
juraj-google-style
def increase_volume(percentage):
    """Increase the volume by a given percentage.

    Args:
        percentage (int): The percentage (as an integer between 0 and 100)
            to increase the volume by.

    Raises:
        ValueError: if the percentage is >100 or <0.
    """
    if not 0 <= percentage <= 100:
        raise ValueError('percentage must be an integer between 0 and 100')
    if system.get_name() == 'windows':
        # Not implemented on Windows.
        pass
    elif system.get_name() == 'mac':
        # Mac volume is controlled on a 0-10 scale.
        step = percentage / 10
        new_volume = get() + step
        if new_volume > 10:
            new_volume = 10
        set_volume(new_volume * 10)
    else:
        # Linux: delegate to amixer's relative "+N%" syntax.
        amount = '%d%%+' % percentage
        sp.Popen(['amixer', '--quiet', 'sset', 'Master', amount]).wait()
Increase the volume. Increase the volume by a given percentage. Args: percentage (int): The percentage (as an integer between 0 and 100) to increase the volume by. Raises: ValueError: if the percentage is >100 or <0.
juraj-google-style
class PromptDepthAnythingPreActResidualLayer(nn.Module):
    """ResidualConvUnit, pre-activate residual unit.

    Args:
        config (`[PromptDepthAnythingConfig]`):
            Model configuration class defining the model architecture.
    """

    def __init__(self, config):
        super().__init__()
        hidden = config.fusion_hidden_size
        self.activation1 = nn.ReLU()
        self.convolution1 = nn.Conv2d(hidden, hidden, kernel_size=3, stride=1, padding=1, bias=True)
        self.activation2 = nn.ReLU()
        self.convolution2 = nn.Conv2d(hidden, hidden, kernel_size=3, stride=1, padding=1, bias=True)

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        # Pre-activation: ReLU before each convolution, then skip connection.
        residual = hidden_state
        out = self.convolution1(self.activation1(hidden_state))
        out = self.convolution2(self.activation2(out))
        return out + residual
ResidualConvUnit, pre-activate residual unit. Args: config (`[PromptDepthAnythingConfig]`): Model configuration class defining the model architecture.
github-repos
def file_name(self, file_name):
    """Updates the file_name.

    Args:
        file_name: The new file name to store on the indicator.

    Returns:
        The response of the update request.
    """
    if not self.can_update():
        self._tcex.handle_error(910, [self.type])
    self._data['fileName'] = file_name
    body = {'fileName': file_name}
    return self.tc_requests.update(
        self.api_type, self.api_sub_type, self.unique_id, body)
Updates the file_name. Args: file_name:
codesearchnet
def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):
    """Updates the candidate generation strategy based on the outcomes.

    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.
        scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):
            Prediction scores of a language modeling head.
        num_matches (`int`):
            The number of matches between the candidate sequences and the
            model predictions.

    Raises:
        NotImplementedError: always; subclasses must override this method.
    """
    raise NotImplementedError(f'{self.__class__} is an abstract class. Only classes inheriting this class can call `update_candidate_strategy`.')
Updates the candidate generation strategy based on the outcomes. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search num_matches (`int`): The number of matches between the candidate sequences and the model predictions.
github-repos
def create_temp_creds(client_id, access_token, start=None, expires=None, scopes=None, name=None):
    """Request temp TC creds with our permanent creds.

    Args:
        client_id (str): the taskcluster client_id to use.
        access_token (str): the taskcluster access_token to use.
        start (str, optional): when the credentials become valid.
            Defaults to 10 minutes ago, for clock skew.
        expires (str, optional): when the credentials expire.
            Defaults to 31 days after 10 minutes ago.
        scopes (list, optional): the scopes to request for the temp creds.
            Defaults to ['assume:project:taskcluster:worker-test-scopes'].
        name (str, optional): the name to associate with the creds.

    Returns:
        dict: the temporary taskcluster credentials.
    """
    # NOTE(review): arrow>=1.0 rejects plural offsets in `.replace()` and uses
    # `.shift()` instead — this code presumably targets an older arrow; confirm.
    now = arrow.utcnow().replace(minutes=(- 10))
    start = start or now.datetime
    expires = expires or now.replace(days=31).datetime
    scopes = scopes or ['assume:project:taskcluster:worker-test-scopes']
    creds = createTemporaryCredentials(
        client_id, access_token, start, expires, scopes, name=name)
    # Normalize any bytes values to str; leave non-bytes values untouched.
    for key, value in creds.items():
        try:
            creds[key] = value.decode('utf-8')
        except (AttributeError, UnicodeDecodeError):
            pass
    return creds
Request temp TC creds with our permanent creds. Args: client_id (str): the taskcluster client_id to use access_token (str): the taskcluster access_token to use start (str, optional): the datetime string when the credentials will start to be valid. Defaults to 10 minutes ago, for clock skew. expires (str, optional): the datetime string when the credentials will expire. Defaults to 31 days after 10 minutes ago. scopes (list, optional): The list of scopes to request for the temp creds. Defaults to ['assume:project:taskcluster:worker-test-scopes', ] name (str, optional): the name to associate with the creds. Returns: dict: the temporary taskcluster credentials.
codesearchnet
def symmetrically_add_atom(self, specie, point, coords_are_cartesian=False):
    """Adds a site at a specified point in a slab.

    Also adds the corresponding site on the other side of the slab to keep
    the two surfaces equivalent.

    Args:
        specie (str): The specie to add.
        point (coords): The coordinate of the site in the slab to add.
        coords_are_cartesian (bool): Is the point in cartesian coordinates.
    """
    # Find the mirror site first, then append both in order.
    mirror_point = self.get_symmetric_site(point, cartesian=coords_are_cartesian)
    for p in (point, mirror_point):
        self.append(specie, p, coords_are_cartesian=coords_are_cartesian)
Class method for adding a site at a specified point in a slab. Will add the corresponding site on the other side of the slab to maintain equivalent surfaces. Args: specie (str): The specie to add point (coords): The coordinate of the site in the slab to add. coords_are_cartesian (bool): Is the point in cartesian coordinates Returns: (Slab): The modified slab
codesearchnet
def ConsumeIdentifier(self):
    """Consumes protocol message field identifier.

    Returns:
        Identifier string.

    Raises:
        ParseError: If an identifier couldn't be consumed.
    """
    identifier = self.token
    if not self._IDENTIFIER.match(identifier):
        raise self._ParseError('Expected identifier.')
    # Advance past the consumed token before returning it.
    self.NextToken()
    return identifier
Consumes protocol message field identifier. Returns: Identifier string. Raises: ParseError: If an identifier couldn't be consumed.
codesearchnet
def show_help(bokehjs_action):
    """Print information about extra Bokeh-specific command line options.

    Args:
        bokehjs_action (str): one of 'built', 'installed', or 'packaged'
            how (or if) BokehJS was installed into the python source tree.

    Returns:
        None
    """
    print()
    if bokehjs_action in ('built', 'installed'):
        for line in (
            "Bokeh-specific options available with 'install' or 'develop':",
            '',
            '  --build-js          build and install a fresh BokehJS',
            '  --install-js        install only last previously built BokehJS',
        ):
            print(line)
    else:
        for line in (
            "Bokeh is using PACKAGED BokehJS, located in 'bokeh.server.static'",
            '',
            'No extra Bokeh-specific options are available.',
        ):
            print(line)
    print()
Print information about extra Bokeh-specific command line options. Args: bokehjs_action (str) : one of 'built', 'installed', or 'packaged' how (or if) BokehJS was installed into the python source tree Returns: None
codesearchnet
def add_role(user, roles):
    """Map roles for user in database.

    Args:
        user (User): User to add roles to.
        roles ([Role]): List of roles to add.

    Returns:
        None
    """
    # A plain loop instead of a list comprehension used only for its side
    # effects; each role is committed individually, as before.
    for role in roles:
        user_role = UserRole()
        user_role.user_id = user.user_id
        user_role.role_id = role.role_id
        db.session.add(user_role)
        db.session.commit()
Map roles for user in database Args: user (User): User to add roles to roles ([Role]): List of roles to add Returns: None
juraj-google-style
def cross_entropy_loss(logits, one_hot_labels, label_smoothing=0, weight=1.0, scope=None):
    """Define a Cross Entropy loss using softmax_cross_entropy_with_logits.

    It can scale the loss by weight factor, and smooth the labels.

    Args:
        logits: [batch_size, num_classes] logits outputs of the network.
        one_hot_labels: [batch_size, num_classes] target one_hot_encoded labels.
        label_smoothing: if greater than 0 then smooth the labels.
        weight: scale the loss by this factor.
        scope: Optional scope for name_scope.

    Returns:
        A tensor with the softmax_cross_entropy loss.
    """
    logits.get_shape().assert_is_compatible_with(one_hot_labels.get_shape())
    with tf.name_scope(scope, 'CrossEntropyLoss', [logits, one_hot_labels]):
        num_classes = one_hot_labels.get_shape()[-1].value
        one_hot_labels = tf.cast(one_hot_labels, logits.dtype)
        if label_smoothing > 0:
            # Blend the one-hot targets toward a uniform distribution.
            smooth_positives = 1.0 - label_smoothing
            smooth_negatives = label_smoothing / num_classes
            one_hot_labels = one_hot_labels * smooth_positives + smooth_negatives
        # NOTE(review): tf.contrib was removed in TF 2.x; this targets TF 1.x.
        cross_entropy = tf.contrib.nn.deprecated_flipped_softmax_cross_entropy_with_logits(
            logits, one_hot_labels, name='xentropy')
        weight = tf.convert_to_tensor(
            weight, dtype=logits.dtype.base_dtype, name='loss_weight')
        loss = tf.multiply(weight, tf.reduce_mean(cross_entropy), name='value')
        tf.add_to_collection(LOSSES_COLLECTION, loss)
        return loss
Define a Cross Entropy loss using softmax_cross_entropy_with_logits. It can scale the loss by weight factor, and smooth the labels. Args: logits: [batch_size, num_classes] logits outputs of the network . one_hot_labels: [batch_size, num_classes] target one_hot_encoded labels. label_smoothing: if greater than 0 then smooth the labels. weight: scale the loss by this factor. scope: Optional scope for name_scope. Returns: A tensor with the softmax_cross_entropy loss.
codesearchnet
def get_value(self):
    """Return the transformed raw or default value.

    If the variable is missing from the project settings, and the setting
    is required, re-raise an AttributeError. If it is not required, return
    the (optionally transformed) default value.

    Returns:
        object: the transformed raw value.
    """
    try:
        raw = self.raw_value
    except (AttributeError, KeyError) as err:
        # Required settings re-raise here; optional ones fall back.
        self._reraise_if_required(err)
        fallback = self.default_value
        return self.transform(fallback) if self.transform_default else fallback
    return self.transform(raw)
Return the transformed raw or default value. If the variable is missing from the project settings, and the setting is required, re-raise an AttributeError. If it is not required, return the (optionally transformed) default value. Returns: object: the transformed raw value.
codesearchnet
def _valid_deleted_file(path):
    """Filters file path against unwanted directories and decides whether
    file is marked as deleted.

    Args:
        path: A string - path to file.

    Returns:
        True if file is desired deleted file, else False.
    """
    # A file counts as deleted when its path carries the " (deleted)" suffix
    # or a "(path inode=N)" marker.
    deleted = path.endswith(' (deleted)') or bool(
        re.compile(r"\(path inode=[0-9]+\)$").search(path))
    # Paths under the excluded directory list are never reported.
    if deleted and re.compile("|".join(LIST_DIRS)).match(path):
        return False
    return deleted
Filters file path against unwanted directories and decides whether file is marked as deleted. Returns: True if file is desired deleted file, else False. Args: path: A string - path to file
juraj-google-style
def post_error(self, name, message):
    """Asynchronously post a user facing error message about a service.

    Args:
        name (string): The name of the service.
        message (string): The user facing error message that will be stored
            for the service and can be queried later.
    """
    error_message = _create_message(name, states.ERROR_LEVEL, message)
    self.post_command(OPERATIONS.CMD_POST_MESSAGE, error_message)
Asynchronously post a user facing error message about a service. Args: name (string): The name of the service message (string): The user facing error message that will be stored for the service and can be queried later.
codesearchnet
def __init__(self, pipeline_proto, pipeline_analyzer, cache_manager, pipeline_graph_renderer):
    """Constructor of DisplayManager.

    Args:
        pipeline_proto: (Pipeline proto)
        pipeline_analyzer: (PipelineAnalyzer) the pipeline analyzer that
            corresponds to this round of execution.
        cache_manager: (interactive_runner.CacheManager) DisplayManager
            fetches the latest status of pipeline execution by querying
            cache_manager.
        pipeline_graph_renderer: (pipeline_graph_renderer.PipelineGraphRenderer)
            decides how a pipeline graph is rendered.
    """
    self._analyzer = pipeline_analyzer
    self._cache_manager = cache_manager
    self._pipeline_graph = interactive_pipeline_graph.InteractivePipelineGraph(
        pipeline_proto,
        required_transforms=self._analyzer.tl_required_trans_ids(),
        referenced_pcollections=self._analyzer.tl_referenced_pcoll_ids(),
        cached_pcollections=self._analyzer.caches_used())
    self._renderer = pipeline_graph_renderer
    self._text_to_print = collections.OrderedDict()
    # Executed transforms exclude the read/write-cache transforms injected
    # by the interactive runner.
    self._text_to_print['summary'] = (
        'Using %s cached PCollections\nExecuting %s of %s transforms.' % (
            len(self._analyzer.caches_used()),
            len(self._analyzer.tl_required_trans_ids())
            - len(self._analyzer.read_cache_ids())
            - len(self._analyzer.write_cache_ids()),
            len(pipeline_proto.components.transforms[
                pipeline_proto.root_transform_ids[0]].subtransforms)))
    self._text_to_print.update(
        {pcoll_id: '' for pcoll_id in self._analyzer.tl_referenced_pcoll_ids()})
    self._pcollection_stats = {}
    for pcoll_id in self._analyzer.tl_referenced_pcoll_ids():
        self._pcollection_stats[pcoll_id] = {
            'cache_label': self._analyzer.pipeline_info().cache_label(pcoll_id),
            'version': -1,
            'sample': [],
        }
    self._producers = {}
    for _, transform in pipeline_proto.components.transforms.items():
        for pcoll_id in transform.outputs.values():
            # Prefer top-level transform names (those without '/') as the
            # recorded producer of a PCollection.
            if pcoll_id not in self._producers or '/' not in transform.unique_name:
                self._producers[pcoll_id] = transform.unique_name
    self._lock = threading.Lock()
    self._periodic_update = False
Constructor of DisplayManager. Args: pipeline_proto: (Pipeline proto) pipeline_analyzer: (PipelineAnalyzer) the pipeline analyzer that corresponds to this round of execution. This will provide more detailed informations about the pipeline cache_manager: (interactive_runner.CacheManager) DisplayManager fetches the latest status of pipeline execution by querying cache_manager. pipeline_graph_renderer: (pipeline_graph_renderer.PipelineGraphRenderer) decides how a pipeline graph is rendered.
github-repos
def filter_by_hoys(self, hoys):
    """Filter the Data Collection based on a list of hoys.

    Args:
        hoys: A list of hours of the year 0..8759.

    Returns:
        A new Data Collection with filtered data.
    """
    # Use a set for O(1) membership tests instead of scanning the list of
    # existing hoys once per requested hoy (was O(n*m)).
    existing_hoys = set(self.header.analysis_period.hoys)
    hoys = [h for h in hoys if h in existing_hoys]
    _moys = tuple(int(hour * 60) for hour in hoys)
    return self.filter_by_moys(_moys)
Filter the Data Collection based on a list of hoys. Args: hoys: A List of hours of the year 0..8759 Returns: A new Data Collection with filtered data
juraj-google-style
def Insert(self, request, global_params=None):
    """Creates a new empty dataset.

    Args:
        request: (BigqueryDatasetsInsertRequest) input message.
        global_params: (StandardQueryParameters, default: None) global
            arguments.

    Returns:
        (Dataset) The response message.
    """
    method_config = self.GetMethodConfig('Insert')
    return self._RunMethod(method_config, request, global_params=global_params)
Creates a new empty dataset. Args: request: (BigqueryDatasetsInsertRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Dataset) The response message.
github-repos
def groups_createChild(self, *, channel: str, **kwargs) -> SlackResponse:
    """Clones and archives a private channel.

    Args:
        channel (str): The group id. e.g. 'G1234567890'
    """
    self._validate_xoxp_token()
    kwargs['channel'] = channel
    return self.api_call('groups.createChild', http_verb='GET', params=kwargs)
Clones and archives a private channel. Args: channel (str): The group id. e.g. 'G1234567890'
codesearchnet
def CopyFromStringTuple(self, time_elements_tuple):
    """Copies time elements from string-based time elements tuple.

    Args:
      time_elements_tuple (Optional[tuple[str, str, str, str, str, str, str]]):
          time elements, contains year, month, day of month, hours, minutes,
          seconds and microseconds.

    Raises:
      ValueError: if the time elements tuple is invalid.
    """
    if len(time_elements_tuple) < 7:
      raise ValueError((
          'Invalid time elements tuple at least 7 elements required,'
          'got: {0:d}').format(len(time_elements_tuple)))
    year, month, day_of_month, hours, minutes, seconds, microseconds = (
        time_elements_tuple)
    try:
      # Parse as base 10 so strings with leading zeros are accepted.
      microseconds = int(microseconds, 10)
    except (TypeError, ValueError):
      raise ValueError('Invalid microsecond value: {0!s}'.format(microseconds))
    if microseconds < 0 or microseconds >= definitions.MICROSECONDS_PER_SECOND:
      raise ValueError('Invalid number of microseconds.')
    # The base class stores sub-second precision as a decimal fraction of a
    # second, passed along as a string.
    fraction_of_second = (
        decimal.Decimal(microseconds) / definitions.MICROSECONDS_PER_SECOND)
    time_elements_tuple = (
        year, month, day_of_month, hours, minutes, seconds,
        str(fraction_of_second))
    super(TimeElementsInMicroseconds, self).CopyFromStringTuple(
        time_elements_tuple)
Copies time elements from string-based time elements tuple. Args: time_elements_tuple (Optional[tuple[str, str, str, str, str, str, str]]): time elements, contains year, month, day of month, hours, minutes, seconds and microseconds. Raises: ValueError: if the time elements tuple is invalid.
juraj-google-style
def set_pattern_actual_step(self, patternnumber, value):
    """Set the 'actual step' parameter for a given pattern.

    Args:
        patternnumber (integer): 0-7
        value (integer): 0-7
    """
    # Validate both arguments before touching the device register.
    _checkPatternNumber(patternnumber)
    _checkStepNumber(value)
    register = _calculateRegisterAddress('actualstep', patternnumber)
    self.write_register(register, value, 0)
Set the 'actual step' parameter for a given pattern. Args: * patternnumber (integer): 0-7 * value (integer): 0-7
juraj-google-style
def hard_shrink(x, threshold=0.5):
    """Hard Shrink activation function.

    The Hard Shrink function is a thresholding operation defined as:
    `f(x) = x` if `|x| > threshold`, `f(x) = 0` otherwise.

    Args:
        x: Input tensor.
        threshold: Threshold value. Defaults to 0.5.

    Returns:
        A tensor with the same shape as `x`.
    """
    # Symbolic tensors are routed through the op class; eager values go
    # straight to the backend implementation.
    if any_symbolic_tensors((x,)):
        return HardShrink(threshold).symbolic_call(x)
    return backend.nn.hard_shrink(x, threshold)
Hard Shrink activation function. The Hard Shrink function is a thresholding operation defined as: `f(x) = x` if `|x| > threshold`, `f(x) = 0` otherwise. Args: x: Input tensor. threshold: Threshold value. Defaults to 0.5. Returns: A tensor with the same shape as `x`. Example: >>> x = np.array([-0.5, 0., 1.]) >>> x_hard_shrink = keras.ops.hard_shrink(x) >>> print(x_hard_shrink) array([0. 0. 1.], shape=(3,), dtype=float64)
github-repos
def prune_unconnected_ops_from_xla(prune_graph: ops.Graph):
    """Prunes unconnected ops as listed in _UNCONNECTED_OPS_TO_PRUNE.

    Args:
        prune_graph: A tensorflow graph from which we wish to prune
            unconnected ops as listed in _UNCONNECTED_OPS_TO_PRUNE. In
            general, these ops should have no inputs and no consumers. These
            can often be left behind due to graph construction rewiring (for
            instance TF-Hub). While they never execute, they will cause XLA
            compile to fail so we strip them from XLA compile by removing the
            tpu_replicate attribute.
    """
    # Walk the main graph plus any function graphs attached to it.
    for graph in [prune_graph] + [f for f in prune_graph._functions.values()]:
        if not isinstance(graph, ops.Graph):
            continue
        for op in graph.get_operations():
            if op.type not in _UNCONNECTED_OPS_TO_PRUNE:
                continue
            # Only prune ops whose outputs are entirely unconsumed.
            if not any(output.consumers() for output in op.outputs):
                logging.info(
                    'Pruning OP %s of type %s from XLA Compile due to it being disconnected.',
                    op.name, op.type)
                op._clear_attr(tpu_replication._TPU_REPLICATE_ATTR)
Prunes unconnected ops as listed in _UNCONNECTED_OPS_TO_PRUNE. Args: prune_graph: A tensorflow graph from which we wish to prune unconnected ops as listed in _UNCONNECTED_OPS_TO_PRUNE. In general, these ops should have no inputs and no consumers. These can often be left behind due to graph construction rewiring (for instance TF-Hub). While they never execute, they will cause XLA compile to fail so we strip them from XLA compile by removing the tpu_replicate attribute.
github-repos
def _full_batch_training_op(self, inputs, num_clusters, cluster_idx_list, cluster_centers):
    """Creates an op for training for full batch case.

    Args:
        inputs: list of input Tensors.
        num_clusters: an integer Tensor providing the number of clusters.
        cluster_idx_list: A vector (or list of vectors). Each element in the
            vector corresponds to an input row in 'inp' and specifies the
            cluster id corresponding to the input.
        cluster_centers: Tensor Ref of cluster centers.

    Returns:
        An op for doing an update of mini-batch k-means.
    """
    cluster_sums = []
    cluster_counts = []
    # Guards against division by zero for empty clusters.
    epsilon = constant_op.constant(1e-06, dtype=inputs[0].dtype)
    for inp, cluster_idx in zip(inputs, cluster_idx_list):
        with ops.colocate_with(inp, ignore_existing=True):
            cluster_sums.append(
                math_ops.unsorted_segment_sum(inp, cluster_idx, num_clusters))
            cluster_counts.append(
                math_ops.unsorted_segment_sum(
                    array_ops.reshape(
                        array_ops.ones(
                            array_ops.reshape(array_ops.shape(inp)[0], [-1])),
                        [-1, 1]),
                    cluster_idx, num_clusters))
    with ops.colocate_with(cluster_centers, ignore_existing=True):
        new_clusters_centers = math_ops.add_n(cluster_sums) / (
            math_ops.cast(math_ops.add_n(cluster_counts), cluster_sums[0].dtype)
            + epsilon)
        if self._clusters_l2_normalized():
            new_clusters_centers = nn_impl.l2_normalize(new_clusters_centers, dim=1)
    return state_ops.assign(cluster_centers, new_clusters_centers)
Creates an op for training for full batch case. Args: inputs: list of input Tensors. num_clusters: an integer Tensor providing the number of clusters. cluster_idx_list: A vector (or list of vectors). Each element in the vector corresponds to an input row in 'inp' and specifies the cluster id corresponding to the input. cluster_centers: Tensor Ref of cluster centers. Returns: An op for doing an update of mini-batch k-means.
github-repos
def decode_single_feature_from_dict(feature_k, feature, tfexample_dict):
    """Decode the given feature from the tfexample_dict.

    Args:
        feature_k (str): Feature key in the tfexample_dict.
        feature (FeatureConnector): Connector object to use to decode the
            field.
        tfexample_dict (dict): Dict containing the data to decode.

    Returns:
        decoded_feature: The output of the feature.decode_example.
    """
    if feature.serialized_keys:
        # Composite feature: gather each sub-key under "feature_k/sub_key".
        data_to_decode = {
            k: tfexample_dict[posixpath.join(feature_k, k)]
            for k in feature.serialized_keys
        }
    else:
        data_to_decode = tfexample_dict[feature_k]
    return feature.decode_example(data_to_decode)
Decode the given feature from the tfexample_dict. Args: feature_k (str): Feature key in the tfexample_dict feature (FeatureConnector): Connector object to use to decode the field tfexample_dict (dict): Dict containing the data to decode. Returns: decoded_feature: The output of the feature.decode_example
codesearchnet
def UploadOperations(self, operations, is_last=False):
    """Uploads operations to the given uploadUrl in incremental steps.

    Note: Each list of operations is expected to contain operations of the
    same type, similar to how one would normally send operations in an
    AdWords API Service request.

    Args:
      operations: one or more lists of operations as would be sent to the
        AdWords API for the associated service.
      is_last: a boolean indicating whether this is the final increment to
        be added to the batch job.

    Raises:
      AdWordsBatchJobServiceInvalidOperationError: if the incremental upload
        was already completed by a previous call with is_last=True.
      urllib2.HTTPError: if the server responds with an HTTP error other
        than 308.
    """
    if self._is_last:
      raise googleads.errors.AdWordsBatchJobServiceInvalidOperationError(
          'Can\'t add new operations to a completed incremental upload.')
    req = self._request_builder.BuildUploadRequest(
        self._upload_url, operations,
        current_content_length=self._current_content_length, is_last=is_last)
    try:
      _batch_job_logger.debug('Outgoing request: %s %s %s',
                              req.get_full_url(), req.headers, req.data)
      self._url_opener.open(req)
      if _batch_job_logger.isEnabledFor(logging.INFO):
        _batch_job_logger.info('Request summary: %s',
                               self._ExtractRequestSummaryFields(req))
    except urllib2.HTTPError as e:
      # HTTP 308 (Resume Incomplete) is the expected response for a
      # successful intermediate increment of a resumable upload; any other
      # status is a genuine failure and is re-raised.
      if e.code != 308:
        if _batch_job_logger.isEnabledFor(logging.WARNING):
          _batch_job_logger.warning(
              'Request summary: %s',
              self._ExtractRequestSummaryFields(req, error=e))
        raise
    # Track how many bytes have been sent so the next increment resumes at
    # the right offset.
    self._current_content_length += len(req.data)
    self._is_last = is_last
Uploads operations to the given uploadUrl in incremental steps. Note: Each list of operations is expected to contain operations of the same type, similar to how one would normally send operations in an AdWords API Service request. Args: operations: one or more lists of operations as would be sent to the AdWords API for the associated service. is_last: a boolean indicating whether this is the final increment to be added to the batch job.
juraj-google-style
def task_indices(self, job_name):
    """Returns a list of valid task indices in the given job.

    Args:
        job_name: The string name of a job in this cluster.

    Returns:
        A list of valid task indices in the given job.

    Raises:
        ValueError: If `job_name` does not name a job in this cluster.
    """
    try:
        job = self._cluster_spec[job_name]
    except KeyError:
        raise ValueError('No such job in cluster: %r' % job_name)
    # The job mapping is keyed by task index.
    return sorted(job)
Returns a list of valid task indices in the given job. Args: job_name: The string name of a job in this cluster. Returns: A list of valid task indices in the given job. Raises: ValueError: If `job_name` does not name a job in this cluster, or no task with index `task_index` is defined in that job.
github-repos
def check_structure(data):
    """Check whether the structure is a flat dictionary.

    If not, try to convert it to a dictionary.

    Args:
        data: Whatever data you have (dict/tuple/list).

    Returns:
        dict: When the conversion was successful or `data` was already good.

    Raises:
        MetaParsingException: When the data couldn't be converted or had a
            bad structure.
    """
    if not isinstance(data, dict):
        try:
            data = _convert_to_dict(data)
        except MetaParsingException:
            raise
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        except Exception:
            raise MetaParsingException(
                "Metadata format has invalid strucure (dict is expected)."
            )
    # `.iteritems()` was removed in Python 3; `.items()` is equivalent here.
    # Both the key and its value must be of an allowed primitive type.
    for key, val in data.items():
        if type(key) not in _ALLOWED_TYPES or type(val) not in _ALLOWED_TYPES:
            raise MetaParsingException(
                "Can't decode the meta file - invalid type of keyword '" +
                str(key) + "'!"
            )
    return data
Check whether the structure is flat dictionary. If not, try to convert it to dictionary. Args: data: Whatever data you have (dict/tuple/list). Returns: dict: When the conversion was successful or `data` was already `good`. Raises: MetaParsingException: When the data couldn't be converted or had `bad` structure.
juraj-google-style
def random_sign_uniform(shape, minval=None, maxval=None, dtype=dtypes.float32, seed=None):
    """Tensor with (possibly complex) random entries from a "sign Uniform".

    Letting `Z` be a random variable equal to `-1` and `1` with equal
    probability, samples from this `Op` are distributed like
    `Z * X, where X ~ Uniform[minval, maxval]` (real dtype) or
    `Z * (X + iY), where X, Y ~ Uniform[minval, maxval]` (complex dtype).

    Args:
        shape: `TensorShape` or Python list. Shape of the returned tensor.
        minval: `0-D` `Tensor` giving the minimum values.
        maxval: `0-D` `Tensor` giving the maximum values.
        dtype: `TensorFlow` `dtype` or Python dtype.
        seed: Python integer seed for the RNG.

    Returns:
        `Tensor` with desired shape and dtype.
    """
    dtype = dtypes.as_dtype(dtype)
    with ops.name_scope('random_sign_uniform'):
        unsigned_samples = random_uniform(
            shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
        if seed is not None:
            # Offset the seed so the sign draw is decorrelated from the
            # magnitude draw.
            seed += 12
        signs = math_ops.sign(
            random_ops.random_uniform(shape, minval=-1.0, maxval=1.0, seed=seed))
        return unsigned_samples * math_ops.cast(signs, unsigned_samples.dtype)
Tensor with (possibly complex) random entries from a "sign Uniform". Letting `Z` be a random variable equal to `-1` and `1` with equal probability, Samples from this `Op` are distributed like ``` Z * X, where X ~ Uniform[minval, maxval], if dtype is real, Z * (X + iY), where X, Y ~ Uniform[minval, maxval], if dtype is complex. ``` Args: shape: `TensorShape` or Python list. Shape of the returned tensor. minval: `0-D` `Tensor` giving the minimum values. maxval: `0-D` `Tensor` giving the maximum values. dtype: `TensorFlow` `dtype` or Python dtype seed: Python integer seed for the RNG. Returns: `Tensor` with desired shape and dtype.
github-repos
def duration_distance(item_a, item_b, max_value):
    """Absolute difference in the duration of two items.

    Args:
        item_a: STObject from the first set in TrackMatcher.
        item_b: STObject from the second set in TrackMatcher.
        max_value: Maximum distance value used as scaling value and upper
            constraint.

    Returns:
        Distance value between 0 and 1.
    """
    # Duration is the number of time steps each object spans.
    difference = np.abs(item_a.times.size - item_b.times.size)
    return np.minimum(difference, max_value) / float(max_value)
Absolute difference in the duration of two items Args: item_a: STObject from the first set in TrackMatcher item_b: STObject from the second set in TrackMatcher max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
codesearchnet
def fit_to_cols(what, indent='', cols=79):
    """Wrap the given text to the columns, prepending the indent to each line.

    Args:
        what (str): text to wrap.
        indent (str): indentation to use.
        cols (int): columns to wrap to.

    Returns:
        str: Wrapped text.
    """
    wrapped_lines = []
    # split_line peels one wrapped line off the front of the remaining text.
    while what:
        what, current_line = split_line(what=what, cols=cols, indent=indent)
        wrapped_lines.append(current_line)
    return '\n'.join(wrapped_lines)
Wrap the given text to the columns, prepending the indent to each line. Args: what(str): text to wrap. indent(str): indentation to use. cols(int): columns to wrap to. Returns: str: Wrapped text
juraj-google-style
def weighted_average(counts: tf.Tensor, values: tf.Tensor):
    """Returns the weighted average of input values.

    Subtensor `i` of `values` is multiplied by `counts[i]` resulting in a
    weighted version of values; the mean is then taken across the first
    dimension.

    Args:
        counts: Non-negative integers of shape [batch_size].
        values: Floats of shape [batch_size, ...].

    Returns:
        Tensor of shape [...] which is the weighted average.
    """
    float_counts = tf.cast(counts, tf.float32)
    # Contract the batch dimension against the counts, keeping trailing dims.
    weighted_sum = tf.einsum('i,i...->...', float_counts, values)
    return weighted_sum / tf.reduce_sum(float_counts)
Returns the weighted average of input values. Subtensor `i` of `values` is multiplied by `counts[i]` resulting in a weighted version of values; the mean is then taken across the first dimension. Args: counts: Non-negative integers of shape [batch_size]. values: Floats of shape [batch_size, ...]. Returns: Tensor of shape [...] which is the weighted average.
github-repos
def WriteEventBody(self, event):
    """Writes the body of an event to the output.

    Args:
        event (EventObject): event.
    """
    # Format then sanitize every configured field before joining.
    sanitized_values = [
        self._SanitizeField(
            self._dynamic_fields_helper.GetFormattedField(event, field_name))
        for field_name in self._fields
    ]
    output_line = '{0:s}\n'.format(self._field_delimiter.join(sanitized_values))
    self._output_writer.Write(output_line)
Writes the body of an event to the output. Args: event (EventObject): event.
juraj-google-style
def _RunActions(self, rule, client_id):
    """Run all the actions specified in the rule.

    Args:
      rule: Rule which actions are to be executed.
      client_id: Id of a client where rule's actions are to be executed.

    Returns:
      Number of actions started.
    """
    actions_count = 0
    for action in rule.actions:
      try:
        # Actions run on behalf of the Foreman, not the original user.
        token = self.token.Copy()
        token.username = "Foreman"

        if action.HasField("hunt_id"):
          # Avoid double-starting the same hunt on the same client.
          if self._CheckIfHuntTaskWasAssigned(client_id, action.hunt_id):
            # NOTE(review): the placeholders appear swapped — the first %s is
            # filled with client_id, the second with action.hunt_id; confirm.
            logging.info(
                "Foreman: ignoring hunt %s on client %s: was started "
                "here before", client_id, action.hunt_id)
          else:
            logging.info("Foreman: Starting hunt %s on client %s.",
                         action.hunt_id, client_id)
            flow_cls = registry.AFF4FlowRegistry.FlowClassByName(
                action.hunt_name)
            flow_cls.StartClients(action.hunt_id, [client_id])
            actions_count += 1
        else:
          flow.StartAFF4Flow(
              client_id=client_id,
              flow_name=action.flow_name,
              token=token,
              **action.argv.ToDict())
          actions_count += 1
      # A failing action must not abort the remaining actions for this
      # client, so everything is caught and logged.
      except Exception as e:
        # NOTE(review): the "client %s" placeholder is filled with
        # action.hunt_id, not client_id — confirm intent.
        logging.exception("Failure running foreman action on client %s: %s",
                          action.hunt_id, e)
    return actions_count
Run all the actions specified in the rule. Args: rule: Rule which actions are to be executed. client_id: Id of a client where rule's actions are to be executed. Returns: Number of actions started.
juraj-google-style
def get_col_info(table_name, col_name, meta_file):
    """Return the content and metadata of a given column.

    Args:
        table_name (str): Name of the table.
        col_name (str): Name of the column.
        meta_file (str): Path to the meta.json file.

    Returns:
        tuple(pandas.Series, dict)
    """
    with open(meta_file, 'r') as f:
        meta = json.load(f)
    data_table, table = load_data_table(table_name, meta_file, meta)
    # Find the field entry describing the requested column.
    for field in table['fields']:
        if field['name'] == col_name:
            col_meta = field
    col = data_table[col_name]
    return col, col_meta
Return the content and metadata of a fiven column. Args: table_name(str): Name of the table. col_name(str): Name of the column. meta_file(str): Path to the meta.json file. Returns: tuple(pandas.Series, dict)
codesearchnet
def calculate_view_box(layers, aspect_ratio, margin=DEFAULT_VIEW_BOX_MARGIN):
    """Calculates the size of the SVG viewBox to use.

    Args:
        layers (list): the layers in the image.
        aspect_ratio (float): the height of the output divided by the width.
        margin (float): minimum amount of buffer to add around the image,
            relative to the total dimensions.

    Returns:
        tuple: a 4-tuple of floats representing the viewBox according to SVG
            specifications ``(x, y, width, height)``.
    """
    min_x = min(np.nanmin(x) for x, y in layers)
    max_x = max(np.nanmax(x) for x, y in layers)
    min_y = min(np.nanmin(y) for x, y in layers)
    max_y = max(np.nanmax(y) for x, y in layers)
    height = max_y - min_y
    width = max_x - min_x
    # Expand whichever dimension is limiting so the viewBox keeps the
    # requested aspect ratio once the margin is applied.
    if height > width * aspect_ratio:
        adj_height = height * (1.0 + margin)
        adj_width = adj_height / aspect_ratio
    else:
        adj_width = width * (1.0 + margin)
        adj_height = adj_width * aspect_ratio
    width_buffer = (adj_width - width) / 2.0
    height_buffer = (adj_height - height) / 2.0
    return (min_x - width_buffer, min_y - height_buffer, adj_width, adj_height)
Calculates the size of the SVG viewBox to use. Args: layers (list): the layers in the image aspect_ratio (float): the height of the output divided by the width margin (float): minimum amount of buffer to add around the image, relative to the total dimensions Returns: tuple: a 4-tuple of floats representing the viewBox according to SVG specifications ``(x, y, width, height)``.
codesearchnet
def _get_fields(ast):
    """Return a list of vertex fields, and a list of property fields, for the
    given AST node.

    Also verifies that all property fields for the AST node appear before all
    vertex fields, raising GraphQLCompilationError if that is not the case.

    Args:
        ast: GraphQL AST node, obtained from the graphql library.

    Returns:
        tuple of two lists:
            - the first list contains ASTs for vertex fields
            - the second list contains ASTs for property fields
    """
    if not ast.selection_set:
        return [], []

    property_fields = []
    vertex_fields = []
    seen_field_names = set()
    switched_to_vertices = False
    for field_ast in ast.selection_set.selections:
        # Non-field selections (e.g. fragments) are skipped.
        if not isinstance(field_ast, Field):
            continue
        name = get_ast_field_name(field_ast)
        if name in seen_field_names:
            raise GraphQLCompilationError(
                u'Encountered repeated field name: {}'.format(name))
        seen_field_names.add(name)
        if is_vertex_field_name(name):
            switched_to_vertices = True
            vertex_fields.append(field_ast)
        elif switched_to_vertices:
            # Property fields must all precede vertex fields.
            raise GraphQLCompilationError(
                u'Encountered property field {} after vertex fields!'.format(name))
        else:
            property_fields.append(field_ast)
    return vertex_fields, property_fields
Return a list of vertex fields, and a list of property fields, for the given AST node. Also verifies that all property fields for the AST node appear before all vertex fields, raising GraphQLCompilationError if that is not the case. Args: ast: GraphQL AST node, obtained from the graphql library Returns: tuple of two lists - the first list contains ASTs for vertex fields - the second list contains ASTs for property fields
codesearchnet
def process_gatt_service(services, event):
    """Process a BGAPI event containing a GATT service description and add it
    to a dictionary.

    Args:
        services (dict): A dictionary of discovered services that is updated
            with this event.
        event (BGAPIPacket): An event containing a GATT service.
    """
    # The fixed part of the payload is 5 bytes (uint8 handle + two uint16
    # handles); the remainder is the variable-length service UUID.
    uuid_length = len(event.payload) - 5
    handle, start, end, raw_uuid = unpack('<BHH%ds' % uuid_length, event.payload)
    uuid = process_uuid(raw_uuid)
    services[uuid] = {'uuid_raw': uuid, 'start_handle': start, 'end_handle': end}
Process a BGAPI event containing a GATT service description and add it to a dictionary Args: services (dict): A dictionary of discovered services that is updated with this event event (BGAPIPacket): An event containing a GATT service
juraj-google-style
def commit_output(cls, shard_ctx, iterator):
    """Saves output references when a shard finishes.

    Inside end_shard(), an output writer can optionally use this method to
    persist some references to the outputs from this shard (e.g a list of
    filenames).

    Args:
        cls: the output writer class.
        shard_ctx: map_job_context.ShardContext for this shard.
        iterator: an iterator that yields json serializable references to
            the outputs from this shard. Contents from the iterator can be
            accessible later via map_job.Job.get_outputs.
    """
    # Materialize the iterator so the references survive serialization.
    shard_ctx._state.writer_state['outs'] = tuple(iterator)
Saves output references when a shard finishes. Inside end_shard(), an output writer can optionally use this method to persist some references to the outputs from this shard (e.g a list of filenames) Args: shard_ctx: map_job_context.ShardContext for this shard. iterator: an iterator that yields json serializable references to the outputs from this shard. Contents from the iterator can be accessible later via map_job.Job.get_outputs.
codesearchnet
def set_bias(self, value):
    """Set all the bias in the LM head.

    Args:
        value (`Dict[tf.Variable]`): All the new bias attached to an LM head.
    """
    # Fetch the LM head once instead of calling the accessor twice.
    lm_head = self.get_lm_head()
    if lm_head is not None:
        try:
            lm_head.set_bias(value)
        except AttributeError:
            # Building the model materializes the weights so that
            # `set_bias` becomes available.
            self.build_in_name_scope()
            lm_head.set_bias(value)
Set all the bias in the LM head. Args: value (`Dict[tf.Variable]`): All the new bias attached to an LM head.
github-repos
def get_variable(self, feature_column, name):
    """Returns an existing variable.

    Args:
        feature_column: A `FeatureColumn` object this variable corresponds to.
        name: variable name.

    Raises:
        NotImplementedError: always; subclasses must override this method.
    """
    del feature_column, name  # Unused in the abstract base implementation.
    raise NotImplementedError('StateManager.get_var')
Returns an existing variable. Args: feature_column: A `FeatureColumn` object this variable corresponds to. name: variable name.
github-repos
def abs_path_from_base(base_path, rel_path):
    """Join a base and a relative path and return an absolute path to the
    resulting location.

    Args:
        base_path: str
            Relative or absolute path to prepend to ``rel_path``.
        rel_path: str
            Path relative to the location of the module file from which this
            function is called.

    Returns:
        str: Absolute path to the location specified by ``rel_path``.
    """
    # Resolve relative to the *caller's* module file (one frame up).
    caller_dir = os.path.dirname(sys._getframe(1).f_code.co_filename)
    return os.path.abspath(os.path.join(caller_dir, base_path, rel_path))
Join a base and a relative path and return an absolute path to the resulting location. Args: base_path: str Relative or absolute path to prepend to ``rel_path``. rel_path: str Path relative to the location of the module file from which this function is called. Returns: str : Absolute path to the location specified by ``rel_path``.
codesearchnet
def setting_address(key):
    """Computes the radix address for the given setting key.

    Keys are broken into four parts, based on the dots in the string. Each
    piece is hashed with _short_hash; missing pieces are padded with
    _EMPTY_PART, and the config namespace is prepended.

    Args:
        key (str): the setting key.

    Returns:
        str: the computed address.
    """
    parts = key.split('.', maxsplit=(_MAX_KEY_PARTS - 1))
    hashed_parts = [_short_hash(part.encode()) for part in parts]
    # Pad out to exactly _MAX_KEY_PARTS address segments.
    padding = [_EMPTY_PART] * (_MAX_KEY_PARTS - len(hashed_parts))
    return CONFIG_STATE_NAMESPACE + ''.join(hashed_parts + padding)
Computes the radix address for the given setting key. Keys are broken into four parts, based on the dots in the string. For example, the key `a.b.c` address is computed based on `a`, `b`, `c` and the empty string. A longer key, for example `a.b.c.d.e`, is still broken into four parts, but the remaining pieces are in the last part: `a`, `b`, `c` and `d.e`. Each of these peices has a short hash computed (the first 16 characters of its SHA256 hash in hex), and is joined into a single address, with the config namespace (`000000`) added at the beginning. Args: key (str): the setting key Returns: str: the computed address
codesearchnet
def update(self, forecasts, observations):
    """Update reliability counts with a set of forecasts and observations.

    For each probability bin ``[t, t+1)``, increments the count of
    forecasts falling in the bin (Total_Freq) and of those whose matching
    observation met or exceeded the event threshold (Positive_Freq).

    Args:
        forecasts (numpy.ndarray): Array of forecast probability values.
        observations (numpy.ndarray): Array of observation values.
    """
    # The last threshold only closes the final bin, hence [:-1].
    for (t, threshold) in enumerate(self.thresholds[:(- 1)]):
        self.frequencies.loc[(t, 'Positive_Freq')] += np.count_nonzero(
            (((threshold <= forecasts) & (forecasts < self.thresholds[(t + 1)]))
             & (observations >= self.obs_threshold)))
        self.frequencies.loc[(t, 'Total_Freq')] += np.count_nonzero(
            ((threshold <= forecasts) & (forecasts < self.thresholds[(t + 1)])))
Update the statistics with a set of forecasts and observations. Args: forecasts (numpy.ndarray): Array of forecast probability values observations (numpy.ndarray): Array of observation values
codesearchnet
def makecontinuum(cube, **kwargs):
    """Make a continuum array by averaging a cube over channels.

    Args:
        cube (decode.cube): Decode cube which will be averaged over channels.
        **kwargs: Optional arguments.
            weight: Per-channel weights; the continuum is the
                inverse-variance (1/weight**2) weighted mean over 'ch'.
                NOTE(review): `.sum(dim='ch')` is called on `1/weight**2`,
                so a non-scalar weight is presumably expected to be an
                xarray with a 'ch' dimension — confirm against callers.

    Returns:
        decode.cube: 2D continuum cube.

    Raises:
        KeyError: If the deprecated `inchs`/`exchs` options are passed.
    """
    inchs = kwargs.pop('inchs', None)
    exchs = kwargs.pop('exchs', None)
    if (inchs is not None) or (exchs is not None):
        raise KeyError('Inchs and exchs are no longer supported. Use weight instead.')
    # BUG FIX: `weight` was read below without ever being assigned, which
    # raised NameError on every call; pop it from kwargs like the others.
    weight = kwargs.pop('weight', None)
    if weight is None:
        weight = 1.
    cont = (cube * (1 / weight**2)).sum(dim='ch') / (1 / weight**2).sum(dim='ch')
    xcoords = {'x': cube.x.values}
    ycoords = {'y': cube.y.values}
    chcoords = {'masterid': np.array([0]), 'kidid': np.array([0]),
                'kidfq': np.array([0]), 'kidtp': np.array([1])}
    scalarcoords = {'coordsys': cube.coordsys.values, 'datatype': cube.datatype.values,
                    'xref': cube.xref.values, 'yref': cube.yref.values}
    return dc.cube(cont.values, xcoords=xcoords, ycoords=ycoords,
                   chcoords=chcoords, scalarcoords=scalarcoords)
Make a continuum array. Args: cube (decode.cube): Decode cube which will be averaged over channels. kwargs (optional): Other arguments. inchs (list): Included channel kidids. exchs (list): Excluded channel kidids. Returns: decode cube (decode.cube): Decode cube (2d).
juraj-google-style
def _AskUser(self):
    """Prompt the user for the next pager action.

    Returns:
        str: the single character entered by the user.
    """
    if self._show_percent:
        # Percentage of the text displayed so far.
        progress = int(((self._displayed * 100) / len(self._text.splitlines())))
        progress_text = (' (%d%%)' % progress)
    else:
        progress_text = ''
    question = AnsiText(
        ('Enter: next line, Space: next page, b: prev page, q: quit.%s'
         % progress_text), ['green'])
    sys.stdout.write(question)
    sys.stdout.flush()
    ch = self._GetCh()
    # Erase the prompt line by overwriting it with spaces.
    sys.stdout.write(('\r%s\r' % (' ' * len(question))))
    sys.stdout.flush()
    return ch
Prompt the user for the next action. Returns: A string, the character entered by the user.
codesearchnet
def find_bad_commit(target_test, start_commit, end_commit):
    """Find (backward) the earliest commit at which `target_test` fails.

    Runs `git bisect` between `start_commit` (latest) and `end_commit`
    (earliest) with a generated script that executes the test.

    Args:
        target_test (`str`): The test to check.
        start_commit (`str`): The latest commit.
        end_commit (`str`): The earliest commit.

    Returns:
        `str`: The first bad commit, or None if bisect reports none.

    Raises:
        ValueError: if `git bisect` itself fails to run.
    """
    if start_commit == end_commit:
        return start_commit
    create_script(target_test=target_test)
    bash = f'\ngit bisect reset\ngit bisect start {start_commit} {end_commit}\ngit bisect run python3 target_script.py\n'
    with open('run_git_bisect.sh', 'w') as fp:
        fp.write(bash.strip())
    result = subprocess.run(['bash', 'run_git_bisect.sh'], capture_output=True, text=True)
    print(result.stdout)
    if 'error: bisect run failed' in result.stderr:
        # Surface both the bisect failure and any pytest launch failure.
        index = result.stderr.find('error: bisect run failed')
        bash_error = result.stderr[index:]
        error_msg = f'Error when running git bisect:\nbash error: {bash_error}'
        pattern = 'pytest failed to run: .+'
        pytest_errors = re.findall(pattern, result.stdout)
        if len(pytest_errors) > 0:
            pytest_error = pytest_errors[0]
            index = pytest_error.find('pytest failed to run: ')
            index += len('pytest failed to run: ')
            pytest_error = pytest_error[index:]
            error_msg += f'pytest error: {pytest_error}'
        raise ValueError(error_msg)
    pattern = '(.+) is the first bad commit'
    commits = re.findall(pattern, result.stdout)
    bad_commit = None
    if len(commits) > 0:
        bad_commit = commits[0]
    print(f'Between `start_commit` {start_commit} and `end_commit` {end_commit}')
    print(f'bad_commit: {bad_commit}\n')
    return bad_commit
Find (backward) the earliest commit between `start_commit` and `end_commit` at which `target_test` fails. Args: target_test (`str`): The test to check. start_commit (`str`): The latest commit. end_commit (`str`): The earliest commit. Returns: `str`: The earliest commit at which `target_test` fails.
github-repos
def _AddPropertiesForNonRepeatedScalarField(field, cls):
    """Add a public property for a nonrepeated, scalar proto message field.

    Clients use the property to get and directly set the field's value;
    setting performs type-checking and marks the message as modified.

    Args:
        field: A FieldDescriptor for this field.
        cls: The class we're constructing.
    """
    proto_field_name = field.name
    property_name = _PropertyName(proto_field_name)
    type_checker = type_checkers.GetTypeChecker(field)
    default_value = field.default_value
    valid_values = set()
    is_proto3 = field.containing_type.syntax == "proto3"

    def getter(self):
        return self._fields.get(field, default_value)
    getter.__module__ = None
    getter.__doc__ = 'Getter for %s.' % proto_field_name

    # In proto3 (outside oneofs) a default-valued scalar is not serialized,
    # so setting the default clears the field instead of storing it.
    clear_when_set_to_default = is_proto3 and not field.containing_oneof

    def field_setter(self, new_value):
        new_value = type_checker.CheckValue(new_value)
        if clear_when_set_to_default and not new_value:
            self._fields.pop(field, None)
        else:
            self._fields[field] = new_value
        # Only invalidate cached sizes once; _Modified is idempotent-guarded.
        if not self._cached_byte_size_dirty:
            self._Modified()

    if field.containing_oneof:
        def setter(self, new_value):
            field_setter(self, new_value)
            # Track which member of the oneof is currently set.
            self._UpdateOneofState(field)
    else:
        setter = field_setter

    setter.__module__ = None
    setter.__doc__ = 'Setter for %s.' % proto_field_name
    doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
    setattr(cls, property_name, property(getter, setter, doc=doc))
Adds a public property for a nonrepeated, scalar protocol message field. Clients can use this property to get and directly set the value of the field. Note that when the client sets the value of a field by using this property, all necessary "has" bits are set as a side-effect, and we also perform type-checking. Args: field: A FieldDescriptor for this field. cls: The class we're constructing.
juraj-google-style
def get_experiment(self, coll_name, exp_name):
    """Convenience method that gets an experiment resource.

    Args:
        coll_name (str): Collection name.
        exp_name (str): Experiment name.

    Returns:
        (ExperimentResource): the fetched experiment resource.
    """
    exp = ExperimentResource(exp_name, coll_name)
    return self.get_project(exp)
Convenience method that gets experiment resource. Args: coll_name (str): Collection name exp_name (str): Experiment name Returns: (ExperimentResource)
juraj-google-style
def initialize_logger(debug):
    """Set up and return the logger used by the library.

    Args:
        debug: Whether to log at DEBUG level (otherwise INFO).

    Returns:
        logging.Logger: the configured 'cucco' logger.
    """
    level = logging.DEBUG if debug else logging.INFO
    logger = logging.getLogger('cucco')
    logger.setLevel(level)
    # BUG FIX: previously a new StreamHandler was attached on every call,
    # so repeated initialization produced duplicated log lines. Attach the
    # console handler only once.
    if not logger.handlers:
        formatter = logging.Formatter('%(asctime)s %(levelname).1s %(message)s')
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
    # Keep handler levels in sync with the requested level on every call.
    for handler in logger.handlers:
        handler.setLevel(level)
    return logger
Set up logger to be used by the library. Args: debug: Whether to use debug level or not. Returns: A logger ready to be used.
juraj-google-style
def process_layer(layer_data):
    """Deserialize a layer and queue its inbound nodes for processing.

    NOTE(review): this is a closure — `created_layers`, `custom_objects`,
    `node_count_by_layer` and `add_unprocessed_node` come from the
    enclosing model-deserialization scope.

    Args:
        layer_data: layer config dict.

    Raises:
        ValueError: In case of improperly formatted `layer_data` dict.
    """
    layer_name = layer_data['name']
    if layer_name in created_layers:
        # Reuse an already-instantiated shared layer.
        layer = created_layers[layer_name]
    else:
        from tensorflow.python.keras.layers import deserialize as deserialize_layer
        layer = deserialize_layer(layer_data, custom_objects=custom_objects)
        created_layers[layer_name] = layer
    node_count_by_layer[layer] = int(_should_skip_first_node(layer))
    inbound_nodes_data = layer_data['inbound_nodes']
    inbound_nodes_data = tf_utils.convert_inner_node_data(inbound_nodes_data, wrap=True)
    for node_data in inbound_nodes_data:
        # Node creation is deferred until all inbound layers exist.
        add_unprocessed_node(layer, node_data)
Deserializes a layer, then call it on appropriate inputs. Args: layer_data: layer config dict. Raises: ValueError: In case of improperly formatted `layer_data` dict.
github-repos
def _serialize_to_proto(self, object_proto=None, **kwargs): del object_proto, kwargs return None
Returns a proto of any type to be saved into the SavedModel. Trackable classes decorated with `register_serializable` should overwrite this method to save metadata for this object to the SavedModel. The proto returned by this function will be passed to `_deserialize_from_proto` in the form of a `google.protobuf.Any` proto. This data is only saved and used by the Python API. Existing C++ loading APIs such as `tensorflow::LoadSavedModel` will not read this field at all. Args: object_proto: A `SavedObject` proto that may be filled by this function. Only the core serializable types (Variable, Function, Constant, Asset) should modify this argument. **kwargs: Future keyword arguments passed to the object during saving. Returns: A proto that serializes this class's type.
github-repos
def parse_verilog(text):
    """Parse a text buffer of Verilog code.

    Drives a token-action stream from VerilogLexer and accumulates module,
    parameter and port information into VerilogModule objects.

    Args:
        text (str): Source code to parse.

    Returns:
        list: parsed VerilogModule objects.
    """
    lex = VerilogLexer
    name = None
    kind = None
    saved_type = None
    mode = 'input'
    ptype = 'wire'
    metacomments = []
    parameters = []
    param_items = []
    generics = []
    ports = collections.OrderedDict()
    sections = []
    port_param_index = 0
    last_item = None
    array_range_start_pos = 0
    objects = []
    for (pos, action, groups) in lex.run(text):
        if (action == 'metacomment'):
            # A metacomment before any item documents the module; after an
            # item it documents that item.
            if (last_item is None):
                metacomments.append(groups[0])
            else:
                last_item.desc = groups[0]
        if (action == 'section_meta'):
            sections.append((port_param_index, groups[0]))
        elif (action == 'module'):
            # Reset per-module accumulators.
            kind = 'module'
            name = groups[0]
            generics = []
            ports = collections.OrderedDict()
            param_items = []
            sections = []
            port_param_index = 0
        elif (action == 'parameter_start'):
            (net_type, vec_range) = groups
            new_ptype = ''
            if (net_type is not None):
                new_ptype += net_type
            if (vec_range is not None):
                new_ptype += (' ' + vec_range)
            ptype = new_ptype
        elif (action == 'param_item'):
            generics.append(VerilogParameter(groups[0], 'in', ptype))
        elif (action == 'module_port_start'):
            (new_mode, net_type, signed, vec_range) = groups
            new_ptype = ''
            if (net_type is not None):
                new_ptype += net_type
            if (signed is not None):
                new_ptype += (' ' + signed)
            if (vec_range is not None):
                new_ptype += (' ' + vec_range)
            # Flush identifiers collected under the previous mode/type.
            for i in param_items:
                ports[i] = VerilogParameter(i, mode, ptype)
            param_items = []
            if (len(ports) > 0):
                last_item = next(reversed(ports))
            mode = new_mode
            ptype = new_ptype
        elif (action == 'port_param'):
            ident = groups[0]
            param_items.append(ident)
            port_param_index += 1
        elif (action == 'end_module'):
            # Flush any remaining port identifiers and emit the module.
            for i in param_items:
                ports[i] = VerilogParameter(i, mode, ptype)
            vobj = VerilogModule(name, ports.values(), generics, dict(sections), metacomments)
            objects.append(vobj)
            last_item = None
            metacomments = []
    return objects
Parse a text buffer of Verilog code Args: text (str): Source code to parse Returns: List of parsed objects.
codesearchnet
def fft(x):
    """Compute the Fast Fourier Transform along the last axis of the input.

    Args:
        x: Tuple of the real and imaginary parts of the input tensor; both
            should be of floating type.

    Returns:
        Tuple of (real, imaginary) output tensors.
    """
    if any_symbolic_tensors(x):
        # Defer to the symbolic-graph implementation when tracing.
        return FFT().symbolic_call(x)
    return backend.math.fft(x)
Computes the Fast Fourier Transform along last axis of input. Args: x: Tuple of the real and imaginary parts of the input tensor. Both tensors in the tuple should be of floating type. Returns: A tuple containing two tensors - the real and imaginary parts of the output tensor. Example: >>> x = ( ... keras.ops.convert_to_tensor([1., 2.]), ... keras.ops.convert_to_tensor([0., 1.]), ... ) >>> fft(x) (array([ 3., -1.], dtype=float32), array([ 1., -1.], dtype=float32))
github-repos
def csv_to_num_matrix(csv_file_path):
    """Load a CSV file consisting only of numbers into a matrix of floats.

    Args:
        csv_file_path: Full path to a valid CSV file
            (e.g. c:/ladybug/test.csv).

    Returns:
        list[list[float]]: one inner list per non-empty CSV row.
    """
    mtx = []
    with open(csv_file_path) as csv_data_file:
        for row in csv_data_file:
            row = row.strip()
            # ROBUSTNESS: skip blank lines (e.g. a trailing newline or an
            # empty row), which previously crashed on float('').
            if not row:
                continue
            mtx.append([float(val) for val in row.split(',')])
    return mtx
Load a CSV file consisting only of numbers into a Python matrix of floats. Args: csv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv)
codesearchnet
def load_json_or_yaml(string, is_path=False, file_type='json',
                      exception=ScriptWorkerTaskException,
                      message='Failed to load %(file_type)s: %(exc)s'):
    """Load json or yaml from a string or filehandle.

    Args:
        string (str): json/yaml body, or a path to open.
        is_path (bool, optional): whether ``string`` is a path.
            Defaults to False.
        file_type (str, optional): either "json" or "yaml".
            Defaults to "json".
        exception (exception, optional): exception class raised on failure;
            if None, failures are swallowed and None is returned.
        message (str, optional): message template for the raised exception.

    Returns:
        dict: the parsed data.

    Raises:
        Exception: ``exception`` as specified, on failure.
    """
    if file_type == 'json':
        load_from_file, load_from_string = json.load, json.loads
    else:
        load_from_file = load_from_string = yaml.safe_load
    try:
        if is_path:
            with open(string, 'r') as file_handle:
                return load_from_file(file_handle)
        return load_from_string(string)
    except (OSError, ValueError, yaml.scanner.ScannerError) as exc:
        if exception is not None:
            raise exception(message % {'exc': str(exc), 'file_type': file_type})
Load json or yaml from a filehandle or string, and raise a custom exception on failure. Args: string (str): json/yaml body or a path to open is_path (bool, optional): if ``string`` is a path. Defaults to False. file_type (str, optional): either "json" or "yaml". Defaults to "json". exception (exception, optional): the exception to raise on failure. If None, don't raise an exception. Defaults to ScriptWorkerTaskException. message (str, optional): the message to use for the exception. Defaults to "Failed to load %(file_type)s: %(exc)s" Returns: dict: the data from the string. Raises: Exception: as specified, on failure
codesearchnet
def fuse_resize_and_conv(input_graph_def: graph_pb2.GraphDef, output_node_names: Sequence[str]) -> graph_pb2.GraphDef:
    """Merge preceding resize and mirror-pad ops into a fused convolution.

    Looks for the common ResizeBilinear -> MirrorPad -> Conv2D pattern and
    rewrites it as a single FusedResizeAndPadConv2D (or FusedPadConv2D when
    only the pad is present).

    Args:
        input_graph_def: A GraphDef containing a model.
        output_node_names: Names of the nodes that produce final results.

    Returns:
        Modified graph with resize and pad ops merged.

    Raises:
        ValueError: If the graph is badly formed with duplicate node names.
    """
    input_node_map = {}
    for node in input_graph_def.node:
        if node.name not in input_node_map:
            input_node_map[node.name] = node
        else:
            raise ValueError('Duplicate node names detected for ', node.name)
    # Reference-count every node so consumed resize/pad ops can be dropped
    # from the output graph once fused.
    node_reference_count = collections.defaultdict(int)
    for node in input_graph_def.node:
        for input_name in node.input:
            stripped_name = node_name_from_input(input_name)
            node_reference_count[stripped_name] += 1
    for output_name in output_node_names:
        node_reference_count[output_name] += 1
    new_ops = []
    for node in input_graph_def.node:
        if node.op != 'Conv2D':
            continue
        conv_op = node
        input_op = node_from_map(input_node_map, conv_op.input[0])
        # Identify which of MirrorPad / ResizeBilinear precede this conv.
        if input_op.op == 'MirrorPad':
            mirror_pad_op = input_op
            resize_op = node_from_map(input_node_map, mirror_pad_op.input[0])
            if resize_op.op != 'ResizeBilinear':
                resize_op = None
        else:
            mirror_pad_op = None
            if input_op.op == 'ResizeBilinear':
                resize_op = input_op
            else:
                resize_op = None
        if not mirror_pad_op and (not resize_op):
            continue
        # The conv is replaced, and the fused-away ops lose one reference.
        node_reference_count[conv_op.name] = 0
        if mirror_pad_op:
            node_reference_count[mirror_pad_op.name] -= 1
        if resize_op:
            node_reference_count[resize_op.name] -= 1
        fused_conv_op = node_def_pb2.NodeDef()
        if resize_op:
            fused_conv_op.op = 'FusedResizeAndPadConv2D'
        else:
            fused_conv_op.op = 'FusedPadConv2D'
        fused_conv_op.name = conv_op.name
        if mirror_pad_op:
            mirror_paddings_name = mirror_pad_op.input[1]
            mirror_paddings_mode = mirror_pad_op.attr['mode']
        else:
            # No pad op present: synthesize an all-zero paddings constant
            # so the fused op's signature is satisfied.
            paddings_op = node_def_pb2.NodeDef()
            paddings_op.op = 'Const'
            paddings_op.name = conv_op.name + '_dummy_paddings'
            paddings_op.attr['dtype'].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.int32.as_datatype_enum))
            paddings_op.attr['value'].CopyFrom(attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto([0, 0, 0, 0, 0, 0, 0, 0], dtypes.int32, [4, 2])))
            new_ops.extend([paddings_op])
            mirror_paddings_name = paddings_op.name
            mirror_paddings_mode = attr_value_pb2.AttrValue(s=b'REFLECT')
        if resize_op:
            fused_conv_op.input.extend([resize_op.input[0], resize_op.input[1], mirror_paddings_name, conv_op.input[1]])
            fused_conv_op.attr['resize_align_corners'].CopyFrom(resize_op.attr['align_corners'])
        else:
            fused_conv_op.input.extend([mirror_pad_op.input[0], mirror_paddings_name, conv_op.input[1]])
        fused_conv_op.attr['T'].CopyFrom(conv_op.attr['T'])
        fused_conv_op.attr['mode'].CopyFrom(mirror_paddings_mode)
        fused_conv_op.attr['strides'].CopyFrom(conv_op.attr['strides'])
        fused_conv_op.attr['padding'].CopyFrom(conv_op.attr['padding'])
        new_ops.extend([fused_conv_op])
    # Rebuild the graph, keeping only still-referenced nodes, then append
    # the fused replacements.
    result_graph_def = graph_pb2.GraphDef()
    for node in input_graph_def.node:
        if node_reference_count[node.name] < 1:
            continue
        new_node = node_def_pb2.NodeDef()
        new_node.CopyFrom(node)
        result_graph_def.node.extend([new_node])
    result_graph_def.node.extend(new_ops)
    return result_graph_def
Merges preceding resize and mirror pad ops into a specialized convolution. There's a common pattern of enlarging the input to a convolution using a resize operation, and also using MirrorPad to extend the boundaries to that zero edge pixels don't bleed inwards when convolving. This routine looks for that pattern of operations, and fuses them together into a Conv2DWithResizeOp. Args: input_graph_def: A GraphDef containing a model. output_node_names: A list of names of the nodes that produce the final results. Returns: Modified graph with resize and pad ops merged. Raises: ValueError: If the graph is badly formed with duplicate node names.
github-repos
def file_name(self, file_name):
    """Update the indicator's fileName via the ThreatConnect API.

    Args:
        file_name: new file name value.

    Returns:
        The API response from the update request.
    """
    if not self.can_update():
        # 910 is the handler code for updating a non-updatable type.
        self._tcex.handle_error(910, [self.type])
    self._data['fileName'] = file_name
    request = {'fileName': file_name}
    return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)
Updates the file_name. Args: file_name:
juraj-google-style
def __init__(self, exprs):
    """Initialize a disjunction.

    Args:
        exprs: A set. The subterms of the disjunction.
    """
    self.exprs = exprs
Initialize a disjunction. Args: exprs: A set. The subterms.
github-repos
def copy_file(src_file: str, dst_dir: str, strip: str=None, dest_file: str=None) -> None:
    """Copy a file into the destination directory.

    Args:
        src_file: file to be copied.
        dst_dir: destination directory.
        strip: prefix to strip before copying to the destination.
        dest_file: destination file location, if different from src_file.
    """
    dest = dest_file if dest_file else src_file
    # Bazel outputs live under bazel-out/<config>/bin/...; keep only the
    # path relative to the bin directory.
    if dest.startswith('bazel-out'):
        dest = dest[dest.index('bin') + 4:]
    if strip:
        dest = dest.removeprefix(strip)
    dest_path = os.path.join(dst_dir, dest)
    os.makedirs(os.path.dirname(dest_path), exist_ok=True)
    # BUG FIX: copy to the full destination path rather than the directory.
    # Previously shutil.copy(src, dir) kept the *source* basename, so a
    # dest_file with a different basename was never created and the chmod
    # below raised FileNotFoundError.
    shutil.copy(src_file, dest_path)
    os.chmod(dest_path, 0o644)  # 0o644 == 420: rw-r--r--
Copy a file to the destination directory. Args: src_file: file to be copied dst_dir: destination directory strip: prefix to strip before copying to destination dest_file: destanation file location if different from src_file
github-repos
def rewrite_autodoc(app, what, name, obj, options, lines):
    """Convert docstring lines from Cartouche to Sphinx format, in place.

    Called by the Sphinx autodoc extension after a docstring is read.

    Args:
        app: The Sphinx application object.
        what: The type of object the docstring belongs to ('module',
            'class', 'exception', 'function', 'method', 'attribute').
        name: The fully qualified name of the object.
        obj: The object itself.
        options: The options given to the directive.
        lines: The lines of the docstring; modified *in place*.

    Raises:
        CartoucheSyntaxError: If the docstring is malformed.
    """
    try:
        lines[:] = parse_cartouche_text(lines)
    except CartoucheSyntaxError as syntax_error:
        # Re-raise with the object name and full docstring prepended to
        # the first exception argument, to aid debugging.
        args = syntax_error.args
        arg0 = (args[0] if args else '')
        arg0 += ' in docstring for {what} {name} :'.format(what=what, name=name)
        arg0 += '\n=== BEGIN DOCSTRING ===\n{lines}\n=== END DOCSTRING ===\n'.format(lines='\n'.join(lines))
        syntax_error.args = ((arg0,) + args[1:])
        raise
Convert lines from Cartouche to Sphinx format. The function to be called by the Sphinx autodoc extension when autodoc has read and processed a docstring. This function modified its ``lines`` argument *in place* replacing Cartouche syntax input into Sphinx reStructuredText output. Args: apps: The Sphinx application object. what: The type of object which the docstring belongs to. One of 'module', 'class', 'exception', 'function', 'method', 'attribute' name: The fully qualified name of the object. obj: The object itself. options: The options given to the directive. An object with attributes ``inherited_members``, ``undoc_members``, ``show_inheritance`` and ``noindex`` that are ``True`` if the flag option of the same name was given to the auto directive. lines: The lines of the docstring. Will be modified *in place*. Raises: CartoucheSyntaxError: If the docstring is malformed.
codesearchnet
def inverse_transform(self, y, lengths=None):
    """Convert label-probability matrices back to label strings.

    Args:
        y: label id (one-hot / probability) matrix.
        lengths: optional sentence lengths used to trim padding.

    Returns:
        list: list of label-string lists, one per sentence.
    """
    label_ids = np.argmax(y, -1)
    decoded = [self._label_vocab.id2doc(ids) for ids in label_ids]
    if lengths is not None:
        # Drop padding positions beyond each sentence's true length.
        decoded = [labels[:length] for labels, length in zip(decoded, lengths)]
    return decoded
Return label strings. Args: y: label id matrix. lengths: sentences length. Returns: list: list of list of strings.
juraj-google-style
def build_gemini_query(self, query, extra_info):
    """Append a condition to a gemini SQL query.

    Args:
        query (str): The gemini query.
        extra_info (str): The condition text that should be added.

    Returns:
        str: the query extended with the condition, joined by AND when the
        query already contains a WHERE clause, otherwise by WHERE.
    """
    joiner = 'AND' if 'WHERE' in query else 'WHERE'
    return '{0} {1} {2}'.format(query, joiner, extra_info)
Append sql to a gemini query Args: query(str): The gemini query extra_info(str): The text that should be added Return: extended_query(str)
codesearchnet
def post_shared_file(self, image_file=None, source_link=None, shake_id=None, title=None, description=None):
    """Upload an image as a SharedFile.

    Exactly one of image_file or source_link must be given. NOTE(review):
    source_link, shake_id and description are validated/accepted but never
    used below — presumably unimplemented (see original TODO); confirm.

    Args:
        image_file (str): path to an image (jpg/gif) on your computer.
        source_link (str): URL of a source (youtube/vine/etc.).
        shake_id (int): shake to upload the file into [optional].
        title (str): title of the SharedFile [optional].
        description (str): description of the SharedFile.

    Returns:
        SharedFile key from the API response.

    Raises:
        Exception: if both or neither of image_file/source_link are given.
    """
    if (image_file and source_link):
        raise Exception('You can only specify an image file or a source link, not both.')
    if ((not image_file) and (not source_link)):
        raise Exception('You must specify an image file or a source link')
    content_type = self._get_image_type(image_file)
    if (not title):
        title = os.path.basename(image_file)
    f = open(image_file, 'rb')
    endpoint = '/api/upload'
    files = {'file': (title, f, content_type)}
    data = self._make_request('POST', endpoint=endpoint, files=files)
    f.close()
    return data
Upload an image. TODO: Don't have a pro account to test (or even write) code to upload a shared file to a particular shake. Args: image_file (str): path to an image (jpg/gif) on your computer. source_link (str): URL of a source (youtube/vine/etc.) shake_id (int): shake to which to upload the file or source_link [optional] title (str): title of the SharedFile [optional] description (str): description of the SharedFile Returns: SharedFile key.
codesearchnet
def apply_cut(self, cm):
    """Return the connectivity matrix with cut connections severed.

    Args:
        cm (np.ndarray): A connectivity matrix.

    Returns:
        np.ndarray: cm with every connection severed by this cut zeroed.
    """
    # Mask is 0 exactly where the cut matrix marks a severed connection.
    inverse = np.logical_not(self.cut_matrix(cm.shape[0])).astype(int)
    return (cm * inverse)
Return a modified connectivity matrix with all connections that are severed by this cut removed. Args: cm (np.ndarray): A connectivity matrix.
codesearchnet
def update_hash_with_primitive_value(hash_value, value):
    """Mix a primitive value into a 64-bit hash accumulator.

    Combines the value with the 64-bit golden-ratio constant and shifted
    copies of the current hash, then xors into the accumulator. All
    arithmetic is done on 1-element uint64 arrays so overflow wraps.

    Args:
        hash_value: The current hash value (coercible to uint64).
        value: The primitive value to incorporate into the hash.

    Returns:
        np.uint64: The updated hash value.
    """
    golden_ratio_64 = np.uint64(11400714819323197440)
    accumulator = np.array([np.uint64(hash_value)])
    incoming = np.array([np.uint64(value)])
    mixed = (incoming + golden_ratio_64
             + np.left_shift(accumulator, 10)
             + np.right_shift(accumulator, 4))
    return np.bitwise_xor(accumulator, mixed)[0]
Update the hash value using a primitive value. Args: hash_value (uint64): The current hash value. value: The primitive value to incorporate into the hash. Returns: int: The updated hash value.
github-repos
def do_put(self, uri, resource, timeout, custom_headers):
    """Make a PUT request and wait for any resulting asynchronous task.

    Args:
        uri: URI of the resource.
        resource: body of the request.
        timeout: Time out for the request in seconds.
        custom_headers: Allows setting custom HTTP headers.

    Returns:
        The response body for synchronous operations, otherwise the
        result of waiting on the returned task.
    """
    self.validate_resource_uri(uri)
    task, body = self._connection.put(uri, resource, custom_headers=custom_headers)
    if not task:
        # Synchronous operation: the body already holds the final result.
        return body
    return self._task_monitor.wait_for_task(task, timeout)
Helps to make put requests. Args: uri: URI of the resource timeout: Time out for the request in seconds. custom_headers: Allows to set custom http headers. Retuns: Returns Task object
juraj-google-style
def set_images(self, text, parse_html=True):
    """Replace markdown image URLs in text with downloaded file references.

    Args:
        text (str): text to parse for image strings.
        parse_html (bool): whether to run the HTML parsing step first.

    Returns:
        tuple: (text with image URLs replaced by checksum references,
        list of files that were downloaded).
    """
    file_list = []
    if parse_html:
        processed_string = self.parse_html(text)
    else:
        processed_string = text
    reg = re.compile(MARKDOWN_IMAGE_REGEX, flags=re.IGNORECASE)
    matches = reg.findall(processed_string)
    for match in matches:
        # match[1] is the image URL captured by the markdown image regex.
        file_result = self.set_image(match[1])
        if file_result[0] != "":
            replacement, new_files = file_result
            processed_string = processed_string.replace(match[1], replacement)
            file_list += new_files
    return processed_string, file_list
set_images: Replace image strings with downloaded image checksums Args: text (str): text to parse for image strings Returns: string with checksums in place of image strings and list of files that were downloaded from string
juraj-google-style
def dot(*values: Union[float, complex, np.ndarray]
        ) -> Union[float, complex, np.ndarray]:
    """Compute the dot/matrix product of a sequence of values.

    A *args version of np.linalg.multi_dot: a single argument is returned
    as-is (arrays are copied first), two or more are combined with
    multi_dot.

    Args:
        *values: The values to combine with the dot/matrix product.

    Returns:
        The resulting value or matrix.
    """
    if len(values) == 1:
        only_value = values[0]
        if isinstance(only_value, np.ndarray):
            # Copy so callers cannot mutate the input through the result.
            return np.array(only_value)
        return only_value
    return np.linalg.multi_dot(values)
Computes the dot/matrix product of a sequence of values. A *args version of np.linalg.multi_dot. Args: *values: The values to combine with the dot/matrix product. Returns: The resulting value or matrix.
juraj-google-style
def get_key_by_job_id(cls, mapreduce_id):
    """Retrieve the datastore Key for a job's state entity.

    Args:
        mapreduce_id: The job to retrieve.

    Returns:
        Datastore Key that can be used to fetch the MapreduceState.
    """
    return db.Key.from_path(cls.kind(), str(mapreduce_id))
Retrieves the Key for a Job. Args: mapreduce_id: The job to retrieve. Returns: Datastore Key that can be used to fetch the MapreduceState.
codesearchnet
def store(self, df, attribute_columns):
    """Store entities and their attributes into the database.

    Note: this mutates `df`, appending 'id' and 'type' columns.

    Args:
        df (pandas.DataFrame): data to store.
        attribute_columns (list(str)): column labels defining attributes.
    """
    # Allocate contiguous id ranges just past the current table maxima.
    entity_id_start = models.Entity.get_max_id(self.session) + 1
    attribute_id_start = models.Attribute.get_max_id(self.session) + 1
    df['id'] = range(entity_id_start, entity_id_start + len(df))
    df['type'] = self.type
    df[['id', 'type']].to_sql(name=models.Entity.__tablename__, con=self.client.engine, if_exists='append', index=False)
    for col in attribute_columns:
        # One attribute row per entity for each attribute column.
        attr_df = df[[col, 'id']].rename(columns={'id': 'entity_id', col: 'value'})
        attr_df['name'] = col
        attr_df['id'] = range(attribute_id_start, attribute_id_start + len(df))
        attribute_id_start += len(df)
        attr_df.to_sql(name=models.Attribute.__tablename__, con=self.client.engine, if_exists='append', index=False)
Store entities and their attributes Args: df (pandas.DataFrame): data to store (storing appends 'id' and 'type' columns!) attribute_columns (list(str)): list of column labels that define attributes
juraj-google-style
def ParseByteStream(self, parser_mediator, byte_stream, parent_path_segments=None, codepage='cp1252'):
    """Parse shell items from a byte stream.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        byte_stream (bytes): shell items data.
        parent_path_segments (Optional[list[str]]): parent shell item path
            segments.
        codepage (Optional[str]): byte stream codepage.
    """
    if (parent_path_segments and isinstance(parent_path_segments, list)):
        # Copy so the caller's list is not mutated while parsing.
        self._path_segments = list(parent_path_segments)
    else:
        self._path_segments = []
    shell_item_list = pyfwsi.item_list()
    parser_mediator.AppendToParserChain(self)
    try:
        shell_item_list.copy_from_byte_stream(byte_stream, ascii_codepage=codepage)
        for shell_item in iter(shell_item_list.items):
            self._ParseShellItem(parser_mediator, shell_item)
    finally:
        # Always unwind the parser chain, even when parsing raises.
        parser_mediator.PopFromParserChain()
Parses the shell items from the byte stream. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. byte_stream (bytes): shell items data. parent_path_segments (Optional[list[str]]): parent shell item path segments. codepage (Optional[str]): byte stream codepage.
codesearchnet
def resize(self, image: np.ndarray, size: Dict[str, int], patch_size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
    """Resize an image, keeping the input aspect ratio within the bounds.

    Args:
        image (`np.ndarray`): Image to resize.
        size (`Dict[str, int]`): Either `{'longest_edge': int}` or
            `{'height': int, 'width': int}`.
        patch_size (`Dict[str, int]`): Patch size (`{'height', 'width'}`)
            used to calculate the size of the output image.
        resample (`PILImageResampling`, *optional*): Resampling filter used
            when resizing the image.
        data_format: Channel dimension format of the output image; defaults
            to that of the input image.
        input_data_format: Channel dimension format of the input image;
            inferred if not provided.

    Returns:
        `np.ndarray`: the resized image.

    Raises:
        ValueError: if `size` or `patch_size` lack the required keys.
    """
    if 'longest_edge' in size:
        size = (size['longest_edge'], size['longest_edge'])
    elif 'height' in size and 'width' in size:
        size = (size['height'], size['width'])
    else:
        raise ValueError("size must contain either 'longest_edge' or 'height' and 'width'.")
    if 'height' in patch_size and 'width' in patch_size:
        patch_size = (patch_size['height'], patch_size['width'])
    else:
        # BUG FIX: the error message claimed 'shortest_edge' was accepted,
        # but the check above only accepts 'height' and 'width'.
        raise ValueError("patch_size must contain 'height' and 'width'.")
    output_size = get_resize_output_image_size(image, size=size, patch_size=patch_size, input_data_format=input_data_format)
    # Calls the module-level `resize` transform (the method name shadows it
    # only inside attribute access, not here).
    return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dict containing the longest possible edge of the image. patch_size (`Dict[str, int]`): Patch size used to calculate the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred.
github-repos
def _rpc_metadata(self):
    """The RPC metadata for this client's associated database.

    Returns:
        Sequence[Tuple(str, str)]: RPC metadata with the resource prefix
        for the associated database.
    """
    if (self._rpc_metadata_internal is None):
        # Compute once and cache; the database string does not change.
        self._rpc_metadata_internal = _helpers.metadata_with_prefix(self._database_string)
    return self._rpc_metadata_internal
The RPC metadata for this client's associated database. Returns: Sequence[Tuple(str, str)]: RPC metadata with resource prefix for the database associated with this client.
codesearchnet
def create_project(self, resource):
    """Create the entity described by the given resource.

    Args:
        resource (intern.resource.boss.BossResource): resource to create.

    Returns:
        (intern.resource.boss.BossResource): resource of the requested
        type on success.

    Raises:
        requests.HTTPError: on failure.
    """
    # Ensure the project service uses the project-scoped auth token.
    self.project_service.set_auth(self._token_project)
    return self.project_service.create(resource)
Create the entity described by the given resource. Args: resource (intern.resource.boss.BossResource) Returns: (intern.resource.boss.BossResource): Returns resource of type requested on success. Raises: requests.HTTPError on failure.
juraj-google-style
def _ReadLine(self, text_file_object, max_len=None, depth=0):
    """Read a single non-empty line from a text file.

    Empty lines are skipped by recursing, up to self._MAXIMUM_DEPTH times.

    Args:
        text_file_object (dfvfs.TextFile): text file.
        max_len (Optional[int]): maximum number of bytes a single line can
            take; None means read to end of line.
        depth (Optional[int]): number of empty lines already skipped.

    Returns:
        str: the stripped line, or '' on EOF or when the maximum number of
        consecutive empty lines is reached.
    """
    line = text_file_object.readline(size=max_len)
    if (not line):
        return ''
    if (line in self._EMPTY_LINES):
        if (depth == self._MAXIMUM_DEPTH):
            return ''
        # Skip the empty line and try the next one.
        return self._ReadLine(text_file_object, max_len=max_len, depth=(depth + 1))
    return line.strip()
Reads a line from a text file. Args: text_file_object (dfvfs.TextFile): text file. max_len (Optional[int]): maximum number of bytes a single line can take, where None means all remaining bytes should be read. depth (Optional[int]): number of new lines the parser encountered. Returns: str: single line read from the file-like object, or the maximum number of characters, if max_len defined and line longer than the defined size. Raises: UnicodeDecodeError: if the text cannot be decoded using the specified encoding.
codesearchnet
def set_management_icmp(enabled=True, deploy=False):
    """Enable or disable the ICMP management service on a PAN-OS device.

    Args:
        enabled (bool): If True the service will be enabled, if False
            disabled.
        deploy (bool): If True, commit the full candidate configuration;
            if False, only stage the pending change.

    Returns:
        dict: the API call result (merged with the commit result when
        deploy is True).

    Raises:
        CommandExecutionError: if enabled is not a bool.

    CLI Example:

    .. code-block:: bash

        salt '*' panos.set_management_icmp
        salt '*' panos.set_management_icmp enabled=False deploy=True
    """
    # PAN-OS models this setting as "disable-icmp", so the flag inverts.
    if enabled is True:
        value = "no"
    elif enabled is False:
        value = "yes"
    else:
        raise CommandExecutionError("Invalid option provided for service enabled option.")
    ret = {}
    query = {'type': 'config',
             'action': 'set',
             'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service',
             'element': '<disable-icmp>{0}</disable-icmp>'.format(value)}
    ret.update(__proxy__['panos.call'](query))
    if deploy is True:
        ret.update(commit())
    return ret
Enables or disables the ICMP management service on the device. CLI Example: Args: enabled (bool): If true the service will be enabled. If false the service will be disabled. deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. code-block:: bash salt '*' panos.set_management_icmp salt '*' panos.set_management_icmp enabled=False deploy=True
juraj-google-style
def _prob_in_top_k(clean_values, noisy_values, noise_stddev, noisy_top_values, k):
    """Probability that each clean value would be in the noisy top k.

    Helper for noisy top-k gating: differentiable w.r.t. the noise, so a
    load-balancing loss can backpropagate through expert assignment. When
    noise_stddev is None the result is a hard, non-differentiable mask.

    Args:
        clean_values: a `Tensor` of shape [batch, n].
        noisy_values: a `Tensor` of shape [batch, n]; the clean values plus
            normally distributed noise with standard deviation noise_stddev.
        noise_stddev: a `Tensor` of shape [batch, n], or None.
        noisy_top_values: a `Tensor` of shape [batch, m]; the "values"
            output of tf.top_k(noisy_values, m), with m >= k+1.
        k: an integer.

    Returns:
        a `Tensor` of shape [batch, n].
    """
    batch = tf.shape(clean_values)[0]
    m = tf.shape(noisy_top_values)[1]
    top_values_flat = tf.reshape(noisy_top_values, [(- 1)])
    # Flat index of the (k+1)-th largest noisy value per row: the value a
    # candidate must beat if it is itself among the top k.
    threshold_positions_if_in = ((tf.range(batch) * m) + k)
    threshold_if_in = tf.expand_dims(tf.gather(top_values_flat, threshold_positions_if_in), 1)
    is_in = tf.greater(noisy_values, threshold_if_in)
    if (noise_stddev is None):
        return tf.to_float(is_in)
    # If the candidate is currently out of the top k, the relevant
    # threshold is the k-th largest value instead.
    threshold_positions_if_out = (threshold_positions_if_in - 1)
    threshold_if_out = tf.expand_dims(tf.gather(top_values_flat, threshold_positions_if_out), 1)
    prob_if_in = _normal_distribution_cdf((clean_values - threshold_if_in), noise_stddev)
    prob_if_out = _normal_distribution_cdf((clean_values - threshold_if_out), noise_stddev)
    prob = tf.where(is_in, prob_if_in, prob_if_out)
    return prob
Helper function to NoisyTopKGating. Computes the probability that value is in top k, given different random noise. This gives us a way of backpropagating from a loss that balances the number of times each expert is in the top k experts per example. In the case of no noise, pass in None for noise_stddev, and the result will not be differentiable. Args: clean_values: a `Tensor` of shape [batch, n]. noisy_values: a `Tensor` of shape [batch, n]. Equal to clean values plus normally distributed noise with standard deviation noise_stddev. noise_stddev: a `Tensor` of shape [batch, n], or None noisy_top_values: a `Tensor` of shape [batch, m]. "values" Output of tf.top_k(noisy_top_values, m). m >= k+1 k: an integer. Returns: a `Tensor` of shape [batch, n].
codesearchnet
def random_expr(depth, vlist, ops):
    """Generate a random expression tree.

    Args:
        depth: At least one leaf will be this many levels down from the top.
        vlist: A list of chars, randomly selected as leaf values.
        ops: A list of ExprOp instances.

    Returns:
        An ExprNode which is the root of the generated expression tree
        (or a leaf string when depth is 0).
    """
    if (not depth):
        return str(vlist[random.randrange(len(vlist))])
    # Exactly one randomly chosen side carries the full remaining depth;
    # the other side gets a strictly shallower random depth.
    max_depth_side = random.randrange(2)
    other_side_depth = random.randrange(depth)
    left = random_expr(((depth - 1) if max_depth_side else other_side_depth), vlist, ops)
    right = random_expr(((depth - 1) if (not max_depth_side) else other_side_depth), vlist, ops)
    op = ops[random.randrange(len(ops))]
    return ExprNode(left, right, op)
Generate a random expression tree. Args: depth: At least one leaf will be this many levels down from the top. vlist: A list of chars. These chars are randomly selected as leaf values. ops: A list of ExprOp instances. Returns: An ExprNode instance which is the root of the generated expression tree.
codesearchnet
def to_lxml_encoding(encoding):
    """Return an encoding name that lxml supports, or None.

    Tries the encoding as given, then with '-' removed, then additionally
    with '_' removed (the replacements are cumulative), probing each with
    an HTMLParser which raises LookupError for unknown encodings.

    Args:
        encoding (str): candidate encoding name.

    Returns:
        str: the first accepted variant, or None if none are accepted.
    """
    # REFACTOR: three copy-pasted try/except blocks collapsed into one
    # loop; behavior (including the cumulative replacements) is unchanged.
    candidate = encoding
    for separator in (None, '-', '_'):
        if separator is not None:
            candidate = candidate.replace(separator, '')
        try:
            lxml.html.HTMLParser(encoding=candidate)
        except LookupError:
            continue
        return candidate
    return None
Check if lxml supports the specified encoding. Returns: str, None
codesearchnet
def CreateAdGroup(client, campaign_id):
    """Create a Showcase ad group for the given shopping campaign ID.

    Args:
        client: an AdWordsClient instance.
        campaign_id: the str ID of a shopping campaign.

    Returns:
        The created AdGroup as a sudsobject.
    """
    import uuid  # local import: only used to build a unique ad group name

    ad_group_service = client.GetService('AdGroupService', 'v201809')
    adgroup = {
        'adGroupType': 'SHOPPING_SHOWCASE_ADS',
        'campaignId': campaign_id,
        # NOTE(review): the original name expression was garbled in this
        # source; a uuid-suffixed name follows the AdWords examples
        # convention — confirm against the upstream example.
        'name': 'AdGroup #%s' % uuid.uuid4(),
    }
    adgroup_operations = {'operator': 'ADD', 'operand': adgroup}
    adgroup = ad_group_service.mutate(adgroup_operations)['value'][0]
    print('AdGroup with name "%s" and ID "%s" was added.'
          % (adgroup['name'], adgroup['id']))
    return adgroup
Creates an AdGroup for the given shopping campaign ID. Args: client: an AdWordsClient instance. campaign_id: the str ID of a shopping campaign. Returns: The created AdGroup as a sudsobject.
codesearchnet
def __init__(self, request, scopes=None, return_url=None):
    """Initialize the Oauth2 Object.

    Args:
        request: Django request object.
        scopes: Scopes desired for this OAuth2 flow.
        return_url: The url to return to after the OAuth flow is complete,
            defaults to the request's current URL path.
    """
    self.request = request
    # Fall back to the page the user is currently on.
    self.return_url = return_url if return_url else request.get_full_path()
    # The globally configured scopes are always included; any extra scopes
    # requested for this flow are merged in.
    base_scopes = set(oauth2_settings.scopes)
    self._scopes = (base_scopes | set(scopes)) if scopes else base_scopes
Initialize the Oauth2 Object. Args: request: Django request object. scopes: Scopes desired for this OAuth2 flow. return_url: The url to return to after the OAuth flow is complete, defaults to the request's current URL path.
juraj-google-style
def slogdet(x):
    """Compute the sign and natural logarithm of the determinant of a matrix.

    Args:
        x: Input matrix. It must be 2D and square.

    Returns:
        A tuple `(sign, logabsdet)`. `sign` is a number representing the sign
        of the determinant. For a real matrix, this is 1, 0, or -1. For a
        complex matrix, this is a complex number with absolute value 1 (i.e.,
        it is on the unit circle), or else 0. `logabsdet` is the natural log
        of the absolute value of the determinant.
    """
    # Eager inputs go straight to the backend; symbolic inputs defer to the
    # op object so the computation is recorded in the graph.
    if not any_symbolic_tensors((x,)):
        return backend.numpy.slogdet(x)
    return Slogdet().symbolic_call(x)
Compute the sign and natural logarithm of the determinant of a matrix. Args: x: Input matrix. It must 2D and square. Returns: A tuple `(sign, logabsdet)`. `sign` is a number representing the sign of the determinant. For a real matrix, this is 1, 0, or -1. For a complex matrix, this is a complex number with absolute value 1 (i.e., it is on the unit circle), or else 0. `logabsdet` is the natural log of the absolute value of the determinant.
github-repos
def get_all_results_for_query_batch(self, batch_id, job_id=None, chunk_size=2048):
    """Generate every result set belonging to a completed query batch.

    Result ids are fetched up front; each result set is then retrieved
    lazily, only when the caller advances the generator.

    Args:
        batch_id: id of batch
        job_id: id of job, if not provided, it will be looked up
        chunk_size: number of bytes to fetch per read of a result set

    Raises:
        RuntimeError: if the batch has produced no result ids yet.
    """
    result_ids = self.get_query_batch_result_ids(batch_id, job_id=job_id)
    if not result_ids:
        raise RuntimeError('Batch is not complete')
    yield from (
        self.get_query_batch_results(
            batch_id, result_id, job_id=job_id, chunk_size=chunk_size)
        for result_id in result_ids)
Gets result ids and generates each result set from the batch and returns it as an generator fetching the next result set when needed Args: batch_id: id of batch job_id: id of job, if not provided, it will be looked up
codesearchnet
def get_directory_list_doc(self, configs):
    """JSON dict description of a protorpc.remote.Service in list format.

    Args:
      configs: Either a single dict or a list of dicts containing the service
        configurations to list.

    Returns:
      dict, The directory list document as a JSON dict.
    """
    # Normalize a bare dict into a single-element list before validation.
    config_list = configs if isinstance(configs, (tuple, list)) else [configs]
    util.check_list_type(config_list, dict, 'configs', allow_none=False)
    return self.__directory_list_descriptor(config_list)
JSON dict description of a protorpc.remote.Service in list format. Args: configs: Either a single dict or a list of dicts containing the service configurations to list. Returns: dict, The directory list document as a JSON dict.
juraj-google-style
def mme_delete(case_obj, mme_base_url, mme_token):
    """Delete all affected samples for a case from MatchMaker

    Args:
        case_obj(dict): a scout case object
        mme_base_url(str): base url of the MME server
        mme_token(str): auth token of the MME server

    Returns:
        server_responses(list): one entry per submitted patient, of the form
            {
                'patient_id': patient_id,
                'message': server_message,
                'status_code': server_status_code
            }
        or a str error message when the connection parameters are missing.
    """
    if not mme_base_url or not mme_token:
        return 'Please check that Matchmaker connection parameters are valid'
    server_responses = []
    for patient in case_obj['mme_submission']['patients']:
        patient_id = patient['id']
        delete_url = ''.join([mme_base_url, '/patient/delete/', patient_id])
        server_reply = matchmaker_request(
            url=delete_url, token=mme_token, method='DELETE')
        server_responses.append({
            'patient_id': patient_id,
            'message': server_reply.get('message'),
            'status_code': server_reply.get('status_code'),
        })
    return server_responses
Delete all affected samples for a case from MatchMaker Args: case_obj(dict) a scout case object mme_base_url(str) base url of the MME server mme_token(str) auth token of the MME server Returns: server_responses(list): a list of object of this type: { 'patient_id': patient_id 'message': server_message, 'status_code': server_status_code }
juraj-google-style
def duration(self, value):
    """The duration property setter.

    Storing a value equal to the default removes any stored override so the
    default continues to apply; any other value is stored explicitly.

    Args:
        value (string): the property value.
    """
    matches_default = value == self._defaults['duration']
    if matches_default and 'duration' in self._values:
        del self._values['duration']
    else:
        self._values['duration'] = value
The duration property. Args: value (string): the property value.
juraj-google-style
def parse_env(config_schema, env):
    """Parse the values from a given environment against a given config schema

    Args:
        config_schema: A dict which maps the variable name to a Schema object
            that describes the requested value.
        env: A dict which represents the value of each variable in the
            environment.

    Raises:
        MissingConfigError: if a schema raises KeyError for a required
            variable that is not set.
    """
    parsed = {}
    try:
        for name, schema in config_schema.items():
            parsed[name] = schema.parse(name, env.get(name))
    except KeyError as err:
        raise MissingConfigError(
            "Required config not set: {}".format(err.args[0])
        )
    return parsed
Parse the values from a given environment against a given config schema Args: config_schema: A dict which maps the variable name to a Schema object that describes the requested value. env: A dict which represents the value of each variable in the environment.
juraj-google-style
def get_value_at_percentile(self, percentile):
    """Get the value for a given percentile

    Args:
        percentile: a float in [0.0..100.0]

    Returns:
        the value for the given percentile, or 0 if the histogram holds
        fewer observations than the target count.
    """
    target_count = self.get_target_count_at_percentile(percentile)
    cumulative = 0
    for bucket_index in range(self.counts_len):
        cumulative += self.get_count_at_index(bucket_index)
        if cumulative < target_count:
            continue
        bucket_value = self.get_value_from_index(bucket_index)
        # Percentile 0.0 maps to the lowest equivalent value instead of
        # the highest one within the bucket.
        if percentile:
            return self.get_highest_equivalent_value(bucket_value)
        return self.get_lowest_equivalent_value(bucket_value)
    return 0
Get the value for a given percentile Args: percentile: a float in [0.0..100.0] Returns: the value for the given percentile
juraj-google-style
def _encode_gif(images, fps):
    """Encodes numpy images into gif string.

    Args:
      images: A 4-D `uint8` `np.array` (or a list of 3-D images) of shape
        `[time, height, width, channels]` where `channels` is 1 or 3.
      fps: frames per second of the animation

    Returns:
      The encoded gif string.

    Raises:
      IOError: If the ffmpeg command returns an error.
    """
    # Feed all frames to the writer in one call, then finalize the encoding.
    video_writer = WholeVideoWriter(fps)
    video_writer.write_multi(images)
    return video_writer.finish()
Encodes numpy images into gif string. Args: images: A 4-D `uint8` `np.array` (or a list of 3-D images) of shape `[time, height, width, channels]` where `channels` is 1 or 3. fps: frames per second of the animation Returns: The encoded gif string. Raises: IOError: If the ffmpeg command returns an error.
juraj-google-style
def serialize(activation):
    """Returns the string identifier of an activation function.

    Args:
        activation : Function object.

    Returns:
        String denoting the name attribute of the input function

    For example:

    >>> tf.keras.activations.serialize(tf.keras.activations.tanh)
    'tanh'
    >>> tf.keras.activations.serialize(tf.keras.activations.sigmoid)
    'sigmoid'
    >>> tf.keras.activations.serialize('abcd')
    Traceback (most recent call last):
    ...
    ValueError: ('Cannot serialize', 'abcd')

    Raises:
        ValueError: The input function is not a valid one.
    """
    # Built-in TF activations are mapped through the V2 lookup table;
    # everything else goes through generic Keras object serialization.
    fn_name = getattr(activation, '__name__', None)
    if fn_name is not None and fn_name in _TF_ACTIVATIONS_V2:
        return _TF_ACTIVATIONS_V2[fn_name]
    return serialize_keras_object(activation)
Returns the string identifier of an activation function. Args: activation : Function object. Returns: String denoting the name attribute of the input function For example: >>> tf.keras.activations.serialize(tf.keras.activations.tanh) 'tanh' >>> tf.keras.activations.serialize(tf.keras.activations.sigmoid) 'sigmoid' >>> tf.keras.activations.serialize('abcd') Traceback (most recent call last): ... ValueError: ('Cannot serialize', 'abcd') Raises: ValueError: The input function is not a valid one.
github-repos
def depth_november_average_ground_temperature(self, value=None):
    """Corresponds to IDD Field `depth_november_average_ground_temperature`

    Args:
        value (float): value for IDD Field
            `depth_november_average_ground_temperature`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is None:
        # None means a missing value; store it without validation.
        self._depth_november_average_ground_temperature = None
        return
    try:
        converted = float(value)
    except ValueError:
        raise ValueError(
            'value {} need to be of type float '
            'for field `depth_november_average_ground_temperature`'.format(value))
    self._depth_november_average_ground_temperature = converted
Corresponds to IDD Field `depth_november_average_ground_temperature` Args: value (float): value for IDD Field `depth_november_average_ground_temperature` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def json_fhir_string_to_proto(raw_json: str, proto_cls: Type[_T], *, validate: bool=True, default_timezone: str=_primitive_time_utils.SIMPLE_ZULU) -> _T:
    """Creates a resource of proto_cls and merges contents of raw_json into it.

    Args:
      raw_json: The raw FHIR JSON string to convert.
      proto_cls: A subclass of message.Message to instantiate and return.
      validate: A Boolean value indicating if validation should be performed
        on the resultant Message. Validation takes the form of ensuring that
        basic checks such as cardinality guarantees, required field adherence,
        etc. are met. Defaults to True.
      default_timezone: A string specifying the timezone string to use for
        time-like FHIR data during parsing. Defaults to 'Z' for UTC.

    Raises:
      fhir_errors.InvalidFhirError: In the event that raw_json was not valid
        FHIR.

    Returns:
      An instance of proto_cls with FHIR JSON data from the raw_json
      representation.
    """
    # Start from an empty message and let the merge helper do the parsing,
    # validation, and population in place.
    parsed_resource = proto_cls()
    merge_json_fhir_string_into_proto(
        raw_json, parsed_resource, validate=validate,
        default_timezone=default_timezone)
    return parsed_resource
Creates a resource of proto_cls and merges contents of raw_json into it. Args: raw_json: The raw FHIR JSON string to convert. proto_cls: A subclass of message.Message to instantiate and return. validate: A Boolean value indicating if validation should be performed on the resultant Message. Validation takes the form of ensuring that basic checks such as cardinality guarantees, required field adherence, etc. are met. Defaults to True. default_timezone: A string specifying the timezone string to use for time- like FHIR data during parsing. Defaults to 'Z' for UTC. Raises: fhir_errors.InvalidFhirError: In the event that raw_json was not valid FHIR. Returns: An instance of proto_cls with FHIR JSON data from the raw_json representation.
github-repos
def add_string_pairs_from_label_element(xib_file, results, label, special_ui_components_prefix):
    """Adds string pairs from a label element.

    Args:
        xib_file (str): Path to the xib file.
        results (list): The list to add the results to.
        label (element): The label element from the xib, to extract the string
            pairs from.
        special_ui_components_prefix (str): If not None, extraction will not
            warn about internationalized UI components with this class prefix.
    """
    label_entry_comment = extract_element_internationalized_comment(label)
    if label_entry_comment is None:
        # No internationalization comment: this label is not extracted.
        return
    warn_if_element_not_of_class(label, 'Label', special_ui_components_prefix)
    uses_attributed_text = (
        label.hasAttribute('usesAttributedText') and
        label.attributes['usesAttributedText'].value == 'YES')
    if uses_attributed_text:
        add_string_pairs_from_attributed_ui_element(
            results, label, label_entry_comment)
        return
    try:
        label_entry_key = label.attributes['text'].value
    except KeyError:
        # Plain 'text' attribute missing; fall back to a nested <string> node.
        try:
            label_entry_key = (
                label.getElementsByTagName('string')[0].firstChild.nodeValue)
        except Exception:
            # Broad on purpose: a missing node (IndexError) or an empty node
            # (AttributeError) both mean there is no usable text entry.
            label_entry_key = 'N/A'
            # Fix: logging.warn is a deprecated alias of logging.warning.
            logging.warning('%s: Missing text entry in %s',
                            xib_file, label.toxml('UTF8'))
    results.append((label_entry_key, label_entry_comment))
Adds string pairs from a label element. Args: xib_file (str): Path to the xib file. results (list): The list to add the results to. label (element): The label element from the xib, to extract the string pairs from. special_ui_components_prefix (str): If not None, extraction will not warn about internationalized UI components with this class prefix.
codesearchnet
def validate_tag(self, key, value):
    """Check whether a tag value is valid

    Args:
        key: A tag key
        value: A tag value

    Returns:
        `(True or False)` A boolean indicating whether or not the value is
        valid
    """
    # Owner tags must carry a valid email address.
    if key == 'owner':
        return validate_email(value, self.partial_owner_match)
    # GDPR tags are restricted to the configured set of allowed values.
    if key == self.gdpr_tag:
        return value in self.gdpr_tag_values
    # Any other tag key is accepted unconditionally.
    return True
Check whether a tag value is valid Args: key: A tag key value: A tag value Returns: `(True or False)` A boolean indicating whether or not the value is valid
codesearchnet
def read_requirements(req_file):
    """Reads a requirements file.

    Args:
        req_file (str): Filename of requirements file

    Returns:
        list: `(requirement, line_number)` pairs for each parsed entry.
    """
    # comes_from is formatted '<req_file> (line N)'; split on the invariant
    # prefix once and strip the trailing ')' to recover N.
    line_marker = req_file + ' (line '
    parsed = []
    for entry in parse_requirements(req_file, session={}):
        line_number = entry.comes_from.split(line_marker)[1][:-1]
        if entry.req:
            # Carry environment markers over onto the requirement object.
            entry.req.marker = entry.markers
            parsed.append((entry.req, line_number))
        else:
            parsed.append((entry, line_number))
    return parsed
Reads a requirements file. Args: req_file (str): Filename of requirements file Returns: list: (requirement, line_number) pairs for each parsed entry
codesearchnet
def tokenize_to_spacy_doc(self, text: str) -> Doc:
    """Tokenize the given text, returning a spacy doc. Used for spacy rule extractor

    Args:
        text (string): the input text to tokenize

    Returns:
        Doc: the tokenized spacy document, with custom token attributes set
    """
    if not self.keep_multi_space:
        # Collapse runs of spaces into single spaces before tokenizing.
        text = re.sub(' +', ' ', text)
    spacy_doc = self.nlp(text, disable=['parser'])
    for token in spacy_doc:
        self.custom_token(token)
    return spacy_doc
Tokenize the given text, returning a spacy doc. Used for spacy rule extractor Args: text (string): Returns: Doc
juraj-google-style
def avl_split_last(root):
    """Removes the maximum element from the tree

    Returns:
        tuple: new_root, last_node

    O(log(n)) = O(height(root))
    """
    if root is None:
        raise IndexError('Empty tree has no maximum element')
    root, left, right = avl_release_kids(root)
    if right is None:
        # The root itself is the maximum; its left subtree becomes the tree.
        return left, root
    # The maximum lives in the right subtree; split it out recursively and
    # rejoin the remaining pieces around the old root.
    new_right, last_node = avl_split_last(right)
    return avl_join(left, new_right, root), last_node
Removes the maximum element from the tree Returns: tuple: new_root, last_node O(log(n)) = O(height(root))
codesearchnet