code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def coupling(self, source_y, target_y, weight):
    """How to couple the output of one node to the input of another.

    Args:
        source_y (array of shape (8,)): State of the source node.
        target_y (array of shape (8,)): State of the target node (not used
            by this coupling rule, but part of the coupling interface).
        weight (float): The connection strength.

    Returns:
        array of shape (8,): Value to drive each variable of the target node;
        only index 5 receives a non-zero drive.
    """
    # Pyramidal-cell membrane potential is the difference of the two
    # post-synaptic potentials carried in the source state.
    membrane_potential = source_y[1] - source_y[2]
    drive = weight * self.g1 * self.He2 * self.ke2 * self.S(membrane_potential)
    mask = np.array([0, 0, 0, 0, 0, 1.0, 0, 0])
    return mask * drive
How to couple the output of one node to the input of another. Args: source_y (array of shape (8,)): state of the source node target_y (array of shape (8,)): state of the target node weight (float): the connection strength Returns: input (array of shape (8,)): value to drive each variable of the target node.
juraj-google-style
def decode_list_oov(self, ids, source_oov_id_to_token):
    """Decode ids back to tokens, considering OOV temporary IDs.

    Args:
        ids: Vocab ids. May include source temporary OOV ids starting
            at ``self.vocab_size``.
        source_oov_id_to_token: List of source OOV tokens, ordered as they
            appear in the source.

    Returns:
        list: Decoded tokens, possibly including source OOV tokens.
    """
    ordered = reversed(ids) if self._reverse else ids
    # Ids below vocab_size resolve through the vocabulary; anything else is
    # an OOV slot relative to vocab_size.
    return [
        self._id_to_token[i] if i in self._id_to_token
        else source_oov_id_to_token[i - self.vocab_size]
        for i in ordered
    ]
decode ids back to tokens, considering OOVs temporary IDs. Args: ids: vocab ids. Could possibly include source temporary OOV ID starting from vocab_size. source_oov_id_to_token: a list of source OOV tokens, with the order the same as they appear in the source. Returns: decoded tokens, possibly including source OOV tokens.
codesearchnet
def batch_set_value(tuples):
    """Sets the values of many tensor variables at once.

    Args:
        tuples: a list of tuples `(tensor, value)`. `value` should be a
            Numpy array.
    """
    if context.executing_eagerly() or ops.inside_function():
        # Eager / tf.function path: assign each value directly, cast to the
        # variable's numpy dtype.
        for x, value in tuples:
            x.assign(numpy_compat.np_asarray(value, dtype=dtype_numpy(x)))
    else:
        # Graph mode: build (or reuse cached) placeholder+assign ops and run
        # them all in a single session call.
        with get_graph().as_default():
            if tuples:
                assign_ops = []
                feed_dict = {}
                for x, value in tuples:
                    value = numpy_compat.np_asarray(value, dtype=dtype_numpy(x))
                    # Strip any '_ref' suffix from the variable dtype name.
                    tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
                    if hasattr(x, '_assign_placeholder'):
                        # Reuse the placeholder/op cached on the variable so
                        # repeated calls do not grow the graph.
                        assign_placeholder = x._assign_placeholder
                        assign_op = x._assign_op
                    else:
                        # All-None dims so the same op serves any shape of the
                        # same rank on later calls.
                        placeholder_shape = tensor_shape.TensorShape([None] * value.ndim)
                        assign_placeholder = array_ops.placeholder(tf_dtype, shape=placeholder_shape)
                        assign_op = x.assign(assign_placeholder)
                        x._assign_placeholder = assign_placeholder
                        x._assign_op = assign_op
                    assign_ops.append(assign_op)
                    feed_dict[assign_placeholder] = value
                # One session.run for all assignments.
                get_session().run(assign_ops, feed_dict=feed_dict)
Sets the values of many tensor variables at once. Args: tuples: a list of tuples `(tensor, value)`. `value` should be a Numpy array.
github-repos
def get_point_index(self, point):
    """Gets the segment id and index of the first matching point.

    Args:
        point (:obj:`Point`): The point to look up.

    Returns:
        (int, int): Segment id and point index in that segment, or
        ``(-1, -1)`` if the point is in no segment.
    """
    for seg_id, segment in enumerate(self.segments):
        point_idx = segment.getPointIndex(point)
        if point_idx == -1:
            continue
        return seg_id, point_idx
    return -1, -1
Gets the segment id and index of the first matching point. Args: point (:obj:`Point`) Returns: (int, int): Segment id and point index in that segment
juraj-google-style
def add(TargetGroup, NewMember, Config=None, Args=None):
    r"""Adds members to an existing group.

    Args:
        TargetGroup (Group): The target group for the addition.
        NewMember (Group / Task): The member to be added.
        Config (dict): The config for the member.
        Args (OrderedDict): ArgConfig for the NewMember, if it's a task
            (optional).
    """
    # Plain functions become Tasks; anything else is treated as a sub-Group.
    if isfunction(NewMember):
        member = Task(NewMember, Args or {}, Config or {})
    else:
        member = Group(NewMember, Config or {})
    parent_members = TargetGroup.__ec_member__.Members
    parent_members[member.Config['name']] = member
    # Register the member a second time under its alias, when one is set.
    alias = member.Config.get('alias')
    if alias:
        parent_members[alias] = member
r"""Adds members to an existing group. Args: TargetGroup (Group): The target group for the addition. NewMember (Group / Task): The member to be added. Config (dict): The config for the member. Args (OrderedDict): ArgConfig for the NewMember, if it's a task (optional).
codesearchnet
def coord_list_mapping(subset, superset, atol=1e-08):
    """Gives the index mapping from a subset to a superset.

    Subset and superset cannot contain duplicate rows.

    Args:
        subset: List of coords (n x dim).
        superset: List of coords (m x dim); must contain every row of subset.
        atol (float): Absolute tolerance for coordinate comparison.

    Returns:
        np.ndarray: Indices such that ``superset[indices] == subset``.

    Raises:
        ValueError: If subset is not a subset of superset, or if duplicate
            matches indicate duplicates in superset.
    """
    c1 = np.array(subset)
    c2 = np.array(superset)
    # BUG FIX: the original text had the invalid syntax `c1[(:, None, :)]`;
    # the intended broadcasted comparison uses real slice indexing.
    inds = np.where(np.all(np.isclose(c1[:, None, :], c2[None, :, :],
                                      atol=atol), axis=2))[1]
    result = c2[inds]
    if not np.allclose(c1, result, atol=atol):
        if not is_coord_subset(subset, superset):
            raise ValueError('subset is not a subset of superset')
    if result.shape != c1.shape:
        raise ValueError('Something wrong with the inputs, likely duplicates '
                         'in superset')
    return inds
Gives the index mapping from a subset to a superset. Subset and superset cannot contain duplicate rows Args: subset, superset: List of coords Returns: list of indices such that superset[indices] = subset
codesearchnet
def emergence(network, state, do_blackbox=False, do_coarse_grain=True, time_scales=None):
    """Check for the emergence of a micro-system into a macro-system.

    Checks all possible blackboxings and coarse-grainings of a system to find
    the spatial scale with maximum integrated information.

    Args:
        network (Network): The network of the micro-system under investigation.
        state (tuple[int]): The state of the network.
        do_blackbox (bool): Set to ``True`` to enable blackboxing.
        do_coarse_grain (bool): Set to ``True`` to enable coarse-graining.
        time_scales (list[int]): Time steps over which to check for emergence.

    Returns:
        MacroNetwork: The maximal macro-system generated from the
        micro-system, or ``None`` if no candidate beats ``-inf``.
    """
    micro_phi = compute.major_complex(network, state).phi
    best_phi = float('-inf')
    best_network = None
    candidates = all_macro_systems(network, state, do_blackbox=do_blackbox,
                                   do_coarse_grain=do_coarse_grain,
                                   time_scales=time_scales)
    for subsystem in candidates:
        candidate_phi = compute.phi(subsystem)
        # Only accept a strictly better phi (beyond numerical tolerance).
        if candidate_phi - best_phi > constants.EPSILON:
            best_phi = candidate_phi
            best_network = MacroNetwork(network=network,
                                        macro_phi=candidate_phi,
                                        micro_phi=micro_phi,
                                        system=subsystem.micro_node_indices,
                                        time_scale=subsystem.time_scale,
                                        blackbox=subsystem.blackbox,
                                        coarse_grain=subsystem.coarse_grain)
    return best_network
Check for the emergence of a micro-system into a macro-system. Checks all possible blackboxings and coarse-grainings of a system to find the spatial scale with maximum integrated information. Use the ``do_blackbox`` and ``do_coarse_grain`` args to specify whether to use blackboxing, coarse-graining, or both. The default is to just coarse-grain the system. Args: network (Network): The network of the micro-system under investigation. state (tuple[int]): The state of the network. do_blackbox (bool): Set to ``True`` to enable blackboxing. Defaults to ``False``. do_coarse_grain (bool): Set to ``True`` to enable coarse-graining. Defaults to ``True``. time_scales (list[int]): List of all time steps over which to check for emergence. Returns: MacroNetwork: The maximal macro-system generated from the micro-system.
codesearchnet
def irreducible_purviews(cm, direction, mechanism, purviews):
    """Return all purviews which are irreducible for the mechanism.

    Args:
        cm (np.ndarray): An |N x N| connectivity matrix.
        direction (Direction): |CAUSE| or |EFFECT|.
        mechanism (tuple[int]): The mechanism in question.
        purviews (list[tuple[int]]): The purviews to check.

    Returns:
        list[tuple[int]]: All purviews in ``purviews`` which are not
        reducible over ``mechanism``.
    """
    def _is_trivially_reducible(purview):
        # The direction decides which side feeds which before the block test.
        from_nodes, to_nodes = direction.order(mechanism, purview)
        return connectivity.block_reducible(cm, from_nodes, to_nodes)

    return [p for p in purviews if not _is_trivially_reducible(p)]
Return all purviews which are irreducible for the mechanism. Args: cm (np.ndarray): An |N x N| connectivity matrix. direction (Direction): |CAUSE| or |EFFECT|. purviews (list[tuple[int]]): The purviews to check. mechanism (tuple[int]): The mechanism in question. Returns: list[tuple[int]]: All purviews in ``purviews`` which are not reducible over ``mechanism``. Raises: ValueError: If ``direction`` is invalid.
codesearchnet
def _run_internal_graph(self, inputs, training=None, mask=None):
    """Computes output tensors for new inputs.

    Note: can be run on non-Keras tensors.

    Args:
        inputs: Tensor or nested structure of Tensors.
        training: Boolean learning phase.
        mask: (Optional) Tensor or nested structure of Tensors.

    Returns:
        output_tensors, packed like ``self._nested_outputs``.
    """
    inputs = self._flatten_to_reference_inputs(inputs)
    if mask is None:
        masks = [None] * len(inputs)
    else:
        masks = self._flatten_to_reference_inputs(mask)
    # Attach the corresponding mask to every flattened input tensor.
    for input_t, mask in zip(inputs, masks):
        input_t._keras_mask = mask
    # Maps tensor id -> list of copies of the computed tensor, one entry per
    # pending downstream use; consumers pop() their copy.
    tensor_dict = {}
    tensor_usage_count = self._tensor_usage_count
    for x, y in zip(self.inputs, inputs):
        y = self._conform_to_reference_input(y, ref_input=x)
        x_id = str(id(x))
        tensor_dict[x_id] = [y] * tensor_usage_count[x_id]
    nodes_by_depth = self._nodes_by_depth
    depth_keys = list(nodes_by_depth.keys())
    # Deepest nodes first, so every node's inputs are computed before it runs.
    depth_keys.sort(reverse=True)
    for depth in depth_keys:
        nodes = nodes_by_depth[depth]
        for node in nodes:
            if node.is_input:
                continue
            # Skip nodes whose inputs are not (all) available; this happens
            # for graph branches not reachable from the requested inputs.
            if any((t_id not in tensor_dict for t_id in node.flat_input_ids)):
                continue
            args, kwargs = node.map_arguments(tensor_dict)
            outputs = node.layer(*args, **kwargs)
            # Register each produced tensor with as many copies as it has
            # downstream consumers.
            for x_id, y in zip(node.flat_output_ids, nest.flatten(outputs)):
                tensor_dict[x_id] = [y] * tensor_usage_count[x_id]
    output_tensors = []
    for x in self.outputs:
        x_id = str(id(x))
        assert x_id in tensor_dict, 'Could not compute output ' + str(x)
        output_tensors.append(tensor_dict[x_id].pop())
    return nest.pack_sequence_as(self._nested_outputs, output_tensors)
Computes output tensors for new inputs. # Note: - Can be run on non-Keras tensors. Args: inputs: Tensor or nested structure of Tensors. training: Boolean learning phase. mask: (Optional) Tensor or nested structure of Tensors. Returns: output_tensors
github-repos
def indexes(self, collection=None):
    """Return a list with the current indexes.

    Skips the mandatory ``_id_`` indexes.

    Args:
        collection (str): Optional collection name to restrict the search to.

    Returns:
        list: Index names found.
    """
    found = []
    for coll_name in self.collections():
        # When a collection filter is given, skip all other collections.
        if collection and collection != coll_name:
            continue
        found.extend(
            idx_name
            for idx_name in self.db[coll_name].index_information()
            if idx_name != '_id_'
        )
    return found
Return a list with the current indexes Skip the mandatory _id_ indexes Args: collection(str) Returns: indexes(list)
juraj-google-style
def do_transaction(args):
    """Runs the transaction list or show command, printing to the console.

    Args:
        args: The parsed arguments sent to the command at runtime.
    """
    rest_client = RestClient(args.url, args.user)
    if args.subcommand == 'list':
        transactions = rest_client.list_transactions()
        keys = ('transaction_id', 'family', 'version', 'size', 'payload')
        # 'version' is shortened to 'VERS' in the printed header row.
        headers = tuple(k.upper() if k != 'version' else 'VERS' for k in keys)

        def parse_txn_row(transaction, decode=True):
            # Turn one transaction dict into a row tuple matching `keys`.
            decoded = b64decode(transaction['payload'])
            return (
                transaction['header_signature'],
                transaction['header']['family_name'],
                transaction['header']['family_version'],
                len(decoded),
                str(decoded) if decode else transaction['payload'])

        if args.format == 'default':
            fmt.print_terminal_table(headers, transactions, parse_txn_row)
        elif args.format == 'csv':
            fmt.print_csv(headers, transactions, parse_txn_row)
        elif args.format == 'json' or args.format == 'yaml':
            # Structured output keeps the raw base64 payload (decode=False).
            data = [{k: d for k, d in zip(keys, parse_txn_row(b, False))}
                    for b in transactions]
            if args.format == 'yaml':
                fmt.print_yaml(data)
            elif args.format == 'json':
                fmt.print_json(data)
            else:
                raise AssertionError('Missing handler: {}'.format(args.format))
        else:
            raise AssertionError('Missing handler: {}'.format(args.format))
    if args.subcommand == 'show':
        output = rest_client.get_transaction(args.transaction_id)
        if args.key:
            # A key may address the payload, a top-level field, or a header
            # field, checked in that order.
            if args.key == 'payload':
                output = b64decode(output['payload'])
            elif args.key in output:
                output = output[args.key]
            elif args.key in output['header']:
                output = output['header'][args.key]
            else:
                raise CliException(
                    'Key "{}" not found in transaction or header'.format(
                        args.key))
        if args.format == 'yaml':
            fmt.print_yaml(output)
        elif args.format == 'json':
            fmt.print_json(output)
        else:
            raise AssertionError('Missing handler: {}'.format(args.format))
Runs the transaction list or show command, printing to the console Args: args: The parsed arguments sent to the command at runtime
juraj-google-style
def _should_get_another_batch(self, content):
    """Whether to issue another GET bucket call.

    Args:
        content: Response XML from the previous GET bucket call.

    Returns:
        True if another call is needed (``self._options`` is updated with the
        next marker). False otherwise.
    """
    # A small explicit max-keys fits into one response; no follow-up needed.
    if ('max-keys' in self._options and
            self._options['max-keys'] <= common._MAX_GET_BUCKET_RESULT):
        return False
    elements = self._find_elements(
        content, set([common._T_IS_TRUNCATED, common._T_NEXT_MARKER]))
    truncated = elements.get(common._T_IS_TRUNCATED, 'false')
    if truncated.lower() != 'true':
        return False
    next_marker = elements.get(common._T_NEXT_MARKER)
    if next_marker is None:
        # Truncated but no marker supplied: clear any stale marker and stop.
        self._options.pop('marker', None)
        return False
    self._options['marker'] = next_marker
    return True
Whether to issue another GET bucket call. Args: content: response XML. Returns: True if should, also update self._options for the next request. False otherwise.
juraj-google-style
def _get_weight_param_summary(wp):
    """Get a summary of _NeuralNetwork_pb2.WeightParams.

    Args:
        wp: The _NeuralNetwork_pb2.WeightParams message to display.

    Returns:
        str: A summary for ``wp`` (concatenated fragments, no separators).
    """
    parts = []
    if wp.HasField('quantization'):
        nbits = wp.quantization.numberOfBits
        if wp.quantization.HasField('linearQuantization'):
            quant_type = 'linearly'
        else:
            quant_type = 'lookup-table'
        parts.append('{}-bit {} quantized'.format(nbits, quant_type))
    if len(wp.floatValue) > 0:
        parts.append('({} floatValues)'.format(len(wp.floatValue)))
    if len(wp.float16Value) > 0:
        parts.append('({} bytes float16Values)'.format(len(wp.float16Value)))
    if len(wp.rawValue) > 0:
        parts.append('({} bytes rawValues)'.format(len(wp.rawValue)))
    return ''.join(parts)
Get a summary of _NeuralNetwork_pb2.WeightParams Args: wp : _NeuralNetwork_pb2.WeightParams - the _NeuralNetwork_pb2.WeightParams message to display Returns: a str summary for wp
juraj-google-style
def get_aggregation_propensity(self, seq, outdir, cutoff_v=5, cutoff_n=5, run_amylmuts=False):
    """Run the AMYLPRED2 web server and get the consensus aggregation propensity.

    Args:
        seq (str, Seq, SeqRecord): Amino acid sequence.
        outdir (str): Directory to where output files should be saved.
        cutoff_v (int): Minimal number of methods that must agree on a residue
            being aggregation-prone.
        cutoff_n (int): Minimal number of consecutive residues to count as a
            'stretch' of aggregation-prone region.
        run_amylmuts (bool): Whether to also run the (slow, non-deterministic)
            AMYLMUTS method. Defaults to False.

    Returns:
        int: Number of aggregation-prone segments on an unfolded protein
        sequence.
    """
    sequence = ssbio.protein.sequence.utils.cast_to_str(seq)
    results = self.run_amylpred2(seq=sequence, outdir=outdir,
                                 run_amylmuts=run_amylmuts)
    # Only the index is reported; the per-residue confidence is discarded.
    agg_index, _agg_conf = self.parse_for_consensus_aggregation(
        N=len(sequence), results=results, cutoff_v=cutoff_v, cutoff_n=cutoff_n)
    return agg_index
Run the AMYLPRED2 web server for a protein sequence and get the consensus result for aggregation propensity. Args: seq (str, Seq, SeqRecord): Amino acid sequence outdir (str): Directory to where output files should be saved cutoff_v (int): The minimal number of methods that agree on a residue being an aggregation-prone residue cutoff_n (int): The minimal number of consecutive residues to be considered as a 'stretch' of aggregation-prone region run_amylmuts (bool): If AMYLMUTS method should be run, default False. AMYLMUTS is optional as it is the most time consuming and generates a slightly different result every submission. Returns: int: Aggregation propensity - the number of aggregation-prone segments on an unfolded protein sequence
codesearchnet
def decode(token, certs=None, verify=True, audience=None):
    """Decode and verify a JWT.

    Args:
        token (str): The encoded JWT.
        certs (Union[str, bytes, Mapping[str, Union[str, bytes]]]): The
            certificate used to validate the JWT signature. If bytes or
            string, it must be the public key certificate in PEM format. If a
            mapping, it must map key IDs to PEM certificates and contain the
            key ID from the token's header.
        verify (bool): Whether to perform signature and claim validation.
        audience (str): The expected 'aud' claim; not checked if None.

    Returns:
        Mapping[str, str]: The deserialized JSON payload in the JWT.

    Raises:
        ValueError: If any verification check fails.
    """
    header, payload, signed_section, signature = _unverified_decode(token)
    if not verify:
        return payload
    # BUG FIX: collections.Mapping was removed in Python 3.10; the ABC lives
    # in collections.abc.
    if isinstance(certs, collections.abc.Mapping):
        key_id = header.get('kid')
        if key_id:
            if key_id not in certs:
                raise ValueError(
                    'Certificate for key id {} not found.'.format(key_id))
            certs_to_check = [certs[key_id]]
        else:
            # No key id in the header: try every known certificate.
            certs_to_check = certs.values()
    else:
        certs_to_check = certs
    if not crypt.verify_signature(signed_section, signature, certs_to_check):
        raise ValueError('Could not verify token signature.')
    _verify_iat_and_exp(payload)
    if audience is not None:
        claim_audience = payload.get('aud')
        if audience != claim_audience:
            raise ValueError('Token has wrong audience {}, expected {}'.format(
                claim_audience, audience))
    return payload
Decode and verify a JWT. Args: token (str): The encoded JWT. certs (Union[str, bytes, Mapping[str, Union[str, bytes]]]): The certificate used to validate the JWT signature. If bytes or string, it must be the public key certificate in PEM format. If a mapping, it must be a mapping of key IDs to public key certificates in PEM format. The mapping must contain the same key ID that's specified in the token's header. verify (bool): Whether to perform signature and claim validation. Verification is done by default. audience (str): The audience claim, 'aud', that this JWT should contain. If None then the JWT's 'aud' parameter is not verified. Returns: Mapping[str, str]: The deserialized JSON payload in the JWT. Raises: ValueError: if any verification checks failed.
codesearchnet
def set_card_standard(self, title, text, smallImageUrl=None, largeImageUrl=None):
    """Set response card as Standard type.

    title, text, and image URLs cannot exceed 8,000 characters.

    Args:
        title: str. Title of the card.
        text: str. Content of the card.
        smallImageUrl: str. URL of small image (max 2,000 characters;
            recommended 720w x 480h).
        largeImageUrl: str. URL of large image (max 2,000 characters;
            recommended 1200w x 800h).
    """
    card = self.response.card
    card.type = 'Standard'
    card.title = title
    card.text = text
    # Image URLs are only attached when provided (truthy).
    if smallImageUrl:
        card.image.smallImageUrl = smallImageUrl
    if largeImageUrl:
        card.image.largeImageUrl = largeImageUrl
Set response card as standard type. title, text, and image cannot exceed 8,000 characters. Args: title: str. Title of Simple or Standard type card. text: str. Content of Standard type card. smallImageUrl: str. URL of small image. Cannot exceed 2,000 characters. Recommended pixel size: 720w x 480h. largeImageUrl: str. URL of large image. Cannot exceed 2,000 characters. Recommended pixel size: 1200w x 800h.
codesearchnet
def bfs(graph, start):
    """Finds the shortest string using BFS.

    Args:
        graph (DFA): The DFA states.
        start (DFA state): The DFA initial state.

    Returns:
        str: The shortest accepted string, or None if no final state is
        reachable.
    """
    # Each queue entry is a path: a list of [symbol, state] pairs.
    pending = [[['', start]]]
    seen = []
    while pending:
        path = pending.pop(0)
        current = path[-1][1]
        if current.stateid in seen:
            continue
        seen.append(current.stateid)
        # A non-infinite final weight marks an accepting state.
        if current.final != TropicalWeight(float('inf')):
            return "".join(step[0] for step in path)
        for arc in current.arcs:
            symbol = graph.isyms.find(arc.ilabel)
            successor = graph[arc.nextstate]
            if successor.stateid not in seen:
                pending.append(path + [[symbol, successor]])
Finds the shortest string using BFS Args: graph (DFA): The DFA states start (DFA state): The DFA initial state Returns: str: The shortest string
juraj-google-style
def aggregate_kernel_metrics(metrics: list[str], kernel_metrics: list[dict[str, tuple[str, str]]]) -> list[list[str]]:
    """Aggregates and returns the metrics for the given kernels.

    Args:
        metrics: List of metric names to aggregate.
        kernel_metrics: Per-kernel dictionaries mapping metric name to a
            ``(value, unit)`` pair.

    Returns:
        List of rows ``[name, value, unit]`` per metric.

    Raises:
        app.UsageError: On empty input, missing metric, or unit mismatch.
    """
    if not kernel_metrics:
        raise app.UsageError('no metrics found')
    collected: dict[str, tuple[list[float], str]] = {}
    for per_kernel in kernel_metrics:
        for metric_name in metrics:
            if metric_name not in per_kernel:
                raise app.UsageError(f"metric '{metric_name}' is not found")
            raw_value, unit = per_kernel[metric_name]
            if metric_name not in collected:
                collected[metric_name] = ([], unit)
            # Every kernel must report the metric in the same unit.
            if collected[metric_name][1] != unit:
                raise app.UsageError(f"unit mismatch for metric '{metric_name}'")
            collected[metric_name][0].append(float(raw_value.replace(',', '')))
    rows = []
    for metric_name, (values, unit) in collected.items():
        aggregated = aggregate(values, metric_name)
        # Integers render without decimals; everything else to 2 places.
        if round(aggregated) == aggregated:
            rendered = f'{round(aggregated)}'
        else:
            rendered = f'{round(aggregated, 2)}'
        rows.append([metric_name, rendered, unit])
    return rows
Aggregates and returns the metrics for the given kernels. Args: metrics: list of metrics names to print kernel_metrics: dictionary of metrics by kernel Returns: list of rows [name, value, unit] per metric.
github-repos
def gather(values, index, name='segmented_gather'):
    """Gathers from *values* using the index map.

    For each element in the domain of the index map this operation looks up a
    value for that index in *values*. Two elements from the same segment
    always get assigned the same value.

    Args:
        values (`torch.Tensor` of shape (B1, ..., Bn, num_segments, V1, ...)):
            Tensor with segment values.
        index (`IndexMap` of shape (B1, ..., Bn, I1, ..., Ik)): IndexMap.
        name (`str`, *optional*): Name for the operation. Currently unused.

    Returns:
        `torch.Tensor` of shape (B1, ..., Bn, I1, ..., Ik, V1, ...) with the
        gathered values.
    """
    idx = index.indices
    # With no trailing value dims, flatten the indices, gather, and restore
    # the index shape; otherwise broadcast the indices over the value dims.
    if len(values.shape[index.batch_dims:]) < 2:
        flat_idx = idx.view(values.size()[0], -1)
        return torch.gather(values, index.batch_dims, flat_idx).view(idx.size())
    expanded = idx.unsqueeze(-1).expand(values.shape)
    return torch.gather(values, index.batch_dims, expanded)
Gathers from *values* using the index map. For each element in the domain of the index map this operation looks up a value for that index in *values*. Two elements from the same segment always get assigned the same value. Args: values (`torch.Tensor` of shape (B1, ..., Bn, num_segments, V1, ...)): Tensor with segment values. index (`IndexMap` of shape (B1, ..., Bn, I1, ..., Ik)): IndexMap. name (`str`, *optional*, defaults to 'segmented_gather'): Name for the operation. Currently not used Returns: `tuple(torch.Tensor)`: Tensor of shape (B1, ..., Bn, I1, ..., Ik, V1, ...) with the gathered values.
github-repos
def GenesisBlock() -> Block:
    """Create the GenesisBlock.

    Returns:
        Block: The hard-coded genesis block of the chain.
    """
    previous_hash = UInt256(data=bytearray(32))
    genesis_time = int(datetime(2016, 7, 15, 15, 8, 21,
                                tzinfo=pytz.utc).timestamp())
    block_index = 0
    nonce = 2083236893
    next_consensus = Blockchain.GetConsensusAddress(Blockchain.StandbyValidators())
    script = Witness(bytearray(0), bytearray(PUSHT))

    miner_tx = MinerTransaction()
    miner_tx.Nonce = nonce

    # Issue the full SystemShare supply to a multi-sig contract requiring a
    # majority of the standby validators.
    validators = Blockchain.StandbyValidators()
    redeem_script = Contract.CreateMultiSigRedeemScript(
        int(len(validators) / 2) + 1, Blockchain.StandbyValidators())
    output = TransactionOutput(Blockchain.SystemShare().Hash,
                               Blockchain.SystemShare().Amount,
                               Crypto.ToScriptHash(redeem_script))
    issue_tx = IssueTransaction([], [output], [], [script])

    return Block(previous_hash, genesis_time, block_index, nonce,
                 next_consensus, script,
                 [miner_tx, Blockchain.SystemShare(), Blockchain.SystemCoin(), issue_tx],
                 True)
Create the GenesisBlock. Returns: BLock:
codesearchnet
def _prepare_controller(self, controller, template):
    """Wrap the controller to render a jinja template or return JSON.

    Args:
        controller (coroutine): The coroutine to be wrapped.
        template (str): The template name, or None for a JSON response.

    Returns:
        coroutine: A wrapped coroutine of the controller.
    """
    # No template means the controller's result is serialized as JSON.
    if not template:
        return self._parse_json_response(controller)
    return aiohttp_jinja2.template(template_name=template)(controller)
Wraps the controller wether to render a jinja template or to return a json response (if template is None) Args: controller (coroutine): the coroutine to be wrapped template (str): the name of the template or None Returns: coroutine: a wrapped coroutine of the controller
juraj-google-style
def GetUnclaimedCoins(self):
    """Gets coins in the wallet that have not been 'claimed'.

    These are coins that have not been redeemed for their gas value on the
    blockchain.

    Returns:
        list: ``neo.Wallet.Coin`` objects that have 'claimable' value.
    """
    unclaimed = []
    neo_asset = Blockchain.SystemShare().Hash
    for coin in self.GetCoins():
        if coin.Output.AssetId != neo_asset:
            continue
        state = coin.State
        # Claimable = confirmed AND spent, but not yet claimed, frozen, or
        # watch-only.
        has_required = ((state & CoinState.Confirmed) > 0 and
                        (state & CoinState.Spent) > 0)
        lacks_blocking = ((state & CoinState.Claimed) == 0 and
                          (state & CoinState.Frozen) == 0 and
                          (state & CoinState.WatchOnly) == 0)
        if has_required and lacks_blocking:
            unclaimed.append(coin)
    return unclaimed
Gets coins in the wallet that have not been 'claimed', or redeemed for their gas value on the blockchain. Returns: list: a list of ``neo.Wallet.Coin`` that have 'claimable' value
codesearchnet
def _build_parser(self):
    """Build command line argument parser.

    Returns:
        :class:`argparse.ArgumentParser`: the command line argument parser.
        To parse command line arguments and update the configuration
        accordingly, use the :meth:`parse_args` method instead.
    """
    main_parser = argparse.ArgumentParser(description=self.common.help,
                                          prefix_chars='-+')
    self._add_options_to_parser(self._opt_bare, main_parser)
    main_parser.set_defaults(**self.common.defaults)
    # Bare-invocation defaults override the common ones when defined.
    if self.bare is not None:
        main_parser.set_defaults(**self.bare.defaults)
    subparsers = main_parser.add_subparsers(dest='loam_sub_name')
    for cmd_name, meta in self.subcmds.items():
        sub_parser = subparsers.add_parser(cmd_name, prefix_chars='+-',
                                           help=meta.help)
        self._add_options_to_parser(self._opt_cmds[cmd_name], sub_parser)
        sub_parser.set_defaults(**meta.defaults)
    return main_parser
Build command line argument parser. Returns: :class:`argparse.ArgumentParser`: the command line argument parser. You probably won't need to use it directly. To parse command line arguments and update the :class:`ConfigurationManager` instance accordingly, use the :meth:`parse_args` method.
codesearchnet
def get_alignment_df(a_aln_seq, b_aln_seq, a_seq_id=None, b_seq_id=None):
    """Summarize two alignment strings in a dataframe.

    Args:
        a_aln_seq (str): Aligned sequence string.
        b_aln_seq (str): Aligned sequence string.
        a_seq_id (str): Optional ID of a_aln_seq.
        b_seq_id (str): Optional ID of b_aln_seq.

    Returns:
        DataFrame: A per-residue level annotation of the alignment with
        columns id_a, id_b, type, id_a_aa, id_a_pos, id_b_aa, id_b_pos.

    Raises:
        ValueError: If the two aligned strings differ in length.
    """
    if len(a_aln_seq) != len(b_aln_seq):
        raise ValueError('Sequence lengths not equal - was an alignment run?')
    a_seq_id = a_seq_id or 'a_seq'
    b_seq_id = b_seq_id or 'b_seq'
    a_aln_seq = ssbio.protein.sequence.utils.cast_to_str(a_aln_seq)
    b_aln_seq = ssbio.protein.sequence.utils.cast_to_str(b_aln_seq)

    a_idx = 1
    b_idx = 1
    appender = []
    for a, b in zip(a_aln_seq, b_aln_seq):
        # BUG FIX: a gap-gap column (possible when the pair comes from a
        # multiple alignment) previously left aa_flag unbound or stale from
        # the prior iteration. Neither sequence has a residue here, so the
        # column is skipped and neither position counter advances.
        if a == '-' and b == '-':
            continue
        if a == b:
            aa_flag = 'match'
        elif a == '-':
            aa_flag = 'insertion'
        elif b == '-':
            aa_flag = 'deletion'
        elif a == 'X' or b == 'X':
            aa_flag = 'unresolved'
        else:
            aa_flag = 'mutation'

        row = {'id_a': a_seq_id, 'id_b': b_seq_id, 'type': aa_flag}
        if aa_flag in ('match', 'unresolved', 'mutation'):
            row['id_a_aa'] = a
            row['id_a_pos'] = int(a_idx)
            row['id_b_aa'] = b
            row['id_b_pos'] = int(b_idx)
            a_idx += 1
            b_idx += 1
        elif aa_flag == 'deletion':
            row['id_a_aa'] = a
            row['id_a_pos'] = int(a_idx)
            a_idx += 1
        elif aa_flag == 'insertion':
            row['id_b_aa'] = b
            row['id_b_pos'] = int(b_idx)
            b_idx += 1
        appender.append(row)

    cols = ['id_a', 'id_b', 'type', 'id_a_aa', 'id_a_pos', 'id_b_aa', 'id_b_pos']
    alignment_df = pd.DataFrame.from_records(appender, columns=cols)
    return alignment_df.fillna(value=np.nan)
Summarize two alignment strings in a dataframe. Args: a_aln_seq (str): Aligned sequence string b_aln_seq (str): Aligned sequence string a_seq_id (str): Optional ID of a_seq b_seq_id (str): Optional ID of b_aln_seq Returns: DataFrame: a per-residue level annotation of the alignment
juraj-google-style
def from_statements(
    cls, sts: List[Influence], assign_default_polarities: bool = True
):
    """Construct an AnalysisGraph object from a list of INDRA statements.

    Unknown polarities are set to positive by default.

    Args:
        sts: A list of INDRA Statements.
        assign_default_polarities: Whether to replace None polarities with 1.

    Returns:
        An AnalysisGraph instance constructed from the statements.
    """
    grouped = {}
    for statement in sts:
        if assign_default_polarities:
            for delta in deltas(statement):
                if delta["polarity"] is None:
                    delta["polarity"] = 1
        concepts = nameTuple(statement)
        # Self-loops are dropped.
        if concepts[0] == concepts[1]:
            continue
        # Only keep statements with a defined polarity on every delta.
        if not all(exists(delta["polarity"]) for delta in deltas(statement)):
            continue
        grouped.setdefault(concepts, []).append(statement)
    edges = [
        (*concepts, {"InfluenceStatements": statements})
        for concepts, statements in grouped.items()
    ]
    return cls(edges)
Construct an AnalysisGraph object from a list of INDRA statements. Unknown polarities are set to positive by default. Args: sts: A list of INDRA Statements Returns: An AnalysisGraph instance constructed from a list of INDRA statements.
juraj-google-style
def sample(self, size=None):
    """Sample from the prior distribution over datasets.

    Args:
        size (Optional[int]): The number of samples to draw.

    Returns:
        array[n] or array[size, n]: The samples from the prior distribution
        over datasets.
    """
    self._recompute()
    # Draw white noise, then correlate it through the Cholesky factor L.
    if size is None:
        white_noise = np.random.randn(len(self._t))
    else:
        white_noise = np.random.randn(len(self._t), size)
    correlated = self.solver.dot_L(white_noise)
    # BUG FIX: the original text had the invalid syntax `n[(:, 0)]` and
    # `[(None, :)]`; the intended expressions use real slice indexing.
    if size is None:
        return self.mean.get_value(self._t) + correlated[:, 0]
    return self.mean.get_value(self._t)[None, :] + correlated.T
Sample from the prior distribution over datasets Args: size (Optional[int]): The number of samples to draw. Returns: array[n] or array[size, n]: The samples from the prior distribution over datasets.
codesearchnet
def __init__(self, data, limit=None):
    """Initialise the Action object.

    Args:
        data (MultiTaskData): The processed data from the task that should
            be passed on to successor tasks.
        limit (list): A list of names of all immediate successor tasks that
            should be executed.
    """
    # Stored privately; exposed via the class's accessors.
    self._data = data
    self._limit = limit
Initialise the Action object. Args: data (MultiTaskData): The processed data from the task that should be passed on to successor tasks. limit (list): A list of names of all immediate successor tasks that should be executed.
juraj-google-style
def prepare_loss_functions(loss, output_names):
    """Converts loss to a list of loss functions.

    Args:
        loss: String (name of objective function), objective function or
            `tf.losses.Loss` instance, or a dict/list of per-output losses.
        output_names: List of model output names.

    Returns:
        A list of loss objective functions, one per output.

    Raises:
        ValueError: If loss is a dict with keys not in model output names, or
            if loss is a list with len not equal to model outputs.
    """
    if isinstance(loss, collections.abc.Mapping):
        generic_utils.check_for_unexpected_keys('loss', loss, output_names)
        per_output = []
        for name in output_names:
            if name not in loss:
                # Outputs without a loss entry get None (no loss applied).
                logging.warning('Output {0} missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to {0}.'.format(name))
            per_output.append(get_loss_function(loss.get(name, None)))
        return per_output
    if isinstance(loss, str):
        return [get_loss_function(loss) for _ in output_names]
    if isinstance(loss, collections.abc.Sequence):
        if len(loss) != len(output_names):
            raise ValueError('When passing a list as loss, it should have one entry per model outputs. The model has {} outputs, but you passed loss={}'.format(len(output_names), loss))
        return nest.map_structure(get_loss_function, loss)
    # A single loss object/function is replicated across all outputs.
    return [get_loss_function(loss) for _ in range(len(output_names))]
Converts loss to a list of loss functions. Args: loss: String (name of objective function), objective function or `tf.losses.Loss` instance. See `tf.losses`. If the model has multiple outputs, you can use a different loss on each output by passing a dictionary or a list of losses. The loss value that will be minimized by the model will then be the sum of all individual losses. output_names: List of model output names. Returns: A list of loss objective functions. Raises: ValueError: If loss is a dict with keys not in model output names, or if loss is a list with len not equal to model outputs.
github-repos
def _parse_test_option_args(self, argv):
    """Parse --test-pipeline-options from the command line.

    Args:
        argv: An iterable of command line arguments to be used. If not
            specified then sys.argv is used as input for parsing arguments.

    Returns:
        A list of options parseable by argparse, or an empty list when no
        pipeline options were supplied.

    Raises:
        SkipTest: For integration tests run without pipeline options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--test-pipeline-options',
                        type=str,
                        action='store',
                        help='only run tests providing service options')
    parser.add_argument('--not-use-test-runner-api',
                        action='store_true',
                        default=False,
                        help='whether not to use test-runner-api')
    parsed, _unused_argv = parser.parse_known_args(argv)
    # Fall back to options set at the pytest session level.
    options_str = (parsed.test_pipeline_options or
                   TestPipeline.pytest_test_pipeline_options)
    if self.is_integration_test and (not options_str):
        raise SkipTest('IT is skipped because --test-pipeline-options is not specified')
    self.not_use_test_runner_api = parsed.not_use_test_runner_api
    return shlex.split(options_str) if options_str else []
Parse value of command line argument: --test-pipeline-options to get pipeline options. Args: argv: An iterable of command line arguments to be used. If not specified then sys.argv will be used as input for parsing arguments. Returns: An argument list of options that can be parsed by argparser or directly build a pipeline option.
github-repos
def cached_name_scope(name, top_level=True):
    """Context which opens and caches a new name scope, or reenters one.

    Args:
        name: Base name of the scope.
        top_level (bool): If True, the name scope will always be top-level;
            it will not be nested under any existing name scope of the caller.

    Yields:
        str: The (possibly cached) name scope actually entered.
    """
    if not top_level:
        # Nest under the caller's current scope, when there is one.
        caller_scope = tf.get_default_graph().get_name_scope()
        if caller_scope:
            name = caller_scope + '/' + name
    scope = _get_cached_ns(name)
    with tf.name_scope(scope):
        yield scope
Return a context which either opens and caches a new name scope, or reenter an existing one. Args: top_level(bool): if True, the name scope will always be top-level. It will not be nested under any existing name scope of the caller.
juraj-google-style
def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
    """Instantiate a config (or a derived class) from a backbone config.

    Args:
        backbone_config ([`PretrainedConfig`]): The backbone configuration.
        **kwargs: Extra keyword arguments forwarded to the constructor.

    Returns:
        An instance of the configuration class.
    """
    config_kwargs = dict(kwargs)
    config_kwargs['backbone_config'] = backbone_config
    return cls(**config_kwargs)
Instantiate a [`Mask2FormerConfig`] (or a derived class) from a pre-trained backbone model configuration. Args: backbone_config ([`PretrainedConfig`]): The backbone configuration. Returns: [`Mask2FormerConfig`]: An instance of a configuration object
github-repos
def to_str(value):
    """Convert the input to a string, unless it is a Python 2 unicode string.

    Unicode strings are native strings in Python 3, but ``str()`` cannot be
    invoked on them in Python 2, so pass them through unchanged there.

    Args:
        value: The value to convert to a string.

    Returns:
        str or unicode: The string representation of the value, or the
        unicode string itself.
    """
    # Short-circuit keeps `six` untouched on Python 3.
    is_py2_text = sys.version_info.major < 3 and isinstance(value, six.string_types)
    if is_py2_text:
        return value
    return str(value)
Convert the input to a string, unless it is a unicode string in Python 2. Unicode strings are supported as native strings in Python 3, but ``str()`` cannot be invoked on unicode strings in Python 2, so we need to check for that case when converting user-specified values to strings. Args: value: The value to convert to a string. Returns: str or unicode: The string representation of the value or the unicode string itself.
juraj-google-style
def get_idiomatic_name_in_language(cls, name, language):
    """Get the idiomatic form of a name for the given language.

    Args:
        name (str): The name to convert.
        language (str): The language to use.

    Returns:
        The name converted for the given language, or the original name when
        no converter exists.

    Example:
        get_idiomatic_name_in_language("EnterpriseNetwork", "python")
        >>> enterprise_network
    """
    # Fast path: the converter (or its absence, stored as None) is cached.
    if language in cls.idiomatic_methods_cache:
        cached = cls.idiomatic_methods_cache[language]
        return cached(name) if cached else name
    # Next, look for a language plugin providing the converter.
    found, plugin_method = load_language_plugins(language, 'get_idiomatic_name')
    if found:
        cls.idiomatic_methods_cache[language] = plugin_method
        return plugin_method(name) if plugin_method else name
    # Finally, fall back to the built-in generator module for the language.
    module = importlib.import_module('.lang.%s' % language,
                                     package='monolithe.generators')
    method = getattr(module, 'get_idiomatic_name', None)
    cls.idiomatic_methods_cache[language] = method
    return method(name) if method else name
Get the name for the given language Args: name (str): the name to convert language (str): the language to use Returns: a name in the given language Example: get_idiomatic_name_in_language("EnterpriseNetwork", "python") >>> enterprise_network
codesearchnet
def swo_speed_info(self):
    """Retrieves information about the supported SWO speeds.

    Args:
        self (JLink): The ``JLink`` instance.

    Returns:
        A ``JLinkSWOSpeedInfo`` instance describing the target's supported
        SWO speeds.

    Raises:
        JLinkException: On error (negative DLL return code).
    """
    speed_info = structs.JLinkSWOSpeedInfo()
    status = self._dll.JLINKARM_SWO_Control(
        enums.JLinkSWOCommands.GET_SPEED_INFO, ctypes.byref(speed_info))
    if status < 0:
        raise errors.JLinkException(status)
    return speed_info
Retrieves information about the supported SWO speeds. Args: self (JLink): the ``JLink`` instance Returns: A ``JLinkSWOSpeedInfo`` instance describing the target's supported SWO speeds. Raises: JLinkException: on error
codesearchnet
def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:
    """Build and run the inference pipeline.

    Args:
        argv: Command line arguments defined for this example.
        save_main_session: Used for internal testing.
        test_pipeline: Used for internal testing.

    Returns:
        PipelineResult: The result of the finished pipeline run.
    """
    known_args, pipeline_args = parse_known_args(argv)
    options = PipelineOptions(pipeline_args)
    options.view_as(SetupOptions).save_main_session = save_main_session
    # Preprocess file names into image tensors before model invocation.
    model_handler = TFModelHandlerTensor(
        model_uri=known_args.model_path).with_preprocess_fn(
            lambda image_name: read_image(image_name, known_args.image_dir))
    pipeline = test_pipeline or beam.Pipeline(options=options)
    images = (pipeline
              | 'ReadImageNames' >> beam.io.ReadFromText(known_args.input)
              | 'FilterEmptyLines' >> beam.ParDo(filter_empty_lines))
    predictions = (images
                   | 'RunInference' >> RunInference(model_handler)
                   | 'PostProcessOutputs' >> beam.ParDo(PostProcessor()))
    _ = predictions | 'WriteOutput' >> beam.io.WriteToText(
        known_args.output, shard_name_template='', append_trailing_newlines=True)
    result = pipeline.run()
    result.wait_until_finish()
    return result
Args: argv: Command line arguments defined for this example. save_main_session: Used for internal testing. test_pipeline: Used for internal testing.
github-repos
def _validate_query_parameters(self, query, action_spec):
    """Check the query parameters against the action specification.

    Args:
        query: Query parameters to check.
        action_spec: Specification of the action.

    Returns:
        True if the query is valid (types match and all required query
        parameters are present), False otherwise.
    """
    declared = action_spec['parameters']
    seen = []
    for param_name, param_value in query.items():
        # Parameters not in the spec are silently ignored here.
        if param_name not in declared:
            continue
        seen.append(param_name)
        spec = declared[param_name]
        if spec['type'] == 'array':
            if not isinstance(param_value, list):
                return False
            for element in param_value:
                if not self.check_type(element, spec['items']['type']):
                    return False
        elif not self.check_type(param_value, spec['type']):
            return False
    # Every required query parameter must have been supplied.
    required = (name for name, spec in declared.items()
                if spec['in'] == 'query' and 'required' in spec and spec['required'])
    return all(name in seen for name in required)
Check the query parameter for the action specification. Args: query: query parameter to check. action_spec: specification of the action. Returns: True if the query is valid.
juraj-google-style
def __init__(self, x: int, *args, y: str, **kwargs) -> float:
    """Constructor.

    Args:
        x: Input 1.
        *args: Variable positional args.
        y: Input 2.
        **kwargs: Variable keyword args.
    """
    # All inputs are intentionally unused; discard them explicitly.
    del x
    del args
    del y
    del kwargs
Constructor. Args: x: Input 1. *args: Variable positional args. y: Input 2. **kwargs: Variable keyword args. Returns: The result.
github-repos
def get_poi(self, **kwargs):
    """Obtain a list of POI within the given radius.

    Args:
        latitude (double): Latitude in decimal degrees.
        longitude (double): Longitude in decimal degrees.
        types (list[int] | int): POI type IDs (or empty list to get all).
        radius (int): Radius (in meters) of the search.
        lang (str): Language code (*es* or *en*).

    Returns:
        Status boolean and parsed response (list[Poi]), or message string
        in case of error.
    """
    # Map keyword arguments onto the API's expected parameter names.
    params = {
        'coordinateX': kwargs.get('longitude'),
        'coordinateY': kwargs.get('latitude'),
        'tipos': util.ints_to_string(kwargs.get('types')),
        'Radius': kwargs.get('radius'),
        'cultureInfo': util.language_code(kwargs.get('lang'))
    }
    result = self.make_request('geo', 'get_poi', **params)
    if not util.check_result(result, 'poiList'):
        return False, 'UNKNOWN ERROR'
    values = util.response_list(result, 'poiList')
    return True, [emtype.Poi(**a) for a in values]
Obtain a list of POI in the given radius. Args: latitude (double): Latitude in decimal degrees. longitude (double): Longitude in decimal degrees. types (list[int] | int): POI IDs (or empty list to get all). radius (int): Radius (in meters) of the search. lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[Poi]), or message string in case of error.
juraj-google-style
def tar_extract(context):
    """Extract all members of each listed tar archive to its target path.

    Args:
        context: dictionary-like. context is mandatory.
            context['tar']['extract'] must exist and be a list of dicts,
            each with an 'in' key (path of the tar to extract) and an
            'out' key (destination directory).

    Example:
        tar:
            extract:
                - in: path/to/my.tar.xs
                  out: /path/extract/here
                - in: another/tar.xs
                  out: .

        This extracts path/to/my.tar.xs to /path/extract/here and
        another/tar.xs to $PWD.
    """
    logger.debug('start')
    read_mode = get_file_mode_for_reading(context)
    for entry in context['tar']['extract']:
        # Interpolate {formatting} expressions from the context before use.
        archive_path = context.get_formatted_string(entry['in'])
        target_dir = context.get_formatted_string(entry['out'])
        with tarfile.open(archive_path, read_mode) as archive:
            logger.debug(f"Extracting '{archive_path}' to '{target_dir}'")
            archive.extractall(target_dir)
            logger.info(f"Extracted '{archive_path}' to '{target_dir}'")
    logger.debug('end')
Extract all members of tar archive to specified path. Args: context: dictionary-like. context is mandatory. context['tar']['extract'] must exist. It's a dictionary. keys are the path to the tar to extract. values are the destination paths. Example: tar: extract: - in: path/to/my.tar.xs out: /path/extract/here - in: another/tar.xs out: . This will extract path/to/my.tar.xs to /path/extract/here, and also extract another/tar.xs to $PWD.
codesearchnet
def _get_object_from_version(cls, operations, ident):
    """Return a Python object defined in an Alembic migration script.

    Args:
        operations: instance of ``alembic.operations.base.Operations``
        ident: string of the format ``version.objname``

    Returns:
        The attribute named ``objname`` of the migration module whose
        revision id is ``version``.
    """
    # NOTE(review): assumes exactly one '.' in ident; more (or none)
    # raises ValueError from the unpacking.
    version, objname = ident.split(".")
    # Resolve the migration script module for the given revision id.
    module_ = operations.get_context().script.get_revision(version).module
    obj = getattr(module_, objname)
    return obj
Returns a Python object from an Alembic migration module (script). Args: operations: instance of ``alembic.operations.base.Operations`` ident: string of the format ``version.objname`` Returns: the object whose name is ``objname`` within the Alembic migration script identified by ``version``
juraj-google-style
def Trim(self):
    """Serialize a trimmed block: header plus transaction hashes only.

    Returns:
        bytes: the serialized unsigned header, a marker byte (1), the
        script, and the list of transaction hashes.
    """
    ms = StreamManager.GetStream()
    writer = BinaryWriter(ms)
    self.SerializeUnsigned(writer)
    # Marker byte separating the unsigned header from the script.
    writer.WriteByte(1)
    self.Script.Serialize(writer)
    # Only the hashes of the transactions are kept, not their bodies.
    writer.WriteHashes([tx.Hash.ToBytes() for tx in self.Transactions])
    retVal = ms.ToArray()
    StreamManager.ReleaseStream(ms)
    return retVal
Returns a byte array that contains only the block header and transaction hash. Returns: bytes:
codesearchnet
def as_dict_summary(self, print_subelectrodes=True):
    """Generate a summary dict of this electrode's properties.

    Args:
        print_subelectrodes: Also include data on all the possible
            subelectrodes (adjacent-pair and all-pair).

    Returns:
        A summary of this electrode's properties in dict format.
    """
    chg_comp = self.fully_charged_entry.composition
    dischg_comp = self.fully_discharged_entry.composition
    ion = self.working_ion
    d = {'average_voltage': self.get_average_voltage(),
         'max_voltage': self.max_voltage,
         'min_voltage': self.min_voltage,
         'max_delta_volume': self.max_delta_volume,
         'max_voltage_step': self.max_voltage_step,
         'capacity_grav': self.get_capacity_grav(),
         'capacity_vol': self.get_capacity_vol(),
         'energy_grav': self.get_specific_energy(),
         'energy_vol': self.get_energy_density(),
         'working_ion': self._working_ion.symbol,
         'nsteps': self.num_steps,
         'framework': self._vpairs[0].framework.to_data_dict,
         'formula_charge': chg_comp.reduced_formula,
         'id_charge': self.fully_charged_entry.entry_id,
         'formula_discharge': dischg_comp.reduced_formula,
         'id_discharge': self.fully_discharged_entry.entry_id,
         'fracA_charge': chg_comp.get_atomic_fraction(ion),
         'fracA_discharge': dischg_comp.get_atomic_fraction(ion),
         'max_instability': self.get_max_instability(),
         'min_instability': self.get_min_instability(),
         'material_ids': [itr_ent.entry_id for itr_ent in self._entries],
         'stable_material_ids': [itr_ent.entry_id for itr_ent in self.get_stable_entries()],
         'unstable_material_ids': [itr_ent.entry_id for itr_ent in self.get_unstable_entries()]}
    # Stability and muO2 data are optional: only include them when every
    # entry carries the corresponding field.
    if all([('decomposition_energy' in itr_ent.data) for itr_ent in self._entries]):
        d.update({'stability_charge': self.fully_charged_entry.data['decomposition_energy'],
                  'stability_discharge': self.fully_discharged_entry.data['decomposition_energy'],
                  'stability_data': {itr_ent.entry_id: itr_ent.data['decomposition_energy'] for itr_ent in self._entries}})
    if all([('muO2' in itr_ent.data) for itr_ent in self._entries]):
        d.update({'muO2_data': {itr_ent.entry_id: itr_ent.data['muO2'] for itr_ent in self._entries}})
    if print_subelectrodes:
        # Recurse into sub-electrodes with the flag off to avoid
        # unbounded recursion.
        f_dict = (lambda c: c.as_dict_summary(print_subelectrodes=False))
        d['adj_pairs'] = list(map(f_dict, self.get_sub_electrodes(adjacent_only=True)))
        d['all_pairs'] = list(map(f_dict, self.get_sub_electrodes(adjacent_only=False)))
    return d
Generate a summary dict. Args: print_subelectrodes: Also print data on all the possible subelectrodes. Returns: A summary of this electrode's properties in dict format.
codesearchnet
def target_optimizer_arguments(self):
    """Assemble the arguments for the target-network optimizer.

    Returns:
        Target optimizer arguments as dict: the global timestep, the
        target variables to update, the source variables they are updated
        towards, and (in the distributed case) the global model's target
        variables.
    """
    # Target-side variables: target network plus its distribution
    # parameters, iterated in sorted name order for determinism.
    variables = (self.target_network.get_variables() + [variable for name in sorted(self.target_distributions) for variable in self.target_distributions[name].get_variables()])
    # Source-side variables mirror the same layout for the online network.
    source_variables = (self.network.get_variables() + [variable for name in sorted(self.distributions) for variable in self.distributions[name].get_variables()])
    arguments = dict(time=self.global_timestep, variables=variables, source_variables=source_variables)
    if (self.global_model is not None):
        # Distributed setting: also expose the global model's target vars.
        arguments['global_variables'] = (self.global_model.target_network.get_variables() + [variable for name in sorted(self.global_model.target_distributions) for variable in self.global_model.target_distributions[name].get_variables()])
    return arguments
Returns the target optimizer arguments including the time, the list of variables to optimize, and various functions which the optimizer might require to perform an update step. Returns: Target optimizer arguments as dict.
codesearchnet
def define_singleton(carrier, name, cls, cls_args=None):
    """Define a lazily-created singleton property named ``name``.

    Creates a property with the given name; the ``cls`` instance is only
    created on the first access and cached afterwards.

    Args:
        carrier: an instance of the class where we want to reach the cls
            instance.
        name (str): the attribute name under which the instance is exposed.
        cls (type): the singleton object type.
        cls_args (dict): optional kwargs used when creating ``cls``.
    """
    # BUG FIX: the default was a mutable dict shared across calls; use a
    # None sentinel instead.
    if cls_args is None:
        cls_args = {}
    instance_name = '__{}'.format(name)
    setattr(carrier, instance_name, None)

    def getter(self):
        # Create on first access, then reuse the cached instance.
        instance = getattr(carrier, instance_name)
        if instance is None:
            instance = cls(**cls_args)
            setattr(carrier, instance_name, instance)
        return instance

    # NOTE: the property is installed on carrier's *class*, so all
    # instances of that class share the descriptor, while the cached
    # instance lives on the original carrier object.
    setattr(type(carrier), name, property(getter))
Creates a property with the given name, but the cls will created only with the first call Args: carrier: an instance of the class where want to reach the cls instance name (str): the variable name of the cls instance cls (type): the singleton object type cls_args (dict): optional dict for createing cls
codesearchnet
async def from_api_token(cls, token=None, api_cls=SlackBotApi):
    """Create a new instance from the API token.

    Arguments:
        token (:py:class:`str`, optional): The bot's API token (defaults
            to ``None``, which means looking in the environment).
        api_cls (:py:class:`type`, optional): The class to create as the
            ``api`` argument for API access (defaults to
            :py:class:`aslack.slack_api.SlackBotApi`).

    Returns:
        :py:class:`SlackBot`: The new instance.
    """
    # Fall back to environment-configured credentials when no token given.
    api = api_cls.from_env() if token is None else api_cls(api_token=token)
    data = await api.execute_method(cls.API_AUTH_ENDPOINT)
    return cls(data['user_id'], data['user'], api)
Create a new instance from the API token. Arguments: token (:py:class:`str`, optional): The bot's API token (defaults to ``None``, which means looking in the environment). api_cls (:py:class:`type`, optional): The class to create as the ``api`` argument for API access (defaults to :py:class:`aslack.slack_api.SlackBotApi`). Returns: :py:class:`SlackBot`: The new instance.
juraj-google-style
def inner(x1, x2):
    """Return the inner product of two tensors.

    Ordinary inner product of vectors for 1-D tensors (without complex
    conjugation); in higher dimensions a sum product over the last axes.

    Args:
        x1: First input tensor.
        x2: Second input tensor. The last dimension of `x1` and `x2`
            must match.

    Returns:
        Output tensor; the shape is determined by broadcasting the shapes
        of `x1` and `x2` after removing their last axes.
    """
    # Defer to the symbolic op when either input is symbolic so the call
    # participates in graph construction.
    if any_symbolic_tensors((x1, x2)):
        return Inner().symbolic_call(x1, x2)
    return backend.numpy.inner(x1, x2)
Return the inner product of two tensors. Ordinary inner product of vectors for 1-D tensors (without complex conjugation), in higher dimensions a sum product over the last axes. Multidimensional arrays are treated as vectors by flattening all but their last axes. The resulting dot product is performed over their last axes. Args: x1: First input tensor. x2: Second input tensor. The last dimension of `x1` and `x2` must match. Returns: Output tensor. The shape of the output is determined by broadcasting the shapes of `x1` and `x2` after removing their last axes.
github-repos
def count_params(weights):
    """Count the total number of scalars composing the weights.

    Args:
        weights: An iterable containing the weights on which to compute
            params.

    Returns:
        The total number of scalars composing the weights.
    """
    # De-duplicate by object identity so shared weight objects only
    # count once.
    deduped = {id(weight): weight for weight in weights}.values()
    total = 0
    for weight in deduped:
        dims = weight.shape.as_list()
        # Treat unknown (None) dimensions as 0, i.e. a zero-size axis.
        cleaned = [0 if dim is None else dim for dim in dims]
        total += np.prod(cleaned)
    return int(total)
Count the total number of scalars composing the weights. Args: weights: An iterable containing the weights on which to compute params Returns: The total number of scalars composing the weights
github-repos
def dump_ddl(metadata: MetaData,
             dialect_name: str,
             fileobj: TextIO = sys.stdout,
             checkfirst: bool = True) -> None:
    """Sends schema-creating DDL from the metadata to the dump engine.

    This makes ``CREATE TABLE`` statements.

    Args:
        metadata: SQLAlchemy :class:`MetaData`
        dialect_name: string name of SQL dialect to generate DDL in
        fileobj: file-like object to send DDL to
        checkfirst: if ``True``, use ``CREATE TABLE IF NOT EXISTS`` or
            equivalent.
    """
    def dump(querysql, *multiparams, **params):
        # Compile each statement for the chosen dialect and write it out,
        # terminated with a semicolon.
        compsql = querysql.compile(dialect=engine.dialect)
        writeline_nl(fileobj, "{sql};".format(sql=compsql))

    writeline_nl(fileobj,
                 sql_comment("Schema (for dialect {}):".format(dialect_name)))
    # BUG FIX: the engine-URL expression was mangled; build a mock engine
    # for the requested dialect that routes all executions to dump().
    engine = create_engine('{dialect}://'.format(dialect=dialect_name),
                           strategy='mock', executor=dump)
    metadata.create_all(engine, checkfirst=checkfirst)
Sends schema-creating DDL from the metadata to the dump engine. This makes ``CREATE TABLE`` statements. Args: metadata: SQLAlchemy :class:`MetaData` dialect_name: string name of SQL dialect to generate DDL in fileobj: file-like object to send DDL to checkfirst: if ``True``, use ``CREATE TABLE IF NOT EXISTS`` or equivalent.
juraj-google-style
def choose_palette(stream=sys.stdout, basic_palette=None):
    """Make a best effort to determine whether to enable ANSI sequences,
    and if so, which color palettes are available.

    Takes into account whether the stream is a TTY and the color-related
    environment variables (via color_is_forced / color_is_allowed).

    Arguments:
        stream: Which output file to check: stdout, stderr.
        basic_palette: Force the platform-dependent 16 color palette, for
            testing. List of 16 rgb-int tuples.

    Returns:
        None, str: 'basic', 'extended', or 'truecolor'.
    """
    result = None
    pal = basic_palette
    log.debug('console version: %s', __version__)
    log.debug('X11_RGB_PATHS: %r', X11_RGB_PATHS)
    if color_is_forced():
        # NOTE(review): if detect_palette_support() returns a falsy value
        # here, unpacking the 5-char string 'basic' into a 2-tuple raises
        # ValueError -- confirm the intended fallback.
        (result, pal) = (detect_palette_support(basic_palette=pal) or 'basic')
    elif (is_a_tty(stream=stream) and color_is_allowed()):
        (result, pal) = detect_palette_support(basic_palette=pal)
    # Build the nearest-color lookup tables for the detected palette.
    proximity.build_color_tables(pal)
    log.debug('Basic palette: %r', pal)
    log.debug('%r', result)
    return result
Make a best effort to automatically determine whether to enable ANSI sequences, and if so, which color palettes are available. This is the main function of the module—meant to be used unless something more specific is needed. Takes the following factors into account: - Whether output stream is a TTY. - ``TERM``, ``ANSICON`` environment variables - ``CLICOLOR``, ``NO_COLOR`` environment variables Arguments: stream: Which output file to check: stdout, stderr basic_palette: Force the platform-dependent 16 color palette, for testing. List of 16 rgb-int tuples. Returns: None, str: 'basic', 'extended', or 'truecolor'
codesearchnet
def Increment(self, key):
    """Atomically increment a count by 1, inserting the item if absent.

    Args:
        key: the key being counted.
    """
    with self._lock:
        if not _IsHashable(key):
            # Unhashable keys are tracked in parallel lists via linear
            # search instead of the dict.
            try:
                idx = self._unhashable_items.index(key)
            except ValueError:
                self._unhashable_items.append(key)
                self._unhashable_counts.append(1)
            else:
                self._unhashable_counts[idx] += 1
            return
        if key in self._d:
            self._d[key] += 1
        else:
            self._d[key] = 1
Atomically increment a count by 1. Insert the item if not present. Args: key: the key being counted.
github-repos
def sort_prefixes(orig, prefixes='@+'):
    """Return the prefixes present in ``orig``, ordered by priority.

    Args:
        orig (str): Unsorted prefixes.
        prefixes (str): Priority order, from highest-priv to lowest.

    Returns:
        str: The prefixes from ``orig`` in priority order.
    """
    # Walk the priority order and keep only prefixes actually present.
    return ''.join(prefix for prefix in prefixes if prefix in orig)
Returns a string of prefixes sorted by priority. Args: orig (str): Unsorted prefixes. prefixes (str): List of prefixes, from highest-priv to lowest.
codesearchnet
def get_all_models_including_attached_models(model):
    """Get a list of all models stored within a model, including the
    owning model itself.

    Args:
        model: the owning model.

    Returns:
        a list of all models.
    """
    # Models loaded through a textX model repository are recorded in its
    # filename_to_model map; otherwise only the model itself exists.
    if not hasattr(model, "_tx_model_repository"):
        return [model]
    all_models = list(
        model._tx_model_repository.all_models.filename_to_model.values())
    if model not in all_models:
        all_models.append(model)
    return all_models
get a list of all models stored within a model (including the owning model). Args: model: the owning model Returns: a list of all models
juraj-google-style
def dump(self):
    """Serialize the state of this InMemoryStorageEngine to a dict.

    Returns:
        dict: The serialized data.
    """
    # Serialize each stored record through its own asdict().
    storage = [entry.asdict() for entry in self.storage_data]
    streaming = [entry.asdict() for entry in self.streaming_data]
    return {u'storage_data': storage, u'streaming_data': streaming}
Serialize the state of this InMemoryStorageEngine to a dict. Returns: dict: The serialized data.
codesearchnet
def mktemp(self, container: Container) -> str:
    """Generates a temporary file inside a given container.

    Args:
        container: the container in which to create the temporary file.

    Returns:
        the path to the temporary file inside the given container.
    """
    r = self.__api.post('containers/{}/tempfile'.format(container.uid))
    if (r.status_code == 200):
        return r.json()
    # Non-200: delegate error reporting to the API helper.
    # NOTE(review): if the handler does not raise, this implicitly
    # returns None despite the declared str return type -- confirm.
    self.__api.handle_erroneous_response(r)
Generates a temporary file for a given container. Returns: the path to the temporary file inside the given container.
codesearchnet
def lsfiles(root=".", **kwargs):
    """Return only files from a directory listing.

    Arguments:
        root (str): Path to directory. Can be relative or absolute.
        **kwargs: Any additional arguments to be passed to ls().

    Returns:
        list of str: A list of file paths.

    Raises:
        OSError: If root directory does not exist.
    """
    listing = ls(root=root, **kwargs)
    # If root itself is a file, ls() already returned just that file.
    if isfile(root):
        return listing
    return [entry for entry in listing if isfile(path(root, entry))]
Return only files from a directory listing. Arguments: root (str): Path to directory. Can be relative or absolute. **kwargs: Any additional arguments to be passed to ls(). Returns: list of str: A list of file paths. Raises: OSError: If root directory does not exist.
juraj-google-style
def send_rpc_sync(self, conn_id, address, rpc_id, payload, timeout):
    """Synchronously send an RPC to this IOTile device.

    Wraps send_rpc_async with an event that blocks until the completion
    callback fires.

    Args:
        conn_id (int): A unique identifier that will refer to this connection
        address (int): the address of the tile that we wish to send the RPC to
        rpc_id (int): the 16-bit id of the RPC we want to call
        payload (bytearray): the payload of the command
        timeout (float): the number of seconds to wait for the RPC to execute

    Returns:
        dict: A dictionary with four elements
            'success': bool, whether we received a response to the RPC
            'failure_reason': failure description when success is False
            'status': the one byte RPC status code when success is True
            'payload': bytearray returned by the RPC when success is True
    """
    completed = threading.Event()
    outcome = {}

    def _on_rpc_finished(conn_id, adapter_id, status, reason, rpc_status, resp_payload):
        # Record the asynchronous result, then release the waiting caller.
        outcome['success'] = status
        outcome['failure_reason'] = reason
        outcome['status'] = rpc_status
        outcome['payload'] = resp_payload
        completed.set()

    self.send_rpc_async(conn_id, address, rpc_id, payload, timeout, _on_rpc_finished)
    completed.wait()
    return outcome
Synchronously send an RPC to this IOTile device Args: conn_id (int): A unique identifier that will refer to this connection address (int): the address of the tile that we wish to send the RPC to rpc_id (int): the 16-bit id of the RPC we want to call payload (bytearray): the payload of the command timeout (float): the number of seconds to wait for the RPC to execute Returns: dict: A dictionary with four elements 'success': a bool indicating whether we received a response to our attempted RPC 'failure_reason': a string with the reason for the failure if success == False 'status': the one byte status code returned for the RPC if success == True else None 'payload': a bytearray with the payload returned by RPC if success == True else None
codesearchnet
def create_fork_relation(self, forked_from_id, **kwargs):
    """Create a forked from/to relation between existing projects.

    Args:
        forked_from_id (int): The ID of the project that was forked from
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabCreateError: If the relation could not be created
    """
    fork_path = '/projects/%s/fork/%s' % (self.get_id(), forked_from_id)
    # Delegate to the gitlab HTTP layer; errors surface as exceptions.
    self.manager.gitlab.http_post(fork_path, **kwargs)
Create a forked from/to relation between existing projects. Args: forked_from_id (int): The ID of the project that was forked from **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabCreateError: If the relation could not be created
juraj-google-style
def find(self, name):
    """Get the index of a field in the flattened list given its
    (fully-qualified) name.

    Args:
        name: the fully-qualified name of the field.

    Returns:
        The index of the field, if found; else -1.
    """
    # Linear scan; enumerate replaces the C-style range(len(...)) loop.
    for index, field in enumerate(self):
        if field.name == name:
            return index
    return -1
Get the index of a field in the flattened list given its (fully-qualified) name. Args: name: the fully-qualified name of the field. Returns: The index of the field, if found; else -1.
juraj-google-style
def ParseFileObject(self, parser_mediator, file_object):
    """Parses a plist file-like object.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        file_object (dfvfs.FileIO): a file-like object.

    Raises:
        UnableToParseFile: when the file cannot be parsed.
    """
    filename = parser_mediator.GetFilename()
    file_size = file_object.get_size()

    if file_size <= 0:
        raise errors.UnableToParseFile(
            'File size: {0:d} bytes is less equal 0.'.format(file_size))

    # Guard against excessively large inputs; plists are expected small.
    if file_size > 50000000:
        raise errors.UnableToParseFile(
            'File size: {0:d} bytes is larger than 50 MB.'.format(file_size))

    top_level_object = self.GetTopLevel(file_object)
    if not top_level_object:
        raise errors.UnableToParseFile(
            'Unable to parse: {0:s} skipping.'.format(filename))

    # Try every registered plugin; a plugin signals that this plist is
    # not its own by raising WrongPlistPlugin.
    matching_plugin = None
    for plugin in self._plugins:
        try:
            plugin.UpdateChainAndProcess(
                parser_mediator, plist_name=filename, top_level=top_level_object)
            matching_plugin = plugin
        except errors.WrongPlistPlugin as exception:
            logger.debug('Wrong plugin: {0:s} for: {1:s}'.format(
                exception.args[0], exception.args[1]))

    # Fall back to the default plugin when nothing matched.
    if not matching_plugin and self._default_plugin:
        self._default_plugin.UpdateChainAndProcess(
            parser_mediator, plist_name=filename, top_level=top_level_object)
Parses a plist file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
juraj-google-style
def add_note(self, note):
    """Add a note to the usernotes wiki page.

    Arguments:
        note: the note to be added (Note)

    Returns the update message for the usernotes wiki.

    Raises:
        ValueError when the warning type of the note can not be found in
        the stored list of warnings.
    """
    notes = self.cached_json
    if not note.moderator:
        # Default the moderator to the authenticated reddit user.
        note.moderator = self.r.user.me().name
    # Moderators and warning types are stored as indices into shared
    # constant tables; register new values on first use.
    try:
        mod_index = notes['constants']['users'].index(note.moderator)
    except ValueError:
        notes['constants']['users'].append(note.moderator)
        mod_index = notes['constants']['users'].index(note.moderator)
    try:
        warn_index = notes['constants']['warnings'].index(note.warning)
    except ValueError:
        # Only warning types known to Note.warnings may be added.
        if note.warning in Note.warnings:
            notes['constants']['warnings'].append(note.warning)
            warn_index = notes['constants']['warnings'].index(note.warning)
        else:
            raise ValueError('Warning type not valid: ' + note.warning)
    new_note = {
        'n': note.note,
        't': note.time,
        'm': mod_index,
        'l': note.link,
        'w': warn_index
    }
    try:
        # Newest notes go first in the user's note list.
        notes['users'][note.username]['ns'].insert(0, new_note)
    except KeyError:
        # First note for this user.
        notes['users'][note.username] = {'ns': [new_note]}
    return '"create new note on user {}" via puni'.format(note.username)
Add a note to the usernotes wiki page. Arguments: note: the note to be added (Note) Returns the update message for the usernotes wiki Raises: ValueError when the warning type of the note can not be found in the stored list of warnings.
juraj-google-style
def get_status_tree(root_pipeline_id):
    """Gets the full status tree of a pipeline.

    Args:
        root_pipeline_id: The pipeline ID to get status for.

    Returns:
        Dictionary with the keys:
            rootPipelineId: The ID of the root pipeline.
            slots: Mapping of slot IDs to result of _get_internal_slot.
            pipelines: Mapping of pipeline IDs to result of
                _get_internal_status.

    Raises:
        PipelineStatusError if any input is bad.
    """
    # NOTE(review): Python 2 code (itervalues/iteritems) -- not py3-safe.
    root_pipeline_key = db.Key.from_path(_PipelineRecord.kind(), root_pipeline_id)
    root_pipeline_record = db.get(root_pipeline_key)
    if (root_pipeline_record is None):
        raise PipelineStatusError(('Could not find pipeline ID "%s"' % root_pipeline_id))
    # If the given ID is actually a child pipeline, walk up to the true root.
    actual_root_key = _PipelineRecord.root_pipeline.get_value_for_datastore(root_pipeline_record)
    if (actual_root_key != root_pipeline_key):
        root_pipeline_key = actual_root_key
        root_pipeline_id = root_pipeline_key.id_or_name()
        root_pipeline_record = db.get(root_pipeline_key)
        if (not root_pipeline_record):
            raise PipelineStatusError(('Could not find pipeline ID "%s"' % root_pipeline_id))
    # Bulk-fetch every record type associated with this root pipeline.
    queries = {}
    for model in (_PipelineRecord, _SlotRecord, _BarrierRecord, _StatusRecord):
        queries[model] = model.all().filter('root_pipeline =', root_pipeline_key).run(batch_size=1000)
    found_pipeline_dict = dict(((stage.key(), stage) for stage in queries[_PipelineRecord]))
    found_slot_dict = dict(((slot.key(), slot) for slot in queries[_SlotRecord]))
    found_barrier_dict = dict(((barrier.key(), barrier) for barrier in queries[_BarrierRecord]))
    found_status_dict = dict(((status.key(), status) for status in queries[_StatusRecord]))
    # Breadth-first walk of the fan-out graph to find reachable pipelines
    # and which child pipeline fills each output slot.
    valid_pipeline_keys = set([root_pipeline_key])
    slot_filler_dict = {}
    expand_stack = [root_pipeline_record]
    while expand_stack:
        old_stack = expand_stack
        expand_stack = []
        for pipeline_record in old_stack:
            for child_pipeline_key in pipeline_record.fanned_out:
                child_pipeline_record = found_pipeline_dict.get(child_pipeline_key)
                if (child_pipeline_record is None):
                    raise PipelineStatusError(('Pipeline ID "%s" points to child ID "%s" which does not exist.' % (pipeline_record.key().name(), child_pipeline_key.name())))
                expand_stack.append(child_pipeline_record)
                valid_pipeline_keys.add(child_pipeline_key)
                child_outputs = child_pipeline_record.params['output_slots']
                for output_slot_key in child_outputs.itervalues():
                    slot_filler_dict[db.Key(output_slot_key)] = child_pipeline_key
    output = {'rootPipelineId': root_pipeline_id, 'slots': {}, 'pipelines': {}}
    for pipeline_key in found_pipeline_dict.keys():
        # Skip orphaned records not reachable from the root.
        if (pipeline_key not in valid_pipeline_keys):
            continue
        output['pipelines'][pipeline_key.name()] = _get_internal_status(pipeline_key=pipeline_key, pipeline_dict=found_pipeline_dict, slot_dict=found_slot_dict, barrier_dict=found_barrier_dict, status_dict=found_status_dict)
    for (slot_key, filler_pipeline_key) in slot_filler_dict.iteritems():
        output['slots'][str(slot_key)] = _get_internal_slot(slot_key=slot_key, filler_pipeline_key=filler_pipeline_key, slot_dict=found_slot_dict)
    return output
Gets the full status tree of a pipeline. Args: root_pipeline_id: The pipeline ID to get status for. Returns: Dictionary with the keys: rootPipelineId: The ID of the root pipeline. slots: Mapping of slot IDs to result of from _get_internal_slot. pipelines: Mapping of pipeline IDs to result of _get_internal_status. Raises: PipelineStatusError if any input is bad.
codesearchnet
def splat(f: Callable[(..., A)]) -> Callable[([Iterable], A)]:
    """Convert a function taking multiple arguments into a function
    taking a single iterable argument.

    Args:
        f: Any function.

    Returns:
        A function that accepts a single iterable argument whose elements
        are passed positionally to ``f``.

    Example:
        >>> def f(a, b, c):
        ...     return a + b + c
        >>> splat(f)([1, 2, 3])
        6
    """
    def splatted(args):
        # Unpack the iterable into positional arguments for f.
        return f(*args)
    return splatted
Convert a function taking multiple arguments into a function taking a single iterable argument. Args: f: Any function Returns: A function that accepts a single iterable argument. Each element of this iterable argument is passed as an argument to ``f``. Example: $ def f(a, b, c): $ return a + b + c $ $ f(1, 2, 3) # 6 $ g = splat(f) $ g([1, 2, 3]) # 6
codesearchnet
def Analyze(self, hashes):
    """Looks up hashes in nsrlsvr.

    Args:
        hashes (list[str]): hash values to look up.

    Returns:
        list[HashAnalysis]: analysis results, or an empty list on error.
    """
    logger.debug(
        'Opening connection to {0:s}:{1:d}'.format(self._host, self._port))

    nsrl_socket = self._GetSocket()
    if not nsrl_socket:
        # Connection failed: abort the analyzer instead of retrying.
        self.SignalAbort()
        return []

    hash_analyses = []
    for digest in hashes:
        response = self._QueryHash(nsrl_socket, digest)
        # Skip hashes that failed to query; others may still succeed.
        if response is None:
            continue

        hash_analysis = interface.HashAnalysis(digest, response)
        hash_analyses.append(hash_analysis)

    nsrl_socket.close()
    logger.debug(
        'Closed connection to {0:s}:{1:d}'.format(self._host, self._port))
    return hash_analyses
Looks up hashes in nsrlsvr. Args: hashes (list[str]): hash values to look up. Returns: list[HashAnalysis]: analysis results, or an empty list on error.
juraj-google-style
def set_hyperparameters(self, hyperparameters):
    """Set new hyperparameter values for some blocks.

    Args:
        hyperparameters (dict): block name -> dict of new hyperparameter
            values for that block.
    """
    # Delegate each per-block dict to that block's own setter.
    for name, params in hyperparameters.items():
        self.blocks[name].set_hyperparameters(params)
Set new hyperparameter values for some blocks. Args: hyperparameters (dict): A dictionary containing the block names as keys and the new hyperparameters dictionary as values.
juraj-google-style
def _prepare_for_training(self, job_name=None):
    """Set hyperparameters needed for training. This method will also
    validate ``source_dir``.

    Args:
        job_name (str): Name of the training job to be created. If not
            specified, one is generated, using the base name given to the
            constructor if applicable.
    """
    super(Framework, self)._prepare_for_training(job_name=job_name)
    # BUG FIX: the 's3://' and 'file://' string literals were mangled in
    # this block; restored below.
    # Validate source_dir only when it is a local path (not already on S3).
    if self.source_dir and not self.source_dir.lower().startswith('s3://'):
        validate_source_dir(self.entry_point, self.source_dir)

    local_code = get_config_value('local.local_code', self.sagemaker_session.config)
    if self.sagemaker_session.local_mode and local_code:
        # Local mode with local code: run directly from the filesystem
        # instead of staging the code in S3.
        if self.source_dir is None:
            self.source_dir = os.path.dirname(self.entry_point)
        self.entry_point = os.path.basename(self.entry_point)

        code_dir = 'file://' + self.source_dir
        script = self.entry_point
    else:
        # Upload user code to S3 and point the training job at it.
        self.uploaded_code = self._stage_user_code_in_s3()
        code_dir = self.uploaded_code.s3_prefix
        script = self.uploaded_code.script_name

    # Record code location and runtime settings as hyperparameters so the
    # training container can find and run the user script.
    self._hyperparameters[DIR_PARAM_NAME] = code_dir
    self._hyperparameters[SCRIPT_PARAM_NAME] = script
    self._hyperparameters[CLOUDWATCH_METRICS_PARAM_NAME] = self.enable_cloudwatch_metrics
    self._hyperparameters[CONTAINER_LOG_LEVEL_PARAM_NAME] = self.container_log_level
    self._hyperparameters[JOB_NAME_PARAM_NAME] = self._current_job_name
    self._hyperparameters[SAGEMAKER_REGION_PARAM_NAME] = self.sagemaker_session.boto_region_name
Set hyperparameters needed for training. This method will also validate ``source_dir``. Args: * job_name (str): Name of the training job to be created. If not specified, one is generated, using the base name given to the constructor if applicable.
codesearchnet
def main(jlink_serial, device):
    """Prints the core's information.

    Args:
        jlink_serial (str): the J-Link serial number
        device (str): the target CPU

    Raises:
        JLinkException: on error
    """
    # NOTE(review): StringIO.StringIO is Python 2-only; Python 3 would
    # need io.StringIO. Also, the original docstring claims a 0 return,
    # but no return statement exists -- confirm intended.
    buf = StringIO.StringIO()
    jlink = pylink.JLink(log=buf.write, detailed_log=buf.write)
    jlink.open(serial_no=jlink_serial)

    # Use SWD and connect to the target before querying core info.
    jlink.set_tif(pylink.enums.JLinkInterfaces.SWD)
    jlink.connect(device, verbose=True)

    sys.stdout.write('ARM Id: %d\n' % jlink.core_id())
    sys.stdout.write('CPU Id: %d\n' % jlink.core_cpu())
    sys.stdout.write('Core Name: %s\n' % jlink.core_name())
    sys.stdout.write('Device Family: %d\n' % jlink.device_family())
Prints the core's information. Args: jlink_serial (str): the J-Link serial number device (str): the target CPU Returns: Always returns ``0``. Raises: JLinkException: on error
juraj-google-style
def pull(self, device_filename, dest_file=None, timeout_ms=None):
    """Pull file from device.

    Arguments:
        device_filename: The filename on the device to pull.
        dest_file: If set, a filename or writable file-like object.
        timeout_ms: Expected timeout for the pull.

    Returns:
        The file data if dest_file is not set, None otherwise.
    """
    # Only return data when the caller did not supply a destination.
    should_return_data = (dest_file is None)
    if isinstance(dest_file, six.string_types):
        # NOTE(review): a file opened from a filename here is never
        # explicitly closed -- potential resource leak; confirm before
        # changing.
        dest_file = open(dest_file, 'w')
    elif (dest_file is None):
        dest_file = six.StringIO()
    self.filesync_service.recv(device_filename, dest_file, timeouts.PolledTimeout.from_millis(timeout_ms))
    if should_return_data:
        return dest_file.getvalue()
Pull file from device. Arguments: device_filename: The filename on the device to pull. dest_file: If set, a filename or writable file-like object. timeout_ms: Expected timeout for the pull. Returns: The file data if dest_file is not set, None otherwise.
codesearchnet
def get_bq_tableschema(schema):
    """Convert the table schema to a TableSchema object.

    Args:
        schema (str, dict, TableSchema): The schema to be used if the
            BigQuery table to write has to be created. May be a string, a
            dict, a TableSchema, a ValueProvider, a callable, or None.

    Returns:
        TableSchema: The schema as a TableSchema object (pass-through for
        deferred forms: ValueProvider, callable, or None).
    """
    # Already in an acceptable parsed/deferred form: pass through as-is.
    if isinstance(schema, (bigquery.TableSchema, value_provider.ValueProvider)) or callable(schema) or schema is None:
        return schema
    elif isinstance(schema, str):
        return get_table_schema_from_string(schema)
    elif isinstance(schema, dict):
        # Convert dicts via their JSON representation.
        schema_string = json.dumps(schema)
        return parse_table_schema_from_json(schema_string)
    else:
        raise TypeError('Unexpected schema argument: %s.' % schema)
Convert the table schema to a TableSchema object. Args: schema (str, dict, ~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema): The schema to be used if the BigQuery table to write has to be created. This can either be a dict or string or in the TableSchema format. Returns: ~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema: The schema as a TableSchema object.
github-repos
def __init__(self, scope, parent, id, name, result, definition=True):
    """Constructor for functions.

    Args:
        scope (CodeEntity): The program scope where this object belongs.
        parent (CodeEntity): This object's parent in the program tree.
        id: A unique identifier for this function.
        name (str): The name of the function in the program.
        result (str): The return type of the function in the program.
        definition (bool): Whether this object is the function's
            definition (as opposed to a declaration only).
    """
    CodeEntity.__init__(self, scope, parent)
    self.id = id
    self.name = name
    self.result = result
    self.parameters = []
    # The function body is an explicit block owned by this function.
    self.body = CodeBlock(self, self, explicit=True)
    self.member_of = None
    self.references = []
    # Points to self when this is the definition; None for declarations
    # until a definition is linked.
    self._definition = self if definition else None
Constructor for functions. Args: scope (CodeEntity): The program scope where this object belongs. parent (CodeEntity): This object's parent in the program tree. id: An unique identifier for this function. name (str): The name of the function in the program. result (str): The return type of the function in the program.
juraj-google-style
def get_lagged_subsequences(self, sequence: torch.Tensor, subsequences_length: int, shift: int=0) -> torch.Tensor:
    """Return lagged subsequences of a given sequence.

    Returns a tensor of shape (N, S, C, I), where S = subsequences_length
    and I = len(indices), containing lagged subsequences. Specifically,
    lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].

    Args:
        sequence: Tensor of shape (N, T, C) from which lagged
            subsequences should be extracted.
        subsequences_length: Length of the subsequences to be extracted.
        shift: Shift the lags by this amount back.

    Raises:
        ValueError: if the history is too short for the largest lag.
    """
    history_length = sequence.shape[1]
    lag_offsets = [lag - shift for lag in self.config.lags_sequence]
    if max(lag_offsets) + subsequences_length > history_length:
        raise ValueError(f'lags cannot go further than history length, found lag {max(lag_offsets)} while history length is only {history_length}')
    slices = []
    for offset in lag_offsets:
        # Slice the window ending `offset` steps before the sequence end.
        start = -offset - subsequences_length
        stop = -offset if offset > 0 else None
        slices.append(sequence[:, start:stop, ...])
    # One slice per lag, stacked along a new trailing dimension.
    return torch.stack(slices, dim=-1)
Returns lagged subsequences of a given sequence. Returns a tensor of shape (N, S, C, I), where S = subsequences_length and I = len(indices), containing lagged subsequences. Specifically, lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :]. Args: sequence: Tensor The sequence from which lagged subsequences should be extracted. Shape: (N, T, C). subsequences_length : int Length of the subsequences to be extracted. shift: int Shift the lags by this amount back.
github-repos
def check_target_module_exists(optim_target_modules, key: str, return_is_regex: bool=False):
    """A helper method to check if the passed module's key name matches
    any of the target modules in the optim_target_modules.

    Args:
        optim_target_modules (`Union[str, List[str]]`): A list of strings
            to try to match. Can be also a full string.
        key (`str`): A key to search any matches in optim_target_modules
        return_is_regex (`bool`): If set to `True`, also return whether
            the match was made via a regex pattern.

    Returns:
        `bool`: True if key matches any target module, False otherwise.
        `bool` (optional, when return_is_regex): whether the matched
            target was a regex rather than a literal match.
    """
    found = False
    used_regex = False
    if isinstance(optim_target_modules, str):
        # A single string is a full-match regex; it only counts as a
        # "regex" when it is not literally equal to the key.
        found = re.fullmatch(optim_target_modules, key) is not None
        used_regex = optim_target_modules != key
    elif key in optim_target_modules:
        # Exact membership in the list.
        found = True
    elif any(target in key for target in optim_target_modules):
        # Substring match against any listed target.
        found = True
    elif any(re.fullmatch(pattern, key) for pattern in optim_target_modules):
        # Last resort: treat each listed target as a full-match regex.
        found = True
        used_regex = True
    if return_is_regex:
        return (found, used_regex)
    return found
A helper method to check if the passed module's key name matches any of the target modules in the optim_target_modules. Args: optim_target_modules (`Union[str, List[str]]`): A list of strings to try to match. Can be also a full string. key (`str`): A key to search any matches in optim_target_modules return_is_regex (`bool`): If set to `True`, the method will return whether the passed `optim_target_modules` is a regex or not. Returns: `bool` : True of match object if key matches any target modules from config, False or None if no match found `bool` : If the matched target module is a regex to silence out the warnings in Trainer for extra modules being found (only if `target_module_found=True` for an array of regex).
github-repos
def pgm(X, prox_f, step_f, accelerated=False, relax=None, e_rel=1e-06, max_iter=1000, traceback=None):
    """Proximal Gradient Method.

    Adapted from Combettes 2009, Algorithm 3.4. The accelerated version
    is Algorithm 3.6 with modifications from Xu & Yin (2015).

    Args:
        X: initial X, will be updated in place.
        prox_f: proxed function f (the forward-backward step).
        step_f: step size, < 1/L with L the Lipschitz constant of grad f.
        accelerated: If Nesterov acceleration should be used.
        relax: (over)relaxation parameter, 0 < relax < 1.5.
        e_rel: relative error of X used for the convergence test.
        max_iter: maximum iterations, irrespective of residual error.
        traceback: utils.Traceback to hold variable histories.

    Returns:
        converged: whether the optimizer has converged within e_rel.
        error: X^it - X^it-1
    """
    stepper = utils.NesterovStepper(accelerated=accelerated)
    if (relax is not None):
        assert ((relax > 0) and (relax < 1.5))
    if (traceback is not None):
        traceback.update_history(0, X=X, step_f=step_f)
        if accelerated:
            traceback.update_history(0, omega=0)
        if (relax is not None):
            traceback.update_history(0, relax=relax)
    for it in range(max_iter):
        # Nesterov extrapolation point; omega is 0 on the first iteration,
        # so X_ is only referenced after it has been assigned.
        omega = stepper.omega
        if (omega > 0):
            _X = (X + (omega * (X - X_)))
        else:
            _X = X
        X_ = X.copy()
        # Forward-backward (proximal) step, written back into X in place.
        X[:] = prox_f(_X, step_f)
        if (relax is not None):
            # Over-relaxation towards the new iterate.
            X += ((relax - 1) * (X - X_))
        if (traceback is not None):
            traceback.update_history((it + 1), X=X, step_f=step_f)
            if accelerated:
                traceback.update_history((it + 1), omega=omega)
            if (relax is not None):
                traceback.update_history((it + 1), relax=relax)
        # Converged when the relative squared step size drops below e_rel^2.
        converged = (utils.l2sq((X - X_)) <= ((e_rel ** 2) * utils.l2sq(X)))
        if converged:
            break
    logger.info('Completed {0} iterations'.format((it + 1)))
    if (not converged):
        logger.warning('Solution did not converge')
    return (converged, (X - X_))
Proximal Gradient Method Adapted from Combettes 2009, Algorithm 3.4. The accelerated version is Algorithm 3.6 with modifications from Xu & Yin (2015). Args: X: initial X, will be updated prox_f: proxed function f (the forward-backward step) step_f: step size, < 1/L with L being the Lipschitz constant of grad f accelerated: If Nesterov acceleration should be used relax: (over)relaxation parameter, 0 < relax < 1.5 e_rel: relative error of X max_iter: maximum iteration, irrespective of residual error traceback: utils.Traceback to hold variable histories Returns: converged: whether the optimizer has converged within e_rel error: X^it - X^it-1
codesearchnet
def CopyConfig(self):
    """Make a complete new copy of the current config.

    This includes all options as they currently are. If you want a base
    config with defaults use MakeNewConfig.

    Returns:
        A new config object with the same data as self.
    """
    clone = self.MakeNewConfig()
    # Deep-copy every piece of mutable state so the clone is fully
    # independent of the original.
    for attr in ('raw_data', 'files', 'secondary_config_parsers',
                 'writeback', 'writeback_data', 'global_override',
                 'context_descriptions', 'constants', 'initialized'):
        setattr(clone, attr, copy.deepcopy(getattr(self, attr)))
    return clone
Make a complete new copy of the current config. This includes all options as they currently are. If you want a base config with defaults use MakeNewConfig. Returns: A new config object with the same data as self.
codesearchnet
def create_requests(
    requests: Union[Dict, List],
    *,
    context: Any = NOCONTEXT,
    convert_camel_case: bool
) -> Union[Request, Set[Request]]:
    """Create a Request object from a dictionary (or list of them).

    Args:
        requests: A request dict, or a list of request dicts (a batch).
        context: If specified, will be the first positional argument in
            all requests.
        convert_camel_case: Will convert the method name/any named params
            to snake case.

    Returns:
        A Request object, or a set of them for a batch.
    """
    if isinstance(requests, list):
        # Batch: build one Request per dict and return them as a set.
        return {
            Request(context=context, convert_camel_case=convert_camel_case, **request)
            for request in requests
        }
    return Request(context=context, convert_camel_case=convert_camel_case, **requests)
Create a Request object from a dictionary (or list of them). Args: requests: A request dict, or a list of request dicts. context: If specified, will be the first positional argument in all requests. convert_camel_case: Will convert the method name/any named params to snake case. Returns: A Request object, or a collection of them.
juraj-google-style
def Serialize(self, writer):
    """Serialize this message object to a binary writer.

    Writes, in order: magic number, 12-byte fixed command string, payload
    length, checksum, and the raw payload bytes.

    Args:
        writer (neo.IO.BinaryWriter): the writer to serialize into.
    """
    writer.WriteUInt32(self.Magic)
    writer.WriteFixedString(self.Command, 12)
    writer.WriteUInt32(len(self.Payload))
    writer.WriteUInt32(self.Checksum)
    writer.WriteBytes(self.Payload)
Serialize object. Args: writer (neo.IO.BinaryWriter):
juraj-google-style
def seek(self, offset, whence=os.SEEK_SET):
    """Seeks to an offset within the file-like object.

    Args:
        offset (int): offset to seek to.
        whence (Optional(int)): value that indicates whether offset is an
            absolute or relative position within the file.

    Raises:
        IOError: if the seek failed or the file has not been opened.
    """
    if not self._gzip_file_object:
        raise IOError('Not opened.')
    # Translate the (offset, whence) pair into an absolute position.
    if whence == os.SEEK_CUR:
        new_offset = self._current_offset + offset
    elif whence == os.SEEK_END:
        new_offset = self.uncompressed_data_size + offset
    elif whence == os.SEEK_SET:
        new_offset = offset
    else:
        raise IOError('Unsupported whence.')
    if new_offset < 0:
        raise IOError('Invalid offset value less than zero.')
    # Only the bookkeeping offset moves; decompression happens lazily on read.
    self._current_offset = new_offset
Seeks to an offset within the file-like object. Args: offset (int): offset to seek to. whence (Optional(int)): value that indicates whether offset is an absolute or relative position within the file. Raises: IOError: if the seek failed or the file has not been opened. OSError: if the seek failed or the file has not been opened.
juraj-google-style
def removeRouterPrefix(self, prefixEntry):
    """Remove a configured on-mesh prefix from the border router.

    Args:
        prefixEntry: an on-mesh prefix entry.

    Returns:
        True: successfully removed the prefix entry and re-registered
            network data.
        False: failed to remove the prefix entry.
        NOTE(review): returns None (falsy) on exception, not False.
    """
    # NOTE: Python 2 code (print statements, `except Exception, e`).
    print '%s call removeRouterPrefix' % self.port
    print prefixEntry
    # Normalize the textual prefix entry into an IPv6 address string.
    prefix = self.__convertIp6PrefixStringToIp6Address(str(prefixEntry))
    try:
        # Prefix length is hard-coded to /64 for on-mesh prefixes here.
        prefixLen = 64
        cmd = 'prefix remove %s/%d' % (prefix, prefixLen)
        print cmd
        if self.__sendCommand(cmd)[0] == 'Done':
            # Re-register network data with the leader so removal takes effect.
            return self.__sendCommand('netdataregister')[0] == 'Done'
        else:
            return False
    except Exception, e:
        ModuleHelper.WriteIntoDebugLogger("removeRouterPrefix() Error: " + str(e))
remove the configured prefix on a border router Args: prefixEntry: a on-mesh prefix entry Returns: True: successful to remove the prefix entry from border router False: fail to remove the prefix entry from border router
juraj-google-style
def orient_averaged_fixed(tm):
    """Compute orientation-averaged scattering matrices with a fixed quadrature.

    Averages over alpha with equal weights on a regular grid and over beta
    using the quadrature nodes/weights stored on the scatterer. Uses the set
    particle orientation PDF, ignoring the alpha and beta attributes.

    Args:
        tm: TMatrix (or descendant) instance.

    Returns:
        The amplitude (S) and phase (Z) matrices.
    """
    # Regular alpha grid over [0, 360), endpoint excluded, equal weights.
    alpha_nodes = np.linspace(0, 360, tm.n_alpha + 1)[:-1]
    alpha_weight = 1.0 / tm.n_alpha
    S = np.zeros((2, 2), dtype=complex)
    Z = np.zeros((4, 4))
    for alpha in alpha_nodes:
        for (beta, beta_weight) in zip(tm.beta_p, tm.beta_w):
            (S_one, Z_one) = tm.get_SZ_single(alpha=alpha, beta=beta)
            S += beta_weight * S_one
            Z += beta_weight * Z_one
    # Normalize by the alpha weight and the total beta quadrature weight.
    norm = alpha_weight / tm.beta_w.sum()
    S *= norm
    Z *= norm
    return (S, Z)
Compute the orientation-averaged amplitude and phase matrices for variable-orientation scatterers. This method uses a fast Gaussian quadrature and is suitable for most use. Uses the set particle orientation PDF, ignoring the alpha and beta attributes. Args: tm: TMatrix (or descendant) instance. Returns: The amplitude (S) and phase (Z) matrices.
codesearchnet
def compute_stats(input_handle, stats_path, max_rows=None, for_eval=False, pipeline_args=None, publish_to_bq=None, metrics_dataset=None, metrics_table=None, project=None):
    """Computes TFDV statistics over BigQuery data with a Beam pipeline.

    Args:
        input_handle: BigQuery table name to process, specified as
            DATASET.TABLE.
        stats_path: Path where the statistics TFRecord is materialized.
        max_rows: Number of rows to query from BigQuery (None for all).
        for_eval: Query for eval-set rows from BigQuery.
        pipeline_args: additional DataflowRunner or DirectRunner args passed
            to the beam pipeline.
        publish_to_bq: If truthy, publish pipeline metrics to BigQuery.
        metrics_dataset: BigQuery dataset for published metrics.
        metrics_table: BigQuery table for published metrics; also used as the
            Beam metrics namespace.
        project: GCP project used for the query and metrics publication.
    """
    # The metrics table name doubles as the Beam metrics namespace.
    namespace = metrics_table
    pipeline = beam.Pipeline(argv=pipeline_args)
    metrics_monitor = None
    if publish_to_bq:
        # Collect only metrics emitted under our namespace.
        metrics_monitor = MetricsReader(publish_to_bq=publish_to_bq, project_name=project, bq_table=metrics_table, bq_dataset=metrics_dataset, namespace=namespace, filters=MetricsFilter().with_namespace(namespace))
    query = taxi.make_sql(table_name=input_handle, max_rows=max_rows, for_eval=for_eval)
    # Read rows, timestamp the start, and wrap each non-null value in a
    # single-element ndarray as TFDV expects.
    raw_data = pipeline | 'ReadBigQuery' >> ReadFromBigQuery(query=query, project=project, use_standard_sql=True) | 'Measure time: Start' >> beam.ParDo(MeasureTime(namespace)) | 'ConvertToTFDVInput' >> beam.Map(lambda x: {key: np.asarray([x[key]]) for key in x if x[key] is not None})
    # Generate statistics, timestamp the end, and write a single (unsharded)
    # TFRecord containing the DatasetFeatureStatisticsList proto.
    _ = raw_data | 'GenerateStatistics' >> tfdv.GenerateStatistics() | 'Measure time: End' >> beam.ParDo(MeasureTime(namespace)) | 'WriteStatsOutput' >> beam.io.WriteToTFRecord(stats_path, shard_name_template='', coder=beam.coders.ProtoCoder(statistics_pb2.DatasetFeatureStatisticsList))
    result = pipeline.run()
    # Block until the pipeline finishes so metrics are complete.
    result.wait_until_finish()
    if metrics_monitor:
        metrics_monitor.publish_metrics(result)
Computes statistics on the input data. Args: input_handle: BigQuery table name to process specified as DATASET.TABLE or path to csv file with input data. stats_path: Directory in which stats are materialized. max_rows: Number of rows to query from BigQuery for_eval: Query for eval set rows from BigQuery pipeline_args: additional DataflowRunner or DirectRunner args passed to the beam pipeline.
github-repos
def from_operator(cls, operator):
    """Builds a `_LinearOperatorSpec` from a `LinearOperator` instance.

    Args:
        operator: An instance of `LinearOperator`.

    Returns:
        linear_operator_spec: An instance of `_LinearOperatorSpec` to be used
        as the `TypeSpec` of `operator`.

    Raises:
        NotImplementedError: if a field mixes `Tensor` and non-`Tensor`
            values.
    """
    # Hints that affect validation but are not composite-tensor fields.
    validation_fields = ('is_non_singular', 'is_self_adjoint', 'is_positive_definite', 'is_square')
    kwargs = _extract_attrs(operator, keys=set(operator._composite_tensor_fields + validation_fields))
    non_tensor_params = {}
    param_specs = {}
    for k, v in list(kwargs.items()):
        # Replace Tensor leaves in the value with their TypeSpecs.
        type_spec_or_v = _extract_type_spec_recursively(v)
        is_tensor = [isinstance(x, type_spec.TypeSpec) for x in nest.flatten(type_spec_or_v)]
        # NOTE: a structure with no leaves falls into the all-Tensor branch,
        # since all([]) is True.
        if all(is_tensor):
            param_specs[k] = type_spec_or_v
        elif not any(is_tensor):
            non_tensor_params[k] = v
        else:
            # Partially-Tensor structures cannot be represented in the spec.
            raise NotImplementedError(f'Field {k} contains a mix of `Tensor` and non-`Tensor` values.')
    return cls(param_specs=param_specs, non_tensor_params=non_tensor_params, prefer_static_fields=operator._composite_tensor_prefer_static_fields)
Builds a `_LinearOperatorSpec` from a `LinearOperator` instance. Args: operator: An instance of `LinearOperator`. Returns: linear_operator_spec: An instance of `_LinearOperatorSpec` to be used as the `TypeSpec` of `operator`.
github-repos
def traverse(self, index=0):
    """Yield every combination of items, one drawn from each list in
    ``self.nodes``.

    Items that are themselves lists are spliced into the combination rather
    than nested.

    Args:
        index (int): the node index to start from; only used internally
            during the recursion.

    Yields:
        list: one combination of items per iteration.
    """
    # Base case: past the last node, the only combination is the empty one.
    if index >= len(self.nodes):
        yield []
        return
    for entity in self.nodes[index]:
        # List entities are flattened into the result; scalars are wrapped.
        prefix = entity if isinstance(entity, list) else [entity]
        for tail in self.traverse(index=index + 1):
            yield prefix + tail
This generator produces every combination of items, taking one item from each of the lists in ``self.nodes``. Items that are themselves lists are spliced into the combination rather than nested. Args: index (int) : the index at which to start; only used internally during the recursion. Yields: list : one combination of items per iteration.
juraj-google-style
def __init__(self, output_mediator):
    """Initializes the SQLite 4n6time output module.

    Args:
        output_mediator (OutputMediator): output mediator.
    """
    super(SQLite4n6TimeOutputModule, self).__init__(output_mediator)
    # Database state is lazily established; everything starts unset.
    self._count = 0
    self._connection = None
    self._cursor = None
    self._filename = None
Initializes the output module object. Args: output_mediator (OutputMediator): output mediator. Raises: ValueError: if the file handle is missing.
juraj-google-style
def current(sam=False):
    """Get the username that salt-minion is running under.

    If salt-minion is running as a service it should return the Local System
    account. If salt is running from a command prompt it should return the
    username that started the command prompt.

    .. versionadded:: 2015.5.6

    Args:
        sam (bool, optional):
            False returns just the username without any domain notation. True
            returns the domain with the username in the SAM format. Ie:
            ``domain\\username``

    Returns:
        str: Returns username

    CLI Example:

    .. code-block:: bash

        salt '*' user.current
    """
    try:
        # SAM-compatible form includes the domain; plain form is just the name.
        user_name = (
            win32api.GetUserNameEx(win32con.NameSamCompatible)
            if sam else
            win32api.GetUserName())
    except pywintypes.error as exc:
        log.error('Failed to get current user')
        log.error('nbr: %s', exc.winerror)
        log.error('ctx: %s', exc.funcname)
        log.error('msg: %s', exc.strerror)
        raise CommandExecutionError('Failed to get current user', info=exc)
    # An empty result is also a failure.
    if not user_name:
        raise CommandExecutionError('Failed to get current user')
    return user_name
Get the username that salt-minion is running under. If salt-minion is running as a service it should return the Local System account. If salt is running from a command prompt it should return the username that started the command prompt. .. versionadded:: 2015.5.6 Args: sam (bool, optional): False returns just the username without any domain notation. True returns the domain with the username in the SAM format. Ie: ``domain\\username`` Returns: str: Returns username CLI Example: .. code-block:: bash salt '*' user.current
codesearchnet
def openbin(self, path, mode='r', buffering=(- 1), **options):
    """Open a binary file-like object over SFTP.

    Arguments:
        path (str): A path on the filesystem.
        mode (str): Mode to open the file (must be a valid, non-text mode).
        buffering (int): the buffering policy (-1 for default, 0 to disable,
            1 for line buffering, larger for a custom buffer size).

    Keyword Arguments:
        pipelined (bool): Set the transfer in pipelined mode (should improve
            transfer speed). Defaults to ``True``.

    Raises:
        fs.errors.FileExpected: if the path is not a file.
        fs.errors.FileExists: if the file already exists and *exclusive mode*
            is specified (``x`` in the mode).
        fs.errors.ResourceNotFound: if the path does not exist.

    Returns:
        io.IOBase: a file handle.
    """
    self.check()
    _path = self.validatepath(path)
    _mode = Mode(mode)
    _mode.validate_bin()
    with self._lock:
        if _mode.exclusive:
            if self.exists(_path):
                raise errors.FileExists(path)
            else:
                # SFTP has no native exclusive mode: downgrade 'x' to 'w'
                # once we have verified the file does not exist.
                _mode = Mode(''.join(set(mode.replace('x', 'w'))))
        elif (_mode.reading and (not _mode.create) and (not self.exists(_path))):
            # Read-only open of a missing file is an error.
            raise errors.ResourceNotFound(path)
        elif self.isdir(_path):
            raise errors.FileExpected(path)
        # Translate paramiko/SSH errors into fs.errors equivalents.
        with convert_sshfs_errors('openbin', path):
            _sftp = self._client.open_sftp()
            handle = _sftp.open(_path, mode=_mode.to_platform_bin(), bufsize=buffering)
            handle.set_pipelined(options.get('pipelined', True))
            return SSHFile(handle)
Open a binary file-like object. Arguments: path (str): A path on the filesystem. mode (str): Mode to open the file (must be a valid, non-text mode). Since this method only opens binary files, the ``b`` in the mode is implied. buffering (int): the buffering policy (-1 to use default buffering, 0 to disable completely, 1 to enable line based buffering, or any larger positive integer for a custom buffer size). Keyword Arguments: pipelined (bool): Set the transfer in pipelined mode (should improve transfer speed). Defaults to ``True``. Raises: fs.errors.FileExpected: if the path if not a file. fs.errors.FileExists: if the file already exists and *exclusive mode* is specified (``x`` in the mode). fs.errors.ResourceNotFound: if the path does not exist. Returns: io.IOBase: a file handle.
codesearchnet
def bessel_y1(x, name=None):
    """Computes the Bessel y1 function of `x` element-wise.

    Bessel function of the second kind of order 1 (equivalent to
    scipy.special.y1).

    Args:
        x: A `Tensor` or `SparseTensor`. Must be one of the following types:
            `half`, `float32`, `float64`.
        name: A name for the operation (optional).

    Returns:
        A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
    """
    # Thin wrapper: only adds a name scope around the generated op.
    with ops.name_scope(name, 'bessel_y1', [x]):
        return gen_special_math_ops.bessel_y1(x)
Computes the Bessel y1 function of `x` element-wise. Modified Bessel function of order 1. >>> tf.math.special.bessel_y1([0.5, 1., 2., 4.]).numpy() array([-1.47147239, -0.78121282, -0.10703243, 0.39792571], dtype=float32) Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.y1 @end_compatibility
github-repos
def get_generation_mode(self, assistant_model: Optional['PreTrainedModel']=None) -> GenerationMode:
    """Returns the generation mode triggered by the [`GenerationConfig`] instance.

    Arg:
        assistant_model (`PreTrainedModel`, *optional*):
            The assistant model to be used for assisted generation. If set,
            the generation mode will be assisted generation.

    Returns:
        `GenerationMode`: The generation mode triggered by the instance.
    """
    # Constraints always force constrained beam search, regardless of other flags.
    if self.constraints is not None or self.force_words_ids is not None:
        generation_mode = GenerationMode.CONSTRAINED_BEAM_SEARCH
    elif self.num_beams == 1:
        if self.do_sample is False:
            # Greedy family: contrastive search requires top_k > 1 and a
            # positive penalty_alpha.
            if self.top_k is not None and self.top_k > 1 and (self.penalty_alpha is not None) and (self.penalty_alpha > 0):
                generation_mode = GenerationMode.CONTRASTIVE_SEARCH
            else:
                generation_mode = GenerationMode.GREEDY_SEARCH
        else:
            generation_mode = GenerationMode.SAMPLE
    elif self.num_beam_groups > 1:
        generation_mode = GenerationMode.GROUP_BEAM_SEARCH
    elif self.do_sample is True:
        generation_mode = GenerationMode.BEAM_SAMPLE
    else:
        generation_mode = GenerationMode.BEAM_SEARCH
    # Assisted generation overrides the base mode, but only for greedy/sample.
    # NOTE(review): membership tests compare against raw strings — assumes
    # GenerationMode is a str-based enum; confirm.
    if assistant_model is not None or self.prompt_lookup_num_tokens is not None or self.assistant_early_exit is not None:
        if generation_mode in ('greedy_search', 'sample'):
            generation_mode = GenerationMode.ASSISTED_GENERATION
        else:
            logger.warning(f"You've set `assistant_model`, which triggers assisted generate. Currently, assisted generate is only supported with Greedy Search and Sample. However, the base decoding mode (based on current flags) is {generation_mode} -- some of the set flags will be ignored.")
    # DoLa likewise only applies on top of greedy/sample.
    if self.dola_layers is not None:
        if generation_mode in ('greedy_search', 'sample'):
            generation_mode = GenerationMode.DOLA_GENERATION
        else:
            logger.warning(f"You've set `dola_layers`, which triggers DoLa generate. Currently, DoLa generate is only supported with Greedy Search and Sample. However, the base decoding mode (based on current flags) is {generation_mode} -- some of the set flags will be ignored.")
    return generation_mode
Returns the generation mode triggered by the [`GenerationConfig`] instance. Arg: assistant_model (`PreTrainedModel`, *optional*): The assistant model to be used for assisted generation. If set, the generation mode will be assisted generation. Returns: `GenerationMode`: The generation mode triggered by the instance.
github-repos
def secure(cls):
    """Build a SASLAuth restricted to mechanisms safe for use in
    non-encrypted sessions.

    Returns:
        A new :class:`SASLAuth` object.
    """
    safe_mechanisms = []
    for mech in cls._get_builtin_mechanisms().values():
        # Keep only enabled (priority set) mechanisms not flagged insecure.
        if mech.insecure or mech.priority is None:
            continue
        safe_mechanisms.append(mech)
    return SASLAuth(safe_mechanisms)
Uses only authentication mechanisms that are secure for use in non-encrypted sessions. Returns: A new :class:`SASLAuth` object.
codesearchnet
def _ParseTimestamp(self, parser_mediator, row):
    """Provides a timestamp for the given row.

    If the Trend Micro log comes from a version that provides a POSIX
    timestamp, use that directly; it provides the advantages of UTC and of
    second precision. Otherwise fall back onto the local-timezone date and
    time columns.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        row (dict[str, str]): fields of a single row, as specified in COLUMNS.

    Returns:
        dfdatetime.interface.DateTimeValue: date and time value, or None on
        parse failure (a warning is produced).
    """
    posix_string = row.get('timestamp', None)
    if posix_string is not None:
        try:
            seconds = int(posix_string, 10)
        except (ValueError, TypeError):
            parser_mediator.ProduceExtractionWarning(
                'Unable to parse timestamp value: {0!s}'.format(posix_string))
            return None
        return dfdatetime_posix_time.PosixTime(timestamp=seconds)
    # No POSIX timestamp: combine the separate date and time columns.
    try:
        return self._ConvertToTimestamp(row['date'], row['time'])
    except ValueError as exception:
        parser_mediator.ProduceExtractionWarning(
            'Unable to parse time string: "{0:s} {1:s}" with error: {2!s}'.format(
                repr(row['date']), repr(row['time']), exception))
        return None
Provides a timestamp for the given row. If the Trend Micro log comes from a version that provides a POSIX timestamp, use that directly; it provides the advantages of UTC and of second precision. Otherwise fall back onto the local-timezone date and time. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. row (dict[str, str]): fields of a single row, as specified in COLUMNS. Returns: dfdatetime.interface.DateTimeValue: date and time value.
codesearchnet
def build_results(self, values):
    """Build results that match the original shape of the fetch.

    Args:
        values: List of values returned by run(). The values correspond
            exactly to the list of tensors or ops returned by
            unique_fetches().

    Returns:
        A struct of the same shape as the original fetch object handled by
        this fetch mapper, with the original fetches replaced by their
        fetched values.

    Raises:
        NotImplementedError: always; subclasses must override.
    """
    # Abstract method: concrete fetch mappers supply the implementation.
    raise NotImplementedError('build_results must be implemented by subclasses')
Build results that match the original shape of the fetch. Args: values: List of values returned by run(). The values correspond exactly to the list tensors or ops returned by unique_fetches(). Returns: A struct of the same shape as the original fetch object handled by this fetch mapper. In the returned struct, the original fetches are replaced by their fetched values.
github-repos
def add_send_message(self, connection, send_message):
    """Registers a send_message function for a connection.

    Args:
        connection (str): A locally unique identifier provided by the
            receiver of messages.
        send_message (fn): The method that should be called by the dispatcher
            to respond to messages which arrive via connection.
    """
    # Later messages for this connection will be answered via this callable;
    # re-registering the same connection overwrites the previous function.
    self._send_message[connection] = send_message
    LOGGER.debug('Added send_message function for connection %s', connection)
Adds a send_message function to the Dispatcher's dictionary of functions indexed by connection. Args: connection (str): A locally unique identifier provided by the receiver of messages. send_message (fn): The method that should be called by the dispatcher to respond to messages which arrive via connection.
codesearchnet
def to_html(self):
    """Render an ordered list MessageElement as html.

    Returns:
        str: the html representation of the list, or None when items is
        None.

    Raises:
        Errors are propagated
    """
    if self.items is None:
        return
    # Opening tag carries this element's html attributes.
    parts = ['<ol%s>' % self.html_attributes()]
    parts.extend('<li>%s</li>' % item.to_html() for item in self.items)
    return '\n'.join(parts) + '\n</ol>'
Render an ordered list MessageElement as html Args: None Returns: Str the html representation of the ordered list, or None when there are no items Raises: Errors are propagated
juraj-google-style
def add_package(package, ignore_check=False, prevent_pending=False, image=None, restart=False):
    """Install a package using DISM.

    Args:
        package (str): The package to install. Can be a .cab file, a .msu
            file, or a folder.

            .. note::
                An `.msu` package is supported only when the target image is
                offline, either mounted or applied.

        ignore_check (Optional[bool]): Skip installation of the package if
            the applicability checks fail.
        prevent_pending (Optional[bool]): Skip the installation of the
            package if there are pending online actions.
        image (Optional[str]): The path to the root directory of an offline
            Windows image. If ``None`` is passed, the running operating
            system is targeted. Default is None.
        restart (Optional[bool]): Reboot the machine if required by the
            install.

    Returns:
        dict: A dictionary containing the results of the command

    CLI Example:

    .. code-block:: bash

        salt '*' dism.add_package C:\\Packages\\package.cab
    """
    # Target the offline image when given, otherwise the running OS.
    if image:
        target = '/Image:{0}'.format(image)
    else:
        target = '/Online'
    cmd = ['DISM', '/Quiet', target, '/Add-Package',
           '/PackagePath:{0}'.format(package)]
    if ignore_check:
        cmd.append('/IgnoreCheck')
    if prevent_pending:
        cmd.append('/PreventPending')
    # Suppress the automatic reboot unless the caller opted in.
    if not restart:
        cmd.append('/NoRestart')
    return __salt__['cmd.run_all'](cmd)
Install a package using DISM Args: package (str): The package to install. Can be a .cab file, a .msu file, or a folder .. note:: An `.msu` package is supported only when the target image is offline, either mounted or applied. ignore_check (Optional[bool]): Skip installation of the package if the applicability checks fail prevent_pending (Optional[bool]): Skip the installation of the package if there are pending online actions image (Optional[str]): The path to the root directory of an offline Windows image. If ``None`` is passed, the running operating system is targeted. Default is None. restart (Optional[bool]): Reboot the machine if required by the install Returns: dict: A dictionary containing the results of the command CLI Example: .. code-block:: bash salt '*' dism.add_package C:\\Packages\\package.cab
codesearchnet
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Read the data encoding the KeyWrappingData struct and decode it into
    its constituent parts.

    Args:
        input_stream (stream): A data stream containing encoded object data,
            supporting a read method; usually a BytearrayStream object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP version
            with which the object will be decoded. Optional, defaults to
            KMIP 1.0.

    Raises:
        ValueError: if the required wrapping method attribute is missing.
    """
    # Read the struct header (tag/type/length) via the base class.
    super(KeyWrappingData, self).read(
        input_stream,
        kmip_version=kmip_version
    )
    # Restrict decoding to this struct's payload only.
    local_stream = BytearrayStream(input_stream.read(self.length))

    # Wrapping method is the only required field.
    if self.is_tag_next(enums.Tags.WRAPPING_METHOD, local_stream):
        self._wrapping_method = primitives.Enumeration(
            enum=enums.WrappingMethod,
            tag=enums.Tags.WRAPPING_METHOD
        )
        self._wrapping_method.read(
            local_stream,
            kmip_version=kmip_version
        )
    else:
        raise ValueError(
            "Invalid struct missing the wrapping method attribute."
        )
    # All remaining fields are optional; decode them in spec order.
    if self.is_tag_next(
            enums.Tags.ENCRYPTION_KEY_INFORMATION,
            local_stream
    ):
        self._encryption_key_information = EncryptionKeyInformation()
        self._encryption_key_information.read(
            local_stream,
            kmip_version=kmip_version
        )
    if self.is_tag_next(
            enums.Tags.MAC_SIGNATURE_KEY_INFORMATION,
            local_stream
    ):
        self._mac_signature_key_information = MACSignatureKeyInformation()
        self._mac_signature_key_information.read(
            local_stream,
            kmip_version=kmip_version
        )
    if self.is_tag_next(enums.Tags.MAC_SIGNATURE, local_stream):
        self._mac_signature = primitives.ByteString(
            tag=enums.Tags.MAC_SIGNATURE
        )
        self._mac_signature.read(
            local_stream,
            kmip_version=kmip_version
        )
    if self.is_tag_next(enums.Tags.IV_COUNTER_NONCE, local_stream):
        self._iv_counter_nonce = primitives.ByteString(
            tag=enums.Tags.IV_COUNTER_NONCE
        )
        self._iv_counter_nonce.read(
            local_stream,
            kmip_version=kmip_version
        )
    if self.is_tag_next(enums.Tags.ENCODING_OPTION, local_stream):
        self._encoding_option = primitives.Enumeration(
            enum=enums.EncodingOption,
            tag=enums.Tags.ENCODING_OPTION
        )
        self._encoding_option.read(
            local_stream,
            kmip_version=kmip_version
        )
    # Any leftover bytes indicate a malformed encoding.
    self.is_oversized(local_stream)
Read the data encoding the KeyWrappingData struct and decode it into its constituent parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0.
juraj-google-style
def AddBasicOptions(self, argument_group):
    """Adds the basic options to the argument group.

    Args:
        argument_group (argparse._ArgumentGroup): argparse argument group.
    """
    version_string = self.GetVersionInformation()
    # Option definitions, registered in a fixed order so help output is stable.
    option_definitions = (
        (('-h', '--help'),
         dict(action='help', help='Show this help message and exit.')),
        (('--troubles',),
         dict(dest='show_troubleshooting', action='store_true', default=False,
              help='Show troubleshooting information.')),
        (('-V', '--version'),
         dict(dest='version', action='version', version=version_string,
              help='Show the version information.')),
    )
    for flags, keyword_arguments in option_definitions:
        argument_group.add_argument(*flags, **keyword_arguments)
Adds the basic options to the argument group. Args: argument_group (argparse._ArgumentGroup): argparse argument group.
codesearchnet
def get_dict(self, name, default=None):
    """Retrieves an environment variable value as a dictionary.

    Args:
        name (str): The case-insensitive, unprefixed variable name.
        default: If provided, a default value will be returned instead of
            throwing ``EnvironmentError``.

    Returns:
        dict: The environment variable's value as a ``dict`` (a shallow
        copy).

    Raises:
        EnvironmentError: If the environment variable does not exist, and
            ``default`` was not provided.
    """
    # Present: return a fresh dict built from the stored mapping.
    if name in self:
        return dict(**self.get(name))
    # Missing: fall back to the default, or raise when none was given.
    if default is None:
        raise EnvironmentError.not_found(self._prefix, name)
    return default
Retrieves an environment variable value as a dictionary. Args: name (str): The case-insensitive, unprefixed variable name. default: If provided, a default value will be returned instead of throwing ``EnvironmentError``. Returns: dict: The environment variable's value as a ``dict``. Raises: EnvironmentError: If the environment variable does not exist, and ``default`` was not provided.
juraj-google-style
def make_mixture_prior(latent_size, mixture_components):
    """Creates the mixture-of-Gaussians prior distribution.

    Args:
        latent_size: The dimensionality of the latent representation.
        mixture_components: Number of elements of the mixture.

    Returns:
        random_prior: A `tfd.Distribution` instance representing the
        distribution over encodings in the absence of any evidence.
    """
    # A single component degenerates to a fixed standard normal.
    if mixture_components == 1:
        return tfd.MultivariateNormalDiag(
            loc=tf.zeros([latent_size]), scale_identity_multiplier=1.0)
    component_shape = [mixture_components, latent_size]
    loc = tf.compat.v1.get_variable(name='loc', shape=component_shape)
    raw_scale_diag = tf.compat.v1.get_variable(
        name='raw_scale_diag', shape=component_shape)
    mixture_logits = tf.compat.v1.get_variable(
        name='mixture_logits', shape=[mixture_components])
    # Softplus keeps the learned scales strictly positive.
    components = tfd.MultivariateNormalDiag(
        loc=loc, scale_diag=tf.nn.softplus(raw_scale_diag))
    weights = tfd.Categorical(logits=mixture_logits)
    return tfd.MixtureSameFamily(
        components_distribution=components,
        mixture_distribution=weights,
        name='prior')
Creates the mixture of Gaussians prior distribution. Args: latent_size: The dimensionality of the latent representation. mixture_components: Number of elements of the mixture. Returns: random_prior: A `tfd.Distribution` instance representing the distribution over encodings in the absence of any evidence.
codesearchnet
def clip_and_copy_attack_outputs(self, attack_name, is_targeted):
    """Clips results of an attack and copies them to the all-examples dir.

    Args:
        attack_name: name of the attack.
        is_targeted: if True then attack is targeted, otherwise non-targeted.
    """
    # Track which attacks (targeted vs not) have been processed.
    if is_targeted:
        self._targeted_attack_names.add(attack_name)
    else:
        self._attack_names.add(attack_name)
    attack_dir = os.path.join(self.targeted_attacks_output_dir
                              if is_targeted
                              else self.attacks_output_dir,
                              attack_name)
    for fname in os.listdir(attack_dir):
        # Only image outputs are considered.
        if not (fname.endswith('.png') or fname.endswith('.jpg')):
            continue
        image_id = fname[:-4]
        # Skip images for which no clipping bounds are known.
        if image_id not in self.dataset_max_clip:
            continue
        image_max_clip = self.dataset_max_clip[image_id]
        image_min_clip = self.dataset_min_clip[image_id]
        adversarial_image = np.array(
            Image.open(os.path.join(attack_dir, fname)).convert('RGB'))
        # Enforce the per-image epsilon ball via elementwise clipping.
        clipped_adv_image = np.clip(adversarial_image,
                                    image_min_clip,
                                    image_max_clip)
        # Assign a sequential anonymized name for the copied example.
        output_basename = '{0:08d}'.format(self._output_image_idx)
        self._output_image_idx += 1
        self._output_to_attack_mapping[output_basename] = (attack_name,
                                                           is_targeted,
                                                           image_id)
        if is_targeted:
            self._targeted_attack_image_count += 1
        else:
            self._attack_image_count += 1
        Image.fromarray(clipped_adv_image).save(
            os.path.join(self.all_adv_examples_dir, output_basename + '.png'))
Clips results of attack and copy it to directory with all images. Args: attack_name: name of the attack. is_targeted: if True then attack is targeted, otherwise non-targeted.
juraj-google-style
def FinalizeTaskStorage(self, task):
    """Finalizes a processed task storage.

    Moves the task storage file from its temporary directory to the
    processed directory.

    Args:
        task (Task): task.

    Raises:
        IOError: if the storage type is not supported or if the storage file
            cannot be renamed.
    """
    # Only session storage uses per-task files.
    if self._storage_type != definitions.STORAGE_TYPE_SESSION:
        raise IOError('Unsupported storage type.')
    source_path = self._GetTaskStorageFilePath(task)
    target_path = self._GetProcessedStorageFilePath(task)
    try:
        # Rename is atomic on the same filesystem.
        os.rename(source_path, target_path)
    except OSError as exception:
        raise IOError((
            'Unable to rename task storage file: {0:s} with error: '
            '{1!s}').format(source_path, exception))
Finalizes a processed task storage. Moves the task storage file from its temporary directory to the processed directory. Args: task (Task): task. Raises: IOError: if the storage type is not supported or if the storage file cannot be renamed. OSError: if the storage type is not supported or if the storage file cannot be renamed.
juraj-google-style
def create_app(self):
    """Send a POST to spinnaker to create a new application with class
    variables.

    Returns:
        The rendered template JSON that was submitted.

    Raises:
        AssertionError: Application creation failed.
    """
    # Accounts are resolved at creation time and merged into the app info.
    self.appinfo['accounts'] = self.get_accounts()
    self.log.debug('Pipeline Config\n%s', pformat(self.pipeline_config))
    self.log.debug('App info:\n%s', pformat(self.appinfo))
    jsondata = self.retrieve_template()
    # Block until Spinnaker reports the creation task finished.
    wait_for_task(jsondata)
    self.log.info('Successfully created %s application', self.appname)
    return jsondata
Send a POST to spinnaker to create a new application with class variables. Raises: AssertionError: Application creation failed.
codesearchnet
def FromHttpToTimestamp(self, http_ts_string):
    """Converts an HTTP timestamp string to an internal nss_cache timestamp.

    Args:
        http_ts_string: an HTTP-format timestamp string, e.g.
            'Sun, 06 Nov 1994 08:49:37 GMT'.

    Returns:
        The number of seconds since the epoch, as an int.
    """
    # The timestamp is expressed in GMT, so convert with timegm (UTC)
    # rather than mktime (local time).
    parsed = time.strptime(http_ts_string, '%a, %d %b %Y %H:%M:%S GMT')
    return int(calendar.timegm(parsed))
Converts an HTTP timestamp string to an internal nss_cache timestamp. Args: http_ts_string: HTTP format timestamp string, e.g. 'Sun, 06 Nov 1994 08:49:37 GMT' Returns: number of seconds since epoch, as an int
github-repos