code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3
values |
|---|---|---|
def represent_as_tuple(string):
keep = (".", "[", "]")
return tuple(str_digit_to_int(c) if c not in keep else c for c in string) | Represent a number-string in the form of a tuple of digits.
"868.0F" -> (8, 6, 8, '.', 0, 15)
Args:
string - Number represented as a string of digits.
Returns:
Number represented as an iterable container of digits
>>> represent_as_tuple('868.0F')
(8, 6, 8, '.', 0, 15) | juraj-google-style |
def _call_for_each_replica(distribution, fn, args, kwargs):
run_concurrently = False
if not context.executing_eagerly():
ops.get_default_graph().switch_to_thread_local()
coord = coordinator.Coordinator(clean_stop_exception_types=(_RequestedStop,))
shared_variable_store = {}
devices = distrib... | Run `fn` in separate threads, once per replica/worker device.
Args:
distribution: the DistributionStrategy object.
fn: function to run (will be run once per replica, each in its own thread).
args: positional arguments for `fn`
kwargs: keyword arguments for `fn`.
Returns:
Merged return value of `fn` across all replica... | github-repos |
def parse_vlq(self, segment):
values = []
(cur, shift) = (0, 0)
for c in segment:
val = B64[ord(c)]
(val, cont) = ((val & 31), (val >> 5))
cur += (val << shift)
shift += 5
if (not cont):
(cur, sign) = ((cur >> 1), (cur & 1))
if sign:
... | Parse a string of VLQ-encoded data.
Returns:
a list of integers. | codesearchnet |
def dump_package_data(data, buf, format_=FileFormat.py, skip_attributes=None):
if format_ == FileFormat.txt:
raise ValueError("'txt' format not supported for packages.")
data_ = dict((k, v) for k, v in data.iteritems() if v is not None)
data_ = package_serialise_schema.validate(data_)
skip... | Write package data to `buf`.
Args:
data (dict): Data source - must conform to `package_serialise_schema`.
buf (file-like object): Destination stream.
format_ (`FileFormat`): Format to dump data in.
skip_attributes (list of str): List of attributes to not print. | juraj-google-style |
def _GetLoadConfigTimestamp(self, pefile_object):
if (not hasattr(pefile_object, 'DIRECTORY_ENTRY_LOAD_CONFIG')):
return None
timestamp = getattr(pefile_object.DIRECTORY_ENTRY_LOAD_CONFIG.struct, 'TimeDateStamp', 0)
return timestamp | Retrieves the timestamp from the Load Configuration directory.
Args:
pefile_object (pefile.PE): pefile object.
Returns:
int: load configuration timestamps or None if there are none present. | codesearchnet |
def on_connected(self, connection):
log.info('PikaClient: connected to RabbitMQ')
self.connected = True
self.in_channel = self.connection.channel(self.on_channel_open) | AMQP connection callback.
Creates input channel.
Args:
connection: AMQP connection | codesearchnet |
def count(cls, cur, table: str, where_keys: list=None):
if where_keys:
(where_clause, values) = cls._get_where_clause_with_values(where_keys)
query = cls._count_query_where.format(table, where_clause)
(q, t) = (query, values)
else:
query = cls._count_query.format(table)
(... | gives the number of records in the table
Args:
table: a string indicating the name of the table
Returns:
an integer indicating the number of records in the table | codesearchnet |
def encipher_vigenere(plaintext, plain_vocab, key):
ciphertext = []
layers = [
ShiftEncryptionLayer(plain_vocab, i) for i in range(len(plain_vocab))
]
for i, sentence in enumerate(plaintext):
cipher_sentence = []
for j, character in enumerate(sentence):
key_idx = key[j % len(key)]
... | Encrypt plain text with given key.
Args:
plaintext (list of list of Strings): a list of plain text to encrypt.
plain_vocab (list of Integer): unique vocabularies being used.
key (list of Integer): key to encrypt cipher using Vigenere table.
Returns:
ciphertext (list of Strings): encrypted plain text. | juraj-google-style |
def ValidateFeedStartAndExpirationDates(self, problems, first_date, last_date, first_date_origin, last_date_origin, today):
warning_cutoff = (today + datetime.timedelta(days=60))
if (last_date < warning_cutoff):
problems.ExpirationDate(time.mktime(last_date.timetuple()), last_date_origin)
if (first_... | Validate the start and expiration dates of the feed.
Issue a warning if it only starts in the future, or if
it expires within 60 days.
Args:
problems: The problem reporter object
first_date: A date object representing the first day the feed is active
last_date: A date object representing the last day the feed is activ... | codesearchnet |
def tags(self, value):
if value == self._defaults['tags'] and 'tags' in self._values:
del self._values['tags']
else:
self._values['tags'] = value | The tags property.
Args:
value (hash). the property value. | juraj-google-style |
def console_set_char_background(con: tcod.console.Console, x: int, y: int, col: Tuple[(int, int, int)], flag: int=BKGND_SET) -> None:
lib.TCOD_console_set_char_background(_console(con), x, y, col, flag) | Change the background color of x,y to col using a blend mode.
Args:
con (Console): Any Console instance.
x (int): Character x position from the left.
y (int): Character y position from the top.
col (Union[Tuple[int, int, int], Sequence[int]]):
An (r, g, b) sequence or Color instance.
flag (int): Blending mode to use, ... | codesearchnet |
def lattice_2_lmpbox(lattice, origin=(0, 0, 0)):
a, b, c = lattice.abc
xlo, ylo, zlo = origin
xhi = a + xlo
m = lattice.matrix
xy = np.dot(m[1], m[0] / a)
yhi = np.sqrt(b ** 2 - xy ** 2) + ylo
xz = np.dot(m[2], m[0] / a)
yz = (np.dot(m[1], m[2]) - xy * xz) / (yhi - ylo)
zhi = np... | Converts a lattice object to LammpsBox, and calculates the symmetry
operation used.
Args:
lattice (Lattice): Input lattice.
origin: A (3,) array/list of floats setting lower bounds of
simulation box. Default to (0, 0, 0).
Returns:
LammpsBox, SymmOp | juraj-google-style |
def dilated_conv_stack(name, x, mid_channels, output_channels, dilation_rates, activation='relu', dropout=0.0):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
output = 0.0
for (dil_ind, dil_rate) in enumerate(dilation_rates):
curr_out = conv_stack(('dil_%d' % dil_ind), x, mid_channel... | Dilated convolutional stack.
Features at different rates are computed independently using a 3 layer
convolutional stack and added.
Args:
name: variable scope.
x: 5-D Tensor.
mid_channels: Number of output channels of the first layer in the conv
stack.
output_channels: Number of output channels of the last layer.
dila... | codesearchnet |
def id_token_jwt_grant(request, token_uri, assertion):
body = {'assertion': assertion, 'grant_type': _JWT_GRANT_TYPE}
response_data = _token_endpoint_request(request, token_uri, body)
try:
id_token = response_data['id_token']
except KeyError as caught_exc:
new_exc = exceptions.RefreshErr... | Implements the JWT Profile for OAuth 2.0 Authorization Grants, but
requests an OpenID Connect ID Token instead of an access token.
This is a variant on the standard JWT Profile that is currently unique
to Google. This was added for the benefit of authenticating to services
that require ID Tokens instead of access toke... | codesearchnet |
def id_transcripts_by_gene(self, build='37'):
hgnc_id_transcripts = {}
LOG.info("Fetching all id transcripts")
for gene_obj in self.hgnc_collection.find({'build': build}):
hgnc_id = gene_obj['hgnc_id']
id_transcripts = self.get_id_transcripts(hgnc_id=hgnc_id, bui... | Return a dictionary with hgnc_id as keys and a set of id transcripts as value
Args:
build(str)
Returns:
hgnc_id_transcripts(dict) | juraj-google-style |
def _bfs_sort(self, start):
pathstates = {}
queue = []
queue.append([0, start])
pathstates[start.stateid] = 0
while queue:
leaf = queue.pop(0)
node = leaf[1]
pathlen = leaf[0]
... | maintain a map of states distance using BFS
Args:
start (fst state): The initial DFA state
Returns:
list: An ordered list of DFA states
using path distance | juraj-google-style |
def validate_id(tx_body):
tx_body = rapidjson.loads(rapidjson.dumps(tx_body))
try:
proposed_tx_id = tx_body['id']
except KeyError:
raise InvalidHash('No transaction id found!')
tx_body['id'] = None
tx_body_serialized = Transac... | Validate the transaction ID of a transaction
Args:
tx_body (dict): The Transaction to be transformed. | juraj-google-style |
def markdown_to_safe_html(markdown_string):
warning = ''
if isinstance(markdown_string, six.binary_type):
markdown_string_decoded = markdown_string.decode('utf-8')
markdown_string = markdown_string_decoded.replace(u'\x00', u'')
num_null_bytes = len(markdown_string_decoded) - len(markdown... | Convert Markdown to HTML that's safe to splice into the DOM.
Arguments:
markdown_string: A Unicode string or UTF-8--encoded bytestring
containing Markdown source. Markdown tables are supported.
Returns:
A string containing safe HTML. | juraj-google-style |
def _get_example_from_properties(self, spec):
local_spec = deepcopy(spec)
additional_property = False
if 'additionalProperties' in local_spec:
additional_property = True
if 'properties' not in local_spec:
local_spec['pr... | Get example from the properties of an object defined inline.
Args:
prop_spec: property specification you want an example of.
Returns:
An example for the given spec
A boolean, whether we had additionalProperties in the spec, or not | juraj-google-style |
class WhisperProcessor(ProcessorMixin):
feature_extractor_class = 'WhisperFeatureExtractor'
tokenizer_class = 'WhisperTokenizer'
def __init__(self, feature_extractor, tokenizer):
super().__init__(feature_extractor, tokenizer)
self.current_processor = self.feature_extractor
self._in_... | Constructs a Whisper processor which wraps a Whisper feature extractor and a Whisper tokenizer into a single
processor.
[`WhisperProcessor`] offers all the functionalities of [`WhisperFeatureExtractor`] and [`WhisperTokenizer`]. See
the [`~WhisperProcessor.__call__`] and [`~WhisperProcessor.decode`] for more informati... | github-repos |
def add_read(
self,
read_tuple_id,
bases,
qualities,
segments,
):
assert type(bases) is str, "Wrong type of bases: '{}'".format(bases)
assert type(qualities) is str, "Wrong type of qualities: '{}'".format(qualities)
assert type(segments) is t... | Add a new read to the current buffer. If it is a new read tuple (detected from ID), the buffer will be flushed.
Args:
read_tuple_id (int): ID of the read tuple.
bases (str): Sequence of bases.
qualities (str): Sequence of FASTQ qualities.
segments (list of rnftools.rnfformat.segment): List of segments constituting the... | juraj-google-style |
def bullet_base_pose_to_world_pose(self, pose_in_base):
pose_in_base = T.pose2mat(pose_in_base)
base_pos_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[0])
base_orn_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[1])
base_pose_in_world = T... | Convert a pose in the base frame to a pose in the world frame.
Args:
pose_in_base: a (pos, orn) tuple.
Returns:
pose_in world: a (pos, orn) tuple. | juraj-google-style |
def GetUpdates(self, source, search_base, search_filter, search_scope, since):
if self.conf.get('ad'):
self.attrs.append('whenChanged')
else:
self.attrs.append('modifyTimestamp')
if since is not None:
ts = self.FromTimestampToLdap(since)
if self.conf.get('ad'):
ts... | Get updates from a source.
Args:
source: a data source
search_base: the LDAP base of the tree
search_filter: the LDAP object filter
search_scope: the LDAP scope filter, one of 'base', 'one', or 'sub'.
since: a timestamp to get updates since (None for 'get everything')
Returns:
a tuple containing the map of updates a... | github-repos |
def hgnc_id(self, hgnc_symbol, build='37'):
query = {'hgnc_symbol': hgnc_symbol, 'build': build}
projection = {'hgnc_id': 1, '_id': 0}
res = self.hgnc_collection.find(query, projection)
if (res.count() > 0):
return res[0]['hgnc_id']
else:
return None | Query the genes with a hgnc symbol and return the hgnc id
Args:
hgnc_symbol(str)
build(str)
Returns:
hgnc_id(int) | codesearchnet |
def intrusion_set(self, name, **kwargs):
group_obj = IntrusionSet(name, **kwargs)
return self._group(group_obj) | Add Intrusion Set data to Batch object.
Args:
name (str): The name for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
xid (str, kwargs): The external id for this Group.
Returns:
obj: An instance of IntrusionSet. | juraj-google-style |
def copy_code(source: message.Message, target: message.Message) -> None:
if not fhir_types.is_type_or_profile_of_code(source.DESCRIPTOR):
raise fhir_errors.InvalidFhirError(f'Source: {source.DESCRIPTOR.full_name} is not type or profile of Code.')
if not fhir_types.is_type_or_profile_of_code(target.DESCR... | Adds all fields from source to target.
Args:
source: The FHIR Code instance to copy from.
target: The target FHIR Code instance to copy to. | github-repos |
def invoke_process_element(self, sdf_invoker, output_processor, element, restriction, watermark_estimator_state, *args, **kwargs):
assert isinstance(sdf_invoker, DoFnInvoker)
class CheckpointState(object):
def __init__(self):
self.checkpointed = None
self.residual_restriction =... | Invokes `process()` method of a Splittable `DoFn` for a given element.
Args:
sdf_invoker: a `DoFnInvoker` for the Splittable `DoFn`.
element: the element to process
Returns:
a `SDFProcessElementInvoker.Result` object. | github-repos |
def join(table1, table2, on=None, how='inner', name=None):
if (how not in ('inner', 'left')):
ItsdbError("Only 'inner' and 'left' join methods are allowed.")
on = _join_pivot(on, table1, table2)
fields = _RelationJoin(table1.fields, table2.fields, on=on)
get_key = (lambda rec: tuple((rec.get(k) ... | Join two tables and return the resulting Table object.
Fields in the resulting table have their names prefixed with their
corresponding table name. For example, when joining `item` and
`parse` tables, the `i-input` field of the `item` table will be
named `item:i-input` in the resulting Table. Pivot fields (those
in *o... | codesearchnet |
def sample(input_placeholder, logits, seed=None, max_length=1024, temperature=1.0):
assert (temperature > 0), 'Temperature must be greater than 0.'
if (not seed):
seed = chr((ord('A') + random.randint(0, 25)))
result = ''
recurrent_runner = pt.train.RecurrentRunner()
recurrent_runner.reset()... | Samples from the LSTM model.
Sampling is done by first running either the seed or an arbitrary character
through the model and then drawing the next character from the probability
distribution definted by `softmax`.
Args:
input_placeholder: A placeholder that expects a scalar feed.
logits: The logits. This works wit... | codesearchnet |
def aside_view_declaration(self, view_name):
if (view_name in self._combined_asides):
return getattr(self, self._combined_asides[view_name])
else:
return None | Find and return a function object if one is an aside_view for the given view_name
Aside methods declare their view provision via @XBlockAside.aside_for(view_name)
This function finds those declarations for a block.
Arguments:
view_name (string): the name of the view requested.
Returns:
either the function or None | codesearchnet |
def get_suffixes(arr):
arr = tuple(arr)
return [arr]
return (arr[i:] for i in range(len(arr))) | Returns all possible suffixes of an array (lazy evaluated)
Args:
arr: input array
Returns:
Array of all possible suffixes (as tuples) | juraj-google-style |
def sql_column_like_drug(self, column_name: str) -> str:
clauses = ['{col} LIKE {fragment}'.format(col=column_name, fragment=sql_string_literal(f)) for f in self.sql_like_fragments]
return '({})'.format(' OR '.join(clauses)) | Returns SQL like
.. code-block:: sql
(column_name LIKE '%drugname1%' OR
column_name LIKE '%drugname2%')
for the drug names that this Drug object knows about.
Args:
column_name: column name, pre-escaped if necessary
Returns:
SQL fragment as above | codesearchnet |
def main():
parser = argparse.ArgumentParser(description='Cherry picking automation.')
parser.add_argument('--version', help='<new_major_ver>.<new_minor_ver>.<new_patch_ver>', default='')
parser.add_argument('--nightly', help='disable the service provisioning step', action='store_true')
args = parser.pa... | This script updates all instances of version in the tensorflow directory.
Requirements:
version: The version tag
OR
nightly: Create a nightly tag with current date
Raises:
RuntimeError: If the script is not being run from tf source dir | github-repos |
def validate(self, institute, case, user, link, variant, validate_type):
if not validate_type in SANGER_OPTIONS:
LOG.warning("Invalid validation string: %s", validate_type)
LOG.info("Validation options: %s", ', '.join(SANGER_OPTIONS))
return
updated_variant ... | Mark validation status for a variant.
Arguments:
institute (dict): A Institute object
case (dict): Case object
user (dict): A User object
link (str): The url to be used in the event
variant (dict): A variant object
validate_type(str): The outcome of validation.
choices=('True positive', 'False positive')
Returns:
upd... | juraj-google-style |
def parse(cls, args):
parsed = {}
try:
(options, args) = cls.optparser.parse_args(args)
except OptionParsingError as e:
raise ParseError(e.msg, cls.optparser.format_help())
except OptionParsingExit as e:
return None
parsed['label'] =... | Parse command line arguments to construct a dictionary of command
parameters that can be used to create a command
Args:
`args`: sequence of arguments
Returns:
Dictionary that can be used in create method
Raises:
ParseError: when the arguments are not correct | juraj-google-style |
def filter_by_analysis_period(self, analysis_period):
self._check_analysis_period(analysis_period)
_filtered_data = self.filter_by_doys(analysis_period.doys_int)
_filtered_data.header._analysis_period = analysis_period
return _filtered_data | Filter the Data Collection based on an analysis period.
Args:
analysis period: A Ladybug analysis period
Return:
A new Data Collection with filtered data | juraj-google-style |
def from_data(cls, data):
obj = cls()
with contextlib.closing(BytesIO(data)) as file_handle:
obj.load_file(file_handle)
return obj | Load an FCS file from a bytes-like object.
Args:
data: buffer containing contents of an FCS file.
Returns:
FCSParser instance with data loaded | codesearchnet |
def restrict_with(self, expr: str, error_tag: str = None,
error_message: str = None) -> None:
def parse(x: str) -> Number:
res = self.parser(x)
if res is None:
raise InvalidArgument(expr)
return res
def simpl(rng: List[N... | Combine the receiver with new intervals.
Args:
expr: "range" or "length" expression.
error_tag: error tag of the new expression.
error_message: error message for the new expression.
Raises:
InvalidArgument: If parsing of `expr` fails. | juraj-google-style |
def from_pandas(cls, df, block_partitions_cls):
new_index = df.index
new_columns = df.columns
new_dtypes = df.dtypes
new_data = block_partitions_cls.from_pandas(df)
return cls(new_data, new_index, new_columns, dtypes=new_dtypes) | Improve simple Pandas DataFrame to an advanced and superior Modin DataFrame.
Args:
cls: DataManger object to convert the DataFrame to.
df: Pandas DataFrame object.
block_partitions_cls: BlockParitions object to store partitions
Returns:
Returns DataManager containing data from the Pandas DataFrame. | juraj-google-style |
def sharded_filename(filename_tensor: tensor_lib.Tensor, shard: int, num_shards: tensor_lib.Tensor) -> tensor_lib.Tensor:
return gen_io_ops.sharded_filename(filename_tensor, shard, num_shards) | Append sharding information to a filename.
Args:
filename_tensor: A string tensor.
shard: Integer. The shard for the filename.
num_shards: An int Tensor for the number of shards.
Returns:
A string tensor. | github-repos |
def _contains_nd(nodes, point):
min_vals = np.min(nodes, axis=1)
if (not np.all((min_vals <= point))):
return False
max_vals = np.max(nodes, axis=1)
if (not np.all((point <= max_vals))):
return False
return True | r"""Predicate indicating if a point is within a bounding box.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): A set of points.
point (numpy.ndarray): A 1D NumPy array representing a point
in the same dimension as ``nodes``.
Retur... | codesearchnet |
def dropout(inputs, keep_prob=0.5, is_training=True, scope=None):
if is_training and keep_prob > 0:
with tf.name_scope(scope, 'Dropout', [inputs]):
return tf.nn.dropout(inputs, keep_prob)
else:
return inputs | Returns a dropout layer applied to the input.
Args:
inputs: the tensor to pass to the Dropout layer.
keep_prob: the probability of keeping each input unit.
is_training: whether or not the model is in training mode. If so, dropout is
applied and values scaled. Otherwise, inputs is returned.
scope: Optional scope for na... | juraj-google-style |
def AddCustomJsonFieldMapping(message_type, python_name, json_name, package=None):
if (not issubclass(message_type, messages.Message)):
raise exceptions.TypecheckError(('Cannot set JSON field mapping for non-message "%s"' % message_type))
try:
_ = message_type.field_by_name(python_name)
exce... | Add a custom wire encoding for a given message field.
This is primarily used in generated code, to handle enum values
which happen to be Python keywords.
Args:
message_type: (messages.Message) A message type
python_name: (basestring) Python name for this value.
json_name: (basestring) JSON name to be used on the wire... | codesearchnet |
def times_update(self, factor):
if (factor < 0):
raise ValueError('The factor must not be negative.')
elif (factor == 0):
self.clear()
else:
_elements = self._elements
for element in _elements:
_elements[element] *= factor
self._total *= factor | Update each this multiset by multiplying each element's multiplicity with the given scalar factor.
>>> ms = Multiset('aab')
>>> ms.times_update(2)
>>> sorted(ms)
['a', 'a', 'a', 'a', 'b', 'b']
You can also use the ``*=`` operator for the same effect:
>>> ms = Multiset('ac')
>>> ms *= 3
>>> sorted(ms)
['a', 'a', 'a',... | codesearchnet |
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
if token_ids_1 is None:
return token_ids_0
return token_ids_0 + token_ids_1 | Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens.
This implementation does not add special tokens and this method should be overridden in a subclass.
Args:
token_ids_0 (`List[int]`): The first tokenized sequence.
token_ids_1 (`List[i... | github-repos |
def channel_ready_future(channel):
fut = channel._loop.create_future()
def _set_result(state):
if ((not fut.done()) and (state is _grpc.ChannelConnectivity.READY)):
fut.set_result(None)
fut.add_done_callback((lambda f: channel.unsubscribe(_set_result)))
channel.subscribe(_set_result... | Creates a Future that tracks when a Channel is ready.
Cancelling the Future does not affect the channel's state machine.
It merely decouples the Future from channel state machine.
Args:
channel: A Channel object.
Returns:
A Future object that matures when the channel connectivity is
ChannelConnectivity.READY. | codesearchnet |
def transitive_inputs(self, node_name, include_control=True, include_reversed_ref=False, device_name=None):
if not self._debug_graphs:
raise LookupError('Node inputs are not loaded from partition graphs yet.')
device_name = self._infer_device_name(device_name, node_name)
input_lists = [self._debug_g... | Get the transitive inputs of given node according to partition graphs.
Args:
node_name: Name of the node.
include_control: Include control inputs (True by default).
include_reversed_ref: Whether a ref input, say from A to B, is to be also
considered as an input from B to A. The rationale is that ref inputs
generally l... | github-repos |
def handle_range(schema, field, validator, parent_schema):
if (not isinstance(field, fields.Number)):
return schema
if validator.min:
schema['minimum'] = validator.min
schema['exclusiveMinimum'] = True
else:
schema['minimum'] = 0
schema['exclusiveMinimum'] = False
... | Adds validation logic for ``marshmallow.validate.Range``, setting the
values appropriately ``fields.Number`` and it's subclasses.
Args:
schema (dict): The original JSON schema we generated. This is what we
want to post-process.
field (fields.Field): The field that generated the original schema and
who this post-proces... | codesearchnet |
def OpenFile(self, windows_path):
path_spec = self._path_resolver.ResolvePath(windows_path)
if (path_spec is None):
return None
return self._file_system.GetFileObjectByPathSpec(path_spec) | Opens the file specificed by the Windows path.
Args:
windows_path (str): Windows path to the file.
Returns:
FileIO: file-like object or None if the file does not exist. | codesearchnet |
def random_uniform(shape, minval=None, maxval=None, dtype=dtypes.float32, seed=None):
dtype = dtypes.as_dtype(dtype)
with ops.name_scope('random_uniform'):
samples = random_ops.random_uniform(shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed)
if dtype.is_complex:
... | Tensor with (possibly complex) Uniform entries.
Samples are distributed like
```
Uniform[minval, maxval], if dtype is real,
X + iY, where X, Y ~ Uniform[minval, maxval], if dtype is complex.
```
Args:
shape: `TensorShape` or Python list. Shape of the returned tensor.
minval: `0-D` `Tensor` giving the minimum val... | github-repos |
def interpolate_beat_times(self, beat_times: numpy.ndarray, steps_per_beat: numpy.ndarray, n_extend: numpy.ndarray):
requires_backends(self, ['scipy'])
beat_times_function = scipy.interpolate.interp1d(np.arange(beat_times.size), beat_times, bounds_error=False, fill_value='extrapolate')
ext_beats = beat_time... | This method takes beat_times and then interpolates that using `scipy.interpolate.interp1d` and the output is
then used to convert raw audio to log-mel-spectrogram.
Args:
beat_times (`numpy.ndarray`):
beat_times is passed into `scipy.interpolate.interp1d` for processing.
steps_per_beat (`int`):
used as an parameter to ... | github-repos |
def model_to_dot(model, show_shapes=False, show_dtype=False, show_layer_names=True, rankdir='TB', expand_nested=False, dpi=200, subgraph=False, show_layer_activations=False, show_trainable=False, **kwargs):
from keras.src.ops.function import make_node_key
if not model.built:
raise ValueError('This model... | Convert a Keras model to dot format.
Args:
model: A Keras model instance.
show_shapes: whether to display shape information.
show_dtype: whether to display layer dtypes.
show_layer_names: whether to display layer names.
rankdir: `rankdir` argument passed to PyDot,
a string specifying the format of the plot: `"TB"`
cre... | github-repos |
def plot_generated_images(images, fname):
fig = plt.figure(figsize=(4, 4))
canvas = backend_agg.FigureCanvasAgg(fig)
for i, image in enumerate(images):
ax = fig.add_subplot(4, 4, i + 1)
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.imshow(image.reshape(IMAGE_SHAPE[:-1]),... | Save a synthetic image as a PNG file.
Args:
images: samples of synthetic images generated by the generative network.
fname: Python `str`, filename to save the plot to. | juraj-google-style |
def compute_output_signature(self, input_signature):
def check_type_return_shape(s):
if not isinstance(s, tensor.TensorSpec):
raise TypeError('Only TensorSpec signature types are supported, but saw signature entry: {}.'.format(s))
return s.shape
input_shape = nest.map_structure(chec... | Compute the output tensor signature of the layer based on the inputs.
Unlike a TensorShape object, a TensorSpec object contains both shape
and dtype information for a tensor. This method allows layers to provide
output dtype information if it is different from the input dtype.
For any layer that doesn't implement this... | github-repos |
def filter(self, cls, recursive=False):
source = self.walk_preorder if recursive else self._children
return [
codeobj
for codeobj in source()
if isinstance(codeobj, cls)
] | Retrieves all descendants (including self) that are instances
of a given class.
Args:
cls (class): The class to use as a filter.
Kwargs:
recursive (bool): Whether to descend recursively down the tree. | juraj-google-style |
def opcode_to_name(model, op_code):
op = model.operatorCodes[op_code]
code = max(op.builtinCode, op.deprecatedBuiltinCode)
for name, value in vars(schema_fb.BuiltinOperator).items():
if value == code:
return name
return None | Converts a TFLite op_code to the human readable name.
Args:
model: The input tflite model.
op_code: The op_code to resolve to a readable name.
Returns:
A string containing the human readable op name, or None if not resolvable. | github-repos |
def pyc_load(fp):
magic_1 = U16(fp.read(2), target=MARSHAL_TARGET)
magic_2 = U16(fp.read(2), target=MARSHAL_TARGET)
internals = MAGIC_MAP.get(magic_1)
if (internals is None):
raise ValueError(('Invalid or unknown magic (%d).' % magic_1))
if (magic_2 != 2573):
raise ValueError(('Inval... | Load a .pyc file from a file-like object.
Arguments:
fp(file): The file-like object to read.
Returns:
PycFile: The parsed representation of the .pyc file. | codesearchnet |
def duration(self):
duration = 0.0
if (len(self.events) > 0):
first = datetime.fromtimestamp(self.events[0]['timestamp'])
last = datetime.fromtimestamp(self.events[(- 1)]['timestamp'])
duration = (last - first).total_seconds()
return duration | Calculate how long the stage took.
Returns:
float: (current) duration of the stage | codesearchnet |
def _prepare_socket_file(self, socket_path, default_prefix):
if (socket_path is not None):
if os.path.exists(socket_path):
raise Exception('Socket file {} exists!'.format(socket_path))
socket_dir = os.path.dirname(socket_path)
try_to_create_directory(socket_dir)
return so... | Prepare the socket file for raylet and plasma.
This method helps to prepare a socket file.
1. Make the directory if the directory does not exist.
2. If the socket file exists, raise exception.
Args:
socket_path (string): the socket file to prepare. | codesearchnet |
def _VerifyValues(self, input_sizes=None, filter_sizes=None, out_backprop_sizes=None, strides=None, dilations=None, padding=None, data_format_src='NHWC', data_format_dst='NHWC', expected=None):
total_size_1 = np.prod(input_sizes)
total_size_2 = np.prod(out_backprop_sizes)
x1 = np.arange(1, total_size_1 + 1,... | Tests that gen_nn_ops.conv2d_backprop_filter produces the right output.
Args:
input_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
out_backprop_sizes: Output gradients tensor dimensions.
st... | github-repos |
def check(cls, status):
assert (cls.trigger is not None), 'Invalid ErrorTrap, trigger not set'
assert (cls.error is not None), 'Invalid ErrorTrap, error not set'
if (status == cls.trigger):
raise cls.error() | Checks if a status enum matches the trigger originally set, and
if so, raises the appropriate error.
Args:
status (int, enum): A protobuf enum response status to check.
Raises:
AssertionError: If trigger or error were not set.
_ApiError: If the statuses don't match. Do not catch. Will be
caught automatically and sent... | codesearchnet |
def _process_list_value(name, parse_fn, var_type, m_dict, values, results_dictionary):
if (m_dict['index'] is not None):
raise ValueError('Assignment of a list to a list index.')
elements = filter(None, re.split('[ ,]', m_dict['vals']))
if (name in results_dictionary):
raise _reuse_fail(name... | Update results_dictionary from a list of values.
Used to update results_dictionary to be returned by parse_values when
encountering a clause with a list RHS (e.g. "arr=[1,2,3]".)
Mutates results_dictionary.
Args:
name: Name of variable in assignment ("arr").
parse_fn: Function for parsing individual values.
var_typ... | codesearchnet |
def chat_delete(self, *, channel: str, ts: str, **kwargs) -> SlackResponse:
kwargs.update({'channel': channel, 'ts': ts})
return self.api_call('chat.delete', json=kwargs) | Deletes a message.
Args:
channel (str): Channel containing the message to be deleted. e.g. 'C1234567890'
ts (str): Timestamp of the message to be deleted. e.g. '1234567890.123456' | codesearchnet |
def __init__(self, dllpath=None):
self._lib = None
self._winlib = None
self._path = None
self._windows = sys.platform.startswith('win')
self._cygwin = sys.platform.startswith('cygwin')
self._temp = None
if self._windows or self._cygwin:
self.... | Initializes an instance of a ``Library``.
Loads the default J-Link DLL if ``dllpath`` is ``None``, otherwise
loads the DLL specified by the given ``dllpath``.
Args:
self (Library): the ``Library`` instance
dllpath (str): the DLL to load into the library
Returns:
``None`` | juraj-google-style |
def _init_from_converter(self, options: QuantizationDebugOptions, converter: TFLiteConverter, calibrated_model: Optional[bytes]=None, float_model: Optional[bytes]=None) -> None:
self.quant_model = convert.mlir_quantize(calibrated_model, disable_per_channel=converter._experimental_disable_per_channel, fully_quantize... | Convert the model and apply options.
Converts the quantized model and initializes a quantized model interpreter
with the quantized model. Returns a float model interpreter if float model
is provided.
Args:
options: a QuantizationDebugOptions object.
converter: an initialized tf.lite.TFLiteConverter.
calibrated_model:... | github-repos |
def writeline(self, line, line_number):
tmp_file = tempfile.TemporaryFile('w+')
if not line.endswith(os.linesep):
line += os.linesep
try:
with open(self.path, 'r') as file_handle:
for count, new_line in enumerate(file_handle):
... | Rewrite a single line in the file.
Args:
line (str): The new text to write to the file.
line_number (int): The line of the file to rewrite. Numbering
starts at 0. | juraj-google-style |
def unregister_peer(self, connection_id):
public_key = self.peer_to_public_key(connection_id)
if public_key:
self._consensus_notifier.notify_peer_disconnected(public_key)
with self._lock:
if connection_id in self._peers:
del self._peers[connectio... | Removes a connection_id from the registry.
Args:
connection_id (str): A unique identifier which identifies an
connection on the network server socket. | juraj-google-style |
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
def __init__(self, bos_token_id: int):
self.bos_token_id = bos_token_id
def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
new_scores = jnp.full(scores.shape, -float('inf'))
apply_p... | [`FlaxLogitsProcessor`] that enforces the specified token as the first generated token.
Args:
bos_token_id (`int`):
The id of the token to force as the first generated token. | github-repos |
def forward(self, inference_args, input_tangents):
if self._forward is None:
self._forward, self._forward_graph, self._backward, self._forwardprop_output_indices, self._num_forwardprop_outputs = self._forward_and_backward_functions(inference_args, input_tangents)
return self._forward | Construct or fetch a forward function with side-outputs.
When graph building without a tape active, symbolic gradients rely on
regenerating the backward function for higher-order gradients (to account
for new side outputs of the rewritten forward function call). Thus there is
no fixed backward function for this case. ... | github-repos |
def _build(self, inputs, memory, treat_input_as_matrix=False):
if treat_input_as_matrix:
inputs = basic.BatchFlatten(preserve_dims=2)(inputs)
inputs_reshape = basic.BatchApply(
basic.Linear(self._mem_size), n_dims=2)(inputs)
else:
inputs = basic.BatchFlatten()(inputs)
inpu... | Adds relational memory to the TensorFlow graph.
Args:
inputs: Tensor input.
memory: Memory output from the previous time step.
treat_input_as_matrix: Optional, whether to treat `input` as a sequence
of matrices. Defaulta to False, in which case the input is flattened
into a vector.
Returns:
output: This time step's o... | juraj-google-style |
def minimize_peak_memory(graph, scheduler_alg):
if (scheduler_alg == 'NAIVE'):
return _minimize_peak_memory_naive(graph)
elif (scheduler_alg == 'LIST'):
return _minimize_peak_memory_list(graph)
else:
raise NotImplementedError('{} is not a scheduler algorithm. It should be one of NAIV... | Computes a schedule to minimize peak memory.
Args:
graph: an mtf.auto_mtf.graph_interface.GraphInterface.
scheduler_alg: a string, one of 'NAIVE' or 'LIST'
Returns:
an iterable of integers representing the schedule. | codesearchnet |
def scaled_wulff(self, wulffshape, r):
r_ratio = (r / wulffshape.effective_radius)
miller_list = wulffshape.miller_energy_dict.keys()
se_list = np.array(list(wulffshape.miller_energy_dict.values()))
scaled_se = (se_list * r_ratio)
return WulffShape(wulffshape.lattice, miller_list, scaled_se, symprec... | Scales the Wulff shape with an effective radius r. Note that the resulting
Wulff does not neccesarily have the same effective radius as the one
provided. The Wulff shape is scaled by its surface energies where first
the surface energies are scale by the minimum surface energy and then
multiplied by the given effective ... | codesearchnet |
def load_yaml_config(conf_file):
global g_config
with open(conf_file) as fp:
g_config = util.yaml_load(fp)
src_dir = get_path('src_dir', None)
if (src_dir is not None):
sys.path.insert(0, src_dir)
for cmd in get('commands', []):
_import(cmd) | Load a YAML configuration.
This will not update the configuration but replace it entirely.
Args:
conf_file (str):
Path to the YAML config. This function will not check the file name
or extension and will just crash if the given file does not exist or
is not a valid YAML file. | codesearchnet |
def _parse_description(html_chunk):
description_tag = html_chunk.match(
["div", {"class": "kniha_detail_text"}],
"p"
)
if not description_tag:
return None
description = get_first_content(description_tag)
description = description.replace("<br />", "\n")
description... | Parse description of the book.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
str/None: Description as string or None if not found. | juraj-google-style |
def _subtoken_ids_to_tokens(self, subtokens):
concatenated = "".join(
[self._subtoken_id_to_subtoken_string(s) for s in subtokens])
split = concatenated.split("_")
ret = []
for t in split:
if t:
unescaped = _unescape_token(t + "_")
if unescaped:
ret.append(un... | Converts a list of subtoken ids to a list of tokens.
Args:
subtokens: a list of integers in the range [0, vocab_size)
Returns:
a list of strings. | juraj-google-style |
def default_filename(ext):
if ext == "py":
raise RuntimeError("asked for a default filename with 'py' extension")
filename = detect_current_filename()
if filename is None:
return temp_filename(ext)
basedir = dirname(filename) or getcwd()
if _no_access(basedir) or _shares_exe... | Generate a default filename with a given extension, attempting to use
the filename of the currently running process, if possible.
If the filename of the current process is not available (or would not be
writable), then a temporary file with the given extension is returned.
Args:
ext (str) : the desired extension for ... | juraj-google-style |
class RealmScorerOutput(ModelOutput):
relevance_score: Optional[torch.FloatTensor] = None
query_score: Optional[torch.FloatTensor] = None
candidate_score: Optional[torch.FloatTensor] = None | Outputs of [`RealmScorer`] models.
Args:
relevance_score (`torch.FloatTensor` of shape `(batch_size, config.num_candidates)`):
The relevance score of document candidates (before softmax).
query_score (`torch.FloatTensor` of shape `(batch_size, config.retriever_proj_size)`):
Query score derived from the query embedder.... | github-repos |
def get_package_from_string(txt, paths=None):
o = VersionedObject(txt)
return get_package(o.name, o.version, paths=paths) | Get a package given a string.
Args:
txt (str): String such as 'foo', 'bah-1.3'.
paths (list of str, optional): paths to search for package, defaults
to `config.packages_path`.
Returns:
`Package` instance, or None if no package was found. | codesearchnet |
def _replace_sparse_with_values(value, sparse_list):
flat_vals = nest.flatten(value, expand_composites=False)
new_vals = []
for v in flat_vals:
if isinstance(v, sparse_tensor.SparseTensor):
sparse_list.append(v)
new_vals.append(v.values)
else:
new_vals.app... | Replace `SparseTensor`s with their values in `value`
Each `SparseTensor` in `value` is replaced by its `values` tensor, and
collects all `SparseTensor`s in `sparse_list`.
Args:
value: A structure of `Tensor`s and `SparseTensor`s
sparse_list: A list. Output parameter that collects all `SparseTensor`s in
`value`.
Retu... | github-repos |
def _project_TH2(self, hist: Hist) -> Any:
if len(self.projection_axes) != 1:
raise ValueError(len(self.projection_axes), "Invalid number of axes")
projection_func_map = {
TH1AxisType.x_axis.value: hist.ProjectionX,
... | Perform the actual TH2 -> TH1 projection.
This projection can only be to 1D.
Args:
hist (ROOT.TH2): Histogram from which the projections should be performed.
Returns:
ROOT.TH1: The projected histogram. | juraj-google-style |
async def _connect_and_read(self):
while (not self._stopped):
try:
self._connection_attempts += 1
async with aiohttp.ClientSession(loop=self._event_loop, timeout=aiohttp.ClientTimeout(total=self.timeout)) as session:
self._session = session
(url, data)... | Retreives and connects to Slack's RTM API.
Makes an authenticated call to Slack's RTM API to retrieve
a websocket URL. Then connects to the message server and
reads event messages as they come in.
If 'auto_reconnect' is specified we
retrieve a new url and reconnect any time the connection
is lost unintentionally or a... | codesearchnet |
def gather(self, indices, name=None):
return self._implementation.gather(indices, name=name) | Return selected values in the TensorArray as a packed `Tensor`.
All of selected values must have been written and their shapes
must all match.
Args:
indices: A `1-D` `Tensor` taking values in `[0, max_value)`. If the
`TensorArray` is not dynamic, `max_value=size()`.
name: A name for the operation (optional).
Return... | github-repos |
def get_time_of_day_description(self):
seconds_expression = self._expression_parts[0]
minute_expression = self._expression_parts[1]
hour_expression = self._expression_parts[2]
description = StringBuilder()
if ((any(((exp in minute_expression) for exp in self._special_characters)) is False) and (any(... | Generates a description for only the TIMEOFDAY portion of the expression
Returns:
The TIMEOFDAY description | codesearchnet |
def drive_enclosures(self):
if (not self.__drive_enclures):
self.__drive_enclures = DriveEnclosures(self.__connection)
return self.__drive_enclures | Gets the Drive Enclosures API client.
Returns:
DriveEnclosures: | codesearchnet |
def _hexdecode(hexstring):
_checkString(hexstring, description='hexstring')
if len(hexstring) % 2 != 0:
raise ValueError('The input hexstring must be of even length. Given: {!r}'.format(hexstring))
if sys.version_info[0] > 2:
by = bytes(hexstring, 'latin1')
... | Convert a hex encoded string to a byte string.
For example '4A' will return 'J', and '04' will return ``'\\x04'`` (which has length 1).
Args:
hexstring (str): Can be for example 'A3' or 'A3B4'. Must be of even length.
Allowed characters are '0' to '9', 'a' to 'f' and 'A' to 'F' (not space).
Returns:
A string of half... | juraj-google-style |
def str_to_mac(mac_string):
sp = mac_string.split(':')
mac_string = ''.join(sp)
return binascii.unhexlify(mac_string) | Convert a readable string to a MAC address
Args:
mac_string (str): a readable string (e.g. '01:02:03:04:05:06')
Returns:
str: a MAC address in hex form | juraj-google-style |
def indicator_arrays(tc_entity_array):
type_dict = {}
for ea in tc_entity_array:
type_dict.setdefault(ea['type'], []).append(ea['value'])
return type_dict | Convert TCEntityArray to Indicator Type dictionary.
Args:
tc_entity_array (dictionary): The TCEntityArray to convert.
Returns:
(dictionary): Dictionary containing arrays of indicators for each indicator type. | juraj-google-style |
def ray_get_and_free(object_ids):
global _last_free_time
global _to_free
result = ray.get(object_ids)
if type(object_ids) is not list:
object_ids = [object_ids]
_to_free.extend(object_ids)
now = time.time()
if (len(_to_free) > MAX_FREE_QUEUE_SIZE
or now - _la... | Call ray.get and then queue the object ids for deletion.
This function should be used whenever possible in RLlib, to optimize
memory usage. The only exception is when an object_id is shared among
multiple readers.
Args:
object_ids (ObjectID|List[ObjectID]): Object ids to fetch and free.
Returns:
The result of ray.ge... | juraj-google-style |
def _BuildKeyHierarchy(self, subkeys, values):
if subkeys:
for registry_key in subkeys:
name = registry_key.name.upper()
if name in self._subkeys:
continue
self._subkeys[name] = registry_key
registry_key._key_path = key_paths.JoinKeyPath([
s... | Builds the Windows Registry key hierarchy.
Args:
subkeys (list[FakeWinRegistryKey]): list of subkeys.
values (list[FakeWinRegistryValue]): list of values. | juraj-google-style |
def expect_no_raises(message=None, extras=None):
try:
yield
except Exception as e:
e_record = records.ExceptionRecord(e)
if extras:
e_record.extras = extras
msg = message or 'Got an unexpected exception'
details = '%s: %s' % (msg, e_record.details)
... | Expects no exception is raised in a context.
If the expectation is not met, the test is marked as fail after its
execution finishes.
A default message is added to the exception `details`.
Args:
message: string, custom message to add to exception's `details`.
extras: An optional field for extra information to be incl... | juraj-google-style |
def _compute_attention(self, query, key, value, attention_mask=None, training=None, return_attention_scores=False):
if self._flash_attention and return_attention_scores:
raise ValueError('Returning attention scores is not supported when flash attention is enabled. Please disable flash attention to access at... | Applies Dot-product attention with query, key, value tensors.
This function defines the computation inside `call` with projected
multi-head Q, K, V inputs. Users can override this function for
customized attention implementation.
Args:
query: Projected query tensor of shape `(B, T, N, key_dim)`.
key: Projected key te... | github-repos |
def install_dependencies(package: str) -> None:
subprocess.check_call([sys.executable, '-m', 'pip', 'install', package]) | Install Python dependencies
Args:
package (string): The package to install | github-repos |
def message_upperbound(self, tree, spins, subtheta):
energy_sources = set()
for v, subtree in tree.items():
assert all(u in spins for u in self._ancestors[v])
def energy_contributions():
yield subtheta.linear[v]
... | Determine an upper bound on the energy of the elimination tree.
Args:
tree (dict): The current elimination tree
spins (dict): The current fixed spins
subtheta (dict): Theta with spins fixed.
Returns:
The formula for the energy of the tree. | juraj-google-style |
def intersection(self, other):
if (not hasattr(other, '__iter__')):
other = [other]
bounds = self.bounds
for range in other:
bounds = self._intersection(bounds, range.bounds)
if (not bounds):
return None
range = VersionRange(None)
range.bounds = bounds
return ... | AND together version ranges.
Calculates the intersection of this range with one or more other ranges.
Args:
other: VersionRange object (or list of) to AND with.
Returns:
New VersionRange object representing the intersection, or None if
no ranges intersect. | codesearchnet |
def results(self, use_cache=True, dialect=None, billing_tier=None):
return self._materialization.results(use_cache=use_cache, dialect=dialect, billing_tier=billing_tier) | Materialize the view synchronously.
If you require more control over the execution, use execute() or execute_async().
Args:
use_cache: whether to use cached results or not.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta),... | codesearchnet |
def write(self, file_name):
try:
assert file_name[-6:] == '.xhtml'
except (AssertionError, IndexError):
raise ValueError('filename must end with .xhtml')
with open(file_name, 'wb') as f:
f.write(self.content.encode('utf-8')) | Writes the chapter object to an xhtml file.
Args:
file_name (str): The full name of the xhtml file to save to. | juraj-google-style |
def register_write(self, reg_index, value):
res = self._dll.JLINKARM_WriteReg(reg_index, value)
if (res != 0):
raise errors.JLinkException(('Error writing to register %d' % reg_index))
return value | Writes into an ARM register.
Note:
The data is not immediately written, but is cached before being
transferred to the CPU on CPU start.
Args:
self (JLink): the ``JLink`` instance
reg_index (int): the ARM register to write to
value (int): the value to write to the register
Returns:
The value written to the ARM regist... | codesearchnet |
def scatter_div(self, sparse_delta, use_locking=False, name=None):
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}')
return self._lazy_read(gen_resource_variable_ops.resource_scatter_div(s... | Divide this variable by `tf.IndexedSlices`.
Args:
sparse_delta: `tf.IndexedSlices` to divide this variable by.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`. | github-repos |
def load_database(adapter, variant_file=None, sv_file=None, family_file=None, family_type='ped', skip_case_id=False, gq_treshold=None, case_id=None, max_window=3000, profile_file=None, hard_threshold=0.95, soft_threshold=0.9):
vcf_files = []
nr_variants = None
vcf_individuals = None
if variant_file:
... | Load the database with a case and its variants
Args:
adapter: Connection to database
variant_file(str): Path to variant file
sv_file(str): Path to sv variant file
family_file(str): Path to family file
family_type(str): Format of family file
skip_case_id(bool): If no case information should be added to variants
gq_tres... | codesearchnet |
def multi_interpolation_basis(n_objectives=6, n_interp_steps=5, width=128, channels=3):
(N, M, W, Ch) = (n_objectives, n_interp_steps, width, channels)
const_term = sum([lowres_tensor([W, W, Ch], [(W
const_term = tf.reshape(const_term, [1, 1, 1, W, W, Ch])
example_interps = [sum([lowres_tensor([M, W, W... | A paramaterization for interpolating between each pair of N objectives.
Sometimes you want to interpolate between optimizing a bunch of objectives,
in a paramaterization that encourages images to align.
Args:
n_objectives: number of objectives you want interpolate between
n_interp_steps: number of interpolation steps... | codesearchnet |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.