code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def _str_dotted_getattr(obj, name):
for part in name.split('.'):
obj = getattr(obj, part)
return str(obj) if obj else None | Expands extends getattr to allow dots in x to indicate nested objects.
Args:
obj (object): an object.
name (str): a name for a field in the object.
Returns:
Any: the value of named attribute.
Raises:
AttributeError: if the named attribute does not exist. | juraj-google-style |
def fit_cosine_function(wind):
    """Fits a cosine function to observed hourly windspeed data.

    Args:
        wind: observed hourly windspeed data (pandas Series with a
            DatetimeIndex).

    Returns:
        Parameters needed to generate diurnal features of windspeed using a
        cosine function (the optimal parameters for `_cosine_function`).
    """
    # Daily means broadcast back onto the hourly index.
    daily_means = wind.groupby(wind.index.date).mean()
    daily_on_hourly = pd.Series(index=wind.index,
                                data=daily_means.loc[wind.index.date].values)
    paired = pd.DataFrame(
        data=dict(daily=daily_on_hourly, hourly=wind)).dropna(how='any')
    # Predictors: the daily mean and the hour of day for each observation.
    predictors = np.array([paired.daily, paired.index.hour])
    popt, _ = scipy.optimize.curve_fit(_cosine_function, predictors,
                                       paired.hourly)
    return popt
def tuple_shapes(self):
    """If this is a tuple, returns its sequence of constituent Shape objects.

    Returns:
        Tuple sub-shapes.

    Raises:
        ValueError: if this is not a tuple.
    """
    if self.is_tuple():
        return self._tuple_shapes
    raise ValueError('tuple_shapes() called on a non-tuple shape')
def build_kalman_filter_step(get_transition_matrix_for_timestep, get_transition_noise_for_timestep, get_observation_matrix_for_timestep, get_observation_noise_for_timestep):
    """Build a callable that performs one step of Kalman filtering.

    Args:
      get_transition_matrix_for_timestep: callable taking a timestep as an
        integer `Tensor` argument, and returning a `LinearOperator` of shape
        `[latent_size, latent_size]`.
      get_transition_noise_for_timestep: callable taking a timestep as an
        integer `Tensor` argument, and returning a
        `MultivariateNormalLinearOperator` of event shape `[latent_size]`.
      get_observation_matrix_for_timestep: callable taking a timestep as an
        integer `Tensor` argument, and returning a `LinearOperator` of shape
        `[observation_size, observation_size]`.
      get_observation_noise_for_timestep: callable taking a timestep as an
        integer `Tensor` argument, and returning a
        `MultivariateNormalLinearOperator` of event shape
        `[observation_size]`.

    Returns:
      kalman_filter_step: a callable that updates a KalmanFilterState from
        timestep `t-1` to `t`.
    """

    def kalman_filter_step(state, elems_t):
        """Run a single step of Kalman filtering.

        Args:
          state: A `KalmanFilterState` object representing the previous
            filter state at time `t-1`.
          elems_t: A tuple of Tensors `(x_t, mask_t)`, or a `Tensor` `x_t`.
            `x_t` is a `Tensor` with rightmost shape dimensions
            `[observation_size, 1]` representing the vector observed at
            time `t`, and `mask_t` is a `Tensor` with rightmost dimensions
            `[1, 1]` representing the observation mask at time `t`. Both
            `x_t` and `mask_t` may have batch dimensions, which must be
            compatible with the batch dimensions of `state.predicted_mean`
            and `state.predicted_cov` respectively. If `mask_t` is not
            provided, it is assumed to be `None`.

        Returns:
          new_state: A `KalmanFilterState` object representing the new
            filter state at time `t`.
        """
        # Unpack the observation and the (optional) mask for this timestep.
        if isinstance(elems_t, tuple):
            (x_t, mask_t) = elems_t
        else:
            x_t = elems_t
            mask_t = None
        observation_matrix = get_observation_matrix_for_timestep(state.timestep)
        observation_noise = get_observation_noise_for_timestep(state.timestep)
        if (mask_t is not None):
            # At masked timesteps, substitute the expected observation so
            # that the Gaussian update below becomes a no-op for those entries.
            x_expected = (_propagate_mean(state.predicted_mean, observation_matrix, observation_noise) * tf.ones_like(x_t))
            x_t = tf.where(tf.broadcast_to(mask_t, tf.shape(input=x_expected)), x_expected, tf.broadcast_to(x_t, tf.shape(input=x_expected)))
        # Bayesian update of the latent state given the observation at time t.
        (filtered_mean, filtered_cov, observation_dist) = linear_gaussian_update(state.predicted_mean, state.predicted_cov, observation_matrix, observation_noise, x_t)
        log_marginal_likelihood = observation_dist.log_prob(x_t[(..., 0)])
        if (mask_t is not None):
            # Masked timesteps keep the prior state and contribute zero
            # log-likelihood.
            filtered_mean = tf.where(tf.broadcast_to(mask_t, tf.shape(input=filtered_mean)), state.predicted_mean, filtered_mean)
            filtered_cov = tf.where(tf.broadcast_to(mask_t, tf.shape(input=filtered_cov)), state.predicted_cov, filtered_cov)
            log_marginal_likelihood = tf.where(tf.broadcast_to(mask_t[(..., 0, 0)], tf.shape(input=log_marginal_likelihood)), tf.zeros_like(log_marginal_likelihood), log_marginal_likelihood)
        # Predict forward to time t+1 through the transition model.
        (predicted_mean, predicted_cov) = kalman_transition(filtered_mean, filtered_cov, get_transition_matrix_for_timestep(state.timestep), get_transition_noise_for_timestep(state.timestep))
        return KalmanFilterState(filtered_mean, filtered_cov, predicted_mean, predicted_cov, observation_dist.mean()[(..., tf.newaxis)], observation_dist.covariance(), log_marginal_likelihood, (state.timestep + 1))
    return kalman_filter_step
class DinatDownsampler(nn.Module):
    """Convolutional Downsampling Layer.

    Halves the spatial resolution and doubles the channel count of an
    NHWC feature map.

    Args:
        dim (`int`):
            Number of input channels.
        norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
            Normalization layer class.
    """

    def __init__(self, dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
        super().__init__()
        self.dim = dim
        # Strided 3x3 conv: spatial downsampling with channel doubling.
        self.reduction = nn.Conv2d(dim, 2 * dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
        self.norm = norm_layer(2 * dim)

    def forward(self, input_feature: torch.Tensor) -> torch.Tensor:
        # NHWC -> NCHW for the convolution, then back to NHWC.
        reduced = self.reduction(input_feature.permute(0, 3, 1, 2))
        reduced = reduced.permute(0, 2, 3, 1)
        return self.norm(reduced)
def get_random_voxels(dataset, n_voxels):
    """Returns mappable data for a random subset of voxels.

    May be useful as a baseline in predictive analyses--e.g., to compare
    performance of a more principled feature selection method with simple
    random selection.

    Args:
        dataset: A Dataset instance
        n_voxels: An integer specifying the number of random voxels to select.

    Returns:
        A 2D numpy array with (randomly-selected) voxels in rows and
        mappables in columns.
    """
    all_voxels = np.arange(dataset.masker.n_vox_in_vol)
    np.random.shuffle(all_voxels)
    chosen = all_voxels[:n_voxels]
    return dataset.get_image_data(voxels=chosen)
def _validate_recurse_directive_types(current_schema_type, field_schema_type, context):
    """Perform type checks on the enclosing type and the recursed type for a recurse directive.

    Args:
        current_schema_type: GraphQLType, the schema type at the current location
        field_schema_type: GraphQLType, the schema type at the inner scope
        context: dict, various per-compilation data (e.g. declared tags, whether the
            current block is optional, etc.). May be mutated in-place in this function!
    """
    equivalence = context['type_equivalence_hints'].get(field_schema_type)
    inverse_equivalence = context['type_equivalence_hints_inverse'].get(field_schema_type)

    # The enclosing scope may be the edge type itself, or any member of a
    # union declared equivalent to it (in either direction).
    permitted_types = {field_schema_type}
    if equivalence and isinstance(equivalence, GraphQLUnionType):
        permitted_types.update(equivalence.types)
    if inverse_equivalence and isinstance(inverse_equivalence, GraphQLUnionType):
        permitted_types.update(inverse_equivalence.types)
    scope_is_permitted = current_schema_type in permitted_types

    # Alternatively, the edge may be an interface implemented by the scope.
    implements_interface = (
        isinstance(field_schema_type, GraphQLInterfaceType)
        and isinstance(current_schema_type, GraphQLObjectType)
        and field_schema_type in current_schema_type.interfaces
    )
    if not (scope_is_permitted or implements_interface):
        raise GraphQLCompilationError(u'Edges expanded with a @recurse directive must either '
                                      u'be of the same type as their enclosing scope, a supertype '
                                      u'of the enclosing scope, or be of an interface type that is '
                                      u'implemented by the type of their enclosing scope. '
                                      u'Enclosing scope type: {}, edge type: '
                                      u'{}'.format(current_schema_type, field_schema_type))
def table(cls, table, columns, index='', keyset=None):
    """A convenient method to construct ReadOperation from table.

    Args:
        table: name of the table from which to fetch data.
        columns: names of columns to be retrieved.
        index: (optional) name of index to use, rather than the table's
            primary key.
        keyset: (optional) `KeySet` keys / ranges identifying rows to be
            retrieved.
    """
    # Default to reading all rows when no keyset is supplied.
    keyset = keyset or KeySet(all_=True)
    if not isinstance(keyset, KeySet):
        raise ValueError('keyset must be an instance of class google.cloud.spanner.KeySet')
    read_kwargs = {'table': table, 'columns': columns, 'index': index, 'keyset': keyset}
    return cls(is_sql=False,
               is_table=True,
               read_operation='process_read_batch',
               kwargs=read_kwargs)
def _ass_refresh_attrs(self, cached_ass, file_ass):
    """Completely refreshes cached assistant from file.

    Args:
        cached_ass: an assistant from cache hierarchy
            (for format see Cache class docstring)
        file_ass: the respective assistant from filesystem hierarchy
            (for format see what refresh_role accepts)
    """
    loaded_ass = yaml_loader.YamlLoader.load_yaml_by_path(file_ass['source'], log_debug=True)
    attrs = loaded_ass
    yaml_checker.check(file_ass['source'], attrs)
    cached_ass['source'] = file_ass['source']
    cached_ass['ctime'] = os.path.getctime(file_ass['source'])
    # Rebuild attrs/snippets from scratch so stale entries never survive.
    cached_ass['attrs'] = {}
    cached_ass['snippets'] = {}
    # Copy simple scalar attributes verbatim, when present.
    for a in ['fullname', 'description', 'icon_path']:
        if a in attrs:
            cached_ass['attrs'][a] = attrs.get(a)
    if 'args' in attrs:
        cached_ass['attrs']['args'] = {}
        for argname, argparams in attrs.get('args', {}).items():
            if 'use' in argparams or 'snippet' in argparams:
                # The argument delegates to a named snippet ('use' is
                # preferred; 'snippet' is the older spelling). Local params
                # override whatever the snippet defines.
                snippet_name = argparams.pop('use', None) or argparams.pop('snippet')
                snippet = yaml_snippet_loader.YamlSnippetLoader.get_snippet_by_name(snippet_name)
                cached_ass['attrs']['args'][argname] = snippet.get_arg_by_name(argname)
                cached_ass['attrs']['args'][argname].update(argparams)
                # Record the snippet's ctime so cache invalidation can
                # detect snippet changes later.
                cached_ass['snippets'][snippet.name] = self._get_snippet_ctime(snippet.name)
            else:
                cached_ass['attrs']['args'][argname] = argparams
def __init__(self, glob, opts=None):
    """Instantiates a new GlobComponent from a given path glob.

    Args:
        glob: A string with potential glob elements (e.g. `foo*`).
        opts: An optional PathOpts instance.
    """
    super(GlobComponent, self).__init__()
    self._glob = glob
    # Case-insensitive regex equivalent of the glob pattern.
    self.regex = re.compile(fnmatch.translate(glob), re.I)
    self.opts = opts or PathOpts()
def propose(self, n=1):
    """Use the trained model to propose a new set of parameters.

    Args:
        n (int, optional): number of candidates to propose

    Returns:
        Mapping of tunable name to proposed value. If called with n>1 then
        the proposal is a list of dictionaries.
    """
    proposals = []
    for _ in range(n):
        candidates = self._create_candidates()
        if candidates is None:
            return None
        scores = self.predict(candidates)
        best = self._acquire(scores)
        # Map each tunable back to its original (inverse-transformed) value.
        chosen = {}
        for col in range(candidates[best, :].shape[0]):
            name, hyperparam = self.tunables[col]
            chosen[name] = hyperparam.inverse_transform(candidates[best, col])
        proposals.append(chosen)
    return chosen if n == 1 else proposals
def Print(x, data, message, **kwargs):
    """Call tf.Print.

    Args:
        x: a Tensor.
        data: a list of Tensor
        message: a string
        **kwargs: keyword arguments to tf.Print

    Returns:
        a Tensor which is identical in value to x
    """
    operation = PrintOperation(x, data, message, **kwargs)
    return operation.outputs[0]
def Create(self, request, global_params=None):
    """Starts a build with the specified configuration.

    This method returns a long-running `Operation`, which includes the build
    ID. Pass the build ID to `GetBuild` to determine the build status (such
    as `SUCCESS` or `FAILURE`).

    Args:
        request: (CloudbuildProjectsBuildsCreateRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
        (Operation) The response message.
    """
    method_config = self.GetMethodConfig('Create')
    return self._RunMethod(method_config, request, global_params=global_params)
def enclose_points(points, clip_rect):
    """Return the minimal rectangle enclosing the given set of points.

    Args:
        points (List[Point]): The set of points that the new Rect must enclose.
        clip_rect (Rect): A clipping Rect.

    Returns:
        Rect: A new Rect enclosing the given points, or None when SDL could
        not compute an enclosing rectangle.
    """
    # Marshal the points into a C array for the SDL call.
    c_points = ffi.new('SDL_Point[]', len(points))
    for i, point in enumerate(points):
        c_points[i] = point._ptr
    result = Rect()
    found = lib.SDL_EnclosePoints(c_points, len(points), clip_rect._ptr, result._ptr)
    return result if found else None
def _parse_string_to_list_of_pairs(s, seconds_to_int=False):
r
ret = []
for p in [s.split(":") for s in re.sub("[,.;]", " ", s).split()]:
if len(p) != 2:
raise ValueError("bad input to _parse_string_to_list_of_pairs %s" % s)
if seconds_to_int:
ret.append((p[0], int(p[1])))
else:
ret.append(tuple(p))
return ret | r"""Parses a string into a list of pairs.
In the input string, each pair is separated by a colon, and the delimiters
between pairs are any of " ,.;".
e.g. "rows:32,cols:32"
Args:
s: str to parse.
seconds_to_int: Boolean. If True, then the second elements are returned
as integers; otherwise they are strings.
Returns:
List of tuple pairs.
Raises:
ValueError: Badly formatted string. | juraj-google-style |
def triangle_area(point1, point2, point3):
    """Uses Heron's formula to find the area of a triangle
    based on the coordinates of three points.

    Args:
        point1: list or tuple, the x y coordinate of point one.
        point2: list or tuple, the x y coordinate of point two.
        point3: list or tuple, the x y coordinate of point three.

    Returns:
        The area of a triangle as a floating point number.

    Requires:
        The math module, point_distance().
    """
    # Lengths of the three sides of the triangle.
    # (These were previously bare string-literal statements -- no-op
    # expressions evaluated at runtime -- converted to real comments.)
    a = point_distance(point1, point2)
    b = point_distance(point1, point3)
    c = point_distance(point2, point3)
    # s is the semiperimeter.
    s = (a + b + c) / 2.0
    # Heron's formula.
    return math.sqrt(s * (s - a) * (s - b) * (s - c))
def from_datetimes(datetimes):
    """Creates DateTensor from a sequence of Python datetime objects.

    Args:
        datetimes: Sequence of Python datetime objects.

    Returns:
        DateTensor object.

    #### Example

    ```python
    import datetime

    dates = [datetime.date(2015, 4, 15), datetime.date(2017, 12, 30)]
    date_tensor = tff.datetime.dates_from_datetimes(dates)
    ```
    """
    # A single date/datetime is converted directly, without building tensors.
    if isinstance(datetimes, (datetime.date, datetime.datetime)):
        return from_year_month_day(datetimes.year, datetimes.month,
                                   datetimes.day, validate=False)
    year_values = tf.constant([dt.year for dt in datetimes], dtype=tf.int32)
    month_values = tf.constant([dt.month for dt in datetimes], dtype=tf.int32)
    day_values = tf.constant([dt.day for dt in datetimes], dtype=tf.int32)
    return from_year_month_day(year_values, month_values, day_values, validate=False)
class DataParallel(Distribution):
    """Distribution for data parallelism.

    You can choose to create this instance by either specifying
    the `device_mesh` or `devices` arguments (but not both).

    The `device_mesh` argument is expected to be a `DeviceMesh` instance,
    and is expected to be 1D only. In case that the mesh has multiple axes,
    then the first axis will be treated as the data parallel dimension
    (and a warning will be raised).

    When a list of `devices` are provided, they will be used to construct a
    1D mesh.

    When both `mesh` and `devices` are absent, then `list_devices()`
    will be used to detect any available devices and create a 1D mesh from
    them.

    Args:
        device_mesh: Optional `DeviceMesh` instance.
        devices: Optional list of devices.
        auto_shard_dataset: Automatically shard the dataset amongst
            processes. Defaults to true.
    """

    def __init__(self, device_mesh=None, devices=None, auto_shard_dataset=True):
        if device_mesh:
            self._initialize_with_device_mesh(device_mesh)
        elif devices:
            self._initialize_mesh_from_devices(devices)
        else:
            self._initialize_mesh_from_list_devices()
        # Process (worker) topology, used when sharding datasets.
        self._num_process = distribution_lib.num_processes()
        self._process_id = distribution_lib.process_id()
        self._is_multi_process = self._num_process > 1
        self._auto_shard_dataset = auto_shard_dataset

    def _initialize_with_device_mesh(self, device_mesh):
        """Initialize from a user-provided `DeviceMesh` (expected to be 1D)."""
        if not isinstance(device_mesh, DeviceMesh):
            raise ValueError(f'Expect `mesh` to be an instance of `DeviceMesh`. Received: mesh={device_mesh} (of type {type(device_mesh)})')
        super().__init__(device_mesh, device_mesh.axis_names[0])
        if self.device_mesh.devices.ndim != 1:
            warnings.warn('Expect the input mesh to be 1D, but received mesh.devices.ndim=%d. The first axis will be used for data-parallel sharding.', device_mesh.devices.ndim)

    def _initialize_mesh_from_devices(self, devices):
        """Build a 1D mesh from an explicit list of devices."""
        devices = np.array(devices)
        device_mesh = DeviceMesh(shape=devices.shape, axis_names=[DEFAULT_BATCH_DIM_NAME], devices=devices)
        super().__init__(device_mesh, DEFAULT_BATCH_DIM_NAME)

    def _initialize_mesh_from_list_devices(self):
        """Build a 1D mesh from all locally detected devices."""
        devices = np.array(list_devices())
        device_mesh = DeviceMesh(shape=devices.shape, axis_names=[DEFAULT_BATCH_DIM_NAME], devices=devices)
        super().__init__(device_mesh, DEFAULT_BATCH_DIM_NAME)

    def get_data_layout(self, data_shape):
        """Shard data along the batch (first) dimension only."""
        data_shard_spec = [None] * len(data_shape)
        data_shard_spec[0] = self.batch_dim_name
        return TensorLayout(data_shard_spec, self.device_mesh)

    def get_variable_layout(self, variable):
        """Variables are fully replicated unless they carry an explicit layout."""
        if getattr(variable, '_layout', None) is not None:
            return variable._layout
        variable_shard_spec = [None] * len(variable.shape)
        return TensorLayout(variable_shard_spec, self.device_mesh)

    def get_tensor_layout(self, path):
        # No layout constraints on intermediate tensors under data parallelism.
        return None

    def distribute_dataset(self, dataset):
        """Shard a `tf.data.Dataset` across processes (multi-process only)."""
        from tensorflow.python.data.experimental.ops import distribute as tf_data_distribute
        from keras.src.utils.module_utils import tensorflow as tf
        if not isinstance(dataset, tf.data.Dataset):
            raise ValueError(f'Only `tf.data.Dataset` is supported for sharding, got {type(dataset)}')
        if not self._is_multi_process or not self._auto_shard_dataset:
            return dataset
        # The global batch size must be statically known in order to rebatch
        # into per-worker batches.
        batch_size = tf_data_distribute.compute_batch_size(dataset)
        if batch_size.numpy() < 0:
            raise ValueError('The batch size of the input dataset is unknown. Please config the batch size for the input dataset, e.g via `dataset.batch(batch_size)`')
        per_worker_batch_size = tf_data_distribute.batch_sizes_for_worker(global_batch_size=batch_size, num_workers=self._num_process, num_replicas_per_worker=1, worker_index=self._process_id)
        distributed_dataset = dataset.rebatch(per_worker_batch_size)
        distributed_dataset = tf_data_distribute._AutoShardDataset(distributed_dataset, num_workers=self._num_process, index=self._process_id, num_replicas=self._num_process)
        return distributed_dataset.prefetch(tf.data.AUTOTUNE)
def inverse(self):
    """Calculate the inverse geodesic between locations in segments.

    Returns:
        list of 2-tuple of float: Groups in bearing and distance between
            points in segments
    """
    results = []
    for segment in self:
        # Segments with fewer than two points have no geodesic to invert.
        results.append(segment.inverse() if len(segment) >= 2 else [])
    return results
def bias_dropout_add(x: Tensor, bias: Tensor, residual: Optional[Tensor], prob: float, training: bool) -> Tensor:
    """Add bias to x, apply dropout and residual connection.

    Args:
        x (Tensor): main path of output
        bias (Tensor): None or attn_bias of the last attention layer
        residual (Optional[Tensor]): residual value
        prob (float): dropout probability
        training (bool): whether in training mode or not

    Returns:
        Tensor: dropout(x + bias) + residual
    """
    biased = x if bias is None else x + bias
    dropped = torch.nn.functional.dropout(biased, p=prob, training=training)
    return dropped if residual is None else residual + dropped
def make_session(username=None, password=None, bearer_token=None, extra_headers_dict=None):
    """Creates a Requests Session for use.

    Accepts a bearer token for premium users and will override username and
    password information if present.

    Args:
        username (str): username for the session
        password (str): password for the user
        bearer_token (str): token for a premium API user.

    Returns:
        requests.Session: session configured with auth and default headers.

    Raises:
        KeyError: if neither a bearer token nor a username/password pair is
            provided.
    """
    if password is None and bearer_token is None:
        logger.error("No authentication information provided; "
                     "please check your object")
        raise KeyError("no authentication information provided")

    session = requests.Session()
    # Ignore environment proxy/auth settings for this session.
    session.trust_env = False
    headers = {'Accept-encoding': 'gzip',
               'User-Agent': 'twitterdev-search-tweets-python/' + VERSION}
    if bearer_token:
        logger.info("using bearer token for authentication")
        headers['Authorization'] = "Bearer {}".format(bearer_token)
    else:
        logger.info("using username and password for authentication")
        session.auth = username, password
    # Merge caller-supplied headers before installing them on the session.
    # (The original merged them after `session.headers = headers`, which only
    # worked through dict aliasing, and assigned session.headers twice.)
    if extra_headers_dict:
        headers.update(extra_headers_dict)
    session.headers = headers
    return session
def add_oxidation_state_by_site(self, oxidation_states):
    """Add oxidation states to a structure by site.

    Args:
        oxidation_states (list): List of oxidation states.
            E.g., [1, 1, 1, 1, 2, 2, 2, 2, 5, 5, 5, 5, -2, -2, -2, -2]
    """
    if len(oxidation_states) != len(self.sites):
        raise ValueError("Oxidation states of all sites must be "
                         "specified.")
    for site, oxidation in zip(self.sites, oxidation_states):
        # Rebuild the species dict with oxidation-state-decorated species.
        site.species = {Specie(element.symbol, oxidation): occupancy
                        for element, occupancy in site.species.items()}
def __new__(cls, *args) -> 'InvokeExpressionNode':
    """Creates a new InvokeExpressionNode node or one of its subclasses.

    Creates either an InvokeExpressionNode or InvokeReferenceNode, a subclass
    of InvokeExpressionNode. The InvokeReferenceNode is returned when a field
    named 'reference' is invoked against a FHIR Reference resource. Database
    backends have special behavior for reference nodes; this
    reference-specific node type allows them to define visitors that
    implement their reference-specific logic.

    Args:
        *args: The args passed to `__init__`.

    Returns:
        A new InvokeExpressionNode of the appropriate type.
    """
    # Called with no args (e.g. via copy/pickle protocols): default type.
    if not args:
        return super().__new__(cls)
    _, identifier, parent_node = args
    # 'reference' invoked on a FHIR Reference resource gets the subclass.
    if identifier == 'reference' and isinstance(parent_node.return_type, _fhir_path_data_types.ReferenceStructureDataType):
        return super().__new__(InvokeReferenceNode)
    return super().__new__(InvokeExpressionNode)
def __init__(self, config, in_features, condition_dim, n_classes=256, bottleneck_factor=2):
    """Per-pixel MLP followed by a Conditional Log Binomial softmax.

    Args:
        config: model config providing `max_temp` and `min_temp`.
        in_features (`int`):
            Number of input channels in the main feature.
        condition_dim (`int`):
            Number of input channels in the condition feature.
        n_classes (`int`, *optional*, defaults to 256):
            Number of classes.
        bottleneck_factor (`int`, *optional*, defaults to 2):
            Hidden dim factor.
            NOTE(review): this parameter is currently unused -- the
            bottleneck width is `in_features + condition_dim` regardless.
            Confirm whether it was intended to scale the hidden width.
    """
    super().__init__()
    bottleneck = (in_features + condition_dim)
    # 1x1 convolutions act as a per-pixel MLP; the final Softplus keeps the
    # four output parameters strictly positive.
    self.mlp = nn.Sequential(nn.Conv2d(in_features + condition_dim, bottleneck, kernel_size=1, stride=1, padding=0), nn.GELU(), nn.Conv2d(bottleneck, 2 + 2, kernel_size=1, stride=1, padding=0), nn.Softplus())
    # Small epsilon to keep probability parameters away from zero.
    self.p_eps = 0.0001
    self.max_temp = config.max_temp
    self.min_temp = config.min_temp
    self.log_binomial_transform = LogBinomialSoftmax(n_classes, act=torch.softmax)
def extract_sequences(x, sequence_length, sequence_stride):
    """Expands the dimension of last axis into sequences of `sequence_length`.

    Slides a window of size `sequence_length` over the last axis of the input
    with a stride of `sequence_stride`, replacing the last axis with
    `[num_sequences, sequence_length]` sequences.

    If the dimension along the last axis is N, the number of sequences can be
    computed by:

    `num_sequences = 1 + (N - sequence_length) // sequence_stride`

    Args:
        x: Input tensor.
        sequence_length: An integer representing the sequences length.
        sequence_stride: An integer representing the sequences hop size.

    Returns:
        A tensor of sequences with shape [..., num_sequences, sequence_length].

    Example:

    >>> x = keras.ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
    >>> extract_sequences(x, 3, 2)
    array([[1, 2, 3],
       [3, 4, 5]])
    """
    # Defer to the symbolic op when tracing a functional graph.
    if any_symbolic_tensors((x,)):
        op = ExtractSequences(sequence_length, sequence_stride)
        return op.symbolic_call(x)
    return backend.math.extract_sequences(x, sequence_length, sequence_stride)
def invoke_script(self, script, id=None, endpoint=None):
    """Invokes a script that has been assembled.

    Args:
        script: (str) a hexlified string of a contract invocation script, example
            '00c10b746f74616c537570706c796754a64cac1b1073e662933ef3e30b007cd98d67d7'
        id: (int, optional) id to use for response tracking
        endpoint: (RPCEndpoint, optional) endpoint to specify to use

    Returns:
        json object of the result or the error encountered in the RPC call
    """
    params = [script]
    return self._call_endpoint(INVOKE_SCRIPT, params=params, id=id, endpoint=endpoint)
def serialize_keras_object(instance):
    """Serialize a Keras object into a JSON-compatible representation.

    Calls to `serialize_keras_object` while underneath the
    `SharedObjectSavingScope` context manager will cause any objects re-used
    across multiple layers to be saved with a special shared object ID. This
    allows the network to be re-created properly during deserialization.

    Args:
        instance: The object to serialize.

    Returns:
        A dict-like, JSON-compatible representation of the object's config.
    """
    # Unwrap decorated callables so the underlying object is serialized.
    instance = inspect.unwrap(instance)
    if instance is None:
        return None
    if hasattr(instance, 'get_config'):
        name = object_registration.get_registered_name(instance.__class__)
        try:
            config = instance.get_config()
        except NotImplementedError as e:
            if _SKIP_FAILED_SERIALIZATION:
                # Record a stub config so saving can proceed despite the
                # missing get_config implementation.
                return serialize_keras_class_and_config(name, {_LAYER_UNDEFINED_CONFIG_KEY: True})
            raise e
        serialization_config = {}
        for key, item in config.items():
            if isinstance(item, str):
                serialization_config[key] = item
                continue
            # Recursively serialize non-string config entries where possible;
            # fall back to the raw value if they are not serializable.
            try:
                serialized_item = serialize_keras_object(item)
                if isinstance(serialized_item, dict) and (not isinstance(item, dict)):
                    serialized_item['__passive_serialization__'] = True
                serialization_config[key] = serialized_item
            except ValueError:
                serialization_config[key] = item
        name = object_registration.get_registered_name(instance.__class__)
        return serialize_keras_class_and_config(name, serialization_config, instance)
    if hasattr(instance, '__name__'):
        # Plain functions are serialized by their registered name.
        return object_registration.get_registered_name(instance)
    raise ValueError(f"Cannot serialize {instance} because it doesn't implement `get_config()`.")
def pack_results(measurements: Sequence[Tuple[str, np.ndarray]]) -> bytes:
    """Pack measurement results into a byte string.

    Args:
        measurements: A sequence of tuples, one for each measurement,
            consisting of a string key and an array of boolean data. The data
            should be a 2-D array indexed by (repetition, qubit_index). All
            data for all measurements must have the same number of
            repetitions.

    Returns:
        Packed bytes: the bits of all measurements are concatenated per
        repetition, zero-padded to a multiple of 8 bits, bit-reversed within
        each byte, and packed.

    Raises:
        ValueError: if the measurement data do not have compatible shapes.
    """
    if not measurements:
        return b''
    shapes = [(key, np.shape(data)) for key, data in measurements]
    if not all(len(shape) == 2 for _, shape in shapes):
        raise ValueError('Expected 2-D data: shapes={}'.format(shapes))
    reps = shapes[0][1][0]
    if not all(shape[0] == reps for _, shape in shapes):
        raise ValueError('Expected same reps for all keys: shapes={}'.format(shapes))
    bits = np.hstack([np.asarray(data, dtype=bool) for _, data in measurements])
    bits = bits.reshape(-1)
    # Pad the flattened bit stream to a whole number of bytes.
    remainder = len(bits) % 8
    if remainder:
        bits = np.pad(bits, (0, 8 - remainder), 'constant')
    # Reverse the bit order within each byte before packing. The original
    # line used `[(:, ::(- 1))]`, which is a syntax error; `[:, ::-1]` is
    # the intended slice.
    bits = bits.reshape(-1, 8)[:, ::-1]
    byte_arr = np.packbits(bits, axis=1).reshape(-1)
    return byte_arr.tobytes()
def _convert_reward(self, reward):
if not np.isfinite(reward).all():
raise ValueError('Infinite reward encountered.')
return np.array(reward, dtype=np.float32) | Convert the reward to 32 bits.
Args:
reward: Numpy reward.
Raises:
ValueError: Rewards contain infinite values.
Returns:
Numpy reward with 32-bit data type. | juraj-google-style |
def reparameterization_type(self):
    """Describes how samples from the distribution are reparameterized.

    Currently this is one of the static instances
    `distributions.FULLY_REPARAMETERIZED`
    or `distributions.NOT_REPARAMETERIZED`.

    Returns:
        An instance of `ReparameterizationType`.
    """
    return self._reparameterization_type
def fillNoneValues(column):
    """Fill all NaN/NaT values of a column with an empty string.

    Only object-dtype columns are modified; other dtypes are returned
    unchanged.

    Args:
        column (pandas.Series): A Series object with all rows.

    Returns:
        column: Series with filled NaN values.
    """
    if column.dtype != object:
        return column
    column.fillna('', inplace=True)
    return column
def value(self):
    """Retrieves the current distribution of samples.

    Returns:
        A HistogramProto describing the distribution of samples.
    """
    with c_api_util.tf_buffer() as buffer_:
        # Serialize the sampler cell's state into the C buffer, then copy it
        # out while the buffer is still alive.
        pywrap_tfe.TFE_MonitoringSamplerCellValue(self._cell, buffer_)
        proto_data = pywrap_tf_session.TF_GetBuffer(buffer_)
    histogram_proto = summary_pb2.HistogramProto()
    histogram_proto.ParseFromString(compat.as_bytes(proto_data))
    return histogram_proto
def check_onfail_requisites(state_id, state_result, running, highstate):
    """When a state fails and is part of a highstate, check
    if there are onfail requisites.

    When we find onfail requisites, we will consider the state failed
    only if at least one of those onfail requisites also failed.

    Returns:
        True: if onfail handlers succeeded
        False: if one of those handlers failed
        None: if the state does not have onfail requisites
    """
    nret = None
    if (state_id and state_result and highstate and isinstance(highstate, dict)):
        onfails = search_onfail_requisites(state_id, highstate)
        if onfails:
            for handler in onfails:
                (fstate, mod_, fchunk) = handler
                for (rstateid, rstate) in six.iteritems(running):
                    # Running state keys are either full low tags
                    # ('state_|-id_|-name_|-fun') or plain ids.
                    if ('_|-' in rstateid):
                        st = salt.state.split_low_tag(rstateid)
                    else:
                        id_ = rstate.get('__id__', rstateid)
                        if (not id_):
                            raise ValueError('no state id')
                        st = {'__id__': id_, 'state': mod_}
                    if ((mod_ == st['state']) and (fstate == st['__id__'])):
                        ofresult = rstate.get('result', _empty)
                        if (ofresult in [False, True]):
                            nret = ofresult
                        # One failed onfail requisite is enough.
                        if (ofresult is False):
                            break
            # Requisites exist but none reported a result: treat as failed.
            if (nret is None):
                nret = False
    return nret
def simulate_w(self, index: int, half_turns: float, axis_half_turns: float):
    """Simulate a single qubit rotation gate about a X + b Y.

    The gate simulated is U = exp(-i pi/2 W half_turns)
    where W = cos(pi axis_half_turns) X + sin(pi axis_half_turns) Y

    Args:
        index: The qubit to act on.
        half_turns: The amount of the overall rotation, see the formula
            above.
        axis_half_turns: The angle between the pauli X and Y operators,
            see the formula above.
    """
    args = self._shard_num_args({'index': index, 'half_turns': half_turns, 'axis_half_turns': axis_half_turns})
    if (index >= self._num_shard_qubits):
        # The rotation spans shards: accumulate into scratch buffers, then
        # copy the scratch back into the state.
        self._pool.map(_clear_scratch, args)
        self._pool.map(_w_between_shards, args)
        self._pool.map(_copy_scratch_to_state, args)
    else:
        self._pool.map(_w_within_shard, args)
    # Renormalize the state vector after applying the rotation.
    norm_squared = np.sum(self._pool.map(_norm_squared, args))
    args = self._shard_num_args({'norm_squared': norm_squared})
    self._pool.map(_renorm, args)
def round_accuracy(y_true, y_predicted):
    """Rounds predictions and calculates accuracy in terms of absolute coincidence.

    Args:
        y_true: list of true values
        y_predicted: list of predicted values

    Returns:
        portion of absolutely coincidental samples
    """
    total = len(y_true)
    if not total:
        return 0
    matches = sum(1 for truth, pred in zip(y_true, y_predicted)
                  if truth == round(pred))
    return matches / total
def addRow(self, triggered):
    """Adds a row to the model.

    This method is also a slot.

    Args:
        triggered (bool): If the corresponding button was
            activated, the row will be appended to the end.
    """
    if not triggered:
        return
    self.tableView.model().addDataFrameRows()
    # Reset the toggle button after the row has been appended.
    self.sender().setChecked(False)
def imfrombytes(content, flag='color'):
    """Read an image from bytes.

    Args:
        content (bytes): Image bytes got from files or other streams.
        flag (str): Same as :func:`imread`.

    Returns:
        ndarray: Loaded image array.
    """
    buffer = np.frombuffer(content, np.uint8)
    # String flags are translated through the imread_flags lookup table.
    if is_str(flag):
        flag = imread_flags[flag]
    return cv2.imdecode(buffer, flag)
def namespace(self, mid: ModuleId) -> YangIdentifier:
    """Return the namespace corresponding to a module or submodule.

    Args:
        mid: Module identifier.

    Raises:
        ModuleNotRegistered: If `mid` is not registered in the data model.
    """
    try:
        module_data = self.modules[mid]
    except KeyError:
        # Surface a domain-specific error without the KeyError context.
        raise ModuleNotRegistered(*mid) from None
    return module_data.main_module[0]
def SpinTimes(spin, bias):
    """Multiply a bias by a spin value (+1 or -1).

    Keeps log output clean and validates the spin value.

    Args:
        spin (int): -1 or 1.
        bias (:class:`pysmt.shortcuts.Symbol`): The bias.

    Returns:
        spin * bias
    """
    if not isinstance(spin, int):
        raise TypeError('spin must be an int')
    if spin == 1:
        return bias
    if spin == -1:
        return Times(Real((-1, 1)), bias)
    raise ValueError('expected spins to be -1., or 1.')
cleaner log code as well as value checking.
Args:
spin (int): -1 or 1
bias (:class:`pysmt.shortcuts.Symbol`): The bias
Returns:
spins * bias | codesearchnet |
def __init__(self, scale, growth_factor, bucket_count):
    """Creates a new exponential Buckets.

    Args:
        scale: float
        growth_factor: float
        bucket_count: integer
    """
    # Delegates bucket construction to the native monitoring implementation.
    super(ExponentialBuckets, self).__init__(pywrap_tfe.TFE_MonitoringNewExponentialBuckets(scale, growth_factor, bucket_count))
Args:
scale: float
growth_factor: float
bucket_count: integer | github-repos |
def put(self, credentials):
    """Write a credential.

    Acquires the storage lock, delegates to locked_put(), and always
    releases the lock even if locked_put raises.

    Args:
        credentials: Credentials, the credentials to store.
    """
    self.acquire_lock()
    try:
        self.locked_put(credentials)
    finally:
        self.release_lock()
The Storage lock must be held when this is called.
Args:
credentials: Credentials, the credentials to store. | codesearchnet |
def upload_to_s3(context, file_obj):
    """Upload a file object to S3 and yield the bucket and key.

    Reads bucket/key (and optional extra put_object kwargs) from the solid
    config and uses the ``s3`` resource on the context to do the upload.

    Yields:
        Result: the target bucket, then the target key.
    """
    config = context.solid_config
    bucket = config['bucket']
    key = config['key']
    extra_kwargs = config.get('kwargs') or {}
    context.resources.s3.put_object(Bucket=bucket, Body=file_obj.read(), Key=key, **extra_kwargs)
    yield Result(bucket, 'bucket')
    yield Result(key, 'key')
Args:
info (ExpectationExecutionInfo): Must expose a boto3 S3 client as its `s3` resource.
Returns:
(str, str):
The bucket and key to which the file was uploaded. | codesearchnet |
def add_line(self, start, end, color=(0.5, 0.5, 0.5), width=1):
    """Adds a line.

    Args:
        start: Starting coordinates for line.
        end: Ending coordinates for line.
        color: Color for line as RGB. Defaults to grey.
        width: Width of line. Defaults to 1.
    """
    source = vtk.vtkLineSource()
    source.SetPoint1(start)
    source.SetPoint2(end)
    # NOTE(review): a 'VertexIDs' string array is attached to the line's
    # points; presumably consumed elsewhere (picking/labeling) — confirm.
    vertexIDs = vtk.vtkStringArray()
    vertexIDs.SetNumberOfComponents(1)
    vertexIDs.SetName('VertexIDs')
    vertexIDs.InsertNextValue('a')
    vertexIDs.InsertNextValue('b')
    source.GetOutput().GetPointData().AddArray(vertexIDs)
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(source.GetOutputPort())
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().SetColor(color)
    actor.GetProperty().SetLineWidth(width)
    self.ren.AddActor(actor)
Args:
start: Starting coordinates for line.
end: Ending coordinates for line.
color: Color for text as RGB. Defaults to grey.
width: Width of line. Defaults to 1. | codesearchnet |
def find(self, title):
    """Return the first worksheet with the given title.

    Args:
        title (str): title/name of the worksheet to return.

    Returns:
        WorkSheet: contained worksheet object.

    Raises:
        KeyError: if the spreadsheet has no worksheet with the given
            ``title``.
    """
    try:
        return self._titles[title][0]
    except KeyError:
        raise KeyError(title)
Args:
title(str): title/name of the worksheet to return
Returns:
WorkSheet: contained worksheet object
Raises:
KeyError: if the spreadsheet has no no worksheet with the given ``title`` | codesearchnet |
def VShadowPathSpecGetStoreIndex(path_spec):
    """Retrieves the store index from the path specification.

    Falls back to parsing the ``/vss<number>`` location when no explicit
    store_index attribute is present (the number is 1-based on disk).

    Args:
        path_spec (PathSpec): path specification.

    Returns:
        int: store index or None if not available.
    """
    index = getattr(path_spec, 'store_index', None)
    if index is None:
        location = getattr(path_spec, 'location', None)
        if not location or not location.startswith('/vss'):
            return None
        try:
            index = int(location[4:], 10) - 1
        except (TypeError, ValueError):
            return None
    if index is None or index < 0:
        return None
    return index
Args:
path_spec (PathSpec): path specification.
Returns:
int: store index or None if not available. | juraj-google-style |
def build_linear_positions(index_dims, output_range=(-1.0, 1.0)):
    """Generate an array of position indices for an N-D input array.

    Args:
        index_dims (`List[int]`):
            The shape of the index dimensions of the input array.
        output_range (`Tuple[float]`, *optional*, defaults to `(-1.0, 1.0)`):
            The min and max values taken by each input index dimension.

    Returns:
        `torch.FloatTensor` of shape `(index_dims[0], ..., index_dims[-1], N)`.
    """
    low, high = output_range
    per_dim_ranges = [
        torch.linspace(start=low, end=high, steps=steps, dtype=torch.float32)
        for steps in index_dims
    ]
    grid = meshgrid(*per_dim_ranges, indexing='ij')
    return torch.stack(grid, dim=-1)
Args:
index_dims (`List[int]`):
The shape of the index dimensions of the input array.
output_range (`Tuple[float]`, *optional*, defaults to `(-1.0, 1.0)`):
The min and max values taken by each input index dimension.
Returns:
`torch.FloatTensor` of shape `(index_dims[0], index_dims[1], .., index_dims[-1], N)`. | github-repos |
def by_location(self, location, cc=None):
    """Perform a Yelp Neighborhood API search for a location specifier.

    Args:
        location: textual location specifier of form
            "address, city, state or zip, optional country".
        cc: optional ISO 3166-1 alpha-2 country code.

    Returns:
        The decoded JSON response body.
    """
    _, body = self._http_request(self.BASE_URL, location=location, cc=cc)
    return json.loads(body)
Args:
location - textual location specifier of form: "address, city, state or zip, optional country"
cc - ISO 3166-1 alpha-2 country code. (Optional) | codesearchnet |
def __init__(self, in_features: int, lateral_features: int):
    """A Feature Pyramid Network (FPN) layer.

    It creates a feature map by aggregating features from the previous and
    backbone layer; the lateral map is first projected to `in_features`
    channels.

    Args:
        in_features (`int`):
            The number of input features (channels).
        lateral_features (`int`):
            The number of lateral features (channels).
    """
    super().__init__()
    # 1x1 conv + GroupNorm: match the lateral channels to in_features.
    self.proj = nn.Sequential(nn.Conv2d(lateral_features, in_features, kernel_size=1, padding=0, bias=False), nn.GroupNorm(32, in_features))
    self.block = MaskFormerFPNConvLayer(in_features, in_features)
and backbone layer. Due to the spatial mismatch, the tensor coming from the previous layer is upsampled.
Args:
in_features (`int`):
The number of input features (channels).
lateral_features (`int`):
The number of lateral features (channels). | github-repos |
def __load_partition_entries(self, fd, bs):
    """Loads the list of GPT partition entries.

    Reading stops at the first entry whose type GUID is all zeros, which
    marks the end of the used entries.

    Args:
        fd: seekable file-like object for the volume.
        bs (uint): Block size of the volume.
    """
    # The partition entry array starts part_lba blocks into the volume.
    fd.seek(self.header.part_lba * bs)
    for p in range(0, self.header.num_partitions):
        data = fd.read(self.header.part_size)
        entry = GptPartitionEntry(data)
        if entry.type_guid != uuid.UUID(
            '{00000000-0000-0000-0000-000000000000}'
        ):
            self.__partition_entries.append(entry)
        else:
            # Zero GUID: unused entry; the remaining slots are unused too.
            break
Args:
bs (uint): Block size of the volume | juraj-google-style |
def compile_action_bound_constraints(self, state: Sequence[tf.Tensor]) -> Dict[(str, Bounds)]:
    """Compiles all actions bounds for the given `state`.

    Args:
        state (Sequence[tf.Tensor]): The current state fluents.

    Returns:
        A mapping from action names to a pair of
        :obj:`rddl2tf.fluent.TensorFluent` representing its lower and
        upper bounds; either element is None when the domain declares no
        corresponding constraint.
    """
    # Bound expressions are evaluated in the action-precondition scope.
    scope = self.action_precondition_scope(state)
    lower_bounds = self.rddl.domain.action_lower_bound_constraints
    upper_bounds = self.rddl.domain.action_upper_bound_constraints
    with self.graph.as_default():
        with tf.name_scope('action_bound_constraints'):
            bounds = {}
            for name in self.rddl.domain.action_fluent_ordering:
                lower_expr = lower_bounds.get(name)
                lower = None
                if (lower_expr is not None):
                    with tf.name_scope('lower_bound'):
                        lower = self._compile_expression(lower_expr, scope)
                upper_expr = upper_bounds.get(name)
                upper = None
                if (upper_expr is not None):
                    with tf.name_scope('upper_bound'):
                        upper = self._compile_expression(upper_expr, scope)
                bounds[name] = (lower, upper)
            return bounds
Args:
state (Sequence[tf.Tensor]): The current state fluents.
Returns:
A mapping from action names to a pair of
:obj:`rddl2tf.fluent.TensorFluent` representing
its lower and upper bounds. | codesearchnet |
def _fromTwosComplement(x, bits=16):
    """Convert an unsigned integer to its signed two's-complement value.

    Example for bits=8: 0 -> 0, 127 -> 127, 128 -> -128, 255 -> -1.

    Args:
        x (int): input integer (0 .. 2**bits - 1).
        bits (int): number of bits, must be > 0.

    Returns:
        int: signed value represented by the two's-complement bit pattern.
    """
    _checkInt(bits, minvalue=0, description='number of bits')
    _checkInt(x, description='input')
    upperlimit = 2 ** bits - 1
    lowerlimit = 0
    if not lowerlimit <= x <= upperlimit:
        raise ValueError('The input value is out of range. Given value is {0}, but allowed range is {1} to {2} when using {3} bits.' \
            .format(x, lowerlimit, upperlimit, bits))
    if x <= 2 ** (bits - 1) - 1:
        # Positive range: the value is its own representation.
        return x
    return x - 2 ** bits
Args:
* x (int): input integer.
* bits (int): number of bits, must be > 0.
Returns:
An int, that represents the inverse(?) of two's complement of the input.
Example for bits=8:
=== =======
x returns
=== =======
0 0
1 1
127 127
128 -128
129 -127
255 -1
=== ======= | juraj-google-style |
def apply_cut(self, cut):
    """Return a cut version of this |MacroSubsystem|.

    All other construction arguments are carried over unchanged.

    Args:
        cut (Cut): The cut to apply to this |MacroSubsystem|.

    Returns:
        MacroSubsystem: The cut version of this |MacroSubsystem|.
    """
    return MacroSubsystem(
        self.network,
        self.network_state,
        self.micro_node_indices,
        cut=cut,
        time_scale=self.time_scale,
        blackbox=self.blackbox,
        coarse_grain=self.coarse_grain)
Args:
cut (Cut): The cut to apply to this |MacroSubsystem|.
Returns:
MacroSubsystem: The cut version of this |MacroSubsystem|. | juraj-google-style |
def WakeStuckFlow(session_id):
    """Wake up stuck flows.

    A stuck flow is one which is waiting for the client to do something,
    but the client requests have been removed from the client queue. This
    can happen if the system is too loaded and the client messages have
    TTLed out. In this case we reschedule the client requests for this
    session.

    Args:
        session_id: The session for the flow to wake.

    Returns:
        The total number of client messages re-queued.
    """
    session_id = rdfvalue.SessionID(session_id)
    woken = 0
    checked_pending = False
    with queue_manager.QueueManager() as manager:
        for (request, responses) in manager.FetchRequestsAndResponses(session_id):
            if (not checked_pending):
                # If any request task is still pending on the client queue,
                # the flow is not stuck; nothing to do.
                task = manager.Query(request.client_id, task_id=('task:%s' % request.request.task_id))
                if task:
                    return
                checked_pending = True
            if ((not responses) or (responses[(- 1)].type != rdf_flows.GrrMessage.Type.STATUS)):
                # Request not fully answered yet: re-queue it for the client.
                manager.QueueClientMessage(request.request)
                woken += 1
            if (responses and (responses[(- 1)].type == rdf_flows.GrrMessage.Type.STATUS)):
                # Request already completed: notify the worker to process it.
                manager.QueueNotification(session_id)
    return woken
A stuck flow is one which is waiting for the client to do something, but the
client requests have been removed from the client queue. This can happen if
the system is too loaded and the client messages have TTLed out. In this case
we reschedule the client requests for this session.
Args:
session_id: The session for the flow to wake.
Returns:
The total number of client messages re-queued. | codesearchnet |
def _on_connection_finished(self, result):
    """Callback invoked when a connection attempt to a BLE device finishes.

    On failure, notifies the caller's callback and decrements the
    in-flight connection counter. On success, records the connection
    context and starts probing GATT services.

    Args:
        result: raw driver return structure; parsed into
            (success, retval, context) by self._parse_return.
    """
    (success, retval, context) = self._parse_return(result)
    conn_id = context['connection_id']
    callback = context['callback']
    if (success is False):
        callback(conn_id, self.id, False, 'Timeout opening connection')
        with self.count_lock:
            self.connecting_count -= 1
        return
    handle = retval['handle']
    context['disconnect_handler'] = self._on_connection_failed
    context['connect_time'] = time.time()
    context['state'] = 'preparing'
    self._connections[handle] = context
    self.probe_services(handle, conn_id, self._probe_services_finished)
This function is called when a new connection is successfully completed.
Args:
    result: raw driver return structure; parsed into
        (success, retval, context) by self._parse_return | codesearchnet
def are_all_matches_terminal(self, predicate: Callable[([ops.Operation], bool)]):
    """Check whether all of the ops that satisfy a predicate are terminal.

    Args:
        predicate: A predicate on ops.Operations which is being checked.

    Returns:
        Whether or not all `Operation` s in a circuit that satisfy the
        given predicate are terminal.
    """
    for index, operation in self.findall_operations(predicate):
        if self.next_moment_operating_on(operation.qubits, index + 1) is not None:
            return False
    return True
Args:
predicate: A predicate on ops.Operations which is being checked.
Returns:
Whether or not all `Operation` s in a circuit that satisfy the
given predicate are terminal. | codesearchnet |
def rfc3339(self):
    """Return an RFC 3339-compliant timestamp.

    Returns:
        (str): Timestamp string according to RFC 3339 spec; nanosecond
        digits are appended only when the value has sub-microsecond
        resolution.
    """
    if self._nanosecond == 0:
        return to_rfc3339(self)
    fraction = str(self._nanosecond).rjust(9, '0').rstrip('0')
    return '{}.{}Z'.format(self.strftime(_RFC3339_NO_FRACTION), fraction)
Returns:
(str): Timestamp string according to RFC 3339 spec. | codesearchnet |
def get_nn_info(self, structure, n):
    """Get all near-neighbor sites, image locations and weights for site `n`.

    Uses the closest relative neighbor distance-based method with
    O'Keeffe parameters.

    Args:
        structure (Structure): input structure.
        n (integer): index of site for which to determine near neighbors.

    Returns:
        siw (list of dicts): each entry describes a neighbor site, its
        image location, weight and original site index.
    """
    site = structure[n]
    neighs_dists = structure.get_neighbors(site, self.cutoff)
    try:
        eln = site.specie.element
    except AttributeError:
        # Narrowed from a bare except: a disordered site (or a species
        # without an `element` attribute) raises AttributeError here.
        eln = site.species_string
    reldists_neighs = []
    for neigh, dist in neighs_dists:
        try:
            el2 = neigh.specie.element
        except AttributeError:
            el2 = neigh.species_string
        # Normalize by the predicted bond length for this element pair.
        reldists_neighs.append([dist / get_okeeffe_distance_prediction(eln, el2), neigh])
    siw = []
    min_reldist = min(reldist for reldist, neigh in reldists_neighs)
    for reldist, s in reldists_neighs:
        if reldist < (1.0 + self.tol) * min_reldist:
            w = min_reldist / reldist
            siw.append({'site': s, 'image': self._get_image(structure, s), 'weight': w, 'site_index': self._get_original_site(structure, s)})
    return siw
and weights of the site with index n using the closest relative
neighbor distance-based method with O'Keeffe parameters.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine near
neighbors.
Returns:
siw (list of tuples (Site, array, float)): tuples, each one
of which represents a neighbor site, its image location,
and its weight. | codesearchnet |
def unpack(self, dtensor: Any) -> Sequence[Any]:
    """Unpacks a DTensor handle on this DTensor device.

    Packing and unpacking are inverse operations:

    ```
    * unpack(pack(tensors)) == tensors
    * pack(unpack(dtensor)) == dtensor
    ```

    Refer to `dtensor.unpack` for more information.

    Args:
        dtensor: The DTensor to unpack.

    Returns:
        The raw underlying tensor components of the DTensor.

    Raises:
        RuntimeError: When not called eagerly.
    """
    if not context.executing_eagerly():
        raise RuntimeError('`unpack` must be called eagerly.')
    try:
        tensors = _pywrap_dtensor_device.Unpack(context.context()._handle, dtensor, self._device_info)
    except core._NotOkStatusException as e:
        raise core._status_to_exception(e) from None
    is_sparse = _pywrap_dtensor_device.IsSparseDTensor(context.context()._handle, dtensor, self._device_info)
    if is_sparse:
        # Sparse components come back flattened as
        # [indices..., values..., dense_shapes...]; regroup each triple
        # into a SparseTensor. NOTE(review): this loop was garbled in this
        # copy of the file and has been reconstructed — confirm the
        # three-way split against the upstream implementation.
        result = []
        num = len(tensors) // 3
        for i in range(num):
            result.append(sparse_tensor.SparseTensor(tensors[i], tensors[i + num], tensors[i + 2 * num]))
        return result
    else:
        return tensors
Packing and unpacking are inverse operations:
```
* unpack(pack(tensors)) == tensors
* pack(unpack(dtensor)) == dtensor
```
Refer to `dtensor.unpack` for more information.
Args:
dtensor: The DTensor to unpack.
Returns:
The raw underlying tensor components of the DTensor.
Raises:
RuntimeError: When not called eagerly. | github-repos |
def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
    """Resize an image.

    When given a shortest/longest edge spec, the shortest edge is resized
    to size["shortest_edge"] with the longest edge capped to preserve the
    input aspect ratio; otherwise explicit height/width are used.

    Args:
        image (`np.ndarray`):
            Image to resize.
        size (`Dict[str, int]`):
            Size of the output image.
        resample (`PILImageResampling`, *optional*):
            Resampling filter to use when resizing the image.
        data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format of the output image. If not
            provided, it will be the same as the input image.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format of the input image. If not
            provided, it will be inferred.
    """
    if 'shortest_edge' in size and 'longest_edge' in size:
        output_size = get_resize_output_image_size(image, size, input_data_format)
    elif 'height' in size and 'width' in size:
        output_size = (size['height'], size['width'])
    else:
        raise ValueError("size must be a dictionary with keys 'shortest_edge' and 'longest_edge' or 'height' and 'width'.")
    return resize(image, output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
resized to keep the input aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`Dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resiizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred. | github-repos |
def _get_expiration(self, headers: dict) -> int:
    """Get the number of seconds until the response data expires.

    Args:
        headers: dictionary of headers from ESI.

    Returns:
        seconds from now until the 'expires' header value, rounded up
        (0 when the header is absent or empty).
    """
    raw = headers.get('expires')
    if not raw:
        return 0
    expires_at = datetime.strptime(raw, '%a, %d %b %Y %H:%M:%S %Z')
    seconds = (expires_at - datetime.utcnow()).total_seconds()
    return math.ceil(abs(seconds))
Args:
headers: dictionary of headers from ESI
Returns:
value of seconds from now the data expires | juraj-google-style |
def get_url_preview(self, url, ts=None):
    """Get preview for URL.

    Args:
        url (str): URL to get a preview for.
        ts (double): The preferred point in time to return a preview for.
            The server may return a newer version if it does not have the
            requested version available.
    """
    query = {'url': url}
    if ts:
        query['ts'] = ts
    return self._send('GET', '', query_params=query, api_path='/_matrix/media/r0/preview_url')
Args:
url (str): URL to get a preview
ts (double): The preferred point in time to return
a preview for. The server may return a newer
version if it does not have the requested
version available. | codesearchnet |
def get_callable(subcommand):
    """Return a callable object from the subcommand.

    Args:
        subcommand: An object loaded from an entry point. May be a module,
            class, or function.

    Returns:
        The callable entry point for the subcommand. Functions are
        returned unchanged; modules (via their ``Command`` class) and
        classes are instantiated.

    Raises:
        AssertionError: Raised when a module entry point does not have a
            callable class named Command.
    """
    _LOGGER.debug('Creating callable from subcommand "%s".', subcommand.__name__)
    if not isinstance(subcommand, ModuleType):
        target = subcommand
    else:
        _LOGGER.debug('Subcommand is a module.')
        assert hasattr(subcommand, 'Command'), 'Module subcommand must have callable "Command" class definition.'
        target = subcommand.Command
    if any(isinstance(target, t) for t in six.class_types):
        return target()
    return target
Args:
subcommand: A object loaded from an entry point. May be a module,
class, or function.
Returns:
The callable entry point for the subcommand. If the subcommand is a
function, it will be returned unchanged. If the subcommand is a module
or a class, an instance of the command class will be returned.
Raises:
AssertionError: Raised when a module entry point does not have a
callable class named Command. | codesearchnet |
def swo_stop(self):
    """Stops collecting SWO data.

    Returns:
        ``None``

    Raises:
        JLinkException: on error.
    """
    result = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.STOP, 0)
    if result < 0:
        raise errors.JLinkException(result)
    return None
Args:
self (JLink): the ``JLink`` instance
Returns:
``None``
Raises:
JLinkException: on error | codesearchnet |
def GetPluginObjectByName(cls, plugin_name):
    """Retrieves a specific plugin object by its name.

    Args:
        plugin_name (str): name of the plugin.

    Returns:
        BasePlugin: a plugin object or None if not available.
    """
    plugin_class = cls._plugin_classes.get(plugin_name)
    return plugin_class() if plugin_class else None
Args:
plugin_name (str): name of the plugin.
Returns:
BasePlugin: a plugin object or None if not available. | juraj-google-style |
def get_characteristic_handle_from_uuid(self, uuid):
    """Given a characteristic UUID, return its handle.

    Args:
        uuid (str): a string containing the hex-encoded UUID.

    Returns:
        None if the characteristic cannot be found, otherwise its
        integer handle.
    """
    characteristic = self.get_characteristic_from_uuid(uuid)
    if characteristic is None:
        return None
    return characteristic.char_handle
Args:
uuid (str): a string containing the hex-encoded UUID
Returns:
None if an error occurs, otherwise an integer handle. | juraj-google-style |
def max(self):
    """Return the maximum value in this histogram.

    If there are no values in the histogram at all, return 600.

    Returns:
        int: The maximum value in the histogram.
    """
    if not self._data:
        return 600
    # Largest key; replaces the original next(iter(reversed(sorted(...))))
    # chain with a direct equivalent.
    return sorted(self._data)[-1]
If there are no values in the histogram at all, return 600.
Returns:
int: The maximum value in the histogram. | codesearchnet |
def get_current_round(self, tournament=1):
    """Get number of the current active round.

    Args:
        tournament (int): ID of the tournament (optional, defaults to 1)

    Returns:
        int: number of the current active round, or None if there is no
        active round.

    Example:
        >>> NumerAPI().get_current_round()
        104
    """
    # NOTE(review): the GraphQL query string was lost in this copy of the
    # file; reconstructed from the public numerapi API — confirm upstream.
    query = '''
      query($tournament: Int!) {
        rounds(tournament: $tournament, number: 0) {
          number
        }
      }
    '''
    arguments = {'tournament': tournament}
    rounds = self.raw_query(query, arguments)['data']['rounds']
    # Guard the empty-rounds case before indexing (the original indexed
    # [0] unconditionally and could raise IndexError).
    if not rounds:
        return None
    data = rounds[0]
    if data is None:
        return None
    round_num = data["number"]
    return round_num
Args:
tournament (int): ID of the tournament (optional, defaults to 1)
Returns:
int: number of the current active round
Example:
>>> NumerAPI().get_current_round()
104 | juraj-google-style |
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
    """Extracts relevant Apple Account entries.

    Produces one event per available timestamp (creation, last successful
    connect, last validation) for each account found under /Accounts.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        match (Optional[dict[str: object]]): keys extracted from
            PLIST_KEYS.
    """
    accounts = match.get('Accounts', {})
    for name_account, account in iter(accounts.items()):
        first_name = account.get('FirstName', '<FirstName>')
        last_name = account.get('LastName', '<LastName>')
        general_description = '{0:s} ({1:s} {2:s})'.format(
            name_account, first_name, last_name)
        event_data = plist_event.PlistTimeEventData()
        event_data.key = name_account
        event_data.root = '/Accounts'
        datetime_value = account.get('CreationDate', None)
        if datetime_value:
            event_data.desc = 'Configured Apple account {0:s}'.format(
                general_description)
            event = time_events.PythonDatetimeEvent(
                datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
            parser_mediator.ProduceEventWithEventData(event, event_data)
        datetime_value = account.get('LastSuccessfulConnect', None)
        if datetime_value:
            event_data.desc = 'Connected Apple account {0:s}'.format(
                general_description)
            event = time_events.PythonDatetimeEvent(
                datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
            parser_mediator.ProduceEventWithEventData(event, event_data)
        datetime_value = account.get('ValidationDate', None)
        if datetime_value:
            event_data.desc = 'Last validation Apple account {0:s}'.format(
                general_description)
            event = time_events.PythonDatetimeEvent(
                datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
            parser_mediator.ProduceEventWithEventData(event, event_data)
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS. | juraj-google-style |
def __get_default_form_data_input(self, elements):
    """Build the default form data {key: value} for the given elements.

    Elements whose default value resolves to False are skipped.

    Args:
        elements list(obj): Soup elements.

    Returns:
        obj: The {key: value} form data.
    """
    form_data = OrderedDict()
    for element in elements:
        value = self.__get_default_value_from_element(element)
        if value is not False:
            form_data[element["name"]] = value
    return form_data
Args:
elements list(obj): Soup elements.
Returns:
obj: The {key: value} form data | juraj-google-style |
def cumulative_distribution(self, X):
    """Computes the cumulative distribution function for the copula, :math:`C(u, v)`.

    The formula used, (u^-theta + v^-theta - 1)^(-1/theta), matches the
    Clayton copula form with parameter ``self.theta``.

    Args:
        X: `np.ndarray` of (u, v) pairs.

    Returns:
        np.array: cumulative probability per row, floored at 0.
    """
    self.check_fit()
    U, V = self.split_matrix(X)
    if (V == 0).all() or (U == 0).all():
        # Degenerate margins: CDF is identically zero.
        return np.zeros(V.shape[0])
    else:
        cdfs = [
            np.power(
                np.power(U[i], -self.theta) + np.power(V[i], -self.theta) - 1,
                -1.0 / self.theta
            )
            if (U[i] > 0 and V[i] > 0) else 0
            for i in range(len(U))
        ]
        return np.array([max(x, 0) for x in cdfs])
Args:
X: `np.ndarray`
Returns:
np.array: cumulative probability | juraj-google-style |
def load_from_tarfile(session, tarfile_path, check_for_duplicates, pkts_per_commit=1000):
    """Iterate through xml files in a tarball and attempt to load into database.

    .. warning::
        Very slow with duplicate checking enabled.

    Args:
        session: database session used for inserts and commits.
        tarfile_path (str): path to the tarball of VOEvent XML packets.
        check_for_duplicates (bool): skip packets whose IVORN is already
            present in the database (slow).
        pkts_per_commit (int): commit the session every N loaded packets.

    Returns:
        tuple: (n_parsed, n_loaded) - Total number of packets parsed from
        tarbar, and number successfully loaded.
    """
    tf_stream = tarfile_xml_generator(tarfile_path)
    logger.info('Loading: ' + tarfile_path)
    n_parsed = 0
    n_loaded = 0
    for tarinf in tf_stream:
        try:
            v = vp.loads(tarinf.xml, check_version=False)
            if v.attrib['version'] != '2.0':
                logger.debug('Packet: {} is not VO-schema version 2.0.'.format(tarinf.name))
            n_parsed += 1
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate.
            logger.exception('Error loading file {}, skipping'.format(tarinf.name))
            continue
        try:
            new_row = Voevent.from_etree(v)
            if check_for_duplicates:
                if ivorn_present(session, new_row.ivorn):
                    logger.debug('Ignoring duplicate ivorn: {} in file {}'.format(new_row.ivorn, tarinf.name))
                    continue
            session.add(new_row)
            n_loaded += 1
        except Exception:
            logger.exception('Error converting file {} to database row, skipping'.format(tarinf.name))
            continue
        if (n_loaded % pkts_per_commit) == 0:
            # Periodic commit bounds the transaction size.
            session.commit()
    session.commit()
    logger.info('Successfully parsed {} packets, of which loaded {}.'.format(n_parsed, n_loaded))
    return (n_parsed, n_loaded)
.. warning::
Very slow with duplicate checking enabled.
Returns:
tuple: (n_parsed, n_loaded) - Total number of packets parsed from
tarbar, and number successfully loaded. | codesearchnet |
def fit_transform(self, *args, **kwargs):
    """Performs fit followed by transform.

    This method simply combines fit and transform; both receive the same
    arguments.

    Args:
        args: positional arguments (can be anything)
        kwargs: keyword arguments (can be anything)

    Returns:
        dict: output
    """
    self.fit(*args, **kwargs)
    return self.transform(*args, **kwargs)
This method simply combines fit and transform.
Args:
args: positional arguments (can be anything)
kwargs: keyword arguments (can be anything)
Returns:
dict: output | juraj-google-style |
def call_rpc_external(self, address, rpc_id, arg_payload, timeout=10.0):
    """Call an RPC from outside of the event loop and block until it finishes.

    This is the main method by which a caller outside of the EmulationLoop
    can inject an RPC into the EmulationLoop and wait for it to complete.
    This method is synchronous so it blocks until the RPC completes or the
    timeout expires.

    Args:
        address (int): The address of the mock tile this RPC is for.
        rpc_id (int): The number of the RPC.
        arg_payload (bytes): A byte string of payload parameters up to 20
            bytes.
        timeout (float): The maximum time to wait for the RPC to finish.

    Returns:
        bytes: The response payload from the RPC (the binary error
        payload when the RPC raises RPCRuntimeError).
    """
    self.verify_calling_thread(False, 'call_rpc_external is for use **outside** of the event loop')
    response = CrossThreadResponse()
    # Hand the RPC to the loop thread; put_rpc fulfills `response`.
    self._loop.call_soon_threadsafe(self._rpc_queue.put_rpc, address, rpc_id, arg_payload, response)
    try:
        return response.wait(timeout)
    except RPCRuntimeError as err:
        return err.binary_error
This is the main method by which a caller outside of the EmulationLoop
can inject an RPC into the EmulationLoop and wait for it to complete.
This method is synchronous so it blocks until the RPC completes or the
timeout expires.
Args:
address (int): The address of the mock tile this RPC is for
rpc_id (int): The number of the RPC
payload (bytes): A byte string of payload parameters up to 20 bytes
timeout (float): The maximum time to wait for the RPC to finish.
Returns:
bytes: The response payload from the RPC | codesearchnet |
def GenApiConfig(service_class_names, config_string_generator=None, hostname=None, application_path=None, **additional_kwargs):
    """Write an API configuration for endpoints annotated ProtoRPC services.

    Args:
        service_class_names: A list of fully qualified ProtoRPC service
            classes.
        config_string_generator: A generator object that produces API
            config strings using its pretty_print_config_to_json method.
        hostname: A string hostname which will be used as the default
            version hostname. If no hostname is specified in the
            @endpoints.api decorator, this value is the fallback.
        application_path: A string with the path to the AppEngine
            application.

    Raises:
        TypeError: If any service classes don't inherit from
            remote.Service.
        messages.DefinitionNotFoundError: If a service can't be found.

    Returns:
        A map from service names to a string containing the API
        configuration of the service in JSON format.
    """
    api_service_map = collections.OrderedDict()
    resolved_services = []
    for service_class_name in service_class_names:
        (module_name, base_service_class_name) = service_class_name.rsplit('.', 1)
        module = __import__(module_name, fromlist=base_service_class_name)
        service = getattr(module, base_service_class_name)
        if hasattr(service, 'get_api_classes'):
            # Multi-class APIs expose their classes via get_api_classes().
            resolved_services.extend(service.get_api_classes())
        elif ((not isinstance(service, type)) or (not issubclass(service, remote.Service))):
            raise TypeError(('%s is not a ProtoRPC service' % service_class_name))
        else:
            resolved_services.append(service)
    # Group the services by (api name, api version).
    for resolved_service in resolved_services:
        services = api_service_map.setdefault((resolved_service.api_info.name, resolved_service.api_info.api_version), [])
        services.append(resolved_service)
    app_yaml_hostname = _GetAppYamlHostname(application_path)
    service_map = collections.OrderedDict()
    config_string_generator = (config_string_generator or api_config.ApiConfigGenerator())
    for (api_info, services) in api_service_map.iteritems():
        assert services, 'An API must have at least one ProtoRPC service'
        # Hostname precedence: decorator, then argument, then app.yaml.
        hostname = (services[0].api_info.hostname or hostname or app_yaml_hostname)
        service_map[('%s-%s' % api_info)] = config_string_generator.pretty_print_config_to_json(services, hostname=hostname, **additional_kwargs)
    return service_map
Args:
service_class_names: A list of fully qualified ProtoRPC service classes.
config_string_generator: A generator object that produces API config strings
using its pretty_print_config_to_json method.
hostname: A string hostname which will be used as the default version
hostname. If no hostname is specificied in the @endpoints.api decorator,
this value is the fallback.
application_path: A string with the path to the AppEngine application.
Raises:
TypeError: If any service classes don't inherit from remote.Service.
messages.DefinitionNotFoundError: If a service can't be found.
Returns:
A map from service names to a string containing the API configuration of the
service in JSON format. | codesearchnet |
def quantize(self, mode, **kwargs):
    """Quantize the weights of the model.

    Note that the model must be built first before calling this method.
    `quantize` will recursively call `quantize(mode)` in all layers and
    will be skipped if the layer doesn't implement the function.

    Args:
        mode: The mode of the quantization. Must be one of
            `keras.src.dtype_policies.QUANTIZATION_MODES`.
    """
    from keras.src.dtype_policies import QUANTIZATION_MODES
    type_check = kwargs.pop('type_check', True)
    if kwargs:
        raise ValueError(f'Unrecognized keyword arguments passed to {self.__class__.__name__}: {kwargs}')
    if mode not in QUANTIZATION_MODES:
        raise ValueError(f'Invalid quantization mode. Expected one of {QUANTIZATION_MODES}. Received: mode={mode}')
    mode_changed = False
    for layer in self._flatten_layers():
        # Only quantize leaf layers (those with no sublayers of their own).
        list_of_sublayers = list(layer._flatten_layers())
        if len(list_of_sublayers) == 1:
            try:
                layer.quantize(mode, type_check=type_check)
                mode_changed = True
            except NotImplementedError as e:
                warnings.warn(str(e))
    if mode_changed:
        # Invalidate compiled train/test/predict functions so they are
        # rebuilt against the quantized weights.
        self.train_function = None
        self.test_function = None
        self.predict_function = None
Note that the model must be built first before calling this method.
`quantize` will recursively call `quantize(mode)` in all layers and
will be skipped if the layer doesn't implement the function.
Args:
mode: The mode of the quantization. Only 'int8' is supported at this
time. | github-repos |
def __init__(self, data=None, top=None):
    """Create a Graph from an iterable of triples.

    Args:
        data: an iterable of triples (Triple objects or 3-tuples)
        top: the node identifier of the top node; if unspecified,
            the source of the first triple is used

    Example:
        >>> Graph([
        ...     ('b', 'instance', 'bark'),
        ...     ('d', 'instance', 'dog'),
        ...     ('b', 'ARG1', 'd')
        ... ])
    """
    self._triples = []
    self._top = None
    if data is None:
        data = []
    else:
        # Materialize so we can both iterate and index (default top below).
        data = list(data)
    if data:
        self._triples.extend(
            Triple(*t, inverted=getattr(t, 'inverted', None))
            for t in data
        )
        if top is None:
            top = data[0][0]
    self.top = top
Args:
data: an iterable of triples (Triple objects or 3-tuples)
top: the node identifier of the top node; if unspecified,
the source of the first triple is used
Example:
>>> Graph([
... ('b', 'instance', 'bark'),
... ('d', 'instance', 'dog'),
... ('b', 'ARG1', 'd')
... ]) | juraj-google-style |
def __init__(self, fields: List[Field], name: Optional[str]=None, base_schema_list: Optional[List['Schema']]=None, description: Optional[str]=None, *, allow_nonconst_keys: bool=False, metadata: Optional[Dict[str, Any]]=None, for_cls: Optional[Type[Any]]=None):
    """Constructor.

    Args:
        fields: A list of Field as the definition of the schema. The
            order of the fields will be preserved.
        name: Optional name of this schema. Useful for debugging.
        base_schema_list: List of schema used as base. When present,
            fields from these schema will be copied to this schema.
            Fields from the latter schema will override those from the
            former ones.
        description: Optional str as the description for the schema.
        allow_nonconst_keys: Whether immediate fields can use non-const
            keys.
        metadata: Optional dict of user objects as schema-level metadata.
        for_cls: Optional class that this schema applies to.

    Raises:
        TypeError: Argument `fields` is not a list.
        ValueError: When a non-const key is present while
            `allow_nonconst_keys` is False.
    """
    if not isinstance(fields, list):
        raise TypeError(f"Argument 'fields' must be a list. Encountered: {fields}.")
    self._name = name
    self._allow_nonconst_keys = allow_nonconst_keys
    self._fields = {f.key: f for f in fields}
    self._description = description
    self._metadata = metadata or {}
    if for_cls is not None:
        # Record the owning class on fields without an origin yet.
        for f in fields:
            if f.origin is None:
                f.set_origin(for_cls)
    self._dynamic_field = None
    # The first non-const key (if any) acts as the schema's dynamic field.
    for f in fields:
        if not f.key.is_const:
            self._dynamic_field = f
            break
    if base_schema_list:
        # Merge base schemas and copy their fields into this schema.
        base = Schema.merge(base_schema_list)
        self.extend(base)
    if not allow_nonconst_keys and self._dynamic_field is not None:
        raise ValueError(f"NonConstKey is not allowed in schema. Encountered '{self._dynamic_field.key}'.")
Args:
fields: A list of Field as the definition of the schema. The order of the
fields will be preserved.
name: Optional name of this schema. Useful for debugging.
base_schema_list: List of schema used as base. When present, fields
from these schema will be copied to this schema. Fields from the
latter schema will override those from the former ones.
description: Optional str as the description for the schema.
allow_nonconst_keys: Whether immediate fields can use non-const keys.
metadata: Optional dict of user objects as schema-level metadata.
for_cls: Optional class that this schema applies to.
Raises:
TypeError: Argument `fields` is not a list.
KeyError: If a field name contains characters ('.') which is not
allowed, or a field name from `fields` already exists in parent
schema.
ValueError: When failed to create ValueSpec from `fields`.
It could be an unsupported value type, default value doesn't conform
with value specification, etc. | github-repos |
def get_ignition_type(root):
    """Gets ignition type and target.

    Args:
        root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file

    Returns:
        properties (`dict`): Dictionary with ignition type/target
        information
    """
    elem = root.find('ignitionType')
    if elem is None:
        raise MissingElementError('ignitionType')
    attribs = elem.attrib
    if 'target' not in attribs:
        raise MissingAttributeError('target', 'ignitionType')
    ign_target = attribs['target'].rstrip(';').upper()
    if 'type' not in attribs:
        raise MissingAttributeError('type', 'ignitionType')
    ign_type = attribs['type']
    if ign_type == 'baseline max intercept from d/dt':
        ign_type = 'd/dt max extrapolated'
    if len(ign_target.split(';')) > 1:
        raise NotImplementedError('Multiple ignition targets not supported.')
    # Normalize shorthand target names to their canonical spellings.
    translations = {'OHEX': 'OH*', 'CHEX': 'CH*', 'P': 'pressure', 'T': 'temperature'}
    ign_target = translations.get(ign_target, ign_target)
    if ign_target not in ['pressure', 'temperature', 'OH', 'OH*', 'CH*', 'CH']:
        raise KeywordError(ign_target + ' not valid ignition target')
    if ign_type not in ['max', 'd/dt max', '1/2 max', 'min', 'd/dt max extrapolated']:
        raise KeywordError(ign_type + ' not valid ignition type')
    return {'type': ign_type, 'target': ign_target}
Args:
root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file
Returns:
properties (`dict`): Dictionary with ignition type/target information | juraj-google-style |
def wait_for_transform_job(self, job, poll=5):
    """Wait for an Amazon SageMaker transform job to complete.

    Args:
        job (str): Name of the transform job to wait for.
        poll (int): Polling interval in seconds (default: 5).

    Returns:
        (dict): Return value from the ``DescribeTransformJob`` API.

    Raises:
        ValueError: If the transform job fails.
    """
    desc = _wait_until(lambda: _transform_job_status(self.sagemaker_client, job), poll)
    # Raises if the terminal status indicates failure.
    self._check_job_status(job, desc, 'TransformJobStatus')
    return desc
Args:
job (str): Name of the transform job to wait for.
poll (int): Polling interval in seconds (default: 5).
Returns:
(dict): Return value from the ``DescribeTransformJob`` API.
Raises:
ValueError: If the transform job fails. | juraj-google-style |
def add_text(self, coords, text, color=(0, 0, 0)):
    """Add text at a coordinate.

    Args:
        coords: Coordinates to add text at.
        text: Text to place.
        color: Color for text as RGB. Defaults to black.
    """
    source = vtk.vtkVectorText()
    source.SetText(text)
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(source.GetOutputPort())
    follower = vtk.vtkFollower()
    follower.SetMapper(mapper)
    follower.GetProperty().SetColor(color)
    follower.SetPosition(coords)
    follower.SetScale(0.5)
    self.ren.AddActor(follower)
    # Keep the label oriented toward the active camera.
    follower.SetCamera(self.ren.GetActiveCamera())
Args:
coords: Coordinates to add text at.
text: Text to place.
color: Color for text as RGB. Defaults to black. | codesearchnet |
def update_unexpected_keys(self, model, unexpected_keys: List[str], prefix: str) -> List[str]:
    """Adjust the list of unexpected checkpoint keys.

    When not running compressed, keys matching patterns the compressor
    expects to be absent from the file are filtered out.

    Args:
        model: the model being loaded.
        unexpected_keys (`List[str]`, *optional*):
            The list of unexpected keys in the checkpoint compared to the
            state dict of the model.
        prefix (str): state-dict key prefix (unused here).

    Returns:
        `List[str]`: the filtered list of unexpected keys.
    """
    if self.run_compressed:
        return unexpected_keys
    ignore_patterns = self.compressor.get_unexpected_file_keys(model)

    def _ignored(key):
        return any(re.match(f'.*{pattern}', key) for pattern in ignore_patterns)

    return [key for key in unexpected_keys if not _ignored(key)]
Args:
unexpected_keys (`List[str]`, *optional*):
The list of unexpected keys in the checkpoint compared to the state dict of the model | github-repos |
def save_json(dictionary, path, pretty=False, sortkeys=False):
    """Save dictionary to JSON file preserving order if it is an OrderedDict.

    Args:
        dictionary (Dict): Python dictionary to save
        path (str): Path to JSON file
        pretty (bool): Whether to pretty print. Defaults to False.
        sortkeys (bool): Whether to sort dictionary keys. Defaults to False.

    Returns:
        None
    """
    if pretty:
        indent, separators = 2, (',', ': ')
    else:
        indent, separators = None, (', ', ': ')
    with open(path, 'w') as handle:
        json.dump(dictionary, handle, indent=indent, sort_keys=sortkeys, separators=separators)
Args:
dictionary (Dict): Python dictionary to save
path (str): Path to JSON file
pretty (bool): Whether to pretty print. Defaults to False.
sortkeys (bool): Whether to sort dictionary keys. Defaults to False.
Returns:
None | codesearchnet |
def nr_profiles(arr, genomes):
    """Collapse redundant cgMLST profiles to a non-redundant set.

    Genomes whose profile rows in ``arr`` are byte-identical are grouped
    together and only the first occurrence of each distinct row is kept,
    so redundant profiles are represented once in the returned matrix.

    Args:
        arr (numpy.array): cgMLST profile matrix, one row per genome.
        genomes (list): Genome names corresponding to the rows of ``arr``.

    Returns:
        (numpy.array, list): the non-redundant profile matrix and, for each
        kept row, the list of genome names sharing that profile.
    """
    gs_collapse = []
    genome_idx_dict = {}
    indices = []
    patt_dict = {}
    for i, g in enumerate(genomes):
        # Key rows by their raw byte pattern. tobytes() replaces the
        # deprecated tostring(), which was removed in NumPy 2.0.
        # (Also fixes the mangled `arr[(i, :)]` indexing, a SyntaxError.)
        p = arr[i, :].tobytes()
        if p in patt_dict:
            # Row already seen: attach this genome to the first occurrence.
            parent = patt_dict[p]
            idx = genome_idx_dict[parent]
            gs_collapse[idx].append(g)
        else:
            indices.append(i)
            patt_dict[p] = g
            genome_idx_dict[g] = len(gs_collapse)
            gs_collapse.append([g])
    return (arr[indices, :], gs_collapse)
def to_html(value: Any, *, name: Optional[str]=None, root_path: Optional[utils.KeyPath]=None, view_id: str='html-tree-view', **kwargs) -> Html:
    """Returns the HTML representation of a value.

    Args:
        value: The value to render.
        name: The name of the value.
        root_path: The root path of the value.
        view_id: The ID of the view to render the value.
            See `pg.views.HtmlView.dir()` for all available HTML view IDs.
        **kwargs: Additional keyword arguments passed from `pg.to_html`, which
            will be passed to the `HtmlView.render_xxx()` (thus
            `Extension._html_xxx()`) methods.

    Returns:
        The rendered HTML.
    """
    rendered = base.view(value, name=name, root_path=root_path, view_id=view_id, **kwargs)
    # The selected view must produce an Html object; anything else is a bug.
    assert isinstance(rendered, Html), rendered
    return rendered
def execute_processing_block(pb_id: str, log_level='DEBUG'):
    """Execute a processing block.

    Celery task that executes a workflow defined in a Configuration database
    Processing Block data object.

    Args:
        pb_id (str): The PB id for the PBC
        log_level (str): Python logging level.

    Returns:
        str: final status of the Processing Block.
    """
    init_logger('sip', show_log_origin=True, propagate=False, log_level=log_level)
    LOG.info(('+' * 40))
    LOG.info('+ Executing Processing block: %s!', pb_id)
    LOG.info(('+' * 40))
    LOG.info('Processing Block Controller version: %s', __version__)
    LOG.info('Docker Swarm API version: %s', sip_swarm_api_version)
    LOG.info('Configuration database API version: %s', config_db_version)
    pb = ProcessingBlock(pb_id)
    LOG.info('Starting workflow %s %s', pb.workflow_id, pb.workflow_version)
    pb.set_status('running')
    docker = DockerSwarmClient()
    # Per-stage mutable state: a deep copy of each stage's config plus a map
    # of the Docker services started for that stage.
    workflow_stage_dict = {}
    for stage in pb.workflow_stages:
        workflow_stage_dict[stage.id] = deepcopy(stage.config)
        workflow_stage_dict[stage.id]['services'] = dict()
    # Polling loop: start runnable stages, refresh service states, and exit
    # on workflow abort or completion.
    while True:
        time.sleep(0.1)
        for workflow_stage in pb.workflow_stages:
            _start_workflow_stages(pb, pb_id, workflow_stage_dict, workflow_stage, docker)
            _update_workflow_stages(workflow_stage_dict[workflow_stage.id], workflow_stage, docker)
        # NOTE(review): indentation was flattened in this extraction; these
        # break checks are assumed to sit at while-loop level — confirm.
        if _abort_workflow(pb, workflow_stage_dict, docker):
            break
        if _workflow_complete(workflow_stage_dict):
            break
    pb_list = ProcessingBlockList()
    pb_list.set_complete(pb_id)
    pb.set_status('completed')
    LOG.info(('-' * 40))
    LOG.info('- Destroying PBC for %s', pb_id)
    LOG.info(('-' * 40))
    return pb.status
def setData(self, data):
    """Add some data.

    Args:
        data (object): Object to add as data. This object has to be
            picklable. Qt objects don't work!

    Raises:
        TypeError: if data is not picklable
    """
    try:
        bytestream = pickle.dumps(data)
        super(MimeData, self).setData(self._mimeType, bytestream)
    except TypeError as err:
        # Chain the original pickling error so the cause is not lost.
        # (The original bare `except: raise` was a no-op and is removed.)
        raise TypeError(self.tr("can not pickle added data")) from err
def update_account_info(self):
    """Update current account information.

    At the moment you can only update your callback_url.

    Returns:
        An Account object
    """
    payload = {'callback_url': self.account.callback_url}
    return self._get_request().post(self.ACCOUNT_UPDATE_URL, payload)
def find_nearest_color_index(r, g, b, color_table=None, method='euclid'):
    """Given three integers representing R, G, and B,
    return the nearest color index.

    Args:
        r: int - of range 0..255
        g: int - of range 0..255
        b: int - of range 0..255

    Returns:
        int, None: index, or None on error.
    """
    if not color_table:
        # Lazily build the default 8-bit table on first use.
        if not color_table8:
            build_color_tables()
        color_table = color_table8
    # Seed with a distance larger than any possible squared RGB distance.
    best_dist, best_idx = 257 * 257 * 3, 0
    for idx, entry in enumerate(color_table):
        dr, dg, db = r - entry[0], g - entry[1], b - entry[2]
        dist = dr * dr + dg * dg + db * db
        if dist < best_dist:
            best_dist, best_idx = dist, idx
    return best_idx
def generate_cot(context, parent_path=None):
    """Format and sign the cot body, and write to disk.

    Args:
        context (scriptworker.context.Context): the scriptworker context.
        parent_path (str, optional): The directory to write the chain of trust
            artifacts to. If None, this is ``artifact_dir/public/``.
            Defaults to None.

    Returns:
        str: the contents of the chain of trust artifact.

    Raises:
        ScriptWorkerException: on schema error.
    """
    body = generate_cot_body(context)
    schema = load_json_or_yaml(
        context.config['cot_schema_path'], is_path=True,
        exception=ScriptWorkerException,
        message="Can't read schema file {}: %(exc)s".format(context.config['cot_schema_path'])
    )
    # Fail early if the generated body does not match the CoT schema.
    validate_json_schema(body, schema, name="chain of trust")
    body = format_json(body)
    parent_path = parent_path or os.path.join(context.config['artifact_dir'], 'public')
    unsigned_path = os.path.join(parent_path, 'chain-of-trust.json')
    write_to_file(unsigned_path, body)
    if context.config['sign_chain_of_trust']:
        # Detached ed25519 signature written next to the unsigned artifact.
        ed25519_signature_path = '{}.sig'.format(unsigned_path)
        ed25519_private_key = ed25519_private_key_from_file(context.config['ed25519_private_key_path'])
        ed25519_signature = ed25519_private_key.sign(body.encode('utf-8'))
        write_to_file(ed25519_signature_path, ed25519_signature, file_type='binary')
    return body
def shift_by_n_processors(self, x, mesh_axis, offset, wrap):
    """Receive the slice from processor pcoord - offset.

    Args:
        x: a LaidOutTensor
        mesh_axis: an integer
        offset: an integer
        wrap: a boolean. If True, then wrap around. Otherwise, pad with zeros.

    Returns:
        whatever ``self.receive`` returns for the computed source coordinates.
    """
    n = self.shape[mesh_axis].size
    source_pcoord = []
    # range (not Python-2-only xrange) keeps this Python-3 compatible with
    # identical semantics for this loop.
    for i in range(n):
        c = i - offset
        if not 0 <= c < n:
            # Out-of-range source: wrap around the mesh axis, or mark it as
            # "no source" (None) so the receiver pads with zeros.
            c = c % n if wrap else None
        source_pcoord.append(c)
    return self.receive(x, mesh_axis, source_pcoord)
def with_input_types(self, input_type_hint, *side_inputs_arg_hints, **side_input_kwarg_hints):
    """Annotates the types of main inputs and side inputs for the PTransform.

    Args:
        input_type_hint: An instance of an allowed built-in type, a custom
            class, or an instance of a typehints.TypeConstraint.
        *side_inputs_arg_hints: A variable length argument composed of
            an allowed built-in type, a custom class, or a
            typehints.TypeConstraint.
        **side_input_kwarg_hints: A dictionary argument composed of
            an allowed built-in type, a custom class, or a
            typehints.TypeConstraint.

    Example of annotating the types of side-inputs::

        FlatMap().with_input_types(int, int, bool)

    Raises:
        TypeError: If a hint is not a valid type-hint. See
            ``apache_beam.typehints.typehints.validate_composite_type_param``
            for further details.

    Returns:
        PTransform: A reference to this instance, allowing chaining of
        type-hinting related methods.
    """
    super().with_input_types(input_type_hint)
    # Normalize typing.* hints to Beam's internal type representation.
    side_inputs_arg_hints = native_type_compatibility.convert_to_beam_types(side_inputs_arg_hints)
    side_input_kwarg_hints = native_type_compatibility.convert_to_beam_types(side_input_kwarg_hints)
    for si in side_inputs_arg_hints:
        validate_composite_type_param(si, 'Type hints for a PTransform')
    for si in side_input_kwarg_hints.values():
        validate_composite_type_param(si, 'Type hints for a PTransform')
    # NOTE(review): only positional side-input hints are stored on self;
    # keyword hints are validated and forwarded to the base call only.
    self.side_inputs_types = side_inputs_arg_hints
    return WithTypeHints.with_input_types(self, input_type_hint, *side_inputs_arg_hints, **side_input_kwarg_hints)
def __getattr__(self, key):
    """Dynamic attribute access for string operations.

    Args:
        key: attribute name being looked up.

    Returns:
        A StringSeriesWeld wrapper when ``key`` is 'str' and the underlying
        Weld type is a vector of chars.

    Raises:
        AttributeError: for any other attribute.
    """
    # Guard clause: anything other than `.str` on a char-vector series is
    # not handled here.
    if not (key == 'str' and self.weld_type == WeldVec(WeldChar())):
        raise AttributeError("Attr %s does not exist" % key)
    return StringSeriesWeld(self.expr, self.weld_type, self.df, self.column_name)
def update_resource_assignments(self, id_or_uri, resource_assignments, timeout=-1):
    """Modifies scope membership by adding or removing resource assignments.

    Args:
        id_or_uri: Can be either the resource ID or the resource URI.
        resource_assignments (dict):
            A dict object with a list of resource URIs to be added and a list
            of resource URIs to be removed.
        timeout: Timeout in seconds. Wait for task completion by default. The
            timeout does not abort the operation in OneView; it just stops
            waiting for its completion.

    Returns:
        dict: Updated resource.
    """
    target_uri = '{}/resource-assignments'.format(self._client.build_uri(id_or_uri))
    json_headers = {'Content-Type': 'application/json'}
    return self._client.patch_request(target_uri, resource_assignments, timeout=timeout, custom_headers=json_headers)
def truncated_normal(self, shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, name=None):
    """Outputs random values from a truncated normal distribution.

    The generated values follow a normal distribution with specified mean and
    standard deviation, except that values whose magnitude is more than
    2 standard deviations from the mean are dropped and re-picked.

    Args:
        shape: A 1-D integer Tensor or Python array. The shape of the output
            tensor.
        mean: A 0-D Tensor or Python value of type `dtype`. The mean of the
            truncated normal distribution.
        stddev: A 0-D Tensor or Python value of type `dtype`. The standard
            deviation of the normal distribution, before truncation.
        dtype: The type of the output.
        name: A name for the operation (optional).

    Returns:
        A tensor of the specified shape filled with random truncated normal
        values.
    """
    with ops.name_scope(name, 'truncated_normal', [shape, mean, stddev]) as scope_name:
        shape_t = _shape_tensor(shape)
        mean_t = ops.convert_to_tensor(mean, dtype=dtype, name='mean')
        stddev_t = ops.convert_to_tensor(stddev, dtype=dtype, name='stddev')
        # Sample unit truncated normals, then scale and shift.
        samples = self._truncated_normal(shape_t, dtype=dtype)
        scaled = samples * stddev_t
        return math_ops.add(scaled, mean_t, name=scope_name)
def ingest_data(ingested_dataset_path: str, base_artifact_path: str):
    """Data ingestion step that returns an uri
    to the data it has 'ingested' as jsonlines.

    Args:
        ingested_dataset_path (str): output file to which the target uri is
            written.
        base_artifact_path (str): base path under which artifacts are stored.
    """
    # Timestamp makes the artifact name unique per run.
    timestamp = int(time.time())
    target_path = f'{base_artifact_path}/ingestion/ingested_dataset_{timestamp}.jsonl'
    # NOTE(review): the next line is truncated in this extraction — it
    # presumably rewrites the 'gs://' scheme to a GCS-FUSE mount path;
    # confirm against the original file.
    target_path_gcsfuse = target_path.replace('gs:
    Path(target_path_gcsfuse).parent.mkdir(parents=True, exist_ok=True)
    with open(target_path_gcsfuse, 'w') as f:
        # NOTE(review): this literal is also truncated mid-URL in the
        # extraction; confirm against the original file.
        f.writelines(['{"image_id": 318556, "id": 255, "caption": "An angled view of a beautifully decorated bathroom.", "image_url": "http:
    Path(ingested_dataset_path).parent.mkdir(parents=True, exist_ok=True)
    with open(ingested_dataset_path, 'w') as f:
        f.write(target_path)
def deserialize(self, encoded_accumulator):
    """Deserialize an accumulator received from 'serialize()'.

    This function deserializes an accumulator serialized by 'serialize()'.

    Args:
        encoded_accumulator: A byte string representing an accumulator.

    Returns:
        The accumulator represented by the passed byte_string.
    """
    # Abstract hook: subclasses must implement the actual decoding.
    pass
def match(self, path):
    """Return route handler with arguments if path matches this route.

    Arguments:
        path (str): Request path

    Returns:
        tuple or None: A tuple of three items:

        1. Route handler (callable)
        2. Positional arguments (list)
        3. Keyword arguments (dict)

        ``None`` if the route does not match the path.
    """
    found = self._re.search(path)
    if found is None:
        return None
    args, kwargs = [], {}
    # Pair each wildcard with its captured group; '!' wildcards are
    # matched but intentionally discarded.
    for wildcard, raw in zip(self._wildcards, found.groups()):
        if wildcard.name == '!':
            continue
        converted = wildcard.value(raw)
        if wildcard.name:
            kwargs[wildcard.name] = converted
        else:
            args.append(converted)
    return self._callback, args, kwargs
def align_up(offset, align):
    """Align ``offset`` up to ``align`` boundary.

    Args:
        offset (int): value to be aligned.
        align (int): alignment boundary.

    Returns:
        int: aligned offset.

    >>> align_up(3, 2)
    4
    >>> align_up(3, 1)
    3
    """
    remainder = offset % align
    return offset if remainder == 0 else offset + (align - remainder)
def compress_encoder(inputs,
                     hparams,
                     strides=(2, 2),
                     kernel_size=(3, 3),
                     name=None):
    """Encoder that compresses 2-D inputs by 2**num_compress_steps.

    Args:
        inputs: Tensor of shape [batch, height, width, channels].
        hparams: HParams.
        strides: Tuple, strides for conv block.
        kernel_size: Tuple, kernel window size for conv block.
        name: string, variable scope.

    Returns:
        Tensor of shape [batch, latent_length, hparams.hidden_size], where
        latent_length is
        hparams.num_latents * (height*width) / 2**(hparams.num_compress_steps).
    """
    with tf.variable_scope(name, default_name="compress"):
        x = inputs
        # NOTE(review): the loop header below is truncated in this extraction
        # (missing the closing of the range expression); confirm against the
        # original file before relying on this code.
        for i in range(hparams.num_compress_steps
            with tf.variable_scope("compress_conv_%d" % i):
                # Strided conv block halves the spatial resolution each step.
                y = common_layers.conv_block(
                    common_layers.layer_norm(
                        x, hparams.hidden_size, name="lnorm"),
                    hparams.hidden_size,
                    dilation_rates_and_kernel_sizes=[((1, 1), kernel_size)],
                    strides=strides,
                    padding="SAME",
                    name="compress_conv_%d" % i)
                y = tf.nn.dropout(y, 1.0 - hparams.dropout)
                if hparams.do_compress_attend:
                    y = compress_self_attention_layer(
                        x, hparams, name="compress_selfatt_%d" % i)
                    y += x
                x = y
        x = residual_block_layer(x, hparams)
        shape_x = common_layers.shape_list(x)
        # Project to num_latents codes per position, then flatten spatially.
        x = tf.layers.dense(x,
                            hparams.num_latents * hparams.hidden_size,
                            name=name + "_dense")
        return tf.reshape(x, [shape_x[0],
                              shape_x[1] * shape_x[2] * hparams.num_latents,
                              hparams.hidden_size])
def get_gcps(self):
    """Read ground control points (GCPs) from the underlying file handle.

    Returns:
        tuple: ``((xpoints, ypoints), (gcp_lons, gcp_lats, gcp_alts), gcps)``
        where ``xpoints``/``ypoints`` are the unique pixel and line indices
        (1-D arrays) and the lon/lat/alt arrays are reshaped onto the
        (line, pixel) grid. Assumes the GCPs form a complete regular grid in
        row-major order.
    """
    gcps = self.filehandle.gcps
    # One row per GCP: (line, pixel, lon, lat, alt).
    # (Fixes the mangled `gcp_array[(:, 0)]` indexing, which is a
    # SyntaxError — parenthesized slices are not valid Python.)
    gcp_array = np.array([(p.row, p.col, p.x, p.y, p.z) for p in gcps[0]])
    ypoints = np.unique(gcp_array[:, 0])
    xpoints = np.unique(gcp_array[:, 1])
    grid_shape = (ypoints.shape[0], xpoints.shape[0])
    gcp_lons = gcp_array[:, 2].reshape(grid_shape)
    gcp_lats = gcp_array[:, 3].reshape(grid_shape)
    gcp_alts = gcp_array[:, 4].reshape(grid_shape)
    return ((xpoints, ypoints), (gcp_lons, gcp_lats, gcp_alts), gcps)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.