code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def coord_list_mapping_pbc(subset, superset, atol=1e-8):
atol = np.array([1., 1. ,1.]) * atol
return cuc.coord_list_mapping_pbc(subset, superset, atol) | Gives the index mapping from a subset to a superset.
Superset cannot contain duplicate matching rows
Args:
subset, superset: List of frac_coords
Returns:
list of indices such that superset[indices] = subset | juraj-google-style |
def _compile_weights_loss_and_weighted_metrics(self, sample_weights=None):
with backend.get_graph().as_default():
if sample_weights is not None:
self._update_sample_weight_modes(sample_weights)
self._prepare_sample_weights(sample_weights)
masks = self._prepare_output_masks()
self._handle_metrics(self.outputs, targets=self._targets, skip_target_masks=self._prepare_skip_target_masks(), sample_weights=self.sample_weights, masks=masks, return_weighted_metrics=True)
self.total_loss = self._prepare_total_loss(masks) | Compiles the model loss and weighted metric sub-graphs.
This may be used to set graph tensors as sample weights (instead of creating
placeholders).
Args:
sample_weights: List of tensors to use as the sample weights. Must be the
same length as the number of outputs. If left as `None`, placeholders
are used instead. | github-repos |
def generate_cot_body(context):
try:
cot = {
'artifacts': get_cot_artifacts(context),
'chainOfTrustVersion': 1,
'runId': context.claim_task['runId'],
'task': context.task,
'taskId': context.claim_task['status']['taskId'],
'workerGroup': context.claim_task['workerGroup'],
'workerId': context.config['worker_id'],
'workerType': context.config['worker_type'],
'environment': get_cot_environment(context),
}
except (KeyError, ) as exc:
raise ScriptWorkerException("Can't generate chain of trust! {}".format(str(exc)))
return cot | Generate the chain of trust dictionary.
This is the unsigned and unformatted chain of trust artifact contents.
Args:
context (scriptworker.context.Context): the scriptworker context.
Returns:
dict: the unsigned and unformatted chain of trust artifact contents.
Raises:
ScriptWorkerException: on error. | juraj-google-style |
def _generate_mark_code(rule_name):
code = ''.join([i for i in str(rule_name) if i.isdigit()])
code = code.zfill(2)
return code | Generates a two digit string based on a provided string
Args:
rule_name (str): A configured rule name 'pytest_mark3'.
Returns:
str: A two digit code based on the provided string '03' | juraj-google-style |
def _set_axis_limits(self, which, lims, d, scale, reverse=False):
setattr(self.limits, (which + 'lims'), lims)
setattr(self.limits, ('d' + which), d)
setattr(self.limits, (which + 'scale'), scale)
if reverse:
setattr(self.limits, (('reverse_' + which) + '_axis'), True)
return | Private method for setting axis limits.
Sets the axis limits on each axis for an individual plot.
Args:
which (str): The indicator of which part of the plots
to adjust. This currently handles `x` and `y`.
lims (len-2 list of floats): The limits for the axis.
d (float): Amount to increment by between the limits.
scale (str): Scale of the axis. Either `log` or `lin`.
reverse (bool, optional): If True, reverse the axis tick marks. Default is False. | codesearchnet |
def extraction_data_statistics(path):
with functions.DBContextManager(path) as session:
extraction = session.query(models.Extraction).first()
X, y = extraction.return_main_dataset()
functions.verify_dataset(X, y)
if extraction.test_dataset['method'] == 'split_from_main':
X, X_test, y, y_test = train_test_split(
X,
y,
test_size=extraction.test_dataset['split_ratio'],
random_state=extraction.test_dataset['split_seed'],
stratify=y
)
elif extraction.test_dataset['method'] == 'source':
if 'source' not in extraction.test_dataset or not extraction.test_dataset['source']:
raise exceptions.UserError('Source is empty')
extraction_code = extraction.test_dataset["source"]
extraction_function = functions.\
import_object_from_string_code(extraction_code, "extract_test_dataset")
X_test, y_test = extraction_function()
else:
X_test, y_test = None, None
extraction_code = extraction.meta_feature_generation['source']
return_splits_iterable = functions.import_object_from_string_code(
extraction_code,
'return_splits_iterable'
)
number_of_splits = 0
test_indices = []
try:
for train_idx, test_idx in return_splits_iterable(X, y):
number_of_splits += 1
test_indices.append(test_idx)
except Exception as e:
raise exceptions.UserError('User code exception', exception_message=str(e))
test_indices = np.concatenate(test_indices)
X, y = X[test_indices], y[test_indices]
extraction_code = extraction.stacked_ensemble_cv['source']
return_splits_iterable = functions.import_object_from_string_code(
extraction_code,
'return_splits_iterable'
)
number_of_splits_stacked_cv = 0
try:
for train_idx, test_idx in return_splits_iterable(X, y):
number_of_splits_stacked_cv += 1
except Exception as e:
raise exceptions.UserError('User code exception', exception_message=str(e))
data_stats = dict()
data_stats['train_data_stats'] = functions.verify_dataset(X, y)
if X_test is not None:
data_stats['test_data_stats'] = functions.verify_dataset(X_test, y_test)
else:
data_stats['test_data_stats'] = None
data_stats['holdout_data_stats'] = {'number_of_splits': number_of_splits}
data_stats['stacked_ensemble_cv_stats'] = {'number_of_splits': number_of_splits_stacked_cv}
extraction.data_statistics = data_stats
session.add(extraction)
session.commit() | Generates data statistics for the given data extraction setup stored
in Xcessiv notebook.
This is in rqtasks.py but not as a job yet. Temporarily call this directly
while I'm figuring out Javascript lel.
Args:
path (str, unicode): Path to xcessiv notebook | juraj-google-style |
def reindex(self, axis, labels, **kwargs):
def reindex_builer(df, axis, old_labels, new_labels, **kwargs):
if axis:
while (len(df.columns) < len(old_labels)):
df[len(df.columns)] = np.nan
df.columns = old_labels
new_df = df.reindex(columns=new_labels, **kwargs)
new_df.columns = pandas.RangeIndex(len(new_df.columns))
return new_df
else:
while (len(df.index) < len(old_labels)):
df.loc[len(df.index)] = np.nan
df.index = old_labels
new_df = df.reindex(index=new_labels, **kwargs)
new_df.reset_index(inplace=True, drop=True)
return new_df
old_labels = (self.columns if axis else self.index)
new_index = (self.index if axis else labels)
new_columns = (labels if axis else self.columns)
func = self._prepare_method((lambda df: reindex_builer(df, axis, old_labels, labels, **kwargs)))
new_data = self._map_across_full_axis(axis, func)
        return self.__constructor__(new_data, new_index, new_columns) | Fits a new index for this Manager.
Args:
axis: The axis index object to target the reindex on.
labels: New labels to conform 'axis' on to.
Returns:
A new QueryCompiler with updated data and new index. | codesearchnet |
def __init__(self, *, verbose: bool = False) -> None:
self.verbose = verbose
self._start = None
        self.elapsed = None | Configure the Timing context manager.
Args:
verbose: Print elapsed time | juraj-google-style |
def _merge_assets_key_collection(saved_model_proto, path):
for meta_graph in saved_model_proto.meta_graphs:
node_asset_map = {}
if tf_v1.saved_model.constants.ASSETS_KEY in meta_graph.collection_def:
assets_any_proto = meta_graph.collection_def[
tf_v1.saved_model.constants.ASSETS_KEY].any_list.value
for asset_any_proto in assets_any_proto:
asset_proto = meta_graph_pb2.AssetFileDef()
asset_any_proto.Unpack(asset_proto)
asset_filename = _get_asset_filename(path, asset_proto.filename)
node_asset_map[_get_node_name_from_tensor(
asset_proto.tensor_info.name)] = asset_filename
del meta_graph.collection_def[tf_v1.saved_model.constants.ASSETS_KEY]
for node in meta_graph.graph_def.node:
asset_filepath = node_asset_map.get(node.name)
if asset_filepath:
_check_asset_node_def(node)
node.attr["value"].tensor.string_val[0] = asset_filepath | Merges the ASSETS_KEY collection into the GraphDefs in saved_model_proto.
Removes the ASSETS_KEY collection from the GraphDefs in the SavedModel and
modifies nodes with the assets filenames to point to the assets in `path`.
After this transformation, the SavedModel GraphDefs can be used without
feeding asset tensors.
Args:
saved_model_proto: SavedModel proto to be modified.
path: path where the SavedModel is being loaded from. | juraj-google-style |
def load_parameters(distribution, method_name, parameters=None, cache=None, cache_key=(lambda x: x)):
from .. import baseclass
if (cache is None):
cache = {}
if (parameters is None):
parameters = {}
parameters_ = distribution.prm.copy()
parameters_.update(**parameters)
parameters = parameters_
if contains_call_signature(getattr(distribution, method_name), 'cache'):
parameters['cache'] = cache
else:
for (key, value) in parameters.items():
if isinstance(value, baseclass.Dist):
value = cache_key(value)
if (value in cache):
parameters[key] = cache[value]
else:
raise baseclass.StochasticallyDependentError('evaluating under-defined distribution {}.'.format(distribution))
return parameters | Load parameter values by filling them in from cache.
Args:
distribution (Dist):
The distribution to load parameters from.
method_name (str):
Name of the method for where the parameters should be used.
Typically ``"_pdf"``, ``_cdf`` or the like.
parameters (:py:data:typing.Any):
Default parameters to use if there are no cache to retrieve. Use
the distributions internal parameters, if not provided.
cache (:py:data:typing.Any):
A dictionary containing previous evaluations from the stack. If
a parameters contains a distribution that contains in the cache, it
will be replaced with the cache value. If omitted, a new one will
be created.
cache_key (:py:data:typing.Any)
Redefine the keys of the cache to suite other purposes.
Returns:
Same as ``parameters``, if provided. The ``distribution`` parameter if
not. In either case, parameters may be updated with cache values (if
provided) or by ``cache`` if the call signature of ``method_name`` (on
``distribution``) contains an ``cache`` argument. | codesearchnet |
def transform_tensor(self, tensor):
dim = tensor.shape
rank = len(dim)
assert all([(i == 3) for i in dim])
lc = string.ascii_lowercase
indices = (lc[:rank], lc[rank:(2 * rank)])
einsum_string = ','.join([(a + i) for (a, i) in zip(*indices)])
einsum_string += ',{}->{}'.format(*indices[::(- 1)])
einsum_args = (([self.rotation_matrix] * rank) + [tensor])
return np.einsum(einsum_string, *einsum_args) | Applies rotation portion to a tensor. Note that tensor has to be in
full form, not the Voigt form.
Args:
tensor (numpy array): a rank n tensor
Returns:
Transformed tensor. | codesearchnet |
def __init__(self, data_manager, axis, func):
self.data_manager = data_manager
self.axis = axis
self.index_iter = (
iter(self.data_manager.columns)
if axis
else iter(range(len(self.data_manager.index)))
)
self.func = func | PartitionIterator class to define a generator on partitioned data
Args:
data_manager (DataManager): Data manager for the dataframe
axis (int): axis to iterate over
func (callable): The function to get inner iterables from
each partition | juraj-google-style |
def setup_privnet(self, host=None):
self.setup(FILENAME_SETTINGS_PRIVNET)
if isinstance(host, str):
if (':' in host):
raise Exception('No protocol prefix or port allowed in host, use just the IP or domain.')
print('Using custom privatenet host:', host)
self.SEED_LIST = [('%s:20333' % host)]
self.RPC_LIST = [('http:
print('- P2P:', ', '.join(self.SEED_LIST))
print('- RPC:', ', '.join(self.RPC_LIST))
self.check_privatenet() | Load settings from the privnet JSON config file
Args:
host (string, optional): if supplied, uses this IP or domain as neo nodes. The host must
use these standard ports: P2P 20333, RPC 30333. | codesearchnet |
def all_arguments(cls, function, arguments):
if isinstance(arguments, dict):
arguments = Arguments(**arguments)
elif not isinstance(arguments, Arguments):
arguments = Arguments(*arguments)
return cls(function, arguments) | Helper function for creating `FunctionCall`s with `Arguments`.
Args:
function: The value to store for the action function.
arguments: The values to store for the arguments of the action. Can either
be an `Arguments` object, a `dict`, or an iterable. If a `dict` or an
iterable is provided, the values will be unpacked into an `Arguments`
object.
Returns:
A new `FunctionCall` instance. | juraj-google-style |
def ne(self, other, axis="columns", level=None):
return self._binary_op("ne", other, axis=axis, level=level) | Checks element-wise that this is not equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the ne over.
level: The Multilevel index level to apply ne over.
Returns:
A new DataFrame filled with Booleans. | juraj-google-style |
def RegisterMountPoint(cls, mount_point, path_spec):
if (mount_point in cls._mount_points):
raise KeyError('Mount point: {0:s} already set.'.format(mount_point))
cls._mount_points[mount_point] = path_spec | Registers a path specification mount point.
Args:
mount_point (str): mount point identifier.
path_spec (PathSpec): path specification of the mount point.
Raises:
KeyError: if the corresponding mount point is already set. | codesearchnet |
def backup(filenames, prefix='error'):
num = max(([0] + [int(f.split('.')[1]) for f in glob('{}.*.tar.gz'.format(prefix))]))
filename = '{}.{}.tar.gz'.format(prefix, (num + 1))
logging.info('Backing up run to {}.'.format(filename))
with tarfile.open(filename, 'w:gz') as tar:
for fname in filenames:
for f in glob(fname):
tar.add(f) | Backup files to a tar.gz file. Used, for example, in backing up the
files of an errored run before performing corrections.
Args:
filenames ([str]): List of files to backup. Supports wildcards, e.g.,
*.*.
prefix (str): prefix to the files. Defaults to error, which means a
series of error.1.tar.gz, error.2.tar.gz, ... will be generated. | codesearchnet |
def Sample(self, operation, description, data_size, compressed_data_size):
sample_time = time.time()
sample = '{0:f}\t{1:s}\t{2:s}\t{3:d}\t{4:d}\n'.format(sample_time, operation, description, data_size, compressed_data_size)
self._WritesString(sample) | Takes a sample of data read or written for profiling.
Args:
operation (str): operation, either 'read' or 'write'.
description (str): description of the data read.
data_size (int): size of the data read in bytes.
compressed_data_size (int): size of the compressed data read in bytes. | codesearchnet |
def unkown_field(self, value=None):
if value is not None:
try:
value = str(value)
except ValueError:
raise ValueError('value {} need to be of type str '
'for field `unkown_field`'.format(value))
if ',' in value:
raise ValueError('value should not contain a comma '
'for field `unkown_field`')
self._unkown_field = value | Corresponds to IDD Field `unkown_field` Empty field in data.
Args:
value (str): value for IDD Field `unkown_field`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | juraj-google-style |
def install_requirements(self, path, index=None):
cmd = 'install -r {0}'.format(path)
if index:
cmd = 'install --index-url {0} -r {1}'.format(index, path)
self.pip(cmd) | Install packages from a requirements.txt file.
Args:
path (str): The path to the requirements file.
index (str): The URL for a pypi index to use. | juraj-google-style |
def floodlight_monitor(config, task: dict) -> None:
if config.verbose:
print('FLOODLIGHT MONITOR')
if 'template' in task['sheet']:
sheets_tab_copy(config, task['auth'], task['sheet']['template']['sheet'], task['sheet']['template']['tab'], task['sheet']['sheet'], task['sheet']['tab'])
triggers = sheets_read(config, task['auth'], task['sheet']['sheet'], task['sheet']['tab'], task['sheet']['range'])
if config.verbose and len(triggers) == 0:
print('FLOODLIGHT MONITOR: No floodlight ids specified in sheet.')
alerts = {}
day = None
for trigger in triggers:
trigger.append(floodlight_report(config, task, trigger[TRIGGER_ID]))
for trigger in triggers:
rows = floodlight_rows(config, task, trigger[TRIGGER_REPORT])
last_day, rows = floodlight_analysis(config, task, rows)
if last_day:
day = last_day if day is None else max(day, last_day)
alerts.setdefault(trigger[TRIGGER_EMAIL], [])
alerts[trigger[TRIGGER_EMAIL]].extend(rows)
if alerts:
floodlight_email(config, task, day, alerts) | The task handler. See module description.
Args:
Everything is passed using task.
Returns:
Nothing. | github-repos |
def _ReceiveItemOnActivity(self, zmq_socket):
events = zmq_socket.poll(self._ZMQ_SOCKET_RECEIVE_TIMEOUT_MILLISECONDS)
if events:
try:
received_object = self._zmq_socket.recv_pyobj()
return received_object
except zmq.error.Again:
logger.error('{0:s}. Failed to receive item in time.'.format(self.name))
raise
except zmq.error.ZMQError as exception:
if (exception.errno == errno.EINTR):
logger.error('ZMQ syscall interrupted in {0:s}. Queue aborting.'.format(self.name))
raise
raise errors.QueueEmpty | Attempts to receive an item from a ZeroMQ socket.
Args:
zmq_socket (zmq.Socket): used to the receive the item.
Returns:
object: item from the socket.
Raises:
QueueEmpty: if no item could be received within the timeout.
zmq.error.ZMQError: if an error occurs in ZeroMQ | codesearchnet |
def load_image(buf, request_components=0):
x = ffi.new('int*')
y = ffi.new('int*')
n = ffi.new('int*')
cbuf = ffi.from_buffer(buf)
bitmap = lib.stbi_load_from_memory(
ffi.cast('unsigned char*', cbuf), len(buf), x, y, n,
request_components
)
pybuffer = ffi.buffer(bitmap, x[0]*y[0]*n[0])
return pybuffer, x[0], y[0], n[0] | Load a png or jpeg image into a bitmap buffer.
Args:
buf (Buffer): Buffer to load
request_components (int): If you want to force number of components
Returns:
A tuple containing:
- Bitmap buffer
- width of bitmap
- height of bitmap
- number of components | juraj-google-style |
def get_default_mesh() -> Optional[layout_lib.Mesh]:
if _dtensor_singleton is None:
return None
else:
return _dtensor_singleton._current_default_mesh | Return the default mesh under the current dtensor device context.
In the case that dtensor device system is not initialized, this function
will return None.
Returns:
The current default mesh for the dtensor device context. | github-repos |
def poll(self, timeout=None):
if (not isinstance(timeout, (int, float, type(None)))):
raise TypeError('Invalid timeout type, should be integer, float, or None.')
p = select.epoll()
p.register(self._fd, ((select.EPOLLIN | select.EPOLLET) | select.EPOLLPRI))
for _ in range(2):
events = p.poll(timeout)
if events:
try:
os.lseek(self._fd, 0, os.SEEK_SET)
except OSError as e:
raise GPIOError(e.errno, ('Rewinding GPIO: ' + e.strerror))
return True
return False | Poll a GPIO for the edge event configured with the .edge property.
`timeout` can be a positive number for a timeout in seconds, 0 for a
non-blocking poll, or negative or None for a blocking poll. Defaults to
blocking poll.
Args:
timeout (int, float, None): timeout duration in seconds.
Returns:
bool: ``True`` if an edge event occurred, ``False`` on timeout.
Raises:
GPIOError: if an I/O or OS error occurs.
TypeError: if `timeout` type is not None or int. | codesearchnet |
def default_float_type():
if not is_prefer_float32() and is_allow_float64():
return float64
else:
return float32 | Gets the default float type.
Returns:
If `is_prefer_float32()` is false and `is_allow_float64()` is true, returns
float64; otherwise returns float32. | github-repos |
def apply(self, value: Any, allow_partial: bool=False, transform_fn: Optional[Callable[[utils.KeyPath, 'Field', Any], Any]]=None, root_path: Optional[utils.KeyPath]=None) -> Any:
value = self._value.apply(value, allow_partial=allow_partial, child_transform=transform_fn, root_path=root_path)
if transform_fn:
value = transform_fn(root_path, self, value)
return value | Apply current field to a value, which validate and complete the value.
Args:
value: Value to validate against this spec.
allow_partial: Whether partial value is allowed. This is for dict or
nested dict values.
transform_fn: Function to transform applied value into final value.
root_path: Key path for root.
Returns:
final value.
When allow_partial is set to False (default), only fully qualified value
is acceptable. When allow_partial is set to True, missing fields will
be placeheld using MISSING_VALUE.
Raises:
KeyError: if additional key is found in value, or required key is missing
and allow_partial is set to False.
TypeError: if type of value is not the same as spec required.
ValueError: if value is not acceptable, or value is MISSING_VALUE while
allow_partial is set to False. | github-repos |
def bin(self, bins, labels=None):
return dim(self, bin, bins, labels=labels) | Bins continuous values.
Bins continuous values using the provided bins and assigns labels
either computed from each bins center point or from the
supplied labels.
Args:
bins: List or array containing the bin boundaries
labels: List of labels to assign to each bin
If the bins are length N the labels should be length N-1 | juraj-google-style |
def format(self, record):
level = record.levelno
if level >= logging.CRITICAL:
color = self.CRITICAL
elif level >= logging.ERROR:
color = self.ERROR
elif level >= logging.WARNING:
color = self.WARNING
elif level >= logging.INFO:
color = self.INFO
elif level >= logging.DEBUG:
color = self.DEBUG
else:
color = self.DEFAULT
message = super().format(record)
if record.args:
try:
message = message % record.args
except TypeError:
pass
return color + message + self.DEFAULT | Adds colors to a log record and formats it with the default
Args:
record (logging.LogRecord): log record to format
Returns:
str: The colored and formatted record string | juraj-google-style |
def apply_filter(self, expr, value):
if self.skip(value):
return expr
if not self._valid_value(value):
msg = "Invalid value {value} passed to filter {name} - ".format(
value=repr(value),
name=self.name)
if self.default is not None:
warn(msg + "defaulting to {}".format(self.default))
value = self.default
else:
warn(msg + "skipping")
return expr
return self.func(expr, value) | Returns the given expression filtered by the given value.
Args:
expr (xpath.expression.AbstractExpression): The expression to filter.
value (object): The desired value with which the expression should be filtered.
Returns:
xpath.expression.AbstractExpression: The filtered expression. | juraj-google-style |
def get_visual_content(self, id_or_uri):
uri = self._client.build_uri(id_or_uri) + "/visualContent"
return self._client.get(uri) | Gets a list of visual content objects describing each rack within the data center. The response aggregates data
center and rack data with a specified metric (peak24HourTemp) to provide simplified access to display data for
the data center.
Args:
id_or_uri: Can be either the resource ID or the resource URI.
Return:
list: List of visual content objects. | juraj-google-style |
def convert_to_localized_md(model_list: str, localized_model_list: str, format_str: str) -> Tuple[bool, str]:
def _rep(match):
title, model_link, paper_affiliations, paper_title_link, paper_authors, supplements = match.groups()
return format_str.format(title=title, model_link=model_link, paper_affiliations=paper_affiliations, paper_title_link=paper_title_link, paper_authors=paper_authors, supplements=' ' + supplements.strip() if len(supplements) != 0 else '')
_re_capture_meta = re.compile('\\*\\*\\[([^\\]]*)\\]\\(([^\\)]*)\\)\\*\\* \\(from ([^)]*)\\)[^\\[]*([^\\)]*\\)).*?by (.*?[A-Za-z\\*]{2,}?)\\. (.*)$')
_re_capture_title_link = re.compile('\\*\\*\\[([^\\]]*)\\]\\(([^\\)]*)\\)\\*\\*')
_re_capture_paper_link = re.compile(' \\[([^\\]]*)\\]\\(([^\\)]*)\\)')
if len(localized_model_list) == 0:
localized_model_index = {}
else:
try:
localized_model_index = {re.search('\\*\\*\\[([^\\]]*)', line).groups()[0]: line for line in localized_model_list.strip().split('\n')}
except AttributeError:
raise AttributeError('A model name in localized READMEs cannot be recognized.')
model_keys = [re.search('\\*\\*\\[([^\\]]*)', line).groups()[0] for line in model_list.strip().split('\n')]
readmes_match = not any((k not in model_keys for k in localized_model_index))
localized_model_index = {k: v for k, v in localized_model_index.items() if k in model_keys}
for model in model_list.strip().split('\n'):
title, model_link = _re_capture_title_link.search(model).groups()
if title not in localized_model_index:
readmes_match = False
localized_model_index[title] = _re_capture_meta.sub(_rep, model + ' ')
elif _re_fill_pattern.search(localized_model_index[title]) is not None:
update = _re_capture_meta.sub(_rep, model + ' ')
if update != localized_model_index[title]:
readmes_match = False
localized_model_index[title] = update
else:
converted_model = _re_capture_title_link.sub(f'**[{title}]({model_link})**', localized_model_index[title], count=1)
paper_title_link = _re_capture_paper_link.search(model)
if paper_title_link is not None:
paper_title, paper_link = paper_title_link.groups()
converted_model = _re_capture_paper_link.sub(f' [{paper_title}]({paper_link})', converted_model, count=1)
if converted_model != localized_model_index[title]:
readmes_match = False
localized_model_index[title] = converted_model
sorted_index = sorted(localized_model_index.items(), key=lambda x: x[0].lower())
return (readmes_match, '\n'.join((x[1] for x in sorted_index)) + '\n') | Compare the model list from the main README to the one in a localized README.
Args:
model_list (`str`): The model list in the main README.
localized_model_list (`str`): The model list in one of the localized README.
format_str (`str`):
The template for a model entry in the localized README (look at the `format_model_list` in the entries of
`LOCALIZED_READMES` for examples).
Returns:
`Tuple[bool, str]`: A tuple where the first value indicates if the READMEs match or not, and the second value
is the correct localized README. | github-repos |
def load_tensor_from_event(event):
tensor_proto = event.summary.value[0].tensor
shape = tensor_util.TensorShapeProtoToList(tensor_proto.tensor_shape)
num_elements = 1
for shape_dim in shape:
num_elements *= shape_dim
if tensor_proto.tensor_content or tensor_proto.string_val or (not num_elements):
if tensor_proto.dtype == types_pb2.DT_RESOURCE:
tensor_value = InconvertibleTensorProto(tensor_proto)
else:
try:
tensor_value = tensor_util.MakeNdarray(tensor_proto)
except KeyError:
tensor_value = InconvertibleTensorProto(tensor_proto)
else:
tensor_value = InconvertibleTensorProto(tensor_proto, False)
return tensor_value | Load a tensor from an Event proto.
Args:
event: The Event proto, assumed to hold a tensor value in its
summary.value[0] field.
Returns:
The tensor value loaded from the event file, as a `numpy.ndarray`, if
representation of the tensor value by a `numpy.ndarray` is possible.
For uninitialized Tensors, returns `None`. For Tensors of data types that
cannot be represented as `numpy.ndarray` (e.g., `tf.resource`), return
the `TensorProto` protobuf object without converting it to a
`numpy.ndarray`. | github-repos |
def isostr_to_datetime(dt_str):
if (len(dt_str) <= 20):
return datetime.datetime.strptime(dt_str, '%Y-%m-%dT%H:%M:%SZ')
else:
dt_str = dt_str.split('.')
        return isostr_to_datetime(('%sZ' % dt_str[0])) | Converts an ISO formatted text string into a datetime object
Args:
dt_str (str): ISO formated text string
Returns:
:obj:`datetime.datetime` | codesearchnet |
def _start_workflow_stages(pb: ProcessingBlock, pb_id: str, workflow_stage_dict: dict, workflow_stage: WorkflowStage, docker: DockerSwarmClient):
stage_data = workflow_stage_dict[workflow_stage.id]
stage_data['start'] = False
if (stage_data['status'] == 'none'):
if (not workflow_stage.dependencies):
stage_data['start'] = True
else:
dependency_status = []
for dependency in workflow_stage.dependencies:
dependency_status.append((workflow_stage_dict[dependency['value']]['status'] == 'complete'))
stage_data['start'] = all(dependency_status)
if stage_data['start']:
LOG.info('-- Starting workflow stage: %s --', workflow_stage.id)
LOG.info('Configuring EE templates.')
args_template = jinja2.Template(workflow_stage.args_template)
stage_params = pb.workflow_parameters[workflow_stage.id]
template_params = {**workflow_stage.config, **stage_params}
args = args_template.render(stage=template_params)
LOG.info('Resolving workflow script arguments.')
args = json.dumps(json.loads(args))
compose_template = jinja2.Template(workflow_stage.compose_template)
compose_str = compose_template.render(stage=dict(args=args))
compose_dict = yaml.load(compose_str)
service_names = compose_dict['services'].keys()
new_service_names = ['{}_{}_{}'.format(pb_id, pb.workflow_id, name) for name in service_names]
for (new, old) in zip(new_service_names, service_names):
compose_dict['services'][new] = compose_dict['services'].pop(old)
compose_str = yaml.dump(compose_dict)
service_ids = docker.create_services(compose_str)
LOG.info('Staring workflow containers:')
for service_id in service_ids:
service_name = docker.get_service_name(service_id)
LOG.info(' %s, %s ', service_name, service_id)
stage_data['services'][service_id] = {}
LOG.info('Created Services: %s', service_ids)
stage_data['services'][service_id] = dict(name=docker.get_service_name(service_id), status='running', complete=False)
stage_data['status'] = 'running' | Start a workflow stage by starting a number of docker services.
This function first assesses if the specified workflow stage can be
started based on its dependencies. If this is found to be the case,
the workflow stage is stared by first resolving and template arguments
in the workflow stage configuration, and then using the Docker Swarm Client
API to start workflow stage services. As part of this, the
workflow_stage_dict data structure is updated accordingly.
TODO(BMo) This function will need refactoring at some point as part
of an update to the way workflow state metadata is stored in the
configuration database. Currently the stage_data dictionary
is a bit of a hack for a badly specified Configuration Database
backed WorkflowStage object.
This function is used by `execute_processing_block`.
Args:
pb (ProcessingBlock): Configuration database Processing Block data
object
pb_id (str): Processing Block identifier
workflow_stage_dict (dict): Workflow stage metadata structure
workflow_stage (WorkflowStage): Workflow state configuration database
data object.
docker (DockerClient): Docker Swarm Client object. | codesearchnet |
def ContainsIgnoreCase(self, value):
self._awql = self._CreateSingleValueCondition(value, 'CONTAINS_IGNORE_CASE')
return self._query_builder | Sets the type of the WHERE clause as "contains ignore case".
Args:
value: The value to be used in the WHERE condition.
Returns:
The query builder that this WHERE builder links to. | juraj-google-style |
def _GetDecryptedStreamSize(self):
self._file_object.seek(0, os.SEEK_SET)
self._decrypter = self._GetDecrypter()
self._decrypted_data = b''
encrypted_data_offset = 0
encrypted_data_size = self._file_object.get_size()
decrypted_stream_size = 0
while (encrypted_data_offset < encrypted_data_size):
read_count = self._ReadEncryptedData(self._ENCRYPTED_DATA_BUFFER_SIZE)
if (read_count == 0):
break
encrypted_data_offset += read_count
decrypted_stream_size += self._decrypted_data_size
return decrypted_stream_size | Retrieves the decrypted stream size.
Returns:
int: decrypted stream size. | codesearchnet |
def url(self, pattern, method=None, type_cast=None):
if (not type_cast):
type_cast = {}
def decorator(function):
self.add(pattern, function, method, type_cast)
return function
return decorator | Decorator for registering a path pattern.
Args:
pattern (str): Regex pattern to match a certain path
method (str, optional): Usually used to define one of GET, POST,
PUT, DELETE. You may use whatever fits your situation though.
Defaults to None.
type_cast (dict, optional): Mapping between the param name and
one of `int`, `float` or `bool`. The value reflected by the
provided param name will than be casted to the given type.
Defaults to None. | codesearchnet |
def get_dag(nodes, downstream_fn) -> Tuple[(Dict, Dict)]:
    """Return a dag representation of the nodes passed.

    This is equally used for pipelines and pipeline runs.

    Args:
        nodes: `Operation` | `OperationRun` instances, the nodes to
            represent in the dag.
        downstream_fn: a function that returns the downstream nodes of a
            node.

    Returns:
        tuple: (dag, dict(node_id: node))
    """
    dag = {}
    node_index = {}
    # Single pass so `nodes` may be any iterable, including a generator.
    for node in nodes:
        dag[node.id] = set(downstream_fn(node))
        node_index[node.id] = node
    return (dag, node_index)
This is equally used for pipelines and pipeline runs.
Params:
nodes: an instance of `Operation` | `OperationRun` the nodes to represent en dag.
downstream_fn: a function that returns the downstream nodes of the a node.
Returns:
tuple: (dag, dict(node_id: node)) | codesearchnet |
class PromptDepthAnythingFeatureFusionLayer(nn.Module):
    """Feature fusion layer, merges feature maps from different stages.

    Args:
        config (`[PromptDepthAnythingConfig]`):
            Model configuration class defining the model architecture.
    """

    def __init__(self, config: PromptDepthAnythingConfig):
        super().__init__()
        self.projection = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=1, bias=True)
        self.residual_layer1 = PromptDepthAnythingPreActResidualLayer(config)
        self.residual_layer2 = PromptDepthAnythingPreActResidualLayer(config)
        self.prompt_depth_layer = PromptDepthAnythingLayer(config)

    def forward(self, hidden_state, residual=None, size=None, prompt_depth=None):
        # Fuse the skip connection, resampling it first if the spatial
        # dimensions differ from the current feature map.
        if residual is not None:
            if hidden_state.shape != residual.shape:
                residual = nn.functional.interpolate(residual, size=hidden_state.shape[2:], mode='bilinear', align_corners=False)
            hidden_state = hidden_state + self.residual_layer1(residual)
        hidden_state = self.residual_layer2(hidden_state)
        # Optionally inject the prompt depth signal, resized to match.
        if prompt_depth is not None:
            prompt_depth = nn.functional.interpolate(prompt_depth, size=hidden_state.shape[2:], mode='bilinear', align_corners=False)
            hidden_state = hidden_state + self.prompt_depth_layer(prompt_depth)
        # Upsample: 2x by default, or to an explicit target size.
        if size is None:
            hidden_state = nn.functional.interpolate(hidden_state, scale_factor=2, mode='bilinear', align_corners=True)
        else:
            hidden_state = nn.functional.interpolate(hidden_state, size=size, mode='bilinear', align_corners=True)
        return self.projection(hidden_state)
Args:
config (`[PromptDepthAnythingConfig]`):
Model configuration class defining the model architecture. | github-repos |
def register_filesystem_plugin(plugin_location):
    """Loads a TensorFlow FileSystem plugin.

    Args:
        plugin_location: Path to the plugin. Relative or absolute
            filesystem plugin path to a dynamic library file.

    Returns:
        None

    Raises:
        OSError: When the file to be loaded is not found.
        RuntimeError: when unable to load the library.
    """
    # Fail early with a precise ENOENT error instead of letting the
    # native loader report a less specific failure for a missing file.
    if not os.path.exists(plugin_location):
        raise OSError(errno.ENOENT, 'The file to load file system plugin from does not exist.', plugin_location)
    py_tf.TF_RegisterFilesystemPlugin(plugin_location)
Args:
plugin_location: Path to the plugin. Relative or absolute filesystem plugin
path to a dynamic library file.
Returns:
None
Raises:
OSError: When the file to be loaded is not found.
RuntimeError: when unable to load the library. | github-repos |
def sticky_attribute_assignment(trackable, name, value):
    """Adds dependencies, generally called from __setattr__.

    This behavior is shared between Trackable and Model. Respects
    NoDependency indicators, but otherwise makes trackable objects out of
    common data structures and tracks objects by their attribute names.

    Args:
        trackable: The object to add dependencies to (generally the one
            having an attribute assigned).
        name: The attribute name being assigned.
        value: The value being assigned. Not necessarily a trackable object.

    Returns:
        The value which should be stored in the attribute (unwrapped from a
        NoDependency object if necessary).
    """
    # A NoDependency wrapper means the caller explicitly opted out of
    # dependency tracking for this value.
    track = not isinstance(value, NoDependency)
    value = wrap_or_unwrap(value)
    if track and isinstance(value, base.Trackable):
        trackable._track_trackable(value, name=name, overwrite=True)
    return value
This behavior is shared between Trackable and Model.
Respects NoDependency indicators, but otherwise makes trackable objects
out of common data structures and tracks objects by their attribute names.
Args:
trackable: The object to add dependencies to (generally the one having
an attribute assigned).
name: The attribute name being assigned.
value: The value being assigned. Not necessarily a trackable object.
Returns:
The value which should be stored in the attribute (unwrapped from a
NoDependency object if necessary). | github-repos |
def get_role(self, name):
    """Get a single Role by name.

    Args:
        name (str): The name of the Role.

    Returns:
        (:obj:`Role`): The Role that matches the name or None.
    """
    address = _create_role_address(name)
    try:
        serialized = self._state_view.get(address=address)
    except KeyError:
        # No state entry at this address means the role does not exist.
        return None
    if serialized is None:
        return None
    role_list = _create_from_bytes(serialized, identity_pb2.RoleList)
    # The address may hold several roles; return the exact name match.
    for candidate in role_list.roles:
        if candidate.name == name:
            return candidate
    return None
Args:
name (str): The name of the Role.
Returns:
(:obj:`Role`): The Role that matches the name or None. | codesearchnet |
async def get_auth(request):
    """Returns the user_id associated with a particular request.

    Args:
        request: aiohttp Request object.

    Returns:
        The user_id associated with the request, or None if no user is
        associated with the request.

    Raises:
        RuntimeError: Middleware is not installed.
    """
    cached = request.get(AUTH_KEY)
    if cached:
        return cached
    policy = request.get(POLICY_KEY)
    if policy is None:
        raise RuntimeError('auth_middleware not installed')
    # Cache the resolved identity on the request for subsequent lookups.
    request[AUTH_KEY] = await policy.get(request)
    return request[AUTH_KEY]
Args:
request: aiohttp Request object.
Returns:
The user_id associated with the request, or None if no user is
associated with the request.
Raises:
RuntimeError: Middleware is not installed | juraj-google-style |
def build_to_target_size_from_token_counts(cls, target_size, token_counts, min_val, max_val, num_iterations=4):
    """Builds a SubwordTextTokenizer that has `vocab_size` near `target_size`.

    Uses simple recursive binary search to find a minimum token count that
    most closely matches the `target_size`.

    Args:
        target_size: Desired vocab_size to approximate.
        token_counts: A dictionary of token counts, mapping string to int.
        min_val: An integer; lower bound for the minimum token count.
        max_val: An integer; upper bound for the minimum token count.
        num_iterations: An integer; how many iterations of refinement.

    Returns:
        A SubwordTextTokenizer instance.

    Raises:
        ValueError: If `min_val` is greater than `max_val`.
    """
    if min_val > max_val:
        raise ValueError('Lower bound for the minimum token count is greater than the upper bound.')

    def bisect(min_val, max_val):
        """Bisection to find the right size."""
        # NOTE(review): the original source line was truncated here; the
        # integer midpoint below restores the standard bisection step.
        present_count = (max_val + min_val) // 2
        logger.info('Trying min_count %d' % present_count)
        subtokenizer = cls()
        subtokenizer.build_from_token_counts(token_counts, present_count, num_iterations)
        logger.info('min_count %d attained a %d vocab_size', present_count, subtokenizer.vocab_size)
        # Exact match, or the search interval is exhausted.
        if subtokenizer.vocab_size == target_size or min_val >= max_val:
            return subtokenizer
        # Vocab too large means the minimum count must rise, and vice versa.
        if subtokenizer.vocab_size > target_size:
            other_subtokenizer = bisect(present_count + 1, max_val)
        else:
            other_subtokenizer = bisect(min_val, present_count - 1)
        if other_subtokenizer is None:
            return subtokenizer
        # Keep whichever candidate lands closer to the target vocab size.
        if abs(other_subtokenizer.vocab_size - target_size) < abs(subtokenizer.vocab_size - target_size):
            return other_subtokenizer
        return subtokenizer

    return bisect(min_val, max_val)
Uses simple recursive binary search to find a minimum token count that most
closely matches the `target_size`.
Args:
target_size: Desired vocab_size to approximate.
token_counts: A dictionary of token counts, mapping string to int.
min_val: An integer; lower bound for the minimum token count.
max_val: An integer; upper bound for the minimum token count.
num_iterations: An integer; how many iterations of refinement.
Returns:
A SubwordTextTokenizer instance.
Raises:
ValueError: If `min_val` is greater than `max_val`. | codesearchnet |
def AttachUserList(client, ad_group_id, user_list_id):
    """Links the provided ad group and user list.

    Args:
        client: an AdWordsClient instance.
        ad_group_id: an int ad group ID.
        user_list_id: an int user list ID.

    Returns:
        The ad group criterion that was successfully created.
    """
    service = client.GetService('AdGroupCriterionService', 'v201809')
    criterion = {
        'xsi_type': 'BiddableAdGroupCriterion',
        'criterion': {'xsi_type': 'CriterionUserList', 'userListId': user_list_id},
        'adGroupId': ad_group_id,
    }
    # A single ADD operation attaches the user list to the ad group.
    response = service.mutate([{'operator': 'ADD', 'operand': criterion}])
    return response['value'][0]
Args:
client: an AdWordsClient instance.
ad_group_id: an int ad group ID.
user_list_id: an int user list ID.
Returns:
The ad group criterion that was successfully created. | codesearchnet |
def _parse_control_fields(self, fields, tag_id="tag"):
for field in fields:
params = field.params
if tag_id not in params:
continue
self.controlfields[params[tag_id]] = field.getContent().strip() | Parse control fields.
Args:
fields (list): list of HTMLElements
tag_id (str): parameter name, which holds the information, about
field name this is normally "tag", but in case of
oai_marc "id". | juraj-google-style |
def _render(self):
    """Create a Message version of this ErrorMessage.

    Returns:
        The Message instance of this ErrorMessage.

    Raises:
        Errors are propagated.
    """
    message = Message()
    # Problem section: list the reported problems, most recent first.
    message.add(Heading(tr('Problem'), **ORANGE_LEVEL_4_STYLE))
    message.add(Paragraph(tr('The following problem(s) were encountered whilst running the analysis.')))
    items = BulletedList()
    for p in reversed(self.problems):
        items.add(p)
    message.add(items)
    # Suggestion section: fall back to the standard suggestions when the
    # caller supplied none.
    message.add(Heading(tr('Suggestion'), **GREEN_LEVEL_4_STYLE))
    message.add(Paragraph(tr('You can try the following to resolve the issue:')))
    if (len(self.suggestions) < 1):
        suggestions = self.standard_suggestions()
        message.add(suggestions)
    else:
        items = BulletedList()
        for s in reversed(self.suggestions):
            if (s is not None):
                items.add(s)
        message.add(items)
    # Optional details section, only rendered when details were recorded.
    if (len(self.details) > 0):
        items = BulletedList()
        message.add(Heading(tr('Details'), **ORANGE_LEVEL_5_STYLE))
        message.add(Paragraph(tr('These additional details were reported when the problem occurred.')))
        for d in self.details:
            if (d is not None):
                items.add(d)
        message.add(items)
    # Diagnostics: raw tracebacks for debugging.
    message.add(Heading(tr('Diagnostics'), **TRACEBACK_STYLE))
    message.add(self.tracebacks)
    return message | Create a Message version of this ErrorMessage
Args:
none
Returns:
the Message instance of this ErrorMessage
Raises:
Errors are propagated | codesearchnet |
def _build_request(self, verb, verb_arguments):
method = getattr(self._component, verb)
method_args = {str(k): v for (k, v) in verb_arguments.items()}
return method(**method_args) | Builds HttpRequest object.
Args:
verb (str): Request verb (ex. insert, update, delete).
verb_arguments (dict): Arguments to be passed with the request.
Returns:
httplib2.HttpRequest: HttpRequest to be sent to the API. | codesearchnet |
def _set_variable_or_list_initializer(variable_or_list, ckpt_file, tensor_name):
    """Overrides initialization op of given variable or list of variables.

    Calls `_set_checkpoint_initializer` for each variable in the given list
    of variables.

    Args:
        variable_or_list: `tf.Variable` object or a list of `tf.Variable`
            objects.
        ckpt_file: string, full path of the checkpoint.
        tensor_name: Name of the tensor to load from the checkpoint.

    Raises:
        ValueError: if all objects in `variable_or_list` are not partitions
            of the same large variable.
    """
    if not isinstance(variable_or_list, (list, tuple)):
        # A single variable is restored whole, with an empty slice spec.
        _set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, '')
        return
    expected_name = None
    for variable in variable_or_list:
        slice_info = variable._save_slice_info
        # Every slice must belong to the same partitioned variable.
        if expected_name is None:
            expected_name = slice_info.full_name
        elif expected_name != slice_info.full_name:
            raise ValueError('Slices must all be from the same tensor: %s != %s' % (expected_name, slice_info.full_name))
        _set_checkpoint_initializer(variable, ckpt_file, tensor_name, slice_info.spec)
Calls `_set_checkpoint_initializer` for each variable in the given list of
variables.
Args:
variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
Raises:
ValueError: if all objects in `variable_or_list` are not partitions of the
same large variable. | github-repos |
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]]=None):
    """Converts the output of [`SegformerForSemanticSegmentation`] into
    semantic segmentation maps. Only supports PyTorch.

    Args:
        outputs ([`SegformerForSemanticSegmentation`]):
            Raw outputs of the model.
        target_sizes (`List[Tuple]` of length `batch_size`, *optional*):
            Requested final (height, width) of each prediction. If unset,
            predictions are not resized.

    Returns:
        `List[torch.Tensor]` of length `batch_size`; each item is a semantic
        segmentation map of shape (height, width) whose entries are semantic
        class ids.
    """
    logits = outputs.logits
    if target_sizes is None:
        # No resizing requested: a plain channel-wise argmax per image.
        class_maps = logits.argmax(dim=1)
        return [class_maps[index] for index in range(class_maps.shape[0])]
    if len(logits) != len(target_sizes):
        raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
    if is_torch_tensor(target_sizes):
        target_sizes = target_sizes.numpy()
    segmentation_maps = []
    for index in range(len(logits)):
        # Upsample the logits to the requested size before the argmax so
        # the class map matches the target resolution.
        upsampled = torch.nn.functional.interpolate(logits[index].unsqueeze(dim=0), size=target_sizes[index], mode='bilinear', align_corners=False)
        segmentation_maps.append(upsampled[0].argmax(dim=0))
    return segmentation_maps
Args:
outputs ([`SegformerForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`List[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
specified). Each entry of each `torch.Tensor` correspond to a semantic class id. | github-repos |
def describe_all(self, full=False):
    """Yield description information about all tables registered.

    Args:
        full (bool): Also include the description of post processors.
    """
    # Each registry entry is a factory; instantiate it to describe it.
    for table_name in self.tabs:
        yield self.tabs[table_name]().describe(full)
Args:
full (bool): Also prints description of post processors. | juraj-google-style |
def get_table_schema_from_string(schema):
    """Transform the string table schema into a
    :class:`~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema` instance.

    Args:
        schema (str): Comma separated ``name:type`` entries describing the
            BigQuery table to write, used if the table has to be created.

    Returns:
        ~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema:
            The parsed schema; every field is created with mode
            ``NULLABLE``.
    """
    table_schema = bigquery.TableSchema()
    for entry in schema.split(','):
        field_name, field_type = entry.strip().split(':')
        field = bigquery.TableFieldSchema()
        field.name = field_name
        field.type = field_type
        field.mode = 'NULLABLE'
        table_schema.fields.append(field)
    return table_schema
:class:`~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema` instance.
Args:
schema (str): The string schema to be used if the BigQuery table to write
has to be created.
Returns:
~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema:
The schema to be used if the BigQuery table to write has to be created
but in the :class:`~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema` format. | github-repos |
def rightClick(x=None, y=None, duration=0.0, tween=linear, pause=None, _pause=True):
    """Performs a right mouse button click.

    This is a wrapper function for click(x, y, 1, 0.0, 'right').

    The x and y parameters detail where the mouse event happens. If None,
    the current mouse position is used. If a float value, it is rounded
    down. If outside the boundaries of the screen, the event happens at
    the edge of the screen.

    Args:
        x (int, float, None, tuple, optional): The x position on the screen
            where the click happens. None by default. If tuple, this is
            used for x and y. If x is a str, it's considered a filename of
            an image to find on the screen with locateOnScreen() and click
            the center of.
        y (int, float, None, optional): The y position on the screen where
            the click happens. None by default.

    Returns:
        None
    """
    _failSafeCheck()
    # Delegate to click() with a single, instantaneous right-button press;
    # the automatic pause is applied once here rather than inside click().
    click(x, y, 1, 0.0, 'right', _pause=False)
    _autoPause(pause, _pause)
This is a wrapper function for click(x, y, 1, 0.0, 'right'). The x and y parameters detail where the mouse event happens. If None, the
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
click happens. None by default. If tuple, this is used for x and y.
If x is a str, it's considered a filename of an image to find on
the screen with locateOnScreen() and click the center of.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
Returns:
None | codesearchnet |
def _test_connection(url):
    """Attempt to connect to amqp.

    Args:
        url: string in the form "amqp://[user]:[password]@[host]"

    Raises:
        ValidationError: if the broker cannot be reached.
    """
    import pika

    try:
        # Opening a channel forces a full handshake with the broker.
        with closing(pika.BlockingConnection(pika.URLParameters(url))) as connection:
            connection.channel()
    except pika.exceptions.ConnectionClosed as exc:
        raise ValidationError(exc)
Args:
url: string in the form "amqp://[user]:[password]@[host]" | juraj-google-style |
def destroy_team(self):
    """Delete your Team.

    Deletes your Team. Can only be invoked when you have a team with only
    one member left (yourself).

    Returns:
        None
    """
    # No JSON body is expected back from the destroy endpoint.
    self._get_request().post(url=self.TEAM_DESTROY_URL, get_json=False)
Deletes your Team. Can only be invoked when you have a team with only one member left (yourself).
Returns:
None | codesearchnet |
def reviews(self, packageName, filterByDevice=False, sort=2, nb_results=None, offset=None):
    """Browse reviews for an application.

    Args:
        packageName (str): app unique ID.
        filterByDevice (bool): filter results for current device.
        sort (int): sorting criteria (values are unknown).
        nb_results (int): max number of reviews to return.
        offset (int): return reviews starting from an offset value.

    Returns:
        dict object containing all the protobuf data returned from the api.
    """
    # Build the query string incrementally; only set optional parameters
    # that the caller actually provided.
    path = REVIEWS_URL + '?doc={}&sort={}'.format(requests.utils.quote(packageName), sort)
    if nb_results is not None:
        path += '&n={}'.format(nb_results)
    if offset is not None:
        path += '&o={}'.format(offset)
    if filterByDevice:
        path += '&dfil=1'
    data = self.executeRequestApi2(path)
    return [utils.parseProtobufObj(review)
            for review in data.payload.reviewResponse.getResponse.review]
Args:
packageName (str): app unique ID.
filterByDevice (bool): filter results for current device
sort (int): sorting criteria (values are unknown)
nb_results (int): max number of reviews to return
offset (int): return reviews starting from an offset value
Returns:
dict object containing all the protobuf data returned from
the api | codesearchnet |
def _GetISO8601String(self, structure):
    """Normalize date time parsed format to an ISO 8601 date time string.

    The date and time values in Apache access log files are formatted as:
    "[18/Sep/2011:19:18:28 -0400]".

    Args:
        structure (pyparsing.ParseResults): structure of tokens derived
            from a line of a text file.

    Returns:
        str: ISO 8601 date time string.

    Raises:
        ValueError: if the structure cannot be converted into a date time
            string.
    """
    time_offset = structure.time_offset
    month = timelib.MONTH_DICT.get(structure.month.lower(), 0)
    # time_offset is formatted like "-0400": sign, 2 hour digits, then
    # 2 minute digits.
    try:
        time_offset_hours = int(time_offset[1:3], 10)
        time_offset_minutes = int(time_offset[3:5], 10)
    except (IndexError, TypeError, ValueError) as exception:
        raise ValueError(
            'unable to parse time zone offset with error: {0!s}.'.format(
                exception))
    # Rebuild as "YYYY-MM-DDTHH:MM:SS.000000(+|-)HH:MM".
    try:
        date_time_string = (
            '{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}.000000'
            '{6:s}{7:02d}:{8:02d}').format(
                structure.year, month, structure.day, structure.hours,
                structure.minutes, structure.seconds, time_offset[0],
                time_offset_hours, time_offset_minutes)
    except ValueError as exception:
        raise ValueError(
            'unable to format date time string with error: {0!s}.'.format(
                exception))
    return date_time_string | Normalize date time parsed format to an ISO 8601 date time string.
The date and time values in Apache access log files are formatted as:
"[18/Sep/2011:19:18:28 -0400]".
Args:
structure (pyparsing.ParseResults): structure of tokens derived from a
line of a text file.
Returns:
str: ISO 8601 date time string.
Raises:
ValueError: if the structure cannot be converted into a date time string. | juraj-google-style |
def multiply(self, other):
    """Return the scalar multiplication other * self.

    Args:
        other (complex): a complex number.

    Returns:
        Stinespring: the scalar multiplication other * self as a
        Stinespring object.

    Raises:
        QiskitError: if other is not a valid scalar.
    """
    if not isinstance(other, Number):
        raise QiskitError("other is not a number")
    # Complex or sub-unit scalars cannot be absorbed as a square root into
    # the Stinespring operators; go through the Choi representation.
    if isinstance(other, complex) or other < 1:
        return Stinespring(Choi(self).multiply(other))
    scale = np.sqrt(other)
    stine_l, stine_r = self._data
    stine_l = scale * stine_l
    if stine_r is not None:
        stine_r = scale * stine_r
    return Stinespring((stine_l, stine_r), self.input_dims(),
                       self.output_dims())
Args:
other (complex): a complex number.
Returns:
Stinespring: the scalar multiplication other * self as a
Stinespring object.
Raises:
QiskitError: if other is not a valid scalar. | juraj-google-style |
def get_num_samples(repr_ds: RepresentativeDataset) -> Optional[int]:
    """Returns the number of samples if known.

    Args:
        repr_ds: Representative dataset.

    Returns:
        The total number of samples in `repr_ds` if it can be determined
        without iterating the entire dataset, None otherwise. None does not
        mean the dataset is infinite or malformed; it simply means the size
        cannot be determined without iterating the whole dataset.
    """
    if not isinstance(repr_ds, Sized):
        return None
    try:
        return len(repr_ds)
    except Exception as ex:
        # Some Sized objects still refuse len(); treat that as unknown size.
        logging.info('Cannot determine the size of the dataset (%s).', ex)
        return None
Args:
repr_ds: Representative dataset.
Returns:
Returns the total number of samples in `repr_ds` if it can be determined
without iterating the entire dataset. Returns None otherwise. When it
returns None it does not mean the representative dataset is infinite or it
is malformed; it simply means the size cannot be determined without
iterating the whole dataset. | github-repos |
def get_name_or_instance_id(self, with_id=False):
    """Returns the name of an instance if existent, else the instance id.

    Args:
        with_id (bool): Include the instance ID even if the name is found
            (default: False)

    Returns:
        Name and/or instance ID of the instance object.
    """
    name_tag = self.get_tag('Name', case_sensitive=False)
    # Fall back to the instance id for a missing or blank Name tag.
    if not (name_tag and name_tag.value.strip()):
        return self.id
    if with_id:
        return '{0} ({1})'.format(name_tag.value, self.id)
    return name_tag.value
Args:
with_id (bool): Include the instance ID even if the name is found (default: False)
Returns:
Name and/or instance ID of the instance object | juraj-google-style |
def get_diff(value1, value2, name1, name2):
    """Get a diff between two strings.

    Args:
        value1 (str): First string to be compared.
        value2 (str): Second string to be compared.
        name1 (str): Name of the first string.
        name2 (str): Name of the second string.

    Returns:
        str: The full diff.
    """
    # splitlines() drops the newlines, so re-append them to keep difflib's
    # line-oriented output well formed.
    from_lines = ['%s\n' % line for line in value1.splitlines()]
    to_lines = ['%s\n' % line for line in value2.splitlines()]
    return ''.join(difflib.context_diff(from_lines, to_lines, fromfile=name1, tofile=name2))
Args:
value1 (str): First string to be compared.
value2 (str): Second string to be compared.
name1 (str): Name of the first string.
name2 (str): Name of the second string.
Returns:
str: The full diff. | juraj-google-style |
def addResource(self, pid):
    """Add a resource to the Resource Map.

    Args:
        pid : str
    """
    self._check_initialized()
    # Nothing to do if the resource is already part of the map.
    try:
        self.getObjectByPid(pid)
        return
    except IndexError:
        pass
    resource = rdflib.URIRef(self._pid_to_id(pid))
    aggregation = self.getAggregation()
    # Record the aggregation relationship in both directions, plus the
    # original identifier.
    self.add((aggregation, ORE.aggregates, resource))
    self.add((resource, ORE.isAggregatedBy, aggregation))
    self.add((resource, DCTERMS.identifier, rdflib.term.Literal(pid)))
Args:
pid : str | codesearchnet |
def _MakeSavedModelV2(self, run_params):
    """Write the saved model as an input for testing.

    In addition to creating a SavedModel like its parent method, this
    method replaces this SavedModel by adding TF-TRT conversion parameters
    as function attributes to each function in the SavedModel.

    Args:
        run_params: The current test run parameters.

    Returns:
        The directory of the saved model.
    """
    saved_model_dir = trt_test.TfTrtIntegrationTestBase._MakeSavedModelV2(self, run_params)
    # Load and copy the generated proto so it can be edited in place.
    saved_model_proto = loader_impl.parse_saved_model(saved_model_dir)
    new_saved_model = saved_model_pb2.SavedModel()
    new_saved_model.CopyFrom(saved_model_proto)
    new_meta_graph_def = new_saved_model.meta_graphs[0]
    # Function names carry an '__inference_' prefix added during tracing.
    prefix_len = len('__inference_')
    for func_def in new_meta_graph_def.graph_def.library.function:
        logging.info('_MakeSavedModelV2, func_def name: %s', func_def.signature.name)
        func_name_without_prefix = func_def.signature.name[prefix_len:]
        if func_name_without_prefix.startswith('_conv_and_pool_0'):
            # Prevent inlining so the function boundary survives conversion,
            # then attach the TF-TRT test attributes.
            func_def.attr['_noinline'].CopyFrom(attr_value_pb2.AttrValue(b=True))
            self._copy_test_attributes_to_func_def(func_def)
    # Replace the original SavedModel file with the edited proto.
    old_saved_model_file = os.path.join(saved_model_dir, constants.SAVED_MODEL_FILENAME_PB)
    if os.path.exists(old_saved_model_file):
        os.remove(old_saved_model_file)
    path = os.path.join(compat.as_bytes(saved_model_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
    file_io.write_string_to_file(path, new_saved_model.SerializeToString(deterministic=True))
    return saved_model_dir | Write the saved model as an input for testing.
In addition to creating a SavedModel like its parent method, this method
replaces this SavedModel by adding TF-TRT conversion parameters as function
attributes to each function in the SavedModel.
Args:
run_params: The current test run parameters.
Returns:
The directory of the saved model. | github-repos |
def _instance_transform(fqdn, o, *args, **kwargs):
    """Applies an instance method with name `fqdn` to `o`.

    Args:
        fqdn (str): fully-qualified domain name of the object.
        o: object to apply instance method to.
    """
    # NOTE(review): start=0 is passed through to _package_transform; its
    # exact meaning is defined there -- confirm before relying on it.
    return _package_transform(o, fqdn, *args, start=0, **kwargs) | Applies an instance method with name `fqdn` to `o`.
Args:
fqdn (str): fully-qualified domain name of the object.
o: object to apply instance method to. | codesearchnet |
def DeregisterCredentials(cls, credentials):
    """Deregisters a path specification credentials.

    Args:
        credentials (Credentials): credentials.

    Raises:
        KeyError: if credential object is not set for the corresponding
            type indicator.
    """
    type_indicator = credentials.type_indicator
    if type_indicator not in cls._credentials:
        raise KeyError('Credential object not set for type indicator: {0:s}.'.format(type_indicator))
    del cls._credentials[type_indicator]
Args:
credentials (Credentials): credentials.
Raises:
KeyError: if credential object is not set for the corresponding
type indicator. | codesearchnet |
def alltoall_pointtwise(xs, devices, split_axis, concat_axis):
    """MPI alltoall operation.

    Implementation of alltoall using pointwise communication.

    Args:
        xs: a list of n tf.Tensors
        devices: a list of n strings
        split_axis: an integer
        concat_axis: an integer

    Returns:
        a list of n Tensors
    """
    num_shards = len(xs)
    if num_shards == 1:
        return xs
    # Split every tensor n ways, transpose the matrix of pieces so each
    # device gathers one piece from every source, then concatenate.
    pieces = mtf.parallel(devices, tf.split, xs, [num_shards] * num_shards, axis=[split_axis] * num_shards)
    regrouped = mtf.transpose_list_of_lists(pieces)
    return mtf.parallel(devices, tf.concat, regrouped, axis=[concat_axis] * num_shards)
Implementation of alltoall using pointwise communication.
Args:
xs: a list of n tf.Tensors
devices: a list of n strings
split_axis: an integer
concat_axis: an integer
Returns:
a list of n Tensors | juraj-google-style |
def import_to_tensorboard(model_dir, log_dir, tag_set):
    """View a SavedModel as a graph in Tensorboard.

    Args:
        model_dir: The directory containing the SavedModel to import.
        log_dir: The location for the Tensorboard log to begin
            visualization from.
        tag_set: Group of tag(s) of the MetaGraphDef to load, in string
            format, separated by ','. For a tag-set containing multiple
            tags, all tags must be passed in.

    Usage: Call this function with your SavedModel location and desired
    log directory. Launch Tensorboard by pointing it to the log directory.
    View your imported SavedModel as a graph.
    """
    with session.Session(graph=ops.Graph()) as sess:
        # Load the requested MetaGraphDef and import it into a fresh graph.
        graph_def = saved_model_utils.get_meta_graph_def(model_dir, tag_set).graph_def
        importer.import_graph_def(graph_def)
        # Write the graph out in a form Tensorboard can display.
        writer = summary.FileWriter(log_dir)
        writer.add_graph(sess.graph)
        print('Model Imported. Visualize by running: tensorboard --logdir={}'.format(log_dir))
Args:
model_dir: The directory containing the SavedModel to import.
log_dir: The location for the Tensorboard log to begin visualization from.
tag_set: Group of tag(s) of the MetaGraphDef to load, in string format,
separated by ','. For tag-set contains multiple tags, all tags must be
passed in.
Usage: Call this function with your SavedModel location and desired log
directory. Launch Tensorboard by pointing it to the log directory. View your
imported SavedModel as a graph. | github-repos |
def infer_location(self, location_query, max_distance, google_key, foursquare_client_id, foursquare_client_secret, limit):
    """In-place location inferring of segments.

    Returns:
        This track
    """
    # Replace each segment with its location-inferred counterpart.
    inferred = [
        segment.infer_location(location_query, max_distance, google_key, foursquare_client_id, foursquare_client_secret, limit)
        for segment in self.segments
    ]
    self.segments = inferred
    return self
Returns:
This track | codesearchnet |
def check_required_fields(self, ignore_fields=None, allow_no_resources=False):
    """Check that metadata for dataset and its resources is complete.

    The parameter ignore_fields should be set if required to any fields
    that should be ignored for the particular operation.

    Args:
        ignore_fields (List[str]): Fields to ignore. Defaults to None
            (ignore no fields).
        allow_no_resources (bool): Whether to allow no resources.
            Defaults to False.

    Returns:
        None
    """
    # Bug fix: the default was a shared `list()` instance (mutable default
    # argument anti-pattern); use None and create a fresh list per call.
    if ignore_fields is None:
        ignore_fields = []
    if self.is_requestable():
        self._check_required_fields('dataset-requestable', ignore_fields)
    else:
        self._check_required_fields('dataset', ignore_fields)
    if len(self.resources) == 0 and not allow_no_resources:
        raise HDXError('There are no resources! Please add at least one resource!')
    for resource in self.resources:
        # Resources use their own ignore list (the parameter was previously
        # shadowed here, which obscured this intent).
        resource.check_required_fields(ignore_fields=['package_id'])
should be set if required to any fields that should be ignored for the particular operation.
Args:
ignore_fields (List[str]): Fields to ignore. Default is [].
allow_no_resources (bool): Whether to allow no resources. Defaults to False.
Returns:
None | juraj-google-style |
def read_kw_file():
    """Read content of the file containing keyword information in JSON.

    The file is compressed using BZIP.

    Returns:
        list: List of dictionaries containing keywords.
    """
    # The keyword list ships alongside this module in ../templates.
    kw_list_path = join(os.path.dirname(__file__), '../templates/keyword_list.json.bz2')
    with bz2.BZ2File(kw_list_path) as bz_file:
        return json.loads(bz_file.read())
packed using BZIP.
Returns:
list: List of dictionaries containing keywords. | codesearchnet |
def next_sample(uid):
    """Gets the next value from the generator `uid`.

    To allow multiple generators to be used at the same time, we use `uid`
    to get a specific one. A single generator would cause the validation
    to overwrite the training generator.

    Args:
        uid: int, generator identifier

    Returns:
        The next value of generator `uid`.
    """
    generator = _SHARED_SEQUENCES[uid]
    return next(generator)
To allow multiple generators to be used at the same time, we use `uid` to
get a specific one. A single generator would cause the validation to
overwrite the training generator.
Args:
uid: int, generator identifier
Returns:
The next value of generator `uid`. | github-repos |
def plot(self, ax=None, legend=None, return_fig=False, **kwargs):
    """Plot a curve.

    Args:
        ax (ax): A matplotlib axis.
        legend (striplog.legend): A legend. Optional.
        return_fig (bool): whether to return the matplotlib figure.
            Default False.
        kwargs: Arguments for ``ax.set()``

    Returns:
        ax. If you passed in an ax, otherwise None.
    """
    if ax is None:
        fig = plt.figure(figsize=(2, 10))
        ax = fig.add_subplot(111)
        return_ax = False
    else:
        return_ax = True

    # Look up this curve's display decor in the legend, if one was given.
    d = None
    if legend is not None:
        try:
            d = legend.get_decor(self)
        except Exception:
            # Bug fix: was a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit); a curve missing from the
            # legend is best-effort, so swallow lookup errors only.
            pass

    if d is not None:
        kwargs['color'] = d.colour
        kwargs['lw'] = getattr(d, 'lineweight', None) or getattr(d, 'lw', 1)
        kwargs['ls'] = getattr(d, 'linestyle', None) or getattr(d, 'ls', '-')

    # Optional axis settings carried on the decor as comma-separated
    # strings; getattr on a None decor simply yields None for each.
    axkwargs = {}
    xlim = getattr(d, 'xlim', None)
    if xlim is not None:
        axkwargs['xlim'] = list(map(float, xlim.split(',')))
    xticks = getattr(d, 'xticks', None)
    if xticks is not None:
        axkwargs['xticks'] = list(map(float, xticks.split(',')))
    xscale = getattr(d, 'xscale', None)
    if xscale is not None:
        axkwargs['xscale'] = xscale
    ax.set(**axkwargs)

    ax.plot(self, self.basis, **kwargs)
    ax.set_title(self.mnemonic)
    ax.set_xlabel(self.units)

    # Removed dead `if False: ax.xaxis.tick_top()` and unwrapped the
    # pointless `if True:` around the label rotation below.
    for label in ax.get_xticklabels():
        label.set_rotation(90)

    # Depth increases downwards, hence the inverted y limits.
    ax.set_ylim([self.stop, self.start])
    ax.grid('on', color='k', alpha=0.33, lw=0.33, linestyle='-')

    if return_ax:
        return ax
    elif return_fig:
        return fig
    else:
        return None
Args:
ax (ax): A matplotlib axis.
legend (striplog.legend): A legend. Optional.
return_fig (bool): whether to return the matplotlib figure.
Default False.
kwargs: Arguments for ``ax.set()``
Returns:
ax. If you passed in an ax, otherwise None. | juraj-google-style |
def parse_history_node(h_node):
    """Parses a History Node object from either a dict or a tuple.

    Args:
        h_node: A dict with name/url/description fields or a 3-element
            tuple.

    Returns:
        History node.
    """
    if isinstance(h_node, dict):
        return HistoryNode.from_dict(h_node)
    if len(h_node) != 3:
        raise ValueError("Invalid History node, "
                         "should be dict or (name, version, "
                         "description) tuple: {}".format(h_node))
    name, version, description = h_node
    return HistoryNode(name, version, description)
Args:
h_node: A dict with name/url/description fields or a 3-element
tuple.
Returns:
History node. | juraj-google-style |
def lookup(self, name):
    """Looks up "name".

    Args:
        name: a string specifying the registry key for the candidate.

    Returns:
        Registered object if found.

    Raises:
        LookupError: if "name" has not been registered.
    """
    key = compat.as_str(name)
    if key in self._registry:
        return self._registry[key][_TYPE_TAG]
    raise LookupError('%s registry has no entry for: %s' % (self._name, key))
Args:
name: a string specifying the registry key for the candidate.
Returns:
Registered object if found
Raises:
LookupError: if "name" has not been registered. | github-repos |
def _options_form_default(self):
if (not self.profile_list):
return ''
if callable(self.profile_list):
return self._render_options_form_dynamically
else:
return self._render_options_form(self.profile_list) | Build the form template according to the `profile_list` setting.
Returns:
'' when no `profile_list` has been defined
The rendered template (using jinja2) when `profile_list` is defined. | codesearchnet |
def describe(self, percentiles=None, include=None, exclude=None):
    """Generates descriptive statistics that summarize the central tendency,
    dispersion and shape of a dataset's distribution, excluding NaN values.

    Args:
        percentiles (list-like of numbers, optional):
            The percentiles to include in the output.
        include: White-list of data types to include in results.
        exclude: Black-list of data types to exclude in results.

    Returns:
        Series/DataFrame of summary statistics.
    """
    # Validate `include`: normalize entries to numpy dtypes (except raw
    # numpy scalar types) and ensure at least one frame dtype matches.
    if include is not None and (isinstance(include, np.dtype) or include != "all"):
        if not is_list_like(include):
            include = [include]
        include = [
            np.dtype(i)
            if not (isinstance(i, type) and i.__module__ == "numpy")
            else i
            for i in include
        ]
        if not any(
            (isinstance(inc, np.dtype) and inc == d)
            or (
                not isinstance(inc, np.dtype)
                and inc.__subclasscheck__(getattr(np, d.__str__()))
            )
            for d in self._get_dtypes()
            for inc in include
        ):
            raise ValueError("No objects to concatenate")
    # Validate `exclude`: error out if every frame dtype would be excluded.
    if exclude is not None:
        if not is_list_like(exclude):
            exclude = [exclude]
        exclude = [np.dtype(e) for e in exclude]
        if all(
            (isinstance(exc, np.dtype) and exc == d)
            or (
                not isinstance(exc, np.dtype)
                and exc.__subclasscheck__(getattr(np, d.__str__()))
            )
            for d in self._get_dtypes()
            for exc in exclude
        ):
            raise ValueError("No objects to concatenate")
    # NOTE(review): delegates percentile validation to a private pandas
    # helper; this API may differ across pandas versions -- confirm.
    if percentiles is not None:
        pandas.DataFrame()._check_percentile(percentiles)
    # The actual computation happens in the query compiler backend.
    return self.__constructor__(
        query_compiler=self._query_compiler.describe(
            percentiles=percentiles, include=include, exclude=exclude
        )
    ) | Generates descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding NaN values.
Args:
percentiles (list-like of numbers, optional):
The percentiles to include in the output.
include: White-list of data types to include in results
exclude: Black-list of data types to exclude in results
Returns: Series/DataFrame of summary statistics | juraj-google-style |
def default_bucket(self):
    """Return the name of the default bucket to use in relevant Amazon
    SageMaker interactions.

    Returns:
        str: The name of the default bucket, which is of the form:
            ``sagemaker-{region}-{AWS account ID}``.
    """
    # Return the cached name if the bucket was already resolved.
    if self._default_bucket:
        return self._default_bucket
    account = self.boto_session.client('sts').get_caller_identity()['Account']
    region = self.boto_session.region_name
    default_bucket = 'sagemaker-{}-{}'.format(region, account)
    s3 = self.boto_session.resource('s3')
    try:
        # us-east-1 is created without an explicit LocationConstraint;
        # every other region requires one.
        if (region == 'us-east-1'):
            s3.create_bucket(Bucket=default_bucket)
        else:
            s3.create_bucket(Bucket=default_bucket, CreateBucketConfiguration={'LocationConstraint': region})
        LOGGER.info('Created S3 bucket: {}'.format(default_bucket))
    except ClientError as e:
        error_code = e.response['Error']['Code']
        message = e.response['Error']['Message']
        if (error_code == 'BucketAlreadyOwnedByYou'):
            # The bucket already exists and belongs to us; nothing to do.
            pass
        elif ((error_code == 'OperationAborted') and ('conflicting conditional operation' in message)):
            # Another request is creating the same bucket concurrently.
            pass
        elif (error_code == 'TooManyBuckets'):
            # Bucket limit reached; succeed only if the bucket exists.
            s3.meta.client.head_bucket(Bucket=default_bucket)
        else:
            raise
    self._default_bucket = default_bucket
    return self._default_bucket | Return the name of the default bucket to use in relevant Amazon SageMaker interactions.
Returns:
str: The name of the default bucket, which is of the form: ``sagemaker-{region}-{AWS account ID}``. | codesearchnet |
def CheckDirectory(self, path, extension='yaml'):
    """Validates definition files in a directory.

    Args:
        path (str): path of the definition file.
        extension (Optional[str]): extension of the filenames to read.

    Returns:
        bool: True if the directory contains valid definitions.
    """
    if extension:
        glob_spec = os.path.join(path, '*.{0:s}'.format(extension))
    else:
        glob_spec = os.path.join(path, '*')
    # Check every file (don't short-circuit) so each failure is reported.
    result = True
    for definition_file in sorted(glob.glob(glob_spec)):
        result = self.CheckFile(definition_file) and result
    return result
return result | Validates definition files in a directory.
Args:
path (str): path of the definition file.
extension (Optional[str]): extension of the filenames to read.
Returns:
bool: True if the directory contains valid definitions. | juraj-google-style |
def get_rows(self, reportId: typing.Optional[int]=None, timeout: int=60 * 3) -> typing.Iterator[dict]:
    """Return each row of data from a report as a generator.

    Wait up to 3 hours with 1 minute poll intervals for the report to
    finish. Handle fragmented downloads.

    Args:
        reportId: optional, if not given uses prior value from
            request(...) call.
        timeout: optional, number of minutes to wait for report to
            complete.

    Returns:
        Generator with lists of column values.
    """
    if reportId is None:
        reportId = self.reportId
    # Poll once a minute until the report is ready or the timeout expires.
    while timeout > 0:
        report = API_SearchAds(self.config, self.auth).reports().get(reportId=reportId).execute()
        if report['isReportReady']:
            # Download every fragment and stitch the rows together.
            for fragment in range(len(report['files'])):
                rows = csv_to_rows(API_SearchAds(self.config, self.auth).reports().getFile(reportId=reportId, reportFragment=fragment).execute())
                # Skip the first row of non-initial fragments -- presumably
                # a repeated header row; verify against the API output.
                if fragment > 0:
                    next(rows)
                yield from rows
            break
        else:
            if self.config.verbose:
                print('.', end='')
            sleep(60)
            timeout -= 1 | Return each row of data from a report as a generator.
Wait up to 3 hours with 1 minute poll intervals for report to finish.
Handle fragmented downloads.
Args:
reportId - optional, if not given uses prior value from request(...) call.
timeout - optional, number of minutes to wait for report to complete.
Returns:
Generator with lists of column values. | github-repos |
def user(self, user: str, token: Optional[str] = None) -> "IntentAPI":
    """Get the intent API for a specific user.

    This is just a proxy to :func:`~HTTPAPI.intent`. You should only call
    this method for the bot user. Calling it with child intent APIs will
    result in a warning log.

    Args:
        user: The Matrix ID of the user whose intent API to get.
        token: The access token to use for the Matrix ID.

    Returns:
        The IntentAPI for the given user.
    """
    if not self.bot:
        return self.client.intent(user, token)
    # NOTE(review): the original warning string was truncated in the
    # source; reconstructed to match the documented behavior above.
    self.log.warning("Called IntentAPI.user() of child intent object.")
    return self.bot.client.intent(user, token)
You should only call this method for the bot user. Calling it with child intent APIs will
result in a warning log.
Args:
user: The Matrix ID of the user whose intent API to get.
token: The access token to use for the Matrix ID.
Returns:
The IntentAPI for the given user. | juraj-google-style |
def EqualTo(self, value):
    """Set the WHERE clause of this builder to an equality ("=") condition.

    Args:
        value: The value to compare against in the WHERE condition.

    Returns:
        The query builder that this WHERE builder links to.
    """
    condition = self._CreateSingleValueCondition(value, '=')
    self._awql = condition
    return self._query_builder
Args:
value: The value to be used in the WHERE condition.
Returns:
The query builder that this WHERE builder links to. | juraj-google-style |
class MusicgenUnconditionalInput(ModelOutput):
    """Container for the encoder-side inputs Musicgen uses when generating.

    Attributes:
        encoder_outputs (`Tuple[torch.FloatTensor]` of length 1, each of shape
            `(batch_size, sequence_length, hidden_size)`): hidden states at the
            output of the last layer of the text encoder.
        attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`,
            *optional*): encoder attention mask to avoid attending to padding;
            1 for tokens that are **not masked**, 0 for **masked** tokens.
        guidance_scale (`float`, *optional*): classifier-free guidance scale,
            balancing conditional logits (from prompts) against unconditional
            logits (without prompts).
    """

    encoder_outputs: Tuple[torch.FloatTensor] = None
    attention_mask: Optional[torch.LongTensor] = None
    guidance_scale: Optional[float] = None
encoder_outputs (`Tuple[torch.FloatTensor]` of length 1, with tensor shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the text encoder model.
attention_mask (`torch.LongTensor`) of shape `(batch_size, sequence_length)`, *optional*):
Encoder attention mask to avoid performing attention on padding token indices. Mask values selected in `[0,
1]`: 1 for tokens that are **not masked**, 0 for tokens that are **masked**.
guidance_scale (`float`, *optional*):
Guidance scale for classifier free guidance, setting the balance between the conditional logits (predicted
from the prompts) and the unconditional logits (predicted without prompts). | github-repos |
def container(container_name) -> ContextManager[str]:
    """Wrapper for `Graph.container()` using the default graph.

    Args:
        container_name: The container string to use in the context.

    Returns:
        A context manager that specifies the default container to use for
        newly created stateful ops.
    """
    return get_default_graph().container(container_name)
Args:
container_name: The container string to use in the context.
Returns:
A context manager that specifies the default container to use for newly
created stateful ops. | github-repos |
def run(self, job_name, handler_spec, input_reader_spec, output_writer_spec=None, params=None, shards=None, base_path=None):
    """Start a mapreduce job and record its id on this pipeline.

    Args:
        job_name: mapreduce name; used for display purposes only.
        handler_spec: fully qualified name of the map function/class.
        input_reader_spec: fully qualified name of the input reader class.
        output_writer_spec: fully qualified name of the output writer class.
        params: dict of parameters for input reader and output writer
            initialization.
        shards: requested shard count. This is only a hint to mapreduce;
            the real number of shards is determined by how the input is
            split. Defaults to the configured SHARD_COUNT.
        base_path: base URL path for the mapreduce handlers; defaults to
            the configured BASE_PATH.
    """
    shard_count = parameters.config.SHARD_COUNT if shards is None else shards
    handler_base_path = parameters.config.BASE_PATH if base_path is None else base_path
    mapreduce_parameters = {
        'done_callback': self.get_callback_url(),
        'done_callback_method': 'GET',
        'pipeline_id': self.pipeline_id,
        'base_path': handler_base_path,
    }
    mapreduce_id = control.start_map(
        job_name,
        handler_spec,
        input_reader_spec,
        params or {},
        mapreduce_parameters=mapreduce_parameters,
        shard_count=shard_count,
        output_writer_spec=output_writer_spec,
        queue_name=self.queue_name)
    self.fill(self.outputs.job_id, mapreduce_id)
    self.set_status(console_url='%s/detail?mapreduce_id=%s' % (handler_base_path, mapreduce_id))
Args:
job_name: mapreduce name. Only for display purpose.
handler_spec: fully qualified name to your map function/class.
input_reader_spec: fully qualified name to input reader class.
output_writer_spec: fully qualified name to output writer class.
params: a dictionary of parameters for input reader and output writer
initialization.
shards: number of shards. This provides a guide to mapreduce. The real
number of shards is determined by how the input is split.
def _GetUrl(self, url, cache, database):
if not url:
return ''
url_cache_results = cache.GetResults('url')
if not url_cache_results:
result_set = database.Query(self._URL_CACHE_QUERY)
cache.CacheQueryResults(result_set, 'url', 'id', ('url', 'title'))
url_cache_results = cache.GetResults('url')
reference_url, reference_title = url_cache_results.get(url, ['', ''])
if not reference_url:
return ''
return '{0:s} ({1:s})'.format(reference_url, reference_title) | Retrieves an URL from a reference to an entry in the from_visit table.
Args:
url (str): URL.
cache (SQLiteCache): cache.
database (SQLiteDatabase): database.
Returns:
str: URL or an empty string if no URL was found. | juraj-google-style |
def set_boolean(self, option, value):
    """Set a boolean option, stored as the lowercase string 'true'/'false'.

    Args:
        option (str): name of option.
        value (bool): value of the option.

    Raises:
        TypeError: Value must be a boolean.
    """
    if not isinstance(value, bool):
        raise TypeError('%s must be a boolean' % option)
    self.options[option] = 'true' if value else 'false'
Args:
option (str): name of option.
value (bool): value of the option.
Raises:
TypeError: Value must be a boolean. | codesearchnet |
def _get_modules(package, attr_name, constants_attr_name):
    """Get the set of TF API modules.

    Scans every currently loaded module whose name contains `package` and
    collects the modules that define exported API symbols.

    Args:
        package: We only look at modules that contain `package` in the name.
        attr_name: Attribute set on TF symbols that contains API names.
        constants_attr_name: Attribute set on TF modules that contains API
            constant names.

    Returns:
        Set of TensorFlow API modules.
    """
    modules = set()
    # Copy sys.modules.values() so concurrent imports cannot mutate it mid-loop.
    for module in list(sys.modules.values()):
        if not module or not hasattr(module, '__name__') or package not in module.__name__:
            continue
        for module_contents_name in dir(module):
            attr = getattr(module, module_contents_name)
            # Unwrap any tf_decorator wrappers to inspect the underlying symbol.
            _, attr = tf_decorator.unwrap(attr)
            if module_contents_name == constants_attr_name:
                # The constants attribute is iterated as (exports, value) pairs.
                for exports, _ in attr:
                    modules.update([_get_module_from_symbol(export) for export in exports])
                continue
            if hasattr(attr, '__dict__') and attr_name in attr.__dict__:
                modules.update([_get_module_from_symbol(export) for export in getattr(attr, attr_name)])
    return modules
Args:
package: We only look at modules that contain package in the name.
attr_name: Attribute set on TF symbols that contains API names.
constants_attr_name: Attribute set on TF modules that contains
API constant names.
Returns:
Set of TensorFlow API modules. | github-repos |
def add_exit_node(self, ast_node, section_id, guards):
    """Grows the graph by adding an exit node.

    The new node is registered as an exit of the section identified by
    `section_id`.

    Args:
        ast_node: ast.AST
        section_id: Hashable, the node for which ast_node should be
            considered to be an exit node
        guards: Tuple[ast.AST, ...], the finally sections that guard ast_node

    Returns:
        Node
    """
    exit_node = self._add_jump_node(ast_node, guards)
    self.exits[section_id].add(exit_node)
    return exit_node
This node becomes an exit for the current section.
Args:
ast_node: ast.AST
section_id: Hashable, the node for which ast_node should be considered to
be an exit node
guards: Tuple[ast.AST, ...], the finally sections that guard ast_node
Returns:
Node | github-repos |
def StreamFile(self, filedesc, offset=0, amount=None):
    """Streams chunks of a given file starting at given offset.

    Args:
        filedesc: A `file` object to stream.
        offset: An integer offset at which the file stream should start on.
        amount: An upper bound on number of bytes to read.

    Returns:
        Generator over `Chunk` instances.
    """
    return self.Stream(FileReader(filedesc, offset=offset), amount=amount)
Args:
filedesc: A `file` object to stream.
offset: An integer offset at which the file stream should start on.
amount: An upper bound on number of bytes to read.
Returns:
Generator over `Chunk` instances. | juraj-google-style |
def make_tarfile(self, name=None, max_filesize=None, exclude_exts=None, exclude_dirs=None, verbose=0, **kwargs):
    """Create a gzipped tarball of the flow working directory.

    Args:
        name: Name of the tarball file. Set to
            ``os.path.basename(self.workdir) + ".tar.gz"`` if name is None.
        max_filesize (int or string with unit): a file is included in the tar
            file only if its size <= max_filesize. Can be specified in bytes,
            e.g. ``max_filesize=1024``, or with a string with unit, e.g.
            ``max_filesize="1 Mb"``. No check is done if max_filesize is None.
        exclude_exts: List of file extensions to be excluded from the tar file.
        exclude_dirs: List of directory basenames to be excluded.
        verbose (int): Verbosity level.
        kwargs: keyword arguments passed to the :class:`TarFile` constructor.

    Returns:
        The name of the tarfile.
    """
    import tarfile

    def any2bytes(s):
        """Convert string or number to memory in bytes."""
        if is_string(s):
            return int(Memory.from_string(s).to('b'))
        else:
            return int(s)

    if max_filesize is not None:
        max_filesize = any2bytes(max_filesize)

    if exclude_exts:
        # Accept extensions with or without the ".nc" suffix by registering
        # both variants for every entry.
        exts = []
        for e in list_strings(exclude_exts):
            exts.append(e)
            if e.endswith('.nc'):
                exts.append(e.replace('.nc', ''))
            else:
                exts.append(e + '.nc')
        exclude_exts = exts

    def tar_filter(tarinfo):
        """Return `tarinfo` if the member should be archived, else None."""
        if tarinfo.issym() or tarinfo.islnk():
            if verbose:
                print('Excluding link: %s' % tarinfo.name)
            return None
        if max_filesize is not None and tarinfo.size > max_filesize:
            if verbose:
                print('Excluding %s due to max_filesize' % tarinfo.name)
            return None
        if exclude_exts and any(tarinfo.name.endswith(ext) for ext in exclude_exts):
            if verbose:
                print('Excluding %s due to extension' % tarinfo.name)
            return None
        if exclude_dirs and any(dir_name in exclude_dirs for dir_name in tarinfo.name.split(os.path.sep)):
            if verbose:
                print('Excluding %s due to exclude_dirs' % tarinfo.name)
            return None
        return tarinfo

    name = os.path.basename(self.workdir) + '.tar.gz' if name is None else name
    back = os.getcwd()
    os.chdir(os.path.join(self.workdir, '..'))
    try:
        with tarfile.open(name=name, mode='w:gz', **kwargs) as tar:
            # BUGFIX: TarFile.add no longer accepts the deprecated `exclude`
            # argument (removed in Python 3.7); member selection is done via
            # the `filter` callable only.
            tar.add(os.path.basename(self.workdir), arcname=None, recursive=True, filter=tar_filter)
            if self.pyfile is not None and os.path.exists(self.pyfile):
                tar.add(self.pyfile)
    finally:
        # Always restore the original working directory, even on error.
        os.chdir(back)
    return name
Args:
name: Name of the tarball file. Set to os.path.basename(`flow.workdir`) + "tar.gz"` if name is None.
max_filesize (int or string with unit): a file is included in the tar file if its size <= max_filesize
Can be specified in bytes e.g. `max_files=1024` or with a string with unit e.g. `max_filesize="1 Mb"`.
No check is done if max_filesize is None.
exclude_exts: List of file extensions to be excluded from the tar file.
exclude_dirs: List of directory basenames to be excluded.
verbose (int): Verbosity level.
kwargs: keyword arguments passed to the :class:`TarFile` constructor.
Returns:
The name of the tarfile. | codesearchnet |
def filler(self):
    """Returns the pipeline ID that filled this slot's value.

    Returns:
        A string that is the pipeline ID.

    Raises:
        SlotNotFilledError if the value hasn't been filled yet.
    """
    if self.filled:
        return self._filler_pipeline_key.name()
    raise SlotNotFilledError(('Slot with name "%s", key "%s" not yet filled.' % (self.name, self.key)))
Returns:
A string that is the pipeline ID.
Raises:
SlotNotFilledError if the value hasn't been filled yet. | codesearchnet |
def should_include_file_in_search(file_name, extensions, exclude_dirs):
    """Whether a filename matches the search criteria.

    A file is included when its path does not start with any excluded
    directory prefix and its name ends with one of the given extensions.

    Args:
        file_name (str): A file path to check.
        extensions (list): File extensions the file should match (any one).
        exclude_dirs (list): Directory prefixes to exclude, or None.

    Returns:
        A boolean of whether or not file matches search criteria.
    """
    if exclude_dirs is not None and any(file_name.startswith(d) for d in exclude_dirs):
        return False
    return any(file_name.endswith(e) for e in extensions)
Args:
file_name (str): A file path to check.
extensions (list): A list of file extensions file should match.
exclude_dirs (list): A list of directories to exclude from search.
Returns:
A boolean of whether or not file matches search criteria. | juraj-google-style |
def cluster_resources(self):
    """Get the current total cluster resources.

    Note that this information can grow stale as nodes are added to or
    removed from the cluster.

    Returns:
        A dictionary mapping resource name to the total quantity of that
        resource in the cluster.
    """
    totals = {}
    for client in self.client_table():
        if not client['IsInsertion']:
            continue
        for resource_name, quantity in client['Resources'].items():
            totals[resource_name] = totals.get(resource_name, 0) + quantity
    return totals
Note that this information can grow stale as nodes are added to or
removed from the cluster.
Returns:
A dictionary mapping resource name to the total quantity of that
resource in the cluster. | codesearchnet |
def Copy(From, To):
    """Small copy wrapper around ``cp``.

    Copies recursively in archive mode, using copy-on-write reflinks where
    the filesystem supports them (``--reflink=auto``).

    Args:
        From (str): Path to the SOURCE.
        To (str): Path to the TARGET.
    """
    from benchbuild.utils.cmd import cp
    cp("-ar", "--reflink=auto", From, To)
Args:
From (str): Path to the SOURCE.
To (str): Path to the TARGET. | juraj-google-style |
def standardize_sample_or_class_weights(x_weight, output_names, weight_type):
    """Maps `sample_weight` or `class_weight` to model outputs.

    Args:
        x_weight: User-provided `sample_weight` or `class_weight` argument.
        output_names: List of output names (strings) in the model.
        weight_type: A string used purely for exception printing.

    Returns:
        A list of `sample_weight` or `class_weight` with exactly one
        element per model output.

    Raises:
        ValueError: In case of invalid user-provided argument.
    """
    # No weights provided: one None per output.
    if x_weight is None or (isinstance(x_weight, (list, tuple)) and len(x_weight) == 0):
        return [None for _ in output_names]

    # Single-output models accept a bare weight, a 1-element list, or a dict.
    if len(output_names) == 1:
        if isinstance(x_weight, (list, tuple)) and len(x_weight) == 1:
            return x_weight
        if isinstance(x_weight, dict) and output_names[0] in x_weight:
            return [x_weight[output_names[0]]]
        return [x_weight]

    # Multi-output models require a list/tuple of matching length, or a dict.
    if isinstance(x_weight, (list, tuple)):
        if len(x_weight) != len(output_names):
            raise ValueError('Provided `' + weight_type + '` was a list of ' + str(len(x_weight)) + ' elements, but the model has ' + str(len(output_names)) + ' outputs. You should provide one `' + weight_type + '`array per model output.')
        return x_weight
    if isinstance(x_weight, collections.abc.Mapping):
        generic_utils.check_for_unexpected_keys(weight_type, x_weight, output_names)
        return [x_weight.get(name) for name in output_names]
    raise TypeError('The model has multiple outputs, so `' + weight_type + '` should be either a list or a dict. Provided `' + weight_type + '` type not understood: ' + str(x_weight))
Args:
x_weight: User-provided `sample_weight` or `class_weight` argument.
output_names: List of output names (strings) in the model.
weight_type: A string used purely for exception printing.
Returns:
A list of `sample_weight` or `class_weight` where there are exactly
one element per model output.
Raises:
ValueError: In case of invalid user-provided argument. | github-repos |
def parse_response(response, encoding='utf-8'):
    """Parse a multipart Requests.Response into a tuple of BodyPart objects.

    Args:
        response: Requests.Response.
        encoding: The parser will assume that any text in the HTML body is
            encoded with this encoding when decoding it for use in the
            ``text`` attribute.

    Returns:
        tuple of BodyPart.
        Members: headers (CaseInsensitiveDict), content (bytes),
        text (Unicode), encoding (str).
    """
    return requests_toolbelt.multipart.decoder.MultipartDecoder.from_response(response, encoding).parts
Args:
response: Requests.Response
encoding:
The parser will assume that any text in the HTML body is encoded with this
encoding when decoding it for use in the ``text`` attribute.
Returns:
tuple of BodyPart
Members: headers (CaseInsensitiveDict), content (bytes), text (Unicode),
encoding (str). | codesearchnet |
def iplot_histogram(data, figsize=None, number_to_keep=None, sort='asc', legend=None):
    """Create an interactive histogram representation.

    Graphical representation of the input array using a vertical bars
    style graph, rendered via the qVisualization widget.

    Args:
        data (list or dict): Either a list of dicts or a single dict
            containing the values to represent (e.g. {'001': 130}).
        figsize (tuple): Figure size in pixels.
        number_to_keep (int): The number of terms to plot; the rest is
            merged into a single bar called "other values".
        sort (string): Could be 'asc' or 'desc'.
        legend (list): A list of strings to use for labels of the data.
            The number of entries must match the length of data.

    Raises:
        VisualizationError: When legend is provided and the length doesn't
            match the input data.
    """
    html_template = Template('\n    <p>\n        <div id="histogram_$divNumber"></div>\n    </p>\n    ')
    # NOTE(review): the body of this template literal was truncated in this
    # copy of the file (the requirejs config and the qVisualization call are
    # missing). The literal is closed here so the module parses again;
    # restore the full template from the original source before use.
    javascript_template = Template('\n    <script>\n        requirejs.config({\n            paths: {\n                qVisualization: "https:')
    div_number = str(time.time())
    div_number = re.sub('[.]', '', div_number)
    if figsize is None:
        figsize = (7, 5)
    options = {'number_to_keep': 0 if number_to_keep is None else number_to_keep, 'sort': sort, 'show_legend': 0, 'width': int(figsize[0]), 'height': int(figsize[1])}
    if legend:
        options['show_legend'] = 1
    data_to_plot = []
    if isinstance(data, dict):
        data = [data]
    if legend and len(legend) != len(data):
        # BUGFIX: error message said "legendL" instead of "legend".
        raise VisualizationError("Length of legend (%s) doesn't match number of input executions: %s" % (len(legend), len(data)))
    for item, execution in enumerate(data):
        exec_data = process_data(execution, options['number_to_keep'])
        out_dict = {'data': exec_data}
        if legend:
            out_dict['name'] = legend[item]
        data_to_plot.append(out_dict)
    html = html_template.substitute({'divNumber': div_number})
    javascript = javascript_template.substitute({'divNumber': div_number, 'executions': data_to_plot, 'options': options})
    display(HTML(html + javascript))
Graphical representation of the input array using a vertical bars
style graph.
Args:
data (list or dict): This is either a list of dicts or a single
dict containing the values to represent (ex. {'001' : 130})
figsize (tuple): Figure size in pixels.
number_to_keep (int): The number of terms to plot and
rest is made into a single bar called other values
sort (string): Could be 'asc' or 'desc'
legend (list): A list of strings to use for labels of the data.
The number of entries must match the length of data.
Raises:
VisualizationError: When legend is provided and the length doesn't
match the input data. | codesearchnet |
def tag(self, main_type, sub_type, unique_id, tag, action='GET', owner=None, params=None):
    """Add, delete or retrieve a tag on a ThreatConnect object.

    Args:
        main_type: Main object type (e.g. groups, indicators).
        sub_type: Object sub type, or a falsy value when not applicable.
        unique_id: Unique id of the object being tagged.
        tag: Tag name; URL-quoted before being sent.
        action: One of ADD, DELETE or GET (case-insensitive).
        owner: Optional owner name, sent as a query parameter.
        params: Optional dict of extra query parameters.

    Returns:
        The session Response, or None for an unknown action.
    """
    query_params = params or {}
    if owner:
        query_params['owner'] = owner

    if sub_type:
        url = '/v2/{}/{}/{}/tags/{}'.format(main_type, sub_type, unique_id, quote(tag))
    else:
        url = '/v2/{}/{}/tags/{}'.format(main_type, unique_id, quote(tag))

    action = action.upper()
    if action == 'ADD':
        return self.tcex.session.post(url, params=query_params)
    if action == 'DELETE':
        return self.tcex.session.delete(url, params=query_params)
    if action == 'GET':
        return self.tcex.session.get(url, params=query_params)
    self.tcex.log.error('_tags error')
    return None
owner:
main_type:
sub_type:
unique_id:
tag:
action:
params:
Return: | juraj-google-style |
def get_prefix(self, name):
    """Retrieve a prefix, resolving the special name 'current' if needed.

    Args:
        name(str): name of the prefix to retrieve, or 'current' to get
            the currently selected one.

    Returns:
        self.prefix_class: instance of the prefix with the given name.

    Raises:
        KeyError: if no prefix with that name exists in this workdir.
    """
    resolved = self.current if name == 'current' else name
    if resolved not in self.prefixes:
        raise KeyError(
            'Unable to find prefix "%s" in workdir %s' % (resolved, self.path)
        )
    return self.prefixes[resolved]
Args:
name(str): name of the prefix to retrieve, or current to get the
current one
Returns:
self.prefix_class: instance of the prefix with the given name | juraj-google-style |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.