code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def set_max_freq(self, max_freq=None):
    """Set the max frequency for the variant.

    If ``max_freq`` is given, use it; otherwise go through all frequencies
    and set the highest one as ``self['max_freq']``. An existing truthy
    ``self['max_freq']`` is kept when it is higher than every frequency.

    Args:
        max_freq (float): The max frequency.
    """
    if max_freq is not None:
        # `is not None` rather than truthiness so an explicit frequency of
        # 0.0 is honored instead of silently ignored.
        self['max_freq'] = max_freq
        return
    for frequency in self['frequencies']:
        current = self['max_freq']
        # Treat a falsy stored value as "unset", matching the old loop.
        if not current or frequency['value'] > current:
            self['max_freq'] = frequency['value']
    return
def plot_hall_carriers(self, temp=300):
    """Plot the Hall carrier concentration as a function of Fermi level.

    Args:
        temp: the temperature at which the Hall carrier concentration is
            evaluated.

    Returns:
        a matplotlib object (the ``pyplot`` module with the figure drawn).
    """
    import matplotlib.pyplot as plt
    # Plot magnitudes only; concentrations can be signed by carrier type.
    hall_carriers = [abs(i) for i in
                     self._bz.get_hall_carrier_concentration()[temp]]
    plt.semilogy(self._bz.mu_steps,
                 hall_carriers,
                 linewidth=3.0, color='r')
    self._plot_bg_limits()
    self._plot_doping(temp)
    plt.xlim(-0.5, self._bz.gap + 0.5)
    plt.ylim(1e14, 1e22)
    plt.ylabel("Hall carrier concentration (cm-3)", fontsize=30.0)
    plt.xlabel("E-E$_f$ (eV)", fontsize=30)
    plt.xticks(fontsize=25)
    plt.yticks(fontsize=25)
    return plt
def _stringify_path(path_or_buffer):
try:
import pathlib
_PATHLIB_INSTALLED = True
except ImportError:
_PATHLIB_INSTALLED = False
if hasattr(path_or_buffer, '__fspath__'):
return path_or_buffer.__fspath__()
if (_PATHLIB_INSTALLED and isinstance(path_or_buffer, pathlib.Path)):
return text_type(path_or_buffer)
return path_or_buffer | Convert path like object to string
Args:
path_or_buffer: object to be converted
Returns:
string_path_or_buffer: maybe string version of path_or_buffer | codesearchnet |
def trace_distance_bound(val: Any) -> float:
    """Return an upper bound on the trace distance between the input and
    output of the effect ``val``.

    Delegates to the effect's ``_trace_distance_bound_`` method when it is
    present.

    Args:
        val: The effect of which the bound should be calculated.

    Returns:
        The result of ``val._trace_distance_bound_()`` when that method
        exists and returns something other than ``NotImplemented``;
        otherwise 1.0. The result is capped at a maximum of 1.0, even if
        the underlying method produces a larger value.
    """
    getter = getattr(val, '_trace_distance_bound_', None)
    if getter is None:
        return 1.0
    bound = getter()
    if bound is NotImplemented or bound >= 1.0:
        return 1.0
    return bound
def make(self, path, metadata=None):
    """Make a new H5 entry group.

    This method is only available in write mode. It defers the creation of
    the H5 entry group until ``__setitem__`` is called, preventing the
    creation of empty groups.

    The information about the current shard is reset.

    Args:
        path: `str`. The variable path.
        metadata: Optional `dict`. The metadata to save with the H5 entry
            group. Defaults to `None`.
    """
    # Reset shard bookkeeping; seed it with the currently open shard's
    # filename (if any) so it is tracked for the new entry.
    self.current_shard_filenames = []
    if self.h5_file is not None:
        self.current_shard_filenames.append(pathlib.Path(self.h5_file.filename).name)
    return super().make(path, metadata)
def freeze_script(script_path, cache=True, temp_path='_hadoopy_temp'):
    """Freezes a script, puts it on HDFS, and gives you the path.

    'frozen_tar_path' can be given to launch_frozen and it will use that
    instead of making its own; this is useful for repeated calls. If a
    file with the same md5 already exists in the temp_path, it is used
    instead of putting a new copy there, to avoid the file transfer. The
    files are put into a temporary file based on the timestamp first, then
    moved to a location that is only a function of their md5, to prevent
    partial files.

    Args:
        script_path: Path to a hadoopy script.
        cache: If True (default) then use previously frozen scripts.
            Cache is stored in memory (not persistent).
        temp_path: HDFS temporary path (default is '_hadoopy_temp').

    Returns:
        {'cmds': commands_ran, 'frozen_tar_path': frozen_tar_path}

    Raises:
        ValueError: Script cannot be found.
    """
    script_abspath = os.path.abspath(script_path)
    if (not os.path.exists(script_abspath)):
        raise ValueError(('Script [%s] does not exist.' % script_abspath))
    try:
        if (not cache):
            # Bypass the in-memory cache by falling into the KeyError handler.
            raise KeyError
        (cmds, frozen_tar_path) = FREEZE_CACHE[script_abspath]
    except KeyError:
        # Stage under a timestamped name first so concurrent writers never
        # expose a partially-uploaded tar at the final md5-based path.
        tmp_frozen_tar_path = (temp_path + ('/%f.tar' % time.time()))
        freeze_fp = tempfile.NamedTemporaryFile(suffix='.tar')
        cmds = hadoopy._freeze.freeze_to_tar(os.path.abspath(script_path), freeze_fp.name)
        md5 = _md5_file(freeze_fp.name)
        frozen_tar_path = (temp_path + ('/%s.tar' % md5))
        if (not hadoopy.exists(frozen_tar_path)):
            if (not hadoopy.exists(temp_path)):
                hadoopy.mkdir(temp_path)
            hadoopy.put(freeze_fp.name, tmp_frozen_tar_path)
            try:
                hadoopy.mv(tmp_frozen_tar_path, frozen_tar_path)
            except IOError:
                # Another process may have won the rename race; only
                # re-raise if the final tar is still missing.
                if (not hadoopy.exists(frozen_tar_path)):
                    raise
        FREEZE_CACHE[script_abspath] = (cmds, frozen_tar_path)
    return {'cmds': cmds, 'frozen_tar_path': frozen_tar_path}
def FromStream(cls, stream):
    """Create a DataStreamSelector from a DataStream.

    Args:
        stream (DataStream): The data stream that we want to convert.

    Returns:
        DataStreamSelector: the selector for the given stream.
    """
    # System streams get a system-only match specifier, all others a
    # user-only one.
    match_spec = (DataStreamSelector.MatchSystemOnly if stream.system
                  else DataStreamSelector.MatchUserOnly)
    return DataStreamSelector(stream.stream_type, stream.stream_id, match_spec)
def _do_retrieve_scopes(self, http, token):
    """Retrieves the list of authorized scopes from the OAuth2 provider.

    Args:
        http: an object to be used to make HTTP requests.
        token: A string used as the token to identify the credentials to
            the provider.

    Raises:
        Error: When refresh fails, indicating that the access token is
            invalid.
    """
    logger.info('Refreshing scopes')
    query_params = {'access_token': token, 'fields': 'scope'}
    token_info_uri = _helpers.update_query_params(
        self.token_info_uri, query_params)
    resp, content = transport.request(http, token_info_uri)
    content = _helpers._from_bytes(content)
    if resp.status == http_client.OK:
        d = json.loads(content)
        self.scopes = set(_helpers.string_to_scopes(d.get('scope', '')))
    else:
        # Prefer the provider-supplied error description when the body is
        # valid JSON; otherwise fall back to the HTTP status code.
        error_msg = 'Invalid response {0}.'.format(resp.status)
        try:
            d = json.loads(content)
            if 'error_description' in d:
                error_msg = d['error_description']
        except (TypeError, ValueError):
            pass
        raise Error(error_msg)
def __init__(self, theta: types.RealTensor, mean_reversion: types.RealTensor, sigma: types.RealTensor, dtype: Optional[tf.DType]=None, name: Optional[str]=None):
    """Initializes the CIR Model.

    Args:
        theta: A positive scalar `Tensor` with shape `batch_shape` + [1].
        mean_reversion: A positive scalar `Tensor` of the same dtype and
            shape as `theta`. Means speed of reversion.
        sigma: A scalar `Tensor` of the same dtype and shape as `theta`.
            Means volatility.
        dtype: The default dtype to use when converting values to
            `Tensor`s. Default value: `None` which maps to `tf.float32`.
        name: Python string. The name to give to the ops created by this
            class. Default value: `None` which maps to the default name
            `cir_model`.
    """
    # The CIR process itself is one-dimensional; batching is carried in
    # the leading axes of the parameters.
    dim = 1
    dtype = dtype or tf.float32
    name = name or 'cir_model'
    with tf.name_scope(name):
        def _convert_param_to_tensor(param):
            # Broadcast a (possibly scalar) parameter to a length-`dim` vector.
            param_t = tf.convert_to_tensor(param, dtype=dtype)
            return param_t * tf.ones(shape=dim, dtype=dtype)
        def _get_batch_shape(param):
            # Everything but the trailing (dim) axis is the batch shape.
            param_shape = tff_utils.get_shape(param)
            return param_shape[:-1]
        self._theta = _convert_param_to_tensor(theta)
        self._mean_reversion = _convert_param_to_tensor(mean_reversion)
        self._sigma = _convert_param_to_tensor(sigma)
        self._batch_shape = _get_batch_shape(self._theta)
        self._batch_shape_rank = len(self._batch_shape)
        def _drift_fn(t, x):
            # Time-homogeneous drift: theta - mean_reversion * x.
            del t
            expand_rank = tff_utils.get_shape(x).rank - self._batch_shape_rank - 1
            theta_expand = self._expand_param_on_rank(self._theta, expand_rank, axis=-2)
            mean_reversion_expand = self._expand_param_on_rank(self._mean_reversion, expand_rank, axis=-2)
            return theta_expand - mean_reversion_expand * x
        def _volatility_fn(t, x):
            # Time-homogeneous volatility: sigma * sqrt(x).
            del t
            expand_rank = len(tff_utils.get_shape(x)) - self._batch_shape_rank - 1
            sigma_expand = self._expand_param_on_rank(self._sigma, expand_rank, axis=-2)
            return tf.expand_dims(sigma_expand * tf.sqrt(x), axis=-1)
        super(CirModel, self).__init__(dim, _drift_fn, _volatility_fn, dtype, name)
def __init__(self, backend_prop):
    """Chooses a Noise Adaptive Layout.

    Args:
        backend_prop (BackendProperties): backend properties object.

    Raises:
        TranspilerError: if invalid options
    """
    super().__init__()
    self.backend_prop = backend_prop
    # Directed graph over hardware qubits used for swap routing.
    self.swap_graph = nx.DiGraph()
    # Per-edge CX error rates and per-qubit readout error rates,
    # presumably filled from backend_prop during the pass run.
    self.cx_errors = {}
    self.readout_errors = {}
    self.available_hw_qubits = []
    self.gate_list = []
    self.gate_cost = {}
    # Cached swap paths/costs between hardware qubit pairs.
    self.swap_paths = {}
    self.swap_costs = {}
    # Undirected interaction graph of the program's qubits.
    self.prog_graph = nx.Graph()
    self.qarg_to_id = {}
    self.pending_program_edges = []
    # Mapping from program qubit id to hardware qubit.
    self.prog2hw = {}
def power_devices(self):
    """Gets the PowerDevices API client, creating it lazily on first use.

    Returns:
        PowerDevices: the cached client instance.
    """
    if self.__power_devices:
        return self.__power_devices
    self.__power_devices = PowerDevices(self.__connection)
    return self.__power_devices
def _ValidateFleetspeakServiceConfig(self, config_path):
    """Validates a Fleetspeak service config.

    Checks that the given file is a valid TextFormat representation of
    a Fleetspeak service config proto.

    Args:
        config_path: Path to the config file.

    Raises:
        BuildError: If the config is not valid.
    """
    with open(config_path, "rb") as f:
        pool = descriptor_pool.DescriptorPool()
        pool.AddDescriptor(fs_config_pb2.Config.DESCRIPTOR)
        parsed_config = text_format.Parse(
            f.read(), fs_system_pb2.ClientServiceConfig(), descriptor_pool=pool)
        if parsed_config.factory != "Daemon":
            raise BuildError(
                "Fleetspeak config does not have the expected factory type.")
        # The daemon-specific settings are packed in an Any field; unpack
        # them to validate the command line.
        daemon_cfg = fs_config_pb2.Config()
        parsed_config.config.Unpack(daemon_cfg)
        if not daemon_cfg.argv:
            raise BuildError(
                "Fleetspeak daemon service config does not specify command line "
                "args.")
def round(x, name=None):
    """Rounds the values of a tensor to the nearest integer, element-wise.

    Rounds half to even. Also known as bankers rounding. If you want to
    round according to the current system rounding mode use tf::cint.

    For example:

    ```python
    x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5])
    tf.round(x)  # [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
    ```

    Args:
        x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, or
            `int64`.
        name: A name for the operation (optional).

    Returns:
        A `Tensor` of same shape and type as `x`.
    """
    x = ops.convert_to_tensor(x, name='x')
    # Integer tensors are already rounded; skip emitting a no-op kernel.
    return x if x.dtype.is_integer else gen_math_ops.round(x, name=name)
def test_config_to_dict(test_config_string):
test_config = {}
if test_config_string:
for config in test_config_string.split(','):
key, value = config.split('=')
test_config[key] = value
return test_config | Parse the test config to a dictionary
Args:
test_config_string (str) this string come from the --test-config
flag of the bro executable run command | juraj-google-style |
def clear(cls, fn):
    """Clear the cached result on the given function.

    If the function has no cached result, this call will do nothing.

    Args:
        fn (FunctionType):
            The function whose cache should be cleared.
    """
    # EAFP: deleting a missing attribute is simply ignored.
    try:
        delattr(fn, cls.CACHE_VAR)
    except AttributeError:
        pass
def _and_join(self, close_group=False):
if (not self.initialized):
raise ValueError('You must add a search term before adding an operator.')
else:
self._operator('AND', close_group=close_group)
return self | Combine terms with AND.
There must be a term added before using this method.
Arguments:
close_group (bool): If ``True``, will end the current group and start a new one.
If ``False``, will continue current group.
Example::
If the current query is "(term1"
.and(close_group=True) => "(term1) AND ("
.and(close_group=False) => "(term1 AND "
Returns:
SearchHelper: Self | codesearchnet |
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal
    distribution. A weight decay is added only if one is specified.

    Args:
        name: name of the variable.
        shape: list of ints.
        stddev: standard deviation of a truncated Gaussian.
        wd: add L2Loss weight decay multiplied by this float. If None,
            weight decay is not added for this Variable.

    Returns:
        Variable Tensor
    """
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    var = _variable_on_cpu(
        name,
        shape,
        tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
    if wd is not None:
        # Collected under 'losses' so the total loss can sum the decay terms.
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
def _inputs_valid(self, output_condition_uris):
    """Validates the Transaction's Inputs against a given set of Outputs.

    Note:
        The number of `output_condition_uris` must be equal to the
        number of Inputs a Transaction has.

    Args:
        output_condition_uris (:obj:`list` of :obj:`str`): A list of
            Outputs to check the Inputs against.

    Returns:
        bool: If all Inputs are valid.
    """
    if len(self.inputs) != len(output_condition_uris):
        raise ValueError('Inputs and '
                         'output_condition_uris must have the same count')
    # Validate against the transaction serialized without signatures or id,
    # since those are derived from (and cannot be part of) the signed body.
    tx_dict = self.tx_dict if self.tx_dict else self.to_dict()
    tx_dict = Transaction._remove_signatures(tx_dict)
    tx_dict['id'] = None
    tx_serialized = Transaction._to_str(tx_dict)
    def validate(i, output_condition_uri=None):
        # Check input i against its corresponding output condition.
        return self._input_valid(self.inputs[i], self.operation,
                                 tx_serialized, output_condition_uri)
    return all(validate(i, cond)
               for i, cond in enumerate(output_condition_uris))
def set_control_mode(self, modevalue):
    """Set the control method using the corresponding integer value.

    Args:
        modevalue (int): 0-3. The modevalue is one of the keys in
            :data:`CONTROL_MODES`.
    """
    # Validate the range before writing so an out-of-range value never
    # reaches the instrument register.
    minimalmodbus._checkInt(modevalue, minvalue=0, maxvalue=3, description='control mode')
    self.write_register(4101, modevalue)
def __init__(self, api_url='https:
headers=None):
self.API_BASE_URL = '{api_url}'.format(**locals())
self.headers = headers | Initialize Paystack Request object for browsing resource.
Args:
api_url: str
headers: dict | juraj-google-style |
def _get_archive_filelist(filename):
names = []
if tarfile.is_tarfile(filename):
with tarfile.open(filename) as tar_file:
names = sorted(tar_file.getnames())
elif zipfile.is_zipfile(filename):
with zipfile.ZipFile(filename) as zip_file:
names = sorted(zip_file.namelist())
else:
raise ValueError("Can not get filenames from '{!s}'. "
"Not a tar or zip file".format(filename))
if "./" in names:
names.remove("./")
return names | Extract the list of files from a tar or zip archive.
Args:
filename: name of the archive
Returns:
Sorted list of files in the archive, excluding './'
Raises:
ValueError: when the file is neither a zip nor a tar archive
FileNotFoundError: when the provided file does not exist (for Python 3)
IOError: when the provided file does not exist (for Python 2) | juraj-google-style |
def getTraitCovarStdErrors(self,term_i):
    """Return standard errors on trait covariances from term ``term_i``
    (for the covariance estimate, see ``getTraitCovar``).

    Args:
        term_i: index of the term we are interested in.
    """
    assert self.init, 'GP not initialised'
    assert self.fast==False, 'Not supported for fast implementation'
    if self.P==1:
        # Single-trait case: delta-method variance of the scale parameter.
        out = (2*self.getScales()[term_i])**2*self._getLaplaceCovar()[term_i,term_i]
    else:
        C = self.vd.getTerm(term_i).getTraitCovar()
        n_params = C.getNumberParams()
        par_index = 0
        # NOTE(review): the bound `range(term_i-1)` and the use of `term_i`
        # (not the loop variable `term`) inside the loop look suspicious
        # for the par_index accumulation -- confirm against the original
        # project before relying on this offset.
        for term in range(term_i-1):
            par_index += self.vd.getTerm(term_i).getNumberScales()
        Sigma1 = self._getLaplaceCovar()[par_index:(par_index+n_params),:][:,par_index:(par_index+n_params)]
        out = sp.zeros((self.P,self.P))
        for param_i in range(n_params):
            out += C.Kgrad_param(param_i)**2*Sigma1[param_i,param_i]
            for param_j in range(param_i):
                out += 2*abs(C.Kgrad_param(param_i)*C.Kgrad_param(param_j))*Sigma1[param_i,param_j]
        out = sp.sqrt(out)
    return out
def node_traceback(self, element_name):
    """Try to retrieve the Python traceback of a node's construction.

    Args:
        element_name: (`str`) Name of a graph element (node or tensor).

    Returns:
        (list) The traceback list object as returned by the
        `extract_trace` method of Python's traceback module.

    Raises:
        LookupError: If the Python graph is not available for traceback
            lookup.
        KeyError: If the node cannot be found in the Python graph loaded.
    """
    if self._python_graph is None:
        raise LookupError('Python graph is not available for traceback lookup')
    node_name = debug_graphs.get_node_name(element_name)
    try:
        return self._node_traceback[node_name]
    except KeyError:
        raise KeyError('Cannot find node "%s" in Python graph' % node_name)
def with_row(self, row):
    """Return a copy of this table with an additional row appended.

    Args:
        ``row`` (sequence): A value for each column.

    Raises:
        ``ValueError``: If the row length differs from the column count.

    >>> tiles = Table(make_array('letter', 'count', 'points'))
    >>> tiles.with_row(['c', 2, 3]).with_row(['d', 4, 2])
    letter | count | points
    c      | 2     | 3
    d      | 4     | 2
    """
    extended = self.copy()
    extended.append(row)
    return extended
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
    """Extracts relevant Spotlight entries.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        match (Optional[dict[str: object]]): keys extracted from
            PLIST_KEYS.
    """
    shortcuts = match.get('UserShortcuts', {})
    for (search_text, data) in iter(shortcuts.items()):
        datetime_value = data.get('LAST_USED', None)
        if (not datetime_value):
            # Skip entries without a usable timestamp.
            continue
        display_name = data.get('DISPLAY_NAME', '<DISPLAY_NAME>')
        path = data.get('PATH', '<PATH>')
        event_data = plist_event.PlistTimeEventData()
        event_data.desc = 'Spotlight term searched "{0:s}" associate to {1:s} ({2:s})'.format(search_text, display_name, path)
        event_data.key = search_text
        event_data.root = '/UserShortcuts'
        event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
        parser_mediator.ProduceEventWithEventData(event, event_data)
def get_distance(self, node):
    """Return the Euclidean distance between this node and another.

    Args:
        node (object): The other node; must expose a 2-tuple ``pos``.
    """
    dx = node.pos[0] - self.pos[0]
    dy = node.pos[1] - self.pos[1]
    return sqrt(dx ** 2 + dy ** 2)
def update_variant_rank(self, case_obj, variant_type='clinical', category='snv'):
    """Update the ``variant_rank`` for all variants in a case.

    Adds a variant rank based on the rank score. Whenever variants are
    added to or removed from a case the variant rank needs updating.

    Args:
        case_obj(Case)
        variant_type(str)
        category(str)
    """
    variants = self.variant_collection.find({
        'case_id': case_obj['_id'],
        'category': category,
        'variant_type': variant_type,
    }).sort('rank_score', pymongo.DESCENDING)
    LOG.info("Updating variant_rank for all variants")
    requests = []
    for index, var_obj in enumerate(variants):
        # Flush in batches to bound memory use and request size.
        if len(requests) > 5000:
            try:
                self.variant_collection.bulk_write(requests, ordered=False)
                requests = []
            except BulkWriteError as err:
                LOG.warning("Updating variant rank failed")
                raise err
        operation = pymongo.UpdateOne(
            {'_id': var_obj['_id']},
            {
                '$set': {
                    'variant_rank': index + 1,
                }
            })
        requests.append(operation)
    # Write any remaining operations.
    # NOTE(review): pymongo's bulk_write raises InvalidOperation on an
    # empty request list -- confirm callers never hit the zero-variant case.
    try:
        self.variant_collection.bulk_write(requests, ordered=False)
    except BulkWriteError as err:
        LOG.warning("Updating variant rank failed")
        raise err
    LOG.info("Updating variant_rank done")
def lola_image(self, save=False, name='BaseLola.png'):
    """Draw the topography of the region of interest.

    Realized from the data taken by the LOLA instrument on board of LRO.

    Args:
        save (Optional[bool]): Whether or not to save the image.
            Defaults to False.
        name (Optional[str]): Absolute path to save the resulting image.
            Defaults to 'BaseLola.png' in the working directory.

    Returns:
        An image corresponding to the region topography.

    Note:
        Nice to use in a jupyter notebook with ``%matplotlib inline``
        activated. Feel free to modify this method to plot exactly what
        you need.
    """
    fig = plt.figure(figsize=(10, 8))
    ax1 = fig.add_subplot(111)
    (lon_m, lon_M, lat_m, lat_M) = self.lambert_window(self.size_window, self.lat0, self.lon0)
    # Lambert azimuthal equal-area projection centred on the site;
    # rsphere=1734400 m is presumably the lunar radius -- confirm.
    m = Basemap(llcrnrlon=lon_m, llcrnrlat=lat_m, urcrnrlon=lon_M, urcrnrlat=lat_M, resolution='i', projection='laea', rsphere=1734400, lat_0=self.lat0, lon_0=self.lon0)
    (Xl, Yl, Zl) = self.get_arrays('Lola')
    (Xl, Yl) = m(Xl, Yl)
    CS = m.pcolormesh(Xl, Yl, Zl, cmap='gist_earth', alpha=0.5, ax=ax1, zorder=1)
    (xc, yc) = m(self.lon0, self.lat0)
    # Mark the centre of the region.
    ax1.scatter(xc, yc, s=200, marker='v', zorder=2)
    self._add_scale(m, ax1)
    self._add_colorbar(m, CS, ax1, 'Topography')
    if (save == True):
        fig.savefig(name, rasterized=True, dpi=50, bbox_inches='tight', pad_inches=0.1)
def _has_nchw_support():
    """Check whether the current scope supports NCHW ops.

    TensorFlow does not support NCHW on CPU, so NCHW is only usable when
    we are not explicitly placed on CPU and GPUs are available (in which
    case there will be soft-placing on the GPU device).

    Returns:
        bool: if the current scope device placement would support nchw.
    """
    on_cpu_scope = _is_current_explicit_device('CPU')
    has_gpu = bool(_get_available_gpus())
    return has_gpu and not on_cpu_scope
def configure_vrf(self, vrf_name, commands):
    """Configure the specified VRF using commands.

    Args:
        vrf_name (str): The VRF name to configure.
        commands: The list of commands to configure.

    Returns:
        True if the commands completed successfully.
    """
    # All commands run inside the VRF definition context.
    context_header = 'vrf definition %s' % vrf_name
    command_list = make_iterable(commands)
    command_list.insert(0, context_header)
    return self.configure(command_list)
def decode(self, music_tokens, start_level=0, end_level=None, bs_chunks=1) -> torch.Tensor:
    """Transforms the input `music_tokens` to their `raw_audio` representation.

    Args:
        music_tokens (`torch.LongTensor`):
            Tensor of music tokens which will be decoded to raw audio by
            using the codebook. Each music token should be an index to a
            corresponding `code` vector in the codebook.
        start_level (`int`, *optional*):
            Level at which the decoding process will start. Default to 0.
        end_level (`int`, *optional*):
            Level at which the decoding process will end. Default to None.
        bs_chunks (int, *optional*):
            Number of chunks to process at the same time.
    """
    # Split along the batch axis so large batches are decoded chunk-wise.
    token_chunks = [torch.chunk(token, bs_chunks, dim=0) for token in music_tokens]
    dequantised_states = []
    for i in range(bs_chunks):
        music_tokens_i = [chunks[i] for chunks in token_chunks]
        dequantised_state = self._decode(music_tokens_i, start_level=start_level, end_level=end_level)
        dequantised_states.append(dequantised_state)
    return torch.cat(dequantised_states, dim=0)
def from_log(cls, log, cutoff=None, components=None, legend=None, legend_field=None, field=None, right=False, basis=None, source='Log'):
    """Turn a 1D array into a striplog, given a cutoff.

    Args:
        log (array-like): A 1D array or a list of integers.
        cutoff (number or array-like): The log value(s) at which to bin
            the log. Optional.
        components (array-like): A list of components. Use this or
            ``legend``.
        legend (``Legend``): A legend object. Use this or ``components``.
        legend_field ('str'): If you're not trying to match against
            components, then you can match the log values to this field in
            the Decors.
        field (str): The field in the Interval's ``data`` to store the log
            values as.
        right (bool): Which side of the cutoff to send things that are
            equal to, i.e. right on, the cutoff.
        basis (array-like): A depth basis for the log, so striplog knows
            where to put the boundaries.
        source (str): The source of the data. Default 'Log'.

    Returns:
        Striplog: The ``striplog`` object.
    """
    if ((components is None) and (legend is None) and (field is None)):
        m = 'You must provide a list of components, and legend, or a field.'
        raise StriplogError(m)
    if ((legend is not None) and (legend_field is None)):
        try:
            components = [deepcopy(decor.component) for decor in legend]
        except AttributeError:
            pass
    if (legend_field is not None):
        # Match log values against a numeric field of the legend's decors.
        field_values = [getattr(d, legend_field, 0) for d in legend]
        components = [Component() for i in range(int((max(field_values) + 1)))]
        for (i, decor) in enumerate(legend):
            components[i] = deepcopy(decor.component)
    if (cutoff is not None):
        # Accept a scalar or a sequence of cutoffs.
        try:
            n = len(cutoff)
        except TypeError:
            n = 1
        if (len(components) < (n + 1)):
            # Bug fix: the two concatenated literals were missing a
            # separating space ('at leastn+1 components.').
            m = 'For n cutoffs, you need to provide at least '
            m += 'n+1 components.'
            raise StriplogError(m)
        try:
            a = np.digitize(log, cutoff, right)
        except ValueError:
            a = np.digitize(log, [cutoff], right)
    else:
        a = np.copy(log)
    (tops, values) = utils.tops_from_loglike(a)
    if (basis is None):
        m = 'You must provide a depth or elevation basis.'
        raise StriplogError(m)
    list_of_Intervals = cls.__intervals_from_tops(tops, values, basis, components, field=field)
    return cls(list_of_Intervals, source=source)
def Header(self):
    """Get the block header, building and caching it on first access.

    Returns:
        neo.Core.Header:
    """
    if self._header:
        return self._header
    self._header = Header(self.PrevHash, self.MerkleRoot, self.Timestamp, self.Index, self.ConsensusData, self.NextConsensus, self.Script)
    return self._header
class FlaxBaseModelOutputWithNoAttention(ModelOutput):
    """Base class for model's outputs, with potential hidden states.

    Args:
        last_hidden_state (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one
            for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the
            model at the output of each layer plus the optional initial embedding outputs.
    """

    last_hidden_state: Optional[jnp.ndarray] = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
def get_corner(self, time):
    """Get the array indices of the upper-left corner of the STObject's
    bounding box at a given time.

    Args:
        time: time at which the corner is being extracted.

    Returns:
        (row, col) corner indices, or (-1, -1) when ``time`` falls outside
        the object's lifetime.
    """
    # Inverted guard: bail out early when outside [start_time, end_time].
    if time < self.start_time or time > self.end_time:
        return -1, -1
    offset = time - self.start_time
    return self.i[offset][0, 0], self.j[offset][0, 0]
def state_size(self) -> Sequence[Sequence[int]]:
    """The size of each state fluent in canonical order.

    Returns:
        Sequence[Sequence[int]]: A tuple of tuples of integers
        representing the shape and size of each fluent.
    """
    return self._fluent_size(self.domain.state_fluents,
                             self.domain.state_fluent_ordering)
def check_decorator_order(filename: str) -> List[int]:
    """Check that in a given test file, the slow decorator is always last.

    Args:
        filename (`str`): The path to a test file to check.

    Returns:
        `List[int]`: The list of failures as a list of indices where there
        are problems.
    """
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Name of the decorator seen earlier in the current decorator stack;
    # None when not currently inside a stack of decorators.
    decorator_before = None
    errors = []
    for i, line in enumerate(lines):
        search = _re_decorator.search(line)
        if search is not None:
            decorator_name = search.groups()[0]
            # A parameterized decorator appearing after another decorator
            # in the same stack is flagged as out of order.
            if decorator_before is not None and decorator_name.startswith('parameterized'):
                errors.append(i)
            decorator_before = decorator_name
        elif decorator_before is not None:
            # A non-decorator line terminates the current decorator stack.
            decorator_before = None
    return errors
def GetStoredHostname(self):
    """Retrieves the stored hostname.

    The hostname is determined based on the preprocessing information
    that is stored inside the storage file.

    Returns:
        str: hostname, or None when not available.
    """
    # NOTE(review): using len() as the lookup key assumes store numbers
    # are 1-based; a 0-based mapping would make this an off-by-one --
    # confirm against how _hostnames is populated.
    store_number = len(self._hostnames)
    return self._hostnames.get(store_number, None)
def __init__(self, reader_ref, supports_serialize=False):
    """Creates a new ReaderBase.

    Args:
        reader_ref: The operation that implements the reader.
        supports_serialize: True if the reader implementation can
            serialize its state.

    Raises:
        RuntimeError: If eager execution is enabled.
    """
    if context.executing_eagerly():
        # Readers are graph-mode-only constructs.
        raise RuntimeError('Readers are not supported when eager execution is enabled. Instead, please use tf.data to get data into your model.')
    self._reader_ref = reader_ref
    self._supports_serialize = supports_serialize
def _verify_signature(message, signature, certs):
    """Verifies signed content using a list of certificates.

    Args:
        message: string or bytes, The message to verify.
        signature: string or bytes, The signature on the message.
        certs: iterable, certificates in PEM format.

    Raises:
        AppIdentityError: If none of the certificates can verify the
            message against the signature.
    """
    # A single successfully-verifying certificate is sufficient.
    if any(Verifier.from_string(pem, is_x509_cert=True).verify(message, signature)
           for pem in certs):
        return
    raise AppIdentityError('Invalid token signature')
def _print_tensor(tensor_name, num_elements, tensor, output_tensor):
    """Prints a tensor value to a trace file (or stderr).

    Args:
        tensor_name: name of the tensor being traced.
        num_elements: number of elements to print (-1 means print all).
        tensor: the tensor needs to be returned.
        output_tensor: the tensor needs to be printed.

    Returns:
        NOTE(review): the original docstring claims the `tensor` argument
        is returned, but the code returns the result of
        `logging_ops.print_v2` -- confirm which is intended.

    Raises:
        ValueError: If tensor_name is not already in
            tensor_trace_order.tensorname_to_cache_idx.
    """
    if self._parameters.is_brief_mode():
        if tensor_name not in tensor_trace_order.tensorname_to_cache_idx:
            raise ValueError('Tensor %s with name %s is not in the tensorname_to_cache_idx' % (tensor, tensor_name))
        # Brief mode prints the cache index instead of the full name.
        msg = '%d' % tensor_trace_order.tensorname_to_cache_idx[tensor_name]
    else:
        msg = '"%s"' % tensor_name
    if self._parameters.trace_dir:
        output_path = os.path.join(self._parameters.trace_dir, _TRACE_FILE_NAME + self._get_outfile_suffix())
        output_stream = _OUTPUT_STREAM_ESCAPE + output_path
    else:
        output_stream = sys.stderr
    return logging_ops.print_v2(msg, array_ops.shape(output_tensor), '@', self._replica_id, '\n', output_tensor, '\n', summarize=num_elements, output_stream=output_stream)
def get_customer(self, customer_id):
    """Queries the information related to the customer.

    Args:
        customer_id: Identifier of the client from which you want to find
            the associated information.

    Returns:
        The API response for the customer query.
    """
    endpoint = self.url + 'customers/{}'.format(customer_id)
    return self.client._get(endpoint, headers=self.get_headers())
def get_asides(self, block):
    """Return instances for all of the asides that will decorate `block`.

    Arguments:
        block (:class:`.XBlock`): The block to render retrieve asides for.

    Returns:
        List of XBlockAside instances
    """
    applicable = []
    for aside_type in self.applicable_aside_types(block):
        aside_instance = self.get_aside_of_type(block, aside_type)
        if aside_instance.should_apply_to_block(block):
            applicable.append(aside_instance)
    return applicable
def chdir(self, target_directory):
    """Change current working directory to target directory.

    Args:
        target_directory: The path to new current working directory.

    Raises:
        OSError: if user lacks permission to enter the argument directory
            or if the target is not a directory.
    """
    target_directory = self.filesystem.resolve_path(target_directory, allow_fd=True)
    self.filesystem.confirmdir(target_directory)
    directory = self.filesystem.resolve(target_directory)
    # Bug fix: test the execute bit with a bitwise AND. The original used
    # `st_mode | PERM_EXE`, which is always non-zero, so the permission
    # check could never fire.
    if not is_root() and not directory.st_mode & PERM_EXE:
        self.filesystem.raise_os_error(errno.EACCES, directory)
    self.filesystem.cwd = target_directory
def trees_by_path(self, path):
    """Search trees by `path`.

    Args:
        path (str): :attr:`.Tree.path` property of :class:`.Tree`.

    Returns:
        set: Set of matching :class:`Tree` instances.
    """
    matching_trees = self.path_db.get(path, OOSet())
    return set(matching_trees.keys())
def get_bkg_qq_data(id=None, bkg_id=None):
    """Get data for a quantile-quantile plot of the background data and model.

    The inputs are implicit; the data are obtained from the current state
    of the Sherpa ``ui`` module.

    Args:
        id: The dataset id for which to get the data; defaults if
            unspecified.
        bkg_id: The identifier of the background; defaults if unspecified.

    Returns:
        An ndarray of shape ``(3, npts)``. The first slice is the energy
        axis in keV; the second is the observed values in each bin (counts,
        or rate, or rate per keV, etc.); the third is the corresponding
        model value in each bin.
    """
    bdata = ui.get_bkg(id=id, bkg_id=bkg_id)
    kev = bdata.get_x()
    obs_data = bdata.counts
    # Evaluate the background model on the same energy grid as the data.
    model_data = ui.get_bkg_model(id=id, bkg_id=bkg_id)(kev)
    return np.vstack((kev, obs_data, model_data))
def _stop_trial(self, trial, error=False, error_msg=None,
                stop_logger=True):
    """Stop `trial`, releasing its resources; never raises to the caller.

    Args:
        trial (Trial): the trial to stop.
        error (bool): mark the trial as terminated in error.
        error_msg (str): optional error message to log to the trial.
        stop_logger (bool): whether to shut down the trial logger.
    """
    if stop_logger:
        trial.close_logger()
    if error:
        self.set_status(trial, Trial.ERROR)
    else:
        self.set_status(trial, Trial.TERMINATED)
    try:
        trial.write_error_log(error_msg)
        if hasattr(trial, "runner") and trial.runner:
            # Cache the actor for reuse only on a clean stop and when no
            # actor is already cached; otherwise tear it down remotely.
            if (not error and self._reuse_actors
                    and self._cached_actor is None):
                logger.debug("Reusing actor for {}".format(trial.runner))
                self._cached_actor = trial.runner
            else:
                logger.info(
                    "Destroying actor for trial {}. If your trainable is "
                    "slow to initialize, consider setting "
                    "reuse_actors=True to reduce actor creation "
                    "overheads.".format(trial))
                trial.runner.stop.remote()
                trial.runner.__ray_terminate__.remote()
    except Exception:
        # Stopping must not propagate; record the failure on the trial.
        logger.exception("Error stopping runner for Trial %s", str(trial))
        self.set_status(trial, Trial.ERROR)
    finally:
        trial.runner = None | Stops this trial.
Stops this trial, releasing all allocating resources. If stopping the
trial fails, the run will be marked as terminated in error, but no
exception will be thrown.
Args:
error (bool): Whether to mark this trial as terminated in error.
error_msg (str): Optional error message.
stop_logger (bool): Whether to shut down the trial logger. | juraj-google-style |
def get_reversed_statuses(context):
    """Return a mapping of exit codes to status strings.

    Args:
        context (scriptworker.context.Context): the scriptworker context

    Returns:
        dict: the mapping of exit codes to status strings.
    """
    # Invert the global STATUSES map, then layer on any per-config overrides.
    exit_code_to_status = dict(zip(STATUSES.values(), STATUSES.keys()))
    exit_code_to_status.update(dict(context.config['reversed_statuses']))
    return exit_code_to_status
Args:
context (scriptworker.context.Context): the scriptworker context
Returns:
dict: the mapping of exit codes to status strings. | codesearchnet |
def mtr_lm_v1():
    """Model incorporating mixture-of-experts, local and global attention.

    ~6B parameters; 32 experts (8x4 grid) in 3 hierarchical moe layers.

    Returns:
        a hparams
    """
    hparams = mtr_lm_dense(0)
    # Repeat the 8-layer pattern 4 times, dropping the final moe layer.
    hparams.layers = (['local_self_att', 'local_self_att', 'drd', 'self_att', 'drd', 'local_self_att', 'local_self_att', 'moe_2d'] * 4)[:(- 1)]
    hparams.d_kv = 128
    # 8 x 4 = 32 experts arranged on a 2D grid.
    hparams.moe_expert_x = 8
    hparams.moe_expert_y = 4
    hparams.moe_hidden_size = 32768
    hparams.d_ff = 2048
    hparams.num_memory_heads = 0
    # 2D device mesh (4 x 8) and tensor layout for mesh-tensorflow.
    hparams.mesh_shape = 'b0:4;b1:8'
    hparams.layout = 'outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0'
    hparams.outer_batch_size = 4
    return hparams | Model incorporating mixture-of-experts, local and global attention.
~6B parameters
32 experts in 3 hierarchichal moe layers.
Returns:
a hparams | codesearchnet |
def detect_response_encoding(response, is_html=False, peek=131072):
    """Return the likely encoding of the response document.

    Args:
        response (Response): An instance of :class:`.http.Response`.
        is_html (bool): See :func:`.util.detect_encoding`.
        peek (int): The maximum number of bytes of the document to be analyzed.

    Returns:
        ``str``, ``None``: The codec name.
    """
    # Use the encoding declared in the HTTP headers as the initial hint.
    encoding = get_heading_encoding(response)
    # Refine by sniffing up to `peek` bytes of the body itself.
    encoding = wpull.string.detect_encoding(wpull.util.peek_file(response.body, peek), encoding=encoding, is_html=is_html)
    _logger.debug(__('Got encoding: {0}', encoding))
    return encoding | Return the likely encoding of the response document.
Args:
response (Response): An instance of :class:`.http.Response`.
is_html (bool): See :func:`.util.detect_encoding`.
peek (int): The maximum number of bytes of the document to be analyzed.
Returns:
``str``, ``None``: The codec name. | codesearchnet |
def default(self, interface, vrid):
    """Defaults a vrrp instance on an interface.

    Note:
        Defaulting removes the specified vrrp from the node's operational
        config; if the vrrp does not exist on the interface no change is
        made but the call still reports success.

    Args:
        interface (string): The interface to configure.
        vrid (integer): The vrid number for the vrrp to be defaulted.

    Returns:
        True if the vrrp could be defaulted otherwise False (see Node)
    """
    return self.configure_interface(interface, 'default vrrp %d' % vrid)
Note:
This method will attempt to default the vrrp on the node's
operational config. Default results in the deletion of the
specified vrrp . If the vrrp does not exist on the
interface then this method will not perform any changes
but still return True
Args:
interface (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be defaulted.
Returns:
True if the vrrp could be defaulted otherwise False (see Node) | codesearchnet |
def as_tensor(self):
    """Returns the overall concatenated value as a `Tensor`.

    The returned tensor will not inherit the control dependencies from the
    scope where the value is used, which is similar to getting the value of
    `Variable`.

    Returns:
        `Tensor` containing the concatenated value.
    """
    # Clear the control-dependency scope so the concat op is independent.
    with ops.control_dependencies(None):
        return self._concat() | Returns the overall concatenated value as a `Tensor`.
The returned tensor will not inherit the control dependencies from the scope
where the value is used, which is similar to getting the value of
`Variable`.
Returns:
`Tensor` containing the concatenated value. | github-repos |
def check_panels(adapter, panels, default_panels=None):
    """Make sure that the gene panels exist in the database.

    Also checks that every default panel is listed among `panels`.

    Args:
        adapter(MongoAdapter)
        panels(list(str)): A list with panel names
        default_panels(list(str)): Panel names that must appear in `panels`

    Returns:
        panels_exist(bool)
    """
    panels_exist = True
    # Every default panel must also be declared in `panels`.
    for default_panel in default_panels or []:
        if default_panel not in panels:
            log.warning('Default panels have to be defined in panels')
            panels_exist = False
    # Every declared panel must exist in the database.
    for panel_name in panels:
        if not adapter.gene_panel(panel_name):
            log.warning('Panel {} does not exist in database'.format(panel_name))
            panels_exist = False
    return panels_exist
Also check if the default panels are defined in gene panels
Args:
adapter(MongoAdapter)
panels(list(str)): A list with panel names
Returns:
panels_exists(bool) | codesearchnet |
def _ReformatMessageString(self, message_string):
    """Reformats a Windows EventLog message string.

    Args:
        message_string (str): message string.

    Returns:
        str: message string in Python format() (PEP 3101) style, or None
            when `message_string` is empty.
    """
    def _PlaceHolderSpecifierReplacer(match_object):
      # Converts 1-based %N place holders into 0-based {N:s} specifiers;
      # non-numeric groups are passed through unchanged.
      expanded_groups = []
      for group in match_object.groups():
        try:
          place_holder_number = int(group, 10) - 1
          expanded_group = '{{{0:d}:s}}'.format(place_holder_number)
        except ValueError:
          expanded_group = group
        expanded_groups.append(expanded_group)
      return ''.join(expanded_groups)
    if not message_string:
      return None
    # Substitution order matters: strip whitespace specifiers, escape text
    # specifiers, double curly brackets, then expand place holders.
    message_string = self._WHITE_SPACE_SPECIFIER_RE.sub(r'', message_string)
    message_string = self._TEXT_SPECIFIER_RE.sub(r'\\\1', message_string)
    message_string = self._CURLY_BRACKETS.sub(r'\1\1', message_string)
    return self._PLACE_HOLDER_SPECIFIER_RE.sub(
        _PlaceHolderSpecifierReplacer, message_string) | Reformats the message string.
Args:
message_string (str): message string.
Returns:
str: message string in Python format() (PEP 3101) style. | juraj-google-style |
def get(self, key):
    """Return the object named by key or None if it does not exist.

    Args:
        key: Key naming the object to retrieve

    Returns:
        object or None
    """
    return self._read_object(self.object_path(key))
Args:
key: Key naming the object to retrieve
Returns:
object or None | juraj-google-style |
def GetPublicCert(self):
    """Download Gitkit public cert.

    Returns:
        dict of public certs.

    Raises:
        errors.GitkitServerError: if the cert endpoint does not return 200.
    """
    cert_url = (self.google_api_url + 'publicKeys')
    (resp, content) = self.http.request(cert_url)
    if (resp.status == 200):
        return simplejson.loads(content)
    else:
        raise errors.GitkitServerError(('Error response for cert url: %s' % content)) | Download Gitkit public cert.
Returns:
dict of public certs. | codesearchnet |
def read32(self, offset):
    """Read 32-bits from the specified `offset` in bytes, relative to the
    base physical address of the MMIO region.

    Args:
        offset (int, long): offset from base physical address, in bytes.

    Returns:
        int: 32-bit value read.

    Raises:
        TypeError: if `offset` type is invalid.
        ValueError: if `offset` is out of bounds.
    """
    # NOTE: ``long`` only exists on Python 2; this code targets py2.
    if (not isinstance(offset, (int, long))):
        raise TypeError('Invalid offset type, should be integer.')
    offset = self._adjust_offset(offset)
    self._validate_offset(offset, 4)
    # '=L' unpacks a native-byte-order unsigned 32-bit value.
    return struct.unpack('=L', self.mapping[offset:(offset + 4)])[0] | Read 32-bits from the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
Returns:
int: 32-bit value read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds. | codesearchnet |
def get_c_function(self, name):
    """Get a C API TF_Function from the context.

    Args:
        name: Name of the function to get.

    Returns:
        A ScopedTFFunction wrapping the C API TF_Function.
    """
    self.ensure_initialized()
    return c_api_util.ScopedTFFunction(pywrap_tfe.TFE_ContextGetFunction(self._handle, name), name) | Get a C API TF_Function from the context.
Args:
name: Name of the function to get.
Returns:
A ScopedTFFunction wrapping the C API TF_Function. | github-repos |
def one_hot_encoding(labels, num_classes, scope=None):
    """Transform numeric labels into onehot_labels.

    Args:
        labels: [batch_size] target labels.
        num_classes: total number of classes.
        scope: Optional scope for name_scope.

    Returns:
        one hot encoding of the labels, a float [batch_size, num_classes]
        tensor of 0.0/1.0 values.
    """
    with tf.name_scope(scope, 'OneHotEncoding', [labels]):
        # tf.one_hot replaces the deprecated sparse_to_dense construction
        # and yields the same dense float [batch_size, num_classes] output.
        labels = tf.cast(labels, tf.int64)
        onehot_labels = tf.one_hot(labels, num_classes, on_value=1.0, off_value=0.0)
        onehot_labels.set_shape([labels.get_shape()[0], num_classes])
        return onehot_labels
Args:
labels: [batch_size] target labels.
num_classes: total number of classes.
scope: Optional scope for name_scope.
Returns:
one hot encoding of the labels. | juraj-google-style |
def score(text, *score_functions):
    """Score ``text`` using ``score_functions``.

    Examples:
        >>> score("abc", function_a)
        >>> score("abc", function_a, function_b)

    Args:
        text (str): The text to score
        *score_functions (variable length argument list): functions to score with

    Returns:
        Arithmetic mean of scores

    Raises:
        ValueError: If score_functions is empty
    """
    if not score_functions:
        raise ValueError("score_functions must not be empty")
    individual_scores = [scorer(text) for scorer in score_functions]
    return statistics.mean(individual_scores)
Examples:
>>> score("abc", function_a)
>>> score("abc", function_a, function_b)
Args:
text (str): The text to score
*score_functions (variable length argument list): functions to score with
Returns:
Arithmetic mean of scores
Raises:
ValueError: If score_functions is empty | juraj-google-style |
def split_line(what, indent='', cols=79):
    """Split a line on the closest space, or break the last word with '-'.

    Args:
        what(str): text to split one line of.
        indent(str): prepended to the split line and counted against `cols`.
        cols(int): maximum length of the split line.

    Returns:
        tuple(str, str): rest of the text and split line in that order.

    Raises:
        ValueError: when the indent is greater than cols, or the cols
            param is too small.
    """
    if len(indent) > cols:
        raise ValueError("The indent can't be longer than cols.")
    if cols < 2:
        raise ValueError("The cols can't be smaller than 2 (a char plus a possible '-')")
    text = indent + what.lstrip()
    if len(text) <= cols:
        remainder, line = '', text
    else:
        # rfind yields -1 when no space exists, matching the old sentinel.
        space_pos = text[:cols].rfind(' ')
        if space_pos > len(indent):
            remainder, line = text[space_pos:], text[:space_pos]
        elif text[cols] == ' ':
            remainder, line = text[cols:], text[:cols]
        else:
            # No usable space: hyphenate the last word.
            remainder, line = text[cols - 1:], text[:cols - 1] + '-'
    return remainder.lstrip(), line.rstrip()
Args:
what(str): text to spli one line of.
indent(str): will prepend this indent to the split line, taking it into
account in the column count.
cols(int): maximum length of the split line.
Returns:
tuple(str, str): rest of the text and split line in that order.
Raises:
ValueError: when the indent is greater than the indent, or the cols
param is too small | codesearchnet |
def add_user(username, password):
    """Register a new ProFTPD user.

    Adds a record to the passwd-like file, creates the user's home
    directory and sets permissions on the important files.

    Args:
        username (str): User's name.
        password (str): User's password.
    """
    assert _is_valid_username(username), \
        "Invalid format of username '%s'!" % username
    assert username not in passwd_reader.load_users(), \
        "User '%s' is already registered!" % username
    assert password, "Password is reqired!"
    home_dir = settings.DATA_PATH + username
    # Append the account to ProFTPD's passwd file via ftpasswd; the
    # password is supplied on stdin so it never appears in argv.
    sh.ftpasswd(
        passwd=True,
        name=username,
        home=home_dir,
        shell="/bin/false",
        uid=settings.PROFTPD_USERS_GID,
        gid=settings.PROFTPD_USERS_GID,
        stdin=True,
        file=settings.LOGIN_FILE,
        _in=password
    )
    # NOTE: 0775 / 0600 are Python 2 octal literals; this module targets py2.
    if not os.path.exists(home_dir):
        os.makedirs(home_dir, 0775)
    passwd_reader.set_permissions(home_dir, gid=settings.PROFTPD_USERS_GID)
    passwd_reader.set_permissions(settings.LOGIN_FILE, mode=0600)
    create_lock_file(home_dir + "/" + settings.LOCK_FILENAME)
    reload_configuration() | Adds record to passwd-like file for ProFTPD, creates home directory and
sets permissions for important files.
Args:
username (str): User's name.
password (str): User's password. | juraj-google-style |
def __init__(self, input_reader=None, output_writer=None):
    """Initializes the CLI tool object.

    Args:
        input_reader (Optional[InputReader]): input reader, where None
            indicates that the stdin input reader should be used.
        output_writer (Optional[OutputWriter]): output writer, where None
            indicates that the stdout output writer should be used.
    """
    super(PstealTool, self).__init__(
        input_reader=input_reader, output_writer=output_writer)
    self._artifacts_registry = None
    self._command_line_arguments = None
    self._deduplicate_events = True
    self._enable_sigsegv_handler = False
    self._knowledge_base = knowledge_base.KnowledgeBase()
    self._number_of_analysis_reports = 0
    self._number_of_extraction_workers = 0
    self._output_format = None
    self._parsers_manager = parsers_manager.ParsersManager
    self._preferred_language = 'en-US'
    self._preferred_year = None
    self._status_view_mode = status_view.StatusView.MODE_WINDOW
    self._status_view = status_view.StatusView(self._output_writer, self.NAME)
    self._time_slice = None
    self._use_time_slicer = False
    # Flags toggled by command line arguments to list capabilities and exit.
    self.list_hashers = False
    self.list_language_identifiers = False
    self.list_output_modules = False
    self.list_parsers_and_plugins = False
    self.list_timezones = False | Initializes the CLI tool object.
Args:
input_reader (Optional[InputReader]): input reader, where None indicates
that the stdin input reader should be used.
output_writer (Optional[OutputWriter]): output writer, where None
indicates that the stdout output writer should be used. | juraj-google-style |
def remove_padding(sequence):
    """Selects the used frames of a sequence, up to its length.

    This function does not expect a batch of sequences, but a single
    sequence. The sequence must be a dict with `length` key, which will be
    removed from the result.

    Args:
        sequence: Nested dict of tensors with time dimension.

    Returns:
        Nested dict of tensors with padding elements and `length` key removed.
    """
    # NOTE: pop() mutates the caller's dict in place by removing 'length'.
    length = sequence.pop('length')
    # Truncate every tensor in the nested structure to the true length.
    sequence = tools.nested.map((lambda tensor: tensor[:length]), sequence)
    return sequence | Selects the used frames of a sequence, up to its length.
This function does not expect a batch of sequences, but a single sequence.
The sequence must be a dict with `length` key, which will removed from the
result.
Args:
sequence: Nested dict of tensors with time dimension.
Returns:
Nested dict of tensors with padding elements and `length` key removed. | codesearchnet |
def __init__(self, *args, root_path: Optional[utils.KeyPath]=None, override_args: bool=False, ignore_extra_args: bool=False, **kwargs):
    """Constructor.

    Args:
        *args: prebound positional arguments.
        root_path: The symbolic path for current object.
        override_args: If True, allows arguments provided during `__call__`
            to override existing bound arguments.
        ignore_extra_args: If True, unsupported arguments can be passed in
            during `__call__` without using them. Otherwise, calling with
            unsupported arguments will raise error.
        **kwargs: prebound keyword arguments.

    Raises:
        TypeError: constructor got unexpected or duplicate arguments.
    """
    # `allow_partial` is consumed here; binding is always partial-friendly.
    _ = kwargs.pop('allow_partial', None)
    varargs = None
    signature = self.__signature__
    if len(args) > len(signature.args):
      # Overflowing positionals go into *varargs when declared, else error.
      if signature.varargs:
        varargs = list(args[len(signature.args):])
        args = args[:len(signature.args)]
      else:
        arg_phrase = utils.auto_plural(len(signature.args), 'argument')
        was_phrase = utils.auto_plural(len(args), 'was', 'were')
        raise TypeError(f'{signature.id}() takes {len(signature.args)} positional {arg_phrase} but {len(args)} {was_phrase} given.')
    # Collect explicitly provided values; MISSING_VALUE means "not bound".
    bound_kwargs = dict()
    for i, v in enumerate(args):
      if pg_typing.MISSING_VALUE != v:
        bound_kwargs[signature.args[i].name] = v
    if varargs is not None:
      bound_kwargs[signature.varargs.name] = varargs
    for k, v in kwargs.items():
      if pg_typing.MISSING_VALUE != v:
        if k in bound_kwargs:
          raise TypeError(f'{signature.id}() got multiple values for keyword argument {k!r}.')
        bound_kwargs[k] = v
    # Partition bound names into those equal to their declared default and
    # those carrying user-specified non-default values.
    default_args = set()
    non_default_args = set(bound_kwargs)
    for arg_spec in signature.named_args:
      if not arg_spec.value_spec.has_default:
        continue
      arg_name = arg_spec.name
      if arg_name not in non_default_args:
        default_args.add(arg_name)
      elif bound_kwargs[arg_name] == arg_spec.value_spec.default:
        default_args.add(arg_name)
        non_default_args.discard(arg_name)
    if signature.varargs and (not varargs):
      default_args.add(signature.varargs.name)
    super().__init__(allow_partial=True, root_path=root_path, **bound_kwargs)
    self._non_default_args = non_default_args
    self._default_args = default_args
    self._specified_args = set(bound_kwargs)
    self._override_args = override_args
    self._ignore_extra_args = ignore_extra_args
    # Thread-local state is only needed for subclassed functors.
    self._tls = threading.local() if self.is_subclassed_functor else None | Constructor.
Args:
*args: prebound positional arguments.
root_path: The symbolic path for current object.
override_args: If True, allows arguments provided during `__call__` to
override existing bound arguments.
ignore_extra_args: If True, unsupported arguments can be passed in
during `__call__` without using them. Otherwise, calling with
unsupported arguments will raise error.
**kwargs: prebound keyword arguments.
Raises:
KeyError: constructor got unexpected arguments. | github-repos |
def closest_leaf_to_root(self):
    """Return the leaf closest to the root and the corresponding distance.

    Edges with no length are considered to have a length of 0.

    Returns:
        ``tuple``: The closest leaf to the root, then its distance.
    """
    best = (None, float('inf'))
    d = dict()
    for node in self.traverse_preorder():
        # Missing edge lengths count as 0.
        if (node.edge_length is None):
            d[node] = 0
        else:
            d[node] = node.edge_length
        # Preorder guarantees the parent's distance is already in `d`.
        if (not node.is_root()):
            d[node] += d[node.parent]
        if (node.is_leaf() and (d[node] < best[1])):
            best = (node, d[node])
    return best | Return the leaf that is closest to the root and the corresponding distance. Edges with no length will be considered to have a length of 0
Returns:
``tuple``: First value is the closest leaf to the root, and second value is the corresponding distance | codesearchnet |
def binding_site_mol2(self, residues, force_rerun=False):
    """Write a mol2 file containing only the given binding site residues.

    Generates a throwaway Chimera script that deletes every residue not in
    `residues` from the receptor PDB and saves the remainder as mol2, then
    runs it with ``chimera --nogui``. On success sets
    ``self.bindingsite_path``.

    Args:
        residues (str): Comma separated string of residues (eg: '144,170,199')
        force_rerun (bool): If method should be rerun even if output file exists
    """
    log.debug('{}: running binding site isolation...'.format(self.id))
    if (not self.receptorpdb_path):
        # NOTE(review): this *returns* a ValueError instance instead of
        # raising it — likely a bug; confirm intended behavior.
        return ValueError('Please run protein_only_and_noH')
    prefix = ((self.id + '_') + 'binding_residues')
    mol2maker = op.join(self.dock_dir, '{}_make_mol2.py'.format(prefix))
    outfile = op.join(self.dock_dir, '{}.mol2'.format(prefix))
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
        with open(mol2maker, 'w') as mol2_maker:
            mol2_maker.write('
            mol2_maker.write('from chimera import runCommand\n')
            mol2_maker.write('runCommand("open {}")\n'.format(self.receptorpdb_path))
            mol2_maker.write('runCommand("delete ~:{}")\n'.format(residues))
            mol2_maker.write('runCommand("write format mol2 resnum 0 {}")\n'.format(outfile))
            mol2_maker.write('runCommand("close all")')
        cmd = 'chimera --nogui {}'.format(mol2maker)
        os.system(cmd)
        # Remove the generated script and its compiled .pyc artifact.
        os.remove(mol2maker)
        os.remove('{}c'.format(mol2maker))
    if ssbio.utils.is_non_zero_file(outfile):
        self.bindingsite_path = outfile
        log.debug('{}: successful binding site isolation'.format(self.bindingsite_path))
    else:
        log.critical('{}: binding_site_mol2 failed to run on receptor file'.format(self.receptorpdb_path)) | Create mol2 of only binding site residues from the receptor
This function will take in a .pdb file (preferably the _receptor_noH.pdb file)
and a string of residues (eg: '144,170,199') and delete all other residues in the
.pdb file. It then saves the coordinates of the selected residues as a .mol2 file.
This is necessary for Chimera to select spheres within the radius of the binding
site.
Args:
residues (str): Comma separated string of residues (eg: '144,170,199')
force_rerun (bool): If method should be rerun even if output file exists | codesearchnet |
def assemble_data(data_dfs, concat_direction):
    """Assemble the data dfs together. Both indices are sorted.

    Args:
        data_dfs (list of pandas dfs)
        concat_direction (string): 'horiz' or 'vert'

    Returns:
        all_data_df_sorted (pandas df)
    """
    if concat_direction == "horiz":
        all_data_df = pd.concat(data_dfs, axis=1)
        n_cols = all_data_df.shape[1]
        logger.debug("all_data_df.shape[1]: {}".format(n_cols))
        # Sanity check: concatenation must not drop or duplicate columns.
        n_cols_cumulative = sum([df.shape[1] for df in data_dfs])
        assert n_cols == n_cols_cumulative
    elif concat_direction == "vert":
        all_data_df = pd.concat(data_dfs, axis=0)
        n_rows = all_data_df.shape[0]
        logger.debug("all_data_df.shape[0]: {}".format(n_rows))
        # Sanity check: concatenation must not drop or duplicate rows.
        n_rows_cumulative = sum([df.shape[0] for df in data_dfs])
        assert n_rows == n_rows_cumulative
    all_data_df_sorted = all_data_df.sort_index(axis=0).sort_index(axis=1)
    return all_data_df_sorted | Assemble the data dfs together. Both indices are sorted.
Args:
data_dfs (list of pandas dfs)
concat_direction (string): 'horiz' or 'vert'
Returns:
all_data_df_sorted (pandas df) | juraj-google-style |
def optionally_with_plugs(phase, **subplugs):
    """Apply only the with_plugs that the phase knows.

    Determines the subset of plug overrides applicable to the phase (or to
    each phase of a group/iterable) and applies only those.

    Args:
        phase: phase_descriptor.PhaseDescriptor or PhaseGroup or callable,
            or iterable of those, to apply the plug changes to.
        **subplugs: mapping from plug name to derived plug class.

    Returns:
        phase_descriptor.PhaseDescriptor or PhaseGroup or iterable with the
        updated plugs.
    """
    if isinstance(phase, PhaseGroup):
        return phase.with_plugs(**subplugs)
    # Iterables are handled recursively, one phase at a time.
    if isinstance(phase, collections.Iterable):
        return [optionally_with_plugs(p, **subplugs) for p in phase]
    # Wrap bare callables into a PhaseDescriptor before applying plugs.
    if (not isinstance(phase, phase_descriptor.PhaseDescriptor)):
        phase = phase_descriptor.PhaseDescriptor.wrap_or_copy(phase)
    return phase.with_known_plugs(**subplugs) | Apply only the with_plugs that the phase knows.
This will determine the subset of plug overrides for only plugs the phase
actually has.
Args:
phase: phase_descriptor.PhaseDescriptor or PhaseGroup or callable, or
iterable of those, the phase or phase group (or iterable) to apply the
plug changes to.
**subplugs: mapping from plug name to derived plug class, the subplugs to
apply.
Raises:
openhtf.plugs.InvalidPlugError: if a specified subplug class is not a valid
replacement for the specified plug name.
Returns:
phase_descriptor.PhaseDescriptor or PhaseGroup or iterable with the updated
plugs. | codesearchnet |
def from_config(cls, config):
    """Instantiates an initializer from a configuration dictionary.

    Example:

    ```python
    initializer = RandomUniform(-1, 1)
    config = initializer.get_config()
    initializer = RandomUniform.from_config(config)
    ```

    Args:
        config: A Python dictionary, the output of `get_config()`.

    Returns:
        An `Initializer` instance.
    """
    instance = cls(**config)
    return instance
Example:
```python
initializer = RandomUniform(-1, 1)
config = initializer.get_config()
initializer = RandomUniform.from_config(config)
```
Args:
config: A Python dictionary, the output of `get_config()`.
Returns:
An `Initializer` instance. | github-repos |
def fetch(url: str, **kwargs) -> Selector:
    """Send HTTP request and parse it as a DOM tree.

    Args:
        url (str): The url of the site.

    Returns:
        Selector: allows selecting parts of HTML text using CSS or XPath
            expressions. NOTE(review): implicitly returns None when the
            request fails, despite the annotation — callers should check.
    """
    kwargs.setdefault('headers', DEFAULT_HEADERS)
    try:
        res = requests.get(url, **kwargs)
        res.raise_for_status()
    except requests.RequestException as e:
        # Best effort: print the error and fall through (returns None).
        print(e)
    else:
        html = res.text
        tree = Selector(text=html)
        return tree | Send HTTP request and parse it as a DOM tree.
Args:
url (str): The url of the site.
Returns:
Selector: allows you to select parts of HTML text using CSS or XPath expressions. | codesearchnet |
def read_submissions_from_directory(dirname, use_gpu):
    """Scans directory and read all submissions.

    Args:
        dirname: directory to scan.
        use_gpu: whether submissions should use GPU. Used to pick the proper
            Docker container for each submission and to create instances of
            the Attack or Defense class.

    Returns:
        List with submissions (subclasses of Submission class).
    """
    result = []
    for sub_dir in os.listdir(dirname):
        submission_path = os.path.join(dirname, sub_dir)
        try:
            # Skip entries that are not submission directories.
            if (not os.path.isdir(submission_path)):
                continue
            if (not os.path.exists(os.path.join(submission_path, 'metadata.json'))):
                continue
            with open(os.path.join(submission_path, 'metadata.json')) as f:
                metadata = json.load(f)
            # Prefer the GPU container when requested and declared.
            if (use_gpu and ('container_gpu' in metadata)):
                container = metadata['container_gpu']
            else:
                container = metadata['container']
            entry_point = metadata['entry_point']
            submission_type = metadata['type']
            if ((submission_type == 'attack') or (submission_type == 'targeted_attack')):
                submission = Attack(submission_path, container, entry_point, use_gpu)
            elif (submission_type == 'defense'):
                submission = Defense(submission_path, container, entry_point, use_gpu)
            else:
                raise ValueError(('Invalid type of submission: %s' % submission_type))
            result.append(submission)
        except (IOError, KeyError, ValueError):
            # Malformed submissions are reported and skipped, not fatal.
            print('Failed to read submission from directory ', submission_path)
    return result | Scans directory and read all submissions.
Args:
dirname: directory to scan.
use_gpu: whether submissions should use GPU. This argument is
used to pick proper Docker container for each submission and create
instance of Attack or Defense class.
Returns:
List with submissions (subclasses of Submission class). | codesearchnet |
def max_neighbor(self, in_lon, in_lat, radius=0.05):
    """Find the largest value within `radius` of each interpolated grid point.

    Args:
        in_lon: 2D array of longitude values
        in_lat: 2D array of latitude values
        radius: radius of influence for largest neighbor search in degrees

    Returns:
        Array of interpolated data
    """
    out_data = np.zeros((self.data.shape[0], in_lon.shape[0], in_lon.shape[1]))
    in_tree = cKDTree(np.vstack((in_lat.ravel(), in_lon.ravel())).T)
    out_indices = np.indices(out_data.shape[1:])
    out_rows = out_indices[0].ravel()
    out_cols = out_indices[1].ravel()
    for d in range(self.data.shape[0]):
        nz_points = np.where(self.data[d] > 0)
        if len(nz_points[0]) > 0:
            nz_vals = self.data[d][nz_points]
            # Sort source points by value ascending so that when several
            # fall within radius of a grid point, the largest (written
            # last) wins.
            nz_rank = np.argsort(nz_vals)
            original_points = cKDTree(np.vstack((self.lat[nz_points[0][nz_rank]], self.lon[nz_points[1][nz_rank]])).T)
            all_neighbors = original_points.query_ball_tree(in_tree, radius, p=2, eps=0)
            for n, neighbors in enumerate(all_neighbors):
                if len(neighbors) > 0:
                    out_data[d, out_rows[neighbors], out_cols[neighbors]] = nz_vals[nz_rank][n]
    return out_data | Finds the largest value within a given radius of a point on the interpolated grid.
Args:
in_lon: 2D array of longitude values
in_lat: 2D array of latitude values
radius: radius of influence for largest neighbor search in degrees
Returns:
Array of interpolated data | juraj-google-style |
def output(self, value):
    """SPL output port assignment expression.

    Arguments:
        value(str): SPL expression used for an output assignment. This can
            be a string, a constant, or an :py:class:`Expression`.

    Returns:
        Expression: Output assignment expression that is valid in the
            context of this operator.
    """
    return super(Map, self).output(self.stream, value) | SPL output port assignment expression.
Arguments:
value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.
Returns:
Expression: Output assignment expression that is valid as a the context of this operator. | juraj-google-style |
def visible_devices(self):
    """Unify all visible devices across all connected adapters.

    Returns:
        dict: A dictionary mapping UUIDs to device information dictionaries.
            Each entry carries an `adapters` list sorted by signal strength
            (strongest first), the `best_adapter` id, and the maximum
            `signal_strength` seen.
    """
    devs = {}
    for (device_id, adapters) in self._devices.items():
        dev = None
        max_signal = None
        best_adapter = None
        for (adapter_id, devinfo) in adapters.items():
            connstring = 'adapter/{0}/{1}'.format(adapter_id, devinfo['connection_string'])
            # First adapter seen seeds the merged device record.
            if (dev is None):
                dev = copy.deepcopy(devinfo)
                del dev['connection_string']
                if ('adapters' not in dev):
                    dev['adapters'] = []
                best_adapter = adapter_id
            dev['adapters'].append((adapter_id, devinfo['signal_strength'], connstring))
            # Track the strongest signal and which adapter provides it.
            if (max_signal is None):
                max_signal = devinfo['signal_strength']
            elif (devinfo['signal_strength'] > max_signal):
                max_signal = devinfo['signal_strength']
                best_adapter = adapter_id
        if (dev is None):
            continue
        dev['connection_string'] = ('device/%x' % dev['uuid'])
        dev['adapters'] = sorted(dev['adapters'], key=(lambda x: x[1]), reverse=True)
        dev['best_adapter'] = best_adapter
        dev['signal_strength'] = max_signal
        devs[device_id] = dev
    return devs | Unify all visible devices across all connected adapters
Returns:
dict: A dictionary mapping UUIDs to device information dictionaries | codesearchnet |
def are_values_same_type(first_val, second_val):
    """Verify that both values belong to the same type.

    Float and integer are considered the same type; booleans only match
    values of exactly the same type.

    Args:
        first_val: Value to validate.
        second_val: Value to validate.

    Returns:
        Boolean: True if both values belong to same type. Otherwise False.
    """
    first_val_type = type(first_val)
    second_val_type = type(second_val)
    if isinstance(first_val, string_types) and isinstance(second_val, string_types):
        return True
    # bool is a subclass of int, so it must be handled before the numeric
    # check; booleans require an exact type match.
    if isinstance(first_val, bool) or isinstance(second_val, bool):
        return first_val_type == second_val_type
    # Any mix of integers and floats counts as the same (numeric) type.
    if isinstance(first_val, (numbers.Integral, float)) and isinstance(second_val, (numbers.Integral, float)):
        return True
    return False | Method to verify that both values belong to same type. Float and integer are
considered as same type.
Args:
first_val: Value to validate.
second_val: Value to validate.
Returns:
Boolean: True if both values belong to same type. Otherwise False. | juraj-google-style |
def LockScanNode(self, path_spec):
    """Marks a scan node as locked.

    Args:
        path_spec (PathSpec): path specification.

    Raises:
        KeyError: if the scan node does not exist.
    """
    node = self._scan_nodes.get(path_spec)
    if not node:
        raise KeyError('Scan node does not exist.')
    self._locked_scan_nodes[path_spec] = node
Args:
path_spec (PathSpec): path specification.
Raises:
KeyError: if the scan node does not exists. | codesearchnet |
async def leave(self, *, force: bool = False) -> bool:
    """Leave a swarm.

    Args:
        force: force to leave the swarm even if the node is a master

    Returns:
        bool: True once the request has been issued.
    """
    params = {"force": force}
    await self.docker._query("swarm/leave", method="POST", params=params)
    return True | Leave a swarm.
Args:
force: force to leave the swarm even if the node is a master | juraj-google-style |
def __trim_grave_accent(self, href):
    """Trim grave accents manually (BeautifulSoup doesn't support it).

    At most one leading and one trailing backtick are removed.

    Args:
        href (str): The BeautifulSoup href value.

    Returns:
        str: The BeautifulSoup href value without grave accents.
    """
    start = 1 if href.startswith("`") else 0
    end = len(href) - 1 if href.endswith("`") else len(href)
    return href[start:end]
Args:
href (str): The BeautifulSoup href value.
Returns:
str: The BeautifulSoup href value without grave accents. | juraj-google-style |
def is_allowed(request, level, pid):
    """Check if one or more subjects are allowed to perform action level on object.

    If a subject holds permissions for one action level on object, all lower
    action levels are also allowed. Any included subject unknown to this MN
    is treated as a subject without permissions.

    Returns:
        bool: True when the active subjects are trusted infrastructure
            subjects or hold a permission at `level` or higher on `pid`
            (the ACL includes the rightsHolder and public access); False
            otherwise, including when the PID does not exist.
    """
    # Fully trusted DataONE infrastructure subjects bypass ACL checks.
    if is_trusted_subject(request):
        return True
    # level__gte: a permission at this level or higher grants access.
    return d1_gmn.app.models.Permission.objects.filter(sciobj__pid__did=pid, subject__subject__in=request.all_subjects_set, level__gte=level).exists() | Check if one or more subjects are allowed to perform action level on object.
If a subject holds permissions for one action level on object, all lower action
levels are also allowed. Any included subject that is unknown to this MN is treated
as a subject without permissions.
Returns:
bool
True:
- The active subjects include one or more subjects that:
- are fully trusted DataONE infrastructure subjects, causing all rights to be
granted regardless of requested access level and SciObj
- OR are in the object's ACL for the requested access level. The ACL contains
the subjects from the object's allow rules and the object's rightsHolder,
which has all rights.
- OR object is public, which always yields a match on the "public" symbolic
subject.
False:
- None of the active subjects are in the object's ACL for the requested
access level or for lower levels.
- OR PID does not exist
- OR access level is invalid | codesearchnet |
def build(self, var_list):
    """Initialize optimizer variables.

    Adamax optimizer has 2 types of variables: momentums (denoted as m),
    exponentially weighted infinity norm (denoted as u).

    Args:
        var_list: list of model variables to build Adamax variables on.
    """
    # Building is idempotent; skip if already done.
    if self.built:
        return
    super().build(var_list)
    self._m, self._u = self.add_optimizer_variables(var_list, ['momentum', 'norm']) | Initialize optimizer variables.
Adamax optimizer has 2 types of variables: momentums (denoted as m),
exponentially weighted infinity norm (denoted as u).
Args:
var_list: list of model variables to build Adamax variables on. | github-repos |
def status(self):
    """Reads a command response status.

    If there is no response message then the returned status message will
    be an empty string.

    Returns:
        A tuple of status code (as an integer) and status message.

    Raises:
        NNTPProtocolError: If the status line can't be parsed.
        NNTPTemporaryError: For status code 400-499.
        NNTPPermanentError: For status code 500-599.
    """
    line = next(self.__line_gen()).rstrip()
    # Status line is "<code> <optional message>".
    parts = line.split(None, 1)
    try:
        (code, message) = (int(parts[0]), '')
    except ValueError:
        raise NNTPProtocolError(line)
    # Valid NNTP status codes are in the range 100-599.
    if ((code < 100) or (code >= 600)):
        raise NNTPProtocolError(line)
    if (len(parts) > 1):
        message = parts[1]
    if (400 <= code <= 499):
        raise NNTPTemporaryError(code, message)
    if (500 <= code <= 599):
        raise NNTPPermanentError(code, message)
    return (code, message) | Reads a command response status.
If there is no response message then the returned status message will
be an empty string.
Raises:
NNTPError: If data is required to be read from the socket and fails.
NNTPProtocolError: If the status line can't be parsed.
NNTPTemporaryError: For status code 400-499
NNTPPermanentError: For status code 500-599
Returns:
A tuple of status code (as an integer) and status message. | codesearchnet |
def get_random_password():
    """Get a random password that complies with most of the requirements.

    Note:
        This random password is not strong and not "really" random, and
        should only be used for testing purposes.

    Returns:
        str: The random password.
    """
    # Fixed segment layout: 4 lowercase, 2 digits, 2 symbols, 4 uppercase.
    segments = (
        RandomInputHelper.get_random_value(4, [string.ascii_lowercase]),
        RandomInputHelper.get_random_value(2, [string.digits]),
        RandomInputHelper.get_random_value(2, ['$&*@!']),
        RandomInputHelper.get_random_value(4, [string.ascii_uppercase]),
    )
    return ''.join(segments)
Note:
This random password is not strong and not "really" random, and should only be
used for testing purposes.
Returns:
str: The random password. | codesearchnet |
def transfer(self, address, messages):
    """Transfer `messages` to the specified I2C `address`.

    Modifies the `messages` list in place with the results of any read
    transactions.

    Args:
        address (int): I2C address.
        messages (list): list of I2C.Message messages.

    Raises:
        I2CError: if an I/O or OS error occurs.
        TypeError: if `messages` type is not list.
        ValueError: if `messages` length is zero, or if message data is not valid bytes.
    """
    if not isinstance(messages, list):
        raise TypeError("Invalid messages type, should be list of I2C.Message.")
    elif len(messages) == 0:
        raise ValueError("Invalid messages data, should be non-zero length.")
    # Build the C message array expected by the I2C_RDWR ioctl.
    cmessages = (_CI2CMessage * len(messages))()
    for i in range(len(messages)):
        # Normalize message data (bytes / bytearray / list of ints) to bytes.
        if isinstance(messages[i].data, bytes):
            data = messages[i].data
        elif isinstance(messages[i].data, bytearray):
            data = bytes(messages[i].data)
        elif isinstance(messages[i].data, list):
            data = bytes(bytearray(messages[i].data))
        cmessages[i].addr = address
        cmessages[i].flags = messages[i].flags | (I2C._I2C_M_RD if messages[i].read else 0)
        cmessages[i].len = len(data)
        # The buffer must stay referenced by cmessages until the ioctl runs.
        cmessages[i].buf = ctypes.cast(ctypes.create_string_buffer(data, len(data)), ctypes.POINTER(ctypes.c_ubyte))
    i2c_xfer = _CI2CIocTransfer()
    i2c_xfer.nmsgs = len(cmessages)
    i2c_xfer.msgs = cmessages
    try:
        # Execute the combined transfer in a single ioctl call.
        fcntl.ioctl(self._fd, I2C._I2C_IOC_RDWR, i2c_xfer, False)
    except IOError as e:
        raise I2CError(e.errno, "I2C transfer: " + e.strerror)
    # Copy read results back into each message, preserving its data type.
    for i in range(len(messages)):
        if messages[i].read:
            data = [cmessages[i].buf[j] for j in range(cmessages[i].len)]
            if isinstance(messages[i].data, list):
                messages[i].data = data
            elif isinstance(messages[i].data, bytearray):
                messages[i].data = bytearray(data)
            elif isinstance(messages[i].data, bytes):
                messages[i].data = bytes(bytearray(data)) | Transfer `messages` to the specified I2C `address`. Modifies the
`messages` array with the results of any read transactions.
Args:
address (int): I2C address.
messages (list): list of I2C.Message messages.
Raises:
I2CError: if an I/O or OS error occurs.
TypeError: if `messages` type is not list.
ValueError: if `messages` length is zero, or if message data is not valid bytes. | juraj-google-style |
def fit_transform(self, col):
    """Prepare the transformer and return processed data.

    Args:
        col(pandas.DataFrame): Data to transform.

    Returns:
        pandas.DataFrame
    """
    data = self.anonymize_column(col) if self.anonymize else col
    self._fit(data)
    return self.transform(data)
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame | codesearchnet |
def changeset_info(changeset):
    """Return a dict with id, user, uid, bbox, creation date and all tags
    of the changeset.

    Args:
        changeset: the XML element of the changeset.
    """
    children = changeset.getchildren()
    keys = [tag.attrib.get('k') for tag in children]
    keys += ['id', 'user', 'uid', 'bbox', 'created_at']
    values = [tag.attrib.get('v') for tag in children]
    values += [
        changeset.get('id'),
        changeset.get('user'),
        changeset.get('uid'),
        get_bounds(changeset),
        changeset.get('created_at'),
    ]
    return dict(zip(keys, values))
return dict(zip(keys, values)) | Return a dictionary with id, user, user_id, bounds, date of creation
and all the tags of the changeset.
Args:
changeset: the XML string of the changeset. | codesearchnet |
def clean_structure(self, out_suffix='_clean', outdir=None, force_rerun=False, remove_atom_alt=True, keep_atom_alt_id='A', remove_atom_hydrogen=True, add_atom_occ=True, remove_res_hetero=True, keep_chemicals=None, keep_res_only=None, add_chain_id_if_empty='X', keep_chains=None):
    """Clean the structure file associated with this structure and save it
    as a new file. Returns the file path.

    Args:
        out_suffix (str): Suffix to append to original filename
        outdir (str): Path to output directory
        force_rerun (bool): If structure should be re-cleaned even if a clean file exists
        remove_atom_alt (bool): Remove alternate positions
        keep_atom_alt_id (str): Which alternate ID to keep when removing the rest
        remove_atom_hydrogen (bool): Remove hydrogen atoms
        add_atom_occ (bool): Add atom occupancy fields if not present
        remove_res_hetero (bool): Remove all HETATMs
        keep_chemicals (str, list): Chemical names to keep when removing HETATMs
        keep_res_only (str, list): Keep ONLY specified resnames, deletes everything else!
        add_chain_id_if_empty (str): Chain ID to add if not present
        keep_chains (str, list): Keep only these chains

    Returns:
        str: Path to cleaned PDB file, or None when there is no structure file.
    """
    if (not self.structure_file):
        log.error('{}: no structure file, unable to clean'.format(self.id))
        return None
    # Delegate the actual cleaning to the shared cleanpdb utility.
    clean_pdb_file = ssbio.protein.structure.utils.cleanpdb.clean_pdb(self.structure_path, out_suffix=out_suffix, outdir=outdir, force_rerun=force_rerun, remove_atom_alt=remove_atom_alt, remove_atom_hydrogen=remove_atom_hydrogen, keep_atom_alt_id=keep_atom_alt_id, add_atom_occ=add_atom_occ, remove_res_hetero=remove_res_hetero, keep_chemicals=keep_chemicals, keep_res_only=keep_res_only, add_chain_id_if_empty=add_chain_id_if_empty, keep_chains=keep_chains)
    return clean_pdb_file | Clean the structure file associated with this structure, and save it as a new file. Returns the file path.
Args:
out_suffix (str): Suffix to append to original filename
outdir (str): Path to output directory
force_rerun (bool): If structure should be re-cleaned if a clean file exists already
remove_atom_alt (bool): Remove alternate positions
keep_atom_alt_id (str): If removing alternate positions, which alternate ID to keep
remove_atom_hydrogen (bool): Remove hydrogen atoms
add_atom_occ (bool): Add atom occupancy fields if not present
remove_res_hetero (bool): Remove all HETATMs
keep_chemicals (str, list): If removing HETATMs, keep specified chemical names
keep_res_only (str, list): Keep ONLY specified resnames, deletes everything else!
add_chain_id_if_empty (str): Add a chain ID if not present
keep_chains (str, list): Keep only these chains
Returns:
str: Path to cleaned PDB file | codesearchnet |
def _RawGlobPathSpecWithNumericSchema(file_system, parent_path_spec, segment_format, location, segment_number):
    """Globs for path specifications according to a numeric naming schema.

    Args:
      file_system (FileSystem): file system.
      parent_path_spec (PathSpec): parent path specification.
      segment_format (str): naming schema of the segment file location.
      location (str): the base segment file location string.
      segment_number (int): first segment number.

    Returns:
      list[PathSpec]: path specifications that match the glob.
    """
    segment_files = []
    current_number = segment_number
    while True:
        # Build a candidate path spec for the current segment number.
        properties = path_spec_factory.Factory.GetProperties(parent_path_spec)
        properties['location'] = segment_format.format(location, current_number)
        if parent_path_spec.parent is not None:
            properties['parent'] = parent_path_spec.parent
        candidate_path_spec = path_spec_factory.Factory.NewPathSpec(
            parent_path_spec.type_indicator, **properties)

        # Stop at the first segment number with no corresponding file entry.
        if not file_system.FileEntryExistsByPathSpec(candidate_path_spec):
            return segment_files

        segment_files.append(candidate_path_spec)
        current_number += 1
def _sample_action_fluent(self, name: str, dtype: tf.DType, size: Sequence[int], constraints: Dict[str, Constraints], default_value: tf.Tensor, prob: float) -> tf.Tensor:
    """Samples the action fluent with given `name`, `dtype`, and `size`.

    With probability `prob` the action fluent keeps its `default_value`;
    with probability 1 - `prob` a value is sampled w.r.t. its `constraints`.

    Args:
        name (str): The name of the action fluent.
        dtype (tf.DType): The data type of the action fluent.
        size (Sequence[int]): The size and shape of the action fluent.
        constraints (Dict[str, Constraints]): The (lower, upper) bounds for each action fluent.
        default_value (tf.Tensor): The default value for the action fluent.
        prob (float): Probability of choosing the default value.

    Returns:
        tf.Tensor: A tensor for sampling the action fluent.
    """
    shape = [self.batch_size] + list(size)

    if dtype == tf.float32:
        bounds = constraints.get(name)
        if bounds is None:
            # Unconstrained real fluent: sample uniformly over the full range.
            sampler = tf.distributions.Uniform(low=-self.MAX_REAL_VALUE, high=self.MAX_REAL_VALUE)
            sampled_fluent = sampler.sample(shape)
        else:
            lower, upper = bounds
            # Whether either bound already carries a batch dimension.
            batch_mode = ((lower is not None and lower.batch)
                          or (upper is not None and upper.batch))
            low = tf.cast(lower.tensor, tf.float32) if lower is not None else -self.MAX_REAL_VALUE
            high = tf.cast(upper.tensor, tf.float32) if upper is not None else self.MAX_REAL_VALUE
            sampler = tf.distributions.Uniform(low=low, high=high)
            if batch_mode:
                # Bounds are batched, so the distribution's event shape already matches.
                sampled_fluent = sampler.sample()
            elif isinstance(low, tf.Tensor) or isinstance(high, tf.Tensor):
                if (low + high).shape.as_list() != list(size):
                    raise ValueError('bounds are not compatible with action fluent.')
                sampled_fluent = sampler.sample([self.batch_size])
            else:
                sampled_fluent = sampler.sample(shape)
    elif dtype == tf.int32:
        # Uniform categorical over the integer action range.
        sampler = tf.distributions.Categorical(logits=[1.0] * self.MAX_INT_VALUE, dtype=tf.int32)
        sampled_fluent = sampler.sample(shape)
    elif dtype == tf.bool:
        sampler = tf.distributions.Bernoulli(probs=0.5, dtype=tf.bool)
        sampled_fluent = sampler.sample(shape)

    # Per-batch-element coin flip: keep the default value with probability `prob`.
    keep_default = tf.distributions.Bernoulli(prob, dtype=tf.bool).sample(self.batch_size)
    return tf.where(keep_default, default_value, sampled_fluent)
def _color_level(str_, level):
    """Wrap a string with the styling configured for the given message level.

    The foreground color, background color and styles are looked up in the
    rez configuration for the level.

    Args:
        str_ (str): The string to be wrapped.
        level (str): The message level. Should be one of 'critical', 'error',
            'warning', 'info' or 'debug'.

    Returns:
        str: The string styled with the appropriate escape sequences.
    """
    # _get_style_from_config returns (fore_color, back_color, styles),
    # which matches _color's positional signature.
    styling = _get_style_from_config(level)
    return _color(str_, *styling)
def _ParseCommon2003CachedEntry(self, value_data, cached_entry_offset):
    """Parses the cached entry structure common for Windows 2003, Vista and 7.

    Args:
      value_data (bytes): value data.
      cached_entry_offset (int): offset of the first cached entry data
          relative to the start of the value data.

    Returns:
      appcompatcache_cached_entry_2003_common: cached entry structure common
          for Windows 2003, Windows Vista and Windows 7.

    Raises:
      ParseError: if the value data could not be parsed.
    """
    data_type_map = self._GetDataTypeMap('appcompatcache_cached_entry_2003_common')

    try:
        entry = self._ReadStructureFromByteStream(
            value_data[cached_entry_offset:], cached_entry_offset, data_type_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError(
            'Unable to parse cached entry value with error: {0!s}'.format(exception))

    # Sanity-check the path sizes before the caller dereferences the path.
    if entry.path_size > entry.maximum_path_size:
        raise errors.ParseError('Path size value out of bounds.')

    # A valid entry has a non-empty path followed by a 2-byte end-of-string.
    trailing_size = entry.maximum_path_size - entry.path_size
    if entry.path_size == 0 or trailing_size != 2:
        raise errors.ParseError('Unsupported path size values.')

    return entry
def __init__(self, project, sub_name, expected_msg=None, expected_msg_len=None, timeout=DEFAULT_TIMEOUT, with_attributes=False, strip_attributes=None, sleep_time=DEFAULT_SLEEP_TIME, max_messages_in_one_pull=DEFAULT_MAX_MESSAGES_IN_ONE_PULL, pull_timeout=DEFAULT_PULL_TIMEOUT):
    """Initialize PubSubMessageMatcher object.

    Args:
      project: A name string of project.
      sub_name: A name string of subscription which is attached to output.
      expected_msg: A string list that contains expected message data pulled
        from the subscription. See also: with_attributes.
      expected_msg_len: Number of expected messages pulled from the
        subscription.
      timeout: Timeout in seconds to wait for all expected messages appears.
      with_attributes: If True, will match against both message data and
        attributes. If True, expected_msg should be a list of ``PubsubMessage``
        objects. Otherwise, it should be a list of ``bytes``.
      strip_attributes: List of strings. If with_attributes==True, strip the
        attributes keyed by these values from incoming messages.
        If a key is missing, will add an attribute with an error message as
        value to prevent a successful match.
      sleep_time: Time in seconds between which the pulls from pubsub are done.
      max_messages_in_one_pull: Maximum number of messages pulled from pubsub
        at once.
      pull_timeout: Time in seconds after which the pull from pubsub is repeated.
    """
    # Validate the environment and the arguments before storing anything.
    if pubsub is None:
        raise ImportError('PubSub dependencies are not installed.')
    if not project:
        raise ValueError('Invalid project %s.' % project)
    if not sub_name:
        raise ValueError('Invalid subscription %s.' % sub_name)
    if not (expected_msg_len or expected_msg):
        raise ValueError(
            'Required expected_msg: {} or expected_msg_len: {}.'.format(
                expected_msg, expected_msg_len))
    if expected_msg and not isinstance(expected_msg, list):
        raise ValueError('Invalid expected messages %s.' % expected_msg)
    if expected_msg_len and not isinstance(expected_msg_len, int):
        raise ValueError('Invalid expected messages %s.' % expected_msg_len)

    self.project = project
    self.sub_name = sub_name
    self.expected_msg = expected_msg
    # If the length was not given explicitly, derive it from the message list.
    self.expected_msg_len = expected_msg_len or len(self.expected_msg)
    self.timeout = timeout
    # Populated lazily by the matcher once messages are pulled.
    self.messages = None
    self.messages_all_details = None
    self.with_attributes = with_attributes
    self.strip_attributes = strip_attributes
    self.sleep_time = sleep_time
    self.max_messages_in_one_pull = max_messages_in_one_pull
    self.pull_timeout = pull_timeout
def _WriteIfcfg(self, interfaces, logger):
    """Write ifcfg files for multi-NIC support.

    Overwrites the files. This allows us to update ifcfg-* in the future.
    Disable the network setup to override this behavior and customize the
    configurations.

    Args:
      interfaces: list of string, the output device names to enable.
      logger: logger object, used to write to SysLog and serial port.
    """
    for interface in interfaces:
        interface_config = os.path.join(
            self.network_path, 'ifcfg-%s' % interface)
        # NOTE(review): the first list entry was garbled in this copy of the
        # file (a bare quote); restored to the upstream header comment line.
        interface_content = [
            '# Added by Google.',
            'STARTMODE=hotplug',
            'BOOTPROTO=dhcp',
            'DHCLIENT_SET_DEFAULT_ROUTE=yes',
            # Give each interface a distinct default-route priority.
            'DHCLIENT_ROUTE_PRIORITY=10%s00' % interface,
            '',
        ]
        with open(interface_config, 'w') as interface_file:
            interface_file.write('\n'.join(interface_content))
        logger.info('Created ifcfg file for interface %s.', interface)
def raster_binarization(given_value, rasterfilename):
    """Binarize a raster against a given value.

    The opening and closing operations are based on binary images, so the
    raster must first be converted to a 0/1 array.

    Args:
        given_value: Pixels equal to this value become 1; all others become 0.
        rasterfilename: Path of the raster file to binarize.

    Returns:
        numpy array of the binarized raster.
    """
    source = RasterUtilClass.read_raster(rasterfilename)
    # Elementwise comparison; numpy.where maps matches to 1 and the rest to 0.
    return numpy.where(source.data == given_value, 1, 0)
def CalculateForecastStats(matched, available, possible=None):
    """Calculate forecast percentage stats.

    Args:
      matched: The number of matched impressions.
      available: The number of available impressions.
      possible: The optional number of possible impressions.

    Returns:
      Tuple of (available percentage, possible percentage); the possible
      percentage is None when `possible` is not supplied.
    """
    def _as_percent(count):
        # Guard against division by zero when nothing matched.
        return (float(count) / matched) * 100. if matched > 0 else 0

    available_percent = _as_percent(available)
    possible_percent = _as_percent(possible) if possible is not None else None
    return available_percent, possible_percent
def check_validation_split_arg(validation_split, subset, shuffle, seed):
    """Raise errors in case of invalid argument values.

    Args:
        validation_split: float between 0 and 1, fraction of data to reserve
            for validation.
        subset: One of `"training"`, `"validation"`, or `"both"`. Only used if
            `validation_split` is set.
        shuffle: Whether to shuffle the data. Either `True` or `False`.
        seed: random seed for shuffling and transformations.
    """
    if validation_split and (validation_split <= 0 or validation_split >= 1):
        raise ValueError(f'`validation_split` must be between 0 and 1, received: {validation_split}')
    # The two options must be set together: exactly one being set is an error.
    if bool(validation_split) != bool(subset):
        raise ValueError('If `subset` is set, `validation_split` must be set, and inversely.')
    if subset not in ('training', 'validation', 'both', None):
        raise ValueError(f'`subset` must be either "training", "validation" or "both", received: {subset}')
    # Shuffled splits without a seed would not be reproducible across calls.
    if validation_split and shuffle and seed is None:
        raise ValueError('If using `validation_split` and shuffling the data, you must provide a `seed` argument, to make sure that there is no overlap between the training and validation subset.')
def MultifactorSchedule(history=None, factors='constant * linear_warmup * rsqrt_decay', constant=0.1, warmup_steps=100, decay_factor=0.5, steps_per_decay=20000):
del history
cache_args = (factors, constant, warmup_steps)
if (cache_args in _memoized_multifactor_schedules):
return _memoized_multifactor_schedules[cache_args]
factors = [n.strip() for n in factors.split('*')]
def learning_rate(step):
'Step to learning rate function.'
ret = 1.0
for name in factors:
if (name == 'constant'):
ret *= constant
elif (name == 'linear_warmup'):
ret *= np.minimum(1.0, (step / warmup_steps))
elif (name == 'rsqrt_decay'):
ret /= np.sqrt(np.maximum(step, warmup_steps))
elif (name == 'decay_every'):
ret *= (decay_factor ** (step
else:
raise ValueError(('Unknown factor %s.' % name))
return ret
_memoized_multifactor_schedules[cache_args] = learning_rate
return learning_rate | Factor-based learning rate schedule.
Interprets factors in the factors string which can consist of:
* constant: interpreted as the constant value,
* linear_warmup: interpreted as linear warmup until warmup_steps,
* rsqrt_decay: divide by square root of max(step, warmup_steps)
* decay_every: Every k steps decay the learning rate by decay_factor.
Args:
history: the history of training and evaluation (History object).
factors: a string with factors separated by "*" that defines the schedule.
constant: float, the starting constant for the learning rate schedule.
warmup_steps: how many steps to warm up for in the warmup schedule.
decay_factor: The amount to decay the learning rate by.
steps_per_decay: How often to decay the learning rate.
Returns:
a function learning_rate(step): float -> float, the step-dependent lr. | codesearchnet |
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating
    and adding special tokens. A Lxmert sequence has the following format:

    - single sequence: `[CLS] X [SEP]`
    - pair of sequences: `[CLS] A [SEP] B [SEP]`

    Args:
        token_ids_0 (`List[int]`):
            List of IDs to which the special tokens will be added.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.

    Returns:
        `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
    """
    sequence = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    if token_ids_1 is not None:
        # Append the second segment followed by its own separator.
        sequence = sequence + token_ids_1 + [self.sep_token_id]
    return sequence
def angle(self, deg=False):
    """Return the angle of the complex argument.

    Args:
        deg (bool, optional):
            Return angle in degrees if True, radians if False (default).

    Returns:
        angle (Timeseries):
            The counterclockwise angle from the positive real axis on
            the complex plane, with dtype as numpy.float64.
    """
    # Warn (but still proceed) when the data is not complex-valued;
    # dtype.str[1] is numpy's type-kind character ('c' for complex).
    is_complex = self.dtype.str[1] == 'c'
    if not is_complex:
        warnings.warn('angle() is intended for complex-valued timeseries',
                      RuntimeWarning, 1)
    angles = np.angle(self, deg=deg)
    return Timeseries(angles, self.tspan, self.labels)
def load_plugins(config, plugin_kwargs):
    """Discover and instantiate plugins.

    Args:
        config (dict): loaded configuration for the Gordon service.
        plugin_kwargs (dict): keyword arguments to give to plugins
            during instantiation. Mutated in place when a metrics
            plugin is configured.

    Returns:
        Tuple of (plugin names, instantiated plugin objects, errors
        encountered while loading/instantiating, plugin kwargs).
        When no plugins are activated, returns three empty lists and None.
    """
    installed = _gather_installed_plugins()

    # A metrics plugin, if configured, is passed to every other plugin.
    metrics = _get_metrics_plugin(config, installed)
    if metrics:
        plugin_kwargs['metrics'] = metrics

    active = _get_activated_plugins(config, installed)
    if not active:
        return [], [], [], None

    namespaces = _get_plugin_config_keys(active)
    plugin_configs = _load_plugin_configs(namespaces, config)
    names, plugins, errors = _init_plugins(
        active, installed, plugin_configs, plugin_kwargs)
    return names, plugins, errors, plugin_kwargs
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.