code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def _send_unary_request(self, request):
    """Send a stream request via separate unary RPCs instead of the stream.

    Args:
        request (types.StreamingPullRequest): The stream request to be
            mapped into unary requests.
    """
    if request.ack_ids:
        self._client.acknowledge(
            subscription=self._subscription, ack_ids=list(request.ack_ids))

    if request.modify_deadline_ack_ids:
        # Group the ack IDs by their requested deadline so that each distinct
        # deadline becomes a single modify_ack_deadline call.
        ack_ids_by_deadline = collections.defaultdict(list)
        for index, ack_id in enumerate(request.modify_deadline_ack_ids):
            deadline_seconds = request.modify_deadline_seconds[index]
            ack_ids_by_deadline[deadline_seconds].append(ack_id)
        for deadline_seconds, grouped_ack_ids in six.iteritems(ack_ids_by_deadline):
            self._client.modify_ack_deadline(
                subscription=self._subscription,
                ack_ids=grouped_ack_ids,
                ack_deadline_seconds=deadline_seconds)

    _LOGGER.debug("Sent request(s) over unary RPC.")
stream.
Args:
request (types.StreamingPullRequest): The stream request to be
mapped into unary requests. | juraj-google-style |
def forward(self, music_tokens, raw_audio_conditioning=None):
    """Embed upper-level music tokens and upsample them.

    Args:
        music_tokens (`torch.LongTensor`):
            Music tokens from the upper level, in range(nb_discrete_codes).
        raw_audio_conditioning (`torch.LongTensor`, *optional*):
            Raw-audio conditioning used when primed sampling; added to the
            token embeddings. Defaults to 0.0 (no conditioning).
    """
    conditioning = 0.0 if raw_audio_conditioning is None else raw_audio_conditioning
    embeddings = self.embed_tokens(music_tokens.long()) + conditioning
    # The upsampler operates on channels-first input, so move the channel
    # axis to dim 1 and back afterwards.
    upsampled = self.upsampler(embeddings.permute(0, 2, 1)).permute(0, 2, 1)
    return self.layer_norm(upsampled)
music_tokens (`torch.LongTensor`):
Music tokens from the upper level, in range(nb_discrete_codes)
raw_audio_conditioning (`torch.LongTensor`, *optional*):
Audio used when primed sampling, raw audio information that conditions the generation | github-repos |
def airborne_velocity(msg):
    """Calculate the speed, track (or heading), and vertical rate.

    Args:
        msg (string): 28 bytes hexadecimal message string

    Returns:
        (int, float, int, string): speed (kt), ground track or heading
            (degree), rate of climb/descend (ft/min), and speed type
            ('GS' for ground speed, 'IAS' or 'TAS' for airspeed); or None
            when the velocity fields are not available.

    Raises:
        RuntimeError: if the message is not a TC=19 airborne velocity message.
    """
    if common.typecode(msg) != 19:
        raise RuntimeError("%s: Not a airborne velocity message, expecting TC=19" % msg)
    mb = common.hex2bin(msg)[32:]
    subtype = common.bin2int(mb[5:8])
    # All-zero velocity fields indicate "no data available".
    if common.bin2int(mb[14:24]) == 0 or common.bin2int(mb[25:35]) == 0:
        return None
    if subtype in (1, 2):
        # Ground speed, decoded from east-west / north-south components.
        v_ew_sign = -1 if mb[13] == '1' else 1
        v_ew = common.bin2int(mb[14:24]) - 1
        v_ns_sign = -1 if mb[24] == '1' else 1
        v_ns = common.bin2int(mb[25:35]) - 1
        if subtype == 2:
            # FIX: supersonic subtype encodes velocity with a 4 kt LSB
            # instead of 1 kt (ADS-B airborne velocity spec).
            v_ew *= 4
            v_ns *= 4
        v_we = v_ew_sign * v_ew
        v_sn = v_ns_sign * v_ns
        spd = int(math.sqrt(v_sn * v_sn + v_we * v_we))
        trk = math.degrees(math.atan2(v_we, v_sn))
        trk = trk if trk >= 0 else trk + 360
        tag = 'GS'
        trk_or_hdg = round(trk, 2)
    else:
        # Airspeed subtypes (3: subsonic, 4: supersonic).
        if mb[13] == '0':
            hdg = None
        else:
            hdg = common.bin2int(mb[14:24]) / 1024.0 * 360.0
            hdg = round(hdg, 2)
        trk_or_hdg = hdg
        spd = common.bin2int(mb[25:35])
        spd = None if spd == 0 else spd - 1
        if subtype == 4 and spd is not None:
            # FIX: supersonic subtype encodes airspeed with a 4 kt LSB.
            spd *= 4
        if mb[24] == '0':
            tag = 'IAS'
        else:
            tag = 'TAS'
    vr_sign = -1 if mb[36] == '1' else 1
    vr = common.bin2int(mb[37:46])
    rocd = None if vr == 0 else int(vr_sign * (vr - 1) * 64)
    return spd, trk_or_hdg, rocd, tag
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
(int, float, int, string): speed (kt), ground track or heading (degree),
rate of climb/descend (ft/min), and speed type
('GS' for ground speed, 'IAS' or 'TAS' for airspeed) | juraj-google-style |
def get_config_path(appdirs=DEFAULT_APPDIRS, file_name=DEFAULT_CONFIG_FILENAME):
    """Return the fully qualified path where the config file is stored.

    Args:
        appdirs (appdirs.AppDirs, optional): Application directories instance
            providing the user config directory. Defaults to
            ``DEFAULT_APPDIRS``.
        file_name (text_type, optional): Name of the config file. Defaults to
            ``DEFAULT_CONFIG_FILENAME``.

    Returns:
        str: Fully qualified path (dir & filename) where we expect the
        config file.
    """
    config_dir = appdirs.user_config_dir
    return os.path.join(config_dir, file_name)
Args:
appdirs (appdirs.AppDirs, optional): Application directories instance
providing the user config directory. Allows you to use your own
application specific namespace if you wish. Defaults to ``DEFAULT_APPDIRS``.
file_name (text_type, optional): Name of the config file. Defaults to
``config.conf``.
Returns:
str: Fully qualified path (dir & filename) where we expect the config file. | juraj-google-style |
def create_report_proto(self, tt_config, tt_parameters, tensor_trace_order, tensor_trace_points, collected_signature_types):
    """Creates and returns a proto that stores tensor tracer configuration.

    Args:
        tt_config: TensorTracerConfig object holding information about the run
            environment (device, # cores, # hosts), and tensor tracer version
            information.
        tt_parameters: TTParameters object storing the user provided
            parameters for tensor tracer.
        tensor_trace_order: TensorTraceOrder object storing a topological
            order of the graph.
        tensor_trace_points: programmatically added trace points/checkpoints.
        collected_signature_types: the signature types collected, e.g. norm,
            max, min, mean...

    Returns:
        TensorTracerReport proto.
    """
    report = tensor_tracer_pb2.TensorTracerReport()
    # Copy the run-environment configuration into the proto.
    report.config.version = tt_config.version
    report.config.device = tt_config.device_type
    report.config.num_cores = tt_config.num_replicas
    report.config.num_hosts = tt_config.num_hosts
    report.config.num_cores_per_host = tt_config.num_replicas_per_host
    report.config.submode = tt_parameters.submode
    report.config.trace_mode = tt_parameters.trace_mode
    # Signatures are appended sorted by their index (the dict value).
    for signature_name, _ in sorted(collected_signature_types.items(), key=lambda x: x[1]):
        report.config.signatures.append(signature_name)
    for tensor in tensor_trace_order.graph_order.tensors:
        tensor_def = tensor_tracer_pb2.TensorTracerReport.TracedTensorDef()
        tensor_def.name = tensor.name
        if tensor.name in tensor_trace_order.tensorname_to_cache_idx:
            tensor_def.is_traced = True
            tensor_def.cache_index = tensor_trace_order.tensorname_to_cache_idx[tensor.name]
        else:
            # Untraced tensors are skipped entirely when a fingerprint subdir
            # is in use; otherwise they are recorded with is_traced = False.
            if tt_parameters.use_fingerprint_subdir:
                continue
            tensor_def.is_traced = False
        if tensor.name in tensor_trace_points:
            tensor_def.trace_point_name = tensor_trace_points[tensor.name]
        # Explanations are looked up first by tensor name, then by op name.
        if tensor.name in self.instrument_records:
            tensor_def.explanation = self.instrument_records[tensor.name]
        elif tensor.op.name in self.instrument_records:
            tensor_def.explanation = self.instrument_records[tensor.op.name]
        report.tensordef[tensor.name].CopyFrom(tensor_def)
    # The fingerprint is computed before the graphdef is attached.
    report.fingerprint = proto_fingerprint(report)
    logging.info('TensorTracerProto fingerprint is %s.', report.fingerprint)
    tf_graph = tensor_trace_order.graph_order.graph
    report.graphdef.CopyFrom(tf_graph.as_graph_def())
    return report | Creates and returns a proto that stores tensor tracer configuration.
Args:
tt_config: TensorTracerConfig object holding information about the run
environment (device, # cores, # hosts), and tensor tracer version
information.
tt_parameters: TTParameters objects storing the user provided parameters
for tensor tracer.
tensor_trace_order: TensorTraceOrder object storing a topological order of
the graph.
tensor_trace_points: Programmatically added trace_points/checkpoints.
collected_signature_types: The signature types collected, e,g, norm,
max, min, mean...
Returns:
TensorTracerReport proto. | github-repos |
def js_link(self, attr, other, other_attr):
    """Link two Bokeh model properties using JavaScript.

    Attaches a CustomJS callback that updates ``other.other_attr`` whenever
    ``self.attr`` changes value.

    Args:
        attr (str): The name of a Bokeh property on this model.
        other (Model): A Bokeh model to link to self.attr.
        other_attr (str): The property on ``other`` to link together.

    Raises:
        ValueError: If either property name is invalid, or if ``other`` is
            not a Bokeh model.
    """
    if attr not in self.properties():
        raise ValueError('%r is not a property of self (%r)' % (attr, self))
    if not isinstance(other, Model):
        raise ValueError("'other' is not a Bokeh model: %r" % other)
    if other_attr not in other.properties():
        raise ValueError('%r is not a property of other (%r)' % (other_attr, other))
    # Imported here to avoid a circular import at module load time.
    from bokeh.models.callbacks import CustomJS
    callback = CustomJS(args=dict(other=other),
                        code='other.%s = this.%s' % (other_attr, attr))
    self.js_on_change(attr, callback)
This is a convenience method that simplifies adding a CustomJS callback
to update one Bokeh model property whenever another changes value.
Args:
attr (str) :
The name of a Bokeh property on this model
other (Model):
A Bokeh model to link to self.attr
other_attr (str) :
The property on ``other`` to link together
Added in version 1.1
Raises:
ValueError
Examples:
This code with ``js_link``:
.. code :: python
select.js_link('value', plot, 'sizing_mode')
is equivalent to the following:
.. code:: python
from bokeh.models import CustomJS
select.js_on_change('value',
CustomJS(args=dict(other=plot),
code="other.sizing_mode = this.value"
)
) | codesearchnet |
def _ParseShellItem(self, parser_mediator, shell_item):
    """Parses a shell item.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        shell_item (pyfwsi.item): shell item.
    """
    path_segment = self._ParseShellItemPathSegment(shell_item)
    self._path_segments.append(path_segment)
    event_data = shell_item_events.ShellItemFileEntryEventData()
    event_data.origin = self._origin
    event_data.shell_item_path = self.CopyToPath()
    if isinstance(shell_item, pyfwsi.file_entry):
        event_data.name = shell_item.name
        for extension_block in shell_item.extension_blocks:
            if isinstance(extension_block, pyfwsi.file_entry_extension):
                long_name = extension_block.long_name
                localized_name = extension_block.localized_name
                file_reference = extension_block.file_reference
                if file_reference:
                    # Split the 64-bit NTFS file reference into
                    # "entry-sequence": lower 48 bits are the MFT entry,
                    # upper 16 bits the sequence number.
                    file_reference = '{0:d}-{1:d}'.format((file_reference & 281474976710655), (file_reference >> 48))
                event_data.file_reference = file_reference
                event_data.localized_name = localized_name
                event_data.long_name = long_name
                # A FAT date time value of 0 means "not set"; no event is
                # produced in that case.
                fat_date_time = extension_block.get_creation_time_as_integer()
                if (fat_date_time != 0):
                    date_time = dfdatetime_fat_date_time.FATDateTime(fat_date_time=fat_date_time)
                    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)
                    parser_mediator.ProduceEventWithEventData(event, event_data)
                fat_date_time = extension_block.get_access_time_as_integer()
                if (fat_date_time != 0):
                    date_time = dfdatetime_fat_date_time.FATDateTime(fat_date_time=fat_date_time)
                    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
                    parser_mediator.ProduceEventWithEventData(event, event_data)
        # The modification time comes from the shell item itself, not from an
        # extension block.
        fat_date_time = shell_item.get_modification_time_as_integer()
        if (fat_date_time != 0):
            date_time = dfdatetime_fat_date_time.FATDateTime(fat_date_time=fat_date_time)
            event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
            parser_mediator.ProduceEventWithEventData(event, event_data) | Parses a shell item.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
shell_item (pyfwsi.item): shell item. | codesearchnet |
def _GetCachedFileByPath(self, key_path_upper):
    """Retrieves a cached Windows Registry file for a key path.

    Args:
        key_path_upper (str): Windows Registry key path, in upper case with
            a resolved root key alias.

    Returns:
        tuple[str, WinRegistryFile]: longest matching key path prefix and
        the corresponding cached Windows Registry file, or (None, None) if
        no cached file maps to the key path.
    """
    longest_prefix = ''
    # Find the longest cached key path prefix that the key path starts with.
    for key_path_prefix in self._registry_files:
        if (key_path_upper.startswith(key_path_prefix) and
                len(key_path_prefix) > len(longest_prefix)):
            longest_prefix = key_path_prefix
    if not longest_prefix:
        return None, None
    return longest_prefix, self._registry_files.get(longest_prefix, None)
Args:
key_path_upper (str): Windows Registry key path, in upper case with
a resolved root key alias.
Returns:
tuple: consist:
str: key path prefix
WinRegistryFile: corresponding Windows Registry file or None if not
available. | codesearchnet |
def __getitem__(self, slice_spec):
    """Extracts the specified region as a Tensor from the sharded variable.

    The API contract is identical to `Tensor.__getitem__`. Assignment to the
    sliced range is not yet supported.

    Args:
        slice_spec: The arguments to __getitem__, specifying the global
            slicing of the sharded variable.

    Returns:
        The appropriate slice of tensor based on `slice_spec`.

    Raises:
        IndexError: If a slice index is out of bound.
        TypeError: If `slice_spec` contains Tensor.
    """
    # Boolean masks are applied against the full (concatenated) tensor.
    if isinstance(slice_spec, bool) or (isinstance(slice_spec, tensor_lib.Tensor) and slice_spec.dtype == dtypes.bool) or (isinstance(slice_spec, np.ndarray) and slice_spec.dtype == bool):
        tensor = _var_to_tensor(self)
        return array_ops.boolean_mask(tensor=tensor, mask=slice_spec)
    if not isinstance(slice_spec, (list, tuple)):
        slice_spec = (slice_spec,)
    s = slice_spec[0]
    if isinstance(s, slice):
        # Decompose the first-dimension slice into one slice per shard, then
        # concatenate the per-shard results.
        first_dim_slice_specs = self._decompose_slice_spec(s)
        values = []
        for i, var in enumerate(self._variables):
            if first_dim_slice_specs[i] is not None:
                all_dim_slice_spec = (first_dim_slice_specs[i],) + slice_spec[1:]
                values.append(var[all_dim_slice_spec])
        # A negative step walks the shards in reverse order.
        if s.step is not None and s.step < 0:
            values.reverse()
        if not values:
            return constant_op.constant([], dtype=self._dtype, shape=(0,) + self._shape[1:])
        return array_ops.concat(values, axis=0)
    elif s is Ellipsis:
        return array_ops.concat([var[slice_spec] for var in self._variables], axis=0)
    elif s is array_ops.newaxis:
        # Apply the remaining spec per shard, then add the new axis in front.
        return array_ops.concat([var[slice_spec[1:]] for var in self._variables], axis=0)[array_ops.newaxis]
    else:
        if isinstance(s, tensor_lib.Tensor):
            raise TypeError('ShardedVariable: using Tensor for indexing is not allowed.')
        # Normalize a negative index and bounds-check against dimension 0.
        if s < 0:
            s += self._shape[0]
        if s < 0 or s >= self._shape[0]:
            raise IndexError(f'ShardedVariable: slice index {s} of dimension 0 out of bounds.')
        # Locate the shard owning global index `s` via the shard offsets.
        for i in range(len(self._variables)):
            if i == len(self._variables) - 1 or (s >= self._var_offsets[i][0] and s < self._var_offsets[i + 1][0]):
                return self._variables[i][(s - self._var_offsets[i][0],) + slice_spec[1:]] | Extracts the specified region as a Tensor from the sharded variable.
The API contract is identical to `Tensor.__getitem__`. Assignment to the
sliced range is not yet supported.
Args:
slice_spec: The arguments to __getitem__, specifying the global slicing of
the sharded variable.
Returns:
The appropriate slice of tensor based on `slice_spec`.
Raises:
IndexError: If a slice index is out of bound.
TypeError: If `spec_spec` contains Tensor. | github-repos |
def choose_1_from_each(lists):
    """Yield lists built from one item of each input list.

    The number of yielded lists is the product of the input list lengths
    (e.g. 18 for lists of length 3, 2 and 3), and each yielded list has the
    same length as ``lists``.

    Args:
        lists (list of lists): A list of lists.

    Returns:
        generator of lists: one combination per yielded list; the rightmost
        list varies fastest, matching the original recursive order.
    """
    # itertools.product yields the exact same sequence as the previous
    # recursive implementation, without the per-level list slicing and the
    # recursion depth proportional to len(lists).
    from itertools import product
    for combination in product(*lists):
        yield list(combination)
from each list. This new list should be the length of each list multiplied
by the others: 18 for a list containing lists of 3, 2 and 3 items. Also the length
of each sub-list should be the same as the length of the list passed in.
Args:
lists(list of Lists): A list of lists
Returns:
list of lists: returns a list of lists constructions of one item from each
list in lists. | juraj-google-style |
def print_table(col_tuple, row_tuples):
    """Print column headers and rows as a reStructuredText table.

    Args:
        col_tuple: Tuple of column name strings.
        row_tuples: List of tuples containing row data.
    """
    # Column widths are the longest cell (header included) in each column.
    all_rows = [col_tuple] + row_tuples
    widths = [max(len(str(row[index])) for row in all_rows)
              for index in range(len(col_tuple))]
    row_format = ' '.join('{{:<{}}}'.format(width) for width in widths)
    border = ' '.join('=' * width for width in widths)
    print(border)
    print(row_format.format(*col_tuple))
    print(border)
    for row in row_tuples:
        print(row_format.format(*row))
    print(border)
    print()
Args:
col_tuple: Tuple of column name strings.
row_tuples: List of tuples containing row data. | codesearchnet |
def is_periodic_image(self, other, tolerance=1e-08, check_lattice=True):
    """Returns True if sites are periodic images of each other.

    Args:
        other (PeriodicSite): Other site.
        tolerance (float): Tolerance used when comparing fractional
            coordinates.
        check_lattice (bool): Whether to also require both sites to have
            the same lattice.

    Returns:
        bool: True if sites are periodic images of each other.
    """
    if check_lattice and self.lattice != other.lattice:
        return False
    if self.species != other.species:
        return False
    # Compare fractional coordinates modulo periodic boundary conditions.
    frac_diff = pbc_diff(self.frac_coords, other.frac_coords)
    return np.allclose(frac_diff, [0, 0, 0], atol=tolerance)
Args:
other (PeriodicSite): Other site
tolerance (float): Tolerance to compare fractional coordinates
check_lattice (bool): Whether to check if the two sites have the
same lattice.
Returns:
bool: True if sites are periodic images of each other. | codesearchnet |
def take(self, count, name=None) -> 'DatasetV2':
    """Creates a `Dataset` with at most `count` elements from this dataset.

    Args:
        count: A `tf.int64` scalar `tf.Tensor`, the number of elements of
            this dataset that should be taken; -1, or any value greater than
            the dataset size, takes all elements.
        name: (Optional.) A name for the tf.data operation.

    Returns:
        A new `Dataset` with the transformation applied.
    """
    # Imported at call time — presumably to avoid a circular dependency
    # between dataset_ops and take_op (TODO confirm).
    from tensorflow.python.data.ops import take_op
    return take_op._take(self, count, name=name) | Creates a `Dataset` with at most `count` elements from this dataset.
>>> dataset = tf.data.Dataset.range(10)
>>> dataset = dataset.take(3)
>>> [a.item() for a in dataset.as_numpy_iterator()]
[0, 1, 2]
Args:
count: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements of this dataset that should be taken to form the new dataset.
If `count` is -1, or if `count` is greater than the size of this
dataset, the new dataset will contain all elements of this dataset.
name: (Optional.) A name for the tf.data operation.
Returns:
A new `Dataset` with the transformation applied as described above. | github-repos |
def _ParseDataObject(self, file_object, file_offset):
    """Parses a data object.

    Args:
        file_object (dfvfs.FileIO): a file-like object.
        file_offset (int): offset of the data object relative to the start
            of the file-like object.

    Returns:
        bytes: data.

    Raises:
        ParseError: if the data object cannot be parsed.
    """
    data_object_map = self._GetDataTypeMap('systemd_journal_data_object')
    try:
        data_object, _ = self._ReadStructureFromFileObject(
            file_object, file_offset, data_object_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError((
            'Unable to parse data object at offset: 0x{0:08x} with error: '
            '{1!s}').format(file_offset, exception))
    if data_object.object_type != self._OBJECT_TYPE_DATA:
        raise errors.ParseError('Unsupported object type: {0:d}.'.format(
            data_object.object_type))
    # Only uncompressed, XZ-compressed and LZ4-compressed objects are
    # supported.
    if data_object.object_flags not in (
        0, self._OBJECT_COMPRESSED_FLAG_XZ, self._OBJECT_COMPRESSED_FLAG_LZ4):
        raise errors.ParseError('Unsupported object flags: 0x{0:02x}.'.format(
            data_object.object_flags))
    # Exclude the 64-byte data object header from the data size.
    data_size = data_object.data_size - 64
    data = file_object.read(data_size)
    if data_object.object_flags & self._OBJECT_COMPRESSED_FLAG_XZ:
        data = lzma.decompress(data)
    elif data_object.object_flags & self._OBJECT_COMPRESSED_FLAG_LZ4:
        # The LZ4-compressed payload is prefixed with its uncompressed size;
        # the first 8 bytes are skipped before decompression.
        uncompressed_size_map = self._GetDataTypeMap('uint32le')
        try:
            uncompressed_size = self._ReadStructureFromByteStream(
                data, file_offset + 64, uncompressed_size_map)
        except (ValueError, errors.ParseError) as exception:
            raise errors.ParseError((
                'Unable to parse LZ4 uncompressed size at offset: 0x{0:08x} with '
                'error: {1!s}').format(file_offset + 64, exception))
        data = lz4.block.decompress(
            data[8:], uncompressed_size=uncompressed_size)
    return data | Parses a data object.
Args:
file_object (dfvfs.FileIO): a file-like object.
file_offset (int): offset of the data object relative to the start
of the file-like object.
Returns:
bytes: data.
Raises:
ParseError: if the data object cannot be parsed. | juraj-google-style |
def _fused_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=0.001):
    """Fused version of `normalize_batch_in_training`.

    Args:
        x: Input tensor or variable.
        gamma: Tensor by which to scale the input (may be None).
        beta: Tensor with which to center the input (may be None).
        reduction_axes: iterable of integers, axes over which to normalize.
        epsilon: Fuzz factor.

    Returns:
        A tuple of length 3: `(normalized_tensor, mean, variance)`.
    """
    # Reducing over [0, 1, 2] means the features live on the last axis
    # (NHWC); otherwise the channels-first layout (NCHW) is assumed.
    if list(reduction_axes) == [0, 1, 2]:
        normalization_axis, tf_data_format = 3, 'NHWC'
    else:
        normalization_axis, tf_data_format = 1, 'NCHW'
    # Default to identity scale and zero shift when not provided.
    if gamma is None:
        gamma = constant_op.constant(1.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
    if beta is None:
        beta = constant_op.constant(0.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
    return nn.fused_batch_norm(x, gamma, beta, epsilon=epsilon, data_format=tf_data_format)
Args:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`. | github-repos |
def update_if_absent(self, **kwargs):
    """Update the settings whose current values are None.

    Args:
        kwargs: keyword arguments naming the fields to set; a field is only
            updated when its current value is None.

    Raises:
        ValueError: if a keyword does not match an existing field.
    """
    for name, value in kwargs.items():
        if not hasattr(self, name):
            raise ValueError("Invalid RayParams parameter in"
                             " update_if_absent: %s" % name)
        if getattr(self, name) is None:
            setattr(self, name, value)
    self._check_usage()
Args:
kwargs: The keyword arguments to set corresponding fields. | juraj-google-style |
def RunMetadata(self, tag):
    """Given a tag, return the associated session.run() metadata.

    Args:
        tag: A string tag associated with the event.

    Returns:
        The metadata in form of a `RunMetadata` proto.

    Raises:
        ValueError: If the tag is not found.
    """
    if tag in self._tagged_metadata:
        run_metadata = config_pb2.RunMetadata()
        run_metadata.ParseFromString(self._tagged_metadata[tag])
        return run_metadata
    raise ValueError('There is no run metadata with this tag name')
Args:
tag: A string tag associated with the event.
Raises:
ValueError: If the tag is not found.
Returns:
The metadata in form of `RunMetadata` proto. | juraj-google-style |
def evaluate_period_forecasts(self):
    """Evaluates ROC and Reliability scores for forecasts over the full period.

    Evaluates from start hour to end hour, for every combination of neighbor
    radius, size threshold and smoothing radius.

    Returns:
        A pandas DataFrame with full-period metadata and verification
        statistics.
    """
    score_columns = ['Run_Date', 'Ensemble Name', 'Model_Name', 'Forecast_Variable', 'Neighbor_Radius', 'Smoothing_Radius', 'Size_Threshold', 'ROC', 'Reliability']
    all_scores = pd.DataFrame(columns=score_columns)
    if (self.coordinate_file is not None):
        # Restrict evaluation to grid points inside the lon/lat bounding box
        # that also pass the observation mask.
        coord_mask = np.where((((((self.coordinates['lon'] >= self.lon_bounds[0]) & (self.coordinates['lon'] <= self.lon_bounds[1])) & (self.coordinates['lat'] >= self.lat_bounds[0])) & (self.coordinates['lat'] <= self.lat_bounds[1])) & (self.period_obs[self.mask_variable] > 0)))
    else:
        coord_mask = None
    for neighbor_radius in self.neighbor_radii:
        n_filter = disk(neighbor_radius)
        for (s, size_threshold) in enumerate(self.size_thresholds):
            # Convolve the thresholded observations with a disk footprint to
            # get neighborhood coverage, then clip values above 1.
            period_obs = fftconvolve((self.period_obs[self.mrms_variable] >= self.obs_thresholds[s]), n_filter, mode='same')
            period_obs[(period_obs > 1)] = 1
            # Apply the observation mask and/or coordinate mask, or flatten.
            if (self.obs_mask and (self.coordinate_file is None)):
                period_obs = period_obs[(self.period_obs[self.mask_variable] > 0)]
            elif (self.obs_mask and (self.coordinate_file is not None)):
                period_obs = period_obs[(coord_mask[0], coord_mask[1])]
            else:
                period_obs = period_obs.ravel()
            for smoothing_radius in self.smoothing_radii:
                print('Eval period forecast {0} {1} {2} {3} {4} {5}'.format(self.model_name, self.forecast_variable, self.run_date, neighbor_radius, size_threshold, smoothing_radius))
                period_var = 'neighbor_prob_{0:d}-hour_r_{1:d}_s_{2:d}_{3}_{4:0.2f}'.format(((self.end_hour - self.start_hour) + 1), neighbor_radius, smoothing_radius, self.forecast_variable, size_threshold)
                # The forecast field is masked the same way as the observations.
                if (self.obs_mask and (self.coordinate_file is None)):
                    period_forecast = self.period_forecasts[period_var][(self.period_obs[self.mask_variable] > 0)]
                elif (self.obs_mask and (self.coordinate_file is not None)):
                    period_forecast = self.period_forecasts[period_var][(coord_mask[0], coord_mask[1])]
                else:
                    period_forecast = self.period_forecasts[period_var].ravel()
                roc = DistributedROC(thresholds=self.probability_levels, obs_threshold=0.5)
                roc.update(period_forecast, period_obs)
                rel = DistributedReliability(thresholds=self.probability_levels, obs_threshold=0.5)
                rel.update(period_forecast, period_obs)
                row = [self.run_date, self.ensemble_name, self.model_name, self.forecast_variable, neighbor_radius, smoothing_radius, size_threshold, roc, rel]
                all_scores.loc[period_var] = row
    return all_scores | Evaluates ROC and Reliability scores for forecasts over the full period from start hour to end hour
Returns:
A pandas DataFrame with full-period metadata and verification statistics | codesearchnet |
def get_keras_mask(x):
    """Gets the Keras mask attribute from the given tensor.

    Args:
        x: Input tensor.

    Returns:
        The mask tensor associated with the input tensor, or `None` if no
        mask has been set.
    """
    return get_tensor_attr(x, '_keras_mask') | Gets the Keras mask attribute from the given tensor.
Args:
x: Input tensor.
Returns:
The mask tensor associated with the input tensor, or `None` if no mask
has been set. | github-repos |
def BuildFindSpecs(self, environment_variables=None):
    """Build find specifications from the filter file.

    Args:
        environment_variables (Optional[list[EnvironmentVariableArtifact]]):
            environment variables, used to expand path attributes such as
            {systemroot} in the filter file entries.

    Returns:
        list[dfvfs.FindSpec]: find specifications.
    """
    path_attributes = {}
    if environment_variables:
        for environment_variable in environment_variables:
            attribute_name = environment_variable.name.lower()
            attribute_value = environment_variable.value
            if not isinstance(attribute_value, py2to3.STRING_TYPES):
                continue
            # Remove the drive letter, for example "C:\Windows" => "\Windows".
            if len(attribute_value) > 2 and attribute_value[1] == ':':
                _, _, attribute_value = attribute_value.rpartition(':')
            if attribute_value.startswith('\\'):
                attribute_value = attribute_value.replace('\\', '/')
            path_attributes[attribute_name] = attribute_value

    find_specs = []
    with open(self._path, 'r') as file_object:
        for line in file_object:
            line = line.strip()
            # NOTE(review): the extracted source was truncated here; restored
            # the conventional '#' comment-line check.
            if line.startswith('#'):
                continue
            if path_attributes:
                try:
                    line = line.format(**path_attributes)
                except KeyError as exception:
                    logger.error((
                        'Unable to expand path filter: {0:s} with error: '
                        '{1!s}').format(line, exception))
                    continue
            if not line.startswith('/'):
                logger.warning((
                    'The path filter must be defined as an absolute path: '
                    '{0:s}').format(line))
                continue
            # Remove the empty string in front of the first path separator.
            path_segments = line.split('/')
            path_segments.pop(0)
            if not path_segments[-1]:
                logger.warning(
                    'Empty last path segment in path filter: {0:s}'.format(line))
                continue
            find_spec = file_system_searcher.FindSpec(
                location_regex=path_segments, case_sensitive=False)
            find_specs.append(find_spec)

    return find_specs
Args:
environment_variables (Optional[list[EnvironmentVariableArtifact]]):
environment variables.
Returns:
list[dfvfs.FindSpec]: find specification. | juraj-google-style |
def generic_type_args(type_: Type) -> List[Type]:
    """Gets the type argument list for the given generic type.

    Given ``List[int]`` this returns ``[int]``; given ``Union[int, str]`` it
    returns ``[int, str]``. Note that on Python < 3.7, ``Union[int, bool]``
    collapses to ``int`` before this function is called, so this does not
    help with that.

    Args:
        type_: The type to get the arguments list of.

    Returns:
        A list of Type objects.
    """
    # Older typing versions store Union arguments in __union_params__.
    union_params = getattr(type_, '__union_params__', None)
    if union_params is not None:
        return list(union_params)
    return list(type_.__args__)
If you give this function List[int], it will return [int], and
if you give it Union[int, str] it will give you [int, str]. Note
that on Python < 3.7, Union[int, bool] collapses to Union[int] and
then to int; this is already done by the time this function is
called, so it does not help with that.
Args:
type_: The type to get the arguments list of.
Returns:
A list of Type objects. | codesearchnet |
def sca_intensity(scatterer, h_pol=True):
    """Scattering intensity (phase function) for the current setup.

    Args:
        scatterer: a Scatterer instance.
        h_pol: If True (default), use horizontal polarization.
            If False, use vertical polarization.

    Returns:
        The differential scattering cross section.
    """
    phase_matrix = scatterer.get_Z()
    if h_pol:
        return phase_matrix[0, 0] - phase_matrix[0, 1]
    return phase_matrix[0, 0] + phase_matrix[0, 1]
Args:
scatterer: a Scatterer instance.
h_pol: If True (default), use horizontal polarization.
If False, use vertical polarization.
Returns:
The differential scattering cross section. | juraj-google-style |
def get(self, key, default=None, *, section=DataStoreDocumentSection.Data):
    """Return the field specified by its key from the specified section.

    Accesses the specified section of the workflow document and returns the
    value for the given key.

    Args:
        key (str): The key pointing to the value that should be retrieved;
            supports MongoDB's dot notation for nested fields.
        default: The default value handed to the lookup when the key does
            not exist.
        section (DataStoreDocumentSection): The section from which the data
            should be retrieved.

    Returns:
        object: The decoded value the key points to, or None when the key
        cannot be resolved.
    """
    dotted_key = '.'.join([section, key])
    try:
        raw_value = self._data_from_dotnotation(dotted_key, default)
        return self._decode_value(raw_value)
    except KeyError:
        return None
This method access the specified section of the workflow document and returns the
value for the given key.
Args:
key (str): The key pointing to the value that should be retrieved. It supports
MongoDB's dot notation for nested fields.
default: The default value that is returned if the key does not exist.
section (DataStoreDocumentSection): The section from which the data should
be retrieved.
Returns:
object: The value from the field that the specified key is pointing to. If the
key does not exist, the default value is returned. If no default value
is provided and the key does not exist ``None`` is returned. | codesearchnet |
def add_affiliation(self, value, curated_relation=None, record=None):
    """Add an affiliation.

    Falsy values are ignored; nothing is added in that case.

    Args:
        value (string): affiliation value.
        curated_relation (bool): whether the relation is curated.
        record (dict): affiliation JSON reference.
    """
    if not value:
        return
    affiliation = {'value': value}
    if record:
        affiliation['record'] = record
    if curated_relation is not None:
        affiliation['curated_relation'] = curated_relation
    self._ensure_list_field('affiliations', affiliation)
Args:
value (string): affiliation value
curated_relation (bool): is relation curated
record (dict): affiliation JSON reference | codesearchnet |
def copy_framebuffer(self, dst, src) -> None:
    """Copy framebuffer content.

    Use this method to blit framebuffers, copy framebuffer content into a
    texture, or downsample framebuffers (optionally directly to a texture),
    which makes the framebuffer's content readable.

    Args:
        dst (Framebuffer or Texture): Destination framebuffer or texture.
        src (Framebuffer): Source framebuffer.
    """
    # Delegate to the low-level implementation objects.
    self.mglo.copy_framebuffer(dst.mglo, src.mglo)
Use this method to:
- blit framebuffers.
- copy framebuffer content into a texture.
- downsample framebuffers. (it will allow to read the framebuffer's content)
- downsample a framebuffer directly to a texture.
Args:
dst (Framebuffer or Texture): Destination framebuffer or texture.
src (Framebuffer): Source framebuffer. | juraj-google-style |
def before_request(self, request, method, url, headers):
    """Performs credential-specific before-request logic.

    The JWT audience claim is derived from the request URI with the query
    string and fragment stripped.

    Args:
        request (Any): Unused. JWT credentials do not need to make an HTTP
            request to refresh.
        method (str): The request's HTTP method.
        url (str): The request's URI, used as the audience claim when
            generating the JWT.
        headers (Mapping): The request's headers, updated in place.
    """
    scheme, netloc, path, _, _ = urllib.parse.urlsplit(url)
    audience = urllib.parse.urlunsplit((scheme, netloc, path, "", ""))
    self.apply(headers, token=self._get_jwt_for_audience(audience))
Args:
request (Any): Unused. JWT credentials do not need to make an
HTTP request to refresh.
method (str): The request's HTTP method.
url (str): The request's URI. This is used as the audience claim
when generating the JWT.
headers (Mapping): The request's headers. | juraj-google-style |
def _new_ass_hierarchy(self, file_ass):
    """Returns a completely new cache hierarchy for a given assistant file.

    Args:
        file_ass: the assistant from the filesystem hierarchy to create the
            cache hierarchy for.

    Returns:
        dict: the newly created cache hierarchy.
    """
    hierarchy = {
        'source': file_ass['source'],
        'subhierarchy': {},
        'attrs': {},
        'snippets': {},
    }
    self._ass_refresh_attrs(hierarchy, file_ass)
    # Recurse into every sub-assistant.
    for name, sub_ass in file_ass['subhierarchy'].items():
        hierarchy['subhierarchy'][name] = self._new_ass_hierarchy(sub_ass)
    return hierarchy
Args:
file_ass: the assistant from filesystem hierarchy to create cache hierarchy for
(for format see what refresh_role accepts)
Returns:
the newly created cache hierarchy | juraj-google-style |
def encode_plus(self, table: 'pd.DataFrame', query: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]]=None, answer_coordinates: Optional[List[Tuple]]=None, answer_text: Optional[List[TextInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TapasTruncationStrategy]=False, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
    """Prepare a table and a string for the model.

    Validates the argument combination and delegates to `_encode_plus`.

    Args:
        table (`pd.DataFrame`): Table with text-only cells (use
            ``.astype(str)`` on a pandas dataframe to convert it to string).
        query (`str`, *optional*): Question related to the table.
        answer_coordinates (`List[Tuple]`, *optional*): (row_index,
            column_index) answer coordinates; must be provided together
            with `answer_text`.
        answer_text (`List[str]`, *optional*): Answer strings matching the
            coordinates.

    Returns:
        BatchEncoding: the encoded table/query pair.

    Raises:
        ValueError: on an inconsistent argument combination.
        NotImplementedError: for features unsupported by the Python
            tokenizer.
    """
    # token_type_ids cannot be computed without special tokens.
    if return_token_type_ids is not None and (not add_special_tokens):
        raise ValueError('Asking to return token_type_ids while setting add_special_tokens to False results in an undefined behavior. Please set add_special_tokens to True or set return_token_type_ids to None.')
    # Answers require both coordinates and text to be present.
    if answer_coordinates and (not answer_text) or (not answer_coordinates and answer_text):
        raise ValueError('In case you provide answers, both answer_coordinates and answer_text should be provided')
    if 'is_split_into_words' in kwargs:
        raise NotImplementedError('Currently TapasTokenizer only supports questions as strings.')
    if return_offsets_mapping:
        raise NotImplementedError('return_offsets_mapping is not available when using Python tokenizers. To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast.')
    return self._encode_plus(table=table, query=query, answer_coordinates=answer_coordinates, answer_text=answer_text, add_special_tokens=add_special_tokens, truncation=truncation, padding=padding, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) | Prepare a table and a string for the model.
Args:
table (`pd.DataFrame`):
Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas
dataframe to convert it to string.
query (`str` or `List[str]`):
Question related to a table to be encoded.
answer_coordinates (`List[Tuple]` or `List[List[Tuple]]`, *optional*):
Answer coordinates of each table-question pair in the batch. The answer_coordinates must be a single
list of one or more tuples. Each tuple must be a (row_index, column_index) pair. The first data row
(not the column header row) has index 0. The first column has index 0.
answer_text (`List[str]` or `List[List[str]]`, *optional*):
Answer text of each table-question pair in the batch. The answer_text must be a single list of one or
more strings. Each string must be the answer text of a corresponding answer coordinate. | github-repos |
def _freezeModel(self, func):
    """Freezes `func` by converting its variables to constants.

    Args:
        func: Function to freeze.

    Returns:
        tuple: (root, output_func) where `root` is an AutoTrackable object
        holding the original ConcreteFunction and `output_func` is the
        frozen ConcreteFunction.
    """
    root = autotrackable.AutoTrackable()
    root.f = func
    concrete_func = root.f.get_concrete_function()
    frozen_func = convert_to_constants.convert_variables_to_constants_v2(
        concrete_func, lower_control_flow=False)
    return (root, frozen_func)
Args:
func: Function.
Returns:
root: AutoTrackable object with original ConcreteFunction.
output_func: frozen ConcreteFunction. | github-repos |
def invoke_string(self, line):
    """Parse and invoke a string line.

    Empty lines and comment lines (starting with '#') are ignored.

    Args:
        line (str): The line that we want to parse and invoke.

    Returns:
        bool: A boolean specifying if the last function created a new
        context (False if a new context was created) and a list with the
        remainder of the command line if this function did not consume all
        arguments.
    """
    line = str(line)
    if len(line) == 0:
        return True
    # NOTE(review): the extracted source was truncated here; restored the
    # conventional '#' comment-line check.
    if line[0] == u'#':
        return True
    args = self._split_line(line)
    return self.invoke(args)
Args:
line (str): The line that we want to parse and invoke.
Returns:
bool: A boolean specifying if the last function created a new context
(False if a new context was created) and a list with the remainder of the
command line if this function did not consume all arguments.) | codesearchnet |
def get_go_server(settings=None):
    """Returns a `gocd.Server` configured by the `settings` object.

    Args:
        settings: a `gocd_cli.settings.Settings` object.
            Default: if falsey, calls `get_settings`.

    Returns:
        gocd.Server: a configured gocd.Server instance.
    """
    config = settings or get_settings()
    return gocd.Server(
        config.get('server'),
        user=config.get('user'),
        password=config.get('password'))
object.
Args:
settings: a `gocd_cli.settings.Settings` object.
Default: if falsey calls `get_settings`.
Returns:
gocd.Server: a configured gocd.Server instance | juraj-google-style |
def upload(cls, file_obj, store=None):
    """Uploads a file and returns a ``File`` instance.

    Args:
        file_obj: file object to upload.
        store (Optional[bool]): Should the file be automatically stored
            upon upload. Defaults to None.
            - False - do not store file
            - True - store file (can result in error if autostore is
              disabled for the project)
            - None - use project settings

    Returns:
        ``File`` instance.
    """
    if store is None:
        store_flag = 'auto'
    else:
        store_flag = '1' if store else '0'
    response = uploading_request(
        'POST', 'base/', data={'UPLOADCARE_STORE': store_flag},
        files={'file': file_obj})
    return cls(response['file'])
Args:
- file_obj: file object to upload to
- store (Optional[bool]): Should the file be automatically stored
upon upload. Defaults to None.
- False - do not store file
- True - store file (can result in error if autostore
is disabled for project)
- None - use project settings
Returns:
``File`` instance | juraj-google-style |
def select(self, field_paths):
    """Project documents matching this query onto a limited set of fields.

    If the current query already has a projection set, this overwrites it.

    Args:
        field_paths (Iterable[str, ...]): iterable of ``.``-delimited field
            paths to use as a projection of document fields in the results.

    Returns:
        ~.firestore_v1beta1.query.Query: a copy of the current query with
        the newly added projection.

    Raises:
        ValueError: If any ``field_path`` is invalid.
    """
    paths = list(field_paths)
    # Validate every path eagerly; split_field_path raises on bad input.
    for path in paths:
        field_path_module.split_field_path(path)
    references = [
        query_pb2.StructuredQuery.FieldReference(field_path=path)
        for path in paths
    ]
    projection = query_pb2.StructuredQuery.Projection(fields=references)
    return self.__class__(
        self._parent,
        projection=projection,
        field_filters=self._field_filters,
        orders=self._orders,
        limit=self._limit,
        offset=self._offset,
        start_at=self._start_at,
        end_at=self._end_at,
    )
See :meth:`~.firestore_v1beta1.client.Client.field_path` for
more information on **field paths**.
If the current query already has a projection set (i.e. has already
called :meth:`~.firestore_v1beta1.query.Query.select`), this
will overwrite it.
Args:
field_paths (Iterable[str, ...]): An iterable of field paths
(``.``-delimited list of field names) to use as a projection
of document fields in the query results.
Returns:
~.firestore_v1beta1.query.Query: A "projected" query. Acts as
a copy of the current query, modified with the newly added
projection.
Raises:
ValueError: If any ``field_path`` is invalid. | codesearchnet |
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int=1, last_epoch: int=-1):
    """Create a cosine-with-hard-restarts learning-rate schedule with warmup.

    The learning rate increases linearly from 0 to the optimizer's initial lr
    over ``num_warmup_steps``, then follows a cosine decay to 0 with
    ``num_cycles`` hard restarts.

    Args:
        optimizer ([`~torch.optim.Optimizer`]): optimizer to schedule.
        num_warmup_steps (`int`): number of warmup steps.
        num_training_steps (`int`): total number of training steps.
        num_cycles (`int`, *optional*, defaults to 1): number of hard restarts.
        last_epoch (`int`, *optional*, defaults to -1): index of the last
            epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    # Bind the schedule's hyper-parameters once; LambdaLR calls the result
    # with the current step.
    schedule_fn = partial(
        _get_cosine_with_hard_restarts_schedule_with_warmup_lr_lambda,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps,
        num_cycles=num_cycles,
    )
    return LambdaLR(optimizer, schedule_fn, last_epoch)
initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases
linearly between 0 and the initial lr set in the optimizer.
Args:
optimizer ([`~torch.optim.Optimizer`]):
The optimizer for which to schedule the learning rate.
num_warmup_steps (`int`):
The number of steps for the warmup phase.
num_training_steps (`int`):
The total number of training steps.
num_cycles (`int`, *optional*, defaults to 1):
The number of hard restarts to use.
last_epoch (`int`, *optional*, defaults to -1):
The index of the last epoch when resuming training.
Return:
`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. | github-repos |
def __call__(self, inputs, *args, **kwargs):
    """Wraps `call`, applying variable-scope setup and collection updates.

    Args:
        inputs: input tensor(s) forwarded to `self.call`.
        *args: additional positional arguments for `self.call`.
        **kwargs: additional keyword arguments for `self.call`. The `scope`
            kwarg is reserved: it names the variable scope the layer runs in.

    Returns:
        Output tensor(s) produced by `self.call`.

    Raises:
        ValueError: if `scope` is passed while Keras-style layers are
            enabled, or if the superclass `__call__` raises.
    """
    scope = kwargs.pop('scope', None)
    if self._keras_style:
        # Keras-style layers manage their own scoping; an explicit scope
        # argument is therefore rejected.
        if scope is not None:
            raise ValueError('scope argument not allowed when keras style layers are enabled, but saw: {}'.format(scope))
        return super(Layer, self).__call__(inputs, *args, **kwargs)
    self._set_scope(scope)
    if self.built:
        try:
            # Reuse the cached scope context manager from a previous call
            # when one exists (graph mode only).
            scope_context_manager = self._always_reuse_variable_scope
        except AttributeError:
            scope_context_manager = None
        if scope_context_manager is None:
            # Layer already built: re-enter its scope with reuse=True.
            scope_context_manager = vs.variable_scope(self._scope, reuse=True, auxiliary_name_scope=False)
            # Cache the manager only when not executing eagerly outside
            # functions; caching it under eager execution would be unsafe.
            if not ops.executing_eagerly_outside_functions():
                self._always_reuse_variable_scope = scope_context_manager
    else:
        scope_context_manager = vs.variable_scope(self._scope, reuse=self._reuse, auxiliary_name_scope=False)
    with scope_context_manager as scope:
        self._current_scope = scope
        try:
            call_has_scope_arg = self._call_has_scope_arg
        except AttributeError:
            # First call: inspect `call`'s signature once and cache whether
            # it accepts a `scope` keyword argument.
            self._call_fn_args = variable_scope_shim.fn_args(self.call)
            self._call_has_scope_arg = 'scope' in self._call_fn_args
            call_has_scope_arg = self._call_has_scope_arg
        if call_has_scope_arg:
            kwargs['scope'] = scope
        outputs = super(Layer, self).__call__(inputs, *args, **kwargs)
    if not context.executing_eagerly():
        # Mirror any layer updates into the graph's UPDATE_OPS collection.
        _add_elements_to_collection(self.updates, ops.GraphKeys.UPDATE_OPS)
    return outputs
Args:
inputs: input tensor(s).
*args: additional positional arguments to be passed to `self.call`.
**kwargs: additional keyword arguments to be passed to `self.call`.
**Note**: kwarg `scope` is reserved for use by the layer.
Returns:
Output tensor(s).
Note:
- If the layer's `call` method takes a `scope` keyword argument,
this argument will be automatically set to the current variable scope.
- If the layer's `call` method takes a `mask` argument (as some Keras
layers do), its default value will be set to the mask generated
for `inputs` by the previous layer (if `input` did come from
a layer that generated a corresponding mask, i.e. if it came from
a Keras layer with masking support.
Raises:
ValueError: if the layer's `call` method returns None (an invalid value). | github-repos |
def getFingerprint(self, text):
    """Get the semantic fingerprint of the input text.

    Args:
        text (str): The text to be evaluated.

    Returns:
        list of str: the positions of the semantic fingerprint.

    Raises:
        CorticalioException: if the request was not successful.
    """
    fingerprint = self._fullClient.getFingerprintForText(text)
    return fingerprint.positions
Args:
text, str: The text to be evaluated
Returns:
list of str: the positions of the semantic fingerprint
Raises:
CorticalioException: if the request was not successful | juraj-google-style |
def match(self, other_version):
    """Return True if ``other_version`` matches this version.

    Args:
        other_version: string of the form "x[.y[.z]]" where each component
            may be a number or the wildcard "*".
    """
    major, minor, patch = _str_to_version(other_version, allow_wildcard=True)
    # A component matches when it equals ours or is the wildcard.
    pairs = ((major, self.major), (minor, self.minor), (patch, self.patch))
    return all(got == want or got == "*" for got, want in pairs)
Args:
other_version: string, of the form "x[.y[.x]]" where {x,y,z} can be a
number or a wildcard. | juraj-google-style |
def ParseOptions(cls, options, configuration_object):
    """Parses and validates date filter options.

    Each date filter has the form ``TIME_VALUE,START_TIME,END_TIME`` and is
    added to the configuration object's filter collection as a date/time
    range file entry filter.

    Args:
        options (argparse.Namespace): parser options.
        configuration_object (CLITool): object to be configured by the
            argument helper.

    Raises:
        BadConfigObject: when the configuration object is of the wrong type
            or is missing its filter collection.
        BadConfigOption: when a date filter is badly formed.
    """
    if not isinstance(configuration_object, tools.CLITool):
        raise errors.BadConfigObject(
            'Configuration object is not an instance of CLITool')
    filter_collection = getattr(
        configuration_object, '_filter_collection', None)
    if not filter_collection:
        raise errors.BadConfigObject(
            'Filter collection missing from configuration object')
    date_filters = getattr(options, 'date_filters', None)
    if not date_filters:
        # Date filters are optional; nothing to configure.
        return
    file_entry_filter = file_entry_filters.DateTimeFileEntryFilter()
    for date_filter in date_filters:
        # Expected format: "TIME_VALUE,START_TIME,END_TIME".
        date_filter_pieces = date_filter.split(',')
        if len(date_filter_pieces) != 3:
            raise errors.BadConfigOption(
                'Badly formed date filter: {0:s}'.format(date_filter))
        time_value, start_time_string, end_time_string = date_filter_pieces
        time_value = time_value.strip()
        start_time_string = start_time_string.strip()
        end_time_string = end_time_string.strip()
        try:
            file_entry_filter.AddDateTimeRange(
                time_value, start_time_string=start_time_string,
                end_time_string=end_time_string)
        except ValueError:
            raise errors.BadConfigOption(
                'Badly formed date filter: {0:s}'.format(date_filter))
    filter_collection.AddFilter(file_entry_filter)
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type. | juraj-google-style |
def initialize(graph=None, session=None):
    """Initializes summary writing for graph execution mode.

    This operation is a no-op when executing eagerly.

    Args:
        graph: A `tf.Graph` or `tf.compat.v1.GraphDef` to output to the
            writer. The default graph is not written automatically; when
            writing to an event log file, the associated step will be zero.
        session: Session used to run the initializer op (and the graph
            summary op when `graph` is given). Defaults to the default
            session.

    Raises:
        RuntimeError: If the current thread has no default
            `tf.contrib.summary.SummaryWriter`.
        ValueError: If `session` wasn't passed and no default session exists.
    """
    if context.executing_eagerly():
        return
    if _summary_state.writer is None:
        raise RuntimeError('No default tf.contrib.summary.SummaryWriter found')
    if session is None:
        session = ops.get_default_session()
    if session is None:
        # Fixed: the original message had an unbalanced backtick around
        # `session`.
        raise ValueError('Argument `session` must be passed if no default session exists')
    session.run(summary_writer_initializer_op())
    if graph is not None:
        data = _serialize_graph(graph)
        # Feed the serialized graph through a placeholder so large graphs
        # are not embedded in the graph as constants.
        x = array_ops.placeholder(dtypes.string)
        session.run(graph_v1(x, 0), feed_dict={x: data})
This operation is a no-op when executing eagerly.
This helper method provides a higher-level alternative to using
`tf.contrib.summary.summary_writer_initializer_op` and
`tf.contrib.summary.graph`.
Most users will also want to call `tf.compat.v1.train.create_global_step`
which can happen before or after this function is called.
Args:
graph: A `tf.Graph` or `tf.compat.v1.GraphDef` to output to the writer.
This function will not write the default graph by default. When
writing to an event log file, the associated step will be zero.
session: So this method can call `tf.Session.run`. This defaults
to `tf.compat.v1.get_default_session`.
Raises:
RuntimeError: If the current thread has no default
`tf.contrib.summary.SummaryWriter`.
ValueError: If session wasn't passed and no default session. | github-repos |
def FormatSOAPDateTime(value):
    """Format a SOAP DateTime object for printing.

    Args:
        value: mapping with a nested 'date' mapping (year/month/day) plus
            'hour', 'minute', 'second' and 'timeZoneId' entries.

    Returns:
        str: "YEAR-MONTH-DAY HOUR:MINUTE:SECOND (TZ)" built from the
        components as-is (no zero padding is added).
    """
    date_part = value['date']
    return '{0}-{1}-{2} {3}:{4}:{5} ({6})'.format(
        date_part['year'], date_part['month'], date_part['day'],
        value['hour'], value['minute'], value['second'],
        value['timeZoneId'])
Args:
value: The DateTime object to format.
Returns:
A string representing the value. | codesearchnet |
def items(self, section=_UNSET):
    """Return a list of (name, value) tuples for options or sections.

    Args:
        section (str): optional section name, default UNSET. When given,
            the tuples describe that section's options; otherwise they
            describe the sections themselves.

    Returns:
        list: list of (name, Section) or (key, Option) tuples.
    """
    if section is _UNSET:
        return [(block.name, block) for block in self.sections_blocks()]
    target = self.__getitem__(section)
    return [(option.key, option) for option in target.option_blocks()]
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_type) for each section.
Args:
section (str): optional section name, default UNSET
Returns:
list: list of :class:`Section` or :class:`Option` objects | juraj-google-style |
def get_gradients(self, loss, params):
    """Returns gradients of `loss` with respect to `params`.

    Should be used only in legacy v1 graph mode.

    Args:
        loss: Loss tensor.
        params: List (or nested structure) of variables.

    Returns:
        List of gradient tensors.

    Raises:
        ValueError: In case any gradient cannot be computed (e.g. if the
            gradient function is not implemented for an op).
    """
    flat_params = nest.flatten(params)
    with backend.get_graph().as_default(), backend.name_scope(self._name + '/gradients'):
        grads = gradients.gradients(loss, flat_params)
        # Fail loudly when any variable is disconnected from the loss.
        for param, grad in zip(flat_params, grads):
            if grad is None:
                raise ValueError('Variable {} has `None` for gradient. Please make sure that all of your ops have a gradient defined (i.e. are differentiable). Common ops without gradient: K.argmax, K.round, K.eval.'.format(param))
    return grads
Should be used only in legacy v1 graph mode.
Args:
loss: Loss tensor.
params: List of variables.
Returns:
List of gradient tensors.
Raises:
ValueError: In case any gradient cannot be computed (e.g. if gradient
function not implemented). | github-repos |
def in_builddir(sub='.'):
    """Decorate a project phase with a local working directory change.

    Args:
        sub: An optional subdirectory (relative to the project's builddir)
            to change into for the duration of the wrapped call.

    Returns:
        A decorator that runs the wrapped method with the current working
        directory set to ``<builddir>/<sub>``.
    """
    from functools import wraps

    def wrap_in_builddir(func):
        @wraps(func)
        def wrap_in_builddir_func(self, *args, **kwargs):
            p = local.path(self.builddir) / sub
            if not p.exists():
                LOG.error("%s does not exist.", p)
            if p == local.cwd:
                LOG.debug("CWD already is %s", p)
                # Bug fix: the original used ``*kwargs`` here, which unpacks
                # the keyword names positionally instead of forwarding the
                # keyword arguments.
                return func(self, *args, **kwargs)
            with local.cwd(p):
                return func(self, *args, **kwargs)
        return wrap_in_builddir_func
    return wrap_in_builddir
Args:
sub: An optional subdirectory to change into. | juraj-google-style |
def remote(self, *args, **kwargs):
    """Create an actor.

    Args:
        *args: Forwarded directly to the actor constructor.
        **kwargs: Forwarded directly to the actor constructor.

    Returns:
        A handle to the newly created actor.
    """
    actor_handle = self._remote(args=args, kwargs=kwargs)
    return actor_handle
def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
        The converted data in serialized format, either a TFLite Flatbuffer
        or a Graphviz graph depending on value in `output_format`.

    Raises:
        ValueError:
            Input shape is not specified.
            None value for dimension in input_tensor.
    """
    self._validate_inputs(self._input_tensors, self.quantized_input_stats)
    # Bundle every quantization-related setting into one mode object.
    quant_mode = QuantizationMode(self.optimizations, self.target_spec, self.representative_dataset, self._graph_def, self._experimental_disable_per_channel, self.experimental_new_dynamic_range_quantizer, self._experimental_low_bit_qat, self._experimental_full_integer_quantization_bias_type, self._experimental_variable_quantization, self._experimental_strict_qdq)
    # Run TF-level graph optimizations before handing off to the converter.
    optimized_graph = self._optimize_tf_model(self._graph_def, self._input_tensors, self._output_tensors, quant_mode)
    self._debug_info = _get_debug_info(self._debug_info_func, optimized_graph)
    converter_kwargs = self._get_base_converter_args()
    converter_kwargs.update(quant_mode.converter_flags(self.inference_type, self.inference_input_type))
    converter_kwargs.update({'output_format': self.output_format, 'quantized_input_stats': self._quantized_stats, 'default_ranges_stats': self.default_ranges_stats, 'drop_control_dependency': self.drop_control_dependency, 'reorder_across_fake_quant': self.reorder_across_fake_quant, 'change_concat_input_ranges': self.change_concat_input_ranges, 'dump_graphviz_dir': self.dump_graphviz_dir, 'dump_graphviz_video': self.dump_graphviz_video, 'conversion_summary_dir': self.conversion_summary_dir})
    self._validate_quantized_input_stats(converter_kwargs, quant_mode)
    if not self.experimental_new_converter:
        logging.warning('Please consider switching to the new converter by setting experimental_new_converter=True. The old converter is deprecated.')
    else:
        logging.info('Using experimental converter: If you encountered a problem please file a bug. You can opt-out by setting experimental_new_converter=False')
    # Inputs may be given as concrete tensors or as (name, shape) arrays.
    if self._has_valid_tensors():
        result = _convert_graphdef(input_data=optimized_graph, input_tensors=self._input_tensors, output_tensors=self._output_tensors, **converter_kwargs)
    else:
        result = _convert_graphdef_with_arrays(input_data=optimized_graph, input_arrays_with_shape=self._input_arrays_with_shape, output_arrays=self._output_arrays, control_output_arrays=self._control_output_arrays, **converter_kwargs)
    # Apply TFLite-level post-processing (e.g. quantization) before return.
    return self._optimize_tflite_model(result, quant_mode, _build_conversion_flags(**converter_kwargs).debug_options, quant_io=self.experimental_new_quantizer)
Returns:
The converted data in serialized format, either a TFLite Flatbuffer or
a Graphviz graph depending on value in `output_format`.
Raises:
ValueError:
Input shape is not specified.
None value for dimension in input_tensor. | github-repos |
def _normalize_string(raw_str):
    """Normalizes the string using tokenizer.encode.

    Args:
        raw_str: the input string.

    Returns:
        A single-space-joined string of stripped tokens, ready to be
        tokenized using split().
    """
    unicode_str = text_encoder.native_to_unicode(raw_str)
    stripped_tokens = [token.strip() for token in tokenizer.encode(unicode_str)]
    return ' '.join(stripped_tokens)
Args:
raw_str: the input string
Returns:
A string which is ready to be tokenized using split() | codesearchnet |
class RowwiseParallel(TensorParallelLayer):
    """Partition a compatible nn.Module in a row-wise fashion.

    Currently supports nn.Linear and nn.Embedding. Can be composed with
    ColwiseParallel to shard more complicated modules (i.e. MLP, Attention).

    Keyword Args:
        input_layouts (Placement, optional): DTensor layout of the module's
            input tensor; defaults to sharded on the last dimension.
        output_layouts (Placement, optional): desired DTensor layout of the
            module's output; defaults to replicated.
        use_local_output (bool, optional): whether to return a local
            :class:`torch.Tensor` instead of a :class:`DTensor` from the
            module, default: True.
        use_dtensor (bool, optional): whether to wrap sharded parameters in
            DTensor and distribute the module, default: True.
    """
    def __init__(self, *, input_layouts: Optional[Placement]=None, output_layouts: Optional[Placement]=None, use_local_output: bool=True, use_dtensor=True):
        super().__init__()
        # Inputs default to sharded on the last dim; outputs to replicated.
        self.input_layouts = (input_layouts or Shard(-1),)
        self.output_layouts = (output_layouts or Replicate(),)
        self.use_local_output = use_local_output
        self.use_dtensor = use_dtensor

    def partition_tensor(self, param, empty_param, param_type, param_casting_dtype, to_contiguous, rank, device_mesh):
        # Weights are sharded along the last dimension; biases are kept
        # replicated on every rank.
        if param_type != 'bias':
            parameter = get_tensor_shard(param, empty_param, device_mesh, rank, -1)
            shard = [Shard(-1)]
        else:
            shard = [Replicate()]
            parameter = param[:]
        parameter = parameter.to(param_casting_dtype)
        if to_contiguous:
            parameter = parameter.contiguous()
        if self.use_dtensor:
            parameter = DTensor.from_local(parameter, device_mesh, shard, run_check=False)
        return nn.Parameter(parameter, requires_grad=parameter.is_floating_point())

    @staticmethod
    def _prepare_input_fn(input_layouts, desired_input_layouts, mod, inputs, device_mesh):
        # Stash the bias so it is re-added once after the output is
        # combined, rather than once per rank.
        if hasattr(mod, 'bias') and mod.bias is not None:
            mod._bias = mod.bias
            mod.bias = None
        input_tensor = inputs[0]
        if not isinstance(input_tensor, DTensor):
            input_tensor = DTensor.from_local(input_tensor, device_mesh, input_layouts, run_check=False)
        if input_layouts != desired_input_layouts:
            input_tensor = input_tensor.redistribute(placements=desired_input_layouts, async_op=True)
        return input_tensor

    @staticmethod
    def _prepare_output_fn(output_layouts, use_local_output, mod, outputs, device_mesh):
        # Redistribute the output to the user-requested layout, then add
        # back the stashed bias (see _prepare_input_fn).
        if outputs.placements != output_layouts:
            outputs = outputs.redistribute(placements=output_layouts, async_op=True)
        if hasattr(mod, '_bias'):
            outputs += mod._bias
        return outputs.to_local() if use_local_output else outputs

    def prepare_module_tp(self, module: nn.Module, device_mesh) -> nn.Module:
        module._distribute_module_applied = True
        if self.use_dtensor:
            if isinstance(module, nn.Linear):
                self.desired_input_layouts: Tuple[Placement, ...] = (Shard(-1),)
            elif isinstance(module, nn.Embedding):
                # Embedding inputs are token ids, which must be replicated.
                self.desired_input_layouts = (Replicate(),)
            elif isinstance(module, nn.Parameter):
                self.desired_input_layouts = (Shard(-1),)
            else:
                raise NotImplementedError('RowwiseParallel currently only support nn.Linear and nn.Embedding!')
            distribute_module(module, device_mesh, partial(self._prepare_input_fn, self.input_layouts, self.desired_input_layouts), partial(self._prepare_output_fn, self.output_layouts, self.use_local_output))
Users can compose it with ColwiseParallel to achieve the sharding of more complicated modules.
(i.e. MLP, Attention)
Keyword Args:
input_layouts (Placement, optional):
The DTensor layout of input tensor for the nn.Module, this is used to annotate the input tensor to
become a DTensor. If not specified, we assume the input tensor to be sharded on the last dimension.
output_layouts (Placement, optional):
The DTensor layout of the output for the nn.Module, this is used to ensure the output of the nn.Module
with the user desired layout. If not specified, the output tensor is replicated.
use_local_output (bool, optional):
Whether to use local :class:`torch.Tensor` instead of :class:`DTensor` for the module output, default: True.
Returns:
A :class:`ParallelStyle` object that represents Rowwise sharding of the nn.Module. | github-repos |
def sg_regularizer_loss(scale=1.0):
    r"""Get regularizer loss.

    Args:
        scale: A scalar weight applied to the mean regularization loss.
    """
    reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    return scale * tf.reduce_mean(reg_losses)
Args:
scale: A scalar. A weight applied to regularizer loss | juraj-google-style |
def get_community_names():
    """Get the current accepted SNMP community names and their permissions.

    If community names are being managed by Group Policy, each name maps to
    the string 'Managed by GPO'; otherwise each name maps to the name of its
    permission type.

    Returns:
        dict: A dictionary of community names and permissions.

    CLI Example:

    .. code-block:: bash

        salt '*' win_snmp.get_community_names
    """
    communities = dict()
    # GPO-managed community names take precedence over local SNMP settings.
    if __utils__['reg.key_exists'](_HKEY, _COMMUNITIES_GPO_KEY):
        _LOG.debug('Loading communities from Group Policy settings')
        entries = __utils__['reg.list_values'](_HKEY, _COMMUNITIES_GPO_KEY, include_default=False)
        if isinstance(entries, list):
            for entry in entries:
                if not isinstance(entry, dict):
                    continue
                communities[entry['vdata']] = 'Managed by GPO'
    if not communities:
        _LOG.debug('Loading communities from SNMP settings')
        entries = __utils__['reg.list_values'](_HKEY, _COMMUNITIES_KEY, include_default=False)
        if isinstance(entries, list):
            for entry in entries:
                if not isinstance(entry, dict):
                    continue
                # Translate the numeric permission value back to its name;
                # unknown values yield an empty string.
                permissions = six.text_type()
                for permission_name in _PERMISSION_TYPES:
                    if entry['vdata'] == _PERMISSION_TYPES[permission_name]:
                        permissions = permission_name
                        break
                communities[entry['vname']] = permissions
    if not communities:
        _LOG.debug('Unable to find existing communities.')
    return communities
If community names are being managed by Group Policy, those values will be
returned instead like this:
.. code-block:: bash
TestCommunity:
Managed by GPO
Community names managed normally will denote the permission instead:
.. code-block:: bash
TestCommunity:
Read Only
Returns:
dict: A dictionary of community names and permissions.
CLI Example:
.. code-block:: bash
salt '*' win_snmp.get_community_names | codesearchnet |
def GetSuperClasses():
    """Get a Python type hierarchy mapping.

    Returns:
        A fresh dictionary mapping a type name (string) to a list of base
        type names, e.g. "float" -> ["Real"], usable to look up the bases
        of a type in the abstract base class hierarchy.
    """
    # Return a shallow copy so callers cannot mutate the module-level table.
    return dict(SUPERCLASSES)
This generates a dictionary that can be used to look up the bases of
a type in the abstract base class hierarchy.
Returns:
A dictionary mapping a type, as string, to a list of base types (also
as strings). E.g. "float" -> ["Real"]. | github-repos |
def payoff(spots):
    """Computes payoff for the specified options given the spot grid.

    Args:
        spots: Tensor of shape [batch_size, grid_size, 1]. The spot values
            at some time.

    Returns:
        Payoffs for exercise at the specified strikes.
    """
    # Intrinsic value, sign-adjusted for calls vs puts, clipped at zero.
    intrinsic_value = (spots - strikes) * option_signs
    return tf.nn.relu(intrinsic_value)
Args:
spots: Tensor of shape [batch_size, grid_size, 1]. The spot values at some
time.
Returns:
Payoffs for exercise at the specified strikes. | github-repos |
def insert_column(table, insert_column, col_name=None, default_value=None):
    """Insert a new column before another column (given by name or index).

    Args:
        table: List of rows (lists); row 0 holds the column labels.
        insert_column: The column index, or the row-0 column name, before
            which the insertion should occur. An unknown name inserts at
            index 0.
        col_name: The label inserted into the first row of the new column.
            When None, the header row is filled like any other row.
        default_value: A value, or a callable taking
            ``(column_labels, row, column_index)`` and returning a value,
            used to fill the new column's cells. When None, cells are left
            as None.
    """
    column_labels = table[0]
    following_index = 0

    def set_cell(row, column_index, value):
        # A callable default is evaluated per-row; anything else is stored
        # verbatim.
        if callable(value):
            row[column_index] = value(column_labels, row, column_index)
        else:
            row[column_index] = value

    # Fixed: `basestring` is Python 2-only and raised NameError on Python 3.
    if isinstance(insert_column, str):
        insert_column = insert_column.strip()
        for column_index in range(len(column_labels)):
            if column_labels[column_index] == insert_column:
                following_index = column_index
                break
    else:
        following_index = insert_column
    col_data_start = 0
    if col_name is not None:
        table[0].insert(following_index, col_name.strip())
        col_data_start = 1
    for row in table[col_data_start:]:
        row.insert(following_index, None)
        # Fixed: compare against None so falsy defaults (0, "") are applied.
        if default_value is not None:
            set_cell(row, min(following_index, len(row) - 1), default_value)
Args:
insert_column: The column index or first row name where the insertion should occur
col_name: The name to insert into the first row of the column. Leaving this argument
to the default of None will apply the default_value to that row's cell.
default_value: Can be a value or function which takes (row, index, value) as
arguments to return a value. | juraj-google-style |
def create_halton_samples(order, dim=1, burnin=(- 1), primes=()):
    """Create Halton sequence.

    For ``dim == 1`` the sequence falls back to the Van der Corput sequence.

    Args:
        order (int): Order of the Halton sequence; defines the number of
            samples.
        dim (int): Number of dimensions in the Halton sequence.
        burnin (int): Skip the first ``burnin`` samples. If negative, the
            maximum of ``primes`` is used.
        primes (tuple): The (non-)prime base per axis. If empty, growing
            prime values starting from 2 will be used.

    Returns (numpy.ndarray):
        Halton sequence with ``shape == (dim, order)``.
    """
    bases = list(primes)
    if not bases:
        # Grow the candidate prime pool until at least `dim` primes exist.
        prime_order = 10 * dim
        while len(bases) < dim:
            bases = create_primes(prime_order)
            prime_order *= 2
        bases = bases[:dim]
    assert len(bases) == dim, 'not enough primes'
    if burnin < 0:
        burnin = max(bases)
    samples = numpy.empty((dim, order))
    indices = [idx + burnin for idx in range(order)]
    for axis in range(dim):
        samples[axis] = create_van_der_corput_samples(indices, number_base=bases[axis])
    return samples
For ``dim == 1`` the sequence falls back to Van Der Corput sequence.
Args:
order (int):
The order of the Halton sequence. Defines the number of samples.
dim (int):
The number of dimensions in the Halton sequence.
burnin (int):
Skip the first ``burnin`` samples. If negative, the maximum of
``primes`` is used.
primes (tuple):
The (non-)prime base to calculate values along each axis. If
empty, growing prime values starting from 2 will be used.
Returns (numpy.ndarray):
Halton sequence with ``shape == (dim, order)``. | codesearchnet |
def __init__(self, config, in_channels, out_channels, bottleneck_channels):
    """Bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand, each followed by
    LayerNorm and (for the first two) an activation.

    Args:
        config (`VitDetConfig`):
            Model configuration; supplies the activation function name.
        in_channels (`int`):
            Number of input channels.
        out_channels (`int`):
            Number of output channels.
        bottleneck_channels (`int`):
            Number of output channels for the 3x3 "bottleneck" conv layers.
    """
    super().__init__()
    # Convolutions omit the bias because each is followed by a LayerNorm.
    self.conv1 = nn.Conv2d(in_channels, bottleneck_channels, 1, bias=False)
    self.norm1 = VitDetLayerNorm(bottleneck_channels)
    self.act1 = ACT2FN[config.hidden_act]
    self.conv2 = nn.Conv2d(bottleneck_channels, bottleneck_channels, 3, padding=1, bias=False)
    self.norm2 = VitDetLayerNorm(bottleneck_channels)
    self.act2 = ACT2FN[config.hidden_act]
    self.conv3 = nn.Conv2d(bottleneck_channels, out_channels, 1, bias=False)
    self.norm3 = VitDetLayerNorm(out_channels)
config (`VitDetConfig`):
Model configuration.
in_channels (`int`):
Number of input channels.
out_channels (`int`):
Number of output channels.
bottleneck_channels (`int`):
Number of output channels for the 3x3 "bottleneck" conv layers. | github-repos |
def generate_example(config, ext='json'):
    """Generate an example file based on the given Configuration object.

    Args:
        config (confpy.core.configuration.Configuration): The configuration
            object on which to base the example.
        ext (str): The file extension to render. Choices: JSON and INI.

    Returns:
        str: The text of the example file.
    """
    template = ENV.get_template('example.{0}'.format(ext.lower()))
    return template.render(config=config)
Args:
config (confpy.core.configuration.Configuration): The configuration
object on which to base the example.
ext (str): The file extension to render. Choices: JSON and INI.
Returns:
str: The text of the example file. | codesearchnet |
def set_zones_device_assignment(self, internal_devices, external_devices) -> dict:
    """Sets the devices for the security zones.

    Args:
        internal_devices (List[Device]): devices to use for the internal
            zone.
        external_devices (List[Device]): devices to use for the external
            (hull) zone.

    Returns:
        the result of _restCall
    """
    # Only the device ids are sent to the REST endpoint.
    assignment = {
        "INTERNAL": [device.id for device in internal_devices],
        "EXTERNAL": [device.id for device in external_devices],
    }
    data = {"zonesDeviceAssignment": assignment}
    return self._restCall(
        "home/security/setZonesDeviceAssignment", body=json.dumps(data)
    )
Args:
internal_devices(List[Device]): the devices which should be used for the internal zone
external_devices(List[Device]): the devices which should be used for the external(hull) zone
Returns:
the result of _restCall | juraj-google-style |
def distinct(l):
    """Return a lazy iterator over ``l`` with duplicates removed.

    Order is preserved; only the first occurrence of each element is kept.

    Args:
        l (list): the iterable to filter.

    Returns:
        generator: the same elements without duplicates.
    """
    seen = set()

    def _first_occurrence(item):
        # Record the item on first sight; reject it afterwards.
        if item in seen:
            return False
        seen.add(item)
        return True

    return (item for item in l if _first_occurrence(item))
Args:
l (list): the list to filter.
Returns:
list: the same list without duplicates. | juraj-google-style |
def add(self, dic):
    """Add every key/value pair of ``dic`` as a Pair entry.

    Args:
        dic (dict): keys and string values to add.
    """
    for key, value in dic.items():
        # Validate the key against the configured keyword set first.
        checkKey(key, self.keyWord)
        self._add([Pair(key, StringSingle(value))], self.d)
Args:
dic (dict): key and value | codesearchnet |
def get_templates(self, id_or_uri, start=0, count=(- 1), filter='', query='', sort=''):
    """Gets a list of volume templates belonging to the storage system.

    Returns:
        list: Storage Template List.
    """
    templates_uri = self._client.build_uri(id_or_uri) + '/templates'
    query_uri = self._client.build_query_uri(
        start=start, count=count, filter=filter, query=query, sort=sort,
        uri=templates_uri)
    return self._client.get(query_uri)
Returns:
list: Storage Template List. | codesearchnet |
def site_specific_nn_occupation(self):
    """Count occupied nearest-neighbour sites, classified by site label.

    Returns:
        dict(str, int): occupied neighbour counts keyed by site label,
        e.g. ``{'A': 2, 'B': 1}``. Every label present among the
        neighbours appears, even with a zero count.
    """
    # Start every observed label at zero so unoccupied labels still appear.
    counts = {label: 0 for label in {site.label for site in self.p_neighbours}}
    for site in self.p_neighbours:
        if site.is_occupied:
            counts[site.label] += 1
    return counts
Args:
None
Returns:
(Dict(Str:Int)): Dictionary of nearest-neighbour occupied site numbers, classified by site label, e.g. { 'A' : 2, 'B' : 1 }. | codesearchnet |
def count_lines(self):
    """Count the lines in the matched files.

    Returns:
        `~LineCounts`: a named tuple with total and non-blank line counts.
    """
    total = 0
    non_blank = 0
    for path, info in self._make_iter():
        if not info.is_file:
            continue
        # Read in binary mode so counting is independent of text encoding.
        for raw_line in self.fs.open(path, 'rb'):
            total += 1
            if raw_line.rstrip():
                non_blank += 1
    return LineCounts(lines=total, non_blank=non_blank)
Returns:
`~LineCounts`: A named tuple containing line counts.
Example:
>>> import fs
>>> fs.open_fs('~/projects').glob('**/*.py').count_lines()
LineCounts(lines=5767102, non_blank=4915110) | codesearchnet |
def mel_spectrogram(self, waveform: np.ndarray) -> np.ndarray:
    """Calculate a log-mel spectrogram from a mono waveform.

    The waveform is reflect-padded by ``(n_fft - hop_length) / 2`` samples
    on each side before the STFT.

    Args:
        waveform (`np.ndarray` of shape `(length,)`):
            The input waveform. Must be a single real-valued, mono waveform.

    Returns:
        `numpy.ndarray`: log-mel spectrogram of shape
        `(num_frames, num_mel_bins)`.
    """
    pad_length = int((self.n_fft - self.hop_length) / 2)
    padded = np.pad(waveform, (pad_length, pad_length), mode='reflect')
    complex_spec = spectrogram(padded, window=self.window, frame_length=self.n_fft, hop_length=self.hop_length, fft_length=self.n_fft, power=None, center=self.center, mel_filters=None, mel_floor=None)
    # Magnitude with the floor added under the square root for stability.
    magnitude = np.sqrt(np.real(complex_spec) ** 2 + np.imag(complex_spec) ** 2 + self.mel_floor)
    mel_spec = np.matmul(self.mel_filters.T, magnitude)
    log_mel_spec = np.log(np.clip(mel_spec, a_min=self.compression_clip_val, a_max=None) * self.compression_factor)
    return log_mel_spec.T
`int(self.n_fft - self.hop_length) / 2` on both sides using the `reflect` padding mode.
Args:
waveform (`np.ndarray` of shape `(length,)`):
The input waveform. This must be a single real-valued, mono waveform.
Returns:
`numpy.ndarray`: Array containing a log-mel spectrogram of shape `(num_frames, num_mel_bins)`. | github-repos |
def _render_normalized_cost_bar(self, cost, max_cost, length):
    """Render a text bar representing a normalized cost.

    Args:
        cost: the absolute value of the cost.
        max_cost: the maximum cost value to normalize against.
        length: (int) length of the cost bar, in number of characters,
            excluding the brackets on the two ends.

    Returns:
        An instance of debugger_cli_common.RichTextLine.
    """
    # At least one tick is always drawn, even for a zero-rounded cost.
    num_ticks = int(np.ceil(float(cost) / max_cost * length)) or 1
    bar = RL('[', font_attr=self._LINE_COST_ATTR)
    bar += RL('|' * num_ticks + ' ' * (length - num_ticks), font_attr=['bold', self._LINE_COST_ATTR])
    bar += RL(']', font_attr=self._LINE_COST_ATTR)
    return bar
Args:
cost: the absolute value of the cost.
max_cost: the maximum cost value to normalize the absolute cost with.
length: (int) length of the cost bar, in number of characters, excluding
the brackets on the two ends.
Returns:
An instance of debugger_cli_common.RichTextLine. | github-repos |
def get_validators(self, id=None, endpoint=None):
    """Returns the current NEO consensus nodes information and voting status.

    Args:
        id: (int, optional) id to use for response tracking.
        endpoint: (RPCEndpoint, optional) endpoint to use.

    Returns:
        json object of the result or the error encountered in the RPC call.
    """
    response = self._call_endpoint(GET_VALIDATORS, id=id, endpoint=endpoint)
    return response
Args:
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call | juraj-google-style |
def getColor(name):
    """Retrieve an RGB color in PDF format by name.

    Args:
        name: color name, looked up case-insensitively in the color list.

    Returns:
        A triple of floats in range 0 to 1. In case of name-not-found (or
        an invalid name), white (1, 1, 1) is returned.
    """
    try:
        c = getColorInfoList()[getColorList().index(name.upper())]
        return ((c[1] / 255.0), (c[2] / 255.0), (c[3] / 255.0))
    except (ValueError, IndexError, AttributeError):
        # ValueError: name not in the list; AttributeError: non-string name;
        # IndexError: inconsistent color tables. The original bare `except`
        # also swallowed unrelated errors such as KeyboardInterrupt.
        return (1, 1, 1)
Returns:
a triple of floats in range 0 to 1. In case of name-not-found, "white" is returned. | codesearchnet |
def get_table(self, table, retry=DEFAULT_RETRY):
    """Fetch the table referenced by ``table``.

    Args:
        table (Union[Table, TableReference, str]): reference to the table
            to fetch from the BigQuery API; a string is parsed with
            ``TableReference.from_string``.
        retry (google.api_core.retry.Retry): (Optional) how to retry the
            RPC.

    Returns:
        google.cloud.bigquery.table.Table: a ``Table`` instance.
    """
    reference = _table_arg_to_table_ref(table, default_project=self.project)
    response = self._call_api(retry, method='GET', path=reference.path)
    return Table.from_api_repr(response)
Args:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
A reference to the table to fetch from the BigQuery API.
If a string is passed in, this method attempts to create a
table reference from a string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.table.Table:
A ``Table`` instance. | codesearchnet |
def funds(self, term, field=None, **kwargs):
    """Search for funds matching a search term.

    Args:
        term (str): Fund id (or text) to search on.
        field (str): The field to search on; defaults to 'fu.org.n'.
        kwargs (dict): additional query parameters merged into the GET
            request's ``params``.
    """
    params = dict(kwargs)
    params['q'] = term
    # Fall back to the organisation-name field when none is given.
    params['f'] = field if field else 'fu.org.n'
    url = self._BASE_URI + 'funds'
    response = self.session.get(url, params=params)
    self.handle_http_error(response)
    return response
Args:
term (str): Fund id to search on
field (str): The field to search on.
Options are title, amount, org_name and type.
kwargs (dict): additional keyword arguments merged into the ``params``
passed to ``requests.Session.get``.
def expression_filter(self, name, **kwargs):
    """Returns a decorator function for adding an expression filter.

    Args:
        name (str): The name of the filter.
        **kwargs: Variable keyword arguments for the filter.

    Returns:
        Callable: a decorator that registers the decorated function as an
        ExpressionFilter under ``name`` and returns the function unchanged.
    """
    def decorator(func):
        self.filters[name] = ExpressionFilter(name, func, **kwargs)
        # Bug fix: return the function so the decorated name keeps
        # referring to it instead of being rebound to None.
        return func
    return decorator
Args:
name (str): The name of the filter.
**kwargs: Variable keyword arguments for the filter.
Returns:
Callable[[Callable[[AbstractExpression, Any], AbstractExpression]]]: A decorator
function for adding an expression filter. | codesearchnet |
def post_process_object_detection(self, outputs, threshold: float=0.5, target_sizes: Union[TensorType, List[Tuple]]=None, top_k: int=100):
    """Converts raw model output into final scored, labeled bounding boxes.

    Boxes are returned in (top_left_x, top_left_y, bottom_right_x,
    bottom_right_y) format. Only supports PyTorch.

    Args:
        outputs ([`ConditionalDetrObjectDetectionOutput`]):
            Raw outputs of the model.
        threshold (`float`, *optional*):
            Score threshold to keep object detection predictions.
        target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
            Per-image target sizes (height, width); when None, predictions
            are not resized and stay in normalized coordinates.
        top_k (`int`, *optional*, defaults to 100):
            Keep only top k bounding boxes before thresholding.

    Returns:
        `List[Dict]`: A list of dictionaries, each containing the scores,
        labels and boxes for an image in the batch.
    """
    out_logits, out_bbox = (outputs.logits, outputs.pred_boxes)
    if target_sizes is not None:
        if len(out_logits) != len(target_sizes):
            raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
    prob = out_logits.sigmoid()
    # Flatten the (queries, classes) axes so top-k runs over all
    # query/class pairs at once.
    prob = prob.view(out_logits.shape[0], -1)
    k_value = min(top_k, prob.size(1))
    topk_values, topk_indexes = torch.topk(prob, k_value, dim=1)
    scores = topk_values
    # Recover the query index and class label from each flattened index.
    topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode='floor')
    labels = topk_indexes % out_logits.shape[2]
    boxes = center_to_corners_format(out_bbox)
    boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
    if target_sizes is not None:
        if isinstance(target_sizes, List):
            img_h = torch.Tensor([i[0] for i in target_sizes])
            img_w = torch.Tensor([i[1] for i in target_sizes])
        else:
            img_h, img_w = target_sizes.unbind(1)
        # Scale normalized (0..1) boxes up to absolute pixel coordinates.
        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
        boxes = boxes * scale_fct[:, None, :]
    results = []
    for s, l, b in zip(scores, labels, boxes):
        score = s[s > threshold]
        label = l[s > threshold]
        box = b[s > threshold]
        results.append({'scores': score, 'labels': label, 'boxes': box})
    return results
top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.
Args:
outputs ([`ConditionalDetrObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*):
Score threshold to keep object detection predictions.
target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
(height, width) of each image in the batch. If left to None, predictions will not be resized.
top_k (`int`, *optional*, defaults to 100):
Keep only top k bounding boxes before filtering by thresholding.
Returns:
`List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
in the batch as predicted by the model. | github-repos |
def VerifyRow(self, parser_mediator, row):
    """Verifies if a line of the file is in the expected format.

    Args:
        parser_mediator (ParserMediator): mediates interactions between parsers
            and other components, such as storage and dfvfs.
        row (dict[str, str]): fields of a single row, as specified in COLUMNS.

    Returns:
        bool: True if this is the correct parser, False otherwise.
    """
    # The time field must both parse into an elements tuple and form a
    # valid dfdatetime time-elements value.
    try:
        time_elements_tuple = self._GetTimeElementsTuple(row['time'])
    except (TypeError, ValueError):
        return False

    try:
        dfdatetime_time_elements.TimeElements(
            time_elements_tuple=time_elements_tuple)
    except ValueError:
        return False

    # Event identifiers in this log format fall in the range [1, 77].
    try:
        event_number = int(row['event'], 10)
    except (TypeError, ValueError):
        return False
    if not 1 <= event_number <= 77:
        return False

    # Category identifiers fall in the range [1, 4].
    try:
        category_number = int(row['cat'], 10)
    except (TypeError, ValueError):
        return False
    return 1 <= category_number <= 4
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
row (dict[str, str]): fields of a single row, as specified in COLUMNS.
Returns:
bool: True if this is the correct parser, False otherwise. | juraj-google-style |
def get_options_as(op: Union[schema_fb.Operator, schema_fb.OperatorT], opts_type: Type[OptsT]) -> Optional[OptsT]:
    """Get the options of an operator as the specified type.

    Requested type must be an object-api type (ends in 'T').

    Args:
        op: The operator to get the options from.
        opts_type: The type of the options to get.

    Returns:
        The options as the specified type, or None if the options are not of
        the specified type.

    Raises:
        ValueError: If the specified type is not a valid options type.
    """
    type_name: str = opts_type.__name__
    if not type_name.endswith('T'):
        raise ValueError(f'Unsupported options type: {opts_type}')
    base_name = type_name.removesuffix('T')
    in_options_1 = hasattr(schema_fb.BuiltinOptions, base_name)
    in_options_2 = hasattr(schema_fb.BuiltinOptions2, base_name)
    if not (in_options_1 or in_options_2):
        raise ValueError(f'Unsupported options type: {opts_type}')

    if isinstance(op, schema_fb.Operator):
        # Flat (non-object-api) operator: pull the union value and confirm
        # that its stored type tag matches the requested enum value.
        if in_options_1:
            expected_tag = getattr(schema_fb.BuiltinOptions, base_name)
            creator = schema_fb.BuiltinOptionsCreator
            union_value = op.BuiltinOptions()
            stored_tag = op.BuiltinOptionsType()
        else:
            expected_tag = getattr(schema_fb.BuiltinOptions2, base_name)
            creator = schema_fb.BuiltinOptions2Creator
            union_value = op.BuiltinOptions2()
            stored_tag = op.BuiltinOptions2Type()
        if union_value is None or stored_tag != expected_tag:
            return None
        return creator(expected_tag, union_value)

    if isinstance(op, schema_fb.OperatorT):
        # Object-api operator: the options object is stored directly.
        candidate = op.builtinOptions if in_options_1 else op.builtinOptions2
        if isinstance(candidate, opts_type):
            return candidate
        return None

    return None
Requested type must be an object-api type (ends in 'T').
Args:
op: The operator to get the options from.
opts_type: The type of the options to get.
Returns:
The options as the specified type, or None if the options are not of the
specified type.
Raises:
ValueError: If the specified type is not a valid options type. | github-repos |
def remove(self, value):
    """Remove *value* from self.

    Args:
        value: Element to remove from self.

    Raises:
        ValueError: If ``value`` is not present.
    """
    try:
        index = self._dict[value]
    except KeyError:
        # BUG FIX: the message previously contained a literal '%s'
        # placeholder that was never interpolated with the value.
        raise ValueError('Value "%s" is not present.' % (value,))
    else:
        del self[index]
Args:
value: Element to remove from self
Raises:
ValueError: if element is not present | juraj-google-style
def log_sigmoid(x):
    """Logarithm of the sigmoid activation function.

    It is defined as `f(x) = log(1 / (1 + exp(-x)))`.

    Args:
        x: Input tensor.

    Returns:
        A tensor with the same shape as `x`.
    """
    # Concrete tensors take the direct backend path; symbolic inputs are
    # routed through the op object so the computation is traced.
    if not any_symbolic_tensors((x,)):
        return backend.nn.log_sigmoid(x)
    return LogSigmoid().symbolic_call(x)
It is defined as `f(x) = log(1 / (1 + exp(-x)))`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = keras.ops.convert_to_tensor([-0.541391, 0.0, 0.50, 5.0])
>>> keras.ops.log_sigmoid(x)
array([-1.0000418, -0.6931472, -0.474077, -0.00671535], dtype=float32) | github-repos |
def fit(self, X, y, X_val=None, y_val=None):
    """Train a network with the quasi-Newton method (L-BFGS-B per mini-batch).

    Args:
        X (np.array of float): feature matrix for training
        y (np.array of float): target values for training
        X_val (np.array of float): feature matrix for validation
        y_val (np.array of float): target values for validation
    """
    y = y.reshape((len(y), 1))
    if sparse.issparse(X):
        X = X.tocsr()
    if X_val is not None:
        n_val = len(y_val)
        y_val = y_val.reshape((n_val, 1))

    self.i = X.shape[1]
    # NOTE(review): this rescales self.l1 in place, so calling fit() twice
    # divides it twice -- confirm refitting is not a supported use case.
    self.l1 = self.l1 / self.i

    # Small random initialization for all (input + bias) x hidden weights.
    self.w = (np.random.rand((self.i + 2) * self.h + 1) - .5) * 1e-6
    self.w_opt = self.w
    self.n_opt = 0

    logger.info('training ...')
    n_obs = X.shape[0]
    batch = self.b
    n_epoch = self.n
    # BUG FIX: this used to be `idx = range(n_obs)`; np.random.shuffle()
    # needs a mutable sequence and raises TypeError on a Python 3 range.
    idx = list(range(n_obs))
    self.auc_opt = .5

    start = time.time()
    print('\tEPOCH TRAIN VALID BEST TIME (m)')
    print('\t--------------------------------------------')

    # Report the AUC of the freshly initialized network (epoch 0).
    p = self.predict_raw(X)
    auc = roc_auc_score(y, p)
    auc_val = auc
    if X_val is not None:
        p_val = self.predict_raw(X_val)
        auc_val = roc_auc_score(y_val, p_val)

    print('\t{:3d}: {:.6f} {:.6f} {:.6f} {:.2f}'.format(
        0, auc, auc_val, self.auc_opt,
        (time.time() - start) / SEC_PER_MIN))

    epoch = 1
    while epoch <= n_epoch:
        np.random.shuffle(idx)

        for i in range(int(n_obs / batch) + 1):
            if (i + 1) * batch > n_obs:
                sub_idx = idx[batch * i:n_obs]
            else:
                sub_idx = idx[batch * i:batch * (i + 1)]

            x = X[sub_idx]
            # Split the mini-batch into negative and positive examples for
            # the pairwise objective in self.func.
            neg_idx = [n_idx for n_idx, n_y in enumerate(y[sub_idx]) if n_y == 0.]
            pos_idx = [p_idx for p_idx, p_y in enumerate(y[sub_idx]) if p_y == 1.]
            x0 = x[neg_idx]
            x1 = x[pos_idx]
            # A handful of L-BFGS-B iterations per mini-batch.
            ret = minimize(self.func,
                           self.w,
                           args=(x0, x1),
                           method='L-BFGS-B',
                           jac=self.fprime,
                           options={'maxiter': 5})
            self.w = ret.x

        p = self.predict_raw(X)
        auc = roc_auc_score(y, p)
        auc_val = auc
        if X_val is not None:
            p_val = self.predict_raw(X_val)
            auc_val = roc_auc_score(y_val, p_val)

        if auc_val > self.auc_opt:
            self.auc_opt = auc_val
            self.w_opt = self.w
            self.n_opt = epoch
            # Extend the schedule while validation AUC is still improving
            # at its end.
            if epoch == n_epoch:
                n_epoch += 5

        print('\t{:3d}: {:.6f} {:.6f} {:.6f} {:.2f}'.format(
            epoch, auc, auc_val, self.auc_opt,
            (time.time() - start) / SEC_PER_MIN))
        epoch += 1

    if X_val is not None:
        print('Optimal epoch is {0} ({1:.6f})'.format(self.n_opt,
                                                      self.auc_opt))
        self.w = self.w_opt

    logger.info('done training')
Args:
X (np.array of float): feature matrix for training
y (np.array of float): target values for training
X_val (np.array of float): feature matrix for validation
y_val (np.array of float): target values for validation | juraj-google-style |
def add_trunk_group(self, intf, value):
    """Adds the specified trunk group to the interface.

    Args:
        intf (str): The interface name to apply the trunk group to.
        value (str): The trunk group value to apply to the interface.

    Returns:
        True if the operation was successfully applied otherwise False.
    """
    command = 'switchport trunk group {}'.format(value)
    return self.configure_interface(intf, command)
Args:
intf (str): The interface name to apply the trunk group to
value (str): The trunk group value to apply to the interface
Returns:
True if the operation as successfully applied otherwise false | codesearchnet |
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Build model inputs from a sequence or a pair of sequences by adding
    special tokens. The resulting sequence has the following format:

    - single sequence: `X [SEP]`
    - pair of sequences: `A [SEP] B [SEP]`

    Args:
        token_ids_0 (`List[int]`):
            List of IDs to which the special tokens will be added.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.

    Returns:
        `List[int]`: list of input IDs with the appropriate special tokens.
    """
    sep = [self.sep_token_id]
    if token_ids_1 is None:
        return token_ids_0 + sep
    return token_ids_0 + sep + token_ids_1 + sep
adding special tokens. A XLMProphetNet sequence has the following format:
- single sequence: `X [SEP]`
- pair of sequences: `A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. | github-repos |
def _load_and_verify_metadata(self, submission_type):
    """Loads and verifies metadata.

    Args:
        submission_type: type of the submission.

    Returns:
        Dictionary with metadata or None if metadata not found or invalid.
    """
    metadata_filename = os.path.join(self._extracted_submission_dir,
                                     'metadata.json')
    if not os.path.isfile(metadata_filename):
        logging.error('metadata.json not found')
        return None
    try:
        with open(metadata_filename, 'r') as f:
            metadata = json.load(f)
    except IOError as e:
        logging.error('Failed to load metadata: %s', e)
        return None
    # All required fields must be present.
    for field_name in REQUIRED_METADATA_JSON_FIELDS:
        if field_name not in metadata:
            logging.error('Field %s not found in metadata', field_name)
            return None
    # The declared type must match what the caller expects.
    if submission_type != metadata['type']:
        logging.error('Invalid submission type in metadata, expected "%s", '
                      'actual "%s"', submission_type, metadata['type'])
        return None
    entry_point = metadata['entry_point']
    if not os.path.isfile(os.path.join(self._extracted_submission_dir,
                                       entry_point)):
        logging.error('Entry point not found: %s', entry_point)
        return None
    if not entry_point.endswith('.sh'):
        # BUG FIX: the message previously read "wont run" because two
        # adjacent string literals ('won' 't run') were concatenated.
        logging.warning('Entry point is not an .sh script. '
                        'This is not necessarily a problem, but if submission '
                        "won't run double check entry point first: %s",
                        entry_point)
    return metadata
Args:
submission_type: type of the submission
Returns:
dictionary with metadata or None if metadata not found or invalid | juraj-google-style
def get_group_by_name(self, group_name: str) -> typing.Optional['Group']:
    """Gets a group from its name.

    Args:
        group_name: name of the group to look up.

    Returns:
        The matching Group, or None if no group has that name.
    """
    VALID_STR.validate(group_name, 'get_group_by_name')
    matches = (group for group in self.groups if group.group_name == group_name)
    return next(matches, None)
Args:
group_name:
Returns: Group | juraj-google-style |
def volatility_fn(self):
    """Python callable calculating the instantaneous volatility matrix.

    The body is a no-op; this appears to be an abstract hook that concrete
    subclasses override to return the volatility callable.

    Returns:
        The instantaneous volatility callable (in overriding subclasses);
        this base implementation returns None.
    """
    pass
The callable should accept two real `Tensor` arguments of the same dtype and
shape `times_shape`. The first argument is the scalar time t, the second
argument is the value of Ito process X - `Tensor` of shape `batch_shape +
[dim]`. Here `batch_shape` is an arbitrary shape. The result is value of
volatility `S_ij`(t, X). The return value of the callable is a real `Tensor`
of the same dtype as the input arguments and of shape
`batch_shape + [dim, dim]`.
Returns:
The instantaneous volatility callable. | github-repos |
def feed(self, url_template, keyword, offset, max_num, page_step):
    """Feed urls once.

    Args:
        url_template: A string with parameters replaced with "{}".
        keyword: A string indicating the searching keyword.
        offset: An integer indicating the starting index.
        max_num: An integer indicating the max number of images to be crawled.
        page_step: An integer added to offset after each iteration.
    """
    for page_offset in range(offset, offset + max_num, page_step):
        url = url_template.format(keyword, page_offset)
        self.out_queue.put(url)
        self.logger.debug('put url to url_queue: {}'.format(url))
Args:
url_template: A string with parameters replaced with "{}".
keyword: A string indicating the searching keyword.
offset: An integer indicating the starting index.
max_num: An integer indicating the max number of images to be crawled.
page_step: An integer added to offset after each iteration. | juraj-google-style |
def normalize_cell_value(value):
    """Process value for writing into a cell.

    Args:
        value: any type of variable.

    Returns:
        JSON-serialized value if value is a list or dict, else the value
        unchanged.
    """
    # isinstance with a tuple replaces the two chained isinstance checks.
    if isinstance(value, (dict, list)):
        return json.dumps(value)
    return value
Args:
value: any type of variable
Returns:
json serialized value if value is list or dict, else value | juraj-google-style |
def parse_genes(gene_lines):
    """Parse a stream of gene lines and return dicts with gene info.

    Args:
        gene_lines (iterable(str)): Stream with genes.

    Returns:
        genes (list(dict)): Dictionaries with relevant gene info.

    Raises:
        SyntaxError: If a line cannot be parsed into a gene.
    """
    genes = []
    header = []
    hgnc_identifiers = set()
    delimiter = '\t'
    delimiters = ['\t', ' ', ';']
    for i, line in enumerate(gene_lines):
        line = line.rstrip()
        if not len(line) > 0:
            continue
        # BUG FIX: the comment-marker string literals below ('#' and '##')
        # had been truncated, leaving unterminated strings.
        if line.startswith('#'):
            if not line.startswith('##'):
                # A single-'#' line is the column header; pick the delimiter
                # that splits it into the most columns.
                line_length = 0
                delimiter = None
                for alt in delimiters:
                    head_line = line.split(alt)
                    if len(head_line) > line_length:
                        line_length = len(head_line)
                        delimiter = alt
                header = [word.lower() for word in line[1:].split(delimiter)]
        else:
            if i == 0:
                # No '#' header at all: infer the delimiter (and possibly a
                # header) from the first data line.
                line_length = 0
                for alt in delimiters:
                    head_line = line.split(alt)
                    if len(head_line) > line_length:
                        line_length = len(head_line)
                        delimiter = alt
                if 'hgnc' in line or 'HGNC' in line:
                    header = [word.lower() for word in line.split(delimiter)]
                    continue
                # Fall back to a minimal single-column header.
                if line.split(delimiter)[0].isdigit():
                    header = ['hgnc_id']
                else:
                    header = ['hgnc_symbol']
            splitted_line = line.split(delimiter)
            gene_info = dict(zip(header, splitted_line))
            # Skip rows where every column is empty.
            info_found = False
            for key in gene_info:
                if gene_info[key]:
                    info_found = True
                    break
            if not info_found:
                continue
            try:
                gene = parse_gene(gene_info)
            except Exception as e:
                LOG.warning(e)
                raise SyntaxError('Line {0} is malformed'.format(i + 1))
            # De-duplicate on the HGNC identifier.
            identifier = gene.pop('identifier')
            if identifier not in hgnc_identifiers:
                hgnc_identifiers.add(identifier)
                genes.append(gene)
    return genes
Args:
gene_lines(iterable(str)): Stream with genes
Returns:
genes(list(dict)): Dictionaries with relevant gene info | codesearchnet |
def build_graph(device, input_shape, variable, num_inputs, axis, grad):
    """Build a graph containing a sequence of concat operations.

    Args:
        device: string, the device to run on.
        input_shape: shape of the input tensors.
        variable: whether or not to randomize the input shape.
        num_inputs: the number of inputs to concat.
        axis: axis to be concat'ed.
        grad: if True compute the gradient.

    Returns:
        An array of tensors to run().
    """
    with ops.device('/%s:0' % device):
        if not variable:
            inputs = [array_ops.zeros(input_shape) for _ in range(num_inputs)]
        elif axis == 1:
            # Randomize the column dimension within +/- 5 of the requested size.
            inputs = [
                array_ops.zeros([
                    input_shape[0],
                    random.randint(max(1, input_shape[1] - 5), input_shape[1] + 5)
                ]) for _ in range(num_inputs)
            ]
        else:
            # Randomize the row dimension within +/- 5 of the requested size.
            inputs = [
                array_ops.zeros([
                    random.randint(max(1, input_shape[0] - 5), input_shape[0] + 5),
                    input_shape[1]
                ]) for _ in range(num_inputs)
            ]
        outputs = [array_ops.concat(inputs, axis) for _ in range(100)]
        if not grad:
            return control_flow_ops.group(*outputs)
        per_output_grads = [
            gradients_impl.gradients(output, inputs) for output in outputs
        ]
        flattened = itertools.chain.from_iterable(per_output_grads)
        return control_flow_ops.group(*list(flattened))
Args:
device: string, the device to run on.
input_shape: shape of the input tensors.
variable: whether or not to randomize the input shape
num_inputs: the number of inputs to concat
axis: axis to be concat'ed
grad: if True compute the gradient
Returns:
An array of tensors to run() | github-repos |
def get_feature_from_key(self, feature_key):
    """Get feature for the provided feature key.

    Args:
        feature_key: Feature key for which feature is to be fetched.

    Returns:
        Feature corresponding to the provided feature key, or None if the
        key is not in the datafile (an error is logged in that case).
    """
    feature = self.feature_key_map.get(feature_key)
    if not feature:
        self.logger.error('Feature "%s" is not in datafile.' % feature_key)
        return None
    return feature
Args:
feature_key: Feature key for which feature is to be fetched.
Returns:
Feature corresponding to the provided feature key. | codesearchnet |
def calculate_bv_sum_unordered(site, nn_list, scale_factor=1):
    """Calculates the BV sum of a site for unordered structures.

    Args:
        site:
            The site.
        nn_list:
            List of nearest neighbors in the format [(nn_site, dist), ...].
        scale_factor:
            A scale factor to be applied. This is useful for scaling distance,
            esp in the case of calculation-relaxed structures which may tend
            to under (GGA) or over bind (LDA).
    """
    total = 0
    for species1, occu1 in site.species.items():
        el1 = Element(species1.symbol)
        for neighbor, dist in nn_list:
            for species2, occu2 in neighbor.species.items():
                el2 = Element(species2.symbol)
                # Only distinct element pairs with at least one tabulated
                # electronegative element contribute.
                if el1 == el2:
                    continue
                if el1 not in ELECTRONEG and el2 not in ELECTRONEG:
                    continue
                r1, c1 = BV_PARAMS[el1]["r"], BV_PARAMS[el1]["c"]
                r2, c2 = BV_PARAMS[el2]["r"], BV_PARAMS[el2]["c"]
                R = r1 + r2 - r1 * r2 * (sqrt(c1) - sqrt(c2)) ** 2 / \
                    (c1 * r1 + c2 * r2)
                vij = exp((R - dist * scale_factor) / 0.31)
                # Sign follows the electronegativity ordering of the pair.
                sign = 1 if el1.X < el2.X else -1
                total += occu1 * occu2 * vij * sign
    return total
Args:
site:
The site
nn_list:
List of nearest neighbors in the format [(nn_site, dist), ...].
scale_factor:
A scale factor to be applied. This is useful for scaling distance,
esp in the case of calculation-relaxed structures which may tend
to under (GGA) or over bind (LDA). | juraj-google-style |
def component_mget(self, zip_data, components):
    """Call the zip component_mget endpoint.

    Args:
        zip_data: As described in the class docstring.
        components: A list of strings for each component to include in the
            request. Example: ["zip/details", "zip/volatility"]

    Returns:
        The endpoint response, or None (with a message printed) when
        ``components`` is not a list.
    """
    if not isinstance(components, list):
        print('Components param must be a list')
        return
    query_params = {'components': ','.join(components)}
    return self.fetch_identifier_component(
        'zip/component_mget', zip_data, query_params)
Args:
- zip_data - As described in the class docstring.
- components - A list of strings for each component to include in the request.
Example: ["zip/details", "zip/volatility"] | codesearchnet |
def _get_document_path(client, path):
    """Convert a path tuple into a full path string.

    Of the form:
        ``projects/{project_id}/databases/{database_id}/...
        documents/{document_path}``

    Args:
        client (~.firestore_v1beta1.client.Client): The client that holds
            configuration details and a GAPIC client object.
        path (Tuple[str, ...]): The components in a document path.

    Returns:
        str: The fully-qualified document path.
    """
    components = (client._database_string, 'documents') + path
    return _helpers.DOCUMENT_PATH_DELIMITER.join(components)
Of the form:
``projects/{project_id}/databases/{database_id}/...
documents/{document_path}``
Args:
client (~.firestore_v1beta1.client.Client): The client that holds
configuration details and a GAPIC client object.
path (Tuple[str, ...]): The components in a document path.
Returns:
str: The fully-qualified document path. | codesearchnet |
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
    """Retrieve sequence ids from a token list that has no special tokens
    added. This method is called when adding special tokens using the
    tokenizer `prepare_for_model` method.

    Args:
        token_ids_0 (`List[int]`):
            List of IDs.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.
        already_has_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not the token list is already formatted with special
            tokens for the model.

    Returns:
        `List[int]`: A list of integers in the range [0, 1]: 1 for a special
        token, 0 for a sequence token.
    """
    if already_has_special_tokens:
        if token_ids_1 is not None:
            raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')
        # Mark existing separator/classifier tokens in the input.
        special_ids = (self.sep_token_id, self.cls_token_id)
        return [int(token in special_ids) for token in token_ids_0]
    mask = [1] + [0] * len(token_ids_0) + [1]
    if token_ids_1 is not None:
        mask += [0] * len(token_ids_1) + [1]
    return mask
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. | github-repos |
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Write the data encoding the QueryRequestPayload object to a stream.

    Args:
        output_buffer (Stream): A data stream in which to encode object
            data, supporting a write method; usually a BytearrayStream
            object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be encoded. Optional,
            defaults to KMIP 1.0.

    Raises:
        InvalidField: Raised if the query functions are not defined.
    """
    scratch = utils.BytearrayStream()
    if not self._query_functions:
        raise exceptions.InvalidField('The Query request payload is missing the query functions field.')
    for query_function in self._query_functions:
        query_function.write(scratch, kmip_version=kmip_version)
    # Write the payload header (with the now-known body length) first,
    # followed by the encoded body.
    self.length = scratch.length()
    super(QueryRequestPayload, self).write(output_buffer, kmip_version=kmip_version)
    output_buffer.write(scratch.buffer)
Args:
output_buffer (Stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
Raises:
InvalidField: Raised if the query functions are not defined. | codesearchnet |
def on_enter(__msg: Optional[Union[Callable, str]]=None) -> Callable:
    """Decorator to display a message when entering a function.

    Usable both bare (``@on_enter``) and with a message
    (``@on_enter("message")``).

    Args:
        __msg: Message to display; when omitted, a default message naming
            the wrapped function is printed.

    Returns:
        Wrapped function.
    """
    # Bare usage: @on_enter passes the function itself as __msg.
    if callable(__msg):
        return on_enter()(__msg)

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            if __msg:
                print(__msg)
            else:
                print('Entering {!r}({!r})'.format(func.__name__, func))
            return func(*args, **kwargs)
        return wrapper
    return decorator
Args:
__msg: Message to display
Returns:
Wrapped function | codesearchnet |
def cv_score_mean(self, X, y):
    """Compute mean score across cross validation folds.

    Split data and labels into cross validation folds and fit the model for
    each fold. Then, for each scoring type configured on this object,
    compute the score. Finally, average the scores across folds.

    Args:
        X (np.array): data
        y (np.array): labels

    Returns:
        Dictionary mapping scoring name to score.
    """
    X, y = self._format_inputs(X, y)
    # Fold strategy depends on the problem type; seeds are fixed for
    # reproducibility.
    if self.problem_type.binary_classification:
        folds = StratifiedKFold(shuffle=True, random_state=RANDOM_STATE + 3)
    elif self.problem_type.multi_classification:
        self.target_type_transformer.inverse_transform(y)
        transformer = self.target_type_transformer
        folds = StratifiedKFoldMultiClassIndicator(
            transformer, shuffle=True, n_splits=3,
            random_state=RANDOM_STATE + 3)
    elif self.problem_type.regression:
        folds = KFold(shuffle=True, n_splits=3, random_state=RANDOM_STATE + 4)
    else:
        raise NotImplementedError
    scoring = {info.name: info.scorer for info in self.scorers_info}
    raw_results = cross_validate(
        self.estimator, X, y, scoring=scoring, cv=folds,
        return_train_score=False)
    return self._process_cv_results(raw_results)
Split data and labels into cross validation folds and fit the model for
each fold. Then, for each scoring type in scorings, compute the score.
Finally, average the scores across folds. Returns a dictionary mapping
scoring to score.
Args:
X (np.array): data
y (np.array): labels
Note: scoring types are taken from ``self.scorers_info``; there is no ``scorings`` parameter. | codesearchnet
def schedule(self, callback, *args, **kwargs):
    """Schedule the callback to be called asynchronously in a thread pool.

    Args:
        callback (Callable): The function to call.
        args: Positional arguments passed to the function.
        kwargs: Key-word arguments passed to the function.

    Returns:
        None
    """
    self._executor.submit(callback, *args, **kwargs)
Args:
callback (Callable): The function to call.
args: Positional arguments passed to the function.
kwargs: Key-word arguments passed to the function.
Returns:
None | codesearchnet |
def UpdateBudget(self, client_customer_id, budget_id, micro_amount, delivery_method):
    """Update a Budget with the given budgetId.

    Args:
        client_customer_id: str Client Customer Id used to update Budget.
        budget_id: str Id of the budget to be updated.
        micro_amount: str New value for the microAmount field.
        delivery_method: str New value for the deliveryMethod field.
    """
    self.client.SetClientCustomerId(client_customer_id)
    operand = {
        'budgetId': budget_id,
        'amount': {'microAmount': micro_amount},
        'deliveryMethod': delivery_method,
    }
    operation = {'operator': 'SET', 'operand': operand}
    self.client.GetService('BudgetService').mutate([operation])
Args:
client_customer_id: str Client Customer Id used to update Budget.
budget_id: str Id of the budget to be updated.
micro_amount: str New value for the microAmount field.
delivery_method: str New value for the deliveryMethod field. | codesearchnet |
def allconcat_ring(xs, devices, concat_axis):
n = len(xs)
if (n == 1):
return xs
parts = [[(xs[target] if (target == source) else None) for source in xrange(n)] for target in xrange(n)]
for distance in xrange(1, ((n
for target in xrange(n):
source = ((target + distance) % n)
if (parts[target][source] is None):
with tf.device(devices[target]):
parts[target][source] = tf.identity(parts[((target + 1) % n)][source])
source = ((target - distance) % n)
if (parts[target][source] is None):
with tf.device(devices[target]):
parts[target][source] = tf.identity(parts[((target - 1) % n)][source])
return mtf.parallel(devices, tf.concat, parts, axis=([concat_axis] * n)) | Concatenate all Tensors everywhere.
Performance-optimized for a ring of devices.
Args:
xs: a list of n tf.Tensors
devices: a list of n strings
concat_axis: an integer
Returns:
a list of n Tensors | codesearchnet |
def __init__(self, cmd='gulp'):
    """Initialize with the executable if not in the standard path.

    Args:
        cmd: Command. Defaults to gulp.

    Raises:
        GulpError: If no executable named ``cmd`` can be found, either at
            the explicit path given or anywhere on the PATH.
    """
    # Local import: the file-level import block is outside this chunk.
    import shutil

    # shutil.which performs the same lookup the previous hand-rolled search
    # did: an explicit path is checked directly, a bare name is searched
    # for on PATH, and only executable files match.
    # NOTE(review): on Windows, shutil.which also honours PATHEXT, which
    # the manual search did not -- confirm that is acceptable.
    resolved = shutil.which(cmd)
    if resolved is None:
        raise GulpError("Executable not found")
    self._gulp_cmd = resolved
Args:
cmd: Command. Defaults to gulp. | juraj-google-style |
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Read the data encoding the GetAttributeList response payload and
    decode it into its constituent parts.

    Args:
        input_buffer (stream): A data stream containing encoded object
            data, supporting a read method; usually a BytearrayStream
            object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be decoded. Optional,
            defaults to KMIP 1.0.

    Raises:
        InvalidKmipEncoding: Raised if the unique identifier or attribute
            names are missing from the encoded payload.
    """
    # Let the base class consume the payload header, then work on a local
    # buffer sized to this payload's declared length.
    super(GetAttributeListResponsePayload, self).read(input_buffer, kmip_version=kmip_version)
    local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
    # The unique identifier is mandatory and must appear first.
    if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_buffer):
        self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)
        self._unique_identifier.read(local_buffer, kmip_version=kmip_version)
    else:
        raise exceptions.InvalidKmipEncoding('The GetAttributeList response payload encoding is missing the unique identifier.')
    names = list()
    if (kmip_version < enums.KMIPVersion.KMIP_2_0):
        # Pre-2.0: attribute names are encoded directly as text strings;
        # at least one is required.
        while self.is_tag_next(enums.Tags.ATTRIBUTE_NAME, local_buffer):
            name = primitives.TextString(tag=enums.Tags.ATTRIBUTE_NAME)
            name.read(local_buffer, kmip_version=kmip_version)
            names.append(name)
        if (len(names) == 0):
            raise exceptions.InvalidKmipEncoding('The GetAttributeList response payload encoding is missing the attribute names.')
        self._attribute_names = names
    else:
        # KMIP 2.0+: attribute references come in two forms (structure or
        # enumeration); both are normalized here to attribute-name text
        # strings. Note that an empty list does not raise in this branch.
        while self.is_tag_next(enums.Tags.ATTRIBUTE_REFERENCE, local_buffer):
            if self.is_type_next(enums.Types.STRUCTURE, local_buffer):
                reference = objects.AttributeReference()
                reference.read(local_buffer, kmip_version=kmip_version)
                names.append(primitives.TextString(value=reference.attribute_name, tag=enums.Tags.ATTRIBUTE_NAME))
            elif self.is_type_next(enums.Types.ENUMERATION, local_buffer):
                reference = primitives.Enumeration(enums.Tags, tag=enums.Tags.ATTRIBUTE_REFERENCE)
                reference.read(local_buffer, kmip_version=kmip_version)
                name = enums.convert_attribute_tag_to_name(reference.value)
                names.append(primitives.TextString(value=name, tag=enums.Tags.ATTRIBUTE_NAME))
            else:
                raise exceptions.InvalidKmipEncoding('The GetAttributeList response payload encoding contains an invalid AttributeReference type.')
        self._attribute_names = names
    self.is_oversized(local_buffer)
decode it into its constituent parts.
Args:
input_buffer (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
InvalidKmipEncoding: Raised if the unique identifier or attribute
names are missing from the encoded payload. | codesearchnet |
def task_ids(self):
    """Get the task IDs of a running workflow.

    Returns:
        List of task IDs.

    Raises:
        WorkflowError: If the workflow is not running.
        NotImplementedError: For batch workflows.
    """
    if not self.id:
        raise WorkflowError('Workflow is not running. Cannot get task IDs.')
    if self.batch_values:
        raise NotImplementedError('Query Each Workflow Id within the Batch Workflow for task IDs.')
    workflow_state = self.workflow.get(self.id)
    return [task['id'] for task in workflow_state['tasks']]
Args:
None
Returns:
List of task IDs | codesearchnet |
def script_dir_plus_file(filename, pyobject, follow_symlinks=True):
    """Get current script's directory and then append a filename.

    Args:
        filename (str): Filename to append to directory path.
        pyobject (Any): Any Python object in the script.
        follow_symlinks (Optional[bool]): Follow symlinks or not.
            Defaults to True.

    Returns:
        str: Current script's directory with filename appended.
    """
    directory = script_dir(pyobject, follow_symlinks)
    return join(directory, filename)
Args:
filename (str): Filename to append to directory path
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory and with filename appended | codesearchnet |
def remove_pardir_symbols(path, sep=os.sep, pardir=os.pardir):
    """Remove relative path symbols such as '..'.

    Args:
        path (str): A target path string.
        sep (str): A string used as the path delimiter (Default: `os.sep`).
        pardir (str): A string referring to the parent directory
            (Default: `os.pardir`).

    Returns:
        str: The path with all parent-directory components removed.
    """
    kept = [part for part in path.split(sep) if part != pardir]
    return sep.join(kept)
Args:
path (str): A target path string
sep (str): A strint to refer path delimiter (Default: `os.sep`)
pardir (str): A string to refer parent directory (Default: `os.pardir`)
Returns:
str: The path with parent directory symbols removed | codesearchnet
def which(cmd):
    """Returns full path to an executable.

    Args:
        cmd (str): Executable command to search for.

    Returns:
        (str) Full path to command. None if it is not found.

    Example::

        full_path_to_python = which("python")
    """
    # Delegate to the standard library rather than re-implementing the
    # PATH walk by hand; shutil.which performs the same executable lookup
    # (explicit path checked directly, bare name searched on PATH).
    # NOTE(review): on Windows shutil.which also consults PATHEXT, which
    # the hand-rolled version did not -- confirm this is acceptable.
    return shutil.which(cmd)
Args:
cmd (str): Executable command to search for.
Returns:
(str) Full path to command. None if it is not found.
Example::
full_path_to_python = which("python") | codesearchnet |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.