code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def end_episode(self, agent_indices):
    # Add finished episodes (for the given batch indices) to memory; may train
    # and clear memory when full. Returns a summary tensor.
    with tf.name_scope('end_episode/'):
        return tf.cond(
            self._is_training,
            # NOTE(review): `str` is the false_fn here; tf.cond calls it with no
            # arguments, so the non-training branch yields str() == '' (an empty
            # summary string). Confirm this fallback is intentional.
            lambda: self._define_end_episode(agent_indices), str) | Add episodes to the memory and perform update steps if memory is full.
During training, add the collected episodes of the batch indices that
finished their episode to the memory. If the memory is full, train on it,
and then clear the memory. A summary string is returned if requested at
this step.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor. | juraj-google-style |
def check_session_id_signature(session_id, secret_key=settings.secret_key_bytes(), signed=settings.sign_sessions()):
    # Validate the HMAC signature embedded in a session ID.
    # NOTE: the defaults are evaluated once at import time (settings calls),
    # which appears to be the intended configuration snapshot — confirm.
    secret_key = _ensure_bytes(secret_key)
    if signed:
        # Signed session IDs have the form "<base_id>-<signature>".
        pieces = session_id.split('-', 1)
        if (len(pieces) != 2):
            return False
        base_id = pieces[0]
        provided_signature = pieces[1]
        expected_signature = _signature(base_id, secret_key)
        # compare_digest gives a constant-time comparison to avoid timing attacks.
        return hmac.compare_digest(encode_utf8(expected_signature), encode_utf8(provided_signature))
    else:
        # Signing disabled: every session ID is accepted.
        return True | Check the signature of a session ID, returning True if it's valid.
The server uses this function to check whether a session ID
was generated with the correct secret key. If signed sessions are disabled,
this function always returns True.
Args:
session_id (str) : The session ID to check
secret_key (str, optional) : Secret key (default: value of 'BOKEH_SECRET_KEY' env var)
signed (bool, optional) : Whether to check anything (default: value of
'BOKEH_SIGN_SESSIONS' env var) | codesearchnet |
def _normalize_mlengine_job_id(job_id):
    # Replace characters that are invalid in an MLEngine job_id with '_',
    # while leaving templated '{{ ... }}' segments untouched.
    # A job_id may not start with a digit (or a template); prefix 'z_' then.
    match = re.search(r'\d|\{{2}', job_id)
    if match and match.start() == 0:
        job = 'z_{}'.format(job_id)
    else:
        job = job_id
    # Walk over '{{ ... }}' template blocks: cleanse the text between them,
    # copy the template blocks verbatim.
    tracker = 0
    cleansed_job_id = ''
    for m in re.finditer(r'\{{2}.+?\}{2}', job):
        cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_',
                                  job[tracker:m.start()])
        cleansed_job_id += job[m.start():m.end()]
        tracker = m.end()
    # Cleanse the trailing text after the last template block (or the whole id).
    cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_', job[tracker:])
    return cleansed_job_id | Replaces invalid MLEngine job_id characters with '_'.
This also adds a leading 'z' in case job_id starts with an invalid
character.
Args:
job_id: A job_id str that may have invalid characters.
Returns:
A valid job_id representation. | juraj-google-style |
def process_resource(self, req, resp, resource, uri_kwargs=None):
    # Falcon middleware hook: try to authenticate the request after routing.
    # Skip if an earlier middleware already set the user.
    if ('user' in req.context):
        return
    identifier = self.identify(req, resp, resource, uri_kwargs)
    user = self.try_storage(identifier, req, resp, resource, uri_kwargs)
    if (user is not None):
        req.context['user'] = user
    elif (self.challenge is not None):
        # Authentication failed: record this backend's challenge for a 401 later.
        req.context.setdefault('challenges', list()).append(self.challenge) | Process resource after routing to it.
This is basic falcon middleware handler.
Args:
req (falcon.Request): request object
resp (falcon.Response): response object
resource (object): resource object matched by falcon router
uri_kwargs (dict): additional keyword argument from uri template.
For ``falcon<1.0.0`` this is always ``None`` | codesearchnet |
def __init__(self, save_steps=None, save_secs=None, output_dir=None, summary_writer=None, scaffold=None, summary_op=None):
    # Exactly-one-of check: `and` binds tighter than `or`, so this reads as
    # (both None) or (both set) -> error.
    if scaffold is None and summary_op is None or (scaffold is not None and summary_op is not None):
        raise ValueError('Exactly one of scaffold or summary_op must be provided.')
    self._summary_op = summary_op
    self._summary_writer = summary_writer
    self._output_dir = output_dir
    self._scaffold = scaffold
    # Timer decides when to save, by seconds and/or steps.
    self._timer = SecondOrStepTimer(every_secs=save_secs, every_steps=save_steps) | Initializes a `SummarySaverHook`.
Args:
save_steps: `int`, save summaries every N steps. Exactly one of
`save_secs` and `save_steps` should be set.
save_secs: `int`, save summaries every N seconds.
output_dir: `string`, the directory to save the summaries to. Only used if
no `summary_writer` is supplied.
summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
one will be created accordingly.
scaffold: `Scaffold` to get summary_op if it's not provided.
summary_op: `Tensor` of type `string` containing the serialized `Summary`
protocol buffer or a list of `Tensor`. They are most likely an output by
TF summary methods like `tf.compat.v1.summary.scalar` or
`tf.compat.v1.summary.merge_all`. It can be passed in as one tensor; if
more than one, they must be passed in as a list.
Raises:
ValueError: Exactly one of scaffold or summary_op should be set. | github-repos |
def _compute_sequence_length_from_mask(mask, batch_first):
    # Sum True entries along the timestep axis: axis 1 when batch-major,
    # axis 0 when time-major. Assumes the mask is right-padded.
    timestep_index = 0 if not batch_first else 1
    return torch.sum(mask.int(), dim=timestep_index) | Calculate the sequence length tensor (1-D) based on the masking tensor.
The masking tensor is a 2D boolean tensor with shape [batch, timestep]. For
any timestep that should be masked, the corresponding field will be False.
Consider the following example:
a = [[True, True, False, False]
[True, True, True, False]]
It is a (2, 4) tensor, and the corresponding sequence length result should
be 1D tensor with value [2, 3]. Note that the masking tensor must be right
padded that could be checked by, e.g., `is_sequence_right_padded()`.
Args:
mask: Boolean tensor with shape [batch, timestep] if batch_first=True,
or [timestep, batch] otherwise.
batch_first: Boolean, which indicates whether the mask is batch major
(True) or time major (False).
Returns:
sequence_length: 1D int32 tensor. | github-repos |
class UnbatchPandas(beam.PTransform):
    # Explodes a PCollection of DataFrame/Series elements via _unbatch_transform.
    def __init__(self, proxy, include_indexes=False):
        # proxy: proxy object describing the element type of the input PCollection.
        # include_indexes: if True, attempt to include DataFrame index columns in
        #     the output schema (behavior delegated to _unbatch_transform).
        self._proxy = proxy
        self._include_indexes = include_indexes
    def expand(self, pcoll):
        # First '|' is Beam transform application; the trailing text is dataset residue.
        return pcoll | _unbatch_transform(self._proxy, self._include_indexes) | A transform that explodes a PCollection of DataFrame or Series. DataFrame
is converterd to a schema-aware PCollection, while Series is converted to its
underlying type.
Args:
include_indexes: (optional, default: False) When unbatching a DataFrame
if include_indexes=True, attempt to include index columns in the output
schema for expanded DataFrames. Raises an error if any of the index
levels are unnamed (name=None), or if any of the names are not unique
among all column and index names. | github-repos |
def flux_down(self, fluxDownTop, emission=None):
    # Compute DOWNWELLING flux at layer interfaces (uses the Tdown transmission
    # matrix). NOTE(review): the attached docstring text talks about upwelling
    # flux — it appears copy-pasted from flux_up; the code below is downwelling.
    if (emission is None):
        emission = np.zeros_like(self.absorptivity)
    # Stack the boundary flux on top of per-level emissions, then propagate.
    E = np.concatenate((np.atleast_1d(fluxDownTop), emission), axis=(- 1))
    return np.squeeze(matrix_multiply(self.Tdown, E[(..., np.newaxis)])) | Compute upwelling radiative flux at interfaces between layers.
Inputs:
* fluxDownTop: downwelling flux into the top of the column
* emission: emission from atmospheric levels (N)
defaults to zero if not given
Returns:
* vector of downwelling radiative flux between levels (N+1)
element N is the flux reaching the surface.
def add_polyhedron(self, neighbors, center, color, opacity=1.0, draw_edges=False, edges_color=[0.0, 0.0, 0.0], edges_linewidth=2):
    # Render a convex polyhedron whose vertices are the neighbor sites.
    # NOTE: the mutable default `edges_color=[...]` is never mutated, so it is
    # benign here.
    points = vtk.vtkPoints()
    conv = vtk.vtkConvexPointSet()
    for i in range(len(neighbors)):
        (x, y, z) = neighbors[i].coords
        points.InsertPoint(i, x, y, z)
        conv.GetPointIds().InsertId(i, i)
    grid = vtk.vtkUnstructuredGrid()
    grid.Allocate(1, 1)
    grid.InsertNextCell(conv.GetCellType(), conv.GetPointIds())
    grid.SetPoints(points)
    dsm = vtk.vtkDataSetMapper()
    # Remember which sites this mapper represents (center + vertices).
    polysites = [center]
    polysites.extend(neighbors)
    self.mapper_map[dsm] = polysites
    # VTK 5 and 6+ have different mapper input APIs.
    if (vtk.VTK_MAJOR_VERSION <= 5):
        dsm.SetInputConnection(grid.GetProducerPort())
    else:
        dsm.SetInputData(grid)
    ac = vtk.vtkActor()
    ac.SetMapper(dsm)
    ac.GetProperty().SetOpacity(opacity)
    if (color == 'element'):
        # Color by the majority-occupancy species of the center site.
        myoccu = 0.0
        for (specie, occu) in center.species.items():
            if (occu > myoccu):
                myspecie = specie
                myoccu = occu
        color = [(i / 255) for i in self.el_color_mapping[myspecie.symbol]]
        ac.GetProperty().SetColor(color)
    else:
        ac.GetProperty().SetColor(color)
    if draw_edges:
        ac.GetProperty().SetEdgeColor(edges_color)
        ac.GetProperty().SetLineWidth(edges_linewidth)
        ac.GetProperty().EdgeVisibilityOn()
    self.ren.AddActor(ac) | Adds a polyhedron.
Args:
neighbors: Neighbors of the polyhedron (the vertices).
center: The atom in the center of the polyhedron.
color: Color for text as RGB.
opacity: Opacity of the polyhedron
draw_edges: If set to True, the a line will be drawn at each edge
edges_color: Color of the line for the edges
edges_linewidth: Width of the line drawn for the edges | codesearchnet |
def _FormatInAddrExToken(self, token_data):
protocol = bsmtoken.BSM_PROTOCOLS.get(token_data.net_type, 'UNKNOWN')
if (token_data.net_type == 4):
ip_address = self._FormatPackedIPv6Address(token_data.ip_address[:4])
elif (token_data.net_type == 16):
ip_address = self._FormatPackedIPv6Address(token_data.ip_address)
return {'protocols': protocol, 'net_type': token_data.net_type, 'address': ip_address} | Formats an extended IPv4 address token as a dictionary of values.
Args:
token_data (bsm_token_data_in_addr_ex): AUT_IN_ADDR_EX token data.
Returns:
dict[str, str]: token values. | codesearchnet |
def iter_compress(item_iter, flag_iter):
true_items = (item for (item, flag) in zip(item_iter, flag_iter) if flag)
return true_items | iter_compress - like numpy compress
Args:
item_iter (list):
flag_iter (list): of bools
Returns:
list: true_items
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_iter import * # NOQA
>>> item_iter = [1, 2, 3, 4, 5]
>>> flag_iter = [False, True, True, False, True]
>>> true_items = iter_compress(item_iter, flag_iter)
>>> result = list(true_items)
>>> print(result)
[2, 3, 5] | juraj-google-style |
def bind(port, socket_type, socket_proto):
    # Probe whether `port` is bindable on every supported address family.
    # Returns the bound port number (useful when port=0 lets the OS pick),
    # or None when no family could be bound.
    got_socket = False
    for family in (socket.AF_INET6, socket.AF_INET):
        try:
            sock = socket.socket(family, socket_type, socket_proto)
            got_socket = True
        except socket.error:
            # Family unsupported on this kernel; try the next one.
            continue
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind(('', port))
            if (socket_type == socket.SOCK_STREAM):
                sock.listen(1)
            # Re-read the actual port in case the OS assigned one (port=0).
            port = sock.getsockname()[1]
        except socket.error:
            # Port unavailable on a supported family -> overall failure.
            return None
        finally:
            # The socket is only a probe; always release it.
            sock.close()
    return (port if got_socket else None) | Try to bind to a socket of the specified type, protocol, and port.
This is primarily a helper function for PickUnusedPort, used to see
if a particular port number is available.
For the port to be considered available, the kernel must support at least
one of (IPv6, IPv4), and the port must be available on each supported
family.
Args:
port: The port number to bind to, or 0 to have the OS pick a free port.
socket_type: The type of the socket (ex: socket.SOCK_STREAM).
socket_proto: The protocol of the socket (ex: socket.IPPROTO_TCP).
Returns:
The port number on success or None on failure. | codesearchnet |
def modify_model_interface(input_file, output_file, input_type, output_type):
    # Convert a quantized TFLite model's float input/output interface to the
    # requested integer types via the C++ helper.
    input_type_int = _parse_type_to_int(input_type, 'input_type')
    output_type_int = _parse_type_to_int(output_type, 'output_type')
    # Native helper returns 0 on success, non-zero on failure.
    status = _pywrap_modify_model_interface.modify_model_interface(input_file, output_file, input_type_int, output_type_int)
    if status != 0:
        raise RuntimeError('Error occurred when trying to modify the model input type from float to {input_type} and output type from float to {output_type}.'.format(input_type=input_type, output_type=output_type)) | Modify a quantized model's interface (input/output) from float to integer.
Args:
input_file: Full path name to the input tflite file.
output_file: Full path name to the output tflite file.
input_type: Final input interface type.
output_type: Final output interface type.
Raises:
RuntimeError: If the modification of the model interface was unsuccessful.
ValueError: If the input_type or output_type is unsupported. | github-repos |
def select_and_insert(self, name, data):
    # Convenience wrapper: select the target object, then insert the data.
    self.select_obj(name)
    self.insert_into_obj(data) | Combines selection and data insertion into one function
Args:
name: the name of the object you want to insert into
data: the data you want to insert
Returns:
None
Raises:
None | juraj-google-style |
def van_dec_2d(x, skip_connections, output_shape, first_depth, hparams=None):
with tf.variable_scope('van_dec'):
dec = tf.layers.conv2d_transpose(x, (first_depth * 4), 3, padding='same', activation=tf.nn.relu, strides=2)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
dec = tf.contrib.layers.layer_norm(dec)
dec = tf.layers.conv2d_transpose(dec, (first_depth * 4), 3, padding='same', activation=tf.nn.relu, strides=1)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
dec = tf.layers.conv2d_transpose(dec, (first_depth * 2), 3, padding='same', activation=tf.nn.relu, strides=1)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
dec = tf.contrib.layers.layer_norm(dec)
dec = tf.layers.conv2d_transpose(dec, (first_depth * 2), 3, padding='same', activation=tf.nn.relu, strides=2)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
dec = tf.layers.conv2d_transpose(dec, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
dec = tf.contrib.layers.layer_norm(dec)
dec = tf.layers.conv2d_transpose(dec, (output_shape[3] + 1), 3, padding='same', activation=tf.nn.relu, strides=2)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
out_mask = tf.layers.conv2d_transpose(dec, (output_shape[3] + 1), 3, strides=1, padding='same', activation=None)
mask = tf.nn.sigmoid(out_mask[(:, :, :, 3:4)])
out = out_mask[(:, :, :, :3)]
return ((out * mask) + (skip_connections[0] * (1 - mask))) | The VAN decoder.
Args:
x: The analogy information to decode.
skip_connections: The encoder layers which can be used as skip connections.
output_shape: The shape of the desired output image.
first_depth: The depth of the first layer of the van image encoder.
hparams: The python hparams.
Returns:
The decoded image prediction. | codesearchnet |
def merge(self, merge_commit_message=None, should_remove_source_branch=False, merge_when_pipeline_succeeds=False, **kwargs):
    # Accept this merge request via the GitLab API, sending only the options
    # that were explicitly enabled.
    path = ('%s/%s/merge' % (self.manager.path, self.get_id()))
    data = {}
    if merge_commit_message:
        data['merge_commit_message'] = merge_commit_message
    if should_remove_source_branch:
        data['should_remove_source_branch'] = True
    if merge_when_pipeline_succeeds:
        data['merge_when_pipeline_succeeds'] = True
    server_data = self.manager.gitlab.http_put(path, post_data=data, **kwargs)
    # Refresh local attributes from the server's response.
    self._update_attrs(server_data) | Accept the merge request.
Args:
merge_commit_message (str): Commit message
should_remove_source_branch (bool): If True, removes the source
branch
merge_when_pipeline_succeeds (bool): Wait for the build to succeed,
then merge
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabMRClosedError: If the merge failed | codesearchnet |
def LVMPathSpecGetVolumeIndex(path_spec):
    # Prefer an explicit volume_index attribute; otherwise derive it from a
    # location of the form '/lvm<number>'.
    volume_index = getattr(path_spec, 'volume_index', None)
    if volume_index is None:
        location = getattr(path_spec, 'location', None)
        if location is None or not location.startswith('/lvm'):
            return None
        volume_index = None
        try:
            # Locations are 1-based ('/lvm1' is the first volume); convert to 0-based.
            volume_index = int(location[4:], 10) - 1
        except ValueError:
            pass
    if volume_index is None or volume_index < 0:
        return None
    return volume_index | Retrieves the volume index from the path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
int: volume index or None if not available. | juraj-google-style |
def __init__(self, connection):
    # Keep the failing connection and build a readable message from its response.
    self.connection = connection
    response = connection.response
    super(BambouHTTPError, self).__init__("[HTTP %s(%s)] %s" % (response.status_code, response.reason, response.errors)) | Intializes a BambouHTTPError
super(BambouHTTPError, self).__init__("[HTTP %s(%s)] %s" % (response.status_code, response.reason, response.errors)) | Intializes a BambouHTTPError
Args:
connection: the Connection object | juraj-google-style |
def access_vlan(self, inter_type, inter, vlan_id):
    # Build the NETCONF XML payload assigning an L2 interface to an access VLAN,
    # then send it through the device callback.
    config = ET.Element('config')
    interface = ET.SubElement(config, 'interface',
                              xmlns=("urn:brocade.com:mgmt:"
                                     "brocade-interface"))
    int_type = ET.SubElement(interface, inter_type)
    name = ET.SubElement(int_type, 'name')
    name.text = inter
    switchport = ET.SubElement(int_type, 'switchport')
    access = ET.SubElement(switchport, 'access')
    accessvlan = ET.SubElement(access, 'accessvlan')
    accessvlan.text = vlan_id
    try:
        self._callback(config)
        return True
    except Exception as error:
        # Deliberate best-effort API: log the failure and signal via return value.
        logging.error(error)
        return False | Add a L2 Interface to a specific VLAN.
Args:
inter_type: The type of interface you want to configure. Ex.
tengigabitethernet, gigabitethernet, fortygigabitethernet.
inter: The ID for the interface you want to configure. Ex. 1/0/1
vlan_id: ID for the VLAN interface being modified. Value of 2-4096.
Returns:
True if command completes successfully or False if not.
Raises:
None | juraj-google-style |
def read_log(self, logfile):
logfile.seek(0)
field_names, _ = self._parse_bro_header(logfile)
while 1:
_line = next(logfile).strip()
if not _line.startswith('
yield self._cast_dict(dict(zip(field_names, _line.split(self.delimiter))))
else:
time.sleep(.1)
break | The read_log method returns a memory efficient generator for rows in a Bro log.
Usage:
rows = my_bro_reader.read_log(logfile)
for row in rows:
do something with row
Args:
logfile: The Bro Log file. | juraj-google-style |
def export(self, filepath, encoding='utf-8', gzipped=True):
    # Serialize the word-frequency dictionary deterministically (sorted keys)
    # and write it out, optionally gzip-compressed.
    data = json.dumps(self.word_frequency.dictionary, sort_keys=True)
    write_file(filepath, encoding, gzipped, data) | Export the word frequency list for import in the future
Args:
filepath (str): The filepath to the exported dictionary
encoding (str): The encoding of the resulting output
gzipped (bool): Whether to gzip the dictionary or not | codesearchnet |
def _wrap_decorator(wrapped_function, decorator_name):
    # Returns a decorator factory: applying `wrapper` to a function produces a
    # TFDecorator that preserves `wrapped_function`'s signature for doc tools.
    def wrapper(wrapper_func):
        return tf_decorator.make_decorator(wrapped_function, wrapper_func, decorator_name)
    return wrapper | Indicate that one function wraps another.
This decorator wraps a function using `tf_decorator.make_decorator`
so that doc generation scripts can pick up original function
signature.
It would be better to use @functools.wrap decorator, but it would
not update function signature to match wrapped function in Python 2.
Args:
wrapped_function: The function that decorated function wraps.
decorator_name: The name of the decorator.
Returns:
Function that accepts wrapper function as an argument and returns
`TFDecorator` instance. | github-repos |
def method_schema(self, method: str, iterate: bool=False) -> dict:
endpoint, method = method.rsplit('.', 1)
resource = self.api_document
for e in endpoint.split('.'):
resource = resource['resources'][e]
resource = resource['methods'][method]['response']['$ref']
properties = self.api_document['schemas'][resource]['properties']
schema = self.to_schema(properties)
if iterate or ('List' in resource and resource.endswith('Response')):
for entry in schema:
if entry['type'] == 'RECORD':
return entry['fields']
elif entry['mode'] == 'REPEATED':
entry['mode'] = 'NULLABLE'
return [entry]
raise 'Unahandled discovery schema.'
else:
return schema | Return BigQuery schema for a Discovery API function.
Use the full dot notation of the rest API function.
Args:
method: the dot notation name of the Google API function
iterate: if true, return only iterable schema
Returns:
A dictionary representation of the resource. | github-repos |
def from_rfc3339(value):
    # Parse a microsecond-precision RFC3339 string and attach UTC tzinfo.
    return datetime.datetime.strptime(value, _RFC3339_MICROS).replace(tzinfo=pytz.utc) | Convert a microsecond-precision timestamp to datetime.
Args:
value (str): The RFC3339 string to convert.
Returns:
datetime.datetime: The datetime object equivalent to the timestamp in
UTC. | juraj-google-style |
def get_numeric_features_to_observed_range(examples):
    # Collect every observed value per numeric feature across all examples,
    # then reduce each feature to its min/max range.
    observed_features = collections.defaultdict(list)
    for example in examples:
        for feature_name in get_numeric_feature_names(example):
            original_feature = parse_original_feature_from_example(
                example, feature_name)
            observed_features[feature_name].extend(original_feature.original_value)
    return {
        feature_name: {
            'observedMin': min(feature_values),
            'observedMax': max(feature_values),
        }
        for feature_name, feature_values in iteritems(observed_features)
    } | Returns numerical features and their observed ranges.
Args:
examples: Examples to read to get ranges.
Returns:
A dict mapping feature_name -> {'observedMin': 'observedMax': } dicts,
with a key for each numerical feature. | juraj-google-style |
def FlagCxx14Features(filename, clean_lines, linenum, error):
line = clean_lines.elided[linenum]
include = Match('\\s*
if (include and (include.group(1) in ('scoped_allocator', 'shared_mutex'))):
error(filename, linenum, 'build/c++14', 5, ('<%s> is an unapproved C++14 header.' % include.group(1))) | Flag those C++14 features that we restrict.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. | codesearchnet |
def _get_permutation(tensor, n_dims, active_dim):
    # Compute the axis permutation that swaps the active spatial dimension with
    # the last one; returns None when they already coincide.
    # NOTE(review): an empty/unknown shape is treated as "rank not static";
    # a true rank-0 tensor would also hit this branch — confirm callers never
    # pass scalars.
    if not tensor.shape:
        raise ValueError("Tensor's rank should be static")
    rank = len(tensor.shape)
    # Leading (rank - n_dims) axes are batch dimensions.
    batch_rank = rank - n_dims
    if active_dim == n_dims - 1:
        return None
    perm = np.arange(rank)
    perm[rank - 1] = batch_rank + active_dim
    perm[batch_rank + active_dim] = rank - 1
    return perm | Returns the permutation that swaps the active and the last dimensions.
Args:
tensor: `Tensor` having a statically known rank.
n_dims: Number of spatial dimensions.
active_dim: The active spatial dimension.
Returns:
A list representing the permutation, or `None` if no permutation needed.
For example, with 'tensor` having rank 5, `n_dims = 3` and `active_dim = 1`
yields [0, 1, 2, 4, 3]. Explanation: we start with [0, 1, 2, 3, 4], where the
last n_dims=3 dimensions are spatial dimensions, and the first two are batch
dimensions. Among the spatial dimensions, we take the one at index 1, which
is "3", and swap it with the last dimension "4". | github-repos |
def get_auditwheel_output(wheel_path: str) -> None:
stringio = io.StringIO()
previous_stdout = sys.stdout
sys.stdout = stringio
auditwheel_parser = argparse.ArgumentParser(description='Cross-distro Python wheels.')
sub_parsers = auditwheel_parser.add_subparsers(metavar='command', dest='cmd')
main_show.configure_parser(sub_parsers)
auditwheel_args = argparse.Namespace(WHEEL_FILE=wheel_path, verbose=1)
main_show.execute(args=auditwheel_args, p=auditwheel_parser)
sys.stdout = previous_stdout
return stringio.getvalue() | Run "auditwheel show" on the wheel and return the output.
Args:
wheel_path: path of the wheel file
Returns:
"auditwheel show" output | github-repos |
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    # Decode the Authentication struct: header first, then one or more
    # Credential substructures from the local payload stream.
    super(Authentication, self).read(input_stream, kmip_version=kmip_version)
    local_stream = utils.BytearrayStream(input_stream.read(self.length))
    credentials = []
    while self.is_tag_next(enums.Tags.CREDENTIAL, local_stream):
        credential = objects.Credential()
        credential.read(local_stream, kmip_version=kmip_version)
        credentials.append(credential)
    # At least one credential is required by the encoding.
    if (len(credentials) == 0):
        raise ValueError('Authentication encoding missing credentials.')
    self._credentials = credentials
    self.is_oversized(local_stream) | Read the data encoding the Authentication struct and decode it into
its constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0. | codesearchnet |
def powerset(iterable, nonempty=False, reverse=False):
    # Lazily generate all combinations of every size; start at size 1 when the
    # empty set is excluded. Reversing flips both the size order and the
    # element order so subsets come out largest-first, elements reversed.
    iterable = list(iterable)
    if nonempty:
        start = 1
    else:
        start = 0
    seq_sizes = range(start, (len(iterable) + 1))
    if reverse:
        seq_sizes = reversed(seq_sizes)
        iterable.reverse()
    return chain.from_iterable((combinations(iterable, r) for r in seq_sizes)) | Generate the power set of an iterable.
Args:
iterable (Iterable): The iterable from which to generate the power set.
Keyword Args:
nonempty (boolean): If True, don't include the empty set.
reverse (boolean): If True, reverse the order of the powerset.
Returns:
Iterable: An iterator over the power set.
Example:
>>> ps = powerset(np.arange(2))
>>> list(ps)
[(), (0,), (1,), (0, 1)]
>>> ps = powerset(np.arange(2), nonempty=True)
>>> list(ps)
[(0,), (1,), (0, 1)]
>>> ps = powerset(np.arange(2), nonempty=True, reverse=True)
>>> list(ps)
[(1, 0), (1,), (0,)] | codesearchnet |
def get_tri_area(pts):
a, b, c = pts[0], pts[1], pts[2]
v1 = np.array(b) - np.array(a)
v2 = np.array(c) - np.array(a)
area_tri = abs(sp.linalg.norm(sp.cross(v1, v2)) / 2)
return area_tri | Given a list of coords for 3 points,
Compute the area of this triangle.
Args:
pts: [a, b, c] three points | juraj-google-style |
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
    # Walk USB-device subkeys; each subkey name encodes
    # "<device_type>&<vendor>&<product>&<revision>".
    for subkey in registry_key.GetSubkeys():
        values_dict = {}
        values_dict['subkey_name'] = subkey.name
        name_values = subkey.name.split('&')
        number_of_name_values = len(name_values)
        if number_of_name_values != 4:
            logger.warning(
                'Expected 4 &-separated values in: {0:s}'.format(subkey.name))
        # Keep whatever components are present, even for malformed names.
        if number_of_name_values >= 1:
            values_dict['device_type'] = name_values[0]
        if number_of_name_values >= 2:
            values_dict['vendor'] = name_values[1]
        if number_of_name_values >= 3:
            values_dict['product'] = name_values[2]
        if number_of_name_values >= 4:
            values_dict['revision'] = name_values[3]
        event_data = windows_events.WindowsRegistryEventData()
        event_data.key_path = registry_key.path
        event_data.offset = registry_key.offset
        event_data.regvalue = values_dict
        event_data.source_append = self._SOURCE_APPEND
        if subkey.number_of_subkeys == 0:
            # No per-device keys: emit one event from the subkey itself.
            event = time_events.DateTimeValuesEvent(
                subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
            parser_mediator.ProduceEventWithEventData(event, event_data)
            continue
        # One device key per serial number; enrich the shared values_dict and
        # emit events for the subkey, the device key, and known child keys.
        for device_key in subkey.GetSubkeys():
            values_dict['serial'] = device_key.name
            friendly_name_value = device_key.GetValueByName('FriendlyName')
            if friendly_name_value:
                values_dict['friendly_name'] = friendly_name_value.GetDataAsObject()
            else:
                # Clear any value left over from the previous device key.
                values_dict.pop('friendly_name', None)
            parent_id_prefix_value = device_key.GetValueByName('ParentIdPrefix')
            if parent_id_prefix_value:
                values_dict['parent_id_prefix'] = (
                    parent_id_prefix_value.GetDataAsObject())
            else:
                values_dict.pop('parent_id_prefix', None)
            event = time_events.DateTimeValuesEvent(
                subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
            parser_mediator.ProduceEventWithEventData(event, event_data)
            event = time_events.DateTimeValuesEvent(
                device_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
            parser_mediator.ProduceEventWithEventData(event, event_data)
            device_parameter_key = device_key.GetSubkeyByName('Device Parameters')
            if device_parameter_key:
                event = time_events.DateTimeValuesEvent(
                    device_parameter_key.last_written_time,
                    definitions.TIME_DESCRIPTION_WRITTEN)
                parser_mediator.ProduceEventWithEventData(event, event_data)
            log_configuration_key = device_key.GetSubkeyByName('LogConf')
            if log_configuration_key:
                event = time_events.DateTimeValuesEvent(
                    log_configuration_key.last_written_time,
                    definitions.TIME_DESCRIPTION_WRITTEN)
                parser_mediator.ProduceEventWithEventData(event, event_data)
            properties_key = device_key.GetSubkeyByName('Properties')
            if properties_key:
                event = time_events.DateTimeValuesEvent(
                    properties_key.last_written_time,
                    definitions.TIME_DESCRIPTION_WRITTEN)
                parser_mediator.ProduceEventWithEventData(event, event_data) | Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key. | juraj-google-style |
def CompressedHistograms(self, run, tag):
    # Delegate to the run's accumulator; KeyError propagates for unknown run/tag.
    accumulator = self.GetAccumulator(run)
    return accumulator.CompressedHistograms(tag) | Retrieve the compressed histogram events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.CompressedHistogramEvents`. | juraj-google-style |
def get_stored_version(connection):
    # Read the stored schema version; the storage location differs per engine.
    if (connection.engine.name == 'sqlite'):
        # SQLite keeps the version in the user_version pragma; 0 means unset.
        version = connection.execute('PRAGMA user_version').fetchone()[0]
        if (version == 0):
            raise VersionIsNotStored
        return version
    elif (connection.engine.name == 'postgresql'):
        # PostgreSQL keeps the version in a dedicated table; a missing table
        # (ProgrammingError) or empty result means the version was never stored.
        try:
            r = connection.execute('SELECT version FROM {}.user_version;'.format(POSTGRES_SCHEMA_NAME)).fetchone()
            if (not r):
                raise VersionIsNotStored
            version = r[0]
        except ProgrammingError:
            raise VersionIsNotStored
        return version
    else:
        raise DatabaseError('Do not know how to get version from {} engine.'.format(connection.engine.name)) | Returns database version.
Args:
connection (sqlalchemy connection):
Raises: Assuming user_version pragma (sqlite case) and user_version table (postgresql case)
exist because they created with the database creation.
Returns:
int: version of the database. | codesearchnet |
def assert_corofunction(**kw):
    # Validate that every keyword value is a coroutine function; the keyword
    # name is used in the error message for a helpful diagnostic.
    for (name, value) in kw.items():
        if (not asyncio.iscoroutinefunction(value)):
            raise TypeError('paco: {} must be a coroutine function'.format(name)) | Asserts if a given values are a coroutine function.
Arguments:
**kw (mixed): value to check if it is an iterable.
Raises:
TypeError: if assertion fails. | codesearchnet |
def operate_magmom(self, magmom):
    # Magnetic moments are axial vectors: apply rotation, then scale by the
    # rotation determinant (handles improper rotations) and time reversal.
    magmom = Magmom(magmom)
    transformed_moment = self.apply_rotation_only(magmom.global_moment) * \
        np.linalg.det(self.rotation_matrix) * self.time_reversal
    return Magmom.from_global_moment_and_saxis(transformed_moment, magmom.saxis) | Apply time reversal operator on the magnetic moment. Note that
magnetic moments transform as axial vectors, not polar vectors.
See 'Symmetry and magnetic structures', Rodríguez-Carvajal and
Bourée for a good discussion. DOI: 10.1051/epjconf/20122200010
Args:
magmom: Magnetic moment as electronic_structure.core.Magmom
class or as list or np array-like
Returns:
Magnetic moment after operator applied as Magmom class | juraj-google-style |
def gui(discord_token, discord_client_id):
    # Launch the Modis control panel as a Tk window; currently only the
    # Discord frame is mounted.
    logger.info('Starting Modis in GUI')
    import tkinter as tk
    logger.debug('Loading packages')
    from modis.discord_modis import gui as discord_modis_gui
    from modis.reddit_modis import gui as reddit_modis_gui
    from modis.facebook_modis import gui as facebook_modis_gui
    logger.debug('Initialising window')
    root = tk.Tk()
    root.minsize(width=800, height=400)
    root.geometry('800x600')
    root.title('Modis Control Panel')
    root.iconbitmap('{}/assets/modis.ico'.format(file_dir))
    # NOTE(review): the next line appears to be dead code preserved as a string
    # literal (an old ttk.Notebook layout), mangled by extraction — verify
    # against upstream before touching.
    'notebook = ttk.Notebook(root)\n    notebook.grid(column=0, row=0, padx=0, pady=0, sticky="W E N S")\n\n
    discord = discord_modis_gui.Frame(root, discord_token, discord_client_id)
    discord.grid(column=0, row=0, padx=0, pady=0, sticky='W E N S')
    root.columnconfigure(0, weight=1)
    root.rowconfigure(0, weight=1)
    discord.columnconfigure(0, weight=1)
    discord.rowconfigure(0, weight=1)
    logger.debug('GUI initialised')
    root.mainloop() | Start Modis in gui format.
Args:
discord_token (str): The bot token for your Discord application
discord_client_id: The bot's client ID | codesearchnet |
def depth(self, local: bool = True) -> int:
    # Circuit depth = longest path length in the DAG, minus 1 for the
    # sentinel endpoints. With local=False, single-qubit operations
    # (graph degree <= 2) are filtered out first.
    G = self.graph
    if not local:
        def remove_local(dagc: DAGCircuit) \
                -> Generator[Operation, None, None]:
            for elem in dagc:
                if dagc.graph.degree[elem] > 2:
                    yield elem
        G = DAGCircuit(remove_local(self)).graph
    return nx.dag_longest_path_length(G) - 1 | Return the circuit depth.
Args:
local: If True include local one-qubit gates in depth
calculation. Else return the multi-qubit gate depth. | juraj-google-style |
def get_template_path(filename):
if os.path.isfile(filename):
return os.path.abspath(filename)
for i in sys.path:
if os.path.isfile(os.path.join(i, filename)):
return os.path.abspath(os.path.join(i, filename))
return None | Find raw template in working directory or in sys.path.
template_path from config may refer to templates colocated with the Stacker
config, or files in remote package_sources. Here, we emulate python module
loading to find the path to the template.
Args:
filename (str): Template filename.
Returns:
Optional[str]: Path to file, or None if no file found | codesearchnet |
def report_to_rows(report):
    """Helper to convert DBM files into an iterator of rows, memory efficient.

    Usage example:
        filename, report = report_file(...)
        rows = report_to_rows(report)

    Args:
        report: (iterator or file) Either an iterator of chunks or a file
            that will be converted to rows.

    Returns:
        Iterator of lists representing each row.
    """
    if isinstance(report, GeneratorType):
        # Streamed chunks may end mid-row; carry the partial tail into the
        # next chunk before handing complete lines to the CSV parser.
        pending = ''
        for chunk in report:
            complete, tail = chunk.rsplit('\n', 1)
            yield from csv_to_rows(pending + complete)
            pending = tail
    else:
        yield from csv_to_rows(report)
Usage example:
```
filename, report = report_file(...)
rows = report_to_rows(report)
```
Args:
* report: (iterator or file) Either an iterator or file that will be
converted to rows.
Returns:
* Iterator of lists representing each row. | github-repos |
def addFixedEffect(self, F=None, A=None):
    """Add a fixed effect term to the model.

    Args:
        F: fixed effect matrix [N, 1]; defaults to a column of ones.
        A: design matrix [K, P] (e.g. SP.ones((1, P)) for a common effect,
            SP.eye(P) for an any effect); defaults to SP.eye(P).
    """
    # BUG FIX: `A == None` / `F == None` perform elementwise comparison on
    # array arguments, making the `if` truth value ambiguous; identity
    # checks are the correct way to test for a missing argument.
    if A is None:
        A = SP.eye(self.P)
    if F is None:
        F = SP.ones((self.N, 1))
    assert A.shape[1] == self.P, 'Incompatible shape'
    assert F.shape[0] == self.N, 'Incompatible shape'
    if F.shape[1] > 1:
        # One term per column of F, all sharing the same design matrix A.
        for m in range(F.shape[1]):
            self.vd.addFixedEffTerm(A, F[:, m:m + 1])
    else:
        self.vd.addFixedEffTerm(A, F)
    # Any previously fitted/cached state is now stale.
    self.gp = None
    self.init = False
    self.fast = False
    self.optimum = None
    self.cache['Sigma'] = None
    self.cache['Hessian'] = None
    self.cache['Lparams'] = None
    self.cache['paramsST'] = None
Args:
F: fixed effect matrix [N,1]
A: design matrix [K,P] (e.g. SP.ones((1,P)) common effect; SP.eye(P) any effect) | juraj-google-style |
async def start_server_in_loop(runner, hostname, port, agent):
    """Listens to http requests and sends them to the webapp.

    Args:
        runner: AppRunner to process the http requests.
        hostname: host name to listen from.
        port: port to listen from.
        agent: agent that owns the web app.
    """
    await runner.setup()
    agent.web.server = aioweb.TCPSite(runner, hostname, port)
    await agent.web.server.start()
    # NOTE(review): the original log line was truncated in this copy of the
    # source ('f"Serving on http:'); reconstructed from hostname/port.
    logger.info(f"Serving on http://{hostname}:{port}/")
Args:
runner: AppRunner to process the http requests
hostname: host name to listen from.
port: port to listen from.
agent: agent that owns the web app. | juraj-google-style |
def require_representation(self, req):
    """Require raw representation dictionary from falcon request object.

    This does not perform any field parsing or validation but only uses
    allowed content-encoding handler to decode content body.

    Note:
        Currently only JSON is allowed as content type.

    Args:
        req (falcon.Request): request object

    Returns:
        dict: raw dictionary of representation supplied in request body

    Raises:
        falcon.HTTPUnsupportedMediaType: if the Content-Type header cannot
            be parsed or names a non-JSON content type.
    """
    try:
        (type_, subtype, _) = parse_mime_type(req.content_type)
        content_type = '/'.join((type_, subtype))
    # BUG FIX: the bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; only genuine parse failures should map to HTTP 415.
    except Exception:
        raise falcon.HTTPUnsupportedMediaType(description='Invalid Content-Type header: {}'.format(req.content_type))
    if content_type == 'application/json':
        body = req.stream.read()
        return json.loads(body.decode('utf-8'))
    else:
        raise falcon.HTTPUnsupportedMediaType(description='only JSON supported, got: {}'.format(content_type))
This does not perform any field parsing or validation but only uses
allowed content-encoding handler to decode content body.
Note:
Currently only JSON is allowed as content type.
Args:
req (falcon.Request): request object
Returns:
dict: raw dictionary of representation supplied in request body | codesearchnet |
def file_name(self, value):
    """The file_name property setter.

    Args:
        value (string): the property value.
    """
    matches_default = value == self._defaults['fileName']
    if matches_default and 'fileName' in self._values:
        # Re-assigning the default clears the stored override.
        self._values.pop('fileName')
    else:
        self._values['fileName'] = value
Args:
value (string). the property value. | juraj-google-style |
def append(self, other, ignore_index=False):
    """Append rows of `other` to the end of this frame, returning a new object.

    Wrapper around the :meth:`pandas.DataFrame.append` method.

    Args:
        other (Cartesian):
        ignore_index (sequence, bool, int): If it is a boolean, it behaves
            like in the description of :meth:`pandas.DataFrame.append`.
            If it is a sequence, it becomes the new index. If it is an
            integer, ``range(ignore_index, ignore_index + len(new))``
            becomes the new index.

    Returns:
        Cartesian:
    """
    if not isinstance(other, self.__class__):
        raise ValueError('May only append instances of same type.')
    # NOTE: `type(...) is bool` (not isinstance) is deliberate —
    # isinstance(True, int) is True, which would misroute booleans below.
    if type(ignore_index) is bool:
        new_frame = self._frame.append(other._frame, ignore_index=ignore_index, verify_integrity=True)
    else:
        new_frame = self._frame.append(other._frame, ignore_index=True, verify_integrity=True)
        # BUG FIX: the index override must only apply for non-boolean
        # ignore_index; previously a boolean fell through to the
        # else-branch below and clobbered the index with True/False,
        # contradicting the documented contract.
        if type(ignore_index) is int:
            new_frame.index = range(ignore_index, ignore_index + len(new_frame))
        else:
            new_frame.index = ignore_index
    return self.__class__(new_frame)
Wrapper around the :meth:`pandas.DataFrame.append` method.
Args:
other (Cartesian):
ignore_index (sequence, bool, int): If it is a boolean, it
behaves like in the description of
:meth:`pandas.DataFrame.append`.
If it is a sequence, it becomes the new index.
If it is an integer,
``range(ignore_index, ignore_index + len(new))``
becomes the new index.
Returns:
Cartesian: | codesearchnet |
def _get_object_type(filename, filepath):
    """Get type of an object based on its filename/filepath.

    Args:
        filename: object's filename.
        filepath: object's filepath.

    Returns:
        Type of the object (example, kata, unit-test or unspecified).
    """
    name_without_ext = os.path.splitext(filename)[0].lower()
    path_parts = filepath.split(os.sep)
    if name_without_ext.endswith(PrecompiledExampleType.test_ends):
        return PRECOMPILED_OBJECT_TYPE_UNIT_TEST
    if PrecompiledExampleType.katas in path_parts:
        return PRECOMPILED_OBJECT_TYPE_KATA
    if PrecompiledExampleType.examples in path_parts:
        return PRECOMPILED_OBJECT_TYPE_EXAMPLE
    return PRECOMPILED_OBJECT_TYPE_UNSPECIFIED
Args:
filename: object's filename
filepath: object's filepath
Returns: type of the object (example, kata, unit-test) | github-repos |
def index_max(x, idx, y):
    """Pure equivalent of `x[idx] = maximum(x[idx], y)`.

    Returns the value of x that would result from the NumPy-style indexed
    assignment `x[idx] = maximum(x[idx], y)`. Because it's a pure function,
    `x` itself won't be changed.

    Args:
        x: an array with the values to be updated.
        idx: a Numpy-style index, consisting of `None`, integers, slice
            objects, ellipses, ndarrays with integer dtypes, or a tuple of
            the above.
        y: the array of updates. `y` must be broadcastable to the shape of
            the array that would be returned by `x[idx]`.

    Returns:
        The updated version of `x`.
    """
    return _index_update_helper(tf_np.ndarray._with_index_max, x, idx, y)
Returns the value of x that would result from the NumPy-style indexed
assignment `x[idx] = maximum(x[idx], y)`. Because it's a pure function, `x`
itself won't be changed.
Args:
x: an array with the values to be updated.
idx: a Numpy-style index, consisting of `None`, integers, slice objects,
ellipses, ndarrays with integer dtypes, or a tuple of the above.
y: the array of updates. `y` must be broadcastable to the shape of the array
that would be returned by `x[idx]`.
Returns:
The updated version of `x`. | github-repos |
def twopercent(station_code):
    """Two percent high design temperature for a location, in degrees Celsius.

    First tries the station's .ddy design-day file; falls back to scraping
    the '2%' row of the .stat file.

    Args:
        station_code (str): Weather Station Code.

    Returns:
        float: degrees Celsius.

    Raises:
        Exception: if the temperature cannot be found in either file.
    """
    temp = None
    try:
        fin = open('%s/%s' % (env.WEATHER_DATA_PATH,
                              _basename(station_code, 'ddy')))
        for line in fin:
            # NOTE(review): the regex literal was lost in this copy of the
            # source (`re.search(, line)` is not valid Python) — restore the
            # original pattern from upstream before relying on this path.
            value = re.search(, line)
            if value:
                temp = float(value.groups()[0])
    except IOError:
        pass
    if not temp:
        try:
            fin = open('%s/%s' % (env.WEATHER_DATA_PATH,
                                  _basename(station_code, 'stat')))
            flag = 0
            tdata = []
            # Collect the 3 tab-separated lines following each '2%' marker.
            for line in fin:
                # NOTE(review): `is not -1` relies on CPython small-int
                # identity; should be `!= -1`.
                if line.find('2%') is not -1:
                    flag = 3
                if flag > 0:
                    tdata.append(line.split('\t'))
                    flag -= 1
            temp = float(tdata[2][5].strip())
        except IOError:
            pass
    if temp:
        return temp
    else:
        raise Exception("Error: 2% High Temperature not found")
    Degrees in Celsius
Args:
station_code (str): Weather Station Code
Returns:
        float degrees Celsius
def simulate_measurement(self, index: int) -> bool:
    """Simulates a single qubit measurement in the computational basis.

    Args:
        index: Which qubit is measured.

    Returns:
        True iff the measurement result corresponds to the |1> state.
    """
    shard_args = self._shard_num_args({'index': index})
    # Probability of reading |1> is the sum of per-shard contributions.
    prob_one = np.sum(self._pool.map(_one_prob_per_shard, shard_args))
    result = bool(np.random.random() <= prob_one)
    collapse_args = self._shard_num_args({
        'index': index,
        'result': result,
        'prob_one': prob_one,
    })
    # Project every shard onto the sampled outcome.
    self._pool.map(_collapse_state, collapse_args)
    return result
return result | Simulates a single qubit measurement in the computational basis.
Args:
index: Which qubit is measured.
Returns:
True iff the measurement result corresponds to the |1> state. | juraj-google-style |
def __init__(self, ignore):
    """Initialize the visitor.

    Args:
        ignore: A list of prefixes to ignore. Typically, this list includes
            something like "builtins.", since we don't want to convert
            builtin types to late types. (And, more generally, types of
            modules that are always loaded by pytype don't need to be late
            types.)
    """
    super().__init__()
    self._ignore = ignore
Args:
ignore: A list of prefixes to ignore. Typically, this list includes things
something like like "builtins.", since we don't want to convert builtin
types to late types. (And, more generally, types of modules that are
always loaded by pytype don't need to be late types) | github-repos |
def _default_ising_beta_range(h, J):
abs_h = [abs(hh) for hh in h.values() if (hh != 0)]
abs_J = [abs(jj) for jj in J.values() if (jj != 0)]
abs_biases = (abs_h + abs_J)
if (not abs_biases):
return [0.1, 1.0]
min_delta_energy = min(abs_biases)
abs_bias_dict = {k: abs(v) for (k, v) in h.items()}
for ((k1, k2), v) in J.items():
abs_bias_dict[k1] += abs(v)
abs_bias_dict[k2] += abs(v)
max_delta_energy = max(abs_bias_dict.values())
hot_beta = (np.log(2) / max_delta_energy)
cold_beta = (np.log(100) / min_delta_energy)
return [hot_beta, cold_beta] | Determine the starting and ending beta from h J
Args:
h (dict)
J (dict)
Assume each variable in J is also in h.
We use the minimum bias to give a lower bound on the minimum energy gap, such at the
final sweeps we are highly likely to settle into the current valley. | codesearchnet |
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, object_queries: torch.Tensor, output_attentions: Optional[bool] = None):
    """Encoder-layer forward pass: self-attention then feed-forward, each
    with dropout, a residual connection and post-layer-norm.

    Args:
        hidden_states (`torch.FloatTensor`): input to the layer of shape
            `(seq_len, batch, embed_dim)`.
        attention_mask (`torch.FloatTensor`): attention mask of size
            `(batch, source_len)` where padding elements are indicated by
            very large negative values.
        object_queries (`torch.FloatTensor`, *optional*): object queries
            (also called content embeddings), to be added to the hidden
            states.
        output_attentions (`bool`, *optional*): whether to also return the
            attention tensors of all attention layers.
    """
    # --- self-attention block (post-norm residual) ---
    shortcut = hidden_states
    hidden_states, attn_weights = self.self_attn(
        hidden_states=hidden_states,
        attention_mask=attention_mask,
        object_queries=object_queries,
        output_attentions=output_attentions,
    )
    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
    hidden_states = self.self_attn_layer_norm(shortcut + hidden_states)

    # --- feed-forward block (post-norm residual) ---
    shortcut = hidden_states
    hidden_states = self.activation_fn(self.fc1(hidden_states))
    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
    hidden_states = self.fc2(hidden_states)
    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
    hidden_states = self.final_layer_norm(shortcut + hidden_states)

    outputs = (hidden_states,)
    if output_attentions:
        outputs = outputs + (attn_weights,)
    return outputs
return outputs | Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, source_len)` where padding elements are indicated by very large negative
values.
object_queries (`torch.FloatTensor`, *optional*):
Object queries (also called content embeddings), to be added to the hidden states.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail. | github-repos |
def check_import_stdlib(module):
    """Check if module is in the Python stdlib (or the app template).

    Args:
        module (str): The name of the module to check.

    Returns:
        bool: True if the module is in the stdlib or template, else False.
    """
    # Template modules that are always treated as "standard".
    if module in ['app', 'args', 'playbook_app']:
        return True
    return any(
        module in stdlib_list(version)
        for version in ('2.7', '3.4', '3.5', '3.6', '3.7')
    )
return False | Check if module is in Python stdlib.
Args:
module (str): The name of the module to check.
Returns:
bool: Returns True if the module is in the stdlib or template. | codesearchnet |
def get_allocated_fragments(self, id_or_uri, count=-1, start=0):
    """Gets all fragments that have been allocated in range.

    Args:
        id_or_uri: ID or URI of range.
        count: The number of resources to return. A count of -1 requests
            all items. The actual number of items in the response may
            differ from the requested count if the sum of start and count
            exceeds the total number of items.
        start: The first item to return, using 0-based indexing. If not
            specified, the default is 0 - start with the first available
            item.

    Returns:
        list: A list with the allocated fragments.
    """
    base_uri = self._client.build_uri(id_or_uri)
    query = '/allocated-fragments?start={0}&count={1}'.format(start, count)
    return self._client.get_collection(base_uri + query)
Args:
id_or_uri:
ID or URI of range.
count:
The number of resources to return. A count of -1 requests all items. The actual number of items in
the response may differ from the requested count if the sum of start and count exceed the total number
of items.
start:
The first item to return, using 0-based indexing. If not specified, the default is 0 - start with the
first available item.
Returns:
list: A list with the allocated fragements. | codesearchnet |
def get_rots(self) -> Rotation:
    """Getter for the rotation.

    Returns:
        The rotation object.
    """
    return self._rots
Returns:
The rotation object | github-repos |
def create_store(reducer, initial_state=None, enhancer=None):
    """redux in a nutshell.

    observable has been omitted.

    Args:
        reducer: root reducer function for the state tree.
        initial_state: optional initial state data.
        enhancer: optional enhancer function for middleware etc.

    Returns:
        a Pydux store.
    """
    if (enhancer is not None):
        if (not hasattr(enhancer, '__call__')):
            raise TypeError('Expected the enhancer to be a function.')
        # Delegate store creation to the enhancer (middleware chain).
        return enhancer(create_store)(reducer, initial_state)
    if (not hasattr(reducer, '__call__')):
        raise TypeError('Expected the reducer to be a function.')
    # Single-element lists act as mutable closure cells (pre-`nonlocal` style).
    current_reducer = [reducer]
    current_state = [initial_state]
    current_listeners = [[]]
    next_listeners = [current_listeners[0]]
    is_dispatching = [False]
    def ensure_can_mutate_next_listeners():
        # Copy-on-write: never mutate the list a dispatch may be iterating.
        if (next_listeners[0] == current_listeners[0]):
            next_listeners[0] = current_listeners[0][:]
    def get_state():
        return current_state[0]
    def subscribe(listener):
        if (not hasattr(listener, '__call__')):
            raise TypeError('Expected listener to be a function.')
        is_subscribed = [True]
        ensure_can_mutate_next_listeners()
        next_listeners[0].append(listener)
        def unsubcribe():
            # Idempotent: only the first call removes the listener.
            if (not is_subscribed[0]):
                return
            is_subscribed[0] = False
            ensure_can_mutate_next_listeners()
            index = next_listeners[0].index(listener)
            next_listeners[0].pop(index)
        return unsubcribe
    def dispatch(action):
        if (not isinstance(action, dict)):
            raise TypeError('Actions must be a dict. Use custom middleware for async actions.')
        if (action.get('type') is None):
            raise ValueError('Actions must have a non-None "type" property. Have you misspelled a constant?')
        if is_dispatching[0]:
            raise Exception('Reducers may not dispatch actions.')
        try:
            is_dispatching[0] = True
            current_state[0] = current_reducer[0](current_state[0], action)
        finally:
            is_dispatching[0] = False
        # Promote the pending listener list, then notify every subscriber.
        listeners = current_listeners[0] = next_listeners[0]
        for listener in listeners:
            listener()
        return action
    def replace_reducer(next_reducer):
        if (not hasattr(next_reducer, '__call__')):
            raise TypeError('Expected next_reducer to be a function')
        current_reducer[0] = next_reducer
        dispatch({'type': ActionTypes.INIT})
    # Populate the initial state tree.
    dispatch({'type': ActionTypes.INIT})
    return StoreDict(dispatch=dispatch, subscribe=subscribe, get_state=get_state, replace_reducer=replace_reducer)
observable has been omitted.
Args:
reducer: root reducer function for the state tree
initial_state: optional initial state data
enhancer: optional enhancer function for middleware etc.
Returns:
a Pydux store | codesearchnet |
def reset(self):
    """Reset a task.

    Allows a task to be started again, clears the ``return_values``.

    Raises:
        RuntimeError: If the task has not been stopped.
    """
    if (self._status is not TaskStatus.STOPPED):
        raise RuntimeError(('Cannot reset %s in state %s' % (self, self._status)))
    self._reset()
    self.return_values = {}
    self._status = TaskStatus.IDLE
Allows a task to be started again, clears the ``return_values``.
Raises:
RuntimeError: If the task has not been stopped. | codesearchnet |
def from_json(cls, json_value: Dict[str, Any], *, allow_partial: bool=False, root_path: Optional[utils.KeyPath]=None) -> 'DNA':
    """Class method that loads a DNA from a JSON value.

    Supports both the 'compact' format (value parsed via `DNA.parse`, with
    optional out-of-band metadata) and the regular symbolic format.

    Args:
        json_value: Input JSON value, only JSON dict is acceptable.
        allow_partial: Whether to allow elements of the list to be partial.
        root_path: KeyPath of loaded object in its object tree.

    Returns:
        A DNA object.
    """
    # Pop before dispatching on format so neither path sees this key.
    cloneable_metadata_keys = json_value.pop('_cloneable_metadata_keys', None)
    if json_value.get('format', None) == 'compact':
        # Type checks are disabled while reconstructing the compact value.
        with symbolic.enable_type_check(False):
            dna = DNA.parse(symbolic.from_json(json_value.get('value')))
        if 'metadata' in json_value:
            dna.rebind(metadata=symbolic.from_json(json_value.get('metadata')), raise_on_no_change=False, skip_notification=True)
    else:
        dna = super(DNA, cls).from_json(json_value, allow_partial=allow_partial, root_path=root_path)
    assert isinstance(dna, DNA)
    if cloneable_metadata_keys:
        dna._cloneable_metadata_keys = set(cloneable_metadata_keys)
    return dna
Args:
json_value: Input JSON value, only JSON dict is acceptable.
allow_partial: Whether to allow elements of the list to be partial.
root_path: KeyPath of loaded object in its object tree.
Returns:
A DNA object. | github-repos |
def shape(self) -> torch.Size:
    """Returns the virtual shape of the rotation object.

    This shape is defined as the batch dimensions of the underlying
    rotation matrix or quaternion. If the Rotation was initialized with a
    [10, 3, 3] rotation matrix tensor, for example, the resulting shape
    would be [10].

    Returns:
        The virtual shape of the rotation object.
    """
    if self._rot_mats is not None:
        # Strip the trailing 3x3 matrix dimensions.
        return self._rot_mats.shape[:-2]
    if self._quats is not None:
        # Strip the trailing quaternion component dimension.
        return self._quats.shape[:-1]
    raise ValueError('Both rotations are None')
underlying rotation matrix or quaternion. If the Rotation was initialized with a [10, 3, 3] rotation matrix
tensor, for example, the resulting shape would be [10].
Returns:
The virtual shape of the rotation object | github-repos |
def fill_rects(self, *rects):
    """Fill some number of rectangles on the current rendering target with
    the drawing color.

    Args:
        *rects (Rect): The destination rectangles.

    Raises:
        SDLError: If an error is encountered.
    """
    count = len(rects)
    # Marshal the Python Rect wrappers into a contiguous C array.
    native_rects = ffi.new('SDL_Rect[]', count)
    for index, rect in enumerate(rects):
        native_rects[index] = rect._ptr[0]
    check_int_err(lib.SDL_RenderFillRects(self._ptr, native_rects, count))
Args:
*rects (Rect): The destination rectangles.
Raises:
SDLError: If an error is encountered. | juraj-google-style |
def PushAttributeContainer(self, serialized_data):
    """Pushes a serialized attribute container onto the list.

    Args:
        serialized_data (bytes): serialized attribute container data.
    """
    self._list.append(serialized_data)
    self.next_sequence_number += 1
    # Track the total payload size across all pushed containers.
    self.data_size += len(serialized_data)
Args:
serialized_data (bytes): serialized attribute container data. | codesearchnet |
def heightmap_add_hm(
    hm1: np.ndarray, hm2: np.ndarray, hm3: np.ndarray
) -> None:
    """Add two heightmaps together and store the result in ``hm3``.

    Args:
        hm1 (numpy.ndarray): The first heightmap.
        hm2 (numpy.ndarray): The second heightmap to add to the first.
        hm3 (numpy.ndarray): A destination heightmap to store the result.

    .. deprecated:: 2.0
        Do ``hm3[:] = hm1[:] + hm2[:]`` instead.
    """
    # In-place slice assignment so callers keep their existing array object.
    hm3[:] = hm1 + hm2
Args:
hm1 (numpy.ndarray): The first heightmap.
hm2 (numpy.ndarray): The second heightmap to add to the first.
hm3 (numpy.ndarray): A destination heightmap to store the result.
.. deprecated:: 2.0
Do ``hm3[:] = hm1[:] + hm2[:]`` instead. | juraj-google-style |
def ParseFileObject(self, parser_mediator, file_object):
    """Parses a .customDestinations-ms file-like object.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        file_object (dfvfs.FileIO): a file-like object.

    Raises:
        UnableToParseFile: when the file cannot be parsed.
    """
    file_entry = parser_mediator.GetFileEntry()
    display_name = parser_mediator.GetDisplayName()
    # --- file header ---
    file_header_map = self._GetDataTypeMap('custom_file_header')
    try:
        file_header, file_offset = self._ReadStructureFromFileObject(
            file_object, 0, file_header_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.UnableToParseFile((
            'Invalid Custom Destination: {0:s} - unable to parse file header '
            'with error: {1!s}').format(display_name, exception))
    if file_header.unknown1 != 2:
        raise errors.UnableToParseFile((
            'Unsupported Custom Destination file: {0:s} - invalid unknown1: '
            '{1:d}.').format(display_name, file_header.unknown1))
    if file_header.header_values_type > 2:
        raise errors.UnableToParseFile((
            'Unsupported Custom Destination file: {0:s} - invalid header value '
            'type: {1:d}.').format(display_name, file_header.header_values_type))
    # The header value layout depends on the header values type.
    if file_header.header_values_type == 0:
        data_map_name = 'custom_file_header_value_type_0'
    else:
        data_map_name = 'custom_file_header_value_type_1_or_2'
    file_header_value_map = self._GetDataTypeMap(data_map_name)
    try:
        _, value_data_size = self._ReadStructureFromFileObject(
            file_object, file_offset, file_header_value_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.UnableToParseFile((
            'Invalid Custom Destination: {0:s} - unable to parse file header '
            'value with error: {1!s}').format(display_name, exception))
    file_offset += value_data_size
    file_size = file_object.get_size()
    remaining_file_size = file_size - file_offset
    entry_header_map = self._GetDataTypeMap('custom_entry_header')
    file_footer_map = self._GetDataTypeMap('custom_file_footer')
    # Scan consecutive LNK entries until the footer is found. The first
    # entry must carry the LNK GUID or the file is rejected outright; after
    # that, failures only produce extraction warnings.
    first_guid_checked = False
    while remaining_file_size > 4:
        try:
            entry_header, entry_data_size = self._ReadStructureFromFileObject(
                file_object, file_offset, entry_header_map)
        except (ValueError, errors.ParseError) as exception:
            if not first_guid_checked:
                raise errors.UnableToParseFile((
                    'Invalid Custom Destination file: {0:s} - unable to parse '
                    'entry header with error: {1!s}').format(
                        display_name, exception))
            parser_mediator.ProduceExtractionWarning(
                'unable to parse entry header with error: {0!s}'.format(
                    exception))
            break
        if entry_header.guid != self._LNK_GUID:
            if not first_guid_checked:
                raise errors.UnableToParseFile((
                    'Unsupported Custom Destination file: {0:s} - invalid entry '
                    'header signature offset: 0x{1:08x}.').format(
                        display_name, file_offset))
            # A non-LNK entry after valid entries is expected to be the
            # file footer; anything else is flagged.
            try:
                file_footer, _ = self._ReadStructureFromFileObject(
                    file_object, file_offset, file_footer_map)
                if file_footer.signature != self._FILE_FOOTER_SIGNATURE:
                    parser_mediator.ProduceExtractionWarning(
                        'invalid entry header signature at offset: 0x{0:08x}'.format(
                            file_offset))
            except (ValueError, errors.ParseError) as exception:
                parser_mediator.ProduceExtractionWarning((
                    'unable to parse footer at offset: 0x{0:08x} with error: '
                    '{1!s}').format(file_offset, exception))
                break
            break
        first_guid_checked = True
        file_offset += entry_data_size
        remaining_file_size -= entry_data_size
        lnk_file_size = self._ParseLNKFile(
            parser_mediator, file_entry, file_offset, remaining_file_size)
        file_offset += lnk_file_size
        remaining_file_size -= lnk_file_size
    # Final validation of the trailing file footer.
    try:
        file_footer, _ = self._ReadStructureFromFileObject(
            file_object, file_offset, file_footer_map)
        if file_footer.signature != self._FILE_FOOTER_SIGNATURE:
            parser_mediator.ProduceExtractionWarning(
                'invalid footer signature at offset: 0x{0:08x}'.format(file_offset))
    except (ValueError, errors.ParseError) as exception:
        parser_mediator.ProduceExtractionWarning((
            'unable to parse footer at offset: 0x{0:08x} with error: '
            '{1!s}').format(file_offset, exception))
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed. | juraj-google-style |
def detect_gaps(dataframe, timestep, print_all=False, print_max=5, verbose=True):
    """Check if a given dataframe contains gaps and return the number of gaps.

    This function checks if a dataframe contains any gaps for a given
    temporal resolution that needs to be specified in seconds. The number
    of gaps detected in the dataframe is returned.

    Args:
        dataframe: A pandas dataframe object with index defined as datetime.
        timestep (int): The temporal resolution of the time series in
            seconds (e.g., 86400 for daily values).
        print_all (bool, opt): Lists every gap on the screen.
        print_max (int, opt): The maximum number of gaps listed on the
            screen in order to avoid a decrease in performance if numerous
            gaps occur.
        verbose (bool, opt): Enables/disables output to the screen.

    Returns:
        The number of gaps as integer. Negative values indicate errors.
    """
    gcount = 0
    msg_counter = 0
    warning_printed = False
    # BUG FIX: previously a bare `except:` — catch only the failures a bad
    # `dataframe` argument can produce.
    try:
        n = len(dataframe.index)
    except (TypeError, AttributeError):
        print('Error: Invalid dataframe.')
        return -1
    for i in range(1, n):
        time_diff = dataframe.index[i] - dataframe.index[i - 1]
        # BUG FIX: `Timedelta.delta` was removed from pandas;
        # total_seconds() yields the same value (delta was nanoseconds).
        if time_diff.total_seconds() != timestep:
            gcount += 1
            if print_all or msg_counter <= print_max - 1:
                if verbose:
                    print('Warning: Gap in time series found between %s and %s' % (dataframe.index[i - 1], dataframe.index[i]))
                msg_counter += 1
            if msg_counter == print_max and verbose and not warning_printed:
                # BUG FIX: message typo 'Waring' corrected.
                print('Warning: Only the first %i gaps have been listed. Try to increase print_max parameter to show more details.' % msg_counter)
                warning_printed = True
    if verbose:
        print('%i gaps found in total.' % gcount)
    return gcount
This funtion checks if a dataframe contains any gaps for a given temporal
resolution that needs to be specified in seconds. The number of gaps
detected in the dataframe is returned.
Args:
dataframe: A pandas dataframe object with index defined as datetime
timestep (int): The temporal resolution of the time series in seconds
(e.g., 86400 for daily values)
print_all (bool, opt): Lists every gap on the screen
print_mx (int, opt): The maximum number of gaps listed on the screen in
order to avoid a decrease in performance if numerous gaps occur
verbose (bool, opt): Enables/disables output to the screen
Returns:
The number of gaps as integer. Negative values indicate errors. | codesearchnet |
def _parse_peer_address(self, config):
match = re.search('peer-address ([^\\s]+)', config)
value = (match.group(1) if match else None)
return dict(peer_address=value) | Scans the config block and parses the peer-address value
Args:
config (str): The config block to scan
Returns:
dict: A dict object that is intended to be merged into the
resource dict | codesearchnet |
def find_coord(targ_length, xyz, rcum, theta, phi):
    """Find (x,y,z) ending coordinate of segment path along section path.

    Args:
        targ_length: scalar specifying length of segment path, starting
            from the beginning of the section path.
        xyz: coordinates specifying the section path.
        rcum: cumulative sum of section path length at each node in xyz.
        theta, phi: angles between each coordinate in xyz.
    """
    # Last node whose cumulative length does not exceed the target length.
    node_idx = np.nonzero(rcum <= targ_length)[0][-1]
    if node_idx == len(theta):
        # Target falls on (or past) the final node.
        return xyz[-1, :]
    # Walk the leftover distance along the local segment direction.
    remaining = targ_length - rcum[node_idx]
    (dx, dy, dz) = spherical_to_cartesian(remaining, theta[node_idx], phi[node_idx])
    return xyz[node_idx, :] + [dx, dy, dz]
path.
Args:
targ_length = scalar specifying length of segment path, starting
from the begining of the section path
xyz = coordinates specifying the section path
rcum = cumulative sum of section path length at each node in xyz
theta, phi = angles between each coordinate in xyz | juraj-google-style |
def get_associated(self, retrieve=False):
    """get pcdm:hasRelatedFile for this resource.

    Args:
        retrieve (bool): if True, issue .refresh() on resource thereby
            confirming existence and retrieving payload (currently unused
            in this implementation).
    """
    has_related = (self.exists
                   and hasattr(self.rdf.triples, 'pcdm')
                   and hasattr(self.rdf.triples.pcdm, 'hasRelatedFile'))
    if not has_related:
        return []
    return [self.repo.parse_uri(uri) for uri in self.rdf.triples.pcdm.hasRelatedFile]
Args:
retrieve (bool): if True, issue .refresh() on resource thereby confirming existence and retrieving payload | juraj-google-style |
def should_stop(self):
    """Check if stop was requested.

    Returns:
        True if a stop was requested.
    """
    return self._stop_event.is_set()
Returns:
True if a stop was requested. | github-repos |
def get_component(self, component_name):
    """Looks up a component by its name.

    Args:
        component_name: The name of the component to look up.

    Returns:
        The component for the provided name or None if there is no such
        component.
    """
    components = self.get_components()
    if component_name not in components:
        return None
    return components[component_name]
Args:
component_name: The name of the component to look up.
Returns:
The component for the provided name or None if there is no such component. | codesearchnet |
def save_to_text_file(monsoon_data, file_path):
    """Save multiple MonsoonData objects to a text file.

    Args:
        monsoon_data: A list of MonsoonData objects to write to a text
            file.
        file_path: The full path of the file to save to, including the
            file name.

    Raises:
        MonsoonError: if `monsoon_data` is empty.
    """
    if not monsoon_data:
        raise MonsoonError("Attempting to write empty Monsoon data to "
                           "file, abort")
    # Make sure the destination directory exists before writing.
    utils.create_dir(os.path.dirname(file_path))
    with io.open(file_path, 'w', encoding='utf-8') as out:
        for record in monsoon_data:
            out.write(str(record) + MonsoonData.delimiter)
Args:
monsoon_data: A list of MonsoonData objects to write to a text
file.
file_path: The full path of the file to save to, including the file
name. | juraj-google-style |
async def init(self, name, conf=None):
    """Generate a new CryoTank with a given name or get a reference to an
    existing CryoTank.

    Args:
        name (str): Name of the CryoTank.

    Returns:
        CryoTank: A CryoTank instance.
    """
    existing = self.tanks.get(name)
    if existing is not None:
        return existing
    iden = s_common.guid()
    logger.info('Creating new tank: %s', name)
    tank_path = s_common.genpath(self.dirn, 'tanks', iden)
    tank = await CryoTank.anit(tank_path, conf)
    # Persist the name -> (iden, conf) mapping before caching the tank.
    node = await self.names.open((name,))
    await node.set((iden, conf))
    self.tanks.put(name, tank)
    return tank
Args:
name (str): Name of the CryoTank.
Returns:
CryoTank: A CryoTank instance. | juraj-google-style |
def MoveStateToNextToken(self):
    """Calculate format decision state information and move onto the next token.

    Before moving onto the next token, we first calculate the format
    decision state given the current token and its formatting decisions.
    Then the format decision state is set up so that the next token can be
    added.

    Returns:
        The penalty for the number of characters over the column limit.
    """
    current = self.next_token
    if not current.OpensScope() and (not current.ClosesScope()):
        self.lowest_level_on_line = min(self.lowest_level_on_line, self.paren_level)
    # Opening a scope pushes a new paren state with continuation indent.
    if current.OpensScope():
        last = self.stack[-1]
        new_indent = style.Get('CONTINUATION_INDENT_WIDTH') + last.last_space
        self.stack.append(_ParenState(new_indent, self.stack[-1].last_space))
        self.paren_level += 1
    # Closing a scope propagates spacing info to the enclosing state.
    if len(self.stack) > 1 and current.ClosesScope():
        if subtypes.DICTIONARY_KEY_PART in current.subtypes:
            self.stack[-2].last_space = self.stack[-2].indent
        else:
            self.stack[-2].last_space = self.stack[-1].last_space
        self.stack.pop()
        self.paren_level -= 1
    # Multiline strings only advance the column by their first line here;
    # the final column is reset below after penalties are assessed.
    is_multiline_string = current.is_string and '\n' in current.value
    if is_multiline_string:
        self.column += len(current.value.split('\n')[0])
    elif not current.is_pseudo:
        self.column += len(current.value)
    self.next_token = self.next_token.next_token
    penalty = 0
    # Tooling comments are exempt from the over-column-limit penalty.
    if not current.is_pylint_comment and (not current.is_pytype_comment) and (not current.is_copybara_comment) and (self.column > self.column_limit):
        excess_characters = self.column - self.column_limit
        penalty += style.Get('SPLIT_PENALTY_EXCESS_CHARACTER') * excess_characters
    if is_multiline_string:
        self.column = len(current.value.split('\n')[-1])
    return penalty
Before moving onto the next token, we first calculate the format decision
state given the current token and its formatting decisions. Then the format
decision state is set up so that the next token can be added.
Returns:
The penalty for the number of characters over the column limit. | github-repos |
def _binary_product(variables):
    """Create a bqm with a gap of 2 that represents the product of two
    variables.

    Args:
        variables (list): multiplier, multiplicand, product.

    Returns:
        :obj:`.BinaryQuadraticModel`
    """
    multiplier, multiplicand, product = variables
    linear = {multiplier: 0.0,
              multiplicand: 0.0,
              product: 3.0}
    quadratic = {(multiplier, multiplicand): 1.0,
                 (multiplier, product): -2.0,
                 (multiplicand, product): -2.0}
    return BinaryQuadraticModel(linear, quadratic, 0.0, Vartype.BINARY)
Args:
variables (list):
multiplier, multiplicand, product
Returns:
:obj:`.BinaryQuadraticModel` | juraj-google-style |
def LessThanOrEqualTo(self, value):
    """Sets the type of the WHERE clause as "less than or equal to".

    Args:
        value: The value to be used in the WHERE condition.

    Returns:
        The query builder that this WHERE builder links to.
    """
    self._awql = self._CreateSingleValueCondition(value, '<=')
    return self._query_builder
Args:
value: The value to be used in the WHERE condition.
Returns:
The query builder that this WHERE builder links to. | codesearchnet |
def config_file(self, filename):
    """Load configuration data from the provided file.

    Args:
        filename (str): The configuration file name.
    """
    if not os.path.isfile(filename):
        self.tcex.log.error('Could not load configuration file "{}".'.format(filename))
        return
    with open(filename, 'r') as fh:
        self._config_data = json.load(fh)
Args:
config (str): The configuration file name. | codesearchnet |
def recipe_email_dv360_to_bigquery(config, auth_read, email, subject, dataset, table, dbm_schema, is_incremental_load):
email(config, {'auth': auth_read, 'read': {'from': 'noreply-dv360@google.com', 'to': email, 'subject': subject, 'link': 'https: | Pulls a DV360 Report from a gMail email into BigQuery.
Args:
auth_read (authentication) - Credentials used for reading data.
email (string) - Email address report was sent to.
subject (string) - Regular expression to match subject. Double escape backslashes.
dataset (string) - Existing dataset in BigQuery.
table (string) - Name of table to be written to.
dbm_schema (json) - Schema provided in JSON list format or empty list.
is_incremental_load (boolean) - Append report data to table based on date column, de-duplicates. | github-repos |
def wrap_cached_variables(concrete_function):
    """Wraps the concrete function if it uses cached read tensors.

    This function creates a new concrete function that captures variables
    instead of the cached read tensors.

    Args:
        concrete_function: A Concrete function that maybe captures cached
            read tensors.

    Returns:
        A concrete function that wraps the original concrete function,
        which captures variables instead. If the original function did not
        capture any cached values, then the function is not wrapped and the
        original object is returned.
    """
    outer_graph = func_graph_module.FuncGraph('{}_no_cache'.format(concrete_function.graph.name))
    mapped_captures = None
    # Remember replaced captures so they can be restored on the original
    # function's graph after wrapping.
    remapped_captures = {}
    with outer_graph.as_default():
        for capture, placeholder in concrete_function.graph.captures:
            cached_variable = getattr(capture, '_cached_variable', None)
            if cached_variable is None:
                continue
            # `_cached_variable` is a weakref; dereference to the variable.
            cached_variable = cached_variable()
            new_cached_value = cached_variable.read_value()
            key = id(capture)
            external = concrete_function.graph.function_captures.by_val_external[key]
            internal = concrete_function.graph.function_captures.by_val_internal[key]
            remapped_captures[key] = [external, internal]
            # Swap the cached read tensor for a fresh variable read.
            concrete_function.graph.function_captures.add_or_replace(key=key, external=new_cached_value, internal=placeholder, is_by_ref=False)
            mapped_captures = True
    if not mapped_captures:
        # Nothing was cached; return the original function unwrapped.
        return concrete_function
    inner_concrete = defun.ConcreteFunction.from_func_graph(concrete_function.graph, concrete_function.function_type, {})
    def wrap_function(*args):
        return inner_concrete._call_flat(list(args), inner_concrete.captured_inputs)
    args = nest.flatten(concrete_function.structured_input_signature, expand_composites=True)
    func_graph_module.func_graph_from_py_func(None, wrap_function, args=tuple(args), kwargs={}, func_graph=outer_graph)
    fn = defun.ConcreteFunction.from_func_graph(outer_graph, concrete_function.function_type, {})
    fn._arg_keywords = concrete_function._arg_keywords
    fn._num_positional_args = concrete_function._num_positional_args
    # Restore the original captures on the wrapped function's graph.
    for key, capture in remapped_captures.items():
        external, internal = capture
        concrete_function.graph._function_captures.add_or_replace(key=key, external=external, internal=internal, is_by_ref=False)
    return fn
This function creates a new concrete function that captures variables
instead of the cached read tensors.
Args:
concrete_function: A Concrete function that maybe captures cached read
tensors.
Returns:
A concrete function that wraps the original concrete function, which
captures variables instead. If the original function did not capture any
cached values, then the function is not wrapped and the original object is
returned. | github-repos |
def print_table(self, stream=sys.stdout, filter_function=None):
    """A pretty ASCII printer for the periodic table, based on some
    filter_function.

    Args:
        stream: file-like object.
        filter_function: A filtering function that takes a Pseudo as input
            and returns a boolean. For example, setting
            filter_function = lambda p: p.Z_val > 2 will print a periodic
            table containing only pseudos with Z_val > 2.
    """
    table = self.to_table(filter_function=filter_function)
    print(table, file=stream)
Args:
stream: file-like object
filter_function:
A filtering function that take a Pseudo as input and returns a boolean.
For example, setting filter_function = lambda p: p.Z_val > 2 will print
a periodic table containing only pseudos with Z_val > 2. | juraj-google-style |
def compile_default_action(self, batch_size: Optional[int] = None) -> Sequence[tf.Tensor]:
    """Returns a tuple of tensors representing the default action fluents.

    Args:
        batch_size (int): The batch size; when None the fluents are
            returned un-batched.

    Returns:
        Sequence[tf.Tensor]: A tuple of tensors.
    """
    with self.graph.as_default():
        with tf.name_scope('default_action'):
            self._initialize_default_action_fluents()
            if batch_size is None:
                return self.default_action_fluents
            return self._compile_batch_fluents(self.default_action_fluents, batch_size)
Args:
batch_size (int): The batch size.
Returns:
Sequence[tf.Tensor]: A tuple of tensors. | juraj-google-style |
def num_tasks(self, job_name):
    """Returns the number of tasks defined in the given job.

    Args:
        job_name: The string name of a job in this cluster.

    Returns:
        The number of tasks defined in the given job.

    Raises:
        ValueError: If `job_name` does not name a job in this cluster.
    """
    if job_name not in self._cluster_spec:
        raise ValueError('No such job in cluster: %r' % job_name)
    return len(self._cluster_spec[job_name])
return len(job) | Returns the number of tasks defined in the given job.
Args:
job_name: The string name of a job in this cluster.
Returns:
The number of tasks defined in the given job.
Raises:
ValueError: If `job_name` does not name a job in this cluster. | github-repos |
def from_pandas(pandas_df, dataset_class=dataset.pandas_dataset.PandasDataset, expectations_config=None, autoinspect_func=None):
    """Convert a pandas DataFrame into a great_expectations dataset.

    Args:
        pandas_df: the pandas DataFrame to convert.
        dataset_class: Dataset class the frame is converted to
            (defaults to ``PandasDataset``).
        expectations_config: optional path to a great_expectations config.
        autoinspect_func: optional autoinspection function run on the
            dataset to establish baseline expectations.

    Returns:
        A great_expectations dataset wrapping *pandas_df*.
    """
    converted = _convert_to_dataset_class(pandas_df, dataset_class, expectations_config, autoinspect_func)
    return converted
Args:
pandas_df (Pandas df): Pandas data frame
dataset_class (Dataset class) = dataset.pandas_dataset.PandasDataset:
class to which to convert resulting Pandas df
expectations_config (string) = None: path to great_expectations config file
autoinspect_func (function) = None: The autoinspection function that should
be run on the dataset to establish baseline expectations.
Returns:
great_expectations dataset | codesearchnet |
def deserialize(config, custom_objects=None):
    """Instantiate a Keras optimizer from its configuration dictionary.

    Args:
        config: Optimizer configuration dictionary.
        custom_objects: Optional mapping of names to custom classes or
            functions consulted during deserialization.

    Returns:
        A Keras Optimizer instance.
    """
    class_name = config['class_name']
    # Known optimizer names are normalized to lowercase before lookup.
    if class_name.lower() in ALL_OBJECTS_DICT:
        config['class_name'] = class_name.lower()
    return serialization_lib.deserialize_keras_object(config, module_objects=ALL_OBJECTS_DICT, custom_objects=custom_objects)
Args:
config: Optimizer configuration dictionary.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during
deserialization.
Returns:
A Keras Optimizer instance. | github-repos |
def get_cytoband_coordinates(chrom, pos):
    """Return the cytoband coordinate for a chromosome position.

    Args:
        chrom (str): chromosome name.
        pos (int): position on the chromosome.

    Returns:
        str: the coordinate label, or "" when the chromosome is unknown.
    """
    if chrom not in CYTOBANDS:
        return ""
    coordinate = ""
    # Keep the data of the last overlapping interval (original behavior).
    for interval in CYTOBANDS[chrom][pos]:
        coordinate = interval.data
    return coordinate
Args:
chrom(str)
pos(int)
Returns:
coordinate(str) | juraj-google-style |
def _is_default_hook(default_hook, hook):
if not hasattr(default_hook, '__call__'):
raise TypeError('Default hooks for ndb.model.Model must be callable')
if not hasattr(hook, '__call__'):
raise TypeError('Hooks must be callable')
return default_hook.im_func is hook.im_func | Checks whether a specific hook is in its default state.
Args:
default_hook: Callable specified by ndb internally (do not override).
hook: The hook defined by a model class using _post_*_hook.
Returns:
True if `hook` is the default implementation, False otherwise.
Raises:
TypeError if either the default hook or the tested hook are not callable. | juraj-google-style |
def _getGraphOpTypes(self, graphdef, output_nodes):
    """Collect the op types of nodes in *graphdef* reachable from *output_nodes*.

    Used to sanity-check that the expected nodes survive the stub
    transformation; it is not an exact graph-equality test.

    Args:
        graphdef: TensorFlow proto graphdef.
        output_nodes: A list of output node names that must be reached.

    Returns:
        A set of op-type strings reachable from *output_nodes*.
    """
    name_to_input_name, name_to_node, _ = _extract_graph_summary(graphdef)
    reachable = _bfs_for_reachable_nodes(output_nodes, name_to_input_name)
    return {name_to_node[name].op for name in reachable}
This is used to check that after the stub transformation the expected
nodes are there.
NOTE: this is not a exact test that the graph is the correct output, but
it balances compact expressibility of test with sanity checking.
Args:
graphdef: TensorFlow proto graphdef.
output_nodes: A list of output node names that we need to reach.
Returns:
A set of node types reachable from `output_nodes`. | github-repos |
def update_metadata(token: str, commit_sha: str):
    """Update the metadata for the Transformers repo in `huggingface/transformers-metadata`.

    Rebuilds the frameworks and pipeline-tags tables locally, diffs them
    against the copies currently on the Hub, and uploads only when they
    changed.

    Args:
        token (`str`): A valid token giving write access to `huggingface/transformers-metadata`.
        commit_sha (`str`): The commit SHA on Transformers corresponding to this update.
    """
    # Build the frameworks table from the local codebase.
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)
    # Start from the pipeline-tags table currently on the Hub...
    resolved_tags_file = hf_hub_download('huggingface/transformers-metadata', 'pipeline_tags.json', repo_type='dataset', token=token)
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class']) for i in range(len(tags_dataset))}
    # ...and refresh it with any new model classes / auto mappings.
    table = update_pipeline_and_auto_class_table(table)
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame({'model_class': model_classes, 'pipeline_tag': [table[m][0] for m in model_classes], 'auto_class': [table[m][1] for m in model_classes]})
    tags_dataset = Dataset.from_pandas(tags_table)
    # Fetch the current Hub copies so we can diff before pushing.
    hub_frameworks_json = hf_hub_download(repo_id='huggingface/transformers-metadata', filename='frameworks.json', repo_type='dataset', token=token)
    with open(hub_frameworks_json) as f:
        hub_frameworks_json = f.read()
    hub_pipeline_tags_json = hf_hub_download(repo_id='huggingface/transformers-metadata', filename='pipeline_tags.json', repo_type='dataset', token=token)
    with open(hub_pipeline_tags_json) as f:
        hub_pipeline_tags_json = f.read()
    with tempfile.TemporaryDirectory() as tmp_dir:
        # Serialize the freshly built tables to JSON for comparison/upload.
        frameworks_dataset.to_json(os.path.join(tmp_dir, 'frameworks.json'))
        tags_dataset.to_json(os.path.join(tmp_dir, 'pipeline_tags.json'))
        with open(os.path.join(tmp_dir, 'frameworks.json')) as f:
            frameworks_json = f.read()
        with open(os.path.join(tmp_dir, 'pipeline_tags.json')) as f:
            pipeline_tags_json = f.read()
        frameworks_equal = hub_frameworks_json == frameworks_json
        hub_pipeline_tags_equal = hub_pipeline_tags_json == pipeline_tags_json
        if frameworks_equal and hub_pipeline_tags_equal:
            # Nothing changed on either table: skip the push entirely.
            print('No updates on the Hub, not pushing the metadata files.')
            return
        if commit_sha is not None:
            # NOTE(review): this line appears truncated in the source (the URL
            # after "https:" and the closing quote are missing) — restore the
            # original commit-link f-string before running this code.
            commit_message = f'Update with commit {commit_sha}\n\nSee: https:
        else:
            commit_message = 'Update'
        upload_folder(repo_id='huggingface/transformers-metadata', folder_path=tmp_dir, repo_type='dataset', token=token, commit_message=commit_message)
Args:
token (`str`): A valid token giving write access to `huggingface/transformers-metadata`.
commit_sha (`str`): The commit SHA on Transformers corresponding to this update. | github-repos |
def line_intersection(self, point1, point2, tolerance=1e-8):
    """Computes the intersection points of a line with a simplex.

    Args:
        point1, point2 ([float]): Points that determine the line.
        tolerance (float): Absolute tolerance used both to accept points
            on the simplex boundary and to deduplicate intersections.

    Returns:
        Points where the line intersects the simplex (0, 1, or 2 points).
    """
    # Work in barycentric coordinates of this simplex.
    b1 = self.bary_coords(point1)
    b2 = self.bary_coords(point2)
    l = b1 - b2
    # Only coordinates along which the line actually varies can reach a
    # facet; this mask also avoids division by ~0 below.
    valid = np.abs(l) > 1e-10
    # For each varying coordinate, the parameter at which it hits zero
    # yields a candidate intersection with a facet of the simplex.
    possible = b1 - (b1[valid] / l[valid])[:, None] * l
    barys = []
    for p in possible:
        # Keep only candidates inside the simplex: all barycentric
        # coordinates non-negative, up to tolerance.
        if (p >= -tolerance).all():
            found = False
            # Deduplicate candidates that coincide within tolerance.
            for b in barys:
                if np.all(np.abs(b - p) < tolerance):
                    found = True
                    break
            if not found:
                barys.append(p)
    # A line can cross a convex simplex in at most two points.
    assert len(barys) < 3
    return [self.point_from_bary_coords(b) for b in barys]
Args:
point1, point2 ([float]): Points that determine the line
Returns:
points where the line intersects the simplex (0, 1, or 2) | juraj-google-style |
def _make_auth(self, method, date, nonce, path, query={}, ctype='application/json'):
query = urlencode(query)
hmac_str = (method + '\n' + nonce + '\n' + date + '\n' + ctype + '\n' + path +
'\n' + query + '\n').lower().encode('utf-8')
signature = base64.b64encode(hmac.new(self._secret_key, hmac_str, digestmod=hashlib.sha256).digest())
auth = 'On ' + self._access_key.decode('utf-8') + ':HmacSHA256:' + signature.decode('utf-8')
if self._logging:
utils.log({
'query': query,
'hmac_str': hmac_str,
'signature': signature,
'auth': auth
})
return auth | Create the request signature to authenticate
Args:
- method (str): HTTP method
- date (str): HTTP date header string
- nonce (str): Cryptographic nonce
- path (str): URL pathname
- query (dict, default={}): URL query string in key-value pairs
- ctype (str, default='application/json'): HTTP Content-Type | juraj-google-style |
def __call__(self, shape, dtype=None, **kwargs):
    """Return a tensor of the given shape initialized with random normal values.

    Args:
        shape: Shape of the tensor.
        dtype: Optional dtype; must be a floating point type. Defaults to
            the backend float type when not specified.
        **kwargs: Additional keyword arguments; may carry a partition shape.
    """
    _validate_kwargs(self.__class__.__name__, kwargs)
    dtype = _assert_float_dtype(_get_dtype(dtype))
    # A partitioned variable overrides the requested shape.
    shape = kwargs.get(_PARTITION_SHAPE, shape)
    return self._random_generator.random_normal(shape, self.mean, self.stddev, dtype)
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used, which
default to `float32` unless you configured it otherwise (via
`tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments. | github-repos |
def get_folders(cls, session, mailbox_or_id):
    """List the folders of a mailbox.

    Args:
        mailbox_or_id (helpscout.models.Mailbox or int): Mailbox object or
            the ID of the mailbox whose folders are listed.

    Returns:
        RequestPaginator(output_type=helpscout.models.Folder): Folders
        iterator.
    """
    if isinstance(mailbox_or_id, Mailbox):
        mailbox_or_id = mailbox_or_id.id
    endpoint = '/mailboxes/%d/folders.json' % mailbox_or_id
    return cls(endpoint, session=session, out_type=Folder)
Args:
mailbox_or_id (helpscout.models.Mailbox or int): Mailbox or the ID
of the mailbox to get the folders for.
Returns:
RequestPaginator(output_type=helpscout.models.Folder): Folders
iterator. | juraj-google-style |
class _IndicatorColumn(_DenseColumn, _SequenceDenseColumn, collections.namedtuple('_IndicatorColumn', ['categorical_column'])):
    """Represents a one-hot column for use in deep networks.

    Wraps a categorical column and densifies its sparse ids into a
    one-hot (or weighted multi-hot) tensor.

    Args:
        categorical_column: A `_CategoricalColumn` which is created by
            `categorical_column_with_*` function.
    """

    @property
    def name(self):
        # Feature name is derived from the wrapped categorical column.
        return '{}_indicator'.format(self.categorical_column.name)

    def _transform_feature(self, inputs):
        """Densify the categorical ids into a one-hot / multi-hot tensor."""
        id_weight_pair = self.categorical_column._get_sparse_tensors(inputs)
        id_tensor = id_weight_pair.id_tensor
        weight_tensor = id_weight_pair.weight_tensor
        if weight_tensor is not None:
            # Weighted case: scatter the weights into a dense multi-hot tensor.
            weighted_column = sparse_ops.sparse_merge(sp_ids=id_tensor, sp_values=weight_tensor, vocab_size=int(self._variable_shape[-1]))
            # Slice to the declared dense shape.
            weighted_column = sparse_ops.sparse_slice(weighted_column, [0, 0], weighted_column.dense_shape)
            return array_ops.scatter_nd(weighted_column.indices, weighted_column.values, weighted_column.dense_shape)
        dense_id_tensor = sparse_ops.sparse_tensor_to_dense(id_tensor, default_value=-1)
        # Missing positions are filled with -1, which falls outside the one_hot
        # depth and therefore produces all-zero rows.
        one_hot_id_tensor = array_ops.one_hot(dense_id_tensor, depth=self._variable_shape[-1], on_value=1.0, off_value=0.0)
        # Reduce over the id dimension to merge multivalent entries.
        return math_ops.reduce_sum(one_hot_id_tensor, axis=[-2])

    @property
    def _parse_example_spec(self):
        # Parsing spec is delegated to the wrapped categorical column.
        return self.categorical_column._parse_example_spec

    @property
    def _variable_shape(self):
        # Dense output shape: one slot per bucket of the categorical column.
        return tensor_shape.TensorShape([1, self.categorical_column._num_buckets])

    def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
        # Indicator columns create no variables, so these are unused.
        del weight_collections
        del trainable
        if isinstance(self.categorical_column, _SequenceCategoricalColumn):
            raise ValueError('In indicator_column: {}. categorical_column must not be of type _SequenceCategoricalColumn. Suggested fix A: If you wish to use input_layer, use a non-sequence categorical_column_with_*. Suggested fix B: If you wish to create sequence input, use sequence_input_layer instead of input_layer. Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column))
        # The dense tensor was already computed in _transform_feature.
        return inputs.get(self)

    def _get_sequence_dense_tensor(self, inputs, weight_collections=None, trainable=None):
        # Indicator columns create no variables, so these are unused.
        del weight_collections
        del trainable
        if not isinstance(self.categorical_column, _SequenceCategoricalColumn):
            raise ValueError('In indicator_column: {}. categorical_column must be of type _SequenceCategoricalColumn to use sequence_input_layer. Suggested fix: Use one of sequence_categorical_column_with_*. Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column))
        dense_tensor = inputs.get(self)
        sparse_tensors = self.categorical_column._get_sparse_tensors(inputs)
        # Recover per-example sequence lengths from the underlying sparse ids.
        sequence_length = fc_utils.sequence_length_from_sparse_tensor(sparse_tensors.id_tensor)
        return _SequenceDenseColumn.TensorSequenceLengthPair(dense_tensor=dense_tensor, sequence_length=sequence_length)
Args:
categorical_column: A `_CategoricalColumn` which is created by
`categorical_column_with_*` function. | github-repos |
def bin(self, bins, labels=None):
    """Bin continuous values into the supplied boundaries.

    Args:
        bins: List or array containing the bin boundaries.
        labels: Optional labels for each bin; for N boundaries there
            should be N-1 labels. When omitted, labels are derived from
            the bin centers.
    """
    binned = dim(self, bin, bins, labels=labels)
    return binned
Bins continuous using the provided bins and assigns labels
either computed from each bins center point or from the
supplied labels.
Args:
bins: List or array containing the bin boundaries
labels: List of labels to assign to each bin
If the bins are length N the labels should be length N-1 | codesearchnet |
def __init__(self, *args, **kwargs):
    """Create an instance.

    Forwards all arguments to the base transaction class, then marks the
    transaction type as a state transaction.

    Args:
        *args: positional arguments passed through to the base class.
        **kwargs: keyword arguments passed through to the base class.
    """
    super(StateTransaction, self).__init__(*args, **kwargs)
    self.Type = TransactionType.StateTransaction
Args:
*args:
**kwargs: | juraj-google-style |
def authenticate(self, request):
    """Authenticate via the Django session, requiring a logged-in user and CSRF.

    Same as DRF's ``SessionAuthentication`` but without the
    ``user.is_active`` check.

    Args:
        request (HttpRequest)

    Returns:
        ``(user, None)`` on success, or ``None`` when nobody is logged in.

    Raises:
        PermissionDenied: The CSRF token check failed.
    """
    django_request = request._request
    user = getattr(django_request, 'user', None)
    if not user or user.is_anonymous:
        return None
    self.enforce_csrf(django_request)
    return (user, None)
This is exactly the same as the `SessionAuthentication` implementation,
with the `user.is_active` check removed.
Args:
request (HttpRequest)
Returns:
Tuple of `(user, token)`
Raises:
PermissionDenied: The CSRF token check failed. | juraj-google-style |
def get(self, key, index=None):
    """Return the value stored for *key*, or None when absent.

    Args:
        key (str): The key to retrieve.
        index: Optional index passed through to ``get_multi``.
    """
    records = self.get_multi([key], index=index)
    if not records:
        return None
    return records[0][1]
Args:
key (str): The key to retrieve | codesearchnet |
def concurrence(state):
    """Calculate the concurrence of a two-qubit state.

    Args:
        state (np.array): a two-qubit state vector (length-4 array) or a
            density matrix (4x4 array).

    Returns:
        float: concurrence.

    Raises:
        ValueError: if the input does not describe exactly two qubits.
    """
    rho = np.array(state)
    if rho.ndim == 1:
        # Promote a pure state vector to its density matrix.
        rho = outer(state)
    if len(state) != 4:
        # Fix: the original raised a bare Exception with an inverted message
        # ("only defined for more than two qubits"); concurrence is defined
        # for exactly two qubits (dimension 4).
        raise ValueError("Concurrence is only defined for two-qubit states")
    # YY is the spin-flip operator sigma_y (x) sigma_y.
    YY = np.fliplr(np.diag([-1, 1, 1, -1]))
    A = rho.dot(YY).dot(rho.conj()).dot(YY)
    w = la.eigh(A, eigvals_only=True)
    # Clip tiny negative eigenvalues caused by numerical noise.
    w = np.sqrt(np.maximum(w, 0))
    return max(0.0, w[-1] - np.sum(w[0:-1]))
Args:
state (np.array): a quantum state (1x4 array) or a density matrix (4x4
array)
Returns:
float: concurrence.
Raises:
Exception: if attempted on more than two qubits. | juraj-google-style |
def __init__(self, command=None, payload=None, print_payload=False):
    """Create a network message.

    Args:
        command (str): payload command, e.g. "inv" or "getdata".
        payload (bytes): raw bytes of the payload.
        print_payload: when truthy, log the payload after construction.
    """
    self.Command = command
    self.Magic = settings.MAGIC
    if payload is None:
        raw = bytearray()
    else:
        raw = binascii.unhexlify(Helper.ToArray(payload))
    self.Checksum = Message.GetChecksum(raw)
    self.Payload = raw
    if print_payload:
        logger.info("PAYLOAD: %s " % self.Payload)
Args:
command (str): payload command e.g. "inv", "getdata". See NeoNode.MessageReceived() for more commands.
payload (bytes): raw bytes of the payload.
print_payload: UNUSED | juraj-google-style |
def List(self, device_path):
    """Return a directory listing of the given path on the device.

    Opens a ``sync:`` connection, queries the listing, and always closes
    the connection — including when the listing call raises (the original
    leaked the connection on error).

    Args:
        device_path: Directory on the device to list.

    Returns:
        The listing produced by the filesync handler.
    """
    connection = self.protocol_handler.Open(self._handle, destination=b'sync:')
    try:
        return self.filesync_handler.List(connection, device_path)
    finally:
        # Close even on error so the sync channel is not leaked.
        connection.Close()
Args:
device_path: Directory to list. | codesearchnet |
def update_args(self, args):
    """Override existing root-section options with values parsed by argparse.

    Only keys that already resolve via ``self.get`` are overridden, and
    only when the parsed value is not None.

    Args:
        args (namespace): args parsed by argparse.
    """
    # NOTE(review): ``self.get(name)`` is a truthiness check, so options whose
    # current value is falsy (0, '', False) are never overridden — confirm
    # this is intended.
    for name in vars(args):
        parsed = getattr(args, name)
        if self.get(name) and parsed is not None:
            self._config[self.root_section][name] = parsed
Only root positional arguments that already exist will overridden.
Args:
args (namespace): args parsed by argparse | codesearchnet |
def make_request(url, data, on_complete):
    """POST *data* to *url* via AJAX; call *on_complete* when the request finishes.

    Args:
        url (str): target URL.
        data (dict): POST data.
        on_complete (ref): callback invoked when the request completes.
    """
    request = ajax.ajax()
    request.bind('complete', on_complete)
    request.open('POST', url, True)
    request.set_header('content-type', 'application/x-www-form-urlencoded')
    request.send(data)
callback when complete.
Args:
url (str): URL.
data (dict): Dictionary with POST data.
on_complete (ref): Reference to function / method which will be called
when the request is done. | codesearchnet |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.