code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def forward(self, hidden_states):
    """Run the FP8 (fbgemm row-wise quantized) expert MLPs over pre-routed tokens.

    Args:
        hidden_states (torch.Tensor): (batch_size * token_num, hidden_size).
    """
    # Regroup the flat token batch so dim 0 indexes the expert each token was routed to.
    hidden_states = hidden_states.view(self.num_experts, -1, self.hidden_size)
    num_tokens = None  # let quantize_fp8_per_row infer the row count
    next_states = torch.empty_like(hidden_states)
    for i in range(self.num_experts):
        expert_hidden = hidden_states[i]
        expert_hidden_reshaped = expert_hidden.reshape(-1, self.hidden_size)
        expert_quantized, expert_scale = torch.ops.fbgemm.quantize_fp8_per_row(expert_hidden_reshaped, num_tokens, self.input_scale_ub)
        # gate_up_proj packs the gate and up projections along the last dim.
        # NOTE(review): upstream implementations use shape[-1] // 2 as the split
        # point; here it is shape[-1] — confirm this is intentional.
        sharded_expert_dim = self.gate_up_proj.shape[-1]
        gate_up_proj_scale_float32 = self.gate_up_proj_scale.to(torch.float32)
        gate = torch.ops.fbgemm.f8f8bf16_rowwise(expert_quantized, self.gate_up_proj[i].transpose(0, 1)[:sharded_expert_dim].contiguous(), expert_scale, gate_up_proj_scale_float32[i][0][:sharded_expert_dim].view(-1, 1).contiguous(), use_fast_accum=True)
        up = torch.ops.fbgemm.f8f8bf16_rowwise(expert_quantized, self.gate_up_proj[i].transpose(0, 1)[sharded_expert_dim:].contiguous(), expert_scale, gate_up_proj_scale_float32[i][0][sharded_expert_dim:].view(-1, 1).contiguous(), use_fast_accum=True)
        activated = up * self.act_fn(gate)  # gated activation
        activated_quantized, activated_scale = torch.ops.fbgemm.quantize_fp8_per_row(activated, num_tokens, self.input_scale_ub)
        down_proj_scale_float32 = self.down_proj_scale.to(torch.float32)
        expert_output = torch.ops.fbgemm.f8f8bf16_rowwise(activated_quantized, self.down_proj[i].transpose(0, 1).contiguous(), activated_scale, down_proj_scale_float32[i].view(-1, 1).contiguous(), use_fast_accum=True)
        next_states[i] = expert_output
    next_states = next_states.to(hidden_states.device)
return next_states.view(-1, self.hidden_size) | Args:
hidden_states (torch.Tensor): (batch_size * token_num, hidden_size)
Returns:
torch.Tensor: (batch_size * token_num, hidden_size) | github-repos |
def getStreamNetworkAsGeoJson(self, session, withNodes=True):
    """Retrieve the stream network geometry in GeoJSON format.

    Args:
        session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object
            bound to a PostGIS enabled database.
        withNodes (bool, optional): Include nodes. Defaults to True.

    Returns:
        str: GeoJSON ``FeatureCollection`` string.
    """
    features_list = []

    for link in self.streamLinks:
        link_geoJson = link.getAsGeoJson(session)
        if link_geoJson:
            # Fix: reuse the GeoJSON already fetched above instead of issuing a
            # second identical getAsGeoJson(session) query per link.
            link_geometry = json.loads(link_geoJson)
            link_properties = {"link_number": link.linkNumber,
                               "type": link.type,
                               "num_elements": link.numElements,
                               "dx": link.dx,
                               "erode": link.erode,
                               "subsurface": link.subsurface}
            link_feature = {"type": "Feature",
                            "geometry": link_geometry,
                            "properties": link_properties,
                            "id": link.id}
            features_list.append(link_feature)

        if withNodes:
            for node in link.nodes:
                node_geoJson = node.getAsGeoJson(session)
                if node_geoJson:
                    node_geometry = json.loads(node_geoJson)
                    node_properties = {"link_number": link.linkNumber,
                                       "node_number": node.nodeNumber,
                                       "elevation": node.elevation}
                    node_feature = {"type": "Feature",
                                    "geometry": node_geometry,
                                    "properties": node_properties,
                                    "id": node.id}
                    features_list.append(node_feature)

    feature_collection = {"type": "FeatureCollection",
                          "features": features_list}

    return json.dumps(feature_collection)
def cancelPnLSingle(
        self, account: str, modelCode: str, conId: int):
    """Cancel the PnLSingle subscription for (account, modelCode, conId).

    Args:
        account: Cancel for this account name.
        modelCode: Cancel for this account model.
        conId: Cancel for this contract ID.
    """
    key = (account, modelCode, conId)
    # pop() both looks up and removes the key -> reqId mapping in one step.
    reqId = self.wrapper.pnlSingleKey2ReqId.pop(key, None)
    if reqId:
        self.client.cancelPnLSingle(reqId)
        self.wrapper.pnlSingles.pop(reqId, None)
    else:
        self._logger.error(
            'cancelPnLSingle: No subscription for '
f'account {account}, modelCode {modelCode}, conId {conId}') | Cancel PnLSingle subscription for the given account, modelCode
and conId.
Args:
account: Cancel for this account name.
modelCode: Cancel for this account model.
conId: Cancel for this contract ID. | juraj-google-style |
def __init__(self, reactor, hostname, port):
    """Construct a CarbonClientService.

    Args:
        reactor: The Twisted reactor for your application.
        hostname: The hostname of your Carbon server.
        port: The port that the Carbon pickle endpoint is listening on.
    """
    service.MultiService.__init__(self)
    self._reactor = reactor
    self._hostname = hostname
    self._port = port
    # Connection objects are populated later; no connection is made at
    # construction time.
    self._client_factory = None
    self._tcp_client = None
self._repeating_metric_handles = [] | Construct a CarbonClientService.
Args:
reactor: The Twisted reactor for your application.
hostname: The hostname of your Carbon server.
port: The port that the Carbon pickle endpoint is listening on. | juraj-google-style |
def sg_one_hot(tensor, opt):
    r"""Converts a tensor into a one-hot tensor.

    See `tf.one_hot()` in tensorflow.

    Args:
        tensor: A `Tensor` ( automatically given by chain )
        opt:
            depth: The number of classes.
            name: If provided, replace current tensor's name.

    Returns:
        A `Tensor`.
    """
    # Fix: the original body contained a stray bare `r` expression statement
    # (a NameError at call time), left over from a mangled r"""...""" prefix.
    assert opt.depth is not None, 'depth is mandatory.'
    return tf.one_hot(tensor, opt.depth, name=opt.name)
def __init__(self,
             qubits: List[Qubit],
             registers: List[RegisterSlot],
             mem_slots: List[MemorySlot]):
    """Create a device specification with the given hardware resources.

    Args:
        qubits: Qubits available on the device.
        registers: Classical register slots.
        mem_slots: Memory slots.
    """
    self._qubits = qubits
    self._reg_slots = registers
self._mem_slots = mem_slots | Create device specification with specified `qubits`.
Args:
qubits: | juraj-google-style |
def compute_precedence(terminals, productions, precedence_levels):
    """Computes the precedence of each terminal and production.

    A terminal's precedence is its (associativity, level) from
    ``precedence_levels``; a production inherits the precedence of its
    right-most terminal unless an explicit precedence symbol was given.

    Returns:
        OrderedDict mapping terminal | production -> (assoc, level).
    """
    precedence = collections.OrderedDict()
    for terminal in terminals:
        precedence[terminal] = DEFAULT_PREC
    # Number the levels from len(precedence_levels) down to 1.
    level_precs = range(len(precedence_levels), 0, (- 1))
    for (i, level) in zip(level_precs, precedence_levels):
        assoc = level[0]
        for symbol in level[1:]:
            precedence[symbol] = (assoc, i)
    for (production, prec_symbol) in productions:
        if (prec_symbol is None):
            # Right-most terminal of the RHS, or None if the RHS has none.
            prod_terminals = ([symbol for symbol in production.rhs if (symbol in terminals)] or [None])
            precedence[production] = precedence.get(prod_terminals[(- 1)], DEFAULT_PREC)
        else:
            precedence[production] = precedence.get(prec_symbol, DEFAULT_PREC)
return precedence | Computes the precedence of terminal and production.
The precedence of a terminal is it's level in the PRECEDENCE tuple. For
a production, the precedence is the right-most terminal (if it exists).
The default precedence is DEFAULT_PREC - (LEFT, 0).
Returns:
precedence - dict[terminal | production] = (assoc, level) | codesearchnet |
def initialize_tpu_system(cluster_resolver=None):
    """Initialize the TPU devices.

    Args:
        cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,
            which provides information about the TPU cluster.

    Returns:
        The tf.tpu.Topology object for the topology of the TPU cluster. If
        called inside tf.function, the serialized topology object instead.

    Raises:
        RuntimeError: If running inside a tf.function.
        NotFoundError: If no TPU devices found in eager mode.
    """
return tpu_strategy_util.initialize_tpu_system_impl(cluster_resolver, TPUClusterResolver) | Initialize the TPU devices.
Args:
cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,
which provides information about the TPU cluster.
Returns:
The tf.tpu.Topology object for the topology of the TPU cluster. If called
inside tf.function, it returns the serialized topology object instead.
Raises:
RuntimeError: If running inside a tf.function.
NotFoundError: If no TPU devices found in eager mode. | github-repos |
def Parse(self, rdf_data):
    """Process rdf data through filters and test results against expectations.

    Args:
        rdf_data: A list containing 0 or more rdf values.

    Returns:
        An anomaly if data didn't match expectations.

    Raises:
        ProcessingError: If rdf_data is not a handled type.
    """
    if not isinstance(rdf_data, (list, set)):
        raise ProcessingError("Bad host data format: %s" % type(rdf_data))
    # When a baseline is configured, filter the raw data through it first.
    if self.baseline:
        comparison = self.baseliner.Parse(rdf_data)
    else:
        comparison = rdf_data
    found = self.handler.Parse(comparison)
    results = self.hint.Render(found)
return self.matcher.Detect(comparison, results) | Process rdf data through filters. Test if results match expectations.
Processing of rdf data is staged by a filter handler, which manages the
processing of host data. The output of the filters are compared against
expected results.
Args:
rdf_data: A list containing 0 or more rdf values.
Returns:
An anomaly if data didn't match expectations.
Raises:
ProcessingError: If rdf_data is not a handled type. | juraj-google-style |
def CreateFeedItemAddOperation(name, price, date, ad_customizer_feed):
    """Creates a FeedItemOperation that ADDs one ad-customizer FeedItem.

    The generated FeedItemOperation will create a FeedItem with the specified
    values when sent to FeedItemService.mutate.

    Args:
        name: the value for the name attribute of the FeedItem.
        price: the value for the price attribute of the FeedItem.
        date: the value for the date attribute of the FeedItem.
        ad_customizer_feed: the AdCustomizerFeed we're associating the
            FeedItems with.

    Returns:
        A new FeedItemOperation for adding a FeedItem.
    """
    # Feed attribute order is assumed to be [name, price, date].
    feed_item = {
        'feedId': ad_customizer_feed['feedId'],
        'attributeValues': [
            {
                'feedAttributeId': ad_customizer_feed['feedAttributes'][0]['id'],
                'stringValue': name
            },
            {
                'feedAttributeId': ad_customizer_feed['feedAttributes'][1]['id'],
                'stringValue': price
            },
            {
                'feedAttributeId': ad_customizer_feed['feedAttributes'][2]['id'],
                'stringValue': date
            }
        ]
    }
    operation = {
        'operator': 'ADD',
        'operand': feed_item
    }
return operation | Creates a FeedItemOperation.
The generated FeedItemOperation will create a FeedItem with the specified
values when sent to FeedItemService.mutate.
Args:
name: the value for the name attribute of the FeedItem.
price: the value for the price attribute of the FeedItem.
date: the value for the date attribute of the FeedItem.
ad_customizer_feed: the AdCustomizerFeed we're associating the FeedItems
with.
Returns:
A new FeedItemOperation for adding a FeedItem. | juraj-google-style |
def _update_services_target_state(sdp_target_state: str):
    """Propagate the SDP target state to every service not already in it.

    Args:
        sdp_target_state (str): Target state of SDP.
    """
    service_states = get_service_state_list()
    for service in service_states:
        if service.current_state != sdp_target_state:
            LOG.debug('Setting the target state of %s to be %s', service.id,
                      sdp_target_state)
service.update_target_state(sdp_target_state) | Update the target states of services based on SDP target state.
When we get a new target state this function is called to ensure
components receive the target state(s) and/or act on them.
Args:
sdp_target_state (str): Target state of SDP | juraj-google-style |
def _GetMemberForOffset(self, offset):
    """Finds the gzip member whose uncompressed data includes the offset.

    Args:
        offset (int): offset in the uncompressed data to find the containing
            member for.

    Returns:
        gzipfile.GzipMember: gzip file member or None if not available.

    Raises:
        ValueError: if the provided offset is outside of the bounds of the
            uncompressed data.
    """
    if offset < 0 or offset >= self.uncompressed_data_size:
        raise ValueError('Offset {0:d} is larger than file size {1:d}.'.format(
            offset, self.uncompressed_data_size))
    # Members are keyed by their (exclusive) end offset in the uncompressed
    # stream. NOTE(review): assumes ascending iteration order — confirm.
    for end_offset, member in iter(self._members_by_end_offset.items()):
        if offset < end_offset:
            return member
return None | Finds the member whose data includes the provided offset.
Args:
offset (int): offset in the uncompressed data to find the
containing member for.
Returns:
gzipfile.GzipMember: gzip file member or None if not available.
Raises:
ValueError: if the provided offset is outside of the bounds of the
uncompressed data. | juraj-google-style |
def logdet(matrix, name=None):
    """Computes log of the determinant of a hermitian positive definite matrix.

    Uses the Cholesky factorization: log det(A) = 2 * sum(log(diag(chol(A)))),
    which reduces the chance of over- or underflow versus computing det(A).

    Args:
        matrix: A `Tensor` of shape `[..., M, M]`.
        name: A name to give this `Op`. Defaults to `logdet`.

    Returns:
        The natural log of the determinant of `matrix`.
    """
    with ops.name_scope(name, 'logdet', [matrix]):
        chol = gen_linalg_ops.cholesky(matrix)
return 2.0 * math_ops.reduce_sum(math_ops.log(math_ops.real(array_ops.matrix_diag_part(chol))), axis=[-1]) | Computes log of the determinant of a hermitian positive definite matrix.
```python
# Compute the determinant of a matrix while reducing the chance of over- or
underflow:
A = ... # shape 10 x 10
det = tf.exp(tf.linalg.logdet(A)) # scalar
```
Args:
matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
or `complex128` with shape `[..., M, M]`.
name: A name to give this `Op`. Defaults to `logdet`.
Returns:
The natural log of the determinant of `matrix`.
@compatibility(numpy)
Equivalent to numpy.linalg.slogdet, although no sign is returned since only
hermitian positive definite matrices are supported.
@end_compatibility | github-repos |
def remove_tree_by_path(self, path):
    """Remove the tree from database by given `path`.

    Args:
        path (str): Path of the tree.
    """
    with transaction.manager:
        trees = self.path_db.get(path, None)
        if (not trees):
            return
        # NOTE(review): the loop body returns on its first iteration, so only
        # the first matching tree is removed — confirm this is intentional.
        for tree in trees:
return self._remove_tree(tree) | Remove the tree from database by given `path`.
Args:
path (str): Path of the tree. | codesearchnet |
def restore_model(self, directory=None, file=None):
    """Restore a TensorFlow model from a checkpoint.

    If no checkpoint file is given, the latest checkpoint is restored. If no
    checkpoint directory is given, the model's default saver directory is
    used (unless file specifies the entire path).

    Args:
        directory: Optional checkpoint directory.
        file: Optional checkpoint file, or path if directory not given.
    """
self.model.restore(directory=directory, file=file) | Restore TensorFlow model. If no checkpoint file is given, the latest checkpoint is
restored. If no checkpoint directory is given, the model's default saver directory is
used (unless file specifies the entire path).
Args:
directory: Optional checkpoint directory.
file: Optional checkpoint file, or path if directory not given. | juraj-google-style |
def get_ticker_metadata(self, ticker, fmt='json'):
    """Return metadata for one ticker.

    Use TiingoClient.list_tickers() to get available options.

    Args:
        ticker (str): Unique identifier for stock.
        fmt (str): 'json' for a plain dict, 'object' for an attribute object.
    """
    url = "tiingo/daily/{}".format(ticker)
    response = self._request('GET', url)
    data = response.json()
    if fmt == 'json':
        return data
    elif fmt == 'object':
return dict_to_object(data, "Ticker") | Return metadata for 1 ticker
Use TiingoClient.list_tickers() to get available options
Args:
ticker (str) : Unique identifier for stock | juraj-google-style |
def run_and_monitor(args, pid_to_wait, std_out_filter_fn=None, cwd=None):
    """Start a process whose lifetime is tied to another process.

    A small watchdog python subprocess waits on `pid_to_wait` and kills the
    started process when that pid exits.

    Args:
        args: the args of the process to start and monitor.
        pid_to_wait: the process to wait on. If it ends, also kill the started
            process.
        std_out_filter_fn: optional predicate over stdout lines of the started
            process; only lines for which it returns True are echoed to this
            process's stdout.
        cwd: the current working directory for the process to start.
    """
    monitor_process = None
    try:
        p = subprocess.Popen(args, cwd=cwd, env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        pids_to_kill = [p.pid]
        # The watchdog runs _wait_and_kill from this module in its own python.
        script = ('import %s;%s._wait_and_kill(%s, %s)' % (__name__, __name__, str(pid_to_wait), str(pids_to_kill)))
        monitor_process = subprocess.Popen(['python', '-c', script], env=os.environ)
        while (p.poll() is None):
            line = p.stdout.readline()
            if (not six.PY2):
                line = line.decode()
            if ((std_out_filter_fn is None) or std_out_filter_fn(line)):
                sys.stdout.write(line)
    finally:
        if monitor_process:
monitor_process.kill() | Start a process, and have it depend on another specified process.
Args:
args: the args of the process to start and monitor.
pid_to_wait: the process to wait on. If the process ends, also kill the started process.
std_out_filter_fn: a filter function which takes a string content from the stdout of the
started process, and returns True if the string should be redirected to console stdout.
cwd: the current working directory for the process to start. | codesearchnet |
def retry(max_count):
    """Decorator for retrying a test case until it passes.

    The BaseTestClass will keep executing the test cases annotated with this
    decorator until the test passes, or the maximum number of iterations have
    been met. This decorator only stores the information needed for the retry;
    it does not execute the retry itself.

    Args:
        max_count: int, the maximum number of times to execute the decorated
            test case. Must be larger than 1.

    Returns:
        The wrapped test function.

    Raises:
        ValueError: if the user input is invalid.
    """
    if max_count <= 1:
        raise ValueError(f'The `max_count` for `retry` must be larger than 1, got "{max_count}".')

    def _outer_decorator(func):
        setattr(func, ATTR_MAX_RETRY_CNT, max_count)

        @functools.wraps(func)
        def _wrapper(*args, **kwargs):
            # Fix: forward keyword arguments and propagate the return value,
            # both of which the original wrapper silently dropped.
            return func(*args, **kwargs)
        return _wrapper
    return _outer_decorator
def from_stream(credential_filename):
    """Create a GoogleCredentials object by reading a credentials file.

    Args:
        credential_filename: the path to the file from where the credentials
            are to be read.

    Raises:
        ApplicationDefaultCredentialsError: raised when the credentials fail
            to be retrieved or the path does not point to a file.
    """
    if (credential_filename and os.path.isfile(credential_filename)):
        try:
            return _get_application_default_credential_from_file(credential_filename)
        except (ApplicationDefaultCredentialsError, ValueError) as error:
            extra_help = ' (provided as parameter to the from_stream() method)'
            _raise_exception_for_reading_json(credential_filename, extra_help, error)
    else:
raise ApplicationDefaultCredentialsError('The parameter passed to the from_stream() method should point to a file.') | Create a Credentials object by reading information from a file.
It returns an object of type GoogleCredentials.
Args:
credential_filename: the path to the file from where the
credentials are to be read
Raises:
ApplicationDefaultCredentialsError: raised when the credentials
fail to be retrieved. | codesearchnet |
class ConfidenceCriteria(StoppingCriteria):
    """Stop generation when the assistant model's confidence in its prediction
    for the current token falls below `assistant_confidence_threshold`.

    Args:
        assistant_confidence_threshold (`float`): The value of the threshold.
    """

    def __init__(self, assistant_confidence_threshold):
        self.assistant_confidence_threshold = assistant_confidence_threshold

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
        # Probability the model assigned to the most recently generated token.
        probs = scores[-1].softmax(-1)
        p = probs[0, input_ids[0, -1]].item()
        if p < self.assistant_confidence_threshold:
            return True
return False | This class can be used to stop generation whenever assistant model's confidence in its prediction for the current token is lower than the threshold
`model.generation_config.assistant_confidence_threshold` even if the number of speculative tokens (defined by `num_assistant_tokens`) is not yet reached.
Args:
assistant_confidence_threshold (`float`):
The value of the threshold. | github-repos |
class SegGptImageSegmentationOutput(ModelOutput):
    """Output type of [`SegGptImageSegmentationOutput`]."""

    # Loss value; present only when `labels` is provided.
    loss: Optional[torch.FloatTensor] = None
    # Predicted masks of shape (batch_size, num_channels, height, width).
    pred_masks: Optional[torch.FloatTensor] = None
    # Per-layer hidden states; present when output_hidden_states=True.
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None | Output type of [`SegGptImageSegmentationOutput`].
Args:
loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided):
The loss value.
pred_masks (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
The predicted masks.
hidden_states (`Tuple[torch.FloatTensor]`, `optional`, returned when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape `(batch_size, patch_height, patch_width, hidden_size)`.
attentions (`Tuple[torch.FloatTensor]`, `optional`, returned when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape
`(batch_size, num_heads, seq_len, seq_len)`. | github-repos |
def wait_and_ignore(condition, timeout=WTF_TIMEOUT_MANAGER.NORMAL, sleep=0.5):
    """Wait for `condition` to become true, swallowing any failure.

    Like `wait_until`, but neither a timeout nor an exception raised while
    evaluating the condition is propagated to the caller.

    Args:
        condition (lambda): Lambda expression to wait for to evaluate to True.

    Kwargs:
        timeout (number): Maximum number of seconds to wait.
        sleep (number): Sleep time to wait between iterations.

    Example::

        wait_and_ignore(lambda: driver.find_element_by_id("success").is_displayed(),
                        timeout=30,
                        sleep=0.5)
    """
    try:
        return wait_until(condition, timeout, sleep)
    except Exception:
        # Fix: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; best-effort semantics only need ordinary errors.
        pass
def __eq__(self, other):
    """Two LO ranges are equal iff they have the same type and the same
    frequency range.

    Args:
        other (LoRange): other LoRange.

    Returns:
        bool: are self and other equal.
    """
    if type(self) is not type(other):
        return False
    return bool(self._ub == other._ub and self._lb == other._lb)
def __init__(self, input: EventSetNode, func: MapFunction, receive_extras: bool, dtype: Optional[DType]=None, dtype_to_dtype: Optional[Dict[DType, DType]]=None, feature_name_to_dtype: Optional[Dict[str, DType]]=None):
    """Constructor.

    At most one of `dtype`, `dtype_to_dtype` or `feature_name_to_dtype` may
    be provided.

    Args:
        input: Input node.
        func: Function to apply to each element.
        dtype: All the output features are expected to be of this type.
        dtype_to_dtype: Mapping between current dtype and new dtype.
        feature_name_to_dtype: Mapping between feature name and new dtype.
    """
    super().__init__()
    # Enforce mutual exclusivity of the three dtype-specification arguments.
    assert sum((x is not None for x in [dtype, dtype_to_dtype, feature_name_to_dtype])) <= 1
    output_dtypes = build_dtypes_list_from_target_dtypes(input, dtype, dtype_to_dtype, feature_name_to_dtype)
    assert len(output_dtypes) == len(input.schema.features)
    self._receive_extras = receive_extras
    self.add_attribute('func', func)
    self._func = func
    self.add_input('input', input)
    # The output keeps the input's sampling; only feature dtypes may change.
    self.add_output('output', create_node_new_features_existing_sampling(features=[FeatureSchema(f.name, dtype) for f, dtype in zip(input.schema.features, output_dtypes)], sampling_node=input, creator=self))
self.check() | Constructor.
There can only be one of dtype, dtype_to_dtype or feature_name_to_dtype.
Args:
input: Input node.
func: Function to apply to each elemnent.
dtype: All the output features are expected to be of this type.
dtype_to_dtype: Mapping between current dtype and new dtype.
feature_name_to_dtype: Mapping between feature name and new dtype. | github-repos |
async def register_user(self, password, **kwds):
    """Register a remote user and provide a sessionToken for later requests.

    Args:
        password (str): The password for the new user.
        **kwds: Extra fields forwarded to the remote user-creation call.

    Raises:
        RuntimeError: If the user is already registered locally.
    """
    user = (await self._create_remote_user(password=password, **kwds))
    # Normalize the primary-key field name across backends.
    if (not ('pk' in user)):
        user['pk'] = user['id']
    match_query = (self.model.user == user['id'])
    if (self.model.select().where(match_query).count() > 0):
        raise RuntimeError('The user is already registered.')
    password = self.model(user=user['id'], password=password)
    password.save()
return {'user': user, 'sessionToken': self._user_session_token(user)} | This function is used to provide a sessionToken for later requests.
Args:
uid (str): The | codesearchnet |
def orient_undirected_graph(self, data, graph):
    """Run the bnlearn algorithm on an undirected graph (skeleton).

    Args:
        data (pandas.DataFrame): DataFrame containing the data.
        graph (networkx.Graph): Skeleton of the graph to orient.

    Returns:
        networkx.DiGraph: Solution on the given skeleton.
    """
    self.arguments['{VERBOSE}'] = str(self.verbose).upper()
    self.arguments['{SCORE}'] = self.score
    self.arguments['{BETA}'] = str(self.beta)
    self.arguments['{OPTIM}'] = str(self.optim).upper()
    self.arguments['{ALPHA}'] = str(self.alpha)
    # Whitelist: the skeleton's edges. Blacklist: every edge absent from the
    # skeleton (complement of the adjacency matrix).
    whitelist = DataFrame(list(nx.edges(graph)), columns=["from", "to"])
    blacklist = DataFrame(list(nx.edges(nx.DiGraph(DataFrame(-nx.adj_matrix(graph, weight=None).to_dense() + 1,
                                                             columns=list(graph.nodes()),
                                                             index=list(graph.nodes()))))), columns=["from", "to"])
    results = self._run_bnlearn(data, whitelist=whitelist,
                                blacklist=blacklist, verbose=self.verbose)
    return nx.relabel_nodes(nx.DiGraph(results),
{idx: i for idx, i in enumerate(data.columns)}) | Run the algorithm on an undirected graph.
Args:
data (pandas.DataFrame): DataFrame containing the data
graph (networkx.Graph): Skeleton of the graph to orient
Returns:
networkx.DiGraph: Solution on the given skeleton. | juraj-google-style |
def _read_status_element(case_xml, tag):
    """Extract a nose status sub-element (skipped/failure/error) as a dict.

    Returns None when `case_xml` has no `tag` child.
    """
    elements = case_xml.getElementsByTagName(tag)
    if not elements:
        return None
    element = elements[0]
    type_attr = element.getAttribute('type') if element.hasAttribute('type') else ''
    return {
        'type': type_attr,
        'message': element.getAttribute('message'),
        'text': "".join(child.nodeValue for child in element.childNodes),
    }


def read_nose(in_file):
    """Parse nose-style test reports into a `dict`.

    Args:
        in_file (:obj:`str`): path to nose-style test report.

    Returns:
        :obj:`dict`: mapping of test-class name to a list of case dicts with
        keys 'name', 'time' and optionally 'skipped', 'failure', 'error'.
    """
    suites = {}
    doc_xml = minidom.parse(in_file)
    suite_xml = doc_xml.getElementsByTagName("testsuite")[0]
    for case_xml in suite_xml.getElementsByTagName('testcase'):
        classname = case_xml.getAttribute('classname')
        case = {
            'name': case_xml.getAttribute('name'),
            'time': float(case_xml.getAttribute('time')),
        }
        # The three status elements share the same structure; the original
        # parsed each with a triplicated copy of the same code.
        for tag in ('skipped', 'failure', 'error'):
            status = _read_status_element(case_xml, tag)
            if status is not None:
                case[tag] = status
        suites.setdefault(classname, []).append(case)
    return suites
def click(self, x, y):
    """Simulate a click (tap) at position (x, y).

    Coordinates are divided by the current scale factor before being embedded
    in the tap command.

    Args:
        x (int): position of x.
        y (int): position of y.

    Returns:
        self, to allow call chaining.
    """
    scaled_x = x / self._scale
    scaled_y = y / self._scale
    self._run_nowait('target.tap({x: %d, y: %d})' % (scaled_x, scaled_y))
    return self
def _operator(attr):
    """Defers an operator overload to `attr`.

    Args:
        attr: Operator attribute to use.

    Returns:
        Function calling the operator attribute on the wrapped `.value`.
    """
    @functools.wraps(attr)
    def _delegate(first, *rest):
        return attr(first.value, *rest)
    return _delegate
def receiveds_format(receiveds):
    """Add metadata (hop index, UTC date, delay) to parsed received hops and
    reformat their field values.

    Args:
        receiveds (list): list of receiveds hops already formatted.

    Returns:
        list of receiveds reformatted and with new fields.
    """
    log.debug('Receiveds for this email are parsed')
    output = []
    counter = Counter()
    # Walk the hops in reverse so hop 1 is the first (oldest) relay.
    for i in receiveds[::(- 1)]:
        j = {k: v.strip() for (k, v) in i.items() if v}
        j['hop'] = (counter['hop'] + 1)
        if i.get('date'):
            i['date'] = i['date'].split(';')[(- 1)]
            try:
                (j['date_utc'], _) = convert_mail_date(i['date'])
            except TypeError:
                j['date_utc'] = None
        # Delay is the difference in seconds from the previous hop, when both
        # timestamps are known; otherwise 0.
        size = len(output)
        now = j.get('date_utc')
        if (size and now):
            before = output[(counter['hop'] - 1)].get('date_utc')
            if before:
                j['delay'] = (now - before).total_seconds()
            else:
                j['delay'] = 0
        else:
            j['delay'] = 0
        output.append(j)
        counter['hop'] += 1
    else:
        # Serialize the datetimes once all hops are processed.
        for i in output:
            if i.get('date_utc'):
                i['date_utc'] = i['date_utc'].isoformat()
        else:
return output | Given a list of receiveds hop, adds metadata and reformat
field values
Args:
receiveds (list): list of receiveds hops already formatted
Returns:
list of receiveds, reformatted and with new fields | codesearchnet |
def kill_dashboard(self, check_alive=True):
    """Kill the dashboard process.

    Args:
        check_alive (bool): Raise an exception if the process was already
            dead.
    """
    self._kill_process_type(
        ray_constants.PROCESS_TYPE_DASHBOARD, check_alive=check_alive)
ray_constants.PROCESS_TYPE_DASHBOARD, check_alive=check_alive) | Kill the dashboard.
Args:
check_alive (bool): Raise an exception if the process was already
dead. | juraj-google-style |
def _ExpectedKeysForEntry(self, entry):
    """Generate a list of expected cache keys for this type of map.

    Args:
        entry: A NetgroupMapEntry.

    Returns:
        A list of strings.
    """
return [entry.name] | Generate a list of expected cache keys for this type of map.
Args:
entry: A NetgroupMapEntry
Returns:
A list of strings | github-repos |
def list2str(self, l: List, joiner: str) -> str:
    """Convert a list to a string as input for the tokenizer.

    Nested lists and dicts are flattened recursively; falsy scalar items are
    skipped. Every rendered element is followed by `joiner`.

    Args:
        l (list): list for converting.
        joiner (str): join the elements using this string to separate them.

    Returns:
        The value of the list as a string.
    """
    parts = []
    for item in l:
        if isinstance(item, list):
            piece = self.list2str(item, joiner)
        elif isinstance(item, dict):
            piece = self.dict2str(item, joiner)
        elif item:
            piece = str(item)
        else:
            continue
        parts.extend((piece, joiner))
    return "".join(parts)
def _recursive_apply(tensors, apply_fn):
    """Recursively apply `apply_fn` to every Tensor in a fetch-like structure.

    The structure may be a single `Tensor`/`Variable`, a (possibly nested)
    `list`, `tuple`, `namedtuple`, or `dict` of them.

    Args:
        tensors: Single `Tensor`, `list`, nested `list`, `tuple`,
            `namedtuple`, or `dict`.
        apply_fn: Function to apply to each `Tensor`; should return a `Tensor`.

    Returns:
        The modified tensors with the same structure.

    Raises:
        TypeError: if the structure contains an unsupported type.
    """
    tensors_type = type(tensors)
    if isinstance(tensors, tensor_lib.Tensor):
        return apply_fn(tensors)
    elif isinstance(tensors, variables.Variable):
        return apply_fn(tensors.value())
    elif isinstance(tensors, (list, tuple)):
        tensors = [_recursive_apply(t, apply_fn) for t in tensors]
        if tensors_type is list:
            return list(tensors)
        elif tensors_type is tuple:
            return tuple(tensors)
        # Any other sequence subtype is rebuilt positionally (namedtuple case).
        return tensors_type(*tensors)
    elif tensors_type is dict:
        return dict(((k, _recursive_apply(v, apply_fn)) for k, v in tensors.items()))
    else:
raise TypeError(f'_recursive_apply argument {tensors!r} has invalid type {tensors_type!r}') | Helper method to recursively apply a function to structure of tensors.
The structure of the tensors should take the form similar to fetches in
`tf.compat.v1.Session` and includes single `Tensor`, `list`, nested `list`,
`tuple`,
`namedtuple`, or `dict`.
Args:
tensors: Single `Tensor`, `list`, nested `list, `tuple`, `namedtuple`, or
`dict`.
apply_fn: Function to apply to each `Tensor` and should return a `Tensor`.
Returns:
Returns the modified tensors with the same structure.
Raises:
`TypeError` if undefined type in the tensors structure. | github-repos |
def __init__(self, conv_sizes, dense_sizes, scope='cnn-baseline', summary_labels=()):
    """CNN baseline.

    Builds a conv2d stack, a flatten layer, and a stack of dense layers.

    Args:
        conv_sizes: List of convolutional layer sizes.
        dense_sizes: List of dense layer sizes.
    """
    network = []
    for size in conv_sizes:
        network.append(dict(type='conv2d', size=size))
    # Only the first conv layer gets an explicit 5x5 window.
    # NOTE(review): assumes conv_sizes is non-empty — confirm.
    network[0]['window'] = 5
    network.append(dict(type='flatten'))
    for size in dense_sizes:
        network.append(dict(type='dense', size=size))
super(CNNBaseline, self).__init__(network=network, scope=scope, summary_labels=summary_labels) | CNN baseline.
Args:
conv_sizes: List of convolutional layer sizes
dense_sizes: List of dense layer sizes | juraj-google-style |
def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training: bool=False):
    """Pre-LayerNorm transformer encoder layer forward pass.

    Args:
        hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`.
        attention_mask (`tf.Tensor`): attention mask of size
            `(batch, 1, tgt_len, src_len)` where padding elements are
            indicated by very large negative values.
        layer_head_mask (`tf.Tensor`): mask for attention heads in a given
            layer of size `(encoder_attention_heads,)`.
    """
    # Self-attention sub-block with residual connection.
    residual = hidden_states
    hidden_states = self.self_attn_layer_norm(hidden_states)
    hidden_states, self_attn_weights, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, training=training)
    tf.debugging.assert_equal(shape_list(hidden_states), shape_list(residual), message=f'Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}')
    hidden_states = self.dropout(hidden_states, training=training)
    hidden_states = residual + hidden_states
    # Feed-forward sub-block with residual connection.
    residual = hidden_states
    hidden_states = self.final_layer_norm(hidden_states)
    hidden_states = self.activation_fn(self.fc1(hidden_states))
    hidden_states = self.activation_dropout(hidden_states, training=training)
    hidden_states = self.fc2(hidden_states)
    hidden_states = self.dropout(hidden_states, training=training)
    hidden_states = residual + hidden_states
return (hidden_states, self_attn_weights) | Args:
hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`tf.Tensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)` | github-repos |
def check_result(data, key=''):
    """Check the result of an API response.

    Ideally, this should be done by checking that the value of the
    ``resultCode`` (or ``code``) attribute is 0, but there are endpoints that
    simply do not follow this rule; ``key`` can be used to instead test for
    the existence of an attribute.

    Args:
        data (dict): Response obtained from the API endpoint.
        key (str): Key to check for existence in the dict.

    Returns:
        bool: True if result was correct, False otherwise.
    """
    if not isinstance(data, dict):
        return False
    if key:
        return key in data
    # Idiom cleanup: direct `in data` membership and plain boolean returns
    # replace `.keys()` lookups and `True if ... else False` ternaries.
    if 'resultCode' in data:
        return data.get('resultCode', -1) == 0
    if 'code' in data:
        return data.get('code', -1) == 0
    return False
def append_block(self, node, reverse=False):
    """Append a statement to the current block.

    Args:
        node: The statement to append.
        reverse: When called multiple times, this flag determines whether the
            statement should be prepended or appended to the already inserted
            statements.

    Raises:
        ValueError: If the given node is not a statement.
    """
    if not isinstance(node, grammar.STATEMENTS):
        raise ValueError
    if reverse:
        self.to_append_block[-1].appendleft(node)
    else:
self.to_append_block[-1].append(node) | Append a statement to the current block.
Args:
node: The statement to prepend.
reverse: When called multiple times, this flag determines whether the
statement should be prepended or appended to the already inserted
statements.
Raises:
ValueError: If the given node is not a statement. | juraj-google-style |
def stack_residual_blocks_v2(x, filters, blocks, stride1=2, name=None):
    """A set of stacked ResNetV2 residual blocks.

    Args:
        x: Input tensor.
        filters: Number of filters in the bottleneck layer in a block.
        blocks: Number of blocks in the stacked blocks.
        stride1: Stride of the first layer in the first block. Defaults to `2`.
        name: Stack label.

    Returns:
        Output tensor for the stacked blocks.
    """
    x = residual_block_v2(x, filters, conv_shortcut=True, name=name + '_block1')
    for i in range(2, blocks):
        x = residual_block_v2(x, filters, name=name + '_block' + str(i))
    # The stride is applied on the last block of the stack.
    x = residual_block_v2(x, filters, stride=stride1, name=name + '_block' + str(blocks))
return x | A set of stacked residual blocks.
Args:
x: Input tensor.
filters: Number of filters in the bottleneck layer in a block.
blocks: Number of blocks in the stacked blocks.
stride1: Stride of the first layer in the first block. Defaults to `2`.
name: Stack label.
Returns:
Output tensor for the stacked blocks. | github-repos |
def _CreateSanitizedDestination(self, source_file_entry, source_path_spec, source_data_stream_name, destination_path):
    """Creates a sanitized path of both destination directory and filename.

    This function replaces non-printable and other characters defined in
    _DIRTY_CHARACTERS with an underscore "_".

    Args:
        source_file_entry (dfvfs.FileEntry): file entry of the source file.
        source_path_spec (dfvfs.PathSpec): path specification of the source
            file.
        source_data_stream_name (str): name of the data stream of the source
            file entry.
        destination_path (str): path of the destination directory.

    Returns:
        tuple[str, str]: sanitized paths of both destination directory and
            filename.
    """
    file_system = source_file_entry.GetFileSystem()
    path = getattr(source_path_spec, 'location', None)
    path_segments = file_system.SplitPath(path)
    # Replace every dirty character in each path segment with '_'.
    for (index, path_segment) in enumerate(path_segments):
        path_segments[index] = ''.join([(character if (character not in self._DIRTY_CHARACTERS) else '_') for character in path_segment])
    target_filename = path_segments.pop()
    # Prefix partition / volume shadow snapshot identifiers, if any.
    parent_path_spec = getattr(source_file_entry.path_spec, 'parent', None)
    while parent_path_spec:
        if (parent_path_spec.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION):
            path_segments.insert(0, parent_path_spec.location[1:])
            break
        elif (parent_path_spec.type_indicator == dfvfs_definitions.TYPE_INDICATOR_VSHADOW):
            path_segments.insert(0, parent_path_spec.location[1:])
        parent_path_spec = getattr(parent_path_spec, 'parent', None)
    target_directory = os.path.join(destination_path, *path_segments)
    if source_data_stream_name:
        target_filename = '{0:s}_{1:s}'.format(target_filename, source_data_stream_name)
return (target_directory, target_filename) | Creates a sanitized path of both destination directory and filename.
This function replaces non-printable and other characters defined in
_DIRTY_CHARACTERS with an underscore "_".
Args:
source_file_entry (dfvfs.FileEntry): file entry of the source file.
source_path_spec (dfvfs.PathSpec): path specification of the source file.
source_data_stream_name (str): name of the data stream of the source file
entry.
destination_path (str): path of the destination directory.
Returns:
tuple[str, str]: sanitized paths of both destination directory and
filename. | codesearchnet |
def check(self, src, expected=None, prologue=None, name=None, version=None, platform='linux'):
    """Check that parsing `src` and printing the AST matches `expected`.

    Args:
        src: A source string.
        expected: Optional expected result string. If not provided, src is
            used instead. The special value IGNORE can be used to skip
            checking the parsed results against expected text.
        prologue: An optional prologue to be prepended to the expected text
            before comparison. Useful for imports that are introduced during
            printing the AST.
        name: The name of the module.
        version: A python version tuple (None for default value).
        platform: A platform string (defaults to "linux").

    Returns:
        The parsed pytd.TypeDeclUnit.
    """
    ast = self.parse(src, name, version, platform)
    actual = pytd_utils.Print(ast)
    if expected != IGNORE:
        if expected is None:
            expected = src
        expected = textwrap.dedent(expected).lstrip()
        if prologue:
            expected = f'{textwrap.dedent(prologue)}\n\n{expected}'
        self.assertMultiLineEqual(expected.rstrip(), actual)
return ast | Check the parsing of src.
This checks that parsing the source and then printing the resulting
AST results in the expected text.
Args:
src: A source string.
expected: Optional expected result string. If not provided, src is used
instead. The special value IGNORE can be used to skip checking the
parsed results against expected text.
prologue: An optional prologue to be prepended to the expected text before
comparison. Useful for imports that are introduced during printing the
AST.
name: The name of the module.
version: A python version tuple (None for default value).
platform: A platform string (defaults to "linux").
Returns:
The parsed pytd.TypeDeclUnit. | github-repos |
def _assert_float_dtype(dtype):
  """Validate that `dtype` is a floating point type and return it.

  Args:
    dtype: The data type to validate.

  Returns:
    The validated dtype, canonicalized via `dtypes.as_dtype`.

  Raises:
    ValueError: If `dtype` is not a floating point type.
  """
  canonical = dtypes.as_dtype(dtype)
  if not canonical.is_floating:
    raise ValueError(f'Argument `dtype` is expected to be floating point. Received: {canonical}.')
  return canonical
`dtype` must be a floating point type.
Args:
dtype: The data type to validate.
Returns:
Validated type.
Raises:
ValueError: if `dtype` is not a floating point type. | github-repos |
def intersect(self, other: Iterable[Flag]) -> FrozenSet[Flag]:
    """Return the subset of ``other`` that is also in :attr:`.defined`.

    If the wildcard flag is defined, all flags in ``other`` are returned.
    The ``&`` operator is an alias of this method.

    Args:
        other: The operand flag set.
    """
    other_flags = frozenset(other)
    if Wildcard in self._defined:
        return other_flags
    return self._defined & other_flags
:attr:`.defined`. If the wildcard flag is defined, then all flags in
``other`` are returned.
The ``&`` operator is an alias of this method, making these two
calls equivalent::
perm_flags.intersect(other_flags)
perm_flags & other_flags
Args:
other: The operand flag set. | juraj-google-style |
def tryload(self, cfgstr=None, on_error='raise'):
    """Like load, but returns None on a cache miss instead of raising.

    Args:
        cfgstr (str, optional): Config string identifying the cache entry;
            defaults to this cacher's own configuration.
        on_error (str): How to handle non-io errors. Either 'raise', which
            re-raises the exception, or 'clear', which deletes the cache
            entry and returns None.

    Returns:
        The cached object, or None on a cache miss or when caching is
        disabled.
    """
    cfgstr = self._rectify_cfgstr(cfgstr)
    if self.enabled:
        try:
            if (self.verbose > 1):
                self.log('[cacher] tryload fname={}'.format(self.fname))
            return self.load(cfgstr)
        except IOError:
            # An IOError is an ordinary cache miss (file does not exist);
            # fall through to the final `return None`.
            if (self.verbose > 0):
                self.log('[cacher] ... {} cache miss'.format(self.fname))
        except Exception:
            # Any other failure (e.g. a corrupt cache file) is dispatched
            # according to on_error.
            if (self.verbose > 0):
                self.log('[cacher] ... failed to load')
            if (on_error == 'raise'):
                raise
            elif (on_error == 'clear'):
                self.clear(cfgstr)
                return None
            else:
                raise KeyError('Unknown method on_error={}'.format(on_error))
    elif (self.verbose > 1):
        self.log('[cacher] ... cache disabled: fname={}'.format(self.fname))
    return None | Like load, but returns None if the load fails due to a cache miss.
Args:
on_error (str): How to handle non-io errors errors. Either raise,
which re-raises the exception, or clear which deletes the cache
and returns None. | codesearchnet |
def prepend_to_list(self, key, *value, pipeline=False):
    """Push elements onto the head of the list stored at ``key``.

    Args:
        key (str): Key where the list is stored.
        value: One or more values to add to the front of the list.
        pipeline (bool): If True, queue the command on the current
            transaction pipeline instead of executing immediately.
            Default False.
    """
    target = self._pipeline if pipeline else self._db
    target.lpush(key, *value)
Args:
key (str): Key where the list is stored
value: Value to add to the list
pipeline (bool): True, start a transaction block. Default false. | juraj-google-style |
def _decode_exp(self, access_token=None):
    """Extract the ``exp`` (expiration) field from an access token.

    Args:
        access_token (str): Token to decode. Defaults to the access token
            held by the current credentials.

    Returns:
        int: JWT expiration time in epoch seconds (also cached on
        ``self.jwt_exp``).

    Raises:
        PanCloudError: If ``exp`` is missing or not an integer.
    """
    credentials = self.get_credentials()
    jwt = access_token or credentials.access_token
    payload = self.decode_jwt_payload(jwt)
    if 'exp' not in payload:
        raise PanCloudError("No exp field found in payload")
    try:
        exp = int(payload['exp'])
    except ValueError:
        raise PanCloudError(
            "Expiration time (exp) must be an integer")
    self.jwt_exp = exp
    return exp
Args:
access_token (str): Access token to decode. Defaults to ``None``.
Returns:
int: JWT expiration in epoch seconds. | juraj-google-style |
def processed_shape(self, shape):
    """Compute the shape of a preprocessed state from its original shape.

    Threads the shape through every preprocessor in order.

    Args:
        shape: Original state shape.

    Returns:
        Processed state shape.
    """
    result = shape
    for stage in self.preprocessors:
        result = stage.processed_shape(shape=result)
    return result
Args:
shape: original state shape
Returns: processed state shape | codesearchnet |
def position(x=None, y=None):
    """Return the current mouse cursor position as a two-integer Point.

    Args:
        x (int, optional): If not None, overrides the x in the result.
        y (int, optional): If not None, overrides the y in the result.

    Returns:
        Point: (x, y) coordinates of the mouse cursor.
    """
    cur_x, cur_y = platformModule._position()
    cur_x = int(cur_x) if x is None else int(x)
    cur_y = int(cur_y) if y is None else int(y)
    return Point(cur_x, cur_y)
tuple.
Args:
x (int, None, optional) - If not None, this argument overrides the x in
the return value.
y (int, None, optional) - If not None, this argument overrides the y in
the return value.
Returns:
(x, y) tuple of the current xy coordinates of the mouse cursor. | juraj-google-style |
def __init__(self, workflow_name, graph_name):
    """Initialize the invalid-workflow-definition exception.

    Args:
        workflow_name (str): Name of the workflow that contains an invalid
            definition.
        graph_name (str): Name of the dag that is invalid.
    """
    self.workflow_name, self.graph_name = workflow_name, graph_name
Args:
workflow_name (str): The name of the workflow that contains an invalid
definition.
graph_name (str): The name of the dag that is invalid. | juraj-google-style |
def _update_dict(self, to_dict, from_dict):
    """Recursively merge ``from_dict`` into ``to_dict`` in place.

    Nested dicts present in both are merged; any other value from
    ``from_dict`` overwrites the corresponding entry in ``to_dict``.

    Args:
        to_dict (dict): Dictionary that receives the merge.
        from_dict (dict): Dictionary merged into ``to_dict``.
    """
    for key, new_value in from_dict.items():
        existing = to_dict.get(key)
        if isinstance(existing, dict) and isinstance(new_value, dict):
            self._update_dict(existing, new_value)
        else:
            to_dict[key] = new_value
Args:
to_dict (dict): The dictionary onto which the merge is executed.
from_dict (dict): The dictionary merged into to_dict | juraj-google-style |
def shuffle_dataset(filenames, extra_fn=None):
  """Shuffle each file of the dataset, skipping work if outputs exist.

  Args:
    filenames: A list of file name strings.
    extra_fn: Optional function from a list of records to a list of
      records, applied after shuffling each file.
  """
  if outputs_exist(filenames):
    tf.logging.info('Skipping shuffle because output files exist')
    return
  tf.logging.info('Shuffling data...')
  for fname in filenames:
    _shuffle_single(fname, extra_fn=extra_fn)
  tf.logging.info('Data shuffled.')
Args:
filenames: a list of strings
extra_fn: an optional function from list of records to list of records
to be called after shuffling a file. | codesearchnet |
def flatten_expr(self):
    """Flatten self.expr into a legal variable name if necessary.

    Parameterized recursive types are stored in intermediate variables, so
    an expr such as 'MyRecursiveAlias[int, str]' must be mangled into an
    identifier, e.g. '_MyRecursiveAlias_LBAR_int_COMMA_str_RBAR'.

    Returns:
      The flattened identifier when self is a parameterized recursive
      type; otherwise self.expr unchanged.
    """
    if '[' not in self.expr or not self.is_recursive():
        return self.expr
    flattened = self.expr
    for old, new in (('.', '_DOT'), ('[', '_LBAR_'), (']', '_RBAR'), (', ', '_COMMA_')):
        flattened = flattened.replace(old, new)
    return '_' + flattened
Pytype stores parameterized recursive types in intermediate variables. If
self is such a type, this method flattens self.expr into a string that can
serve as a variable name. For example, 'MyRecursiveAlias[int, str]' is
flattened into '_MyRecursiveAlias_LBAR_int_COMMA_str_RBAR'.
Returns:
If self is a parameterized recursive type, a flattened version of
self.expr that is a legal variable name. Otherwise, self.expr unchanged. | github-repos |
def sparse_message_pass_batched(node_states,
                                adjacency_matrices,
                                num_edge_types,
                                hidden_size,
                                use_bias=True,
                                average_aggregation=False,
                                name='sparse_ggnn_batched'):
  """Identical to sparse_message_pass except that inputs are batched.

  The batch of graphs is flattened into one large block-diagonal graph of
  B*N nodes so that the un-batched sparse_message_pass can be reused.

  B = batch size, N = nodes per graph, H = hidden size, T = edge types.

  Args:
    node_states: Initial states of each node. Shape: [B, N, H].
    adjacency_matrices: Sparse adjacency matrices of directed edges for
      each edge type and batch. Shape: [B, N, N, T] (tf.SparseTensor).
    num_edge_types: The number of edge types, T.
    hidden_size: The size of the hidden layer, H.
    use_bias: Whether to use bias in the hidden layer.
    average_aggregation: If True, incoming messages are averaged rather
      than summed.
    name: (optional) Scope within which tf variables are created.

  Returns:
    The result of one round of message passing. Shape: [B, N, H].
  """
  b, n = tf.shape(node_states)[0], tf.shape(node_states)[1]

  # Flatten the batch into one big graph: node (batch, i) -> batch * n + i.
  node_states = tf.reshape(node_states, [b * n, hidden_size])

  # Rebuild the sparse indices for the flattened block-diagonal adjacency.
  # Original index columns are [batch, from_node, to_node, edge_type].
  # NOTE: the original text contained `indices[(:, 3)]`, which is invalid
  # Python from extraction garbling; restored to `indices[:, 3]` etc.
  indices = adjacency_matrices.indices
  new_index2 = indices[:, 3]
  new_index0 = indices[:, 1] + indices[:, 0] * tf.cast(n, tf.int64)
  new_index1 = indices[:, 2] + indices[:, 0] * tf.cast(n, tf.int64)
  new_indices = tf.stack([new_index0, new_index1, new_index2], axis=1)

  new_shape = [tf.cast(b * n, tf.int64), tf.cast(b * n, tf.int64),
               num_edge_types]
  adjacency_matrices = tf.SparseTensor(indices=new_indices,
                                       values=adjacency_matrices.values,
                                       dense_shape=new_shape)

  # One round of message passing on the flattened graph, then un-flatten.
  node_states = sparse_message_pass(
      node_states,
      adjacency_matrices,
      num_edge_types,
      hidden_size,
      use_bias=use_bias,
      average_aggregation=average_aggregation,
      name=name)
  return tf.reshape(node_states, [b, n, hidden_size])
B = The batch size.
N = The number of nodes in each batch.
H = The size of the hidden states.
T = The number of edge types.
Args:
node_states: Initial states of each node in the graph. Shape: [B, N, H]
adjacency_matrices: Adjacency matrices of directed edges for each edge
type and batch. Shape: [B, N, N, T] (sparse).
num_edge_types: The number of edge types. T.
hidden_size: The size of the hidden layer. H.
use_bias: Whether to use bias in the hidden layer.
average_aggregation: How to aggregate the incoming node messages. If
average_aggregation is true, the messages are averaged. If it is false,
they are summed.
name: (optional) The scope within which tf variables should be created.
Returns:
The result of one round of message-passing of shape [B, N, H]. | codesearchnet |
def is_saving_non_distributed():
  """Return whether a non-distributed version of the model is being saved.

  True iff we are inside a save context and the save options'
  experimental_variable_policy is not EXPAND_DISTRIBUTED_VARIABLES.

  Returns:
    A boolean.
  """
  if not save_context.in_save_context():
    return False
  policy = save_context.get_save_options().experimental_variable_policy
  return policy != save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES
It returns True iff we are in saving context and are saving a non-distributed
version of the model. That is, SaveOptions.experimental_variable_policy is
NONE.
Returns:
A boolean. | github-repos |
def _create_vocab_table_lookup_qat_model_tf1(self, sess: session.Session) -> Tuple[core.Tensor, core.Tensor, core.Tensor]:
  """Creates a simple QAT model that initializes and looks up a vocab table.

  Writes a comma-separated vocabulary asset file, builds a
  `StaticVocabularyTable` over it, and wires the looked-up ids through a
  fake-quantized matmul so the graph resembles a QAT-trained model.

  Args:
    sess: Tensorflow Session to create the model in.

  Returns:
    (input_vocabs_placeholder, lookup_vals, output_tensor), where
      * input_vocabs_placeholder is a placeholder tensor of 1D strings
      * lookup_vals is the direct result of the table lookup
      * output_tensor is a float 2x2 matrix
  """
  # Create the vocabulary asset file that backs the lookup table.
  asset_dir = self.create_tempdir('assets').full_path
  asset_file = os.path.join(asset_dir, 'vocab_file.txt')
  file_io.write_string_to_file(filename=asset_file, file_content='hello,model,quantization\n')
  vocab_file = asset.Asset(asset_file)
  raw_vocab = io_ops.read_file(vocab_file)
  vocabs = ragged_string_ops.string_split_v2(string_ops.string_strip(raw_vocab), sep=',')
  # Map each token to an int64 id; unknown tokens hash into 5 OOV buckets.
  kv_init = lookup_ops.KeyValueTensorInitializer(keys=vocabs, values=np.array([0, 1, 2]), value_dtype=dtypes.int64)
  table = lookup_ops.StaticVocabularyTable(kv_init, num_oov_buckets=5)
  input_vocabs_placeholder = array_ops.placeholder(dtypes.string, shape=(None,), name='input_vocabs')
  lookup_vals = math_ops.cast(table.lookup(input_vocabs_placeholder), dtypes.float32)
  # Fake-quantize activations and weights around the matmul, mimicking QAT.
  matmul_input = array_ops_stack.stack([lookup_vals, lookup_vals])
  matmul_input = array_ops.fake_quant_with_min_max_args(matmul_input, min=-0.3, max=0.3, num_bits=8, narrow_range=False)
  weight_row = array_ops.ones(shape=array_ops.shape(input_vocabs_placeholder), dtype=dtypes.float32)
  weight = array_ops.transpose_v2(array_ops_stack.stack([weight_row, weight_row]))
  weight = array_ops.fake_quant_with_min_max_args(weight, min=-0.1, max=0.2, num_bits=8, narrow_range=False)
  output_tensor = math_ops.matmul(matmul_input, weight)
  output_tensor = array_ops.fake_quant_with_min_max_args(output_tensor, min=-0.2, max=0.2, num_bits=8, narrow_range=False)
  return (input_vocabs_placeholder, lookup_vals, output_tensor) | Creates a simple QAT model that initializes and lookups a vocab table.
This model creates an asset file at "vocab_file.txt" containing
comma-separated vocabularies. It also initializes a `StaticVocabularyTable`
and performs a lookup with the input vocabs, which is a 1D tensor of
strings.
Args:
sess: Tensorflow Session to create the model in.
Returns:
(input_vocabs_placeholder, lookup_vals, output_tensor), where
* input_vocabs_placeholder is a placeholder tensor of 1D strings
* lookup_vals is an output tensor that is a direct result of table lookup
* output_tensor is a float 2x2 matrix | github-repos |
def update_metric_by_name(self, metric_name, metric_type, description=None,
                          custom_properties=None, tags=None, **kwargs):
    """Create or update a metric object.

    Args:
        metric_name (string): Name of the metric.
        metric_type (string): Metric type; one of 'gauge', 'counter',
            'cumulative_counter'.
        description (optional[string]): A description.
        custom_properties (optional[dict]): Custom properties to attach.
        tags (optional[list of strings]): Tags associated with the metric.

    Returns:
        The decoded JSON response body.
    """
    payload = {
        'type': metric_type.upper(),
        'description': description or '',
        'customProperties': custom_properties or {},
        'tags': tags or [],
    }
    url = self._u(self._METRIC_ENDPOINT_SUFFIX, str(metric_name))
    resp = self._put(url, data=payload, **kwargs)
    resp.raise_for_status()
    return resp.json()
Args:
metric_name (string): name of metric
type (string): metric type, must be one of 'gauge', 'counter',
'cumulative_counter'
description (optional[string]): a description
custom_properties (optional[dict]): dictionary of custom properties
tags (optional[list of strings]): list of tags associated with
metric | juraj-google-style |
def get_image_features(self, pixel_values: torch.Tensor) -> torch.Tensor:
    """Project the vision tower's last hidden state into language-model space.

    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`):
            The tensors corresponding to the input images.

    Returns:
        `torch.Tensor`: Image features of shape
        `(num_images, image_length, embed_dim)`.
    """
    hidden = self.vision_tower(pixel_values=pixel_values).last_hidden_state
    return self.multi_modal_projector(hidden)
Args:
pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`)
The tensors corresponding to the input images.
Returns:
image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`). | github-repos |
def _ParseEntry(self, key, val):
    """Record a configuration setting in the current section.

    Settings listed in self._repeated accumulate all values into a list;
    all other settings keep the first value seen (later duplicates are
    ignored via setdefault).

    Args:
        key: The name of the setting.
        val: The value of the setting.
    """
    if key not in self._repeated:
        self.section.setdefault(key, val)
    else:
        self.section.setdefault(key, []).extend(val)
Args:
key: The name of the setting.
val: The value of the setting. | juraj-google-style |
def compress_file(filepath, compression="gz"):
    """Compress a file in place with the proper extension appended.

    Behaves like the Unix command line tools gzip and bzip2: the original
    uncompressed file is removed after compression. Files that already
    carry the target extension are left untouched.

    Uses the standard-library gzip/bz2 modules directly instead of the
    third-party ``monty.io.zopen`` the original relied on.

    Args:
        filepath (str): Path to file.
        compression (str): A compression mode. Valid options are "gz" or
            "bz2". Defaults to "gz".

    Raises:
        ValueError: If ``compression`` is not "gz" or "bz2".
    """
    if compression not in ("gz", "bz2"):
        raise ValueError("Supported compression formats are 'gz' and 'bz2'.")
    # Already compressed (by extension): nothing to do, matching gzip/bzip2.
    if filepath.lower().endswith(".%s" % compression):
        return
    opener = gzip.open if compression == "gz" else bz2.open
    target = "%s.%s" % (filepath, compression)
    with open(filepath, "rb") as f_in, opener(target, "wb") as f_out:
        shutil.copyfileobj(f_in, f_out)
    os.remove(filepath)
Unix command line gzip and bzip2 in the sense that the original
uncompressed files are not retained.
Args:
filepath (str): Path to file.
compression (str): A compression mode. Valid options are "gz" or
"bz2". Defaults to "gz". | juraj-google-style |
def _gradient_function(op_name, attr_tuple, num_inputs, inputs, outputs, out_grads, skip_input_indices, forward_pass_name_scope):
  """Calls the registered gradient function of the op.

  Args:
    op_name: the name of the op to be differentiated.
    attr_tuple: the attrs, as a tuple.
    num_inputs: the number of inputs to the op.
    inputs: inputs to the original operation.
    outputs: outputs of the original operation.
    out_grads: gradients of the operation wrt its outputs.
    skip_input_indices: a tuple passed to the gradient function,
      indicating which inputs to skip calculating the gradient for.
    forward_pass_name_scope: the name scope of the op in the forward pass.

  Returns:
    The gradients with respect to the inputs of the function, as a list.
  """
  # The gradient registry expects an op object; build a mock from the
  # recorded forward-pass data.
  mock_op = _MockOp(attr_tuple, inputs, outputs, op_name, skip_input_indices)
  grad_fn = ops._gradient_registry.lookup(op_name)
  if grad_fn is None:
    # No gradient registered for this op: no gradient for any input.
    return [None] * num_inputs
  if ops.executing_eagerly_outside_functions() or control_flow_util.EnableControlFlowV2(ops.get_default_graph()):
    # Name the gradient ops under "gradient_tape/<forward scope>/" so the
    # graph is readable and gradients line up with their forward ops.
    gradient_name_scope = 'gradient_tape/'
    if forward_pass_name_scope:
      gradient_name_scope += forward_pass_name_scope + '/'
    with ops.name_scope(gradient_name_scope):
      return grad_fn(mock_op, *out_grads)
  else:
    return grad_fn(mock_op, *out_grads) | Calls the gradient function of the op.
Args:
op_name: the name of the op to be differentiated.
attr_tuple: the attrs, as a tuple.
num_inputs: the number of inputs to the op.
inputs: inputs to the original operation.
outputs: outputs to the original operation.
out_grads: gradients of the operation wrt its outputs.
skip_input_indices: a tuple that is passed to the gradient function,
indicating which inputs to skip calculating the gradient for
forward_pass_name_scope: the namescope of the op in the forward pass.
Returns:
The gradients with respect to the inputs of the function, as a list. | github-repos |
def find_one(self, collection, query):
    """Run a raw mongo ``find_one`` against a collection.

    Args:
        collection: The db collection name. See main class documentation.
        query: A mongo find query.

    Returns:
        The single matching document, or None if nothing matches.
    """
    return getattr(self.db, collection).find_one(query)
a raw interface to mongo to do any query you want.
Args:
collection: The db collection. See main class documentation.
query: A mongo find query.
Returns:
The single matching document as returned by pymongo's find_one, or None if nothing matches. | juraj-google-style
def label_TM_tmhmm_residue_numbers_and_leaflets(tmhmm_seq):
    """Determine TM-helix residue numbers and label the boundaries by leaflet.

    Fixes in this revision:
      * The original boundary loop never checked for a break directly after
        the first T residue (e.g. T residues [3, 7, 8] were merged into a
        single helix [3, 8]) and silently dropped a lone T residue.
      * Neighbour lookups indexed past the sequence ends ([-1] wrapped to
        the last residue; [len] raised IndexError for a terminal T).

    Args:
        tmhmm_seq: Per-residue TMHMM labels, e.g.
            g.protein.representative_sequence.seq_record.letter_annotations['TM-tmhmm']

    Returns:
        tuple: (TM_boundary_dict, leaflet_dict) where TM_boundary_dict maps
        'TM_helix_<n>' -> [start_residue, end_residue] (1-based) and
        leaflet_dict maps 'tmhmm_leaflet_O'/'tmhmm_leaflet_I' -> list of
        boundary residue numbers adjacent to that leaflet.
    """
    # 1-based residue numbers of all residues labelled 'T' (transmembrane).
    T_residue = [pos for pos, label in enumerate(tmhmm_seq, start=1) if label == 'T']

    # Group consecutive residue numbers into helices; each gap starts a new one.
    TM_boundary_dict = {}
    TM_helix_count = 0
    TM_start = None
    for i, residue in enumerate(T_residue):
        if TM_start is None:
            TM_start = residue
        is_last = (i == len(T_residue) - 1)
        if is_last or T_residue[i + 1] != residue + 1:
            TM_helix_count += 1
            TM_boundary_dict['TM_helix_' + str(TM_helix_count)] = [TM_start, residue]
            TM_start = None

    # Label each helix boundary residue with the leaflet(s) it touches,
    # judged from the labels of its immediate neighbours.
    leaflet_dict = {}
    seq_len = len(tmhmm_seq)
    for leaflet in ['O', 'I']:
        leaflet_list = []
        for TM_residues in TM_boundary_dict.values():
            for residue_num in TM_residues:
                idx = residue_num - 1
                prev_label = tmhmm_seq[idx - 1] if idx > 0 else None
                next_label = tmhmm_seq[idx + 1] if idx + 1 < seq_len else None
                if prev_label == leaflet or next_label == leaflet:
                    leaflet_list.append(residue_num)
        leaflet_dict['tmhmm_leaflet_' + leaflet] = leaflet_list

    return (TM_boundary_dict, leaflet_dict)
Args:
tmhmm_seq: g.protein.representative_sequence.seq_record.letter_annotations['TM-tmhmm']
Returns:
leaflet_dict: a dictionary with leaflet_variable : [residue list] where the variable is inside or outside
TM_boundary_dict: a dictionary mapping each TM helix number to [TM helix residue start, TM helix residue end]
TODO:
untested method! | codesearchnet |
def state_y(self, t: types.RealTensor, name: str=None) -> types.RealTensor:
  """Computes the state variable `y(t)` for the Gaussian HJM model.

  For the Gaussian HJM model, y(t) is available in closed form:

    y_ij(t) = exp(-k_i * t) * exp(-k_j * t)
              * int_0^t rho_ij * sigma_i(u) * sigma_j(u) * du

  Args:
    t: A rank 1 real `Tensor` of shape `[num_times]` specifying the time `t`.
    name: Python string. The name to give to the ops created by this
      function. Default value: `None` which maps to the name `state_y`.

  Returns:
    A real `Tensor` of shape [self._factors, self._factors, num_times]
    containing the computed y_ij(t).
  """
  name = name or 'state_y'
  with tf.name_scope(name):
    t = tf.convert_to_tensor(t, dtype=self._dtype)
    t_shape = tf.shape(t)
    t = tf.broadcast_to(t, tf.concat([[self._dim], t_shape], axis=0))
    # Index of the piecewise-constant volatility segment containing each t.
    time_index = tf.searchsorted(self._jump_locations, t)
    # mr2[i, j] = k_i + k_j (pairwise mean-reversion sums), broadcast in time.
    mr2 = tf.expand_dims(self._mean_reversion, axis=-1)
    mr2 = tf.expand_dims(mr2 + tf.transpose(mr2), axis=-1)
    def _integrate_volatility_squared(vol, l_limit, u_limit):
      # Closed-form integral of rho_ij*sig_i*sig_j*exp((k_i+k_j)*u) on [l, u],
      # valid because vol is constant on the segment.
      vol = tf.expand_dims(vol, axis=-2)
      vol_squared = tf.expand_dims(self._rho, axis=-1) * (vol * tf.transpose(vol, perm=[1, 0, 2]))
      return vol_squared / mr2 * (tf.math.exp(mr2 * u_limit) - tf.math.exp(mr2 * l_limit))
    is_constant_vol = tf.math.equal(tf.shape(self._jump_values_vol)[-1], 0)
    # Accumulated integral at each volatility knot (empty when vol constant).
    v_squared_between_vol_knots = tf.cond(is_constant_vol, lambda: tf.zeros(shape=(self._dim, self._dim, 0), dtype=self._dtype), lambda: _integrate_volatility_squared(self._jump_values_vol, self._padded_knots, self._jump_locations))
    v_squared_at_vol_knots = tf.concat([tf.zeros((self._dim, self._dim, 1), dtype=self._dtype), utils.cumsum_using_matvec(v_squared_between_vol_knots)], axis=-1)
    vn = tf.concat([self._zero_padding, self._jump_locations], axis=1)
    # Partial segment: from the last knot before each t up to t itself.
    v_squared_t = _integrate_volatility_squared(self._volatility(t), tf.gather(vn, time_index, batch_dims=1), t)
    v_squared_t += tf.gather(v_squared_at_vol_knots, time_index, batch_dims=-1)
    return tf.math.exp(-mr2 * t) * v_squared_t | Computes the state variable `y(t)` for tha Gaussian HJM Model.
For Gaussian HJM model, the state parameter y(t), can be analytically
computed as follows:
y_ij(t) = exp(-k_i * t) * exp(-k_j * t) * (
int_0^t rho_ij * sigma_i(u) * sigma_j(u) * du)
Args:
t: A rank 1 real `Tensor` of shape `[num_times]` specifying the time `t`.
name: Python string. The name to give to the ops created by this function.
Default value: `None` which maps to the default name `state_y`.
Returns:
A real `Tensor` of shape [self._factors, self._factors, num_times]
containing the computed y_ij(t). | github-repos |
def recipe_dcm_to_storage(config, auth_read, auth_write, account, report_id, report_name, bucket, path):
  """Move an existing CM report into a Storage bucket.

  Args:
    config: Recipe configuration object.
    auth_read (authentication): Credentials used for reading data.
    auth_write (authentication): Credentials used for writing data.
    account (integer): CM account id.
    report_id (integer): CM report id.
    report_name (string): CM report name.
    bucket (string): Destination Storage bucket.
    path (string): Destination path within the bucket.
  """
  task = {
      'auth': auth_read,
      'report': {'account': account, 'report_id': report_id, 'name': report_name},
      'out': {'storage': {'auth': auth_write, 'bucket': bucket, 'path': path}},
  }
  dcm(config, task)
Args:
auth_read (authentication) - Credentials used for reading data.
auth_write (authentication) - Credentials used for writing data.
account (integer) - NA
report_id (integer) - NA
report_name (string) - NA
bucket (string) - NA
path (string) - NA | github-repos |
def camel_to_title(name):
    """Convert a camelCaseFieldName into a Title Case Field Name.

    Args:
        name (str): E.g. ``camelCaseFieldName``.

    Returns:
        str: Title Case converted name, e.g. ``Camel Case Field Name``.
    """
    words = re.findall(r"[A-Z]?[a-z0-9]+|[A-Z]+(?=[A-Z]|$)", name)
    title = " ".join(words)
    return title[0].upper() + title[1:]
Args:
name (str): E.g. camelCaseFieldName
Returns:
str: Title Case converted name. E.g. Camel Case Field Name | juraj-google-style |
def getattr(self, c, attr, default=None, match_only=None):
    """Look up an attribute on the Decor matching a component.

    Args:
        c (component): The component to look up.
        attr (str): The attribute to fetch from the matching Decor.
        default (str): What to return in the event of no match.
        match_only (list of str): The component attributes to include in
            the comparison. Default: all of them.

    Returns:
        obj: The specified attribute of the matching Decor in the Legend,
        or ``default`` when the Decor lacks it (or no Decor matched).
    """
    decor = self.get_decor(c, match_only=match_only)
    try:
        return getattr(decor, attr)
    except AttributeError:
        return default
Args:
c (component): The component to look up.
attr (str): The attribute to get.
default (str): What to return in the event of no match.
match_only (list of str): The component attributes to include in the
comparison. Default: All of them.
Returns:
obj. The specified attribute of the matching Decor in the Legend. | juraj-google-style |
def allsame(list_, strict=True):
    """Check whether every item in the list equals the first item.

    Args:
        list_ (list): Items to compare.
        strict (bool): Passed through to ``list_all_eq_to``.

    Returns:
        bool: True for an empty list or when all items are equal.
    """
    if not list_:
        return True
    return list_all_eq_to(list_, list_[0], strict)
Args:
list_ (list):
Returns:
True if all items in the list are equal | juraj-google-style |
def copy(self) -> 'ConsoleBuffer':
    """Return a copy of this ConsoleBuffer.

    Each per-cell channel list is copied so the new buffer can be mutated
    independently of this one.

    Returns:
        ConsoleBuffer: A new ConsoleBuffer copy.
    """
    clone = ConsoleBuffer(0, 0)
    clone.width = self.width
    clone.height = self.height
    for channel in ('back_r', 'back_g', 'back_b',
                    'fore_r', 'fore_g', 'fore_b', 'char'):
        setattr(clone, channel, list(getattr(self, channel)))
    return clone
Returns:
ConsoleBuffer: A new ConsoleBuffer copy. | codesearchnet |
def __init__(self, left, right):
    """Initialize an equality between two expressions.

    Args:
        left: A string; the left side of the equality.
        right: A string; the right side of the equality.
    """
    self.left, self.right = left, right
Args:
left: A string. Left side of the equality.
right: A string. Right side of the equality. | github-repos |
def link_to_storage(self, sensor_log):
    """Attach this DataStreamer to an underlying SensorLog.

    Required before this streamer can generate reports from the data in
    the SensorLog. May be called multiple times; any walker created
    against a previous SensorLog is destroyed first.

    Args:
        sensor_log (SensorLog): The log used to create the StreamWalker
            backing this streamer, so triggering can be checked.
    """
    if self.walker is not None:
        # Unlink from whatever SensorLog we were previously attached to.
        self._sensor_log.destroy_walker(self.walker)
        self.walker = None
    self.walker = sensor_log.create_walker(self.selector)
    self._sensor_log = sensor_log
Calling this method is required if you want to use this DataStreamer
to generate reports from the underlying data in the SensorLog.
You can call it multiple times and it will unlink itself from any
previous SensorLog each time.
Args:
sensor_log (SensorLog): Actually create a StreamWalker to go along with this
streamer so that we can check if it's triggered. | juraj-google-style |
def _start_trial(self, trial, checkpoint=None):
    """Starts the trial, restoring the last result if it was paused.

    Args:
        trial: The Trial to start.
        checkpoint: Optional checkpoint to restore from.

    Raises:
        RuntimeError: If restoring from checkpoint fails.
    """
    prior_status = trial.status
    self.set_status(trial, Trial.RUNNING)
    # Runner reuse is only permitted when there is some checkpoint to
    # restore from (either the one passed in or the trial's own).
    trial.runner = self._setup_runner(trial, reuse_allowed=((checkpoint is not None) or (trial._checkpoint.value is not None)))
    if (not self.restore(trial, checkpoint)):
        if (trial.status == Trial.ERROR):
            raise RuntimeError('Restore from checkpoint failed for Trial {}.'.format(str(trial)))
    previous_run = self._find_item(self._paused, trial)
    if ((prior_status == Trial.PAUSED) and previous_run):
        # Resuming a paused trial: move its previously in-flight result
        # back to the running set instead of scheduling fresh training.
        self._paused.pop(previous_run[0])
        self._running[previous_run[0]] = trial
    else:
        self._train(trial) | Starts trial and restores last result if trial was paused.
Raises:
ValueError if restoring from checkpoint fails. | codesearchnet |
def to_jacobian(self):
    """Convert this point to a Jacobian representation.

    Returns:
        JacobianPoint: The Jacobian representation (Z=1), or the point at
        infinity (0, 0, 0) when this point is falsy.
    """
    if self:
        return JacobianPoint(X=self.X, Y=self.Y, Z=1)
    return JacobianPoint(X=0, Y=0, Z=0)
Returns:
JacobianPoint: The Jacobian representation. | codesearchnet |
def dr(self, r1, r2, cutoff=None):
    """Calculate the distance between two fractional coordinates in the cell.

    Args:
        r1 (np.array): Fractional coordinates for position 1.
        r2 (np.array): Fractional coordinates for position 2.
        cutoff (float, optional): If set, distances greater than the
            cutoff return None. Default None (unset).

    Returns:
        float: The distance between r1 and r2, or None past the cutoff.
    """
    delta_cart = (r1 - r2).dot(self.matrix)
    dist_sq = sum(delta_cart ** 2)
    if cutoff is not None and dist_sq > cutoff ** 2:
        return None
    return math.sqrt(dist_sq)
Args:
r1 (np.array): fractional coordinates for position 1.
r2 (np.array): fractional coordinates for position 2.
cutoff (optional:Bool): If set, returns None for distances greater than the cutoff. Default None (unset).
Returns:
(float): the distance between r1 and r2. | juraj-google-style |
def _WsdlHasMethod(self, method_name):
    """Determine whether the wsdl defines the named method.

    Args:
        method_name: The name of the method.

    Returns:
        True if the method is in the wsdl, otherwise False.
    """
    try:
        self._method_bindings.get(method_name)
    except ValueError:
        return False
    return True
Args:
method_name: The name of the method.
Returns:
True if the method is in the wsdl, otherwise False. | juraj-google-style |
def monkhorst_automatic(kpts=(2, 2, 2), shift=(0, 0, 0)):
    """Build an automatic Monkhorst-Pack Kpoints grid.

    Args:
        kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice
            vectors. Defaults to (2, 2, 2).
        shift: Shift to be applied to the kpoints. Defaults to (0, 0, 0).

    Returns:
        Kpoints object configured for the Monkhorst-Pack scheme.
    """
    return Kpoints(
        "Automatic kpoint scheme",
        0,
        Kpoints.supported_modes.Monkhorst,
        kpts=[kpts],
        kpts_shift=shift,
    )
grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice
vectors. Defaults to (2,2,2)
shift: Shift to be applied to the kpoints. Defaults to (0,0,0).
Returns:
Kpoints object | juraj-google-style |
def _write_credentials_file(credentials_file, credentials):
    """Serialize credentials to an open file in the v2 storage format.

    Refer to :func:`_load_credentials_file` for the format.

    Args:
        credentials_file: An open file handle; must be read/write.
        credentials: A dictionary mapping user-defined keys to an instance
            of :class:`oauth2client.client.Credentials`.
    """
    encoded = {}
    for key, credential in iteritems(credentials):
        raw_json = credential.to_json()
        encoded[key] = _helpers._from_bytes(
            base64.b64encode(_helpers._to_bytes(raw_json)))
    credentials_file.seek(0)
    json.dump({'file_version': 2, 'credentials': encoded}, credentials_file)
    credentials_file.truncate()
Refer to :func:`_load_credentials_file` for the format.
Args:
credentials_file: An open file handle, must be read/write.
credentials: A dictionary mapping user-defined keys to an instance of
:class:`oauth2client.client.Credentials`. | codesearchnet |
def lenet5(images, labels):
  """Build a multi layer convolutional network similar to LeNet 5.

  Change this function to experiment with architectures.

  Args:
    images: The input images.
    labels: The labels as dense one-hot vectors.

  Returns:
    A softmax result.
  """
  wrapped = pt.wrap(images)
  with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
    net = wrapped.conv2d(5, 20).max_pool(2, 2)
    net = net.conv2d(5, 50).max_pool(2, 2)
    return net.flatten().fully_connected(500).softmax_classifier(10, labels)
The architecture is similar to that defined in LeNet 5.
Please change this to experiment with architectures.
Args:
images: The input images.
labels: The labels as dense one-hot vectors.
Returns:
A softmax result. | juraj-google-style |
def UploadSignedConfigBlob(content, aff4_path, client_context=None, limit=None, token=None):
  """Sign file content and upload it to the datastore as a signed blob.

  Args:
    content: File content to upload.
    aff4_path: aff4 path to upload to.
    client_context: The configuration contexts to use.
    limit: The maximum size of the chunk to use; defaults to
      Datastore.maximum_blob_size.
    token: A security token.

  Raises:
    IOError: On failure to write.
  """
  if limit is None:
    limit = config.CONFIG['Datastore.maximum_blob_size']
  if client_context is None:
    client_context = ['Platform:Windows', 'Client Context']
  config.CONFIG.Validate(parameters='PrivateKeys.executable_signing_private_key')
  private_signing_key = config.CONFIG.Get(
      'PrivateKeys.executable_signing_private_key', context=client_context)
  public_verification_key = config.CONFIG.Get(
      'Client.executable_signing_public_key', context=client_context)
  signed_binary_utils.WriteSignedBinary(
      rdfvalue.RDFURN(aff4_path),
      content,
      private_signing_key,
      public_key=public_verification_key,
      chunk_size=limit,
      token=token)
  logging.info('Uploaded to %s', aff4_path)
Args:
content: File content to upload.
aff4_path: aff4 path to upload to.
client_context: The configuration contexts to use.
limit: The maximum size of the chunk to use.
token: A security token.
Raises:
IOError: On failure to write. | codesearchnet |
def write_dot(g):
    """Serialize a pygraph digraph to dot format.

    Replacement for pygraph.readwrite.dot.write, which is dog slow.

    Note:
        This isn't a general replacement. It works for the graphs Rez
        generates, with no guarantees beyond that.

    Args:
        g (pygraph.digraph): Input graph.

    Returns:
        str: Graph in dot format.
    """
    def fmt_attrs(items):
        # Render '[k="v", ...]', stripping quotes already in values; '' if empty.
        if not items:
            return ''
        body = ', '.join('%s="%s"' % (k, str(v).strip('"')) for k, v in items)
        return '[' + body + ']'

    out = ['digraph g {']
    for node in g.nodes():
        out.append('%s %s;' % (node, fmt_attrs(g.node_attributes(node))))
    for edge in g.edges():
        attrs = g.edge_attributes(edge)
        label = str(g.edge_label(edge))
        if label:
            attrs.append(('label', label))
        src, dst = edge
        out.append('%s -> %s %s;' % (src, dst, fmt_attrs(attrs)))
    out.append('}')
    return '\n'.join(out)
Note:
This isn't a general replacement. It will work for the graphs that
Rez generates, but there are no guarantees beyond that.
Args:
g (`pygraph.digraph`): Input graph.
Returns:
str: Graph in dot format. | codesearchnet |
def enqueue_message(self, message, timeout):
    """Add the given message to this transport's queue, ACKing WRTEs.

    A WRTE triggers an immediate OKAY back to the remote end; an OKAY
    records/validates the remote id carried in arg0.

    Args:
        message: The AdbMessage to enqueue.
        timeout: Timeout used for the OKAY send that a WRTE triggers.
    """
    command = message.command
    if command == 'WRTE':
        self._send_command('OKAY', timeout=timeout)
    elif command == 'OKAY':
        self._set_or_check_remote_id(message.arg0)
    self.message_queue.put(message)
This method also handles ACKing any WRTE messages.
Args:
message: The AdbMessage to enqueue.
timeout: The timeout to use for the operation. Specifically, WRTE
messages cause an OKAY to be sent; timeout is used for that send. | juraj-google-style |
def create(cls, env, filenames, trim=False):
    """Build and return a finalized import graph.

    Args:
        env: An environment.Environment object.
        filenames: A list of filenames.
        trim: Whether to trim the dependencies of builtin and system files.

    Returns:
        An immutable ImportGraph with the recursive dependencies of all
        the files in filenames.
    """
    graph = cls(env)
    for filename in filenames:
        graph.add_file_recursive(os.path.abspath(filename), trim)
    graph.build()
    return graph
Args:
env: An environment.Environment object
filenames: A list of filenames
trim: Whether to trim the dependencies of builtin and system files.
Returns:
An immutable ImportGraph with the recursive dependencies of all the
files in filenames | codesearchnet |
def parse_mmcif_header(infile):
    """Parse a couple important fields from the mmCIF file format with some manual curation of ligands.

    If you want full access to the mmCIF file just use the MMCIF2Dict class in Biopython.

    Args:
        infile: Path to mmCIF file

    Returns:
        dict: Dictionary of parsed header fields (pdb_title, description,
        date, experimental_method, resolution, chemicals, taxonomy_name);
        fields that cannot be parsed are simply absent.
    """
    from Bio.PDB.MMCIF2Dict import MMCIF2Dict
    newdict = {}
    try:
        mmdict = MMCIF2Dict(infile)
    except ValueError as e:
        # Unparseable file: log and return the (empty) result.
        log.exception(e)
        return newdict
    # Manual ligand curation: drop water and standard peptide-linking chemicals.
    chemical_ids_exclude = ['HOH']
    chemical_types_exclude = ['l-peptide linking','peptide linking']
    if '_struct.title' in mmdict:
        newdict['pdb_title'] = mmdict['_struct.title']
    else:
        log.debug('{}: No title field'.format(infile))
    if '_struct.pdbx_descriptor' in mmdict:
        newdict['description'] = mmdict['_struct.pdbx_descriptor']
    else:
        log.debug('{}: no description field'.format(infile))
    # Deposition date: prefer the newer field name, fall back to the legacy one.
    if '_pdbx_database_status.recvd_initial_deposition_date' in mmdict:
        newdict['date'] = mmdict['_pdbx_database_status.recvd_initial_deposition_date']
    elif '_database_PDB_rev.date' in mmdict:
        newdict['date'] = mmdict['_database_PDB_rev.date']
    else:
        log.debug('{}: no date field'.format(infile))
    if '_exptl.method' in mmdict:
        newdict['experimental_method'] = mmdict['_exptl.method']
    else:
        log.debug('{}: no experimental method field'.format(infile))
    # Resolution may be a single value or a list; EM entries store it elsewhere.
    if '_refine.ls_d_res_high' in mmdict:
        try:
            if isinstance(mmdict['_refine.ls_d_res_high'], list):
                newdict['resolution'] = [float(x) for x in mmdict['_refine.ls_d_res_high']]
            else:
                newdict['resolution'] = float(mmdict['_refine.ls_d_res_high'])
        except:
            try:
                newdict['resolution'] = float(mmdict['_em_3d_reconstruction.resolution'])
            except:
                log.debug('{}: no resolution field'.format(infile))
    else:
        log.debug('{}: no resolution field'.format(infile))
    # Filter the chemical component list by type (case-insensitive) and then
    # by id (case-sensitive) using the exclusion lists above.
    if '_chem_comp.id' in mmdict:
        chemicals_filtered = ssbio.utils.filter_list_by_indices(mmdict['_chem_comp.id'],
                                                                ssbio.utils.not_find(mmdict['_chem_comp.type'],
                                                                                     chemical_types_exclude,
                                                                                     case_sensitive=False))
        chemicals_fitered = ssbio.utils.filter_list(chemicals_filtered, chemical_ids_exclude, case_sensitive=True)
        newdict['chemicals'] = chemicals_fitered
    else:
        log.debug('{}: no chemical composition field'.format(infile))
    if '_entity_src_gen.pdbx_gene_src_scientific_name' in mmdict:
        newdict['taxonomy_name'] = mmdict['_entity_src_gen.pdbx_gene_src_scientific_name']
    else:
        log.debug('{}: no organism field'.format(infile))
    return newdict | Parse a couple important fields from the mmCIF file format with some manual curation of ligands.
If you want full access to the mmCIF file just use the MMCIF2Dict class in Biopython.
Args:
infile: Path to mmCIF file
Returns:
dict: Dictionary of parsed header | juraj-google-style |
def AssignTasksToClient(self, client_id):
  """Examines our rules and starts up flows based on the client.

  Only rules created since the client's last foreman run are considered;
  the last-run timestamp is advanced before evaluation so rules cannot
  fire twice.

  Args:
    client_id: Client id of the client for tasks to be assigned.

  Returns:
    Number of assigned tasks.
  """
  rules = self.Get(self.Schema.RULES)
  if not rules:
    return 0
  if data_store.RelationalDBEnabled():
    last_foreman_run = self._GetLastForemanRunTimeRelational(client_id)
  else:
    last_foreman_run = self._GetLastForemanRunTime(client_id)
  latest_rule = max(rule.created for rule in rules)
  if latest_rule <= last_foreman_run:
    # Nothing new since this client last checked in.
    return 0
  # Update the latest checked rule on the client BEFORE evaluating, so a
  # crash during evaluation cannot cause rules to run twice.
  if data_store.RelationalDBEnabled():
    try:
      self._SetLastForemanRunTimeRelational(client_id, latest_rule)
    except db.UnknownClientError:
      pass
  if not data_store.RelationalDBEnabled():
    self._SetLastForemanRunTime(client_id, latest_rule)
  relevant_rules = []
  expired_rules = False
  now = time.time() * 1e6  # rule timestamps are in microseconds
  for rule in rules:
    if rule.expires < now:
      expired_rules = True
      continue
    if rule.created <= int(last_foreman_run):
      continue
    relevant_rules.append(rule)
  if data_store.RelationalDBEnabled():
    client_data = data_store.REL_DB.ReadClientFullInfo(client_id)
    if client_data is None:
      # NOTE(review): bare return yields None rather than 0 here — callers
      # treating the result strictly as a count should confirm this.
      return
  else:
    client_data = aff4.FACTORY.Open(client_id, mode="rw", token=self.token)
  actions_count = 0
  for rule in relevant_rules:
    if self._EvaluateRules(rule, client_data):
      actions_count += self._RunActions(rule, client_id)
  if expired_rules:
    self.ExpireRules()
  return actions_count | Examines our rules and starts up flows based on the client.
Args:
client_id: Client id of the client for tasks to be assigned.
Returns:
Number of assigned tasks. | juraj-google-style |
def emit(self, record):
  """Prints a record out to some streams.

  Routing: before flags are parsed everything goes to stderr (with a
  one-time warning); with --logtostderr only stderr is used; otherwise
  the handler's normal stream is used, plus stderr when
  --alsologtostderr is set or the record meets --stderrthreshold.
  A fatal record flushes buffers and aborts the process.

  Args:
    record: logging.LogRecord, the record to emit.
  """
  level = record.levelno
  if not FLAGS.is_parsed():
    # Flags not parsed yet: we cannot know where logs should go, so fall
    # back to stderr and warn exactly once.
    global _warn_preinit_stderr
    if _warn_preinit_stderr:
      sys.stderr.write(
          'WARNING: Logging before flag parsing goes to stderr.\n')
      _warn_preinit_stderr = False
    self._log_to_stderr(record)
  elif FLAGS['logtostderr'].value:
    self._log_to_stderr(record)
  else:
    super(PythonHandler, self).emit(record)
  # Mirror to stderr when requested or severe enough, unless the primary
  # stream already is stderr (avoid duplicate output).
  stderr_threshold = converter.string_to_standard(
      FLAGS['stderrthreshold'].value)
  if ((FLAGS['alsologtostderr'].value or level >= stderr_threshold) and
      self.stream != sys.stderr):
    self._log_to_stderr(record)
  # A FATAL record terminates the process after flushing buffers.
  if _is_absl_fatal_record(record):
    self.flush()
    os.abort() | Prints a record out to some streams.
If FLAGS.logtostderr is set, it will print to sys.stderr ONLY.
If FLAGS.alsologtostderr is set, it will print to sys.stderr.
If FLAGS.logtostderr is not set, it will log to the stream
associated with the current thread.
Args:
record: logging.LogRecord, the record to emit. | juraj-google-style |
def __init__(self, file_pattern, interval=360, stop_timestamp=MAX_TIMESTAMP):
    """Watches a directory for updates to files matching a file pattern.

    Args:
        file_pattern: Local file path or GCS ``gs://`` path to read from.
            May contain glob characters (``*``, ``?``, and ``[...]`` sets).
        interval: Interval, in seconds, at which to check for files
            matching ``file_pattern``.
        stop_timestamp: Timestamp after which no more files will be checked.
    """
    self.stop_timestamp = stop_timestamp
    self.interval = interval
    self.file_pattern = file_pattern
Args:
file_pattern: The file path to read from as a local file path or a
GCS ``gs://`` path. The path can contain glob characters
(``*``, ``?``, and ``[...]`` sets).
interval: Interval at which to check for files matching file_pattern
in seconds.
stop_timestamp: Timestamp after which no more files will be checked.
**Note**:
1. Any previously used filenames cannot be reused. If a file is added
or updated to a previously used filename, this transform will ignore
that update. To trigger a model update, always upload a file with
unique name.
2. Initially, before the pipeline startup time, WatchFilePattern expects
at least one file present that matches the file_pattern.
3. This transform is supported in streaming mode since
MatchContinuously produces an unbounded source. Running in batch
mode can lead to undesired results or result in pipeline being stuck. | github-repos |
def compile_expression(structdef_url: str, fhir_context: context.FhirPathContext, fhir_path: str) -> python_compiled_expressions.PythonCompiledExpression:
    """Compiles a FHIRPath expression for the given resource type.

    Args:
        structdef_url: the URL of the FHIR StructureDefinition to use.
        fhir_context: loader used to resolve FHIR structure definitions and
            their dependencies.
        fhir_path: the FHIRPath expression to be run on the resource.

    Returns:
        A PythonCompiledExpression representing the given FHIRPath string
        that can be evaluated against the target resource.
    """
    compiled = python_compiled_expressions.PythonCompiledExpression.compile(
        fhir_path, _PRIMITIVE_HANDLER, structdef_url, fhir_context)
    return compiled
Args:
structdef_url: the URL of the FHIR StructureDefinition to use.
fhir_context: a DefinitionLoader used to load FHIR structure definitions and
dependencies.
fhir_path: a FHIRPath expression to be run on the resource
Returns:
A PythonCompiledExpression representing the given FHIRPath string that can
be evaluated against the target resource. | github-repos |
def number(self):
    """Return this commit's number.

    This is the same as the total number of commits in history up until
    this commit. The value can be useful in some CI scenarios as it allows
    tracking progress on any given branch (although two commits on
    different branches can share the same number).

    Returns:
        int: The commit number/index.
    """
    # Count the one-line-per-commit log entries up to this sha1.
    history = shell.run(
        'git log --oneline {}'.format(self.sha1),
        capture=True,
        never_pretend=True,
    ).stdout.strip()
    return len(history.splitlines())
This is the same as the total number of commits in history up until
this commit.
This value can be useful in some CI scenarios as it allows to track
progress on any given branch (although there can be two commits with the
same number existing on different branches).
Returns:
int: The commit number/index. | codesearchnet |
def get_geostationary_mask(area):
    """Compute a mask of the Earth's shape seen by a geostationary satellite.

    Args:
        area (pyresample.geometry.AreaDefinition): Corresponding area
            definition.

    Returns:
        Boolean mask, True inside the Earth's shape, False outside.
    """
    sat_height = area.proj_dict['h']
    # Angular extents of the visible disk, scaled by satellite height.
    max_x, max_y = get_geostationary_angle_extent(area)
    max_x *= sat_height
    max_y *= sat_height
    proj_x, proj_y = area.get_proj_coords_dask()
    # Points inside the ellipse (x/max_x)^2 + (y/max_y)^2 <= 1 see the Earth.
    return (proj_x / max_x) ** 2 + (proj_y / max_y) ** 2 <= 1
Args:
area (pyresample.geometry.AreaDefinition) : Corresponding area
definition
Returns:
Boolean mask, True inside the earth's shape, False outside. | codesearchnet |
def set_running_in_gce(worker_executing_project):
    """For internal use only; no backwards-compatibility guarantees.

    Informs the authentication library that we are running in GCE. When
    running in GCE we have the option of using the VM metadata credentials
    for authentication to Google services.

    Args:
        worker_executing_project: The project running the workflow. This
            information comes from worker startup information.
    """
    global is_running_in_gce, executing_project
    executing_project = worker_executing_project
    is_running_in_gce = True
Informs the authentication library that we are running in GCE.
When we are running in GCE, we have the option of using the VM metadata
credentials for authentication to Google services.
Args:
worker_executing_project: The project running the workflow. This information
comes from worker startup information. | github-repos |
def period_from_dict(period: Dict[int, List[int]]) -> dateslib.PeriodTensor:
    """Utility to convert a dictionary of periods to a PeriodTensor.

    Args:
        period: A dictionary with keys "type" (which corresponds to the
            proto type of the period, see `period_pb2.Period`) and
            "frequency".

    Returns:
        An instance of the `PeriodTensor`.
    """
    type_name = period_pb2.PeriodType.Name(period['type'])
    return dateslib.PeriodTensor(period['frequency'],
                                 dateslib.PeriodType[type_name])
Args:
period: A dictionary with keys "type" (which corresponds to the proto type
of the period (see `period_pb2.Period`)) and "frequency".
Returns:
An instance of the `PeriodTensor`. | github-repos |
def _WriteRow(self, output_writer, values, in_bold=False):
row_strings = []
for value_index, value_string in enumerate(values):
padding_size = self._column_sizes[value_index] - len(value_string)
padding_string = ' ' * padding_size
row_strings.extend([value_string, padding_string])
row_strings.pop()
row_strings = ''.join(row_strings)
if in_bold and not win32console:
row_strings = '\x1b[1m{0:s}\x1b[0m'.format(row_strings)
output_writer.Write('{0:s}\n'.format(row_strings)) | Writes a row of values aligned with the width to the output writer.
Args:
output_writer (CLIOutputWriter): output writer.
values (list[object]): values.
in_bold (Optional[bool]): True if the row should be written in bold. | juraj-google-style |
def uninstall(pkg):
    """Uninstall the specified flatpak package.

    Args:
        pkg (str): The package name.

    Returns:
        dict: The ``result`` (True on success, False on failure) and
        ``output`` (stripped stderr on failure, stripped stdout on success).
        The ``stderr``/``stdout`` keys are also populated as before for
        backwards compatibility.

    CLI Example:

    .. code-block:: bash

        salt '*' flatpak.uninstall org.gimp.GIMP
    """
    ret = {'result': None, 'output': ''}

    out = __salt__['cmd.run_all'](FLATPAK_BINARY_NAME + ' uninstall ' + pkg)

    if out['retcode'] and out['stderr']:
        output = out['stderr'].strip()
        ret['stderr'] = output
        ret['result'] = False
    else:
        output = out['stdout'].strip()
        ret['stdout'] = output
        ret['result'] = True
    # Bug fix: the documented 'output' key was initialized but never
    # populated; fill it with the command output actually captured above.
    ret['output'] = output
    return ret
Args:
pkg (str): The package name.
Returns:
dict: The ``result`` and ``output``.
CLI Example:
.. code-block:: bash
salt '*' flatpak.uninstall org.gimp.GIMP | codesearchnet |
def get_song_type(self, cache=True):
    """Get the types of a song.

    Args:
        cache (boolean): A boolean indicating whether or not the cached
            value should be used (if available). Defaults to True.

    Returns:
        A list of strings, each representing a song type ('christmas', for
        example), or an empty list if the song has no types.
    """
    if not (cache and 'song_type' in self.cache):
        response = self.get_attribute('profile', bucket='song_type')
        # Bug fix: dict.has_key() was removed in Python 3; use 'in' instead.
        if 'song_type' in response['songs'][0]:
            self.cache['song_type'] = response['songs'][0]['song_type']
        else:
            self.cache['song_type'] = []
    return self.cache['song_type']
Args:
cache (boolean): A boolean indicating whether or not the cached value should be used
(if available). Defaults to True.
Returns:
A list of strings, each representing a song type: 'christmas', for example.
Example:
>>> s = song.Song('SOQKVPH12A58A7AF4D')
>>> s.song_type
[u'christmas']
>>> | juraj-google-style |
def _build_js(inputs, outputs, name, implementation, support_code):
input_fields = json.dumps([f[0] for f in inputs])
output_fields = [{'name': f[0], 'type': f[1]} for f in outputs]
output_fields = json.dumps(output_fields, sort_keys=True)
if (support_code is None):
support_code = ''
return "{code}\n{name}={implementation};\nbigquery.defineFunction('{name}', {inputs}, {outputs}, {name});".format(code=support_code, name=name, implementation=implementation, inputs=str(input_fields), outputs=str(output_fields)) | Creates a BigQuery SQL UDF javascript object.
Args:
inputs: a list of (name, type) tuples representing the schema of input.
outputs: a list of (name, type) tuples representing the schema of the output.
name: the name of the function
implementation: a javascript function defining the UDF logic.
support_code: additional javascript code that the function can use. | codesearchnet |
def annotate_source_against_profile(profile_data, source_file_path, node_name_filter=None, op_type_filter=None, min_line=None, max_line=None):
    """Annotate a Python source file with profiling information at each line.

    (The annotation doesn't change the source file itself.)

    Args:
        profile_data: (`list` of `ProfileDatum`) A list of `ProfileDatum`.
        source_file_path: (`str`) Path to the source file being annotated.
        node_name_filter: Regular expression to filter by node name.
        op_type_filter: Regular expression to filter by op type.
        min_line: (`None` or `int`) The 1-based line to start annotating the
            source file from (inclusive).
        max_line: (`None` or `int`) The 1-based line number to end the
            annotation at (exclusive).

    Returns:
        A `dict` mapping 1-based line number to
        `profiling.AggregateProfile`.
    """
    source_file_path = _norm_abs_path(source_file_path)
    name_pattern = re.compile(node_name_filter) if node_name_filter else None
    type_pattern = re.compile(op_type_filter) if op_type_filter else None

    summaries = {}
    for datum in profile_data:
        # Keep only data that belongs to the requested source file.
        if not datum.file_path:
            continue
        if _norm_abs_path(datum.file_path) != source_file_path:
            continue
        # Optional line-range filter.
        if min_line is not None and datum.line_number < min_line:
            continue
        if max_line is not None and datum.line_number >= max_line:
            continue
        # Optional node-name and op-type filters.
        if name_pattern and not name_pattern.match(
            datum.node_exec_stats.node_name):
            continue
        if type_pattern and not type_pattern.match(datum.op_type):
            continue
        line = datum.line_number
        if line in summaries:
            summaries[line].add(datum)
        else:
            summaries[line] = profiling.AggregateProfile(datum)
    return summaries
(The annotation doesn't change the source file itself.)
Args:
profile_data: (`list` of `ProfileDatum`) A list of `ProfileDatum`.
source_file_path: (`str`) Path to the source file being annotated.
node_name_filter: Regular expression to filter by node name.
op_type_filter: Regular expression to filter by op type.
min_line: (`None` or `int`) The 1-based line to start annotate the source
file from (inclusive).
max_line: (`None` or `int`) The 1-based line number to end the annotation
at (exclusive).
Returns:
A `dict` mapping 1-based line number to a the namedtuple
`profiling.LineOrFuncProfileSummary`. | github-repos |
def noise_set_type(n: tcod.noise.Noise, typ: int) -> None:
    """Set a Noise object's default noise algorithm.

    Args:
        n: The Noise instance to modify.
        typ (int): Any NOISE_* constant.
    """
    setattr(n, 'algorithm', typ)
Args:
typ (int): Any NOISE_* constant. | codesearchnet |
def AddSpecification(self, specification):
    """Adds a format specification.

    Each signature of the specification is registered in the signature map
    under the identifier '<specification id>:<signature index>' and points
    back at the specification.

    Args:
        specification (FormatSpecification): format specification.

    Raises:
        KeyError: if the store already contains a specification with
            the same identifier.
    """
    spec_identifier = specification.identifier
    if spec_identifier in self._format_specifications:
        raise KeyError(
            'Format specification {0:s} is already defined in store.'.format(
                spec_identifier))

    self._format_specifications[spec_identifier] = specification

    for signature in specification.signatures:
        # The index grows with the map, so identifiers are unique per store.
        signature_identifier = '{0:s}:{1:d}'.format(
            spec_identifier, len(self._signature_map))
        if signature_identifier in self._signature_map:
            raise KeyError('Signature {0:s} is already defined in map.'.format(
                signature_identifier))
        signature.SetIdentifier(signature_identifier)
        # NOTE: the map stores the owning specification, not the signature.
        self._signature_map[signature_identifier] = specification
Args:
specification (FormatSpecification): format specification.
Raises:
KeyError: if the store already contains a specification with
the same identifier. | juraj-google-style |
def _AddDependencyEdges(self, rdf_artifact):
artifact_dependencies = artifact_registry.GetArtifactPathDependencies(
rdf_artifact)
if artifact_dependencies:
for attribute in artifact_dependencies:
self._AddEdge(attribute, rdf_artifact.name)
else:
self.reachable_nodes.add(rdf_artifact.name)
self.graph[rdf_artifact.name].is_provided = True | Add an edge for every dependency of the given artifact.
This method gets the attribute names for a given artifact and for every
attribute it adds a directed edge from the attribute node to the artifact
node. If an artifact does not have any dependencies it is added to the set
of reachable nodes.
Args:
rdf_artifact: The artifact object. | juraj-google-style |
def hwvtep_set_overlaygw_type(self, **kwargs):
    """Set the gateway type on an overlay gateway.

    Args:
        name (str): gateway-name
        type (str): gateway-type
        callback (function): A function executed upon completion of the
            method.

    Returns:
        Return value of `callback`.

    Raises:
        None
    """
    gateway_name = kwargs.pop('name')
    gateway_type = kwargs.pop('type')
    # Build the overlay-gateway gw-type configuration element and push it
    # through the device callback.
    builder = getattr(self._brocade_tunnels, 'overlay_gateway_gw_type')
    config = builder(name=gateway_name, gw_type=gateway_type)
    return self._callback(config)
Args:
name (str): gateway-name
type (str): gateway-type
callback (function): A function executed upon completion of the
method.
Returns:
Return value of `callback`.
Raises:
None | juraj-google-style |
def CaptureNamedVariable(self, name, value, depth, limits):
    """Appends name to the product of CaptureVariable.

    Names that carry a ``__dict__`` (arbitrary objects rather than simple
    keys) are labeled by their id() to keep the label compact and hashable.

    Args:
        name: name of the variable.
        value: data to capture.
        depth: nested depth of dictionaries and vectors so far.
        limits: Per-object limits for capturing variable data.

    Returns:
        Formatted captured data as per Variable proto, including 'name'.
    """
    if hasattr(name, '__dict__'):
        label = str(id(name))
    else:
        label = str(name)
    self._total_size += len(label)

    # Visibility check wins; only capture the value when it returns nothing.
    captured = self.CheckDataVisiblity(value)
    if not captured:
        captured = self.CaptureVariable(value, depth, limits)
    captured['name'] = label
    return captured
Args:
name: name of the variable.
value: data to capture
depth: nested depth of dictionaries and vectors so far.
limits: Per-object limits for capturing variable data.
Returns:
Formatted captured data as per Variable proto with name. | codesearchnet |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.