code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3
values |
|---|---|---|
def is_reading_in_conditional_node(self, variable):
variables_read = [n.variables_read for n in self.nodes if n.contains_if()]
variables_read = [item for sublist in variables_read for item in sublist]
return variable in variables_read | Check if the function reads the variable in a IF node
Args:
variable (Variable):
Returns:
bool: True if the variable is read | juraj-google-style |
def set_led(self, colorcode):
data = []
data.append(0x0A)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(LED_CONTROL_RAM)
data.append(0x01)
data.append(colorcode)
send_data(data) | Set the LED Color of Herkulex
Args:
colorcode (int): The code for colors
(0x00-OFF
0x02-BLUE
0x03-CYAN
0x04-RED
0x05-ORANGE
0x06-VIOLET
0x07-WHITE | juraj-google-style |
def get_sample(self, md5):
if len(md5) < 32:
md5 = self.get_full_md5(md5, self.sample_collection)
sample_info = self.database[self.sample_collection].find_one({'md5': md5})
if not sample_info:
return None
try:
gri... | Get the sample from the data store.
This method first fetches the data from datastore, then cleans it for serialization
and then updates it with 'raw_bytes' item.
Args:
md5: The md5 digest of the sample to be fetched from datastore.
Returns:
The sample dictionary or None | juraj-google-style |
def _create_unicode_map():
unicode_map = {}
for (beta, uni) in _map.BETACODE_MAP.items():
norm = unicodedata.normalize('NFC', uni)
unicode_map[norm] = beta
unicode_map[uni] = beta
final_sigma_norm = unicodedata.normalize('NFC', _FINAL_LC_SIGMA)
unicode_map[final_sigma_norm] = 's'... | Create the inverse map from unicode to betacode.
Returns:
The hash map to convert unicode characters to the beta code representation. | codesearchnet |
def __init__(self, clustering_algorithm, n_clusters: int, cluster_args: dict, checkpoints_path: str, batch_size: int=1024, is_batched: bool=False):
super().__init__()
self.clustering_algorithm = clustering_algorithm
self.n_clusters = n_clusters
self.batch_size = batch_size
self.cluster_args = cluste... | Clustering transformation itself, it first preprocesses the data,
then it applies the clustering transformation step by step on each
of the batches.
Example Usage::
pcoll | OnlineClustering(
clustering_algorithm=OnlineKMeansClustering
batch_size=1024,
n_clusters=6
cluster_args={}))
Args:
clustering_algorithm: Cluste... | github-repos |
def _construct_context_for_args(args):
global_default_context = google.datalab.Context.default()
config = {}
for key in global_default_context.config:
config[key] = global_default_context.config[key]
billing_tier_arg = args.get('billing', None)
if billing_tier_arg:
config['bigquery_b... | Construct a new Context for the parsed arguments.
Args:
args: the dictionary of magic arguments.
Returns:
A new Context based on the current default context, but with any explicitly
specified arguments overriding the default's config. | codesearchnet |
def transform(geom, to_sref):
try:
geom = getattr(geom, 'polygon', Envelope(geom).polygon)
except (TypeError, ValueError):
pass
else:
geom.AssignSpatialReference(to_sref)
try:
geom_sref = geom.GetSpatialReference()
except AttributeError:
return trans... | Returns a transformed Geometry.
Arguments:
geom -- any coercible Geometry value or Envelope
to_sref -- SpatialReference or EPSG ID as int | juraj-google-style |
def remove(self, name):
try:
del self.data[name]
except (ValueError, KeyError):
import warnings
warnings.warn(("Unable to find column '%s' in data source" % name)) | Remove a column of data.
Args:
name (str) : name of the column to remove
Returns:
None
.. note::
If the column name does not exist, a warning is issued. | codesearchnet |
def _ConvertFile(cls, path):
with open(path) as f:
src = f.read()
short_path = os.path.basename(path)
assertions = 0
for assertion_re in (cls.ASSERTION_RE, cls.MOCK_METHOD_CALL_RE):
start = 0
match = assertion_re.search(src, start)
while match:
assertion_start... | Converts a single file from unittest to PyTruth.
Args:
path: string, the path of file to be converted.
Returns:
Boolean: True if the file was successfully converted, otherwise False. | github-repos |
def get_transaction(self, transaction_id):
payload = self._get_data_by_id(transaction_id, 'commit_store_get_transaction')
txn = Transaction()
txn.ParseFromString(payload)
return txn | Returns a Transaction object from the block store by its id.
Params:
transaction_id (str): The header_signature of the desired txn
Returns:
Transaction: The specified transaction
Raises:
ValueError: The transaction is not in the block store | codesearchnet |
def write_events(self, events):
with self.write_lock, self.conn:
self.conn.executemany(
'INSERT INTO state_events('
' identifier, source_statechange_id, log_time, data'
') VALUES(?, ?, ?, ?)',
events,
) | Save events.
Args:
state_change_identifier: Id of the state change that generate these events.
events: List of Event objects. | juraj-google-style |
def returns_true_or_raises(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
ret = f(*args, **kwargs)
if (ret is not True):
raise RuntimeError(('Unexpected return value %r' % ret))
return True
return wrapped | A safety net.
Decorator for functions that are only allowed to return True or raise
an exception.
Args:
f: A function whose only expected return value is True.
Returns:
A wrapped functions whose guaranteed only return value is True. | codesearchnet |
def _UpdateClassDictForParamTestCase(dct, id_suffix, name, iterator):
for idx, func in enumerate(iterator):
assert callable(func), 'Test generators must yield callables, got %r' % (
func,)
if getattr(func, '__x_use_name__', False):
new_name = func.__name__
else:
new_name = '%s%s%d' ... | Adds individual test cases to a dictionary.
Args:
dct: The target dictionary.
id_suffix: The dictionary for mapping names to test IDs.
name: The original name of the test case.
iterator: The iterator generating the individual test cases. | juraj-google-style |
def FromId(os_id, error_on_unknown=True):
if not os_id:
return None
for operating_system in OperatingSystem._ALL:
if operating_system.id == os_id:
return operating_system
if error_on_unknown:
raise InvalidEnumValue(os_id, 'Operating System', [value.id for value in Operati... | Gets the enum corresponding to the given operating system id.
Args:
os_id: str, The operating system id to parse
error_on_unknown: bool, True to raise an exception if the id is unknown,
False to just return None.
Raises:
InvalidEnumValue: If the given value cannot be parsed.
Returns:
OperatingSystemTuple, One of the... | github-repos |
def nav_to_vcf_dir(ftp, build):
if (build == 'b37'):
ftp.cwd(DIR_CLINVAR_VCF_B37)
elif (build == 'b38'):
ftp.cwd(DIR_CLINVAR_VCF_B38)
else:
raise IOError('Genome build not recognized.') | Navigate an open ftplib.FTP to appropriate directory for ClinVar VCF files.
Args:
ftp: (type: ftplib.FTP) an open connection to ftp.ncbi.nlm.nih.gov
build: (type: string) genome build, either 'b37' or 'b38' | codesearchnet |
def _AddWebPageCriteria(client, ad_group_id):
ad_group_criterion_service = client.GetService('AdGroupCriterionService',
version='v201809')
operations = [{
'operator': 'ADD',
'operand': {
'xsi_type': 'BiddableAdGroupCriterion',
... | Adds a web page criterion to target Dynamic Search Ads.
Args:
client: an AdWordsClient instance.
ad_group_id: an integer ID of the ad group the criteria is being added to. | juraj-google-style |
def set_lacp_mode(self, name, mode):
if mode not in ['on', 'passive', 'active']:
return False
grpid = re.search(r'(\d+)', name).group()
remove_commands = list()
add_commands = list()
for member in self.get_members(name):
remove_commands.append(... | Configures the LACP mode of the member interfaces
Args:
name(str): The Port-Channel interface name to configure the
LACP mode
mode(str): The LACP mode to configure the member interfaces to.
Valid values are 'on, 'passive', 'active'
Returns:
True if the operation succeeds otherwise False | juraj-google-style |
def sync(self, since=None, timeout_ms=30000, filter=None,
full_state=None, set_presence=None):
request = {
"timeout": int(timeout_ms)
}
if since:
request["since"] = since
if filter:
request["filter"] = filter
... | Perform a sync request.
Args:
since (str): Optional. A token which specifies where to continue a sync from.
timeout_ms (int): Optional. The time in milliseconds to wait.
filter (int|str): Either a Filter ID or a JSON string.
full_state (bool): Return the full state for every room the user has joined
Defaults to false.... | juraj-google-style |
def email_has_role(self, email, role_name, uuid=None):
mbr_data = self.get_membership(uuid=uuid)
docs = []
try:
docs = mbr_data['response']['docs']
except KeyError:
failure_message = 'KeyError in membership data - got {0}'.format(mbr_data)
log.exception(failure_message)
r... | Determine if an email is associated with a role.
Args:
email (str): user email
role_name (str): user role
uuid (str): optional uuid. defaults to self.cuuid
Raises:
PyLmodUnexpectedData: Unexpected data was returned.
requests.RequestException: Exception connection error
Returns:
bool: True or False if email has role_... | codesearchnet |
def upload(target):
log.info('Uploading to pypi server <33>{}'.format(target))
with conf.within_proj_dir():
shell.run('python setup.py sdist register -r "{}"'.format(target))
shell.run('python setup.py sdist upload -r "{}"'.format(target)) | Upload the release to a pypi server.
TODO: Make sure the git directory is clean before allowing a release.
Args:
target (str):
pypi target as defined in ~/.pypirc | codesearchnet |
def codemirror_field_js_bundle(field):
manifesto = CodemirrorAssetTagRender()
manifesto.register_from_fields(field)
try:
bundle_name = manifesto.js_bundle_names()[0]
except IndexError:
msg = "Given field with configuration name '{}' does not have a Javascript bundle name"
raise C... | Filter to get CodeMirror Javascript bundle name needed for a single field.
Example:
::
{% load djangocodemirror_tags %}
{{ form.myfield|codemirror_field_js_bundle }}
Arguments:
field (django.forms.fields.Field): A form field that contains a widget
:class:`djangocodemirror.widget.CodeMirrorWidget`.
Raises:
CodeMirro... | codesearchnet |
def stringize(
self,
rnf_profile,
):
coor_width = max(rnf_profile.coor_width, len(str(self.left)), len(str(self.right)))
return "({},{},{},{},{})".format(
str(self.genome_id).zfill(rnf_profile.genome_id_width),
str(self.chr_id).zfill(rnf_profile.chr_... | Create RNF representation of this segment.
Args:
rnf_profile (rnftools.rnfformat.RnfProfile): RNF profile (with widths). | juraj-google-style |
def _format_batch_statuses(statuses, batch_ids, tracker):
proto_statuses = []
for batch_id in batch_ids:
if (statuses[batch_id] == client_batch_submit_pb2.ClientBatchStatus.INVALID):
invalid_txns = tracker.get_invalid_txn_info(batch_id)
for txn_info in invalid_txns:
... | Takes a statuses dict and formats it for transmission with Protobuf and
ZMQ.
Args:
statuses (dict of int): Dict with batch ids as the key, status as value
batch_ids (list of str): The batch ids in their original order
tracker (BatchTracker): A batch tracker with access to invalid info | codesearchnet |
def add_migrations(self, migrations):
if self.__closed:
raise MigrationSessionError("Can't change applied session")
self._to_apply.extend(migrations) | Add migrations to be applied.
Args:
migrations: a list of migrations to add of the form [(app, migration_name), ...]
Raises:
MigrationSessionError if called on a closed MigrationSession | codesearchnet |
def get_victim_email_asset(self, main_type, sub_type, unique_id, asset_id, params=None):
params = params or {}
return self.victim_email_asset(main_type, sub_type, unique_id, asset_id, params=params) | Args:
main_type:
sub_type:
unique_id:
asset_id:
params:
Return: | juraj-google-style |
def merge(self, x=None, y=None, ildj_map=None, kwargs=None, mapping=None):
if mapping is None:
mapping = _Mapping(x=x, y=y, ildj_map=ildj_map, kwargs=kwargs)
elif any((arg is not None for arg in [x, y, ildj_map, kwargs])):
raise ValueError('Cannot simultaneously specify mapping and individual ar... | Returns new _Mapping with args merged with self.
Args:
x: `Tensor`. Forward.
y: `Tensor`. Inverse.
ildj_map: `Dictionary`. This is a mapping from event_ndims to a `Tensor`
representing the inverse log det jacobian.
kwargs: Python dictionary. Extra args supplied to
forward/inverse/etc functions.
mapping: Instance of _M... | github-repos |
def capture_widget(widget, path=None):
if use_qt5:
pixmap = widget.grab()
else:
pixmap = QtGui.QPixmap.grabWidget(widget)
if path:
pixmap.save(path)
else:
image_buffer = QtCore.QBuffer()
image_buffer.open(QtCore.QIODevice.ReadWrite)
pixmap.save(image_buffe... | Grab an image of a Qt widget
Args:
widget: The Qt Widget to capture
path (optional): The path to save to. If not provided - will return image data.
Returns:
If a path is provided, the image will be saved to it.
If not, the PNG buffer will be returned. | codesearchnet |
def _peek(self, chars=1):
line = self._socket.recv(chars, socket.MSG_PEEK)
logger.debug(('Server sent (peek): ' + line.rstrip()))
return line | Peek at the data in the server response.
Peeking should only be done when the response can be predicted.
Make sure that the socket will not block by requesting too
much data from it while peeking.
Args:
chars -- the number of characters to peek. | codesearchnet |
def GetControlSequenceLen(self, buf):
if not self._csi or not buf.startswith(self._csi):
return 0
n = 0
for c in buf:
n += 1
if c.isalpha():
break
return n | Returns the control sequence length at the beginning of buf.
Used in display width computations. Control sequences have display width 0.
Args:
buf: The string to check for a control sequence.
Returns:
The control sequence length at the beginning of buf or 0 if buf does not
start with a control sequence. | github-repos |
def set_style(self, column, style):
column_idx = None
while (len(self.headers) > len(self.__style_list)):
self.__style_list.append(None)
if isinstance(column, six.integer_types):
column_idx = column
elif isinstance(column, six.string_types):
try:
column_idx = self.hea... | Set |Style| for a specific column.
Args:
column (|int| or |str|):
Column specifier. column index or header name correlated with the column.
style (|Style|):
Style value to be set to the column.
Raises:
ValueError: If the column specifier is invalid. | codesearchnet |
def AddKeyByPath(self, key_path, registry_key):
if not key_path.startswith(definitions.KEY_PATH_SEPARATOR):
raise ValueError('Key path does not start with: {0:s}'.format(
definitions.KEY_PATH_SEPARATOR))
if not self._root_key:
self._root_key = FakeWinRegistryKey(self._key_path_prefix... | Adds a Windows Registry key for a specific key path.
Args:
key_path (str): Windows Registry key path to add the key.
registry_key (WinRegistryKey): Windows Registry key.
Raises:
KeyError: if the subkey already exists.
ValueError: if the Windows Registry key cannot be added. | juraj-google-style |
def build_signature_def(inputs=None, outputs=None, method_name=None, defaults=None):
signature_def = meta_graph_pb2.SignatureDef()
if inputs is not None:
for item in inputs:
signature_def.inputs[item].CopyFrom(inputs[item])
if outputs is not None:
for item in outputs:
... | Utility function to build a SignatureDef protocol buffer.
Args:
inputs: Inputs of the SignatureDef defined as a proto map of string to
tensor info.
outputs: Outputs of the SignatureDef defined as a proto map of string to
tensor info.
method_name: Method name of the SignatureDef as a string.
defaults: Defaults of the S... | github-repos |
def max_range(ranges, combined=True):
try:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
values = [tuple(np.NaN if v is None else v for v in r) for r in ranges]
if pd and any(isinstance(v, datetime_types) and n... | Computes the maximal lower and upper bounds from a list bounds.
Args:
ranges (list of tuples): A list of range tuples
combined (boolean, optional): Whether to combine bounds
Whether range should be computed on lower and upper bound
independently or both at once
Returns:
The maximum range as a single tuple | juraj-google-style |
def __init__(self, api_key: str, config: interfaces.Config | None=None):
self._config = config or interfaces.Config()
self._genai_processor = genai_model.GenaiModel(api_key=api_key, model_name=self._config.topic_researcher_model_name, generate_content_config=types.GenerateContentConfig(tools=self._config.enable... | Initializes the TopicResearcher.
Args:
api_key: The API key to use for the GenAI API.
config: The agent configuration. | github-repos |
def _file_changed_nilrt(full_filepath):
rs_state_dir = '/var/lib/salt/restartcheck_state'
base_filename = os.path.basename(full_filepath)
timestamp_file = os.path.join(rs_state_dir, '{0}.timestamp'.format(base_filename))
md5sum_file = os.path.join(rs_state_dir, '{0}.md5sum'.format(base_filename))
if... | Detect whether a file changed in an NILinuxRT system using md5sum and timestamp
files from a state directory.
Returns:
- False if md5sum/timestamp state files don't exist
- True/False depending if ``base_filename`` got modified/touched | codesearchnet |
def poll(
self,
transaction_hash: bytes,
):
if len(transaction_hash) != 32:
raise ValueError(
'transaction_hash must be a 32 byte hash',
)
transaction_hash = encode_hex(transaction_hash)
... | Wait until the `transaction_hash` is applied or rejected.
Args:
transaction_hash: Transaction hash that we are waiting for. | juraj-google-style |
def cancel(self, request, *args, **kwargs):
status = self.get_object()
status.cancel()
serializer = StatusSerializer(status, context={'request': request})
return Response(serializer.data) | Cancel the task associated with the specified status record.
Arguments:
request (Request): A POST including a task status record ID
Returns
-------
Response: A JSON response indicating whether the cancellation succeeded or not | juraj-google-style |
def _api_scrape(json_inp, ndx):
try:
headers = json_inp['resultSets'][ndx]['headers']
values = json_inp['resultSets'][ndx]['rowSet']
except KeyError:
try:
headers = json_inp['resultSet'][ndx]['headers']
values = json_inp['resultSet'][ndx]['... | Internal method to streamline the getting of data from the json
Args:
json_inp (json): json input from our caller
ndx (int): index where the data is located in the api
Returns:
If pandas is present:
DataFrame (pandas.DataFrame): data set from ndx within the
API's json
else:
A dictionary of both headers and values fro... | juraj-google-style |
def chmod_r(root: str, permission: int) -> None:
os.chmod(root, permission)
for dirpath, dirnames, filenames in os.walk(root):
for d in dirnames:
os.chmod(os.path.join(dirpath, d), permission)
for f in filenames:
os.chmod(os.path.join(dirpath, f), permission) | Recursive ``chmod``.
Args:
root: directory to walk down
permission: e.g. ``e.g. stat.S_IWUSR`` | juraj-google-style |
def get_tensor_sharding(tensor):
if isinstance(tensor, resource_variable_ops.BaseResourceVariable) and context.xla_sharding_for_resource_variables_enabled():
sharding = tensor._get_xla_sharding()
if sharding is None:
return None
else:
return sharding.SerializeToString... | Returns sharding attribute of a Tensor.
Args:
tensor: a Tensor.
Returns:
The attribute representing XLA sharding on tensor's op. | github-repos |
def update_tag(self, tag_name, description=None, custom_properties=None, **kwargs):
data = {'description': (description or ''), 'customProperties': (custom_properties or {})}
resp = self._put(self._u(self._TAG_ENDPOINT_SUFFIX, tag_name), data=data, **kwargs)
resp.raise_for_status()
return resp.json() | update a tag by name
Args:
tag_name (string): name of tag to update
description (optional[string]): a description
custom_properties (optional[dict]): dictionary of custom properties | codesearchnet |
def split_by_sparsity(values):
dense_values = []
dense_indices = []
sparse_values = []
sparse_indices = []
for i, v in enumerate(values):
if is_indexed_slices(v):
sparse_values.append(v)
sparse_indices.append(i)
else:
dense_values.append(v)
... | Split values into dense and sparse values.
Args:
values: a list of tensors or `PerReplica`s.
Returns:
Four lists:
a list of dense values, a list of their indices in `values` and
a list of sparse values, a list of their indices in `values`. | github-repos |
def standardize_tuple(value, n, name, allow_zero=False):
error_msg = f'The `{name}` argument must be a tuple of {n} integers. Received {name}={value}'
if isinstance(value, int):
value_tuple = (value,) * n
else:
try:
value_tuple = tuple(value)
except TypeError:
... | Transforms non-negative/positive integer/integers into an integer tuple.
Args:
value: int or iterable of ints. The value to validate and convert.
n: int. The size of the tuple to be returned.
name: string. The name of the argument being validated, e.g. "strides"
or "kernel_size". This is only used to format error mess... | github-repos |
def halt(self):
if self.is_closed:
_std_log.info('Disconnect requested, but AMQP connection already gone')
self._channel = None
return
_std_log.info('Waiting for %d consumer(s) to finish processing before halting', len(self._consumers))
pending_cancels = []
for c in list(self._co... | Signal to consumers they should stop after finishing any messages
currently being processed, then close the connection.
Returns:
defer.Deferred: fired when all consumers have successfully stopped
and the connection is closed. | codesearchnet |
def quote_xml(text):
text = _coerce_unicode(text)
if text.startswith(CDATA_START):
return text
return saxutils.escape(text) | Format a value for display as an XML text node.
Returns:
Unicode string (str on Python 3, unicode on Python 2) | codesearchnet |
def removeColumns(self, columnNames):
model = self.tableView.model()
if (model is not None):
model.removeDataFrameColumns(columnNames)
self.removeColumnButton.setChecked(False) | Removes one or multiple columns from the model.
This method is also a slot.
Args:
columnNames (list): A list of columns, which shall
be removed from the model. | codesearchnet |
def _all_number_groups_are_exactly_present(numobj, normalized_candidate, formatted_number_groups):
candidate_groups = re.split(NON_DIGITS_PATTERN, normalized_candidate)
if numobj.extension is not None:
candidate_number_group_index = len(candidate_groups) - 2
else:
candidate_number_... | Returns True if the groups of digits found in our candidate phone number match our
expectations.
Arguments:
numobj -- the original number we found when parsing
normalized_candidate -- the candidate number, normalized to only contain ASCII digits,
but with non-digits (spaces etc) retained
expected_number_groups -- the ... | juraj-google-style |
def find_indices(lst, element):
result = []
offset = (- 1)
while True:
try:
offset = lst.index(element, (offset + 1))
except ValueError:
return result
result.append(offset) | Returns the indices for all occurrences of 'element' in 'lst'.
Args:
lst (list): List to search.
element: Element to find.
Returns:
list: List of indices or values | codesearchnet |
def get_all_datasets(cls, configuration=None, page_size=1000, check_duplicates=True, **kwargs):
dataset = Dataset(configuration=configuration)
dataset['id'] = 'all datasets'
total_rows = kwargs.get('limit', cls.max_int)
start = kwargs.get('offset', 0)
all_datasets = None
attempts = 0
while (... | Get all datasets in HDX
Args:
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
page_size (int): Size of page to return. Defaults to 1000.
check_duplicates (bool): Whether to check for duplicate datasets. Defaults to True.
**kwargs: See below
limit (int): Number of rows to r... | codesearchnet |
def resource_action(client, action='', log_format='item: %(key)s', **kwargs):
result = None
try:
result = getattr(client, action)(**kwargs)
LOG.info(log_format, kwargs)
except botocore.exceptions.ClientError as error:
error_code = error.response['Error']['Code']
if (error_cod... | Call _action_ using boto3 _client_ with _kwargs_.
This is meant for _action_ methods that will create or implicitely prove a
given Resource exists. The _log_failure_ flag is available for methods that
should always succeed, but will occasionally fail due to unknown AWS
issues.
Args:
client (botocore.client.IAM): boto... | codesearchnet |
def get_soa_record(client, zone_id, zone_name):
response = client.list_resource_record_sets(HostedZoneId=zone_id,
StartRecordName=zone_name,
StartRecordType="SOA",
Ma... | Gets the SOA record for zone_name from zone_id.
Args:
client (:class:`botocore.client.Route53`): The connection used to
interact with Route53's API.
zone_id (string): The AWS Route53 zone id of the hosted zone to query.
zone_name (string): The name of the DNS hosted zone to create.
Returns:
:class:`stacker.util.SOARe... | juraj-google-style |
def localopt(self, forcefield='mmff94', steps=500):
pbmol = pb.Molecule(self._obmol)
pbmol.localopt(forcefield=forcefield, steps=steps)
self._obmol = pbmol.OBMol | A wrapper to pybel's localopt method to optimize a Molecule.
Args:
forcefield: Default is mmff94. Options are 'gaff', 'ghemical',
'mmff94', 'mmff94s', and 'uff'.
steps: Default is 500. | codesearchnet |
def set_query_parameter(url, param_name, param_value):
scheme, netloc, path, query_string, fragment = urlsplit(url)
query_params = parse_qs(query_string)
query_params[param_name] = [param_value]
new_query_string = urlencode(query_params, doseq=True)
return urlunsplit((scheme, netloc, path, ne... | Given a URL, set or replace a query parameter and return the modified URL.
Args:
url: a given URL
param_name: the parameter name to add
param_value: the parameter value
Returns:
URL with the added parameter | juraj-google-style |
def load_ui_type(uifile):
import pysideuic
import xml.etree.ElementTree as ElementTree
from cStringIO import StringIO
parsed = ElementTree.parse(uifile)
widget_class = parsed.find('widget').get('class')
form_class = parsed.find('class').text
with open(uifile, 'r') as f:
o = StringIO(... | Pyside equivalent for the loadUiType function in PyQt.
From the PyQt4 documentation:
Load a Qt Designer .ui file and return a tuple of the generated form
class and the Qt base class. These can then be used to create any
number of instances of the user interface without having to parse the
.ui file more than once.
Not... | codesearchnet |
def days_until(self, target_date_tensor):
return target_date_tensor.ordinal() - self._ordinals | Computes the number of days until the target dates.
Args:
target_date_tensor: A DateTensor object broadcastable to the shape of
"self".
Returns:
An int32 tensor with numbers of days until the target dates.
#### Example
```python
dates = tff.datetime.dates_from_tuples([(2020, 1, 25), (2020, 3, 2)])
target = tff.date... | github-repos |
def floodlight_email(config, task: dict, day: str, alerts: dict[str, list[str, str, str, str, int, str]]) -> None:
for email, table in alerts.items():
t = EmailTemplate()
t.align('center')
t.section(True)
issues = sum((1 for row in table if row[5] != 'NORMAL'))
if issues > 0:... | Send an email to each alert group with status of all activities.
The email template will contain all activities for each email address specified in the input sheet.
Args:
day - the latest day that was present in all combined reports, used for title of email.
alerts - Each email in the sheet with a list of activities ... | github-repos |
def get_config_parameter_multiline(config: ConfigParser,
section: str,
param: str,
default: List[str]) -> List[str]:
try:
multiline = config.get(section, param)
lines = [x.strip() for x in m... | Get multi-line string parameter from ``configparser`` ``.INI`` file,
as a list of strings (one per line, ignoring blank lines).
Args:
config: :class:`ConfigParser` object
section: section name within config file
param: name of parameter within section
default: default value
Returns:
parameter value, or default | juraj-google-style |
def upsert(self, insert_index, val, fn=None):
fn = (fn or (lambda current, passed: passed))
self._magnitude = 0
position = self.position_for_index(insert_index)
if ((position < len(self.elements)) and (self.elements[position] == insert_index)):
self.elements[(position + 1)] = fn(self.elements[(p... | Inserts or updates an existing index within the vector.
Args:
- insert_index (int): The index at which the element should be
inserted.
- val (int|float): The value to be inserted into the vector.
- fn (callable, optional): An optional callable taking two
arguments, the current value and the passed value to generate
th... | codesearchnet |
def _to_snake_case(string):
sub_string = r'\1_\2'
string = REGEX_CAMEL_FIRST.sub(sub_string, string)
return REGEX_CAMEL_SECOND.sub(sub_string, string).lower() | Return a snake cased version of the input string.
Args:
string (str): A camel cased string.
Returns:
str: A snake cased string. | juraj-google-style |
def attention_lm_moe_small():
hparams = attention_lm_moe_base()
hparams.num_hidden_layers = 4
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.moe_num_experts = 128
hparams.moe_layers = '2'
return hparams | Cheap model for single-gpu training.
on lm1b_32k:
~312M params
1.6 steps/sec on [GeForce GTX TITAN X]
After 50K steps on 8 GPUs (synchronous):
eval_log_ppl_per_token = 3.31
Returns:
an hparams object. | codesearchnet |
def execute_edit(args, root_dir=None):
EDITOR = os.environ.get('EDITOR', 'vim')
key = args['key']
status = command_factory('status')({}, root_dir=root_dir)
if not isinstance(status['data'], str) and key in status['data']:
if status['data'][key]['status'] in ['queued', 'stash... | Edit a existing queue command in the daemon.
Args:
args['key'] int: The key of the queue entry to be edited
root_dir (string): The path to the root directory the daemon is running in. | juraj-google-style |
def _DepthwiseConv2dNativeBackpropInputGrad(op: ops.Operation, grad):
return [None, gen_nn_ops.depthwise_conv2d_native_backprop_filter(grad, array_ops.shape(op.inputs[1]), op.inputs[2], dilations=op.get_attr('dilations'), strides=op.get_attr('strides'), padding=op.get_attr('padding'), explicit_paddings=op.get_attr(... | The derivatives for deconvolution.
Args:
op: the Deconvolution op.
grad: the tensor representing the gradient w.r.t. the output
Returns:
the gradients w.r.t. the input and the filter | github-repos |
def get_definition(self, stmt: Statement, sctx: SchemaContext) -> Tuple[(Statement, SchemaContext)]:
if (stmt.keyword == 'uses'):
kw = 'grouping'
elif (stmt.keyword == 'type'):
kw = 'typedef'
else:
raise ValueError("not a 'uses' or 'type' statement")
(loc, did) = self.resolve_pna... | Find the statement defining a grouping or derived type.
Args:
stmt: YANG "uses" or "type" statement.
sctx: Schema context where the definition is used.
Returns:
A tuple consisting of the definition statement ('grouping' or
'typedef') and schema context of the definition.
Raises:
ValueError: If `stmt` is neither "use... | codesearchnet |
def get_windows_if_list(extended=False):
def _get_mac(x):
size = x["physical_address_length"]
if size != 6:
return ""
data = bytearray(x["physical_address"])
return str2mac(bytes(data)[:size])
def _get_ips(x):
unicast = x['first_unicast_address']
... | Returns windows interfaces through GetAdaptersAddresses.
params:
- extended: include anycast and multicast IPv6 (default False) | juraj-google-style |
def _print_reference(self, reference: message.Message) -> None:
set_oneof = reference.WhichOneof('reference')
if self.json_format == _FhirJsonFormat.PURE and set_oneof is not None and (set_oneof != 'uri'):
standardized_reference = copy.copy(reference)
new_uri = proto_utils.get_value_at_field(sta... | Standardizes and prints the provided reference.
Note that "standardization" in the case of PURE FHIR JSON refers to
un-typing the typed-reference prior to printing.
Args:
reference: The reference to print. | github-repos |
def handle(self, connection_id, message_content):
try:
request = self._request_proto()
request.ParseFromString(message_content)
except DecodeError:
LOGGER.info('Protobuf %s failed to deserialize', request)
return self._wrap_result(self._status.INT... | Handles parsing incoming requests, and wrapping the final response.
Args:
connection_id (str): ZMQ identity sent over ZMQ socket
message_content (bytes): Byte encoded request protobuf to be parsed
Returns:
HandlerResult: result to be sent in response back to client | juraj-google-style |
def _add_value_to_extension(msg: message.Message, extension: message.Message, is_choice_type: bool) -> None:
if is_choice_type:
oneofs = msg.DESCRIPTOR.oneofs
if not oneofs:
raise fhir_errors.InvalidFhirError(f'Choice type is missing a oneof: {msg.DESCRIPTOR.full_name}')
value_fi... | Adds the fields from msg to a generic Extension.
Attempts are first made to set the "value" field of the generic Extension
based on the type of field set on message. If this fails, checks are made
against the generic Code and Coding types, and finally we fall back to adding
the message's fields as sub-extensions.
Arg... | github-repos |
def make_time(h=0, m=0, s=0, ms=0, frames=None, fps=None):
    """Convert a time specification to milliseconds.

    Delegates to ``times_to_ms`` for wall-clock components, or to
    ``frames_to_ms`` when both ``frames`` and ``fps`` are given.

    Raises:
        ValueError: If only one of ``frames``/``fps`` is specified.
    """
    have_frames = frames is not None
    have_fps = fps is not None
    if not have_frames and not have_fps:
        return times_to_ms(h, m, s, ms)
    if have_frames and have_fps:
        return frames_to_ms(frames, fps)
    raise ValueError('Both fps and frames must be specified')
See :func:`pysubs2.time.times_to_ms()`. When both frames and fps are specified,
:func:`pysubs2.time.frames_to_ms()` is called instead.
Raises:
ValueError: Invalid fps, or one of frames/fps is missing.
Example:
>>> make_time(s=1.5)
1500
>>> make_time(frames=50, fps=25)
2000 | codesearchnet |
def has_atomic_move(path):
    """Check whether the file system of `path` supports atomic moves.

    Returns True when the underlying file system supports an atomic
    move of a file or folder; on an `OpError` from the native layer we
    optimistically report True.
    """
    try:
        supported = _pywrap_file_io.HasAtomicMove(compat.path_to_bytes(path))
    except errors.OpError:
        # Assume support when the native query itself fails.
        return True
    return supported
Returns whether or not the file system of the given path supports the atomic
move operation for a file or folder. If atomic move is supported, it is
recommended to use a temp location for writing and then move to the final
location.
Args:
path: string, path to a ... | github-repos |
def diff_text1(self, diffs):
    """Compute the source text (all equalities and deletions).

    Args:
        diffs: Array of (op, data) diff tuples.

    Returns:
        The source text as a single string.
    """
    return "".join(data for op, data in diffs if op != self.DIFF_INSERT)
Args:
diffs: Array of diff tuples.
Returns:
Source text. | juraj-google-style |
def setup_engines(client=None):
if not client:
try:
client = ipyparallel.Client()
except:
raise DistobClusterError(
u)
eids = client.ids
if not eids:
raise DistobClusterError(
u'No ipyparallel compute engines are available'... | Prepare all iPython engines for distributed object processing.
Args:
client (ipyparallel.Client, optional): If None, will create a client
using the default ipyparallel profile. | juraj-google-style |
def get_atlas_per_gene_mutation_df(self, gene_id):
g = self.reference_gempro.genes.get_by_id(gene_id)
single, fingerprint = g.protein.sequence_mutation_summary(alignment_type='seqalign')
structure_type_suffix = 'NA'
append... | Create a single data frame which summarizes a gene and its mutations.
Args:
gene_id (str): Gene ID in the base model
Returns:
DataFrame: Pandas DataFrame of the results | juraj-google-style |
def loop_until_timeout_or_true(timeout_s, function, sleep_s=1):
    """Loop until `function` returns a truthy value or the timeout expires.

    Thin wrapper over ``loop_until_timeout_or_valid`` using the value
    itself as the validity predicate.
    """
    truthy = lambda result: result
    return loop_until_timeout_or_valid(timeout_s, function, truthy, sleep_s)
Note: The function may return anything which evaluates to implicit True. This
function will loop calling it as long as it continues to return something
which evaluates to False. We ensure this method is called at least once
regardless of timeou... | codesearchnet |
def import_laid_out_tensor(mesh, laid_out_tensor, shape, name=None):
    """Import a laid-out tensor into the mesh as an mtf.Tensor.

    The input must be laid out appropriately for the eventual MeshImpl
    and layout. For expert users.
    """
    mtf_shape = convert_to_shape(shape)
    op = ImportLaidOutTensorOperation(mesh, laid_out_tensor, mtf_shape, name=name)
    return op.outputs[0]
For expert users.
The input must be laid out appropriately given the eventual MeshImpl,
and layout.
Args:
mesh: a Mesh
laid_out_tensor: a LaidOutTensor
shape: a mtf.Shape
name: an optional string
Returns:
a mtf.Tensor | codesearchnet |
def content_ratings(self, **kwargs):
    """Get the content ratings for a TV series.

    Keyword args are forwarded as query parameters (e.g. `language`,
    `append_to_response`). The JSON response is also mirrored onto
    instance attributes.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    endpoint = self._get_id_path('content_ratings')
    result = self._GET(endpoint, kwargs)
    self._set_attrs_to_values(result)
    return result
Args:
language: (optional) ISO 639 code.
append_to_response: (optional) Comma separated, any collection
method.
Returns:
A dict respresentation of the JSON returned from the API. | codesearchnet |
def create_output_excerpts(self, test_info):
dest_path = test_info.output_path
utils.create_dir(dest_path)
filename = self._ad.generate_filename(self.OUTPUT_FILE_TYPE, test_info, 'txt')
excerpt_file_path = os.path.join(dest_path, filename)
with open(excerpt_file_path, 'w', encoding='utf-8', errors='... | Convenient method for creating excerpts of adb logcat.
This copies logcat lines from self.adb_logcat_file_path to an excerpt
file, starting from the location where the previous excerpt ended.
Call this method at the end of: `setup_class`, `teardown_test`, and
`teardown_class`.
Args:
test_info: `self.current_test_inf... | github-repos |
def set(self, name, value):
    """Set an option value, auto-registering unknown option names.

    Args:
        name (str): The option name.
        value: The value to assign.

    Returns:
        Whatever the option descriptor's ``__set__`` returns.
    """
    if name not in self._options:
        # Unknown name: create a fresh option via the configured generator.
        self.register(name, self._generator())
    option = self._options[name]
    return option.__set__(self, value)
Args:
name (str): The name of the option.
value: The value to set the option to.
Raises:
TypeError: If the value is not a string or appropriate native type.
ValueError: If the value is a string but cannot be coerced.
If the name is not registered a new option will be created using the
option gen... | juraj-google-style |
def assign_sub(self, delta, use_locking=False, name=None, read_value=True):
    """Subtracts a value from this variable (abstract).

    Essentially a shortcut for `assign_sub(self, delta)`; concrete
    variable subclasses must override this.

    Args:
        delta: A `Tensor`. The value to subtract from this variable.
        use_locking: If True, use locking during the operation.
        name: The name of the operation to be created.
        read_value: If True, return something convertible to a tensor.

    Raises:
        NotImplementedError: Always, in this base implementation.
    """
    raise NotImplementedError
This is essentially a shortcut for `assign_sub(self, delta)`.
Args:
delta: A `Tensor`. The value to subtract from this variable.
use_locking: If `True`, use locking during the operation.
name: The name of the operation to be created
read_value: if True, will return something whic... | github-repos |
def all_pairs(sets, similarity_func_name='jaccard', similarity_threshold=0.5):
if ((not isinstance(sets, list)) or (len(sets) == 0)):
raise ValueError('Input parameter sets must be a non-empty list.')
if (similarity_func_name not in _similarity_funcs):
raise ValueError('Similarity function {} is... | Find all pairs of sets with similarity greater than a threshold.
This is an implementation of the All-Pair-Binary algorithm in the paper
"Scaling Up All Pairs Similarity Search" by Bayardo et al., with
position filter enhancement.
Args:
sets (list): a list of sets, each entry is an iterable representing a
set.
similar... | codesearchnet |
def size(x):
    """Return the number of elements in a tensor.

    Args:
        x: Input tensor.

    Returns:
        Number of elements in `x`.
    """
    symbolic = any_symbolic_tensors((x,))
    if symbolic:
        return Size().symbolic_call(x)
    return backend.numpy.size(x)
Args:
x: Input tensor.
Returns:
Number of elements in `x`. | github-repos |
def GetSubkeyByName(self, name):
    """Retrieve a Windows Registry subkey by name.

    Args:
        name (str): name of the subkey.

    Returns:
        WinRegistryKey: the wrapped subkey, or None if not found.
    """
    subkey = self._pyregf_key.get_sub_key_by_name(name)
    if subkey:
        subkey_path = key_paths.JoinKeyPath([self._key_path, subkey.name])
        return REGFWinRegistryKey(subkey, key_path=subkey_path)
    return None
Args:
name (str): name of the subkey.
Returns:
WinRegistryKey: Windows Registry subkey or None if not found. | juraj-google-style |
def topological_sort_operations(operations):
in_degrees = collections.OrderedDict()
for op in reversed(operations):
if op not in in_degrees:
in_degrees[op] = 0
for next_op in reversed(_op_dependencies(op)):
in_degrees[next_op] = in_degrees.get(next_op, 0) + 1
nexts = ... | Topological sorts a list of operations.
This does a topological sort of the operations in a graph. The edges include
both data dependencies and control dependencies. Note that the edge goes from
an operation to its dependencies.
The sort is intentionally unstable, reversing orders of operations and
dependencies on ti... | github-repos |
def _record_local(self, node, op, name, typ, orig_val=None, final=None):
if orig_val:
self.current_local_ops.append(LocalOp(name, LocalOp.Op.ASSIGN))
if typ:
self.current_local_ops.append(LocalOp(name, LocalOp.Op.ANNOTATE))
self._update_annotations_dict(node, op, name, typ, orig_val, self.cu... | Record a type annotation on a local variable.
This method records three types of local operations:
- An annotation, e.g., `x: int`. In this case, `typ` is PyTDClass(int) and
`orig_val` is None.
- An assignment, e.g., `x = 0`. In this case, `typ` is None and `orig_val`
is Instance(int).
- An annotated assignment, e.g.,... | github-repos |
def queryize(terms, exclude_screen_name=None):
ors = ' OR '.join('"{}"'.format(x) for x in terms if not x.startswith('-'))
nots = ' '.join('-"{}"'.format(x[1:]) for x in terms if x.startswith('-'))
sn = "-from:{}".format(exclude_screen_name) if exclude_screen_name else ''
return ' '.join((ors, nots... | Create query from list of terms, using OR
but intelligently excluding terms beginning with '-' (Twitter's NOT operator).
Optionally add -from:exclude_screen_name.
>>> helpers.queryize(['apple', 'orange', '-peach'])
u'apple OR orange -peach'
Args:
terms (list): Search terms.
exclude_screen_name (str): A single screen ... | juraj-google-style |
def addSearchers(self, *searchers):
    """Add more transformed-MIB repositories to consult during compilation.

    Args:
        searchers: searcher object(s).

    Returns:
        Reference to self (chainable).
    """
    self._searchers.extend(searchers)
    if debug.logger & debug.flagCompiler:
        locations = ', '.join(str(searcher) for searcher in self._searchers)
        debug.logger('current compiled MIBs location(s): %s' % locations)
    return self
MibCompiler.compile will invoke each of the configured searcher objects
in the order of their addition, asking each whether an already-transformed
MIB module exists and is more recent than specified.
Args:
searchers: searcher object(s)
Returns:
reference to itself (can be used for call... | juraj-google-style |
def get(self,identity,params=None, headers=None):
path = self._sub_url_params('/creditor_bank_accounts/:identity', {
'identity': identity,
})
response = self._perform_request('GET', path, params, headers,
retry_... | Get a single creditor bank account.
Retrieves the details of an existing creditor bank account.
Args:
identity (string): Unique identifier, beginning with "BA".
params (dict, optional): Query string parameters.
Returns:
ListResponse of CreditorBankAccount instances | juraj-google-style |
def stage_tc_indicator_entity(self, indicator_data):
    """Convert indicator JSON data to a TCEntity via a JMESPath projection.

    Args:
        indicator_data: Indicator data to project.

    Returns:
        The projected TCEntity structure from ``path_data``.
    """
    # Implicit literal concatenation; identical to the original built-up string.
    jmespath_expr = (
        '@.{value: summary, '
        'type: type, '
        'ownerName: ownerName, '
        'confidence: confidence || `0`, '
        'rating: rating || `0`}'
    )
    return self.path_data(indicator_data, jmespath_expr)
Args:
indicator_data (str): [description]
Returns:
[type]: [description] | codesearchnet |
def correct_structure(self, atol=1e-08):
    """Determine whether the structure matches the standard primitive.

    Args:
        atol (float, optional): Absolute tolerance for comparing the
            input structure's lattice with the primitive lattice.

    Returns:
        bool: True if the two lattice matrices agree within `atol`.
    """
    candidate = self.structure.lattice.matrix
    reference = self.prim.lattice.matrix
    return np.allclose(candidate, reference, atol=atol)
The standard primitive will be different between seekpath and pymatgen
high-symmetry paths, but this is handled by the specific subclasses.
Args:
atol (:obj:`float`, optional): Absolute tolerance used to compare
the input structure with the primitiv... | codesearchnet |
def _OpenParentFile(self, file_system, path_spec, vhdi_file):
location = getattr(path_spec, 'location', None)
if (not location):
raise errors.PathSpecError('Unsupported path specification without location.')
location_path_segments = file_system.SplitPath(location)
parent_filename = vhdi_file.par... | Opens the parent file.
Args:
file_system (FileSystem): file system of the VHDI file.
path_spec (PathSpec): path specification of the VHDI file.
vhdi_file (pyvhdi.file): VHDI file.
Raises:
PathSpecError: if the path specification is incorrect. | codesearchnet |
def update_mapping(mapping: Dict[ops.Qid, LogicalIndex],
operations: ops.OP_TREE
) -> None:
for op in ops.flatten_op_tree(operations):
if (isinstance(op, ops.GateOperation) and
isinstance(op.gate, PermutationGate)):
op.gate.update_mapping(ma... | Updates a mapping (in place) from qubits to logical indices according to
a set of permutation gates. Any gates other than permutation gates are
ignored.
Args:
mapping: The mapping to update.
operations: The operations to update according to. | juraj-google-style |
def chat(self, id):
json = self.skype.conn('GET', '{0}/users/ME/conversations/{1}'.format(self.skype.conn.msgsHost, id), auth=SkypeConnection.Auth.RegToken, params={'view': 'msnp24Equivalent'}).json()
cls = SkypeSingleChat
if ('threadProperties' in json):
info = self.skype.conn('GET', '{0}/threads/{... | Get a single conversation by identifier.
Args:
id (str): single or group chat identifier | codesearchnet |
def set_vrf(self, name, vrf, default=False, disable=False):
    """Apply a VRF to an interface.

    The VRF must already exist in the switch config and the port must
    be in routed mode.

    Args:
        name (str): Full interface name (e.g. "Ethernet1").
        vrf (str): VRF name to apply.
        default (bool): Use the `default` keyword form of the command.
        disable (bool): Use the `no` form of the command.
    """
    vrf_cmd = self.command_builder('vrf forwarding', vrf, default=default, disable=disable)
    return self.configure(['interface %s' % name, vrf_cmd])
Note: VRF being applied to interface must already exist in switch
config. Ethernet port must be in routed mode. This functionality
can also be handled in the VRF api.
Args:
name (str): The interface identifier. It must be a full
interface name (ie Ethernet, not Et)
vrf (str): The vrf n... | codesearchnet |
def generate(self, model, outfolder, *, exclude=None):
with pythonic_names():
super().generate(model, outfolder)
check_dependency = self.with_dependencies and model.eResource
if check_dependency:
if exclude is None:
exclude = set(... | Generate model code.
Args:
model: The meta-model to generate code for.
outfolder: Path to the directory that will contain the generated code.
exclude: List of referenced resources for which code was already generated
(to prevent regeneration). | juraj-google-style |
def save_to_text_file(monsoon_data, file_path):
if (not monsoon_data):
raise MonsoonError('Attempting to write empty Monsoon data to file, abort')
utils.create_dir(os.path.dirname(file_path))
with io.open(file_path, 'w', encoding='utf-8') as f:
for md in monsoon_data:
f.write(str... | Save multiple MonsoonData objects to a text file.
Args:
monsoon_data: A list of MonsoonData objects to write to a text
file.
file_path: The full path of the file to save to, including the file
name. | codesearchnet |
def should_stop(self):
    """Check whether the underlying coordinator was told to stop.

    Returns:
        True if the coordinator was told to stop, False otherwise.
    """
    coordinator = self._coord
    return coordinator.should_stop()
See `Coordinator.should_stop()`.
Returns:
True if the coordinator was told to stop, False otherwise. | github-repos |
def VerifyRow(self, parser_mediator, row):
if len(row) < self.MIN_COLUMNS:
return False
try:
timestamp = self._ConvertToTimestamp(row['date'], row['time'])
except (ValueError, TypeError):
return False
if timestamp is None:
return False
try:
action... | Verifies if a line of the file is in the expected format.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
row (dict[str, str]): fields of a single row, as specified in COLUMNS.
Returns:
bool: True if this is the correct parser, False other... | juraj-google-style |
def from_monitoring_infos(monitoring_info_list, user_metrics_only=False):
counters = {}
distributions = {}
gauges = {}
string_sets = {}
bounded_tries = {}
for mi in monitoring_info_list:
if user_metrics_only and (not monitoring_infos.is_user_monitoring_info(mi)):
continue
... | Groups MonitoringInfo objects into counters, distributions, gauges and
string sets
Args:
monitoring_info_list: An iterable of MonitoringInfo objects.
user_metrics_only: If true, includes user metrics only.
Returns:
A tuple containing three dictionaries: counters, distributions, gauges and
string set, respectively. Eac... | github-repos |
def rank(self, **kwargs):
axis = kwargs.get('axis', 0)
numeric_only = (True if axis else kwargs.get('numeric_only', False))
func = self._prepare_method(pandas.DataFrame.rank, **kwargs)
new_data = self._map_across_full_axis(axis, func)
if numeric_only:
new_columns = self.compute_index(1, new_... | Computes numerical rank along axis. Equal values are set to the average.
Returns:
DataManager containing the ranks of the values along an axis. | codesearchnet |
def TryConsume(self, token):
    """Try to consume a given piece of text.

    Args:
        token: Text to consume.

    Returns:
        True iff the text matched the current token and was consumed.
    """
    if self.token != token:
        return False
    self.NextToken()
    return True
Args:
token: Text to consume.
Returns:
True iff the text was consumed. | codesearchnet |
def __recognize_list(self, node: yaml.Node, expected_type: Type) -> RecResult:
logger.debug('Recognizing as a list')
if (not isinstance(node, yaml.SequenceNode)):
message = '{}{}Expected a list here.'.format(node.start_mark, os.linesep)
return ([], message)
item_type = generic_type_args(expe... | Recognize a node that we expect to be a list of some kind.
Args:
node: The node to recognize.
expected_type: List[...something...]
Returns
expected_type and the empty string if it was recognized,
[] and an error message otherwise. | codesearchnet |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.