code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def stop_gradient(cls, x: 'TensorFluent') -> 'TensorFluent':
    """Returns a copy of the input fluent with stop_gradient at tensor level.

    Args:
        x: The input fluent.

    Returns:
        A TensorFluent that stops backpropagation of gradient computations.
    """
    fluent_scope = x.scope.as_list()
    fluent_batch = x.batch
    blocked_tensor = tf.stop_gradient(x.tensor)
    return TensorFluent(blocked_tensor, fluent_scope, fluent_batch)
def get_operation_device(self, operation_name):
    """The device of an operation.

    Note that only tf operations have device assignments.

    Args:
        operation_name: a string, name of an operation in the graph.

    Returns:
        a string or None, representing the device name.
    """
    found = self._name_to_operation(operation_name)
    if not isinstance(found, tf.Operation):
        # Non-tf graph entities carry no device assignment.
        return None
    return found.device
def path_get(p: tcod.path.AStar, idx: int) -> Tuple[int, int]:
    """Get a point on a path.

    Args:
        p (AStar): An AStar instance.
        idx (int): Should be in range: 0 <= idx < :any:`path_size`

    Returns:
        The (x, y) point at the given index of the path.
    """
    x_out = ffi.new('int *')
    y_out = ffi.new('int *')
    lib.TCOD_path_get(p._path_c, idx, x_out, y_out)
    return x_out[0], y_out[0]
def _GetParserFilters(cls, parser_filter_expression):
    """Retrieves the parsers and plugins to include and exclude.

    Takes a comma separated string and splits it up into two dictionaries,
    of parsers and plugins to include and to exclude from selection. If a
    particular filter is prepended with an exclamation point it will be
    added to the exclude section, otherwise in the include.

    Args:
        parser_filter_expression (str): parser filter expression, where None
            represents all parsers and plugins.

    Returns:
        tuple: containing:

        * dict[str, BaseParser]: included parsers and plugins by name.
        * dict[str, BaseParser]: excluded parsers and plugins by name.
    """
    if not parser_filter_expression:
        return {}, {}

    includes = {}
    excludes = {}
    preset_names = cls._presets.GetNames()

    for raw_filter in parser_filter_expression.split(','):
        raw_filter = raw_filter.strip()
        if not raw_filter:
            continue

        # A leading '!' routes the entry into the exclusion dictionary.
        is_exclusion = raw_filter.startswith('!')
        if is_exclusion:
            raw_filter = raw_filter[1:]
        target = excludes if is_exclusion else includes

        raw_filter = raw_filter.lower()
        if raw_filter in preset_names:
            # Preset names expand into multiple parser (and plugin) entries.
            for preset_entry in cls._GetParsersFromPresetCategory(raw_filter):
                parser, _, plugin = preset_entry.partition('/')
                target.setdefault(parser, [])
                if plugin:
                    target[parser].append(plugin)
        else:
            parser, _, plugin = raw_filter.partition('/')
            target.setdefault(parser, [])
            if plugin:
                target[parser].append(plugin)

    cls._ReduceParserFilters(includes, excludes)
    return includes, excludes
def clean_for_storage(self, data):
    """Clean data in preparation for storage.

    Deletes items whose key is '_id', renames keys containing '.' to use
    '_' instead, and recursively cleans values that are dictionaries or
    lists of dictionaries.

    Args:
        data: Sample data dictionary to be cleaned.

    Returns:
        Cleaned data dictionary.
    """
    data = self.data_to_unicode(data)
    if not isinstance(data, dict):
        return data
    # Iterate over a snapshot of the keys since we mutate the dict.
    for key in list(data.keys()):
        if key == '_id':
            del data[key]
            continue
        if '.' in key:
            # Dots are not allowed in storage keys; rename in place.
            safe_key = key.replace('.', '_')
            data[safe_key] = data[key]
            del data[key]
            key = safe_key
        value = data[key]
        if isinstance(value, dict):
            data[key] = self.clean_for_storage(value)
        elif isinstance(value, list):
            data[key] = [self.clean_for_storage(entry) for entry in value]
    return data
def remind_signature_request(self, signature_request_id, email_address):
    """Sends an email to the signer reminding them to sign the signature request.

    You cannot send a reminder within 1 hours of the last reminder that was
    sent. This includes manual AND automatic reminders.

    Args:
        signature_request_id (str): The id of the SignatureRequest to send a reminder for
        email_address (str): The email address of the signer to send a reminder to

    Returns:
        A SignatureRequest object
    """
    url = self.SIGNATURE_REQUEST_REMIND_URL + signature_request_id
    request = self._get_request()
    return request.post(url, data={'email_address': email_address})
def iplot_state_paulivec(rho, figsize=None, slider=False, show_legend=False):
    """Create a paulivec representation.

    Graphical representation of the input array.

    Args:
        rho (array): State vector or density matrix.
        figsize (tuple): Figure size in pixels.
        slider (bool): activate slider
        show_legend (bool): show legend of graph content
    """
    html_template = Template()
    javascript_template = Template()
    rho = _validate_input_state(rho)
    if figsize is None:
        figsize = (7, 5)
    options = {
        'width': figsize[0],
        'height': figsize[1],
        'slider': int(slider),
        'show_legend': int(show_legend),
    }
    # Unique div id derived from the current timestamp with the dot removed.
    div_number = re.sub('[.]', '', str(time.time()))
    data_to_plot = [dict(data=process_data(rho))]
    html = html_template.substitute({'divNumber': div_number})
    javascript = javascript_template.substitute({
        'divNumber': div_number,
        'executions': data_to_plot,
        'options': options,
    })
    display(HTML(html + javascript))
def Log(self, frame):
    """Captures the minimal application states, formats it and logs the message.

    Args:
        frame: Python stack frame of breakpoint hit.

    Returns:
        None on success or status message on error.
    """
    if not self._log_message:
        return {'isError': True,
                'description': {'format': LOG_ACTION_NOT_SUPPORTED}}

    # While recovering from an out-of-quota condition, suppress logging
    # until quota_recovery_ms milliseconds have elapsed.
    if self._quota_recovery_start_time:
        elapsed_ms = (time.time() - self._quota_recovery_start_time) * 1000
        if elapsed_ms > self.quota_recovery_ms:
            self._quota_recovery_start_time = None
        else:
            return

    message = 'LOGPOINT: ' + _FormatMessage(
        self._definition.get('logMessageFormat', ''),
        self._EvaluateExpressions(frame))
    line = self._definition['location']['line']
    cdbg_logging_location = (NormalizePath(frame.f_code.co_filename), line,
                             _GetFrameCodeObjectName(frame))
    if native.ApplyDynamicLogsQuota(len(message)):
        self._log_message(message)
    else:
        # Out of quota: emit a marker message and start the recovery timer.
        self._quota_recovery_start_time = time.time()
        self._log_message(DYNAMIC_LOG_OUT_OF_QUOTA)
    del cdbg_logging_location
    return None
def save_screenshot(self, path=None, **kwargs):
    """Save a screenshot of the page.

    If invoked without arguments, it will save a file to
    :data:`capybara.save_path` and the file will be given a randomly
    generated filename. If invoked with a relative path, the path will be
    relative to :data:`capybara.save_path`.

    Args:
        path (str, optional): The path to where it should be saved.
        **kwargs: Arbitrary keywords arguments for the driver.

    Returns:
        str: The path to which the file was saved.
    """
    target = _prepare_path(path, 'png')
    self.driver.save_screenshot(target, **kwargs)
    return target
def _inverse_document_frequency(self, token_document_counts, num_documents):
    """Computes the inverse-document-frequency (IDF) component of "tf_idf".

    Args:
        token_document_counts: An array of the # of documents each token
            appears in.
        num_documents: An int representing the total number of documents

    Returns:
        An array of "inverse document frequency" weights.
    """
    # Smoothed IDF: log(1 + N / (1 + df)).
    document_ratio = num_documents / (1 + token_document_counts)
    return tf.math.log(1 + document_ratio)
def __init__(self, stream_number, entry_index):
    """Initializes a serialized stream attribute container identifier.

    Args:
        stream_number (int): number of the serialized attribute container
            stream.
        entry_index (int): number of the serialized event within the stream.
    """
    super(SerializedStreamIdentifier, self).__init__()
    self.stream_number = stream_number
    self.entry_index = entry_index
def render_build_args(options, ns):
    """Get docker build args dict, rendering any templated args.

    Args:
        options (dict):
            The dictionary for a given image from chartpress.yaml.
            Fields in `options['buildArgs']` will be rendered and returned,
            if defined.
        ns (dict): the namespace used when rendering templated arguments

    Returns:
        dict: the rendered build args. The input ``options`` dictionary is
            left unmodified.
    """
    # Build a new dict instead of mutating options['buildArgs'] in place,
    # so the caller's configuration is not silently altered.
    return {
        key: value.format(**ns)
        for key, value in options.get('buildArgs', {}).items()
    }
def init_logging(log_filename, verbose, quiet):
    """Set up logging with default parameters:

    * default console logging level is INFO
    * ERROR, WARNING and CRITICAL are redirected to stderr

    Args:
        log_filename (str): if set, will write DEBUG log there
        verbose (bool): DEBUG level in console, overrides 'quiet'
        quiet (bool): WARNING level in console
    """
    root = logging.getLogger('')
    root.setLevel(logging.DEBUG)

    if log_filename:
        file_handler = logging.FileHandler(log_filename)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter(
            fmt="%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d\t%(message)s"
        ))
        root.addHandler(file_handler)

    stdout_handler = logging.StreamHandler(sys.stdout)
    stderr_handler = logging.StreamHandler(sys.stderr)

    # NOTE(review): '%f' is not supported by the time.strftime-based datefmt
    # used by logging.Formatter — confirm whether sub-second precision was
    # actually intended here.
    fmt_verbose = logging.Formatter(
        fmt="%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d\t%(message)s",
        datefmt='%Y-%m-%d,%H:%M:%S.%f'
    )
    fmt_regular = logging.Formatter(
        "%(asctime)s [%(levelname).4s] [%(filename).8s] %(message)s", "%H:%M:%S")

    if verbose:
        stdout_handler.setLevel(logging.DEBUG)
        stdout_handler.setFormatter(fmt_verbose)
        stderr_handler.setFormatter(fmt_verbose)
    elif quiet:
        stdout_handler.setLevel(logging.WARNING)
        stdout_handler.setFormatter(fmt_regular)
        stderr_handler.setFormatter(fmt_regular)
    else:
        stdout_handler.setLevel(logging.INFO)
        stdout_handler.setFormatter(fmt_regular)
        stderr_handler.setFormatter(fmt_regular)

    # stdout: drop ERROR/WARNING/CRITICAL records (those belong on stderr).
    for filtered_level in (logging.ERROR, logging.WARNING, logging.CRITICAL):
        stdout_handler.addFilter(SingleLevelFilter(filtered_level, True))
    root.addHandler(stdout_handler)

    # stderr: drop INFO/DEBUG records (those belong on stdout).
    for filtered_level in (logging.INFO, logging.DEBUG):
        stderr_handler.addFilter(SingleLevelFilter(filtered_level, True))
    root.addHandler(stderr_handler)
def size(self, name=None):
    """Returns the number of elements in the staging area.

    Args:
        name: A name for the operation (optional)

    Returns:
        The created op
    """
    op_name = name if name is not None else '%s_size' % self._name
    return self._size_fn(
        shared_name=self._name,
        name=op_name,
        dtypes=self._dtypes,
        capacity=self._capacity,
        memory_limit=self._memory_limit)
def create_dummy_class(klass, dependency):
    """When a dependency of a class is not available, create a dummy class
    which throws ImportError when used.

    Args:
        klass (str): name of the class.
        dependency (str): name of the dependency.

    Returns:
        class: a class object
    """
    assert not building_rtfd()

    err_message = "Cannot import '{}', therefore '{}' is not available".format(
        dependency, klass)

    class _DummyMetaClass(type):
        # Any attribute access on the dummy class itself fails loudly.
        def __getattr__(_, __):
            raise AttributeError(err_message)

    @six.add_metaclass(_DummyMetaClass)
    class _Dummy(object):
        # Instantiating the dummy fails loudly as well.
        def __init__(self, *args, **kwargs):
            raise ImportError(err_message)

    return _Dummy
def set_fig_size(self, width, height=None):
    """Set the figure size in inches.

    Sets the figure size with a call to fig.set_size_inches.
    Default in code is 8 inches for each.

    Args:
        width (float): Dimensions for figure width in inches.
        height (float, optional): Dimensions for figure height in inches.
            Default is None.
    """
    fig = self.figure
    fig.figure_width = width
    fig.figure_height = height
def find_copy_constructor(type_):
    """Returns reference to copy constructor.

    Args:
        type_ (declarations.class_t): the class to be searched.

    Returns:
        declarations.constructor_t: the copy constructor, or None if the
            class has none.
    """
    matches = type_.constructors(
        lambda ctor: is_copy_constructor(ctor),
        recursive=False,
        allow_empty=True)
    return matches[0] if matches else None
def stack_template_url(bucket_name, blueprint, endpoint):
    """Produces an s3 url for a given blueprint.

    Args:
        bucket_name (string): The name of the S3 bucket where the resulting
            templates are stored.
        blueprint (:class:`stacker.blueprints.base.Blueprint`): The blueprint
            object to create the URL to.
        endpoint (string): The s3 endpoint used for the bucket.

    Returns:
        string: S3 URL.
    """
    key_name = stack_template_key_name(blueprint)
    return '{}/{}/{}'.format(endpoint, bucket_name, key_name)
def configure_attributes(self, json_data):
    """Configure load balancer attributes such as idle timeout, connection
    draining, etc.

    Args:
        json_data (json): return data from ELB upsert
    """
    session = boto3.session.Session(profile_name=self.env, region_name=self.region)
    elbclient = session.client('elb')
    elb_settings = self.properties['elb']
    LOG.debug('Block ELB Settings Pre Configure Load Balancer Attributes:\n%s',
              pformat(elb_settings))
    for job in json.loads(json_data)['job']:
        # Defaults, overridden below when custom settings are present.
        load_balancer_attributes = {
            'CrossZoneLoadBalancing': {'Enabled': True},
            'AccessLog': {'Enabled': False},
            'ConnectionDraining': {'Enabled': False},
            'ConnectionSettings': {'IdleTimeout': 60},
        }
        if elb_settings.get('connection_draining_timeout'):
            draining_timeout = int(elb_settings['connection_draining_timeout'])
            LOG.info('Applying Custom Load Balancer Connection Draining Timeout: %d',
                     draining_timeout)
            load_balancer_attributes['ConnectionDraining'] = {
                'Enabled': True, 'Timeout': draining_timeout}
        if elb_settings.get('idle_timeout'):
            idle_timeout = int(elb_settings['idle_timeout'])
            LOG.info('Applying Custom Load Balancer Idle Timeout: %d', idle_timeout)
            load_balancer_attributes['ConnectionSettings'] = {
                'IdleTimeout': idle_timeout}
        if elb_settings.get('access_log'):
            log_settings = elb_settings['access_log']
            bucket_name = log_settings['bucket_name']
            bucket_prefix = log_settings['bucket_prefix']
            emit_interval = int(log_settings['emit_interval'])
            LOG.info('Applying Custom Load Balancer Access Log: %s/%s every %d minutes',
                     bucket_name, bucket_prefix, emit_interval)
            load_balancer_attributes['AccessLog'] = {
                'Enabled': True,
                'S3BucketName': bucket_name,
                'EmitInterval': emit_interval,
                'S3BucketPrefix': bucket_prefix,
            }
        LOG.info('Applying Load Balancer Attributes')
        LOG.debug('Load Balancer Attributes:\n%s', pformat(load_balancer_attributes))
        elbclient.modify_load_balancer_attributes(
            LoadBalancerName=self.app,
            LoadBalancerAttributes=load_balancer_attributes)
def present(name, parent=None, vlan=None):
    """Ensures that the named bridge exists, eventually creates it.

    Args:
        name: The name of the bridge.
        parent: The name of the parent bridge (if the bridge shall be created
            as a fake bridge). If specified, vlan must also be specified.
        vlan: The VLAN ID of the bridge (if the bridge shall be created as a
            fake bridge). If specified, parent must also be specified.
    """
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}

    comment_bridge_created = 'Bridge {0} created.'.format(name)
    comment_bridge_notcreated = 'Unable to create bridge: {0}.'.format(name)
    comment_bridge_exists = 'Bridge {0} already exists.'.format(name)
    comment_bridge_mismatch = ('Bridge {0} already exists, but has a different'
                               ' parent or VLAN ID.'.format(name))
    changes_bridge_created = {name: {
        'old': 'Bridge {0} does not exist.'.format(name),
        'new': 'Bridge {0} created'.format(name),
    }}

    bridge_exists = __salt__['openvswitch.bridge_exists'](name)
    settings_match = False
    if bridge_exists:
        current_parent = __salt__['openvswitch.bridge_to_parent'](name)
        if current_parent == name:
            # A bridge that is its own parent is a real (non-fake) bridge.
            current_parent = None
        current_vlan = __salt__['openvswitch.bridge_to_vlan'](name)
        if current_vlan == 0:
            current_vlan = None
        settings_match = (current_parent == parent) and (current_vlan == vlan)

    if __opts__['test']:
        # Dry run: report what would happen without touching anything.
        if bridge_exists:
            if settings_match:
                ret['result'] = True
                ret['comment'] = comment_bridge_exists
            else:
                ret['result'] = False
                ret['comment'] = comment_bridge_mismatch
        else:
            ret['result'] = None
            ret['comment'] = comment_bridge_created
        return ret

    if bridge_exists:
        if settings_match:
            ret['result'] = True
            ret['comment'] = comment_bridge_exists
        else:
            ret['result'] = False
            ret['comment'] = comment_bridge_mismatch
    else:
        created = __salt__['openvswitch.bridge_create'](name, parent=parent, vlan=vlan)
        if created:
            ret['result'] = True
            ret['comment'] = comment_bridge_created
            ret['changes'] = changes_bridge_created
        else:
            ret['result'] = False
            ret['comment'] = comment_bridge_notcreated
    return ret
def get_otp(self, message_list):
    """Generates a url-safe base64 encoded encrypted message together with the
    current timestamp (to the second).

    Random-length runs of random letters are added around the timestamp to
    prevent ciphertext-matching exploits against otherwise identical
    messages.

    Args:
        message_list: a message string, or list of message strings, to
            encrypt.

    Returns:
        bytes: the url-safe base64 encoded encrypted payload.

    Raises:
        ValueError: if any message contains the separator.
    """
    # This function already requires Python 3 (it uses an f-string), so a
    # plain `str` check replaces the former `six.string_types` test.
    if isinstance(message_list, str):
        message_list = [message_list, ]
    for message in message_list:
        if self.separator in message:
            raise ValueError('Messages cannot contain separator')
    payload = self.separator.join(message_list)
    timestamp = int(time.time())
    prefix = ''.join(random.choice(string.ascii_letters)
                     for _ in range(random.randint(0, 20)))
    tail = ''.join(random.choice(string.ascii_letters)
                   for _ in range(random.randint(0, 20)))
    payload = f'{payload}{self.separator}{prefix}{timestamp}{tail}'
    encrypted = self.encryption_suite.encrypt(payload.encode())
    return base64.urlsafe_b64encode(encrypted)
def _parse_local_interface(self, config):
    """Scans the config block and parses the local-interface value.

    Args:
        config (str): The config block to scan

    Returns:
        dict: A dict object that is intended to be merged into the
            resource dict
    """
    match = re.search(r'local-interface (\w+)', config)
    return dict(local_interface=match.group(1) if match else None)
def rotate_sites(self, indices=None, theta=0, axis=None, anchor=None,
                 to_unit_cell=True):
    """Rotate specific sites by some angle around vector at anchor.

    Args:
        indices (list): List of site indices on which to perform the
            translation.
        theta (float): Angle in radians
        axis (3x1 array): Rotation axis vector.
        anchor (3x1 array): Point of rotation.
        to_unit_cell (bool): Whether new sites are transformed to unit
            cell
    """
    from numpy.linalg import norm
    from numpy import cross, eye
    from scipy.linalg import expm

    if indices is None:
        indices = range(len(self))
    if axis is None:
        axis = [0, 0, 1]
    if anchor is None:
        anchor = [0, 0, 0]

    anchor = np.array(anchor)
    axis = np.array(axis)
    theta %= 2 * np.pi

    # Rotation matrix via the matrix exponential of the skew-symmetric
    # cross-product matrix of the normalized axis, scaled by theta.
    rotation_matrix = expm(cross(eye(3), axis / norm(axis)) * theta)

    for index in indices:
        site = self._sites[index]
        shifted = np.array(site.coords - anchor)
        new_coords = (np.dot(rotation_matrix, shifted.T).T + anchor).ravel()
        self._sites[index] = PeriodicSite(
            site.species, new_coords, self._lattice,
            to_unit_cell=to_unit_cell, coords_are_cartesian=True,
            properties=site.properties)
def index_2d(seqs: List[List[Any]], target: Any) -> Tuple[int, int]:
    """Finds the first index of a target item within a list of lists.

    Args:
        seqs: The list of lists to search.
        target: The item to find.

    Returns:
        A (row, column) tuple locating the first occurrence of `target`.

    Raises:
        ValueError: Item is not present.
    """
    # enumerate over the sequences directly instead of indexing via
    # range(len(...)) — same behavior, idiomatic iteration.
    for row, seq in enumerate(seqs):
        for col, item in enumerate(seq):
            if item == target:
                return row, col
    raise ValueError('Item not present.')
def add_error(self, error):
    """Record an error from expect APIs.

    This method generates a position stamp for the expect. The stamp is
    composed of a timestamp and the number of errors recorded so far.

    Args:
        error: Exception or signals.ExceptionRecord, the error to add.
    """
    self._count += 1
    position = 'expect@%s+%s' % (time.time(), self._count)
    self._record.add_error(position, error)
def extract_features(data_path: str, thres: int) -> typing.List[str]:
    """Extracts a features list from the given encoded data file.

    This filters out features whose number of occurrences does not exceed
    the threshold.

    Args:
        data_path (str): The path to the encoded data file that contains the
            features to be extracted, which is typically a training data file.
        thres (int): A threshold to filter out features whose number of
            occurrences does not exceed the threshold.

    Returns:
        A list of features
    """
    counts: typing.Counter[str] = Counter()
    with open(data_path) as source:
        for line in source:
            fields = line.strip().split('\t')
            # The first column is the label; everything after is features.
            if len(fields) < 2:
                continue
            counts.update(fields[1:])
    return [feature for feature, count in counts.most_common() if count > thres]
def get_template_path(filename):
    """Find raw template in working directory or in sys.path.

    template_path from config may refer to templates colocated with the
    Stacker config, or files in remote package_sources. Here, we emulate
    python module loading to find the path to the template.

    Args:
        filename (str): Template filename.

    Returns:
        Optional[str]: Path to file, or None if no file found
    """
    if os.path.isfile(filename):
        return os.path.abspath(filename)
    for search_dir in sys.path:
        candidate = os.path.join(search_dir, filename)
        if os.path.isfile(candidate):
            return os.path.abspath(candidate)
    return None
def list(self, orgId=None, **request_parameters):
    """List all licenses for a given organization.

    If no orgId is specified, the default is the organization of the
    authenticated user.

    Args:
        orgId(basestring): Specify the organization, by ID.
        **request_parameters: Additional request parameters (provides
            support for parameters that may be added in the future).

    Returns:
        GeneratorContainer: A GeneratorContainer which, when iterated,
            yields the licenses returned by the Webex Teams query.

    Raises:
        TypeError: If the parameter types are incorrect.
        ApiError: If the Webex Teams cloud returns an error.
    """
    check_type(orgId, basestring)
    params = dict_from_items_with_values(request_parameters, orgId=orgId)
    # Yield model objects lazily as items arrive from the API.
    for item in self._session.get_items(API_ENDPOINT, params=params):
        yield self._object_factory(OBJECT_TYPE, item)
def CreatePrecisionHelper(cls, precision):
    """Creates a precision helper.

    Args:
        precision (str): precision of the date and time value, which should
            be one of the PRECISION_VALUES in definitions.

    Returns:
        class: date time precision helper class.

    Raises:
        ValueError: if the precision value is unsupported.
    """
    helper_class = cls._PRECISION_CLASSES.get(precision, None)
    if not helper_class:
        raise ValueError('Unsupported precision: {0!s}'.format(precision))
    return helper_class
def set_func(self, name, func):
    """Set the processing function to use for this node.

    Args:
        name (str): The name of the function to use.  This is just stored
            for reference in case we need to serialize the node later.
        func (callable): A function that is called to process inputs for
            this node.  It should have the following signature:
                callable(input1_walker, input2_walker, ...)
            It should return a list of IOTileReadings that are then pushed
            into the node's output stream.
    """
    self.func = func
    self.func_name = name
def query(self, time_indices):
    """Query the values at given time indices.

    Args:
        time_indices: 0-based time indices to query, as a `list` of `int`.

    Returns:
        Values as a list of `numpy.ndarray` (for time indices in memory) or
        `None` (for time indices discarded).

    Raises:
        ValueError: if this `_WatchStore` instance has been disposed.
    """
    if self._disposed:
        raise ValueError(
            'Cannot query: this _WatchStore instance is already disposed')
    if not isinstance(time_indices, (tuple, list)):
        time_indices = [time_indices]
    output = []
    for time_index in time_indices:
        data_item = self._data[time_index]
        if isinstance(data_item, _TensorValueDiscarded):
            output.append(None)
            continue
        if (hasattr(data_item, 'dtype') and
                tensor_helper.translate_dtype(data_item.dtype) == 'string'):
            _, _, data_item = tensor_helper.array_view(data_item)
            # Fix: the `np.object` alias was deprecated in NumPy 1.20 and
            # removed in 1.24; the builtin `object` is the equivalent dtype.
            data_item = np.array(
                tensor_helper.process_buffers_for_display(data_item),
                dtype=object)
        output.append(data_item)
    return output
def __init__(self, skype=None, raw=None):
    """Instantiate a plain instance of this class, and store a reference to
    the Skype object for later API calls.

    Normally this method won't be called or implemented directly.
    Implementers should make use of :meth:`fromRaw` and the
    :meth:`initAttrs` decorator instead.

    Args:
        skype (Skype): parent Skype instance
        raw (dict): raw object, as provided by the API
    """
    self.raw = raw
    self.skype = skype
def _SetupDatabase(host=None, port=None, user=None, password=None,
                   database=None, client_key_path=None, client_cert_path=None,
                   ca_cert_path=None):
    """Connect to the given MySQL host and create a utf8mb4_unicode_ci database.

    Args:
        host: The hostname to connect to.
        port: The port to connect to.
        user: The username to connect as.
        password: The password to connect with.
        database: The database name to create.
        client_key_path: The path of the client private key file.
        client_cert_path: The path of the client public key certificate file.
        ca_cert_path: The path of the Certificate Authority (CA) certificate
            file.
    """
    # First connection is made without a database selected, so it can be
    # created if missing.
    conn = _Connect(host=host, port=port, user=user, password=password,
                    database=None, client_key_path=client_key_path,
                    client_cert_path=client_cert_path,
                    ca_cert_path=ca_cert_path)
    with contextlib.closing(conn):
        with contextlib.closing(conn.cursor()) as cursor:
            try:
                cursor.execute(CREATE_DATABASE_QUERY.format(database))
            except MySQLdb.MySQLError as error:
                # Ignore "database already exists"; re-raise anything else.
                if error.args[0] != mysql_error_constants.DB_CREATE_EXISTS:
                    raise
            cursor.execute('USE {}'.format(database))
            _CheckCollation(cursor)

    def _MigrationConnect():
        # Reconnect with the (now existing) database selected.
        return _Connect(host=host, port=port, user=user, password=password,
                        database=database, client_key_path=client_key_path,
                        client_cert_path=client_cert_path,
                        ca_cert_path=ca_cert_path)

    mysql_migration.ProcessMigrations(
        _MigrationConnect, config.CONFIG['Mysql.migrations_dir'])
def cancelPnL(self, account, modelCode: str = ''):
    """Cancel PnL subscription.

    Args:
        account: Cancel for this account.
        modelCode: If specified, cancel for this account model.
    """
    reqId = self.wrapper.pnlKey2ReqId.pop((account, modelCode), None)
    if not reqId:
        self._logger.error(f'cancelPnL: No subscription for account {account}, modelCode {modelCode}')
        return
    self.client.cancelPnL(reqId)
    self.wrapper.pnls.pop(reqId, None)
def get_py_internals(version=None, default=None):
    """Given a version specification, return the python internals for it.

    The specification can be any dict, which is returned verbatim, an index
    into :data:`PY_INTERNALS` or ``None`` (which falls back to ``default``).

    Arguments:
        version: The python version to return the internals of.
        default: The python version that will be looked up if ``version`` is
            None.

    Returns:
        dict: The python internals for the requested version.

    Raises:
        ValueError: If an unsupported python version is requested.
    """
    if version is None:
        version = default
    if isinstance(version, dict):
        return version
    if version in PY_INTERNALS:
        return PY_INTERNALS[version]
    # Bug fix: the ValueError was previously *returned* instead of raised,
    # so callers silently received an exception object as the result.
    raise ValueError('Unsupported python version %r requested.' % version)
def assert_same_rank(self, other):
    """Raises an exception if `self` and `other` do not have compatible ranks.

    Args:
        other: Another `TensorShape`.

    Raises:
        ValueError: If `self` and `other` do not represent shapes with the
            same rank.
    """
    other = as_shape(other)
    # An unknown rank on either side is considered compatible.
    if self.rank is None or other.rank is None:
        return
    if self.rank != other.rank:
        raise ValueError('Shapes %s and %s must have the same rank' % (self, other))
def convert_variables_to_constants_v2_as_graph(func, lower_control_flow=True,
                                               aggressive_inlining=False):
    """Replaces all the variables in a graph with constants of the same values.

    This function works as same as convert_variables_to_constants_v2, but it
    returns the intermediate `GraphDef` as well. This `GraphDef` contains all
    the debug information after all the transformations in the frozen phase.

    Args:
        func: ConcreteFunction.
        lower_control_flow: Boolean indicating whether or not to lower control
            flow ops such as If and While. (default True)
        aggressive_inlining: Boolean indicating whether or not to do aggressive
            function inlining (might be unsafe if function has stateful ops,
            not properly connected to control outputs).

    Returns:
        ConcreteFunction containing a simplified version of the original, and
        also the intermediate GraphDef containing the node debug information
        for the transformations in the frozen phase.
    """
    converter_data = _FunctionConverterDataInEager(
        func=func,
        lower_control_flow=lower_control_flow,
        aggressive_inlining=aggressive_inlining)
    graph_def, converted_inputs = _replace_variables_by_constants(
        converter_data=converter_data)
    frozen_func = _construct_concrete_function(func, graph_def, converted_inputs)
    return frozen_func, graph_def
def wsgi_simple_responder(
        result: Union[str, bytes],
        handler: Callable[[Union[str, bytes]], WSGI_TUPLE_TYPE],
        start_response: TYPE_WSGI_START_RESPONSE,
        status: str = '200 OK',
        extraheaders: TYPE_WSGI_RESPONSE_HEADERS = None) -> TYPE_WSGI_APP_RESULT:
    """Simple WSGI app.

    Args:
        result: the data to be processed by ``handler``
        handler: a function returning a ``(contenttype, extraheaders, data)``
            tuple, e.g. ``text_result``, ``html_result``
        start_response: standard WSGI ``start_response`` function
        status: status code (default ``"200 OK"``)
        extraheaders: optional extra HTTP headers

    Returns:
        WSGI application result
    """
    contenttype, handler_headers, output = handler(result)
    headers = [('Content-Type', contenttype),
               ('Content-Length', str(len(output)))]
    headers.extend(extraheaders or [])
    if handler_headers is not None:
        headers.extend(handler_headers)
    start_response(status, headers)
    return [output]
def DeserializeUnsigned(self, reader):
    """Deserialize object.

    Args:
        reader (neo.IO.BinaryReader):

    Raises:
        Exception: if transaction type is incorrect.
    """
    expected_type = int.from_bytes(self.Type, 'little')
    actual_type = reader.ReadByte()
    if actual_type != expected_type:
        raise Exception('incorrect type {}, wanted {}'.format(actual_type, expected_type))
    self.DeserializeUnsignedWithoutType(reader)
def save_aggregate_reports_to_splunk(self, aggregate_reports):
    """Saves aggregate DMARC reports to Splunk.

    Args:
        aggregate_reports: A list of aggregate report dictionaries
            to save in Splunk
    """
    logger.debug('Saving aggregate reports to Splunk')
    if type(aggregate_reports) == dict:
        aggregate_reports = [aggregate_reports]
    if len(aggregate_reports) < 1:
        return

    data = self._common_data.copy()
    json_str = ''
    for report in aggregate_reports:
        for record in report['records']:
            # Flatten each record into a single event dictionary.
            event = dict()
            for metadata in report['report_metadata']:
                event[metadata] = report['report_metadata'][metadata]
            event['published_policy'] = report['policy_published']
            source = record['source']
            event['source_ip_address'] = source['ip_address']
            event['source_country'] = source['country']
            event['source_reverse_dns'] = source['reverse_dns']
            event['source_base_domain'] = source['base_domain']
            event['message_count'] = record['count']
            event['disposition'] = record['policy_evaluated']['disposition']
            alignment = record['alignment']
            event['spf_aligned'] = alignment['spf']
            event['dkim_aligned'] = alignment['dkim']
            event['passed_dmarc'] = alignment['dmarc']
            identifiers = record['identifiers']
            event['header_from'] = identifiers['header_from']
            event['envelope_from'] = identifiers['envelope_from']
            auth_results = record['auth_results']
            if 'dkim' in auth_results:
                event['dkim_results'] = auth_results['dkim']
            if 'spf' in auth_results:
                event['spf_results'] = auth_results['spf']
            data['sourcetype'] = 'dmarc:aggregate'
            data['time'] = human_timestamp_to_timestamp(event['begin_date'])
            data['event'] = event.copy()
            json_str += '{0}\n'.format(json.dumps(data))

    if not self.session.verify:
        logger.debug('Skipping certificate verification for Splunk HEC')
    try:
        response = self.session.post(self.url, data=json_str,
                                     timeout=self.timeout)
        response = response.json()
    except Exception as e:
        raise SplunkError(e.__str__())
    if response['code'] != 0:
        raise SplunkError(response['text'])
def __ne__(self, other: 'TensorFluent') -> 'TensorFluent':
    """Returns a TensorFluent for the not-equal relational operator.

    Args:
        self: The first operand.
        other: The second operand.
    """
    return self._binary_op(self, other, tf.not_equal, tf.float32)
def collect_variables(self, variables: MultisetOfVariables) -> None:
    """Recursively add all variables occurring in the expression to *variables*.

    Used internally by `variables`; container expression classes override this
    to recurse into their children.  Accumulating into a caller-supplied
    multiset is more efficient when gathering variables of many expressions,
    because only one multiset needs to be created.

    Args:
        variables: Multiset that receives every variable of this expression.
    """
    name = self.variable_name
    if name is None:
        return
    variables.add(name)
This is used internally by `variables`. Needs to be overwritten by inheriting container expression classes.
This method can be used when gathering the `variables` of multiple expressions, because only one multiset
needs to be created and that is more efficient.
Args:
variables:
Multiset of variables. All variables contained in the expression are recursively added to this multiset. | codesearchnet |
def apply_strain(self, strain):
    """Apply a strain to the lattice.

    Args:
        strain (float or list): Amount of strain to apply.  Either a single
            float (isotropic) or a sequence of 3 numbers; e.g. 0.01 grows
            every lattice vector by 1%.
    """
    # Deformation tensor diag(1 + strain); element-wise product with the
    # identity, so a scalar or a length-3 sequence both work.
    deformation = np.eye(3) * (1 + np.array(strain))
    strained_matrix = np.dot(self._lattice.matrix.T, deformation).T
    self.lattice = Lattice(strained_matrix)
Args:
strain (float or list): Amount of strain to apply. Can be a float,
or a sequence of 3 numbers. E.g., 0.01 means all lattice
vectors are increased by 1%. This is equivalent to calling
modify_lattice with a lattice with lattice parameters that
are 1% larger. | codesearchnet |
def form_out(self, _form=None):
    """Renders a form and writes it to the response payload.

    Applies form modifiers, then serializes.  If supplied, the given form
    object instance is used instead of the view's default ObjectForm.

    Args:
        _form (:py:attr:`~zengine.forms.json_form.JsonForm`):
            Form object to override ``self.object_form``.
    """
    form = _form or self.object_form
    serialized = form.serialize()
    self.output['forms'] = serialized
    self._add_meta_props(form)
    serialized['grouping'] = form.Meta.grouping
    serialized['constraints'] = form.Meta.constraints
    self._patch_form(serialized)
    self.set_client_cmd('form')
result to response payload. If supplied, given form
object instance will be used instead of view's
default ObjectForm.
Args:
_form (:py:attr:`~zengine.forms.json_form.JsonForm`):
Form object to override `self.object_form` | codesearchnet |
def _pack(formatstring, value):
    """Pack a value into a bytestring.

    Uses the built-in :mod:`struct` Python module.

    Args:
        * formatstring (str): String for the packing. See the :mod:`struct`
          module for details.
        * value (depends on formatstring): The value to be packed

    Returns:
        A bytestring (str).

    Raises:
        ValueError

    Note that the :mod:`struct` module produces byte buffers for Python3,
    but bytestrings for Python2. This is compensated for automatically.
    """
    _checkString(formatstring, description='formatstring', minlength=1)
    try:
        result = struct.pack(formatstring, value)
    except struct.error:
        # Only catch packing failures; the previous bare ``except`` also
        # swallowed unrelated errors such as KeyboardInterrupt/SystemExit.
        errortext = 'The value to send is probably out of range, as the num-to-bytestring conversion failed.'
        errortext += ' Value: {0!r} Struct format code is: {1}'
        raise ValueError(errortext.format(value, formatstring))
    if (sys.version_info[0] > 2):
        # Python3 struct.pack returns bytes; decode to str via latin1 so the
        # rest of the (py2/py3 compatible) module can treat it uniformly.
        return str(result, encoding='latin1')
    return result
Uses the built-in :mod:`struct` Python module.
Args:
* formatstring (str): String for the packing. See the :mod:`struct` module for details.
* value (depends on formatstring): The value to be packed
Returns:
A bytestring (str).
Raises:
ValueError
Note that the :mod:`struct` module produces byte buffers for Python3,
but bytestrings for Python2. This is compensated for automatically. | codesearchnet |
def __init__(self, timestamp=None):
    """Initializes a POSIX timestamp in nanoseconds.

    Args:
        timestamp (Optional[int]): POSIX timestamp in nanoseconds.
    """
    super(PosixTimeInNanoseconds, self).__init__()
    # One unit of the stored timestamp equals one nanosecond.
    self._precision = definitions.PRECISION_1_NANOSECOND
    self._timestamp = timestamp
Args:
timestamp (Optional[int]): POSIX timestamp in nanoseconds. | juraj-google-style |
def add(self, *args: Any, **kwargs: Any) -> Optional[Callable]:
    """Register one or more functions to the list.

    Args:
        *args: Functions registered under their own ``__name__``.
        **kwargs: Functions registered under the given keyword name.

    Returns:
        The first positional function (so this can be used as a decorator),
        or None when only keyword arguments were given.

    Raises:
        AttributeError: if a positional method has no ``__name__``.
    """
    updates = {method.__name__: validate(method) for method in args}
    updates.update({name: validate(func) for name, func in kwargs.items()})
    self.items = {**self.items, **updates}
    return args[0] if args else None
Args:
*args: Set/Sequence of positional arguments.
**kwargs: Mapping of named arguments.
Raises:
AttributeError: Raised if the method being added has no name. (i.e. it has
no `__name__` property, and no `name` argument was given.)
Examples:
methods = Methods()
@methods.add
def subtract(minuend, subtrahend):
return minuend - subtrahend | codesearchnet |
def adist(self, codes):
    """Compute Asymmetric Distances between the query (self.dtable) and PQ-codes.

    Args:
        codes (np.ndarray): PQ codes with shape=(N, M) and dtype=pq.code_dtype,
            where pq is the pq instance that created the codes.

    Returns:
        np.ndarray: Asymmetric Distances with shape=(N, ).
    """
    assert codes.ndim == 2
    num_vecs, num_subspaces = codes.shape
    assert num_subspaces == self.dtable.shape[0]
    # Fancy-index one partial distance per sub-space, then sum over sub-spaces.
    per_subspace = self.dtable[np.arange(num_subspaces), codes]
    return per_subspace.sum(axis=1)
and the PQ-codes.
Args:
codes (np.ndarray): PQ codes with shape=(N, M) and
dtype=pq.code_dtype where pq is a pq instance that creates the codes
Returns:
np.ndarray: Asymmetric Distances with shape=(N, ) and dtype=np.float32 | juraj-google-style |
def ReleaseFileSystem(self, file_system):
    """Releases a cached file system object.

    Args:
        file_system (FileSystem): file system object.

    Returns:
        bool: True if the file system object can be closed.

    Raises:
        RuntimeError: if the file system object is not cached or an
            inconsistency is detected in the cache.
    """
    identifier, cache_value = (
        self._file_system_cache.GetCacheValueByObject(file_system))
    if not identifier:
        raise RuntimeError('Object not cached.')
    if not cache_value:
        raise RuntimeError('Invalid cache value.')
    self._file_system_cache.ReleaseObject(identifier)
    # Once fully dereferenced the cache entry can be dropped and the caller
    # may close the file system.
    can_close = cache_value.IsDereferenced()
    if can_close:
        self._file_system_cache.RemoveObject(identifier)
    return can_close
Args:
file_system (FileSystem): file system object.
Returns:
bool: True if the file system object can be closed.
Raises:
PathSpecError: if the path specification is incorrect.
RuntimeError: if the file system object is not cached or an inconsistency
is detected in the cache. | juraj-google-style |
def _has_data(self):
return 'end' in self.result and 'sum' in self.result['end'] | Checks if the iperf result has valid throughput data.
Returns:
True if the result contains throughput data. False otherwise. | github-repos |
def pretty_print_config_to_json(self, services, hostname=None, x_google_api_name=False):
    """JSON string description of a protorpc.remote.Service in OpenAPI format.

    Args:
        services: Either a single protorpc.remote.Service or a list of them
            that implements an api/version.
        hostname: string, Hostname of the API, to override the value set on
            the current service. Defaults to None.
        x_google_api_name: bool, whether to add the x-google-api-name field.

    Returns:
        string, The OpenAPI descriptor document as a JSON string.
    """
    spec = self.get_openapi_dict(
        services, hostname, x_google_api_name=x_google_api_name)
    return json.dumps(spec, sort_keys=True, indent=2, separators=(',', ': '))
Args:
services: Either a single protorpc.remote.Service or a list of them
that implements an api/version.
hostname: string, Hostname of the API, to override the value set on the
current service. Defaults to None.
Returns:
string, The OpenAPI descriptor document as a JSON string. | codesearchnet |
def _curvature_range(self):
    """Curvature range.

    Tracks the min/max of log(||grad||^2) over a sliding window and exposes
    smoothed (moving-averaged) exponentiated bounds as self._h_min/_h_max.

    Returns:
        curv_range_ops: list of ops that update the moving averages.
    """
    # Circular buffer of the last `curvature_window_width` log grad norms.
    self._curv_win = tf.get_variable('curv_win', dtype=tf.float32, trainable=False, shape=[self.curvature_window_width], initializer=tf.zeros_initializer)
    self._curv_win = tf.scatter_update(self._curv_win, (self._step % self.curvature_window_width), tf.log(self._grad_norm_squared))
    # Until the buffer is full, only the first (step + 1) slots are valid.
    valid_window = tf.slice(self._curv_win, tf.constant([0]), tf.expand_dims(tf.minimum(tf.constant(self.curvature_window_width), (self._step + 1)), dim=0))
    self._h_min_t = tf.reduce_min(valid_window)
    self._h_max_t = tf.reduce_max(valid_window)
    curv_range_ops = []
    # Control dependencies enforce: compute extrema -> update averages ->
    # read the averaged values.  Do not reorder.
    with tf.control_dependencies([self._h_min_t, self._h_max_t]):
        avg_op = self._moving_averager.apply([self._h_min_t, self._h_max_t])
        with tf.control_dependencies([avg_op]):
            # exp() undoes the log taken when filling the window.
            self._h_min = tf.exp(tf.identity(self._moving_averager.average(self._h_min_t)))
            self._h_max = tf.exp(tf.identity(self._moving_averager.average(self._h_max_t)))
            if self._sparsity_debias:
                self._h_min *= self._sparsity_avg
                self._h_max *= self._sparsity_avg
    curv_range_ops.append(avg_op)
    return curv_range_ops
Returns:
h_max_t, h_min_t ops | codesearchnet |
def _create_file_if_needed(filename):
if os.path.exists(filename):
return False
else:
open(filename, 'a+b').close()
logger.info('Credential file {0} created'.format(filename))
return True | Creates the an empty file if it does not already exist.
Returns:
True if the file was created, False otherwise. | codesearchnet |
def get_repo_url(pypirc, repository):
    """Fetch the RepositoryURL for a given repository, reading info from pypirc.

    Will try to find the repository in the .pypirc, including
    username/password.

    Args:
        pypirc (str): path to the .pypirc config file
        repository (str): URL or alias for the repository

    Returns:
        base.RepositoryURL for the repository
    """
    config_path = os.path.abspath(os.path.expanduser(pypirc))
    repo_config = base.PyPIConfig(config_path).get_repo_config(repository)
    if not repo_config:
        # Not found in .pypirc: treat the argument as a plain repository URL.
        return base.RepositoryURL(repository)
    return repo_config.get_clean_url()
Will try to find the repository in the .pypirc, including username/password.
Args:
pypirc (str): path to the .pypirc config file
repository (str): URL or alias for the repository
Returns:
base.RepositoryURL for the repository | juraj-google-style |
def AddPathSegment(self, path_segment, scan_object):
    """Adds a path segment.

    Args:
        path_segment: a string containing the path segment.
        scan_object: a scan object, either a scan tree sub node (instance of
            PathFilterScanTreeNode) or a string containing a path.

    Raises:
        ValueError: if the node already contains a scan object for
            the path segment.
    """
    if path_segment in self._path_segments:
        raise ValueError('Path segment already set.')
    # Sub nodes keep a back-reference to their parent node.
    if isinstance(scan_object, PathFilterScanTreeNode):
        scan_object.parent = self
    self._path_segments[path_segment] = scan_object
Args:
path_segment: a string containing the path segment.
scan_object: a scan object, either a scan tree sub node (instance of
PathFilterScanTreeNode) or a string containing a path.
Raises:
ValueError: if the node already contains a scan object for
the path segment. | codesearchnet |
def transactional(func, args, kwds, **options):
    """Decorator to make a function automatically run in a transaction.

    Supports both the vanilla ``@transactional`` form and the parameterized
    ``@transactional(retries=1)`` form.

    Args:
        **options: Transaction options (see transaction(); propagation
            defaults to TransactionOptions.ALLOWED).
    """
    # Delegate to the async variant and block on its result.
    return transactional_async.wrapped_decorator(func, args, kwds, **options).get_result()
Args:
**ctx_options: Transaction options (see transaction(), but propagation
default to TransactionOptions.ALLOWED).
This supports two forms:
(1) Vanilla:
@transactional
def callback(arg):
...
(2) With options:
@transactional(retries=1)
def callback(arg):
... | codesearchnet |
def GetTestConfigs():
    """Get all the valid test configs to run.

    Returns:
        All the valid test configs as tuples of data_format and use_gpu.
    """
    configs = [('NDHWC', False), ('NDHWC', True)]
    # NCDHW is only supported on GPU.
    if test.is_gpu_available(cuda_only=True):
        configs.append(('NCDHW', True))
    return configs
Returns:
all the valid test configs as tuples of data_format and use_gpu. | github-repos |
def parse_exception(line):
    """Parse the first line of a Cartouche exception description.

    Args:
        line (str): A single line Cartouche exception description.

    Returns:
        A 2-tuple containing the exception type and the first line of the
        description.
    """
    match = RAISES_REGEX.match(line)
    if match is None:
        raise CartoucheSyntaxError('Cartouche: Invalid argument syntax "{line}" for Raises block'.format(line=line))
    # Groups are (description, type) in the regex; return (type, description).
    return match.group(2), match.group(1)
return (m.group(2), m.group(1)) | Parse the first line of a Cartouche exception description.
Args:
line (str): A single line Cartouche exception description.
Returns:
A 2-tuple containing the exception type and the first line of the description. | codesearchnet |
def write_uint64(self, value, little_endian=True):
    """Pack the value as an unsigned integer and write 8 bytes to the stream.

    Args:
        value: integer to pack.
        little_endian (bool): specify the endianness. (Default) Little endian.

    Returns:
        int: the number of bytes written.
    """
    endian = '<' if little_endian else '>'
    return self.pack('%sQ' % endian, value)
Args:
value:
little_endian (bool): specify the endianness. (Default) Little endian.
Returns:
int: the number of bytes written. | juraj-google-style |
def GetMessages(self, formatter_mediator, event):
    """Determines the formatted message strings for an event object.

    Args:
        formatter_mediator (FormatterMediator): mediates the interactions
            between formatters and other components, such as storage and
            Windows EventLog resources.
        event (EventObject): event.

    Returns:
        tuple(str, str): formatted message string and short message string.

    Raises:
        WrongFormatter: if the event object cannot be formatted by the
            formatter.
    """
    if self.DATA_TYPE != event.data_type:
        raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
            event.data_type))
    event_values = event.CopyToDict()
    regvalue = event_values.get('regvalue', {})
    # Render the registry values as "name: value" pairs in sorted order.
    text_pieces = [
        '{0:s}: {1!s}'.format(name, data)
        for name, data in sorted(regvalue.items())]
    event_values['text'] = ' '.join(text_pieces)
    return self._ConditionalFormatMessages(event_values)
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter. | juraj-google-style |
def process(self, feed_item):
    """Processes a Bulkdozer feed item.

    Identifies if the dynamic targeting key already exists in CM; if it
    doesn't, creates it associated with the advertiser, and then inserts an
    association with the identified object.

    Args:
        feed_item: Bulkdozer feed item to process.
    """
    if feed_item.get(FieldMap.ADVERTISER_ID, None) and feed_item.get(FieldMap.DYNAMIC_TARGETING_KEY_NAME, None):
        if not self._key_exists(feed_item.get(FieldMap.ADVERTISER_ID, None), feed_item.get(FieldMap.DYNAMIC_TARGETING_KEY_NAME, None)):
            self._create_key(feed_item.get(FieldMap.DYNAMIC_TARGETING_KEY_NAME, None), 'OBJECT_ADVERTISER', feed_item.get(FieldMap.ADVERTISER_ID, None))
        object_type = feed_item.get(FieldMap.DYNAMIC_TARGETING_KEY_OBJECT_TYPE, None)
        entity_id = feed_item.get(FieldMap.DYNAMIC_TARGETING_KEY_OBJECT_ID, None)
        # Object types look like 'OBJECT_<ENTITY>'; strip the 7-char 'OBJECT_'
        # prefix and translate a Bulkdozer ephemeral id into the real CM id.
        if object_type and len(object_type) > 7:
            entity = object_type[7:]
            translated_id = store.translate(entity, entity_id)
            entity_id = translated_id or entity_id
        self._create_key(feed_item.get(FieldMap.DYNAMIC_TARGETING_KEY_NAME, None), object_type, entity_id)
        # Write the translated id back so downstream processors see it.
        feed_item[FieldMap.DYNAMIC_TARGETING_KEY_OBJECT_ID] = entity_id
    else:
        raise Exception('Dynamic targeting key, %s and %s are required' % (FieldMap.ADVERTISER_ID, FieldMap.DYNAMIC_TARGETING_KEY_NAME))
This method identifies if the dyanmic targeting key already exists in CM, if
it doesn't it creates it associated with the advertiser, and then inserts an
association with the identified object.
Args:
feed_item: Bulkdozer feed item to process.
Returns:
Newly created or updated CM object. | github-repos |
def _read_content_or_path(content_or_path):
if "\n" in content_or_path.strip():
return content_or_path
if not os.path.exists(content_or_path):
raise IOError("File '%s' doesn't exists!" % content_or_path)
with open(content_or_path) as f:
return f.read() | If `content_or_path` contains ``\\n``, return it. Else assume, that it is
path and read file at that path.
Args:
content_or_path (str): Content or path to the file.
Returns:
str: Content.
Raises:
IOError: when the file is not found.
def Generate(self, *args):
    """Generate a valid value.

    Abstract hook: subclasses are expected to override this.
    # NOTE(review): the body is empty in this source — the docstring is the
    # only statement; confirm against upstream whether a ``raise
    # NotImplementedError`` was dropped.

    Args:
        *args: External arguments necessary for generation.

    Returns: The generated value.
    """
Args:
*args: External arguments necessary for generation.
Returns: The generated value. | github-repos |
def check_version_info(redis_client):
    """Check if various version info of this process is correct.

    Used to detect if workers or drivers are started using different versions
    of Python, pyarrow, or Ray. If the version information is not present in
    Redis, no check is done.

    Args:
        redis_client: A client for the primary Redis shard.

    Raises:
        Exception: raised if there is a Ray/Python version mismatch.
    """
    redis_reply = redis_client.get('VERSION_INFO')
    # No VERSION_INFO key: nothing to compare against, skip the check.
    if (redis_reply is None):
        return
    true_version_info = tuple(json.loads(ray.utils.decode(redis_reply)))
    version_info = _compute_version_info()
    if (version_info != true_version_info):
        node_ip_address = ray.services.get_node_ip_address()
        error_message = (((((((((((((('Version mismatch: The cluster was started with:\n    Ray: ' + true_version_info[0]) + '\n    Python: ') + true_version_info[1]) + '\n    Pyarrow: ') + str(true_version_info[2])) + '\nThis process on node ') + node_ip_address) + ' was started with:') + '\n    Ray: ') + version_info[0]) + '\n    Python: ') + version_info[1]) + '\n    Pyarrow: ') + str(version_info[2]))
        # Only Ray and Python mismatches (first two fields) are fatal;
        # a pyarrow mismatch just warns.
        if (version_info[:2] != true_version_info[:2]):
            raise Exception(error_message)
        else:
            logger.warning(error_message)
This will be used to detect if workers or drivers are started using
different versions of Python, pyarrow, or Ray. If the version
information is not present in Redis, then no check is done.
Args:
redis_client: A client for the primary Redis shard.
Raises:
Exception: An exception is raised if there is a version mismatch. | codesearchnet |
def merge(self, other):
    """Merge the other HyperLogLog with this one, making this the union of
    the two.

    Args:
        other (datasketch.HyperLogLog): sketch to merge in; must have the
            same number of registers (m) and precision (p).
    """
    if self.m != other.m or self.p != other.p:
        raise ValueError("Cannot merge HyperLogLog with different\
            precisions.")
    # Union of two HLL sketches is the element-wise max of their registers.
    self.reg = np.maximum(self.reg, other.reg)
two.
Args:
other (datasketch.HyperLogLog): | juraj-google-style |
def edit_distance_2(self, word):
    """Compute all strings that are two edits away from `word` using only
    the letters in the corpus.

    Args:
        word (str): The word for which to calculate the edit distance.

    Returns:
        list: strings that are edit distance two from the provided word
        (may contain duplicates).
    """
    word = word.lower()
    results = []
    # Apply one more round of single edits to every distance-1 candidate.
    for first_edit in self.edit_distance_1(word):
        results.extend(self.edit_distance_1(first_edit))
    return results
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance two from the \
provided word | juraj-google-style |
def create_sonos_playlist_from_queue(self, title):
    """Create a new Sonos playlist from the current queue.

    Args:
        title: Name of the playlist

    :rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
    """
    # SaveQueue persists the current queue on the device under the given title.
    response = self.avTransport.SaveQueue([
        ('InstanceID', 0),
        ('Title', title),
        ('ObjectID', '')
    ])
    item_id = response['AssignedObjectID']
    obj_id = item_id.split(':', 2)[1]
    # NOTE(review): the next line appears truncated in this source
    # (unterminated string literal); the URI presumably embeds obj_id —
    # confirm against the upstream SoCo implementation.
    uri = "file:
    res = [DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
    return DidlPlaylistContainer(
        resources=res, title=title, parent_id='SQ:', item_id=item_id)
Args:
title: Name of the playlist
:rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer` | juraj-google-style |
def line_init(xo: int, yo: int, xd: int, yd: int) -> None:
    """Initialize a line whose points will be returned by `line_step`.

    This function does not return anything on its own and does not include
    the origin point.

    Args:
        xo (int): X starting point.
        yo (int): Y starting point.
        xd (int): X destination point.
        yd (int): Y destination point.

    .. deprecated:: 2.0
        Use `line_iter` instead.
    """
    lib.TCOD_line_init(xo, yo, xd, yd)
This function does not return anything on its own.
Does not include the origin point.
Args:
xo (int): X starting point.
yo (int): Y starting point.
xd (int): X destination point.
yd (int): Y destination point.
.. deprecated:: 2.0
Use `line_iter` instead. | juraj-google-style |
def _init_exception_logging(self, app):
    """Sets up exception logging unless ``APPINSIGHTS_DISABLE_EXCEPTION_LOGGING``
    is set in the Flask config.

    Args:
        app (flask.Flask): the Flask application for which to initialize the
            extension.
    """
    enabled = not app.config.get(CONF_DISABLE_EXCEPTION_LOGGING, False)
    if not enabled:
        return
    exception_telemetry_client = TelemetryClient(
        self._key, telemetry_channel=self._channel)
    @app.errorhandler(Exception)
    def exception_handler(exception):
        # Let Flask handle its own HTTP errors (404 etc.) untouched.
        if HTTPException and isinstance(exception, HTTPException):
            return exception
        try:
            # Re-raise so track_exception() sees an active exception context.
            raise exception
        except Exception:
            exception_telemetry_client.track_exception()
        finally:
            # Always propagate: telemetry must not swallow the error.
            raise exception
    self._exception_telemetry_client = exception_telemetry_client
is set in the Flask config.
Args:
app (flask.Flask). the Flask application for which to initialize the extension. | juraj-google-style |
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, RealmEmbedderOutput]:
    """Run the REALM encoder and project its pooled output to an embedding score.

    Returns:
        RealmEmbedderOutput (or a tuple when ``return_dict`` is False) with
        ``projected_score`` plus the encoder's hidden states / attentions.
    """
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict
    realm_outputs = self.realm(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
    # Index 1 of the encoder output is the pooled [CLS] representation.
    pooler_output = realm_outputs[1]
    projected_score = self.cls(pooler_output)
    if not return_dict:
        return (projected_score,) + realm_outputs[2:4]
    else:
        return RealmEmbedderOutput(projected_score=projected_score, hidden_states=realm_outputs.hidden_states, attentions=realm_outputs.attentions)
Example:
```python
>>> from transformers import AutoTokenizer, RealmEmbedder
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("google/realm-cc-news-pretrained-embedder")
>>> model = RealmEmbedder.from_pretrained("google/realm-cc-news-pretrained-embedder")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> projected_score = outputs.projected_score
``` | github-repos |
def set_uri(self, uri, resource_nr=0, protocol_info=None):
    """Set a resource uri for this instance.

    If the resource does not exist, a new one is created with the given
    protocol info.

    Args:
        uri (str): The resource uri.
        resource_nr (int): The index of the resource on which to set the
            uri. If it does not exist, a new resource is added to the list.
            Note that by default, only the uri of the first resource is
            used for playing the item.
        protocol_info (str): Protocol info for the resource. If none is
            given and the resource does not exist yet, a default protocol
            info is constructed as '[uri prefix]:*:*:*'.
    """
    if resource_nr < len(self.resources):
        resource = self.resources[resource_nr]
        resource.uri = uri
        if protocol_info is not None:
            resource.protocol_info = protocol_info
    else:
        if protocol_info is None:
            # Default protocol info derived from the uri scheme.
            protocol_info = uri[:uri.index(':')] + ':*:*:*'
        self.resources.append(DidlResource(uri, protocol_info))
self.resources.append(DidlResource(uri, protocol_info)) | Set a resource uri for this instance. If no resource exists, create
a new one with the given protocol info.
Args:
uri (str): The resource uri.
resource_nr (int): The index of the resource on which to set the
uri. If it does not exist, a new resource is added to the list.
Note that by default, only the uri of the first resource is
used for playing the item.
protocol_info (str): Protocol info for the resource. If none is
given and the resource does not exist yet, a default protocol
info is constructed as '[uri prefix]:*:*:*'. | codesearchnet |
def __rsub__(self, other):
    """Returns the subtraction of `self` from `other`.

    Args:
        other: Another Dimension, or a value accepted by `as_dimension`.

    Returns:
        A Dimension whose value is the subtraction of `self` from `other`,
        or an unknown Dimension when either operand is unknown.
    """
    other = as_dimension(other)
    if self._value is not None and other.value is not None:
        return Dimension(other.value - self._value)
    # Unknown dimensions propagate.
    return Dimension(None)
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the subtraction of `self` from `other`. | github-repos |
def MakeMixture(metapmf, name='mix'):
    """Make a mixture distribution.

    Args:
        metapmf: Pmf that maps from Pmfs to probs.
        name: string name for the new Pmf.

    Returns: Pmf object.
    """
    mix = Pmf(name=name)
    for inner_pmf, outer_prob in metapmf.Items():
        for value, inner_prob in inner_pmf.Items():
            # Weight each inner outcome by the probability of its Pmf.
            mix.Incr(value, outer_prob * inner_prob)
    return mix
Args:
metapmf: Pmf that maps from Pmfs to probs.
name: string name for the new Pmf.
Returns: Pmf object. | juraj-google-style |
def _get_bucket_statistics(self, bucket_name, bucket_region, storage_type, statistic, days):
    """Returns datapoints from CloudWatch for bucket statistics.

    Args:
        bucket_name `(str)`: The name of the bucket
        bucket_region `(str)`: AWS region the bucket lives in
        storage_type `(str)`: CloudWatch StorageType dimension value
        statistic `(str)`: The statistic you want to fetch
        days `(int)`: Sample period for the statistic

    Returns:
        The average value of the statistic, the string 'NO_DATA' when no
        datapoints exist, or None when the CloudWatch call fails.
    """
    cw = self.session.client('cloudwatch', region_name=bucket_region)
    try:
        obj_stats = cw.get_metric_statistics(Namespace='AWS/S3', MetricName=statistic, Dimensions=[{'Name': 'StorageType', 'Value': storage_type}, {'Name': 'BucketName', 'Value': bucket_name}], Period=86400, StartTime=(datetime.utcnow() - timedelta(days=days)), EndTime=datetime.utcnow(), Statistics=['Average'])
        stat_value = (obj_stats['Datapoints'][0]['Average'] if obj_stats['Datapoints'] else 'NO_DATA')
        return stat_value
    except Exception as e:
        # NOTE(review): errors are logged and swallowed, making the implicit
        # return value None — callers must handle None as well as 'NO_DATA'.
        self.log.error('Could not get bucket statistic for account {} / bucket {} / {}'.format(self.account.account_name, bucket_name, e))
    finally:
        del cw
Args:
bucket_name `(str)`: The name of the bucket
statistic `(str)`: The statistic you want to fetch from
days `(int)`: Sample period for the statistic | codesearchnet |
def makedirs(path):
    """Equivalent to mkdir -p.

    Args:
        path (str): the path to mkdir -p

    Raises:
        ScriptWorkerException: if path exists already and the realpath is
            not a dir.
    """
    if not path:
        return
    try:
        # Attempt creation directly instead of checking os.path.exists()
        # first: the check-then-create pattern has a TOCTOU race when another
        # process creates the path in between.
        os.makedirs(path)
        log.debug('makedirs({})'.format(path))
    except FileExistsError:
        # Path already exists (possibly created concurrently); only a
        # non-directory is an error.
        realpath = os.path.realpath(path)
        if not os.path.isdir(realpath):
            raise ScriptWorkerException(
                'makedirs: {} already exists and is not a directory!'.format(path))
Args:
path (str): the path to mkdir -p
Raises:
ScriptWorkerException: if path exists already and the realpath is not a dir. | codesearchnet |
def __hash__(self) -> int:
    """Hash function.

    NOTE(daiyip): ConstStrKey shares the same hash with its text, which
    makes it easy to lookup a dict of string by an ConstStrKey object, and
    vice versa.

    Returns:
        Hash code.
    """
    return self._text.__hash__()
NOTE(daiyip): ConstStrKey shares the same hash with its text, which
makes it easy to lookup a dict of string by an ConstStrKey object, and
vice versa.
Returns:
Hash code. | github-repos |
def plot_waterfall(self, f_start=None, f_stop=None, if_id=0, logged=True, cb=True, MJD_time=False, **kwargs):
    """Plot waterfall of data.

    Args:
        f_start (float): start frequency, in MHz
        f_stop (float): stop frequency, in MHz
        if_id (int): IF to grab data from
        logged (bool): Plot in linear (False) or dB units (True)
        cb (bool): whether to plot the colorbar
        MJD_time (bool): label the time axis in MJD instead of seconds
        kwargs: keyword args to be passed to matplotlib imshow()
    """
    plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)
    # Negative channel bandwidth means frequencies are stored descending;
    # flip both axes so frequency increases to the right.
    if self.header[b'foff'] < 0:
        plot_data = plot_data[..., ::-1]
        plot_f = plot_f[::-1]
    if logged:
        plot_data = db(plot_data)
    # Decimate so imshow never receives more points than MAX_IMSHOW_POINTS.
    dec_fac_x, dec_fac_y = 1, 1
    if plot_data.shape[0] > MAX_IMSHOW_POINTS[0]:
        dec_fac_x = int(plot_data.shape[0] / MAX_IMSHOW_POINTS[0])
    if plot_data.shape[1] > MAX_IMSHOW_POINTS[1]:
        dec_fac_y = int(plot_data.shape[1] / MAX_IMSHOW_POINTS[1])
    plot_data = rebin(plot_data, dec_fac_x, dec_fac_y)
    # Prefer the source name from the header; fall back to the filename.
    try:
        plt.title(self.header[b'source_name'])
    except KeyError:
        plt.title(self.filename)
    extent = self._calc_extent(plot_f=plot_f,plot_t=self.timestamps,MJD_time=MJD_time)
    plt.imshow(plot_data,
               aspect='auto',
               origin='lower',
               rasterized=True,
               interpolation='nearest',
               extent=extent,
               cmap='viridis',
               **kwargs
               )
    if cb:
        plt.colorbar()
    plt.xlabel("Frequency [MHz]")
    if MJD_time:
        plt.ylabel("Time [MJD]")
    else:
        plt.ylabel("Time [s]")
Args:
f_start (float): start frequency, in MHz
f_stop (float): stop frequency, in MHz
logged (bool): Plot in linear (False) or dB units (True),
cb (bool): for plotting the colorbar
kwargs: keyword args to be passed to matplotlib imshow() | juraj-google-style |
def RegisterParser(cls, parser_class):
    """Registers a parser class.

    The parser classes are identified based on their lower case name.

    Args:
        parser_class (type): parser class (subclass of BaseParser).

    Raises:
        KeyError: if parser class is already set for the corresponding name.
    """
    parser_name = parser_class.NAME.lower()
    if parser_name in cls._parser_classes:
        raise KeyError(
            'Parser class already set for name: {0:s}.'.format(parser_class.NAME))
    cls._parser_classes[parser_name] = parser_class
The parser classes are identified based on their lower case name.
Args:
parser_class (type): parser class (subclass of BaseParser).
Raises:
KeyError: if parser class is already set for the corresponding name. | codesearchnet |
def intersection(boxes1, boxes2):
    """Compute pairwise intersection areas between boxes.

    Args:
        boxes1: a numpy array with shape [N, 4] holding N boxes
        boxes2: a numpy array with shape [M, 4] holding M boxes

    Returns:
        a numpy array with shape [N, M] representing pairwise intersection
        area
    """
    def _overlap(lo1, hi1, lo2, hi2):
        # Overlap length along one axis, clamped at zero for disjoint boxes.
        pair_min_hi = np.minimum(hi1, np.transpose(hi2))
        pair_max_lo = np.maximum(lo1, np.transpose(lo2))
        zeros = np.zeros(pair_max_lo.shape, dtype='f4')
        return np.maximum(zeros, pair_min_hi - pair_max_lo)

    y_min1, x_min1, y_max1, x_max1 = np.split(boxes1, 4, axis=1)
    y_min2, x_min2, y_max2, x_max2 = np.split(boxes2, 4, axis=1)
    heights = _overlap(y_min1, y_max1, y_min2, y_max2)
    widths = _overlap(x_min1, x_max1, x_min2, x_max2)
    return heights * widths
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes
boxes2: a numpy array with shape [M, 4] holding M boxes
Returns:
a numpy array with shape [N*M] representing pairwise intersection area | juraj-google-style |
def make_sine_surface(dims=DEFAULT_DIMS, offset=0.5, scale=1.0):
    """Makes a surface from the 3D sine function.

    Args:
        dims (pair): the dimensions of the surface to create
        offset (float): an offset applied to the function
        scale (float): a scale applied to the sine frequency

    Returns:
        surface: A surface.
    """
    # Shift/scale the gradient field, then take sin of the radial distance.
    shifted = (np.array(make_gradients(dims)) - offset) * scale * np.pi
    radial_distance = np.linalg.norm(shifted, axis=0)
    return np.sin(radial_distance)
Args:
dims (pair): the dimensions of the surface to create
offset (float): an offset applied to the function
scale (float): a scale applied to the sine frequency
Returns:
surface: A surface. | juraj-google-style |
def _FractionalAvgPoolGrad(op: ops.Operation, grad_0, unused_grad_1, unused_grad_2):
    """Returns gradient for FractionalAvgPool.

    FractionalAvgPool has three outputs, so three gradients are passed in;
    only the first is useful, the other two are empty.

    Args:
        op: The FractionalAvgPoolOp.
        grad_0: Gradient with respect to op.outputs[0].
        unused_grad_1: Gradient with respect to op.outputs[1]/row_seq (empty).
        unused_grad_2: Gradient with respect to op.outputs[2]/col_seq (empty).

    Returns:
        Input backprop for FractionalAvgPool op.
    """
    return gen_nn_ops.fractional_avg_pool_grad(op.inputs[0].get_shape(), grad_0, op.outputs[1], op.outputs[2], op.get_attr('overlapping'))
Since FractionalAvgPool has three outputs, there are three gradients passed in
for each of the outputs. Only the first one is useful, the other two gradients
are empty.
Args:
op: The FractionalAvgPoolOp.
grad_0: Gradient with respect to op.outputs[0]
unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.
Returns:
Input backprop for FractionalAvgPool op. | github-repos |
def assert_that(actual, matcher, label='assert_that', reify_windows=False, use_global_window=True):
    """A PTransform that checks a PCollection has an expected value.

    Note that assert_that should be used only for testing pipelines since the
    check relies on materializing the entire PCollection being checked.

    Args:
        actual: A PCollection.
        matcher: A matcher function taking as argument the actual value of a
            materialized PCollection; raises BeamAssertException when
            expectations are not met.
        label: Optional string label, needed when several assert_that
            transforms appear in the same pipeline.
        reify_windows: If True, matcher is passed a list of TestWindowedValue.
        use_global_window: If False, matcher is passed a dictionary of
            (window, elements in the window).

    Returns:
        Ignored.
    """
    assert isinstance(actual, pvalue.PCollection), '%s is not a supported type for Beam assert' % type(actual)
    pipeline = actual.pipeline
    # NOTE(review): the string on the next line appears truncated in this
    # source (unterminated literal ending at 'https:'); confirm the full
    # message against the upstream Beam repository.
    if getattr(actual.pipeline, 'result', None):
        raise RuntimeError('assert_that must be used within a beam.Pipeline context. ' + 'Prior to Beam 2.60.0, asserts outside of the context of a pipeline ' + 'were silently ignored, starting with Beam 2.60.0 this is no longer ' + 'allowed. To fix, move your assert_that call into your pipeline ' + 'context so that it is added before the pipeline is run. For more ' + 'information, see https:
    # De-duplicate the label when it was already applied to this pipeline.
    if label in pipeline.applied_labels:
        label_idx = 2
        while f'{label}_{label_idx}' in pipeline.applied_labels:
            label_idx += 1
        label = f'{label}_{label_idx}'
    if isinstance(matcher, _EqualToPerWindowMatcher):
        reify_windows = True
        use_global_window = True
    class ReifyTimestampWindow(DoFn):
        def process(self, element, timestamp=DoFn.TimestampParam, window=DoFn.WindowParam, pane_info=DoFn.PaneInfoParam):
            return [TestWindowedValue(element, timestamp, [window], pane_info)]
    class AddWindow(DoFn):
        def process(self, element, window=DoFn.WindowParam):
            yield (element, window)
    class AssertThat(PTransform):
        def expand(self, pcoll):
            if reify_windows:
                pcoll = pcoll | ParDo(ReifyTimestampWindow())
            # Pair with a singleton so an *empty* actual PCollection still
            # produces one (empty) list for the matcher to inspect.
            keyed_singleton = pcoll.pipeline | Create([(None, None)])
            keyed_singleton.is_bounded = True
            if use_global_window:
                pcoll = pcoll | WindowInto(window.GlobalWindows())
            keyed_actual = pcoll | 'ToVoidKey' >> Map(lambda v: (None, v))
            keyed_actual.is_bounded = True
            plain_actual = (keyed_singleton, keyed_actual) | 'Group' >> CoGroupByKey() | 'Unkey' >> Map(lambda k_values: list(k_values[1][1]))
            if not use_global_window:
                plain_actual = plain_actual | 'AddWindow' >> ParDo(AddWindow())
            return plain_actual | 'Match' >> Map(matcher)
        def default_label(self):
            return label
    return actual | AssertThat()
Note that assert_that should be used only for testing pipelines since the
check relies on materializing the entire PCollection being checked.
Args:
actual: A PCollection.
matcher: A matcher function taking as argument the actual value of a
materialized PCollection. The matcher validates this actual value against
expectations and raises BeamAssertException if they are not met.
label: Optional string label. This is needed in case several assert_that
transforms are introduced in the same pipeline.
reify_windows: If True, matcher is passed a list of TestWindowedValue.
use_global_window: If False, matcher is passed a dictionary of
(k, v) = (window, elements in the window).
Returns:
Ignored. | github-repos |
def _show_inputs_outputs(saved_model_dir, tag_set, signature_def_key, indent=0):
    """Prints input and output TensorInfos.

    Prints the details of input and output TensorInfos for the SignatureDef
    mapped by the given signature_def_key.

    Args:
        saved_model_dir: Directory containing the SavedModel to inspect.
        tag_set: Group of tag(s) of the MetaGraphDef, in string format,
            separated by ','. For a tag-set with multiple tags, all tags
            must be passed in.
        signature_def_key: A SignatureDef key string.
        indent: How far (in increments of 2 spaces) to indent each line.
    """
    meta_graph_def = saved_model_utils.get_meta_graph_def(saved_model_dir, tag_set)
    _show_inputs_outputs_mgd(meta_graph_def, signature_def_key, indent)
Prints the details of input and output TensorInfos for the SignatureDef mapped
by the given signature_def_key.
Args:
saved_model_dir: Directory containing the SavedModel to inspect.
tag_set: Group of tag(s) of the MetaGraphDef, in string format, separated by
','. For tag-set contains multiple tags, all tags must be passed in.
signature_def_key: A SignatureDef key string.
indent: How far (in increments of 2 spaces) to indent each line of output. | github-repos |
def _add_to(self, db, index, item, default=OOSet):
    """Add `item` to `db` under `index`, creating the container when missing.

    Args:
        db (dict-obj): Dict-like object used to connect to database.
        index (str): Index used to look in `db`.
        item (obj): Persistent object, which may be stored in DB.
        default (func/obj): Factory for the container created under `index`
            when it does not exist yet. Default :class:`OOSet`.
    """
    container = db.get(index)
    if container is None:
        container = default()
        db[index] = container
    container.add(item)
it using `default`.
Args:
db (dict-obj): Dict-like object used to connect to database.
index (str): Index used to look in `db`.
item (obj): Persistent object, which may be stored in DB.
default (func/obj): Reference to function/object, which will be
used to create the object under `index`.
Default :class:`OOSet`. | juraj-google-style |
def _assert_struct_type(self, struct, name, types, path=None, extra_info=None):
    """Asserts that given structure is of any of given types.

    Args:
        struct: structure to check
        name: displayable name of the checked structure (e.g. "run_foo" for
            section run_foo)
        types: list/tuple of types that are allowed for given struct
        path: list with a source file as a first element and previous names
            (as in name argument to this method) as other elements
        extra_info: extra information to print if error is found (e.g. hint
            how to fix this)

    Raises:
        YamlTypeError: if given struct is not of any given type; the error
            message contains the source file and a "path" specifying where
            the problem is.
    """
    wanted_names = set()
    for allowed in types:
        wanted_names.add(self._get_yaml_typename(allowed))
    wanted_yaml_typenames = ' or '.join(wanted_names)
    actual_yaml_typename = self._get_yaml_typename(type(struct))
    if isinstance(struct, types):
        return
    err = []
    if path:
        err.append(self._format_error_path(path + [name]))
    err.append(' Expected {w} value for "{n}", got value of type {a}: "{v}"'.format(
        w=wanted_yaml_typenames, n=name, a=actual_yaml_typename, v=struct))
    if extra_info:
        err.append('Tip: ' + extra_info)
    raise exceptions.YamlTypeError('\n'.join(err))
Args:
struct: structure to check
name: displayable name of the checked structure (e.g. "run_foo" for section run_foo)
types: list/tuple of types that are allowed for given struct
path: list with a source file as a first element and previous names
(as in name argument to this method) as other elements
extra_info: extra information to print if error is found (e.g. hint how to fix this)
Raises:
YamlTypeError: if given struct is not of any given type; error message contains
source file and a "path" (e.g. args -> somearg -> flags) specifying
where the problem is | codesearchnet |
def writeProject(self, session, directory, name):
    """Write all files for a project from the database to file.

    Use this method to write all GsshaPy supported files back into their
    native file formats. If writing to execute the model, increase efficiency
    by using the writeInput method to write only the files needed to run it.

    Args:
        session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
            object bound to a PostGIS enabled database.
        directory (str): Directory where the files will be written.
        name (str): Name given to the project when written (e.g.: 'example').
            Files following the project naming convention get this name with
            the appropriate extension; other files retain their original
            file names.
    """
    self.project_directory = directory
    # All writers below assume the working directory is the project directory.
    with tmp_chdir(directory):
        batchDirectory = self._getBatchDirectory(directory)
        replaceParamFile = self.replaceParamFile
        self._writeReplacementFiles(session=session, directory=directory, name=name)
        self.write(session=session, directory=directory, name=name)
        # Inputs go to the project directory, outputs to the batch directory.
        self._writeXput(session=session, directory=directory, fileCards=self.INPUT_FILES, name=name, replaceParamFile=replaceParamFile)
        self._writeXput(session=session, directory=batchDirectory, fileCards=self.OUTPUT_FILES, name=name)
        self._writeXputMaps(session=session, directory=directory, mapCards=self.INPUT_MAPS, name=name, replaceParamFile=replaceParamFile)
        self._writeWMSDatasets(session=session, directory=batchDirectory, wmsDatasetCards=self.WMS_DATASETS, name=name)
Use this method to write all GsshaPy supported files back into their native file formats. If writing to execute
the model, increase efficiency by using the writeInput method to write only the file needed to run the model.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
directory (str): Directory where the files will be written.
name (str): Name that will be given to project when written (e.g.: 'example'). Files that follow the project
naming convention will be given this name with the appropriate extension (e.g.: 'example.prj',
'example.cmt', and 'example.gag'). Files that do not follow this convention will retain their original
file names. | codesearchnet |
def _PathStripPrefix(self, path):
    """Strip the Windows volume prefix from a path.

    Args:
        path (str): Windows path to strip the prefix from.

    Returns:
        str: path without the prefix, or None if the path is not supported.
    """
    separator = self._PATH_SEPARATOR
    if path.startswith('\\\\.\\') or path.startswith('\\\\?\\'):
        # Device path: expect a drive letter, ':' and a separator after the
        # 4-character prefix, e.g. "\\.\C:\...".
        if len(path) < 7 or path[5] != ':' or path[6] != separator:
            return None
        return path[7:]
    if path.startswith('\\\\'):
        # UNC paths are not supported.
        return None
    if len(path) >= 3 and path[1] == ':':
        # Drive-letter path, e.g. "C:\...": the drive must be followed by
        # a separator.
        if path[2] != separator:
            return None
        return path[3:]
    if path.startswith('\\'):
        # Rooted path without a drive: drop the leading separator.
        return path[1:]
    # Relative paths (and anything else) are not supported.
    return None
Args:
path (str): Windows path to strip the prefix from.
Returns:
str: path without the prefix or None if the path is not supported. | codesearchnet |
def get_example_from_tensor_dict(self, tensor_dict):
    """Gets an example from a dict with tensorflow tensors.

    Args:
        tensor_dict: Keys and values should match the corresponding Glue
            tensorflow_dataset examples.

    Raises:
        NotImplementedError: always; subclasses must override this method.
    """
    raise NotImplementedError()
Args:
tensor_dict: Keys and values should match the corresponding Glue
tensorflow_dataset examples. | github-repos |
def back_up(self, epoch):
    """Back up the current state of training into a checkpoint file.

    Args:
        epoch: The current epoch information to be saved.
    """
    # Stash the epoch number in the tracked variable so it is captured by
    # the checkpoint written below.
    backend.set_value(self._ckpt_saved_epoch, epoch)
    manager = self.write_checkpoint_manager
    saved = manager.save()
    # Only clean up the temporary checkpoint directory once the save succeeded.
    if saved:
        distributed_file_utils.remove_temp_dirpath(
            manager.directory, self._model.distribute_strategy)
Args:
epoch: The current epoch information to be saved. | github-repos |
def get_vertices_to_edges_matrix(self, want_xyz=True):
    """Return a sparse matrix M such that ``e = M.dot(v)`` maps vertices to
    edge difference vectors (one edge per edge, not two per triangle).

    Args:
        want_xyz: if True, M operates on flattened xyz coordinates;
            otherwise on a single coordinate (x *or* y *or* z).

    Returns:
        scipy.sparse.csc_matrix: the vertices-to-edges difference matrix.
    """
    import numpy as np
    import scipy.sparse as sp
    edges = np.asarray(self.vertices_per_edge, dtype=np.int32)
    # Each edge contributes two entries in its row: +1 for the first
    # vertex and -1 for the second, so the product is their difference.
    rows = np.repeat(np.arange(len(edges)), 2)
    cols = edges.flatten()
    vals = np.ones_like(edges)
    vals[:, 1] = -1
    vals = vals.flatten()
    if want_xyz:
        # Expand each (row, col) pair to the three interleaved coordinates.
        rows = np.concatenate((rows * 3, rows * 3 + 1, rows * 3 + 2))
        cols = np.concatenate((cols * 3, cols * 3 + 1, cols * 3 + 2))
        vals = np.concatenate((vals, vals, vals))
    return sp.csc_matrix((vals, np.vstack((rows, cols))))
gives back edges (so "e = M.dot(v)"). Note that this generates
one edge per edge, *not* two edges per triangle.
Args:
want_xyz: if true, takes and returns xyz coordinates, otherwise
takes and returns x *or* y *or* z coordinates | juraj-google-style |
def get(self, po):
    """Look up and convert the value for a PluginOption instance.

    Args:
        po: PluginOption with ``name``, ``typ`` and ``default`` attributes.

    Returns:
        The converted value for the option.

    Raises:
        ValueError: if no ``_get_<typ>`` converter method exists.
        NameError: if the option is absent and its default is REQUIRED.
    """
    name = po.name
    typ = po.typ
    default = po.default
    # Dispatch to a type-specific converter, e.g. _get_str / _get_int.
    handler = getattr(self, '_get_{}'.format(typ), None)
    if (handler is None):
        raise ValueError(typ)
    # Record that this option was consulted, even if absent from the config.
    self.seen.add(name)
    if (not self.parser.has_option(self.section, name)):
        if (default is REQUIRED):
            raise NameError(self.section, name)
        # INHERIT_GLOBAL defaults fall back to the 'global' section.
        if isinstance(default, INHERIT_GLOBAL):
            return handler('global', name, default.default)
        return handler(self.section, name, default)
    # NOTE(review): the branch for options present in the parser is not
    # visible here -- this snippet appears truncated; confirm against the
    # full source before relying on this function.
Args:
po: PluginOption
Returns: converted value | codesearchnet |
def __init__(self, maxsize, ttl, out_deque=None, **kw):
    """Construct a TTL cache whose expired entries are pushed to a deque.

    Args:
        maxsize (int): the maximum number of entries in the cache.
        ttl (int): the ttl for entries added to the cache.
        out_deque (collections.deque): deque that receives items expiring
            from the cache; a fresh one is created when None.
        **kw: other keyword args accepted by ``cachetools.TTLCache``.

    Raises:
        ValueError: if out_deque is not a collections.deque.
    """
    super(DequeOutTTLCache, self).__init__(maxsize, ttl, **kw)
    # Validate only a caller-supplied deque; None means "create our own".
    if out_deque is not None and not isinstance(out_deque, collections.deque):
        raise ValueError(u'out_deque should be a collections.deque')
    self._out_deque = collections.deque() if out_deque is None else out_deque
    # Bookkeeping for entries currently tracked by the cache.
    self._tracking = {}
Args:
maxsize (int): the maximum number of entries in the queue
ttl (int): the ttl for entries added to the cache
out_deque :class:`collections.deque`: a `deque` in which to add items
that expire from the cache
**kw: the other keyword args supported by the constructor to
:class:`cachetools.TTLCache`
Raises:
ValueError: if out_deque is not a collections.deque | juraj-google-style |
def from_session(cls, sess, input_tensors, output_tensors):
    """Creates a TFLiteConverter class from a TensorFlow Session.

    Args:
        sess: TensorFlow Session.
        input_tensors: List of input tensors. Type and shape are computed
            using `foo.shape` and `foo.dtype`.
        output_tensors: List of output tensors (only .name is used from this).

    Returns:
        TFLiteConverter class.
    """
    # Record that this converter originated from a TF session for metadata.
    TFLiteConverterBase._set_original_model_type(conversion_metadata_fb.ModelType.TF_SESSION)
    frozen_graph_def = _freeze_graph(sess, input_tensors, output_tensors)
    debug_info_fn = _build_debug_info_func(sess.graph)
    return cls(frozen_graph_def, input_tensors, output_tensors,
               experimental_debug_info_func=debug_info_fn)
Args:
sess: TensorFlow Session.
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
Returns:
TFLiteConverter class. | github-repos |
def __init__(self, emt_id='', emt_pass=''):
    """Initialize the interface attributes.

    Initialization may also be deferred and performed later by calling
    ``initialize()`` manually.

    Args:
        emt_id (str): ID given by the server upon registration.
        emt_pass (str): Token given by the server upon registration.
    """
    # Skip setup when either credential is missing (deferred initialization).
    if not emt_id or not emt_pass:
        return
    self.initialize(emt_id, emt_pass)
Initialization may also be performed at a later point by manually
calling the ``initialize()`` method.
Args:
emt_id (str): ID given by the server upon registration
emt_pass (str): Token given by the server upon registration | juraj-google-style |
def marginal_counts(counts, meas_qubits):
    """Compute the marginal counts for a subset of measured qubits.

    Args:
        counts (dict): counts returned from a backend ({str: int}), keyed by
            bitstrings with qubit 0 as the rightmost character.
        meas_qubits (list[int]): the qubits to return the marginal counts
            distribution for.

    Returns:
        dict: A counts dict over the measured qubits, keyed by bitstrings
        with the highest measured qubit as the leftmost character.

    Example: if `counts = {'00': 10, '01': 5}`,
        `marginal_counts(counts, [1])` returns `{'0': 15, '1': 0}` and
        `marginal_counts(counts, [0])` returns `{'0': 10, '1': 5}`.
    """
    num_of_qubits = len(list(counts.keys())[0])
    # Highest qubit first so it maps to the leftmost output character.
    qs = sorted(meas_qubits, reverse=True)
    # All bitstrings over the measured qubits, in lexicographic order.
    meas_keys = [format(i, '0{}b'.format(len(qs))) for i in range(2 ** len(qs))]
    meas_counts = dict.fromkeys(meas_keys, 0)
    for key, val in counts.items():
        # Pick the character for each measured qubit directly (qubit q lives
        # at string index num_of_qubits - 1 - q) instead of matching every
        # count key against one regex per output bitstring, which was
        # O(2^k * n) and needed re/reduce/count_keys.
        marg = ''.join(key[num_of_qubits - 1 - q] for q in qs)
        # Keys containing characters other than 0/1 are silently dropped,
        # matching the previous regex-based behavior.
        if marg in meas_counts:
            meas_counts[marg] += val
    return meas_counts
Args:
counts (dict): the counts returned from a backend ({str: int}).
meas_qubits (list[int]): the qubits to return the marginal
counts distribution for.
Returns:
dict: A counts dict for the meas_qubits.abs
Example: if `counts = {'00': 10, '01': 5}`
`marginal_counts(counts, [1])` returns `{'0': 15, '1': 0}`.
`marginal_counts(counts, [0])` returns `{'0': 10, '1': 5}`. | codesearchnet |
def transform(self, X):
    """Encode categorical columns into a sparse matrix with one-hot-encoding.

    Args:
        X (pandas.DataFrame): categorical columns to encode.

    Returns:
        X_new (scipy.sparse matrix): sparse matrix encoding categorical
        variables into dummy variables, or None if no column produced
        any features.
    """
    X_new = None
    for i, col in enumerate(X.columns):
        X_col = self._transform_col(X[col], i)
        if X_col is not None:
            # Track "first result seen" via X_new is None rather than i == 0:
            # the old check referenced X_new before assignment (NameError)
            # whenever column 0 produced no features.
            if X_new is None:
                X_new = X_col
            else:
                X_new = sparse.hstack((X_new, X_col))
        logger.debug('{} --> {} features'.format(
            col, self.label_encoder.label_maxes[i])
        )
    return X_new
Args:
X (pandas.DataFrame): categorical columns to encode
Returns:
X_new (scipy.sparse.coo_matrix): sparse matrix encoding categorical
variables into dummy variables | juraj-google-style |
def call(self, args: Args[FrameType]) -> _HasReturnT: | Calls this function with the given arguments.
Args:
args: The function arguments.
Returns:
An object with information about the result of the function call, with a
get_return_value() method that retrieves the return value. | github-repos |
def _get_client_address(self, req):
    """Get the client address from ``X-Forwarded-For`` or the remote address.

    The remote address is used only when the ``X-Forwarded-For`` header is
    not available. Note that neither may be safe to depend on without a
    proper authorization backend.

    Args:
        req (falcon.Request): falcon.Request object.

    Returns:
        str: client address, or None when unavailable.
    """
    try:
        forwarded = req.get_header('X-Forwarded-For', True)
    except (KeyError, HTTPMissingHeader):
        # Header missing: optionally fall back to the socket-level address.
        if self.remote_address_fallback:
            return req.env.get('REMOTE_ADDR')
        return None
    # The header may list several proxies; the first entry is the client.
    return forwarded.split(',')[0].strip()
def create_blob(profile, content):
    """Create a blob.

    Args:
        profile: A profile generated from ``simplygithub.authentication.profile``.
            Such profiles tell this module (i) the ``repo`` to connect to,
            and (ii) the ``token`` to connect with.
        content: The (UTF-8 encoded) content to create in the blob.

    Returns:
        A dict with data about the newly created blob.
    """
    # POST the content to the blobs endpoint of the configured repo.
    return api.post_request(profile, "/blobs", {"content": content})
def value_container(self, value):
    """Returns the container that this per-replica `value` belongs to.

    Args:
        value: A value returned by `run()` or a variable created in `scope()`.

    Returns:
        A container that `value` belongs to. If value does not belong to any
        container (including the case of the container having been destroyed),
        returns the value itself.

    Raises:
        NotImplementedError: always; descendants must implement this.
    """
    raise NotImplementedError('must be implemented in descendants')
Args:
value: A value returned by `run()` or a variable created in `scope()`.
Returns:
A container that `value` belongs to.
If value does not belong to any container (including the case of
container having been destroyed), returns the value itself.
`value in experimental_local_results(value_container(value))` will
always be true. | github-repos |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.