code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def complies_with_scope(queue_item, new_request, scope):
    """Check if the new request complies with the crawling scope.

    Args:
        queue_item: The parent queue item of the new request.
        new_request: The request to check.
        scope: The scope options to check against.

    Returns:
        bool: True if the new request complies with the scope, False otherwise.
    """
    current_url = queue_item.request.url
    new_url = new_request.url

    # Both URLs must be parsable before any scope rule can be evaluated.
    if not URLHelper.is_parsable(current_url):
        return False
    if not URLHelper.is_parsable(new_url):
        return False

    # Restrict to the configured HTTP methods, when any are configured.
    if scope.request_methods and queue_item.request.method not in scope.request_methods:
        return False

    if scope.protocol_must_match and URLHelper.get_protocol(current_url) != URLHelper.get_protocol(new_url):
        return False

    if scope.subdomain_must_match:
        current_subdomain = URLHelper.get_subdomain(current_url)
        new_subdomain = URLHelper.get_subdomain(new_url)
        # "www" and the bare (empty) subdomain are treated as equivalent.
        www_equivalent = {current_subdomain, new_subdomain} == {"www", ""}
        if not www_equivalent and current_subdomain != new_subdomain:
            return False

    if scope.hostname_must_match and URLHelper.get_hostname(current_url) != URLHelper.get_hostname(new_url):
        return False

    if scope.tld_must_match and URLHelper.get_tld(current_url) != URLHelper.get_tld(new_url):
        return False

    return True
Args:
queue_item (:class:`nyawc.QueueItem`): The parent queue item of the new request.
new_request (:class:`nyawc.http.Request`): The request to check.
scope (:class:`nyawc.Options.OptionsScope`): The scope to check.
Returns:
bool: True if it complies, False otherwise. | juraj-google-style |
def get_tokens(self, *, payer_id, credit_card_token_id, start_date, end_date):
    """Query previously created Credit Card Tokens.

    Args:
        payer_id: Identifier of the payer owning the tokens.
        credit_card_token_id: Identifier of the credit card token.
        start_date: Start of the creation-date range (datetime).
        end_date: End of the creation-date range (datetime).

    Returns:
        The parsed API response.
    """
    date_format = '%Y-%m-%dT%H:%M:%S'
    token_info = {
        'payerId': payer_id,
        'creditCardTokenId': credit_card_token_id,
        'startDate': start_date.strftime(date_format),
        'endDate': end_date.strftime(date_format),
    }
    payload = {
        'language': self.client.language.value,
        'command': PaymentCommand.GET_TOKENS.value,
        'merchant': {
            'apiLogin': self.client.api_login,
            'apiKey': self.client.api_key,
        },
        'creditCardTokenInformation': token_info,
        'test': self.client.is_test,
    }
    return self.client._post(self.url, json=payload)
Args:
payer_id:
credit_card_token_id:
start_date:
end_date:
Returns: | codesearchnet |
def initialize(self, input_shape, rng):
    """Initialize the layer given an input shape and rng.

    Returns new_parameters(input_shape, rng) on the first call and () on any
    subsequent call, as the layer is already initialized. This is used for
    networks that share parameters, so the layer only produces them once.

    Args:
        input_shape: a tuple representing the shape of the input.
        rng: random number generator.

    Returns:
        Newly created parameters on the first call and () afterwards.
    """
    try:
        if not self._first_init:
            # Already initialized (e.g. parameter sharing): nothing to create.
            return ()
        self._first_init = False
        self._params = self.new_parameters(input_shape, rng)
        return self._params
    except Exception:
        # Re-raise with layer context and a shortened traceback for readability.
        name = self.__class__.__name__
        trace = _short_traceback()
        raise LayerError(name, 'initialize', self._caller, input_shape, trace)
Returns new_parameters(input_shape, rng) on the first call and () on any
subsequent call, as the layer is already initialized. This is used for
networks that share parameters, so the layer only produces them once.
Note that all arguments and return values can be tuples or dictionaries
or arbitrary nested structures composed of tuples and dictionaries.
Args:
input_shape: a tuple representing the shape of the input.
rng: random number generator.
Returns:
Newly created parameters on the first call and () on all subsequent calls. | codesearchnet |
def open_file_with_default_program(file_path, background=False, return_cmd=False):
    """Open a file with the user's preferred application for its type.

    Args:
        file_path (str): Path to the file to be opened.
        background (bool): Run the program in the background, instead of
            waiting for completion. Defaults to ``False``.
        return_cmd (bool): Return the command to run the program (str)
            instead of running it. Defaults to ``False``.

    Returns:
        str: Only if ``return_cmd``, the command to run the program.
    """
    desktop_env = system.get_name()
    if desktop_env == 'windows':
        open_file_cmd = "explorer.exe '%s'" % file_path
    elif desktop_env == 'mac':
        open_file_cmd = "open '%s'" % file_path
    else:
        # Freedesktop: resolve the default .desktop entry for the file's MIME type.
        file_mime_type = system.get_cmd_out(['xdg-mime', 'query', 'filetype', file_path])
        desktop_file = system.get_cmd_out(['xdg-mime', 'query', 'default', file_mime_type])
        open_file_cmd = desktopfile.execute(
            desktopfile.locate(desktop_file)[0], files=[file_path], return_cmd=True)

    if return_cmd:
        return open_file_cmd

    def_program_proc = sp.Popen(open_file_cmd, shell=True)
    if not background:
        def_program_proc.wait()
Open the file with the user's preferred application.
Args:
file_path (str) : Path to the file to be opened.
background (bool): Run the program in the background, instead of waiting for completion. Defaults to ``False``.
return_cmd (bool): Returns the command to run the program (str) instead of running it. Defaults to ``False``.
Returns:
str: Only if ``return_cmd``, the command to run the program is returned instead of running it. Else returns nothing. | codesearchnet |
def _parse_result_block_line(self, instrumentation_block, line):
instrumentation_block.add_value(line)
return instrumentation_block | Parses the instrumentation result block's line.
Args:
instrumentation_block: _InstrumentationBlock, the instrumentation
result block for the instrumentation run.
line: string, the raw instrumentation output to add to the
instrumentation result block's _InstrumentationResultBlock
object.
Returns:
The instrumentation result block for the instrumentation run. | github-repos |
def find(self, binding_id, instance):
    """Find a binding.

    Creates a new binding object and populates it with stored data if any
    exists in the backend.

    Args:
        binding_id (string): UUID of the binding.
        instance (AtlasServiceInstance.Instance): the service instance.

    Returns:
        AtlasServiceBinding.Binding: the (possibly populated) binding.
    """
    binding = AtlasServiceBinding.Binding(binding_id, instance)
    # Load persisted state into the freshly created binding, if present.
    self.backend.storage.populate(binding)
    return binding
Create a new instance and populate it with data stored if it exists.
Args:
binding_id (string): UUID of the binding
instance (AtlasServiceInstance.Instance): instance
Returns:
AtlasServiceBinding: A binding | juraj-google-style |
def valid_ip_prefix(ip_prefix):
    """Perform a sanity check on ip_prefix.

    Args:
        ip_prefix (str): The IP-Prefix to validate.

    Returns:
        True if ip_prefix is a valid IPv4 address with prefix length 32 or a
        valid IPv6 address with prefix length 128, otherwise False.
    """
    try:
        network = ipaddress.ip_network(ip_prefix)
    except ValueError:
        # Not parsable as an IP network at all.
        return False

    # BUG FIX: the original compared `max_prefixlen`, which is a constant
    # (always 32 for IPv4, 128 for IPv6) and therefore never failed -- every
    # parsable network returned True. The documented contract requires the
    # *actual* prefix length to be a full host prefix.
    if network.version == 4 and network.prefixlen != 32:
        return False
    if network.version == 6 and network.prefixlen != 128:
        return False
    return True
Arguments:
ip_prefix (str): The IP-Prefix to validate
Returns:
True if ip_prefix is a valid IPv4 address with prefix length 32 or a
valid IPv6 address with prefix length 128, otherwise False | codesearchnet |
def download_files_maybe_extract(urls, directory, check_files=()):
    """Download the files at ``urls`` to ``directory``. Extract to
    ``directory`` if tar or zip.

    Args:
        urls (list of str): Urls of the files.
        directory (str): Directory to download to.
        check_files (iterable of str): Check if these files exist, ensuring
            the download succeeded. If these files exist before the download,
            the download is skipped.

    Raises:
        ValueError: Error if one of the ``check_files`` are not found
            following the download.
    """
    # FIX: the default was a mutable list literal (shared-mutable-default
    # pitfall); an immutable empty tuple accepts the same call sites.
    check_paths = [os.path.join(directory, f) for f in check_files]
    if _check_download(*check_paths):
        # Everything already present -- skip the download entirely.
        return
    for url in urls:
        download_file_maybe_extract(url=url, directory=directory)
    if not _check_download(*check_paths):
        raise ValueError('[DOWNLOAD FAILED] `*check_files` not found')
Args:
urls (str): Url of files.
directory (str): Directory to download to.
check_files (list of str): Check if these files exist, ensuring the download succeeded.
If these files exist before the download, the download is skipped.
Raises:
ValueError: Error if one of the ``check_files`` are not found following the download. | juraj-google-style |
def help_members(obj, use_other=False):
    r"""Inspect and print a summary of the members of a class or module.

    Groups instance methods by their number of (unbound) arguments and prints
    the grouped signatures; optionally prints the non-method attributes too.

    Args:
        obj (class or module): object whose members are inspected.
        use_other (bool): also print non-method members. Defaults to False.

    CommandLine:
        python -m utool.util_inspect help_members
    """
    # FIX: the body previously began with a stray bare `r` expression
    # (residue of a stripped r-docstring) which raised NameError at call
    # time; replaced with the proper raw docstring above.
    import utool as ut
    # Pair every attribute name with its value and a printable type string.
    attrnames = dir(obj)
    attr_list = [getattr(obj, attrname) for attrname in attrnames]
    attr_types = ut.lmap(ut.type_str, map(type, attr_list))
    unique_types, groupxs = ut.group_indices(attr_types)
    type_to_items = ut.dzip(unique_types, ut.apply_grouping(attr_list, groupxs))
    type_to_itemname = ut.dzip(unique_types, ut.apply_grouping(attrnames, groupxs))
    # Only instance methods participate in the signature summary.
    memtypes = ['instancemethod']
    func_mems = ut.dict_subset(type_to_items, memtypes, [])
    func_list = ut.flatten(func_mems.values())
    defsig_list = []
    num_unbound_args_list = []
    num_args_list = []
    for func in func_list:
        argspec = ut.get_func_argspec(func)
        args = argspec.args
        unbound_args = get_unbound_args(argspec)
        defsig = ut.func_defsig(func)
        defsig_list.append(defsig)
        num_unbound_args_list.append(len(unbound_args))
        num_args_list.append(len(args))
    # Hierarchical grouping: first by unbound-arg count, then by total args.
    group = ut.hierarchical_group_items(
        defsig_list, [num_unbound_args_list, num_args_list])
    print(repr(obj))
    print(ut.repr3(group, strvals=True))
    if use_other:
        # Everything that is not an instance method, keyed by attribute name.
        other_mems = ut.delete_keys(type_to_items.copy(), memtypes)
        other_mems_attrnames = ut.dict_subset(type_to_itemname, other_mems.keys())
        named_other_attrs = ut.dict_union_combine(
            other_mems_attrnames, other_mems, lambda x, y: list(zip(x, y)))
        print(ut.repr4(named_other_attrs, nl=2, strvals=True))
Inspects members of a class
Args:
obj (class or module):
CommandLine:
python -m utool.util_inspect help_members
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> import utool as ut
>>> obj = ut.DynStruct
>>> result = help_members(obj)
>>> print(result) | juraj-google-style |
def nltk_stemmer(stemmer, token, i=None, tokens=None):
    """Wrapper around an NLTK SnowballStemmer, which includes stop words for
    each language.

    Args:
        stemmer (SnowballStemmer): Stemmer instance that performs the stemming.
        token (lunr.Token): The token to stem.
        i (int): The index of the token in a set (unused, pipeline signature).
        tokens (list): A list of tokens representing the set (unused).

    Returns:
        The result of updating the token with its stemmed form.
    """
    def stem_token(token_string, metadata=None):
        # lunr passes (string, metadata); the stemmer only needs the string.
        return stemmer.stem(token_string)

    return token.update(stem_token)
each language.
Args:
stemmer (SnowballStemmer): Stemmer instance that performs the stemming.
token (lunr.Token): The token to stem.
i (int): The index of the token in a set.
tokens (list): A list of tokens representing the set. | juraj-google-style |
def __init__(self, key_path_prefix):
    """Initializes a Windows Registry key filter.

    Args:
        key_path_prefix (str): the key path prefix to match keys against.
    """
    super(WindowsRegistryKeyPathPrefixFilter, self).__init__()
    # Stored for later prefix comparisons against candidate key paths.
    self._key_path_prefix = key_path_prefix
Args:
key_path_prefix (str): the key path prefix. | juraj-google-style |
def filter_spec(spec, paths, wildcard='*', separator='/'):
    """Remove keys from a spec file.

    For example, with the path ``domains/*/disks/*/metadata`` all the
    metadata dicts from all domains' disks will be removed.

    Args:
        spec (dict): spec to remove keys from
        paths (list): list of paths to the keys that should be removed
        wildcard (str): wildcard character
        separator (str): path separator

    Returns:
        None

    Raises:
        utils.LagoUserException: If a malformed path was detected
    """
    def remove_key(path, spec):
        # Recursively walk `path` (a list of components) down into `spec`
        # and delete the final key; `wildcard` fans out over dicts/lists.
        if len(path) == 0:
            return
        elif len(path) == 1:
            key = path.pop()
            if not isinstance(spec, collections.Mapping):
                raise LagoUserException(
                    'You have tried to remove the following key - "{key}".\n'
                    'Keys can not be removed from type {spec_type}\n'
                    'Please verify that path - "{{path}}" is valid'.format(
                        key=key, spec_type=type(spec)
                    )
                )
            if key == wildcard:
                # A trailing wildcard wipes the whole mapping.
                spec.clear()
            else:
                spec.pop(key, None)
        else:
            current = path[0]
            if current == wildcard:
                if isinstance(spec, list):
                    iterator = iter(spec)
                elif isinstance(spec, collections.Mapping):
                    iterator = spec.itervalues()
                else:
                    raise LagoUserException(
                        'Glob char {char} should refer only to dict or list, '
                        'not to {spec_type}\n'
                        'Please fix path - "{{path}}"'.format(
                            char=wildcard, spec_type=type(spec)
                        )
                    )
                # Recurse into every element matched by the wildcard.
                for i in iterator:
                    remove_key(path[1:], i)
            else:
                try:
                    remove_key(path[1:], spec[current])
                except KeyError:
                    raise LagoUserException(
                        'Malformed path "{{path}}", key "{key}" '
                        'does not exist'.format(key=current)
                    )
                except TypeError:
                    raise LagoUserException(
                        'Malformed path "{{path}}", can not get '
                        'by key from type {spec_type}'.format(
                            spec_type=type(spec)
                        )
                    )
    for path in paths:
        try:
            remove_key(path.split(separator), spec)
        except LagoUserException as e:
            # Inner messages leave "{path}" unformatted on purpose so the
            # full user-supplied path can be filled in here before re-raise.
            e.message = e.message.format(path=path)
            raise
For example, with the following path: domains/*/disks/*/metadata
all the metadata dicts from all domains disks will be removed.
Args:
spec (dict): spec to remove keys from
paths (list): list of paths to the keys that should be removed
wildcard (str): wildcard character
separator (str): path separator
Returns:
None
Raises:
utils.LagoUserException: If a malformed path was detected | juraj-google-style |
def parse_datetime(value):
    """Attempt to parse ``value`` into a ``datetime.datetime``.

    Args:
        value: A timestamp. This can be a string or datetime.datetime value;
            falsy input yields ``None``.

    Returns:
        The parsed ``datetime.datetime``, the value itself if it already is
        one, or ``None`` for falsy input.
    """
    if not value:
        return None
    if isinstance(value, datetime.datetime):
        return value
    # Fall back to dateutil's flexible string parser.
    return dateutil.parser.parse(value)
`value` is ``None``, this function will return ``None``.
Args:
value: A timestamp. This can be a string or datetime.datetime value. | juraj-google-style |
def check_initial_web_request(self, item_session: ItemSession, request: HTTPRequest) -> Tuple[(bool, str)]:
    '''Check robots.txt, URL filters, and scripting hook.

    Coroutine (uses ``yield from``).

    Args:
        item_session: session carrying the URL record under consideration.
        request: the HTTP request about to be made.

    Returns:
        tuple: (bool, str) -- whether the fetch may proceed and the reason.
    '''
    # URL filters run first; they may already veto the fetch.
    (verdict, reason, test_info) = self.consult_filters(item_session.request.url_info, item_session.url_record)
    if (verdict and self._robots_txt_checker):
        # Only consult robots.txt when the filters would allow the fetch.
        can_fetch = (yield from self.consult_robots_txt(request))
        if (not can_fetch):
            verdict = False
            reason = 'robotstxt'
    # The scripting hook gets the final say on the verdict.
    (verdict, reason) = self.consult_hook(item_session, verdict, reason, test_info)
    return (verdict, reason)
Returns:
tuple: (bool, str)
Coroutine. | codesearchnet |
def _rows_event_to_dict(e, stream):
    """Convert a RowsEvent to (rows, meta) dicts.

    Args:
        e (pymysqlreplication.row_event.RowsEvent): the event.
        stream (pymysqlreplication.BinLogStreamReader): the stream that
            yields the event.

    Returns:
        tuple: (rows, meta) -- the event's rows as dicts plus its metadata.
    """
    # primary_key may be a single column name or a tuple/list of them.
    pk_cols = e.primary_key if isinstance(e.primary_key, (list, tuple)) \
        else (e.primary_key, )
    if isinstance(e, row_event.UpdateRowsEvent):
        sig = signals.rows_updated
        action = 'update'
        row_converter = _convert_update_row
    elif isinstance(e, row_event.WriteRowsEvent):
        sig = signals.rows_inserted
        action = 'insert'
        row_converter = _convert_write_row
    elif isinstance(e, row_event.DeleteRowsEvent):
        sig = signals.rows_deleted
        action = 'delete'
        row_converter = _convert_write_row
    else:
        assert False, 'Invalid binlog event'
    # NOTE(review): `sig` is assigned but never used in this function --
    # presumably residue from an earlier signal-dispatch version; confirm.
    meta = {
        'time': e.timestamp,
        'log_pos': stream.log_pos,
        'log_file': stream.log_file,
        'schema': e.schema,
        'table': e.table,
        'action': action,
    }
    rows = list(map(row_converter, e.rows))
    # Attach the primary-key values of every row for easy identification.
    for row in rows:
        row['keys'] = {k: row['values'][k] for k in pk_cols}
    return rows, meta
Args:
e (pymysqlreplication.row_event.RowsEvent): the event
stream (pymysqlreplication.BinLogStreamReader):
the stream that yields event
Returns:
dict: event's data as a dict | juraj-google-style |
def get_variable_layout(self, variable):
    """Retrieve the `TensorLayout` for the variable.

    Args:
        variable: A `Variable` instance.

    Returns:
        The `TensorLayout` for the variable, which can be used by
        `backend.distribute_value()` to redistribute a variable.

    Raises:
        NotImplementedError: always; subclasses must override this hook.
    """
    # Abstract hook: concrete distribution backends provide the layout.
    raise NotImplementedError()
Args:
variable: A `Variable` instance.
return:
The `TensorLayout` for the variable, which can be used by
`backend.distribute_value()` to redistribute a variable. | github-repos |
def __init__(self, components):
    """Creates a device which executes operations in parallel on `components`.

    Args:
        components: A list of device names. Each operation executed on the
            returned device executes on these component devices.
    """
    global _next_device_number, _next_device_number_lock
    self.components = tuple((device_util.canonicalize(d) for d in components))
    if not self.components:
        raise ValueError('ParallelDevice requires at least one component.')
    ctx = context.context()
    # Each parallel device gets a unique CUSTOM device name; the lock keeps
    # concurrent constructions from claiming the same number.
    with _next_device_number_lock:
        self._name = '{}/device:CUSTOM:{}'.format(ctx.host_address_space(), _next_device_number)
        _next_device_number += 1
    device, device_info = _pywrap_parallel_device.GetParallelDeviceCapsules(self._name, self.components)
    # Register the native custom device under the generated name.
    context.register_custom_device(device, self._name, device_info)
    self._device_ids = None
    self._device_scope = None
    # Keep a registry of live parallel devices keyed by name.
    _all_parallel_devices[self._name] = self
Args:
components: A list of device names. Each operation executed on the
returned device executes on these component devices.
Returns:
A string with the name of the newly created device. | github-repos |
def put_image(self, name, val):
    """Put an image.

    Args:
        name (str): name of the image.
        val (np.ndarray): 2D, 3D (HWC) or 4D (NHWC) numpy array of images
            in range [0,255]. If channel is 3, assumed to be RGB.
    """
    assert isinstance(val, np.ndarray)
    arr = image_to_nhwc(val)
    # Deliver the raw image and a rendered summary to every monitor.
    self._dispatch(lambda m: m.process_image(name, arr))
    summary = create_image_summary(name, arr)
    self._dispatch(lambda m: m.process_summary(summary))
Args:
name (str):
val (np.ndarray): 2D, 3D (HWC) or 4D (NHWC) numpy array of images
in range [0,255]. If channel is 3, assumed to be RGB. | codesearchnet |
def copy_all_a(input_a, *other_inputs, **kwargs):
    """Copy all readings in input a into the output.

    All other inputs are skipped so that after this function runs there are
    no readings left in any of the input walkers, even if it generated no
    output readings.

    Returns:
        list(IOTileReading): every reading popped from ``input_a``.
    """
    readings = []
    while input_a.count() > 0:
        readings.append(input_a.pop())
    # Drain the remaining streams without copying them anywhere.
    for walker in other_inputs:
        walker.skip_all()
    return readings
All other inputs are skipped so that after this function runs there are no
readings left in any of the input walkers when the function finishes, even
if it generated no output readings.
Returns:
list(IOTileReading) | codesearchnet |
def generate_zip_data(M, L, n_cells, cluster_probs=None):
    """Generate zero-inflated Poisson (ZIP) data for a mixture of clusters.

    Args:
        M (array): genes x clusters matrix of Poisson means.
        L (array): genes x clusters matrix of zero-inflation parameters.
        n_cells (int): number of output cells.
        cluster_probs (array): prior probability for each cluster.
            Default: uniform.

    Returns:
        tuple: (output, labels) where output has shape genes x n_cells and
        labels is the array of sampled cluster labels.
    """
    genes, clusters = M.shape
    data = np.zeros((genes, n_cells))
    if cluster_probs is None:
        cluster_probs = np.ones(clusters) / clusters
    # One uniform draw per gene/cell decides zero-inflation vs a Poisson draw.
    zip_p = np.random.random((genes, n_cells))
    labels = []
    for cell in range(n_cells):
        c = np.random.choice(range(clusters), p=cluster_probs)
        labels.append(c)
        data[:, cell] = np.where(zip_p[:, cell] < L[:, c], 0, np.random.poisson(M[:, c]))
    return data, np.array(labels)
Args:
M (array): genes x clusters matrix
L (array): genes x clusters matrix - zero-inflation parameters
n_cells (int): number of output cells
cluster_probs (array): prior probability for each cluster.
Default: uniform.
Returns:
output - array with shape genes x n_cells
labels - array of cluster labels | juraj-google-style |
def _create_and_save_state(cls, mapreduce_spec, _app):
    """Save mapreduce state to datastore so the UI can see it immediately.

    Args:
        mapreduce_spec: model.MapreduceSpec for the new job.
        _app: app id if specified. None otherwise.

    Returns:
        The saved Mapreduce state.
    """
    state = model.MapreduceState.create_new(mapreduce_spec.mapreduce_id)
    state.mapreduce_spec = mapreduce_spec
    state.active = True
    # No shards have been started yet.
    state.active_shards = 0
    if _app:
        state.app_id = _app
    write_config = util.create_datastore_write_config(mapreduce_spec)
    state.put(config=write_config)
    return state
Save state to datastore so that UI can see it immediately.
Args:
mapreduce_spec: model.MapreduceSpec,
_app: app id if specified. None otherwise.
Returns:
The saved Mapreduce state. | juraj-google-style |
def concat(self, name=None):
    """Return the values in the TensorArray as a concatenated `Tensor`.

    All of the values must have been written, their ranks must match, and
    their shapes must all match for all dimensions except the first.

    Args:
        name: A name for the operation (optional).

    Returns:
        All the tensors in the TensorArray concatenated into one tensor.
    """
    # Delegate to the underlying (graph or eager) implementation object.
    return self._implementation.concat(name=name)
All of the values must have been written, their ranks must match, and
and their shapes must all match for all dimensions except the first.
Args:
name: A name for the operation (optional).
Returns:
All the tensors in the TensorArray concatenated into one tensor. | github-repos |
def clone(self, name=None):
    """Returns a cloned `Linear` module.

    Args:
        name: Optional string assigning name of cloned module. The default
            name is constructed by appending "_clone" to `self.module_name`.

    Returns:
        Cloned `Linear` module sharing this module's configuration.
    """
    clone_name = name if name is not None else self.module_name + "_clone"
    return Linear(output_size=self.output_size,
                  use_bias=self._use_bias,
                  initializers=self._initializers,
                  partitioners=self._partitioners,
                  regularizers=self._regularizers,
                  name=clone_name)
Args:
name: Optional string assigning name of cloned module. The default name
is constructed by appending "_clone" to `self.module_name`.
Returns:
Cloned `Linear` module. | juraj-google-style |
def get_nmr_quadrupole_moment(self, isotope=None):
    """Gets the nuclear electric quadrupole moment in units of e*millibarns.

    Args:
        isotope (str): the isotope to get the quadrupole moment for.
            Default is None, which gets the lowest mass isotope.

    Returns:
        float: the quadrupole moment, or 0.0 when no data is available.

    Raises:
        ValueError: if the requested isotope has no tabulated moment.
    """
    moments = self._el.nmr_quadrupole_moment
    if len(moments) == 0:
        return 0.0
    if isotope is None:
        # Pick the lowest-mass isotope (mass number follows the dash).
        lightest = min(moments, key=lambda iso: int(iso.split("-")[1]))
        return moments.get(lightest, 0.0)
    if isotope not in moments:
        raise ValueError("No quadrupole moment for isotope {}".format(isotope))
    return moments.get(isotope, 0.0)
e*millibarns
Args:
isotope (str): the isotope to get the quadrupole moment for
default is None, which gets the lowest mass isotope | juraj-google-style |
def create_grad(node, namer, tangent=False):
    """Given a variable, create a variable for the gradient.

    Args:
        node: A node to create a gradient for, can be a normal variable
            (`x`) or a subscript (`x[i]`).
        namer: The namer object which will determine the name to use for
            the gradient.
        tangent: Whether a tangent (instead of adjoint) is created.

    Returns:
        node: A node representing the gradient with the correct name e.g.
            the gradient of `x[i]` is `dx[i]`.

            Note that this returns an invalid node, with the `ctx`
            attribute missing. It is assumed that this attribute is filled
            in later.

            Node has an `adjoint_var` annotation referring to the node it
            is an adjoint of.
    """
    if (not isinstance(node, (gast.Subscript, gast.Name, gast.Str))):
        raise TypeError
    # If the node was assigned to a temporary, differentiate that instead.
    if anno.hasanno(node, 'temp_var'):
        return create_grad(anno.getanno(node, 'temp_var'), namer, tangent)

    def _name_grad(node):
        # Build the gradient Name node for a plain variable.
        if (not isinstance(node, gast.Name)):
            raise TypeError
        varname = node.id
        name = namer.grad(varname, tangent)
        grad_node = gast.Name(id=name, ctx=None, annotation=None)
        anno.setanno(grad_node, 'adjoint_var', node)
        return grad_node
    if isinstance(node, gast.Subscript):
        # d(x[i]) == (dx)[i]: take the gradient of the value, keep the slice.
        grad_node = create_grad(node.value, namer, tangent=tangent)
        grad_node.ctx = gast.Load()
        return gast.Subscript(value=grad_node, slice=node.slice, ctx=None)
    elif isinstance(node, gast.Str):
        # A string names a variable indirectly: grad the named variable and
        # return its gradient's name as a string again.
        grad_node = create_grad(gast.Name(id=node.s, ctx=None, annotation=None), namer, tangent=tangent)
        return gast.Str(grad_node.id)
    else:
        return _name_grad(node)
Args:
node: A node to create a gradient for, can be a normal variable (`x`) or a
subscript (`x[i]`).
namer: The namer object which will determine the name to use for the
gradient.
tangent: Whether a tangent (instead of adjoint) is created.
Returns:
node: A node representing the gradient with the correct name e.g. the
gradient of `x[i]` is `dx[i]`.
Note that this returns an invalid node, with the `ctx` attribute
missing. It is assumed that this attribute is filled in later.
Node has an `adjoint_var` annotation referring to the node it is an
adjoint of. | codesearchnet |
def _create_mirrored_tpu_variables(**kwargs):
    """Returns a list of `tf.Variable`s.

    The list contains `number_replicas` `tf.Variable`s and can be used to
    initialize a `TPUMirroredVariable`.

    Args:
        **kwargs: the keyword arguments for creating a variable.
    """
    # NOTE(review): `devices`, `next_creator` and `maybe_init_scope` are
    # closed over from an enclosing scope not visible here -- confirm.
    initial_value = None
    value_list = []
    for i, d in enumerate(devices):
        with ops.device(d):
            if i == 0:
                initial_value = kwargs['initial_value']
                with maybe_init_scope():
                    # Evaluate callables once so every replica shares the
                    # same initial value.
                    initial_value = initial_value() if callable(initial_value) else initial_value
            if i > 0:
                # Replicas > 0 get a name derived from replica 0's name.
                var0name = value_list[0].name.split(':')[0]
                kwargs['name'] = '%s/replica_%d/' % (var0name, i)
            kwargs['initial_value'] = initial_value
            with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
                v = next_creator(**kwargs)
            assert not isinstance(v, tpu_values.TPUMirroredVariable)
            value_list.append(v)
    return value_list
The list contains `number_replicas` `tf.Variable`s and can be used to
initialize a `TPUMirroredVariable`.
Args:
**kwargs: the keyword arguments for creating a variable | github-repos |
def org(self, notification_type, priority='Low'):
    """Set vars for the passed in data. Used for org notification.

    Builds state for a payload of the form::

        {
            "notificationType": notification_type,
            "priority": priority,
            "isOrganization": true
        }

    Args:
        notification_type (str): The notification type.
        priority (str): The priority: Low, Medium, High.
    """
    self._notification_type = notification_type
    # Organization-wide notifications have no explicit recipient list.
    self._recipients = None
    self._priority = priority
    self._is_organization = True
.. code-block:: javascript
{
"notificationType": notification_type,
"priority": priority
"isOrganization": true
}
Args:
notification_type (str): The notification type.
priority (str): The priority: Low, Medium, High. | codesearchnet |
def _build_kernel(self, kernel_source, compile_flags=()):
    """Convenience function for building the kernel for this worker.

    Args:
        kernel_source (str): the kernel source to use for building.
        compile_flags (tuple): extra compiler flags, joined with spaces.

    Returns:
        cl.Program: a compiled CL kernel.
    """
    flags = ' '.join(compile_flags)
    return cl.Program(self._cl_context, kernel_source).build(flags)
Args:
kernel_source (str): the kernel source to use for building the kernel
Returns:
cl.Program: a compiled CL kernel | juraj-google-style |
def flush(cls, *args):
    """Removes all keys of this namespace.

    Without args, clears all keys starting with ``cls.PREFIX``; with args,
    clears keys starting with the key built from ``cls.PREFIX`` + args.

    Args:
        *args: Arbitrary number of arguments forming the key suffix.

    Returns:
        List of removed keys.
    """
    pattern = (cls._make_key(args) if args else cls.PREFIX) + '*'
    return _remove_keys([], [pattern])
Without args, clears all keys starting with cls.PREFIX
if called with args, clears keys starting with given cls.PREFIX + args
Args:
*args: Arbitrary number of arguments.
Returns:
List of removed keys. | juraj-google-style |
def num_nodes(self, leaves=True, internal=True):
    """Compute the total number of selected nodes in this ``Tree``.

    Args:
        leaves (bool): ``True`` to include leaves, otherwise ``False``.
        internal (bool): ``True`` to include internal nodes, otherwise
            ``False``.

    Returns:
        int: The total number of selected nodes in this ``Tree``.

    Raises:
        TypeError: if ``leaves`` or ``internal`` is not a bool.
    """
    if not isinstance(leaves, bool):
        raise TypeError('leaves must be a bool')
    if not isinstance(internal, bool):
        raise TypeError('internal must be a bool')
    # Count each node at most once, depending on which kinds are selected.
    return sum(
        1 for node in self.traverse_preorder()
        if (leaves and node.is_leaf()) or (internal and not node.is_leaf())
    )
Args:
``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``
``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
Returns:
``int``: The total number of selected nodes in this ``Tree`` | codesearchnet |
def datporod(gnomoutfile):
    """Run datporod and return the estimated Porod volume.

    Args:
        gnomoutfile: path of the GNOM output file to analyze.

    Returns:
        tuple: (Rg, I0, Vporod) -- the radius of gyration and I0 found in
        the input file, and the estimated Porod volume.
    """
    output = subprocess.check_output(['datporod', gnomoutfile])
    fields = output.decode('utf-8').strip().split()
    rg, i0, vporod = (float(x) for x in fields[:3])
    return (rg, i0, vporod)
Returns:
Radius of gyration found in the input file
I0 found in the input file
Vporod: the estimated Porod volume | codesearchnet |
def tf_output(c_op, index):
    """Returns a wrapped TF_Output with specified operation and index.

    Args:
        c_op: wrapped TF_Operation the output belongs to.
        index: integer index of the output on that operation.

    Returns:
        Wrapped TF_Output.
    """
    output = c_api.TF_Output()
    output.oper = c_op
    output.index = index
    return output
Args:
c_op: wrapped TF_Operation
index: integer
Returns:
Wrapped TF_Output | github-repos |
def _dict_func(self, func, axis, *args, **kwargs):
    """Apply function to certain indices across given axis.

    Args:
        func: dict mapping axis labels to the function to apply there.
        axis: Target axis to apply the function along.

    Returns:
        A new PandasQueryCompiler.
    """
    if "axis" not in kwargs:
        kwargs["axis"] = axis
    if axis == 0:
        index = self.columns
    else:
        index = self.index
    # Re-key the dict from labels to positional indices on the target axis.
    func = {idx: func[key] for key in func for idx in index.get_indexer_for([key])}

    def dict_apply_builder(df, func_dict={}):
        # Runs inside each partition; wrap in a DataFrame for uniformity.
        return pandas.DataFrame(df.apply(func_dict, *args, **kwargs))

    result_data = self.data.apply_func_to_select_indices_along_full_axis(
        axis, dict_apply_builder, func, keep_remaining=False
    )
    full_result = self._post_process_apply(result_data, axis)
    return full_result
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler. | juraj-google-style |
def _reset(self, indices):
self.assert_common_preconditions()
return np.stack([self._envs[index].reset() for index in indices]) | Resets environments at indices shouldn't pre-process or record.
Subclasses should override this to do the actual reset if something other
than the default implementation is desired.
Args:
indices: list of indices of underlying envs to call reset on.
Returns:
np.ndarray of stacked observations from the reset-ed envs. | codesearchnet |
def CaseGroups(unicode_dir=_UNICODE_DIR):
    """Returns list of Unicode code groups equivalent under case folding.

    Each group is a sorted list of code points, and the list of groups is
    sorted by first code point in the group.

    Args:
        unicode_dir: Unicode data directory.

    Returns:
        (togroup, groups): map from lower-case code point to its group, and
        the sorted list of groups.
    """
    togroup = {}

    def DoLine(codes, fields):
        # Only common (C) and simple (S) foldings define the equivalence.
        (_, foldtype, lower, _) = fields
        if foldtype not in ("C", "S"):
            return
        lower = _UInt(lower)
        togroup.setdefault(lower, [lower]).extend(codes)

    ReadUnicodeTable(unicode_dir + "/CaseFolding.txt", 4, DoLine)
    # BUG FIX: dict.values() is a view in Python 3 and has no .sort();
    # materialize it as a list before sorting (in-place sort of each group
    # keeps togroup's lists sorted too, as before).
    groups = list(togroup.values())
    for g in groups:
        g.sort()
    groups.sort()
    return togroup, groups
Each group is a sorted list of code points,
and the list of groups is sorted by first code point
in the group.
Args:
unicode_dir: Unicode data directory
Returns:
list of Unicode code groups | juraj-google-style |
def _get_response(self, endpoint, request_dict):
    """Returns a dictionary of data requested by each function.

    Arguments:
    ----------
    endpoint: string, mandatory
        Set in all other methods, this is the API endpoint specific to
        each function.
    request_dict: string, mandatory
        A dictionary of parameters that are formatted into the API call.

    Returns:
    --------
    response: A dictionary that has been dumped from JSON.

    Raises:
    -------
    MesoPyError: Overrides the exceptions given in the requests library
        to give more custom error messages. Connection_error occurs if no
        internet connection exists. Timeout_error occurs if the request
        takes too long and redirect_error is shown if the url is formatted
        incorrectly.
    """
    http_error = 'Could not connect to the API. This could be because you have no internet connection, a parameter was input incorrectly, or the API is currently down. Please try again.'
    json_error = 'Could not retrieve JSON values. Try again with a shorter date range.'
    try:
        # Python 3 urllib path.
        qsp = urllib.parse.urlencode(request_dict, doseq=True)
        resp = urllib.request.urlopen(self.base_url + endpoint + '?' + qsp).read()
    except (AttributeError, NameError):
        # BUG FIX: the original wrote `except (AttributeError or NameError)`,
        # which evaluates to AttributeError alone, so a NameError was never
        # caught. Fall back to urllib2 on Python 2.
        try:
            qsp = urllib.urlencode(request_dict, doseq=True)
            resp = urllib2.urlopen(self.base_url + endpoint + '?' + qsp).read()
        except urllib2.URLError:
            raise MesoPyError(http_error)
    except urllib.error.URLError:
        raise MesoPyError(http_error)
    try:
        json_data = json.loads(resp.decode('utf-8'))
    except ValueError:
        raise MesoPyError(json_error)
    return self._checkresponse(json_data)
Arguments:
----------
endpoint: string, mandatory
Set in all other methods, this is the API endpoint specific to each function.
request_dict: string, mandatory
A dictionary of parameters that are formatted into the API call.
Returns:
--------
response: A dictionary that has been dumped from JSON.
Raises:
-------
MesoPyError: Overrides the exceptions given in the requests library to give more custom error messages.
Connection_error occurs if no internet connection exists. Timeout_error occurs if the request takes too
long and redirect_error is shown if the url is formatted incorrectly. | codesearchnet |
def from_proto(cls, struct_def_proto: message.Message, backbone_element_path: Optional[str]=None, element_type: Optional[str]=None, parent_definitions: Optional[ChildDefinitions]=None) -> 'StructureDataType':
    """Creates a StructureDataType from a proto.

    Args:
        struct_def_proto: Proto containing information about the structure
            definition.
        backbone_element_path: Optional path to the structure def.
        element_type: Potential alternative type name for the type.
        parent_definitions: Element definitions defined by parent structure
            definitions which should override definitions in
            `struct_def_proto` (element definitions supplied at nested
            paths must be passed here so they win over this proto's own).

    Returns:
        A StructureDataType.
    """
    struct_def = cast(Any, struct_def_proto)
    raw_url = struct_def.url.value
    base_type = struct_def.type.value
    element_type = element_type if element_type else base_type
    # Fully-qualified path of the element whose children we collect.
    qualified_path = f'{element_type}.{backbone_element_path}' if backbone_element_path else element_type
    child_defs = ChildDefinitions()
    # Slices are accumulated lazily; missing entries start out empty.
    slices: dict[str, _SliceBuilder] = collections.defaultdict(lambda: _SliceBuilder(None, None, []))
    root_element_definition = None
    for elem in struct_def.snapshot.element:
        if elem.base.path.value == 'Extension.url':
            # Extension.url carries no analytic payload; skip it.
            continue
        path = _get_analytic_path(elem.path.value, elem.id.value)
        if path == qualified_path:
            # The element describing the root itself (or a slice of it).
            if elem.slice_name.value:
                slice_def = slices[f':{elem.slice_name.value}']
                slice_def.slice_def = elem
                slice_def.relative_path = ''
            else:
                root_element_definition = elem
            continue
        if re.search(f'^{qualified_path}\\.\\w+', path):
            # A direct or nested child of the qualified path.
            relative_path = path[len(qualified_path) + 1:]
            closest_slice_ancestor = re.search(f'^{qualified_path}[\\.]?(.*(?<!.extension):[\\w-]+)(?:$|\\.)', elem.id.value)
            if closest_slice_ancestor is None:
                child_defs.add_definition(relative_path, elem)
            else:
                # The element belongs to a slice; group it under that slice.
                slice_def = slices[closest_slice_ancestor[1]]
                if elem.slice_name.value:
                    slice_def.slice_def = elem
                    slice_def.relative_path = relative_path
                else:
                    slice_def.slice_rules.append((relative_path, elem))
    if parent_definitions is not None:
        # Parent-supplied definitions take precedence over our own.
        child_defs.update(parent_definitions)
    if not root_element_definition:
        raise ValueError(f'StructureDataType {raw_url} searching on {qualified_path} missing root element definition. {struct_def}')
    return cls(structure_definition=struct_def, backbone_element_path=backbone_element_path, base_type=base_type, element_type=element_type, _child_defs=child_defs, _slices=tuple((slice_def.to_slice() for slice_def in slices.values())), _raw_url=raw_url, root_element_definition=root_element_definition, cardinality=Cardinality.SCALAR)
Args:
struct_def_proto: Proto containing information about the structure
definition.
backbone_element_path: Optional path to the structure def.
element_type: Potential alternative type name for the type.
parent_definitions: Element definitions defined by parent structure
definitions which should override definitions in `struct_def_proto`. If
structure definitions supply element definitions at nested paths, e.g.
Foo.bar.baz.quux, those element definitions need to be passed via the
`parent_definitions` argument to ensure element definitions will be
chosen from the parent rather than `struct_def_proto`. e.g. if
`struct_def_proto` defines 'Baz.quux,' the parent's 'Foo.bar.baz.quux'
definition must be provided here in order to be chosen over the
`struct_def_proto` definition.
Returns:
A StructureDataType. | github-repos |
class Idefics2Encoder(nn.Module):
    """Transformer encoder consisting of `config.num_hidden_layers` self
    attention layers. Each layer is an [`Idefics2EncoderLayer`].

    Args:
        config: Idefics2Config
    """

    def __init__(self, config: Idefics2Config):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([Idefics2EncoderLayer(config) for _ in range(config.num_hidden_layers)])
        # Toggled externally; trades compute for memory during training.
        self.gradient_checkpointing = False

    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:
        """Run the embedded inputs through every encoder layer.

        Returns a `BaseModelOutput` (or a tuple when `return_dict` is
        False) with the final hidden state and, optionally, all hidden
        states and attention maps.
        """
        # Fall back to the config defaults when flags are not given.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        hidden_states = inputs_embeds
        for encoder_layer in self.layers:
            if output_hidden_states:
                # Record the state *entering* each layer.
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, output_attentions)
            else:
                layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        if output_hidden_states:
            # Append the final state so len(states) == num_layers + 1.
            encoder_states = encoder_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
[`Idefics2EncoderLayer`].
Args:
config: Idefics2Config | github-repos |
def merge_tags(left, right, factory=Tags):
    """Merge two sets of tags into a new troposphere object.

    Args:
        left (Union[dict, troposphere.Tags]): dictionary or Tags object to
            be merged with lower priority.
        right (Union[dict, troposphere.Tags]): dictionary or Tags object to
            be merged with higher priority.
        factory (type): Type of object to create. Defaults to the
            troposphere Tags class.

    Returns:
        The merged tags wrapped in ``factory``.
    """
    if isinstance(left, Mapping):
        tags = dict(left)
    elif hasattr(left, 'tags'):
        tags = _tags_to_dict(left.tags)
    else:
        tags = _tags_to_dict(left)

    if isinstance(right, Mapping):
        tags.update(right)
    elif hasattr(right, 'tags'):
        # BUG FIX: the original probed `hasattr(left, 'tags')` here, so a
        # Tags-like `right` was mis-handled whenever `left` had no .tags.
        tags.update(_tags_to_dict(right.tags))
    else:
        tags.update(_tags_to_dict(right))

    return factory(**tags)
Args:
left (Union[dict, troposphere.Tags]): dictionary or Tags object to be
merged with lower priority
right (Union[dict, troposphere.Tags]): dictionary or Tags object to be
merged with higher priority
factory (type): Type of object to create. Defaults to the troposphere
Tags class. | juraj-google-style |
def _get_paddings_constant(paddings):
    """Helper to get the constant values of the paddings arg to pad().

    Used under V1 graph mode to facilitate computation of the shape of the
    output tensor of `pad()`.

    Args:
        paddings: The same paddings arg as passed to pad(). Can be a
            Tensor, or a nested list or tuple of Tensor and/or numbers.

    Returns:
        A nested list or numbers or `None`, in which `None` indicates
        unknown padding size.
    """
    if isinstance(paddings, tensor_lib.Tensor):
        # Partial constant extraction: unknown elements become None.
        return tensor_util.constant_value(paddings, partial=True)
    if isinstance(paddings, (list, tuple)):
        return [_get_paddings_constant(p) for p in paddings]
    return paddings
Used under V1 graph mode to facilitate computation of the shape of the output
tensor of `pad()`.
Args:
paddings: The same paddings arg as passed to pad(). Can be a Tensor, or
a nested list or tuple of Tensor and/or numbers.
Returns:
A nested list or numbers or `None`, in which `None` indicates unknown
padding size. | github-repos |
def remove_collisions(self, min_dist=0.5):
    """Remove predicted sites that are too close to existing atoms.

    Args:
        min_dist (float): minimum distance (in Angstrom) a predicted site
            must be from existing atoms. With min_dist <= 0 every site
            passes, since all computed distances are positive.

    Returns:
        list: fractional coordinates that survived filtering, or None when
        no extrema have been computed yet.
    """
    s_f_coords = self.structure.frac_coords
    f_coords = self.extrema_coords
    if (len(f_coords) == 0):
        if (self.extrema_type is None):
            logger.warning('Please run ChargeDensityAnalyzer.get_local_extrema first!')
            return
        new_f_coords = []
        self._update_extrema(new_f_coords, self.extrema_type)
        return new_f_coords
    # Distance from each candidate site to its nearest existing atom.
    dist_matrix = self.structure.lattice.get_all_distances(f_coords, s_f_coords)
    all_dist = np.min(dist_matrix, axis=1)
    new_f_coords = []
    for (i, f) in enumerate(f_coords):
        if (all_dist[i] > min_dist):
            new_f_coords.append(f)
    self._update_extrema(new_f_coords, self.extrema_type)
    return new_f_coords
structure.
Args:
min_dist (float): The minimum distance (in Angstrom) that
a predicted site needs to be from existing atoms. A min_dist
with value <= 0 returns all sites without distance checking. | codesearchnet |
def generate_code(meta, prefix=None, node=False, min=False):
    """Generate res.js source code.

    Args:
        meta: tuple(url_prefix, auth_header, resources) or metadata dict of
            the API (parsed via parse_meta).
        prefix: optional URL prefix overriding the one derived from meta.
        node: if True, use the Node.js template.
        min: if True, use the minified web template (superseded by node).

    Returns:
        res.js source code as a string.
    """
    if isinstance(meta, dict):
        url_prefix, auth_header, resources = parse_meta(meta)
    else:
        url_prefix, auth_header, resources = meta
    if prefix is not None:
        url_prefix = prefix
    core = render_core(url_prefix, auth_header, resources)
    # Pick the template file matching the requested flavor.
    if min:
        filename = 'res.web.min.js'
    else:
        filename = 'res.web.js'
    if node:
        filename = 'res.node.js'
    base = read_file(filename)
    # NOTE(review): the replace() call below is truncated in this copy of
    # the source -- its arguments are missing.
    return base.replace('"
Args:
meta: tuple(url_prefix, auth_header, resources) or metadata of API
Returns:
res.js source code | juraj-google-style |
def create_schema(self, model, waiting_models):
    """Create or update the Solr search schema for ``model``.

    Compiles a schema from the model's index fields, compares it against
    the one currently in Solr, and pushes a new schema only on mismatch.

    Args:
        model: model class to process.
        waiting_models: list collecting models whose schema creation could
            not complete immediately; they are retried in a later pass so
            all given models are eventually executed properly.
    """
    bucket_name = model._get_bucket_name()
    index_name = "%s_%s" % (settings.DEFAULT_BUCKET_TYPE, bucket_name)
    ins = model(fake_context)
    fields = self.get_schema_fields(ins._collect_index_fields())
    new_schema = self.compile_schema(fields)
    schema = get_schema_from_solr(index_name)
    if not (schema == new_schema):
        try:
            client.create_search_schema(index_name, new_schema)
            print("+ %s (%s) search schema is created." % (model.__name__, index_name))
        # FIX: the original used a bare ``except:`` which also swallows
        # system-exiting exceptions (KeyboardInterrupt, SystemExit).
        except Exception:
            print("+ %s (%s) search schema checking operation is taken to queue." % (
                model.__name__, index_name))
            waiting_models.append(model)
Args:
model: model to execute
waiting_models: if riak can't return response immediately, model is taken to queue.
After first execution session, method is executed with waiting models and controlled.
And be ensured that all given models are executed properly.
Returns: | juraj-google-style |
def _as_document(self, partition):
    """Convert the given partition to a document for the FTS backend.

    Args:
        partition (orm.Partition): partition to convert.

    Returns:
        dict with keys vid, dataset_vid, title, keywords and doc, matching
        the structure of BasePartitionIndex._schema.
    """
    # Searchable text built from the partition table's column metadata.
    schema = ' '.join((u'{} {} {} {} {}'.format(c.id, c.vid, c.name, c.altname, c.description) for c in partition.table.columns))
    values = ''
    for stat in partition.stats:
        if stat.uvalues:
            values += (' '.join((e[:200] for e in stat.uvalues)) + '\n')
    def resum(g):
        # Summarize a gvid string; fall back to the raw value on parse errors.
        try:
            return str(GVid.parse(g).summarize())
        except KeyError:
            return g
        except ValueError:
            logger.debug("Failed to parse gvid '{}' from partition '{}' grain coverage".format(g, partition.identity.vname))
            return g
    keywords = ((((' '.join(partition.space_coverage) + ' ') + ' '.join([resum(g) for g in partition.grain_coverage if resum(g)])) + ' ') + ' '.join((str(x) for x in partition.time_coverage)))
    # NOTE(review): the format string below has 6 placeholders but receives
    # 8 arguments -- str.format silently ignores the extras, so
    # time_description and geo_description are dropped. Confirm intent.
    doc_field = u('{} {} {} {} {} {}').format(values, schema, ' '.join([u('{}').format(partition.identity.vid), u('{}').format(partition.identity.id_), u('{}').format(partition.identity.name), u('{}').format(partition.identity.vname)]), partition.display.title, partition.display.description, partition.display.sub_description, partition.display.time_description, partition.display.geo_description)
    document = dict(vid=u('{}').format(partition.identity.vid), dataset_vid=u('{}').format(partition.identity.as_dataset().vid), title=u('{}').format(partition.table.description), keywords=u('{}').format(keywords), doc=doc_field)
    return document
Args:
partition (orm.Partition): partition to convert.
Returns:
dict with structure matches to BasePartitionIndex._schema. | codesearchnet |
def _get_bases(type_):
    """Get the base and meta classes to use in creating a subclass.

    Args:
        type_: the type to subclass.

    Returns:
        A tuple containing two values: a base class, and a metaclass.
    """
    # Not every type can be subclassed (e.g. bool); probe by creating a
    # throwaway subclass and fall back to object on failure.
    # FIX: in the original, ``BaseClass = type_`` sat inside the probe
    # class body, making it a class attribute instead of a local -- the
    # success path then hit a NameError at ``BaseClass.__class__``.
    try:
        class _(type_):
            pass
        BaseClass = type_
    except TypeError:
        BaseClass = object

    class MetaClass(_ValidationMeta, BaseClass.__class__):
        """Metaclass combining validation with the base's own metaclass."""

    return BaseClass, MetaClass
Args:
type_: The type to subclass.
Returns:
A tuple containing two values: a base class, and a metaclass. | juraj-google-style |
def write_signatures(self, signatures):
    """Write signature data to the MAR file.

    Args:
        signatures (list): list of signature tuples of the form
            (algorithm_id, signature_data).

    Raises:
        IOError: if the file cursor is not at the expected offset after
            writing.
    """
    self.fileobj.seek(self.signature_offset)
    entries = [
        dict(algorithm_id=algo_id, size=len(data), signature=data)
        for algo_id, data in signatures
    ]
    blob = sigs_header.build(dict(
        filesize=self.filesize,
        count=len(signatures),
        sigs=entries,
    ))
    self.fileobj.write(blob)
    self.additional_offset = self.signature_offset + len(blob)
    # Sanity-check that the cursor landed where the header math says.
    if self.additional_offset != self.fileobj.tell():
        raise IOError('ended up at unexpected offset')
Args:
signatures (list): list of signature tuples of the form
(algorithm_id, signature_data) | juraj-google-style |
def __init__(self, name, context=None):
    """Initialize an instance of a CloudML Job.

    Args:
        name: the name of the job. It can be an operation full name
            ("projects/[project_id]/jobs/[operation_name]") or just
            [operation_name].
        context: an optional Context object providing project_id and
            credentials; defaults to the default datalab context.
    """
    super(Job, self).__init__(name)
    if context is None:
        context = datalab.Context.default()
    self._context = context
    self._api = discovery.build('ml', 'v1', credentials=self._context.credentials)
    # Normalize a bare operation name into a full resource name.
    if not name.startswith('projects/'):
        name = 'projects/' + self._context.project_id + '/jobs/' + name
    self._name = name
    self._refresh_state()
Args:
name: the name of the job. It can be an operation full name
("projects/[project_id]/jobs/[operation_name]") or just [operation_name].
context: an optional Context object providing project_id and credentials. | juraj-google-style |
def parse(self, template):
    """Parse a template string and return a ParsedTemplate instance.

    Uses the current tag delimiters.

    Arguments:
        template: a unicode string that is the template to parse.

    Returns:
        a ParsedTemplate instance.
    """
    self._compile_delimiters()
    start_index = 0
    content_end_index, parsed_section, section_key = None, None, None
    parsed_template = ParsedTemplate()
    # Stack of enclosing (possibly nested) section parse states.
    states = []
    while True:
        match = self._template_re.search(template, start_index)
        if match is None:
            break
        match_index = match.start()
        end_index = match.end()
        matches = match.groupdict()
        # Normalize "change delimiters" and "raw" matches so the rest of
        # the loop only deals with tag / tag_key.
        if matches['change'] is not None:
            matches.update(tag='=', tag_key=matches['delims'])
        elif matches['raw'] is not None:
            matches.update(tag='&', tag_key=matches['raw_name'])
        tag_type = matches['tag']
        tag_key = matches['tag_key']
        leading_whitespace = matches['whitespace']
        # Standalone non-interpolating tags on their own line swallow the
        # surrounding whitespace and the trailing line ending.
        did_tag_begin_line = match_index == 0 or template[match_index - 1] in END_OF_LINE_CHARACTERS
        did_tag_end_line = end_index == len(template) or template[end_index] in END_OF_LINE_CHARACTERS
        is_tag_interpolating = tag_type in ['', '&']
        if did_tag_begin_line and did_tag_end_line and not is_tag_interpolating:
            if end_index < len(template):
                end_index += template[end_index] == '\r' and 1 or 0
            if end_index < len(template):
                end_index += template[end_index] == '\n' and 1 or 0
        elif leading_whitespace:
            match_index += len(leading_whitespace)
            leading_whitespace = ''
        # Emit the literal text preceding this tag.
        if start_index != match_index:
            parsed_template.add(template[start_index:match_index])
        start_index = end_index
        # NOTE(review): the condition below is truncated in this copy of
        # the source -- the tuple of section-opening tag types is cut off.
        if tag_type in ('
            state = (tag_type, end_index, section_key, parsed_template)
            states.append(state)
            section_key, parsed_template = tag_key, ParsedTemplate()
            continue
        if tag_type == '/':
            if tag_key != section_key:
                raise ParsingError("Section end tag mismatch: %s != %s" % (tag_key, section_key))
            parsed_section = parsed_template
            # Restore the enclosing section's parse state.
            (tag_type, section_start_index, section_key, parsed_template) = states.pop()
            node = self._make_section_node(template, tag_type, tag_key, parsed_section,
                                           section_start_index, match_index)
        else:
            node = self._make_interpolation_node(tag_type, tag_key, leading_whitespace)
        parsed_template.add(node)
    # Trailing literal text after the last tag.
    if start_index != len(template):
        parsed_template.add(template[start_index:])
    return parsed_template
def write_file_to_zip_with_neutral_metadata(zfile, filename, content):
    """Write the string ``content`` to ``filename`` in the open ZipFile.

    The entry's metadata is pinned to fixed values (constant timestamp,
    empty comment, create_system=0) so archives are byte-reproducible.

    Args:
        zfile (ZipFile): open ZipFile to write the content into.
        filename (str): the file path within the zip file to write into.
        content (str): the content to write into the zip.

    Returns: None
    """
    entry = zipfile.ZipInfo(filename, date_time=(2015, 10, 21, 7, 28, 0))
    entry.compress_type = zipfile.ZIP_DEFLATED
    entry.comment = "".encode()
    entry.create_system = 0
    zfile.writestr(entry, content)
Args:
zfile (ZipFile): open ZipFile to write the content into
filename (str): the file path within the zip file to write into
content (str): the content to write into the zip
Returns: None | juraj-google-style |
def leave_swarm(self, force=False):
    """Leave a swarm.

    Args:
        force (bool): Leave the swarm even if this node is a manager.
            Default: ``False``

    Returns:
        ``True`` if the request went through.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error (unless ``force`` is set and
            the response indicates the node was not in a swarm or the
            swarm is unavailable).
    """
    response = self._post(self._url('/swarm/leave'), params={'force': force})
    # With force=True, tolerate "not acceptable"/"unavailable" responses.
    if force and response.status_code in (
            http_client.NOT_ACCEPTABLE, http_client.SERVICE_UNAVAILABLE):
        return True
    self._raise_for_status(response)
    return True
return True | Leave a swarm.
Args:
force (bool): Leave the swarm even if this node is a manager.
Default: ``False``
Returns:
``True`` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | juraj-google-style |
def _retrieve_info(self, request):
    """Retrieve information about the service account.

    Updates the scopes and retrieves the full service account email.

    Args:
        request (google.auth.transport.Request): The object used to make
            HTTP requests.
    """
    info = _metadata.get_service_account_info(
        request,
        service_account=self._service_account_email)
    self._service_account_email = info['email']
    self._scopes = info['scopes']
Updates the scopes and retrieves the full service account email.
Args:
request (google.auth.transport.Request): The object used to make
HTTP requests. | juraj-google-style |
def locked_put(self, credentials):
    """Write a Credentials to the Django datastore.

    Args:
        credentials: Credentials, the credentials to store.
    """
    # Look up (or create) the single row keyed by key_name/key_value.
    entity, _ = self.model_class.objects.get_or_create(
        **{self.key_name: self.key_value})
    setattr(entity, self.property_name, credentials)
    entity.save()
Args:
credentials: Credentials, the credentials to store. | juraj-google-style |
def __init__(self, *, separator_stride_cls: Type[message.Message], code_cls: Type[message.Message], default_timezone: str) -> None:
    """Create a new instance of primitive_wrappers.Context.

    Args:
        separator_stride_cls: The Base64BinarySeparatorStride type to use
            when parsing/printing Base64Binary FHIR primitives.
        code_cls: The Code type to use when parsing/printing profiled-Code
            primitives.
        default_timezone: The default timezone to use for date/time-like
            primitive parsing/printing.
    """
    self.separator_stride_cls = separator_stride_cls
    self.code_cls = code_cls
    self.default_timezone = default_timezone
Args:
separator_stride_cls: The Base64BinarySeparatorStride type to use when
parsing/printing Base64Binary FHIR primitives.
code_cls: The Code type to use when parsing/printing profiled-Code
primitives.
default_timezone: The default timezone to use for date/time-like primitive
parsing/printing. | github-repos |
def profile_settings_args(self, ij, required):
    """Return args based on install.json or layout.json params.

    Args:
        ij (dict): The install.json contents.
        required (bool): If True only required args will be returned.

    Returns:
        dict: Dictionary of required or optional App args.
    """
    if self.args.permutation_id is not None:
        # layout.json permutations require the sqlite3 module.
        if 'sqlite3' not in sys.modules:
            print('The sqlite3 module needs to be build-in to Python for this feature.')
            sys.exit(1)
        profile_args = self.profile_settings_args_layout_json(required)
    else:
        profile_args = self.profile_settings_args_install_json(ij, required)
    return profile_args
Args:
ij (dict): The install.json contents.
required (bool): If True only required args will be returned.
Returns:
dict: Dictionary of required or optional App args. | juraj-google-style |
def validate_user_name(self, user_name, timeout=-1):
    """Verify whether a userName is already in use.

    Args:
        user_name: The userName to be verified.
        timeout: Timeout in seconds. Waits for task completion by default.
            The timeout does not abort the operation in OneView, it only
            stops waiting for its completion.

    Returns:
        True if the user name is in use, False if it is not.
    """
    uri = self.URI + '/validateLoginName/' + user_name
    return self._client.create_with_zero_body(uri=uri, timeout=timeout)
Args:
user_name:
The userName to be verified.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in
OneView, just stops waiting for its completion.
Returns: True if user name is in use, False if it is not. | juraj-google-style |
class _BigTableWriteFn(beam.DoFn):
    """DoFn that connects to Bigtable and batches row mutations.

    Creates the connector and adds each pipeline row to the batcher.

    Args:
        project_id(str): GCP Project ID
        instance_id(str): GCP Instance ID
        table_id(str): GCP Table ID
        flush_count(int): Max number of rows to flush
        max_row_bytes(int): Max number of row mutations size to flush
    """

    def __init__(self, project_id, instance_id, table_id, flush_count, max_row_bytes):
        super().__init__()
        self.beam_options = {'project_id': project_id, 'instance_id': instance_id, 'table_id': table_id, 'flush_count': flush_count, 'max_row_bytes': max_row_bytes}
        self.table = None
        self.batcher = None
        self.service_call_metric = None
        self.written = Metrics.counter(self.__class__, 'Written Row')

    def __getstate__(self):
        # Only the options are pickled; connections are rebuilt per bundle.
        return self.beam_options

    def __setstate__(self, options):
        self.beam_options = options
        self.table = None
        self.batcher = None
        self.service_call_metric = None
        self.written = Metrics.counter(self.__class__, 'Written Row')

    def write_mutate_metrics(self, status_list):
        """Record one service-call metric per mutation status."""
        for status in status_list:
            code = status.code if status else None
            grpc_status_string = ServiceCallMetric.bigtable_error_code_to_grpc_status_string(code)
            self.service_call_metric.call(grpc_status_string)

    def start_service_call_metrics(self, project_id, instance_id, table_id):
        """Build the ServiceCallMetric tracking MutateRows requests."""
        resource = resource_identifiers.BigtableTable(project_id, instance_id, table_id)
        labels = {monitoring_infos.SERVICE_LABEL: 'BigTable', monitoring_infos.METHOD_LABEL: 'google.bigtable.v2.MutateRows', monitoring_infos.RESOURCE_LABEL: resource, monitoring_infos.BIGTABLE_PROJECT_ID_LABEL: self.beam_options['project_id'], monitoring_infos.INSTANCE_ID_LABEL: self.beam_options['instance_id'], monitoring_infos.TABLE_ID_LABEL: self.beam_options['table_id']}
        return ServiceCallMetric(request_count_urn=monitoring_infos.API_REQUEST_COUNT_URN, base_labels=labels)

    def start_bundle(self):
        # Lazily (re)connect, then create a fresh batcher for this bundle.
        if self.table is None:
            client = Client(project=self.beam_options['project_id'])
            instance = client.instance(self.beam_options['instance_id'])
            self.table = instance.table(self.beam_options['table_id'])
        self.service_call_metric = self.start_service_call_metrics(self.beam_options['project_id'], self.beam_options['instance_id'], self.beam_options['table_id'])
        self.batcher = MutationsBatcher(self.table, batch_completed_callback=self.write_mutate_metrics, flush_count=self.beam_options['flush_count'], max_row_bytes=self.beam_options['max_row_bytes'])

    def process(self, row):
        self.written.inc()
        self.batcher.mutate(row)

    def finish_bundle(self):
        # Flush any pending mutations, then report lineage for the sink.
        if self.batcher:
            self.batcher.close()
            self.batcher = None
        Lineage.sinks().add('bigtable', self.beam_options['project_id'], self.beam_options['instance_id'], self.beam_options['table_id'])

    def display_data(self):
        return {'projectId': DisplayDataItem(self.beam_options['project_id'], label='Bigtable Project Id'), 'instanceId': DisplayDataItem(self.beam_options['instance_id'], label='Bigtable Instance Id'), 'tableId': DisplayDataItem(self.beam_options['table_id'], label='Bigtable Table Id')}
row in beam pipe line
Args:
project_id(str): GCP Project ID
instance_id(str): GCP Instance ID
table_id(str): GCP Table ID
flush_count(int): Max number of rows to flush
max_row_bytes(int) Max number of row mutations size to flush | github-repos |
def from_text_vision_configs(cls, text_config: CLIPTextConfig, vision_config: CLIPVisionConfig, **kwargs):
    """Instantiate a [`CLIPConfig`] (or a derived class) from clip text
    model configuration and clip vision model configuration.

    Args:
        text_config: the text sub-model configuration.
        vision_config: the vision sub-model configuration.
        **kwargs: forwarded to the configuration constructor.

    Returns:
        [`CLIPConfig`]: An instance of a configuration object
    """
    return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
configuration.
Returns:
[`CLIPConfig`]: An instance of a configuration object | github-repos |
def filter_benchmarks(benchmarks, bench_funcs, base_ver):
    """Filter out benchmarks the baseline interpreter cannot run.

    Mutates and returns ``benchmarks``, dropping any Python2-only
    benchmark when the baseline version is Python 3 or later.

    Args:
        benchmarks: a set() of benchmark names.
        bench_funcs: dict mapping benchmark names to functions.
        base_ver: version tuple of the baseline interpreter.

    Returns:
        The filtered set of benchmark names.
    """
    for name in list(benchmarks):
        func = bench_funcs[name]
        is_py2_only = getattr(func, '_python2_only', False)
        if is_py2_only and (3, 0) <= base_ver:
            benchmarks.discard(name)
            logging.info("Skipping Python2-only benchmark %s; "
                         "not compatible with Python %s" % (name, base_ver))
    return benchmarks
Args:
benchmarks: a set() of benchmark names
bench_funcs: dict mapping benchmark names to functions
base_ver: the version tuple of the baseline Python interpreter
Returns:
The filtered set of benchmark names | juraj-google-style |
def resize_to(self, width, height):
    """Resize the window to the given dimensions.

    If this method was called for a window that is not current, then after
    calling this method the current window should remain the same as it
    was before calling this method.

    Args:
        width (int): The new window width in pixels.
        height (int): The new window height in pixels.
    """
    self.driver.resize_window_to(self.handle, width, height)
If this method was called for a window that is not current, then after calling this method
the current window should remain the same as it was before calling this method.
Args:
width (int): The new window width in pixels.
height (int): The new window height in pixels. | codesearchnet |
def VerifyMessageSignature(self, unused_response_comms, packed_message_list, cipher, cipher_verified, api_version, remote_public_key):
    """Verify the message list signature (client side).

    The client also checks that the nonce returned by the server is correct
    (the timestamp doubles as a nonce). If the nonce fails the response is
    deemed unauthenticated, since it might result from a replay attack.

    Args:
        unused_response_comms: unused.
        packed_message_list: The PackedMessageList rdfvalue from the server.
        cipher: The cipher belonging to the remote end.
        cipher_verified: If True, the cipher's signature is not verified
            again.
        api_version: The api version we should use.
        remote_public_key: The public key of the source.

    Returns:
        An rdf_flows.GrrMessage.AuthorizationState.
    """
    _ = api_version
    result = rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED
    if (cipher_verified or cipher.VerifyCipherSignature(remote_public_key)):
        stats_collector_instance.Get().IncrementCounter('grr_authenticated_messages')
        result = rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED
    # Nonce check: a timestamp mismatch downgrades the result.
    if (packed_message_list.timestamp != self.timestamp):
        result = rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED
    if (not cipher.cipher_metadata):
        cipher.cipher_metadata = rdf_flows.CipherMetadata(source=packed_message_list.source)
    return result
return result | Verify the message list signature.
This is the way the messages are verified in the client.
In the client we also check that the nonce returned by the server is correct
(the timestamp doubles as a nonce). If the nonce fails we deem the response
unauthenticated since it might have resulted from a replay attack.
Args:
packed_message_list: The PackedMessageList rdfvalue from the server.
cipher: The cipher belonging to the remote end.
cipher_verified: If True, the cipher's signature is not verified again.
api_version: The api version we should use.
remote_public_key: The public key of the source.
Returns:
An rdf_flows.GrrMessage.AuthorizationState.
Raises:
DecryptionError: if the message is corrupt. | codesearchnet |
def run(func, options, args=(), kwargs={}, host='localhost', port=8000):
    """Run profilers on a function and upload the collected stats.

    Args:
        func: A Python function.
        options: A string with profilers configuration (i.e. 'cmh').
        args: func non-keyword arguments.
        kwargs: func keyword arguments.
        host: Host name to send collected data.
        port: Port number to send collected data.

    Returns:
        A result of func execution.
    """
    run_stats = run_profilers((func, args, kwargs), options)
    result = None
    # Each profiler entry carries the same 'result'; keep the first one and
    # strip it from the payload before uploading.
    for prof in run_stats:
        if not result:
            result = run_stats[prof]['result']
        del run_stats[prof]['result']
    post_data = gzip.compress(
        json.dumps(run_stats).encode('utf-8'))
    # NOTE(review): the URL below is truncated in this copy of the source.
    urllib.request.urlopen('http:
    return result
Args:
func: A Python function.
options: A string with profilers configuration (i.e. 'cmh').
args: func non-keyword arguments.
kwargs: func keyword arguments.
host: Host name to send collected data.
port: Port number to send collected data.
Returns:
A result of func execution. | juraj-google-style |
def calculate_entropy(self, entropy_string):
    """Calculate the entropy of a string from known English letter
    frequencies.

    Non-alphabetic characters contribute nothing to the score.

    Args:
        entropy_string: A str representing the string to calculate.

    Returns:
        A float with the total entropy of the string (higher is better).
    """
    total = sum(
        -math.log(self.frequency[char.lower()]) / math.log(2)
        for char in entropy_string
        if char.isalpha()
    )
    logging.debug('Entropy score: {0}'.format(total))
    return total
return total | Calculates the entropy of a string based on known frequency of
English letters.
Args:
entropy_string: A str representing the string to calculate.
Returns:
A negative float with the total entropy of the string (higher
is better). | codesearchnet |
def _get_group_object(name):
    """A helper function to get a specified group object.

    Args:
        name (str): The name of the object.

    Returns:
        object: The specified group object.
    """
    # Use the WinNT ADSI provider inside a COM apartment.
    with salt.utils.winapi.Com():
        nt = win32com.client.Dispatch('AdsNameSpaces')
        # NOTE(review): the line below is truncated in this copy of the
        # source (the WinNT moniker string is cut off).
        return nt.GetObject('', (('WinNT:
Args:
name (str): The name of the object
Returns:
object: The specified group object | codesearchnet |
def CopyFromDateTimeString(self, time_string):
    """Copy time elements from a date and time string.

    Args:
        time_string (str): date and time value formatted as:
            YYYY-MM-DD hh:mm:ss.######[+-]##:##
            Where # are numeric digits ranging from 0 to 9 and the seconds
            fraction can be either 3 or 6 digits. The time of day, seconds
            fraction and time zone offset are optional. The default time
            zone is UTC.
    """
    date_time_values = self._CopyDateTimeFromString(time_string)
    self._CopyFromDateTimeValues(date_time_values)
Args:
time_string (str): date and time value formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the seconds
fraction can be either 3 or 6 digits. The time of day, seconds
fraction and time zone offset are optional. The default time zone
is UTC. | juraj-google-style |
def chrome_decrypt(encrypted_value: bytes, key: bytes, init_vector: bytes) \
        -> str:
    """Decrypt Chrome/Chromium's encrypted cookies.

    Args:
        encrypted_value: Encrypted cookie from Chrome/Chromium's cookie
            file.
        key: Key to decrypt encrypted_value.
        init_vector: Initialization vector for decrypting encrypted_value.

    Returns:
        Decrypted value of encrypted_value.
    """
    # Strip the 3-byte version prefix before decrypting.
    encrypted_value = encrypted_value[3:]
    cipher = AES.new(key, AES.MODE_CBC, IV=init_vector)
    decrypted = cipher.decrypt(encrypted_value)
    return clean(decrypted)
Args:
encrypted_value: Encrypted cookie from Chrome/Chromium's cookie file
key: Key to decrypt encrypted_value
init_vector: Initialization vector for decrypting encrypted_value
Returns:
Decrypted value of encrypted_value | juraj-google-style |
def Trim(self, flags):
    """Trim the nodes from the tree keeping only the root hash.

    Args:
        flags: "0000" for trimming, any other value for keeping the nodes.
    """
    logger.info("Trimming!")
    flags = bytearray(flags)
    # Pad flags with zeros up to the number of leaf nodes.
    length = 1 << self.Depth - 1
    while len(flags) < length:
        flags.append(0)
    MerkleTree._TrimNode(self.Root, 0, self.Depth, flags)
Args:
flags: "0000" for trimming, any other value for keeping the nodes. | juraj-google-style |
def _format_output(content, typ):
    """Tabularize the content according to its type.

    Args:
        content (str): The content of a metric.
        typ (str): The type of metric -- (raw|json|tsv|htsv|csv|hcsv).

    Returns:
        str: Content in a raw or tabular format.
    """
    kind = str(typ)
    if 'csv' in kind:
        return _format_csv(content, delimiter=',')
    if 'tsv' in kind:
        return _format_csv(content, delimiter='\t')
    # Anything else (raw, json, ...) passes through untouched.
    return content
Args:
content (str): The content of a metric.
typ (str): The type of metric -- (raw|json|tsv|htsv|csv|hcsv).
Returns:
str: Content in a raw or tabular format. | codesearchnet |
def _Initialize(self, http, url):
    """Initialize this download by setting self.http and self.url.

    We want the user to be able to override self.http by having set the
    value in the constructor; in that case, we ignore the provided http.

    Args:
        http: An httplib2.Http instance or None.
        url: The url for this transfer.

    Returns:
        None. Initializes self.
    """
    self.EnsureUninitialized()
    if self.http is None:
        self.__http = http or http_wrapper.GetHttp()
    self.__url = url
We want the user to be able to override self.http by having set
the value in the constructor; in that case, we ignore the provided
http.
Args:
http: An httplib2.Http instance or None.
url: The url for this transfer.
Returns:
None. Initializes self. | juraj-google-style |
def download_image(self, device_label, image_id, file_name):
    """Download an image taken by a smartcam.

    Args:
        device_label (str): device label of camera.
        image_id (str): image id from image series.
        file_name (str): path to file.

    Raises:
        RequestError: if the HTTP request fails.
    """
    response = None
    try:
        response = requests.get(urls.download_image(self._giid, device_label, image_id), headers={'Cookie': 'vid={}'.format(self._vid)}, stream=True)
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
    # Stream the body to disk in 1 KiB chunks.
    with open(file_name, 'wb') as image_file:
        for chunk in response.iter_content(chunk_size=1024):
            if chunk:
                image_file.write(chunk)
Args:
device_label (str): device label of camera
image_id (str): image id from image series
file_name (str): path to file | codesearchnet |
def register_lookup_handler(lookup_type, handler_or_path):
    """Register a lookup handler.

    Args:
        lookup_type (str): Name to register the handler under.
        handler_or_path (OneOf[func, str]): a function, a class, or a
            dotted-path string to a handler that will be loaded.
    """
    handler = handler_or_path
    # NOTE(review): ``basestring`` implies Python 2 (or a compat shim in
    # scope) -- confirm the module provides it under Python 3.
    if isinstance(handler_or_path, basestring):
        handler = load_object_from_string(handler_or_path)
    LOOKUP_HANDLERS[lookup_type] = handler
    # Old-style (function) lookups are deprecated; new-style lookups are
    # classes, detected here via the ``type(handler) != type`` check.
    if (type(handler) != type):
        logger = logging.getLogger(__name__)
        logger.warning(('Registering lookup `%s`: Please upgrade to use the new style of Lookups.' % lookup_type))
        warnings.warn(('Lookup `%s`: Please upgrade to use the new style of Lookups.' % lookup_type), DeprecationWarning, stacklevel=2)
Args:
lookup_type (str): Name to register the handler under
handler_or_path (OneOf[func, str]): a function or a path to a handler | codesearchnet |
def _DetermineOperatingSystem(self, searcher):
    """Try to determine the underlying operating system.

    Probes for well-known directories (/etc, /System/Library, the Windows
    system directories) and maps the hits to an OS family.

    Args:
        searcher (dfvfs.FileSystemSearcher): file system searcher.

    Returns:
        str: operating system for example "Windows". This should be one of
        the values in definitions.OPERATING_SYSTEM_FAMILIES.
    """
    find_specs = [file_system_searcher.FindSpec(location='/etc', case_sensitive=False), file_system_searcher.FindSpec(location='/System/Library', case_sensitive=False), file_system_searcher.FindSpec(location='/Windows/System32', case_sensitive=False), file_system_searcher.FindSpec(location='/WINNT/System32', case_sensitive=False), file_system_searcher.FindSpec(location='/WINNT35/System32', case_sensitive=False), file_system_searcher.FindSpec(location='/WTSRV/System32', case_sensitive=False)]
    locations = []
    for path_spec in searcher.Find(find_specs=find_specs):
        relative_path = searcher.GetRelativePath(path_spec)
        if relative_path:
            locations.append(relative_path.lower())
    # Any Windows system directory hit implies a Windows NT family install.
    windows_locations = set(['/windows/system32', '\\windows\\system32', '/winnt/system32', '\\winnt\\system32', '/winnt35/system32', '\\winnt35\\system32', '\\wtsrv\\system32', '/wtsrv/system32'])
    operating_system = definitions.OPERATING_SYSTEM_FAMILY_UNKNOWN
    if windows_locations.intersection(set(locations)):
        operating_system = definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT
    elif ('/system/library' in locations):
        operating_system = definitions.OPERATING_SYSTEM_FAMILY_MACOS
    elif ('/etc' in locations):
        operating_system = definitions.OPERATING_SYSTEM_FAMILY_LINUX
    return operating_system
Args:
searcher (dfvfs.FileSystemSearcher): file system searcher.
Returns:
str: operating system for example "Windows". This should be one of
the values in definitions.OPERATING_SYSTEM_FAMILIES. | codesearchnet |
def inference(self, observed_arr):
    """Draw samples from the `fake` distribution.

    Propagates the input through each deconvolution layer in order.

    Args:
        observed_arr: `np.ndarray` of observed data points.

    Returns:
        `np.ndarray` of inferenced data.
    """
    for i in range(len(self.__deconvolution_layer_list)):
        try:
            observed_arr = self.__deconvolution_layer_list[i].forward_propagate(observed_arr)
        # FIX: the original bare ``except:`` also intercepted
        # system-exiting exceptions; narrow it while keeping the log+raise.
        except Exception:
            self.__logger.debug("Error raised in Deconvolution layer " + str(i + 1))
            raise
    return observed_arr
Args:
observed_arr: `np.ndarray` of observed data points.
Returns:
`np.ndarray` of inferenced. | juraj-google-style |
def load_steps(working_dir=None, steps_dir=None, step_file=None, step_list=None):
    """Return a dictionary containing Steps read from file.

    Args:
        working_dir (str, optional): directory local step files are copied
            into before loading.
        steps_dir (str, optional): path to directory containing CWL files.
        step_file (str, optional): path or http(s) url to a single CWL
            file.
        step_list (list, optional): a list of directories, urls or local
            file paths to CWL files or directories containing CWL files.

    Return:
        dict containing (name, Step) entries.
    """
    # Collect candidate CWL files from whichever source argument was given.
    if (steps_dir is not None):
        step_files = glob.glob(os.path.join(steps_dir, '*.cwl'))
    elif (step_file is not None):
        step_files = [step_file]
    elif (step_list is not None):
        step_files = []
        for path in step_list:
            if os.path.isdir(path):
                step_files += glob.glob(os.path.join(path, '*.cwl'))
            else:
                step_files.append(path)
    else:
        step_files = []
    if (working_dir is not None):
        step_files = sort_loading_order(step_files)
    steps = {}
    for f in step_files:
        if (working_dir is not None):
            # Copy local files into the working dir so later packing can
            # find them next to each other.
            if ((not (working_dir == os.path.dirname(f))) and (not is_url(f))):
                copied_file = os.path.join(working_dir, os.path.basename(f))
                shutil.copy2(f, copied_file)
                f = copied_file
        try:
            s = Step(f)
            steps[s.name] = s
        except (NotImplementedError, ValidationException, PackedWorkflowException) as e:
            # Skip steps that fail to load/validate; log and continue.
            logger.warning(e)
    return steps
Args:
steps_dir (str, optional): path to directory containing CWL files.
step_file (str, optional): path or http(s) url to a single CWL file.
step_list (list, optional): a list of directories, urls or local file
paths to CWL files or directories containing CWL files.
Return:
dict containing (name, Step) entries. | codesearchnet |
def tag_file(filename, artist, title, year=None, genre=None, artwork_url=None, album=None, track_number=None, url=None):
    """Attempt to put ID3 tags on a file.

    Best-effort: any failure is reported to the user and swallowed.

    Args:
        filename (str): path to the MP3 file to tag.
        artist (str): artist tag value.
        title (str): title tag value.
        year (int): optional release year.
        genre (str): optional genre.
        artwork_url (str): optional cover image URL to embed.
        album (str): optional album name.
        track_number (str): optional track number.
        url (str): optional website URL to embed.

    Returns:
        bool: True on success, False if tagging failed.
    """
    try:
        audio = EasyMP3(filename)
        audio.tags = None
        audio['artist'] = artist
        audio['title'] = title
        if year:
            audio['date'] = str(year)
        if album:
            audio['album'] = album
        if track_number:
            audio['tracknumber'] = track_number
        if genre:
            audio['genre'] = genre
        if url:
            audio['website'] = url
        audio.save()
        if artwork_url:
            artwork_url = artwork_url.replace('https', 'http')
            # Guess the MIME type from the URL's extension.
            mime = 'image/jpeg'
            if ('.jpg' in artwork_url):
                mime = 'image/jpeg'
            if ('.png' in artwork_url):
                mime = 'image/png'
            if ('-large' in artwork_url):
                # Prefer the higher-resolution variant; fall back on error.
                new_artwork_url = artwork_url.replace('-large', '-t500x500')
                try:
                    image_data = requests.get(new_artwork_url).content
                except Exception as e:
                    image_data = requests.get(artwork_url).content
            else:
                image_data = requests.get(artwork_url).content
            # Reopen with the raw ID3 interface to attach the cover frame.
            audio = MP3(filename, ID3=OldID3)
            audio.tags.add(APIC(encoding=3, mime=mime, type=3, desc='Cover', data=image_data))
            audio.save()
        if url:
            audio = MP3(filename, ID3=OldID3)
            audio.tags.add(WXXX(encoding=3, url=url))
            audio.save()
        return True
    except Exception as e:
        # Deliberate best-effort swallow: report and signal failure.
        puts((colored.red('Problem tagging file: ') + colored.white('Is this file a WAV?')))
        return False
Args:
artist (str):
title (str):
year (int):
genre (str):
artwork_url (str):
album (str):
track_number (str):
filename (str):
url (str): | codesearchnet |
def remove_acl(path):
    """Remove the ACL of the file or folder located on the given path.

    Also remove the ACL of any file and folder below the given one,
    recursively.

    Args:
        path (str): Path to the file or folder to remove the ACL for,
            recursively.
    """
    # macOS: chmod -N clears the ACL.
    if (platform.system() == constants.PLATFORM_DARWIN and
            os.path.isfile('/bin/chmod')):
        subprocess.call(['/bin/chmod', '-R', '-N', path])
    # Linux: setfacl -b removes all extended ACL entries.
    elif ((platform.system() == constants.PLATFORM_LINUX) and
            os.path.isfile('/bin/setfacl')):
        subprocess.call(['/bin/setfacl', '-R', '-b', path])
Also remove the ACL of any file and folder below the given one,
recursively.
Args:
path (str): Path to the file or folder to remove the ACL for,
recursively. | juraj-google-style |
def __eq__(self, other):
    """Test if two objects of the same base class are equal.

    If the objects are not of the same class, Python will default to
    comparison-by-ID.

    Args:
        other: The object to compare for equality.

    Returns:
        True if the objects are equal; else False. NotImplemented when the
        classes differ.
    """
    if self.__class__ is not other.__class__:
        return NotImplemented
    mine = self._tp__get_typed_properties()
    theirs = other._tp__get_typed_properties()
    return mine == theirs
If the objects are not of the same class, Python will default to
comparison-by-ID.
Args:
other: The object to compare for equality.
Returns:
True if the objects are equal; else False. | juraj-google-style |
def delete(self, name, **kwargs):
    """Delete a Label on the server.

    Args:
        name: The name of the label.
        **kwargs: Extra options to send to the server (e.g. sudo).

    Raises:
        GitlabAuthenticationError: If authentication is not correct.
        GitlabDeleteError: If the server cannot perform the request.
    """
    self.gitlab.http_delete(self.path, query_data={'name': name}, **kwargs)
Args:
name: The name of the label
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the server cannot perform the request | juraj-google-style |
def AddEventTags(self, event_tags):
    """Add event tags.

    Args:
        event_tags (list[EventTag]): event tags.

    Raises:
        IOError: when the storage file is closed or read-only or if the
            event tags cannot be serialized.
        OSError: when the storage file is closed or read-only or if the
            event tags cannot be serialized.
    """
    self._RaiseIfNotWritable()
    for event_tag in event_tags:
        self.AddEventTag(event_tag)
Args:
event_tags (list[EventTag]): event tags.
Raises:
IOError: when the storage file is closed or read-only or
if the event tags cannot be serialized.
OSError: when the storage file is closed or read-only or
if the event tags cannot be serialized. | juraj-google-style |
def _LastEntryTimestamp(dct, upper_bound_timestamp):
    """Search for the greatest timestamp lower than the specified one.

    Args:
        dct: A dictionary from timestamps to some items.
        upper_bound_timestamp: An upper bound for the timestamp to be
            returned, or None for no bound.

    Returns:
        Greatest timestamp that is not above the specified one. If no such
        value exists, `None` is returned.
    """
    if upper_bound_timestamp is None:
        candidates = iterkeys(dct)
    else:
        candidates = (key for key in iterkeys(dct)
                      if key <= upper_bound_timestamp)
    try:
        return max(candidates)
    except ValueError:
        # max() over an empty iterable: nothing qualifies.
        return None
Args:
dct: A dictionary from timestamps to some items.
upper_bound_timestamp: An upper bound for timestamp to be returned.
Returns:
Greatest timestamp that is lower than the specified one. If no such value
exists, `None` is returned. | codesearchnet |
def df_first_row_to_dict(df):
    """Convert the first DataFrame row to a dict.

    Args:
        df (pandas.DataFrame): A DataFrame with at least one row, or None.

    Returns:
        A dict mapping column names to the first row's values, e.g.
        {'C1': 'x', 'C2': 'y', 'C3': 'z'} from a DataFrame that looks
        like:
            C1 C2 C3
            1  x  y  z
        Else if `df` is `None`, returns `None`.
    """
    if df is None:
        return None
    first_rows = [dict(row) for _, row in df.head(1).iterrows()]
    return first_rows[0]
Args:
df (pandas.DataFrame): A DataFrame with at least one row
Returns:
A dict that looks like:
{'C1': 'x', 'C2': 'y', 'C3': 'z'}
from a DataFrame that looks like:
C1 C2 C3
1 x y z
Else if `df` is `None`, returns `None` | codesearchnet |
def GetCredential(self, path_spec, identifier):
    """Retrieve a specific credential from the key chain.

    Args:
        path_spec (PathSpec): path specification.
        identifier (str): credential identifier.

    Returns:
        object: credential or None if the credential for the path
        specification is not set.
    """
    per_spec = self._credentials_per_path_spec.get(path_spec.comparable, {})
    return per_spec.get(identifier, None)
Args:
path_spec (PathSpec): path specification.
identifier (str): credential identifier.
Returns:
object: credential or None if the credential for the path specification
is not set. | juraj-google-style |
def resize(self, height, width):
    """Resize the tty session.

    Args:
        height (int): Height of tty session.
        width (int): Width of tty session.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    return self.client.api.resize(self.id, height, width)
Args:
height (int): Height of tty session
width (int): Width of tty session
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | juraj-google-style |
def pretokenized_tfrecord_dataset(filenames, text2self, eos_included, repeat, batch_size, sequence_length):
    """Read tensor2tensor-style data files.

    The dataset is defined by sets of TFRecord files of TFExample protos.
    There should be a "targets" feature (a 1d tensor of integers). If not
    text2self, there should also be an "inputs" feature. Other features
    get ignored. eos_included specifies whether the inputs and targets
    were written with an EOS token, as in tensor2tensor.

    Args:
        filenames: a list of strings.
        text2self: a boolean.
        eos_included: a boolean.
        repeat: a boolean.
        batch_size: an integer.
        sequence_length: an integer.

    Returns:
        A tf.data.Dataset of batches.
    """
    dataset = tf.data.TFRecordDataset(filenames, buffer_size=((64 * 1024) * 1024))
    if repeat:
        dataset = dataset.repeat()
    keys = (['targets'] if text2self else ['inputs', 'targets'])
    def decode_example(serialized_example):
        'Return a dict of Tensors from a serialized tensorflow.Example.'
        data_fields = {}
        data_items_to_decoders = {}
        for k in keys:
            data_fields[k] = tf.VarLenFeature(tf.int64)
            data_items_to_decoders[k] = tf.contrib.slim.tfexample_decoder.Tensor(k)
        decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder(data_fields, data_items_to_decoders)
        decode_items = list(sorted(data_items_to_decoders))
        decoded = decoder.decode(serialized_example, items=decode_items)
        # Append EOS (id 1) when the data was written without it.
        if (not eos_included):
            decoded = [tf.concat([v, [1]], 0) for v in decoded]
        return dict(zip(decode_items, decoded))
    dataset = dataset.map(decode_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    return pack_and_batch(dataset, batch_size, sequence_length)
The dataset is defined by sets of TFRecord files of TFExample protos.
There should be a "targets" feature (a 1d tensor of integers)
If not text2self, there should also be an "inputs" feature.
Other features get ignored.
eos_included specifies whether the inputs and targets were written with an
EOS token, as in tensor2tensor
Args:
filenames: a list of strings
text2self: a boolean
eos_included: a boolean
repeat: a boolean
batch_size: an integer
sequence_length: an integer
Returns:
A tf.data.Dataset of batches | codesearchnet |
def merge(self, options):
if (not options):
return _CallSettings(timeout=self.timeout, retry=self.retry, page_descriptor=self.page_descriptor, page_token=self.page_token, bundler=self.bundler, bundle_descriptor=self.bundle_descriptor, kwargs=self.kwargs)
else:
if (options.timeout == OPTION_INHERIT):
timeout = self.timeout
else:
timeout = options.timeout
if (options.retry == OPTION_INHERIT):
retry = self.retry
else:
retry = options.retry
if (options.page_token == OPTION_INHERIT):
page_token = self.page_token
else:
page_token = options.page_token
if options.is_bundling:
bundler = self.bundler
else:
bundler = None
if (options.kwargs == OPTION_INHERIT):
kwargs = self.kwargs
else:
kwargs = self.kwargs.copy()
kwargs.update(options.kwargs)
return _CallSettings(timeout=timeout, retry=retry, page_descriptor=self.page_descriptor, page_token=page_token, bundler=bundler, bundle_descriptor=self.bundle_descriptor, kwargs=kwargs) | Returns new _CallSettings merged from this and a CallOptions object.
Note that if the CallOptions instance specifies a page_token,
the merged _CallSettings will have ``flatten_pages`` disabled. This
permits toggling per-resource/per-page page streaming.
Args:
options (CallOptions): an instance whose values override
those in this object. If None, ``merge`` returns a copy of this
object
Returns:
CallSettings: The merged settings and options. | codesearchnet |
def ip_geoloc(ip, hit_api=True):
from ..logs.models import IPInfoCheck
try:
obj = IPInfoCheck.objects.get(ip_address=ip).ip_info
except IPInfoCheck.DoesNotExist:
if hit_api:
try:
obj = IPInfoCheck.check_ip(ip)
except RateExceededError:
return None
else:
return None
return (obj.latitude, obj.longitude) | Get IP geolocation.
Args:
ip (str): IP address to use if no data provided.
hit_api (bool): whether to hit api if info not found.
Returns:
tuple: (latitude, longitude) coordinates, or None if unavailable. | codesearchnet |
def _archive_elements(self):
try:
stackfile_key, propertyfile_key = self._craft_s3_keys()
template_file = self._config.get('environment', {}).get('template', None)
bucket = self._config.get('environment', {}).get('bucket', None)
if not os.path.isfile(template_file):
logging.info("{} is not actually a file".format(template_file))
return False
logging.info('Copying parameters to s3:
temp_file_name = '/tmp/{}'.format((str(uuid.uuid4()))[:8])
with open(temp_file_name, 'w') as dump_file:
json.dump(self._parameters, dump_file, indent=4)
self._s3.upload_file(temp_file_name, bucket, propertyfile_key)
logging.info('Copying {} to s3:
self._s3.upload_file(template_file, bucket, stackfile_key)
self._templateUrl = 'https:
logging.info("template_url: " + self._templateUrl)
return True
except Exception as x:
logging.error('Exception caught in copy_stuff_to_S3(): {}'.format(x))
traceback.print_exc(file=sys.stdout)
return False | Cloud Formation likes to take the template from S3 so here we put the
template into S3. We also store the parameters file that was used in
this run. Note: you can pass anything as the version string but you
should at least consider a version control tag or git commit hash as
the version.
Args:
None
Returns:
True if the stuff lands in S3 or False if the file doesn't
really exist or the upload goes sideways. | juraj-google-style |
def jsonify(data, pretty=False, **kwargs):
isod = isinstance(data, OrderedDict)
params = {'for_json': True, 'default': _complex_encode}
if pretty:
params['indent'] = 2
params['sort_keys'] = (False if isod else True)
params.update(kwargs)
try:
return json.dumps(data, ensure_ascii=False, **params)
except UnicodeDecodeError:
return json.dumps(data, **params) | Serialize Python objects to JSON with optional 'pretty' formatting
Raises:
TypeError: from :mod:`json` lib
ValueError: from :mod:`json` lib
JSONDecodeError: from :mod:`json` lib | codesearchnet |
def bearing(self, format='numeric'):
bearings = []
for segment in self:
if len(segment) < 2:
bearings.append([])
else:
bearings.append(segment.bearing(format))
return bearings | Calculate bearing between locations in segments.
Args:
format (str): Format of the bearing string to return
Returns:
list of list of float: Groups of bearings between points in
segments | juraj-google-style |
def get_all_anchors(stride=None, sizes=None):
if stride is None:
stride = cfg.RPN.ANCHOR_STRIDE
if sizes is None:
sizes = cfg.RPN.ANCHOR_SIZES
cell_anchors = generate_anchors(
stride,
scales=np.array(sizes, dtype=np.float) / stride,
ratios=np.array(cfg.RPN.ANCHOR_RATIOS, dtype=np.float))
max_size = cfg.PREPROC.MAX_SIZE
field_size = int(np.ceil(max_size / stride))
shifts = np.arange(0, field_size) * stride
shift_x, shift_y = np.meshgrid(shifts, shifts)
shift_x = shift_x.flatten()
shift_y = shift_y.flatten()
shifts = np.vstack((shift_x, shift_y, shift_x, shift_y)).transpose()
K = shifts.shape[0]
A = cell_anchors.shape[0]
field_of_anchors = (
cell_anchors.reshape((1, A, 4)) +
shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
field_of_anchors = field_of_anchors.reshape((field_size, field_size, A, 4))
field_of_anchors = field_of_anchors.astype('float32')
field_of_anchors[:, :, :, [2, 3]] += 1
return field_of_anchors | Get all anchors in the largest possible image, shifted, floatbox
Args:
stride (int): the stride of anchors.
sizes (tuple[int]): the sizes (sqrt area) of anchors
Returns:
anchors: SxSxNUM_ANCHORx4, where S == ceil(MAX_SIZE/STRIDE), floatbox
The layout in the NUM_ANCHOR dim is NUM_RATIO x NUM_SIZE. | juraj-google-style |
def __init__(self, module_name, text):
super(Report, self).__init__()
self.module_name = module_name
self.text = text | Initializes the analysis report.
Args:
module_name (str): name of the analysis plugin that generated
the report.
text (str): report text. | juraj-google-style |
def parse_args(bels: list, char_locs: CharLocs, parsed: Parsed, errors: Errors) -> Tuple[(Parsed, Errors)]:
commas = char_locs['commas']
for span in parsed:
if ((parsed[span]['type'] != 'Function') or ('parens_span' not in parsed[span])):
continue
(sp, ep) = parsed[span]['parens_span']
if (ep == (- 1)):
args_end = (len(bels) - 1)
else:
args_end = (ep - 1)
args = []
arg_start = (sp + 1)
each_arg_end_list = sorted(([(end - 1) for end in commas.get(sp, [])] + [args_end]))
for arg_end in each_arg_end_list:
while ((arg_start < args_end) and (bels[arg_start] == ' ')):
arg_start += 1
trimmed_arg_end = arg_end
while ((trimmed_arg_end > arg_start) and (bels[trimmed_arg_end] == ' ')):
trimmed_arg_end -= 1
if (trimmed_arg_end < arg_start):
trimmed_arg_end = arg_start
arg = ''.join(bels[arg_start:(trimmed_arg_end + 1)])
args.append({'arg': arg, 'span': (arg_start, trimmed_arg_end)})
arg_start = (arg_end + 2)
parsed[span]['args'] = args
return (parsed, errors) | Parse arguments from functions
Args:
bels: BEL string as list of chars
char_locs: char locations for parens, commas and quotes
parsed: function locations
errors: error messages
Returns:
(functions, errors): function and arg locations plus error messages | codesearchnet |
def _process_active_view_and_verification(self, placement, feed_item):
if FieldMap.PLACEMENT_ACTIVE_VIEW_AND_VERIFICATION in feed_item:
if feed_item.get(FieldMap.PLACEMENT_ACTIVE_VIEW_AND_VERIFICATION, None) == 'ON':
placement['vpaidAdapterChoice'] = 'HTML5'
placement['videoActiveViewOptOut'] = False
elif feed_item.get(FieldMap.PLACEMENT_ACTIVE_VIEW_AND_VERIFICATION, None) == 'OFF':
placement['vpaidAdapterChoice'] = 'DEFAULT'
placement['videoActiveViewOptOut'] = True
elif feed_item[FieldMap.PLACEMENT_ACTIVE_VIEW_AND_VERIFICATION] == 'LET_DCM_DECIDE' or feed_item[FieldMap.PLACEMENT_ACTIVE_VIEW_AND_VERIFICATION] == '':
placement['vpaidAdapterChoice'] = 'DEFAULT'
placement['videoActiveViewOptOut'] = False
else:
raise Exception('%s is not a valid value for the placement Active View and Verification field' % feed_item.get(FieldMap.PLACEMENT_ACTIVE_VIEW_AND_VERIFICATION, None)) | Updates / creates active view and verification settings.
This method updates the CM item by setting or creating active view and
verification settings based on the Bulkdozer feed configurations.
Args:
placement: The CM placement object to be updated.
feed_item: The Bulkdozer feed item with the configurations.
Raises:
Exception: In case the values for active view and verification enumeration
is invalid. | github-repos |
def noise_get(
n: tcod.noise.Noise, f: Sequence[float], typ: int = NOISE_DEFAULT
) -> float:
return float(lib.TCOD_noise_get_ex(n.noise_c, ffi.new("float[4]", f), typ)) | Return the noise value sampled from the ``f`` coordinate.
``f`` should be a tuple or list with a length matching
:any:`Noise.dimensions`.
If ``f`` is shorter than :any:`Noise.dimensions` the missing coordinates
will be filled with zeros.
Args:
n (Noise): A Noise instance.
f (Sequence[float]): The point to sample the noise from.
typ (int): The noise algorithm to use.
Returns:
float: The sampled noise value. | juraj-google-style |
def __init__(self, query_functions=None):
super(QueryRequestPayload, self).__init__(enums.Tags.REQUEST_PAYLOAD)
self._query_functions = None
self.query_functions = query_functions | Construct a QueryRequestPayload object.
Args:
query_functions (list): A list of QueryFunction enumerations. | juraj-google-style |
def as_operation(self):
result = encoding.CopyProtoMessage(self._op)
names = sorted(self._metric_values_by_name_then_sign.keys())
for name in names:
mvs = self._metric_values_by_name_then_sign[name]
result.metricValueSets.append(sc_messages.MetricValueSet(metricName=name, metricValues=mvs.values()))
return result | Obtains a single `Operation` representing this instances contents.
Returns:
:class:`endpoints_management.gen.servicecontrol_v1_messages.Operation` | codesearchnet |
def get_vmss_vm(access_token, subscription_id, resource_group, vmss_name, instance_id):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,
'/virtualMachines/', str(instance_id),
'?api-version=', COMP_API])
return do_get(endpoint, access_token) | Get individual VMSS VM details.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the virtual machine scale set.
instance_id (int): VM ID of the scale set VM.
Returns:
HTTP response. JSON body of VMSS VM model view. | juraj-google-style |
def previous_weekday(date):
weekday = date.weekday()
if weekday == 0:
n_days = 3
elif weekday == 6:
n_days = 2
else:
n_days = 1
return date - datetime.timedelta(days=n_days) | Returns the last weekday before date
Args:
date (datetime or datetime.date)
Returns:
(datetime or datetime.date)
Raises:
- | juraj-google-style |
def build_eval_session(module_spec, class_count):
(eval_graph, bottleneck_tensor, resized_input_tensor, wants_quantization) = create_module_graph(module_spec)
eval_sess = tf.Session(graph=eval_graph)
with eval_graph.as_default():
(_, _, bottleneck_input, ground_truth_input, final_tensor) = add_final_retrain_ops(class_count, FLAGS.final_tensor_name, bottleneck_tensor, wants_quantization, is_training=False)
tf.train.Saver().restore(eval_sess, CHECKPOINT_NAME)
(evaluation_step, prediction) = add_evaluation_step(final_tensor, ground_truth_input)
return (eval_sess, resized_input_tensor, bottleneck_input, ground_truth_input, evaluation_step, prediction) | Builds an restored eval session without train operations for exporting.
Args:
module_spec: The hub.ModuleSpec for the image module being used.
class_count: Number of classes
Returns:
Eval session containing the restored eval graph.
The bottleneck input, ground truth, eval step, and prediction tensors. | codesearchnet |
def add_cookie(self, cookie_dict):
if (not isinstance(cookie_dict, dict)):
raise TypeError('Type of the cookie must be a dict.')
if ((not cookie_dict.get('name', None)) or (not cookie_dict.get('value', None))):
raise KeyError("Missing required keys, 'name' and 'value' must be provided.")
self._execute(Command.ADD_COOKIE, {'cookie': cookie_dict}) | Set a cookie.
Support:
Web(WebView)
Args:
cookie_dict: A dictionary contain keys: "name", "value",
["path"], ["domain"], ["secure"], ["httpOnly"], ["expiry"].
Returns:
None. | codesearchnet |
def count(self, value):
if value == self._defaults['count'] and 'count' in self._values:
del self._values['count']
else:
self._values['count'] = value | The count property.
Args:
value (int). the property value. | juraj-google-style |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.