code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def _install_signal_handler(self, signal_number, signal_name):
old_signal_handler = None
def handler(handled_signal_number, frame):
signal.signal(signal_number, signal.SIG_DFL)
sys.stderr.write("TensorBoard caught %s; exiting...\n" % signal_name)
if old_signal_handler not in (signal.SIG_IGN, signal.SIG_DFL):
old_signal_handler(handled_signal_number, frame)
sys.exit(0)
old_signal_handler = signal.signal(signal_number, handler) | Set a signal handler to gracefully exit on the given signal.
When this process receives the given signal, it will run `atexit`
handlers and then exit with `0`.
Args:
signal_number: The numeric code for the signal to handle, like
`signal.SIGTERM`.
signal_name: The human-readable signal name. | juraj-google-style |
def log_softmax(x, reduced_dim, extra_logit=None, name=None):
return x - reduce_logsumexp(
x, reduced_dim, extra_logit=extra_logit, name=name) | log(softmax(x)).
Args:
x: a Tensor whose shape contains vocab_dim
reduced_dim: a Dimension
extra_logit: an optional Tensor broadcastable to (x.shape - reduced_dim)
name: an optional string
Returns:
a Tensor with the same shape as x | juraj-google-style |
def set_marked(self, name: str, marked: bool = False,
unmarked: bool = False) -> None:
if marked:
self._marked[name] = True
elif unmarked:
self._marked[name] = False
else:
self._marked.pop(name, None) | Add or remove the ``\\Marked`` and ``\\Unmarked`` mailbox
attributes.
Args:
name: The name of the mailbox.
marked: True if the ``\\Marked`` attribute should be added.
unmarked: True if the ``\\Unmarked`` attribute should be added. | juraj-google-style |
def export_as_package(self, package_path, cv_source):
if os.path.exists(package_path):
raise exceptions.UserError('{} already exists'.format(package_path))
package_name = os.path.basename(os.path.normpath(package_path))
os.makedirs(package_path)
with open(os.path.join(package_path, '__init__.py'), 'wb') as f:
f.write('from {}.builder import xcessiv_ensemble'.format(package_name).encode('utf8'))
os.makedirs(os.path.join(package_path, 'baselearners'))
open(os.path.join(package_path, 'baselearners', '__init__.py'), 'a').close()
for (idx, base_learner) in enumerate(self.base_learners):
base_learner.export_as_file(os.path.join(package_path, 'baselearners', ('baselearner' + str(idx))))
self.base_learner_origin.export_as_file(os.path.join(package_path, 'metalearner'), self.secondary_learner_hyperparameters)
with open(os.path.join(package_path, 'cv.py'), 'wb') as f:
f.write(cv_source.encode('utf8'))
ensemble_source = ''
stacker_file_loc = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'stacker.py')
with open(stacker_file_loc) as f:
ensemble_source += f.read()
ensemble_source += '\n\n def {}(self, X):\n return self._process_using_meta_feature_generator(X, "{}")\n\n'.format(self.base_learner_origin.meta_feature_generator, self.base_learner_origin.meta_feature_generator)
with open(os.path.join(package_path, 'stacker.py'), 'wb') as f:
f.write(ensemble_source.encode('utf8'))
builder_source = ''
for (idx, base_learner) in enumerate(self.base_learners):
builder_source += 'from {}.baselearners import baselearner{}\n'.format(package_name, idx)
builder_source += 'from {}.cv import return_splits_iterable\n'.format(package_name)
builder_source += 'from {} import metalearner\n'.format(package_name)
builder_source += 'from {}.stacker import XcessivStackedEnsemble\n'.format(package_name)
builder_source += '\nbase_learners = [\n'
for (idx, base_learner) in enumerate(self.base_learners):
builder_source += ' baselearner{}.base_learner,\n'.format(idx)
builder_source += ']\n'
builder_source += '\nmeta_feature_generators = [\n'
for (idx, base_learner) in enumerate(self.base_learners):
builder_source += ' baselearner{}.meta_feature_generator,\n'.format(idx)
builder_source += ']\n'
builder_source += '\nxcessiv_ensemble = XcessivStackedEnsemble(base_learners=base_learners, meta_feature_generators=meta_feature_generators, secondary_learner=metalearner.base_learner, cv_function=return_splits_iterable)\n'
with open(os.path.join(package_path, 'builder.py'), 'wb') as f:
f.write(builder_source.encode('utf8')) | Exports the ensemble as a Python package and saves it to `package_path`.
Args:
package_path (str, unicode): Absolute/local path of place to save package in
cv_source (str, unicode): String containing actual code for base learner
cross-validation used to generate secondary meta-features.
Raises:
exceptions.UserError: If os.path.join(path, name) already exists. | codesearchnet |
def connections(self, origin, destination, dt=datetime.now(), only_direct=False):
query = {'S': origin, 'Z': destination, 'date': dt.strftime('%d.%m.%y'), 'time': dt.strftime('%H:%M'), 'start': 1, 'REQ0JourneyProduct_opt0': (1 if only_direct else 0)}
rsp = requests.get('http:
return parse_connections(rsp.text) | Find connections between two stations
Args:
origin (str): origin station
destination (str): destination station
dt (datetime): date and time for query
only_direct (bool): only direct connections | codesearchnet |
def get_all(self, uids: Iterable[int]) -> Mapping[(int, Record)]:
return {uid: self._records[uid] for uid in uids if (uid in self._records)} | Get records by a set of UIDs.
Args:
uids: The message UIDs. | codesearchnet |
def get_phrases_from_posmap(posmaps, input_ids):
left_idx = 0
right_idx = posmaps.shape[-1] - 1
posmaps = posmaps.clone()
posmaps[:, 0:left_idx + 1] = False
posmaps[:, right_idx:] = False
token_ids = []
for posmap in posmaps:
non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist()
token_ids.append([input_ids[i] for i in non_zero_idx])
return token_ids | Get token ids of phrases from posmaps and input_ids.
Args:
posmaps (`torch.BoolTensor` of shape `(num_boxes, hidden_size)`):
A boolean tensor of text-thresholded logits related to the detected bounding boxes.
input_ids (`torch.LongTensor`) of shape `(sequence_length, )`):
A tensor of token ids. | github-repos |
def _call_location():
frame = tf_inspect.currentframe()
assert frame.f_back.f_code.co_name == '_tfmw_add_deprecation_warning', 'This function should be called directly from _tfmw_add_deprecation_warning, as the caller is identified heuristically by chopping off the top stack frames.'
for _ in range(3):
parent = frame.f_back
if parent is None:
break
frame = parent
return '{}:{}'.format(frame.f_code.co_filename, frame.f_lineno) | Extracts the caller filename and line number as a string.
Returns:
A string describing the caller source location. | github-repos |
def batch_dense(inputs, units, activation=None, kernel_initializer=None, reuse=None, name=None):
inputs_shape = shape_list(inputs)
if (len(inputs_shape) != 3):
raise ValueError('inputs must have 3 dimensions')
batch = inputs_shape[0]
input_units = inputs_shape[2]
if ((not isinstance(batch, int)) or (not isinstance(input_units, int))):
raise ValueError('inputs must have static dimensions 0 and 2')
with tf.variable_scope(name, default_name='batch_dense', values=[inputs], reuse=reuse, dtype=inputs.dtype):
if (kernel_initializer is None):
kernel_initializer = tf.random_normal_initializer(stddev=(input_units ** (- 0.5)))
w = tf.get_variable('w', [batch, input_units, units], initializer=kernel_initializer, dtype=inputs.dtype)
y = tf.matmul(inputs, w)
if (activation is not None):
y = activation(y)
return y | Multiply a batch of input matrices by a batch of parameter matrices.
Each input matrix is multiplied by the corresponding parameter matrix.
This is useful in a mixture-of-experts where the batch represents different
experts with different inputs.
Args:
inputs: a Tensor with shape [batch, length, input_units]
units: an integer
activation: an optional activation function to apply to the output
kernel_initializer: an optional initializer
reuse: whether to reuse the varaible scope
name: an optional string
Returns:
a Tensor with shape [batch, length, units]
Raises:
ValueError: if the "batch" or "input_units" dimensions of inputs are not
statically known. | codesearchnet |
async def loadCoreModule(self, ctor, conf=None):
if (conf is None):
conf = {}
modu = self._loadCoreModule(ctor, conf=conf)
try:
(await s_coro.ornot(modu.preCoreModule))
except asyncio.CancelledError:
raise
except Exception:
logger.exception(f'module preCoreModule failed: {ctor}')
self.modules.pop(ctor, None)
return
mdefs = modu.getModelDefs()
self.model.addDataModels(mdefs)
cmds = modu.getStormCmds()
[self.addStormCmd(c) for c in cmds]
try:
(await s_coro.ornot(modu.initCoreModule))
except asyncio.CancelledError:
raise
except Exception:
logger.exception(f'module initCoreModule failed: {ctor}')
self.modules.pop(ctor, None)
return
(await self.fire('core:module:load', module=ctor))
return modu | Load a single cortex module with the given ctor and conf.
Args:
ctor (str): The python module class path
conf (dict):Config dictionary for the module | codesearchnet |
def update_from_group(self, data=None, timeout=(- 1)):
uri = '{}/updateFromGroup'.format(self.data['uri'])
return self._helper.update(data, uri, timeout=timeout) | Use this action to make a logical enclosure consistent with the enclosure group when the logical enclosure is
in the Inconsistent state.
Args:
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Logical enclosure. | codesearchnet |
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
with open(vocab_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.get_vocab(), ensure_ascii=False))
return (vocab_file,) | Save the vocabulary and special tokens file to a directory.
Args:
save_directory (`str`):
The directory in which to save the vocabulary.
Returns:
`Tuple(str)`: Paths to the files saved. | github-repos |
def _make_shred(self, c, name, feature_extractors, sheet_name):
height, width, channels = self.orig_img.shape
r_x, r_y, r_w, r_h = cv2.boundingRect(c)
epsilon = 0.01 * cv2.arcLength(c, True)
simplified_contour = cv2.approxPolyDP(c, epsilon, True)
if self.px_to_mm(r_w) <= 3 or self.px_to_mm(r_h) <= 3:
print("Skipping piece
name, r_w, r_h))
return None
if self.px_to_mm(r_w) >= 100 and self.px_to_mm(r_h) >= 100:
print("Skipping piece
name, r_w, r_h))
return None
box_center, bbox, angle = cv2.minAreaRect(c)
if bbox[0] > bbox[1]:
angle += 90
bbox = (bbox[1], bbox[0])
if bbox[1] / float(bbox[0]) > 70:
print("Skipping piece
return None
y1 = math.floor(box_center[1] - bbox[1] / 2)
x1 = math.floor(box_center[0] - bbox[0] / 2)
bbox = tuple(map(int, map(math.ceil, bbox)))
piece_mask = np.zeros([height, width, 1], dtype=np.uint8)
cv2.drawContours(piece_mask, [c], -1, 255, cv2.FILLED)
img_crp = self.orig_img[r_y:r_y + r_h, r_x:r_x + r_w]
piece_in_context = self.save_image(
"pieces/%s_ctx" % name,
self.orig_img[max(r_y - 10, 0):r_y + r_h + 10,
max(r_x - 10, 0):r_x + r_w + 10])
mask = piece_mask[r_y:r_y + r_h, r_x:r_x + r_w]
img_roi = cv2.bitwise_and(img_crp, img_crp, mask=mask)
img_roi = cv2.cvtColor(img_roi, cv2.COLOR_BGR2BGRA)
img_roi[:, :, 3] = mask[:, :, 0]
M = cv2.getRotationMatrix2D((box_center[0] - r_x,
box_center[1] - r_y), angle, 1)
M[0][2] += r_x - x1
M[1][2] += r_y - y1
img_roi = cv2.warpAffine(img_roi, M, bbox)
piece_fname = self.save_image("pieces/%s" % name, img_roi, "png")
_, _, _, mask = cv2.split(img_roi)
_, contours, _ = cv2.findContours(mask.copy(), cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
if len(contours) != 1:
print("Piece
cnt = contours[0]
features_fname = self.save_image("pieces/%s_mask" % name, mask, "png")
base_features = {
"on_sheet_x": r_x,
"on_sheet_y": r_y,
"on_sheet_width": r_w,
"on_sheet_height": r_h,
"on_sheet_angle": angle,
"width": img_roi.shape[1],
"height": img_roi.shape[0],
}
tags_suggestions = []
for feat in feature_extractors:
fts, tags = feat.get_info(img_roi, cnt, name)
base_features.update(fts)
tags_suggestions += tags
if tags_suggestions:
print(name, tags_suggestions)
return Shred(
contour=c,
features=base_features,
features_fname=features_fname,
img_roi=img_roi,
name=name,
piece_fname=piece_fname,
piece_in_context_fname=piece_in_context,
sheet=sheet_name,
simplified_contour=simplified_contour,
tags_suggestions=tags_suggestions,
) | Creates a Shred instances from a given contour.
Args:
c: cv2 contour object.
name: string shred name within a sheet.
feature_extractors: iterable of AbstractShredFeature instances.
Returns:
A new Shred instance or None on failure. | juraj-google-style |
def get_lb_nat_rule(access_token, subscription_id, resource_group, lb_name, rule_name):
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/loadBalancers/', lb_name, '/inboundNatRules/', rule_name, '?api-version=', NETWORK_API])
return do_get(endpoint, access_token) | Get details about a load balancer inbound NAT rule.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
lb_name (str): Name of the load balancer.
rule_name (str): Name of the NAT rule.
Returns:
HTTP response. JSON body of rule. | codesearchnet |
def process_entry(self, entry):
try:
corrections = self.get_corrections_dict(entry)
except CompatibilityError:
return None
entry.correction = sum(corrections.values())
return entry | Process a single entry with the chosen Corrections.
Args:
entry: A ComputedEntry object.
Returns:
An adjusted entry if entry is compatible, otherwise None is
returned. | codesearchnet |
def requires_open_handle(method):
@functools.wraps(method)
def wrapper_requiring_open_handle(self, *args, **kwargs):
'The wrapper to be returned.'
if self.is_closed():
raise usb_exceptions.HandleClosedError()
return method(self, *args, **kwargs)
return wrapper_requiring_open_handle | Decorator to ensure a handle is open for certain methods.
Subclasses should decorate their Read() and Write() with this rather than
checking their own internal state, keeping all "is this handle open" logic
in is_closed().
Args:
method: A class method on a subclass of UsbHandle
Raises:
HandleClosedError: If this handle has been closed.
Returns:
A wrapper around method that ensures the handle is open before calling through
to the wrapped method. | codesearchnet |
def set_files(self, files_downloaded, files_failed):
self.files_downloaded = files_downloaded
self.files_failed = files_failed
self.__record_progress(Status.GET_FILE_DIFF) | set_files: records progress from downloading files
Args:
files_downloaded ([str]): list of files that have been downloaded
files_failed ([str]): list of files that failed to download
Returns: None | juraj-google-style |
def keys_of_type_exist(self, *keys):
keys_exist = [(key, (key in self.keys()), expected_type) for (key, expected_type) in keys]
return tuple((ContextItemInfo(key=k[0], key_in_context=k[1], expected_type=k[2], is_expected_type=(isinstance(self[k[0]], k[2]) if k[1] else None), has_value=(k[1] and (not (self[k[0]] is None)))) for k in keys_exist)) | Check if keys exist in context and if types are as expected.
Args:
*keys: *args for keys to check in context.
Each arg is a tuple (str, type)
Returns:
Tuple of namedtuple ContextItemInfo, same order as *keys.
ContextItemInfo(key,
key_in_context,
expected_type,
is_expected_type)
Remember if there is only one key in keys, the return assignment
needs an extra comma to remind python that it's a tuple:
# one
a, = context.keys_of_type_exist('a')
# > 1
a, b = context.keys_of_type_exist('a', 'b') | codesearchnet |
def get_frame(self, frame_id):
if ((frame_id < 0) or (frame_id >= self._frame_cnt)):
raise IndexError('"frame_id" must be between 0 and {}'.format((self._frame_cnt - 1)))
if (frame_id == self._position):
return self.read()
if self._cache:
img = self._cache.get(frame_id)
if (img is not None):
self._position = (frame_id + 1)
return img
self._set_real_position(frame_id)
(ret, img) = self._vcap.read()
if ret:
if self._cache:
self._cache.put(self._position, img)
self._position += 1
return img | Get frame by index.
Args:
frame_id (int): Index of the expected frame, 0-based.
Returns:
ndarray or None: Return the frame if successful, otherwise None. | codesearchnet |
def from_dir(cls, top, exts=None, exclude_dirs='_*'):
pseudos = []
if (exts == 'all_files'):
for f in [os.path.join(top, fn) for fn in os.listdir(top)]:
if os.path.isfile(f):
try:
p = Pseudo.from_file(f)
if p:
pseudos.append(p)
else:
logger.info(('Skipping file %s' % f))
except:
logger.info(('Skipping file %s' % f))
if (not pseudos):
logger.warning(('No pseudopotentials parsed from folder %s' % top))
return None
logger.info(('Creating PseudoTable with %i pseudopotentials' % len(pseudos)))
else:
if (exts is None):
exts = ('psp8',)
for p in find_exts(top, exts, exclude_dirs=exclude_dirs):
try:
pseudos.append(Pseudo.from_file(p))
except Exception as exc:
logger.critical(('Error in %s:\n%s' % (p, exc)))
return cls(pseudos).sort_by_z() | Find all pseudos in the directory tree starting from top.
Args:
top: Top of the directory tree
exts: List of files extensions. if exts == "all_files"
we try to open all files in top
exclude_dirs: Wildcard used to exclude directories.
return: :class:`PseudoTable` sorted by atomic number Z. | codesearchnet |
def multiple_replace(string, replacements):
pattern = re.compile("|".join([re.escape(k) for k in sorted(replacements, key=len, reverse=True)]), flags=re.DOTALL)
return pattern.sub(lambda x: replacements[x.group(0)], string) | Simultaneously replace multiple strigns in a string
Args:
string (str): Input string
replacements (Dict[str,str]): Replacements dictionary
Returns:
str: String with replacements | juraj-google-style |
def obj_with_unit(obj, unit):
unit_type = _UNAME2UTYPE[unit]
if isinstance(obj, numbers.Number):
return FloatWithUnit(obj, unit=unit, unit_type=unit_type)
elif isinstance(obj, collections.Mapping):
return {k: obj_with_unit(v, unit) for (k, v) in obj.items()}
else:
return ArrayWithUnit(obj, unit=unit, unit_type=unit_type) | Returns a `FloatWithUnit` instance if obj is scalar, a dictionary of
objects with units if obj is a dict, else an instance of
`ArrayWithFloatWithUnit`.
Args:
unit: Specific units (eV, Ha, m, ang, etc.). | codesearchnet |
def translate_ostat(ostat):
ostat_lower = ostat.strip().lower()
if ostat_lower == 'monomer':
return 1
elif ostat_lower == 'homo-dimer':
return 2
elif ostat_lower == 'homo-trimer':
return 3
elif ostat_lower == 'homo-tetramer':
return 4
elif ostat_lower == 'homo-pentamer':
return 5
elif ostat_lower == 'homo-hexamer':
return 6
elif ostat_lower == 'homo-heptamer':
return 7
elif ostat_lower == 'homo-octamer':
return 8
else:
num = int(ostat_lower.split('-')[1])
return num | Translate the OSTAT field to an integer.
As of 2018-02-26, works on all E. coli models. Untested on other pre-made organism models.
Args:
ostat (str): Predicted oligomeric state of the PDB file
Returns:
int: Translated string to integer | juraj-google-style |
def _add_to_collections(var, weight_collections):
for weight_collection in weight_collections:
if weight_collection == ops.GraphKeys.GLOBAL_VARIABLES:
continue
if isinstance(var, variables.PartitionedVariable):
for constituent_var in list(var):
ops.add_to_collection(weight_collection, constituent_var)
else:
ops.add_to_collection(weight_collection, var) | Adds a var to the list of weight_collections provided.
Handles the case for partitioned and non-partitioned variables.
Args:
var: A variable or Partitioned Variable.
weight_collections: List of collections to add variable to. | github-repos |
def load(cls, archive_file: PackageSource, handler: primitive_handler.PrimitiveHandler, struct_def_class: Type[_StructDefT], search_param_class: Type[_SearchParameterT], code_system_class: Type[_CodeSystemT], value_set_class: Type[_ValueSetT], resource_time_zone: str='Z') -> 'FhirPackage[_StructDefT, _SearchParameterT, _CodeSystemT, _ValueSetT]':
collections_per_resource_type = {'StructureDefinition': ResourceCollection[_StructDefT](struct_def_class, handler, resource_time_zone), 'SearchParameter': ResourceCollection[_SearchParameterT](search_param_class, handler, resource_time_zone), 'CodeSystem': ResourceCollection[_CodeSystemT](code_system_class, handler, resource_time_zone), 'ValueSet': ResourceCollection[_ValueSetT](value_set_class, handler, resource_time_zone)}
with _open_path_or_factory(archive_file) as fd:
if not isinstance(fd.name, str) or fd.name.endswith('.zip'):
json_files = _read_fhir_package_zip(fd)
elif fd.name.endswith('.tar.gz') or fd.name.endswith('.tgz'):
json_files = _read_fhir_package_npm(fd)
else:
raise ValueError(f'Unsupported file type from {fd.name}')
ig_info: Optional[IgInfo] = None
for file_name, raw_json in json_files:
json_obj = json.loads(raw_json, parse_float=decimal.Decimal, parse_int=decimal.Decimal)
if not isinstance(json_obj, dict):
continue
if os.path.basename(file_name) == 'package.json':
ig_info = _parse_ig_info(json_obj)
_add_resource_to_collection(json_obj, json_obj, collections_per_resource_type)
if ig_info is None:
raise ValueError(f'Package {fd.name} does not contain a package.json file stating its URL and version.')
return FhirPackage(ig_info=ig_info, structure_definitions=collections_per_resource_type['StructureDefinition'], search_parameters=collections_per_resource_type['SearchParameter'], code_systems=collections_per_resource_type['CodeSystem'], value_sets=collections_per_resource_type['ValueSet']) | Instantiates and returns a new `FhirPackage` from a `.zip` file.
Most users should not use this directly, but rather use the load methods
in FHIR version-specific packages.
Args:
archive_file: A path to the `.zip`, `.tar.gz` or `.tgz` file containing
the `FhirPackage` contents.
handler: The FHIR primitive handler used for resource parsing.
struct_def_class: The StructureDefinition proto class to use.
search_param_class: The SearchParameter proto class to use.
code_system_class: The CodeSystem proto class to use.
value_set_class: The Valueset proto class to use.
resource_time_zone: The time zone code to parse resource dates into.
Returns:
An instance of `FhirPackage`.
Raises:
ValueError: In the event that the file or contents are invalid. | github-repos |
def new_from_list(cls, items, **kwargs):
obj = cls(**kwargs)
for item in items:
obj.append(ListItem(item))
return obj | Populates the ListView with a string list.
Args:
items (list): list of strings to fill the widget with. | juraj-google-style |
def load_env(workdir, logfile=None, loglevel=logging.INFO):
setup_sdk_logging(logfile, loglevel)
workdir = os.path.abspath(workdir)
loaded_workdir = lago_workdir.Workdir(path=workdir)
prefix = loaded_workdir.get_prefix('current')
return SDK(loaded_workdir, prefix) | Load an existing Lago environment
Args:
workdir(str): Path to the workdir directory, as created by
:func:`~lago.sdk.init` or created by the CLI.
logfile(str): A Path to setup a log file.
loglevel(int): :mod:`logging` log level.
Returns:
:class:`~lago.sdk.SDK`: Initialized Lago environment
Raises:
:exc:`~lago.utils.LagoException`: If loading the environment failed. | juraj-google-style |
def add(self, value, date=None, return_value=False, key=None):
data = {}
if (self._metric_id is None):
self.tcex.handle_error(715, [self._metric_name])
body = {'value': value}
if (date is not None):
body['date'] = self.tcex.utils.format_datetime(date, date_format='%Y-%m-%dT%H:%M:%SZ')
if (key is not None):
body['name'] = key
self.tcex.log.debug('metric data: {}'.format(body))
params = {}
if return_value:
params = {'returnValue': 'true'}
url = '/v2/customMetrics/{}/data'.format(self._metric_id)
r = self.tcex.session.post(url, json=body, params=params)
if ((r.status_code == 200) and ('application/json' in r.headers.get('content-type', ''))):
data = r.json()
elif (r.status_code == 204):
pass
else:
self.tcex.handle_error(710, [r.status_code, r.text])
return data | Add metrics data to collection.
Args:
value (str): The value of the metric.
date (str, optional): The optional date of the metric.
return_value (bool, default:False): Tell the API to return the updates metric value.
key (str, optional): The key value for keyed metrics.
Return:
dict: If return_value is True a dict with the current value for the time period
is returned. | codesearchnet |
def matches_count(count, options):
if options.get("count") is not None:
return count == int(options["count"])
if options.get("maximum") is not None and int(options["maximum"]) < count:
return False
if options.get("minimum") is not None and int(options["minimum"]) > count:
return False
if options.get("between") is not None and count not in options["between"]:
return False
return True | Returns whether the given count matches the given query options.
If no quantity options are specified, any count is considered acceptable.
Args:
count (int): The count to be validated.
options (Dict[str, int | Iterable[int]]): A dictionary of query options.
Returns:
bool: Whether the count matches the options. | juraj-google-style |
def RemoveProcessedTaskStorage(self, task):
if task.identifier not in self._task_storage_writers:
raise IOError('Storage writer for task: {0:s} does not exist.'.format(
task.identifier))
del self._task_storage_writers[task.identifier] | Removes a processed task storage.
Args:
task (Task): task.
Raises:
IOError: if the task storage does not exist.
OSError: if the task storage does not exist. | juraj-google-style |
def get_connection(db_type, db_pth, user=None, password=None, name=None):
if db_type == 'sqlite':
print(db_pth)
conn = sqlite3.connect(db_pth)
elif db_type == 'mysql':
import mysql.connector
conn = mysql.connector.connect(user=user, password=password, database=name)
elif db_type == 'django_mysql':
from django.db import connection as conn
else:
print('unsupported database type: {}, choices are "sqlite", "mysql" or "django_mysql"'.format(db_type))
return conn | Get a connection to a SQL database. Can be used for SQLite, MySQL or Django MySQL database
Example:
>>> from msp2db.db import get_connection
>>> conn = get_connection('sqlite', 'library.db')
If using "mysql" mysql.connector needs to be installed.
If using "django_mysql" Django needs to be installed.
Args:
db_type (str): Type of database can either be "sqlite", "mysql" or "django_mysql"
Returns:
sql connection object | juraj-google-style |
def _handle_oss_error():
try:
(yield)
except _OssError as exception:
if (exception.status in _ERROR_CODES):
raise _ERROR_CODES[exception.status](exception.details.get('Message', ''))
raise | Handle OSS exception and convert to class IO exceptions
Raises:
OSError subclasses: IO error. | codesearchnet |
def __init__(self, obj=None, prop=None):
if not hasattr(self, "args_type"):
self.__set_type(obj, prop)
self.obj, self.prop = obj, prop
self.__check_lock()
self.wrap2spy()
self.is_in_queue = False | It will create the true base
flow:
__new__ => __init__
=> set type based on arguments
=> check the arguments is valid or not based on type
=> wrap the target
Args:
obj: None / function / instance method / module / class
Inspected target
If the target is None, it will create a Pure() class
prop: None / string
Inspected target when obj contains callable things | juraj-google-style |
def repeat(sequence):
N = len(sequence)
def f(i):
return sequence[i%N]
return partial(force, sequence=_advance(f)) | Return a driver function that can advance a repeated of values.
.. code-block:: none
seq = [0, 1, 2, 3]
# repeat(seq) => [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, ...]
Args:
sequence (seq) : a sequence of values for the driver to bounce | juraj-google-style |
def plot_compare(self, other_plotter, legend=True):
import matplotlib.lines as mlines
plt = self.get_plot()
data_orig = self.bs_plot_data()
data = other_plotter.bs_plot_data()
band_linewidth = 1
for i in range(other_plotter._nb_bands):
for d in range(len(data_orig['distances'])):
plt.plot(data_orig['distances'][d],
[e[str(Spin.up)][i] for e in data['energy']][d],
'c-', linewidth=band_linewidth)
if other_plotter._bs.is_spin_polarized:
plt.plot(data_orig['distances'][d],
[e[str(Spin.down)][i] for e in data['energy']][d],
'm--', linewidth=band_linewidth)
if legend:
handles = [mlines.Line2D([], [], linewidth=2,
color='b', label='bs 1 up'),
mlines.Line2D([], [], linewidth=2,
color='r', label='bs 1 down',
linestyle="--"),
mlines.Line2D([], [], linewidth=2,
color='c', label='bs 2 up'),
mlines.Line2D([], [], linewidth=2,
color='m', linestyle="--",
label='bs 2 down')]
plt.legend(handles=handles)
return plt | plot two band structure for comparison. One is in red the other in blue
(no difference in spins). The two band structures need to be defined
on the same symmetry lines! and the distance between symmetry lines is
the one of the band structure used to build the BSPlotter
Args:
another band structure object defined along the same symmetry lines
Returns:
a matplotlib object with both band structures | juraj-google-style |
def load_variant_bulk(self, variants):
if not len(variants) > 0:
return
LOG.debug("Loading variant bulk")
try:
result = self.variant_collection.insert_many(variants)
except (DuplicateKeyError, BulkWriteError) as err:
for var_obj in variants:
try:
self.upsert_variant(var_obj)
except IntegrityError as err:
pass
return | Load a bulk of variants
Args:
variants(iterable(scout.models.Variant))
Returns:
object_ids | juraj-google-style |
def __init__(self, loc, scale, validate_args=False, allow_nan_stats=True, name='Normal'):
parameters = dict(locals())
with ops.name_scope(name, values=[loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(scale)] if validate_args else []):
self._loc = array_ops.identity(loc, name='loc')
self._scale = array_ops.identity(scale, name='scale')
check_ops.assert_same_float_dtype([self._loc, self._scale])
super(Normal, self).__init__(dtype=self._scale.dtype, reparameterization_type=distribution.FULLY_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=[self._loc, self._scale], name=name) | Construct Normal distributions with mean and stddev `loc` and `scale`.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g. `loc + scale` is a valid operation).
Args:
loc: Floating point tensor; the means of the distribution(s).
scale: Floating point tensor; the stddevs of the distribution(s).
Must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `loc` and `scale` have different `dtype`. | github-repos |
def report_validation_warning(self, element_path: str, msg: str) -> None: | Reports the given warning during FHIR validation.
This indicates that the element complies with the FHIR specification, but
may be missing some desired-but-not-required property, like additional
fields that are useful to consumers.
Args:
element_path: The path to the field where the issue occurred.
msg: The warning message that was produced. | github-repos |
def mat2euler(rmat, axes='sxyz'):
try:
(firstaxis, parity, repetition, frame) = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
(firstaxis, parity, repetition, frame) = axes
i = firstaxis
j = _NEXT_AXIS[(i + parity)]
k = _NEXT_AXIS[((i - parity) + 1)]
M = np.array(rmat, dtype=np.float32, copy=False)[(:3, :3)]
if repetition:
sy = math.sqrt(((M[(i, j)] * M[(i, j)]) + (M[(i, k)] * M[(i, k)])))
if (sy > EPS):
ax = math.atan2(M[(i, j)], M[(i, k)])
ay = math.atan2(sy, M[(i, i)])
az = math.atan2(M[(j, i)], (- M[(k, i)]))
else:
ax = math.atan2((- M[(j, k)]), M[(j, j)])
ay = math.atan2(sy, M[(i, i)])
az = 0.0
else:
cy = math.sqrt(((M[(i, i)] * M[(i, i)]) + (M[(j, i)] * M[(j, i)])))
if (cy > EPS):
ax = math.atan2(M[(k, j)], M[(k, k)])
ay = math.atan2((- M[(k, i)]), cy)
az = math.atan2(M[(j, i)], M[(i, i)])
else:
ax = math.atan2((- M[(j, k)]), M[(j, j)])
ay = math.atan2((- M[(k, i)]), cy)
az = 0.0
if parity:
(ax, ay, az) = ((- ax), (- ay), (- az))
if frame:
(ax, az) = (az, ax)
return vec((ax, ay, az)) | Converts given rotation matrix to euler angles in radian.
Args:
rmat: 3x3 rotation matrix
axes: One of 24 axis sequences as string or encoded tuple
Returns:
converted euler angles in radian vec3 float | codesearchnet |
def decode(self, ids, strip_extraneous=False):
del strip_extraneous
_, tmp_file_path = tempfile.mkstemp("_decode.png")
if self._height is None or self._width is None:
size = int(math.sqrt(len(ids) / self._channels))
length = size * size * self._channels
else:
size = None
length = self._height * self._width * self._channels
if len(ids) != length:
raise ValueError("Length of ids (%d) must be height (%d) x width (%d) x "
"channels (%d); %d != %d.\n Ids: %s"
% (len(ids), self._height, self._width, self._channels,
len(ids), length, " ".join([str(i) for i in ids])))
with tf.Graph().as_default():
raw = tf.constant(ids, dtype=tf.uint8)
if size is None:
img = tf.reshape(raw, [self._height, self._width, self._channels])
else:
img = tf.reshape(raw, [size, size, self._channels])
png = tf.image.encode_png(img)
op = tf.write_file(tmp_file_path, png)
with tf.Session() as sess:
sess.run(op)
return tmp_file_path | Transform a sequence of int ids into an image file.
Args:
ids: list of integers to be converted.
strip_extraneous: unused
Returns:
Path to the temporary file where the image was saved.
Raises:
ValueError: if the ids are not of the appropriate size. | juraj-google-style |
def restore_from_checkpoint(self, checkpoint_path):
import tensorflow as tf
all_vars = tf.contrib.slim.get_variables_to_restore(exclude=['InceptionV3/AuxLogits', 'InceptionV3/Logits', 'global_step'])
saver = tf.train.Saver(all_vars)
saver.restore(self.tf_session, checkpoint_path) | To restore inception model variables from the checkpoint file.
Some variables might be missing in the checkpoint file, so it only
loads the ones that are avialable, assuming the rest would be
initialized later.
Args:
checkpoint_path: Path to the checkpoint file for the Inception graph. | codesearchnet |
def get(self, node_id):
return (self._nodes[_node.Root.ID].get(node_id) or self._nodes[_node.Root.ID].get(self._sid_map.get(node_id))) | Get a note with the given ID.
Args:
node_id (str): The note ID.
Returns:
gkeepapi.node.TopLevelNode: The Note or None if not found. | codesearchnet |
def _generate_security_groups(config_key):
raw_default_groups = validate_key_values(CONFIG, 'base', config_key, default='')
default_groups = _convert_string_to_native(raw_default_groups)
LOG.debug('Default security group for %s is %s', config_key, default_groups)
entries = {}
for env in ENVS:
entries[env] = []
if isinstance(default_groups, list):
groups = _remove_empty_entries(default_groups)
for env in entries:
entries[env] = groups
elif isinstance(default_groups, dict):
entries.update(default_groups)
LOG.debug('Generated security group: %s', entries)
return entries | Read config file and generate security group dict by environment.
Args:
config_key (str): Configuration file key
Returns:
dict: of environments in {'env1': ['group1', 'group2']} format | codesearchnet |
def get_nmr_prize_pool(self, round_num=0, tournament=1):
tournaments = self.get_competitions(tournament)
tournaments.sort(key=lambda t: t['number'])
if round_num == 0:
t = tournaments[-1]
else:
tournaments = [t for t in tournaments if t['number'] == round_num]
if len(tournaments) == 0:
raise ValueError("invalid round number")
t = tournaments[0]
return t['prizePoolNmr'] | Get NMR prize pool for the given round and tournament.
Args:
round_num (int, optional): The round you are interested in,
defaults to current round.
tournament (int, optional): ID of the tournament, defaults to 1
Returns:
decimal.Decimal: prize pool in NMR
Raises:
Value Error: in case of invalid round number | juraj-google-style |
def assert_iter(**kw):
for name, value in kw.items():
if not isiter(value):
raise TypeError(
'paco: {} must be an iterable object'.format(name)) | Asserts if a given values implements a valid iterable interface.
Arguments:
**kw (mixed): value to check if it is an iterable.
Raises:
TypeError: if assertion fails. | juraj-google-style |
def lyap_r_len(**kwargs):
min_len = (((kwargs['emb_dim'] - 1) * kwargs['lag']) + 1)
min_len += (kwargs['trajectory_len'] - 1)
min_len += ((kwargs['min_tsep'] * 2) + 1)
return min_len | Helper function that calculates the minimum number of data points required
to use lyap_r.
Note that none of the required parameters may be set to None.
Kwargs:
kwargs(dict):
arguments used for lyap_r (required: emb_dim, lag, trajectory_len and
min_tsep)
Returns:
minimum number of data points required to call lyap_r with the given
parameters | codesearchnet |
def get_weights(self):
strategy = self._distribution_strategy or self._compile_time_distribution_strategy
if strategy:
with strategy.scope():
return base_layer.Layer.get_weights(self)
return base_layer.Layer.get_weights(self) | Retrieves the weights of the model.
Returns:
A flat list of Numpy arrays. | github-repos |
def failure_packages(self, failure_index=None):
(phase, _) = self._get_failed_phase(failure_index)
fr = phase.failure_reason
return (fr.involved_requirements() if fr else None) | Get packages involved in a failure.
Args:
failure_index: See `failure_reason`.
Returns:
A list of Requirement objects. | codesearchnet |
def read_raw(self, key):
data = None
if key is not None:
data = self.db.read(key.strip())
else:
self.tcex.log.warning(u'The key field was None.')
return data | Read method of CRUD operation for raw data.
Args:
key (string): The variable to read from the DB.
Returns:
(any): Results retrieved from DB. | juraj-google-style |
def row(self, content='', align='left'):
return u'{lm}{vert}{cont}{vert}'.format(lm=(' ' * self.margins.left), vert=self.border_style.outer_vertical, cont=self._format_content(content, align)) | A row of the menu, which comprises the left and right verticals plus the given content.
Returns:
str: A row of this menu component with the specified content. | codesearchnet |
def eval_autoregressive(self, features=None, decode_length=50):
results = self._slow_greedy_infer(features, decode_length=decode_length)
return (results['logits'], results['losses']) | Autoregressive eval.
Quadratic time in decode_length.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
logits: `Tensor`
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
Contains a single key "training". | codesearchnet |
def serialize_state(self, name=None):
if self._reader_ref.dtype == dtypes.resource:
return gen_io_ops.reader_serialize_state_v2(self._reader_ref, name=name)
else:
return gen_io_ops.reader_serialize_state(self._reader_ref, name=name) | Produce a string tensor that encodes the state of a reader.
Not all Readers support being serialized, so this can produce an
Unimplemented error.
Args:
name: A name for the operation (optional).
Returns:
A string Tensor. | github-repos |
def fetch(self, x, y, w, h):
if (not at_least_libvips(8, 8)):
raise Error('libvips too old')
psize = ffi.new('size_t *')
pointer = vips_lib.vips_region_fetch(self.pointer, x, y, w, h, psize)
if (pointer == ffi.NULL):
raise Error('unable to fetch from region')
pointer = ffi.gc(pointer, glib_lib.g_free)
return ffi.buffer(pointer, psize[0]) | Fill a region with pixel data.
Pixels are filled with data!
Returns:
Pixel data.
Raises:
:class:`.Error` | codesearchnet |
def read_int16(self, little_endian=True):
if little_endian:
endian = "<"
else:
endian = ">"
return self.unpack('%sh' % endian, 2) | Read 2 byte as a signed integer value from the stream.
Args:
little_endian (bool): specify the endianness. (Default) Little endian.
Returns:
int: | juraj-google-style |
def findall(self, title=None):
if title is None:
return list(self._sheets)
if title not in self._titles:
return []
return list(self._titles[title]) | Return a list of worksheets with the given title.
Args:
title(str): title/name of the worksheets to return, or ``None`` for all
Returns:
list: list of contained worksheet instances (possibly empty) | juraj-google-style |
def BindVar(self, var_id, value):
if var_id not in self._vars:
raise KeyError(var_id)
self._var_bindings[var_id].append(value) | Associates a value with given variable.
This can be called multiple times to associate multiple values.
Args:
var_id: A variable id to bind the values to.
value: A value to bind to the specified variable.
Raises:
KeyError: If given variable is not specified in the pattern. | juraj-google-style |
def getEstTraitCorrCoef(self,term_i=None):
cov = self.getEstTraitCovar(term_i)
stds=SP.sqrt(cov.diagonal())[:,SP.newaxis]
RV = cov/stds/stds.T
return RV | Returns the estimated trait correlation matrix
Args:
term_i: index of the term we are interested in | juraj-google-style |
def write_structure(times=None):
if (times is None):
return report_loc.write_structure(f.root.times)
else:
if (not isinstance(times, Times)):
raise TypeError("Expected Times instance for param 'times' (default is root).")
return report_loc.write_structure(times) | Produce a formatted record of a times data structure.
Args:
times (Times, optional): If not provided, uses the current root timer.
Returns:
str: Timer tree hierarchy in a formatted string.
Raises:
TypeError: If provided argument is not a Times object. | codesearchnet |
def _get_stats(self):
return _pywrap_dtensor_device.GetStats(context.context()._handle, self._device_info) | Returns the number of cache hit and miss for function compilation.
Returns:
A dictionary.
'miss': number of cache misses;
'hit': number of cache hits; and
'size': size of cache;
miss count. | github-repos |
def _GetEventData(
self, parser_mediator, record_index, evtx_record, recovered=False):
event_data = WinEvtxRecordEventData()
try:
event_data.record_number = evtx_record.identifier
except OverflowError as exception:
parser_mediator.ProduceExtractionWarning((
'unable to read record identifier from event record: {0:d} '
'with error: {1!s}').format(record_index, exception))
try:
event_identifier = evtx_record.event_identifier
except OverflowError as exception:
parser_mediator.ProduceExtractionWarning((
'unable to read event identifier from event record: {0:d} '
'with error: {1!s}').format(record_index, exception))
event_identifier = None
try:
event_identifier_qualifiers = evtx_record.event_identifier_qualifiers
except OverflowError as exception:
parser_mediator.ProduceExtractionWarning((
'unable to read event identifier qualifiers from event record: '
'{0:d} with error: {1!s}').format(record_index, exception))
event_identifier_qualifiers = None
event_data.offset = evtx_record.offset
event_data.recovered = recovered
if event_identifier is not None:
event_data.event_identifier = event_identifier
if event_identifier_qualifiers is not None:
event_data.message_identifier = (
(event_identifier_qualifiers << 16) | event_identifier)
event_data.event_level = evtx_record.event_level
event_data.source_name = evtx_record.source_name
event_data.computer_name = evtx_record.computer_name
event_data.user_sid = evtx_record.user_security_identifier
event_data.strings = list(evtx_record.strings)
event_data.strings_parsed = {}
if event_identifier in self._EVTX_FIELD_MAP.keys():
rules = self._EVTX_FIELD_MAP.get(event_identifier, [])
for rule in rules:
if len(evtx_record.strings) <= rule.index:
parser_mediator.ProduceExtractionWarning((
'evtx_record.strings has unexpected length of {0:d} '
'(expected at least {1:d})'.format(
len(evtx_record.strings), rule.index)))
event_data.strings_parsed[rule.name] = evtx_record.strings[rule.index]
event_data.xml_string = evtx_record.xml_string
return event_data | Extract data from a Windows XML EventLog (EVTX) record.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
record_index (int): event record index.
evtx_record (pyevtx.record): event record.
recovered (Optional[bool]): True if the record was recovered.
Return:
WinEvtxRecordEventData: event data. | juraj-google-style |
def nx_gen_edge_values(G, key, edges=None, default=util_const.NoParam,
on_missing='error', on_keyerr='default'):
if edges is None:
edges = G.edges()
if on_missing is None:
on_missing = 'error'
if on_keyerr is None:
on_keyerr = 'default'
if default is util_const.NoParam and on_keyerr == 'default':
on_keyerr = 'error'
if on_missing == 'error':
data_iter = (G.adj[u][v] for u, v in edges)
elif on_missing == 'default':
data_iter = (G.adj[u][v] if G.has_edge(u, v) else {}
for u, v in edges)
else:
raise KeyError('on_missing={} must be error, filter or default'.format(
on_missing))
if on_keyerr == 'error':
value_iter = (d[key] for d in data_iter)
elif on_keyerr == 'default':
value_iter = (d.get(key, default) for d in data_iter)
else:
raise KeyError('on_keyerr={} must be error or default'.format(on_keyerr))
return value_iter | Generates attributes values of specific edges
Args:
on_missing (str): Strategy for handling nodes missing from G.
Can be {'error', 'default'}. defaults to 'error'.
on_keyerr (str): Strategy for handling keys missing from node dicts.
Can be {'error', 'default'}. defaults to 'default'
if default is specified, otherwise defaults to 'error'. | juraj-google-style |
def add(self, label):
label.label_list = self
self.label_tree.addi(label.start, label.end, label) | Add a label to the end of the list.
Args:
label (Label): The label to add. | juraj-google-style |
def __init__(self, model_class, key_name, key_value, property_name):
super(DjangoORMStorage, self).__init__()
self.model_class = model_class
self.key_name = key_name
self.key_value = key_value
self.property_name = property_name | Constructor for Storage.
Args:
model: string, fully qualified name of db.Model model class.
key_name: string, key name for the entity that has the credentials
key_value: string, key value for the entity that has the
credentials.
property_name: string, name of the property that is an
CredentialsProperty. | juraj-google-style |
def __init__(self, channel):
self.CreateWorkflowTemplate = channel.unary_unary(
"/google.cloud.dataproc.v1beta2.WorkflowTemplateService/CreateWorkflowTemplate",
request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.CreateWorkflowTemplateRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.WorkflowTemplate.FromString,
)
self.GetWorkflowTemplate = channel.unary_unary(
"/google.cloud.dataproc.v1beta2.WorkflowTemplateService/GetWorkflowTemplate",
request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.GetWorkflowTemplateRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.WorkflowTemplate.FromString,
)
self.InstantiateWorkflowTemplate = channel.unary_unary(
"/google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateWorkflowTemplate",
request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.InstantiateWorkflowTemplateRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.InstantiateInlineWorkflowTemplate = channel.unary_unary(
"/google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateInlineWorkflowTemplate",
request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.InstantiateInlineWorkflowTemplateRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.UpdateWorkflowTemplate = channel.unary_unary(
"/google.cloud.dataproc.v1beta2.WorkflowTemplateService/UpdateWorkflowTemplate",
request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.UpdateWorkflowTemplateRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.WorkflowTemplate.FromString,
)
self.ListWorkflowTemplates = channel.unary_unary(
"/google.cloud.dataproc.v1beta2.WorkflowTemplateService/ListWorkflowTemplates",
request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.ListWorkflowTemplatesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.ListWorkflowTemplatesResponse.FromString,
)
self.DeleteWorkflowTemplate = channel.unary_unary(
"/google.cloud.dataproc.v1beta2.WorkflowTemplateService/DeleteWorkflowTemplate",
request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.DeleteWorkflowTemplateRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
) | Constructor.
Args:
channel: A grpc.Channel. | juraj-google-style |
def _prune_nodes_from_input_and_recipient_maps(self, nodes_to_prune):
for node in nodes_to_prune:
del self._node_inputs[node]
del self._node_ctrl_inputs[node]
del self._node_recipients[node]
del self._node_ctrl_recipients[node] | Prune nodes out of input and recipient maps.
Args:
nodes_to_prune: (`list` of `str`) Names of the nodes to be pruned. | github-repos |
def dot(A, B):
try:
result = A.__matmul__(B)
if (result is NotImplemented):
result = B.__rmatmul__(A)
except AttributeError:
result = B.__rmatmul__(A)
return result | Matrix multiplication between A and B
This function is equivalent to ``A @ B``, which is unfortunately
not possible under python 2.x.
Args:
A (sequence):
B (sequence):
Returns:
sequence: | codesearchnet |
def sort_auto_mapping(fname: str, overwrite: bool=False) -> Optional[bool]:
with open(fname, 'r', encoding='utf-8') as f:
content = f.read()
lines = content.split('\n')
new_lines = []
line_idx = 0
while line_idx < len(lines):
if _re_intro_mapping.search(lines[line_idx]) is not None:
indent = len(re.search('^(\\s*)\\S', lines[line_idx]).groups()[0]) + 8
while not lines[line_idx].startswith(' ' * indent + '('):
new_lines.append(lines[line_idx])
line_idx += 1
blocks = []
while lines[line_idx].strip() != ']':
if lines[line_idx].strip() == '(':
start_idx = line_idx
while not lines[line_idx].startswith(' ' * indent + ')'):
line_idx += 1
blocks.append('\n'.join(lines[start_idx:line_idx + 1]))
else:
blocks.append(lines[line_idx])
line_idx += 1
blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
new_lines += blocks
else:
new_lines.append(lines[line_idx])
line_idx += 1
if overwrite:
with open(fname, 'w', encoding='utf-8') as f:
f.write('\n'.join(new_lines))
else:
return '\n'.join(new_lines) != content | Sort all auto mappings in a file.
Args:
fname (`str`): The name of the file where we want to sort auto-mappings.
overwrite (`bool`, *optional*, defaults to `False`): Whether or not to fix and overwrite the file.
Returns:
`Optional[bool]`: Returns `None` if `overwrite=True`. Otherwise returns `True` if the file has an auto-mapping
improperly sorted, `False` if the file is okay. | github-repos |
def get_api_init_text(packages, packages_to_ignore, output_package, api_name, api_version, compat_api_versions=None, lazy_loading=_LAZY_LOADING, use_relative_imports=False):
if compat_api_versions is None:
compat_api_versions = []
module_code_builder = _ModuleInitCodeBuilder(output_package, api_version, lazy_loading, use_relative_imports)
def in_packages(m):
return any((package in m for package in packages))
for module in list(sys.modules.values()):
if not module or not hasattr(module, '__name__') or module.__name__ is None or (not in_packages(module.__name__)):
continue
if packages_to_ignore and any([p for p in packages_to_ignore if p in module.__name__]):
continue
if ('.contrib.' in module.__name__ or module.__name__.endswith('.contrib')) and '.lite' not in module.__name__:
continue
for module_contents_name in dir(module):
if module.__name__ + '.' + module_contents_name in _SYMBOLS_TO_SKIP_EXPLICITLY:
continue
attr = getattr(module, module_contents_name)
_, attr = tf_decorator.unwrap(attr)
add_imports_for_symbol(module_code_builder, attr, module.__name__, module_contents_name, api_name, api_version)
for compat_api_version in compat_api_versions:
add_imports_for_symbol(module_code_builder, attr, module.__name__, module_contents_name, api_name, compat_api_version, _COMPAT_MODULE_TEMPLATE % compat_api_version)
if compat_api_versions:
add_nested_compat_imports(module_code_builder, compat_api_versions, output_package)
return module_code_builder.build() | Get a map from destination module to __init__.py code for that module.
Args:
packages: Base python packages containing python with target tf_export
decorators.
packages_to_ignore: python packages to be ignored when checking for
tf_export decorators.
output_package: Base output python package where generated API will be
added.
api_name: API you want to generate Currently, only `tensorflow`.
api_version: API version you want to generate (1 or 2).
compat_api_versions: Additional API versions to generate under compat/
directory.
lazy_loading: Boolean flag. If True, a lazy loading `__init__.py` file is
produced and if `False`, static imports are used.
use_relative_imports: True if we should use relative imports when importing
submodules.
Returns:
A dictionary where
key: (string) destination module (for e.g. tf or tf.consts).
value: (string) text that should be in __init__.py files for
corresponding modules. | github-repos |
def set_signature_defs(tflite_model, signature_def_map):
model = tflite_model
if not isinstance(tflite_model, bytearray):
model = bytearray(tflite_model)
serialized_signature_def_map = {k: v.SerializeToString() for k, v in signature_def_map.items()}
model_buffer = signature_def_util.SetSignatureDefMap(model, serialized_signature_def_map)
return model_buffer | Sets SignatureDefs to the Metadata of a TfLite flatbuffer buffer.
Args:
tflite_model: Binary TFLite model (bytes or bytes-like object) to which to
add signature_def.
signature_def_map: dict containing SignatureDefs to store in metadata.
Returns:
buffer: A TFLite model binary identical to model buffer with
metadata field containing SignatureDef.
Raises:
ValueError:
tflite_model buffer does not contain a valid TFLite model.
signature_def_map is empty or does not contain a SignatureDef. | github-repos |
def predict_dataset(self, df):
if (len(list(df.columns)) == 2):
df.columns = ['A', 'B']
if (self.model is None):
raise AssertionError('Model has not been trained before predictions')
df2 = DataFrame()
for (idx, row) in df.iterrows():
df2 = df2.append(row, ignore_index=True)
df2 = df2.append({'A': row['B'], 'B': row['A']}, ignore_index=True)
return predict.predict(deepcopy(df2), deepcopy(self.model))[::2] | Runs Jarfo independently on all pairs.
Args:
x (pandas.DataFrame): a CEPC format Dataframe.
kwargs (dict): additional arguments for the algorithms
Returns:
pandas.DataFrame: a Dataframe with the predictions. | codesearchnet |
def get_projection_on_elements(self, structure):
dico = {}
for spin in self.data.keys():
dico[spin] = [[defaultdict(float)
for i in range(self.nkpoints)]
for j in range(self.nbands)]
for iat in range(self.nions):
name = structure.species[iat].symbol
for spin, d in self.data.items():
for k, b in itertools.product(range(self.nkpoints),
range(self.nbands)):
dico[spin][b][k][name] = np.sum(d[k, b, iat, :])
return dico | Method returning a dictionary of projections on elements.
Args:
structure (Structure): Input structure.
Returns:
a dictionary in the {Spin.up:[k index][b index][{Element:values}]] | juraj-google-style |
def plot_spectra_stacked(ss, title=None, num_rows=None, setup=_default_setup):
draw_spectra_stacked(ss, title, num_rows, setup)
plt.show() | Plots one or more stacked in subplots sharing same x-axis.
Args:
ss: list of Spectrum objects
title=None: window title
num_rows=None: (optional) number of rows for subplot grid. If not passed,
num_rows will be the number of plots, and the number of columns will be 1.
If passed, number of columns is calculated automatically.
setup: PlotSpectrumSetup object | codesearchnet |
def lstsq(A, b):
A = asarray(A, float)
b = asarray(b, float)
if (A.ndim == 1):
A = A[(:, newaxis)]
if (A.shape[1] == 1):
return (dot(A.T, b) / squeeze(dot(A.T, A)))
rcond = (finfo(double).eps * max(*A.shape))
return npy_lstsq(A, b, rcond=rcond)[0] | r"""Return the least-squares solution to a linear matrix equation.
Args:
A (array_like): Coefficient matrix.
b (array_like): Ordinate values.
Returns:
:class:`numpy.ndarray`: Least-squares solution. | codesearchnet |
def __init__(self, channel):
self.ExportAssets = channel.unary_unary(
"/google.cloud.asset.v1beta1.AssetService/ExportAssets",
request_serializer=google_dot_cloud_dot_asset__v1beta1_dot_proto_dot_asset__service__pb2.ExportAssetsRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.BatchGetAssetsHistory = channel.unary_unary(
"/google.cloud.asset.v1beta1.AssetService/BatchGetAssetsHistory",
request_serializer=google_dot_cloud_dot_asset__v1beta1_dot_proto_dot_asset__service__pb2.BatchGetAssetsHistoryRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_asset__v1beta1_dot_proto_dot_asset__service__pb2.BatchGetAssetsHistoryResponse.FromString,
) | Constructor.
Args:
channel: A grpc.Channel. | juraj-google-style |
def ask(question, default_answer=False, default_answer_str='no'):
response = default_answer
def should_ignore_tty():
'\n Check, if we want to ignore an opened tty result.\n '
ret_to_bool = {'yes': True, 'no': False, 'true': True, 'false': False}
envs = [os.getenv('CI', default='no'), os.getenv('TEST', default='no')]
vals = [ret_to_bool[val] for val in envs if (val in ret_to_bool)]
return any(vals)
ignore_stdin_istty = should_ignore_tty()
has_tty = (sys.stdin.isatty() and (not ignore_stdin_istty))
if has_tty:
response = query_yes_no(question, default_answer_str)
else:
LOG.debug('NoTTY: %s -> %s', question, response)
return response | Ask for user input.
This asks a yes/no question with a preset default.
You can bypass the user-input and fetch the default answer, if
you set
Args:
question: The question to ask on stdout.
default_answer: The default value to return.
default_answer_str:
The default answer string that we present to the user.
Tests:
>>> os.putenv("TEST", "yes"); ask("Test?", default_answer=True)
True
>>> os.putenv("TEST", "yes"); ask("Test?", default_answer=False)
False | codesearchnet |
def cv_score_mean(self, X, y):
X, y = self._format_inputs(X, y)
if self.problem_type.binary_classification:
kf = StratifiedKFold(
shuffle=True, random_state=RANDOM_STATE + 3)
elif self.problem_type.multi_classification:
self.target_type_transformer.inverse_transform(y)
transformer = self.target_type_transformer
kf = StratifiedKFoldMultiClassIndicator(
transformer, shuffle=True, n_splits=3,
random_state=RANDOM_STATE + 3)
elif self.problem_type.regression:
kf = KFold(shuffle=True, n_splits=3, random_state=RANDOM_STATE + 4)
else:
raise NotImplementedError
scoring = {
scorer_info.name: scorer_info.scorer
for scorer_info in self.scorers_info
}
cv_results = cross_validate(
self.estimator, X, y,
scoring=scoring, cv=kf, return_train_score=False)
results = self._process_cv_results(cv_results)
return results | Compute mean score across cross validation folds.
Split data and labels into cross validation folds and fit the model for
each fold. Then, for each scoring type in scorings, compute the score.
Finally, average the scores across folds. Returns a dictionary mapping
scoring to score.
Args:
X (np.array): data
y (np.array): labels
scorings (List[str]): scoring types | juraj-google-style |
def check_num_samples(ins, batch_size=None, steps=None, steps_name='steps'):
if steps is not None and batch_size is not None:
raise ValueError('If ' + steps_name + ' is set, the `batch_size` must be None.')
if check_steps_argument(ins, steps, steps_name):
return None
if hasattr(ins[0], 'shape'):
return int(ins[0].shape[0])
return None | Determine the number of samples provided for training and evaluation.
The number of samples is not defined when running with `steps`,
in which case the number of samples is set to `None`.
Args:
ins: List of tensors to be fed to the Keras function.
batch_size: Integer batch size or `None` if not defined.
steps: Total number of steps (batches of samples) before declaring
`_predict_loop` finished. Ignored with the default value of `None`.
steps_name: The public API's parameter name for `steps`.
Raises:
ValueError: when `steps` is `None` and the attribute `ins.shape`
does not exist. Also raises ValueError when `steps` is not `None`
and `batch_size` is not `None` because they are mutually
exclusive.
Returns:
When steps is `None`, returns the number of samples to be
processed based on the size of the first dimension of the
first input numpy array. When steps is not `None` and
`batch_size` is `None`, returns `None`. | github-repos |
def _free_up_space(self, size, this_rel_path=None):
space = self.size + size - self.maxsize
if space <= 0:
return
removes = []
for row in self.database.execute("SELECT path, size, time FROM files ORDER BY time ASC"):
if space > 0:
removes.append(row[0])
space -= row[1]
else:
break
for rel_path in removes:
if rel_path != this_rel_path:
global_logger.debug("Deleting {}".format(rel_path))
self.remove(rel_path) | If there are not size bytes of space left, delete files
until there is
Args:
size: size of the current file
this_rel_path: rel_pat to the current file, so we don't delete it. | juraj-google-style |
class FbgemmFp8Config(QuantizationConfigMixin):
def __init__(self, activation_scale_ub: float=1200.0, modules_to_not_convert: Optional[List]=None, **kwargs):
self.quant_method = QuantizationMethod.FBGEMM_FP8
self.activation_scale_ub = activation_scale_ub
self.modules_to_not_convert = modules_to_not_convert
def get_loading_attributes(self):
attibutes_dict = copy.deepcopy(self.__dict__)
loading_attibutes = ['activation_scale_ub']
loading_attibutes_dict = {i: j for i, j in attibutes_dict.items() if i in loading_attibutes}
return loading_attibutes_dict | This is a wrapper class about all possible attributes and features that you can play with a model that has been
loaded using fbgemm fp8 quantization.
Args:
activation_scale_ub (`float`, *optional*, defaults to 1200.0):
The activation scale upper bound. This is used when quantizing the input activation.
modules_to_not_convert (`list`, *optional*, default to `None`):
The list of modules to not quantize, useful for quantizing models that explicitly require to have
some modules left in their original precision. | github-repos |
def load_metrics(event_dir, epoch):
metrics = {}
for filename in tf.gfile.ListDirectory(event_dir):
path = os.path.join(event_dir, filename)
for event in tf.train.summary_iterator(path):
if event.step == epoch and event.HasField("summary"):
value = event.summary.value[0]
metrics[value.tag] = value.simple_value
return metrics | Loads metrics for this epoch if they have already been written.
This reads the entire event file but it's small with just per-epoch metrics.
Args:
event_dir: TODO(koz4k): Document this.
epoch: TODO(koz4k): Document this.
Returns:
metrics. | juraj-google-style |
def connect(self, host='localhost'):
get_logger().info('Connecting to RabbitMQ server...')
self._conn = pika.BlockingConnection(pika.ConnectionParameters(host=host))
self._channel = self._conn.channel()
get_logger().info('Declaring topic exchanger {}...'.format(self.exchange))
self._channel.exchange_declare(exchange=self.exchange, type='topic')
get_logger().info('Creating RabbitMQ queue...')
result = self._channel.queue_declare(exclusive=True)
self._queue_name = result.method.queue
if self.listen_all:
get_logger().info('Binding queue to exchanger {} (listen all)...'.format(self.exchange))
self._channel.queue_bind(exchange=self.exchange, queue=self._queue_name, routing_key='*')
else:
for routing_key in self.topics:
get_logger().info('Binding queue to exchanger {} with routing key {}...'.format(self.exchange, routing_key))
self._channel.queue_bind(exchange=self.exchange, queue=self._queue_name, routing_key=routing_key)
get_logger().info('Binding callback...')
self._channel.basic_consume(self._callback, queue=self._queue_name, no_ack=True) | Connect to the server and set everything up.
Args:
host: hostname to connect to | codesearchnet |
def FromDictionary(cls, dictionary):
if ('user_id' in dictionary):
raise errors.GitkitClientError('use localId instead')
if ('localId' not in dictionary):
raise errors.GitkitClientError('must specify localId')
if ('email' not in dictionary):
raise errors.GitkitClientError('must specify email')
return cls(decode=False, **dictionary) | Initializes from user specified dictionary.
Args:
dictionary: dict of user specified attributes
Returns:
GitkitUser object | codesearchnet |
def _match_dbname(self, dbname):
for config in self._clusters:
if re.match(config['pattern'], dbname):
return config
raise Exception(('No such database %s.' % dbname)) | Map a database name to the Cluster that holds the database.
Args:
dbname: A database name.
Returns:
A dict containing the information about the Cluster that holds the
database. | codesearchnet |
def fetch(self, card_id, data={}, **kwargs):
return super(Card, self).fetch(card_id, data, **kwargs) | Fetch Card for given Id
Args:
card_id : Id for which card object has to be retrieved
Returns:
Card dict for given card Id | juraj-google-style |
def set_direct(self, address_value_dict):
with self._lock:
for address, value in address_value_dict.items():
self._validate_write(address)
if address in self._state:
self._state[address].set_result(result=value)
else:
fut = _ContextFuture(address=address)
self._state[address] = fut
fut.set_result(result=value) | Called in the context manager's set method to either overwrite the
value for an address, or create a new future and immediately set a
value in the future.
Args:
address_value_dict (dict of str:bytes): The unique full addresses
with bytes to set at that address.
Raises:
AuthorizationException | juraj-google-style |
def __call__(self, *args: Union[str, List[str]], **kwargs: Any) -> Union[Any, List[Any]]:
return super().__call__(*args, **kwargs) | Extract the features of the input(s) text.
Args:
args (`str` or `List[str]`): One or several texts (or one list of texts) to get the features of.
Return:
A nested list of `float`: The features computed by the model. | github-repos |
class XLMPoolerEndLogits(nn.Module):
def __init__(self, config: XLMConfig):
super().__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dense_1 = nn.Linear(config.hidden_size, 1)
def forward(self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, p_mask: Optional[torch.FloatTensor]=None) -> torch.FloatTensor:
assert start_states is not None or start_positions is not None, 'One of start_states, start_positions should be not None'
if start_positions is not None:
slen, hsz = hidden_states.shape[-2:]
start_positions = start_positions[:, None, None].expand(-1, -1, hsz)
start_states = hidden_states.gather(-2, start_positions)
start_states = start_states.expand(-1, slen, -1)
x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
x = self.activation(x)
x = self.LayerNorm(x)
x = self.dense_1(x).squeeze(-1)
if p_mask is not None:
if p_mask.dtype == torch.float16:
x = x * (1 - p_mask) - 65500 * p_mask
else:
x = x * (1 - p_mask) - 1e+30 * p_mask
return x | Compute SQuAD end logits from sequence hidden states.
Args:
config ([`XLMConfig`]):
The config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps`
to use. | github-repos |
def get_for_type(input_type="text"):
if input_type in RandomInputHelper.cache:
return RandomInputHelper.cache[input_type]
types = {
"text": RandomInputHelper.get_random_value,
"hidden": RandomInputHelper.get_random_value,
"search": RandomInputHelper.get_random_value,
"color": RandomInputHelper.get_random_color,
"week": {"function": RandomInputHelper.get_random_value, "params": [2, ["1234"]]},
"password": RandomInputHelper.get_random_password,
"number": RandomInputHelper.get_random_number,
"tel": RandomInputHelper.get_random_telephonenumber,
"url": RandomInputHelper.get_random_url,
"textarea": RandomInputHelper.get_random_text,
"email": RandomInputHelper.get_random_email
}
if types.get(input_type) is None:
return ""
if type(types.get(input_type)) is dict:
generator = types.get(input_type)
value = generator.get("function")(*generator.get("params"))
else:
value = types.get(input_type)()
RandomInputHelper.cache[input_type] = value
return value | Get a random string for the given html input type
Args:
input_type (str): The input type (e.g. email).
Returns:
str: The (cached) random value. | juraj-google-style |
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int=-1):
lr_lambda = partial(_get_constant_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps)
return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) | Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate
increases linearly between 0 and the initial lr set in the optimizer.
Args:
optimizer ([`~torch.optim.Optimizer`]):
The optimizer for which to schedule the learning rate.
num_warmup_steps (`int`):
The number of steps for the warmup phase.
last_epoch (`int`, *optional*, defaults to -1):
The index of the last epoch when resuming training.
Return:
`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. | github-repos |
def is_valid_op(self, symmop):
coords = self.centered_mol.cart_coords
for site in self.centered_mol:
coord = symmop.operate(site.coords)
ind = find_in_coord_list(coords, coord, self.tol)
if not (len(ind) == 1
and self.centered_mol[ind[0]].species
== site.species):
return False
return True | Check if a particular symmetry operation is a valid symmetry operation
for a molecule, i.e., the operation maps all atoms to another
equivalent atom.
Args:
symmop (SymmOp): Symmetry operation to test.
Returns:
(bool): Whether SymmOp is valid for Molecule. | juraj-google-style |
def Run(self, conf, args):
try:
options, args = self.parser.parse_args(args)
except SystemExit as e:
return e.code
if options.maps:
self.log.info('Setting configured maps to %s', options.maps)
conf.maps = options.maps
warnings, errors = (0, 0)
self.log.info('Verifying program and system configuration.')
config_warnings, config_errors = config.VerifyConfiguration(conf)
warnings += config_warnings
errors += config_errors
self.log.info('Verifying data sources.')
errors += self.VerifySources(conf)
self.log.info('Verifying data caches.')
errors += self.VerifyMaps(conf)
self.log.info('Verification result: %d warnings, %d errors', warnings, errors)
if warnings + errors:
self.log.info('Verification failed!')
else:
self.log.info('Verification passed!')
return warnings + errors | Run the Verify command.
See Command.Run() for full documentation on the Run() method.
Args:
conf: nss_cache.config.Config object
args: list of arguments to be parsed
Returns:
count of warnings and errors detected when verifying | github-repos |
def verify(self, obj):
if isinstance(obj, str):
raise ValidationError("Object was not a list", reason="a string was passed instead of a list", object=obj)
out_obj = []
if self._min_length is not None and len(obj) < self._min_length:
raise ValidationError("List was too short",
reason="list length %d was less than the minimum %d" % (len(obj), self._min_length),
min_length=self._min_length, actual_length=len(obj))
if self._max_length is not None and len(obj) > self._max_length:
raise ValidationError("List was too long",
reason="list length %d was greater than the max %d" % (len(obj), self._max_length),
min_length=self._max_length, actual_length=len(obj))
for val in obj:
out_obj.append(self._verifier.verify(val))
return out_obj | Verify that the object conforms to this verifier's schema
Args:
obj (object): A python object to verify
Raises:
ValidationError: If there is a problem verifying the dictionary, a
ValidationError is thrown with at least the reason key set indicating
the reason for the lack of validation. | juraj-google-style |
def generate_match_query(field, value, with_operator_and):
parsed_value = None
try:
parsed_value = json.loads(value.lower())
except (ValueError, TypeError, AttributeError):
pass
if isinstance(value, bool):
return {'match': {field: value}}
elif isinstance(parsed_value, bool):
return {'match': {field: value.lower()}}
if with_operator_and:
return {'match': {field: {'query': value, 'operator': 'and'}}}
return {'match': {field: value}} | Helper for generating a match query.
Args:
field (six.text_type): The ES field to be queried.
value (six.text_type/bool): The value of the query (bool for the case of type-code query ["core: true"]).
with_operator_and (bool): Flag that signifies whether to generate the explicit notation of the query, along
with '"operator": "and"', so that all tokens of the query value are required to match.
Notes:
If value is of instance bool, then the shortened version of the match query is generated, at all times. | codesearchnet |
def movies(self, **kwargs):
path = self._get_id_path('movies')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response | Get the list of movies for a particular genre by id. By default, only
movies with 10 or more votes are included.
Args:
page: (optional) Minimum 1, maximum 1000.
language: (optional) ISO 639-1 code.
include_all_movies: (optional) Toggle the inclusion of all movies
and not just those with 10 or more ratings.
Expected value is: True or False.
include_adult: (optional) Toggle the inclusion of adult titles.
Expected value is: True or False.
Returns:
A dict respresentation of the JSON returned from the API. | codesearchnet |
def copyright_model_factory(*, validator=validators.is_copyright_model, **kwargs):
kwargs['ld_type'] = 'Copyright'
return _model_factory(validator=validator, **kwargs) | Generate a Copyright model.
Expects ``data``, ``validator``, ``model_cls``, and ``ld_context``
as keyword arguments.
Raises:
:exc:`ModelError`: If a non-'Copyright' ``ld_type`` keyword
argument is given. | codesearchnet |
def Artifacts(self, os_name=None, cpe=None, label=None):
return [c.artifact for c in self.conditions if c.Artifacts(os_name, cpe, label)] | Find the artifacts that correspond with other trigger conditions.
Args:
os_name: An OS string.
cpe: A CPE string.
label: A label string.
Returns:
A list of artifacts to be processed. | codesearchnet |
def attention_bias_ignore_padding(memory_padding):
ret = memory_padding * large_compatible_negative(memory_padding.dtype)
return tf.expand_dims(tf.expand_dims(ret, axis=1), axis=1) | Create an bias tensor to be added to attention logits.
Args:
memory_padding: a float `Tensor` with shape [batch, memory_length].
Returns:
a `Tensor` with shape [batch, 1, 1, memory_length]. | juraj-google-style |
def value_ranges(self, value_ranges):
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
)) | Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val" | juraj-google-style |
def filter_pem(data):
assert isinstance(data, bytes), 'Expect bytes. Got {}.'.format(type(data))
certs = set()
new_list = []
in_pem_block = False
for line in re.split(b'[\\r\\n]+', data):
if (line == b'-----BEGIN CERTIFICATE-----'):
assert (not in_pem_block)
in_pem_block = True
elif (line == b'-----END CERTIFICATE-----'):
assert in_pem_block
in_pem_block = False
content = b''.join(new_list)
content = rewrap_bytes(content)
certs.add(((b'-----BEGIN CERTIFICATE-----\n' + content) + b'\n-----END CERTIFICATE-----\n'))
new_list = []
elif in_pem_block:
new_list.append(line)
return certs | Processes the bytes for PEM certificates.
Returns:
``set`` containing each certificate | codesearchnet |
def check_tape_safe(self, operator, skip_options=None):
skip_options = skip_options or []
if not operator.variables:
raise AssertionError('`operator.variables` was empty')
def _assert_not_none(iterable):
for item in iterable:
self.assertIsNotNone(item)
with backprop.GradientTape() as tape:
grad = tape.gradient(operator.to_dense(), operator.variables)
_assert_not_none(grad)
with backprop.GradientTape() as tape:
var_grad = tape.gradient(operator, operator.variables)
_assert_not_none(var_grad)
nest.assert_same_structure(var_grad, grad)
with backprop.GradientTape() as tape:
_assert_not_none(tape.gradient(operator.adjoint().to_dense(), operator.variables))
x = math_ops.cast(array_ops.ones(shape=operator.H.shape_tensor()[:-1]), operator.dtype)
with backprop.GradientTape() as tape:
_assert_not_none(tape.gradient(operator.matvec(x), operator.variables))
if not operator.is_square:
return
for option in [CheckTapeSafeSkipOptions.DETERMINANT, CheckTapeSafeSkipOptions.LOG_ABS_DETERMINANT, CheckTapeSafeSkipOptions.DIAG_PART, CheckTapeSafeSkipOptions.TRACE]:
with backprop.GradientTape() as tape:
if option not in skip_options:
_assert_not_none(tape.gradient(getattr(operator, option)(), operator.variables))
if operator.is_non_singular is False:
return
with backprop.GradientTape() as tape:
_assert_not_none(tape.gradient(operator.inverse().to_dense(), operator.variables))
with backprop.GradientTape() as tape:
_assert_not_none(tape.gradient(operator.solvevec(x), operator.variables))
if not (operator.is_self_adjoint and operator.is_positive_definite):
return
with backprop.GradientTape() as tape:
_assert_not_none(tape.gradient(operator.cholesky().to_dense(), operator.variables)) | Check gradients are not None w.r.t. operator.variables.
Meant to be called from the derived class.
This ensures grads are not w.r.t every variable in operator.variables. If
more fine-grained testing is needed, a custom test should be written.
Args:
operator: LinearOperator. Exact checks done will depend on hints.
skip_options: Optional list of CheckTapeSafeSkipOptions.
Makes this test skip particular checks. | github-repos |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.