code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def UpdateUser(self, user, ssh_keys):
    """Update a Linux user with authorized SSH keys.

    Args:
        user: string, the name of the Linux user account.
        ssh_keys: list, the SSH key strings associated with the user.

    Returns:
        bool, True if the user account updated successfully.
    """
    if not bool(USER_REGEX.match(user)):
        self.logger.warning('Invalid user account name %s.', user)
        return False
    if not self._GetUser(user):
        # The user does not exist: create it and add it to the configured
        # groups before granting sudo access.
        if not (self._AddUser(user)
                and self._UpdateUserGroups(user, self.groups)):
            return False
    if not self._UpdateSudoer(user, sudoer=True):
        return False
    pw_entry = self._GetUser(user)
    # A `nologin` shell is treated as an explicit opt-out: report success
    # without touching the authorized keys file.
    if pw_entry and os.path.basename(pw_entry.pw_shell) == 'nologin':
        message = 'Not updating user %s. User set `nologin` as login shell.'
        self.logger.debug(message, user)
        return True
    try:
        self._UpdateAuthorizedKeys(user, ssh_keys)
    except (IOError, OSError) as e:
        message = 'Could not update the authorized keys file for user %s. %s.'
        self.logger.warning(message, user, str(e))
        return False
    else:
        return True
def sign(mv):
    """Obtains a signature for a `MetricValue`.

    Args:
        mv (MetricValue): a metric value that's part of an operation.

    Returns:
        bytes: a unique MD5 digest identifying that metric value.
    """
    md5 = hashlib.md5()
    # update_hash folds the metric value's identifying fields into the hash.
    update_hash(md5, mv)
    return md5.digest()
def _save_tf_record_dataset(self, repr_ds: RepresentativeDataset, signature_def_key: str) -> _RepresentativeDatasetFile:
    """Saves `repr_ds` to a TFRecord file.

    Each sample in `repr_ds` is serialized as `_RepresentativeDataSample`.

    Args:
        repr_ds: `RepresentativeDataset` to save.
        signature_def_key: The signature def key associated with `repr_ds`.

    Returns:
        A `_RepresentativeDatasetFile` instance containing the path to the
        saved file.

    Raises:
        KeyError: If the set of input keys in the dataset samples doesn't
            match the set of expected input keys.
    """
    if not context.executing_eagerly():
        # In graph mode the samples may contain symbolic tensors; evaluate
        # them to numpy arrays before serialization.
        with session.Session() as sess:
            repr_ds = replace_tensors_by_numpy_ndarrays(repr_ds, sess)
    expected_input_keys = self.expected_input_key_map.get(signature_def_key, None)
    tfrecord_file_path = self.path_map[signature_def_key]
    with python_io.TFRecordWriter(tfrecord_file_path) as writer:
        for repr_sample in repr_ds:
            if expected_input_keys is not None and set(repr_sample.keys()) != expected_input_keys:
                raise KeyError(f'Invalid input keys for representative sample. The function expects input keys of: {set(expected_input_keys)}. Got: {set(repr_sample.keys())}. Please provide correct input keys for representative samples.')
            sample = _RepresentativeDataSample()
            for input_name, input_value in repr_sample.items():
                sample.tensor_proto_inputs[input_name].CopyFrom(tensor_util.make_tensor_proto(input_value))
            writer.write(sample.SerializeToString())
    logging.info('Saved representative dataset for signature def: %s to: %s', signature_def_key, tfrecord_file_path)
    return _RepresentativeDatasetFile(tfrecord_file_path=str(tfrecord_file_path))
def get_sessions(self, app_path):
    """Gets all currently active sessions for an application.

    Args:
        app_path (str): The configured application path for the application
            to return sessions for.

    Returns:
        list[ServerSession]

    Raises:
        ValueError: If ``app_path`` is not a configured application.
    """
    if app_path not in self._applications:
        raise ValueError("Application %s does not exist on this server" % app_path)
    return list(self._applications[app_path].sessions)
def email(self, name, subject, header, body, **kwargs):
    """Add Email data to Batch object.

    Args:
        name (str): The name for this Group.
        subject (str): The subject for this Email.
        header (str): The header for this Email.
        body (str): The body for this Email.
        date_added (str, kwargs): The date timestamp the Indicator was created.
        from_addr (str, kwargs): The **from** address for this Email.
        to_addr (str, kwargs): The **to** address for this Email.
        xid (str, kwargs): The external id for this Group.

    Returns:
        obj: An instance of Email.
    """
    group_obj = Email(name, subject, header, body, **kwargs)
    return self._group(group_obj)
def unpack(self, buff, offset=0):
    """Unpack a binary message into this object's attributes.

    Unpack the binary value *buff* and update this object's attributes based
    on the results.

    Args:
        buff (bytes): Binary data package to be unpacked.
        offset (int): Where to begin unpacking.

    Raises:
        Exception: If there is a struct unpacking error.
    """
    self.action_type = UBInt16(enum_ref=ActionType)
    self.action_type.unpack(buff, offset)
    # Dispatch to the concrete ActionHeader subclass that declares this
    # action type, then let it finish the unpacking.
    for cls in ActionHeader.__subclasses__():
        if self.action_type.value in cls.get_allowed_types():
            self.__class__ = cls
            break
    super().unpack(buff, offset)
def group_molecules(self, mol_list):
    """Group molecules by structural equality.

    Assumption: if s1=s2 and s2=s3, then s1=s3. This may not be true for
    small tolerances.

    Args:
        mol_list: List of OpenBabel OBMol or pymatgen objects.

    Returns:
        A list of lists of matched molecules.
    """
    # First bucket molecules by a cheap structural hash; only molecules in
    # the same bucket can possibly match.
    mol_hash = [(i, self._mapper.get_molecule_hash(m)) for (i, m) in enumerate(mol_list)]
    mol_hash.sort(key=(lambda x: x[1]))
    raw_groups = tuple([tuple([m[0] for m in g]) for (k, g) in itertools.groupby(mol_hash, key=(lambda x: x[1]))])
    group_indices = []
    for rg in raw_groups:
        # Pairwise fit test inside each bucket.
        mol_eq_test = [(p[0], p[1], self.fit(mol_list[p[0]], mol_list[p[1]])) for p in itertools.combinations(sorted(rg), 2)]
        mol_eq = set([(p[0], p[1]) for p in mol_eq_test if p[2]])
        not_alone_mols = set(itertools.chain.from_iterable(mol_eq))
        alone_mols = (set(rg) - not_alone_mols)
        group_indices.extend([[m] for m in alone_mols])
        # Grow connected components of the pairwise-equality relation.
        while (len(not_alone_mols) > 0):
            current_group = {not_alone_mols.pop()}
            while (len(not_alone_mols) > 0):
                candidate_pairs = set([tuple(sorted(p)) for p in itertools.product(current_group, not_alone_mols)])
                mutual_pairs = (candidate_pairs & mol_eq)
                if (len(mutual_pairs) == 0):
                    break
                mutual_mols = set(itertools.chain.from_iterable(mutual_pairs))
                current_group |= mutual_mols
                not_alone_mols -= mutual_mols
            group_indices.append(sorted(current_group))
    group_indices.sort(key=(lambda x: (len(x), (- x[0]))), reverse=True)
    all_groups = [[mol_list[i] for i in g] for g in group_indices]
    return all_groups
def image_channel_compress_top(body_output, targets, model_hparams, vocab_size):
    """Transforms body output to return logits.

    Args:
        body_output: Tensor of shape [batch, img_len, img_len, depth].
        targets: unused.
        model_hparams: HParams, model hyperparameters.
        vocab_size: int, vocabulary size.

    Returns:
        Tensor of shape [batch, img_len, img_len, channels, vocab_size].
    """
    del targets
    with tf.variable_scope("image_channel_compress_modality"):
        hidden_size = model_hparams.hidden_size
        img_len = model_hparams.img_len
        channels = 3
        batch = common_layers.shape_list(body_output)[0]
        # Decompress: expand depth so each RGB channel gets its own
        # hidden_size slice, then unfold channels into the width dimension.
        x = tf.layers.conv2d(
            body_output,
            hidden_size * channels,
            kernel_size=(1, 1),
            strides=(1, 1),
            padding="VALID",
            activation=tf.nn.relu,
            name="decompress_conv")
        x = tf.reshape(x, [batch, img_len, img_len * channels, hidden_size])
        x = common_layers.layer_preprocess(x, model_hparams)
        x = tf.layers.dense(x,
                            vocab_size,
                            use_bias=True,
                            activation=None,
                            name="output_conv")
        x = tf.reshape(
            x, [batch, img_len, img_len, channels, vocab_size])
        return x
def add_compound(self, compound):
    """Add the information of a compound variant.

    Appends a compound dict to ``variant['compounds']``.

    Args:
        compound (dict): A compound dictionary.
    """
    logger.debug("Adding compound {0} to variant {1}".format(
        compound, self['variant_id']))
    self['compounds'].append(compound)
def process_streamer(self, streamer, callback=None):
    """Start streaming a streamer.

    Args:
        streamer (DataStreamer): The streamer itself.
        callback (callable): An optional callable that will be called as:
            callable(index, success, highest_id_received_from_other_side)

    Raises:
        InternalError: If the streamer is already being streamed.
    """
    index = streamer.index
    if (index in self._in_progress_streamers):
        raise InternalError('You cannot add a streamer again until it has finished streaming.')
    queue_item = QueuedStreamer(streamer, callback)
    self._in_progress_streamers.add(index)
    self._logger.debug('Streamer %d: queued to send %d readings', index, queue_item.initial_count)
    self._queue.put_nowait(queue_item)
def make_connection(transport, **kwargs):
    """Creates a connection instance based on the transport.

    This function creates the EapiConnection object based on the desired
    transport. It looks up the transport class in the TRANSPORTS global
    dictionary.

    Args:
        transport (string): The transport to use to create the instance.
        **kwargs: Arbitrary keyword arguments forwarded to the transport
            class constructor.

    Returns:
        An instance of a connection object based on the transport.

    Raises:
        TypeError: If the transport keyword is not found in the list (keys)
            of available transports.
    """
    if (transport not in TRANSPORTS):
        raise TypeError('invalid transport specified')
    klass = TRANSPORTS[transport]
    return klass(**kwargs)
def ragged_assert_compatible_and_get_flat_values(values, mask=None):
    """If ragged, it checks the compatibility and then returns the flat_values.

    Note: If two tensors are dense, it does not check their compatibility.
    Note: Although two ragged tensors with different ragged ranks could have
    identical overall rank and dimension sizes and hence be compatible,
    we do not support those cases.

    Args:
        values: A list of potentially ragged tensors of the same ragged_rank.
        mask: A potentially ragged tensor of the same ragged_rank as elements
            in `values`.

    Returns:
        A tuple in which the first element is the list of tensors and the
        second is the mask tensor. ([Values], mask). Mask and the elements in
        Values are equal to the flat_values of the input arguments (if they
        were ragged).

    Raises:
        TypeError: If inputs mix ragged and non-ragged tensors, or if the
            mask is ragged while the inputs are not.
    """
    if isinstance(values, list):
        is_all_ragged = all((isinstance(rt, ragged_tensor.RaggedTensor) for rt in values))
        is_any_ragged = any((isinstance(rt, ragged_tensor.RaggedTensor) for rt in values))
    else:
        is_all_ragged = isinstance(values, ragged_tensor.RaggedTensor)
        is_any_ragged = is_all_ragged
    if is_all_ragged and (mask is None or isinstance(mask, ragged_tensor.RaggedTensor)):
        to_be_stripped = False
        if not isinstance(values, list):
            values = [values]
            to_be_stripped = True
        # All ragged inputs must share identical nested row splits.
        nested_row_split_list = [rt.nested_row_splits for rt in values]
        assertion_list = _assert_splits_match(nested_row_split_list)
        if isinstance(mask, ragged_tensor.RaggedTensor):
            assertion_list_for_mask = _assert_splits_match([nested_row_split_list[0], mask.nested_row_splits])
            with ops.control_dependencies(assertion_list_for_mask):
                mask = array_ops.expand_dims(mask.flat_values, -1)
        flat_values = []
        for value in values:
            with ops.control_dependencies(assertion_list):
                flat_values.append(array_ops.expand_dims(value.flat_values, -1))
        values = flat_values[0] if to_be_stripped else flat_values
    elif is_any_ragged:
        raise TypeError('One of the inputs does not have acceptable types.')
    elif isinstance(mask, ragged_tensor.RaggedTensor):
        raise TypeError('Ragged mask is not allowed with non-ragged inputs.')
    return (values, mask)
def download_tile(map_layer, zoom, x, y):
    """Download a given tile from the tile server.

    Args:
        map_layer (MapLayer): MapLayer object which provides the tile-url.
        zoom (int): zoom level.
        x (int): Tile-x-coordinate.
        y (int): Tile-y-coordinate.

    Returns:
        file: temporary file containing the downloaded image, or a bundled
        empty tile on download failure.
    """
    try:
        tile_url = map_layer.get_tile_url(zoom, x, y)
        (tmp_file, headers) = urllib.request.urlretrieve(tile_url)
        return ((x, y), tmp_file)
    except URLError as e:
        # Best effort: log and fall back to an empty tile so the map can
        # still be assembled.
        app.logger.info('Error downloading tile x={}, y={}, z={} for layer {}: {}'.format(x, y, zoom, map_layer, e.reason))
        return ((x, y), pkg_resources.resource_filename('geos', 'static/empty_tile.png'))
def is50or60(msg, spd_ref, trk_ref, alt_ref):
    """Use reference ground speed and track to determine BDS50 and BDS60.

    Args:
        msg (String): 28 bytes hexadecimal message string.
        spd_ref (float): reference speed (ADS-B ground speed), kts.
        trk_ref (float): reference track (ADS-B track angle), deg.
        alt_ref (float): reference altitude (ADS-B altitude), ft.

    Returns:
        String or None: BDS version, or possible versions, or None if
        nothing matches.
    """
    def vxy(v, angle):
        # Decompose a speed along a track angle into x/y components.
        vx = v * np.sin(np.radians(angle))
        vy = v * np.cos(np.radians(angle))
        return vx, vy

    if not (bds50.is50(msg) and bds60.is60(msg)):
        return None
    h50 = bds50.trk50(msg)
    v50 = bds50.gs50(msg)
    if h50 is None or v50 is None:
        return 'BDS50,BDS60'
    h60 = bds60.hdg60(msg)
    m60 = bds60.mach60(msg)
    i60 = bds60.ias60(msg)
    if h60 is None or (m60 is None and i60 is None):
        return 'BDS50,BDS60'
    m60 = np.nan if m60 is None else m60
    i60 = np.nan if i60 is None else i60
    # Candidate velocity vectors from each decoding hypothesis.
    XY5 = vxy(v50*aero.kts, h50)
    XY6m = vxy(aero.mach2tas(m60, alt_ref*aero.ft), h60)
    XY6i = vxy(aero.cas2tas(i60*aero.kts, alt_ref*aero.ft), h60)
    allbds = ['BDS50', 'BDS60', 'BDS60']
    X = np.array([XY5, XY6m, XY6i])
    Mu = np.array(vxy(spd_ref*aero.kts, trk_ref))
    try:
        # Pick the hypothesis closest to the ADS-B reference vector.
        dist = np.linalg.norm(X-Mu, axis=1)
        BDS = allbds[np.nanargmin(dist)]
    except ValueError:
        return 'BDS50,BDS60'
    return BDS
def from_string(key, password='notasecret'):
    """Construct a Signer instance from a string.

    Args:
        key: string, private key in PEM format.
        password: string, password for private key file. Unused for PEM
            files.

    Returns:
        Signer instance.

    Raises:
        NotImplementedError: If the key isn't in PEM format.
    """
    parsed_pem_key = _helpers._parse_pem_key(_helpers._to_bytes(key))
    if parsed_pem_key:
        pkey = RSA.importKey(parsed_pem_key)
    else:
        raise NotImplementedError(
            'No key in PEM format was detected. This implementation '
            'can only use the PyCrypto library for keys in PEM '
            'format.')
    return PyCryptoSigner(pkey)
def _KillProcess(self, pid):
if sys.platform.startswith('win'):
process_terminate = 1
handle = ctypes.windll.kernel32.OpenProcess(
process_terminate, False, pid)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
ctypes.windll.kernel32.CloseHandle(handle)
else:
try:
os.kill(pid, signal.SIGKILL)
except OSError as exception:
logger.error('Unable to kill process {0:d} with error: {1!s}'.format(
pid, exception)) | Issues a SIGKILL or equivalent to the process.
Args:
pid (int): process identifier (PID). | juraj-google-style |
def _isbn_cleaner(fn):
    """Decorator for calling other functions from this module.

    Purpose of this decorator is to clean the ISBN string from garbage and
    return a list of digits.

    Args:
        fn (function): function in which the :func:`_clean_isbn(isbn)` call
            will be wrapped.
    """
    @wraps(fn)
    def wrapper(isbn):
        return fn(_clean_isbn(isbn))
    return wrapper
def download(dest_file_path: Union[str, Path, List[Union[str, Path]]], source_url: str, force_download=True):
    """Download a file from URL to one or several target locations.

    Args:
        dest_file_path: path or list of paths to the destination files
            (including file name).
        source_url: the source URL.
        force_download: download the file even if it already exists, or not.
    """
    if isinstance(dest_file_path, list):
        dest_file_paths = [Path(path) for path in dest_file_path]
    else:
        dest_file_paths = [Path(dest_file_path).absolute()]
    if (not force_download):
        # Keep only destinations that do not exist yet.
        to_check = list(dest_file_paths)
        dest_file_paths = []
        for p in to_check:
            if p.exists():
                log.info(f'File already exists in {p}')
            else:
                dest_file_paths.append(p)
    if dest_file_paths:
        cache_dir = os.getenv('DP_CACHE_DIR')
        cached_exists = False
        if cache_dir:
            # Cache key is a truncated md5 of the source URL.
            first_dest_path = (Path(cache_dir) / md5(source_url.encode('utf8')).hexdigest()[:15])
            cached_exists = first_dest_path.exists()
        else:
            first_dest_path = dest_file_paths.pop()
        if (not cached_exists):
            first_dest_path.parent.mkdir(parents=True, exist_ok=True)
            simple_download(source_url, first_dest_path)
        else:
            log.info(f'Found cached {source_url} in {first_dest_path}')
        # Fan the single downloaded/cached file out to every destination.
        for dest_path in dest_file_paths:
            dest_path.parent.mkdir(parents=True, exist_ok=True)
            shutil.copy(str(first_dest_path), str(dest_path))
def __init__(self, corruption_type=None, severity=1, **kwargs):
    """BuilderConfig for Imagenet2012Corrupted.

    Args:
        corruption_type: string, must be one of the items in TYPE_LIST.
        severity: integer, between 1 and 5.
        **kwargs: keyword arguments forwarded to super.
    """
    super(Imagenet2012CorruptedConfig, self).__init__(**kwargs)
    self.corruption_type = corruption_type
    self.severity = severity
def StopService(service_name, service_binary_name=None):
    """Stop a Windows service with the given name.

    Args:
        service_name: string The name of the service to be stopped.
        service_binary_name: string If given, also kill this binary as a best
            effort fallback solution.
    """
    try:
        status = win32serviceutil.QueryServiceStatus(service_name)[1]
    except pywintypes.error as e:
        if (getattr(e, 'winerror', None) == winerror.ERROR_SERVICE_DOES_NOT_EXIST):
            logging.debug("Tried to stop '%s', but the service is not installed.", service_name)
        else:
            logging.exception("Unable to query status of service '%s':", service_name)
        return
    # Poll for up to ~20 seconds, re-issuing StopService if it is not
    # already stopping.
    for _ in range(20):
        if (status == win32service.SERVICE_STOPPED):
            break
        elif (status != win32service.SERVICE_STOP_PENDING):
            try:
                win32serviceutil.StopService(service_name)
            except pywintypes.error:
                logging.exception("Unable to stop service '%s':", service_name)
        time.sleep(1)
        status = win32serviceutil.QueryServiceStatus(service_name)[1]
    if (status == win32service.SERVICE_STOPPED):
        logging.info("Service '%s' stopped.", service_name)
        return
    elif (not service_binary_name):
        return
    # Fallback: forcefully kill the service binary.
    output = subprocess.check_output(['taskkill', '/im', ('%s*' % service_binary_name), '/f'], shell=True, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
    logging.debug('%s', output)
    time.sleep(2)
def verify_link_in_task_graph(chain, decision_link, task_link):
    """Compare the runtime task definition against the decision task graph.

    Args:
        chain (ChainOfTrust): the chain we're operating on.
        decision_link (LinkOfTrust): the decision task link.
        task_link (LinkOfTrust): the task link we're testing.

    Raises:
        CoTError: on failure.
    """
    log.info("Verifying the {} {} task definition is part of the {} {} task graph...".format(
        task_link.name, task_link.task_id, decision_link.name, decision_link.task_id
    ))
    if task_link.task_id in decision_link.task_graph:
        # Deep-copy so the comparison cannot mutate the cached graph.
        graph_defn = deepcopy(decision_link.task_graph[task_link.task_id])
        verify_task_in_task_graph(task_link, graph_defn)
        log.info("Found {} in the graph; it's a match".format(task_link.task_id))
        return
    raise_on_errors(["Can't find task {} {} in {} {} task-graph.json!".format(
        task_link.name, task_link.task_id, decision_link.name, decision_link.task_id
    )])
def group(text, size):
    """Group ``text`` into blocks of ``size``.

    Example:
        >>> group("test", 2)
        ['te', 'st']

    Args:
        text (str): text to separate.
        size (int): size of groups to split the text into.

    Returns:
        List of ``size``-sized groups of text (the last group may be
        shorter).

    Raises:
        ValueError: If ``size`` is non-positive.
    """
    if size <= 0:
        # The original message referenced "n", which is not a parameter name.
        raise ValueError("size must be a positive integer")
    return [text[i:i + size] for i in range(0, len(text), size)]
def __init__(self, unique_identifier=None, attribute_names=None):
    """Construct a GetAttributeList response payload.

    Args:
        unique_identifier (string): The ID of the managed object with
            which the retrieved attribute names should be associated.
            Optional, defaults to None.
        attribute_names: A list of strings identifying the names of the
            attributes associated with the managed object. Optional,
            defaults to None.
    """
    super(GetAttributeListResponsePayload, self).__init__(
        enums.Tags.RESPONSE_PAYLOAD
    )
    # Backing fields; the property setters below validate assignments.
    self._unique_identifier = None
    self._attribute_names = list()
    self.unique_identifier = unique_identifier
    self.attribute_names = attribute_names
def collapse_phenotypes(self, input_phenotype_labels, output_phenotype_label, verbose=True):
    """Rename one or more input phenotypes to a single output phenotype.

    Args:
        input_phenotype_labels (list): A str name or list of names to combine.
        output_phenotype_label (str): The phenotype name to collapse into.
        verbose (bool): output more details.

    Returns:
        CellDataFrame: The modified CellDataFrame.

    Raises:
        ValueError: If any input phenotype is not present in the data.
    """
    if isinstance(input_phenotype_labels, str):
        input_phenotype_labels = [input_phenotype_labels]
    bad_phenotypes = set(input_phenotype_labels) - set(self.phenotypes)
    if len(bad_phenotypes) > 0:
        raise ValueError("Error phenotype(s) " + str(bad_phenotypes) + " are not in the data.")
    data = self.copy()
    if len(input_phenotype_labels) == 0:
        return data

    def _swap_in(d, inputs, output):
        # Replace all input keys with a single output key whose value is the
        # max of the replaced call values.
        overlap = set(d.keys()).intersection(inputs)
        if len(overlap) == 0:
            return d
        keepers = [(k, v) for k, v in d.items() if k not in inputs]
        return dict(keepers +
                    [(output_phenotype_label, max([d[x] for x in overlap]))])

    data['phenotype_calls'] = data.apply(lambda x:
        _swap_in(x['phenotype_calls'], input_phenotype_labels, output_phenotype_label), 1)

    def _set_label(d):
        # The label is the (single) phenotype whose call is 1, or NaN.
        vals = [k for k, v in d.items() if v == 1]
        return np.nan if len(vals) == 0 else vals[0]

    data['phenotype_label'] = data.apply(lambda x:
        _set_label(x['phenotype_calls']), 1)
    return data
def GetFormatsWithSignatures(cls, parser_filter_expression=None):
    """Retrieves the format specifications that have signatures.

    This method will create a specification store for parsers that define
    a format specification with signatures and a list of parser names for
    those that do not.

    Args:
        parser_filter_expression (Optional[str]): parser filter expression,
            where None represents all parsers and plugins.

    Returns:
        tuple: containing:

        * FormatSpecificationStore: format specifications with signatures.
        * list[str]: names of parsers that do not have format specifications
            with signatures, or have signatures but also need to be applied
            'brute force'.
    """
    specification_store = specification.FormatSpecificationStore()
    remainder_list = []
    for (parser_name, parser_class) in cls.GetParsers(parser_filter_expression=parser_filter_expression):
        format_specification = parser_class.GetFormatSpecification()
        if (format_specification and format_specification.signatures):
            specification_store.AddSpecification(format_specification)
            # The plist parser is a special case: it has signatures but
            # also needs to be applied 'brute force'.
            if (parser_name == 'plist'):
                remainder_list.append(parser_name)
        else:
            remainder_list.append(parser_name)
    return (specification_store, remainder_list)
def flip_back(output_flipped, flip_pairs, target_type='gaussian-heatmap'):
    """Flip the flipped heatmaps back to the original form.

    Args:
        output_flipped (`torch.Tensor` of shape `(batch_size, num_keypoints, height, width)`):
            The output heatmaps obtained from the flipped images.
        flip_pairs (`torch.Tensor` of shape `(num_keypoints, 2)`):
            Pairs of keypoints which are mirrored (for example, left ear -- right ear).
        target_type (`str`, *optional*, defaults to `"gaussian-heatmap"`):
            Target type to use. Can be gaussian-heatmap or combined-target.
            gaussian-heatmap: Classification target with gaussian distribution.
            combined-target: The combination of classification target
            (response map) and regression target (offset map).

    Returns:
        torch.Tensor: heatmaps flipped back to the original image.

    Raises:
        ValueError: If ``target_type`` is unknown or the input is not 4D.
    """
    if target_type not in ['gaussian-heatmap', 'combined-target']:
        raise ValueError('target_type should be gaussian-heatmap or combined-target')
    if output_flipped.ndim != 4:
        raise ValueError('output_flipped should be [batch_size, num_keypoints, height, width]')
    batch_size, num_keypoints, height, width = output_flipped.shape
    channels = 1
    if target_type == 'combined-target':
        channels = 3
        # The x-offset channel changes sign under a horizontal flip.
        output_flipped[:, 1::3, ...] = -output_flipped[:, 1::3, ...]
    output_flipped = output_flipped.reshape(batch_size, -1, channels, height, width)
    output_flipped_back = output_flipped.clone()
    # Swap the heatmaps of each mirrored keypoint pair.
    for left, right in flip_pairs.tolist():
        output_flipped_back[:, left, ...] = output_flipped[:, right, ...]
        output_flipped_back[:, right, ...] = output_flipped[:, left, ...]
    output_flipped_back = output_flipped_back.reshape((batch_size, num_keypoints, height, width))
    # Undo the horizontal flip along the width dimension.
    output_flipped_back = output_flipped_back.flip(-1)
    return output_flipped_back
def __setitem__(self, key, value):
    """Assign a column, deferring materialization.

    Wraps the value in a `SeriesWeld` (when needed) and stores it in
    ``self.unmaterialized_cols`` under ``key``.

    Args:
        key: Column name.
        value: A numpy ndarray, `SeriesWeld`, or `LazyOpResult` to assign.
    """
    if isinstance(value, np.ndarray):
        dtype = str(value.dtype)
        weld_type = grizzly_impl.numpy_to_weld_type_mapping[dtype]
        self.unmaterialized_cols[key] = SeriesWeld(
            value,
            weld_type,
            self,
            key
        )
    elif isinstance(value, SeriesWeld):
        self.unmaterialized_cols[key] = value
    elif isinstance(value, LazyOpResult):
        # Re-wrap a lazy result so the column remembers its parent frame.
        self.unmaterialized_cols[key] = SeriesWeld(
            value.expr,
            value.weld_type,
            self,
            key
        )
def do(self,
       resource,
       method,
       params=None,
       data=None,
       json=None,
       headers=None):
    """Does the request job.

    Args:
        resource (str): resource uri (relative path).
        method (str): HTTP method.
        params (dict): uri queries.
        data (dict): HTTP body (form).
        json (dict): HTTP body (json).
        headers (dict): HTTP headers.

    Returns:
        RTMResponse
    """
    uri = "{0}/{1}".format(self._api_base, resource)
    if not params:
        params = {}
    # The API token is always sent as a query parameter.
    params.update({'token': self._token})
    req = Request(
        method=method,
        url=uri,
        params=params,
        headers=headers,
        data=data,
        json=json)
    s = Session()
    prepped = s.prepare_request(req)
    resp = s.send(prepped)
    return RTMResponse(resp)
def __init__(self, channel):
    """Constructor.

    Binds one callable per ClearlyServer RPC method onto this stub.

    Args:
        channel: A grpc.Channel.
    """
    self.capture_realtime = channel.unary_stream(
        '/ClearlyServer/capture_realtime',
        request_serializer=protos_dot_clearly__pb2.CaptureRequest.SerializeToString,
        response_deserializer=protos_dot_clearly__pb2.RealtimeEventMessage.FromString,
    )
    self.filter_tasks = channel.unary_stream(
        '/ClearlyServer/filter_tasks',
        request_serializer=protos_dot_clearly__pb2.FilterTasksRequest.SerializeToString,
        response_deserializer=protos_dot_clearly__pb2.TaskMessage.FromString,
    )
    self.filter_workers = channel.unary_stream(
        '/ClearlyServer/filter_workers',
        request_serializer=protos_dot_clearly__pb2.FilterWorkersRequest.SerializeToString,
        response_deserializer=protos_dot_clearly__pb2.WorkerMessage.FromString,
    )
    self.find_task = channel.unary_unary(
        '/ClearlyServer/find_task',
        request_serializer=protos_dot_clearly__pb2.FindTaskRequest.SerializeToString,
        response_deserializer=protos_dot_clearly__pb2.TaskMessage.FromString,
    )
    self.seen_tasks = channel.unary_unary(
        '/ClearlyServer/seen_tasks',
        request_serializer=protos_dot_clearly__pb2.Empty.SerializeToString,
        response_deserializer=protos_dot_clearly__pb2.SeenTasksMessage.FromString,
    )
    self.reset_tasks = channel.unary_unary(
        '/ClearlyServer/reset_tasks',
        request_serializer=protos_dot_clearly__pb2.Empty.SerializeToString,
        response_deserializer=protos_dot_clearly__pb2.Empty.FromString,
    )
    self.get_stats = channel.unary_unary(
        '/ClearlyServer/get_stats',
        request_serializer=protos_dot_clearly__pb2.Empty.SerializeToString,
        response_deserializer=protos_dot_clearly__pb2.StatsMessage.FromString,
    )
def _on_disconnect(self):
self._logger.info("Connection to device %s was interrupted", self.connection_string)
self.connection_interrupted = True | Callback when a device is disconnected unexpectedly.
Args:
adapter_id (int): An ID for the adapter that was connected to the device
connection_id (int): An ID for the connection that has become disconnected | juraj-google-style |
def ToScriptHash(data, unhex=True):
    """Get a script hash of the data.

    Args:
        data (bytes): data to hash.
        unhex (bool): (Default) True. Set to unhexlify the stream. Use when
            the bytes are not raw bytes; i.e. b'aabb'.

    Returns:
        UInt160: script hash.
    """
    if ((len(data) > 1) and unhex):
        data = binascii.unhexlify(data)
    return UInt160(data=binascii.unhexlify(bytes(Crypto.Hash160(data), encoding='utf-8')))
def parse_arguments(argv):
    """Parse command line arguments.

    Args:
        argv: list of command line arguments including program name.

    Returns:
        The parsed arguments as returned by argparse.ArgumentParser.

    Raises:
        ValueError: If --cloud is used without --project-id, or --async is
            used without --cloud.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        # NOTE(review): the original description text was lost in extraction;
        # this summary is reconstructed — confirm against upstream.
        description=textwrap.dedent("""\
            Transform CSV or BigQuery data using the output of analyze."""))
    source_group = parser.add_mutually_exclusive_group(required=True)
    source_group.add_argument(
        '--csv',
        metavar='FILE',
        required=False,
        action='append',
        help='CSV data to transform.')
    source_group.add_argument(
        '--bigquery',
        metavar='PROJECT_ID.DATASET.TABLE_NAME',
        type=str,
        required=False,
        help=('Must be in the form `project.dataset.table_name`. BigQuery '
              'data to transform'))
    parser.add_argument(
        '--analysis',
        metavar='ANALYSIS_OUTPUT_DIR',
        required=True,
        help='The output folder of analyze')
    parser.add_argument(
        '--prefix',
        metavar='OUTPUT_FILENAME_PREFIX',
        required=True,
        type=str)
    parser.add_argument(
        '--output',
        metavar='DIR',
        default=None,
        required=True,
        help=('Google Cloud Storage or Local directory in which '
              'to place outputs.'))
    parser.add_argument(
        '--shuffle',
        action='store_true',
        default=False,
        help='If used, data source is shuffled. This is recommended for training data.')
    parser.add_argument(
        '--batch-size',
        metavar='N',
        type=int,
        default=100,
        help='Larger values increase performance and peak memory usage.')
    cloud_group = parser.add_argument_group(
        title='Cloud Parameters',
        description='These parameters are only used if --cloud is used.')
    cloud_group.add_argument(
        '--cloud',
        action='store_true',
        help='Run preprocessing on the cloud.')
    cloud_group.add_argument(
        '--job-name',
        type=str,
        help='Unique dataflow job name.')
    cloud_group.add_argument(
        '--project-id',
        help='The project to which the job will be submitted.')
    cloud_group.add_argument(
        '--num-workers',
        metavar='N',
        type=int,
        default=0,
        help='Set to 0 to use the default size determined by the Dataflow service.')
    cloud_group.add_argument(
        '--worker-machine-type',
        metavar='NAME',
        type=str,
        help='A machine name from https://cloud.google.com/compute/docs/machine-types.'
             ' If not given, the service uses the default machine type.')
    cloud_group.add_argument(
        '--async',
        dest='async_',  # `args.async` is a syntax error on Python >= 3.7
        action='store_true',
        help='If used, this script returns before the dataflow job is completed.')
    args = parser.parse_args(args=argv[1:])
    if args.cloud and not args.project_id:
        raise ValueError('--project-id is needed for --cloud')
    if args.async_ and not args.cloud:
        raise ValueError('--async should only be used with --cloud')
    if not args.job_name:
        args.job_name = ('dataflow-job-{}'.format(
            datetime.datetime.now().strftime('%Y%m%d%H%M%S')))
    return args
def ReadClientPostingLists(self, keywords):
    """Looks up all clients associated with any of the given keywords.

    Args:
        keywords: A list of keywords we are interested in.

    Returns:
        A dict mapping each keyword to a list of matching clients.
    """
    start_time, end_time, filtered_keywords, _ = self._AnalyzeKeywords(keywords)
    return self.ReadPostingLists(
        filtered_keywords,
        start_time=start_time.AsMicrosecondsSinceEpoch(),
        end_time=end_time.AsMicrosecondsSinceEpoch())
def gene_to_panels(self, case_obj):
    """Fetch all gene panels and group them by gene.

    Args:
        case_obj (scout.models.Case)

    Returns:
        gene_dict (dict): A dictionary with hgnc_id as keys and a set of
            panel names as value.
    """
    LOG.info('Building gene to panels')
    gene_dict = {}
    for panel_info in case_obj.get('panels', []):
        panel_name = panel_info['panel_name']
        panel_version = panel_info['version']
        panel_obj = self.gene_panel(panel_name, version=panel_version)
        if (not panel_obj):
            # Best effort: log and skip missing panels rather than failing.
            LOG.warning('Panel: {0}, version {1} does not exist in database'.format(panel_name, panel_version))
        for gene in panel_obj['genes']:
            hgnc_id = gene['hgnc_id']
            if (hgnc_id not in gene_dict):
                gene_dict[hgnc_id] = set([panel_name])
                continue
            gene_dict[hgnc_id].add(panel_name)
    LOG.info('Gene to panels done')
    return gene_dict
def get_publications():
    """Get list of publications offered by cpress.cz.

    Returns:
        list: List of :class:`.Publication` objects.
    """
    data = DOWNER.download(URL)
    dom = dhtmlparser.parseString(handle_encodnig(data))
    # Each offered publication is rendered as a div with class "polozka".
    book_list = dom.find('div', {'class': 'polozka'})
    books = []
    for book in book_list:
        books.append(_process_book(book))
    return books
def __ne__(self, other):
    """Determines if the date time values are not equal to other.

    Args:
        other (DateTimeValues): date time values to compare against.

    Returns:
        bool: True if the date time values are not equal to other.
    """
    if not isinstance(other, SemanticTime):
        return True
    # Semantic times compare by their class-level sort order only.
    return self._SORT_ORDER != other._SORT_ORDER
async def put(self, cid):
    """Update the description of the content identified by ``cid``.

    Expects a JSON body with a (possibly signed) ``message`` containing
    ``description`` and ``coinid``, plus the owner's ``public_key``.
    Writes the updated fields back to the client, or an error dict, and
    aborts the handler with ``tornado.web.Finish`` on any failure.
    """
    # Optionally verify the request signature before doing anything else.
    if settings.SIGNATURE_VERIFICATION:
        super().verify()
    try:
        body = json.loads(self.request.body)
    except:
        # Any parse failure is reported as a 400; Finish aborts the handler.
        self.set_status(400)
        self.write({'error': 400, 'reason': 'Unexpected data format. JSON required'})
        raise tornado.web.Finish
    public_key = body.get('public_key', None)
    # 'message' may arrive either JSON-encoded or as an already-parsed dict.
    # NOTE(review): if it is neither, 'message' stays unbound and the .get()
    # calls below raise NameError -- confirm this is acceptable upstream.
    if isinstance(body['message'], str):
        message = json.loads(body['message'])
    elif isinstance(body['message'], dict):
        message = body['message']
    descr = message.get('description')
    coinid = message.get('coinid')
    if (not (coinid in settings.bridges.keys())):
        self.set_status(400)
        self.write({'error': 400, 'reason': 'Unknown coin id'})
        raise tornado.web.Finish
    if (not all([public_key, descr, coinid])):
        self.set_status(400)
        self.write({'error': 400, 'reason': 'Missed required fields'})
        raise tornado.web.Finish
    # Derive the owner's blockchain address from the submitted public key.
    owneraddr = self.account.validator[coinid](public_key)
    response = (await self.account.blockchain.ownerbycid(cid=cid))
    if isinstance(response, dict):
        if ('error' in response.keys()):
            error_code = response['error']
            self.set_status(error_code)
            self.write({'error': error_code, 'reason': response['error']})
            raise tornado.web.Finish
    # Only the current owner may change the description.
    if (response != owneraddr):
        self.set_status(403)
        self.write({'error': 403, 'reason': 'Owner does not match.'})
        raise tornado.web.Finish
    # Charge the update fee before dispatching the change.
    # NOTE(review): 'fee' is computed but never used afterwards -- presumably
    # the billing call's side effect is what matters; confirm.
    fee = (await billing.update_description_fee(owneraddr=owneraddr, cid=cid, description=descr))
    if (coinid in settings.bridges.keys()):
        self.account.blockchain.setendpoint(settings.bridges[coinid])
    else:
        # Unreachable in practice: coinid membership was already checked above.
        self.set_status(400)
        self.write({'error': 400, 'reason': 'Invalid coinid'})
        raise tornado.web.Finish
    request = (await self.account.blockchain.setdescrforcid(cid=cid, descr=descr, owneraddr=owneraddr))
    if ('error' in request.keys()):
        self.set_status(request['error'])
        self.write(request)
        raise tornado.web.Finish
    self.write({'cid': cid, 'description': descr, 'coinid': coinid, 'owneraddr': owneraddr})
Accepts:
Query string args:
- "cid" - int
Request body parameters:
- message (signed dict):
- "description" - str
- "coinid" - str
Returns:
dict with following fields:
- "confirmed": None
- "txid" - str
- "description" - str
- "content" - str
- "read_access" - int
- "write_access" - int
- "cid" - int
- "txid" - str
- "seller_pubkey" - str
- "seller_access_string": None or str
Verified: True | codesearchnet |
def get_samples(self, md5='', sha1='', sha256=''):
    """Search for a sample in CRITs. Currently only hashes allowed.

    Args:
        md5: md5sum
        sha1: sha1sum
        sha256: sha256sum

    Returns:
        JSON response dict, or None if nothing matched.
    """
    params = {'api_key': self.api_key, 'username': self.username}
    # Only include the hash parameters that were actually supplied.
    for param_name, digest in (('c-md5', md5), ('c-sha1', sha1), ('c-sha256', sha256)):
        if digest:
            params[param_name] = digest
    response = requests.get('{0}/samples/'.format(self.url),
                            params=params,
                            verify=self.verify,
                            proxies=self.proxies)
    if response.status_code != 200:
        log.error('Non-200 status code: {}'.format(response.status_code))
        return None
    result_data = json.loads(response.text)
    if 'meta' in result_data and 'total_count' in result_data['meta'] \
            and result_data['meta']['total_count'] > 0:
        return result_data
    return None
Args:
md5: md5sum
sha1: sha1sum
sha256: sha256sum
Returns:
JSON response or None if not found | juraj-google-style |
def get_age(dob: PotentialDatetimeType,
            when: PotentialDatetimeType,
            default: str = "") -> Union[int, str]:
    """Age (in whole years) at a particular date, or ``default``.

    Args:
        dob: date of birth
        when: date/time at which to calculate age
        default: value to return if either input is ``None``

    Returns:
        age in whole years (rounded down), or ``default``
    """
    birth_date = coerce_to_pendulum_date(dob)
    target_date = coerce_to_pendulum_date(when)
    if birth_date is None or target_date is None:
        return default
    # Pendulum period objects expose whole-year durations via .years.
    return (target_date - birth_date).years
Args:
dob: date of birth
when: date/time at which to calculate age
default: value to return if either input is ``None``
Returns:
age in whole years (rounded down), or ``default`` | juraj-google-style |
def _SetCredentials(self, **kwds):
    """Fetch credentials and store them on this client.

    Note that we can't simply return credentials, since creating them
    may involve side-effecting self.

    Args:
      **kwds: Additional keyword arguments passed on to GetCredentials.

    Returns:
      None. Sets self._credentials.
    """
    from apitools.base.py import credentials_lib
    args = dict(
        api_key=self._API_KEY,
        client=self,
        client_id=self._CLIENT_ID,
        client_secret=self._CLIENT_SECRET,
        package_name=self._PACKAGE,
        scopes=self._SCOPES,
        user_agent=self._USER_AGENT,
    )
    # Caller-supplied kwargs override the class-level defaults.
    args.update(kwds)
    self._credentials = credentials_lib.GetCredentials(**args)
Note that we can't simply return credentials, since creating them
may involve side-effecting self.
Args:
**kwds: Additional keyword arguments are passed on to GetCredentials.
Returns:
None. Sets self._credentials. | juraj-google-style |
def sample_frames(self, video: 'torch.Tensor', frame_factor: int, min_frames: int, max_frames: int, metadata: Optional[Union[VideoMetadata, dict]]=None, num_frames: Optional[int]=None, fps: Optional[int]=None):
    """Uniformly sample frames from ``video``.

    Either ``num_frames`` (rounded to a multiple of ``frame_factor``) or
    ``fps`` (requires ``metadata``) controls how many frames to keep; the
    two are mutually exclusive.  Returns the sampled frames.
    """
    if fps is not None and num_frames is not None:
        raise ValueError('`num_frames` and `fps` are mutually exclusive arguments, please use only one!')
    # Fall back to the processor-level defaults when not given explicitly.
    num_frames = num_frames if num_frames is not None else self.num_frames
    fps = fps if fps is not None else self.fps
    total_num_frames = video.shape[0]
    if num_frames is not None:
        # Round to a multiple of the vision encoder's temporal patch size.
        num_frames = round(num_frames / frame_factor) * frame_factor
    elif fps is not None:
        if metadata is None:
            raise ValueError('Asked to sample `fps` frames per second but no video metadata was provided which is required when sampling with `fps`. Please pass in `VideoMetadata` object or use a fixed `num_frames` per input video')
        max_frames = math.floor(min(max_frames, total_num_frames) / frame_factor) * frame_factor
        # Desired frame count = duration (s) * target fps, clamped to
        # [min_frames, max_frames] and the true length, then made divisible
        # by frame_factor.
        num_frames = total_num_frames / metadata['fps'] * fps
        num_frames = min(min(max(num_frames, min_frames), max_frames), total_num_frames)
        num_frames = math.floor(num_frames / frame_factor) * frame_factor
    # NOTE(review): if both num_frames and fps resolved to None, this
    # comparison raises TypeError (None > int) -- confirm defaults guarantee
    # at least one is set.
    if num_frames > total_num_frames:
        raise ValueError(f"Video can't be sampled. The inferred `num_frames={num_frames}` exceeds `total_num_frames={total_num_frames}`. Decrease `num_frames` or `fps` for sampling.")
    if num_frames is not None:
        # Evenly spaced indices across the whole clip.
        indices = torch.arange(0, total_num_frames, total_num_frames / num_frames).int()
    else:
        # No target given: keep every frame.
        indices = torch.arange(0, total_num_frames).int()
    video = video[indices].contiguous()
    return video
If `fps` is passed along with metadata, `fps` frames per second are sampled uniformly. Arguments `num_frames`
and `fps` are mutually exclusive.
Args:
video (`torch.Tensor`):
Video that need to be sampled.
frame_factor (`int`):
The temporal patch size of the vision encoder. Number of sampled frames will be rounded to be divisible by frame factor.
min_frames (`int`):
The minimum number of frames that can be sampled.
max_frames (`int`):
The maximum number of frames that can be sampled.
metadata (`VideoMetadata`, *optional*):
Metadata of the video containing information about total duration, fps and total number of frames.
num_frames (`int`, *optional*):
Maximum number of frames to sample. Defaults to `self.num_frames`.
fps (`int`, *optional*):
Target frames to sample per second. Defaults to `self.fps`.
Returns:
torch.Tensor:
Sampled video frames. | github-repos |
def _CheckCacheFileForMatch(self, cache_filename, scopes):
    """Check the cache file to see if it matches the given credentials.

    Args:
      cache_filename: Cache filename to check.
      scopes: Scopes for the desired credentials.

    Returns:
      List of cached scopes (if the cache matches) or None.
    """
    creds = {
        'scopes': sorted(list(scopes)) if scopes else None,
        'svc_acct_name': self.__service_account_name,
    }
    cache_file = _MultiProcessCacheFile(cache_filename)
    try:
        cached_creds_str = cache_file.LockedRead()
        if not cached_creds_str:
            return None
        cached_creds = json.loads(cached_creds_str)
        if creds['svc_acct_name'] == cached_creds['svc_acct_name']:
            # A request with no scopes matches any cached scopes.
            if creds['scopes'] in (None, cached_creds['scopes']):
                return cached_creds['scopes']
    except KeyboardInterrupt:
        raise
    except Exception:
        # Cache reads are best-effort: a malformed or unreadable cache is
        # treated as a miss.  Narrowed from a bare ``except`` so SystemExit
        # and GeneratorExit still propagate.
        pass
    return None
Args:
cache_filename: Cache filename to check.
scopes: Scopes for the desired credentials.
Returns:
List of scopes (if cache matches) or None. | codesearchnet |
def zip(self, destination: typing.Union[(str, Path)]=None, encode: bool=True) -> str:
    """Write mission, dictionary etc. to a MIZ file.

    Args:
        destination: target MIZ file (if None, defaults to the source MIZ
            path with an "_EMIZ" suffix)

    Returns:
        str: path of the written MIZ file
    """
    if encode:
        self._encode()
    if (destination is None):
        destination_path = self.miz_path.parent.joinpath(f'{self.miz_path.stem}_EMIZ.miz')
    else:
        destination_path = elib.path.ensure_file(destination, must_exist=False)
    LOGGER.debug('zipping mission to: %s', destination_path)
    # NOTE(review): the target is first seeded with 'dummy_miz' bytes and then
    # immediately truncated by ZipFile(mode='w') below -- presumably this
    # guarantees the file exists/is valid before writing; confirm intent.
    destination_path.write_bytes(dummy_miz)
    # compression=8 is zipfile.ZIP_DEFLATED.
    with ZipFile(str(destination_path), mode='w', compression=8) as zip_file:
        # Re-add every file under the temp dir, preserving relative layout.
        for (root, _, items) in os.walk(self.temp_dir.absolute()):
            for item in items:
                item_abs_path = Path(root, item).absolute()
                item_rel_path = Path(item_abs_path).relative_to(self.temp_dir)
                zip_file.write(item_abs_path, arcname=item_rel_path)
    return str(destination_path)
Args:
destination: target MIZ file (if none, defaults to source MIZ + "_EMIZ")
Returns: destination file | codesearchnet |
def engine_from_environment() -> Engine:
    """Return an Engine instance configured using environment variables.

    If the environment variables are set but incorrect, an authentication
    failure will occur when attempting to run jobs on the engine.

    Raises:
        EnvironmentError: The API-key environment variable is not set.
    """
    api_key = os.environ.get(ENV_API_KEY)
    if not api_key:
        raise EnvironmentError(
            'Environment variable {} is not set.'.format(ENV_API_KEY))
    # The project id is optional; Engine accepts None.
    return Engine(
        api_key=api_key,
        default_project_id=os.environ.get(ENV_DEFAULT_PROJECT_ID))
If the environment variables are set, but incorrect, an authentication
failure will occur when attempting to run jobs on the engine.
Required Environment Variables:
QUANTUM_ENGINE_PROJECT: The name of a google cloud project, with the
quantum engine enabled, that you have access to.
QUANTUM_ENGINE_API_KEY: An API key for the google cloud project named
by QUANTUM_ENGINE_PROJECT.
Raises:
EnvironmentError: The environment variables are not set. | codesearchnet |
def search_orcid(orcid):
    """Search the ORCID public API for the person behind the given ORCID.

    Args:
        orcid (str): The ORCID to be searched.

    Returns:
        dict: JSON response from the API.

    Raises:
        requests.HTTPError: If the given ORCID cannot be found (404).
    """
    # NOTE(review): the URL literal below is truncated in this copy of the
    # file ("'https:"); the full ORCID public-API endpoint string must be
    # restored from upstream -- TODO confirm.
    url = 'https:
    r = requests.get(url, headers=headers)
    if r.status_code != 200:
        # raise_for_status() only raises for 4xx/5xx responses.
        r.raise_for_status()
    return r.json()
Specfically, return a dictionary with the personal details
(name, etc.) of the person associated with the given ORCID
Args:
orcid (`str`): The ORCID to be searched
Returns:
`dict`: Dictionary with the JSON response from the API
Raises:
`~requests.HTTPError`: If the given ORCID cannot be found, an `~requests.HTTPError`
is raised with status code 404 | juraj-google-style |
def get_item(target, i, opts):
    """The slice read operator (i.e. __getitem__).

    Note: it is unspecified whether target will be mutated or not. In
    general, if target is mutable (like Python lists), it will be mutated.

    Args:
      target: An entity that supports getitem semantics.
      i: Index to read from.
      opts: A GetItemOpts object.

    Returns:
      The read element.

    Raises:
      ValueError: if target is not of a supported type.
    """
    assert isinstance(opts, GetItemOpts)
    if isinstance(target, tensor_array_ops.TensorArray):
        return _tf_tensorarray_get_item(target, i)
    if not tensor_util.is_tf_type(target):
        return _py_get_item(target, i)
    # TF tensor: dispatch on dtype and static shape.
    if target.dtype == dtypes.variant:
        return _tf_tensor_list_get_item(target, i, opts)
    if target.dtype == dtypes.string and target.shape.ndims == 0:
        return _tf_tensor_string_get_item(target, i)
    return _tf_tensor_get_item(target, i)
Note: it is unspecified whether target will be mutated or not. In general,
if target is mutable (like Python lists), it will be mutated.
Args:
target: An entity that supports getitem semantics.
i: Index to read from.
opts: A GetItemOpts object.
Returns:
The read element.
Raises:
ValueError: if target is not of a supported type. | github-repos |
def cumprod(x, axis=None, dtype=None):
    """Return the cumulative product of elements along a given axis.

    Args:
        x: Input tensor.
        axis: Axis along which the cumulative product is computed.
            By default the input is flattened.
        dtype: dtype of returned tensor. Defaults to x.dtype.

    Returns:
        Output tensor.
    """
    op = Cumprod(axis=axis, dtype=dtype)
    return op(x)
Args:
x: Input tensor.
axis: Axis along which the cumulative product is computed.
By default the input is flattened.
dtype: dtype of returned tensor. Defaults to x.dtype.
Returns:
Output tensor. | github-repos |
def parse_name(name):
    """Parse a gs:// URL into the bucket and item names.

    Args:
        name: a GCS URL of the form gs://bucket or gs://bucket/item

    Returns:
        (bucket, item) tuple: the bucket name (no gs:// prefix) and the
        item name if present; either is None when it could not be parsed.
    """
    match = re.match(_STORAGE_NAME, name)
    if match:
        bucket = match.group(1)
        item = match.group(2)
        # The pattern captures the item with its leading '/'; strip it.
        return bucket, item[1:] if item is not None else None
    match = re.match('(' + _OBJECT_NAME + ')', name)
    if match:
        return None, match.group(1)
    return None, None
Args:
name: a GCS URL of the form gs://bucket or gs://bucket/item
Returns:
The bucket name (with no gs:// prefix), and the item name if present. If the name
could not be parsed returns None for both. | juraj-google-style |
def ToHashArray(self):
    """Turn the tree into a list of hashes.

    Returns:
        list: the unique hashes collected by depth-first traversal.
    """
    collected = set()
    MerkleTree.__DepthFirstSearch(self.Root, collected)
    return list(collected)
Returns:
list: | codesearchnet |
def quantile_gaussianize(x):
    """Normalize a sequence of values via rank and Normal c.d.f.

    Args:
        x (array_like): sequence of values.

    Returns:
        Gaussian-normalized values (non-finite entries pass through).

    Example:
        .. doctest::

            >>> from scipy_sugar.stats import quantile_gaussianize
            >>> print(quantile_gaussianize([-1, 0, 2]))
            [-0.67448975  0.          0.67448975]
    """
    from scipy.stats import norm, rankdata

    values = asarray(x, float).copy()
    finite = isfinite(values)
    out = empty_like(values)
    # Non-finite entries are passed through unchanged.
    out[~finite] = values[~finite]
    # Rank the negated values, then map ranks through the inverse survival
    # function -- equivalent to ascending ranks through the Normal ppf.
    ranks = rankdata(-values[finite])
    out[finite] = norm.isf(ranks / (sum(finite) + 1))
    return out
Args:
x (array_like): sequence of values.
Returns:
Gaussian-normalized values.
Example:
.. doctest::
>>> from scipy_sugar.stats import quantile_gaussianize
>>> print(quantile_gaussianize([-1, 0, 2]))
[-0.67448975 0. 0.67448975] | codesearchnet |
def listdir(path='.'):
    """Return a list containing the names of the entries in the directory
    given by path.

    Equivalent to "os.listdir".

    Args:
        path (path-like object): Path or URL.

    Returns:
        list of str: Entries names.
    """
    entries = get_instance(path).list_objects(path, first_level=True)
    # Entry names come back with a trailing '/' for directories; strip it.
    return [name.rstrip('/') for name, _ in entries]
path.
Equivalent to "os.listdir".
Args:
path (path-like object): Path or URL.
Returns:
list of str: Entries names. | codesearchnet |
def _separate_hdxobjects(self, hdxobjects, hdxobjects_name, id_field, hdxobjectclass):
    """Merge HDX objects from the internal dictionary into ``hdxobjects``.

    Takes the list stored under ``hdxobjects_name`` in ``self.data``,
    updates matching entries of ``hdxobjects`` (matched on ``id_field``)
    and appends any entries that were not already present.  The list in
    the internal dictionary is then deleted.

    Args:
        hdxobjects (List[T <= HDXObject]): list of HDX objects to add to or
            update in place
        hdxobjects_name (str): key in the internal dictionary holding the
            incoming list of HDX objects
        id_field (str): field on which to match existing objects
        hdxobjectclass (type): type of the HDX Object to be added/updated

    Returns:
        None
    """
    new_hdxobjects = self.data.get(hdxobjects_name, list())
    ':type : List[HDXObjectUpperBound]'
    if new_hdxobjects:
        hdxobject_names = set()
        # First pass: merge incoming metadata into matching existing objects.
        for hdxobject in hdxobjects:
            hdxobject_name = hdxobject[id_field]
            hdxobject_names.add(hdxobject_name)
            for new_hdxobject in new_hdxobjects:
                if (hdxobject_name == new_hdxobject[id_field]):
                    merge_two_dictionaries(hdxobject, new_hdxobject)
                    break
        # Second pass: append incoming objects that had no existing match.
        for new_hdxobject in new_hdxobjects:
            if (not (new_hdxobject[id_field] in hdxobject_names)):
                hdxobjects.append(hdxobjectclass(new_hdxobject, configuration=self.configuration))
        del self.data[hdxobjects_name]
supplied list of HDX objects or update existing metadata if any objects already exist in the list. The list in
the internal dictionary is then deleted.
Args:
hdxobjects (List[T <= HDXObject]): list of HDX objects to which to add new objects or update existing ones
hdxobjects_name (str): Name of key in internal dictionary from which to obtain list of HDX objects
id_field (str): Field on which to match to determine if object already exists in list
hdxobjectclass (type): Type of the HDX Object to be added/updated
Returns:
None | codesearchnet |
def circuit_to_latex_using_qcircuit(circuit: circuits.Circuit, qubit_order: ops.QubitOrderOrList=ops.QubitOrder.DEFAULT) -> str:
    """Returns a QCircuit-based latex diagram of the given circuit.

    Args:
        circuit: The circuit to represent in latex.
        qubit_order: Determines the order of qubit wires in the diagram.

    Returns:
        Latex code for the diagram.
    """
    drawer = circuit.to_text_diagram_drawer(
        qubit_namer=qcircuit_qubit_namer,
        qubit_order=qubit_order,
        get_circuit_diagram_info=get_qcircuit_diagram_info)
    return _render(drawer)
Args:
circuit: The circuit to represent in latex.
qubit_order: Determines the order of qubit wires in the diagram.
Returns:
Latex code for the diagram. | codesearchnet |
def commit_output(cls, shard_ctx, iterator):
    """Save output references when a shard finishes.

    Inside end_shard(), an output writer can optionally use this method
    to persist some references to the outputs from this shard
    (e.g. a list of filenames), accessible later via
    map_job.Job.get_outputs.

    Args:
      shard_ctx: map_job_context.ShardContext for this shard.
      iterator: an iterator that yields json serializable references to
        the outputs from this shard.
    """
    shard_ctx._state.writer_state["outs"] = tuple(iterator)
Inside end_shard(), an output writer can optionally use this method
to persist some references to the outputs from this shard
(e.g a list of filenames)
Args:
shard_ctx: map_job_context.ShardContext for this shard.
iterator: an iterator that yields json serializable
references to the outputs from this shard.
Contents from the iterator can be accessible later via
map_job.Job.get_outputs. | juraj-google-style |
def run_categorical_analysis(table, schema_list, args):
    """Find vocab values for the categorical columns and write csv files.

    One headerless CSV per string column is written to args.output_dir.

    Args:
        table: Reference to FederatedTable (if args.bigquery_table is false)
            or a regular Table (otherwise).
        schema_list: BigQuery schema as a list of {'name', 'type'} dicts.
        args: the command line args.
    """
    import google.datalab.bigquery as bq
    # Collect the names of all string-typed (categorical) columns.
    categorical_columns = []
    for col_schema in schema_list:
        col_type = col_schema['type'].lower()
        if col_type == 'string':
            categorical_columns.append(col_schema['name'])
    if categorical_columns:
        sys.stdout.write('Running categorical analysis...')
        for name in categorical_columns:
            if args.bigquery_table:
                table_name = parse_table_name(args.bigquery_table)
            else:
                # Placeholder name bound via data_sources below.
                table_name = 'table_name'
            # NOTE(review): the SQL template string literal is missing in this
            # copy of the file ("sql = .format(...)"); restore it from
            # upstream before use -- TODO.
            sql = .format(name=name, table=table_name)
            out_file = os.path.join(args.output_dir,
                                    CATEGORICAL_ANALYSIS_FILE % name)
            if args.bigquery_table:
                df = bq.Query(sql).execute().result().to_dataframe()
            else:
                # Query the federated table through a named data source.
                query = bq.Query(sql, data_sources={'table_name': table})
                df = query.execute().result().to_dataframe()
            # Write the vocab as a headerless CSV.
            string_buff = six.StringIO()
            df.to_csv(string_buff, index=False, header=False)
            file_io.write_string_to_file(out_file, string_buff.getvalue())
        sys.stdout.write('done.\n')
The vocab files are in the from
label1
label2
label3
...
Args:
table: Reference to FederatedTable (if bigquery_table is false) or a
regular Table (otherwise)
schema_list: Bigquery schema json object
args: the command line args | juraj-google-style |
def named(self, name: str) -> 'ColumnExpressionBuilder':
    """Set the column name of a given FHIR path in the View.

    Once the column name is set, the FHIR path is sealed to be immutable.

    Args:
      name: The column name as a string.

    Returns:
      A new ColumnExpressionBuilder with the given alias name.

    Raises:
      AttributeError: if this builder already has child selects.
    """
    if self._children:
        raise AttributeError(f'named() must not be called on a builder with child selects. Got named called on {str(self)}.')
    return ColumnExpressionBuilder(
        self._builder, name, self._children, self._needs_unnest, True)
Sets the column name of a given FHIR path in the View. Once the column
name is set, the FHIR path is sealed to be immutable.
Args:
name: The column name as a string.
Returns:
A new ColumnExpressionBuilder with the given alias name. | github-repos |
def get_compile_config(self):
    """Returns a serialized config with information for compiling the model.

    Returns:
        A dict containing the optimizer, loss, metrics, etc. with which the
        model was compiled, or an empty dict when unavailable.
    """
    has_config = self.compiled and hasattr(self, '_compile_config')
    return self._compile_config.serialize() if has_config else {}
This method returns a config dictionary containing all the information
(optimizer, loss, metrics, etc.) with which the model was compiled.
Returns:
A dict containing information for compiling the model. | github-repos |
def get_image_patches(self, image: np.array, grid_pinpoints: List[Tuple[int, int]], patch_size: int, resample: PILImageResampling, data_format: ChannelDimension, input_data_format: ChannelDimension) -> List[np.array]:
    """Process an image with variable resolutions by dividing it into patches.

    Args:
        image (`np.array`): The input image to be processed.
        grid_pinpoints (List[Tuple[int, int]]): Possible resolutions.
        patch_size (`int`): Size of the patches to divide the image into.
        resample (`PILImageResampling`): Resampling filter for resizing.
        data_format (`ChannelDimension` or `str`): Output channel format.
        input_data_format (`ChannelDimension` or `str`): Input channel format.

    Returns:
        `List[np.array]`: The processed image patches.
    """
    if not isinstance(grid_pinpoints, list):
        raise TypeError('grid_pinpoints must be a list of possible resolutions.')
    image_size = get_image_size(image, channel_dim=input_data_format)
    best_resolution = select_best_resolution(image_size, grid_pinpoints)
    # Resize then pad so the image tiles exactly into patch_size squares.
    resized = self._resize_for_patching(image, best_resolution, resample=resample, input_data_format=input_data_format)
    padded = self._pad_for_patching(resized, best_resolution, input_data_format=input_data_format)
    raw_patches = divide_to_patches(padded, patch_size=patch_size, input_data_format=input_data_format)
    return [
        to_channel_dimension_format(patch, channel_dim=data_format, input_channel_dim=input_data_format)
        for patch in raw_patches
    ]
Args:
image (`np.array`):
The input image to be processed.
grid_pinpoints (List[Tuple[int, int]]):
A list of possible resolutions as tuples.
patch_size (`int`):
Size of the patches to divide the image into.
resample (`PILImageResampling`):
Resampling filter to use if resizing the image.
data_format (`ChannelDimension` or `str`):
The channel dimension format for the output image.
input_data_format (`ChannelDimension` or `str`):
The channel dimension format of the input image.
Returns:
`List[np.array]`: A list of NumPy arrays containing the processed image patches. | github-repos |
def update(self, rid, data, raise_on_error=True):
    """Update the record for the provided Id. Alias for the put() method.

    Args:
        rid (str): The record identifier.
        data (dict): The record data.
        raise_on_error (bool): If True and not r.ok this method will raise
            a RuntimeError.

    Returns:
        object: Python request response.
    """
    return self.put(rid, data, raise_on_error)
Args:
rid (str): The record identifier.
data (dict): The record data.
raise_on_error (bool): If True and not r.ok this method will raise a RunTimeError.
Returns:
object : Python request response. | juraj-google-style |
def find_vasp_calculations():
    """Returns a list of all subdirectories that contain either a
    vasprun.xml file or a compressed vasprun.xml.gz file.

    Returns:
        (List): list of all VASP calculation subdirectories.
    """
    matches = []
    # Plain vasprun.xml first, then gzipped, preserving discovery order.
    for pattern, suffix_re in (('**/vasprun.xml', r'vasprun\.xml'),
                               ('**/vasprun.xml.gz', r'vasprun\.xml\.gz')):
        matches.extend('./' + re.sub(suffix_re, '', path)
                       for path in glob.iglob(pattern, recursive=True))
    return matches
return dir_list + gz_dir_list | Returns a list of all subdirectories that contain either a vasprun.xml file
or a compressed vasprun.xml.gz file.
Args:
None
Returns:
(List): list of all VASP calculation subdirectories. | juraj-google-style |
def parse_newsgroup(line):
    """Parse a newsgroup info line into Python types.

    Args:
        line: An info response line containing newsgroup info.

    Returns:
        A tuple of group name, low-water as integer, high-water as integer
        and posting status.

    Raises:
        ValueError: If the newsgroup info cannot be parsed.

    Note:
        Posting status is a single character, e.g. "y" posting allowed,
        "n" posting not allowed, "m" posting is moderated.
    """
    fields = line.split()
    try:
        group = fields[0]
        low = int(fields[1])
        high = int(fields[2])
        status = fields[3]
    except (IndexError, ValueError):
        raise ValueError('Invalid newsgroup info')
    return (group, low, high, status)
Args:
line: An info response line containing newsgroup info.
Returns:
A tuple of group name, low-water as integer, high-water as integer and
posting status.
Raises:
ValueError: If the newsgroup info cannot be parsed.
Note:
Posting status is a single character and is one of (but not limited to):
"y" posting allowed
"n" posting not allowed
"m" posting is moderated | codesearchnet |
def createTemplate(data):
    """Create a new template.

    Args:
        `data`: json data required for creating a template

    Returns:
        Dictionary containing the details of the template with its ID.
    """
    connection = Qubole.agent()
    return connection.post(Template.rest_entity_path, data)
Args:
`data`: json data required for creating a template
Returns:
Dictionary containing the details of the template with its ID. | codesearchnet |
def get_bit_mask_from_enumerations(enumerations):
    """Compute a composite bit mask from a collection of enumeration values.

    Args:
        enumerations (list): Enumeration values to combine into a composite
            bit mask.

    Returns:
        int: The composite bit mask (0 for an empty collection).
    """
    # Accumulator loop instead of an initializer-less functools.reduce:
    # an empty input now yields 0 rather than raising TypeError.
    mask = 0
    for enumeration in enumerations:
        mask |= enumeration.value
    return mask
enumeration values.
Args:
enumerations (list): A list of enumeration values to be combined in a
composite bit mask.
Returns:
int: The composite bit mask. | codesearchnet |
def firmware_version(self):
    """Return the switch firmware version via a NETCONF 'get' request.

    Returns:
        Dictionary (parsed from the XML reply).

    Raises:
        None
    """
    namespace = 'urn:brocade.com:mgmt:brocade-firmware-ext'
    request_ver = ET.Element('show-firmware-version', xmlns=namespace)
    ver = self._callback(request_ver, handler='get')
    # NOTE(review): the find() expression below is truncated in this copy of
    # the file; the XPath argument must be restored from upstream -- TODO.
    return ver.find(('.
Args:
None
Returns:
Dictionary
Raises:
None | codesearchnet |
def __init__(self, field, value, **kwargs):
    """Initialize a new generic query condition.

    Args:
        field (str): Field name to search on. This should be the
            Pythonified name as in the internal models (e.g. ``first_name``
            rather than ``firstName``).
        value (mixed): The value of the field.
    """
    super(DomainCondition, self).__init__(field=field, value=value, **kwargs)
Args:
field (str): Field name to search on. This should be the
Pythonified name as in the internal models, not the
name as provided in the API e.g. ``first_name`` for
the Customer's first name instead of ``firstName``.
value (mixed): The value of the field. | juraj-google-style |
def cumulative_distribution(self, X):
    """Compute the copula cumulative distribution :math:`C(u, v)`.

    Args:
        X: `np.ndarray` of (u, v) column pairs.

    Returns:
        np.array: cumulative probability.
    """
    self.check_fit()
    U, V = self.split_matrix(X)
    if self.theta == 1:
        # theta == 1 degenerates to the independence copula: C(u, v) = u*v.
        return np.multiply(U, V)
    g = np.power(-np.log(U), self.theta) + np.power(-np.log(V), self.theta)
    return np.exp(-np.power(g, 1.0 / self.theta))
Args:
X: `np.ndarray`
Returns:
np.array: cumulative probability | juraj-google-style |
def add(self, index, value):
    """Add a value to the series, flushing the buffered mean once
    ``interval`` indices have passed since the last flush.

    Args:
        index (int): Index.
        value (float): Value.
    """
    self.buf.append(value)
    if index - self.flush_at < self.interval:
        return
    mean_value = np.mean(self.buf)
    if self.verbose:
        logger.info('iter={} {{{}}}={}'.format(index, self.name, mean_value))
    if self.fd is not None:
        print('{} {:g}'.format(index, mean_value), file=self.fd)
    self.flush_at = index
    self.buf = []
Args:
index (int): Index.
value (float): Value. | codesearchnet |
def get_student_current_grade(self, username, course_id):
    """Return a CurrentGrade object for the user in a course.

    Args:
        username (str): an edx user's username
        course_id (str): an edX course id.

    Returns:
        CurrentGrade: the student's current grade for the course.
    """
    url = urljoin(
        self.base_url,
        '/api/grades/v1/courses/{course_key}/?username={username}'.format(
            username=username,
            course_key=course_id
        )
    )
    resp = self.requester.get(url)
    resp.raise_for_status()
    # The grades endpoint returns a one-element list for a single user.
    return CurrentGrade(resp.json()[0])
Args:
username (str): an edx user's username
course_id (str): an edX course id.
Returns:
CurrentGrade: object representing the student current grade for a course | juraj-google-style |
def create_cloudwatch_log_event(app_name, env, region, rules):
    """Create a CloudWatch log subscription for a lambda from trigger rules.

    Args:
        app_name (str): name of the lambda function
        env (str): Environment/Account for lambda function
        region (str): AWS region of the lambda function
        rules (dict): Trigger rules from the settings

    Raises:
        InvalidEventConfiguration: if ``log_group``, ``filter_name`` or
            ``filter_pattern`` is missing from the rules.
    """
    session = boto3.Session(profile_name=env, region_name=region)
    cloudwatch_client = session.client('logs')

    log_group = rules.get('log_group')
    filter_name = rules.get('filter_name')
    filter_pattern = rules.get('filter_pattern')

    if not log_group:
        LOG.critical('Log group is required and no "log_group" is defined!')
        raise InvalidEventConfiguration('Log group is required and no "log_group" is defined!')
    if not filter_name:
        LOG.critical('Filter name is required and no filter_name is defined!')
        raise InvalidEventConfiguration('Filter name is required and no filter_name is defined!')
    # An empty pattern string is valid (matches everything), so only a
    # missing key is rejected here.
    if filter_pattern is None:
        LOG.critical('Filter pattern is required and no filter_pattern is defined!')
        raise InvalidEventConfiguration('Filter pattern is required and no filter_pattern is defined!')

    lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=env, region=region)
    statement_id = '{}_cloudwatchlog_{}'.format(app_name, filter_name.replace(' ', '_'))
    principal = 'logs.{}.amazonaws.com'.format(region)
    account_id = get_env_credential(env=env)['accountId']
    source_arn = 'arn:aws:logs:{0}:{1}:log-group:{2}:*'.format(region, account_id, log_group)

    # Allow the log group to invoke the lambda, then wire up the filter.
    add_lambda_permissions(
        function=lambda_alias_arn,
        statement_id=statement_id,
        action='lambda:InvokeFunction',
        principal=principal,
        source_arn=source_arn,
        env=env,
        region=region)
    cloudwatch_client.put_subscription_filter(
        logGroupName=log_group,
        filterName=filter_name,
        filterPattern=filter_pattern,
        destinationArn=lambda_alias_arn)
    LOG.info('Created Cloudwatch log event with filter: %s', filter_pattern)
Args:
app_name (str): name of the lambda function
env (str): Environment/Account for lambda function
region (str): AWS region of the lambda function
rules (str): Trigger rules from the settings | codesearchnet |
def delete_tag(self, key, update_session=True):
    """Remove a tag from a resource based on the tag key.

    Args:
        key (str): Key of the tag to delete
        update_session (bool): Automatically add the change to the
            SQLAlchemy session. Default: True

    Returns:
        bool: True if the tag was removed, False if it did not exist.
    """
    # Map keys to tags; with duplicate keys the last one wins, matching
    # the original dict-comprehension behavior.
    tags_by_key = {tag.key: tag for tag in self.tags}
    if key not in tags_by_key:
        return False
    doomed = tags_by_key[key]
    if update_session:
        db.session.delete(doomed)
    self.tags.remove(doomed)
    return True
tag didn't exist
Args:
key (str): Key of the tag to delete
update_session (bool): Automatically add the change to the SQLAlchemy session. Default: True
Returns: | juraj-google-style |
def ParseBookmarkFolderRow(
    self, parser_mediator, query, row, **unused_kwargs):
  """Parses a bookmark folder row.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    query (str): query that created the row.
    row (sqlite3.Row): row.
  """
  query_hash = hash(query)

  event_data = FirefoxPlacesBookmarkFolderEventData()
  event_data.offset = self._GetRowValue(query_hash, row, 'id')
  event_data.query = query
  event_data.title = self._GetRowValue(query_hash, row, 'title') or 'N/A'

  # Emit one event per available timestamp column.
  for column, description in (
      ('dateAdded', definitions.TIME_DESCRIPTION_ADDED),
      ('lastModified', definitions.TIME_DESCRIPTION_MODIFICATION)):
    timestamp = self._GetRowValue(query_hash, row, column)
    if timestamp:
      date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
          timestamp=timestamp)
      event = time_events.DateTimeValuesEvent(date_time, description)
      parser_mediator.ProduceEventWithEventData(event, event_data)
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row. | juraj-google-style |
def parse_dict(self, args: dict[str, Any], allow_extra_keys: bool=False) -> tuple[DataClass, ...]:
    """Populate the dataclass types from a plain dict, without argparse.

    Args:
        args (`dict`):
            dict containing config values
        allow_extra_keys (`bool`, *optional*, defaults to `False`):
            If False, raise an exception if the dict contains keys that are
            not consumed by any dataclass.

    Returns:
        Tuple of the dataclass instances in the same order as they were
        passed to the initializer.
    """
    remaining = set(args)
    instances = []
    for dtype in self.dataclass_types:
        field_names = {f.name for f in dataclasses.fields(dtype) if f.init}
        kwargs = {key: value for key, value in args.items() if key in field_names}
        remaining -= kwargs.keys()
        instances.append(dtype(**kwargs))
    if remaining and not allow_extra_keys:
        raise ValueError(f'Some keys are not used by the HfArgumentParser: {sorted(remaining)}')
    return tuple(instances)
return tuple(outputs) | Alternative helper method that does not use `argparse` at all, instead uses a dict and populating the dataclass
types.
Args:
args (`dict`):
dict containing config values
allow_extra_keys (`bool`, *optional*, defaults to `False`):
Defaults to False. If False, will raise an exception if the dict contains keys that are not parsed.
Returns:
Tuple consisting of:
- the dataclass instances in the same order as they were passed to the initializer. | github-repos |
def copy_directory_structure(destination_directory, relative_path):
    """Create the intermediate directories required for relative_path to
    exist within destination_directory.

    This assumes that relative_path is a directory located within
    destination_directory.

    Examples:
        destination_directory: /tmp/destination
        relative_path: test/unit/
        will create: /tmp/destination/test/unit

    Args:
        destination_directory (str): root of the destination directory where
            the directory structure will be created.
        relative_path (str): relative path that will be created within
            destination_directory.
    """
    full_path = os.path.join(destination_directory, relative_path)
    if os.path.exists(full_path):
        return
    # Bug fix: the original called os.makedirs(destination_directory,
    # relative_path), which passes the relative path as the *mode* argument
    # and never creates the joined path.
    os.makedirs(full_path)
This assumes that relative_path is a directory located within root_dir.
Examples:
destination_directory: /tmp/destination
relative_path: test/unit/
will create: /tmp/destination/test/unit
Args:
destination_directory (str): root of the destination directory where the directory structure will be created.
relative_path (str): relative path that will be created within destination_directory | codesearchnet |
def _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False) -> ContextManager[None]:
    """Returns a context manager for colocating op gradients with an op.

    Internal API. In eager mode, returns a context manager that sets the
    default device for new ops to the same device as the given op. Does the
    same if a function is currently being built (i.e. the current mode is
    graph, but the overall mode is eager). In all other cases, returns a
    `Graph.colocate_with()` context manager, optionally accounting for
    gradients (if a gradient UID is specified).

    Args:
      op: Operation or Tensor with which to colocate.
      gradient_uid: Optional gradient UID to enable colocation of gradients
        during compilation.
      ignore_existing: See `Graph.colocate_with()`.

    Returns:
      A context manager used to colocate ops and gradients with the
      specified operation.
    """
    if context.executing_eagerly():
        if op is not None:
            if not hasattr(op, 'device'):
                # Plain Python values have no .device; convert to a tensor
                # first so a device can be read off it.
                op = convert_to_tensor(op)
            return device(op.device)
        else:
            # Nothing to colocate with: use a no-op context manager.
            return NullContextmanager()
    else:
        default_graph = get_default_graph()
        if isinstance(op, EagerTensor):
            if default_graph.building_function:
                return default_graph.device(op.device)
            else:
                raise ValueError('Encountered an Eager-defined Tensor during graph construction, but a function was not being built.')
        return default_graph._colocate_with_for_gradient(op, gradient_uid=gradient_uid, ignore_existing=ignore_existing)
Internal API. In eager mode, returns a context manager that sets the default
device for new ops to the same device as the given op. Does the same if a
function is currently being built (i.e. the current mode is graph, but the
overall mode is eager).
In all other cases, returns a `Graph.colocate_with()` context manager,
optionally accounting for gradients (if a gradient UID is specified).
Args:
op: Operation or Tensor with which to colocate.
gradient_uid: Optional gradient UID to enable colocation of gradients during
compilation.
ignore_existing: See `Graph.colocate_with()`.
Returns:
A context manager used to colocate ops and gradients with the specified
operation. | github-repos |
def assert_scalar(tensor, name=None, message=None):
    """Asserts that the given `tensor` is a scalar (i.e. zero-dimensional).

    This function raises `ValueError` unless it can be certain that the
    given `tensor` is a scalar. `ValueError` is also raised if the shape of
    `tensor` is unknown.

    Args:
      tensor: A `Tensor`.
      name: A name for this operation. Defaults to "assert_scalar".
      message: A string to prefix to the default message.

    Returns:
      The input tensor (potentially converted to a `Tensor`).

    Raises:
      ValueError: If the tensor is not scalar (rank 0), or if its shape is
        unknown.
    """
    with ops.name_scope(name, 'assert_scalar', [tensor]) as name_scope:
        tensor = ops.convert_to_tensor(tensor, name=name_scope)
        static_shape = tensor.get_shape()
        prefix = _message_prefix(message)
        if static_shape.ndims != 0:
            # In graph mode the tensor has a usable .name; in eager it does not.
            if context.executing_eagerly():
                raise ValueError('%sExpected scalar shape, saw shape: %s.' % (prefix, static_shape))
            raise ValueError('%sExpected scalar shape for %s, saw shape: %s.' % (prefix, tensor.name, static_shape))
        return tensor
This function raises `ValueError` unless it can be certain that the given
`tensor` is a scalar. `ValueError` is also raised if the shape of `tensor` is
unknown.
Args:
tensor: A `Tensor`.
name: A name for this operation. Defaults to "assert_scalar"
message: A string to prefix to the default message.
Returns:
The input tensor (potentially converted to a `Tensor`).
Raises:
ValueError: If the tensor is not scalar (rank 0), or if its shape is
unknown. | github-repos |
def __eq__(self, other):
    """Two messages of the same class with the same topic, headers, and
    body are equal.

    The "sent-at" header is excluded from the comparison as it is set
    automatically and depends on when the object is created.

    Args:
        other (object): The object to check for equality.

    Returns:
        bool: True if the messages are equal.
    """
    if not isinstance(other, self.__class__):
        return False
    own_headers = self._headers.copy()
    other_headers = other._headers.copy()
    # Drop the auto-populated timestamp header from both sides.
    own_headers.pop("sent-at", None)
    other_headers.pop("sent-at", None)
    return (
        self.topic == other.topic
        and self.body == other.body
        and own_headers == other_headers
    )
The "sent-at" header is excluded from the equality check as this is set
automatically and is dependent on when the object is created.
Args:
other (object): The object to check for equality.
Returns:
bool: True if the messages are equal. | juraj-google-style |
def DEFINE_boolean(flag_name, default_value, docstring):
    """Defines a flag of type 'boolean'.

    Registers both ``--<flag_name>`` and a ``--no<flag_name>`` negation.

    Args:
        flag_name: The name of the flag as a string.
        default_value: The default value the flag should take as a boolean.
        docstring: A helpful message explaining the use of the flag.
    """
    def _parse_bool(bool_str):
        # Accept "true"/"t"/"1" (case-insensitive) as True.
        return bool_str.lower() in ('true', 't', '1')

    get_context_parser().add_argument(
        '--' + flag_name,
        nargs='?',
        const=True,
        help=docstring,
        default=default_value,
        type=_parse_bool)
    get_context_parser().add_argument(
        '--no' + flag_name,
        action='store_false',
        dest=flag_name.replace('-', '_'))
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as a boolean.
docstring: A helpful message explaining the use of the flag. | juraj-google-style |
def wait_stopped(self, timeout=None, force=False):
    """Wait for the thread to stop.

    You must have previously called signal_stop or this function will hang.

    Args:
        timeout (float): Maximum time in seconds to wait for the thread to
            stop before raising TimeoutExpiredError.
        force (bool): If True, no error is raised when the thread is still
            alive after the timeout elapses.

    Raises:
        TimeoutExpiredError: If the thread is still alive after `timeout`
            seconds and `force` is False.
    """
    self.join(timeout)
    # Only a non-forced wait on a still-running thread is an error.
    if self.is_alive() and force is False:
        raise TimeoutExpiredError('Error waiting for background thread to exit', timeout=timeout)
You must have previously called signal_stop or this function will
hang.
Args:
timeout (float): The maximum time to wait for the thread to stop
before raising a TimeoutExpiredError. If force is True,
TimeoutExpiredError is not raised and the thread is just
marked as a daemon thread so that it does not block cleanly
exiting the process.
force (bool): If true and the thread does not exit in timeout seconds
no error is raised since the thread is marked as daemon and will
be killed when the process exits. | codesearchnet |
class FixedThreshold(ThresholdFn):
    """Applies a fixed cutoff value to anomaly scores.

    This `ThresholdFn` is stateless and uses a pre-defined cutoff value to
    classify anomaly scores. Scores below the cutoff are labeled normal;
    scores at or above the cutoff are labeled as outliers.

    Args:
        cutoff (float): The fixed threshold value. Anomaly scores at or above
            this value will be labeled as outliers.
        **kwargs: Additional keyword arguments passed to the base
            `ThresholdFn` constructor.
    """

    def __init__(self, cutoff: float, **kwargs):
        super().__init__(**kwargs)
        self._cutoff = cutoff

    @property
    def is_stateful(self) -> bool:
        # A fixed cutoff needs no accumulated state.
        return False

    @property
    def threshold(self) -> float:
        return self._cutoff

    def apply(self, score: Optional[float]) -> Optional[int]:
        """Classify a single anomaly score against the fixed cutoff."""
        if score is None:
            return None
        if math.isnan(score):
            return self._missing_label
        return self._normal_label if score < self.threshold else self._outlier_label
This `ThresholdFn` is stateless and uses a pre-defined cutoff value to
classify anomaly scores. Scores below the cutoff are considered normal, while
scores at or above the cutoff are classified as outliers.
Args:
cutoff (float): The fixed threshold value. Anomaly scores at or above this
value will be labeled as outliers.
**kwargs: Additional keyword arguments to be passed to the base
`ThresholdFn` constructor. | github-repos |
def circuit_diagram_info(val: Any, args: Optional[CircuitDiagramInfoArgs]=None, default=RaiseTypeErrorIfNotProvided):
    """Requests information on drawing an operation in a circuit diagram.

    Calls _circuit_diagram_info_ on `val`. If `val` doesn't have
    _circuit_diagram_info_, or it returns NotImplemented, that indicates that
    diagram information is not available.

    Args:
        val: The operation or gate that will need to be drawn.
        args: A CircuitDiagramInfoArgs describing the desired drawing style.
        default: A default result to return if the value doesn't have circuit
            diagram information. If not specified, a TypeError is raised
            instead.

    Returns:
        If `val` has no _circuit_diagram_info_ method or it returns
        NotImplemented, then `default` is returned (or a TypeError is
        raised if no `default` is specified).
        Otherwise, the value returned by _circuit_diagram_info_ is returned.

    Raises:
        TypeError: `val` doesn't have circuit diagram information and
            `default` was not specified.
    """
    # collections.Iterable was removed in Python 3.10; the abstract base
    # classes now live only in collections.abc.
    from collections.abc import Iterable

    if args is None:
        args = CircuitDiagramInfoArgs.UNINFORMED_DEFAULT
    getter = getattr(val, '_circuit_diagram_info_', None)
    result = NotImplemented if getter is None else getter(args)
    # A bare string is shorthand for a single wire symbol. This check must
    # precede the Iterable check because str is itself iterable.
    if isinstance(result, str):
        return CircuitDiagramInfo(wire_symbols=(result,))
    if isinstance(result, Iterable):
        return CircuitDiagramInfo(wire_symbols=tuple(result))
    if result is not NotImplemented:
        return result
    if default is not RaiseTypeErrorIfNotProvided:
        return default
    if getter is None:
        raise TypeError("object of type '{}' has no _circuit_diagram_info_ method.".format(type(val)))
    raise TypeError("object of type '{}' does have a _circuit_diagram_info_ method, but it returned NotImplemented.".format(type(val)))
Calls _circuit_diagram_info_ on `val`. If `val` doesn't have
_circuit_diagram_info_, or it returns NotImplemented, that indicates that
diagram information is not available.
Args:
val: The operation or gate that will need to be drawn.
args: A CircuitDiagramInfoArgs describing the desired drawing style.
default: A default result to return if the value doesn't have circuit
diagram information. If not specified, a TypeError is raised
instead.
Returns:
If `val` has no _circuit_diagram_info_ method or it returns
NotImplemented, then `default` is returned (or a TypeError is
raised if no `default` is specified).
Otherwise, the value returned by _circuit_diagram_info_ is returned.
Raises:
TypeError:
`val` doesn't have circuit diagram information and `default` was
not specified. | codesearchnet |
def GetHostname(self, session_identifier=CURRENT_SESSION):
    """Retrieves the hostname related to the event.

    If the hostname is not stored in the event it is determined based
    on the preprocessing information that is stored inside the storage file.

    Args:
        session_identifier (Optional[str]): session identifier, where
            CURRENT_SESSION represents the active session.

    Returns:
        str: hostname, or an empty string if none is known.
    """
    artifact = self._hostnames.get(session_identifier, None)
    if not artifact:
        return ''
    return artifact.name or ''
If the hostname is not stored in the event it is determined based
on the preprocessing information that is stored inside the storage file.
Args:
session_identifier (Optional[str])): session identifier, where
CURRENT_SESSION represents the active session.
Returns:
str: hostname. | juraj-google-style |
def diff_parameters(old_params, new_params):
    """Compares the old vs. new parameters and returns a "diff".

    If there are no changes, an empty list is returned.

    Args:
        old_params (dict): old parameters
        new_params (dict): new parameters

    Returns:
        list: A list of differences
    """
    changes, diff = diff_dictionaries(old_params, new_params)
    return [] if changes == 0 else diff
If there are no changes, we return an empty list.
Args:
old_params(dict): old parameters
new_params(dict): new parameters
Returns:
list: A list of differences | juraj-google-style |
def _get_events_data(object_key: str) -> List[dict]:
    """Get the list of event data for the object with the specified key.

    Args:
        object_key (str): Key of an object in the database.

    Returns:
        List[dict]: one parsed event dictionary per event id.
    """
    key = _keys.events_data(object_key)
    # Each stored hash value is the repr of a dict; literal_eval parses it
    # back without executing arbitrary code.
    return [
        literal_eval(DB.get_hash_value(key, event_id))
        for event_id in _get_events_list(object_key)
    ]
Args:
object_key (str): Key of an object in the database. | juraj-google-style |
def _restore_output_tensor_names(graph_def: graph_pb2.GraphDef) -> graph_pb2.GraphDef:
  """Restores the output tensor names of the converted model.

  During conversion the original output tensor names are embedded in the
  `tf_saved_model.index_path` attribute of `_Retval` nodes (and may become the
  Retval node names themselves). Since Retval nodes are not used in
  SavedModel, this removes them and renames the nodes feeding them back to
  the expected output names.

  Args:
    graph_def: the converted GraphDef.

  Returns:
    The GraphDef with Retval nodes removed and output tensor names restored.
  """
  # Maps the name of the node feeding each _Retval to the name that node
  # should carry in the final graph.
  output_renaming_map = {}
  with session.Session(graph=ops.Graph()):
    importer.import_graph_def(graph_def, name='')
    graph = ops.get_default_graph()
    for op in graph.get_operations():
      if op.type == '_Retval':
        expected_node_name = op.name
        if op.get_attr('tf_saved_model.index_path') is not None:
          # The first index-path entry holds the original tensor name as
          # "node:output"; keep only the node part.
          index_path_name = op.get_attr('tf_saved_model.index_path')[0]
          index_path_name = index_path_name.decode('utf-8').split(':')[0]
          try:
            index_path_node = graph.get_operation_by_name(index_path_name)
            if index_path_node.type == '_Retval':
              expected_node_name = index_path_name
          except KeyError:
            # Referenced node does not exist; fall back to op.name.
            pass
        retval_input_node_name = op.inputs[0].op.name
        output_renaming_map[retval_input_node_name] = expected_node_name
  # Iterate in reverse so removing _Retval entries does not skip nodes.
  for node in reversed(graph_def.node):
    if node.name in output_renaming_map:
      node.name = output_renaming_map[node.name]
    elif node.op == '_Retval':
      graph_def.node.remove(node)
    else:
      # Rewrite data inputs that reference a renamed node.
      for idx, input_name in enumerate(node.input):
        if input_name in output_renaming_map:
          node.input[idx] = output_renaming_map[input_name]
      # Control inputs ("^name") are removed while iterating in reverse and
      # re-appended with the renamed target afterwards.
      updating_inputs = []
      for input_name in reversed(node.input):
        if input_name.startswith('^') and input_name[1:] in output_renaming_map:
          updating_inputs.append(input_name[1:])
          node.input.remove(input_name)
      for updating_input in updating_inputs:
        node.input.append('^' + output_renaming_map[updating_input])
  return graph_def
During the conversion, the output tensor names of the original model are
embedded in the `tf_saved_model.index_path` attribute of the RetVal nodes and
might become the name of Retval nodes as well (with an index suffix if there
are multiple output tensors from one node). Since Retval nodes are not used in
SavedModel, this function removes them and restore the names to the actual
output tensors.
Args:
graph_def: the converted GraphDef.
Returns:
The GraphDef with Retval nodes removed and output tensor names restored. | github-repos |
def _ParseHeader(self, parser_mediator, file_object):
    """Parses a CUPS IPP header from a file-like object.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        file_object (dfvfs.FileIO): file-like object.

    Raises:
        UnableToParseFile: when the header cannot be parsed.
    """
    header_map = self._GetDataTypeMap('cups_ipp_header')
    try:
        header, _ = self._ReadStructureFromFileObject(file_object, 0, header_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.UnableToParseFile(
            '[{0:s}] Unable to parse header with error: {1!s}'.format(
                self.NAME, exception))
    format_version = '{0:d}.{1:d}'.format(
        header.major_version, header.minor_version)
    if format_version not in self._SUPPORTED_FORMAT_VERSIONS:
        raise errors.UnableToParseFile(
            '[{0:s}] Unsupported format version {1:s}.'.format(
                self.NAME, format_version))
    # A non-standard operation identifier is logged but not treated as fatal.
    if header.operation_identifier != 5:
        display_name = parser_mediator.GetDisplayName()
        logger.debug(
            '[{0:s}] Non-standard operation identifier: 0x{1:08x} in file header of: {2:s}.'.format(
                self.NAME, header.operation_identifier, display_name))
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the header cannot be parsed. | codesearchnet |
def parameterized_send(self, request, parameter_list):
    """Send batched requests for a list of parameters.

    Args:
        request (str): Request template to send, like "%s.*?\n"
        parameter_list (list): parameters to format with, like
            ["TTLIN", "TTLOUT"]

    Returns:
        dict: {parameter: response_queue}, preserving parameter order.
    """
    return OrderedDict(
        (parameter, self.send(request % parameter))
        for parameter in parameter_list)
Args:
request (str): Request to send, like "%s.*?\n"
parameter_list (list): parameters to format with, like
["TTLIN", "TTLOUT"]
Returns:
dict: {parameter: response_queue} | juraj-google-style |
def write_structure(times=None):
    """Produce a formatted record of a times data structure.

    Args:
        times (Times, optional): If not provided, uses the current root timer.

    Returns:
        str: Timer tree hierarchy in a formatted string.

    Raises:
        TypeError: If provided argument is not a Times object.
    """
    # Default to the root timer when no structure is supplied.
    if times is None:
        return report_loc.write_structure(f.root.times)
    if not isinstance(times, Times):
        raise TypeError("Expected Times instance for param 'times' (default is root).")
    return report_loc.write_structure(times)
Args:
times (Times, optional): If not provided, uses the current root timer.
Returns:
str: Timer tree hierarchy in a formatted string.
Raises:
TypeError: If provided argument is not a Times object. | juraj-google-style |
def __init__(self, encoding=None, suppress_output=False):
  """Constructor.

  Args:
    encoding: Encoding override.
      ascii -- ASCII art. This is the default.
      utf8 -- UTF-8 unicode.
      win -- Windows code page 437.
    suppress_output: True to create a ConsoleAttr that doesn't want to output
      anything.
  """
  if not encoding:
    encoding = self._GetConsoleEncoding()
  elif encoding == 'win':
    # 'win' is shorthand for the Windows OEM code page.
    encoding = 'cp437'
  self._encoding = encoding or 'ascii'
  self._term = '' if suppress_output else os.getenv('TERM', '').lower()
  if self.SupportsAnsi():
    # ANSI control sequence introducer plus SGR codes for bold/italic.
    self._csi = '\x1b['
    self._font_bold = '1'
    self._font_italic = '4'
  else:
    self._csi = None
    self._font_bold = ''
    self._font_italic = ''
  # NOTE(review): screen-reader detection is hard-wired off here -- confirm
  # whether it should be derived from user/environment properties.
  is_screen_reader = False
  if self._encoding == 'utf8' and (not is_screen_reader):
    self._box_line_characters = BoxLineCharactersUnicode()
    self._bullets = self._BULLETS_UNICODE
    self._progress_tracker_symbols = ProgressTrackerSymbolsUnicode()
  elif self._encoding == 'cp437' and (not is_screen_reader):
    self._box_line_characters = BoxLineCharactersUnicode()
    self._bullets = self._BULLETS_WINDOWS
    self._progress_tracker_symbols = ProgressTrackerSymbolsAscii()
  else:
    # Fall back to pure-ASCII art, or screen-reader-friendly box drawing.
    self._box_line_characters = BoxLineCharactersAscii()
    if is_screen_reader:
      self._box_line_characters = BoxLineCharactersScreenReader()
    self._bullets = self._BULLETS_ASCII
    self._progress_tracker_symbols = ProgressTrackerSymbolsAscii()
  # NOTE(review): the raw-key function is stored in a single-element list --
  # presumably to avoid it being bound as an instance method; confirm.
  self._get_raw_key = [console_attr_os.GetRawKeyFunction()]
  self._term_size = (0, 0) if suppress_output else console_attr_os.GetTermSize()
  # Cache for display width computations.
  self._display_width_cache = {}
Args:
encoding: Encoding override.
ascii -- ASCII art. This is the default.
utf8 -- UTF-8 unicode.
win -- Windows code page 437.
suppress_output: True to create a ConsoleAttr that doesn't want to output
anything. | github-repos |
def build_byte_align_buff(bits):
    """Pad the left side of a bitarray with 0s to align its length with byte boundaries.

    Args:
        bits: A bitarray to be padded and aligned.

    Returns:
        A newly aligned bitarray.
    """
    leftover = len(bits) % 8
    if leftover:
        # bitarray(n) is uninitialized, so the padding must be zeroed.
        padding = bitarray(8 - leftover)
        padding.setall(False)
    else:
        padding = bitarray()
    return padding + bits
Args:
bits: A bitarray to be padded and aligned.
Returns:
A newly aligned bitarray. | codesearchnet |
def _ValidateDataTypeDefinition(cls, data_type_definition):
    """Validates the data type definition.

    Checks that the definition name and every member name are valid Python
    identifiers, are not keywords, do not start with an underscore, and are
    not defined twice.

    Args:
        data_type_definition (DataTypeDefinition): data type definition.

    Raises:
        ValueError: if the data type definition is not considered valid.
    """
    type_name = data_type_definition.name
    if not cls._IsIdentifier(type_name):
        raise ValueError(
            'Data type definition name: {0!s} not a valid identifier'.format(
                type_name))
    if keyword.iskeyword(type_name):
        raise ValueError(
            'Data type definition name: {0!s} matches keyword'.format(
                type_name))
    members = getattr(data_type_definition, 'members', None)
    if not members:
        raise ValueError(
            'Data type definition name: {0!s} missing members'.format(
                type_name))
    seen_attribute_names = set()
    for member_definition in members:
        attribute_name = member_definition.name
        if not cls._IsIdentifier(attribute_name):
            raise ValueError('Attribute name: {0!s} not a valid identifier'.format(
                attribute_name))
        if attribute_name.startswith('_'):
            raise ValueError('Attribute name: {0!s} starts with underscore'.format(
                attribute_name))
        if keyword.iskeyword(attribute_name):
            raise ValueError('Attribute name: {0!s} matches keyword'.format(
                attribute_name))
        if attribute_name in seen_attribute_names:
            raise ValueError('Attribute name: {0!s} already defined'.format(
                attribute_name))
        seen_attribute_names.add(attribute_name)
Args:
data_type_definition (DataTypeDefinition): data type definition.
Raises:
ValueError: if the data type definition is not considered valid. | juraj-google-style |
def convert(self):
  """Converts a Jax serving func based on instance variables.

  Returns:
    The converted data in serialized format.

  Raises:
    ImportError: If cannot import the jit from jax.
    ValueError:
      No serving function is specified.
      Input tensors are not specified.
      The input mapping does not match the serving functions.
      Failed to convert the given Jax function to hlo.
  """
  if not _jit:
    raise ImportError('Cannot import jit from jax.')
  if not self._serving_funcs:
    raise ValueError('No serving func is specified.')
  if not self._inputs:
    raise ValueError('Input tensors are not specified.')
  if len(self._inputs) != len(self._serving_funcs):
    msg = 'Input tensor mapping len {} does not match serving func len {}.'.format(len(self._inputs), len(self._serving_funcs))
    raise ValueError(msg)
  if not isinstance(self._inputs, (tuple, list)):
    raise ValueError('Input tensors should be pass in a tuple list wrapped in an array.')
  # Only a single serving function is currently supported.
  if len(self._serving_funcs) > 1:
    raise ValueError('Currently only support single serving function.')
  if not isinstance(self._inputs[0], (tuple, list)):
    raise ValueError('The input placeholders are not a dictionary.')
  # Split the (name, tensor) pairs while preserving their order.
  input_names = []
  ordered_inputs = []
  for input_name, tensor in self._inputs[0]:
    input_names.append(input_name)
    ordered_inputs.append(tensor)
  try:
    # Trace and lower the jitted function to a serialized HLO module proto.
    hlo_proto = _jit(self._serving_funcs[0]).trace(*ordered_inputs).lower(lowering_platforms=('cpu',)).compiler_ir('hlo').as_serialized_hlo_module_proto()
  except Exception:
    raise ValueError('Failed to convert the given Jax function to hlo.')
  converter_kwargs = {'input_content': hlo_proto, 'input_names': input_names, 'is_proto_format': True}
  converter_kwargs.update(self._get_base_converter_args())
  # Resolve quantization settings and validate requested I/O types before
  # handing everything to the HLO-based converter.
  quant_mode = QuantizationMode(self.optimizations, self.target_spec, self.representative_dataset, None, experimental_qdq_annotation=self._experimental_strict_qdq)
  self._validate_inference_input_output_types(quant_mode)
  converter_kwargs.update(quant_mode.converter_flags())
  result = _convert_jax_hlo(**converter_kwargs)
  # Apply post-conversion optimizations (e.g. quantization) before returning.
  return self._optimize_tflite_model(result, quant_mode, _build_conversion_flags(**converter_kwargs).debug_options, quant_io=self.experimental_new_quantizer)
Returns:
The converted data in serialized format.
Raises:
ImportError:
If cannot import the jit from jax.
ValueError:
No serving function is specified.
Input tensors are not specified.
The truth value of an array with more than one element is ambiguous.
Failed to convert the given Jax function to hlo. | github-repos |
class CustomObjectScope(object):
    """Exposes custom classes/functions to Keras deserialization internals.

    Under a scope `with custom_object_scope(objects_dict)`, Keras methods
    such as `tf.keras.models.load_model` or
    `tf.keras.models.model_from_config` will be able to deserialize any
    custom object referenced by a saved config (e.g. a custom layer or
    metric).

    Args:
        *args: Dictionary or dictionaries of `{name: object}` pairs.
    """

    def __init__(self, *args):
        # Each positional argument is a {name: object} mapping.
        self.custom_objects = args
        self.backup = None

    def __enter__(self):
        # Snapshot the global registry so it can be restored on exit.
        self.backup = _GLOBAL_CUSTOM_OBJECTS.copy()
        for mapping in self.custom_objects:
            _GLOBAL_CUSTOM_OBJECTS.update(mapping)
        return self

    def __exit__(self, *args, **kwargs):
        # Restore the registry exactly as it was before entering the scope.
        _GLOBAL_CUSTOM_OBJECTS.clear()
        _GLOBAL_CUSTOM_OBJECTS.update(self.backup)
Under a scope `with custom_object_scope(objects_dict)`, Keras methods such
as `tf.keras.models.load_model` or `tf.keras.models.model_from_config`
will be able to deserialize any custom object referenced by a
saved config (e.g. a custom layer or metric).
Example:
Consider a custom regularizer `my_regularizer`:
```python
layer = Dense(3, kernel_regularizer=my_regularizer)
config = layer.get_config() # Config contains a reference to `my_regularizer`
...
# Later:
with custom_object_scope({'my_regularizer': my_regularizer}):
layer = Dense.from_config(config)
```
Args:
*args: Dictionary or dictionaries of `{name: object}` pairs. | github-repos |
def _adjust_block(p, ip, filters, block_id=None):
  """Adjusts the input `previous path` to match the shape of the `input`.

  Used in situations where the output number of filters needs to be changed.

  Args:
    p: Input tensor which needs to be modified
    ip: Input tensor whose shape needs to be matched
    filters: Number of output filters to be matched
    block_id: String block_id

  Returns:
    Adjusted Keras tensor
  """
  # Channel and spatial axes depend on the image data format.
  channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1
  img_dim = 2 if backend.image_data_format() == 'channels_first' else -2
  with backend.name_scope('adjust_block'):
    if p is None:
      p = ip
    elif p.shape[img_dim] != ip.shape[img_dim]:
      # Spatial sizes differ: downsample `p` via two strided average-pool
      # paths (the second shifted by one pixel) and concatenate the results.
      with backend.name_scope(f'adjust_reduction_block_{block_id}'):
        p = layers.Activation('relu', name=f'adjust_relu_1_{block_id}')(p)
        p1 = layers.AveragePooling2D((1, 1), strides=(2, 2), padding='valid', name=f'adjust_avg_pool_1_{block_id}')(p)
        # NOTE(review): this line appears truncated in this source -- the
        # Conv2D arguments after `filters` are missing. Restore from the
        # upstream implementation before use.
        p1 = layers.Conv2D(filters
        p2 = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
        p2 = layers.Cropping2D(cropping=((1, 0), (1, 0)))(p2)
        p2 = layers.AveragePooling2D((1, 1), strides=(2, 2), padding='valid', name=f'adjust_avg_pool_2_{block_id}')(p2)
        # NOTE(review): truncated line -- see note above.
        p2 = layers.Conv2D(filters
        p = layers.concatenate([p1, p2], axis=channel_dim)
        p = layers.BatchNormalization(axis=channel_dim, momentum=0.9997, epsilon=0.001, name=f'adjust_bn_{block_id}')(p)
    elif p.shape[channel_dim] != filters:
      # Channel counts differ: project to `filters` with a 1x1 convolution.
      with backend.name_scope(f'adjust_projection_block_{block_id}'):
        p = layers.Activation('relu')(p)
        p = layers.Conv2D(filters, (1, 1), strides=(1, 1), padding='same', name=f'adjust_conv_projection_{block_id}', use_bias=False, kernel_initializer='he_normal')(p)
        p = layers.BatchNormalization(axis=channel_dim, momentum=0.9997, epsilon=0.001, name=f'adjust_bn_{block_id}')(p)
  return p
Used in situations where the output number of filters needs to be changed.
Args:
p: Input tensor which needs to be modified
ip: Input tensor whose shape needs to be matched
filters: Number of output filters to be matched
block_id: String block_id
Returns:
Adjusted Keras tensor | github-repos |
def to_image(dataset):
    """Convert ``dataset`` into a :class:`~trollimage.xrimage.XRImage` instance.

    This function makes no other changes. To get an enhanced image,
    possibly with overlays and decoration, see
    :func:`~get_enhanced_image`.

    Args:
        dataset (xarray.DataArray): Data to be converted to an image.

    Returns:
        Instance of :class:`~trollimage.xrimage.XRImage`.
    """
    squeezed = dataset.squeeze()
    # Anything with fewer than two dimensions cannot form an image plane.
    if squeezed.ndim < 2:
        raise ValueError("Need at least a 2D array to make an image.")
    return XRImage(squeezed)
Convert the ``dataset`` into an instance of the
:class:`~trollimage.xrimage.XRImage` class. This function makes no other
changes. To get an enhanced image, possibly with overlays and decoration,
see :func:`~get_enhanced_image`.
Args:
dataset (xarray.DataArray): Data to be converted to an image.
Returns:
Instance of :class:`~trollimage.xrimage.XRImage`. | juraj-google-style |
def _validate_all_blocks_supported(ir_blocks, query_metadata_table):
    """Validate that all IR blocks and ConstructResult fields passed to the backend are supported.

    Args:
        ir_blocks: List[BasicBlock], IR blocks to validate.
        query_metadata_table: QueryMetadataTable, object containing all metadata collected during
            query processing, including location metadata (e.g. which locations
            are folded or optional).

    Raises:
        NotImplementedError, if any block or ConstructResult field is unsupported.
    """
    if len(ir_blocks) < 3:
        raise AssertionError(u'Unexpectedly attempting to validate IR blocks with fewer than 3 blocks. A minimal query is expected to have at least a QueryRoot, GlobalOperationsStart, and ConstructResult block. The query metadata table is {}.'.format(query_metadata_table))
    construct_result = _get_construct_result(ir_blocks)
    # Every block before the trailing ConstructResult must be either
    # supported or explicitly skippable.
    unsupported_blocks = [
        block for block in ir_blocks[:-1]
        if not isinstance(block, constants.SUPPORTED_BLOCK_TYPES)
        and not isinstance(block, constants.SKIPPABLE_BLOCK_TYPES)
    ]
    unsupported_fields = []
    for field_name, field in six.iteritems(construct_result.fields):
        if not isinstance(field, constants.SUPPORTED_OUTPUT_EXPRESSION_TYPES):
            unsupported_fields.append((field_name, field))
        elif field.location.field in constants.UNSUPPORTED_META_FIELDS:
            unsupported_fields.append((field_name, field))
    if unsupported_blocks or unsupported_fields:
        raise NotImplementedError(u'Encountered unsupported blocks {} and unsupported fields {} during construction of SQL query tree for IR blocks {} with query metadata table {}.'.format(unsupported_blocks, unsupported_fields, ir_blocks, query_metadata_table))
Args:
ir_blocks: List[BasicBlock], IR blocks to validate.
query_metadata_table: QueryMetadataTable, object containing all metadata collected during
query processing, including location metadata (e.g. which locations
are folded or optional).
Raises:
NotImplementedError, if any block or ConstructResult field is unsupported. | codesearchnet |
def remove_room_alias(self, room_alias):
    """Remove mapping of an alias.

    Args:
        room_alias (str): The alias to be removed.

    Returns:
        bool: True if the alias is removed, False otherwise.
    """
    try:
        self.api.remove_room_alias(room_alias)
    except MatrixRequestError:
        return False
    return True
Args:
room_alias(str): The alias to be removed.
Returns:
bool: True if the alias is removed, False otherwise. | codesearchnet |
def _log_score(score):
    """Log the creation of a score.

    Args:
        score (Score): The score model.

    Returns:
        None
    """
    # Use lazy %-style logger arguments so the message is only formatted
    # when INFO logging is actually enabled.
    logger.info(
        'Score of (%s/%s) set for submission %s',
        score.points_earned, score.points_possible, score.submission.uuid)
Args:
score (Score): The score model.
Returns:
None | codesearchnet |
def sg_float(tensor, opt):
    r"""Casts a tensor to floatx.

    See `tf.cast()` in tensorflow.

    Args:
        tensor: A `Tensor` or `SparseTensor` (automatically given by chain).
        opt:
            name: If provided, it replaces current tensor's name.

    Returns:
        A `Tensor` or `SparseTensor` with same shape as `tensor`.
    """
    # A stray bare `r` (the orphaned raw-docstring prefix) used to sit here
    # and would have raised NameError at call time; it has been removed.
    return tf.cast(tensor, tf.sg_floatx, name=opt.name)
See `tf.cast()` in tensorflow.
Args:
tensor: A `Tensor` or `SparseTensor` (automatically given by chain).
opt:
name : If provided, it replaces current tensor's name
Returns:
A `Tensor` or `SparseTensor` with same shape as `tensor`. | juraj-google-style |
def usufyToXlsExport(d, fPath):
    """Workaround to export to a .xls file.

    Appends the new tabular data to any data already present in the
    spreadsheet at `fPath`; if the file is missing or unreadable, it
    starts from an empty workbook.

    Args:
        d: Data to export.
        fPath: File path for the output file.
    """
    from pyexcel_xls import get_data, save_data
    try:
        oldData = {"OSRFramework": get_data(fPath)}
    except Exception:
        # The workbook may not exist yet or may be unreadable; fall back to
        # an empty structure. A bare `except:` was replaced so that
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        oldData = {"OSRFramework": []}
    tabularData = _generateTabularData(d, oldData)
    save_data(fPath, tabularData)
Args:
-----
d: Data to export.
fPath: File path for the output file. | juraj-google-style |
def build_chain(self, source, chain):
    """Build markov chain from source on top of existing chain.

    Args:
        source: iterable which will be used to build chain
        chain: MarkovChain in currently loaded shelve file that
            will be extended by source
    """
    # Slide a window of (order + 1) items: the first `order` items form the
    # prefix, the last item is the observed successor.
    for window in WalkByGroup(source, chain.order + 1):
        prefix = window[:-1]
        successor = window[-1]
        counts = chain.content.setdefault(prefix, {})
        counts[successor] = counts.get(successor, 0) + 1
    chain.decache()
Args:
source: iterable which will be used to build chain
chain: MarkovChain in currently loaded shelve file that
will be extended by source | juraj-google-style |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.