code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def reopen_encoded(fileobj, mode='r', fallback_encoding=None):
    """Make sure that a file was opened with some valid encoding.

    Args:
        fileobj (file): The file-object.
        mode (str, optional): The mode in which to re-open the file.
        fallback_encoding (str, optional): The encoding in which to re-open
            the file if it does not specify an encoding itself.

    Returns:
        file: The re-opened file.
    """
    path = fileobj.name
    detected = determine_encoding(path, fallback_encoding)
    # Close the old handle before re-opening the same path.
    fileobj.close()
    return open(path, mode, encoding=detected)
def _tokenize(self, text, **kwargs):
    """Tokenize ``text`` with the underlying SentencePiece model.

    Args:
        text: TextInput string to tokenize.
        **kwargs: Accepted for interface compatibility; unused here.

    Returns:
        list[str]: The token pieces. The Gemma tokenizer never adds a
        prefix space.
    """
    return self.sp_model.encode(text, out_type=str)
def DeserializeExclusiveData(self, reader):
    """Deserialize the RegisterTransaction-specific fields from a reader.

    Reads the fields in wire order; the reader must be positioned at the
    start of the exclusive data.

    Args:
        reader (neo.IO.BinaryReader): source of the serialized bytes.
    """
    self.Type = TransactionType.RegisterTransaction
    self.AssetType = reader.ReadByte()
    self.Name = reader.ReadVarString()
    self.Amount = reader.ReadFixed8()
    self.Precision = reader.ReadByte()
    # Owner is an ECDSA public key on the secp256r1 curve.
    self.Owner = ECDSA.Deserialize_Secp256r1(reader)
    self.Admin = reader.ReadUInt160()
def get_absolute_name(package, relative_name):
    """Join a package name and a (possibly dot-relative) name.

    One leading dot anchors the name at ``package`` itself; each extra
    leading dot goes up one more level. With no leading dots the name is
    simply appended to ``package``.

    Args:
        package: A dotted name, e.g. ``foo.bar.baz``.
        relative_name: A dotted name with possibly some leading dots,
            e.g. ``..x.y``.

    Returns:
        The joined absolute dotted name, or ``relative_name`` unchanged
        when it has more leading dots than ``package`` has components.
    """
    parts = package.split('.') if package else []
    stripped = relative_name.lstrip('.')
    leading_dots = len(relative_name) - len(stripped)
    if leading_dots > len(parts):
        # Too many leading dots to resolve against this package.
        return relative_name
    # One dot keeps the whole package; each additional dot drops a level.
    drop = max(leading_dots - 1, 0)
    kept = parts[:len(parts) - drop]
    if stripped:
        kept.append(stripped)
    return '.'.join(kept)
def stop(self, timeout=5):
    """Terminate all worker threads.

    Posts one shutdown request per worker, then joins each worker within
    the remaining time budget. A worker still alive after its join window
    has its connection socket shut down to unblock it, then is joined
    without a timeout.

    Args:
        timeout (int): time to wait for threads to stop gracefully.
            ``None`` or a negative value means wait indefinitely.
    """
    # One shutdown sentinel per worker so every thread eventually wakes.
    for worker in self._threads:
        self._queue.put(_SHUTDOWNREQUEST)
    current = threading.currentThread()
    if ((timeout is not None) and (timeout >= 0)):
        endtime = (time.time() + timeout)
    while self._threads:
        worker = self._threads.pop()
        # Never join ourselves (stop() may be called from a worker).
        if ((worker is not current) and worker.isAlive()):
            try:
                if ((timeout is None) or (timeout < 0)):
                    worker.join()
                else:
                    remaining_time = (endtime - time.time())
                    if (remaining_time > 0):
                        worker.join(remaining_time)
                    if worker.isAlive():
                        # Force the worker out of a blocking read by
                        # shutting down its connection socket.
                        c = worker.conn
                        if (c and (not c.rfile.closed)):
                            try:
                                c.socket.shutdown(socket.SHUT_RD)
                            except TypeError:
                                # Some socket wrappers take no "how" arg.
                                c.socket.shutdown()
                            worker.join()
            except (AssertionError, KeyboardInterrupt):
                pass
def decode(cls, command_str):
    """Decode a string encoded command back into a Command object.

    Args:
        command_str (str): The encoded command string output from a
            previous call to encode.

    Returns:
        Command: The decoded Command object.

    Raises:
        DataError: If an argument list is present but not wrapped in
            ``{`` and ``}``.
    """
    name, _, raw_args = command_str.partition(" ")
    if not raw_args:
        return Command(name, [])
    if not (raw_args.startswith('{') and raw_args.endswith('}')):
        raise DataError("Invalid command, argument is not contained in { and }", arg=raw_args, cmd=name)
    decoded = []
    for piece in raw_args[1:-1].split(","):
        # "hex:" marks arguments that were hex-escaped during encoding.
        if piece.startswith("hex:"):
            piece = unhexlify(piece[4:]).decode('utf-8')
        decoded.append(piece)
    return Command(name, decoded)
def cut_video(in_file,
              out_file,
              start=None,
              end=None,
              vcodec=None,
              acodec=None,
              log_level='info',
              print_cmd=False,
              **kwargs):
    """Cut a clip from a video.

    Args:
        in_file (str): Input video filename.
        out_file (str): Output video filename.
        start (None or float): Start time (in seconds).
        end (None or float): End time (in seconds).
        vcodec (None or str): Output video codec, None for unchanged
            (stream copy).
        acodec (None or str): Output audio codec, None for unchanged
            (stream copy).
        log_level (str): Logging level of ffmpeg.
        print_cmd (bool): Whether to print the final ffmpeg command.
        **kwargs: Extra ffmpeg options forwarded to ``convert_video``.
    """
    options = {'log_level': log_level}
    # None means "unchanged": stream-copy instead of re-encoding. An
    # explicitly provided codec was previously dropped silently; it is
    # now forwarded as the docstring promises.
    options['vcodec'] = 'copy' if vcodec is None else vcodec
    options['acodec'] = 'copy' if acodec is None else acodec
    if start:
        options['ss'] = start
    else:
        start = 0
    if end:
        # ffmpeg's -t expects a duration, not an absolute end time.
        options['t'] = end - start
    # Extra keyword options were previously accepted but ignored.
    options.update(kwargs)
    convert_video(in_file, out_file, print_cmd, **options)
def remove_collisions(self, min_dist=0.5):
    """Remove predicted sites that are too close to existing atoms.

    Args:
        min_dist (float): The minimum distance (in Angstrom) that a
            predicted site needs to be from existing atoms. A min_dist
            with value <= 0 returns all sites without distance checking.

    Returns:
        list: The fractional coordinates kept, or None if no extrema
        have been computed yet.
    """
    s_f_coords = self.structure.frac_coords
    f_coords = self.extrema_coords
    if len(f_coords) == 0:
        if self.extrema_type is None:
            # Extrema were never computed; nothing to filter.
            logger.warning(
                "Please run ChargeDensityAnalyzer.get_local_extrema first!")
            return
        new_f_coords = []
        self._update_extrema(new_f_coords, self.extrema_type)
        return new_f_coords
    # For each candidate site, distance to its nearest existing atom.
    dist_matrix = self.structure.lattice.get_all_distances(f_coords,
                                                           s_f_coords)
    all_dist = np.min(dist_matrix, axis=1)
    new_f_coords = []
    for i, f in enumerate(f_coords):
        if all_dist[i] > min_dist:
            new_f_coords.append(f)
    self._update_extrema(new_f_coords, self.extrema_type)
    return new_f_coords
def _add_ttl_ns(self, line):
    """Bind the namespace from one turtle ``@prefix`` line to the class.

    Lines that are empty or are not ``@prefix`` declarations are ignored.

    Args:
        line: the turtle prefix line string,
            e.g. ``@prefix foaf: <http://xmlns.com/foaf/0.1/> .``
    """
    lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
    lg.setLevel(self.log_level)
    lg.debug("line:\n%s", line)
    line = str(line).strip()
    # NOTE(review): after str() the value can never be None; the first
    # test below is dead and only the remaining checks can trigger.
    if line is None or line == 'none' or line == '' \
            or not line.lower().startswith('@prefix'):
        return
    # Strip the "@prefix" keyword and the trailing turtle terminator.
    line = line.replace("@prefix", "", 1).strip()
    if line.endswith("."):
        line = line[:-1]
    # Split on the first colon: "foaf" | "<http://...>".
    prefix = line[:line.find(":")].strip()
    uri = self.clean_iri(line[line.find(":") + 1:].strip())
    lg.debug("\nprefix: %s uri: %s", prefix, uri)
    self.bind(prefix, uri, override=False, calc=False)
def extract_backup_bundle(self, resource, timeout=-1):
    """Extract the existing backup bundle on the appliance and create its artifacts.

    Args:
        resource (dict): Deployment Group to extract.
        timeout: Timeout in seconds. Waits for task completion by default.
            The timeout does not abort the operation in OneView, it just
            stops waiting for its completion.

    Returns:
        dict: A Deployment Group associated with the Artifact Bundle backup.
    """
    return self._client.update(resource, uri=self.BACKUP_ARCHIVE_PATH, timeout=timeout)
def dft_task(cls, mol, xc='b3lyp', **kwargs):
    """Quickly create a DFT task, with optional cosmo parameters.

    Args:
        mol: Input molecule.
        xc: Exchange-correlation functional to use.
        **kwargs: Any of the other kwargs supported by NwTask. Note the
            theory is always "dft" for a dft task.

    Returns:
        NwTask: the configured DFT task.
    """
    task = NwTask.from_molecule(mol, theory='dft', **kwargs)
    # Record the functional and the molecule's spin multiplicity.
    directives = {'xc': xc, 'mult': task.spin_multiplicity}
    task.theory_directives.update(directives)
    return task
def get_special_dtypes_update(self, model, torch_dtype: 'torch.dtype') -> Dict[str, 'torch.dtype']:
return {name: torch_dtype for name, _ in model.named_parameters() if any((m in name for m in self.modules_to_not_convert))} | returns dtypes for modules that are not quantized - used for the computation of the device_map in case
one passes a str as a device_map. The method will use the `modules_to_not_convert` that is modified
in `_process_model_before_weight_loading`.
Args:
model (`~transformers.PreTrainedModel`):
The model to quantize
torch_dtype (`torch.dtype`):
The dtype passed in `from_pretrained` method. | github-repos |
def run(self, inputs=None, warmup_iterations: int = 10, benchmark_iterations: int = 100) -> TestResultCollection:
    """Run model inference with provided or randomly generated input tensors.

    Args:
        inputs: Mapping from names to input ndarrays in TF1. Or a sequence
            of tensors in TF2. If `None`, randomly generated input tensors
            will be used instead.
        warmup_iterations: Number of inferences to warm up the runtime.
        benchmark_iterations: Number of inferences to measure the latency.

    Returns:
        `TestResultCollection` summarizing latency and numerics information
        for different TensorRT conversion settings.
    """
    inputs = inputs or self.generate_random_inputs()

    def run_model(model, **kwargs):
        # Shared driver so the baseline and TRT runs use identical inputs.
        return model.run(inputs, warmup_iterations, benchmark_iterations, **kwargs)
    try:
        cpu_base_result = run_model(self._ori_model, enable_gpu=False)
    except RuntimeError as err:
        # Some models have GPU-only ops; record the CPU baseline as absent.
        logging.info('%s cannot run on CPU. Reason: %s.', self._ori_model.model_config, err)
        cpu_base_result = None
    gpu_base_result = run_model(self._ori_model, enable_gpu=True)
    trt_results = list(map(run_model, self._trt_models))
    return TestResultCollection(test_name=self._name, model_config=self.model_config, cpu_base_result=cpu_base_result, gpu_base_result=gpu_base_result, trt_results=trt_results)
def _set_least_batch_id(self, txn_signature):
    """Advance the first batch id that doesn't have all results.

    Args:
        txn_signature (str): The txn identifier of the transaction with
            results being set.
    """
    batch = self._batches_by_txn_id[txn_signature]
    least_index = self._index_of_batch(self._batches_by_id[self._least_batch_id_wo_results].batch)
    current_index = self._index_of_batch(batch)
    all_prior = False
    if (current_index <= least_index):
        # The current batch is at or before the tracked least batch;
        # nothing can advance.
        return
    # Only advance if every batch between the least and the current one
    # already has results for all of its transactions.
    if all((all(((t.header_signature in self._txn_results) for t in b.transactions)) for b in self._batches[least_index:current_index])):
        all_prior = True
    if (not all_prior):
        return
    # Scan forward for the first batch still missing any result; that
    # becomes the new least batch without results.
    possible_least = self._batches[current_index].header_signature
    for b in self._batches[current_index:]:
        if (not all(((t.header_signature in self._txn_results) for t in b.transactions))):
            possible_least = b.header_signature
            break
    self._least_batch_id_wo_results = possible_least
def __init__(self,
             lang='en',
             lower=True,
             charset=None):
    """Encodes text into ``(samples, sentences, characters)``.

    Args:
        lang: The spacy language to use. (Default value: 'en')
        lower: Lower cases the tokens if True. (Default value: True)
        charset: The character set to use. For example ``charset = 'abc123'``.
            If None, all characters will be used. (Default value: None)
    """
    super(SentenceCharTokenizer, self).__init__(lang, lower, charset)
def _FilterOutPathInfoDuplicates(path_infos):
pi_dict = {}
for pi in path_infos:
path_key = (pi.path_type, pi.GetPathID())
pi_dict.setdefault(path_key, []).append(pi)
def _SortKey(pi):
return (
pi.stat_entry.st_ctime,
pi.stat_entry.st_mtime,
pi.stat_entry.st_atime,
pi.stat_entry.st_ino,
)
for pi_values in pi_dict.values():
if len(pi_values) > 1:
pi_values.sort(key=_SortKey, reverse=True)
return [v[0] for v in pi_dict.values()] | Filters out duplicates from passed PathInfo objects.
Args:
path_infos: An iterable with PathInfo objects.
Returns:
A list of PathInfo objects with duplicates removed. Duplicates are
removed following this logic: they're sorted by (ctime, mtime, atime,
inode number) in the descending order and then the first one is taken
and the others are dropped. | juraj-google-style |
def dataverse_download_doi(doi, local_fname=None, file_requirements={}, clobber=False):
    """Download a file from the Dataverse, located by DOI and metadata.

    Args:
        doi (str): Digital Object Identifier (DOI) containing the file.
        local_fname (Optional[str]): Local filename to download the file to.
            If `None`, then use the filename provided by the Dataverse.
            Defaults to `None`.
        file_requirements (Optional[dict]): Select the file containing the
            given metadata entries. If multiple files meet these
            requirements, only the first is downloaded. Defaults to `{}`,
            corresponding to no requirements.
            NOTE(review): mutable default argument; harmless here because
            it is only read, never mutated.
        clobber (bool): If True, skip the MD5 short-circuit for an
            existing local file.

    Raises:
        DownloadError: Either no matching file was found under the given
            DOI, or the MD5 sum of the file was not as expected.
        requests.exceptions.HTTPError: The given DOI does not exist, or
            there was a problem connecting to the Dataverse.
    """
    metadata = dataverse_search_doi(doi)

    def requirements_match(metadata):
        # Every required key must be present with exactly the given value.
        for key in file_requirements.keys():
            if (metadata['dataFile'].get(key, None) != file_requirements[key]):
                return False
        return True
    for file_metadata in metadata['data']['latestVersion']['files']:
        if requirements_match(file_metadata):
            file_id = file_metadata['dataFile']['id']
            md5sum = file_metadata['dataFile']['md5']
            if (local_fname is None):
                local_fname = file_metadata['dataFile']['filename']
            # Skip the download when an identical copy is already present.
            if ((not clobber) and os.path.isfile(local_fname)):
                print('Checking existing file to see if MD5 sum matches ...')
                md5_existing = get_md5sum(local_fname)
                if (md5_existing == md5sum):
                    print('File exists. Not overwriting.')
                    return
            print("Downloading data to '{}' ...".format(local_fname))
            # NOTE(review): clobber is hard-coded to False here rather
            # than forwarding the caller's value — confirm intended.
            dataverse_download_id(file_id, md5sum, fname=local_fname, clobber=False)
            return
    # NOTE(review): file_metadata here is the last file examined (and a
    # NameError if the file list was empty) — confirm intended.
    raise DownloadError(('No file found under the given DOI matches the requirements.\nThe metadata found for this DOI was:\n' + json.dumps(file_metadata, indent=2, sort_keys=True)))
def ReadSerialized(cls, json_string):
    """Read an attribute container from JSON serialized form.

    Args:
        json_string (str): JSON serialized attribute container.

    Returns:
        AttributeContainer: attribute container or None when the input is
        empty.
    """
    if not json_string:
        return None
    return cls.ReadSerializedDict(json.loads(json_string))
def put(self, value, priority=100):
    """Put a task into the queue.

    Args:
        value (str): Task data.
        priority (int): An optional priority as an integer with at most
            3 digits. Lower values signify higher priority.
    """
    # Zero-pad the priority so lexical key order matches priority order.
    task_name = '{}{:03d}_{}'.format(self.TASK_PREFIX, priority, self._counter)
    self._client.kv[posixpath.join(self._queue_path, task_name)] = value
def update_branch(profile, name, sha):
    """Move a branch's HEAD to a new SHA.

    Args:
        profile: A profile generated from
            ``simplygithub.authentication.profile``. Such profiles tell
            this module (i) the ``repo`` to connect to, and (ii) the
            ``token`` to connect with.
        name: The name of the branch to update.
        sha: The commit SHA to point the branch's HEAD to.

    Returns:
        A dict with data about the branch.
    """
    return refs.update_ref(profile, "heads/" + name, sha)
def prepare_wheel_srcs(headers: list[str], srcs: list[str], dests: list[str], aot: list[str], srcs_dir: str, version: str) -> None:
    """Rearrange source and header files for wheel building.

    Args:
        headers: a list of paths to header files.
        srcs: a list of paths to the rest of files.
        dests: a list of paths to files with srcs files destinations.
        aot: a list of paths to files that should be in xla_aot directory.
        srcs_dir: directory to copy files to.
        version: tensorflow version.
    """
    prepare_headers(headers, os.path.join(srcs_dir, 'tensorflow/include'))
    prepare_srcs(srcs, dests, srcs_dir)
    prepare_aot(aot, os.path.join(srcs_dir, 'tensorflow/xla_aot_runtime_src'))
    create_init_files(os.path.join(srcs_dir, 'tensorflow'))
    # Move packaging metadata to the locations setuptools expects.
    shutil.move(os.path.join(srcs_dir, 'tensorflow/tools/pip_package/MANIFEST.in'), os.path.join(srcs_dir, 'MANIFEST.in'))
    shutil.move(os.path.join(srcs_dir, 'tensorflow/tools/pip_package/THIRD_PARTY_NOTICES.txt'), os.path.join(srcs_dir, 'tensorflow/THIRD_PARTY_NOTICES.txt'))
    update_xla_tsl_imports(os.path.join(srcs_dir, 'tensorflow'))
    # NOTE(review): when explicit destinations are given, the
    # platform-specific library fixups below are skipped — confirm intended.
    if dests:
        return
    if not is_windows():
        rename_libtensorflow(os.path.join(srcs_dir, 'tensorflow'), version)
    if not is_macos() and (not is_windows()):
        patch_so(srcs_dir)
def psnr_and_ssim(output, target):
    """Compute the PSNR and SSIM between two image batches.

    Args:
        output: 4-D Tensor, shape=(num_frames, height, width, num_channels)
        target: 4-D Tensor, shape=(num_frames, height, width, num_channels)

    Returns:
        psnr: 1-D Tensor, shape=(num_frames,)
        ssim: 1-D Tensor, shape=(num_frames,)
    """
    # Both metrics expect integer pixel values in [0, 255].
    output_i32 = tf.cast(output, dtype=tf.int32)
    target_i32 = tf.cast(target, dtype=tf.int32)
    psnr = tf.image.psnr(output_i32, target_i32, max_val=255)
    ssim = tf.image.ssim(output_i32, target_i32, max_val=255)
    return (psnr, ssim)
def pad_mixture_dimensions(x, mixture_distribution, categorical_distribution, event_ndims):
    """Pad dimensions of event tensors for mixture distributions.

    See `Mixture._sample_n` and `MixtureSameFamily._sample_n` for usage
    examples.

    Args:
        x: event tensor to pad.
        mixture_distribution: Base distribution of the mixture.
        categorical_distribution: `Categorical` distribution that mixes the
            base distribution.
        event_ndims: Integer specifying the number of event dimensions in
            the event tensor.

    Returns:
        A padded version of `x` that can broadcast with
        `categorical_distribution`.
    """
    with tf.name_scope('pad_mix_dims'):
        def _get_ndims(d):
            # Prefer the statically known batch rank; fall back to the
            # dynamic batch-shape tensor.
            if (tensorshape_util.rank(d.batch_shape) is not None):
                return tensorshape_util.rank(d.batch_shape)
            return tf.shape(input=d.batch_shape_tensor())[0]
        dist_batch_ndims = _get_ndims(mixture_distribution)
        cat_batch_ndims = _get_ndims(categorical_distribution)
        # A scalar-batch categorical needs padding for every mixture batch
        # dim; otherwise only for the rank difference.
        pad_ndims = tf.where(categorical_distribution.is_scalar_batch(), dist_batch_ndims, (dist_batch_ndims - cat_batch_ndims))
        s = tf.shape(input=x)
        # Insert size-1 dims before the mixture-components axis and after
        # it for the event dims.
        x = tf.reshape(x, shape=tf.concat([s[:(- 1)], tf.ones([pad_ndims], dtype=tf.int32), s[(- 1):], tf.ones([event_ndims], dtype=tf.int32)], axis=0))
        return x
def _checksum(cls, line):
tr_table = str.maketrans({c: None for c in ascii_uppercase + "+ ."})
no_letters = line[:68].translate(tr_table).replace("-", "1")
return sum([int(l) for l in no_letters]) % 10 | Compute the checksum of a full line
Args:
line (str): Line to compute the checksum from
Return:
int: Checksum (modulo 10) | juraj-google-style |
def get_poi_types(self, **kwargs):
    """Obtain POI types from the EMT 'geo' service.

    Args:
        lang (str): Language code (*es* or *en*).

    Returns:
        Status boolean and parsed response (list[PoiType]).
        NOTE(review): this code path always returns True as the status;
        presumably error responses are surfaced elsewhere — confirm.
    """
    params = {'cultureInfo': util.language_code(kwargs.get('lang'))}
    result = self.make_request('geo', 'get_poi_types', **params)
    # Missing 'types' key is treated as an empty result set.
    values = result.get('types', [])
    return (True, [emtype.PoiType(**a) for a in values])
def _has_strict_none_origins(self, binding):
    """Whether the binding has any possible origins, with None filtering.

    Determines whether the binding has any possibly visible origins at the
    current node once we've filtered out false positives on None. The
    caller still must call HasCombination() to find out whether these
    origins are actually reachable.

    Args:
        binding: A cfg.Binding.

    Returns:
        True if there are possibly visible origins, else False.
    """
    if not self._analyzing:
        # Outside the analysis phase we cannot filter; assume visible.
        return True
    has_any_none_origin = False
    # Walk only through bindings whose data is None.
    walker = cfg_utils.walk_binding(binding, keep_binding=lambda b: self._data_is_none(b.data))
    origin = None
    while True:
        try:
            origin = walker.send(origin)
        except StopIteration:
            break
        for source_set in origin.source_sets:
            if not source_set:
                # An empty source set is a terminal origin; it counts only
                # if it is reachable from the current frame node.
                if self.ctx.program.is_reachable(src=self.frame.node, dst=origin.where):
                    return True
                has_any_none_origin = True
    # Visible unless every origin found was an unreachable None origin.
    return not has_any_none_origin
def _SmallestColSize(self, text):
    """Find the largest indivisible word of a string.

    ...and thus the smallest possible column width that can contain that
    word unsplit over rows.

    Args:
        text: A string of text potentially consisting of words.

    Returns:
        Integer size of the largest single word in the text.
    """
    if not text:
        return 0
    # Measure visible width: ANSI escape sequences take no columns.
    plain = terminal.StripAnsiText(text)
    return max(len(token) for token in plain.split())
def remote_file(self, branch='master', filename=''):
    """Read a remote file from the GitLab server.

    Args:
        branch (str): Git Branch to find file.
        filename (str): Name of file to retrieve relative to root of
            repository.

    Returns:
        str: Contents of remote file.

    Raises:
        FileNotFoundError: Requested file missing.
    """
    LOG.info('Retrieving "%s" from "%s".', filename, self.git_short)
    file_contents = ''
    try:
        file_blob = self.project.files.get(file_path=filename, ref=branch)
    except gitlab.exceptions.GitlabGetError:
        # Normalize the GitLab-specific error into a missing-file marker.
        file_blob = None
    LOG.debug('GitLab file response:\n%s', file_blob)
    if (not file_blob):
        msg = 'Project "{0}" is missing file "{1}" in "{2}" branch.'.format(self.git_short, filename, branch)
        LOG.warning(msg)
        raise FileNotFoundError(msg)
    else:
        # GitLab returns file content base64-encoded.
        file_contents = b64decode(file_blob.content).decode()
    LOG.debug('Remote file contents:\n%s', file_contents)
    return file_contents
def _ParseVSSProcessingOptions(self, options):
    """Parse the VSS (Volume Shadow Snapshot) processing options.

    Args:
        options (argparse.Namespace): command line arguments.

    Raises:
        BadConfigOption: if the options are invalid.
    """
    vss_only = False
    vss_stores = None
    # "no_vss" disables VSS processing entirely.
    self._process_vss = (not getattr(options, 'no_vss', False))
    if self._process_vss:
        vss_only = getattr(options, 'vss_only', False)
        vss_stores = getattr(options, 'vss_stores', None)
    if vss_stores:
        try:
            # Validate the store-range string; the parsed value itself is
            # not needed here.
            self._ParseVolumeIdentifiersString(vss_stores, prefix='vss')
        except ValueError:
            raise errors.BadConfigOption('Unsupported VSS stores')
    self._vss_only = vss_only
    self._vss_stores = vss_stores
def AddTableColumns(self, table, columns):
    """Add columns to a table's column list if not already present.

    Args:
        table: table name as a string.
        columns: an iterable of column names.
    """
    existing = self._table_columns.setdefault(table, [])
    for column in columns:
        # Preserve first-seen order; skip anything already registered.
        if column not in existing:
            existing.append(column)
def get_timezone_olson_id():
    """Return the Olson ID of the local (non-DST) timezone.

    Returns:
        A string representing one of the Olson IDs of the local (non-DST)
        timezone.
    """
    # time.timezone is seconds *west* of UTC, so the sign is inverted
    # relative to the usual "GMT+N" notation.
    offset_hours = int(time.timezone / 3600)
    sign = '+' if offset_hours <= 0 else '-'
    return GMT_to_olson[f'GMT{sign}{abs(offset_hours)}']
def _IsMetadataFile(self, file_entry):
    """Determine if the file entry is a known file-system metadata file.

    Args:
        file_entry (dfvfs.FileEntry): a file entry object.

    Returns:
        bool: True if the file entry is a metadata file.
    """
    # Only TSK-backed entries at known metadata locations qualify.
    return (file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK
            and file_entry.path_spec.location in self._METADATA_FILE_LOCATIONS_TSK)
def run(self, inputs: Dict[str, Union[float, Iterable]], torch_size: Optional[int] = None) -> Union[float, Iterable]:
    """Execute the GrFN over a particular set of inputs and return the result.

    Args:
        inputs: Input set where keys are the names of input nodes in the
            GrFN and each key points to a set of input values (or just one).
        torch_size: When given, zero-argument lambda results are broadcast
            into a torch tensor of this length.

    Returns:
        A set of outputs from executing the GrFN, one for every set of
        inputs.
    """
    # Seed the input nodes with the provided values.
    for i in self.inputs:
        self.nodes[i]['value'] = inputs[i]
    # Function sets are evaluated in dependency order.
    for func_set in self.function_sets:
        for func_name in func_set:
            lambda_fn = self.nodes[func_name]['lambda_fn']
            output_node = list(self.successors(func_name))[0]
            signature = self.nodes[func_name]['func_inputs']
            input_values = [self.nodes[n]['value'] for n in signature]
            res = lambda_fn(*input_values)
            if ((torch_size is not None) and (len(signature) == 0)):
                # Constants must be broadcast to the batch size so later
                # vectorized ops line up.
                self.nodes[output_node]['value'] = torch.tensor(([res] * torch_size), dtype=torch.double)
            else:
                self.nodes[output_node]['value'] = res
    return self.nodes[self.output_node]['value']
def orient_directed_graph(self, data, dag, alg='HC'):
    """Modify and improve a directed acyclic graph solution using CGNN.

    Args:
        data (pandas.DataFrame): Observational data on which causal
            discovery has to be performed.
        dag (nx.DiGraph): Graph that provides the initial solution, on
            which the CGNN algorithm will be applied.
        alg (str): Exploration heuristic to use, among ["HC", "HCr",
            "tabu", "EHC"].

    Returns:
        networkx.DiGraph: Solution given by CGNN.
    """
    # Dispatch table mapping heuristic names to search strategies.
    alg_dic = {'HC': hill_climbing, 'HCr': hill_climbing_with_removal, 'tabu': tabu_search, 'EHC': exploratory_hill_climbing}
    return alg_dic[alg](data, dag, nh=self.nh, nb_runs=self.nb_runs, gpu=self.gpu, nb_jobs=self.nb_jobs, lr=self.lr, train_epochs=self.train_epochs, test_epochs=self.test_epochs, verbose=self.verbose)
def prepare_minibatch(self, audio_paths, texts, overwrite=False,
                      is_bi_graphemes=False, seq_length=-1, save_feature_as_csvfile=False):
    """Featurize a minibatch of audio, zero-pad it and return a dictionary.

    Args:
        audio_paths (list(str)): List of paths to audio files.
        texts (list(str)): List of texts corresponding to the audio files.
        overwrite (bool): Re-compute features even if cached.
        is_bi_graphemes (bool): Encode labels as bi-graphemes instead of
            single characters.
        seq_length (int): Fixed sequence length for padding; -1 uses
            ``self.max_seq_length``.
        save_feature_as_csvfile (bool): Persist computed features as CSV.

    Returns:
        dict: with keys 'x' (padded features), 'y' (padded labels),
        'texts', 'input_lengths' and 'label_lengths'.
    """
    assert len(audio_paths) == len(texts),\
        "Inputs and outputs to the network must be of the same number"
    features = [self.featurize(a, overwrite=overwrite, save_feature_as_csvfile=save_feature_as_csvfile) for a in audio_paths]
    input_lengths = [f.shape[0] for f in features]
    feature_dim = features[0].shape[1]
    mb_size = len(features)
    # Zero-pad every utterance to a common length.
    if seq_length == -1:
        x = np.zeros((mb_size, self.max_seq_length, feature_dim))
    else:
        x = np.zeros((mb_size, seq_length, feature_dim))
    y = np.zeros((mb_size, self.max_label_length))
    labelUtil = LabelUtil.getInstance()
    label_lengths = []
    for i in range(mb_size):
        feat = features[i]
        feat = self.normalize(feat)
        x[i, :feat.shape[0], :] = feat
        if is_bi_graphemes:
            label = generate_bi_graphemes_label(texts[i])
            label = labelUtil.convert_bi_graphemes_to_num(label)
            y[i, :len(label)] = label
        else:
            label = labelUtil.convert_word_to_num(texts[i])
            y[i, :len(texts[i])] = label
        label_lengths.append(len(label))
    return {
        'x': x,
        'y': y,
        'texts': texts,
        'input_lengths': input_lengths,
        'label_lengths': label_lengths,
    }
def caption_to_item(self, caption):
    """Get a MenuItem from the caption.

    Args:
        caption: (str) The caption to look up.

    Returns:
        (MenuItem) The first-match menu item with the caption, if any.

    Raises:
        LookupError: If a menu item with the caption does not exist.
    """
    all_captions = self.captions()
    if caption in all_captions:
        # Items and captions are parallel lists; first match wins.
        return self._items[all_captions.index(caption)]
    raise LookupError('There is no menu item with the caption "%s"' % caption)
def from_list(cls, vals: List[Value] = (), reverse: bool = False) -> "LinkedList":
    """Create an instance from a standard list.

    Args:
        vals: Python list of instance values. Defaults to an empty
            immutable tuple (avoids the shared mutable-default pitfall).
        reverse: If True, keep the values in reversed order.

    Returns:
        LinkedList: the list built by consing values onto an EmptyList.
    """
    result = EmptyList()
    # Build front-to-back by consuming the values back-to-front.
    for value in (vals if reverse else vals[::-1]):
        result = cls(value, result)
    return result
def __init__(self, hash_queue, hash_analysis_queue, **kwargs):
    """Initializes a VirusTotal analyzer.

    Args:
        hash_queue (Queue.queue): queue that contains hashes to be
            analyzed.
        hash_analysis_queue (Queue.queue): queue the analyzer will append
            HashAnalysis objects to.
        **kwargs: forwarded to the base analyzer.
    """
    super(VirusTotalAnalyzer, self).__init__(
        hash_queue, hash_analysis_queue, **kwargs)
    # API key is set later via configuration.
    self._api_key = None
    self._checked_for_old_python_version = False
def digest_content(self, rule):
    """Walk rule content tokens to return a dict of properties.

    This is pretty naive and will choke/fail on everything that is more
    evolved than simple ``ident(string):value(string)``.

    Arguments:
        rule (tinycss2.ast.QualifiedRule): Qualified rule object as
            returned by tinycss2.

    Returns:
        dict: Dictionary of retrieved variables and properties.
    """
    properties = OrderedDict()
    active_key = None
    for token in rule.content:
        if token.type == 'ident':
            label = token.value
            # Drop a single leading dash (e.g. "-foo" -> "foo").
            active_key = label[1:] if label.startswith('-') else label
            properties[active_key] = None
        elif token.type == 'string':
            properties[active_key] = token.value
    return properties
def _get_anchor(module_to_name, fullname):
    """Turn a full member name into an anchor.

    Args:
        module_to_name: Dictionary mapping modules to short names.
        fullname: Fully qualified name of symbol.

    Returns:
        HTML anchor string. The longest module name prefix of fullname is
        removed to make the anchor.

    Raises:
        ValueError: If fullname uses characters invalid in an anchor.
    """
    if not _anchor_re.match(fullname):
        raise ValueError("'%s' is not a valid anchor" % fullname)
    shortest = fullname
    for module_name in module_to_name.values():
        prefix = module_name + '.'
        if fullname.startswith(prefix):
            candidate = fullname[len(prefix):]
            # The longest matching module prefix leaves the shortest rest.
            if len(candidate) < len(shortest):
                shortest = candidate
    return shortest
def benchmark_config():
    """Return a tf.compat.v1.ConfigProto that disables the dependency optimizer.

    Returns:
        A TensorFlow ConfigProto object.
    """
    config = config_pb2.ConfigProto()
    # The dependency optimizer would rewrite the control-dependency
    # structure that the benchmark is measuring.
    config.graph_options.rewrite_options.dependency_optimization = rewriter_config_pb2.RewriterConfig.OFF
    return config
def __init__(self, resolver_context):
    """Initializes a file system.

    Args:
        resolver_context (Context): resolver context.

    Raises:
        ValueError: if a derived file system class does not define a type
            indicator.
    """
    super(FileSystem, self).__init__()
    self._is_cached = False
    self._is_open = False
    self._path_spec = None
    self._resolver_context = resolver_context
    # Subclasses must declare TYPE_INDICATOR; fail fast if they don't.
    if not getattr(self, 'TYPE_INDICATOR', None):
        raise ValueError('Missing type indicator.')
def get_course_enrollments(self, enterprise_customer, days):
    """Get recent course enrollments for learners of an enterprise customer.

    Args:
        enterprise_customer (EnterpriseCustomer): Include course
            enrollments for learners of this enterprise customer.
        days (int): Include course enrollments created within this many
            days.

    Returns:
        (QuerySet): CourseEnrollment objects matching both filters.
    """
    # Two chained filters: recent creation date, and membership in the
    # customer's learner set.
    return CourseEnrollment.objects.filter(created__gt=(datetime.datetime.now() - datetime.timedelta(days=days))).filter(user_id__in=enterprise_customer.enterprise_customer_users.values_list('user_id', flat=True))
def __init__(self, project_id, instance_id, database_id, pool=None, credentials=None, max_batch_size_bytes=1048576, max_number_rows=50, max_number_cells=500):
    """A PTransform to write onto Google Cloud Spanner.

    Args:
        project_id: Cloud Spanner project id. Be sure to use the Project
            ID, not the Project Number.
        instance_id: Cloud Spanner instance id.
        database_id: Cloud Spanner database id.
        pool: (optional) session pool to use for the connection.
        credentials: (optional) credentials to authenticate with.
        max_batch_size_bytes: (optional) Split the mutations into batches
            to reduce the number of transactions sent to Spanner. By
            default it is set to 1 MB (1048576 Bytes).
        max_number_rows: (optional) Split the mutations into batches to
            reduce the number of transactions sent to Spanner. By default
            it is set to 50 rows per batch.
        max_number_cells: (optional) Split the mutations into batches to
            reduce the number of transactions sent to Spanner. By default
            it is set to 500 cells per batch.
    """
    # table/query settings are per-write and therefore left unset here.
    self._configuration = _BeamSpannerConfiguration(project=project_id, instance=instance_id, database=database_id, table=None, query_name=None, credentials=credentials, pool=pool, snapshot_read_timestamp=None, snapshot_exact_staleness=None)
    self._max_batch_size_bytes = max_batch_size_bytes
    self._max_number_rows = max_number_rows
    self._max_number_cells = max_number_cells
    self._database_id = database_id
    self._project_id = project_id
    self._instance_id = instance_id
    self._pool = pool
def pyxb_to_dict(rp_pyxb):
    """Convert a ReplicationPolicy PyXB object to a normalized dict.

    Args:
        rp_pyxb: ReplicationPolicy to convert.

    Returns:
        dict: Replication Policy as normalized dict.

    Example::

        {
            'allowed': True,
            'num': 3,
            'block': {'urn:node:NODE1', 'urn:node:NODE2'},
            'pref': {'urn:node:NODE4', 'urn:node:NODE5'},
        }
    """
    return {
        # Coerce the attribute (possibly absent) to a plain bool.
        'allowed': bool(_get_attr_or_list(rp_pyxb, 'allowed')),
        'num': _get_as_int(rp_pyxb),
        'block': _get_as_set(rp_pyxb, 'block'),
        'pref': _get_as_set(rp_pyxb, 'pref'),
    }
def set_transaction_execution_result(self, txn_signature, is_valid, context_id, state_changes, events, data, error_message, error_data):
    """Set the status of an executed transaction.

    Called by the executor after a transaction has been processed. The
    scheduler must know when transactions have finished being applied so
    that it can determine which transactions will become eligible for
    processing.

    Args:
        txn_signature (str): The signature of the transaction, which must
            match the header_signature field of the Transaction object
            which was part of the added Batch.
        is_valid (bool): True if the transaction applied successfully or
            False if the transaction failed and was not applied.
        context_id (str): If is_valid is True, contains the context_id
            associated with the state changes made by the transaction.
            If is_valid is False, this should be set to None.
        state_changes: state changes produced by the transaction.
        events: events emitted by the transaction.
        data: opaque transaction-receipt data.
        error_message: error message when the transaction failed.
        error_data: error payload when the transaction failed.

    Raises:
        ValueError: Thrown if txn_signature does not match a transaction.
        NotImplementedError: Always, in this abstract base; subclasses
            must override.
    """
    raise NotImplementedError()
def restore(cls, metadata_checkpoint_dir, search_alg=None, scheduler=None, trial_executor=None):
    """Restore all checkpointed trials from a previous run.

    Requires the user to manually re-register their objects. Also stops
    all ongoing trials.

    Args:
        metadata_checkpoint_dir (str): Path to metadata checkpoints.
        search_alg (SearchAlgorithm): Search Algorithm. Defaults to
            BasicVariantGenerator.
        scheduler (TrialScheduler): Scheduler for executing the
            experiment.
        trial_executor (TrialExecutor): Manage the execution of trials.

    Returns:
        runner (TrialRunner): A TrialRunner to resume experiments from.
    """
    newest_ckpt_path = _find_newest_ckpt(metadata_checkpoint_dir)
    with open(newest_ckpt_path, 'r') as f:
        runner_state = json.load(f, cls=_TuneFunctionDecoder)
    logger.warning(''.join(['Attempting to resume experiment from {}. '.format(metadata_checkpoint_dir), 'This feature is experimental, and may not work with all search algorithms. ', 'This will ignore any new changes to the specification.']))
    # Imported lazily to avoid a circular import at module load time.
    from ray.tune.suggest import BasicVariantGenerator
    runner = TrialRunner((search_alg or BasicVariantGenerator()), scheduler=scheduler, trial_executor=trial_executor)
    runner.__setstate__(runner_state['runner_data'])
    trials = []
    for trial_cp in runner_state['checkpoints']:
        new_trial = Trial(trial_cp['trainable_name'])
        new_trial.__setstate__(trial_cp)
        trials += [new_trial]
    # Re-add most recently updated trials first.
    for trial in sorted(trials, key=(lambda t: t.last_update_time), reverse=True):
        runner.add_trial(trial)
    return runner
def to_text_diagram_drawer(self, *, use_unicode_characters: bool=True, qubit_namer: Optional[Callable[([ops.Qid], str)]]=None, transpose: bool=False, precision: Optional[int]=3, qubit_order: ops.QubitOrderOrList=ops.QubitOrder.DEFAULT, get_circuit_diagram_info: Optional[Callable[([ops.Operation, protocols.CircuitDiagramInfoArgs], protocols.CircuitDiagramInfo)]]=None) -> TextDiagramDrawer:
    """Returns a TextDiagramDrawer with the circuit drawn into it.

    Args:
        use_unicode_characters: Determines if unicode characters are
            allowed (as opposed to ascii-only diagrams).
        qubit_namer: Names qubits in the diagram. Defaults to str.
        transpose: Arranges qubit wires vertically instead of horizontally.
        precision: Number of digits to use when representing numbers.
        qubit_order: Determines how qubits are ordered in the diagram.
        get_circuit_diagram_info: Gets circuit diagram info. Defaults to
            the protocol with fallback.

    Returns:
        The TextDiagramDrawer instance.
    """
    # Fix a row index for each qubit, in the requested order.
    qubits = ops.QubitOrder.as_qubit_order(qubit_order).order_for(self.all_qubits())
    qubit_map = {qubits[i]: i for i in range(len(qubits))}
    if (qubit_namer is None):
        # Horizontal diagrams get a ': ' separator after the qubit label.
        qubit_namer = (lambda q: (str(q) + ('' if transpose else ': ')))
    diagram = TextDiagramDrawer()
    # Column 0 holds the qubit labels.
    for (q, i) in qubit_map.items():
        diagram.write(0, i, qubit_namer(q))
    moment_groups = []
    for moment in self._moments:
        _draw_moment_in_diagram(moment, use_unicode_characters, qubit_map, diagram, precision, moment_groups, get_circuit_diagram_info)
    # Draw the wire for each qubit across the full diagram width.
    w = diagram.width()
    for i in qubit_map.values():
        diagram.horizontal_line(i, 0, w)
    if moment_groups:
        _draw_moment_groups_in_diagram(moment_groups, use_unicode_characters, diagram)
    if transpose:
        diagram = diagram.transpose()
    return diagram
Args:
use_unicode_characters: Determines if unicode characters are
allowed (as opposed to ascii-only diagrams).
qubit_namer: Names qubits in diagram. Defaults to str.
transpose: Arranges qubit wires vertically instead of horizontally.
precision: Number of digits to use when representing numbers.
qubit_order: Determines how qubits are ordered in the diagram.
get_circuit_diagram_info: Gets circuit diagram info. Defaults to
protocol with fallback.
Returns:
The TextDiagramDrawer instance. | codesearchnet |
def sg_max(tensor, opt):
    r"""Computes the maximum of elements across axis of a tensor.

    See `tf.reduce_max()` in tensorflow.

    Args:
        tensor: A `Tensor` (automatically given by chain).
        opt:
            axis: A tuple/list of integers or an integer. The axis to reduce.
            keep_dims: If true, retains reduced dimensions with length 1.
            name: If provided, replace current tensor's name.

    Returns:
        A `Tensor`.
    """
    # Bug fix: the body previously contained a stray bare `r` statement (a
    # leftover raw-docstring prefix) which evaluated an undefined name at
    # call time; it has been removed.
    return tf.reduce_max(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
See `tf.reduce_max()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : A tuple/list of integers or an integer. The axis to reduce.
keep_dims: If true, retains reduced dimensions with length 1.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`. | juraj-google-style |
def get_resource_group(access_token, subscription_id, rgname):
    """Get details about the named resource group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rgname (str): Azure resource group name.

    Returns:
        HTTP response. JSON body.
    """
    # Build the ARM REST endpoint for the resource group lookup.
    endpoint = '{root}/subscriptions/{sub}/resourceGroups/{rg}?api-version={api}'.format(
        root=get_rm_endpoint(),
        sub=subscription_id,
        rg=rgname,
        api=RESOURCE_API)
    return do_get(endpoint, access_token)
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response. JSON body. | juraj-google-style |
def _CheckGitkitError(self, raw_response):
    """Raises an error if the API invocation failed.

    Args:
        raw_response: string, the http response body.

    Returns:
        dict: the parsed response on success.

    Raises:
        GitkitClientError: if the error code is 4xx.
        GitkitServerError: if the response is malformed, carries a
            non-4xx error code, or an error without a code.
    """
    try:
        response = simplejson.loads(raw_response)
        if 'error' not in response:
            return response
        else:
            error = response['error']
            if 'code' in error:
                # 4xx codes are treated as client-side errors; everything
                # else is attributed to the server.
                code = error['code']
                if str(code).startswith('4'):
                    raise errors.GitkitClientError(error['message'])
                else:
                    raise errors.GitkitServerError(error['message'])
    except simplejson.JSONDecodeError:
        # Unparseable body: fall through to the generic server error below.
        pass
    # Reached when JSON was invalid or an error object had no 'code'.
    raise errors.GitkitServerError('null error code from Gitkit server')
Args:
raw_response: string, the http response.
Raises:
GitkitClientError: if the error code is 4xx.
GitkitServerError: if the response if malformed.
Returns:
Successful response as dict. | juraj-google-style |
def make_string_field_value(cls, field):
    """Generate a random string value that satisfies *field*'s constraints.

    Args:
        field (StringField): actual string field object from a model
            declaration.

    Returns:
        A random string whose length falls in the field's declared range.

    Raises:
        NotImplementedError: if the field declares a regex constraint,
            which this generator does not support.
    """
    if field.regex is not None:
        raise NotImplementedError
    string_range = cls.get_range(field)
    return cls.get_random_string(string_range)
in the super class)
Args:
field (StringField): actual string field object from a
model declaration
Returns:
random string value | juraj-google-style |
def _CreateLineString(self, parent, coordinate_list):
if not coordinate_list:
return None
linestring = ET.SubElement(parent, 'LineString')
tessellate = ET.SubElement(linestring, 'tessellate')
tessellate.text = '1'
if len(coordinate_list[0]) == 3:
altitude_mode = ET.SubElement(linestring, 'altitudeMode')
altitude_mode.text = 'absolute'
coordinates = ET.SubElement(linestring, 'coordinates')
if len(coordinate_list[0]) == 3:
coordinate_str_list = ['%f,%f,%f' % t for t in coordinate_list]
else:
coordinate_str_list = ['%f,%f' % t for t in coordinate_list]
coordinates.text = ' '.join(coordinate_str_list)
return linestring | Create a KML LineString element.
The points of the string are given in coordinate_list. Every element of
coordinate_list should be one of a tuple (longitude, latitude) or a tuple
(longitude, latitude, altitude).
Args:
parent: The parent ElementTree.Element instance.
coordinate_list: The list of coordinates.
Returns:
The LineString ElementTree.Element instance or None if coordinate_list is
empty. | juraj-google-style |
def move(self, x, y):
    """Move the virtual cursor to (x, y).

    Args:
        x (int): x-coordinate to place the cursor.
        y (int): y-coordinate to place the cursor.
    """
    # _normalizePoint validates/adjusts the coordinates before storing
    # them; presumably clamps or wraps into the console bounds -- confirm.
    self._cursor = self._normalizePoint(x, y)
Args:
x (int): x-coordinate to place the cursor.
y (int): y-coordinate to place the cursor.
.. seealso:: :any:`get_cursor`, :any:`print_str`, :any:`write` | codesearchnet |
def get(self, config_id):
    """Get a config by id.

    Args:
        config_id (str): Config ID.

    Returns:
        (Config): The config model.

    Raises:
        docker.errors.NotFound: If the config does not exist.
        docker.errors.APIError: If the server returns an error.
    """
    return self.prepare_model(self.client.api.inspect_config(config_id))
Args:
config_id (str): Config ID.
Returns:
(:py:class:`Config`): The config.
Raises:
:py:class:`docker.errors.NotFound`
If the config does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error. | juraj-google-style |
def global_variables_initializer():
    """Returns an Op that initializes global variables.

    This is a shortcut for `variables_initializer(global_variables())`.
    Under eager execution variables are initialized when created, so a
    no-op is returned instead.

    Returns:
        An Op that initializes global variables in the graph.
    """
    if not context.executing_eagerly():
        return variables_initializer(global_variables())
    return control_flow_ops.no_op(name='global_variables_initializer')
This is just a shortcut for `variables_initializer(global_variables())`
@compatibility(TF2)
In TF2, variables are initialized immediately when they are created. There is
no longer a need to run variable initializers before using them.
@end_compatibility
Returns:
An Op that initializes global variables in the graph. | github-repos |
def __init__(
    self, resolver_context, file_system, path_spec, is_root=False,
    is_virtual=False):
    """Initializes a file entry.

    Args:
        resolver_context (Context): resolver context.
        file_system (FileSystem): file system.
        path_spec (PathSpec): path specification.
        is_root (Optional[bool]): True if the file entry is the root file
            entry of the corresponding file system.
        is_virtual (Optional[bool]): True if the file entry is a virtual
            file entry.

    Raises:
        BackEndError: when the encoded stream is missing.
    """
    encoded_stream = resolver.Resolver.OpenFileObject(
        path_spec, resolver_context=resolver_context)
    if not encoded_stream:
        # Bug fix: the original formatted self.path_spec.comparable here,
        # but self.path_spec is only set by super().__init__() below, so
        # the error path raised AttributeError instead of BackEndError.
        # Use the path_spec argument instead.
        raise errors.BackEndError(
            'Unable to open encoded stream: {0:s}.'.format(
                path_spec.comparable))
    super(EncodedStreamFileEntry, self).__init__(
        resolver_context, file_system, path_spec, is_root=is_root,
        is_virtual=is_virtual)
    self._encoded_stream = encoded_stream
    self.entry_type = definitions.FILE_ENTRY_TYPE_FILE
Args:
resolver_context (Context): resolver context.
file_system (FileSystem): file system.
path_spec (PathSpec): path specification.
is_root (Optional[bool]): True if the file entry is the root file entry
of the corresponding file system.
is_virtual (Optional[bool]): True if the file entry is a virtual file
Raises:
BackEndError: when the encoded stream is missing. | juraj-google-style |
def _ParseMRUListExValue(self, registry_key):
    """Parses the MRUListEx value in a given Registry key.

    Args:
        registry_key (dfwinreg.WinRegistryKey): Windows Registry key that
            contains the MRUListEx value.

    Returns:
        mrulistex_entries: MRUListEx entries or None if not available.
    """
    mrulistex_value = registry_key.GetValueByName('MRUListEx')
    if not mrulistex_value:
        return None
    mrulistex_entries_map = self._GetDataTypeMap('mrulistex_entries')
    # The dtfabric map needs the total value size to know how many
    # entries to read.
    context = dtfabric_data_maps.DataTypeMapContext(values={
        'data_size': len(mrulistex_value.data)})
    return self._ReadStructureFromByteStream(
        mrulistex_value.data, 0, mrulistex_entries_map, context=context)
mrulistex_value.data, 0, mrulistex_entries_map, context=context) | Parses the MRUListEx value in a given Registry key.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUListEx value.
Returns:
mrulistex_entries: MRUListEx entries or None if not available. | juraj-google-style |
def poll(self, channel_id=None, json=None, **kwargs):
    """Read one or more events from a channel.

    Reads events (log records) from the identified channel, in
    chronological order.

    Args:
        channel_id (str): The channel ID.
        json (dict): Payload/request body.
        **kwargs: Supported HTTPClient.request parameters.

    Returns:
        requests.Response: Requests Response() object.
    """
    path = "/event-service/v1/channels/{}/poll".format(channel_id)
    return self._httpclient.request(
        method="POST",
        url=self.url,
        json=json,
        path=path,
        **kwargs
    )
Reads events (log records) from the identified channel. Events
are read in chronological order.
Args:
channel_id (str): The channel ID.
json (dict): Payload/request body.
**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.
Returns:
requests.Response: Requests Response() object.
Examples:
Refer to ``event_poll.py`` example. | juraj-google-style |
def _GetCachedEntryDataTypeMap(self, format_type, value_data, cached_entry_offset):
    """Determines the cached entry data type map.

    Args:
        format_type (int): format type.
        value_data (bytes): value data.
        cached_entry_offset (int): offset of the first cached entry data
            relative to the start of the value data.

    Returns:
        dtfabric.DataTypeMap: data type map that can be mapped onto binary
            data, or None if the data type map is not defined.

    Raises:
        ParseError: if the cached entry data type map cannot be determined.
    """
    if (format_type not in self._SUPPORTED_FORMAT_TYPES):
        raise errors.ParseError('Unsupported format type: {0:d}'.format(format_type))
    data_type_map_name = ''
    if (format_type == self._FORMAT_TYPE_XP):
        data_type_map_name = 'appcompatcache_cached_entry_xp_32bit'
    elif (format_type in (self._FORMAT_TYPE_8, self._FORMAT_TYPE_10)):
        data_type_map_name = 'appcompatcache_cached_entry_header_8'
    else:
        # 2003/Vista/7 formats: bitness must be probed from the entry
        # itself, by checking which path offset field is populated.
        cached_entry = self._ParseCommon2003CachedEntry(value_data, cached_entry_offset)
        if ((cached_entry.path_offset_32bit == 0) and (cached_entry.path_offset_64bit != 0)):
            number_of_bits = '64'
        else:
            number_of_bits = '32'
        if (format_type == self._FORMAT_TYPE_2003):
            data_type_map_name = 'appcompatcache_cached_entry_2003_{0:s}bit'.format(number_of_bits)
        elif (format_type == self._FORMAT_TYPE_VISTA):
            data_type_map_name = 'appcompatcache_cached_entry_vista_{0:s}bit'.format(number_of_bits)
        elif (format_type == self._FORMAT_TYPE_7):
            data_type_map_name = 'appcompatcache_cached_entry_7_{0:s}bit'.format(number_of_bits)
    return self._GetDataTypeMap(data_type_map_name)
Args:
format_type (int): format type.
value_data (bytes): value data.
cached_entry_offset (int): offset of the first cached entry data
relative to the start of the value data.
Returns:
dtfabric.DataTypeMap: data type map which contains a data type definition,
such as a structure, that can be mapped onto binary data or None
if the data type map is not defined.
Raises:
ParseError: if the cached entry data type map cannot be determined. | codesearchnet |
def _DictToListOfStrings(self, data_dict):
ret_list = []
for key, value in iter(data_dict.items()):
if key in ('body', 'datetime', 'type', 'room', 'rooms', 'id'):
continue
ret_list.append('{0:s} = {1!s}'.format(key, value))
return ret_list | Converts a dictionary into a list of strings.
Args:
data_dict (dict[str, object]): dictionary to convert.
Returns:
list[str]: list of strings. | juraj-google-style |
def delete_box(self, key):
    """Deletes the box specified by *key*.

    Args:
        key: identifier of the box to delete.

    Returns:
        The result of the DELETE request when *key* is truthy, otherwise
        the tuple (requests.codes.bad_request, None).
    """
    if not key:
        return requests.codes.bad_request, None
    uri = self.box_root_uri + '/' + key
    return self._req('delete', uri)
Args:
returns (status code for the DELETE request, success message dict) | juraj-google-style |
def _row_from_mapping(mapping, schema):
if (len(schema) == 0):
raise ValueError(_TABLE_HAS_NO_SCHEMA)
row = []
for field in schema:
if (field.mode == 'REQUIRED'):
row.append(mapping[field.name])
elif (field.mode == 'REPEATED'):
row.append(mapping.get(field.name, ()))
elif (field.mode == 'NULLABLE'):
row.append(mapping.get(field.name))
else:
raise ValueError('Unknown field mode: {}'.format(field.mode))
return tuple(row) | Convert a mapping to a row tuple using the schema.
Args:
mapping (Dict[str, object])
Mapping of row data: must contain keys for all required fields in
the schema. Keys which do not correspond to a field in the schema
are ignored.
schema (List[google.cloud.bigquery.schema.SchemaField]):
The schema of the table destination for the rows
Returns:
Tuple[object]:
Tuple whose elements are ordered according to the schema.
Raises:
ValueError: If schema is empty. | codesearchnet |
def get_neighbor_ip(ip_addr, cidr="30"):
    """Figure out the pair of usable host IPs on a /30 or /31 link.

    Args:
        ip_addr: Unicast IPv4 address string, e.g. "192.168.1.1".
        cidr: CIDR prefix length (30 or 31), as a string or int.

    Returns:
        (our_ip_addr, neighbor_ip_addr): tuple of dotted-quad strings.

    Raises:
        IndexError: if ip_addr is not a valid dotted-quad address.
    """
    our_octet = None
    neighbor_octet = None
    try:
        ip_addr_split = ip_addr.split(".")
        max_counter = 0
        # A /30 spans 4 addresses (network, two hosts, broadcast);
        # a /31 spans 2 addresses, both usable (RFC 3021 style).
        if int(cidr) == 30:
            ranger = 4
        elif int(cidr) == 31:
            ranger = 2
        while max_counter < 256:
            try:
                # Walk subnet-sized windows over the last octet until the
                # window containing ip_addr's host octet is found.
                if int(ip_addr_split[3]) >= max_counter and int(ip_addr_split[3]) < (max_counter + ranger):
                    if ranger == 4:
                        # /30: hosts are the two middle addresses.
                        our_octet = max_counter + 1
                        neighbor_octet = max_counter + 2
                        break
                    elif ranger == 2:
                        # /31: both addresses are usable.
                        our_octet = max_counter
                        neighbor_octet = max_counter + 1
                        break
                max_counter += ranger
            except UnboundLocalError:
                # `ranger` was never assigned: the mask was not 30 or 31.
                # NOTE(review): this prints and terminates the process.
                print("The mask between the neighbors must be 30, or 31")
                exit("BAD NEIGHBOR MASK")
        # Orient the pair so the address matching ip_addr comes first.
        if int(ip_addr_split[3]) == our_octet:
            our_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], our_octet)
            neighbor_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], neighbor_octet)
        elif int(ip_addr_split[3]) == neighbor_octet:
            neighbor_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], our_octet)
            our_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], neighbor_octet)
        else:
            # NOTE(review): ip_addr matched neither host (i.e. it was the
            # /30 network or broadcast address); the first usable pair is
            # returned anyway -- confirm this is intended.
            our_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], our_octet)
            neighbor_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], neighbor_octet)
        return our_ip_addr, neighbor_ip_addr
    except IndexError:
        LOGGER.critical('Function get_neighbor_ip IndexError ip_addr {item} cidr {cidr}'.format(item=ip_addr,
                                                                                                cidr=cidr))
        raise IndexError("You have entered invalid input, you must enter a ipv4 address")
Args:
ip_addr: Unicast IP address in the following format 192.168.1.1
cidr: CIDR value of 30, or 31
Returns: returns Our IP and the Neighbor IP in a tuple | juraj-google-style |
def CreateSharedBudget(client):
    """Creates an explicit shared budget used only to create the Campaign.

    Args:
        client: AdWordsClient the client to run the example with.

    Returns:
        dict An object representing a shared budget.
    """
    import uuid

    budget_service = client.GetService('BudgetService', version='v201809')
    # Bug fix: the budget name line was truncated in this copy (an
    # unterminated string literal). Rebuilt with a uuid suffix so repeated
    # runs do not collide on the budget name.
    budget = {
        'name': 'Shared Interplanetary Budget #%s' % uuid.uuid4(),
        'amount': {
            'microAmount': '2000000'
        },
        'deliveryMethod': 'STANDARD',
        'isExplicitlyShared': 'true'
    }
    operation = {
        'operator': 'ADD',
        'operand': budget
    }
    response = budget_service.mutate([operation])
    return response['value'][0]
Args:
client: AdWordsClient the client to run the example with.
Returns:
dict An object representing a shared budget. | juraj-google-style |
def load(url_or_handle, cache=None, **kwargs):
    """Load a file.

    The file format is inferred from the url's extension; when the
    extension is unknown, loading as an image is attempted as a fallback.

    Args:
        url_or_handle: a (reachable) URL, or an already open file handle.
        cache: passed through to the retrieval layer.
        **kwargs: forwarded to the chosen loader.

    Raises:
        RuntimeError: if the resource can be loaded neither by extension
            nor as an image.
    """
    ext = get_extension(url_or_handle)
    try:
        loader = loaders[ext.lower()]
        # loader.__name__[6:] strips a fixed-length prefix from the loader
        # function name for logging -- presumably "_load_"; confirm.
        message = "Using inferred loader '%s' due to passed file extension '%s'."
        log.debug(message, loader.__name__[6:], ext)
        return load_using_loader(url_or_handle, loader, cache, **kwargs)
    except KeyError:
        # No registered loader for this extension: try the image fallback.
        log.warning("Unknown extension '%s', attempting to load as image.", ext)
        try:
            with read_handle(url_or_handle, cache=cache) as handle:
                result = _load_img(handle)
        except Exception as e:
            message = "Could not load resource %s as image. Supported extensions: %s"
            log.error(message, url_or_handle, list(loaders))
            raise RuntimeError(message.format(url_or_handle, list(loaders)))
        else:
            log.info("Unknown extension '%s' successfully loaded as image.", ext)
            return result
File format is inferred from url. File retrieval strategy is inferred from
URL. Returned object type is inferred from url extension.
Args:
url_or_handle: a (reachable) URL, or an already open file handle
Raises:
RuntimeError: If file extension or URL is not supported. | juraj-google-style |
def make_connection_with_forwarded_port(self, host_port, device_port, uid=UNKNOWN_UID, cmd=ConnectionHandshakeCommand.INIT):
    """Makes a connection to the server with the given forwarded port.

    Assumes a device port has already been forwarded to a host port and
    only connects to the snippet server based on that forwarded port.
    Typically used by clients that share the same snippet server, e.g. the
    snippet client and its event client.

    Args:
        host_port: int, the host port which has already been forwarded.
        device_port: int, the device port listened by the snippet server.
        uid: int, the uid of the server session to continue; ignored when
            `cmd` requires the server to create a new session.
        cmd: ConnectionHandshakeCommand, whether the server should create
            a new session or reuse the current one.
    """
    self.host_port = host_port
    self.device_port = device_port
    # Fresh RPC id counter for the new connection.
    self._counter = self._id_counter()
    self.create_socket_connection()
    self.send_handshake_request(uid, cmd)
This process assumes that a device port has already been forwarded to a
host port, and it only makes a connection to the snippet server based on
the forwarded port. This is typically used by clients that share the same
snippet server, e.g. the snippet client and its event client.
Args:
host_port: int, the host port which has already been forwarded.
device_port: int, the device port listened by the snippet server.
uid: int, the uid of the server session to continue. It will be ignored
if the `cmd` requires the server to create a new session.
cmd: ConnectionHandshakeCommand, the handshake command Enum for the
server, which requires the server to create a new session or use the
current session. | github-repos |
def coord(self):
    """Return the Coordinator used by the Supervisor.

    The Coordinator can be useful when running multiple threads during
    training.

    Returns:
        A Coordinator object.
    """
    return self._coord
The Coordinator can be useful if you want to run multiple threads
during your training.
Returns:
A Coordinator object. | github-repos |
def insert(self, table, insert_obj, ignore=True):
    """Bulk-insert rows into *table*.

    Args:
        table (DeclarativeMeta): reflection of the target table.
        insert_obj (pd.DataFrame or list[dict]): the rows to insert; a
            DataFrame is converted to a list of record dicts.
        ignore (bool): when True, prefix the statement with IGNORE so
            conflicting rows are skipped. Defaults to True.

    Returns:
        The result of executing the insert statement on the session.

    Raises:
        ValueError: if the DataFrame is empty, or insert_obj is neither
            a DataFrame nor a list.
    """
    if isinstance(insert_obj, pd.DataFrame):
        if insert_obj.empty:
            raise ValueError('The input DataFrame is empty, please check!')
        insert_obj = insert_obj.to_dict(orient='records')
    elif not isinstance(insert_obj, list):
        raise ValueError(
            f"The {reprlib.repr(insert_obj)} must be list of dicts type!")
    prefix = 'IGNORE' if ignore else ''
    statement = table.__table__.insert().prefix_with(prefix)
    return self._session.execute(statement, insert_obj)
Arguments:
table {[DeclarativeMeta cls]} -- [reflection of table]
insert_obj {[pd.DataFrame or list of dicts]} -- [insert_obj]
Keyword Arguments:
ignore {bool} -- [whether to issue an INSERT IGNORE statement, skipping conflicting rows] (default: {True})
Raises:
ValueError -- [f"The {reprlib.repr(insert_obj)} must be list of dicts type!"]
Returns:
[type] -- [description] | juraj-google-style |
def __init__(self, action, error):
    """Construct the autosave error dialog.

    Args:
        action (str): what Spyder was trying to do when the error occurred.
        error (Exception): the error that occurred.
    """
    # Log the failure before presenting it to the user.
    logger.error(action, exc_info=error)
    QDialog.__init__(self)
    self.setWindowTitle(_('Autosave error'))
    self.setModal(True)
    layout = QVBoxLayout()
    header = _('Error message:')
    txt = '<br>{}<br><br>{}<br>{!s}'.format(action, header, error)
    layout.addWidget(QLabel(txt))
    layout.addSpacing(15)
    # Checkbox letting the user silence further autosave errors this
    # session; callers read self.dismiss_box after exec().
    txt = _("Hide all future autosave-related errors during this session")
    self.dismiss_box = QCheckBox(txt)
    layout.addWidget(self.dismiss_box)
    layout.addSpacing(15)
    button_box = QDialogButtonBox(QDialogButtonBox.Ok)
    button_box.accepted.connect(self.accept)
    layout.addWidget(button_box)
    self.setLayout(layout)
Args:
action (str): what Spyder was trying to do when error occured
error (Exception): the error that occured | juraj-google-style |
def ConvertOutputToUnicode(self, buf):
    """Converts a console output string *buf* to unicode.

    Mainly used for testing: allows comparisons in unicode while ensuring
    that unicode => encoding => unicode round-trips work. Undecodable
    bytes are replaced.

    Args:
        buf: The console output (str or bytes) to convert.

    Returns:
        The console output converted to a unicode str.
    """
    raw = buf.encode(self._encoding) if isinstance(buf, str) else buf
    return str(raw, self._encoding, 'replace')
Mainly used for testing. Allows test comparisons in unicode while ensuring
that unicode => encoding => unicode works.
Args:
buf: The console output string to convert.
Returns:
The console output string buf converted to unicode. | github-repos |
def __init__(self, dims):
    """Constructs a shape for a Tensor or Mesh.

    Args:
        dims: List-like of Dimensions.

    Raises:
        ValueError: If Dimensions are repeated.
    """
    self._dims = [convert_to_dimension(d) for d in tuple(dims)]
    # Uniqueness is checked against the raw inputs, before conversion.
    if len(set(dims)) != len(dims):
        raise ValueError("Shape must not have repeated dimensions %s" % dims)
Args:
dims: List-like of Dimensions.
Raises:
ValueError: If Dimensions are repeated. | juraj-google-style |
def load_entity(self, name, file_name, reload_cache=False):
    """Loads an entity, optionally checking the cache first.

    Args:
        name (str): The associated name of the entity.
        file_name (str): The location of the entity file.
        reload_cache (bool): Whether to refresh all of cache.
    """
    Entity.verify_name(name)
    self.entities.load(Entity.wrap_name(name), file_name, reload_cache)
    # The exact-match engine (padaos) gets the raw entity lines as well.
    with open(file_name) as f:
        self.padaos.add_entity(name, f.read().split('\n'))
    # Loading new data invalidates any previously trained model.
    self.must_train = True
Args:
name (str): The associated name of the entity
file_name (str): The location of the entity file
reload_cache (bool): Whether to refresh all of cache | juraj-google-style |
def __init__(self, etk, cdr_document: Dict, mime_type, url, doc_id=None) -> None:
    """Wrapper object for CDR documents.

    Args:
        etk (ETK): the etk object, embedded so docs can reach global info.
        cdr_document (Dict): the raw CDR document received in ETK.
        mime_type: mime type of the document.
        url: source url of the document.
        doc_id: optional document id; stored into the CDR document.
    """
    Segment.__init__(self, json_path="$", _value=cdr_document, _document=self)
    self.etk = etk
    self.cdr_document = cdr_document
    self.mime_type = mime_type
    self.url = url
    if doc_id:
        self.cdr_document["doc_id"] = doc_id
    self.extraction_provenance_records = list()
    # A knowledge graph is only attached when a schema is configured.
    if self.etk.kg_schema:
        self.kg = KnowledgeGraph(self.etk.kg_schema, self.etk.ontology, self)
    else:
        self.kg = None
    if not self.etk.kg_schema:
        self.etk.log("Schema not found.", "warning", self.doc_id, self.url)
    # Provenance bookkeeping indexes.
    self._provenance_id_index = 0
    self._provenances = dict()
    self._jsonpath_provenances = dict()
    self._kg_provenances = dict()
Args:
etk (ETK): embed the etk object so that docs have access to global info.
cdr_document (JSON): the raw CDR document received in ETK.
Returns: the wrapped CDR document | juraj-google-style |
def auth(self, token):
    """Refresh an existing Skype token, extending its expiry time without
    other credentials.

    Args:
        token (str): existing Skype token.

    Returns:
        (str, datetime.datetime) tuple: Skype token, and associated expiry
        if known.

    Raises:
        SkypeAuthException: if the login request is rejected.
        SkypeApiException: if the login form can't be processed.
    """
    return self.getToken(self.sendToken(token))
Args:
token (str): existing Skype token
Returns:
(str, datetime.datetime) tuple: Skype token, and associated expiry if known
Raises:
.SkypeAuthException: if the login request is rejected
.SkypeApiException: if the login form can't be processed | codesearchnet |
def _snapshot_task_progresses(self) -> Iterable[_pywrap_server_lib.SnapshotTaskProgressWrapper]:
    """Returns the progresses of the snapshot tasks currently being executed.

    Returns:
        An `Iterable[common_pb2.SnapshotTaskProgress]`.
    """
    return self._server.snapshot_task_progresses()
Returns:
An `Iterable[common_pb2.SnapshotTaskProgress]`. | github-repos |
def make_iterable(value):
    """Converts the supplied value to an iterable in the form of a list.

    Strings and dicts are wrapped in a single-element list; any other
    iterable is returned unchanged.

    Args:
        value (object): A valid Python object.

    Returns:
        An iterable object of type list (when wrapping occurred).

    Raises:
        TypeError: if *value* is not iterable.
    """
    if sys.version_info <= (3, 0):
        if isinstance(value, unicode):  # noqa: F821 -- Python 2 only
            value = str(value)
        iterable_abc = collections.Iterable
    else:
        # Bug fix: collections.Iterable was removed in Python 3.10; the
        # ABC lives in collections.abc on Python 3.
        from collections.abc import Iterable as iterable_abc
    if isinstance(value, str) or isinstance(value, dict):
        value = [value]
    if not isinstance(value, iterable_abc):
        raise TypeError('value must be an iterable object')
    return value
This function will inspect the supplied value and return an
iterable in the form of a list.
Args:
value (object): An valid Python object
Returns:
An iterable object of type list | codesearchnet |
def hex(self):
    """Returns the HTML-style hex code for the Colour.

    Returns:
        str: the colour as an HTML-style hex string, e.g. ``#ff00aa``.
    """
    # NOTE(review): the original return line was truncated in this copy;
    # reconstructed from the documented contract assuming the colour keeps
    # its 24-bit RGB value in ``self.value`` -- confirm against the class.
    return '#{:0>6x}'.format(self.value)
Returns:
str: the colour as an HTML-style hex string
def fastrcnn_predictions(boxes, scores):
    """Generate final results from predictions of all proposals.

    Args:
        boxes: n x #class x 4 floatbox in float32.
        scores: n x #class.

    Returns:
        boxes: Kx4
        scores: K
        labels: K
    """
    assert boxes.shape[1] == cfg.DATA.NUM_CLASS
    assert scores.shape[1] == cfg.DATA.NUM_CLASS
    # Drop the background class (index 0) and put the class axis first.
    # Bug fix: the original used the unparseable subscripts `[(1:, :, :)]`
    # and `[(:, 1:)]` (an extraction artifact); restored valid slice syntax.
    boxes = tf.transpose(boxes, [1, 0, 2])[1:, :, :]
    scores = tf.transpose(scores[:, 1:], [1, 0])

    def f(X):
        """prob: n probabilities; box: nx4 boxes.

        Returns: n booleans, the per-class selection mask.
        """
        prob, box = X
        output_shape = tf.shape(prob, out_type=tf.int64)
        # Threshold first so NMS only sees plausible detections.
        ids = tf.reshape(tf.where(prob > cfg.TEST.RESULT_SCORE_THRESH), [-1])
        prob = tf.gather(prob, ids)
        box = tf.gather(box, ids)
        selection = tf.image.non_max_suppression(
            box, prob, cfg.TEST.RESULTS_PER_IM, cfg.TEST.FRCNN_NMS_THRESH)
        selection = tf.gather(ids, selection)
        if get_tf_version_tuple() >= (1, 13):
            sorted_selection = tf.sort(selection, direction='ASCENDING')
            mask = tf.sparse.SparseTensor(
                indices=tf.expand_dims(sorted_selection, 1),
                values=tf.ones_like(sorted_selection, dtype=tf.bool),
                dense_shape=output_shape)
            mask = tf.sparse.to_dense(mask, default_value=False)
        else:
            # tf.sort is unavailable before 1.13; emulate an ascending
            # sort by negating through top_k.
            sorted_selection = -tf.nn.top_k(-selection, k=tf.size(selection))[0]
            mask = tf.sparse_to_dense(
                sparse_indices=sorted_selection,
                output_shape=output_shape,
                sparse_values=True,
                default_value=False)
        return mask

    # Presumably works around a map_fn issue in TF 1.11/1.12 -- the
    # original forces serial iteration on those versions; confirm.
    buggy_tf = get_tf_version_tuple() in [(1, 11), (1, 12)]
    masks = tf.map_fn(
        f, (scores, boxes), dtype=tf.bool,
        parallel_iterations=1 if buggy_tf else 10)
    selected_indices = tf.where(masks)
    scores = tf.boolean_mask(scores, masks)
    # Keep only the overall top-K detections across all classes.
    topk_scores, topk_indices = tf.nn.top_k(
        scores, tf.minimum(cfg.TEST.RESULTS_PER_IM, tf.size(scores)), sorted=False)
    filtered_selection = tf.gather(selected_indices, topk_indices)
    cat_ids, box_ids = tf.unstack(filtered_selection, axis=1)
    final_scores = tf.identity(topk_scores, name='scores')
    # Class index 0 was stripped above, so labels are offset by one.
    final_labels = tf.add(cat_ids, 1, name='labels')
    final_ids = tf.stack([cat_ids, box_ids], axis=1, name='all_ids')
    final_boxes = tf.gather_nd(boxes, final_ids, name='boxes')
    return final_boxes, final_scores, final_labels
Args:
boxes: n#classx4 floatbox in float32
scores: nx#class
Returns:
boxes: Kx4
scores: K
labels: K | codesearchnet |
def dequantize(arr, min_val, max_val, levels, dtype=np.float64):
    """Dequantize an array.

    Maps integer bin indices back to the centers of their value ranges.

    Args:
        arr (ndarray): Input array of quantization bins.
        min_val (scalar): Minimum value to be clipped.
        max_val (scalar): Maximum value to be clipped.
        levels (int): Quantization levels; must be an integer > 1.
        dtype (np.type): The type of the dequantized array.

    Returns:
        ndarray: Dequantized array.

    Raises:
        ValueError: if *levels* is invalid or min_val >= max_val.
    """
    if not isinstance(levels, int) or levels <= 1:
        raise ValueError(
            'levels must be a positive integer, but got {}'.format(levels))
    if min_val >= max_val:
        raise ValueError(
            'min_val ({}) must be smaller than max_val ({})'.format(
                min_val, max_val))
    span = max_val - min_val
    bin_centers = (arr + 0.5).astype(dtype)
    return bin_centers * span / levels + min_val
Args:
arr (ndarray): Input array.
min_val (scalar): Minimum value to be clipped.
max_val (scalar): Maximum value to be clipped.
levels (int): Quantization levels.
dtype (np.type): The type of the dequantized array.
Returns:
tuple: Dequantized array. | juraj-google-style |
def _prefix_from_prefix_string(self, prefixlen_str):
    """Turn a prefix length string into an integer.

    Args:
        prefixlen_str: A decimal string containing the prefix length.

    Returns:
        The prefix length as an integer.

    Raises:
        NetmaskValueError: If the input is malformed or out of range.
    """
    try:
        # Reject anything but decimal digits up front (int() alone would
        # also accept '+', '-', whitespace, etc.), then bounds-check.
        if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
            raise ValueError
        prefixlen = int(prefixlen_str)
        if not 0 <= prefixlen <= self._max_prefixlen:
            raise ValueError
    except ValueError:
        raise NetmaskValueError(
            '%s is not a valid prefix length' % prefixlen_str)
    return prefixlen
Args:
prefixlen_str: A decimal string containing the prefix length.
Returns:
The prefix length as an integer.
Raises:
NetmaskValueError: If the input is malformed or out of range. | codesearchnet |
def date_clean(date, dashboard_style=False):
    """Clean the numerical date value in order to present it.

    Args:
        date: numerical date, e.g. 20160205.
        dashboard_style (bool): when True, format as MM/DD/YYYY instead
            of ISO-style YYYY-MM-DD.

    Returns:
        str: stringified version of the input date ("2016-02-05").
    """
    text = str(date)
    year, month, day = text[:4], text[4:6], text[6:]
    if dashboard_style:
        return '{}/{}/{}'.format(month, day, year)
    return '{}-{}-{}'.format(year, month, day)
Args:
boo: numerical date (20160205)
Returns:
Stringified version of the input date ("2016-02-05") | juraj-google-style |
def _process_parameter_type(param, param_name, func):
    """Process and format a parameter's type annotation.

    Args:
        param (`inspect.Parameter`): The parameter from the function
            signature.
        param_name (`str`): The name of the parameter.
        func (`function`): The function the parameter belongs to.

    Returns:
        tuple(str, bool): the rendered type string (empty when the
        parameter has no annotation) and whether it was Optional.
    """
    optional = False
    if param.annotation != inspect.Parameter.empty:
        param_type = param.annotation
        if 'typing' in str(param_type):
            # Strip the 'typing.' prefixes and shorten transformers paths.
            param_type = ''.join(str(param_type).split('typing.')).replace('transformers.', '~')
        elif hasattr(param_type, '__module__'):
            param_type = f'{param_type.__module__.replace('transformers.', '~').replace('builtins', '')}.{param.annotation.__name__}'
            # Builtins render with an empty module, leaving a leading dot.
            if param_type[0] == '.':
                param_type = param_type[1:]
        elif False:
            # Intentionally dead debug branch; flip the condition to log
            # unrenderable annotation types during development.
            print(f'🚨 {param_type} for {param_name} of {func.__qualname__} in file {func.__code__.co_filename} has an invalid type')
        if 'ForwardRef' in param_type:
            # Unwrap ForwardRef('X') down to the bare name X.
            param_type = re.sub("ForwardRef\\('([\\w.]+)'\\)", '\\1', param_type)
        if 'Optional' in param_type:
            param_type = re.sub('Optional\\[(.*?)\\]', '\\1', param_type)
            optional = True
    else:
        param_type = ''
    return (param_type, optional)
Args:
param (`inspect.Parameter`): The parameter from the function signature
param_name (`str`): The name of the parameter
func (`function`): The function the parameter belongs to | github-repos |
def _openfile(instance, filething, filename, fileobj, writable, create):
    """Yields a FileThing wrapping the requested file.

    Args:
        instance: object whose ``filename`` attribute may be read/updated,
            or None.
        filething: Either a file name, a file object, a FileThing, or None.
        filename: Either a file name or None.
        fileobj: Either a file object or None.
        writable (bool): if the file should be opened for writing.
        create (bool): if the file should be created when it doesn't
            exist; implies writable.

    Raises:
        MutagenError: In case opening the file failed.
        TypeError: in case neither a file name nor a file object is passed.
    """
    assert ((not create) or writable)
    # Unpack a FileThing into its parts and process them below.
    if isinstance(filething, FileThing):
        filename = filething.filename
        fileobj = filething.fileobj
        filething = None
    if (filething is not None):
        if is_fileobj(filething):
            fileobj = filething
        elif hasattr(filething, '__fspath__'):
            # os.PathLike support.
            filename = filething.__fspath__()
            if (not isinstance(filename, (bytes, text_type))):
                raise TypeError('expected __fspath__() to return a filename')
        else:
            filename = filething
    if (instance is not None):
        # Read path: remember the filename on the instance; write path:
        # fall back to the instance's previously stored filename.
        if (not writable):
            instance.filename = filename
        elif (filename is None):
            filename = getattr(instance, 'filename', None)
    if (fileobj is not None):
        verify_fileobj(fileobj, writable=writable)
        (yield FileThing(fileobj, filename, (filename or fileobj_name(fileobj))))
    elif (filename is not None):
        verify_filename(filename)
        inmemory_fileobj = False
        try:
            fileobj = open(filename, ('rb+' if writable else 'rb'))
        except IOError as e:
            if (writable and (e.errno == errno.EOPNOTSUPP)):
                # The filesystem rejects read-write mode (e.g. some fuse
                # mounts); edit an in-memory copy and flush it back below.
                try:
                    with open(filename, 'rb') as fileobj:
                        fileobj = BytesIO(fileobj.read())
                except IOError as e2:
                    raise MutagenError(e2)
                inmemory_fileobj = True
            elif (create and (e.errno == errno.ENOENT)):
                assert writable
                try:
                    fileobj = open(filename, 'wb+')
                except IOError as e2:
                    raise MutagenError(e2)
            else:
                raise MutagenError(e)
        with fileobj as fileobj:
            (yield FileThing(fileobj, filename, filename))
        if inmemory_fileobj:
            # Write the edited in-memory copy back to disk.
            assert writable
            data = fileobj.getvalue()
            try:
                with open(filename, 'wb') as fileobj:
                    fileobj.write(data)
            except IOError as e:
                raise MutagenError(e)
    else:
        raise TypeError('Missing filename or fileobj argument')
Args:
filething: Either a file name, a file object or None
filename: Either a file name or None
fileobj: Either a file object or None
writable (bool): if the file should be opened
create (bool): if the file should be created if it doesn't exist.
implies writable
Raises:
MutagenError: In case opening the file failed
TypeError: in case neither a file name or a file object is passed | codesearchnet |
def write_file(self, path, contents):
    """Write a file of any type to the destination path.

    Useful for files like robots.txt, manifest.json, and so on. Parent
    directories are created as needed.

    Args:
        path (str): The name of the file to write to.
        contents (str or bytes): The contents to write.
    """
    target = self._get_dist_path(path)
    parent = os.path.dirname(target)
    if not os.path.isdir(parent):
        os.makedirs(parent)
    mode = 'wb+' if isinstance(contents, bytes) else 'w'
    with open(target, mode) as handle:
        handle.write(contents)
file.write(contents) | Write a file of any type to the destination path. Useful for files like
robots.txt, manifest.json, and so on.
Args:
path (str): The name of the file to write to.
contents (str or bytes): The contents to write. | juraj-google-style |
def to_json(self):
    """Creates a JSON serializable representation of this instance.

    Returns:
        dict: For example::

            {
                "lat": 9.3470298,
                "lon": 3.79274,
                "time": "2016-07-15T15:27:53.574110"
            }
    """
    timestamp = None if self.time is None else self.time.isoformat()
    return {'lat': self.lat, 'lon': self.lon, 'time': timestamp}
Returns:
:obj:`dict`: For example,
{
"lat": 9.3470298,
"lon": 3.79274,
"time": "2016-07-15T15:27:53.574110"
} | codesearchnet |
def remove(package_name):
    """Removes a holodeck package.

    Args:
        package_name (str): the name of the package to remove.

    Raises:
        HolodeckException: if the package name is unknown.
    """
    if package_name not in packages:
        raise HolodeckException('Unknown package name ' + package_name)
    # Delete the on-disk tree of every installed package with this name.
    matching_paths = (
        path for config, path in _iter_packages()
        if config['name'] == package_name)
    for path in matching_paths:
        shutil.rmtree(path)
Args:
package_name (str): the name of the package to remove | codesearchnet |
def pack_small_tensors(tower_grads, max_bytes=0):
    """Concatenate gradients together more intelligently (bin-packing).

    Runs of consecutive gradients whose combined size stays at or under
    *max_bytes* are packed into one flat tensor per device; everything
    else passes through untouched.

    Args:
        tower_grads: List of lists of (gradient, variable) tuples.
        max_bytes: Int giving max number of bytes in a tensor that
            may be considered small.

    Returns:
        (new_tower_grads, packing): the (possibly packed) gradients and a
        packing descriptor needed to unpack them later; packing is None
        when nothing was packed.
    """
    assert max_bytes >= 0
    orig_grads = [g for g, _ in tower_grads[0]]
    # Size bookkeeping assumes float32, i.e. 4 bytes per element.
    assert all(g.dtype == tf.float32 for g in orig_grads)
    sizes = [4 * g.shape.num_elements() for g in orig_grads]
    print_stats(sizes)
    small_ranges = []
    large_indices = []
    new_sizes = []

    def end_interval(indices, small_ranges, large_indices):
        # A run of 2+ tensors becomes a packed range; a lone tensor stays
        # unpacked.
        if len(indices) > 1:
            small_ranges.insert(0, [indices[0], indices[-1]])
        else:
            large_indices.insert(0, indices[0])

    # Scan from the end so ranges can be prepended in index order.
    cur_range = []
    cur_size = 0
    for i, s in reversed(list(enumerate(sizes))):
        if cur_size > max_bytes:
            end_interval(cur_range, small_ranges, large_indices)
            new_sizes.insert(0, cur_size)
            cur_range = []
            cur_size = 0
        cur_range.insert(0, i)
        cur_size += s
    end_interval(cur_range, small_ranges, large_indices)
    new_sizes.insert(0, cur_size)
    print_stats(new_sizes)
    num_gv = len(orig_grads)
    packing = {}
    if len(small_ranges):
        new_tower_grads = []
        for dev_idx, gv_list in enumerate(tower_grads):
            # Bug fix: the original assert message was an unterminated
            # string literal truncated at "see https:"; the message is
            # restored below (URL reconstructed -- confirm against the
            # upstream source).
            assert len(gv_list) == num_gv, (
                "Possible cause: "
                "Networks constructed on different workers don't have the "
                "same number of variables. If you use tf.GraphKeys or "
                "tf.global_variables() with multiple graphs per worker "
                "during network construction, you need to use appropriate "
                "scopes, see https://github.com/ray-project/ray")
            new_gv_list = []
            for r in small_ranges:
                key = '%d:%d' % (dev_idx, len(new_gv_list))
                new_gv_list.append((pack_range(key, packing, gv_list, r),
                                    'packing_var_placeholder'))
            for i in large_indices:
                new_gv_list.append(gv_list[i])
            new_tower_grads.append(new_gv_list)
        return new_tower_grads, packing
    else:
        return tower_grads, None
Does binpacking
Args:
tower_grads: List of lists of (gradient, variable) tuples.
max_bytes: Int giving max number of bytes in a tensor that
may be considered small. | codesearchnet |
def format_sympy_expr(sympy_expr, functions=None):
    """Convert sympy expression into a string which can be encoded.

    Args:
        sympy_expr: Any sympy expression tree or string.
        functions: Defines special functions. A dict mapping human readable
            string names, like "log", "exp", "sin", "cos", etc., to single
            chars. Each function gets a unique token, like "L" for "log".

    Returns:
        A string representation of the expression suitable for encoding as a
        sequence input.
    """
    if functions is None:
        functions = {}
    # Render the expression and strip all whitespace.
    result = str(sympy_expr).replace(" ", "")
    # Substitute each function name with its single-char token.  Plain
    # dict.items() replaces the Python-2-era six.iteritems(); iteration
    # order (insertion order) is unchanged on Python 3.
    for fn_name, char in functions.items():
        result = result.replace(fn_name, char)
    return result
def WinChmod(filename, acl_list, user=None):
    """Provide chmod-like functionality for windows.

    Builds a new DACL containing a single access-allowed ACE for ``user``
    and installs it on ``filename``, replacing the file's existing DACL.

    Doco links:
      goo.gl/n7YR1
      goo.gl/rDv81
      goo.gl/hDobb

    Args:
        filename: target filename for acl.
        acl_list: list of ntsecuritycon acl strings to be applied with
            bitwise OR, e.g. ["FILE_GENERIC_READ", "FILE_GENERIC_WRITE"].
        user: username string. If not specified we use the user we are
            running as.

    Raises:
        AttributeError: if a bad permission is passed.
        RuntimeError: if filename doesn't exist.
    """
    if user is None:
        user = win32api.GetUserName()
    if not os.path.exists(filename):
        raise RuntimeError("filename %s does not exist" % filename)
    # OR together the numeric values of the named permissions; getattr
    # raises AttributeError for an unknown permission name.
    acl_bitmask = 0
    for acl in acl_list:
        acl_bitmask |= getattr(ntsecuritycon, acl)
    dacl = win32security.ACL()
    # Resolve the username to a SID on the local system ("" = local lookup).
    win_user, _, _ = win32security.LookupAccountName("", user)
    dacl.AddAccessAllowedAce(win32security.ACL_REVISION, acl_bitmask, win_user)
    security_descriptor = win32security.GetFileSecurity(
        filename, win32security.DACL_SECURITY_INFORMATION)
    # NOTE(review): DACL_PRESENT / DACL_DEFAULT are module-level constants
    # defined outside this block -- presumably 1 and 0; confirm at the top
    # of the file.
    security_descriptor.SetSecurityDescriptorDacl(DACL_PRESENT, dacl,
                                                  DACL_DEFAULT)
    win32security.SetFileSecurity(
        filename, win32security.DACL_SECURITY_INFORMATION, security_descriptor)
def save_aggregate_report_to_elasticsearch(aggregate_report,
                                           index_suffix=None,
                                           monthly_indexes=False):
    """Saves a parsed DMARC aggregate report to Elasticsearch.

    One `_AggregateReportDoc` is indexed per record in the report, into a
    date-suffixed ``dmarc_aggregate`` index.

    Args:
        aggregate_report (OrderedDict): A parsed aggregate report.
        index_suffix (str): The suffix of the name of the index to save to.
        monthly_indexes (bool): Use monthly indexes instead of daily indexes.

    Raises:
        AlreadySaved: If the same report (org, report ID, domain, date
            range) already exists in Elasticsearch.
        ElasticsearchError: If saving a document fails.
    """
    logger.debug("Saving aggregate report to Elasticsearch")
    # Shallow copy so the begin_date/end_date keys added below do not
    # mutate the caller's dict.
    aggregate_report = aggregate_report.copy()
    metadata = aggregate_report["report_metadata"]
    org_name = metadata["org_name"]
    report_id = metadata["report_id"]
    domain = aggregate_report["policy_published"]["domain"]
    begin_date = human_timestamp_to_datetime(metadata["begin_date"])
    end_date = human_timestamp_to_datetime(metadata["end_date"])
    begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%S")
    end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%S")
    if monthly_indexes:
        index_date = begin_date.strftime("%Y-%m")
    else:
        index_date = begin_date.strftime("%Y-%m-%d")
    aggregate_report["begin_date"] = begin_date
    aggregate_report["end_date"] = end_date
    date_range = [aggregate_report["begin_date"],
                  aggregate_report["end_date"]]

    # Deduplication: look for an existing document matching org, report ID,
    # domain and both ends of the date range before indexing anything.
    org_name_query = Q(dict(match=dict(org_name=org_name)))
    report_id_query = Q(dict(match=dict(report_id=report_id)))
    domain_query = Q(dict(match={"published_policy.domain": domain}))
    begin_date_query = Q(dict(match=dict(date_range=begin_date)))
    end_date_query = Q(dict(match=dict(date_range=end_date)))
    search = Search(index="dmarc_aggregate*")
    query = org_name_query & report_id_query & domain_query
    query = query & begin_date_query & end_date_query
    search.query = query
    existing = search.execute()
    if len(existing) > 0:
        raise AlreadySaved("An aggregate report ID {0} from {1} about {2} "
                           "with a date range of {3} UTC to {4} UTC already "
                           "exists in "
                           "Elasticsearch".format(report_id,
                                                  org_name,
                                                  domain,
                                                  begin_date_human,
                                                  end_date_human))
    published_policy = _PublishedPolicy(
        domain=aggregate_report["policy_published"]["domain"],
        adkim=aggregate_report["policy_published"]["adkim"],
        aspf=aggregate_report["policy_published"]["aspf"],
        p=aggregate_report["policy_published"]["p"],
        sp=aggregate_report["policy_published"]["sp"],
        pct=aggregate_report["policy_published"]["pct"],
        fo=aggregate_report["policy_published"]["fo"]
    )
    # One Elasticsearch document per record in the aggregate report.
    for record in aggregate_report["records"]:
        agg_doc = _AggregateReportDoc(
            # NOTE(review): "xml_schemea" looks like a typo for
            # "xml_schema", but it must match the _AggregateReportDoc field
            # name -- confirm before renaming.
            xml_schemea=aggregate_report["xml_schema"],
            org_name=metadata["org_name"],
            org_email=metadata["org_email"],
            org_extra_contact_info=metadata["org_extra_contact_info"],
            report_id=metadata["report_id"],
            date_range=date_range,
            errors=metadata["errors"],
            published_policy=published_policy,
            source_ip_address=record["source"]["ip_address"],
            source_country=record["source"]["country"],
            source_reverse_dns=record["source"]["reverse_dns"],
            source_base_domain=record["source"]["base_domain"],
            message_count=record["count"],
            disposition=record["policy_evaluated"]["disposition"],
            dkim_aligned=record["policy_evaluated"]["dkim"] == "pass",
            spf_aligned=record["policy_evaluated"]["spf"] == "pass",
            header_from=record["identifiers"]["header_from"],
            envelope_from=record["identifiers"]["envelope_from"],
            envelope_to=record["identifiers"]["envelope_to"]
        )
        for override in record["policy_evaluated"]["policy_override_reasons"]:
            agg_doc.add_policy_override(type_=override["type"],
                                        comment=override["comment"])
        for dkim_result in record["auth_results"]["dkim"]:
            agg_doc.add_dkim_result(domain=dkim_result["domain"],
                                    selector=dkim_result["selector"],
                                    result=dkim_result["result"])
        for spf_result in record["auth_results"]["spf"]:
            agg_doc.add_spf_result(domain=spf_result["domain"],
                                   scope=spf_result["scope"],
                                   result=spf_result["result"])
        # Index name: dmarc_aggregate[_suffix]-YYYY-MM[-DD].
        index = "dmarc_aggregate"
        if index_suffix:
            index = "{0}_{1}".format(index, index_suffix)
        index = "{0}-{1}".format(index, index_date)
        create_indexes([index])
        agg_doc.meta.index = index
        try:
            agg_doc.save()
        except Exception as e:
            raise ElasticsearchError(
                "Elasticsearch error: {0}".format(e.__str__()))
def load_all_yamls(cls, directories):
    """Loads yaml files from all given directories.

    Recursively walks each directory and parses every ``*.yaml`` file found
    with ``cls.load_yaml_by_path``.

    Args:
        cls: Class providing ``load_yaml_by_path(path)`` for a single file.
        directories: list of directories to search.

    Returns:
        dict of {fullpath: loaded_yaml_structure}
    """
    yaml_files = []
    for d in directories:
        # Auto-create missing per-user directories.  exist_ok=True avoids
        # the check-then-create race of the original os.path.exists() guard.
        if d.startswith('/home'):
            os.makedirs(d, exist_ok=True)
        for dirname, _subdirs, files in os.walk(d):
            yaml_files.extend(
                os.path.join(dirname, f) for f in files if f.endswith('.yaml'))
    return {f: cls.load_yaml_by_path(f) for f in yaml_files}
def DeleteSignedBinary(binary_urn, token=None):
    """Deletes the binary with the given urn from the datastore.

    Depending on configuration, the binary is removed from the legacy AFF4
    datastore, the relational datastore, or both.

    Args:
        binary_urn: RDFURN that serves as a unique identifier for the binary.
        token: ACL token to use with the legacy (non-relational) datastore.

    Raises:
        SignedBinaryNotFoundError: If the signed binary does not exist.
    """
    if _ShouldUseLegacyDatastore():
        # Open first as an existence check -- presumably Delete alone would
        # not distinguish a missing binary (TODO confirm).
        try:
            aff4.FACTORY.Open(binary_urn, aff4_type=aff4.AFF4Stream, mode='r', token=token)
        except aff4.InstantiationError:
            raise SignedBinaryNotFoundError(binary_urn)
        aff4.FACTORY.Delete(binary_urn, token=token)
    if data_store.RelationalDBEnabled():
        try:
            data_store.REL_DB.ReadSignedBinaryReferences(_SignedBinaryIDFromURN(binary_urn))
        except db.UnknownSignedBinaryError:
            if _ShouldUseLegacyDatastore():
                # The legacy copy was already deleted above; a binary missing
                # from the relational store is tolerated in dual-store mode.
                return
            else:
                raise SignedBinaryNotFoundError(binary_urn)
        data_store.REL_DB.DeleteSignedBinaryReferences(_SignedBinaryIDFromURN(binary_urn))
def open_file(cls, filename: str, response: BaseResponse, mode='wb+'):
    """Open a file object on to the Response Body.

    Creates any missing parent directories, then attaches an open file
    handle at ``filename`` to ``response.body``.

    Args:
        filename: The path where the file is to be saved.
        response: Response whose body will wrap the opened file.
        mode: The file mode passed to ``open``.
    """
    _logger.debug('Saving file to {0}, mode={1}.',
                  filename, mode)
    dir_path = os.path.dirname(filename)
    if dir_path:
        # exist_ok=True avoids the check-then-create race of the original
        # os.path.exists() guard when several downloads share a directory.
        os.makedirs(dir_path, exist_ok=True)
    response.body = Body(open(filename, mode))
def _create_keras_history_helper(tensors, processed_ops, created_layers):
    """Helper method for `create_keras_history`.

    Recursively walks the TensorFlow graph backwards from `tensors`,
    wrapping every op that lacks Keras metadata in a `TensorFlowOpLayer`.

    Args:
        tensors: A structure of Tensors for which to create Keras metadata.
        processed_ops: Set. TensorFlow operations that have already been
            wrapped in `TensorFlowOpLayer` instances.
        created_layers: List. The `TensorFlowOpLayer` instances created.

    Returns:
        Tuple. First element is the updated set of TensorFlow Operations
        that have been wrapped in `TensorFlowOpLayer` instances. Second
        element is a list of the `TensorFlowOpLayer` instances created.

    Raises:
        ValueError: If called with eager execution enabled, or if any
            sparse/ragged tensors are encountered (unsupported here).
    """
    if ops.executing_eagerly_outside_functions():
        raise ValueError('`create_keras_history` should only be called if eager is disabled!')
    # Imported here to avoid a circular import at module load time.
    from tensorflow.python.keras.engine import base_layer
    tensor_list = nest.flatten(tensors)
    # Sparse and ragged tensors cannot be wrapped; collect them so a single
    # descriptive error can be raised at the end.
    sparse_ops = []
    ragged_tensors = []
    for tensor in tensor_list:
        # Tensors that already carry Keras history need no wrapping.
        if getattr(tensor, '_keras_history', None) is not None:
            continue
        if isinstance(tensor, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
            sparse_ops.append(tensor.op)
            continue
        if tf_utils.is_ragged(tensor):
            ragged_tensors.append(tensor)
            continue
        op = tensor.op
        if op not in processed_ops:
            op_inputs = list(op.inputs)
            constants = {}
            layer_inputs = []
            for i, op_input in enumerate(op_inputs):
                if uses_keras_history(op_input):
                    layer_inputs.append(op_input)
                else:
                    # Inputs without Keras history are treated as constants.
                    # Under distribution-with-session or XLA (or when the
                    # unsafe flag is set) the tensor is kept symbolic;
                    # otherwise it is eagerly evaluated to a concrete value.
                    ds_with_session = distribute_lib.in_cross_replica_context() and (not ops.executing_eagerly_outside_functions())
                    using_xla = control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph())
                    if ds_with_session or using_xla or _UNSAFE_GRAPH_OP_LAYER_CREATION:
                        constants[i] = op_input
                    else:
                        with ops.init_scope():
                            constants[i] = backend.function([], op_input)([])
            layer_inputs = unnest_if_single_tensor(layer_inputs)
            # Recurse so that upstream ops are wrapped before this one.
            processed_ops, created_layers = _create_keras_history_helper(layer_inputs, processed_ops, created_layers)
            name = op.name
            node_def = op.node_def.SerializeToString()
            op_layer = base_layer.TensorFlowOpLayer(node_def, constants=constants, name=name)
            created_layers.append(op_layer)
            op_layer._set_connectivity_metadata(args=(layer_inputs,), kwargs={}, outputs=op.outputs)
            processed_ops.update([op])
    if sparse_ops or ragged_tensors:
        lambda_example = '\n  weights_mult = lambda x: tf.sparse.sparse_dense_matmul(x, weights)\n  output = tf.keras.layers.Lambda(weights_mult)(input)\n  '
        raise ValueError('Tensorflow ops that generate ragged or sparse tensor outputs are currently not supported by Keras automatic op wrapping. Please wrap these ops in a Lambda layer: \n\n```\n{example}\n```\nSparse ops encountered: {sparse_ops}\nRagged tensors encountered: {ragged_tensors}\n'.format(example=lambda_example, sparse_ops=str(sparse_ops), ragged_tensors=str(ragged_tensors)))
    return (processed_ops, created_layers)
def infer_tests_to_run(output_file: str, diff_with_last_commit: bool=False, filter_models: bool=False, test_all: bool=False):
    """The main function called by the test fetcher. Determines the tests to run from the diff.

    Args:
        output_file (`str`):
            The path where to store the summary of the test fetcher
            analysis. Other files will be stored in the same folder:
            - examples_test_list.txt: The list of examples tests to run.
            - test_repo_utils.txt: Will indicate if the repo utils tests
              should be run or not.
            - doctest_list.txt: The list of doctests to run.
        diff_with_last_commit (`bool`, *optional*, defaults to `False`):
            Whether to analyze the diff with the last commit (for use on the
            main branch after a PR is merged) or with the branching point
            from main (for use on each PR).
        filter_models (`bool`, *optional*, defaults to `False`):
            Whether or not to filter the tests to core models only, when a
            file modified results in a lot of model tests.
        test_all (`bool`, *optional*, defaults to `False`):
            Whether to run the full test suite regardless of the diff.
    """
    # NOTE(review): this block appears to have been truncated/garbled during
    # extraction -- the list comprehension below is never closed, and
    # `model_impacted` / `reverse_map` are referenced without being defined,
    # as are two unterminated f-string print calls further down.  Preserved
    # as-is; restore from the upstream source before editing.
    if not test_all:
        modified_files = get_modified_python_files(diff_with_last_commit=diff_with_last_commit)
    else:
        modified_files = [str(k) for k in PATH_TO_TESTS.glob('*test_**.py', recursive=True) + glob.glob('examples*.py', recursive=True)
    if len(model_impacted) >= NUM_MODELS_TO_TRIGGER_FULL_CI and filter_models:
        print(f'More than {NUM_MODELS_TO_TRIGGER_FULL_CI - 1} models are impacted and `filter_models=False`. CI is configured to test everything.')
    else:
        test_files_to_run = [f for f in modified_files if f.startswith('tests') and '/test_' in f]
        impacted_files = get_impacted_files_from_tiny_model_summary(diff_with_last_commit=diff_with_last_commit)
        test_map = create_module_to_test_map(reverse_map=reverse_map, filter_models=filter_models)
        for f in modified_files + impacted_files:
            if f in test_map:
                test_files_to_run.extend(test_map[f])
        test_files_to_run = sorted(set(test_files_to_run))
        # Exclude repo-utils and sagemaker tests, and drop files that no
        # longer exist in the repo.
        test_files_to_run = [f for f in test_files_to_run if not f.split(os.path.sep)[1] == 'repo_utils']
        test_files_to_run = [f for f in test_files_to_run if not f.split(os.path.sep)[1] == 'sagemaker']
        test_files_to_run = [f for f in test_files_to_run if (PATH_TO_REPO / f).exists()]
        print(f'\n
        create_test_list_from_filter(test_files_to_run, out_path='test_preparation/')
    doctest_list = get_doctest_files()
    print(f'\n
    if len(doctest_list) > 0:
        doctest_file = Path(output_file).parent / 'doctest_list.txt'
        with open(doctest_file, 'w', encoding='utf-8') as f:
            f.write(' '.join(doctest_list))
def remove_objects_from_args(args: Iterable[Any], kwargs: Dict[str, Any], pvalue_class: Union[Type[T], Tuple[Type[T], ...]]) -> Tuple[List[Any], Dict[str, Any], List[T]]:
    """For internal use only; no backwards-compatibility guarantees.

    Replaces all objects of a given type in args/kwargs with a placeholder.

    Args:
        args: A list of positional arguments.
        kwargs: A dictionary of keyword arguments.
        pvalue_class: A class object (or tuple of classes) representing the
            types of arguments that must be replaced with a placeholder
            value (instance of ArgumentPlaceholder).

    Returns:
        A 3-tuple containing a modified list of positional arguments, a
        modified dictionary of keyword arguments, and a list of all objects
        replaced with a placeholder value.
    """
    extracted = []

    def _replace(value):
        # Side effect: remember the original so callers can restore it later.
        extracted.append(value)
        return ArgumentPlaceholder()

    new_args = [_replace(a) if isinstance(a, pvalue_class) else a for a in args]
    new_kwargs = {}
    for key, value in sorted(kwargs.items()):
        new_kwargs[key] = _replace(value) if isinstance(value, pvalue_class) else value
    return (new_args, new_kwargs, extracted)
def load_transcripts(adapter, transcripts_lines=None, build='37', ensembl_genes=None):
    """Load all the transcripts.

    Transcript information is from ensembl.

    Args:
        adapter(MongoAdapter)
        transcripts_lines(iterable): iterable with ensembl transcript lines
        build(str)
        ensembl_genes(dict): Map from ensembl_id -> HgncGene

    Returns:
        transcript_objs(list): A list with all transcript objects
    """
    ensembl_genes = (ensembl_genes or adapter.ensembl_genes(build))
    if (transcripts_lines is None):
        transcripts_lines = fetch_ensembl_transcripts(build=build)
    transcripts_dict = parse_transcripts(transcripts_lines)
    # Drop transcripts whose gene is unknown in this build; annotate the
    # rest with their HGNC id and the gene's primary transcript set.
    # list() snapshot lets us pop from the dict while iterating.
    for ens_tx_id in list(transcripts_dict):
        parsed_tx = transcripts_dict[ens_tx_id]
        ens_gene_id = parsed_tx['ensembl_gene_id']
        gene_obj = ensembl_genes.get(ens_gene_id)
        if (not gene_obj):
            transcripts_dict.pop(ens_tx_id)
            LOG.debug('Gene %s does not exist in build %s', ens_gene_id, build)
            continue
        parsed_tx['hgnc_id'] = gene_obj['hgnc_id']
        parsed_tx['primary_transcripts'] = set(gene_obj.get('primary_transcripts', []))
    ref_seq_transcripts = 0
    nr_primary_transcripts = 0
    nr_transcripts = len(transcripts_dict)
    transcript_objs = []
    with progressbar(transcripts_dict.values(), label='Building transcripts', length=nr_transcripts) as bar:
        for tx_data in bar:
            tx_data['is_primary'] = False
            primary_transcripts = tx_data['primary_transcripts']
            refseq_identifier = None
            refseq_identifiers = []
            # TRANSCRIPT_CATEGORIES is assumed to be in priority order, so
            # the first identifier seen becomes the fallback refseq_id while
            # a primary-transcript match overrides it -- TODO confirm.
            for category in TRANSCRIPT_CATEGORIES:
                identifiers = tx_data[category]
                if (not identifiers):
                    continue
                for refseq_id in identifiers:
                    refseq_identifiers.append(refseq_id)
                    ref_seq_transcripts += 1
                    if (refseq_id in primary_transcripts):
                        refseq_identifier = refseq_id
                        tx_data['is_primary'] = True
                        nr_primary_transcripts += 1
                    if (not refseq_identifier):
                        refseq_identifier = refseq_id
            if refseq_identifier:
                tx_data['refseq_id'] = refseq_identifier
            if refseq_identifiers:
                tx_data['refseq_identifiers'] = refseq_identifiers
            tx_obj = build_transcript(tx_data, build)
            transcript_objs.append(tx_obj)
    LOG.info('Loading transcripts...')
    if (len(transcript_objs) > 0):
        adapter.load_transcript_bulk(transcript_objs)
    LOG.info('Number of transcripts in build %s: %s', build, nr_transcripts)
    LOG.info('Number of transcripts with refseq identifier: %s', ref_seq_transcripts)
    LOG.info('Number of primary transcripts: %s', nr_primary_transcripts)
    return transcript_objs
def start_dag(self, dag, *, data=None):
    """Schedule the execution of a dag by sending a signal to the workflow.

    Args:
        dag (Dag, str): The dag object or the name of the dag that should
            be started.
        data (MultiTaskData): The data that should be passed on to the new
            dag.

    Returns:
        str: The name of the successfully started dag.
    """
    dag_name = dag.name if isinstance(dag, Dag) else dag
    # Non-MultiTaskData payloads are dropped rather than forwarded.
    payload = {
        'name': dag_name,
        'data': data if isinstance(data, MultiTaskData) else None,
    }
    response = self._client.send(Request(action='start_dag', payload=payload))
    return response.payload['dag_name']
def _pick_or_create_inserted_op_moment_index(self, splitter_index: int, op: ops.Operation, strategy: InsertStrategy) -> int:
    """Determines and prepares where an insertion will occur.

    Args:
        splitter_index: The index to insert at.
        op: The operation that will be inserted.
        strategy: The insertion strategy.

    Returns:
        The index of the (possibly new) moment where the insertion should
        occur.

    Raises:
        ValueError: Unrecognized append strategy.
    """
    if strategy is InsertStrategy.NEW or strategy is InsertStrategy.NEW_THEN_INLINE:
        # Always open a fresh moment at the split point.
        self._moments.insert(splitter_index, ops.Moment())
        return splitter_index

    if strategy is InsertStrategy.INLINE:
        prior = splitter_index - 1
        if 0 <= prior < len(self._moments) and self._can_add_op_at(prior, op):
            return prior
        # Cannot inline into the preceding moment; fall back to NEW.
        return self._pick_or_create_inserted_op_moment_index(
            splitter_index, op, InsertStrategy.NEW)

    if strategy is InsertStrategy.EARLIEST:
        if self._can_add_op_at(splitter_index, op):
            earliest = self._prev_moment_available(op, splitter_index)
            return earliest or 0
        # Split point itself is occupied; fall back to INLINE.
        return self._pick_or_create_inserted_op_moment_index(
            splitter_index, op, InsertStrategy.INLINE)

    raise ValueError('Unrecognized append strategy: {}'.format(strategy))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.