code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def step(self, actions):
for (index, (env, action)) in enumerate(zip(self._envs, actions)):
if (not env.action_space.contains(action)):
message = 'Invalid action at index {}: {}'
raise ValueError(message.format(index, action))
if self._blocking:
transitions = [env.step(action) for (env, action) in zip(self._envs, actions)]
else:
transitions = [env.step(action, blocking=False) for (env, action) in zip(self._envs, actions)]
transitions = [transition() for transition in transitions]
(observs, rewards, dones, infos) = zip(*transitions)
observ = np.stack(observs)
reward = np.stack(rewards)
done = np.stack(dones)
info = tuple(infos)
return (observ, reward, done, info) | Forward a batch of actions to the wrapped environments.
Args:
actions: Batched action to apply to the environment.
Raises:
ValueError: Invalid actions.
Returns:
Batch of observations, rewards, and done flags. | codesearchnet |
def generate_substitution_structures(self, atom, target_species=[], sub_both_sides=False, range_tol=0.01, dist_from_surf=0):
sym_slab = SpacegroupAnalyzer(self.slab).get_symmetrized_structure()
def substitute(site, i):
slab = self.slab.copy()
props = self.slab.site_properties
if sub_both_sides:
eq_indices = [indices for indices in sym_slab.equivalent_indices if (i in indices)][0]
for ii in eq_indices:
if (('%.6f' % sym_slab[ii].frac_coords[2]) != ('%.6f' % site.frac_coords[2])):
props['surface_properties'][ii] = 'substitute'
slab.replace(ii, atom)
break
props['surface_properties'][i] = 'substitute'
slab.replace(i, atom)
slab.add_site_property('surface_properties', props['surface_properties'])
return slab
substituted_slabs = []
sorted_sites = sorted(sym_slab, key=(lambda site: site.frac_coords[2]))
if (sorted_sites[0].surface_properties == 'surface'):
d = (sorted_sites[0].frac_coords[2] + dist_from_surf)
else:
d = (sorted_sites[(- 1)].frac_coords[2] - dist_from_surf)
for (i, site) in enumerate(sym_slab):
if ((d - range_tol) < site.frac_coords[2] < (d + range_tol)):
if (target_species and (site.species_string in target_species)):
substituted_slabs.append(substitute(site, i))
elif (not target_species):
substituted_slabs.append(substitute(site, i))
matcher = StructureMatcher()
return [s[0] for s in matcher.group_structures(substituted_slabs)] | Function that performs substitution-type doping on the surface and
returns all possible configurations where one dopant is substituted
per surface. Can substitute one surface or both.
Args:
atom (str): atom corresponding to substitutional dopant
sub_both_sides (bool): If true, substitute an equivalent
site on the other surface
target_species (list): List of specific species to substitute
range_tol (float): Find viable substitution sites at a specific
distance from the surface +- this tolerance
dist_from_surf (float): Distance from the surface to find viable
substitution sites, defaults to 0 to substitute at the surface | codesearchnet |
def put_vmss(access_token, subscription_id, resource_group, vmss_name, vmss_body):
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '?api-version=', COMP_API])
body = json.dumps(vmss_body)
return do_put(endpoint, body, access_token) | Put VMSS body.
Can be used to create or update a scale set.
E.g. call get_vmss(), make changes to the body, call put_vmss().
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the new scale set.
vmss_body (dictionary): Body containing the scale set properties.
Returns:
HTTP response. JSON body of the virtual machine scale set properties. | codesearchnet |
def object_table(self, object_id=None):
self._check_connected()
if (object_id is not None):
return self._object_table(object_id)
else:
object_keys = self._keys((ray.gcs_utils.TablePrefix_OBJECT_string + '*'))
object_ids_binary = {key[len(ray.gcs_utils.TablePrefix_OBJECT_string):] for key in object_keys}
results = {}
for object_id_binary in object_ids_binary:
results[binary_to_object_id(object_id_binary)] = self._object_table(binary_to_object_id(object_id_binary))
return results | Fetch and parse the object table info for one or more object IDs.
Args:
object_id: An object ID to fetch information about. If this is
None, then the entire object table is fetched.
Returns:
Information from the object table. | codesearchnet |
def disambiguate_pdf(self, file, language=None, entities=None):
body = {'customisation': 'generic'}
if language:
body['language'] = {'lang': language}
if entities:
body['entities'] = entities
files = {'query': str(body), 'file': (file, open(file, 'rb'), 'application/pdf', {'Expires': '0'})}
(res, status) = self.post(self.disambiguate_service, files=files, headers={'Accept': 'application/json'})
if (status != 200):
logger.debug(('Disambiguation failed with error ' + str(status)))
    return (self.decode(res), status) | Call the disambiguation service in order to process a PDF file.
Args:
file (file): PDF file to be disambiguated.
language (str): language of text (if known)
Returns:
dict, int: API response and API status. | codesearchnet |
def _applyInter(finter0, finter1, conflict='ignore'):
OPTIONS = ['error', 'ignore', 'me', 'other']
assert (conflict in OPTIONS), 'Invalid value in `conflict`.'
min_int = (- (2 ** 63))
inter0 = tuple([(f.getValue() if f else min_int) for f in finter0])
inter1 = tuple([(f.getValue() if f else min_int) for f in finter1])
le00 = (inter0[0] <= inter1[0])
le01 = ((inter1[1] == min_int) or (inter0[0] <= inter1[1]))
le11 = ((inter1[1] == min_int) or ((inter0[1] != min_int) and (inter0[1] <= inter1[1])))
ge00 = ((not le00) or (inter0[0] == inter1[0]))
ge10 = ((inter0[1] == min_int) or (inter0[1] >= inter1[0]))
if (le00 and ge10 and le11):
return (finter1[0], finter0[1])
elif (le00 and ge10 and (not le11)):
return finter1
elif (ge00 and le01 and le11):
return finter0
elif (ge00 and le01 and (not le11)):
return (finter0[0], finter1[1])
elif (conflict == 'me'):
return finter0
elif (conflict == 'other'):
return finter1
elif (conflict == 'error'):
raise Exception('Disjoint intervals!')
return None | Return the restriction of first interval by the second.
Args:
- inter0, inter1 (tuple of Feature): intervals
Return(tuple of Feature): the resulting interval
- conflict(str): if a property hasn't compatible values/constrains, do:
- ``"error"``: raise exception.
- ``"ignore"``: return None.
- ``"me"``: return finter0.
- ``"other"``: return finter1. | codesearchnet |
def package_and_copy(package_root_dir, setup_py, output_tar_path):
    if (not output_tar_path.startswith('gs://')):
raise ValueError('output_tar_path needs to be a GCS path.')
if (not os.path.isfile(setup_py)):
raise ValueError(('Supplied file "%s" does not exist.' % setup_py))
dest_setup_py = os.path.join(package_root_dir, 'setup.py')
if (dest_setup_py != setup_py):
if os.path.isfile(dest_setup_py):
os.rename(dest_setup_py, (dest_setup_py + '._bak_'))
shutil.copyfile(setup_py, dest_setup_py)
tempdir = tempfile.mkdtemp()
previous_cwd = os.getcwd()
os.chdir(package_root_dir)
try:
sdist = ['python', dest_setup_py, 'sdist', '--format=gztar', '-d', tempdir]
subprocess.check_call(sdist)
source = os.path.join(tempdir, '*.tar.gz')
gscopy = ['gsutil', 'cp', source, output_tar_path]
subprocess.check_call(gscopy)
return
finally:
os.chdir(previous_cwd)
if (dest_setup_py != setup_py):
os.remove(dest_setup_py)
if os.path.isfile((dest_setup_py + '._bak_')):
os.rename((dest_setup_py + '._bak_'), dest_setup_py)
    shutil.rmtree(tempdir) | Repackage a CloudML package and copy it to a staging dir.
Args:
package_root_dir: the root dir to install package from. Usually you can get the path
from inside your module using a relative path to __file__.
setup_py: the path to setup.py.
output_tar_path: the GCS path of the output tarball package.
Raises:
ValueError if output_tar_path is not a GCS path, or setup_py does not exist. | codesearchnet |
def contains(self, rect):
return (rect.y >= self.y and \
rect.x >= self.x and \
rect.y+rect.height <= self.y+self.height and \
        rect.x+rect.width <= self.x+self.width) | Tests if another rectangle is contained by this one
Arguments:
rect (Rectangle): The other rectangle
Returns:
bool: True if it is container, False otherwise | juraj-google-style |
def find_wells_with_curve(self, mnemonic, alias=None):
return Project([w for w in self if w.get_curve(mnemonic, alias=alias) is not None]) | Returns a new Project with only the wells which have the named curve.
Args:
mnemonic (str): the name of the curve to look for.
alias (dict): a welly alias dictionary.
Returns:
project. | juraj-google-style |
def PrintExtractionSummary(self, processing_status):
if (not processing_status):
self._output_writer.Write('WARNING: missing processing status information.\n')
elif (not processing_status.aborted):
if processing_status.error_path_specs:
self._output_writer.Write('Processing completed with errors.\n')
else:
self._output_writer.Write('Processing completed.\n')
number_of_warnings = processing_status.foreman_status.number_of_produced_warnings
if number_of_warnings:
output_text = '\n'.join(['', 'Number of warnings generated while extracting events: {0:d}.'.format(number_of_warnings), '', 'Use pinfo to inspect warnings in more detail.', ''])
self._output_writer.Write(output_text)
if processing_status.error_path_specs:
output_text = '\n'.join(['', 'Path specifications that could not be processed:', ''])
self._output_writer.Write(output_text)
for path_spec in processing_status.error_path_specs:
self._output_writer.Write(path_spec.comparable)
self._output_writer.Write('\n')
self._output_writer.Write('\n') | Prints a summary of the extraction.
Args:
processing_status (ProcessingStatus): processing status. | codesearchnet |
def reqs(amend: bool = False, stage: bool = False):
changed_files = CTX.repo.changed_files()
if 'requirements.txt' in changed_files or 'requirements-dev.txt' in changed_files:
LOGGER.error('Requirements have changed; cannot update them')
sys.exit(-1)
_write_reqs(amend, stage) | Write requirements files
Args:
amend: amend last commit with changes
stage: stage changes | juraj-google-style |
def find_mapreduce_yaml(status_file=__file__):
checked = set()
yaml = _find_mapreduce_yaml(os.path.dirname(status_file), checked)
if (not yaml):
yaml = _find_mapreduce_yaml(os.getcwd(), checked)
return yaml | Traverse directory trees to find mapreduce.yaml file.
Begins with the location of status.py and then moves on to check the working
directory.
Args:
status_file: location of status.py, overridable for testing purposes.
Returns:
the path of mapreduce.yaml file or None if not found. | codesearchnet |
def avg_grads(tower_grads):
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = []
for (g, _) in grad_and_vars:
expanded_g = tf.expand_dims(g, 0)
grads.append(expanded_g)
grad = tf.concat(0, grads)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads | Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers. | codesearchnet |
def dframe(self, dimensions=None, multi_index=False):
if dimensions:
dimensions = [self.get_dimension(d, strict=True) for d in dimensions]
else:
dimensions = self.kdims
vdims = [d for d in dimensions if d in self.vdims]
if vdims:
raise ValueError('%s element does not hold data for value '
'dimensions. Could not return data for %s '
'dimension(s).' %
(type(self).__name__, ', '.join([d.name for d in vdims])))
return super(StatisticsElement, self).dframe(dimensions, False) | Convert dimension values to DataFrame.
Returns a pandas dataframe of columns along each dimension,
either completely flat or indexed by key dimensions.
Args:
dimensions: Dimensions to return as columns
multi_index: Convert key dimensions to (multi-)index
Returns:
DataFrame of columns corresponding to each dimension | juraj-google-style |
def nan_to_num(x, nan=0.0, posinf=None, neginf=None):
if any_symbolic_tensors((x,)):
return NanToNum(nan=nan, posinf=posinf, neginf=neginf).symbolic_call(x)
return backend.numpy.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf) | Replace NaN with zero and infinity with large finite numbers.
Args:
x: Input data.
nan: Optional float or int. Value to replace `NaN` entries with.
posinf: Optional float or int.
Value to replace positive infinity with.
neginf: Optional float or int.
Value to replace negative infinity with.
Returns:
`x`, with non-finite values replaced. | github-repos |
def CheckCasts(filename, clean_lines, linenum, error):
line = clean_lines.elided[linenum]
match = Search(
r'(\bnew\s+(?:const\s+)?|\S<\s*(?:const\s+)?)?\b'
r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
r'(\([^)].*)', line)
expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
if match and not expecting_function:
matched_type = match.group(2)
matched_new_or_template = match.group(1)
if Match(r'\([^()]+\)\s*\[', match.group(3)):
return
matched_funcptr = match.group(3)
if (matched_new_or_template is None and
not (matched_funcptr and
(Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
matched_funcptr) or
matched_funcptr.startswith('(*)'))) and
not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
not Search(r'new\(\S+\)\s*' + matched_type, line)):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
matched_type)
if not expecting_function:
CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
r'\((char\s?\*+\s?)\)\s*"', error):
pass
else:
CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
r'\((\w+\s?\*+\s?)\)', error)
match = Search(
r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|'
r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
if match:
parenthesis_error = False
match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
if match:
_, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
_, y2, x2 = CloseExpression(clean_lines, y1, x1)
if x2 >= 0:
extended_line = clean_lines.elided[y2][x2:]
if y2 < clean_lines.NumLines() - 1:
extended_line += clean_lines.elided[y2 + 1]
if Match(r'\s*(?:->|\[)', extended_line):
parenthesis_error = True
if parenthesis_error:
error(filename, linenum, 'readability/casting', 4,
('Are you taking an address of something dereferenced '
'from a cast? Wrapping the dereferenced expression in '
'parentheses will make the binding more obvious'))
else:
error(filename, linenum, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after')) | Various cast related checks.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. | juraj-google-style |
def write_fingerprint(export_dir: str) -> None:
if flags.config().saved_model_fingerprinting.value():
fingerprint_path = file_io.join(compat.as_str(export_dir), compat.as_str(constants.FINGERPRINT_FILENAME))
logging.info('Writing fingerprint to %s', fingerprint_path)
try:
fingerprint_serialized = fingerprinting_pywrap.CreateFingerprintDef(export_dir)
except FingerprintException as e:
raise ValueError(e) from None
file_io.atomic_write_string_to_file(fingerprint_path, fingerprint_serialized)
metrics.SetWriteFingerprint(fingerprint=fingerprint_serialized)
try:
metrics.SetWritePathAndSingleprint(path=export_dir, singleprint=singleprint_from_fingerprint_proto(export_dir))
except metrics.MetricException:
logging.info('path_and_singleprint metric could not be set. Model saving will continue.') | Write fingerprint protobuf, if requested.
Writes a `tf.saved_model.experimental.Fingerprint` object to a
`fingerprint.pb` file in the `export_dir` using the `saved_model.pb` file
contained in `export_dir`.
Args:
export_dir: The directory in which to write the fingerprint. | github-repos |
def quaternion_from_axis_rotation(angle, axis):
out = np.zeros(4, dtype=float)
if (axis == 'x'):
out[1] = 1
elif (axis == 'y'):
out[2] = 1
elif (axis == 'z'):
out[3] = 1
else:
raise ValueError('Invalid axis input.')
out *= math.sin((angle / 2.0))
out[0] = math.cos((angle / 2.0))
return Quaternion(out) | Return quaternion for rotation about given axis.
Args:
angle (float): Angle in radians.
axis (str): Axis for rotation
Returns:
Quaternion: Quaternion for axis rotation.
Raises:
ValueError: Invalid input axis. | codesearchnet |
def run(self, timeout=-1):
def target():
self.process = subprocess.Popen(self.cmd,
stdout=self.stdout_dest,
stderr=self.stderr_dest,
shell=self.shell)
stdout, stderr = self.process.communicate()
if self.decode_out:
if stdout:
self.stdout = stdout.decode("utf-8")
if stderr:
self.stderr = stderr.decode("utf-8")
thread = threading.Thread(target=target)
thread.start()
if timeout > 0:
thread.join(timeout)
if thread.is_alive():
self.process.terminate()
thread.join()
raise SubprocessError(("Reached timeout after {t} seconds"
.format(t=timeout)))
else:
thread.join()
return self.process.returncode, self.stdout, self.stderr | Run the subprocess.
Arguments:
timeout (optional) If a positive real value, then timout after
the given number of seconds.
Raises:
SubprocessError If subprocess has not completed after "timeout"
seconds. | juraj-google-style |
def AssertThat(target):
if type(target) is type:
if issubclass(target, BaseException):
return _ExceptionClassSubject(target)
return _ClassSubject(target)
for super_type, subject_class in six.iteritems(_TYPE_CONSTRUCTORS):
if issubclass(type(target), super_type):
return subject_class(target)
if _IsMock(target):
return _MockSubject(target)
if _IsNumeric(target):
return _NumericSubject(target)
if _IsComparable(target) and _IsIterable(target):
return _ComparableIterableSubject(target)
if _IsComparable(target):
return _ComparableSubject(target)
if _IsIterable(target):
return _IterableSubject(target)
return _DefaultSubject(target) | Gateway function that initiates an assertion.
Args:
target: any object whatsoever, the object under test.
Returns:
A subject appropriate for the target. | github-repos |
def _create_delegate_handler(delegate):
@coroutine
def handler(*args):
yield
yield delegate.send(Transition(args, delegate))
return handler | Creates a handler function that creates a co-routine that can yield once with the given
positional arguments to the delegate as a transition.
Args:
delegate (Coroutine): The co-routine to delegate to.
Returns:
A :class:`callable` handler that returns a co-routine that ignores the data it receives
and sends with the arguments given to the handler as a :class:`Transition`. | juraj-google-style |
def dump(self):
results = []
for data in self.data():
results.append(data)
return results | Dump raw JSON output of matching queryset objects.
Returns:
List of dicts. | codesearchnet |
def add_how(voevent, descriptions=None, references=None):
if not voevent.xpath('How'):
etree.SubElement(voevent, 'How')
if descriptions is not None:
for desc in _listify(descriptions):
etree.SubElement(voevent.How, 'Description')
voevent.How.Description[-1] = desc
if references is not None:
voevent.How.extend(_listify(references)) | Add descriptions or references to the How section.
Args:
voevent(:class:`Voevent`): Root node of a VOEvent etree.
descriptions(str): Description string, or list of description
strings.
references(:py:class:`voeventparse.misc.Reference`): A reference element
(or list thereof). | juraj-google-style |
def __init__(self, callback):
super(Interface, self).__init__(callback)
self._mac_address_table = brocade_mac_address_table(
callback=pynos.utilities.return_xml
) | Interface init function.
Args:
callback: Callback function that will be called for each action.
Returns:
Interface Object
Raises:
None | juraj-google-style |
def has_register(self, register):
has_reg = False
if (isinstance(register, QuantumRegister) and (register in self.qregs)):
has_reg = True
elif (isinstance(register, ClassicalRegister) and (register in self.cregs)):
has_reg = True
return has_reg | Test if this circuit has the register r.
Args:
register (Register): a quantum or classical register.
Returns:
bool: True if the register is contained in this circuit. | codesearchnet |
def dataverse_search_doi(doi):
url = '{}/api/datasets/:persistentId?persistentId=doi:{}'.format(dataverse, doi)
r = requests.get(url)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as error:
print('Error looking up DOI "{}" in the Harvard Dataverse.'.format(doi))
print(r.text)
raise error
return json.loads(r.text) | Fetches metadata pertaining to a Digital Object Identifier (DOI) in the
Harvard Dataverse.
Args:
doi (str): The Digital Object Identifier (DOI) of the entry in the
Dataverse.
Raises:
requests.exceptions.HTTPError: The given DOI does not exist, or there
was a problem connecting to the Dataverse. | juraj-google-style |
def open(self, callback):
if self.is_active:
raise ValueError("This manager is already open.")
if self._closed:
raise ValueError("This manager has been closed and can not be re-used.")
self._callback = functools.partial(_wrap_callback_errors, callback)
self._rpc = bidi.ResumableBidiRpc(
start_rpc=self._client.api.streaming_pull,
initial_request=self._get_initial_request,
should_recover=self._should_recover,
)
self._rpc.add_done_callback(self._on_rpc_done)
self._dispatcher = dispatcher.Dispatcher(self, self._scheduler.queue)
self._consumer = bidi.BackgroundConsumer(self._rpc, self._on_response)
self._leaser = leaser.Leaser(self)
self._heartbeater = heartbeater.Heartbeater(self)
self._dispatcher.start()
self._consumer.start()
self._leaser.start()
self._heartbeater.start() | Begin consuming messages.
Args:
callback (Callable[None, google.cloud.pubsub_v1.message.Messages]):
A callback that will be called for each message received on the
stream. | juraj-google-style |
def encode_endian(text, encoding, errors="strict", le=True):
encoding = codecs.lookup(encoding).name
if encoding == "utf-16":
if le:
return codecs.BOM_UTF16_LE + text.encode("utf-16-le", errors)
else:
return codecs.BOM_UTF16_BE + text.encode("utf-16-be", errors)
elif encoding == "utf-32":
if le:
return codecs.BOM_UTF32_LE + text.encode("utf-32-le", errors)
else:
return codecs.BOM_UTF32_BE + text.encode("utf-32-be", errors)
else:
return text.encode(encoding, errors) | Like text.encode(encoding) but always returns little endian/big endian
BOMs instead of the system one.
Args:
text (text)
encoding (str)
errors (str)
le (boolean): if little endian
Returns:
bytes
Raises:
UnicodeEncodeError
LookupError | juraj-google-style |
def _operator(self, op, close_group=False):
op = op.upper().strip()
if (op not in OP_LIST):
raise ValueError("Error: '{}' is not a valid operator.".format(op))
else:
if close_group:
op = ((') ' + op) + ' (')
else:
op = ((' ' + op) + ' ')
self.__query['q'] += op
return self | Add an operator between terms.
There must be a term added before using this method.
All operators have helpers, so this method is usually not necessary to directly invoke.
Arguments:
op (str): The operator to add. Must be in the OP_LIST.
close_group (bool): If ``True``, will end the current parenthetical
group and start a new one.
If ``False``, will continue current group.
Example::
"(foo AND bar)" is one group.
"(foo) AND (bar)" is two groups.
Returns:
SearchHelper: Self | codesearchnet |
def parse_transdos(path_dir, efermi, dos_spin=1, trim_dos=False):
data_dos = {'total': [], 'partial': {}}
with open(os.path.join(path_dir, "boltztrap.transdos"), 'r') as f:
count_series = 0
for line in f:
            if line.lstrip().startswith("#"):
count_series += 1
if count_series > 1:
break
else:
data_dos['total'].append(
[Energy(float(line.split()[0]), "Ry").to("eV"),
float(line.split()[1])])
total_elec = float(line.split()[2])
lw_l = 0
hg_l = -len(data_dos['total'])
if trim_dos:
tmp_data = np.array(data_dos['total'])
tmp_den = np.trim_zeros(tmp_data[:, 1], 'f')[1:]
lw_l = len(tmp_data[:, 1]) - len(tmp_den)
tmp_ene = tmp_data[lw_l:, 0]
tmp_den = np.trim_zeros(tmp_den, 'b')[:-1]
hg_l = len(tmp_ene) - len(tmp_den)
tmp_ene = tmp_ene[:-hg_l]
tmp_data = np.vstack((tmp_ene, tmp_den)).T
data_dos['total'] = tmp_data.tolist()
for file_name in os.listdir(path_dir):
if file_name.endswith(
"transdos") and file_name != 'boltztrap.transdos':
tokens = file_name.split(".")[1].split("_")
site = tokens[1]
orb = '_'.join(tokens[2:])
with open(os.path.join(path_dir, file_name), 'r') as f:
for line in f:
                    if not line.lstrip().startswith("#"):
if site not in data_dos['partial']:
data_dos['partial'][site] = {}
if orb not in data_dos['partial'][site]:
data_dos['partial'][site][orb] = []
data_dos['partial'][site][orb].append(
float(line.split()[1]))
data_dos['partial'][site][orb] = data_dos['partial'][site][
orb][lw_l:-hg_l]
dos_full = {'energy': [], 'density': []}
for t in data_dos['total']:
dos_full['energy'].append(t[0])
dos_full['density'].append(t[1])
dos = Dos(efermi, dos_full['energy'],
{Spin(dos_spin): dos_full['density']})
dos_partial = data_dos['partial']
return dos, dos_partial | Parses .transdos (total DOS) and .transdos_x_y (partial DOS) files
Args:
path_dir: (str) dir containing DOS files
efermi: (float) Fermi energy
dos_spin: (int) -1 for spin down, +1 for spin up
trim_dos: (bool) whether to post-process / trim DOS
Returns:
tuple - (DOS, dict of partial DOS) | juraj-google-style |
def get_plot(self, xlim=None, ylim=None):
ncolors = max(3, len(self._doses))
ncolors = min(9, ncolors)
import palettable
colors = palettable.colorbrewer.qualitative.Set1_9.mpl_colors
y = None
alldensities = []
allenergies = []
plt = pretty_plot(12, 8)
for key, dos in self._doses.items():
energies = dos['energies']
densities = dos['densities']
if not y:
y = {Spin.up: np.zeros(energies.shape),
Spin.down: np.zeros(energies.shape)}
newdens = {}
for spin in [Spin.up, Spin.down]:
if spin in densities:
if self.stack:
y[spin] += densities[spin]
newdens[spin] = y[spin].copy()
else:
newdens[spin] = densities[spin]
allenergies.append(energies)
alldensities.append(newdens)
keys = list(self._doses.keys())
keys.reverse()
alldensities.reverse()
allenergies.reverse()
allpts = []
for i, key in enumerate(keys):
x = []
y = []
for spin in [Spin.up, Spin.down]:
if spin in alldensities[i]:
densities = list(int(spin) * alldensities[i][spin])
energies = list(allenergies[i])
if spin == Spin.down:
energies.reverse()
densities.reverse()
x.extend(energies)
y.extend(densities)
allpts.extend(list(zip(x, y)))
if self.stack:
plt.fill(x, y, color=colors[i % ncolors],
label=str(key))
else:
plt.plot(x, y, color=colors[i % ncolors],
label=str(key), linewidth=3)
if not self.zero_at_efermi:
ylim = plt.ylim()
plt.plot([self._doses[key]['efermi'],
self._doses[key]['efermi']], ylim,
color=colors[i % ncolors],
linestyle='--', linewidth=2)
if xlim:
plt.xlim(xlim)
if ylim:
plt.ylim(ylim)
else:
xlim = plt.xlim()
relevanty = [p[1] for p in allpts
if xlim[0] < p[0] < xlim[1]]
plt.ylim((min(relevanty), max(relevanty)))
if self.zero_at_efermi:
ylim = plt.ylim()
plt.plot([0, 0], ylim, 'k--', linewidth=2)
plt.xlabel('Energies (eV)')
plt.ylabel('Density of states')
plt.legend()
leg = plt.gca().get_legend()
ltext = leg.get_texts()
plt.setp(ltext, fontsize=30)
plt.tight_layout()
return plt | Get a matplotlib plot showing the DOS.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits. | juraj-google-style |
def save(self, filething=None, deleteid3=False, padding=None):
self._save(filething, self.metadata_blocks, deleteid3, padding) | Save metadata blocks to a file.
Args:
filething (filething)
deleteid3 (bool): delete id3 tags while at it
padding (:obj:`mutagen.PaddingFunction`)
If no filename is given, the one most recently loaded is used. | juraj-google-style |
def save(self, path):
with open(path, 'w') as out_file:
json.dump(self.to_dict(), out_file, indent=4) | Save the specification of this MLPipeline in a JSON file.
The content of the JSON file is the dict returned by the `to_dict` method.
Args:
path (str): Path to the JSON file to write. | codesearchnet |
def Group(items, key):
result = {}
for item in items:
result.setdefault(key(item), []).append(item)
return result | Groups items by given key function.
Args:
items: An iterable or an iterator of items.
key: A function which given each item will return the key.
Returns:
A dict with keys being each unique key and values being a list of items of
that key. | juraj-google-style |
def get_data(self, label: str) -> Any:
return self._get_resource(label, self._data, "data") | Get a data resource by label
Args:
label (str): The label for the data resource to fetch
Returns:
The requested data object
def get_extended_attention_mask(self, word_attention_mask: torch.LongTensor, entity_attention_mask: Optional[torch.LongTensor]):
attention_mask = word_attention_mask
if entity_attention_mask is not None:
attention_mask = torch.cat([attention_mask, entity_attention_mask], dim=-1)
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(f'Wrong shape for attention_mask (shape {attention_mask.shape})')
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min
return extended_attention_mask | Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
word_attention_mask (`torch.LongTensor`):
Attention mask for word tokens with ones indicating tokens to attend to, zeros for tokens to ignore.
entity_attention_mask (`torch.LongTensor`, *optional*):
Attention mask for entity tokens with ones indicating tokens to attend to, zeros for tokens to ignore.
Returns:
`torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`. | github-repos |
async def on_message(message):
server = message.server
author = message.author
channel = message.channel
content = message.content
data = datatools.get_data()
if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
return
if server is not None and author != channel.server.me:
prefix = data["discord"]["servers"][server.id]["prefix"]
if content.startswith(prefix):
package = content.split(" ")
command = package[0][len(prefix):]
args = package[1:]
arg = ' '.join(args)
if command == 'hex':
await client.send_typing(channel)
hex_strs = api_hexconvert.convert_hex_value(arg)
if len(hex_strs) > 0:
for hex_str in hex_strs:
image_url = convert_hex_to_url(hex_str)
embed = ui_embed.success(channel, image_url, hex_str)
await embed.send()
else:
embed = ui_embed.fail_api(channel)
await embed.send()
else:
hex_strs = api_hexconvert.convert_hex_value(content)
if len(hex_strs) > 0:
for hex_str in hex_strs:
await client.send_typing(channel)
image_url = convert_hex_to_url(hex_str)
embed = ui_embed.success(channel, image_url, hex_str)
await embed.send() | The on_message event handler for this module
Args:
message (discord.Message): Input message | juraj-google-style |
def keep_artifacts(self, **kwargs):
    """Prevent artifacts from being deleted when expiration is set.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo).
            NOTE(review): currently accepted but not forwarded to the
            HTTP call, matching existing behavior.

    Raises:
        GitlabAuthenticationError: If authentication is not correct.
        GitlabCreateError: If the request could not be performed.
    """
    endpoint = '%s/%s/artifacts/keep' % (self.manager.path, self.get_id())
    self.manager.gitlab.http_post(endpoint)
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabCreateError: If the request could not be performed | codesearchnet |
def get_train_op(self, loss, learning_rate, optimizer=None, clip_norm=None, learnable_scopes=None, optimizer_scope_name=None, **kwargs):
    """Get train operation for given loss.

    Args:
        loss: loss, tf tensor or scalar.
        learning_rate: scalar or placeholder.
        optimizer: instance of tf.train.Optimizer, default Adam.
        clip_norm: clip gradients norm by clip_norm.
        learnable_scopes: which scopes are trainable (None for all).
        optimizer_scope_name: name of the variable scope holding optimizer
            state (defaults to 'Optimizer').
        **kwargs: parameters passed to tf.train.Optimizer object
            (scalars or placeholders).

    Returns:
        train_op
    """
    # Keep all optimizer state inside a dedicated variable scope.
    if (optimizer_scope_name is None):
        opt_scope = tf.variable_scope('Optimizer')
    else:
        opt_scope = tf.variable_scope(optimizer_scope_name)
    with opt_scope:
        if (learnable_scopes is None):
            variables_to_train = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        else:
            # Restrict training to variables under the requested scopes.
            variables_to_train = []
            for scope_name in learnable_scopes:
                variables_to_train.extend(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope_name))
        if (optimizer is None):
            optimizer = tf.train.AdamOptimizer
        # Run UPDATE_OPS (e.g. batch-norm moving averages) before the step.
        extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(extra_update_ops):
            def clip_if_not_none(grad):
                # Gradients of unconnected variables are None; leave them be.
                if (grad is not None):
                    return tf.clip_by_norm(grad, clip_norm)
            opt = optimizer(learning_rate, **kwargs)
            grads_and_vars = opt.compute_gradients(loss, var_list=variables_to_train)
            if (clip_norm is not None):
                grads_and_vars = [(clip_if_not_none(grad), var) for (grad, var) in grads_and_vars]
            train_op = opt.apply_gradients(grads_and_vars)
    return train_op
Args:
loss: loss, tf tensor or scalar
learning_rate: scalar or placeholder.
clip_norm: clip gradients norm by clip_norm.
learnable_scopes: which scopes are trainable (None for all).
optimizer: instance of tf.train.Optimizer, default Adam.
**kwargs: parameters passed to tf.train.Optimizer object
(scalars or placeholders).
Returns:
train_op | codesearchnet |
def _save_and_write_assets(self, meta_graph_def, assets_list=None):
    """Saves assets to the meta graph and writes asset files to disk.

    Args:
        meta_graph_def: The meta graph def to which the assets will be added.
        assets_list: The list where the asset paths are setup.
    """
    # Bind the asset-writing callback to this specific meta graph.
    write_fn = functools.partial(_add_asset_to_metagraph, meta_graph_def)
    asset_filename_map = _maybe_save_assets(write_fn, assets_list)
    # Nothing to copy to disk if no assets were collected.
    if not asset_filename_map:
        tf_logging.info('No assets to write.')
        return
    copy_assets_to_destination_dir(asset_filename_map, self._export_dir, self._saved_asset_files)
Args:
meta_graph_def: The meta graph def to which the assets will be added.
assets_list: The list where the asset paths are setup. | github-repos |
def row_starts(self, name=None):
    """Returns the start indices for rows in this ragged tensor.

    These indices specify where the values for each row begin in
    `self.values`; `rt.row_starts()` is equal to `rt.row_splits[:-1]`.

    Args:
        name: A name prefix for the returned tensor (optional).

    Returns:
        A 1-D integer Tensor with shape `[nrows]`. The returned tensor is
        nonnegative, and is sorted in ascending order.
    """
    with ops.name_scope(name, 'RaggedRowStarts', [self]):
        starts = self._row_partition.row_starts()
    return starts
These indices specify where the values for each row begin in
`self.values`. `rt.row_starts()` is equal to `rt.row_splits[:-1]`.
Args:
name: A name prefix for the returned tensor (optional).
Returns:
A 1-D integer Tensor with shape `[nrows]`.
The returned tensor is nonnegative, and is sorted in ascending order.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
>>> print(rt.row_starts()) # indices of row starts in rt.values
tf.Tensor([0 4 4 7 8], shape=(5,), dtype=int64) | github-repos |
def random_restore(rnd: Optional[tcod.random.Random], backup: tcod.random.Random) -> None:
    """Restore a random number generator from a backed up copy.

    Args:
        rnd (Optional[Random]): A Random instance, or None to use the default.
        backup (Random): The Random instance which was used as a backup.

    .. deprecated:: 8.4
        You can use the standard library copy and pickle modules to save a
        random state.
    """
    if rnd:
        source = rnd.random_c
    else:
        source = ffi.NULL
    lib.TCOD_random_restore(source, backup.random_c)
Args:
rnd (Optional[Random]): A Random instance, or None to use the default.
backup (Random): The Random instance which was used as a backup.
.. deprecated:: 8.4
You can use the standard library copy and pickle modules to save a
random state. | codesearchnet |
def __init__(self, reactants_coeffs, products_coeffs):
    """Create a reaction with explicitly given coefficients.

    Reactants and products are specified as dicts of {Composition: coeff}.

    Args:
        reactants_coeffs ({Composition: float}): Reactants as dict of
            {Composition: amt}.
        products_coeffs ({Composition: float}): Products as dict of
            {Composition: amt}.

    Raises:
        ReactionError: If the reaction is not balanced within TOLERANCE.
    """
    # Total composition of each side, used only for the balance check.
    all_reactants = sum([k * v for k, v in reactants_coeffs.items()],
                        Composition({}))
    all_products = sum([k * v for k, v in products_coeffs.items()],
                       Composition({}))
    if not all_reactants.almost_equals(all_products, rtol=0,
                                       atol=self.TOLERANCE):
        raise ReactionError("Reaction is unbalanced!")
    self.reactants_coeffs = reactants_coeffs
    self.products_coeffs = products_coeffs
    # Net coefficient per composition: positive for products, negative for
    # reactants; spectator entries (|coeff| <= TOLERANCE) are dropped.
    self._coeffs = []
    # NOTE(review): _els stays empty. The previous code assigned
    # all_reactants.elements here and then immediately reset it to []
    # (a dead store, now removed); confirm whether _els should instead
    # carry the element list.
    self._els = []
    self._all_comp = []
    for c in set(list(reactants_coeffs.keys()) +
                 list(products_coeffs.keys())):
        coeff = products_coeffs.get(c, 0) - reactants_coeffs.get(c, 0)
        if abs(coeff) > self.TOLERANCE:
            self._all_comp.append(c)
            self._coeffs.append(coeff)
Args:
reactants_coeffs ({Composition: float}): Reactants as dict of
{Composition: amt}.
products_coeffs ({Composition: float}): Products as dict of
{Composition: amt}. | juraj-google-style |
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Read the data encoding the GetUsageAllocation request payload and
    decode it into its constituent parts.

    Args:
        input_stream (stream): A data stream containing encoded object
            data, supporting a read method; usually a BytearrayStream
            object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be decoded. Optional,
            defaults to KMIP 1.0.

    Raises:
        ValueError: Raised if the data attribute is missing from the
            encoded payload.
    """
    super(GetUsageAllocationRequestPayload, self).read(input_stream, kmip_version=kmip_version)
    # Work on a bounded sub-stream covering only this payload's bytes.
    local_stream = utils.BytearrayStream(input_stream.read(self.length))
    # Both fields are optional in the encoding; read them only if present.
    if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):
        self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)
        self._unique_identifier.read(local_stream, kmip_version=kmip_version)
    if self.is_tag_next(enums.Tags.USAGE_LIMITS_COUNT, local_stream):
        self._usage_limits_count = primitives.LongInteger(tag=enums.Tags.USAGE_LIMITS_COUNT)
        self._usage_limits_count.read(local_stream, kmip_version=kmip_version)
    # Reject payloads with unread trailing bytes.
    self.is_oversized(local_stream)
decode it into its constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the data attribute is missing from the
encoded payload. | codesearchnet |
def run_the_target(G, target, settings):
    """Wrapper function that sends the commands in a target's 'formula'
    to run_commands().

    Args:
        G: The graph we are going to build.
        target: The target to run.
        settings: The settings dictionary.
    """
    settings["sprint"]("Running target {}".format(target))
    formula = get_the_node_dict(G, target)["formula"]
    run_commands(formula, settings)
to run_commands()
Args:
The graph we are going to build
The target to run
The settings dictionary | juraj-google-style |
def get_logging_tensor_hook(every_n_iter=100, tensors_to_log=None, **kwargs):
    """Function to get LoggingTensorHook.

    Args:
        every_n_iter: `int`, print the values of `tensors` once every N local
            steps taken on the current worker.
        tensors_to_log: List of tensor names or dictionary mapping labels to
            tensor names. If not set, log _TENSORS_TO_LOG by default.
        **kwargs: accepted for interface compatibility; not forwarded to the
            hook (matching existing behavior).

    Returns:
        A LoggingTensorHook with a standard set of tensors that will be
        printed to stdout.
    """
    chosen_tensors = _TENSORS_TO_LOG if tensors_to_log is None else tensors_to_log
    return tf.train.LoggingTensorHook(tensors=chosen_tensors, every_n_iter=every_n_iter)
Args:
every_n_iter: `int`, print the values of `tensors` once every N local
steps taken on the current worker.
tensors_to_log: List of tensor names or dictionary mapping labels to tensor
names. If not set, log _TENSORS_TO_LOG by default.
**kwargs: a dictionary of arguments to LoggingTensorHook.
Returns:
Returns a LoggingTensorHook with a standard set of tensors that will be
printed to stdout. | juraj-google-style |
def min_zoom(self):
    """Get the minimal zoom level of all layers.

    Returns:
        int: the minimum of all zoom levels of all layers.

    Raises:
        ValueError: if no layers exist.
    """
    # min() raises ValueError on an empty sequence, i.e. when no layers exist.
    return min(layer.min_zoom for layer in self.layers)
Returns:
int: the minimum of all zoom levels of all layers
Raises:
ValueError: if no layers exist | codesearchnet |
def ensure_resource_data(self, update_data=False):
    """Retrieves data from OneView and updates resource object.

    Args:
        update_data: Flag to update resource data when it is required.

    Raises:
        HPOneViewMissingUniqueIdentifiers: if no unique identifier is set.
        HPOneViewResourceNotFound: if the resource cannot be found remotely.
    """
    # At least one unique identifier (e.g. uri) must be present locally.
    if (not any(((key in self.data) for key in self.UNIQUE_IDENTIFIERS))):
        raise exceptions.HPOneViewMissingUniqueIdentifiers(MISSING_UNIQUE_IDENTIFIERS)
    if (not update_data):
        return
    resource_data = None
    # Prefer a direct GET by uri; otherwise search by each identifier in turn.
    if (('uri' in self.UNIQUE_IDENTIFIERS) and self.data.get('uri')):
        resource_data = self._helper.do_get(self.data['uri'])
    else:
        for identifier in self.UNIQUE_IDENTIFIERS:
            identifier_value = self.data.get(identifier)
            if identifier_value:
                result = self.get_by(identifier, identifier_value)
                # get_by returns a list of matches; take the first one.
                if (result and isinstance(result, list)):
                    resource_data = result[0]
                    break
    if resource_data:
        self.data.update(resource_data)
    else:
        raise exceptions.HPOneViewResourceNotFound(RESOURCE_DOES_NOT_EXIST)
Args:
update_data: Flag to update resource data when it is required. | codesearchnet |
def __init__(self, gcs_dag_bucket, gcs_dag_file_path=None):
    """Initializes an instance of an Airflow object.

    Args:
        gcs_dag_bucket: Bucket where Airflow expects dag files to be uploaded.
        gcs_dag_file_path: File path of the Airflow dag files; any falsy
            value is stored as the empty string.
    """
    self._gcs_dag_bucket = gcs_dag_bucket
    self._gcs_dag_file_path = gcs_dag_file_path if gcs_dag_file_path else ''
Args:
gcs_dag_bucket: Bucket where Airflow expects dag files to be uploaded.
gcs_dag_file_path: File path of the Airflow dag files. | juraj-google-style |
def mp_atan2(y, x):
    """muparser atan2 function.

    Implements an atan2(y,x) function for older muparser versions (<2.1.0);
    atan2 was added as a built-in function in muparser 2.1.0.

    Args:
        y (str): y argument of the atan2(y,x) function.
        x (str): x argument of the atan2(y,x) function.

    Returns:
        A muparser string that calculates atan2(y,x).
    """
    import re

    template = 'if((x)>0, atan((y)/(x)), if(((x)<0) and ((y)>=0), atan((y)/(x))+pi, if(((x)<0) and ((y)<0), atan((y)/(x))-pi, if(((x)==0) and ((y)>0), pi/2, if(((x)==0) and ((y)<0), -pi/2, 0)))))'
    # Substitute all placeholders in a single pass. The previous chained
    # .replace('y', y).replace('x', x) calls would also rewrite any 'x'
    # occurring inside the caller-supplied y expression (and vice versa),
    # e.g. mp_atan2('x2', 'x1') produced a mangled expression.
    substitutions = {'pi': str(math.pi), 'x': x, 'y': y}
    return re.sub(r'pi|[xy]', lambda m: substitutions[m.group(0)], template)
Implements an atan2(y,x) function for older muparser versions (<2.1.0);
atan2 was added as a built-in function in muparser 2.1.0
Args:
y (str): y argument of the atan2(y,x) function
x (str): x argument of the atan2(y,x) function
Returns:
A muparser string that calculates atan2(y,x) | codesearchnet |
def get_space_group_info(self, symprec=1e-2, angle_tolerance=5.0):
    """Convenience method to quickly get the spacegroup of a structure.

    Args:
        symprec (float): Same definition as in SpacegroupAnalyzer.
            Defaults to 1e-2.
        angle_tolerance (float): Same definition as in SpacegroupAnalyzer.
            Defaults to 5 degrees.

    Returns:
        spacegroup_symbol, international_number
    """
    # Imported locally to avoid a circular import at module load time.
    from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
    analyzer = SpacegroupAnalyzer(self, symprec=symprec,
                                  angle_tolerance=angle_tolerance)
    return (analyzer.get_space_group_symbol(),
            analyzer.get_space_group_number())
Args:
symprec (float): Same definition as in SpacegroupAnalyzer.
Defaults to 1e-2.
angle_tolerance (float): Same definition as in SpacegroupAnalyzer.
Defaults to 5 degrees.
Returns:
spacegroup_symbol, international_number | juraj-google-style |
def get_sql_statement_with_environment(item, args=None):
    """Given a SQLStatement, string or module plus command line args or a
    dictionary, return a SqlStatement and final dictionary for variable
    resolution.

    Args:
        item: a SqlStatement, %%sql module, or string containing a query.
        args: a string of command line arguments or a dictionary of values.

    Returns:
        A SqlStatement for the query or module, plus a dictionary of
        variable values to use.
    """
    if isinstance(item, basestring):
        item = _sql_statement.SqlStatement(item)
    elif (not isinstance(item, _sql_statement.SqlStatement)):
        # Modules carry a default query; fall back to it.
        item = SqlModule.get_default_query_from_module(item)
        if (not item):
            raise Exception(('Expected a SQL statement or module but got %s' % str(item)))
    env = {}
    if item.module:
        # Seed the environment with the module's own namespace.
        env.update(item.module.__dict__)
        parser = env.get(_utils._SQL_MODULE_ARGPARSE, None)
        if parser:
            # Parse command-line style args through the module's parser.
            args = SqlModule._get_sql_args(parser, args=args)
        else:
            args = None
    if isinstance(args, dict):
        env.update(args)
    return (item, env)
return a SqlStatement and final dictionary for variable resolution.
Args:
item: a SqlStatement, %%sql module, or string containing a query.
args: a string of command line arguments or a dictionary of values.
Returns:
A SqlStatement for the query or module, plus a dictionary of variable values to use. | codesearchnet |
def dispatch_event(event):
    """Dispatch the event being represented by the Event object.

    Args:
        event: Object holding information about the request to be dispatched
            to the Optimizely backend (url, http_verb, params, headers).
    """
    try:
        verb = event.http_verb
        if verb == enums.HTTPVerbs.GET:
            response = requests.get(event.url, params=event.params, timeout=REQUEST_TIMEOUT)
            response.raise_for_status()
        elif verb == enums.HTTPVerbs.POST:
            response = requests.post(event.url, data=json.dumps(event.params), headers=event.headers, timeout=REQUEST_TIMEOUT)
            response.raise_for_status()
    except request_exception.RequestException as error:
        # Best-effort dispatch: network/HTTP failures are logged, not raised.
        logging.error('Dispatch event failed. Error: %s' % str(error))
Args:
event: Object holding information about the request to be dispatched to the Optimizely backend. | codesearchnet |
def jacobian(func, x, unconnected_gradients=None, parallel_iterations=None, experimental_use_pfor=True, name=None):
    """Computes the jacobian of `func` wrt to `x`.

    Args:
        func: Python callable accepting one `Tensor` of shape of `x` and
            returning a `Tensor` of any shape. The function whose jacobian
            is to be computed.
        x: A `Tensor` with respect to which the gradient is to be computed.
        unconnected_gradients: An enum `tf.UnconnectedGradients` which
            specifies the gradient value returned when the given input
            tensors are unconnected. Default value: `None`, which maps to
            `tf.UnconnectedGradients.NONE`.
        parallel_iterations: A knob to control how many iterations are
            dispatched in parallel. This knob can be used to control the
            total memory usage.
        experimental_use_pfor: If true, uses pfor for computing the Jacobian.
            Else uses a tf.while_loop.
        name: Python `str` name prefixed to ops created by this function.
            Default value: `None` (i.e., 'jacobian').

    Returns:
        A `Tensor` with the gradient of `y` wrt each of `x`.
    """
    unconnected_gradients = unconnected_gradients or tf.UnconnectedGradients.NONE
    # _prepare_args adds a batch dimension when `x` has none; the flag
    # records whether the input was already batched.
    x, is_x_batch_size = _prepare_args(x)
    with tf.name_scope(name or 'jacobian'):
        if not callable(func):
            raise ValueError('`func` should be a callable in eager mode or when `tf.GradientTape` is used.')
        with tf.GradientTape() as tape:
            tape.watch(x)
            y = func(x)
        jac = tape.batch_jacobian(y, x, unconnected_gradients=unconnected_gradients, parallel_iterations=parallel_iterations, experimental_use_pfor=experimental_use_pfor)
        if is_x_batch_size:
            return jac
        # Drop the synthetic batch dimension added by _prepare_args.
        return jac[0]
Args:
func: Python callable accepting one `Tensor` of shape of `x` and returning
a `Tensor` of any shape. The function whose jacobian is to be computed.
x: A `Tensor` with respect to which the gradient is to be computed.
unconnected_gradients: An enum `tf.UnconnectedGradients` which specifies
the gradient value returned when the given input tensors are
unconnected. Default value: `None`, which maps to
`tf.UnconnectedGradients.NONE`.
parallel_iterations: A knob to control how many iterations are dispatched
in parallel. This knob can be used to control the total memory usage.
experimental_use_pfor: If true, uses pfor for computing the Jacobian.
Else uses a tf.while_loop.
name: Python `str` name prefixed to ops created by this function.
Default value: `None` (i.e., 'jacobian').
Returns:
A `Tensor` with the gradient of `y` wrt each of `x`. | github-repos |
def _checkTensorElementLocations(self, out, a):
    """Check the results of locate_tensor_element on an ndarray representation.

    that represents a numpy.ndarray.

    Args:
        out: An instance of RichTextLines representing a numpy.ndarray.
        a: The numpy.ndarray being represented.

    Raises:
        ValueError: if any ellipses ("...") are found in the lines
            representing the array.
    """
    # Skip header lines until the actual array text begins.
    begin_line_num = 0
    while not out.lines[begin_line_num].startswith('array'):
        begin_line_num += 1
    element_index = 0
    for line_num in range(begin_line_num, len(out.lines)):
        line = out.lines[line_num]
        if '...' in line:
            raise ValueError('Unexpected found ellipses in line representing array')
        # Each regex match on a line corresponds to the next element of `a`
        # in row-major (C) order.
        matches = re.finditer(self._ELEMENT_REGEX, line)
        for match in matches:
            subscripts = list(np.unravel_index(element_index, a.shape))
            is_omitted, row, start_col, end_col = tensor_format.locate_tensor_element(out, subscripts)
            self.assertFalse(is_omitted)
            self.assertEqual(line_num, row)
            self.assertEqual(match.start(), start_col)
            self.assertEqual(match.end(), end_col)
            element_index += 1
    # Every element of the array must have been rendered and checked.
    self.assertEqual(element_index, np.size(a))
that represents a numpy.ndarray.
Args:
out: An instance of RichTextLines representing a numpy.ndarray.
a: The numpy.ndarray being represented.
Raises:
ValueError: if any ellipses ("...") are found in the lines representing
the array. | github-repos |
def IsPathSuffix(mod_path, path):
    """Checks whether path is a full path suffix of mod_path.

    Args:
        mod_path: Must be an absolute path to a source file. Must not have
            file extension.
        path: A relative path. Must not have file extension.

    Returns:
        True if path is a full path suffix of mod_path. False otherwise.
    """
    if not mod_path.endswith(path):
        return False
    if len(mod_path) == len(path):
        # The paths are identical.
        return True
    # The suffix must start at a path-component boundary.
    return mod_path[:-len(path)].endswith(os.sep)
Args:
mod_path: Must be an absolute path to a source file. Must not have
file extension.
path: A relative path. Must not have file extension.
Returns:
True if path is a full path suffix of mod_path. False otherwise. | codesearchnet |
def scale_stoichiometry( self, scaling ):
    """Scale the Calculation stoichiometry.

    Returns the stoichiometry, scaled by the argument scaling.

    Args:
        scaling (float): The scaling factor.

    Returns:
        (dict(Str:Int)): The scaled stoichiometry as label: stoichiometry
            pairs.
    """
    scaled = {}
    for label, amount in self.stoichiometry.items():
        scaled[label] = amount * scaling
    return scaled
Returns the stoichiometry, scaled by the argument scaling.
Args:
scaling (float): The scaling factor.
Returns:
(Counter(Str:Int)): The scaled stoichiometry as a Counter of label: stoichiometry pairs | juraj-google-style |
def cosine_proximity(y_true, y_pred, axis=-1):
    """Computes the cosine similarity between labels and predictions.

    Args:
        y_true: The ground truth values.
        y_pred: The prediction values.
        axis: (Optional) Defaults to -1. The dimension along which the cosine
            similarity is computed.

    Returns:
        Cosine similarity value.
    """
    # Normalize both inputs to unit length, then take their dot product.
    true_unit = nn.l2_normalize(y_true, axis=axis)
    pred_unit = nn.l2_normalize(y_pred, axis=axis)
    return math_ops.reduce_sum(true_unit * pred_unit, axis=axis)
Args:
y_true: The ground truth values.
y_pred: The prediction values.
axis: (Optional) Defaults to -1. The dimension along which the cosine
similarity is computed.
Returns:
Cosine similarity value. | github-repos |
def find_last(self, selector, **kwargs):
    """Return the last element found with a selector.

    Args:
        selector (str): the selector used to find the element.

    Kwargs:
        wait_until_present (bool)
        wait_until_visible (bool)
        raise_exception (bool)

    Returns:
        None if no element was found; the last proxy_element otherwise.

    Raises:
        This function might raise an exception depending on the
        raise_exception kwarg or the config proxy_driver:raise_exception.
    """
    self.debug_log('Finding last element with selector: %s' % selector)
    elements = self.find_all(selector, **kwargs)
    if not elements:
        self.debug_log('find_last (%s): No element found' % selector)
        return None
    self.debug_log('find_last (%s): element found' % selector)
    return elements[-1]
Args:
selector (str): the selector used to find the element
Kwargs:
wait_until_present (bool)
wait_until_visible (bool)
raise_exception (bool)
Returns:
None if no element was found
proxy_element is an element was found
Raises:
this function might raise an exception depending
on the raise_exception kwargs
or
the config proxy_driver:raise_exception | codesearchnet |
def CheckCasts(filename, clean_lines, linenum, error):
    """Various cast related checks.

    Args:
        filename: The name of the current file.
        clean_lines: A CleansedLines instance containing the file.
        linenum: The number of the line to check.
        error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]
    # Detect C-style constructor casts of basic numeric types, e.g.
    # "int(x)", possibly preceded by "new" or a template argument.
    match = Search('(\\bnew\\s+(?:const\\s+)?|\\S<\\s*(?:const\\s+)?)?\\b(int|float|double|bool|char|int32|uint32|int64|uint64)(\\([^)].*)', line)
    expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
    if (match and (not expecting_function)):
        matched_type = match.group(2)
        matched_new_or_template = match.group(1)
        # "int(v)[4]" style array declarations are not casts.
        if Match('\\([^()]+\\)\\s*\\[', match.group(3)):
            return
        matched_funcptr = match.group(3)
        # Function-pointer declarations, using-declarations and placement
        # new expressions are legitimate uses of this syntax; skip them.
        if ((matched_new_or_template is None) and (not (matched_funcptr and (Match('\\((?:[^() ]+::\\s*\\*\\s*)?[^() ]+\\)\\s*\\(', matched_funcptr) or matched_funcptr.startswith('(*)')))) and (not Match(('\\s*using\\s+\\S+\\s*=\\s*' + matched_type), line)) and (not Search(('new\\(\\S+\\)\\s*' + matched_type), line))):
            error(filename, linenum, 'readability/casting', 4, ('Using deprecated casting style. Use static_cast<%s>(...) instead' % matched_type))
    if (not expecting_function):
        CheckCStyleCast(filename, clean_lines, linenum, 'static_cast', '\\((int|float|double|bool|char|u?int(16|32|64))\\)', error)
    # Check const_cast (string-literal casts) first so the same cast is not
    # also reported as reinterpret_cast below.
    if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast', '\\((char\\s?\\*+\\s?)\\)\\s*"', error):
        pass
    else:
        CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast', '\\((\\w+\\s?\\*+\\s?)\\)', error)
    # Taking the address of a cast result usually means taking the address
    # of a temporary, which is dangerous.
    match = Search('(?:[^\\w]&\\(([^)*][^)]*)\\)[\\w(])|(?:[^\\w]&(static|dynamic|down|reinterpret)_cast\\b)', line)
    if match:
        parenthesis_error = False
        match = Match('^(.*&(?:static|dynamic|down|reinterpret)_cast\\b)<', line)
        if match:
            # Walk past the template arguments and the call parentheses to
            # see what follows the cast expression.
            (_, y1, x1) = CloseExpression(clean_lines, linenum, len(match.group(1)))
            if ((x1 >= 0) and (clean_lines.elided[y1][x1] == '(')):
                (_, y2, x2) = CloseExpression(clean_lines, y1, x1)
                if (x2 >= 0):
                    extended_line = clean_lines.elided[y2][x2:]
                    if (y2 < (clean_lines.NumLines() - 1)):
                        extended_line += clean_lines.elided[(y2 + 1)]
                    # "&cast<...>(...)->member" or "[...]": the author is
                    # dereferencing the cast result, so suggest parentheses.
                    if Match('\\s*(?:->|\\[)', extended_line):
                        parenthesis_error = True
        if parenthesis_error:
            error(filename, linenum, 'readability/casting', 4, 'Are you taking an address of something dereferenced from a cast? Wrapping the dereferenced expression in parentheses will make the binding more obvious')
        else:
            error(filename, linenum, 'runtime/casting', 4, 'Are you taking an address of a cast? This is dangerous: could be a temp var. Take the address before doing the cast, rather than after')
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. | codesearchnet |
def delete(self, key):
    """Removes the object named by `key` in `service`.

    Args:
        key: Key naming the object to remove.
    """
    service_key = self._service_key(key)
    self._service_ops['delete'](service_key)
Args:
key: Key naming the object to remove. | juraj-google-style |
def publish_time(self):
    """Return the time that the message was originally published.

    Returns:
        datetime: The date and time that the message was published.
    """
    timestamp = self._message.publish_time
    # The protobuf Timestamp keeps seconds and nanoseconds separately;
    # timedelta resolves only to microseconds, so nanos are floored.
    # NOTE(review): the source line was truncated in extraction; the
    # `// 1000` nanosecond-to-microsecond conversion is reconstructed
    # from the upstream google-cloud-pubsub implementation.
    delta = datetime.timedelta(seconds=timestamp.seconds, microseconds=timestamp.nanos // 1000)
    return (datetime_helpers._UTC_EPOCH + delta)
Returns:
datetime: The date and time that the message was published. | codesearchnet |
def AddBackpropAccumulator(self, op: ops.Operation, grad):
    """Add an accumulation loop for every loop invariant.

    This is added to the backprop loop. It is used to accumulate partial
    gradients within each loop iteration. Called when in the gradient while
    context.

    The pseudocode is:
    ```
    acc = 0.0;
    while (_pivot) {
        acc += grad;
    }
    ```

    Args:
        op: The Enter op for a loop invariant.
        grad: The partial gradient of an iteration for a loop invariant.

    Returns:
        The gradient for a loop invariant.
    """
    self.Exit()
    shape = grad.get_shape()
    if shape.is_fully_defined():
        # Static shape: build the zero accumulator in the outer context.
        if self.outer_context:
            self.outer_context.Enter()
        acc = constant_op.constant(0, grad.dtype, shape=shape, name='b_acc')
        if self.outer_context:
            self.outer_context.Exit()
    else:
        value = op.inputs[0]
        if isinstance(self.outer_context, WhileContext) and self.outer_context.grad_state is not None:
            # Nested while loop: compute the shape in the forward context and
            # replay it through the forward/backward history accumulators.
            forward_ctxt = self.grad_state.forward_context
            forward_ctxt.outer_context.Enter()
            zeros_shape = array_ops.shape_internal(value, optimize=False)
            forward_ctxt.outer_context.Exit()
            outer_grad_state = self.grad_state.outer_grad_state
            history_zeros_shape = outer_grad_state.AddForwardAccumulator(zeros_shape)
            self.outer_context.Enter()
            real_shape = outer_grad_state.AddBackpropAccumulatedValue(history_zeros_shape, zeros_shape)
            acc = array_ops.zeros(real_shape, grad.dtype)
            self.outer_context.Exit()
        else:
            # Dynamic shape in a single loop: derive zeros from the value.
            if self.outer_context:
                self.outer_context.Enter()
            zeros_shape = array_ops.shape_internal(value, optimize=False)
            acc = array_ops.zeros(zeros_shape, grad.dtype)
            if self.outer_context:
                self.outer_context.Exit()
    self.Enter()
    self.AddName(acc.name)
    # Build the accumulation loop: Enter -> Merge -> Switch(pivot) -> Add ->
    # NextIteration back into the Merge; the false branch exits the loop.
    enter_acc = _Enter(acc, self._name, is_constant=False, parallel_iterations=self._parallel_iterations, name='b_acc')
    self.loop_enters.append(enter_acc)
    merge_acc = merge([enter_acc, enter_acc], name='b_acc')[0]
    switch_acc_false, switch_acc_true = switch(merge_acc, self._pivot)
    add_acc = math_ops.add(switch_acc_true, grad)
    next_acc = _NextIteration(add_acc)
    # Patch the Merge's second input to close the accumulation cycle.
    merge_acc.op._update_input(1, next_acc)
    # `exit` here is the control-flow Exit op helper, not the builtin.
    result_acc = exit(switch_acc_false, name='b_acc')
    self.loop_exits.append(result_acc)
    self.ExitResult([result_acc])
    return result_acc
This is added to the backprop loop. It is used to accumulate partial
gradients within each loop iteration. Called when in the gradient while
context.
The pseudocode is:
```
acc = 0.0;
while (_pivot) {
acc += grad;
}
```
Args:
op: The Enter op for a loop invariant.
grad: The partial gradient of an iteration for a loop invariant.
Returns:
The gradient for a loop invariant. | github-repos |
def retrieve_file_from_url(url):
    """Retrieve a file from an URL.

    Args:
        url: The URL to retrieve the file from.

    Returns:
        The absolute path of the downloaded file.

    Raises:
        CLIError: If the download failed or the server answered with an
            HTTP status body (detected by a leading 3-digit prefix).
    """
    try:
        alias_source, _ = urlretrieve(url)
        with open(alias_source, 'r') as downloaded:
            content = downloaded.read()
        # A body starting with three digits is taken to be an HTTP error
        # status rather than real alias file content.
        if content[:3].isdigit():
            raise CLIError(ALIAS_FILE_URL_ERROR.format(url, content.strip()))
    except CLIError:
        raise
    except Exception as exception:
        raise CLIError(ALIAS_FILE_URL_ERROR.format(url, exception))
    return alias_source
Args:
url: The URL to retrieve the file from.
Returns:
The absolute path of the downloaded file. | codesearchnet |
def FormatProblem(self, d=None):
    """Return a text string describing the problem.

    Args:
        d: map returned by GetDictToFormat with formatting added; defaults
            to self.GetDictToFormat().
    """
    if not d:
        d = self.GetDictToFormat()
    text = self.__class__.ERROR_TEXT % d
    # Append the optional human-readable reason on a second line.
    if 'reason' in d and d['reason']:
        return '%s\n%s' % (text, d['reason'])
    return text
Args:
d: map returned by GetDictToFormat with with formatting added | codesearchnet |
def _receive_signal(self, progress):
    """Handle progress signals emitted by subscripts.

    The default behaviour is to store the value and re-emit the signal
    unchanged.

    Args:
        progress: progress of the subscript (presumably a percentage --
            TODO confirm against the emitting subscript).
    """
    self.progress = progress
    self.updateProgress.emit(progress)
the default behaviour is that it just reemits the signal
Args:
progress: progress of subscript | juraj-google-style |
def tournament_number2name(self, number):
    """Translate tournament number to tournament name.

    Args:
        number (int): tournament number to translate.

    Returns:
        name (str): name of the tournament or `None` if unknown.

    Examples:
        >>> NumerAPI().tournament_number2name(4)
        'delta'
        >>> NumerAPI().tournament_number2name(99)
        None
    """
    # Build a lookup table first; for duplicate numbers the last entry wins,
    # matching dict construction semantics.
    name_by_number = {entry['tournament']: entry['name'] for entry in self.get_tournaments()}
    return name_by_number.get(number)
Args:
number (int): tournament number to translate
Returns:
name (str): name of the tournament or `None` if unknown.
Examples:
>>> NumerAPI().tournament_number2name(4)
'delta'
>>> NumerAPI().tournament_number2name(99)
None | codesearchnet |
def ChunkedDecoderLayer(feature_depth,
                        feedforward_depth,
                        num_heads,
                        dropout,
                        chunk_selector,
                        mode):
    """Transformer decoder layer operating on chunks.

    Args:
        feature_depth: int: depth of embedding
        feedforward_depth: int: depth of feed-forward layer
        num_heads: int: number of attention heads
        dropout: float: dropout rate (how much to drop out)
        chunk_selector: a function from chunk number to list of chunks to
            attend.
        mode: str: 'train' or 'eval'

    Returns:
        the layer.
    """
    return layers.Serial(
        # Chunked causal self-attention with a residual connection.
        layers.Residual(
            layers.Map(layers.LayerNorm()),
            layers.ChunkedCausalMultiHeadedAttention(
                feature_depth, num_heads=num_heads, dropout=dropout,
                chunk_selector=chunk_selector, mode=mode),
            layers.Map(layers.Dropout(rate=dropout, mode=mode)),
        ),
        # Position-wise feed-forward block applied per chunk.
        layers.Map(ResidualFeedForward(
            feature_depth, feedforward_depth, dropout, mode=mode))
    )
Args:
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
chunk_selector: a function from chunk number to list of chunks to attend.
mode: str: 'train' or 'eval'
Returns:
the layer. | juraj-google-style |
def _VerifyMaxBatchSizeAnnotations(self, expected_engines, original_gdef, converted_gdef, default_max_batch_size, expected_max_batch_sizes=None):
    """Verifies the max batch size annotations in the original and converted GraphDef.

    Args:
        expected_engines: A sequence of engines names.
        original_gdef: GraphDef. The graph def before TensorRT conversion.
        converted_gdef: GraphDef. The graph def after TensorRT conversion.
        default_max_batch_size: The default maximum batch size to use if no
            node inside a segment is annotated with a customized max batch
            size. This value is None when the graph is converted to TF-TRT
            with dynamic engines.
        expected_max_batch_sizes: Optional. A sequence of max batch sizes for
            all the engines. `None` if does not check enforce max batch
            sizes.
    """
    if isinstance(expected_max_batch_sizes, collections.abc.Collection):
        self.assertEqual(len(expected_max_batch_sizes), len(expected_engines))
    else:
        self.assertIsNone(expected_max_batch_sizes, "'expected_max_batch_sizes' shall only be a sequence of integers or `None`.")

    def _ChainAllNodes(graph_def):
        # Iterate top-level nodes plus all nodes inside library functions.
        return itertools.chain(graph_def.node, itertools.chain(*[func.node_def for func in graph_def.library.function]))
    old_name_to_node_map = {self._ToString(node.name): node for node in _ChainAllNodes(original_gdef)}
    new_name_to_func_map = {self._ToString(func.signature.name): func for func in converted_gdef.library.function}

    def _DetectStaticBatchSize(node_def):
        # A node has a static batch size when all of its >=2-rank output
        # shapes agree on a concrete (>=1) leading dimension.
        shapes = node_def.attr['_output_shapes'].list.shape
        batch_size = set((list(s.dim)[0].size if len(s.dim) >= 2 else None for s in shapes))
        if len(batch_size) == 1 and list(batch_size)[0] >= 1:
            return list(batch_size)[0]
        return None
    name_to_engines_map = {}
    actual_max_batch_sizes = []
    for node in _ChainAllNodes(converted_gdef):
        if node.op == 'TRTEngineOp':
            engine = node
            engine_name = self._RemoveGraphSequenceNumber(self._Canonicalize(self._ToString(engine.name)))
            self.assertIn(engine_name, expected_engines)
            name_to_engines_map[engine_name] = engine
            self.assertIn('max_batch_size', node.attr)
            engine_max_batch_size = node.attr['max_batch_size'].i
            self.assertIsInstance(engine_max_batch_size, int)
            actual_max_batch_sizes.append(engine_max_batch_size)
            seg_func = node.attr['segment_func'].func
            self.assertIsNotNone(seg_func)
            self.assertIn(seg_func.name, new_name_to_func_map)
            seg_func_def = new_name_to_func_map[seg_func.name]
            logging.info('Segment function name: %s. Including %d nodes.', seg_func.name, len(seg_func_def.node_def))
            node_max_batch_size_all_none = True
            # Cross-check the engine's max batch size against every original
            # node that ended up inside this engine's segment function.
            for alternative_node in seg_func_def.node_def:
                node_name = self._Canonicalize(self._ToString(alternative_node.name))
                if node_name not in old_name_to_node_map:
                    continue
                original_node = old_name_to_node_map[node_name]
                node_max_batch_size = None
                if '_tftrt_op_max_batch_size' in original_node.attr:
                    node_max_batch_size = original_node.attr['_tftrt_op_max_batch_size'].i
                elif original_node.op != 'Const' and alternative_node.op != 'Const' and ('_output_shapes' in original_node.attr):
                    node_max_batch_size = _DetectStaticBatchSize(original_node)
                logging.info("'{%s}(%s)'s max batch size annotation is %s. '{%s}'s max batch size is %s.", node_name, original_node.op, str(node_max_batch_size), engine_name, str(engine_max_batch_size))
                node_max_batch_size_all_none &= node_max_batch_size is None
                self.assertTrue(engine_max_batch_size == node_max_batch_size or node_max_batch_size is None)
            logging.info("'{%s}'s max batch size is %d.", engine_name, engine_max_batch_size)
            # Engines fall back to the default only when no node carried a
            # max batch size of its own.
            self.assertTrue(default_max_batch_size is None or engine_max_batch_size == default_max_batch_size or (not node_max_batch_size_all_none))
    self.assertCountEqual(expected_engines, tuple(name_to_engines_map.keys()))
    if expected_max_batch_sizes is not None:
        self.assertCountEqual(expected_max_batch_sizes, actual_max_batch_sizes)
Args:
expected_engines: A sequence of engines names.
original_gdef: GraphDef. The graph def before TensorRT conversion.
converted_gdef: GraphDef. The graph def after TensorRT conversion.
default_max_batch_size: The default maximum batch size to use if no node
inside a segment is annotated with a customized max batch size. This
value is None when the graph is converted to TF-TRT with dynamic
engines.
expected_max_batch_sizes: Optional. A sequence of max batch sizes for all
the engines. `None` if does not check enforce max batch sizes. | github-repos |
def profile(self, profile):
    """Set the current profile.

    Args:
        profile (dict): The profile data.
    """
    # Invalidate any staged data belonging to the previous profile.
    self._staging_data = None
    # Default language is PYTHON when the install_json does not specify one.
    lang = profile.get('install_json', {}).get('programLanguage', 'PYTHON')
    profile_args = ArgBuilder(lang, self.profile_args(profile.get('args')))
    self._profile = profile
    self._profile['profile_args'] = profile_args
    self.load_tcex()
    self.reports.profile(profile.get('profile_name'))
    self._create_tc_dirs()
Args:
profile (dict): The profile data. | codesearchnet |
def get_schema(self, reportId: int=None) -> list:
    """Read columns from report and produce BigQuery compatible schema.

    Columns with an unknown type default to STRING.

    Args:
        reportId: optional, if not given uses prior value from a
            request(...) call.

    Returns:
        List of BigQuery schema fields derived from report columns.
    """
    if reportId is None:
        reportId = self.reportId
    schema = []
    report = API_SearchAds(self.config, self.auth).reports().get(reportId=reportId).execute()
    for column in report['request']['columns']:
        # Saved columns expose 'savedColumnName' instead of 'columnName'.
        name = column.get('columnName', column.get('savedColumnName'))
        schema.append({'name': column_header_sanitize(name), 'type': self.column_type(report['request']['reportScope']['agencyId'], report['request']['reportScope']['advertiserId'], name), 'mode': 'NULLABLE'})
    return schema
Columns with an unknown type default to STRING.
Args:
reportId - optional, if not given uses prior value from request(...) call.
Returns:
List of BigQuery schema fields derived from report columns. | github-repos |
def interpolate(self, date, method=None, order=None):
    """Interpolate ephemeris data at a given date.

    Args:
        date (Date): date at which to interpolate; must lie in
            [self.start, self.stop].
        method (str): Method of interpolation to use (LINEAR or LAGRANGE).
        order (int): In case the ``LAGRANGE`` method is used.

    Return:
        Orbit: the interpolated orbit.

    NOTE(review): several lines below were truncated during extraction
    (expressions cut at what was most likely a ``//`` integer division).
    They are kept verbatim and flagged inline; restore them from the
    upstream source before using this code.
    """
    if (not (self.start <= date <= self.stop)):
        raise ValueError(("Date '%s' not in range" % date))
    # Binary search for the data point immediately preceding `date`.
    prev_idx = 0
    ephem = self
    while True:
        idx = len(ephem)
        if (idx == 1):
            break
        # TODO(review): line truncated -- most likely `k = (idx // 2)`.
        k = (idx
        if (date > ephem[k].date):
            prev_idx += k
            ephem = ephem[k:]
        else:
            ephem = ephem[:k]
    method = (method if (method is not None) else self.method)
    order = (order if (order is not None) else self.order)
    if (method == self.LINEAR):
        # Linear interpolation between the two bracketing points.
        y0 = self[prev_idx]
        y1 = self[(prev_idx + 1)]
        result = (y0[:] + (((y1[:] - y0[:]) * (date.mjd - y0.date.mjd)) / (y1.date.mjd - y0.date.mjd)))
    elif (method == self.LAGRANGE):
        # TODO(review): next two lines truncated -- window bounds around
        # prev_idx, probably expressions involving `order // 2`.
        stop = (((prev_idx + 1) + (order
        start = ((prev_idx - (order
        # Clamp the interpolation window to the available data range.
        if (stop >= len(self)):
            start -= (stop - len(self))
        elif (start < 0):
            stop -= start
            start = 0
        subset = self[start:stop]
        date_subset = np.array([x.date.mjd for x in subset])
        result = np.zeros(6)
        # Classic Lagrange basis polynomial evaluation over the window.
        for j in range(order):
            mask = (date_subset != date_subset[j])
            l_j = ((date.mjd - date_subset[mask]) / (date_subset[j] - date_subset[mask]))
            result = (result + (l_j.prod() * subset[j]))
    else:
        # NOTE: 'Unkown' typo preserved -- it is a runtime error string.
        raise ValueError('Unkown interpolation method', method)
    orb = ephem[0]
    return orb.__class__(date, result, orb.form, orb.frame, orb.propagator)
Args:
date (Date):
method (str): Method of interpolation to use
order (int): In case of ``LAGRANGE`` method is used
Return:
Orbit: | codesearchnet |
def AddFileDescriptor(self, file_desc):
    """Adds a FileDescriptor to the pool, non-recursively.

    If the FileDescriptor contains messages or enums, the caller must
    explicitly register them.

    Args:
        file_desc: A FileDescriptor.
    """
    self._AddFileDescriptor(file_desc)
    # Remember which file declares each top-level extension so later
    # lookups can find the declaring file.
    for ext in file_desc.extensions_by_name.values():
        self._file_desc_by_toplevel_extension[ext.full_name] = file_desc
If the FileDescriptor contains messages or enums, the caller must explicitly
register them.
Args:
file_desc: A FileDescriptor. | juraj-google-style |
def _AnalyzeFileObject(self, mediator, file_object):
    """Processes a file-like object with analyzers.

    Args:
        mediator (ParserMediator): mediates the interactions between
            parsers and other components, such as storage and abort signals.
        file_object (dfvfs.FileIO): file-like object to process.
    """
    # Read in chunks large enough for the most demanding analyzer.
    maximum_read_size = max([
        analyzer_object.SIZE_LIMIT for analyzer_object in self._analyzers])
    hashers_only = True
    for analyzer_object in self._analyzers:
        if not isinstance(analyzer_object, hashing_analyzer.HashingAnalyzer):
            hashers_only = False
            break
    file_size = file_object.get_size()
    # When only hashers would run and the file exceeds the hasher size
    # limit, there is nothing to do.
    if (hashers_only and self._hasher_file_size_limit and
        file_size > self._hasher_file_size_limit):
        return
    file_object.seek(0, os.SEEK_SET)
    data = file_object.read(maximum_read_size)
    while data:
        if self._abort:
            break
        for analyzer_object in self._analyzers:
            if self._abort:
                break
            # Non-incremental analyzers cannot process oversized files.
            if (not analyzer_object.INCREMENTAL_ANALYZER and
                file_size > analyzer_object.SIZE_LIMIT):
                continue
            if (isinstance(analyzer_object, hashing_analyzer.HashingAnalyzer) and
                self._hasher_file_size_limit and
                file_size > self._hasher_file_size_limit):
                continue
            self.processing_status = analyzer_object.PROCESSING_STATUS_HINT
            analyzer_object.Analyze(data)
            self.last_activity_timestamp = time.time()
        data = file_object.read(maximum_read_size)
    display_name = mediator.GetDisplayName()
    # Collect results from each analyzer and attach them as event attributes.
    for analyzer_object in self._analyzers:
        if self._abort:
            break
        for result in analyzer_object.GetResults():
            logger.debug((
                '[AnalyzeFileObject] attribute {0:s}:{1:s} calculated for '
                'file: {2:s}.').format(
                    result.attribute_name, result.attribute_value, display_name))
            mediator.AddEventAttribute(
                result.attribute_name, result.attribute_value)
        analyzer_object.Reset()
    self.processing_status = definitions.STATUS_INDICATOR_RUNNING
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
file_object (dfvfs.FileIO): file-like object to process. | juraj-google-style |
def parse_message(message: str) -> str:
    """Extract the text following a `run-slow` / `run_slow` / `run slow` prefix.

    Returns the remainder of *message* (lowercased) after the prefix, with
    leading colons removed. Returns the empty string when *message* is None
    or does not start with one of the prefixes.
    """
    if message is None:
        return ''
    normalized = message.strip().lower()
    if not normalized.startswith(('run-slow', 'run_slow', 'run slow')):
        return ''
    # All three accepted prefixes have the same length, so one slice works.
    remainder = normalized[len('run slow'):]
    while remainder.strip().startswith(':'):
        remainder = remainder.strip()[1:]
    return remainder  # | Parses a GitHub pull request's comment to find the models specified in it to run slow CI.
Args:
message (`str`): The body of a GitHub pull request's comment.
Returns:
`str`: The substring in `message` after `run-slow`, `run_slow` or `run slow`. If no such prefix is found, the
empty string is returned. | github-repos |
def _GetNumberOfSecondsFromElements(self, year, month, day_of_month, hours, minutes, seconds):
    """Converts date and time elements into seconds since 1970-01-01 00:00:00.

    Missing hours/minutes/seconds default to 0. Returns None when year,
    month or day of month is unset; raises ValueError for out-of-range
    values.
    """
    if not (year and month and day_of_month):
        return None
    hours = 0 if hours is None else hours
    if hours not in range(0, 24):
        raise ValueError('Hours value: {0!s} out of bounds.'.format(hours))
    minutes = 0 if minutes is None else minutes
    if minutes not in range(0, 60):
        raise ValueError('Minutes value: {0!s} out of bounds.'.format(minutes))
    seconds = 0 if seconds is None else seconds
    if seconds not in range(0, 60):
        raise ValueError('Seconds value: {0!s} out of bounds.'.format(seconds))
    # Validate the day against the actual month length (leap years included).
    days_per_month = self._GetDaysPerMonth(year, month)
    if not 1 <= day_of_month <= days_per_month:
        raise ValueError('Day of month value out of bounds.')
    # timegm interprets the tuple as UTC, the inverse of time.gmtime.
    time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)
    return int(calendar.timegm(time_elements_tuple))  # | Retrieves the number of seconds from the date and time elements.
Args:
year (int): year e.g. 1970.
month (int): month, where 1 represents January.
day_of_month (int): day of the month, where 1 represents the first day.
hours (int): hours.
minutes (int): minutes.
seconds (int): seconds.
Returns:
int: number of seconds since January 1, 1970 00:00:00 or None if year,
month or day of month are not set.
Raises:
ValueError: if the time elements are invalid. | codesearchnet |
def from_path(cls, path, format=None):
    """Load a developer package (package.py / package.yaml) from disk.

    Args:
        path: Directory containing the package definition file, or the
            file path of the package file itself.
        format: Which FileFormat to use, or None to check both .py and
            .yaml.

    Returns:
        `Package` object.

    Raises:
        PackageMetadataError: If the path is inaccessible, no definition
            file is found, or the definition lacks a string 'name' field.
    """
    name = None
    data = None
    if format is None:
        formats = (FileFormat.py, FileFormat.yaml)
    else:
        formats = (format,)
    try:
        mode = os.stat(path).st_mode
    except (IOError, OSError):
        raise PackageMetadataError(
            "Path %r did not exist, or was not accessible" % path)
    is_dir = stat.S_ISDIR(mode)
    # Try every candidate package filename with every allowed format until
    # a loadable definition with a usable 'name' is found.
    for name_ in config.plugins.package_repository.filesystem.package_filenames:
        for format_ in formats:
            if is_dir:
                filepath = os.path.join(path, "%s.%s" % (name_,
                                                         format_.extension))
                exists = os.path.isfile(filepath)
            else:
                if format is None:
                    # A direct file path must match the format's extension.
                    if os.path.splitext(path)[1] != format_.extension:
                        continue
                filepath = path
                exists = True
            if exists:
                data = load_from_file(filepath, format_, disable_memcache=True)
                break
        if data:
            name = data.get("name")
            # NOTE(review): with 'or' the isinstance check can never make
            # this condition true when name is None; possibly intended 'and'.
            if name is not None or isinstance(name, basestring):
                break
    if data is None:
        raise PackageMetadataError("No package definition file found at %s" % path)
    if name is None or not isinstance(name, basestring):
        raise PackageMetadataError(
            "Error in %r - missing or non-string field 'name'" % filepath)
    package = create_package(name, data, package_cls=cls)
    # Preprocessing may replace both the package and its data.
    result = package._get_preprocessed(data)
    if result:
        package, data = result
    package.filepath = filepath
    package.includes = set()
    # Collect include names referenced by SourceCode values at any nesting
    # depth. (iteritems/basestring make this Python 2 only code.)
    def visit(d):
        for k, v in d.iteritems():
            if isinstance(v, SourceCode):
                package.includes |= (v.includes or set())
            elif isinstance(v, dict):
                visit(v)
    visit(data)
    package._validate_includes()
    return package | Load a developer package.
A developer package may for example be a package.yaml or package.py in a
user's source directory.
Args:
path: Directory containing the package definition file, or file
path for the package file itself
format: which FileFormat to use, or None to check both .py and .yaml
Returns:
`Package` object. | juraj-google-style |
def explain_tabular(self, trainset, labels, instance, num_features=5, kernel_width=3):
    """Explain categorical and numeric features for a prediction with LIME.

    Args:
        trainset: DataFrame of training features LIME uses to estimate
            value distributions.
        labels: list of labels to explain.
        instance: the prediction instance; a csv line string or a dict
            conforming to the model's input.
        num_features: maximum number of features to show.
        kernel_width: passed through to LIME's LimeTabularExplainer.

    Returns:
        A LIME lime.explanation.Explanation.
    """
    from lime.lime_tabular import LimeTabularExplainer
    # A csv line is parsed into a dict keyed by the model's input headers.
    if isinstance(instance, six.string_types):
        instance = next(csv.DictReader([instance], fieldnames=self._headers))
    categories = self._get_unique_categories(trainset)
    np_trainset = self._preprocess_data_for_tabular_explain(trainset, categories)
    predict_fn = self._make_tabular_predict_fn(labels, instance, categories)
    # The single instance is preprocessed the same way as the training set.
    prediction_df = pd.DataFrame([instance])
    prediction_instance = self._preprocess_data_for_tabular_explain(prediction_df, categories)
    explainer = LimeTabularExplainer(np_trainset, feature_names=(self._categorical_columns + self._numeric_columns), class_names=labels, categorical_features=range(len(categories)), categorical_names={i: v for (i, v) in enumerate(categories)}, kernel_width=kernel_width)
    exp = explainer.explain_instance(prediction_instance[0], predict_fn, num_features=num_features, labels=range(len(labels)))
    return exp | Explain categorical and numeric features for a prediction.
It analyze the prediction by LIME, and returns a report of the most impactful tabular
features contributing to certain labels.
Args:
trainset: a DataFrame representing the training features that LIME can use to decide
value distributions.
labels: a list of labels to explain.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
num_features: maximum number of features to show.
kernel_width: Passed to LIME LimeTabularExplainer directly.
Returns:
A LIME's lime.explanation.Explanation. | codesearchnet |
def _copy_stream_position(position):
    """Return a copy of *position*.

    Accepts either a StreamPosition message (deep-copied via CopyFrom) or
    a mapping in StreamPosition format (expanded into a new message).
    """
    if not isinstance(position, types.StreamPosition):
        return types.StreamPosition(**position)
    duplicate = types.StreamPosition()
    duplicate.CopyFrom(position)
    return duplicate  # | Copy a StreamPosition.
Args:
position (Union[ \
dict, \
~google.cloud.bigquery_storage_v1beta1.types.StreamPosition \
]):
StreamPostion (or dictionary in StreamPosition format) to copy.
Returns:
~google.cloud.bigquery_storage_v1beta1.types.StreamPosition:
A copy of the input StreamPostion. | codesearchnet |
def _update_from_body(self, destination, source):
for key, value in source.iteritems():
destination_value = destination.get(key)
if isinstance(value, dict) and isinstance(destination_value, dict):
self._update_from_body(destination_value, value)
else:
destination[key] = value | Updates the dictionary for an API payload with the request body.
The values from the body should override those already in the payload, but
for nested fields (message objects) the values can be combined
recursively.
Args:
destination: A dictionary containing an API payload parsed from the
path and query parameters in a request.
source: A dictionary parsed from the body of the request. | juraj-google-style |
def upper_diag_self_prodx(list_):
    """Upper triangle (i < j) of the cartesian product of *list_* with itself.

    Args:
        list_ (list): input sequence.

    Returns:
        list: pairs ``(list_[i], list_[j])`` for every i < j, in index order.

    Example:
        >>> upper_diag_self_prodx([1, 2, 3])
        [(1, 2), (1, 3), (2, 3)]
    """
    from itertools import combinations
    # combinations yields exactly the i < j index pairs in the same order
    # as the original double loop, without the O(n^2) filtered scan.
    return list(combinations(list_, 2))  # | upper diagonal of cartesian product of self and self.
Weird name. fixme
Args:
list_ (list):
Returns:
list:
CommandLine:
python -m utool.util_alg --exec-upper_diag_self_prodx
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> list_ = [1, 2, 3]
>>> result = upper_diag_self_prodx(list_)
>>> print(result)
[(1, 2), (1, 3), (2, 3)] | juraj-google-style |
def list_from_file(filename, prefix='', offset=0, max_num=0):
    """Load a text file and parse its content as a list of strings.

    Args:
        filename (str): Filename.
        prefix (str): String inserted at the beginning of each item.
        offset (int): Number of leading lines to skip.
        max_num (int): Maximum number of lines to read; zero or negative
            means no limit.

    Returns:
        list[str]: the lines, stripped of trailing newlines and prefixed.
    """
    items = []
    count = 0
    with open(filename, 'r') as handle:
        # Skip the first `offset` lines.
        for _ in range(offset):
            handle.readline()
        for line in handle:
            if max_num > 0 and count >= max_num:
                break
            items.append(prefix + line.rstrip('\n'))
            count += 1
    return items  # | Load a text file and parse the content as a list of strings.
Args:
filename (str): Filename.
prefix (str): The prefix to be inserted to the beginning of each item.
offset (int): The offset of lines.
max_num (int): The maximum number of lines to be read,
zeros and negatives mean no limitation.
Returns:
list[str]: A list of strings. | codesearchnet |
def assert_print_equals_golden(self, json_path: str, proto_path: str, proto_cls: Type[message.Message], *, print_f: Callable[..., str], json_delimiter: Optional[str]=None, proto_delimiter: Optional[str]=None, **print_kwargs: Any) -> None:
    """Compare printer output against a 'golden' JSON file.

    Comparison happens between Python-native structures produced by
    json.loads, so inconsequential whitespace differences are ignored;
    Decimal parsing preserves numeric precision on both sides.

    Args:
        json_path: Filepath of the .json file (loaded as a 'golden').
        proto_path: Filepath of the .prototxt file (the 'test case').
        proto_cls: Protobuf message type under test.
        print_f: The print function to execute and examine.
        json_delimiter: Optional delimiter splitting multiple JSON records.
        proto_delimiter: Optional delimiter splitting multiple protos.
        **print_kwargs: Keyword arguments forwarded to print_f.
    """
    testdata = self._read_json_and_protos(json_path, proto_path, proto_cls, json_delimiter=json_delimiter, proto_delimiter=proto_delimiter)
    for json_str, proto in zip(testdata.json_strs, testdata.protos):
        from_json = json.loads(json_str, parse_int=decimal.Decimal, parse_float=decimal.Decimal)
        # Deep copy so we can verify printing did not mutate its input.
        orig_proto = copy.deepcopy(proto)
        raw_json_str = print_f(proto, **print_kwargs)
        from_proto = json.loads(raw_json_str, parse_int=decimal.Decimal, parse_float=decimal.Decimal)
        self.assertEqual(proto, orig_proto)
        self.assertEqual(from_json, from_proto) | Compare printer output against 'golden' file.
Note that we perform a comparison between Python native types after calling
into json.loads(...), as diffing raw strings can have minor differences in
spaces that are inconsequential to the underlying representations.
If json_delimiter and proto_delimiter are supplied, the cardinality of the
resulting sequences must match exactly or an error will be thrown.
Args:
json_path: The filepath to the .json file (loaded as a 'golden').
proto_path: The filepath to the .prototxt file (loaded as a 'test case').
proto_cls: The type of protobuf message to serialize to and print from
(type under test).
print_f: The print function to execute and examine.
json_delimiter: An optional delimiter for the .json file to load multiple
representations. Defaults to None.
proto_delimiter: An optional delimiter for the .prototxt file to load
multiple representations. Defaults to None.
**print_kwargs: An optional list of key/value arguments to supply to the
print function. | github-repos |
def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):
    """Dispatch weight loading to the safetensors or H5 loader by extension.

    Args:
        model (`keras.models.Model`): The model to load the weights into.
        resolved_archive_file (`str`): Location of the weights file.
        ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
            Whether to ignore weights whose shapes differ from the model's.
        _prefix: Optional name-scope prefix forwarded to the loader.

    Returns:
        Whatever the selected loader returns.
    """
    is_safetensors = resolved_archive_file.endswith('.safetensors')
    loader = load_tf_weights_from_safetensors if is_safetensors else load_tf_weights_from_h5
    return loader(model, resolved_archive_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=_prefix)  # | Detect missing and unexpected layers and load the TF weights from the shard file accordingly to their names and
shapes.
Args:
model (`keras.models.Model`):
The model to load the weights into.
resolved_archive_file (`str`):
The location of the H5 file.
ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
Whether or not to ignore weights with shapes that don't match between the checkpoint of the model.
Returns:
Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the
mismatched layers. | github-repos |
def mark_streamer(self, index):
    """Manually flag streamer *index* to trigger on the next check_streamers call.

    Args:
        index (int): The index of the streamer to mark as manually triggered.

    Raises:
        ArgumentError: If the streamer index is invalid.
    """
    self._logger.debug("Marking streamer %d manually", index)
    streamer_count = len(self.streamers)
    if not index < streamer_count:
        raise ArgumentError("Invalid streamer index", index=index, num_streamers=streamer_count)
    self._manually_triggered_streamers.add(index)  # | Manually mark a streamer that should trigger.
The next time check_streamers is called, the given streamer will be
manually marked that it should trigger, which will cause it to trigger
unless it has no data.
Args:
index (int): The index of the streamer that we should mark as
manually triggered.
Raises:
ArgumentError: If the streamer index is invalid. | juraj-google-style |
def joinpaths(self, *paths):
    """Mimic os.path.join using the filesystem's path separator.

    Args:
        *paths: (str) Zero or more paths to join.

    Returns:
        (str) The paths joined by the path separator, starting with
        the last absolute path in paths.
    """
    if (sys.version_info >= (3, 6)):
        # Accept path-like objects (PEP 519) on Python 3.6+.
        paths = [os.fspath(path) for path in paths]
    if (len(paths) == 1):
        return paths[0]
    if self.is_windows_fs:
        return self._join_paths_with_drive_support(*paths)
    joined_path_segments = []
    sep = self._path_separator(paths[0])
    for path_segment in paths:
        if self._starts_with_root_path(path_segment):
            # An absolute segment discards everything joined so far,
            # mirroring os.path.join semantics.
            joined_path_segments = [path_segment]
        else:
            if (joined_path_segments and (not joined_path_segments[(- 1)].endswith(sep))):
                joined_path_segments.append(sep)
            # Empty segments are skipped so they do not add separators.
            if path_segment:
                joined_path_segments.append(path_segment)
    return self._matching_string(paths[0], '').join(joined_path_segments) | Mimic os.path.join using the specified path_separator.
Args:
*paths: (str) Zero or more paths to join.
Returns:
(str) The paths joined by the path separator, starting with
the last absolute path in paths. | codesearchnet |
def save(self, path):
    """Write a binary copy of this report to *path*.

    Args:
        path (string): Destination file path for the encoded report.
    """
    encoded = self.encode()
    with open(path, "wb") as handle:
        handle.write(encoded)  # | Save a binary copy of this report
Args:
path (string): The path where we should save the binary copy of the report | juraj-google-style |
def _parse_book_links(dom):
    """Collect absolute links to book details from a book-list page.

    Args:
        dom (obj): HTMLElement container of the page with the book list.

    Returns:
        list: Strings with absolute links to book details.
    """
    def _is_book_box(element):
        return element.params.get('class', '').startswith('boxProKnihy')

    links = []
    for box in dom.find(None, fn=_is_book_box):
        anchors = box.find('a')
        # Skip boxes with no anchor or an anchor without a href.
        if anchors and 'href' in anchors[0].params:
            links.append(anchors[0].params['href'])
    return links  # | Parse links to the details about publications from page with book list.
Args:
dom (obj): HTMLElement container of the page with book list.
Returns:
list: List of strings / absolute links to book details. | codesearchnet |
def ParseOptions(self, options):
  """Parses the options.

  Args:
    options (argparse.Namespace): command line arguments.

  Raises:
    BadConfigOption: if the options are invalid.
  """
  self._ParseTimezoneOption(options)
  names = ['analysis_plugins', 'language', 'profiling']
  helpers_manager.ArgumentHelperManager.ParseOptions(
      options, self, names=names)
  self.list_analysis_plugins = self._analysis_plugins == 'list'
  self.list_language_identifiers = self._preferred_language == 'list'
  self.list_profilers = self._profilers == 'list'
  # Stop early when the user only asked for one of the "list" outputs;
  # the remaining options are not needed in that case.
  if (self.list_analysis_plugins or self.list_language_identifiers or
      self.list_profilers or self.list_timezones):
    return
  helpers_manager.ArgumentHelperManager.ParseOptions(
      options, self, names=['output_modules'])
  self.list_output_modules = self._output_format == 'list'
  if self.list_output_modules:
    return
  self._ParseInformationalOptions(options)
  helpers_manager.ArgumentHelperManager.ParseOptions(
      options, self, names=['data_location'])
  self._ParseLogFileOptions(options)
  self._ParseProcessingOptions(options)
  helpers_manager.ArgumentHelperManager.ParseOptions(
      options, self, names=['event_filters'])
  self._deduplicate_events = getattr(options, 'dedup', True)
  if self._data_location:
    # Propagate the resolved data location back onto the options object
    # so later helpers see it.
    options.data_location = self._data_location
  else:
    logger.warning('Unable to automatically determine data location.')
  self._command_line_arguments = self.GetCommandLineArguments()
  helpers_manager.ArgumentHelperManager.ParseOptions(
      options, self, names=['storage_file'])
  if not self._storage_file_path:
    raise errors.BadConfigOption('Missing storage file option.')
  if not os.path.isfile(self._storage_file_path):
    raise errors.BadConfigOption(
        'No such storage file: {0:s}.'.format(self._storage_file_path))
  self._EnforceProcessMemoryLimit(self._process_memory_limit)
  self._analysis_plugins = self._CreateAnalysisPlugins(options)
  self._output_module = self._CreateOutputModule(options) | Parses the options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid. | juraj-google-style |
def nltk_stemmer(stemmer, token, i=None, tokens=None):
    """Pipeline wrapper around an NLTK SnowballStemmer.

    Args:
        stemmer (SnowballStemmer): Stemmer instance that performs the stemming.
        token (lunr.Token): The token to stem.
        i (int): Index of the token in its set (unused; pipeline signature).
        tokens (list): The full token set (unused; pipeline signature).
    """
    # token.update receives a callable taking (string, metadata).
    return token.update(lambda word, metadata=None: stemmer.stem(word))  # | Wrapper around a NLTK SnowballStemmer, which includes stop words for
each language.
Args:
stemmer (SnowballStemmer): Stemmer instance that performs the stemming.
token (lunr.Token): The token to stem.
i (int): The index of the token in a set.
tokens (list): A list of tokens representing the set. | codesearchnet |
def _SetPath(self, path):
  """Sets the current path to watch for new events.

  Records the final size of the previously watched path, if any; a failed
  stat is logged rather than raised.

  Args:
    path: The full path of the file to watch.
  """
  old_path = self._path
  # NOTE(review): cloud paths are excluded from the size bookkeeping --
  # presumably stat is unreliable/expensive there; confirm.
  if old_path and not io_wrapper.IsCloudPath(old_path):
    try:
      size = tf.io.gfile.stat(old_path).length
      logger.debug('Setting latest size of %s to %d', old_path, size)
      self._finalized_sizes[old_path] = size
    except tf.errors.OpError as e:
      logger.error('Unable to get size of %s: %s', old_path, e)
  self._path = path
  self._loader = self._loader_factory(path) | Sets the current path to watch for new events.
This also records the size of the old path, if any. If the size can't be
found, an error is logged.
Args:
path: The full path of the file to watch. | juraj-google-style |
def aes_encrypt(base64_encryption_key, data):
    """Encrypt data with AES-CBC and sign it with HMAC-SHA256.

    Arguments:
        base64_encryption_key (str): a base64-encoded string containing an
            AES encryption key and HMAC signing key as generated by
            generate_encryption_key()
        data (str): a byte string containing the data to be encrypted

    Returns:
        str: the encrypted data as a base64-encoded byte string with the
        HMAC signature appended to the end
    """
    # Text input is encoded to bytes before encryption.
    if isinstance(data, text_type):
        data = data.encode("UTF-8")
    aes_key_bytes, hmac_key_bytes = _extract_keys(base64_encryption_key)
    # Pad to the AES block size -- presumably PKCS#7; confirm _pad's scheme.
    data = _pad(data)
    # A fresh random IV is prepended so decryption can recover it.
    iv_bytes = os.urandom(AES_BLOCK_SIZE)
    cipher = AES.new(aes_key_bytes, mode=AES.MODE_CBC, IV=iv_bytes)
    data = iv_bytes + cipher.encrypt(data)
    # Encrypt-then-MAC: the signature covers IV + ciphertext.
    hmac_signature = hmac.new(hmac_key_bytes, data, hashlib.sha256).digest()
    return as_base64(data + hmac_signature) | Encrypt data with AES-CBC and sign it with HMAC-SHA256
Arguments:
base64_encryption_key (str): a base64-encoded string containing an AES encryption key
and HMAC signing key as generated by generate_encryption_key()
data (str): a byte string containing the data to be encrypted
Returns:
str: the encrypted data as a byte string with the HMAC signature appended to the end | juraj-google-style |
def whilst(coro, coro_test, assert_coro=None, *args, **kw):
    """Repeatedly call `coro` while `coro_test` passes the assertion.

    This function is a coroutine and is the inverse of `paco.until()`.

    Args:
        coro (coroutinefunction): coroutine function to execute each pass.
        coro_test (coroutinefunction): coroutine function used as loop test.
        assert_coro (coroutinefunction): optional assertion coroutine used
            to decide whether the test result passes; defaults to
            assert_true.
        *args (mixed): variadic arguments passed to `coro` on every call.
        **kw (mixed): keyword arguments passed to `coro` on every call.

    Raises:
        TypeError: if input arguments are not coroutine functions.

    Returns:
        list: result values returned by `coro`.
    """
    assert_corofunction(coro=coro, coro_test=coro_test)
    results = []
    assert_coro = (assert_coro or assert_true)
    # Run the test, filter its result through the assertion coroutine, and
    # keep collecting coro results while it holds.
    while (yield from assert_coro((yield from coro_test()))):
        results.append((yield from coro(*args, **kw)))
    return results | Repeatedly call `coro` coroutine function while `coro_test` returns `True`.
This function is the inverse of `paco.until()`.
This function is a coroutine.
Arguments:
coro (coroutinefunction): coroutine function to execute.
coro_test (coroutinefunction): coroutine function to test.
assert_coro (coroutinefunction): optional assertion coroutine used
to determine if the test passed or not.
*args (mixed): optional variadic arguments to pass to `coro` function.
Raises:
TypeError: if input arguments are invalid.
Returns:
list: result values returned by `coro`.
Usage::
calls = 0
async def task():
nonlocal calls
calls += 1
return calls
async def calls_lt_4():
return calls > 4
await paco.until(task, calls_lt_4)
# => [1, 2, 3, 4, 5] | codesearchnet |
def __len__(self):
    """Return the number of batches in the Sequence.

    Must be implemented by subclasses.
    """
    raise NotImplementedError | Number of batch in the Sequence.
Returns:
The number of batches in the Sequence. | github-repos |
def Add(self, artifact=None, target=None, callback=None):
  """Add match criteria for a check.

  Args:
    artifact: An artifact name.
    target: A Target restricting the check, or None for an empty Target.
    callback: Entities that should be called if the condition matches.
  """
  if target is None:
    target = Target()
  # A missing target field matches anything, represented by a single
  # wildcard (None) value on that axis.
  axes = [target.Get(field) or [None] for field in ("os", "cpe", "label")]
  new_conditions = [
      Condition(artifact, os_name, cpe, label)
      for os_name, cpe, label in itertools.product(*axes)]
  self.conditions.update(new_conditions)
  self._Register(new_conditions, callback)  # | Add criteria for a check.
Args:
artifact: An artifact name.
target: A tuple of artifact necessary to process the data.
callback: Entities that should be called if the condition matches. | juraj-google-style |
def build_masked_loss(loss_function, mask_value):
    """Build a loss function that masks targets equal to mask_value.

    Args:
        loss_function: The loss function to mask.
        mask_value: The value in the targets to mask out.

    Returns:
        function: a loss that acts like loss_function with masked inputs.
    """
    def masked_loss_function(y_true, y_pred):
        # 1.0 where the target differs from mask_value, 0.0 elsewhere;
        # multiplying zeroes out masked positions in both tensors.
        mask = K.cast(K.not_equal(y_true, mask_value), K.floatx())
        return loss_function(y_true * mask, y_pred * mask)
    return masked_loss_function | Builds a loss function that masks based on targets
Args:
loss_function: The loss function to mask
mask_value: The value to mask in the targets
Returns:
function: a loss function that acts like loss_function with masked inputs | juraj-google-style |
def get_replicas(self, service_id: str) -> int:
    """Get the replication level (number of running tasks) of a service.

    Args:
        service_id (str): docker swarm service id

    Returns:
        int: number of tasks of the service in the "running" state.
        (The previous annotation said str, but the value has always been
        an integer count.)

    Raises:
        RuntimeError: if this node is not a swarm manager.
    """
    if not self._manager:
        raise RuntimeError('Only the Swarm manager node can retrieve '
                           'replication level of the service')
    service_tasks = self._client.services.get(service_id).tasks()
    # Count running tasks directly instead of materializing a list of
    # task dicts just to take its length.
    return sum(1 for task in service_tasks
               if task['Status']['State'] == "running")  # | Get the replication level of a service.
Args:
service_id (str): docker swarm service id
Returns:
str, replication level of the service | juraj-google-style |
def send(self, message):
    """Send a RTMMessage, assigning a call_id when one is absent.

    Should be called after starting the loop.

    Args:
        message (RTMMessage): the message to send.

    Raises:
        WebSocketConnectionClosedException: if the loop is closed.
    """
    if "call_id" not in message:
        message["call_id"] = self.gen_call_id()
    payload = message.to_json()
    self._ws.send(payload)  # | Sends a RTMMessage
Should be called after starting the loop
Args:
message(RTMMessage): the sending message
Raises:
WebSocketConnectionClosedException: if the loop is closed | juraj-google-style |
def supported_cache_type(types):
    """Check whether the link type config option has a valid value.

    Args:
        types (list/string): type(s) of links that dvc should try out; a
            string is treated as a comma-separated list.

    Returns:
        bool: True if every entry is a supported link type (vacuously True
        for an empty list), False otherwise.
    """
    if isinstance(types, str):
        types = [typ.strip() for typ in types.split(',')]
    # Set membership instead of a linear scan per entry; all() short-circuits
    # on the first unsupported type, like the original early return.
    return all(typ in {'reflink', 'hardlink', 'symlink', 'copy'} for typ in types)  # | Checks if link type config option has a valid value.
Args:
types (list/string): type(s) of links that dvc should try out. | codesearchnet |
def list_features(self, **kwargs):
    """Obtain a list of parking features.

    Args:
        lang (str): Language code (*es* or *en*).

    Returns:
        Status boolean and parsed response (list[ParkingFeature]), or
        message string in case of error.
    """
    language = util.language_code(kwargs.get('lang'))
    result = self.make_request('list_features', {}, language=language, publicData=True)
    if not util.check_result(result):
        return False, result.get('message', 'UNKNOWN ERROR')
    features = [
        emtype.ParkingFeature(**item)
        for item in util.response_list(result, 'Data')
    ]
    return True, features  # | Obtain a list of parkings.
Args:
lang (str): Language code (*es* or *en*).
Returns:
Status boolean and parsed response (list[Parking]), or message
string in case of error. | juraj-google-style |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.