| code (string, length 20–4.93k) | docstring (string, length 33–1.27k) | source (3 classes) |
|---|---|---|
def sort(self, cmp=None, key=None, reverse=False):
def _DefaultKey(value):
result = []
for key in self.header:
try:
result.append(float(value[key]))
except ValueError:
result.a... | Sorts rows in the texttable.
Args:
cmp: func, non default sort algorithm to use.
key: func, applied to each element before sorting.
reverse: bool, reverse order of sort. | juraj-google-style |
def reduce_by_device(parallelism, data, reduce_fn):
unique_devices = []
device_to_data = {}
for (dev, datum) in zip(parallelism.devices, data):
if (dev not in device_to_data):
unique_devices.append(dev)
device_to_data[dev] = [datum]
else:
device_to_data[de... | Reduces data per device.
This can be useful, for example, if we want to all-reduce n tensors on k<n
devices (like during eval when we have only one device). We call
reduce_by_device() to first sum the tensors per device, then call our usual
all-reduce operation to create one sum per device, followed by
expand_by_devi... | codesearchnet |
def record_corrected_value(self, value, expected_interval, count=1):
    """Record a value into the histogram, correcting for coordinated omission.

    When ``expected_interval`` is positive and ``value`` exceeds it, extra
    synthetic samples (each reduced by ``expected_interval``) are recorded
    to back-fill the measurements that coordinated omission hid.

    Args:
        value: the value to record (must be in the valid range).
        expected_interval: the expected interval between 2 value samples.
        count: incremental count (defaults to 1).

    Returns:
        bool: False as soon as an underlying record fails, True otherwise.
    """
    remaining = value
    while self.record_value(remaining, count):
        if remaining <= expected_interval or expected_interval <= 0:
            return True
        remaining -= expected_interval
    return False
def update_endpoint(self, endpoint_name, endpoint_config_name):
if (not _deployment_entity_exists((lambda : self.sagemaker_client.describe_endpoint(EndpointName=endpoint_name)))):
raise ValueError('Endpoint with name "{}" does not exist; please use an existing endpoint name'.format(endpoint_name))
self.... | Update an Amazon SageMaker ``Endpoint`` according to the endpoint configuration specified in the request
Raise an error if endpoint with endpoint_name does not exist.
Args:
endpoint_name (str): Name of the Amazon SageMaker ``Endpoint`` to update.
endpoint_config_name (str): Name of the Amazon SageMaker endpoint confi... | codesearchnet |
def plot(self):
plt.rcParams['xtick.major.pad'] = '6'
plt.rcParams['ytick.major.pad'] = '6'
plt.rcParams['axes.linewidth'] = 2
npoint = 1000
xs = np.linspace(0, 1, npoint)
xs_reverse_converted = InterfacialReactivity._reverse_convert(xs, self.factor1, self.factor2)
energies = [self._get_ener... | Plots reaction energy as a function of mixing ratio x in
self.c1 - self.c2 tie line using pylab.
Returns:
Pylab object that plots reaction energy as a function of
mixing ratio x. | codesearchnet |
def run_without_time_limit(self, cmd):
cmd = ([DOCKER_BINARY, 'run', DOCKER_NVIDIA_RUNTIME] + cmd)
logging.info('Docker command: %s', ' '.join(cmd))
start_time = time.time()
retval = subprocess.call(cmd)
elapsed_time_sec = int((time.time() - start_time))
logging.info('Elapsed time of attack: %d'... | Runs docker command without time limit.
Args:
cmd: list with the command line arguments which are passed to docker
binary
Returns:
how long it took to run submission in seconds
Raises:
WorkerError: if error occurred during execution of the submission | codesearchnet |
def matrix(self) -> np.ndarray:
num_qubits = self.num_qubits()
if (num_qubits is None):
raise ValueError('Unknown number of qubits')
num_dim = (2 ** num_qubits)
result = np.zeros((num_dim, num_dim), dtype=np.complex128)
for (gate, coefficient) in self.items():
result += (protocols.un... | Reconstructs matrix of self using unitaries of underlying gates.
Raises:
TypeError: if any of the gates in self does not provide a unitary. | codesearchnet |
def quality(self, tests, alias=None):
this_tests = ((((tests.get('each', []) + tests.get('Each', [])) + tests.get('EACH', [])) + tests.get(self.mnemonic, [])) + utils.flatten_list([tests.get(a) for a in self.get_alias(alias=alias)]))
this_tests = filter(None, this_tests)
if (not tests.get(self.mnemonic, 1))... | Run a series of tests and return the corresponding results.
Args:
tests (list): a list of functions.
alias (dict): a dictionary mapping mnemonics to lists of mnemonics.
Returns:
list. The results. Stick to booleans (True = pass) or ints. | codesearchnet |
def update(self, iterable):
    """Update with an ordered iterable of items.

    Args:
        iterable: An ordered iterable of items. The relative order of the
            items in this iterable will be respected in the TopoSet
            (in the absence of cycles).
    """
    # Each adjacent pair becomes an ordering edge; the fillvalue pads the
    # final element with the _FILL sentinel (zip_longest-style).
    for edge in pairwise_longest(iterable, fillvalue=_FILL):
        self._edges.append(edge)
    # Drop any previously computed result; presumably recomputed lazily.
    self._results = None
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
vision_data = {}
if image_sizes is not None:
images_kwargs = AyaVisionProcessorKwargs._defaults.get('images_kwargs', {})
images_kwargs.update(kwargs)
num_image_patches = [self.image_processor.get_number_of_image_patches(*i... | Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`List[List[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input moda... | github-repos |
def add_archive_as_dir(self, zip_file_obj):
BalancedDiscStorage._check_interface(zip_file_obj)
file_hash = self._get_hash(zip_file_obj)
dir_path = self._create_dir_path(file_hash)
full_path = os.path.join(dir_path, file_hash)
if os.path.exists(full_path):
shutil.rmtree(full_path)
os.mkdi... | Add archive to the storage and unpack it.
Args:
zip_file_obj (file): Opened file-like object.
Returns:
obj: Path where the `zip_file_obj` was unpacked wrapped in \
:class:`.PathAndHash` structure.
Raises:
ValueError: If there is too many files in .zip archive. \
See :attr:`._max_zipfiles` for details.
AssertionError... | codesearchnet |
def generate_session_id(secret_key=settings.secret_key_bytes(), signed=settings.sign_sessions()):
secret_key = _ensure_bytes(secret_key)
if signed:
base_id = _get_random_string(secret_key=secret_key)
return ((base_id + '-') + _signature(base_id, secret_key))
else:
return _get_random_... | Generate a random session ID.
Typically, each browser tab connected to a Bokeh application
has its own session ID. In production deployments of a Bokeh
app, session IDs should be random and unguessable - otherwise
users of the app could interfere with one another.
If session IDs are signed with a secret key, the ser... | codesearchnet |
def make_action(self, fn, schema_parser, meta):
validate_input = validate_output = None
if "$input" in meta:
with MarkKey("$input"):
validate_input = schema_parser.parse(meta["$input"])
if "$output" in meta:
with MarkKey("$output"):
... | Make resource's method an action
Validate input, output by schema in meta.
If no input schema, call fn without params.
If no output schema, will not validate return value.
Args:
fn: resource's method
schema_parser: for parsing schema in meta
meta: meta data of the action | juraj-google-style |
def unauthorized(cls, errors=None):
    """Shortcut API for an HTTP 401 `Unauthorized` response.

    Args:
        errors (list): Response key/value data.

    Returns:
        WSResponse instance, serialized via ``to_json``.
    """
    if cls.expose_status:
        # Only set the explicit status line when the class opts in.
        cls.response.content_type = 'application/json'
        cls.response._status_line = '401 Unauthorized'
    # NOTE(review): `to_json` is accessed without parentheses — presumably a
    # property; confirm it is not a method that should be called.
    return cls(401, errors=errors).to_json
def roc_auc_score(gold, probs, ignore_in_gold=[], ignore_in_pred=[]):
gold = arraylike_to_numpy(gold)
if len(ignore_in_pred) > 0:
raise ValueError("ignore_in_pred not defined for ROC-AUC score.")
keep = [x not in ignore_in_gold for x in gold]
gold = gold[keep]
probs = probs[k... | Compute the ROC AUC score, given the gold labels and predicted probs.
Args:
gold: A 1d array-like of gold labels
probs: A 2d array-like of predicted probabilities
ignore_in_gold: A list of labels for which elements having that gold
label will be ignored.
Returns:
roc_auc_score: The (float) roc_auc score | juraj-google-style |
def generate_nodes(tpm, cm, network_state, indices, node_labels=None):
    """Generate |Node| objects for a subsystem.

    Args:
        tpm (np.ndarray): The system's TPM.
        cm (np.ndarray): The corresponding CM.
        network_state (tuple): The state of the network.
        indices (tuple[int]): Indices to generate nodes for.

    Keyword Args:
        node_labels (|NodeLabels|): Textual labels for each node; defaults
            to labels derived from the indices.

    Returns:
        tuple[Node]: The generated nodes.
    """
    if node_labels is None:
        node_labels = NodeLabels(None, indices)
    states = utils.state_of(indices, network_state)
    nodes = []
    for idx, state in zip(indices, states):
        nodes.append(Node(tpm, cm, idx, state, node_labels))
    return tuple(nodes)
def run(self, tag=None, output=None, **kwargs):
start = datetime.datetime.now()
count = 0
if tag:
tag = Uri(tag)
xml_generator = etree.iterparse(self.source,
tag=tag.etree)
... | runs the extractor
Args:
-----
output: ['filepath', None] | juraj-google-style |
def read_links(self, file, encoding=None):
    """Return the links found in the document.

    Args:
        file: A file object containing the document.
        encoding (str): The encoding of the document.

    Returns:
        list of str: the text of every item whose link flag is truthy.
    """
    links = []
    for item in self.iter_text(file, encoding):
        if item[1]:
            links.append(item[0])
    return links
def phenSpecificEffects(snps, pheno1, pheno2, K=None, covs=None, test='lrt'):
N = snps.shape[0]
if (K is None):
K = SP.eye(N)
assert (pheno1.shape[1] == pheno2.shape[1]), 'Only consider equal number of phenotype dimensions'
if (covs is None):
covs = SP.ones(N, 1)
assert ((pheno1.shap... | Univariate fixed effects interaction test for phenotype specific SNP effects
Args:
snps: [N x S] SP.array of S SNPs for N individuals (test SNPs)
pheno1: [N x 1] SP.array of 1 phenotype for N individuals
pheno2: [N x 1] SP.array of 1 phenotype for N individuals
K: [N x N] SP.array of LMM-covariance/kinship koef... | codesearchnet |
def __init__(self, value):
    """Create a new persistent value command.

    Args:
        value (complex): Complex value to apply, bounded by an absolute
            value of 1. The allowable precision is device specific.

    Raises:
        PulseError: when the absolute value of the input exceeds 1.
    """
    super().__init__(duration=0)
    if abs(value) > 1:
        raise PulseError("Absolute value of PV amplitude exceeds 1.")
    # Normalize to a complex number regardless of the input numeric type.
    self._value = complex(value)
def get_usb_serial(self, port_num):
port = self.port_map[str(port_num)]
arg = ''.join(['DEVICE INFO,', self._addr, '.', port])
cmd = (['esuit64', '-t', arg])
info = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
serial = None
if "SERIAL" in info:
serial_info = info.split('SER... | Get the device serial number
Args:
port_num: port number on the Cambrionix unit
Return:
usb device serial number | juraj-google-style |
def create_clusters(provider, context, **kwargs):
conn = get_session(provider.region).client('ecs')
try:
clusters = kwargs["clusters"]
except KeyError:
logger.error("setup_clusters hook missing \"clusters\" argument")
return False
if isinstance(clusters, basestring):
... | Creates ECS clusters.
Expects a "clusters" argument, which should contain a list of cluster
names to create.
Args:
provider (:class:`stacker.providers.base.BaseProvider`): provider
instance
context (:class:`stacker.context.Context`): context instance
Returns: boolean for whether or not the hook succeeded. | juraj-google-style |
def __init__(self, base: ModelHandler[ExampleT, PredictionT, ModelT]):
    """A ModelHandler that skips batching in RunInference.

    Args:
        base: An implementation of the underlying model handler.
    """
    self._base = base
    # Propagate the base handler's environment variables if it defines any.
    self._env_vars = getattr(base, '_env_vars', {})
def individual(self, ind_id=None):
    """Return an individual object matching the given id.

    Args:
        ind_id (str): An individual id.

    Returns:
        individual (puzzle.models.individual), or None if no match.
    """
    # next() with a default replaces the explicit search loop.
    return next(
        (ind for ind in self.individuals if ind.ind_id == ind_id), None)
def encode_chunk(dataframe):
csv_buffer = six.StringIO()
dataframe.to_csv(csv_buffer, index=False, header=False, encoding='utf-8', float_format='%.15g', date_format='%Y-%m-%d %H:%M:%S.%f')
body = csv_buffer.getvalue()
if isinstance(body, bytes):
body = body.decode('utf-8')
body = body.encode... | Return a file-like object of CSV-encoded rows.
Args:
dataframe (pandas.DataFrame): A chunk of a dataframe to encode | codesearchnet |
def get_excitation_spectrum(self, width=0.1, npoints=2000):
roots = self.parse_tddft()
data = roots["singlet"]
en = np.array([d["energy"] for d in data])
osc = np.array([d["osc_strength"] for d in data])
epad = 20.0 * width
emin = en[0] - epad
emax = en[... | Generate an excitation spectra from the singlet roots of TDDFT
calculations.
Args:
width (float): Width for Gaussian smearing.
npoints (int): Number of energy points. More points => smoother
curve.
Returns:
(ExcitationSpectrum) which can be plotted using
pymatgen.vis.plotters.SpectrumPlotter. | juraj-google-style |
def get_md5sum(fname, chunk_size=1024):
def iter_chunks(f):
while True:
chunk = f.read(chunk_size)
if not chunk:
break
yield chunk
sig = hashlib.md5()
with open(fname, 'rb') as f:
for chunk in iter_chunks(f):
sig.update(... | Returns the MD5 checksum of a file.
Args:
fname (str): Filename
chunk_size (Optional[int]): Size (in Bytes) of the chunks that should be
read in at once. Increasing chunk size reduces the number of reads
required, but increases the memory usage. Defaults to 1024.
Returns:
The MD5 checksum of the file, which is a stri... | juraj-google-style |
def label(self, label, action='ADD', params=None):
if params is None:
params = {}
if not label:
self._tcex.handle_error(925, ['label', 'Security Label', 'label', 'label', label])
if not self.can_update():
self._tcex.handle_error(910, [self.type])
... | Adds a Security Label to a Indicator/Group or Victim
Args:
params:
label: The name of the Security Label
action: | juraj-google-style |
def load_library(library_location):
if os.path.exists(library_location):
if os.path.isdir(library_location):
directory_contents = os.listdir(library_location)
kernel_libraries = [os.path.join(library_location, f) for f in directory_contents if _is_shared_object(f)]
else:
... | Loads a TensorFlow plugin.
"library_location" can be a path to a specific shared object, or a folder.
If it is a folder, all shared objects that are named "libtfkernel*" will be
loaded. When the library is loaded, kernels registered in the library via the
`REGISTER_*` macros are made available in the TensorFlow proces... | github-repos |
def catchup_subscriber(self, connection_id):
with self._subscribers_cv:
subscriber = self._subscribers[connection_id]
last_known_block_id = subscriber.get_last_known_block_id()
subscriptions = subscriber.subscriptions
if (last_known_block_id is not None):
LOGGER.debug('Catching u... | Send an event list with all events that are in the given
subscriptions from all blocks since that latest block in the current
chain that is in the given last known block ids.
Raises:
PossibleForkDetectedError
A possible fork was detected while building the event list
NoKnownBlockError
None of the last known blocks wer... | codesearchnet |
def dag_to_circuit(dag):
qregs = collections.OrderedDict()
for qreg in dag.qregs.values():
qreg_tmp = QuantumRegister(qreg.size, name=qreg.name)
qregs[qreg.name] = qreg_tmp
cregs = collections.OrderedDict()
for creg in dag.cregs.values():
creg_tmp = ClassicalRegister(creg.size, n... | Build a ``QuantumCircuit`` object from a ``DAGCircuit``.
Args:
dag (DAGCircuit): the input dag.
Return:
QuantumCircuit: the circuit representing the input dag. | codesearchnet |
def SetProtocol(self, protocol):
    """Sets the protocol that will be used to query Viper.

    Args:
        protocol (str): protocol to use to query Viper. Either 'http' or
            'https'.

    Raises:
        ValueError: if the protocol is not supported.
    """
    if protocol in self.SUPPORTED_PROTOCOLS:
        self._protocol = protocol
    else:
        raise ValueError('Unsupported protocol: {0!s}'.format(protocol))
def member_of(self, group):
    """Returns whether the user is a member of a certain group.

    Args:
        group: The name of a group (string) or a Group object.

    Returns:
        bool: True if the user belongs to the group.
    """
    # Accept either a Group instance or a plain name; normalize to the name.
    if isinstance(group, Group):
        group = group.name
    return self.groups.filter(name=group).exists()
def get_sub_category(alt_len, ref_len, category, svtype=None):
subcategory = ''
if (category in ('snv', 'indel', 'cancer')):
if (ref_len == alt_len):
subcategory = 'snv'
else:
subcategory = 'indel'
elif (category == 'sv'):
subcategory = svtype
return subca... | Get the subcategory for a VCF variant
The sub categories are:
'snv', 'indel', 'del', 'ins', 'dup', 'bnd', 'inv'
Args:
alt_len(int)
ref_len(int)
category(str)
svtype(str)
Returns:
subcategory(str) | codesearchnet |
def choose_template(self, template):
    """Choose a template on the device.

    Args:
        template: str or int, the template number to select (0-99).

    Returns:
        None

    Raises:
        ValueError: if ``template`` cannot be converted to an integer.
    """
    # Use integer division: under Python 3 the original `/` produced a
    # float (e.g. int('42')/10 == 4.2), corrupting the command string
    # to '^TS04.22' instead of '^TS042'.
    tens, ones = divmod(int(template), 10)
    self.send('^TS0{}{}'.format(tens, ones))
def __init__(self, concentration1=None, concentration0=None, validate_args=False, allow_nan_stats=True, name='Beta'):
parameters = dict(locals())
with ops.name_scope(name, values=[concentration1, concentration0]) as name:
self._concentration1 = self._maybe_assert_valid_concentration(ops.convert_to_tenso... | Initialize a batch of Beta distributions.
Args:
concentration1: Positive floating-point `Tensor` indicating mean
number of successes; aka "alpha". Implies `self.dtype` and
`self.batch_shape`, i.e.,
`concentration1.shape = [N1, N2, ..., Nm] = self.batch_shape`.
concentration0: Positive floating-point `Tensor` indicatin... | github-repos |
def list_class_funcnames(fname, blank_pats=['
with open(fname, 'r') as file_:
lines = file_.readlines()
funcname_list = []
for lx, line in enumerate(lines):
if any([line.startswith(pat) for pat in blank_pats]):
funcname_list.append('')
if line.star... | list_class_funcnames
Args:
fname (str): filepath
blank_pats (list): defaults to ' #'
Returns:
list: funcname_list
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> fname = 'util_class.py'
>>> blank_pats = [' #']
>>> funcname_list = list_class_funcnames(fname, blank_pats)
>>> print... | juraj-google-style |
def convert_labels(Y, source, dest):
if (Y is None):
return Y
if isinstance(Y, np.ndarray):
Y = Y.copy()
assert isinstance(Y, int)
elif isinstance(Y, torch.Tensor):
Y = Y.clone()
assert (np.sum((Y.numpy() - Y.numpy().astype(int))) == 0.0)
else:
raise Value... | Convert a matrix from one label type to another
Args:
Y: A np.ndarray or torch.Tensor of labels (ints)
source: The convention the labels are currently expressed in
dest: The convention to convert the labels to
Conventions:
'categorical': [0: abstain, 1: positive, 2: negative]
'plusminus': [0: abstain, 1: positive, -1... | codesearchnet |
def success(channel, title, datapacks):
    """Creates an embed UI containing the help message.

    Args:
        channel (discord.Channel): The Discord channel to bind the embed to.
        title (str): The title of the embed.
        datapacks (list): The hex value data.

    Returns:
        ui (ui_embed.UI): The embed UI object.
    """
    # Description is intentionally empty; content lives in the datapacks.
    gui = ui_embed.UI(channel, title, '', modulename=modulename, datapacks=datapacks)
    return gui
def find_in_coord_list_pbc(fcoord_list, fcoord, atol=1e-8):
    """Get the indices of all points in a fractional coord list that are
    equal to a fractional coord (within a tolerance), taking periodic
    boundary conditions into account.

    Args:
        fcoord_list: List of fractional coords.
        fcoord: A specific fractional coord to test.
        atol: Absolute tolerance. Defaults to 1e-8.

    Returns:
        Indices of all matches; an empty list if ``fcoord_list`` is empty.
    """
    if len(fcoord_list) == 0:
        return []
    # Broadcasting replaces the explicit np.tile of the target coordinate.
    deltas = np.asarray(fcoord_list) - np.asarray(fcoord)
    # Fold each component difference onto the nearest periodic image.
    deltas -= np.round(deltas)
    return np.where(np.all(np.abs(deltas) < atol, axis=1))[0]
def extract_subject_info_extension(cert_obj):
try:
subject_info_der = cert_obj.extensions.get_extension_for_oid(cryptography.x509.oid.ObjectIdentifier(DATAONE_SUBJECT_INFO_OID)).value.value
return str(pyasn1.codec.der.decoder.decode(subject_info_der)[0])
except Exception as e:
logging.de... | Extract DataONE SubjectInfo XML doc from certificate.
Certificates issued by DataONE may include an embedded XML doc containing
additional information about the subject specified in the certificate DN. If
present, the doc is stored as an extension with an OID specified by DataONE and
formatted as specified in the Data... | codesearchnet |
def draw_on(self, canvas, stem_color, leaf_color, thickness, ages=None):
    """Draw the tree on a canvas.

    Args:
        canvas (object): The canvas you want to draw the tree on. Supported
            canvases: svgwrite.Drawing and PIL.Image (custom libraries can
            be added to SUPPORTED_CANVAS).
        stem_color (tuple): Color or gradient for the stem of the tree.
        leaf_color (tuple): Color for the leaf (the last iteration).
        thickness: Stroke thickness for the stem.
        ages: Passed through to the drawer — semantics depend on the drawer
            implementation; TODO confirm.
    """
    # Dispatch on the canvas' module name; an unsupported canvas is
    # silently ignored (nothing is drawn).
    if (canvas.__module__ in SUPPORTED_CANVAS):
        drawer = SUPPORTED_CANVAS[canvas.__module__]
        drawer(self, canvas, stem_color, leaf_color, thickness, ages).draw()
def Register(self, app_id, challenge, registered_keys):
client_data = model.ClientData(model.ClientData.TYP_REGISTRATION, challenge, self.origin)
challenge_param = self.InternalSHA256(client_data.GetJson())
app_param = self.InternalSHA256(app_id)
for key in registered_keys:
try:
if (... | Registers app_id with the security key.
Executes the U2F registration flow with the security key.
Args:
app_id: The app_id to register the security key against.
challenge: Server challenge passed to the security key.
registered_keys: List of keys already registered for this app_id+user.
Returns:
RegisterResponse wit... | codesearchnet |
def _inverse_log_det_jacobian(self, y):
    """Subclass implementation of the public `inverse_log_det_jacobian`.

    Unlike the public function, this method does not take `event_ndims`;
    it implements the minimal Jacobian determinant calculation (i.e. over
    `inverse_min_event_ndims`).

    Args:
        y: `Tensor`. The input to the "inverse" Jacobian determinant
            evaluation.

    Raises:
        NotImplementedError: always; subclasses must override.
    """
    raise NotImplementedError('inverse_log_det_jacobian not implemented.')
def run(app: web.Application):
    """Runs the application in an async context.

    This function will block indefinitely until the application is shut
    down.

    Args:
        app (web.Application): The aiohttp Application as created by
            `create_app()`; its 'config' entry must provide 'host' and
            'port'.
    """
    host = app['config']['host']
    port = app['config']['port']
    web.run_app(app, host=host, port=port)
def _use_memcache(self, key, options=None):
    """Return whether to use memcache for this key.

    Consults, in order: the call-site options, this context's memcache
    policy, then the connection's default config; defaults to True.

    Args:
        key: Key instance.
        options: ContextOptions instance, or None.

    Returns:
        True if the key should be cached in memcache, False otherwise.
    """
    # Lazily evaluate each source so later ones are only consulted when
    # the earlier ones decline to decide (return None).
    sources = (
        lambda: ContextOptions.use_memcache(options),
        lambda: self._memcache_policy(key),
        lambda: ContextOptions.use_memcache(self._conn.config),
    )
    for source in sources:
        flag = source()
        if flag is not None:
            return flag
    return True
def elaborate_borns_and_epsilon(ucell, borns, epsilon, primitive_matrix=None, supercell_matrix=None, is_symmetry=True, symmetrize_tensors=False, symprec=1e-05):
assert (len(borns) == ucell.get_number_of_atoms()), ('num_atom %d != len(borns) %d' % (ucell.get_number_of_atoms(), len(borns)))
if symmetrize_tensors:... | Symmetrize Born effective charges and dielectric constants and
extract Born effective charges of symmetrically independent atoms
for primitive cell.
Args:
ucell (Atoms): Unit cell structure
borns (np.array): Born effective charges of ucell
epsilon (np.array): Dielectric constant tensor
Returns:
(np.array) Born effec... | codesearchnet |
def register_token(self, token_class, regexp=None):
    """Register a token class.

    Args:
        token_class (tdparser.Token): the token class to register.
        regexp (optional str): the regexp for elements of that token.
            Defaults to the `regexp` attribute of the token class.
    """
    effective_regexp = token_class.regexp if regexp is None else regexp
    self.tokens.register(token_class, effective_regexp)
def run_local_server(self, host='localhost', port=8080, authorization_prompt_message=_DEFAULT_AUTH_PROMPT_MESSAGE, success_message=_DEFAULT_WEB_SUCCESS_MESSAGE, open_browser=True, **kwargs):
self.redirect_uri = 'http:
(auth_url, _) = self.authorization_url(**kwargs)
wsgi_app = _RedirectWSGIApp(success_messa... | Run the flow using the server strategy.
The server strategy instructs the user to open the authorization URL in
their browser and will attempt to automatically open the URL for them.
It will start a local web server to listen for the authorization
response. Once authorization is complete the authorization server will
... | codesearchnet |
def GetMessages(self, files):
def _GetAllMessageNames(desc):
'Walk a message Descriptor and recursively yields all message names.'
(yield desc.full_name)
for msg_desc in desc.nested_types:
for full_name in _GetAllMessageNames(msg_desc):
(yield full_name)
resu... | Gets all registered messages from a specified file.
Only messages already created and registered will be returned; (this is the
case for imported _pb2 modules)
But unlike MessageFactory, this version also returns already defined nested
messages, but does not register any message extensions.
Args:
files: The file name... | codesearchnet |
def config_to_string(config):
    """Build a human-readable INI-style string for the config.

    Args:
        config (Mapping[str, Mapping]): Nested mapping of section names to
            their option/value pairs (e.g. a defaultdict of defaultdicts).

    Returns:
        str: One "[section]" header per section followed by its
        "option = value" lines, joined by newlines.
    """
    lines = []
    for section_name, options in config.items():
        lines.append("[{}]".format(section_name))
        lines.extend(
            "{} = {}".format(opt, val) for opt, val in options.items())
    return "\n".join(lines)
def _get_best(values: List[float], losses: List[float],
max_loss_div: float = 0.9, min_val_div: float = 10.0) -> float:
assert len(values) == len(losses), "lengths of values and losses should be equal"
min_ind = np.argmin(losses)
for i in range(min_ind - 1, 0, -1):
... | Find the best value according to given losses
Args:
values: list of considered values
losses: list of obtained loss values corresponding to `values`
max_loss_div: maximal divergence of loss to be considered significant
min_val_div: minimum divergence of loss to be considered significant
Returns:
best value divided by... | juraj-google-style |
def _get_annotations(self, text, language=''):
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'features': {
'extract_syntax': True,
},
'encodingType': 'UTF32',
}
if language:
body['document']['language']... | Returns the list of annotations retrieved from the given text.
Args:
text (str): Input text.
language (:obj:`str`, optional): Language code.
Returns:
Results in a dictionary. :code:`tokens` contains the list of annotations
and :code:`language` contains the inferred language from the input. | juraj-google-style |
def set_params(self, **params):
    """Set the parameters of the estimator.

    Args:
        bias (array-like): bias of the estimator (the intercept in a
            linear model); stored on ``self.intercept_``.
        weights (array-like): weights of the features (the coefficients);
            stored on ``self.coef_``.
        b_<i> (number): NER entity biases keyed by column position on X,
            e.g. ``b_4=10`` sets ``self.B[4] = 10``.

    Returns:
        self, to allow call chaining.
    """
    # Membership tests go directly against the dict; `.keys()` is redundant.
    if 'bias' in params:
        self.intercept_ = params['bias']
    if 'weights' in params:
        self.coef_ = params['weights']
    for key, value in params.items():
        # Keys shaped like 'b_<index>' carry per-column NER biases.
        if key.startswith('b_'):
            self.B[int(key[2:])] = value
    return self
def open(path, mode=gdalconst.GA_ReadOnly):
path = getattr(path, 'name', path)
try:
return Raster(vsiprefix(path), mode)
except AttributeError:
try:
imgdata = path.read()
except AttributeError:
raise TypeError('Not a file-like object providing read()')
... | Returns a Raster instance.
Arguments:
path -- local or remote path as str or file-like object
Keyword args:
mode -- gdal constant representing access mode | juraj-google-style |
def encode(self, label):
    """Encodes a ``label``.

    Args:
        label (object): Label to encode.

    Returns:
        torch.Tensor: Encoding of the label; falls back to the unknown
        index when the label is not present in the vocabulary (`stoi`).
    """
    label = super().encode(label)
    return torch.tensor(self.stoi.get(label, self.unknown_index))
def while_loop(self, context, step_method):
logger.debug("starting")
context['whileCounter'] = 0
if self.stop is None and self.max is None:
logger.error(f"while decorator missing both max and stop.")
raise PipelineDefinitionError("the ... | Run step inside a while loop.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate - after method execution will contain the new
updated context.
step_method: (method/function) This is the method/function that
will execute on every loop iteration. Signature is:
function(context) | juraj-google-style |
def daemon(args):
if os.environ.get(DVC_DAEMON):
logger.debug('skipping launching a new daemon.')
return
cmd = [sys.executable]
if (not is_binary()):
cmd += ['-m', 'dvc']
cmd += (['daemon', '-q'] + args)
env = fix_env()
file_path = os.path.abspath(inspect.stack()[0][1])
... | Launch a `dvc daemon` command in a detached process.
Args:
args (list): list of arguments to append to `dvc daemon` command. | codesearchnet |
def get_versions(self):
versions_response = self.repo.api.http_request('GET', ('%s/fcr:versions' % self.uri))
versions_graph = self.repo.api.parse_rdf_payload(versions_response.content, versions_response.headers)
for version_uri in versions_graph.objects(self.uri, self.rdf.prefixes.fedora.hasVersion):
... | retrieves all versions of an object, and stores them at self.versions
Args:
None
Returns:
None: appends instances | codesearchnet |
def updateNodeCapabilities(self, nodeId, node, vendorSpecific=None):
    """Update a node's capabilities.

    See Also: updateNodeCapabilitiesResponse().

    Args:
        nodeId: identifier of the node to update.
        node: the node capabilities payload.
        vendorSpecific: optional vendor-specific parameters.

    Returns:
        The boolean result parsed from the response.
    """
    return self._read_boolean_response(
        self.updateNodeCapabilitiesResponse(nodeId, node, vendorSpecific))
def _workflow_complete(workflow_stage_dict: dict):
complete_stages = []
for (_, stage_config) in workflow_stage_dict.items():
complete_stages.append((stage_config['status'] == 'complete'))
if all(complete_stages):
LOG.info('PB workflow complete!')
return True
return False | Check if the workflow is complete.
This function checks if the entire workflow is complete.
This function is used by `execute_processing_block`.
Args:
workflow_stage_dict (dict): Workflow metadata dictionary.
Returns:
bool, True if the workflow is complete, otherwise False. | codesearchnet |
def concat(self, axis, other, **kwargs):
    """Concatenates two objects together.

    Args:
        axis: The axis index object to join (0 for columns, 1 for index).
        other: The other_index to concat with.

    Returns:
        Concatenated objects.
    """
    return self._append_list_of_managers(other, axis, **kwargs)
def from_files(path_dir, dos_spin=1):
run_type, warning, efermi, gap, doping_levels = \
BoltztrapAnalyzer.parse_outputtrans(path_dir)
vol = BoltztrapAnalyzer.parse_struct(path_dir)
intrans = BoltztrapAnalyzer.parse_intrans(path_dir)
if run_type == "BOLTZ":
... | get a BoltztrapAnalyzer object from a set of files
Args:
path_dir: directory where the boltztrap files are
dos_spin: in DOS mode, set to 1 for spin up and -1 for spin down
Returns:
a BoltztrapAnalyzer object | juraj-google-style |
def get_branch(profile, name):
    """Fetch a branch.

    Args:
        profile: A profile generated from
            ``simplygithub.authentication.profile``. Such profiles tell this
            module (i) the ``repo`` to connect to, and (ii) the ``token``
            to connect with.
        name: The name of the branch to fetch.

    Returns:
        A dict with data about the branch.
    """
    # Branch refs live under the 'heads/' namespace.
    return refs.get_ref(profile, 'heads/' + name)
def get_config(self):
    """Returns the initializer's configuration as a JSON-serializable dict.

    Returns:
        A JSON-serializable Python dict (empty: this initializer has no
        configurable parameters).
    """
    config = {}
    return config
def output(self, _filename):
txt = ''
for c in self.contracts:
txt += "\nContract %s\n"%c.name
table = PrettyTable(['Variable', 'Dependencies'])
for v in c.state_variables:
table.add_row([v.name, _get(v, c)])
txt += str(table)
... | _filename is not used
Args:
_filename(string) | juraj-google-style |
def __init__(self, filesystem):
    """Initialize the dir entry with unset values.

    Args:
        filesystem: the fake filesystem used for implementation.
    """
    self._filesystem = filesystem
    # Visible name/path start empty; they are filled in later by the caller.
    self.name = ''
    self.path = ''
    # Lazily-populated inode / link / dir flags and cached stat results.
    self._inode = None
    self._islink = False
    self._isdir = False
    self._statresult = None
    self._statresult_symlink = None
def Evaluate(self, client_obj):
if (self.match_mode == ForemanClientRuleSet.MatchMode.MATCH_ALL):
quantifier = all
elif (self.match_mode == ForemanClientRuleSet.MatchMode.MATCH_ANY):
quantifier = any
else:
raise ValueError(('Unexpected match mode value: %s' % self.match_mode))
re... | Evaluates rules held in the rule set.
Args:
client_obj: Either an aff4 client object or a client_info dict as returned
by ReadFullInfoClient if the relational db is used for reading.
Returns:
A bool value of the evaluation.
Raises:
ValueError: The match mode is of unknown value. | codesearchnet |
def create_raw(self, key, value):
    """Create method of CRUD operation for raw data.

    Args:
        key (string): The variable to write to the DB.
        value (any): The data to write to the DB.

    Returns:
        (string): Result of the DB write, or None when key/value is missing.
    """
    # Guard clause: refuse to write when either side of the pair is absent.
    if key is None or value is None:
        self.tcex.log.warning(u'The key or value field was None.')
        return None
    return self.db.create(key.strip(), value)
def reorder_resources(self, resource_ids, hxl_update=True):
dataset_id = self.data.get('id')
if not dataset_id:
raise HDXError('Dataset has no id! It must be read, created or updated first.')
data = {'id': dataset_id,
'order': resource_ids}
s... | Reorder resources in dataset according to provided list.
If only some resource ids are supplied then these are
assumed to be first and the other resources will stay in
their original order.
Args:
resource_ids (List[str]): List of resource ids
hxl_update (bool): Whether to call package_hxl_update. Defaults to True.
Re... | juraj-google-style |
def add_listener(self, event, listener):
    """Bind a listener to a particular event.

    Args:
        event (str): The name of the event to listen for. This may be any
            string value.
        listener (def or async def): The callback to execute when the event
            fires. This may be a sync or async function.
    """
    # Announce the registration first so 'new_listener' hooks observe it
    # before the listener becomes active.
    self.emit('new_listener', event, listener)
    bucket = self._listeners[event]
    bucket.append(listener)
    self._check_limit(event)
    return self
def package_info(pkg_name):
indent = " "
for config, _ in _iter_packages():
if pkg_name == config["name"]:
print("Package:", pkg_name)
print(indent, "Platform:", config["platform"])
print(indent, "Version:", config["version"])
print(indent, "Path:", ... | Prints the information of a package.
Args:
pkg_name (str): The name of the desired package to get information | juraj-google-style |
def __init__(self, value, translator):
    """Create a NestedValueProvider that wraps the provided ValueProvider.

    Args:
        value: ValueProvider object to wrap.
        translator: Function that is applied to the wrapped ValueProvider's
            value.

    NOTE(review): upstream docs mention ``RuntimeValueProviderError`` for
    inaccessible objects; this initializer itself raises nothing -- confirm
    where that error actually originates.
    """
    self.translator = translator
    self.value = value
def remove_user(username):
users = passwd_reader.load_users()
assert username in users, "Username '%s' not found!" % username
del users[username]
passwd_reader.save_users(users)
home_dir = settings.DATA_PATH + username
if os.path.exists(home_dir):
shutil.rmtree(home_dir... | Remove user, his home directory and so on..
Args:
username (str): User's name. | juraj-google-style |
def __init__(self, output_filename="std_err.txt"):
    """Initialize the handler with the output file to check.

    Args:
        output_filename (str): The file where stderr for vasp is being
            redirected; the error messages that are checked appear in that
            stderr. Defaults to "std_err.txt", the default redirect used by
            :class:`custodian.vasp.jobs.VaspJob`.
    """
    self.output_filename = output_filename
    # Distinct errors seen so far, and how many times each occurred.
    self.errors = set()
    self.error_count = Counter()
def human_timestamp_to_datetime(human_timestamp, to_utc=False):
    """Convert a human-readable timestamp into a Python ``DateTime`` object.

    Args:
        human_timestamp (str): A timestamp string.
        to_utc (bool): Convert the timestamp to UTC.

    Returns:
        DateTime: The converted timestamp.
    """
    settings = {'TO_TIMEZONE': 'UTC'} if to_utc else {}
    return dateparser.parse(human_timestamp, settings=settings)
def process(self, element):
    """Pair the input text with the softmax of the model's logits.

    Args:
        element: Tuple of (input text, prediction result); the prediction
            result must expose ``inference['logits']`` as a torch tensor.

    Returns:
        A single-element list holding a dict with the input text and the
        softmax probabilities as a numpy array.
    """
    text_input, prediction_result = element
    logits = prediction_result.inference['logits']
    probabilities = torch.nn.Softmax(dim=-1)(logits).detach().numpy()
    return [{'input': text_input, 'softmax': probabilities}]
def correction(self, word):
    """Return the most probable correct spelling for the word.

    Args:
        word (str): The word to correct.

    Returns:
        str: The most likely candidate.
    """
    candidates = self.candidates(word)
    # max() keeps the first candidate on probability ties.
    return max(candidates, key=self.word_probability)
def Uniform(cls,
low: 'TensorFluent', high: 'TensorFluent',
batch_size: Optional[int] = None) -> Tuple[Distribution, 'TensorFluent']:
if low.scope != high.scope:
raise ValueError('Uniform distribution: parameters must have same scope!')
dist = tf.distribution... | Returns a TensorFluent for the Uniform sampling op with given low and high parameters.
Args:
low: The low parameter of the Uniform distribution.
high: The high parameter of the Uniform distribution.
batch_size: The size of the batch (optional).
Returns:
The Uniform distribution and a TensorFluent sample drawn from th... | juraj-google-style |
def get_upstream_artifacts_full_paths_per_task_id(context):
upstream_artifacts = context.task['payload']['upstreamArtifacts']
task_ids_and_relative_paths = [
(artifact_definition['taskId'], artifact_definition['paths'])
for artifact_definition in upstream_artifacts
]
optional_artif... | List the downloaded upstream artifacts.
Args:
context (scriptworker.context.Context): the scriptworker context.
Returns:
dict, dict: lists of the paths to upstream artifacts, sorted by task_id.
First dict represents the existing upstream artifacts. The second one
maps the optional artifacts that couldn't be downloade... | juraj-google-style |
def block_diag(*blocks: np.ndarray) -> np.ndarray:
for b in blocks:
if b.shape[0] != b.shape[1]:
raise ValueError('Blocks must be square.')
if not blocks:
return np.zeros((0, 0), dtype=np.complex128)
n = sum(b.shape[0] for b in blocks)
dtype = functools.reduce(_merge_d... | Concatenates blocks into a block diagonal matrix.
Args:
*blocks: Square matrices to place along the diagonal of the result.
Returns:
A block diagonal matrix with the given blocks along its diagonal.
Raises:
ValueError: A block isn't square. | juraj-google-style |
def metadata_matches(self, query=None):
    """Return whether every key in ``query`` matches this object's metadata.

    Every key in ``query`` is checked for a matching value in
    ``self.metadata``.

    Args:
        query (dict): Metadata keys/values to match against. Defaults to an
            empty query, which never matches.

    Returns:
        bool:
            True when ``query`` has at least one key and every queried value
            equals ``self.metadata.get(key)``; False otherwise (including
            for an empty or None query).
    """
    # Avoid the original mutable default argument ({}); None is the sentinel.
    if not query:
        return False
    # all() short-circuits on the first mismatch, unlike the original loop
    # which kept comparing after the result was already False.
    return all(query[key] == self.metadata.get(key) for key in query)
def replica_id_in_sync_group(self):
    """Return the id of the replica within its sync group.

    This identifies the replica among all replicas kept in sync; the value
    ranges from 0 to ``num_replicas_in_sync - 1``.

    NOTE(review): per the original note, this is not guaranteed to equal the
    XLA replica ID used for low-level collective operations -- confirm.

    Returns:
        The stored replica id when it is already a TF tensor, otherwise a
        freshly built int32 constant tensor wrapping it.
    """
    replica_id = self._replica_id_in_sync_group
    if tensor_util.is_tf_type(replica_id):
        return replica_id
    return constant_op.constant(
        replica_id, dtypes.int32, name='replica_id_in_sync_group')
def load(config):
if config.sys_path:
logger.debug('Appending %s to sys.path.', config.sys_path)
sys.path.append(config.sys_path)
logger.debug('sys.path is now %s', sys.path)
if config.lookups:
for (key, handler) in config.lookups.items():
register_lookup_handler(key,... | Loads a stacker configuration by modifying sys paths, loading lookups,
etc.
Args:
config (:class:`Config`): the stacker config to load.
Returns:
:class:`Config`: the stacker config provided above. | codesearchnet |
def get_missing_services(self, services):
    """Check that all required services are provided.

    Args:
        services: Iterable with the service names which are required.

    Returns:
        Sorted list of the required services that are not provided.
    """
    provided = set(self._services.keys())
    return sorted(set(services) - provided)
def __init__(
self, resolver_context, encoding_method=None, file_object=None):
if file_object is not None and encoding_method is None:
raise ValueError(
'File-like object provided without corresponding encoding method.')
super(EncodedStream, self).__init__(resolver_context)
self.... | Initializes a file-like object.
If the file-like object is chained do not separately use the parent
file-like object.
Args:
resolver_context (Context): resolver context.
encoding_method (Optional[str]): method used to the encode the data.
file_object (Optional[file]): parent file-like object.
Raises:
ValueError: if ... | juraj-google-style |
def _maybe_cast_inputs(self, inputs):
compute_dtype = self._compute_dtype
if self._autocast and compute_dtype and dtypes.as_dtype(compute_dtype).is_floating:
def f(x):
cast_types = (tensor.Tensor, sparse_tensor.SparseTensor, ragged_tensor.RaggedTensor)
if isinstance... | Maybe casts the inputs to the compute dtype.
If self._compute_dtype is floating-point, and self_autocast is True,
floating-point inputs are casted to self._compute_dtype.
Args:
inputs: Input tensor, or structure of input tensors.
Returns:
`inputs`, but tensors may have been casted to self._compute_dtype | github-repos |
def AddFile(self, filepath):
    """Add a file path as a source.

    Args:
        filepath: a string representing a path to the file.

    Returns:
        True if the file was not an already existing source.
    """
    # Guard clause: duplicates are ignored and reported as such.
    if filepath in self._files:
        return False
    self._files.add(filepath)
    return True
def from_py_func(cls, code):
from bokeh.util.deprecation import deprecated
deprecated("'from_py_func' is deprecated and will be removed in an eventual 2.0 release. Use CustomJSHover directly instead.")
if (not isinstance(code, FunctionType)):
raise ValueError('CustomJSHover.from_py_func only accepts... | Create a ``CustomJSHover`` instance from a Python functions. The
function is translated to JavaScript using PScript.
The python functions must have no positional arguments. It is
possible to pass Bokeh models (e.g. a ``ColumnDataSource``) as keyword
arguments to the functions.
The ``code`` function namespace will con... | codesearchnet |
def _other_wrapper(self, name, writing):
io_attr = getattr(self._io, name)
def other_wrapper(*args, **kwargs):
'Wrap all other calls to the stream Object.\n\n We do this to track changes to the write pointer. Anything that\n moves the write pointer in a file open for appending sh... | Wrap a stream attribute in an other_wrapper.
Args:
name: the name of the stream attribute to wrap.
Returns:
other_wrapper which is described below. | codesearchnet |
def _dirint_bins(ktp, alt, w, dktp):
it = range(len(ktp))
ktp_bin = [-1] * len(ktp)
ktp_bin = [0 if ktp[i] >= 0 and ktp[i] < 0.24 else ktp_bin[i] for i in it]
ktp_bin = [1 if ktp[i] >= 0.24 and ktp[i] < 0.4 else ktp_bin[i] for i in it]
ktp_bin = [2 if ktp[i] >= 0.4 and ktp[i] < 0.56 else ... | Determine the bins for the DIRINT coefficients.
Args:
ktp : Altitude-independent clearness index
alt : Solar altitude angle
w : precipitable water estimated from surface dew-point temperature
dktp : stability index
Returns:
tuple of ktp_bin, alt_bin, w_bin, dktp_bin | juraj-google-style |
def __sendCommand(self, cmd):
logging.info('%s: sendCommand[%s]', self.port, cmd)
if self.logThreadStatus == self.logStatus['running']:
self.logThreadStatus = self.logStatus['pauseReq']
while self.logThreadStatus != self.logStatus['paused'] and self.logThreadStatus != se... | send specific command to reference unit over serial port
Args:
cmd: OpenThread CLI string
Returns:
Done: successfully send the command to reference unit and parse it
Value: successfully retrieve the desired value from reference unit
Error: some error occurred, indicated by the specific error number that follows | juraj-google-style |
def number(digit):
spoken = str(digit)
if spoken.startswith("8") or spoken[:len(spoken) % 3] == "11":
article = "an "
else:
article = "a "
if spoken.endswith("1") and spoken != "11":
suffix = "st"
elif spoken.endswith("2") and spoken != "12":
suffix = "nd"
elif spoken.endswith("3") and spoken != "13"... | Gets a spoken-word representation for a number.
Arguments:
digit (int): An integer to convert into spoken-word.
Returns:
A spoken-word representation for a digit,
including an article ('a' or 'an') and a suffix,
e.g. 1 -> 'a 1st', 11 -> "an 11th". Additionally
delimits characters in pairs of three for values > 999. | juraj-google-style |
def _record_result_type(recorder, f):
    """Decorate ``f`` so its result is post-processed by ``recorder``.

    Args:
        recorder: a function of signature ``(args, kwargs, res) -> res``.
        f: the original function.

    Returns:
        A transformed function that calls ``f`` and then passes the call's
        arguments and result through ``recorder``, returning its output.
    """
    def wrapper(*args, **kwargs):
        return recorder(args, kwargs, f(*args, **kwargs))
    return wrapper
def _full_reduce(nodes):
    """Apply degree reduction to ``nodes`` until it can no longer be reduced.

    .. note::
        There is also a Fortran implementation of this function, which
        will be used if it can be built.

    Args:
        nodes (numpy.ndarray): The nodes in the curve.

    Returns:
        numpy.ndarray: The fully degree-reduced nodes.
    """
    # Seed the flag so the loop body runs at least once, matching the
    # original's unconditional first reduction attempt.
    reduced = True
    while reduced:
        reduced, nodes = maybe_reduce(nodes)
    return nodes
def __init__(self, filename, args, version):
    """Load the database from ``filename``, defaulting to empty on error.

    Args:
        filename: Filename for the database.
        args: Program arguments.
        version: Version of the file.
    """
    self.filename = filename
    self.args = args
    self.version = version
    try:
        with open(self.filename, 'rb') as handle:
            self.data = json.load(handle)
    except IOError:
        # Missing or unreadable file: start with an empty database.
        self.data = {}
def run_command(self, command, arg=None, is_eval=False):
    """Run a command on the server.

    Args:
        command: Command string.
        arg: Command argument; either a bare name or a (name, kwargs) tuple.
        is_eval: If True, execute the command as eval.

    Returns:
        The command's result.
    """
    mode = 'eval' if is_eval else 'command'
    if isinstance(arg, tuple):
        name, options = arg
    else:
        name, options = arg, {}
    return getattr(self.connection.admin, mode)(command, name, **options)
def daemonize(pidfile=None):
resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
os.chdir('/')
os.umask(0)
pid = os.fork()
if (pid > 0):
os._exit(0)
os.setsid()
pid = os.fork()
if (pid > 0):
os._exit(0)
def terminate(signal, stack_frame):
msg = 'Terminating on s... | Turn the running process into a proper daemon according to PEP3143.
Args:
pidfile --The pidfile to create. | codesearchnet |
def __init__(self, instrumentation_key, telemetry_channel=None):
if instrumentation_key:
if isinstance(instrumentation_key, channel.TelemetryChannel):
telemetry_channel = instrumentation_key
instrumentation_key = None
else:
raise Exception... | Initializes a new instance of the class.
Args:
instrumentation_key (str). the instrumentation key to use for this telemetry client.\n
telemetry_channel (:class:`channel.TelemetryChannel`). the optional telemetry channel to be used instead of
constructing a default one. | juraj-google-style |
def _tensor_product(self, other, reverse=False):
if not isinstance(other, Kraus):
other = Kraus(other)
ka_l, ka_r = self._data
kb_l, kb_r = other._data
if reverse:
input_dims = self.input_dims() + other.input_dims()
output_d... | Return the tensor product channel.
Args:
other (QuantumChannel): a quantum channel subclass.
reverse (bool): If False return self ⊗ other, if True return
if True return (other ⊗ self) [Default: False
Returns:
Kraus: the tensor product channel as a Kraus object.
Raises:
QiskitError: if other cannot be converted to a c... | juraj-google-style |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.