code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def load_schema(schema_path):
    """Load the JSON schema at the given path as a Python object.

    Args:
        schema_path: A filename for a JSON schema.

    Returns:
        A Python object representation of the schema.

    Raises:
        SchemaInvalidError: If the file does not contain valid JSON.
    """
    try:
        with open(schema_path) as schema_file:
            schema = json.load(schema_file)
    except ValueError as e:
        # Chain the original decoding error so the JSON parser's context
        # (line/column information) is preserved in the traceback.
        raise SchemaInvalidError('Invalid JSON in schema or included schema: '
                                 '%s\n%s' % (schema_file.name, str(e))) from e
    return schema
def _PrintTasksInformation(self, storage_reader):
    """Prints information about the tasks.

    Args:
      storage_reader (StorageReader): storage reader.
    """
    table_view = views.ViewsFactory.GetTableView(
        self._views_format_type, title='Tasks')

    # One row per session: the task identifier and its ISO start time.
    for task_start, _ in storage_reader.GetSessions():
        start_time = timelib.Timestamp.CopyToIsoFormat(task_start.timestamp)
        identifier = '{0!s}'.format(uuid.UUID(hex=task_start.identifier))
        table_view.AddRow([identifier, start_time])

    table_view.Write(self._output_writer)
def remove_deps(self, deps):
    """Remove a list of dependencies from the :class:`Node`.

    Args:
        deps: List of :class:`Dependency` objects specifying the
            dependencies of the node (a single object is also accepted).
    """
    # Normalise a single dependency into a one-element list.
    if not isinstance(deps, (list, tuple)):
        deps = [deps]
    assert all(isinstance(d, Dependency) for d in deps)

    self._deps = [dep for dep in self._deps if dep not in deps]

    if self.is_work:
        # A "work" node aggregates tasks: propagate the removal downwards.
        for task in self:
            task.remove_deps(deps)
def create_contentkey_authorization_policy(access_token, content):
    """Create Media Service Content Key Authorization Policy.

    Args:
        access_token (str): A valid Azure authentication token.
        content (str): Content Payload.

    Returns:
        HTTP response. JSON body.
    """
    path = '/ContentKeyAuthorizationPolicies'
    endpoint = ams_rest_endpoint + path
    return do_ams_post(endpoint, path, content, access_token)
def _create_L_ind(self, L):
    """Convert T label matrices with labels in 0...K_t to a one-hot format.

    Here we can view e.g. the (i,j) entries of the T label matrices as
    a _label vector_ emitted by LF j for data point i.

    Args:
        L: a T-length list of [n,m] scipy.sparse label matrices with values
            in {0,1,...,k}

    Returns:
        L_ind: An [n,m*k] dense np.ndarray with values in {0,1}

    Note that no column is required for 0 (abstain) labels.
    """
    # Densify any sparse inputs before converting to a numpy stack.
    if issparse(L[0]):
        L = [L_t.todense() for L_t in L]
    L = self._to_numpy(L)
    L_ind = np.ones((self.n, self.m * self.k))
    for yi, y in enumerate(self.task_graph.feasible_set()):
        for t in range(self.t):
            # Zero out indicator columns where the emitted label disagrees
            # with y[t]; abstains (label 0) are treated as agreement.
            L_ind[:, yi :: self.k] *= np.where(
                np.logical_or(L[t] == y[t], L[t] == 0), 1, 0
            )
        # Entries where an LF abstained on every task get no indicator.
        L_ind[:, yi :: self.k] *= np.where(sum(L) != 0, 1, 0)
    return L_ind
def default_multivariate_normal_fn(dtype, shape, name, trainable, add_variable_fn):
    """Creates multivariate standard `Normal` distribution.

    Args:
      dtype: Type of parameter's event.
      shape: Python `list`-like representing the parameter's event shape.
      name: Python `str` name prepended to any created (or existing)
        `tf.Variable`s.
      trainable: Python `bool` indicating all created `tf.Variable`s should be
        added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.
      add_variable_fn: `tf.get_variable`-like `callable` used to create (or
        access existing) `tf.Variable`s.

    Returns:
      Multivariate standard `Normal` distribution.
    """
    # The standard-normal prior has no trainable parameters, so the
    # variable-creation machinery is intentionally ignored.
    del name, trainable, add_variable_fn
    loc = tf.zeros(shape, dtype)
    scale = dtype.as_numpy_dtype(1)
    normal = tfd.Normal(loc=loc, scale=scale)
    batch_ndims = tf.size(input=normal.batch_shape_tensor())
    return tfd.Independent(normal, reinterpreted_batch_ndims=batch_ndims)
def _ReadAppJsonFile(self, relative_path):
    """Reads JSON file from an application directory.

    Args:
      relative_path: file name relative to application root directory.

    Returns:
      Parsed JSON data or None if the file does not exist, can't be read or
      not a valid JSON file.
    """
    full_path = os.path.join(sys.path[0], relative_path)
    try:
        with open(full_path, 'r') as json_file:
            return json.load(json_file)
    except (IOError, ValueError):
        # Missing, unreadable and malformed files are all reported as
        # "no data" rather than raising.
        return None
def __init__(self, columns: list[str]) -> None:
    """Base Operation class for data processing transformations.

    Args:
        columns: List of column names to apply the transformation.
    """
    # Column names the transformation operates on.
    self.columns = columns
def FromJson(json):
    """Convert a json object to a ContractParameter object

    Args:
        json (dict): The item to convert to a ContractParameter object

    Returns:
        ContractParameter
    """
    type = ContractParameterType.FromString(json['type'])
    value = json['value']
    param = ContractParameter(type=type, value=None)

    # Decode the raw JSON value according to the declared parameter type;
    # unrecognised types leave Value as None.
    if type in (ContractParameterType.Signature, ContractParameterType.ByteArray):
        param.Value = bytearray.fromhex(value)
    elif type == ContractParameterType.Boolean:
        param.Value = bool(value)
    elif type == ContractParameterType.Integer:
        param.Value = int(value)
    elif type == ContractParameterType.Hash160:
        param.Value = UInt160.ParseString(value)
    elif type == ContractParameterType.Hash256:
        param.Value = UInt256.ParseString(value)
    elif type == ContractParameterType.PublicKey:
        param.Value = ECDSA.decode_secp256r1(value).G
    elif type == ContractParameterType.String:
        param.Value = str(value)
    elif type == ContractParameterType.Array:
        # Arrays are decoded recursively, element by element.
        param.Value = [ContractParameter.FromJson(entry) for entry in value]

    return param
def __init__(self, file_name, timeout=10, delay=.05):
    """Prepare the file locker. Specify the file to lock and optionally
    the maximum timeout and the delay between each attempt to lock.

    Args:
        file_name: Name of file to lock.
        timeout: Maximum timeout for locking. Defaults to 10.
        delay: Delay between each attempt to lock. Defaults to 0.05.

    Raises:
        ValueError: If `delay`/`timeout` are not positive or delay > timeout.
    """
    timeout = float(timeout)
    delay = float(delay)
    # Validate before touching any state so an invalid locker is never
    # partially constructed.
    if delay > timeout or delay <= 0 or timeout <= 0:
        raise ValueError("delay and timeout must be positive with delay "
                         "<= timeout")
    self.file_name = os.path.abspath(file_name)
    self.lockfile = os.path.abspath(file_name) + ".lock"
    self.timeout = timeout
    self.delay = delay
    self.is_locked = False
def write_message(self, msg, timeout=None):
    """Write an arbitrary message (of one of the types above).

    For the host side implementation, this will only ever be a DataMessage, but
    it's implemented generically enough here that you could use
    FilesyncTransport to implement the device side if you wanted.

    Args:
      msg: The message to send, must be one of the types above.
      timeout: timeouts.PolledTimeout to use for the operation.
    """
    replace_dict = {'command': self.CMD_TO_WIRE[msg.command]}
    if msg.has_data:
        # For payload-carrying messages the last wire field is the payload
        # length, not the payload itself.
        data = msg[-1]
        replace_dict[msg._fields[-1]] = len(data)

    # Fixed-size header first...
    self.stream.write(struct.pack(msg.struct_format,
                                  *msg._replace(**replace_dict)), timeout)
    if msg.has_data:
        # ...then the raw payload bytes.
        self.stream.write(data, timeout)
def make_session(username=None, password=None, bearer_token=None, extra_headers_dict=None):
    """Creates a Requests Session for use. Accepts a bearer token
    for premium users and will override username and password information if
    present.

    Args:
        username (str): username for the session
        password (str): password for the user
        bearer_token (str): token for a premium API user.
        extra_headers_dict (dict): extra headers merged into the session headers.

    Returns:
        requests.Session: configured session.

    Raises:
        KeyError: if neither a password nor a bearer token is provided.
    """
    if password is None and bearer_token is None:
        logger.error('No authentication information provided; please check your object')
        raise KeyError('a password or bearer_token is required')

    session = requests.Session()
    session.trust_env = False
    headers = {'Accept-encoding': 'gzip',
               'User-Agent': ('twitterdev-search-tweets-python/' + VERSION)}
    if bearer_token:
        logger.info('using bearer token for authentication')
        headers['Authorization'] = 'Bearer {}'.format(bearer_token)
    else:
        logger.info('using username and password for authentication')
        session.auth = (username, password)
    # Merge caller-supplied headers *before* installing them on the session;
    # the original mutated the dict through the session alias afterwards,
    # which only worked by accident of object identity.
    if extra_headers_dict:
        headers.update(extra_headers_dict)
    session.headers = headers
    return session
def new(arg_name, annotated_with=None):
    """Creates a BindingKey.

    Args:
      arg_name: the name of the bound arg
      annotated_with: an annotation value, or None to create an unannotated
          binding key

    Returns:
      a new BindingKey
    """
    annotation = (annotations.Annotation(annotated_with)
                  if annotated_with is not None
                  else annotations.NO_ANNOTATION)
    return BindingKey(arg_name, annotation)
def install(self, connection, partition, table_name=None, index_columns=None, materialize=False, logger=None):
    """Installs partition's mpr to the database to allow to execute sql queries over mpr.

    Args:
        connection: database connection.
        partition (orm.Partition): partition whose mpr is installed.
        table_name: optional explicit name for the created table.
        index_columns: optional columns to index after installation.
        materialize (boolean): if True, create generic table. If False create MED over mpr.
        logger: optional logger instance.

    Returns:
        str: name of the created table.

    Raises:
        NotImplementedError: always; concrete backends must override.
    """
    raise NotImplementedError
def delete_variant(self, variant):
    """Delete observation in database.

    This means that we take down the observations variable with one.
    If 'observations' == 1 we remove the variant. If variant was homozygote
    we decrease 'homozygote' with one.
    Also remove the family from array 'families'.

    Args:
        variant (dict): A variant dictionary
    """
    mongo_variant = self.get_variant(variant)
    if mongo_variant:
        if (mongo_variant['observations'] == 1):
            # Last remaining observation: drop the whole variant document.
            LOG.debug('Removing variant {0}'.format(mongo_variant.get('_id')))
            message = self.db.variant.delete_one({'_id': variant['_id']})
        else:
            # Otherwise decrement the observation/zygosity counters and
            # detach this case from the families array.
            LOG.debug('Decreasing observations for {0}'.format(mongo_variant.get('_id')))
            message = self.db.variant.update_one({'_id': mongo_variant['_id']}, {'$inc': {'observations': (- 1), 'homozygote': (- variant.get('homozygote', 0)), 'hemizygote': (- variant.get('hemizygote', 0))}, '$pull': {'families': variant.get('case_id')}}, upsert=False)
    return
def _export_work_errors(self, work, output_file):
    """Saves errors for given work pieces into file.

    Args:
      work: instance of either AttackWorkPieces or DefenseWorkPieces
      output_file: name of the output file
    """
    # Collect the distinct error messages from completed work pieces.
    unique_errors = {
        piece['error']
        for piece in itervalues(work.work)
        if piece['is_completed'] and piece['error'] is not None
    }
    with open(output_file, 'w') as f:
        for error in sorted(unique_errors):
            f.write(error)
            f.write('\n')
def get_uri(dir_name):
    """Returns the URI path for a directory. This allows files hosted on
    different file servers to have distinct locations.

    Args:
        dir_name:
            A directory name.

    Returns:
        Full URI path, e.g., fileserver.host.com:/full/path/of/dir_name.
    """
    fullpath = os.path.abspath(dir_name)
    try:
        hostname = socket.gethostbyaddr(socket.gethostname())[0]
    except Exception:
        # Reverse lookup can fail (no DNS entry); fall back to the plain
        # host name. A bare `except:` here also swallowed KeyboardInterrupt.
        hostname = socket.gethostname()
    return '{}:{}'.format(hostname, fullpath)
def report(
    vulnerabilities,
    fileobj,
    print_sanitised,
):
    """Prints issues in color-coded text format.

    Args:
        vulnerabilities: list of vulnerabilities to report
        fileobj: The output file object, which may be sys.stdout
        print_sanitised: whether sanitised vulnerabilities are printed too
    """
    n_vulnerabilities = len(vulnerabilities)
    unsanitised_vulnerabilities = [v for v in vulnerabilities if not isinstance(v, SanitisedVulnerability)]
    n_unsanitised = len(unsanitised_vulnerabilities)
    n_sanitised = n_vulnerabilities - n_unsanitised
    # e.g. "No vulnerabilities found.", "1 vulnerability found (plus 2 sanitised)."
    heading = "{} vulnerabilit{} found{}.\n".format(
        'No' if n_unsanitised == 0 else n_unsanitised,
        'y' if n_unsanitised == 1 else 'ies',
        " (plus {} sanitised)".format(n_sanitised) if n_sanitised else "",
    )
    vulnerabilities_to_print = vulnerabilities if print_sanitised else unsanitised_vulnerabilities
    with fileobj:
        for i, vulnerability in enumerate(vulnerabilities_to_print, start=1):
            fileobj.write(vulnerability_to_str(i, vulnerability))
        # Heading is written last, coloured by whether anything unsanitised
        # remains.
        if n_unsanitised == 0:
            fileobj.write(color(heading, GOOD))
        else:
            fileobj.write(color(heading, DANGER))
def BDEVolumeOpen(bde_volume, path_spec, file_object, key_chain):
    """Opens the BDE volume using the path specification.

    Args:
      bde_volume (pybde.volume): BDE volume.
      path_spec (PathSpec): path specification.
      file_object (FileIO): file-like object.
      key_chain (KeyChain): key chain.
    """
    # Apply whichever credentials the key chain holds for this path spec;
    # any combination of them may be present.
    password = key_chain.GetCredential(path_spec, 'password')
    if password:
        bde_volume.set_password(password)
    recovery_password = key_chain.GetCredential(path_spec, 'recovery_password')
    if recovery_password:
        bde_volume.set_recovery_password(recovery_password)
    startup_key = key_chain.GetCredential(path_spec, 'startup_key')
    if startup_key:
        bde_volume.read_startup_key(startup_key)
    bde_volume.open_file_object(file_object)
def __init__(
    self, full_name=None, group_identifier=None, identifier=None,
    path_separator='/', user_directory=None, username=None):
    """Initializes an user artifact.

    Args:
      full_name (Optional[str]): name describing the user e.g. full name.
      group_identifier (Optional[str]): identifier of the primary group
          the user is part of.
      identifier (Optional[str]): user identifier.
      path_separator (Optional[str]): path segment separator.
      user_directory (Optional[str]): path of the user (or home or profile)
          directory.
      username (Optional[str]): name uniquely identifying the user.
    """
    super(UserAccountArtifact, self).__init__()
    # Separator used when interpreting user_directory path segments.
    self._path_separator = path_separator
    self.full_name = full_name
    self.group_identifier = group_identifier
    self.identifier = identifier
    self.user_directory = user_directory
    self.username = username
def Dict(fields):
    """Schema for configuration data with string keys and typed values via :py:class:`Field`.

    Args:
        fields (Dict[str, Field])
    """
    check_user_facing_fields_dict(fields, 'Dict')

    class _Dict(_ConfigComposite):
        # Anonymous composite config type; each Dict() call mints a unique
        # key so distinct dict schemas are never conflated.
        def __init__(self):
            key = 'Dict.' + str(DictCounter.get_next_count())
            super(_Dict, self).__init__(
                name=None,
                key=key,
                fields=fields,
                description='A configuration dictionary with typed fields',
                type_attributes=ConfigTypeAttributes(is_builtin=True),
            )

    return _Dict
def generateRandomInput(numRecords, elemSize=400, numSet=42):
    """Generates a set of input records.

    Args:
        numRecords: how many records to generate
        elemSize: the size of each record (num 0s or 1s)
        numSet: how many 1s in each record

    Returns:
        a list of inputs
    """
    inputs = []
    # `xrange` and `np.random.random_integers` are Python 2 / old-NumPy
    # relics; use `range` and `np.random.randint` (exclusive high bound).
    for _ in range(numRecords):
        record = np.zeros(elemSize, dtype=realDType)
        # Keep setting random bits until exactly numSet are 1; random picks
        # may collide with already-set bits, hence the loop condition.
        while record.sum() < numSet:
            record[np.random.randint(0, elemSize)] = 1
        inputs.append(record)
    return inputs
def place_market_order(self, product_id, side, size=None, funds=None, client_oid=None, stp=None, overdraft_enabled=None, funding_amount=None):
    """Place market order.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        side (str): Order side ('buy' or 'sell)
        size (Optional[Decimal]): Desired amount in crypto. Specify this or
            `funds`.
        funds (Optional[Decimal]): Desired amount of quote currency to use.
            Specify this or `size`.
        client_oid (Optional[str]): User-specified Order ID
        stp (Optional[str]): Self-trade prevention flag. See `place_order`
            for details.
        overdraft_enabled (Optional[bool]): If true funding above and
            beyond the account balance will be provided by margin, as
            necessary.
        funding_amount (Optional[Decimal]): Amount of margin funding to be
            provided for the order. Mutually exclusive with
            `overdraft_enabled`.

    Returns:
        dict: Order details. See `place_order` for example.
    """
    params = {
        'product_id': product_id,
        'side': side,
        'order_type': 'market',
        'size': size,
        'funds': funds,
        'client_oid': client_oid,
        'stp': stp,
        'overdraft_enabled': overdraft_enabled,
        'funding_amount': funding_amount,
    }
    # Drop unset optional parameters before delegating to place_order.
    params = {key: value for key, value in params.items() if value is not None}
    return self.place_order(**params)
def conv_block_internal(conv_fn,
                        inputs,
                        filters,
                        dilation_rates_and_kernel_sizes,
                        first_relu=True,
                        use_elu=False,
                        separabilities=None,
                        **kwargs):
    """A block of convolutions.

    Args:
      conv_fn: convolution function, e.g. conv or separable_conv.
      inputs: a Tensor
      filters: an Integer
      dilation_rates_and_kernel_sizes: a list of tuples (dilation, (k_w, k_h))
      first_relu: whether to do a relu at start (defaults to True)
      use_elu: whether to use ELUs instead of ReLUs (defaults to False)
      separabilities: list of separability factors (per-layer).
      **kwargs: additional arguments (e.g., pooling)

    Returns:
      a Tensor.
    """
    name = kwargs.pop("name") if "name" in kwargs else None
    mask = kwargs.pop("mask") if "mask" in kwargs else None

    # Use layer norm by default, unless the caller supplied an explicit
    # normalizer_fn (which may be None to disable normalization entirely).
    use_layer_norm = "normalizer_fn" not in kwargs
    norm = kwargs.pop("normalizer_fn", None)
    use_normalizer_fn = use_layer_norm or norm
    if use_layer_norm:
        norm = lambda x, name: layer_norm(x, filters, name=name)

    with tf.variable_scope(name, "conv_block", [inputs]):
        cur, counter = inputs, -1
        for dilation_rate, kernel_size in dilation_rates_and_kernel_sizes:
            counter += 1
            # Pre-activation: optionally skipped for the very first layer.
            if first_relu or counter > 0:
                cur = tf.nn.elu(cur) if use_elu else tf.nn.relu(cur)
            if mask is not None:
                cur *= mask
            if separabilities:
                cur = conv_fn(
                    cur,
                    filters,
                    kernel_size,
                    dilation_rate=dilation_rate,
                    name="conv_block_%d" % counter,
                    use_bias=norm is None,
                    separability=separabilities[counter],
                    **kwargs)
            else:
                cur = conv_fn(
                    cur,
                    filters,
                    kernel_size,
                    dilation_rate=dilation_rate,
                    name="conv_block_%d" % counter,
                    use_bias=norm is None,
                    **kwargs)
            if use_normalizer_fn:
                cur = norm(cur, name="conv_block_norm_%d" % counter)
        return cur
def create_software_renderer(self, surface):
    """Create a 2D software rendering context for a surface.

    Args:
        surface (Surface): The surface where rendering is done.

    Returns:
        Renderer: A 2D software rendering context.

    Raises:
        SDLError: If there was an error creating the renderer.
    """
    # Bypass Renderer.__init__ and attach the raw SDL pointer directly.
    renderer = object.__new__(Renderer)
    # NOTE(review): this also overwrites self._ptr with the new renderer's
    # pointer, which looks suspicious -- confirm it is intentional.
    renderer._ptr = self._ptr = check_ptr_err(lib.SDL_CreateSoftwareRenderer(surface._ptr))
    return renderer
def _pack_with_custom_ops(dataset, keys, length):
    """Helper-function for packing a dataset which has already been batched.

    See pack_dataset()

    Relies on custom ops which require a custom compiled binary.
    Faster than _pack_with_tf_ops(), and denser packing.

    Args:
      dataset: a dataset containing padded batches of examples.
      keys: a list of strings (must have length 2)
      length: an integer

    Returns:
      a dataset.
    """
    # Deferred import: the custom-op binary is not available in every build.
    from tensor2tensor.data_generators.ops import pack_sequences_ops
    k1, k2 = keys

    def map_fn_custom(x):
        # The custom op emits, for each key, the packed values plus
        # segmentation (which example each token belongs to) and position
        # (offset within that example) tensors.
        (k1_packed, k1_segmengation, k1_position,
         k2_packed, k2_segmentation, k2_position) = (
             pack_sequences_ops.pack_sequences2(x[k1], x[k2], length))
        packed = {
            k1: k1_packed,
            k1 + "_segmentation": k1_segmengation,
            k1 + "_position": k1_position,
            k2: k2_packed,
            k2 + "_segmentation": k2_segmentation,
            k2 + "_position": k2_position,
        }
        # Unbatch: each packed batch becomes a stream of packed examples.
        return tf.data.Dataset.from_tensor_slices(packed)

    dataset = dataset.flat_map(map_fn_custom)
    return dataset
def load_recipe(self, recipe):
    """Populates the internal module pool with modules declared in a recipe.

    Args:
        recipe: Dict, recipe declaring modules to load.
    """
    self.recipe = recipe
    for description in recipe['modules']:
        name = description['name']
        # Instantiate the registered module class, bound to this state.
        self._module_pool[name] = self.config.get_module(name)(self)
def get_structure_by_id(self, cod_id, **kwargs):
    """Queries the COD for a structure by id.

    Args:
        cod_id (int): COD id.
        kwargs: All kwargs supported by
            :func:`pymatgen.core.structure.Structure.from_str`.

    Returns:
        A Structure.
    """
    # NOTE(review): the URL string was truncated in the source (invalid
    # syntax); reconstructed as the canonical COD CIF download endpoint --
    # verify against the upstream project.
    r = requests.get("http://www.crystallography.net/cod/%s.cif" % cod_id)
    return Structure.from_str(r.text, fmt="cif", **kwargs)
def run_foreach_or_conditional(self, context):
    """Run the foreach sequence or the conditional evaluation.

    Args:
        context: (pypyr.context.Context) The pypyr context. This arg will
                 mutate.
    """
    logger.debug('starting')
    # foreach takes precedence; otherwise fall through to the conditional
    # run/skip/swallow decorators.
    if self.foreach_items:
        self.foreach_loop(context)
    else:
        self.run_conditional_decorators(context)
    logger.debug('done')
def remove_model_references_from_file(filename, models, condition):
    """Remove all references to the given models from the given file.

    Args:
        filename (str): The file to remove the references from
        models (List[str]): The models to remove
        condition (Callable): A function that takes the line and model and
            returns True if the line should be removed
    """
    filename = REPO_PATH / filename
    with open(filename, 'r') as f:
        content = f.read()

    # Keep only the lines that no model's removal condition matches.
    kept_lines = [
        line
        for line in content.split('\n')
        if not any(condition(line, model) for model in models)
    ]

    with open(filename, 'w') as f:
        f.write('\n'.join(kept_lines))
def grabEmails(emails=None, emailsFile=None, nicks=None, nicksFile=None, domains=EMAIL_DOMAINS, excludeDomains=None):
    """Method that generates a list of emails.

    Args:
    -----
        emails: Any premade list of emails.
        emailsFile: Filepath to the emails file (one per line).
        nicks: A list of aliases.
        nicksFile: Filepath to the aliases file (one per line).
        domains: Domains where the aliases will be tested.
        excludeDomains: Domains to be excluded from the created list.

    Returns:
    --------
        list: the list of emails that will be verified.
    """
    # A mutable default argument ([]) is shared across calls; normalise a
    # None default here instead.
    if excludeDomains is None:
        excludeDomains = []

    if emails is not None:
        return emails
    if emailsFile is not None:
        with open(emailsFile, 'r') as iF:
            return iF.read().splitlines()
    # An explicit nicks list takes precedence over a nicks file.
    if nicks is None and nicksFile is not None:
        with open(nicksFile, 'r') as iF:
            nicks = iF.read().splitlines()
    if nicks is not None:
        return [nick + '@' + domain
                for nick in nicks
                for domain in domains
                if domain not in excludeDomains]
    return []
async def movehere(self, channel):
self.logger.debug('movehere command')
(await self.embed.delete())
self.embed.channel = channel
(await self.embed.send())
(await self.add_reactions())
self.statuslog.info('Moved to front') | Moves the embed message to a new channel; can also be used to move the musicplayer to the front
Args:
channel (discord.Channel): The channel to move to | codesearchnet |
def set_rgb_dim_level(self, channelIndex: int, rgb: RGBColorState, dimLevel: float):
    """sets the color and dimlevel of the lamp

    Args:
        channelIndex(int): the channelIndex of the lamp. Use self.topLightChannelIndex or self.bottomLightChannelIndex
        rgb(RGBColorState): the color of the lamp
        dimLevel(float): the dimLevel of the lamp. 0.0 = off, 1.0 = MAX

    Returns:
        the result of the _restCall
    """
    payload = json.dumps({
        "channelIndex": channelIndex,
        "deviceId": self.id,
        "simpleRGBColorState": rgb,
        "dimLevel": dimLevel,
    })
    return self._restCall(
        "device/control/setSimpleRGBColorDimLevel", body=payload
    )
def dedent(self, node, dirty=True):
    """Dedent an item. Does nothing if the target is not indented under this item.

    Args:
        node (gkeepapi.node.ListItem): Item to dedent.
        dirty (bool): Whether this node should be marked dirty.
    """
    # Ignore nodes that are not indented under this item.
    if node.id not in self._subitems:
        return

    # Detach the node from this item's subitem bookkeeping.
    self._subitems.pop(node.id)
    node.super_list_item_id = None
    node.parent_item = None

    if dirty:
        node.touch(True)
def _pare_down_model(self, strain_gempro, genes_to_remove):
    """Mark genes as non-functional in a GEM-PRO. If there is a COBRApy model associated with it, the
    COBRApy method delete_model_genes is utilized to delete genes.

    Args:
        strain_gempro (GEMPRO): GEMPRO object
        genes_to_remove (list): List of gene IDs to remove from the model

    NOTE(review): this mutates the caller's genes_to_remove list via
    extend() -- confirm callers do not reuse it.
    """
    # Restrict the removal set to genes actually present in this strain.
    strain_genes = [x.id for x in strain_gempro.genes]
    genes_to_remove.extend(self.missing_in_orthology_matrix)
    genes_to_remove = list(set(genes_to_remove).intersection(set(strain_genes)))
    if len(genes_to_remove) == 0:
        log.info('{}: no genes marked non-functional'.format(strain_gempro.id))
        return
    else:
        log.debug('{}: {} genes to be marked non-functional'.format(strain_gempro.id, len(genes_to_remove)))
    # If a COBRApy model exists, use its gene-deletion machinery, which
    # records the trimmed genes/reactions on the model itself.
    if strain_gempro.model:
        strain_gempro.model._trimmed = False
        strain_gempro.model._trimmed_genes = []
        strain_gempro.model._trimmed_reactions = {}
        cobra.manipulation.delete_model_genes(strain_gempro.model, genes_to_remove)
        if strain_gempro.model._trimmed:
            log.info('{}: marked {} genes as non-functional, '
                     'deactivating {} reactions'.format(strain_gempro.id, len(strain_gempro.model._trimmed_genes),
                                                        len(strain_gempro.model._trimmed_reactions)))
    # Otherwise just flag the gene objects directly.
    else:
        for g in genes_to_remove:
            strain_gempro.genes.get_by_id(g).functional = False
        log.info('{}: marked {} genes as non-functional'.format(strain_gempro.id, len(genes_to_remove)))
def create_audit_student_enrollment(self, course_id):
    """Creates an audit enrollment for the user in a given course

    Args:
        course_id (str): an edX course id

    Returns:
        Enrollment: object representing the student enrollment in the provided course
    """
    payload = {'mode': 'audit', 'course_details': {'course_id': course_id}}
    resp = self.requester.post(
        urljoin(self.base_url, self.enrollment_url), json=payload)
    # Surface HTTP errors immediately rather than returning a bad body.
    resp.raise_for_status()
    return Enrollment(resp.json())
def _print_drift_report(self):
    """Report the drift of the stack.

    Returns:
        Good or Bad; True or False

    Note: not yet implemented
    """
    try:
        response = self._cloud_formation.describe_stack_resources(StackName=self._stack_name)
        rows = []
        # One table row per stack resource, defaulting missing fields to
        # 'unknown' so the report never KeyErrors.
        for resource in response.get('StackResources', []):
            row = []
            row.append(resource.get('LogicalResourceId', 'unknown'))
            row.append(resource.get('PhysicalResourceId', 'unknown'))
            row.append(resource.get('ResourceStatus', 'unknown'))
            row.append(resource.get('DriftInformation', {}).get('StackResourceDriftStatus', 'unknown'))
            rows.append(row)
        print('Drift Report:')
        print(tabulate(rows, headers=['Logical ID', 'Physical ID', 'Resource Status', 'Drift Info']))
    except Exception as wtf:
        # Best-effort report: log the failure and signal it to the caller.
        logging.error(wtf, exc_info=True)
        return False
    return True
def parse(self, s, term_join=None):
    """Parse a search term into groups keyed by marker.

    Args:
        s (str): string with search term.
        term_join (callable): function to join 'OR' terms.

    Returns:
        dict: all of the terms grouped by marker. Key is a marker, value is
        a term.

    Example:
        >>> SearchTermParser().parse('table2 from 1978 to 1979 in california')
        {'to': 1979, 'about': 'table2', 'from': 1978, 'in': 'california'}
    """
    if (not term_join):
        term_join = (lambda x: (('(' + ' OR '.join(x)) + ')'))
    toks = self.scan(s)
    # If the query starts with a bare term, treat it as an 'about' marker.
    if (toks and toks[0] and ((toks[0][0] == self.TERM) or (toks[0][0] == self.QUOTEDTERM))):
        toks = ([(self.MARKER, 'about')] + toks)
    # Group consecutive non-marker tokens under the preceding marker.
    bymarker = []
    for t in toks:
        if (t[0] == self.MARKER):
            bymarker.append((t[1], []))
        else:
            bymarker[(- 1)][1].append(t)
    # Re-interpret ambiguous markers: 'in <geograin>' means 'by', and
    # 'from <non-year>' means 'source'.
    comps = []
    for t in bymarker:
        t = list(t)
        if ((t[0] == 'in') and (len(t[1]) == 1) and isinstance(t[1][0][1], string_types) and (self.stem(t[1][0][1]) in self.geograins.keys())):
            t[0] = 'by'
        if ((t[0] == 'from') and (len(t[1]) == 1) and (t[1][0][0] != self.YEAR)):
            t[0] = 'source'
        comps.append(t)
    groups = {marker: [] for (marker, _) in comps}
    for (marker, terms) in comps:
        groups[marker] += [term for (marker, term) in terms]
    for (marker, group) in groups.items():
        if (marker == 'about'):
            continue
        # Markers that cannot hold multiple terms keep the first one and
        # spill the rest into 'about'.
        if ((len(group) > 1) and (marker not in self.multiterms)):
            (groups[marker], extras) = ([group[0]], group[1:])
            if (not ('about' in groups)):
                groups['about'] = extras
            else:
                groups['about'] += extras
        if (marker == 'by'):
            groups['by'] = [self.geograins.get(self.stem(e)) for e in group]
    # Collapse lists to scalars or joined expressions.
    for (marker, terms) in iteritems(groups):
        if (len(terms) > 1):
            if (marker in 'in'):
                groups[marker] = ' '.join(terms)
            else:
                groups[marker] = term_join(terms)
        elif (len(terms) == 1):
            groups[marker] = terms[0]
        else:
            pass
    return groups
def linear_interpolate(tensor1, tensor2, coeffs):
    """Linearly interpolate between two tensors at coeff.

    Args:
      tensor1: 4-D Tensor, shape=(NHWC)
      tensor2: 4-D Tensor, shape=(NHWC)
      coeffs: list of floats.

    Returns:
      interp_latents: 5-D Tensor, with interp_latents[i] representing
        interpolations at coeffs[i].
        shape=(len(coeffs), NHWC)
    """
    # tensor1 + c * (tensor2 - tensor1) walks from tensor1 (c=0) to
    # tensor2 (c=1); stack one interpolation per coefficient.
    interp_tensors = [tensor1 + (coeff * (tensor2 - tensor1))
                      for coeff in coeffs]
    return tf.concat(interp_tensors, axis=0)
def get_ctl_field(self, controlfield, alt=None):
    """Method wrapper over :attr:`.controlfields` dictionary.

    Args:
        controlfield (str): Name of the controlfield.
        alt (object, default None): Alternative value of the `controlfield`
            when `controlfield` couldn't be found.

    Returns:
        str: record from given `controlfield`

    Raises:
        KeyError: If `controlfield` is missing and no `alt` was given.
    """
    # Use an identity check so falsy-but-valid fallbacks (e.g. '' or 0)
    # are honoured instead of being treated as "no alternative given".
    if alt is None:
        return self.controlfields[controlfield]
    return self.controlfields.get(controlfield, alt)
def get_data_path(self, filename, env_prefix=None):
    """Get data path.

    Args:
        filename (string): Name of file inside of /data folder to retrieve.

    Kwargs:
        env_prefix (string): Name of subfolder, ex: 'qa' will find files in /data/qa

    Returns:
        String - path to file.

    Usage::
        open(WTF_DATA_MANAGER.get_data_path('testdata.csv')

    Note: WTF_DATA_MANAGER is a provided global instance of DataManager
    """
    # Prepend the optional environment subfolder to the file name.
    target_file = (filename if env_prefix is None
                   else os.path.join(env_prefix, filename))
    full_path = os.path.join(self._data_path, target_file)
    if not os.path.exists(full_path):
        raise DataNotFoundError(u('Cannot find data file: {0}').format(target_file))
    return full_path
def handle_backend_response(self, orig_request, backend_request, response_status, response_headers, response_body, method_config, start_response):
    """Handle backend response, transforming output as needed.

    This calls start_response and returns the response body.

    Args:
      orig_request: An ApiRequest, the original request from the user.
      backend_request: An ApiRequest, the transformed request that was
        sent to the backend handler.
      response_status: A string, the status from the response.
      response_headers: A dict, the headers from the response.
      response_body: A string, the body of the response.
      method_config: A dict, the API config of the method to be called.
      start_response: A function with semantics defined in PEP-333.

    Returns:
      A string containing the response body.
    """
    # Only JSON replies can be transformed; anything else is a failure.
    for (header, value) in response_headers:
        if ((header.lower() == 'content-type') and (not value.lower().startswith('application/json'))):
            return self.fail_request(orig_request, ('Non-JSON reply: %s' % response_body), start_response)
    self.check_error_response(response_body, response_status)
    # Some methods legitimately produce an empty response.
    empty_response = self.check_empty_response(orig_request, method_config, start_response)
    if (empty_response is not None):
        return empty_response
    body = self.transform_rest_response(response_body)
    cors_handler = self._create_cors_handler(orig_request)
    return util.send_wsgi_response(response_status, response_headers, body, start_response, cors_handler=cors_handler)
def multi_replace(str_, search_list, repl_list):
    r"""Performs multiple replace functions foreach item in search_list and
    repl_list.

    Args:
        str_ (str): string to search
        search_list (list): list of search strings
        repl_list (list or str): one or multiple replace strings

    Returns:
        str: str_

    Raises:
        ValueError: if `search_list` and `repl_list` differ in length.

    Example:
        >>> multi_replace('foo. bar: baz; spam-eggs --- eggs+spam',
        ...               ['.', ':', '---'], '@')
        'foo@ bar@ baz; spam-eggs @ eggs+spam'
    """
    # A single replacement string is broadcast over every search string.
    # Plain `str` replaces the old runtime dependency on six.string_types.
    if isinstance(repl_list, str):
        repl_list_ = [repl_list] * len(search_list)
    else:
        repl_list_ = repl_list
    # Validate with a real exception: `assert` is stripped under -O.
    if len(search_list) != len(repl_list_):
        raise ValueError('bad lens')
    newstr = str_
    for search, repl in zip(search_list, repl_list_):
        newstr = newstr.replace(search, repl)
    return newstr
def RegisterPathSpec(cls, path_spec_type):
    """Registers a path specification type.

    Args:
        path_spec_type (type): path specification type.

    Raises:
        KeyError: if path specification is already registered.
    """
    type_indicator = path_spec_type.TYPE_INDICATOR
    if type_indicator in cls._path_spec_types:
        raise KeyError(
            'Path specification type: {0:s} already set.'.format(type_indicator))

    cls._path_spec_types[type_indicator] = path_spec_type

    # System-level path specs are additionally tracked in their own map.
    if getattr(path_spec_type, '_IS_SYSTEM_LEVEL', False):
        cls._system_level_type_indicators[type_indicator] = path_spec_type
def _begin_disconnection_action(self, action):
    """Begin a disconnection attempt

    Args:
        action (ConnectionAction): the action object describing what we are
            connecting to and what the result of the operation was
    """
    conn_key = action.data['id']
    callback = action.data['callback']

    # A disconnection may only start from the Idle state; otherwise report
    # the failure straight back through the callback.
    if self._get_connection_state(conn_key) != self.Idle:
        callback(conn_key, self.id, False, 'Cannot start disconnection, connection is not idle')
        return

    data = self._get_connection(conn_key)
    data['state'] = self.Disconnecting
    data['microstate'] = None
    data['callback'] = callback
    data['timeout'] = action.timeout
class PromptDepthAnythingNeck(nn.Module):
    """Neck module used between the backbone and the head.

    Takes a list of feature tensors and produces another list of tensors. It
    runs two stages: a PromptDepthAnythingReassembleStage followed by a
    PromptDepthAnythingFeatureFusionStage, with one 3x3 projection conv per
    backbone feature level in between.

    Args:
        config: model configuration providing ``neck_hidden_sizes`` and
            ``fusion_hidden_size``.
    """
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.reassemble_stage = PromptDepthAnythingReassembleStage(config)
        self.convs = nn.ModuleList()
        # One projection conv per feature level, all mapped to the common
        # fusion width.
        for channel in config.neck_hidden_sizes:
            self.convs.append(nn.Conv2d(channel, config.fusion_hidden_size, kernel_size=3, padding=1, bias=False))
        self.fusion_stage = PromptDepthAnythingFeatureFusionStage(config)
    def forward(self, hidden_states: List[torch.Tensor], patch_height: Optional[int]=None, patch_width: Optional[int]=None, prompt_depth: Optional[torch.Tensor]=None) -> List[torch.Tensor]:
        """Reassemble, project and fuse the backbone hidden states.

        Args:
            hidden_states: one tensor per backbone level; length must equal
                ``len(config.neck_hidden_sizes)``.
            patch_height: optional patch grid height forwarded to reassembly.
            patch_width: optional patch grid width forwarded to reassembly.
            prompt_depth: optional depth prompt forwarded to the fusion stage.

        Returns:
            The fused feature output of the fusion stage.

        Raises:
            TypeError: if ``hidden_states`` is not a tuple or list.
            ValueError: if the number of hidden states does not match the
                configured neck sizes.
        """
        if not isinstance(hidden_states, (tuple, list)):
            raise TypeError('hidden_states should be a tuple or list of tensors')
        if len(hidden_states) != len(self.config.neck_hidden_sizes):
            raise ValueError('The number of hidden states should be equal to the number of neck hidden sizes.')
        hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width)
        features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)]
        output = self.fusion_stage(features, prompt_depth=prompt_depth)
        return output
input and produces another list of tensors as output. For PromptDepthAnything, it includes 2 stages:
* PromptDepthAnythingReassembleStage
* PromptDepthAnythingFeatureFusionStage.
Args:
config (dict): config dict. | github-repos |
def build_defaults(self):
    """Build a dictionary of default values from the `Scheme`.

    Returns:
        dict: the default configurations as set by the `Scheme`, keyed by
            option name. Options without a default are omitted; nested
            DictOption schemes are recursed into.

    Raises:
        errors.InvalidSchemeError: the `Scheme` contains a non-Option entry.
    """
    defaults = {}
    for arg in self.args:
        if (not isinstance(arg, _BaseOpt)):
            raise errors.InvalidSchemeError('Unable to build default for non-Option type')
        # NoDefault is the sentinel for "no default configured".
        if (not isinstance(arg.default, NoDefault)):
            defaults[arg.name] = arg.default
        if isinstance(arg, DictOption):
            if arg.scheme:
                # A nested scheme's defaults replace any flat default above,
                # but only when the nested scheme actually produced values.
                b = arg.scheme.build_defaults()
                if b:
                    defaults[arg.name] = b
    return defaults
Returns:
dict: The default configurations as set by the `Scheme`.
Raises:
errors.InvalidSchemeError: The `Scheme` does not contain
valid options. | codesearchnet |
def format_counts(counts, header=None):
    """Format a single experiment's counts histogram for the user.

    Each raw memory-style key is converted through ``format_counts_memory``
    using the experiment ``header`` for post-processing hints.

    Args:
        counts (dict): counts histogram of multiple shots.
        header (dict): experiment header dictionary with information useful
            for postprocessing.

    Returns:
        dict: the histogram re-keyed with formatted count keys.
    """
    return {
        format_counts_memory(raw_key, header): hits
        for raw_key, hits in counts.items()
    }
to the Qiskit user.
Args:
counts (dict): counts histogram of multiple shots
header (dict): the experiment header dictionary containing
useful information for postprocessing.
Returns:
dict: a formatted counts | juraj-google-style |
def sparse_eye(num_rows, num_columns=None, dtype=dtypes.float32, name=None):
    """Creates a two-dimensional sparse tensor with ones along the diagonal.

    Args:
        num_rows: non-negative integer or scalar tensor giving the number of
            rows in the resulting matrix.
        num_columns: optional non-negative integer or scalar tensor giving the
            number of columns. Defaults to ``num_rows`` (square matrix).
        dtype: the type of element in the resulting tensor.
        name: a name for this op. Defaults to "eye".

    Returns:
        A `SparseTensor` of shape [num_rows, num_columns] with ones along the
        diagonal.
    """
    with ops.name_scope(name, default_name='eye', values=[num_rows, num_columns]):
        num_rows = _make_int64_tensor(num_rows, 'num_rows')
        num_columns = num_rows if num_columns is None else _make_int64_tensor(num_columns, 'num_columns')
        # The diagonal runs for min(rows, cols) entries; each index is (i, i).
        diag_size = math_ops.minimum(num_rows, num_columns)
        diag_range = math_ops.range(diag_size, dtype=dtypes.int64)
        return sparse_tensor.SparseTensor(indices=array_ops_stack.stack([diag_range, diag_range], axis=1), values=array_ops.ones(diag_size, dtype=dtype), dense_shape=[num_rows, num_columns])
Args:
num_rows: Non-negative integer or `int32` scalar `tensor` giving the number
of rows in the resulting matrix.
num_columns: Optional non-negative integer or `int32` scalar `tensor` giving
the number of columns in the resulting matrix. Defaults to `num_rows`.
dtype: The type of element in the resulting `Tensor`.
name: A name for this `Op`. Defaults to "eye".
Returns:
A `SparseTensor` of shape [num_rows, num_columns] with ones along the
diagonal. | github-repos |
def Decompress(self, compressed_data):
    """Decompresses BZIP2 compressed data.

    Args:
        compressed_data (bytes): compressed data.

    Returns:
        tuple(bytes, bytes): uncompressed data and remaining compressed data
            left over after the end of the BZIP2 stream.

    Raises:
        errors.BackEndError: if the BZIP2 compressed stream cannot be
            decompressed.
    """
    try:
        uncompressed_data = self._bz2_decompressor.decompress(compressed_data)
        # unused_data holds bytes past the end of the compressed stream; not
        # all decompressor implementations expose it, hence the getattr.
        remaining_compressed_data = getattr(
            self._bz2_decompressor, 'unused_data', b'')
    except (EOFError, IOError) as exception:
        raise errors.BackEndError((
            'Unable to decompress BZIP2 compressed stream with error: '
            '{0!s}.').format(exception))
    return uncompressed_data, remaining_compressed_data
Args:
compressed_data (bytes): compressed data.
Returns:
tuple(bytes, bytes): uncompressed data and remaining compressed data.
Raises:
BackEndError: if the BZIP2 compressed stream cannot be decompressed. | juraj-google-style |
def Close(self, abort=False):
    """Closes the queue.

    Args:
        abort (Optional[bool]): whether the Close is the result of an abort
            condition. If True, the queue terminates immediately and its
            contents may be lost.

    Raises:
        QueueAlreadyClosed: if the queue has already been closed (non-abort
            path only).
        RuntimeError: if the closed or terminate event is missing.
    """
    if not self._closed_event or not self._terminate_event:
        raise RuntimeError('Missing closed or terminate event.')
    if not abort and self._closed_event.is_set():
        raise errors.QueueAlreadyClosed()
    self._closed_event.set()
    if abort:
        if not self._closed_event.is_set():
            logger.warning(
                '{0:s} queue aborting. Contents may be lost.'.format(self.name))
        self._terminate_event.set()
        # No lingering on abort; drop buffered items immediately.
        self._linger_seconds = 0
        if self._zmq_thread:
            logger.debug('[{0:s}] Waiting for thread to exit.'.format(self.name))
            self._zmq_thread.join(timeout=self.timeout_seconds)
            # NOTE(review): Thread.isAlive() was removed in Python 3.9; this
            # should be is_alive() on modern interpreters — confirm the
            # supported Python versions before changing.
            if self._zmq_thread.isAlive():
                logger.error((
                    '{0:s} ZMQ responder thread did not exit within timeout').format(
                        self.name))
    else:
        logger.debug(
            '{0:s} queue closing, will linger for up to {1:d} seconds'.format(
                self.name, self._linger_seconds))
Args:
abort (Optional[bool]): whether the Close is the result of an abort
condition. If True, queue contents may be lost.
Raises:
QueueAlreadyClosed: if the queue is not started, or has already been
closed.
RuntimeError: if closed or terminate event is missing. | juraj-google-style |
def subscribe(self, topic, callback, ordered=True):
    """Subscribe to future messages in the given topic.

    The contents of topic should be in the format created by self.publish with
    a sequence number or message type encoded as a json string.

    MQTT wildcard topics are supported: '+' matches exactly one topic level
    and '#' matches any number of trailing levels; such topics are converted
    to an equivalent regular expression for matching incoming messages.

    Args:
        topic (string): The MQTT topic to subscribe to
        callback (callable): The callback to call when a new message is
            received. The signature of callback should be
            callback(sequence, topic, type, message)
        ordered (bool): Whether messages on this topic have a sequence number
            that must be checked and queued to ensure that packets are
            received in order

    Raises:
        InternalError: If the underlying MQTT client fails to subscribe.
    """
    # NOTE(review): the original wildcard condition and regex were corrupted
    # in this copy (everything after each '#' character was stripped); the
    # two lines below are reconstructed from standard MQTT wildcard
    # semantics — confirm against the project's history.
    if (('+' in topic) or ('#' in topic)):
        # '+' -> one topic level (no '/'), '#' -> everything remaining.
        regex = re.compile(topic.replace('+', '[^/]+').replace('#', '.*'))
        self.wildcard_queues.append((topic, regex, callback, ordered))
    else:
        self.queues[topic] = PacketQueue(0, callback, ordered)
    try:
        self.client.subscribe(topic, 1, self._on_receive)
    except operationError as exc:
        raise InternalError('Could not subscribe to topic', topic=topic, message=exc.message)
The contents of topic should be in the format created by self.publish with a
sequence number of message type encoded as a json string.
Wildcard topics containing + and # are allowed and
Args:
topic (string): The MQTT topic to subscribe to
callback (callable): The callback to call when a new mesage is received
The signature of callback should be callback(sequence, topic, type, message)
ordered (bool): Whether messages on this topic have a sequence number that must
be checked and queued to ensure that packets are received in order | codesearchnet |
def van_dec_2d(x, skip_connections, output_shape, first_depth, hparams=None):
    """The VAN decoder.

    Upsamples the analogy encoding back to image resolution through a stack of
    transposed convolutions, then blends the prediction with the first skip
    connection using a learned per-pixel mask.

    Args:
        x: The analogy information to decode.
        skip_connections: The encoder layers which can be used as skip
            connections.
        output_shape: The shape of the desired output image.
        first_depth: The depth of the first layer of the van image encoder.
        hparams: The python hparams (uses ``van_keep_prob`` for dropout).

    Returns:
        The decoded image prediction.
    """
    with tf.variable_scope('van_dec'):
        dec = tf.layers.conv2d_transpose(
            x, first_depth * 4, 3, padding='same', activation=tf.nn.relu, strides=2)
        dec = tf.nn.dropout(dec, hparams.van_keep_prob)
        dec = tf.contrib.layers.layer_norm(dec)
        dec = tf.layers.conv2d_transpose(
            dec,
            first_depth * 4,
            3,
            padding='same',
            activation=tf.nn.relu,
            strides=1)
        dec = tf.nn.dropout(dec, hparams.van_keep_prob)
        dec = tf.layers.conv2d_transpose(
            dec,
            first_depth * 2,
            3,
            padding='same',
            activation=tf.nn.relu,
            strides=1)
        dec = tf.nn.dropout(dec, hparams.van_keep_prob)
        dec = tf.contrib.layers.layer_norm(dec)
        dec = tf.layers.conv2d_transpose(
            dec,
            first_depth * 2,
            3,
            padding='same',
            activation=tf.nn.relu,
            strides=2)
        dec = tf.nn.dropout(dec, hparams.van_keep_prob)
        dec = tf.layers.conv2d_transpose(
            dec, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1)
        dec = tf.nn.dropout(dec, hparams.van_keep_prob)
        dec = tf.contrib.layers.layer_norm(dec)
        # Final upsample produces output channels + 1; the extra channel is
        # the blend mask computed below.
        dec = tf.layers.conv2d_transpose(
            dec,
            output_shape[3] + 1,
            3,
            padding='same',
            activation=tf.nn.relu,
            strides=2)
        dec = tf.nn.dropout(dec, hparams.van_keep_prob)
        out_mask = tf.layers.conv2d_transpose(
            dec, output_shape[3] + 1, 3, strides=1, padding='same', activation=None)
        # Channel 3 (sigmoid) gates between the decoded image and the skip
        # connection; channels 0-2 are the decoded RGB prediction.
        mask = tf.nn.sigmoid(out_mask[:, :, :, 3:4])
        out = out_mask[:, :, :, :3]
        return out * mask + skip_connections[0] * (1 - mask)
Args:
x: The analogy information to decode.
skip_connections: The encoder layers which can be used as skip connections.
output_shape: The shape of the desired output image.
first_depth: The depth of the first layer of the van image encoder.
hparams: The python hparams.
Returns:
The decoded image prediction. | juraj-google-style |
def FromEncoded(cls, encoded):
    """Create a DataStreamSelector from an encoded 16-bit value.

    The binary value must be equivalent to what is produced by a call to
    self.encode() and will turn that value back into a DataStreamSelector,
    so that ``DataStreamSelector.FromEncoded(value).encode()`` is a no-op.

    Args:
        encoded (int): The encoded binary representation of a
            DataStreamSelector.

    Returns:
        DataStreamSelector: The decoded selector.

    Raises:
        ArgumentError: if the specifier bits do not map to a known specifier.
    """
    # Bit layout: bits 11 and 15 select the specifier, bits 12-14 the match
    # type, and the low 11 bits the stream id.
    match_spec = (encoded & ((1 << 11) | (1 << 15)))
    match_type = ((encoded & (7 << 12)) >> 12)
    match_id = (encoded & ((1 << 11) - 1))
    if (match_spec not in cls.SpecifierEncodingMap):
        raise ArgumentError('Unknown encoded match specifier', match_spec=match_spec, known_specifiers=cls.SpecifierEncodingMap.keys())
    spec_name = cls.SpecifierEncodingMap[match_spec]
    # The reserved MatchAllCode id means "match every stream" and decodes to
    # a wildcard (None) id.
    if (match_id == cls.MatchAllCode):
        match_id = None
    return DataStreamSelector(match_type, match_id, spec_name)
def guess_leb_size(path):
    """Get the LEB size from a UBIFS superblock.

    Scans the file in chunks for the UBIFS node magic, locates the first
    superblock node and returns its ``leb_size`` field.

    Args:
        path (str): Path to file.

    Returns:
        int: LEB size, or None if no superblock node was found.
    """
    f = open(path, 'rb')
    f.seek(0,2)
    file_size = f.tell()+1
    f.seek(0)
    block_size = None
    for _ in range(0, file_size, FILE_CHUNK_SZ):
        buf = f.read(FILE_CHUNK_SZ)
        for m in re.finditer(UBIFS_NODE_MAGIC, buf):
            start = m.start()
            chdr = nodes.common_hdr(buf[start:start+UBIFS_COMMON_HDR_SZ])
            if chdr and chdr.node_type == UBIFS_SB_NODE:
                sb_start = start + UBIFS_COMMON_HDR_SZ
                sb_end = sb_start + UBIFS_SB_NODE_SZ
                if chdr.len != len(buf[sb_start:sb_end]):
                    # NOTE(review): sb_start is an offset into the current
                    # chunk, but seek() treats it as an absolute file offset —
                    # this looks wrong for any chunk after the first; confirm.
                    f.seek(sb_start)
                    buf = f.read(UBIFS_SB_NODE_SZ)
                else:
                    buf = buf[sb_start:sb_end]
                sbn = nodes.sb_node(buf)
                block_size = sbn.leb_size
                f.close()
                return block_size
    # NOTE(review): a magic string spanning two chunk boundaries would be
    # missed by this chunked scan — presumed acceptable for superblocks,
    # which sit near the start of the image; verify.
    f.close()
    return block_size
Arguments:
Str:path -- Path to file.
Returns:
Int -- LEB size.
Searches file for superblock and retrieves leb size. | juraj-google-style |
def tables_get(self, table_name):
    """Issues a request to retrieve information about a table.

    Args:
        table_name: a tuple representing the full name of the table, used to
            fill the tables URL path template.

    Returns:
        A parsed result object.

    Raises:
        Exception if there is an error performing the operation.
    """
    url = (Api._ENDPOINT + (Api._TABLES_PATH % table_name))
    return datalab.utils.Http.request(url, credentials=self._credentials)
Args:
table_name: a tuple representing the full name of the table.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | codesearchnet |
def remove_alias(alias_names):
    """Remove one or more aliases from the alias table.

    Args:
        alias_names: iterable of alias names to be removed.

    Raises:
        CLIError: if any of the given aliases does not exist. Validation is
            performed before removal, so a missing alias aborts the whole
            operation without committing partial changes.
    """
    alias_table = get_alias_table()
    for alias_name in alias_names:
        if alias_name not in alias_table.sections():
            raise CLIError(ALIAS_NOT_FOUND_ERROR.format(alias_name))
        alias_table.remove_section(alias_name)
    _commit_change(alias_table)
Args:
alias_name: The name of the alias to be removed. | juraj-google-style |
def check_addresses(address_list, is_remote=False):
    """Check that the format of every address in the list is correct.

    Args:
        address_list (list): sequence of (str, int) pairs, each representing
            an IP address and port; where the platform supports it, a plain
            string element may denote a UNIX domain socket path.
        is_remote (bool): when True, UNIX domain socket entries (plain
            strings) are rejected since they cannot refer to remote peers.

    Raises:
        AssertionError: when ``address_list`` contains an invalid element.
        ValueError: when any address in the list has an incorrect format
            (propagated from ``check_address``).
    """
    assert all((isinstance(x, (tuple, string_types)) for x in address_list))
    if (is_remote and any((isinstance(x, string_types) for x in address_list))):
        raise AssertionError('UNIX domain sockets not allowed for remoteaddresses')
    for address in address_list:
        check_address(address)
Arguments:
address_list (list[tuple]):
Sequence of (``str``, ``int``) pairs, each representing an IP
address and port respectively
.. note::
when supported by the platform, one or more of the elements in
the list can be of type ``str``, representing a valid UNIX
domain socket
is_remote (boolean):
Whether or not the address list
Raises:
AssertionError:
raised when ``address_list`` contains an invalid element
ValueError:
raised when any address in the list has an incorrect format
Example:
>>> check_addresses([('127.0.0.1', 22), ('127.0.0.1', 2222)]) | codesearchnet |
def _add_sphere(ax):
    """Add a wireframe unit sphere onto matplotlib 3D axes.

    Args:
        ax: matplotlib 3D axes object.

    Returns:
        The updated matplotlib 3D axes.
    """
    # Parametrize the sphere with 20 azimuthal and 10 polar samples.
    (u, v) = np.mgrid[0:2 * np.pi:20j, 0:np.pi:10j]
    x = np.cos(u) * np.sin(v)
    y = np.sin(u) * np.sin(v)
    z = np.cos(v)
    ax.plot_wireframe(x, y, z, color='grey', linewidth=0.2)
    return ax
Add a wireframe unit sphere onto matplotlib 3D axes
Args:
ax - matplotlib 3D axes object
Returns:
updated matplotlib 3D axes | juraj-google-style |
def determine_opening_indent(indent_texts):
    """Determine the opening indent level for a docstring.

    The opening indent is taken from the second line when that line has any
    text; otherwise (or when there is only one line) the first line's indent
    is used. An empty docstring has indent 0.

    Args:
        indent_texts: sequence of 2-tuples, each containing an integer indent
            level and the corresponding line text.

    Returns:
        The opening indent level as an integer.
    """
    if not indent_texts:
        return 0
    first_indent = indent_texts[0][0]
    if len(indent_texts) == 1:
        return first_indent
    second_indent, second_text = indent_texts[1]
    # A blank second line carries no meaningful indentation; fall back to
    # the first line's indent in that case.
    return first_indent if not second_text else second_indent
The opening indent level is the indent level is the first non-zero indent
level of a non-empty line in the docstring.
Args:
indent_texts: The lines of the docstring as an iterable over 2-tuples
each containing an integer indent level as the first element and
the text as the second element.
Returns:
The opening indent level as an integer. | codesearchnet |
def parse_config(args=sys.argv):
    """Parse the command line using the config_file pattern.

    Args:
        args: argument vector in sys.argv form (program name first); the
            default binds sys.argv at import time, which is the usual CLI
            entry-point behavior.

    Returns:
        The populated namespace object from parser.parse_args().
    """
    parser = argparse.ArgumentParser(
        description='Read in the config file')
    # extant_file validates that the path exists and converts it.
    parser.add_argument(
        'config_file',
        help='Configuration file.',
        metavar='FILE', type=extant_file)
    # Skip args[0] (the program name), mirroring argparse's default.
    return parser.parse_args(args[1:])
Args:
args: sys.argv
Returns:
The populated namespace object from parser.parse_args().
Raises:
TBD | juraj-google-style |
def decode(self, targets, encoder_outputs, attention_bias):
    """Generate logits for each value in the target sequence.

    Args:
        targets: target values for the output sequence;
            int tensor with shape [batch_size, target_length].
        encoder_outputs: continuous representation of the input sequence;
            float tensor with shape [batch_size, input_length, hidden_size].
        attention_bias: float tensor with shape [batch_size, 1, 1,
            input_length].

    Returns:
        float32 tensor with shape [batch_size, target_length, vocab_size].
    """
    with tf.name_scope("decode"):
        decoder_inputs = self.embedding_softmax_layer(targets)
        with tf.name_scope("shift_targets"):
            # Shift targets right by one position (teacher forcing): prepend
            # a zero embedding and drop the final position.
            decoder_inputs = tf.pad(
                decoder_inputs, [[0, 0], [1, 0], [0, 0]])[:, :-1, :]
        with tf.name_scope("add_pos_encoding"):
            length = tf.shape(decoder_inputs)[1]
            decoder_inputs += model_utils.get_position_encoding(
                length, self.params.hidden_size)
        if self.train:
            mlperf_log.transformer_print(
                key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,
                value=self.params.layer_postprocess_dropout)
            decoder_inputs = tf.nn.dropout(
                decoder_inputs, 1 - self.params.layer_postprocess_dropout)
        # Causal mask: each position may only attend to earlier positions.
        decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias(
            length)
        outputs = self.decoder_stack(
            decoder_inputs, encoder_outputs, decoder_self_attention_bias,
            attention_bias)
        logits = self.embedding_softmax_layer.linear(outputs)
        return logits
Args:
targets: target values for the output sequence.
int tensor with shape [batch_size, target_length]
encoder_outputs: continuous representation of input sequence.
float tensor with shape [batch_size, input_length, hidden_size]
attention_bias: float tensor with shape [batch_size, 1, 1, input_length]
Returns:
float32 tensor with shape [batch_size, target_length, vocab_size] | juraj-google-style |
def _kill_process_type(self, process_type, allow_graceful=False, check_alive=True, wait=False):
    """Kill all processes of a given type.

    If the process type is PROCESS_TYPE_REDIS_SERVER, all Redis servers are
    killed; any other type is expected to have exactly one process. If a
    process was started in valgrind, an exception is raised when it exits
    with a non-zero code.

    Args:
        process_type: The type of the process to kill.
        allow_graceful (bool): Send a SIGTERM first and give the process time
            to exit gracefully. If that doesn't work, then use SIGKILL.
        check_alive (bool): If true, then we expect the process to be alive
            and will raise an exception if the process is already dead.
        wait (bool): If true, then this method will not return until the
            process in question has exited.

    Raises:
        Exception: if the process had already died and check_alive is true,
            or if a valgrind-run process had a non-zero exit code.
    """
    process_infos = self.all_processes[process_type]
    if (process_type != ray_constants.PROCESS_TYPE_REDIS_SERVER):
        assert (len(process_infos) == 1)
    for process_info in process_infos:
        process = process_info.process
        # poll() returning non-None means the process has already exited.
        if (process.poll() is not None):
            if check_alive:
                raise Exception("Attempting to kill a process of type '{}', but this process is already dead.".format(process_type))
            else:
                continue
        if process_info.use_valgrind:
            process.terminate()
            process.wait()
            if (process.returncode != 0):
                # Surface valgrind findings together with the captured output.
                message = 'Valgrind detected some errors in process of type {}. Error code {}.'.format(process_type, process.returncode)
                if (process_info.stdout_file is not None):
                    with open(process_info.stdout_file, 'r') as f:
                        message += ('\nPROCESS STDOUT:\n' + f.read())
                if (process_info.stderr_file is not None):
                    with open(process_info.stderr_file, 'r') as f:
                        message += ('\nPROCESS STDERR:\n' + f.read())
                raise Exception(message)
            continue
        if process_info.use_valgrind_profiler:
            # SIGINT tells the valgrind profiler to flush its results.
            os.kill(process.pid, signal.SIGINT)
            time.sleep(0.1)
        if allow_graceful:
            process.terminate()
            # Escalate to SIGKILL if the process ignores SIGTERM for 1s.
            timer = threading.Timer(1, (lambda process: process.kill()), [process])
            try:
                timer.start()
                process.wait()
            finally:
                timer.cancel()
            if (process.poll() is not None):
                continue
        process.kill()
        if wait:
            process.wait()
    del self.all_processes[process_type]
If the process type is PROCESS_TYPE_REDIS_SERVER, then we will kill all
of the Redis servers.
If the process was started in valgrind, then we will raise an exception
if the process has a non-zero exit code.
Args:
process_type: The type of the process to kill.
allow_graceful (bool): Send a SIGTERM first and give the process
time to exit gracefully. If that doesn't work, then use
SIGKILL. We usually want to do this outside of tests.
check_alive (bool): If true, then we expect the process to be alive
and will raise an exception if the process is already dead.
wait (bool): If true, then this method will not return until the
process in question has exited.
Raises:
This process raises an exception in the following cases:
1. The process had already died and check_alive is true.
2. The process had been started in valgrind and had a non-zero
exit code. | codesearchnet |
def create(self, vid):
    """Creates a new VLAN resource.

    Args:
        vid (str): The VLAN ID to create.

    Returns:
        True if the create was successful, otherwise False. An invalid VLAN
        ID short-circuits to False without touching the device.
    """
    if not isvlan(vid):
        return False
    return self.configure('vlan %s' % vid)
Args:
vid (str): The VLAN ID to create
Returns:
True if create was successful otherwise False | juraj-google-style |
def display(self, updating_pv=None):
    """Displays the visualization through IPython.

    The visualization has up to 3 parts: facets-dive, facets-overview and a
    paginated data table.

    Args:
        updating_pv: A PCollectionVisualization object. When provided, each
            visualization part reuses the display ids from the initial
            display of updating_pv and updates those web elements in place
            instead of creating new ones.
    """
    data = self._to_dataframe()
    # Integer column labels are prefixed with the PCollection variable name
    # to keep them unique and self-describing in the rendered widgets.
    data.columns = [self._pcoll_var + '.' + str(column) if isinstance(column, int) else column for column in data.columns]
    # Dict cells are stringified since the widgets cannot render them raw.
    data = data.map(lambda x: str(x) if isinstance(x, dict) else x)
    if updating_pv:
        if data.empty:
            # An empty update would blank the existing visualization.
            _LOGGER.debug('Skip a visualization update due to empty data.')
        else:
            self._display_dataframe(data.copy(deep=True), updating_pv)
            if self._display_facets:
                self._display_dive(data.copy(deep=True), updating_pv)
                self._display_overview(data.copy(deep=True), updating_pv)
    else:
        self._display_dataframe(data.copy(deep=True))
        if self._display_facets:
            self._display_dive(data.copy(deep=True))
            self._display_overview(data.copy(deep=True))
Args:
updating_pv: A PCollectionVisualization object. When provided, the
display_id of each visualization part will inherit from the initial
display of updating_pv and only update that visualization web element
instead of creating new ones.
The visualization has 3 parts: facets-dive, facets-overview and paginated
data table. Each part is assigned an auto-generated unique display id
(the uniqueness is guaranteed throughout the lifespan of the PCollection
variable). | github-repos |
def _QueryHashes(self, digests):
    """Queries VirusTotal for specific hashes.

    Args:
        digests (list[str]): hashes to look up; joined into a single batched
            API request.

    Returns:
        dict[str, object]: JSON response or None on connection error.
    """
    url_parameters = {'apikey': self._api_key, 'resource': ', '.join(digests)}
    try:
        json_response = self.MakeRequestAndDecodeJSON(self._VIRUSTOTAL_API_REPORT_URL, 'GET', params=url_parameters)
    except errors.ConnectionError as exception:
        # Degrade gracefully: log and return None instead of propagating.
        json_response = None
        logger.error('Unable to query VirusTotal with error: {0!s}.'.format(exception))
    return json_response
Args:
digests (list[str]): hashes to look up.
Returns:
dict[str, object]: JSON response or None on error. | codesearchnet |
def function_table(self, function_id=None):
    """Fetch and parse the function table from Redis.

    Args:
        function_id: currently unused by the implementation; all function
            entries are returned regardless of its value.

    Returns:
        A dictionary that maps function IDs (hex strings) to information
        about the function (DriverID, Module, Name).
    """
    self._check_connected()
    function_table_keys = self.redis_client.keys((ray.gcs_utils.FUNCTION_PREFIX + '*'))
    results = {}
    for key in function_table_keys:
        info = self.redis_client.hgetall(key)
        # Redis returns raw bytes; decode/hex-encode for presentation.
        function_info_parsed = {'DriverID': binary_to_hex(info[b'driver_id']), 'Module': decode(info[b'module']), 'Name': decode(info[b'name'])}
        results[binary_to_hex(info[b'function_id'])] = function_info_parsed
    return results
Returns:
A dictionary that maps function IDs to information about the
function. | codesearchnet |
def google_api_build_results(config, auth, api_call, results):
    """Builds the BigQuery table to house the Google API call results.

    Optional piece of the recipe; creates a BigQuery table for results. Takes
    ``results``, which defines a bigquery endpoint, and fills in any missing
    schema/format fields before ensuring the table exists.

    Args:
        config: recipe configuration providing the project id.
        auth (string): either "user" or "service" to make the BigQuery call.
        api_call (dict): the JSON for the API call as defined in recipe.
        results (dict): defines where the data will be written.

    Returns:
        dict: a modified results JSON with additional API values added.
    """
    if 'bigquery' in results:
        # Derive the schema from the API discovery document when the recipe
        # does not specify one explicitly.
        if 'schema' not in results['bigquery']:
            results['bigquery']['schema'] = Discovery_To_BigQuery(api_call['api'], api_call['version'], api_call.get('key', None), api_call.get('labels', None)).method_schema(api_call['function'], api_call.get('iterate', False))
        if 'format' not in results['bigquery']:
            results['bigquery']['format'] = 'JSON'
            results['bigquery']['skip_rows'] = 0
        # Create the destination table if missing; never overwrite data.
        table_create(config, results['bigquery'].get('auth', auth), config.project, results['bigquery']['dataset'], results['bigquery']['table'], results['bigquery']['schema'], overwrite=False)
    return results
Optional piece of the recipe, will create a BigQuery table for results.
Takes results, which defines a bigquery endpoint, and adds fields.
Args:
auth (string): either "user" or "service" to make the BigQuery call.
api_call (dict): the JSON for the API call as defined in recipe.
results (dict): defines where the data will be written
Returns (dict):
A modified results JSON with additional API values added.
Raises:
ValueError: If a required key in the recipe is missing. | github-repos |
def GetHasher(cls, hasher_name):
    """Retrieves an instance of a specific hasher.

    Args:
        hasher_name (str): the name of the hasher to retrieve;
            case-insensitive.

    Returns:
        BaseHasher: a new hasher instance.

    Raises:
        KeyError: if no hasher class is registered for the given name.
    """
    hasher_name = hasher_name.lower()
    if hasher_name not in cls._hasher_classes:
        raise KeyError(
            'hasher class not set for name: {0:s}.'.format(hasher_name))
    hasher_class = cls._hasher_classes[hasher_name]
    return hasher_class()
Args:
hasher_name (str): the name of the hasher to retrieve.
Returns:
BaseHasher: hasher.
Raises:
KeyError: if hasher class is not set for the corresponding name. | juraj-google-style |
def internal_convert_to_tensor_or_indexed_slices(value, dtype=None, name=None, as_ref=False):
    """Converts the given object to a `Tensor` or an `IndexedSlices`.

    If `value` is an `IndexedSlices` or `SparseTensor` it is returned
    unmodified. Otherwise, it is converted to a `Tensor` using
    `convert_to_tensor()`.

    Args:
        value: An `IndexedSlices`, `SparseTensor`, or an object that can be
            consumed by `convert_to_tensor()`.
        dtype: (Optional.) The required `DType` of the returned `Tensor` or
            `IndexedSlices`.
        name: (Optional.) A name to use if a new `Tensor` is created.
        as_ref: True if the caller wants the results as ref tensors.

    Returns:
        A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.

    Raises:
        ValueError: If `dtype` does not match the element type of `value`.
    """
    # Eager tensors in a graph context must be captured via convert_to_tensor.
    if isinstance(value, ops.EagerTensor) and (not context.executing_eagerly()):
        return ops.convert_to_tensor(value, dtype=dtype, name=name, as_ref=as_ref)
    elif isinstance(value, internal.NativeObject):
        if dtype and (not dtypes.as_dtype(dtype).is_compatible_with(value.dtype)):
            raise ValueError(f'Incompatible tensor conversion requested to `dtype` {dtypes.as_dtype(dtype).name} for `value` ({value}) with dtype {value.dtype.name}.')
        return value
    else:
        return ops.convert_to_tensor(value, dtype=dtype, name=name, as_ref=as_ref)
If `value` is an `IndexedSlices` or `SparseTensor` it is returned
unmodified. Otherwise, it is converted to a `Tensor` using
`convert_to_tensor()`.
Args:
value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
name: (Optional.) A name to use if a new `Tensor` is created.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`. | github-repos |
def helper(*commands):
    """Decorate a function to be the helper function of commands.

    The decorated function is tagged with a ``__help_targets__`` attribute
    listing the command names that should trigger it; the function object is
    otherwise returned unchanged.

    Arguments:
        commands: names of commands that should trigger this function object.

    Returns:
        The decorator that tags and returns its target function.
    """
    def tag(func):
        func.__help_targets__ = list(commands)
        return func
    return tag
Arguments:
commands: Names of command that should trigger this function object.
---------------------------
Interface of helper methods:
@helper('some-command')
def help_foo(self, args):
'''
Arguments:
args: A list of arguments.
Returns:
A string that is the help message.
'''
pass | juraj-google-style |
def _get_val_list(obj, path_list, reverse=False):
try:
y = getattr(obj, path_list[0])
except AttributeError:
return []
if len(path_list) == 1:
return [y]
else:
val_list = [x for a in y for x in _get_val_list(a, path_list[1:], reverse)]
if reverse:
val_list.reverse()
return val_list | Extract values from nested objects by attribute names.
Objects contain attributes which are named references to objects. This will descend
down a tree of nested objects, starting at the given object, following the given
path.
Args:
obj: object
Any type of object
path_list: list
Attribute names
reverse: bool
Reverse the list of values before concatenation.
Returns:
list of objects | juraj-google-style |
def push(self, stream, reading):
    """Push a reading into a stream, updating any associated stream walkers.

    Args:
        stream (DataStream): the stream to push the reading into.
        reading (IOTileReading): the reading to push. The reading is shallow
            copied so the caller's object is not mutated.
    """
    reading = copy.copy(reading)
    reading.stream = stream.encode()
    if stream.buffered:
        output_buffer = stream.output
    # NOTE(review): output_buffer is only bound when stream.buffered is true,
    # yet the queue-walker loop below reads it unconditionally — presumably
    # unbuffered streams never have matching queue walkers; confirm.
    if self.id_assigner is not None:
        reading.reading_id = self.id_assigner(stream, reading)
    try:
        self._engine.push(reading)
    except StorageFullError:
        # Rollover policy: only retry after erasing when rollover is enabled
        # for this buffer type (streaming vs storage).
        if (stream.output and not self._rollover_streaming) or (not stream.output and not self._rollover_storage):
            raise
        self._erase_buffer(stream.output)
        self._engine.push(reading)
    for walker in self._queue_walkers:
        if walker.selector.output == output_buffer:
            walker.notify_added(stream)
    # Fire monitor callbacks; a None selector means "all streams".
    for selector in self._monitors:
        if selector is None or selector.matches(stream):
            for callback in self._monitors[selector]:
                callback(stream, reading)
    for walker in self._virtual_walkers:
        if walker.matches(stream):
            walker.push(stream, reading)
    self._last_values[stream] = reading
Args:
stream (DataStream): the stream to push the reading into
reading (IOTileReading): the reading to push | juraj-google-style |
def reply(self, status=200, new_response=False, **kw):
    """Defines the mock response.

    Arguments:
        status (int, optional): response status code. Defaults to ``200``.
        new_response (bool, optional): when True, build a fresh ``Response``
            instead of reusing the mock's current one.
        **kw (dict): optional keyword arguments passed to ``pook.Response``
            constructor.

    Returns:
        pook.Response: mock response definition instance.
    """
    res = Response(**kw) if new_response else self._response
    # Keep the existing status when status is falsy (e.g. explicitly 0/None).
    res.status(status or res._status)
    res.mock = self
    self._response = res
    return res
Arguments:
status (int, optional): response status code. Defaults to ``200``.
**kw (dict): optional keyword arguments passed to ``pook.Response``
constructor.
Returns:
pook.Response: mock response definition instance. | juraj-google-style |
def anonymous_login(services):
    """Initialize service clients without authenticating to Globus Auth.

    Note:
        Clients may have reduced functionality without authentication.
        Failures are reported to stdout and the failing service is simply
        omitted from the result.

    Arguments:
        services (str or list of str): the services to initialize clients
            for; a single name is accepted and wrapped in a list.

    Returns:
        dict: the clients requested, indexed by service name.
    """
    if isinstance(services, str):
        services = [services]
    clients = {}
    for serv in services:
        try:
            clients[serv] = KNOWN_CLIENTS[serv](http_timeout=STD_TIMEOUT)
        except KeyError:
            print("Error: No known client for '{}' service.".format(serv))
        except Exception:
            # Best-effort: client constructors may fail when the service
            # disallows anonymous access; report and continue.
            print("Error: Unable to create client for '{}' service.\nAnonymous access may not be allowed.".format(serv))
    return clients
Note:
Clients may have reduced functionality without authentication.
Arguments:
services (str or list of str): The services to initialize clients for.
Returns:
dict: The clients requested, indexed by service name. | codesearchnet |
def zero_fill_missing_phenotypes(self):
    """Fill in missing phenotypes by listing any missing entry as negative.

    Any phenotype name present in the frame but absent from a row's
    ``phenotype_calls`` dict is added with a value of 0.

    Returns:
        CellDataFrame: a modified copy; when the frame is already uniform,
            an unmodified copy is returned.
    """
    if self.is_uniform(verbose=False):
        return self.copy()
    output = self.copy()
    def _do_fill(d, names):
        # Append the missing phenotype names to the call dict with zeros.
        old_names = list(d.keys())
        old_values = list(d.values())
        missing = (set(names) - set(old_names))
        return dict(zip((old_names + list(missing)), (old_values + ([0] * len(missing)))))
    pnames = self.phenotypes
    output['phenotype_calls'] = output.apply((lambda x: _do_fill(x['phenotype_calls'], pnames)), 1)
    return output
Returns:
CellDataFrame: The CellDataFrame modified. | codesearchnet |
def __init__(self, name, aliases=None, description=None, urls=None):
    """Initializes a structure data type definition.

    Args:
        name (str): name.
        aliases (Optional[list[str]]): aliases.
        description (Optional[str]): description.
        urls (Optional[list[str]]): URLs.
    """
    super(StructureDefinition, self).__init__(
        name, aliases=aliases, description=description, urls=urls)
    # Set when this structure belongs to a family; None otherwise.
    self.family_definition = None
Args:
name (str): name.
aliases (Optional[list[str]]): aliases.
description (Optional[str]): description.
urls (Optional[list[str]]): URLs. | juraj-google-style |
def trace_stop(self):
    """Stops collecting trace data.

    Returns:
        ``None``

    Raises:
        errors.JLinkException: if the DLL reports failure to stop the trace.
    """
    cmd = enums.JLinkTraceCommand.STOP
    res = self._dll.JLINKARM_TRACE_Control(cmd, 0)
    # The DLL returns 1 on failure for this control command.
    if (res == 1):
        raise errors.JLinkException('Failed to stop trace.')
    return None
Args:
self (JLink): the ``JLink`` instance.
Returns:
``None`` | codesearchnet |
def get_property(self, prop):
    """Access a nested value using dot-separated keys.

    Args:
        prop (str): property path in the form of dot-separated keys.

    Returns:
        The property value if every key along the path exists, else ``None``.
    """
    node = self
    for key in prop.split('.'):
        # Stop as soon as any path segment is missing.
        if key not in node:
            return None
        node = node[key]
    return node
Args:
prop (:obj:`str`): Property in the form of dot separated keys
Returns:
Property value if exists, else `None` | juraj-google-style |
def _StructPackEncoder(wire_type, format):
    """Return a constructor for an encoder for a fixed-width field.

    Args:
        wire_type: The field's wire type, for encoding tags.
        format: The format string to pass to struct.pack().

    Returns:
        A function that, given (field_number, is_repeated, is_packed),
        returns the matching field-encoding function.
    """
    value_size = struct.calcsize(format)
    def SpecificEncoder(field_number, is_repeated, is_packed):
        # Bind struct.pack to a local for speed in the inner loops.
        local_struct_pack = struct.pack
        if is_packed:
            # Packed: single length-delimited tag, then raw values.
            tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
            local_EncodeVarint = _EncodeVarint
            def EncodePackedField(write, value):
                write(tag_bytes)
                local_EncodeVarint(write, len(value) * value_size)
                for element in value:
                    write(local_struct_pack(format, element))
            return EncodePackedField
        elif is_repeated:
            # Repeated (non-packed): tag before every element.
            tag_bytes = TagBytes(field_number, wire_type)
            def EncodeRepeatedField(write, value):
                for element in value:
                    write(tag_bytes)
                    write(local_struct_pack(format, element))
            return EncodeRepeatedField
        else:
            tag_bytes = TagBytes(field_number, wire_type)
            def EncodeField(write, value):
                write(tag_bytes)
                return write(local_struct_pack(format, value))
            return EncodeField
    return SpecificEncoder
Args:
wire_type: The field's wire type, for encoding tags.
format: The format string to pass to struct.pack(). | juraj-google-style |
def is_parameterized(val: Any) -> bool:
    """Returns whether the object is parameterized with any Symbols.

    A value is parameterized when it is an instance of sympy.Basic, or when
    it has an `_is_parameterized_` method that returns a truthy value.

    Returns:
        True if the value has any unresolved Symbols and False otherwise.
        If no `_is_parameterized_` implementation exists, or it returns
        NotImplemented, this defaults to False.
    """
    if isinstance(val, sympy.Basic):
        return True
    getter = getattr(val, '_is_parameterized_', None)
    result = (NotImplemented if (getter is None) else getter())
    if (result is not NotImplemented):
        return result
    else:
        return False
A value is parameterized when it has an `_is_parameterized_` method and
that method returns a truthy value, or if the value is an instance of
sympy.Basic.
Returns:
True if the gate has any unresolved Symbols
and False otherwise. If no implementation of the magic
method above exists or if that method returns NotImplemented,
this will default to False. | codesearchnet |
def write_entry_to_file(file_descriptor, entry_comment, entry_key):
    """Writes a localization entry to a .strings file.

    Args:
        file_descriptor (file): The file to write the entry to.
        entry_comment (str): The entry's comment.
        entry_key (str): The entry's key, used as both key and value.
    """
    # Escape any unescaped double quote so the generated key stays a valid
    # quoted .strings token.
    escaped_key = re.sub(r'([^\\])"', '\\1\\"', entry_key)
    # NOTE(review): the original format string was corrupted ("u'\n' %
    # entry_comment" raises TypeError); reconstructed as a C-style block
    # comment, the conventional .strings comment syntax — confirm against
    # the project's expected output.
    file_descriptor.write(u'/* %s */\n' % entry_comment)
    file_descriptor.write(u'"%s" = "%s";\n' % (escaped_key, escaped_key))
Args:
file_descriptor (file, instance): The file to write the entry to.
entry_comment (str): The entry's comment.
entry_key (str): The entry's key. | juraj-google-style |
def create_model(text_in, timesteps, phase):
    """Creates a 2 layer LSTM model with dropout.

    Args:
        text_in: The input text as ASCII ordinals in a Tensor.
        timesteps: The number of timesteps in the sequence.
        phase: Phase controls whether or not dropout is active. In training
            mode we want to perform dropout, but in test we want to disable
            it.

    Returns:
        The logits.
    """
    with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
        # Embedding lookups are pinned to CPU.
        with tf.device('/cpu:0'):
            embedded = text_in.embedding_lookup(CHARS, [EMBEDDING_SIZE])
        # Split the flat batch into sequences, then stack two LSTM layers.
        lstm = (embedded
                .cleave_sequence(timesteps)
                .sequence_lstm(LOWER)
                .sequence_lstm(UPPER))
        return (lstm.squash_sequence()
                .dropout(keep_prob=0.8, phase=phase)
                .fully_connected(CHARS, activation_fn=None))
Args:
text_in: The input text as ASCII ordinals in a Tensor.
timesteps: The number of timesteps in the sequence.
phase: Phase controls whether or not dropout is active. In training mode
we want to perform dropout, but in test we want to disable it.
Returns:
The logits. | juraj-google-style |
def _parse_symbol(self, sym):
    """Parse a string with a symbol to extract a string representing an element.

    Args:
        sym (str): A symbol to be parsed.

    Returns:
        A string with the parsed element symbol. None if no parsing was
        possible. Non-standard interpretations (e.g. water oxygens) are
        warned about and recorded in ``self.errors``.
    """
    # Common CIF oddities mapped to element symbols; empty string means the
    # token carries no single-element meaning.
    special = {'Hw': 'H', 'Ow': 'O', 'Wat': 'O', 'wat': 'O', 'OH': '', 'OH2': '', 'NO3': 'N'}
    parsed_sym = None
    m_sp = re.match('|'.join(special.keys()), sym)
    if m_sp:
        parsed_sym = special[m_sp.group()]
    elif Element.is_valid_symbol(sym[:2].title()):
        # Prefer a two-letter element match before a one-letter one.
        parsed_sym = sym[:2].title()
    elif Element.is_valid_symbol(sym[0].upper()):
        parsed_sym = sym[0].upper()
    else:
        m = re.match('w?[A-Z][a-z]*', sym)
        if m:
            parsed_sym = m.group()
    # Warn whenever the parse was non-trivial (special-cased or reformatted).
    if ((parsed_sym is not None) and (m_sp or (not re.match('{}\\d*'.format(parsed_sym), sym)))):
        msg = '{} parsed as {}'.format(sym, parsed_sym)
        warnings.warn(msg)
        self.errors.append(msg)
    return parsed_sym
Args:
sym (str): A symbol to be parsed.
Returns:
A string with the parsed symbol. None if no parsing was possible. | codesearchnet |
def token_of_request(self, url, body=None, content_type=None):
    """Sign a request, producing a management token.

    The signed data is "<path>[?<query>]\n" and, for form-encoded requests,
    the request body is appended before signing.

    Args:
        url: the URL of the request to sign.
        body: the request body to include in the signature, if any.
        content_type: the Content-Type of the request body.

    Returns:
        str: the management token in the form "<access_key>:<signature>".
    """
    parsed_url = urlparse(url)
    query = parsed_url.query
    path = parsed_url.path
    data = path
    if query != '':
        data = ''.join([data, '?', query])
    data = ''.join([data, "\n"])
    if body:
        # Only form-encoded bodies participate in the signature.
        mimes = [
            'application/x-www-form-urlencoded'
        ]
        if content_type in mimes:
            data += body
    return '{0}:{1}'.format(self.__access_key, self.__token(data))
Args:
url: 待签名请求的url
body: 待签名请求的body
content_type: 待签名请求的body的Content-Type
Returns:
管理凭证 | juraj-google-style |
def fpn_map_rois_to_levels(boxes):
    """Assign boxes to FPN levels 2~5 based on box area.

    Args:
        boxes (nx4): RoI boxes.

    Returns:
        [tf.Tensor]: 4 tensors for level 2-5. Each tensor is a vector of
            indices of boxes in its level.
        [tf.Tensor]: 4 tensors, the gathered boxes in each level.
            Be careful that the returned tensor could be empty.
    """
    sqrtarea = tf.sqrt(tf_area(boxes))
    # k = floor(4 + log2(sqrt(area) / 224)): a 224x224 box is canonical for
    # level 4; the 1e-6 guards log(0) for degenerate boxes.
    level = tf.cast(tf.floor(
        4 + tf.log(sqrtarea * (1. / 224) + 1e-6) * (1.0 / np.log(2))), tf.int32)

    # Clamp to [2, 5]: everything <= 2 goes to P2, >= 5 to P5.
    level_ids = [
        tf.where(level <= 2),
        tf.where(tf.equal(level, 3)),
        tf.where(tf.equal(level, 4)),
        tf.where(level >= 5)]
    level_ids = [tf.reshape(x, [-1], name='roi_level{}_id'.format(i + 2))
                 for i, x in enumerate(level_ids)]
    num_in_levels = [tf.size(x, name='num_roi_level{}'.format(i + 2))
                     for i, x in enumerate(level_ids)]
    # Track per-level RoI counts in the training summaries.
    add_moving_summary(*num_in_levels)

    level_boxes = [tf.gather(boxes, ids) for ids in level_ids]
    return level_ids, level_boxes
Args:
boxes (nx4):
Returns:
[tf.Tensor]: 4 tensors for level 2-5. Each tensor is a vector of indices of boxes in its level.
[tf.Tensor]: 4 tensors, the gathered boxes in each level.
Be careful that the returned tensor could be empty. | juraj-google-style |
def get_enterprise_customer_for_user(auth_user):
    """Return the enterprise customer instance for the given user.

    Users are associated with an enterprise customer via the
    ``EnterpriseCustomerUser`` model:

    1. if the given user is associated with an enterprise customer, return it.
    2. otherwise return ``None``.

    Arguments:
        auth_user (contrib.auth.User): Django User.

    Returns:
        (EnterpriseCustomer): enterprise customer associated with the current
        user, or None when no association exists.
    """
    # Resolve the model through the app registry instead of a direct import.
    EnterpriseCustomerUser = apps.get_model('enterprise', 'EnterpriseCustomerUser')
    try:
        return EnterpriseCustomerUser.objects.get(user_id=auth_user.id).enterprise_customer
    except EnterpriseCustomerUser.DoesNotExist:
        return None
Some users are associated with an enterprise customer via `EnterpriseCustomerUser` model,
1. if given user is associated with any enterprise customer, return enterprise customer.
2. otherwise return `None`.
Arguments:
auth_user (contrib.auth.User): Django User
Returns:
(EnterpriseCustomer): enterprise customer associated with the current user. | juraj-google-style |
def on_get(self, req, resp, handler=None, **kwargs):
    """Respond to a GET HTTP request using the resource-list retrieval flow.

    Default flow for such requests is to retrieve the list of existing
    resource instances and prepare their representations by calling the list
    retrieval method handler.

    Args:
        req (falcon.Request): request object instance.
        resp (falcon.Response): response object instance to be modified.
        handler (method): list method handler to be called. Defaults to
            ``self.list``.
        **kwargs: additional keyword arguments retrieved from url template.
    """
    target = handler or self.list
    self.handle(target, req, resp, **kwargs)
This request handler assumes that GET requests are associated with
resource list retrieval. Thus default flow for such requests is:
* Retrieve list of existing resource instances and prepare their
representations by calling list retrieval method handler.
Args:
req (falcon.Request): request object instance.
resp (falcon.Response): response object instance to be modified
handler (method): list method handler to be called. Defaults
to ``self.list``.
**kwargs: additional keyword arguments retrieved from url template. | codesearchnet |
def ends_with(self, suffix):
    """Find all words ending with a suffix.

    Args:
        suffix: A suffix to be searched for (matched case-insensitively).

    Returns:
        A list of all words found.
    """
    needle = suffix.lower().encode(encoding="ascii")
    result = cgaddag.gdg_ends_with(self.gdg, needle)
    words = []
    node = result
    # Walk the C-side linked list of results, decoding each word.
    while node:
        words.append(node.contents.str.decode("ascii"))
        node = node.contents.next
    # Free the C-side result list before returning.
    cgaddag.gdg_destroy_result(result)
    return words
Args:
suffix: A suffix to be searched for.
Returns:
A list of all words found. | juraj-google-style |
def setUserPwd(self, user, pwd):
    """Replace the stub :meth:`getSkypeToken` method with one that connects via
    the Microsoft account flow using the given credentials. Avoids storing the
    account password in an accessible way.

    Args:
        user (str): username or email address of the connecting account
        pwd (str): password of the connecting account
    """
    def getSkypeToken(self):
        # Credentials are captured in this closure rather than stored on the
        # instance.
        self.liveLogin(user, pwd)
    # Bind the replacement implementation to this instance only.
    self.getSkypeToken = MethodType(getSkypeToken, self)
given credentials. Avoids storing the account password in an accessible way.
Args:
user (str): username or email address of the connecting account
pwd (str): password of the connecting account | juraj-google-style |
def flowread(flow_or_path, quantize=False, concat_axis=0, *args, **kwargs):
    """Read an optical flow map.

    Args:
        flow_or_path (ndarray or str): A flow map or filepath.
        quantize (bool): whether to read quantized pair, if set to True,
            remaining args will be passed to :func:`dequantize_flow`.
        concat_axis (int): The axis that dx and dy are concatenated,
            can be either 0 or 1. Ignored if quantize is False.

    Returns:
        ndarray: Optical flow represented as a (h, w, 2) numpy array

    Raises:
        ValueError: if an ndarray is passed but is not a valid (h, w, 2) flow.
        TypeError: if ``flow_or_path`` is neither a filename nor an ndarray.
        IOError: if the file is not a valid (quantized) flow file.
    """
    # Pass-through: an already-loaded flow array is only validated.
    if isinstance(flow_or_path, np.ndarray):
        if (flow_or_path.ndim != 3) or (flow_or_path.shape[-1] != 2):
            raise ValueError('Invalid flow with shape {}'.format(
                flow_or_path.shape))
        return flow_or_path
    elif not is_str(flow_or_path):
        raise TypeError(
            '"flow_or_path" must be a filename or numpy array, not {}'.format(
                type(flow_or_path)))

    if not quantize:
        # Binary .flo format: 4-byte 'PIEH' magic, int32 width and height,
        # then h*w*2 float32 values.
        with open(flow_or_path, 'rb') as f:
            try:
                header = f.read(4).decode('utf-8')
            except Exception:
                raise IOError('Invalid flow file: {}'.format(flow_or_path))
            else:
                if header != 'PIEH':
                    raise IOError(
                        'Invalid flow file: {}, header does not contain PIEH'.
                        format(flow_or_path))

            w = np.fromfile(f, np.int32, 1).squeeze()
            h = np.fromfile(f, np.int32, 1).squeeze()
            flow = np.fromfile(f, np.float32, w * h * 2).reshape((h, w, 2))
    else:
        # Quantized flow is stored as a single-channel image with dx and dy
        # concatenated along `concat_axis`.
        assert concat_axis in [0, 1]
        cat_flow = imread(flow_or_path, flag='unchanged')
        if cat_flow.ndim != 2:
            raise IOError(
                '{} is not a valid quantized flow file, its dimension is {}.'.
                format(flow_or_path, cat_flow.ndim))
        assert cat_flow.shape[concat_axis] % 2 == 0
        dx, dy = np.split(cat_flow, 2, axis=concat_axis)
        flow = dequantize_flow(dx, dy, *args, **kwargs)

    return flow.astype(np.float32)
Args:
flow_or_path (ndarray or str): A flow map or filepath.
quantize (bool): whether to read quantized pair, if set to True,
remaining args will be passed to :func:`dequantize_flow`.
concat_axis (int): The axis that dx and dy are concatenated,
can be either 0 or 1. Ignored if quantize is False.
Returns:
ndarray: Optical flow represented as a (h, w, 2) numpy array | juraj-google-style |
def export_default_scripts(target_folder, source_folder=None, raise_errors=False, verbose=False):
    """Try to instantiate all the scripts found in source_folder and save each
    script that could be instantiated into a .b26 file in target_folder.

    Args:
        target_folder: target path for the .b26 files.
        source_folder: location of the python script files to scan.
        raise_errors: if True, re-raise instantiation errors instead of
            collecting them.
        verbose: if True, print progress and a summary of failures.
    """
    scripts_to_load = get_classes_in_folder(source_folder, Script)

    if verbose:
        print('attempt to load {:d} scripts: '.format(len(scripts_to_load)))
    loaded_scripts, failed, loaded_instruments = Script.load_and_append(scripts_to_load, raise_errors=raise_errors)

    for name, value in loaded_scripts.items():
        filename = os.path.join(target_folder, '{:s}.b26'.format(name))
        value.save_b26(filename)

    if verbose:
        print('\n================================================')
        print('================================================')
        print('saved {:d} scripts, {:d} failed'.format(len(loaded_scripts), len(failed)))
        if failed != {}:
            for error_name, error in failed.items():
                # Bug fix: the old code printed a tuple, e.g.
                # "('failed to create script: ', 'x', ...)"; emit a clean
                # message instead.
                print('failed to create script: {} {}'.format(error_name, error))
saves each script that could be instantiated into a .b26 file in the folder path
Args:
target_folder: target path for .b26 files
source_folder: location of python script files | juraj-google-style |
def get_wulff_shape(self, material_id):
    """Constructs a Wulff shape for a material.

    Args:
        material_id (str): Materials Project material_id, e.g. 'mp-123'.

    Returns:
        pymatgen.analysis.wulff.WulffShape
    """
    from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
    from pymatgen.analysis.wulff import WulffShape, hkl_tuple_to_str

    structure = self.get_structure_by_material_id(material_id)
    surfaces = self.get_surface_data(material_id)['surfaces']
    # Use the lattice of the conventional standard cell for the Wulff
    # construction.
    lattice = SpacegroupAnalyzer(structure).get_conventional_standard_structure().lattice
    miller_energy_map = {}
    for surf in surfaces:
        miller = tuple(surf['miller_index'])
        # Keep one energy per Miller index; a reconstructed surface
        # overwrites any previously stored entry for that index.
        if ((miller not in miller_energy_map) or surf['is_reconstructed']):
            miller_energy_map[miller] = surf['surface_energy']
    (millers, energies) = zip(*miller_energy_map.items())
    return WulffShape(lattice, millers, energies)
Args:
material_id (str): Materials Project material_id, e.g. 'mp-123'.
Returns:
pymatgen.analysis.wulff.WulffShape | codesearchnet |
def handle_http_error(error: HTTPException) -> ResponseReturnValue:
    """DQM HTTP Error Response.

    Args:
        * error: HTTP error

    Returns:
        * DQMResponse for the error with the relevant status code; a missing
          error code falls back to 500.
    """
    status = error.code or 500
    body = DQMResponse(name=error.name, description=error.description, code=status)
    return body, status
Args:
* error: HTTP error
Returns:
* DQMResponse for the error with the relevant status code | github-repos |
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal
    distribution. A weight decay is added only if one is specified.

    Args:
        name: name of the variable
        shape: list of ints
        stddev: standard deviation of a truncated Gaussian
        wd: add L2Loss weight decay multiplied by this float. If None, weight
            decay is not added for this Variable.

    Returns:
        Variable Tensor
    """
    # Storage dtype is controlled by the --use_fp16 command-line flag.
    dtype = (tf.float16 if FLAGS.use_fp16 else tf.float32)
    var = _variable_on_cpu(name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
    if (wd is not None):
        # Register the scaled L2 penalty in the 'losses' collection so it is
        # summed into the total loss elsewhere.
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor | codesearchnet |
def prune(t):
    """Return the currently defining instance of t.

    As a side effect, collapses chains of instantiated type variables
    (path compression), so the result is always either an uninstantiated
    TypeVariable or a TypeOperator.

    Args:
        t: The type to be pruned.

    Returns:
        An uninstantiated TypeVariable or a TypeOperator.
    """
    if isinstance(t, TypeVariable) and t.instance is not None:
        # Point t directly at the end of the instantiation chain.
        t.instance = prune(t.instance)
        return t.instance
    return t
As a side effect, collapses the list of type instances. The function Prune
is used whenever a type expression has to be inspected: it will always
return a type expression which is either an uninstantiated type variable or
a type operator; i.e. it will skip instantiated variables, and will
actually prune them from expressions to remove long chains of instantiated
variables.
Args:
t: The type to be pruned
Returns:
An uninstantiated TypeVariable or a TypeOperator | codesearchnet |
def metta_config(quarter, num_dimensions):
    """Build metta metadata for a quarter's SOC code classifier matrix.

    Args:
        quarter (str): quarter, in format '2015Q1'.
        num_dimensions (int): number of features in the matrix.

    Returns:
        dict: metadata suitable for metta.archive_train_test.
    """
    first_day, last_day = quarter_boundaries(quarter)
    feature_names = ['doc2vec_{}'.format(dim) for dim in range(num_dimensions)]
    config = {
        'start_time': first_day,
        'end_time': last_day,
        'prediction_window': 3,
        'label_name': 'onet_soc_code',
        'label_type': 'categorical',
        'matrix_id': 'job_postings_{}'.format(quarter),
        'feature_names': feature_names,
    }
    return config
Args:
quarter (str) quarter, in format '2015Q1'
num_dimensions (int) Number of features in matrix
Returns: (dict) metadata suitable for metta.archive_train_test | juraj-google-style |
def _create_table_and_update_context(node, context):
    """Create an aliased table for a SqlNode.

    Updates the relevant Selectable global context.

    Args:
        node: SqlNode, the current node.
        context: CompilationContext, global compilation state and metadata.

    Returns:
        Table, the newly aliased SQLAlchemy table.
    """
    schema_type_name = sql_context_helpers.get_schema_type_name(node, context)
    # Alias the table so the same schema table can appear more than once in a
    # single query.
    table = context.compiler_metadata.get_table(schema_type_name).alias()
    # Record the selectable for this node's query path for later lookups.
    context.query_path_to_selectable[node.query_path] = table
    return table
Updates the relevant Selectable global context.
Args:
node: SqlNode, the current node.
context: CompilationContext, global compilation state and metadata.
Returns:
Table, the newly aliased SQLAlchemy table. | juraj-google-style |
def _read_csv_with_offset_pandas_on_ray(fname, num_splits, start, end, kwargs, header):
    """Use a Ray task to read a chunk of a CSV into a Pandas DataFrame.

    Note: Ray functions are not detected by codecov (thus pragma: no cover)

    Args:
        fname: The filename of the file to open.
        num_splits: The number of splits (partitions) to separate the
            DataFrame into.
        start: The start byte offset.
        end: The end byte offset.
        kwargs: The kwargs for the Pandas `read_csv` function.
        header: The header of the file.

    Returns:
        A list containing the split Pandas DataFrames and the Index as the
        last element. If there is not `index_col` set, then we just return
        the length. This is used to determine the total length of the
        DataFrame to build a default Index.
    """
    index_col = kwargs.get('index_col', None)
    # Read only the [start, end) byte slice and prepend the header bytes so
    # pandas sees a well-formed CSV fragment.
    bio = file_open(fname, 'rb')
    bio.seek(start)
    to_read = (header + bio.read((end - start)))
    bio.close()
    pandas_df = pandas.read_csv(BytesIO(to_read), **kwargs)
    # Replace column labels with positions; presumably the real labels are
    # restored by the caller -- TODO confirm against the call site.
    pandas_df.columns = pandas.RangeIndex(len(pandas_df.columns))
    if (index_col is not None):
        # Keep the parsed index to return, but give the partition itself a
        # positional index.
        index = pandas_df.index
        pandas_df.index = pandas.RangeIndex(0, len(pandas_df))
    else:
        # Without an index_col only the partition length is needed.
        index = len(pandas_df)
    return (_split_result_for_readers(1, num_splits, pandas_df) + [index])
Note: Ray functions are not detected by codecov (thus pragma: no cover)
Args:
fname: The filename of the file to open.
num_splits: The number of splits (partitions) to separate the DataFrame into.
start: The start byte offset.
end: The end byte offset.
kwargs: The kwargs for the Pandas `read_csv` function.
header: The header of the file.
Returns:
A list containing the split Pandas DataFrames and the Index as the last
element. If there is not `index_col` set, then we just return the length.
This is used to determine the total length of the DataFrame to build a
default Index. | codesearchnet |
def GetFileSystemTypeIndicators(cls, path_spec, resolver_context=None):
    """Determines if a file contains a supported file system types.

    Args:
        path_spec (PathSpec): path specification.
        resolver_context (Optional[Context]): resolver context, where None
            represents the built-in context which is not multi process safe.

    Returns:
        list[str]: supported format type indicators.
    """
    # Lazily build and cache the file-system specification store and the
    # remainder list (presumably analyzers without signature specifications
    # -- confirm against _GetSpecificationStore).
    if (cls._file_system_remainder_list is None or
        cls._file_system_store is None):
        specification_store, remainder_list = cls._GetSpecificationStore(
            definitions.FORMAT_CATEGORY_FILE_SYSTEM)
        cls._file_system_remainder_list = remainder_list
        cls._file_system_store = specification_store

    # Lazily create the signature scanner for the cached store.
    if cls._file_system_scanner is None:
        cls._file_system_scanner = cls._GetSignatureScanner(
            cls._file_system_store)

    return cls._GetTypeIndicators(
        cls._file_system_scanner, cls._file_system_store,
        cls._file_system_remainder_list, path_spec,
        resolver_context=resolver_context)
Args:
path_spec (PathSpec): path specification.
resolver_context (Optional[Context]): resolver context, where None
represents the built-in context which is not multi process safe.
Returns:
list[str]: supported format type indicators. | juraj-google-style |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.