code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def match(obj, matchers=TYPES):
    """Match the given input against the available file type matchers.

    Args:
        obj: path to file, bytes or bytearray.
        matchers: iterable of matcher objects; defaults to the module's
            registered TYPES.

    Returns:
        Type instance if a matcher accepts the input. Otherwise None.

    Raises:
        TypeError: if obj is not a supported type (raised by get_bytes).
    """
    buf = get_bytes(obj)
    for matcher in matchers:
        if matcher.match(buf):
            return matcher
    return None
def set_parameters(self, parameters_dict):
    """Set the subarray parameters and notify subscribers.

    Args:
        parameters_dict (dict): Dictionary of Subarray parameters.
    """
    DB.set_hash_value(self._key, 'parameters', parameters_dict)
    # Broadcast so listeners can react to the parameter change.
    self.publish('parameters_updated')
def generate_enum_doc(enum_descriptor, locations, path, name_prefix=''):
    """Generate documentation for a protobuf enum.

    Args:
        enum_descriptor: descriptor_pb2.EnumDescriptorProto instance for the
            enum to generate docs for.
        locations: Dictionary of location path tuples to
            descriptor_pb2.SourceCodeInfo.Location instances.
        path: Path tuple to the enum definition.
        name_prefix: Optional prefix for this enum's name.
    """
    print(make_subsection(name_prefix + enum_descriptor.name))
    location = locations[path]
    if location.HasField('leading_comments'):
        print(textwrap.dedent(location.leading_comments))
    row_tuples = []
    for value_index, value in enumerate(enum_descriptor.value):
        # (2, value_index) presumably addresses the enum's value entries in
        # the SourceCodeInfo path (2 == EnumDescriptorProto.value field no.).
        field_location = locations[path + (2, value_index)]
        row_tuples.append((
            make_code(value.name),
            value.number,
            textwrap.fill(get_comment_from_location(field_location), INFINITY),
        ))
    print_table(('Name', 'Number', 'Description'), row_tuples)
def get_ams_access_token(accountname, accountkey):
    """Get a Media Services authentication token.

    Args:
        accountname (str): Azure Media Services account name.
        accountkey (str): Azure Media Services key.

    Returns:
        HTTP response. JSON body.
    """
    accountkey_encoded = urllib.parse.quote(accountkey, safe='')
    # NOTE(review): the original inserted a stray ' ' before '&scope=' in the
    # form body, which would corrupt the scope parameter — fixed here, but
    # confirm against the AMS OAuth endpoint behavior.
    body = ('grant_type=client_credentials&client_id=' + accountname
            + '&client_secret=' + accountkey_encoded
            + '&scope=urn%3aWindowsAzureMediaServices')
    return do_ams_auth(ams_auth_endpoint, body)
def _is_ndb(self):
    """Determine whether the model of the instance is an NDB model.

    Returns:
        bool: True when the model is an NDB model, False when it is a
        DB model.

    Raises:
        TypeError: if the model is neither an NDB nor a DB model class.
    """
    if isinstance(self._model, type):
        if _NDB_MODEL is not None and issubclass(self._model, _NDB_MODEL):
            return True
        elif issubclass(self._model, db.Model):
            return False
    # Non-class models, or classes outside both hierarchies, are errors.
    raise TypeError(
        'Model class not an NDB or DB model: {0}.'.format(self._model))
def get_raw_data(self, url, *args, **kwargs):
    """Get the content under ``url`` as bytes (i.e. for binary data).

    Args:
        url: address of the wanted data.
        **additional_headers: (optional) additional headers to be used
            with the request (consumed by ``_prepare_headers``).

    Returns:
        bytes with the response body, or None when the response status
        is not 200.
    """
    res = self._conn.get(url, headers=self._prepare_headers(**kwargs))
    if res.status_code == 200:
        return res.content
    return None
def _span_attrs_to_pb(span_attr, proto_type):
    """Convert a span attribute dict to protobuf.

    Handles Links, Attributes and TimeEvents.

    Args:
        span_attr (dict): A dict that needs to be converted to protobuf.
        proto_type (str): The name of the nested message type on
            ``trace_pb2.Span``.

    Returns:
        An instance of the specified protobuf message.
    """
    # Look up the nested message class by name and instantiate it.
    attr_pb = getattr(trace_pb2.Span, proto_type)()
    ParseDict(span_attr, attr_pb)
    return attr_pb
def select_serial_number_row(self, serial_number):
    """Select the row(s) for identification number ``serial_number``.

    Args:
        serial_number: serial number to look up in the id column.

    Returns:
        pandas.DataFrame: the matching rows of the underlying table.
    """
    sheet = self.table
    col = self.db_sheet_cols.id
    rows = sheet.loc[:, col] == serial_number
    return sheet.loc[rows, :]
def _decode_helper(self, pred_logits, format):
    """Convert prediction logits into decoded strings and confidence scores.

    Args:
        pred_logits (`torch.Tensor`):
            Model prediction logits of shape (batch, seq_len, vocab).
        format (`Union[DecoderType, str]`):
            Type of model prediction. Must be one of ['char', 'bpe', 'wp'].

    Returns:
        `tuple`: dec_strs (`List[str]`): decoded strings;
        conf_scores (`List[float]`): confidence score per prediction.
    """
    if format == DecodeType.CHARACTER:
        decoder = self.char_decode
        eos_token = 1
        eos_str = '[s]'
    elif format == DecodeType.BPE:
        decoder = self.bpe_decode
        eos_token = 2
        # NOTE(review): this literal was truncated in the source dump;
        # '#' matches the upstream implementation — confirm.
        eos_str = '#'
    elif format == DecodeType.WORDPIECE:
        decoder = self.wp_decode
        eos_token = 102
        eos_str = '[SEP]'
    else:
        raise ValueError(f'Format {format} is not supported.')
    dec_strs, conf_scores = [], []
    batch_size = pred_logits.size(0)
    batch_max_length = pred_logits.size(1)
    _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
    # Drop the first position (start token) from the argmax indices.
    preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
    preds_str = decoder(preds_index)
    preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
    preds_max_prob = preds_max_prob[:, 1:]
    for index in range(batch_size):
        # Truncate the decoded string at the end-of-sequence marker.
        pred_eos = preds_str[index].find(eos_str)
        pred = preds_str[index][:pred_eos]
        pred_index = preds_index[index].tolist()
        pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
        pred_max_prob = preds_max_prob[index][:pred_eos_index + 1]
        # Confidence is the product of per-step max probabilities.
        confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
        dec_strs.append(pred)
        conf_scores.append(confidence_score)
    return (dec_strs, conf_scores)
def _coords2idx(self, coords):
    """Convert sky coordinates to pixel indices.

    Args:
        coords (:obj:`astropy.coordinates.SkyCoord`): Sky coordinates.

    Returns:
        Pixel indices of the coordinates, with the same shape as the input
        coordinates. Pixels outside the map are given an index equal to
        the number of pixels in the map (the KD-tree's out-of-range
        sentinel).
    """
    x = self._coords2vec(coords)
    # query() returns (distances, indices); only the indices are needed.
    idx = self._kd.query(x, p=self._metric_p,
                         distance_upper_bound=self._max_pix_scale)
    return idx[1]
def get_cookiecutter_config(template, default_config=None, version=None):
    """Obtain the configuration used for cookiecutter templating.

    Args:
        template: Path to the template.
        default_config (dict, optional): The default configuration.
        version (str, optional): The git SHA or branch to use when
            checking out the template. Defaults to the latest version.

    Returns:
        tuple: The cookiecutter repo directory and the config dict.
    """
    default_config = default_config or {}
    config_dict = cc_config.get_user_config()
    repo_dir, _ = cc_repository.determine_repo_dir(
        template=template,
        abbreviations=config_dict['abbreviations'],
        clone_to_dir=config_dict['cookiecutters_dir'],
        checkout=version,
        no_input=True)
    context_file = os.path.join(repo_dir, 'cookiecutter.json')
    # User defaults take precedence over the cookiecutter config defaults.
    context = cc_generate.generate_context(
        context_file=context_file,
        default_context={**config_dict['default_context'], **default_config})
    return repo_dir, cc_prompt.prompt_for_config(context)
def _generate_queries_for_title_symbols(title_field, query_value):
    """Generate queries for symbol tokens against the whitespace-tokenized titles field.

    Splits the value into whitespace tokens and, for tokens containing
    symbol-indicating characters (e.g. "g-2", "SU(2)"), builds match
    queries against the search variation of the title field.

    Returns:
        (dict): The query or queries for the whitespace-tokenized field of
        titles; an empty dict if no such tokens exist.
    """
    values_tokenized_by_whitespace = query_value.split()
    symbol_queries = []
    for value in values_tokenized_by_whitespace:
        if any(character in value
               for character in ElasticSearchVisitor.TITLE_SYMBOL_INDICATING_CHARACTER):
            symbol_queries.append(
                generate_match_query(
                    '.'.join([title_field, FieldVariations.search]),
                    value,
                    with_operator_and=False))
    return wrap_queries_in_bool_clauses_if_more_than_one(
        symbol_queries, use_must_clause=True)
def get_image_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Union[int, List[int]], vision_feature_select_strategy: str, **kwargs):
    """Obtain image last hidden states from the vision tower.

    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`):
            The tensors corresponding to the input images.
        vision_feature_layer (`Union[int, List[int]]`):
            The index of the layer to select the vision feature. If multiple
            indices are provided, the vision features of the corresponding
            indices will be concatenated. (Currently unused by this body.)
        vision_feature_select_strategy (`str`):
            Strategy used to select the vision feature from the vision
            backbone. Can be one of `"default"` or `"full"`.

    Returns:
        image_features (`torch.Tensor`): Image feature tensor of shape
        `(num_images, image_length, embed_dim)`.
    """
    if vision_feature_select_strategy not in ['default', 'full']:
        # Bug fix: report the local argument, not self.* (which may not exist).
        raise ValueError(f'Unexpected select feature strategy: {vision_feature_select_strategy}')
    # Drop kwargs that were explicitly passed as None.
    kwargs = {k: v for k, v in kwargs.items() if v is not None}
    image_outputs = self.vision_model(pixel_values, output_hidden_states=False, **kwargs)
    hidden_state = image_outputs.last_hidden_state
    return hidden_state
def GetFormattedField(self, event, field_name):
    """Format the specified field of an event.

    Args:
        event (EventObject): event.
        field_name (str): name of the field.

    Returns:
        str: value of the field ('-' when missing or None).
    """
    # A field may have a dedicated formatting callback registered.
    callback_name = self._FIELD_FORMAT_CALLBACKS.get(field_name, None)
    callback_function = None
    if callback_name:
        callback_function = getattr(self, callback_name, None)
    if callback_function:
        output_value = callback_function(event)
    else:
        output_value = getattr(event, field_name, '-')
    if output_value is None:
        output_value = '-'
    elif not isinstance(output_value, py2to3.STRING_TYPES):
        # Coerce non-string values to their string representation.
        output_value = '{0!s}'.format(output_value)
    return output_value
def format_diff_xml(a_xml, b_xml):
    """Create a diff between two XML documents.

    Both documents are pretty-printed first so the diff is line-oriented
    and stable with respect to formatting differences.

    Args:
        a_xml: str
        b_xml: str

    Returns:
        str: `Differ`-style delta, one entry per line.
    """
    return '\n'.join(
        difflib.ndiff(
            reformat_to_pretty_xml(a_xml).splitlines(),
            reformat_to_pretty_xml(b_xml).splitlines(),
        )
    )
def json_set_auths(recipe, auth):
    """Recursively find 'auth' fields in script JSON and set them.

    Args:
        recipe: (dict) A dictionary representation of the JSON script.
        auth: (string) Either 'service' or 'user'.

    Returns:
        The same structure (mutated in place) with all 'auth' fields replaced.
    """
    if isinstance(recipe, dict):
        if 'auth' in recipe:
            recipe['auth'] = auth
        for value in recipe.values():
            json_set_auths(value, auth)
    elif isinstance(recipe, (list, tuple)):
        for value in recipe:
            json_set_auths(value, auth)
    return recipe
def _table_filename(tbl_filename):
    """Determine whether the table path should end in .gz and return it.

    A .gz path is preferred only if it exists and is newer than any
    regular text file path.

    Raises:
        :class:`delphin.exceptions.ItsdbError`: when neither the .gz
            nor the text file exists.
    """
    tbl_filename = str(tbl_filename)
    txfn = _normalize_table_path(tbl_filename)
    gzfn = txfn + '.gz'
    if os.path.exists(txfn):
        # Prefer the compressed file only when it is strictly newer.
        if os.path.exists(gzfn) and os.stat(gzfn).st_mtime > os.stat(txfn).st_mtime:
            tbl_filename = gzfn
        else:
            tbl_filename = txfn
    elif os.path.exists(gzfn):
        tbl_filename = gzfn
    else:
        raise ItsdbError('Table does not exist at {}(.gz)'.format(tbl_filename))
    return tbl_filename
def stub_batch(cls, size, **kwargs):
    """Stub a batch of instances of the given class, with overridden attrs.

    Args:
        size (int): the number of instances to stub.
        **kwargs: attribute overrides forwarded to ``cls.stub``.

    Returns:
        list: the stubbed instances.
    """
    return [cls.stub(**kwargs) for _ in range(size)]
async def on_message(message):
    """The on_message event handler for this module.

    Args:
        message (discord.Message): Input message.
    """
    server = message.server
    author = message.author
    channel = message.channel
    content = message.content
    data = datatools.get_data()
    # Do nothing when the module is not activated for this server.
    if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
        return
    # Ignore DMs and the bot's own messages.
    if server is not None and author != channel.server.me:
        flipchecked = api_flipcheck.flipcheck(content)
        if flipchecked:
            await client.send_typing(channel)
            await client.send_message(channel, flipchecked)
def kill_pid(self, pid):
    """Terminate a process by pid.

    NOTE(review): despite the name, this sends SIGTERM (terminate), not
    SIGKILL — confirm that is the intended behavior.

    Args:
        pid (int): process id of the process to terminate.
    """
    try:
        p = psutil.Process(pid)
        p.terminate()
        self.info_log('Killed [pid:%s][name:%s]' % (p.pid, p.name()))
    except psutil.NoSuchProcess:
        self.error_log('No such process: [pid:%s]' % pid)
def __eq__(self, other):
    """Two device specs are the same if they have the same type and qubits.

    Args:
        other (DeviceSpecification): other DeviceSpecification.

    Returns:
        bool: whether self and other are equal.
    """
    return type(self) is type(other) and self._qubits == other._qubits
async def addFeedNodes(self, name, items):
    """Call a feed function and yield what it yields (typically Node()s).

    Args:
        name (str): The name of the feed record type.
        items (list): A list of records of the given feed type.

    Yields:
        (object): Values from the feed function, typically Node() objects.

    Raises:
        s_exc.NoSuchName: if no feed function is registered under ``name``.
    """
    func = self.core.getFeedFunc(name)
    if func is None:
        raise s_exc.NoSuchName(name=name)
    logger.info(f'adding feed nodes ({name}): {len(items)}')
    async for node in func(self, items):
        yield node
def squeeze(name, x, factor=2, reverse=True):
    """Block-wise spatial squeezing of x to increase the number of channels.

    NOTE(review): two reshape lines were truncated in the source dump; the
    forward branch below is reconstructed from the standard implementation
    of this operation — confirm against the original project.

    Args:
        name: Used for variable scoping.
        x: 4-D Tensor of shape (batch_size X H X W X C).
        factor: Factor by which the spatial dimensions should be squeezed.
        reverse: Squeeze or unsqueeze operation.

    Returns:
        x: 4-D Tensor of shape (batch_size X (H//factor) X (W//factor) X
        (C*factor^2)). If reverse is True, then factor acts as (1/factor).
    """
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        shape = common_layers.shape_list(x)
        if factor == 1:
            return x
        height = int(shape[1])
        width = int(shape[2])
        n_channels = int(shape[3])
        if not reverse:
            assert height % factor == 0 and width % factor == 0
            x = tf.reshape(x, [-1, height // factor, factor,
                               width // factor, factor, n_channels])
            x = tf.transpose(x, [0, 1, 3, 5, 2, 4])
            x = tf.reshape(x, [-1, height // factor, width // factor,
                               n_channels * factor * factor])
        else:
            x = tf.reshape(
                x, (-1, height, width, int(n_channels / factor ** 2), factor, factor))
            x = tf.transpose(x, [0, 1, 4, 2, 5, 3])
            x = tf.reshape(x, (-1, int(height * factor), int(width * factor),
                               int(n_channels / factor ** 2)))
        return x
def match_variables(self, pattern, return_type='name'):
    """Return variables whose names match the provided regex pattern.

    Args:
        pattern (str): A regex pattern matched (via re.search) against
            every variable name.
        return_type (str): What to return. Must be one of:
            'name': Returns a list of names of matching variables.
            'variable': Returns a list of Variable objects whose names
            match.
    """
    regex = re.compile(pattern)
    matches = [v for v in self.variables.values() if regex.search(v.name)]
    if return_type.startswith('var'):
        return matches
    return [v.name for v in matches]
def __init__(self, cache_file_name, update_cache=True):
    """Open the cache file and read previous results.

    Args:
        cache_file_name: string file name.
        update_cache: Specifies whether ApiCache should write out the
            cache file when closing it.
    """
    self._cache_file_name = cache_file_name
    self._cache = self._read_cache_from_file()
    self._update_cache = update_cache
def set_configuration_from_sharded_input_tensors(self, input_tensors):
    """Set the shapes and types of the queue tuple elements from sharded tensors.

    ``input_tensors`` is a list of lists of Tensors whose types and shapes
    are used to set the queue configuration. The length of the outer list
    is the number of shards required, and each inner list is the tuple of
    Tensors to use to determine the types and shapes of the corresponding
    shard. This method depends on the shard dimension, and calling it
    freezes the shard policy.

    Args:
        input_tensors: list of lists of Tensors. The outer list length
            corresponds to the desired number of shards, and each inner
            list is the size and shape of the desired configuration of
            the corresponding shard.

    Raises:
        ValueError: if any inner list is not of length
            self.number_of_tuple_elements, or the inner lists do not
            combine to form a consistent unsharded shape.
        TypeError: if the types of the Tensors in the inner lists do not
            match.
    """
    if not self._frozen:
        # Unset the tuple shapes in case the configuration becomes
        # transiently inconsistent.
        self._tuple_shapes = None
    number_of_shards = len(input_tensors)
    self.set_number_of_shards(number_of_shards)
    for t in input_tensors:
        if len(t) != self.number_of_tuple_elements:
            raise ValueError(
                f'input_tensors is {str(input_tensors)} but must be a list of '
                'lists, where each inner list has length '
                f'number_of_tuple_elements={self.number_of_tuple_elements}')
    # Transpose the shapes: one list of per-shard shapes per tuple element.
    sharded_shapes = [[t[i].shape for t in input_tensors]
                      for i in range(self.number_of_tuple_elements)]
    unsharded_shapes = [policy.get_unsharded_shape(s)
                        for policy, s in zip(self._sharding_policies, sharded_shapes)]
    self.set_tuple_shapes(unsharded_shapes)
    for i in range(1, self.number_of_shards):
        for t1, t2 in zip(input_tensors[0], input_tensors[i]):
            if t1.dtype != t2.dtype:
                raise TypeError(
                    'types of the tuple elements of input_tensors '
                    f'{str(input_tensors)} are not consistent')
    self.set_tuple_types([t.dtype for t in input_tensors[0]])
def of_definition(service_def):
    """Create a connection to a Streaming Analytics service.

    The single service is defined by `service_def`, which can be one of:

    * The `service credentials` copied from the `Service credentials` page
      of the service console (not the Streams console), in JSON format.
    * A JSON object (`dict`) of the form
      ``{"type": "streaming-analytics", "name": "service name",
      "credentials": {...}}`` with the service credentials under the
      ``credentials`` key.

    Args:
        service_def (dict): Definition of the service to connect to.

    Returns:
        StreamingAnalyticsConnection: Connection to the defined service.
    """
    vcap_services = streamsx.topology.context._vcap_from_service_definition(service_def)
    service_name = streamsx.topology.context._name_from_service_definition(service_def)
    return StreamingAnalyticsConnection(vcap_services, service_name)
def delete(self, name, version, _lock=True):
    """Remove a file from the storage.

    Args:
        name: name of the file being deleted.
            May contain slashes that are treated as path separators.
        version: file "version" that is meant to be deleted.
            If the stored file has a newer version than provided,
            it will not be deleted.
        _lock: whether or not to acquire locks.
            This is for internal use only; normal users should always
            leave it set to True.

    Returns:
        bool: whether or not the file has been deleted.
    """
    link_path = self._link_path(name)
    if _lock:
        file_lock = _exclusive_lock(self._lock_path('links', name))
    else:
        file_lock = _no_lock()
    with file_lock:
        logger.debug('Acquired or inherited lock for link %s.', name)
        if not _path_exists(link_path):
            raise FiletrackerFileNotFoundError
        if _file_version(link_path) > version:
            logger.info('Tried to delete newer version of %s (%d < %d), ignoring.',
                        name, version, _file_version(link_path))
            return False
        digest = self._digest_for_link(name)
        with _exclusive_lock(self._lock_path('blobs', digest)):
            logger.debug('Acquired lock for blob %s.', digest)
            should_delete_blob = False
            with self._db_transaction() as txn:
                logger.debug('Started DB transaction (deleting link).')
                digest_bytes = digest.encode()
                link_count = self.db.get(digest_bytes, txn=txn)
                if link_count is None:
                    raise RuntimeError('File exists but has no key in db')
                link_count = int(link_count)
                if link_count == 1:
                    # Last reference: remove the blob's bookkeeping inside
                    # the txn and delete the blob file after committing.
                    logger.debug('Deleting last link to blob %s.', digest)
                    self.db.delete(digest_bytes, txn=txn)
                    self.db.delete('{}:logical_size'.format(digest).encode(), txn=txn)
                    should_delete_blob = True
                else:
                    new_count = str(link_count - 1).encode()
                    self.db.put(digest_bytes, new_count, txn=txn)
                logger.debug('Committing DB transaction (deleting link).')
            logger.debug('Committed DB transaction (deleting link).')
            os.unlink(link_path)
            logger.debug('Deleted link %s.', name)
            if should_delete_blob:
                os.unlink(self._blob_path(digest))
            logger.debug('Released lock for blob %s.', digest)
        logger.debug('Released (or gave back) lock for link %s.', name)
    return True
def _FormatOtherFileToken(self, token_data):
    """Format an other-file token as a dictionary of values.

    Args:
        token_data (bsm_token_data_other_file32): AUT_OTHER_FILE32 token data.

    Returns:
        dict[str, str]: token values.
    """
    # Combine whole seconds and the sub-second remainder into microseconds.
    timestamp = token_data.microseconds + (
        token_data.timestamp * definitions.MICROSECONDS_PER_SECOND)
    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)
    date_time_string = date_time.CopyToDateTimeString()
    # Token names are NUL-padded fixed-width byte strings.
    return {'string': token_data.name.rstrip('\x00'),
            'timestamp': date_time_string}
def GetLayerFromFeatureServiceByURL(self, url, layerName='', returnURLOnly=False):
    """Obtain a layer from a feature service by URL reference.

    Args:
        url (str): The URL of the feature service.
        layerName (str): The name of the layer. Defaults to ``""``.
        returnURLOnly (bool): Return only the URL of the layer.
            Defaults to ``False``.

    Returns:
        When ``returnURLOnly`` is ``True``, the URL of the layer;
        otherwise the result of ``GetLayerFromFeatureService``.

    Raises:
        common.ArcRestHelperError: wrapping any underlying failure.
    """
    try:
        fs = arcrest.agol.FeatureService(
            url=url, securityHandler=self._securityHandler)
        return self.GetLayerFromFeatureService(
            fs=fs, layerName=layerName, returnURLOnly=returnURLOnly)
    except Exception:  # narrowed from a bare except; still rewrapped
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            'function': 'GetLayerFromFeatureServiceByURL',
            'line': line,
            'filename': filename,
            'synerror': synerror,
        })
def decode(tokens):
    """Decode a list of tokens to a unicode string.

    Re-inserts a single space between adjacent tokens that both start
    with an alphanumeric character (the inverse of the tokenizer).

    Args:
        tokens: a list of Unicode strings.

    Returns:
        a unicode string.
    """
    token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens]
    ret = []
    for i, token in enumerate(tokens):
        if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]:
            ret.append(u' ')
        ret.append(token)
    return ''.join(ret)
def kill(container, rm=True):
    """Kill a container.

    Args:
        container: Container name or ID.
        rm (bool): Also remove the container afterwards. Defaults to True.
    """
    container = get_container(container)
    if not container:
        # NOTE(review): at this point `container` has been overwritten with
        # the falsy lookup result, so the message loses the original name.
        raise Exception('No such container: %s' % container)
    unbind_all(container['ip'])
    sudo('docker kill %s' % container['name'])
    if rm:
        sudo('docker rm %s' % container['name'])
def GaussianBlur(X, ksize_width, ksize_height, sigma_x, sigma_y):
    """Apply Gaussian blur to the given data.

    Args:
        X: data to blur.
        ksize_width: Gaussian kernel width.
        ksize_height: Gaussian kernel height.
        sigma_x: Gaussian kernel standard deviation in the X direction.
        sigma_y: Gaussian kernel standard deviation in the Y direction.
    """
    return image_transform(
        X,
        cv2.GaussianBlur,
        ksize=(ksize_width, ksize_height),
        sigmaX=sigma_x,
        sigmaY=sigma_y,
    )
def write_config_json(config_file, data):
    """Serialize an object to disk as JSON.

    Args:
        config_file (str): The path on disk to save the file.
        data (object): The object to serialize.

    Raises:
        ArcRestHelperError: wrapping any failure during serialization
            or writing.
    """
    try:
        with open(config_file, 'w') as outfile:
            json.dump(data, outfile)
    except Exception:  # narrowed from a bare except; still rewrapped
        line, filename, synerror = trace()
        # Bug fix: the error metadata previously reported the wrong
        # function name ('init_config_json').
        raise ArcRestHelperError({
            'function': 'write_config_json',
            'line': line,
            'filename': filename,
            'synerror': synerror,
        })
def configure_vlan(self, vid, commands):
    """Configure the specified VLAN using the given commands.

    Args:
        vid (str): The VLAN ID to configure.
        commands: The list of commands to run under the vlan context.

    Returns:
        True if the commands completed successfully.
    """
    commands = make_iterable(commands)
    # Enter the vlan configuration context before the user commands.
    commands.insert(0, 'vlan %s' % vid)
    return self.configure(commands)
def in_sorted(values, value):
    """Check if a value is in a sorted list.

    Uses :func:`bisect.bisect_left` to find the insertion point for
    ``value`` and checks the element there.

    Args:
        values (List[int]): Integers sorted in ascending order.
        value (int): Value to check if contained in ``values``.

    Returns:
        bool: Indicating if the value is contained.
    """
    index = bisect.bisect_left(values, value)
    return index < len(values) and values[index] == value
def __init__(self, ones_prefactor):
    """Initialize an AllOnes layer.

    Args:
        ones_prefactor: the scalar to emit when all ones is detected.
    """
    super().__init__()
    self.ones_prefactor = ones_prefactor
def power(maf=0.5, beta=0.1, N=100, cutoff=5e-08):
    """Estimate power for a given allele frequency, effect size and sample size.

    Assumption:
        beta_ML is distributed as N(0, 1/(2*maf*(1-maf)*N)) under the null
        hypothesis and as N(beta, 1/(2*maf*(1-maf)*N)) under the alternative.

    Args:
        maf: minor allele frequency of the SNP (between 0.0 and 0.5).
        beta: effect size of the SNP (sign is ignored).
        N: sample size (number of individuals).
        cutoff: p-value threshold for declaring a detection.

    Returns:
        power: probability of detecting a SNP with the given parameters.
        pvals: array of p-values over the integration grid.
    """
    assert 0.0 <= maf <= 0.5, 'maf needs to be between 0.0 and 0.5, got %f' % maf
    beta = abs(beta)
    # Standard error of the ML effect-size estimate.
    std_beta = 1.0 / np.sqrt(N * (2.0 * maf) * (1.0 - maf))
    non_centrality = beta
    # NOTE: the original drew an unused np.random.normal sample here;
    # removed as dead code (it only perturbed the global RNG state).
    n_grid = 100000
    beta_in = np.arange(0.5 / (n_grid + 1.0),
                        (n_grid - 0.5) / (n_grid + 1.0),
                        1.0 / (n_grid + 1.0))
    # Quantiles of the alternative distribution of beta_ML.
    beta_theoretical = st.norm.isf(beta_in) * std_beta + non_centrality
    # Chi-squared (1 dof) survival function of the squared z-score.
    pvals = st.chi2.sf((beta_theoretical / std_beta) ** 2, 1.0)
    power = (pvals < cutoff).mean()
    return (power, pvals)
def export(self, path, session):
    """Export the module with the variables from the session into `path`.

    Note that it is the module definition in the ModuleSpec used to create
    this module that gets exported. The session is only used to provide
    the value of variables.

    Args:
        path: path where to export the module to.
        session: session where to export the variables from.

    Raises:
        RuntimeError: if the default graph or the session graph differs
            from the graph the module was instantiated in.
    """
    if self._graph is not tf_v1.get_default_graph():
        raise RuntimeError("default graph differs from the graph where the "
                           "module was instantiated.")
    if self._graph is not session.graph:
        raise RuntimeError("session graph differs from the graph where the "
                           "module was instantiated.")
    self._impl.export(path, session)
def var(series):
    """Return the variance of values in a series, or NaN for non-numeric data.

    Args:
        series (pandas.Series): column to summarize.
    """
    if np.issubdtype(series.dtype, np.number):
        return series.var()
    return np.nan
def filter_by(cls, **kwargs):
    """Same as SQLAlchemy's filter_by, with `limit` and `reverse` extras.

    The special keyword arguments `limit` and `reverse` limit the results
    and reverse the order (descending id) respectively.

    Args:
        **kwargs: filter parameters (plus optional `limit`/`reverse`).

    Examples:
        >>> user = User.filter_by(email="new@x.com")
        >>> shipments = Shipment.filter_by(country="India", limit=3, reverse=True)
    """
    limit = kwargs.pop('limit', None)
    reverse = kwargs.pop('reverse', False)
    q = cls.query.filter_by(**kwargs)
    if reverse:
        q = q.order_by(cls.id.desc())
    if limit:
        q = q.limit(limit)
    return q
def match_globs(path, patterns):
    """Test whether the given *path* matches any pattern in *patterns*.

    Patterns starting with '/' are anchored at the beginning of the path
    (a single leading '/' is stripped from both sides before matching);
    other patterns use plain fnmatch semantics. Empty patterns are ignored.

    Args:
        path (str): A file path to test for matches.
        patterns (list[str]): Glob string patterns to test against.

    Returns:
        bool: **True** if *path* matches any pattern in *patterns*.
    """
    for pattern in patterns:
        if not pattern:
            continue
        if pattern.startswith('/'):
            regex = fnmatch.translate(pattern[1:])
            temp_path = path[1:] if path.startswith('/') else path
            m = re.search(regex, temp_path)
            # Anchored match: must start at the beginning of the path.
            if m and m.start() == 0:
                return True
        elif fnmatch.fnmatch(path, pattern):
            return True
    return False
def get_gradient_components(self, value):
    """Return the components of `value` that should be included in gradients.

    For a ResourceVariable, its gradient component is its handle tensor.
    For now, the ResourceVariable itself is returned because the gradient
    infrastructure has special logic to handle ResourceVariables. The
    special logic should eventually be removed so the handle tensor can
    be returned instead.

    Args:
        value: A `ResourceVariable`.

    Returns:
        `value` itself.
    """
    return value
def _extract_units(self, obj, value):
    """Extract a 'units' key from a dict value and apply it.

    Internal helper for dealing with units associated with |UnitsSpec|
    properties when setting values. When ``value`` is a dict carrying a
    'units' key, the key is removed (from a copy, leaving the caller's
    dict untouched) and its value is set on the associated units property.

    Args:
        obj (HasProps): instance to update the units spec property value for.
        value (obj): new value to set for the property.

    Returns:
        ``value``, or a copy of it with the 'units' key removed when
        applicable.
    """
    if isinstance(value, dict):
        if 'units' in value:
            # Copy before popping so the caller's dict is not mutated.
            value = copy(value)
        units = value.pop("units", None)
        if units:
            self.units_prop.__set__(obj, units)
    return value
def run(self, path_or_tests, dot_env_path=None, mapping=None):
    """Main interface: run a testcase/testsuite path or parsed test data.

    Args:
        path_or_tests:
            str: testcase/testsuite file/folder path.
            dict: valid testcase/testsuite data.
        dot_env_path: optional path to a .env file (path mode only).
        mapping: optional variables mapping (path mode only).

    Raises:
        exceptions.ParamsError: when the argument is neither a valid path
            nor valid testcase data.
    """
    if validator.is_testcase_path(path_or_tests):
        return self.run_path(path_or_tests, dot_env_path, mapping)
    elif validator.is_testcases(path_or_tests):
        return self.run_tests(path_or_tests)
    else:
        raise exceptions.ParamsError(
            "Invalid testcase path or testcases: {}".format(path_or_tests))
def CoinFromRef(coin_ref, tx_output, state=CoinState.Unconfirmed, transaction=None):
    """Get a Coin object using a CoinReference.

    Args:
        coin_ref (neo.Core.CoinReference): a single UTXO / transaction input.
        tx_output (neo.Core.Transaction.TransactionOutput): a transaction output.
        state (neo.Core.State.CoinState): initial coin state.
        transaction: optional transaction to attach to the coin.

    Returns:
        Coin: the constructed coin.
    """
    coin = Coin(coin_reference=coin_ref, tx_output=tx_output, state=state)
    coin._transaction = transaction
    return coin
def get_reduced_structure(self, reduction_algo="niggli"):
    """Get a reduced structure.

    Args:
        reduction_algo (str): The lattice reduction algorithm to use.
            Currently supported options are "niggli" or "LLL".
    """
    if reduction_algo == "niggli":
        reduced_latt = self._lattice.get_niggli_reduced_lattice()
    elif reduction_algo == "LLL":
        reduced_latt = self._lattice.get_lll_reduced_lattice()
    else:
        raise ValueError("Invalid reduction algo : {}".format(reduction_algo))
    if reduced_latt != self.lattice:
        # Rebuild the structure in the reduced lattice, mapping sites back
        # into the unit cell.
        return self.__class__(reduced_latt, self.species_and_occu,
                              self.cart_coords,
                              coords_are_cartesian=True, to_unit_cell=True,
                              site_properties=self.site_properties,
                              charge=self._charge)
    return self.copy()
def get_tensor_spec(batches):
    """Return the common tensor spec for a list of batches.

    Args:
        batches: list of structures of tensors. The structures must be
            identical, but the shape at each leaf may be different.

    Returns:
        The common tensor spec for all the batches (dimensions that differ
        across batches become None; the batch dimension is always None).
    """
    from keras.src.utils.module_utils import tensorflow as tf

    def get_single_tensor_spec(*tensors):
        x = tensors[0]
        rank = len(x.shape)
        if rank < 1:
            raise ValueError(
                'When passing a dataset to a Keras model, the arrays must '
                f'be at least rank 1. Received: {x} of rank {len(x.shape)}.')
        for t in tensors:
            if len(t.shape) != rank:
                raise ValueError(
                    'When passing a dataset to a Keras model, the '
                    'corresponding arrays in each batch must have the same '
                    f'rank. Received: {x} and {t}')
        shape = []
        # A dimension is kept only when all batches agree on it.
        for dims in zip(*[list(x.shape) for x in tensors]):
            dims_set = set(dims)
            shape.append(dims_set.pop() if len(dims_set) == 1 else None)
        # The batch dimension is always unknown.
        shape[0] = None
        dtype = backend.standardize_dtype(x.dtype)
        if isinstance(x, tf.RaggedTensor):
            return tf.RaggedTensorSpec(shape=shape, dtype=dtype,
                                       ragged_rank=x.ragged_rank,
                                       row_splits_dtype=x.row_splits.dtype)
        if isinstance(x, tf.SparseTensor) or is_scipy_sparse(x) or is_jax_sparse(x):
            return tf.SparseTensorSpec(shape=shape, dtype=dtype)
        else:
            return tf.TensorSpec(shape=shape, dtype=dtype)

    return tree.map_structure(get_single_tensor_spec, *batches)
def install(package_name):
    """Download and install a holodeck world package.

    Args:
        package_name (str): The name of the package to install.

    Raises:
        HolodeckException: if `package_name` is not a known package.
    """
    holodeck_path = util.get_holodeck_path()
    # NOTE(review): the URL literal below appears truncated in this source
    # dump ("https:) — verify against the original repository.
    binary_website = "https:
    if package_name not in packages:
        raise HolodeckException("Unknown package name " + package_name)
    package_url = packages[package_name]
    print("Installing " + package_name + " at " + holodeck_path)
    install_path = os.path.join(holodeck_path, "worlds")
    # The binary name is keyed by OS so the matching build is fetched.
    binary_url = binary_website + util.get_os_key() + "_" + package_url
    _download_binary(binary_url, install_path)
    if os.name == "posix":
        # Downloaded binaries are not executable by default on POSIX.
        _make_binary_excecutable(package_name, install_path) | Installs a holodeck package.
Args:
package_name (str): The name of the package to install | juraj-google-style |
def get_include():
    """Return the directory containing the TensorFlow C++ header files.

    Returns:
        str: path of the ``include`` directory inside the installed
        tensorflow package.
    """
    # Resolve relative to wherever the tensorflow package is installed.
    import tensorflow as tf
    return _os_path.join(_os_path.dirname(tf.__file__), 'include') | Get the directory containing the TensorFlow C++ header files.
Returns:
The directory as string. | github-repos |
def update_firmware(self, firmware_information, force=False):
    """Install firmware on the member interconnects of a SAS Logical Interconnect.

    Args:
        firmware_information: Options describing the firmware operation.
        force: If True, proceed despite network connectivity problems or
            errors on the resource itself.

    Returns:
        dict: SAS Logical Interconnect firmware state after the update.
    """
    firmware_uri = '{}/firmware'.format(self.data['uri'])
    result = self._helper.update(firmware_information, firmware_uri, force=force)
    # Re-read this resource so cached data reflects the new firmware state.
    self.refresh()
    return result | Installs firmware to the member interconnects of a SAS Logical Interconnect.
Args:
firmware_information: Options to install firmware to a SAS Logical Interconnect.
force: If sets to true, the operation completes despite any problems with the network connectivy
or the erros on the resource itself.
Returns:
dict: SAS Logical Interconnect Firmware. | codesearchnet |
def constant_to_var(self, pyval, subst=None, node=None, source_sets=None, discard_concrete_values=False):
    """Convert a Python/PyTD constant to a cfg.Variable.

    Unlike constant_to_value, this can produce a Variable with multiple
    bindings (e.g. for union types such as pytd.Function overloads).

    Args:
        pyval: The constant to convert (PyTD node or builtin constant).
        subst: The current type-parameter substitution.
        node: The current CFG node; defaults to the root node.
        source_sets: Iterator over SourceSets (or tuples) for new bindings.
        discard_concrete_values: Whether concrete values should be dropped
            from type parameters.

    Returns:
        A cfg.Variable.

    Raises:
        ValueError: if pyval is of an unsupported type.
    """
    source_sets = source_sets or [[]]
    node = node or self.ctx.root_node
    kwargs = {'subst': subst, 'node': node, 'source_sets': source_sets, 'discard_concrete_values': discard_concrete_values}

    def constant_to_value(new_pyval):
        return self.constant_to_value(new_pyval, subst, node)

    if isinstance(pyval, pytd.NothingType):
        # "Nothing" has no inhabitants: an empty Variable.
        return self.ctx.program.NewVariable([], [], self.ctx.root_node)
    elif isinstance(pyval, pytd.Alias):
        return self.constant_to_var(pyval.type, **kwargs)
    elif isinstance(pyval, abstract_utils.AsInstance):
        cls = pyval.cls
        if isinstance(pyval, abstract_utils.AsReturnValue) and isinstance(cls, pytd.NothingType):
            # A function declared to return Nothing never returns.
            return self.never.to_variable(node)
        else:
            return self.pytd_cls_to_instance_var(cls, **kwargs)
    elif isinstance(pyval, pytd.Constant):
        return self.pytd_cls_to_instance_var(pyval.type, **kwargs)
    result = constant_to_value(pyval)
    if result is not None:
        return result.to_variable(node)
    # Raw cfg.Variables should never reach this point.
    assert pyval.__class__ != cfg.Variable, pyval
    if pyval.__class__ == tuple:
        content = (self.constant_to_var(v, **kwargs) for v in pyval)
        return self.build_tuple(self.ctx.root_node, content)
    raise ValueError(f'Cannot convert {pyval.__class__} to an abstract value') | Convert a constant to a Variable.
This converts a constant to a cfg.Variable. Unlike constant_to_value, it
can handle things that need to be represented as a Variable with multiple
possible values (i.e., a union type), like pytd.Function.
Args:
pyval: The Python constant to convert. Can be a PyTD definition or a
builtin constant.
subst: The current type parameters.
node: The current CFG node. (For instances)
source_sets: An iterator over instances of SourceSet (or just tuples).
discard_concrete_values: Whether concrete values should be discarded from
type parameters.
Returns:
A cfg.Variable.
Raises:
TypeParameterError: if conversion is attempted on a type parameter without
a substitution.
ValueError: if pytype is not of a known type. | github-repos |
def ping(request, timeout=_METADATA_DEFAULT_TIMEOUT, retry_count=3):
retries = 0
while retries < retry_count:
try:
response = request(
url=_METADATA_IP_ROOT, method='GET', headers=_METADATA_HEADERS,
timeout=timeout)
metadata_flavor = response.headers.get(_METADATA_FLAVOR_HEADER)
return (response.status == http_client.OK and
metadata_flavor == _METADATA_FLAVOR_VALUE)
except exceptions.TransportError:
_LOGGER.info('Compute Engine Metadata server unavailable on'
'attempt %s of %s', retries+1, retry_count)
retries += 1
return False | Checks to see if the metadata server is available.
Args:
request (google.auth.transport.Request): A callable used to make
HTTP requests.
timeout (int): How long to wait for the metadata server to respond.
retry_count (int): How many times to attempt connecting to metadata
server using above timeout.
Returns:
bool: True if the metadata server is reachable, False otherwise. | juraj-google-style |
class Upsample(nn.Module):
    """Pixel-shuffle based upsampling module.

    Args:
        scale (int): Scale factor. Supported scales: powers of two and 3.
        num_features (int): Channel number of intermediate features.
    """

    def __init__(self, scale, num_features):
        super().__init__()
        self.scale = scale
        # `scale & scale - 1 == 0` is the power-of-two test: bitwise &
        # binds tighter than ==, so this reads (scale & (scale - 1)) == 0.
        if scale & scale - 1 == 0:
            # One conv + PixelShuffle(2) stage per factor of two.
            for i in range(int(math.log(scale, 2))):
                self.add_module(f'convolution_{i}', nn.Conv2d(num_features, 4 * num_features, 3, 1, 1))
                self.add_module(f'pixelshuffle_{i}', nn.PixelShuffle(2))
        elif scale == 3:
            self.convolution = nn.Conv2d(num_features, 9 * num_features, 3, 1, 1)
            self.pixelshuffle = nn.PixelShuffle(3)
        else:
            raise ValueError(f'Scale {scale} is not supported. Supported scales: 2^n and 3.')

    def forward(self, hidden_state):
        # Mirror the construction logic: apply each registered stage in order.
        if self.scale & self.scale - 1 == 0:
            for i in range(int(math.log(self.scale, 2))):
                hidden_state = self.__getattr__(f'convolution_{i}')(hidden_state)
                hidden_state = self.__getattr__(f'pixelshuffle_{i}')(hidden_state)
        elif self.scale == 3:
            hidden_state = self.convolution(hidden_state)
            hidden_state = self.pixelshuffle(hidden_state)
        return hidden_state | Upsample module.
Args:
scale (`int`):
Scale factor. Supported scales: 2^n and 3.
num_features (`int`):
Channel number of intermediate features. | github-repos |
def copy(source, destination, ignore=None, adapter=None, fatal=True, logger=LOG.debug):
    """Copy `source` to `destination` (file or folder).

    Args:
        source (str | None): Source file or folder.
        destination (str | None): Destination file or folder.
        ignore (callable | list | str | None): Names to be ignored.
        adapter (callable | None): Optional function called on `source` before copy.
        fatal (bool | None): Abort execution on failure if True.
        logger (callable | None): Logger to use.

    Returns:
        int: 1 if effectively done, 0 if no-op, -1 on failure.
    """
    return _file_op(source, destination, _copy, adapter, fatal, logger, ignore=ignore) | Copy source -> destination
Args:
source (str | None): Source file or folder
destination (str | None): Destination file or folder
ignore (callable | list | str | None): Names to be ignored
adapter (callable | None): Optional function to call on 'source' before copy
fatal (bool | None): Abort execution on failure if True
logger (callable | None): Logger to use
Returns:
(int): 1 if effectively done, 0 if no-op, -1 on failure | juraj-google-style |
def restriction_coder(self):
    """Return the ``Coder`` used for restrictions produced by this provider.

    Returns:
        Coder: the registry's default coder for ``object``.
    """
    return coders.registry.get_coder(object) | Returns a ``Coder`` for restrictions.
Returned``Coder`` will be used for the restrictions produced by the current
``RestrictionProvider``.
Returns:
an object of type ``Coder``. | github-repos |
def _save_tf1_model(self, sess: session.Session, saved_model_path: str, signature_key: str, tags: Collection[str], inputs: Mapping[str, core.Tensor], outputs: Mapping[str, core.Tensor], init_op: Optional[ops.Operation]=None, assets_collection: Optional[Sequence[core.Symbol]]=None) -> None:
    """Save a TF1 SavedModel with a single predict SignatureDef.

    Args:
        sess: Current tf.Session object.
        saved_model_path: Directory to save the model to.
        signature_key: Key for the SignatureDef built from inputs/outputs.
        tags: Set of tags associated with the meta graph.
        inputs: Input name -> input tensor mapping.
        outputs: Output name -> output tensor mapping.
        init_op: Optional op for initialization (saved as the main op).
        assets_collection: Optional list of string tensors holding asset
            file names.
    """
    v1_builder = builder.SavedModelBuilder(saved_model_path)
    sig_def = signature_def_utils_impl.predict_signature_def(inputs=inputs, outputs=outputs)
    v1_builder.add_meta_graph_and_variables(sess, tags, signature_def_map={signature_key: sig_def}, main_op=init_op, assets_collection=assets_collection)
    v1_builder.save() | Saves a TF1 model.
Args:
sess: Current tf.Session object.
saved_model_path: Directory to save the model.
signature_key: The key to the SignatureDef that inputs & outputs
correspond to.
tags: Set of tags associated with the model.
inputs: Input name -> input tensor mapping.
outputs: Output name -> output tensor mapping.
init_op: Op for initialization.
assets_collection: Assets collection. This collection is a list of string
tensors. Each tensor contains the asset file names. | github-repos |
def register_app(self, app):
    """Register this route on a `bottle.Bottle` app instance.

    Args:
        app: the bottle application instance to attach the route to.

    Returns:
        Route: this instance (for chaining purposes).
    """
    # bottle's route() returns a decorator; apply it to our handler.
    app.route(self.uri, methods=self.methods)(self.callable_obj)
    return self | Register the route object to a `bottle.Bottle` app instance.
Args:
app (instance):
Returns:
Route instance (for chaining purposes) | juraj-google-style |
def values(self):
    """Return all values stored in the backing Redis session hash.

    Returns:
        list of str: the hash's values, decoded from bytes as UTF-8.
    """
    # hgetall returns bytes keys and values; only the values are kept.
    all_values = [v.decode('utf-8') for (k, v) in self.rdb.hgetall(self.session_hash).items()]
    return all_values | Returns a list of all values in the dictionary.
Returns:
list of str: [value1,value2,...,valueN] | codesearchnet |
def get_contract_state(self, contract_hash, id=None, endpoint=None):
    """Look up a contract state object by its script hash via RPC.

    Args:
        contract_hash (str): hash of the contract to look up, for example
            'd7678dd97c000be3f33e9362e673101bac4ca654'.
        id (int, optional): id to use for response tracking.
        endpoint (RPCEndpoint, optional): endpoint to use for the call.

    Returns:
        JSON object of the result, or the error encountered in the RPC call.
    """
    return self._call_endpoint(GET_CONTRACT_STATE, params=[contract_hash], id=id, endpoint=endpoint) | Get a contract state object by its hash
Args:
contract_hash: (str) the hash of the contract to lookup, for example 'd7678dd97c000be3f33e9362e673101bac4ca654'
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call | juraj-google-style |
def warn_logging(logger):
    """Create a `warnings.showwarning`-compatible callback backed by `logger`.

    Args:
        logger (logging.Logger): the logger warnings are forwarded to.

    Returns:
        function: suitable for assignment to `warnings.showwarning`.
    """
    def showwarning(message, category, filename, lineno, file=None, line=None):
        # Only the message is forwarded; category/location details are dropped.
        logger.warning(message)
    return showwarning | Create a `showwarning` function that uses the given logger.
Arguments:
logger (~logging.Logger): the logger to use.
Returns:
function: a function that can be used as the `warnings.showwarning`
callback. | codesearchnet |
def new_panel(store, institute_id, panel_name, display_name, csv_lines):
    """Create a new gene panel from a stream of CSV gene definitions.

    Args:
        store (scout.adapter.MongoAdapter): database adapter.
        institute_id (str): id of the owning institute.
        panel_name (str): unique panel name.
        display_name (str): human readable panel name.
        csv_lines (iterable(str)): stream with genes.

    Returns:
        The id of the new panel document, or None on any failure
        (unknown institute, duplicate panel, parse error, build error).
    """
    institute_obj = store.institute(institute_id)
    if institute_obj is None:
        flash("{}: institute not found".format(institute_id))
        return None
    # Panel names must be unique; refuse to overwrite an existing panel.
    panel_obj = store.gene_panel(panel_name)
    if panel_obj:
        flash("panel already exists: {} - {}".format(panel_obj['panel_name'],
                                                     panel_obj['display_name']))
        return None
    log.debug("parse genes from CSV input")
    try:
        new_genes = parse_genes(csv_lines)
    except SyntaxError as error:
        # parse_genes signals malformed CSV input with SyntaxError.
        flash(error.args[0], 'danger')
        return None
    log.debug("build new gene panel")
    panel_id = None
    try:
        panel_data = build_panel(dict(
            panel_name=panel_name,
            institute=institute_obj['_id'],
            version=1.0,
            date=dt.datetime.now(),
            display_name=display_name,
            genes=new_genes,
        ), store)
        panel_id = store.add_gene_panel(panel_data)
    except Exception as err:
        # Best-effort: log the failure and fall through, returning None.
        log.error('An error occurred while adding the gene panel {}'.format(err))
    return panel_id | Create a new gene panel.
Args:
store(scout.adapter.MongoAdapter)
institute_id(str)
panel_name(str)
display_name(str)
csv_lines(iterable(str)): Stream with genes
Returns:
panel_id: the ID of the new panel document created or None | juraj-google-style |
def is_attribute_deprecated(self, attribute):
rule_set = self._attribute_rule_sets.get(attribute)
if rule_set.version_deprecated:
if (self._version >= rule_set.version_deprecated):
return True
else:
return False
else:
return False | Check if the attribute is deprecated by the current KMIP version.
Args:
attribute (string): The name of the attribute
(e.g., 'Unique Identifier'). Required. | codesearchnet |
def _get_command_and_argv(argv):
    """Extract the command name and the argument list to pass to docopt.

    Args:
        argv: The argument list being used to run the command.

    Returns:
        tuple: (command_name, argv) where the command token has been
        stripped from argv when appropriate.
    """
    command_name = argv[0]
    if (not command_name):
        # Empty command token: drop it from the front.
        argv = argv[1:]
    elif (command_name == settings.command):
        # Remove the first occurrence of the command token only.
        argv.remove(command_name)
    return (command_name, argv) | Extract the command name and arguments to pass to docopt.
Args:
argv: The argument list being used to run the command.
Returns:
A tuple containing the name of the command and the arguments to pass
to docopt. | codesearchnet |
def _batchNumpyGather(self, params, indices, axis, batch_dims):
    """Reference batch-gather built from recursive np.take() calls.

    Used by testBatchDims() to construct the expected value.

    Args:
        params: A numpy array.
        indices: A numpy array.
        axis: An integer gather axis (counted including batch dims).
        batch_dims: An integer number of leading batch dimensions.

    Returns:
        A numpy array.
    """
    if batch_dims == 0:
        return np.take(params, indices, axis=axis)
    self.assertEqual(params.shape[0], indices.shape[0])
    if axis > 0:
        # Peeling off one batch dimension shifts the gather axis left.
        axis -= 1
    return np.stack([self._batchNumpyGather(params[i], indices[i], axis, batch_dims - 1) for i in range(params.shape[0])]) | Performs a batch gather by making recursive calls to np.take().
This is used by testBatchDims() to construct the expected value.
Args:
params: A numpy array
indices: A numpy array
axis: An integer
batch_dims: An integer
Returns:
A numpy array | github-repos |
def _get_setting(self, key, default_value=None, value_type=str):
    """Read a Setting entry from state, returning a default when absent.

    Args:
        key (str): the setting key.
        default_value (str, optional): The value returned if none is
            found. Defaults to None.
        value_type (function, optional): conversion applied to the raw
            setting value. Defaults to `str`.

    Returns:
        The converted setting value if found, `default_value` otherwise.
    """
    try:
        state_entry = self._state_view.get(SettingsView.setting_address(key))
    except KeyError:
        return default_value
    if (state_entry is not None):
        setting = Setting()
        setting.ParseFromString(state_entry)
        # A Setting protobuf may hold several entries; match on the key.
        for setting_entry in setting.entries:
            if (setting_entry.key == key):
                return value_type(setting_entry.value)
    return default_value | Get the setting stored at the given key.
Args:
key (str): the setting key
default_value (str, optional): The default value, if none is
found. Defaults to None.
value_type (function, optional): The type of a setting value.
Defaults to `str`.
Returns:
str: The value of the setting if found, default_value
otherwise. | codesearchnet |
def merge(self, workdir, pot_files, out_dvdb, delete_source=True):
    """Merge first-order DFPT POT files into a single DVDB database.

    Args:
        workdir: working directory used for the merger execution.
        pot_files: paths of the POT files to merge.
        out_dvdb: name (or path) of the output database.
        delete_source: True if POT1 files should be removed after a
            (successful) merge.

    Returns:
        Absolute path of the new database inside `workdir`.
    """
    pot_files = [os.path.abspath(s) for s in list_strings(pot_files)]
    if not os.path.isabs(out_dvdb):
        out_dvdb = os.path.join(os.path.abspath(workdir), os.path.basename(out_dvdb))
    if self.verbose:
        print("Will merge %d files into output DVDB %s" % (len(pot_files), out_dvdb))
        for i, f in enumerate(pot_files):
            print(" [%d] %s" % (i, f))
    # Trivial case: a single file is simply copied line by line.
    if len(pot_files) == 1:
        with open(pot_files[0], "r") as inh, open(out_dvdb, "w") as out:
            for line in inh:
                out.write(line)
        return out_dvdb
    self.stdin_fname, self.stdout_fname, self.stderr_fname = \
        map(os.path.join, 3 * [os.path.abspath(workdir)], ["mrgdvdb.stdin", "mrgdvdb.stdout", "mrgdvdb.stderr"])
    # Build the stdin feed expected by mrgdvdb: output name, number of
    # input files, then one input path per line.
    inp = StringIO()
    inp.write(out_dvdb + "\n")
    inp.write(str(len(pot_files)) + "\n")
    for fname in pot_files:
        inp.write(fname + "\n")
    self.stdin_data = [s for s in inp.getvalue()]
    with open(self.stdin_fname, "wt") as fh:
        fh.writelines(self.stdin_data)
        # Flush + fsync so the external merger sees the complete file.
        fh.flush()
        os.fsync(fh.fileno())
    retcode = self.execute(workdir)
    if retcode == 0 and delete_source:
        # Best-effort cleanup of the now-merged source files.
        for f in pot_files:
            try:
                os.remove(f)
            except IOError:
                pass
    return out_dvdb | Merge POT files containing 1st order DFPT potential
return the absolute path of the new database in workdir.
Args:
delete_source: True if POT1 files should be removed after (successful) merge. | juraj-google-style |
def translate_file(
        estimator, subtokenizer, input_file, output_file=None,
        print_all_translations=True):
    """Translate lines in a file, optionally saving them to `output_file`.

    Args:
        estimator: tf.Estimator used to generate the translations.
        subtokenizer: Subtokenizer object for encoding and decoding source
            and translated lines.
        input_file: file containing lines to translate.
        output_file: file that stores the generated translations.
        print_all_translations: If true, all translations are printed to
            stdout.

    Raises:
        ValueError: if `output_file` is a directory.
    """
    batch_size = _DECODE_BATCH_SIZE
    # Inputs are sorted (by length) to reduce padding; sorted_keys maps
    # back to the original line order for output.
    sorted_inputs, sorted_keys = _get_sorted_inputs(input_file)
    # NOTE(review): this expression looks truncated in this source dump
    # (an integer division by batch_size appears to be missing) — verify.
    num_decode_batches = (len(sorted_inputs) - 1)

    def input_generator():
        for i, line in enumerate(sorted_inputs):
            if i % batch_size == 0:
                # NOTE(review): this line is truncated in this source dump.
                batch_num = (i
                print("Decoding batch %d out of %d." % (batch_num, num_decode_batches))
            yield _encode_and_add_eos(line, subtokenizer)

    def input_fn():
        ds = tf.data.Dataset.from_generator(
            input_generator, tf.int64, tf.TensorShape([None]))
        ds = ds.padded_batch(batch_size, [None])
        return ds

    translations = []
    for i, prediction in enumerate(estimator.predict(input_fn)):
        translation = _trim_and_decode(prediction["outputs"], subtokenizer)
        translations.append(translation)
        if print_all_translations:
            print("Translating:")
            print("\tInput: %s" % sorted_inputs[i])
            print("\tOutput: %s\n" % translation)
            print("=" * 100)
    # Write translations back out in the original (pre-sort) line order.
    if output_file is not None:
        if tf.gfile.IsDirectory(output_file):
            raise ValueError("File output is a directory, will not save outputs to "
                             "file.")
        tf.logging.info("Writing to file %s" % output_file)
        with tf.gfile.Open(output_file, "w") as f:
            for index in xrange(len(sorted_keys)):
                f.write("%s\n" % translations[sorted_keys[index]]) | Translate lines in file, and save to output file if specified.
Args:
estimator: tf.Estimator used to generate the translations.
subtokenizer: Subtokenizer object for encoding and decoding source and
translated lines.
input_file: file containing lines to translate
output_file: file that stores the generated translations.
print_all_translations: If true, all translations are printed to stdout.
Raises:
ValueError: if output file is invalid. | juraj-google-style |
def giant_text_sqltype(dialect: Dialect) -> str:
    """Return the SQL type for a very large text column under `dialect`.

    Args:
        dialect: a SQLAlchemy :class:`Dialect`.

    Returns:
        str: 'NVARCHAR(MAX)' for SQL Server, 'LONGTEXT' for MySQL.

    Raises:
        ValueError: for any other dialect.
    """
    if dialect.name == SqlaDialectName.SQLSERVER:
        return 'NVARCHAR(MAX)'
    elif dialect.name == SqlaDialectName.MYSQL:
        return 'LONGTEXT'
    else:
        raise ValueError("Unknown dialect: {}".format(dialect.name)) | Returns the SQL column type used to make very large text columns for a
given dialect.
Args:
dialect: a SQLAlchemy :class:`Dialect`
Returns:
the SQL data type of "giant text", typically 'LONGTEXT' for MySQL
and 'NVARCHAR(MAX)' for SQL Server. | juraj-google-style |
def walk(self, walker):
    """Walk each step of the underlying graph in topological order.

    Args:
        walker (func): a walker function to be passed to
            :class:`stacker.dag.DAG` to walk the graph.
    """
    def walk_func(step):
        # If any downstream dependency failed, mark this step failed and
        # skip running it.
        for dep in self.graph.downstream(step.name):
            if not dep.ok:
                step.set_status(FailedStatus("dependency has failed"))
                return step.ok
        return step.run()
    return self.graph.walk(walker, walk_func) | Walks each step in the underlying graph, in topological order.
Args:
walker (func): a walker function to be passed to
:class:`stacker.dag.DAG` to walk the graph. | juraj-google-style |
def pattern_from_collections_and_statement(data_collections, statement):
    """Evaluate a conditional statement across aligned data collections.

    Args:
        data_collections: A list of aligned Data Collections to be
            evaluated against the statement.
        statement: A conditional statement as a string (e.g. a>25 and
            a%5==0); variables are named a, b, c... per collection.

    Returns:
        list of bool: True where the statement holds at that index.
    """
    BaseCollection.are_collections_aligned(data_collections)
    correct_var = BaseCollection._check_conditional_statement(statement, len(data_collections))
    # Operators are masked so variable substitution cannot corrupt them.
    num_statement_clean = BaseCollection._replace_operators(statement)
    pattern = []
    for i in xrange(len(data_collections[0])):
        num_statement = num_statement_clean
        # Substitute each collection's i-th value for its variable name.
        for (j, coll) in enumerate(data_collections):
            var = correct_var[j]
            num_statement = num_statement.replace(var, str(coll[i]))
        num_statement = BaseCollection._restore_operators(num_statement)
        # NOTE(review): eval() of the constructed expression — the
        # statement is assumed validated upstream; never pass untrusted input.
        pattern.append(eval(num_statement, {}))
    return pattern | Generate a list of booleans from data collections and a conditional statement.
Args:
data_collections: A list of aligned Data Collections to be evaluated
against the statement.
statement: A conditional statement as a string (e.g. a>25 and a%5==0).
The variable should always be named as 'a' (without quotations).
Return:
pattern: A list of True/False booleans with the length of the
Data Collections where True meets the conditional statement
and False does not. | codesearchnet |
def run(self, instance):
    """Replay the recorded chain of attribute accesses/calls on `instance`.

    Args:
        instance: the object the chain starts from.

    Returns:
        The final value produced by the chain. The recorded stack is
        cleared as a side effect.
    """
    last = instance
    for item in self.stack:
        if isinstance(item, str):
            # A string entry records an attribute access.
            last = getattr(last, item)
        else:
            # Otherwise the entry is an (args, kwargs) call record.
            last = last(*item[0], **item[1])
    self.stack = []
    return last | Run the recorded chain of methods on `instance`.
Args:
instance: an object. | codesearchnet |
def save(self, representative_dataset: RepresentativeDatasetMapping) -> Mapping[str, _RepresentativeDatasetFile]:
    """Save each representative dataset to its mapped TFRecord file.

    Args:
        representative_dataset: Signature def key -> representative
            dataset mapping. Each dataset is saved to the TFRecord path
            that `self.path_map` assigns to its signature def key.

    Returns:
        Map from signature key to the RepresentativeDatasetFile instance
        containing the path to the saved file.

    Raises:
        ValueError: when a signature def key in `representative_dataset`
            is not present in `self.path_map`.
    """
    dataset_file_map = {}
    for signature_def_key, repr_ds in representative_dataset.items():
        if signature_def_key not in self.path_map:
            raise ValueError(f'SignatureDef key does not exist in the provided path_map: {signature_def_key}')
        dataset_file_map[signature_def_key] = self._save_tf_record_dataset(repr_ds, signature_def_key)
    return dataset_file_map | Saves the representative dataset.
Args:
representative_dataset: Signature def key -> representative dataset
mapping. Each dataset is saved in a separate TFRecord file whose path
matches the signature def key of `path_map`.
Raises:
ValueError: When the signature def key in `representative_dataset` is not
present in the `path_map`.
Returns:
A map from signature key to the RepresentativeDatasetFile instance
contains the path to the saved file. | github-repos |
def absorption_coefficient(dielectric):
    """Compute the optical absorption coefficient from vasprun dielectric data.

    Args:
        dielectric (list): pymatgen vasprun dielectric data:
            element 0 is the list of energies, elements 1 and 2 the real
            and imaginary dielectric tensors in
            ``[xx, yy, zz, xy, xz, yz]`` format.

    Returns:
        np.array: absorption coefficient in cm^-1 (frequency in eV).
    """
    energies_in_eV = np.array(dielectric[0])
    real_dielectric = parse_dielectric_data(dielectric[1])
    imag_dielectric = parse_dielectric_data(dielectric[2])
    # Average the tensor components into scalar responses per energy.
    epsilon_1 = np.mean(real_dielectric, axis=1)
    epsilon_2 = np.mean(imag_dielectric, axis=1)
    # alpha = 2*sqrt(2)*pi/lambda * sqrt(-eps1 + sqrt(eps1^2 + eps2^2))
    return (((((2.0 * np.sqrt(2.0)) * pi) * eV_to_recip_cm) * energies_in_eV) * np.sqrt(((- epsilon_1) + np.sqrt(((epsilon_1 ** 2) + (epsilon_2 ** 2)))))) | Calculate the optical absorption coefficient from an input set of
pymatgen vasprun dielectric constant data.
Args:
dielectric (list): A list containing the dielectric response function
in the pymatgen vasprun format.
| element 0: list of energies
| element 1: real dielectric tensors, in ``[xx, yy, zz, xy, xz, yz]`` format.
| element 2: imaginary dielectric tensors, in ``[xx, yy, zz, xy, xz, yz]`` format.
Returns:
(np.array): absorption coefficient using eV as frequency units (cm^-1).
Notes:
The absorption coefficient is calculated as
.. math:: \\alpha = \\frac{2\sqrt{2} \pi}{\lambda} \sqrt{-\epsilon_1+\sqrt{\epsilon_1^2+\epsilon_2^2}} | codesearchnet |
def delete_record(self, record):
    """Remove a DNSRecord from this resource's children and delete it.

    Args:
        record (DNSRecord): the record to remove.

    Returns:
        None
    """
    self.children.remove(record.resource)
    record.delete() | Remove a DNSRecord
Args:
record (:obj:`DNSRecord`): :obj:`DNSRecord` to remove
Returns:
`None` | juraj-google-style |
def wait_for_redis_to_start(redis_ip_address, redis_port, password=None, num_retries=5):
    """Block until a Redis server responds, retrying once per second.

    Args:
        redis_ip_address (str): The IP address of the redis server.
        redis_port (int): The port of the redis server.
        password (str): The password of the redis server.
        num_retries (int): The number of connection attempts; the client
            sleeps for one second between attempts.

    Raises:
        Exception: if the server never responded within the retry budget.
    """
    redis_client = redis.StrictRedis(host=redis_ip_address, port=redis_port, password=password)
    counter = 0
    while (counter < num_retries):
        try:
            logger.info('Waiting for redis server at {}:{} to respond...'.format(redis_ip_address, redis_port))
            # Any round-trip command proves the server is up; client_list
            # also exercises authentication.
            redis_client.client_list()
        except redis.ConnectionError:
            time.sleep(1)
            logger.info('Failed to connect to the redis server, retrying.')
            counter += 1
        else:
            break
    if (counter == num_retries):
        raise Exception('Unable to connect to Redis. If the Redis instance is on a different machine, check that your firewall is configured properly.') | Wait for a Redis server to be available.
This is accomplished by creating a Redis client and sending a random
command to the server until the command gets through.
Args:
redis_ip_address (str): The IP address of the redis server.
redis_port (int): The port of the redis server.
password (str): The password of the redis server.
num_retries (int): The number of times to try connecting with redis.
The client will sleep for one second between attempts.
Raises:
Exception: An exception is raised if we could not connect with Redis. | codesearchnet |
def _update_trial_info(self, expr_dir):
    """Refresh DB-backed status/metrics for one experiment directory.

    The meta file is loaded if present, new results appended since the
    last scan are ingested, and the trial's TrialRecord row is updated.

    Args:
        expr_dir (str): path of the experiment directory.
    """
    # The trailing 8 characters of the directory name encode the trial id.
    trial_id = expr_dir[(- 8):]
    meta_file = os.path.join(expr_dir, EXPR_META_FILE)
    meta = parse_json(meta_file)
    result_file = os.path.join(expr_dir, EXPR_RESULT_FILE)
    # Resume reading results where the previous scan stopped.
    offset = self._result_offsets.get(trial_id, 0)
    (results, new_offset) = parse_multiple_json(result_file, offset)
    self._add_results(results, trial_id)
    self._result_offsets[trial_id] = new_offset
    if meta:
        TrialRecord.objects.filter(trial_id=trial_id).update(trial_status=meta['status'], end_time=timestamp2date(meta.get('end_time', None)))
    elif (len(results) > 0):
        # No meta file: derive status and metrics from the latest result.
        metrics = {'episode_reward': results[(- 1)].get('episode_reward_mean', None), 'accuracy': results[(- 1)].get('mean_accuracy', None), 'loss': results[(- 1)].get('loss', None)}
        if results[(- 1)].get('done'):
            TrialRecord.objects.filter(trial_id=trial_id).update(trial_status='TERMINATED', end_time=results[(- 1)].get('date', None), metrics=str(metrics))
        else:
            TrialRecord.objects.filter(trial_id=trial_id).update(metrics=str(metrics)) | Update information for given trial.
The meta file will be loaded if it exists, and the trial information
in db backend will be updated.
Args:
expr_dir(str) | codesearchnet |
def vector_projection(v1, v2):
    """Compute the vector projection of v1 onto v2.

    Args:
        v1, v2: iterables with indices 0, 1, 2 corresponding to cartesian
            coordinates.

    Returns:
        3-vector: the projection of v1 along the direction of v2.
    """
    # (scalar component of v1 along v2) times the unit vector of v2.
    return ((scalar_projection(v1, v2) * v2) / np.linalg.norm(v2)) | compute the vector projection of v1 upon v2
Args:
v1, v2: iterable
indices 0, 1, 2 corresponding to cartesian coordinates
Returns:
3-vector of the projection of point p onto the direction of v | codesearchnet |
def imflip(img, direction='horizontal'):
    """Flip an image horizontally or vertically.

    Args:
        img (ndarray): Image to be flipped.
        direction (str): The flip direction, either "horizontal" or
            "vertical".

    Returns:
        ndarray: The flipped image.
    """
    assert direction in ['horizontal', 'vertical']
    if direction == 'horizontal':
        # Horizontal flip mirrors columns (axis 1).
        return np.flip(img, axis=1)
    else:
        return np.flip(img, axis=0) | Flip an image horizontally or vertically.
Args:
img (ndarray): Image to be flipped.
direction (str): The flip direction, either "horizontal" or "vertical".
Returns:
ndarray: The flipped image. | juraj-google-style |
def _maybe_download_corpora(tmp_dir):
    """Download and extract the MultiNLI corpus if not already present.

    Args:
        tmp_dir: a string, directory used for download and extraction.

    Returns:
        a string: path of the extracted MNLI directory.
    """
    mnli_filename = 'MNLI.zip'
    mnli_finalpath = os.path.join(tmp_dir, 'MNLI')
    if (not tf.gfile.Exists(mnli_finalpath)):
        zip_filepath = generator_utils.maybe_download(tmp_dir, mnli_filename, _MNLI_URL)
        zip_ref = zipfile.ZipFile(zip_filepath, 'r')
        zip_ref.extractall(tmp_dir)
        zip_ref.close()
    return mnli_finalpath | Download corpora for multinli.
Args:
tmp_dir: a string
Returns:
a string | codesearchnet |
def universal_transformer_with_gru_as_transition_function(layer_inputs, step, hparams, ffn_unit, attention_unit, pad_remover=None):
    """Universal Transformer step using a GRU as the transition function.

    The GRU gates control the flow of information in depth across the
    Universal Transformer's steps.

    Args:
        layer_inputs: tuple of (state, inputs, memory); only state is used,
            inputs and memory are passed through unchanged.
        step: indicates number of steps taken so far.
        hparams: model hyper-parameters.
        ffn_unit: feed-forward unit.
        attention_unit: multi-head attention unit.
        pad_remover: to mask out padding in convolutional layers (efficiency).

    Returns:
        (new_state, inputs, memory) tuple.
    """
    (state, unused_inputs, unused_memory) = tf.unstack(layer_inputs, num=None, axis=0, name='unstack')
    # Step timing signals are incompatible with this transition function.
    assert (not hparams.add_step_timing_signal)
    mh_attention_input = step_preprocess(state, step, hparams)
    transition_function_input = attention_unit(mh_attention_input)
    if hparams.add_ffn_unit_to_the_transition_function:
        transition_function_input = ffn_unit(transition_function_input)
    transition_function_input = common_layers.layer_preprocess(transition_function_input, hparams)
    with tf.variable_scope('gru'):
        # Update gate: how much of the candidate replaces the input.
        transition_function_update_gate = _ffn_layer_multi_inputs([transition_function_input, state], hparams, name='update', bias_initializer=tf.constant_initializer(1.0), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=False, postprocess=False)
        tf.contrib.summary.scalar('gru_update_gate', tf.reduce_mean(transition_function_update_gate))
        # Reset gate: how much of the previous state feeds the candidate.
        transition_function_reset_gate = _ffn_layer_multi_inputs([transition_function_input, state], hparams, name='reset', bias_initializer=tf.constant_initializer(1.0), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=False, postprocess=False)
        tf.contrib.summary.scalar('gru_reset_gate', tf.reduce_mean(transition_function_reset_gate))
        reset_state = (transition_function_reset_gate * state)
        transition_function_candidate = _ffn_layer_multi_inputs([transition_function_input, reset_state], hparams, name='candidate', bias_initializer=tf.zeros_initializer(), activation=tf.tanh, pad_remover=pad_remover, preprocess=False, postprocess=False)
        # Standard GRU blend of the gated input and candidate.
        transition_function_output = (((1 - transition_function_update_gate) * transition_function_input) + (transition_function_update_gate * transition_function_candidate))
    transition_function_output = common_layers.layer_preprocess(transition_function_output, hparams)
    return (transition_function_output, unused_inputs, unused_memory) | Universal Transformer which uses a gru as transition function.
It's kind of like having a gru, filliped vertically next to the Universal
Transformer that controls the flow of the information in depth,
over different steps of the Universal Transformer.
Args:
layer_inputs:
- state: state
- inputs: not used here
- memory: not used here
step: indicates number of steps taken so far
hparams: model hyper-parameters.
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
pad_remover: to mask out padding in convolutional layers (efficiency).
Returns:
layer_output:
new_state: new state
inputs: not uesed
memory: not used | codesearchnet |
def _df_index_name(df):
    """Return the Bokeh-appropriate column name for a ``DataFrame`` index.

    Args:
        df (DataFrame): the ``DataFrame`` to find an index name for.

    Returns:
        str: ``df.index.name`` if set; for a multi-index whose names are
        all strings, the names joined with '_'; otherwise "index".
    """
    if df.index.name:
        return df.index.name
    elif df.index.names:
        try:
            return '_'.join(df.index.names)
        except TypeError:
            # Non-string names (e.g. None) cannot be joined; fall back.
            return 'index'
    else:
        return 'index' | Return the Bokeh-appropriate column name for a ``DataFrame`` index
If there is no named index, then ``"index"`` is returned.
If there is a single named index, then ``df.index.name`` is returned.
If there is a multi-index, and the index names are all strings, then
the names are joined with '_' and the result is returned, e.g. for a
multi-index ``['ind1', 'ind2']`` the result will be "ind1_ind2".
Otherwise if any index name is not a string, the fallback name "index"
is returned.
Args:
df (DataFrame) : the ``DataFrame`` to find an index name for
Returns:
str | codesearchnet |
def load_yaml_by_relpath(cls, directories, rel_path, log_debug=False):
    """Load a yaml file whose path is relative to one of `directories`.

    Args:
        directories: list of directories to search, in order.
        rel_path: relative path of the yaml file to load.
        log_debug: log all messages as debug.

    Returns:
        tuple (fullpath, loaded yaml structure), or None if not found.
    """
    for d in directories:
        # Auto-create search directories that live under the user's home.
        if d.startswith(os.path.expanduser('~')) and not os.path.exists(d):
            os.makedirs(d)
        possible_path = os.path.join(d, rel_path)
        if os.path.exists(possible_path):
            loaded = cls.load_yaml_by_path(possible_path, log_debug=log_debug)
            if loaded is not None:
                # Bug fix: the file was previously parsed a second time
                # (and without log_debug); reuse the loaded document.
                return (possible_path, loaded)
return None | Load a yaml file with path that is relative to one of given directories.
Args:
directories: list of directories to search
name: relative path of the yaml file to load
log_debug: log all messages as debug
Returns:
tuple (fullpath, loaded yaml structure) or None if not found | juraj-google-style |
def to_maildir(self, flags: Iterable[Union[bytes, Flag]]) -> str:
    """Map flags and keywords to their maildir letter codes.

    Args:
        flags: The flags and keywords to map; bytes values are coerced
            to Flag first.

    Returns:
        str: concatenation of the matching letter codes; flags with no
        system or keyword mapping are silently skipped.
    """
    codes = []
    for flag in flags:
        if isinstance(flag, bytes):
            flag = Flag(flag)
        # Prefer the system-flag mapping; fall back to keyword mapping.
        from_sys = self._from_sys.get(flag)
        if from_sys is not None:
            codes.append(from_sys)
        else:
            from_kwd = self._from_kwd.get(flag)
            if from_kwd is not None:
                codes.append(from_kwd)
    return ''.join(codes) | Return the string of letter codes that are used to map to defined
IMAP flags and keywords.
Args:
flags: The flags and keywords to map. | juraj-google-style |
def _get_client_fqdn(self, client_info_contents):
    """Extract a GRR client's id and FQDN from its client_info.yaml text.

    Args:
        client_info_contents: The contents of the client_info.yaml file.

    Returns:
        A (str, str) tuple representing client ID and client FQDN.
    """
    yamldict = yaml.safe_load(client_info_contents)
    fqdn = yamldict['system_info']['fqdn']
    # client_id is expected to contain a single '/' (presumably of the
    # form 'aff4:/C.xxx'); keep the trailing part — TODO confirm format.
    client_id = yamldict['client_id'].split('/')[1]
    return client_id, fqdn | Extracts a GRR client's FQDN from its client_info.yaml file.
Args:
client_info_contents: The contents of the client_info.yaml file.
Returns:
A (str, str) tuple representing client ID and client FQDN. | juraj-google-style |
def load_from_xml(self, path):
    """Load all objects from an XML dump and return them keyed by tag.

    The returned dict maps JSSObject class names to JSSObjectLists of all
    full objects of that class (the equivalent of retrieve_all()). This
    can potentially take a very long time!

    Args:
        path: String file path to the file you wish to load from; '~' is
            expanded prior to opening.

    Returns:
        dict: tag name -> JSSObjectList of full objects.
    """
    with open(os.path.expanduser(path), 'r') as ifile:
        et = ElementTree.parse(ifile)
    root = et.getroot()
    all_objects = {}
    for child in root:
        # Child tag names match JSSObject class attributes on this object.
        obj_type = self.__getattribute__(child.tag)
        objects = [obj_type(obj) for obj in child]
        all_objects[child.tag] = JSSObjectList(self.factory, None, objects)
    return all_objects | Load all objects from XML file and return as dict.
The dict returned will have keys named the same as the
JSSObject classes contained, and the values will be
JSSObjectLists of all full objects of that class (for example,
the equivalent of my_jss.Computer().retrieve_all()).
This method can potentially take a very long time!
Args:
path: String file path to the file you wish to load from.
Path will have ~ expanded prior to opening. | codesearchnet |
def PlistValueToPlainValue(plist):
    """Recursively convert binplist rich values to plain Python values.

    binplist uses rich types for some plist entries; RDFValueArray can
    only transport plain types, so dicts/lists are rebuilt recursively
    and datetimes become microsecond epoch timestamps.

    Args:
        plist: A plist value to convert.

    Returns:
        A simple python type.
    """
    if isinstance(plist, dict):
        ret_value = dict()
        for key, value in iteritems(plist):
            ret_value[key] = PlistValueToPlainValue(value)
        return ret_value
    elif isinstance(plist, list):
        return [PlistValueToPlainValue(value) for value in plist]
    elif isinstance(plist, datetime.datetime):
        # Epoch microseconds, treating the datetime as UTC.
        return (calendar.timegm(plist.utctimetuple()) * 1000000) + plist.microsecond
    return plist | Takes the plist contents generated by binplist and returns a plain dict.
binplist uses rich types to express some of the plist types. We need to
convert them to types that RDFValueArray will be able to transport.
Args:
plist: A plist to convert.
Returns:
A simple python type. | juraj-google-style |
def read(path, encoding='utf-8'):
    """Read and return the content of a file, or '' on any error.

    Args:
        path (str): Path to the file.
        encoding (str): File encoding. Default: utf-8.

    Returns:
        str: File content, or empty string if there was an error.
    """
    try:
        with io.open(path, encoding=encoding) as f:
            return f.read()
    except Exception as e:
        # Deliberately broad: any read failure is logged and swallowed.
        logger.error('read: %s failed. Error: %s', path, e)
        return '' | Read the content of the file.
Args:
path (str): Path to the file
encoding (str): File encoding. Default: utf-8
Returns:
str: File content or empty string if there was an error | codesearchnet |
def _create_slots(self, table: 'TableConfig', variable_creator: Callable[[Text, init_ops_v2.Initializer], tf_variables.Variable], initializer_wrapper: Optional[Callable[[str, init_ops_v2.Initializer], init_ops_v2.Initializer]]=None) -> Dict[Text, tf_variables.Variable]:
    """Create optimizer slot variables for an embedding table.

    Args:
        table: The table variable to create slots for.
        variable_creator: A function which creates variables; takes
            parameters 'name', 'initializer'.
        initializer_wrapper: Optional function that wraps each initializer.

    Returns:
        A dict of variables, keyed by self._slot_names().
    """
    names = self._slot_names()
    initializers = self._slot_initializers()
    if initializer_wrapper is not None:
        initializers = [initializer_wrapper(name, initializer) for name, initializer in zip(names, initializers)]
    # A user-supplied creation hook takes precedence over the default path.
    if self.slot_variable_creation_fn is not None:
        return self.slot_variable_creation_fn(table, names, initializers)
    else:
        slots = {}
        for slot, initializer in zip(names, initializers):
            slots[slot] = variable_creator(slot, initializer)
        return slots | Creates slot variables for table.
Args:
table: The table variable to create slots for.
variable_creator: A function which creates variables. Takes parameters
'name', 'initializer'.
initializer_wrapper: A function that wraps the initializer.
Returns:
A dict of variables, keyed by self._slot_names(). | github-repos |
def _pack_with_custom_ops(dataset, keys, length):
    """Pack a pre-batched dataset using the custom pack_sequences2 op.

    Faster than _pack_with_tf_ops(), and denser packing, but relies on
    custom ops which require a custom compiled binary. See pack_dataset().

    Args:
        dataset: a dataset containing padded batches of examples.
        keys: a list of strings (must have length 1 or 2).
        length: an integer target packed length.

    Returns:
        a dataset, with "_segmentation" and "_position" features added
        per key.

    Raises:
        ValueError: if `keys` does not contain exactly 1 or 2 entries.
    """
    from tensor2tensor.data_generators.ops import pack_sequences_ops
    # The custom op always packs two tensors; with one key, pack it twice.
    if len(keys) == 1:
        k1, = keys
        k2 = k1
    elif len(keys) == 2:
        k1, k2 = keys
    else:
        raise ValueError("must have 1 or 2 keys")

    def map_fn_custom(x):
        (k1_packed, k1_segmengation, k1_position,
         k2_packed, k2_segmentation, k2_position) = (
             pack_sequences_ops.pack_sequences2(x[k1], x[k2], length))
        packed = {
            k1: k1_packed,
            k1 + "_segmentation": k1_segmengation,
            k1 + "_position": k1_position,
        }
        # The second key's outputs are only meaningful for two real keys.
        if len(keys) == 2:
            packed.update({
                k2: k2_packed,
                k2 + "_segmentation": k2_segmentation,
                k2 + "_position": k2_position,
            })
        return packed

    dataset = dataset.map(map_fn_custom,
                          num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # Un-batch the packed examples back into individual elements.
    dataset = dataset.flat_map(tf.data.Dataset.from_tensor_slices)
    return dataset | Helper-function for packing a dataset which has already been batched.
See pack_dataset()
Relies on custom ops which require a custom compiled binary.
Faster than _pack_with_tf_ops(), and denser packing.
Args:
dataset: a dataset containing padded batches of examples.
keys: a list of strings (must have length 1 or 2)
length: an integer
Returns:
a dataset. | juraj-google-style |
def get_unfrozen_copy(values):
    """Recursively convert `values`' tuples into lists and frozendicts into dicts.

    Args:
        values (frozendict/tuple): the frozendict/tuple.

    Returns:
        values (dict/list): the unfrozen copy.
    """
    if isinstance(values, (frozendict, dict)):
        return {key: get_unfrozen_copy(item) for key, item in values.items()}
    if isinstance(values, (list, tuple)):
        return [get_unfrozen_copy(item) for item in values]
    # Leaf values (scalars, strings, ...) are returned unchanged.
    return values
Args:
values (frozendict/tuple): the frozendict/tuple.
Returns:
values (dict/list): the unfrozen copy. | codesearchnet |
def has_kwargs(fn):
    """Returns whether the passed callable has **kwargs in its signature.

    Args:
      fn: Function, or function-like object (e.g., result of `functools.partial`).

    Returns:
      `bool`: if `fn` has **kwargs in its signature.

    Raises:
      `TypeError`: If fn is not a Function, or function-like object.
    """
    # Resolve the underlying function whose signature should be inspected.
    if isinstance(fn, functools.partial):
        target = fn.func
    elif _is_callable_object(fn):
        target = fn.__call__
    elif callable(fn):
        target = fn
    else:
        raise TypeError(f'Argument `fn` should be a callable. Received: fn={fn} (of type {type(fn)})')
    return tf_inspect.getfullargspec(target).varkw is not None
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`bool`: if `fn` has **kwargs in its signature.
Raises:
`TypeError`: If fn is not a Function, or function-like object. | github-repos |
def IsPathSuffix(mod_path, path):
  """Checks whether path is a full path suffix of mod_path.

  Args:
    mod_path: Must be an absolute path to a source file. Must not have
      file extension.
    path: A relative path. Must not have file extension.

  Returns:
    True if path is a full path suffix of mod_path. False otherwise.
  """
  if not mod_path.endswith(path):
    return False
  if len(mod_path) == len(path):
    # The whole of mod_path matched.
    return True
  # A genuine suffix must begin at a path-component boundary, i.e. the
  # character just before the match is a path separator.
  return mod_path[:-len(path)].endswith(os.sep)
Args:
mod_path: Must be an absolute path to a source file. Must not have
file extension.
path: A relative path. Must not have file extension.
Returns:
True if path is a full path suffix of mod_path. False otherwise. | juraj-google-style |
def raster_statistics(raster_file):
    """Get basic statistics of raster data.

    Args:
        raster_file: raster file path.

    Returns:
        (min, max, mean, std) of the first band.
    """
    dataset = gdal_Open(raster_file)
    first_band = dataset.GetRasterBand(1)
    # False is GDAL's `approx_ok` flag: statistics are computed exactly
    # rather than from overviews/subsampling.
    min_value, max_value, mean_value, std_value = first_band.ComputeStatistics(False)
    return (min_value, max_value, mean_value, std_value)
Args:
raster_file: raster file path.
Returns:
min, max, mean, std. | codesearchnet |
def _rewrite_insert(self, sql, params, return_id=False):
    """Rewrites a formed SQL INSERT query to include the ON CONFLICT clause.

    Arguments:
        sql: The SQL INSERT query to rewrite.
        params: The parameters passed to the query.
        return_id: When True, RETURNING is restricted to the primary key
            column; otherwise everything ('*') is returned.

    Returns:
        A tuple of the rewritten SQL query and new params.
    """
    if return_id:
        returning = self.qn(self.query.model._meta.pk.attname)
    else:
        returning = '*'
    conflict_action = self.query.conflict_action.value
    if conflict_action == 'UPDATE':
        return self._rewrite_insert_update(sql, params, returning)
    if conflict_action == 'NOTHING':
        return self._rewrite_insert_nothing(sql, params, returning)
    raise SuspiciousOperation(('%s is not a valid conflict action, specify ConflictAction.UPDATE or ConflictAction.NOTHING.' % str(self.query.conflict_action)))
the ON CONFLICT clause.
Arguments:
sql:
The SQL INSERT query to rewrite.
params:
The parameters passed to the query.
returning:
What to put in the `RETURNING` clause
of the resulting query.
Returns:
A tuple of the rewritten SQL query and new params. | codesearchnet |
def l2_regression_sq_loss(y, target, name=None):
    """Calculates the sum of squared errors between y and target.

    Args:
      y: the calculated values.
      target: the desired values.
      name: the name for this op, defaults to 'l2_regression_sq'.

    Returns:
      A tensorflow op.
    """
    with tf.name_scope(name, 'l2_regression_sq', [y, target]) as scope:
        y_tensor = tf.convert_to_tensor(y, name='y')
        target_tensor = tf.convert_to_tensor(target, name='target')
        squared_error = tf.square(y_tensor - target_tensor)
        # Sum the per-example errors over the batch.
        return reduce_batch_sum(squared_error, name=scope)
Args:
y: the calculated values.
target: the desired values.
name: the name for this op, defaults to 'l2_regression_sq'
Returns:
A tensorflow op. | codesearchnet |
def ParseFileObject(self, parser_mediator, file_object):
# Parses a Windows Prefetch (SCCA) file-like object and emits execution
# and volume-creation events through the parser mediator.
scca_file = pyscca.file()
try:
scca_file.open_file_object(file_object)
except IOError as exception:
# Best effort: report unreadable files as a warning instead of failing.
parser_mediator.ProduceExtractionWarning('unable to open file with error: {0!s}'.format(exception))
return
format_version = scca_file.format_version
executable_filename = scca_file.executable_filename
prefetch_hash = scca_file.prefetch_hash
run_count = scca_file.run_count
number_of_volumes = scca_file.number_of_volumes
volume_serial_numbers = []
volume_device_paths = []
path = ''
# Collect per-volume metadata and emit a volume event for each volume
# that carries a creation timestamp.
for volume_information in iter(scca_file.volumes):
volume_serial_number = volume_information.serial_number
volume_device_path = volume_information.device_path
volume_serial_numbers.append(volume_serial_number)
volume_device_paths.append(volume_device_path)
timestamp = volume_information.get_creation_time_as_integer()
if timestamp:
event_data = windows_events.WindowsVolumeEventData()
event_data.device_path = volume_device_path
event_data.origin = parser_mediator.GetFilename()
event_data.serial_number = volume_serial_number
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
# Derive the executable's path relative to the owning volume; the last
# matching filename wins.
for filename in iter(scca_file.filenames):
if (not filename):
continue
if (filename.startswith(volume_device_path) and filename.endswith(executable_filename)):
(_, _, path) = filename.partition(volume_device_path)
mapped_files = []
for (entry_index, file_metrics) in enumerate(scca_file.file_metrics_entries):
mapped_file_string = file_metrics.filename
if (not mapped_file_string):
parser_mediator.ProduceExtractionWarning('missing filename for file metrics entry: {0:d}'.format(entry_index))
continue
file_reference = file_metrics.file_reference
if file_reference:
# Split the NTFS file reference: low 48 bits are the MFT entry
# number, high 16 bits the sequence number.
mapped_file_string = '{0:s} [MFT entry: {1:d}, sequence: {2:d}]'.format(mapped_file_string, (file_reference & 281474976710655), (file_reference >> 48))
mapped_files.append(mapped_file_string)
event_data = WinPrefetchExecutionEventData()
event_data.executable = executable_filename
event_data.mapped_files = mapped_files
event_data.number_of_volumes = number_of_volumes
event_data.path = path
event_data.prefetch_hash = prefetch_hash
event_data.run_count = run_count
event_data.version = format_version
event_data.volume_device_paths = volume_device_paths
event_data.volume_serial_numbers = volume_serial_numbers
# Slot 0 holds the most recent run time; a missing value is reported
# but still produces an event with a semantic "Not set" time.
timestamp = scca_file.get_last_run_time_as_integer(0)
if (not timestamp):
parser_mediator.ProduceExtractionWarning('missing last run time')
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
else:
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_RUN)
parser_mediator.ProduceEventWithEventData(event, event_data)
if (format_version >= 26):
# Format versions >= 26 store up to 7 additional (previous) run
# times in slots 1-7.
for last_run_time_index in range(1, 8):
timestamp = scca_file.get_last_run_time_as_integer(last_run_time_index)
if (not timestamp):
continue
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
date_time_description = 'Previous {0:s}'.format(definitions.TIME_DESCRIPTION_LAST_RUN)
event = time_events.DateTimeValuesEvent(date_time, date_time_description)
parser_mediator.ProduceEventWithEventData(event, event_data)
scca_file.close() | Parses a Windows Prefetch file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object. | codesearchnet |
def dr( self, cell_lengths ):
    """Particle displacement vector for this jump.

    The raw displacement is wrapped into [-L/2, +L/2] along each axis
    (minimum-image convention for an orthogonal cell).

    Args:
        cell_lengths (np.array(x,y,z)): Cell lengths for the orthogonal simulation cell.

    Returns:
        (np.array(x,y,z)): dr
    """
    half_cell = cell_lengths / 2.0
    displacement = self.final_site.r - self.initial_site.r
    for axis in range( 3 ):
        if displacement[ axis ] > half_cell[ axis ]:
            displacement[ axis ] -= cell_lengths[ axis ]
        if displacement[ axis ] < -half_cell[ axis ]:
            displacement[ axis ] += cell_lengths[ axis ]
    return displacement
Args:
cell_lengths (np.array(x,y,z)): Cell lengths for the orthogonal simulation cell.
Returns
(np.array(x,y,z)): dr | juraj-google-style |
def _num_elements(self) -> NoReturn:
# Abstract hook: subclasses must override this; invoking the base
# implementation always fails.
raise NotImplementedError() | Number of elements of this Tensor.
Unlike regular Tensors, the number of elements is always known for
EagerTensors.
This is more performant than tensor.shape.num_elements
Returns:
Long - num elements in the tensor | github-repos |
def guass(self, mu: float, sigma: float) -> float:
    """Return a random number using Gaussian distribution.

    NOTE(review): the method name is an apparent misspelling of "gauss";
    it is kept unchanged for backwards compatibility with callers.

    Args:
        mu (float): The mean of the distribution (also its median).
        sigma (float): The standard deviation.

    Returns:
        float: A random float.
    """
    sample = lib.TCOD_random_get_gaussian_double(self.random_c, mu, sigma)
    return float(sample)
Args:
mu (float): The median returned value.
sigma (float): The standard deviation.
Returns:
float: A random float. | juraj-google-style |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.