# content stringlengths 22 815k | id int64 0 4.91M |
def get_available_tf_versions(include_prerelease: bool = False) -> List[str]:
    """Return available Terraform versions, newest first.

    :param include_prerelease: if True, also include pre-release versions
        (those containing a ``-``, e.g. ``0.12.0-beta1``)
    :return: version strings sorted descending by version number
    """
    # Fetch the published release index from HashiCorp. The explicit timeout
    # keeps a stalled endpoint from hanging the caller indefinitely, and
    # Response.json() replaces the manual json.loads(resp.text) round trip.
    response = requests.get("https://releases.hashicorp.com/index.json", timeout=30)
    tf_releases = response.json()["terraform"]
    tf_versions = sorted(
        tf_releases["versions"].keys(),
        key=LooseVersion,
        reverse=True,  # descending
    )
    if include_prerelease:
        return [i for i in tf_versions if i]
    # Pre-release versions are marked with a '-' suffix (e.g. '-beta1').
    return [i for i in tf_versions if i and "-" not in i]
def relay_tagged(c, x, tag):
    """Implementation of tagged for Relay.

    :param c: converter context providing ``ref`` to resolve *x*
    :param x: node to wrap in the tagged union
    :param tag: node holding the constant integer tag
    :return: the union-constructor application for the given tag
    """
    # The tag must be a compile-time integer constant.
    assert tag.is_constant(int)
    # NOTE(review): second argument of get_union_ctr appears unused here —
    # confirm against its definition.
    rtag = get_union_ctr(tag.value, None)
    return rtag(c.ref(x))
def get_vf(Xf, Nf):
    """
    compute the 1-spectrogram of the projection of a frequency band of the mix at 1 frequency on some directions

    :param Xf: T x I complex STFT of mix at a given f
    :param Nf: Mp x Md x I projection matrix
    :return: Vf: Mp x Ml x Nt magnitude spectrogram of projection
    """
    # Contract the channel axis (last axis of Nf with axis 1 of Xf),
    # then take element-wise magnitudes.
    projection = np.tensordot(Nf, Xf, axes=(-1, 1))
    return np.abs(projection)
def gen_data_code(stream, bits=ic.core_opts.data_bits):
    # type: (ic.Stream, int) -> dict
    """
    Create a similarity preserving ISCC Data-Code with the latest standard algorithm.
    :param Stream stream: Input data stream.
    :param int bits: Bit-length of ISCC Data-Code (default 64).
    :return: ISCC Data-Code
    :rtype: dict
    """
    # Thin dispatcher: v0 is currently the latest algorithm version.
    return gen_data_code_v0(stream, bits)
def file_get_size_in_bytes(path: str) -> int:
    """Return the size of the file at *path* in bytes."""
    # os.path.getsize is a direct wrapper around os.stat(path).st_size.
    return os.path.getsize(path)
def model_fn_builder(
        bert_config,
        init_checkpoint,
        layer_indexes,
        use_tpu,
        use_one_hot_embeddings):
    """Returns `model_fn` closure for TPUEstimator.

    :param bert_config: modeling.BertConfig used to build the BERT model
    :param init_checkpoint: checkpoint path used to initialize weights
    :param layer_indexes: encoder layer indices to expose as predictions
    :param use_tpu: if True, run checkpoint init inside a TPU Scaffold
    :param use_one_hot_embeddings: forwarded to modeling.BertModel
    """
    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator (feature extraction, PREDICT only)."""
        model = modeling.BertModel(
            config= bert_config,
            is_training= False,
            input_ids= features['input_ids'],
            input_mask= features['input_mask'],
            token_type_ids= features['input_type_ids'],
            use_one_hot_embeddings= use_one_hot_embeddings)
        # This builder only supports inference; training/eval is rejected.
        if mode != tf.estimator.ModeKeys.PREDICT: raise ValueError("Only PREDICT modes are supported: %s" % (mode))
        tvars = tf.trainable_variables()
        scaffold_fn = None
        (assignment_map,
         initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(
             tvars, init_checkpoint)
        if use_tpu:
            # On TPU, init_from_checkpoint must run inside the Scaffold fn.
            def tpu_scaffold():
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
                return tf.train.Scaffold()
            scaffold_fn = tpu_scaffold
        else:
            tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        tf.logging.info("**** Trainable Variables ****")
        for var in tvars:
            init_string = ""
            if var.name in initialized_variable_names:
                init_string = ", *INIT_FROM_CKPT*"
            tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                            init_string)
        all_layers = model.get_all_encoder_layers()
        # Echo the example id so outputs can be joined back to inputs.
        predictions = {"unique_id": features["unique_ids"]}
        for (i, layer_index) in enumerate(layer_indexes):
            predictions["layer_output_%d" % i] = all_layers[layer_index]
        output_spec = tf.contrib.tpu.TPUEstimatorSpec(
            mode= mode,
            predictions= predictions,
            scaffold_fn= scaffold_fn)
        return output_spec
    return model_fn
def save_labeled_tree(args):
    """Convert bracketed parse trees to labeled-span records, one JSON line each.

    Each line of ``args.ifile`` is a parse tree; each output line of
    ``args.ofile`` is a JSON array ``(sent: str, spans: list, labels: list,
    tags: list)``.
    """
    with open(args.ifile, "r") as fr, \
            open(args.ofile, "w") as fw:
        for tree in fr:
            tree = tree.strip()
            action = get_actions(tree)
            tags, sent, sent_lower = get_tags_tokens_lowercase(tree)
            gold_span, binary_actions = get_nonbinary_spans_label(action)
            sent = ' '.join(sent)
            # Split the (start, end, label) triples into parallel lists.
            spans = [(a, b) for a, b, _ in gold_span]
            labels = [l for _, _, l in gold_span]
            data = (sent, spans, labels, tags)
            json.dump(data, fw)
            fw.write("\n")
def git2pep440(ver_str):
    """
    Converts a git description to a PEP440 conforming string

    :param ver_str: git version description
    :return: PEP440 version description
    :raises RuntimeError: when the description has an unexpected shape
    """
    parts = ver_str.split('-')
    n_parts = len(parts)
    if n_parts == 1:
        # plain release tag
        return ver_str
    if n_parts == 2:
        # tag + dirty marker
        return parts[0] + "+dirty"
    if n_parts == 3:
        # tag, commit count, sha
        return "{}.post0.dev{}+{}".format(parts[0], parts[1], parts[2])
    if n_parts == 4:
        # tag, commit count, sha, dirty marker
        return "{}.post0.dev{}+{}.dirty".format(parts[0], parts[1], parts[2])
    raise RuntimeError("Invalid version string")
def rejoin(hyphenated, line):
    """Add hyphenated word part to line start, dehyphenating when required.

    The hyphen is kept unless both halves look like parts of one regular,
    all-alphabetic, non-proper-noun word. The first part is not checked for
    proper-noun capitalization, since start-of-sentence capitalization may
    confound the capitalization pattern.
    """
    head, hyphen = split_hyphen(hyphenated)
    tail, remainder = split_first_token(line)
    keep_hyphen = (
        # same vowel before and after hyphen
        is_same_vowel(head[-1], tail[0])
        # only join alphabetic with alphabetic char
        or not (head[-1].isalpha() and tail[0].isalpha())
        # one or both are not "regular" words
        or not (is_regular_word(head) and is_regular_word(tail))
        # keep hyphen for proper noun compounds
        or is_proper_noun(tail)
    )
    joined = head + hyphen + tail if keep_hyphen else head + tail
    return joined + remainder
def msg_bytes(msg: Union[bytes, bytearray, zmq.Frame]) -> Union[bytes, bytearray]:
    """Return message frame as bytes.

    zmq.Frame objects are unwrapped via their ``bytes`` attribute;
    plain bytes/bytearray messages are returned unchanged.
    """
    if isinstance(msg, zmq.Frame):
        return msg.bytes
    return msg
def test_build_symmetry_operations():
    """Check that all the symmetry operations are built correctly."""
    one = np.identity(4)
    two = np.zeros((4, 4))
    two[1, 0] = 1.0
    two[0, 1] = 1.0
    two[2, 2] = 1.0
    two[3, 3] = 1.0
    three = np.zeros((4, 4))
    three[0, 0] = 1.0
    three[1, 1] = 1.0
    three[3, 2] = 1.0
    three[2, 3] = 1.0
    ref = [[one, 1.0, False], [two, 1.0, True], [three, 1.0, True]]
    test = [
        [[1, 2, 3, 4], 1.0, False],
        [[2, 1, 3, 4], 1.0, True],
        [[1, 2, 4, 3], 1.0, True],
    ]
    tensor_utils.build_symmetry_operations(test)
    for val in range(3):
        # Compare matrices element-wise. The previous check
        # `ref[val][0].all() == test[val][0].all()` only compared two scalar
        # truth values and could never detect a mismatched matrix.
        assert np.array_equal(ref[val][0], np.asarray(test[val][0]))
def abort(msg):
    """ Prints the given error message and aborts the program with a return
    code of 1.

    :param msg: message echoed to the terminal in red
    """
    click.secho(msg, fg='red')
    sys.exit(1)
def iucr_string(values):
    """Convert a central value (average) and its s.u. into an IUCr compliant number representation.

    The standard uncertainty (s.u.) is rendered as one or two significant
    digits in parentheses after the average, which is rounded to the same
    decimal position (e.g. 1.234(5)).

    :param values: pair of central value (average) and s.u.
    :type values: tuple((float, float))
    :return: IUCr compliant representation
    :rtype: str
    """
    if values[1] == 0 or values[1] is None:  # No or zero s.u. given
        return str(values[0])
    sig_pos = math.floor(math.log10(abs(values[1])))  # position of first significant digit
    sig_3 = math.trunc(abs(values[1]) * 10 ** (2 - sig_pos)) / 10 ** (2 - sig_pos)  # 1st three significant s.u. digits
    sig_3 *= 10 ** -(sig_pos + 1)  # s.u. moved directly behind decimal separator (final range: 0.100-0.999)
    if sig_3 < 0.195:  # round to two digits (final s.u. range: 0.10-0.19)
        su = round(abs(values[1]), 1 - sig_pos)
        avg = round(values[0], 1 - sig_pos)
        sig_len = 2
    elif sig_3 < 0.950:  # round to one digit (final s.u. range: 0.2-0.9)
        su = round(abs(values[1]), -sig_pos)
        avg = round(values[0], -sig_pos)
        sig_len = 1
    else:  # round to two digits and move forward (final s.u.: 0.10)
        sig_pos += 1
        su = round(abs(values[1]), 1 - sig_pos)
        avg = round(values[0], 1 - sig_pos)
        sig_len = 2
    if sig_pos > 0:  # only integral part for s.u. >= 1.95
        # sign_shift keeps the field width correct for negative averages.
        sign_shift = -1 if values[0] < 0 else 0
        avg_str = ('{:' + str(sig_pos + sign_shift) + '.0f}').format(avg).strip()
        su_str = ('{:' + str(sig_pos) + '.0f}').format(su)
    else:  # fractional and possibly integral part for s.u. < 1.95
        avg_str = ('{:.' + str(-sig_pos + sig_len - 1) + 'f}').format(avg)
        su_str = '{:.0f}'.format(abs(su / 10 ** (sig_pos - sig_len + 1)))
    return '{:s}({:s})'.format(avg_str, su_str)
def augment_signals(ds, augment_configs):
    """
    Apply all augmentation methods specified in 'augment_config' and return a dataset where all elements are drawn randomly from the augmented and unaugmented datasets.
    """
    augmented = []
    for conf in augment_configs:
        aug_type = conf["type"]
        # Everything except the bookkeeping keys is forwarded to the
        # augmentation function as keyword arguments.
        aug_kwargs = {key: val for key, val in conf.items()
                      if key not in ("type", "split")}
        if aug_type == "random_resampling":
            augmented.append(augment_by_random_resampling(ds, **aug_kwargs))
        elif aug_type == "additive_noise":
            augmented.append(augment_by_additive_noise(ds, **aug_kwargs))
        else:
            logger.warning("Unknown signal augmentation type '%s', skipping", aug_type)
    # Sample randomly from the unaugmented dataset and all augmented datasets
    return tf.data.experimental.sample_from_datasets([ds] + augmented)
def sse_pack(d):
    """For sending sse to client. Formats a dictionary into correct form for SSE.

    :param d: dict that may contain any of the keys 'retry', 'id', 'event',
        'data'; other keys are ignored
    :return: SSE-formatted message string, terminated by a blank line
    """
    # SSE fields must appear in this fixed order; `k in d` avoids the
    # redundant d.keys() call of the original.
    lines = ['{}: {}\n'.format(k, d[k])
             for k in ('retry', 'id', 'event', 'data') if k in d]
    return ''.join(lines) + '\n'
def proj_beta_model(r2d_kpc, n0, r_c, beta):
    r"""
    Compute a projected beta model:
    P(R) = \int n_e dl at given R

    Parameters
    ----------
    - r2d_kpc: array of projected radius at which to compute integration
    - n0 : normalization
    - r_c : core radius parameter
    - beta : slope of the profile

    Outputs
    --------
    - The projected profile in units of kpc times original profile
    """
    # Analytic line-of-sight integral of the beta model: a radius-independent
    # normalization times a power-law shape in (R / r_c).
    norm = np.sqrt(np.pi) * n0 * r_c * gamma(1.5 * beta - 0.5) / gamma(1.5 * beta)
    shape = (1 + (r2d_kpc / r_c) ** 2) ** (0.5 - 1.5 * beta)
    return norm * shape
def read_raw_datafile(filename):
    """
    Read and format the weather data from one csv file downloaded from the
    climate.weather.gc.ca website.
    """
    dataset = pd.read_csv(filename, dtype='str')
    keep = ['Date/Time', 'Year', 'Month', 'Day', 'Max Temp (°C)', 'Min Temp (°C)',
            'Mean Temp (°C)', 'Total Precip (mm)']
    # Parse dates and use them as the index.
    dataset['Date/Time'] = pd.to_datetime(
        dataset['Date/Time'], format="%Y-%m-%d")
    # Drop every column that is not in the known-good list.
    extra_cols = [c for c in dataset.columns if c not in keep]
    dataset = dataset.drop(labels=extra_cols, axis=1)
    return dataset.set_index('Date/Time', drop=True)
def zeros(shape, backend=TensorFunctions):
    """
    Produce a zero tensor of size `shape`.

    Args:
        shape (tuple): shape of tensor
        backend (:class:`Backend`): tensor backend

    Returns:
        :class:`Tensor` : new tensor
    """
    n_elements = int(operators.prod(shape))
    return Tensor.make([0] * n_elements, shape, backend=backend)
def delete(name: str):
    """Remove a Maestral configuration.

    Deletes every file in the Maestral config directory whose name starts
    with *name*; prints the outcome via click.

    :param name: name of the configuration to delete
    """
    if name not in list_configs():
        click.echo("Configuration '{}' could not be found.".format(name))
    else:
        from maestral.config.base import get_conf_path
        # Resolve the config directory once instead of once per file.
        conf_dir = get_conf_path("maestral")
        # NOTE(review): startswith(name) also matches configs whose name
        # merely shares this prefix — confirm that is intended.
        for file in os.listdir(conf_dir):
            if file.startswith(name):
                os.unlink(os.path.join(conf_dir, file))
        click.echo("Deleted configuration '{}'.".format(name))
def formatLabels(labels, total_time, time):
    """Format labels into vector where each value represents a window of
    time seconds.

    :param labels: iterable of dicts with 'start' and 'duration' (seconds)
    :param total_time: total recording length in seconds
    :param time: window size in seconds
    :return: numpy vector with 1 for windows overlapping a label, else 0
    """
    # Windows whose overlap with a label is at most this many seconds are
    # trimmed off the label's window range.
    time_threshold = 1
    num_windows = total_time // time
    Y = np.zeros(num_windows)
    for label in labels:
        start = label['start']
        duration = label['duration']
        end = start + duration
        start_window = int(round(start / time))
        end_window = int(round(end / time))
        if end_window > start_window:
            # NOTE(review): 30 looks like a hard-coded window size that
            # presumably should equal `time` — confirm with callers.
            window_limit = (start_window + 1) * 30
            if window_limit - start <= time_threshold:
                start_window += 1
            if end - window_limit <= time_threshold:
                end_window -= 1
        Y[start_window:end_window + 1] = 1
    print("{} arousals".format(len(labels)))
    return Y
def line(line_def, **kwargs):
    """Highlights a character in the line.

    Each '@'-prefixed character in *line_def* is replaced by that character
    bolded and wrapped in parentheses; the whole result is then formatted
    with the remaining keyword arguments.
    """
    def _bold(match):
        return "(%s)" % ansi.aformat(match.group()[1:], attrs=["bold", ])

    highlighted = re.sub('@.?', _bold, line_def)
    return ansi.aformat(highlighted, **kwargs)
def ceil_datetime_at_minute_interval(timestamp, minute):
    """
    Round *timestamp* up to the next multiple of *minute* minutes within the
    hour; timestamps already on a boundary are returned unchanged.

    From http://stackoverflow.com/questions/13071384/python-ceil-a-datetime-to-next-quarter-of-an-hour

    :param timestamp:
    :type timestamp: datetime.datetime
    :param minute:
    :type minute: int
    :return:
    :rtype: datetime.datetime
    """
    # seconds elapsed since the top of the hour (fractional, incl. microseconds)
    elapsed = timestamp.minute * 60 + timestamp.second + timestamp.microsecond * 1e-6
    interval = minute * 60
    # distance to the next interval boundary; equals `interval` exactly when
    # the timestamp already sits on a boundary
    remainder = (elapsed // interval) * interval + interval - elapsed
    if remainder >= interval:
        return timestamp
    return timestamp + datetime.timedelta(seconds=remainder)
def iterate_fasta(fasta, database = 'data/gsaid.db'):
    """
    Function to iterate through aligned fasta file *non threaded* and insert
    every (header, sequence) record into the sqlite database.

    :param fasta: file containing sequences
    :param database: path to the sqlite database to insert into
    """
    # NOTE(review): the file handle passed to convert_fasta is never closed.
    handle = convert_fasta(open(fasta))
    cursor, conn = open_connection(database)
    for header, seq in handle:
        insert_seq(cursor, header, seq)
    # Single commit after all inserts, then close the connection.
    conn.commit()
    conn.close()
def make_response(
    activated=True,
    expires_in=0,
    auto_activation_supported=True,
    oauth_server=None,
    DATA=None,
):
    """
    Helper for making ActivationRequirementsResponses with known fields.

    Builds a fake ``requests.Response`` whose JSON body carries the given
    activation fields, wraps it in a GlobusHTTPResponse with a mock client,
    and returns the ActivationRequirementsResponse over it.
    """
    DATA = DATA or []
    payload = {
        "activated": activated,
        "expires_in": expires_in,
        "oauth_server": oauth_server,
        "DATA": DATA,
        "auto_activation_supported": auto_activation_supported,
    }
    fake = requests.Response()
    fake.headers["Content-Type"] = "application/json"
    fake._content = json.dumps(payload).encode("utf-8")
    wrapped = GlobusHTTPResponse(fake, client=mock.Mock())
    return ActivationRequirementsResponse(wrapped)
def post_create_manager_config(
    api_client,
    id,
    log_files=None,
    configuration_files=None,
    ports=None,
    process_manager=None,
    executables=None,
    **kwargs
):  # noqa: E501
    """post_create_manager_config  # noqa: E501

    Create a new plugin manager configuration. If no params are provided,
    a vanilla example configuration will be created. See docs for param specs:
    https://docs.cohesive.net/docs/network-edge-plugins/plugin-manager/  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> response = await api.post_create_manager_config(client, id, async_req=True)

    :param int id: ID for Plugin instance (running container) (required)
    :param log_files list: List of Log File objects
    :param configuration_files list: List of Configuration File objects
    :param ports list: List of Port objects
    :param process_manager Dict: Process Manager object
    :param executables list: List of Executable objects
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: APIResponse or awaitable if async
    """
    # Snapshot of all arguments; used below to pick out body parameters.
    local_var_params = locals()
    request_params = [
        "log_files",
        "configuration_files",
        "ports",
        "process_manager",
        "executables",
    ]
    collection_formats = {}
    path_params = {"id": id}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = {}
    # Only explicitly-provided params are forwarded in the request body.
    for param in [p for p in request_params if local_var_params.get(p) is not None]:
        body_params[param] = local_var_params[param]
    # HTTP header `Accept`
    header_params["Accept"] = api_client.select_header_accept(
        ["application/json"]
    )  # noqa: E501
    # HTTP header `Content-Type`
    header_params["Content-Type"] = api_client.select_header_content_type(  # noqa: E501
        ["application/json"]
    )  # noqa: E501
    # Authentication setting
    auth_settings = ["ApiTokenAuth", "basicAuth"]  # noqa: E501
    return api_client.call_api(
        "/plugin-instances/{id}/manager",
        "POST",
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type="object",  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get("async_req"),
        _return_http_data_only=local_var_params.get(
            "_return_http_data_only"
        ),  # noqa: E501
        _preload_content=local_var_params.get("_preload_content", True),
        _request_timeout=local_var_params.get("_request_timeout"),
        collection_formats=collection_formats,
    )
def get_long_desc() -> str:
    """ read long description and adjust master with version for badges or links
    only for release versions (x.y.z)

    Reads README.md from the module-level ``wd`` directory. For release
    versions, badge/link lines at the top of the README are rewritten to
    point at the released version instead of master.
    """
    # Populates the module-level _version as a side effect.
    get_version()
    release = re.compile(r'(\d+\.){0,2}\d+$')
    with open(os.path.join(wd, 'README.md')) as fd:
        if _version == '0.0.0' or not release.match(_version):
            # Not a release build: use the README unchanged.
            _long_description = fd.read()
        else:
            lines = fd.readlines()
            for i, line in enumerate(lines):
                # Badge/link lines are the leading '['-prefixed lines only.
                if not line.startswith('['):
                    break
                if 'travis' in line:
                    lines[i] = line.replace('master', _version)
                elif 'codecov' in line:
                    commit = get_commit()
                    if commit != '':
                        lines[i] = line.replace('branch/master',
                                                'commit/' + commit)
            _long_description = ''.join(lines)
    return _long_description
def get_current_user():
    """Load current user or use anon user.

    Returns an auth.User filled with anonymous defaults; all optional
    fields are left unset.
    """
    # NOTE(review): 'visiblity' presumably mirrors the auth.User field
    # spelling — confirm before "fixing" it.
    return auth.User(
        uuid=None,
        login='anon',
        password='',
        name='anon',
        visiblity=None,
        language=None,
        last_seen=None,
    )
def validate(number, alphabet='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ*'):
    """Check whether the check digit is valid.

    Returns *number* unchanged when its checksum over *alphabet* equals 1;
    raises InvalidFormat when the checksum cannot be computed and
    InvalidChecksum when it is wrong.
    """
    try:
        ok = checksum(number, alphabet) == 1
    except Exception:  # noqa: B902
        raise InvalidFormat()
    if not ok:
        raise InvalidChecksum()
    return number
def pct_chg(x: Union[np.ndarray, pd.Series]) -> np.ndarray:
    """Percentage change between the current and a prior element.

    Args:
        x: A numpy.ndarray or pandas.Series object

    Returns:
        A numpy.ndarray of shape (n, 1); the first entry is NaN.
    """
    x = x.astype("float64")
    if isinstance(x, pd.DataFrame):
        return x.pct_change().values.reshape(-1, 1)
    # Flatten, wrap as a Series so pandas computes the change, then
    # return a column vector.
    flat = pd.Series(np.reshape(x, (-1,)), name="x")
    return flat.pct_change().values.reshape(-1, 1)
def calc_gram_matrix(input_mat):
    r"""
    Compute the normalized Gram matrix of a feature map.

    Paper directly mentions about calculating Gram matrix:

        G_{ij}^l = \sum_k F_{ik}^l F_{jk}^l

    i and j stand for filter (channel) position and k stands for position in
    each filter. If matrix A is composed of column vectors a1, a2, a3, ...,
    then the Gram matrix G can be calculated as $G = A^T \cdot A$.

    inputs:
        It takes input shape of [1, height, width, channel]
    returns:
        [channel, channel] Gram matrix, divided by height * width
    """
    channel_size = input_mat.shape[-1]
    # From [1, height, width, channel] to [1, height * width, channel]
    vectorized_input = tf.reshape(input_mat, [1, -1, channel_size])
    # Drop the leading batch axis: shape [height * width, channel]
    mat_2d = vectorized_input[0, :, :]
    # F has shape [channel, height * width]
    F = tf.transpose(mat_2d)
    # Calculate gram matrix
    gram_mat = tf.linalg.matmul(F, mat_2d)  # this produce the shape of [channel, channel]
    # Normalize by the number of spatial positions.
    feature_map_size = input_mat.shape[1] * input_mat.shape[2]
    return gram_mat / feature_map_size
def parse_annotations_with_food_part_template(annotations, premise):
    """Aggregate per-annotator NER judgements for a single premise.

    Groups the rows of *annotations* for *premise* by hypothesis, takes the
    majority vote over the "correct_ner" column ("Entails" vs
    "Not Entails/Error") and emits one aggregated record per hypothesis.
    Tied votes are reported back instead of aggregated.

    :param annotations: DataFrame with "premise", "hypothesis", "correct_ner",
        "id" and the "hypothesis_*" entity columns
    :param premise: premise string whose annotations are aggregated
    :return: tuple (annotations_aggregated, annotations_reported) of row lists
    """
    annotations_aggregated = []
    annotations_reported = []
    rows_grouped_by_premises = annotations[annotations["premise"] == premise]
    for hypothesis in rows_grouped_by_premises["hypothesis"].unique():
        rows_grouped_by_hypotheses = rows_grouped_by_premises[
            rows_grouped_by_premises["hypothesis"] == hypothesis
        ]
        ners = rows_grouped_by_hypotheses[f"correct_ner"]
        n_entail = len(ners[ners == "Entails"])
        n_non_entail = len(ners[ners == "Not Entails/Error"])
        # Report to the annotator if annotation results are highly
        # non consensus.
        if n_entail == n_non_entail:
            annotations_reported.append(
                [rows_grouped_by_hypotheses.iloc[0]["id"], premise, hypothesis]
            )
            continue
        correct_ner = "Entails" if n_entail > n_non_entail else "Not Entails"
        # Strip a leading sentence marker from the premise if present.
        if rows_grouped_by_hypotheses["premise"].values[0][:3] == "<s>":
            premise_filtered = rows_grouped_by_hypotheses["premise"].values[0][
                3:
            ]
        else:
            premise_filtered = rows_grouped_by_hypotheses["premise"].values[0]
        id_ = rows_grouped_by_hypotheses.iloc[0]["id"]
        food = rows_grouped_by_hypotheses.iloc[0]["hypothesis_food"]
        food_id = rows_grouped_by_hypotheses.iloc[0]["hypothesis_food_id"]
        food_part = rows_grouped_by_hypotheses.iloc[0]["hypothesis_food_part"]
        food_part_id = None
        chemical = rows_grouped_by_hypotheses.iloc[0]["hypothesis_chemical"]
        chemical_id = rows_grouped_by_hypotheses.iloc[0][
            "hypothesis_chemical_id"
        ]
        # Concentration fields are not produced by this template.
        conc_value = None
        conc_value_id = None
        conc_unit = None
        conc_unit_id = None
        annotations_aggregated.append(
            [
                id_,
                premise_filtered,
                hypothesis.replace("(whole plant)", ""),
                correct_ner,
                n_entail,
                n_non_entail,
                food,
                food_id,
                food_part,
                food_part_id,
                chemical,
                chemical_id,
                conc_value,
                conc_value_id,
                conc_unit,
                conc_unit_id,
            ]
        )
    return annotations_aggregated, annotations_reported
def filter_dictionary(dictionary, filter_func):
    """
    Return the key of the first element of `dictionary` whose value passes
    `filter_func`; returns None when nothing matches.

    `filter_func` can be either a callable or a value:
    - if callable, filtering is checked with ``filter_func(element_value)``
    - if value, filtering is checked with ``element_value == filter_func``

    :param dictionary: mapping to search
    :param filter_func: predicate callable or value to compare against
    :return: the matching key, or None

    >>> filter_dictionary({'arg': 'test'}, 'test')
    'arg'
    >>> filter_dictionary({}, 'test')
    >>> def is_test(value):
    ...     return value == 'test'
    >>> filter_dictionary({'arg': 'test'}, is_test)
    'arg'
    """
    if callable(filter_func):
        test_func = filter_func
    else:
        def test_func(value):
            return value == filter_func
    # .items() replaces the Python-2-only .iteritems() — this file is
    # otherwise Python 3 (f-strings, annotations).
    for key, value in dictionary.items():
        if test_func(value):
            return key
    return None
def restart_workflow(workflow_id, clear_data=False, delete_files=False):
    """Restart a workflow with the latest spec.
    Clear data allows user to restart the workflow without previous data.

    :param workflow_id: primary key of the WorkflowModel to reset
    :param clear_data: if True, drop previously entered task data
    :param delete_files: if True, also remove attached files
    :return: the API representation of the freshly reset workflow
    """
    workflow_model: WorkflowModel = session.query(WorkflowModel).filter_by(id=workflow_id).first()
    # NOTE(review): workflow_model is None for an unknown id — reset would
    # then fail; confirm callers guarantee existence.
    WorkflowProcessor.reset(workflow_model, clear_data=clear_data, delete_files=delete_files)
    return get_workflow(workflow_model.id)
def create_template_alias(AwsAccountId=None, TemplateId=None, AliasName=None, TemplateVersionNumber=None):
    """
    Creates a template alias for a template.
    See also: AWS API Documentation

    Exceptions

    :example: response = client.create_template_alias(
        AwsAccountId='string',
        TemplateId='string',
        AliasName='string',
        TemplateVersionNumber=123
    )

    :type AwsAccountId: string
    :param AwsAccountId: [REQUIRED]\nThe ID of the AWS account that contains the template that you creating an alias for.\n
    :type TemplateId: string
    :param TemplateId: [REQUIRED]\nAn ID for the template.\n
    :type AliasName: string
    :param AliasName: [REQUIRED]\nThe name that you want to give to the template alias that you\'re creating. Don\'t start the alias name with the $ character. Alias names that start with $ are reserved by QuickSight.\n
    :type TemplateVersionNumber: integer
    :param TemplateVersionNumber: [REQUIRED]\nThe version number of the template.\n

    :rtype: dict

    ReturnsResponse Syntax
    {
        'TemplateAlias': {
            'AliasName': 'string',
            'Arn': 'string',
            'TemplateVersionNumber': 123
        },
        'Status': 123,
        'RequestId': 'string'
    }

    Response Structure

    (dict) --
    TemplateAlias (dict) --
    Information about the template alias.
    AliasName (string) --
    The display name of the template alias.
    Arn (string) --
    The Amazon Resource Name (ARN) of the template alias.
    TemplateVersionNumber (integer) --
    The version number of the template alias.
    Status (integer) --
    The HTTP status of the request.
    RequestId (string) --
    The AWS request ID for this operation.

    Exceptions

    QuickSight.Client.exceptions.ThrottlingException
    QuickSight.Client.exceptions.ResourceNotFoundException
    QuickSight.Client.exceptions.ConflictException
    QuickSight.Client.exceptions.ResourceExistsException
    QuickSight.Client.exceptions.LimitExceededException
    QuickSight.Client.exceptions.UnsupportedUserEditionException
    QuickSight.Client.exceptions.InternalFailureException

    :return: {
        'TemplateAlias': {
            'AliasName': 'string',
            'Arn': 'string',
            'TemplateVersionNumber': 123
        },
        'Status': 123,
        'RequestId': 'string'
    }

    :returns:
    QuickSight.Client.exceptions.ThrottlingException
    QuickSight.Client.exceptions.ResourceNotFoundException
    QuickSight.Client.exceptions.ConflictException
    QuickSight.Client.exceptions.ResourceExistsException
    QuickSight.Client.exceptions.LimitExceededException
    QuickSight.Client.exceptions.UnsupportedUserEditionException
    QuickSight.Client.exceptions.InternalFailureException
    """
    # Documentation-only stub — presumably generated from the boto3/QuickSight
    # API docs; the real call is made by the service client.
    pass
def spike_lmax(S, Q):
    """Maximum spike given a perturbation.

    Evaluates (1/Q + S^2) * (1 + 1/S^2) for spike strength S and
    aspect ratio Q.
    """
    s_squared = S * S
    left = 1.0 / Q + s_squared
    right = 1.0 + 1.0 / s_squared
    return left * right
def get_engine():
    """Returns the db engine.

    Lazily creates a SQLAlchemy engine for the configured SQLite database
    and caches it on the Flask application-context object ``g``.
    """
    if not hasattr(g, 'sqlite_engine'):
        # echo=True logs all emitted SQL — presumably left on for debugging.
        g.sqlite_engine = create_engine('sqlite:///' + app.config['DATABASE'], echo=True)
    return g.sqlite_engine
def is_on_curve(point):
    """Returns True if the given point lies on the elliptic curve.

    Checks y^2 == x^3 + a*x + b (mod p) against the module-level `curve`.
    """
    if point is None:
        # None represents the point at infinity, which is on the curve.
        return True
    x, y = point
    lhs = y * y
    rhs = x * x * x + curve.a * x + curve.b
    return (lhs - rhs) % curve.p == 0
def find_closest_point(
    odlc: Dict[str, float],
    boundary_points: List[Dict[str, float]],
    obstacles: List[Dict[str, float]],
) -> Tuple[Dict[str, float], List[float]]:
    """Finds the closest safe point to the ODLC while staying within the flight boundary

    Parameters
    ----------
    odlc : Dict[str, float]
        Point data for the ODLC object
    boundary_points : List[Dict[str, float]]
        Point data which makes up the flight boundary
    obstacles : List[Dict[str, float]]
        Point data for the obstacles

    Returns
    -------
    Tuple[Dict[str, float], List[float]]
        Closest safe point, and the shrunken boundary (for plotting)
    """
    # Build the flight boundary polygon in UTM coordinates.
    poly_points = [(point["utm_x"], point["utm_y"]) for point in boundary_points]
    boundary_shape = Polygon(poly_points)
    odlc_shape = Point(odlc["utm_x"], odlc["utm_y"])
    for obstacle in obstacles:
        # create obstacle as shapely shape
        circle = Point(obstacle["utm_x"], obstacle["utm_y"]).buffer(obstacle["radius"]).boundary
        obstacle_shape = Polygon(circle)
        # remove obstacle area from boundary polygon
        boundary_shape = boundary_shape.difference(obstacle_shape)
    # scale down boundary by 1% to add a safety margin
    boundary_shape = scale_polygon(boundary_shape, 0.01)
    p_1, _ = nearest_points(
        boundary_shape, odlc_shape
    )  # point returned in same order as input shapes
    closest_point = p_1
    # Keep the ODLC's UTM zone for the lat/lon conversion below.
    zone_number = odlc["utm_zone_number"]
    zone_letter = odlc["utm_zone_letter"]
    return (
        {
            "utm_x": closest_point.x,
            "utm_y": closest_point.y,
            "utm_zone_number": zone_number,
            "utm_zone_letter": zone_letter,
            "latitude": utm.to_latlon(closest_point.x, closest_point.y, zone_number, zone_letter)[
                0
            ],
            "longitude": utm.to_latlon(closest_point.x, closest_point.y, zone_number, zone_letter)[
                1
            ],
        },
        list(zip(*boundary_shape.exterior.coords.xy)),  # pylint: disable=maybe-no-member
    )
def test_T1():
    """
    >>> as_input(T1)
    >>> main()
    5.5
    """
    # Doctest-only test: feeds fixture T1 to main() via as_input and
    # checks the printed result.
def _get_band_edge_indices(
    band_structure: BandStructure,
    tol: float = 0.005,
) -> Tuple[Dict[Spin, List[int]], Dict[Spin, List[int]]]:
    """
    Get indices of degenerate band edge states, within a tolerance.

    Parameters
    ----------
    band_structure : BandStructure
        A band structure.
    tol : float
        Degeneracy tolerance in meV.
    """
    vbm_energy = band_structure.get_vbm()["energy"]
    cbm_energy = band_structure.get_cbm()["energy"]
    fermi = band_structure.efermi
    vbm_band_indices = {}
    cbm_band_indices = {}
    for spin, energies in band_structure.bands.items():
        # A band is a VBM state if any of its k-point energies lies between
        # (vbm - tol) and the Fermi level; symmetrically for CBM states.
        in_vb = np.any((energies > vbm_energy - tol) & (energies < fermi), axis=1)
        in_cb = np.any((energies < cbm_energy + tol) & (energies > fermi), axis=1)
        vbm_band_indices[spin] = np.where(in_vb)[0].tolist()
        cbm_band_indices[spin] = np.where(in_cb)[0].tolist()
    return vbm_band_indices, cbm_band_indices
def spectra(args):
    """subroutine for spectra subcommand

    Tabulates mutation-type counts from ``args.vcf`` and prints them as TSV:
    one pooled row when ``args.population`` is set, otherwise one count
    vector per sample (with ``args.randomize``, each variant is credited to
    one randomly chosen carrier haplotype instead).
    """
    vcf = cyvcf2.VCF(args.vcf, gts012=True)
    if args.population:
        spectra_data = Counter()
        for variant in vcf:
            spectra_data[variant.INFO['mutation_type']] += 1
        spectra = pd.DataFrame(spectra_data,
                               ['population']).reindex(sorted(spectra_data),
                                                       axis='columns')
        try:
            print(spectra.to_csv(sep='\t', index=False))
        except BrokenPipeError:
            # allow piping into head/less without a traceback
            pass
    else:
        # One integer count per sample for each mutation type.
        spectra_data = defaultdict(lambda: np.zeros_like(vcf.samples,
                                                         dtype=int))
        if args.randomize:
            for variant in vcf:
                # Draw one haplotype index, weighted by genotype count
                # (gts012 codes genotypes as 0/1/2 ALT copies).
                random_haplotype = choice([x for x, y in enumerate(variant.gt_types)
                                           for _ in range(y)])
                spectra_data[variant.INFO['mutation_type']][random_haplotype] += 1.
        else:
            for variant in vcf:
                if variant.ploidy == 1:
                    # haploid ALT are coded as 2 (homozygous ALT)
                    variant.gt_types[variant.gt_types == 2] = 1
                spectra_data[variant.INFO['mutation_type']] += variant.gt_types
        spectra = pd.DataFrame(spectra_data,
                               vcf.samples).reindex(sorted(spectra_data),
                                                    axis='columns')
        try:
            print(spectra.to_csv(sep='\t', index=True,
                                 index_label='sample'))
        except BrokenPipeError:
            pass
def make_val_dataloader(data_config, data_path, task=None, data_strct=None):
    """ Return a data loader for a validation set

    :param data_config: dict-like config; must provide "val_data" and "loader_type"
    :param data_path: base directory joined with data_config["val_data"]
    :param task: experiment/task name used to resolve the text dataloader class
    :param data_strct: optional pre-loaded data structure used instead of the file path
    :return: the validation dataloader, or None when no validation data is configured
    """
    if not "val_data" in data_config or data_config["val_data"] is None:
        print_rank("Validation data list is not set", loglevel=logging.DEBUG)
        return None
    loader_type = detect_loader_type(data_config["val_data"], data_config["loader_type"])
    if loader_type == 'text':
        TextDataLoader = get_exp_dataloader(task)
        val_dataloader = TextDataLoader(
            data = data_strct if data_strct is not None else os.path.join(data_path, data_config["val_data"]),
            user_idx = 0,
            mode = 'val',
            args=data_config
        )
    else:
        # Only text loaders are implemented for validation.
        raise NotImplementedError("Not supported loader_type={} audio_format={}".format(loader_type, data_config["audio_format"]))
    return val_dataloader
def _get_urls():
"""Stores the URLs for histology file downloads.
Returns
-------
dict
Dictionary with template names as keys and urls to the files as values.
"""
return {
"fsaverage": "https://box.bic.mni.mcgill.ca/s/znBp7Emls0mMW1a/download",
"fsaverage5": "https://box.bic.mni.mcgill.ca/s/N8zstvuRb4sNcSe/download",
"fs_LR_64k": "https://box.bic.mni.mcgill.ca/s/6zKHcg9xXu5inPR/download",
} | 29,642 |
def guid_bytes_to_string(stream):
    """
    Read a byte stream to parse as GUID

    :ivar bytes stream: GUID in raw mode
    :returns: GUID as a string
    :rtype: str
    """
    # The first three GUID fields are little-endian (uint32, uint16, uint16);
    # the trailing 8 bytes are rendered as-is.
    data1, data2, data3 = struct.unpack_from("<IHH", stream, 0)
    data4 = stream[8:16]
    tail_a = "".join("%02x" % b for b in data4[:2])
    tail_b = "".join("%02x" % b for b in data4[2:])
    return "%08x-%04x-%04x-%s-%s" % (data1, data2, data3, tail_a, tail_b)
def test_explained_inertia_decreases(mca):
    """Check the explained inertia decreases.

    ``mca`` is a fitted MCA fixture; is_sorted presumably verifies the
    monotonic ordering of explained_inertia — confirm its direction.
    """
    assert test_util.is_sorted(mca.explained_inertia)
def thaiword_to_time(text: str, padding: bool = True) -> str:
    """
    Convert Thai time in words into time (H:M).

    :param str text: Thai time in words
    :param bool padding: Zero padding the hour if True
    :return: time string
    :rtype: str
    :raises ValueError: when no time affix or hour word can be recognized

    :Example:

        thaiword_to_time("บ่ายโมงครึ่ง")
        # output:
        # 13:30
    """
    keys_dict = list(_DICT_THAI_TIME.keys())
    # Strip approximation / repetition markers and whitespace.
    text = text.replace("กว่า", "").replace("ๆ", "").replace(" ", "")
    _i = ["ตีหนึ่ง", "ตีสอง", "ตีสาม", "ตีสี่", "ตีห้า"]
    _time = ""
    # Insert a '|' separator right after the time affix, splitting the text
    # into an hour part and a minute part.
    for affix in _THAI_TIME_AFFIX:
        if affix in text and affix != "ตี":
            _time = text.replace(affix, affix + "|")
            break
        elif affix in text and affix == "ตี":
            # The "ตี" affix only counts when followed by a known hour word.
            for j in _i:
                if j in text:
                    _time = text.replace(j, j + "|")
                    break
        else:
            pass
    if "|" not in _time:
        raise ValueError("Cannot find any Thai word for time affix.")
    _LIST_THAI_TIME = _time.split("|")
    del _time
    hour = _THAI_TIME_CUT.word_tokenize(_LIST_THAI_TIME[0])
    minute = _LIST_THAI_TIME[1]
    if len(minute) > 1:
        minute = _THAI_TIME_CUT.word_tokenize(minute)
    else:
        minute = 0
    text = ""
    # determine hour
    if hour[-1] == "นาฬิกา" and hour[0] in keys_dict and hour[:-1]:
        text += str(thaiword_to_num("".join(hour[:-1])))
    elif hour[0] == "ตี" and hour[1] in keys_dict:
        text += str(_DICT_THAI_TIME[hour[1]])
    elif hour[-1] == "โมงเช้า" and hour[0] in keys_dict:
        if _DICT_THAI_TIME[hour[0]] < 6:
            text += str(_DICT_THAI_TIME[hour[0]] + 6)
        else:
            text += str(_DICT_THAI_TIME[hour[0]])
    elif (hour[-1] == "โมงเย็น" or hour[-1] == "โมง") and hour[0] == "บ่าย":
        text += str(_DICT_THAI_TIME[hour[1]] + 12)
    elif (hour[-1] == "โมงเย็น" or hour[-1] == "โมง") and hour[0] in keys_dict:
        text += str(_DICT_THAI_TIME[hour[0]] + 12)
    elif hour[-1] == "เที่ยงคืน":
        text += "0"
    elif hour[-1] == "เที่ยงวัน" or hour[-1] == "เที่ยง":
        text += "12"
    elif hour[0] == "บ่ายโมง":
        text += "13"
    elif hour[-1] == "ทุ่ม":
        if len(hour) == 1:
            text += "19"
        else:
            text += str(_DICT_THAI_TIME[hour[0]] + 18)
    if not text:
        raise ValueError("Cannot find any Thai word for hour.")
    if padding and len(text) == 1:
        text = "0" + text
    text += ":"
    # determine minute
    if minute:
        n = 0
        # Accumulate the minute value from the tokenized number words.
        for affix in minute:
            if affix in keys_dict:
                if affix != "สิบ":
                    n += _DICT_THAI_TIME[affix]
                elif affix == "สิบ" and n != 0:
                    n *= 10
                elif affix == "สิบ" and n == 0:
                    n += 10
        if n != 0 and n > 9:
            text += str(n)
        else:
            text += "0" + str(n)
    else:
        text += "00"
    return text
def _build_index_mappings(name, data_prefix, documents, sizes,
                          num_samples, seq_length, seed):
    """Build doc-idx, sample-idx, and shuffle-idx.
    doc-idx: is an array (ordered) of documents to be used in training.
    sample-idx: is the start document index and document offset for each
       training sample.
    shuffle-idx: maps the sample index into a random index into sample-idx.

    The three arrays are built once on global rank 0, saved as .npy files
    keyed by (name, num_samples, seq_length, seed), and then memory-mapped
    by every rank. Returns the tuple (doc_idx, sample_idx, shuffle_idx).
    """
    # Number of tokens in each epoch and number of required epochs.
    tokens_per_epoch = _num_tokens(documents, sizes, seq_length)
    num_epochs = _num_epochs(tokens_per_epoch, seq_length, num_samples)
    # rng state
    np_rng = np.random.RandomState(seed=seed)

    # Filename of the index mappings. All build parameters are encoded in
    # the name so a changed configuration never reuses stale caches.
    _filename = data_prefix
    _filename += '_{}_indexmap'.format(name)
    _filename += '_{}ns'.format(num_samples)
    _filename += '_{}sl'.format(seq_length)
    _filename += '_{}s'.format(seed)
    doc_idx_filename = _filename + '_doc_idx.npy'
    sample_idx_filename = _filename + '_sample_idx.npy'
    shuffle_idx_filename = _filename + '_shuffle_idx.npy'

    # Build the indexed mapping if not exist.
    if torch.distributed.get_rank() == 0:
        if (not os.path.isfile(doc_idx_filename)) or \
           (not os.path.isfile(sample_idx_filename)) or \
           (not os.path.isfile(shuffle_idx_filename)):

            print_rank_0(' > WARNING: could not find index map files, building '
                         'the indices on rank 0 ...')

            # For the last epoch, decide whether include the entire epoch
            # in the global shuffle or not.

            # If we need only one epoch, then separating last epoch  does
            # not mean anything.
            if num_epochs == 1:
                separate_last_epoch = False
                print(' > only one epoch required, setting '
                      'separate_last_epoch to False', flush=True)

            else:
                # Get the number of samples for the last epoch
                num_samples_from_epochs_minus_one = (
                    (num_epochs - 1) * tokens_per_epoch - 1) // seq_length
                last_epoch_num_samples = num_samples - \
                                         num_samples_from_epochs_minus_one
                assert last_epoch_num_samples >= 0, \
                    'last epoch number of samples should be non-negative.'
                num_samples_per_epoch = (tokens_per_epoch - 1) // seq_length
                assert last_epoch_num_samples < (num_samples_per_epoch + 1), \
                    'last epoch number of samples exceeded max value.'
                # If we have less than 80% of the samples for the last epoch,
                # separate out the epoch and treat it differently.
                # Note: the 80% number is just based on common sense and can
                # be adjusted if needed.
                separate_last_epoch = (last_epoch_num_samples <
                                       int(0.80 * num_samples_per_epoch))
                if separate_last_epoch:
                    string = ' > last epoch number of samples ({}) is smaller '\
                             'than 80% of number of samples per epoch ({}), '\
                             'setting separate_last_epoch to True'
                else:
                    string = ' > last epoch number of samples ({}) is larger '\
                             'than 80% of number of samples per epoch ({}), '\
                             'setting separate_last_epoch to False'
                print(string.format(last_epoch_num_samples,
                                    num_samples_per_epoch), flush=True)

            # doc-idx.
            start_time = time.time()
            doc_idx = _build_doc_idx(documents, num_epochs, np_rng,
                                     separate_last_epoch)
            np.save(doc_idx_filename, doc_idx, allow_pickle=True)
            print_rank_0(' > elasped time to build and save doc-idx mapping '
                         '(seconds): {:4f}'.format(time.time() - start_time))
            # sample-idx.
            start_time = time.time()
            # Use C++ implementation for speed.
            # First compile and then import.
            # from megatron.data import helpers
            assert doc_idx.dtype == np.int32
            assert sizes.dtype == np.int32
            # sample_idx = helpers.build_sample_idx(sizes, doc_idx, seq_length,
            #                                       num_epochs, tokens_per_epoch)
            # NOTE: this Python fallback replaces the C++ helper above.
            sample_idx = _build_sample_idx(sizes, doc_idx, seq_length,
                                           num_epochs, tokens_per_epoch)
            np.save(sample_idx_filename, sample_idx, allow_pickle=True)
            print_rank_0(' > elasped time to build and save sample-idx mapping '
                         '(seconds): {:4f}'.format(time.time() - start_time))
            # shuffle-idx.
            start_time = time.time()
            # -1 is due to data structure used to retieve the index:
            #    sample i --> [sample_idx[i], sample_idx[i+1])
            if separate_last_epoch:
                num_samples_ = num_samples_from_epochs_minus_one
            else:
                num_samples_ = sample_idx.shape[0] - 1
            shuffle_idx = _build_shuffle_idx(num_samples_,
                                             sample_idx.shape[0] - 1, np_rng)
            np.save(shuffle_idx_filename, shuffle_idx, allow_pickle=True)
            print_rank_0(' > elasped time to build and save shuffle-idx mapping'
                         ' (seconds): {:4f}'.format(time.time() - start_time))
    # This should be a barrier but nccl barrier assumes
    # device_index=rank which is not the case for model
    # parallel case
    # The two all-reduces below make every rank wait until rank 0 has
    # finished writing the files before anyone tries to load them.
    counts = torch.cuda.LongTensor([1])
    torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
    torch.distributed.all_reduce(counts, group=mpu.get_pipeline_model_parallel_group())
    assert counts[0].item() == (
        torch.distributed.get_world_size() //
        torch.distributed.get_world_size(group=mpu.get_tensor_model_parallel_group()))

    # Load mappings. mmap_mode='r' keeps the (potentially huge) arrays on
    # disk instead of materializing them in every rank's memory.
    start_time = time.time()
    print_rank_0(' > loading doc-idx mapping from {}'.format(
        doc_idx_filename))
    doc_idx = np.load(doc_idx_filename, allow_pickle=True, mmap_mode='r')
    print_rank_0(' > loading sample-idx mapping from {}'.format(
        sample_idx_filename))
    sample_idx = np.load(sample_idx_filename, allow_pickle=True, mmap_mode='r')
    print_rank_0(' > loading shuffle-idx mapping from {}'.format(
        shuffle_idx_filename))
    shuffle_idx = np.load(shuffle_idx_filename, allow_pickle=True, mmap_mode='r')
    print_rank_0('    loaded indexed file in {:3.3f} seconds'.format(
        time.time() - start_time))
    print_rank_0('    total number of samples: {}'.format(
        sample_idx.shape[0]))
    print_rank_0('    total number of epochs: {}'.format(num_epochs))

    return doc_idx, sample_idx, shuffle_idx
def get_grains(ng, gdmin, angrange0, angrange1, two_dim):
    """
    Get specified number of grains with conditions of minimum distance and angle range.

    Candidate grain centers are drawn uniformly in the fractional unit cell
    and rejected when closer than ``gdmin`` (minimum-image convention) to any
    already-accepted grain.

    Args:
        ng: number of grains to generate.
        gdmin: minimum allowed distance between grain centers (fractional units).
        angrange0, angrange1: in-plane angle range in degrees (2D case only).
        two_dim: if True, points lie in the z=0 plane and only the in-plane
            rotation angle is randomized; otherwise full 3D points/angles.

    Returns:
        list of ``Grain`` objects.
    """
    # Angular slot width per grain (degrees -> radians) for the 2D case.
    dang = (angrange1 - angrange0) / 180.0 * np.pi / ng
    grains = []
    ig = 0
    dmin = 1e+30
    while True:
        if ig >= ng:
            break
        pi = np.zeros((3,))
        ai = np.zeros((3,))
        pi[0] = random()
        pi[1] = random()
        if two_dim:
            too_close = False
            dminl = 1e+30
            # Minimum-image distance check against every accepted grain.
            for i, gi in enumerate(grains):
                dlt = pi - gi.point
                dlt = dlt - np.round(dlt)
                d = np.sqrt(dlt[0]**2 + dlt[1]**2)
                dminl = min(d, dminl)
                if d < gdmin:
                    too_close = True
                    break
            if too_close:
                continue  # reject this candidate and redraw
            dmin = min(dminl, dmin)
            pi[2] = 0.0
            ai[0] = 0.0
            ai[1] = 0.0
            # Each grain gets a random angle inside its own slot of the range.
            # NOTE(review): angrange0 is in degrees while dang is in radians;
            # the units look inconsistent here -- confirm intended behavior.
            ai[2] = angrange0 + dang*ig + random()*dang
        else:
            pi[2] = random()
            too_close = False
            for gi in grains:
                dlt = pi - gi.point
                dlt = dlt - np.round(dlt)
                d = np.sqrt(dlt[0]**2 + dlt[1]**2 + dlt[2]**2)
                if d < gdmin:
                    # BUG FIX: the original had a bare ``too_close`` expression
                    # here (a no-op), so 3D candidates were never rejected.
                    too_close = True
                    break
            if too_close:
                continue
            ai[0] = random()*np.pi*2 - np.pi
            ai[1] = random()*np.pi/2 - np.pi/2
            ai[2] = random()*np.pi*2 - np.pi
        print(' point,angle =', pi, ai)
        gi = Grain(pi, ai)
        grains.append(gi)
        ig += 1
    print(' Minimum distance between grains and limit = ', dmin, gdmin)
    return grains
def two_loops(N: List[int]) -> List[int]:
    """Return A where A[i] is the product of every element of N except N[i].

    Prefix/suffix product approach in O(n) time and O(n) extra space:
    ``prefix[i]`` is the product of all elements before index i and
    ``suffix[i]`` the product of all elements after index i, so
    ``A[i] = prefix[i] * suffix[i]``.

    Example:
        two_loops([3, 7, 1, 4, 8, 9]) == [2016, 864, 6048, 1512, 756, 672]
    """
    n = len(N)
    prefix = [1] * n
    suffix = [1] * n
    # Forward pass: product of everything to the left of each index.
    for idx in range(1, n):
        prefix[idx] = prefix[idx - 1] * N[idx - 1]
    # Backward pass: product of everything to the right of each index.
    for idx in range(n - 2, -1, -1):
        suffix[idx] = suffix[idx + 1] * N[idx + 1]
    return [p * s for p, s in zip(prefix, suffix)]
def make_slurm_queue(dirmain, print_level=0):
    """get queue list from slurm

    Queries ``squeue`` for the current user and returns the ids and batch
    script names of the jobs that belong to ``dirmain``.

    Args:
        dirmain: directory whose jobs should be selected; a job matches when
            its script file exists in ``dirmain`` and the parent directory of
            the job's stdout path has the same basename as ``dirmain``.
        print_level: verbosity (2 prints summary counts, >=3 per-job lines).

    Returns:
        tuple: (list_ids, list_scripts) -- parallel lists of SLURM job ids
        and the corresponding bash script file names.
    """
    # Check slurm
    list_ids = []
    list_scripts = []
    usr = os.environ.get('USER')
    # Fixed-width output: 50 chars job id, 150 chars name, 200 chars stdout.
    proc = subprocess.run(['squeue', "-u", usr, "-O", "jobid:.50,name:.150,stdout:.200"], capture_output=True)
    all_info_user = proc.stdout.decode('utf-8').split('\n')
    all_info_user = [x for x in all_info_user if x != '']
    if print_level == 2:
        print("Number of Slurm tasks running:", len(all_info_user) - 1)
    # Start at 1 to skip the squeue header row; slice each line by the
    # fixed column widths requested above.
    for i in range(1, len(all_info_user)):
        line_id = all_info_user[i][:50].strip()
        line_bashname = all_info_user[i][50:200].strip()
        line_jobdir = all_info_user[i][200:].strip()
        line_jobdir = os.path.realpath(line_jobdir)
        # Check bash name plus directory via slurm output (requires slurm submit as above)
        if (os.path.exists(os.path.join(dirmain, line_bashname)) and os.path.basename(dirmain) == os.path.basename(
                os.path.dirname(line_jobdir))):
            list_ids.append(line_id)
            list_scripts.append(line_bashname)
            if print_level >= 3:
                print("ID: ", line_id, ", Script: ", line_bashname)
    if print_level == 2:
        print("Number of Slurms tasks running for this directory:", len(list_scripts))
    return list_ids, list_scripts
def test_undeclared_always_write():
    """Test error raised if always write specified that isn't in PROP_TYPES.

    Instantiating ``TestUndeclaredAlwaysWrite`` (defined elsewhere in the
    suite) must raise ``XDLUndeclaredAlwaysWriteError``.
    """
    with pytest.raises(XDLUndeclaredAlwaysWriteError):
        TestUndeclaredAlwaysWrite()
def in_test_directory():
    """
    Change to a temporary empty directory for testing purposes.

    Yields control while the CWD is a fresh temporary directory, then
    restores the original CWD and removes the temporary tree.

    NOTE(review): this is a generator function; presumably it is wrapped
    with ``@contextlib.contextmanager`` at the (unseen) decoration site --
    confirm before using it directly in a ``with`` statement.
    """
    old_cwd = os.getcwd()
    tempdir = tempfile.mkdtemp()
    os.chdir(tempdir)
    try:
        yield
    finally:
        # Restore the previous working directory before deleting the tree;
        # _remove_readonly handles read-only files that rmtree cannot delete.
        os.chdir(old_cwd)
        shutil.rmtree(tempdir, onerror=_remove_readonly)
def read_renku_version_from_dockerfile(path: Union[Path, str]) -> Optional[str]:
    """Read RENKU_VERSION from the content of path if a valid version is available.

    Returns the normalized version string, or None when the file does not
    exist, no ``ARG RENKU_VERSION=...`` line is present, or the value is
    not a parseable version.
    """
    dockerfile = Path(path)
    if not dockerfile.exists():
        return None
    match = re.search(
        r"^\s*ARG RENKU_VERSION=(.+)$", dockerfile.read_text(), flags=re.MULTILINE
    )
    if match is None:
        return None
    try:
        return str(Version(match.group(1)))
    except ValueError:
        return None
def run_package():
    """ Entry point for running a Kedro project packaged with `kedro package` using `python -m <project_package>.run` command """
    # The package name is inferred from this file's parent directory and the
    # current working directory is taken as the project path.
    project_context = load_package_context(project_path=Path.cwd(), package_name=Path(__file__).resolve().parent.name)
    project_context.run()
def zero_mean(framed):
    """Subtract each frame's mean from that frame (rows of ``framed``).

    Returns a new array; the input is not modified in place.
    """
    # keepdims keeps the mean as a column vector so it broadcasts per row.
    return framed - np.mean(framed, axis=1, keepdims=True)
def now():
    """
    Return the current time (timezone-aware, via Django's ``timezone.now``).
    """
    return timezone.now()
def calc_thickness_of_wing(XFOILdirectory, chordArray2):
    """
    calculation wing thickness list

    Reads an XFOIL airfoil coordinate file (``data/<XFOILdirectory>/foil.dat``),
    computes the airfoil's maximum thickness and scales every entry of
    ``chordArray2`` by it.

    Args:
        XFOILdirectory: subdirectory of ``data`` containing ``foil.dat``.
        chordArray2: chord values along the span.

    Returns:
        list: thickness value for each spanwise station.
    """
    # open airfoil data
    data = io_fpa.open2read(os.path.join("data", XFOILdirectory, "foil.dat"))
    # make airfoil list (first line of the file is skipped -- header)
    xlist = [float(i.split()[0]) for i in data[1:]]
    ylist = [float(i.split()[1]) for i in data[1:]]
    # divide upper and lower
    # NOTE(review): the split index is the last point where x == y; this
    # presumably finds the leading edge at (0, 0) -- confirm for all foils.
    zeropoint = None
    for i in range(len(xlist)):
        if xlist[i] == ylist[i]:
            zeropoint = i
    upperx = np.array(xlist[:zeropoint+1])[::-1]
    uppery = np.array(ylist[:zeropoint+1])[::-1]
    lowerx = np.array(xlist[zeropoint:])
    lowery = np.array(ylist[zeropoint:])
    # interpolate uppwer and lower file in order to be able to different yposition of both upper and lower
    linear_interp_upper = interp1d(upperx, uppery)
    linear_interp_lower = interp1d(lowerx, lowery)
    xx = np.linspace(0., 1., 100)
    newylower = linear_interp_lower(xx)
    newyupper = linear_interp_upper(xx)
    # Thickness distribution = gap between upper and lower surfaces.
    thickness = newyupper - newylower
    maxthickness = max(thickness)
    # make thickness list of span direction
    thickness = [i * maxthickness for i in chordArray2]
    return thickness
    # plt.plot(self.yy, self.thickness)
    # plt.savefig(self.dirname + "/" + "thickness")
def scholarly_init_connection():
    """
    Bind TorRequest to Scholarly service

    Repeatedly requests a new Tor identity and tries to route scholarly's
    traffic through the local Tor SOCKS/control ports until the binding
    succeeds, then prints the working identity.

    Parameters
    ----------
    No arguments

    Returns
    -------
    Nothing
    """
    while True:
        # assign new tor identity
        ips = assign_new_ip(text=True)
        # use the tor request for scholarly
        tor_req = scholarly.use_tor(tor_sock_port=9050, \
                        tor_control_port=9051, \
                        tor_pw="scholarly_password")
        if tor_req:
            # come out of the loop, when successful
            break
    # print the tor identity (ips[1] per assign_new_ip's return format)
    print("Working Tor identity:", ips[1])
def rgb_to_hex(r, g, b):
    """Turn an RGB float tuple into a hex code.

    Each channel is expected in [-1.0, 1.0] and is mapped linearly onto
    [0, 255] before formatting.

    Args:
        r (float): R value
        g (float): G value
        b (float): B value

    Returns:
        str: A hex code (no #)
    """
    def channel_to_byte(value):
        # Map [-1, 1] -> [0, 255] with banker's rounding (same as round()).
        return round((value + 1.0) / 2 * 255)

    return "{:02x}{:02x}{:02x}".format(
        channel_to_byte(r), channel_to_byte(g), channel_to_byte(b)
    )
def test_run_string() -> None:
    """Validate run() called with a single string command.

    Reference behavior is a plain ``subprocess.run`` with ``shell=True`` on
    the same command; the project's ``run()`` must match its returncode,
    stdout and stderr exactly.
    """
    cmd = "echo 111 && >&2 echo 222"
    old_result = subprocess.run(
        cmd,
        shell=True,
        universal_newlines=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        check=False,
    )
    result = run(cmd)
    assert result.returncode == old_result.returncode
    assert result.stdout == old_result.stdout
    assert result.stderr == old_result.stderr
def calc_R(x, y, xc, yc):
    """
    calculate the distance of each 2D points from the center (xc, yc)

    Works elementwise for scalars or NumPy arrays. Uses ``np.hypot``, which
    is numerically more robust than sqrt(dx**2 + dy**2) when the components
    are very large or very small.
    """
    return np.hypot(x - xc, y - yc)
def get_followers_list(user_url, driver, followers=True):
    """
    Returns a list of users who follow or are followed by a user.

    Parameters
    ----------
    user_url: string
    driver: selenium.webdriver
    followers: bool
        If True, gets users who are followers of this user.
        If False, gets users who this user follows.

    Returns
    -------
    list of str
        Profile URLs scraped from the followers/following page.
    """
    if followers:
        url = user_url + '/followers/'
    else:
        url = user_url + '/following/'

    # Extract the profile link of every user entry on the page.
    def process(soup):
        return [
            str(item.find_all('a', {'class': 'userWrapper'})[0].get('href'))
            for item in soup.select('div.item')
        ]

    # Renamed from ``followers`` so the boolean parameter is not shadowed
    # by the result list.
    users = process_whole_page(driver, url, process)
    return users
def main():
    """Run preprocessing process.

    Normalizes dumped raw features with a precomputed StandardScaler, maps
    phones/speakers to integer ids, optionally copies waveforms, and writes
    a sorted ``metadata.jsonl`` into ``--dumpdir``.
    """
    parser = argparse.ArgumentParser(
        description="Normalize dumped raw features (See detail in parallel_wavegan/bin/normalize.py)."
    )
    parser.add_argument(
        "--metadata",
        type=str,
        required=True,
        help="directory including feature files to be normalized. "
        "you need to specify either *-scp or rootdir.")
    parser.add_argument(
        "--dumpdir",
        type=str,
        required=True,
        help="directory to dump normalized feature files.")
    parser.add_argument(
        "--feats-stats",
        type=str,
        required=True,
        help="speech statistics file.")
    parser.add_argument(
        "--skip-wav-copy",
        default=False,
        action="store_true",
        help="whether to skip the copy of wav files.")
    parser.add_argument(
        "--phones-dict", type=str, default=None, help="phone vocabulary file.")
    parser.add_argument(
        "--speaker-dict", type=str, default=None, help="speaker id map file.")
    # NOTE(review): --phones-dict/--speaker-dict default to None but are
    # opened unconditionally below -- they are effectively required; confirm.
    parser.add_argument(
        "--add-blank",
        type=str2bool,
        default=True,
        help="whether to add blank between phones")
    args = parser.parse_args()

    dumpdir = Path(args.dumpdir).expanduser()
    # use absolute path
    dumpdir = dumpdir.resolve()
    dumpdir.mkdir(parents=True, exist_ok=True)

    # get dataset
    with jsonlines.open(args.metadata, 'r') as reader:
        metadata = list(reader)
    dataset = DataTable(
        metadata,
        converters={
            "feats": np.load,
            "wave": None if args.skip_wav_copy else np.load,
        })
    logging.info(f"The number of files = {len(dataset)}.")

    # restore scaler from the saved [mean, scale] statistics
    feats_scaler = StandardScaler()
    feats_scaler.mean_ = np.load(args.feats_stats)[0]
    feats_scaler.scale_ = np.load(args.feats_stats)[1]
    feats_scaler.n_features_in_ = feats_scaler.mean_.shape[0]

    # load phone -> id vocabulary
    vocab_phones = {}
    with open(args.phones_dict, 'rt') as f:
        phn_id = [line.strip().split() for line in f.readlines()]
    for phn, id in phn_id:
        vocab_phones[phn] = int(id)

    # load speaker -> id map
    vocab_speaker = {}
    with open(args.speaker_dict, 'rt') as f:
        spk_id = [line.strip().split() for line in f.readlines()]
    for spk, id in spk_id:
        vocab_speaker[spk] = int(id)

    # process each file
    output_metadata = []
    for item in tqdm(dataset):
        utt_id = item['utt_id']
        feats = item['feats']
        wave = item['wave']
        # normalize
        feats = feats_scaler.transform(feats)
        feats_path = dumpdir / f"{utt_id}_feats.npy"
        np.save(feats_path, feats.astype(np.float32), allow_pickle=False)
        if not args.skip_wav_copy:
            wav_path = dumpdir / f"{utt_id}_wave.npy"
            np.save(wav_path, wave.astype(np.float32), allow_pickle=False)
        else:
            # keep whatever reference the metadata already carried
            wav_path = wave
        phones = item['phones']
        text_lengths = item['text_lengths']
        if args.add_blank:
            # insert blank tokens between phones; length changes accordingly
            phones = add_blank(phones, filed="character")
            text_lengths = len(phones)
        phone_ids = [vocab_phones[p] for p in phones]
        spk_id = vocab_speaker[item["speaker"]]
        record = {
            "utt_id": item['utt_id'],
            "text": phone_ids,
            "text_lengths": text_lengths,
            'feats': str(feats_path),
            "feats_lengths": item['feats_lengths'],
            "wave": str(wav_path),
            "spk_id": spk_id,
        }
        # add spk_emb for voice cloning
        if "spk_emb" in item:
            record["spk_emb"] = str(item["spk_emb"])
        output_metadata.append(record)
    output_metadata.sort(key=itemgetter('utt_id'))
    output_metadata_path = Path(args.dumpdir) / "metadata.jsonl"
    with jsonlines.open(output_metadata_path, 'w') as writer:
        for item in output_metadata:
            writer.write(item)
    logging.info(f"metadata dumped into {output_metadata_path}")
def test_tag():
    """Test that the tag attribute works correctly.

    ``tag`` behaves like a two-level auto-vivifying mapping: writing
    ``x.tag['foo']['bar']`` creates the 'foo' level on demand, and top-level
    keys only exist after something was written under them.
    """

    class DummyModel(Model):
        """The simplest instance of Model possible."""

    x = DummyModel()
    x.tag['foo']['bar'] = 5
    assert len(x.tag.keys()) == 1
    assert len(x.tag['foo'].keys()) == 1
    assert x.tag['foo']['bar'] == 5
    # 'bar' was only used as a second-level key, so it is not a top-level key.
    assert 'bar' not in x.tag
    x.tag['bar']['baz'] = 3
    assert 'bar' in x.tag
    assert 'baz' in x.tag['bar']
    assert len(x.tag.keys()) == 2
def test():
    """Check run() against the published knot-hash test vectors.

    The empty string becomes a2582a3a0e66e6e86e3812dcb672a272.
    AoC 2017 becomes 33efeb34ea91902bb2f59c9920caa6cd.
    1,2,3 becomes 3efbe78a8d82f29979031a4aa0b16a9d.
    1,2,4 becomes 63960835bcdc130f0b66d7ff4f6a5a8e.
    """
    tests = {
        '': 'a2582a3a0e66e6e86e3812dcb672a272',
        'AoC 2017': '33efeb34ea91902bb2f59c9920caa6cd',
        '1,2,3': '3efbe78a8d82f29979031a4aa0b16a9d',
        '1,2,4': '63960835bcdc130f0b66d7ff4f6a5a8e'
    }
    for inval, correct in tests.items():
        hashval = run(inval)
        # BUG FIX: the originals were Python 2 print statements, which are
        # syntax errors under Python 3; the call form works on both.
        print(hashval)
        print(correct)
        assert hashval == correct
        print('')
def policy_to_dict(player_policy,
                   game,
                   all_states=None,
                   state_to_information_state=None,
                   player_id: Optional = None):
    """Converts a Policy instance into a tabular policy represented as a dict.

    This is compatible with the C++ TabularExploitability code (i.e.
    pyspiel.exploitability, pyspiel.TabularBestResponse, etc.).

    While you do not have to pass the all_states and state_to_information_state
    arguments, creating them outside of this funciton will speed your code up
    dramatically.

    Args:
      player_policy: The policy you want to convert to a dict.
      game: The game the policy is for.
      all_states: The result of calling get_all_states.get_all_states. Can be
        cached for improved performance.
      state_to_information_state: A dict mapping str(state) to
        state.information_state for every state in the game. Can be cached for
        improved performance.
      player_id: If given, only states where this player is to act are
        included in the output; otherwise every state is converted.

    Returns:
      A dictionary version of player_policy that can be passed to the C++
      TabularBestResponse, Exploitability, and BestResponse functions/classes.
    """
    if all_states is None:
        all_states = get_all_states.get_all_states(
            game,
            depth_limit=-1,
            include_terminals=False,
            include_chance_states=False)
        # Keyed by the stringified information-state tensor of each state.
        state_to_information_state = {
            state: str(
                np.asarray(all_states[state].information_state_tensor(), dtype=np.float32).tolist()) for
            state in all_states
        }
    tabular_policy = dict()
    for state in all_states:
        if player_id is not None and all_states[state].current_player() != player_id:
            continue
        information_state = state_to_information_state[state]
        # Store (action, probability) pairs for this information state.
        tabular_policy[information_state] = list(
            player_policy.action_probabilities(all_states[state]).items())
    return tabular_policy
def compressed_gw(Dist1,Dist2,p1,p2,node_subset1,node_subset2, verbose = False, return_dense = True):
    """
    Compute a quantized Gromov-Wasserstein coupling: compress both spaces to
    Voronoi partitions, match the compressed spaces, then match and expand
    the local blocks into a full coupling.

    In:
    Dist1, Dist2 --- distance matrices of size nxn and mxm
    p1,p2 --- probability vectors of length n and m
    node_subset1, node_subset2 --- subsets of point indices. This version of the qGW code
                            specifically uses Voronoi partitions from fixed subsets
                            (usually these are chosen randomly). Other partitioning schems
                            are possible, but not currently implemented here.
    verbose --- print status and compute times
    return_dense --- some parts of the algorithm use sparse matrices. If 'False' a sparse matrix is returned.

    Out:
    full_coup --- coupling matrix of size nxm giving a probabilistic correspondence between metric spaces.
    """
    # Compress Graphs: build membership couplings and compressed distance
    # matrices/distributions for each space.
    start = time.time()
    if verbose:
        print('Compressing Graphs...')
    coup1, p_compressed1 = compress_graph_from_subset(Dist1,p1,node_subset1)
    coup2, p_compressed2 = compress_graph_from_subset(Dist2,p2,node_subset2)

    Dist_new1, p_new1 = compress_graph(Dist1,p_compressed1)
    Dist_new2, p_new2 = compress_graph(Dist2,p_compressed2)
    if verbose:
        print('Time for Compressing:', time.time() - start)

    # Match compressed graphs with standard Gromov-Wasserstein.
    start = time.time()
    if verbose:
        print('Matching Compressed Graphs...')
    coup_compressed, log = gwa.gromov_wasserstein(Dist_new1, Dist_new2, p_new1, p_new2)

    if verbose:
        print('Time for Matching Compressed:', time.time() - start)

    # Find submatchings and create full coupling
    if verbose:
        print('Matching Subgraphs and Constructing Coupling...')
    supp1 = find_support(p_compressed1)
    supp2 = find_support(p_compressed2)

    full_coup = coo_matrix((Dist1.shape[0], Dist2.shape[0]))

    matching_time = 0
    matching_and_expanding_time = 0
    num_local_matches = 0

    # For every pair of compressed nodes with non-negligible mass, match the
    # corresponding partition blocks locally and add the (weighted) result.
    for (i_enum, i) in enumerate(supp1):
        subgraph_i = find_support(coup1[:,i])
        for (j_enum, j) in enumerate(supp2):
            start = time.time()
            w_ij = coup_compressed[i_enum,j_enum]
            if w_ij > 1e-10:  # skip numerically-zero block weights
                num_local_matches += 1
                subgraph_j = find_support(coup2[:,j])
                # Compute submatching
                coup_sub_ij = find_submatching_locally_linear(Dist1,Dist2,coup1,coup2,i,j)
                matching_time += time.time()-start
                # Expand to correct size: scatter the local coupling entries
                # back to their global row/column indices, scaled by w_ij.
                idx = np.argwhere(coup_sub_ij > 1e-10)
                idx_i = idx.T[0]
                idx_j = idx.T[1]
                row = np.array(subgraph_i)[idx_i]
                col = np.array(subgraph_j)[idx_j]
                data = w_ij*np.array([coup_sub_ij[p[0],p[1]] for p in list(idx)])
                expanded_coup_sub_ij = coo_matrix((data, (row,col)), shape=(full_coup.shape[0], full_coup.shape[1]))
                # Update full coupling
                full_coup += expanded_coup_sub_ij
                matching_and_expanding_time += time.time()-start

    if verbose:
        print('Total Time for',num_local_matches,'local matches:')
        print('Local matching:', matching_time)
        print('Local Matching Plus Expansion:', matching_and_expanding_time)

    if return_dense:
        return full_coup.toarray()
    else:
        return full_coup
def force_gather(*pargs):
    """Like force_ask(), except more insistent. In addition to making a
    single attempt to ask a question that offers to define the variable,
    it enlists the process_action() function to seek the definition of
    the variable. The process_action() function will keep trying to define
    the variable until it is defined."""
    the_user_dict = get_user_dict()
    # Snapshot the interview's loop/index variables so the gather can be
    # re-run later in the same context.
    the_context = dict()
    for var_name in ('x', 'i', 'j', 'k', 'l', 'm', 'n'):
        if var_name in the_user_dict:
            the_context[var_name] = the_user_dict[var_name]
    for variable_name in unpack_pargs(pargs):
        # Avoid queueing the same variable twice (entries may be plain names
        # or dicts with a 'var' key).
        if variable_name not in [(variable_dict if isinstance(variable_dict, str) else variable_dict['var']) for variable_dict in this_thread.internal['gather']]:
            this_thread.internal['gather'].append(dict(var=variable_name, context=the_context))
        # Raising here hands control back to the interview engine, which
        # will keep pursuing the variable's definition.
        raise ForcedNameError(variable_name, gathering=True)
def get_stats(service: googleapiclient.discovery, videos_list: list):
    """Get duration, views and live status of YouTube video with their ID

    :param service: a YouTube service build with 'googleapiclient.discovery'
    :param videos_list: list of YouTube video IDs, or list of dicts each
        carrying a 'video_id' key
    :return items: playlist items (videos) as a list of dicts with keys
        'video_id', 'views', 'likes', 'comments', 'duration' (seconds) and
        'live_status'. Exits the process on an API HTTP error.
    """
    items = []
    # Accept both a list of dicts with 'video_id' and a plain list of ids.
    try:
        videos_ids = [video['video_id'] for video in videos_list]
    except TypeError:
        videos_ids = videos_list
    # Split task in chunks of size 50 to request on a maximum of 50 videos at each iteration.
    videos_chunks = [videos_ids[i:i + min(50, len(videos_ids))] for i in range(0, len(videos_ids), 50)]
    for chunk in videos_chunks:
        try:
            request = get_videos(service=service, videos_list=chunk)
            # Keep necessary data
            items += [{'video_id': item['id'],
                       'views': item['statistics'].get('viewCount', 0),
                       'likes': item['statistics'].get('likeCount', 0),
                       'comments': item['statistics'].get('commentCount', 0),
                       'duration': isodate.parse_duration(item['contentDetails'].get('duration', 0)).seconds,
                       'live_status': item['snippet'].get('liveBroadcastContent')} for item in request['items']]
        except googleapiclient.errors.HttpError as http_error:
            # NOTE(review): aborts the whole process on the first API error.
            history.error(http_error.error_details)
            sys.exit()
    return items
def FactoredIntegers():
    """
    Generate pairs n,F where F is the prime factorization of n.
    F is represented as a dictionary in which each prime factor of n
    is a key and the exponent of that prime is the corresponding value.

    Incremental sieve: ``factorization`` maps upcoming integers to the
    prime powers discovered so far; each prime power p**x seen at i is
    propagated forward to i + p**x. (The external one-line ``updateDict``
    helper of the original is inlined via ``dict.setdefault`` so this
    generator is self-contained.)

    Note: the yielded dicts may be shared with internal state and mutated
    by later iterations -- copy them if you need to keep them.
    """
    yield 1, {}
    i = 2
    factorization = {}
    while True:
        if i not in factorization:  # no factor found below i => i is prime
            F = {i: 1}
            yield i, F
            factorization[2 * i] = F
        elif len(factorization[i]) == 1:  # single prime recorded => prime power
            p, x = next(iter(factorization[i].items()))
            F = {p: x + 1}
            yield i, F
            factorization[2 * i] = F
            # Propagate p**x forward (inlined updateDict).
            factorization.setdefault(i + p**x, {})[p] = x
            del factorization[i]
        else:
            yield i, factorization[i]
            for p, x in factorization[i].items():
                q = p**x
                iq = i + q
                if iq in factorization and p in factorization[iq]:
                    iq += p**x   # skip higher power of p
                factorization.setdefault(iq, {})[p] = x
            del factorization[i]
        i += 1
def NotecardExceptionInfo(exception):
    """Construct a formatted Exception string.

    Args:
        exception (Exception): An exception object.

    Returns:
        string: "<platform>: <ExceptionClass>: <space-joined args>".
    """
    details = " ".join(str(arg) for arg in exception.args)
    return f"{sys.platform}: {type(exception).__name__}: {details}"
def load_test_dataset(cfg: Dict) -> Tuple[Tuple[List]]:
    """Read config and load test dataset

    Args:
        cfg (Dict): config from config.json

    Returns:
        Tuple[Tuple[List]]: (X_test, y_test, test_prompt_ids)
    """
    opts = cfg.preprocess_data_args
    X_test, y_test, test_prompt_ids = read_dataset(
        opts["test_path"],
        opts["prompt_id"],
        opts["maxlen"],
        opts["to_lower"],
        opts["score_index"],
    )
    return (X_test, y_test, test_prompt_ids)
def create_client_dir(client_dir=None):
    """Create client directories (including parents) if they do not exist.

    Args:
        client_dir: path of the directory to create.
    """
    # exist_ok avoids the check-then-create race between os.path.exists()
    # and os.makedirs() present in the original implementation.
    os.makedirs(client_dir, exist_ok=True)
def clear_caches() -> None:
    """ Clear all Caches created by instagramy in current dir """
    # ignore_errors makes this a no-op when the cache dir does not exist;
    # shutil.rmtree returns None, so this effectively returns None.
    return shutil.rmtree(cache_dir, ignore_errors=True)
def linha(tam=20):
    """
    -> Print a horizontal divider line made of dashes.

    :param tam: (optional) length of the line
    :return: None
    """
    divisor = '-' * tam
    print(divisor)
def SingleCameraCalibration_from_xml(elem, helper=None):
    """ loads a camera calibration from an Elementree XML node

    Args:
        elem: an ElementTree element with tag ``single_camera_calibration``.
        helper: optional ReconstructHelper with the non-linear (distortion)
            parameters; when absent it is read from the XML or defaults to
            a linear (distortion-free) model.

    Returns:
        SingleCameraCalibration built from the node's contents.
    """
    assert ET.iselement(elem)
    assert elem.tag == "single_camera_calibration"
    cam_id = elem.find("cam_id").text
    # 3x4 projection matrix parsed from its text representation.
    pmat = numpy.array(numpy.mat(elem.find("calibration_matrix").text))
    res = numpy.array(numpy.mat(elem.find("resolution").text))[0,:]
    scale_elem = elem.find("scale_factor")
    if NO_BACKWARDS_COMPAT:
        assert scale_elem is None, 'XML file has outdated <scale_factor>'
    else:
        if scale_elem is not None:
            # backwards compatibility: fold the legacy scale factor into the
            # last column of the projection matrix.
            scale = float( scale_elem.text )
            if scale != 1.0:
                warnings.warn('converting old scaled calibration')
                scale_array = numpy.ones((3,4))
                scale_array[:,3] = scale # mulitply last column by scale
                pmat = scale_array*pmat # element-wise multiplication
    if not helper:
        helper_elem = elem.find("non_linear_parameters")
        if helper_elem is not None:
            helper = reconstruct_utils.ReconstructHelper_from_xml(helper_elem)
        else:
            # make with no non-linear stuff (i.e. make linear)
            helper = reconstruct_utils.ReconstructHelper(1,1, # focal length
                                                         0,0, # image center
                                                         0,0, # radial distortion
                                                         0,0) # tangential distortion

    return SingleCameraCalibration(cam_id=cam_id,
                                   Pmat=pmat,
                                   res=res,
                                   helper=helper)
def contrast_augm_cv2(images,fmin,fmax):
    """
    this function is equivalent to the numpy version, but 2.8x faster

    Randomly rescales the contrast of each image in the batch by a factor
    drawn uniformly from [fmin, fmax), re-centering around 128 so mid-gray
    stays fixed (addWeighted computes img*fac + (128 - fac*128)).

    Args:
        images: batch of images indexed along axis 0 -- assumes pixel values
            centered on 128 (uint8-style); TODO confirm dtype expectations.
        fmin, fmax: bounds of the per-image random contrast factor.

    Returns:
        A new array with contrast-adjusted copies; the input is not modified.
    """
    images = np.copy(images)
    # One random factor per image; rand_state is a module-level RandomState.
    contr_rnd = rand_state.uniform(low=fmin,high=fmax,size=images.shape[0])
    for i in range(images.shape[0]):
        fac = contr_rnd[i]
        # atleast_3d restores a channel axis that addWeighted may drop.
        images[i] = np.atleast_3d(cv2.addWeighted(images[i], fac , 0, 0, 128-fac*128))
    return images
def load_preprocess():
    """
    Load the Preprocessed Training data and return them in batches of <batch_size> or less

    Deserializes whatever was previously pickled to 'preprocess.p'.
    """
    return pickle_load('preprocess.p')
def detection_layer(config, rois, mrcnn_class, mrcnn_bbox, image_meta):
    """Takes classified proposal boxes and their bounding box deltas and
    returns the final detection boxes.

    Args:
        config: model configuration object passed through to refine_detections.
        rois: proposal boxes, batch dimension first (batch size must be 1).
        mrcnn_class: per-ROI class probabilities.
        mrcnn_bbox: per-ROI bounding-box refinement deltas.
        image_meta: packed image metadata; only the window (the valid image
            region inside padding) is used here.

    Returns:
    [batch, num_detections, (y1, x1, y2, x2, class_score)] in pixels
    """
    # Currently only supports batchsize 1
    rois = rois.squeeze(0)

    _, _, window, _ = parse_image_meta(image_meta)
    window = window[0]  # first (only) image of the batch
    detections = refine_detections(rois, mrcnn_class, mrcnn_bbox, window, config)

    return detections
def connectedSocketDiscover():
    """
    Try to discover the internal address by using a connected UDP
    socket.

    Resolves a well-known external host, "connects" a UDP socket to it
    (no packets are sent), and reads the local address the OS chose for
    that route.

    @return: a L{Deferred} called with the internal address.
    """
    def cb(address):
        protocol = DatagramProtocol()
        listeningPort = reactor.listenUDP(0, protocol)
        # Connecting a UDP socket only selects a route/local address;
        # nothing goes over the wire.
        protocol.transport.connect(address, 7)
        internal = protocol.transport.getHost().host
        listeningPort.stopListening()
        return internal
    return reactor.resolve('A.ROOT-SERVERS.NET').addCallback(cb)
def construct_source_plate_not_recognised_message(
    params: Dict[str, str]
) -> Tuple[List[str], Optional[Message]]:
    """Constructs a message representing a source plate not recognised event;
    otherwise returns appropriate errors.

    Arguments:
        params {Dict[str, str]} -- All parameters of the plate event message request.
            Requires non-empty 'user_id' and 'robot' entries.

    Returns:
        {[str]} -- Any errors attempting to construct the message, otherwise an empty array.
        {Message} -- The constructed message; otherwise None if there are any errors.
    """
    try:
        user_id = params.get("user_id", "")
        robot_serial_number = params.get("robot", "")
        if len(user_id) == 0 or len(robot_serial_number) == 0:
            return [
                "'user_id' and 'robot' are required to construct a "
                f"{PLATE_EVENT_SOURCE_NOT_RECOGNISED} event message"
            ], None

        robot_uuid = __get_robot_uuid(robot_serial_number)
        if robot_uuid is None:
            return [f"Unable to determine a uuid for robot '{robot_serial_number}'"], None

        # Assemble the event payload; the robot is the only message subject.
        message_content = {
            "event": {
                "uuid": str(uuid4()),
                "event_type": PLATE_EVENT_SOURCE_NOT_RECOGNISED,
                "occured_at": __get_current_datetime(),
                "user_identifier": user_id,
                "subjects": [__construct_robot_message_subject(robot_serial_number, robot_uuid)],
                "metadata": {},
            },
            "lims": app.config["RMQ_LIMS_ID"],
        }
        return [], Message(message_content)
    except Exception as e:
        # Never propagate: callers expect (errors, None) on any failure.
        logger.error(f"Failed to construct a {PLATE_EVENT_SOURCE_NOT_RECOGNISED} message")
        logger.exception(e)
        return [
            "An unexpected error occurred attempting to construct the "
            f"{PLATE_EVENT_SOURCE_NOT_RECOGNISED} event message"
        ], None
def grab_kaeri_nuclide(nuc, build_dir="", n=None):
    """Grabs a nuclide file from KAERI from the web and places
    it a {nuc}.html file in the build directory.

    Note: Python 2 code (``basestring``, ``urllib2``).

    Parameters
    ----------
    nuc : str, int
        nuclide, preferably in name form.
    build_dir : str, optional
        Directory to place html files in.
    n : None or int
        Optional flag on data to grab. None = basic data,
        2 = cross section summary, 3 = cross section graphs.
    """
    if not isinstance(nuc, basestring):
        nuc = nucname.name(nuc).upper()

    if n is None:
        filename = os.path.join(build_dir, nuc + '.html')
        kaeri_url = 'http://atom.kaeri.re.kr/cgi-bin/nuclide?nuc={0}'.format(nuc)
    else:
        filename = os.path.join(build_dir, '{nuc}_{n}.html'.format(nuc=nuc, n=n))
        kaeri_url = 'http://atom.kaeri.re.kr/cgi-bin/nuclide?nuc={0}&n={n}'.format(nuc, n=n)
    print("    getting {0} and placing in {1}".format(nuc, filename))

    # Get the url; spoof the user agent so the server serves the page.
    req = urllib2.Request(kaeri_url, headers={'User-Agent': 'Mozilla/5.0'})
    hdl = urllib2.urlopen(req, timeout=30.0)
    i = 1

    # try reading in the data until it works or ten times
    read_in = False
    while (not read_in) and (i <= 10):
        try:
            kaeri_html = hdl.read()
            read_in = True
        except URLError:
            # Reopen the connection and retry on transient network errors.
            hdl.close()
            i += 1
            print("    getting {0} and placing in {1}, attempt {2}".format(nuc, filename, i))
            hdl = urllib2.urlopen(req, timeout=30.0)

    # Write out to the file
    with open(filename, 'w') as f:
        f.write(kaeri_html)
def odd(x):
    """True if x is odd.

    Returns an actual bool as the docstring promises (the original returned
    the raw int 0/1); backward compatible since ``bool`` subclasses ``int``.
    Correct for negative integers too (Python's ``&`` on negatives follows
    two's-complement semantics, so e.g. -3 & 1 == 1).
    """
    return bool(x & 1)
def sparse_transformer_local():
  """Set of hyperparameters for a sparse model using only local.

  Configures a decoder-only sparse transformer whose attention heads are
  all local; the clustering/strided sparsity variants are disabled (their
  head counts are 0).  Returns the populated hparams object.
  """
  hparams = common_hparams.basic_params1()
  # Sequence length / batch sizing.
  hparams.max_length = 4096
  hparams.batch_size = 4096
  hparams.add_hparam("max_target_length", 4096)
  hparams.add_hparam("add_timing_signal", False)
  # Attention head allocation: all 8 heads are local attention.
  hparams.add_hparam("local_num_heads", 8)
  hparams.add_hparam("sparsity_cluster_num_heads", 0)
  hparams.add_hparam("sparsity_strided_num_heads", 0)
  hparams.add_hparam("sparsity_cluster_strided_num_heads", 0)
  hparams.add_hparam("sparsity_skip_first", 0)
  hparams.add_hparam("ema", True)
  # Local attention windowing: query/memory block shapes and overlap.
  hparams.add_hparam("query_shape", (512,))
  hparams.add_hparam("memory_query_shape", (512,))
  hparams.add_hparam("memory_flange", (512,))
  hparams.add_hparam("sparsity_cluster_size", 0)
  hparams.add_hparam("sparsity_cluster_attention_window", 0)
  # Model depth: decoder-only (no encoder layers).
  hparams.add_hparam("num_encoder_layers", 0)
  hparams.add_hparam("num_decoder_layers", 24)
  hparams.add_hparam("attention_key_channels", 0)  # Uses hidden_size
  hparams.add_hparam("attention_value_channels", 0)  # Uses hidden_size
  hparams.add_hparam("attention_dropout", 0.0)
  hparams.add_hparam("ffn_layer", "conv_hidden_relu")
  hparams.add_hparam("filter_size", 2048)  # Used in ffn_layer
  hparams.add_hparam("relu_dropout", 0.0)  # Used in ffn_layer
  hparams.add_hparam("input_dropout", 0.0)  # dropout on input sequences
  hparams.add_hparam("target_dropout", 0.0)  # dropout on target sequences
  hparams.add_hparam("use_tpu", True)
  hparams.tpu_enable_host_call = True  # Enable summaries on TPU
  hparams.pad_batch = True
  hparams.bottom = {
      "targets": target_bottom,
  }
  # Optimizer
  hparams.dropout = 0.0
  hparams.clip_grad_norm = 0.  # i.e. no gradient clipping
  hparams.optimizer = "adafactor"
  hparams.optimizer_adafactor_beta1 = 0.0
  hparams.optimizer_adafactor_beta2 = 0.999
  hparams.optimizer_adafactor_clipping_threshold = 1.0
  hparams.optimizer_adafactor_decay_type = "pow"
  hparams.optimizer_adafactor_memory_exponent = 0.8
  hparams.optimizer_adafactor_multiply_by_parameter_scale = True
  hparams.learning_rate_schedule = "constant*rsqrt_normalized_decay"
  hparams.learning_rate_warmup_steps = 10000
  hparams.learning_rate_constant = 0.01
  hparams.initializer_gain = 0.2
  hparams.initializer = "uniform_unit_scaling"
  hparams.weight_decay = 0.0
  hparams.label_smoothing = 0.0
  hparams.summarize_vars = True
  hparams.hidden_size = 512
  # Memory saving measures
  hparams.add_hparam("cache_padding_bias", False)
  hparams.add_hparam("embedding_dims", 512)
  hparams.add_hparam("share_qk", True)
  hparams.shared_embedding = True
  hparams.shared_embedding_and_softmax_weights = True
  # relative attention
  hparams.max_relative_position = 1024
  hparams.add_hparam("local_relative", True)
  hparams.add_hparam("sparsity_cluster_relative", True)
  hparams.add_hparam("sparsity_cluster_strided_relative", True)
  hparams.add_hparam("sparsity_strided_relative", False)
  # Decoding
  hparams.add_hparam("nucleus_sampling", 0.9)
  hparams.add_hparam("num_decode_cores", 8)
  hparams.add_hparam("fast_decode", False)
  # Clustering hparams
  hparams.add_hparam("beta", 1e-4)
  hparams.add_hparam("decay", 0.999)
  # LSH attention as in Reformer
  hparams.add_hparam("hash_items", False)
  hparams.add_hparam("token_bias_wt_trainable", False)
  return hparams
def make_aperture_mask(self, snr_threshold=5, margin=4):
    """Returns an aperture photometry mask.

    Selects the contiguous region of above-threshold pixels that contains
    the brightest pixel near the image center.

    Parameters
    ----------
    snr_threshold : float
        Background detection threshold, in units of scaled median absolute
        deviation above the median flux image.
    margin : int
        Half-width (in pixels) of the box around the central pixel that is
        searched for the brightest pixel.

    Returns
    -------
    numpy.ndarray
        Boolean array with the shape of one flux frame; True for pixels in
        the selected contiguous region.
    """
    # Find the pixels that are above the threshold in the median flux image
    median = np.nanmedian(self.flux, axis=0)
    mad = median_absolute_deviation(median[np.isfinite(median)])
    # 1.4826 turns MAD into STDEV for a Gaussian
    mad_cut = 1.4826 * mad * snr_threshold
    region = np.where(median > mad_cut, 1, 0)
    # Label all contiguous regions above the threshold
    labels = scipy.ndimage.label(region)[0]
    # Central pixel coordinate
    centralpix = [1 + median.shape[0] // 2, 1 + median.shape[1] // 2]
    # find brightest pix within margin of central pix
    central_img = median[centralpix[0] - margin: centralpix[0] + margin,
                         centralpix[1] - margin: centralpix[1] + margin]
    # unravel_index converts indices into a tuple of coordinate arrays
    brightestpix = np.unravel_index(central_img.argmax(), central_img.shape)
    bpixy, bpixx = brightestpix
    # Which label corresponds to the brightest pixel?
    regnum = labels[centralpix[0] - margin + bpixy, centralpix[1] - margin + bpixx]
    return labels == regnum
def test_relative_content_url_with_object():
    """This function tests creating a content mention with a relative content URL but with a Khoros object.

    .. versionadded:: 2.4.0
    :returns: None
    """
    title, url, content_id = get_content_test_data(relative_url=True)
    khoros_object = Khoros()
    print()
    # Keyword-argument invocations: first without, then with a Content ID.
    for call_kwargs in ({'title': title, 'url': url},
                        {'content_id': content_id, 'title': title, 'url': url}):
        mention = messages.format_content_mention(khoros_object, **call_kwargs)
        assert expected_content_response(mention)  # nosec
    # Dictionary invocations: first without, then with a Content ID.
    content_info = {'title': title, 'url': url}
    assert expected_content_response(
        messages.format_content_mention(khoros_object, content_info))  # nosec
    content_info['id'] = content_id
    assert expected_content_response(
        messages.format_content_mention(khoros_object, content_info))  # nosec
def verify_json_message(json_message, expected_message):
    """
    Verify that each expected key/value pair is present in the JSON message.
    """
    LOGGER.debug(json_message)
    LOGGER.debug(expected_message)
    try:
        message = json.loads(json_message)
    except TypeError:
        # Already a parsed mapping rather than a JSON string.
        message = json_message
    for key in (KEY_SITE_ID, KEY_VARIABLE_ID, KEY_VALUE, KEY_DATE,
                KEY_DST_TIMEZONE, KEY_NON_DST_TIMEZONE):
        assert_equal(message[key], expected_message[key])
def verify_password(username, password):
    """Return True if *username* is known and *password* matches its stored hash."""
    if username not in users:
        return False
    return check_password_hash(users.get(username), password)
def data_mnist(one_hot=True):
    """
    Preprocess MNIST dataset.

    Parameters
    ----------
    one_hot : bool
        If True, convert the label vectors into one-hot class matrices.

    Returns
    -------
    tuple
        (X_train, y_train, X_test, y_test) with images reshaped to
        (N, rows, cols, channels) float32 arrays scaled to [0, 1].
    """
    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(X_train.shape[0],
                              FLAGS.IMAGE_ROWS,
                              FLAGS.IMAGE_COLS,
                              FLAGS.NUM_CHANNELS)
    X_test = X_test.reshape(X_test.shape[0],
                            FLAGS.IMAGE_ROWS,
                            FLAGS.IMAGE_COLS,
                            FLAGS.NUM_CHANNELS)
    # Scale pixel values from [0, 255] into [0, 1].
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')
    # Fixed: this line was a Python 2 print statement, which is a
    # SyntaxError under Python 3 (the rest of the function uses print()).
    print("Loaded MNIST test data.")
    if one_hot:
        # convert class vectors to binary class matrices
        y_train = np_utils.to_categorical(y_train, FLAGS.NUM_CLASSES).astype(np.float32)
        y_test = np_utils.to_categorical(y_test, FLAGS.NUM_CLASSES).astype(np.float32)
    return X_train, y_train, X_test, y_test
def get_file_paths_by_pattern(pattern='*', folder=None):
    """Get a file path list matched given pattern.

    Args:
        pattern(str): a pattern to match files.
        folder(str): searching folder; when None, the pattern is used as-is
            (relative to the current working directory).

    Returns:
        (list of str): a list of matching paths.
    """
    search = pattern if folder is None else os.path.join(folder, pattern)
    return glob.glob(search)
def pval_two(n, m, N, Z_all, tau_obs):
    """
    Calculate the p-value of a two sided test.

    Values of tau at least as extreme (in absolute deviation from the
    sample tau) as the observed tau are counted over all assignments.

    Parameters
    ----------
    n : int
        the sum of all subjects in the sample group
    m : int
        number of subjects who are 1 if control group
    N : array
        an array of the four potential-outcome counts (N11, N10, N01, N00)
    Z_all : matrix
        the output from the function nchoosem (rows = assignment vectors)
    tau_obs : float
        the observed value of tau

    Returns
    -------
    pd : float
        the pval of the test statistic
    """
    assert m <= n, "# of subjects who are 1 must be <= to sum of all subjects"
    n11, n10, n01, n00 = (int(x) for x in N)
    # Potential-outcome table: column 0 = outcome under treatment,
    # column 1 = outcome under control.
    dat = np.zeros((n, 2))
    if n11 > 0:
        dat[:n11, :] = 1
    if n10 > 0:
        dat[n11:n11 + n10, 0] = 1
        dat[n11:n11 + n10, 1] = 0
    if n01 > 0:
        dat[n11 + n10:n11 + n10 + n01, 0] = 0
        dat[n11 + n10:n11 + n10 + n01, 1] = 1
    if n00 > 0:
        dat[n11 + n10 + n01:n11 + n10 + n01 + n00, :] = 0
    # Estimated tau for every possible assignment vector.
    tau_hat = Z_all @ dat[:, 0] / m - (1 - Z_all) @ dat[:, 1] / (n - m)
    tau_N = (n10 - n01) / n
    # Rounding to 15 decimals guards the >= comparison against float noise.
    extreme = (np.round(np.abs(tau_hat - tau_N), 15)
               >= np.round(np.abs(tau_obs - tau_N), 15))
    return extreme.sum() / Z_all.shape[0]
def make_where_tests(options):
  """Make a set of tests to do where.

  Args:
    options: zip-test options object, forwarded to make_zip_of_tests.
  """
  # Each dict is one parameter sweep: dtypes, the pair of input shapes,
  # and whether to exercise tf.where_v2 or the legacy tf.where.
  test_parameters = [
      {
          "input_dtype": [tf.float32, tf.int32],
          "input_shape_set": [([1, 2, 3, 4], [1, 2, 3, 4]),],
          "use_where_v2": [False, True],
      },
      # Broadcasting shapes (trailing dim 1) are only run against where_v2.
      {
          "input_dtype": [tf.float32, tf.int32],
          "input_shape_set": [([1, 2, 3, 4], [1, 2, 3, 1]),],
          "use_where_v2": [True],
      },
  ]
  def build_graph(parameters):
    """Build the where op testing graph."""
    input_value1 = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input2",
        shape=parameters["input_shape_set"][0])
    input_value2 = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input3",
        shape=parameters["input_shape_set"][1])
    # Condition tensor: elementwise comparison of the two inputs.
    less = tf.less(input_value1, input_value2)
    where = tf.where_v2 if parameters["use_where_v2"] else tf.where
    out = where(less, input_value1, input_value2)
    return [input_value1, input_value2], [out]
  def build_inputs(parameters, sess, inputs, outputs):
    # Feed random data of the requested dtype/shapes and run the graph.
    input_value1 = create_tensor_data(parameters["input_dtype"],
                                      parameters["input_shape_set"][0])
    input_value2 = create_tensor_data(parameters["input_dtype"],
                                      parameters["input_shape_set"][1])
    return [input_value1, input_value2], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
def test_then_1() -> None:
    """On a successful parse, `then` runs its assertion and `catch` never fires."""
    f: Parser = token("foo")
    def f_then(result: ParseResult) -> None:
        # Success path: the matched token is delivered to the callback.
        assert result.tokens == ["foo"]
    def f_catch(result: ParseResult) -> None:
        # Message translates to: "on success, this handler must not fire."
        assert False, "成功した場合は空振りする必要がある。"
    assert f.exec("foobar").then(f_then).catch(f_catch)
def install(lib_name):
    """
    Installs a library from a local file in ./libs

    Parameters
    ----------
    lib_name: str
        the name of the library that will be installed

    Returns
    -------
    nothing

    Exception
    ---------
    Raises an Exception if the library can't be installed
    from a local file
    """
    # Fixed: the original referenced an undefined name `library` here,
    # raising NameError for every git URL; the parameter is `lib_name`.
    if 'git+https://github.com/' in lib_name:
        # Presumably git_to_filename yields the locally-cached archive name,
        # or None when no such file exists -- TODO confirm.
        local_file = next(git_to_filename(lib_name))
        if local_file is None:
            raise Exception('could not install ' + lib_name +
                            ' from file, because file does not exist.')
        # Force a clean reinstall straight from the local archive.
        args = [
            'install',
            '--upgrade',
            '--force-reinstall',
            join(getcwd(), 'libs', local_file),
        ]
    else:
        # Resolve the package offline against the local ./libs directory.
        args = [
            'install',
            lib_name,
            '--no-index',
            '--find-links',
            'file://' + getcwd() + '/libs',
        ]
    if pip.main(args) != 0:
        raise Exception('could not install ' + lib_name + ' from file')
def test_plot_state(sampler, tmpdir, filename, track_gradients):
    """Test making the state plot.

    Populates a mocked sampler with minimal per-iteration histories, then
    checks that plot_state either writes the requested file or returns a
    figure when no filename is given.
    """
    x = np.arange(10)
    # Minimal histories plot_state reads from the sampler.
    sampler.min_likelihood = x
    sampler.max_likelihood = x
    sampler.iteration = 1003
    sampler.training_iterations = [256, 711]
    sampler.train_on_empty = False
    sampler.population_iterations = [256, 500, 711, 800]
    sampler.population_acceptance = 4 * [0.5]
    sampler.population_radii = 4 * [1.]
    sampler.checkpoint_iterations = [600]
    sampler.likelihood_evaluations = x
    # Mock the internal state object holding log-volumes and gradients.
    sampler.state = MagicMock()
    sampler.state.log_vols = np.linspace(0, -10, 1050)
    sampler.state.track_gradients = track_gradients
    sampler.state.gradients = np.arange(1050)
    sampler.logZ_history = x
    sampler.dZ_history = x
    sampler.mean_acceptance_history = x
    sampler.rolling_p = np.arange(4)
    if filename is not None:
        # Redirect output into a fresh temporary directory.
        sampler.output = tmpdir.mkdir('test_plot_state')
        filename = os.path.join(sampler.output, filename)
    fig = NestedSampler.plot_state(sampler, filename)
    if filename is not None:
        assert os.path.exists(filename)
    else:
        assert fig is not None
def classify_pixel(input_data, classifier, threads=8, ram=4000):
    """
    Runs a pre-trained ilastik classifier on a volume of data
    Adapted from Stuart Berg's example here:
    https://github.com/ilastik/ilastik/blob/master/examples/example_python_client.py

    Arguments:
        input_data: data to be classified - 3D numpy array
        classifier: ilastik trained/classified file
        threads: number of thread to use for classifying input data
        ram: RAM to use in MB

    Returns:
        predictions: squeezed numpy array produced by ilastik's batch
        export for the input volume
    """
    # Removed unused debugging imports (six, pdb) left over from development.
    import numpy as np
    from collections import OrderedDict
    import vigra
    import os
    import ilastik_main
    from ilastik.applets.dataSelection import DatasetInfo
    from ilastik.workflows.pixelClassification import PixelClassificationWorkflow
    # Before we start ilastik, prepare these environment variable settings.
    os.environ["LAZYFLOW_THREADS"] = str(threads)
    os.environ["LAZYFLOW_TOTAL_RAM_MB"] = str(ram)
    # Set the command-line arguments directly into argparse.Namespace object
    # Provide your project file, and don't forget to specify headless.
    args = ilastik_main.parser.parse_args([])
    args.headless = True
    args.project = classifier
    # Instantiate the 'shell', (an instance of ilastik.shell.HeadlessShell)
    # This also loads the project file into shell.projectManager
    shell = ilastik_main.main(args)
    assert isinstance(shell.workflow, PixelClassificationWorkflow)
    # Obtain the training operator
    opPixelClassification = shell.workflow.pcApplet.topLevelOperator
    # Sanity checks
    assert len(opPixelClassification.InputImages) > 0
    assert opPixelClassification.Classifier.ready()
    print("input_data.shape", input_data.shape)
    # Tagging the axes ('xyz' for this 3D volume) ensures that ilastik
    # interprets them correctly.
    input_data = vigra.taggedView(input_data, 'xyz')
    # In case you're curious about which label class is which,
    # let's read the label names from the project file.
    label_names = opPixelClassification.LabelNames.value
    label_colors = opPixelClassification.LabelColors.value
    probability_colors = opPixelClassification.PmapColors.value
    print("label_names, label_colors, probability_colors", label_names, label_colors, probability_colors)
    # Construct an OrderedDict of role-names -> DatasetInfos
    # (See PixelClassificationWorkflow.ROLE_NAMES)
    role_data_dict = OrderedDict([("Raw Data",
                                   [DatasetInfo(preloaded_array=input_data)])])
    # Run the export via the BatchProcessingApplet
    # Note: If you don't provide export_to_array, then the results will
    # be exported to disk according to project's DataExport settings.
    # In that case, run_export() returns None.
    predictions = shell.workflow.batchProcessingApplet.\
        run_export(role_data_dict, export_to_array=True)
    predictions = np.squeeze(predictions)
    print("predictions.dtype, predictions.shape", predictions.dtype, predictions.shape)
    print("DONE.")
    return predictions
def invite_accepted_candidates():
    """Invites accepted candidates to create an account and set their own password."""
    form = InviteAcceptedCandidatesForm()
    if form.validate_on_submit():
        # The form posts a comma-separated list of candidate ids.
        selected = [ Candidate.query.filter_by(id=c).first() for c in form.selected_candidates.data.split(',') ]
        user_role = Role.query.filter_by(name='User').first()
        # for each selected candidate create a new user account
        for candidate in selected:
            user = User.query.filter_by(email=candidate.email).first()
            if user is None:
                user = User(
                    role=user_role,
                    first_name=candidate.first_name,
                    last_name=candidate.last_name,
                    email=candidate.email,
                    candidate=candidate)
                db.session.add(user)
                db.session.commit()
                token = user.generate_confirmation_token()
                invite_link = url_for(
                    'account.join_from_invite',
                    user_id=user.id,
                    token=token,
                    _external=True)
                # Queue the invitation e-mail for asynchronous delivery.
                get_queue().enqueue(
                    send_email,
                    recipient=user.email,
                    subject='You Are Invited To Join',
                    template='account/email/invite',
                    user=user,
                    invite_link=invite_link)
        # Fixed: this string was previously accumulated in a variable named
        # `str`, shadowing the builtin; join produces the same text.
        invited_names = ', '.join(
            '{} {}'.format(c.first_name, c.last_name) for c in selected)
        flash('Candidates {} successfully invited'.format(invited_names),
              'form-success')
    return render_template('admin/invite_accepted_candidates.html', form=form, all_terms=Term.query.order_by(Term.end_date.desc()).all(), accepted_candidates=Candidate.query.filter_by(status=Status.ASSIGNED).all())
def plot_perc_miscoordination(joint_actions, state, iter_avg, n_runs, num_episodes, run=None):
    """Plot percentage of miscoordination per agent.

    Parameters
    ----------
    joint_actions : array-like
        Joint-action frequencies, reshaped for plotting by
        ``shape_dim_for_plot``.  The indexing below assumes 9 joint actions
        per state (a flattened 3x3 action space) -- TODO confirm.
    state : int
        Index of the state whose joint actions are plotted.
    iter_avg : int
        Number of time steps averaged into each plotted point.
    n_runs : int
        Number of independent runs.
    num_episodes : int
        Total number of episodes; sets the x-axis extent.
    run : int or None
        Optional single run to plot instead of aggregating over runs;
        passed through to ``shape_dim_for_plot``.
    """
    perc_joint_actions = shape_dim_for_plot(iter_avg, n_runs, joint_actions, run)
    means = perc_joint_actions.mean(axis=2)
    x_axis = np.arange(0, num_episodes/iter_avg) * iter_avg
    # Coordination rate per agent: agent 1 sums joint actions 0, 4, 8;
    # agent 2 sums 0, 4, 5 -- presumably reflecting each agent's own
    # coordinated pairs in the joint-action encoding; verify with caller.
    perc_coor_1 = means[state][0] + means[state][4] + means[state][8]
    perc_coor_2 = means[state][0] + means[state][4] + means[state][5]
    perc_misc_1 = 1 - perc_coor_1
    perc_misc_2 = 1 - perc_coor_2
    plt.plot(x_axis, perc_misc_1, label='agent 1')
    plt.plot(x_axis, perc_misc_2, label='agent 2')
    plt.title(f"Percentage miscoordination per {iter_avg} time steps")
    plt.xlabel("number of iterations")
    plt.ylabel("percentage")
    plt.legend()
def ramp_overlap(x0, y0, w0, h0, angle0, x1, y1, w1, h1, angle1):
    """Calculates the overlap area between two ramps."""
    # Cheap rejection 1: bounding circles around each ramp's rectangle.
    if not bounding_circles_overlap(
            x0, y0, _rect_diagonal(w0, h0), x1, y1, _rect_diagonal(w1, h1)):
        return 0.
    # Cheap rejection 2: axis-aligned bounding boxes.
    frame0 = rect_bounding_frame(x0, y0, w0, h0, angle0)
    frame1 = rect_bounding_frame(x1, y1, w1, h1, angle1)
    if not bounding_box_overlap(*frame0, *frame1):
        return 0.
    # Both cheap tests passed: compute the exact polygon intersection.
    poly0 = _build_shapely_ramp(x0, y0, w0, h0, angle0)
    poly1 = _build_shapely_ramp(x1, y1, w1, h1, angle1)
    return poly0.intersection(poly1).area
def url_external(path, query):
    """Generate external URLs with HTTPS (if configured)."""
    try:
        base = request.url_root
        if settings.URL_SCHEME is not None:
            # Force the configured scheme (e.g. https) onto the API root.
            base = urlparse(base)._replace(scheme=settings.URL_SCHEME).geturl()
        full_path = path if query is None else path + query_string(query)
        return urljoin(base, full_path)
    except RuntimeError:
        # Raised outside of a Flask request context; no base URL available.
        return None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.