| content | id |
|---|---|
def log_loss(
predictions: ArrayLike,
targets: ArrayLike,
) -> ArrayLike:
"""Calculates the log loss of predictions wrt targets.
Args:
predictions: a vector of probabilities of arbitrary shape.
targets: a vector of probabilities of shape compatible with predictions.
Returns:
    a vector of the same shape as `predictions`.
"""
base.type_assert([predictions, targets], float)
return -jnp.log(likelihood(predictions, targets))
| 9,100
|
def GetAccessTokenOrDie(options):
"""Generates a fresh access token using credentials passed into the script.
Args:
options: Flag values passed into the script.
Returns:
A fresh access token.
Raises:
ValueError: response JSON could not be parsed, or has no access_token.
"""
cred = GetDSApiCredOrDie(options)
[cid, csc, refresh_token] = cred.split(",")
query_string_template = (
"refresh_token=%s&client_id=%s&client_secret=%s"
"&grant_type=refresh_token"
)
output = RunCommand(
[
"curl",
"--data",
query_string_template % (refresh_token, cid, csc),
"https://accounts.google.com/o/oauth2/token",
]
)
json_output = json.loads(output)
if "access_token" in json_output:
return json_output["access_token"]
else:
raise ValueError("missing access_token in response: %s" % output)
| 9,101
|
def test_races2000():
"""Test module races2000.py by downloading
races2000.csv and testing shape of
extracted data has 77 rows and 5 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = races2000(test_path)
try:
assert x_train.shape == (77, 5)
    except:
        shutil.rmtree(test_path)
        raise
| 9,102
|
def remove_whitespace(tokens):
"""Remove any top-level whitespace and comments in a token list."""
return tuple(
token for token in tokens
if token.type not in ('whitespace', 'comment'))
| 9,103
|
def _closefile(fpath, modnames):
"""
An api to remove dependencies from code by "closing" them.
CommandLine:
xdoctest -m ~/code/netharn/netharn/export/closer.py _closefile
xdoctest -m netharn.export.closer _closefile --fpath=~/code/boltons/tests/test_cmdutils.py --modnames=ubelt,
Example:
>>> # SCRIPT
>>> # ENTRYPOINT
>>> import scriptconfig as scfg
>>> config = scfg.quick_cli({
>>> 'fpath': scfg.Path(None),
>>> 'modnames': scfg.Value([]),
>>> })
>>> fpath = config['fpath'] = ub.expandpath('~/code/boltons/tests/test_cmdutils.py')
>>> modnames = config['modnames'] = ['ubelt']
>>> _closefile(**config)
"""
from xdoctest import static_analysis as static
modpath = fpath
expand_names = modnames
    with open(fpath, 'r') as fobj:
        source = fobj.read()
calldefs = static.parse_calldefs(source, fpath)
calldefs.pop('__doc__', None)
closer = Closer()
for key in calldefs.keys():
closer.add_static(key, modpath)
closer.expand(expand_names)
#print(ub.repr2(closer.body_defs, si=1))
print(closer.current_sourcecode())
| 9,104
|
def update(args):
"""
For LdaCgsMulti
"""
(docs, doc_indices, mtrand_state, dtype) = args
start, stop = docs[0][0], docs[-1][1]
global Ktype
if _K.value < 2 ** 8:
Ktype = np.uint8
elif _K.value < 2 ** 16:
Ktype = np.uint16
else:
raise NotImplementedError("Invalid Ktype. k={}".format(_K))
corpus = np.frombuffer(_corpus, dtype=dtype)[start:stop]
Z = np.frombuffer(_Z, dtype=Ktype)[start:stop].copy()
gbl_word_top = np.frombuffer(_word_top, dtype=np.float32)
gbl_word_top = gbl_word_top.reshape(_V.value, _K.value)
loc_word_top = gbl_word_top.copy()
inv_top_sums = np.frombuffer(_inv_top_sums, dtype=np.float32).copy()
top_doc = np.frombuffer(_top_doc, dtype=np.float32)
top_doc = top_doc.reshape(_K.value, int(top_doc.size/_K.value))
top_doc = top_doc[:, doc_indices[0]:doc_indices[1]].copy()
log_p = 0
log_wk = np.log(gbl_word_top * inv_top_sums[np.newaxis, :])
log_kc = np.log(top_doc / top_doc.sum(0)[np.newaxis, :])
indices = np.array([(j - start) for (i,j) in docs], dtype='i')
if dtype == np.uint16 and Ktype == np.uint8:
update_fn = cgs_update[cython.ushort,cython.uchar]
elif dtype == np.uint16 and Ktype == np.uint16:
update_fn = cgs_update[cython.ushort,cython.ushort]
elif dtype == np.uint32 and Ktype == np.uint8:
update_fn = cgs_update[cython.uint,cython.uchar]
elif dtype == np.uint32 and Ktype == np.uint16:
update_fn = cgs_update[cython.uint,cython.ushort]
else:
raise NotImplementedError
results = update_fn(_iteration.value,
corpus,
loc_word_top,
inv_top_sums,
top_doc,
Z,
indices,
mtrand_state[0],
mtrand_state[1],
mtrand_state[2],
mtrand_state[3],
mtrand_state[4])
#final_results = [np.asarray(result, dtype=dtype)
# for result,dtype in zip(results[:4],
# [Ktype, np.float32, np.float32, np.float32])]
#final_results.extend(results[4:])
(loc_word_top, inv_top_sums, top_doc, Z, log_p, mtrand_str, mtrand_keys,
mtrand_pos, mtrand_has_gauss, mtrand_cached_gaussian) = results
loc_word_top -= gbl_word_top
return (Z, top_doc, loc_word_top, log_p,
mtrand_str, mtrand_keys, mtrand_pos,
mtrand_has_gauss, mtrand_cached_gaussian)
| 9,105
|
def GetPackages(manifest, conf, cache_dir, interactive=False):
"""
Make sure that the packages exist. If they don't, then
attempt to download them. If interactive, use lots of
dialog messages.
"""
conf.SetPackageDir(cache_dir)
try:
manifest.RunValidationProgram(cache_dir, kind=Manifest.VALIDATE_INSTALL)
except Exceptions.UpdateInvalidUpdateException as e:
if interactive:
Dialog.MessageBox(Title(),
"Invalid installation:\n\n\t" + str(e),
height=20, width=45).run()
raise InstallationError(str(e))
except BaseException as e:
if conf.SystemManifest() is None:
LogIt("No system manifest (duh), can't run validation program")
else:
LogIt("Trying to run validation program, got exception {}".format(str(e)))
raise
# Okay, now let's ensure all the packages are downloaded
LogIt("Using cache directory {}".format(cache_dir))
try:
count = 0
total = len(manifest.Packages())
for pkg in manifest.Packages():
count += 1
LogIt("Locating package file {}-{}".format(pkg.Name(), pkg.Version()))
if interactive:
if os.path.exists(os.path.join(cache_dir, pkg.FileName())):
status = Dialog.MessageBox(Title(), "", height=8, width=60, wait=False)
text = "Verifying"
else:
text = "Downloading and verifying"
status = Dialog.Gauge(Title(), "", height=8, width=60)
status.prompt = "{} package {} ({} of {})".format(text, pkg.Name(), count, total)
status.clear()
status.run()
LogIt("Started gauge")
def DownloadHandler(path, url, size=0, progress=None, download_rate=None):
if progress:
if status.__class__ == Dialog.Gauge:
status.percentage = progress
LogIt("DownloadHandler({}, {}, {}, {}, {})".format(path, url, size, progress, download_rate))
try:
pkg_file = conf.FindPackageFile(pkg,
pkg_type=PkgFileFullOnly,
handler=DownloadHandler if interactive else None,
save_dir=cache_dir)
except Exceptions.ChecksumFailException as e:
if interactive:
try:
Dialog.MessageBox(Title(),
"Package {} has an invalid checksum".format(pkg.Name()),
height=5, width=50).run()
except:
pass
raise InstallationError("Invalid package checksum")
except BaseException as e:
LogIt("Got exception {} while trying to download package".format(str(e)))
raise
finally:
if interactive:
if status.__class__ == Dialog.Gauge:
status.percentage = 100
dc = status.result
if pkg_file is None:
if interactive:
try:
Dialog.MessageBox(Title(),
"Unable to locate package {}".format(pkg.Name()),
height=15, width=30).run()
except:
pass
raise InstallationError("Missing package {}".format(pkg.Name()))
else:
pkg_file.close()
try:
# I have no idea why I need this.
# Without this, the next YesNo dialog won't be able to use arrow keys.
# Investigate this
Dialog.MessageBox("", "Packages Verified", wait=False).run()
except:
pass
except InstallationError:
raise
except BaseException as e:
LogIt("Got exception {} while trying to load packages".format(str(e)))
raise InstallationError(str(e))
| 9,106
|
def warn(string: str) -> str:
"""Add warn colour codes to string
Args:
string (str): Input string
Returns:
str: Warn string
"""
return "\033[93m" + string + "\033[0m"
| 9,107
|
def test_get_latest_certification_period_no_threshold():
""" Tests the get_latest_certification_period function to make sure it returns Nones if there's no prior period """
results = get_latest_certification_period()
assert results['quarter'] is None
assert results['year'] is None
| 9,108
|
def normalized_mean_square_error(logits, labels, axis = [0,1,2,3]):
"""
logits : [batch_size, w, h, num_classes]
labels : [batch_size, w, h, 1]
"""
with tf.name_scope("normalized_mean_square_error"):
nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(logits, labels), axis=[1,2,3]))
nmse_b = tf.sqrt(tf.reduce_sum(tf.square(labels), axis=[1,2,3]))
nmse = tf.reduce_mean(nmse_a / nmse_b)
return nmse
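# For reference, the quantity returned above is, per batch element b, with the
# L2 norm taken over the width, height and channel axes:
#   NMSE = mean_b ( ||logits_b - labels_b||_2 / ||labels_b||_2 )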
| 9,109
|
def load_glove_data():
"""
Loads Stanford's dictionary of word embeddings created by using corpus of
Twitter posts. Word embeddings are vectors of 200 components.
OUTPUT:
dictionary containing tweet word embeddings
"""
glove_path = path.join('..', 'data', 'glove', 'glove.twitter.27B.200d.txt')
    model = {}
    with open(glove_path, 'r') as f:
        for line in f:
            splitLine = line.split()
            word = splitLine[0]
            embedding = np.array([float(val) for val in splitLine[1:]])
            model[word] = embedding
    return model
| 9,110
|
def assertIsNotSubdomainOf(testCase, descendant, ancestor):
"""
Assert that C{descendant} *is not* a subdomain of C{ancestor}.
@type testCase: L{unittest.SynchronousTestCase}
@param testCase: The test case on which to run the assertions.
@type descendant: C{str}
@param descendant: The subdomain name to test.
@type ancestor: C{str}
@param ancestor: The superdomain name to test.
"""
testCase.assertFalse(
dns._isSubdomainOf(descendant, ancestor),
'%r is a subdomain of %r' % (descendant, ancestor))
| 9,111
|
def test_string_in_filter(query):
"""
Test in filter on a string field.
"""
Pet.objects.create(name="Brutus", age=12)
Pet.objects.create(name="Mimi", age=3)
Pet.objects.create(name="Jojo, the rabbit", age=3)
schema = Schema(query=query)
query = """
query {
pets (name_In: ["Brutus", "Jojo, the rabbit"]) {
edges {
node {
name
}
}
}
}
"""
result = schema.execute(query)
assert not result.errors
assert result.data["pets"]["edges"] == [
{"node": {"name": "Brutus"}},
{"node": {"name": "Jojo, the rabbit"}},
]
| 9,112
|
def get_partition_to_num_rows(
namespace, tablename, partition_column, partition_column_values
):
"""
Helper function to get total num_rows in hive for given
partition_column_values.
"""
partitions = {
"{0}={1}".format(partition_column, partition_column_value)
for partition_column_value in partition_column_values
}
# Setting higher number of retries, as during testing, sometimes default
# "retries" values didn't seem enough in some cases.
ms = metastore.metastore(
namespace=namespace,
meta_only=True,
retries=10,
# timeout in milliseconds.
timeout=1800000,
)
partition_to_num_rows = {}
all_partitions = ms.get_partitions(tablename)
for hive_partition in all_partitions:
assert "numRows" in hive_partition.parameters, (
"numRows not in hive_partition.parameters,"
"Do not use Presto tables, only Hive tables!')"
)
if hive_partition.partitionName in partitions:
patition_column_value = hive_partition.partitionName.split("=")[1]
partition_to_num_rows[patition_column_value] = int(
hive_partition.parameters["numRows"]
)
return partition_to_num_rows
| 9,113
|
def get_welcome_response():
""" If we wanted to initialize the session to have some attributes we could
add those here
"""
session_attributes = initialize_game()
card_title = "Welcome"
speech_output = "Hello! I am Cookoo. Let's play a game. " \
"Are you ready to play?"
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output))
| 9,114
|
def kl_divergence_with_logits(logits_a, logits_b):
"""
Compute the per-element KL-divergence of a batch.
Args:
logits_a: tensor, model outputs of input a
logits_b: tensor, model outputs of input b
Returns:
Tensor of per-element KL-divergence of model outputs a and b
"""
a = tf.nn.softmax(logits_a, axis=1)
a_loga = tf.reduce_sum(a * log_softmax(logits_a), 1)
a_logb = tf.reduce_sum(a * log_softmax(logits_b), 1)
return a_loga - a_logb
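# Minimal sanity-check sketch (assumes TensorFlow 2.x eager execution; the logits
# values are illustrative). It recomputes the same per-example quantity directly
# from KL(p || q) = sum_i p_i * (log p_i - log q_i); the function above should
# match, provided its module-level log_softmax helper behaves like tf.nn.log_softmax.
import tensorflow as tf

logits_a = tf.constant([[2.0, 0.5, -1.0]])
logits_b = tf.constant([[0.1, 0.2, 0.3]])
p = tf.nn.softmax(logits_a, axis=1)
manual_kl = tf.reduce_sum(
    p * (tf.nn.log_softmax(logits_a, axis=1) - tf.nn.log_softmax(logits_b, axis=1)),
    axis=1)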
| 9,115
|
def load_labels(abs_path):
"""
loads relative path file as dictionary
Args:
abs_path: absolute path
Returns dictionary of mappings
"""
label_tsv = open(abs_path, encoding="utf-8")
labels = list(csv.reader(label_tsv, delimiter="\t"))
return labels
| 9,116
|
def cut_tree_balanced(linkage_matrix_Z, max_cluster_size, verbose=False):
"""This function performs a balanced cut tree of a SciPy linkage matrix built using any linkage method
(e.g. 'ward'). It builds upon the SciPy and Numpy libraries.
The function looks recursively along the hierarchical tree, from the root (single cluster gathering
all the samples) to the leaves (i.e. the clusters with only one sample), retrieving the biggest
possible clusters containing a number of samples lower than a given maximum. In this way, if a
cluster at a specific tree level contains a number of samples higher than the given maximum, it is
ignored and its offspring (smaller) sub-clusters are taken into consideration. If the cluster contains
a number of samples lower than the given maximum, it is taken as result and its offspring sub-clusters
not further processed.
Input parameters:
linkage_matrix_Z: linkage matrix resulting from calling the method scipy.cluster.hierarchy.ward()
I.e. it contains the hierarchical clustering encoded as a linkage matrix.
max_cluster_size: maximum number of data samples contained within the resulting clusters. Thus, all
resulting clusters will contain a number of data samples <= max_cluster_size.
Note that max_cluster_size must be >= 1.
verbose: activates (True) / deactivates (False) some output print commands, which can be useful to
test and understand the proposed tree cut method.
Returns:
vec_cluster_id: one-dimensional numpy array of integers containing for each input sample its corresponding
cluster id. The cluster id is an integer which is higher for deeper tree levels.
vec_last_cluster_level: one-dimensional numpy array of arrays containing for each input sample its
corresponding cluster tree level, i.e. a sequence of 0s and 1s. Note that the cluster level is longer for
deeper tree levels, being [0] the root cluster, [0, 0] and [0, 1] its offspring, and so on. Also note that
    in each cluster splitting, the label 0 denotes the bigger cluster, while the label 1 denotes the smaller one.
"""
try:
# Assert that the input max_cluster_size is >= 1
assert max_cluster_size >= 1
# Perform a full cut tree of the linkage matrix, i.e. containing all tree levels
full_cut = cut_tree(linkage_matrix_Z)
if verbose:
print("Interim full cut tree (square matrix)")
print("Shape = " + str(full_cut.shape))
print(full_cut)
print('')
# Initialize the vble containing the current cluster id (it will be higher for each newly
# found valid cluster, i.e. for each found cluster with <= max_cluster_size data samples)
last_cluster_id = 1
# Initialize the resulting cluster id vector (containing for each row in input_data_x_sample
# its corresponding cluster id)
vec_cluster_id = np.zeros(full_cut.shape[1], dtype=int)
# Initialize the resulting cluster level vector (containing for each data sample its
        # corresponding cluster tree level, i.e. an array of 0s and 1s)
vec_last_cluster_level = np.empty((full_cut.shape[1],), dtype=object)
for i in range(full_cut.shape[1]): vec_last_cluster_level[i] = np.array([0],int)
# Scan the full cut matrix from the last column (root tree level) to the first column (leaves tree level)
if verbose:
print("Note about columns: within the full cut tree, the column " + str(full_cut.shape[1]-1) +
" represents the root, while 0 represent the leaves.")
print("We now scan the full cut tree from the root (column " + str(full_cut.shape[1]-1) + ") "
"to the leaves (column 0).")
print('')
for curr_column in range(full_cut.shape[1]-1,-1,-1):
# Get a list of unique group ids and their count within the current tree level
values, counts = np.unique(full_cut[:,curr_column], return_counts=True)
# Stop if all samples have been already selected (i.e. if all data samples have been already clustered)
if (values.size==1) and (values[0]==-1):
break
# For each group id within the current tree level
for curr_elem_pos in range(values.size):
# If it is a valid group id (i.e. not yet marked as processed with -1) ...
                # Note: data samples which were already included in a valid cluster id (i.e. at a higher tree level)
# are marked with the group id -1 (see below)
if (values[curr_elem_pos] >= 0):
# Select the current group id
selected_curr_value = values[curr_elem_pos]
# Look for the vector positions (related to rows in input_data_x_sample) belonging to
# the current group id
selected_curr_elems = np.where(full_cut[:,curr_column]==selected_curr_value)
# Major step #1: Populate the resulting vector of cluster levels for each data sample
# If we are not at the root level (i.e. single cluster gathering all the samples) ...
if curr_column < (full_cut.shape[1]-1):
# Get the ancestor values and element positions
selected_ancestor_value = full_cut[selected_curr_elems[0][0],curr_column+1]
selected_ancestor_elems = np.where(full_cut[:,curr_column+1]==selected_ancestor_value)
# Compute the values and counts of the offspring (i.e. curr_elem + brothers) and sort them
# by their count (so that the biggest cluster gets the offspring_elem_label = 0, see below)
offspring_values, offspring_counts = np.unique(full_cut[selected_ancestor_elems,curr_column],
return_counts=True)
count_sort_ind = np.argsort(-offspring_counts)
offspring_values = offspring_values[count_sort_ind]
offspring_counts = offspring_counts[count_sort_ind]
# If the number of descendants is > 1 (i.e. if the curr_elem has at least one brother)
if (offspring_values.shape[0] > 1):
# Select the position of the current value (i.e. 0 or 1) and append it to the cluster level
offspring_elem_label = np.where(offspring_values==selected_curr_value)[0][0]
for i in selected_curr_elems[0]:
vec_last_cluster_level[i] = np.hstack((vec_last_cluster_level[i], offspring_elem_label))
# Major step #2: Populate the resulting vector of cluster ids for each data sample,
# and mark them as already clustered (-1)
# If the number of elements is below max_cluster_size ...
if (counts[curr_elem_pos] <= max_cluster_size):
if verbose:
print("Current column in full cut tree = " + str(curr_column))
print("list_group_ids: " + str(values))
print("list_count_samples: " + str(counts))
print("selected_curr_value: " + str(selected_curr_value) + ", count_samples = " +
str(counts[curr_elem_pos]) + ", marked as result")
print('')
# Relate these vector positions to the current cluster id
vec_cluster_id[selected_curr_elems] = last_cluster_id
# Delete these vector positions at the lower tree levels for further processing
# (i.e. mark these elements as already clustered)
full_cut[selected_curr_elems,0:curr_column] = -1
# Update the cluster id
last_cluster_id = last_cluster_id + 1
# Return the resulting clustering array (containing for each row in input_data_x_sample its
# corresponding cluster id) and the clustering level
return vec_cluster_id, vec_last_cluster_level
except AssertionError:
print("Please use a max_cluster_size >= 1")
| 9,117
|
def len_smaller(length: int) -> Callable:
"""Measures if the length of a sequence is smaller than a given length.
>>> len_smaller(2)([0, 1, 2])
False
"""
def len_smaller(seq):
return count(seq) < length
return len_smaller
| 9,118
|
def e_2e_fun(theta, e_init=e_1f):
"""
Electron energy after Compton scattering, (using energy e_1f)
:param theta: angle for scattered photon
:param e_init: initial photon energy
:return:
"""
return e_init / (((m_e * c ** 2) / e_init) * (1 / (1 - np.cos(theta))) + 1)
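# For reference, the return expression above is the kinetic energy of the
# scattered electron (a restatement, not a change):
#   T_e(\theta) = E_\gamma / ( (m_e c^2 / E_\gamma) \cdot \frac{1}{1 - \cos\theta} + 1 ),
# which is algebraically the textbook Compton form
#   T_e = E_\gamma \frac{\alpha (1 - \cos\theta)}{1 + \alpha (1 - \cos\theta)},
#   with \alpha = E_\gamma / (m_e c^2).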
| 9,119
|
def tag_images_for_google_drive(
input_files: AbstractSet[Path],
database: Optional[Path],
extra_tags: Optional[Set[str]],
tag_file: Optional[Path] = None,
from_files: bool = False,
from_db: bool = False,
force: bool = False,
dry: bool = False,
verbose: int = 0) -> Tuple[Mapping[Path, Tuple[str, Sequence[str]]], Mapping[Path, Tuple[str, Sequence[str]]]]:
"""
Analyse csv and files to extract tag and inject hash tag in description.
:param database: The CSV file or None
:param input_files: A set of filename
:param tag_file: A filename to save all tags or None
    :param from_files: A boolean value to use only the file names
:param from_db: A boolean value to use only the CSV file
:param dry: True to simulate the modification in files.
:return: A tuple with the new data base and the description of all modified files.
"""
assert bool(from_files) + bool(from_db) < 2
merge = not from_db and not from_files
assert not ((from_db or merge) and not database)
if not extra_tags:
extra_tags = set()
updated_files: Dict[Path, Tuple[str, List[str]]] = {} # Files to update
update_descriptions = False
ref_descriptions: Dict[Path, Tuple[str, List[str]]] = {}
description_date = 0.0
if database and database.is_file():
description_date = database.stat().st_mtime
with open(str(database), 'rt', encoding='utf-8') as csv_file:
rows = csv.reader(csv_file, delimiter=',')
ref_descriptions = {Path(row[0]): _extract_tags(row[1], '#') for row in rows if len(row) == 2}
else:
update_descriptions = True
if not shutil.which("exiftool"):
LOGGER.error("Install exiftool in PATH before to use tag_images_for_google_drive")
raise OSError(-1, "exiftool not found")
with ExifTool() as exif_tool:
# 1. Update images files
update_descriptions = _manage_files(exif_tool,
input_files,
from_db,
from_files,
ref_descriptions,
extra_tags,
update_descriptions,
updated_files,
force,
verbose)
# 2. Apply the descriptions file
update_descriptions = _manage_db(exif_tool,
description_date,
from_db,
from_files,
merge,
ref_descriptions,
extra_tags,
update_descriptions,
updated_files,
force,
verbose)
# 3. Apply update files
_manage_updated_files(exif_tool, dry, updated_files)
# 4. Update description
_manage_updated_db(database, dry, ref_descriptions, update_descriptions)
# 5. Count tags
all_tags: AbstractSet[str] = set()
nb_files = len(ref_descriptions)
nb_total_tags = 0
for _, (_, keywords) in ref_descriptions.items():
nb_total_tags += len(keywords)
all_tags = set(all_tags).union(keywords)
LOGGER.info(f"Use {nb_total_tags} tags in {nb_files} files, with a dictionary of {len(all_tags)} "
f"({int(nb_files / nb_total_tags * 100) if nb_total_tags else 0} t/f).")
_manage_tags_file(all_tags, dry, tag_file)
LOGGER.debug("Done")
return ref_descriptions, updated_files
| 9,120
|
def xml_reader(filename):
"""
A method using iterparse as above would be preferable, since we just want to
collect the first few tags. Unfortunately, so far iterparse does not work
with html (aka broken xml).
"""
name = os.path.basename(filename)
with open(filename, "rb") as file_h:
if etree.LXML_VERSION < (3, 3):
parser = etree.HTMLParser(encoding="latin1")
tree = etree.parse(file_h, parser)
row_it = tree.iter(tag="row")
element = next(row_it)
attrs = [unicode(child.tag) for child in element.iterchildren()]
else:
row_it = etree.iterparse(file_h, tag="row", html=True)
(event, element) = next(row_it)
attrs = [unicode(child.tag) for child in element.iterchildren()]
return (name, attrs)
| 9,121
|
def exercise_10():
"""
nucleic acids: RNA
"""
in_pdb_str = """\
HEADER RNA 26-MAR-97 1MIS
CRYST1 1.000 1.000 1.000 90.00 90.00 90.00 P 1 1
ORIGX1 1.000000 0.000000 0.000000 0.00000
ORIGX2 0.000000 1.000000 0.000000 0.00000
ORIGX3 0.000000 0.000000 1.000000 0.00000
SCALE1 1.000000 0.000000 0.000000 0.00000
SCALE2 0.000000 1.000000 0.000000 0.00000
SCALE3 0.000000 0.000000 1.000000 0.00000
ATOM 1 O5' G A 1 -39.305 107.866 -51.789 1.00 0.00 O
ATOM 2 C5' G A 1 -38.172 107.974 -50.957 1.00 0.00 C
ATOM 3 C4' G A 1 -37.351 109.206 -51.345 1.00 0.00 C
ATOM 4 O4' G A 1 -36.833 109.097 -52.674 1.00 0.00 O
ATOM 5 C3' G A 1 -38.169 110.487 -51.328 1.00 0.00 C
ATOM 6 O3' G A 1 -38.370 110.983 -50.015 1.00 0.00 O
ATOM 7 C2' G A 1 -37.260 111.365 -52.146 1.00 0.00 C
ATOM 8 O2' G A 1 -36.123 111.712 -51.383 1.00 0.00 O
ATOM 9 C1' G A 1 -36.827 110.411 -53.252 1.00 0.00 C
ATOM 10 N9 G A 1 -37.778 110.451 -54.399 1.00 0.00 N
ATOM 11 C8 G A 1 -38.788 109.567 -54.744 1.00 0.00 C
ATOM 12 N7 G A 1 -39.381 109.860 -55.867 1.00 0.00 N
ATOM 13 C5 G A 1 -38.736 111.011 -56.303 1.00 0.00 C
ATOM 14 C6 G A 1 -38.952 111.785 -57.481 1.00 0.00 C
ATOM 15 O6 G A 1 -39.762 111.589 -58.384 1.00 0.00 O
ATOM 16 N1 G A 1 -38.098 112.882 -57.536 1.00 0.00 N
ATOM 17 C2 G A 1 -37.146 113.193 -56.587 1.00 0.00 C
ATOM 18 N2 G A 1 -36.416 114.294 -56.811 1.00 0.00 N
ATOM 19 N3 G A 1 -36.930 112.460 -55.487 1.00 0.00 N
ATOM 20 C4 G A 1 -37.761 111.387 -55.408 1.00 0.00 C
ATOM 21 H5' G A 1 -37.557 107.080 -51.058 1.00 0.00 H
ATOM 22 H5'' G A 1 -38.499 108.071 -49.921 1.00 0.00 H
ATOM 23 H4' G A 1 -36.512 109.317 -50.658 1.00 0.00 H
ATOM 24 H3' G A 1 -39.114 110.342 -51.848 1.00 0.00 H
ATOM 25 H2' G A 1 -37.743 112.247 -52.531 1.00 0.00 H
ATOM 26 HO2' G A 1 -36.410 112.213 -50.618 1.00 0.00 H
ATOM 27 H1' G A 1 -35.830 110.704 -53.596 1.00 0.00 H
ATOM 28 H8 G A 1 -39.075 108.696 -54.159 1.00 0.00 H
ATOM 29 H1 G A 1 -38.189 113.490 -58.338 1.00 0.00 H
ATOM 30 H21 G A 1 -36.579 114.849 -57.640 1.00 0.00 H
ATOM 31 H22 G A 1 -35.704 114.571 -56.150 1.00 0.00 H
ATOM 32 HO5' G A 1 -39.804 107.089 -51.525 1.00 0.00 H
ATOM 33 P C A 2 -39.575 112.019 -49.689 1.00 0.00 P
ATOM 34 OP1 C A 2 -39.502 112.375 -48.255 1.00 0.00 O
ATOM 35 OP2 C A 2 -40.825 111.449 -50.239 1.00 0.00 O
ATOM 36 O5' C A 2 -39.205 113.330 -50.550 1.00 0.00 O
ATOM 37 C5' C A 2 -38.206 114.229 -50.120 1.00 0.00 C
ATOM 38 C4' C A 2 -38.024 115.328 -51.169 1.00 0.00 C
ATOM 39 O4' C A 2 -37.635 114.785 -52.430 1.00 0.00 O
ATOM 40 C3' C A 2 -39.302 116.088 -51.460 1.00 0.00 C
ATOM 41 O3' C A 2 -39.648 117.004 -50.437 1.00 0.00 O
ATOM 42 C2' C A 2 -38.900 116.766 -52.741 1.00 0.00 C
ATOM 43 O2' C A 2 -37.943 117.772 -52.478 1.00 0.00 O
ATOM 44 C1' C A 2 -38.214 115.608 -53.457 1.00 0.00 C
ATOM 45 N1 C A 2 -39.213 114.844 -54.247 1.00 0.00 N
ATOM 46 C2 C A 2 -39.629 115.388 -55.459 1.00 0.00 C
ATOM 47 O2 C A 2 -39.204 116.482 -55.828 1.00 0.00 O
ATOM 48 N3 C A 2 -40.513 114.676 -56.213 1.00 0.00 N
ATOM 49 C4 C A 2 -40.964 113.480 -55.805 1.00 0.00 C
ATOM 50 N4 C A 2 -41.837 112.816 -56.576 1.00 0.00 N
ATOM 51 C5 C A 2 -40.527 112.907 -54.565 1.00 0.00 C
ATOM 52 C6 C A 2 -39.665 113.627 -53.828 1.00 0.00 C
ATOM 53 H5' C A 2 -37.265 113.700 -49.985 1.00 0.00 H
ATOM 54 H5'' C A 2 -38.505 114.679 -49.173 1.00 0.00 H
ATOM 55 H4' C A 2 -37.260 116.032 -50.837 1.00 0.00 H
ATOM 56 H3' C A 2 -40.104 115.385 -51.660 1.00 0.00 H
ATOM 57 H2' C A 2 -39.744 117.164 -53.296 1.00 0.00 H
ATOM 58 HO2' C A 2 -38.340 118.429 -51.904 1.00 0.00 H
ATOM 59 H1' C A 2 -37.448 115.962 -54.119 1.00 0.00 H
ATOM 60 H41 C A 2 -42.140 113.214 -57.454 1.00 0.00 H
ATOM 61 H42 C A 2 -42.186 111.915 -56.282 1.00 0.00 H
ATOM 62 H5 C A 2 -40.825 111.939 -54.208 1.00 0.00 H
ATOM 63 H6 C A 2 -39.333 113.231 -52.894 1.00 0.00 H
ATOM 132 P A A 5 -49.440 124.595 -55.324 1.00 0.00 P
ATOM 133 OP1 A A 5 -49.855 126.011 -55.204 1.00 0.00 O
ATOM 134 OP2 A A 5 -49.735 123.644 -54.229 1.00 0.00 O
ATOM 135 O5' A A 5 -50.048 124.002 -56.693 1.00 0.00 O
ATOM 136 C5' A A 5 -51.166 123.141 -56.672 1.00 0.00 C
ATOM 137 C4' A A 5 -51.363 122.530 -58.060 1.00 0.00 C
ATOM 138 O4' A A 5 -50.271 121.673 -58.404 1.00 0.00 O
ATOM 139 C3' A A 5 -52.607 121.669 -58.101 1.00 0.00 C
ATOM 140 O3' A A 5 -53.761 122.442 -58.379 1.00 0.00 O
ATOM 141 C2' A A 5 -52.264 120.728 -59.232 1.00 0.00 C
ATOM 142 O2' A A 5 -52.381 121.398 -60.470 1.00 0.00 O
ATOM 143 C1' A A 5 -50.787 120.462 -58.947 1.00 0.00 C
ATOM 144 N9 A A 5 -50.642 119.381 -57.952 1.00 0.00 N
ATOM 145 C8 A A 5 -50.039 119.417 -56.717 1.00 0.00 C
ATOM 146 N7 A A 5 -50.053 118.272 -56.091 1.00 0.00 N
ATOM 147 C5 A A 5 -50.688 117.412 -56.982 1.00 0.00 C
ATOM 148 C6 A A 5 -50.989 116.037 -56.932 1.00 0.00 C
ATOM 149 N6 A A 5 -50.698 115.257 -55.883 1.00 0.00 N
ATOM 150 N1 A A 5 -51.588 115.490 -58.008 1.00 0.00 N
ATOM 151 C2 A A 5 -51.867 116.259 -59.057 1.00 0.00 C
ATOM 152 N3 A A 5 -51.638 117.555 -59.224 1.00 0.00 N
ATOM 153 C4 A A 5 -51.036 118.077 -58.126 1.00 0.00 C
ATOM 154 H5' A A 5 -52.054 123.706 -56.387 1.00 0.00 H
ATOM 155 H5'' A A 5 -51.000 122.338 -55.954 1.00 0.00 H
ATOM 156 H4' A A 5 -51.445 123.311 -58.809 1.00 0.00 H
ATOM 157 H3' A A 5 -52.700 121.122 -57.163 1.00 0.00 H
ATOM 158 H2' A A 5 -52.868 119.824 -59.228 1.00 0.00 H
ATOM 159 HO2' A A 5 -51.824 122.180 -60.448 1.00 0.00 H
ATOM 160 H1' A A 5 -50.247 120.181 -59.845 1.00 0.00 H
ATOM 161 H8 A A 5 -49.574 120.307 -56.306 1.00 0.00 H
ATOM 162 H61 A A 5 -50.933 114.274 -55.905 1.00 0.00 H
ATOM 163 H62 A A 5 -50.244 115.650 -55.071 1.00 0.00 H
ATOM 164 H2 A A 5 -52.341 115.756 -59.891 1.00 0.00 H
TER 262 C A 8
"""
in_h = iotbx.pdb.input(source_info=None, lines=in_pdb_str).construct_hierarchy(sort_atoms=True)
out_h = iotbx.pdb.input(source_info=None, lines=in_pdb_str).construct_hierarchy(sort_atoms=False)
# print in_h.as_pdb_string()
# in_h.sort_atoms_in_place()
# print "="*50
# print in_h.as_pdb_string()
validate_result(in_h, out_h)
| 9,122
|
def interval_list_intersection(A: List[List], B: List[List], visualization: bool = True) -> List[List]:
"""
    LeetCode 986: Interval List Intersections
Given two lists of closed intervals, each list of intervals is pairwise disjoint and in sorted order.
Return the intersection of these two interval lists.
Examples:
1. A: [[0, 2], [5, 10], [13, 23], [24, 25]], B: [[1, 5], [8, 12], [15, 24], [25, 26]]
return: [[1, 2], [5, 5], [8, 10], [15, 23], [24, 24], [25, 25]]
"""
res = []
i = j = 0
while i < len(A) and j < len(B):
s = max(A[i][0], B[j][0])
e = min(A[i][1], B[j][1])
if s <= e:
res.append([s, e])
if A[i][1] < B[j][1]:
i += 1
else:
j += 1
if visualization:
interval_list_intersection_visualization(A, B, res)
return res
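# Usage sketch reproducing the docstring example (visualization is disabled so
# the optional plotting helper is not required):
A = [[0, 2], [5, 10], [13, 23], [24, 25]]
B = [[1, 5], [8, 12], [15, 24], [25, 26]]
assert interval_list_intersection(A, B, visualization=False) == [
    [1, 2], [5, 5], [8, 10], [15, 23], [24, 24], [25, 25]]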
| 9,123
|
def set_project_user_attribute_name(number: int, value: str) -> None:
"""
Args:
number (int): user attribute number
value (str): value
"""
| 9,124
|
def test_amf_classifier_serialization():
"""Trains a AMFClassifier on iris, saves and loads it again. Check that
everything is the same between the original and loaded forest
"""
random_state = 42
n_estimators = 1
n_classes = 3
iris = datasets.load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=random_state
)
X_train_1, X_train_2, y_train_1, y_train_2 = train_test_split(
X_train, y_train, test_size=0.2, random_state=random_state
)
clf1 = AMFClassifier(
n_estimators=n_estimators, n_classes=n_classes, random_state=random_state
)
clf1.partial_fit(X_train_1, y_train_1)
filename = "amf_on_iris.pkl"
clf1.save(filename)
clf2 = AMFClassifier.load(filename)
os.remove(filename)
def test_forests_are_equal(clf1, clf2):
# Test samples
samples1 = clf1.no_python.samples
samples2 = clf2.no_python.samples
assert samples1.n_samples_increment == samples2.n_samples_increment
n_samples1 = samples1.n_samples
n_samples2 = samples2.n_samples
assert n_samples1 == n_samples2
assert samples1.n_samples_capacity == samples2.n_samples_capacity
assert np.all(samples1.labels[:n_samples1] == samples2.labels[:n_samples2])
assert np.all(samples1.features[:n_samples1] == samples2.features[:n_samples2])
# Test nopython.trees
for n_estimator in range(n_estimators):
tree1 = clf1.no_python.trees[n_estimator]
tree2 = clf2.no_python.trees[n_estimator]
# Test tree attributes
assert tree1.n_features == tree2.n_features
assert tree1.step == tree2.step
assert tree1.loss == tree2.loss
assert tree1.use_aggregation == tree2.use_aggregation
assert tree1.iteration == tree2.iteration
assert tree1.n_classes == tree2.n_classes
assert tree1.dirichlet == tree2.dirichlet
assert np.all(tree1.intensities == tree2.intensities)
# Test tree.nodes
nodes1 = tree1.nodes
nodes2 = tree2.nodes
assert np.all(nodes1.index == nodes2.index)
assert np.all(nodes1.is_leaf == nodes2.is_leaf)
assert np.all(nodes1.depth == nodes2.depth)
assert np.all(nodes1.n_samples == nodes2.n_samples)
assert np.all(nodes1.parent == nodes2.parent)
assert np.all(nodes1.left == nodes2.left)
assert np.all(nodes1.right == nodes2.right)
assert np.all(nodes1.feature == nodes2.feature)
assert np.all(nodes1.weight == nodes2.weight)
assert np.all(nodes1.log_weight_tree == nodes2.log_weight_tree)
assert np.all(nodes1.threshold == nodes2.threshold)
assert np.all(nodes1.time == nodes2.time)
assert np.all(nodes1.memory_range_min == nodes2.memory_range_min)
assert np.all(nodes1.memory_range_max == nodes2.memory_range_max)
assert np.all(nodes1.n_features == nodes2.n_features)
assert nodes1.n_nodes == nodes2.n_nodes
assert nodes1.n_samples_increment == nodes2.n_samples_increment
assert nodes1.n_nodes_capacity == nodes2.n_nodes_capacity
assert np.all(nodes1.counts == nodes2.counts)
assert nodes1.n_classes == nodes2.n_classes
test_forests_are_equal(clf1, clf2)
# Test predict proba
y_pred = clf1.predict_proba(X_test)
y_pred_pkl = clf2.predict_proba(X_test)
assert np.all(y_pred == y_pred_pkl)
clf1.partial_fit(X_train_2, y_train_2)
clf2.partial_fit(X_train_2, y_train_2)
test_forests_are_equal(clf1, clf2)
y_pred = clf1.predict_proba(X_test)
y_pred_pkl = clf2.predict_proba(X_test)
assert np.all(y_pred == y_pred_pkl)
| 9,125
|
def get(environ: OsEnvironLike = None) -> str:
"""Get the application ID from the environment.
Args:
environ: Environment dictionary. Uses os.environ if `None`.
Returns:
Default application ID as a string.
We read from the environment APPLICATION_ID (deprecated) or else
GAE_APPLICATION.
"""
if environ is None:
environ = os.environ
return environ.get('APPLICATION_ID', environ.get('GAE_APPLICATION', ''))
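# Behaviour sketch with explicit environment mappings (APPLICATION_ID, although
# deprecated, takes precedence when both variables are set):
assert get({'GAE_APPLICATION': 'my-app'}) == 'my-app'
assert get({'APPLICATION_ID': 'legacy-app'}) == 'legacy-app'
assert get({}) == ''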
| 9,126
|
def tokenize_query(query):
""" Tokenize a query """
tokenized_query = tokenizer.tokenize(query)
stop_words = set(nltk.corpus.stopwords.words("english"))
tokenized_query = [
word for word in tokenized_query if word not in stop_words]
tokenized_query = [stemmer.stem(word) for word in tokenized_query]
tokenized_query = [word.lower() for word in tokenized_query]
return tokenized_query
| 9,127
|
def redo(layer):
"""Redo any previously undone actions."""
layer.redo()
| 9,128
|
def _add_run_common(parser):
"""Add common args for 'exp run' and 'exp resume'."""
# inherit arguments from `dvc repro`
add_repro_arguments(parser)
parser.add_argument(
"-n",
"--name",
default=None,
help=(
"Human-readable experiment name. If not specified, a name will "
"be auto-generated."
),
metavar="<name>",
)
parser.add_argument(
"-S",
"--set-param",
action="append",
default=[],
help="Use the specified param value when reproducing pipelines.",
metavar="[<filename>:]<param_name>=<param_value>",
)
parser.add_argument(
"--queue",
action="store_true",
default=False,
help="Stage this experiment in the run queue for future execution.",
)
parser.add_argument(
"--run-all",
action="store_true",
default=False,
help="Execute all experiments in the run queue. Implies --temp.",
)
parser.add_argument(
"-j",
"--jobs",
type=int,
default=1,
help="Run the specified number of experiments at a time in parallel.",
metavar="<number>",
)
parser.add_argument(
"--temp",
action="store_true",
dest="tmp_dir",
help=(
"Run this experiment in a separate temporary directory instead of "
"your workspace."
),
)
| 9,129
|
def rewrite_return(func):
"""Rewrite ret ops to assign to a variable instead, which is returned"""
ret_normalization.run(func)
[ret] = findallops(func, 'ret')
[value] = ret.args
ret.delete()
return value
| 9,130
|
def get_loss_fn(loss_factor=1.0):
"""Gets a loss function for squad task."""
def _loss_fn(labels, model_outputs):
start_positions = labels['start_positions']
end_positions = labels['end_positions']
start_logits, end_logits = model_outputs
return squad_loss_fn(
start_positions,
end_positions,
start_logits,
end_logits,
loss_factor=loss_factor)
return _loss_fn
| 9,131
|
async def run_command(*args):
"""
https://asyncio.readthedocs.io/en/latest/subprocess.html
"""
# Create subprocess
process = await asyncio.create_subprocess_exec(
*args,
# stdout must a pipe to be accessible as process.stdout
stdout=asyncio.subprocess.PIPE)
# Wait for the subprocess to finish
stdout, stderr = await process.communicate()
# Return stdout
return stdout.decode().strip()
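# Usage sketch (assumes Python 3.7+ for asyncio.run and an `echo` binary on PATH):
import asyncio

output = asyncio.run(run_command("echo", "hello"))
assert output == "hello"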
| 9,132
|
def replace_config(conf_file_path, src_key, new_value):
"""Update the config file by a given dict."""
with open(conf_file_path, "a+") as fp:
content = json.load(fp)
content[src_key].update(new_value)
fp.truncate(0)
fp.write(json.dumps(content, indent=4))
| 9,133
|
def runcmd(ctx, cmdargs):
"""Execute the given command"""
utils.run_solver(cmdargs)
| 9,134
|
def get_variants_in_region(db, chrom, start, stop):
"""
Variants that overlap a region
Unclear if this will include CNVs
"""
xstart = get_xpos(chrom, start)
xstop = get_xpos(chrom, stop)
variants = list(db.variants.find({ 'xpos': {'$lte': xstop, '$gte': xstart}
}, projection={'_id': False}, limit=SEARCH_LIMIT))
#add_consequence_to_variants(variants)
return list(variants)
| 9,135
|
def get_business(bearer_token, business_id):
"""Query the Business API by a business ID.
    Args:
        bearer_token (str): API bearer token used to authenticate the request.
        business_id (str): The ID of the business to query.
Returns:
dict: The JSON response from the request.
"""
business_path = BUSINESS_PATH + business_id
#4
return request(API_HOST, business_path, bearer_token)
| 9,136
|
def AlexNet_modified(input_shape=None, regularize_weight=0.0001):
"""
Alexnet convolution layers with added batch-normalization and regularization
:param input_shape:
:param regularize_weight:
:return:
"""
from keras.layers import Conv2D, Input, MaxPooling2D, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import concatenate
from keras.models import Model
from keras.regularizers import l2
img_input = Input(shape=input_shape)
#Branch A (mimic the original alexnet)
x = Conv2D(48, (11, 11), strides=(4,4), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(img_input)
x = MaxPooling2D((3,3), strides=(2, 2))(x)
x = BatchNormalization(axis=-1)(x)
x = ZeroPadding2D((2, 2))(x)
x = Conv2D(128, (5, 5), strides=(1,1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = BatchNormalization(axis=-1)(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(192, (3, 3), strides=(1, 1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(x)
x = BatchNormalization(axis=-1)(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(192, (3, 3), strides=(1, 1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(x)
x = BatchNormalization(axis=-1)(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(128, (3, 3), strides=(1, 1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = ZeroPadding2D((1, 1))(x)
# Branch B (mimic the original alexnet)
y = Conv2D(48, (11, 11), strides=(4, 4), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(img_input)
y = MaxPooling2D((3, 3), strides=(2, 2))(y)
y = BatchNormalization(axis=-1)(y)
y = ZeroPadding2D((2, 2))(y)
y = Conv2D(128, (5, 5), strides=(1, 1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(y)
y = MaxPooling2D((3, 3), strides=(2, 2))(y)
y = BatchNormalization(axis=-1)(y)
y = ZeroPadding2D((1, 1))(y)
y = Conv2D(192, (3, 3), strides=(1, 1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(y)
y = BatchNormalization(axis=-1)(y)
y = ZeroPadding2D((1, 1))(y)
y = Conv2D(192, (3, 3), strides=(1, 1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(y)
y = BatchNormalization(axis=-1)(y)
y = ZeroPadding2D((1, 1))(y)
y = Conv2D(128, (3, 3), strides=(1, 1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(y)
y = MaxPooling2D((3, 3), strides=(2, 2))(y)
y = ZeroPadding2D((1, 1))(y)
out = concatenate([x,y], axis=-1)
inputs = img_input
model = Model(inputs, out, name='alexnet')
return model
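# Minimal usage sketch (assumes an older Keras 2.x where the
# keras.layers.normalization and keras.layers.merge import paths used above
# still exist; the input shape is illustrative, not prescribed):
model = AlexNet_modified(input_shape=(227, 227, 3), regularize_weight=0.0001)
model.summary()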
| 9,137
|
def main():
""" Make all images in current directory. """
for tex_file in iglob('./*.tex'):
jobname = tex_file.split('/')[-1][:-4]
os.system(f'pdflatex --shell-escape {jobname}')
for ext in ['aux', 'log', 'pdf']:
os.system(f'rm {jobname}.{ext}')
| 9,138
|
def readblock(fileObj):
"""
parse the block of data like below
ORDINATE ERROR ABSCISSA
2.930E-06 1.8D-07 5.00E+02 X.
8.066E-06 4.8D-07 6.80E+02 .X.
1.468E-05 8.3D-07 9.24E+02 ..X.
2.204E-05 1.2D-06 1.26E+03 ...X...
"""
data = []
p = re.compile('ORDINATE')
q = re.compile('0LINEAR COEFFICIENTS')
for line in fileObj:
if q.search(line) is not None:
break
if p.search(line) is None:
dataContent = line[0:31]
dataContent = dataContent.replace('D', 'E')
datarow = list(map(float, dataContent.split()))
data.append(datarow)
return np.array(data)
| 9,139
|
def test_config_override() -> None:
"""Test config can be overriden."""
bot = Phial(
"test-token",
config={
"prefix": "/",
"registerHelpCommand": False,
"baseHelpText": "All commands:",
"autoReconnect": False,
"loopDelay": 0.5,
"hotReload": True,
"maxThreads": 1,
},
)
assert bot.config == {
"prefix": "/",
"registerHelpCommand": False,
"baseHelpText": "All commands:",
"autoReconnect": False,
"loopDelay": 0.5,
"hotReload": True,
"maxThreads": 1,
}
| 9,140
|
def test_get_svl_data_number_multi_row(test_conn):
""" Tests that get_svl_data raises an SvlNumberValueError when the query
returns multiple values.
"""
svl_plot = {
"type": "number",
"data": "bigfoot",
"value": {"field": "number"},
}
with pytest.raises(SvlNumberValueError):
get_svl_data(svl_plot, test_conn)
| 9,141
|
def as_scalar(scalar):
"""Check and return the input if it is a scalar.
If it is not scalar, raise a ValueError.
Parameters
----------
scalar : Any
the object to check
Returns
-------
float
the scalar if x is a scalar
"""
if isinstance(scalar, np.ndarray):
assert scalar.size == 1
return scalar[0]
elif np.isscalar(scalar):
return scalar
else:
raise ValueError('expected scalar, got %s' % scalar)
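# Behaviour sketch (np is NumPy, as used in the function body):
assert as_scalar(5.0) == 5.0
assert as_scalar(np.array([3.0])) == 3.0   # a size-1 array is unwrapped
# as_scalar([1, 2]) would raise ValueError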
| 9,142
|
def evaluate_dnf( # pylint: disable=too-many-arguments,too-many-locals
num_objects: int,
num_vars: int,
nullary: np.ndarray,
unary: np.ndarray,
binary: np.ndarray,
and_kernel: np.ndarray,
or_kernel: np.ndarray,
target_arity: int,
) -> np.ndarray:
"""Evaluate given batch of interpretations."""
# nullary (B, numNullary)
# unary (B, O, numUnary)
# binary (B, O, O-1, numBinary)
# and_kernel (H, IN)
# or_kernel (H,)
# ---------------------------
# We need a binding / permutation matrix that binds every object to every
# variable, so we can evaluate the rule. The following list of tuples,
# tells us which constant each variable is for each permutation
perm_idxs = np.array(
list(itertools.permutations(range(num_objects), num_vars))
) # (K, V)
# ---
# Binary comparison indices for variables, XY XZ YX YZ ...
var_bidxs = np.stack(np.nonzero(1 - np.eye(num_vars))).T # (V*(V-1), 2)
perm_bidxs = perm_idxs[:, var_bidxs] # (K, V*(V-1), 2)
obj_idxs = np.stack(np.nonzero(1 - np.eye(num_objects))).T # (O*(O-1), 2)
# The following matrix tells with variable binding pair is actually the
# object pair we're looking for
var_obj_pairs = (perm_bidxs[..., None, :] == obj_idxs).all(-1)
# (K, V*(V-1), O*(O-1))
# We are guaranteed to have 1 matching pair due to unique bindings, so the
# non-zero elements in the last dimension encode the index we want
var_obj_pairs = np.reshape(np.nonzero(var_obj_pairs)[-1], var_obj_pairs.shape[:2])
# (K, V*(V-1))
# ---------------------------
batch_size = nullary.shape[0] # B
# Take the permutations
perm_unary = unary[:, perm_idxs] # (B, K, V, numUnary)
perm_binary = binary.reshape(
(batch_size, -1, binary.shape[-1])
) # (B, O*(O-1), numBinary)
perm_binary = perm_binary[:, var_obj_pairs] # (B, K, V*(V-1), numBinary)
perm_binary = perm_binary.reshape(
(
batch_size,
var_obj_pairs.shape[0],
num_vars,
num_vars - 1,
perm_binary.shape[-1],
)
)
# (B, K, V, V-1, numBinary)
# ---------------------------
# Merge different arities
flat_nullary = np.repeat(
nullary[:, None], perm_unary.shape[1], axis=1
) # (B, K, numNullary)
interpretation = flatten_interpretation(flat_nullary, perm_unary, perm_binary)
# (B, K, IN)
# ---------------------------
# Evaluate
and_eval = np.min(
interpretation[:, :, None] * and_kernel + (and_kernel == 0), -1
) # (B, K, H)
# ---
# Reduction of existential variables if any, K actually expands to O, O-1 etc numVars many times
# If the arity of the target predicate is 0, then we can reduce over K. If
# it is 1, then expand once then reduce over remaining variables, i.e. O, K//O, H -> (O, H)
shape_range = num_objects - np.arange(num_objects) # [numObjs, numObjs-1, ...]
new_shape = np.concatenate(
[[batch_size], shape_range[:target_arity], [-1, and_eval.shape[-1]]]
    )  # [B, O, K//O, H]
    and_eval = np.reshape(and_eval, new_shape)
    # (B, O, K//O, H)
perm_eval = np.max(and_eval, -2) # (B, H,) if arity 0, (B, O, H) if 1 etc.
# ---
or_eval = np.max(
or_kernel * perm_eval - (or_kernel == 0), -1
) # (B,) if arity 0, (B, O) if 1 etc.
# ---------------------------
return or_eval
| 9,143
|
def black_color_func(word, font_size, position, orientation,
random_state=None, **kwargs):
"""Make word cloud black and white."""
return("hsl(0,100%, 1%)")
| 9,144
|
def setup(hass, config):
""" Setup history hooks. """
hass.http.register_path(
'GET',
re.compile(
r'/api/history/entity/(?P<entity_id>[a-zA-Z\._0-9]+)/'
r'recent_states'),
_api_last_5_states)
hass.http.register_path('GET', URL_HISTORY_PERIOD, _api_history_period)
return True
| 9,145
|
def test_rsun_missing():
"""Tests output if 'rsun' is missing"""
euvi_no_rsun = Map(fitspath)
euvi_no_rsun.meta['rsun'] = None
assert euvi_no_rsun.rsun_obs.value == sun.solar_semidiameter_angular_size(euvi.date).to('arcsec').value
| 9,146
|
def shuffle_file(filename):
"""Shuffle lines in file.
"""
sp = filename.split('/')
shuffled_filename = '/'.join(sp[:-1] + ['shuffled_{}'.format(sp[-1])])
logger.info(shuffled_filename)
os.system('shuf {} > {}'.format(filename, shuffled_filename))
return shuffled_filename
| 9,147
|
def fitallseq(digitslist, list):
"""if there is repeating digits, itertools.permutations() is still usable
if fail, still print some print, if i >= threshold, served as start point for new searching """
for p in itertools.permutations(digitslist):
#print "".join(pw)
i=0
pw="".join(p)
for seq in list:
if seqfit(seq,pw):
i=i+1
continue
else:
break
if i==nlines:
print("password sequence is found as:", pw)
return True
print("password is not found in all %d digits permutations", len(digitslist))
return False
| 9,148
|
def adduser(args):
"""Add or update a user to the database: <username> <password> [[role] [role] ...]"""
try:
username, password = args[0:2]
except (IndexError, ValueError), exc:
print >> sys.stderr, "you must include at least a username and password: %s" % exc
usage()
try:
roles = args[2:]
except IndexError:
roles = []
try:
store = _store()
user = User(username)
user.set_password(password)
for role in roles:
user.add_role(role)
store.put(user)
except Exception, exc:
print >> sys.stderr, 'unable to create or update user: %s' % exc
raise
return True
| 9,149
|
def register_celery(app: Flask):
"""Load the celery config from the app instance."""
CELERY.conf.update(app.config.get("CELERY", {}))
CELERY.flask_app = app # set flask_app attribute used by FlaskTask
app.logger.info(
f"Celery settings:\n{CELERY.conf.humanize(with_defaults=False, censored=True)}\n"
)
| 9,150
|
def test_issue_too_many(web3, issuer, issue_script_owner, customer):
"""Issue over allowance."""
with pytest.raises(TransactionFailed):
issuer.functions.issue(customer, 3000).transact({"from": issue_script_owner})
| 9,151
|
def check_linear_dependence(matrix: np.ndarray) -> bool:
"""
    Checks, via the Cauchy-Schwarz inequality, whether the columns of a matrix are linearly dependent.
:param matrix: 2x2 matrix to be processed.
:return: Boolean.
"""
for i in range(matrix.shape[0]):
for j in range(matrix.shape[0]):
if i != j:
inner_product = np.inner(matrix[:, i], matrix[:, j])
norm_i = np.linalg.norm(matrix[:, i])
norm_j = np.linalg.norm(matrix[:, j])
print("I: ", matrix[:, i])
print("J: ", matrix[:, j])
print("Prod: ", inner_product)
print("Norm i: ", norm_i)
print("Norm j: ", norm_j)
if np.abs(inner_product - norm_j * norm_i) < 1e-5:
print("Dependent")
return True
else:
print("Independent")
return False
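# Usage sketch: the two columns below are proportional ([2, 4] = 2 * [1, 2]),
# so Cauchy-Schwarz holds with equality and the function reports "Dependent".
m = np.array([[1.0, 2.0],
              [2.0, 4.0]])
assert check_linear_dependence(m) is True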
| 9,152
|
def cors_400(details: str = None) -> cors_response:
"""
Return 400 - Bad Request
"""
errors = Model400BadRequestErrors()
errors.details = details
error_object = Model400BadRequest([errors])
return cors_response(
req=request,
status_code=400,
body=json.dumps(delete_none(error_object.to_dict()), indent=_INDENT, sort_keys=True)
if _INDENT != 0 else json.dumps(delete_none(error_object.to_dict()), sort_keys=True),
x_error=details
)
| 9,153
|
def detect_label_column(column_names):
""" Detect the label column - which we display as the label for a joined column.
If a table has two columns, one of which is ID, then label_column is the other one.
"""
if (column_names and len(column_names) == 2 and "id" in column_names):
return [c for c in column_names if c != "id"][0]
return None
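# Behaviour sketch:
assert detect_label_column(["id", "name"]) == "name"
assert detect_label_column(["id", "name", "age"]) is None   # more than two columns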
| 9,154
|
def get_date():
"""
get the date
"""
date = subprocess.check_output(["date"])
date = date.decode("utf-8")
date = re.search(r"\w{3} \d{1,2} \w{3} \d{4}", date)
date = date.group(0)
return date
| 9,155
|
def sorted_files(pattern):
"""Return files matching glob pattern, *effectively* sorted by date
"""
return sort_files(glob.glob(pattern))
| 9,156
|
def random_float_tensor(seed, size, a=22695477, c=1, m=2 ** 32, requires_grad=False):
""" Generates random tensors given a seed and size
https://en.wikipedia.org/wiki/Linear_congruential_generator
X_{n + 1} = (a * X_n + c) % m
Using Borland C/C++ values
The tensor will have values between [0,1)
Inputs:
seed (int): an int
size (Tuple[int]): the size of the output tensor
a (int): the multiplier constant to the generator
c (int): the additive constant to the generator
m (int): the modulus constant to the generator
"""
num_elements = 1
for s in size:
num_elements *= s
arr = [(a * seed + c) % m]
for i in range(num_elements - 1):
arr.append((a * arr[i] + c) % m)
return torch.tensor(arr, requires_grad=requires_grad).float().view(size) / m
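# Usage sketch: the generator is fully determined by its arguments, so repeated
# calls with the same seed and size yield identical tensors with values in [0, 1).
import torch

t1 = random_float_tensor(123, (2, 3))
t2 = random_float_tensor(123, (2, 3))
assert torch.equal(t1, t2)
assert bool((t1 >= 0).all()) and bool((t1 < 1).all())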
| 9,157
|
def test_isel_xarray_func(hind_ds_initialized_1d, reconstruction_ds_1d):
"""Test whether applying isel to the objects works."""
hindcast = HindcastEnsemble(hind_ds_initialized_1d)
hindcast = hindcast.add_observations(reconstruction_ds_1d)
hindcast = hindcast.isel(lead=0, init=slice(0, 3)).isel(time=slice(5, 10))
assert hindcast.get_initialized().init.size == 3
assert hindcast.get_initialized().lead.size == 1
assert hindcast.get_observations().time.size == 5
| 9,158
|
def derivable_rng(spec, *, legacy=False):
"""
Get a derivable RNG, for use cases where the code needs to be able to reproducibly derive
sub-RNGs for different keys, such as user IDs.
Args:
spec:
Any value supported by the `seed` parameter of :func:`seedbank.numpy_rng`, in addition
to the following values:
* the string ``'user'``
* a tuple of the form (``seed``, ``'user'``)
Either of these forms will cause the returned function to re-derive new RNGs.
Returns:
function:
A function taking one (or more) key values, like :func:`derive_seed`, and
returning a random number generator (the type of which is determined by
the ``legacy`` parameter).
"""
if spec == 'user':
return DerivingRNG(derive_seed(), legacy)
elif isinstance(spec, tuple):
seed, key = spec
if key != 'user':
            raise ValueError('unrecognized key %s' % key)
return DerivingRNG(seed, legacy)
else:
return FixedRNG(rng(spec, legacy=legacy))
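# Usage sketch based on the docstring contract (DerivingRNG / FixedRNG are the
# module's own helpers, assumed here to be callables that map key values such as
# user IDs to NumPy random generators):
per_user = derivable_rng('user')       # re-derives a fresh RNG per key
user_rng = per_user(42)                # generator derived for user id 42
fixed = derivable_rng(20200306)        # plain seed: same underlying RNG for every key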
| 9,159
|
def main() -> None:
"""Function to start the whole application."""
configure_logger()
log: Logger = getLogger(__name__)
log.info("Attempting to start application")
app = get_flask_application()
app.run(
host=app.config["PIPWATCH_API_HOST"],
port=app.config["PIPWATCH_API_PORT"]
)
log.info("Application started")
sys.exit(0)
| 9,160
|
def gsort_vcf(f, out_file, genome_file='mitylib/reference/b37d5.genome', remove_unsorted_vcf=False):
"""
use gsort to sort the records in a VCF file according to a .genome file.
:param f: the path to an unsorted vcf.gz file
:param out_file: the path to a resulting sorted vcf.gz file
:param genome_file: the .genome file corresponding to the reference genome. see https://github.com/brentp/gsort
:param remove_unsorted_vcf: if True, then the input file 'f' will be deleted.
:return: nothing
"""
logging.debug("Sorting, bgzipping {} -> {}".format(f, out_file))
logging.debug("gsort is using genome file " + genome_file)
gsort_cmd = "gsort {} {} | bgzip -cf > {}".format(f, genome_file, out_file)
logging.debug(gsort_cmd)
subprocess.run(gsort_cmd, shell=True)
logging.debug("Tabix indexing {}".format(out_file))
tabix(out_file)
if remove_unsorted_vcf:
os.remove(f)
| 9,161
|
def table(custom_headings, col_headings_formatted, rows, spec):
"""
Create a LaTeX table
Parameters
----------
custom_headings : None, dict
optional dictionary of custom table headings
col_headings_formatted : list
formatted column headings
rows : list of lists of cell-strings
Data in the table, pre-formatted
spec : dict
options for the formatter
Returns
-------
dict : contains key 'latex', which corresponds to a latex string representing the table
"""
longtables = spec['longtables']
table = "longtable" if longtables else "tabular"
if custom_headings is not None \
and "latex" in custom_headings:
latex = custom_headings['latex']
else:
latex = "\\begin{%s}[l]{%s}\n\hline\n" % \
(table, "|c" * len(col_headings_formatted) + "|")
latex += ("%s \\\\ \hline\n"
% (" & ".join(col_headings_formatted)))
for formatted_rowData in rows:
if len(formatted_rowData) > 0:
formatted_rowData_latex = [
(formatted_cell['latex'] if isinstance(formatted_cell, dict)
else formatted_cell) for formatted_cell in formatted_rowData]
latex += " & ".join(formatted_rowData_latex)
#MULTI-ROW support for *data* (non-col-header) rows of table. Currently
# unused (unneeded) - see multirow formatter that is commented out in formatters.py
#multirows = [ ("multirow" in el) for el in formatted_rowData_latex ]
#if any(multirows):
# latex += " \\\\ "
# last = True; lineStart = None; col = 1
# for multi,data in zip(multirows,formatted_rowData_latex):
# if last == True and multi == False:
# lineStart = col #line start
# elif last == False and multi == True:
# latex += "\cline{%d-%d} " % (lineStart,col) #line end
# last=multi
# res = _re.search("multicolumn{([0-9])}",data)
# if res: col += int(res.group(1))
# else: col += 1
# if last == False: #need to end last line
# latex += "\cline{%d-%d} "%(lineStart,col-1)
# latex += "\n"
#else:
latex += " \\\\ \hline\n"
latex += "\end{%s}\n" % table
return {'latex': latex}
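# Usage sketch with illustrative inputs: no custom headings, two column headers,
# and a single pre-formatted data row.
spec = {'longtables': False}
out = table(None, ['Gate', 'Fidelity'], [['$X_{\\pi/2}$', '0.99']], spec)
print(out['latex'])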
| 9,162
|
def send_async_email(msg, html, text, attach=None, send_independently=True, ctype="other"):
"""
发送email
:param subject:
:param recipients:数组
:param text:
:param html:
:param attach:(<filename>,<content_type>)
:param send_independently:如果为True, 独立给recipients中的每个地址发送信息,
否则,一次发送, 收件人能看到其他收件人的邮箱
:return:
"""
# 检测插件
data = plugin_manager.call_plug(hook_name="send_email",
send_independently=send_independently,
msg=msg,
html=html,
text=text,
attach=attach)
if data == "__no_plugin__":
with app.app_context():
msg_obj = Message(
subject=msg["subject"],
html=html
)
if send_independently:
                # Send independently: connect to the mail server first
with mail.connect() as conn:
for recipient in msg["recipients"]:
msg_obj.recipients = [recipient]
status, result_msg = send_email_process(msg_obj, conn)
else:
msg_obj.recipients = msg["recipients"]
status, result_msg = send_email_process(msg_obj)
log = {
"type": "email",
"error_info": result_msg,
'status': status,
'subject': msg_obj.subject,
'from': msg_obj.sender,
'to': list(msg["recipients"]),
'date': msg_obj.date,
'body': msg_obj.body,
'html': msg_obj.html,
'msgid': msg_obj.msgId,
'time': time.time(),
'msg_type': ctype
}
mdbs["sys"].db.sys_message.insert_one(log)
| 9,163
|
def readCommand( argv ):
"""
Processes the command used to run pacman from the command line.
"""
from optparse import OptionParser
usageStr = """
USAGE: python pacman.py <options>
EXAMPLES: (1) python pacman.py
- starts an interactive game
(2) python pacman.py --layout smallClassic --zoom 2
OR python pacman.py -l smallClassic -z 2
- starts an interactive game on a smaller board, zoomed in
"""
parser = OptionParser(usageStr)
parser.add_option('-n', '--numGames', dest='numGames', type='int',
help=default('the number of GAMES to play'), metavar='GAMES', default=1)
parser.add_option('-l', '--layout', dest='layout',
help=default('the LAYOUT_FILE from which to load the map layout'),
metavar='LAYOUT_FILE', default='mediumClassic')
parser.add_option('-p', '--pacman', dest='pacman',
help=default('the agent TYPE in the pacmanAgents module to use'),
metavar='TYPE', default='KeyboardAgent')
parser.add_option('-t', '--textGraphics', action='store_true', dest='textGraphics',
help='Display output as text only', default=False)
parser.add_option('-q', '--quietTextGraphics', action='store_true', dest='quietGraphics',
help='Generate minimal output and no graphics', default=False)
parser.add_option('-g', '--ghosts', dest='ghost',
help=default('the ghost agent TYPE in the ghostAgents module to use'),
metavar = 'TYPE', default='RandomGhost')
parser.add_option('-k', '--numghosts', type='int', dest='numGhosts',
help=default('The maximum number of ghosts to use'), default=4)
parser.add_option('-z', '--zoom', type='float', dest='zoom',
help=default('Zoom the size of the graphics window'), default=1.0)
parser.add_option('-f', '--fixRandomSeed', action='store_true', dest='fixRandomSeed',
help='Fixes the random seed to always play the same game', default=False)
parser.add_option('-r', '--recordActions', action='store_true', dest='record',
help='Writes game histories to a file (named by the time they were played)', default=False)
parser.add_option('--replay', dest='gameToReplay',
help='A recorded game file (pickle) to replay', default=None)
parser.add_option('-a','--agentArgs',dest='agentArgs',
help='Comma separated values sent to agent. e.g. "opt1=val1,opt2,opt3=val3"')
parser.add_option('-x', '--numTraining', dest='numTraining', type='int',
help=default('How many episodes are training (suppresses output)'), default=0)
parser.add_option('--frameTime', dest='frameTime', type='float',
help=default('Time to delay between frames; <0 means keyboard'), default=0.1)
parser.add_option('-c', '--catchExceptions', action='store_true', dest='catchExceptions',
help='Turns on exception handling and timeouts during games', default=False)
parser.add_option('--timeout', dest='timeout', type='int',
help=default('Maximum length of time an agent can spend computing in a single game'), default=30)
options, otherjunk = parser.parse_args(argv)
if len(otherjunk) != 0:
raise Exception('Command line input not understood: ' + str(otherjunk))
args = dict()
# Fix the random seed
if options.fixRandomSeed: random.seed('cs188')
# Choose a layout
args['layout'] = layout.getLayout( options.layout )
if args['layout'] == None: raise Exception("The layout " + options.layout + " cannot be found")
# Choose a Pacman agent
noKeyboard = options.gameToReplay == None and (options.textGraphics or options.quietGraphics)
pacmanType = loadAgent(options.pacman, noKeyboard)
agentOpts = parseAgentArgs(options.agentArgs)
if options.numTraining > 0:
args['numTraining'] = options.numTraining
if 'numTraining' not in agentOpts: agentOpts['numTraining'] = options.numTraining
pacman = pacmanType(**agentOpts) # Instantiate Pacman with agentArgs
args['pacman'] = pacman
# Don't display training games
if 'numTrain' in agentOpts:
options.numQuiet = int(agentOpts['numTrain'])
options.numIgnore = int(agentOpts['numTrain'])
# Choose a ghost agent
ghostType = loadAgent(options.ghost, noKeyboard)
args['ghosts'] = [ghostType( i+1 ) for i in range( options.numGhosts )]
# Choose a display format
if options.quietGraphics:
import textDisplay
args['display'] = textDisplay.NullGraphics()
elif options.textGraphics:
import textDisplay
textDisplay.SLEEP_TIME = options.frameTime
args['display'] = textDisplay.PacmanGraphics()
else:
import graphicsDisplay
args['display'] = graphicsDisplay.PacmanGraphics(options.zoom, frameTime = options.frameTime)
args['numGames'] = options.numGames
args['record'] = options.record
args['catchExceptions'] = options.catchExceptions
args['timeout'] = options.timeout
# Special case: recorded games don't use the runGames method or args structure
if options.gameToReplay != None:
print('Replaying recorded game %s.' % options.gameToReplay)
import pickle
f = open(options.gameToReplay, 'rb')
try: recorded = pickle.load(f)
finally: f.close()
recorded['display'] = args['display']
replayGame(**recorded)
sys.exit(0)
return args
| 9,164
|
def skipIfNoDB(test):
"""Decorate a test to skip if DB ``session`` is ``None``."""
@wraps(test)
def wrapper(self, db, *args, **kwargs):
if db.session is None:
pytest.skip('Skip because no DB.')
else:
return test(self, db, *args, **kwargs)
return wrapper
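
# A minimal usage sketch (not from the original source): the decorator is meant to
# wrap test methods that receive a ``db`` fixture; the class and fixture names below
# are illustrative only.
class TestWithDatabase:
    @skipIfNoDB
    def test_session_is_usable(self, db):
        assert db.session is not None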
| 9,165
|
def rboxes2quads_numpy(rboxes):
"""
:param rboxes: ndarray, shape = (*, h, w, 5=(4=(t,r,b,l) + 1=angle))
Note that angle is between [-pi/4, pi/4)
:return: quads: ndarray, shape = (*, h, w, 8=(x1, y1,... clockwise order from top-left))
"""
# dists, shape = (*, h, w, 4=(t,r,b,l))
# angles, shape = (*, h, w)
h, w, _ = rboxes.shape[-3:]
dists, angles = rboxes[..., :4], rboxes[..., 4]
# shape = (*, h, w, 5=(t,r,b,l,offset), 2=(x,y))
pts = np.zeros(list(dists.shape[:-1]) + [5, 2], dtype=np.float32)
# assign pts for angle >= 0
dists_pos = dists[angles >= 0]
if dists_pos.size > 0:
# shape = (*, h, w)
tops, rights, bottoms, lefts = np.rollaxis(dists_pos, axis=-1)
shape = tops.shape
pts[angles >= 0] = np.moveaxis(np.array([[np.zeros(shape), -(tops+bottoms)],
[lefts+rights, -(tops+bottoms)],
[lefts+rights, np.zeros(shape)],
[np.zeros(shape), np.zeros(shape)],
[lefts, -bottoms]]), [0, 1], [-2, -1])
# assign pts for angle < 0
dists_neg = dists[angles < 0]
if dists_neg.size > 0:
# shape = (*, h, w)
tops, rights, bottoms, lefts = np.rollaxis(dists_neg, axis=-1)
shape = tops.shape
pts[angles < 0] = np.moveaxis(np.array([[-(lefts+rights), -(tops+bottoms)],
[np.zeros(shape), -(tops+bottoms)],
[np.zeros(shape), np.zeros(shape)],
[-(lefts+rights), np.zeros(shape)],
[-rights, -bottoms]]), [0, 1], [-2, -1])
# note that rotate clockwise is positive, otherwise, negative
angles *= -1
# rotate
# shape = (*, h, w, 2, 2)
R = np.moveaxis(np.array([[np.cos(angles), -np.sin(angles)],
[np.sin(angles), np.cos(angles)]]), [0, 1], [-2, -1])
# shape = (*, h, w, 2=(x, y), 5=(t,r,b,l,offset))
pts = np.swapaxes(pts, -1, -2)
# shape = (*, h, w, 2=(x, y), 5=(t,r,b,l,offset))
rotated_pts = R @ pts
# quads, shape = (*, h, w, 2=(x, y), 4=(t,r,b,l))
# offsets, shape = (*, h, w, 2=(x, y), 1=(offset))
quads, offsets = rotated_pts[..., :4], rotated_pts[..., 4:5]
# align
widths, heights = np.meshgrid(np.arange(w), np.arange(h))
# shape = (h, w, 2)
origins = np.concatenate((np.expand_dims(widths, -1), np.expand_dims(heights, -1)), axis=-1)
# shape = (*, h, w, 2=(x,y), 1)
origins = np.expand_dims(origins, axis=tuple(i for i in range(-1, rboxes.ndim - 3)))
quads += origins - offsets
quads[..., 0, :] = np.clip(quads[..., 0, :], 0, w)
quads[..., 1, :] = np.clip(quads[..., 1, :], 0, h)
# reshape
quads = np.swapaxes(quads, -1, -2).reshape(list(rboxes.shape[:-1]) + [8])
return quads
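
# A minimal numeric sketch (illustrative, not from the original source): one grid
# cell with distances (t, r, b, l) = (1, 1, 1, 1) and angle 0 yields an axis-aligned
# quad clipped to the 1x1 grid, i.e. (0,0), (1,0), (1,1), (0,1).
if __name__ == "__main__":
    rboxes_demo = np.zeros((1, 1, 5), dtype=np.float32)
    rboxes_demo[..., :4] = 1.0  # t, r, b, l; angle stays 0
    print(rboxes2quads_numpy(rboxes_demo).reshape(4, 2))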
| 9,166
|
def _async_register_services(
hass: HomeAssistant,
coordinator: NZBGetDataUpdateCoordinator,
) -> None:
"""Register integration-level services."""
def pause(call: ServiceCall) -> None:
"""Service call to pause downloads in NZBGet."""
coordinator.nzbget.pausedownload()
def resume(call: ServiceCall) -> None:
"""Service call to resume downloads in NZBGet."""
coordinator.nzbget.resumedownload()
def set_speed(call: ServiceCall) -> None:
"""Service call to rate limit speeds in NZBGet."""
coordinator.nzbget.rate(call.data[ATTR_SPEED])
hass.services.async_register(DOMAIN, SERVICE_PAUSE, pause, schema=vol.Schema({}))
hass.services.async_register(DOMAIN, SERVICE_RESUME, resume, schema=vol.Schema({}))
hass.services.async_register(
DOMAIN, SERVICE_SET_SPEED, set_speed, schema=SPEED_LIMIT_SCHEMA
)
| 9,167
|
def cleanup(f):
"""
Remove dir if exists
:param f: Dir to remove
:type f: str
"""
if os.path.exists(f):
shutil.rmtree(f)
| 9,168
|
def repr_values(condition: Callable[..., bool], lambda_inspection: Optional[ConditionLambdaInspection],
resolved_kwargs: Mapping[str, Any], a_repr: reprlib.Repr) -> List[str]:
"""
Represent function arguments and frame values in the error message on contract breach.
:param condition: condition function of the contract
:param lambda_inspection:
inspected lambda AST node corresponding to the condition function (None if the condition was not given as a
lambda function)
:param resolved_kwargs: arguments put in the function call
:param a_repr: representation instance that defines how the values are represented.
:return: list of value representations
"""
# Hide _ARGS and _KWARGS if they are not part of the condition for better readability
if '_ARGS' in resolved_kwargs or '_KWARGS' in resolved_kwargs:
parameters = inspect.signature(condition).parameters
malleable_kwargs = cast(
MutableMapping[str, Any],
resolved_kwargs.copy() # type: ignore
)
if '_ARGS' not in parameters:
malleable_kwargs.pop('_ARGS', None)
if '_KWARGS' not in parameters:
malleable_kwargs.pop('_KWARGS', None)
selected_kwargs = cast(Mapping[str, Any], malleable_kwargs)
else:
selected_kwargs = resolved_kwargs
# Don't use ``resolved_kwargs`` from this point on.
# ``selected_kwargs`` is meant to be used instead for better readability of error messages.
if is_lambda(a_function=condition):
assert lambda_inspection is not None, "Expected a lambda inspection when given a condition as a lambda function"
else:
assert lambda_inspection is None, "Expected no lambda inspection in a condition given as a non-lambda function"
reprs = None # type: Optional[MutableMapping[str, Any]]
if lambda_inspection is not None:
variable_lookup = collect_variable_lookup(condition=condition, resolved_kwargs=selected_kwargs)
recompute_visitor = icontract._recompute.Visitor(variable_lookup=variable_lookup)
recompute_visitor.visit(node=lambda_inspection.node.body)
recomputed_values = recompute_visitor.recomputed_values
repr_visitor = Visitor(
recomputed_values=recomputed_values, variable_lookup=variable_lookup, atok=lambda_inspection.atok)
repr_visitor.visit(node=lambda_inspection.node.body)
reprs = repr_visitor.reprs
# Add original arguments from the call unless they shadow a variable in the re-computation.
#
# The condition arguments are often not sufficient to figure out the error. The user usually needs
# more context which is captured in the remainder of the call arguments.
if reprs is None:
reprs = dict()
for key in sorted(selected_kwargs.keys()):
val = selected_kwargs[key]
if key not in reprs and _representable(value=val):
reprs[key] = val
parts = [] # type: List[str]
# We need to sort in order to present the same violation error on repeated violations.
# Otherwise, the order of the reported arguments may be arbitrary.
for key in sorted(reprs.keys()):
value = reprs[key]
if isinstance(value, icontract._recompute.FirstExceptionInAll):
writing = ['{} was False, e.g., with'.format(key)]
for input_name, input_value in value.inputs:
writing.append('\n')
writing.append(' {} = {}'.format(input_name, a_repr.repr(input_value)))
parts.append(''.join(writing))
else:
parts.append('{} was {}'.format(key, a_repr.repr(value)))
return parts
| 9,169
|
def human_permissions(permissions, short=False):
"""Get permissions in readable form.
"""
try:
permissions = int(permissions)
except ValueError:
return None
if permissions > sum(PERMISSIONS.values()) or permissions < min(
PERMISSIONS.values()
):
return ""
rez = []
for k, v in PERMISSIONS.items():
if permissions & v == v:
rez.append(k)
if short:
return "".join(((x.split("_")[1][:1]).lower() for x in rez))
else:
return " | ".join(rez)
| 9,170
|
def predict():
"""
Prediction end point
Post a JSON holding the features and expect a prediction
Returns
-------
JSON
The field `predictions` will hold a list of 0 and 1's corresponding
to the predictions.
"""
logger.info('Starting prediction')
json_ = request.get_json()
query_df = pd.DataFrame(json_)
query = tm.prepare_data(query_df, train=False)
prediction = clf.predict(query)
prediction = [int(x) for x in prediction]
logger.info("Prediction is ready")
return jsonify({'prediction': prediction})
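
# A hedged client-side sketch: the route path and port are assumptions (the route
# decorator is not shown in this snippet); only the payload/response shape --
# a JSON list of feature records in, {"prediction": [...]} out -- follows from the
# handler above.
if __name__ == "__main__":
    import requests
    rows = [{"feature_a": 1.2, "feature_b": 0.4}]  # hypothetical feature names
    resp = requests.post("http://localhost:5000/predict", json=rows)
    print(resp.json()["prediction"])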
| 9,171
|
def run_program(program, cmdargs, stdin_f, stdout_f, stderr_f,
run=True, cmd_prepend="", run_from_cmd=True,
**kwargs):
"""Runs `program` with `cmdargs` using `subprocess.call`.
:param str stdin_f: File from which to take standard input
:param str stdout_f: File in which to put standard output
:param str stderr_f: File in which to put standard error
    :param bool run: Whether to actually run `program`.
        If `True`, the program's return code is returned.
        If `False`, the path of the generated script that would run
        the program is returned instead.
:param str cmd_prepend: Put in the beginning of the bash script
:param bool run_from_cmd: Run `program` using the generated bash
script instead of running it directly
"""
time_file_name = '.'.join(stdout_f.split('.')[:-1])+'.time'
cmd_file_name = '.'.join(stdout_f.split('.')[:-1])+'.sh'
with open(cmd_file_name, 'w') as cmd_file:
cmd = ' '.join([program]+cmdargs)
time_cmd = "/usr/bin/time -o {time_file}".format(time_file=time_file_name)
cmd = "{time_cmd} {cmd} 1> {stdout} 2> {stderr} \n".format(time_cmd=time_cmd,
cmd=cmd,
stdout=stdout_f,
stderr=stderr_f)
cmd = cmd_prepend + cmd
cmd_file.write(cmd)
if run:
with OpenWithNone(stdin_f, 'r') as input_file, open(stdout_f, 'w') as stdout_file, open(stderr_f, 'w') as stderr_file:
if run_from_cmd:
retcode = call(["bash", cmd_file_name], **kwargs)
else:
try:
with open(time_file_name, 'w') as time_file:
with print_time(time_file):
retcode = call([program]+cmdargs, stdin=input_file,
stdout=stdout_file, stderr=stderr_file, **kwargs)
except Exception as e:
print(e)
print('program ', program)
print('cmdargs', cmdargs)
print('stdin ', stdin_f)
print('stdout ', stdout_f)
print('stderr ', stderr_f)
# print 'kwargs ', kwargs
print(getcwd())
raise
replace_string_in_file(stdout_f, '\r', '\n')
return retcode
else:
return cmd_file_name
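
# A minimal usage sketch (command and file names are illustrative): runs `echo hello`
# through the generated bash script and writes demo.out / demo.err / demo.time in the
# working directory. OpenWithNone, print_time and replace_string_in_file are helpers
# from the original module and are assumed to be importable alongside this function.
if __name__ == "__main__":
    rc = run_program("echo", ["hello"], stdin_f=None,
                     stdout_f="demo.out", stderr_f="demo.err")
    print("return code:", rc)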
| 9,172
|
def test_invalid_login(test_client, init_database):
"""
GIVEN a Flask application
WHEN the '/login' page is posted to with invalid credentials (POST)
THEN check an error message is returned to the user
"""
response = test_client.post('/login',
data=dict(email='bernard@gmail.com',
password='badPa33w04d'),
follow_redirects=True)
assert response.status_code == 200
assert b"ERROR! Incorrect login credentials." in response.data
assert b"Feature Request App" in response.data
assert b"Logout" not in response.data
assert b"Login" in response.data
assert b"Register" in response.data
| 9,173
|
def second_step_red(x: np.array, y: np.array, z: np.array,
px: np.array, py: np.array, pz: np.array,
Fx: np.array, Fy: np.array, Fz: np.array,
z_start: float, z_stop: float) -> (np.array, np.array, np.array,
np.array, np.array, np.array):
""" Second step for Relativictic Difference Scheme
"""
n = int(len(x))
for i in prange(n):
if z[i] >= z_start and z[i] <= z_stop:
gamma = (1 + px[i]**2 + py[i]**2 + pz[i]**2)**(1/2)
vx = px[i]/gamma
vy = py[i]/gamma
vz = pz[i]/gamma
b2 = 1 + Fx[i]**2 + Fy[i]**2 + Fz[i]**2
b1 = 2 - b2
b3 = 2 * (vx*Fx[i] + vy*Fy[i] + vz*Fz[i])
fx = 2 * (vy*Fz[i] - vz*Fy[i])
fy = 2 * (vz*Fx[i] - vx*Fz[i])
fz = 2 * (vx*Fy[i] - vy*Fx[i])
vx = (vx*b1 + fx + Fx[i]*b3)/b2
vy = (vy*b1 + fy + Fy[i]*b3)/b2
vz = (vz*b1 + fz + Fz[i]*b3)/b2
x[i] += vx
y[i] += vy
z[i] += vz
px[i] = vx*gamma
py[i] = vy*gamma
pz[i] = vz*gamma
else:
gamma = (1 + px[i]**2 + py[i]**2 + pz[i]**2)**(1/2)
vz = pz[i]/gamma
z[i] += vz
return x, y, z, px, py, pz
| 9,174
|
def drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy):
"""Drop a point from the tabu search list."""
if len(tabulist) < tabulistsize:
return tabulist
if tabustrategy == 'oldest':
tabulist.pop(0)
else:
distance = np.sqrt(np.sum((tabulist - xf)**2, axis=1))
index = np.argmax(distance)
tabulist.pop(index)
return tabulist
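
# A minimal usage sketch: with a full tabu list, 'oldest' drops the first entry while
# any other strategy drops the point farthest from xf (here [5., 5.]).
if __name__ == "__main__":
    xf_demo = np.array([0.5, 0.5])
    tabu_demo = [np.array([0.0, 0.0]), np.array([5.0, 5.0]), np.array([1.0, 1.0])]
    print(drop_tabu_points(xf_demo, list(tabu_demo), 3, 'oldest'))    # drops [0., 0.]
    print(drop_tabu_points(xf_demo, list(tabu_demo), 3, 'farthest'))  # drops [5., 5.]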
| 9,175
|
def get_atom_feature_dims(list_acquired_feature_names):
""" tbd
"""
return list(map(len, [CompoundKit.atom_vocab_dict[name] for name in list_acquired_feature_names]))
| 9,176
|
def parse_selector(selector):
"""Parses a block of selectors like div .name #tag to class=.name, selector=div and id=#tag.
Returns (selector, id, class[]) """
m_class, m_id, m_selector, m_attr = [], None, None, {}
if selector is not None and type(selector) == str:
selector_labels = selector.split()
for label in selector_labels:
if label.startswith("."):
m_class.append(label)
elif label.startswith("#"):
if m_id is not None:
raise ValueError("Multiple id's are declared in block "+str(selector))
m_id = label
elif label.startswith("@@"):
attribute_block = str(label).split('=')
if len(attribute_block) < 2:
raise ValueError('Attribute does not match the \
format @@<attribute_name>=<attribute_value> without space')
attr = attribute_block[0]
value = attribute_block[1]
                m_attr[attr] = value
else:
if m_selector is not None:
raise ValueError("Multiple selectors are declared in block "+str(selector))
m_selector = label
    if m_attr and not m_selector:
        raise AssertionError('If selection is done with an attribute @@<attr_name>=<attr_value>, '
                             'then a selector is also required.\n Eg: <selector> @@<attr_name>=<attr_value>')
    return m_selector, m_id, m_class, m_attr
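
# A minimal usage sketch (relies on the consistent ``m_attr`` naming above):
if __name__ == "__main__":
    print(parse_selector("div .name #tag @@href=/home"))
    # -> ('div', '#tag', ['.name'], {'@@href': '/home'})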
| 9,177
|
def is_batch_enabled(release_id):
"""
Check whether batching is enabled for a release.
"""
details = get_release_details_by_id(release_id)
return details['data']['attributes']['enable_batching']
| 9,178
|
def create(tiles):
"""Handler."""
with futures.ThreadPoolExecutor(max_workers=8) as executor:
responses = executor.map(worker, tiles)
with contextlib.ExitStack() as stack:
sources = [
stack.enter_context(rasterio.open(tile)) for tile in responses if tile
]
dest, output_transform = merge(sources, nodata=-32767)
meta = {
"driver": "GTiff",
"count": 1,
"dtype": np.int16,
"nodata": -32767,
"height": dest.shape[1],
"width": dest.shape[2],
"compress": "DEFLATE",
"crs": "epsg:4326",
"transform": output_transform,
}
memfile = MemoryFile()
with memfile.open(**meta) as dataset:
dataset.write(dest)
return memfile
| 9,179
|
def write_rxn_rates(path, lang, specs, reacs, fwd_rxn_mapping):
"""Write reaction rate subroutine.
Includes conditionals for reversible reactions.
Parameters
----------
path : str
Path to build directory for file.
lang : {'c', 'cuda', 'fortran', 'matlab'}
Programming language.
specs : list of SpecInfo
List of species in the mechanism.
reacs : list of ReacInfo
List of reactions in the mechanism.
fwd_rxn_mapping : List of integers
The index of the reaction in the original mechanism
Returns
    -------
None
"""
num_s = len(specs)
num_r = len(reacs)
rev_reacs = [i for i, rxn in enumerate(reacs) if rxn.rev]
num_rev = len(rev_reacs)
pdep_reacs = [i for i, rxn in enumerate(reacs) if rxn.thd_body or rxn.pdep]
pre = '__device__ ' if lang == 'cuda' else ''
filename = 'rates' + utils.header_ext[lang]
with open(os.path.join(path, filename), 'w') as file:
file.write(
'#ifndef RATES_HEAD\n'
'#define RATES_HEAD\n'
'\n'
'#include "header{}"\n'.format(utils.header_ext[lang]) +
'\n'
'{0}void eval_rxn_rates (const double,'
' const double, const double*, double*, double*);\n'
'{0}void eval_spec_rates (const double*,'
' const double*, const double*, double*, double*);\n'.format(pre)
)
if pdep_reacs:
file.write('{}void get_rxn_pres_mod (const double, const '
'double, const double*, double*);\n'.format(pre)
)
file.write('\n'
'#endif\n'
)
filename = 'rxn_rates' + utils.file_ext[lang]
with open(os.path.join(path, filename), 'w') as file:
line = ''
if lang == 'cuda': line = '__device__ '
if lang in ['c', 'cuda']:
file.write('#include "rates' + utils.header_ext[lang] + '"\n')
line += ('void eval_rxn_rates (const double T, const double pres,'
' const double * C, double * fwd_rxn_rates, '
'double * rev_rxn_rates) {\n'
)
elif lang == 'fortran':
line += ('subroutine eval_rxn_rates(T, pres, C, fwd_rxn_rates,'
' rev_rxn_rates)\n\n'
)
# fortran needs type declarations
line += (' implicit none\n'
' double precision, intent(in) :: '
'T, pres, C({})\n'.format(num_s)
)
line += (' double precision, intent(out) :: '
'fwd_rxn_rates({}), '.format(num_r) +
'rev_rxn_rates({})\n'.format(num_rev)
)
line += (' \n'
' double precision :: logT\n'
)
kf_flag = True
if rev_reacs and any([not r.rev_par for r in reacs]):
line += ' double precision :: kf, Kc\n'
kf_flag = False
if any([rxn.cheb for rxn in reacs]):
if kf_flag:
line += ' double precision :: kf, Tred, Pred\n'
kf_flag = False
else:
line += ' double precision :: Tred, Pred\n'
if any([rxn.plog for rxn in reacs]):
if kf_flag:
line += ' double precision :: kf, kf2\n'
kf_flag = False
else:
line += ' double precision :: kf2\n'
line += '\n'
elif lang == 'matlab':
line += ('function [fwd_rxn_rates, rev_rxn_rates] = '
'eval_rxn_rates (T, pres, C)\n\n'
' fwd_rxn_rates = zeros({},1);\n'.format(num_r) +
' rev_rxn_rates = fwd_rxn_rates;\n'
)
file.write(line)
get_array = utils.get_array
pre = ' '
if lang == 'c':
pre += 'double '
elif lang == 'cuda':
pre += 'register double '
line = (pre + 'logT = log(T)' +
utils.line_end[lang]
)
file.write(line)
file.write('\n')
kf_flag = True
if rev_reacs and any([not r.rev_par for r in reacs]):
kf_flag = False
if lang == 'c':
file.write(' double kf;\n'
' double Kc;\n'
)
elif lang == 'cuda':
file.write(' register double kf;\n'
' register double Kc;\n'
)
if any([rxn.cheb for rxn in reacs]):
# Other variables needed for Chebyshev
if lang == 'c':
if kf_flag:
file.write(' double kf;\n')
kf_flag = False
file.write(' double Tred;\n'
' double Pred;\n')
file.write(utils.line_start +
'double cheb_temp_0, cheb_temp_1' +
utils.line_end[lang]
)
dim = max(rxn.cheb_n_temp for rxn in reacs if rxn.cheb)
file.write(utils.line_start +
'double dot_prod[{}]'.format(dim) +
utils.line_end[lang]
)
elif lang == 'cuda':
if kf_flag:
file.write(' register double kf;\n')
kf_flag = False
file.write(' register double Tred;\n'
' register double Pred;\n')
file.write(utils.line_start +
'double cheb_temp_0, cheb_temp_1' +
utils.line_end[lang]
)
dim = max(rxn.cheb_n_temp for rxn in reacs if rxn.cheb)
file.write(utils.line_start +
'double dot_prod[{}]'.format(dim) +
utils.line_end[lang]
)
if any([rxn.plog for rxn in reacs]):
# Variables needed for Plog
if lang == 'c':
if kf_flag:
file.write(' double kf;\n')
file.write(' double kf2;\n')
if lang == 'cuda':
if kf_flag:
file.write(' register double kf;\n')
file.write(' register double kf2;\n')
file.write('\n')
def __get_arrays(sp, factor=1.0):
# put together all our coeffs
lo_array = [nu * factor] + [
sp.lo[6], sp.lo[0], sp.lo[0] - 1.0, sp.lo[1] / 2.0,
sp.lo[2] / 6.0, sp.lo[3] / 12.0, sp.lo[4] / 20.0,
sp.lo[5]
]
lo_array = [x * lo_array[0] for x in
[lo_array[1] - lo_array[2]] + lo_array[3:]
]
hi_array = [nu * factor] + [
sp.hi[6], sp.hi[0], sp.hi[0] - 1.0, sp.hi[1] / 2.0,
sp.hi[2] / 6.0, sp.hi[3] / 12.0, sp.hi[4] / 20.0,
sp.hi[5]
]
hi_array = [x * hi_array[0] for x in
[hi_array[1] - hi_array[2]] + hi_array[3:]
]
return lo_array, hi_array
for i_rxn in range(len(reacs)):
file.write(utils.line_start + utils.comment[lang] +
'rxn {}'.format(fwd_rxn_mapping[i_rxn]) + '\n')
rxn = reacs[i_rxn]
# if reversible, save forward rate constant for use
if rxn.rev and not rxn.rev_par and not (rxn.cheb or rxn.plog):
line = (' kf = ' + rxn_rate_const(rxn.A, rxn.b, rxn.E) +
utils.line_end[lang]
)
file.write(line)
elif rxn.cheb:
file.write(get_cheb_rate(lang, rxn))
elif rxn.plog:
                # Special forward rate evaluation for Plog reactions
vals = rxn.plog_par[0]
file.write(' if (pres <= {:.4e}) {{\n'.format(vals[0]) +
' kf = ' +
rxn_rate_const(vals[1], vals[2], vals[3]) +
utils.line_end[lang]
)
for idx, vals in enumerate(rxn.plog_par[:-1]):
vals2 = rxn.plog_par[idx + 1]
line = (' }} else if ((pres > {:.4e}) '.format(vals[0]) +
'&& (pres <= {:.4e})) {{\n'.format(vals2[0]))
file.write(line)
line = (' kf = log(' +
rxn_rate_const(vals[1], vals[2], vals[3]) + ')'
)
file.write(line + utils.line_end[lang])
line = (' kf2 = log(' +
rxn_rate_const(vals2[1], vals2[2], vals2[3]) + ')'
)
file.write(line + utils.line_end[lang])
pres_log_diff = math.log(vals2[0]) - math.log(vals[0])
line = (' kf = exp(kf + (kf2 - kf) * (log(pres) - ' +
'{:.16e}) / '.format(math.log(vals[0])) +
'{:.16e})'.format(pres_log_diff)
)
file.write(line + utils.line_end[lang])
vals = rxn.plog_par[-1]
file.write(
' }} else if (pres > {:.4e}) {{\n'.format(vals[0]) +
' kf = ' +
rxn_rate_const(vals[1], vals[2], vals[3]) +
utils.line_end[lang] +
' }\n'
)
line = ' ' + get_array(lang, 'fwd_rxn_rates', i_rxn) + ' = '
# reactants
for i, isp in enumerate(rxn.reac):
nu = rxn.reac_nu[i]
# check if stoichiometric coefficient is double or integer
if utils.is_integer(nu):
# integer, so just use multiplication
for i in range(int(nu)):
line += '' + get_array(lang, 'C', isp) + ' * '
else:
line += ('pow(' + get_array(lang, 'C', isp) +
', {}) *'.format(nu)
)
# Rate constant: print if not reversible, or reversible but
# with explicit reverse parameters.
if (rxn.rev and not rxn.rev_par) or rxn.plog or rxn.cheb:
line += 'kf'
else:
line += rxn_rate_const(rxn.A, rxn.b, rxn.E)
line += utils.line_end[lang]
file.write(line)
if rxn.rev:
if not rxn.rev_par:
# line = ' Kc = 0.0' + utils.line_end[lang]
# file.write(line)
# sum of stoichiometric coefficients
sum_nu = 0
coeffs = {}
# go through product species
for isp, prod_sp in enumerate(rxn.prod):
# check if species also in reactants
if prod_sp in rxn.reac:
isp2 = rxn.reac.index(prod_sp)
nu = rxn.prod_nu[isp] - rxn.reac_nu[isp2]
else:
nu = rxn.prod_nu[isp]
# Skip species with zero overall
# stoichiometric coefficient.
if (nu == 0):
continue
sum_nu += nu
# get species object
sp = specs[prod_sp]
if not sp:
print('Error: species ' + prod_sp + ' in reaction '
'{} not found.\n'.format(i_rxn)
)
sys.exit()
lo_array, hi_array = __get_arrays(sp)
if not sp.Trange[1] in coeffs:
coeffs[sp.Trange[1]] = lo_array, hi_array
else:
coeffs[sp.Trange[1]] = [
lo_array[i] + coeffs[sp.Trange[1]][0][i]
for i in range(len(lo_array))
], [
hi_array[i] + coeffs[sp.Trange[1]][1][i]
for i in range(len(hi_array))
]
# now loop through reactants
for isp, reac_sp in enumerate(rxn.reac):
# Check if species also in products;
                    # if so, already considered.
if reac_sp in rxn.prod: continue
nu = rxn.reac_nu[isp]
sum_nu -= nu
# get species object
sp = specs[reac_sp]
if not sp:
print('Error: species ' + reac_sp + ' in reaction '
'{} not found.\n'.format(i_rxn)
)
sys.exit()
lo_array, hi_array = __get_arrays(sp, factor=-1.0)
if not sp.Trange[1] in coeffs:
coeffs[sp.Trange[1]] = lo_array, hi_array
else:
coeffs[sp.Trange[1]] = [
lo_array[i] +
coeffs[sp.Trange[1]][0][i]
for i in range(len(lo_array))
], [hi_array[i] +
coeffs[sp.Trange[1]][1][i]
for i in range(len(hi_array))
]
isFirst = True
for T_mid in coeffs:
# need temperature conditional for equilibrium constants
line = ' if (T <= {:})'.format(T_mid)
if lang in ['c', 'cuda']:
line += ' {\n'
elif lang == 'fortran':
line += ' then\n'
elif lang == 'matlab':
line += '\n'
file.write(line)
lo_array, hi_array = coeffs[T_mid]
if isFirst:
line = ' Kc = '
else:
if lang in ['cuda', 'c']:
line = ' Kc += '
else:
line = ' Kc = Kc + '
line += ('({:.16e} + '.format(lo_array[0]) +
'{:.16e} * '.format(lo_array[1]) +
'logT + T * ('
'{:.16e} + T * ('.format(lo_array[2]) +
'{:.16e} + T * ('.format(lo_array[3]) +
'{:.16e} + '.format(lo_array[4]) +
'{:.16e} * T))) - '.format(lo_array[5]) +
'{:.16e} / T)'.format(lo_array[6]) +
utils.line_end[lang]
)
file.write(line)
if lang in ['c', 'cuda']:
file.write(' } else {\n')
elif lang in ['fortran', 'matlab']:
file.write(' else\n')
if isFirst:
line = ' Kc = '
else:
if lang in ['cuda', 'c']:
line = ' Kc += '
else:
line = ' Kc = Kc + '
line += ('({:.16e} + '.format(hi_array[0]) +
'{:.16e} * '.format(hi_array[1]) +
'logT + T * ('
'{:.16e} + T * ('.format(hi_array[2]) +
'{:.16e} + T * ('.format(hi_array[3]) +
'{:.16e} + '.format(hi_array[4]) +
'{:.16e} * T))) - '.format(hi_array[5]) +
'{:.16e} / T)'.format(hi_array[6]) +
utils.line_end[lang]
)
file.write(line)
if lang in ['c', 'cuda']:
file.write(' }\n\n')
elif lang == 'fortran':
file.write(' end if\n\n')
elif lang == 'matlab':
file.write(' end\n\n')
isFirst = False
line = (' Kc = '
'{:.16e}'.format((chem.PA / chem.RU) ** sum_nu) +
' * exp(Kc)' +
utils.line_end[lang]
)
file.write(line)
line = ' ' + get_array(lang, 'rev_rxn_rates',
rev_reacs.index(i_rxn)
) + ' = '
# reactants (products from forward reaction)
for isp in rxn.prod:
nu = rxn.prod_nu[rxn.prod.index(isp)]
# check if stoichiometric coefficient is double or integer
if utils.is_integer(nu):
# integer, so just use multiplication
for i in range(int(nu)):
line += '' + get_array(lang, 'C', isp) + ' * '
else:
line += ('pow(' + get_array(lang, 'C', isp) +
', {}) * '.format(nu)
)
# rate constant
if rxn.rev_par:
# explicit reverse Arrhenius parameters
line += rxn_rate_const(rxn.rev_par[0],
rxn.rev_par[1],
rxn.rev_par[2]
)
else:
# use equilibrium constant
line += 'kf / Kc'
line += utils.line_end[lang]
file.write(line)
file.write('\n')
if lang in ['c', 'cuda']:
file.write('} // end eval_rxn_rates\n\n')
elif lang == 'fortran':
file.write('end subroutine eval_rxn_rates\n\n')
elif lang == 'matlab':
file.write('end\n\n')
return
| 9,180
|
def executeMouseEvent(flags, x, y, data=0):
"""
    Mouse events generated with this wrapper for L{winUser.mouse_event}
will be ignored by NVDA.
Consult https://docs.microsoft.com/en-us/windows/desktop/api/winuser/nf-winuser-mouse_event
for detailed parameter documentation.
@param flags: Controls various aspects of mouse motion and button clicking.
The supplied value should be one or a combination of the C{winUser.MOUSEEVENTF_*} constants.
@type flags: int
@param x: The mouse's absolute position along the x-axis
or its amount of motion since the last mouse event was generated.
@type x: int
@param y: The mouse's absolute position along the y-axis
or its amount of motion since the last mouse event was generated.
@type y: int
@param data: Additional data depending on what flags are specified.
This defaults to 0.
@type data: int
"""
with ignoreInjection():
winUser.mouse_event(flags, x, y, data, None)
| 9,181
|
def _rfftn_empty_aligned(shape, axes, dtype, order='C', n=None):
"""Patched version of :func:`sporco.fft.rfftn_empty_aligned`.
"""
ashp = list(shape)
raxis = axes[-1]
ashp[raxis] = ashp[raxis] // 2 + 1
cdtype = _complex_dtype(dtype)
return cp.empty(ashp, cdtype, order)
| 9,182
|
def createErrLog(contents, path="."):
"""Writes an errors.log file with the formatting characters removed.
Args:
contents (str): The stuff to write to the log file.
path (str): Path to write errors.log (by default CWD).
Returns:
Nothing.
"""
fd = open(path + "/errors.log", "w")
toWrite = bcolors.stripFormatting(contents)
fd.write(toWrite)
fd.close()
| 9,183
|
def get_test_config():
"""
Returns a basic FedexConfig to test with.
"""
# Test server (Enter your credentials here)
return FedexConfig(key='xxxxxxxxxxxxxxxxx',
password='xxxxxxxxxxxxxxxxxxxxxxxxx',
account_number='xxxxxxxxx',
meter_number='xxxxxxxxxx',
use_test_server=True)
| 9,184
|
def write_data(movies, user, data_format='json'):
"""
"""
assert movies, 'no data to write'
date = datetime.now().strftime('%Y%m%d')
movies_clean = itertools.chain.from_iterable((json.loads(el) for el in movies))
movies_clean = tuple(movies_clean)
if data_format == 'all':
file_formats = ('csv', 'json')
else:
file_formats = (data_format, )
if 'json' in file_formats:
file_name = f'{user}_filmweb_{date}.json'
with open(file_name, 'w', encoding='utf-8') as out_file:
out_file.write(json.dumps(movies_clean))
logging.info(f'{file_name} written!')
if 'csv' in file_formats:
file_name = f'{user}_filmweb_{date}.csv'
with open(file_name, 'w', encoding='utf-8') as out_file:
writer = csv.DictWriter(out_file, fieldnames=CSV_ROWS, dialect='unix')
writer.writeheader()
for movie in movies_clean:
writer.writerow(movie)
logging.info(f'{file_name} written!')
return file_name
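
# A minimal usage sketch (illustrative data): ``movies`` is expected to be an iterable
# of JSON-encoded lists of movie dicts; CSV_ROWS and the imports (json, csv, itertools,
# datetime, logging) live in the original module.
if __name__ == "__main__":
    sample = ['[{"title": "Alien", "rating": 9}]',
              '[{"title": "Heat", "rating": 8}]']
    print(write_data(sample, user="some_user", data_format="json"))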
| 9,185
|
def download_file_insecure(url, target, cookies=None):
"""
Use Python to download the file, even though it cannot authenticate the
connection.
"""
if cookies:
raise NotImplementedError
src = dst = None
try:
src = urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = src.read()
dst = open(target, "wb")
dst.write(data)
finally:
if src:
src.close()
if dst:
dst.close()
| 9,186
|
def get_genotypes(
single_end: list,
paired_end: list,
metadata: str,
bam_dir: str,
intermediate_dir: str,
reference_genome_path: str,
mapping_quality: int,
blacklist_path: str,
snps_path: str,
processes: int,
memory: int,
skip_preprocessing: bool = False,
write_bam: bool = False,
algorithm_switch_bp: int = 70,
algorithm=None,
temp_dir=None
):
"""Obtain genotypes from sequencing data using QuASAR
Parameters
----------
single_end : list
List of single-end input files
paired_end : list
List of paired-end input files
metadata : dict
Dict of input file metadata
bam_dir : str
Directory to write BAM files
intermediate_dir : str
Directory to write intermediate pileup / bed files
reference_genome_path : str
Path to reference genome
mapping_quality : int
Minimum quality score for filtering alignment
blacklist_path : str
Path to ENCODE mappability blacklist
snps_path : str
Path to file containing SNPs to genotype
processes : int
Number of processes
memory : int
Memory limit
skip_preprocessing : bool
Indicator to skip preprocessing steps
write_bam : bool
Indicator to write a BAM file to disk
algorithm_switch_bp : int
Read length threshold for switching to `bwa mem`
algorithm : str or None
Force use of either `aln` or `mem` algorithm, if supplied
temp_dir
directory to use for temporary files
"""
n_single_end = len(single_end)
n_paired_end = len(paired_end)
if not metadata:
metadata_dict = {}
else:
with open(metadata, 'r') as f:
metadata_dict = json.load(f)
n_metadata = sum(len(x['libraries']) for x in metadata_dict.values())
def prepare_quasar_input_params(temp_dir_name, n, pe=False):
return {
'bam_dir': bam_dir if bam_dir else temp_dir_name,
'intermediate_dir': (
intermediate_dir if intermediate_dir
else temp_dir_name
),
'reference_genome_path': reference_genome_path,
'mapping_quality': mapping_quality,
'blacklist_path': blacklist_path,
'snps_path': snps_path,
'processes': max(1, int(processes / n)),
'memory': memory / min(processes, n),
'paired_end': pe,
'skip_preprocessing': skip_preprocessing,
'write_bam': write_bam,
'algorithm_switch_bp': algorithm_switch_bp,
'algorithm': algorithm,
'temp_dir': temp_dir
}
with tempfile.TemporaryDirectory(dir=temp_dir) as temp_dir_name:
with Pool(processes=min(processes, max(n_single_end, n_paired_end, n_metadata))) as pool:
if n_single_end > 0:
single_end_quasar_input_paths = pool.map(
partial(
prepare_quasar_input,
**prepare_quasar_input_params(temp_dir_name, n_single_end, pe=False)
),
single_end
)
else:
single_end_quasar_input_paths = []
if n_paired_end > 0:
paired_end_quasar_input_paths = pool.map(
partial(
prepare_quasar_input,
**prepare_quasar_input_params(temp_dir_name, n_paired_end, pe=True)
),
paired_end
)
else:
paired_end_quasar_input_paths = []
if n_metadata > 0:
meta_se, meta_pe = collate_metadata(metadata_dict)
if len(meta_se) > 0:
metadata_quasar_input_paths_se = pool.starmap(
partial(
prepare_quasar_input_from_metadata,
**prepare_quasar_input_params(temp_dir_name, len(meta_se), pe=False)
),
meta_se
)
else:
metadata_quasar_input_paths_se = []
if len(meta_pe) > 0:
metadata_quasar_input_paths_pe = pool.starmap(
partial(
prepare_quasar_input_from_metadata,
**prepare_quasar_input_params(temp_dir_name, len(meta_pe), pe=True)
),
meta_pe
)
else:
metadata_quasar_input_paths_pe = []
else:
metadata_quasar_input_paths_se, metadata_quasar_input_paths_pe = [], []
return pyQuASAR.genotype(
*filter(
None,
single_end_quasar_input_paths
+ paired_end_quasar_input_paths
+ metadata_quasar_input_paths_se
+ metadata_quasar_input_paths_pe
)
)
| 9,187
|
def async_handle_google_actions(hass, cloud, payload):
"""Handle an incoming IoT message for Google Actions."""
result = yield from ga.async_handle_message(
hass, cloud.gactions_config, payload)
return result
| 9,188
|
def get_picture_landmarks(filepath, predictor, logs=True):
"""
Do the doc!
"""
if logs:
print("Processing file: {}".format(filepath))
frame = cv2.imread(filepath)
lm = FLandmarks()
lm.extract_points(frame, predictor)
    if logs:
        print('\n')
    return lm
| 9,189
|
def query_master(h5in, query, idfile='sami_query.lis',
verbose=True, returnID=True, overwrite=True):
""" Read a SAMI master table and perform a query """
"""
The query should be passed either as a string argument or as an ascii file
containing such a string. Adding '@list' functionality will facilitate
browser-generated queries farther down the track.
"""
import tables
# Interpret the 'query' argument (look for a filename).
if os.path.isfile(query):
fq = open(query, 'r')
query = fq.readlines()[0]
# Open-read h5file.
h5file = tables.openFile(h5in, mode = "r")
# Optionally open an ascii file to write IDs returned by the query.
if returnID:
# Check if the file exists, check overwrite flag:
if os.path.isfile(idfile):
if not overwrite:
raise SystemExit("The nominated output file ('"+idfile+"') "+
"already exists. Please raise the 'overwrite'"+
" flag or enter a different filename. ")
f = open(idfile, 'w')
idlist = []
# Identify the SAMI master table -- assumed to live in the root directory
master = h5file.root.SAMI_MASTER
# Define a Row Iterator for screen output.
def print_sami(s):
counter = 0
        for row in s:
            name, z = row['CATID'], row['z_spec']
if verbose:
print(" Found SAMI galaxy %s at redshift z=%g" % (name, z))
counter += 1
idlist.append(name)
if returnID: f.write(str(name)+'\n')
print("\n Found "+str(counter)+" galaxies satisfying query:\n "+query)
print_sami(master.where(query))
h5file.close()
if returnID: f.close()
| 9,190
|
def conflict(next_x: int, s: tuple) -> bool:
"""Return a boolean that defines the conflict condition of the next queen's position"""
next_i = len(s)
for i in range(next_i):
if abs(s[i] - next_x) in (0, next_i - i):
return True
else:
return False
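
# A minimal usage sketch: s holds the columns of already-placed queens, one per row.
if __name__ == "__main__":
    placed = (1, 3, 0)          # queens at (row 0, col 1), (row 1, col 3), (row 2, col 0)
    print(conflict(1, placed))  # True: same column as the queen in row 0
    print(conflict(2, placed))  # False: column 2 is safe for row 3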
| 9,191
|
async def main_page():
"""Main page. Just for example."""
return APIResponse(message="ok")
| 9,192
|
def split_to_sublists(initial_list:list, n:int, strict:bool=True) -> List[list]:
"""Takes a list and splits it into sublists of size n
Parameters
----------
initial_list : list
The initial list to split into sublists
n : int
The size of each sublist
strict: bool
Whether to force an error if the length of the initial list is not divisible by n (split into even groups), default True
Returns
-------
List[list]
        A list of lists of size n (unless strict is False, in which case the last list may be shorter than n)
Examples
--------
### Split gallery images into sublists of 3
#### JINJA USAGE
```jinja2
{% if gallery|length % 3 == 0 %}
{% for sublist in gallery|split_to_sublists(3) %}
<div class="row">
<div class="col-md-4">
<img src="{{ sublist.0[0]['file_path'] }}" alt="{{ sublist.0[0]['file_path'].split()[-1] }}">
</div>
<div class="col-md-4">
<img src="{{ sublist.1[0]['file_path'] }}" alt="{{ sublist.1[0]['file_path'].split()[-1]}}">
</div>
<div class="col-md-4">
<img src="{{ sublist.2[0]['file_path'] }}" alt="{{ sublist.2[0]['file_path'].split()[-1] }}">
</div>
</div>
{% endfor %}
    {% endif %}
```
The above jinja is roughly equivalent to something like this in pure python:
```python
gallery = ["image 1" , "image 2", "image 3", "image 4" , "image 5", "image 6"]
    if len(gallery) % 3 == 0:
for sublist in split_to_sublists(gallery, 3): # Returns [["image 1" , "image 2", "image 3"], ["image 4" , "image 5", "image 6"]]
... # Do stuff with each sublist
```
"""
if strict:
if not len(initial_list) % n == 0:
raise ValueError(f"Provided list was not of correct size: \n\tList: {initial_list}\n\tSegment size {n}")
result = []
for i in range(0, len(initial_list), n): # Create sublists up to size n
result.append( initial_list[i:i + n])
return result
| 9,193
|
def get_matched_files(dirPath=".", regex=None):
"""Get the abspath of the files whose name matches a regex
Only files will be returned, and directories are excluded.
Args:
dirPath (str): the directory to search
regex (regex): the regular expression to match the filename
Returns:
tuple of strings
"""
    # check the existence of the path
fns = []
_absDir = os.path.abspath(dirPath)
if os.path.isdir(_absDir):
for i in os.listdir(_absDir):
            if regex is not None:
if not re.match(regex, i):
continue
_fpath = os.path.join(_absDir, i)
if os.path.isfile(_fpath):
fns.append(_fpath)
return tuple(fns)
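
# A minimal usage sketch: list the absolute paths of .py files in the current directory
# (``os`` and ``re`` are assumed to be imported at the top of the original module).
if __name__ == "__main__":
    for path in get_matched_files(".", r".*\.py$"):
        print(path)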
| 9,194
|
def get_sha1(req_path: Path) -> str:
""" For larger files sha1 algorithm is significantly faster than sha256 """
return get_hash(req_path, sha1)
| 9,195
|
def upload_attachment(page_id, file, comment, confluence_api_url, username, password, raw = None):
"""
    Upload an attachment
:param page_id: confluence page id
:param file: attachment file
:param comment: attachment comment
:return: boolean
"""
content_type = mimetypes.guess_type(file)[0]
filename = os.path.basename(file)
if raw is None:
r = requests.get(file, stream=True)
r.raw.decode_content = True
else:
r = raw
file_to_upload = {
'comment': comment,
'file': (urlEncodeNonAscii(filename), r.raw, content_type, {'Expires': '0'})
}
attachment = get_attachment(page_id, filename, confluence_api_url, username, password)
if attachment:
url = '%s/rest/api/content/%s/child/attachment/%s/data' % (confluence_api_url, page_id, attachment.id)
else:
url = '%s/rest/api/content/%s/child/attachment/' % (confluence_api_url, page_id)
session = requests.Session()
session.auth = (username, password)
session.headers.update({'X-Atlassian-Token': 'no-check'})
res = session.post(url, files=file_to_upload)
return True
| 9,196
|
def test_run_and_track_job_failure(mocked_get_job_pods):
"""
Tests that a Failure phase in a pod triggers an exception in run_and_track_job
"""
client = MagicMock()
labels = {"l1": "label1"}
pod = pod_spec_from_dict("name_of_pod", dummy_pod_spec, labels=labels)
job = job_definition("my_job", 100, pod, labels, "my_namespace")
pod.status = MagicMock()
pod.status.phase = "Failed"
mocked_get_job_pods.return_value = [pod]
with pytest.raises(FailedJob) as e:
run_and_track_job(client, job, lambda x: x)
assert e.job == job
assert e.pod.metadata.name == pod.metadata.name
| 9,197
|
def _make_note(nl_transcript: str, tl_audio_file: str) -> Note:
"""
    Creates an Anki note from a native language transcript and a target language audio file.
"""
return Note(model=_MODEL, fields=[f"[sound:{tl_audio_file}]", nl_transcript])
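
# A hedged sketch: ``Note`` and ``_MODEL`` come from the original module (genanki is
# the assumed backing library); the throwaway two-field model below exists only so the
# call can be exercised, and is not the original project's model.
if __name__ == "__main__":
    import genanki
    _MODEL = genanki.Model(
        1607392319, "tl-audio-card",
        fields=[{"name": "Audio"}, {"name": "Transcript"}],
        templates=[{"name": "Card 1",
                    "qfmt": "{{Audio}}",
                    "afmt": "{{FrontSide}}<hr id=answer>{{Transcript}}"}])
    note = _make_note("Hello, how are you?", "greeting_tl.mp3")
    print(note.fields)  # ['[sound:greeting_tl.mp3]', 'Hello, how are you?']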
| 9,198
|
def bed2beddb_status(connection, **kwargs):
"""Searches for small bed files uploaded by user in certain types
Keyword arguments:
lab_title -- limit search with a lab i.e. Bing+Ren, UCSD
start_date -- limit search to files generated since a date formatted YYYY-MM-DD
run_time -- assume runs beyond run_time are dead (default=24 hours)
"""
start = datetime.utcnow()
check = CheckResult(connection, 'bed2beddb_status')
my_auth = connection.ff_keys
check.action = "bed2beddb_start"
check.brief_output = []
check.full_output = {}
check.status = 'PASS'
check.summary = ''
# These are the accepted file types for this check
accepted_types = ['LADs', 'boundaries', 'domain calls', 'peaks']
# check indexing queue
check, skip = wfr_utils.check_indexing(check, connection)
if skip:
return check
# Build the query (find bg files without bw files)
query = ("/search/?type=FileProcessed&file_format.file_format=bed"
"&extra_files.file_format.display_title!=beddb"
"&status!=uploading&status!=to be uploaded by workflow"
"&status!=archived&status!=archived to project")
query += "".join(["&file_type=" + i for i in accepted_types])
# add date
s_date = kwargs.get('start_date')
if s_date:
query += '&date_created.from=' + s_date
# add lab
lab = kwargs.get('lab_title')
if lab:
query += '&lab.display_title=' + lab
# build a second query for checking failed ones
query_f = ("/search/?type=FileProcessed&file_format.file_format=bed"
"&extra_files.file_format.display_title=beddb"
"&extra_files.status=uploading"
"&extra_files.status=to be uploaded by workflow"
"&status!=uploading&status!=to be uploaded by workflow")
# add date
s_date = kwargs.get('start_date')
if s_date:
query_f += '&date_created.from=' + s_date
# add lab
lab = kwargs.get('lab_title')
if lab:
query_f += '&lab.display_title=' + lab
# The search
res_one = ff_utils.search_metadata(query, key=my_auth)
res_two = ff_utils.search_metadata(query_f, key=my_auth)
res_all = res_one + res_two
missing = []
for a_file in res_all:
if not a_file.get('genome_assembly'):
missing.append(a_file['accession'])
res_all = [i for i in res_all if i.get('genome_assembly')]
if not res_all:
check.summary = 'All Good!'
return check
check = wfr_utils.check_runs_without_output(res_all, check, 'bedtobeddb', my_auth, start)
if missing:
check.full_output['missing_assembly'] = missing
msg = str(len(missing)) + ' files missing genome assembly'
check.brief_output.insert(0, msg)
return check
| 9,199
|