| content (string, lengths 22-815k) | id (int64, 0-4.91M) |
|---|---|
def remove_header_comments_from_files(list_of_files: List[Union[str, Path]]) -> None:
"""
    Checks the files in the provided list for invalid headers (no type defined) and removes
    them in place when found.
Args:
list_of_files (List[Union[str, Path]]): list of Paths to **TFS** files meant to be checked.
The entries of the list can be strings or Path objects.
"""
for filepath in list_of_files:
LOGGER.info(f"Checking file: {filepath}")
with open(filepath, "r") as f:
f_lines = f.readlines()
        delete_indices = []
        for index, line in enumerate(f_lines):
            if line.startswith("*"):
                break
            if line.startswith("@") and len(line.split("%")) == 1:
                delete_indices.append(index)
        if delete_indices:
            LOGGER.info(f"  Found {len(delete_indices):d} lines to delete.")
            for index in reversed(delete_indices):
                deleted_line = f_lines.pop(index)
                LOGGER.info(f"  Deleted line: {deleted_line.strip():s}")
with open(filepath, "w") as f:
f.writelines(f_lines) | 5,330,800 |
def coeff_modulus_192(poly_modulus_degree):
"""
    Returns the default coefficient modulus for a given polynomial modulus degree
    at the 192-bit security level.
    :param poly_modulus_degree: Polynomial modulus degree (1024, 2048, 4096, 8192, 16384, or 32768)
    :return: Default coefficient modulus for the given degree.
"""
return seal.coeff_modulus_128(poly_modulus_degree) | 5,330,801 |
def __add_statement(is_position: bool) -> Statement:
"""
Adds a new statement to the database
:param is_position: True if the statement should be a position
:return: New statement object
"""
db_statement = Statement(is_position=is_position)
DBDiscussionSession.add(db_statement)
DBDiscussionSession.flush()
return db_statement | 5,330,802 |
def draw_track(center: Path, left: Path, right: Path, **kwargs):
"""
Draws a center line and two side paths.
This works for e.g. linearized track + extruded sides
"""
Tracking(center.x, center.y, lw=2, color=blue_grey_light, **kwargs)
Tracking(left.x, left.y, lw=1, ls="--", color=blue_grey_light, **kwargs)
Tracking(right.x, right.y, lw=1, ls="--", color=blue_grey_light, **kwargs) | 5,330,803 |
def xgcd(a: int, b: int) -> tuple:
"""
Extended Euclidean algorithm.
Returns (g, x, y) such that a*x + b*y = g = gcd(a, b).
"""
x0, x1, y0, y1 = 0, 1, 1, 0
while a != 0:
(q, a), b = divmod(b, a), a
y0, y1 = y1, y0 - q * y1
x0, x1 = x1, x0 - q * x1
return b, x0, y0 | 5,330,804 |
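# A minimal usage sketch, assuming xgcd above is in scope: verify the Bezout identity.
g, x, y = xgcd(240, 46)
assert g == 2
assert 240 * x + 46 * y == g  # here x = -9 and y = 47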
def correct_predictions(output_probabilities, targets):
"""
    Count the number of predictions that match the target classes in the model output.
    Args:
        output_probabilities: tensor of probabilities for the different output classes
        targets: indices of the actual target classes
    Returns:
        The number of correct predictions in "output_probabilities".
"""
_, out_classes = output_probabilities.max(dim=1)
correct = (out_classes == targets).sum()
return correct.item() | 5,330,805 |
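# A minimal usage sketch, assuming PyTorch (whose tensor API the function uses):
import torch
probs = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
targets = torch.tensor([1, 0, 0])
assert correct_predictions(probs, targets) == 2  # rows 0 and 1 are predicted correctly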
def test_compress_non_str():
"""Verify only str being accepted by compress."""
raw = "The quick brown fox jumps over the lazy dog."
with pytest.raises(TypeError):
smaz.compress([raw])
with pytest.raises(TypeError):
smaz.compress((raw,))
with pytest.raises(TypeError):
smaz.compress({"raw": raw})
with pytest.raises(TypeError):
smaz.compress(1)
with pytest.raises(TypeError):
smaz.compress(1.5)
with pytest.raises(TypeError):
smaz.compress(raw.encode("utf-8")) | 5,330,806 |
def get_base_url(url: str) -> str:
"""
    Return the base URL for a given URL.
    Example:
        Return http://example.com/ for input http://example.com/path/path
    Returns scheme://netloc/
"""
url = format_url(url)
parsed = parse_url(url)
    # urlparse-style results expose lowercase 'scheme' and 'netloc' attributes
    return '{uri.scheme}://{uri.netloc}/'.format(uri=parsed) | 5,330,807 |
def setup_tutorial():
"""
Helper function to check correct configuration of tf and keras for tutorial
:return: True if setup checks completed
"""
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
if not hasattr(backend, "tf"):
raise RuntimeError("This tutorial requires keras to be configured"
" to use the TensorFlow backend.")
# Image dimensions ordering should follow the Theano convention
if keras.backend.image_dim_ordering() != 'tf':
keras.backend.set_image_dim_ordering('tf')
print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' "
"to 'th', temporarily setting to 'tf'")
return True | 5,330,808 |
def split_value(s, splitters=["/", "&", ","]):
"""Splits a string. The first match in 'splitters' is used as the
separator; subsequent matches are intentionally ignored."""
if not splitters:
return [s.strip()]
values = s.split("\n")
for spl in splitters:
spl = re.compile(r"\b\s*%s\s*\b" % re.escape(spl), re.UNICODE)
        # a filter object is always truthy in Python 3, so use any() here
        if not any(spl.search(v) for v in values):
            continue
new_values = []
for v in values:
new_values.extend([st.strip() for st in spl.split(v)])
return new_values
return values | 5,330,809 |
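# A minimal usage sketch, assuming re is imported and split_value is in scope:
# the first splitter that matches wins, and later splitters are ignored.
assert split_value("Artist A / Artist B") == ["Artist A", "Artist B"]
assert split_value("Rock & Roll", splitters=["&"]) == ["Rock", "Roll"]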
def plotStressxEpoch(data, title="", outdir=""):
"""
Plots mean stress of hidden layer when training, along time (epochs).
:param data: pandas dataframe, ordered by epoch
:param title: title for the plot
:param outdir: output dir to store the plot
"""
fig, ax = plt.subplots()
x=data["epoch"]
fix=[2,6]
cond=["neg","pos"]
for f in fix:
for c in cond:
meancol = "f%imean_%s"%(f,c)
plt.grid(b=True, color='black', alpha=0.1, linestyle='-', linewidth=1)
plt.errorbar(jitter(x),data[meancol],yerr=sem(data[meancol]), color=condition_palette["f"+str(f)+c], ls=["-","--"][c == "neg"])
plt.legend()
fontsize=14
plt.xlabel("Epoch", fontsize=fontsize)
plt.ylabel("Mean Correct", fontsize=fontsize)
plt.xlim(0,max(x))
plt.ylim(0,1.01)
plt.title(title, fontsize=fontsize)
fname = join(outdir, "stress_epoch.png")
plt.savefig(fname)
print("Saved plot in %s"%fname)
plt.clf() | 5,330,810 |
def smape(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Calculates symmetric mean absolute percentage error SMAPE
Args:
y_true (np.ndarray): Actual values Y
y_pred (np.ndarray): Predicted values Y
Returns:
[float]: smape
"""
error = np.abs(y_true - y_pred) / (np.abs(y_true) + np.abs(y_pred))
return 100 * np.mean(error) | 5,330,811 |
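# Worked example, assuming numpy is imported as np: the per-point errors are
# 10/210 ~= 0.0476 and 20/380 ~= 0.0526, so the mean is ~0.0501, i.e. ~5.01.
print(smape(np.array([100.0, 200.0]), np.array([110.0, 180.0])))  # ~5.01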
def dealwithtype( x, t ):
""" return x and t as an array
broadcast values if shape of x != shape of y
and neither x or t are scalar
"""
x = np.asarray( x )
t = np.asarray( t )
if not x.shape and not t.shape:
pass
elif not x.shape:
x = x*np.ones_like( t )
elif not t.shape:
t = t*np.ones_like( x )
else:
x, t = np.meshgrid( x, t )
return x, t | 5,330,812 |
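# A minimal sketch of the broadcasting cases handled above, assuming numpy as np:
x, t = dealwithtype(2.0, np.array([0.0, 1.0, 2.0]))  # scalar x is broadcast to t's shape
assert x.shape == t.shape == (3,)
x, t = dealwithtype(np.arange(3), np.arange(4))      # two arrays are put on a meshgrid
assert x.shape == t.shape == (4, 3)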
def format_img_size(img, C: FasterRcnnConfiguration):
""" formats the image size based on config """
img_min_side = float(C.resize_smallest_side_of_image_to)
(height, width, _) = img.shape
if width <= height:
ratio = img_min_side / width
new_height = int(ratio * height)
new_width = int(img_min_side)
else:
ratio = img_min_side / height
new_width = int(ratio * width)
new_height = int(img_min_side)
img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
return img, ratio | 5,330,813 |
def test_atomic_language_min_length_2_nistxml_sv_iv_atomic_language_min_length_3_5(mode, save_output, output_format):
"""
Type atomic/language is restricted by facet minLength with value 9.
"""
assert_bindings(
schema="nistData/atomic/language/Schema+Instance/NISTSchema-SV-IV-atomic-language-minLength-3.xsd",
instance="nistData/atomic/language/Schema+Instance/NISTXML-SV-IV-atomic-language-minLength-3-5.xml",
class_name="NistschemaSvIvAtomicLanguageMinLength3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
) | 5,330,814 |
def build_tile_count_map(tile_counts):
"""Build a map from a tile key to a count."""
tile_count_map = defaultdict(int)
for tile_count in tile_counts:
tile = tile_count.tile
tile_key = (tile.letter, tile.value, tile.is_blank)
tile_count_map[tile_key] = tile_count.count
return tile_count_map | 5,330,815 |
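# A minimal sketch with hypothetical stand-ins for the caller's tile objects;
# only the attributes read above (tile, count, letter, value, is_blank) are assumed.
from collections import namedtuple
Tile = namedtuple('Tile', 'letter value is_blank')
TileCount = namedtuple('TileCount', 'tile count')
counts = [TileCount(Tile('A', 1, False), 9), TileCount(Tile('Z', 10, False), 1)]
assert build_tile_count_map(counts)[('A', 1, False)] == 9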
def test_resample_errors(fx_asset):
"""Sampling errors."""
with Image(filename=str(fx_asset.join('mona-lisa.jpg'))) as img:
with raises(TypeError):
img.resample(x_res='100')
with raises(TypeError):
img.resample(y_res='100')
with raises(ValueError):
img.resample(x_res=0)
with raises(ValueError):
img.resample(y_res=0)
with raises(ValueError):
img.resample(x_res=-5)
with raises(ValueError):
img.resample(y_res=-5) | 5,330,816 |
def format(session):
"""
Run black and isort on the codebase.
It is known that both formatters can have conflicts,
    so try to run `nox -s lint` before committing!
"""
session.install("-r", "tests/requirements.txt")
session.run("isort", ".")
session.run("black", ".") | 5,330,817 |
def after_feature(context: Context,
                  feature: Feature) -> None:
    """Runs after each feature file is exercised. The feature passed in is an instance of Feature.
    Args:
        context (behave.runner.Context): used by the behave framework; stores the scenario, feature, user data and so on.
        feature (behave.model.Feature): the feature that was just exercised
    Returns:
        None
    """
pass | 5,330,818 |
def _convert_dataset(dataset_split):
"""Converts the specified dataset split to TFRecord format.
Args:
dataset_split: The dataset split (e.g., train, test).
Raises:
RuntimeError: If loaded image and label have different shape.
"""
    dataset = os.path.basename(dataset_split)[:-4]  # strip the 4-character extension, e.g. '.txt'
sys.stdout.write('Processing ' + dataset)
filenames = [x.strip('\n').split(' ') for x in open(dataset_split, 'r')]
# (image_path, target_path)
num_images = len(filenames)
num_per_shard = int(math.ceil(num_images / float(_NUM_SHARDS)))
image_reader = build_data.ImageReader('png', channels=3)
label_reader = build_data.ImageReader('png', channels=1)
for shard_id in range(_NUM_SHARDS):
output_filename = os.path.join(
FLAGS.output_dir,
'%s-%05d-of-%05d.tfrecord' % (dataset, shard_id, _NUM_SHARDS))
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
start_idx = shard_id * num_per_shard
end_idx = min((shard_id + 1) * num_per_shard, num_images)
for i in range(start_idx, end_idx):
sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
i + 1, len(filenames), shard_id))
sys.stdout.flush()
# Read the image.
image_filename = filenames[i][0]
seg_filename = filenames[i][1]
image_data = tf.gfile.FastGFile(image_filename, 'rb').read()
height, width = image_reader.read_image_dims(image_data)
# Read the semantic segmentation annotation.
seg_data = tf.gfile.FastGFile(seg_filename, 'rb').read()
seg_height, seg_width = label_reader.read_image_dims(seg_data)
if height != seg_height or width != seg_width:
raise RuntimeError('Shape mismatched between image and label.')
# Convert to tf example.
                example = build_data.image_seg_to_tfexample(
                    image_data, filenames[i][0],
                    height, width, seg_data)
tfrecord_writer.write(example.SerializeToString())
sys.stdout.write('\n')
sys.stdout.flush() | 5,330,819 |
def get_uid_to_user(restful_url):
"""Gets uid -> user mapping from restful url"""
query_url = restful_url + "/GetAllUsers"
resp = requests.get(query_url)
if resp.status_code != 200:
logger.warning("Querying %s failed.", query_url)
return {}
data = json.loads(resp.text)
uid_to_user = {}
for item in data:
try:
uid = int(item[1])
user = item[0]
uid_to_user[uid] = user
except Exception as e:
logger.warning("Parsing %s failed: %s", item, e)
return uid_to_user | 5,330,820 |
def test_filtered_instrument_keywords():
"""Test to see that the instrument specific service keywords are
different for all instruments"""
kw = []
for ins in JWST_INSTRUMENT_NAMES:
kw.append(mm.instrument_keywords(ins, caom=False)['keyword'].tolist())
assert kw[0] != kw[1] != kw[2] != kw[3] != kw[4] | 5,330,821 |
def test_select_deterministc_leaf_by_max_scores():
"""Some tests on :func:`select_deterministc_leaf_by_max_scores`"""
node_scoring_method = partial(ucb_scores, ucb_constant=10)
info = {}
# if only one leaf, should find it
root = DeterministicNode(
{"latent_state": "root", "reward": 0.5, "n": 0, "qval": 0.0}, None
)
first_leaf = DeterministicNode(
{"prior": 0.1, "action": "a1", "n": 3, "qval": 0.0}, root
)
root.add_child("a1", first_leaf)
assert select_deterministc_leaf_by_max_scores(node_scoring_method, root, info) == (
first_leaf,
None,
)
assert info["ucb_tree_depth"].max == 1
# a second, better, leaf should be picked instead
second_leaf = DeterministicNode(
{"prior": 0.1, "action": "a2", "n": 3, "qval": 5.0}, root
)
root.add_child("a2", second_leaf)
assert select_deterministc_leaf_by_max_scores(node_scoring_method, root, info) == (
second_leaf,
None,
)
assert info["ucb_tree_depth"].max == 1
assert info["ucb_tree_depth"].num == 2
# trying to add more nodes, should pick it
third_leaf = DeterministicNode(
{"prior": 0.1, "action": "a", "n": 3, "qval": -5.0}, second_leaf
)
second_leaf.add_child("s", third_leaf)
assert select_deterministc_leaf_by_max_scores(node_scoring_method, root, info) == (
third_leaf,
None,
)
assert info["ucb_tree_depth"].max == 2
# increasing q value of first (bad) leaf should make it favourable
first_leaf.stats["qval"] = 10000
assert select_deterministc_leaf_by_max_scores(node_scoring_method, root, info) == (
first_leaf,
None,
)
assert info["ucb_tree_depth"].max == 2
assert info["ucb_tree_depth"].num == 4 | 5,330,822 |
def parse_text(text):
"""
    Parse raw text format playlists; each line must contain a single
    track with artist and title separated by a single dash, e.g. Queen - Bohemian Rhapsody.
:param str text:
:return: A list of tracks
"""
tracks: List[tuple] = []
for line in text.split("\n"):
line = line.strip()
if not line or line.startswith("#"):
continue
parts = line.split("-", 1)
if len(parts) != 2:
continue
artist, track = list(map(str.strip, parts))
if not artist or not track or (artist, track) in tracks:
continue
tracks.append((artist, track))
return tracks | 5,330,823 |
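# A minimal usage sketch: comments, blank lines, dash-less lines, and duplicates
# are all skipped by parse_text above.
playlist = """
# my playlist
Queen - Bohemian Rhapsody
Queen - Bohemian Rhapsody
no dash here
"""
assert parse_text(playlist) == [("Queen", "Bohemian Rhapsody")]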
def set_tickers(request, tickers):
"""
Sets requested tickers in the Bloomberg's request.
Parameters
----------
request
request to be sent
tickers: List[str]
required tickers
"""
securities = request.getElement(SECURITIES)
for ticker in tickers:
securities.appendValue(ticker) | 5,330,824 |
def R_nl(n, l, r, Z=1):
"""
Returns the Hydrogen radial wavefunction R_{nl}.
n, l .... quantum numbers 'n' and 'l'
r .... radial coordinate
Z .... atomic number (1 for Hydrogen, 2 for Helium, ...)
Everything is in Hartree atomic units.
Examples::
>>> from sympy.physics.hydrogen import R_nl
>>> from sympy import var
>>> var("r Z")
(r, Z)
>>> R_nl(1, 0, r, Z)
2*(Z**3)**(1/2)*exp(-Z*r)
>>> R_nl(2, 0, r, Z)
2**(1/2)*(Z**3)**(1/2)*(2 - Z*r)*exp(-Z*r/2)/4
>>> R_nl(2, 1, r, Z)
Z*r*6**(1/2)*(Z**3)**(1/2)*exp(-Z*r/2)/12
For Hydrogen atom, you can just use the default value of Z=1::
>>> R_nl(1, 0, r)
2*exp(-r)
>>> R_nl(2, 0, r)
2**(1/2)*(2 - r)*exp(-r/2)/4
>>> R_nl(3, 0, r)
2*3**(1/2)*(3 - 2*r + 2*r**2/9)*exp(-r/3)/27
For Silver atom, you would use Z=47::
>>> R_nl(1, 0, r, Z=47)
94*47**(1/2)*exp(-47*r)
>>> R_nl(2, 0, r, Z=47)
47*94**(1/2)*(2 - 47*r)*exp(-47*r/2)/4
>>> R_nl(3, 0, r, Z=47)
94*141**(1/2)*(3 - 94*r + 4418*r**2/9)*exp(-47*r/3)/27
The normalization of the radial wavefunction is::
>>> from sympy import integrate, oo
>>> integrate(R_nl(1, 0, r)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 0, r)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 1, r)**2 * r**2, (r, 0, oo))
1
It holds for any atomic number:
>>> integrate(R_nl(1, 0, r, Z=2)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 0, r, Z=3)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 1, r, Z=4)**2 * r**2, (r, 0, oo))
1
"""
# sympify arguments
n, l, r, Z = S(n), S(l), S(r), S(Z)
# radial quantum number
n_r = n - l - 1
# rescaled "r"
a = 1/Z # Bohr radius
r0 = 2 * r / (n * a)
# normalization coefficient
C = sqrt((S(2)/(n*a))**3 * factorial(n_r) / (2*n*factorial(n+l)))
    # This is an equivalent normalization coefficient that can be found in
    # some books. Both coefficients appear to be equally fast:
# C = S(2)/n**2 * sqrt(1/a**3 * factorial(n_r) / (factorial(n+l)))
return C * r0**l * laguerre_l(n_r, 2*l+1, r0) * exp(-r0/2) | 5,330,825 |
def generate_new_classes(start_cycle_number=0, class_number=50, input_stack="combined_stack.mrcs", pixel_size=1.2007, mask_radius=150, low_res=300, high_res=40, new_star_file="cycle_0.star", working_directory="~", automask=False, autocenter=True):
"""
Call out to cisTEM2 ``refine2d`` using :py:func:`subprocess.Popen` to generate a new set of *Ab Initio* classes.
Args:
start_cycle_number (int): Iteration number of the classification (indexes from 0 in :py:func:`execute_job_loop`)
class_number (int): Number of class seeds to generate (default 50)
input_stack (str): Filename of combined monolithic particle stack
        pixel_size (float): Pixel size of image files, in Å.
mask_radius (int): Radius in Å to use for mask (default 150).
low_res (float): Low resolution cutoff for classification, in Å.
high_res (float): High resolution cutoff for classification, in Å.
new_star_file (str): Filename of starting cisTEM-formatted star file.
working_directory (str): Directory where data will output.
automask (bool): Automatically mask class averages
autocenter (bool): Automatically center class averages to center of mass.
Returns:
str: STDOUT of :py:func:`subprocess.Popen` call to ``refine2d``.
"""
live2dlog = logging.getLogger("live_2d")
automask_text = "No"
if automask is True:
automask_text = "Yes"
autocenter_text = "No"
if autocenter is True:
autocenter_text = "Yes"
input = "\n".join([
os.path.join(working_directory, input_stack), # Input MRCS stack
os.path.join(working_directory, new_star_file), # Input Star file
os.devnull, # Input MRC classes
os.devnull, # Output star file
os.path.join(working_directory, "cycle_{}.mrc".format(start_cycle_number)), # Output MRC class
str(class_number), # number of classes to generate for the first time - only use when starting a NEW classification
"1", # First particle in stack to use
"0", # Last particle in stack to use - 0 is the final.
"1", # Fraction of particles to classify
str(pixel_size), # Pixel Size
# "300", # keV
# "2.7", # Cs
# "0.07", # Amplitude Contrast
str(mask_radius), # Mask Radius in Angstroms
str(low_res), # Low Resolution Limit
str(high_res), # High Resolution Limit
"0", # Angular Search
"0", # XY search
"1", # Tuning
"2", # Tuning
"Yes", # Normalize?
"Yes", # INVERT?
"No", # Exclude blank edges
automask_text, # Automask
autocenter_text, # Autocenter
"No", # Dump Dats
"No.dat", # Datfilename
"1", # max threads
])
p = subprocess.Popen("refine2d", stdout=subprocess.PIPE, stdin=subprocess.PIPE)
out, _ = p.communicate(input=input.encode('utf-8'))
live2dlog.info(out.decode('utf-8')) | 5,330,826 |
def entity_by_name(name):
"""Adapt Entity.name (not Entity.class_name!) to entity."""
entities = zope.component.getUtility(
icemac.addressbook.interfaces.IEntities).getEntities(sorted=False)
for candidate in entities:
if candidate.name == name:
return candidate
raise ValueError("Unknown name: %r" % name) | 5,330,827 |
def _update_longitude_attrs(config, output_filename):
"""Update attributes of longitude."""
ncodir = config["s2smetric"]["ncodir"]
cmd = f"{ncodir}/ncatted"
cmd += " -a long_name,longitude,c,c,'longitude'"
cmd += " -a standard_name,longitude,c,c,'longitude'"
cmd += " -a units,longitude,c,c,'degree_east'"
cmd += f" {output_filename}"
_run_cmd(cmd, "[ERR] Problem with ncatted!") | 5,330,828 |
def export_single_floor(floor):
"""exports a single floor
"""
return mt.Floor(
*export_vertices(floor.Points),
id=str(next_id()),
ep_id=floor.Id,
type=str(id_map(floor.Type.Id))) | 5,330,829 |
def test_tcp_tls_verify_both():
"""
Test TCP TLS client server connection with verify certs for both client and server
"""
certDirPath = localTestCertDirPath()
assert os.path.exists(certDirPath)
serverKeyPath = os.path.join(certDirPath, 'server_key.pem') # local server private key
serverCertPath = os.path.join(certDirPath, 'server_cert.pem') # local server public cert
clientCaPath = os.path.join(certDirPath, 'client.pem') # remote client public cert
clientKeyPath = os.path.join(certDirPath, 'client_key.pem') # local client private key
clientCertPath = os.path.join(certDirPath, 'client_cert.pem') # local client public cert
serverCaPath = os.path.join(certDirPath, 'server.pem') # remote server public cert
assert os.path.exists(serverKeyPath)
assert os.path.exists(serverCertPath)
assert os.path.exists(clientCaPath)
assert os.path.exists(clientKeyPath)
assert os.path.exists(clientCertPath)
assert os.path.exists(serverCaPath)
serverCertCommonName = 'localhost' # match hostname uses servers's cert commonname
tymist = tyming.Tymist()
with openServer(cls=ServerTls,
tymist=tymist, ha=("", 6101), bs=16192,
keypath=serverKeyPath,
certpath=serverCertPath,
cafilepath=clientCaPath,
certify=ssl.CERT_REQUIRED,) as server, \
openClient(cls=ClientTls,
tymist=tymist, ha=("127.0.0.1", 6101), bs=16192,
certedhost=serverCertCommonName,
keypath=clientKeyPath,
certpath=clientCertPath,
cafilepath=serverCaPath,
certify=ssl.CERT_REQUIRED,
hostify=True,) as beta:
assert server.opened == True
assert server.eha == ('127.0.0.1', 6101)
assert server.ha == ('0.0.0.0', 6101)
assert beta.opened == True
assert beta.accepted == False
assert beta.connected == False
assert beta.cutoff == False
# Connect beta to server
while not(beta.connected and len(server.ixes) >= 1):
beta.serviceConnect()
server.serviceConnects()
time.sleep(0.01)
assert beta.accepted == True
assert beta.connected == True
assert beta.cutoff == False
assert beta.ca == beta.cs.getsockname()
assert beta.ha == beta.cs.getpeername()
ixBeta = server.ixes[beta.ca]
assert ixBeta.cs.getsockname() == beta.cs.getpeername()
assert ixBeta.cs.getpeername() == beta.cs.getsockname()
assert ixBeta.ca == beta.ca
assert ixBeta.ha == beta.ha
msgOut = b"Beta sends to Server\n"
beta.tx(msgOut)
while not( not beta.txes and ixBeta.rxbs):
beta.serviceTxes()
server.serviceReceivesAllIx()
time.sleep(0.01)
time.sleep(0.05)
server.serviceReceivesAllIx()
msgIn = bytes(ixBeta.rxbs)
assert msgIn == msgOut
ixBeta.clearRxbs()
msgOut = b'Server sends to Beta\n'
ixBeta.tx(msgOut)
while not (not ixBeta.txes and beta.rxbs):
server.serviceTxesAllIx()
beta.serviceReceives()
time.sleep(0.01)
msgIn = bytes(beta.rxbs)
assert msgIn == msgOut
beta.clearRxbs()
assert beta.opened == False
assert server.opened == False
"""Done Test""" | 5,330,830 |
def test_hapd_dup_network_global_wpa2(dev, apdev):
"""hostapd and DUP_NETWORK command (WPA2)"""
passphrase="12345678"
src_ssid = "hapd-ctrl-src"
dst_ssid = "hapd-ctrl-dst"
src_params = hostapd.wpa2_params(ssid=src_ssid, passphrase=passphrase)
src_ifname = apdev[0]['ifname']
src_hapd = hostapd.add_ap(apdev[0], src_params)
dst_params = { "ssid": dst_ssid }
dst_ifname = apdev[1]['ifname']
dst_hapd = hostapd.add_ap(apdev[1], dst_params, no_enable=True)
hapd_global = hostapd.HostapdGlobal()
for param in [ "wpa", "wpa_passphrase", "wpa_key_mgmt", "rsn_pairwise" ]:
dup_network(hapd_global, src_ifname, dst_ifname, param)
dst_hapd.enable()
dev[0].connect(dst_ssid, psk=passphrase, proto="RSN", pairwise="CCMP",
scan_freq="2412")
addr = dev[0].own_addr()
if "FAIL" in dst_hapd.request("STA " + addr):
raise Exception("Could not connect using duplicated wpa params")
tests = [ "a",
"no-such-ifname no-such-ifname",
src_ifname + " no-such-ifname",
src_ifname + " no-such-ifname no-such-param",
src_ifname + " " + dst_ifname + " no-such-param" ]
for t in tests:
if "FAIL" not in hapd_global.request("DUP_NETWORK " + t):
raise Exception("Invalid DUP_NETWORK accepted: " + t)
with alloc_fail(src_hapd, 1, "hostapd_ctrl_iface_dup_param"):
if "FAIL" not in hapd_global.request("DUP_NETWORK %s %s wpa" % (src_ifname, dst_ifname)):
raise Exception("DUP_NETWORK accepted during OOM") | 5,330,831 |
def tensor_lab2rgb(input):
"""
    Convert a Lab tensor of shape (n, 3, h, w) to an RGB tensor of the same shape.
"""
input_trans = input.transpose(1, 2).transpose(2, 3) # n * h * w * 3
L, a, b = input_trans[:, :, :, 0:1], input_trans[:, :, :, 1:2], input_trans[:, :, :, 2:]
y = (L + 16.0) / 116.0
x = (a / 500.0) + y
z = y - (b / 200.0)
neg_mask = z.data < 0
z[neg_mask] = 0
xyz = torch.cat((x, y, z), dim=3)
mask = xyz.data > 0.2068966
mask_xyz = xyz.clone()
mask_xyz[mask] = torch.pow(xyz[mask], 3.0)
mask_xyz[~mask] = (xyz[~mask] - 16.0 / 116.0) / 7.787
mask_xyz[:, :, :, 0] = mask_xyz[:, :, :, 0] * 0.95047
mask_xyz[:, :, :, 2] = mask_xyz[:, :, :, 2] * 1.08883
rgb_trans = torch.mm(mask_xyz.view(-1, 3), torch.from_numpy(rgb_from_xyz).type_as(xyz)).view(
input.size(0), input.size(2), input.size(3), 3
)
rgb = rgb_trans.transpose(2, 3).transpose(1, 2)
mask = rgb > 0.0031308
mask_rgb = rgb.clone()
mask_rgb[mask] = 1.055 * torch.pow(rgb[mask], 1 / 2.4) - 0.055
mask_rgb[~mask] = rgb[~mask] * 12.92
neg_mask = mask_rgb.data < 0
large_mask = mask_rgb.data > 1
mask_rgb[neg_mask] = 0
mask_rgb[large_mask] = 1
return mask_rgb | 5,330,832 |
def get_absolute_path(file_name, package_level=True):
"""Get file path given file name.
    :param: [package_level] - Whether the file is inside or outside the
`gmail_api_wrapper` package
"""
if package_level:
# Inside `gmail_api_wrapper`
dirname = os.path.dirname(__file__)
else:
# Outside `gmail_api_wrapper`
dirname = os.path.join(os.path.dirname(__file__), os.pardir)
file_path = os.path.abspath(os.path.join(dirname, file_name))
return file_path | 5,330,833 |
def cancel_order(order, restock):
"""Cancel order and associated fulfillments.
Return products to corresponding stocks if restock is set to True.
"""
if restock:
restock_order_lines(order)
for fulfillment in order.fulfillments.all():
fulfillment.status = FulfillmentStatus.CANCELED
fulfillment.save(update_fields=['status'])
order.status = OrderStatus.CANCELED
order.save(update_fields=['status']) | 5,330,834 |
def thanos(planet: dict, finger: int) -> int:
    """ Thanos can wipe out half the lives of a world with a snap of the fingers """
    for key in list(planet.keys()):  # copy the keys so the dict can shrink while we iterate
        finger += 1  # Python has no ++ operator; increment explicitly
        if finger & 1 == 1:
            # kill it
            planet.pop(key)
    return finger | 5,330,835 |
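# A minimal usage sketch: every other key visited is removed in place.
world = {'hero1': 1, 'hero2': 1, 'hero3': 1, 'hero4': 1}
snaps = thanos(world, 0)
assert len(world) == 2  # half the lives are gone
assert snaps == 4       # the counter advanced once per key visited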
def ordered_list_item_to_percentage(ordered_list: List[str], item: str) -> int:
"""Determine the percentage of an item in an ordered list.
When using this utility for fan speeds, do not include "off"
Given the list: ["low", "medium", "high", "very_high"], this
    function will return the following when the item is passed
in:
low: 25
medium: 50
high: 75
very_high: 100
"""
if item not in ordered_list:
raise ValueError
list_len = len(ordered_list)
list_position = ordered_list.index(item) + 1
return (list_position * 100) // list_len | 5,330,836 |
def simple2tradition(line):
"""
    Convert Simplified Chinese to Traditional Chinese.
"""
line = Converter('zh-hant').convert(line)
return line | 5,330,837 |
def normalize_colors(colors):
"""
If colors are integer 8bit values, scale to 0 to 1 float value used by opengl
:param colors:
:return:
"""
if colors.dtype is not np.float32:
colors = colors.astype(np.float32) / 255.0
return colors | 5,330,838 |
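# A minimal sketch, assuming numpy as np: 8-bit input is scaled, float input passes through.
rgb8 = np.array([[255, 128, 0]], dtype=np.uint8)
print(normalize_colors(rgb8))           # ~[[1.0, 0.502, 0.0]]
rgbf = np.array([[1.0, 0.5, 0.0]], dtype=np.float32)
assert normalize_colors(rgbf) is rgbf   # already float32, returned unchanged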
def load_text_data(path, word_dict):
"""
Read the given path, which should have one sentence per line
:param path: path to file
:param word_dict: dictionary mapping words to embedding
indices
:type word_dict: WordDictionary
:return: a tuple with a matrix of sentences and an array
of sizes
"""
max_len = 0
all_indices = []
sizes = []
with open(path, 'rb') as f:
for line in f:
tokens = line.decode('utf-8').split()
this_len = len(tokens)
if this_len > max_len:
max_len = this_len
sizes.append(this_len)
inds = [word_dict[token] for token in tokens]
all_indices.append(inds)
shape = (len(all_indices), max_len)
sizes = np.array(sizes)
matrix = np.full(shape, word_dict.eos_index, np.int32)
for i, inds in enumerate(all_indices):
matrix[i, :len(inds)] = inds
return matrix, sizes | 5,330,839 |
def test_identity_projection(projection_type):
"""Sanity check: identical input & output headers should preserve image."""
header_in = fits.Header.fromstring(IDENTITY_TEST_HDR, sep='\n')
data_in = np.random.rand(header_in['NAXIS2'], header_in['NAXIS1'])
if projection_type == 'flux-conserving':
data_out, footprint = reproject_exact((data_in, header_in), header_in)
elif projection_type.startswith('adaptive'):
data_out, footprint = reproject_adaptive((data_in, header_in), header_in,
order=projection_type.split('-', 1)[1])
else:
data_out, footprint = reproject_interp((data_in, header_in), header_in,
order=projection_type)
# When reprojecting with an identical input and output header,
# we may expect the input and output data to be similar,
# and the footprint values to be ~ones.
expected_footprint = np.ones((header_in['NAXIS2'], header_in['NAXIS1']))
np.testing.assert_allclose(footprint, expected_footprint)
np.testing.assert_allclose(data_in, data_out, rtol=1e-6) | 5,330,840 |
def dirty(graph):
"""
    Return a mapping of all dirty nodes in the graph, i.e. nodes
    flagged for build or test.
    """
    return {n: v for n, v in graph.node.items() if v.get('build') or v.get('test')} | 5,330,841 |
def hangman(secret_word):
""" secret_word: string, the secret word to guess.
Starts up an interactive game of Hangman.
* At the start of the game, let the user know how many
letters the secret_word contains and how many guesses s/he starts with.
* The user should start with 6 guesses
* Before each round, you should display to the user how many guesses
s/he has left and the letters that the user has not yet guessed.
* Ask the user to supply one guess per round. Remember to make
sure that the user puts in a letter!
* The user should receive feedback immediately after each guess
about whether their guess appears in the computer's word.
* After each guess, you should display to the user the
partially guessed word so far.
Follows the other limitations detailed in the problem write-up.
"""
letters_guessed = ''
g_remaining, w_remaining = 6, 3
user_input = ''
def input_validator(user_input):
nonlocal letters_guessed
if len(user_input) == 1 and user_input.encode().isalpha():
if user_input.isupper():
user_input = user_input.lower()
if user_input not in letters_guessed:
letters_guessed += user_input
return True
else:
return False
def invalid_char_penalty():
nonlocal g_remaining, w_remaining
if w_remaining > 0:
w_remaining -= 1
penalty = "You have " + str(w_remaining) + " warnings left:"
else:
g_remaining -= 1
penalty = "You have no warnings left so you lose one guess:"
return penalty
def wrong_guess_penalty():
nonlocal g_remaining
if user_input.lower() in ('a', 'e', 'i', 'o', 'u'):
g_remaining -= 2
else:
g_remaining -= 1
# print welcome message
print(
f"Welcome to the game Hangman!\n"
f"I am thinking of a word that is {len(secret_word)} letters long\n"
f"You have {w_remaining} warnings left.")
while g_remaining >= 1:
# before proceeding into the loop, check if the word has been guessed
if is_word_guessed(secret_word, letters_guessed) == True:
print(
f"----------\n"
f"Congratulations, you won!\n"
f"Your total score for this game is: {len(set(secret_word)) * g_remaining}"
)
return
# printing required statements and take user_input
print(
f"----------\n"
f"You have {g_remaining} guesses left\n"
f"Available Letters: {get_available_letters(letters_guessed)}"
)
user_input = input("Please guess a letter: ")
# if user entered nothing, give him a free pass and loop again.
if user_input == '':
continue
# if it's already been guessed, issue a penalty notice and jump to next iteration
if user_input.lower() in letters_guessed:
print(
f"Oops! You've already guessed that letter. "
f"{invalid_char_penalty()} {get_guessed_word(secret_word, letters_guessed)}"
)
continue
        # validate once and branch on the result: input_validator records the
        # guess, so a second call with the same letter would wrongly return False
        if not input_validator(user_input):
            # invalid (non-English alphabet, blank, or len > 1): warning/penalty
            print(
                f"Oops! That is not a valid letter. "
                f"{invalid_char_penalty()} {get_guessed_word(secret_word, letters_guessed)}"
            )
        # if user_input is valid, check if it's correct and print accordingly
        elif user_input.lower() not in secret_word:
            wrong_guess_penalty()
            print(f"Oops! That letter is not in my word: {get_guessed_word(secret_word, letters_guessed)}")
        else:
            print(f"Good guess: {get_guessed_word(secret_word, letters_guessed)}")
# if g_remaining ran out, print end of game
print(
f"-----------\n"
f"Sorry, you ran out of guesses. The word was {secret_word}."
) | 5,330,842 |
def sin_potential(z):
"""Sin-like potential."""
z = tf.transpose(z)
x = z[0]
y = z[1]
# x, y = z
return 0.5 * ((y - w1(z)) / 0.4) ** 2 + 0.1 * tf.math.abs(x) | 5,330,843 |
def erode_label(image_numpy, iterations=2, mask_value=0):
""" For each iteration, removes all voxels not completely surrounded by
other voxels. This might be a bit of an aggressive erosion. Also I
    would bet it is incredibly inefficient. Also custom erosions in
multiple dimensions look a little bit messy.
"""
iterations = np.copy(iterations)
if isinstance(iterations, list):
if len(iterations) != 3:
            print 'The erosion parameter does not have enough dimensions (3). Using the first value in the erosion parameter.'
    else:
        iterations = [iterations, iterations, iterations]  # assignment: '==' here would be a no-op comparison
for i in xrange(max(iterations)):
kernel_center = 0
edges_kernel = np.zeros((3,3,3),dtype=float)
if iterations[2] > 0:
edges_kernel[1,1,0] = -1
edges_kernel[1,1,2] = -1
iterations[2] -= 1
kernel_center += 2
if iterations[1] > 0:
edges_kernel[1,0,1] = -1
edges_kernel[1,2,1] = -1
iterations[1] -= 1
kernel_center += 2
if iterations[0] > 0:
edges_kernel[0,1,1] = -1
edges_kernel[2,1,1] = -1
iterations[0] -= 1
kernel_center += 2
edges_kernel[1,1,1] = kernel_center
label_numpy = np.copy(image_numpy)
label_numpy[label_numpy != mask_value] = 1
label_numpy[label_numpy == mask_value] = 0
edge_image = signal.convolve(label_numpy, edges_kernel, mode='same')
edge_image[edge_image < 0] = -1
edge_image[np.where((edge_image <= kernel_center) & (edge_image > 0))] = -1
edge_image[edge_image == 0] = 1
edge_image[edge_image == -1] = 0
image_numpy[edge_image == 0] = mask_value
return image_numpy | 5,330,844 |
def _resolve_dir_against_charm_path(charm: CharmBase, *path_elements: str) -> str:
"""Resolve the provided path items against the directory of the main file.
Look up the directory of the main .py file being executed. This is normally
going to be the charm.py file of the charm including this library. Then, resolve
the provided path elements and, if the result path exists and is a directory,
return its absolute path; otherwise, return `None`.
"""
charm_dir = Path(str(charm.charm_dir))
if not charm_dir.exists() or not charm_dir.is_dir():
# Operator Framework does not currently expose a robust
# way to determine the top level charm source directory
# that is consistent across deployed charms and unit tests
# Hence for unit tests the current working directory is used
        # TODO: update this logic when the following ticket is resolved
# https://github.com/canonical/operator/issues/643
charm_dir = Path(os.getcwd())
alerts_dir_path = charm_dir.absolute().joinpath(*path_elements)
if not alerts_dir_path.exists():
raise InvalidAlertRulePathError(str(alerts_dir_path), "directory does not exist")
if not alerts_dir_path.is_dir():
raise InvalidAlertRulePathError(str(alerts_dir_path), "is not a directory")
return str(alerts_dir_path) | 5,330,845 |
def mpi_submit(nworker, nserver, pass_envs):
"""
    customized submit script that submits nworker + nserver jobs; each must accept args as a parameter.
    note this can be a lambda function containing additional parameters in input
    Parameters
      nworker    number of worker processes to start up
      nserver    number of server nodes to start up
      pass_envs  environment variables to be added to the starting programs
"""
env = os.environ.copy()
for k, v in pass_envs.items():
env[k] = str(v)
sargs = ' '.join(args.command)
if args.hostfile is None:
cmd = 'mpirun -n %d' % (nworker + nserver)
else:
cmd = 'mpirun -n %d --hostfile %s ' % (nworker + nserver, args.hostfile)
for k, v in pass_envs.items():
# for mpich2
cmd += ' -env %s %s' % (k, v)
# for openmpi
# cmd += ' -x %s' % k
cmd += ' '
cmd += ' '.join(args.command)
cmd += ' '
cmd += ' '.join(unknown)
# print '%s' % cmd
# known issue: results do not show in emacs eshell
def run():
subprocess.check_call(cmd, shell = True, env = env)
thread = Thread(target = run, args=())
thread.setDaemon(True)
thread.start() | 5,330,846 |
def computeBFGridPoint(basis, U, gpi, gps):
"""
Compute the bilinear form for one grid point with the points
stored in gps
@param basis: basis of sparse grid function,
@param U: list of distributions
@param gpi: HashGridPoint
@param gps: list of HashGridPoint
"""
    n = len(gps)
    ans = DataVector(n)
    # run over all grid points
    for j, gpj in enumerate(gps):
        # print "%i/%i" % (i * gs.size() + j + 1, gs.size() ** 2)
        ans[j] = computeBFPairwise(basis, U, gpi, gpj)
return ans | 5,330,847 |
def test_lateValidation():
"""
Test that you can populate a Query using strings and run the
validation afterwards
"""
y = q.Query('myToc', myStr="foo", myBool=True)
assert y.toc is None
assert y.tocName == 'myToc'
for attr in list(y[0].keys()):
assert isinstance(attr, str)
y[0].customAttribute = 'testing'
class myToc(TO):
class myStr(LimitedString()):
pass
class myBool(Bool()):
pass
py.test.raises(RuntimeError, y.validate) # Couldn't find TOC
y.toc = myToc
y.validate()
assert y.toc == myToc
for attr in list(y[0].keys()):
assert isinstance(attr, Attribute)
assert y[0].customAttribute == 'testing' | 5,330,848 |
def list_files(commit: Optional[str] = None,
pathspecs: Collection[PathOrStr] = (),
exclude: Collection[Pattern[str]] = (),
repo: Optional[Path] = None) -> List[Path]:
"""Lists files with git ls-files or git diff --name-only.
Args:
commit: commit to use as a base for git diff
pathspecs: Git pathspecs to use in git ls-files or diff
exclude: regular expressions for Posix-style paths to exclude
repo: repository path from which to run commands; defaults to Path.cwd()
"""
if repo is None:
repo = Path.cwd()
if commit:
files = _diff_names(commit, pathspecs, repo)
else:
files = _ls_files(pathspecs, repo)
git_root = root(repo=repo).resolve()
return sorted(file for file in files if not any(
e.search(file.relative_to(git_root).as_posix()) for e in exclude)) | 5,330,849 |
def merge_apply(las_paths_fn, las_fn):
"""Merge the las files into one, a few at a time.
This replaces the logic of HPC.daligner.
"""
io.rm_force(las_fn)
#all_las_paths = rel_to(io.deserialize(las_paths_fn), os.path.dirname(las_paths_fn))
all_las_paths = io.deserialize(las_paths_fn)
# Create symlinks, so system calls will be shorter.
all_syms = list()
for fn in all_las_paths:
symlink(fn)
all_syms.append(os.path.basename(fn))
curr_paths = sorted(all_syms)
# Merge a few at-a-time.
at_a_time = 250 # max is 252 for LAmerge
level = 1
while len(curr_paths) > 1:
level += 1
next_paths = list()
for i, paths in enumerate(ichunked(curr_paths, at_a_time)):
tmp_las = 'L{}.{}.las'.format(level, i+1)
paths_arg = ' '.join(paths)
cmd = 'LAmerge -v {} {}'.format(tmp_las, paths_arg)
io.syscall(cmd)
next_paths.append(tmp_las)
curr_paths = next_paths
io.syscall('mv -f {} {}'.format(curr_paths[0], 'keep-this'))
io.syscall('#rm -f *.las')
io.syscall('mv -f {} {}'.format('keep-this', las_fn)) | 5,330,850 |
def compute_smatch_batch(gold_filename, test_filename, starts, method ,
restart_threshold, concept_edges, precise,
missing, detailed):
"""
Compute SMATCH on two files with pairwise AMRs, one-AMR-per-line.
"""
    ps, rs, fs = [], [], []
try:
gold_file = open(gold_filename)
except IOError:
sys.stderr.write("ERROR: Could not open gold AMR file %s.\n" % gold_filename)
sys.exit(1)
try:
        test_file = open(test_filename)
except IOError:
sys.stderr.write("ERROR: Could not open test AMR file %s.\n" % test_filename)
sys.exit(1)
tiburonfailct = 0
parsefailct = 0
totalct = 0
decodefailct = 0
emptylinect = 0
while True:
gold = gold_file.readline()
test = test_file.readline().strip()
if not gold: # EOF
break
gold = gold.strip()
if not gold:
sys.stderr.write("WARNING: Empty line in gold AMR file. Skipping entry.\n")
continue
totalct += 1
if gold:
try:
if concept_edges: # rebuild normal AMR with concepts attached to nodes.
amr_gold = Hgraph.from_string(gold)
amr_gold = Hgraph.from_concept_edge_labels(amr_gold)
else:
amr_gold = Hgraph.from_string(gold)
l = len(amr_gold.triples())
except Exception as e:
print >>sys.stderr, e
sys.stderr.write("WARNING: Could not parse gold AMR. Skipping entry.\n")
continue
if test and not test.startswith("#"):
try:
amr_test = Hgraph.from_string(test)
if concept_edges: # rebuild normal AMR with concepts attached to nodes.
amr_test = Hgraph.from_concept_edge_labels(amr_test)
else:
amr_test = Hgraph.from_string(test)
if precise:
p,r,f = compute_smatch_precise(amr_gold, amr_test)
else:
p,r,f = compute_smatch_hill_climbing(amr_gold, amr_test,
starts = starts, method = method,
restart_threshold = restart_threshold)
if detailed:
print "P:%f R:%f F:%f " % (p, r, f)
else:
sys.stdout.write(".")
sys.stdout.flush()
ps.append((p,l))
rs.append((r,l))
fs.append((f,l))
except pyparsing.ParseException:
parsefailct += 1
else:
if not missing:
rs.append((0.0, l))
ps.append((0.0, l))
fs.append((0.0, l))
else:
if test=="# Tiburon failed.":
tiburonfailct += 1
elif test=="# Decoding failed.":
decodefailct += 1
emptylinect += 1
if not missing:
rs.append((0.0, l))
ps.append((0.0, l))
fs.append((0.0, l))
sys.stdout.write("\n")
avgp = mean(ps)
avgr = mean(rs)
avgf = mean(fs)
print "Total: %i\tFail(empty line): %i\tFail(invalid AMR): %i" % (totalct, emptylinect, parsefailct)
print "MEAN SMATCH: P:%f R:%f F:%f " % (avgp, avgr, avgf) | 5,330,851 |
def sum_by_hexagon(df,resolution,pol,fr,to,vessel_type=[],gt=[]):
"""
Use h3.geo_to_h3 to index each data point into the spatial index of the specified resolution.
Use h3.h3_to_geo_boundary to obtain the geometries of these hexagons
    Ex: sum_by_hexagon(df, 8, pol, fr, to)
"""
if vessel_type:
df_aggreg=df[((df.dt_pos_utc.between(fr,to))&(df.StandardVesselType.isin(vessel_type)))]
else:
df_aggreg=df[df.dt_pos_utc.between(fr,to)]
if df_aggreg.shape[0]>0:
if gt:
df_aggreg=df_aggreg[df_aggreg.GrossTonnage.between(gt[0],gt[1])]
if resolution==8:
df_aggreg = df_aggreg.groupby(by = "res_8").agg({"co2_t":sum,"ch4_t":sum}).reset_index()
else:
df_aggreg = df_aggreg.assign(new_res=df_aggreg.res_8.apply(lambda x: h3.h3_to_parent(x,resolution)))
df_aggreg = df_aggreg.groupby(by = "new_res").agg({"co2_t":sum,"ch4_t":sum}).reset_index()
df_aggreg.columns = ["hex_id", "co2_t","ch4_t"]
df_aggreg["geometry"] = df_aggreg.hex_id.apply(lambda x:
{ "type" : "Polygon",
"coordinates":
[h3.h3_to_geo_boundary(x,geo_json=True)]
}
)
return df_aggreg
else:
return df_aggreg | 5,330,852 |
def _ibp_sub(lhs, rhs):
"""Propagation of IBP bounds through a substraction.
Args:
lhs: Lefthand side of substraction.
rhs: Righthand side of substraction.
Returns:
out_bounds: IntervalBound.
"""
return lhs - rhs | 5,330,853 |
def test_bool():
"""
>>> assert lua.decode('false') == False
>>> assert lua.decode('true') == True
>>> assert lua.encode(False) == 'false'
>>> assert lua.encode(True) == 'true'
"""
pass | 5,330,854 |
def toil_make_tool(
toolpath_object: CommentedMap,
loadingContext: cwltool.context.LoadingContext,
) -> Process:
"""
Emit custom ToilCommandLineTools.
    This factory function is meant to be passed to cwltool.load_tool().
"""
if (
isinstance(toolpath_object, Mapping)
and toolpath_object.get("class") == "CommandLineTool"
):
return ToilCommandLineTool(toolpath_object, loadingContext)
return cwltool.workflow.default_make_tool(toolpath_object, loadingContext) | 5,330,855 |
def sentinel_s1(metadata):
""" Parse metadata and return basic Item
with rasterio.open('/Users/scott/Data/sentinel1-rtc/local_incident_angle.tif') as src:
...: metadata = src.profile
...: metadata.update(src.tags())
"""
def get_datetime(metadata):
''' retrieve UTC start time from tif metadata'''
times = []
for i in range(1, int(metadata['NUMBER_SCENES'])+1):
m = json.loads(metadata[f'SCENE_{i}_METADATA'])
times += [m['start_time'], m['end_time']]
return min(times)
def get_orbits(metadata):
''' https://forum.step.esa.int/t/sentinel-1-relative-orbit-from-filename/7042 '''
adjust = {'S1B':27, 'S1A':73}
abs_orbit = int(metadata['ABSOLUTE_ORBIT_NUMBER'])
rel_orbit = ((abs_orbit - adjust[metadata['MISSION_ID']]) % 175) + 1
return abs_orbit, rel_orbit
def get_geometry(metadata):
''' determine valid pixel footprint and bbox '''
# get MGRS grid square footprint
gridfile = op.join(op.dirname(__file__), 'sentinel1-rtc-conus-grid.geojson')
gf = gpd.read_file(gridfile)
gf.rename(columns=dict(id='tile'), inplace=True)
gf_grid = gf[gf.tile == metadata['TILE_ID']]
bbox = list(gf_grid.total_bounds)
# read GRD frame footprints
frames = []
for i in range(1, int(metadata['NUMBER_SCENES'])+1):
m = json.loads(metadata[f'SCENE_{i}_METADATA'])
frames.append(gpd.read_file(io.StringIO(m['footprint'])))
footprints = gpd.pd.concat(frames)
# get valid data footprint
intersection = gpd.overlay(gf_grid, footprints, how='intersection')
valid_footprint = intersection.unary_union.convex_hull
geom = {"type": "Polygon",
"coordinates":[list(valid_footprint.exterior.coords)]}
return bbox, geom
dt = parse(get_datetime(metadata))
abs_orbit, rel_orbit = get_orbits(metadata)
bbox, geom = get_geometry(metadata)
# Item properties
props = {
'datetime': dt.strftime('%Y-%m-%dT%H:%M:%SZ'),
'platform': metadata['MISSION_ID'],
'constellation': 'sentinel-1',
'instruments': ['c-sar'],
'gsd': 20,
'proj:epsg': metadata['crs'].to_epsg(),
'sentinel:utm_zone': metadata['TILE_ID'][:2],
'sentinel:latitude_band': metadata['TILE_ID'][3],
'sentinel:grid_square': metadata['TILE_ID'][4:],
'sentinel:product_id': metadata['SCENES'].split(','),
'sat:orbit_state': metadata['ORBIT_DIRECTION'],
'sat:absolute_orbit': abs_orbit,
'sat:relative_orbit': rel_orbit
}
# match key s3://sentinel-s1-rtc-indigo/tiles/RTC/1/IW/12/S/YJ/2016/S1B_20161121_12SYJ_ASC
DATE = metadata['DATE'].replace('-','')
    orbNames = {'ascending': 'ASC', 'descending': 'DSC'}
ORB = orbNames[metadata['ORBIT_DIRECTION']]
id = f"{metadata['MISSION_ID']}_{DATE}_{metadata['TILE_ID']}_{ORB}"
item = {
'type': 'Feature',
'stac_version': __stac_version__,
'stac_extensions': ['sar', 'sat', 'proj'],
'id': id,
'bbox': bbox,
'geometry': geom,
'properties':props
}
return item | 5,330,856 |
def get_loc(data, attr={'lr_mult':'0.01'}):
"""
    the localisation network in lenet-stn; it improves accuracy by roughly
    1% or more when num-epoch >= 15
"""
loc = mx.symbol.Convolution(data=data, num_filter=30, kernel=(5, 5), stride=(2,2))
loc = mx.symbol.Activation(data = loc, act_type='relu')
loc = mx.symbol.Pooling(data=loc, kernel=(2, 2), stride=(2, 2), pool_type='max')
loc = mx.symbol.Convolution(data=loc, num_filter=60, kernel=(3, 3), stride=(1,1), pad=(1, 1))
loc = mx.symbol.Activation(data = loc, act_type='relu')
loc = mx.symbol.Pooling(data=loc, global_pool=True, kernel=(2, 2), pool_type='avg')
loc = mx.symbol.Flatten(data=loc)
loc = mx.symbol.FullyConnected(data=loc, num_hidden=6, name="stn_loc", attr=attr)
return loc | 5,330,857 |
def test_data_dimension_after_dead_time_correction(crd_file):
"""Ensure ToF and data have the same dimensions - BF 2021-07-23."""
_, _, _, fname = crd_file
crd = CRDFileProcessor(Path(fname))
crd.spectrum_full()
crd.dead_time_correction(3)
assert crd.tof.ndim == crd.data.ndim | 5,330,858 |
def meanwave(signals):
""" This function computes the meanwave of various signals.
Given a set of signals, with the same number of samples, this function
returns an array representative of the meanwave of those signals - which is
a wave computed with the mean values of each signal's samples.
Parameters
----------
signals: matrix-like
the input signals.
Returns
-------
mw: array-like
the resulted meanwave
"""
return mean(signals,0) | 5,330,859 |
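# A minimal usage sketch, assuming numpy's mean is the one imported above:
import numpy as np
signals = np.array([[1.0, 2.0, 3.0],
                    [3.0, 4.0, 5.0]])
print(meanwave(signals))  # [2. 3. 4.], the sample-wise mean of the two signals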
def set_difficulty():
"""Ask the difficult level and return the number of turns corresponding"""
if input("Choose a difficulty level. Type 'easy' or 'hard': ").lower() == "easy":
return EASY_TURNS
else:
return HARD_TURNS | 5,330,860 |
def delete_event_by_id(id, user_id):
"""Remove one event based on id."""
sql = "DELETE FROM events WHERE id = :id AND host_id = :user_id RETURNING title;"
db.session.execute(sql, {"id": id, "user_id": user_id})
db.session.commit()
return ["Event deleted."] | 5,330,861 |
def get_cmap_colors(cmap='jet',p=None,N=10):
"""
"""
cm = plt.get_cmap(cmap)
if p is None:
return [cm(i) for i in np.linspace(0,1,N)]
else:
normalize = matplotlib.colors.Normalize(vmin=min(p), vmax=max(p))
colors = [cm(normalize(value)) for value in p]
return colors | 5,330,862 |
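# A minimal usage sketch, assuming matplotlib is available:
evenly_spaced = get_cmap_colors('viridis', N=3)              # 3 RGBA tuples along the map
data_scaled = get_cmap_colors('viridis', p=[0.1, 5.0, 2.3])  # one color per value in p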
def reader_factory(load_from, file_format):
"""Select and return instance of appropriate reader class for given file format.
Parameters
__________
load_from : str or file instance
file path or instance from which to read
file_format : str
format of file to be read
Returns
_______
Reader instance
"""
if file_format == 'hdf5':
reader = hdf5Reader(load_from)
elif file_format == 'pickle':
reader = PickleReader(load_from)
else:
raise NotImplementedError("Format '{}' has not been implemented.".format(file_format))
return reader | 5,330,863 |
def scrape_with_selenium(chrome, chrome_webdriver, url, xpath_tup_list, timeout):
"""Scrape using Selenium and Chrome."""
result_dic = {}
with SeleniumChromeSession(chrome=chrome, chrome_webdriver=chrome_webdriver) as driver:
wait_conditions = []
for xpath_tup in xpath_tup_list:
wait_conditions.append(WaitCondition(xpath_tup[0], By.XPATH, xpath_tup[1]))
try:
driver.get(url)
except WebDriverException as error:
logger.error(F'Issue: {error} for url "{url}"')
else:
scraper_wait = ScraperWait(wait_conditions)
try:
WebDriverWait(driver, timeout).until(scraper_wait)
except TimeoutException:
logger.error(F'Timeout waiting for url "{url}"')
else:
result_dic = scraper_wait.found_elements
return result_dic | 5,330,864 |
def take_element_screenshot(page_screenshot: Image.Image, bbox: Rectangle) -> Image.Image:
"""
Returns the cropped subimage with the coordinates given.
"""
w, h = page_screenshot.size
if bbox.area == 0:
raise ValueError(f"Rectangle {bbox} is degenerate")
if bbox not in Rectangle(Point(0, 0), Point(w, h)):
raise ValueError(f"Rectangle {bbox} not contained in the viewport {(0, 0, w, h)}")
return crop_image(page_screenshot, bbox) | 5,330,865 |
def officeOfRegistrar_forward(request, id):
"""form to set receiver and designation of forwarded file """
context = {"track_id": id}
return render(request, "officeModule/officeOfRegistrar/forwardingForm.html", context) | 5,330,866 |
def test_batching_hetero_topology(index_dtype):
"""Test batching two DGLHeteroGraphs where some nodes are isolated in some relations"""
g1 = dgl.heterograph({
('user', 'follows', 'user'): [(0, 1), (1, 2)],
('user', 'follows', 'developer'): [(0, 1), (1, 2)],
('user', 'plays', 'game'): [(0, 0), (1, 0), (2, 1), (3, 1)]
}, index_dtype=index_dtype)
g2 = dgl.heterograph({
('user', 'follows', 'user'): [(0, 1), (1, 2)],
('user', 'follows', 'developer'): [(0, 1), (1, 2)],
('user', 'plays', 'game'): [(0, 0), (1, 0), (2, 1)]
}, index_dtype=index_dtype)
bg = dgl.batch_hetero([g1, g2])
assert bg.ntypes == g2.ntypes
assert bg.etypes == g2.etypes
assert bg.canonical_etypes == g2.canonical_etypes
assert bg.batch_size == 2
# Test number of nodes
for ntype in bg.ntypes:
assert bg.batch_num_nodes(ntype) == [
g1.number_of_nodes(ntype), g2.number_of_nodes(ntype)]
assert bg.number_of_nodes(ntype) == (
g1.number_of_nodes(ntype) + g2.number_of_nodes(ntype))
# Test number of edges
assert bg.batch_num_edges('plays') == [
g1.number_of_edges('plays'), g2.number_of_edges('plays')]
assert bg.number_of_edges('plays') == (
g1.number_of_edges('plays') + g2.number_of_edges('plays'))
for etype in bg.canonical_etypes:
assert bg.batch_num_edges(etype) == [
g1.number_of_edges(etype), g2.number_of_edges(etype)]
assert bg.number_of_edges(etype) == (
g1.number_of_edges(etype) + g2.number_of_edges(etype))
# Test relabeled nodes
for ntype in bg.ntypes:
assert list(F.asnumpy(bg.nodes(ntype))) == list(range(bg.number_of_nodes(ntype)))
# Test relabeled edges
src, dst = bg.all_edges(etype=('user', 'follows', 'user'))
assert list(F.asnumpy(src)) == [0, 1, 4, 5]
assert list(F.asnumpy(dst)) == [1, 2, 5, 6]
src, dst = bg.all_edges(etype=('user', 'follows', 'developer'))
assert list(F.asnumpy(src)) == [0, 1, 4, 5]
assert list(F.asnumpy(dst)) == [1, 2, 4, 5]
src, dst = bg.all_edges(etype='plays')
assert list(F.asnumpy(src)) == [0, 1, 2, 3, 4, 5, 6]
assert list(F.asnumpy(dst)) == [0, 0, 1, 1, 2, 2, 3]
# Test unbatching graphs
g3, g4 = dgl.unbatch_hetero(bg)
check_equivalence_between_heterographs(g1, g3)
check_equivalence_between_heterographs(g2, g4) | 5,330,867 |
def vector_quaternion_arrays_allclose(vq1, vq2, rtol=1e-6, atol=1e-6, verbose=0):
"""Check if all the entries are close for two vector quaternion numpy arrays.
    Quaternions are a way of representing rigid body 3D rotations that is more
numerically stable and compact in memory than other methods such as a 3x3
rotation matrix.
This special comparison function is needed because for quaternions q == -q.
Vector Quaternion numpy arrays are expected to be in format
[x, y, z, qx, qy, qz, qw].
# Params
vq1: First vector quaternion array to compare.
vq2: Second vector quaternion array to compare.
rtol: relative tolerance.
atol: absolute tolerance.
# Returns
True if the transforms are within the defined tolerance, False otherwise.
"""
vq1 = np.array(vq1)
vq2 = np.array(vq2)
q3 = np.array(vq2[3:])
q3 *= -1.
v3 = vq2[:3]
vq3 = np.array(np.concatenate([v3, q3]))
comp12 = np.allclose(np.array(vq1), np.array(vq2), rtol=rtol, atol=atol)
comp13 = np.allclose(np.array(vq1), np.array(vq3), rtol=rtol, atol=atol)
if verbose > 0:
print(vq1)
print(vq2)
print(vq3)
print(comp12, comp13)
return comp12 or comp13 | 5,330,868 |
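# A minimal sketch of the q == -q equivalence the function exists to handle:
vq_a = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]   # identity rotation at the origin
vq_b = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0]  # same rotation, negated quaternion
assert vector_quaternion_arrays_allclose(vq_a, vq_b)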
def test_gen_mocs_epoch_stmoc_failure(mocker) -> None:
"""
Tests the generation of all MOCs and STMOCs for a single epoch.
Also tests the update of the full STMOC.
Args:
mocker: The pytest mock mocker object.
Returns:
None
"""
base_stmoc = '/path/to/stmoc.fits'
mocker_exists = mocker.patch('vasttools.tools.Path.exists',
side_effect=[True, False]
)
with pytest.raises(Exception) as excinfo:
vtt.gen_mocs_epoch('1',
'',
'',
epoch_path='.',
base_stmoc=base_stmoc
)
exc_str = "{} does not exist".format(base_stmoc)
assert str(excinfo.value) == exc_str | 5,330,869 |
def compute_coherence_values(dictionary, corpus, texts, limit, start=2, step=3):
"""
Compute c_v coherence for various number of topics
Parameters:
----------
dictionary : Gensim dictionary
corpus : Gensim corpus
texts : List of input texts
limit : Max num of topics
Returns:
-------
model_list : List of LDA topic models
coherence_values : Coherence values corresponding to the LDA model with respective number of topics
"""
coherence_values = []
model_list = []
for num_topics in range(start, limit, step):
        Lda = gensim.models.ldamodel.LdaModel
        # train on the corpus argument with the loop's num_topics, as the
        # docstring describes, rather than a fixed topic count
        model = Lda(corpus, num_topics=num_topics, id2word=dictionary, passes=50)
model_list.append(model)
coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')
coherence_values.append(coherencemodel.get_coherence())
return model_list, coherence_values | 5,330,870 |
def bot_reply(ctx, smtp, fallback_delivto):
"""reply to stdin mail as a bot.
This command processes an incoming e-mail message for the bot
and sends a reply if the bot was addressed in a "To" header.
If the bot was only addressed in the CC header it will process
the mail but not reply.
If the bot replies, it will always do a group-reply: it replies
to the sender and CCs anyone that was in CC or To.
The reply message contains an Autocrypt header and details of what
was found and understood from the incoming mail.
If it is a group-reply and it is encrypted then the bot
also adds Autocrypt-Gossip headers as mandated by the Level 1 spec.
"""
account_manager = get_account_manager(ctx)
msg = mime.parse_message_from_file(sys.stdin)
From = msg["From"]
log = SimpleLog()
with log.s("reading headers", raising=True):
delivto = mime.get_delivered_to(msg, fallback_delivto)
log("determined Delivered-To: " + delivto)
maxheadershow = 60
with log.s("Got your mail, here is what i found in headers:"):
for hn in ("Message-ID Delivered-To From To Subject "
"Date DKIM-Signature Autocrypt").split():
if hn in msg:
value = trunc_string(msg.get(hn).replace("\n", "\\n"), maxheadershow)
log("{:15s} {}".format(hn + ":", value))
else:
log("{:15s} NOTFOUND".format(hn))
with log.s("And this is the mime structure i saw:"):
log(mime.render_mime_structure(msg))
account = account_manager.get_account_from_emailadr(delivto)
r = account.process_incoming(msg)
with log.s("processed incoming mail for account {}:".format(r.account.name)):
if r.pah.error:
log(r.pah.error)
else:
ps = r.peerstate
log("found peeraddr={} keyhandle={} prefer_encrypt={}".format(
ps.addr, ps.public_keyhandle, ps.prefer_encrypt))
log("\n")
reply_to_encrypted = False
if msg.get_content_type() == "multipart/encrypted":
log("Your message was encrypted.")
decrypted = account.decrypt_mime(msg)
log("It was encrypted to the following keys:{}".format(
decrypted.keyinfos))
reply_to_encrypted = True
log("have a nice day, {}".format(delivto))
log("")
log("P.S.: my current key {} is in the Autocrypt header of this reply."
.format(r.account.ownstate.keyhandle))
recom = account.get_recommendation([From], reply_to_encrypted)
ui_recommendation = recom.ui_recommendation()
log("P.P.S.: For this reply the encryption recommendation is {}"
.format(ui_recommendation))
if delivto not in msg["To"]:
# if we are not addressed directly we don't reply (to prevent
# loops between CCed bots)
return
addrlist = mime.get_target_fulladr(msg)
newlist = []
for realname, addr in set(addrlist):
if addr and addr != delivto:
newlist.append(mime.formataddr((realname, addr)))
reply_msg = mime.gen_mail_msg(
From=delivto, To=[From], Cc=newlist,
Subject="Re: " + msg.get("Subject", ""),
_extra={"In-Reply-To": msg["Message-ID"]},
Autocrypt=account.make_ac_header(delivto),
payload=six.text_type(log), charset="utf8",
)
if ui_recommendation == 'encrypt':
r = account.encrypt_mime(reply_msg, [From] + newlist)
reply_msg = r.enc_msg
assert mime.is_encrypted(reply_msg)
if smtp:
host, port = smtp.split(",")
send_reply(host, int(port), reply_msg)
click.echo("send reply through smtp: {}".format(smtp))
else:
click.echo(reply_msg.as_string()) | 5,330,871 |
def autocorr_quad(w, f, t, method = 'direct'):
"""
Calculate the vacuum state autocorrelation function
for propagation on a quadratic potential energy surface.
Parameters
----------
w : array_like
The harmonic frequency (in energy units) of each mode.
f : array_like
The derivative array, including up to at least second derivatives.
t : array_like
The time array, in units where :math:`\\hbar = 1`. (Alternatively,
the `t` array can be identified with :math:`t/\\hbar`.)
method : {'direct','integral','integral_log'}
The calculation method. See Notes
Returns
-------
C : ndarray
The autocorrelation function, :math:`C(t)`.
See also
--------
corr_quad_recursion_elements : Calculate quadratic correlator recursion coefficients
~nitrogen.math.spech_fft : Calculate the spectrum of an autocorrelation function
Notes
-----
For `method` = 'direct', a direct expression based on
a discontinuity-free BCH disentangling formula is used.
For `method` = 'integral', an alternative method is
used to first calculate the logarithmic derivative
of :math:`C(t)`. This is numerically integrated
by a cumulative version of Simpson's rule and then
exponentiated.
For `method` = 'integral_log', the integrated logarithm
is returned directly, without exponentiation. That is,
the branch-cut discontinuity-free logarithm of :math:`C(t)`
is returned.
For the integral methods, a sufficiently small time-step
in the `t` array is required for accurate results. The direct
method does not rely on numerical integration.
"""
n = len(w)
# Extract the gradient and hessian
F,K = _partition_darray(f, n)
h0 = f[0] # The energy offset
t = np.array(t)
if t.ndim != 1:
raise ValueError('t must be 1-dimensional')
if method == 'integral' or method == 'integral_log':
#
# Calculate the correlation function by
# integration of its logarithmic derivative
#
# Check for a valid time vector
if t[0] != 0:
raise ValueError('t[0] must be zero for integral methods')
if np.any( np.abs(np.diff(t) - (t[1]-t[0])) > 1e-8):
raise ValueError('Time vector must be uniformly spaced.')
#
# Calculate the correlator recursion coefficients
r,S,T = corr_quad_recursion_elements(w, f, t)
# Calculate the ODE coefficient sum
sumIH = 0
for i in range(n):
sumIH += 0.25 * ( (w[i] + K[i,i]) - (w[i] - K[i,i])*(r[:,i]**2 - T[:,i,i]))
sumIH += (-np.sqrt(0.5)) * F[i] * r[:,i]
for j in range(i): # j < i
sumIH += 0.5 * K[i,j] * (r[:,i] * r[:,j] - T[:,i,j])
g = (-1j) * sumIH # the derivative of the logarithm
#
# C'(t) = g * C(t)
#
# --> C(t) = exp[ integral of g(t) ]
#
# Integrate the logarithm via
# Simpson's 1/3 rule, cumulatively
#
logC = nitrogen.math.cumsimp(g, t)
# Add the energy offset phase correction
logC += -1j * h0 * t
if method == 'integral_log':
# Return the continuous logarithm of C
return logC
else:
# Return C
C = np.exp(logC)
return C
elif method == 'direct':
#
# Calculate the correlation function by
# the direct method
#
# First, calculate the propagation normal
# modes
rtW = np.diag(np.sqrt(w))
irW = np.diag(1/np.sqrt(w))
z2,L = np.linalg.eigh(rtW @ K @ rtW)
# Force L to have positive determinant!
if np.linalg.det(L) < 0:
L[:,0] *= -1
omega = np.sqrt(np.abs(z2))
sigma = np.array([1 if z2[i] > 0 else -1j for i in range(n)])
rtSO = np.diag(np.sqrt(sigma * omega))
irSO = np.diag(1/np.sqrt(sigma*omega))
LamP = irW @ L @ rtSO + rtW @ L @ irSO
LamM = irW @ L @ rtSO - rtW @ L @ irSO
iLamP = np.linalg.inv(LamP)
C = np.zeros_like(t, dtype = np.complex128)
def eta(x):
#
# eta(x) = (e^x - 1) / x
#
result_small = 1.0 + x/2 + x**2/6 + x**3/24 + x**4/120 + x**5/720 + x**6/5040
result_big = np.expm1(x) / (x + 1e-20)
result = np.choose(abs(x) > 1e-2,
[result_small, result_big])
return result
def zeta(x):
#
# zeta(x) = (e^x - x - 1) / x**2
#
result_small = 1/2 + x/6 + x**2/24 + x**3/120 + x**4/720 + x**5/5040 + x**6/40320
result_big = (np.expm1(x) - x) / (x + 1e-20)**2
result = np.choose(abs(x) > 1e-2,
[result_small, result_big])
return result
# Force all time values to be non-negative.
# Afterward, negative time can be evaluated
# via the hermiticity of C(t)
for i in range(len(t)):
tp = abs(t[i]) # The current time value
# The exp^- diagonal
em = np.diag(np.exp(-1j * tp * sigma*omega))
#
# Calculate det(exp[A'])**1/2:
#
# A factoring and eigendecomposition
# procedure ensures there are no
# branch-cut discontinuities
#
quad_term = np.exp(-1j * tp * sum(sigma*omega) / 2)
quad_term *= np.linalg.det(LamP / 2) ** -1
M = iLamP.T @ em @ iLamP @ LamM @ em @ LamM.T
evs = np.linalg.eigvals(M)
for a in evs:
quad_term *= np.sqrt(1 - a)**-1
#
# Calculate the gradient contributions
#
hp = -1j*tp*h0 # Trivial phase contribution
# The eta^- and zeta^- diagonal matrices
etam = np.diag(eta(-1j*tp*sigma*omega))
zetam = np.diag(zeta(-1j*tp*sigma*omega))
# First term
temp1 = iLamP @ LamM @ em @ LamM.T @ iLamP.T
G1 = -etam @ temp1 @ np.linalg.inv(np.eye(n) - em@temp1) @ etam
# Second term
temp2 = iLamP.T @ em @ iLamP @ LamM @ em @ LamM.T
temp3 = etam @ LamM.T @ np.linalg.inv(np.eye(n) - temp2) @ iLamP.T @ etam
G2 = -(temp3 + temp3.T)
# Third term
G3 = -2*zetam - etam @ LamM.T @ \
np.linalg.inv(np.eye(n) - temp2) @ \
iLamP.T @ em @ iLamP @ LamM @ etam
Gamma = G1 + G2 + G3
Fbar = (LamP - LamM).T @ F
hp += (tp/4)**2 * np.dot(Fbar, Gamma @ Fbar)
C[i] = quad_term * np.exp(hp)
# For negative time values, correct
# for the complex conjugate
if t[i] < 0:
C[i] = np.conjugate(C[i])
return C
else:
raise ValueError('Invalid method option') | 5,330,872 |
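The series fallbacks in the `eta` and `zeta` helpers above guard against catastrophic cancellation at small arguments; a standalone check with plain NumPy (the value of x is illustrative):

import numpy as np

x = 1e-12
naive = (np.exp(x) - 1.0) / x      # exp(x) - 1 cancels badly: ~1e-4 relative error here
series = 1.0 + x/2 + x**2/6        # truncated Taylor series stays at machine precision
stable = np.expm1(x) / x           # expm1 avoids the cancellation entirely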
def train(**kwargs):
"""
This will be a two step process, first train disc only, then train gen only
:param kwargs:
:return:
"""
batch_size = kwargs['batch_size']
gen = kwargs['gen']
disc = kwargs['discriminator']
device = kwargs['device']
gen.to(device)
disc.to(device)
lr = kwargs['lr']
iters = int(kwargs['iterations'])
save_after = int(kwargs['save_after'])
models_dir = kwargs['save_dir']
    os.makedirs(models_dir, exist_ok=True)  # avoids the bare except that swallowed all errors
half = int(batch_size/2)
data_loader, _ = get_dataloaders(batch_size=half)
disc_optimizer = opt.Adam(params=disc.parameters(), lr=lr)
gen_optimizer = opt.Adam(params=gen.parameters(), lr=lr)
criterion = nn.BCELoss()
real, fake = 0, 1 # labels
########################### train discriminator ############################
print('INFO: Training the discriminator (Generator frozen)')
for k in range(iters):
batch_loss, batch_correct = 0, 0
for j, (real_images, _) in enumerate(data_loader):
if real_images.shape[0] != half:
continue # skip the last part
# train on real and fake images alike
real_images = real_images.to(device)
            noise = torch.randn(half, 1).to(device)  # torch.Tensor(half, 1) would be uninitialized memory, not noise
# freeze the generator for now
with torch.no_grad():
gen.eval()
gen_images = gen(noise).to(device)
            disc.zero_grad() # it's important not to accumulate any gradients
random_permutes = torch.randperm(batch_size)
batch_images = torch.cat((real_images, gen_images), dim=0)
batch_labels = torch.cat((torch.Tensor(half).fill_(real), torch.Tensor(half).fill_(fake)), dim=0)
batch_images = batch_images[random_permutes].to(device)
batch_labels = batch_labels[random_permutes].to(device)
            batch_out = disc(batch_images)
            # argmax is non-differentiable, so it would cut the graph and make
            # loss.backward() fail; assuming disc emits two logits per sample,
            # use the softmax probability of the "fake" class with BCELoss instead
            batch_out = torch.softmax(batch_out, dim=1)[:, fake]
            # print(batch_out.dtype, batch_labels.dtype)
            loss = criterion(batch_out, batch_labels)
            batch_correct += batch_out.round().eq(batch_labels.view_as(batch_out)).double().sum().item()/batch_size
batch_loss += loss.item()
# print(gen_images.shape)
############## order of steps is important
loss.backward()
clip_grad_norm_(disc.parameters(), max_norm=0.5)
disc_optimizer.step()
##############
if j % 10 == 0:
print('log: epoch {}: '.format(k)+'({})/({})'.format(j, len(data_loader)))
print('\n({})/({}) loss = {}, accuracy = {}'.format(k+1, iters,
batch_loss/len(data_loader),
batch_correct*100/len(data_loader)))
torch.save(disc.state_dict(), os.path.join(models_dir, 'model-{}.pt'.format(k+1)))
########################### train generator ################################
print('INFO: Training the generator (Discriminator frozen)')
# for k in range(iters):
# batch_loss, batch_correct = 0, 0
# for j, (real_images, _) in enumerate(data_loader):
# # train on real and fake images alike
# noise = torch.Tensor(half, 1)
# gen_images = gen(noise)
# # freeze the discriminator now
# with torch.no_grad():
# disc.eval()
# gen.zero_grad() # it's important to not to accumulate any gradients
# random_permutes = torch.randperm(batch_size)
# batch_images = torch.cat((real_images, gen_images), dim=0)
# batch_labels = torch.cat((torch.Tensor(half).fill_(real), torch.Tensor(half).fill_(fake)), dim=0)
# batch_images = batch_images[random_permutes]
# batch_labels = batch_labels[random_permutes]
# batch_out = disc(batch_images)
# batch_out = torch.argmax(batch_out, dim=1).float()
# # print(batch_out.dtype, batch_labels.dtype)
# loss = criterion(batch_out, batch_labels)
# batch_correct += batch_out.eq(batch_labels.view_as(batch_out))
# batch_loss += loss.item()
# # print(gen_images.shape)
# print('\r'+'log: epoch {}: '.format(k)+'|'*j+'({})/({})'.
# format(j, len(data_loader)), end='')
# print('\n({})/({}) loss = {}, accuracy = {}'.format(k+1, iters, batch_loss/iters, batch_correct/iters))
# torch.save(disc.state_dict(), os.path.join(models_dir, 'model-{}.pt'.format(k+1)))
#
# # if k % save_after == 0 and k > 0:
# images, results = gen_images.detach().numpy(), torch.argmax(discriminated_probs, dim=1).detach().numpy()
# images = images.squeeze(1).transpose(0, 1, 2)
# # print(np.unique(images))\
# for j in range(16):
# pl.subplot(4, 4, j + 1)
# # print(images.shape)
# pl.imshow(images[j, :, :])
# pl.title(results[j])
# pl.axis('off')
# pl.show()
pass | 5,330,873 |
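A minimal sketch of the generator phase the commented-out block above was working toward, reusing the locals defined earlier in `train` (`gen`, `disc`, `criterion`, `gen_optimizer`, `half`, `real`, `fake`, `data_loader`, `models_dir`); the softmax-probability convention matches the discriminator phase, and the generator is trained against the `real` label so it learns to fool the frozen discriminator:

for k in range(iters):
    epoch_loss = 0.0
    for j, _ in enumerate(data_loader):
        noise = torch.randn(half, 1).to(device)
        gen.zero_grad()                  # clear generator gradients before each step
        gen_images = gen(noise)
        disc.eval()                      # freeze discriminator batch-norm/dropout behavior
        probs = torch.softmax(disc(gen_images), dim=1)[:, fake]
        # target the "real" label: the generator improves by driving the
        # discriminator's fake-probability for its samples toward zero
        target = torch.full((half,), float(real), device=device)
        loss = criterion(probs, target)
        loss.backward()                  # gradients flow through the frozen disc into gen
        gen_optimizer.step()
        epoch_loss += loss.item()
    print('({})/({}) generator loss = {}'.format(k + 1, iters, epoch_loss / len(data_loader)))
    torch.save(gen.state_dict(), os.path.join(models_dir, 'gen-model-{}.pt'.format(k + 1)))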
def add_project(body):
"""
POST /api/projects
:param body:
:return:
"""
try:
return {
'title': 'Succeed to Create Project',
'detail': svcProject.add_project(body)
}, 200
except Exception as e:
raise DefaultError(title='Failed to Create Project', detail=str(e)) | 5,330,874 |
def new_client(user_id: str, session=DBSession) -> Client:
""" from user_id get a miniflux client
:param user_id: telegram chat_id
:param session: database session class
:type user_id: Union[int, str]
    :raise UserNotBindError: raised when the user has not bound a miniflux account
"""
session = session()
user = session.query(User).filter(User.id == user_id).first()
session.close()
if user is None:
raise UserNotBindError
return Client(SERBER_ADDR, username=user.username, password=user.password) | 5,330,875 |
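A hypothetical call, assuming the chat id was previously bound; `get_feeds` is a standard method of the miniflux client:

try:
    client = new_client("123456789")
    feeds = client.get_feeds()
except UserNotBindError:
    pass  # ask the user to bind a miniflux account first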
def create_utility_meters(
hass: HomeAssistantType,
energy_sensor: Union[VirtualEnergySensor, GroupedEnergySensor],
sensor_config: dict,
) -> list[UtilityMeterSensor]:
"""Create the utility meters"""
utility_meters = []
if not sensor_config.get(CONF_CREATE_UTILITY_METERS):
return []
meter_types = sensor_config.get(CONF_UTILITY_METER_TYPES)
for meter_type in meter_types:
name = f"{energy_sensor.name} {meter_type}"
entity_id = f"{energy_sensor.entity_id}_{meter_type}"
_LOGGER.debug("Creating utility_meter sensor: %s", name)
# Below is for BC purposes. Can be removed somewhere in the future
if AwesomeVersion(__short_version__) < "2021.10":
utility_meter = VirtualUtilityMeterSensor(
energy_sensor.entity_id, name, meter_type, entity_id
)
else:
            if DATA_UTILITY not in hass.data:
hass.data[DATA_UTILITY] = {}
hass.data[DATA_UTILITY][entity_id] = {
CONF_SOURCE_SENSOR: energy_sensor.entity_id,
CONF_METER_TYPE: meter_type,
CONF_TARIFFS: [],
CONF_METER_NET_CONSUMPTION: False,
}
utility_meter = UtilityMeterSensor(
parent_meter=entity_id,
source_entity=energy_sensor.entity_id,
name=name,
meter_type=meter_type,
meter_offset=DEFAULT_OFFSET,
net_consumption=False,
)
hass.data[DATA_UTILITY][entity_id][DATA_TARIFF_SENSORS] = [utility_meter]
utility_meters.append(utility_meter)
return utility_meters | 5,330,876 |
def plot_all_K(name_or_paths, *args):
"""Plot the typical K-N plot.
Args:
name_or_paths: a list of str
"""
if isinstance(name_or_paths, str):
name_or_paths = [name_or_paths]
all_name_or_paths = name_or_paths[:]
for arg in args:
all_name_or_paths += arg
all_name_or_paths = set(all_name_or_paths)
# (n_orns, Ks)
results = {n: _get_K_vs_N(n) for n in all_name_or_paths}
def _plot_all_K(name_or_paths):
fig = plt.figure(figsize=(4, 2.5))
ax = fig.add_axes([0.2, 0.2, 0.7, 0.7])
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
for name_or_path in name_or_paths:
ax = _plot_K_vs_N(ax, name_or_path, results)
ax.legend(bbox_to_anchor=(0., 1.05), loc=2, frameon=False)
ax.set_xlabel('Number of ORs (N)')
ax.set_ylabel('Expansion Input Degree (K)')
xticks = np.array([25, 50, 100, 200, 500, 1000])
ax.set_xticks(np.log(xticks))
ax.set_xticklabels([str(t) for t in xticks])
yticks = np.array([3, 10, 30, 100])
ax.set_yticks(np.log(yticks))
ax.set_yticklabels([str(t) for t in yticks])
ax.set_xlim(np.log([15, 1700]))
ax.set_ylim(np.log([2, 300]))
ax.grid(True, alpha=0.5)
name = '.'.join([Path(n).name for n in name_or_paths])
# All save to the same directory
save_fig('scaling', name)
_plot_all_K(name_or_paths)
for arg in args:
_plot_all_K(arg) | 5,330,877 |
def builtin_divmod(a, b):
"""Divide two numbers and take the quotient and remainder."""
aa, bb = BType.commonize(a, b)
dv, mv = divmod(aa.value, bb.value)
d = type(aa)(dv)
m = type(aa)(mv)
return (d, m) | 5,330,878 |
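The wrapper mirrors Python's built-in semantics after coercing both operands to a common `BType` (the coercion class comes from the surrounding interpreter); in plain Python:

q, r = divmod(17, 5)    # q == 3, r == 2, since 17 == 3*5 + 2
q, r = divmod(-17, 5)   # q == -4, r == 3: the remainder takes the divisor's sign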
def format_adjacency(G: nx.Graph, adj: np.ndarray, name: str) -> xr.DataArray:
"""
Format adjacency matrix nicely.
Intended to be used when computing an adjacency-like matrix
off a graph object G.
For example, in defining a func:
```python
def my_adj_matrix_func(G):
adj = some_adj_func(G)
return format_adjacency(G, adj, "xarray_coord_name")
```
## Assumptions
1. `adj` should be a 2D matrix of shape (n_nodes, n_nodes)
1. `name` is something that is unique amongst all names used
in the final adjacency tensor.
## Parameters
- `G`: NetworkX-compatible Graph
- `adj`: 2D numpy array
- `name`: A unique name for the kind of adjacency matrix
being constructed.
Gets used in xarray as a coordinate in the "name" dimension.
## Returns
- An XArray DataArray of shape (n_nodes, n_nodes, 1)
"""
expected_shape = (len(G), len(G))
if adj.shape != expected_shape:
raise ValueError(
"Adjacency matrix is not shaped correctly, "
f"should be of shape {expected_shape}, "
f"instead got shape {adj.shape}."
)
adj = np.expand_dims(adj, axis=-1)
nodes = list(G.nodes())
return xr.DataArray(
adj,
dims=["n1", "n2", "name"],
coords={"n1": nodes, "n2": nodes, "name": [name]},
) | 5,330,879 |
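A quick usage sketch with networkx; the graph, adjacency function, and coordinate name are all illustrative:

import networkx as nx

G = nx.erdos_renyi_graph(5, 0.4, seed=0)
adj = nx.to_numpy_array(G)                        # (n_nodes, n_nodes) binary adjacency
da = format_adjacency(G, adj, "binary_adjacency")
assert da.shape == (5, 5, 1)                      # one slice along the "name" dimension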
def delete_contact(contacts: list[list[str]]) -> None:
"""Delete a contact from the contact list."""
contact_number: int = int(input(NUMBER_PROMPT))
if not _is_contact_number(contact_number, contacts):
display_contact_error(contact_number)
else:
contact: list[str] = contacts.pop(contact_number - 1)
_save_to_csv(contacts)
display_deleted(contact) | 5,330,880 |
def batch_local_stats_from_coords(coords, mask):
"""
Given neighborhood neighbor coordinates, compute bond distances,
2-hop distances, and angles in local neighborhood (this assumes
the central atom has coordinates at the origin)
"""
    one_hop_ds, two_hop_d_mat = batch_distance_metrics_from_coords(coords, mask)
    angles = batch_angles_from_coords(coords, mask)
    return one_hop_ds, two_hop_d_mat, angles | 5,330,881 |
def resolve_sender_entities(act, lexical_distance=0):
"""
Given an Archive's activity matrix, return a dict of lists, each containing
message senders ('From' fields) that have been groups to be
probably the same entity.
"""
    # senders ordered by descending total activity
senders = act.sum(0).sort_values(ascending=False)
return resolve_entities(
senders, from_header_distance, threshold=lexical_distance
) | 5,330,882 |
def ddpg(
env: gym.Env,
agent: ContinuousActorCriticAgent,
epochs: int,
max_steps: int,
buffer_capacity: int,
batch_size: int,
alpha: float,
gamma: float,
polyak: float,
act_noise: float,
verbose: bool,
) -> List[float]:
"""Trains an agent using Deep Deterministic Policy Gradients algorithm
:param env: The environment to train the agent in
:type env: gym.Env
:param agent: The agent to train
:type agent: ContinuousActorCriticAgent
:param epochs: The number of epochs to train the agent for
:type epochs: int
:param max_steps: The max number of steps per episode
:type max_steps: int
:param buffer_capacity: Max capacity of the experience replay buffer
:type buffer_capacity: int
:param batch_size: Batch size to use of experiences from the buffer
:type batch_size: int
:param gamma: The discount factor
:type gamma: float
:param alpha: The learning rate
:type alpha: float
:param polyak: Interpolation factor in polyak averaging for target networks
:type polyak: float
:param act_noise: Standard deviation for Gaussian exploration noise added to policy at training time
:type act_noise: float
:param verbose: Whether to run in verbose mode or not
:type verbose: bool
:return: The total reward per episode
:rtype: List[float]
"""
pi_optimizer = optim.Adam(agent.pi.parameters(), lr=alpha)
q_optimizer = optim.Adam(agent.q.parameters(), lr=alpha)
target_pi = deepcopy(agent.pi).to(device)
target_q = deepcopy(agent.q).to(device)
experience_buf = Buffer(buffer_capacity)
total_rewards = []
for _ in tqdm(range(epochs), disable=not verbose):
s = torch.from_numpy(env.reset()).float()
done = False
reward = 0.0
steps = 0
while not done and steps < max_steps:
# Collect and save experience from the environment
# Add Gaussian noise to the action for exploration
a = agent.act(s) + torch.normal(mean=0.0, std=act_noise, size=(1,))
s_prime, r, done, _ = env.step(a)
s_prime = torch.from_numpy(s_prime).float()
reward += r
experience_buf.save(Experience(s, a, r, s_prime, done))
# Learn from previous experiences
experiences = experience_buf.get(batch_size)
loss = 0.0
states = torch.stack([e.state for e in experiences]).to(device)
actions = torch.stack([e.action for e in experiences]).to(device)
rewards = [e.reward for e in experiences]
next_states = torch.stack([e.next_state for e in experiences]).to(device)
dones = [e.done for e in experiences]
q_values = agent.q(torch.cat([states, actions], dim=-1))
            next_qvalues = target_q(torch.cat([next_states, target_pi(next_states)], dim=-1)).detach()  # target values are constants w.r.t. the optimized parameters
# Keep a copy of the current Q-values to be used for the TD targets
td_targets = q_values.clone()
# Compute TD targets
for index in range(batch_size):
# Terminal states do not have a future value
if dones[index]:
next_qvalues[index] = 0.0
td_targets[index] = rewards[index] + gamma * next_qvalues[index]
# Compute TD error and loss (MSE)
loss = (td_targets - q_values) ** 2
loss = loss.mean()
# Update the value function
q_optimizer.zero_grad()
loss.sum().backward()
q_optimizer.step()
# Update the policy
# We use the negative loss because policy optimization is done using gradient _ascent_
# This is because in policy gradient methods, the "loss" is a performance measure that is _maximized_
loss = -agent.q(torch.cat([states, agent.pi(states)], dim=-1))
loss = loss.mean()
pi_optimizer.zero_grad()
loss.backward()
pi_optimizer.step()
# Update target networks with polyak averaging
with torch.no_grad():
for target_p, p in zip(target_pi.parameters(), agent.pi.parameters()):
target_p.copy_(polyak * target_p + (1.0 - polyak) * p)
with torch.no_grad():
for target_p, p in zip(target_q.parameters(), agent.q.parameters()):
target_p.copy_(polyak * target_p + (1.0 - polyak) * p)
s = s_prime
steps += 1
total_rewards.append(reward)
return total_rewards | 5,330,883 |
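A hypothetical training run; the `ContinuousActorCriticAgent` constructor below is an assumption (only `agent.act`, `agent.pi`, and `agent.q` are used above), and `Pendulum-v1` is simply a convenient continuous-control environment:

import gym

env = gym.make("Pendulum-v1")
agent = ContinuousActorCriticAgent(obs_dim=3, act_dim=1)  # assumed constructor signature
rewards = ddpg(
    env, agent,
    epochs=100, max_steps=200,
    buffer_capacity=100_000, batch_size=64,
    alpha=1e-3, gamma=0.99, polyak=0.995,
    act_noise=0.1, verbose=True,
)
print("mean reward over last 10 episodes:", sum(rewards[-10:]) / 10)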
def import_module_from_path(full_path, global_name):
"""
Import a module from a file path and return the module object.
Allows one to import from anywhere, something ``__import__()`` does not do.
The module is added to ``sys.modules`` as ``global_name``.
:param full_path:
The absolute path to the module .py file
:param global_name:
The name assigned to the module in sys.modules. To avoid
confusion, the global_name should be the same as the variable to which
you're assigning the returned module.
"""
path, filename = os.path.split(full_path)
module, ext = os.path.splitext(filename)
sys.path.append(path)
try:
mymodule = __import__(module)
sys.modules[global_name] = mymodule
except ImportError:
raise ImportError('Module could not be imported from %s.' % full_path)
finally:
del sys.path[-1]
return mymodule | 5,330,884 |
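For example (the path and the module's attributes are hypothetical):

helpers = import_module_from_path('/opt/project/plugins/helpers.py', 'helpers')
helpers.run()   # call a hypothetical function defined in the loaded module

import sys
assert sys.modules['helpers'] is helpers   # registered under the global name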
def civic_eid26_statement():
"""Create a test fixture for CIViC EID26 statement."""
return {
"id": "civic.eid:26",
"description": "In acute myloid leukemia patients, D816 mutation is associated with earlier relapse and poorer prognosis than wildtype KIT.", # noqa: E501
"direction": "supports",
"evidence_level": "civic.evidence_level:B",
"proposition": "proposition:001",
"variation_origin": "somatic",
"variation_descriptor": "civic.vid:65",
"disease_descriptor": "civic.did:3",
"method": "method:001",
"supported_by": ["pmid:16384925"],
"type": "Statement"
} | 5,330,885 |
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# Use a dummy metaclass that replaces itself with the actual metaclass.
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, '_TemporaryClass', (), {}) | 5,330,886 |
def validate_user(username, minlen):
"""Checks if the received username matches the required conditions."""
    if not isinstance(username, str):
raise TypeError("username must be a string")
if minlen < 1:
raise ValueError("minlen must be at least 1")
"""
Username should not be shorter than minlen
Username should always starts with letter and should consists of letters, numbers, dots and underscore
"""
if (len(username) < minlen):
return False
if not re.match(r'^[a-z][a-z0-9._]*$', username): # made changes in Regex
return False
return True | 5,330,887 |
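A few illustrative calls; note the regex above only accepts lowercase letters:

validate_user("alice_01", 3)   # True: starts with a letter, allowed characters only
validate_user("1alice", 3)     # False: must start with a letter
validate_user("al", 3)         # False: shorter than minlen
validate_user("Alice", 3)      # False: the regex matches lowercase only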
def neg_mae_macro(y_trues, y_preds, labels, topics):
"""
    Mean absolute error is lower-is-better, so we return its negative value
    to share the same higher-is-better interface as the other metrics when
    tuning on dev data.
"""
return -mae_macro(y_trues, y_preds, labels, topics) | 5,330,888 |
def transcribe_from_google(tmp_dir):
"""
Transcribes assets in given tmp directory into text assets via Google Cloud Transcribe
"""
def tmp(path): return os.path.join(tmp_dir, path)
script = "#!/bin/bash\n \
export GOOGLE_APPLICATION_CREDENTIALS=~/.gcloud/gcloud-alexa-cli.json \n \
export ACCESS_TOKEN=`gcloud auth application-default print-access-token` \n \
echo $ACCESS_TOKEN \n \
"
with open(tmp("google-token.sh"), 'w') as fw:
fw.write(script)
# feeling dirty...
st = os.stat(tmp("google-token.sh"))
os.chmod(tmp("google-token.sh"), st.st_mode | stat.S_IEXEC)
p = subprocess.Popen("./google-token.sh", shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=tmp_dir)
stdout, stderr = p.communicate()
log("transcribe_from_google: executed google-token script, result: %s", p.returncode)
if p.returncode != 0:
log("transcribe_from_google: ERROR with google-token! %s", stderr)
return
    token = stdout.strip().decode()  # decode the token bytes directly
log("transcribe_from_google: token is %s", token)
request_content = """{
"config": {
"encoding":"FLAC",
"sampleRateHertz": 16000,
"languageCode": "en-US",
"enableWordTimeOffsets": false
},
"audio": {
"content":"%s" } }""" % (open(tmp("result.base64"), 'r').read())
with open(tmp("request-transcribe.json"), 'w') as transcribe_write:
transcribe_write.write(request_content)
script = """#!/bin/bash
curl -s -H "Content-Type: application/json"\\
-H "Authorization: Bearer %s"\\
https://speech.googleapis.com/v1/speech:recognize \\
-d@request-transcribe.json > transcript-output.json
""" % (token)
with open(tmp("google-transcribe.sh"), 'w') as fw:
fw.write(script)
# feeling dirty...
st = os.stat(tmp("google-transcribe.sh"))
os.chmod(tmp("google-transcribe.sh"), st.st_mode | stat.S_IEXEC)
p = subprocess.Popen("./google-transcribe.sh", shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=tmp_dir)
stdout, stderr = p.communicate()
log("transcribe_from_google: executed google-transcribe script, result: %s", p.returncode)
if not os.path.exists(tmp("transcript-output.json")):
log("transcribe_from_google: Could not find transcript-output.json")
return
transcript = json.load(open(tmp("transcript-output.json"), 'r'))
    if transcript is None or "results" not in transcript:
log("transcribe_from_google: No results from transcription")
return
log("transcribe_from_google: returning transcript text from %s", transcript)
text = transcript["results"][0]["alternatives"][0]["transcript"]
log("transcribe_from_google: %s", text)
return text | 5,330,889 |
def clean_data(df):
"""Cleans the a dataset provided as a DataFrame and returns the cleaned DataFrame.
Cleaning includes expanding the categories and cleaning them up.
Args:
df (DataFrame): Data, containing categories as a single column, as well as messages
Returns:
DataFrame: Cleaned DataFrame
"""
# Prepare data
categories = df.categories.str.split(';', expand = True)
row = categories.loc[0]
category_colnames = [x[:-2] for x in row]
categories.columns = category_colnames
for column in categories:
categories[column] = categories[column].str.slice(start=-1)
categories[column] = categories[column].astype(int)
df.drop(columns='categories', inplace=True)
df = pd.merge(left= df, right=categories, left_on=df.index, right_on=categories.index).drop('key_0', axis=1)
# Remove duplicates
df.drop_duplicates(subset='id', inplace=True)
# Remove rows that have a 2 in related, as this is assumed to be faulty data
implausible_related_count = (df['related'] == 2).sum()
df = df.loc[df.related != 2]
print(f'Dropped {implausible_related_count} faulty messages.')
return df | 5,330,890 |
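The split/slice parsing above in miniature, on a single raw category string (the category names are illustrative):

import pandas as pd

s = pd.Series(["related-1;request-0;offer-0"])
cats = s.str.split(';', expand=True)                    # one column per category
names = [x[:-2] for x in cats.loc[0]]                   # ['related', 'request', 'offer']
cats.columns = names
values = cats.loc[0].str.slice(start=-1).astype(int)    # related=1, request=0, offer=0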
def _validate_float(mapping: Mapping[str, Any],
ref: str) -> Optional[SchemaError]:
"""
Validate the definition of a float value.
:param mapping: representing the type definition to be validated
:param ref: reference to the type definition
:return: error, if any
"""
if 'minimum' in mapping and 'maximum' in mapping:
minimum = mapping['minimum']
maximum = mapping['maximum']
if minimum > maximum:
return SchemaError(
"minimum (== {}) > maximum".format(minimum), ref=ref)
excl_min = False if 'exclusive_minimum' not in mapping \
else bool(mapping['exclusive_minimum'])
excl_max = False if 'exclusive_maximum' not in mapping \
else bool(mapping['exclusive_maximum'])
if excl_min and excl_max:
if minimum == maximum:
return SchemaError(
message=(
"minimum (== {}) == maximum and "
"both are set to exclusive").format(minimum),
ref=ref)
elif not excl_min and excl_max:
if minimum == maximum:
return SchemaError((
"minimum (== {}) == maximum and "
"maximum is set to exclusive").format(minimum),
ref=ref)
elif excl_min and not excl_max:
if minimum == maximum:
return SchemaError((
"minimum (== {}) == maximum and "
"maximum is set to exclusive").format(minimum),
ref=ref)
elif not excl_min and not excl_max:
# If minimum == maximum it is ok to have
# >= minimum and <= maximum as a constraint.
pass
else:
raise AssertionError("Unexpected code path")
return None | 5,330,891 |
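For instance, a definition whose exclusive bound collapses the range is rejected (the `ref` string is illustrative):

err = _validate_float(
    {'minimum': 1.0, 'maximum': 1.0, 'exclusive_minimum': True},
    ref='#/definitions/ratio')
assert err is not None   # "minimum (== 1.0) == maximum and minimum is set to exclusive"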
def greatest_product(number):
"""
Finds the greatest product of 5 consecutive digits in the 1000-digit number
"""
largest = 1
for n in range(1, 997, 1):
product = compute_product(number % (10 ** 5))
if product > largest:
largest = product
number //= 10
    print('largest is', largest)
    return largest | 5,330,892 |
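The modulo/floor-division pair implements a right-to-left sliding window over the digits; `compute_product` is assumed to multiply the digits of its argument:

n = 9698786
n % 10**5    # 98786  -- the current five-digit window (the last five digits)
n // 10      # 969878 -- dropping the last digit slides the window left by one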
def test_cray_crus_session_create_usage_info(cli_runner, rest_mock):
""" Test `cray crus` to make sure the expected commands are available """
runner, cli, _ = cli_runner
result = runner.invoke(cli, ['crus', 'session', 'create', '--help'])
outputs = [
"cli crus session create [OPTIONS]",
"--upgrade-template-id",
"--failed-label",
"--upgrading-label",
"--starting-label",
"--upgrade-step-size",
"--workload-manager-type",
]
for out in outputs:
assert out in result.output
assert result.exit_code == 0 | 5,330,893 |
def tokenize_document(document: str) -> typing.List[str]:
"""
Helper method to tokenize the document.
:param document: The input document represented as a string.
:return: A list of tokens.
"""
try:
return nltk.tokenize.word_tokenize(document)
except LookupError:
nltk.download('punkt')
return nltk.tokenize.word_tokenize(document) | 5,330,894 |
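For example:

tokenize_document("The quick brown fox.")
# ['The', 'quick', 'brown', 'fox', '.']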
def get_type_name_value(obj):
"""
Returns object type name from LLDB value.
It returns type name with asterisk if object is a pointer.
:param lldb.SBValue obj: LLDB value object.
:return: Object type name from LLDB value.
:rtype: str | None
"""
return None if obj is None else obj.GetTypeName() | 5,330,895 |
def fastqprint(fastq):
"""
Printing a fastq file
"""
    for record in SeqIO.parse(fastq, "fastq"):
        print("%s %s" % (record.id, record.seq)) | 5,330,896 |
def make_day(day: int, input_dir: Path, session_path: Path):
"""Make or read the input file for the day.
Reading preferred so we don't hammer the
    fine folks' server at Advent of Code.
"""
    if not input_dir.exists():
        input_dir.mkdir()
    fname = FILE_NAMER.format(day)
    day_path = input_dir / fname
    if day_path.exists():
        # prefer reading the cached input over re-fetching it
        return
    session_id = load_session_id(session_path)
    txt = do_get(day, session_id)
    with open(day_path, 'w') as f:
        f.write(txt.strip()) | 5,330,897 |
def test_radius_auth_unreachable(dev, apdev):
"""RADIUS Authentication server unreachable"""
params = hostapd.wpa2_eap_params(ssid="radius-auth")
params['auth_server_port'] = "18139"
hostapd.add_ap(apdev[0]['ifname'], params)
hapd = hostapd.Hostapd(apdev[0]['ifname'])
connect(dev[0], "radius-auth", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"])
if ev is None:
raise Exception("Timeout on EAP start")
logger.info("Checking for RADIUS retries")
time.sleep(4)
mib = hapd.get_mib()
if "radiusAuthClientAccessRequests" not in mib:
raise Exception("Missing MIB fields")
if int(mib["radiusAuthClientAccessRetransmissions"]) < 1:
raise Exception("Missing RADIUS Authentication retransmission")
if int(mib["radiusAuthClientPendingRequests"]) < 1:
raise Exception("Missing pending RADIUS Authentication request") | 5,330,898 |
def encodePartList( part_instance: ObjectInstance,
vh_group_list: List[int]) -> dict:
""" Used for copying and pasting
TODO: unify encodePart and encodePartList
Args:
part_instance: The ``Part`` ``ObjectInstance``, to allow for instance
specific property copying
vh_group_list: List of virtual_helices IDs to encode to
be used with copy and paste serialization
Returns:
Dictionary representing the virtual helices with ordered lists of
properties, strands, etc to allow for copy and pasting becoming
different ID'd virtual helices
"""
part = part_instance.reference()
vh_group_list.sort()
# max_id_number_of_helices = part.getMaxIdNum()
# vh_insertions = part.insertions()
'''NOTE This SHOULD INCLUDE 'grid_type' key
'''
group_props = part.getModelProperties().copy()
assert('grid_type' in group_props)
if not group_props.get('is_lattice', True):
vh_props, origins, directions = part.helixProperties()
group_props['virtual_helices'] = vh_props
group_props['origins'] = origins
group_props['directions'] = directions
else:
vh_props, origins, directions = part.helixProperties(vh_group_list)
group_props['virtual_helices'] = vh_props
group_props['origins'] = origins
group_props['directions'] = directions
xover_list = []
strand_list = []
prop_list = []
vh_list = []
vh_group_set = set(vh_group_list)
def filter_xovers(x):
return (x[0] in vh_group_set and x[3] in vh_group_set)
def filter_vh(x):
return x[0] in vh_group_set
for id_num in vh_group_list:
offset_and_size = part.getOffsetAndSize(id_num)
if offset_and_size is None:
# add a placeholder
strand_list.append(None)
prop_list.append(None)
else:
offset, size = offset_and_size
vh_list.append((id_num, size))
fwd_ss, rev_ss = part.getStrandSets(id_num)
fwd_idxs, fwd_colors = fwd_ss.dump(xover_list)
rev_idxs, rev_colors = rev_ss.dump(xover_list)
strand_list.append((fwd_idxs, rev_idxs))
prop_list.append((fwd_colors, rev_colors))
# end for
remap = {x: y for x, y in zip(vh_group_list,
range(len(vh_group_list))
)}
group_props['vh_list'] = vh_list
group_props['strands'] = {'indices': strand_list,
'properties': prop_list
}
filtered_insertions = filter(filter_vh, part.dumpInsertions())
group_props['insertions'] = [(remap[x], y, z) for x, y, z in filtered_insertions]
filtered_xover_list = filter(filter_xovers, xover_list)
group_props['xovers'] = [(remap[a], b, c, remap[x], y, z)
for a, b, c, x, y, z in filtered_xover_list]
instance_props = part_instance.properties()
group_props['instance_properties'] = instance_props
vh_order = filter(lambda x: x in vh_group_set, group_props['virtual_helix_order'])
vh_order = [remap[x] for x in vh_order]
group_props['virtual_helix_order'] = vh_order
external_mods_instances = filter(filter_vh,
part.dumpModInstances(is_internal=False))
group_props['external_mod_instances'] = [(remap[w], x, y, z)
for w, x, y, z in external_mods_instances]
""" TODO Add in Document modifications
"""
return group_props | 5,330,899 |