content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def save_figure(path: str) -> None:
    """Save the current matplotlib figure to *path*.

    The figure is written at 300 dpi with a tight bounding box; the image
    format is inferred from the path's extension by matplotlib.
    """
    logging.info(f'Saving image: {path}')
    plt.savefig(path, dpi=300, bbox_inches='tight')
def merge_dictionaries(dict1, dict2):
    """Recursively merge ``dict2`` into ``dict1``.

    Nested lists are combined via ``merge_lists`` and nested dicts are merged
    recursively.  For simple types (ints, strings, ...) the value already
    present in ``dict1`` is preserved.

    Args:
        dict1, dict2: dictionaries to merge
    Returns:
        ``dict1``, updated in place with the merged content.
    """
    for key, value in dict2.items():
        if key not in dict1:
            dict1[key] = value
        elif isinstance(value, list):
            dict1[key] = merge_lists(dict1[key], value)
        elif isinstance(value, dict):
            dict1[key] = merge_dictionaries(dict1[key], value)
        # simple types: keep the value that is already in dict1
    return dict1
def build(model_def, model_name, optimizer, loss_name, custom_objects=None):
    """Build and compile tf.keras model instance(s) for FastEstimator.

    Args:
        model_def (function): function returning a tf.keras model, or path of a saved model file (h5)
        model_name (str, list, tuple): model name(s)
        optimizer (str, optimizer, list, tuple): optimizer(s)
        loss_name (str, list, tuple): loss name(s)
        custom_objects (dict): mapping of names to custom classes/functions used when loading a model
    Returns:
        model: model(s) compiled by FastEstimator (a single model when only one was given)
    """
    with fe.distribute_strategy.scope() if fe.distribute_strategy else NonContext():
        # Either load a serialized model from disk or call the factory function.
        if isinstance(model_def, str):
            built = tf.keras.models.load_model(model_def, custom_objects=custom_objects)
        else:
            built = model_def()
        models = to_list(built)
        names = to_list(model_name)
        optimizers = to_list(optimizer)
        losses = to_list(loss_name)
        assert len(models) == len(names) == len(optimizers) == len(losses)
        compiled = [_fe_compile(m, name, opt, loss)
                    for m, name, opt, loss in zip(models, names, optimizers, losses)]
        return compiled[0] if len(compiled) == 1 else compiled
def check_audio_file(audio_file):
    """
    Check if the audio file contents and format match the needs of the speech service. Currently we only support
    16 KHz, 16 bit, MONO, PCM audio format. All others will be rejected.
    :param audio_file: file to check
    :return: audio duration in seconds if the file matches the expected format
    :raises InvalidAudioFormatError: if the file is not a readable wave file or
        does not match the expected format
    """
    # Verify that all wave files are in the right format
    try:
        with wave.open(audio_file) as my_wave:
            frame_rate = my_wave.getframerate()
            if frame_rate >= 8000 and my_wave.getnchannels() in [1, 2] \
                    and my_wave.getsampwidth() == 2 and my_wave.getcomptype() == 'NONE':
                audio_duration = my_wave.getnframes() / frame_rate
                return audio_duration
            else:
                raise InvalidAudioFormatError(
                    "File {0} is not in the right format, it must be: Mono/Stereo, 16bit, PCM, 8KHz or above. "
                    "Found: ChannelCount={1}, SampleWidth={2}, CompType={3}, FrameRate={4}. Ignoring input!".format(
                        audio_file,
                        my_wave.getnchannels(),
                        my_wave.getsampwidth(),
                        my_wave.getcomptype(),
                        frame_rate
                    )
                )
    except InvalidAudioFormatError:
        # Bug fix: the detailed format error raised above was previously caught
        # by the generic handler below and re-wrapped with a misleading
        # "Invalid wave file" message. Propagate it unchanged instead.
        raise
    except Exception as e:
        raise InvalidAudioFormatError("Invalid wave file {0}, reason: {1} :{2}".format(audio_file, type(e).__name__, e))
def uniform(name):
    """Look up the uniform prior bounds for the given molecule.

    Delegates to ``findUniform`` from util.py with the 'd_h' key.

    Input: name of molecule
    Output: array of length [2] with the upper and lower bounds for the uniform prior
    """
    return findUniform(name, 'd_h')
def merge_two_dicts(x, y):
    """Return a new dict with the keys of *x* and *y*; on conflict *y* wins."""
    return {**x, **y}
def num_utterances(dataset: "ds.DatasetSplit"):
    """Returns the total number of utterances in the dataset.

    Sums ``len(interaction)`` over every interaction in ``dataset.examples``.
    The annotation is a string forward reference so the function no longer
    evaluates ``ds`` at definition time.
    """
    # Generator expression: no need to materialize an intermediate list.
    return sum(len(interaction) for interaction in dataset.examples)
def __virtual__():
    """
    Only return the virtual module name if requests and boto are installed.
    """
    return __virtualname__ if HAS_LIBS else False
def register():
    """Register a new user.

    GET: render the registration form.
    POST: verify the submitted username and email are unused, store the new
    user with a hashed password, then redirect to the login page.
    """
    # User reached route via POST (as by submitting a form via POST)
    if request.method == "POST":
        username = request.form.get("username")
        email = request.form.get("email")
        password = request.form.get("password")
        # Look up any existing rows with the same username/email.
        # NOTE(review): no blank-input validation here — presumably enforced
        # by the form; confirm empty username/password cannot reach this point.
        rows = db.execute("SELECT * FROM users WHERE username = ?",username)
        email_check = db.execute("SELECT * FROM users WHERE email = ?",email)
        # Check if Username is taken or not
        if len(rows) != 0:
            flash("Username Already Taken!", "danger")
            return redirect("/register")
        # Check if Email is taken or not
        if len(email_check) != 0:
            flash("Email Already Taken!", "danger")
            return redirect("/register")
        # Create a hashed password based on sha256 hashing function and store it into database
        hashed_password = generate_password_hash(password, method='pbkdf2:sha256', salt_length=8)
        db.execute("INSERT INTO users(email, username, hash) VALUES(?, ?, ?)",
                   email, username, hashed_password)
        # Redirect user back to login page after registering
        flash("Register Successfully!", "success")
        return redirect("/login")
    # User reached route via GET (as by clicking a link or via redirect)
    else:
        return render_template("register.html")
def _csd_multitaper(X, sfreq, n_times, window_fun, eigvals, freq_mask, n_fft,
                    adaptive):
    """Compute cross spectral density (CSD) using multitaper module.
    Computes the CSD for a single epoch of data.
    Parameters
    ----------
    X : ndarray, shape (n_channels, n_times)
        The time series data consisting of n_channels time-series of length
        n_times.
    sfreq : float
        The sampling frequency of the data in Hertz.
    n_times : int
        Number of time samples
    window_fun : ndarray
        Window function(s) of length n_times. This corresponds to first output
        of `dpss_windows`.
    eigvals : ndarray | float
        Eigenvalues associated with window functions.
    freq_mask : ndarray
        Which frequencies to use.
    n_fft : int
        Length of the FFT.
    adaptive : bool
        Use adaptive weights to combine the tapered spectra into PSD.
    Returns
    -------
    csds : ndarray
        The vectorized (upper-triangular) cross-spectral densities for the
        selected frequencies, scaled by ``1 / sfreq``.
    """
    x_mt, _ = _mt_spectra(X, window_fun, sfreq, n_fft)
    if adaptive:
        # Compute adaptive weights
        _, weights = _psd_from_mt_adaptive(x_mt, eigvals, freq_mask,
                                           return_weights=True)
        # Tiling weights so that we can easily use _csd_from_mt()
        weights = weights[:, np.newaxis, :, :]
        weights = np.tile(weights, [1, x_mt.shape[0], 1, 1])
    else:
        # Do not use adaptive weights
        weights = np.sqrt(eigvals)[np.newaxis, np.newaxis, :, np.newaxis]
        x_mt = x_mt[:, :, freq_mask]
    # Calculating CSD
    # Tiling x_mt so that we can easily use _csd_from_mt()
    x_mt = x_mt[:, np.newaxis, :, :]
    x_mt = np.tile(x_mt, [1, x_mt.shape[0], 1, 1])
    # y_mt is x_mt with the two channel axes swapped, so (x, y) covers all pairs.
    y_mt = np.transpose(x_mt, axes=[1, 0, 2, 3])
    weights_y = np.transpose(weights, axes=[1, 0, 2, 3])
    csds = _csd_from_mt(x_mt, y_mt, weights, weights_y)
    # FIXME: don't compute full matrix in the first place
    csds = np.array([_sym_mat_to_vector(csds[:, :, i])
                     for i in range(csds.shape[-1])]).T
    # Scaling by sampling frequency for compatibility with Matlab
    csds /= sfreq
    return csds
def test_list_base64_binary_length_nistxml_sv_iv_list_base64_binary_length_1_2(mode, save_output, output_format):
    """
    Type list/base64Binary is restricted by facet length with value 5.
    """
    # Round-trips the NIST schema + instance pair through the generated
    # bindings; assert_bindings presumably fails the test on any mismatch.
    assert_bindings(
        schema="nistData/list/base64Binary/Schema+Instance/NISTSchema-SV-IV-list-base64Binary-length-1.xsd",
        instance="nistData/list/base64Binary/Schema+Instance/NISTXML-SV-IV-list-base64Binary-length-1-2.xml",
        class_name="NistschemaSvIvListBase64BinaryLength1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def _build_tmp_access_args(method, ip, ttl, port, direction, comment):
    """
    Builds the cmd args for temporary access/deny opts.

    :param method: access method; resolved to the CLI flag via ``_get_opt``
    :param ip: IP address the temporary rule applies to
    :param ttl: time-to-live for the temporary rule
    :param port: optional port restriction (appended as ``-p <port>``)
    :param direction: optional traffic direction (appended as ``-d <direction>``)
    :param comment: optional comment (appended as ``#<comment>``)
    :return: the assembled argument string
    """
    opt = _get_opt(method)
    # f-strings instead of str.format, consistent with the rest of the module.
    args = f"{opt} {ip} {ttl}"
    if port:
        args += f" -p {port}"
    if direction:
        args += f" -d {direction}"
    if comment:
        args += f" #{comment}"
    return args
def print_useful_remote_info(manager, experiment_name) -> None:
    """Print post-run instructions for a remote experiment.

    Once the local process of the remote run is over, this information is
    shown to the user: how to download the results via scp, and what will
    happen to the factory/orchestrator instances given their configured
    timeouts (positive = terminate after N hours idle, -1 = keep running,
    0 = terminate when the experiment ends).
    """
    logger.info("###########################################################")
    logger.info("                       IMPORTANT")
    logger.info("###########################################################")
    logger.info("")
    logger.info("MODEL DOWNLOADING")
    logger.info("-----------------")
    logger.info("Once experiment is over, you can download all results by executing:")
    logger.info(
        f"scp -r -i <key> {manager.orchestrator.username}@{manager.orchestrator.host}:"
        f"{manager.orchestrator.get_home_path()}/{experiment_name}/synced_results <local_dst>")
    logger.info("")
    logger.info("Where <key> is the private key used to create the cluster and <local_dst>")
    logger.info("is a local directory to store the results.")
    logger.info("")
    logger.info("INSTANCES")
    logger.info("---------")
    # Factories lifecycle, driven by manager.factories_timeout.
    if manager.factories_timeout > 0:
        logger.info(
            f"The factories instances will be terminated after {manager.factories_timeout} " +
            "hour of unusage.")
    elif manager.factories_timeout == -1:
        logger.info("The factories instances will remain running " +
                    "(you are in charge of terminating them)")
    elif manager.factories_timeout == 0:
        logger.info("The factories instance will be terminated once the experiment is over")
    # Orchestrator lifecycle, driven by manager.orchestrator_timeout.
    if manager.orchestrator_timeout > 0:
        logger.info(f"The orchestrator will be terminated after {manager.orchestrator_timeout} " +
                    "hour of unusage.")
    elif manager.orchestrator_timeout == -1:
        logger.info("The orchestrator instance will remain running " +
                    "(you are in charge of terminating it)")
    elif manager.orchestrator_timeout == 0:
        logger.info("The orchestrator instance will be terminated once the experiment is over")
def add_two_values(value1, value2):
    """Return the sum of two integers.

    Arguments:
        value1: first integer value e.g. 10
        value2: second integer value e.g. 2
    """
    total = value1 + value2
    return total
def test_four_snps_two_independent_trees_perfect_two_snps_not_causal():
    """
    Two independent causal SNPs each sharing a haplotype with another, different SNP
    via perfect phenotype associations plus an extra non-causal SNP (so five SNPs total)
    Y = 0.5 * ( X1 && X3 ) + 0.5 * ( X2 && X4 )
    This should yield two haplotypes from different trees, where X3 occurs in the first
    and X4 occurs in the second
    """
    # a function for splitting a list into a list of pairs
    split_list = lambda pair: [pair[i : i + 2] for i in range(0, len(pair), 2)] + [
        [1, 1],
        [0, 0],
    ]
    # Enumerate every 8-bit genotype combination and append the two constant SNPs.
    gens = _create_fake_gens(
        np.array(list(map(split_list, product([0, 1], repeat=8))), dtype=np.bool_)
    )
    gts = gens.data
    # Phenotype is the weighted AND of the two causal SNP pairs.
    phens = _create_fake_phens(
        0.5 * (gts[:, 0] & gts[:, 2]).sum(axis=1)
        + 0.5 * (gts[:, 1] & gts[:, 3]).sum(axis=1)
    )
    # TODO: we need to handle this case, somehow
    # # run the treebuilder and extract the haplotypes
    # builder = TreeBuilder(gens, phens)
    # builder.run()
    # tree = builder.tree
    # haps = _view_tree_haps(tree)
    # Intentionally failing placeholder until the treebuilder supports this case.
    assert False
def init_filters(namespace=None):
    """Run each filter's optional ``init`` method before the site is built.

    A filter module is initialized at most once: modules lacking an ``init``
    attribute are simply marked as initialized and skipped.

    :param namespace: mapping of filter name -> filter object; defaults to
        ``bf.config.filters``.
    """
    import inspect  # local import; only needed for the module check below

    if namespace is None:
        namespace = bf.config.filters
    for name, filt in list(namespace.items()):
        # inspect.ismodule replaces the fragile type-name string comparison
        # (type(filt.mod).__name__ == "module") used previously.
        if "mod" in filt \
                and inspect.ismodule(filt.mod) \
                and not filt.mod.__initialized:
            try:
                init_method = filt.mod.init
            except AttributeError:
                # No init hook: mark done so we don't re-check next time.
                filt.mod.__initialized = True
                continue
            logger.debug("Initializing filter: " + name)
            init_method()
            filt.mod.__initialized = True
def combine_nearby_breakends(events, distance=5000):
    """
    1d clustering of breakend positions, prioritizing assembled breakpoint coords.

    Breakends sharing (chrom, orientation) are grouped whenever consecutive
    sorted positions lie within `distance` of each other. If any event in a
    group was assembled, only the assembled coordinates are kept; otherwise
    the group collapses to its mean position.
    """
    breakends = []
    # NOTE(review): get_positions is assumed to return a DataFrame with
    # "chrom", "orientation", "pos" and boolean "assembled" columns — verify.
    positions = get_positions(events)
    for (chrom, orientation), cur_events in positions.groupby(["chrom", "orientation"]):
        cur_events = cur_events.sort_values("pos")
        # Cumulative sum of "gap > distance" flags yields a cluster id per row.
        groups = ((cur_events["pos"]-cur_events["pos"].shift()) > distance).cumsum()
        for i, cur_group in cur_events.groupby(groups):
            if cur_group["assembled"].any():
                # Trust assembled coordinates; drop the unassembled ones.
                cur_combined = cur_group.loc[cur_group["assembled"]].copy()
                cur_combined["assembled"] = True
            else:
                # No assembled support: represent the cluster by its mean position.
                cur_orientations = cur_group["orientation"].unique()
                cur_combined = pandas.DataFrame({"orientation":cur_orientations})
                cur_combined["chrom"] = chrom
                cur_combined["pos"] = int(cur_group["pos"].mean())
                cur_combined["assembled"] = False
            breakends.append(cur_combined)
    return pandas.concat(breakends, ignore_index=True)
def label_list(repo=None):
    """Dump labels within the given repo(s), blank-line separated."""
    api = github.api(config=None)  # TODO: config object
    first = True
    for reponame in (repo or []):
        if not first:
            click.echo('')
        first = False
        dump_labels(api, reponame)
def get_mongo_database(connection, database_name):
    """ Access the database
    Args:
        connection (MongoClient): Mongo connection to the database
        database_name (str): database to be accessed
    Returns:
        Database: the Database object, or None if it could not be accessed
    """
    try:
        return connection.get_database(database_name)
    except Exception:
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit are
        # not silently swallowed; any driver error still yields None.
        return None
def make_mappings() -> Dict[str, Callable[[], None]]:
    """Create the mapping from sample name to the function that runs it."""
    # noinspection PyDictCreation
    mappings: Dict[str, Callable[[], None]] = {}
    extlib.regist_modules(mappings)
    return mappings
def S(state):
    """Stringify state: return a human-readable (optionally ANSI-colored)
    name for the given State, or None if unknown.
    """
    names = {
        State.IDLE: "IDLE",
        State.TAKING_OFF: "TAKING_OFF",
        State.HOVERING: "HOVERING",
        State.WAITING_ON_ASSIGNMENT: "WAITING_ON_ASSIGNMENT",
        State.FLYING: "FLYING",
        State.IN_FORMATION: "IN_FORMATION",
        State.GRIDLOCK: "GRIDLOCK",
        # Terminal states are highlighted in green/red respectively.
        State.COMPLETE: "\033[32;1mCOMPLETE\033[0m",
        State.TERMINATE: "\033[31;1mTERMINATE\033[0m",
    }
    return names.get(state)
def sram_cacti(
        mem_sz_bytes=16,  # in byte
        origin_config_file=None,
        target_config_file=None,
        result_file=None
        ):
    """
    Run cacti with the input configuration. Works for SRAM, whose size is
    either calculated to match the bw or pre-specified.

    :param mem_sz_bytes: SRAM size in bytes, written as the leading
        "-size (bytes) N" line of the generated config
    :param origin_config_file: template cacti config to copy the rest from
    :param target_config_file: path of the generated config file
    :param result_file: path where cacti's stdout is redirected
    """
    # Write the size line first, then append the template config verbatim.
    # Context managers guarantee the files are closed even on error.
    with open(origin_config_file, 'r') as original, \
            open(target_config_file, 'w') as target:
        target.write("-size (bytes) " + str(mem_sz_bytes) + "\n")
        for entry in original:
            target.write(entry)
    if not os.path.exists("./simEff/cacti7/cacti"):
        # Bug fix: with shell=True a list argument only runs its first element
        # on POSIX, so "all" was silently dropped. Pass one command string.
        subprocess.call("make all", shell=True, cwd="./simEff/cacti7/")
        time.sleep(20)
    # Run a per-config copy of the binary so concurrent invocations don't clash.
    rep_cmd = "cp ./cacti ./cacti_" + target_config_file
    subprocess.call([rep_cmd], shell=True, cwd="./simEff/cacti7/")
    final_cmd = "./cacti_" + target_config_file + " -infile ../../" + target_config_file + " > ../../" + result_file
    subprocess.call([final_cmd], shell=True, cwd="./simEff/cacti7/")
    rm_cmd = "rm -rf ./cacti_" + target_config_file
    subprocess.call([rm_cmd], shell=True, cwd="./simEff/cacti7/")
def eval_classif_cross_val_roc(clf_name, classif, features, labels,
                               cross_val, path_out=None, nb_steps=100):
    """ compute mean ROC curve on cross-validation schema
    http://scikit-learn.org/0.15/auto_examples/plot_roc_crossval.html
    :param str clf_name: name of selected classifier
    :param obj classif: sklearn classifier
    :param ndarray features: features in dimension nb_samples x nb_features
    :param list(int) labels: annotation for samples
    :param object cross_val:
    :param str path_out: path for exporting statistic
    :param int nb_steps: number of thresholds
    :return:
    >>> np.random.seed(0)
    >>> labels = np.array([0] * 150 + [1] * 100 + [3] * 50)
    >>> data = np.tile(labels, (6, 1)).T.astype(float)
    >>> data += np.random.random(data.shape)
    >>> data.shape
    (300, 6)
    >>> from sklearn.model_selection import StratifiedKFold
    >>> cv = StratifiedKFold(n_splits=5, random_state=0)
    >>> classif = create_classifiers()[DEFAULT_CLASSIF_NAME]
    >>> fp_tp, auc = eval_classif_cross_val_roc(DEFAULT_CLASSIF_NAME, classif,
    ...                                         data, labels, cv, nb_steps=10)
    >>> fp_tp
             FP   TP
    0  0.000000  0.0
    1  0.111111  1.0
    2  0.222222  1.0
    3  0.333333  1.0
    4  0.444444  1.0
    5  0.555556  1.0
    6  0.666667  1.0
    7  0.777778  1.0
    8  0.888889  1.0
    9  1.000000  1.0
    >>> auc
    0.94444444444444442
    >>> labels[-50:] -= 1
    >>> data[-50:, :] -= 1
    >>> path_out = 'temp_eval-cv-roc'
    >>> os.mkdir(path_out)
    >>> fp_tp, auc = eval_classif_cross_val_roc(DEFAULT_CLASSIF_NAME, classif,
    ...                                         data, labels, cv, nb_steps=5, path_out=path_out)
    >>> fp_tp
         FP   TP
    0  0.00  0.0
    1  0.25  1.0
    2  0.50  1.0
    3  0.75  1.0
    4  1.00  1.0
    >>> auc
    0.875
    >>> import shutil
    >>> shutil.rmtree(path_out, ignore_errors=True)
    """
    mean_tpr = 0.0
    mean_fpr = np.linspace(0, 1, nb_steps)
    # One-hot encode the labels so each class can be scored one-vs-rest.
    labels_bin = np.zeros((len(labels), np.max(labels) + 1))
    unique_labels = np.unique(labels)
    assert all(unique_labels >= 0), \
        'some labels are negative: %r' % unique_labels
    for lb in unique_labels:
        labels_bin[:, lb] = (labels == lb)
    # since version change the CV is not iterable by default
    if not hasattr(cross_val, '__iter__'):
        cross_val = cross_val.split(features, labels)
    count = 0.
    for train, test in cross_val:
        # Fit a fresh clone per fold so folds do not share fitted state.
        classif_cv = clone(classif)
        classif_cv.fit(np.copy(features[train], order='C'),
                       np.copy(labels[train], order='C'))
        proba = classif_cv.predict_proba(np.copy(features[test], order='C'))
        # Compute ROC curve and area the curve
        for i, lb in enumerate(unique_labels):
            fpr, tpr, _ = metrics.roc_curve(labels_bin[test, lb], proba[:, i])
            fpr = [0.] + fpr.tolist() + [1.]
            tpr = [0.] + tpr.tolist() + [1.]
            # Resample each per-class ROC onto the common FPR grid and accumulate.
            mean_tpr += interp(mean_fpr, fpr, tpr)
            mean_tpr[0] = 0.0
            count += 1
            # roc_auc = metrics.auc(fpr, tpr)
    mean_tpr /= count
    mean_tpr[-1] = 1.0
    # mean_auc = metrics.auc(mean_fpr, mean_tpr)
    df_roc = pd.DataFrame(np.array([mean_fpr, mean_tpr]).T, columns=['FP', 'TP'])
    auc = metrics.auc(mean_fpr, mean_tpr)
    if path_out is not None:
        assert os.path.exists(path_out), 'missing: "%s"' % path_out
        name_csv = NAME_CSV_CLASSIF_CV_ROC.format(clf_name, 'mean')
        df_roc.to_csv(os.path.join(path_out, name_csv))
        name_txt = NAME_TXT_CLASSIF_CV_AUC.format(clf_name, 'mean')
        with open(os.path.join(path_out, name_txt), 'w') as fp:
            fp.write(str(auc))
    logging.debug('cross_val ROC: \n %r', df_roc)
    return df_roc, auc
def trace_get_watched_net(trace, i):
    """
    trace_get_watched_net(Int_trace trace, unsigned int i) -> Int_net
    Parameters
    ----------
    trace: Int_trace
    i: unsigned int
    Returns
    -------
    Int_net: the i-th watched net of the trace.
    """
    # Thin SWIG-style wrapper: delegates directly to the native _api module.
    return _api.trace_get_watched_net(trace, i)
def dist_batch_tasks_for_all_layer_mdl_vs_adapted_mdl(
        mdl: nn.Module,
        spt_x: Tensor, spt_y: Tensor, qry_x: Tensor, qry_y: Tensor,
        layer_names: list[str],
        inner_opt: DifferentiableOptimizer,
        fo: bool,
        nb_inner_train_steps: int,
        criterion: nn.Module,
        metric_comparison_type: str = 'pwcca',
        iters: int = 1,
        effective_neuron_type: str = 'filter',
        downsample_method: Optional[str] = None,
        downsample_size: Optional[int] = None,
        subsample_effective_num_data_method: Optional[str] = None,
        subsample_effective_num_data_param: Optional[int] = None,
        metric_as_sim_or_dist: str = 'dist',
        force_cpu: bool = False,
        training: bool = True,
        copy_initial_weights: bool = False,
        track_higher_grads: bool = False
) -> list[OrderedDict[LayerIdentifier, float]]:
    """Per-task, per-layer distance between the meta-model and its adapted copy.

    For each of the B tasks in the batch, adapts ``mdl`` on that task's support
    set (MAML-style inner loop) and compares the original vs adapted model
    layer-by-layer on the task's query inputs.

    :param mdl: meta-model to adapt and compare against.
    :param spt_x: support inputs; not as a tuple due to having to move them to gpu potentially.
    :param spt_y: support targets.
    :param qry_x: query inputs, used as the comparison data for both models.
    :param qry_y: query targets (unused here beyond shape bookkeeping).
    :param layer_names: layers at which representations are compared.
    :param inner_opt: differentiable optimizer for the inner adaptation loop.
    :param fo: first-order approximation flag for the inner loop.
    :param nb_inner_train_steps: number of inner-loop adaptation steps.
    :param criterion: loss used during adaptation.
    :param metric_comparison_type: representation metric (e.g. 'pwcca').
    :param iters: iterations for the metric computation.
    :param effective_neuron_type: how neurons are grouped for the metric.
    :param downsample_method: optional activation down-sampling method.
    :param downsample_size: optional activation down-sampling size.
    :param subsample_effective_num_data_method: optional data subsampling method.
    :param subsample_effective_num_data_param: optional data subsampling parameter.
    :param metric_as_sim_or_dist: interpret metric as 'sim' or 'dist'.
    :param force_cpu: force metric computation on CPU.
    :param training: keep model in training mode during adaptation.
    :param copy_initial_weights: higher-lib flag for copying initial weights.
    :param track_higher_grads: higher-lib flag for tracking inner-loop grads.
    :return: list of length B; each entry maps layer identifier -> distance.
    """
    # - [B, M, C, H, W] -> [B, L]
    L: int = len(layer_names)
    B: int = spt_x.size(0)
    dists_per_batch_per_layer: list[OrderedDict[LayerIdentifier, float]] = []
    for t in range(B):
        spt_x_t, spt_y_t, qry_x_t, qry_y_t = spt_x[t], spt_y[t], qry_x[t], qry_y[t]
        # Adapt the meta-model to task t using its support set.
        adapted_mdl: FuncModel = get_maml_adapted_model_with_higher_one_task(mdl,
                                                                             inner_opt,
                                                                             spt_x_t, spt_y_t,
                                                                             training,
                                                                             copy_initial_weights,
                                                                             track_higher_grads,
                                                                             fo,
                                                                             nb_inner_train_steps,
                                                                             criterion)
        # - [M, C, H, W], [L] -> [L]
        X: Tensor = qry_x_t
        # Same inputs fed to both models so only the weights differ.
        dists_per_layer: OrderedDict[LayerIdentifier, float] = dist_data_set_per_layer(mdl1=mdl,
                                                                                       mdl2=adapted_mdl,
                                                                                       X1=X,
                                                                                       X2=X,
                                                                                       layer_names1=layer_names,
                                                                                       layer_names2=layer_names,
                                                                                       metric_comparison_type=metric_comparison_type,
                                                                                       iters=iters,
                                                                                       effective_neuron_type=effective_neuron_type,
                                                                                       downsample_method=downsample_method,
                                                                                       downsample_size=downsample_size,
                                                                                       subsample_effective_num_data_method=subsample_effective_num_data_method,
                                                                                       subsample_effective_num_data_param=subsample_effective_num_data_param,
                                                                                       metric_as_sim_or_dist=metric_as_sim_or_dist,
                                                                                       force_cpu=force_cpu
                                                                                       )
        assert len(dists_per_layer) == L
        # - appending to [B, L]
        dists_per_batch_per_layer.append(dists_per_layer)
        #
        # del adapted_mdl
        # gc.collect()
    assert len(dists_per_batch_per_layer) == B
    # Invariant due to asserts: [B, L] list
    # - [B, L] distances ready!
    return dists_per_batch_per_layer
async def send_image_to_room(client, room_id, image):
    """Send image to single room.

    Thin convenience wrapper: delegates to ``send_image_to_rooms`` with a
    one-element room list.

    Arguments:
    ---------
    client (nio.AsyncClient): The client to communicate with Matrix
    room_id (str): The ID of the room to send the message to
    image (str): file name/path of image
    """
    logger.debug(f"send_image_to_room {room_id} {image}")
    await send_image_to_rooms(client, [room_id], image)
def on_connect(client, _userdata, _flags, _respons_code):
    """ MQTT on connect: subscribe to the configured topic.

    Parameters follow the paho-mqtt on_connect callback signature; only
    ``client`` is used, the rest are ignored (hence the leading underscores).
    """
    logger.info('MQTT Connected. Subscribe "%s"', MQTT_TOPIC)
    client.subscribe(MQTT_TOPIC)
def simplify(ply_path, save_ply_path=None, target_perc=0.01, meshlabserver_path="meshlabserver"):
    """ simplify mesh (load ply_path, simplify, and save to save_ply_path)

    Runs meshlabserver with a generated filter script that applies quadric
    edge-collapse decimation (keeping `target_perc` of the faces), deletes
    non-manifold edges, and closes the resulting holes.

    :param ply_path: input .ply mesh
    :param save_ply_path: output path; defaults to overwriting `ply_path`
    :param target_perc: fraction (0..1) of the original face count to keep
    :param meshlabserver_path: meshlabserver executable to invoke
    """
    # MeshLab filter script; {TargetPerc} is substituted via str.format below.
    script = \
        """
<!DOCTYPE FilterScript>
<FilterScript>
<filter name="Simplification: Quadric Edge Collapse Decimation">
<Param name="TargetFaceNum" description="Target number of faces" value="100000" type="RichInt" tooltip="The desired final number of faces."/>
<Param name="TargetPerc" description="Percentage reduction (0..1)" value="{TargetPerc}" type="RichFloat" tooltip="If non zero, this parameter specifies the desired final size of the mesh as a percentage of the initial size."/>
<Param name="QualityThr" description="Quality threshold" value="0.5" type="RichFloat" tooltip="Quality threshold for penalizing bad shaped faces.<br>The value is in the range [0..1]
 0 accept any kind of face (no penalties),
 0.5 penalize faces with quality < 0.5, proportionally to their shape
"/>
<Param name="PreserveBoundary" description="Preserve Boundary of the mesh" value="false" type="RichBool" tooltip="The simplification process tries to do not affect mesh boundaries during simplification"/>
<Param name="BoundaryWeight" description="Boundary Preserving Weight" value="1" type="RichFloat" tooltip="The importance of the boundary during simplification. Default (1.0) means that the boundary has the same importance of the rest. Values greater than 1.0 raise boundary importance and has the effect of removing less vertices on the border. Admitted range of values (0,+inf). "/>
<Param name="PreserveNormal" description="Preserve Normal" value="true" type="RichBool" tooltip="Try to avoid face flipping effects and try to preserve the original orientation of the surface"/>
<Param name="PreserveTopology" description="Preserve Topology" value="false" type="RichBool" tooltip="Avoid all the collapses that should cause a topology change in the mesh (like closing holes, squeezing handles, etc). If checked the genus of the mesh should stay unchanged."/>
<Param name="OptimalPlacement" description="Optimal position of simplified vertices" value="true" type="RichBool" tooltip="Each collapsed vertex is placed in the position minimizing the quadric error.
 It can fail (creating bad spikes) in case of very flat areas. 
If disabled edges are collapsed onto one of the two original vertices and the final mesh is composed by a subset of the original vertices. "/>
<Param name="PlanarQuadric" description="Planar Simplification" value="true" type="RichBool" tooltip="Add additional simplification constraints that improves the quality of the simplification of the planar portion of the mesh, as a side effect, more triangles will be preserved in flat areas (allowing better shaped triangles)."/>
<Param name="PlanarWeight" description="Planar Simp. Weight" value="0.001" type="RichFloat" tooltip="How much we should try to preserve the triangles in the planar regions. If you lower this value planar areas will be simplified more."/>
<Param name="QualityWeight" description="Weighted Simplification" value="false" type="RichBool" tooltip="Use the Per-Vertex quality as a weighting factor for the simplification. The weight is used as a error amplification value, so a vertex with a high quality value will not be simplified and a portion of the mesh with low quality values will be aggressively simplified."/>
<Param name="AutoClean" description="Post-simplification cleaning" value="true" type="RichBool" tooltip="After the simplification an additional set of steps is performed to clean the mesh (unreferenced vertices, bad faces, etc)"/>
<Param name="Selected" description="Simplify only selected faces" value="false" type="RichBool" tooltip="The simplification is applied only to the selected set of faces.
 Take care of the target number of faces!"/>
</filter>
<filter name="Select non Manifold Edges "/>
<filter name="Delete Selected Faces"/>
<filter name="Close Holes">
<Param name="MaxHoleSize" description="Max size to be closed " value="100" type="RichInt" tooltip="The size is expressed as number of edges composing the hole boundary"/>
<Param name="Selected" description="Close holes with selected faces" value="false" type="RichBool" tooltip="Only the holes with at least one of the boundary faces selected are closed"/>
<Param name="NewFaceSelected" description="Select the newly created faces" value="true" type="RichBool" tooltip="After closing a hole the faces that have been created are left selected. Any previous selection is lost. Useful for example for smoothing the newly created holes."/>
<Param name="SelfIntersection" description="Prevent creation of selfIntersecting faces" value="true" type="RichBool" tooltip="When closing an holes it tries to prevent the creation of faces that intersect faces adjacent to the boundary of the hole. It is an heuristic, non intersetcting hole filling can be NP-complete."/>
</filter>
</FilterScript>
"""
    script = script.format(TargetPerc=target_perc)
    # Write the script to a throwaway temp dir; removed again after the run.
    tmp_dir = tempfile.mkdtemp()
    script_path = os.path.join(tmp_dir, "script.mlx")
    with open(script_path, "w") as f:
        f.write(script)
    save_ply_path = ply_path if save_ply_path is None else save_ply_path
    # NOTE(review): paths are interpolated unquoted into a shell command;
    # paths with spaces or shell metacharacters will break — consider shlex.quote.
    os.system(f"{meshlabserver_path} -i {ply_path} -o {save_ply_path} -s {script_path}")
    os.remove(script_path)
    os.rmdir(tmp_dir)
def adjust_payload(tree: FilterableIntervalTree,
                   a_node: FilterableIntervalTreeNode,
                   adjustment_interval: Interval,
                   adjustments: dict,
                   filter_vector_generator: Callable[[dict], int]=None)\
        -> List[FilterableIntervalTreeNode]:
    """
    Adjusts the payload of a node in its tree over a sub-interval.

    The node is split so that `adjustment_interval` receives the adjusted
    payload while the remainder of the node's interval keeps a copy of the
    original payload; adjacent nodes with equal payloads are consolidated.

    :param tree: tree to be adjusted
    :param a_node: node to adjust
    :param adjustment_interval: the interval for which we would like to see the adjustments made
    :param adjustments: the changes that we want to see made to the node's payload (only works for dictionaries);
        numeric values are added to the existing value, others replace it
    :param filter_vector_generator: a function that returns a filter vector for each payload
    :return: the new node covering `adjustment_interval`
        (NOTE(review): annotation says List[...] but a single node is returned — confirm intended contract)
    """
    if filter_vector_generator is None:
        filter_vector_generator = lambda x: a_node.filter_vector
    old_interval = a_node.key
    # Intervals of the old node that fall outside the adjusted region.
    remaining_intervals = old_interval.remove(adjustment_interval)
    new_payload = a_node.payload.copy()
    relevant_keys = adjustments.keys()
    for key in relevant_keys:
        old_property_value = new_payload.get(key)
        # Numbers accumulate; everything else is overwritten.
        if isinstance(old_property_value, numbers.Number):
            new_payload[key] += adjustments[key]
        else:
            new_payload[key] = adjustments[key]
    filter_vector = filter_vector_generator(new_payload)
    # Leftover pieces keep a copy of the original payload/filter vector.
    remaining_nodes = \
        [FilterableIntervalTreeNode(_, a_node.payload.copy(), a_node.filter_vector) for _ in remaining_intervals]
    new_node = FilterableIntervalTreeNode(adjustment_interval, new_payload, filter_vector)
    result_list = [new_node] + remaining_nodes
    result_list = sorted(result_list, key=lambda node: node.key)
    added_nodes = set()
    first_item = result_list[0]
    last_item = result_list[-1]
    first_payload = first_item.payload
    last_payload = last_item.payload
    # Neighbors with equal payloads are candidates for consolidation.
    pre_node = get_predecessor_for_node(tree, a_node, qualifier=lambda x: x == first_payload)
    post_node = get_successor_for_node(tree, a_node, qualifier=lambda x: x == last_payload)
    delete_node(tree, a_node)
    if pre_node and Interval.touches(pre_node.key, first_item.key) and pre_node.payload == first_item.payload:
        consolidate_nodes(pre_node, first_item, tree)
        added_nodes.add(first_item)
    if post_node and Interval.touches(post_node.key, last_item.key) and post_node.payload == last_item.payload:
        consolidate_nodes(last_item, post_node, tree)
        added_nodes.add(last_item)
    # Insert whatever was not absorbed by consolidation.
    for node in result_list:
        if node not in added_nodes:
            add_node(tree, node)
    return new_node
def _read_cropped() -> Tuple[np.ndarray, np.ndarray]:
    """Read the cropped images' data and labels from the cropped-data folder."""
    print('\nReading cropped images.')
    cropped_dir = os.path.join(DATA_FOLDER, FOLDER_CROPPED)
    data_and_labels = _recursive_read_cropped(cropped_dir)
    print('Done reading cropped images.')
    return data_and_labels
def get_max(data, **kwargs):
    """Compute the per-feature maximum of the dataset.

    Assuming the dataset is loaded as type `np.array` with shape
    (num_samples, num_features), each row a sample and each column a feature.

    :param data: Provided dataset.
    :type `np.ndarray`
    :param kwargs: Dictionary of differential privacy arguments \
    for computing the maximum value of each feature across all samples, \
    e.g., epsilon and delta, etc.
    :type kwargs: `dict`
    :return: Vector of shape (1, num_features) holding each feature's maximum.
    :rtype: `np.array` of `float`
    """
    try:
        return np.max(data, axis=0)
    except Exception as ex:
        raise FLException('Error occurred when calculating '
                          'the maximum value. ' + str(ex))
def find_u_from_v(matrix, v, singular_value):
    """
    Finds the u column vector of the U matrix in the SVD UΣV^T.

    Uses the identity A v = σ u, so u = (A v) / σ.

    Parameters
    ----------
    matrix : numpy.ndarray
        Matrix for which the SVD is calculated
    v : numpy.ndarray
        A column vector of V matrix, it is the eigenvector of the Gramian of `matrix`.
    singular_value : float
        A singular value of `matrix` corresponding to the `v` vector.

    Returns
    -------
    numpy.ndarray
        u column vector of the U matrix in the SVD.
    """
    projected = matrix @ v
    return projected / singular_value
def transform(dataset, perm_idx, model, view):
    """
    Encode dataset utterances into latent vectors.

    for view1 utterance, simply encode using view1 encoder
    for view 2 utterances:
    - encode each utterance, using view 1 encoder, to get utterance embeddings
    - take average of utterance embeddings to form view 2 embedding

    Returns (latent_zs, golds): the stacked embeddings as an ndarray and the
    corresponding gold labels.
    """
    model.eval()  # inference mode: disables dropout/batch-norm updates
    latent_zs, golds = [], []
    n_batch = (len(perm_idx) + BATCH_SIZE - 1) // BATCH_SIZE  # ceil division
    for i in range(n_batch):
        indices = perm_idx[i*BATCH_SIZE:(i+1)*BATCH_SIZE]
        # dataset[idx] is assumed to be ((v1_item, v2_item), label) — verify.
        v1_batch, v2_batch = list(zip(*[dataset[idx][0] for idx in indices]))
        golds += [dataset[idx][1] for idx in indices]
        if view == 'v1':
            latent_z = model(v1_batch, encoder='v1')
        elif view == 'v2':
            # Encode each conversation's utterances with the v1 encoder, then
            # average over utterances to get one embedding per conversation.
            latent_z_l = [model(conv, encoder='v1').mean(dim=0) for conv in v2_batch]
            latent_z = torch.stack(latent_z_l)
        latent_zs.append(latent_z.cpu().data.numpy())
    latent_zs = np.concatenate(latent_zs)
    return latent_zs, golds
def double(items: List[str]) -> List[str]:
    """
    Return a new list containing the elements of *items* twice in sequence.
    """
    return [*items, *items]
def system_from_problem(problem: Problem) -> System:
    """Extract the "system" part of a problem.

    Args:
        problem: Problem description
    Returns:
        A :class:`System` object containing a copy of the relevant parts of the problem.
    """
    workload_apps = tuple(workload.app for workload in problem.workloads)
    return System(
        id=problem.id,
        name=problem.name,
        apps=workload_apps,
        instance_classes=problem.instance_classes,
        performances=problem.performances,
    )
def get_service_endpoints(ksc, service_type, region_name):
    """Get endpoints for a given service type from the Keystone catalog.

    :param ksc: An instance of a Keystone client.
    :type ksc: :class: `keystoneclient.v3.client.Client`
    :param str service_type: An endpoint service type to use.
    :param str region_name: A name of the region to retrieve endpoints for.
    :return: mapping of endpoint type ('publicURL'/'internalURL'/'adminURL') to URL
    :raises :class: `keystone_exceptions.EndpointNotFound`
    """
    catalog = {}
    try:
        for endpoint_type in ('publicURL', 'internalURL', 'adminURL'):
            catalog[endpoint_type] = ksc.service_catalog.url_for(
                service_type=service_type, endpoint_type=endpoint_type,
                region_name=region_name)
    except keystone_exceptions.EndpointNotFound:
        # EndpointNotFound is raised for the case where a service does not
        # exist as well as for the case where the service exists but not
        # endpoints.
        log.error('could not retrieve any {} endpoints'.format(service_type))
        raise
    return catalog
def test_bubblesort_order():
    """Check that bubble sort works.
    """
    size = 5
    descending = list(range(size - 1, -1, -1))
    expected = list(range(size))
    # bubblesort sorts in place.
    util.bubblesort(descending)
    assert descending == expected
def get_task_for_node(node_id):
    """ Get a new task or previously assigned task for node """
    # Reuse an ACTIVE task already assigned to this node, if one exists.
    active_task = (Task.query
                   .filter_by(node_id=node_id)
                   .filter_by(status=TaskStatus.ACTIVE)
                   .first())
    if active_task:
        return active_task
    # Otherwise assign a fresh task to the node.
    node = Node.query.filter_by(id=node_id).one()
    return _assign_task(node)
def save_fields(df):
    """Create or update OilField records from a dataframe.

    Each entry in df['name'] identifies a field; every other column of the
    dataframe is copied onto the model instance before saving.  Rows with an
    empty or missing name are skipped.
    """
    start = time.time()
    counter = 0
    for index, field_name in enumerate(df['name']):
        # Check for None *before* calling .strip(); the original order raised
        # AttributeError on a None name.
        if field_name is not None and field_name.strip() != '':
            field, created = OilField.objects.get_or_create(name=field_name)
            for attr in df:
                if attr != 'name':
                    setattr(field, attr, df[attr][index])
            print(f'{field.name} is saved')
            counter += 1
            field.save()
    print(f'\n\nTIME ELAPSED {time.time() - start} SEC on {counter} fields\n\n')
async def test_edit_message_live_location_by_user(bot: Bot):
    """ editMessageLiveLocation method test """
    from .types.dataset import MESSAGE_WITH_LOCATION, LOCATION
    message = types.Message(**MESSAGE_WITH_LOCATION)
    location = types.Location(**LOCATION)
    # editing user's message
    async with FakeTelegram(message_data=True):
        edited = await bot.edit_message_live_location(
            chat_id=message.chat.id, message_id=message.message_id,
            latitude=location.latitude, longitude=location.longitude)
    # Editing someone else's message returns a plain boolean True.
    assert edited is True
def loadProfileInfo(profileInfoPath, remote=None):
    """
    Load profile information from a profileInfo.py file, set a default application information
    file, and set the profile information's host if the application is running remotely
    @param remote: Remote environment information if a remote host is passed to the pytest parser
    @type remote: C{xpedite.transport.remote.Remote}
    """
    from xpedite.profiler.profileInfo import loadProfileInfo
    info = loadProfileInfo(os.path.join(dataDir, profileInfoPath))
    # Default application info file lives next to the test data.
    info.appInfo = os.path.join(dataDir, 'xpedite-appinfo.txt')
    if remote:
        info.appHost = remote.host
    return info
def open_process(verbose, args, outputs):
    """ Run the given arguments as a subprocess. Time out after TIMEOUT
        seconds and report failures or stdout. """
    report_output(outputs["stdout"], verbose, "Writing", args)
    child = None
    if outputs["stderr"] is not None:
        try:
            child = Popen(args, stdout=subprocess.PIPE, shell=True,
                          stdin=subprocess.PIPE, stderr=subprocess.PIPE,
                          universal_newlines=True)
        except OSError as spawn_error:
            report_err(outputs["stderr"], "Failed executing: ", spawn_error)
    if child is not None:
        return child
    # Never even started
    report_err(outputs["stderr"], "Process failed to start")
    return None
def cik_list():
    """Get CIK list and use it as a fixture."""
    stock_list = UsStockList()
    return stock_list
def get_eval_config(hidden_dim,
                    max_input_length=None,
                    num_input_timesteps=None,
                    model_temporal_relations=True,
                    node_position_dim=1,
                    num_input_propagation_steps=None,
                    token_vocab_size=None,
                    node_text_pad_token_id=None,
                    num_transformer_attention_heads=None,
                    num_edge_types=None,
                    num_time_edge_types=None,
                    use_relational_bias=False,
                    max_output_length=None,
                    type_vocab_size=None,
                    output_vocab_size=None,
                    num_output_propagation_steps=None,
                    use_pointer_candidate_masking=False,
                    jax2tf_compatible=None,
                    dropout_rate: float = 0.1):
    """Returns a model config for evaluating, which disables drop-out."""
    # Forward every argument unchanged; the only evaluation-specific setting
    # is is_training=False.
    eval_kwargs = dict(
        is_training=False,
        hidden_dim=hidden_dim,
        max_input_length=max_input_length,
        num_input_timesteps=num_input_timesteps,
        model_temporal_relations=model_temporal_relations,
        node_position_dim=node_position_dim,
        num_input_propagation_steps=num_input_propagation_steps,
        token_vocab_size=token_vocab_size,
        node_text_pad_token_id=node_text_pad_token_id,
        dropout_rate=dropout_rate,
        num_transformer_attention_heads=num_transformer_attention_heads,
        num_edge_types=num_edge_types,
        num_time_edge_types=num_time_edge_types,
        use_relational_bias=use_relational_bias,
        max_output_length=max_output_length,
        type_vocab_size=type_vocab_size,
        output_vocab_size=output_vocab_size,
        num_output_propagation_steps=num_output_propagation_steps,
        use_pointer_candidate_masking=use_pointer_candidate_masking,
        jax2tf_compatible=jax2tf_compatible,
    )
    return create_model_config(**eval_kwargs)
def service_start():
    """Run the gRPC service loop.

    Blocks on ``serviceflag`` until a service request is triggered (e.g. a
    keyword is detected), performs one gRPC request, then waits again.
    Known gRPC status codes are logged and the loop continues; unknown gRPC
    errors are re-raised (terminating the loop).
    """
    while True:
        try:
            logger.debug('wait until triggered')
            # wait until service-request triggered(eg. kws detected)
            serviceflag.wait()
            logger.debug('service starting...')
            grpc_request()
            serviceflag.clear()
        except grpc.RpcError as rpc_error:
            logger.debug('gRPC ERROR')
            if rpc_error.code() == grpc.StatusCode.UNAVAILABLE:
                logger.debug('The service is currently unavailable.')
            elif rpc_error.code() == grpc.StatusCode.CANCELLED:
                logger.debug('Channel closed!')
            elif rpc_error.code() == grpc.StatusCode.UNKNOWN:
                logger.debug('write after end')
            else:
                # Unrecognized status code: propagate (flag is not cleared).
                raise rpc_error
            serviceflag.clear()
            mic_off_ready()
        except Exception as e:
            logger.error('Error: ' + str(e))
        # BUGFIX: the original had an ``else`` clause here that logged
        # "UNKNOWN ERROR" -- but a try-statement's ``else`` runs when *no*
        # exception was raised, so it fired on every successful iteration.
        # Removed as a defect.
def classNew(u_id):
    """
    Allow an ADMIN to create a new class (ADMIN ONLY)
    Returns: none
    """
    db_handle, cursor = dbConnect()
    payload = request.get_json()
    createNewClass(cursor, db_handle, payload)
    dbDisconnect(cursor, db_handle)
    # Empty JSON object response.
    return dumps({})
def show(width, similar_list, by_ratio, show_differences, _argv):
    """Show each pair of matched images, one sequence at a time.

    Args:
        width: display width; validated up front so we fail fast.
        similar_list: iterable of matched image pairs (None entries skipped).
        by_ratio: forwarded to compute_image_differences.
        show_differences: forwarded to compute_image_differences.
        _argv: unused (kept for CLI-dispatch compatibility).
    """
    check_type_width(width)  # fail fast
    # Process all images, show user each sequence one by one
    for similar_pair in similar_list:
        if similar_pair is not None:  # was `not ... is None` (anti-idiom)
            images = compute_image_differences(
                similar_pair, by_ratio, show_differences)
            show_images(width, images)
    print('NOTE: Press the "0" key, to close opened windows')
    cv2.waitKey(0)
def copy_func(f):
    """Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)."""
    # Rebuild a function object sharing the same code, globals, defaults and
    # closure, then copy metadata (__doc__, __dict__, ...) from the original.
    clone = types.FunctionType(
        f.__code__,
        f.__globals__,
        name=f.__name__,
        argdefs=f.__defaults__,
        closure=f.__closure__,
    )
    clone = functools.update_wrapper(clone, f)
    # update_wrapper does not carry over keyword-only defaults.
    clone.__kwdefaults__ = f.__kwdefaults__
    return clone
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up the Keba charging station sensors from config entry."""
    keba: KebaKeContact = hass.data[DOMAIN][KEBA_CONNECTION]
    wallbox = keba.get_wallbox(config_entry.data[CONF_HOST])
    # One sensor entity per sensor description, all updated on add.
    sensors: list[KebaSensor] = [
        KebaSensor(wallbox, description) for description in SENSOR_TYPES
    ]
    async_add_entities(sensors, True)
def augment(img_list: list, hflip: bool = True, rot: bool = True) -> List[np.ndarray]:
    """
    Augments the image inorder to add robustness to the model
    @param img_list: The List of images
    @param hflip: If True, add horizontal flip
    @param rot: If True, add 90 degrees rotation
    @return: A list of the augmented images
    """
    # Decide once per call (coin flip each) which transforms apply; the same
    # choice is then applied to every image in the list.
    do_hflip = hflip and np.random.random() < 0.5
    do_vflip = rot and np.random.random() < 0.5
    do_rot90 = rot and np.random.random() < 0.5
    return [perform_augment(do_hflip, do_vflip, do_rot90, image) for image in img_list]
def write_load_config(repo_dir, saved_state_path, changed_files=None):
    """
    Writes a .hhconfig that allows hh_client to launch hh_server from a saved
    state archive.
    repo_dir: Repository to run hh_server on
    saved_state_path: Path to file containing saved server state
    changed_files: list of strings (default: no changed files)
    """
    # Avoid a mutable default argument; treat None as the empty list.
    if changed_files is None:
        changed_files = []
    with open(repo_dir + os.path.sep + '.hhconfig', 'w') as f:
        f.write(r"""
# some comment
server_options_cmd = echo --load \"%s\"
""" % " ".join([saved_state_path] + changed_files))
def _neurovault_collections(parts, query):
    """Mocks the Neurovault API behind the `/api/collections/` path.
    parts: the parts of the URL path after "collections"
    ie [], ["<somecollectionid>"], or ["<somecollectionid>", "images"]
    query: the parsed query string, e.g. {"offset": "15", "limit": "5"}
    returns a dictionary of API results
    See the neurovault API docs for details: https://neurovault.org/api-docs
    """
    # A non-empty path addresses a single collection.
    if parts:
        return _neurovault_one_collection(parts)
    collections, _ = _get_neurovault_data()
    offset = int(query.get("offset", 0))
    limit = int(query.get("limit", 2))
    page = collections.iloc[offset:offset + limit]
    return {
        "count": len(collections),
        "results": page.to_dict(orient="records"),
    }
def display_states():
    """ Display the states"""
    all_states = storage.all(State)
    return render_template('7-states_list.html', states=all_states)
def setAMtrajectoryFromPoints(phase, L, dL, timeline, overwriteInit = True, overwriteFinal = True):
    """
    Define the AM value and its time derivative trajectories as a linear
    interpolation between the given points, and optionally sync the phase's
    initial / final values of L and dL with the trajectory endpoints.
    :param phase: phase whose trajectories and boundary values are set
    :param L: AM points, indexed by time along the last axis
    :param dL: AM derivative points, indexed by time along the last axis
    :param timeline: time samples; its ``.T`` is used as the knot times
    :param overwriteInit: Default True : overwrite init values even if they exist
    :param overwriteFinal: Default True : overwrite final values even if they exist
    :return:
    """
    phase.L_t = piecewise.FromPointsList(L, timeline.T)
    phase.dL_t = piecewise.FromPointsList(dL, timeline.T)
    if overwriteInit:
        phase.L_init = L[:, 0]
        phase.dL_init = dL[:, 0]
    if overwriteFinal:
        phase.L_final = L[:, -1]
        phase.dL_final = dL[:, -1]
def recompress_folder(folders, path, extension):
    """Recompress folder into the dist directory; returns the archive path."""
    basename = runez.SYS_INFO.platform_id.composed_basename(
        "cpython", path.name, extension=extension)
    destination = folders.dist / basename
    runez.compress(path, destination, logger=print)
    return destination
def guessMimetype(filename):
    """Return the mime-type for `filename`."""
    path = filename if isinstance(filename, pathlib.Path) else pathlib.Path(filename)
    with path.open("rb") as signature:
        # filetype only inspects the first _NUM_SIGNATURE_BYTES of a file, so
        # mp3s that start with null padding would never match a signature.
        # Skip leading nulls and refill the buffer from the byte stream.
        buf = b""
        while not buf:
            data = signature.read(_NUM_SIGNATURE_BYTES)
            if not data:
                break
            data = data.lstrip(b"\x00")
            if data:
                missing = _NUM_SIGNATURE_BYTES - len(data)
                if missing <= 0:
                    buf = data[:_NUM_SIGNATURE_BYTES]
                else:
                    buf = data + signature.read(missing)
    # Special-case .id3/.tag: extending filetype with add_type() would either
    # prepend (labeling every mp3 as id3) or append (labeling every .id3 as
    # mpeg), so the match is done explicitly here instead.
    if path.suffix in ID3_MIME_TYPE_EXTENSIONS:
        if Id3Tag().match(buf) or Id3TagExt().match(buf):
            return Id3TagExt.MIME
    return filetype.guess_mime(buf)
def flag_last_object(seq):
    """Yield (item, is_last) for each item of *seq*.

    Yields nothing for an empty iterable.  The original called next() on an
    empty iterator inside the generator, which under PEP 479 (Python 3.7+)
    turns the StopIteration into a RuntimeError.
    """
    seq = iter(seq)  # ensure this is an iterator
    try:
        prev = next(seq)
    except StopIteration:
        return  # empty input: yield nothing
    for item in seq:
        yield prev, False
        prev = item
    yield prev, True
def collectMessages():
    """ A generic stimulus invocation.

    Each POST key is itself a JSON payload describing an action request.
    'actionID' and 'ownerID' are required; 'subjectID' and 'objectID'
    default to 'ownerID'; 'insertionMode' defaults to head_clear.  Each
    parsed request is queued on the engine's action queue.
    """
    global rmlEngine
    try:
        stimuli = []
        rawRequest = request.POST.dict
        for rawKey in rawRequest.keys():
            keyVal = rawKey
            jsonPayload = json.loads(keyVal)
            try:
                actionID = jsonPayload["actionID"]
            except KeyError:
                errorMsg = "Missing required JSON parameter 'actionID'"
                raise Exceptions.MissingActionError(errorMsg)
            try:
                ownerID = jsonPayload["ownerID"]
            except KeyError:
                errorMsg = "Missing required JSON parameter 'ownerID'"
                raise Exceptions.InvalidControllerError()
            try:
                subjectID = jsonPayload["subjectID"]
            except KeyError:
                subjectID = ownerID
            # NOTE: the original resolved 'objectID' twice in a row; once
            # is enough.
            try:
                objectID = jsonPayload["objectID"]
            except KeyError:
                objectID = ownerID
            try:
                insertionModeText = jsonPayload["insertionMode"]
                if insertionModeText == 'head_clear':
                    insertionMode = ationInsertionTypes.HEAD_CLEAR
                elif insertionModeText == 'head':
                    insertionMode = ationInsertionTypes.HEAD
                elif insertionModeText == 'append':
                    insertionMode = ationInsertionTypes.APPEND
                else:
                    # BUGFIX: the original format string had no placeholder,
                    # so the % operation itself raised TypeError.
                    errorMsg = "Invalid insertionMode parameter '%s'. Valid values are 'head', 'head_clear' and 'append'" % insertionModeText
                    raise Exceptions.InsertionModeError()
            except KeyError:
                insertionMode = ationInsertionTypes.HEAD_CLEAR
            try:
                rtparams = jsonPayload["actionParams"]
            except KeyError:
                rtparams = {}
            actionInvocation = Engine.ActionRequest(actionID, insertionMode, rtparams, subjectID, objectID, ownerID)
            rmlEngine.aQ.put(actionInvocation)
        response.body = json.dumps({"status": stimuli})
        response.status = 200
        return response
    except Exceptions.InvalidControllerError:
        fullerror = sys.exc_info()
        errorID = str(fullerror[0])
        errorMsg = str(fullerror[1])
        response.body = "Failed to post action. %s, %s" % (errorID, errorMsg)
        response.status = 400
        return response
    except Exceptions.MissingActionError:
        fullerror = sys.exc_info()
        errorID = str(fullerror[0])
        errorMsg = str(fullerror[1])
        response.body = "Failed to post action. %s, %s" % (errorID, errorMsg)
        response.status = 400
        return response
    except Exception as unusedE:
        # actionID may not be in scope here, so re-parse the payload to
        # recover it (a MissingActionError would have been raised earlier
        # if it were absent).
        rawRequest = request.POST.dict
        for rawKey in rawRequest.keys():
            keyVal = rawKey
            jsonPayload = json.loads(keyVal)
            actionID = jsonPayload["actionID"]
        fullerror = sys.exc_info()
        errorID = str(fullerror[0])
        errorMsg = str(fullerror[1])
        response.body = "Failed to post action %s. %s, %s" % (actionID, errorID, errorMsg)
        response.status = 500
        return response
def filter_ptr_checks(props):
    """This function will filter out extra pointer checks.
    Our support to primitives and overflow pointer checks is unstable and
    can result in lots of spurious failures. By default, we filter them out.
    """
    unstable_classes = ("pointer_arithmetic", "pointer_primitives")
    return [prop for prop in props
            if extract_property_class(prop) not in unstable_classes]
def makeKeylistObj(keylist_fname, includePrivate=False):
    """Return a new unsigned keylist object for the keys described in
       'mirror_fname'.
    """
    # Collect every Key(...) entry declared in the config file.
    collected = []
    preload = {'Key': collected.append}
    readConfigFile(keylist_fname, (), (), preload)
    klist = []
    for raw_key in collected:
        key = thandy.keys.RSAKey.fromJSon(raw_key)
        if includePrivate and not key.isPrivateKey():
            raise thandy.FormatException("Private key information not found.")
        klist.append({'key': key.format(private=includePrivate),
                      'roles': key.getRoles()})
    result = {'_type': "Keylist",
              'ts': formatTime(time.time()),
              'keys': klist}
    # Validate before returning.
    KEYLIST_SCHEMA.checkMatch(result)
    return result
def getwpinfo(id, wps):
    """Help function to create description of WP inputs.

    Returns "<id> working point: <loosest>-<tightest>" when both a 'loose'
    and a 'tight' working point exist; otherwise lists all WPs.
    """
    try:
        # Loosest/tightest WP = the matching name with the most characters
        # (i.e. the most 'V's, e.g. VVLoose / VVTight).
        wpmin = max((w for w in wps if 'loose' in w.lower()), key=len)
        wpmax = max((w for w in wps if 'tight' in w.lower()), key=len)
        info = f"{id} working point: {wpmin}-{wpmax}"
    except ValueError:
        # max() on an empty sequence: no loose/tight pair available.
        # (The original used a bare `except:`, which also swallowed
        # unrelated errors such as non-string entries.)
        info = f"{id} working point: {', '.join(wps)}"
    return info
def build_word_dg(target_word, model, depth, model_vocab=None, boost_counter=None, topn=5):
    """ Accept a target_word and builds a directed graph based on
    the results returned by model.similar_by_word. Weights are initialized
    to 1. Starts from the target_word and gets similarity results for it's children
    and so forth, up to the specified depth.
    Args
    ----
    target_word (string): Root node.
    model (gensim.models): Gensim word embedding model.
    depth (int): Depth to restrict the search to.
    model_vocab: Vocabulary used to decide whether frequency boosting applies.
    boost_counter: Per-word counts used to scale the frequency boost; boosting
        is only attempted when both model_vocab and boost_counter are provided.
    topn (int): Number of words to check against in the embedding model, default=5.
    """
    _DG = init_digraph()
    seen_set = set()
    # Boost only when both auxiliary inputs are supplied and the root word is
    # in the supplied vocabulary.
    do_hs_boosting = (
        boost_counter and model_vocab and target_word in model_vocab)
    if do_hs_boosting:
        # Boost = log10(corpus count) * boost_counter weight.
        # NOTE(review): the guard checks membership in `model_vocab` but the
        # count is read from `model.vocab` -- confirm these are the same vocab.
        weight_boost = log10(float(model.vocab[target_word].count)) * boost_counter[
            target_word] if target_word in boost_counter else 0
        _DG.add_weighted_edges_from([(target_word, word[0], weight_boost + word[1])
                                     for word in model.similar_by_word(target_word, topn=topn)])
    else:
        _DG.add_weighted_edges_from([(target_word, word[0], word[1])
                                     for word in model.similar_by_word(target_word, topn=topn)])
    seen_set.add(target_word)
    # Breadth-style expansion: each pass expands every node not yet queried.
    for _idx in range(1, depth):
        current_nodes = _DG.nodes()
        for node in current_nodes:
            if node not in seen_set:
                _DG.add_weighted_edges_from(
                    [(node, word[0], word[1]) for word in model.similar_by_word(node, topn=topn)])
                seen_set.add(node)
    return _DG | 5,332,861 |
def train(network, num_epochs, train_fn, train_batches, test_fn=None,
          validation_batches=None, threads=None, early_stop=np.inf,
          early_stop_acc=False, save_epoch_params=False, callbacks=None,
          acc_func=onehot_acc, train_acc=False):
    """
    Train a neural network by updating its parameters.
    Parameters
    ----------
    network : lasagne neural network handle
        Network to be trained.
    num_epochs: int
        Maximum number of epochs to train
    train_fn : theano function
        Function that computes the loss and updates the network parameters.
        Takes parameters from the batch iterators
    train_batches : batch iterator
        Iterator that yields mini batches from the training set. Must be able
        to re-iterate multiple times.
    test_fn : theano function
        Function that computes loss and predictions of the network.
        Takes parameters from the batch iterators.
    validation_batches : batch iterator
        Iterator that yields mini batches from the validation set. Must be able
        to re-iterate multiple times.
    threads : int
        Number of threads to use to prepare mini batches. If None, use
        a single thread.
    early_stop : int
        Number of iterations without loss improvement on validation set that
        stops training.
    early_stop_acc : boolean
        Use validation accuracy instead of loss for early stopping.
    save_epoch_params : str or False
        Save neural network parameters after each epoch. If False, do not save.
        If you want to save the parameters, provide a filename with an
        int formatter so the epoch number can be inserted.
    callbacks : list of callables
        List of callables to call after each training epoch. Can be used to
        update learn rates or plot data. Functions have to accept the
        following parameters: current epoch number, lists of per-epoch train
        losses, train accuracies, validation losses, validation accuracies.
        The last three lists may be empty, depending on other parameters.
    acc_func : callable
        Function to use to compute accuracies.
    train_acc : boolean
        Also compute accuracy for training set. In this case, the training
        loss will be also re-computed after an epoch, which leads to lower
        train losses than when not using this parameter.
    Returns
    -------
    tuple of four lists
        Train losses, train accuracies, validation losses,
        validation accuracies for each epoch
    """
    if (test_fn is not None) != (validation_batches is not None):
        raise ValueError('If test function is given, validation set is '
                         'necessary (and vice-versa)!')
    # For accuracy-based stopping we minimize -accuracy, hence 0.0 start.
    best_val = np.inf if not early_stop_acc else 0.0
    epochs_since_best_val_loss = 0
    # (The original initialized callbacks twice; once is enough.)
    if callbacks is None:
        callbacks = []
    best_params = get_params(network)
    train_losses = []
    val_losses = []
    val_accs = []
    train_accs = []
    if threads is not None:
        def threaded(it):
            return dmgr.iterators.threaded(it, threads)
    else:
        def threaded(it):
            return it
    for epoch in range(num_epochs):
        timer = Timer()
        timer.start('epoch')
        timer.start('train')
        try:
            train_losses.append(
                avg_batch_loss(threaded(train_batches), train_fn, timer))
        except RuntimeError as e:
            print(Colors.red('Error during training:'), file=sys.stderr)
            print(Colors.red(str(e)), file=sys.stderr)
            # NOTE: returns only the best parameters here, not the documented
            # 4-tuple -- kept as-is for backward compatibility.
            return best_params
        timer.stop('train')
        if save_epoch_params:
            save_params(network, save_epoch_params.format(epoch))
        if validation_batches:
            val_loss, val_acc = avg_batch_loss_acc(
                threaded(validation_batches), test_fn, acc_func)
            val_losses.append(val_loss)
            val_accs.append(val_acc)
        if train_acc:
            # Re-evaluate the training set with the test function so the
            # reported loss reflects the post-update parameters.
            train_loss, tr_acc = avg_batch_loss_acc(
                threaded(train_batches), test_fn, acc_func)
            train_losses[-1] = train_loss
            train_accs.append(tr_acc)
        print('Ep. {}/{} {:.1f}s (tr: {:.1f}s th: {:.1f}s)'.format(
            epoch + 1, num_epochs,
            timer['epoch'], timer['train'], timer['theano']),
            end='')
        print(' tl: {:.6f}'.format(train_losses[-1]), end='')
        if train_acc:
            print(' tacc: {:.6f}'.format(tr_acc), end='')
        if validation_batches:
            # early stopping
            cmp_val = val_losses[-1] if not early_stop_acc else -val_accs[-1]
            if cmp_val < best_val:
                epochs_since_best_val_loss = 0
                best_val = cmp_val
                best_params = get_params(network)
                # green output
                c = Colors.green
            else:
                epochs_since_best_val_loss += 1
                # neutral output
                c = lambda x: x
            print(c(' vl: {:.6f}'.format(val_losses[-1])), end='')
            print(c(' vacc: {:.6f}'.format(val_accs[-1])), end='')
            if epochs_since_best_val_loss >= early_stop:
                print(Colors.yellow('\nEARLY STOPPING!'))
                break
        else:
            best_params = get_params(network)
        print('')
        for cb in callbacks:
            cb(epoch, train_losses, val_losses, train_accs, val_accs)
    # set the best parameters found
    set_params(network, best_params)
    return train_losses, val_losses, train_accs, val_accs
def sample_normal_gamma(mu, lmbd, alpha, beta):
    """Draw one (mean, precision) sample from a Normal-Gamma distribution.

    https://en.wikipedia.org/wiki/Normal-gamma_distribution
    NOTE(review): np.random.gamma takes (shape, scale); if `beta` is meant
    as the Normal-Gamma *rate* parameter, the scale should be 1/beta --
    confirm the intended parameterization.
    """
    precision = np.random.gamma(alpha, beta)
    mean = np.random.normal(mu, 1.0 / np.sqrt(lmbd * precision))
    return mean, precision
async def _common_discover_entities(
        current_entity_platform: EntityPlatform,
        config_entry: ConfigEntry,
        source_objects: Iterable[TObject],
        object_code_getter: Callable[[TObject], TIdentifier],
        entity_cls: Type[TSensor],
        final_config: Optional[ConfigType] = None,
        existing_entities: Optional[List[TSensor]] = None,
        sensor_type_name: Optional[str] = None,
        entity_code_getter: Callable[[TSensor], TIdentifier] = None,
        log_prefix: Optional[str] = None,
) -> DiscoveryReturnType:
    """
    Common entity discovery helper.
    :param current_entity_platform: Entity platform used
    :param config_entry: Configuration entry
    :param final_config: Final configuration data
    :param source_objects: Objects to use when creating entities
    :param object_code_getter: Getter for identifier for objects
    :param entity_cls: Entity class (subclass of `MESEntity`)
    :param existing_entities: (optional) Existing entities list
     (default: retrieved at runtime)
    :param sensor_type_name: (optional) Sensor type name for log prefixing
     (default: derrived from configuration key)
    :param entity_code_getter: (optional) Getter for identifier for entities
     (default: `code` property of provided entity class)
    :param log_prefix: (optional) Log prefix to prepend to internal loggin
     (default: empty string)
    :return: Tuple[new entities list, async tasks]
    """
    # BUGFIX: resolve a None platform *before* dereferencing it; the original
    # read `current_entity_platform.hass` first and only checked for None
    # afterwards, which would have raised AttributeError.
    if current_entity_platform is None:
        current_entity_platform = entity_platform.current_platform.get()
    hass = current_entity_platform.hass
    config_key = entity_cls.config_key
    if final_config is None:
        final_config = hass.data.get(DATA_FINAL_CONFIG, {}).get(config_entry.entry_id)
    if final_config is None:
        raise ValueError('Final configuration not available for entry "%s"' % (config_entry.entry_id,))
    if sensor_type_name is None:
        sensor_type_name = config_key
        if sensor_type_name.endswith('s'):
            sensor_type_name = sensor_type_name[:-1]
    if log_prefix is None:
        log_prefix = _make_log_prefix(
            config_entry,
            current_entity_platform,
            'discvr',
            sensor_type_name
        )
    if entity_code_getter is None:
        # NOTE(review): `entity_cls.code` is accessed on the class; if `code`
        # is a property this yields the property object, not a bound getter --
        # confirm `code` is actually callable here.
        entity_code_getter = entity_cls.code
    if existing_entities is None:
        existing_entities = hass.data\
            .get(DATA_ENTITIES, {})\
            .get(config_entry.entry_id, {})\
            .get(config_key, [])
    entities = []
    tasks = []
    added_entities: Set[TSensor] = set(existing_entities or [])
    entity_filter = final_config[CONF_ENTITIES][config_key]
    name_formats = final_config[CONF_NAME_FORMAT][config_key]
    scan_intervals = final_config[CONF_SCAN_INTERVAL][config_key]
    for iter_object in source_objects:
        identifier = object_code_getter(iter_object)
        if not identifier:
            _LOGGER.warning('No identifier on: %s: %s', iter_object, iter_object.data)
            continue
        # BUGFIX: the original line ended with a trailing comma, which made
        # `log_sensor_type_name` a 1-tuple instead of a string.
        log_sensor_type_name = sensor_type_name.ljust(7)
        log_identifier = '*' + identifier[-5:]
        granular_log_prefix = _make_log_prefix(
            config_entry,
            current_entity_platform,
            'discvr',
            log_sensor_type_name,
            log_identifier
        )
        if not entity_filter[identifier]:
            _LOGGER.info(granular_log_prefix + 'Skipping setup/update due to filter')
            continue
        # Find an already-registered entity with the same identifier.
        obj_entity = None
        for entity in added_entities:
            if entity_code_getter(entity) == identifier:
                obj_entity = entity
                break
        entity_log_prefix = _make_log_prefix(
            config_entry,
            current_entity_platform,
            'entity',
            log_sensor_type_name,
            log_identifier
        )
        if obj_entity is None:
            _LOGGER.debug(granular_log_prefix + 'Setting up entity')
            entities.append(
                entity_cls.async_discover_create(
                    iter_object,
                    name_formats[identifier],
                    scan_intervals[identifier],
                    entity_log_prefix
                )
            )
        else:
            # Entity still present in the source: keep it (anything left in
            # added_entities afterwards gets removed below).
            added_entities.remove(obj_entity)
            if obj_entity.enabled:
                _LOGGER.debug(granular_log_prefix + 'Updating entity')
                update_task = obj_entity.async_discover_update(
                    iter_object,
                    name_formats[identifier],
                    scan_intervals[identifier],
                    entity_log_prefix
                )
                if update_task is not None:
                    tasks.append(update_task)
    if entities:
        register_update_services(entity_cls, current_entity_platform, log_prefix)
    if added_entities:
        _LOGGER.info(log_prefix + f'Removing {len(added_entities)} {sensor_type_name} entities')
        tasks.extend(get_remove_tasks(hass, added_entities))
    return entities, tasks
def _dice(terms):
    """
    Returns the elements of iterable *terms* in tuples of every possible length
    and range, without changing the order. This is useful when parsing a list of
    undelimited terms, which may span multiple tokens. For example:
    >>> _dice(["a", "b", "c"])
    [('a', 'b', 'c'), ('a', 'b'), ('b', 'c'), ('a',), ('b',), ('c',)]
    """
    # Materialize to a list: on Python 3, filter() returns an iterator, so the
    # original code's len() and slicing on it raised TypeError.
    terms = [t for t in terms if not _is_delimiter(t)]
    windows = []
    # Longest windows first, each slid across every valid start position.
    for n in range(len(terms), 0, -1):
        for m in range(0, len(terms) - (n - 1)):
            windows.append(tuple(terms[m:m + n]))
    return windows
def test_call_wiz_cli_when_open_raises_error(
        mocker, mock_stdin, mock_open):
    """
    Calling wiz-cli with an error is raised opening the JSON file.
    """
    payload = '{"key": "value"}'
    # Opening the output file fails; stdin still supplies valid JSON.
    mock_open.side_effect = OSError
    mock_stdin.name = '<stdin>'
    mock_stdin.read.return_value = payload
    with pytest.raises(SystemExit) as e:
        main(['gs', '-', 'testing'])
    mock_open.assert_called_once()
def parse_date(val, format):
    """
    Attempts to parse the given string date according to the
    provided format, raising InvalidDateError in case of problems.
    @param str val (e.g. 2014-08-12)
    @param str format (e.g. %Y-%m-%d)
    @return datetime.date
    """
    try:
        return datetime.strptime(val, format).date()
    except ValueError as exc:
        # Chain the underlying ValueError so the original parse failure
        # stays visible in tracebacks.
        raise InvalidDateError("unable to parse %s" % val) from exc
def test_deserialize_dictionary_items():
    """
    deserializes the given dictionary items from dictionary.
    """
    # Every value goes in as a string; the deserializer is expected to coerce
    # booleans, numbers, dates, lists/tuples (both native and string-encoded),
    # None, and class names, while leaving unparseable values untouched.
    values = DTO(bool_value='true', datetime_value='2000-10-20T12:10:43+00:00',
                 date_value='2004-11-01', invalid_date_value='2008-08-1',
                 list_value=['1', 'False '], list_string='[ 1, -2.3, 0.01.1, null]',
                 tuple_value=(' 23', 'None', ' -78 '), tuple_string='(3, false , -0)',
                 none_value='none', int_value='1001 ', float_value=' 2.4 ',
                 invalid_int='1 2', positive_float=' +405.0023', pool_class='assertionPool',
                 force_double_string='"123"', force_single_string="'true'")
    converted_values = deserializer_services.deserialize(values)
    assert converted_values is not None
    assert isinstance(converted_values, dict)
    assert converted_values.get('bool_value') is True
    # Malformed values must pass through unchanged rather than raise.
    assert converted_values.get('invalid_date_value') == '2008-08-1'
    assert converted_values.get('list_value') == [1, False]
    assert converted_values.get('list_string') == [1, -2.3, '0.01.1', None]
    assert converted_values.get('tuple_value') == (23, None, -78)
    assert converted_values.get('tuple_string') == (3, False, '-0')
    assert converted_values.get('none_value') is None
    assert converted_values.get('int_value') == 1001
    assert converted_values.get('float_value') == 2.4
    assert converted_values.get('invalid_int') == '1 2'
    assert converted_values.get('positive_float') == ' +405.0023'
    # Explicitly quoted strings are unwrapped but never type-coerced.
    assert converted_values.get('force_double_string') == '123'
    assert converted_values.get('force_single_string') == 'true'
    assert issubclass(converted_values.get('pool_class'), AssertionPool)
    datetime_value = converted_values.get('datetime_value')
    assert datetime_value.day == 20 and datetime_value.month == 10 and \
        datetime_value.year == 2000 and datetime_value.second == 43 and \
        datetime_value.minute == 10 and datetime_value.hour == 12
    date_value = converted_values.get('date_value')
    assert date_value.day == 1 and date_value.month == 11 and date_value.year == 2004 | 5,332,868 |
def get_capture_dimensions(capture):
    """Get the dimensions of a capture"""
    # Capture properties come back as floats; truncate to int pixels.
    dimensions = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    return dimensions
def run_system_optimization(des_vars, subsystems, scalers, loop_number):
    """Method to run the top-level system optimization based on the disciplinary surrogate models.
    :param des_vars: definition of design variables
    :type des_vars: dict
    :param subsystems: definition of the disciplinary surrogate models
    :type subsystems: dict
    :param scalers: scalers of all the system values
    :type scalers: dict
    :param loop_number: number of the BLISS iteration
    :type loop_number: int
    :return: tuple with Problem object and driver status
    :rtype: tuple
    """
    # Set up problem and model
    prob = Problem()
    prob.model = model = SsbjBLISS2000(
        des_vars=des_vars,
        subsystems=subsystems,
        scalers=scalers,
        loop_number=loop_number,
    )
    # Set driver
    prob.driver = pyOptSparseDriver()
    prob.driver.options["optimizer"] = "SLSQP"
    prob.driver.opt_settings["MAXIT"] = 50
    prob.driver.opt_settings["ACC"] = 1e-6
    # Add design variables (bounds taken from each variable's definition)
    for des_var, details in des_vars.items():
        prob.model.add_design_var(
            des_var, lower=details["lower"], upper=details["upper"]
        )
    # Add objective; scaler=-1.0 turns range R maximization into minimization
    model.add_objective("performance.R", scaler=-1.0)
    # Add constraints: consistency (equality) constraints couple the
    # disciplinary surrogates; con_dpdx is the only inequality constraint
    model.add_constraint("consistency_constraints.gc_D", equals=0.0)
    model.add_constraint("consistency_constraints.gc_WE", equals=0.0)
    model.add_constraint("consistency_constraints.gc_WT", equals=0.0)
    model.add_constraint("consistency_constraints.gc_L", equals=0.0)
    model.add_constraint("consistency_constraints.gc_Theta", equals=0.0)
    model.add_constraint("consistency_constraints.gc_ESF", equals=0.0)
    model.add_constraint("consistency_constraints.gc_WT_L", equals=0.0)
    model.add_constraint("constraints.con_dpdx", upper=0.0)
    # Add recorder (one SQLite case-recorder file per BLISS loop)
    recorder = SqliteRecorder(
        os.path.join(
            cr_files_folder,
            "ssbj_cr_{}_system_loop{:02d}.sql".format(cr_files_keyword, loop_number),
        )
    )
    prob.driver.add_recorder(recorder)
    prob.driver.recording_options["includes"] = []
    prob.driver.recording_options["record_objectives"] = True
    prob.driver.recording_options["record_constraints"] = True
    prob.driver.recording_options["record_desvars"] = True
    # prob.driver.recording_options['record_metadata'] = True
    # Set up (reverse/adjoint mode for derivative computation)
    prob.setup(mode="rev")
    # View model (write the N2 diagram without opening a browser)
    n2(
        prob,
        outfile=os.path.join(cr_files_folder, "bliss2000_sys_ssbj.html"),
        show_browser=False,
    )
    # Run problem (either once (run_model) or full optimization (run_driver))
    prob.run_driver()
    # Report result in the log
    print("- - - - - - - - - - - - - - - - - - - - - - - - - -")
    print("\nOutcome of system optimization (BLISS loop: {})".format(loop_number))
    print("\n\nDesign variables")
    print("z_sh_low= ", des_vars["z_sh"]["lower"])
    print("z_sh_val= ", prob["z_sh"])
    print("z_sh_upp= ", des_vars["z_sh"]["upper"])
    print("")
    print("z_c_low= ", des_vars["z_c"]["lower"])
    print("z_c_val= ", prob["z_c"])
    print("z_c_upp= ", des_vars["z_c"]["upper"])
    print("")
    print("z_w_low= ", des_vars["z_w"]["lower"])
    print("z_w_val= ", prob["z_w"])
    print("z_w_upp= ", des_vars["z_w"]["upper"])
    print("")
    print("\nObjectives")
    # Un-scale R for reporting
    print("R_opt=", prob["performance.R"] * scalers["R"])
    print("\nConstraints")
    print("gc_D=", prob["consistency_constraints.gc_D"])
    print("gc_WE=", prob["consistency_constraints.gc_WE"])
    print("gc_WT=", prob["consistency_constraints.gc_WT"])
    print("gc_L=", prob["consistency_constraints.gc_L"])
    print("gc_Theta=", prob["consistency_constraints.gc_Theta"])
    print("gc_ESF=", prob["consistency_constraints.gc_ESF"])
    print("gc_WT_L=", prob["consistency_constraints.gc_WT_L"])
    print("c_dpdx=", prob["constraints.con_dpdx"])
    print("- - - - - - - - - - - - - - - - - - - - - - - - - -")
    return prob, prob.driver.fail | 5,332,870 |
def analyze_lines(msarc, trcdict, slit, pixcen, order=2, function='legendre', maskval=-999999.9):
    """Fit each traced arc line of a slit and collect per-line tilt arrays.

    Args:
        msarc (ndarray): Arc image; only ``msarc.shape[1]`` (spatial size)
            is used for normalization and array sizing.
        trcdict (dict): Trace results with keys "aduse", "arcdet", "xtfit",
            "ytfit", "wmask", "badlines".  The keys "xmodel", "ymodel" and
            "aduse" are (re)written in place for later display/use.
        slit (int): Slit index used to index ``pixcen``.
        pixcen (ndarray): Pixel-center array, indexed as
            ``pixcen[arcdet[j], slit]``.
        order (int): Polynomial order for both fits.
        function (str): Function name passed to the ``utils`` fitters.
        maskval (float): Sentinel marking masked/unfilled entries.

    Returns:
        tuple: ``(badlines, (xtilt, ytilt, mtilt, wtilt))`` where each array
        has shape ``(msarc.shape[1], arcdet.size)`` and holds ``maskval``
        where not populated.
    """
    # Analyze each spectral line
    aduse = trcdict["aduse"]
    arcdet = trcdict["arcdet"]
    xtfits = trcdict["xtfit"]
    ytfits = trcdict["ytfit"]
    wmasks = trcdict["wmask"]
    badlines = trcdict["badlines"]
    # Output arrays, initialized fully masked.
    xtilt = np.ones((msarc.shape[1], arcdet.size)) * maskval
    ytilt = np.ones((msarc.shape[1], arcdet.size)) * maskval
    mtilt = np.ones((msarc.shape[1], arcdet.size)) * maskval
    wtilt = np.ones((msarc.shape[1], arcdet.size)) * maskval
    # For displaying later
    xmodel = []
    ymodel = []
    for j in range(arcdet.size):
        if not aduse[j]:
            continue
        xtfit = xtfits[j]
        ytfit = ytfits[j]
        wmask = wmasks[j]
        xint = int(xtfit[0])
        # Half-width of the trace in pixels.
        sz = (xtfit.size-1)//2
        # Trim if we are off the detector
        lastx = min(xint + 2 * sz + 1, msarc.shape[1])
        if (lastx-xint) < xtfit.size:  # Cut down
            # dx is negative: number of trailing samples to drop.
            dx = (lastx-xint)-xtfit.size
            xtfit = xtfit[:dx]
            ytfit = ytfit[:dx]
            wmask = wmask[np.where(wmask < (xtfit.size+dx))]
        # Perform a scanning polynomial fit to the tilts
        # (this first fit is only used to vet the line via the mask check
        # below; the robust fit further down produces the saved model).
        wmfit = np.where(ytfit != maskval)
        if wmfit[0].size > order + 1:
            cmfit = utils.func_fit(xtfit[wmfit], ytfit[wmfit], function, order, minx=0.0,
                                   maxx=msarc.shape[1] - 1.0)
            model = utils.func_val(cmfit, xtfit, function, minx=0.0, maxx=msarc.shape[1] - 1.0)
        else:
            # Too few unmasked points to constrain the fit: reject the line.
            aduse[j] = False
            badlines += 1
            continue
        # Can this actually happen??
        if maskval in model:
            # Model contains masked values
            aduse[j] = False
            badlines += 1
            continue
        # Perform a robust polynomial fit to the traces
        wmsk, mcoeff = utils.robust_polyfit(xtfit[wmask], ytfit[wmask], order, function=function,
                                            sigma=2.0, minx=0.0, maxx=msarc.shape[1] - 1.0)
        # Save model
        model = utils.func_val(mcoeff, xtfit, function, minx=0.0, maxx=msarc.shape[1] - 1.0)
        xmodel.append(xtfit)
        ymodel.append(model)
        # Save
        # x positions normalized to [0, 1] across the detector.
        xtilt[xint:lastx, j] = xtfit / (msarc.shape[1] - 1.0)
        # These should be un-normalized for now
        pcen = pixcen[arcdet[j], slit]
        # Single model value at the pixel center, broadcast over the span.
        ytilt[xint:lastx, j] = model[pcen-int(xtfit[wmask[0]])]
        mtilt[xint:lastx, j] = model
    # Save
    trcdict['xmodel'] = xmodel
    trcdict['ymodel'] = ymodel
    trcdict["aduse"] = aduse
    # Return
    all_tilts = (xtilt, ytilt, mtilt, wtilt)
    return badlines, all_tilts | 5,332,871 |
def ad_modify_user_pwd_by_mail(user_mail_addr, old_password, new_password):
    """Change an AD user's password, locating the user by mail address.

    Args:
        user_mail_addr: mail address used to resolve the user's DN.
        old_password: the user's current password.
        new_password: the password to set.

    Returns:
        Result of the LDAP ``modify_password`` extended operation.
    """
    conn = __ad_connect()
    # Ensure the connection is released even if the DN lookup or the
    # password change raises (the original leaked the connection on error).
    try:
        user_dn = ad_get_user_dn_by_mail(user_mail_addr)
        # Pass values directly; the former '"%s" % value' wrapping was a no-op.
        result = conn.extend.microsoft.modify_password(user=user_dn,
                                                       new_password=new_password,
                                                       old_password=old_password)
    finally:
        conn.unbind()
    return result
def write_flow(flow: np.ndarray, flow_file: str) -> None:
    """Save an optical-flow field to *flow_file* in Middlebury .flo format.

    Layout: 4-byte magic 'PIEH', then width and height as int32, then the
    flow values as float32.

    This function is modified from
    https://lmb.informatik.uni-freiburg.de/resources/datasets/IO.py
    Copyright (c) 2011, LMB, University of Freiburg.

    Args:
        flow (ndarray): The optical flow to save.
        flow_file (str): Destination file path.
    """
    height, width = flow.shape[0], flow.shape[1]
    with open(flow_file, 'wb') as out:
        out.write('PIEH'.encode('utf-8'))
        np.array([width, height], dtype=np.int32).tofile(out)
        flow.astype(np.float32).tofile(out)
def batch_generator(batch_size=64):
    """Yield an endless stream of training batches.

    Each batch is a tuple ``(images, steering_angles)`` of numpy arrays;
    images are randomly augmented, cropped/resized and converted to YUV.
    """
    while True:
        images, steers = [], []
        for img_file, steer in get_batch_data(batch_size):
            image = load_img(img_file)
            image, steer = random_transform_img(image, steer)
            image = crop_resize_img(Image.fromarray(image))
            image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2YUV)
            images.append(image)
            steers.append(steer)
        yield (np.array(images), np.array(steers))
def markup_record(record_text, record_nr, modifiers, targets, output_dict):
    """Apply the ConText algorithm to one patient record.

    The record is split into sentences (via TextBlob), each sentence is
    marked up, the markups are collected into a single ConTextDocument,
    and the document plus its XML are stored in *output_dict* under the
    record number.
    """
    # Collects the per-sentence markups so records stay complete.
    doc = pyConText.ConTextDocument()
    sentences = TextBlob(record_text.lower()).sentences
    markups = [markup_sentence(sentence.raw, modifiers=modifiers, targets=targets)
               for sentence in sentences]
    print("\nFor record number:", record_nr)
    print("Number of sentences that have been marked up:", len(markups))
    for sentence_markup in markups:
        doc.addMarkup(sentence_markup)
    # Key the result by record number, storing both object and XML forms.
    output_dict.update({record_nr: {"object": doc, "xml": doc.getXML()}})
    return output_dict
def print_class_based_stats(class2stats):
    """Print per-class accuracy for class-based evaluation results.

    Args:
        class2stats: mapping from class name to a sequence of 0/1 (or
            boolean) per-example correctness indicators.
    """
    for class_name, stats in class2stats.items():
        correct_count = np.sum(stats)
        total_count = len(stats)
        # np.average over 0/1 indicators yields a fraction in [0, 1];
        # scale to a percentage before appending the '%' suffix (the
        # original printed e.g. "0.5%" when the accuracy was 50%).
        class_acc = 100.0 * np.average(stats)
        class_acc = str(round(class_acc, 3)) + f'% ({correct_count}/{total_count})'
        formatted_str = get_formatted_string((class_name, class_acc), str_max_len=20)
        print(formatted_str)
    print()
def del_account(account):
    """Delete a previously saved account by delegating to the object itself."""
    account.delete_account()
def write_pymods_from_comp(the_parsed_component_xml, opt, topology_model):
    """
    Write python dictionary modules (commands, parameters, events and
    channels) for the component described by "the_parsed_component_xml".

    Args:
        the_parsed_component_xml: parsed component XML object.
        opt: options object; ``opt.dict_dir`` is the dictionary output
            directory (defaults to the cwd when unset).
        topology_model: topology model the dictionaries are generated
            against.

    Raises:
        IOError: if ``topology_model`` is None.
    """
    global BUILD_ROOT
    global DEPLOYMENT
    global VERBOSE
    parsed_port_xml_list = []
    parsed_serializable_xml_list = []
    # A topology model is required to process the component's items.
    if topology_model is None:
        PRINT.info("Topology model was not specified. Please also input a topology model when running this command.")
        raise IOError
    # Parse every referenced port type file.
    port_type_files_list = the_parsed_component_xml.get_port_type_files()
    for port_file in port_type_files_list:
        port_file = search_for_file("Port", port_file)
        xml_parser_obj = XmlPortsParser.XmlPortsParser(port_file)
        parsed_port_xml_list.append(xml_parser_obj)
        del xml_parser_obj
    # Parse every referenced serializable type file.
    # Telemetry/Params can only use generated serializable types.
    serializable_type_files_list = the_parsed_component_xml.get_serializable_type_files()
    for serializable_file in serializable_type_files_list:
        serializable_file = search_for_file("Serializable", serializable_file)
        xml_parser_obj = XmlSerializeParser.XmlSerializeParser(serializable_file)
        # Channels and parameters cannot include external non-xml members.
        if len(xml_parser_obj.get_include_header_files()):
            # BUG FIX: the original '"... file: " % serializable_file' had no
            # %s placeholder and raised TypeError instead of printing.
            print("ERROR: Component include serializables cannot use user-defined types. file: %s" % serializable_file)
            sys.exit(-1)
        parsed_serializable_xml_list.append(xml_parser_obj)
        del xml_parser_obj
    model = CompFactory.CompFactory.getInstance()
    component_model = model.create(the_parsed_component_xml, parsed_port_xml_list, parsed_serializable_xml_list)
    instChannelWriter = InstChannelWriter.InstChannelWriter()
    instCommandWriter = InstCommandWriter.InstCommandWriter()
    instEventWriter = InstEventWriter.InstEventWriter()
    # Default the dictionary output directory to the cwd when unset.
    if opt.dict_dir is None:
        if VERBOSE:
            print("Dictionary output directory not specified!, defaulting to cwd")
        opt.dict_dir = os.getcwd()
    os.environ["DICT_DIR"] = opt.dict_dir
    # NOTE(review): the getInstance() results below are never used; the calls
    # are kept in case the factory has side effects -- confirm before removal.
    default_dict_generator = GenFactory.GenFactory.getInstance()
    # Generate command dictionaries for each command instance.
    for command_model in component_model.get_commands():
        if VERBOSE:
            print("Generating command dict %s" % command_model.get_mnemonic())
        instCommandWriter.DictStartWrite(command_model, topology_model)
        instCommandWriter.DictHeaderWrite(command_model, topology_model)
        instCommandWriter.DictBodyWrite(command_model, topology_model)
    # Parameter dictionaries are emitted through the command writer as well.
    for parameter_model in component_model.get_parameters():
        if VERBOSE:
            print("Generating parameter dict %s" % parameter_model.get_name())
        instCommandWriter.DictStartWrite(parameter_model, topology_model)
        instCommandWriter.DictHeaderWrite(parameter_model, topology_model)
        instCommandWriter.DictBodyWrite(parameter_model, topology_model)
    default_dict_generator = GenFactory.GenFactory.getInstance()
    # Event dictionaries.
    for event_model in component_model.get_events():
        if VERBOSE:
            print("Generating event dict %s" % event_model.get_name())
        instEventWriter.DictStartWrite(event_model, topology_model)
        instEventWriter.DictHeaderWrite(event_model, topology_model)
        instEventWriter.DictBodyWrite(event_model, topology_model)
    default_dict_generator = GenFactory.GenFactory.getInstance()
    # Channel dictionaries.
    for channel_model in component_model.get_channels():
        if VERBOSE:
            print("Generating channel dict %s" % channel_model.get_name())
        instChannelWriter.DictStartWrite(channel_model, topology_model)
        instChannelWriter.DictHeaderWrite(channel_model, topology_model)
        instChannelWriter.DictBodyWrite(channel_model, topology_model)
def get_file_name(file_name):
    """Return the testsuite name derived from the call stack.

    Finds the first stack frame whose filename contains *file_name*
    (case-insensitively on the frame side) and returns that file's base
    name without extension, or "" when no frame matches.
    """
    matching = [frame for frame in inspect.stack()
                if file_name in frame.filename.lower()]
    if not matching:
        return ""
    path = matching[0].filename
    # Handle both POSIX and Windows style paths.
    separator = '/' if '/' in path else '\\'
    return path.split(separator)[-1].split(".")[0]
def set_defaults(project=None, connection=None):
    """Set defaults either explicitly or implicitly as fall-back.

    Uses the arguments to call the individual default methods; the project
    default is applied before the connection default.

    :type project: string
    :param project: Optional. The name of the project to connect to.

    :type connection: :class:`gcloud.pubsub.connection.Connection`
    :param connection: Optional. A connection provided to be the default.
    """
    set_default_project(project=project)
    set_default_connection(connection=connection) | 5,332,880 |
def Hcolloc(outf, grammar_spec):
    """Write the regular colloc grammar to *outf*.

    This is the regular colloc grammar; it ignores topics.  It generates
    words via two levels of hierarchy (as a control for the HT models).

    Args:
        outf: writable text stream the grammar rules are written to.
        grammar_spec: a ``(prefixes, topics, segments)`` triple; ``topics``
            is accepted for signature compatibility but unused.
    """
    # BUG FIX: the original used Python-2 tuple-parameter unpacking in the
    # signature, which is a SyntaxError on Python 3; unpack explicitly.
    prefixes, topics, segments = grammar_spec
    outf.write("1 1 Collocs --> Collocs Colloc\n")
    for prefix in prefixes:
        outf.write("1 1 Collocs --> T_%s\n" % prefix)
    outf.write("Colloc --> Words\n")
    outf.write("1 1 Words --> Word\n")
    outf.write("1 1 Words --> Words Word\n")
    outf.write("Word --> BaseWord\n")
    outf.write("BaseWord --> Segments\n")
    outf.write("1 1 Segments --> Segment\n")
    outf.write("1 1 Segments --> Segments Segment\n")
    for segment in segments:
        outf.write("1 1 Segment --> %s\n" % segment)
def track_edge_matrix_by_spt(batch_track_bbox, batch_track_frames, history_window_size=50):
    """Build a pairwise spatio-temporal edge matrix for each track.

    :param batch_track_bbox: B, M, T, 4 tensor (x, y, w, h)
    :param batch_track_frames: B, M, T tensor of frame indices
    :param history_window_size: normalizer for the temporal difference
    :return: B, M, T, T, 5 tensor of 1 - |pairwise differences|
             (1 temporal channel followed by 2 xy and 2 wh channels)
    """
    B, M, T, _ = batch_track_bbox.size()
    xy = batch_track_bbox[..., :2]
    wh = batch_track_bbox[..., 2:]
    t = batch_track_frames.unsqueeze(-1)

    def _abs_pairwise(feat):
        # Expand along two new T axes and take elementwise |a - b|.
        left = feat[:, :, :, None, :].expand(-1, -1, -1, T, -1)
        right = feat[:, :, None, :, :].expand(-1, -1, T, -1, -1)
        return torch.abs(left - right)

    sim_t = 1 - _abs_pairwise(t) / history_window_size
    sim_xy = 1 - _abs_pairwise(xy)
    sim_wh = 1 - _abs_pairwise(wh)
    return torch.cat([sim_t, sim_xy, sim_wh], dim=-1)
def _file(space, fname, flags=0, w_ctx=None):
    """ file - Reads entire file into an array
    'FILE_USE_INCLUDE_PATH': 1,
    'FILE_IGNORE_NEW_LINES': 2,
    'FILE_SKIP_EMPTY_LINES': 4,
    'FILE_NO_DEFAULT_CONTEXT': 16,
    """
    # NOTE: w_ctx is accepted but never used in this implementation.
    # Respect open_basedir restrictions before touching the filesystem.
    if not is_in_basedir(space, 'file', fname):
        space.ec.warn("file(%s): failed to open stream: %s " %
                      (fname, 'Operation not permitted'))
        return space.w_False
    # 23 == 1 | 2 | 4 | 16, i.e. all supported flag bits set; values above
    # that (or negative) are rejected outright.
    if flags > 23 or flags < 0:
        space.ec.warn("file(): '%d' flag is not supported" % flags)
        return space.w_False
    if fname == "":
        space.ec.warn("file(): Filename cannot be empty")
        return space.w_False
    # Decode the individual flag bits.
    ignore_new_lines = flags & 2 != 0
    skip_empty_lines = flags & 4 != 0
    try:
        _fname = rpath.normpath(fname)
        arr_list = []
        fstream = open(_fname)
        # Read line by line; each kept line becomes one array element.
        # Note lines retain their trailing newline unless
        # FILE_IGNORE_NEW_LINES is set.
        line = fstream.readline()
        while line != '':
            if ignore_new_lines:
                line = line.rstrip('\n')
            # An "empty" line here means empty after any newline stripping,
            # so FILE_SKIP_EMPTY_LINES only takes effect together with
            # FILE_IGNORE_NEW_LINES for newline-terminated lines.
            if skip_empty_lines and line == "":
                line = fstream.readline()
                continue
            arr_list.append(space.newstr(line))
            line = fstream.readline()
        return space.new_array_from_list(arr_list)
    except OSError:
        space.ec.warn("file(%s): failed to open stream: "
                      "No such file or directory" % fname)
        return space.w_False
    except IOError:
        space.ec.warn("file(%s): failed to open stream: "
                      "No such file or directory" % fname)
        return space.w_False | 5,332,883 |
def prepare_features(tx_nan, degree, mean_nan=None, mean=None, std=None):
    """Clean and prepare a feature matrix for learning.

    Performs mean imputation of NaNs, polynomial feature expansion,
    missing-value indicator columns, and standardization.

    Args:
        tx_nan: raw feature matrix, possibly containing NaNs.
        degree: polynomial expansion degree passed to ``build_poly``.
        mean_nan: per-column means used for imputation; computed from
            ``tx_nan`` when None.
        mean, std: standardization statistics; computed when None.

    Returns:
        tuple ``(tx, mean, std, mean_nan, nan_cols)`` — the prepared matrix,
        the standardization statistics, the imputation means, and the
        indices of columns that contained NaNs.
    """
    # Column means over non-NaN entries, computed if not supplied.
    if mean_nan is None:
        mean_nan = np.nanmean(tx_nan, axis=0)
    # Replace NaNs with the column means (mean imputation).
    tx_val = np.where(np.isnan(tx_nan), mean_nan, tx_nan)
    # Polynomial feature expansion; remember where the constant column is.
    tx = build_poly(tx_val, degree)
    const_col = tx.shape[1] - 1
    # Indicator columns: 1 where the original value was missing.
    # (The original pre-allocated an np.empty array here that was
    # immediately overwritten -- dead code, removed.)
    nan_cols = np.flatnonzero(np.any(np.isnan(tx_nan), axis=0))
    ind_cols = np.where(np.isnan(tx_nan[:, nan_cols]), 1, 0)
    tx = np.c_[tx, ind_cols]
    # Standardize, then restore the constant column to exactly 1.
    tx, mean, std = standardize_numpy(tx, mean, std)
    tx[:, const_col] = 1.0
    return tx, mean, std, mean_nan, nan_cols
def _write_matt2(model, name, mids, nmaterials, op2, op2_ascii, endian):
    """Write the MATT2 cards for the given material IDs to the OP2 stream.

    Emits one fixed-width 17-integer record per material, in sorted MID
    order, and returns the number of header bytes written by
    ``write_header``.
    """
    #Record - MATT2(803,8,102)
    #Word Name Type Description
    #1 MID I Material identification number
    #2 TID(15) I TABLEMi entry identification numbers
    #17 UNDEF None
    key = (803, 8, 102)
    nfields = 17
    spack = Struct(endian + b'17i')
    nbytes = write_header(name, nfields, nmaterials, key, op2, op2_ascii)
    for mid in sorted(mids):
        mat = model.MATT2[mid]
        # Field order must match the record layout above exactly.
        data = [
            mat.mid,
            mat.g11_table,
            mat.g12_table,
            mat.g13_table,
            mat.g22_table,
            mat.g23_table,
            mat.g33_table,
            mat.rho_table,
            mat.a1_table,
            mat.a2_table,
            mat.a3_table,
            0,  # unused table-ID slot (always written as 0 here)
            mat.ge_table,
            mat.st_table,
            mat.sc_table,
            mat.ss_table,
            0,  # word 17: UNDEF per the record layout
        ]
        # struct packing requires every entry to be an int, never None.
        assert None not in data, data
        #print('MATT2', data, len(data))
        assert len(data) == nfields, len(data)
        op2_ascii.write('  mid=%s data=%s\n' % (mid, data[1:]))
        op2.write(spack.pack(*data))
    return nbytes | 5,332,885 |
def process_gen(job_obj, gen_log_loc, gen_log_name):
    """
    Runs after a rocoto workflow has been generated.
    Scans the generation log for errors; on the first error found, records
    a failure comment plus the offending log line on *job_obj*.
    """
    logger = logging.getLogger('BUILD/PROCESS_GEN')
    gen_log = f'{gen_log_loc}/{gen_log_name}'
    error_string = 'ERROR'
    error_msg = 'err_msg'
    if not os.path.exists(gen_log):
        return
    with open(gen_log) as fname:
        for line in fname:
            if error_string in line or error_msg in line:
                job_obj.comment_append('Generating Workflow Failed')
                # BUG FIX: the original appended `line` *after* the loop, which
                # is the file's last line, not the error line; report the
                # matching line itself and stop at the first error.
                job_obj.comment_append(line.rstrip())
                logger.info('Generating workflow failed')
                break
def all_subclasses(cls):
    """Return every known (imported) subclass of *cls*, transitively."""
    direct = cls.__subclasses__()
    found = list(direct)
    for subclass in direct:
        found.extend(all_subclasses(subclass))
    return found
def _find_popular_codon(aa):
    """Return the popular codon of a 4+ fold degenerate amino acid.

    The "popular" codon is the one whose two-base prefix occurs most often
    in ``aa["codons"]``; ties go to the earliest codon in the list.

    :param aa: dictionary containing amino acid information (key "codons").
    :return: the winning codon string from ``aa["codons"]``.
    """
    prefixes = [codon[:2] for codon in aa["codons"]]
    counts = [prefixes.count(prefix) for prefix in prefixes]
    # max() returns the first index attaining the maximum, which matches
    # the original strict-greater-than scan.
    best = max(range(len(counts)), key=counts.__getitem__)
    return aa["codons"][best]
def extract_feature_variables2NetCDF(res='4x5',
                                     interpolate_nans=True,
                                     add_derivative_vars=True):
    """
    Construct a NetCDF of feature variables for testing

    Parameters
    -------
    res (str): horizontal resolution of dataset (e.g. 4x5)
    interpolate_nans (bool): interpolate to fill the NaN values
    add_derivative_vars (bool): add the derivative feature variables

    Returns
    -------
    (None)
    """
    import xarray as xr
    # - Local variables
    # Temp vars variables
    # NOTE(review): TEMP_K_var is defined but not used below.
    TEMP_K_var = 'WOA_TEMP_K'
    TEMP_var = 'WOA_TEMP'
    # Which variables to extract (map variables to indexing to use)
    vars2use = {
        TEMP_var: 'WOA_025x025',
        'WOA_Nitrate': 'WOA_1x1',
        'WOA_Phosphate': 'WOA_1x1',
        'WOA_Salinity': 'WOA_025x025',
        'Depth_GEBCO': 'GEBCO_1min',
        'SeaWIFs_ChlrA': 'SeaWIFs_ChlrA_9km',
        'SWrad': 'SWrad_1_9',
        'DOC': 'DOC_1x1',
        'DOCaccum': 'DOCaccum_1x1',
        'Prod': 'Prod_1min',
        'WOA_MLDvd': 'WOA_MLD_1x1',
        'WOA_MLDpt': 'WOA_MLD_1x1',
        'WOA_MLDpd': 'WOA_MLD_1x1',
        'WOA_Dissolved_O2': 'WOA_1x1',
        'WOA_Silicate': 'WOA_1x1',
    }
    # Functions to extract these values?
    funcs4vars = {
        TEMP_var: get_WOA_TEMP4indices,
        'WOA_Nitrate': get_WOA_Nitrate4indices,
        'WOA_Phosphate': get_WOA_Phosphate4indices,
        'WOA_Salinity': get_WOA_Salinity4indices,
        'Depth_GEBCO': get_Depth_GEBCO4indices,
        'SeaWIFs_ChlrA': get_SeaWIFs_ChlrA4indices,
        'SWrad': get_RAD4indices,
        'DOC': get_DOC4indices,
        'DOCaccum': get_DOC_accum4indices,
        'Prod': get_Prod4indices,
        'WOA_MLDvd': extract_MLD_file4indices,
        'WOA_MLDpt': extract_MLD_file4indices,
        'WOA_MLDpd': extract_MLD_file4indices,
        'WOA_Dissolved_O2': get_WOA_Dissolved_O2_4indices,
        'WOA_Silicate': get_WOA_Silicate4indices,
    }
    # Are some of these values only available for annual period
    # NOTE(review): this is a string, so the "var not in annual_data_only"
    # test below is a substring check; it works for the current variable
    # names but would misfire on any name contained in 'Depth_GEBCO'.
    annual_data_only = 'Depth_GEBCO'
    months = range(1, 13)
    # Dummy 1970 timestamps, one per month, used as the time coordinate.
    dates = [datetime.datetime(1970, i, 1, 0, 0) for i in months]
    # Get indicies for boxes (calculated offline)
    filename = 'Oi_prj_indices4feature_variable_inputs_{}.csv'.format(res)
    df_IND = pd.read_csv(filename)
    # - Loop variables and add values to NetCDF
    # Loop variables and extract en masse
    ds_vars = []
    for var in vars2use:
        # Get lon and lat indices
        lon_idx = df_IND[vars2use[var]+'_LON']
        lat_idx = df_IND[vars2use[var]+'_LAT']
        # if data is monthly, then extract by month
        if (var not in annual_data_only):
            # Loop and extract by month
            data4var = []
            for n_month, month in enumerate(months):
                # extract data for indicies
                if 'WOA_MLD' in var:
                    # MLD extraction needs the specific MLD flavour
                    # (vd/pt/pd) passed as var2use.
                    vals = funcs4vars[var](lon_idx=lon_idx, lat_idx=lat_idx,
                                           month=month,
                                           var2use=var.split('WOA_MLD')[-1])
                else:
                    vals = funcs4vars[var](lon_idx=lon_idx, lat_idx=lat_idx,
                                           month=month)
                # Construct DataFrame by unstacking
                lat = df_IND['lat']
                lon = df_IND['lon']
                df = pd.DataFrame(vals, index=[lat, lon]).unstack()
                # Convert to Dataset
                lon4ds = list(df.columns.levels[1])
                lat4ds = list(df.index)
                # Prepend a length-1 time axis for this month.
                arr = df.values[None, ...]
                date = dates[n_month]
                data4var += [
                    xr.Dataset(
                        data_vars={var: (['time', 'lat', 'lon', ], arr)},
                        coords={'lat': lat4ds, 'lon': lon4ds, 'time': [date]})
                ]
            # Combine months into a single dataframe
            ds_var = xr.concat(data4var, dim='time')
        else:
            # Extract for a single month
            # NOTE(review): 'month' here is the leftover value from the last
            # monthly loop (and would be unbound if an annual-only variable
            # were processed first) -- confirm the extraction really is
            # month-independent.
            vals = funcs4vars[var](lon_idx=lon_idx, lat_idx=lat_idx,
                                   month=month)
            # Construct DataFrame by unstacking
            lat = df_IND['lat']
            lon = df_IND['lon']
            df = pd.DataFrame(vals, index=[lat, lon]).unstack()
            # Convert to Dataset
            lon4ds = list(df.columns.levels[1])
            lat4ds = list(df.index)
            arr = df.values
            # Save without time dimension
            ds_var = xr.Dataset(data_vars={var: (['lat', 'lon', ], arr)},
                                coords={'lat': lat4ds, 'lon': lon4ds})
        # Save variable to list
        ds_vars += [ds_var]
    # Combine all variables into a single Dataset
    ds = xr.merge(ds_vars)
    # save NetCDF of feature vairables
    ds.to_netcdf('Oi_prj_feature_variables_{}_TEST.nc'.format(res))
    # Interpolate NaNs?
    if interpolate_nans:
        ds = ancillaries.interpolate_NaNs_in_feature_variables(ds, res=res,
                                                               save2NetCDF=False)
        # Save interpolated version
        ext_str = '_INTERP_NEAREST'
        filename = 'Oi_prj_feature_variables_{}{}.nc'.format(res, ext_str)
        ds.to_netcdf(filename)
    # Add derived variables?
    if add_derivative_vars:
        ds = add_derivitive_variables(ds)
        # Save interpolated version
        ext_str = '_INTERP_NEAREST_DERIVED'
        filename = 'Oi_prj_feature_variables_{}{}.nc'.format(res, ext_str)
        ds.to_netcdf(filename) | 5,332,889 |
def initialize_cluster_details(scale_version, cluster_name, username,
                               password, scale_profile_path,
                               scale_replica_config):
    """ Initialize cluster details.
    :args: scale_version (string), cluster_name (string),
           username (string), password (string), scale_profile_path (string),
           scale_replica_config (bool)
    :return: dict of cluster configuration values.
    """
    profile = pathlib.PurePath(scale_profile_path)
    return {
        'scale_version': scale_version,
        'scale_cluster_clustername': cluster_name,
        'scale_service_gui_start': "True",
        'scale_gui_admin_user': username,
        'scale_gui_admin_password': password,
        'scale_gui_admin_role': "Administrator",
        'scale_sync_replication_config': scale_replica_config,
        'scale_cluster_profile_name': str(profile.stem),
        'scale_cluster_profile_dir_path': str(profile.parent),
    }
def test_extract_lambda():
    """
    `extract_lambda` should support all possible orderings of the variables it
    encounters.
    """
    parsed = Expression.fromstring(r"foo(\a.a,\a.a)")
    result = extract_lambda(parsed)
    eq_(len(result), 2)
def load(data_home=None):
    """Load the RWC dataset tracks.

    NOTE(review): the original docstring says "RWC-Genre" while the local
    variable name said "rwc_popular" -- confirm which flavour DATASET_DIR
    actually points at.

    Args:
        data_home (str): Local path where the dataset is stored.
            If `None`, looks for the data in the default directory,
            `~/mir_datasets`

    Returns:
        (dict): {`track_id`: track data}
    """
    if data_home is None:
        data_home = utils.get_default_dataset_path(DATASET_DIR)
    return {track_id: Track(track_id, data_home=data_home)
            for track_id in track_ids()}
def collate_spectra_by_source(source_list, tolerance, unit=u.arcsec):
    """Given a list of spec1d files from PypeIt, group the spectra within the
    files by their source object. The grouping is done by comparing the
    position of each spectra (using either pixel or RA/DEC) using a given tolerance.

    Args:
        source_list (list of :obj:`SourceObject`): A list of source objects, one
            SpecObj per object, ready for collation.
        tolerance (float):
            Maximum distance that two spectra can be from each other to be
            considered to be from the same source. Measured in floating
            point pixels or as an angular distance (see ``unit`` argument).
        unit (:obj:`astropy.units.Unit`):
            Units of ``tolerance`` argument if match_type is 'ra/dec'.
            Defaults to arcseconds. Ignored if match_type is 'pixel'.

    Returns:
        (list of `obj`:SourceObject): The collated spectra as SourceObjects.
    """
    collated_list = []
    for source in source_list:
        # Search for a collated SourceObject that matches this one.
        # If one can't be found, treat this as a new collated SourceObject.
        found = False
        for collated_source in collated_list:
            if collated_source.match(source.spec_obj_list[0],
                                     source.spec1d_header_list[0],
                                     tolerance, unit):
                collated_source.combine(source)
                found = True
                # BUG FIX: stop at the first match. Without this break a
                # source within tolerance of several collated groups would be
                # combined into each of them, duplicating its spectra.
                break
        if not found:
            collated_list.append(copy.deepcopy(source))
    return collated_list
def tolist(obj):
    """
    Convert given `obj` to list.

    If `obj` is not a list, return `[obj]`, else return `obj` itself.
    """
    return obj if isinstance(obj, list) else [obj]
def bip44_tree(config: dict, cls=hierarchy.Node) -> hierarchy.Node:
    """
    Return the root node of a BIP44-compatible partially ordered hierarchy.
    https://github.com/bitcoin/bips/blob/master/bip-0044.mediawiki

    The `config` parameter is a dictionary of the following form:
    - the keys of the dictionary are crypto-coins;
    - the values of the dictionary specify the number of accounts to generate for each coin,
      and the number of public/private addresses to generate for each account.

    As an example:

        {'BTC': (
            (1, 2), (4, 5), (0, 1)
        )}

    The previous dictionary represents a single coin, BTC.
    There are three accounts, that respectively have 1, 4, and 0 private addresses and 2, 5, and 1 public addresses.
    """
    hardened = constants.CryptoConstants.BIP32_HARDENED_INDEX.value
    root = cls(0, tag='m')
    purpose = cls(44 + hardened, tag="44'")
    root.edges.append(purpose)
    for coin_name, accounts in config.items():
        assert isinstance(coin_name, str)
        assert accounts
        coin = cls(constants.CoinType[coin_name].value, coin_name)
        purpose.edges.append(coin)
        for account_index, (n_priv, n_pub) in enumerate(accounts):
            assert n_priv > 0 or n_pub > 0
            account = cls(account_index)
            coin.edges.append(account)
            # Each account gets an XPUB (index 0) and an XPRV (index 1) child.
            xpub = cls(0, 'XPUB')
            account.edges.append(xpub)
            xprv = cls(1, 'XPRV')
            account.edges.append(xprv)
            # Addresses hang off each branch as a linear chain, private first.
            tail = xprv
            for address_index in range(n_priv):
                node = cls(address_index)
                tail.edges.append(node)
                tail = node
            tail = xpub
            for address_index in range(n_pub):
                node = cls(address_index)
                tail.edges.append(node)
                tail = node
    return root
def delete_useless_vrrp_subnets(client, to_delete, project_id):
    """
    Delete surplus VRRP subnets per (prefix_length, type, master_region,
    slave_region) key and return the deleted subnet ids.

    :param 'Client' client
    :param dict((prefix_length, type, master_region, slave_region),
                (state:quantity)) to_delete
    :rtype: list
    """
    result = []
    vrrp_subnets = client.vrrp.list(project_id=project_id)
    for key in to_delete:
        # Candidates whose (prefix length, family, regions) tuple matches
        # this key; the address family is hard-coded to "ipv4".
        vrrp_to_delete = [vrrp for vrrp in vrrp_subnets if (
            int(vrrp.cidr.split('/')[1]), "ipv4",
            vrrp.master_region, vrrp.slave_region) == key]
        # NOTE(review): other fields are read as attributes (vrrp.cidr,
        # vrrp.master_region) but the sort key uses itemgetter("status") --
        # confirm the vrrp objects also support item access, otherwise this
        # should be operator.attrgetter.
        vrrp_to_delete.sort(key=itemgetter("status"), reverse=True)
        # Delete at most to_delete[key] subnets for this key.
        for vrrp in vrrp_to_delete[:to_delete.get(key)]:
            client.vrrp.delete(vrrp.id)
            result.append(vrrp.id)
    return result | 5,332,896 |
def test_first():
    """
    `first` should return the first element of a sequence/generator.
    """
    # BUG FIX: use == rather than `is` -- identity comparison against a
    # literal relies on CPython's small-int caching and emits a
    # SyntaxWarning on Python 3.8+.
    assert fandango.functional.first(a for a in (1, 2, 3)) == 1
def test_case_0():
    """test_case_0."""
    result = increment_string("foo")
    assert result == "foo1"
def test_atomic_g_month_enumeration_4_nistxml_sv_iv_atomic_g_month_enumeration_5_5(mode, save_output, output_format):
    """
    Type atomic/gMonth is restricted by facet enumeration.
    """
    base = "nistData/atomic/gMonth/Schema+Instance/"
    assert_bindings(
        schema=base + "NISTSchema-SV-IV-atomic-gMonth-enumeration-5.xsd",
        instance=base + "NISTXML-SV-IV-atomic-gMonth-enumeration-5-5.xml",
        class_name="NistschemaSvIvAtomicGMonthEnumeration5",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.