content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def correct_name(name):
    """
    Ensures that the name of object used to create paths in file system do not
    contain characters that would be handled erroneously (e.g. \\ or / that
    normally separate file directories).

    Parameters
    ----------
    name : str
        Name of object (course, file, folder, etc.) to correct

    Returns
    -------
    corrected_name : str
        Corrected name, with each of " ", "\\", "/" and ":" replaced by "_".
    """
    # str.translate performs all four single-character substitutions in one
    # C-level pass instead of four chained str.replace() calls.
    corrected_name = name.translate(
        str.maketrans({" ": "_", "\\": "_", "/": "_", ":": "_"})
    )
    return corrected_name
def getLivePosSnap(*args, **kwargs):
    """ Get live pos snap """
    # NOTE(review): stub — accepts any arguments and returns None; presumably
    # a placeholder for an API binding implemented elsewhere. TODO confirm.
    pass | 34,901 |
def runner(parallel, config):
    """Run functions, provided by string name, on multiple cores on the current machine.
    """
    def run_parallel(fn_name, items):
        # Drop placeholder entries before dispatching any work.
        work = [item for item in items if item is not None]
        if not work:
            return []
        work = diagnostics.track_parallel(work, fn_name)
        logger.info("multiprocessing: %s" % fn_name)
        fn = get_fn(fn_name, parallel)
        if "wrapper" in parallel:
            # Prepend the function name, wrapper arguments and a trimmed copy
            # of the parallel config so the wrapper can re-dispatch by name.
            wrap_parallel = {key: val for key, val in parallel.items()
                             if key in set(["fresources"])}
            wrapper_args = parallel.get("wrapper_args", [])
            work = [[fn_name] + wrapper_args + [wrap_parallel] + list(item)
                    for item in work]
        return run_multicore(fn, work, config, parallel=parallel)
    return run_parallel
def get_ifc2x3_element_type(element: int) -> ifc_2x3_element_type:
    """Get IFC element type.

    Args:
        element (int): element ID

    Returns:
        ifc_2x3_element_type: ifc type
    """ | 34,903 |
    # NOTE(review): no body is visible here — as written the function returns
    # None; the implementation is presumably elsewhere or missing. TODO confirm.
def containers_start(
        port=8080, rmq='docker', mongo='docker', memcached='docker',
        provision=False, **kwargs):
    """
    Start all appropriate containers. This is, at least, worker and girder.
    Optionally, mongodb and rabbitmq are included.

    :param port: default port to expose.
    :param rmq: 'docker' to use a docker for rabbitmq, 'host' to use the docker
        host, otherwise the IP for the rabbitmq instance, where DOCKER_HOST
        maps to the docker host and anything else is passed through.
    :param mongo: 'docker' to use a docker for mongo, 'host' to use the docker
        host, otherwise the IP for the mongo instance, where DOCKER_HOST maps
        to the docker host and anything else is passed through. The database
        is always 'girder'. Any other value is considered a docker version.
    :param memcached: 'docker' to use a docker for memcached.
    :param provision: if True, reprovision after starting. Otherwise, only
        provision if the histomictk container is created.
    """
    client = docker_client()
    # Pass the host's uid/gid into the containers so files they create are
    # owned by the invoking user.
    env = {
        'HOST_UID': os.popen('id -u').read().strip(),
        'HOST_GID': os.popen('id -g').read().strip(),
    }
    sockpath = '/var/run/docker.sock'
    if os.path.exists(sockpath):
        # Preferred: read the docker group gid straight off the socket.
        env['HOST_DOCKER_GID'] = str(os.stat(sockpath).st_gid)
    else:
        # Fallback: look the gid up in the group database; best-effort only.
        try:
            env['HOST_DOCKER_GID'] = os.popen('getent group docker').read().split(':')[2]
        except Exception:
            pass
    network_create(client, BaseName)
    # Start each known container via its container_start_<key> function when
    # one exists; a starter that reports it (re)created its container forces a
    # reprovision afterwards.
    for key in ImageList:
        func = 'container_start_' + key
        if func in globals():
            if globals()[func](
                client, env, key, port=port, rmq=rmq, mongo=mongo,
                memcached=memcached, provision=provision, **kwargs):
                provision = True
    if provision:
        containers_provision(**kwargs) | 34,904 |
def test_calibrate_new_mlflow_run():
    """Assert that a new mlflow run is created."""
    atom = ATOMClassifier(X_bin, y_bin, experiment="test", random_state=1)
    atom.run("GNB")
    run = atom.gnb._run  # snapshot the run object created by the initial fit
    atom.gnb.calibrate()
    # calibrate() should have started a fresh mlflow run, replacing the old one
    assert atom.gnb._run is not run | 34,905 |
def hsv_to_hsl(hsv):
    """
    HSV to HSL.

    https://en.wikipedia.org/wiki/HSL_and_HSV#Interconversion
    """
    hue, sat, val = hsv
    # Saturation and value arrive as percentages; work in [0, 1].
    sat /= 100.0
    val /= 100.0
    lightness = val * (1.0 - sat / 2.0)
    if lightness == 0.0 or lightness == 1.0:
        # Pure black or white: HSL saturation is defined as zero.
        new_sat = 0.0
    else:
        new_sat = (val - lightness) / min(lightness, 1.0 - lightness) * 100
    return [HSV._constrain_hue(hue), new_sat, lightness * 100]
def is_ci() -> bool:
    """Report whether the current execution is running on CI.

    Returns:
        `True` when the ``CI`` environment variable is exactly ``'true'``,
        `False` otherwise (including when the variable is unset).
    """
    return os.environ.get('CI', 'false') == 'true'
def reindex_faces(mesh, ordering):
    """
    Reorder the faces of the given mesh, returning a new mesh.

    Args:
        mesh (lacecore.Mesh): The mesh on which to operate.
        ordering (np.arraylike): An array specifying the order in which
            the original faces should be arranged.

    Returns:
        lacecore.Mesh: The reindexed mesh.

    Raises:
        ValueError: If `ordering` is not a permutation of
            ``0 .. mesh.num_f - 1``.
    """
    from .._mesh import Mesh
    # Validate that `ordering` has exactly one entry per face.
    vg.shape.check(locals(), "ordering", (mesh.num_f,))
    # A valid ordering is a permutation: its sorted unique values must be
    # exactly 0 .. num_f - 1.
    unique_values = np.unique(ordering)
    if not np.array_equal(unique_values, np.arange(mesh.num_f)):
        raise ValueError(
            "Expected new face indices to be unique, and range from 0 to {}".format(
                mesh.num_f - 1
            )
        )
    return Mesh(
        v=mesh.v,
        f=mesh.f[ordering],
        # Reindex face groups in lockstep so group membership follows faces.
        face_groups=None
        if mesh.face_groups is None
        else mesh.face_groups.reindexed(ordering),
    ) | 34,908 |
def check_exact_match(line, expected_line):
    """
    Uses regular expressions to find an exact (not partial) match for 'expected_line' in 'line', i.e.
    in the example below it matches 'foo' and succeeds:
        line value: '66118.999958 - INFO - [MainThread] - ly_test_tools.o3de.asset_processor - foo'
        expected_line: 'foo'
    :param line: The log line string to search,
        i.e. '9189.9998188 - INFO - [MainThread] - example.tests.test_system_example - Log Monitoring test 1'
    :param expected_line: The exact string to match when searching the line param,
        i.e. 'Log Monitoring test 1'
    :return: An exact match for the string if one is found, None otherwise.
    """
    # Anchor on start/end of line or whitespace so only whole-token occurrences
    # count: 'foo' matches 'foo bar' but not 'foobar'.
    pattern = "(^|\\s)" + re.escape(expected_line) + "($|\\s)"
    if re.compile(pattern, re.UNICODE).search(line) is None:
        return None
    return expected_line
def rounding_filters(filters, w_multiplier):
    """ Calculate and round number of filters based on width multiplier. """
    if not w_multiplier:
        # No multiplier: leave the filter count untouched.
        return filters
    divisor = 8
    scaled = filters * w_multiplier
    # Round to the nearest multiple of `divisor`, never below `divisor` itself.
    rounded = max(divisor, int(scaled + divisor / 2) // divisor * divisor)
    if rounded < 0.9 * scaled:  # prevent rounding by more than 10%
        rounded += divisor
    return int(rounded)
def process(request, service, identifier):
    """
    View that displays a detailed description for a WPS process.

    :param request: the incoming HTTP request
    :param service: name of the WPS service to query
    :param identifier: identifier of the process to describe
    :return: rendered 'tethys_wps/process.html' response
    """
    wps = get_wps_service_engine(service)
    wps_process = wps.describeprocess(identifier)
    context = {'process': wps_process,
               'service': service,
               # flag the template uses to render the abstract as a hyperlink
               'is_link': abstract_is_link(wps_process)}
    return render(request, 'tethys_wps/process.html', context) | 34,911 |
def update(pipeline_id, name, description):
    """Submits a request to CARROT's pipelines update mapping"""
    # Assemble the update payload as (field, value) pairs.
    params = [
        ("name", name),
        ("description", description),
    ]
    return request_handler.update("pipelines", pipeline_id, params)
def test_failed_split_line_line_algo_png() -> None:
    """Failed to compute line for split line with line detection alog.

    Regression test: processes a fixture image and checks every measured
    property (split angle/position, per-page rotation, crops, dpi, borders)
    against tolerance ranges.
    """
    # Each dict entry maps a measured property to a ("range", lo, hi) or
    # ("difference", expected, tol) acceptance criterion.
    MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(
        get_absolute_from_current_path(
            __file__, "failed_split_line_line_algo.png"
        ),
        {
            ConstString.separation_double_page_angle(): (
                "range",
                Angle.deg(89.96),
                Angle.deg(90.08),
            ),
            ConstString.separation_double_page_y(): ("range", 2573, 2576),
            ConstString.page_rotation(1): (
                "range",
                Angle.deg(-0.16),
                Angle.deg(0.16),
            ),
            ConstString.page_rotation(2): (
                "range",
                Angle.deg(-0.16),
                Angle.deg(0.21),
            ),
            ConstString.image_crop(1, "x1"): ("range", 407, 414),
            ConstString.image_crop(1, "y1"): ("range", 2925, 2928),
            ConstString.image_crop(1, "x2"): ("range", 2419, 2426),
            ConstString.image_crop(1, "y2"): ("range", 3171, 3174),
            ConstString.image_crop(2, "x1"): ("range", 1180, 1192),
            ConstString.image_crop(2, "y1"): ("range", 1724, 1750),
            ConstString.image_crop(2, "x2"): ("range", 1181, 1193),
            ConstString.image_crop(2, "y2"): ("range", 1725, 1751),
            ConstString.image_dpi(1): ("difference", 300, 0.0000001),
            ConstString.image_border(1, 1): ("range", 2917, 2930),
            ConstString.image_border(1, 2): ("range", 308, 323),
            ConstString.image_border(1, 3): ("range", 224, 224),
            ConstString.image_border(1, 4): ("range", 224, 224),
            ConstString.image_dpi(2): ("difference", 300, 0.0000001),
            ConstString.image_border(2, 1): ("range", 1752, 1753),
            ConstString.image_border(2, 2): ("range", 1753, 1753),
            ConstString.image_border(2, 3): ("range", 1239, 1239),
            ConstString.image_border(2, 4): ("range", 1239, 1239),
        },
    ) | 34,913 |
def symmetric_poly(n, *gens, **args):
    """Generates symmetric polynomial of order `n`.

    Returns the elementary symmetric polynomial e_n of the given generators:
    the sum of all products of ``n`` distinct generators.  Pass ``polys=True``
    to get a ``Poly`` instead of a plain expression.

    Raises ``ValueError`` when ``n`` is negative, exceeds the number of
    generators, or no generators are given.
    """
    gens = _analyze_gens(gens)
    if n < 0 or n > len(gens) or not gens:
        raise ValueError("can't generate symmetric polynomial of order %s for %s" % (n, gens))
    elif not n:
        # e_0 is the empty product, i.e. 1.
        poly = S.One
    else:
        # Sum of products over all n-element subsets of the generators.
        poly = Add(*[ Mul(*s) for s in subsets(gens, int(n)) ])
    if not args.get('polys', False):
        return poly
    else:
        return Poly(poly, *gens) | 34,914 |
def load(name: str, *args, **kwargs) -> Any:
    """
    Loads the unit specified by `name`, initialized with the given arguments
    and keyword arguments.
    """
    # Resolve the registered entry point, then assemble it with the caller's
    # arguments.
    return get_entry_point(name).assemble(*args, **kwargs)
def get_gate_names_2qubit() -> List[str]:
    """Return the list of valid gate names of 2-qubit gates.

    Returns:
        List[str]: the supported two-qubit gate identifiers, in a fixed order.
    """
    # A list literal is clearer and faster than building the list one
    # append at a time.
    return ["cx", "cz", "swap", "zx90", "zz90"]
def test_insert(connection: Connection) -> None:
    """Insert and delete queries are handled properly."""
    def test_empty_query(c: Cursor, query: str) -> None:
        # DML statements should report no result set: rowcount -1, no
        # description, and all fetches raising DataError.
        assert c.execute(query) == -1, "Invalid row count returned"
        assert c.rowcount == -1, "Invalid rowcount value"
        assert c.description is None, "Invalid description"
        with raises(DataError):
            c.fetchone()
        with raises(DataError):
            c.fetchmany()
        with raises(DataError):
            c.fetchall()
    with connection.cursor() as c:
        # Start from a clean slate, then create a table covering the scalar,
        # date/time, bool and array column types.
        c.execute("DROP TABLE IF EXISTS test_insert_tb")
        c.execute(
            "CREATE FACT TABLE test_insert_tb(id int, sn string null, f float,"
            "d date, dt datetime, b bool, a array(int)) primary index id"
        )
        test_empty_query(
            c,
            "INSERT INTO test_insert_tb VALUES (1, 'sn', 1.1, '2021-01-01',"
            "'2021-01-01 01:01:01', true, [1, 2, 3])",
        )
        # The inserted row should round-trip with correct Python types.
        assert (
            c.execute("SELECT * FROM test_insert_tb ORDER BY test_insert_tb.id") == 1
        ), "Invalid data length in table after insert"
        assert_deep_eq(
            c.fetchall(),
            [
                [
                    1,
                    "sn",
                    1.1,
                    date(2021, 1, 1),
                    datetime(2021, 1, 1, 1, 1, 1),
                    1,
                    [1, 2, 3],
                ],
            ],
            "Invalid data in table after insert",
        ) | 34,917 |
def positive_leading_quat(quat):
    """Returns the positive leading version of the quaternion.

    This function supports inputs with or without leading batch dimensions.

    Args:
        quat: A quaternion [w, i, j, k].

    Returns:
        The equivalent quaternion [w, i, j, k], negated whenever its leading
        component is negative (so the result has w >= 0).
    """
    # Ensure quat is an np.array in case a tuple or a list is passed
    quat = np.asarray(quat)
    # np.where broadcasts the (..., 1) condition across the last axis, so the
    # explicit np.tile of the original formulation is unnecessary.
    return np.where(quat[..., 0:1] < 0, -quat, quat)
def test_aperture_photometry_with_error_units():
    """Test aperture_photometry when error has units (see #176)."""
    # Uniform unit image: the aperture sum equals the aperture area.
    data1 = np.ones((40, 40), dtype=float)
    data2 = u.Quantity(data1, unit=u.adu)
    error = u.Quantity(data1, unit=u.adu)
    radius = 3
    true_flux = np.pi * radius * radius
    unit = u.adu
    position = (20, 20)
    table1 = aperture_photometry(data2, CircularAperture(position, radius),
                                 error=error)
    # Sum and its error must match analytically, and both must carry the
    # input's unit.
    assert_allclose(table1['aperture_sum'].value, true_flux)
    assert_allclose(table1['aperture_sum_err'].value, np.sqrt(true_flux))
    assert table1['aperture_sum'].unit == unit
    assert table1['aperture_sum_err'].unit == unit | 34,919 |
def comp_avg_silh_metric(data_input, cluster_indices, silh_max_samples, silh_distance):
    """
    Given a input data matrix and an array of cluster indices, returns the
    average silhouette metric for that clustering result (computed across all clusters).

    Parameters
    ----------
    data_input : ndarray
        Data to be clustered (each row contains a n-dimensional data sample)
    cluster_indices : list
        List containing for each data point (each row in data input) its cluster id
    silh_max_samples: int
        Maximum number of samples to compute the silhouette metric (higher for
        more exact values at higher computing costs)
    silh_distance: string
        Metric to use when calculating distance between instances
        e.g. 'euclidean', 'manhattan', 'cosine'

    Returns
    -------
    avg_silhouette : float
        Silhouette metric averaged across all clusters (NaN when the
        silhouette cannot be computed, e.g. a single cluster).
    """
    # Sample data for computing the silhouette metric: take up to
    # silh_max_samples points from each cluster so every cluster is
    # represented in the subsample.
    # NOTE(review): np.vstack/np.hstack inside the loop re-copies the
    # accumulated arrays per cluster; collecting chunks and stacking once
    # would be cheaper for many clusters.
    input_data_x_sample = None
    cluster_indices_sample = None
    for curr_cluster_id in set(cluster_indices):
        list_occurrences = [
            i for i, x in enumerate(cluster_indices) if x == curr_cluster_id
        ]
        if input_data_x_sample is None:
            input_data_x_sample = data_input[list_occurrences[0:silh_max_samples]]
        else:
            input_data_x_sample = np.vstack(
                (
                    input_data_x_sample,
                    data_input[list_occurrences[0:silh_max_samples]],
                )
            )
        if cluster_indices_sample is None:
            cluster_indices_sample = np.array(cluster_indices)[
                list_occurrences[0:silh_max_samples]
            ]
        else:
            cluster_indices_sample = np.hstack(
                (
                    cluster_indices_sample,
                    np.array(cluster_indices)[list_occurrences[0:silh_max_samples]],
                )
            )
    # Compute mean silhouette for each class and the average across all classes
    # (per-sample silhouettes -> per-cluster means -> unweighted mean), so
    # large clusters do not dominate the average.
    try:
        silh_array = metrics.silhouette_samples(
            input_data_x_sample,
            np.asarray(cluster_indices_sample),
            metric=silh_distance,
        )
        np_silh_samples = np.column_stack((cluster_indices_sample, silh_array.tolist()))
        df_silh_samples = pd.DataFrame(
            data=np_silh_samples[0:, 0:], columns=["cluster_id", "silhouette"]
        )
        df_silh_mean_per_class = df_silh_samples.groupby(
            ["cluster_id"]
        ).mean()  # .sort_values(by='cluster_id')
        df_silh_mean_per_class.reset_index(level=0, inplace=True)
        df_silh_mean_per_class.sort_values(by="cluster_id")
        avg_silhouette = df_silh_mean_per_class["silhouette"].mean()
    except ValueError:
        # silhouette_samples raises ValueError for degenerate clusterings
        # (e.g. a single cluster); report NaN instead of failing.
        avg_silhouette = np.nan
    return avg_silhouette | 34,920 |
def convert_lgg_data_tromso_reseksjon(path):
    """convert_lgg_data

    Scans `path` for .nii volumes (skipping label files), pairs each volume
    with its 'T<id>-label.nii' or 'T<id>_label.nii' companion, and imports the
    pair into the sqlite database via `convert_and_save_dataset`.  Aborts on
    the first inconsistency (ambiguous id, missing volume or label).

    :param path: directory prefix to glob for '*.nii' files
    """
    convert_table = get_convert_table()
    conn = sqlite3.connect(util.DB_PATH)
    cursor = conn.cursor()
    for volume in glob.glob(path + "*.nii"):
        if "label" in volume:
            continue
        # The case id is the single integer embedded in the filename.
        case_id = re.findall(r'\d+\b', volume)
        if len(case_id) != 1:
            print("ERROR", volume, case_id)
            return
        case_id = int(case_id[0])
        print(volume)
        if not os.path.exists(volume):
            print("ERROR, volume missing", volume, case_id)
            return
        image_type = 'pre'
        # Labels use either 'T<id>-label.nii' or 'T<id>_label.nii' naming.
        volume_label = path + 'T' + str(case_id) + '-label.nii'
        if not os.path.exists(volume_label):
            volume_label = path + 'T' + str(case_id) + '_label.nii'
        if not os.path.exists(volume_label):
            print("ERROR, no label", volume_label, case_id)
            return
        subgroup = convert_table.get(case_id)
        print(volume, image_type, case_id, subgroup)
        convert_and_save_dataset(case_id, cursor, image_type, [volume_label], volume, 2, subgroup, "", True)
    conn.commit()
    cursor.close()
    conn.close() | 34,921 |
def _validate_args(func, args, keywords):
    """Signal error if an alias is of the wrong type.

    Walks positional and keyword arguments of `func` that are declared in
    `func.ALIAS_ARGS`; when an int is passed where the alias type is not a
    '*code', or a str where it is not a '*name', reports the mismatch through
    the cspyce1 error mechanism (setmsg + sigerr) instead of raising.
    """
    for indx in list(range(len(args))) + list(keywords.keys()):
        if indx not in func.ALIAS_ARGS:
            continue
        # Get argument type and translate
        arg = _getarg(indx, args, keywords)
        argtype = func.ALIAS_ARGS[indx]
        if isinstance(arg, int) and not argtype[-4:] == 'code':
            # NOTE(review): `name` is computed but never used in either
            # branch — presumably intended for the error message. TODO confirm.
            name = indx if isinstance(indx, str) else func.ARGNAMES[indx]
            # argtype looks like '<body_or_frame>_<kind>'; the prefix names
            # what kind of alias was expected.
            body_or_frame = argtype.split('_')[0]
            cspyce1.setmsg('%s code %d not found in kernel pool' %
                           (body_or_frame, arg))
            cspyce1.sigerr('SPICE(' + body_or_frame.upper() + 'IDNOTFOUND)')
        elif isinstance(arg, str) and not argtype[-4:] == 'name':
            name = indx if isinstance(indx, str) else func.ARGNAMES[indx]
            body_or_frame = argtype.split('_')[0]
            cspyce1.setmsg('%s name "%s" not found in kernel pool' %
                           (body_or_frame, arg))
            cspyce1.sigerr('SPICE(' + body_or_frame.upper() + 'NAMENOTFOUND)') | 34,922 |
async def test_entity_debug_info_message(hass, mqtt_mock):
    """Test MQTT debug info."""
    # Delegates to the shared helper with the alarm_control_panel domain and
    # its default config.
    await help_test_entity_debug_info_message(
        hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
    ) | 34,923 |
def test_precommit_install(
    check_dir_mock: Mock,
    cli_fs_runner: CliRunner,
):
    """
    GIVEN None
    WHEN the command is run
    THEN it should create a pre-commit git hook script
    """
    result = cli_fs_runner.invoke(
        cli,
        ["install", "-m", "local"],
    )
    # Use a context manager so the hook file handle is closed deterministically
    # (the original leaked the open file object).
    with open(".git/hooks/pre-commit", "r") as hook:
        hook_str = hook.read()
    assert SAMPLE_PRE_COMMIT == hook_str
    assert "pre-commit successfully added in .git/hooks/pre-commit\n" in result.output
    assert result.exit_code == 0
def prepare_url(base_url, path, url_params=None):
    """Prepare url from path and params"""
    params = {} if url_params is None else url_params
    url = '{0}{1}'.format(base_url, path)
    # Normalize to a trailing slash before appending any query string.
    if not url.endswith('/'):
        url += '/'
    query = urlencode(params)
    return url + '?' + query if query else url
def extract_at_interval(da: xr.DataArray, interval) -> xr.DataArray:
    """Reduce size of an Error Grid by selecting data at a fixed interval along
    both the number of high- and low-fidelity samples.
    """
    # Keep every `interval`-th coordinate value along each sample axis; cells
    # outside both selections become NaN via `where`.
    step = slice(None, None, interval)
    high_mask = da.n_high.isin(da.n_high[step])
    low_mask = da.n_low.isin(da.n_low[step])
    return da.where(high_mask * low_mask)
def compute_log_zT_var(log_rho_var, log_seebeck_sqr_var, log_kappa_var):
    """Compute the variance of the logarithmic thermoelectric figure
    of merit zT.

    The three log-variances are independent contributions, so the total is
    simply their sum.
    """
    total = log_rho_var + log_seebeck_sqr_var
    return total + log_kappa_var
def main(global_config, **settings):
    """ Very basic pyramid app

    Builds the test application: enables pyramid_swagger, registers every
    sample route exercised by the test-suite, scans for view callables, and
    returns the WSGI app.
    """
    config = Configurator(settings=settings)
    config.include('pyramid_swagger')
    # Route with non-string path parameters (int/float/bool casting).
    config.add_route(
        'sample_nonstring',
        '/sample/nonstring/{int_arg}/{float_arg}/{boolean_arg}',
    )
    config.add_route('standard', '/sample/{path_arg}/resource')
    config.add_route('get_with_non_string_query_args', '/get_with_non_string_query_args')
    config.add_route('post_with_primitive_body', '/post_with_primitive_body')
    config.add_route('post_with_form_params', '/post_with_form_params')
    config.add_route('post_with_file_upload', '/post_with_file_upload')
    config.add_route('sample_post', '/sample')
    config.include(include_samples, route_prefix='/sample')
    config.add_route('throw_400', '/throw_400')
    # Route that exists here but is absent from the swagger spec.
    config.add_route('swagger_undefined', '/undefined/path')
    config.add_route('echo_date', '/echo_date')
    config.add_route('echo_date_json_renderer', '/echo_date_json_renderer')
    config.add_route('post_endpoint_with_optional_body', '/post_endpoint_with_optional_body')
    config.scan()
    return config.make_wsgi_app() | 34,928 |
def main(args):
    """
    Function analyze the `Red Sequence` using by cross-matching catalogues
    from `RedMapper` (http://risa.stanford.edu/redmapper/) and NOAO Data-Lab
    catalogue.

    :param args: argparse.Namespace with the program's command-line options
    """
    ## Reading all elements and converting to python dictionary
    param_dict = vars(args)
    ## Checking for correct input
    param_vals_test(param_dict)
    # Creating instance of `ReadML` with the input parameters
    param_dict['rs_args'] = RedSeq(**param_dict)
    ## Adding extra variables
    param_dict = add_to_dict(param_dict)
    ## Program message
    Prog_msg = param_dict['Prog_msg']
    ##
    ## Creating Folder Structure
    proj_dict = param_dict['rs_args'].proj_dict
    proj_dict = directory_skeleton(param_dict, proj_dict)
    ##
    ## Choice of cosmology
    ## Choosing cosmological model
    ( cosmo_model ,
    cosmo_params) = cosmo_create(cosmo_choice=param_dict['cosmo_choice'])
    param_dict['cosmo_model' ] = cosmo_model
    param_dict['cosmo_params'] = cosmo_params
    ##
    ## Printing out project variables (skipping the large array entries)
    key_skip = ['Prog_msg', 'z_centers', 'z_bins', 'z_arr','y_bins','x_bins']
    print('\n'+50*'='+'\n')
    for key, key_val in sorted(param_dict.items()):
        if not (key in key_skip):
            print('{0} `{1}`: {2}'.format(Prog_msg, key, key_val))
    print('\n'+50*'='+'\n')
    ## -- Main Analysis -- ##
    analysis_main(param_dict, proj_dict) | 34,929 |
def plot_decision_boundaries(model, X, y,
        probability=False, show_input=False,
        feature_names=None, class_names=None, alpha=0.5, size=30):
    """Plots the decision boundaries for a classifier with 2 features.

    This is good way to visualize the decision boundaries of various classifiers
    to build intution about them.

    It is assumed that the model is already trained.

    :param model: the classification model
    :param X: the training input samples as a dataframe or a 2-d array
    :param y: the target values
    :param probability: flag to indicate whether to plot class predictions or probabilities
    :param show_input: flag to indicate whether or not to show input points
    :param feature_names: names of the columns; used to label the axes
    :param class_names: names of the classes; used to label the colorbar
    :param alpha: the alpha value for the plots
    :param size: the size value for the input points
    """
    if isinstance(X, pd.DataFrame):
        # take feature names from the dataframe if possible
        if feature_names is None:
            feature_names = X.columns
        X = X.values
    if isinstance(y, pd.DataFrame):
        y = y.values
    # Evaluate the model over a dense grid covering the input range.
    xx, yy = _make_mesh(X, y, n=100)
    Z = _predict_mesh(model, xx, yy, probability=probability)
    if probability is False:
        # Discrete predictions: one colour per class, labelled ticks.
        num_classes = len(np.unique(y))
        cmap = plt.cm.get_cmap('viridis', num_classes)
        ticks = range(num_classes)
        if class_names is not None:
            tick_labels = class_names
        else:
            tick_labels = ticks
        label = "class"
    else:
        # Probability surface: continuous colormap for P(class 0).
        ticks = None
        tick_labels = None
        cmap = "viridis"
        if class_names is not None:
            label = "probability({})".format(class_names[0])
        else:
            label = "probability(class 0)"
    cs = plt.contourf(xx, yy, Z, cmap=cmap, alpha=alpha)
    colorbar = plt.colorbar(cs, ticks=ticks, label=label)
    if tick_labels is not None:
        colorbar.set_ticklabels(tick_labels)
    if show_input:
        colors = y
        if colors.ndim == 2:
            # scatter() needs a 1-d colour array.
            colors = colors.reshape(-1)
        plt.scatter(x = X[:,0], y = X[:,1], c=colors,
            s=size, cmap="viridis", alpha=max(alpha, 0.5))
    if feature_names is not None:
        plt.xlabel(feature_names[0])
        plt.ylabel(feature_names[1]) | 34,930 |
async def cardMovedUp(cardName, destination, channel, author):
    """
    Discord embed send function using a specific embed style

    Args:
        cardName (STR): Name of a given card
        destination (STR): Name of a given distentation (unused in the body)
        channel (OBJ): Discord channel object
        author (STR): Name of author (unused in the body)

    NOTE(review): the original docstring claimed a default of "Unknown" for
    `author`, but the signature has no default — kept as-is. TODO confirm.
    """
    # Deduplicate: only send if the last recorded action differs.
    if (config.lastAction != f"Card MovedUP {cardName}"):
        embed = discord.Embed(title="🚨 WEE WOO WEE WOO 🚨", colour=discord.Colour(0x12e4ff), url="https://discordapp.com", description="```css\nWe need more people like you!\nYou're a star!💪🌟```")
        # NOTE(review): all of these are set to empty strings / an empty field,
        # which adds nothing to the embed — presumably template leftovers.
        embed.set_image(url="")
        embed.set_thumbnail(url="")
        embed.set_author(name="", url="", icon_url="")
        embed.set_footer(text="", icon_url="")
        embed.add_field(name=f"", value=f"", inline = False)
        config.lastAction = f"Card MovedUP {cardName}"
        await channel.send(embed=embed) | 34,931 |
def get_name():
    """MUST HAVE FUNCTION! Returns plugin name."""
    plugin_name = "ASP.NET MVC"
    return plugin_name
def tag_images(
    x_test,
    test_case_images,
    tag_list_file,
    image_source,
    clean=False
):
    """
    Performs the tagging of abnormal images using RTEX@T.

    :param x_test: image tensor batch fed to the tagging model
    :param test_case_images: dict whose keys identify the test cases, in the
        same order as the rows of `x_test`
    :param tag_list_file: CSV file (no header) listing the candidate tags
    :param image_source: label used to name the prediction dump file
    :param clean: if True the prediction is performed in any case; if False a
        dump file is loaded when it exists
    :return: (tagging_results dict mapping case id -> ';'-joined tags,
        tag_list, the loaded RTEX@T model)
    """
    rtex_t_model = load_model("data/models/rtex_t/iu_xray_tag_cxn.hdf5")
    dump_file_name = f"data/{image_source}_rtex_t_tags_pre_calc.txt"
    if not clean and os.path.isfile(dump_file_name):
        print("Using pre-stored RTEX@T results from dump file!")
        # Context managers guarantee the dump file is closed (the original
        # leaked both file handles, risking unflushed writes).
        with open(dump_file_name, "r") as predicted_tags_file:
            test_tag_probs = np.loadtxt(predicted_tags_file)
    else:
        print("Performing RTEX@T predictions!")
        test_tag_probs = rtex_t_model.predict(x_test, batch_size=16, verbose=1)
        with open(dump_file_name, "w") as predicted_tags_file:
            np.savetxt(predicted_tags_file, test_tag_probs)
    tag_df = pd.read_csv(tag_list_file, header=None)
    tag_list = tag_df[0].to_list()
    best_threshold = 0.097
    tagging_results = {}
    # Materialize the key order once instead of rebuilding it per iteration.
    case_ids = list(test_case_images.keys())
    # for each exam, assign all tags above threshold
    for i in range(len(test_tag_probs)):
        predicted_tags = [
            tag_list[j]
            for j in range(len(tag_list))
            if test_tag_probs[i, j] >= best_threshold
        ]
        tagging_results[case_ids[i]] = ";".join(predicted_tags)
    return tagging_results, tag_list, rtex_t_model
def close_debug_windows():
    """
    Close all debugging related views in active window.
    """
    window = sublime.active_window()
    # Remember current active view
    current_active_view = window.active_view()
    # 'close' acts on the focused view, so each debug view must be focused
    # before it can be closed.
    for view in window.views():
        if is_debug_view(view):
            window.focus_view(view)
            window.run_command('close')
    window.run_command('hide_panel', {'panel': 'output.xdebug'})
    # Restore focus to current active view
    window.focus_view(current_active_view) | 34,934 |
def test_consensus_cluster_membership():
    """Tests reading cluster membership from a consensus cluster."""
    # Spin up a 3-node consensus cluster and verify membership is visible
    # from an arbitrary node's client API.
    with create_cluster('consensus', nodes=3) as cluster:
        node = cluster.node(1)
        assert len(node.client.cluster.nodes()) == 3, "number of nodes is not equal to 3"
        assert node.client.cluster.node().id == node.name, "node identifier is not " + node.name | 34,935 |
def load_CIFAR(model_mode):
    """
    Loads CIFAR-100 and maps it to Target Model and Shadow Model.

    The target model trains on samples 40000-49999 and is evaluated on the
    official test split; the shadow model trains on samples 0-9999 and is
    evaluated on samples 10000-19999.  Labels are one-hot encoded with 100
    classes (fine labels).

    :param model_mode: one of "TargetModel" and "ShadowModel".
    :return: Tuple of numpy arrays:'(x_train, y_train), (x_test, y_test), member',
        where `member` is 1 for training (member) samples and 0 for test
        (non-member) samples.
    :raise: ValueError: in case of invalid `model_mode`.
    """
    if model_mode not in ['TargetModel', 'ShadowModel']:
        raise ValueError('model_mode must be one of TargetModel, ShadowModel.')
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar100.load_data(label_mode='fine')
    if model_mode == "TargetModel":
        (x_train, y_train), (x_test, y_test) = (x_train[40000:50000], y_train[40000:50000]), \
                                               (x_test, y_test)
    elif model_mode == "ShadowModel":
        (x_train, y_train), (x_test, y_test) = (x_train[:10000], y_train[:10000]), \
                                               (x_train[10000:20000], y_train[10000:20000])
    y_train = tf.keras.utils.to_categorical(y_train, num_classes=100)
    # Membership labels: 1 = seen during training, 0 = held out.
    m_train = np.ones(y_train.shape[0])
    y_test = tf.keras.utils.to_categorical(y_test, num_classes=100)
    m_test = np.zeros(y_test.shape[0])
    member = np.r_[m_train, m_test]
    return (x_train, y_train), (x_test, y_test), member | 34,936 |
def _jbackslashreplace_error_handler(err):
    """
    Encoding error handler which replaces invalid characters with Java-compliant Unicode escape sequences.

    :param err: An `:exc:UnicodeEncodeError` instance.
    :return: See https://docs.python.org/2/library/codecs.html?highlight=codecs#codecs.register_error
    """
    # This handler only knows how to deal with encode errors.
    if not isinstance(err, UnicodeEncodeError):
        raise err
    bad_segment = err.object[err.start:err.end]
    return _escape_non_ascii(bad_segment), err.end
def calculate_spark_settings(instance_type, num_slaves, max_executor=192,
                             memory_overhead_coefficient=0.15,
                             num_partitions_factor=3):
    """Derive Spark executor/driver settings for an EMR instance type.

    Enumerates candidate executors-per-node counts, scores each by the sum of
    unused cores and unused memory per node, and picks the configuration that
    wastes the least.

    More info:
    http://c2fo.io/c2fo/spark/aws/emr/2016/07/06/apache-spark-config-cheatsheet/

    :param instance_type: EMR instance type key into the instance table
    :param num_slaves: number of worker nodes in the cluster
    :param max_executor: upper bound on executors-per-node to try
    :param memory_overhead_coefficient: fraction of executor memory reserved
        as overhead
    :param num_partitions_factor: partitions per executor core
    :return: (spark settings dict, chosen candidate dict)
    """
    all_instances = load_emr_instance()
    inst = all_instances[instance_type]
    executor_memory_upper_bound = 64
    executor_core_upper_bound = 5
    # Reserve 1 GB / 1 core per node for the OS and node daemons.
    available_memory = inst["memory"] - 1
    available_cpu = inst["cpu"] - 1
    parallelism_per_core = 2
    # NOTE(review): `ret` is first a list of candidates and later rebound to
    # the final settings dict — two different roles for one name.
    ret = []
    for x in range(1, max_executor):
        total_memory_per_executor = math.floor(available_memory / x)
        unused_memory_per_node = available_memory - (
            x * total_memory_per_executor)
        total_core_per_executor = available_cpu / x
        unused_cores_per_node = available_cpu - (
            x * math.floor(total_core_per_executor))
        overhead_mem = total_memory_per_executor * memory_overhead_coefficient
        mem_executer = total_memory_per_executor - overhead_mem
        cores_executer = math.floor(available_cpu / x)
        # Keep only candidates within per-executor memory/core limits.
        if (total_memory_per_executor < executor_memory_upper_bound and
                total_core_per_executor < executor_core_upper_bound):
            ret.append({
                "executor_per_node": x,
                # YARN enforces a minimum overhead of 384 MB.
                "overhead_mem": max(overhead_mem * 1024, 384),
                "unused_cores": unused_cores_per_node,
                "mem_executer": mem_executer,
                "cores_executer": cores_executer,
                "unused_mem": unused_memory_per_node,
                "sum": unused_cores_per_node + unused_memory_per_node
            })
    # Pick the candidate with the smallest combined waste.
    val, idx = min(
        (val, idx) for (idx, val) in enumerate([x["sum"] for x in ret]))
    # plot(max_executor, ret)
    opt = ret[idx]
    # One executor slot is reserved for the driver.
    executer_instances = (opt["executor_per_node"] * num_slaves) - 1
    parallelism = (
        executer_instances * opt["cores_executer"] * parallelism_per_core)
    num_partitions = int(opt["cores_executer"] * executer_instances
                         * num_partitions_factor)
    print("Optimal numPartitions: %s " % num_partitions, file=sys.stderr)
    ret = {
        "spark.executor.instances": str(executer_instances),
        "spark.executor.memoryOverhead": "%sm" % int(opt["overhead_mem"]),
        "spark.executor.memory": "%sm" % int(opt["mem_executer"] * 1024),
        "spark.driver.memoryOverhead": "%sm" % int(opt["overhead_mem"]),
        "spark.driver.memory": "%sm" % int(opt["mem_executer"] * 1024),
        "spark.driver.maxResultSize": "%sm" % int(opt["mem_executer"] * 1024),
        "spark.executor.cores": str(int(opt["cores_executer"])),
        "spark.driver.cores": str(int(opt["cores_executer"])),
        "spark.default.parallelism": str(int(parallelism)),
    }
    return ret, opt | 34,938 |
def run_on_host(con_info, command):
    """
    Runs a command on a target pool of host defined in a hosts.yaml file.

    :param con_info: (name, settings) pair where settings carries the 'IP',
        'PORT', 'USER' and 'NAME' keys for the target host
    :param command: shell command to execute remotely
    :return: dict with 'name', 'uname', 'status', 'stdout' and 'stderr'
    """
    # Paramiko client configuration
    paramiko.util.log_to_file(base + "prt_paramiko.log")
    UseGSSAPI = (paramiko.GSS_AUTH_AVAILABLE)
    DoGSSAPIKeyExchange = (paramiko.GSS_AUTH_AVAILABLE)
    try:
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.load_system_host_keys()
        if not UseGSSAPI and not DoGSSAPIKeyExchange:
            client.connect(
                con_info[1]['IP'],
                port=con_info[1]['PORT'],
                username=con_info[1]['USER'],
                key_filename=str(base + 'prt_rsa.key')
            )
        else:
            client.connect(
                con_info[1]['IP'],
                port=con_info[1]['PORT'],
                # BUG FIX: the GSSAPI branch indexed the tuple directly
                # (con_info['USER']), which raises TypeError; the username
                # lives in the settings dict, as in the branch above.
                username=con_info[1]['USER'],
                # BUG FIX: str(Path.home()) + 'prt_rsa.key' produced e.g.
                # '/home/userprt_rsa.key' (missing separator); join properly.
                key_filename=os.path.join(str(Path.home()), 'prt_rsa.key'),
                gss_auth=UseGSSAPI,
                gss_kex=DoGSSAPIKeyExchange,
            )
        con_status = str('Connection Succeeded')
        stdin, stdout, stderr = client.exec_command(command)
        results_dict = {
            'name': con_info[0],
            'uname': con_info[1]['NAME'],
            'status': con_status,
            'stdout': [x.replace('\n', '') for x in stdout.readlines()],
            'stderr': [x.replace('\n', '') for x in stderr.readlines()]
        }
        client.close()
    except Exception as error:
        con_status = str("Connection Failed : PRT Caught exception(%s: %s" % (error.__class__, error) + ')')
        results_dict = {
            'name': con_info[0],
            'uname': con_info[1]['NAME'],
            'status': con_status,
            'stdout': [],
            'stderr': []
        }
        # Best-effort cleanup; the client may not have been fully created.
        try:
            client.close()
        except Exception:
            pass
    return results_dict
def main(argv=None):
    """Launch the GUI, collect user inputs, then batch-rename shapefile fields.
    """
    print("starting GUI...")
    in_shapefile, info, out_shapefile = gui_inputs()
    print("Start renaming fields...")
    batch_rename_field_shapefile(in_shapefile, info, out_shapefile)
    print("... renaming completed!")
def split(reader, line_count, suffix="%05d.pickle", dumper=pickle.dump):
    """
    you can call the function as:

    split(paddle.dataset.cifar.train10(), line_count=1000,
        suffix="imikolov-train-%05d.pickle")

    the output files as:

    |-imikolov-train-00000.pickle
    |-imikolov-train-00001.pickle
    |- ...
    |-imikolov-train-00480.pickle

    :param reader: is a reader creator
    :param line_count: line count for each file
    :param suffix: the suffix for the output files, should contain "%d"
        means the id for each file. Default is "%05d.pickle"
    :param dumper: is a callable function that dump object to file, this
        function will be called as dumper(obj, f) and obj is the object
        will be dumped, f is a file object. Default is cPickle.dump.
    """
    if not callable(dumper):
        raise TypeError("dumper should be callable.")
    lines = []
    indx_f = 0
    for d in reader():
        lines.append(d)
        # BUG FIX: the original condition (i >= line_count and
        # i % line_count == 0) put line_count + 1 items in the first file;
        # counting the buffer itself gives exactly line_count per file.
        if len(lines) == line_count:
            # BUG FIX: files are opened in binary mode — the default
            # pickle.dump requires a bytes file object on Python 3.
            with open(suffix % indx_f, "wb") as f:
                dumper(lines, f)
            lines = []
            indx_f += 1
    if lines:
        with open(suffix % indx_f, "wb") as f:
            dumper(lines, f)
def extract(root_data_folder, dry=False):
    """
    Extracts behaviour only

    :param root_data_folder: root folder scanned for '**/extract_me.flag'
        marker files
    :param dry: if True, dry-run flag passed through to `extract_session.bulk`
    """
    extract_session.bulk(root_data_folder, dry=dry, glob_flag='**/extract_me.flag') | 34,942 |
def uniqueColor(string):
    """
    Returns a color from the string.
    Same strings will return same colors, different strings will return different colors ('randomly' different)

    Internal: string =md5(x)=> hex =x/maxhex=> float [0-1] =hsv_to_rgb(x,1,1)=> rgb =rgb_to_int=> int

    :param string: input string
    :return: int color
    """
    # Map the md5 digest onto [0, 1) and use it as an HSV hue.
    digest = hashlib.md5(string.encode('utf-8')).hexdigest()
    hue = int(digest, 16) / 2 ** 128
    rgb = colorsys.hsv_to_rgb(hue, 1, 1)
    # Pack the three 8-bit channels into a single 0xRRGGBB integer.
    color = 0
    for channel, shift in zip(rgb, (16, 8, 0)):
        color += round(channel * 255) << shift
    return color
def _quoteattr(data, entities={}):
    """ Escape and quote an attribute value.

    Escape &, <, and > in a string of data, then quote it for use as
    an attribute value.  The " character will be escaped as well, if
    necessary.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # Work on a copy: the original mutated the shared default dict (and any
    # caller-supplied mapping), leaking the '\n'/'\r' entries across calls.
    entities = dict(entities)
    entities['\n'] = '&#10;'
    entities['\r'] = '&#13;'
    data = _escape(data, entities)
    if '"' in data:
        if "'" in data:
            # Data contains both quote kinds: double-quote the value and
            # escape the embedded double quotes.
            data = '"%s"' % data.replace('"', '&quot;')
        else:
            data = "'%s'" % data
    else:
        data = '"%s"' % data
    return data
def external_login_confirm_email_get(auth, uid, token):
    """
    View for email confirmation links when user first login through external identity provider.
    HTTP Method: GET

    When users click the confirm link, they are expected not to be logged in. If not, they will be logged out first and
    redirected back to this view. After OSF verifies the link and performs all actions, they will be automatically
    logged in through CAS and redirected back to this view again being authenticated.

    :param auth: the auth context
    :param uid: the user's primary key
    :param token: the verification token
    :raises HTTPError: 400 for a missing user/destination, invalid token or
        wrong provider; 403 when the external identity is already claimed
    """
    user = User.load(uid)
    if not user:
        raise HTTPError(http.BAD_REQUEST)
    destination = request.args.get('destination')
    if not destination:
        raise HTTPError(http.BAD_REQUEST)
    # if user is already logged in
    if auth and auth.user:
        # if it is a wrong user
        if auth.user._id != user._id:
            return auth_logout(redirect_url=request.url)
        # if it is the expected user
        new = request.args.get('new', None)
        if destination in campaigns.get_campaigns():
            # external domain takes priority
            campaign_url = campaigns.external_campaign_url_for(destination)
            if not campaign_url:
                campaign_url = campaigns.campaign_url_for(destination)
            return redirect(campaign_url)
        if new:
            status.push_status_message(language.WELCOME_MESSAGE, kind='default', jumbotron=True, trust=True)
        return redirect(web_url_for('dashboard'))
    # token is invalid
    if token not in user.email_verifications:
        raise HTTPError(http.BAD_REQUEST)
    verification = user.email_verifications[token]
    email = verification['email']
    # NOTE(review): indexing dict.keys() works only on Python 2 (list); on
    # Python 3 this raises TypeError -- confirm the target runtime.
    provider = verification['external_identity'].keys()[0]
    provider_id = verification['external_identity'][provider].keys()[0]
    # wrong provider
    if provider not in user.external_identity:
        raise HTTPError(http.BAD_REQUEST)
    external_status = user.external_identity[provider][provider_id]
    try:
        ensure_external_identity_uniqueness(provider, provider_id, user)
    except ValidationError as e:
        raise HTTPError(http.FORBIDDEN, e.message)
    if not user.is_registered:
        user.register(email)
    if email.lower() not in user.emails:
        user.emails.append(email.lower())
    user.date_last_logged_in = timezone.now()
    # mark this external identity as verified and remember the mapping
    user.external_identity[provider][provider_id] = 'VERIFIED'
    user.social[provider.lower()] = provider_id
    del user.email_verifications[token]
    # one-time key used below so CAS can authenticate the user on redirect
    user.verification_key = generate_verification_key()
    user.save()
    service_url = request.url
    if external_status == 'CREATE':
        mails.send_mail(
            to_addr=user.username,
            mail=mails.WELCOME,
            mimetype='html',
            user=user
        )
        # NOTE(review): urllib.urlencode is the Python 2 location (py3:
        # urllib.parse.urlencode) -- consistent with the .keys()[0] usage.
        service_url += '&{}'.format(urllib.urlencode({'new': 'true'}))
    elif external_status == 'LINK':
        mails.send_mail(
            user=user,
            to_addr=user.username,
            mail=mails.EXTERNAL_LOGIN_LINK_SUCCESS,
            external_id_provider=provider,
        )
    # redirect to CAS and authenticate the user with the verification key
    return redirect(cas.get_login_url(
        service_url,
        username=user.username,
        verification_key=user.verification_key
    ))
def date_since_epoch(date, unit='day'):
    """Convert an offset since the Earth Engine epoch into an ``ee.Date``.

    :param date: offset from the epoch, expressed in ``unit``
    :type date: int
    :param unit: one of 'year', 'month', 'week', 'day', 'hour', 'minute',
        or 'second'
    :return: the corresponding date
    :rtype: ee.Date
    """
    return ee.Date(EE_EPOCH.isoformat()).advance(date, unit)
def main():
    """Console quadratic-equation solver.

    Reads integer coefficients a, b and c and prints the real roots of
    a*x^2 + b*x + c = 0, with output depending on the sign of the
    discriminant (two roots, one root, or no real roots).
    """
    print('stanCode Quadratic Solver')
    a = int(input('Enter a:'))
    b = int(input('Enter b:'))
    c = int(input('Enter c:'))
    discriminant = b * b - 4 * a * c
    if discriminant > 0:
        # Two distinct real roots.
        sqrt_d = math.sqrt(b ** 2 - 4 * a * c)
        first = (-b + sqrt_d) / (2 * a)
        second = (-b - sqrt_d) / (2 * a)
        print('two roots: ' + str(first) + ' , ' + str(second))
    elif discriminant == 0:
        # One repeated real root.
        print('one roots: ' + str(-b / (2 * a)))
    else:
        # Negative discriminant: no real solutions.
        print('no real roots')
def eval_assoc(param_list, meta):
    """Score how strongly a text associates with each of a set of categories.

    Param 1 - string, the text in question
    Param 2 - list of strings, the categories/statements to associate with it

    Returns the 'sorted_associations' field of the BART API response.
    """
    payload = {'op': 'eval_assoc', 'text': param_list[0], 'cats': param_list[1]}
    response = BART_API.post(payload)
    return response['sorted_associations']
def quiver3d(*args, **kwargs):
    """Thin wrapper around `mayavi.mlab.quiver3d` that also applies a colormap.

    Colormap-related keyword arguments (cmap, alpha, clim, symmetric,
    logscale -- see :py:func:`apply_cmap`) are popped from ``kwargs``;
    everything else is forwarded to `mayavi.mlab.quiver3d`.

    Args:
        *args: passed to `mayavi.mlab.quiver3d`
        **kwargs: remaining keywords passed to `mayavi.mlab.quiver3d`

    Returns:
        The quiver object created by mayavi.
    """
    kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
    quiver_obj = mlab.quiver3d(*args, **kwargs)
    # Apply the colormap to both the scalar and the vector data channels.
    for channel in ('scalar', 'vector'):
        apply_cmap(quiver_obj, mode=channel, **cmap_kwargs)
    return quiver_obj
def get_instance_types(self):
    """
    Documentation:

    ---
    Description:
        Return all EC2 instance type names known to this client's region,
        sorted alphabetically, as reported by describe_instance_types.

    ---
    Returns:
        list of str : sorted instance type names (e.g. "t2.micro")
    """
    # NOTE(review): describe_instance_types is paginated; a single call may
    # not return every type -- confirm whether a paginator is needed here.
    instance_types = sorted([instance_type["InstanceType"] for instance_type in self.ec2_client.describe_instance_types()["InstanceTypes"]])
    return instance_types
def analysis_result():
    """Load the saved model, predict, plot, and report MAE against targets.

    NOTE(review): the original description said this analyses *test* data,
    but the prediction and error metrics below are computed on the
    *training* split (train_x/train_y); only the printed example count uses
    test_x.shape[0] -- confirm which split is intended.
    """
    train_x, train_y, test_x, test_y = eunite_data.load_eunite_train_data()
    model = keras.models.load_model("./model/load_model")
    # test_x.shape = 229 * 13
    y_predict = model.predict(train_x)
    draw_image(y_predict, train_y)
    # mean absolute error, and MAE as a fraction of the true values
    mae = np.mean(np.abs(y_predict - train_y))
    mae_percent = np.mean(mae / train_y)
    print("example_number =%s, mean mae= %s" % (test_x.shape[0], mae))
    print("example_number =%s, mean mae percent= %s" % (test_x.shape[0], mae_percent))
def import_zip(wiki, file):
    """
    Import a zip into a wiki

    Zip should contain two directories:
        pages
            page.txt            # path: page
            page
                subpage.txt     # path: page/subpage
        assets
            internal_id.jpg

    Page extensions must be .txt or .pt - both are treated as pretext
    format; other extensions are skipped.  Asset extensions should
    indicate filetype.

    Page titles are expected on the first line, with the prefix "TITLE: "; if
    the first line does not match this format, it will be considered to be part
    of the content, and the title will be based on the path.

    These can optionally be held inside a directory whose name matches the zip,
    for example:
        mywiki.zip
            mywiki
                pages
                assets

    Raises ZipImportError when the upload is not a valid zip or lacks the
    expected pages/assets directories.
    """
    # Test filename
    zipname, ext = os.path.splitext(file.name)
    if (
        not isinstance(file, InMemoryUploadedFile)
        # temporary_file_path is a *method* on Django's TemporaryUploadedFile;
        # it must be called to obtain the on-disk path for is_zipfile.
        and not zipfile.is_zipfile(file.temporary_file_path())
    ) or ext.lower() != ".zip":
        raise ZipImportError("File is not a zip file")
    # Open the zip file
    try:
        zf = zipfile.ZipFile(file)
    except zipfile.BadZipFile as e:
        raise ZipImportError("Bad zip file: %s" % e)
    except zipfile.LargeZipFile:
        raise ZipImportError("Bad zip file: ZIP64 not enabled")
    filenames = [info.filename for info in zf.infolist()]
    # Check the zip for optional root container
    path_con = zipname + "/"
    path_pages = "pages/"
    path_assets = "assets/"
    if path_con in filenames:
        path_pages = path_con + path_pages
        path_assets = path_con + path_assets
    # Check the structure of the zip
    if path_pages not in filenames:
        raise ZipImportError("Pages directory not found")
    if path_assets not in filenames:
        raise ZipImportError("Assets directory not found")
    # Import the pages
    pages = [filename for filename in filenames if filename.startswith(path_pages)]
    for filename in pages:
        # Find page path and read content
        page_path, ext = os.path.splitext(filename[len(path_pages) :])
        if ext.lower() not in [".txt", ".pt"]:
            continue
        # ZipFile.read returns bytes; decode so the title-prefix comparison
        # and the "\n".join below operate on str (they would raise on bytes).
        content = zf.read(filename).decode("utf-8")
        # Get or create Page
        try:
            page = models.Page.objects.get(wiki=wiki, path=page_path)
        except models.Page.DoesNotExist:
            page = models.Page(wiki=wiki, path=page_path)
        # Set title and content
        lines = content.splitlines()
        if lines and lines[0].startswith(TITLE_PREFIX):
            # Strip the "TITLE: " prefix; slicing needs the prefix *length*
            # (slicing with the string itself raises TypeError).
            page.title = lines.pop(0)[len(TITLE_PREFIX):].strip()
        else:
            page.title = title_from_path(page_path)
        page.content = "\n".join(lines)
        page.save()
def ret_digraph_points(sed, digraph):
    """Return the points recorded for *digraph* in subject-extracted data.

    Parameters
    ----------
    `sed` (object) "_subject","_track_code", "data": [{"digraph","points"}]

    Returns
    ---------
    (list) The points of the particular digraph found; an empty list (with
    a warning sent to the node) when the digraph is absent.
    """
    matches = [entry['points'] for entry in sed['data']
               if entry['digraph'] == digraph]
    if not matches:
        pynocomm.send_to_node(
            '**Warning: No digraph points found in ret_digraph_points, digraph:' + digraph)
        return matches
    return matches[0]
async def test_async_setup_entry_host_unavailable(hass):
    """Test async_setup_entry when host is unavailable.

    Setup should fail, leave the config entry in the retry state, and the
    sensor entity should remain absent after the retry window elapses
    while the host is still unreachable.
    """
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_HOST: HOST, CONF_PORT: PORT},
        unique_id=f"{HOST}:{PORT}",
    )
    # Simulate a DNS resolution failure during the initial setup attempt.
    with patch(
        "homeassistant.components.cert_expiry.helper.get_cert",
        side_effect=socket.gaierror,
    ):
        entry.add_to_hass(hass)
        assert await hass.config_entries.async_setup(entry.entry_id) is False
        await hass.async_block_till_done()
    assert entry.state == ENTRY_STATE_SETUP_RETRY
    # Advance time past the retry interval while the host keeps failing.
    next_update = utcnow() + timedelta(seconds=45)
    async_fire_time_changed(hass, next_update)
    with patch(
        "homeassistant.components.cert_expiry.helper.get_cert",
        side_effect=socket.gaierror,
    ):
        await hass.async_block_till_done()
    # No sensor entity should ever have been created.
    state = hass.states.get("sensor.cert_expiry_timestamp_example_com")
    assert state is None
def _sizeof_fmt(num):
"""Format byte size to human-readable format.
https://web.archive.org/web/20111010015624/http://blogmag.net/blog/read/38/Print_human_readable_file_size
Args:
num (float): Number of bytes
"""
for x in ["bytes", "KB", "MB", "GB", "TB", "PB"]:
if num < 1024.0:
return f"{num:3.1f} {x}"
num /= 1024.0 | 34,955 |
def gatherData(gatherLFP = True):
    """
    Gather simulation data, cells and pops from all MPI nodes onto rank 0.

    Populates ``sim.allSimData``, ``sim.net.allCells`` and
    ``sim.net.allPops`` on the root node (for single-node runs the local
    data is mirrored into the same structures for consistency), then
    prints summary statistics (cells, connections, spikes, run time).

    Parameters
    ----------
    gatherLFP : bool
        Whether to gather (sum) LFP arrays across nodes.
        **Default:** ``True``

    Returns
    -------
    dict
        ``sim.allSimData``, the gathered simulation data.
    """
    from .. import sim
    sim.timing('start', 'gatherTime')
    ## Pack data from all hosts
    if sim.rank==0:
        print('\nGathering data...')
    # flag to avoid saving sections data for each cell (saves gather time and space; cannot inspect cell secs or re-simulate)
    if not sim.cfg.saveCellSecs:
        for cell in sim.net.cells:
            cell.secs = None
            cell.secLists = None
    # flag to avoid saving conns data for each cell (saves gather time and space; cannot inspect cell conns or re-simulate)
    if not sim.cfg.saveCellConns:
        for cell in sim.net.cells:
            cell.conns = []
    # Store conns in a compact list format instead of a long dict format (cfg.compactConnFormat contains list of keys to include)
    elif sim.cfg.compactConnFormat:
        sim.compactConnFormat()
    # remove data structures used to calculate LFP
    if gatherLFP and sim.cfg.recordLFP and hasattr(sim.net, 'compartCells') and sim.cfg.createNEURONObj:
        for cell in sim.net.compartCells:
            try:
                del cell.imembVec
                del cell.imembPtr
                del cell._segCoords
            except:
                pass
        for pop in list(sim.net.pops.values()):
            try:
                del pop._morphSegCoords
            except:
                pass
    # keys of simData entries that hold hoc Vectors and must be list()-ified
    simDataVecs = ['spkt', 'spkid', 'stims', 'dipole'] + list(sim.cfg.recordTraces.keys())
    if sim.cfg.recordDipoles:
        _aggregateDipoles()
        # NOTE(review): 'dipole' is already in simDataVecs above, so this
        # append duplicates the entry -- confirm whether that is intended.
        simDataVecs.append('dipole')
    # per-node vectors that are identical on every node (take rank 0's copy)
    singleNodeVecs = ['t']
    if sim.nhosts > 1:  # only gather if >1 nodes
        netPopsCellGids = {popLabel: list(pop.cellGids) for popLabel,pop in sim.net.pops.items()}
        # gather only sim data
        if getattr(sim.cfg, 'gatherOnlySimData', False):
            nodeData = {'simData': sim.simData}
            data = [None]*sim.nhosts
            data[0] = {}
            for k,v in nodeData.items():
                data[0][k] = v
            gather = sim.pc.py_alltoall(data)
            sim.pc.barrier()
            if sim.rank == 0: # simData
                print('  Gathering only sim data...')
                sim.allSimData = Dict()
                for k in list(gather[0]['simData'].keys()):  # initialize all keys of allSimData dict
                    if gatherLFP and k == 'LFP':
                        sim.allSimData[k] = np.zeros((gather[0]['simData']['LFP'].shape))
                    elif sim.cfg.recordDipoles and k == 'dipole':
                        for dk in sim.cfg.recordDipoles:
                            sim.allSimData[k][dk] = np.zeros(len(gather[0]['simData']['dipole'][dk]))
                    else:
                        sim.allSimData[k] = {}
                for key in singleNodeVecs: # store single node vectors (eg. 't')
                    sim.allSimData[key] = list(nodeData['simData'][key])
                # fill in allSimData taking into account if data is dict of h.Vector (code needs improvement to be more generic)
                for node in gather:  # concatenate data from each node
                    for key,val in node['simData'].items():  # update simData dics of dics of h.Vector
                        if key in simDataVecs:          # simData dicts that contain Vectors
                            if isinstance(val, dict):
                                for key2,val2 in val.items():
                                    if isinstance(val2,dict):
                                        sim.allSimData[key].update(Dict({key2:Dict()}))
                                        for stim,val3 in val2.items():
                                            sim.allSimData[key][key2].update({stim:list(val3)}) # udpate simData dicts which are dicts of dicts of Vectors (eg. ['stim']['cell_1']['backgrounsd']=h.Vector)
                                    #elif key == 'dipole':
                                    #    sim.allSimData[key][key2] = np.add(sim.allSimData[key][key2],val2.as_numpy()) # add together dipole values from each node
                                    else:
                                        sim.allSimData[key].update({key2:list(val2)})  # udpate simData dicts which are dicts of Vectors (eg. ['v']['cell_1']=h.Vector)
                            else:
                                sim.allSimData[key] = list(sim.allSimData[key])+list(val) # udpate simData dicts which are Vectors
                        elif gatherLFP and key == 'LFP':
                            sim.allSimData[key] += np.array(val)
                        elif key not in singleNodeVecs:
                            sim.allSimData[key].update(val)           # update simData dicts which are not Vectors
                if len(sim.allSimData['spkt']) > 0:
                    sim.allSimData['spkt'], sim.allSimData['spkid'] = zip(*sorted(zip(sim.allSimData['spkt'], sim.allSimData['spkid']))) # sort spks
                    sim.allSimData['spkt'], sim.allSimData['spkid'] = list(sim.allSimData['spkt']), list(sim.allSimData['spkid'])
                sim.net.allPops = ODict() # pops
                for popLabel,pop in sim.net.pops.items(): sim.net.allPops[popLabel] = pop.__getstate__() # can't use dict comprehension for OrderedDict
                sim.net.allCells = [c.__dict__ for c in sim.net.cells]
        # gather cells, pops and sim data
        else:
            nodeData = {'netCells': [c.__getstate__() for c in sim.net.cells], 'netPopsCellGids': netPopsCellGids, 'simData': sim.simData}
            data = [None]*sim.nhosts
            data[0] = {}
            for k,v in nodeData.items():
                data[0][k] = v
            #print data
            gather = sim.pc.py_alltoall(data)
            sim.pc.barrier()
            if sim.rank == 0:
                allCells = []
                allPops = ODict()
                for popLabel,pop in sim.net.pops.items(): allPops[popLabel] = pop.__getstate__() # can't use dict comprehension for OrderedDict
                allPopsCellGids = {popLabel: [] for popLabel in netPopsCellGids}
                sim.allSimData = Dict()
                for k in list(gather[0]['simData'].keys()):  # initialize all keys of allSimData dict
                    if gatherLFP and k == 'LFP':
                        sim.allSimData[k] = np.zeros((gather[0]['simData']['LFP'].shape))
                    elif sim.cfg.recordDipoles and k == 'dipole':
                        for dk in sim.cfg.recordDipoles:
                            sim.allSimData[k][dk] = np.zeros(len(gather[0]['simData']['dipole'][dk]))
                    else:
                        sim.allSimData[k] = {}
                for key in singleNodeVecs: # store single node vectors (eg. 't')
                    sim.allSimData[key] = list(nodeData['simData'][key])
                # fill in allSimData taking into account if data is dict of h.Vector (code needs improvement to be more generic)
                for node in gather:  # concatenate data from each node
                    allCells.extend(node['netCells'])  # extend allCells list
                    for popLabel,popCellGids in node['netPopsCellGids'].items():
                        allPopsCellGids[popLabel].extend(popCellGids)
                    for key,val in node['simData'].items():  # update simData dics of dics of h.Vector
                        if key in simDataVecs:          # simData dicts that contain Vectors
                            if isinstance(val,dict):
                                for key2,val2 in val.items():
                                    if isinstance(val2,dict):
                                        sim.allSimData[key].update(Dict({key2:Dict()}))
                                        for stim,val3 in val2.items():
                                            sim.allSimData[key][key2].update({stim:list(val3)}) # udpate simData dicts which are dicts of dicts of Vectors (eg. ['stim']['cell_1']['backgrounsd']=h.Vector)
                                    #elif key == 'dipole':
                                    #    sim.allSimData[key][key2] = np.add(sim.allSimData[key][key2],val2.as_numpy()) # add together dipole values from each node
                                    else:
                                        sim.allSimData[key].update({key2:list(val2)})  # udpate simData dicts which are dicts of Vectors (eg. ['v']['cell_1']=h.Vector)
                            else:
                                sim.allSimData[key] = list(sim.allSimData[key])+list(val) # udpate simData dicts which are Vectors
                        elif gatherLFP and key == 'LFP':
                            sim.allSimData[key] += np.array(val)
                        elif key not in singleNodeVecs:
                            sim.allSimData[key].update(val)           # update simData dicts which are not Vectors
                if len(sim.allSimData['spkt']) > 0:
                    sim.allSimData['spkt'], sim.allSimData['spkid'] = zip(*sorted(zip(sim.allSimData['spkt'], sim.allSimData['spkid']))) # sort spks
                    sim.allSimData['spkt'], sim.allSimData['spkid'] = list(sim.allSimData['spkt']), list(sim.allSimData['spkid'])
                sim.net.allCells = sorted(allCells, key=lambda k: k['gid'])
                for popLabel,pop in allPops.items():
                    pop['cellGids'] = sorted(allPopsCellGids[popLabel])
                sim.net.allPops = allPops
        # clean to avoid mem leaks
        for node in gather:
            if node:
                node.clear()
                del node
        for item in data:
            if item:
                item.clear()
                del item
    else:  # if single node, save data in same format as for multiple nodes for consistency
        if sim.cfg.createNEURONObj:
            sim.net.allCells = [Dict(c.__getstate__()) for c in sim.net.cells]
        else:
            sim.net.allCells = [c.__dict__ for c in sim.net.cells]
        sim.net.allPops = ODict() # pops
        for popLabel,pop in sim.net.pops.items(): sim.net.allPops[popLabel] = pop.__getstate__() # can't use dict comprehension for OrderedDict
        sim.allSimData = Dict()
        for k in list(sim.simData.keys()):  # initialize all keys of allSimData dict
            sim.allSimData[k] = Dict()
        for key,val in sim.simData.items():  # update simData dics of dics of h.Vector
            if key in simDataVecs+singleNodeVecs:          # simData dicts that contain Vectors
                if isinstance(val,dict):
                    for cell,val2 in val.items():
                        if isinstance(val2,dict):
                            sim.allSimData[key].update(Dict({cell:Dict()}))
                            for stim,val3 in val2.items():
                                sim.allSimData[key][cell].update({stim:list(val3)}) # udpate simData dicts which are dicts of dicts of Vectors (eg. ['stim']['cell_1']['backgrounsd']=h.Vector)
                        else:
                            sim.allSimData[key].update({cell:list(val2)})  # udpate simData dicts which are dicts of Vectors (eg. ['v']['cell_1']=h.Vector)
                else:
                    sim.allSimData[key] = list(sim.allSimData[key])+list(val) # udpate simData dicts which are Vectors
            else:
                sim.allSimData[key] = val           # update simData dicts which are not Vectors
    ## Print statistics
    sim.pc.barrier()
    if sim.rank == 0:
        sim.timing('stop', 'gatherTime')
        if sim.cfg.timing: print(('  Done; gather time = %0.2f s.' % sim.timingData['gatherTime']))
        print('\nAnalyzing...')
        sim.totalSpikes = len(sim.allSimData['spkt'])
        sim.totalSynapses = sum([len(cell['conns']) for cell in sim.net.allCells])
        if sim.cfg.createPyStruct:
            if sim.cfg.compactConnFormat:
                preGidIndex = sim.cfg.compactConnFormat.index('preGid') if 'preGid' in sim.cfg.compactConnFormat else 0
                sim.totalConnections = sum([len(set([conn[preGidIndex] for conn in cell['conns']])) for cell in sim.net.allCells])
            else:
                sim.totalConnections = sum([len(set([conn['preGid'] for conn in cell['conns']])) for cell in sim.net.allCells])
        else:
            sim.totalConnections = sim.totalSynapses
        sim.numCells = len(sim.net.allCells)
        if sim.totalSpikes > 0:
            sim.firingRate = float(sim.totalSpikes)/sim.numCells/sim.cfg.duration*1e3 # Calculate firing rate
        else:
            sim.firingRate = 0
        if sim.numCells > 0:
            sim.connsPerCell = sim.totalConnections/float(sim.numCells) # Calculate the number of connections per cell
            sim.synsPerCell = sim.totalSynapses/float(sim.numCells) # Calculate the number of connections per cell
        else:
            sim.connsPerCell = 0
            sim.synsPerCell = 0
        print(('  Cells: %i' % (sim.numCells) ))
        print(('  Connections: %i (%0.2f per cell)' % (sim.totalConnections, sim.connsPerCell)))
        if sim.totalSynapses != sim.totalConnections:
            print(('  Synaptic contacts: %i (%0.2f per cell)' % (sim.totalSynapses, sim.synsPerCell)))
        if 'runTime' in sim.timingData:
            print(('  Spikes: %i (%0.2f Hz)' % (sim.totalSpikes, sim.firingRate)))
            print(('  Simulated time: %0.1f s; %i workers' % (sim.cfg.duration/1e3, sim.nhosts)))
            print(('  Run time: %0.2f s' % (sim.timingData['runTime'])))
            if sim.cfg.printPopAvgRates and not sim.cfg.gatherOnlySimData:
                trange = sim.cfg.printPopAvgRates if isinstance(sim.cfg.printPopAvgRates, list) else None
                sim.allSimData['popRates'] = sim.analysis.popAvgRates(tranges=trange)
        if 'plotfI' in sim.cfg.analysis:
            sim.analysis.calculatefI() # need to call here so data is saved to file
        sim.allSimData['avgRate'] = sim.firingRate # save firing rate
    return sim.allSimData
def stations_by_distance(stations, p):
    """Pair each MonitoringStation with its distance from a coordinate.

    For every station in *stations*, computes the haversine distance from
    coordinate *p* (latitude, longitude) and returns the (station, distance)
    tuples sorted by distance, nearest first.
    """
    pairs = [(station, haversine(p, station.coord)) for station in stations]
    # Sort on the distance element (index 1) of each tuple.
    return sorted_by_key(pairs, 1)
def process(fh: TextIO, headers: Optional[Dict[str, str]],
            writer: csv.DictWriter, args: Args) -> int:
    """
    Average replicate measurements and write one output row per average.

    First 5 columns are: STREAM, DATE, STATION, REP, #GRIDS
    Columns after that are the measurements.  Returns the number of rows
    written.
    """
    reader = csv.DictReader(fh, delimiter=',')
    flds = reader.fieldnames
    # (field, station, date) -> list of replicate values to average
    grouped = defaultdict(list)
    for row in reader:
        # Each record needs a recognizable station and date.
        station = get_station(row.get('STATION', ''))
        date = get_date(row.get('DATE', ''))
        if not all([date, station]):
            continue
        for fld in flds[5:]:
            if fld == '':
                continue
            raw_val = row[fld].strip()
            if raw_val == '':
                continue
            # Spreadsheet exports sometimes prefix values with "="
            if raw_val.startswith('='):
                raw_val = raw_val[1:]
            try:
                grouped[(fld, station, date)].append(float(raw_val))
            except Exception:
                continue
    # Emit the average for each (variable, station, date) group.
    num_written = 0
    for (fld, station, date), replicates in grouped.items():
        # Maybe convert "ACENTR" -> "Ephemeroptera Baetidae Acentrella spp."
        variable = headers.get(fld.upper(), fld) if headers else fld
        val = mean(replicates)
        print(f'{fld} {station} {date} => {val}')
        writer.writerow({
            'source': args.source,
            'unit': '',
            'location_name': station,
            'location_type': 'station',
            'variable_name': fld,
            'variable_desc': variable,
            'collected_on': date,
            'value': val,
            'medium': args.medium
        })
        num_written += 1
    return num_written
def dipole_moment_programs():
    """Return the program modules implementing static dipole moment readers."""
    job_type = pm.Job.DIP_MOM
    return pm.program_modules_with_function(job_type)
def generate_api_v2(model_output_dir, output, level, state, fips):
    """Entry point: build the v2 API output for the selected regions.

    Regions are selected by aggregation level, state and fips (county 999
    placeholders excluded); the pyseir model output is read from
    *model_output_dir* and the generated artifacts are written to *output*.
    """
    # Load all API Regions matching the requested filters.
    regions = combined_datasets.load_us_timeseries_dataset().get_subset(
        aggregation_level=level, exclude_county_999=True, state=state, fips=fips,
    )
    _logger.info(f"Loading all regional inputs.")
    pyseir_outputs = pyseir.run.PyseirOutputDatasets.read(model_output_dir)
    api_v2_pipeline.generate_from_loaded_data(pyseir_outputs, output, regions, _logger)
def _SafeTcSetPgrp(fd, pgrp):
"""Set |pgrp| as the controller of the tty |fd|."""
try:
curr_pgrp = os.tcgetpgrp(fd)
except OSError as e:
# This can come up when the fd is not connected to a terminal.
if e.errno == errno.ENOTTY:
return
raise
# We can change the owner only if currently own it. Otherwise we'll get
# stopped by the kernel with SIGTTOU and that'll hit the whole group.
if curr_pgrp == os.getpgrp():
os.tcsetpgrp(fd, pgrp) | 34,961 |
def test_review_submission_list_query_bootcamp_run_id(admin_drf_client):
    """
    The review submission list view should return a list of submissions filtered by bootcamp run id
    """
    # One review-ready submission for the run we filter on...
    submission = ApplicationStepSubmissionFactory.create(is_review_ready=True)
    bootcamp_run = submission.bootcamp_application.bootcamp_run
    # ...plus three unrelated submissions that must be filtered out.
    ApplicationStepSubmissionFactory.create_batch(3)
    url = reverse("submissions_api-list")
    resp = admin_drf_client.get(url, {"bootcamp_run_id": bootcamp_run.id})
    assert resp.status_code == status.HTTP_200_OK
    # Exactly one result, plus facet counts reflecting only the matched run.
    assert resp.json() == {
        "count": 1,
        "next": None,
        "previous": None,
        "results": [SubmissionReviewSerializer(instance=submission).data],
        "facets": {
            "bootcamp_runs": [
                {
                    "id": bootcamp_run.id,
                    "title": bootcamp_run.title,
                    "start_date": serializer_date_format(bootcamp_run.start_date),
                    "end_date": serializer_date_format(bootcamp_run.end_date),
                    "count": 1,
                }
            ],
            "review_statuses": [
                {"review_status": submission.review_status, "count": 1}
            ],
        },
    }
def scan(self, s, pfits=True):
    """
    Read header metadata and estimate the S/N for one spectrum file,
    populating the corresponding attributes on self.

    Returns
    -------
    namedtuple('spectrum', 'w f berv bjd blaze drift timeid sn55 ')
           w    - wavelength
           f    - flux
           berv - Barycentric Earth Radial Velocity
           bjd  - Barycentric Julian Day
           blaze - Blaze filename
           drift - Used RV Drift
           sn55  - S_N order center55

    Example
    -------
    >>> read_carm_vis(filename)
    """
    HIERARCH = 'HIERARCH '
    hdulist = self.hdulist = pyfits.open(s) # slow 30 ms
    self.header = hdr = hdulist[0].header
    self.drs = hdr.get('PIPELINE', 'DRS')
    if self.drs == 'CERES':
        # CERES pipeline products use their own header keyword scheme.
        self.instname = hdr['INST']
        self.drsberv = hdr.get('BARYCENTRIC CORRECTION (KM/S)', np.nan)
        self.drsbjd = hdr.get('MBJD', np.nan) + 2400000.5   # same as MJD!?
        self.dateobs = hdr['HIERARCH SHUTTER START DATE'] + 'T' + hdr['HIERARCH SHUTTER START UT']
        self.mjd = hdr.get('HIERARCH MJD')
        self.drift = np.nan
        self.e_drift = np.nan
        self.fileid = self.dateobs
        self.calmode = "%s,%s,%s" % (hdr.get('SCI-OBJ', ''), hdr.get('CAL-OBJ', ''), hdr.get('SKY-OBJ', ''))
        self.timeid = self.fileid
        self.ccf.rvc = hdr.get('RV', np.nan)
        self.ccf.err_rvc = hdr.get('RV_E', np.nan)
        self.ra = hdr['HIERARCH RA']
        self.de = hdr['HIERARCH DEC']
        self.airmass = hdr.get('HIERARCH TARG AIRMASS START', np.nan)
        self.exptime = hdr['HIERARCH TEXP (S)']
        self.tmmean = hdr.get(HIERARCH+'CARACAL TMEAN', 0.0)
        if self.exptime: self.tmmean /= self.exptime   # normalise
        if self.tmmean == 0: self.tmmean = 0.5
        # estimate SNR from the flux differences of adjacent pixels
        f = hdulist[0].section[1]
        self.snr = np.nanmedian(np.abs(f[:,1:]/(f[:,1:]- f[:,:-1])), axis=1)
        self.sn55 = self.snr[55]
        hdr['OBJECT'] = hdr['HIERARCH TARGET NAME']
    else:
        # IRAF header, *_wave.fits
        self.instname = hdr['INSTRUME']
        self.drsberv = hdr.get('BERV', np.nan)
        self.drsbjd = hdr.get('HJD', np.nan)
        self.dateobs = hdr['DATE-OBS']
        self.mjd = hdr.get('JD', Time(self.dateobs, scale='utc').jd) - 2400000.5
        # for HPF spectra the drift is already included in the wavelength solution
        self.drift = hdr.get(HIERARCH+'CARACAL DRIFT FP RV', hdr.get(HIERARCH+'CARACAL DRIFT RV', np.nan))
        self.e_drift = hdr.get(HIERARCH+'CARACAL DRIFT FP E_RV', hdr.get(HIERARCH+'CARACAL DRIFT RVERR', np.nan))
        # NOTE(review): default of 50 is assumed when 'SNR 36' is absent -- confirm
        self.sn55 = hdr.get('SNR 36', 50)
        self.fileid = hdr.get('DATE-OBS', 0) #fileid[fileid.index('(')+1:fileid.index(')')]
        self.timeid = self.fileid
        self.calmode = "%s,%s,%s" % (hdr.get('SCI-OBJ', ''), hdr.get('CAL-OBJ', ''), hdr.get('SKY-OBJ', ''))
        #calmodedict = {'objcal':'OBJ,CAL','objsky':'OBJ,SKY'}
        #if calmode in calmodedict: calmode = calmodedict[calmode]
        self.ccf.rvc = hdr.get(HIERARCH+'CARACAL SERVAL RV', np.nan)
        self.ccf.err_rvc = hdr.get(HIERARCH+'CARACAL SERVAL E_RV', np.nan)
        self.ra = hdr['RA']
        self.de = hdr['DEC']
        self.airmass = hdr.get('AIRMASS', np.nan)
        self.exptime = hdr['EXPTIME']
        self.tmmean = hdr.get(HIERARCH+'CARACAL TMEAN', 0.0)
        if self.exptime: self.tmmean /= self.exptime   # normalise
        if self.tmmean == 0: self.tmmean = 0.5
        # estimate SNR from the flux differences of adjacent pixels
        f = hdulist[0].section
        if len(f.hdu.shape) == 3:
            # We have cube. Assuming f[0] flux, f[1] similar flux (linear) f[2] error
            f = f[0]
        self.snr = np.median(np.abs(f[:,1:]/(f[:,1:]- f[:,:-1])), axis=1)
        self.sn55 = self.snr[55]
def run_experiment(params: Dict[str, Dict]):
    """
    Run one benchmark experiment described by *params* and persist the results.

    :param params: a dictionary containing all different parameters for all
        different modules ("action_space", "environment", "model",
        "simulation").
    """
    sim_params = params["simulation"]
    # Create the action space; initialize the environment; initialize the model
    action_space = create_action_space(params=params["action_space"], **params["environment"])
    env = BenchmarkEnvironment(action_space=action_space, **params["environment"],
                               max_days=sim_params["max_days"], type=sim_params["type"])
    model = SVM(**params["model"])
    # Run the simulation
    final, days, df = run_learning(env=env, model=model, params=sim_params, show_output=False)
    # Record derived model metadata before persisting.
    params["model"].update({"number of features": env.action_space.shape[1]})
    params["model"].update({"business rules": ["amount < 5000"]})
    # Store the results
    store_results(params, final, days, df)
def kappa(a, b, c, d):
    """Cohen's kappa agreement between two GO-term annotations.

    Each argument is the *collection* of items falling in one cell of the
    2x2 contingency table (its length is the cell count):

                          GO term 2
                       |  yes  |  no  |
        GO      yes    |   a   |  b   |
        term1   no     |   c   |  d   |

    kappa(GO_1, GO_2) = 1 - (1 - po) / (1 - pe), where
        po = observed agreement = (a + d) / total
        pe = agreement expected from the marginals.
    """
    n_a, n_b, n_c, n_d = (float(len(cell)) for cell in (a, b, c, d))
    total = n_a + n_b + n_c + n_d
    po = (n_a + n_d) / total
    marginal_a = ((n_a + n_b) * (n_a + n_c)) / total
    marginal_b = ((n_c + n_d) * (n_b + n_d)) / total
    pe = (marginal_a + marginal_b) / total
    return 1 - (1 - po) / (1 - pe)
def test_8():
    """
    Test outputs of compute_direction_XY(), with n=16, m=100,
    no_vars=m: the returned direction must have one entry per variable,
    no zero entries, a maximum absolute component of exactly 1, and the
    reported function-evaluation count must equal n.
    """
    np.random.seed(91)  # fixed seed so the sampled points are reproducible
    n = 16
    m = 100
    no_vars = m
    f = est_dir.quad_f_noise
    minimizer = np.ones((m,))
    centre_point = np.random.uniform(-2, 2, (m,))
    matrix = est_dir.quad_func_params(1, 1, m)
    func_args = (minimizer, matrix, 0, 1)
    region = 0.1
    direction, func_evals = (est_dir.compute_direction_XY
                             (n, m, centre_point, f, func_args,
                              no_vars, region))
    # One function evaluation per sampled point.
    assert(func_evals == 16)
    assert(direction.shape == (m,))
    # Every component of the direction is non-zero.
    assert(np.where(direction == 0)[0].shape[0] == 0)
    # Direction is normalised by its largest absolute component.
    assert(np.max(abs(direction)) == 1)
    pos_max = np.argmax(direction)
    for j in range(no_vars):
        if j != pos_max:
            assert(abs(direction[j]) <= 1)
def test_CreativeProject_integration_ask_tell_ask_works(covars, model_type, train_X, train_Y,
                                                covars_proposed_iter, covars_sampled_iter,
                                                response_sampled_iter, monkeypatch):
    """
    test that both surrogate model and acquisition functions are added and updated following two rounds of ask-tell.
    Monkeypatch "_read_covars_manual_input" and "_read_response_manual_input" from ._observe.py to circumvent manual
    input via builtins.input. This automatically tests the new functionality of random start by starting from no data
    (train_X, train_Y)
    """
    # initialize the class
    # random_start = True is default, so this tests random start
    cc = TuneSession(covars=covars, model=model_type)
    # set attributes on class (to simulate previous iterations of ask/tell functionality)
    cc.train_X = train_X
    cc.proposed_X = train_X
    cc.train_Y = train_Y
    cc.model["covars_proposed_iter"] = covars_proposed_iter
    cc.model["covars_sampled_iter"] = covars_sampled_iter
    cc.model["response_sampled_iter"] = response_sampled_iter
    # define decorator to add 1.0 to all entries in monkeypatched returned data. This to be able to tell that the last
    # entry (from second "tell") is different than the first, and know that it has been overwritten
    def add_one(func):
        @functools.wraps(func)
        def wrapper_add_one(*args, **kwargs):
            # each call shifts the mocked data by the running call count,
            # so consecutive tells produce distinguishable rows
            wrapper_add_one.num_calls += 1
            output = func(*args, **kwargs)
            return output + wrapper_add_one.num_calls
        wrapper_add_one.num_calls = 0
        return wrapper_add_one
    # monkeypatch "_read_covars_manual_input"
    candidate_tensor = torch.tensor([[tmp[0] for tmp in covars]], dtype=torch.double)
    @add_one
    def mock_read_covars_manual_input(additional_text):
        return candidate_tensor
    monkeypatch.setattr(cc, "_read_covars_manual_input", mock_read_covars_manual_input)
    # monkeypatch "_read_response_manual_input"
    resp_tensor = torch.tensor([[12]], dtype=torch.double)
    @add_one
    def mock_read_response_manual_input(additional_text):
        return resp_tensor
    monkeypatch.setattr(cc, "_read_response_manual_input", mock_read_response_manual_input)
    # run the ask method
    cc.ask()
    # run the tell method
    cc.tell()
    # test that data is added to pretty formats
    # first tell: mocked data was shifted by +1 (one prior decorator call)
    assert cc.x_data.shape[0] == 1
    for i in range(candidate_tensor.size()[1]):
        col = cc.x_data.columns[i]
        assert cc.x_data[col].iloc[-1] == candidate_tensor[0, i].item() + 1
    assert cc.y_data.shape[0] == 1
    assert cc.y_data["Response"].iloc[-1] == resp_tensor[0, 0].item() + 1
    # grab the model state
    surrogate_model1 = cc.model["model"]
    # run the ask method AGAIN
    cc.ask()
    # grab the acquisition function
    acq_func1 = cc.acq_func["object"]
    # run the tell method AGAIN
    cc.tell()
    # test that new rows are added to pretty format data
    print(candidate_tensor)
    print(cc.x_data)
    # second tell: mocked data shifted by +2 (two decorator calls so far)
    assert cc.x_data.shape[0] == 2
    for i in range(candidate_tensor.size()[1]):
        col = cc.x_data.columns[i]
        assert cc.x_data[col].iloc[-1] == candidate_tensor[0, i].item() + 2
    assert cc.y_data.shape[0] == 2
    assert cc.y_data["Response"].iloc[-1] == resp_tensor[0, 0].item() + 2
    # grab the model state
    surrogate_model2 = cc.model["model"]
    # run the ask method a THIRD TIME
    cc.ask()
    # grab the acquisition function
    acq_func2 = cc.acq_func["object"]
    # assert that both model and acquisition functions exist
    assert cc.model["model"] is not None
    assert cc.acq_func["object"] is not None
    # assert that surrogate model has updated
    assert surrogate_model1 != surrogate_model2
    # assert that acquisition function has updated
    assert acq_func1 != acq_func2
def plot_sources(azimuth, elevation, distance=1.6):
    """Display sources in a 3D plot.

    Arguments:
        azimuth (np.ndarray): azimuth of the sources in degree. Must be same length as elevation
        elevation (np.ndarray): elevation of the sources in degree. Must be same length as azimuth
        distance (float | np.ndarray): distance of the sources to the listener. Can be either an array
            with the same length as azimuth and elevation or a single float if all sources have the same
            distance - meaning they are arranged in a sphere or circle.
    """
    # Direct `Axes3D(plt.figure())` instantiation is deprecated and, on
    # matplotlib >= 3.4, no longer attaches the axes to the figure; use the
    # supported add_subplot(projection='3d') API instead.
    ax = plt.figure().add_subplot(projection='3d')
    azimuth = np.deg2rad(azimuth)
    # Shift elevation by -90 deg so the sin/cos terms below act on the
    # colatitude (0 deg elevation -> horizontal plane).
    elevation = np.deg2rad(elevation - 90)
    x = distance * np.sin(elevation) * np.cos(azimuth)
    y = distance * np.sin(elevation) * np.sin(azimuth)
    z = distance * np.cos(elevation)
    ax.scatter(x, y, z, c='b', marker='.')  # the sources
    ax.scatter(0, 0, 0, c='r', marker='o')  # the listener at the origin
def ferret_init(id):
    """
    Initialization for the stats_chisquare Ferret PyEF
    """
    # Result lies along a single custom axis; all other axes are absent.
    custom_axes = [ pyferret.AXIS_DOES_NOT_EXIST ] * pyferret.MAX_FERRET_NDIM
    custom_axes[0] = pyferret.AXIS_CUSTOM
    # None of the arguments influence the result axes.
    no_influence = [ False ] * pyferret.MAX_FERRET_NDIM
    return {
        "numargs": 3,
        "descript": "Returns chi-square test stat. and prob. (and num. good categories, N) " \
                    "that sample counts of cat. data matches pop. expected counts. ",
        "axes": custom_axes,
        "argnames": ( "SAMPLE_CNTS", "EXPECT_CNTS", "DELTA_DEGFREE", ),
        "argdescripts": ( "Sample counts of categorical data",
                          "Expected counts or relative frequencies (will be adjusted)",
                          "Difference from standard (N-1) degrees of freedom (num. computed parameters)", ),
        "argtypes": ( pyferret.FLOAT_ARRAY, pyferret.FLOAT_ARRAY, pyferret.FLOAT_ONEVAL, ),
        "influences": ( no_influence, no_influence, no_influence, ),
    }
def has(key):
    """Return True if the current context contains the given key."""
    # The `in` operator already yields a bool, so the original
    # `not not (...)` double negation was redundant.
    return key in Context.currentContext.values
def pilot_realignement():
    """
    Realignement
    ============

    Pilot that runs the SPM realignement pipeline on the localizer toy
    dataset with smart caching enabled.
    """
    # Pilot imports
    import os
    from caps.toy_datasets import get_sample_data
    from capsul.study_config import StudyConfig
    from pclinfmri.preproc.pipeline import SpmRealignement
    """
    Study configuration
    -------------------
    We first define the working directory and guarantee this folder exists on
    the file system:
    """
    working_dir = "/volatile/nsap/pclinfmri/spmrealignement"
    if not os.path.isdir(working_dir):
        os.makedirs(working_dir)
    """
    And then define the study configuration (here we activate the smart
    caching module that will be able to remember which process has already been
    processed):
    """
    study_config = StudyConfig(
        modules=["SmartCachingConfig", "MatlabConfig", "SPMConfig",
                 "NipypeConfig"],
        use_smart_caching=True,
        matlab_exec="/neurospin/local/bin/matlab",
        use_matlab=True,
        spm_directory="/i2bm/local/spm8",
        use_spm=True,
        output_directory=working_dir)
    """
    Load the toy dataset
    --------------------
    To do so, we use the get_sample_data function to download the toy
    dataset on the local file system (here localizer data):
    """
    toy_dataset = get_sample_data("localizer")
    """
    The toy_dataset is an Enum structure with some specific elements of
    interest:

    * **??**: ??.

    Processing definition
    ---------------------
    First create the
    :ref:`slice timing pipeline <pclinfmri.preproc.pipeline.SliceTiming>` that
    define the different step of the processings:
    """
    pipeline = SpmRealignement()
    # BUGFIX: this was a Python-2 print statement; the rest of the function
    # (and file) uses the Python-3 print() function.
    print(pipeline.get_input_spec())
    """
    Now we need now to parametrize this pipeline:
    """
    pipeline.fmri_file = toy_dataset.fmri
    pipeline.register_to_mean = True
    """
    The pipeline is now ready to be run:
    """
    study_config.run(pipeline, executer_qc_nodes=True, verbose=1)
    """
    Results
    -------
    Finally, we print the pipeline outputs:
    """
    print("\nOUTPUTS\n")
    for trait_name, trait_value in pipeline.get_outputs().items():
        print("{0}: {1}".format(trait_name, trait_value))
def VGG_16(weights_path=None):
    """
    Creates a convolutional keras neural network, training it with data from ct scans from both datasets.
    Using the VGG-16 architecture.

    Parameters
    ----------
    weights_path : str, optional
        If given, weights are loaded from this file after training.

    ----
    Returns the model
    """
    # Load and concatenate training data from both datasets (64x64 slices).
    X_train, Y_train = loadfromh5(1, 2, 19)
    X_train1, Y_train1 = loadfromh5(2, 2, 19)
    X_train.extend(X_train1)
    Y_train.extend(Y_train1)
    # Reshape to NHWC with a single channel.
    X_train = np.asarray(X_train).reshape(np.asarray(X_train).shape[0], 64, 64, 1)
    # X_train = np.transpose(X_train, (0,3,1,2))
    print(X_train.shape)
    # NOTE(review): Convolution2D(n, 3, 3) is the Keras 1.x positional API;
    # Keras 2 expects Conv2D(n, (3, 3)) — confirm the installed version.
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(64, 64, 1)))
    model.add(Convolution2D(64, 3, 3, activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation="relu"))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Fully-connected head, single regression output (MSE loss below).
    model.add(Flatten())
    model.add(Dense(4096, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    model.compile(loss="mean_squared_error", optimizer="sgd", metrics=["mse"])
    K.set_value(model.optimizer.learning_rate, 0.001)
    model.fit(X_train, Y_train, batch_size=32, epochs=10, verbose=1)
    # NOTE(review): weights are loaded *after* training, discarding the fit
    # above when weights_path is given — confirm this ordering is intended.
    if weights_path:
        model.load_weights(weights_path)
    return model
def test_io_stc_h5(tmpdir, is_complex, vector):
    """Test IO for STC files using HDF5."""
    # Build a fake (vector) source estimate with the requested dtype.
    if vector:
        stc = _fake_vec_stc(is_complex=is_complex)
    else:
        stc = _fake_stc(is_complex=is_complex)
    # Unknown ftype must be rejected.
    pytest.raises(ValueError, stc.save, tmpdir.join('tmp'),
                  ftype='foo')
    out_name = tmpdir.join('tmp')
    stc.save(out_name, ftype='h5')
    stc.save(out_name, ftype='h5')  # test overwrite
    # The reader must resolve the file with or without the -stc(.h5) suffix.
    stc3 = read_source_estimate(out_name)
    stc4 = read_source_estimate(out_name + '-stc')
    stc5 = read_source_estimate(out_name + '-stc.h5')
    # A mismatching subject argument must raise.
    pytest.raises(RuntimeError, read_source_estimate, out_name,
                  subject='bar')
    # All round-tripped copies must match the original exactly.
    for stc_new in stc3, stc4, stc5:
        assert_equal(stc_new.subject, stc.subject)
        assert_array_equal(stc_new.data, stc.data)
        assert_array_equal(stc_new.tmin, stc.tmin)
        assert_array_equal(stc_new.tstep, stc.tstep)
        assert_equal(len(stc_new.vertices), len(stc.vertices))
        for v1, v2 in zip(stc_new.vertices, stc.vertices):
            assert_array_equal(v1, v2)
def test_cross_correlate_masked_over_axes():
    """Masked normalized cross-correlation over axes should be
    equivalent to a loop over non-transform axes."""
    # Seed the random number generator for reproducible results
    np.random.seed(23)
    arr1 = np.random.random((8, 8, 5))
    arr2 = np.random.random((8, 8, 5))
    m1 = np.random.choice([True, False], arr1.shape)
    m2 = np.random.choice([True, False], arr2.shape)
    # Move everything onto the GPU.
    arr1, arr2, m1, m2 = (cp.asarray(a) for a in (arr1, arr2, m1, m2))
    # Reference: correlate each slice along the last axis separately.
    expected = cp.empty_like(arr1, dtype=np.complex128)
    for k in range(arr1.shape[-1]):
        expected[..., k] = cross_correlate_masked(arr1[..., k],
                                                  arr2[..., k],
                                                  m1[..., k],
                                                  m2[..., k],
                                                  axes=(0, 1),
                                                  mode='same')
    # Single call over the batched array must agree with the slice loop.
    batched = cross_correlate_masked(
        arr1, arr2, m1, m2, axes=(0, 1), mode='same')
    cp.testing.assert_array_almost_equal(expected, batched)
def nested_dict_itervalue(nested):
    """
    Yield every leaf value of an arbitrarily nested dictionary.

    http://stackoverflow.com/questions/10756427/loop-through-all-nested-dictionary-values

    :param nested:
        Nested dictionary.
    :return:
        Iterator of leaf values.

    >>> sorted(nested_dict_itervalue({'a': {'b': {'c': 1, 'd': 2},
    ...                                     'e': {'f': 3, 'g': 4}},
    ...                               'h': {'i': 5, 'j': 6}}))
    [1, 2, 3, 4, 5, 6]
    """
    # dict.itervalues() and collections.Mapping are Python-2-only
    # (Mapping moved to collections.abc, the alias was removed in 3.10);
    # use the Python-3 equivalents.
    for value in nested.values():
        if isinstance(value, collections.abc.Mapping):
            # Recurse into nested mappings and re-yield their leaves.
            yield from nested_dict_itervalue(value)
        else:
            yield value
def inv_fap_davies(p, fmax, t, y, dy, normalization='standard'):
    """Inverse of the Davies upper-bound: find z with fap_davies(z, ...) == p.

    Parameters mirror fap_davies; the naive inverse provides the starting
    guess for the root finder.
    """
    from scipy import optimize
    args = (fmax, t, y, dy, normalization)
    z0 = inv_fap_naive(p, *args)

    # A named def is clearer (and PEP 8-preferred) over assigning a lambda.
    def func(z, *args):
        return fap_davies(z, *args) - p

    res = optimize.root(func, z0, args=args, method='lm')
    if not res.success:
        # BUGFIX: the original message named the wrong function
        # ('inv_fap_baluev'), which would mislead debugging.
        raise ValueError('inv_fap_davies did not converge for p={0}'.format(p))
    return res.x
def test_direct_evaluation(infix, values, expected):
    """Test direct evaluation of an infix against some values"""
    result = evaluate(infix, values)
    assert result == expected
def public(request):
    """browse public repos. Login not required"""
    # get_username() returns '' for anonymous users, so the template can
    # render either way.
    username = request.user.get_username()
    public_repos = DataHubManager.list_public_repos()
    # This should really go through the api... like everything else
    # in this file.
    # Serialized to JSON here because the template consumes it client-side.
    public_repos = serializers.serialize('json', public_repos)
    return render_to_response("public-browse.html", {
        'login': username,
        'repo_base': 'repo_base',
        'repos': [],
        'public_repos': public_repos})
def get_boundaries_old(im, su=5, sl=5, valley=5, cutoff_max=1., plt_val=False):
    """Bintu et al 2018 candidate boundary calling.

    Scans a (distance-map) matrix ``im`` and, for every position, compares the
    mean of an ``su`` x ``sl`` window on one side of the diagonal against the
    mirrored window on the other side.  Local maxima of the left/right ratio
    above ``cutoff_max`` (separated by at least ``valley``) are returned as
    candidate boundaries.

    Returns (indices of candidate boundaries, their ratio values).
    """
    im_=np.array(im)
    ratio,ration,center,centern=[],[],[],[]
    for i in range(len(im)):
        # Build the index lists for the windows left/right of position i,
        # on both sides of the diagonal (x_*/y_* vs xn_*/yn_*).
        x_im_l,y_im_l = [],[]
        x_im_r,y_im_r = [],[]
        xn_im_l,yn_im_l = [],[]
        xn_im_r,yn_im_r = [],[]
        for j in range(sl):
            xn_im_l.extend(i+j-np.arange(su)-1)
            yn_im_l.extend([i+j]*su)
            xn_im_r.extend(i+j+sl-np.arange(su)-1)
            yn_im_r.extend([i+j+sl]*su)
            x_im_l.extend(i+j+np.arange(su)+1)
            y_im_l.extend([i+j]*su)
            x_im_r.extend(i+j+sl+np.arange(su)+1)
            y_im_r.extend([i+j+sl]*su)
        x_im_l,y_im_l,x_im_r,y_im_r = list(map(np.array,[x_im_l,y_im_l,x_im_r,y_im_r]))
        xn_im_l,yn_im_l,xn_im_r,yn_im_r = list(map(np.array,[xn_im_l,yn_im_l,xn_im_r,yn_im_r]))
        # Only score windows that lie entirely inside the matrix.
        in_image = np.all(x_im_l>=0) and np.all(x_im_r>=0) and np.all(y_im_l>=0) and np.all(y_im_r>=0)
        in_image = in_image and np.all(x_im_l<len(im)) and np.all(x_im_r<len(im)) and np.all(y_im_l<len(im)) and np.all(y_im_r<len(im))
        in_imagen = np.all(xn_im_l>=0) and np.all(xn_im_r>=0) and np.all(yn_im_l>=0) and np.all(yn_im_r>=0)
        in_imagen = in_imagen and np.all(xn_im_l<len(im)) and np.all(xn_im_r<len(im)) and np.all(yn_im_l<len(im)) and np.all(yn_im_r<len(im))
        if in_image:
            val_l,val_r = np.nanmean(im_[x_im_l,y_im_l]),np.nanmean(im_[x_im_r,y_im_r])
            ratio.append(val_l/val_r)
            center.append(i+sl)
        if in_imagen:
            val_l,val_r = np.nanmean(im_[xn_im_l,yn_im_l]),np.nanmean(im_[xn_im_r,yn_im_r])
            ration.append(val_r/val_l)
            centern.append(i+sl)
        if False:#i==9:
            # (disabled) debugging plot of the sampled window positions
            plt.figure(figsize=(20,20))
            plt.plot(xn_im_l,yn_im_l,'mo')
            plt.plot(xn_im_r,yn_im_r,'go')
            plt.plot(x_im_l,y_im_l,'ro')
            plt.plot(x_im_r,y_im_r,'bo')
            plt.imshow(im,interpolation='nearest',cmap='seismic_r',vmax=1000)
            plt.show()
            print(x_im_l,y_im_l,x_im_r,y_im_r)
    center,ratio=np.array(center),np.array(ratio)
    centern,ration=np.array(centern),np.array(ration)
    # Combine both diagonal sides: keep the larger ratio per position.
    max_ratio = np.zeros(len(im))+np.nan
    max_ratio[center]=ratio
    max_ratio[centern]=np.nanmax([max_ratio[centern],ration],axis=0)
    local_max_good = get_ind_loc_max(max_ratio,cutoff_max=cutoff_max,valley=valley)
    #local_max_goodn = get_ind_loc_max(ration,cutoff_max=cutoff_max,valley=valley)
    ###Plotting
    if plt_val:
        #plt.close('all')
        plt.figure(figsize=(12,7))
        #print local_max_good,local_max_goodn
        plt.plot(center,np.log(ratio),'o-')
        plt.plot(centern,np.log(ration),'o-')
        plt.plot(np.log(max_ratio),'k-')
        if len(local_max_good)>0:
            plt.plot(local_max_good,np.log(max_ratio[local_max_good]),'o')
        plt.show()
        fig, ax = plt.subplots(figsize=(12,7))
        if len(local_max_good)>0:
            ax.plot(local_max_good[:],local_max_good[:],'go',ms=10,mec='k',mew=2)
        #cax = ax.imshow(set_diag(img,np.nanmax(img)),interpolation='nearest',cmap='bwr')#,vmax=1000,vmin=0)
        cax = ax.imshow(im,interpolation='nearest',cmap='seismic_r',vmax=1000,vmin=0)
        cbar = fig.colorbar(cax)
        plt.show()
    return local_max_good,max_ratio[local_max_good]
def zfsr32(val, n):
    """Zero-fill (logical) shift right for 32-bit integers.

    Negative values are first reinterpreted as their unsigned 32-bit
    two's-complement representation, then shifted.
    """
    unsigned = val if val >= 0 else val + 4294967296
    return unsigned >> n
def n_optimize_fn(step: int) -> int:
    """`n_optimize` scheduling function."""
    # Use the first value until the configured switch-over step has passed,
    # then the second.
    before_switch = step <= FLAGS.change_n_optimize_at
    return FLAGS.n_optimize_1 if before_switch else FLAGS.n_optimize_2
def compute_receptive_field(model, img_size=(1, 3, 3)):
    """Computes the receptive field for a model.

    The receptive field is computed using the magnitude of the gradient of the
    model's output with respect to the input.

    Args:
        model: Model for which to compute the receptive field. Assumes NCHW input.
        img_size: The (channels, height, width) of the input to the model.
    """
    channels, height, width = img_size
    probe = torch.randn((1, channels, height, width), requires_grad=True)
    # Backprop from the centre output pixel: any input pixel receiving a
    # non-zero gradient lies inside the receptive field.
    model(probe)[0, 0, height // 2, width // 2].mean().backward()
    magnitude = probe.grad.abs()[0, 0, :, :]
    # Binarize: 1 where the gradient is non-zero, 0 elsewhere.
    return torch.where(magnitude > 0,
                       torch.ones_like(magnitude),
                       torch.zeros_like(magnitude))
def bound_free_emission(r_packet, time_explosion, numba_plasma, continuum, continuum_id):
    """
    Bound-Free emission - set the frequency from photo-ionization

    Samples a new comoving-frame frequency from the free-bound emissivity of
    the given continuum, transforms it to the lab frame, and updates the
    packet's next-line pointer accordingly.

    Parameters
    ----------
    r_packet : tardis.montecarlo.montecarlo_numba.r_packet.RPacket
    time_explosion : float
    numba_plasma : tardis.montecarlo.montecarlo_numba.numba_interface.NumbaPlasma
    continuum : tardis.montecarlo.montecarlo_numba.numba_interface.Continuum
    continuum_id : int
    """
    # Doppler transform factor from the comoving to the lab frame at the
    # packet's current position/direction.
    inverse_doppler_factor = get_inverse_doppler_factor(
        r_packet.r, r_packet.mu, time_explosion
    )
    # Sample the emitted frequency in the comoving frame.
    comov_nu = continuum.sample_nu_free_bound(
        r_packet.current_shell_id,
        continuum_id)
    r_packet.nu = comov_nu * inverse_doppler_factor
    # Re-anchor the packet in the (frequency-sorted) line list.
    current_line_id = get_current_line_id(
        comov_nu,
        numba_plasma.line_list_nu
    )
    r_packet.next_line_id = current_line_id

    if montecarlo_configuration.full_relativity:
        # Correct the propagation direction for angle aberration.
        r_packet.mu = angle_aberration_CMF_to_LF(
            r_packet, time_explosion, r_packet.mu
        )
def flippv(pv, n):
    """Flips the meaning of an index partition vector.

    Parameters
    ----------
    pv : ndarray
        The index partition to flip.
    n : integer
        The length of the dimension to partition.

    Returns
    -------
    notpv : ndarray
        The complement of pv.

    Example:

    >>> import numpy as np
    >>> import locate
    >>> pv = np.array([0,3,5])
    >>> locate.flippv(pv,8)
    array([1, 2, 4, 6, 7])
    """
    # Mark every index, clear the ones in pv, and return what remains.
    keep = np.ones(n, dtype=bool)
    keep[pv] = False
    return np.flatnonzero(keep)
def test_create_api_message_defaults(hass):
    """Create a API message response of a request with defaults."""
    request = get_new_request("Alexa.PowerController", "TurnOn", "switch#xy")
    directive_header = request["directive"]["header"]

    directive = messages.AlexaDirective(request)
    msg = directive.response(payload={"test": 3})._response

    # The response envelope lives under the "event" key.
    assert "event" in msg
    msg = msg["event"]

    # A fresh messageId is generated; the correlation token is echoed back.
    assert msg["header"]["messageId"] is not None
    assert msg["header"]["messageId"] != directive_header["messageId"]
    assert msg["header"]["correlationToken"] == directive_header["correlationToken"]
    assert msg["header"]["name"] == "Response"
    assert msg["header"]["namespace"] == "Alexa"
    assert msg["header"]["payloadVersion"] == "3"

    assert "test" in msg["payload"]
    assert msg["payload"]["test"] == 3

    # The endpoint is equal to the request's but must be a copy, not the
    # same object.
    assert msg["endpoint"] == request["directive"]["endpoint"]
    assert msg["endpoint"] is not request["directive"]["endpoint"]
def PseAAC(fn, pt):
    """
    Create the PC-PseAAC feature matrix for ``fn``/test.fasta under the user dir.

    Parameters
    ----------
    fn : str
        User directory containing ``test.fasta``; output is written there.
    pt : str
        Prefix for the output file name (``<pt>_PseAAC.txt``).
    """
    import subprocess
    # Build an explicit argument list instead of concatenating a shell
    # string for os.system: paths containing spaces or shell
    # metacharacters can no longer break (or inject into) the command.
    cmd = [
        'python', root_path + 'tool/BioSeq-Analysis2/pse.py',
        fn + '/test.fasta', 'Protein',
        '-method', 'PC-PseAAC-General',
        '-f', 'csv',
        '-out', fn + '/' + pt + '_PseAAC.txt',
        '-lamada', '2',
        '-w', '0.3',
        '-labels', '-1',
    ]
    subprocess.run(cmd)
def is_dir(path: Union[str, Path]) -> bool:
    """Check if the given path is (or is intended to be) a directory.

    For an existing path the filesystem is consulted; for a non-existing
    path, a trailing "/" is taken as directory intent.

    :param path: path to be checked
    """
    # BUGFIX: keep the raw text before any Path conversion. Path normalises
    # away a trailing slash (str(Path("x/")) == "x"), so the original code's
    # endswith("/") branch could never fire for string inputs.
    raw = str(path)
    p = Path(raw) if isinstance(path, str) else path
    if p.exists():
        return p.is_dir()
    return raw.endswith("/")
def log_binlog_upload(instance, binlog):
    """ Log to the master that a binlog has been uploaded

    Args:
    instance - a hostAddr object
    binlog - the full path to the binlog file
    """
    zk = MysqlZookeeper()
    # NOTE(review): st_atime is the file's *access* time, yet it is stored
    # as 'binlog_creation' — confirm st_mtime was not intended here.
    binlog_creation = datetime.datetime.fromtimestamp(os.stat(binlog).st_atime)
    # Resolve this instance's replica set and write the record on its master.
    replica_set = zk.get_replica_set_from_instance(instance)[0]
    master = zk.get_mysql_instance_from_replica_set(replica_set)
    conn = connect_mysql(master, 'scriptrw')
    cursor = conn.cursor()
    # REPLACE keeps one row per (host, port, binlog) upload record.
    sql = ("REPLACE INTO {metadata_db}.{tbl} "
           "SET hostname = %(hostname)s, "
           "    port = %(port)s, "
           "    binlog = %(binlog)s, "
           "    binlog_creation = %(binlog_creation)s, "
           "    uploaded = NOW() ").format(
        metadata_db=METADATA_DB, tbl=BINLOG_ARCHIVING_TABLE_NAME)
    metadata = {
        'hostname': instance.hostname,
        'port': str(instance.port),
        'binlog': os.path.basename(binlog),
        'binlog_creation': binlog_creation
    }
    cursor.execute(sql, metadata)
    conn.commit()
def work(out_dir: str, in_coord: str, in_imd_path: str, in_topo_path: str, in_perttopo_path: str, in_disres_path: str,
         nmpi: int = 1, nomp: int = 1, out_trg: bool = False,
         gromos_bin: str = None, work_dir: str = None):
    """
    Executed by repex_EDS_long_production_run as worker_scripts
    #TODO: This was used to do TIs, it will go in future to the pygromos package

    Parameters
    ----------
    out_dir : str
        final output dir
    in_coord : str
        input coordinates
    in_imd_path : str
        input imd-parameter file
    in_topo_path : str
        input topology
    in_perttopo_path : str
        input pertubation
    in_disres_path : str
        input disres
    nmpi : int, optional
        number of mpi cores (default: 1)
    nomp : int, optional
        number of omp cores (default: 1)
    out_trg : bool, optional
        write the trg output file (default: False)
    gromos_bin : str, optional
        path to gromos binary (default: None)
    work_dir : str, optional
        work directory (default: None)

    Returns
    -------
    int
        0 if code was passed through.
    """
    # WORKDIR SetUP
    # A work_dir of None (or the literal string "None") means "choose for me":
    # prefer the scheduler-provided $TMPDIR, otherwise fall back to out_dir.
    # BUGFIX: the original elif used `and` (a value cannot be both None and
    # the string "None"), so work_dir stayed None when $TMPDIR was missing.
    no_work_dir = work_dir is None or work_dir == "None"
    if no_work_dir and "TMPDIR" in os.environ:
        work_dir = os.environ["TMPDIR"]
        print("using TmpDir")
    elif no_work_dir:
        print("Could not find TMPDIR!\n Switched to outdir for work")
        work_dir = out_dir
    print("workDIR: " + work_dir)

    if not os.path.isdir(work_dir):
        bash.make_folder(work_dir)
    os.chdir(work_dir)

    md = mdGromosXX.GromosXX(bin=gromos_bin)

    # RUN
    try:
        # BUGFIX: the original referenced the undefined name `imd_path`
        # here and below; the parameter is called `in_imd_path`.
        print(spacer + "\n start MD " + str(os.path.basename(in_imd_path)) + "\n")
        out_prefix = os.path.splitext(os.path.basename(in_imd_path))[0]
        try:
            md.md_mpi_run(in_topo_path=in_topo_path, in_coord_path=in_coord, in_imd_path=in_imd_path,
                          nmpi=nmpi,
                          in_pert_topo_path=in_perttopo_path, out_trg=out_trg,
                          in_disres_path=in_disres_path, out_prefix=out_prefix, nomp=nomp, verbose=True)
        except Exception as err:
            # err.args may hold non-string payloads; stringify defensively.
            print("Failed! process returned: \n Err: \n" + "\n".join(map(str, err.args)))

        if out_dir != work_dir:
            # Move results to the final output dir, then clean the scratch
            # dir. BUGFIX: the original cleanup condition was always true,
            # which could delete out_dir itself when it doubled as work_dir.
            os.system("mv " + work_dir + "/* " + out_dir)
            bash.remove_folder(work_dir, verbose=True)
    except Exception as err:
        print("\nFailed during simulations: ", file=sys.stderr)
        print(type(err), file=sys.stderr)
        print(err.args, file=sys.stderr)
        exit(1)
    return 0
def test_sigma_dut_dpp_qr_mutual_init_enrollee_check(dev, apdev):
    """sigma_dut DPP/QR (mutual) initiator as Enrollee (extra check)"""
    # Thin wrapper: delegates to the shared helper, appending the
    # DPPAuthDirection=Mutual parameter to the sigma_dut command.
    run_sigma_dut_dpp_qr_mutual_init_enrollee_check(dev, apdev,
                                                    extra="DPPAuthDirection,Mutual,")
def _replace_dendro_colours(
colours,
above_threshold_colour="C0",
non_cluster_colour="black",
colorscale=None
):
""" Returns colorscale used for dendrogram tree clusters.
Keyword arguments:
colorscale -- Colors to use for the plot in rgb format.
Should have 8 colours.
"""
from itertools import cycle
if isinstance(colorscale, str):
colorscale = _mpl_cmap_to_str(colorscale)
elif colorscale is None:
colorscale = [
'rgb(0,116,217)', # instead of blue
'rgb(35,205,205)', # cyan
'rgb(61,153,112)', # green
'rgb(40,35,35)', # black
'rgb(133,20,75)', # magenta
'rgb(255,65,54)', # red
'rgb(255,255,255)', # white
'rgb(255,220,0)', # yellow
]
else:
assert isinstance(colorscale, (list, tuple)), \
"colorscale must be a list or tuple of strings"
assert all(isinstance(c, str) for c in colorscale), \
"colorscale must be a list or tuple of strings"
original_colours = set(colours)
original_colours.remove(above_threshold_colour)
colour_map = dict(zip(original_colours, cycle(colorscale)))
colour_map[above_threshold_colour] = non_cluster_colour
return [colour_map[c] for c in colours] | 34,991 |
def document_to_vector(lemmatized_document, uniques):
    """
    Converts a lemmatized document to a bow vector
    representation.

    1/0 for word exists/doesn't exist

    :param lemmatized_document: document text (lemmatized)
    :param uniques: vocabulary; output vector is aligned with this list
    :return: list of 0/1 flags, one per entry of uniques
    """
    # tokenize
    # A set gives O(1) membership tests, replacing the original
    # O(len(uniques) * len(words)) nested scan (whose `continue` also
    # never short-circuited the inner loop).
    words = set(re.findall(r'\w+', lemmatized_document.lower()))
    return [1 if unique in words else 0 for unique in uniques]
def ubcOcTree(FileName_Mesh, FileName_Model, pdo=None):
    """
    Description
    -----------
    Wrapper to Read UBC GIF OcTree mesh and model file pairs. UBC OcTree models are defined using a 2-file format. The "mesh" file describes how the data is descritized. The "model" file lists the physical property values for all cells in a mesh. A model file is meaningless without an associated mesh file. This only handles OcTree formats

    Parameters
    ----------
    `FileName_Mesh` : str
    - The OcTree Mesh filename as an absolute path for the input mesh file in UBC OcTree Mesh Format

    `FileName_Model` : str
    - The model filename as an absolute path for the input model file in UBC OcTree Model Format.

    `pdo` : vtk.vtkUnstructuredGrid, optional
    - The output data object

    Returns
    -------
    Returns a vtkUnstructuredGrid generated from the UBC 2D/3D Mesh grid. Mesh is defined by the input mesh file. Cell data is defined by the input model file.
    """
    # Build the unstructured grid from the OcTree mesh file.
    mesh = ubcOcTreeMesh(FileName_Mesh, pdo=pdo)
    if FileName_Model is None:
        # No model file supplied: return the bare mesh.
        return mesh
    # Read the model values and attach them to the mesh cells.
    model = ubcModel3D(FileName_Model)
    return placeModelOnOcTreeMesh(mesh, model)
def main(featsFolder, jsonPath, modelPath, maxDur, normalised, cuda):
    """
    Adding wav2vec2 features to a given dataset and writting its reference to a json file

    Example
    ----------
    python wav2vec2.py -f "xlsr_53_56k_cut30" -d 29.99 -n True -j "/mnt/HD-Storage/Datasets/Recola_46/data.json" -m "/mnt/HD-Storage/Models/xlsr_53_56k.pt"
    python wav2vec2.py -f "FlowBERT_2952h_base_cut30_noNorm" -d 29.99 -n False -j "/mnt/HD-Storage/Datasets/Recola_46/data.json" -m "/mnt/HD-Storage/Models/FlowBERT_2952h_base.pt"
    python wav2vec2.py -f "FlowBERT_2952h_base_cut30" -d 29.99 -n True -j "/mnt/HD-Storage/Datasets/Recola_46/data.json" -m "/mnt/HD-Storage/Models/FlowBERT_2952h_base.pt"
    python wav2vec2.py -f "FlowBERT_2952h_large_cut30" -d 29.99 -n True -j "/mnt/HD-Storage/Datasets/Recola_46/data.json" -m "/mnt/HD-Storage/Models/FlowBERT_2952h_large.pt"
    python wav2vec2.py -f "FlowBERT_2952h_large_noNorm_cut30" -d 29.99 -n False -j "/mnt/HD-Storage/Datasets/Recola_46/data.json" -m "/mnt/HD-Storage/Models/FlowBERT_2952h_large.pt"
    python wav2vec2.py -f "xlsr_53_56k_cut30" -d 29.98 -n True -j "/mnt/HD-Storage/Datasets/AlloSat/data.json" -m "/mnt/HD-Storage/Models/xlsr_53_56k.pt"
    python wav2vec2.py -f "FlowBERT_2952h_base_cut30" -d 29.99 -n False -j /home/getalp/alisamis/Datasets/AlloSat/data.json -m /home/getalp/nguyen35/flowbert_ssl_resources/wav2vec2.0_models/2952h_base/checkpoint_best.pt
    python wav2vec2.py -f "FlowBERT_2952h_base_cut30_noNorm" -d 29.99 -n False -j "/mnt/HD-Storage/Datasets/Recola_46_S/data.json" -m "/mnt/HD-Storage/Models/FlowBERT_2952h_base.pt"
    not working: python wav2vec2.py -f "wav2vec2-large-xlsr-53-french" -d 29.99 -n True -j "/mnt/HD-Storage/Datasets/Recola_46/data.json" -m "/mnt/HD-Storage/Models/wav2vec2-large-xlsr-53-french.zip"

    python wav2vec2.py -f "mls_french_base_cut30" -d 29.98 -n False -j "/home/getalp/alisamis/Datasets/AlloSat/data.json" -m "/home/getalp/nguyen35/flowbert_ssl_resources/wav2vec2.0_models/mls_french_base/checkpoint_best.pt"
    python wav2vec2.py -f "xlsr_53_56k_cut30" -d 29.98 -n True -j "/home/getalp/alisamis/Datasets/AlloSat/data.json" -m "/home/getalp/alisamis/Models/xlsr_53_56k.pt"
    python wav2vec2.py -f "mls_french_base_cut30" -d 29.98 -n False -j "/home/getalp/alisamis/Datasets/Recola_46/data.json" -m "/home/getalp/nguyen35/flowbert_ssl_resources/wav2vec2.0_models/mls_french_base/checkpoint_best.pt"
    python wav2vec2.py -f "mls_french_base_cut30" -d 29.98 -n False -j "/mnt/HD-Storage/Datasets/Recola_46/data.json" -m "/mnt/HD-Storage/Models/mls_french_base/checkpoint_best.pt"

    python wav2vec2.py -f "libri960_big_cut30" -d 29.98 -n True -j "/home/getalp/alisamis/Datasets/Recola_46/data.json" -m "/home/getalp/dinarelm/work/data/Exchance/wav2vec/models/libri960_big.pt"
    python wav2vec2.py -f "libri960_big_cut30" -d 29.98 -n True -j "/home/getalp/alisamis/Datasets/AlloSat/data.json" -m "/home/getalp/dinarelm/work/data/Exchance/wav2vec/models/libri960_big.pt"

    python wav2vec2.py -f "mls_french_large_cut30" -d 29.98 -n True -j "/home/getalp/alisamis/Datasets/Recola_46/data.json" -m "/home/getalp/nguyen35/flowbert_ssl_resources/wav2vec2.0_models/mls_french_large/checkpoint_best.pt" && \
    python wav2vec2.py -f "FlowBERT_2952h_base_cut30" -d 29.98 -n False -j "/home/getalp/alisamis/Datasets/Recola_46/data.json" -m "/home/getalp/nguyen35/flowbert_ssl_resources/wav2vec2.0_models/2952h_base/checkpoint_best.pt" && \
    python wav2vec2.py -f "FlowBERT_2952h_large_cut30" -d 29.98 -n True -j "/home/getalp/alisamis/Datasets/Recola_46/data.json" -m "/home/getalp/nguyen35/flowbert_ssl_resources/wav2vec2.0_models/2952h_large/checkpoint_best.pt" && \
    python wav2vec2.py -f "FlowBERT_2952h_large_cut30" -d 29.98 -n True -j "/home/getalp/alisamis/Datasets/AlloSat/data.json" -m "/home/getalp/nguyen35/flowbert_ssl_resources/wav2vec2.0_models/2952h_large/checkpoint_best.pt" && \
    python wav2vec2.py -f "mls_french_large_cut30" -d 29.98 -n True -j "/home/getalp/alisamis/Datasets/AlloSat/data.json" -m "/home/getalp/nguyen35/flowbert_ssl_resources/wav2vec2.0_models/mls_french_large/checkpoint_best.pt"
    python wav2vec2.py -f "mls_french_base_cut30" -d 29.98 -n False -j "/home/getalp/alisamis/Datasets/AlloSat/data.json" -m "/home/getalp/nguyen35/flowbert_ssl_resources/wav2vec2.0_models/mls_french_base/checkpoint_best.pt"
    """
    # cp = torch.load(modelPath, map_location=torch.device('cpu'))
    # model = Wav2VecModel.build_model(cp['args'], task=None)
    # model.load_state_dict(cp['model'])
    # model.eval()
    # cp = torch.load(modelPath, map_location=torch.device('cpu'))
    # Load the wav2vec2 checkpoint via fairseq; the ensemble loader returns
    # a list of models, we use the first (and only) one.
    model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([modelPath])
    model = model[0]
    if cuda: model = model.cuda()
    model.eval()

    samples = loadFromJson(jsonPath)
    for i, ID in enumerate(samples.keys()):
        sample = samples[ID]
        wavePath = sample["path"]
        # Mirror the wav folder layout into the features folder, .wav -> .csv.
        wavsFolder = wavePath.split(os.sep)[0]
        waveFullPath = os.path.join(os.path.split(jsonPath)[0], wavePath)
        featsLocalPath = wavePath.replace(wavsFolder, featsFolder).replace(".wav", ".csv")
        featsLocalPath = os.path.join("Feats", featsLocalPath)
        featsFullPath = os.path.join(os.path.split(jsonPath)[0], featsLocalPath)
        # print(featsLocalPath, featsFullPath)
        dim = makeFeatsCsv(waveFullPath, featsFullPath, model, maxDur, normalised, cuda=cuda)
        # dim == 0 signals extraction failure for this sample; skip it.
        if dim == 0: continue
        featsDict = getFeatsDict(dim, featsFolder, featsLocalPath)
        samples[ID]["features"][featsDict["ID"]] = featsDict
        # saveToJson(jsonPath, sample)
        printProgressBar(i + 1, len(samples), prefix = 'Adding wav2vec features:', suffix = 'Complete', length = "fit")
    # Write all feature references back to the dataset json in one pass.
    with open(jsonPath, 'w') as jsonFile:
        json.dump(samples, jsonFile,  indent=4, ensure_ascii=False)
def invert_and_write_to_disk(term2doc, results_path, block_num):
    """
    Takes as an input a list of term-doc_id pairs, creates an inverted index out of them,
    sorts alphabetically by terms to allow merge and saves to a block file.
    Each line represents a term and postings list, e.g. abolish 256 1 278 2 295 2
    I.e. term doc_id_1 term_freq_1 doc_id_2 term_freq_2 ...

    Note: consecutive identical (term, doc_id) pairs are merged into one
    posting with an incremented frequency, matching SPIMI's assumption that
    a document's occurrences of a term arrive together.

    :param term2doc: list of term-doc_id pairs
    :param results_path: where to save block files
    :param block_num: block number to use for naming a file - 'spimi_block_n.dat', use block_num for 'n'
    """
    index = {}
    for term, doc_id in term2doc:
        postings = index.get(term)
        if postings is None:
            index[term] = [(doc_id, 1)]
        elif postings[-1][0] == doc_id:
            # Same document as the previous posting: bump its frequency.
            postings[-1] = (doc_id, postings[-1][1] + 1)
        else:
            postings.append((doc_id, 1))
    with open(results_path + 'spimi_block_' + str(block_num) + '.dat', 'w') as f:
        # Terms are written sorted so the blocks can later be merge-joined.
        for term in sorted(index):
            parts = [term]
            for doc_id, freq in index[term]:
                parts.append(str(doc_id))
                parts.append(str(freq))
            f.write(' '.join(parts) + '\n')
def load_colormaps():
    """Return the provided colormaps."""
    # Thin wrapper around the generic builtin-data loader.
    return load_builtin_data('colormaps')
def discriminator(hr_images, scope, dim):
    """
    Discriminator

    Multi-resolution discriminator: inputs at three scales (keys 1, 2, 4 of
    `hr_images`) are merged progressively while downsampling, ending in a
    per-sample score. `dim` is the base channel width, doubled per stage.
    """
    conv_lrelu = partial(conv, activation_fn=lrelu)

    def _combine(x, newdim, name, z=None):
        # 1x1 conv to `newdim` channels, optionally concatenated with the
        # feature map from the previous (higher-resolution) stage.
        x = conv_lrelu(x, newdim, 1, 1, name)
        y = x if z is None else tf.concat([x, z], axis=-1)
        return minibatch_stddev_layer(y)

    def _conv_downsample(x, dim, ksize, name):
        # Strided conv that halves spatial resolution, then lrelu.
        y = conv2d_downscale2d(x, dim, ksize, name=name)
        y = lrelu(y)
        return y

    with tf.compat.v1.variable_scope(scope, reuse=tf.compat.v1.AUTO_REUSE):
        with tf.compat.v1.variable_scope("res_4x"):
            net = _combine(hr_images[1], newdim=dim, name="from_input")
            net = conv_lrelu(net, dim, 3, 1, "conv1")
            net = conv_lrelu(net, dim, 3, 1, "conv2")
            net = conv_lrelu(net, dim, 3, 1, "conv3")
            net = _conv_downsample(net, dim, 3, "conv_down")

        with tf.compat.v1.variable_scope("res_2x"):
            # Inject the 2x-resolution input alongside the downsampled 4x path.
            net = _combine(hr_images[2], newdim=dim, name="from_input", z=net)
            dim *= 2
            net = conv_lrelu(net, dim, 3, 1, "conv1")
            net = conv_lrelu(net, dim, 3, 1, "conv2")
            net = conv_lrelu(net, dim, 3, 1, "conv3")
            net = _conv_downsample(net, dim, 3, "conv_down")

        with tf.compat.v1.variable_scope("res_1x"):
            net = _combine(hr_images[4], newdim=dim, name="from_input", z=net)
            dim *= 2
            net = conv_lrelu(net, dim, 3, 1, "conv")
            net = _conv_downsample(net, dim, 3, "conv_down")

        with tf.compat.v1.variable_scope("bn"):
            dim *= 2
            net = conv_lrelu(net, dim, 3, 1, "conv1")
            net = _conv_downsample(net, dim, 3, "conv_down1")
            net = minibatch_stddev_layer(net)

            # dense
            dim *= 2
            net = conv_lrelu(net, dim, 1, 1, "dense1")
            net = conv(net, 1, 1, 1, "dense2")
            # Global average over the spatial dims -> one score per sample.
            net = tf.reduce_mean(net, axis=[1, 2])

        return net
def addchallenges(request):
    """
    Admin-only view: add a new challenge.

    GET renders an empty ``AddChallengeForm``; POST validates the submission
    and, when valid, creates a ``Challenges`` record (the uploaded file is
    optional). Non-superusers are redirected to the site root.
    """
    # Guard clause: only superusers may add challenges.
    if not request.user.is_superuser:
        return redirect("/")
    if request.method == 'POST':
        success = 0
        form = forms.AddChallengeForm(request.POST, request.FILES)
        if form.is_valid():
            success = 1
            # Build the model kwargs once instead of duplicating the
            # constructor call for the with/without-file branches.
            fields = {
                'name': request.POST['name'],
                'category': request.POST['category'],
                'description': request.POST['description'],
                'points': request.POST['points'],
                'challenge_id': assignID(request.POST['name']),
                'flag': request.POST['flag'],
                'author': request.POST['author'],
            }
            if request.FILES:
                fields['file'] = request.FILES['file']
            models.Challenges(**fields).save()
        return render(request, 'addchallenges.html',
                      {'form': form, 'success': success})
    form = forms.AddChallengeForm()
    return render(request, 'addchallenges.html', {'form': form})
def _decompose_ridge(Xtrain, alphas, n_alphas_batch=None, method="svd",
                     negative_eigenvalues="zeros"):
    """Precompute resolution matrices for ridge predictions.
    To compute the prediction::
        Ytest_hat = Xtest @ (XTX + alphas * Id)^-1 @ Xtrain^T @ Ytrain
    where XTX = Xtrain^T @ Xtrain,
    this function precomputes::
        matrices = (XTX + alphas * Id)^-1 @ Xtrain^T.
    Parameters
    ----------
    Xtrain : array of shape (n_samples_train, n_features)
        Concatenated input features.
    alphas : float, or array of shape (n_alphas, )
        Range of ridge regularization parameter.
    n_alphas_batch : int or None
        If not None, returns a generator over batches of alphas.
    method : str in {"svd"}
        Method used to diagonalize the kernel.
    negative_eigenvalues : str in {"nan", "error", "zeros"}
        If the decomposition leads to negative eigenvalues (wrongly emerging
        from float32 errors):
        - "error" raises an error.
        - "zeros" replaces them with zeros.
        - "nan" returns nans if the regularization does not compensate
        twice the smallest negative value, else it ignores the problem.
    Returns
    -------
    matrices : array of shape (n_alphas, n_samples_test, n_samples_train) or \
            (n_alphas, n_features, n_samples_train) if test is not None
        Precomputed resolution matrices.
    alpha_batch : slice
        Slice of the batch of alphas.
    """
    backend = get_backend()
    use_alpha_batch = n_alphas_batch is not None
    if n_alphas_batch is None:
        n_alphas_batch = len(alphas)
    if method == "svd":
        # SVD: X = U @ np.diag(eigenvalues) @ Vt
        U, eigenvalues, Vt = backend.svd(Xtrain, full_matrices=False)
    else:
        raise ValueError("Unknown method=%r." % (method, ))
    for start in range(0, len(alphas), n_alphas_batch):
        batch = slice(start, start + n_alphas_batch)
        # Ridge weighting of the singular values: s / (alpha + s^2).
        ev_weighting = eigenvalues / (alphas[batch, None] + eigenvalues ** 2)
        # negative eigenvalues can emerge from incorrect kernels,
        # or from float32
        # NOTE(review): with SVD the singular values are non-negative and
        # sorted in decreasing order, so eigenvalues[0] < 0 should not
        # trigger here; the branch presumably guards other decompositions
        # — confirm against the kernel-based variant of this solver.
        if eigenvalues[0] < 0:
            if negative_eigenvalues == "nan":
                ev_weighting[alphas[batch] < -eigenvalues[0] * 2, :] = \
                    backend.asarray(backend.nan, type=ev_weighting.dtype)
            elif negative_eigenvalues == "zeros":
                eigenvalues[eigenvalues < 0] = 0
                # BUGFIX: recompute the weighting so the *current* batch also
                # uses the clipped spectrum; previously only later batches
                # benefited from the zeroing.
                ev_weighting = eigenvalues / (alphas[batch, None]
                                              + eigenvalues ** 2)
            elif negative_eigenvalues == "error":
                # BUGFIX: added the missing space that previously produced
                # "or useanother solver." in the message.
                raise RuntimeError(
                    "Negative eigenvalues. Make sure the kernel is positive "
                    "semi-definite, increase the regularization alpha, or use "
                    "another solver.")
            else:
                raise ValueError("Unknown negative_eigenvalues=%r." %
                                 (negative_eigenvalues, ))
        matrices = backend.matmul(Vt.T, ev_weighting[:, :, None] * U.T)
        if use_alpha_batch:
            yield matrices, batch
        else:
            return matrices, batch
        # Free the (potentially large) batch result before the next iteration.
        del matrices
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.