| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def cdfRosconi(cdfThickness=np.linspace(0,1,1000),
alpha=1.71e11, beta=8.17, gamma=55.54):
"""
TODO: Not Yet Implemented
* Input to this function has units of mm for default parameters.
** Default values of alpha, beta and gamma derived from:
Rosconi et al. Quantitative approach to the stochastics of bone remodeling. 2012.
cdfThickness - range of thicknesses (in mm) considered in the pdf calculation.
"""
# Thickness distribution (using default parameters, thickness is in mm)
def distributionRosconi(t, alpha=alpha, beta=beta, gamma=gamma):
return alpha*(t**beta)*np.exp(-gamma*t)
pdf = distributionRosconi(cdfThickness, alpha, beta, gamma)
pdf = pdf / np.sum(pdf)
cdf = np.cumsum(pdf)
return cdfThickness, cdf
| 13,300
|
def compute_error_model(model_metadata, X_test, y_test, target, error_metric):
"""Computes the model error based on test data
:param model_metadata: a dictionary containing metadata about a model
:param X_test: a dataframe containing features specific to the model being evaluated
:param y_test: a dataframe of target labels
:param target: the column which contains the actual labels for training data
:param error_metric: error metric to evaluate model performance on (MAE, RMSE, etc.)
:return: the computed error
"""
model_pipeline = get_prediction_pipeline(model_metadata)
pred_prices = model_pipeline.predict(X_test)
error = compute_error(y_test, pred_prices, error_metric)
return error
| 13,301
|
def column(df, s, column) -> ReturnType:
"""Gets the series of the column named `column`
"""
return df.loc[s, column].to_numpy(), 0
| 13,302
|
def directory_hash(root_path, verbose, hash_format, ignore_list, ignore_spec_file):
"""
[TMP] Creates the directory hash of a given folder
"""
if not os.path.isabs(root_path):
root_path = os.path.join(os.getcwd(), root_path)
# store the directory hashes of sub folders so we can use it when calculating the hash of the parent folder
dir_hash_mappings = {}
ignore_spec = ignore.MHLIgnoreSpec(None, ignore_list, ignore_spec_file)
for folder_path, children in post_order_lexicographic(root_path, ignore_spec.get_path_spec()):
dir_hash_context = DirectoryHashContext(hash_format)
for item_name, is_dir in children:
item_path = os.path.join(folder_path, item_name)
if is_dir:
if not dir_hash_context:
continue
hash_string = dir_hash_mappings.pop(item_path)
else:
hash_string = create_filehash(hash_format, item_path)
dir_hash_context.append_hash(hash_string, item_name)
dir_hash = dir_hash_context.final_hash_str()
dir_hash_mappings[folder_path] = dir_hash
if folder_path == root_path:
logger.info(f" calculated root hash: {hash_format}: {dir_hash}")
elif verbose:
logger.info(f"directory hash for: {folder_path} {hash_format}: {dir_hash}")
| 13,303
|
def bubble_up(heap: List[Node], index: int) -> None:
"""Fixes the binary heap after an insertion
When an insertion happens, the binary heap may be violated, with a smaller
value at the leaves. Bubble up will make the smaller value "surface" on the
heap and preserve the heap invariant.
Parameters
----------
heap : List[Node]
A binary min heap in the form of a list of Nodes
index : int
The index to bubble up from, mostly the index of the insertion
"""
parent = get_parent(index)
if index != parent and heap[index].p < heap[parent].p:
heap[index], heap[parent] = heap[parent], heap[index]
heap[index].index, heap[parent].index = index, parent
bubble_up(heap, parent)
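# Minimal usage sketch, assuming a simple Node that carries a priority `p` and its list
# position `index`, plus the usual 0-based parent formula; both are stand-ins for the
# module's own Node and get_parent definitions.
from dataclasses import dataclass

@dataclass
class Node:
    p: int      # priority
    index: int  # position within the heap list

def get_parent(index: int) -> int:
    return (index - 1) // 2 if index > 0 else 0

demo_heap = [Node(1, 0), Node(5, 1), Node(3, 2), Node(0, 3)]  # last insert violates the heap
bubble_up(demo_heap, 3)                                       # restore the min-heap property
print([n.p for n in demo_heap])                               # [0, 1, 3, 5]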
| 13,304
|
def main():
"""Main entry point."""
run.Run(ZeroR)
| 13,305
|
def features_to_id(features, intervals):
"""Convert list of features into index using spacings provided in intervals"""
id = 0
for k in range(len(intervals)):
id += features[k] * intervals[k]
# Shift by 1 so that index 0 is reserved for the null molecule
id = id + 1
return id
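# Illustrative call with made-up spacings: `intervals` acts like mixed-radix place
# values, and the trailing +1 reserves id 0 for the null molecule.
print(features_to_id([2, 0, 3], [1, 4, 12]))  # 2*1 + 0*4 + 3*12 + 1 == 39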
| 13,306
|
def index_initial(n_batch, n_ch, tensor=True):
"""Tensor batch and channel index initialization.
Args:
n_batch (Int): Number of batches.
n_ch (Int): Number of channels.
tensor (bool): Whether to return torch tensors (True) or plain Python lists (False).
Returns:
Tensor: Batch index
Tensor: Channel index
"""
batch_index = []
for i in range(n_batch):
batch_index.append([[i]] * n_ch)
ch_index = []
for i in range(n_ch):
ch_index += [[i]]
ch_index = [ch_index] * n_batch
if tensor:
batch_index = torch.tensor(batch_index)
ch_index = torch.tensor(ch_index)
if torch.cuda.is_available():
batch_index = batch_index.cuda()
ch_index = ch_index.cuda()
return batch_index, ch_index
| 13,307
|
def which(binary_name, pathvar=None):
""" Deduces the path corresponding to an executable name,
as per the UNIX command `which`. Optionally takes an
override for the $PATH environment variable.
Always returns a string - an empty one for those
executables that cannot be found.
"""
from distutils.spawn import find_executable
if not hasattr(which, 'pathvar'):
which.pathvar = os.getenv("PATH", DEFAULT_PATH)
return find_executable(binary_name, pathvar or which.pathvar) or ""
| 13,308
|
def cpc_autostart_group():
"""
Command group for managing the auto-start list of a CPC (in DPM mode).
In addition to the command-specific options shown in this help text, the
general options (see 'zhmc --help') can also be specified right after the
'zhmc' command name.
"""
| 13,309
|
def load_titanic(test_size=0.2, random_state=1, cache_dir=None, cache_subdir='datasets'):
""" load titanic database """
path = find_path(DatasetEnum.titanic, cache_dir=cache_dir, cache_subdir=cache_subdir)
df = pd.read_csv(path, sep=",", na_values=["?"], keep_default_na=True)
# Shuffle DF and compute train/test split
df = df.sample(frac=1, random_state=random_state).reset_index(drop=True)
idx = int(len(df) * (1 - test_size))
# Use iloc so the boundary row does not appear in both splits (loc is label-inclusive)
df_train = df.iloc[:idx]
df_test = df.iloc[idx:]
# Filter columns and build X, y
y_train = df_train["survived"].values
del df_train["survived"]
y_test = df_test["survived"].values
del df_test["survived"]
infos = {}
return df_train, y_train, df_test, y_test, infos
| 13,310
|
def mock_function_fail(*args, **kwargs):
"""
Mock a function that 'fails', i.e., returns a 1.
"""
print("\nmock> f({}) ==> 1".format(args)) # pragma: no cover
return 1 # pragma: no cover
| 13,311
|
def get_field_result(client_id, field_id, count=1):
"""
input: field id, client (card) id
output: the latest result(s) for that field
:return:
"""
with connection.cursor() as cursor:
cursor.execute(
"""
SELECT directions_napravleniya.client_id, directions_issledovaniya.napravleniye_id,
directions_issledovaniya.research_id, directions_issledovaniya.time_confirmation AT TIME ZONE %(tz)s as time_confirmation,
to_char(directions_issledovaniya.time_confirmation AT TIME ZONE %(tz)s, 'DD.MM.YYYY') as date_confirm,
directions_paraclinicresult.value, directions_paraclinicresult.field_id
FROM directions_issledovaniya
LEFT JOIN directions_napravleniya
ON directions_issledovaniya.napravleniye_id=directions_napravleniya.id
LEFT JOIN directions_paraclinicresult
ON directions_issledovaniya.id=directions_paraclinicresult.issledovaniye_id
WHERE directions_napravleniya.client_id = %(client_p)s
and directions_paraclinicresult.field_id = %(field_id)s
and directions_issledovaniya.time_confirmation is not NULL
ORDER BY directions_issledovaniya.time_confirmation DESC LIMIT %(count_p)s
""",
params={'client_p': client_id, 'field_id': field_id, 'count_p': count, 'tz': TIME_ZONE},
)
row = cursor.fetchall()
return row
| 13,312
|
def write_pipeline_files(
output_dir: str,
labels_filename: str,
config_info_list: List[ConfigFileInfo],
inference_params: Dict[str, Any],
items_for_inference: ItemsForInference,
):
"""Writes the config files and scripts for manually running pipeline."""
# Use absolute path for all files that aren't contained in the output dir.
labels_filename = os.path.abspath(labels_filename)
# Preserve current working directory and change working directory to the
# output directory, so we can set local paths relative to that.
old_cwd = os.getcwd()
os.chdir(output_dir)
new_cfg_filenames = []
train_script = "#!/bin/bash\n"
# Add head type to save path suffix to prevent overwriting.
for cfg_info in config_info_list:
if not cfg_info.dont_retrain:
if (
cfg_info.config.outputs.run_name_suffix is not None
and len(cfg_info.config.outputs.run_name_suffix) > 0
):
# Keep existing suffix if defined.
suffix = "." + cfg_info.config.outputs.run_name_suffix
else:
suffix = ""
# Add head name.
suffix = "." + cfg_info.head_name + suffix
# Update config.
cfg_info.config.outputs.run_name_suffix = suffix
for cfg_info in config_info_list:
if cfg_info.dont_retrain:
# Use full absolute path to the already-trained model
trained_path = os.path.normpath(os.path.join(old_cwd, cfg_info.path))
new_cfg_filenames.append(trained_path)
else:
# We're training this model, so save config file...
# First we want to set the run folder so that we know where to find
# the model after it's trained.
# We'll use local path to the output directory (cwd).
# Note that setup_new_run_folder does things relative to cwd which
# is the main reason we're setting it to the output directory rather
# than just using normpath.
cfg_info.config.outputs.runs_folder = ""
training.setup_new_run_folder(cfg_info.config.outputs)
# Now we set the filename for the training config file
new_cfg_filename = f"{cfg_info.head_name}.json"
# Save the config file
cfg_info.config.save_json(new_cfg_filename)
# Keep track of the path where we'll find the trained model
new_cfg_filenames.append(cfg_info.config.outputs.run_path)
# Add a line to the script for training this model
train_script += f"sleap-train {new_cfg_filename} {labels_filename}\n"
# Write the script to train the models which need to be trained
with open(os.path.join(output_dir, "train-script.sh"), "w") as f:
f.write(train_script)
# Build the script for running inference
inference_script = "#!/bin/bash\n"
# Object with settings for inference
inference_task = InferenceTask(
labels_filename=labels_filename,
trained_job_paths=new_cfg_filenames,
inference_params=inference_params,
)
for item_for_inference in items_for_inference.items:
# We want to save predictions in output dir so use local path
prediction_output_path = (
f"{os.path.basename(item_for_inference.path)}.predictions.slp"
)
# Use absolute path to video
item_for_inference.use_absolute_path = True
# Get list of cli args
cli_args, _ = inference_task.make_predict_cli_call(
item_for_inference=item_for_inference, output_path=prediction_output_path,
)
# And join them into a single call to inference
inference_script += " ".join(cli_args) + "\n"
# And write it
with open(os.path.join(output_dir, "inference-script.sh"), "w") as f:
f.write(inference_script)
# Restore the working directory
os.chdir(old_cwd)
| 13,313
|
def parse_basic_profile_forms():
"""Parses and validates basic profile forms in the request.
Returns:
A dictionary containing user profile.
Raises:
ValueError: When validation fails.
"""
return {
'display_name': get_form_string('display_name', 32),
'contact_email': get_form_string('contact_email', 256),
'member_names': get_form_string('member_names', 4096),
'nationalities': get_form_string('nationalities', 1024, allow_empty=True),
'languages': get_form_string('languages', 1024, allow_empty=True),
'source_url': get_form_string('source_url', 2083, allow_empty=True),
}
| 13,314
|
def test_lines():
"""Test that the correct line numbers are given in a chrome.manifest."""
c = chrome_manifest("""
zero foo bar
one bar foo
two abc def
#comment
four def abc
""".strip())
assert next(c.get_entries('zero'))['line'] == 1
assert next(c.get_entries('one'))['line'] == 2
assert next(c.get_entries('two'))['line'] == 3
assert next(c.get_entries('four'))['line'] == 5
| 13,315
|
def main():
"""Main entrance for training"""
args = parser.parse_args()
print(sys.argv)
context.set_context(mode=context.GRAPH_MODE)
if args.GPU:
context.set_context(device_target='GPU')
# parse model argument
assert args.model.startswith(
"tinynet"), "Only Tinynet models are supported."
_, sub_name = args.model.split("_")
net = tinynet(sub_model=sub_name,
num_classes=args.num_classes,
drop_rate=0.0,
drop_connect_rate=0.0,
global_pool="avg",
bn_tf=False,
bn_momentum=None,
bn_eps=None)
print("Total number of parameters:", count_params(net))
input_size = net.default_cfg['input_size'][1]
val_data_url = os.path.join(args.data_path, 'val')
val_dataset = create_dataset_val(args.batch_size,
val_data_url,
workers=args.workers,
distributed=False,
input_size=input_size)
loss = LabelSmoothingCrossEntropy(smooth_factor=args.smoothing,
num_classes=args.num_classes)
loss.add_flags_recursive(fp32=True, fp16=False)
eval_metrics = {'Validation-Loss': Loss(),
'Top1-Acc': Top1CategoricalAccuracy(),
'Top5-Acc': Top5CategoricalAccuracy()}
ckpt = load_checkpoint(args.ckpt)
load_param_into_net(net, ckpt)
net.set_train(False)
model = Model(net, loss, metrics=eval_metrics)
metrics = model.eval(val_dataset, dataset_sink_mode=False)
print(metrics)
| 13,316
|
def fixture_success(request):
"""
Test Cases:
1. Hitting uncovered route as base user (logged in flow). Will return 200
since uncovered route is an open endpoint and thus Anonymous users can also
access it.
2. Hitting uncovered route as base user and HEAD request
3. Hitting uncovered route as admin user and HEAD request
4. Hitting uncovered route as super admin user and GET request
5. Hitting uncovered route as super admin user and HEAD request
6. Hitting uncovered route as anonymous user and GET request
7. Hitting uncovered route as anonymous user and HEAD request
8. Hitting covered route as admin user and GET request
9. Hitting covered route as admin user and HEAD request
10. Hitting covered route as super admin user and POST request
11. Hitting covered route as super admin user and GET request
12. Hitting covered route as super admin user and HEAD request
"""
db.create_all()
base_user, admin_user, super_admin_user = config_data_setup()
data_to_send = [
{
'input': {
'method': 'GET',
'url_rule': '/uncovered_route',
'user': base_user,
'function': app.view_functions['uncovered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'HEAD',
'url_rule': '/uncovered_route',
'user': base_user,
'function': app.view_functions['uncovered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'GET',
'url_rule': '/uncovered_route',
'user': admin_user,
'function': app.view_functions['uncovered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'HEAD',
'url_rule': '/uncovered_route',
'user': admin_user,
'function': app.view_functions['uncovered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'GET',
'url_rule': '/uncovered_route',
'user': super_admin_user,
'function': app.view_functions['uncovered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'HEAD',
'url_rule': '/uncovered_route',
'user': super_admin_user,
'function': app.view_functions['uncovered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'GET',
'url_rule': '/uncovered_route',
'user': None,
'function': app.view_functions['uncovered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'HEAD',
'url_rule': '/uncovered_route',
'user': None,
'function': app.view_functions['uncovered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'GET',
'url_rule': '/covered_route',
'user': admin_user,
'function': app.view_functions['covered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'HEAD',
'url_rule': '/covered_route',
'user': admin_user,
'function': app.view_functions['covered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'POST',
'url_rule': '/covered_route',
'user': super_admin_user,
'function': app.view_functions['covered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'GET',
'url_rule': '/covered_route',
'user': super_admin_user,
'function': app.view_functions['covered_route']
},
'output': {
'status_code': 200
}
},
{
'input': {
'method': 'HEAD',
'url_rule': '/covered_route',
'user': super_admin_user,
'function': app.view_functions['covered_route']
},
'output': {
'status_code': 200
}
}
]
request.addfinalizer(tear_down)
return app, data_to_send
| 13,317
|
def get_quote_name(quote_number: int) -> Optional[str]:
""" used to help applications look up quote names based on the number
users.
"""
assert type(quote_number) in (int, type(None))
if quote_number is None:
return None
for key, value in csv.__dict__.items():
if value == quote_number:
return key
else:
raise ValueError('invalid quote_number: {}'.format(quote_number))
| 13,318
|
def calc_floodzone(row):
"""Extracts the FEMAZONE of an SFHA based on each row's attributes.
This function acts on individual rows of a pandas DataFrame using
the apply built-in.
Parameters
----------
row : Pandas Series
A row of a pandas DataFrame
Returns
-------
str
The flood zone designation for an SFHA
"""
if row["FLD_ZONE"] == 'AO':
zone = 'AO' + str(round(row['DEPTH']))
elif row["FLD_ZONE"] == 'AH':
zone = 'AH' + str(round(row["STATIC_BFE"]))
else:
zone = row["FLD_ZONE"]
return zone
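# Hedged usage sketch: the function is written for DataFrame.apply over rows carrying
# FLD_ZONE, DEPTH and STATIC_BFE columns (column names taken from the body above);
# the small frame here is purely illustrative.
import pandas as pd

sfha = pd.DataFrame({
    "FLD_ZONE": ["AO", "AH", "AE"],
    "DEPTH": [2.4, None, None],
    "STATIC_BFE": [None, 13.6, None],
})
print(sfha.apply(calc_floodzone, axis=1).tolist())  # ['AO2', 'AH14', 'AE']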
| 13,319
|
def trans_you(ori_image, img_db, target_size=(8, 8)):
"""Transfer original image to composition of images.
Parameters
----------
ori_image : numpy.ndarray
the original image
img_db : h5py.File
image datasets
target_size : tuple
size (height, width) of each tile in the composed image
Returns
-------
res_img : numpy.ndarray
result image
"""
tot_pixels = ori_image.shape[0]*ori_image.shape[1]
image_idx = img_idx(tot_pixels)
res_img = np.zeros_like(ori_image)
res_img = imresize(res_img,
(res_img.shape[0]*target_size[0],
res_img.shape[1]*target_size[1]))
for i in range(ori_image.shape[0]):
for j in range(ori_image.shape[1]):
idx = image_idx[i*ori_image.shape[1]+j]
img = get_img(img_db, idx)
pixel = ori_image[i, j, :]
img = trans_img(img, pixel, target_size)
res_img[i*target_size[0]:(i+1)*target_size[0],
j*target_size[1]:(j+1)*target_size[1]] = img
print ("[MESSAGE] Row %i is processed." % (i+1))
return res_img
| 13,320
|
def new_assessment():
"""
RESTful CRUD controller to create a new 'complete' survey
- although the created form is a fully custom one
"""
# Load Model
table = s3db.survey_complete
s3db.table("survey_series")
def prep(r):
if r.interactive:
viewing = get_vars.get("viewing", None)
if viewing:
dummy, series_id = viewing.split(".")
else:
series_id = get_vars.get("series", None)
if not series_id:
series_id = r.id
if series_id is None:
# The URL is bad, without a series id we're lost so list all series
redirect(URL(c="survey", f="series", args=[], vars={}))
if len(request.post_vars) > 0:
id = s3db.survey_save_answers_for_series(series_id,
None, # Insert
request.post_vars)
response.confirmation = \
s3.crud_strings["survey_complete"].msg_record_created
r.method = "create"
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
# Not sure why we need to repeat this & can't do it outside the prep/postp
viewing = get_vars.get("viewing", None)
if viewing:
dummy, series_id = viewing.split(".")
else:
series_id = get_vars.get("series", None)
if not series_id:
series_id = r.id
if output["form"] is None:
# The user is not authorised to create so switch to read
redirect(URL(c="survey", f="series",
args=[series_id, "read"],
vars={}))
# This is a bespoke form which confuses CRUD, which displays an
# error "Invalid form (re-opened in another window?)"
# So as long as we don't have an error in the form we can
# delete this error.
elif response.error and not output["form"]["error"]:
response.error = None
s3db.survey_answerlist_dataTable_post(r)
form = s3db.survey_buildQuestionnaireFromSeries(series_id, None)
urlimport = URL(c=module, f="complete", args=["import"],
vars={"viewing":"%s.%s" % ("survey_series", series_id),
"single_pass":True}
)
buttons = DIV(A(T("Upload Completed Assessment Form"),
_href=urlimport,
_id="Excel-import",
_class="action-btn"
),
)
output["form"] = TAG[""](buttons, form)
return output
s3.postp = postp
return crud_controller(module, "complete",
method = "create",
rheader = s3db.survey_series_rheader
)
| 13,321
|
async def random_pokemon(connection: asyncpg.Connection, /) -> types.Pokemon:
"""Returns a random :class:`types.Pokemon`."""
records = await tables.Pokemon.fetch(connection)
return await _pokemon(connection, random.choice(records))
| 13,322
|
def remove_space(text):
"""
Collapses runs of whitespace into single spaces and strips the ends
:param str text: text to process
"""
return re.sub(r"\s+", " ", text).strip()
| 13,323
|
def getFilePathBase():
"""
Gets the directory path of the requested URL's file on the remote server
:return: PHP payload encoded as base64
"""
code = """
@ini_set("display_errors","0");
@set_time_limit(0);
@set_magic_quotes_runtime(0);
header("Content-Type:application/json");
$res = array();$res["path"] = dirname(__FILE__);
echo ("<ek>");
echo json_encode($res);
echo ("</ek>");
die();
"""
return base64.b64encode(code.encode("UTF-8")).decode("UTF-8")
| 13,324
|
def extractHeldSimple(q, factoryConfig=None):
"""All Held Glideins: JobStatus == 5
q: dictionary of Glideins from condor_q
factoryConfig (FactoryConfig): Factory configuration (NOT USED, kept for interface compatibility)
Returns:
list: keys of the Held Glideins from condor_q
"""
# Held==5
qheld = q.fetchStored(lambda el: el["JobStatus"] == 5)
qheld_list = list(qheld.keys())
return qheld_list
| 13,325
|
def neighbors(stats1, stats2, max_val=1e5):
"""stats from cv.connectedComponentsWithStats."""
pts1 = np.concatenate(
(stats1[:, :2], stats1[:, :2] + stats1[:, 2:4]), axis=0)
pts2 = np.concatenate(
(stats2[:, :2], stats2[:, :2] + stats2[:, 2:4]), axis=0)
dist = np.abs(pts1[:, None] - pts2).sum(axis=2)
eye = np.eye(dist.shape[0], dtype=dist.dtype)
R = (dist + eye * max_val).argmin(axis=1)
return R.reshape((2, -1)).T
| 13,326
|
def advect(gridc, gridx, gridy, scalars, ibmf, ibmx, ibmy, velc, options):
"""
"""
nx, ny = gridc.nx, gridc.ny
dx, dy = gridc.dx, gridc.dy
dt = scalars.dt
lset_iter = options['lset_redistance']
extrap_iter = options['extrap_solid']
u = gridx[velc][0,0,:,:].transpose()
v = gridy[velc][0,0,:,:].transpose()
phi = gridc[ibmf][0,0,:,:].transpose()
lmx = gridc[ibmx][0,0,:,:].transpose()
lmy = gridc[ibmy][0,0,:,:].transpose()
adfx = numpy.zeros_like(phi)
adfy = numpy.zeros_like(phi)
ddsn = numpy.zeros_like(phi)
#--------------Advect dynamic X-Y grid---------------
_interface.advect_dynamic_grid(phi,lmx,u,v,dt,dx,dy,nx+2,ny+2)
_interface.advect_dynamic_grid(phi,lmy,u,v,dt,dx,dy,nx+2,ny+2)
#---------Find normal vectors---------------------
_interface.normal_vector_solid(phi,adfx,adfy,dx,dy,nx+2,ny+2)
#---------Extrapolate X grid----------------------
_interface.directional_derivative(phi,lmx,ddsn,adfx,adfy,dx,dy,nx+2,ny+2)
for _iter in range(extrap_iter):
_interface.constant_extrapolation(phi,ddsn,adfx,adfy,dx,dy,nx+2,ny+2)
for _iter in range(extrap_iter):
_interface.linear_extrapolation(phi,lmx,ddsn,adfx,adfy,dx,dy,nx+2,ny+2)
gridc.fill_guard_cells(ibmx)
#---------Extrapolate Y grid----------------------
_interface.directional_derivative(phi,lmy,ddsn,adfx,adfy,dx,dy,nx+2,ny+2)
for _iter in range(extrap_iter):
_interface.constant_extrapolation(phi,ddsn,adfx,adfy,dx,dy,nx+2,ny+2)
for _iter in range(extrap_iter):
_interface.linear_extrapolation(phi,lmy,ddsn,adfx,adfy,dx,dy,nx+2,ny+2)
gridc.fill_guard_cells(ibmy)
#--------Advect solid interface-------------------
_interface.advect_solid(phi,u,v,dt,dx,dy,nx+2,ny+2)
gridc.fill_guard_cells(ibmf)
#--------Redistance solid interface---------------
phi_old = numpy.copy(phi)
lsDT = numpy.sqrt(dx**2 + dy**2)/2.
for _iter in range(lset_iter):
_interface.redistance_solid(phi,phi_old,lsDT,dx,dy,nx+2,ny+2)
gridc.fill_guard_cells(ibmf)
return
| 13,327
|
def test_destroy_as_user(user, short_link):
"""An logged in user should not be able to delete a short link"""
client = get_api_client(user=user)
url = _get_short_link_url(short_link)
response = client.delete(url)
assert response.status_code == status.HTTP_404_NOT_FOUND
| 13,328
|
def shortest_substring_containing_characters(text: str, char_set: set) -> Optional[str]:
"""
Finds the shortest substring of `text` containing every character in `char_set`.
Runs in O(n) time and O(k) extra space, where n = len(text) and k = len(char_set).
"""
start = 0
end = -1
count_char = defaultdict(int) # char and its count
found_set = set()
for index, char in enumerate(text):
if char in char_set:
count_char[char] += 1
found_set.add(char)
if len(found_set) == len(char_set):
new_start = start
new_end = index
while text[new_start] not in char_set or count_char[text[new_start]] > 1:
if text[new_start] in count_char:
count_char[text[new_start]] -= 1
new_start += 1
if end < start or (new_end - new_start) < (end - start):
end = new_end
start = new_start
return text[start: end + 1] if end >= start else None  # >= also handles single-character windows
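# Quick check on the classic minimum-window example; "banc" is the shortest
# substring of the text containing 'a', 'b' and 'c'.
print(shortest_substring_containing_characters("adobecodebanc", {"a", "b", "c"}))  # banc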
| 13,329
|
def refine_grid(
grid,
cb,
grid_additions=(50, 50),
ntrail=2,
blurs=((), ()),
metric=None,
atol=None,
rtol=None,
extremum_refinement=None,
snr=False,
):
"""Refines an existing grid by adding points to it.
Parameters
----------
grid : array
cb : callable
Function to be evaluated (note that noise is handled poorly).
grid_additions : iterable of ints (even numbers)
Sequence specifying how many gridpoints to add each time.
ntrail : int (>= 2)
Number of points to include in the look-ahead extrapolation.
blurs : pair of iterables of ints (of same length)
Blur fractions of absolute residuals to neighbors.
atol : float
Absolute tolerance to be fulfilled by all absolute residuals for early exit.
rtol : float
Relative tolerance to be fulfilled by all absolute residuals for early exit.
extremum_refinement : locator (callable), n (int), predicate (callable)
Between each grid addition a callable for locating the extremum (e.g. np.argmax)
can be evaluated. The integer specifies how many gridpoints that should be inserted
on each side (one side if on boundary) of the extremum.
snr : bool
Use the signal-to-noise ratio to lower the grid-addition weight of potential noise.
Returns
-------
(grid, errors)
"""
for na in grid_additions:
if (na % 2) != 0:
raise ValueError("Need even number of grid points for each addition")
if extremum_refinement == "max":
extremum_refinement = (np.argmax, 1, lambda y, i: True)
elif extremum_refinement == "min":
extremum_refinement = (np.argmin, 1, lambda y, i: True)
def add_to(adds, grd, res, ys):
na = np.sum(adds)
if na == 0:
return grd, res, ys
nextresults = np.empty(grd.size + na, dtype=object)
nextresults[0] = res[0]
nexty = np.empty(grd.size + na)
nexty[0] = ys[0]
nextgrid = np.empty(grd.size + na)
nextgrid[0] = grd[0]
ptr = 1
yslices = []
for gi, nloc in enumerate(adds):
nextgrid[ptr : ptr + nloc + 1] = np.linspace(
grd[gi], grd[gi + 1], 2 + nloc
)[1:]
nextresults[ptr + nloc] = res[gi + 1]
nexty[ptr + nloc] = ys[gi + 1]
if nloc > 0:
yslices.append(slice(ptr, ptr + nloc))
ptr += nloc + 1
newresults = cb(np.concatenate([nextgrid[yslc] for yslc in yslices]))
newy = (
newresults if metric is None else np.array([metric(r) for r in newresults])
)
ystart, ystop = 0, 0
for yslc in yslices:
ystop += yslc.stop - yslc.start
nextresults[yslc] = newresults[ystart:ystop]
nexty[yslc] = newy[ystart:ystop]
ystart = ystop
return nextgrid, nextresults, nexty
results = cb(grid)
y = np.array(
results if metric is None else [metric(r) for r in results], dtype=np.float64
)
for na in grid_additions:
if extremum_refinement:
extremum_cb, extremum_n, predicate_cb = extremum_refinement
argext = extremum_cb(y)
if predicate_cb(y, argext):
additions = np.zeros(grid.size - 1, dtype=int)
if argext > 0: # left of
additions[argext - 1] = extremum_n
elif argext < grid.size - 1: # right of
additions[argext] = extremum_n
grid, results, y = add_to(additions, grid, results, y)
additions = np.zeros(grid.size - 1, dtype=int)
done = True if atol is not None or rtol is not None else False
slcs, errs = [], []
for direction in ("fw", "bw"):
est, slc = interpolate_ahead(grid, y, ntrail, direction)
err = np.abs(y[slc] - est)
if atol is not None:
done = done and np.all(err < atol)
if rtol is not None:
done = done and np.all(err / y[slc] < rtol)
slcs.append(slc)
errs.append(err)
if snr:
all_errs = np.array(
[[0.0] * ntrail + errs[0].tolist(), errs[1].tolist() + [0.0] * ntrail]
)
min__max = np.amin(all_errs, axis=0) / np.amax(all_errs, axis=0)
dgrid = np.diff(grid)
delta = np.empty_like(grid)
delta[0] = dgrid[0] ** -2
delta[-1] = dgrid[-1] ** -2
delta[1:-1] = 1 / (dgrid[:-1] * dgrid[1:])
lndelta = np.log(delta)
normlndelta = lndelta - np.max(lndelta)
for i in range(2):
errs[i] *= (1.0 + 1e-8) - min__max[slcs[i]]
errs[i] *= np.exp(normlndelta[slcs[i]])
for direction, blur, slc, err in zip(("fw", "bw"), blurs, slcs, errs):
for ib, b in enumerate(blur, 1):
blur_slices = (slice(ib, None), slice(None, -ib))
err[blur_slices[direction == "bw"]] += (
b * err[blur_slices[direction == "fw"]]
)
rerr = np.array(np.round(err * na / 2 / np.sum(err)), dtype=int)
delta = np.sum(rerr) - na // 2
if delta == 0:
pass
else:
sorted_indices = np.argsort(rerr)
for i in sorted_indices[-abs(delta) :]:
rerr[i] += 1 if delta < 0 else -1
if np.sum(rerr) - na // 2:
raise ValueError("Balancing failed.")
additions[
slice(ntrail - 1, None)
if direction == "fw"
else slice(None, 1 - ntrail)
] += rerr
grid, results, y = add_to(additions, grid, results, y)
if done:
break
return grid, results
| 13,330
|
def last_week(today: datetime=None, tz=None):
"""
Returns last week begin (inclusive) and end (exclusive).
:param today: Some date (defaults to the current UTC datetime)
:param tz: Timezone (defaults to pytz UTC)
:return: begin (inclusive), end (exclusive)
"""
if today is None:
today = datetime.utcnow()
begin = today - timedelta(weeks=1, days=today.weekday())
begin = datetime(year=begin.year, month=begin.month, day=begin.day)
return localize_time_range(begin, begin + timedelta(days=7), tz)
| 13,331
|
def get_velocity_limits():
"""
"""
velocity_limits = {}
for i in range(6):
try:
velocity_limits['a{}'.format(i+1)] = float(pm.textField(
't_A{}vel'.format(i+1),
q=True,
text=True))
except ValueError:
pm.error('Robot velocity limits must be floats')
return velocity_limits
| 13,332
|
def adjust_learning_rate(learning_rate,optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = learning_rate * (0.1 ** (epoch // 25))
print(str(lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
| 13,333
|
def when_click_nth_element(context, position, ordinal, words, element_name):
""" Tells the browser to click on the nth element within the element of the given identifier.
::
When the user clicks on the 2nd Entry in the "Table"
"""
logger.info(f'Clicking on {position}{ordinal} "{words}" of the element: "{element_name}".')
try:
page_element = context.page[element_name]
except context.page.ElementNotFound:
raise AssertionError(f'The {element_name} was not found on the page')
try:
nth_element = page_element.find_by_xpath(f'.//{words}')[position-1]
except (IndexError, splinter.exceptions.ElementDoesNotExist):
raise AssertionError(f'The {element_name} does not have a {position}{ordinal} {words}')
try:
nth_element.click()
except (AttributeError, selenium.common.exceptions.ElementNotVisibleException):
raise AssertionError(
f'The {element_name} does not have a {position}{ordinal} {words} that is clickable'
)
| 13,334
|
def decode_item_length(encoded_data: Bytes) -> int:
"""
Find the length of the rlp encoding for the first object in the
encoded sequence.
Here `encoded_data` refers to concatenation of rlp encoding for each
item in a sequence.
NOTE - This is a helper function not described in the spec. It was
introduced as the spec doesn't discuss decoding of the RLP encoded
data.
Parameters
----------
encoded_data :
RLP encoded data for a sequence of objects.
Returns
-------
rlp_length : `int`
"""
# Can't decode item length for empty encoding
ensure(len(encoded_data) > 0)
first_rlp_byte = Uint(encoded_data[0])
# This is the length of the big endian representation of the length of
# rlp encoded object byte stream.
length_length = Uint(0)
decoded_data_length = 0
# This occurs only when the raw_data is a single byte whose value < 128
if first_rlp_byte < 0x80:
# We return 1 here, as the end formula
# 1 + length_length + decoded_data_length would be invalid for
# this case.
return 1
# This occurs only when the raw_data is a byte stream with length < 56
# and doesn't fall into the above cases
elif first_rlp_byte <= 0xB7:
decoded_data_length = first_rlp_byte - 0x80
# This occurs only when the raw_data is a byte stream and doesn't fall
# into the above cases
elif first_rlp_byte <= 0xBF:
length_length = first_rlp_byte - 0xB7
ensure(length_length < len(encoded_data))
# Expectation is that the big endian bytes shouldn't start with 0
# while trying to decode using RLP, in which case is an error.
ensure(encoded_data[1] != 0)
decoded_data_length = Uint.from_be_bytes(
encoded_data[1 : 1 + length_length]
)
# This occurs only when the raw_data is a sequence of objects with
# length(concatenation of encoding of each object) < 56
elif first_rlp_byte <= 0xF7:
decoded_data_length = first_rlp_byte - 0xC0
# This occurs only when the raw_data is a sequence of objects and
# doesn't fall into the above cases.
elif first_rlp_byte <= 0xFF:
length_length = first_rlp_byte - 0xF7
ensure(length_length < len(encoded_data))
# Expectation is that the big endian bytes shouldn't start with 0
# while trying to decode using RLP, in which case is an error.
ensure(encoded_data[1] != 0)
decoded_data_length = Uint.from_be_bytes(
encoded_data[1 : 1 + length_length]
)
return 1 + length_length + decoded_data_length
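# Usage sketch against standard RLP encodings, matching the byte-prefix rules in the
# docstring: a single byte below 0x80, the short string "dog" (0x83 prefix + 3 payload
# bytes), and the list ["dog"] (0xc4 prefix + 4 payload bytes).
assert decode_item_length(b"\x05") == 1
assert decode_item_length(b"\x83dog") == 4
assert decode_item_length(b"\xc4\x83dog") == 5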
| 13,335
|
def pxor(a1, a2, fmt=None):
"""Bitwise XOR"""
return c2repr(_inconv(a1) ^ _inconv(a2), fmt)
| 13,336
|
def ensure_dir_exists(path):
"""Ensure that the directory tree to the path exists."""
split_path = os.path.splitext(path)
if split_path[0] != path: # path is file
make_path = os.path.dirname(split_path[0])
else: # path is dir
make_path = split_path[0]
os.makedirs(make_path, exist_ok = True)
| 13,337
|
def milestone_2_test_1_initial_val(lattice_grid_shape: Tuple[int, int]) -> Tuple[np.ndarray, np.ndarray]:
"""
Return initial conditions
Args:
lattice_grid_shape: lattice grid [lx, ly]
Returns:
uniform density of 0.5 with a single peak (0.6) in the middle; velocities all 0
"""
density = np.ones(lattice_grid_shape) * 0.5
density[lattice_grid_shape[0] // 2, lattice_grid_shape[1] // 2] = 0.6
velocity = np.ones(lattice_grid_shape) * 0.0
return density, velocity
| 13,338
|
def test_element_html_call_get_attribute(monkeypatch, browser_driver):
"""Calls el_or_xpath WebElement attr get_attribute"""
called = []
class FakeWebElement:
def get_attribute(self, val):
called.append(('get_attribute', val))
return 42
@browser_driver.register
class FakeDriver:
pass
# This is needed to pass type checks in element_html()
monkeypatch.setattr(core, 'WebElement', FakeWebElement)
b = Browser(FakeDriver())
fake_el = FakeWebElement()
retval = b.element_html(fake_el, core.HTMLProperty.outer)
assert retval == 42
assert called == [
('get_attribute', core.HTMLProperty.outer.value)
]
| 13,339
|
def moving_dictators(session, system_ids):
"""
Show newly controlling dictators in the last 2 days.
Show all controlling dictators in monitored systems.
Subqueries galore, you've been warned.
Returns: A list of messages to send.
"""
gov_dic = session.query(Government.id).\
filter(Government.text.in_(["Anarchy", "Dictatorship"])).\
scalar_subquery()
control_state_id = session.query(PowerState.id).\
filter(PowerState.text == "Control").\
scalar_subquery()
current = sqla_orm.aliased(FactionState)
pending = sqla_orm.aliased(FactionState)
sys = sqla_orm.aliased(System)
sys_control = sqla_orm.aliased(System)
dics = session.query(Influence, sys.name, Faction.name, Government.text,
current.text, pending.text,
sqla.func.ifnull(sys_control.name, 'N/A').label('control')).\
join(sys, Influence.system_id == sys.id).\
join(Faction, Influence.faction_id == Faction.id).\
join(Government, Faction.government_id == Government.id).\
join(current, Influence.state_id == current.id).\
join(pending, Influence.pending_state_id == pending.id).\
outerjoin(
sys_control, sqla.and_(
sys_control.power_state_id == control_state_id,
sys_control.dist_to(sys) < 15
)
).\
filter(Influence.system_id.in_(system_ids),
Government.id.in_(gov_dic)).\
order_by('control', sys.name).\
all()
look_for = [sqla.and_(InfluenceHistory.system_id == inf[0].system_id,
InfluenceHistory.faction_id == inf[0].faction_id)
for inf in dics]
time_window = time.time() - (60 * 60 * 24 * 2)
inf_history = session.query(InfluenceHistory).\
filter(sqla.or_(*look_for)).\
filter(InfluenceHistory.updated_at >= time_window).\
order_by(InfluenceHistory.system_id, InfluenceHistory.faction_id,
InfluenceHistory.updated_at.desc()).\
all()
pair_hist = {}
for hist in inf_history:
key = "{}_{}".format(hist.system_id, hist.faction_id)
pair_hist[key] = hist
lines = [["Control", "System", "Faction", "Gov", "Date",
"Inf", "Inf (2 days ago)", "State", "Pending State"]]
for dic in dics:
key = "{}_{}".format(dic[0].system_id, dic[0].faction_id)
try:
lines += [[dic[-1], dic[1][:16], dic[2][:16], dic[3][:3],
dic[0].short_date, "{:5.2f}".format(round(dic[0].influence, 2)),
"{:5.2f}".format(round(pair_hist[key].influence, 2)), dic[-3], dic[-2]]]
except KeyError:
lines += [[dic[-1], dic[1][:16], dic[2][:16], dic[3][:3],
dic[0].short_date, "{:5.2f}".format(round(dic[0].influence, 2)), "N/A",
dic[-3], dic[-2]]]
prefix = "**\n\nInf Movement Anarchies/Dictators**)\n"
prefix += "N/A: Means no previous information, either newly expanded to system or not tracking.\n"
return cog.tbl.format_table(lines, header=True, prefix=prefix)
| 13,340
|
def get_alt_for_q_with_constant_mach(q, mach, tol=5., SI=False, nmax=20):
# type: (float, float, float, bool, int) -> float
"""
Gets the altitude associated with a dynamic pressure.
Parameters
----------
q : float
the dynamic pressure lb/ft^2 (SI=Pa)
mach : float
the mach to hold constant
tol : float; default=5.
tolerance in feet/meters
SI : bool
should SI units be used; default=False
nmax : int; default=20
maximum number of iterations for the altitude search
Returns
-------
alt : float
the altitude in ft (SI=m)
"""
pressure = 2 * q / (1.4 * mach ** 2) # gamma = 1.4
alt = get_alt_for_pressure(pressure, tol=tol, SI=SI, nmax=nmax)
return alt
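# Worked example of the pressure relation used above (q = 0.5 * gamma * p * M**2 with
# gamma = 1.4); the altitude itself would then come from get_alt_for_pressure.
q, mach = 1000.0, 0.8                      # dynamic pressure in Pa (SI), Mach number
pressure = 2 * q / (1.4 * mach ** 2)
print(round(pressure, 1))                  # 2232.1 Pa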
| 13,341
|
def buy(amount, token, price, account):
"""Put a buy-order for a token to the hive-engine market
"""
stm = shared_blockchain_instance()
if stm.rpc is not None:
stm.rpc.rpcconnect()
if not stm.is_hive:
print("Please set a Hive node")
return
if account is None:
account = stm.config["default_account"]
market = Market(blockchain_instance=stm)
if not unlock_wallet(stm):
return
tx = market.buy(account, amount, token, price)
tx = json.dumps(tx, indent=4)
print(tx)
| 13,342
|
def addMetadataFlags(metadataChunk, numberOfMetadataChunks):
"""Adds binary flag the number of metadata chunks this upload has (uint8).
Arguments:
metadataChunk {bytes} -- First metadata chunk already encrypted, but before signing.
numberOfMetadataChunks {int} -- Self-explanatory.
Returns:
bytes -- Metadata chunk ready to be signed.
"""
#pylint: disable=E1111
numberFlag = np.uint8(numberOfMetadataChunks).tobytes()
fullMetadataChunk = b"".join([numberFlag, metadataChunk])
return fullMetadataChunk
| 13,343
|
def save_object(obj, file_name, pickle_format=2):
"""Save a Python object by pickling it.
Unless specifically overridden, we want to save it in Pickle format=2 since this
will allow other Python2 executables to load the resulting Pickle. When we want
to completely remove Python2 backward-compatibility, we can bump it up to 3. We
should never use pickle.HIGHEST_PROTOCOL as far as possible if the resulting
file is manifested or used, external to the system.
"""
file_name = os.path.abspath(file_name)
# Avoid filesystem race conditions (particularly on network filesystems)
# by saving to a random tmp file on the same filesystem, and then
# atomically rename to the target filename.
tmp_file_name = file_name + ".tmp." + uuid4().hex
try:
with open(tmp_file_name, "wb") as f:
pickle.dump(obj, f, pickle_format)
f.flush() # make sure it's written to disk
os.fsync(f.fileno())
os.rename(tmp_file_name, file_name)
finally:
# Clean up the temp file on failure. Rather than using os.path.exists(),
# which can be unreliable on network filesystems, attempt to delete and
# ignore os errors.
try:
os.remove(tmp_file_name)
except EnvironmentError as e: # parent class of IOError, OSError
if getattr(e, "errno", None) != errno.ENOENT: # We expect ENOENT
logger.info("Could not delete temp file %r", tmp_file_name, exc_info=True)
# pass through since we don't want the job to crash
| 13,344
|
def table(a):
"""get tabular view of obj, if available, else return obj"""
if misc.istablarray(a):
return a.__view__('table')
return a
| 13,345
|
def parse_quadrupole(line):
"""
Quadrupole (type 1)
V1: zedge
V2: quad gradient (T/m)
V3: file ID
If > 0, then include fringe field (using Enge function) and
V3 = effective length of quadrupole.
V4: radius (m)
V5: x misalignment error (m)
V6: y misalignment error (m)
V7: rotation error x (rad)
V8: rotation error y (rad)
V9: rotation error z (rad)
If V9 != 0, skew quadrupole
V10: rf quadrupole frequency (Hz)
V11: rf quadrupole phase (degree)
"""
v = v_from_line(line)
d={}
d['zedge'] = float(v[1])
d['b1_gradient'] = float(v[2])
if float(v[3]) > 0:
d['L_effective'] = float(v[3])
else:
d['file_id'] = int(v[3])
d['radius'] = float(v[4])
d2 = parse_misalignments(v[5:10])
d.update(d2)
if len(v) > 11:
d['rf_frequency'] = float(v[10])
d['rf_phase_deg'] = float(v[11])
return(d)
| 13,346
|
def cases_vides(pave):
"""fonction qui cherche toutes les cases vides ayant des cases adjacentes
pleines dans un pavé (où pavé est un tableau de tuiles ou de cases vides)
retourne le tableau contenant les positions de ces cases vides et les
cases adjacentes en fonction de leur position"""
result = []
for i in range(len(pave)):
for j in range(len(pave)):
if pave[i][j] == None:
position = Position((i, j), None, None, None, None)
if is_in_array(i + 1, j, pave) and pave[i + 1][j] != None:
position.Bot = pave[i + 1][j]
if is_in_array(i - 1, j, pave) and pave[i - 1][j] != None:
position.Top = pave[i - 1][j]
if is_in_array(i, j + 1, pave) and pave[i][j + 1] != None:
position.Right = pave[i][j + 1]
if is_in_array(i, j - 1, pave) and pave[i][j - 1] != None:
position.Left = pave[i][j - 1]
if position.Top != None or position.Bot != None or position.Left != None or position.Right != None:
result.append(position)
return result
| 13,347
|
def _create_scalar_tensor(vals, tensor=None):
"""Create tensor from scalar data"""
if not isinstance(vals, (tuple, list)):
vals = (vals,)
return _create_tensor(np.array(vals), tensor)
| 13,348
|
def is_importable(name):
""" Determines if a given package name can be found.
:param str name: The name of the package
:returns: True if the package can be found
:rtype: bool
"""
return bool(importlib.util.find_spec(name))
| 13,349
|
def make_pipeline(*steps, **kwargs):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, their names will be set
to the lowercase of their types automatically.
Parameters
----------
*steps : list of estimators
A list of estimators.
memory : None, str or object with the joblib.Memory interface, default=None
Used to cache the fitted transformers of the pipeline. By default,
no caching is performed. If a string is given, it is the path to
the caching directory. Enabling caching triggers a clone of
the transformers before fitting. Therefore, the transformer
instance given to the pipeline cannot be inspected
directly. Use the attribute ``named_steps`` or ``steps`` to
inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming.
verbose : bool, default=False
If True, the time elapsed while fitting each step will be printed as it
is completed.
Returns
-------
p : Pipeline
See Also
--------
imblearn.pipeline.Pipeline : Class for creating a pipeline of
transforms with a final estimator.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB(priors=None))
... # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler', StandardScaler()),
('gaussiannb', GaussianNB())])
"""
memory = kwargs.pop("memory", None)
verbose = kwargs.pop('verbose', False)
if kwargs:
raise TypeError(
'Unknown keyword arguments: "{}"'.format(list(kwargs.keys())[0])
)
return Pipeline(skpipeline._name_estimators(steps),
memory=memory, verbose=verbose)
| 13,350
|
def mock_load_json():
"""Mock load_json."""
with patch("openpeerpower.components.ios.load_json", return_value={}):
yield
| 13,351
|
def say_hello(name):
"""
Log client's name which entered our application and send message to it
"""
logging.info('User %s entered', name)
return 'Hello {}'.format(name)
| 13,352
|
def dual_edges_2(vertices):
"""
Compute the dual edge vectors of a triangle, expressed in the
triangle plane orthonormal basis.
:param vertices: The triangle vertices (3 by n matrix with the vertices as rows (where n is the dimension of the
space)).
:returns: The triangle dual edge vectors (3 by 2 matrix with the coordinates for edge i in row i).
:rtype: :class:`Numpy array <numpy.ndarray>`
"""
t = dual_edges(vertices)
t2 = numpy.zeros((3, 2))
for i in range(3):
t2[i] = in_triangleplane_coords(vertices, t[i])
return t2
| 13,353
|
def test_uncompress():
""" Test the uncompress function """
# Given
""" test_input and expected """
# When
result = uncompress(test_input)
# Then
assert result == expected
| 13,354
|
def merge_dicts(dictionaries):
"""Merges multiple separate dictionaries into a single dictionary.
Parameters
----------
dictionaries : An iterable container of Python dictionaries.
Returns
-------
merged : A single dictionary that represents the result of merging the all the
dicts in ``dictionaries``.
Example
-------
The primary purpose of this function is to create a single dictionary
by combining multiple singleton dictionaries, as shown in the following example:
>>> dicts = [{'a': 1}, {'b': 2}, {'c': 3}]
>>> eb.merge_dicts(dicts)
{'a': 1, 'c': 3, 'b': 2}
"""
merged = dictionaries[0].copy()
for i in range(1, len(dictionaries)):
merged.update(dictionaries[i])
return merged
| 13,355
|
def noise_distribution_to_cost_function(
noise_distribution: Union[str, Callable]
) -> Callable[[str], str]:
"""
Parse noise distribution string to a cost function definition amici can
work with.
The noise distributions listed in the following are supported. :math:`m`
denotes the measurement, :math:`y` the simulation, and :math:`\\sigma` a
distribution scale parameter
(currently, AMICI only supports a single distribution parameter).
- `'normal'`, `'lin-normal'`: A normal distribution:
.. math::
\\pi(m|y,\\sigma) = \\frac{1}{\\sqrt{2\\pi}\\sigma}\\
exp\\left(-\\frac{(m-y)^2}{2\\sigma^2}\\right)
- `'log-normal'`: A log-normal distribution (i.e. log(m) is
normally distributed):
.. math::
\\pi(m|y,\\sigma) = \\frac{1}{\\sqrt{2\\pi}\\sigma m}\\
exp\\left(-\\frac{(\\log m - \\log y)^2}{2\\sigma^2}\\right)
- `'log10-normal'`: A log10-normal distribution (i.e. log10(m) is
normally distributed):
.. math::
\\pi(m|y,\\sigma) = \\frac{1}{\\sqrt{2\\pi}\\sigma m \\log(10)}\\
exp\\left(-\\frac{(\\log_{10} m - \\log_{10} y)^2}{2\\sigma^2}\\right)
- `'laplace'`, `'lin-laplace'`: A laplace distribution:
.. math::
\\pi(m|y,\\sigma) = \\frac{1}{2\\sigma}
\\exp\\left(-\\frac{|m-y|}{\\sigma}\\right)
- `'log-laplace'`: A log-Laplace distribution (i.e. log(m) is Laplace
distributed):
.. math::
\\pi(m|y,\\sigma) = \\frac{1}{2\\sigma m}
\\exp\\left(-\\frac{|\\log m - \\log y|}{\\sigma}\\right)
- `'log10-laplace'`: A log10-Laplace distribution (i.e. log10(m) is
Laplace distributed):
.. math::
\\pi(m|y,\\sigma) = \\frac{1}{2\\sigma m \\log(10)}
\\exp\\left(-\\frac{|\\log_{10} m - \\log_{10} y|}{\\sigma}\\right)
- `'binomial'`, `'lin-binomial'`: A (continuation of a discrete) binomial
distribution, parameterized via the success probability
:math:`p=\\sigma`:
.. math::
\\pi(m|y,\\sigma) = \\operatorname{Heaviside}(y-m) \\cdot
\\frac{\\Gamma(y+1)}{\\Gamma(m+1) \\Gamma(y-m+1)}
\\sigma^m (1-\\sigma)^{(y-m)}
- `'negative-binomial'`, `'lin-negative-binomial'`: A (continuation of a
discrete) negative binomial distribution, with `mean = y`,
parameterized via success probability `p`:
.. math::
\\pi(m|y,\\sigma) = \\frac{\\Gamma(m+r)}{\\Gamma(m+1) \\Gamma(r)}
(1-\\sigma)^m \\sigma^r
where
.. math::
r = \\frac{1-\\sigma}{\\sigma} y
The distributions above are for a single data point.
For a collection :math:`D=\\{m_i\\}_i` of data points and corresponding
simulations :math:`Y=\\{y_i\\}_i` and noise parameters
:math:`\\Sigma=\\{\\sigma_i\\}_i`, AMICI assumes independence,
i.e. the full distribution is
.. math::
\\pi(D|Y,\\Sigma) = \\prod_i\\pi(m_i|y_i,\\sigma_i)
AMICI uses the logarithm :math:`\\log(\\pi(m|y,\\sigma))`.
In addition to the above mentioned distributions, it is also possible to
pass a function taking a symbol string and returning a log-distribution
string with variables '{str_symbol}', 'm{str_symbol}', 'sigma{str_symbol}'
for y, m, sigma, respectively.
:param noise_distribution: An identifier specifying a noise model.
Possible values are
{`'normal'`, `'lin-normal'`, `'log-normal'`, `'log10-normal'`,
`'laplace'`, `'lin-laplace'`, `'log-laplace'`, `'log10-laplace'`,
`'binomial'`, `'lin-binomial'`, `'negative-binomial'`,
`'lin-negative-binomial'`, `<Callable>`}
For the meaning of the values see above.
:return: A function that takes a strSymbol and then creates a cost
function string (negative log-likelihood) from it, which can be
sympified.
"""
if isinstance(noise_distribution, Callable):
return noise_distribution
if noise_distribution in ['normal', 'lin-normal']:
y_string = '0.5*log(2*pi*{sigma}**2) + 0.5*(({y} - {m}) / {sigma})**2'
elif noise_distribution == 'log-normal':
y_string = '0.5*log(2*pi*{sigma}**2*{m}**2) ' \
'+ 0.5*((log({y}) - log({m})) / {sigma})**2'
elif noise_distribution == 'log10-normal':
y_string = '0.5*log(2*pi*{sigma}**2*{m}**2*log(10)**2) ' \
'+ 0.5*((log({y}, 10) - log({m}, 10)) / {sigma})**2'
elif noise_distribution in ['laplace', 'lin-laplace']:
y_string = 'log(2*{sigma}) + Abs({y} - {m}) / {sigma}'
elif noise_distribution == 'log-laplace':
y_string = 'log(2*{sigma}*{m}) + Abs(log({y}) - log({m})) / {sigma}'
elif noise_distribution == 'log10-laplace':
y_string = 'log(2*{sigma}*{m}*log(10)) ' \
'+ Abs(log({y}, 10) - log({m}, 10)) / {sigma}'
elif noise_distribution in ['binomial', 'lin-binomial']:
# Binomial noise model parameterized via success probability p
y_string = '- log(Heaviside({y} - {m})) - loggamma({y}+1) ' \
'+ loggamma({m}+1) + loggamma({y}-{m}+1) ' \
'- {m} * log({sigma}) - ({y} - {m}) * log(1-{sigma})'
elif noise_distribution in ['negative-binomial', 'lin-negative-binomial']:
# Negative binomial noise model of the number of successes m
# (data) before r=(1-sigma)/sigma * y failures occur,
# with mean number of successes y (simulation),
# parameterized via success probability p = sigma.
r = '{y} * (1-{sigma}) / {sigma}'
y_string = f'- loggamma({{m}}+{r}) + loggamma({{m}}+1) ' \
f'+ loggamma({r}) - {r} * log(1-{{sigma}}) ' \
f'- {{m}} * log({{sigma}})'
else:
raise ValueError(
f"Cost identifier {noise_distribution} not recognized.")
def nllh_y_string(str_symbol):
y, m, sigma = _get_str_symbol_identifiers(str_symbol)
return y_string.format(y=y, m=m, sigma=sigma)
return nllh_y_string
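# Hedged usage sketch: the returned callable builds a sympifiable negative
# log-likelihood string for one observable symbol; the exact identifier spelling
# comes from _get_str_symbol_identifiers, so the comment below is only indicative.
nllh_for = noise_distribution_to_cost_function("normal")
print(nllh_for("observable_x"))
# e.g. 0.5*log(2*pi*sigmaobservable_x**2) + 0.5*((observable_x - mobservable_x)/sigmaobservable_x)**2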
| 13,356
|
def parc_brainmaps(gene):
"""
Generates brainplots of `gene` expression values for each manuscript
Parameters
----------
gene : str
Gene for which brainplots should be generated
"""
fig_dir = FIG_DIR / 'brainmaps'
fig_dir.mkdir(parents=True, exist_ok=True)
aparc = nib.freesurfer.read_annot(
'/opt/freesurfer/subjects/fsaverage/label/lh.aparc.annot'
)[0]
aparc[aparc == -1] = 0
aparc = abagen.images._relabel(aparc)
ids = np.unique(aparc)
for study, params in PARAMETERS.iterrows():
data = pd.read_hdf(DATA_DIR / f'{study}.h5')
data.loc[0] = -100
data = np.asarray(data.loc[ids, gene])
# mean, std = data[1:].mean(), data[1:].std(ddof=1)
# vmin, vmax = mean - (2 * std), mean + (2 * std)
brain = Brain('fsaverage', 'lh', 'inflated', background='white',
size=1000, offscreen=True)
brain.add_data(data[aparc], thresh=-99,
colorbar=False, colormap='BuPu',
min=data[1:].min(), max=data[1:].max())
brain.save_image(fig_dir / f'{gene.lower()}_{study}.png')
brain.close()
| 13,357
|
def serializer(cls, o):
"""
Custom class level serializer.
"""
# You can provide a custom serialize/deserialize logic for certain types.
if cls is datetime:
return o.strftime('%d/%m/%y')
# Raise SerdeSkip to tell serde to use the default serializer/deserializer.
else:
raise SerdeSkip()
| 13,358
|
def parse_args():
""" parse command-line arguments """
usage = """Usage: bcfg2_svnlog.py [options] -r <revision> <repos>"""
parser = OptionParser(usage=usage)
parser.add_option("-v", "--verbose", help="Be verbose", action="count")
parser.add_option("-c", "--config", help="Config file",
default="/etc/bcfg2_svnlog.conf")
parser.add_option("-r", "--rev", help="Revision")
parser.add_option("--stdout", help="Print log message to stdout")
try:
(options, args) = parser.parse_args()
except OptionError:
parser.print_help()
raise SystemExit(1)
if not len(args):
parser.print_help()
raise SystemExit(1)
get_logger(options.verbose)
return (options, args.pop())
| 13,359
|
def compute_com(kpt_ids, pose_keypoints):
"""Computes center of mass from available points for each pose.
Requires at least one arm (shoulder, elbow, wrist), neck and hips.
Required keypoints to return result: at least one arm with hip, neck and [nose OR ear]
:param kpt_ids: IDs of keypoints in pose_keypoints. Corresponds to kpt_names.
:param pose_keypoints: keypoints for parts of a pose. All types are in kpt_names.
:return COM/BOS tuple: tuple of main center of mass' x,y coordinates (ndarray), segment COMs (ndarray),
BOS coordinates (list of list of int)
"""
C_pts = [] # minor center of mass points
BOS = [[-1, -1], [-1, -1]] # base of support
COM = array([-1, -1]).astype(int32) # final center of mass
# legs are 3.5 to 4 heads
# 25 and 20: 20 front, 5 back
# Find length from nose/ears to neck and multiply 0.5 for front foot, 0.14 for back foot.
## Heuristics
no_right = False
no_left = False
for r_id in right_profile:
if r_id not in kpt_ids:
no_right = True
break
for l_id in left_profile:
if l_id not in kpt_ids:
no_left = True
break
face_id = -1
for f_id in face_profile:
if f_id in kpt_ids:
face_id = f_id
break
if face_id == -1:
return (COM, array(C_pts), BOS)
elif no_right and no_left:
return (COM, array(C_pts), BOS)
## Transformation
"""Two scenarios
(1) Front/Back of body: do nothing
(2) Side of body: copy point to side if needed
"""
if not no_right and no_left:
for indx in range(prof_len):
r_id = right_profile[indx]
l_id = left_profile[indx]
if pose_keypoints[l_id, 0] == -1:
pose_keypoints[l_id] = pose_keypoints[r_id]
elif no_right and not no_left:
for indx in range(prof_len):
r_id = right_profile[indx]
l_id = left_profile[indx]
if pose_keypoints[r_id, 0] == -1:
pose_keypoints[r_id] = pose_keypoints[l_id]
## Compute COM sections
face_pt = pose_keypoints[face_id]
neck_pt = pose_keypoints[1]
head_vector = (neck_pt - face_pt) # points down
nose_neck_len = sqrt(sum(head_vector * head_vector))
head_vector[0] = 0 # project to y-axis
# head_vector[1] = head_vector[1] * 1.5
r_sho_pt = pose_keypoints[2]
l_sho_pt = pose_keypoints[5]
upperRidge_pt = (r_sho_pt + l_sho_pt)/2
r_hip_pt = pose_keypoints[8]
l_hip_pt = pose_keypoints[11]
lowerRidge_pt = (r_hip_pt + l_hip_pt)/2
# Thorax COM
thorax_vector = (lowerRidge_pt - upperRidge_pt) * proximal_ratios[0]
C_pts.append((upperRidge_pt + thorax_vector).tolist())
# Upper Arms COM
r_elb_pt = pose_keypoints[3]
l_elb_pt = pose_keypoints[6]
r_uparm_vector = (r_sho_pt - r_elb_pt) * proximal_ratios[1]
l_uparm_vector = (l_sho_pt - l_elb_pt) * proximal_ratios[1]
C_pts.append((r_uparm_vector + r_elb_pt).tolist())
C_pts.append((l_uparm_vector + l_elb_pt).tolist())
# Forearms COM
r_forarm_vector = (r_elb_pt - pose_keypoints[4]) * proximal_ratios[2]
l_forarm_vector = (l_elb_pt - pose_keypoints[7]) * proximal_ratios[2]
C_pts.append((r_forarm_vector + pose_keypoints[4]).tolist())
C_pts.append((l_forarm_vector + pose_keypoints[7]).tolist())
# Thigh COM and Leg COM (OR) Total Leg COM (if pts missing)
# Right Side
if pose_keypoints[9,0] == -1: # missing leg estimation
r_total_leg_com = (head_vector * proximal_ratios[6]) + r_hip_pt
C_pts.append([0,0])
C_pts.append([0,0])
C_pts.append(r_total_leg_com.tolist())
BOS[0] = ((head_vector * 3.5) + r_hip_pt).tolist()
else:
r_knee_pt = pose_keypoints[9]
r_thigh_vector = (r_hip_pt - r_knee_pt) * proximal_ratios[3]
C_pts.append((r_thigh_vector + r_knee_pt).tolist())
if pose_keypoints[10, 0] == -1: # missing ankle estimation
r_leg_com = (head_vector * proximal_ratios[5]) + r_knee_pt
C_pts.append(r_leg_com.tolist())
BOS[0] = ((head_vector * 1.75) + r_knee_pt).tolist()
else:
r_ankle_pt = pose_keypoints[10]
r_leg_vector = (r_knee_pt - r_ankle_pt) * proximal_ratios[4]
C_pts.append((r_leg_vector + r_ankle_pt).tolist())
BOS[0] = r_ankle_pt.tolist()
C_pts.append([0,0])
# Left Side
if pose_keypoints[12,0] == -1: # missing leg estimation
l_total_leg_com = (head_vector * proximal_ratios[6]) + l_hip_pt
C_pts.append([0,0])
C_pts.append([0,0])
C_pts.append(l_total_leg_com.tolist())
BOS[1] = ((head_vector * 3.5) + l_hip_pt).tolist()
else:
l_knee_pt = pose_keypoints[12]
l_thigh_vector = (l_hip_pt - l_knee_pt) * proximal_ratios[3]
C_pts.append((l_thigh_vector + l_knee_pt).tolist())
if pose_keypoints[13, 0] == -1: # missing ankle estimation
l_leg_com = (head_vector * proximal_ratios[5]) + l_knee_pt
C_pts.append(l_leg_com.tolist())
BOS[1] = ((head_vector * 1.75) + l_knee_pt).tolist()
else:
l_ankle_pt = pose_keypoints[13]
l_leg_vector = (l_knee_pt - l_ankle_pt) * proximal_ratios[4]
C_pts.append((l_leg_vector + l_ankle_pt).tolist())
BOS[1] = l_ankle_pt.tolist()
C_pts.append([0,0])
## Compute COM from C_pts, and BOS
C_pts = array(C_pts, dtype=int32)
COM = sum(C_pts * mass_ratios, axis=0).astype(int32)
# was BOS[0][0] == BOS[1][0]
if no_left^no_right: # sagittal spreading; greedy approach
        min1 = min2 = max1 = max2 = -1
if no_left: # facing towards right of image
min1 = round(BOS[0][0] - (nose_neck_len * 0.14)) # constants 0.14 and 0.5 based on my estimates
min2 = round(BOS[1][0] - (nose_neck_len * 0.14)) # of nose-neck length and foot length relative
max1 = round(BOS[0][0] + (nose_neck_len * 0.5)) # to ankle point.
max2 = round(BOS[1][0] + (nose_neck_len * 0.5))
else: # facing towards left of image
min1 = round(BOS[0][0] - (nose_neck_len * 0.5))
min2 = round(BOS[1][0] - (nose_neck_len * 0.5))
max1 = round(BOS[0][0] + (nose_neck_len * 0.14))
max2 = round(BOS[1][0] + (nose_neck_len * 0.14))
if min1 < min2:
BOS[0][0] = min1
else:
BOS[0][0] = min2
if max1 > max2:
BOS[1][0] = max1
else:
BOS[1][0] = max2
return (COM, C_pts, BOS)
| 13,360
|
def test_grid_three_size_more_at_end():
"""
"""
expected = (
(0, 0), (1, 0), (2, 0), (3, 0),
(0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1),
(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (6, 2), (7, 2),
(0, 3), (1, 3), (2, 3), (3, 3), (4, 3), (5, 3), (6, 3), (7, 3)
)
    actual = Autogrid((2, 3, 4), 2).grid_tuples(26)
assert tuple(actual) == expected
| 13,361
|
def certificate(cert_name):
"""Return the path to the PEM file with the given name."""
return os.path.join(os.path.dirname(__file__), 'lib', cert_name)
| 13,362
|
def _MinimumLineCount(text: str, min_line_count: int) -> str:
"""Private implementation of minimum number of lines.
Args:
    text: The source to verify the line count of.
    min_line_count: The minimum number of lines that text must contain.
Returns:
src: The unmodified input src.
Raises:
NoCodeException: If src is less than min_line_count long.
"""
if len(text.strip().split("\n")) < min_line_count:
raise errors.NoCodeException
return text
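# Usage sketch (illustrative; assumes errors.NoCodeException is importable here):
#   _MinimumLineCount("a = 1\nb = 2\n", 2)   # -> returns the text unchanged
#   _MinimumLineCount("a = 1\n", 2)          # -> raises errors.NoCodeException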
| 13,363
|
def RationalQuadratic1d(
grid,
corrlen,
sigma,
alpha,
prior=None,
mu_basis=None,
mu_hyper=None,
energy=0.99
) -> Formula:
"""Rational quadratic kernel formula
"""
kernel_kwargs = {
"corrlen": corrlen,
"sigma": sigma,
"alpha": alpha
}
_Formula = create_from_kernel1d(utils.rational_quadratic)
return _Formula(
grid=grid,
prior=prior,
mu_basis=mu_basis,
mu_hyper=mu_hyper,
energy=energy,
**kernel_kwargs
)
| 13,364
|
def pandas2csv(df, out_file, index=False, header=True):
"""Write pandas dataframe or series to CSV file;
see pandas2file for other side effects
Args:
df - pandas dataframe or series
out_file - file to which df should be written
index - write index to file?
header - write header row to file?
"""
pandas2file(partial(df.to_csv, index=index, header=header), out_file)
| 13,365
|
def plot_feature_importance(feature_keys, feature_importances, ax=None, **kwargs):
"""
    Plot feature importances after model training (typically from scikit-learn)
Parameters
----------
feature_keys: list of string
feature_importances: `numpy.ndarray`
ax: `matplotlib.pyplot.axes`
Returns
-------
ax
"""
ax = plt.gca() if ax is None else ax
sort_mask = np.argsort(feature_importances)[::-1]
ax.bar(np.array(feature_keys)[sort_mask], np.array(feature_importances)[sort_mask], **kwargs)
for t in ax.get_xticklabels():
t.set_rotation(45)
ax.set_title("Feature importances")
return ax
| 13,366
|
def test_atomic_g_month_max_inclusive_2_nistxml_sv_iv_atomic_g_month_max_inclusive_3_3(mode, save_output, output_format):
"""
Type atomic/gMonth is restricted by facet maxInclusive with value
--04.
"""
assert_bindings(
schema="nistData/atomic/gMonth/Schema+Instance/NISTSchema-SV-IV-atomic-gMonth-maxInclusive-3.xsd",
instance="nistData/atomic/gMonth/Schema+Instance/NISTXML-SV-IV-atomic-gMonth-maxInclusive-3-3.xml",
class_name="NistschemaSvIvAtomicGMonthMaxInclusive3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 13,367
|
def simulate_timestamps_till_horizon(mu, alpha, beta, Thorizon = 60, \
seed=None, node=None, output_rejected_data=False):
"""
Inputs:
mu, alpha, beta are parameters of intensity function of HP
"""
#################
# Initialisation
#################
rng = default_rng(seed) # get instance of random generator
t = 0 # initialise current time to be 0
i = 0 # set event counter to be 0
epsilon = 10**(-10) # This was used in many HP code
lambda_star = mu # upper bound at current time t = 0
ts = np.array([]); accepted_event_intensity = [lambda_star]
    # container for rejected time points and their corresponding intensities
rejected_points = []; rpy = []
# M_y stores upper bound of current times while M_x stores their x-values
M_x = []; M_y = []
#################
# Begin loop
#################
while(t < Thorizon):
previous_lambda_star = lambda_star; previous_t = t
# compute upper bound of intensity using intensity function
lambda_star = intensity_func(t+epsilon, ts, mu, alpha, beta)
u = rng.uniform(0,1) # draw a uniform random number between (0,1)
tau = -np.log(u)/lambda_star # sample inter-arrival time
t = t + tau # update current time by adding tau to current time
M_x += [previous_t,t]
M_y += [previous_lambda_star]
s = rng.uniform(0,1)# draw another standard uniform random number
# compute intensity function at current time t
lambda_t = intensity_func(t, ts, mu, alpha, beta)
if (t >= Thorizon):
break
##########################
## Rejection Sampling test
if s <= lambda_t/lambda_star:
            if node is not None:
                ts = np.append(ts, [float(t), np.array([node])])
            else:
                ts = np.append(ts, float(t))
accepted_event_intensity.append(lambda_t)
i += 1
else:
rejected_points += [t]
rpy += [lambda_t]
if output_rejected_data:
return ts, i, accepted_event_intensity, rejected_points, rpy
return ts
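# Usage sketch (illustrative): intensity_func is assumed to evaluate the exponential-kernel
# Hawkes intensity lambda(t) = mu + sum_{t_i < t} alpha * exp(-beta * (t - t_i)) over the
# accepted times ts. A hypothetical call:
#   events = simulate_timestamps_till_horizon(mu=0.5, alpha=0.8, beta=1.2,
#                                             Thorizon=60, seed=42)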
| 13,368
|
def jp_inference_on_dataset(model, data_loader, evaluator):
"""
Run model on the data_loader and evaluate the metrics with evaluator.
Also benchmark the inference speed of `model.forward` accurately.
The model will be used in eval mode.
Args:
model (nn.Module): a module which accepts an object from
`data_loader` and returns some outputs. It will be temporarily set to `eval` mode.
If you wish to evaluate a model in `training` mode instead, you can
wrap the given model and override its behavior of `.eval()` and `.train()`.
data_loader: an iterable object with a length.
The elements it generates will be the inputs to the model.
evaluator (DatasetEvaluator): the evaluator to run. Use `None` if you only want
to benchmark, but don't want to do any evaluation.
Returns:
The return value of `evaluator.evaluate()`
"""
num_devices = get_world_size()
logger = logging.getLogger(__name__)
logger.info("Start inference on {} images".format(len(data_loader)))
total = len(data_loader) # inference data loader must have a fixed length
if evaluator is None:
# create a no-op evaluator
evaluator = DatasetEvaluators([])
evaluator.reset()
num_warmup = min(5, total - 1)
start_time = time.perf_counter()
total_compute_time = 0
with inference_context(model), torch.no_grad():
for idx, inputs in enumerate(data_loader):
print("rank",comm.get_rank(),"is processing batch",idx)
if idx == num_warmup:
start_time = time.perf_counter()
total_compute_time = 0
start_compute_time = time.perf_counter()
outputs = model(inputs) #RUN THE MODEL!!!!!!!!!
if torch.cuda.is_available():
torch.cuda.synchronize()
total_compute_time += time.perf_counter() - start_compute_time
evaluator.process(inputs, outputs)
iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup)
seconds_per_img = total_compute_time / iters_after_start
if idx >= num_warmup * 2 or seconds_per_img > 5:
total_seconds_per_img = (time.perf_counter() - start_time) / iters_after_start
eta = datetime.timedelta(seconds=int(total_seconds_per_img * (total - idx - 1)))
log_every_n_seconds(
logging.INFO,
"Inference done {}/{}. {:.4f} s / img. ETA={}".format(
idx + 1, total, seconds_per_img, str(eta)
),
n=5,
)
# Measure the time only for this worker (before the synchronization barrier)
total_time = time.perf_counter() - start_time
total_time_str = str(datetime.timedelta(seconds=total_time))
# NOTE this format is parsed by grep
logger.info(
"Total inference time: {} ({:.6f} s / img per device, on {} devices)".format(
total_time_str, total_time / (total - num_warmup), num_devices
)
)
total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time)))
logger.info(
"Total inference pure compute time: {} ({:.6f} s / img per device, on {} devices)".format(
total_compute_time_str, total_compute_time / (total - num_warmup), num_devices
)
)
results = evaluator.evaluate()
# An evaluator may return None when not in main process.
# Replace it by an empty dict instead to make it easier for downstream code to handle
if results is None:
results = {}
return results
| 13,369
|
def boolToYes(b):
"""Convert a Boolean input into 'yes' or 'no'
Args:
b (bool): The Boolean value to be converted
Returns:
str: 'yes' if b is True, and 'no' otherwise.
"""
if b:
return "yes"
else:
return "no"
| 13,370
|
def top_symptoms(dic, title):
"""Find and plot top symptoms in the dictionary based on count
Args:
dic (dict): Dictionary containing text-count pair
Returns:
[dictionary]: Top 5 symptoms with their count
"""
assert isinstance(dic, dict) and len(dic) > 0, "dic is not a nonempty dictionary"
labels = []
sizes = []
counts = 0
top5 = sorted(dic, key=dic.get, reverse=True)[:5]
others = sorted(dic, key=dic.get, reverse=True)[5:]
for i in others:
counts += dic[i]
for i in top5:
labels.append(i)
sizes.append(dic[i])
labels.append("OTHER")
sizes.append(counts)
fig = go.Figure(data=[go.Pie(labels=labels, values=sizes, hole=0.3)])
fig.update_layout(
title=title,
template=None,
title_x=0.5,
width=1000,
height=900,
margin=dict(l=20, r=20, t=50, b=20),
legend=dict(font=dict(size=25, color="black")),
)
fig.show()
return top5
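# Usage sketch (opens an interactive Plotly pie chart; the counts below are made up):
#   counts = {"cough": 120, "fever": 95, "fatigue": 60, "headache": 40,
#             "nausea": 25, "rash": 10, "chills": 8}
#   top_symptoms(counts, "Most reported symptoms")
#   # -> ['cough', 'fever', 'fatigue', 'headache', 'nausea']; "rash" and "chills" fall under "OTHER"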
| 13,371
|
def _calc_z(h: DataArray, zice: DataArray, zeta: DataArray,
s: DataArray, Cs: DataArray,
hc: float, Vtransform: int) -> DataArray:
"""
Calculate grid z-coord depth given water depth (h), iceshelf depth (zice),
sea surface (zeta), and vertical grid transformation parameters.
Inputs:
h, zice, zeta - bathymetry extracted by set_hzz
s, Cs, hc, Vtransform - ROMS grid transformation parameters
Output:
z - depth of rho/w points
"""
if Vtransform == 1:
z0 = hc*s + (h-zice-hc)*Cs
z = zeta*(1.0+z0/(h-zice)) + z0 - zice
elif Vtransform == 2:
z0 = (hc*s + (h-zice)*Cs) / (hc+h-zice)
z = zeta + (zeta+h-zice)*z0 - zice
    else:
        raise ValueError("Vtransform must be 1 or 2")
    return z
| 13,372
|
def gap_loss(preds, D, A):
"""
    This function implements the loss from Nazi, Azade & Hang, Will & Goldie, Anna & Ravi, Sujith & Mirhoseini, Azalia (2019). GAP: Generalizable Approximate Graph Partitioning Framework.
    Args:
        preds (tensor(float)): predicted partition probabilities, of size n x g
        D (tensor(float)): node degrees, of size n x 1
        A (tensor(bool)): adjacency matrix of the graph, of size n x n
    Returns:
        float: the value of the loss function
"""
temp = tf.matmul(tf.transpose(preds), D)
temp = tf.div(preds, temp)
temp = tf.matmul(temp, tf.transpose(1-preds))
temp = tf.multiply(temp, A)
return tf.reduce_sum(temp)
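# Reading of the expression above (illustrative, not taken from the original source): with
# Y = preds (the n x g soft assignments) and Gamma = Y^T D, the cited GAP paper minimises
# the expected normalized cut, i.e. sum over edges (i, j) of A_ij * sum_k Y_ik * (1 - Y_jk) / Gamma_k.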
| 13,373
|
def do_eval_standalone(args_opt):
"""
do eval standalone
"""
ckpt_file = os.path.join(args_opt.model_dir, args_opt.task_name)
ckpt_file = get_ckpt(ckpt_file)
print('ckpt file:', ckpt_file)
task = task_cfg[args_opt.task_name]
student_net_cfg.seq_length = task.seq_length
eval_cfg.batch_size = args_opt.batch_size
eval_data_dir = os.path.join(args_opt.data_dir, args_opt.task_name, DATA_NAME)
    context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target, device_id=args_opt.device_id)
eval_dataset = create_tinybert_dataset(batch_size=eval_cfg.batch_size,
device_num=1,
rank=0,
do_shuffle='false',
data_dir=eval_data_dir,
data_type=args_opt.dataset_type,
seq_length=task.seq_length,
task_type=task.task_type,
drop_remainder=False)
print('eval dataset size:', eval_dataset.get_dataset_size())
print('eval dataset batch size:', eval_dataset.get_batch_size())
eval_model = BertModelCLS(student_net_cfg, False, task.num_labels, 0.0, phase_type='student')
param_dict = load_checkpoint(ckpt_file)
new_param_dict = {}
for key, value in param_dict.items():
new_key = re.sub('tinybert_', 'bert_', key)
new_key = re.sub('^bert.', '', new_key)
new_param_dict[new_key] = value
load_param_into_net(eval_model, new_param_dict)
eval_model.set_train(False)
columns_list = ["input_ids", "input_mask", "segment_ids", "label_ids"]
callback = task.metrics()
for step, data in enumerate(eval_dataset.create_dict_iterator()):
input_data = []
for i in columns_list:
input_data.append(data[i])
input_ids, input_mask, token_type_id, label_ids = input_data
_, _, logits, _ = eval_model(input_ids, token_type_id, input_mask)
callback.update(logits, label_ids)
print('eval step: {}, {}: {}'.format(step, callback.name, callback.get_metrics()))
metrics = callback.get_metrics()
print('The best {}: {}'.format(callback.name, metrics))
| 13,374
|
def test_stager(rse_factory, did_factory, root_account, replica_client):
"""
Submit a real transfer to FTS and rely on the gfal "mock" plugin to report a simulated "success"
https://gitlab.cern.ch/dmc/gfal2/-/blob/master/src/plugins/mock/README_PLUGIN_MOCK
"""
src_rse, src_rse_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default', rse_type=RSEType.TAPE)
dst_rse, dst_rse_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')
all_rses = [src_rse_id, dst_rse_id]
distance_core.add_distance(src_rse_id, dst_rse_id, ranking=10)
rse_core.add_rse_attribute(src_rse_id, 'staging_buffer', dst_rse)
for rse_id in all_rses:
rse_core.add_rse_attribute(rse_id, 'fts', 'https://fts:8446')
did = did_factory.upload_test_file(src_rse)
replica = replica_core.get_replica(rse_id=src_rse_id, **did)
replica_client.add_replicas(rse=dst_rse, files=[{'scope': did['scope'].external, 'name': did['name'], 'state': 'C',
'bytes': replica['bytes'], 'adler32': replica['adler32'], 'md5': replica['md5']}])
request_core.queue_requests(requests=[{'dest_rse_id': dst_rse_id,
'scope': did['scope'],
'name': did['name'],
'rule_id': '00000000000000000000000000000000',
'attributes': {
'source_replica_expression': src_rse,
'activity': 'Some Activity',
'bytes': replica['bytes'],
'adler32': replica['adler32'],
'md5': replica['md5'],
},
'request_type': RequestType.STAGEIN,
'retry_count': 0,
'account': root_account,
'requested_at': datetime.now()}])
stager(once=True, rses=[{'id': rse_id} for rse_id in all_rses])
replica = __wait_for_replica_transfer(dst_rse_id=dst_rse_id, max_wait_seconds=2 * MAX_POLL_WAIT_SECONDS, **did)
assert replica['state'] == ReplicaState.AVAILABLE
| 13,375
|
def number_of_hole(img, hole_img, hole_counter):
""" 判斷hole的數量去執行相對應的函式
0個hole執行zero_of_hole
1個hole執行one_of_hole
2個hole執行my_text.set("Answer : 8")
大於2個hole則執行my_text.set("Error : holes number = " + str(hole_counter) + "( > 2 )")) """
switcher = {
0:zero_of_hole,
1:one_of_hole,
2:lambda x1, x2:my_text.set("Answer : 8") #參數x1, x2從未使用, 為了return function
}
func = switcher.get(hole_counter, lambda x1, x2:my_text.set("Error : holes number = " + str(hole_counter) + "( > 2 )")) #參數x1, x2從未使用, 為了return function
return func(img, hole_img)
| 13,376
|
def get_merged_by_value_coords(spans_value, digits=None):
"""returns adjacent spans merged if they have the same value. Assumes
[(start, end, val), ..] structure and that spans_value is sorted in
ascending order.
Arguments:
- digits: if None, any data can be handled and exact values are
compared. Otherwise values are rounded to that many digits.
"""
assert len(spans_value[0]) == 3, 'spans_value must have 3 records per row'
starts, ends, vals = zip(*spans_value)
indices_distinct_vals = get_run_start_indices(vals, digits=digits)
data = []
i = 0
for index, val in indices_distinct_vals:
start = starts[index]
end = ends[index]
prev_index = max(index-1, 0)
try:
data[-1][1] = ends[prev_index]
except IndexError:
pass
data.append([start, end, val])
if index < len(ends):
data[-1][1] = ends[-1]
return data
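# Usage sketch (assumes get_run_start_indices yields (start_index, value) pairs, one per run
# of equal values): adjacent spans sharing the same value are merged into a single span.
#   spans = [(0, 10, 1.0), (10, 20, 1.0), (20, 30, 2.0)]
#   get_merged_by_value_coords(spans)   # -> [[0, 20, 1.0], [20, 30, 2.0]]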
| 13,377
|
def vint_mask_for_length(length):
"""
Returns the bitmask for the first byte of a variable-length integer (used for element ID and size descriptors).
:arg length: the length of the variable-length integer
:type length: int
:returns: the bitmask for the first byte of the variable-length integer
:rtype: int
"""
return 0b10000000 >> (length - 1)
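# Usage sketch: the mask marks the leading length bit of an EBML variable-length integer.
#   vint_mask_for_length(1)   # -> 0b10000000
#   vint_mask_for_length(2)   # -> 0b01000000
#   vint_mask_for_length(4)   # -> 0b00010000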
| 13,378
|
def parse(tokens):
"""
S-expr ::= ( S-expr* ) | AtomSymbol | ' S-expr
' S-expr = (quote S-expr)
"""
from itertools import tee
def _parse(tokens):
while True:
token = next(tokens)
if token == "(":
s_expr = []
while True:
tokens, tokens_clone = tee(tokens)
if next(tokens_clone) == ")":
next(tokens)
return tuple(s_expr)
s_expr.append(_parse(tokens))
elif token == ")" or token == "":
raise Exception("parse error")
elif token == "'":
return "quote", _parse(tokens)
else:
return token
s_expr = _parse(tokens)
if next(tokens) != "":
raise Exception("parse error")
return s_expr
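# Usage sketch (assumes a token iterator that yields "" once the input is exhausted):
#   parse(iter(["(", "quote", "(", "a", "b", ")", ")", ""]))   # -> ('quote', ('a', 'b'))
#   parse(iter(["'", "x", ""]))                                # -> ('quote', 'x')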
| 13,379
|
def test_bad_predict():
"""Test bad input to predict."""
secs = pysecs.SECS(sec_df_loc=[[1., 0., R_EARTH + 1e6],
[-1., 0., R_EARTH + 1e6]])
# Calling predict with the wrong shape
pred_loc = np.array([[0, 0]])
with pytest.raises(ValueError, match="Prediction locations"):
secs.predict(pred_loc)
# Calling predict before fitting
pred_loc = np.array([[0, 0, R_EARTH]])
with pytest.raises(ValueError, match="There are no currents associated"):
secs.predict(pred_loc)
| 13,380
|
def preproc(raw,
dark=None,
flat=None,
solidangle=None,
polarization=None,
absorption=None,
mask=None,
dummy=None,
delta_dummy=None,
normalization_factor=1.0,
empty=None,
split_result=False,
variance=None,
dark_variance=None,
poissonian=False,
dtype=numpy.float32
):
"""Common preprocessing step for all integration engines
:param data: raw value, as a numpy array, 1D or 2D
:param mask: array non null where data should be ignored
:param dummy: value of invalid data
:param delta_dummy: precision for invalid data
:param dark: array containing the value of the dark noise, to be subtracted
:param flat: Array containing the flatfield image. It is also checked for dummies if relevant.
:param solidangle: the value of the solid_angle. This processing may be performed during the rebinning instead. left for compatibility
:param polarization: Correction for polarization of the incident beam
:param absorption: Correction for absorption in the sensor volume
:param normalization_factor: final value is divided by this
:param empty: value to be given for empty bins
:param split_result: set to true to separate signal from normalization and
        return an array of float2, float3 (with variance) or float4 (including counts)
:param variance: provide an estimation of the variance, enforce
        split_result=True and return a float3 array with variance in second position.
:param dark_variance: provide an estimation of the variance of the dark_current,
        enforce split_result=True and return a float3 array with variance in second position.
:param poissonian: set to "True" for assuming the detector is poissonian and variance = max(1, raw + dark)
:param dtype: dtype for all processing
    All calculations are performed in single precision floating point (32 bits).
NaN are always considered as invalid values
if neither empty nor dummy is provided, empty pixels are 0.
Empty pixels are always zero in "split_result" mode.
When set to False, i.e the default, the pixel-wise operation is:
.. math::
I = \\frac{raw - dark}{flat \\cdot solidangle \\cdot polarization \\cdot absorption}
Invalid pixels are set to the dummy or empty value.
When split_result is set to True, each result is a float2
or a float3 (with an additional value for the variance) as such:
I = [:math:`raw - dark`, :math:`variance`, :math:`flat \\cdot solidangle \\cdot polarization \\cdot absorption`]
If split_result is 4, then the count of pixel is appended to the list, i.e. 1 or 0 for masked pixels
Empty pixels will have all their 2 or 3 or 4 values to 0 (and not to dummy or empty value)
If poissonian is set to True, the variance is evaluated as raw + dark, with a minimum of 1.
"""
if isinstance(dtype, str):
dtype = numpy.dtype(dtype).type
shape = raw.shape
out_shape = list(shape)
if split_result or (variance is not None) or poissonian:
if split_result == 4:
out_shape += [4]
elif (variance is not None) or poissonian:
out_shape += [3]
else:
out_shape += [2]
split_result = True
size = raw.size
if (mask is None) or (mask is False):
mask = numpy.zeros(size, dtype=bool)
else:
assert mask.size == size, "Mask array size is correct"
mask = numpy.ascontiguousarray(mask.ravel(), dtype=bool)
if (dummy is not None) and (delta_dummy is not None):
check_dummy = True
cdummy = dtype(dummy)
ddummy = dtype(delta_dummy)
elif (dummy is not None):
check_dummy = True
cdummy = dtype(dummy)
ddummy = 0.0
else:
check_dummy = False
cdummy = dtype(empty or 0.0)
ddummy = 0.0
signal = numpy.ascontiguousarray(raw.ravel(), dtype=dtype)
normalization = numpy.zeros_like(signal) + normalization_factor
if variance is not None:
variance = numpy.ascontiguousarray(variance.ravel(), dtype=dtype)
elif poissonian:
variance = numpy.maximum(1.0, signal) # this makes a copy
# runtime warning here
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
if check_dummy:
# runtime warning here
if ddummy == 0:
mask |= (signal == cdummy)
else:
mask |= (abs(signal - cdummy) <= ddummy)
if dark is not None:
assert dark.size == size, "Dark array size is correct"
dark = numpy.ascontiguousarray(dark.ravel(), dtype=dtype)
if check_dummy:
# runtime warning here
if ddummy == 0:
mask |= (dark == cdummy)
else:
mask |= abs(dark - cdummy) < ddummy
signal -= dark
if poissonian:
variance += dark
elif dark_variance is not None:
variance += dark_variance
if flat is not None:
assert flat.size == size, "Flat array size is correct"
flat = numpy.ascontiguousarray(flat.ravel(), dtype=dtype)
if check_dummy:
# runtime warning here
if ddummy == 0:
mask |= (flat == cdummy)
else:
mask |= abs(flat - cdummy) <= ddummy
normalization *= flat
if polarization is not None:
assert polarization.size == size, "Polarization array size is correct"
normalization *= numpy.ascontiguousarray(polarization.ravel(), dtype=dtype)
if solidangle is not None:
assert solidangle.size == size, "Solid angle array size is correct"
normalization *= numpy.ascontiguousarray(solidangle.ravel(), dtype=dtype)
if absorption is not None:
assert absorption.size == size, "Absorption array size is correct"
normalization *= numpy.ascontiguousarray(absorption.ravel(), dtype=dtype)
mask |= numpy.logical_not(numpy.isfinite(signal))
mask |= numpy.logical_not(numpy.isfinite(normalization))
mask |= (normalization == 0)
if variance is not None:
mask |= numpy.logical_not(numpy.isfinite(variance))
if split_result:
result = numpy.zeros(out_shape, dtype=dtype)
signal[mask] = 0.0
normalization[mask] = 0.0
result[..., 0] = signal.reshape(shape)
if out_shape[-1] == 4:
if variance is not None:
variance[mask] = 0.0
result[..., 1] = variance.reshape(shape)
result[..., 2] = normalization.reshape(shape)
result[..., 3] = 1.0 - mask.reshape(shape)
elif variance is None:
            result[..., 1] = normalization.reshape(shape)
else:
variance[mask] = 0.0
result[..., 1] = variance.reshape(shape)
result[..., 2] = normalization.reshape(shape)
else:
result = signal / normalization
result[mask] = cdummy
result.shape = shape
return result
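# Usage sketch (illustrative, not from the original module): dark subtraction with a dummy
# value marking one invalid pixel; masked pixels come back as the dummy value.
#   raw  = numpy.array([[10., 20.], [30., -1.]])
#   dark = numpy.ones((2, 2))
#   preproc(raw, dark=dark, dummy=-1)
#   # -> array([[ 9., 19.],
#   #           [29., -1.]], dtype=float32)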
| 13,381
|
def lB_2_T(lB, T0=298, sigma=4E-10, ret_res=False):
"""Solves for temperature at given Bjerrum length under condition from Adhikari et al. 2019 that lB/l = 1.2 at 298 K."""
def cond(T, lB, sigma=sigma):
"""condition function whose root gives the temperature T given Bjerrum length lB."""
return lB_fn(T, sigma=sigma) - lB
T = scipy.optimize.fsolve(cond, T0, args=(lB,))[0]
if ret_res:
res = np.abs(lB_fn(T, sigma=sigma) - lB)
return T, res
return T
| 13,382
|
def make_datacls(
cls_name: str,
fields: Iterable[Union[tuple[str, type], tuple[str, type, dataclasses.Field]]],
init: bool = True,
**kwargs,
) -> type:
"""
Return a new dataclass. This function wraps the Python dataclasses.make_dataclass
function, with the following changes to the generated __init__ method:
• initialization method only processes keyword arguments
• initialization method ignores unexpected keyword arguments
• fields (with default values or not) can be declared in any order
• Optional[...] fields default to None if no default value is specified
Keyword arguments are passed on to the dataclasses.make_dataclass function.
"""
dataclass = dataclasses.make_dataclass(
cls_name=cls_name,
fields=fields,
init=False,
**kwargs,
)
if init:
dataclass.__init__ = _datacls_init(dataclass)
return dataclass
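# Usage sketch (behaviour as described in the docstring above; assumes typing is imported):
#   Point = make_datacls("Point", [("x", int), ("y", typing.Optional[int])])
#   Point(x=1)          # -> Point(x=1, y=None); Optional fields default to None
#   Point(x=1, z=9)     # -> Point(x=1, y=None); the unexpected keyword "z" is ignored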
| 13,383
|
def square_loss(X, y, theta, reg_beta=0.0):
"""Computes squared loss and gradient.
Based on mean square margin loss.
X: (k, n) data items.
y: (k, 1) result (+1 or -1) for each data item in X.
theta: (n, 1) parameters.
reg_beta: optional regularization strength, for L2 regularization.
Returns (loss, dtheta) where loss is the aggregate numeric loss for this
theta, and dtheta is (n, 1) gradients for theta based on that loss.
Note: the mean (division by k) helps; otherwise, the loss is very large and
a tiny learning rate is required to prevent divergence in the beginning of
the search.
"""
k, n = X.shape
margin = y * X.dot(theta)
diff = margin - 1
loss = np.dot(diff.T, diff) / k + np.dot(theta.T, theta) * reg_beta / 2
dtheta = np.zeros_like(theta)
for j in range(n):
dtheta[j, 0] = (2 * np.dot((diff * y).T, X[:, j]) / k +
reg_beta * theta[j, 0])
return loss.flat[0], dtheta
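# Usage sketch (illustrative): with theta = 0 every margin is 0, so diff = -1 for each of
# the k rows and the unregularized loss is diff.T @ diff / k = 1.0.
#   X = np.array([[1., 2.], [3., 4.]])
#   y = np.array([[1.], [-1.]])
#   loss, dtheta = square_loss(X, y, np.zeros((2, 1)))   # loss == 1.0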
| 13,384
|
def test_add_node(empty_dag):
"""Test add node."""
dag = empty_dag
dag.add_node('a')
assert dag.graph == {'a': set()}
| 13,385
|
def open_mf_wrf_dataset(paths, chunks=None, compat='no_conflicts', lock=None,
preprocess=None):
"""Open multiple WRF files as a single WRF dataset.
Requires dask to be installed. Note that if your files are sliced by time,
    certain diagnostic variables computed from accumulated variables (e.g.
    PRCP) won't be available, because they cannot be computed lazily.
This code is adapted from xarray's open_mfdataset function. The xarray
license is reproduced in the salem/licenses directory.
Parameters
----------
paths : str or sequence
Either a string glob in the form "path/to/my/files/\*.nc" or an
explicit list of files to open.
chunks : int or dict, optional
Dictionary with keys given by dimension names and values given by chunk
sizes. In general, these should divide the dimensions of each dataset.
If int, chunk each dimension by ``chunks`` .
By default, chunks will be chosen to load entire input files into
memory at once. This has a major impact on performance: please see
xarray's full documentation for more details.
compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts'}, optional
String indicating how to compare variables of the same name for
potential conflicts when merging:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
preprocess : callable, optional
If provided, call this function on each dataset prior to concatenation.
lock : False, True or threading.Lock, optional
This argument is passed on to :py:func:`dask.array.from_array`. By
default, a per-variable lock is used when reading data from netCDF
files with the netcdf4 and h5netcdf engines to avoid issues with
concurrent access when using dask's multithreaded backend.
Returns
-------
xarray.Dataset
"""
if isinstance(paths, basestring):
paths = sorted(glob(paths))
if not paths:
raise IOError('no files to open')
# TODO: current workaround to dask thread problems
dask.set_options(get=dask.get)
if lock is None:
lock = _default_lock(paths[0], 'netcdf4')
datasets = [open_wrf_dataset(p, chunks=chunks or {}, lock=lock)
for p in paths]
file_objs = [ds._file_obj for ds in datasets]
if preprocess is not None:
datasets = [preprocess(ds) for ds in datasets]
combined = xr.auto_combine(datasets, concat_dim='time', compat=compat)
combined._file_obj = _MultiFileCloser(file_objs)
combined.attrs = datasets[0].attrs
# drop accumulated vars if needed (TODO: make this not hard coded)
vns = ['PRCP', 'PRCP_C', 'PRCP_NC']
vns = [vn for vn in vns if vn in combined.variables]
combined = combined.drop(vns)
return combined
| 13,386
|
def main():
"""Plot barchart from COGclassifier count results"""
# Get argument values
args = get_args()
infile: Path = args.infile
outfile: Path = args.outfile
width: int = args.width
height: int = args.height
bar_width: int = args.bar_width
y_limit: Optional[int] = args.y_limit
percent_style: bool = args.percent_style
sort: bool = args.sort
# Plot barchart
plot_cog_classifier_barchart(
df=pd.read_csv(infile, sep="\t"),
html_outfile=outfile,
fig_width=width,
fig_height=height,
bar_width=bar_width,
y_limit=y_limit,
percent_style=percent_style,
sort=sort,
)
| 13,387
|
def parse_args_and_add_yaml_variables(parser: ArgumentParser,
yaml_config_file: Optional[Path] = None,
project_root: Optional[Path] = None,
fail_on_unknown_args: bool = False) -> ParserResult:
"""
Reads arguments from sys.argv, modifies them with secrets from local YAML files,
and parses them using the given argument parser.
:param project_root: The root folder for the whole project. Only used to access a private settings file.
:param parser: The parser to use.
:param yaml_config_file: The path to the YAML file that contains values to supply into sys.argv.
:param fail_on_unknown_args: If True, raise an exception if the parser encounters an argument that it does not
recognize. If False, unrecognized arguments will be ignored, and added to the "unknown" field of the parser result.
:return: The parsed arguments, and overrides
"""
settings_from_yaml = read_all_settings(yaml_config_file, project_root=project_root)
return parse_arguments(parser,
settings_from_yaml=settings_from_yaml,
fail_on_unknown_args=fail_on_unknown_args)
| 13,388
|
def load_graph(model_file):
"""Loads a TensorFlow graph from file."""
graph = tf.Graph()
with graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(model_file, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return graph
| 13,389
|
def report_failure(result: Dict[str, Any], context: Dict[str, Any]):
"""Write out failure information from a given check in a context."""
prepare_context_with_error_information(context=context, result=result)
print_validation_error(context=context)
| 13,390
|
def find_children(node, tag, xml_ns, ns_key):
"""
Finds the collection of children nodes
Parameters
----------
node : ElementTree.Element
tag : str
xml_ns : None|dict
    ns_key : None|str

    Returns
    -------
    found : list of ElementTree.Element
        The child elements matching `tag`.
    """
if xml_ns is None:
return node.findall(tag)
elif ns_key is None:
return node.findall('default:{}'.format(tag), xml_ns)
else:
return node.findall('{}:{}'.format(ns_key, tag), xml_ns)
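# Usage sketch (assumes xml.etree.ElementTree is imported as ElementTree):
#   root = ElementTree.fromstring('<a xmlns:x="urn:x"><x:b/><x:b/><c/></a>')
#   find_children(root, 'b', {'x': 'urn:x'}, 'x')   # -> the two namespaced <b> children
#   find_children(root, 'c', None, None)            # -> the single un-namespaced <c> child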
| 13,391
|
def normalize_string(string, ignore_spaces, ignore_punctuation):
"""Normalizes strings to prepare them for crashing comparison."""
string = string.upper()
if ignore_punctuation:
string = re.sub(r"[^1-9a-z \n\r\t]", "", string, flags=re.I)
if ignore_spaces:
string = re.sub(r"\w+", "", string)
else:
string = string.strip()
string = re.sub(r"[ \n\r\t]+", " ", string)
return string
| 13,392
|
def walkfiles(thisdir):
"""
walkfiles(D) -> iterator over files in D, recursively. Yields full file paths.
Adapted from path.py by Jason Orendorff.
"""
for child in os.listdir(thisdir):
thischild = join(thisdir, child)
if isfile(thischild):
yield thischild
elif isdir(thischild):
for f in walkfiles(thischild):
yield f
| 13,393
|
def create_sitemap(app: sphinx.application.Sphinx, exception):
"""Generates the sitemap.xml from the collected HTML page links"""
if (
not app.config["html_theme_options"].get("site_url", "")
or exception is not None
or not app.sitemap_links
):
return
filename = app.outdir + "/sitemap.xml"
print(
"Generating sitemap for {0} pages in "
"{1}".format(
len(app.sitemap_links), sphinx.util.console.colorize("blue", filename)
)
)
root = ElementTree.Element("urlset")
root.set("xmlns", "http://www.sitemaps.org/schemas/sitemap/0.9")
for link in app.sitemap_links:
url = ElementTree.SubElement(root, "url")
ElementTree.SubElement(url, "loc").text = link
app.sitemap_links[:] = []
ElementTree.ElementTree(root).write(filename)
| 13,394
|
def word_to_forms(word):
"""Return all possible forms for a word.
Args:
word (unicode)
Returns:
forms (set[unicode])
"""
forms = set()
lemmas = lemmatize(word)
for lemma in lemmas:
forms.update(lemma_to_forms(lemma))
return forms
| 13,395
|
def load_decamCorners():
"""
Returns the CCD corners of the DECam camera.
Returns:
decamCorners : *list* of *float*
A list of the angular degree offsets of the CCD corners.
"""
with open('%s/DECam_corners.dat' % data_dir) as f:
corners_dct = eval(''.join(f.readlines()))
decamCorners = [v for v in corners_dct.values()]
return decamCorners
| 13,396
|
def reduce_expr(expr):
"""
Reduces a boolean algebraic expression based on the identity X + XY = X
Args:
expr (str): representation of the boolean algebraic expression
Returns:
A string representing the reduced algebraic expression
"""
reduced = True
for term in expr:
matches = [t for t in expr if t!=term and len(set(term).intersection(set(t))) == len(term)]
if(matches):
reduced = False
if reduced:
return expr
new_expr = []
temp_expr = expr
for term in expr:
#find the term that differs with it by at most one position
matches = [t for t in expr if t!=term and len(set(term).intersection(set(t))) == len(term)]
if(matches):
new_expr.append(term)
temp_expr.remove(term)
for match in matches:
temp_expr.remove(match)
#if such a term is found reduce it by the rule x+ xy =x
#remove both terms from the list and add to new expression
#if no term is found add the term to the next expression
expr = reduce_expr(new_expr+temp_expr)
return expr
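# Usage sketch: terms are strings of variables; "AB" is absorbed by "A" (X + XY = X).
#   reduce_expr(["A", "AB", "CD"])   # -> ["A", "CD"]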
| 13,397
|
def test_spread_radix_sort():
"""
Test spread numbers array with radix_sort.
"""
left, right = radix_sort_test([0xffff, 0xfffe, 0xfffd, 3, 2, 1])
assert left == right
| 13,398
|
def get_mask(images, b_threshold=0) :
""" Return a mask computed from baseline image
"""
b0 = baseline(images, b_threshold)
skull_mask = medipy.segmentation.skull(b0)
skull = medipy.logic.apply_mask(b0, skull_mask, 0, 0)
directory = tempfile.mkdtemp()
medipy.io.save(skull, os.path.join(directory, "skull.nii.gz"))
bet = medipy.fsl.BET(
os.path.join(directory, "skull.nii.gz"), os.path.join(directory, "bet.nii.gz"),
intensity_threshold=0.2, create_brain_mask=True)
bet()
mask = medipy.io.load(bet.brain_mask)
shutil.rmtree(directory)
return mask
| 13,399
|