content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def unreduced_coboundary(morse_complex, akq, cell_ix):
    """Return the unreduced cells in the coboundary of cell `cell_ix`."""
    coboundary = morse_complex.get_coboundary(cell_ix)
    return unreduced_cells(akq, coboundary)
def __align(obj: Union[Trace, EventLog], pt: ProcessTree, max_trace_length: int = 1,
            max_process_tree_height: int = 1, parameters=None):
    """
    this function approximates alignments for a given event log or trace and a process tree
    :param obj: event log or single trace
    :param pt: process tree
    :param max_trace_length: specifies when the recursive splitting stops based on the trace's length
    :param max_process_tree_height: specifies when the recursive splitting stops based on the tree's height
    :param parameters: optional parameters dict; a fresh subtree-alignment cache is installed in it
    :return: result of __approximate_alignments_for_log for the (possibly wrapped) log
    """
    assert isinstance(pt, ProcessTree)
    # Bug fix: `parameters` defaults to None but was indexed below
    # (parameters[Parameters.SUBTREE_ALIGN_CACHE]), raising TypeError
    # whenever the caller omitted it.
    if parameters is None:
        parameters = {}
    # Wrap a single trace in a one-element log so both input kinds share a path.
    if isinstance(obj, Trace):
        e = EventLog()
        e.append(obj)
        obj = e
    assert isinstance(obj, EventLog)
    pt = process_tree_to_binary_process_tree(pt)
    pt = EfficientTree(pt)
    parameters[Parameters.SUBTREE_ALIGN_CACHE] = {}
    return __approximate_alignments_for_log(obj, pt, max_trace_length, max_process_tree_height,
                                            parameters=parameters)
def scanProgramTransfersCount(program, transfersCount=None, address=None, args=None):
    """
    Scan pools by active program, sort by transfersCount.

    :param program: Value for the 'activeProgram' index.
    :param transfersCount: Sort value for the 'transfersCount' sort key.
    :param address: Key value (pool address) to scan from.
    :param args: Optional dict of extra query parameters; entries override
        the defaults built here.
    :return: Result of ``resource.scan`` over the matching pools.
    """
    # Bug fix: the previous default `args={}` was a shared mutable default
    # argument; use None as the sentinel instead.
    return resource.scan(**{**{
        'type': 'pool',
        'index': 'activeProgram',
        'indexValue': program,
        'sort': 'transfersCount',
        'sortValue': transfersCount,
        'keyValue': address,
    }, **(args or {})})
def def_use_error(node, report=MAIN_REPORT):
    """
    Checks if node is a name and has a def_use_error
    Args:
        node (str or AstNode or CaitNode): The Name node to look up.
        report (Report): The report to attach data to. Defaults to MAIN_REPORT.
    Returns:
        bool: True if the given name has a def_use_error
    """
    if not isinstance(node, str) and node.ast_name != "Name":
        # Previously raised a bare TypeError with no message.
        raise TypeError("node must be a str or a Name AST node")
    # Imported lazily to avoid a circular import at module load time.
    from pedal.tifa.commands import get_issues
    from pedal.tifa.feedbacks import initialization_problem
    def_use_issues = get_issues(initialization_problem)
    node_id = node if isinstance(node, str) else node.id
    # True when any initialization issue refers to this name.
    return any(issue.fields['name'] == node_id for issue in def_use_issues)
def task_new_callback(data):
    """
    On receiving a task:new event, add a task to an asset in the
    Avalon mongodb.

    Args:
        data (dict): Event payload; only ``data["task_id"]`` is read.
    """
    # Log in to API
    gazu.client.set_host("{0}/api".format(os.environ["GAZU_URL"]))
    gazu.log_in(os.environ["GAZU_USER"], os.environ["GAZU_PASSWD"])
    task = gazu.task.get_task(data["task_id"])
    entity = task["entity"]
    project = task["project"]
    task_type = task["task_type"]
    # Normalize names so they match Avalon's naming conventions.
    project_name = lib.get_consistent_name(project["name"])
    task_name = lib.get_consistent_name(task_type["name"])
    # Get Avalon Asset Id.
    entity_id = lib.get_asset_data(project["id"], entity["id"])
    # db.install() selects the database from AVALON_PROJECT, so set it first.
    os.environ["AVALON_PROJECT"] = project_name
    db.uninstall()
    db.install()
    # Find the asset in Avalon
    avalon_entity = db.find_one({
        "_id": db.ObjectId(entity_id),
        "type": "asset"})
    # Append the task name, creating the "tasks" list if it does not exist yet.
    if avalon_entity["data"] is not None:
        if "tasks" in avalon_entity["data"]:
            avalon_entity["data"]["tasks"].append(task_name)
        else:
            avalon_entity["data"]["tasks"] = [task_name]
    else:
        avalon_entity["data"]["tasks"] = [task_name]
    db.replace_one(
        {"_id": db.ObjectId(entity_id), "type": "asset"}, avalon_entity)
    db.uninstall()
    logger.info("Added new \"{2}\" Task to \"{0}\" in Project \"{1}\"".format(
        avalon_entity["name"], project["name"], task_type["name"]))
def get_ref_inst(ref):
    """
    If value is part of a port on an instance, return that instance,
    otherwise None.
    """
    root = ref.root()
    return root.inst if isinstance(root, InstRef) else None
def not_found(error):
    """
    Render the 404 page.
    :returns: HTML
    :rtype: flask.Response
    """
    # Title is shared with the template via the module-level view_args dict.
    view_args["title"] = "Not found"
    body = render_template("404.html", args=view_args)
    return body, 404
def xml_escape(x):
    """Paranoid XML escaping suitable for content and attributes.

    ASCII letters, digits and a small punctuation whitelist pass through
    unchanged; every other character is replaced by its numeric character
    reference (&#NNN;).
    """
    parts = []
    for ch in x:
        if ('a' <= ch <= 'z') or ('A' <= ch <= 'Z') or ('0' <= ch <= '9') \
                or ch in ' !#$%()*+,-./:;=?@\^_`{|}~':
            parts.append(ch)
        else:
            parts.append('&#%d;' % ord(ch))
    return ''.join(parts)
def read_tab(filename):
    """Read information from a TAB file and return a list.

    Parameters
    ----------
    filename : str
        Full path and name for the tab file.

    Returns
    -------
    list
        One string per line, newline characters included.
    """
    with open(filename) as tab_file:
        return tab_file.readlines()
def build_model(stage_id, batch_size, real_images, **kwargs):
  """Builds progressive GAN model.
  Args:
    stage_id: An integer of training stage index.
    batch_size: Number of training images in each minibatch.
    real_images: A 4D `Tensor` of NHWC format.
    **kwargs: A dictionary of
        'start_height': An integer of start image height.
        'start_width': An integer of start image width.
        'scale_base': An integer of resolution multiplier.
        'num_resolutions': An integer of number of progressive resolutions.
        'stable_stage_num_images': An integer of number of training images in
            the stable stage.
        'transition_stage_num_images': An integer of number of training images
            in the transition stage.
        'total_num_images': An integer of total number of training images.
        'kernel_size': Convolution kernel size.
        'colors': Number of image channels.
        'to_rgb_use_tanh_activation': Whether to apply tanh activation when
            output rgb.
        'fmap_base': Base number of filters.
        'fmap_decay': Decay of number of filters.
        'fmap_max': Max number of filters.
        'latent_vector_size': An integer of latent vector size.
        'gradient_penalty_weight': A float of gradient norm target for
            wasserstein loss.
        'gradient_penalty_target': A float of gradient penalty weight for
            wasserstein loss.
        'real_score_penalty_weight': A float of Additional penalty to keep the
            scores from drifting too far from zero.
        'adam_beta1': A float of Adam optimizer beta1.
        'adam_beta2': A float of Adam optimizer beta2.
        'generator_learning_rate': A float of generator learning rate.
        'discriminator_learning_rate': A float of discriminator learning rate.
  Returns:
    An internal object that wraps all information about the model.
  """
  kernel_size = kwargs['kernel_size']
  colors = kwargs['colors']
  resolution_schedule = make_resolution_schedule(**kwargs)
  num_blocks, num_images = get_stage_info(stage_id, **kwargs)
  # The "global step" counts individual training images seen, not minibatches.
  current_image_id = tf.train.get_or_create_global_step()
  current_image_id_inc_op = current_image_id.assign_add(batch_size)
  tf.summary.scalar('current_image_id', current_image_id)
  # `progress` drives the progressive-growing blending between resolutions.
  progress = networks.compute_progress(
      current_image_id, kwargs['stable_stage_num_images'],
      kwargs['transition_stage_num_images'], num_blocks)
  tf.summary.scalar('progress', progress)
  # Blend real images so they match the generator's current output resolution.
  real_images = networks.blend_images(
      real_images, progress, resolution_schedule, num_blocks=num_blocks)
  def _num_filters_fn(block_id):
    """Computes number of filters of block `block_id`."""
    return networks.num_filters(block_id, kwargs['fmap_base'],
                                kwargs['fmap_decay'], kwargs['fmap_max'])
  def _generator_fn(z):
    """Builds generator network."""
    to_rgb_act = tf.tanh if kwargs['to_rgb_use_tanh_activation'] else None
    return networks.generator(
        z,
        progress,
        _num_filters_fn,
        resolution_schedule,
        num_blocks=num_blocks,
        kernel_size=kernel_size,
        colors=colors,
        to_rgb_activation=to_rgb_act)
  def _discriminator_fn(x):
    """Builds discriminator network."""
    return networks.discriminator(
        x,
        progress,
        _num_filters_fn,
        resolution_schedule,
        num_blocks=num_blocks,
        kernel_size=kernel_size)
  ########## Define model.
  z = make_latent_vectors(batch_size, **kwargs)
  # networks.generator/discriminator return tuples; tfgan only needs the
  # first element (the output tensor), hence the [0] in the lambdas.
  gan_model = tfgan.gan_model(
      generator_fn=lambda z: _generator_fn(z)[0],
      discriminator_fn=lambda x, unused_z: _discriminator_fn(x)[0],
      real_data=real_images,
      generator_inputs=z)
  ########## Define loss.
  gan_loss = define_loss(gan_model, **kwargs)
  ########## Define train ops.
  gan_train_ops, optimizer_var_list = define_train_ops(gan_model, gan_loss,
                                                       **kwargs)
  # Advance the image counter as part of every training step.
  gan_train_ops = gan_train_ops._replace(
      global_step_inc_op=current_image_id_inc_op)
  ########## Generator smoothing.
  generator_ema = tf.train.ExponentialMovingAverage(decay=0.999)
  gan_train_ops, generator_vars_to_restore = add_generator_smoothing_ops(
      generator_ema, gan_model, gan_train_ops)
  # Plain attribute container that bundles everything callers need.
  class Model(object):
    pass
  model = Model()
  model.stage_id = stage_id
  model.batch_size = batch_size
  model.resolution_schedule = resolution_schedule
  model.num_images = num_images
  model.num_blocks = num_blocks
  model.current_image_id = current_image_id
  model.progress = progress
  model.num_filters_fn = _num_filters_fn
  model.generator_fn = _generator_fn
  model.discriminator_fn = _discriminator_fn
  model.gan_model = gan_model
  model.gan_loss = gan_loss
  model.gan_train_ops = gan_train_ops
  model.optimizer_var_list = optimizer_var_list
  model.generator_ema = generator_ema
  model.generator_vars_to_restore = generator_vars_to_restore
  return model
def reflect(engine):
    """
    Reflect the database tables into the shared metadata object.
    """
    metadata.reflect(bind=engine)
    table_count = len(metadata.tables)
    LOGGER.info("Reflected %d tables", table_count)
def int2(c):
    """Parse the string *c* as a base-2 (binary) integer."""
    return int(c, base=2)
def info():
    """Shows info, a short summary of intro section.

    Renders a markdown block of reference links in the Streamlit sidebar.
    """
    st.sidebar.markdown("""
    ---\n
    [International University - VNU-HCM](https://hcmiu.edu.vn/en/)\n
    [Streamlit](https://www.streamlit.io/)\n
    [GitHub](https://github.com/minhlong94/SWE_IT076IU)\n
    """)
def from_serializer(
    serializer: serializers.Serializer,
    api_type: str,
    *,
    id_field: str = "",
    **kwargs: Any,
) -> Type[ResourceObject]:
    """
    Generate a schema from a DRF serializer.
    :param serializer: The serializer instance.
    :param api_type: The JSON API resource type.
    :param id_field: The 'id" field of the resource.
        If left empty, it is either "id" for non-model serializers, or
        for model serializers, it is looked up on the model.
    :param kwargs: Extra options (like links and transforms) passed to the schema.
    :return: The new schema class.
    """
    serializer_name = type(serializer).__name__
    if not id_field:
        if isinstance(serializer, serializers.ModelSerializer):
            # Model serializers: look up the model's primary key.
            model = serializer.Meta.model
            id_field = next(
                (db_field.attname
                 for db_field in model._meta.get_fields()
                 if getattr(db_field, "primary_key", False)),
                "",
            )
            if not id_field:
                raise ValueError(f"Unable to find primary key from model: {model}")
        else:
            # Non-model serializers: assume "id".
            id_field = "id"
    # get_fields() should return them in the order of Meta.fields
    attrs: List[str] = []
    rels: List[str] = []
    for field_name, field in serializer.get_fields().items():
        if field_name == id_field:
            continue
        bucket = rels if isinstance(field, serializers.RelatedField) else attrs
        bucket.append(field_name)
    schema_members: Dict[str, Any] = {
        "id": id_field,
        "type": api_type,
        "attributes": attrs,
        "relationships": rels,
    }
    schema_members.update(**kwargs)
    return type(f"{serializer_name}_AutoSchema", (ResourceObject,), schema_members)
def user_input():
    """Prompt for the user's name and print a greeting."""
    # Bug fix: the local variable used to be named `user_input`,
    # shadowing the enclosing function's own name.
    name = input("Please type in your name\n")
    # Printing a message based on the input.
    print(f"Welcome, {name}!")
def test_discarded_duplicate_children():
    """Verify that duplicate children are not added twice"""
    trials_history = TrialsHistory()
    # Three root trials with no parents.
    trials = [DummyTrial(i, []) for i in range(3)]
    trials_history.update(trials)
    assert trials_history.children == [0, 1, 2]
    # New trials reuse the same ids, each parented to the trial with its id.
    trials = [DummyTrial(i, [trials[i].id]) for i in range(3)]
    assert all(trial.id == trial.parents[0] for trial in trials)
    # Updating with the duplicate ids must not grow the children list.
    trials_history.update(trials)
    assert trials_history.children == [0, 1, 2]
def print_safety_margin(local_map, divisions):
    """Prints the safety margin of the given local_map, where each cell is divided into divisions^2 components.
    Args:
        local_map (2D np.ndarray): the local map
        divisions (int): each grid cell will have a safety_margin sample taken for divisions^2 times
    Returns:
        Nothing. The function prints the graph using pyplot
    """
    # NOTE(review): scaling_factor, beta, cutoff_radius and threshold are
    # read from module scope -- confirm they are defined where this runs.
    score_map = []
    below_zero = 0
    total = 0
    # Sample the margin on a sub-cell grid; the -0.49/-0.51 offsets keep
    # samples inside cell boundaries (centered on cell coordinates).
    for i in np.arange(-0.49, len(local_map)-0.51, 1/divisions):
        vector_map = []
        for j in np.arange(-0.49, len(local_map[0])-0.51, 1/divisions):
            margin = safety_margin([i, j], scaling_factor, beta, cutoff_radius, threshold, local_map)
            # Negated so larger values render as "safer" in the heat map.
            vector_map.append(-1*margin)
            if margin <= 0:
                below_zero += 1
            total += 1
        score_map.append(vector_map)
    # Apply a mask at 0
    masked_scores = np.ma.masked_where(np.array(score_map) <= 0, score_map)
    plt.subplot(1, 2, 1)
    plt.imshow(masked_scores, interpolation='nearest', cmap="YlGnBu")
    plt.title('Safety Margin Value Heat Map')
    plt.subplot(1, 2, 2)
    plt.imshow(-1*local_map, interpolation='nearest', cmap="RdBu")
    plt.title('Obstacle Probability Heat Map')
    plt.tight_layout()
    plt.show()
    # Fraction of sampled points with non-positive margin.
    print(str(below_zero/total))
def inv_rotate_pixpts(pixpts_rot, angle):
    """
    Rotate previously-rotated pixel points back to their original positions.

    Keyword arguments:
    pixpts_rot -- namedtuple of numpy arrays of x,y pixel points rotated
    angle -- rotation angle in degrees

    Return value:
    pixpts -- namedtuple of numpy arrays of pixel x,y points in
              original positions
    """
    angle_rad = angle * (np.pi / 180.)
    cos_a = np.cos(angle_rad)
    sin_a = np.sin(angle_rad)
    # Apply the inverse (transposed) 2D rotation matrix.
    x_orig = pixpts_rot.x * cos_a + pixpts_rot.y * sin_a
    y_orig = -pixpts_rot.x * sin_a + pixpts_rot.y * cos_a
    PixPoints = namedtuple('PixPoints', 'x y')
    return PixPoints(x_orig, y_orig)
def logged(func=None, level=logging.DEBUG, name=None, msg=None):
    """Decorator to log the function, with the duration.
    Args:
    -----
        func (function): the function to log
        level (logging.OBJECT): INFO, DEBUG, WARNING ...
        name (str): name of the logger
        msg (str): message to log
    Return:
    -------
        log the duration of the function
    """
    # Supports both @logged and @logged(level=..., name=..., msg=...) forms.
    if func is None:
        return functools.partial(logged, level=level, name=name, msg=msg)
    # NOTE(review): when `name` is given, `logger` is bound to the *string*
    # itself, so `logger.log(...)` in the wrapper will raise AttributeError.
    # It probably should build a Logger from `name` -- confirm intent.
    # NOTE(review): the fallback Logger is created with logging.INFO even
    # though this decorator's `level` default is logging.DEBUG -- confirm.
    logger = name if name else Logger(
        func.__name__ + ".log", logging.INFO)
    logmsg = msg if msg else func.__name__
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        """Wrapper to use it as a decorator."""
        start = time.time()
        result = func(*args, **kwargs)
        end = time.time()
        # "<function name>:<duration in seconds>"
        msg = ":".join([str(func.__name__), str(end - start)])
        logger.log(level, logmsg)
        logger.log(level, msg)
        return result
    return wrapper
def properties(classes):
    """Get all property (p-*, u-*, e-*, dt-*) classnames.

    Args:
        classes: Iterable of class-name strings.

    Returns:
        list: The text after the first "-" for every class whose prefix
        marks a microformats2 property.
    """
    # str.startswith accepts a tuple of prefixes: one call replaces the
    # previous chain of four `or`-ed tests.
    prefixes = ("p-", "u-", "e-", "dt-")
    return [c.partition("-")[2] for c in classes if c.startswith(prefixes)]
def test_main():
    """Mock to ensure program flow."""
    # Patch collaborators so main() runs without touching git or stdout.
    with patch.object(base, "get_all_git_repos") as mock_parse:
        with patch.object(base, "display_credit", return_value=(0, 1)) as disp:
            base.main()
            # main() must forward argv to the repo scan and show the credits.
            mock_parse.assert_called_once_with(sys.argv)
            assert disp.called
def adapt_p3_histogram(codon_usages, purge_unwanted=True):
    """Returns P3 from each set of codon usage for feeding to hist().

    Args:
        codon_usages: Iterable of groups of codon-usage objects; each object
            must provide positionalGC(purge_unwanted=...).
        purge_unwanted: Forwarded to positionalGC. Bug fix: this argument
            used to be ignored (True was always hard-coded).

    Returns:
        list: One numpy array of position-3 GC values per group.
    """
    return [array([c.positionalGC(purge_unwanted=purge_unwanted)[3] for c in curr])
            for curr in codon_usages]
def send_data_wrapper():
    """ wrapper to send data """
    # The module-level `track` dict carries the chunking state
    # (start_time, current_row, mode, chunk_count); send_data_to_if and
    # reset_track come from the enclosing module.
    logger.debug("--- Chunk creation time: %s seconds ---" % (time.time() - track['start_time']))
    send_data_to_if(track['current_row'], track['mode'])
    track['chunk_count'] += 1
    reset_track()
def handle_logout_response(response):
    """
    Handles saml2 logout response.
    :param response: Saml2 logout response
    """
    if len(response) > 1:
        # Currently only one source is supported
        return HttpResponseServerError("Logout from several sources not supported")
    for entityid, logout_info in response.items():
        if not isinstance(logout_info, tuple):
            # Plain result from logout, should be OK; nothing to render.
            continue
        # logout_info is a (binding, http_info) pair describing the reply.
        binding, http_info = logout_info
        if binding == BINDING_HTTP_POST:
            # Display content defined in logout response
            return HttpResponse("".join(http_info["data"]))
        if binding == BINDING_HTTP_REDIRECT:
            # Redirect to address defined in logout response
            return HttpResponseRedirect(_get_location(http_info))
        # Unknown binding
        return HttpResponseServerError("Logout binding not supported")
    return HttpResponseServerError("Failed to log out")
def get_value_beginning_of_year(idx, col, validate=False):
    """
    Return the value of the series given by df[col] on the first day of
    the year of the time index 'idx'.
    """
    year_start_idx = date(year=idx.date().year, month=1, day=1)
    return get_value(year_start_idx, col, validate)
def doc2vec_embedder(corpus: List[str], size: int = 100, window: int = 5) -> List[float]:
    """
    Train a Doc2Vec model on *corpus* and return a closure that embeds
    (represents) texts with it.
    ref: https://radimrehurek.com/gensim/models/doc2vec.html
    """
    logger.info(f"Training Doc2Vec with: size={size}, window={window}")
    tagged = [TaggedDocument(text.split(), [idx]) for idx, text in enumerate(corpus)]
    model = Doc2Vec(tagged, vector_size=size, window=window, min_count=3, workers=16)
    def embedder(documents: List[str]) -> List[float]:
        """Generates an embedding using a Doc2Vec"""
        vectors = [model.infer_vector(doc.split()) for doc in documents]
        return scale_vectors(vectors)
    return embedder
def PremIncome(t):
    """Premium income at time t: premium size times policies in force."""
    pols_in_force = PolsIF_Beg1(t)
    return SizePremium(t) * pols_in_force
def _setter_name(getter_name):
""" Convert a getter name to a setter name.
"""
return 'set' + getter_name[0].upper() + getter_name[1:] | 31,127 |
def get_bel_node_by_pathway_name():
    """Get Reactome related eBEL nodes by pathway name.

    Reads `pathway_name` from the request query string and returns a
    paginated result of pure protein nodes tagged with that pathway.
    """
    pathway_name = request.args.get('pathway_name')
    # SECURITY NOTE(review): `pathway_name` is user-controlled and is
    # interpolated directly into the SQL string below -- an injection
    # risk.  Prefer the driver's parameterized-query mechanism; confirm
    # what _get_paginated_ebel_query_result supports before changing.
    sql = f'''SELECT
    @rid.asString() as rid,
    namespace,
    name,
    bel,
    reactome_pathways
    FROM
    protein
    WHERE
    pure=true AND
    "{pathway_name}" in reactome_pathways
    '''
    return _get_paginated_ebel_query_result(sql)
def get_model(model_file, log=True):
    """Load a model from the specified model_file.

    Args:
        model_file: Path to the serialized model.
        log (bool): When True, print a confirmation that includes the
            Horovod rank of this process.

    Returns:
        The loaded model object.
    """
    model = load_model(model_file)
    if log:
        print('Model successfully loaded on rank ' + str(hvd.rank()))
    return model
def variable_op(shape, dtype, name="Variable", set_shape=True, container="",
                shared_name=""):
  """Deprecated. Use variable_op_v2 instead.

  Args:
    shape: Shape of the variable; ignored (unknown shape) when set_shape
      is False.
    dtype: Element dtype of the variable.
    name: Op name.
    set_shape: When True, also set the returned tensor's static shape.
    container: Resource container name ("" selects the default).
    shared_name: Shared resource name ("" selects the default).

  Returns:
    A variable `Tensor`.
  """
  if not set_shape:
    shape = tensor_shape.unknown_shape()
  ret = gen_state_ops.variable(shape=shape, dtype=dtype, name=name,
                               container=container, shared_name=shared_name)
  # TODO(mrry): Move this to where it is used, so we can get rid of this op
  # wrapper?
  if set_shape:
    ret.set_shape(shape)
  return ret
def main():
    """ Script with tools and setup configuration for Linux machines """
    # Intentionally empty: presumably this is the root command group for a
    # CLI framework, with subcommands registered elsewhere -- confirm
    # against the surrounding file.
    pass
def schedule_delete_default_vpc(account_id, region, role):
    """Schedule a delete_default_vpc on a thread

    :param account_id: The account ID to remove the VPC from
    :param region: The name of the region the VPC is resided
    :param role: Assumed-role session object used to build the regional
        EC2 client (exposes a boto3-style ``client`` method)
    """
    ec2_client = role.client('ec2', region_name=region)
    delete_default_vpc(ec2_client, account_id, region, role)
def usage(rc):
    """
    Print usage for this script to stderr and exit.

    Parameters
        rc: Exit status (int)
    """
    # Build the message as a list and join once: linear time instead of the
    # previous repeated concatenation, which also shadowed the built-in `str`.
    lines = [
        "\n",
        "pdb2pqr (Version %s)\n" % __version__,
        "\n",
        "This module takes a PDB file as input and performs\n",
        "optimizations before yielding a new PDB-style file as\n",
        "output\n",
        "\n",
        "Usage: pdb2pqr.py [options] --ff=<forcefield> <path> <output-path>\n",
        " Required Arguments:\n",
        " <forcefield> : The forcefield to use - currently amber\n",
        " charmm, parse, and tyl06 are supported.\n",
        " <path> : The path to the PDB file or an ID\n",
        " to obtain from the PDB archive\n",
        " <output-path> : The desired output name of the PQR file\n",
        " to be generated\n",
        " Optional Arguments:\n",
        " --nodebump : Do not perform the debumping operation\n",
        " --noopt : Do not perform hydrogen optimization\n",
        " --chain : Keep the chain ID in the output PQR file\n",
        " --assign-only : Only assign charges and radii - do not add\n",
        " atoms, debump, or optimize.\n",
        " --clean : Do no optimization, atom addition, or\n",
        " parameter assignment, just return the\n",
        " original PDB file in aligned format.\n",
        " --ffout=<name>: Instead of using the standard canonical\n",
        " naming scheme for residue and atom names,\n",
        " use the names from the given forcefield.\n",
        " --with-ph=<ph>: Use propka to calculate pKas and apply them\n",
        " to the molecule given the pH value. Actual\n",
        " PropKa results will be output to \n",
        " <output-path>.propka.\n",
        " --apbs-input : Create a template APBS input file based on\n",
        " the generated PQR file.\n",
        " --ligand=<path>: Calculate the parameters for the ligand in\n",
        " mol2 format at the given path. Pdb2pka must\n",
        " be compiled\n",
        " --verbose (-v): Print information to stdout\n",
        " --help (-h): Display the usage information\n",
    ]
    # Check to see if there are usage statements from the
    # extensions directory
    extensions = getAvailableExtensions()
    if len(extensions) > 0:
        lines.append("\n Optional Arguments from Extensions Directory:\n")
        for ext in extensions:
            lines.append(extensions[ext].usage())
    lines.append("\n")
    sys.stderr.write("".join(lines))
    sys.exit(rc)
def main(opt):
    """
    Tests SRVP.
    Parameters
    ----------
    opt : DotDict
        Contains the testing configuration.
    """
    ##################################################################################################################
    # Setup
    ##################################################################################################################
    # -- Device handling (CPU, GPU)
    opt.train = False
    if opt.device is None:
        device = torch.device('cpu')
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.device)
        device = torch.device('cuda:0')
        torch.cuda.set_device(0)
    # Seed
    random.seed(opt.test_seed)
    np.random.seed(opt.test_seed)
    torch.manual_seed(opt.test_seed)
    # cuDNN
    assert torch.backends.cudnn.enabled
    # Load LPIPS model
    global lpips_model
    lpips_model = PerceptualLoss(opt.lpips_dir)
    ##################################################################################################################
    # Load XP config
    ##################################################################################################################
    xp_config = helper.load_json(os.path.join(opt.xp_dir, 'config.json'))
    # Command-line options take precedence over the experiment config.
    nt_cond = opt.nt_cond if opt.nt_cond is not None else xp_config.nt_cond
    nt_test = opt.nt_gen if opt.nt_gen is not None else xp_config.seq_len_test
    ##################################################################################################################
    # Load test data
    ##################################################################################################################
    print('Loading data...')
    xp_config.data_dir = opt.data_dir
    xp_config.seq_len = nt_test
    dataset = data.load_dataset(xp_config, train=False)
    testset = dataset.get_fold('test')
    test_loader = DataLoader(testset, batch_size=opt.batch_size, collate_fn=data.collate_fn, pin_memory=True)
    ##################################################################################################################
    # Load model
    ##################################################################################################################
    print('Loading model...')
    model = srvp.StochasticLatentResidualVideoPredictor(xp_config.nx, xp_config.nc, xp_config.nf, xp_config.nhx,
                                                        xp_config.ny, xp_config.nz, xp_config.skipco, xp_config.nt_inf,
                                                        xp_config.nh_inf, xp_config.nlayers_inf, xp_config.nh_res,
                                                        xp_config.nlayers_res, xp_config.archi)
    state_dict = torch.load(os.path.join(opt.xp_dir, 'model.pt'), map_location='cpu')
    model.load_state_dict(state_dict)
    model.to(device)
    model.eval()
    ##################################################################################################################
    # Eval
    ##################################################################################################################
    print('Generating samples...')
    torch.set_grad_enabled(False)
    best_samples = defaultdict(list)
    worst_samples = defaultdict(list)
    results = defaultdict(list)
    cond = []
    cond_rec = []
    gt = []
    # Keep (at most) five raw prediction samples per sequence for inspection.
    random_samples = [[] for _ in range(5)]
    # Evaluation is done by batch
    for batch in tqdm(test_loader, ncols=80, desc='evaluation'):
        # Data
        x = batch.to(device)
        assert nt_test <= len(x)
        x = x[:nt_test]
        x_cond = x[:nt_cond]
        x_target = x[nt_cond:]
        # Store conditioning frames and ground truth as uint8 tensors.
        cond.append(x_cond.cpu().mul(255).byte().permute(1, 0, 3, 4, 2))
        gt.append(x_target.cpu().mul(255).byte().permute(1, 0, 3, 4, 2))
        # Predictions
        metric_best = {}
        sample_best = {}
        metric_worst = {}
        sample_worst = {}
        # Encode conditional frames and extracts skip connections
        skip = model.encode(x_cond)[1] if model.skipco != 'none' else None
        # Generate opt.n_samples predictions
        for i in range(opt.n_samples):
            # Infer latent variables
            x_rec, y, _, w, _, _, _, _ = model(x_cond, nt_cond, dt=1 / xp_config.n_euler_steps)
            y_0 = y[-1]
            if i == 0:
                x_rec = x_rec[::xp_config.n_euler_steps]
                cond_rec.append(x_rec.cpu().mul(255).byte().permute(1, 0, 3, 4, 2))
            # Use the model in prediction mode starting from the last inferred state
            y_os = model.generate(y_0, [], nt_test - nt_cond + 1, dt=1 / xp_config.n_euler_steps)[0]
            # Subsample the Euler steps back to one state per frame.
            y = y_os[xp_config.n_euler_steps::xp_config.n_euler_steps].contiguous()
            x_pred = model.decode(w, y, skip).clamp(0, 1)
            # Pixelwise quantitative eval
            mse = torch.mean(F.mse_loss(x_pred, x_target, reduction='none'), dim=[3, 4])
            metrics_batch = {
                'psnr': 10 * torch.log10(1 / mse).mean(2).mean(0).cpu(),
                'ssim': _ssim_wrapper(x_pred, x_target).mean(2).mean(0).cpu(),
                'lpips': _lpips_wrapper(x_pred, x_target).mean(0).cpu()
            }
            x_pred_byte = x_pred.cpu().mul(255).byte().permute(1, 0, 3, 4, 2)
            if i < 5:
                random_samples[i].append(x_pred_byte)
            for name, values in metrics_batch.items():
                if i == 0:
                    # First sample initializes both best and worst trackers.
                    metric_best[name] = values.clone()
                    sample_best[name] = x_pred_byte.clone()
                    metric_worst[name] = values.clone()
                    sample_worst[name] = x_pred_byte.clone()
                    continue
                # Best samples
                idx_better = _get_idx_better(name, metric_best[name], values)
                metric_best[name][idx_better] = values[idx_better]
                sample_best[name][idx_better] = x_pred_byte[idx_better]
                # Worst samples
                idx_worst = _get_idx_worst(name, metric_worst[name], values)
                metric_worst[name][idx_worst] = values[idx_worst]
                sample_worst[name][idx_worst] = x_pred_byte[idx_worst]
        # Compute metrics for best samples and register
        for name in sample_best.keys():
            best_samples[name].append(sample_best[name])
            worst_samples[name].append(sample_worst[name])
            results[name].append(metric_best[name])
    # Store best, worst and random samples
    samples = {f'random_{i + 1}': torch.cat(random_sample).numpy() for i, random_sample in enumerate(random_samples)}
    samples['cond_rec'] = torch.cat(cond_rec)
    for name in best_samples.keys():
        samples[f'{name}_best'] = torch.cat(best_samples[name]).numpy()
        samples[f'{name}_worst'] = torch.cat(worst_samples[name]).numpy()
        results[name] = torch.cat(results[name]).numpy()
    ##################################################################################################################
    # Compute FVD
    ##################################################################################################################
    print('Computing FVD...')
    cond = torch.cat(cond, 0).permute(1, 0, 4, 2, 3).float().div(255)
    gt = torch.cat(gt, 0).permute(1, 0, 4, 2, 3).float().div(255)
    ref = torch.cat([cond, gt], 0)
    hyp = torch.from_numpy(samples['random_1']).clone().permute(1, 0, 4, 2, 3).float().div(255)
    hyp = torch.cat([cond, hyp], 0)
    fvd = fvd_score(ref, hyp)
    ##################################################################################################################
    # Print results
    ##################################################################################################################
    print('\n')
    print('Results:')
    for name, res in results.items():
        # Mean with a 95% confidence interval over test sequences.
        print(name, res.mean(), '+/-', 1.960 * res.std() / np.sqrt(len(res)))
    print(f'FVD', fvd)
    ##################################################################################################################
    # Save samples
    ##################################################################################################################
    np.savez_compressed(os.path.join(opt.xp_dir, 'results.npz'), **results)
    for name, res in samples.items():
        np.savez_compressed(os.path.join(opt.xp_dir, f'{name}.npz'), samples=res)
def _sample_weight(kappa, dim, num_samples):
"""Rejection sampling scheme for sampling distance from center on
surface of the sphere.
"""
dim = dim - 1 # since S^{n-1}
b = dim / (np.sqrt(4.0 * kappa ** 2 + dim ** 2) + 2 * kappa)
x = (1.0 - b) / (1.0 + b)
c = kappa * x + dim * np.log(1 - x ** 2)
results = []
n = 0
while True:
z = np.random.beta(dim / 2.0, dim / 2.0, size=num_samples)
w = (1.0 - (1.0 + b) * z) / (1.0 - (1.0 - b) * z)
u = np.random.uniform(low=0, high=1, size=num_samples)
mask = kappa * w + dim * np.log(1.0 - x * w) - c >= np.log(u)
results.append(w[mask])
n += sum(mask)
if n >= num_samples:
break
results = np.concatenate(results)[:num_samples]
return results | 31,135 |
def variable_time_collate_fn3(
    batch,
    args,
    device=torch.device("cpu"),
    data_type="train",
    data_min=None,
    data_max=None,
):
    """
    Expects a batch of time series data in the form of (record_id, tt, vals, mask, labels) where
    - record_id is a patient id
    - tt is a 1-dimensional tensor containing T time values of observations.
    - vals is a (T, D) tensor containing observed values for D variables.
    - mask is a (T, D) tensor containing 1 where values were observed and 0 otherwise.
    - labels is a list of labels for the current patient, if labels are available. Otherwise None.
    Returns:
        dict with keys:
        - "observed_data": (B, T_max, D) zero-padded values, min-max
          normalized with data_min/data_max.
        - "observed_tp": (B, T_max) time points, rescaled to [0, 1].
        - "observed_mask": (B, T_max, D) observation mask.

    NOTE(review): `args` and `data_type` are currently unused -- kept for
    call-site compatibility; confirm before removing.
    """
    D = batch[0][2].shape[1]
    # Pad every series up to the longest one in the batch.
    len_tt = [ex[1].size(0) for ex in batch]
    maxlen = np.max(len_tt)
    enc_combined_tt = torch.zeros([len(batch), maxlen]).to(device)
    enc_combined_vals = torch.zeros([len(batch), maxlen, D]).to(device)
    enc_combined_mask = torch.zeros([len(batch), maxlen, D]).to(device)
    for b, (record_id, tt, vals, mask, labels) in enumerate(batch):
        currlen = tt.size(0)
        enc_combined_tt[b, :currlen] = tt.to(device)
        enc_combined_vals[b, :currlen] = vals.to(device)
        enc_combined_mask[b, :currlen] = mask.to(device)
    # Normalize only where the mask marks observed entries.
    enc_combined_vals, _, _ = utils.normalize_masked_data(
        enc_combined_vals, enc_combined_mask, att_min=data_min, att_max=data_max
    )
    # Rescale time stamps to [0, 1] (skipped when all time points are zero).
    if torch.max(enc_combined_tt) != 0.0:
        enc_combined_tt = enc_combined_tt / torch.max(enc_combined_tt)
    data_dict = {
        "observed_data": enc_combined_vals,
        "observed_tp": enc_combined_tt,
        "observed_mask": enc_combined_mask,
    }
    return data_dict
def merge_action(args):
    """Entry point for the "merge" CLI command."""
    # Forward the parsed CLI arguments, reporting progress via print.
    complete_func(print, args.elf, args.m4hex, args.m0hex)
def run_map_reduce(files, mapper, n):
    """Runner to execute a map-reduce reduction of cowrie log files using mapper and files

    Args:
        files (list of files): The cowrie log files to be used for map-reduce reduction.
        mapper (MapReduce): The mapper processing the files using map_func and reduce_func.
        n (int): We want the n most commands / ips / etc. of the cowrie log files.

    Returns:
        result (list): List of map-reduced cowrie log data.
    """
    event_counts = mapper(files)
    # Sort ascending then reverse (rather than sort descending) to keep
    # the original ordering of entries with equal counts.
    event_counts.sort(key=operator.itemgetter(1))
    event_counts.reverse()
    top_events = split_data_by_events(event_counts, n)
    return build_json(top_events)
def metadata_record_dictize(pkg, context):
    """
    Based on ckan.lib.dictization.model_dictize.package_dictize

    Dictize a metadata record (package), optionally at the historical
    revision given by context['revision_id'] / context['revision_date'].
    """
    model = context['model']
    # No revision info in the context means the current state is wanted.
    is_latest_revision = not(context.get('revision_id') or
                             context.get('revision_date'))
    execute = _execute if is_latest_revision else _execute_with_revision
    # package
    if is_latest_revision:
        if isinstance(pkg, model.PackageRevision):
            pkg = model.Package.get(pkg.id)
        result = pkg
    else:
        # Query the revision table for the package state at the revision.
        package_rev = model.package_revision_table
        q = select([package_rev]).where(package_rev.c.id == pkg.id)
        result = execute(q, package_rev, context).first()
    if not result:
        raise tk.ObjectNotFound
    result_dict = d.table_dictize(result, context)
    if result_dict.get('title'):
        result_dict['title'] = result_dict['title'].strip()
    # Fall back through title -> name -> id for a display name.
    result_dict['display_name'] = result_dict['title'] or result_dict['name'] or result_dict['id']
    # extras
    if is_latest_revision:
        extra = model.package_extra_table
    else:
        extra = model.extra_revision_table
    q = select([extra]).where(extra.c.package_id == pkg.id)
    result = execute(q, extra, context)
    result_dict['extras'] = ckan_model_dictize.extras_list_dictize(result, context)
    return result_dict
def is_data_by_filename(fname):
    """Heuristic: treat a file as data when its name contains "Run201".

    TODO: this is super ad hoc. FIXME
    """
    return fname.find("Run201") != -1
async def 서버상태(ctx):
    """Report the bot server's status (CPU and RAM usage) in an embed.

    Deletes the invoking message, then sends the embed to the channel.
    """
    embed = discord.Embed(title="현재 서버 상태")
    cpu_usage = str(psutil.cpu_percent())
    ram_usage = str(psutil.virtual_memory())
    # Echo the raw values to the console as well.
    print(cpu_usage + "\n" + ram_usage)
    embed.add_field(name="CPU Usage: ", value=cpu_usage, inline=False)
    embed.add_field(name="RAM Usage: ", value=ram_usage, inline=False)
    await ctx.message.delete()
    await ctx.send(embed=embed)
def test_add_metrics_distributed() -> None:
    """Test add_metrics_distributed."""
    # Prepare
    history = History()
    # Execute
    history.add_metrics_distributed(rnd=0, metrics={"acc": 0.9})
    # Assert: only the distributed-metrics bucket gained an entry.
    assert not history.losses_distributed
    assert not history.losses_centralized
    assert len(history.metrics_distributed) == 1
    assert not history.metrics_centralized
    assert history.metrics_distributed["acc"][0] == (0, 0.9)
def colorbar_set_label_parallel(cbar,label_list,hpos=1.2,vpos=-0.3,
                                ha='left',va='center',
                                force_position=None,
                                **kwargs):
    """
    Place one text label beside each segment of a colorbar.

    Parameters:
    -----------
    cbar: the colorbar used to set.
    label_list: sequence of labels, one per colorbar segment.
    hpos: the left position of labels, used in vertical colorbar.
    vpos: the below position of labels, used in horizontal colorbar.
    ha, va: horizontal/vertical alignment passed to ``ax.text``.
    force_position:
        1. In case of a tuple, should be the fraction of the first small one
           and the number of remaining equal-length sections. Eg., (0.3,12)
        2. In case of a np.ndarray or list with values in the unit of axes
           fraction, will be directly used to position the texts.
    kwargs: forwarded to ``cbar.ax.text``.

    Raises:
    -------
    ValueError: when force_position is unrecognised or when the number of
        labels does not match the number of positions.

    Example:
    --------
    /homel/ychao/python/script/set_label_parallel_colorbar.py
    """
    def get_yloc(first, num):
        """
        first is the fraction of the first small downward arrow; num is the
        number of remaining equal-length sections on the colorbar.
        """
        first_pos = first/2.
        second_pos = np.arange(first + 0.5, num, 1)
        all_pos = np.array([first_pos] + list(second_pos))
        return all_pos/(first+num)

    # Hide the default tick labels/marks; our texts replace them.
    cbar.set_ticklabels([])
    cbar.ax.tick_params(right='off', left='off')
    # Fractional position of each segment centre along the colorbar.
    yloc = (cbar.values-cbar.boundaries[0])/(cbar.boundaries[-1]-cbar.boundaries[0])
    if force_position is not None:
        if isinstance(force_position, tuple) and len(force_position) == 2:
            yloc = get_yloc(*force_position)
        elif isinstance(force_position, (np.ndarray, list)):
            yloc = force_position
        else:
            raise ValueError("Cannot understand force_position")
    if len(label_list) != len(yloc):
        # BUGFIX: corrected misspelled error message ("lenght" -> "length").
        raise ValueError("the length of cbar segments and label list are not equal!")
    if cbar.orientation == 'vertical':
        for label, ypos in zip(label_list, yloc):
            cbar.ax.text(hpos, ypos, label, ha=ha, va=va, **kwargs)
    elif cbar.orientation == 'horizontal':
        for label, ypos in zip(label_list, yloc):
            cbar.ax.text(ypos, vpos, label, ha=ha, va=va, **kwargs)
def read_csv(file, tz):
    """
    Read a CTC log file into a pandas DataFrame, clean it and rename columns.

    :param file: file (or file-like object) to be read
    :param tz: timezone to localize the timestamp index to
    :return: pandas dataframe
    """
    # Mapping from raw column index to a descriptive channel name.
    ctc_columns = {1: 'unknown_1',
                   2: 'Tank upper',  # temperature [deg C]
                   3: 'unknown_3',
                   4: 'Tank lower',  # temperature [deg C]
                   5: 'unknown_5',
                   6: 'unknown_6',
                   7: 'Primary flow 1',  # temperature [deg C]
                   8: 'Return flow',  # temperature [deg C]
                   9: 'unknown_9',
                   10: 'Heater',  # electric power [kW]
                   11: 'L1',  # electric current [A]
                   12: 'L2',  # electric current [A]
                   13: 'L3',  # electric current [A]
                   14: 'unknown_14',
                   15: 'unknown_15',
                   16: 'unknown_16',
                   17: 'unknown_17',
                   18: 'unknown_18',
                   19: 'unknown_19',
                   20: 'unknown_20',
                   21: 'Charge pump',  # speed [%]
                   22: 'unknown_22',
                   23: 'Heat pump flow',  # temperature [deg C]
                   24: 'Heat pump return',  # temperature [deg C]
                   25: 'unknown_25',
                   26: 'unknown_26',
                   27: 'unknown_27',
                   28: 'unknown_28',
                   29: 'unknown_29',
                   30: 'unknown_30',
                   31: 'unknown_31',
                   32: 'Compressor L1',  # electric current [A]
                   33: 'Compressor'  # on/off [-]
                   }
    df = pd.read_csv(file, header=None, index_col=0, parse_dates=True,
                     usecols=list(range(34)))
    # Localize timestamps; ambiguous DST instants become NaT and are dropped.
    df.index = df.index.tz_localize(tz, ambiguous='NaT')
    df = df[df.index.notnull()]
    # Keep only the first row for duplicated timestamps.
    df = df[~df.index.duplicated(keep='first')]
    df = df.rename(columns=ctc_columns)
    # Encode the compressor on/off flag as 1/0.
    df['Compressor'] = np.where(df['Compressor'] == 'ON', 1, 0)
    return df
def get_tokenizer_from_saved_model(saved_model: SavedModel) -> SentencepieceTokenizer:
    """
    Get tokenizer from tf SavedModel.

    :param SavedModel saved_model: tf SavedModel.
    :return: tokenizer.
    :rtype: SentencepieceTokenizer
    """
    library = saved_model.meta_graphs[0].graph_def.library
    # Locate the (single) function that mentions SentencePiece anywhere.
    sp_functions = [
        fn for fn in library.function
        if "sentencepiecetokenizeop" in str(fn).lower()
    ]
    assert len(sp_functions) == 1
    # Find the SentencePieceOp node inside it; its attr holds the model blob.
    sp_nodes = [
        node for node in sp_functions[0].node_def if node.op == "SentencepieceOp"
    ]
    assert len(sp_nodes) == 1
    # The serialized model is immutable, so it can be reused directly.
    serialized_model = sp_nodes[0].attr["model"].s
    return SentencepieceTokenizer(serialized_model)
def heapq_merge(*iters, **kwargs):
    """Drop-in replacement for heapq.merge with key support.

    Each element is decorated as ``(key(value), source_index, value)``.
    The source index breaks key ties, so two values with equal keys are
    never compared directly — the original ``(key, value)`` decoration
    raised TypeError for unorderable payloads (e.g. dicts) on key ties.
    Ties are won by the earlier iterable, matching ``heapq.merge(key=...)``.

    :param iters: sorted iterables to merge.
    :param kwargs: optional ``key`` callable (as in ``sorted``).
    :return: iterator over the merged values.
    """
    key = kwargs.get('key')
    if key is None:
        return heapq.merge(*iters)

    def decorate(it, idx, key=key):
        # Generator per source iterable; idx is bound eagerly via the
        # argument to avoid late-binding pitfalls.
        for value in it:
            yield key(value), idx, value

    decorated = [decorate(it, idx) for idx, it in enumerate(iters)]
    return (value for _, _, value in heapq.merge(*decorated))
def parse_args():
    """Parse CLI arguments for the CLAC tool.

    Resturns
    ----------
    :class: `argpase.Namespace`
        An object returned by ``argparse.parse_args``.
    """
    parser = argparse.ArgumentParser(
        description='RPA to create language cards on Anki',
        prog='clac',
        usage='%(prog)s <word-or-list> [-y|--yes-rpa] [-r|--auto-remove]',
        epilog='Do you want to help? Collabore on my project! :)',
    )
    parser.add_argument(
        'word_or_list',
        help='A single word or a path to a line-separated .txt file in which each line is a single word. CLAC will first assume that the argument is a path. If it fails, the argument will be treated as a single word.',
    )
    parser.add_argument(
        '-y',
        '--yes-rpa',
        action='store_true',
        help='Optional argument to run the RPA as soon as it saves the word.',
    )
    # parser.add_argument('-r', '--auto-remove', help='Optional argument to remove the folders used to save the data.', action='store_true') # TODO
    return parser.parse_args()
def parse_archive_links(html):
    """Parse the HTML of an archive links page and return its archive links."""
    link_parser = _ArchiveLinkHTMLParser()
    link_parser.feed(html)
    return link_parser.archive_links
def processing_requests():
    """
    Handle the request for what is currently in processing.

    :return: JSON list (empty when nothing is being processed)
    """
    response.content_type = "application/json"
    current = []
    # Snapshot the shared 'processing' item under its mutex.
    with processing_mutex:
        if processing:
            current.append(processing)
    return json.dumps(current)
def detect_feature(a, b=None):
    """
    Detect the feature used in a relay program.

    Parameters
    ----------
    a : Union[tvm.relay.Expr, tvm.IRModule]
        The input expression or module.
    b : Optional[Union[tvm.relay.Expr, tvm.IRModule]]
        The input expression or module.
        The two arguments cannot both be expression or module.

    Returns
    -------
    features : Set[Feature]
        Features used in the program.
    """
    # The FFI entry point expects (expr, module); swap when the module was
    # passed first.
    if isinstance(a, IRModule):
        a, b = b, a
    return set(Feature(int(feat)) for feat in _ffi_api.detect_feature(a, b))
def get_urls_from_loaded_sitemapindex(sitemapindex):
    """Get all the webpage urls in a retrieved sitemap index XML."""
    urls = set()
    # Query both the namespaced and the non-namespaced <loc> elements, since
    # sitemaps in the wild may or may not declare the sitemap namespace.
    for xpath in ('//{http://www.sitemaps.org/schemas/sitemap/0.9}loc', '//loc'):
        for loc_elem in sitemapindex.findall(xpath):
            urls.update(get_urls_from_sitemap(loc_elem.text))
    return urls
def genLinesegsnp(verts, colors = [], thickness = 2.0):
    """
    Generate a NodePath drawing the polyline through *verts*.

    :param verts: sequence of (x, y, z) points
    :param colors: optional [r, g, b, a]; defaults to dark grey when empty
    :param thickness: line thickness
    :return: a transparent NodePath containing the line segments
    """
    segs = LineSegs()
    segs.setThickness(thickness)
    if not colors:
        segs.setColor(Vec4(.2, .2, .2, 1))
    else:
        segs.setColor(colors[0], colors[1], colors[2], colors[3])
    # Draw one segment between each consecutive pair of vertices.
    for start, end in zip(verts[:-1], verts[1:]):
        segs.moveTo(start[0], start[1], start[2])
        segs.drawTo(end[0], end[1], end[2])
    line_np = NodePath('linesegs')
    line_np.attachNewNode(segs.create())
    line_np.setTransparency(TransparencyAttrib.MAlpha)
    return line_np
def ncores_traditional_recommendation_process(user_model_df, user_model_genres_distr_df, user_expected_items_df,
                                              items_mapping_dict, user_blocked_items_df, recommender_label,
                                              popularity_df, transaction_mean, control_count=None, start_time=None):
    """
    A user by time
    Responsible for: the recommender algorithm prediction,
    the models to be used in the pos process
    and the pos processing
    :param start_time: start timestamp used for progress logging
    :param control_count: number of users processed so far (for logging)
    :param user_blocked_items_df: items the user must never be recommended
    :param transaction_mean: the users transactions mean
    :param popularity_df: DataFrame with items popularity
    :param user_model_df: All user transactions
    :param user_model_genres_distr_df: The user genres distribution
    :param user_expected_items_df: The user expected items in the final recommendation
    :param items_mapping_dict: A dict with all items in the system
    :param recommender_label: The recommender algorithm label
    :return: the user's evaluation-results DataFrame
    """
    # Get known items ids by the user, excluding blocked ones
    items_ids = items_mapping_dict.keys()
    know_items_ids = user_model_df[ITEM_LABEL].unique().tolist()
    blocked_items_ids = user_blocked_items_df[ITEM_LABEL].unique().tolist()
    items_ids = set(items_ids) - set(blocked_items_ids)
    # Get unknown items ids by the user — these are the candidates
    unknowing_items_ids = list(set(items_ids) - set(know_items_ids))
    user_candidate_items_max_df = popularity_df[popularity_df[ITEM_LABEL].isin(unknowing_items_ids)]
    # BUGFIX: sort_values returns a new DataFrame; the original discarded its
    # result, so the candidate list was truncated in arbitrary order instead
    # of by descending popularity.
    user_candidate_items_max_df = user_candidate_items_max_df.sort_values(
        by=[TRANSACTION_VALUE_LABEL], ascending=False)
    user_candidate_items_max_df = user_candidate_items_max_df[:CANDIDATES_LIST_SIZE]
    user_candidate_items_max_dict = user_transactions_df_to_item_mapping(user_candidate_items_max_df,
                                                                        items_mapping_dict)
    user_evaluation_results_df = pos_processing_calibration(user_model_genres_distr_df=user_model_genres_distr_df,
                                                           candidates_items_mapping=user_candidate_items_max_dict,
                                                           user_expected_items_ids=user_expected_items_df[
                                                               ITEM_LABEL].tolist(),
                                                           recommender_label=recommender_label,
                                                           transaction_mean=transaction_mean)
    # Progress logging every 100 processed users.
    if control_count is not None and control_count % 100 == 0:
        logger.info(' '.join(['PId:', str(os.getpid()), '->', 'Total of users done:', str(control_count),
                              '->', 'Total time:', str(datetime.timedelta(seconds=time.time() - start_time))]))
    return user_evaluation_results_df
def enhance_puncta(img, level=7):
    """
    Enhance puncta by removing low-frequency wavelet background signals.

    Dependent on image size, try level 6~8.
    """
    if level == 0:
        return img
    packet = pywt.WaveletPacket2D(data=img, wavelet='haar', mode='sym')
    # Upsample the level-'level' detail coefficients back to image size and
    # rescale; this estimates the low-frequency background.
    background = resize(np.array(packet['d' * level].data), img.shape,
                        order=3, mode='reflect') / (2 ** level)
    enhanced = img - background
    enhanced[enhanced < 0] = 0
    return enhanced
def thumbnail(img, size = (1000,1000)):
    """Return a thumbnail-sized copy of a Pillow image without modifying the
    original image.
    """
    resized = img.copy()
    resized.thumbnail(size)
    return resized
def test_cray_ims_deleted_base(cli_runner, rest_mock):
    """ Test cray ims base command """
    runner, cli, _ = cli_runner
    result = runner.invoke(cli, ['ims', 'deleted'])
    assert result.exit_code == 0
    expected = ["public-keys", "recipes", "images"]
    compare_output(expected, result.output)
def _MapAnomaliesToMergeIntoBug(dest_issue, source_issue):
    """Maps anomalies from source bug to destination bug.

    Args:
        dest_issue: an IssueInfo with both the project and issue id.
        source_issue: an IssueInfo with both the project and issue id.
    """
    # Fetch every anomaly currently attached to the source issue; the
    # project defaults to 'chromium' when unset.
    anomalies, _, _ = anomaly.Anomaly.QueryAsync(
        bug_id=int(source_issue.issue_id),
        project_id=(source_issue.project or 'chromium'),
    ).get_result()
    # Re-point each anomaly at the destination issue, then persist them in
    # a single batched datastore write.
    bug_id = int(dest_issue.issue_id)
    for a in anomalies:
        a.bug_id = bug_id
        a.project_id = (dest_issue.project or 'chromium')
    ndb.put_multi(anomalies)
def calculate_new_ratings(P1, P2, winner, type):
    """
    calculate and return the new rating/rating_deviation for both songs

    Args:
        P1 (tuple or float): rating data for song 1
        P2 (tuple or float): rating data for song 2
        winner (str): 'left' if song 1 won, 'right' if song 2 won
        type (str): rating system to use, 'elo' or 'glicko'
            (name kept for backward compatibility although it shadows
            the builtin)

    Returns:
        tuple: newly calculated ratings / rating_deviations for (song 1, song 2)

    Raises:
        ValueError: if ``winner`` or ``type`` has an unrecognised value.
            (Previously an invalid ``winner`` silently passed scores of
            None to the rating functions, and an invalid ``type``
            silently returned None.)
    """
    if winner == 'left':
        s1, s2 = 1, 0
    elif winner == 'right':
        s1, s2 = 0, 1
    else:
        raise ValueError("winner must be 'left' or 'right', got %r" % (winner,))
    if type == 'elo':
        return calculate_elo(P1, P2, s1), calculate_elo(P2, P1, s2)
    elif type == 'glicko':
        return calculate_glicko_rating(P1, P2, s1), calculate_glicko_rating(P2, P1, s2)
    raise ValueError("type must be 'elo' or 'glicko', got %r" % (type,))
def age(a):
    """Age in yr - age(scale factor); thin wrapper around ``_cosmocalc.age``."""
    return _cosmocalc.age(a)
def cli_parser() -> argparse.ArgumentParser:
    """Create parser with set arguments."""
    # "prog" overrides the displayed program name; if omitted the filename
    # would be used (e.g. cli-simple.py).
    parser = argparse.ArgumentParser(
        prog="CLI-COPERNICUS-DOWNLOAD",
        description="A simple example app to aid download of data from Copernicus",
        epilog=" --- ",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # All three arguments are named and required; "choices" restricts the
    # accepted values.
    parser.add_argument(
        "--portal", type=str, required=True, choices=["COP"],
        help="Data source portal",
    )
    parser.add_argument(
        "--dataset", type=str, required=True, choices=["E-OBS", "OTHER"],
        help="Dataset to be downloaded",
    )
    parser.add_argument(
        "--dryrun", type=str, required=True, choices=["True", "False"],
        help="Dry run of dataset to be downloaded, "
        "test only, no data will be downloaded",
    )
    # Returns a parser object
    return parser
def test_get_policies_for_resource_for_non_existing_index():
    """Testing get_policies_for_resource for non existing index 'not_index'."""
    resource_policies = get_policies_for_resource(
        False,            # cluster
        ['not_index'],    # indices
        SAMPLE_POLICIES,
    )
    # No policy should match an unknown index.
    assert resource_policies == []
def run_tests():
    # type: () -> None
    """Discover and run all tests, exiting non-zero on failure."""
    # my-py's typeshed does not have defaultTestLoader and TestLoader type
    # information so suppresss my-py type information.
    suite = unittest.defaultTestLoader.discover(start_dir="tests")  # type: ignore
    runner = XMLTestRunner(verbosity=2, failfast=False, output='results')
    outcome = runner.run(suite)
    sys.exit(not outcome.wasSuccessful())
def _replace_ext_area_by_impedances_and_shunts(
        net_eq, bus_lookups, impedance_params, shunt_params, net_internal,
        return_internal, show_computing_time=False, calc_volt_angles=True, imp_threshold=1e-8):
    """
    This function implements the parameters of the equivalent shunts and equivalent impedance

    :param net_eq: pandapower net being turned into the equivalent grid (mutated in place)
    :param bus_lookups: dict of bus lookup tables ("bus_lookup_pd", "boundary_buses_inclusive_bswitch", ...)
    :param impedance_params: DataFrame of equivalent-impedance parameters
        (from_bus, to_bus, rft_pu, xft_pu, rtf_pu, xtf_pu)
    :param shunt_params: DataFrame of equivalent-shunt parameters (bus_pd, parameter)
    :param net_internal: internal net (unused here; kept for interface compatibility)
    :param return_internal: flag (unused here; kept for interface compatibility)
    :param show_computing_time: flag (unused here; kept for interface compatibility)
    :param calc_volt_angles: forwarded to the power-flow runs
    :param imp_threshold: impedances with all |r|,|x| below this are replaced by closed switches
    """
    # --- drop all external elements
    eg_buses_pd = bus_lookups["bus_lookup_pd"]["e_area_buses"] + \
        bus_lookups["bus_lookup_pd"]["g_area_buses"]
    pp.drop_buses(net_eq, eg_buses_pd)
    try:
        _runpp_except_voltage_angles(net_eq, calculate_voltage_angles=calc_volt_angles,
                                     tolerance_mva=1e-6, max_iteration=100)
    # NOTE(review): bare except hides the real failure cause; consider narrowing.
    except:
        logger.error("The power flow did not converge.")
    # --- drop all branch elements except switches between boundary buses
    drop_internal_branch_elements(net_eq, bus_lookups["boundary_buses_inclusive_bswitch"])
    # --- drop shunt elements attached to boundary buses
    traget_shunt_idx = net_eq.shunt.index[net_eq.shunt.bus.isin(bus_lookups[
        "boundary_buses_inclusive_bswitch"])]
    net_eq.shunt.drop(traget_shunt_idx, inplace=True)
    # --- create impedance
    # An impedance is kept when any of its r/x values exceeds the threshold,
    # or when it directly connects two generator/ext_grid buses.
    not_very_low_imp = (impedance_params.rft_pu.abs() > imp_threshold) | (
        impedance_params.xft_pu.abs() > imp_threshold) | (
        impedance_params.rtf_pu.abs() > imp_threshold) | (
        impedance_params.xtf_pu.abs() > imp_threshold) | (
        impedance_params.from_bus.isin(set(net_eq.gen.bus)|set(net_eq.ext_grid.bus)) &
        impedance_params.to_bus.isin(set(net_eq.gen.bus)|set(net_eq.ext_grid.bus)))
    new_imps = impedance_params[["from_bus", "to_bus", "rft_pu", "xft_pu", "rtf_pu",
                                 "xtf_pu"]].loc[not_very_low_imp]
    # Continue indexing after the existing impedance table.
    max_idx = net_eq.impedance.index.max() if net_eq.impedance.shape[0] else 0
    new_imps.index = range(max_idx+1, max_idx+1+sum(not_very_low_imp))
    new_imps["name"] = "eq_impedance"
    new_imps["sn_mva"] = net_eq.sn_mva
    new_imps["in_service"] = True
    net_eq["impedance"] = pd.concat([net_eq["impedance"], new_imps])
    # --- create switches instead of very low impedances
    new_sws = impedance_params[["from_bus", "to_bus"]].loc[~not_very_low_imp].astype(int)
    new_sws.rename(columns={"from_bus": "bus", "to_bus": "element"}, inplace=True)
    max_idx = net_eq.switch.index.max() if net_eq.switch.shape[0] else 0
    new_sws.index = range(max_idx+1, max_idx+1+sum(~not_very_low_imp))
    new_sws["et"] = "b"
    new_sws["name"] = "eq_switch"
    new_sws["closed"] = True
    new_sws["z_ohm"] = 0
    net_eq["switch"] = pd.concat([net_eq["switch"], new_sws])
    # If some buses are connected through switches, their shunts are connected in parallel
    # to same bus. The shunt parameters needs to be adapted. TODO
    if not not_very_low_imp.all():
        fb = impedance_params.from_bus[~not_very_low_imp].values.tolist()
        tb = impedance_params.to_bus[~not_very_low_imp].values.tolist()
        # fb_values = shunt_params.parameter[shunt_params.bus_pd.isin(fb)].values
        # tb_values = shunt_params.parameter[shunt_params.bus_pd.isin(tb)].values
        # adapted_params = fb_values * tb_values / (tb_values + fb_values)
        # shunt_params.parameter[shunt_params.bus_pd.isin(tb)] = adapted_params
        shunt_params.drop(shunt_params.index[shunt_params.bus_pd.isin(fb)], inplace=True)
        shunt_params.drop(shunt_params.index[shunt_params.bus_pd.isin(tb)], inplace=True)
    # --- create shunts
    max_idx = net_eq.shunt.index.max() if net_eq.shunt.shape[0] else 0
    shunt_buses = shunt_params.bus_pd.values.astype(int)
    # The complex shunt parameter is split into q_mvar (imaginary, sign
    # flipped) and p_mw (real), both scaled by the net's sn_mva.
    new_shunts = pd.DataFrame({"bus": shunt_buses,
                               "q_mvar": -shunt_params.parameter.values.imag * net_eq.sn_mva,
                               "p_mw": shunt_params.parameter.values.real * net_eq.sn_mva
                               }, index=range(max_idx+1, max_idx+1+shunt_params.shape[0]))
    new_shunts["name"] = "eq_shunt"
    new_shunts["vn_kv"] = net_eq.bus.vn_kv.loc[new_shunts.bus.values].values
    new_shunts["step"] = 1
    new_shunts["max_step"] = 1
    new_shunts["in_service"] = True
    net_eq["shunt"] = pd.concat([net_eq["shunt"], new_shunts])
    # Final power flow to validate the equivalent grid.
    _runpp_except_voltage_angles(net_eq, calculate_voltage_angles=calc_volt_angles,
                                 tolerance_mva=1e-6, max_iteration=100)
def _read_array(raster, band, bounds):
    """Read an array (optionally windowed by *bounds*) from *raster*.

    :param raster: raster wrapper exposing ``_gdal_dataset`` and ``geo_transform``
    :param band: band number to read, or None for all bands
    :param bounds: (x_min, y_min, x_max, y_max) in georeferenced units, or None
    """
    dataset = raster._gdal_dataset
    if bounds is None:
        return dataset.ReadAsArray()
    x_min, y_min, x_max, y_max = bounds
    # Invert the geotransform to map georeferenced coords to pixel coords.
    inverse_transform = ~affine.Affine.from_gdal(*raster.geo_transform)
    px_min, py_max = inverse_transform * (x_min, y_min)
    px_max, py_min = inverse_transform * (x_max, y_max)
    x_size = int(px_max - px_min) + 1
    y_size = int(py_max - py_min) + 1
    if band is None:
        return dataset.ReadAsArray(int(px_min), int(py_min), x_size, y_size)
    return dataset.GetRasterBand(band).ReadAsArray(int(px_min), int(py_min),
                                                   x_size, y_size)
def makePlayerInfo(pl_name):
    """Collect all info about a player.

    :param pl_name: player name
    :type pl_name: str
    :return: player info: budget, profit & sales (since the start of the
        game), drinks on sale today
    :rtype: JSON-serialisable dict
    """
    money_info = calculeMoneyInfo(pl_name, 0)
    drinks_offered = makeDrinkOffered(pl_name)
    return {
        "cash": money_info['cash'],
        "profit": money_info['profit'],
        "sales": money_info['sales'],
        "drinksOffered": drinks_offered,
    }
def _FinalizeHeaders(found_fields, headers, flags):
    """Helper to organize the final headers that show in the report.

    The fields discovered in the user objects are kept separate from those
    created in the flattening process in order to allow checking the found
    fields against a list of those expected. Unexpected fields are identified.
    If the report is a subset of all fields, the headers are trimmed.

    NOTE(review): this module targets Python 2 (print statement below).

    Args:
        found_fields: A set of the fields found in all the user objects.
        headers: A set of the fields created in the flattening helpers.
            Will return with the complete set of fields to be printed.
        flags: Argparse flags object with csv_fields.

    Returns:
        Sorted list of headers.
    """
    # Track known fields to notify user if/when fields change. A few are known
    # but not printed (they are denormalized and replaced below):
    expected_fields = set(_UserDictionaryParser.GetExpectedUserFields())
    if found_fields > expected_fields:
        unexpected_fields = ', '.join(found_fields - expected_fields)
        log_utils.LogWarning(
            'Unexpected user fields noticed: %s.' % unexpected_fields)
    # Merge discovered fields in, then drop the denormalized composites.
    headers |= found_fields
    headers -= set(['emails', 'name', 'nonEditableAliases'])
    # Prune the headers reference object that is used outside this
    # function by using discard() if a subset of fields is desired.
    if flags.csv_fields:
        extra_csv_fields = set(flags.csv_fields) - headers
        if extra_csv_fields:
            print '** Ignoring unknown csv_fields: %s.' % ', '.join(
                sorted(extra_csv_fields))
        # Iterate over a copy since we mutate 'headers' while looping.
        for field in list(headers):
            if field not in flags.csv_fields:
                headers.discard(field)
    return sorted(headers)
def create_zip(archive, compression, cmd, verbosity, interactive, filenames):
    """Create a ZIP archive with the zipfile Python module.

    Directories are added recursively via ``write_directory``; any failure
    is wrapped in a ``util.PatoolError``.
    """
    try:
        with zipfile.ZipFile(archive, 'w') as zfile:
            for name in filenames:
                if os.path.isdir(name):
                    write_directory(zfile, name)
                else:
                    zfile.write(name)
    except Exception as err:
        msg = "error creating %s: %s" % (archive, err)
        raise util.PatoolError(msg)
    return None
def count_documents(project, commit_interval):
    """Calculate sentence counts for every document in *project*.

    Arguments:
        project (Project): The ``Project`` to run counts for.
        commit_interval (int): The session is committed every this many
            documents (plus once at the end).
    """
    logger = logging.getLogger(__name__)
    project_logger = ProjectLogger(logger, project)
    documents = project.get_documents()
    project_logger.info("Calculating counts for documents")
    for done, document in enumerate(documents, start=1):
        document.sentence_count = len(document.all_sentences)
        document.save(False)
        # Commit in batches to keep transactions small.
        if done % commit_interval == 0:
            db.session.commit()
    # Flush whatever remains of the last partial batch.
    db.session.commit()
    project_logger.info('Counted %s documents.', len(documents))
def test_create(random, tmpdir):
    """Test if new virtual environments can be created."""
    target = str(tmpdir.join(random))
    venv = api.VirtualEnvironment(target)
    try:
        venv.create()
    except subprocess.CalledProcessError as exc:
        # Surface the subprocess output in the failure message.
        assert False, exc.output
    assert tmpdir.join(random).check()
def get_outmost_points(contours):
    """Get the bounding rectangle enclosing all the given contours."""
    return get_bounding_rect(np.concatenate(contours))
def dhcp_release_packet(eth_dst='ff:ff:ff:ff:ff:ff',
                        eth_src='00:01:02:03:04:05',
                        ip_src='0.0.0.0',
                        ip_dst='255.255.255.255',
                        src_port=68,
                        dst_port=67,
                        bootp_chaddr='00:01:02:03:04:05',
                        bootp_ciaddr='1.2.3.4',
                        dhcp_server_ip='1.2.3.4'):
    """
    Return a dhcp release packet

    Supports a few parameters:
    @param eth_dst Destination MAC, should be broadcast address
    @param eth_src Source MAC, should be address of client
    @param ip_src Source IP, should be default route IP address
    @param ip_dst Destination IP, broadcast IP address
    @param src_port Source Port, 68 for DHCP client
    @param dst_port Destination Port, 67 for DHCP Server
    @param bootp_chaddr MAC Address of client
    @param bootp_ciaddr Client IP Address
    @param dhcp_server_ip IP address of DHCP server
    """
    # Build each protocol layer separately, then stack them.
    ether_layer = scapy.Ether(dst=eth_dst, src=eth_src)
    ip_layer = scapy.IP(src=ip_src, dst=ip_dst)
    udp_layer = scapy.UDP(sport=src_port, dport=dst_port)
    bootp_layer = scapy.BOOTP(chaddr=bootp_chaddr, ciaddr=bootp_ciaddr)
    dhcp_layer = scapy.DHCP(options=[('message-type', 'release'), ('server_id', dhcp_server_ip), ('end')])
    return ether_layer / ip_layer / udp_layer / bootp_layer / dhcp_layer
def get_unique_dir(log_dir='', max_num=100, keep_original=False):
    """Get a unique dir name based on log_dir.

    If keep_original is True, it checks the list
    {log_dir, log_dir-0, log_dir-1, ..., log_dir-[max_num-1]}
    and returns the first non-existing dir name. If keep_original is False
    then log_dir is excluded from the list.
    """
    if keep_original:
        # os.path.exists('') is always False, so an empty log_dir would
        # otherwise be returned verbatim — reject it explicitly.
        if log_dir == '':
            raise ValueError('log_dir cannot be empty with keep_original=True.')
        if not os.path.exists(log_dir):
            return log_dir
    for suffix in range(max_num):
        candidate = '{}-{}'.format(log_dir, suffix)
        if not os.path.exists(candidate):
            return candidate
    raise ValueError('Too many dirs starting with {}.'.format(log_dir))
def dcfc_30_e_plus_360(start: Date, asof: Date, end: Date, freq: Optional[Decimal] = None) -> Decimal:
    """
    Computes the day count fraction for the "30E+/360" convention.

    :param start: The start date of the period.
    :param asof: The date which the day count fraction to be calculated as of.
    :param end: The end date of the period (a.k.a. termination date).
    :param freq: Unused; kept for signature compatibility with sibling conventions.
    :return: Day count fraction.

    >>> ex1_start, ex1_asof = datetime.date(2007, 12, 28), datetime.date(2008, 2, 28)
    >>> ex2_start, ex2_asof = datetime.date(2007, 12, 28), datetime.date(2008, 2, 29)
    >>> ex3_start, ex3_asof = datetime.date(2007, 10, 31), datetime.date(2008, 11, 30)
    >>> ex4_start, ex4_asof = datetime.date(2008, 2, 1), datetime.date(2009, 5, 31)
    >>> round(dcfc_30_e_plus_360(start=ex1_start, asof=ex1_asof, end=ex1_asof), 14)
    Decimal('0.16666666666667')
    >>> round(dcfc_30_e_plus_360(start=ex2_start, asof=ex2_asof, end=ex2_asof), 14)
    Decimal('0.16944444444444')
    >>> round(dcfc_30_e_plus_360(start=ex3_start, asof=ex3_asof, end=ex3_asof), 14)
    Decimal('1.08333333333333')
    >>> round(dcfc_30_e_plus_360(start=ex4_start, asof=ex4_asof, end=ex4_asof), 14)
    Decimal('1.33333333333333')
    """
    ## 30E+: a start on the 31st is rolled back to the 30th ...
    if start.day == 31:
        start = datetime.date(start.year, start.month, 30)
    ## ... while an asof on the 31st rolls forward to the 1st of next month:
    if asof.day == 31:
        asof = asof + datetime.timedelta(days=1)
    ## Number of days under the 30/360 day-count arithmetic:
    days = asof.day - start.day
    months = 30 * (asof.month - start.month)
    years = 360 * (asof.year - start.year)
    ## Done, compute and return the day count fraction:
    return (days + months + years) / Decimal(360)
def generate_meta_config(output_file='../data/meta_config.json'):
    """
    Generate a meta configuration JSON file for the data generation pipeline.

    This spits out a valid template of a meta configuration file as the one
    required by the `WaveformConfigGenerator`. Just run this once to have a
    valid template and then you can go ahead an tweak the individual numbers in
    that template. All the defaults are considered sane choices.
    """
    print("Generate meta config file {}...".format(output_file), end=' ')
    n_samples = 1024
    sample_rate = 4096
    # If the duration is < 0.0 the whole waveform will be used.
    # All waveforms will be padded to the right with zeros to the length of
    # the longest generated waveform; the injection times in the config
    # accompanying the output h5 file are then overriden(!) with the fraction
    # where the maximum of the waveform lies within the whole sample.
    duration = -1.
    # Default parameters passed to the simulator when a parameter is not
    # sampled. Possible approximants include: ['TaylorEt', 'SEOBNRv3_opt',
    # 'IMRPhenomA', 'IMRPhenomC', 'IMRPhenomB', 'EOBNRv2', 'SEOBNRv4_opt',
    # 'PhenSpinTaylor', 'PhenSpinTaylorRD', 'NR_hdf5', 'SEOBNRv3_pert',
    # 'EOBNRv2HM', 'SpinTaylorT4', 'TaylorT1', 'TaylorT3', 'TaylorT2',
    # 'HGimri', 'TaylorT4', 'IMRPhenomD', 'IMRPhenomPv2', 'SEOBNRv1',
    # 'SpinDominatedWf', 'SEOBNRv3', 'SEOBNRv2', 'SpinTaylorT1', 'SEOBNRv4',
    # 'SpinTaylorT2', 'EccentricTD', 'SEOBNRv2_opt', 'SEOBNRv3_opt_rk4']
    default_parameters = {
        'mass1': 1.0,
        'mass2': 1.0,
        'spin1z': 0.0,
        'spin2z': 0.0,
        'lambda1': 0.0,
        'lambda2': 0.0,
        'distance': 1000.0,
        'coa_phase': 0.0,
        'inclination': 0.0,
        'delta_t': 1.0 / sample_rate,
        'f_lower': 15.0,
        'approximant': 'SEOBNRv4',
        'injection_time': 0.95
        # id does not have a default because it must be unique
    }
    # Parameters to vary and their (uniform) sampling ranges. For sensible
    # ranges consult:
    # https://www.lsc-group.phys.uwm.edu/ligovirgo/cbcnote/O2/OfflineTuningVerificationInjections
    update_list = {
        'masses': [2., 50.],
        # 'injection_time': [0.5, 0.9],
        'distance': [400., 800.]
    }
    # Collect everything in a big meta config dictionary and dump it.
    meta_config = {
        'n_samples': n_samples,
        'duration': duration,
        'sample_rate': sample_rate,
        'default_parameters': default_parameters,
        'update_list': update_list,
    }
    with open(output_file, 'w') as f:
        json.dump(meta_config, f, sort_keys=True, indent=2)
    print("DONE -- Saved as {}".format(output_file))
def get_current_func_info_by_traceback(self=None, logger=None) -> None:
    """
    Inspect the call stack via traceback and print information about the
    currently executing function (class name, nested call chain, line number).

    use eg:
        class A:
            def a(self):
                def cc():
                    def dd():
                        get_current_func_info_by_traceback(self=self)
                    dd()
                cc()
        def b():
            get_current_func_info_by_traceback()
        aa = A()
        aa.a()
        b()
        # -> A.a.cc.dd in line_num: 131 invoked
        # -> <module>.b in line_num: 136 invoked
    :param self: the caller's ``self`` (used to report the class name)
    :param logger: optional logger forwarded to ``_print``
    :return: None
    """
    try:
        extract_stack_info = extract_stack()
        # pprint(extract_stack_info)
        # Dotted call-chain string of function names (class name excluded).
        detail_func_invoked_info = ''
        for item in extract_stack_info[1:-1]:
            # extract_stack_info[1:-1] excludes get_current_func_info_by_traceback itself
            tmp_str = '{}' if detail_func_invoked_info == '' else '.{}'
            detail_func_invoked_info += tmp_str.format(item[2])
        # print(detail_func_invoked_info)
        # func_name = extract_stack_info[-2][2],
        line_num = extract_stack_info[-2][1]
        _print(msg='-> {}.{} in line_num: {} invoked'.format(
            # class name
            extract_stack_info[0][2] if self is None else self.__class__.__name__,
            detail_func_invoked_info,
            line_num,),
            logger=logger,
            log_level=1,)
    except Exception as e:
        _print(msg='遇到错误:', logger=logger, exception=e, log_level=2)
    return None
def augment_timeseries_shift(x: tf.Tensor, max_shift: int = 10) -> tf.Tensor:
    """Randomly shift the time series.

    Parameters
    ----------
    x : tf.Tensor (T, ...)
        The tensor to be augmented.
    max_shift : int
        The maximum shift to be randomly applied to the tensor.

    Returns
    -------
    x : tf.Tensor
        The augmented tensor, with the last ``dt`` time points removed
        (``dt`` drawn uniformly from [0, max_shift)).
    """
    # shift the data by removing a random number of later time points
    dt = tf.random.uniform(shape=[], minval=0, maxval=max_shift, dtype=tf.int32)
    # BUGFIX: the original sliced with ``x[:-dt]``, which returns an EMPTY
    # tensor when dt == 0 (since ``x[:-0]`` is ``x[:0]``). Computing the end
    # index explicitly makes a zero shift a no-op.
    return x[: tf.shape(x)[0] - dt, ...]
def main(argv):
    """
    Main function: prepare the input image, run digit recognition and
    print the predicted value.
    """
    raw_image = pre.imageprepare(argv)
    predicted_value = prefictint(raw_image)
    print("识别结果是:", predicted_value)
def test_sae_pk_sec_2(dev, apdev):
    """SAE-PK with Sec 2"""
    check_sae_pk_capab(dev[0])
    dev[0].set("sae_groups", "")
    ssid = "SAE-PK test"
    pw = "dwxm-zv66-p5ue"  # SAE-PK password (Sec 2 per the test name)
    m = "431ff8322f93b9dc50ded9f3d14ace22"  # presumably the password modifier — verify against run_sae_pk
    # Base64 blob passed as the key material; assumed to be the AP's EC
    # private key — TODO confirm against run_sae_pk's signature.
    pk = "MHcCAQEEIAJIGlfnteonDb7rQyP/SGQjwzrZAnfrXIm4280VWajYoAoGCCqGSM49AwEHoUQDQgAEeRkstKQV+FSAMqBayqFknn2nAQsdsh/MhdX6tiHOTAFin/sUMFRMyspPtIu7YvlKdsexhI0jPVhaYZn1jKWhZg=="
    run_sae_pk(apdev[0], dev[0], ssid, pw, m, pk)
def endpoint(fun):
    """Decorator to denote a method which returns some result to the user."""
    # Ensure the post-processing hook list exists without clobbering one
    # installed by an earlier decorator.
    fun._zweb_post = getattr(fun, '_zweb_post', [])
    fun._zweb = _LEAF_METHOD
    fun._zweb_sig = _compile_signature(fun, partial=False)
    return fun
def show(path, file, counter):
    """Print the file where the pattern was found along with its count."""
    if counter <= 0:
        return
    print(os.path.join(path, file), ": Nombre d'occurences :", counter)
def test_query_album_1():
    """Test query --album"""
    import json
    import os
    import os.path
    import osxphotos
    from osxphotos.cli import query
    runner = CliRunner()
    db_path = os.path.join(os.getcwd(), PHOTOS_DB_15_7)
    result = runner.invoke(
        query,
        ["--json", "--db", db_path, "--album", "Pumpkin Farm"],
    )
    assert result.exit_code == 0
    # The fixture album contains exactly three photos.
    assert len(json.loads(result.output)) == 3
def focal_prob(attn, batch_size, queryL, sourceL):
    """
    Compute a binary focal mask over attention weights.

    The confidence g(x) of each source fragment is the square root of its
    similarity probability to the query fragment; position i is kept (1)
    when sum_j g_j * (x_i - x_j) > 0 and dropped (0) otherwise.

    attn: (batch, queryL, sourceL)
    """
    # xi: (batch*queryL, sourceL, 1); xj: (batch*queryL, 1, sourceL)
    xi = attn.unsqueeze(-1).contiguous().view(batch_size * queryL, sourceL, 1)
    xj = attn.unsqueeze(2).contiguous().view(batch_size * queryL, 1, sourceL)
    confidence = torch.sqrt(xj)
    # pairwise[i, j] = x_i * g_j - x_j * g_j, shape (batch*queryL, sourceL, sourceL)
    pairwise = torch.bmm(xi, confidence) - xj * confidence
    funcF = torch.sum(pairwise, dim=-1).view(batch_size, queryL, sourceL)
    # Binarize: 1 where the focal score is positive, else 0.
    return torch.where(funcF > 0, torch.ones_like(attn), torch.zeros_like(attn))
def get_fibonacci_iterative(n: int) -> int:
    """
    Calculate the fibonacci number at position 'n' in an iterative way

    :param n: position number (0-based; fib(0) == 0, fib(1) == 1)
    :return: position n of Fibonacci series
    """
    previous, current = 0, 1
    # Advance the (fib(i), fib(i+1)) pair n times; 'previous' lands on fib(n).
    for _ in range(n):
        previous, current = current, previous + current
    return previous
def get_sample_content(filename):
    """Return sample content from a file under ``tests/xml``.

    :param filename: bare file name (no directory) inside ``tests/xml``
    :return: the file's full text, decoded as UTF-8
    """
    # Bug fix: the format string previously contained no {filename}
    # placeholder, so the argument was silently ignored and every call
    # opened the same literal path.
    with open(
            "tests/xml/{filename}".format(
                filename=filename), encoding="utf-8") as file:
        return file.read()
def create_contrasts(task):
    """
    Build the list of T-contrasts for the stop-signal task, pairing each
    contrast with its sign-flipped (negated) counterpart.
    """
    base = [
        ('Go', 'T', ['GO'], [1]),
        ('GoRT', 'T', ['GO_rt'], [1]),
        ('StopSuccess', 'T', ['STOP_SUCCESS'], [1]),
        ('StopUnsuccess', 'T', ['STOP_UNSUCCESS'], [1]),
        ('StopUnsuccessRT', 'T', ['STOP_UNSUCCESS_rt'], [1]),
        ('Go-StopSuccess', 'T', ['GO', 'STOP_SUCCESS'], [1, -1]),
        ('Go-StopUnsuccess', 'T', ['GO', 'STOP_UNSUCCESS'], [1, -1]),
        ('StopSuccess-StopUnsuccess', 'T',
         ['STOP_SUCCESS', 'STOP_UNSUCCESS'], [1, -1]),
    ]
    # Interleave each contrast with its negated version.
    paired = []
    for name, stat, conds, weights in base:
        # A difference contrast is negated by reversing its operand order;
        # a simple contrast just gets a 'neg_' prefix.
        if '-' in name:
            flipped = "-".join(reversed(name.split("-")))
        else:
            flipped = 'neg_%s' % name
        paired.append((name, stat, conds, weights))
        paired.append((flipped, stat, conds, [-w for w in weights]))
    return paired
def update_anomaly_pred_folder(pred_folder, save_path, brain_mask_folder, brain_as_nifti, data_path, print_progress=False, rot=True):
    """
    Adjust Anomaly segmentation predictions on a folder organised as follow : pred folder contains sub-folder for each volume
    prediction as slices in XX_anomalies.bmp and anomaly map as XX_map_anomalies.png. The sub-folders are named by the
    volume id, similar to the dataset and the brain masks.
    ----------
    INPUT
    |---- pred_folder (str) path to the folder containing th predictions to adjusts.
    |---- save_path (str) path where to save the adjusted predictions.
    |---- brain_mask_folder (str) where to find the brain masks for each volumes. Brain masks can be given as folder
    |           of .bmp slices or as a Nifti volume. The folder name or Nifti volume must be named as the volume id
    |           (0 padded to 3 digits).
    |---- brain_as_nifti (bool) specified whether brain mask are passed as Nifti or folder of .bmp.
    |---- data_path (str) path to the data where a 'ct_info.csv' gives ground_truth bmp filenames
    |---- print_progress (bool) whether to print progress with a progress bar.
    |---- rot (bool) whether to rotate the brain mask by 90° counterclockwise.
    OUTPUT
    |---- None
    """
    # load data_info_csv (per-slice metadata: PatientNumber, SliceNumber, mask_fn, Hemorrhage label)
    data_info = pd.read_csv(os.path.join(data_path, 'ct_info.csv'), index_col=0)
    # get image size from config file (config.json sits one level above pred_folder)
    cfg = AttrDict.from_json_path(os.path.join(os.path.dirname(pred_folder.strip('/')), 'config.json'))
    im_size = cfg.data.size
    # iterate over each volumes predictions
    out_info = []
    for vol_folder in glob.glob(os.path.join(pred_folder, '*/')):
        # NOTE(review): '/'-based splitting assumes POSIX separators; also
        # shadows the builtin `id` — consider renaming on a future pass.
        id = int(vol_folder.split('/')[-2])
        # make volume output dir
        os.makedirs(os.path.join(save_path, f'{id}'), exist_ok=True)
        # get brain mask if nifti (try plain .nii first, fall back to .nii.gz)
        if brain_as_nifti:
            try:
                brain_vol = nib.load(os.path.join(brain_mask_folder, f'{id:03}.nii')).get_fdata()
            except FileNotFoundError:
                brain_vol = nib.load(os.path.join(brain_mask_folder, f'{id:03}.nii.gz')).get_fdata()
        # iterate over Slices
        # NOTE(review): brain_vol is only bound when brain_as_nifti is True;
        # in the .bmp-mask path this line raises NameError (or silently reuses
        # the previous volume's mask) — confirm intended usage before fixing.
        n_slice = brain_vol.shape[2]#len(glob.glob(os.path.join(vol_folder, '*.bmp')))
        #assert n_slice == brain_vol.shape[2], f'The number of slice between the prediction {id} and the brain mask does not match. {n_slice} vs {brain_vol.shape[2]}.'
        # NOTE(review): loop variable shadows the builtin `slice`.
        for slice in range(1, n_slice+1):
            # load pred; missing files mean the model flagged nothing on this slice
            if os.path.exists(os.path.join(vol_folder, f'{slice}_anomalies.bmp')): # check if pred is there
                pred = io.imread(os.path.join(vol_folder, f'{slice}_anomalies.bmp'))
                ad_map = io.imread(os.path.join(vol_folder, f'{slice}_map_anomalies.png'))
                save_im = True
            else:
                pred, ad_map = np.zeros([im_size, im_size]), np.zeros([im_size, im_size])
                save_im = False
            # adjust brain mask size to match prediction (order=0 = nearest-neighbour, keeps the mask binary)
            if brain_as_nifti:
                brain_slice = skimage.transform.resize(brain_vol[:,:,slice-1], pred.shape, order=0)
            else:
                brain_slice = io.imread(os.path.join(brain_mask_folder, f'{id:03}/{slice}.bmp'))
                brain_slice = skimage.transform.resize(brain_slice, pred.shape, order=0, preserve_range=True)
            if rot:
                brain_slice = np.rot90(brain_slice, axes=(0,1))
            # keep only prediction inside the brain
            new_pred = img_as_bool(pred) * img_as_bool(brain_slice)
            new_ad_map = ad_map * img_as_bool(brain_slice)
            # load the ground truth and adjust the size ('None' string means no annotation)
            target_fn = data_info.loc[(data_info.PatientNumber == id) & (data_info.SliceNumber == slice), 'mask_fn'].values[0]
            if target_fn != 'None':
                target = io.imread(os.path.join(data_path, target_fn))
                target = skimage.transform.resize(target, new_pred.shape, order=0, preserve_range=True)
            else:
                target = np.zeros_like(new_pred)
            target = img_as_bool(target)
            # compute the confusion matrix (labels fixed so all 4 cells exist even on empty slices)
            tn, fp, fn, tp = confusion_matrix(target.ravel(), new_pred.ravel(), labels=[0,1]).ravel()
            # save new prediction (only for slices where the model produced output)
            if save_im:
                new_pred_fn = os.path.join(save_path, f'{id}', f'{slice}_anomalies.bmp')
                io.imsave(new_pred_fn, img_as_ubyte(new_pred), check_contrast=False)
                new_ad_map_fn = os.path.join(save_path, f'{id}', f'{slice}_map_anomalies.png')
                io.imsave(new_ad_map_fn, img_as_ubyte(new_ad_map), check_contrast=False)
            # append results
            label = data_info.loc[(data_info.PatientNumber == id) & (data_info.SliceNumber == slice), 'Hemorrhage'].values[0]
            out_info.append({'volID':id, 'slice':slice, 'label':label, 'TP':tp, 'TN':tn, 'FP':fp, 'FN':fn, 'pred_fn': f'{id}/{slice}_anomalies.bmp', 'map_fn': f'{id}/{slice}_map_anomalies.png'})
            # print progress
            if print_progress:
                print_progessbar(slice-1, n_slice, Name=f'Volume {id:03} ; Slice', Size=40, erase=False)
    # make df of results for folder (volume-level scores aggregate the slice confusion counts)
    slice_df = pd.DataFrame(out_info)
    volume_df = slice_df[['volID', 'label', 'TP', 'TN', 'FP', 'FN']].groupby('volID').agg({'label':'max', 'TP':'sum', 'TN':'sum', 'FP':'sum', 'FN':'sum'})
    # compute Dice (+1 smoothing avoids 0/0 on slices with no positives)
    slice_df['Dice'] = (2*slice_df.TP + 1) / (2*slice_df.TP + slice_df.FP + slice_df.FN + 1)
    volume_df['Dice'] = (2*volume_df.TP + 1) / (2*volume_df.TP + volume_df.FP + volume_df.FN + 1)
    # save df
    slice_df.to_csv(os.path.join(save_path, 'slice_prediction_scores.csv'))
    volume_df.to_csv(os.path.join(save_path, 'volume_prediction_scores.csv'))
    # save outputs json (mean Dice overall and over positive — Hemorrhage — volumes only)
    avg_dice_ICH = volume_df.loc[volume_df.label == 1, 'Dice'].mean(axis=0)
    avg_dice = volume_df.Dice.mean(axis=0)
    outputs = {'dice all':avg_dice, 'dice positive':avg_dice_ICH}
    with open(os.path.join(os.path.dirname(save_path), "outputs.json"), 'w') as fn:
        json.dump(outputs, fn)
    with open(os.path.join(os.path.dirname(save_path), "config.json"), 'w') as fn:
        json.dump(cfg, fn)
def project(raster_path, boxes):
    """Project pixel-space bounding boxes into the raster's CRS (e.g. UTM)."""
    with rasterio.open(raster_path) as dataset:
        bounds = dataset.bounds
        pixelSizeX, pixelSizeY = dataset.res
        # Convert pixel offsets into map coordinates. Numpy's origin is the
        # top-left corner (not bottom-left), so y grows downward from bounds.top.
        boxes["left"] = bounds.left + boxes["xmin"] * pixelSizeX
        boxes["right"] = bounds.left + boxes["xmax"] * pixelSizeX
        boxes["top"] = bounds.top - boxes["ymin"] * pixelSizeY
        boxes["bottom"] = bounds.top - boxes["ymax"] * pixelSizeY
        # Turn each row's corner coordinates into a shapely rectangle.
        boxes['geometry'] = boxes.apply(
            lambda row: shapely.geometry.box(row.left, row.top, row.right, row.bottom),
            axis=1)
        boxes = geopandas.GeoDataFrame(boxes, geometry='geometry')
        # Projection comes from the raster itself (see dataset.crs); hard coded here.
        boxes.crs = {'init': "{}".format(dataset.crs)}
    # Keep only the projected corners plus score/label/geometry.
    return boxes[["left", "bottom", "right", "top", "score", "label", "geometry"]]
def mean_relative_error(preds: Tensor, target: Tensor) -> Tensor:
    """
    Computes mean relative error

    Args:
        preds: estimated labels
        target: ground truth labels

    Return:
        Tensor with mean relative error

    Example:
        >>> from torchmetrics.functional import mean_relative_error
        >>> x = torch.tensor([0., 1, 2, 3])
        >>> y = torch.tensor([0., 1, 2, 2])
        >>> mean_relative_error(x, y)
        tensor(0.1250)

    .. deprecated:: v0.4
        Use :func:`torchmetrics.functional.mean_absolute_percentage_error`. Will be removed in v0.5.
    """
    deprecation_msg = (
        "Function `mean_relative_error` was deprecated v0.4 and will be removed in v0.5."
        "Use `mean_absolute_percentage_error` instead."
    )
    warn(deprecation_msg, DeprecationWarning)
    # Delegate to the MAPE helpers: same computation under the new name.
    error_sum, num_obs = _mean_absolute_percentage_error_update(preds, target)
    return _mean_absolute_percentage_error_compute(error_sum, num_obs)
async def shutdown():
    """Cleanly prepare for, and then perform, shutdown of the bot.

    This currently:
    - expires all non-saveable reaction menus
    - logs out of discord
    - saves all savedata to file
    """
    # Bug fix: without this declaration, `botLoggedIn = False` below only
    # created a dead local variable, so the module-level logged-in flag
    # was never actually cleared.
    global botLoggedIn
    # Snapshot the menus first: deleting a menu mutates reactionMenusDB.
    menus = list(bbGlobals.reactionMenusDB.values())
    for menu in menus:
        if not menu.saveable:
            await menu.delete()
    botLoggedIn = False
    await bbGlobals.client.logout()
    saveAllDBs()
    print(datetime.now().strftime("%H:%M:%S: Data saved!"))
def test_runs01():
    """runs"""
    actual = getoutput('{} -s 3 {}'.format(prg, const))
    # Compare against the golden output, ignoring trailing whitespace.
    with open('test-outs/const.seed3.width70.len500') as handle:
        expected = handle.read()
    assert actual.rstrip() == expected.rstrip()
def run():
    """ Run the full script step-by-step"""
    # Load data
    sessions = read_sessions_data()
    engagements = read_engagements_data()
    print(sessions.head())
    print(engagements.head())
    # Transform data: keep only each user's first engagement, join with
    # sessions, drop post-engagement sessions, then derive model features.
    engagements = filter_for_first_engagements(engagements)
    combined = merge_dataframes_on_user_id(sessions, engagements)
    combined = remove_sessions_after_first_engagement(combined)
    combined = add_conversion_metric(combined)
    combined = add_pageviews_cumsum(combined)
    combined.to_csv('output\\df_transformed.csv')
    # Fit model using logistic regression
    fit_results = run_logistic_regression(combined)
    predict_probabilities(fit_results)
    # Visualize results
    visualize_results(combined)
def render_dendrogram(dend: Dict["str", Any], plot_width: int, plot_height: int) -> Figure:
    """
    Render a missing dendrogram.
    """
    # icoord/dcoord segment lists from scipy.dendrogram, plus the column labels
    seg_xs, seg_ys, col_labels = dend["icoord"], dend["dcoord"], dend["ivl"]
    # widen the figure when there are more than 20 columns
    if len(col_labels) > 20:
        plot_width = 28 * len(col_labels)
    fig = Figure(
        plot_width=plot_width,
        plot_height=plot_height,
        toolbar_location=None,
        tools="",
    )
    # round the coordinates to integers, then draw the dendrogram segments
    seg_xs = [[round(coord) for coord in seg] for seg in seg_xs]
    seg_ys = [[round(coord, 2) for coord in seg] for seg in seg_ys]
    fig.multi_line(xs=seg_xs, ys=seg_ys, line_color="#8073ac")
    # the middle two points of each segment form its horizontal bar; attach a
    # hover tooltip there showing the merge (average-linkage) distance
    bar_xs = [seg[1:3] for seg in seg_xs]
    bar_ys = [seg[1:3] for seg in seg_ys]
    bar_heights = [seg[0] for seg in bar_ys]
    source = ColumnDataSource(dict(x=bar_xs, y=bar_ys, n=bar_heights))
    bar_renderer = fig.multi_line(xs="x", ys="y", source=source, line_color="#8073ac")
    fig.add_tools(
        HoverTool(
            renderers=[bar_renderer],
            tooltips=[("Average distance", "@n{0.1f}")],
            line_policy="interp",
        )
    )
    # truncate long column names and place them at the leaf x-positions
    col_labels = [f"{col[:16]}..." if len(col) > 18 else col for col in col_labels]
    leaf_coords = list(range(5, 10 * len(col_labels) + 1, 10))
    fig.xaxis.ticker = leaf_coords
    fig.xaxis.major_label_overrides = dict(zip(leaf_coords, col_labels))
    fig.xaxis.major_label_orientation = np.pi / 3
    fig.yaxis.axis_label = "Average Distance Between Clusters"
    fig.grid.visible = False
    return fig
def sort_by_fullname(data: List[dict]) -> List[dict]:
    """Sort the given records in place, ascending by their FULL_NAME field.

    :param data: list of dicts, each carrying a ``FULL_NAME`` key
    :return: the same list, sorted
    """
    logging.info("Sorting data by fullname...")
    try:
        data.sort(key=lambda record: record["FULL_NAME"])
    except Exception as exception:
        # Log the failure (e.g. a record missing FULL_NAME) and propagate.
        logging.exception(exception)
        raise
    logging.info("Sort data by fullname successfully!")
    return data
def cityscapes_txt(root, data_folder, split):
    """
    Write a text file listing every relevant file path under root/data_folder/split.

    :param root: str, root directory
    :param data_folder: str, image(leftImg8bit or translation) or label(gtFine_labelIds)
    :param split: str, train, eval, test
    :return: None; writes datasets/source_dataset/image_list/<data_folder>_<split>.txt
    """
    im_dir: str = os.path.join(root, data_folder, split)
    # exist_ok avoids the check-then-create race of the old exists()+makedirs()
    os.makedirs(os.path.join('datasets/source_dataset', 'image_list'), exist_ok=True)
    # 'with' guarantees the list file is closed even if the walk raises
    with open(r"{}/image_list/{}_{}.txt".format('datasets/source_dataset', data_folder, split), "w+") as list_file:
        if data_folder == 'leftImg8bit' or data_folder == 'translation':
            # image folders: list every file
            for dirpath, dirnames, filenames in os.walk(im_dir):
                for filename in filenames:
                    list_file.write(os.path.join(dirpath, filename) + '\n')
        elif data_folder == 'gtFine_labelIds':
            # label folder: only the labelId masks, not the other gtFine outputs
            for dirpath, dirnames, filenames in os.walk(im_dir):
                for filename in filenames:
                    if filename.endswith('gtFine_labelIds.png'):
                        list_file.write(os.path.join(dirpath, filename) + '\n')
def get_vrf_interface(device, vrf):
    """ Gets the subinterfaces for vrf

        Args:
            device ('obj'): device to run on
            vrf ('str'): vrf to search under
        Returns:
            interfaces('list'): List of interfaces under specified vrf
            None
        Raises:
            None
    """
    log.info("Getting the interfaces under vrf {vrf}".format(vrf=vrf))
    try:
        parsed = device.parse("show vrf {vrf}".format(vrf=vrf))
    except SchemaEmptyParserError:
        # Empty parser output means the vrf has no data to report.
        return None
    if parsed and "vrf" in parsed and vrf in parsed["vrf"]:
        return parsed["vrf"][vrf].get("interfaces", None)
def rsptext(rsp,subcode1=0,subcode2=0,erri='',cmd='',subcmd1='',subcmd2=''):
    """ Adabas response code to text conversion

    Builds a human-readable message for an Adabas response code, optionally
    decorated with subcode details, a two-character field name extracted from
    the subcodes, and the raw error-info value.
    """
    global rspplugins
    # A registered plugin takes over formatting for its response code entirely.
    if rsp in rspplugins:
        plugin = rspplugins[rsp] # get the plugin function
        return plugin(rsp, subcode1=subcode1, subcode2=subcode2,
                  cmd=cmd,subcmd1=subcmd1,subcmd2=subcmd2)
    # Split each 16-bit subcode into its low/high bytes; a field name may be
    # packed into these bytes.
    c1=chr(subcode1 & 0xff)
    c2=chr( (subcode1 >> 8)& 0xff)
    c3=chr(subcode2 & 0xff)
    c4=chr( (subcode2 >> 8)& 0xff)
    if subcode2 == 0:
        # With no second subcode, the field name may sit in the upper half of
        # subcode1 instead.
        if subcode1>>16:
            c1=chr( (subcode1 >> 24)& 0xff)
            c2=chr( (subcode1 >> 16)& 0xff)
            # Bytes above 0x80 look like EBCDIC; convert to ASCII.
            # NOTE(review): presumably str2asc is an EBCDIC->ASCII helper
            # defined elsewhere in this module — confirm.
            if c1 > '\x80' and c2 > '\x80':
                c1 = str2asc(c1)
                c2 = str2asc(c2)
    if c1>' ' and c2>' ': # ff = field name if both bytes > ' '
        ff='"'+c1+c2+'"'
    elif c3>' ' and c4>' ':
        ff='"'+c3+c4+'"'
    else:
        ff=''
    # ss: decimal + hex rendering of the subcodes, plus the field name if any.
    if subcode2==0 and subcode1==0:
        ss=''
    else:
        ss=' sub=%d,%d X%04X,%04X %s' % (subcode1,subcode2,subcode1,subcode2,ff)
    if erri:
        ss+=' errinf=%08X %r' % (erri,erri)
    if rsp in rspdict:
        subx='' # subcode text
        rspx = rspdict[rsp]
        # Entries may be a plain string or a (text, subcode-dict) tuple.
        if type(rspx) == type( (1,)) : # tuple type ?
            subdict = rspx[1] # subcode dictionary
            rspx=rspx[0] # response code text
            # Only the low 16 bits of each subcode are looked up.
            sx2 = subcode2 & 0xffff
            sx1 = subcode1 & 0xffff
            subx = ''
            if sx2 and sx2 in subdict:
                subx += ' - \n\tSubcode %d: %s' % (sx2,subdict[sx2])
            elif sx1 and sx1 in subdict:
                subx = ' - \n\tSubcode %d: %s' % (sx1,subdict[sx1])
            elif rsp==132: # if LOB resp & subcode not listed
                subx = ' - \n\t'+rspdict.get(subcode2,'No details for subcode')
        return 'Adabas Response %d%s: %s%s' %\
               (rsp, ss, rspx, subx)
    else:
        return 'Adabas Response %s: no explanation available' % rsp
def new_event_notification(producer_url):
    """Pull data from producer when notified"""
    enqueued_method = 'frappe.event_streaming.doctype.event_producer.event_producer.pull_from_node'
    queued = get_jobs()
    # Enqueue a pull only if one is not already pending for this site.
    if not queued or enqueued_method not in queued[frappe.local.site]:
        frappe.enqueue(enqueued_method, queue='default', event_producer=producer_url)
def to_dataframe(data: xr.DataArray, *args, **kwargs) -> pd.DataFrame:
    """
    Replacement for `xr.DataArray.to_dataframe` that adds the attrs for the given
    DataArray into the resultant DataFrame.

    Parameters
    ----------
    data : xr.DataArray
        the data to convert to DataFrame

    Returns
    -------
    pd.DataFrame
        a pandas DataFrame containing the data in the given DataArray, including the
        global attributes
    """
    frame = data.to_dataframe(*args, **kwargs)
    # Broadcast each global attribute into its own constant-valued column.
    for attr_name, attr_value in data.attrs.items():
        frame[attr_name] = attr_value
    return frame
def setup_logging(
    default_path='logging.yaml',
    default_level=logging.INFO,
    env_key='LOG_CFG'
):
    """Setup logging configuration from a yaml file.

    The config path may be overridden through the environment variable named
    by ``env_key``; when no config file exists, basicConfig is used instead.
    """
    config_path = os.getenv(env_key, None) or default_path
    if os.path.exists(config_path):
        with open(config_path, 'rt') as handle:
            logging.config.dictConfig(yaml.safe_load(handle.read()))
    else:
        logging.basicConfig(level=default_level)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.