import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
def trackPlot(mat, fig=None, groups=None, ratios=None, labels=None, cmap=None, norm=None, is2D=False, xticks=False):
"""
This function takes a matrix and generates a track figure with several panels according to a group structure that groups
several rows/cols of the matrix into one panel. This can be done for rows only or for columns and rows. So if the input
is a 10x10 matrix and we have a grouping of 2,4,3,1, then the final figure will have 4 panels, splitting the matrix into
the respective groups. When option is2D is true, the same grouping is also applied to the columns. There is obviously room
for extension ...
Input:
mat - data matrix containing the values
fig - figure object to place the panels into
groups - grouping vector (all ones per default, i.e. a single panel per row)
ratios - the relative proportion each panel takes in the full plot (defaults to the group values)
labels - row labels for the matrix (needs to have as many entries as there are rows in the matrix)
cmap - color map to apply to the single groups
norm - normalization to apply to the single groups (defaults to Normalize(-1.0, 1.0))
is2D - apply grouping to both columns and rows (rows only is default)
xticks - whether to draw xticks (2D case only, on the top row of panels)
Output:
Returns a 2 tuple containing the figure object and an array with the axes objects corresponding to the single groups.
fig - figure
ax - axes
"""
if fig is None:
fig = plt.figure(figsize=(10, 10), dpi=200)
if groups is None:
groups = np.ones((mat.shape[0],), dtype='int')
if ratios is None:
ratios = groups
if labels is not None:
assert(labels.shape[0] == mat.shape[0])
if cmap is None:
cmap = np.array([plt.get_cmap('Blues')] * groups.shape[0], dtype='object')
else:
assert(cmap.shape[0] == groups.shape[0])
if norm is None:
norm = np.array([plt.Normalize(-1.0, 1.0)] * groups.shape[0], dtype='object')
else:
assert(norm.shape[0] == groups.shape[0])
if is2D:
gs = gridspec.GridSpec(groups.shape[0], groups.shape[0], height_ratios=ratios, hspace=0.05, width_ratios=ratios, wspace=0.05)
last_col = 0
axes = np.zeros((groups.shape[0], groups.shape[0]), dtype='object')
for col in range(groups.shape[0]):
last_row = 0
for row in range(groups.shape[0]):
axes[row, col] = fig.add_subplot(gs[row, col])
axes[row, col].imshow(mat[last_row:last_row+groups[row], :][:, last_col:last_col+groups[col]], aspect='auto', origin='upper', interpolation='nearest', cmap=cmap[row], norm=norm[row])
if xticks and row == 0:
axes[row, col].set_xticks(np.arange(groups[col]))
axes[row, col].xaxis.tick_top()
if labels is not None:
axes[row, col].set_xticklabels(labels[last_col:last_col+groups[col]], rotation=90)
else:
axes[row, col].set_xticks([])
if col == 0:
axes[row, col].set_yticks(np.arange(groups[row]))
if labels is not None:
axes[row, col].set_yticklabels(labels[last_row:last_row+groups[row]])
else:
axes[row, col].set_yticks([])
last_row += groups[row]
last_col += groups[col]
else:
axes = np.zeros((groups.shape[0], ), dtype='object')
gs = gridspec.GridSpec(groups.shape[0], 1, height_ratios=ratios, hspace=0.05)
last_row = 0
for row in range(groups.shape[0]):
axes[row] = fig.add_subplot(gs[row, 0])
# if density is not None and row in density:
# ax.fill_between(np.arange(mat.shape[1]),
# else:
axes[row].imshow(mat[last_row:last_row+groups[row], :], aspect='auto', origin='lower', interpolation='nearest', cmap=cmap[row], norm=norm[row])
axes[row].set_xticks([])
axes[row].set_yticks(np.arange(groups[row]))
if labels is not None:
axes[row].set_yticklabels(labels[last_row:last_row+groups[row]])
last_row += groups[row]
return (fig, axes) | 9b59507cb46d5ff4196e43311771357d5b2826a2 | 3,636,200 |
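# Illustrative usage sketch for trackPlot above (not part of the original dataset row):
# a 10x10 random matrix split into four row/column panels of sizes 2, 4, 3 and 1.
demo_mat = np.random.rand(10, 10)
demo_groups = np.array([2, 4, 3, 1], dtype='int')
demo_labels = np.array(['row%d' % i for i in range(10)])
demo_fig, demo_axes = trackPlot(demo_mat, groups=demo_groups, labels=demo_labels, is2D=True, xticks=True)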
from typing import List
from typing import Optional
import sys
def concatenate(
*,
target_list: List[str],
is_colored: bool = False,
number_x: Optional[int] = None,
):
"""api to concatenate movie/picture (note: keyword-only argument)
Args:
target_list (List[str]): list of movies, pictures or directories where pictures are stored.
is_colored (bool, optional): flag to output in color. Defaults to False.
number_x (int, optional): number of targets concatenated in x direction. max is 5. if this variable is None, this will be selected using GUI window
Returns:
return (List[str], optional): list of processed pictures, directories where pictures are stored, and movies. if no process is executed, None is returned
"""
if not target_list:
sys.exit("no target is given!")
m_list, p_list, d_list = process.sort_target_type(target_list)
return_list: List[str] = []
if not m_list and not p_list and not d_list:
sys.exit("no movie, picture, directory is given!")
if m_list:
r = process.ConcatenatingMovie(target_list=m_list,
is_colored=is_colored,
number_x=number_x).execute()
if r is not None:
return_list.extend(r)
if p_list:
r = process.ConcatenatingPicture(target_list=p_list,
is_colored=is_colored,
number_x=number_x).execute()
if r is not None:
return_list.extend(r)
if d_list:
r = process.ConcatenatingPictureDirectory(target_list=d_list,
is_colored=is_colored,
number_x=number_x).execute()
if r is not None:
return_list.extend(r)
return return_list if return_list else None | 65b428de0f15374d0f5ca3b4f43b0eaed9185f80 | 3,636,201 |
def split_at(n, coll):
"""
Returns a tuple of ``(take(n, coll), drop(n, coll))``.
"""
if n <= 0:
return [], coll
if coll is None:
return [], []
# Unfortunately we must consume all elements for the first case because
# unlike Clojure's lazy lists, Python's generators yield their elements
# only once.
taken = []
for i, e in enumerate(coll):
taken.append(e)
if i+1 >= n:
break
return taken, _iter(coll, n) | 7c97e3ad7910b116e01c70925888ea371be11c72 | 3,636,202 |
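# Illustrative usage sketch for split_at above (not part of the original dataset row);
# the returned tail relies on the module-level _iter helper used in the return statement.
head, tail = split_at(2, iter([1, 2, 3, 4, 5]))
print(head)        # [1, 2]
print(list(tail))  # the remaining elements, as produced by the module-level _iter helper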
def clip(x: ArrayLike, lo: ArrayLike = None, up: ArrayLike = None) -> ShapeletsArray:
"""
Element-wise, limits the values in an array
Parameters
----------
x: ArrayLike
Input array expression
lo: Optional ArrayLike (defaults: None)
Low values
up: Optional ArrayLike (defaults: None)
High values
Returns
-------
ShapeletsArray
A new array with the result of the element-wise operation.
Notes
-----
The first parameter must resolve to a dimensional array. Broadcasting
rules will be applied to the rest of the parameters.
Examples
--------
>>> import shapelets.compute as sc
>>> a = sc.array([0,1,2,3]).T
>>> sc.clip(a, 1, 2)
[1 4 1 1]
1 1 2 2
>>> up_vals = sc.array([0,0,1,1]).T
>>> sc.clip(a, up = up_vals)
[1 4 1 1]
0 0 1 1
"""
return _pygauss.clip(x, lo, up) | 48c70dad730574a31b018c94c54cc785c680d6a1 | 3,636,203 |
def determineNewest(uid, homeType):
"""
Construct a query to determine the modification time of the newest object
in a given home.
@param uid: the UID of the home to scan.
@type uid: C{str}
@param homeType: The type of home to scan; C{ECALENDARTYPE},
C{ENOTIFICATIONTYPE}, or C{EADDRESSBOOKTYPE}.
@type homeType: C{int}
@return: A select query that will return a single row containing a single
column which is the maximum value.
@rtype: L{Select}
"""
if homeType == ENOTIFICATIONTYPE:
return Select(
[Max(schema.NOTIFICATION.MODIFIED)],
From=schema.NOTIFICATION_HOME.join(
schema.NOTIFICATION,
on=schema.NOTIFICATION_HOME.RESOURCE_ID ==
schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID),
Where=schema.NOTIFICATION_HOME.OWNER_UID == uid
)
homeTypeName = {ECALENDARTYPE: "CALENDAR",
EADDRESSBOOKTYPE: "ADDRESSBOOK"}[homeType]
home = getattr(schema, homeTypeName + "_HOME")
bind = getattr(schema, homeTypeName + "_BIND")
child = getattr(schema, homeTypeName)
obj = getattr(schema, homeTypeName + "_OBJECT")
return Select(
[Max(obj.MODIFIED)],
From=home.join(bind, on=bind.HOME_RESOURCE_ID == home.RESOURCE_ID).join(
child, on=child.RESOURCE_ID == bind.RESOURCE_ID).join(
obj, on=obj.PARENT_RESOURCE_ID == child.RESOURCE_ID),
Where=(bind.BIND_MODE == 0).And(home.OWNER_UID == uid)
) | 88cfcd264639c6dc76807b4155592321e3a899b3 | 3,636,204 |
def get_data(limit = None, filename = "C:/Users/Marcel/OneDrive/Python Courses/Machine Learning/train.csv"):
"""
Reads the MNIST dataset and outputs X and Y.
The number of rows (samples) read can be limited via the 'limit' argument.
"""
print("Reading in and transforming data...")
dataset = pd.read_csv(filename).values
np.random.shuffle(dataset)
X = dataset[:, 1:] / 255
Y = dataset[:, 0]
if limit is not None:
X, Y = X[:limit], Y[:limit]
print("Done reading in data...", len(Y))
return X, Y | 00933eb2b42180abbd8cf0e326d6e3b0c336131a | 3,636,205 |
def vech(A): # TODO: why not just use A[np.triu_indices(A.shape[0])]?
"""
Simple vech operator
Returns
-------
vechvec: vector of all elements on and below diagonal
"""
length = A.shape[1]
vechvec = []
for i in range(length):
b = i
while b < length:
vechvec.append(A[b, i])
b = b + 1
vechvec = np.asarray(vechvec)
return vechvec | 197e700e1a0a010fedbb4aac7ca9545e49c07574 | 3,636,206 |
def file_size(value, fmt="{value:.1f} {suffix}", si=False):
"""
Takes a raw number of bytes and returns a humanized filesize.
"""
if si:
base = 1000
suffixes = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
else:
base = 1024
suffixes = ("B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB")
max_suffix_index = len(suffixes) - 1
for i, suffix in enumerate(suffixes):
unit = base ** (i + 1)
if value < unit or i == max_suffix_index:
return fmt.format(value=(base * value / unit), suffix=suffix) | 272250966c0d301a86a136a7e84af6049e9fe47f | 3,636,207 |
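# Illustrative usage sketch for file_size above (not part of the original dataset row).
print(file_size(0))                   # '0.0 B'
print(file_size(1536))                # '1.5 KiB' with the default base-1024 suffixes
print(file_size(1_500_000, si=True))  # '1.5 MB' using SI (base-1000) suffixes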
def login():
"""
This method logs the user into the account.
It checks the email and the password against the database.
---
Args: None
Returns: Redirects the user to the account page if login is successful.
"""
if request.method == 'POST':
email = request.form.get('email')
password = request.form.get('password')
user = User.query.filter_by(email=email).first()
if user:
if user.confirmation == True:
if check_password_hash(user.password, password): # check if hashes are the same
login_user(user, remember=False)
return redirect(url_for('views.account'))
else:
flash('Incorrect password', category = 'error')
else:
flash('Please confirm your email address first.', category = 'error')
return redirect(url_for('auth.login'))
else:
flash('Account does not exist!', category = 'error')
return render_template("login.html", user=current_user) | 02ebd2b43b2f32465544efc8b8b5ff1684877fa8 | 3,636,208 |
def index_select_op_tensor(input, dim, index):
"""
input.index_select(dim, index) -> Tensor
See :func:`oneflow.index_select`
"""
return index_select_op(input, dim, index) | 8049ec1541c9120d505e07d1bd944fdeaf05d4ef | 3,636,209 |
def cull(dsk, keys):
""" Return new dask with only the tasks required to calculate keys.
In other words, remove unnecessary tasks from dask.
``keys`` may be a single key or list of keys.
Examples
--------
>>> d = {'x': 1, 'y': (inc, 'x'), 'out': (add, 'x', 10)}
>>> dsk, dependencies = cull(d, 'out') # doctest: +SKIP
>>> dsk # doctest: +SKIP
{'x': 1, 'out': (add, 'x', 10)}
>>> dependencies # doctest: +SKIP
{'x': set(), 'out': set(['x'])}
Returns
-------
dsk: culled dask graph
dependencies: Dict mapping {key: [deps]}. Useful side effect to accelerate
other optimizations, notably fuse.
"""
if not isinstance(keys, (list, set)):
keys = [keys]
out_keys = []
seen = set()
dependencies = dict()
work = list(set(flatten(keys)))
while work:
new_work = []
out_keys += work
deps = [(k, get_dependencies(dsk, k, as_list=True)) # fuse needs lists
for k in work]
dependencies.update(deps)
for _, deplist in deps:
for d in deplist:
if d not in seen:
seen.add(d)
new_work.append(d)
work = new_work
out = {k: dsk[k] for k in out_keys}
return out, dependencies | b583f52835bc813e11092515aedb4d9943c7637c | 3,636,210 |
import torch
import torch.nn.functional as F
def validate_coot(config, model,
val_loader,
epoch,
constrastive_loss,
cmc_loss,
writer,
logger,
use_cuda=True):
"""Validate COOT model
Args:
model: COOT model
val_loader: validation dataloader
epoch: current epoch number
constrastive_loss: MaxMarginRanking loss
cmc_loss: Cross-modal cycle-consistency loss
writer: tensorboard writer
logger: logger instance
use_cuda (bool): use GPU
Returns:
Retrieval performance
"""
model.eval()
max_step = len(val_loader)
# collect embeddings
vid_emb_list = []
par_emb_list = []
clip_emb_list = []
sent_emb_list = []
for step, data_dict in enumerate(val_loader):
(vid_id, vid_frames, vid_frames_mask, vid_frames_len, par_cap_vectors,
par_cap_mask, par_cap_len, clip_num, clip_frames, clip_frames_len,
clip_frames_mask, sent_num, sent_cap_vectors, sent_cap_mask,
sent_cap_len) = unpack_data(data_dict, use_cuda)
if step == 0:
print(f"ids {vid_id[:4]}...")
# forward pass
(vid_emb, clip_emb, vid_context, clip_emb_reshape, clip_emb_mask,
clip_emb_lens) = model.encode_video(vid_frames, vid_frames_mask,
vid_frames_len, clip_num,
clip_frames, clip_frames_len,
clip_frames_mask)
(par_emb, sent_emb, par_context, sent_emb_reshape, sent_emb_mask,
sent_emb_lens) = model.encode_paragraph(par_cap_vectors, par_cap_mask,
par_cap_len, sent_num,
sent_cap_vectors,
sent_cap_mask, sent_cap_len)
loss = compute_constrastive_loss(config, constrastive_loss, vid_emb, par_emb,
clip_emb, sent_emb, vid_context,
par_context)
loss += compute_cmc_loss(cmc_loss, config.CONFIG.TRAIN.LOSS_CYCLE_CONS_W, clip_emb_reshape,
clip_emb_mask, clip_emb_lens,
sent_emb_reshape, sent_emb_mask,
sent_emb_lens)
# collect embeddings
vid_emb_list.extend(vid_emb.detach().cpu())
par_emb_list.extend(par_emb.detach().cpu())
#clip-sentence embeddings
clip_emb_list.extend(clip_emb.detach().cpu())
sent_emb_list.extend(sent_emb.detach().cpu())
# logging
if step % 10 == 0:
logger.info(f"Val [{step}/{max_step}] Loss {loss.item():.4f}")
vid_emb_list = torch.stack(vid_emb_list, 0)
par_emb_list = torch.stack(par_emb_list, 0)
clip_emb_list = torch.stack(clip_emb_list, 0)
sent_emb_list = torch.stack(sent_emb_list, 0)
# video text retrieval
vid_emb_list = F.normalize(vid_emb_list).numpy()
par_emb_list = F.normalize(par_emb_list).numpy()
v2p_res, _ = coot_utils.compute_retr_vid_to_par(vid_emb_list, par_emb_list)
p2v_res, _ = coot_utils.compute_retr_par_to_vid(vid_emb_list, par_emb_list)
sum_at_1 = v2p_res["r1"] + p2v_res["r1"]
logger.info(coot_utils.EVALHEADER)
logger.info(coot_utils.retrieval_results_to_str(p2v_res, "Par2Vid"))
logger.info(coot_utils.retrieval_results_to_str(v2p_res, "Vid2Par"))
# clip sentence retrieval
clip_emb_list = F.normalize(clip_emb_list).numpy()
sent_emb_list = F.normalize(sent_emb_list).numpy()
c2s_res, _ = coot_utils.compute_retr_vid_to_par(clip_emb_list,
sent_emb_list)
s2c_res, _ = coot_utils.compute_retr_par_to_vid(clip_emb_list,
sent_emb_list)
c2s_sum_at_1 = c2s_res["r1"] + s2c_res["r1"]
logger.info(coot_utils.EVALHEADER)
logger.info(coot_utils.retrieval_results_to_str(s2c_res, "Sen2Shot"))
logger.info(coot_utils.retrieval_results_to_str(c2s_res, "Shot2Sen"))
writer.add_scalar('val_loss_epoch', loss, epoch)
writer.add_scalar('val_R1_Sentence2Clip_epoch', s2c_res["r1"], epoch)
writer.add_scalar('val_R5_Sentence2Clip_acc_epoch', s2c_res["r5"], epoch)
writer.add_scalar('val_R10_Sentence2Clip_acc_epoch', s2c_res["r10"], epoch)
writer.add_scalar('val_loss_epoch', loss, epoch)
writer.add_scalar('val_R1_Clip2Sentence_epoch', c2s_res["r1"], epoch)
writer.add_scalar('val_R5_Clip2Sentence_acc_epoch', c2s_res["r5"], epoch)
writer.add_scalar('val_R10_Clip2Sentence_acc_epoch', c2s_res["r10"], epoch)
return ((v2p_res, p2v_res, sum_at_1), (c2s_res, s2c_res, c2s_sum_at_1)) | b6abd4fc6c6cb60ab1a82682754fd5f514c41ccf | 3,636,211 |
import numpy as np
def iniStressProfile(z_dz, z_range, ma):
"""initial acoustic stress profile
\param[in] z z-axis
\param[in] dz axial increment
\param[in] zMin start of new tissue layer
\param[in] zMax end of new tissue layer
\param[in] ma absorption coefficient
\param[out] p0 initial stress profile
"""
(z, dz), (zMin, zMax) = z_dz, z_range
mu_z = np.zeros(z.size)
mu_z[int((zMin-z[0])/dz):int((zMax-z[0])/dz)] = ma
return mu_z*np.exp(-np.cumsum(mu_z*dz)) | 60cfe2f240ea30bb278164b7211d6acaf7d4f778 | 3,636,212 |
def OpenCredentials(cred_path: str):
"""
Opens and parses an AWS credentials file.
:param cred_path: Path to the file containing the credentials
:return: A dict containing the credentials
"""
with open(cred_path) as file:
keys, values = map(lambda s: s.strip().split(','), file)
credentials = dict(zip(keys, values))
return credentials | 2f224a92b6c3999a45f6d73bb90504663614a1ac | 3,636,213 |
def get_follow_users():
"""
Get all the users stored in the cookie
"""
follow_users = []
if "follow" in request.cookies:
follow_users = request.cookies["follow"]
follow_users = follow_users.split(delim)
return follow_users | df212f387af02938e4afc6baac24692598dd0f7f | 3,636,214 |
def generate_user_agent(os=None, navigator=None, device_type=None):
"""
Generates HTTP User-Agent header
:param os: limit list of os for generation, possible values:
"win", "linux", "mac", "android", "ios", "all"
:type os: string or list/tuple or None
:param navigator: limit list of browser engines for generation, possible values:
"chrome", "firefox", "ie", "edge", "safari", "opera", "all"
:type navigator: string or list/tuple or None
:param device_type: limit possible oses by device type
:type device_type: list/tuple or None, possible values:
"desktop", "smartphone", "all"
:return: User-Agent string
:rtype: string
:raises InvalidOption: if a user agent could not be generated for
any combination of allowed oses and navigators
:raises InvalidOption: if any of the passed options is invalid
"""
device_type, os_id, navigator_id = pick_config_ids(
device_type, os, navigator)
system = build_system_components(os_id, navigator_id)
app = build_app_components(os_id, navigator_id)
ua_template = choose_ua_template(os_id, navigator_id, app)
user_agent = ua_template.format(system=system, app=app)
return user_agent | cc5b4c251b088d61f00255c148c32a508c77dd1a | 3,636,215 |
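# Illustrative usage sketch for generate_user_agent above (not part of the original
# dataset row); the exact header varies because templates and versions are sampled randomly.
ua = generate_user_agent(os='win', navigator='chrome', device_type='desktop')
print(ua)  # e.g. 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 ...'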
def list_upcoming_assignments_calendar_events(request_ctx, **request_kwargs):
"""
Returns the current user's upcoming events, i.e. the same things shown
in the dashboard 'Coming Up' sidebar.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:return: List upcoming assignments, calendar events
:rtype: requests.Response (with void data)
"""
path = '/v1/users/self/upcoming_events'
url = request_ctx.base_api_url + path.format()
response = client.get(request_ctx, url, **request_kwargs)
return response | 2c7380eabe82f7180da3777a8cebc75a20345a32 | 3,636,216 |
def mse_loss(y,loc):
""" Mean squared error loss function
Use mean-squared error to regress to the expected value
Parameters:
y: target values
loc: mean (predicted value)
"""
loss = (y-loc)**2
return K.mean(loss) | 9a3a1dc45680cf3ad8434d24c511041f7f054750 | 3,636,217 |
def _get_license_key_outputs(session):
"""Returns the account id and policy ARN for the license key secret if they exist"""
global __cached_license_key_nr_account_id
global __cached_license_key_policy_arn
if __cached_license_key_nr_account_id and __cached_license_key_policy_arn:
return __cached_license_key_nr_account_id, __cached_license_key_policy_arn
output_values = _get_stack_output_value(session, ["NrAccountId", "ViewPolicyARN"])
__cached_license_key_nr_account_id = output_values.get("NrAccountId")
__cached_license_key_policy_arn = output_values.get("ViewPolicyARN")
return __cached_license_key_nr_account_id, __cached_license_key_policy_arn | a04aad32d9b192987462396023b117ecda91ea14 | 3,636,218 |
def get_sample_untransformed(shape, distribution_type, distribution_params,
seed):
"""Get a distribution based on specification and parameters.
Parameters can be a list, in which case each of the list members is used to
generate one row (or column?) of the resulting sample matrix. Otherwise, the
same parameters are used for the whole matrix.
Args:
shape: Tuple/List representing the shape of the output
distribution_type: DistributionType object
distribution_params: Dict of distribution parameters
seed: random seed to be used
Returns:
sample: TF Tensor with a sample from the distribution
"""
if isinstance(distribution_params, list):
if len(shape) != 2 or len(distribution_params) != shape[1]:
raise ValueError("If distribution_params is a list, the desired 'shape' "
"should be 2-dimensional and number of elements in the "
"list should match 'shape[1]'")
all_samples = []
for curr_params in distribution_params:
curr_samples = get_one_sample_untransformed([shape[0], 1],
distribution_type,
curr_params, seed)
all_samples.append(curr_samples)
return tf.concat(all_samples, axis=1)
else:
return get_one_sample_untransformed(shape, distribution_type,
distribution_params, seed) | 4b5a69920b8501ef4f57c9802dcea997c5df8587 | 3,636,219 |
import params
def __convert_sysctl_dict_to_text():
"""
Convert the sysctl configuration dict to text, with each property-value pair on its own line
"""
sysctl_file_content = "### HAWQ System Parameters ###########\n"
for key, value in params.hawq_sysctl.iteritems():
if not __valid_input(value):
raise Exception("Value {0} for parameter {1} contains non-numeric characters which are not allowed (except whitespace), please fix the value and retry".format(value, key))
sysctl_file_content += "{0} = {1}\n".format(key, value)
return sysctl_file_content | ccea96f72c0730ee072b7fc174f02d6f302282d6 | 3,636,220 |
from typing import Union
from typing import List
from typing import Dict
from typing import Any
from typing import Optional
from typing import cast
import requests
def vizualScript(
inputds: str,
script: Union[List[Dict[str, Any]], Dict[str, Any]],
script_needs_compile: bool = False,
properties: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""
Create a view that implements a sequence of vizual commands over a fixed input table
Parameters
----------
inputds: string
The internal name of the dataset to apply the input script to
script: list[dictionary] or dictionary
The sequence of vizual commands to apply to the input dataset.
If not a list, the parameter will be assumed to be a singleton
command and wrapped in a list.
script_needs_compile: boolean
Set to true if mimir should preprocess the script to provide more
spreadsheet-like semantics (e.g., lazy evaluation of expression
cells)
Returns
-------
dictionary of
- "name": The name of the created view
- "script": The compiled version of the script (or just script if
script_needs_compile = False)
"""
properties = {} if properties is None else properties
script_list: List[Dict[str, Any]]
if type(script) is list:
script_list = cast(List[Dict[str, Any]], script)
else:
script_list = [cast(Dict[str, Any], script)]
req_json = {
"input" : inputds,
"script" : script_list,
# "resultName": Option[String],
"compile": script_needs_compile,
"properties" : properties
}
# print(_mimir_url + "vizual/create")
# print(json.dumps(req_json))
resp = readResponse(requests.post(_mimir_url + 'vizual/create', json=req_json))
assert("name" in resp)
assert("script" in resp)
return resp | 93ded6562e49110134ecbf1fade7c5c2b7a57582 | 3,636,221 |
def _to_sparse_input_and_drop_ignore_values(input_tensor, ignore_value=None):
"""Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells.
If `input_tensor` is already a `SparseTensor`, just return it.
Args:
input_tensor: A string or integer `Tensor`.
ignore_value: Entries in `dense_tensor` equal to this value will be
absent from the resulting `SparseTensor`. If `None`, default value of
`dense_tensor`'s dtype will be used ('' for `str`, -1 for `int`).
Returns:
A `SparseTensor` with the same shape as `input_tensor`.
Raises:
ValueError: when `input_tensor`'s rank is `None`.
"""
input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
input_tensor)
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
return input_tensor
with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value,)):
if ignore_value is None:
if input_tensor.dtype == dtypes.string:
# Exception due to TF strings are converted to numpy objects by default.
ignore_value = ''
elif input_tensor.dtype.is_integer:
ignore_value = -1 # -1 has a special meaning of missing feature
else:
# NOTE: `as_numpy_dtype` is a property, so with the parentheses this is
# constructing a new numpy object of the given type, which yields the
# default value for that type.
ignore_value = input_tensor.dtype.as_numpy_dtype()
ignore_value = math_ops.cast(
ignore_value, input_tensor.dtype, name='ignore_value')
indices = array_ops.where_v2(
math_ops.not_equal(input_tensor, ignore_value), name='indices')
return sparse_tensor_lib.SparseTensor(
indices=indices,
values=array_ops.gather_nd(input_tensor, indices, name='values'),
dense_shape=array_ops.shape(
input_tensor, out_type=dtypes.int64, name='dense_shape')) | bfbde83654a817ab12a85d4fa321eba6d731174d | 3,636,222 |
from datetime import datetime
from dateutil import parser
import logging
def parse_date(date_str):
"""
>>> parse_date("22 April 2011 at 20:34")
datetime.datetime(2011, 4, 22, 20, 34)
>>> parse_date("9 July 2011")
datetime.datetime(2011, 7, 9, 0, 0)
>>> parse_date("September 2003")
datetime.datetime(2003, 9, 1, 0, 0)
"""
try:
return datetime.strptime(date_str, "%d %B %Y at %H:%M")
except Exception:
logging.info("Parsing date: {0} - date incomplete".format(date_str))
try:
# We could directly parse all dates with parser,
# but this allows to have logging only for incomplete dates
return parser.parse(
date_str, default=datetime(
year=datetime.now().year, month=1, day=1))
except Exception:
relative_time = parse_relative_time(date_str)
if relative_time:
return datetime.now() - relative_time
fuzzy_t = parse_fuzzy_time(date_str)
if fuzzy_t:
fuzzy_time = fuzzy_t[0]
delta = fuzzy_t[1]
return datetime.now().replace(
hour=fuzzy_time.hour,
minute=fuzzy_time.minute,
second=fuzzy_time.second) - delta
else:
logging.error("Failed to parse date: {0}".format(date_str))
return datetime.now() | cc56ce8517d9efd1de5f41a2ca645a54530635db | 3,636,223 |
import os
import tempfile
import bson
def readTmpFile( processPid ):
""" Read the temp file """
fileName = os.path.join( tempfile.gettempdir(), 'mms-' + str( processPid ) )
if not os.path.isfile( fileName ):
return None
f = open( fileName )
try:
fileContent = f.read()
# Handle the legacy json files
if fileContent.startswith( '{' ):
os.remove( fileName )
return None
resBson = bson.decode_all( fileContent )
if len(resBson) != 1:
return None
return resBson[0]
finally:
f.close() | a2b3bba95fce0c2d1c6014cb55daa03748733b5a | 3,636,224 |
def pkcs7_unpad_strict(data, block_size=16):
"""Same as `pkcs7_unpad`, but throw exception on incorrect padding.
Mostly used to showcase the padding oracle attack.
"""
pad = data[-1]
if ord(pad) < 1 or ord(pad) > block_size:
raise Exception('Invalid padding length')
for i in range(2, ord(pad)+1):
if data[-i] != pad:
raise Exception('Invalid padding character')
return data[:-ord(pad)] | 0cb7c2d66c30de8bac54ca714dfa05a29d4f0cbd | 3,636,225 |
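# Illustrative usage sketch for pkcs7_unpad_strict above (not part of the original
# dataset row); the function operates on str data and uses ord(), matching its Python 2 heritage.
print(pkcs7_unpad_strict('ICE ICE BABY\x04\x04\x04\x04'))  # 'ICE ICE BABY'
try:
    pkcs7_unpad_strict('ICE ICE BABY\x01\x02\x03\x04')     # inconsistent padding bytes
except Exception as exc:
    print(exc)  # 'Invalid padding character'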
def cut(d1: dict, d2: dict) -> dict:
"""Removes the keys/values in `d1` to `d2` if they do
not already exist (non-mutating action)
Examples:
.. highlight:: python
.. code-block:: python
from map_ops.operations import cut
d1 = {"foo": 1, "bar": 1}
d2 = {"foo": 2, "baz": 2}
cut(d1, d2)
{"baz": 2}
Args:
d1: A Python dict
d2: A Python dict
Returns:
A Python dict
"""
return cut_(d1, d2) | 4985d9cb6ff804149a4fddd85bd2c01dd72b9f0d | 3,636,226 |
def chiresponse(A,x):
"""
Deprecated, just use normal "response" function above!
The response function used in the chi squared fitting portion of the simulation.
Meant to imitate the actual response of a scintillator.
Inputs 2 vectors, and responds with a cos^x dependence.
Parameters
----------
A : float
The angle between the two vectors who's response is meant to be imitated.
Returns
-------
A : float
The cosine dependence based on the angle, includes a mask so that terms corresponding to angular separations beyond pi/2 are 0, imitating what would happen if a GRB didn't strike the face of a detector. Further simulations of this effect are neccessary in a different software package to confirm this assumption, but its okay for now.
"""
#meant to imitate the response of the detectors for effective area vs. angle, found to be around .77
# print(length(A),length(B))
#if cosine is negative,
mask = A > np.pi/2.
A[mask] = 0
A[~mask] = pow(abs(np.cos(A[~mask])),x)
return A | 95a07a63e63277a091100fa8519b329c6b2f90de | 3,636,227 |
def _remove_trailing_string(content, trailing):
"""
Strip trailing component `trailing` from `content` if it exists.
Used when generating names from view classes.
"""
if content.endswith(trailing) and content != trailing:
return content[:-len(trailing)]
return content | 775bafba5ea518e03499c9351b74ac472c265c9a | 3,636,228 |
from typing import List
from typing import Optional
import os
def data_downloader(genome_ids: List[str],
output_directory: Optional[str] = None,
metadata: Optional[str] = None) -> List[str]:
"""
Parameters
----------
genome_ids
A list of assembly accession id's
output_directory
Directory to look for and save data into
metadata
A file containing metadata for the genomes to be downloaded
Returns
-------
List[str]
The filepaths to the fasta files for each id requested.
Raises
------
ValueError
If `metadata` is not a valid filepath, is missing required columns, or a requested accession ID is not found in the metadata.
"""
metadata_cols = ['ftp_path', '# assembly_accession']
if metadata is None:
genomes_metadata = pd.read_csv(default_metadata(),
sep='\t', index_col=False)
elif os.path.exists(metadata):
genomes_metadata = pd.read_csv(metadata, sep='\t',
index_col=False)
if not all(val_ in genomes_metadata.columns for val_ in
metadata_cols):
raise ValueError("metadata must at least contain columns "
"for all of the following: {}"
.format(metadata_cols))
else:
raise ValueError("Argument `metadata` must be a valid filepath or "
"default `None`")
if output_directory is None:
output_directory = os.path.curdir
genomes_metadata.set_index('# assembly_accession', inplace=True)
possible_ids = set(genomes_metadata.index)
for id_ in genome_ids:
if id_ not in possible_ids:
raise ValueError('Assembly accession ID \'{}\' is not in metadata'
.format(id_))
# make sure all genomes are downloaded (download if not)
fasta_filenames = _ensure_all_data(genome_ids,
genomes_metadata,
output_directory)
return fasta_filenames | 4412d0cc6894dff652d5f84a05b106c255e36a7c | 3,636,229 |
def find_loop_size( public_key, subject=7 ):
"""
To transform a subject number, start with the value 1.
Then, a number of times called the loop size, perform the following steps:
- Set the value to itself multiplied by the subject number.
- Set the value to the remainder after dividing the value by 20201227
After the desired loop size, the subject number 7 is transformed into the
public key itself.
"""
loops = 0
value = 1
while value != public_key:
loops += 1
value *= subject
value = value % 20201227
return loops | 831f5f3e9867b06640493226fa35a89251f5aad5 | 3,636,230 |
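# Illustrative usage sketch for find_loop_size above (not part of the original dataset
# row), using the worked example from the Advent of Code 2020 day 25 puzzle text.
print(find_loop_size(5764801))   # 8, since 7**8 == 5764801
print(find_loop_size(17807724))  # 11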
def lambda_local_ep(ngl, ind_passive, passive_el, disp_vector, dyna_stif, coord, connect, E, v, rho):
""" Calculates the lambda parameter of the local elastic potential energy function.
Args:
ngl (:obj:`int`): Degrees of freedom.
ind_passive (:obj:`numpy.array`): Index of passive elements.
passive_el (:obj:`numpy.array`): Passive element nodes.
disp_vector (:obj:`numpy.array`): Displacement vector.
dyna_stif (:obj:`numpy.array`): Dynamic stiffness matrix.
coord (:obj:`numpy.array`): Coordinates of the element.
connect (:obj:`numpy.array`): Element connectivity.
E (:obj:`float`): Elastic modulus.
v (:obj:`float`): Poisson's ratio.
rho (:obj:`float`): Density.
Returns:
Lambda parameter solution.
"""
aux1 = np.zeros(ngl, dtype=complex)
fadj = 0
for i, el in enumerate(passive_el):
Ke, _ = fc.matricesQ4(el, coord, connect, E, v, rho)
aux1[ind_passive[i]] = Ke@disp_vector[ind_passive[i]].conjugate()
fadj += aux1
aux1[:] = 0
fadj *= -1/2
lam = spsolve(dyna_stif, fadj)
return lam | 63925ebbd28710f1d9d3e3c64d8f364de1f76e36 | 3,636,231 |
from typing import TypeVar
T = TypeVar("T")
def from_spanning_matroid(matroid: tuple[set[T], list[set[T]]]) -> list[set[T]]:
"""Construct flats from a matroid defined by spanning sets.
Args:
matroid (tuple[set[T], list[set[T]]]): A matroid defined by spanning sets.
Returns:
list[set[T]]: The flats of a given matroid.
"""
E, _ = matroid
return from_closure_matroid((E, closure_function.from_spanning_matroid(matroid))) | 9b2ff51cec3be92b8442e9bc807f14a8ee4412dc | 3,636,232 |
import functools
def suppress_traceback(debug: bool = True):
"""
Decorator to suppress traceback when in debug mode.
Parameters
----------
debug: bool
turn on debug mode or not
Returns
-------
A decorator that wraps the target function.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
with _DebugTraceback(debug) as tb:
return func(*args, **kwargs)
return wrapper
return decorator | c5e595f274f2af21a2397d0f7592208a4284b360 | 3,636,233 |
import os
from nipype.utils.filemanip import split_filename
from clinica.utils.atlas import (
AtlasAbstract,
JHUDTI811mm,
JHUTracts01mm,
JHUTracts251mm,
)
from clinica.utils.statistics import statistics_on_atlas
def statistics_on_atlases(in_registered_map, name_map, prefix_file=None):
"""Computes a list of statistics files for each atlas.
Args:
in_registered_map (str): Map already registered on atlases.
name_map (str): Name of the registered map in CAPS format.
prefix_file (Opt[str]):
<prefix_file>_space-<atlas_name>_map-<name_map>_statistics.tsv
Returns:
List of paths leading to the statistics TSV files.
"""
in_atlas_list = [JHUDTI811mm(), JHUTracts01mm(), JHUTracts251mm()]
atlas_statistics_list = []
for atlas in in_atlas_list:
if not isinstance(atlas, AtlasAbstract):
raise TypeError("Atlas element must be an AtlasAbstract type")
if prefix_file is None:
_, base, _ = split_filename(in_registered_map)
filename = (
f"{base}_space-{atlas.get_name_atlas()}"
f"_res-{atlas.get_spatial_resolution()}_map-{name_map}_statistics.tsv"
)
else:
filename = (
f"{prefix_file}_space-{atlas.get_name_atlas()}"
f"_res-{atlas.get_spatial_resolution()}_map-{name_map}_statistics.tsv"
)
out_atlas_statistics = os.path.abspath(os.path.join(os.getcwd(), filename))
statistics_on_atlas(in_registered_map, atlas, out_atlas_statistics)
atlas_statistics_list.append(out_atlas_statistics)
return atlas_statistics_list | e28a639539adeaa55691215511df0695403b2499 | 3,636,234 |
import tempfile
import os
def render_projection_from_filelist(files: list) -> str:
"""Render a full projection montage from the given list of files
Returns the filename to the output image, which must be manually
deleted after use.
"""
temp_dir = tempfile.TemporaryDirectory()
convert_filelist_to_tempfiles(files, temp_dir.name)
# by using jpeg for the output format, we avoid including the
# rendered projections in the subsequent renders
max_file = os.path.join(temp_dir.name, "max.jpg")
min_file = os.path.join(temp_dir.name, "min.jpg")
avg_file = os.path.join(temp_dir.name, "avg.jpg")
convert_tempfiles_to_projection(temp_dir.name, max_file, ProjectionType.MAX)
convert_tempfiles_to_projection(temp_dir.name, min_file, ProjectionType.MIN)
convert_tempfiles_to_projection(temp_dir.name, avg_file, ProjectionType.AVG)
output_filename = tempfile.mktemp(suffix='.jpg')
make_montage(min_file, max_file, avg_file, output_filename)
temp_dir.cleanup()
return output_filename | 69da08ca82e31bca28ef5dc19c2e4a078be72830 | 3,636,235 |
def get_value_for_attribute(attribute):
"""For a given key return the value.
Args:
attribute (str): Some metadata key.
Returns:
str: The value of the requested key, if key isn't present then None.
"""
path = '/computeMetadata/v1/instance/attributes/%s' % attribute
try:
http_response = _issue_http_request(
HTTP_GET, path, REQUIRED_METADATA_HEADER)
return http_response.read()
except (TypeError, ValueError, errors.MetadataServerHttpError):
LOGGER.exception('Unable to read value for attribute key %s '
'from metadata server.', attribute)
return None | 2b61f018988db90165f06e975a6478ba3f606652 | 3,636,236 |
def list_selected_groups(remote):
"""Returns a list of unique facegroup IDs for the current face selection (requires an active selection)"""
cmd1 = mmapi.StoredCommands()
key1 = cmd1.AppendSelectCommand_ListSelectedFaceGroups()
remote.runCommand(cmd1)
groups1 = mmapi.vectori()
cmd1.GetSelectCommandResult_ListSelectedFaceGroups(key1, groups1);
return vectori_to_list(groups1); | 2ec5346c6e34c8fc35670a3061d65a92ec518c47 | 3,636,237 |
def make_user_variable(
id_name, cluster_name, w_name, d_name, y_tree_name, y_name, x_name_ord,
x_name_unord, x_name_always_in_ord, z_name_list,
x_name_always_in_unord, z_name_split_ord, z_name_split_unord,
z_name_mgate, z_name_amgate, x_name_remain_ord, x_name_remain_unord,
x_balance_name_ord, x_balance_name_unord):
"""Put variable names in dictionary."""
def check_none(name):
if name is None:
return []
return name
variable_dict = {'id_name': check_none(id_name),
'cluster_name': check_none(cluster_name),
'w_name': check_none(w_name),
'd_name': check_none(d_name),
'y_tree_name': check_none(y_tree_name),
'y_name': check_none(y_name),
'x_name_ord': check_none(x_name_ord),
'x_name_unord': check_none(x_name_unord),
'x_name_always_in_ord': check_none(x_name_always_in_ord),
'z_name_list': check_none(z_name_list),
'x_name_always_in_unord': check_none(
x_name_always_in_unord),
'z_name_ord': check_none(z_name_split_ord),
'z_name_unord': check_none(z_name_split_unord),
'z_name_mgate': check_none(z_name_mgate),
'z_name_amgate': check_none(z_name_amgate),
'x_name_remain_ord': check_none(x_name_remain_ord),
'x_name_remain_unord': check_none(x_name_remain_unord),
'x_balance_name_ord': check_none(x_balance_name_ord),
'x_balance_name_unord': check_none(x_balance_name_unord),
}
return variable_dict | d7f9f85a75df28e1db7f3dee71d625bbe99c6106 | 3,636,238 |
import re
def get_skip_report_step_by_index(skip_report_list):
"""Parse the missed step from skip a report.
Based on the index within the skip report file (each line a report), the
missed step for this entry gets extracted. In case no step could be found,
the whole entry could not been parsed or no report for this index exists,
the step is 'None'.
"""
def extract_step(index):
skip_report_entry = (
skip_report_list[index] if index < len(skip_report_list) else ""
)
step_findings = re.findall(
"^([0-9]+),0x[0-9,a-f]+,[0-9,-]+ [0-9,:]+$", skip_report_entry.strip()
)
step = int(step_findings[0]) if len(step_findings) == 1 else None
return step
return extract_step | 7aa46050702aba07902ceec586175fce2226e1e3 | 3,636,239 |
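# Illustrative usage sketch for get_skip_report_step_by_index above (not part of the
# original dataset row); note that the function returns the extract_step closure, not a step value.
sample_report = ["12,0xdeadbeef,2021-01-01 10:00:00", "not a report line"]
step_of = get_skip_report_step_by_index(sample_report)
print(step_of(0))  # 12
print(step_of(1))  # None (entry does not match the expected pattern)
print(step_of(5))  # None (no report exists for this index)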
import os
def write_api(entrypoint, kind="node", pkg_path=None, overwrite=False):
"""
"""
entrypoint_name = entrypoint['Name'].replace(".", "_").lower()
class_name = entrypoint['NewName']
class_dir = entrypoint['Module']
class_type = entrypoint['Type']
class_file = class_name.lower()
doc_builder = DocBuilder()
doc_builder.class_name = class_name
doc_builder.class_module = class_dir
doc_builder.desc = entrypoint['Desc']
doc_builder_core = DocBuilder()
doc_builder_core.class_name = class_name
doc_builder_core.class_module = class_dir
doc_builder_core.desc = entrypoint['Desc']
banner = COPYRIGHT_NOTICE + CODEGEN_WARNING
if verbose:
print(class_name)
###################################
# create function
funobjs = create_py(entrypoint, kind=kind)
visible_args = [
arg for arg in funobjs['inputs'] if isinstance(
arg.hidden, Missing)]
doc_args = [
DocParameter(
name=arg.new_name_converted,
desc=arg.desc) for arg in visible_args if
arg.name_converted != 'column']
doc_builder.add_manifest_args(doc_args)
doc_builder_core.add_manifest_args(doc_args)
# see what column param type is
column_arg = [
arg for arg in visible_args if arg.name_converted == 'column']
# columns for entrypoint
hidden_args = [arg for arg in funobjs['inputs']
if not isinstance(arg.hidden, Missing)]
columns_entrypoint = [arg for arg in hidden_args]
# In a function header, arguments must appear in this order:
# * any normal arguments(name);
# * any default arguments (name=value);
# * the *name (or* in 3.X) form;
# * any name or name=value keyword-only arguments (in 3.X);
# * the **name form.
class_args = [arg.get_arg() for arg in visible_args if isinstance(
arg.default, Missing) and arg.name_converted != 'column']
class_args += [arg.get_arg() for arg in visible_args if not isinstance(
arg.default, Missing) and arg.name_converted != 'column']
class_args = ',\n '.join(class_args)
entrypoint_args_map = [arg for arg in visible_args if isinstance(
arg.default, Missing) and arg.name_converted != 'column']
entrypoint_args_map += [arg for arg in visible_args if not isinstance(
arg.default, Missing) and arg.name_converted != 'column']
entrypoint_args_map = [
"%s=%s" %
(arg.name_converted,
arg.name_assignment) for arg in entrypoint_args_map]
entrypoint_args_map = '\n'.join(entrypoint_args_map)
args_map = [arg for arg in visible_args if isinstance(
arg.default, Missing) and arg.name_converted != 'column']
args_map += [arg for arg in visible_args if not isinstance(
arg.default, Missing) and arg.name_converted != 'column']
api_args_map = [
"%s=%s" %
(arg.new_name_converted,
arg.new_name_converted) for arg in args_map]
api_args_map = '\n'.join(api_args_map)
core_args_map = [
"%s=%s" %
(arg.new_name_converted,
arg.name_core_assignment) for arg in args_map]
core_args_map = '\n'.join(core_args_map)
fun_settings_body = None
if class_type == 'Component':
fun_settings_body = "\n ".join(
[arg.get_body() for arg in funobjs['settings']])
dots = "..."
if "." in class_dir:
dots = "...."
imports = [
arg.get_import(
prefix=(
"%sentrypoints." %
dots)) for arg in visible_args if
arg.get_import() is not None]
imports = '\n'.join(imports)
# write the class to a file
py_path = module_to_path(class_dir, pkg_path)
if not os.path.exists(py_path):
os.makedirs(py_path)
file = os.path.join(py_path, ".".join([class_file, "py"]))
if os.path.exists(file) and not overwrite:
raise FileExistsError(
"file {} exists, set 'overwrite = TRUE' to overwrite.".format(
file))
write_class(
entrypoint,
class_name,
class_type,
file,
class_file,
class_dir,
banner,
class_args,
api_args_map,
doc_builder,
column_arg,
hidden_args)
# Generating test classes is broken. Commented out for now.
# write the class test to a file
# py_path = os.path.join(pkg_path, "tests", *class_dir.split("."))
# if not os.path.exists(py_path): os.makedirs(py_path)
#
# file = os.path.join(py_path, "test_" + ".".join([class_file, "py"]))
# if os.path.exists(file) and not overwrite:
# raise FileExistsError("file {} exists, set 'overwrite = TRUE'
# to overwrite.".format(file))
#
# write_class_test(class_name, file)
# write the core class to a file
py_path = os.path.join(pkg_path, "internal", "core",
*class_dir.split("."))
if not os.path.exists(py_path):
os.makedirs(py_path)
file = os.path.join(py_path, ".".join([class_file, "py"]))
if os.path.exists(file) and not overwrite:
raise FileExistsError(
"file {} exists, set 'overwrite = TRUE' to overwrite.".format(
file))
write_core_class(
entrypoint,
entrypoint_name,
class_name,
class_type,
file,
class_file,
class_dir,
banner,
imports,
class_args,
core_args_map,
entrypoint_args_map,
doc_builder_core,
column_arg,
columns_entrypoint,
fun_settings_body,
hidden_args)
return funobjs | 794429bc29f24cff13f1ecbb65c4e6c1af3931e3 | 3,636,240 |
from typing import List
def constraint_notes_are(sequence: FiniteSequence, beat_offset: int, pitches: List[int]) -> bool:
"""Tells us if the context note on the given beat_offset
has the same pitches as the given list of pitches
"""
if beat_offset > sequence.duration:
return True
offset_event = sequence.event_at(beat_offset)
return sorted(offset_event.pitches) == sorted(pitches) | 26a0182f708f310af0fa022e7ca6e3e34951fe3c | 3,636,241 |
import os
def make_anuga_params():
"""Function to make the example ANUGA parameters."""
params = pt.modelParams()
path = os.path.join(os.path.dirname(__file__), 'ex_anuga_data.npz')
data = np.load(path)
# pull depth and stage from that data
depth = data['depth']
qx = data['qx']
qy = data['qy']
# define the params variables
params.stage = np.copy(depth)
params.depth = depth
params.qx = qx
params.qy = qy
params.dx = 10.
params.theta = 1.0
params.model = 'Anuga'
return params | 218132f33adefbe29e251dd6f29d1efc56af26b1 | 3,636,242 |
from re import T
def clip(tensor: T.Tensor, a_min: T.Scalar=None,
a_max: T.Scalar=None) -> T.Tensor:
"""
Return a tensor with its values clipped between a_min and a_max.
Args:
tensor: A tensor.
a_min (optional): The desired lower bound on the elements of the tensor.
a_max (optional): The desired upper bound on the elements of the tensor.
Returns:
tensor: A new tensor with its values clipped between a_min and a_max.
"""
return tensor.clip(a_min, a_max) | d80d27711f5b257b9b132a313018c74d365d1159 | 3,636,243 |
def remove_objects_from_args(args, # type: Iterable[Any]
kwargs, # type: Dict[str, Any]
pvalue_class # type: Union[Type[T], Tuple[Type[T], ...]]
):
# type: (...) -> Tuple[List[Any], Dict[str, Any], List[T]]
"""For internal use only; no backwards-compatibility guarantees.
Replaces all objects of a given type in args/kwargs with a placeholder.
Args:
args: A list of positional arguments.
kwargs: A dictionary of keyword arguments.
pvalue_class: A class object representing the types of arguments that must
be replaced with a placeholder value (instance of ArgumentPlaceholder).
Returns:
A 3-tuple containing a modified list of positional arguments, a modified
dictionary of keyword arguments, and a list of all objects replaced with
a placeholder value.
"""
pvals = []
def swapper(value):
pvals.append(value)
return ArgumentPlaceholder()
new_args = [swapper(v) if isinstance(v, pvalue_class) else v for v in args]
# Make sure the order in which we process the dictionary keys is predictable
# by sorting the entries first. This will be important when putting back
# PValues.
new_kwargs = dict((k, swapper(v)) if isinstance(v, pvalue_class) else (k, v)
for k, v in sorted(kwargs.items()))
return (new_args, new_kwargs, pvals) | e68d59e00f18357f83817bc49248a0297a869624 | 3,636,244 |
import re
def reminder_validator(input_str):
"""
Allows a string that matches utils.REMINDER_REGEX.
Raises ValidationError otherwise.
"""
match = re.match(REMINDER_REGEX, input_str)
if match or input_str == '.':
return input_str
else:
raise ValidationError('Expected format: <number><w|d|h|m> '
'<popup|email|sms>. (Ctrl-C to exit)\n') | 3dc1895a19170ec8143ed6b62020d0e4b87f174b | 3,636,245 |
def evaluate_nll(confidences, true_labels, log_input=True, eps=1e-8, reduction="mean"):
"""
Args:
confidences (Array): An array with shape [N, K,].
true_labels (Array): An array with shape [N,].
log_input (bool): Specifies whether confidences are already given as log values.
eps (float): Small value to avoid evaluation of log(0) when log_input is False.
reduction (str): Specifies the reduction to apply to the output.
Returns:
An array of negative log-likelihood with shape [1,] when reduction in ["mean", "sum",], or
raw negative log-likelihood values with shape [N,] when reduction in ["none",].
"""
log_confidences = confidences if log_input else jnp.log(confidences + eps)
true_target = onehot(true_labels, num_classes=log_confidences.shape[1])
raw_results = -jnp.sum(true_target * log_confidences, axis=-1)
if reduction == "none":
return raw_results
elif reduction == "mean":
return jnp.mean(raw_results)
elif reduction == "sum":
return jnp.sum(raw_results)
else:
raise NotImplementedError(f'Unknown reduction=\"{reduction}\"') | f2694b495f3856269fcdf5c934b3ffabe45a0491 | 3,636,246 |
import re
def parse(features: str) -> AirPlayFlags:
"""Parse an AirPlay feature string and return what is supported.
A feature string have one of the following formats:
- 0x12345678
- 0x12345678,0xabcdef12 => 0xabcdef1212345678
"""
match = re.match(r"^0x([0-9A-Fa-f]{1,8})(?:,0x([0-9A-Fa-f]{1,8})|)$", features)
if match is None:
raise ValueError(f"invalid feature string: {features}")
value, upper = match.groups()
if upper is not None:
value = upper + value
return AirPlayFlags(int(value, 16)) | ae4e69cb3c03f5c1252067c491a8e05875d642de | 3,636,247 |
def _prefix_with_swift_module(path, resource_info):
"""Prepends a path with the resource info's Swift module, if set.
Args:
path: The path to prepend.
resource_info: The resource info struct.
Returns: The path with the Swift module name prepended if it was set, or just
the path itself if there was no module name.
"""
swift_module = resource_info.swift_module
if swift_module:
return swift_module + "-" + path
return path | f2a12f59a3c30c09fa20d65b806779ad47f49b90 | 3,636,248 |
import datetime
def str2datetime(dt, format=None):
"""
convert a string into a datetime object, it can be:
- 2013-05-24 18:49:46
- 2013-05-24 18:49:46.568
@param dt string
@param format format for the conversion, the most complete one is
``%Y-%m-%d %H:%M:%S.%f``
which you get by default
@rtype datetime
@return datetime
"""
if "+" in dt:
dt = dt.split("+")[0].strip()
elif " -" in dt:
dt = dt.split(" -")[0].strip()
if format is None:
if " " in dt:
if "." in dt:
return datetime.datetime.strptime(dt, "%Y-%m-%d %H:%M:%S.%f")
else:
return datetime.datetime.strptime(dt, "%Y-%m-%d %H:%M:%S")
elif "T" in dt:
if "." in dt:
return datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S.%f")
else:
return datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S")
else:
return datetime.datetime.strptime(dt, "%Y-%m-%d")
else:
return datetime.datetime.strptime(dt, format) | b304eb4b0bdaf87efda333475f879fb64a8f690d | 3,636,249 |
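# Illustrative usage sketch for str2datetime above (not part of the original dataset row).
print(str2datetime("2013-05-24 18:49:46"))      # 2013-05-24 18:49:46
print(str2datetime("2013-05-24 18:49:46.568"))  # 2013-05-24 18:49:46.568000
print(str2datetime("2013-05-24"))               # 2013-05-24 00:00:00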
import os
import glob
import nipype.interfaces.io as nio
import nipype.pipeline.engine as pe
def run_func_motion_correct(func_reorient, out_dir=None, run=True):
"""Run the 'func_motion_correct_workflow' function to execute the modular
workflow with the provided inputs.
:type func_reorient: str
:param func_reorient: Filepath to the deobliqued, reoriented functional
timeseries.
:type out_dir: str
:param out_dir: (default: None) The output directory to write the results
to; if left as None, will write to the current directory.
:type run: bool
:param run: (default: True) Will run the workflow; if set to False, will
connect the Nipype workflow and return the workflow object
instead.
:rtype: str
:return: (if run=True) The filepath of the generated anatomical_reorient
file.
:rtype: Nipype workflow object
:return: (if run=False) The connected Nipype workflow object.
:rtype: str
:return: (if run=False) The base directory of the workflow if it were to
be run.
"""
output = "func_motion_correct"
workflow = pe.Workflow(name='%s_workflow' % output)
if not out_dir:
out_dir = os.getcwd()
workflow_dir = os.path.join(out_dir, "workflow_output", output)
workflow.base_dir = workflow_dir
resource_pool = {}
config = {}
num_cores_per_subject = 1
resource_pool["func_reorient"] = func_reorient
workflow, resource_pool = \
func_motion_correct_workflow(workflow, resource_pool, config)
ds = pe.Node(nio.DataSink(), name='datasink_func_motion_correct')
ds.inputs.base_directory = workflow_dir
node, out_file = resource_pool["func_motion_correct"]
workflow.connect(node, out_file, ds, 'func_motion_correct')
ds = pe.Node(nio.DataSink(), name='datasink_coordinate_transformation')
ds.inputs.base_directory = workflow_dir
node, out_file = resource_pool["coordinate_transformation"]
workflow.connect(node, out_file, ds, 'coordinate_transformation')
if run:
workflow.run(plugin='MultiProc', plugin_args= \
{'n_procs': num_cores_per_subject})
outpath = glob.glob(os.path.join(workflow_dir, "func_motion_correct",\
"*"))[0]
return outpath
else:
return workflow, workflow.base_dir | 7d2d2b4fa5f842eef38c42954424cd504cfa7b7f | 3,636,250 |
from ..utils import sampling
def get_zoomin(self, scale=1.0):
"""
Returns a spherical region encompassing maximally refined cells.
Moved from Amr class.
What should it do??
Parameters
----------
scale : float
The radius of the returned sphere is scaled by 'scale'.
"""
imin = np.where(self.dm['m'] == self.dm['m'].min())
xr = [self.dm['px'][imin].min(), self.dm['px'][imin].max()]
yr = [self.dm['py'][imin].min(), self.dm['py'][imin].max()]
zr = [self.dm['pz'][imin].min(), self.dm['pz'][imin].max()]
xc = 0.5 * sum(xr)
yc = 0.5 * sum(yr)
zc = 0.5 * sum(zr)
radius = 0.5 * max([xr[1]-xr[0], yr[1]-yr[0], zr[1]-zr[0]]) * scale
#print(radius)
return sampling.set_region(centers=[xc, yc, zc], radius=radius) | 52a4ead516a1b11a0e6fc6c1791488e62bbbe222 | 3,636,251 |
def parse_repeating_time_interval_to_days(date_str):
"""Parsea un string con un intervalo de tiempo con repetición especificado
por la norma ISO 8601 en una cantidad de días que representa ese intervalo.
Devuelve 0 en caso de que el intervalo sea inválido.
"""
intervals = {'Y': 365, 'M': 30, 'W': 7, 'D': 1, 'H': 0, 'S': 0}
if date_str.find('R/P') != 0: # Malformed periodicity
return 0
date_str = date_str.strip('R/P')
days = 0
index = 0
for interval in intervals:
value_end = date_str.find(interval)
if value_end < 0:
continue
try:
days += int(float(date_str[index:value_end]) * intervals[interval])
# Invalid accrualPeriodicity value, treated as 0
except ValueError:
continue
index = value_end
# If the number of days is smaller, round it up to 1
return max(days, 1) | c417c0fc971ae0c94f651634ef5fb27f1accff24 | 3,636,252 |
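# Illustrative usage sketch for parse_repeating_time_interval_to_days above (not part
# of the original dataset row).
print(parse_repeating_time_interval_to_days("R/P1Y"))    # 365
print(parse_repeating_time_interval_to_days("R/P6M"))    # 180
print(parse_repeating_time_interval_to_days("R/P1D"))    # 1
print(parse_repeating_time_interval_to_days("monthly"))  # 0 (not an ISO 8601 repeating interval)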
def get_pk_and_validate(model):
"""
:param model: the model whose fields are inspected
:return: the single primary-key field of the model
"""
hits = []
for field in get_model_fields(model):
extra_attrs = field.field_info.extra
if extra_attrs.get('primary_key'):
hits.append(field)
hit_count = len(hits)
if hit_count != 1:
raise ERRORS.PROGRAMMING_ERROR.exception(
'Error: model {} defines {} primary keys: {}. Hint: a model must have exactly one primary key'.format(
model,
hit_count,
','.join([hit.name for hit in hits]))
)
return hits[0] | 3da628de19d9280f3fcacd232bc7de084596bc09 | 3,636,253 |
from math import log, pi, tan
def lat_to_y(lat):
"""Convert latitude to Web-Mercator
Args:
lat: a latitude value
Returns:
float: a Web-Mercator y coordinate
"""
r = 6_378_137 # radius of the Earth at the equator
return log(tan((90 + lat) * pi / 360)) * r | 142770214f9503653ed8d2c38be39e69730dc4c6 | 3,636,254 |
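# Illustrative usage sketch for lat_to_y above (not part of the original dataset row).
print(lat_to_y(0.0))   # 0.0 (the equator maps to y = 0 in Web Mercator)
print(lat_to_y(51.5))  # roughly 6.7e6 metres north of the equator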
import os
import platform
import xdg
def find_file(filename):
"""Find a file of given name on the file system.
This function is intended to use in tests and demo applications
to locate data files without resorting to absolute paths. You may
use it for your code as well.
It looks in the following locations:
* If an absolute filename is given, it is used
* Check whether the given relative path exists with respect to the current working directory
* Check whether the given relative path exists with respect to the specified XDG data directory (e.g. through the environment variable :code:`XDG_DATA_DIRS`).
:param filename:
The (relative) filename to search for
:type filename: str
:return: An absolute filename
"""
# If the path is absolute, do not change it
if os.path.isabs(filename):
return filename
# Gather a list of candidate paths for relative path
candidates = []
# Use the current working directory
candidates.append(os.path.join(os.getcwd(), filename))
# Use the XDG data directories
if platform.system() in ["Linux", "Darwin"]:
for xdg_dir in xdg.xdg_data_dirs():
candidates.append(os.path.join(xdg_dir, filename))
# Iterate through the list to check for file existence
for candidate in candidates:
if os.path.exists(candidate):
return candidate
raise FileNotFoundError(
f"Cannot locate file {filename}. Tried the following locations: {', '.join(candidates)}"
) | 516d0b03f2254b37605a4b04bc8d3401fecb23d0 | 3,636,255 |
from PIL import Image, ImageDraw
def generate_lasso_mask(image, selectedData):
"""
Generates a polygon mask using the given lasso coordinates
:param image: The image whose size determines the mask dimensions
:param selectedData: The raw coordinates selected from the data
:return: The polygon mask generated from the given coordinates
"""
height = image.size[1]
y_coords = selectedData["lassoPoints"]["y"]
y_coords_corrected = [height - coord for coord in y_coords]
coordinates_tuple = list(zip(selectedData["lassoPoints"]["x"], y_coords_corrected))
mask = Image.new("L", image.size)
draw = ImageDraw.Draw(mask)
draw.polygon(coordinates_tuple, fill=255)
return mask | 10831928275f5799814576e71a8faf3af019b35d | 3,636,256 |
from math import pi
def guitar(C):
"""Triangular wave (pulled guitar string)."""
L = 0.75
x0 = 0.8*L
a = 0.005
freq = 440
wavelength = 2*L
c = freq*wavelength
w = 2*pi*freq
num_periods = 1
T = 2*pi/w*num_periods
# Choose dt the same as the stability limit for Nx=50
dt = L/50./c
def I(x):
return a*x/x0 if x < x0 else a/(L-x0)*(L-x)
umin = -1.2*a; umax = -umin
cpu, all_u = viz(I, 0, 0, c, L, dt, C, T, umin, umax,
animate=True, tool='scitools')
# checking
#for e in all_u:
# print e[int(len(all_u[1])/2)] | 0bd0ae7f5a720f330f27b1d16cbdadaea76535fd | 3,636,257 |
def start_threads_dict():
"""
Get the starting THREADS dictionary for the specified URLs
:return: dict --> the starting THREADS dictionary for the URLs specified in the configuration
"""
temp_dict = dict()
enable_flag = const.CONF.get('Auto_Test.assign_start_threads', 'ENABLE')
if enable_flag and isinstance(enable_flag, str):
if enable_flag.lower() == 'true':
temp_dict = const.CONF['Auto_Test.assign_start_threads']
return temp_dict | 45829c774b0b0a539fc9bede8989940dcae7a863 | 3,636,258 |
from typing import Optional
def exists_in_s3(s3_path: str) -> Optional[bool]:
"""Check whether a fully specified s3 path exists.
Args:
s3_path: Full path on s3 in format "s3://<bucket_name>/<obj_path>".
Returns:
Boolean of whether the file exists on s3 (None if there was an error.)
"""
bucket, key = decompose_s3_path(s3_path)
s3_client = boto3.client("s3")
try:
s3_client.head_object(Bucket=bucket, Key=key)
except botocore.exceptions.ClientError:
return False
except Exception as e:
print(
f"ERROR: unexpected exception checking existence of s3_path={s3_path}"
f": {e}"
)
return None
return True | 2f72fcf0f5f56d45e7cc9a63d0b572ef8eb652af | 3,636,259 |
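A usage sketch, assuming a hypothetical bucket/key and configured AWS credentials:
# decompose_s3_path is assumed to split "s3://bucket/key" into (bucket, key).
if exists_in_s3("s3://my-bucket/path/to/object.txt"):
    print("object exists")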
from ethpm.uri import check_if_chain_matches_chain_uri
from ethpm.exceptions import EthPMValidationError
from typing import List
from web3 import Web3
def validate_single_matching_uri(all_blockchain_uris: List[str], w3: Web3) -> str:
"""
Return a single block URI after validating that it is the *only* URI in
all_blockchain_uris that matches the w3 instance.
"""
matching_uris = [
uri for uri in all_blockchain_uris if check_if_chain_matches_chain_uri(w3, uri)
]
if not matching_uris:
raise EthPMValidationError("Package has no matching URIs on chain.")
elif len(matching_uris) != 1:
raise EthPMValidationError(
f"Package has too many ({len(matching_uris)}) matching URIs: {matching_uris}."
)
return matching_uris[0] | efc814456b74f2783de2c1fc214cba11b7b6a9ad | 3,636,260 |
import time
def ListAndWaitForObjects(service, counting_start_time,
expected_set_of_objects, object_prefix):
"""List objects and wait for consistency.
Args:
service: the ObjectStorageServiceBase object to use.
counting_start_time: The start time used to count for the inconsistency
window.
expected_set_of_objects: The set of expectation.
object_prefix: The prefix of objects to list from.
  Returns:
    result_consistent: Whether the listing result is consistent.
    list_count: Number of list calls made before the result was consistent.
    list_latency: Latency of the list request that was consistent.
    total_wait_time: Total time waited before the result was consistent.
  """
total_wait_time = 0
list_count = 0
result_consistent = False
list_latency = 0
while total_wait_time < LIST_CONSISTENCY_WAIT_TIME_LIMIT:
list_start_time = time.time()
list_result = service.ListObjects(FLAGS.bucket, object_prefix)
list_count += 1
list_latency = time.time() - list_start_time
if expected_set_of_objects.difference(set(list_result)):
total_wait_time = time.time() - counting_start_time
continue
else:
result_consistent = True
break
return result_consistent, list_count, list_latency, total_wait_time | d2a3c2704cca699c588c4382b16643c23d578553 | 3,636,261 |
from typing import List
import pandas as pd
def load_mbb_player_boxscore(seasons: List[int]) -> pd.DataFrame:
"""Load men's college basketball player boxscore data
Example:
`mbb_df = sportsdataverse.mbb.load_mbb_player_boxscore(seasons=range(2002,2022))`
Args:
seasons (list): Used to define different seasons. 2002 is the earliest available season.
Returns:
pd.DataFrame: Pandas dataframe containing the
player boxscores available for the requested seasons.
    Raises:
        SeasonNotFoundError: If `season` is less than 2002.
    """
    if isinstance(seasons, int):
        seasons = [seasons]
    frames = []
    for i in seasons:
        if int(i) < 2002:
            raise SeasonNotFoundError("season cannot be less than 2002")
        i_data = pd.read_parquet(MBB_PLAYER_BOX_URL.format(season = i), engine='auto', columns=None)
        frames.append(i_data)
    # DataFrame.append was removed in pandas 2.0; concatenate the collected frames instead
    data = pd.concat(frames) if frames else pd.DataFrame()
    # Give each row a unique index
    data.reset_index(drop=True, inplace=True)
    return data | 1967c629045b1c7a0bc11e27a37e70ca8c12d8b7 | 3,636,262 |
import datetime
def countdown(code, input):
""" .countdown <month> <day> <year> - displays a countdown to a given date. """
error = '{red}Please use correct format: %scountdown <month> <day> <year>' % code.prefix
text = input.group(2).strip()
if ' ' in text:
text = text.split()
elif '/' in text:
text = text.split('/')
elif '.' in text:
text = text.split('.')
else:
return code.say(error)
if len(text) != 3:
return code.say(error)
if not text[0].isdigit() or not text[1].isdigit() or not text[2].isdigit():
return code.say(error)
month, day, year = text
try:
diff = datetime.datetime(
int(year), int(month), int(day)) - datetime.datetime.today()
except ValueError:
return code.say('{red}Incorrect input!')
    output = []
    output.append(str(diff.days) + " day(s)")
    output.append(str(diff.seconds // 60 // 60) + " hour(s)")
    output.append(
        str(diff.seconds // 60 - diff.seconds // 60 // 60 * 60) + " minute(s)")
output.append(month + "/" + day + "/" + year)
code.say(' - '.join(output)) | e06aa49396c3348f9641889d2dc58ef19f65a821 | 3,636,263 |
def split_rdd(rdd):
"""
Separate a rdd into two weighted rdds train(70%) and test(30%)
:param rdd
"""
SPLIT_WEIGHT = 0.7
(rdd_train, rdd_test) = rdd.randomSplit([SPLIT_WEIGHT, 1 - SPLIT_WEIGHT])
return rdd_train, rdd_test | 082439fb41108da171610dc3d03ab1f8f9f021c5 | 3,636,264 |
def QuickSort(A, l, r):
"""
Arguments:
A -- total number list
l -- left index of input list
r -- right index of input list
Returns:
ASorted -- sorted list
cpNum -- Number of comparisons
"""
# Number of comparisons
cpNum = r - l
# Base case
if cpNum == 0:
return [A[l]], 0
elif cpNum < 0:
return [], 0
# Partition part
A[l], A[r] = A[r], A[l] # Swap the first and the last element
p = A[l]
i = l + 1
for j in range(l + 1, r + 1):
if A[j] < p:
A[j], A[i] = A[i], A[j]
i += 1
A[l], A[i-1] = A[i-1], A[l]
# Recursion call
ALeft, cpNumLeft = QuickSort(A, l, i-2)
ARight, cpNumRight = QuickSort(A, i, r)
ASorted = ALeft + [p] + ARight
cpNum = cpNum + cpNumLeft + cpNumRight
return ASorted, cpNum | 26092d222d93d8b931ab6f2d5c539ac5c9e00b2f | 3,636,265 |
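A quick usage example; the comparison count depends on the pivot positions encountered:
numbers = [3, 8, 2, 5, 1, 4, 7, 6]
sorted_numbers, comparisons = QuickSort(numbers, 0, len(numbers) - 1)
print(sorted_numbers)  # [1, 2, 3, 4, 5, 6, 7, 8]
print(comparisons)     # total number of comparisons performed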
import os
def get_cache_dir():
"""get directory to store data cached by application
"""
return os.path.join(get_userdata_dir(), CACHE_DIR) | 41aa21bb53e4b534cb251ae980a6b31bc6db1cc1 | 3,636,266 |
from typing import Counter
def checksum(input):
""" Checksum by counting items that have duplicates and/or triplicates and multiplying"""
checksum_twos = 0
checksum_threes = 0
    for box_id in input:
        c = list(Counter(box_id).values())
if 2 in c:
checksum_twos += 1
if 3 in c:
checksum_threes += 1
return checksum_threes * checksum_twos | 8ba72e795b5868a852ce0c9c234ca35088057538 | 3,636,267 |
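A worked example using the well-known Advent of Code 2018 day-2 sample: four IDs contain some letter exactly twice and three contain some letter exactly three times, so the checksum is 4 * 3 = 12:
ids = ["abcdef", "bababc", "abbcde", "abcccd", "aabcdd", "abcdee", "ababab"]
print(checksum(ids))  # 12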
import numpy.ma as ma
from numpy import isnan, nan, dot, diag
def Rescale(UnscaledMatrix, Scales):
"""Forces a matrix of raw (user-supplied) information
(for example, # of House Seats, or DJIA) to conform to
svd-appropriate range.
Practically, this is done by subtracting min and dividing by
scaled-range (which itself is max-min).
"""
    # Calculate multiplicative factors
InvSpan = []
for scale in Scales:
InvSpan.append(1 / float(scale["max"] - scale["min"]))
# Recenter
OutMatrix = ma.copy(UnscaledMatrix)
cols = UnscaledMatrix.shape[1]
for i in range(cols):
OutMatrix[:,i] -= Scales[i]["min"]
# Rescale
NaIndex = isnan(OutMatrix)
OutMatrix[NaIndex] = 0
OutMatrix = dot(OutMatrix, diag(InvSpan))
OutMatrix[NaIndex] = nan
return OutMatrix | 9201078f3395aa11e75529f01f8e6486409c1347 | 3,636,268 |
def winrate_of(node: sgf.Node) -> float:
"""
The winrate of the node/position is defined as winrate of the most visited child.
"""
max_visits = 0
winrate = 0
    variations = ([] if node.next is None else [node.next]) + node.variations
for child in variations:
if "B" in child.properties or "W" in child.properties:
try:
info = parse_comment(child.properties["C"][0])
if info[1] > max_visits:
max_visits = info[1]
winrate = info[0]
            except Exception:
                pass
if max_visits == 0:
return None
return winrate | d7893fdd0295f6b43fec258351c201ae8d789f1a | 3,636,269 |
def extract_kernel_version(kernel_img_path):
"""
Extracts the kernel version out of the given image path.
    The extraction logic is designed to closely mimic how the Zipl-configuration-to-BLS
    conversion script works, so that it is possible to identify potential issues with kernel
    images.
:param str kernel_img_path: The path to the kernel image.
:returns: Extracted kernel version from the given path
:rtype: str
"""
    # Mimic bash substitution used in the conversion script, see:
# https://github.com/ibm-s390-linux/s390-tools/blob/b5604850ab66f862850568a37404faa647b5c098/scripts/zipl-switch-to-blscfg#L168
if 'vmlinuz-' in kernel_img_path:
fragments = kernel_img_path.rsplit('/vmlinuz-', 1)
return fragments[1] if len(fragments) > 1 else fragments[0]
fragments = kernel_img_path.rsplit('/', 1)
return fragments[1] if len(fragments) > 1 else fragments[0] | 2f75b220ff3e68b8c2ae2a046b7c604a786b05b8 | 3,636,270 |
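For example, both image path shapes yield the expected suffix:
print(extract_kernel_version('/boot/vmlinuz-4.18.0-80.el8.s390x'))  # '4.18.0-80.el8.s390x'
print(extract_kernel_version('/boot/image-4.18.0-80.el8.s390x'))    # 'image-4.18.0-80.el8.s390x'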
def about(request):
""" About view """
    try:
        about = About.objects.get().description
    except Exception:
        about = "No information here yet."
return render(request, 'about_page.html', {'about': about}) | 60ea35c1c50f54c4c54ea207f48cd7805f27571a | 3,636,271 |
def concat_strings(string_list):
"""
Concatenate all the strings in possibly-nested string_list.
@param list[str]|str string_list: a list of strings
@rtype: str
>>> list_ = (["The", "cow", "goes", "moo", "!"])
>>> concat_strings(list_)
'The cow goes moo !'
>>> list_ = (["This", "sentence", "is actually", \
"constructed", ["from", ["other"], "smaller"], "strings"])
>>> concat_strings(list_)
'This sentence is actually constructed from other smaller strings'
"""
if isinstance(string_list, str):
# string_list is a str
return string_list
else:
return " ".join([concat_strings(elem) for elem in string_list]) | bbeb884e2cd4c689ce6e61c147558c993acc5f09 | 3,636,272 |
import numpy as np
def produce_grid(tuple_of_limits, grid_spacing):
"""Produce a 2D grid for the simulation system.
The grid is based on the tuple of Cartesian Coordinate limits calculated in
an earlier step.
Parameters
----------
tuple_of_limits : tuple
``x_min, x_max, y_min, y_max``
grid_spacing : float
grid size in all directions in ångström
Returns
-------
grid : array
``numpy.mgrid[x_min:x_max:grid_spacing, y_min:y_max:grid_spacing]``
"""
x_min, x_max, y_min, y_max = tuple_of_limits
grid = np.mgrid[x_min:x_max:grid_spacing, y_min:y_max:grid_spacing]
return grid | 9ce30e74e4740cdbde520eb71156c6ce4799304e | 3,636,273 |
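A small sketch: a 10 Å x 5 Å box with 1 Å spacing yields an array of shape (2, 10, 5), one layer each for the x and y coordinates:
grid = produce_grid((0.0, 10.0, 0.0, 5.0), 1.0)
print(grid.shape)  # (2, 10, 5)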
import numpy as np
from scipy import stats
def intensity_histogram_measures(regionmask, intensity):
    """Computes intensity-distribution features.
    This function computes features that describe the distribution characteristics of the intensity.
    Args:
        regionmask: binary image
        intensity: intensity image
    """
feat= Intensity_Histogram_Measures([np.percentile(intensity[regionmask], 0),
np.percentile(intensity[regionmask], 25),
np.percentile(intensity[regionmask], 50),
np.percentile(intensity[regionmask], 75),
np.percentile(intensity[regionmask], 100),
np.mean(intensity[regionmask]),
stats.mode(intensity[regionmask],axis = None)[0][0],
np.std(intensity[regionmask]),
stats.skew(intensity[regionmask]),
stats.kurtosis(intensity[regionmask])]
)
return feat | f1bcb7a3517555d68193c9e526d0016eee81d4b5 | 3,636,274 |
def match(input_character, final_answer):
"""
    :param input_character: str, the character guessed by the user, checked for
        matches against the final answer.
    :param final_answer: str, the final answer.
    :return: str, the matching result, consisting of '-' and matched letters.
"""
result = ""
for f in final_answer:
if f == input_character:
result += input_character
else:
result += '-'
if final_answer.find(input_character) != -1:
print('You are correct!')
else:
print('There is no ' + input_character + '\'s in the word.')
return result | 4323cd2eefa00126baad11576cdc9a29fe94ec0b | 3,636,275 |
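For instance, guessing 'a' against the answer 'banana' marks the three matching positions:
result = match('a', 'banana')  # prints 'You are correct!'
print(result)  # '-a-a-a'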
import os
import json
import astropy.units as u
import astropy.cosmology.units as cu
from astropy.cosmology import Cosmology
def read_json(filename, **kwargs):
"""Read JSON.
Parameters
----------
filename : str
**kwargs
Keyword arguments into :meth:`~astropy.cosmology.Cosmology.from_format`
Returns
-------
`~astropy.cosmology.Cosmology` instance
"""
# read
if isinstance(filename, (str, bytes, os.PathLike)):
with open(filename, "r") as file:
data = file.read()
else: # file-like : this also handles errors in dumping
data = filename.read()
mapping = json.loads(data) # parse json mappable to dict
# deserialize Quantity
with u.add_enabled_units(cu.redshift):
for k, v in mapping.items():
if isinstance(v, dict) and "value" in v and "unit" in v:
mapping[k] = u.Quantity(v["value"], v["unit"])
for k, v in mapping.get("meta", {}).items(): # also the metadata
if isinstance(v, dict) and "value" in v and "unit" in v:
mapping["meta"][k] = u.Quantity(v["value"], v["unit"])
return Cosmology.from_format(mapping, format="mapping", **kwargs) | c120ba1f231430e3c0ed44130b65f6a67b3facc2 | 3,636,276 |
def render_error(request, status=500, title=_('Oops!'),
                 err_msg=_('An error occurred')):
"""Render any error page with a given error code, title and text body
Title and description are passed through as-is to allow html. Make
sure no user input is contained therein for security reasons. The
description will be wrapped in <p></p> tags.
"""
return Response(render_template(request, 'mediagoblin/error.html',
{'err_code': status, 'title': title, 'err_msg': err_msg}),
status=status) | 26c5bc2a6699a1065bc492d8ca8c83d4146dae58 | 3,636,277 |
def affaire_spatial(request):
"""
Get modification affaire by affaire_fille
"""
# Check connected
if not check_connected(request):
raise exc.HTTPForbidden()
    results = request.dbsession.query(VAffaire).filter(
        VAffaire.date_cloture.is_(None)
    ).filter(
        VAffaire.date_envoi.is_(None)
    ).filter(
        VAffaire.abandon.is_(False)
    ).filter(
        VAffaire.localisation_e != 0
    ).filter(
        VAffaire.localisation_n != 0
    ).all()
affaires = []
counter = 0
for result in results:
affaires.append({
'type': 'Feature',
'id': counter,
'geometry': {
'type': 'Point',
'coordinates': [result.localisation_e, result.localisation_n]
},
'properties': {
'number': str(result.id)
}
})
counter += 1
return affaires | 1f1743765f0c044a1070b5ee3cc7a2328233c24a | 3,636,278 |
def get_cosets(big_galois: GaloisGroup, small_galois: GaloisGroup) -> SetOfCosets:
"""
Given a big group `big_galois` and a subgroup `small_galois`, return the cosets
of small_galois \\ big_galois.
Args:
big_galois: A `GaloisGroup` whose cosets to examine
small_galois: The acting subgroup of big_galois
    Returns:
        A collection of cosets. Each coset is a frozenset of `Permutation`s.
    """
return frozenset(frozenset(h * g for h in small_galois) for g in big_galois) | b312a490727f860f8e1423cf0ab7e877e8df6ba4 | 3,636,279 |
import argparse
def create_parser(args):
""" Function which add the command line arguments required for the cyclomatic complexity report parser"""
# Create the parser
cyclo_parser = argparse.ArgumentParser(description='cyclomatic complexity gate Parser')
# Add the arguments
cyclo_parser.add_argument('--cyclo', metavar='--c', type=int, help='cyclo benchmark')
return cyclo_parser.parse_args(args) | 46ddafdf458c20d323bc86974525f91320a33ae3 | 3,636,280 |
import calendar
import datetime
def get_month_day_range(date):
"""
For a date 'date' returns the start and end date for the month of 'date'.
Month with 31 days:
>>> date = datetime.date(2011, 7, 27)
>>> get_month_day_range(date)
(datetime.date(2011, 7, 1), datetime.date(2011, 7, 31))
Month with 28 days:
>>> date = datetime.date(2011, 2, 15)
>>> get_month_day_range(date)
(datetime.date(2011, 2, 1), datetime.date(2011, 2, 28))
"""
first_day = date.replace(day = 1)
last_day = date.replace(day = calendar.monthrange(date.year, date.month)[1])
return first_day, last_day | 610ff43b0e637afba780119c76181c6ff033a299 | 3,636,281 |
from dash.dependencies import Input, Output
def measure_of_risk_callback(app):
"""
Attaches the callback function for the component
rendered in the measure_of_risk function
Args:
app = the dash app
Returns:
None
"""
component_id = 'measure-risk'
@app.callback(
Output(component_id + 'out', 'children'),
[Input(component_id, 'value')])
def _callback(value):
options['Measure of risk'] = value
return '' | 6beabaae619a94db258d41e1f9062de4018fbf68 | 3,636,282 |
from typing import Union
from typing import Optional
from typing import Tuple
from typing import cast
from typing import List
def dot(
a: Union[float, ArrayLike],
b: Union[float, ArrayLike],
*,
dims: Optional[Tuple[int, int]] = None
) -> Union[float, Array]:
"""
Get dot product of simple numbers, vectors, and matrices.
Matrices will be detected and the appropriate logic applied
unless `dims` is provided. `dims` should simply describe the
number of dimensions of `a` and `b`: (2, 1) for a 2D and 1D array.
Providing `dims` will sidestep analyzing the matrix for a more
performant operation. Anything dimensions above 2 will be treated
as an ND x MD scenario and the actual dimensions will be extracted
regardless due to necessity.
"""
if dims is None or dims[0] > 2 or dims[1] > 2:
shape_a = shape(a)
shape_b = shape(b)
dims_a = len(shape_a)
dims_b = len(shape_b)
# Handle matrices of N-D and M-D size
        if dims_a and dims_b and (dims_a > 2 or dims_b > 2):
if dims_a == 1:
# Dot product of vector and a M-D matrix
cols1 = list(_extract_dims(cast(MatrixLike, b), dims_b - 2))
shape_c = shape_b[:-2] + shape_b[-1:]
return cast(
Matrix,
reshape(
[[_vector_dot(cast(VectorLike, a), cast(VectorLike, c)) for c in col] for col in cols1],
shape_c
)
)
else:
# Dot product of N-D and M-D matrices
# Resultant size: `dot(xy, yz) = xz` or `dot(nxy, myz) = nxmz`
cols2 = list(_extract_dims(cast(ArrayLike, b), dims_b - 2)) if dims_b > 1 else cast(ArrayLike, [[b]])
rows = list(_extract_dims(cast(ArrayLike, a), dims_a - 1))
m2 = [
[[sum(cast(List[float], multiply(row, c))) for c in cast(VectorLike, col)] for col in cols2]
for row in rows
]
shape_c = shape_a[:-1]
if dims_b != 1:
shape_c += shape_b[:-2] + shape_b[-1:]
return cast(Matrix, reshape(cast(Array, m2), shape_c))
else:
dims_a, dims_b = dims
# Optimize to handle arrays <= 2-D
if dims_a == 1:
if dims_b == 1:
# Dot product of two vectors
return _vector_dot(cast(VectorLike, a), cast(VectorLike, b))
elif dims_b == 2:
# Dot product of vector and a matrix
return cast(Vector, [_vector_dot(cast(VectorLike, a), col) for col in zipl(*cast(MatrixLike, b))])
elif dims_a == 2:
if dims_b == 1:
# Dot product of matrix and a vector
return cast(Vector, [_vector_dot(row, cast(VectorLike, b)) for row in cast(MatrixLike, a)])
elif dims_b == 2:
# Dot product of two matrices
return cast(
Matrix,
[[_vector_dot(row, col) for col in zipl(*cast(MatrixLike, b))] for row in cast(MatrixLike, a)]
)
# Trying to dot a number with a vector or a matrix, so just multiply
return multiply(a, b, dims=(dims_a, dims_b)) | 298f9ea31386eff1dee33aec4fa47a29fd5a6f20 | 3,636,283 |
import numpy as np
def read_float64(field: str) -> np.float64:
"""Read a float64."""
return np.float64(field) if field != "" else np.nan | f26da82fa22e79a370facad2d787ce3aee70723a | 3,636,284 |
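For example, empty fields become NaN while numeric text parses normally:
print(read_float64("3.14"))  # 3.14
print(read_float64(""))      # nan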
import pydot
def write_hypergraph(hgr, colored = False):
"""
Return a string specifying the given hypergraph in DOT Language.
@type hgr: hypergraph
@param hgr: Hypergraph.
@type colored: boolean
@param colored: Whether hyperedges should be colored.
@rtype: string
@return: String specifying the hypergraph in DOT Language.
"""
dotG = pydot.Dot()
    if 'name' not in dir(hgr):
dotG.set_name('hypergraph')
else:
dotG.set_name(hgr.name)
colortable = {}
colorcount = 0
# Add all of the nodes first
for node in hgr.nodes():
newNode = pydot.Node(str(node), hyper_node_type = 'hypernode')
dotG.add_node(newNode)
for hyperedge in hgr.hyperedges():
if (colored):
colortable[hyperedge] = colors[colorcount % len(colors)]
colorcount += 1
newNode = pydot.Node(str(hyperedge), hyper_node_type = 'hyperedge', \
color = str(colortable[hyperedge]), \
shape = 'point')
else:
newNode = pydot.Node(str(hyperedge), hyper_node_type = 'hyperedge')
dotG.add_node(newNode)
for link in hgr.links(hyperedge):
newEdge = pydot.Edge(str(hyperedge), str(link))
dotG.add_edge(newEdge)
return dotG.to_string() | 2e25eecd84ea0d8724c6f4618478e1cd7a7676d6 | 3,636,285 |
def sort_gtf(gtf_path, out_path):
"""Sorts a GTF file based on its chromosome, start position, line number.
    :param gtf_path: path to GTF file
    :type gtf_path: str
    :param out_path: path to write the sorted GTF file to
    :type out_path: str
    :return: path to sorted GTF file, set of chromosomes in GTF file
    :rtype: tuple
    """
logger.info('Sorting {}'.format(gtf_path))
gtf = GTF(gtf_path)
return gtf.sort(out_path) | 5cb1289b81a0c05a138cac5d2ae6213f46d47110 | 3,636,286 |
def leia_dinheiro(msg):
"""
-> Recebe um valor digitado pelo usuário e verifica se é um
valor númerico válido
:param msg: Mensagem a ser mostrada ao usuário
:return: Retorno o valor digitado pelo usuário caso seja válido
"""
while True:
num = input(msg).strip().replace(',', '.') # Substitui as vírgulas por pontos
if num.replace('.', '').isdigit(): # 'Exluí' os pontos
num = float(num)
break
else:
print(f'\033[1;31mERRO! \"{num}\" não é um preço válido.\033[m')
return num | aa8e21243009af1fde6d6c5e9cb611acff36369e | 3,636,287 |
from collections import OrderedDict
def format_analyse(parsed_tokens, to_1d_flag=False):
"""
入力
parsed_tokens # list(list(str)) : 変数毎の変数名/インデックスがtokenizedなトークンリスト
出力
res,dic # FormatNode,OrderedDict<str:VariableInformation> : フォーマット情報のノードと変数の情報を保持した辞書を同時に返す
"""
appearances = {}
dic = OrderedDict()
pos = 0
    # Record appearance positions and the min/max values seen for each index
for token in parsed_tokens:
idxs = token[1:]
varname = token[0]
if varname not in dic:
dic[varname] = VariableInformation(varname, len(idxs))
appearances[varname] = []
appearances[varname].append(pos)
for i, idx in enumerate(idxs):
dic[varname].indexes[i].reflesh_min(idx)
dic[varname].indexes[i].reflesh_max(idx)
pos += 1
    # Build the format nodes
processed = set()
root = FormatNode(pointers=[])
for i in range(len(parsed_tokens)):
varname = parsed_tokens[i][0]
if varname in processed:
continue
dim = len(dic[varname].indexes)
if dim == 2 and to_1d_flag:
dic[varname].indexes = dic[varname].indexes[:-1]
dim = 1
if dim == 0:
root.pointers.append(FormatNode(varname))
processed.add(varname)
elif dim == 1:
if len(appearances[varname]) >= 2:
            # assume the appearances form an arithmetic sequence
span = appearances[varname][1] - appearances[varname][0]
elif len(appearances[varname]) == 1:
            # or a single occurrence
span = 1
zipped_varnames = [token[0] for token in parsed_tokens[i:i + span]]
for vname in zipped_varnames:
processed.add(vname)
root.pointers.append(
FormatNode(pointers=[FormatNode(varname=vname) for vname in zipped_varnames],
index=dic[varname].indexes[0]
)
)
elif dim == 2:
processed.add(varname)
inner_node = FormatNode(pointers=[FormatNode(
varname=varname)], index=dic[varname].indexes[1])
root.pointers.append(FormatNode(
pointers=[inner_node], index=dic[varname].indexes[0]))
else:
raise NotImplementedError
return root, dic | 76d42cc83f26c31273ae6aadd42479861c49ff69 | 3,636,288 |
import pandas as pd
def KAMA(df: pd.DataFrame, window: int = 10, pow1: int = 2, pow2: int = 30) -> pd.DataFrame:
"""
Kaufman's Adaptive Moving Average (KAMA) is an indicator that
indicates both the volatility and trend of the market.
"""
df_with_signal = df.copy()
df_with_signal["signal"] = kama(df["close"], window, pow1, pow2)
return df_with_signal | 79f23b1c840a4bc0860d9bc84fceceedb1c1b6b3 | 3,636,289 |
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
def cast_to_str(obj):
"""Return a string representation of a Seq or SeqRecord.
Args:
obj (str, Seq, SeqRecord): Biopython Seq or SeqRecord
Returns:
str: String representation of the sequence
"""
if isinstance(obj, str):
return obj
if isinstance(obj, Seq):
return str(obj)
if isinstance(obj, SeqRecord):
return str(obj.seq)
else:
raise ValueError('Must provide a string, Seq, or SeqRecord object.') | cd4100c6ef41b9ff33346349f6c70ac30e9ccd20 | 3,636,290 |
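A usage sketch with Biopython objects:
record = SeqRecord(Seq("ATGC"), id="demo")
print(cast_to_str("ATGC"))       # 'ATGC'
print(cast_to_str(Seq("ATGC")))  # 'ATGC'
print(cast_to_str(record))       # 'ATGC'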
def _octet_bits(o):
"""
Get the bits of an octet.
:param o: The octets.
:return: The bits as a list in LSB-to-MSB order.
:rtype: list
"""
if not isinstance(o, int):
raise TypeError("o should be an int")
if not (0 <= o <= 255):
raise ValueError("o should be between 0 and 255 inclusive")
bits = [0] * 8
for i in range(8):
if 1 == o & 1:
bits[i] = 1
o = o >> 1
return bits | f472a2ab65702e59439b7693260abf040d4e7742 | 3,636,291 |
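For example, the bits come back LSB-first:
print(_octet_bits(5))    # [1, 0, 1, 0, 0, 0, 0, 0]
print(_octet_bits(128))  # [0, 0, 0, 0, 0, 0, 0, 1]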
from aiida.common.links import LinkType
from aiida.orm.data import Data
from aiida.orm.calculation import Calculation
from aiida.orm.calculation.job import JobCalculation
from aiida.orm.calculation.work import WorkCalculation
from aiida.orm.calculation.inline import InlineCalculation
import hashlib
import os
def _collect_calculation_data(calc):
"""
Recursively collects calculations from the tree, starting at given
calculation.
"""
calcs_now = []
for d in calc.get_inputs(node_type=Data, link_type=LinkType.INPUT):
for c in d.get_inputs(node_type=Calculation, link_type=LinkType.CREATE):
calcs = _collect_calculation_data(c)
calcs_now.extend(calcs)
files_in = []
files_out = []
this_calc = {
'uuid' : calc.uuid,
'files': [],
}
if isinstance(calc, JobCalculation):
retrieved_abspath = calc.get_retrieved_node().get_abs_path()
files_in = _collect_files(calc._raw_input_folder.abspath)
files_out = _collect_files(os.path.join(retrieved_abspath, 'path'))
this_calc['env'] = calc.get_environment_variables()
        stdout_name = '{}.out'.format(aiida_executable_name)
        while stdout_name in [f['name'] for f in files_in + files_out]:
            stdout_name = '_{}'.format(stdout_name)
        stderr_name = '{}.err'.format(aiida_executable_name)
        while stderr_name in [f['name'] for f in files_in + files_out]:
            stderr_name = '_{}'.format(stderr_name)
if calc.get_scheduler_output() is not None:
files_out.append({
'name' : stdout_name,
'contents': calc.get_scheduler_output(),
'md5' : hashlib.md5(calc.get_scheduler_output()).hexdigest(),
'sha1' : hashlib.sha1(calc.get_scheduler_output()).hexdigest(),
'role' : 'stdout',
'type' : 'file',
})
this_calc['stdout'] = stdout_name
if calc.get_scheduler_error() is not None:
files_out.append({
'name' : stderr_name,
'contents': calc.get_scheduler_error(),
'md5' : hashlib.md5(calc.get_scheduler_error()).hexdigest(),
'sha1' : hashlib.sha1(calc.get_scheduler_error()).hexdigest(),
'role' : 'stderr',
'type' : 'file',
})
this_calc['stderr'] = stderr_name
elif isinstance(calc, InlineCalculation):
# Calculation is InlineCalculation
python_script = _inline_to_standalone_script(calc)
files_in.append({
'name' : inline_executable_name,
'contents': python_script,
'md5' : hashlib.md5(python_script).hexdigest(),
'sha1' : hashlib.sha1(python_script).hexdigest(),
'type' : 'file',
})
shell_script = '#!/bin/bash\n\nverdi run {}\n'.format(inline_executable_name)
files_in.append({
'name' : aiida_executable_name,
'contents': shell_script,
'md5' : hashlib.md5(shell_script).hexdigest(),
'sha1' : hashlib.sha1(shell_script).hexdigest(),
'type' : 'file',
})
elif isinstance(calc, WorkCalculation):
# We do not know how to recreate a WorkCalculation so we pass
pass
else:
raise ValueError('calculation is of an unexpected type {}'.format(type(calc)))
for f in files_in:
if os.path.basename(f['name']) == aiida_executable_name:
f['role'] = 'script'
else:
f['role'] = 'input'
this_calc['files'].append(f)
for f in files_out:
if os.path.basename(f['name']) != calc._SCHED_OUTPUT_FILE and \
os.path.basename(f['name']) != calc._SCHED_ERROR_FILE:
if 'role' not in f.keys():
f['role'] = 'output'
this_calc['files'].append(f)
calcs_now.append(this_calc)
return calcs_now | f63a22ee3bcecced6503b27e894288241d8d9d3f | 3,636,292 |
import networkx
def to_graph(l):
"""
Credit: Jochen Ritzel
https://stackoverflow.com/questions/4842613/merge-lists-that-share-common-elements
"""
G = networkx.Graph()
for part in l:
# each sublist is a bunch of nodes
G.add_nodes_from(part)
# it also implies a number of edges:
G.add_edges_from(to_edges(part))
return G | da50880d4b056ffcf538b305dc418cdb11d2f41f | 3,636,293 |
def convert_mcmc_labels(param_keys, unit_labels=False):
"""Returns sequence of formatted MCMC parameter labels
"""
keys = list(param_keys)
for i, key in enumerate(keys):
if 'qb' in key:
label_str = r'$Q_\mathrm{b,' + f'{key[-1]}' + '}$'
elif 'mdot' in key:
label_str = rf'$\dot{{m}}_{key[-1]}$'
elif 'Mdot' in key:
label_str = rf'$\dot{{M}}_{key[-1]}$'
else:
if unit_labels:
label_str = full_label(key)
else:
label_str = quantity_label(key)
keys[i] = label_str
return keys | d4059cd0b43f7968d0a59e7f760d9a226c4e6af1 | 3,636,294 |
def superuser_exempt(filter_authorization_verification):
"""Decorator to exempt any superuser from filtering, authorization, or verification functions."""
def superuser_exempt_fun(request, arg):
if request.user.is_superuser:
if isinstance(arg, QuerySet):
return arg
else:
return True
return filter_authorization_verification(request, arg)
return superuser_exempt_fun | 1fa3752971be8ba291fd454c0704673b6687af9d | 3,636,295 |
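A usage sketch; `Report` is a hypothetical Django model used only for illustration:
# Superusers get the queryset back unfiltered; everyone else is filtered by ownership.
@superuser_exempt
def filter_visible_reports(request, queryset):
    return queryset.filter(owner=request.user)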
import os
def relName(path, cwd=None, root=None):
"""Return pathname relative to `cwd`.
If possible, returns a relative pathname for path. The rules are:
1. If the file is in or below `cwd` then a simple relative name is
returned. For example: 'dir/fred.c'.
2. If both the file and `cwd` are in or below `root` then a relative
path is also generated, but it will contain double dots. For
example: '../../dir/fred.c'.
3. If neither (1) or (2) applies then the absolute path is returned.
    :Param cwd:
        Used as the current directory. It defaults to ``os.getcwd()``.
:Param root:
Defines the root directory, which determines whether a relative
pathname can be returned. It defaults to ``projectRoot``.
"""
relRoot = os.path.normpath((root or projectRoot)) + os.sep
cwd = os.path.abspath((cwd or os.getcwd())) + os.sep
if path == cwd or path == cwd[:-1]:
return "."
if path.startswith(cwd):
# The relative name is below the CWD, so we simply strip off the
# leading parts.
return path[len(cwd):]
if path.startswith(relRoot) and cwd.startswith(relRoot):
# The path is below the nominal root but parallel to the CWD. We need
# to add some '../' parts.
relToRootPath = path[len(relRoot):]
relToRootCWD = cwd[len(relRoot):-1]
        count = 0
        while count < 1000 and relToRootCWD and relToRootCWD != os.sep:
            relToRootCWD, b = os.path.split(relToRootCWD)
            relToRootPath = ".." + os.sep + relToRootPath
            count += 1
        assert count < 1000
return relToRootPath
return path | 59b096e1e080441177a6a4d72c2e8c7a2e30b3df | 3,636,296 |
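A sketch of the three rules, assuming projectRoot == '/home/user/project':
relName('/home/user/project/src/dir/fred.c', cwd='/home/user/project/src')
# -> 'dir/fred.c'          (rule 1: file below cwd)
relName('/home/user/project/dir/fred.c', cwd='/home/user/project/src/sub')
# -> '../../dir/fred.c'    (rule 2: parallel to cwd, both below projectRoot)
relName('/etc/hosts', cwd='/home/user/project')
# -> '/etc/hosts'          (rule 3: absolute path returned unchanged)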
def paf_to_lastz(job, paf_file, sort_secondaries=True):
"""
Makes lastz output using paftools.js. Also splits the input paf_file into two files
in the output, one for the primary and the other for secondary.
sort_secondaries bool, if true, will cause fxn to return two files instead of one.
"""
primary = list()
primary_mapqs = list()
secondary = list()
secondary_mapqs = list()
if not sort_secondaries:
print("putting all mappings into primary")
with open(job.fileStore.readGlobalFile(paf_file)) as inf:
for line in inf:
primary.append(line)
primary_mapqs.append(line.split()[11])
else:
# print("in paf_to_Lastz - looking for the cg tag.")
with open(job.fileStore.readGlobalFile(paf_file)) as inf:
for line in inf:
if "tp:A:P" in line or "tp:A:I" in line:
#then the line is a primary output file.
primary.append(line)
primary_mapqs.append(line.split()[11])
else:
#then the line is a secondary output file.
secondary.append(line)
secondary_mapqs.append(line.split()[11])
# write output to files; convert to lastz:
lines = [primary, secondary]
mapqs = [primary_mapqs, secondary_mapqs]
sort_files = [job.fileStore.getLocalTempFile() for i in range(len(lines))]
paftool_files = [job.fileStore.getLocalTempFile() for i in range(len(lines))]
fixed_paftool_files = [job.fileStore.getLocalTempFile() for i in range(len(lines))]
out_files = [job.fileStore.writeGlobalFile(job.fileStore.getLocalTempFile()) for i in range(len(lines))]
print("lines in primary:", len(lines[0]))
print("len(lines in secondary:", len(lines[1]))
for i in range(len(lines)):
with open(sort_files[i], "w") as sortf:
# convert list to file for paftools input
sortf.writelines(lines[i])
cactus_call(parameters=["paftools.js", "view", "-f", "lastz-cigar", sort_files[i]], outfile=paftool_files[i])
fix_negative_strand_mappings(paftool_files[i], fixed_paftool_files[i])
add_original_mapqs( mapqs[i], fixed_paftool_files[i], job.fileStore.readGlobalFile(out_files[i]))
# check that the lines going into paftools.js are in same order as lines going out.
with open(job.fileStore.readGlobalFile(out_files[0])) as inf:
i = 0
for line in inf:
#comparing primary from paf to final lastz output.
paf_parsed = lines[0][i].split()
lastz_parsed = line.split()
if (lastz_parsed[3] == "+" and paf_parsed[2] != lastz_parsed[1]) or (lastz_parsed[3] == "-" and paf_parsed[2] != lastz_parsed[2]):
raise ValueError("Lines differ between paf and paftools.js lastz output! Paftools.js may be acting in an unexpected manner. paf line: " + lines[0][i] + " lastz line " + line)
i += 1
if not sort_secondaries:
return out_files[0]
else:
return out_files | 3bde7a90c71452bdc189e20bbc1e220179c2d35d | 3,636,297 |
from mindquantum import Circuit
def u1(lambd, q):
"""Openqasm u1 gate."""
return Circuit().rz(lambd, q) | 723035f9e3822e1a7ae385fba5bee51f457936a8 | 3,636,298 |
def get_simple_object(key='slug', model=None, self=None):
"""
    get_simple_object() => Retrieve an object instance.
    params => key, model, self
    return => object (instance)
    """
try:
if key == 'id':
id = self.kwargs['id']
instance = model.objects.get(id=id)
else:
slug = self.kwargs['slug']
instance = model.objects.get(slug=slug)
except model.DoesNotExist:
raise Http404('Not found!!!')
except model.MultipleObjectsReturned:
if key == 'id':
id = self.kwargs['id']
instance = model.objects.filter(id=id).first()
else:
slug = self.kwargs['slug']
instance = model.objects.filter(slug=slug).first()
    except Exception:
raise Http404("Something went wrong !!!")
return instance | 218a742e8652b3edebd6be9676e10c7b5dd709ab | 3,636,299 |