| content | id |
|---|---|
def kernel_primitive_zhao_vec(x, s0=0.08333, theta=0.242):
"""
Calculates the primitive of the Zhao kernel for given values.
Optimized using nd-arrays and vectorization.
:param x: points to evaluate, should be a nd-array
:param s0: initial reaction time
:param theta: empirically determined constant
    (the normalization constant c0 is computed internally from s0 and theta)
:return: primitives evaluated at given points
"""
c0 = 1.0 / s0 / (1 - 1.0 / -theta)
res = np.copy(x)
res[x < 0] = 0
res[(x <= s0) & (x >= 0)] = c0 * res[(x <= s0) & (x >= 0)]
res[x > s0] = c0 * (s0 + (s0 * (1 - (res[x > s0] / s0) ** -theta)) / theta)
return res
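# Illustrative usage sketch (added for clarity, not part of the original
# snippet); assumes numpy is imported as np, as the function body requires.
x_demo = np.array([-1.0, 0.05, 0.5])
print(kernel_primitive_zhao_vec(x_demo))  # negative inputs evaluate to 0.0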
| 13,500
|
def capture_output(function, *args):
""" captures the printed output from a function """
@contextmanager
def captured_output():
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
with captured_output() as (out, err):
function(*args)
    if err.getvalue():  # anything written to stderr indicates a failure
        return False
return out.getvalue()
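# Illustrative usage sketch (added for clarity, not part of the original
# snippet); assumes sys, StringIO and contextmanager are imported, as the
# function itself requires.
def _demo_greet(name):
    print("hello", name)

print(capture_output(_demo_greet, "world"))  # -> "hello world\n"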
| 13,501
|
def enable_pretty_logging_at_debug(
logger,
level,
log_file="",
backupCount=10,
maxBytes=10000000):
"""Turns on formatted logging output only at DEBUG level
"""
if level == logging.DEBUG:
enable_pretty_logging(logger, level, log_file, backupCount, maxBytes)
else:
logger.addHandler(logging.NullHandler())
| 13,502
|
def scaled_dot_product_attention(q, k, v, mask):
"""Calculate the attention weights.
q, k, v must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
    The mask has different shapes depending on its type (padding or look ahead)
but it must be broadcastable for addition.
Args:
q: query shape == (..., seq_len_q, depth)
k: key shape == (..., seq_len_k, depth)
v: value shape == (..., seq_len_v, depth_v)
mask: Float tensor with shape broadcastable to (..., seq_len_q, seq_len_k). Defaults to None.
Returns:
output, attention_weights
"""
matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)
# scale matmul_qk
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
# add the mask to the scaled tensor.
if mask is not None:
scaled_attention_logits += (mask * -1e9)
# softmax is normalized on the last axis (seq_len_k) so that the scores
# add up to 1.
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k)
output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)
return output, attention_weights
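# Illustrative shape check (added for clarity, not part of the original
# snippet); assumes TensorFlow 2.x is imported as tf.
q_demo = tf.random.uniform((1, 3, 4))   # (batch, seq_len_q, depth)
k_demo = tf.random.uniform((1, 5, 4))   # (batch, seq_len_k, depth)
v_demo = tf.random.uniform((1, 5, 2))   # (batch, seq_len_v, depth_v)
out_demo, attn_demo = scaled_dot_product_attention(q_demo, k_demo, v_demo, mask=None)
print(out_demo.shape, attn_demo.shape)  # (1, 3, 2) (1, 3, 5)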
| 13,503
|
async def ban(message: types.Message):
"""
Заблокировать пользователя.
"""
try:
name = message.reply_to_message.from_user.full_name
user_id = message.reply_to_message.from_user.id
        if user_id == BOT_ID:  # Attempt to ban the bot itself
await bot.send_message(message.chat.id, random.choice(random_mess))
return
split_message = message.text.split()[1:]
time_ban = split_message[0]
time_calc = calculate_time(time_ban)
        # Both a reason and a duration were given - ban for the specified time
if len(split_message) >= 2 and time_ban[:-1].isdigit():
cause = message.text.split(time_ban)[-1]
until = math.floor(time.time()) + time_calc[0] * 60
await bot.kick_chat_member(message.chat.id,
message.reply_to_message.from_user.id,
until_date=until)
await bot.send_message(message.chat.id,
f'[{name}](tg://user?id={user_id}) забанен на {str(time_calc[0])} {time_calc[1]}\n'
f'Причина: {italic(cause)}.')
        # Only a reason was given - ban permanently
elif not split_message[0][:-1].isdigit():
cause = message.text[5:]
await bot.kick_chat_member(message.chat.id, user_id)
await bot.send_message(message.chat.id,
f'[{name}](tg://user?id={user_id}) забанен навсегда.\n'
f'Причина: {italic(cause)}.')
        # Only a duration was given - report an error
else:
raise AttributeError
except (AttributeError, IndexError, ValueError, TypeError):
sent_m = await bot.send_message(message.chat.id, text_messages['wrong_ban_syntax'])
call_later(15, bot.delete_message, sent_m.chat.id, sent_m.message_id, loop=loop)
| 13,504
|
def drop_tables(config_name=None):
"""
Drop all tables despite existing constraints
Source https://bitbucket.org/zzzeek/sqlalchemy/wiki/UsageRecipes/DropEverything # noqa E501
:param config_name: a dict key which specifies which Config class to select
in config.config dict. The Config class encapsulates all db parameters such
as user, pw, host, port, and name of the db. See config.py for more info.
"""
config = _select_config(config_name)
engine = create_engine(config.SQLALCHEMY_DATABASE_URI)
conn = engine.connect()
# the transaction only applies if the DB supports
# transactional DDL, i.e. Postgresql, MS SQL Server
trans = conn.begin()
inspector = reflection.Inspector.from_engine(engine)
# gather all data first before dropping anything.
# some DBs lock after things have been dropped in
# a transaction.
metadata = MetaData()
tbs = []
all_fks = []
for table_name in inspector.get_table_names():
fks = []
for fk in inspector.get_foreign_keys(table_name):
if not fk['name']:
continue
fks.append(
ForeignKeyConstraint((), (), name=fk['name'])
)
t = Table(table_name, metadata, *fks)
tbs.append(t)
all_fks.extend(fks)
for fkc in all_fks:
conn.execute(DropConstraint(fkc))
for table in tbs:
conn.execute(DropTable(table))
trans.commit()
| 13,505
|
def train_segmentation_model(
TRAIN_IMG_PATH: str,
TRAIN_LABELS_PATH: str,
VAL_IMG_PATH: str,
VAL_LABELS_PATH: str,
model_save_name: str = "segmentation_model.pt",
) -> Module:
"""The approach which has been used for training best categorization model
as defined and described in the given report.
Parameters
----------
TRAIN_IMG_PATH : str
Path to raw train images.
TRAIN_LABELS_PATH : str
Path to train labels.
VAL_IMG_PATH : str
Path to raw validation images.
    VAL_LABELS_PATH : str
        Path to validation labels.
model_save_name : str
Name of the trained model.
Returns
-------
Module
Trained model.
"""
train_dataset = SegmentationDataset(
TRAIN_IMG_PATH, TRAIN_LABELS_PATH, TRANSFORMATIONS_SEG, TRANSFORMATIONS_TORCH
)
val_dataset = SegmentationDataset(
VAL_IMG_PATH, VAL_LABELS_PATH, TRANSFORMATIONS_SEG, TRANSFORMATIONS_TORCH
)
train_loader = DataLoader(train_dataset, batch_size=2, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=2, shuffle=False)
torch_device = device("cuda") if cuda.is_available() else device("cpu")
model = Unet(CATEGORIES)
model.to(torch_device)
loss = CrossEntropyLoss()
optimizer = RMSprop(model.parameters(), lr=0.0001, momentum=0.99)
scheduler = ReduceLROnPlateau(optimizer, patience=5)
model = train_model(
model,
train_loader,
val_loader,
loss,
optimizer,
torch_device,
scheduler,
EPOCHS,
)
    save(model.state_dict(), model_save_name)
    return model
| 13,506
|
def cvCascades(argv=sys.argv[1:]):
"""Control the OpenCV cascade Examples.
Please see :meth:`cv2ArgumentParser <pycharmers.utils.argparse_utils.cv2ArgumentParser>` for arguments.
Note:
When you run from the command line, execute as follows::
$ cv-cascades --cam 0 --radio-width 200
"""
parser = cv2ArgumentParser(prog="cv-cascades", description="OpenCV cascade examples", add_help=True)
args = parser.parse_args(argv)
# Collect All cascades.
cascade_names, cascades, states = [],[],[]
for name,value in OPENCV_CASCADES.items():
m = re.match(pattern=r"^haarcascades:haarcascade_(.+)$", string=name)
if m is not None:
try:
cascade = cascade_creator(cascade=name)
cascades.append(cascade)
states.append(len(states)==0)
cascade_names.append(m.group(1))
except Exception as e:
print(name, e)
project = cv2Project(args=args, cascade_names=cascade_names)
def func(frame, monitor, gui_x, frame_height, cascade_names, **kwargs):
cvui.text(where=monitor, x=gui_x+20, y=5, text="[Cascade List]")
idx = cvui.radiobox(where=monitor, x=gui_x, y=25, labels=cascade_names, states=states)
cascade = cascades[idx]
name = cascade_names[idx]
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
for bbox in cascade.detectMultiScale(gray):
draw_bboxes_xywh(frame=frame, bboxes=bbox, infos=[{"color":cv2GREEN, "text": name}])
return frame
project.wrap(func=func)
| 13,507
|
def new_creator_report(file_obj, keys, source):
"""Add CrossRef creators to existing Eprints record"""
print(f"Processing {len(keys)} eprint records for creators")
all_creators = []
if source.split(".")[-1] == "ds":
dot_paths = ["._Key", ".creators.items", ".related_url.items"]
labels = ["eprint_id", "items", "urls"]
all_metadata = get_records(dot_paths, "dois", source, keys, labels)
for metadata in all_metadata: # , redirect_stdout=True):
creators = []
key = metadata["eprint_id"]
for item in metadata["urls"]:
if "url" in item:
url = item["url"].strip()
if "type" in item:
itype = item["type"].strip().lower()
if "description" in item:
description = item["description"].strip().lower()
if itype == "doi" and description == "article":
doi = url
break
doi = doi.split("doi.org/")[1]
url = (
"https://api.crossref.org/works/" + doi + "?mailto=library@caltech.edu"
)
response = requests.get(url)
crossref_a = response.json()["message"]["author"]
# Build up existing creator list
ex_creators = {}
existing = metadata["items"]
for author in existing:
ex_creators[author["name"]["family"]] = author
for author in crossref_a:
crossref_needed = True
if "family" in author:
if author["family"] in ex_creators:
existing = ex_creators[author["family"]]
existing_given = existing["name"]["given"].replace(
"\u2009", " "
)
author_given = author["given"].replace("\u2009", " ")
if author_given == existing_given:
creators.append(existing)
crossref_needed = False
elif author_given.replace(".", "") == existing_given.replace(
".", ""
):
creators.append(existing)
crossref_needed = False
elif author_given[0] == existing_given[0]:
print(
f"""Possible data loss from CaltechAUTHORS in
record {key}: {existing} Crossref:
{author}"""
)
if crossref_needed:
if "family" in author:
creators.append(
{
"name": {
"family": author["family"],
"given": author["given"],
}
}
)
else:
creators.append({"name": {"family": author["name"]}})
all_creators.append({"id": key, "creators": creators})
json.dump(all_creators, file_obj, indent=4, ensure_ascii=False)
| 13,508
|
async def shutdown():
""" Close all open database connections """
global engines
for engine in engines.values():
engine.close()
await asyncio.gather(*[engine.wait_closed() for engine in engines.values()])
| 13,509
|
def extract_data(state: str, data) -> List[List[Any]]:
"""
Collects
"""
try:
extracted = []
for sample in data['_return']['traces']:
for obs in sample['trace']:
# TODO-Detail - put detail re the purpose of obs['q'] - I don't know what/why this
# logic exists, it's obviously to sanitise data but unclear on what/why
# TODO-idiosyncratic: was < 999 prior to refactor, this means that 998 is the max
# accepted number, this would presumably be 999, but I can't say for sure
if int(obs['q']) >= 999:
continue
obsdate = datetime.datetime.strptime(str(obs['t']), '%Y%m%d%H%M%S').date()
objRow = [state, sample['site'], 'WATER', obsdate, obs['v'], obs['q']]
extracted.append(objRow)
except KeyError:
extracted = []
return extracted
| 13,510
|
def sort(ctx, targets="."):
"""Sort module imports."""
print("sorting imports ...")
args = ["isort", "-rc", "--atomic", targets]
ctx.run(" ".join(args))
| 13,511
|
def merge(input_list: List, low: int, mid: int, high: int) -> List:
"""
sorting left-half and right-half individually
then merging them into result
"""
result = []
left, right = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0))
input_list[low : high + 1] = result + left + right
return input_list
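# Illustrative usage sketch (added for clarity, not part of the original
# snippet): merge the two already-sorted halves [1, 4, 7] and [2, 3, 9].
demo_list = [1, 4, 7, 2, 3, 9]
print(merge(demo_list, low=0, mid=3, high=5))  # -> [1, 2, 3, 4, 7, 9]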
| 13,512
|
def fc_caps(activation_in,
pose_in,
ncaps_out,
name='class_caps',
weights_regularizer=None):
"""Fully connected capsule layer.
"The last layer of convolutional capsules is connected to the final capsule
layer which has one capsule per output class." We call this layer 'fully
connected' because it fits these characteristics, although Hinton et al. do
  not use this terminology in the paper.
See Hinton et al. "Matrix Capsules with EM Routing" for detailed description.
Author:
Ashley Gritzman 27/11/2018
Args:
activation_in:
(batch_size, child_space, child_space, child_caps, 1)
(64, 7, 7, 8, 1)
pose_in:
(batch_size, child_space, child_space, child_caps, 16)
(64, 7, 7, 8, 16)
ncaps_out: number of class capsules
name:
weights_regularizer:
Returns:
activation_out:
score for each output class
(batch_size, ncaps_out)
(64, 5)
pose_out:
pose for each output class capsule
(batch_size, ncaps_out, 16)
(64, 5, 16)
"""
with tf.variable_scope(name) as scope:
# Get shapes
shape = pose_in.get_shape().as_list()
batch_size = shape[0]
child_space = shape[1]
child_caps = shape[3]
with tf.variable_scope('v') as scope:
# In the class_caps layer, we apply same multiplication to every spatial
# location, so we unroll along the batch and spatial dimensions
# (64, 5, 5, 32, 16) -> (64*5*5, 32, 16)
pose = tf.reshape(
pose_in,
shape=[batch_size * child_space * child_space, child_caps, 16])
activation = tf.reshape(
activation_in,
shape=[batch_size * child_space * child_space, child_caps, 1],
name="activation")
    # (64*5*5, 32, 16) -> (64*5*5, 32, 5, 16)
votes = utl.compute_votes(pose, ncaps_out, weights_regularizer)
    # (64*5*5, 32, 5, 16)
assert (
votes.get_shape() ==
[batch_size * child_space * child_space, child_caps, ncaps_out, 16])
logger.info('class_caps votes original shape: {}'
.format(votes.get_shape()))
with tf.variable_scope('coord_add') as scope:
# (64*5*5, 32, 5, 16)
votes = tf.reshape(
votes,
[batch_size, child_space, child_space, child_caps, ncaps_out,
votes.shape[-1]])
votes = coord_addition(votes)
with tf.variable_scope('routing') as scope:
# Flatten the votes:
    # Combine the 4 x 4 spatial dimensions to appear as one spatial dimension with many capsules.
# [64*5*5, 16, 5, 16] -> [64, 5*5*16, 5, 16]
votes_flat = tf.reshape(
votes,
shape=[batch_size, child_space * child_space * child_caps,
ncaps_out, votes.shape[-1]])
activation_flat = tf.reshape(
activation,
shape=[batch_size, child_space * child_space * child_caps, 1])
spatial_routing_matrix = utl.create_routing_map(child_space=1, k=1, s=1)
logger.info('class_caps votes in to routing shape: {}'
.format(votes_flat.get_shape()))
pose_out, activation_out = em.em_routing(votes_flat,
activation_flat,
batch_size,
spatial_routing_matrix)
activation_out = tf.squeeze(activation_out, name="activation_out")
pose_out = tf.squeeze(pose_out, name="pose_out")
logger.info('class_caps activation shape: {}'
.format(activation_out.get_shape()))
logger.info('class_caps pose shape: {}'.format(pose_out.get_shape()))
tf.summary.histogram("activation_out", activation_out)
return activation_out, pose_out
| 13,513
|
def drawKernelIds00(ax, layer, inputSize, outputSize, fontsize, markersize,
synGroupColors):
"""Draw the kernel ids into an existing figure.
The displayed ids are not interleaved and are not grouped into partitions.
The ids are color-coded according to the unique synapse group they belong
to.
:param plt.Axes ax: Matplotlib axes.
:param Layer layer: The layer to visualize.
:param int inputSize: Number of input neurons.
:param int outputSize: Number of output neurons.
:param int fontsize: Fontsize of numeric values in plot.
:param int markersize: Size of the squares drawn at each id location.
:param list synGroupColors: Color scheme to distinguish synapse groups.
"""
ax.clear()
ax.axis('off')
ax.set_xlim(-0.5, inputSize)
ax.set_ylim(-0.5, outputSize)
ax.invert_yaxis()
kMapFull = np.zeros((outputSize, inputSize), int)
synGroupIdsFull = -np.ones((outputSize, inputSize), int)
for relCoreId, partition in enumerate(layer.partitions):
kMapInterleaved = np.zeros((partition.sizeInterleaved, inputSize), int)
synGroupIdsInterleaved = -np.ones((partition.sizeInterleaved,
inputSize), int)
for inputAxonGroup in partition.inputAxonGroups:
for relSrcId, srcId in enumerate(inputAxonGroup.srcNodeIds):
for synEntry in inputAxonGroup.synGroup.synEntries[relSrcId]:
# Skip recurrent connections used in soft reset.
if synEntry.synFmt.softReset:
continue
destIds = inputAxonGroup.cxBase + synEntry.getCxIds()
kMapInterleaved[destIds, srcId] = synEntry.kernelIds
synGroupIdsInterleaved[destIds, srcId] = \
inputAxonGroup.synGroup.id
# Remove extra compartments from soft reset,
lim = outputSize if partition.resetMode == 'soft' else None
# Undo interleaving.
permutedDestCxIdxs = partition.compartmentGroup.cxIds
relToAbsDestCxIdxMap = partition.compartmentGroup.relToAbsDestCxIdxMap
relToAbsDestCxIdxMap = relToAbsDestCxIdxMap[:lim]
kMapCore = kMapInterleaved[permutedDestCxIdxs]
kMapFull[relToAbsDestCxIdxMap] = kMapCore[:lim]
synGroupIdsCore = synGroupIdsInterleaved[permutedDestCxIdxs]
synGroupIdsFull[relToAbsDestCxIdxMap] = synGroupIdsCore[:lim]
# Draw colors.
y, x = np.nonzero(synGroupIdsFull >= 0)
colors = synGroupIdsFull[y, x]
cmap = synGroupColors
ax.scatter(x, y, c=colors, cmap=cmap, marker='s', s=markersize)
# Draw kernelIds.
for i in range(kMapFull.shape[0]):
for j in range(kMapFull.shape[1]):
val = kMapFull[i, j]
if val > 0:
ax.text(j, i, str(val),
va='center', ha='center', fontsize=fontsize)
| 13,514
|
def delete_host(deleter_email, host_id, cluster_ids):
"""Delete the given host.
:param host_id: id of the host
:type host_id: int
:param cluster_ids: list of cluster id
:type cluster_ids: list of int
"""
try:
delete.delete_host(
host_id, cluster_ids, deleter_email
)
except Exception as error:
logging.exception(error)
| 13,515
|
def get_optional_relations():
"""Return a dictionary of optional relations.
    @returns {interface type: [relation name(s)]}
"""
optional_interfaces = {}
if relation_ids('ceph'):
optional_interfaces['storage-backend'] = ['ceph']
if relation_ids('neutron-plugin'):
optional_interfaces['neutron-plugin'] = ['neutron-plugin']
if relation_ids('shared-db') or relation_ids('pgsql-db'):
optional_interfaces['database'] = ['shared-db', 'pgsql-db']
return optional_interfaces
| 13,516
|
def rf_plot(X, y, a=None, b=None, name1='data', name2=None):
"""
This function returns parity plot of regression result by Random
Forest.
Parameters
----------
X: an array or array-like predictors.
It should be scaled by StandardScaler.
y: an array or array-like target.
        It should have a dimension compatible with input X.
a, b: an array or array-like, optional.
another set of data, such as a = X_test, b = y_test.
Returns
-------
matplotlib scatter plot.
A parity plot that shows relationship between predicted values and
actual values from train-set and test-set.
"""
modelRF, RF_fit = rf_regress(X, y)
print("RF error for train set", mean_squared_error(y, modelRF.predict(X)))
    if a is not None and b is not None:
modelRF_test, RF_fit_test = rf_regress(a, b)
print("testset error", mean_squared_error(b, modelRF.predict(a)))
plt.figure(figsize=(8, 8))
plt.scatter(y, RF_fit.predict(X), label=name1)
    if a is not None and b is not None:
plt.scatter(b, RF_fit_test.predict(a), label=name2)
plt.plot([0, 10], [0, 10], lw=4, color='black')
plt.legend()
plt.xlabel('Actual Output')
plt.ylabel('Predicted Output')
return
| 13,517
|
def splitmod(n, k):
"""
Split n into k lists containing the elements of n in positions i (mod k).
Return the heads of the lists and the tails.
"""
heads = [None]*k
tails = [None]*k
i = 0
while n is not None:
if heads[i] is None:
heads[i] = n
if tails[i] is not None:
tails[i].next = n
tails[i] = n
n.next, n = None, n.next
i = (i+1)%k
return heads, tails
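# Illustrative usage sketch (added for clarity, not part of the original
# snippet); the function works on singly linked nodes, so a minimal
# hypothetical Node class is defined here purely for the demonstration.
class _DemoNode:
    def __init__(self, value):
        self.value = value
        self.next = None

_demo_nodes = [_DemoNode(v) for v in range(6)]
for _a, _b in zip(_demo_nodes, _demo_nodes[1:]):
    _a.next = _b
demo_heads, demo_tails = splitmod(_demo_nodes[0], 3)
print([h.value for h in demo_heads])  # -> [0, 1, 2]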
| 13,518
|
def cli(repo, **kwargs):
"""
Create a label.
The label color must be specified as a six-character hex code, e.g.,
`ff00ff`.
"""
kwargs["color"] = kwargs["color"].lstrip('#')
print_json(repo.labels.post(json=kwargs))
| 13,519
|
def dirname_to_prefix(dirname):
"""Return filename prefix from dirname"""
return os.path.basename(dirname.strip('/')).split("-", maxsplit=1)[1]
| 13,520
|
def find_flats(flats, flat2_finder=find_flat2):
"""Find flat pairs."""
file1s = sorted([item.strip() for item in flats
if item.find('flat1') != -1])
return [(f1, flat2_finder(f1)) for f1 in file1s]
| 13,521
|
def binary_accuracy(a,b):
"""
Calculate the binary acc.
"""
return ((a.argmax(dim=1) == b).sum().item()) / a.size(0)
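# Illustrative usage sketch (added for clarity, not part of the original
# snippet); assumes PyTorch is available, since the function expects tensors.
import torch

demo_scores = torch.tensor([[0.9, 0.1], [0.2, 0.8], [0.3, 0.7]])
demo_labels = torch.tensor([0, 1, 0])
print(binary_accuracy(demo_scores, demo_labels))  # -> 0.666...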
| 13,522
|
def add_ice_post_arrow_hq_lq_arguments2(parser):
"""Add quiver QV threshold to mark an isoform as high-quality or low-quality."""
# if isinstance(parser, PbParser):
# #parser = _wrap_parser(parser)
# arg_parser = parser.arg_parser.parser
# tcp = parser.tool_contract_parser
# tcp.add_float(BaseConstants.HQ_ARROW_MIN_ACCURACY_ID, "hq_arrow_min_accuracy",
# default=BaseConstants.HQ_ARROW_MIN_ACCURACY_DEFAULT,
# name="Minimum Quiver|Arrow Accuracy", description=BaseConstants.HQ_ARROW_MIN_ACCURACY_DESC)
# tcp.add_int(BaseConstants.QV_TRIM_FIVEPRIME_ID, "qv_trim_5",
# default=BaseConstants.QV_TRIM_FIVEPRIME_DEFAULT,
# name="Trim QVs 5'", description=BaseConstants.QV_TRIM_FIVEPRIME_DESC)
# tcp.add_int(BaseConstants.QV_TRIM_THREEPRIME_ID, "qv_trim_3",
# default=BaseConstants.QV_TRIM_THREEPRIME_DEFAULT,
# name="Trim QVs 3'", description=BaseConstants.QV_TRIM_THREEPRIME_DESC)
# else:
# assert isinstance(parser, argparse.ArgumentParser)
# arg_parser = parser
arg_parser = parser
icq_gp = arg_parser.add_argument_group("IceArrow High QV/Low QV arguments")
icq_gp.add_argument("--hq_arrow_min_accuracy",
type=float,
default=BaseConstants.HQ_ARROW_MIN_ACCURACY_DEFAULT,
dest="hq_arrow_min_accuracy",
help=BaseConstants.HQ_ARROW_MIN_ACCURACY_DESC)
icq_gp.add_argument("--qv_trim_5",
type=int,
default=BaseConstants.QV_TRIM_FIVEPRIME_DEFAULT,
dest="qv_trim_5",
help=BaseConstants.QV_TRIM_FIVEPRIME_DESC)
icq_gp.add_argument("--qv_trim_3",
type=int,
default=BaseConstants.QV_TRIM_THREEPRIME_DEFAULT,
dest="qv_trim_3",
help=BaseConstants.QV_TRIM_THREEPRIME_DESC)
icq_gp.add_argument("--hq_min_full_length_reads",
type=int,
default=2,
help="Minimum number of FL support to be an HQ isoform (default: 2)")
icq_gp = arg_parser.add_argument_group("IceArrow2 HQ/LQ IO arguments")
icq_gp.add_argument("--hq_isoforms_fa",
default=None,
type=str,
dest="hq_isoforms_fa",
help="Arrow polished, high quality isoforms " +
"in FASTA, default: root_dir/output/all_arrowed_hq.fasta")
icq_gp.add_argument("--hq_isoforms_fq",
default=None,
type=str,
dest="hq_isoforms_fq",
help="Arrow polished, high quality isoforms " +
"in FASTQ, default: root_dir/output/all_arrowed_hq.fastq")
icq_gp.add_argument("--lq_isoforms_fa",
default=None,
type=str,
dest="lq_isoforms_fa",
help="Arrow polished, low quality isoforms " +
"in FASTA, default: root_dir/output/all_arrowed_lq.fasta")
icq_gp.add_argument("--lq_isoforms_fq",
default=None,
type=str,
dest="lq_isoforms_fq",
help="Arrow polished, low quality isoforms " +
"in FASTQ, default: root_dir/output/all_arrowed_lq.fastq")
return parser
| 13,523
|
def dev_step(tset, train_m, test_m, net, dataset, args, nd_possible_rating_values):
"""
Evaluates model on a dev set
"""
batch_size = 256
#print("tset:",tset)
user_te = np.array(list(tset.keys()))
#print("user_te:",user_te)
user_te2 = user_te[:, np.newaxis]
#user_te2 = user_te
ll = int(len(user_te) / batch_size) + 1
recall50 = []
recall100 = []
recall200 = []
ndcg50 = []
ndcg100 = []
ndcg200 = []
for batch_num in range(int(ll)):
print(batch_num/ll*100,"%")
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, len(user_te))
        # u_batch is the list of users in this batch
u_batch = user_te2[start_index:end_index]
        # batch_users is the number of users in this batch
batch_users = end_index - start_index
        num_user = train_m.shape[0]  # total number of users
        num_movie = train_m.shape[1]  # total number of items
user_list = user_te[start_index:end_index]
batch_rating_pairs = generate_pair(user_list, num_movie)
batch_dec_graph = generate_dec_graph(batch_rating_pairs, num_user, num_movie).to(args.device)
Two_Stage = False
pred_ratings, reg_loss, user_out, movie_out, W = net(dataset.train_enc_graph, batch_dec_graph, dataset.user_feature, dataset.movie_feature, Two_Stage)
#pred_ratings = th.softmax(pred_ratings, dim=1)
#print("pred_rating",pred_ratings.shape)
pred_ratings = pred_ratings.cpu().detach().numpy()
#pred_argmax = np.argmax(pred_ratings, axis=1)
pred_index = np.zeros_like(pred_ratings[:,0])
for j in range(len(pred_index)):
#pred_index[j][pred_argmax[j]] = 1
pred_index[j] = pred_ratings[j][1]
#print("pred_rating",pred_index[0:10])
#real_pred_ratings = (th.softmax(pred_ratings, dim=1) * nd_possible_rating_values.view(1, -1)).sum(dim=1)
#real_pred_ratings = (th.from_numpy(pred_index).to(args.device) * nd_possible_rating_values.view(1, -1)).sum(dim=1)
real_pred_ratings = th.from_numpy(pred_index).to(args.device)
print("real_pred_ratings", th.sum(real_pred_ratings>=1))
u_b = user_te[start_index:end_index]
real_pred_ratings = real_pred_ratings.cpu()
#print("pred_shape:", real_pred_ratings.shape)
pre = real_pred_ratings.reshape(batch_users, -1)
#print("pred_shape:", pre.shape)
#pre = np.reshape(real_pred_ratings, (batch_users, num_movie))
pre = pre.detach().numpy()
idx = np.zeros_like(pre, dtype=bool)
idx[train_m[u_b].nonzero()] = True
pre[idx] = -np.inf
recall = []
for kj in [50, 100, 200]:
idx_topk_part = np.argpartition(-pre, kj, 1)
# print pre[np.arange(batch_users)[:, np.newaxis], idx_topk_part[:, :kj]]
# print idx_topk_part
pre_bin = np.zeros_like(pre, dtype=bool)
pre_bin[np.arange(batch_users)[:, np.newaxis], idx_topk_part[:, :kj]] = True
# print pre_bin
true_bin = np.zeros_like(pre, dtype=bool)
true_bin[test_m[u_b].nonzero()] = True
tmp = (np.logical_and(true_bin, pre_bin).sum(axis=1)).astype(np.float32)
#print("tmp:",tmp)
recall.append(tmp / np.minimum(kj, true_bin.sum(axis=1)))
#print("recall:",tmp / np.minimum(kj, true_bin.sum(axis=1)))
# print tmp
#print("recall:",recall)
ndcg = []
for kj in [50, 100, 200]:
            # get the (unordered) indices of the top-kj elements
idx_topk_part = np.argpartition(-pre, kj, 1)
#print("pre:",pre.shape)
#
#print("idx_topk_part[:, :kj]:",idx_topk_part[:, :kj])
            # get each user's top-kj predicted indices
topk_part = pre[np.arange(batch_users)[:, np.newaxis], idx_topk_part[:, :kj]]
#print("topk_part:",topk_part[0:2])
idx_part = np.argsort(-topk_part, axis=1)
            # sort the predicted scores in descending order and take the indices
#print("idx_part:",idx_part[0:2])
idx_topk = idx_topk_part[np.arange(end_index - start_index)[:, np.newaxis], idx_part]
            # map back to the corresponding indices in the original array
#print("idx_topk:",idx_topk[0:2])
tp = np.log(2) / np.log(np.arange(2, kj + 2))
test_batch = test_m[u_b]
#print("test_batch:",test_batch)
DCG = (test_batch[np.arange(batch_users)[:, np.newaxis], idx_topk].toarray() * tp).sum(axis=1)
            # compute DCG only at the ranks where true items appear in the predictions
#print("tp:",tp)
#print("DCG:",DCG)
IDCG = np.array([(tp[:min(n, kj)]).sum()
for n in test_batch.getnnz(axis=1)])
#print("IDCG:",np.array([(tp[:min(n, kj)]).sum()
# for n in test_batch.getnnz(axis=1)]))
ndcg.append(DCG / IDCG)
#print("ndcg:",ndcg)
recall50.append(recall[0])
recall100.append(recall[1])
recall200.append(recall[2])
ndcg50.append(ndcg[0])
ndcg100.append(ndcg[1])
ndcg200.append(ndcg[2])
recall50 = np.hstack(recall50)
recall100 = np.hstack(recall100)
recall200 = np.hstack(recall200)
ndcg50 = np.hstack(ndcg50)
ndcg100 = np.hstack(ndcg100)
ndcg200 = np.hstack(ndcg200)
print("recall50:",recall50[0:10])
print("ndcg50:", ndcg50.shape)
print("recall50:", np.mean(recall50), "ndcg50:",np.mean(ndcg50))
print("recall100:",np.mean(recall100),"ndcg100:", np.mean(ndcg100))
print("recall200:",np.mean(recall200), "ndcg200:",np.mean(ndcg200))
#f1.write(str(np.mean(recall100)) + ' ' + str(np.mean(ndcg100)) + '\n')
#f1.flush()
return np.mean(recall50), np.mean(recall100), np.mean(recall200), np.mean(ndcg50), np.mean(ndcg100), np.mean(ndcg200)
| 13,524
|
def clear_image_threads(serial_number):
"""
Stops all running threads and clears all thread dictionaries
:param serial_number: serial number of active stream deck
"""
global stop_animation
stop_animation_keys = list(stop_animation.keys())
for key in stop_animation_keys:
if key[0] == serial_number:
stop_animation[key] = True
time.sleep(.5)
del stop_animation[key]
global clock_threads
clock_thread_keys = list(clock_threads.keys())
for dict_key in clock_thread_keys:
if dict_key[0] == serial_number:
clock_thread = clock_threads[dict_key]
del clock_threads[dict_key]
clock_thread.join()
| 13,525
|
def email_change_view(request, extra_context={},
success_url='email_verification_sent',
template_name='email_change/email_change_form.html',
email_message_template_name='email_change_request',
form_class=EmailChangeForm):
"""Allow a user to change the email address associated with the
user account.
"""
if request.method == 'POST':
form = form_class(username=request.user.username,
data=request.POST,
files=request.FILES)
if form.is_valid():
email = form.cleaned_data.get('email')
# First clean all email change requests made by this user
# Except subscription email validation
qs = EmailChangeRequest.objects.filter(user=request.user) \
.exclude(email=request.user.email)
qs.delete()
# Create an email change request
email_request = EmailChangeRequest.objects.create(
user=request.user,
email=email
)
email_request.send(email_message_template_name)
return redirect(success_url)
else:
form = form_class(username=request.user.username)
context = RequestContext(request, extra_context)
context['form'] = form
return render_to_response(template_name, context_instance=context)
| 13,526
|
def check_is_pair(record1, record2):
"""Check if the two sequence records belong to the same fragment.
    In a matching pair the records are left and right pairs
of each other, respectively. Returns True or False as appropriate.
Handles both Casava formats: seq/1 and seq/2, and 'seq::... 1::...'
and 'seq::... 2::...'.
"""
if hasattr(record1, 'quality') or hasattr(record2, 'quality'):
if not (hasattr(record1, 'quality') and hasattr(record2, 'quality')):
raise ValueError("both records must be same type (FASTA or FASTQ)")
lhs1, rhs1 = _split_left_right(record1.name)
lhs2, rhs2 = _split_left_right(record2.name)
# handle 'name/1'
if lhs1.endswith('/1') and lhs2.endswith('/2'):
subpart1 = lhs1.split('/', 1)[0]
subpart2 = lhs2.split('/', 1)[0]
assert subpart1
if subpart1 == subpart2:
return True
# handle '@name 1:rst'
elif lhs1 == lhs2 and rhs1.startswith('1:') and rhs2.startswith('2:'):
return True
return False
| 13,527
|
def decompress_bzip2_from_hdu(hdu):
"""Decompress data in a PyFits HDU object using libz2.
"""
import bz2
data = hdu.data.field(0)
source_type = np.dtype(hdu.header['PCSRCTP'])
return (np.fromstring(bz2.decompress(data.tostring()),
dtype=source_type),
numpy_type_to_fits_type(source_type))
| 13,528
|
def grid_values(grid):
"""
Convert grid into a dict of {square: char} with '123456789' for empties.
Args:
grid : string
A grid in string form.
Returns:
grid : dict
Keys are the boxes (e.g., 'A1').
Values are the values in each box (e.g., '8').
If the box has no value, then the value will be '123456789'.
"""
# Change assertion for different-sized grids
assert len(grid) == 81
digits = '123456789'
values = []
for c in grid:
if c == '.':
values.append(digits)
elif c in digits:
values.append(c)
# Sanity check that values is size it should be
assert len(values) == 81
return dict(zip(boxes, values))
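# Illustrative usage sketch (added for clarity, not part of the original
# snippet); `boxes` is a module-level global in the original code, so a
# standard Sudoku box list is reconstructed here purely for the demonstration.
rows, cols = 'ABCDEFGHI', '123456789'
boxes = [r + c for r in rows for c in cols]
demo_grid = ('..3.2.6..9..3.5..1..18.64....81.29..'
             '7.......8..67.82....26.95..8..2.3..9..5.1.3..')
demo_values = grid_values(demo_grid)
print(demo_values['A1'], demo_values['A3'])  # -> 123456789 3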
| 13,529
|
def get_ftp_files(build_type, tag_name, config) -> List[ReleaseFile] :
"""!
@brief Gets file metadata for nightlies hosted on FTP, as determined by config["ftp"] attributes
@param [in] `build_type` Unknown str
@param [in] `tag_name` Github tag name of the release
@param [in] `config` config metadata set in main.py
"""
tag_regex = re.compile("nightly_(.*)")
build_group_regex = re.compile("nightly_.*-builds-([^.]+).*")
files = []
try:
with FTP(config["ftp"]["host"], config["ftp"]["user"], config["ftp"]["pass"]) as ftp:
# extract version
version_str = tag_regex.match(tag_name).group(1)
# extract filepath w/ version
# then list all ftp hits with that path
path_template = config["ftp"]["path"]
path = path_template.format(type=build_type, version=version_str)
file_entries = list(ftp.mlsd(path, ["type"]))
# get all ftp hits of type file
for entry in file_entries:
if entry[1]["type"] == "file":
files.append(entry[0])
except error_perm:
print("Received permanent FTP error!")
return []
out_data = []
for file in files:
# from the file list, extract only nightly files
file_match = build_group_regex.match(file)
if file_match is None:
print("Ignoring non nightly file '{}'".format(file))
continue
group_match = file_match.group(1)
primary_url = None
mirrors = []
# x64 is the name Visual Studio uses but Win64 works better for us since that gets displayed in the nightly post
if "x64" in group_match:
group_match = group_match.replace("x64", "Win64")
# construct the download URL list for all mirrors. The first listed ftp location is taken as the Primary
for mirror in config["ftp"]["mirrors"]:
download_url = mirror.format(type=build_type, version=version_str, file=file)
if primary_url is None:
primary_url = download_url
else:
mirrors.append(download_url)
# Form the List[ReleaseFile] list with the download URL links
out_data.append(ReleaseFile(file, primary_url, group_match, None, mirrors))
return out_data
| 13,530
|
def trade_model(data, column='percentile_7Y', show=False, show_annual_invest=True):
"""
交易模型:
1、低估:买入、适中:保持不变、高估:卖出
"""
df = data.copy()
    # drop rows without rolling-percentile data
    df.dropna(inplace=True)
    # find the first trading day of each month
    month_first_date = first_trade_date_in_month(df)
    # assume 5000 yuan becomes available on the first trading day of each month
    month_invest_const = 5000
    available_cash = 0  # available cash
    stock_q = 0  # number of shares (fractions allowed to simplify the math)
    # data for plotting: cumulative investment, current stock holdings, realized returns
trade_date = []
invest_cash = []
stock_assets = []
return_cash = []
    # buy records
trades = {}
df_return = pd.DataFrame(columns=('date', 'invest', 'stock', 'return'))
for index, row in df.iterrows():
        # Follow the standard dollar-cost-averaging idea: only decide whether to
        # invest, not how much. Proceeds from sales are pocketed and not reinvested.
trade_date.append(index)
if month_first_date.__contains__(index):
# available_cash = available_cash + month_invest_const
            # if nothing is invested this month, the cash is reset next month
available_cash = month_invest_const
if row[column] < 0.4 and available_cash > 0:
            # relatively low valuation range: buy
afford_q = available_cash / row['close']
stock_q += afford_q
invest_cash.append(available_cash)
            trades[index] = available_cash  # record this buy
available_cash = 0
return_cash.append(0)
elif row[column] > 0.6 and stock_q > 0:
            # high valuation range: sell
            selled_p = month_invest_const / row['close']  # number of shares sold
stock_q = stock_q - selled_p
invest_cash.append(0)
return_cash.append(month_invest_const)
else:
            # do nothing
invest_cash.append(0)
return_cash.append(0)
stock_assets.append(stock_q * row['close'])
df_return['date'] = trade_date
df_return['invest'] = invest_cash
df_return['stock'] = stock_assets
df_return['return'] = return_cash
df_return['invest_cumsum'] = df_return['invest'].cumsum()
df_return['return_cumsum'] = df_return['return'].cumsum()
df_return['hold'] = df_return['return_cumsum'] + df_return['stock']
    # set the date column as the index
    df_return['date'] = pd.to_datetime(df_return['date'])  # convert to datetime
    df_return.set_index(['date'], inplace=True)
    df_return.index.name = None  # drop the index name
df_return['close'] = df['close']
print(df_return.head())
    # compute the annualized return
earings = CalReturns.annual_returns(trades, df_return.index[-1], df_return['hold'][-1])
print('年化收益率:%s' % earings)
if show:
fig, ax = plt.subplots(1, figsize=(16, 9))
df_return[['invest_cumsum', 'hold', 'close']].plot(ax=ax, secondary_y=['close'], figsize=(16, 9),
colormap='coolwarm')
plt.show()
if show_annual_invest:
"""展示年度投入与收益, 📊柱状图 (年度投入、年度剩余))"""
trade_year = [date.year for date in trade_date]
df_g = pd.DataFrame(columns=('date', 'invest'))
df_g['date'] = trade_year
df_g['invest'] = invest_cash
df_view = df_g.groupby('date').sum() # group by
fig, ax = plt.subplots(1, figsize=(16, 9))
df_view[['invest']].plot(ax=ax, figsize=(16, 9), kind='bar')
plt.show()
| 13,531
|
def variables(i, o):
""" WRITEME
:type i: list
:param i: input L{Variable}s
:type o: list
:param o: output L{Variable}s
:returns:
the set of Variables that are involved in the subgraph that lies between i and o. This
includes i, o, orphans(i, o) and all values of all intermediary steps from i to o.
"""
return variables_and_orphans(i, o)[0]
| 13,532
|
def new_instance(): # display_callback=None):
"""
Create a new instance of Ghostscript
This instance is passed to most other API functions.
"""
# :todo: The caller_handle will be provided to callback functions.
display_callback=None
instance = gs_main_instance()
rc = libgs.gsapi_new_instance(pointer(instance), display_callback)
if rc != 0:
raise GhostscriptError(rc)
return instance
| 13,533
|
def ldns_resolver_dnssec_cd(*args):
"""LDNS buffer."""
return _ldns.ldns_resolver_dnssec_cd(*args)
| 13,534
|
def print_dialog(line, speaker, show_speaker=False):
"""Print the line and speaker, formatted appropriately."""
if show_speaker:
speaker = speaker.title()
print('{} -- {}'.format(line.__repr__(), speaker))
else:
print(line)
| 13,535
|
def spf_record_for(hostname, bypass_cache=True):
"""Retrieves SPF record for a given hostname.
According to the standard, domain must not have multiple SPF records, so
if it's the case then an empty string is returned.
"""
try:
primary_ns = None
if bypass_cache:
primary_ns = get_primary_nameserver(hostname)
txt_records = query_dns(hostname, 'txt', primary_ns)
spf_records = [r for r in txt_records if r.strip().startswith('v=spf')]
if len(spf_records) == 1:
return spf_records[0]
except Exception as e:
log.exception(e)
return ''
| 13,536
|
def remove_file_handlers():
"""Remove all file handlers from package logger."""
log = get_logger()
handlers = log.handlers
for handler in handlers:
if isinstance(handler, logging.FileHandler):
log.removeHandler(handler)
| 13,537
|
def raw_to_df(
rawfile: os.PathLike,
n_channels: int = 2048,
fmt: np.dtype = ">i8",
order: str = "F",
) -> pd.DataFrame:
"""Read a binary raw data file, and returns a dataframe."""
bin_raw = read_file(rawfile, "rb")
dt = np.dtype(fmt)
np_data = np.frombuffer(bin_raw, dtype=dt)
bytes_per_data = dt.alignment
total_bytes = len(bin_raw)
n_records = int(total_bytes / n_channels / bytes_per_data)
np_data = np_data.reshape(n_channels, n_records, order=order)
return pd.DataFrame(np_data)
| 13,538
|
def check_number_of_entries(data, n_entries=1):
"""Check that data has more than specified number of entries"""
if not data.size > n_entries:
msg = (f"Data should have more than {n_entries} entries")
raise ValueError(msg)
| 13,539
|
def cleanup():
"""Deactivates webpages and deletes html lighthouse reports."""
shutil.rmtree('.lighthouseci')
pattern = 'COMMUNITY_DASHBOARD_ENABLED = .*'
replace = 'COMMUNITY_DASHBOARD_ENABLED = False'
common.inplace_replace_file(FECONF_FILE_PATH, pattern, replace)
pattern = '"ENABLE_ACCOUNT_DELETION": .*'
replace = '"ENABLE_ACCOUNT_DELETION": false,'
common.inplace_replace_file(CONSTANTS_FILE_PATH, pattern, replace)
| 13,540
|
def get_duplicated_members(first_name, last_name):
"""同じ名前の持つメンバーが存在するかどうか
:param first_name:
:param last_name:
:return:
"""
first_name = first_name.strip() if first_name else None
last_name = last_name.strip() if last_name else None
queryset = models.Member.objects.filter(
first_name=first_name,
last_name=last_name,
)
return queryset
| 13,541
|
def get_oauth_id():
"""Returns user email ID if OAUTH token present, or None."""
try:
user_email = oauth.get_current_user(SCOPE).email()
except oauth.Error as e:
user_email = None
logging.error('OAuth failure: {}'.format(e))
return user_email
| 13,542
|
def check_for_publication(form, formsets, user_data):
"""
Run additional validation across forms fields for status LILACS-Express and LILACS
"""
valid = valid_descriptor = valid_url = True
# regex match starts with S (Serial) and ends with (as) analytic
regex_sas = r"^S.*as$"
Sas_record = re.search(regex_sas, form.document_type)
status = form.cleaned_data.get('status')
user_role = user_data['service_role'].get('LILDBI')
# for LILACS status and not Serie Source is required at least one primary descriptor
if status == 1 and form.document_type != 'S':
valid_descriptor = check_descriptor(form, formsets['descriptor'])
# for LILACS indexed check url/fulltext/page
if form.is_LILACS and status != -1:
# for journal article (Sas record) check for electronic_address OR fulltext file #159
if Sas_record:
valid_url = check_url_or_attachment(form, formsets['attachment'])
elif form.document_type != 'S' and form.document_type != 'Mc':
# for other types of analytic records check for page or electronic_address #160
valid_url = check_url_or_page(form, formsets['attachment'])
if not valid_descriptor or not valid_url:
valid = False
return valid
| 13,543
|
def test_ChooseScraper():
"""
Tests the scraper for Amazon, Bestbuy, and Walmart URLs.
"""
# Testing Amazon case
scraper = Scraper()
stock_info, cost = scraper.ChooseScraper(amazon_URL)
assert stock_info == "Error Occurred" or stock_info == "In Stock"
# Testing BestBuy case
scraper = Scraper()
stock_info, cost = scraper.ChooseScraper(bestBuy_URL)
assert stock_info == "Error Occurred" or stock_info == "In Stock"
# Testing Walmart case
scraper = Scraper()
stock_info, cost = scraper.ChooseScraper(walmart_URL)
assert stock_info == "In Stock"
| 13,544
|
def signal_handler(sig: int, frame) -> None:
"""Sets reaction to interrupt from keyboard"""
global LISTEN
if LISTEN:
print(' SIGINT received.\n Terminating...')
LISTEN = False
else:
        print(' It can take some time due to connection delay, '
              'please be patient')
| 13,545
|
def _generate_var_name(prefix, field_name):
"""
Generate the environment variable name, given a prefix
and the configuration field name.
Examples:
>>> _generate_var_name("", "some_var")
"SOME_VAR"
>>> _generate_var_name("my_app", "some_var")
"MY_APP_SOME_VAR"
:param prefix: the prefix to be used, can be empty
:param field_name: the name of the field from which the variable is derived
"""
return (
"_".join((prefix, field_name)).upper()
if prefix
else field_name.upper()
)
| 13,546
|
def VelocityPostProcessingChooser(transport):
"""
pick acceptable velocity postprocessing based on input
"""
tryNew = True
velocityPostProcessor = None
if transport.conservativeFlux is not None:
if (transport.mesh.parallelPartitioningType == 0 and transport.mesh.nLayersOfOverlap==0): #element-based partition
logEvent("Cannot specify conservative flux if partitioned by element with no element overlaps")
exit()
ppcomps = []
pptypes = {}
for ci in list(transport.conservativeFlux.keys()):
if (transport.conservativeFlux[ci] == 'p1-nc' and
isinstance(transport.u[ci].femSpace,FemTools.NC_AffineLinearOnSimplexWithNodalBasis)):
ppcomps.append(ci)
pptypes[ci] = 'p1-nc'
#end p1-nc for comp ci
elif 'pwl' in transport.conservativeFlux[ci]:
ppcomps.append(ci)
pptypes[ci] = transport.conservativeFlux[ci]
elif transport.conservativeFlux[ci] in ['point-eval','dg-point-eval','point-eval-gwvd']: #tjp addin for gwvd
ppcomps.append(ci)
pptypes[ci] = transport.conservativeFlux[ci]
elif transport.conservativeFlux[ci] == 'pwc':
ppcomps.append(ci)
pptypes[ci] = 'pwc'
elif 'sun-' in transport.conservativeFlux[ci]:
ppcomps.append(ci)
pptypes[ci] = transport.conservativeFlux[ci]
elif transport.conservativeFlux[ci] in ['dg','dg-bdm']:
ppcomps.append(ci)
pptypes[ci] = transport.conservativeFlux[ci]
else:
logEvent("Unrecognized conservative flux", transport.conservativeFlux[ci])
#for ci
if tryNew:
velocityPostProcessor = AggregateVelocityPostProcessor(pptypes,transport)
else:
velocityPostProcessor = VelocityPostProcessor_Original(pptypes,
transport,
ppcomps)
#conservative flux specified
return velocityPostProcessor
| 13,547
|
def get_node_hierarchical_structure(graph: nx.Graph, node: str, hop: int):
"""
explore hierarchical neighborhoods of node
"""
layers = [[node]]
curLayer = {node}
visited = {node}
for _ in range(hop):
if len(curLayer) == 0:
break
nextLayer = set()
for neighbor in curLayer:
for next_hop_neighbor in nx.neighbors(graph, neighbor):
if next_hop_neighbor not in visited:
nextLayer.add(next_hop_neighbor)
visited.add(next_hop_neighbor)
curLayer = nextLayer
layers.append(list(nextLayer))
return layers
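# Illustrative usage sketch (added for clarity, not part of the original
# snippet); assumes networkx is imported as nx, as the function requires.
demo_g = nx.Graph([("a", "b"), ("b", "c"), ("c", "d")])
print(get_node_hierarchical_structure(demo_g, "a", 2))  # -> [['a'], ['b'], ['c']]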
| 13,548
|
def svn_utf_cstring_from_utf8_string(*args):
"""svn_utf_cstring_from_utf8_string(svn_string_t src, apr_pool_t pool) -> svn_error_t"""
return _core.svn_utf_cstring_from_utf8_string(*args)
| 13,549
|
def fitness_sum(element):
"""
Test fitness function.
"""
return np.sum(element)
| 13,550
|
def Stepk(k, basetree=[]): # XXX. make sure basetree is passed as expected.
"""Try to solve the puzzle using assumptions.
k --> The step number. (1st step is solving exactly,
2nd step is solving using 1 assumption,
3rd step is solving using 2 assumptions and so on.)
Note: The assumption level of this step will be k-1.
basetree --> list of parent assumption levels.
It helps in getting the tree structure of (nested)
assumptions.
Example- basetree = [3,2] --> This means that this Stepk function has been
called (recursively) from another Stepk function (with k = 3) which was
itself called from another Stepk function (with k = 4).
==============
Return value:
==============
1 - puzzle was solved in this step.
0 - puzzle was not solved in this step.
"""
# Note: If the puzzle being solved does not have a unique solution and
# the parameter k is large (say 5 or more) then this function will give
# one of the many possible solutions.
    # But whichever solution it gives, it will be definitely correct!
print "Puzzle complete?"
if isPuzzleComplete():
print "> Complete!"
return 1
else:
print "> Not yet!"
assumptionleveltree = basetree + [k - 1]
print "\n(New Assumption Level.\nAssumption Tree: %s\n" \
"Saving puzzle...)\n" % assumptionleveltree
initialpuzzle, initiallabelrestrictionscount = SavePuzzle()
for row in xrange(9):
for col in xrange(9):
# substitute for sudokucellswithonly2possibilities
if (not (IsCellEmpty(row, col) and
(lenLabelsPermissible(row, col) == 3))):
                continue  # == 3 because the 1st entry is ''
_labels = GetPermissibleLabels(row, col, 2)
for i in (0, 1): # iterate through the permissible labels.
# XXX. improve this
if i == 0:
otherlabel = _labels[1]
else:
otherlabel = _labels[0]
print "Assuming %s in cell (%d,%d)\n[Other can be %s]\n" \
% (_labels[i], row + 1, col + 1, otherlabel)
setSudokuCellLabel(row, col, _labels[i])
if k != 2:
print "(Entering into nested\nassumption...)\n"
SolveUptoSteps(k - 1, assumptionleveltree)
if k != 2:
print "(Exiting from nested\nassumption...)\n"
print "Puzzle complete?"
if isPuzzleComplete():
# This means that the assumption taken above was
# correct and the puzzle got solved. Hence, return 1.
print "> Complete!" \
# add this later.. (Assumption Level Tree: %s)
return 1
else:
print "> Not yet!\n\nAssumption correct?"
if isPuzzleCorrect():
# This means that the puzzle is incompletely filled
# and it cannot be decided from this point whether
# the assumption taken above is correct or
# incorrect.
print "Maybe. Can't say anything\nas of now."\
" Assumption was\n%s in (%d,%d)\n" \
% (_labels[i], row + 1, col + 1)
# caching
if i == 0:
# This is caching, for speeding up the solve
# process. If 'label' is the 1st of the 2
# permissible labels then save the solution, it
# might be possible that the 2nd of the 2
# permissible options is definitely incorrect,
# (and consequently this assumption is correct)
# so we will need this solution!
# (better to save it, rather than finding it
# again later.)
print "Saving the above puzzle.\n" \
"Will be useful if other\n" \
"assumption (on same cell)\n"\
"is definitely incorrect.\n"
temppuzzle, templabelrestrictionscount = \
SavePuzzle()
# As it cannot be decided standing at this point
# whether the above assumption is correct or
# incorrect, revert to initial conditions and try
# the other options!
print "Reverting to this puzzle\n"\
"(saved at the beginning \n"\
"of this assumption) -"
LoadPuzzle(initialpuzzle,
initiallabelrestrictionscount)
PrintPuzzle()
else:
                        # This means that the puzzle is incorrectly filled, so
                        # it is certain that the above assumption is definitely
                        # incorrect, and hence the other of the 2 permissible
                        # labels is definitely correct.
print "Definately incorrect!\n" \
"[%s in cell (%d,%d)]\n" \
% (_labels[i], row + 1, col + 1)
# decide whether label is the 1st of the permissible
# the 1st labels or the 2nd one.
if i == 1:
# This means that the assumption we took
# (2nd of the 2 permissible labels) is
# incorrect, & as this assumption is incorrect,
                            # the 1st of the 2 assumptions is definitely
# correct. Moreover, the puzzle solution to
# the 1st permissible label is already saved in
# temppuzzle, so just load it.
print "Hence previous assumption\n" \
"was correct - \n" \
"[%s in cell (%d,%d)]\n" \
"Revert to the its\n" \
"solution puzzle. \n" \
"(Good, I had saved it!\n" \
"Saved my time!)" \
% (otherlabel, row + 1, col + 1)
PrintPuzzle()
LoadPuzzle(temppuzzle,
templabelrestrictionscount)
else:
print "Hence, defintely correct-\n" \
"[%s in cell (%d,%d)]\n" \
% (otherlabel, row + 1, col + 1)
# This means that 2nd of the 2 permissible
# labels is correct, so revert to the puzzle
# that was at the beginning of the outermost
# for loop and then set the 2nd of the
# 2 permissible labels.
LoadPuzzle(initialpuzzle,
initiallabelrestrictionscount)
setSudokuCellLabel(row, col, _labels[1])
# Delete all the variables defined at this point,
# as this function will be going into a recursive
# loop from here on, and this data, unnecessarily,
# will form a stack.
del initialpuzzle
del initiallabelrestrictionscount
del row
del col
del _labels
del i
del otherlabel
# Now, the puzzle solution has moved one step
# ahead, so try to solve it further using the
# "less complex", "previous" steps.
if k != 2:
print "(Entering into nested\nassumption...)\n"
SolveUptoSteps(k - 1, assumptionleveltree)
if k != 2:
print "(Exiting from nested\nassumption...)\n"
# Finally, repeat this step again to solve the
# puzzle further. (it is quite possile that in the
# previous step itself, the puzzle might have got
# solved. If so, it will just enter this function
# (in recursion) and return from the very
# 1st check)
return(Stepk(k, basetree))
# If this part is getting executed means this function did not help
# in solving the puzzle any further.
print "Didn't get anything from\nthis Assumption Level.\n" \
"Assumption Tree: %s\n" % assumptionleveltree
return 0
| 13,551
|
def load_handler(path, *args, **kwargs):
"""
Given a path to a handler, return an instance of that handler.
E.g.::
>>> load_handler('anthill.framework.core.files.uploadhandler.TemporaryFileUploadHandler', request)
<TemporaryFileUploadHandler object at 0x...>
"""
return import_string(path)(*args, **kwargs)
| 13,552
|
def ghetto(RDnet, attIndex, param) :
"""assign proportion param[0] of nodes in net a value of param[1] at attIndex in a ghetto"""
#these should become parameters later on
per1 = param[0]
#per0 = 1.0-per1
curr1 = 0
#start with a seed
seedind = random.randint(1,len(RDnet.nodes()))
seednode = RDnet.nodes()[seedind-1]
seednode.attlist[attIndex] = 1
currWave = [seednode]
#until get necessary fraction
while curr1*1.0/len(RDnet.nodes()) < per1 :
#the next-current wave growth allows a smooth blossoming out of the 1 nodes
nextWave = []
for node in currWave :
potentials = [x for x in RDnet.neighbors(node) if x.attlist[attIndex] != param[1]]
for pot in potentials :
pot.attlist[attIndex] = param[1]
curr1+=1
nextWave.extend(potentials)
currWave = nextWave
| 13,553
|
def validate_twilio_request():
"""Ensure a request is coming from Twilio by checking the signature."""
validator = RequestValidator(current_app.config['TWILIO_AUTH_TOKEN'])
if 'X-Twilio-Signature' not in request.headers:
return False
signature = request.headers['X-Twilio-Signature']
if 'SmsSid' in request.form:
url = url_for('check_raffle', _external=True)
else:
return False
return validator.validate(url, request.form, signature.encode('UTF-8'))
| 13,554
|
def paper_selection(text=[], keywords=[]):
"""
    This function calculates the similarity between keywords or phrases and a text, making it possible to compare
    several texts against the keywords at once and see which text matches the given keywords best. A plot is also
    generated showing the scores of all papers and keywords.
:param text: This is a list of texts which you want to compare with the keywords
:param keywords: The keywords in this list are used to compare the single texts.
:return:
"""
df = PaperSelection.paper_importance(text, keywords)
fig = PaperSelection.plot_paper_selection(df)
return df, fig
| 13,555
|
def six_records_sam(tmpdir):
"""Copy the six_records.sam file to temporary directory."""
src = py.path.local(os.path.dirname(__file__)).join(
'files', 'six_records.sam')
dst = tmpdir.join('six_records.sam')
src.copy(dst)
yield dst
dst.remove()
| 13,556
|
def list_subpackages(package_trail,verbose=False):
""" package_trails = list_subpackages(package_trail)
returns a list of package trails
Inputs:
package_trail : a list of dependant package names, as strings
example: os.path -> ['os','path']
Outputs:
package_trails : a list of package trails
can be processed with >>> map( '.'.join, package_trails )
"""
# imports
import os
# error checking
if isinstance(package_trail,str):
package_trail = [package_trail]
elif not isinstance(package_trail,(list,tuple)):
        raise Exception('%s is not iterable' % package_trail)
# print current package
if verbose:
print('.'.join(package_trail))
# get absolute path for package
package_dir = os.path.abspath( os.path.join(*package_trail) )
# find all packages
packages = [
p for p in os.listdir( package_dir ) \
if ( os.path.isdir(os.path.join(package_dir, p)) and # package is a directory
os.path.isfile(os.path.join(package_dir, p, '__init__.py')) ) # and has __init__.py
]
# append package trail
packages = [ package_trail + [p] for p in packages ]
# recursion, check for sub packages
packages = [ subpackage \
for package in packages \
for subpackage in list_subpackages(package,verbose) ]
# include this package trail
package_trails = [package_trail] + packages
# done!
return package_trails
| 13,557
|
def _get_out_of_bounds_window(radius, padding_value):
"""Return a window full of padding_value."""
return padding_value * np.ones((2 * radius + 1, 2 * radius + 1), dtype=int)
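# Illustrative usage sketch (added for clarity, not part of the original
# snippet); assumes numpy is imported as np, as the function itself requires.
print(_get_out_of_bounds_window(1, padding_value=7))  # 3x3 array filled with 7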
| 13,558
|
def open_fw(file_name, encoding=ENCODING, encode=True):
"""Open file for writing respecting Python version and OS differences.
Sets newline to Linux line endings on Python 3
When encode=False does not set encoding on nix and Python 3 to keep as bytes
"""
if sys.version_info >= (3, 0, 0):
if encode:
file_obj = io.open(file_name, "w", newline="", encoding=encoding)
else:
file_obj = io.open(file_name, "w", newline="")
else:
file_obj = io.open(file_name, "wb")
return file_obj
| 13,559
|
def covid_API_request(
location: str = "Exeter",
location_type: str = "ltla") -> dict[str]:
"""Requests current COVID data from the Cov19API for a given area.
Uses the Cov19API to request the most recent COVID data for
a given area. Returns data as a list of comma separated strings.
Args:
location: The requested COVID data location.
location_type: The type of area requested ("nation" or "ltla").
Returns:
A dictionary containing a csv file containing COVID information
for an area, indexed by the area's name.
"""
requested_area = ["areaType="+location_type, "areaName="+location]
requested_data = {
"areaCode": "areaCode",
"areaName": "areaName",
"areaType": "areaType",
"date": "date",
"cumDailyNsoDeathsByDeathDate": "cumDailyNsoDeathsByDeathDate",
"hospitalCases": "hospitalCases",
"newCasesBySpecimenDate": "newCasesBySpecimenDate"
}
logging.info("Requesting COVID data for %s...", location)
api = Cov19API(filters=requested_area, structure=requested_data)
data = api.get_csv()
covid_data[location] = data.split("\n")[:-1]
logging.info("COVID data for %s updated.", location)
return covid_data
| 13,560
|
def resize(img, height, width, is_flow, mask=None):
"""Resize an image or flow field to a new resolution.
In case a mask (per pixel {0,1} flag) is passed a weighted resizing is
performed to account for missing flow entries in the sparse flow field. The
weighting is based on the resized mask, which determines the 'amount of valid
flow vectors' that contributed to each individual resized flow vector. Hence,
multiplying by the reciprocal cancels out the effect of considering non valid
flow vectors.
Args:
img: tf.tensor, image or flow field to be resized of shape [b, h, w, c]
    height: int, height of new resolution
width: int, width of new resolution
is_flow: bool, flag for scaling flow accordingly
mask: tf.tensor, mask (optional) per pixel {0,1} flag
Returns:
Resized and potentially scaled image or flow field (and mask).
"""
def _resize(image, mask=None):
# _, orig_height, orig_width, _ = img.shape.as_list()
orig_height = tf.shape(input=image)[1]
orig_width = tf.shape(input=image)[2]
if mask is not None:
# multiply with mask, to ensure non-valid locations are zero
image = tf.math.multiply(image, mask)
# resize image
img_resized = tf.compat.v2.image.resize(
image, (int(height), int(width)), antialias=True)
# resize mask (will serve as normalization weights)
mask_resized = tf.compat.v2.image.resize(
mask, (int(height), int(width)), antialias=True)
# normalize sparse flow field and mask
img_resized = tf.math.multiply(
img_resized, tf.math.reciprocal_no_nan(mask_resized))
mask_resized = tf.math.multiply(
mask_resized, tf.math.reciprocal_no_nan(mask_resized))
else:
      # normal resize without anti-aliasing
img_resized = tf.compat.v2.image.resize(image, (tf.cast(height,
tf.int32),
tf.cast(width,
tf.int32)))
if is_flow:
# If image is a flow image, scale flow values to be consistent with the
# new image size.
scaling = tf.reshape([
float(height) / tf.cast(orig_height, tf.float32),
float(width) / tf.cast(orig_width, tf.float32)
], [1, 1, 1, 2])
img_resized *= scaling
if mask is not None:
return img_resized, mask_resized
return img_resized
# Apply resizing at the right shape.
shape = img.shape.as_list()
if img.shape.rank == 3:
if mask is not None:
img_resized, mask_resized = _resize(img[None], mask[None])
return img_resized[0], mask_resized[0]
else:
return _resize(img[None])[0]
if img.shape.rank == 4:
# Input at the right shape.
return _resize(img, mask)
if img.shape.rank > 4:
# Reshape input to [b, h, w, c], resize and reshape back.
outer_shape = tf.shape(input=img)[:-3]
required_shape = tf.concat([[-1], tf.shape(input=img)[-3:]], axis=0)
img_flattened = tf.reshape(img, required_shape)
if mask is not None:
mask_flattened = tf.reshape(mask, required_shape)
img_resized, mask_resized = _resize(img_flattened, mask_flattened)
else:
img_resized = _resize(img_flattened)
final_shape = tf.concat(
[outer_shape, tf.shape(input=img_resized)[-3:]], axis=0)
result_img = tf.reshape(img_resized, final_shape)
if mask is not None:
final_mask_shape = tf.concat(
[outer_shape, tf.shape(input=mask_resized)[-3:]], axis=0)
result_mask = tf.reshape(mask_resized, final_mask_shape)
return result_img, result_mask
return result_img
else:
raise ValueError('Cannot resize an image of shape', shape)
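# Small sketch of the flow-scaling behaviour (assumes TensorFlow 2.x is available):
import tensorflow as tf
flow = tf.random.normal([2, 64, 64, 2])          # batch of dense flow fields
flow_half = resize(flow, 32, 32, is_flow=True)   # spatially halved, flow values scaled by 0.5
print(flow_half.shape)                           # (2, 32, 32, 2)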
| 13,561
|
def render(ops_info, is_module):
"""Render the module or stub file."""
yield MODULE_PREAMBLE if is_module else STUBFILE_PREAMBLE
for cls_name, method_blocks in ops_info.items():
yield CLASS_PREAMBLE.format(cls_name=cls_name, newline="\n" * is_module)
yield from _render_classbody(method_blocks, is_module)
| 13,562
|
def num_decodings2(enc_mes):
"""
:type s: str
:rtype: int
"""
if not enc_mes or enc_mes.startswith('0'):
return 0
stack = [1, 1]
for i in range(1, len(enc_mes)):
if enc_mes[i] == '0':
if enc_mes[i-1] == '0' or enc_mes[i-1] > '2':
                # a '0' is only valid as the second digit of '10' or '20'
return 0
stack.append(stack[-2])
elif 9 < int(enc_mes[i-1:i+1]) < 27:
            # '10'-'26' can be decoded both as one and as two characters
stack.append(stack[-2]+stack[-1])
else:
            # otherwise ('01'-'09' or >= '27'): only the single-digit decoding is valid
stack.append(stack[-1])
return stack[-1]
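# Worked examples: "226" decodes as (2,2,6), (22,6) or (2,26); a dangling or
# leading zero makes the message undecodable.
assert num_decodings2("226") == 3
assert num_decodings2("10") == 1
assert num_decodings2("06") == 0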
| 13,563
|
def spot2Cmyk(spot, default=None):
"""Answers the CMYK value of spot color. If the value does not exist,
answer default of black. Note that this is a double conversion:
spot-->rgb-->cmyk
>>> '%0.2f, %0.2f, %0.2f, %0.2f' % spot2Cmyk(300)
'0.78, 0.33, 0.00, 0.22'
>>> # Nonexistent spot colors map to default or black.
>>> spot2Cmyk(10000000)
(0, 0, 0, 1)
"""
return rgb2Cmyk(spot2Rgb(spot, default=default))
| 13,564
|
def clean(text):
"""
Removes irrelevant parts from :param: text.
"""
# Collect spans
spans = []
# Drop HTML comments
for m in comment.finditer(text):
spans.append((m.start(), m.end()))
# Drop self-closing tags
for pattern in selfClosing_tag_patterns:
for m in pattern.finditer(text):
spans.append((m.start(), m.end()))
# Drop ignored tags
# for left, right in options.ignored_tag_patterns:
# for m in left.finditer(text):
# spans.append((m.start(), m.end()))
# for m in right.finditer(text):
# spans.append((m.start(), m.end()))
# Bulk remove all spans
text = dropSpans(spans, text)
# Drop discarded elements
# for tag in options.discardElements:
# text = dropNested(text, r'<\s*%s\b[^>/]*>' % tag, r'<\s*/\s*%s>' % tag)
# Expand placeholders
for pattern, placeholder in placeholder_tag_patterns:
index = 1
for match in pattern.finditer(text):
text = text.replace(match.group(), '%s_%d' % (placeholder, index))
index += 1
text = text.replace('<<', '«').replace('>>', '»')
#############################################
# Cleanup text
text = text.replace('\t', ' ')
text = spaces.sub(' ', text)
text = dots.sub('...', text)
text = re.sub(' (,:\.\)\]»)', r'\1', text)
text = re.sub('\([^a-zA-Z\d]*\)', '', text)
text = re.sub('(\[\(«) ', r'\1', text)
text = re.sub(r'\n\W+?\n', '\n', text, flags=re.U) # lines with only punctuations
text = text.replace(',,', ',').replace(',.', '.')
text = text.replace(' , ', ', ')
if keep_tables:
        # The following regular expressions are used to remove the wiki markup characters around table
        # structures yet keep the content. The order here is important: we first remove markup such as {|
        # and then the HTML attributes such as 'style'. Finally we drop the remaining '|-' that delimits cells.
text = re.sub(r'!(?:\s)?style=\"[a-z]+:(?:\d+)%;\"', r'', text)
text = re.sub(r'!(?:\s)?style="[a-z]+:(?:\d+)%;[a-z]+:(?:#)?(?:[0-9a-z]+)?"', r'', text)
text = text.replace('|-', '')
text = text.replace('|', '')
text = text.replace('(; ', '(')
text = text.strip()
return text
| 13,565
|
def get_offset(t0,t1,zone,station,gps):
"""
Determine UTC to local Local offset to be applied.
    Parameters
    ----------
    t0 : datetime
      Starting timestamp
    t1 : datetime
      End timestamp
    zone : str
      Timing zone of the requested data, either 'Local' or 'UTC'
    station : int
      Identifier of the station where the sensor is located
    gps : str
      Whether GPS-disciplined timing is enabled ('on' or 'off')
    Return
    ------
    utc2local : float
      Offset in seconds between UTC and the station's local time
    offset : float
      Offset in seconds to apply to match the time in the targeted filename
    utc2pst : float
      Offset in seconds between UTC and PST (zero for data after the timing fix date)
    """
# Identifying the time zone
utc_zone = tz.gettz('UTC')
# Format input timestamp into UTC time
utc_epoch = t0.replace(tzinfo=utc_zone)
# Get time in local California time
local_epoch = utc_epoch.astimezone(tz.gettz('America/Los_Angeles'))
# Calculate offset between UTC and PST timestamps
utc2pst = datetime.utcoffset(local_epoch).total_seconds()
# Consider UTC to PST offset if requested time is before fix date
utc2pst = utc2pst if t0<datetime(2017,12,7) else 0
# Look-up table to identify station's location over time
locations = numpy.array([[1,datetime(2015,11,1),datetime(2017,12,3),tz.gettz('America/Los_Angeles')],
[1,datetime(2017,12,3),datetime.max ,tz.gettz('America/New_York') ],
[2,datetime(2015,11,1),datetime.max ,tz.gettz('America/Los_Angeles')],
[3,datetime(2015,11,1),datetime(2017,10,6),tz.gettz('America/Los_Angeles')],
[3,datetime(2017,10,6),datetime.max ,tz.gettz('America/New_York') ],
[4,datetime(2015,11,1),datetime(2017,12,3),tz.gettz('America/Los_Angeles')],
[4,datetime(2017,12,3),datetime.max ,tz.gettz('America/New_York') ]])
# Identify the location for requested data
for n,start,end,loc in locations:
if n==station and start<t0<end:
local_zone = loc
# Identifying the time zone
utc_zone = tz.gettz('UTC')
# Format input timestamp into UTC time
utc_epoch = t0.replace(tzinfo=utc_zone)
# Get time in local California time
local_epoch = utc_epoch.astimezone(local_zone)
# Calculate offset between Local and UTC timestamps
utc2local = datetime.utcoffset(local_epoch).total_seconds()
# Check if first version of timing data
if t1<datetime(2016,6,10):
# Calculate offset between provided UTC to local timestamps
offset = -utc2local if zone=='UTC' else 0
# Check if second version of timing data
if t0>datetime(2016,6,10):
# Calculate offset between provided local to UTC timestamps
offset = -utc2local if zone=='Local' and gps=='on' else 0
return utc2local,offset,utc2pst
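# Illustrative call (assumes `datetime` and `dateutil.tz` are imported as in the
# surrounding module; the exact offsets depend on the daylight-saving rules):
utc2local, offset, utc2pst = get_offset(
    t0=datetime(2016, 7, 1), t1=datetime(2016, 7, 2),
    zone='Local', station=2, gps='on')
# Station 2 stays in America/Los_Angeles, so utc2local is the PDT offset in
# seconds (-25200) and `offset` flips its sign to convert local times back to UTC.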
| 13,566
|
def notify_user(user, channel, message, url):
"""
Notifies a single user.
See notify().
"""
notify([user], channel, message, url)
| 13,567
|
def my_render_template(html, **arguments):
"""Call render_template with comparison_types as one of the arguments.
:param string html: name of the template
:param **arguments: other arguments to be passed while rendering template
"""
arguments.setdefault(
'comparison_types', ComparisonType.get_cache(g.db_session)
)
return render_template(html, **arguments)
| 13,568
|
def find_or_create_qualification(qualification_name, description,
must_be_owned=True):
"""Query amazon to find the existing qualification name, return the Id. If
it exists and must_be_owned is true but we don't own it, this prints an
error and returns none. If it doesn't exist, the qualification is created
"""
qual_id = find_qualification(
qualification_name,
must_be_owned=must_be_owned
)
if qual_id is False:
return None
if qual_id is not None:
return qual_id
# Create the qualification, as it doesn't exist yet
client = boto3.client(
service_name='mturk',
region_name='us-east-1',
endpoint_url='https://mturk-requester-sandbox.us-east-1.amazonaws.com'
)
response = client.create_qualification_type(
Name=qualification_name,
Description=description,
QualificationTypeStatus='Active',
)
return response['QualificationType']['QualificationTypeId']
| 13,569
|
def delete_file(ssh: paramiko.SSHClient, file_name: str) -> None:
"""
Delete file named file_name via ssh.
:param ssh: sshclient with opened connection
    :param file_name: name of the file to be deleted
"""
ssh.exec_command("rm -rf {}".format(file_name))
| 13,570
|
def generate(root_node, link_types, identifier, ancestor_depth, descendant_depth, process_out, process_in, engine,
verbose, output_format, show):
"""
Generate a graph from a ROOT_NODE (specified by pk or uuid).
"""
# pylint: disable=too-many-arguments
from aiida.tools.visualization import Graph
print_func = echo.echo_info if verbose else None
link_types = {"all": (), "logic": ("input_work", "return"), "data": ("input_calc", "create")}[link_types]
echo.echo_info("Initiating graphviz engine: {}".format(engine))
graph = Graph(engine=engine, node_id_type=identifier)
echo.echo_info("Recursing ancestors, max depth={}".format(ancestor_depth))
graph.recurse_ancestors(
root_node,
depth=ancestor_depth,
link_types=link_types,
annotate_links="both",
include_process_outputs=process_out,
print_func=print_func)
echo.echo_info("Recursing descendants, max depth={}".format(descendant_depth))
graph.recurse_descendants(
root_node,
depth=descendant_depth,
link_types=link_types,
annotate_links="both",
include_process_inputs=process_in,
print_func=print_func)
output_file_name = graph.graphviz.render(
filename='{}.{}'.format(root_node.pk, engine), format=output_format, view=show, cleanup=True)
echo.echo_success("Output file: {}".format(output_file_name))
| 13,571
|
def hard_to_soft(Y_h, k):
"""Converts a 1D tensor of hard labels into a 2D tensor of soft labels
Source: MeTaL from HazyResearch, https://github.com/HazyResearch/metal/blob/master/metal/utils.py
Args:
        Y_h: an [n] or [n, 1] tensor of hard (int) labels in {0,...,k-1}
        k: the number of possible labels
    Returns:
        Y_s: a tensor of shape [n, k] (same dtype as Y_h) where Y_s[i, j] is the
            one-hot (soft) label for item i and label j
"""
Y_h = Y_h.clone()
if Y_h.dim() > 1:
Y_h = Y_h.squeeze()
assert Y_h.dim() == 1
assert (Y_h >= 0).all()
assert (Y_h < k).all()
n = Y_h.shape[0]
Y_s = torch.zeros((n, k), dtype=Y_h.dtype, device=Y_h.device)
for i, j in enumerate(Y_h):
Y_s[i, int(j)] = 1.0
return Y_s
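# Quick sketch (PyTorch is already required by the function above):
import torch
Y_h = torch.tensor([0, 2, 1])
print(hard_to_soft(Y_h, k=3))
# tensor([[1, 0, 0],
#         [0, 0, 1],
#         [0, 1, 0]])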
| 13,572
|
def find_negations(doc, neg_comma=True, neg_modals=True, debug=False):
"""
Takes as input a list of words and returns the positions (indices) of the words
that are in the context of a negation.
:param list doc: a list of words (strings)
:param bool neg_comma: if True, the negation context ends on a comma
:param bool neg_modals: if True, include negation modals in the set of negation words
:param bool debug: if True, print the text color coded by context
:return set: a set of the word positions inside a negation
"""
doc_context = []
append = doc_context.append
    negation_stopset = neg_puncts | ({","} if neg_comma else set())
    negation_startset = negation_words | (negation_modals if neg_modals else set())
# status == "normal" means outside of parentheses
# status == "parentheses" means inside parentheses
# status[XXX] == True means that the context XXX is negated
# status[XXX] == False means that the context XXX is affirmative
status = {"normal": False, "parentheses": False}
# pointer to the current context
current = "normal"
for i, tok in enumerate(doc):
if tok in negation_startset:
status[current] = True
if debug:
cprint(tok, 'red', attrs=['bold'], end=' ')
continue
if tok in negation_stopset | contrast_words:
if debug:
if status[current]:
cprint(tok, 'green', attrs=['bold'], end=' ')
else:
print(tok, end=" ")
status[current] = False
continue
if tok == "(":
current = "parentheses"
if debug:
cprint(tok, 'green', attrs=['bold'], end=' ')
continue
if tok == ")":
            # reset so the context is affirmative the next time we enter parentheses
            status["parentheses"] = False
current = "normal"
if debug:
cprint(tok, 'green', attrs=['bold'], end=' ')
continue
if debug:
if status[current]:
cprint(tok, 'magenta', end=' ')
else:
print(tok, end=" ")
if status[current]:
append(i)
if debug:
print()
# input("press to continue...")
return set(doc_context)
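# Hypothetical quick demo. find_negations relies on module-level vocabularies
# (negation_words, negation_modals, neg_puncts, contrast_words) and on cprint
# (termcolor) for debug output; minimal stand-in sets are used here.
negation_words = {"not", "no", "never", "n't"}
negation_modals = {"cannot", "won't", "shouldn't"}
neg_puncts = {".", "!", "?", ";"}
contrast_words = {"but", "however"}

tokens = "i do not like the plot , but the acting was great .".split()
print(find_negations(tokens, neg_comma=True))   # {3, 4, 5} -> "like the plot"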
| 13,573
|
def test_status_format(input, expected):
""" test various formatting cases embodied in utils.status_messages.status_format """
assert status_format(input) == expected
| 13,574
|
def apply_all_menus(output, manual_change, dates):
"""Apply the change to all dates in the applicable range. If no menu exist for a day, it will be created."""
print(f"Matching all menus from {manual_change.resto} between {manual_change.start} to {manual_change.end}")
print("====================================================================")
for applicable_date in manual_change.date_range():
year = applicable_date.year
month = applicable_date.month
day = applicable_date.day
# Get existing file if it exists
for resto in manual_change.resto:
path = f"{output}/menu/{resto}/{year}/{month}/{day}.json"
try:
with open(path, 'r') as f:
menu = json.loads(f.read())
except IOError:
os.makedirs(os.path.dirname(path), exist_ok=True)
menu = {'open': False, 'date': applicable_date.strftime('%Y-%m-%d'), 'meals': [], 'vegetables': []}
# Apply the changes
_new_content = manual_change.replacer(path, menu)
dates[resto][_new_content["date"]] = _new_content
new_content = json.dumps(_new_content)
with open(path, 'w+') as f:
f.write(new_content)
| 13,575
|
def int_pow(base: int, power: int, modulus: int=None, safe: bool=True):
"""
Calculate `base` raised to `power`, optionally mod `modulus`
    The Python standard library offers the same functionality, and this
    function exists only as a proof of concept. It only aims to support
    non-negative integer operands.
    The `safe` parameter only applies to modular exponentiation: for
    exponents with a large Hamming weight, the recursion limit can be hit
    quite easily, as one level of recursion is needed for every set bit.
    If `safe` is True, the recursion depth is raised accordingly during the
    computation and then restored.
    ---------------------------------------------------------------
    Benchmark compared to the native Python pow():
    pow(a, b, c) 10k times using a random pool of a, b, c in [2, 99999999999999999999999999999999999999999999999999999]:
    702 ms ± 5.44 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
    int_pow(a, b, c) 10k times using the same pool:
    1.31 s ± 2.81 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
"""
    if base < 0 or power < 0 or (modulus and modulus < 0):
        raise ValueError("Invalid operand. Only non-negative integer operands allowed.")
    if power == 0:
        # anything raised to the power of zero is one (reduced mod `modulus` if given)
        return 1 % modulus if modulus else 1
def pow_nomod(base: int, power: int):
"""Calculate `base` raised to `power`."""
# Keep a copy
base_ = base
for _ in range(power - 1):
base *= base_
return base
if not modulus:
return pow_nomod(base, power)
# Here the fun part comes.
# There exists an optimization for modular exponentiation which
# allows for a much faster computation than (base**power) % modulus.
# the identity `(a * b) mod n = (a mod n) * (b mod n) mod n` aids us here.
# We start by splitting the power up in a sum of powers of two.
n = 0
po2 = []
while power >> n:
# if the bit is set, we have a match:
if power & (1 << n):
po2.append(n)
n += 1
    # We can now represent our evaluation as an expression of the form:
    # (base**(2**a_0) * base**(2**a_1) * ... * base**(2**a_k)) % modulus
# which we can calculate quite fast using the identity below
# Take the highest power of two and evaluate it using our identity.
# We can fill the cache with the results of all the lower powers, mod n.
highest = po2[-1]
# cache for `base` raised to powers of two, modulus `n`.
# the indices shall denote the power.
cache = [None] * (highest + 1)
    result = cache[0] = base % modulus  # base**(2**0) mod modulus
# Square, then reduce modulo `modulus`
for cycle in range(highest):
result *= result
result %= modulus
cache[cycle + 1] = result
def product_mod_n(args: List[int], n: int):
"""
Calculate (base**(2**a_0) * base**(2**a_1) * ... * base**(2**a_k)) mod n, with every `a` in cache.
"""
# BEWARE: this function can easily exceed python max recursion depth (of 1000).
# for values with a large hamming weight, adjust the recursion depth limit accordingly.
# Identity: (a * b) mod n = (a mod n) * (b mod n) mod n
# this can be applied recursively with relative ease.
# Recursion ending condition:
if len(args) == 1:
return cache[args[0]]
#
return (cache[args.pop()]) * (product_mod_n(args, n)) % n
if safe:
# Make sure we won't hit the recursion limit
old = getrecursionlimit()
setrecursionlimit(999999999)
result = product_mod_n(po2, modulus)
setrecursionlimit(old)
return result
else:
return product_mod_n(po2, modulus)
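# Sanity check against the built-in pow() (only int_pow itself is needed here):
assert int_pow(2, 10) == 1024
assert int_pow(7, 123, 1000) == pow(7, 123, 1000)
assert int_pow(3, 45, 7) == pow(3, 45, 7)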
| 13,576
|
def add_webhook(doc, session):
"""Use Mandrill API to add the webhook"""
r = session.post(get_api_url("/webhooks/add.json"), data=json.dumps({
"key": doc.password,
"url": get_webhook_post_url(),
"description": _("Frappé Mandrill Integration"),
"events": [
# subscribe to these events
# NOTE: 'deferral' event wasn't allowed at the time of making this
"send",
"hard_bounce",
"soft_bounce",
"open",
"click",
"spam",
"unsub",
"reject"
]
}))
if r.status_code != 200:
# something went wrong
frappe.msgprint(_("Could not activate Mandrill Integration"))
frappe.errprint(r.text)
return
# store its value in Email Account
mandrill_webhook_key = r.json()["auth_key"]
doc.db_set("mandrill_webhook_key", mandrill_webhook_key)
| 13,577
|
def iter_compare_dicts(dict1, dict2, only_common_keys=False, comparison_op=operator.ne):
"""
    A generator for comparing the values of the two given dicts.
Yields the tuples (key, pair of values positively compared).
By default, the *difference* of values is evaluated using the usual != op, but can be changed
by passing other comparison_op (a function of two arguments returning True/False).
For example: operator.eq for equal values, operator.is_not for not identical objects.
You can also require comparison only over keys existing in both dicts (only_common_keys=True).
Otherwise, you will get the pair with the Python built-in Ellipsis placed for dict with
that key missing. (Be sure to test for Ellipsis using the 'is' operator.)
>>> d1 = dict(a=1, b=2, c=3)
>>> d2 = dict(a=1, b=20, d=4)
>>> dict(iter_compare_dicts(d1, d2, only_common_keys=True))
{'b': (2, 20)}
>>> dict(iter_compare_dicts(d1, d2, only_common_keys=True, comparison_op=operator.eq))
{'a': (1, 1)}
>>> dict(iter_compare_dicts(d1, d2))
{'c': (3, Ellipsis), 'b': (2, 20), 'd': (Ellipsis, 4)}
>>> dict(iter_compare_dicts(d1, d2, comparison_op=operator.eq))
{'a': (1, 1), 'c': (3, Ellipsis), 'd': (Ellipsis, 4)}
"""
keyset1, keyset2 = set(dict1), set(dict2)
for key in (keyset1 & keyset2):
pair = (dict1[key], dict2[key])
if reduce(comparison_op, pair):
yield key, pair
if not only_common_keys:
for key in (keyset1 - keyset2):
yield key, (dict1[key], Ellipsis)
for key in (keyset2 - keyset1):
yield key, (Ellipsis, dict2[key])
| 13,578
|
def truncate_decimal_places(value: decimal.Decimal, places: int = 1) -> float:
"""
Truncate a float (i.e round towards zero) to a given number of decimal places.
NB: Takes a decimal but returns a float!
    >>> truncate_decimal_places(decimal.Decimal("12.364"), 1)
    12.3
    >>> truncate_decimal_places(decimal.Decimal("-12.364"), 1)
    -12.3 # -12.3 is bigger than -12.4
    >>> truncate_decimal_places(decimal.Decimal("12.364"), 0)
    12.0 # truncating to 0 places returns a float with no decimal part
"""
if places == 0:
quantize_string = "1"
else:
quantize_string = "0." + ((places - 1) * "0") + "1"
exponent = decimal.Decimal(quantize_string)
decimal_result = value.quantize(exponent, rounding=decimal.ROUND_DOWN)
return float(decimal_result)
| 13,579
|
def site_url(self, url):
"""
Return the fully qualified URL for the given URL fragment.
"""
try:
# In Django < 1.9, `live_server_url` is decorated as a `property`, but
# we need to access it on the class.
base_url = self.testclass.live_server_url.__get__(self.testclass)
except AttributeError:
        # Django 1.9 updates `live_server_url` to be a `classproperty`.
base_url = self.testclass.live_server_url
return urljoin(base_url, url)
| 13,580
|
def myisinteger(num: int) -> bool:
    """
    Checks if num is an integer (i.e. has no fractional part)
    """
    return num == floor(num)
| 13,581
|
def _get_timeunit(min_time: pd.Timestamp, max_time: pd.Timestamp, dflt: int) -> str:
"""Auxillary function to find an appropriate time unit. Will find the
time unit such that the number of time units are closest to dflt."""
dt_secs = {
"year": 60 * 60 * 24 * 365,
"quarter": 60 * 60 * 24 * 91,
"month": 60 * 60 * 24 * 30,
"week": 60 * 60 * 24 * 7,
"day": 60 * 60 * 24,
"hour": 60 * 60,
"minute": 60,
"second": 1,
}
time_rng_secs = (max_time - min_time).total_seconds()
prev_bin_cnt, prev_unit = 0, "year"
for unit, secs_in_unit in dt_secs.items():
cur_bin_cnt = time_rng_secs / secs_in_unit
if abs(prev_bin_cnt - dflt) < abs(cur_bin_cnt - dflt):
return prev_unit
prev_bin_cnt = cur_bin_cnt
prev_unit = unit
return prev_unit
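# Quick check (pandas is already required by the function above): a one-year
# span with a target of roughly 12 bins maps to monthly bins.
import pandas as pd
assert _get_timeunit(pd.Timestamp("2020-01-01"), pd.Timestamp("2020-12-31"), 12) == "month"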
| 13,582
|
def get_resource_path(relative_path):
"""
relative_path = "data/beach.jpg"
relative_path = pathlib.Path("data") / "beach.jpg"
relative_path = os.path.join("data", "beach.jpg")
"""
    rel_path = pathlib.Path(relative_path)
    dev_base_path = pathlib.Path(__file__).resolve().parent.parent
    # sys._MEIPASS (set by PyInstaller) is a plain string, so normalise it to a Path
    base_path = pathlib.Path(getattr(sys, "_MEIPASS", dev_base_path))
    return base_path / rel_path
| 13,583
|
def data_sample(df, x, y, group_number, quantile):
"""
分组选点法
x: 分组变量
y: 取值变量
"""
group_width = (np.max(df[x]) - np.min(df[x])) / group_number # 分组宽度
x_group = np.arange(np.min(df[x]), np.max(df[x]), group_width) # 分组的X
# 选取每组中设定的分位数的点, 对点数大于零的组选点
if len(quantile) == 3:
data_x = np.array([])
data_y = np.array([])
for i in x_group:
if len(df[(df[x] >= i) & (df[x] < i + group_width)]) > 0:
temp_y = np.array(df[(df[x] >= i) & (df[x] < i + group_width)][y].quantile(quantile))
temp_x = np.array([(i + group_width / 4), (i + group_width / 2), (i + 3 * group_width / 4)])
data_x = np.concatenate([data_x, temp_x], axis = 0)
data_y = np.concatenate([data_y, temp_y], axis = 0)
elif len(quantile) == 1:
data_x = []
data_y = []
for i in x_group:
if len(df[(df[x] >= i) & (df[x] < i + group_width)]) > 0:
temp_y = float(df[(df[x] >= i) & (df[x] < i + group_width)][y].quantile(quantile))
temp_x = float(i + group_width / 2)
data_x.append(temp_x)
data_y.append(temp_y)
return data_x, data_y
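# Minimal sketch (numpy and pandas are already required by the function above):
import numpy as np
import pandas as pd
df = pd.DataFrame({"x": np.linspace(0.0, 10.0, 200), "y": np.random.randn(200)})
xs, ys = data_sample(df, "x", "y", group_number=5, quantile=[0.25, 0.5, 0.75])
# xs/ys hold three representative points per non-empty group along x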
| 13,584
|
def spec_col_file(filename):
"""
Specify an INI file with column names to be automatically used in plots.
The column-label-pairs must be placed under the INI section `[Columns]`.
:param filename: A path to the INI file.
"""
cfg = ConfigParser()
cfg.read(filename, encoding='utf8')
_col_labels.update(cfg['Columns'])
| 13,585
|
def index(request):
""" Shows all challenges related to the current user """
profile = request.user.get_profile()
chall_user = profile.get_extension(ChallengeUser)
challs = ChallengeGame.get_active(chall_user)
played = ChallengeGame.get_played(chall_user)[:10]
if not chall_user.is_eligible():
messages.error(request, _('Your race can\'t play. Go home'))
return render_to_response('challenge/index.html',
{'challenges': challs, 'played': played, 'challuser': chall_user, 'challenge': ChallengeGame},
context_instance=RequestContext(request))
| 13,586
|
def hansen(threshold, geojson, begin, end, logger):
"""For a given threshold and geometry return a dictionary of ha area.
The threshold is used to identify which band of loss and tree to select.
asset_id should be 'projects/wri-datalab/HansenComposite_14-15'
Methods used to identify data:
Gain band is a binary (0 = 0, 255=1) of locations where tree cover increased
    over the data collection period. Calculate area of gain, by converting 255 values
to 1, and then using a trick to convert this to pixel area
(1 * pixelArea()). Finally, we sum the areas over a given polygon using a
reducer, and convert from square meters to hectares.
Tree_X bands show percentage canopy cover of forest, If missing, no trees
present. Therefore, to count the tree area of a given canopy cover, select
the band, convert it to binary (0=no tree cover, 1 = tree cover), and
identify pixel area via a trick, multiplying all 1 vals by image.pixelArea.
Then, sum the values over a region. Finally, divide the result (meters
squared) by 10,000 to convert to hectares
"""
asset_id = 'projects/wri-datalab/HansenComposite_14-15'
d = {}
begin = int(begin.split('-')[0][2:])
end = int(end.split('-')[0][2:])
region = get_region(geojson)
reduce_args = {'reducer': ee.Reducer.sum().unweighted(),
'geometry': region,
'bestEffort': True,
'scale': 90}
gfw_data = ee.Image(asset_id)
loss_band = 'loss_{0}'.format(threshold)
cover_band = 'tree_{0}'.format(threshold)
# Identify 2000 forest cover at given threshold
tree_area = gfw_data.select(cover_band).gt(0).multiply(
ee.Image.pixelArea()).reduceRegion(**reduce_args).getInfo()
d['tree-extent'] = squaremeters_to_ha(tree_area[cover_band])
# Identify tree gain over data collection period
gain = gfw_data.select('gain').divide(255.0).multiply(
ee.Image.pixelArea()).reduceRegion(**reduce_args).getInfo()
d['gain'] = squaremeters_to_ha(gain['gain'])
    # Identify area lost from the begin year up until the end year
tmp_img = gfw_data.select(loss_band)
loss_area_img = tmp_img.gte(begin).And(tmp_img.lte(end)).multiply(ee.Image.pixelArea())
loss_total = loss_area_img.reduceRegion(**reduce_args).getInfo()
d['loss'] = squaremeters_to_ha(loss_total[loss_band])
return d
| 13,587
|
def rectangluarMask(image):
"""
    this function takes an image as input and creates a rectangular, image-sized mask with a filled square in the center of the canvas
"""
mask = np.zeros(image.shape[:2], dtype = 'uint8')
(cX, cY) = (image.shape[1]//2, image.shape[0]//2)
cv2.rectangle(mask, (cX-75, cY-75), (cX+75, cY+75), 255, -1)
# cv2.imshow('Rectangle Mask', mask)
# cv2.waitKey(0)
return mask
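# Minimal usage sketch (OpenCV and NumPy are already required by the function above):
import cv2
import numpy as np
image = np.full((200, 300, 3), 255, dtype=np.uint8)   # white canvas
mask = rectangluarMask(image)                          # 150x150 block of 255s in the centre
masked = cv2.bitwise_and(image, image, mask=mask)      # keeps only the central square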
| 13,588
|
def get_artist_listen_for_change_streams(artist: Artist=None):
"""
Computation steps:
1. Define start and end dates
2. Create stream filters for the current artist
3. aggregate the streams from the Model
    4. Return the aggregated stream count (as a dict)
"""
# Validate argument data types
if not isinstance(artist, Artist): raise TypeError("Param 'artist' must be an Artist object")
# 1
start_date = datetime.date(year=2020, month=6, day=22)
end_date = datetime.date(year=2020, month=6, day=28)
# 2
stream_song_filter = Q(song__uploaded_by=artist)
stream_time_filter = Q(timestamp__gte=start_date, timestamp__lte=end_date)
# 3
streams = Stream.objects.filter(stream_song_filter, stream_time_filter)
stream_count = streams.aggregate(num_streams=Count('id'))
return stream_count
| 13,589
|
def flip_dict(d):
"""Returns a dict with values and keys reversed.
Args:
d: The dict to flip the values and keys of.
Returns:
A dict whose keys are the values of the original dict, and whose values
are the corresponding keys.
"""
return {v: k for k, v in d.items()}
| 13,590
|
def branch_exists(branch: str) -> bool:
""" Check if the branch exists in the current Git repo. """
try:
subprocess.check_call(
["git", "rev-parse", "--quiet", "--verify", branch],
stdout=subprocess.DEVNULL,
)
return True
except subprocess.CalledProcessError:
return False
| 13,591
|
def assert_equal(actual: Literal["oim"], desired: Literal["oim"]):
"""
usage.statsmodels: 2
"""
...
| 13,592
|
def has_sample(args):
"""Returns if some kind of sample id is given in args.
"""
return args.sample or args.samples or args.sample_tag
| 13,593
|
def get_search_selection(config: models.Config) -> models.Config:
"""Gets search criteria for search mode"""
search_selection: models.SearchSelection = models.SearchSelection()
print('\nPlease select what system you want to search')
print('Press Enter to do a general site wide search')
helpers.print_console_list()
while True:
user_input: str = sys.stdin.readline()
try:
if user_input == '\n':
search_selection.System = 'general'
config.Query.SearchSelections = search_selection
break
if not (int(user_input) > 17 or int(user_input) < 0):
search_selection.System = \
helpers.get_selection_from_num(int(user_input))
config.Query.SearchSelections = search_selection
break
else:
print('Not a selection')
print('Please select a value from the list')
except ValueError:
print('Please select a value from the list')
continue
print('Input what rom you want to search for')
search_selection.Query = sys.stdin.readline()
return config
| 13,594
|
def make_key_type(func: Callable[..., Any]) -> Type[CallKey]:
"""Construct a type representing a functions signature."""
sig = inspect.signature(func)
# make a format string that unpacks and names the parameters nicely
repr_fmt = (
(
func.__name__
if "<locals>" in func.__qualname__
else func.__module__ + "." + func.__qualname__
)
+ "("
+ ", ".join(name + "={!r}" for name in sig.parameters.keys())
+ ")"
)
    # patch the repr so it looks pretty
def _repr(self: Any) -> str:
return repr_fmt.format(*self[:-1])
key_type = type(
func.__name__,
(
namedtuple(
func.__name__,
tuple(sig.parameters.keys()) + ("func__",),
defaults=tuple(p.default for p in sig.parameters.values()) + (func,),
module=func.__module__,
),
CallKey,
),
{
"__repr__": _repr,
"__func__": func,
"__module__": func.__module__,
"__signature__": sig,
"from_call": classmethod(_from_call),
},
)
return key_type
| 13,595
|
def gen_string(prop=None):
"""
Generate String value
:param prop: dict
Examples: {'minLength': 10, 'maxLength': 154}
{'pattern': '^\\d+\\w*$'}
"""
if not prop:
prop = {}
min_length = prop.get("minLength", 1)
max_length = prop.get("maxLength", 1024)
pattern = prop.get("pattern", None)
if pattern:
        if "minLength" in prop or "maxLength" in prop:
            # TODO implement pattern combined with explicit min/max length
            raise NotImplementedError
return Xeger().xeger(pattern)
return random_string(strlen=randint(min_length, max_length))
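# Hypothetical usage (relies on the module-level random_string helper and the
# Xeger class from the `xeger` package used above); outputs are random samples:
print(gen_string({"minLength": 5, "maxLength": 10}))   # e.g. 'qhzTkdP'
print(gen_string({"pattern": r"^\d+\w*$"}))            # e.g. '42abc'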
| 13,596
|
async def test_error_fetching_new_version_bad_json(hass, aioclient_mock):
"""Test we handle json error while fetching new version."""
aioclient_mock.post(updater.UPDATER_URL, text="not json")
with patch(
"homeassistant.helpers.system_info.async_get_system_info",
Mock(return_value=mock_coro({"fake": "bla"})),
):
res = await updater.get_newest_version(hass, MOCK_HUUID, False)
assert res is None
| 13,597
|
def process_documents(args: dict[str, bool]) -> None:
"""Process the documents.
Args:
args (dict[str, bool]): The processing steps based on CLI arguments.
"""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# Connect to the database.
db.driver.connect_db()
# Check the version of the database.
check_db_up_to_date()
cfg.glob.run_run_id = db.dml.select_run_run_id_last() + 1
# Load the data from the database table 'language'.
load_data_from_dbt_language()
# Process the documents in the inbox file directory.
if args[cfg.glob.RUN_ACTION_PROCESS_INBOX]:
start_time_process = time.perf_counter_ns()
cfg.glob.document_current_step = cfg.glob.DOCUMENT_STEP_INBOX
process_inbox_directory()
utils.progress_msg(f"Time : {round((time.perf_counter_ns() - start_time_process) / 1000000000, 2) :10.2f} s")
# Convert the scanned image pdf documents to image files.
if args[cfg.glob.RUN_ACTION_PDF_2_IMAGE]:
start_time_process = time.perf_counter_ns()
cfg.glob.document_current_step = cfg.glob.DOCUMENT_STEP_PDF2IMAGE
process_convert_pdf_2_image()
utils.progress_msg(f"Time : {round((time.perf_counter_ns() - start_time_process) / 1000000000, 2) :10.2f} s")
# Convert the image documents to pdf files.
if args[cfg.glob.RUN_ACTION_IMAGE_2_PDF]:
start_time_process = time.perf_counter_ns()
cfg.glob.document_current_step = cfg.glob.DOCUMENT_STEP_TESSERACT
process_convert_image_2_pdf()
utils.progress_msg(f"Time : {round((time.perf_counter_ns() - start_time_process) / 1000000000, 2) :10.2f} s")
# Convert the non-pdf documents to pdf files.
if args[cfg.glob.RUN_ACTION_NON_PDF_2_PDF]:
start_time_process = time.perf_counter_ns()
cfg.glob.document_current_step = cfg.glob.DOCUMENT_STEP_PANDOC
process_convert_non_pdf_2_pdf()
utils.progress_msg(f"Time : {round((time.perf_counter_ns() - start_time_process) / 1000000000, 2) :10.2f} s")
# Extract text and metadata from pdf documents.
if args[cfg.glob.RUN_ACTION_TEXT_FROM_PDF]:
start_time_process = time.perf_counter_ns()
cfg.glob.document_current_step = cfg.glob.DOCUMENT_STEP_PDFLIB
process_extract_text_from_pdf()
utils.progress_msg(f"Time : {round((time.perf_counter_ns() - start_time_process) / 1000000000, 2) :10.2f} s")
# Store the document structure from the parser result.
if args[cfg.glob.RUN_ACTION_STORE_FROM_PARSER]:
start_time_process = time.perf_counter_ns()
process_store_from_parser()
utils.progress_msg(f"Time : {round((time.perf_counter_ns() - start_time_process) / 1000000000, 2) :10.2f} s")
# Create document token.
if args[cfg.glob.RUN_ACTION_TOKENIZE]:
start_time_process = time.perf_counter_ns()
cfg.glob.document_current_step = cfg.glob.DOCUMENT_STEP_TOKENIZE
process_tokenize()
utils.progress_msg(f"Time : {round((time.perf_counter_ns() - start_time_process) / 1000000000, 2) :10.2f} s")
# Disconnect from the database.
db.driver.disconnect_db()
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
| 13,598
|
def standardize_data(data, eps=None):
"""
    Standardize each image to have zero mean and unit standard deviation (z-score)
    Inputs:
        data: [np.ndarray] unnormalized data
    Outputs:
        data: [np.ndarray] normalized data
        data_mean: [np.ndarray] per-example means that were subtracted
        data_std: [np.ndarray] per-example standard deviations used for scaling
"""
if eps is None:
eps = 1.0 / np.sqrt(data[0,...].size)
data, orig_shape = reshape_data(data, flatten=True)[:2] # Adds channel dimension if it's missing
num_examples = data.shape[0]
data_axis = tuple(range(data.ndim)[1:]) # standardize each example individually
data_mean = np.mean(data, axis=data_axis, keepdims=True)
data_true_std = np.std(data, axis=data_axis, keepdims=True)
data_std = np.where(data_true_std >= eps, data_true_std,
eps*np.ones_like(data_true_std))
for idx in range(data.shape[0]): # TODO: Broadcasting should work here
data[idx, ...] = (data[idx, ...] - data_mean[idx]) / data_std[idx]
if data.shape != orig_shape:
data = reshape_data(data, out_shape=orig_shape)[0]
return data, data_mean, data_std
| 13,599
|