content
stringlengths 22
815k
| id
int64 0
4.91M
|
|---|---|
def linkCount(tupleOfLists, listNumber, lowerBound, upperBound):
    """Count links within [lowerBound, upperBound] across selected lists.

    Specialty helper for statistics over the number of links that lie in
    a given range; primarily intended as a private helper.

    tupleOfLists -- usually a linkograph entry, e.g. ({'A', 'B'}, {1,2}, {4,5}).
    listNumber -- indices of the lists in the entry that should be counted.
    lowerBound -- lowest link index that should be considered.
    upperBound -- highest link index that should be considered.

    Example: with the entry above, listNumber [1] counts only links in
    {1,2}, [2] counts only links in {4,5}, and [1,2] counts both.
    """
    total = 0
    for idx in listNumber:
        total += sum(
            1 for link in tupleOfLists[idx]
            if lowerBound <= link <= upperBound
        )
    return total
| 25,600
|
def field_groups(pairs, aligned_fields):
    """
    Yield lists of (marker, value) pairs where all pairs in the list are
    aligned. Unaligned fields come back as singleton lists, and a repeated
    aligned marker (e.g. a wrapped field) starts a new group.
    """
    group, used = [], set()
    for marker, value in pairs:
        aligned = marker in aligned_fields
        # an unaligned or repeated marker terminates the group in progress
        if not aligned or marker in used:
            if group:
                yield group
                group, used = [], set()
            if not aligned:
                yield [(marker, value)]
                continue
        group.append((marker, value))
        used.add(marker)
    # flush the trailing group, if any
    if group:
        yield group
| 25,601
|
def encode_md5(plain_text):
    """
    Encode the plain text by md5
    :param plain_text:
    :return: cipher text
    """
    # EXT_STRING acts as a fixed suffix/salt appended before hashing
    salted = plain_text + EXT_STRING
    return md5(salted.encode('utf-8')).hexdigest()
| 25,602
|
def delta(x, y, assume_normal=True, percentiles=(2.5, 97.5),
          min_observations=20, nruns=10000, relative=False, x_weights=1, y_weights=1):
    """
    Calculates the difference of means between the samples (x-y) in a
    statistical sense, i.e. with confidence intervals.

    NaNs are ignored: treated as if they weren't included at all. This is done
    because at this level we cannot determine what a NaN means. In some cases,
    a NaN represents missing data that should be completely ignored, and in some
    cases it represents inapplicable (like PCII for non-ordering customers) - in
    which case the NaNs should be replaced by zeros at a higher level. Replacing
    with zeros, however, would be completely incorrect for return rates.

    Computation is done in form of treatment minus control, i.e. x-y.

    Args:
        x (array_like): sample of a treatment group
        y (array_like): sample of a control group
        assume_normal (boolean): specifies whether normal distribution
            assumptions can be made
        percentiles (iterable): percentile values for confidence bounds.
            (Default is a tuple: the original mutable-list default was a
            shared-state hazard.)
        min_observations (integer): minimum number of observations needed
        nruns (integer): only used if assume_normal is false
        relative (boolean): if relative==True, then the values will be returned
            as distances below and above the mean, respectively, rather than the
            absolute values. In this case, the interval is mean-ret_val[0] to
            mean+ret_val[1]. This is more useful in many situations because it
            corresponds with the sem() and std() functions.
        x_weights (list): weights for the x vector, in order to calculate
            the weighted mean and confidence intervals, which is equivalent
            to the overall metric. This weighted approach is only relevant
            for ratios.
        y_weights (list): weights for the y vector; see x_weights.

    Returns:
        tuple:
            * mu (float): mean value of the difference
            * c_i (dict): percentile levels (index) and values
            * ss_x (int): size of x excluding NA values
            * ss_y (int): size of y excluding NA values
            * _x (float): absolute mean of x
            * _y (float): absolute mean of y

    Raises:
        ValueError: if either sample is None.
    """
    # Checking if data was provided
    if x is None or y is None:
        raise ValueError('Please provide two non-None samples.')
    # Coercing missing values to right format (weights applied element-wise)
    _x = np.array(x, dtype=float) * x_weights
    _y = np.array(y, dtype=float) * y_weights
    x_nan = np.isnan(_x).sum()
    y_nan = np.isnan(_y).sum()
    if x_nan > 0:
        warnings.warn('Discarding ' + str(x_nan) + ' NaN(s) in the x array!')
    if y_nan > 0:
        warnings.warn('Discarding ' + str(y_nan) + ' NaN(s) in the y array!')
    ss_x = sample_size(_x)
    ss_y = sample_size(_y)
    # Checking if enough observations are left after dropping NaNs
    if min(ss_x, ss_y) < min_observations:
        # Not enough data: NaN mean and NaN confidence bounds
        mu = np.nan
        c_i = {p: np.nan for p in percentiles}
    else:
        # Computing the mean
        mu = _delta_mean(_x, _y)
        # Computing the confidence intervals
        if assume_normal:
            c_i = normal_sample_difference(x=_x, y=_y, percentiles=percentiles,
                                           relative=relative)
        else:
            c_i, _ = bootstrap(x=_x, y=_y, percentiles=percentiles, nruns=nruns,
                               relative=relative)
    # Return the result structure
    return mu, c_i, ss_x, ss_y, np.nanmean(_x), np.nanmean(_y)
| 25,603
|
def CircleCircumference(curve_id, segment_index=-1):
    """Returns the circumference of a circle curve object
    Parameters:
      curve_id = identifier of a curve object
      segment_index [opt] = identifies the curve segment if
      curve_id identifies a polycurve
    Returns:
      The circumference of the circle if successful.
    """
    # NOTE(review): `circle` is not defined anywhere in this block and neither
    # parameter is used — presumably the circle geometry should first be
    # resolved from curve_id/segment_index (cf. rhinoscript-style coercion
    # helpers). As written this raises NameError; verify upstream.
    return circle.Circumference
| 25,604
|
def set_authentication(application, authentication):
    """Set whether the API needs to be authenticated or not.

    Generator intended for context-manager use: while active, a Flask
    ``appcontext_pushed`` handler stores the flag on ``g.authentication_``
    for every app context pushed on *application*.

    :param application: the Flask application to hook.
    :param authentication: bool flag exposed as ``g.authentication_``.
    :raises TypeError: if ``authentication`` is not a bool.
    """
    if not isinstance(authentication, bool):
        raise TypeError("Authentication flag must be of type <bool>")

    def handler(sender, **kwargs):
        # record the flag on the application-context globals
        g.authentication_ = authentication

    # NOTE(review): presumably wrapped with @contextmanager at the call site
    # (the bare generator yields exactly once inside the connection scope).
    with appcontext_pushed.connected_to(handler, application):
        yield
| 25,605
|
def generate_handshake(info_hash, peer_id):
    """
    Build the BitTorrent handshake message — the required first message a
    client transmits. It is (49+len(pstr)) bytes long, in the form:
        <pstrlen><pstr><reserved><info_hash><peer_id>
    Where:
        pstrlen: string length of <pstr>, as a single raw byte
        pstr: string identifier of the protocol
        reserved: eight (8) reserved bytes, all zeroes in current
            implementations; each bit can be used to change protocol behavior
        info_hash: 20-byte SHA1 hash of the info key in the metainfo file
            (same info_hash transmitted in tracker requests)
        peer_id: 20-byte unique client ID (usually the same peer_id
            transmitted in tracker requests)
    In version 1.0 of the BitTorrent protocol:
        pstrlen = 19 and pstr = "BitTorrent protocol".

    :param info_hash: 20-byte SHA1 digest (bytes)
    :param peer_id: 20-byte client identifier (bytes)
    :return: the complete handshake as bytes
    """
    pstr = b"BitTorrent protocol"
    # BUG FIX: bytes(chr(n)) raises TypeError on Python 3 (bytes(str) needs
    # an encoding); bytes([n]) yields the intended single raw byte.
    pstrlen = bytes([len(pstr)])
    reserved = b"\x00" * 8  # 8 zeroes
    handshake = pstrlen + pstr + reserved + info_hash + peer_id
    assert len(handshake) == 49 + len(pstr)
    assert pstrlen == bytes([19])
    return handshake
| 25,606
|
def tokenizer_decorator(func, **kwargs):
    """
    Wrap a tokenizer function.

    The wrapper calls *func* on ``name``, merges any returned token dict
    into ``info`` and strips the matched token text out of ``name``.

    Raises:
        TypeError: if *func* is not callable.
        TokenizerError: if *func* raises while tokenizing.
    """
    if not callable(func):
        raise TypeError(f"func {func} not callable")

    @wraps(func)
    def wrapper(name, info, **kwargs):
        try:
            # BUG FIX: the original tested `("patterns" and "token_name") in
            # kwargs`, which evaluates to `"token_name" in kwargs` only —
            # both keys must be present to forward all kwargs.
            if "patterns" in kwargs and "token_name" in kwargs:
                token = func(name, **kwargs)
            elif "reference_date" in kwargs:
                token = func(name, reference_date=kwargs.get("reference_date", None))
            elif "template_file_found" in kwargs:
                token = func(
                    name, template_file_found=kwargs.get("template_file_found", None)
                )
            else:
                token = func(name)
        except TypeError as ex:
            logger.error(f"func: {func.__name__}, name: {name}\n{kwargs}")
            raise TokenizerError(ex) from ex
        except Exception as ex:
            logger.error(f"func: {func.__name__}, name: {name}\n{kwargs}")
            raise TokenizerError(ex) from ex
        if not token:
            # nothing recognized: pass name/info through unchanged
            return name, info
        str_token_values = [i for i in token.values() if isinstance(i, str)]
        str_token_values_in_name = [i for i in str_token_values if i in name]
        if str_token_values:
            for val in str_token_values_in_name:
                # only strip `val` if it is not a substring of a longer match
                val_is_subset = [
                    i
                    for i in str_token_values_in_name
                    if val in i and len(i) > len(val)
                ]
                if not val_is_subset:
                    name = replace_and_strip(name, val, **kwargs)
        info.update(**token)
        return name, info

    return wrapper
| 25,607
|
def reverse_complement(seq):
    """
    Biological reverse complementation. Case in sequences are retained, and
    IUPAC codes are supported. Code modified from:
    http://shootout.alioth.debian.org/u32/program.php?test=revcomp&lang=python3&id=4
    """
    # complement every base via the module-level translation table,
    # then reverse the whole sequence
    complemented = seq.translate(_nt_comp_table)
    return complemented[::-1]
| 25,608
|
def maybe_extract_from_zipfile(zip_file):
    """
    Extract files needed for Promtimer to run if necessary. Files needed by Promtimer are:
    * everything under the stats_snapshot directory; nothing is extracted if the
      stats_snapshot directory is already present
    * couchbase.log: extracted if not present

    :param zip_file: an open ``zipfile.ZipFile`` over a cbcollect bundle
    """
    root = zipfile.Path(zip_file)
    for p in root.iterdir():
        if is_cbcollect_dir(p):
            # skip re-extracting stats if the snapshot dir already exists on disk
            stats_snapshot_exists = snapshot_dir_exists(pathlib.Path(p.name))
            logging.debug("{}/stats_snapshot exists: {}".format(p.name, stats_snapshot_exists))
            extracting = False
            for item in zip_file.infolist():
                # normalize the archive path to a host path for existence checks
                item_path = path.join(*item.filename.split('/'))
                should_extract = False
                if is_stats_snapshot_file(item.filename):
                    should_extract = not stats_snapshot_exists
                elif item.filename.endswith(COUCHBASE_LOG):
                    should_extract = not path.exists(item_path)
                if should_extract:
                    logging.debug("zipfile item:{}, exists:{}".format(item_path, path.exists(item_path)))
                    if not extracting:
                        # log the info line once, on the first extracted item
                        extracting = True
                        logging.info('extracting stats, couchbase.log from cbcollect zip:{}'
                                     .format(zip_file.filename))
                    zip_file.extract(item)
| 25,609
|
def computeLPS(s, n):
    """Compute the longest-proper-prefix-that-is-also-a-suffix length of s[:n].

    This is the classic KMP failure-function computation.

    Arguments:
        s -- string (or indexable sequence) to analyse
        n -- number of leading characters of ``s`` to consider

    Returns:
        Length of the longest proper prefix of ``s[:n]`` that is also a
        suffix of it; 0 for empty input.
    """
    # Guard the empty case (the original indexed lps[-1] on an empty list).
    if n == 0:
        return 0
    prev = 0  # length of the previous longest prefix suffix
    lps = [0] * n
    i = 1
    # the loop calculates lps[i] for i = 1 to n-1
    while i < n:
        if s[i] == s[prev]:
            prev += 1
            lps[i] = prev
            i += 1
        elif prev != 0:
            # Fall back to the next shorter border without advancing i.
            # Consider "AAACAAAA" with i = 7 — similar to the search step.
            prev = lps[prev - 1]
        else:
            lps[i] = 0
            i += 1
    # debug print removed — it polluted stdout on every call
    return lps[n - 1]
| 25,610
|
def roundedCorner(pc, p1, p2, r):
    """
    Draw the corner at pc (between segments pc-p1 and pc-p2) rounded with
    radius r, clamped so the arc fits on the shorter segment.

    Based on Stackoverflow C# rounded corner post
    https://stackoverflow.com/questions/24771828/algorithm-for-creating-rounded-corners-in-a-polygon

    NOTE(review): PVector, atan2/tan/sqrt, pushStyle, vertex, arc, PI,
    TWO_PI etc. are presumably supplied by the Processing (Python mode)
    sketch runtime — confirm before reusing elsewhere.
    """
    def GetProportionPoint(pt, segment, L, dx, dy):
        # point at distance `segment` from pt back along direction (dx, dy);
        # guard against a zero-length direction vector
        factor = float(segment) / L if L != 0 else segment
        return PVector((pt.x - dx * factor), (pt.y - dy * factor))
    # Vector 1
    dx1 = pc.x - p1.x
    dy1 = pc.y - p1.y
    # Vector 2
    dx2 = pc.x - p2.x
    dy2 = pc.y - p2.y
    # Angle between vector 1 and vector 2 divided by 2
    angle = (atan2(dy1, dx1) - atan2(dy2, dx2)) / 2
    # The length of segment between angular point and the
    # points of intersection with the circle of a given radius
    tng = abs(tan(angle))
    segment = r / tng if tng != 0 else r
    # Check the segment: clamp so the tangent points stay on the sides
    length1 = sqrt(dx1 * dx1 + dy1 * dy1)
    length2 = sqrt(dx2 * dx2 + dy2 * dy2)
    min_len = min(length1, length2)
    if segment > min_len:
        segment = min_len
        # largest radius that still fits the shorter side
        max_r = min_len * abs(tan(angle))
    else:
        max_r = r
    # Points of intersection are calculated by the proportion between
    # length of vector and the length of the segment.
    p1Cross = GetProportionPoint(pc, segment, length1, dx1, dy1)
    p2Cross = GetProportionPoint(pc, segment, length2, dx2, dy2)
    # Calculation of the coordinates of the circle
    # center by the addition of angular vectors.
    dx = pc.x * 2 - p1Cross.x - p2Cross.x
    dy = pc.y * 2 - p1Cross.y - p2Cross.y
    L = sqrt(dx * dx + dy * dy)
    d = sqrt(segment * segment + max_r * max_r)
    circlePoint = GetProportionPoint(pc, d, L, dx, dy)
    # StartAngle and EndAngle of arc
    startAngle = atan2(p1Cross.y - circlePoint.y, p1Cross.x - circlePoint.x)
    endAngle = atan2(p2Cross.y - circlePoint.y, p2Cross.x - circlePoint.x)
    # Sweep angle
    sweepAngle = endAngle - startAngle
    # Some additional checks: keep the sweep positive and take the short way
    if sweepAngle < 0:
        startAngle, endAngle = endAngle, startAngle
        sweepAngle = -sweepAngle
    if sweepAngle > PI:
        startAngle, endAngle = endAngle, startAngle
        sweepAngle = TWO_PI - sweepAngle
    # Draw result using graphics
    # noStroke()
    with pushStyle():
        noStroke()
        beginShape()
        vertex(p1.x, p1.y)
        vertex(p1Cross.x, p1Cross.y)
        vertex(p2Cross.x, p2Cross.y)
        vertex(p2.x, p2.y)
        endShape(CLOSE)
    line(p1.x, p1.y, p1Cross.x, p1Cross.y)
    line(p2.x, p2.y, p2Cross.x, p2Cross.y)
    arc(circlePoint.x, circlePoint.y, 2 * max_r, 2 * max_r,
        startAngle, startAngle + sweepAngle, OPEN)
| 25,611
|
def depth_first_search(starting_nodes, visitor, outedges_func = node.outward):
    """Run a depth-first traversal from each of the given start nodes.

    Falls back to every registered node (in registration order) when no
    starting nodes are supplied; nodes already coloured are skipped.
    """
    colours = defaultdict(int)  # int() == WHITE, i.e. unvisited
    # pick the roots: explicit starts when given, otherwise all known nodes
    roots = starting_nodes if len(starting_nodes) else node._all_nodes
    for root in roots:
        if colours[root] == WHITE:
            visitor.start_vertex(root)
            depth_first_visit(root, visitor, colours, outedges_func)
| 25,612
|
def synthetic_data(n_points=1000, noise=0.05,
                   random_state=None, kind="unit_cube",
                   n_classes=None, n_occur=1, legacy_labels=False, **kwargs):
    """Make a synthetic dataset
    A sample dataset generators in the style of sklearn's
    `sample_generators`. This adds other functions found in the Matlab
    toolkit for Dimensionality Reduction
    Parameters
    ----------
    kind: {'unit_cube', 'swiss_roll', 'broken_swiss_roll', 'twinpeaks', 'difficult'}
        The type of synthetic dataset
    legacy_labels: boolean
        If True, try and reproduce the labels from the Matlab Toolkit for
        Dimensionality Reduction. (overrides any value in n_classes)
        This usually only works if algorithm-specific coefficient choices
        (e.g. `height` for swiss_roll) are left at their default values
    n_points : int, optional (default=1000)
        The total number of points generated.
    n_classes: None or int
        If None, target vector is based on underlying manifold coordinate
        If int, the manifold coordinate is bucketized into this many classes.
    n_occur: int
        Number of occurrences of a given class (along a given axis)
        ignored if n_classes = None
    noise : double or None (default=0.05)
        Standard deviation of Gaussian noise added to the data.
        If None, no noise is added.
    random_state : int, RandomState instance or None (default)
        Determines random number generation for dataset shuffling and noise.
        Pass an int for reproducible output across multiple function calls.
    Additional Parameters
    ---------------------
    difficult:
        n_dims: int (default 5)
            Number of dimensions to embed
    swiss_roll:
    broken_swiss_roll:
        height: float (default 30.)
            scaling to apply to y dimension
    Returns
    -------
    X : array of shape [n_points, 2]
        The generated samples.
    y : array of shape [n_points]
        The labels for class membership of each point.
    """
    generator = check_random_state(random_state)
    metadata = {
        "synthetic_type": kind,
        "n_points": n_points,
        "noise": noise
    }
    if kind == 'unit_cube':
        # uniform points in [-1, 1]^3
        x = 2 * (generator.rand(n_points) - 0.5)
        y = 2 * (generator.rand(n_points) - 0.5)
        z = 2 * (generator.rand(n_points) - 0.5)
        X = np.column_stack((x, y, z))
        shift = np.array([1.])
        scale = np.array([2.])
        labels = checkerboard(X, shift_factors=shift, scale_factors=scale, n_occur=n_occur, n_classes=n_classes)
        # NOTE(review): np.concatenate(..., axis=0) flattens (x, y, z) into a
        # single length-3n vector (.T is a no-op on 1-D), unlike the
        # column_stack used for manifold_coords in other branches — confirm
        # this is intended.
        metadata['manifold_coords'] = np.concatenate((x,y,z), axis=0).T
    elif kind == 'twinpeaks':
        # NOTE(review): `inc`/`x` appear unused below — confirm leftover.
        inc = 1.5 / np.sqrt(n_points)
        x = np.arange(-1, 1, inc)
        xy = 1 - 2 * generator.rand(2, n_points)
        z = np.sin(np.pi * xy[0, :]) * np.tanh(3 * xy[1, :])
        X = np.vstack([xy, z * 10.]).T  # + noise * generator.randn(n_points, 3)
        t = xy.T
        metadata['manifold_coords'] = t
        if legacy_labels is True:
            # reproduce the Matlab toolkit's label recipe
            labels = np.remainder(np.sum(np.round((X + np.tile(np.min(X, axis=0), (X.shape[0], 1))) / 10.), axis=1), 2)
        elif n_classes is None:
            labels = 1-z
        else:
            shift = np.array([1.])
            scale = np.array([2.])
            labels = checkerboard(t, shift_factors=shift, scale_factors=scale,
                                  n_classes=n_classes, n_occur=n_occur)
    elif kind == 'swiss_roll':
        height = kwargs.pop('height', 30.)
        # t parameterizes position along the roll, y its width
        t = 1.5 * np.pi * (1.0 + 2.0 * generator.rand(n_points))
        y = height * generator.rand(*t.shape)
        manifold_coords = np.column_stack((t, y))
        X = _parameterized_swiss_roll(manifold_coords)
        metadata['manifold_coords'] = manifold_coords
        if legacy_labels is True:
            labels = np.remainder(np.round(t / 2.) + np.round(height / 12.), 2)
        else:
            scale = np.array([3*np.pi])
            shift = np.array([-1.5*np.pi])
            labels = checkerboard(t, shift_factors=shift, scale_factors=scale,
                                  n_classes=n_classes, n_occur=n_occur)
    elif kind == 'broken_swiss_roll':
        height = kwargs.pop('height', 30.)
        # two disjoint t-ranges create the "break" in the roll
        np1 = int(np.ceil(n_points / 2.0))
        t1 = 1.5 * np.pi * (1.0 + 2.0 * (generator.rand(np1) * 0.4))
        t2 = 1.5 * np.pi * (1.0 + 2.0 * (generator.rand(n_points - np1) * 0.4 + 0.6))
        t = np.concatenate((t1, t2))
        y = height * generator.rand(*t.shape)
        manifold_coords = np.column_stack((t, y))
        X = _parameterized_swiss_roll(manifold_coords)
        metadata['manifold_coords'] = manifold_coords
        if legacy_labels is True:
            labels = np.remainder(np.round(t / 2.) + np.round(height / 12.), 2)
        else:
            scale = np.array([3*np.pi])
            shift = np.array([-1.5*np.pi])
            labels = checkerboard(t, shift_factors=shift, scale_factors=scale,
                                  n_classes=n_classes, n_occur=n_occur)
    elif kind == 'difficult':
        n_dims = kwargs.pop("n_dims", 5)
        # lattice of points on an n_dims-dimensional grid in [0, 1]
        points_per_dim = int(np.round(float(n_points ** (1.0 / n_dims))))
        l = np.linspace(0, 1, num=points_per_dim)
        t = np.array(list(_combn(l, n_dims)))
        X = np.vstack((np.cos(t[:,0]),
                       np.tanh(3 * t[:,1]),
                       t[:,0] + t[:,2],
                       t[:,3] * np.sin(t[:,1]),
                       np.sin(t[:,0] + t[:,4]),
                       t[:,4] * np.cos(t[:,1]),
                       t[:,4] + t[:,3],
                       t[:,1],
                       t[:,2] * t[:,3],
                       t[:,0])).T
        tt = 1 + np.round(t)
        # Generate labels for dataset (2x2x2x2x2 checkerboard pattern)
        labels = np.remainder(tt.sum(axis=1), 2)
        metadata['n_dims'] = n_dims
        metadata['manifold_coords'] = t
    else:
        raise Exception(f"Unknown synthetic dataset type: {kind}")
    if noise is not None:
        X += noise * generator.randn(*X.shape)
    return X, labels, metadata
| 25,613
|
def parse_args():
    """
    Build and evaluate the command-line interface for the wiki text
    extractor, returning the parsed argument namespace.
    """
    arg_parser = argparse.ArgumentParser(description="Wiki Text Extractor")
    arg_parser.add_argument(
        "-i", "--input_dir", dest="input_dir", type=str, metavar="PATH",
        default="./extracted", help="Input directory path ")
    arg_parser.add_argument(
        "-o", "--output_dir", dest="output_dir", type=str, metavar="PATH",
        default="./wiki_text", help="Output directory path")
    arg_parser.add_argument(
        "-t", "--output_type", dest="output_type", type=int, metavar="INT",
        default=1, choices=[1, 2],
        help="Output in a single file or multiple file")
    return arg_parser.parse_args()
| 25,614
|
def _load_default_profiles():
    # type: () -> Dict[str, Any]
    """Load all the profiles installed on the system.

    Abstract profiles are skipped; every concrete profile has its base
    profiles expanded before being stored.
    """
    loaded = {}
    for profile_path in _iter_default_profile_file_paths():
        profile_name = _get_profile_name(profile_path)
        if _is_abstract_profile(profile_name):
            # abstract profiles only serve as bases, never loaded directly
            continue
        definition = _read_profile_definition(profile_path)
        try:
            recursively_expand_base_profiles(definition)
        except Exception:
            logger.error("Could not expand base profile %s", profile_path)
            raise
        loaded[profile_name] = {'definition': definition}
    return loaded
| 25,615
|
async def reload(ctx, cog: str = None):
    """ Reload cogs

    With no argument, reloads every ``cogs/*.py`` extension; with a cog
    name, reloads just that cog (or reports that it doesn't exist).
    """
    async def reload_cog(ctx, cog):
        """ Reloads a single cog and reports success/failure to the channel """
        try:
            bot.reload_extension(f"cogs.{cog}")
            await ctx.send(f"Reloaded {cog}")
        except Exception as e:
            await ctx.send(f"Couldn't reload {cog}, " + str(e))
    if not cog:
        # no cog given: reload everything found in the cogs directory
        for cog in os.listdir(Path("./cogs")):
            if cog.endswith(".py"):
                cog = cog.replace(".py", "")
                await reload_cog(ctx, cog)
    else:
        if os.path.exists(Path(f"./cogs/{cog}.py")):
            await reload_cog(ctx, cog)
        else:
            await ctx.send(f"{cog} doesn't exist")
| 25,616
|
def train_model(model, train_loader, valid_loader, learning_rate, device,
                epochs):
    """Trains a model with train_loader and validates it with valid_loader

    Arguments:
        model -- Model to train (only `model.classifier` parameters are
                 optimized; the rest of the network is left frozen)
        train_loader -- Data to train
        valid_loader -- Data to validate the training
        learning_rate -- Learning rate
        device -- Device where the computations will be executed
        epochs -- Number of epochs to train

    Returns:
        The trained model
    """
    # Our loss function will be 'negative log likelihood'
    criterion = nn.NLLLoss()
    # We only want to optimize our classifier parameters
    optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
    # makes PyTorch use 'device' to compute
    model.to(device)
    criterion.to(device)
    print_every = 25  # report training/validation metrics every 25 batches
    step = 0
    for epoch in range(epochs):  # for each epoch
        running_loss = 0
        print("Epoch: {}/{}".format(epoch+1, epochs))
        print("==========")
        for inputs, labels in train_loader:  # for each batch of data / label
            step += 1
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()  # resets gradients to zero
            output = model.forward(inputs)  # feed forward
            loss = criterion(output, labels)  # calculate the loss
            loss.backward()  # back propagate the loss
            optimizer.step()  # do gradient descent (update weights)
            running_loss += loss.item()
            if step % print_every == 0:
                model.eval()  # Turn off dropout to make the validation pass
                # Turn off gradients for the validation pass
                with torch.no_grad():
                    valid_loss, accuracy = validate_model(model, valid_loader,
                                                          criterion, device)
                print("Training Loss: {:.3f}.. ".format(
                          running_loss/print_every),
                      "Validation Loss: {:.3f}.. ".format(
                          valid_loss/len(valid_loader)),
                      "Validation Accuracy: {:.3f}".format(
                          accuracy/len(valid_loader)))
                running_loss = 0
                model.train()  # enable dropout back
        # end-of-epoch validation pass
        model.eval()  # Turn off dropout to make the validation pass
        with torch.no_grad():  # Turn off gradients for the validation pass
            valid_loss, accuracy = validate_model(
                model, valid_loader, criterion, device)
        print("\nEpoch: {}/{}.. ".format(epoch+1, epochs),
              "Validation Loss: {:.3f}.. ".format(
                  valid_loss/len(valid_loader)),
              "Validation Accuracy: {:.3f}\n".format(
                  accuracy/len(valid_loader)))
        model.train()  # enable dropout back
    return model
| 25,617
|
def get_weapon_techs(fighter=None):
    """If fighter is None, return list of all weapon techs.
    If fighter is given, return list of weapon techs fighter has."""
    if fighter is not None:
        # filter the fighter's techs down to the weapon ones
        return [tech for tech in fighter.techs
                if get_tech_obj(tech).is_weapon_tech]
    return weapon_tech_names
| 25,618
|
def read_csv(
    _0: str,
    /,
    *,
    _: None,
    cache_dates: bool,
    chunksize: None,
    comment: None,
    compression: Literal["infer"],
    converters: None,
    date_parser: None,
    dayfirst: bool,
    decimal: Literal["."],
    delim_whitespace: bool,
    delimiter: None,
    dialect: None,
    doublequote: bool,
    dtype: None,
    encoding: None,
    engine: None,
    error_bad_lines: bool,
    escapechar: None,
    false_values: None,
    float_precision: None,
    header: Literal["infer"],
    index_col: None,
    infer_datetime_format: bool,
    iterator: bool,
    keep_date_col: bool,
    keep_default_na: bool,
    kwargs: Dict[str, Union[bool, None, Literal["infer", ",", "test.csv"]]],
    lineterminator: None,
    low_memory: bool,
    mangle_dupe_cols: bool,
    memory_map: bool,
    na_filter: bool,
    na_values: None,
    names: None,
    nrows: int,
    parse_dates: List[List[Literal["col4", "col2"]]],
    prefix: None,
    quotechar: Literal['"'],
    quoting: int,
    sep: Literal[","],
    skip_blank_lines: bool,
    skipfooter: int,
    skipinitialspace: bool,
    skiprows: None,
    squeeze: bool,
    thousands: None,
    true_values: None,
    usecols: None,
    verbose: bool,
    warn_bad_lines: bool,
):
    """
    Auto-generated signature stub recording observed call shapes; the body
    is intentionally empty (``...``).

    usage.modin: 1
    """
    ...
| 25,619
|
def xml_to_values(l):
    """
    Return a list of values from a list of XML data potentially including null values.
    """
    # dict entries represent XML nulls and map to None; everything else
    # is converted with to_float
    return [None if isinstance(element, dict) else to_float(element)
            for element in l]
| 25,620
|
def _get_options(raw_options, apply_config):
    """Return parsed options.

    ``raw_options`` may be falsy (defaults are parsed), a dict of
    option-name -> value overrides applied on top of the defaults, or an
    already-parsed options object (returned unchanged).
    """
    if not raw_options:
        return parse_args([''], apply_config=apply_config)
    if isinstance(raw_options, dict):
        options = parse_args([''], apply_config=apply_config)
        for name, value in raw_options.items():
            if not hasattr(options, name):
                raise ValueError("No such option '{}'".format(name))
            # Check for very basic type errors.
            expected_type = type(getattr(options, name))
            # NOTE(review): `expected_type` is a *type object*, so
            # isinstance(expected_type, (str, unicode)) is always False and
            # this outer guard always passes; `issubclass` was probably
            # intended. `unicode` also implies Python 2 compatibility —
            # confirm before changing.
            if not isinstance(expected_type, (str, unicode)):
                if isinstance(value, (str, unicode)):
                    raise ValueError(
                        "Option '{}' should not be a string".format(name))
            setattr(options, name, value)
    else:
        options = raw_options
    return options
| 25,621
|
def csvdir_equities(tframes=None, csvdir=None):
    """
    Build an ingest function for a custom CSV-directory data bundle.

    Can be used in ~/.zipline/extension.py to register a bundle with
    custom parameters, e.g. with a custom trading calendar.

    Parameters
    ----------
    tframes: tuple, optional
        The data time frames, supported timeframes: 'daily' and 'minute'
    csvdir : string, optional, default: CSVDIR environment variable
        The path to the directory of this structure:
        <directory>/<timeframe1>/<symbol1>.csv
        <directory>/<timeframe1>/<symbol2>.csv
        <directory>/<timeframe1>/<symbol3>.csv
        <directory>/<timeframe2>/<symbol1>.csv
        <directory>/<timeframe2>/<symbol2>.csv
        <directory>/<timeframe2>/<symbol3>.csv

    Returns
    -------
    ingest : callable
        The bundle ingest function

    Examples
    --------
    This code should be added to ~/.zipline/extension.py
    .. code-block:: python
        from zipline.data.bundles import csvdir_equities, register
        register('custom-csvdir-bundle',
                 csvdir_equities(["daily", "minute"],
                                 '/full/path/to/the/csvdir/directory'))
    """
    bundle = CSVDIRBundle(tframes, csvdir)
    return bundle.ingest
| 25,622
|
def reverse_readline(filename, buf_size=8192):
    """Yield the lines of a text file in reverse order.

    The file is read backwards in chunks of ``buf_size`` bytes; partial
    lines at chunk boundaries are stitched together before being yielded.

    :param filename: path of the file to read
    :param buf_size: chunk size used while seeking backwards
    """
    with open(filename) as fh:
        segment = None
        offset = 0
        fh.seek(0, os.SEEK_END)
        file_size = remaining_size = fh.tell()
        while remaining_size > 0:
            offset = min(file_size, offset + buf_size)
            fh.seek(file_size - offset)
            buffer = fh.read(min(remaining_size, buf_size))
            remaining_size -= buf_size
            lines = buffer.split('\n')
            # the first line of the buffer is probably not a complete line so
            # we'll save it and append it to the last line of the next buffer
            # we read
            if segment is not None:
                # If the previous chunk starts right at a line boundary, do
                # not concatenate the segment to the last line of the new
                # chunk; yield the segment first instead.
                # BUG FIX: was `buffer[-1] is not '\n'` — identity comparison
                # against a str literal (SyntaxWarning, CPython-interning
                # dependent); use inequality.
                if buffer[-1] != '\n':
                    lines[-1] += segment
                else:
                    yield segment
            segment = lines[0]
            for index in range(len(lines) - 1, 0, -1):
                if len(lines[index]):
                    yield lines[index]
        # Don't yield None if the file was empty
        if segment is not None:
            yield segment
| 25,623
|
def normalize_skeleton(joints):
    """Normalizes joint positions (NxMx2 or NxMx3, where M is 14 or 16) from parent to child order. Each vector from parent to child is normalized with respect to it's length.

    :param joints: Position of joints (NxMx2) or (NxMx3)
    :type joints: numpy.ndarray
    :return: Normalzed position of joints (NxMx2) or (NxMx3)
    :rtype: numpy.ndarray
    """
    assert len(joints.shape) == 3
    assert joints.shape[1] == 14 or joints.shape[1] == 16
    assert joints.shape[-1] == 2 or joints.shape[-1] == 3
    hip = 0  # joint 0 is treated as the root in both layouts
    if joints.shape[1] == 14:
        names = NAMES_14
    else:
        names = NAMES_16
    neck = names.index('Neck')
    joints_ = joints.copy()
    # root-relative coordinates: subtract the root (joint 0) position
    joints_ -= joints_[:, :1, :]
    # spine length (hip -> neck) used as the normalization reference
    spine = joints_[:, neck, :] - joints_[:, hip, :]
    spine_norm = np.linalg.norm(spine, axis=1).reshape(-1, 1)
    adjacency = adjacency_list(joints_.shape[1])
    # BFS from the root, rebuilding each child from its (already rebuilt)
    # parent with a unit bone direction scaled by bone-length / spine-length
    queue = []
    queue.append(0)
    while len(queue) > 0:
        current = queue.pop(0)
        for child in adjacency[current]:
            queue.append(child)
            # NOTE(review): bone vectors are taken from the *original*
            # `joints`, while positions are rebuilt in `joints_` —
            # presumably intentional (directions are unaffected by the
            # root shift), but confirm.
            prnt_to_chld = joints[:, child, :] - joints[:, current, :]
            prnt_to_chld_norm = np.linalg.norm(prnt_to_chld, axis=1).reshape(-1, 1)
            prnt_to_chld_unit = prnt_to_chld / prnt_to_chld_norm
            # epsilon guards against a degenerate zero-length spine
            joints_[:, child, :] = joints_[:, current, :] + (prnt_to_chld_unit * (prnt_to_chld_norm / (spine_norm + 1e-8)))
    return joints_
| 25,624
|
def parseAreas(area):
    """Parse the strings into address. This function is highly customized and demonstrates the general steps for transforming raw covid cases data to a list of address searchable in Google Map.

    Arguments:
        area: raw data downloaded from a news app
    Return:
        l: a list of human-readable address searchable in Google Map
    """
    #FIXME: This function ideally should be generalized if the data source is still news app
    l = []
    # entries are separated by ';', sub-locations within an entry by '、'
    ll = area.split(";")
    for k in ll:
        kk = k.split("、")
        if len(kk)>1:
            if len(kk[1])<=3: # all members of kk belong to the same residential area
                # strip the trailing sub-unit (length of kk[1]) off the first part
                l.append(kk[0][:(len(kk[0])-len(kk[1]))])
            else: # members of kk belong to different residential area
                l.append(kk[0])
                for mm in range(1,len(kk)):
                    # NOTE(review): assumes the district name ("...区") ends at
                    # character index 2 or 3 of kk[0]; entries with other
                    # district-name lengths would raise or mis-prefix — verify
                    # against the actual feed format.
                    if kk[0][2]== "区":
                        kk[mm] = kk[0][:3] + kk[mm]
                    elif kk[0][3]== "区":
                        kk[mm] = kk[0][:4] + kk[mm]
                    l.append(kk[mm])
        else:
            l.append(k)
    return(l)
| 25,625
|
def is_valid_instruction(instr: int, cpu: Cpu = Cpu.M68000) -> bool:
    """Check if an instruction is valid for the specified CPU type.

    :param instr: instruction opcode value (presumably the raw 16-bit
        opcode word — confirm against the m68k library docs)
    :param cpu: target CPU model, defaults to the plain M68000
    :return: True when the underlying m68k library accepts the opcode
    """
    return bool(lib.m68k_is_valid_instruction(instr, cpu.value))
| 25,626
|
def wrap_text(translations, linewrap=0):
    """Pretty print translations.

    If linewrap is set to 0 disble line wrapping.

    Parameters
    ----------
    translations : list
        List of word translations.
    linewrap : int
        Maximum line length before wrapping.
    """
    # pylint: disable=too-many-locals
    def wrap(text, width=linewrap, findent=0, sindent=0, bold=False):
        # wrap a single chunk with first/subsequent-line indents; width 0
        # means "indent only, no wrapping"
        if width == 0:
            text = " " * findent + text
        else:
            text = textwrap.fill(
                text,
                width=width,
                initial_indent=" " * findent,
                subsequent_indent=" " * sindent,
            )
        # don't use bold when stdout is pipe or redirect
        if bold and sys.stdout.isatty():
            text = "\033[0;1m" + text + "\033[0m"
        return text
    indent = 5
    result = []
    for i1, trans in enumerate(translations):
        if i1 > 0:
            # blank line between consecutive translations
            result.append("\n")
        for w in trans.word:
            result.append(wrap(w, bold=True))
        for i2, t in enumerate(trans.parts_of_speech):
            if i2 > 0:
                result.append("")
            if t.part:
                result.append("[{part}]".format(part=t.part))
            for i3, m in enumerate(t.meanings, 1):
                if i3 > 1:
                    result.append("")
                # numbered meaning line, e.g. "  1. sense a, sense b"
                meaning = "{index:>3}. {meanings}".format(
                    index=i3, meanings=", ".join(m.meaning)
                )
                result.append(wrap(meaning, sindent=indent, bold=True))
                eindent = indent + 1
                for e in m.examples:
                    result.append("")
                    # example sentence, optionally followed by its translation
                    result.append(wrap(e[0], findent=eindent, sindent=eindent))
                    if len(e) == 2 and e[1]:
                        result.append(wrap(e[1], findent=eindent, sindent=eindent + 1))
    return "\n".join(result)
| 25,627
|
def test_list_qname_max_length_nistxml_sv_iv_list_qname_max_length_1_3(mode, save_output, output_format):
    """
    Type list/QName is restricted by facet maxLength with value 5.
    """
    # Generated regression test: exercises assert_bindings on a NIST XML
    # Schema test-suite schema/instance pair.
    assert_bindings(
        schema="nistData/list/QName/Schema+Instance/NISTSchema-SV-IV-list-QName-maxLength-1.xsd",
        instance="nistData/list/QName/Schema+Instance/NISTXML-SV-IV-list-QName-maxLength-1-3.xml",
        class_name="NistschemaSvIvListQnameMaxLength1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
| 25,628
|
def docker_available():
    """Check if Docker can be run.

    Returns True when `docker images` exits with status 0.
    """
    return run.run(["docker", "images"], return_code=True) == 0
| 25,629
|
def is_bc(symbol):
    """
    Check whether *symbol* currently shows a divergence ("背驰") setup on
    the 30-minute chart.

    :param symbol: instrument code understood by ``get_kline``
    :return: True if any of the configured divergence / buy-point signals match
    """
    bars = get_kline(symbol, freq="30min", end_date=datetime.now(), count=1000)
    c = CZSC(bars, get_signals=get_selector_signals)
    factor_ = Factor(
        name="背驰选股",
        signals_any=[
            Signal("30分钟_倒1笔_三笔形态_向下盘背_任意_任意_0"),
            Signal("30分钟_倒1笔_基础形态_底背驰_任意_任意_0"),
            Signal("30分钟_倒1笔_类买卖点_类一买_任意_任意_0"),
            Signal("30分钟_倒1笔_类买卖点_类二买_任意_任意_0"),
        ],
        signals_all=[],
    )
    # return the match result directly instead of an if/else True/False
    return factor_.is_match(c.signals)
| 25,630
|
def IEEE2030_5Time(dt_obj, local=False):
    """ Return a proper IEEE2030_5 TimeType object for the dt_obj passed in.

    From IEEE 2030.5 spec:
        TimeType Object (Int64)
        Time is a signed 64 bit value representing the number of seconds
        since 0 hours, 0 minutes, 0 seconds, on the 1st of January, 1970,
        in UTC, not counting leap seconds.

    :param dt_obj: Datetime object to convert to IEEE2030_5 TimeType object.
    :param local: dt_obj is in UTC or Local time. Default to UTC time.
    :return: Time XSD object
    :raises: If dt_obj is naive, or not UTC when local is False
    """
    if dt_obj.tzinfo is None:
        raise Exception("IEEE 2030.5 times should be timezone aware UTC or local")
    if dt_obj.utcoffset() != timedelta(0) and not local:
        raise Exception("IEEE 2030.5 TimeType should be based on UTC")
    if local:
        # time.mktime interprets the struct_time in the machine's local zone
        return xsd_models.TimeType(valueOf_=int(time.mktime(dt_obj.timetuple())))
    else:
        # calendar.timegm interprets the struct_time as UTC
        return xsd_models.TimeType(valueOf_=int(calendar.timegm(dt_obj.timetuple())))
| 25,631
|
def learning_rate_schedule(params, global_step):
    """Compute the current learning rate with linear warmup and step decay.

    The base rate is linearly scaled by the effective global batch size,
    warmed up linearly for the first ``lr_warmup_step`` steps, then dropped
    by 10x and 100x at the two configured decay boundaries.

    Args:
      params: dict of hyperparameters (base_learning_rate, lr_warmup_step,
        first_lr_drop_step, second_lr_drop_step, batch_size, gpu_num).
      global_step: scalar tensor with the current training step.
    Returns:
      A scalar tensor with the learning rate for this step.
    """
    warmup_steps = params['lr_warmup_step']
    # Linear-scaling rule: the rate grows with the global batch size.
    scaling_factor = params['gpu_num'] * params['batch_size'] / ssd_constants.DEFAULT_BATCH_SIZE
    adjusted_learning_rate = params['base_learning_rate'] * scaling_factor
    # Linear warmup from 0 up to the scaled rate.
    learning_rate = (tf.cast(global_step, dtype=tf.float32) /
                     warmup_steps) * adjusted_learning_rate
    schedule = (
        (1.0, warmup_steps),
        (0.1, params['first_lr_drop_step']),
        (0.01, params['second_lr_drop_step']),
    )
    # Once each boundary is passed, switch to the corresponding multiple.
    for multiplier, boundary in schedule:
        learning_rate = tf.where(global_step < boundary, learning_rate,
                                 adjusted_learning_rate * multiplier)
    return learning_rate
| 25,632
|
def generate_async(seq_name, count):
  """Generates sequence numbers.
  Supports up to 5 QPS. If we need more, we will need to implement something
  more advanced.
  Args:
    seq_name: name of the sequence.
    count: number of sequence numbers to allocate.
  Returns:
    The generated number. For a returned number i, numbers [i, i+count) can be
    used by the caller.
  """
  # Make sure any legacy entity for this sequence is migrated first.
  yield _migrate_entity_async(seq_name)
  @ndb.transactional_tasklet
  def txn():
    # Atomically reserve the range [next_number, next_number + count).
    seq = ((yield NumberSequence.get_by_id_async(seq_name)) or
           NumberSequence(id=seq_name))
    result = seq.next_number
    seq.next_number += count
    yield seq.put_async()
    raise ndb.Return(result)
  started = utils.utcnow()
  number = yield txn()
  ellapsed_ms = (utils.utcnow() - started).total_seconds() * 1000
  # Single-entity-group transactions are rate limited; slow allocation is a
  # sign of contention, so surface it prominently in the logs.
  if ellapsed_ms > 1000:  # pragma: no cover
    logging.warning(
        'sequence number generation took > 1s\n'
        'it took %dms\n'
        'sequence: %s', ellapsed_ms, seq_name
    )
  else:
    logging.info('sequence number generation took %dms', ellapsed_ms)
  metrics.SEQUENCE_NUMBER_GEN_DURATION_MS.add(
      ellapsed_ms, fields={'sequence': seq_name}
  )
  raise ndb.Return(number)
| 25,633
|
def deadNode(uid):
    """
    Remove a node from the neighbors.

    Tears down the TCP connection to the node identified by *uid*, removes it
    from the neighbor strategy and the global universe map, and broadcasts a
    DeadNodeMessage so peers learn about the death.

    The ``finally`` clause guarantees the uid is recorded in ``knownDead``
    even when teardown fails; note there is no ``except``, so any exception
    raised in the ``try`` body still propagates to the caller afterwards.
    """
    global universe
    try:
        lookupNode(uid).destroyTCPConnection()
        neighborStrategy.removeNeighbor(uid)
        del universe[uid]
        # Tell the rest of the network that this node is gone.
        deadMessage = message.DeadNodeMessage(uid)
        deadMessage.send()
    finally:
        # Always remember the node as dead, even if cleanup failed above.
        knownDead.add(uid)
        debug("Dead#"+uid, monitor=True)
        debug("removing dead node uid:" + uid, info=True)
| 25,634
|
def run(
    command,
    cwd=None,
    capture_output=False,
    input=None,
    check=True,
    **subprocess_run_kwargs
):
    """Run *command* through subprocess.run() with logging and sane defaults.

    str input is encoded to UTF-8, the caller's environment is inherited
    (with Homebrew auto-update disabled), and stdout is captured on request.
    """
    logging.info("Running {} in {}".format(" ".join(command), cwd or os.getcwd()))
    # subprocess wants bytes on stdin; accept str for convenience.
    if isinstance(input, str):
        input = input.encode("utf-8")
    # Inherit the environment but keep Homebrew from auto-updating.
    env = dict(os.environ, HOMEBREW_NO_AUTO_UPDATE="1")
    stdout = subprocess.PIPE if capture_output else None
    return subprocess.run(
        command,
        cwd=cwd,
        input=input,
        stdout=stdout,
        check=check,
        env=env,
        **subprocess_run_kwargs
    )
| 25,635
|
def test_is_zip_file(zipfile):
    """Verify is_zip_file() recognizes the zipfile fixture as a ZIP archive."""
    assert is_zip_file(zipfile) is True
| 25,636
|
def profile(step):
    """
    Decorator that profiles a pipeline step and writes the report as an HTML
    file into the project's output directory.
    Usage:
    @profile
    def step(self):
        pass
    """
    @wraps(step)
    def wrapper(*arg, **kwargs):
        # The decorated step is a bound pipeline method; arg[0] is `self`.
        pipeline_instance = arg[0]
        with Profiler() as profiler:
            result = step(*arg, **kwargs)
        report_path = pipeline_instance.project.get_output_file_path("profile", "html")
        report_path.write_text(profiler.output_html())
        pipeline_instance.log(f"Profiling results at {report_path.resolve()}")
        return result
    return wrapper
| 25,637
|
def make_dataframe_wrapper(DataFrame):
    """
    Prepares a "delivering wrapper" proxy class for DataFrame.
    It makes DF.loc, DF.groupby() and other methods listed below deliver their
    arguments to remote end by value.
    """
    from modin.pandas.series import Series
    conn = get_connection()
    class ObtainingItems:
        # Mixin: materialize items()/iteritems() results locally as tuples
        # instead of leaving remote iterators on the other side.
        def items(self):
            return conn.obtain_tuple(self.__remote_end__.items())
        def iteritems(self):
            return conn.obtain_tuple(self.__remote_end__.iteritems())
    ObtainingItems = _deliveringWrapper(Series, mixin=ObtainingItems)
    class DataFrameOverrides(_prepare_loc_mixin()):
        @classmethod
        def _preprocess_init_args(
            cls,
            data=None,
            index=None,
            columns=None,
            dtype=None,
            copy=None,
            query_compiler=None,
        ):
            # Ship `data` to the remote end by value before construction.
            (data,) = conn.deliver((data,), {})[0]
            return (), dict(
                data=data,
                index=index,
                columns=columns,
                dtype=dtype,
                copy=copy,
                query_compiler=query_compiler,
            )
        @property
        def dtypes(self):
            # dtypes is a Series living remotely; wrap it so its items are
            # obtained by value when iterated.
            remote_dtypes = self.__remote_end__.dtypes
            return ObtainingItems(__remote_end__=remote_dtypes)
    # Methods whose arguments must be delivered by value to the remote end.
    DeliveringDataFrame = _deliveringWrapper(
        DataFrame,
        [
            "groupby",
            "agg",
            "aggregate",
            "__getitem__",
            "astype",
            "drop",
            "merge",
            "apply",
            "applymap",
        ],
        DataFrameOverrides,
        "DataFrame",
    )
    return DeliveringDataFrame
| 25,638
|
def download_cmems_ts(lats, lons, t0, tf, variables, fn=None):
    """Subset CMEMS output using OpenDAP
    :params:
        lats = [south, north] limits of bbox
        lons = [west, east] limits of bbox
        t0 = datetime for start of time series
        tf = datetime for end of time series
        variables = list of variables in ["zos", "uo", "vo", "so", "thetao"]
        fn = optional path; when given, the dataset is also saved as NetCDF
    :returns:
        Xarray Dataset of selected variables
    """
    validate_datetime(t0)
    validate_datetime(tf)
    try:
        validate_cmems_variable(variables)
    except NameError:
        raise NameError("Input 'variable' needs to be specified")
    # zos (surface elevation) has no depth axis, so it is split off and
    # appended to the request with a 3-D (time, lat, lon) index block below.
    _variables, zos = fix_zos(variables)
    request = (
        "https://my.cmems-du.eu/thredds/dodsC/cmems_mod_glo_phy_my_0.083_P1D-m?"
        "longitude[0:1:4319],latitude[0:1:2040],depth[0:1:49],time[0:1:10012]"
    )
    # query dataset to get coordinates and convert bbox to indicies for OpenDAP
    coords = xr.open_dataset(request)
    lon_ll = cmemslon2index(lons[0], coords)  # lower left longtiude of bbox
    lon_ur = cmemslon2index(lons[1], coords)  # upper right longitude
    lat_ll = cmemslat2index(lats[0], coords)
    lat_ur = cmemslat2index(lats[1], coords)
    t0i = time2index(t0, coords)
    tfi = time2index(tf, coords)
    # Re-issue the request restricted to the bbox / time-window indices.
    request = (
        f"https://my.cmems-du.eu/thredds/dodsC/cmems_mod_glo_phy_my_0.083_P1D-m?"
        f"longitude[{lon_ll}:1:{lon_ur}],latitude[{lat_ll}:1:{lat_ur}],depth[0:1:49],time[{t0i}:1:{tfi}],"
    )
    request = request + "".join(
        [
            f"{variable}[{t0i}:1:{tfi}][0:1:49][{lat_ll}:1:{lat_ur}][{lon_ll}:1:{lon_ur}]"
            for variable in _variables
        ]
    )
    # append surf_el if present
    if zos is not None:
        request = (
            request + f"{zos}[{t0i}:1:{tfi}][{lat_ll}:1:{lat_ur}][{lon_ll}:1:{lon_ur}]"
        )
    ds = xr.open_dataset(request)
    if fn is not None:
        ds.to_netcdf(fn)
    return ds
| 25,639
|
def construct_gpu_info(statuses):
    """Build a lookup dict for GPU statuses (unit-test helper).

    Each status is indexed twice: under its minor number and its UUID.
    """
    mapping = {}
    for gpu_status in statuses:
        mapping.update({gpu_status.minor: gpu_status,
                        gpu_status.uuid: gpu_status})
    return mapping
| 25,640
|
def load_data_time_machine(batch_size, num_steps, use_random_iter=False,
                           max_tokens=10000):
    """Build a time-machine dataset iterator and return it with its vocabulary."""
    loader = SeqDataLoader(batch_size, num_steps, use_random_iter, max_tokens)
    # The loader carries its own vocab; hand both back to the caller.
    return loader, loader.vocab
| 25,641
|
def scan_paths(paths, only_detect, recursive, module_filter):
    """
    Scans paths for known bots and dumps information from them
    @rtype : dict
    @param paths: list of paths to check for files
    @param only_detect: only detect known bots, don't process configuration information
    @param recursive: recursively traverse folders
    @param module_filter: if not None, only modules in list will be used
    @return: dictionary of file to dictionary of information for each file
    """
    from collections import deque
    results = {}
    # Work on a local queue: the original popped from the front of `paths`
    # itself, destructively emptying the caller's list with O(n) deletions.
    pending = deque(paths)
    while pending:
        file_path = abspath(pending.popleft())
        if isfile(file_path):
            with open(file_path, mode='rb') as file_handle:
                file_content = file_handle.read()
            r = scan_file_data(file_content, module_filter, only_detect)
            if r is not None:
                results[file_path] = r
        elif isdir(file_path):
            # Queue children; directories only when recursion is requested.
            for p in listdir(file_path):
                p = join(file_path, p)
                if isfile(p) or (isdir(p) and recursive):
                    pending.append(p)
    return results
| 25,642
|
def decode_shift(s: str):
    """
    takes as input string encoded with encode_shift function. Returns decoded string.
    Example solution:
    # line 1
    decoded_str = ''
    # line 2
    for ch in s:
    # line 3
    v = (ord(ch) - 5 - ord('a'))
    # line 4
    v = (v + ord('a'))
    # line 5
    decoded_str += chr(v)
    # line 6
    return decoded_str
    """
    # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4
    # END OF CONTEXT
    # Line 3 subtracts the shift without wrapping modulo 26, so characters
    # near the start of the alphabet decode incorrectly — the bug is line 3.
    print("3")
    # END OF SOLUTION
| 25,643
|
def gather_guids(env):
    """
    Scan ``env["vcproj_dir"]`` for ``*.vcproj`` files and collect their GUIDs.

    For every project file, the first ``Name="..."`` and
    ``ProjectGUID="{...}"`` values found are extracted and stored (GUID
    upper-cased) into ``env["guids"]`` as a ``{name: guid}`` mapping.

    Example matched line::

        ProjectGUID="{F4B0B7E4-A405-4EB1-A74F-0765181FE3BC}"
    """
    # Raw strings: the original non-raw patterns relied on Python passing
    # unknown escapes (\S) through, which is deprecated.
    guidpattern = re.compile(r"\"{(\S+-\S+-\S+-\S+-\S+)}\"")
    namepattern = re.compile(r"Name=\"(\S+)\"")
    vcproj_dir = env["vcproj_dir"]
    guids = {}
    for filename in os.listdir(vcproj_dir):
        if not filename.endswith(".vcproj"):
            continue
        name = ""
        guid = ""
        # Read-only access with a context manager (the original opened the
        # file "r+" and never closed the handle).
        with open(os.path.join(vcproj_dir, filename), "r") as fh:
            for line in fh:
                m = guidpattern.search(line)
                if m:
                    guid = m.group(1)
                m = namepattern.search(line)
                if m and name == "":
                    name = m.group(1)
                if guid != "" and name != "":
                    break
        guids[name] = guid.upper()
    env["guids"] = guids
| 25,644
|
def generate_masks(input_size, output_size=1, observed=None):
    """
    Generates some basic input and output masks.
    If C{input_size} is an integer, the number of columns of the mask will be
    that integer. If C{input_size} is a list or tuple, a mask with multiple channels
    is created, which can be used with RGB images, for example.
    By default, the input region will cover the upper half of the mask, also known as a
    *causal neighborhood*. If any of the channels is observed, the input region in that
    channel will cover a full square neighborhood around the output region.
    Examples:
    >>> input_mask, output_mask = generate_masks(8, 2)
    >>> input_mask, output_mask = generate_masks([3, 7, 7], 1, [1, 0, 0])
    @type input_size: C{int} / C{list}
    @param input_size: determines the size of the input region
    @type output_size: C{int}
    @param output_size: determines the size of the output region
    @type observed: C{list}
    @param observed: can be used to indicate channels which are observed
    @rtype: C{tuple}
    @return: one input mask and one output mask
    """
    # Normalize input_size to one entry per channel.
    if not iterable(input_size):
        if iterable(observed):
            input_size = [input_size] * len(observed)
        else:
            input_size = [input_size]
    if observed is None:
        observed = [False] * len(input_size)
    if len(observed) != len(input_size):
        raise ValueError("Incompatible `input_size` and `observed`.")
    num_channels = len(input_size)
    num_cols = max(input_size)
    # Observed channels need a full square neighborhood; otherwise only the
    # causal upper half (plus room for the output rows) is kept.
    num_rows = num_cols if any(observed) else (num_cols + 1) // 2 + output_size // 2
    input_mask = zeros([num_rows, num_cols, num_channels], dtype='bool')
    output_mask = zeros_like(input_mask)
    tmp1 = (num_cols + 1) // 2  # center column (half-width of widest channel)
    tmp2 = output_size // 2
    tmp3 = (output_size + 1) // 2
    for k in range(num_channels):
        # Center narrower channels inside the widest one.
        offset = tmp1 - (input_size[k] + 1) // 2
        if observed[k]:
            input_mask[
                offset:num_cols - offset,
                offset:num_cols - offset, k] = True
        else:
            input_mask[offset:tmp1 + tmp2, offset:num_cols - offset, k] = True
            # Carve the output region (and everything to its right on those
            # rows) out of the causal input region.
            for i in range(output_size):
                input_mask[
                    tmp1 + tmp2 - i - 1,
                    tmp1 - tmp3:, k] = False
                output_mask[
                    tmp1 + tmp2 - i - 1,
                    tmp1 - tmp3:tmp1 + output_size // 2, k] = True
    # Collapse the channel axis for single-channel masks.
    if input_mask.shape[2] == 1:
        input_mask.resize(input_mask.shape[0], input_mask.shape[1])
        output_mask.resize(output_mask.shape[0], output_mask.shape[1])
    return input_mask, output_mask
| 25,645
|
def parse_config_file(config):
    """
    Load and validate a YAML config file (primarily for endpoints).

    Each endpoint must carry a ``url`` key whose value starts with ``http``;
    plain-HTTP urls only produce a warning. Blocking problems terminate the
    process via ``sys.exit(1)``.

    :param config: path to the YAML configuration file
    :return: parsed config dict, or None when it has no ``endpoints`` section
    """
    fail = False
    with open(config, 'r') as fp:
        # safe_load: plain yaml.load can construct arbitrary Python objects
        # from untrusted input (and is deprecated without a Loader).
        content = yaml.safe_load(fp.read())
    if 'endpoints' not in content.keys():
        return
    for title, items in content['endpoints'].items():
        if 'url' not in items.keys():
            fail = True
            logging.error("no url found in endpoint '%s'", title)
            # Nothing further to validate without a url (the original fell
            # through and crashed with KeyError on items['url']).
            continue
        if not items['url'].startswith('http'):
            fail = True
            logging.error("non HTTP(S) url found in endpoint '%s'", title)
        if not items['url'].startswith('https'):
            logging.warning("non SSL url found in endpoint '%s'", title)
    if fail:
        logging.info("stopping execution due to blocking config issues")
        sys.exit(1)
    return content
| 25,646
|
def average(time_array,height_array,data_array,height_bin_size=100,time_bin_size=3600):
    """
    average: function that averages the radar signal by height and time
    Args:
        time_array: numpy 1d array with timestamps
        height_array: numpy 1d array with height range
        data_array: numpy 2d array size len(time_array) X len(height_array)
        height_bin_size: the averaging window in meters
        time_bin_size: the averaging window in seconds
    Returns:
        time: returns the new time dimension
        height: returns the new height dimension
        averaged: the data averaged size len(time) X len(height)
    """
    # Build (start, end) time bins by walking the timestamps and cutting a
    # new bin whenever time_bin_size seconds have elapsed.
    past_time = time_array[0]
    bins_time = []
    for time in time_array:
        if past_time + time_bin_size > time:
            continue
        else:
            bins_time.append((past_time,time))
            past_time = time
    # Close the final (possibly shorter) bin.
    bins_time.append((time,time_array[-1]))
    bin_range_time = [bini[0] for bini in bins_time]
    # Map every timestamp to the index of the bin containing it.
    pixel_in_bin_time = []
    for time in time_array:
        pixel_in_bin_time.append(find_bin(time,bins_time))
    max_val_time = np.max(pixel_in_bin_time)
    pixel_in_bin_time = np.array(pixel_in_bin_time)
    # Same along the height axis, with regularly spaced bins.
    bins = create_bins(height_array[0],height_array[-1],height_bin_size)
    bin_range = [bini[0] for bini in bins]
    pixel_in_bin = []
    for height in height_array:
        pixel_in_bin.append(find_bin(height,bins))
    max_val = np.max(pixel_in_bin)
    pixel_in_bin = np.array(pixel_in_bin)
    averaged = np.zeros((len(bins_time),len(bins)))
    # NaN-aware mean of the data falling into each (time, height) cell.
    for i in range(max_val_time+1):
        for j in range(max_val+1):
            min_time = np.where(pixel_in_bin_time==i)[0][0]
            max_time = np.where(pixel_in_bin_time==i)[0][-1]
            min_height = np.where(pixel_in_bin==j)[0][0]
            max_height = np.where(pixel_in_bin==j)[0][-1]
            # NOTE(review): the slices below exclude max_time/max_height, so
            # the last sample of each bin appears dropped — confirm intended.
            temp_selection = data_array[min_time:max_time,min_height:max_height]
            temp_average = np.nanmean(temp_selection)
            averaged[i,j] = temp_average
    time = bin_range_time
    height = bin_range
    return time,height,averaged
| 25,647
|
def mkdir_if_not_exist(dir_name):
    """
    Create directory *dir_name* (including parents) if it does not exist.

    Uses ``exist_ok=True`` so the operation is race-free: a concurrent
    creation between the old exists-check and makedirs can no longer raise
    FileExistsError.

    :param dir_name: path of the directory to create
    :return: None
    """
    os.makedirs(dir_name, exist_ok=True)
| 25,648
|
def daily_report(api, space_name, charts, num_days=1, end_time=None):
    """Summarize SLO compliance breaches per chart for the previous day(s).

    :param api: An instance of sloc_report.LibratoApi
    :param space_name: name of the space containing the charts
    :param charts: list of dicts with SLO thresholds, indexed by chart name
    :param num_days: how many days to report on (default 1)
    :param end_time: the report counts back from this time (default: now)
    :return: list of dicts, one per chart, of the form
        {'chart_name': ..., 'total': <breach count>, 'breaches': [...]}
    """
    sloc_charts = _enumerate_sloc_charts(api, space_name, charts)
    if end_time is None:
        end_time = sloc_time.time_now()
    # (start, end) pairs for each day in the reporting window.
    days = sloc_time.get_day_times(num_days, end_time)
    report = []
    for chart in sloc_charts:
        entry = {
            'chart_name': chart.metric(), 'total': 0, 'breaches': []
        }
        for day in days:
            response = _get_composite_with_retry(
                api, chart,
                start_time=day[0],
                end_time=day[1]
            )
            # Count samples exceeding the chart's SLO threshold.
            entry['total'] += sum(
                1 for s in response['measurements'][0]['series']
                if s['value'] > chart.threshold
            )
        report.append(entry)
    return report
| 25,649
|
def download_paris_dataset(root: str):
    """
    Download the two Paris dataset archives and the rparis6k metadata into
    *root*, extracting the images and flattening the directory layout.
    """
    for images_url in (
        "https://www.robots.ox.ac.uk/~vgg/data/parisbuildings/paris_1.tgz",
        "https://www.robots.ox.ac.uk/~vgg/data/parisbuildings/paris_2.tgz",
    ):
        download_and_extract_archive(images_url, root)
    # Move every image out of paris/<landmark>/ directly into root.
    paris_img_dir = os.path.join(root, "paris")
    for landmark in os.listdir(paris_img_dir):
        landmark_dir = os.path.join(paris_img_dir, landmark)
        for img in os.listdir(landmark_dir):
            shutil.move(os.path.join(landmark_dir, img), root)
    metadata_url = (
        "http://cmp.felk.cvut.cz/revisitop/data/datasets/rparis6k/gnd_rparis6k.pkl"
    )
    download_url(metadata_url, root)
| 25,650
|
def new_pitch():
    """
    Render the new-pitch form; on a valid submission persist the pitch for
    the current user and redirect back to their profile page.
    """
    form = PitchForm()
    if form.validate_on_submit():
        fresh_pitch = Pitch(
            title=form.title.data,
            pitch_actual=form.pitch.data,
            category=form.category.data,
            user_id=current_user.id,
        )
        fresh_pitch.save_pitch()
        return redirect(url_for('.profile', uname=current_user.username))
    return render_template('new_pitch.html', title='New pitch', pitch_form=form)
| 25,651
|
def get_entity_bios(seq, id2label):
    """Gets entities from a BIOS-tagged sequence.

    Args:
        seq (list): sequence of labels (strings, or ids mapped via id2label).
        id2label (dict): mapping from label id to label string.
    Returns:
        list: list of [chunk_type, chunk_start, chunk_end] (inclusive ends).
    Example:
        # >>> seq = ['B-PER', 'I-PER', 'O', 'S-LOC']
        # >>> get_entity_bios(seq, {})
        [['PER', 0, 1], ['LOC', 3, 3]]
    """
    chunks = []
    chunk = [-1, -1, -1]  # [type, start, end]; end == -1 means "open"
    for indx, tag in enumerate(seq):
        if not isinstance(tag, str):
            tag = id2label[tag]
        if tag.startswith("S-"):
            # Single-token entity: flush any pending chunk, emit immediately.
            if chunk[2] != -1:
                chunks.append(chunk)
            chunk = [-1, -1, -1]
            chunk[1] = indx
            chunk[2] = indx
            chunk[0] = tag.split('-')[1]
            chunks.append(chunk)
            # Reset with a fresh list (the original left a tuple here, a
            # latent TypeError if the sentinel is ever mutated in place).
            chunk = [-1, -1, -1]
        if tag.startswith("B-"):
            # New multi-token entity: flush any completed pending chunk.
            if chunk[2] != -1:
                chunks.append(chunk)
            chunk = [-1, -1, -1]
            chunk[1] = indx
            chunk[0] = tag.split('-')[1]
        elif tag.startswith('I-') and chunk[1] != -1:
            # Extend the open chunk only if the entity type matches.
            _type = tag.split('-')[1]
            if _type == chunk[0]:
                chunk[2] = indx
            if indx == len(seq) - 1:
                chunks.append(chunk)
        else:
            # 'O' (or orphan I-): close out any completed chunk.
            if chunk[2] != -1:
                chunks.append(chunk)
            chunk = [-1, -1, -1]
    return chunks
| 25,652
|
def early_stopping_train(model, X, Y_, x_test, y_test, param_niter=20001, param_delta=0.1):
    """Train *model* with SGD, returning the snapshot best on the test set.

    Arguments:
    - X: model inputs [NxD], type: torch.Tensor
    - Y_: ground truth [Nx1], type: torch.Tensor
    - x_test, y_test: held-out data used to pick the best model
    - param_niter: number of training iterations
    - param_delta: learning rate
    """
    best_model, best_accuracy = None, 0
    N, D = X.shape[0], X.shape[1]
    C = max(Y_) + 1  # nr_classes
    optimizer = torch.optim.SGD(model.parameters(), lr=param_delta)
    prev_loss, count = None, 0
    for i in range(param_niter):
        model.forward(X)
        model.get_loss(X, Y_)
        model.loss.backward()
        if i % 1 == 0:
            print("iteration {}: loss {}".format(i, model.loss))
        optimizer.step()
        optimizer.zero_grad()
        if prev_loss is not None:  # exit if no move was made for 100 iterations
            # Loss change below 1e-9 counts as "no move"; 100 in a row stops.
            if abs(model.loss - prev_loss) < 1e-9:
                count += 1
            else:
                count = 0
            if count > 100:
                break
        prev_loss = model.loss
        # evaluate the model on the test dataset
        # NOTE(review): `eval` is presumably a project helper returning class
        # probabilities (not the builtin) — confirm the import.
        probs = eval(model, x_test)
        Y = np.argmax(probs, axis=1)
        accuracy, recall, matrix = data.eval_perf_multi(Y, y_test)
        print("Current accuracy on testset: ", accuracy)
        # Keep a shallow copy of the best-performing model seen so far.
        if accuracy > best_accuracy:
            best_model = copy.copy(model)
            best_accuracy = accuracy
    return best_model
| 25,653
|
def my_vtk_grid_props(vtk_reader):
    """
    Extract regular-grid properties from a vtk Reader instance.

    Parameters
    ----------
    vtk_reader: vtk Reader instance
        vtk Reader containing information about a vtk-file.

    Returns
    ----------
    A 12-tuple::

        (step_x, step_y,            # grid spacing in x / y
         npts_x, npts_y,            # number of cells in x / y
         low_m_x, high_m_x,         # centers of first / last x cell
         low_m_y, high_m_y,         # centers of first / last y cell
         low_x, high_x,             # edges of first / last x cell
         low_y, high_y)             # edges of first / last y cell

    Notes
    ----------
    Whether the file stores CELL or POINT data is inferred from the first
    x-bound: a bound of exactly 0.0 is treated as cell data.
    """
    output = vtk_reader.GetOutput()
    bounds = output.GetBounds()
    dims = output.GetDimensions()
    # Spacing derived from the extent and the number of grid nodes.
    step_x = (bounds[1] - bounds[0]) / (dims[0] - 1)
    step_y = (bounds[3] - bounds[2]) / (dims[1] - 1)
    if bounds[0] == 0.0:  # CELLS
        npts_x, npts_y = dims[0] - 1, dims[1] - 1
        # Bounds are cell edges; centers sit half a step inwards.
        low_m_x = bounds[0] + 0.5 * step_x
        high_m_x = bounds[1] - 0.5 * step_x
        low_m_y = bounds[2] + 0.5 * step_y
        high_m_y = bounds[3] - 0.5 * step_y
        low_x, high_x = bounds[0], bounds[1]
        low_y, high_y = bounds[2], bounds[3]
    else:  # POINTS
        npts_x, npts_y = dims[0], dims[1]
        # Bounds are cell centers; edges sit half a step outwards.
        low_m_x, high_m_x = bounds[0], bounds[1]
        low_m_y, high_m_y = bounds[2], bounds[3]
        low_x = bounds[0] - 0.5 * step_x
        high_x = bounds[1] + 0.5 * step_x
        low_y = bounds[2] - 0.5 * step_y
        high_y = bounds[3] + 0.5 * step_y
    return (step_x, step_y,
            npts_x, npts_y,
            low_m_x, high_m_x, low_m_y, high_m_y,
            low_x, high_x, low_y, high_y)
| 25,654
|
def _set_jit_fusion_options():
    """Configure PyTorch JIT layer fusion, choosing the fuser by version."""
    major, minor = (int(v) for v in torch.__version__.split(".")[:2])
    if (major, minor) >= (1, 10):
        # nv fuser
        torch._C._jit_set_profiling_executor(True)
        torch._C._jit_set_profiling_mode(True)
        torch._C._jit_override_can_fuse_on_cpu(False)
        torch._C._jit_override_can_fuse_on_gpu(False)
        torch._C._jit_set_texpr_fuser_enabled(False)  # fuser1
        torch._C._jit_set_nvfuser_enabled(True)  # fuser2
        torch._C._debug_set_autodiff_subgraph_inlining(False)
    else:
        # legacy pytorch fuser
        torch._C._jit_set_profiling_mode(False)
        torch._C._jit_set_profiling_executor(False)
        torch._C._jit_override_can_fuse_on_cpu(True)
        torch._C._jit_override_can_fuse_on_gpu(True)
| 25,655
|
def make_env(stack=True, scale_rew=True):
    """
    Create the remote Sonic environment wrapped with the standard stack:
    discretized actions, optional reward scaling, frame warping, and an
    optional 4-frame stack.
    """
    env = grc.RemoteEnv('tmp/sock')
    # Order matters: actions first, then (optional) reward scaling, then
    # observation warping.
    wrappers = [SonicDiscretizer]
    if scale_rew:
        wrappers.append(RewardScaler)
    wrappers.append(WarpFrame)
    for wrap in wrappers:
        env = wrap(env)
    if stack:
        env = FrameStack(env, 4)
    return env
| 25,656
|
def plot_distr_cumsum(result, measure="degree", scale=('log', 'log'), figures=None, prefix="", show_std=True, show_figs=True, mode="safe", colors=('r', 'b')):
    """Plot the cumulative distribution functions of *measure* per homophily.

    Averaging cumulative distributions is non-trivial (the x-grids differ per
    run), so per-group curves are merged onto a common grid via cumsum_mean.

    :param result: DataFrame with the per-run distribution columns
    :param measure: base name of the distribution columns (e.g. "degree")
    :param scale: (x, y) axis scales
    :param figures: optional list of figures (one per homophily) to draw into
    :param prefix: label prefix for the plotted curves
    :param show_std: draw error bars when True
    :param mode: passed through to cumsum_mean
    :param colors: (minority, majority) line colors
    :return: list of matplotlib figures, one per homophily value
    """
    maj_name = f'{measure}_distr_cumsum_maj'
    min_name = f'{measure}_distr_cumsum_min'
    maj_x = f'{measure}_distr_cumsum_maj_x'
    min_x = f'{measure}_distr_cumsum_min_x'
    tmp = result.groupby(['homophily']).agg({maj_name: list, min_name: list, min_x: list, maj_x: list})
    # Merge the per-run cumulative curves onto common grids.
    maj = [cumsum_mean(x, y, mode=mode) for x, y in zip(tmp[maj_x], tmp[maj_name])]
    mino = [cumsum_mean(x, y, mode=mode) for x, y in zip(tmp[min_x], tmp[min_name])]
    # `figures=None` default avoids the original mutable-default argument.
    if figures is None or len(figures) == 0:
        figures = [plt.Figure() for _ in range(len(tmp.index))]
    for fig in figures:
        if len(fig.axes) == 0:
            ax = fig.add_subplot()
    # BUGFIX: the original zipped `maj` into the min_* names (and vice
    # versa), plotting majority data under the "min" label; it also created
    # a stray, unused pyplot figure per iteration.
    for h, (min_xx, min_vals, min_std), (maj_xx, maj_vals, maj_std), fig in zip(tmp.index, mino, maj, figures):
        ax = fig.axes[0]
        ax.set_xscale(scale[0])
        ax.set_yscale(scale[1])
        if show_std:
            ax.errorbar(min_xx, min_vals, yerr=min_std, label=prefix + "min", color=colors[0])
            ax.errorbar(maj_xx, maj_vals, yerr=maj_std, label=prefix + "maj", color=colors[1])
        else:
            ax.plot(min_xx, min_vals, label=prefix + "min", color=colors[0])
            ax.plot(maj_xx, maj_vals, label=prefix + "maj", color=colors[1])
        ax.set_xlabel(f"{measure}")
        ax.set_ylabel(f"{measure} distribution")
        ax.set_title(f"h={h}")
        ax.legend()
    return figures
| 25,657
|
def recvall(sock, n, silent=False):
    """Receive exactly *n* bytes from *sock* (helper for recv_msg()).

    Returns the bytes, or None when the peer closes the connection before
    *n* bytes arrive. Socket errors are reported on stderr (unless *silent*)
    and re-raised as ConnectionError.
    """
    buf = b''
    while len(buf) < n:
        try:
            chunk = sock.recv(n - len(buf))
        except (socket.error, OSError) as e:
            if not silent:
                print("recvall() - Socket error:\n\t" + str(e), file=sys.stderr)
                print(current_thread().name, file=sys.stderr)
            raise ConnectionError
        if not chunk:
            # Peer closed the connection early.
            return None
        buf += chunk
    return buf
| 25,658
|
def install_jenkins(dest_folder=".", fLOG=print, install=True, version=None):
    """
    install `Jenkins <http://jenkins-ci.org/>`_ (only on Windows)
    @param      dest_folder     where to download the setup
    @param      fLOG            logging function
    @param      install         install (otherwise only download)
    @param      version         version to install (unused)
    @return                     temporary file
    .. versionadded:: 1.1
    """
    if version is not None:
        raise ValueError("cannot specify a version")
    if not sys.platform.startswith("win"):
        # BUGFIX: the original message said "SciTE", a copy-paste leftover
        # from another installer.
        raise NotImplementedError(
            "Jenkins can only be installed on Windows at the moment")
    url = "http://mirrors.jenkins.io/war/latest/jenkins.war"
    outfile = os.path.join(dest_folder, "jenkins.war")
    # Skip the download when the war file is already present.
    if not os.path.exists(outfile):
        download_file(url, outfile)
    if install:
        raise NotImplementedError("Does not install jenkins.war")
    return outfile
| 25,659
|
def main():
    """Entry point: pick install presets interactively, then run the install.

    A leading "-y" flag (used when the script re-runs itself for the user
    and root installs) skips the prompts and installs directly.
    """
    if len(sys.argv) >= 2 and sys.argv[1].lower() == "-y":
        installMain()
        return
    # Union of the chosen presets' install options, first-seen order kept.
    installOptions = []
    for presetIndex in getOptions(presets):
        for option in presets[presetIndex]["install"]:
            if option not in installOptions:
                installOptions.append(option)
    installMain(installOptions)
| 25,660
|
def state_space_model(A, z_t_minus_1, B, u_t_minus_1):
    """
    Propagate a linear state-space model one step:
    z_t = A @ z_{t-1} + B @ u_{t-1}

    :param A: state-transition matrix
    :param z_t_minus_1: state vector at time t-1
    :param B: control matrix
    :param u_t_minus_1: control input applied at time t-1
    :return: estimated state at time t
    """
    return (A @ z_t_minus_1) + (B @ u_t_minus_1)
| 25,661
|
def write_object_info(session, obj_type, id, outfile, path=None):
    """
    Write all data for an object to a file as JSON.
    Args:
        session: SQLAlchemy session object.
        obj_type (string): Class of the object to write.
            Options are "gateware", "devicedb", "project", "sequence", "measurement".
        id (int): Id of the object to write.
        outfile (str): File name. Usually a text file.
        path (str): Path of the file to write. Defaults to the current
            working directory *at call time* (the original default froze
            os.getcwd() at import time, ignoring later chdir calls).
    Raises:
        MultipleResultsFound: More than one object of given class in database has the given id.
        NoResultsFound: No object of given class in database has the given id.
    """
    if path is None:
        path = os.getcwd()
    obj = get_object_by_id(session, obj_type, id)
    write_data(obj.asdict(), path, outfile)
| 25,662
|
def get_post(id, check_author=True):
    """Get a post and its author by id.
    Checks that the id exists and optionally that the current user is
    the author.
    :param id: id of post to get
    :param check_author: require the current user to be the author
    :return: the post with author information
    :raise 404: if a post with the given id doesn't exist
    :raise 403: if the current user isn't the author
    """
    post = db_session.query(Post).filter(Post.id == id).first()
    if post is None:
        abort(404, "Post id {0} doesn't exist.".format(id))
    # `post` is an ORM object, not a mapping: use attribute access
    # (the original `post['author_id']` would raise TypeError).
    if check_author and post.author_id != g.user['id']:
        abort(403)
    return post
| 25,663
|
def hexdump(data, address=0, width=16):
    """Print a classic hex + ASCII dump of *data*, one row per *width* bytes.

    :param data: bytes-like object to dump
    :param address: address printed for the first row
    :param width: bytes per row
    """
    # Width of the hex column: 3 chars per byte minus the trailing space,
    # plus one extra separator between each 8-byte group.
    hex_chars_width = width * 3 - 1 + ((width - 1) // 8)
    for row in chunks(data, chunk_size=width):
        # Hex representation, grouped by 8 bytes.
        groups = [' '.join('{:02x}'.format(b) for b in g)
                  for g in chunks(row, chunk_size=8)]
        hex_area = ' '.join(groups)
        # Printable ASCII (33..126); everything else becomes '.'.
        ascii_area = ''.join(chr(b) if 33 <= b < 127 else '.' for b in row)
        print('{} {} |{}|'.format('{:08x}'.format(address),
                                  hex_area.ljust(hex_chars_width),
                                  ascii_area))
        address += width
| 25,664
|
def get_pixel(x, y):
    """Return the RGB value of a single pixel from the global pixel map.

    :param x: Horizontal position from 0 to 7
    :param y: Vertical position from 0 to 7
    """
    global _pixel_map
    # The map is stored row-major: rows are indexed by y, columns by x.
    row = _pixel_map[y]
    return row[x]
| 25,665
|
def check_for_NAs(func: Callable) -> Callable:
    """
    Decorator: short-circuit NA-like inputs to True.

    Strings that qualify as NA (the empty string, "0", or "NA") immediately
    yield True; any other string is passed through to the wrapped predicate.

    The original pattern "^|0|NA$" only matched the intended values because
    re.fullmatch renders the stray anchors redundant; the intent is spelled
    out explicitly (and as a raw string) here — behavior is unchanged.
    """
    def inner(string: str, *args, **kwargs) -> bool:
        if re.fullmatch(r"(?:0|NA)?", string) is not None:
            return True
        return func(string, *args, **kwargs)
    return inner
| 25,666
|
def integrateEP_w0_ode( w_init: np.ndarray, w0: Union[ Callable, np.ndarray ], w0prime: Union[ Callable, np.ndarray ],
                        B: np.ndarray, s: np.ndarray, s0: float = 0, ds: float = None,
                        R_init: np.ndarray = np.eye( 3 ), Binv: np.ndarray = None, arg_check: bool = True,
                        wv_only: bool = False ) -> (np.ndarray, np.ndarray, np.ndarray):
    """ integrate Euler-Poincare equation for needle shape sensing for given intrinsic angular deformation
        using scipy.integrate
        Author: Dimitri Lezcano
        Args:
            w_init: 3-D initial deformation vector
            w0: Callable function or N x 3 intrinsic angular deformation
            w0prime: Callable function or N x 3 d/ds w0
            B: 3 x 3 needle stiffness matrix
            s: the arclengths desired (Not implemented)
            s0: (Default = 0) the initial length to start off with
            ds: (Default = None) the arclength increments desired
            Binv: (Default = None) inv(B) Can be provided for numerical efficiency
            R_init: (Default = 3x3 identity) SO3 matrix for initial rotation angle
            arg_check: (Default = False) whether to check if the arguments are valid
            wv_only: (Default = False) whether to only integrate wv or not.
        Return:
            (N x 3 needle shape, N x 3 x 3 SO3 matrices of orientations), N x 3 angular deformation)
            (None, None, N x 3 angular deformation) if 'wv_only' is True
    """
    if arg_check:
        assert (w_init.size == 3)
        w_init = w_init.flatten()
        assert (B.shape == (3, 3))
        assert (geometry.is_SO3( R_init ))
        assert (s0 >= 0)
    # if
    # argument parsing
    # Drop arclengths before the start point.
    s = s[ s >= s0 ]
    if Binv is None:
        Binv = np.linalg.inv( B )
    elif arg_check:
        assert (Binv.shape == (3, 3))
    # setup intrinsic curvature functions
    # Accept either callables or sampled N x 3 arrays; arrays are wrapped in
    # linear interpolators (transposed so the interpolant returns 3-vectors).
    if callable( w0 ):
        w0_fn = w0
    else:
        w0_fn = interpolate.interp1d( s, w0.T, fill_value='extrapolate' )
    # w0_fn = lambda t: jit_linear_interp1d( t, w0, s )
    if callable( w0prime ):
        w0prime_fn = w0prime
    else:
        w0prime_fn = interpolate.interp1d( s, w0prime.T, fill_value='extrapolate' )
    # w0prime_fn = lambda t: jit_linear_interp1d( t, w0prime, s )
    # perform integration
    # NOTE(review): ds/2 is used for odeint's minimum/initial step — ds=None
    # (the default) would raise here; confirm callers always pass ds.
    ode_EP = lambda s, wv: differential_EPeq( wv, s, w0_fn, w0prime_fn, B, Binv )
    wv = odeint( ode_EP, w_init, s, full_output=False, hmin=ds/2, h0=ds/2, tfirst=True )
    # wv = solve_ivp( ode_EP, (s0, s.max()), w_init, method='RK45', t_eval=s,
    #                 first_step=ds )  # 'RK23' for speed (all slower than odeint)
    # integrate angular deviation vector in order to get the pose
    if wv_only:
        pmat, Rmat = None, None
    else:
        pmat, Rmat = integratePose_wv( wv, s=s, s0=s0, ds=ds, R_init=R_init )
    return pmat, Rmat, wv
| 25,667
|
def change_coordinate_frame(keypoints, window, scope=None):
    """Changes coordinate frame of the keypoints to be relative to window's frame.

    Given a window [y_min, x_min, y_max, x_max], shifts keypoints of shape
    [num_instances, num_keypoints, 2] by the window origin and rescales them
    by the window size, e.g. to re-express groundtruth keypoints relative to
    a randomly cropped image window during data augmentation.

    Args:
      keypoints: a tensor of shape [num_instances, num_keypoints, 2]
      window: a tensor of shape [4] with the target window
      scope: name scope.
    Returns:
      new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
    """
    with tf.name_scope(scope, 'ChangeCoordinateFrame'):
        window_height = window[2] - window[0]
        window_width = window[3] - window[1]
        # Translate to the window origin, then normalize by its size.
        shifted = keypoints - [window[0], window[1]]
        new_keypoints = scale(shifted, 1.0 / window_height, 1.0 / window_width)
        return new_keypoints
| 25,668
|
def eval_log_type(env_var_name):
    """Read a log type from the named environment variable.

    Returns the normalized (lowercased, stripped) value when it is one of
    the recognized LOG_LEVELS; otherwise returns False.
    """
    candidate = os.environ.get(env_var_name, '').lower().strip()
    if candidate in LOG_LEVELS:
        return candidate
    return False
| 25,669
|
def _get_name(filename: str) -> str:
    """Return a random name (first or last) read from *filename*.

    The file is expected to begin with a line holding the total number of
    names, followed by fixed-width records of 20 characters plus a newline.
    Internal function. Not to be imported.
    """
    record_width: int = 20 + 1  # 20 chars per line plus the trailing '\n'
    with open(filename) as handle:
        try:
            total_names = int(next(handle))
            target: int = random.randint(1, total_names)
            # Seeking past `target` records skips the count line plus the
            # first target-1 names, so the next line read is the chosen one.
            handle.seek(record_width * target)
            return next(handle).strip()
        except StopIteration:
            # The file was empty (or the seek ran past its end).
            return ''
| 25,670
|
def map_to_udm_section_associations(enrollments_df: DataFrame) -> DataFrame:
    """
    Maps a DataFrame containing Canvas enrollments into the Ed-Fi LMS Unified Data
    Model (UDM) format.

    Parameters
    ----------
    enrollments_df: DataFrame
        Pandas DataFrame containing all Canvas enrollments

    Returns
    -------
    DataFrame
        A LMSSectionAssociations-formatted DataFrame

    DataFrame columns are:
        SourceSystemIdentifier: A unique number or alphanumeric code assigned to a the section-association by
            the source system
        SourceSystem: The system code or name providing the user data
        EnrollmentStatus: Possible values are Active, Expired, Invite pending, Request Pending, Archived
        LMSUserSourceSystemIdentifier: A unique number or alphanumeric code assigned to a user by the source
            system
        LMSSectionSourceSystemIdentifier: A unique number or alphanumeric code assigned to a section by the
            source system
        CreateDate: Date/time at which the record was first retrieved
        LastModifiedDate: Date/time when the record was modified, or when first retrieved
        SourceCreateDate: Date this record was created in the LMS
        SourceLastModifiedDate: Date this record was last updated in the LMS
    """
    if enrollments_df.empty:
        return enrollments_df

    # Fail fast if the Canvas extract is missing any expected column.
    for required_column in (
        "id",
        "enrollment_state",
        "user_id",
        "course_section_id",
        "created_at",
        "updated_at",
    ):
        assert required_column in enrollments_df.columns

    result = enrollments_df[
        [
            "id",
            "enrollment_state",
            "user_id",
            "course_section_id",
            "created_at",
            "updated_at",
            "CreateDate",
            "LastModifiedDate",
        ]
    ].copy()

    # Canvas column names -> UDM column names.
    result.rename(
        columns={
            "id": "SourceSystemIdentifier",
            "enrollment_state": "EnrollmentStatus",
            "user_id": "LMSUserSourceSystemIdentifier",
            "course_section_id": "LMSSectionSourceSystemIdentifier",
            "created_at": "SourceCreateDate",
            "updated_at": "SourceLastModifiedDate",
        },
        inplace=True,
    )

    # Normalize Canvas timestamps and enrollment states to UDM conventions.
    for date_column in ("SourceCreateDate", "SourceLastModifiedDate"):
        result[date_column] = result[date_column].apply(_get_date_formated)
    result["EnrollmentStatus"] = result["EnrollmentStatus"].apply(
        _get_enrollment_status
    )

    result["SourceSystem"] = SOURCE_SYSTEM
    return result
| 25,671
|
def ngrammer(tokens, length=4):
    """
    Generates n-grams from the given tokens

    :param tokens: list of tokens in the text
    :param length: n-grams of up to this length
    :return: n-grams as tuples
    """
    # Never ask for n-grams longer than the token list itself.
    longest = min(len(tokens), length)
    for size in range(1, longest + 1):
        yield from ngrams(tokens, size)
| 25,672
|
def plot_electrodes(mris, grid, values=None, ref_label=None, functional=None):
    """Plot an electrode grid over a brain surface as a plotly 3D figure.

    Parameters
    ----------
    mris : dict
        surfaces keyed by name; 'pial' is preferred, with 'dura' as a
        fallback (each assumed to have 'pos' vertices and 'tri' triangles --
        TODO confirm against caller)
    grid : dict
        electrode grid with 'pos', 'norm' and 'label' arrays
    values : dict, optional
        per-electrode values (array under key 'value'); when given, the
        electrodes are color-coded by value instead of by label type
    ref_label : str, optional
        label of the reference electrode, highlighted in green (only used
        when *values* is None)
    functional : dict, optional
        extra point cloud ('pos' and 'value') rendered as diamond markers
        with its own colorbar

    Returns
    -------
    plotly Figure with the surface mesh and electrode markers
    """
    # Prefer the pial surface; fall back to dura if pial is absent.
    surf = mris.get('pial', None)
    if surf is None:
        surf = mris.get('dura', None)
    # Flatten the grid to simple (n, 3) / (n,) arrays.
    pos = grid['pos'].reshape(-1, 3)
    norm = grid['norm'].reshape(-1, 3)
    labels = grid['label'].reshape(-1)

    # Sign of the mean x coordinate: +1 right hemisphere, -1 left.
    # Used below to place the camera on the correct side.
    right_or_left = sign(mean(surf['pos'][:, 0]))

    if values is None:
        # No values: color by electrode type (wires red, contacts black,
        # optional reference electrode green).
        iswire = labels == WIRE
        colors = labels.copy()
        colors[iswire] = 'red'
        colors[~iswire] = 'black'
        if ref_label is not None:
            colors[labels == ref_label] = 'green'
        marker = dict(
            size=MARKER_SIZE,
            color=colors,
            )
        hovertext = labels

    else:
        # Color-code electrodes by their value with a shared colorbar.
        values = values['value'].reshape(-1)
        marker = dict(
            size=MARKER_SIZE,
            color=values,
            colorscale=COLORSCALE,
            showscale=True,
            cmin=nanmin(values),
            cmax=nanmax(values),
            colorbar=dict(
                title='electrode values',
                ),
            )
        hovertext = [f'{x0}<br>{x1:0.3f}' for x0, x1 in zip(labels, values)]

    traces = [
        # Brain surface rendered as a smooth pink mesh, lit from below.
        go.Mesh3d(
            x=surf['pos'][:, 0],
            y=surf['pos'][:, 1],
            z=surf['pos'][:, 2],
            i=surf['tri'][:, 0],
            j=surf['tri'][:, 1],
            k=surf['tri'][:, 2],
            color='pink',
            hoverinfo='skip',
            flatshading=False,
            lighting=dict(
                ambient=0.18,
                diffuse=1,
                fresnel=0.1,
                specular=1,
                roughness=0.1,
                ),
            lightposition=dict(
                x=0,
                y=0,
                z=-1,
                ),
            ),
        ]

    if functional is not None:
        # Optional functional point cloud with a diverging colormap
        # centered at zero and its own colorbar on the right.
        traces.append(
            go.Scatter3d(
                x=functional['pos'][:, 0],
                y=functional['pos'][:, 1],
                z=functional['pos'][:, 2],
                mode='markers',
                hoverinfo='skip',
                marker=dict(
                    size=5,
                    color=functional['value'],
                    symbol='diamond',
                    colorscale='RdBu',
                    reversescale=True,
                    cmid=0,
                    colorbar=dict(
                        x=1.2,
                        title='functional values',
                        ),
                    ),
                opacity=1,
                ))

    elif False:
        # Disabled branch: cones along the electrode normals were hard to
        # see, so this visualization is intentionally switched off.
        """do not show Cone, it's not easy to see"""
        traces.append(
            go.Cone(
                x=pos[:, 0],
                y=pos[:, 1],
                z=pos[:, 2],
                u=norm[:, 0] * -1,
                v=norm[:, 1] * -1,
                w=norm[:, 2] * -1,
                sizeref=2,
                sizemode='absolute',
                anchor='tail',
                text=labels,
                showscale=False,
                colorscale=[
                    [0, 'rgb(0, 0, 0)'],
                    [1, 'rgb(0, 0, 0)'],
                    ],
                hoverinfo='skip',
                ),
            )

    # Electrode markers themselves, with per-electrode hover text.
    traces.append(
        go.Scatter3d(
            x=pos[:, 0],
            y=pos[:, 1],
            z=pos[:, 2],
            text=labels,
            mode='markers',
            hovertext=hovertext,
            hoverinfo='text',
            marker=marker,
            ),
        )

    fig = go.Figure(
        data=traces,
        layout=go.Layout(
            showlegend=False,
            scene=dict(
                xaxis=AXIS,
                yaxis=AXIS,
                zaxis=AXIS,
                # Orthographic camera looking at the lateral surface of
                # whichever hemisphere the grid is on.
                camera=dict(
                    eye=dict(
                        x=right_or_left,
                        y=0,
                        z=0.5,
                        ),
                    projection=dict(
                        type='orthographic',
                        ),
                    ),
                ),
            ),
        )

    return fig
| 25,673
|
def _get_laplace_matrix(bcs: Boundaries) -> Tuple[np.ndarray, np.ndarray]:
    """get sparse matrix for laplace operator on a 1d Cartesian grid

    Args:
        bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):
            {ARG_BOUNDARIES_INSTANCE}

    Returns:
        tuple: A sparse matrix and a sparse vector that can be used to evaluate
        the discretized laplacian

    Raises:
        NotImplementedError: for grids with more than two dimensions
    """
    # Dispatch on the grid dimensionality; only 1d and 2d are supported.
    dim = bcs.grid.dim
    if dim == 1:
        return _get_laplace_matrix_1d(bcs)
    if dim == 2:
        return _get_laplace_matrix_2d(bcs)
    raise NotImplementedError(f"{dim:d}-dimensional Laplace matrix not implemented")
| 25,674
|
def column_or_1d(y, warn=False):
    """ Ravel column or 1d numpy array, else raises an error

    Parameters
    ----------
    y : array-like
    warn : boolean, default False
       To control display of warnings.

    Returns
    -------
    y : array

    Raises
    ------
    ValueError
        If ``y`` is neither 1d nor a 2d single-column array.
    """
    shape = np.shape(y)
    ndim = len(shape)
    is_flat = ndim == 1
    is_column = ndim == 2 and shape[1] == 1

    if not (is_flat or is_column):
        raise ValueError("bad input shape {0}".format(shape))

    if is_column and warn:
        warnings.warn("A column-vector y was passed when a 1d array was"
                      " expected. Please change the shape of y to "
                      "(n_samples, ), for example using ravel().",
                      stacklevel=2)
    return np.ravel(y)
| 25,675
|
def deplacer_pion(event,fenetre,grille,can):
    """
    Move a pawn in response to a mouse click.

    :param event: tkinter event
    :type event: tkinter event
    :param fenetre: tkinter window
    :type fenetre: tkinter window
    :param can: tkinter canvas
    :type can: tkinter object
    :param grille: board grid
    :type grille: list
    :rtype: void
    """
    global x,y
    global tour
    # Translate the pixel click position into board coordinates.
    x,y = plateau.correspond(event.x,event.y)
    # NOTE(review): xs, ys are module-level globals (presumably the currently
    # selected square set by a previous click handler) -- confirm they are
    # always set before this callback runs.
    try:
        # White's turn ('b' = blanc): target square empty and the selected
        # piece is a white cavalier ("C") or white fou ("F").
        if g_evenements.tour_joueur(tour) == 'b' and g_evenements.case_vide(grille,x,y) == True and (grille[xs][ys] == "C" or grille[xs][ys] == "F"):
            if g_evenements.confirmer_deplacement(x,y) == True:
                plateau.supprimer_indicateur(can,"ind")
                deplacer_pions(can,grille,xs,ys,x,y,"log")
                g_evenements.capture(grille,x,y)
                tour +=1
                plateau.supprimer_indicateur(can,"dep")
                plateau.supprimer_indicateur(can,"dep_imp")
        # Black's turn ('n' = noir): same checks against lowercase pieces.
        elif g_evenements.tour_joueur(tour) == 'n'and g_evenements.case_vide(grille,x,y) == True and (grille[xs][ys] == "c" or grille[xs][ys] == "f"):
            if g_evenements.confirmer_deplacement(x,y) == True:
                plateau.supprimer_indicateur(can,"ind")
                deplacer_pions(can,grille,xs,ys,x,y,"log")
                g_evenements.capture(grille,x,y)
                tour +=1
                plateau.supprimer_indicateur(can,"dep")
                plateau.supprimer_indicateur(can,"dep_imp")
        # Post-move bookkeeping: promotions, captured pieces, redraws.
        promotion_pions(grille,0,7,"F","C","f","c")
        afficher_pions_captures(can,grille,"C","c",g_images.cavalier_blanc_plat,g_images.cavalier_noir_plat)
        afficher_pions_plateau_plat(grille,can)
        afficher_pions_plateau_iso(grille,can)
        gagnant = g_evenements.gagner_partie(grille,"F","C","f","c")
        g_evenements.afficher_tour(can,tour,g_images.tourn,g_images.tourb)
        if gagnant != False:
            # Someone won: reset the turn counter and show the end screen.
            tour = 0
            g_evenements.partie_gagner(fenetre,can,gagnant,"blancs","noires")
    except IndexError:#Pass IndexError: click outside the window
        plateau.supprimer_indicateur(can,"ind")
        plateau.supprimer_indicateur(can,"dep")
        plateau.supprimer_indicateur(can,"dep_imp")
        print("erreur")
        pass
| 25,676
|
def conv_tower(
    inputs,
    filters_init,
    filters_end=None,
    filters_mult=None,
    divisible_by=1,
    repeat=1,
    **kwargs
):
    """Construct a reducing convolution block.

    Args:
      inputs: [batch_size, seq_length, features] input sequence
      filters_init: Initial Conv1D filters
      filters_end: End Conv1D filters; used to derive the per-repetition
        multiplier when filters_mult is not given
      filters_mult: Multiplier for Conv1D filters
      divisible_by: Round filters to be divisible by (eg a power of two)
      repeat: Tower repetitions

    Returns:
      [batch_size, seq_length, features] output sequence
    """

    def _round(x):
        # Round the filter count to the nearest multiple of divisible_by.
        return int(np.round(x / divisible_by) * divisible_by)

    # flow through variable current
    current = inputs

    # initialize filters
    rep_filters = filters_init

    # determine multiplier
    if filters_mult is None:
        assert filters_end is not None
        if repeat > 1:
            # Geometric progression from filters_init to filters_end.
            filters_mult = np.exp(np.log(filters_end / filters_init) / (repeat - 1))
        else:
            # Single repetition: no growth needed. (The unguarded formula
            # previously divided by zero here when repeat == 1.)
            filters_mult = 1.0

    for ri in range(repeat):
        # convolution
        current = conv_block(current, filters=_round(rep_filters), **kwargs)

        # update filters for the next repetition
        rep_filters *= filters_mult

    return current
| 25,677
|
def transcribe(args: argparse.Namespace):
    """Do speech to text on one or more WAV files.

    Reads WAV paths from ``args.wav_file`` (or raw WAV data from stdin when
    none are given) and prints one JSON transcription result per input.
    Expects ``args`` to carry model_dir, graph_dir, model_type,
    wav_file and frames_in_chunk attributes.
    """
    # Load transcriber
    args.model_dir = Path(args.model_dir)
    if args.graph_dir:
        args.graph_dir = Path(args.graph_dir)
    else:
        # Default graph location inside the model directory.
        args.graph_dir = args.model_dir / "graph"

    transcriber = KaldiCommandLineTranscriber(
        args.model_type, args.model_dir, args.graph_dir
    )

    # Do transcription
    try:
        if args.wav_file:
            # Transcribe WAV files
            for wav_path in args.wav_file:
                _LOGGER.debug("Processing %s", wav_path)
                wav_bytes = open(wav_path, "rb").read()
                result = transcriber.transcribe_wav(wav_bytes)
                if not result:
                    # Emit an empty transcription rather than nothing.
                    result = Transcription.empty()

                print_json(result)
        else:
            # Read WAV data from stdin
            if os.isatty(sys.stdin.fileno()):
                print("Reading WAV data from stdin...", file=sys.stderr)

            # Stream in chunks
            with wave.open(sys.stdin.buffer, "rb") as wav_file:

                def audio_stream(wav_file, frames_in_chunk):
                    # Yield fixed-size frame chunks, then the remainder.
                    num_frames = wav_file.getnframes()
                    try:
                        while num_frames > frames_in_chunk:
                            yield wav_file.readframes(frames_in_chunk)
                            num_frames -= frames_in_chunk

                        if num_frames > 0:
                            # Last chunk
                            yield wav_file.readframes(num_frames)
                    except KeyboardInterrupt:
                        pass

                result = transcriber.transcribe_stream(
                    audio_stream(wav_file, args.frames_in_chunk),
                    wav_file.getframerate(),
                    wav_file.getsampwidth(),
                    wav_file.getnchannels(),
                )

                assert result
                print_json(result)
    except KeyboardInterrupt:
        pass
    finally:
        # Always release the Kaldi subprocess/resources.
        transcriber.stop()
| 25,678
|
def edit_coach(request, coach_id):
    """ Edit a coach's information (superusers only). """
    # Guard clause: only superusers may edit coaches.
    if not request.user.is_superuser:
        messages.error(request, 'Sorry, only the owners can do that.')
        return redirect(reverse('home'))

    coach = get_object_or_404(Coach, pk=coach_id)

    if request.method != 'POST':
        # Initial GET: pre-populate the form with the coach's data.
        form = CoachForm(instance=coach)
    else:
        form = CoachForm(request.POST, request.FILES, instance=coach)
        if form.is_valid():
            coach = form.save()
            messages.success(request, 'Successfully updated coach!')
            return redirect(reverse('view_coach', args=[coach.id]))
        messages.error(request, (
            'Failed to update coach. Please ensure the form is valid.'))

    # Reached on GET and on invalid POST: re-render the edit page.
    messages.info(request, f'You are editing {coach.first_name}')
    context = {
        'form': form,
        'coach': coach,
    }
    return render(request, 'coaches/edit_coach.html', context)
| 25,679
|
def estimate_translation(S,
                         joints_2d,
                         focal_length=5000.,
                         img_size=224.,
                         use_all_joints=False,
                         rotation=None):
    """Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d.

    Input:
        S: (B, 49, 3) 3D joint locations
        joints: (B, 49, 3) 2D joint locations and confidence
    Returns:
        (B, 3) camera translation vectors
    """
    device = S.device

    if rotation is not None:
        # Apply the per-batch rotation to the 3D joints.
        S = torch.einsum('bij,bkj->bki', rotation, S)

    # Restrict to the ground-truth joints (indices 25:49) unless all
    # joints were requested, then move everything to numpy.
    if not use_all_joints:
        S = S[:, 25:, :]
        joints_2d = joints_2d[:, 25:, :]
    S = S.cpu().numpy()
    joints_2d = joints_2d.cpu().numpy()

    # Split 2D coordinates from their confidence channel.
    joints_conf = joints_2d[:, :, -1]
    joints_coords = joints_2d[:, :, :-1]

    batch_size = S.shape[0]
    trans = np.zeros((batch_size, 3), dtype=np.float32)

    # Solve for the translation independently for each batch element.
    for i in range(batch_size):
        trans[i] = estimate_translation_np(S[i],
                                           joints_coords[i],
                                           joints_conf[i],
                                           focal_length=focal_length,
                                           img_size=img_size)
    return torch.from_numpy(trans).to(device)
| 25,680
|
def find_ladders_pretty(left_word: str, right_word: str, exact_depth: Optional[int] = None, max_depth: int = 10, use_heuristic: bool = True):
    """ Print unique word ladders (see find_ladders), shortest first.

    See Also:
        ladder: Find adjacent words
        find_ladders: Find all ladders between two words
        find_ladders_unique: Get unique ladders between two words
    """
    ladders = find_ladders_unique(
        left_word, right_word, exact_depth, max_depth, use_heuristic)
    # sorted() handles the empty case naturally (nothing is printed).
    for one_ladder in sorted(ladders, key=len):
        print(one_ladder)
| 25,681
|
def create_midterm_data(all_students):
    """
    Create the midterm data set.

    Ten questions, two from each topic; a percentage of students did not
    show up. Use it as an example of merge.

    Rules:
    - International students have a 10% drop out rate
    - Performance changes by PROGRAM!

    Side effects:
    - Writes each student's score into all_students['MIDTERM_SCORE'].

    :param all_students: DataFrame with the student roster
    :return: DataFrame with the solution row followed by one row per
             student who took the midterm
    """
    midterm_choices = ['A', 'B', 'C', 'D']
    # Random answer key, one choice per question.
    midterm_solution = [random.choice(midterm_choices) for _ in range(10)]

    # Per-student answer rows, collected and concatenated once at the end
    # (DataFrame.append was removed in pandas 2.0).
    rows = []
    for idx, student_info in all_students.iterrows():
        # Detect if a student has to be dropped.
        skip = False
        for enrolment, rate in midterm_dropout_rates:
            if student_info['Enrolment Type'] == enrolment and \
                    random.random() <= rate:
                skip = True
        if skip:
            continue

        midterm_score = {
            'SID': student_info['SID'],
            'email': student_info['email'],
            'Last Name': student_info['Surname'],
            'First Name': student_info['GivenName'],
        }

        # Select the score based on the program.
        prg = student_info['Program']
        score = int(round(random.normalvariate(
            midterm_score_average[prg][0] / 10,
            midterm_score_average[prg][1] / 10)))
        score = max(0, min(10, score))  # clamp to the [0, 10] range

        # Score contains the number of questions that are correct.
        text_score = str(10 * score)
        midterm_score['Total'] = text_score

        # Persist the score on the caller's DataFrame. (Assigning to the
        # iterrows() row copy, as the code previously did, is silently lost.)
        all_students.loc[idx, 'MIDTERM_SCORE'] = text_score

        # Generate the set of answers for the midterm.
        correct_answers = set(random.sample(range(10), score))
        for q in range(10):
            field = midterm_answers_fields[q + 4]
            if q in correct_answers:
                answer = midterm_solution[q]
                points = 1
            else:
                incorrect = [c for c in midterm_choices
                             if c != midterm_solution[q]]
                answer = random.choice(incorrect)
                points = 0
            midterm_score[field] = answer
            # Companion per-question score column (field name minus prefix).
            midterm_score[field[1:]] = points

        rows.append(midterm_score)

    # Solution row first, then the student rows.
    solution_row = pd.DataFrame(
        [[0, '', 'SOLUTION', 'SOLUTION'] + midterm_solution + ['100']],
        columns=midterm_answers_fields)
    return pd.concat([solution_row, pd.DataFrame(rows)], ignore_index=True)
| 25,682
|
def identify_outliers(x_vals, y_vals, obj_func, outlier_fraction=0.1):
    """Finds the indices of outliers in the provided data to prune for subsequent curve fitting

    Args:
        x_vals (np.array): the x values of the data being analyzed
        y_vals (np.array): the y values of the data being analyzed
        obj_func (str): the objective function to use for curve fitting to determine outliers
        outlier_fraction (float): the fractional deviation from predicted value required in
            order to classify a data point as an outlier

    Returns:
        np.array: the indices of the identified outliers"""
    # Fit the chosen objective function to the data.
    objective = create_objective_function(obj_func)
    fitted_weights, _ = curve_fit(objective, x_vals, y_vals)

    # Build a prediction function from the fitted weights and evaluate it.
    predict = create_prediction_function(name=obj_func, weights=fitted_weights)
    predictions = predict(x_vals)

    # A point is an outlier when it deviates from its prediction by more
    # than outlier_fraction in either direction.
    too_high = y_vals > predictions * (1 + outlier_fraction)
    too_low = y_vals < predictions * (1 - outlier_fraction)
    return np.where(too_high | too_low)[0]
| 25,683
|
def GetTypeMapperFlag(messages):
  """Helper to get a choice flag from the commitment type enum."""
  help_text = (
      'Type of commitment. `memory-optimized` indicates that the '
      'commitment is for memory-optimized VMs.')
  # Hide the sentinel enum value from the visible choices.
  return arg_utils.ChoiceEnumMapper(
      '--type',
      messages.Commitment.TypeValueValuesEnum,
      help_str=help_text,
      default='general-purpose',
      include_filter=lambda choice: choice != 'TYPE_UNSPECIFIED')
| 25,684
|
def gaussian_2Dclusters(n_clusters: int,
                        n_points: List[int],
                        means: List[List[float]],
                        cov_matrices: List[List[List[float]]]):
    """
    Creates a set of clustered data points, where the distribution within each
    cluster is Gaussian.

    Parameters
    ----------
    n_clusters:
        The number of clusters
    n_points:
        A list of the number of points in each cluster
    means:
        A list of the mean [x, y] coordinates of each cluster in the plane
        (i.e. their centres)
    cov_matrices:
        A list of the 2x2 covariance matrices of the clusters

    Returns
    -------
    data
        A dict whose keys are the cluster labels ('0', '1', ...) and whose
        values are (n_points[i], 2) arrays of sampled x, y coordinates.

    Raises
    ------
    ValueError
        If n_points, means or cov_matrices do not all have n_clusters
        entries.

    TODO
    Output data as Pandas DataFrame?
    """
    args_in = [len(means), len(cov_matrices), len(n_points)]
    # Raise instead of assert so validation survives `python -O`.
    if not all(item == n_clusters for item in args_in):
        raise ValueError(
            "Insufficient data provided for specified number of clusters")

    data = {}
    for i in range(n_clusters):
        # multivariate_normal already returns one (x, y) sample per row.
        samples = np.random.multivariate_normal(
            means[i], cov_matrices[i], n_points[i])
        data[str(i)] = samples
    return data
| 25,685
|
def test_dependent_get_define(mock_code_version, mock_dep_code):
    """Test task dependent function get_define.

    Builds a Dependent task whose dependence combines an OR clause (depends
    on the whole process definition) with an AND clause (depends on one
    specific task), and checks the serialized task definition dict.
    """
    project_name = "test-dep-project"
    process_definition_name = "test-dep-definition"
    dependent_task_name = "test-dep-task"
    # Top-level relation is AND over the two sub-clauses below.
    dep_operator = And(
        Or(
            # test dependence with add tasks
            DependentItem(
                project_name=project_name,
                process_definition_name=process_definition_name,
            )
        ),
        And(
            # test dependence with specific task
            DependentItem(
                project_name=project_name,
                process_definition_name=process_definition_name,
                dependent_task_name=dependent_task_name,
            )
        ),
    )
    name = "test_dependent_get_define"
    # Expected serialized form; the codes come from the mock fixtures.
    expect = {
        "code": 123,
        "name": name,
        "version": 1,
        "description": None,
        "delayTime": 0,
        "taskType": "DEPENDENT",
        "taskParams": {
            "resourceList": [],
            "localParams": [],
            "dependence": {
                "relation": "AND",
                "dependTaskList": [
                    {
                        "relation": "OR",
                        "dependItemList": [
                            {
                                "projectCode": TEST_PROJECT_CODE,
                                "definitionCode": TEST_DEFINITION_CODE,
                                # "0" means the whole definition, not a task.
                                "depTaskCode": "0",
                                "cycle": "day",
                                "dateValue": "today",
                            }
                        ],
                    },
                    {
                        "relation": "AND",
                        "dependItemList": [
                            {
                                "projectCode": TEST_PROJECT_CODE,
                                "definitionCode": TEST_DEFINITION_CODE,
                                "depTaskCode": TEST_TASK_CODE,
                                "cycle": "day",
                                "dateValue": "today",
                            }
                        ],
                    },
                ],
            },
            "conditionResult": {"successNode": [""], "failedNode": [""]},
            "waitStartTimeout": {},
        },
        "flag": "YES",
        "taskPriority": "MEDIUM",
        "workerGroup": "default",
        "failRetryTimes": 0,
        "failRetryInterval": 1,
        "timeoutFlag": "CLOSE",
        "timeoutNotifyStrategy": None,
        "timeout": 0,
    }
    task = Dependent(name, dependence=dep_operator)
    assert task.get_define() == expect
| 25,686
|
def prepare_concepts_index(create=False):
    """
    Creates the settings and mappings in Elasticsearch to support term search

    :param create: when True, actually create the index in Elasticsearch;
        otherwise only build and return the settings dict
    :return: the index settings/mappings dictionary
    """
    # Custom analyzer that lowercases and strips accents for folded search.
    folding_analyzer = {
        "folding": {"tokenizer": "standard", "filter": ["lowercase", "asciifolding"]}
    }

    # "value" is searchable as analyzed text, raw keyword, and folded text.
    value_mapping = {
        "analyzer": "standard",
        "type": "text",
        "fields": {"raw": {"type": "keyword"}, "folded": {"analyzer": "folding", "type": "text"}},
    }

    properties = {
        "top_concept": {"type": "keyword"},
        "conceptid": {"type": "keyword"},
        "language": {"type": "keyword"},
        "id": {"type": "keyword"},
        "category": {"type": "keyword"},
        "provisional": {"type": "boolean"},
        "type": {"type": "keyword"},
        "value": value_mapping,
    }

    index_settings = {
        "settings": {"analysis": {"analyzer": folding_analyzer}},
        "mappings": {"_doc": {"properties": properties}},
    }

    if create:
        se = SearchEngineFactory().create()
        se.create_index(index=CONCEPTS_INDEX, body=index_settings)

    return index_settings
| 25,687
|
def test_to_graph_should_return_has_part_as_uri() -> None:
    """It returns an has_part graph isomorphic to spec."""
    # (A stray duplicate docstring line was removed here; only one
    # docstring statement is meaningful.)
    informationmodel = InformationModel()
    informationmodel.identifier = "http://example.com/informationmodels/1"

    # One plain string and one URI instance: both must serialize as URIs.
    has_part: List[Union[InformationModel, URI]] = []
    has_part1 = "https://example.com/informationmodels/2"
    has_part.append(has_part1)
    has_part2 = URI("https://example.com/informationmodels/3")
    has_part.append(has_part2)
    informationmodel.has_part = has_part

    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .
    @prefix foaf: <http://xmlns.com/foaf/0.1/> .
    @prefix modelldcatno: <https://data.norge.no/vocabulary/modelldcatno#> .

    <http://example.com/informationmodels/1>
        a modelldcatno:InformationModel ;
            dct:hasPart <https://example.com/informationmodels/2> ;
            dct:hasPart <https://example.com/informationmodels/3> ;
    .
    """

    g1 = Graph().parse(data=informationmodel.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")

    assert_isomorphic(g1, g2)
| 25,688
|
def pack_wrapper(module, att_feats, att_masks):
    """
    for batch computation, pack sequences with different lenghth with explicit setting the batch size at each time step
    """
    if att_masks is None:
        # No masks: every sequence is full length, apply the module directly.
        return module(att_feats)

    # Sequence lengths come from summing each row of the mask.
    lengths = att_masks.data.long().sum(1)
    packed, inv_ix = sort_pack_padded_sequence(att_feats, lengths)
    # Run the module on the packed data and rebuild a PackedSequence,
    # then pad and restore the original (unsorted) batch order.
    processed = PackedSequence(module(packed[0]), packed[1])
    return pad_unsort_packed_sequence(processed, inv_ix)
| 25,689
|
async def test_form_invalid_auth(opp):
    """Test we handle invalid auth.

    Starts the user config flow, makes the Litter-Robot login raise, and
    checks that the form is re-shown with an 'invalid_auth' base error.
    """
    # Start the flow from the user step to get the initial form.
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    # Force the account connection to fail with a login error.
    with patch(
        "pylitterbot.Account.connect",
        side_effect=LitterRobotLoginException,
    ):
        result2 = await opp.config_entries.flow.async_configure(
            result["flow_id"], CONFIG[DOMAIN]
        )

    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "invalid_auth"}
| 25,690
|
class cd:
    """A context manager for switching the current working directory when
    using the local() function. Not thread safe.

    Rewritten as a class implementing the context-manager protocol: the
    previous version was a bare generator function with no
    ``@contextlib.contextmanager`` decorator, so it could not be used in a
    ``with`` statement, and its pop was skipped if the body raised.
    """

    def __init__(self, directory):
        self._directory = directory

    def __enter__(self):
        GLOBAL_PWD.append(self._directory)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Runs even when the with-body raises; never pop the base entry.
        if len(GLOBAL_PWD) > 1:
            GLOBAL_PWD.pop()
        return False  # do not suppress exceptions
| 25,691
|
def processor(preprocessed_data_id, param_id, param_constructor):
    """Dispatch the processor work.

    :param preprocessed_data_id: id of the PreprocessedData to process
    :param param_id: id passed to param_constructor to build the parameters
    :param param_constructor: callable returning a parameters object
    :return: the processing result, or None when processing failed (the
        failure reason is recorded on preprocessed_data.processing_status)
    """
    preprocessed_data = PreprocessedData(preprocessed_data_id)
    params = param_constructor(param_id)

    sp = StudyProcessor()
    try:
        process_out = sp(preprocessed_data, params)
    except Exception as e:
        # format_exception_only expects (type, value); the previous call
        # passed (e, exc_info()) and produced garbled error messages.
        error_msg = ''.join(format_exception_only(type(e), e))
        preprocessed_data.processing_status = "failed: %s" % error_msg
        process_out = None
    return process_out
| 25,692
|
def recommendation_inspiredby(film: str, limit: int=20) -> list:
    """Movie recommandations from the same inspiration with selected movie

    Args:
        film (str): URI of the selected movie
        limit (int, optional): Maximum number of results to return. Defaults to 20.

    Returns:
        list: matching movies with URI, title, inspiration list,
        number of awards received, score on Rotten Tomato and a "relevance score"
    """
    # In the query, we assume that movies have a score < 100
    # (removes noise - movies with few reviews)
    # The total score blends the Rotten Tomatoes score with the award count,
    # rescaled by 100/138 (presumably to normalize the maximum -- confirm).
    query = f"""
    {get_prefix()}
    SELECT ?film ?filmLabel
        (GROUP_CONCAT(DISTINCT ?inspiredbyLabel; separator="; ") AS ?inspiredbyList)
        (COUNT(DISTINCT ?award) AS ?numAwards)
        ?score
        ((?score + ?numAwards)*100/138 AS ?totalScore)
    WHERE {{
        {{
            SELECT ?originInspiredby
            WHERE {{ wd:{film} wdt:P941 ?originInspiredby . }}
        }}
        ?film wdt:P31 wd:Q11424;
            wdt:P941 ?inspiredby;
            wdt:P444 ?brutScore.
        OPTIONAL {{?film wdt:P166 ?award.}}
        SERVICE wikibase:label {{
            bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en".
            ?film rdfs:label ?filmLabel.
            ?inspiredby rdfs:label ?inspiredbyLabel.
        }}
        FILTER (?inspiredby IN (?originInspiredby))
        FILTER regex(?brutScore, "^[0-9]+%$")
        BIND(xsd:integer(REPLACE(?brutScore, "%$", "")) AS ?score)
        FILTER (?score != 100)
        FILTER(?film != wd:{film})
    }}
    GROUP BY ?film ?filmLabel ?score
    ORDER BY DESC(?totalScore)
    LIMIT {limit}
    """
    # NOTE(review): leftover debug print -- consider removing or demoting
    # to a logger.debug call.
    print(query)
    sp_wrapper = get_sparql()
    sp_wrapper.setQuery(query)
    sp_wrapper.setReturnFormat(JSON)
    return resp_format(sp_wrapper.query().convert()['results']['bindings'])
| 25,693
|
def str2twixt(move):
    """ Converts one move string to a twixt backend class move.

    Handles both T1-style coordinates (e.g.: 'd5', 'f18'') as well as tsgf-
    style coordinates (e.g.: 'fg', 'bi') as well as special strings
    ('swap' and 'resign'). It can handle letter in upper as well as lowercase.

    Args:
        move: string with a move
    Returns:
        twixt.SWAP or twixt.RESIGN or twixt.Point
    Raises
        ValueError if the move_str can't be parsed in any valid format

    Examples:
    >>> str2twixt('b3')
    b3

    >>> str2twixt('i18')
    i18

    >>> str2twixt('fj')
    f10

    >>> str2twixt('swap')
    'swap'

    >>> str2twixt('resign')
    'resign'

    >>> str2twixt('123')
    ValueError: Can't parse move: '123'

    >>> str2twixt('invalid')
    ValueError: Can't parse move: 'invalid'
    """
    lowered = move.lower()

    # Special moves first.
    if lowered == twixt.SWAP.lower():
        return twixt.SWAP
    if lowered == twixt.RESIGN.lower():
        return twixt.RESIGN

    # T1-style: a letter column followed by a numeric row, e.g. 'd5'.
    if move[0] in string.ascii_letters and move[-1] in string.digits:
        return twixt.Point(move)

    # tsgf-style: two letters, the second encoding the row, e.g. 'fj'.
    if len(move) == 2 and all(ch in string.ascii_letters for ch in move):
        row = ord(move[1].lower()) - ord('a') + 1
        return twixt.Point(move[0] + str(row))

    # Can't handle move. Throw exception
    raise ValueError(f"Can't parse move: '{move}'")
| 25,694
|
def celery_health_view(request):
    """Admin view that displays the celery configuration and health."""
    if request.method == 'POST':
        # Kick off a health-check task and bounce back to the same page.
        celery_health_task.delay(datetime.now())
        messages.success(request, 'Health task created.')
        return HttpResponseRedirect(request.path)

    # Celery settings are the conf attributes starting with a capital letter.
    starts_uppercase = re.compile('^[A-Z]')
    setting_names = sorted(
        key for key in dir(current_app.conf) if starts_uppercase.match(key)
    )
    sorted_settings = []
    for key in setting_names:
        # Mask anything that looks like a password.
        is_secret = 'password' in key.lower()
        sorted_settings.append({
            'key': key,
            'value': '*****' if is_secret else getattr(current_app.conf, key),
        })

    context = {
        'settings': sorted_settings,
        'title': 'Celery Settings and Health'
    }
    return render(request, 'admin/celery_health_view.html', context)
| 25,695
|
def get_n_runs(slurm_array_file):
    """Reads the run.sh file to figure out how many conformers or rotors were meant to run

    Parses the first ``#SBATCH --array=lo-hi[%throttle]`` line and returns
    hi + 1; returns 0 when no array directive is found.
    """
    with open(slurm_array_file, 'r') as handle:
        for line in handle:
            if 'SBATCH --array=' not in line:
                continue
            # e.g. '--array=0-31%5' -> upper bound '31%5' -> '31'.
            upper = line.split('-')[-1]
            return 1 + int(upper.split('%')[0])
    return 0
| 25,696
|
def consume(pipeline, data, cleanup=None, **node_contexts):
    """Handles node contexts before/after calling pipeline.consume()

    Installs the per-call node contexts, consumes the data (or runs the
    None-data path), optionally cleans up the given nodes, and always
    restores the pipeline's original contexts afterwards.

    Note
    ----
    It would have been better to subclass Pipeline and implement this logic
    right before/after the core consume() call, but there is a bug in pickle
    that prevents that from working with multiprocessing.
    """
    # Temporarily apply the caller-provided node contexts.
    update_node_contexts(pipeline, node_contexts)
    try:
        contexts = get_node_contexts(pipeline)
        dbg("size=%s\n%s" % (size(data, "n/a"), pf(contexts)), indent="label")
        try:
            if data is None:
                return consume_none(pipeline)
            else:
                return pipeline.consume(iterize(data))
        finally:
            # Clean up the requested nodes even when consumption raised.
            if cleanup:
                clean_up_nodes(cleanup, contexts)
    finally:
        # Always undo the temporary context changes applied above.
        reset_node_contexts(pipeline, node_contexts)
| 25,697
|
def notify(hours):
    """
    This Function notifies when it's an hour, two hours or more.
    """
    present = it_is_time_for_notify()
    # Singular wording for the first hour, a count afterwards.
    title = "It's an Hour!!" if hours == 1 else f"It's {hours} Hours"
    Notification(title, present).send()
    # Re-arm: fire again in one hour with the incremented count.
    Timer(3600, function=lambda: notify(hours + 1)).start()
| 25,698
|
def get_field_attribute(field):
    """
    Format and return a whole attribute string
    consists of attribute name in snake case and field type
    (with any strawberry annotation appended to the type).
    """
    field_name = get_field_name(field.name.value)
    type_repr = get_field_type(field)

    # Append the strawberry annotation to the type when one applies.
    annotation = get_strawberry_type(
        field_name, field.description, field.directives
    )
    if annotation:
        type_repr += annotation

    snake_name = str_converters.to_snake_case(field.name.value)
    return f"{snake_name}: {type_repr}"
| 25,699
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.