content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
from typing import List
from typing import Tuple
def partwise_function(function: str, parts: List[Tuple[str, str]], add_zero_otherwise: bool = True) -> str:
    """Build the TeX source for a piecewise ("cases") function definition.

    Args:
        function: Name of the function (left-hand side of the definition).
        parts: Pairs ``(value, condition)``; each pair becomes one case line.
        add_zero_otherwise: When True, a final ``0, otherwise`` case is appended.

    Returns:
        A TeX-compatible string of the form ``f=\\begin{cases}...\\end{cases}``.
    """
    pieces = [f'{function}=', '\\begin{cases}\n']
    pieces.extend(f'{p[0]},& {p[1]} \\\\' for p in parts)
    if add_zero_otherwise:
        pieces.append(r'0,& \text{otherwise}')
    pieces.append(r'\end{cases}')
    return ''.join(pieces)
def coprime(a, b):
    """Return True when ``a`` and ``b`` are coprime, i.e. gcd(a, b) == 1.

    0 is coprime only with 1: gcd(0, b) == abs(b), so the test succeeds
    only when the other argument is a unit.  The previous check
    ``gcd(a, b) in (0, 1)`` wrongly reported coprime(0, 0) as True,
    because gcd(0, 0) == 0 while 0 is not coprime with itself.
    """
    from math import gcd  # local import: the name was never imported here
    return gcd(a, b) == 1
def user_picture1(request):
    """A view that is vulnerable to malicious file access.

    Demonstration endpoint: the file name comes straight from the ``p``
    query parameter with no validation, so a caller can read arbitrary
    files (path traversal).  Kept intentionally vulnerable as an example;
    do not use in production.

    :param request: Django-style request exposing ``GET``.
    :return: HttpResponse containing the raw file bytes.
    """
    filename = request.GET.get('p')
    # BAD: This could read any file on the file system
    # (the context manager at least guarantees the handle is closed)
    with open(filename, 'rb') as f:
        data = f.read()
    return HttpResponse(data)
import random
import array
def random_array():
    """
    089
    Create an array which will store a list of integers. Generate five random numbers and store them in
    the array. Display the array (showing each item on a separate line).

    Returns:
        str: an empty string; the numbers are printed as a side effect.
    """
    # BUG FIX: the module is imported as `array`, so the constructor is
    # array.array(typecode); the original call array("i") invoked the
    # *module* object and raised TypeError.
    rand_array = array.array("i")
    for _ in range(5):
        rand_array.append(random.randint(1, 1000))
    for num in rand_array:
        print(num)
    return ""
def load_seq_ids(fname):
    """Load a sequence of integer IDs (one per line) from a text file.

    Args:
        fname: Path of the text file to read.

    Returns:
        np.ndarray: 1-D array with the IDs in file order.
    """
    with open(fname, "r") as handle:
        parsed = [int(line.strip()) for line in handle]
    return np.asarray(parsed)
from . import routes
def create_app(config):
    """Initialize the core application (Flask application-factory pattern).

    Args:
        config: Configuration object handed to ``app.config.from_object``;
            must expose ``SWAGGER_URL`` and ``API_URL``.

    Returns:
        The configured Flask application.
    """
    app = Flask(__name__, instance_relative_config=False)
    app.config.from_object(config)
    db.init_app(app)
    with app.app_context():
        # Build and register the Swagger UI blueprint.
        swagger_bp = get_swaggerui_blueprint(
            config.SWAGGER_URL,  # Swagger UI static files map to '{SWAGGER_URL}/dist/'
            config.API_URL,
            config={'app_name': "ULI Proof of Concept"},  # Swagger UI config overrides
        )
        app.register_blueprint(swagger_bp)
        # Additional blueprints are not wired up yet:
        # app.register_blueprint(auth.auth_bp)
        # app.register_blueprint(admin.admin_bp)
        return app
def is_valid_maze(maze):
    """Returns True if the maze is valid, False otherwise.

    Checks every constraint in order.  The two groups deliberately keep
    the original semantics: constraints 0-4 fail on any falsy result,
    while constraints 5-8b fail only on a literal ``False``.
    """
    falsy_fails = (constraint_0, constraint_1, constraint_2,
                   constraint_3, constraint_4)
    literal_false_fails = (constraint_5, constraint_6, constraint_7,
                           constraint_8a, constraint_8b)
    if not all(check(maze) for check in falsy_fails):
        return False
    if any(check(maze) is False for check in literal_false_fails):
        return False
    return True
def spectral_dimension_ui(i):
    """Return a list of widgets whose entries are used in evaluating the dimension
    coordinates along the i^th dimension. The widgets include number of points
    (count), spectral width, and reference offset.

    Args:
        i: An integer with the dimension index.
    """

    def field(key, **kwargs):
        # Every widget shares the dimension-indexed id and debouncing.
        return custom_input_group(id=f"{key}-{i}", debounce=True, **kwargs)

    # number of points
    count = field(
        "count",
        prepend_label="Number of points",
        value=512,
        min=2,
        pattern="[0-9]*",
    )
    # spectral width
    spectral_width = field(
        "spectral_width",
        prepend_label="Spectral width",
        append_label="kHz",
        value=25.0,
        min=1e-6,
    )
    # reference offset
    reference_offset = field(
        "reference_offset",
        prepend_label="Reference offset",
        append_label="kHz",
        value=0.0,
    )
    # axis label (shown in the collapsible part of the card)
    label = field(
        "label",
        prepend_label="Label",
        append_label="",
        input_type="text",
        value="frequency",
    )
    return collapsable_card(
        text=f"Spectral Dimension - {i}",
        id_=f"dim-{i}",
        featured=[count, spectral_width, reference_offset],
        hidden=[label],
        message="Show/Hide",
        outer=True,
    )
def get_params(filename=None, **kwargs):
    """Return all the parameters, retrieved following this order of priority:
    * parameters specified as keyword arguments in this function,
    * parameters specified in the .PRM file given in `filename`,
    * default parameters.
    """
    # Extract sample_rate before loading the default parameters.
    # This is because some default parameters are expressed as a function
    # of the sample rate.
    # NOTE(review): when neither the file nor the kwargs provide a
    # sample_rate, kwargs['sample_rate'] raises KeyError -- presumably
    # the value is required from one of the two sources; confirm with callers.
    sample_rate = get_pydict(filename).get('sample_rate', None) or kwargs['sample_rate']
    if 'sample_rate' not in kwargs:
        kwargs['sample_rate'] = sample_rate
    # Defaults may be computed from kwargs (e.g. from the sample rate).
    default = load_default_params(kwargs)
    # Merge defaults, file values and explicit kwargs per the priority
    # documented above (resolution handled inside get_pydict).
    params = get_pydict(filename=filename,
                        pydict_default=default,
                        **kwargs)
    # Set waveforms_nsamples, which is defined as extract_s_before + extract_s_after
    params['waveforms_nsamples'] = params['extract_s_before'] + params['extract_s_after']
    return params | 14514e61cd4083e9d10201273e5f6589710227d6 | 3,630,208 |
def read_restrictions_file(file):
    """
    <Purpose>
      Reads in the contents of a restrictions file.
    <Arguments>
      file: name/path of the file to open
    <Returns>
      A list, where each element is a line in the file
    """
    # Mode "rU" (universal newlines) was deprecated and *removed* in
    # Python 3.11 (open() raises ValueError); plain text mode already
    # translates newlines.  The context manager guarantees the handle is
    # closed even if reading fails.
    with open(file, "r") as fileo:
        return fileo.readlines()
def argsToParams(args):
    """Translate parsed command-line arguments into a params object.

    The concrete params class is selected from ``args.explanation_computer``;
    every remaining argument is copied onto the instance unless it is falsy
    or the literal sentinel string "ignore".

    Args:
        args: argparse-style namespace carrying the attributes used below.

    Returns:
        The populated params instance.

    Raises:
        Exception: if ``args.explanation_computer`` names no known computer.
    """
    # Dispatch table replaces the original if/elif chain.
    params_by_computer = {
        ExplanationComputer.MUS.name: MUSParams,
        ExplanationComputer.OUS_INCREMENTAL_NAIVE.name: OusIncrNaiveParams,
        ExplanationComputer.OUS_INCREMENTAL_SHARED.name: OusIncrSharedParams,
        ExplanationComputer.OUS_SS.name: OusParams,
        ExplanationComputer.OCUS.name: COusParams,
        ExplanationComputer.OCUS_NOT_INCREMENTAL.name: COusNonIncrParams,
    }
    if args.explanation_computer not in params_by_computer:
        raise Exception(f"Wrong params {str(args)}")
    params = params_by_computer[args.explanation_computer]()

    def supplied(value):
        # An argument takes effect only when present and not "ignore".
        return bool(value) and value != "ignore"

    # "True"/"False" strings come straight from the CLI; compare directly
    # instead of `True if x == "True" else False`.
    if supplied(args.maxsatpolarity):
        params.maxsat_polarity = args.maxsatpolarity == "True"
    if supplied(args.grow):
        params.grow = Grow(args.grow)
    if supplied(args.interpretation):
        params.interpretation = Interpretation(args.interpretation)
    if supplied(args.weighing):
        params.maxsat_weighing = Weighing(args.weighing)
    if supplied(args.puzzle):
        params.instance = args.puzzle
    if supplied(args.output):
        params.output = args.output
    if supplied(args.sort_literals):
        params.sort_literals = args.sort_literals == "True"
    # reuse_SSes only exists on BestStep-style params.
    if supplied(args.reuseSubset) and isinstance(params, BestStepParams):
        params.reuse_SSes = args.reuseSubset == "True"
    if supplied(args.timeout):
        params.timeout = int(args.timeout)
    if supplied(args.disable_disjoint_mcses):
        params.disable_disjoint_mcses = args.disable_disjoint_mcses == "True"
    return params
def randiwishart(sigma, df):
    """ Generate an inverse Wishart random matrix in the form consistent with randiwishart.m
    :param sigma: covariance matrix (n x n)
    :param df: degrees of freedom. Must be greater than n (dimension of sigma)
    :type sigma: np.ndarray
    :type df: int
    :return: sqrtx
    :return: sqrtinvx
    :raises DfError: if df < n
    """
    n = sigma.shape[0]
    if df < n:
        raise DfError('df < n. Please add degrees of freedom')
    d = np.linalg.cholesky(sigma) # the output is the transpose of MATLAB's chol function
    di = np.linalg.inv(d) # so no need to take transpose of d here
    # a = randwishart(df/2, n)
    # NOTE(review): `rand.randomwishart` is flagged "REMOVE" by the author --
    # presumably a temporary stand-in for the commented-out randwishart; confirm.
    a = rand.randomwishart(df/2, n) # REMOVE
    # Scale by sqrt(2) and whiten by the inverse Cholesky factor.
    sqrtinvx = (np.sqrt(2) * a) @ di
    sqrtx = np.linalg.inv(sqrtinvx).T
    return sqrtx, sqrtinvx | 077c46a878d9a26628def661d186cb79c2ffef03 | 3,630,211 |
def get_pred_metrics(key_filepath, pred_filepath, average="macro"):
    """Compute classification metrics from a key CSV and a prediction CSV.

    Both files must be formatted as::

        name,target
        name1,1
        name2,3

    Args:
        key_filepath: Path to the ground-truth CSV.
        pred_filepath: Path to the predictions CSV.
        average: Averaging mode forwarded to the F1/AUC computations.

    Returns:
        Tuple ``(acc, auc, f1, cm, key, pred)`` -- accuracy, multi-class AUC,
        F1 score, confusion matrix, and the two loaded dataframes.
    """
    key = pd.read_csv(key_filepath)
    pred = pd.read_csv(pred_filepath)
    assert len(key.index) == len(pred.index)
    # Align rows by sample name before comparing targets.
    joined = pd.merge(key, pred, on=['name'], how='inner')
    y_true = joined["target_x"].to_numpy()
    y_pred = joined["target_y"].to_numpy()
    cm = ConfusionMatrix(actual_vector=y_true, predict_vector=y_pred)
    f1 = f1_score(list(y_true), list(y_pred), average=average)
    auc = roc_auc_score_multiclass(y_true, y_pred, average=average)
    acc = accuracy_score(y_true, y_pred)
    return acc, auc, f1, cm, key, pred
def guess_email_class(email: EmailMessage) -> str :
    """Classify a message as a calendar appointment or a plain note.

    Walks *all* MIME parts: if any part is ``text/calendar`` the message
    is an appointment.  (The previous version returned after inspecting
    only the first walked part -- which for a multipart message is the
    ``multipart/*`` container itself -- so nested calendar parts were
    never found and everything multipart was classified as a note.)

    :param email: the message to classify
    :return: ``'IPM.Appointment'`` if any part is text/calendar, else ``'IPM.Note'``
    """
    for part in email.walk():
        if part.get_content_type() == 'text/calendar':
            return 'IPM.Appointment'
    return 'IPM.Note'
import os
def slice_generator(image_file, out_dir, new_size=None):
    """
    Image generator.
    creates slices of a given size for an image and saves them
    to disk, to use for c3d.
    image_filename: Filename of the image.
    out_dir: output directory.
    new_size: optional size forwarded to `load_img` (resize target,
        presumably -- confirm against load_img).
    Returns the paths of each of the saved slices.
    """
    # for each image file
    crop_paths = []
    # load img
    # NOTE(review): assumes `load_img` yields a 3-D volume whose axes are at
    # least ~112 voxels long -- the window bounds below are hard-coded. Confirm.
    img = load_img(image_file, new_size)
    # current pointer of slice
    i = 0
    while i + 16 < 112:
        # For each dimension
        # also transpose it so that the small dim goes first
        i1 = img[:, :, i:i+16]
        img_cropped_1 = np.transpose(i1, (2, 0, 1))
        i2 = img[:, i:i+16, :]
        img_cropped_2 = np.transpose(i2, (1, 0, 2))
        img_cropped_3 = img[i:i+16, :, :]
        # slide the 16-voxel window by 6 (consecutive windows overlap by 10);
        # note i is bumped *before* naming, so files carry the next offset
        i = i + 6
        out_file = out_dir + os.path.basename(image_file) + '_crop' + str(i)
        # Save paths and images (only if it has anything else than 0)
        if np.any(img_cropped_1):
            save_img(img_cropped_1, out_file + '_1.nii.gz')
            crop_paths.append(out_file + '_1.nii.gz')
        if np.any(img_cropped_2):
            save_img(img_cropped_2, out_file + '_2.nii.gz')
            crop_paths.append(out_file + '_2.nii.gz')
        if np.any(img_cropped_3):
            save_img(img_cropped_3, out_file + '_3.nii.gz')
            crop_paths.append(out_file + '_3.nii.gz')
    return crop_paths | 09e66a6180c4ca64d3f801542d42f42331693d67 | 3,630,214 |
from pathlib import Path
def synctree(a, b):
    """
    Copy new and updated files from a to b.
    :param a: The seed directory
    :param b: The destination directory
    :return: `True` if files were copied, `False` otherwise

    NOTE(review): `copytree`, `copy2` and `dircmp` are not imported in this
    snippet -- presumably shutil.copytree/shutil.copy2 and filecmp.dircmp;
    confirm at module level.  Also, only the top level and its *immediate*
    subdirectories are compared, not arbitrarily deep trees.
    """
    def sync(cmp):
        # Copy everything that exists only on the left, or that differs.
        for name in cmp.left_only + cmp.diff_files:
            a_path = str(Path(cmp.left, name))
            b_path = str(Path(cmp.right, name))
            try:
                copytree(a_path, b_path)
            except NotADirectoryError:
                # Plain file: fall back to a metadata-preserving single copy.
                copy2(a_path, b_path)
        # Returns the number of copy *candidates*, not of successful copies.
        return len(cmp.left_only) + len(cmp.diff_files)
    if not Path(b).exists():
        # Fresh destination: clone the whole tree in one go.
        copytree(a, b)
        return True
    cmp = dircmp(a, b, ignore=['.SRCINFO'])
    r = 0
    for c in [cmp] + list(cmp.subdirs.values()):
        r += sync(c)
    return r > 0 | 3832d7136cca3c33c113d4c81c649d83621aea16 | 3,630,215 |
import types
def fqn(o):
"""Returns the fully qualified class name of an object or a class
:param o: object or class
:return: class name
"""
parts = []
if isinstance(o, (str, bytes)):
return o
if not hasattr(o, '__module__'):
raise ValueError('Invalid argument `%s`' % o)
parts.append(o.__module__)
if isclass(o):
parts.append(o.__name__)
elif isinstance(o, types.FunctionType):
parts.append(o.__name__)
else:
parts.append(o.__class__.__name__)
return '.'.join(parts) | bb95cc55c8afb8785dd4daf1cd6d75997794a181 | 3,630,216 |
def get_lane_boundaries_surface(
    world: carla.World, # pylint: disable=no-member
    pixels_per_meter: int = 5,
    scale: float = 1.0,
    margin: int = 150,
) -> pygame.Surface:
    """Generates a `PyGame` surface of a CARLA town lane boundaries.
    Heavily inspired by the official CARLA `no_rendering_mode.py` example.
    Args:
        world: The `CARLA` world.
        pixels_per_meter: The number of pixels rendered per meter.
        scale: The scaling factor of the rendered map.
        margin: The number of pixels used for margin.
    Returns
        The lane boundaries of a CARLA town as a `PyGame` surface.
    """
    # Fetch CARLA map.
    carla_map = world.get_map()
    # Setups the `PyGame` surface and offsets.
    world_offset, surface = draw_settings(
        carla_map=carla_map,
        pixels_per_meter=pixels_per_meter,
        scale=scale,
        margin=margin,
    )
    def get_lane_markings(lane_marking_type, waypoints, sign):
        # Turn a run of waypoints into one or two pixel polylines depending on
        # whether the marking is single (Solid/Broken) or double (XxYy types).
        # NOTE: this `margin` shadows the outer parameter -- here it is the
        # lateral gap (in meters) between the two lines of a double marking.
        margin = 0.25
        marking_1 = [
            world_to_pixel(
                lateral_shift(w.transform, sign * w.lane_width * 0.5),
                scale,
                world_offset,
                pixels_per_meter,
            ) for w in waypoints
        ]
        if lane_marking_type == carla.LaneMarkingType.Broken or ( # pylint: disable=no-member
            lane_marking_type == carla.LaneMarkingType.Solid): # pylint: disable=no-member
            return [(lane_marking_type, marking_1)]
        else:
            # Second line for double markings, shifted outwards by 2*margin.
            marking_2 = [
                world_to_pixel(
                    lateral_shift(w.transform,
                                  sign * (w.lane_width * 0.5 + margin * 2)),
                    scale,
                    world_offset,
                    pixels_per_meter,
                ) for w in waypoints
            ]
            if lane_marking_type == carla.LaneMarkingType.SolidBroken: # pylint: disable=no-member
                return [
                    (carla.LaneMarkingType.Broken, marking_1), # pylint: disable=no-member
                    (carla.LaneMarkingType.Solid, marking_2), # pylint: disable=no-member
                ]
            elif lane_marking_type == carla.LaneMarkingType.BrokenSolid: # pylint: disable=no-member
                return [
                    (carla.LaneMarkingType.Solid, marking_1), # pylint: disable=no-member
                    (carla.LaneMarkingType.Broken, marking_2), # pylint: disable=no-member
                ]
            elif lane_marking_type == carla.LaneMarkingType.BrokenBroken: # pylint: disable=no-member
                return [
                    (carla.LaneMarkingType.Broken, marking_1), # pylint: disable=no-member
                    (carla.LaneMarkingType.Broken, marking_2), # pylint: disable=no-member
                ]
            elif lane_marking_type == carla.LaneMarkingType.SolidSolid: # pylint: disable=no-member
                return [
                    (carla.LaneMarkingType.Solid, marking_1), # pylint: disable=no-member
                    (carla.LaneMarkingType.Solid, marking_2), # pylint: disable=no-member
                ]
        # Unknown marking type: nothing to draw.
        return [
            (carla.LaneMarkingType.NONE, []), # pylint: disable=no-member
        ]
    def draw_solid_line(surface, color, closed, points, width):
        # pygame needs at least two points to draw a polyline.
        if len(points) >= 2:
            pygame.draw.lines(surface, color, closed, points, width)
    def draw_broken_line(surface, color, closed, points, width):
        # Chunk the polyline into 20-point segments and draw every third
        # segment, producing the dashed appearance.
        broken_lines = [
            x for n, x in enumerate(zip(*(iter(points),) * 20)) if n % 3 == 0
        ]
        for line in broken_lines:
            pygame.draw.lines(surface, color, closed, line, width)
    def draw_lane_marking_single_side(surface, waypoints, sign):
        # Group consecutive waypoints that share a marking type, convert each
        # group to polylines, then draw solid/broken lines accordingly.
        lane_marking = None
        marking_type = carla.LaneMarkingType.NONE # pylint: disable=no-member
        previous_marking_type = carla.LaneMarkingType.NONE # pylint: disable=no-member
        markings_list = []
        temp_waypoints = []
        current_lane_marking = carla.LaneMarkingType.NONE # pylint: disable=no-member
        for sample in waypoints:
            # sign < 0 selects the left marking, sign > 0 the right one.
            lane_marking = sample.left_lane_marking if sign < 0 else sample.right_lane_marking
            if lane_marking is None:
                continue
            marking_type = lane_marking.type
            if current_lane_marking != marking_type:
                # Marking type changed: flush the accumulated run.
                markings = get_lane_markings(
                    previous_marking_type,
                    temp_waypoints,
                    sign,
                )
                current_lane_marking = marking_type
                for marking in markings:
                    markings_list.append(marking)
                # Keep the last waypoint so consecutive runs stay connected.
                temp_waypoints = temp_waypoints[-1:]
            else:
                temp_waypoints.append((sample))
                previous_marking_type = marking_type
        # Add last marking.
        last_markings = get_lane_markings(
            previous_marking_type,
            temp_waypoints,
            sign,
        )
        for marking in last_markings:
            markings_list.append(marking)
        for markings in markings_list:
            if markings[0] == carla.LaneMarkingType.Solid: # pylint: disable=no-member
                draw_solid_line(
                    surface,
                    COLORS["WHITE"],
                    False,
                    markings[1],
                    2,
                )
            elif markings[0] == carla.LaneMarkingType.Broken: # pylint: disable=no-member
                draw_broken_line(
                    surface,
                    COLORS["WHITE"],
                    False,
                    markings[1],
                    2,
                )
    # Set background black
    surface.fill(COLORS["BLACK"])
    # Waypoint sampling distance along each road, in meters.
    precision = 0.05
    # Parse OpenDrive topology.
    topology = [x[0] for x in carla_map.get_topology()]
    topology = sorted(topology, key=lambda w: w.transform.location.z)
    set_waypoints = []
    for waypoint in topology:
        # Walk each road from its entry waypoint, collecting samples until
        # the road id changes (i.e. the road ends).
        waypoints = [waypoint]
        nxt = waypoint.next(precision)
        if len(nxt) > 0:
            nxt = nxt[0]
            while nxt.road_id == waypoint.road_id:
                waypoints.append(nxt)
                nxt = nxt.next(precision)
                if len(nxt) > 0:
                    nxt = nxt[0]
                else:
                    break
        set_waypoints.append(waypoints)
    # Draw roads.
    for waypoints in set_waypoints:
        waypoint = waypoints[0]
        road_left_side = [
            lateral_shift(w.transform, -w.lane_width * 0.5) for w in waypoints
        ]
        road_right_side = [
            lateral_shift(w.transform, w.lane_width * 0.5) for w in waypoints
        ]
        # Closed polygon outlining the road (left side + reversed right side).
        polygon = road_left_side + [x for x in reversed(road_right_side)]
        polygon = [
            world_to_pixel(
                x,
                scale=scale,
                offset=world_offset,
                pixels_per_meter=pixels_per_meter,
            ) for x in polygon
        ]
        # Draw Lane Markings
        if not waypoint.is_junction:
            # Left Side
            draw_lane_marking_single_side(surface, waypoints, -1)
            # Right Side
            draw_lane_marking_single_side(surface, waypoints, 1)
    return surface | 3f80bff30bb652d4173feb5a62434a0e4be0ffdd | 3,630,217 |
def batch_dq_prod_vector(dqs, V):
    """Apply transforms represented by a dual quaternions to vectors.
    Parameters
    ----------
    dqs : array-like, shape (..., 8)
        Unit dual quaternions
    V : array-like, shape (..., 3)
        3d vectors
    Returns
    -------
    W : array, shape (..., 3)
        3d vectors
    """
    dqs = np.asarray(dqs)
    # Embed each vector as a dual quaternion: real part (1, 0, 0, 0),
    # dual part (0, vx, vy, vz).
    v_dqs = np.empty_like(dqs)
    v_dqs[..., 0] = 1.0
    v_dqs[..., 1:5] = 0.0
    v_dqs[..., 5:] = V
    # Sandwich product: dq * v_dq * conj(dq).
    v_dq_transformed = batch_concatenate_dual_quaternions(
        batch_concatenate_dual_quaternions(dqs, v_dqs),
        batch_dq_conj(dqs))
    # BUG FIX: extract along the LAST axis. The previous `[5:]` sliced the
    # batch axis, returning garbage (or an empty array) for batched input,
    # inconsistent with how the embedding above writes `[..., 5:]`.
    return v_dq_transformed[..., 5:]
import re
def uniescape(text: str) -> str:
    """
    Escapes all non-ASCII printable characters with JavaScript Unicode escapes.
    """
    def to_escape_sequence(match):
        ch = match.group(0)
        assert len(ch) == 1
        cp = ord(ch)
        assert cp <= 0xFFFF
        return "\\u{:04X}".format(cp)

    return re.sub("[^\u0020-\u007F]", to_escape_sequence, text)
from typing import Dict
from typing import List
from typing import Union
def aggregate_collate_fn(insts) -> Dict[str, List[List[Union[int, str, List[int]]]]]:
    """Collate a batch of instances into per-field lists.

    Args:
        insts: list of samples; each sample is a dict with the keys
            'subtree_span', 'snt' and 'children_span'.

    Returns:
        Dict mapping the pluralised field names to the batched lists.
    """
    batch = {
        'subtree_spans': [inst['subtree_span'] for inst in insts],
        'snts': [inst['snt'] for inst in insts],
        'children_spans': [inst['children_span'] for inst in insts],
    }
    assert len(batch['subtree_spans']) == len(batch['snts']) == len(batch['children_spans'])
    return batch
def encode_extended(values, value_range):
    """Encode data using Google's "extended" encoding for the most granularity."""
    encoded_chunks = [num2chars(value, value_range) for value in values]
    return "".join(encoded_chunks)
import argparse
import sys
import json
def main(argv=None):
    """
    The main function of this script. Converts JSON to XML based on the arguments provided.
    Reads the JSON document from stdin and prints the resulting XML to stdout.
    :param argv: List[str] Arguments to parse (default sys.argv)
    :return: int  0 on success, 1 when stdin does not contain valid JSON
    """
    #Parse arguments
    parser = argparse.ArgumentParser(description='Converts JSON to XML.')
    parser.add_argument('-t', '--indent', default='\t',
                        help='Sets the string used for a single level of indentation.')
    parser.add_argument('-n', '--newline', default='\n',
                        help='Sets the string used for new lines.')
    parser.add_argument('-r', '--root', default='root',
                        help='Sets the root tag.')
    parser.add_argument('-i', '--ignore', default=[], action='append',
                        help='Adds a key to ignore.')
    parser.add_argument('-p', '--prefix', default='',
                        help='Sets the tag prefix (e.g. for namespacing).')
    parser.add_argument('-s', '--suffix', default='',
                        help='Sets the tag suffix (e.g. for namespacing).')
    # The three case options share one `transform` destination (last one
    # given wins); the default is the identity function.
    parser.add_argument('-c', '--capitalize', dest='transform',
                        action='store_const', const=str.capitalize, default=lambda x: x,
                        help='Capitalizes tag names (default=None)')
    parser.add_argument('-l', '--lower', dest='transform',
                        action='store_const', const=str.lower,
                        help='Lowercases tag names (default=None)')
    parser.add_argument('-u', '--upper', dest='transform',
                        action='store_const', const=str.upper,
                        help='Uppercases tag names (default=None)')
    #TODO: underscores to camel
    if argv is None:
        argv = sys.argv
    arguments = parser.parse_args(argv[1:])
    #Read input
    inputString = sys.stdin.read()
    try:
        inputJson = json.loads(inputString)
    except ValueError as e:
        print('Error parsing JSON: %s' % e)
        return 1
    #Convert
    outputXml = jsonToXml(arguments.root, inputJson,
                          indent=arguments.indent,
                          newline=arguments.newline,
                          ignores=set(arguments.ignore),
                          prefix=arguments.prefix,
                          suffix=arguments.suffix,
                          transform=arguments.transform)
    #Output
    print(outputXml)
    return 0 | 38fc88929bd1b45d80d7f0520f2ac4f3a978bf0e | 3,630,222 |
import string
def part_b(puzzle_input, workers=5, offset=60):
    """
    Calculate the answer for part_b.

    Simulates `workers` workers completing dependency-ordered steps, where
    step X takes (alphabet index of X + 1 + offset) seconds.

    Args:
        puzzle_input (list): Formatted as the provided input from the website.
        workers (int): Number of parallel workers.
        offset (int): Base duration added to every step's alphabet cost.
    Returns:
        string: The answer for part_b (total seconds to finish all steps).
    """
    deps = parse_deps(puzzle_input)
    finished = []
    queue = []
    processing = []
    t = 0
    def process():
        # Assign queued steps (alphabetically-first first) to free workers;
        # each entry in `processing` is (completion time, step).
        nonlocal queue
        while len(processing) < workers and queue:
            c = min(queue)
            queue.remove(c)
            processing.append((string.ascii_uppercase.index(c) + 1 + t + offset, c))
    # Seed the queue with steps that have no dependencies of their own
    # (they appear only as prerequisites).
    for dep in deps:
        [queue.append(c) for c in deps[dep] if c not in deps.keys()]
    queue = list(set(queue))
    process()
    while processing or queue:
        # Retire steps that complete at the current tick.
        # NOTE(review): removing from `processing` while iterating it can
        # skip an element that finishes at the same tick; verify on real input.
        for time, next_char in processing:
            if time == t:
                finished.append(next_char)
                processing.remove((time, next_char))
        # Release steps whose prerequisites are all finished.
        for dep in set(deps.keys()):
            if deps[dep].issubset(finished):
                queue.append(dep)
                del deps[dep]
        process()
        t += 1
    # t was incremented once past the final completion tick.
    return str(t - 1) | b9484aa9c66d425798fa806b84e6bd7c31a4eb1f | 3,630,223 |
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up a National Weather Service entry.

    Builds the shared SimpleNWS client and three DataUpdateCoordinators
    (current observation, forecast, hourly forecast), stores them in
    ``hass.data``, primes them with an initial refresh, then forwards the
    entry to each platform.
    """
    latitude = entry.data[CONF_LATITUDE]
    longitude = entry.data[CONF_LONGITUDE]
    api_key = entry.data[CONF_API_KEY]
    station = entry.data[CONF_STATION]
    client_session = async_get_clientsession(hass)
    # set_station only does IO when station is None
    nws_data = SimpleNWS(latitude, longitude, api_key, client_session)
    await nws_data.set_station(station)
    coordinator_observation = DataUpdateCoordinator(
        hass,
        _LOGGER,
        name=f"NWS observation station {station}",
        update_method=nws_data.update_observation,
        update_interval=DEFAULT_SCAN_INTERVAL,
    )
    coordinator_forecast = DataUpdateCoordinator(
        hass,
        _LOGGER,
        name=f"NWS forecast station {station}",
        update_method=nws_data.update_forecast,
        update_interval=DEFAULT_SCAN_INTERVAL,
    )
    coordinator_forecast_hourly = DataUpdateCoordinator(
        hass,
        _LOGGER,
        name=f"NWS forecast hourly station {station}",
        update_method=nws_data.update_forecast_hourly,
        update_interval=DEFAULT_SCAN_INTERVAL,
    )
    # Share the client and coordinators with the platform setups.
    hass.data[DOMAIN][entry.entry_id] = {
        NWS_DATA: nws_data,
        COORDINATOR_OBSERVATION: coordinator_observation,
        COORDINATOR_FORECAST: coordinator_forecast,
        COORDINATOR_FORECAST_HOURLY: coordinator_forecast_hourly,
    }
    # Fetch initial data so we have data when entities subscribe
    await coordinator_observation.async_refresh()
    await coordinator_forecast.async_refresh()
    await coordinator_forecast_hourly.async_refresh()
    for component in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, component)
        )
    return True | 2a7acda783a3ee29289cec651f530281af52e199 | 3,630,224 |
def cov_hvus(data1,data2,label):
    """
    Estimate the covariance of hvus between two classifiers/features based on graph theory
    Parameters
    ----------
    data1 : array-like of shape (n_samples,)
        samples' value from classifier1
    data2 : array-like of shape (n_samples,)
        samples' value from classifier2
    label : array-like of shape (n_samples,)
        samples' label indicating which class a sample belongs to
    Returns
    -------
    cov_hvus : float64
        the covariance of hvus
    """
    cov_hv = 0
    # Per-classifier HVUS estimates (helper defined elsewhere).
    hvus2 = order_hvus(label.copy(),data2.copy())
    hvus1 = order_hvus(label.copy(),data1.copy())
    # Number of samples in each of the r classes; both classifiers share labels.
    subclass_num1 = np.array([(label==i).sum() for i in set(label)])
    subclass_num2 = subclass_num1
    r = len(subclass_num1)
    # Cumulative offsets of each class's samples inside data1/data2.
    appdata1c = np.append(0,subclass_num1)
    cs = np.cumsum(appdata1c)
    # gamaIndex enumerates all 2^r subsets of the r classes as bit masks.
    gamaIndex = np.zeros((2**r,r))
    for c1 in range(2**r):
        for c2 in range(r):
            gamaIndex[c1][c2] = _bitget(c1,c2)
    V = [[]]* (r+1)
    Vi = [[]] * r
    Ni = [[]] * r
    for t in range(1,2**r):
        # Per-class vertex tables: classes inside the subset use a single
        # sample index, classes outside it use index pairs (one per classifier).
        for k in range(r):
            if gamaIndex[t][k] == 1:
                V[k] = np.zeros((1,subclass_num1[k]))
                Vi[k] = np.array((range(subclass_num1[k]),range(subclass_num1[k])))
                Ni[k] = np.array((np.ones(subclass_num1[k]),range(subclass_num1[k])))
            else:
                V[k]= np.zeros((subclass_num1[k],subclass_num1[k]))
                Vi[k] = np.zeros((2,subclass_num1[k]**2))
                Ni[k] = np.zeros((2,subclass_num1[k]**2))
                length = 0
                for i in range(subclass_num1[k]):
                    for j in range(subclass_num1[k]):
                        Vi[k][0,length] = i
                        Vi[k][1,length] = j
                        Ni[k][0,length] = i
                        Ni[k][1,length] = j
                        length += 1
        V[0] = np.ones(V[0].shape)
        if gamaIndex[t][0] != 1:
            # Exclude diagonal pairs (same sample used by both classifiers).
            for i in range(V[0].shape[0]):
                V[0][i][i] = 0
        # Dynamic-programming pass over classes: accumulate counts of
        # monotone chains through consecutive classes.
        for k in range(r-1):
            for va in range(V[k].size):
                _,Vk_width = V[k].shape
                Vk_row_index = va // Vk_width
                Vk_col_index = va % Vk_width
                for vb in range(V[k+1].size):
                    _,Vkpone_width =V[k+1].shape
                    Vkpo_row_index = vb//Vkpone_width
                    Vkpo_col_index = vb%Vkpone_width
                    if gamaIndex[t][k+1] != 1 and (Vi[k+1][0][vb]==Vi[k+1][1][vb]):
                        V[k+1][Vkpo_row_index][Vkpo_col_index] = 0
                    else:
                        # An edge exists only when BOTH classifiers rank the
                        # class-k sample strictly below the class-(k+1) sample.
                        if data1[int(cs[k]+Vi[k][0][va])]<data1[int(cs[k+1]+Vi[k+1][0][vb])] and \
                            data2[int(cs[k]+Vi[k][1][va])]<data2[int(cs[k+1]+Vi[k+1][1][vb])]:
                            V[k+1][Vkpo_row_index][Vkpo_col_index] += V[k][Vk_row_index,Vk_col_index]
        V[r] = sum(sum(V[r-1]))
        # Weight for this subset, then accumulate the covariance contribution.
        temp_prod = (subclass_num1[gamaIndex[t][:]==0]-1).prod()
        cov_hv += temp_prod * (V[r] / (subclass_num1.prod()*temp_prod) - hvus1*hvus2)
    cov_hv = cov_hv / subclass_num1.prod()
    return cov_hv | 4ab273d7a94f85a55fcfe4c67255e8714f04da5f | 3,630,225 |
import re
def clean(s):
    """
    remove symbols and lowercase
    """
    # Collapse every run of non-word characters to a single space,
    # trim the ends, then lowercase.
    return re.sub(r"\W+", " ", s).strip().lower()
def trueifset(xeval, typematch=False):
    """return True if @xeval is set, otherwise False"""
    if typematch:
        # strict mode: only the literals False and None count as "unset"
        return not (xeval is False or xeval is None)
    # default mode: plain truthiness
    return bool(xeval)
def _generate_dash_manifest(encoding, output, output_path):
    # type: (Encoding, Output, str) -> DashManifestDefault
    """
    Creates a DASH default manifest that automatically includes all representations configured in
    the encoding.
    <p>API endpoint:
    https://bitmovin.com/docs/encoding/api-reference/sections/manifests#/Encoding/PostEncodingManifestsDash
    :param encoding: The encoding for which the manifest should be generated
    :param output: The output to which the manifest should be written
    :param output_path: The path to which the manifest should be written
    :return: the created default DASH manifest resource returned by the API
    """
    # "Default" manifests let the API discover every representation of the
    # encoding automatically; only name, version and output need to be set.
    dash_manifest_default = DashManifestDefault(
        encoding_id=encoding.id,
        manifest_name="stream.mpd",
        version=DashManifestDefaultVersion.V1,
        outputs=[_build_encoding_output(output, output_path)]
    )
    return bitmovin_api.encoding.manifests.dash.default.create(dash_manifest_default=dash_manifest_default) | 4a2563b48ac3977b16901f735aaf719bdffe97f9 | 3,630,228 |
def shipping():
    """The webapp's page for viewing the shipping status of work orders.
    This allows the employee to review existing work orders and see if they
    are ready to ship or not.  GET renders the page; POST updates the chosen
    work order's status and re-renders with fresh data."""
    # if the current user is not authenticated, redirect the user to the logged out index page
    if not current_user.is_authenticated:
        return redirect(url_for("cim.templates.index"))
    # When first loaded, load in work orders and employee data from DB
    if request.method=="GET":
        return render_template("shipping.html", work_orders=dbq.get_db_work_orders(), employees=dbq.get_db_employees())
    # If a POST request is submitted, update the status of the provided work order
    if request.method=="POST":
        # obtain the details of the work order to update
        shipped_work_order = str(request.form['work-order-id-to-ship'])
        # NOTE(review): despite the page being about shipping, the status
        # written is 'completed' (quoted for SQL) -- confirm this is the
        # intended DB value rather than e.g. 'shipped'.
        dbuq.set_workorder_status(wo_id=shipped_work_order, wo_status="'completed'")
        # reload with updated information
        return render_template("shipping.html", work_orders=dbq.get_db_work_orders(), employees=dbq.get_db_employees()) | 25c6a9bb71744b63a102751dc1c675137008a514 | 3,630,229 |
def get_file_content(file: str) -> str:
    """
    Get file content.
    """
    try:
        with open(file, 'r') as handle:
            return handle.read()
    except IOError as err:
        # Report the problem and abort the program, as before.
        print(err)
        print('Exiting...')
        exit(1)
import types
def simplify_name_to_string(input_name):
    """
    ``simplify_name_to_string`` simplifies a templated C++ name with default arguments and returns a string
    :param input_name: String or qualified name to be simplified
    :type input_name: Union[str, QualifiedName]
    :return: simplified name (or original name if simplifier fails/cannot simplify)
    :rtype: str
    :Example:
        >>> bdemangle.simplify_name_to_string("std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >")
        'std::string'
        >>>
    """
    if isinstance(input_name, str):
        return core.BNRustSimplifyStrToStr(input_name)
    if isinstance(input_name, types.QualifiedName):
        return core.BNRustSimplifyStrToStr(str(input_name))
    raise TypeError("Parameter must be of type `str` or `types.QualifiedName`")
def _normalize_index(index, axis_size):
    """Normalizes an index value in the range [-N, N) to the range [0, N)."""
    # Branchless wrap-around: add axis_size only where index is negative.
    return lax.select(
        lax.lt(index, _constant_like(index, 0)),
        lax.add(index, _constant_like(index, axis_size)),
        index) | e3175d169b2179493125b58ee49b79bb39843e51 | 3,630,232 |
def english_to_french(english_text):
    """Translate English text to French with the Watson Language Translator.

    :param english_text: source text in English
    :return: the French translation (first translation in the API response)
    """
    translation = language_translator.translate(
        text = english_text, model_id = "en-fr"
    ).get_result()
    # The service returns a list of translations; take the first one.
    french_text = translation["translations"][0]["translation"]
    return french_text | a806a9b7d0beb267d02096dd21a4711aba506417 | 3,630,233 |
def get_sampling_distribution(samples):
    """Return the sampling distribution for a collection of samples.

    Parameter: a list of lists, each inner list one possible sample of
    the population.
    Returns: the list of sample means (the sampling distribution).
    """
    return list(map(get_mean, samples))
import torch
def compute_loss3D(embeddings, labels, device, temperature=0.7):
    """
    Contrastive (cross-entropy over similarity logits) loss for 3-D volumes.

    embeddings: a B, C, D, H, W array
    labels: B, 1 array of class ids in {0, 1}.  NOTE(review): the batch is
        assumed perfectly balanced (B/2 samples per class) -- the
        .view(B//2, ...) calls below require it; confirm with the sampler.
    device: torch.device instance, either cuda or cpu
    temperature: scaling applied to the logits before cross-entropy.
    """
    # all of them are 3d embeddings, I again need to separate
    # them and then do the dot product things.
    B, C, D, H, W = list(embeddings.shape)
    class1_idx = (labels == 0).nonzero()
    class1_embs = embeddings[class1_idx.squeeze(1)]
    assert class1_embs.shape[1] == C
    class2_idx = (labels == 1).nonzero()
    class2_embs = embeddings[class2_idx.squeeze(1)]
    assert class2_embs.shape[1] == C
    # reshape the embeddings to be B, C, (D*H*W)
    class1_embs = class1_embs.view(B//2, C, -1)
    class2_embs = class2_embs.view(B//2, C, -1)
    # generate positives for class1 and class2:
    # each sample is paired with the next same-class sample, cyclically.
    class1_pos = class1_embs[1:].clone()
    class1_pos = torch.cat((class1_pos, class1_embs[0].unsqueeze(0).clone()), dim=0)
    assert torch.equal(class1_embs[1:], class1_pos[:-1])
    assert torch.equal(class1_embs[0], class1_pos[-1])
    class2_pos = class2_embs[1:].clone()
    class2_pos = torch.cat((class2_pos, class2_embs[0].unsqueeze(0).clone()), dim=0)
    assert torch.equal(class2_embs[1:], class2_pos[:-1])
    assert torch.equal(class2_embs[0], class2_pos[-1])
    # join the keys, and queries together
    pos_query_embs = torch.cat((class1_embs, class2_embs), dim=0).view(B, -1)
    pos_key_embs = torch.cat((class1_pos, class2_pos), dim=0).view(B, -1)
    # now finally do the dot product one-to-one
    # (normalized by the voxel count D*H*W)
    logits_pos = torch.bmm(pos_query_embs.view(B, 1, -1), pos_key_embs.view(B, -1, 1))
    logits_pos = logits_pos.div(D*H*W).view(-1, 1)
    # negatives: all cross-class similarities, in both directions
    num_neg_samples = min(len(class1_embs), len(class2_embs))
    logits_neg_class1 = torch.mm(class1_embs.view(num_neg_samples,-1),
                                 class2_embs.view(num_neg_samples, -1).t())
    logits_neg_class1 = logits_neg_class1.div(D*H*W)
    logits_neg_class2 = torch.mm(class2_embs.view(num_neg_samples,-1),
                                 class1_embs.view(num_neg_samples, -1).t())
    logits_neg_class2 = logits_neg_class2.div(D*H*W)
    logits_negs = torch.cat((logits_neg_class1, logits_neg_class2), dim=0)
    # logits: positive similarity in column 0, negatives after it
    logits = torch.cat((logits_pos, logits_negs), dim=1)
    # target class 0 == the positive column
    labels = torch.zeros(len(logits)).long().to(device)
    # compute the loss
    loss = F.cross_entropy(logits/temperature, labels)
    return loss | 53cc363fe8fc9266f5143d4641e63e1f8491f0d1 | 3,630,235 |
def grad_polynomial_bases(X, N):
    """
    Evaluate the partial derivatives of the monomial basis at a set of points.

    Parameters
    ----------
    X : ndarray
        Points at which to evaluate the monomial gradients, shape (M, m).
    N : int
        Maximum total degree of the monomial basis.

    Returns
    -------
    dB : ndarray
        Three-dimensional array of shape (M, n, m) holding the gradients of
        the n monomials evaluated at the M points; the last axis indexes the
        partial derivative direction.
    """
    num_points, dim = X.shape
    exponents = index_set(N, dim)
    num_terms = exponents.shape[0]
    dB = np.zeros((num_points, num_terms, dim))
    for term in range(num_terms):
        for axis in range(dim):
            powers = exponents[term, :].copy()
            degree = powers[axis]
            if degree == 0:
                # Monomial is constant in this direction; dB already holds zeros.
                continue
            # d/dx_k of x_k^p is p * x_k^(p-1); other factors are unchanged.
            powers[axis] -= 1
            dB[:, term, axis] = degree * np.prod(np.power(X, powers), axis=1)
    return dB
def hor1f(x, z, offset=1):
    """
    Find the horizon index for each point along a terrain profile.

    BROKEN (per original author): "Haven't quite figured this one out" --
    results from this port have not been verified against the reference.

    Mimics the algorithm from Dozier 1981 and hor1f.c from IPW: the loop
    works backwards from the end of the profile, but for each point it looks
    forwards to locate the horizon.

    Args:
        x: horizontal distances for the points
        z: elevations for the points (same length as ``x``)
        offset: index offset for the first candidate point; the adjacent
            point is skipped, which seems to help reduce noise

    Returns:
        h: ndarray of int, index of the horizon point for each input point

    20150601 Scott Havens
    """
    N = len(x)  # number of points to look at
    x = np.array(x)
    z = np.array(z)
    # preallocate the h array
    h = np.zeros(N, dtype=int)
    h[N-1] = N-1    # the end point is it's own horizon
    # work backwards from the end for the pixels
    for i in range(N-2, -1, -1):
        zi = z[i]
        # Start with next-to-adjacent point in either forward or backward
        # direction, depending on which way loop is running. Note that we
        # don't consider the adjacent point; this seems to help reduce noise.
        k = i + offset
        if k >= N:
            # NOTE(review): with offset > 1 this only steps back once, so k
            # could still be out of range -- likely part of why it's "BROKEN".
            k -= 1
        # loop until horizon is found
        # xrange will set the maximum number of iterations that can be
        # performed based on the length of the vector
        for t in range(k, N):
            j = k
            k = h[j]
            # slope from point i to candidate j, and to j's own horizon h[j]
            sij = _slope(x[i], zi, x[j], z[j])
            sihj = _slope(x[i], zi, x[k], z[k])
            # if slope(i,j) >= slope(i,h[j]), horizon has been found; otherwise
            # set j to k (=h[j]) and loop again
            # or if we are at the end of the section
            if sij > sihj:  # or k == N-1:
                break
        # if slope(i,j) > slope(j,h[j]), j is i's horizon; else if slope(i,j)
        # is zero, i is its own horizon; otherwise slope(i,j) = slope(i,h[j])
        # so h[j] is i's horizon
        if sij > sihj:
            h[i] = j
        elif sij == 0:
            h[i] = i
        else:
            h[i] = k
    return h
def make_multi_class_binomial_deviance(n_class, alpha=2., beta=0.5, cy=25., sim='cosine'):
    """ Build a Binomial Deviance loss function closure. Ref:
    https://papers.nips.cc/paper/6464-learning-deep-embeddings-with-histogram-loss.pdf

    The batch must contain M exclusive classes with N images per class, laid
    out as: im1_c1, im2_c1, ..., im1_c2, im2_c2, ... imN_cM, where imN_cM is
    the N-th image of the M-th class.

    Arguments:
        n_class: Number of exclusive classes within a batch.
        alpha: Binomial deviance parameter, see the paper.
        beta: Binomial deviance parameter, see the paper.
        cy: Binomial deviance parameter, see the paper.
        sim: Similarity measure; only 'cosine' is accepted.

    Returns:
        A Keras-style loss function returning a scalar.
    """
    if sim != 'cosine':
        raise ValueError("Binomial deviance expects cosine similarity only."
                         "This requirement may be changed in future version.")

    def binomial_deviance(y_true, y_pred):
        batch_shape = tf.shape(y_pred)
        per_class = batch_shape[0] // n_class
        sim_p, sim_n = __get_similarity_matrices(n_class, y_pred, sim=sim)
        # Positive pairs: softplus-style penalty, averaged per anchor then summed.
        pos_term = tf.log(1. + tf.exp(-alpha * (sim_p - beta)))
        loss_p = tf.reduce_sum(tf.reduce_mean(pos_term, axis=1))
        # Negative pairs: penalty weighted by cy, normalised by the pair count.
        neg_term = tf.log(1. + tf.exp(alpha * cy * (sim_n - beta)))
        n_neg_pairs = tf.cast(per_class * (n_class - 1), dtype=tf.float32)
        loss_n = tf.reduce_sum(neg_term) / n_neg_pairs
        return loss_p + loss_n

    return binomial_deviance
import json
from collections import OrderedDict
def json_to_odict(json_series):
    """
    Parse a JSON document into an OrderedDict, preserving key order.

    Parameters
    ----------
    json_series : str
        JSON-encoded text.

    Returns
    -------
    collections.OrderedDict
        The decoded document with keys in their original order.
    """
    # Bug fix: the original referenced an undefined name ``js`` instead of
    # the ``json_series`` parameter, raising NameError on every call.
    odict = json.loads(json_series, object_pairs_hook=OrderedDict)
    return odict
import typing
def sectors_performance(
    apikey: str, limit: int = DEFAULT_LIMIT
) -> typing.List[typing.Dict]:
    """
    Query FMP /sectors-performance/ API.

    :param apikey: Your API key.
    :param limit: Number of rows to return.
    :return: A list of dictionaries.
    """
    return __return_json(
        path="sectors-performance",
        query_vars={"apikey": apikey, "limit": limit},
    )
from datetime import datetime
def _prepTrends(rawReport, startDt, numFiles, countMonth, granularity):
"""
Helper function which reformats data into list of lists with correct data
types. If anything is empty or has incorrect data, then an empty list is
returned.
"""
#load each rawReport into separate list
reportData = []
for i in range(numFiles):
#convert string to 2d list
raw = rawReport[i]
rawLines = raw.split("\n")
lines = []
for rawLine in rawLines:
line = rawLine.split(",")
lines.append(line)
#check if the actual granularity matches the desired granularity. If
#no, then alter to match and continue
trueGran = lines[4][0]
if granularity == "d" and trueGran == "Week":
print("Error: The file returned from Google Trends doesn't match your desired granularity."
" Altering your desired granularity to match.")
granularity = 'w'
if granularity == "w" and trueGran == "Day":
print("Error: The file returned from Google Trends doesn't match your desired granularity."
" Altering your desired granularity to match.")
granularity = 'd'
#prep data
#remove header
lines = lines[5:]
#remove country data
for j, line in enumerate(lines):
if line[0] == "": #checks if line is empty
lines = lines[:j]
break
else:
continue
#remove 2nd month data (except 1st day)
for j, line in enumerate(lines):
try:
if granularity == 'd':
dt = datetime.datetime.strptime(line[0], "%Y-%m-%d")
else: #granularity == 'w':
dt = line[0][:-13]
dt = datetime.datetime.strptime(dt, "%Y-%m-%d")
finalMonth = startDt.month + (i+1)*countMonth #would just use % operator for this
while finalMonth > 12: #however it doesn't work bc the range
finalMonth -= 12 #runs from 1-12, not 0-11
if dt.month == finalMonth:
lines = lines[:j+1] #+1 bc we want to keep this first day/week/month
break
else:
continue
#If there is a ValueError, then there is incorrectly week data,
#and so we should just return an empty array, bc the data is not
#correct to begin with.
except ValueError:
print("Value Error: Unable to format datetime correctly from file, returning empty list.")
return []
#Checks that there is data. If not, then returns empty list.
if len(lines) == 0:
return []
#Saves data to list, which is element of larger list.
report = []
for line in lines:
try:
newLine = []
if granularity == 'd':
dt = datetime.datetime.strptime(line[0], "%Y-%m-%d")
else: #granularity == 'w':
dt = line[0][:-13]
dt = datetime.datetime.strptime(dt, "%Y-%m-%d")
newLine.append(dt)
#Removes the final item in the line, which is the constant term
#This makes sure that there is the same scaling.
for j in range(1, len(line)-1):
value = int(line[j])
newLine.append(value)
report.append(newLine)
except ValueError: #issue with data, return empty list
print("Value Error: Unable to format datetime correctly from file, returning empty list.")
return []
reportData.append(report)
return reportData | 2f416f04fc930d9729d5ef54a901963e33021fba | 3,630,241 |
def drift(ips, tau):
    """Apply the drift operator for a time step ``tau``.

    Dispatches to the post-Newtonian variant when the particle system has
    PN corrections enabled, and to the plain Newtonian variant otherwise.
    """
    use_pn = ips.include_pn_corrections
    return drift_pn(ips, tau) if use_pn else drift_n(ips, tau)
def F1Score4(pred, groundtruth):
    """
    Compute the macro-averaged F1 score over the four rhythm classes
    Normal ('N'), AF ('A'), Other ('O') and Noisy ('~').

    The confusion matrix is laid out with the ground truth along rows and
    predictions along columns:

                       predicted
                    N    A    O    P(~)
        truth  N    Nn   Na   No   Np
               A    An   Aa   Ao   Ap
               O    On   Oa   Oo   Op
               ~    Pn   Pa   Po   Pp

    Parameters
    ----------
    pred : sequence of str
        Predicted labels, each one of 'N', 'A', 'O', '~'.
    groundtruth : sequence of str
        True labels, aligned with ``pred``.

    Returns
    -------
    float
        Average of the four per-class F1 scores. Also prints the confusion
        matrix and the per-class scores.
    """
    # Idiom fix: the original repeated the same 4x4 branching for every
    # predicted label (~60 duplicated lines); a label->index map gives the
    # identical tally in one pass.
    label_index = {'N': 0, 'A': 1, 'O': 2, '~': 3}
    re_table = np.zeros([4, 4])
    for i_pred, i_groundtruth in zip(pred, groundtruth):
        col = label_index.get(i_pred)
        if col is None:
            # Matches the original behavior: unknown predictions are ignored.
            continue
        row = label_index.get(i_groundtruth)
        if row is None:
            print('wrong label')
            continue
        re_table[row, col] += 1
    # Per-class F1 = 2*TP / (predicted positives + actual positives).
    f1_scores = [
        2 * re_table[k, k] / (sum(re_table[:, k]) + sum(re_table[k, :]))
        for k in range(4)
    ]
    F1_N, F1_A, F1_O, F1_P = f1_scores
    F1 = (F1_N + F1_A + F1_O + F1_P) / 4
    np.set_printoptions(suppress=True)
    print('N', 'A', 'O', 'P')
    print(re_table)
    print(F1_N, F1_A, F1_O, F1_P)
    print(F1)
    return F1
from datetime import datetime
import tqdm
from functools import cmp_to_key
def get_point_pixel_values(
    coords,
    # NOTE(review): these date defaults are evaluated once at import time,
    # not per call -- confirm this is intended for long-running processes.
    start_date: datetime = date.today() - timedelta(days=365),
    end_date: datetime = date.today(),
    ascending: bool = True,
    orbit_number: object = None,
    scale: int = 20,
    n_jobs: int = 1,
    verbose: int = 0,
):
    """Given a coordinate tuple and a list of date intervals, loops over both to agglomerate GEE sar pixel values and returns a list of dictionnaries, with intensities and timestamps.
    The main loop, over dates, is parallelised.

    Parameters
    ----------
    coords : tuple of float
        Coordinates (lon, lat) of the point of interest
    start_date : datetime.datetime, optional
        First date of the time interval
    end_date : datetime.datetime, optional
        Last date of the time interval
    ascending : boolean, optional
        The trajectory to use when selecting data
    orbit_number : int or str, optional
        The orbit number to restrict the download to. If provided with an integer, the S1 temporal stack is filtered using the provided orbit number.
        If provided with a string value, we expect one of these keywords:
            - "max" for the orbit number with the highest number of image in the stack
            - "min" for the orbit number with the smallest number of image in the stack
        If ``None``, then no filter over the orbit number is applied.
    scale : int, optional
        Scale parameters of the getRegion() function. Defaulting at ``20``,
        change it to change the scale of the final data points. The highest,
        the lower the spatial resolution. Should be at least ``10``.
    n_jobs : int, optional
        Set the parallelisation factor (number of threads) for the GEE data
        access process. Set to 1 if no parallelisation required.
    verbose : int, optional
        Verbosity mode (0: No info, 1: Info, 2: Detailed info, with added timestamp)

    Returns
    -------
    `list`
        A list with a single dictionnary, to match the behaviour of :func:`get_pixel_values` for a single point.
    """
    # Select the Sentinel-1 pass direction to query.
    orbit = ASCENDING if ascending else DESCENDING
    date_intervals = get_date_interval_array(start_date, end_date)
    polygon = ee.Geometry.Point(coords)
    # Shared, mutated in place by the parallel workers (require="sharedmem").
    headers = []
    # Resolve the "max"/"min" keyword into a concrete orbit number first.
    if orbit_number is not None and type(orbit_number) == str:
        orbit_number = get_orbit_number(
            coords=coords,
            start_date=start_date,
            end_date=end_date,
            ascending=ascending,
            orbit_number=orbit_number,
            scale=scale,
        )
    if orbit_number is not None:
        print_verbose(f"Selected orbit: {orbit_number}", verbose, 1)
    print_verbose(f"Generating values for each time interval", verbose, 1)
    # Updates vals and headers, one by aggregating returned values of the delayed function, the other by modifying the passed `headers` argument
    vals = Parallel(n_jobs=n_jobs, require="sharedmem")(
        delayed(_get_zone_between_dates)(
            sub_start_date, sub_end_date, polygon, scale, orbit, orbit_number, headers
        )
        for sub_start_date, sub_end_date in tqdm(date_intervals)
    )
    print_verbose(f"Retrieving data properties for each time interval", verbose, 1)
    properties = Parallel(n_jobs=n_jobs, require="sharedmem")(
        delayed(_get_properties_between_dates)(
            sub_start_date, sub_end_date, polygon, orbit, orbit_number
        )
        for sub_start_date, sub_end_date in tqdm(date_intervals)
    )
    # Intervals with no imagery return None; drop them before assembling.
    properties = [p for p in properties if p is not None]
    properties = np.array([properties])
    vals = [val for val in vals if val is not None]
    # Pair each row of raw values with the column headers collected above.
    dictified_vals = [dict(zip(headers, val)) for values in vals for val in values]
    per_coord_dict = populate_coordinates_dictionary(
        dictified_values=dictified_vals, coordinates_dictionary={}
    )
    pixel_values = [per_coord_dict[k] for k in per_coord_dict.keys()]
    cmp_coordinates = cmp_to_key(cmp_coords)
    pixel_values.sort(key=cmp_coordinates)  # sorting pixels by latitude then longitude
    return pixel_values, properties
from typing import Union
from typing import Optional
from typing import Tuple
from typing import List
def any(x: Union[ivy.Array, ivy.NativeArray],
        axis: Optional[Union[int, Tuple[int], List[int]]] = None,
        keepdims: bool = False)\
        -> ivy.Array:
    """
    Tests whether any input array element evaluates to ``True`` along a specified axis.

    .. note::
        Positive infinity, negative infinity, and NaN must evaluate to ``True``.

    .. note::
        If ``x`` is an empty array, or the size of the axis (dimension) being
        reduced is zero, the test result must be ``False``.

    Parameters
    ----------
    x:
        input array.
    axis:
        axis or axes along which to perform a logical OR reduction. ``None``
        (the default) reduces over the entire array; a tuple of integers
        reduces over multiple axes. Each axis must lie in ``[-N, N)`` where
        ``N`` is the rank of ``x``; negative values count backward from the
        last dimension. An invalid ``axis`` must raise an exception.
    keepdims:
        If ``True``, the reduced axes are retained as singleton dimensions so
        the result broadcasts against ``x``; if ``False`` (the default), the
        reduced axes are dropped.

    Returns
    -------
    out:
        array of dtype ``bool`` holding the test results; zero-dimensional
        when the reduction covered the whole array.
    """
    framework = _cur_framework(x)
    return framework.any(x, axis, keepdims)
def _filter_in_wet_days(da: DataArray, dry_day_value: float):
    """
    Replace non wet days with ``dry_day_value``.

    Precipitation is first converted to mm/day; days at or below the 1 mm/d
    wet-day threshold are replaced by ``dry_day_value`` (typically NaN or 0).
    """
    precip_mm = convert_units_to(da, "mm/d")
    wet_mask = precip_mm > 1
    return precip_mm.where(wet_mask, dry_day_value)
def codemirror_field_css_assets(*args):
    """
    Template tag rendering the CodeMirror CSS assets needed by all the given
    form fields.

    Example:
    ::
        {% load djangocodemirror_tags %}

        {% codemirror_field_css_assets form.myfield1 form.myfield2 %}
    """
    render = CodemirrorAssetTagRender()
    render.register_from_fields(*args)
    css = render.css_html()
    return mark_safe(css)
import json
def request(method, url, **kwargs):
    """
    Wrapper around :func:`treq.request` with some added arguments
    and validation.

    :param str method:
        The HTTP method to use when making the request.

    :param str url:
        The url this request will be made to.

    :type data: str, list, tuple, set, dict
    :keyword data:
        The data to send along with some types of requests
        such as ``POST`` or ``PUT``

    :keyword dict headers:
        The headers to send along with the request to
        ``url``. Currently only single values per header
        are supported.

    :keyword function callback:
        The function to deliver an instance of :class:`Response`
        once we receive and unpack a response.

    :keyword function errback:
        The function to deliver an error message to. By default
        this will use :func:`.log.err`.

    :keyword class response_class:
        The class to use to unpack the internal response. This is mainly
        used by the unittests but could be used elsewhere to add some
        custom behavior to the unpack process for the incoming response.

    :keyword bool direct:
        When ``True``, bypass the callback/response-unpacking machinery and
        return the raw treq Deferred with logging attached.

    :raises NotImplementedError:
        Raised whenever a request is made of this function that we can't
        implement such as an invalid http scheme, request method or a problem
        constructing data to an api.
    """
    assert isinstance(url, STRING_TYPES)
    direct = kwargs.pop("direct", False)
    # We only support http[s]
    parsed_url = urlparse(url)
    if not parsed_url.hostname:
        raise NotImplementedError("No hostname present in url")
    if not parsed_url.path:
        raise NotImplementedError("No path provided in url")
    if not direct:
        # Snapshot the call before kwargs are mutated below, so the response
        # object can carry the original request.
        original_request = Request(
            method=method, url=url, kwargs=ImmutableDict(kwargs.copy()))
    # Headers
    headers = kwargs.pop("headers", {})
    headers.setdefault("Content-Type", ["application/json"])
    headers.setdefault("User-Agent", [USERAGENT])
    # Twisted requires lists for header values
    for header, value in headers.items():
        if isinstance(value, STRING_TYPES):
            headers[header] = [value]
        elif isinstance(value, (list, tuple, set)):
            continue
        else:
            raise NotImplementedError(
                "Cannot handle header values with type %s" % type(value))
    # Handle request data. NOTSET is a sentinel distinguishing "no data"
    # from falsy data such as "" or {}.
    data = kwargs.pop("data", NOTSET)
    if isinstance(data, dict):
        data = json_safe(data)
    if (data is not NOTSET and
            headers["Content-Type"] == ["application/json"]):
        data = json.dumps(data)
    elif data is not NOTSET:
        raise NotImplementedError(
            "Don't know how to dump data for %s" % headers["Content-Type"])
    # prepare keyword arguments
    kwargs.update(
        headers=headers,
        persistent=config["agent_http_persistent_connections"])
    if data is not NOTSET:
        kwargs.update(data=data)
    if direct:
        # We don't support these with direct request
        # types.
        assert "callback" not in kwargs
        assert "errback" not in kwargs
        assert "response_class" not in kwargs
        # Construct the request and attach some loggers
        # to callback/errback.
        uid = uuid4()
        treq_request = treq.request(method, url, **kwargs)
        treq_request.addCallback(HTTPLog.response, uid=uid)
        treq_request.addErrback(HTTPLog.error, uid=uid, method=method, url=url)
        return treq_request
    else:
        callback = kwargs.pop("callback", None)
        errback = kwargs.pop("errback", log.err)
        response_class = kwargs.pop("response_class", Response)
        # check assumptions for keywords
        assert callback is not None, "callback not provided"
        assert callable(callback) and callable(errback)
        assert data is NOTSET or \
            isinstance(data, tuple(list(STRING_TYPES) + [dict, list]))
        def unpack_response(response):
            deferred = Deferred()
            deferred.addCallback(callback)
            # Deliver the body onto an instance of the response
            # object along with the original request. Finally
            # the request and response via an instance of `Response`
            # to the outer scope's callback function.
            response.deliverBody(
                response_class(deferred, response, original_request))
            return deferred
        debug_kwargs = kwargs.copy()
        debug_url = build_url(url, debug_kwargs.pop("params", None))
        logger.debug(
            "Queued %s %s, kwargs: %r", method, debug_url, debug_kwargs)
        try:
            deferred = treq.request(method, quote_url(url), **kwargs)
        except NotImplementedError: # pragma: no cover
            logger.error(
                "Attempting to access a url over SSL but you don't have the "
                "proper libraries installed. Please install the PyOpenSSL and "
                "service_identity Python packages.")
            raise
        deferred.addCallback(unpack_response)
        deferred.addErrback(errback)
        return deferred
def for_stmt(target, iter_, body, orelse, local_writes):
    """Functional form of a for statement.

    When ``iter_`` is a TensorFlow tensor, the loop is lowered to a
    ``tf.while_loop`` via ``_tf_while_stmt``, threading the values of
    ``local_writes`` (variables assigned inside the loop body) through the
    loop state. Otherwise the plain Python semantics from ``py_defaults``
    are used.

    # NOTE(review): the tensor path does not execute ``orelse`` -- confirm
    # whether that is intentional (the Python path receives it).
    """
    if tf.is_tensor(iter_):
        # Drop variables that are still undefined; undefined values cannot
        # be carried through a tf.while_loop state tuple.
        local_writes = [
            var for var in local_writes if not py_defaults.is_undefined(var.val)
        ]
        n = _tf_len(iter_)
        def for_test(i, *_):
            # Loop condition: iteration index against the tensor length.
            return i < n
        def for_body(iterate_index, *state): # pylint: disable=missing-docstring
            # Restore the loop-carried variable values for this iteration.
            for var, s in zip(local_writes, state):
                var.val = s
            target.val = iter_[iterate_index]
            mods, _ = staging.execute_isolated(body, local_writes)
            # New state: advanced index followed by the modified variables.
            state = [iterate_index + 1] + mods
            return state
        result_values = _tf_while_stmt(for_test, for_body,
                                       [0] + [var.val for var in local_writes])
        # result_values[0] is the final index; the rest are variable values.
        for var, val in zip(local_writes, result_values[1:]):
            var.val = val
    else:
        py_defaults.for_stmt(target, iter_, body, orelse, local_writes)
def is_running():
    """Return whether the main global loop exists and is currently running."""
    loop = __loop__
    return loop is not None and loop.is_running()
from functools import reduce
def build_web_site(resources, site_class=None):
    """Build a Twisted web Site instance for a specified dictionary of
    resources.

    :param dict resources:
        Dictionary of path -> resource class mappings to create the site from.

    :type site_class: Sub-class of Twisted's Site
    :param site_class:
        Site class to create.  Defaults to :class:`LogFilterSite`.
    """
    if site_class is None:
        site_class = LogFilterSite
    root = Resource()
    # Sort by ascending path length to make sure we create resources lower
    # down in the path earlier.
    # Bug fixes vs. the original:
    #   * the sort key was inverted (`len(r[0]) if r[0] is None else 0`),
    #     which crashed on a None path and ignored length everywhere else;
    #   * `filter()` returns an iterator on Python 3, so the path parts must
    #     be materialized before slicing/indexing.
    resources = sorted(
        resources.items(),
        key=lambda r: len(r[0]) if r[0] is not None else 0)
    def create_node(node, path):
        # Reuse an existing intermediate node, or create and attach a new one.
        if path in node.children:
            return node.children.get(path)
        new_node = Resource()
        node.putChild(path, new_node)
        return new_node
    for path, resource in resources:
        # Split the path into non-empty segments.
        request_path = [part for part in path.split('/') if part]
        nodes, leaf = request_path[0:-1], request_path[-1]
        # Walk/create the intermediate nodes, then attach the leaf resource.
        parent = reduce(create_node, nodes, root)
        parent.putChild(leaf, resource)
    site_factory = site_class(root)
    return site_factory
def CheckExpectedOutput(output, expected):
    """Assert that expected content appears in the output.

    Arguments:
      output: Output from a tool to be searched for matches
      expected: An iterable which contains regular expressions, a match for
                each of which must appear in 'output'

    Returns the number of elements in 'expected' for which no match is found.
    """
    failures = 0
    for ex in expected:
        match = re.search(ex, output)
        if not match:
            # Bug fix: the original used Python 2 ``print`` statements,
            # which are syntax errors under Python 3.
            print('Test match failed:')
            print('Searching for regex:', ex)
            failures += 1
    if failures:
        print('output:\n', output)
    return failures
from functools import reduce
def vector_sum(vectors):
    """Sum all corresponding elements across the given vectors.

    Folds ``vector_add`` over ``vectors``; note that ``reduce`` with no
    initializer raises TypeError on an empty input.
    """
    return reduce(vector_add, vectors)
async def get_all_consent_documents(environment, access_token, deployment_id):
    """
    Fetch every consent document attached to a deployment.

    :param environment: The CARP [environment].
    :param access_token: The [access_token].
    :param deployment_id: The [deployment_id].
    :return: Lists all consents documents by [deployment_id].
    """
    endpoint = const.CONSENT + str(deployment_id) + const.CONSENT_DOCUMENTS
    return api.get(environment, endpoint, access_token)
def convert_to_time_series(dataset, sequence_len, input_dim, output_dim):
    """
    Turn a flat sequential ``dataset`` into sliding-window time-series pairs.

    :param dataset: 1-D list of sequential values
    :param sequence_len: number of time steps per instance
    :param input_dim: dimension of the input at each time step
    :param output_dim: dimension of the output for each instance
    :return: tuple of numpy arrays with shapes
             (None, sequence_len, input_dim) and (None, output_dim)
    """
    window = sequence_len * input_dim
    # Windows advance by one input vector (input_dim values) at a time.
    starts = range(0, len(dataset) - window - output_dim + 1, input_dim)
    data_x = [
        np.reshape(dataset[s:s + window], (sequence_len, input_dim))
        for s in starts
    ]
    data_y = [dataset[s + window:s + window + output_dim] for s in starts]
    return np.array(data_x), np.array(data_y)
from typing import Tuple
def get_ip_reputation_score(classification: str) -> Tuple[int, str]:
    """Translate a GreyNoise classification into a DBot score.

    :type classification: ``str``
    :param classification: classification of ip provided from GreyNoise.

    :return: tuple of dbot score and it's readable form.
    :rtype: ``tuple``
    """
    score_map = {
        "benign": (Common.DBotScore.GOOD, "Good"),
        "malicious": (Common.DBotScore.BAD, "Bad"),
    }
    if classification in score_map:
        return score_map[classification]
    # Missing, empty, "unknown" or unrecognised classifications all map to
    # an unknown score.
    return Common.DBotScore.NONE, "Unknown"
def _cv_input_text(cfg):
    """Validate an input_text configuration (voluptuous helper)."""
    lower = cfg.get(CONF_MIN)
    upper = cfg.get(CONF_MAX)
    if lower > upper:
        raise vol.Invalid('Max len ({}) is not greater than min len ({})'
                          .format(lower, upper))
    initial = cfg.get(CONF_INITIAL)
    # The initial value, when given, must fit within the length bounds.
    if initial is not None and not lower <= len(initial) <= upper:
        raise vol.Invalid('Initial value {} length not in range {}-{}'
                          .format(initial, lower, upper))
    return cfg
def map_values(e):
    """Return a Column containing the values of each map entry.

    :rtype: Column
    """
    column = ensure_column(e)
    return col(MapValues(column))
from typing import List
from typing import Dict
from typing import Tuple
from typing import Set
def find_connected_pattern(variables: List[str], triples: List[Dict[str, str]]) -> Tuple[Dict[str, str], int, Set[str]]:
    """Find the first triple pattern connected to the given set of variables.

    Returns the matching pattern, its position in ``triples`` and the union
    of the variable sets, or ``(None, None, variables)`` when no pattern
    shares a variable.
    """
    for position, candidate in enumerate(triples):
        candidate_vars = get_vars(candidate['triple'])
        if variables & candidate_vars:
            return candidate, position, variables | candidate_vars
    return None, None, variables
import hmac
def make_secure_val(val):
    """
    Return ``val`` paired with its keyed-hash signature: ``"<val>|<hmac>"``.

    Uses the module-level ``secret`` as the HMAC key. MD5 is passed
    explicitly to match the implicit Python 2 default, so values signed by
    the original implementation still verify.
    """
    import hashlib  # local import keeps this fix self-contained
    # Bug fix: on Python 3, hmac.new() requires bytes inputs and (since 3.8)
    # an explicit digestmod -- the original str/str call raised TypeError.
    key = secret if isinstance(secret, bytes) else secret.encode('utf-8')
    msg = val if isinstance(val, bytes) else val.encode('utf-8')
    digest = hmac.new(key, msg, hashlib.md5).hexdigest()
    return '%s|%s' % (val, digest)
from .plotting import make_rand_cmap
import os
def make_segm_from_catalog(catalog_star,
                           bounds, estimate_radius,
                           mag_name='rmag', mag_limit=22,
                           obj_name='', band='G',
                           ext_cat=None, draw=True,
                           save=False, dir_name='./Measure'):
    """
    Make segmentation map from star catalog. Aperture size used is based on SE semg map.

    Parameters
    ----------
    catalog_star : star catalog
    bounds : 1X4 1d array defining bounds of region (Xmin, Ymin, Xmax, Ymax)
    estimate_radius : function of turning magnitude into log R
    mag_name : magnitude column name in catalog_star
    mag_limit : magnitude limit to add segmentation
    obj_name : object name used in the saved file name
    band : photometric band used in the saved file name
    ext_cat : (bright) extended source catalog to mask
    draw : whether to draw the segm map
    save : whether to save the segm map as fits
    dir_name : path of saving

    Returns
    ----------
    seg_map : output segm map generated from catalog
    """
    Xmin, Ymin, Xmax, Ymax = bounds
    nX = Xmax - Xmin
    nY = Ymax - Ymin
    # Drop rows with missing magnitudes (masked column or NaN) and apply the
    # magnitude limit.
    try:
        catalog = catalog_star[~catalog_star[mag_name].mask]
    except AttributeError:
        catalog = catalog_star[~np.isnan(catalog_star[mag_name])]
    catalog = catalog[catalog[mag_name]<mag_limit]
    msg = "Make segmentation map based on catalog {:s}: {:d} stars"
    msg = msg.format(mag_name, len(catalog))
    logger.info(msg)
    # Estimate mask radius
    R_est = np.array([estimate_radius(m) for m in catalog[mag_name]])
    # Generate object apertures
    apers = [CircularAperture((X_c-Xmin, Y_c-Ymin), r=r)
             for (X_c,Y_c, r) in zip(catalog['X_CATALOG'], catalog['Y_CATALOG'], R_est)]
    # Further mask for bright extended sources
    if ext_cat is not None:
        if len(ext_cat)>0:
            for (X_c,Y_c, a, b, theta) in zip(ext_cat['X_IMAGE'],
                                              ext_cat['Y_IMAGE'],
                                              ext_cat['A_IMAGE'],
                                              ext_cat['B_IMAGE'],
                                              ext_cat['THETA_IMAGE'],):
                pos = (X_c-Xmin, Y_c-Ymin)
                theta_ = np.mod(theta, 360) * np.pi/180
                aper = EllipticalAperture(pos, a*6, b*6, theta_)
                apers.append(aper)
    # Draw segment map generated from the catalog
    seg_map = np.zeros((nY, nX))
    # Segmentation k sorted by mag of source catalog
    for (k, aper) in enumerate(apers):
        star_ma = aper.to_mask(method='center').to_image((nY, nX))
        if star_ma is not None:
            seg_map[star_ma.astype(bool)] = k+2
    if draw:
        plt.figure(figsize=(6,6), dpi=100)
        plt.imshow(seg_map, vmin=1, cmap=make_rand_cmap(int(seg_map.max())))
        plt.show()
    # Save segmentation map built from catalog
    if save:
        check_save_path(dir_name, overwrite=True, verbose=False)
        hdu_seg = fits.PrimaryHDU(seg_map.astype(int))
        band = band.lower()
        range_str = f"X[{Xmin}-{Xmax}]Y[{Ymin}-{Ymax}]"
        fname = f"{obj_name}-segm_{band}_catalog_{range_str}.fits"
        filename = os.path.join(dir_name, fname)
        hdu_seg.writeto(filename, overwrite=True)
        # Bug fix: the log message previously printed the literal text
        # "(unknown)" instead of the output path.
        logger.info(f"Saved segmentation map made from catalog as {filename}")
    return seg_map
import itertools
def get_forced_photometry_mp(table, ra_col_name, dec_col_name, surveys,
                             bands, apertures, fovs, image_folder_path, n_jobs=5,
                             auto_download=True,
                             verbosity=0):
    """Calculate forced photometry in multiprocessing mode.

    This function works analogous to get_forced_photometry only allowing to
    use multiple processor (python multiprocessing module).

    :param table: table object
        Input data table with at least RA and Decl. columns
    :param ra_col_name: string
        Exact string for the RA column in the table
    :param dec_col_name: string
        Exact string for the Decl. column in the table
    :param surveys: list of strings
        List of survey names, length has to be equal to bands, apertures and
        fovs
    :param bands: list of strings
        List of band names, length has to be equal to surveys, apertures and
        fovs
    :param apertures: list of floats
        List of apertures in arcseconds for forced photometry calculated,
        length has to be equal to surveys, bands and fovs
    :param fovs: list of floats
        Field of view in arcseconds of image cutouts, length has be equal to
        surveys, bands and apertures
    :param image_folder_path: string
        Path to the directory where all the images will be stored
    :param n_jobs:
        Number of cores to be used
    :param auto_download: Boolean
        Switch to enable/disable auto-downloading the cutouts images
    :param verbosity:
        Verbosity > 0 will print verbose statements during the execution
    :return: DataFrame
        Returns a DataFrame with the added columns for the forced photometry
        calculation.
    """
    # Check if table is pandas DataFrame otherwise convert to one
    table, format = ct.check_if_table_is_pandas_dataframe(table)
    # Add a column to the table specifying the object name used
    # for the image name
    table['temp_object_name'] = ut.coord_to_name(table[ra_col_name].values,
                                                 table[dec_col_name].values,
                                                 epoch="J")
    for jdx, survey in enumerate(surveys):
        band = bands[jdx]
        aperture = apertures[jdx]
        fov = fovs[jdx]
        # Assemble one argument tuple per row; scalar settings are repeated.
        ra = table[ra_col_name].values
        dec = table[dec_col_name].values
        index = table.index
        mp_args = list(zip(index,
                           ra,
                           dec,
                           itertools.repeat(survey),
                           itertools.repeat(band),
                           itertools.repeat(aperture),
                           itertools.repeat(fov),
                           itertools.repeat(image_folder_path),
                           table.temp_object_name,
                           itertools.repeat(auto_download),
                           itertools.repeat(verbosity)))
        # Start multiprocessing pool
        with mp.Pool(n_jobs) as pool:
            results = pool.starmap(_mp_get_forced_photometry, mp_args)
        # Write the per-row photometry results back into the table.
        for result in results:
            idx, mag, flux, sn, err, comment = result
            table.loc[idx, 'forced_{}_mag_{}'.format(survey, band)] = mag
            table.loc[idx, 'forced_{}_flux_{}'.format(survey, band)] = flux
            table.loc[idx, 'forced_{}_sn_{}'.format(survey, band)] = sn
            table.loc[idx, 'forced_{}_magerr_{}'.format(survey, band)] = \
                err
            table.loc[idx, 'forced_{}_{}_comment'.format(survey, band)] = \
                comment
    # Bug fix: DataFrame.drop is not in-place by default; the original
    # discarded the result, leaving the temporary column in the output.
    table = table.drop(columns='temp_object_name')
    table = ct.convert_table_to_format(table, format)
    return table
def Any(x):
    """The Any type; can also be used to cast a value to type Any.

    Acts as the identity function: the argument is returned unchanged.
    """
    return x
def resource_name_for_resource_type(resource_type, row):
    """Return the resource name for the resource type.

    Each returned row contains all possible changed fields. This function
    returns the resource name of the changed field based on the
    resource type. The changed field's parent is also populated but is not used.

    Args:
        resource_type: the string equivalent of the resource type
        row: a single row returned from the service

    Returns:
        The resource name of the field that changed, or '' for an
        UNSPECIFIED or UNKNOWN resource type.
    """
    # Map each supported resource type to the matching change_status field.
    field_by_type = {
        'AD_GROUP': 'ad_group',
        'AD_GROUP_AD': 'ad_group_ad',
        'AD_GROUP_CRITERION': 'ad_group_criterion',
        'CAMPAIGN': 'campaign',
        'CAMPAIGN_CRITERION': 'campaign_criterion',
    }
    field = field_by_type.get(resource_type)
    if field is None:
        return ''  # default for UNSPECIFIED or UNKNOWN
    return getattr(row.change_status, field).value
from re import M
import pprint
def compute_performance_metrics_binary(model, x, y, metric_names):
    """
    Given a model (TensorFlow) and (x, y), compute accuracy, loss, the
    confusion-matrix counts (TP/FP/TN/FN), recall, precision, f1 score,
    average precision, ROC AUC and a classification report. Only for
    binary classification.

    NOTE(review): ``M`` must be ``sklearn.metrics`` and ``confusion_matrix``
    must be in scope; the module-level ``from re import M`` import looks
    wrong (``re.M`` is a regex flag) — confirm the intended import.

    Arguments:
        model: tensorflow model
        x: feature vector
        y: label vector (one hot encoded)
        metric_names: names matching the values returned by
            ``model.evaluate`` (kept for interface compatibility)

    Returns: A dictionary containing Accuracy, Loss, True Positive,
        False Positive, True Negative, False Negative, Recall, Precision,
        F1 Score and ROC AUC; empty dict when y is not binary.
    """
    y_true = np.argmax(y, axis=1)
    if len(np.unique(y_true)) > 2:
        print("This only works for binary classification")
        return {}
    # model.evaluate returns [loss, accuracy, ...]: loss is always at the
    # first position and accuracy the second.
    metrics = model.evaluate(x, y)
    loss, acc = metrics[0], metrics[1] * 100
    print("Accuracy {:.3f}, Loss {:.3f}".format(acc, loss))
    y_probs = model.predict(x)
    y_pred = np.argmax(y_probs, axis=1)
    tp, fp, tn, fn = (0, 0, 0, 0)
    try:
        # we can only do this in the binary case; ravel() unpacking raises
        # ValueError when the confusion matrix is not 2x2
        tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    except ValueError:
        print("Not a binary classification problem")
    print("True Positive ", tp)
    print("False Positive ", fp)
    print("True Negative ", tn)
    print("False Negative ", fn)
    recall = M.recall_score(y_true, y_pred)
    precision = M.precision_score(y_true, y_pred)
    print("Recall {:.3f}, with formula {:.3f}".format(recall, (tp / (tp + fn))))
    print("Precision {:.3f}, with formula {:.3f}".format(precision, (tp / (tp + fp))))
    f1_score_cal = M.f1_score(y_true, y_pred)
    print("F1 score {:.3f}, with formula {:.3f}".format(f1_score_cal,
                                                        2 * ((precision * recall) / (precision + recall))))
    print("Average precision score {:.3f}".format(M.average_precision_score(y_true, y_pred)))
    roc_auc = M.roc_auc_score(y_true, y_pred)
    print("ROC AUC Score {:.3f}".format(roc_auc))
    clf_report = M.classification_report(y_true, y_pred, output_dict=True)
    pprint.pprint(clf_report)
    return {'Accuracy': acc,
            'Loss': loss,
            'True Positive': tp,
            'False Positive': fp,
            'True Negative': tn,
            'False Negative': fn,
            'Recall': recall,
            'Precision': precision,
            'F1 Score': f1_score_cal,
            'ROC AUC': roc_auc}
import re
def to_yw7(text):
    """Convert html tags to yWriter 6/7 raw markup.

    Strips font/span pollution, collapses all whitespace onto one line,
    maps italic/bold HTML tags to [i]/[b] markup and removes directly
    adjacent closing/opening tag pairs.

    Return a yw6/7 markup string.
    """
    # Clean up polluted HTML code.
    for pattern in ('</*font.*?>', '</*span.*?>', '</*FONT.*?>', '</*SPAN.*?>'):
        text = re.sub(pattern, '', text)
    # Put everything in one line.
    for ws in ('\n', '\r', '\t'):
        text = text.replace(ws, ' ')
    # Collapse runs of spaces; the original checked/replaced a SINGLE
    # space, which never terminates once any space remains.
    while '  ' in text:
        text = text.replace('  ', ' ').rstrip().lstrip()
    # Replace simple HTML tags by yWriter markup.
    simple_tags = [
        ('<i>', '[i]'), ('<I>', '[i]'),
        ('</i>', '[/i]'), ('</I>', '[/i]'),
        ('</em>', '[/i]'), ('</EM>', '[/i]'),
        ('<b>', '[b]'), ('<B>', '[b]'),
        ('</b>', '[/b]'), ('</B>', '[/b]'),
        ('</strong>', '[/b]'), ('</STRONG>', '[/b]'),
    ]
    for old, new in simple_tags:
        text = text.replace(old, new)
    # Opening em/strong tags may carry attributes, so use regexes.
    for pattern, markup in (('<em.*?>', '[i]'), ('<EM.*?>', '[i]'),
                            ('<strong.*?>', '[b]'), ('<STRONG.*?>', '[b]')):
        text = re.sub(pattern, markup, text)
    # Remove orphaned tag pairs (the duplicated '[/b][b]' pass of the
    # original was redundant and has been dropped).
    text = text.replace('[/b][b]', '')
    text = text.replace('[/i][i]', '')
    return text
def decode_labels(mask, num_images=1, num_classes=21):
    """Decode a batch of segmentation masks into RGB images.

    Args:
        mask: result of inference after taking argmax, shape (n, h, w, 1).
        num_images: number of images to decode from the batch.
        num_classes: number of classes to predict (including background).

    Returns:
        A (num_images, h, w, 3) uint8 batch of RGB images the same size
        as the input masks.
    """
    n, h, w, c = mask.shape
    assert n >= num_images, ('Batch size %d should be greater or equal than number of images to save %d.'
                             % (n, num_images))
    outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)
    for img_idx in range(num_images):
        canvas = Image.new('RGB', (w, h))
        pixels = canvas.load()
        for row_idx, row in enumerate(mask[img_idx, :, :, 0]):
            for col_idx, label in enumerate(row):
                if label < num_classes:
                    # paint the colour assigned to this class label
                    pixels[col_idx, row_idx] = label_colours[label]
        outputs[img_idx] = np.array(canvas)
    return outputs
def get_base_url(hostname):
    """
    Constructs the GitHub API url with the given hostname.

    :param str hostname: Hostname to construct the API endpoint url.
    :returns: The hostname unchanged when it already carries a scheme,
        otherwise the GitHub Enterprise v3 API base url for the hostname.
    :rtype: str
    """
    if hostname and hostname.startswith('http'):
        return hostname
    # Bare hostname: build the GitHub Enterprise v3 API base url.
    # (The original had an unreachable `return hostname` after this.)
    return "https://{hostname}/api/v3".format(hostname=hostname)
import array
def gauss_sqrt_cov_ft(karr: array, width: float = 1.0) -> array:
    """sqrt of FFT of KD Gaussian covariance matrix

    Square root of the Fourier transform of a covariance matrix that is a
    Gaussian function of difference in position.

    Parameters
    ----------
    karr : (L1,L2,...,LK/2+1,1,K)
        Array of vectors of spatial frequencies used in FFT, with singletons
        added to broadcast with `embed_ft`.
    width
        std dev of gaussian covariance. Default=1.0

    Returns
    -------
    array
        Square root of the DFT of the Gaussian covariance, scaled by the
        grid size so the continuum Fourier transform matches the DFT
        convention.
    """
    K = karr.shape[-1]
    LK_real = karr.shape[-3]  # = L_K/2 + 1, due to rfft
    # length of grid
    num_pt = karr.size * 2*(LK_real - 1) // (K*LK_real)  # size of irfft
    dk = np.prod([np.diff(karr, axis=i).max() for i in range(K)])  # k-cell vol
    ksq = np.sum((width * karr)**2, axis=-1)
    # scaled to convert continuum FT to DFT
    cov_ft = (dk * np.prod(width) / np.sqrt(2 * np.pi)) * np.exp(-0.5 * ksq)
    return num_pt * np.sqrt(cov_ft)
def get_qt_lineedit_suffix(gb, suffix):
    """Look up the line edit associated with a tab and suffix.

    Args:
        gb (QtWidgets.QGroupBox): Group box for the tab.
        suffix (str): Text appended to UI element search query.

    Returns:
        QtWidgets.QLineEdit: LineEdit matching val_{tag}_{suffix}.
    """
    # The tab tag is the group box name without its "gb_" prefix.
    tag = remove_prefix(gb.objectName(), "gb_")
    widget_name = f"val_{tag}_{suffix}"
    return gb.findChild(QtWidgets.QLineEdit, widget_name)
import logging
def fit_statmech_pseudo_rotors(Tlist, Cvlist, n_vib, n_rot, molecule=None):
    """
    Fit `n_vib` harmonic oscillator and `n_rot` hindered internal rotor modes to
    the provided dimensionless heat capacities `Cvlist` at temperatures `Tlist`
    in K. This method assumes that there are enough heat capacity points
    provided that the vibrational frequencies can be fit directly, but the
    hindered rotors must be combined into a single "pseudo-rotor".

    Args:
        Tlist: temperatures in K at which heat capacities are given.
        Cvlist: dimensionless heat capacities corresponding to `Tlist`.
        n_vib: number of harmonic oscillator modes to fit.
        n_rot: number of hindered rotor modes (all share one pseudo-rotor fit).
        molecule: optional molecule, used only for log messages.

    Returns:
        Tuple (vib, hind): the list of fitted vibrational frequencies and a
        list of (frequency, barrier) pairs, one per hindered rotor.

    Raises:
        StatmechFitError: if the solver returns a non-finite solution vector.
    """
    # Construct the lower and upper bounds for each variable
    bounds = []
    # Bounds for harmonic oscillator frequencies
    for i in range(n_vib):
        bounds.append((ho_freq_lower_bound, ho_freq_upper_bound))
    # Bounds for pseudo-hindered rotor frequency and barrier height
    bounds.append((hr_freq_lower_bound, hr_freq_upper_bound))
    bounds.append((hr_barr_lower_bound, hr_barr_upper_bound))
    # Construct the initial guess
    # Initial guesses within each mode type must be distinct or else the
    # optimization will fail
    x0 = np.zeros(n_vib + 2, np.float64)
    # Initial guess for harmonic oscillator frequencies
    if n_vib > 0:
        x0[0] = 200.0
        x0[1:n_vib] = np.linspace(800.0, 1600.0, n_vib - 1)
    # Initial guess for hindered rotor frequencies and barrier heights
    x0[n_vib] = 100.0
    x0[n_vib + 1] = 300.0
    # Execute the optimization
    fit = PseudoRotorFit(Tlist, Cvlist, n_vib, n_rot)
    fit.initialize(Neq=len(Tlist), Nvars=len(x0), Ncons=0, bounds=bounds, maxIter=max_iter)
    x, igo = fit.solve(x0)
    # Check that the results of the optimization are valid
    if not np.isfinite(x).all():
        raise StatmechFitError('Returned solution vector is nonsensical: x = {0}.'.format(x))
    # Solver status: igo == 8 means the iteration limit was hit, igo > 8 a
    # solver error; both are warnings only (the result is still returned).
    if igo == 8:
        logging.warning('Maximum number of iterations reached when fitting spectral data for '
                        '{0}.'.format(molecule.to_smiles()))
    if igo > 8:
        logging.warning('A solver error occured when fitting spectral data for {0}.'.format(molecule.to_smiles()))
    logging.debug('Fitting remaining heat capacity to {0} vibrations and {1} rotations'.format(n_vib, n_rot))
    logging.debug('The residuals for heat capacity values is {}'.format(fit.evaluate(x)[0]))
    # Postprocess optimization results
    vib = list(x[0:n_vib])
    hind = []
    # every rotor receives the same fitted pseudo-rotor (frequency, barrier)
    for i in range(n_rot):
        hind.append((x[n_vib], x[n_vib + 1]))
    return vib, hind
import logging
def response_to_invalid_request(diameter_request,
                                origin_host,
                                origin_realm):
    """
    Build the answer for an unsupported Diameter request.

    A request is considered invalid when its command code is not among the
    ones we support. The answer reuses the request's standard header/AVPs
    and adds a DIAMETER_UNABLE_TO_COMPLY result code.
    """
    logging.info("Responding to invalid request...")
    # Start from the standard header/AVP skeleton for this request.
    skeleton = generate_generic_diameter_message(diameter_request,
                                                origin_host,
                                                origin_realm)
    avps = skeleton['avps']
    # Signal that we cannot process this command code.
    avps.append(
        encodeAVP('Result-Code',
                  diameter_base.result_codes['DIAMETER_UNABLE_TO_COMPLY']))
    # Join the header and the AVPs into the actual Diameter response.
    return createRes(skeleton['header'], avps)
def lsi_model(tmp_corpus, temp_dict):
    """Create a latent semantic indexing model from a corpus of words and a
    dictionary using gensim.

    Args:
        tmp_corpus (list): a list of words from dict
        temp_dict (Dictionary): a gensim Dictionary

    Returns:
        A list of word vectors that comprise the top topics from an LSI
        model for easy processing.
    """
    # the original used a Python 2 print statement; use the function form
    print('starting lsi model')
    lsi = models.LsiModel(tmp_corpus, id2word=temp_dict,
                          num_topics=number_topics)
    lsi_topics = lsi.show_topics(formatted=False)
    # flatten the (weight, word) pairs of every topic, keeping the words
    return [item[1] for topic in lsi_topics for item in topic]
def kmeanssample(X, k, nsample=0, **kwargs):
    """ 2-pass kmeans, fast for large N:
        1) kmeans a random sample of nsample ~ sqrt(N) from X
        2) full kmeans, starting from those centres
    """
    total, _dim = X.shape
    if nsample == 0:
        # default sample size ~ sqrt(N), but at least 10 per centre
        nsample = max(2 * np.sqrt(total), 10 * k)
    sample_pts = randomsample(X, int(nsample))
    initial_centres = randomsample(X, int(k))
    # pass 1: cheap kmeans on the sample to get good starting centres
    refined_centres = kmeans(sample_pts, initial_centres, **kwargs)[0]
    # pass 2: full kmeans seeded with the refined centres
    return kmeans(X, refined_centres, **kwargs)
def get_real_resolution():
    """Return the real screen resolution as (width, height).

    Queries the DESKTOPHORZRES/DESKTOPVERTRES device caps of the primary
    display via the Windows GDI (intended to give the physical resolution
    even under DPI scaling — confirm on the target Windows version).
    """
    hDC = win32gui.GetDC(0)
    # horizontal resolution
    w = win32print.GetDeviceCaps(hDC, win32con.DESKTOPHORZRES)
    # vertical resolution
    h = win32print.GetDeviceCaps(hDC, win32con.DESKTOPVERTRES)
    return w, h
def op_acot(x):
    """Returns the inverse cotangent of this mathematical object.

    NOTE(review): this computes op_inv(op_atan(x)). If ``op_inv`` is the
    multiplicative inverse this yields 1/atan(x), which is NOT
    acot(x) = atan(1/x) — verify the semantics of ``op_inv``.
    """
    return op_inv(op_atan(x))
import struct
import fcntl
import termios
def terminal_size():
    """
    Get terminal size

    Will return width and height
    """
    if os_windows:
        # no ioctl on Windows: fall back to the classic console size
        return 80, 25
    # query the kernel for the controlling terminal's window size
    packed = fcntl.ioctl(0, termios.TIOCGWINSZ,
                         struct.pack('HHHH', 0, 0, 0, 0))
    rows, cols, _xpixel, _ypixel = struct.unpack('HHHH', packed)
    return cols, rows
import types
from typing import Dict
import numpy
from typing import List
def sdc_pandas_series_groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
                              group_keys=True, squeeze=False, observed=False):
    """
    Intel Scalable Dataframe Compiler User Guide
    ********************************************
    Pandas API: pandas.Series.groupby

    Limitations
    -----------
    - Parameters ``axis``, ``level``, ``as_index``, ``group_keys``, ``squeeze`` and ``observed``
      are currently unsupported by Intel Scalable Dataframe Compiler
    - Parameter ``by`` is supported as single literal column name only
    - Mutating the contents of a DataFrame between creating a groupby object and calling it's methods is unsupported

    Examples
    --------
    .. literalinclude:: ../../../examples/series/series_groupby.py
       :language: python
       :lines: 33-
       :caption: Return the mean of the values grouped by numpy array.
       :name: ex_series_groupby

    .. command-output:: python ./series/series_groupby.py
       :cwd: ../../../examples

    .. seealso::
        :ref:`Series.resample <pandas.Series.resample>`
            Resample time-series data.

    Intel Scalable Dataframe Compiler Developer Guide
    *************************************************
    Pandas Series method :meth:`pandas.Series.groupby` implementation.

    .. only:: developer
        Test: python -m sdc.runtests sdc.tests.test_groupby.TestGroupBy.test_series_groupby*
    """
    _func_name = 'Method Series.groupby().'
    ty_checker = TypeChecker(_func_name)
    ty_checker.check(self, SeriesType)
    # we support only simpliest case of by being 1D array (a column of a DataFrame)
    # TODO: extend and support fully functional SeriesGroupBy
    if not ((isinstance(by, types.Array) and by.ndim == 1)
            or by == string_array_type):
        return None
    if not (isinstance(axis, (types.Integer, types.UnicodeType, types.Omitted)) or axis == 0):
        ty_checker.raise_exc(axis, 'int or str', 'axis')
    if not (level is None or isinstance(level, types.Omitted)):
        raise TypingError('{} Unsupported parameters. Given inplace: {}'.format(_func_name, level))
    if not (as_index is True or isinstance(as_index, types.Omitted)):
        raise TypingError('{} Unsupported parameters. Given inplace: {}'.format(_func_name, as_index))
    if not (isinstance(sort, (types.Omitted, types.Boolean)) or sort is True):
        ty_checker.raise_exc(sort, 'bool', 'sort')
    if not (group_keys is True or isinstance(group_keys, types.Omitted)):
        raise TypingError('{} Unsupported parameters. Given inplace: {}'.format(_func_name, group_keys))
    if not (squeeze is False or isinstance(squeeze, types.Omitted)):
        raise TypingError('{} Unsupported parameters. Given inplace: {}'.format(_func_name, squeeze))
    if not (observed is False or isinstance(observed, types.Omitted)):
        raise TypingError('{} Unsupported parameters. Given inplace: {}'.format(_func_name, observed))
    # key type of the grouping dict follows the dtype of the 'by' array
    by_type = by.dtype
    list_type = types.ListType(types.int64)

    # returned impl builds a typed dict: group key -> list of positional
    # indices, skipping NaN keys, then wraps it in a SeriesGroupBy object
    def sdc_pandas_series_groupby_impl(self, by=None, axis=0, level=None, as_index=True, sort=True,
                                       group_keys=True, squeeze=False, observed=False):
        if len(self) != len(by):
            raise ValueError("Series.groupby(). Grouper and axis must be same length")
        grouped = Dict.empty(by_type, list_type)
        for i in numpy.arange(len(by)):
            if isna(by, i):
                continue
            value = by[i]
            group_list = grouped.get(value, List.empty_list(types.int64))
            group_list.append(i)
            grouped[value] = group_list
        return init_series_groupby(self, by, grouped, sort)

    return sdc_pandas_series_groupby_impl
def get_most_popular():
    """
    Helper which retrieves most popular urls based on `redirects`(desc) and
    `created_at`(desc).

    Returns a lazily-evaluated Django queryset ordered by redirect count
    first, breaking ties by most recent creation time.
    """
    return Url.objects.order_by('-redirects', '-created_at')
def eval_genomes(genomes, substrate, vd_environment, generation):
    """
    Evaluate the fitness of an entire population against the test visual
    descriminator environment using the provided substrate configuration
    of the descriminatorANN.

    Arguments:
        genomes: The list of genomes in the population
        substrate: The substrate configuration of the descriminatorANN
        vd_environment: The test visual descriminator environment
        generation: The id of current generation

    Returns:
        Tuple (best_genome, max_fitness, distances): the best CPPN genome,
        the maximal fitness score and the per-genome error distances.
    """
    best_genome, best_fitness = None, 0
    error_distances = []
    for individual in genomes:
        fitness, distance = eval_individual(individual, substrate, vd_environment)
        individual.SetFitness(fitness)
        error_distances.append(distance)
        # track the strictly best genome seen so far
        if fitness > best_fitness:
            best_fitness = fitness
            best_genome = individual
    return best_genome, best_fitness, error_distances
def cosine_similarities(a, b, transform):
"""
returns list of cosine similarities between lists of vectors
a and b. The z_score transformation is applied if transform == True
"""
a = numpy.stack(a)
b = numpy.stack(b)
#transform if requested
if transform:
print "transforming"
# z_score is written to apply same scale to a and b
a, b = z_score(a, b)
print "calculating cosine dists"
cos = [cosine_similarity(a[i], b[i]) for i in range(len(a))]
return cos | 4164dcce80ba84ceb8b468b4db2d72b2ca8a10f7 | 3,630,281 |
def element_damage_nohitbox(raw_element, sharpness):
    """
    Elemental damage dealt with the given elemental attack value at the
    given sharpness level, before applying any monster-part hitbox
    weakness. Note that this is independent of the attack's motion value.
    """
    modifier = SharpnessLevel.element_modifier(sharpness)
    return raw_element * modifier
def getTabData(header, index):
    """Get the table data at index from above tables generator.

    Expects the header string and the index of the table; returns the
    text between the first '{' at or after index and the next '}'.
    Raises ValueError (from str.index) when either brace is missing.
    """
    open_pos = header.index('{', index) + 1  # first char after '{'
    close_pos = header.index('}', open_pos)
    return header[open_pos:close_pos]
def get_default_input_id():
    """gets default input device number

    pygame.midi.get_default_input_id(): return default_id

    Return the default device ID or -1 if there are no devices.
    The result can be passed to the Input()/Output() class.

    On the PC, the user can specify a default device by
    setting an environment variable. For example, to use device #1.

        set PM_RECOMMENDED_INPUT_DEVICE=1

    The user should first determine the available device ID by using
    the supplied application "testin" or "testout".

    In general, the registry is a better place for this kind of info,
    and with USB devices that can come and go, using integers is not
    very reliable for device identification. Under Windows, if
    PM_RECOMMENDED_OUTPUT_DEVICE (or PM_RECOMMENDED_INPUT_DEVICE) is
    *NOT* found in the environment, then the default device is obtained
    by looking for a string in the registry under:

        HKEY_LOCAL_MACHINE/SOFTWARE/PortMidi/Recommended_Input_Device
    and HKEY_LOCAL_MACHINE/SOFTWARE/PortMidi/Recommended_Output_Device

    for a string. The number of the first device with a substring that
    matches the string exactly is returned. For example, if the string
    in the registry is "USB", and device 1 is named
    "In USB MidiSport 1x1", then that will be the default
    input because it contains the string "USB".

    In addition to the name, get_device_info() returns "interf", which
    is the interface name. (The "interface" is the underlying software
    system or API used by PortMidi to access devices. Examples are
    MMSystem, DirectX (not implemented), ALSA, OSS (not implemented), etc.)
    At present, the only Win32 interface is "MMSystem", the only Linux
    interface is "ALSA", and the only Max OS X interface is "CoreMIDI".
    To specify both the interface and the device name in the registry,
    separate the two with a comma and a space, e.g.:

        MMSystem, In USB MidiSport 1x1

    In this case, the string before the comma must be a substring of
    the "interf" string, and the string after the space must be a
    substring of the "name" name string in order to match the device.

    Note: in the current release, the default is simply the first device
    (the input or output device with the lowest PmDeviceID).
    """
    # Thin wrapper: all selection logic lives in the PortMidi binding.
    return _pypm.GetDefaultInputDeviceID()
from contextlib import contextmanager
from wurlitzer import sys_pipes
def notebook_system_output():
    """Capture system-level stdout/stderr within a Jupyter Notebook shell.

    Get a context manager that attempts to use `wurlitzer
    <https://github.com/minrk/wurlitzer>`__ to capture system-level
    stdout/stderr within a Jupyter Notebook shell, without affecting
    normal operation when run as a Python script. For example:

    >>> sys_pipes = sporco.util.notebook_system_output()
    >>> with sys_pipes():
    >>>     command_producing_system_level_output()

    Returns
    -------
    sys_pipes : context manager
        Context manager that handles output redirection when run within a
        Jupyter Notebook shell
    """
    @contextmanager
    def null_context_manager():
        yield
    if in_notebook():
        try:
            # wurlitzer is optional: fall back to the no-op manager when
            # it is not installed. (The original had an empty try body and
            # attached the else-branch to the try instead of the if.)
            from wurlitzer import sys_pipes
        except ImportError:
            sys_pipes = null_context_manager
    else:
        # Not running in a notebook: no system-level capture needed.
        sys_pipes = null_context_manager
    return sys_pipes
def dict_merged(d, _filter=None, **kw):
    """Update dictionary d (in place when truthy) with the items passed as
    kw whose values pass _filter; without a filter, None values are
    dropped. Returns the updated dictionary."""
    result = d or {}
    predicate = _filter if _filter else (lambda value: value is not None)
    for key, value in kw.items():
        if predicate(value):
            result[key] = value
    return result
def check_latest_deploy(package, region, requirements_hash):
    """
    Args:
        package: Name of package to query
        region: region to query for
        requirements_hash: hash of requirements.txt file
    returns:
        Boolean: False if requirements hash matches the latest deployed
            version (doesn't need deploying); True if it does not match
            (needs deploying)
    """
    _version, deployed_hash = get_latest_deployed_version(region=region,
                                                          package=package)
    # needs deploying exactly when the hashes differ
    return requirements_hash != deployed_hash
def docker_images(docker_url, query):
    """Look for local Docker images whose tag or id starts with query.

    :param docker_url: Docker base url
    :param query: image name lookup
    :type docker_url: str
    :type query: str
    :returns: all matching images
    :rtype: set
    """
    client = Client(base_url=docker_url)
    matches = set()
    for image in client.images():
        # matching repository tags
        matches.update(tag for tag in image['RepoTags']
                       if tag.startswith(query))
        # image id without its "sha256:" style prefix
        short_id = image['Id'].split(':')[1]
        if short_id.startswith(query):
            matches.add(short_id)
    return matches
from typing import List
from typing import Dict
from typing import Optional
import logging
def _generate_pathfinding_environment(
    object_bounds_list: List[List[Dict[str, float]]],
    source: Dict[str, float] = None,
    target: Dict[str, float] = None,
    save_path_plot_with_name: str = None
) -> Optional[Environment]:
    """Generate and return the pathfinding environment using the given list of
    object bounds and the global room bounds. Save plots of the paths to the
    local drive if save_path_plot_with_name is not None. Returns None when
    environment construction fails."""
    # Dilate each object's bounds by the performer's half-width (plus
    # tolerance) and merge overlapping polygons so the planner can treat
    # the performer as a point.
    poly_coords_list = _dilate_and_unify_object_bounds(
        object_bounds_list,
        ((PERFORMER_HALF_WIDTH + VARIANCE)),
        (source['x'], source['z']) if source else None,
        (target['x'], target['z']) if target else None
    )
    logging.debug(f'poly coords list {poly_coords_list}')
    pathfinding_environment = (
        plotting.PlottingEnvironment(plotting_dir=save_path_plot_with_name)
        if save_path_plot_with_name else Environment()
    )
    # Room rectangle shrunk by the tolerance so paths keep clear of walls.
    room_bounds = [
        (ROOM_X_MAX - VARIANCE, ROOM_Z_MAX - VARIANCE),
        (ROOM_X_MIN + VARIANCE, ROOM_Z_MAX - VARIANCE),
        (ROOM_X_MIN + VARIANCE, ROOM_Z_MIN + VARIANCE),
        (ROOM_X_MAX - VARIANCE, ROOM_Z_MIN + VARIANCE)
    ]
    logging.debug(f'room bounds {room_bounds}')
    try:
        pathfinding_environment.store(
            room_bounds,
            poly_coords_list,
            validate=True
        )
        pathfinding_environment.prepare()
    except Exception as e:
        # Broad catch is deliberate: setup failures are logged and
        # signalled to the caller with None instead of aborting generation.
        logging.error('UNEXPECTED ERROR IN ENVIRONMENT')
        logging.error(e)
        return None
    return pathfinding_environment
def keyword_confirm(request, name=None):
    """Render association search results matching a target-species keyword.

    Filters Association objects with a case-insensitive substring match on
    ``target_species``, sorts them by name via ``_sorted_nicely``
    (presumably a natural sort — see that helper), and renders the
    search-results template.
    """
    q_objects = Q()
    # single OR'ed condition: case-insensitive substring match
    q_objects.add(Q(target_species__icontains=name), Q.OR)
    confirm_proteins = Association.objects.filter(q_objects)
    confirm_proteins = _sorted_nicely(confirm_proteins, sort_key="name")
    return render(
        request,
        "association/search_results.html",
        {"confirm_proteins": confirm_proteins},
    )
from typing import Union
from typing import Optional
from typing import Callable
def day_dropdown_widget(
    data: Union["LogData", pd.DataFrame], callback: Optional[Callable] = None
) -> "ipywidgets.Dropdown":
    """Create dropdown widget to filter log data by a specific day.

    Parameters
    ----------
    data : :class:`~biopsykit.carwatch_logs.log_data.LogData` or :class:`~pandas.DataFrame`
        log data as ``LogData`` object or as dataframe
    callback : function, optional
        function reference to be used as callback function or ``None`` for no callback. Default: ``None``

    Returns
    -------
    :class:`~ipywidgets.widgets.widget_selection.Dropdown`
        dropdown widget

    Raises
    ------
    ImportError
        if ``ipywidgets`` is not installed
    """
    from biopsykit.carwatch_logs import LogData  # pylint:disable=import-outside-toplevel
    try:
        import ipywidgets.widgets  # pylint:disable=import-outside-toplevel
    except ImportError as e:
        raise ImportError(
            "Creating widget failed because ipywidgets cannot be imported. Install it via 'pip install ipywidgets'."
        ) from e
    # first entry lets the user clear the day filter
    options = [("Select Day", None)]
    if isinstance(data, LogData):
        data = data.data
    # one option per distinct calendar day present in the log index
    dates = data.index.normalize().unique()
    dates = [str(date.date()) for date in dates]
    options = options + list(zip(dates, dates))
    widget = ipywidgets.Dropdown(options=options, description="Day")
    # subscribe the callback to selection changes, if one was provided
    if callback:
        widget.observe(callback, names="value")
    return widget
def is_node_valid(node: dict) -> bool:
    """
    Returns True if the provided node is non-empty and contains both a
    directive and a token key.
    """
    if not node:
        return False
    return DIR in node and TOK in node
import os
def is_crawler(client_ip):
    """Classify an IP address with udger and report whether it is a crawler.

    :param client_ip: IP address string to classify
    :return: True when udger classifies the IP as a crawler
    """
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    udger = Udger(data_dir)
    # direct comparison instead of the redundant `True if ... else False`
    return udger.parse_ip(client_ip)['ip_classification_code'] == 'crawler'
import pytz
def pytz_version():
    """Return the version of pytz as a (year, month) tuple of ints."""
    major, minor = pytz.__version__.split('.')
    return int(major), int(minor)
from typing import List
from typing import Any
from typing import Tuple
def fixup_partition_table(partitions: List[Any], sector_size=512) -> Tuple[List[Any], int]:
    """
    Return fixed partition table so it can be really written to disk.
    Also return total size of partition.

    Each partition's start is aligned to DEFAULT_ALIGNMENT and its size to
    sector_size; partitions are laid out back to back. The first partition
    is pushed to sector 2048 if it starts earlier (the conventional 1 MiB
    reserved area). Partitions are assumed to support ``_replace``
    (namedtuple-like) — TODO confirm.
    """
    start_offset = 0
    if _sect(partitions[0].start, sector_size) < 2048:
        start_offset = 2048
    end = start_offset * sector_size
    ret = []
    for part in partitions:
        start = _align(end + 1, DEFAULT_ALIGNMENT)  # Align to 1 MB
        size = _align(part.size, sector_size)
        ret.append(part._replace(start=start, size=size))
        end = start + size
    log.debug("Partition table: %s", pformat(ret))
    # Account for GPT copy: reserve 16 MiB after the last partition for
    # the backup GPT structures.
    return ret, end + 16 * 1024 * 1024
def topological_sort(self):
    """
    Returns:
        List(int): Topological sort of vertices of a graph, taken from the
        depth-first search ordering, or None when some edge violates that
        ordering (i.e. the graph has a cycle).
    """
    order = self.depth_first_search()[1]
    position = [-1] * len(self.v)
    for pos, vertex in enumerate(order):
        position[vertex] = pos
    # every edge must point from an earlier to a later vertex
    if any(position[u] > position[v] for u, v in self.e):
        return None
    return order
def construct_start_stop_intervals(intron_intervals, d):
    """Split an iterable of intervals into two parallel tuples of 2d-bp
    intervals centred on each interval's start and stop positions.
    The input is consumed in a single pass, so one-shot iterables work."""
    starts, stops = [], []
    for iv in intron_intervals:
        starts.append(ChromosomeInterval(iv.chromosome, iv.start - d,
                                         iv.start + d, iv.strand))
        stops.append(ChromosomeInterval(iv.chromosome, iv.stop - d,
                                        iv.stop + d, iv.strand))
    return tuple(starts), tuple(stops)
def region_growing(image, coordinate, number):
    """Grow a region of ``number`` voxels from a seed coordinate.

    Starting at ``coordinate``, repeatedly absorbs the queued neighbour
    voxel whose intensity is closest to the running region mean until the
    region holds ``number`` voxels.

    Fixes over the original:
      * Python 2 ``print`` statements converted to ``print()`` calls.
      * Coordinates read back from the float64 ``neighbor_list`` are cast
        to int before being used as array indices (modern NumPy rejects
        float indices, so the second iteration used to crash).

    :param image: 3D numpy array of intensities.
    :param coordinate: (x, y, z) seed position.
    :param number: target region size in voxels.
    :return: array like ``image`` with the grown region's intensities
        (zero elsewhere), or False when the seed is outside the image.
    """
    nt = number
    # tmp_image stores marks (1 = queued candidate, 2 = in region);
    # rg_image stores the new image after region growing.
    tmp_image = np.zeros_like(image)
    rg_image = np.zeros_like(image)
    image_shape = image.shape
    x = int(coordinate[0])
    y = int(coordinate[1])
    z = int(coordinate[2])
    # ensure the coordinate is in the image
    inside = (x >= 0) and (x < image_shape[0]) and (y >= 0) and \
             (y < image_shape[1]) and (z >= 0) and (z < image_shape[2])
    if not inside:
        print("The coordinate is out of the image range.")
        return False
    # initialize region_mean and region_size
    region_mean = image[x, y, z]
    region_size = 0
    # initialize neighbor_list with 10000 rows, 4 columns (x, y, z, value)
    neighbor_free = 10000
    neighbor_pos = -1
    neighbor_list = np.zeros((neighbor_free, 4))
    # 26 neighbour offsets; only the first 6 (face neighbours) are used
    neighbors = [[1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, -1, 0],
                 [0, 0, -1], [0, 0, 1],
                 [1, 1, 0], [1, 1, 1], [1, 1, -1], [0, 1, 1],
                 [-1, 1, 1], [1, 0, 1], [1, -1, 1], [-1, -1, 0],
                 [-1, -1, -1], [-1, -1, 1], [0, -1, -1], [1, -1, -1],
                 [-1, 0, -1], [-1, 1, -1], [0, 1, -1], [0, -1, 1],
                 [1, 0, -1], [1, -1, 0], [-1, 0, 1], [-1, 1, 0]]
    while region_size < nt:
        # queue the unvisited direct (face) neighbours of the seed point
        for i in range(6):
            xn = x + neighbors[i][0]
            yn = y + neighbors[i][1]
            zn = z + neighbors[i][2]
            # ensure the coordinate is in the image
            inside = (xn >= 0) and (xn < image_shape[0]) and (yn >= 0) and \
                     (yn < image_shape[1]) and (zn >= 0) and (zn < image_shape[2])
            # ensure the original flag 0 is not changed
            if inside and tmp_image[xn, yn, zn] == 0:
                # add this point to neighbor_list and mark it with 1
                neighbor_pos = neighbor_pos + 1
                neighbor_list[neighbor_pos] = [xn, yn, zn, image[xn, yn, zn]]
                tmp_image[xn, yn, zn] = 1
                # ensure there is enough space to store neighbor_list
                if neighbor_pos + 100 > neighbor_free:
                    neighbor_free += 10000
                    new_list = np.zeros((10000, 4))
                    neighbor_list = np.vstack((neighbor_list, new_list))
        # distance of every queued candidate to the current region mean
        distance = np.abs(neighbor_list[:neighbor_pos + 1, 3] -
                          np.tile(region_mean, neighbor_pos + 1))
        # chose the min distance point
        index = distance.argmin()
        # mark the current seed as part of the region and update new image
        tmp_image[x, y, z] = 2
        rg_image[x, y, z] = image[x, y, z]
        region_size += 1
        # the closest candidate becomes the new seed point
        # (int casts: neighbor_list is float64, array indices must be int)
        x = int(neighbor_list[index][0])
        y = int(neighbor_list[index][1])
        z = int(neighbor_list[index][2])
        # update region mean value
        region_mean = (region_mean * region_size + neighbor_list[index, 3]) / (region_size + 1)
        # remove the chosen candidate from the queue (swap with last entry)
        neighbor_list[index] = neighbor_list[neighbor_pos]
        neighbor_pos -= 1
    return rg_image
def mock_subprocess(case_tuple):
    """Build a fake subprocess.check_output driven by SubprocessMock cases.

    We perform several subprocess.check_output calls, but we want to only
    mock one of them at a time; commands not covered by *case_tuple*
    simply return an empty byte-string.

    :type case_tuple: tuple of SubprocessMock
    :param case_tuple: See docstring for SubprocessMock
    """
    def fake_check_output(inputs, **kwargs):
        # Strip `--git-dir <arg>` / `--work-tree <arg>` pairs from the git
        # command. This is just a convenience / increased readability step.
        while len(inputs) >= 2 and inputs[1] in ['--git-dir', '--work-tree']:
            inputs = inputs[0:1] + inputs[3:]
        command = ' '.join(
            part.decode('utf-8') if not isinstance(part, str) else part
            for part in inputs
        )
        for case in case_tuple:
            if not command.startswith(case.expected_input):
                # We don't care what is returned, if we're not mocking it.
                continue
            if case.should_throw_exception:
                raise CalledProcessError(1, '', case.mocked_output)
            return case.mocked_output
        # Default return value is just a byte-string.
        return b''
    return fake_check_output
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.