repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
L0laapk3/RLBot-ReorientML | test.py | import random
import sys
from pathlib import Path
from typing import Optional
import math
from rlbot.training.training import Grade
from rlbot.utils.game_state_util import GameState, BallState, Physics, Rotator, Vector3, CarState
from rlbottraining import exercise_runner
from rlbottraining.match_configs import make_match_config_with_bots
from rlbottraining.rng import SeededRandomNumberGenerator
from rlbottraining.training_exercise import TrainingExercise
class RotationExercise(TrainingExercise):
    """Training exercise that spawns the car mid-air with a random orientation
    and a random max-magnitude spin, so the bot can practice reorienting."""

    def on_briefing(self) -> Optional[Grade]:
        # Hand the matchcomms channel to the grader so it can talk to the bot.
        self.grader.matchcomms = self.get_matchcomms()
        return None

    def make_game_state(self, rng: SeededRandomNumberGenerator) -> GameState:
        # NOTE(review): the seeded rng parameter is deliberately shadowed by the
        # global `random` module, which makes exercises non-reproducible.
        rng = random
        car_physics = Physics()
        # sinh biases the pitch sample toward the extremes of its range.
        car_physics.rotation = Rotator(math.sinh(rng.uniform(-1, 1)),
                                       rng.uniform(-math.pi, math.pi),
                                       rng.uniform(-math.pi, math.pi))
        car_physics.location = Vector3(0, 0, 800)
        # BUG FIX: this was a generator expression; the `sum(...)` below
        # exhausted it, so the Vector3 constructor then received zero
        # components. A list can be iterated twice.
        velocity = [rng.normalvariate(0, 1) for _ in range(3)]
        norm = sum(x ** 2 for x in velocity) ** 0.5
        # Random direction at fixed magnitude 5.5 (the game's max angular speed).
        car_physics.angular_velocity = Vector3(*(x / norm * 5.5 for x in velocity))
        ball_state = BallState(physics=Physics(velocity=Vector3(0, 0, 20),
                                               location=Vector3(500, 0, 800)))
        return GameState(cars={0: CarState(physics=car_physics)}, ball=ball_state)
if __name__ == '__main__':
    current_path = Path(__file__).absolute().parent
    # Make the repository root importable before pulling in the shared grader.
    sys.path.insert(0, str(current_path.parent.parent))  # this is for first process imports
    from common_graders.matchcomms_grader import MatchcommsGrader

    # Run 100 randomized reorientation exercises against the simulation agent.
    match_config = make_match_config_with_bots(blue_bots=[current_path / 'simulation_agent.cfg'])
    exercises = [RotationExercise(name='simulate rotation', grader=MatchcommsGrader(), match_config=match_config)
                 for _ in range(100)]
    print(list(exercise_runner.run_playlist(exercises)))
|
L0laapk3/RLBot-ReorientML | train_aerial_turn.py | <gh_stars>0
import sys, os
from pathlib import Path
import msvcrt
import math
import torch
import gc
from torch.optim.adadelta import Adadelta
from quicktracer import trace
from device import device
# Physics tick length: Rocket League simulates at 120 Hz.
delta_time = 1 / 120
# Episode length: 1.8 seconds of simulated time.
steps = int(round(1.8 / delta_time))
hidden_size = 32    # width of the first hidden layer
hidden_size_2 = 32  # width of the second hidden layer
load = True         # resume from saved model/optimizer state when present
# Orientation error (radians) below which a frame counts as "done" (1 degree).
rotation_eps = 1 / 180 * math.pi
model_name = f'2layer_{hidden_size}_{hidden_size_2}'
class Trainer:
    """Training loop: simulates batches of reorientation episodes with
    `Simulation` and optimizes `Policy` by gradient descent on the
    accumulated orientation-error penalty."""

    def __init__(self):
        global load
        # Deferred imports: these modules select a torch device at import time.
        from policy import Policy
        from simulation import Simulation
        from optimizer import Yeet, andt
        self.policy = Policy(hidden_size, hidden_size_2).to(device)
        self.simulation = Simulation(self.policy)
        self.optimizer = Yeet(self.policy.parameters())
        # self.optimizer = Adadelta(self.policy.parameters())
        self.andt = andt
        self.max_reward = 0
        if load and not os.path.exists(f"{model_name}.state"):
            print("not loading cuz it doesnt exist")
            load = False
        # Assume a loaded policy can already reach the target orientation;
        # this toggles the stricter per-frame reward clamp in episode().
        self.reachesEnd = load
        if load:
            # strict=False: tolerate missing/unexpected keys in the checkpoint.
            self.policy.load_state_dict(torch.load(model_name + '.mdl'), False)
            self.optimizer.load_state_dict(torch.load(model_name + '.state'))
            # Override the persisted hyper-parameters after resuming.
            for group in self.optimizer.param_groups:
                group['rho'] = 0.5
                group['lr'] = 0.0002

    def train(self):
        # Train until any key is pressed (msvcrt: Windows-only), then persist.
        while not msvcrt.kbhit():
            self.episode()
        torch.save(self.policy.state_dict(), model_name + '.mdl')
        torch.save(self.optimizer.state_dict(), model_name + '.state')

    def episode(self):
        """One optimization step over a freshly randomized batch."""
        self.simulation.random_state()
        # reward accumulates the (non-positive) orientation-error penalty;
        # framesDone counts consecutive finished frames per batch element.
        reward = torch.zeros((self.simulation.o.shape[0],), device=device)
        framesDone = torch.zeros((self.simulation.o.shape[0],), device=device)
        # profile()
        # sys.exit()
        for i in range(steps):
            self.simulation.step(delta_time)
            # diff > 0 once the orientation error drops below rotation_eps.
            diff = rotation_eps - self.simulation.error()
            # reward *= 0.8
            # reward += diff.clamp(max=0)
            # Penalize remaining error; once the policy reliably reaches the
            # target, cap the per-frame penalty to emphasize speed instead.
            reward += diff.clamp(max=0, min=-rotation_eps/2 if self.reachesEnd else None)
            finished = (diff > 0).float()
            # reward = diff.clamp(max=0)
            # framesDone resets to 0 whenever a frame is not finished.
            framesDone += 1
            framesDone *= finished
            # reward = finished * (reward + 1)
            # if i == steps-1:
            #     framesDone = reward.clone().detach()
            # reward += diff.clamp(max=0) / rotation_eps
        # reward = framesDone
        trace(((steps - framesDone) * delta_time * 120).mean(0).item(), reset_on_parent_change=False, key='game frames to destination')
        # Fraction of the batch that never settled at the target.
        failed = (framesDone == 0).float().mean(0).item()
        self.reachesEnd = failed < 0.2
        trace(failed, reset_on_parent_change=False, key='amount failed')
        # reward[:, steps - 1] = self.andt(reward[:, steps - 1])
        # for i in reversed(range(steps - 1)):
        #     reward[:, i] = self.andt(reward[:, i], reward[:, i+1])
        loss = reward.mean(0).neg()
        # average_reward = sum(reward[:, steps - 1]) / len(reward[:, steps - 1])
        # if average_reward.item() > self.max_reward:
        #     self.max_reward = average_reward.item()
        #     torch.save(self.policy.state_dict(), f'out/{model_name}_{round(self.max_reward, 1)}.mdl')
        #     torch.save(self.optimizer.state_dict(), f'out/{model_name}_{round(self.max_reward, 1)}.state')
        self.optimizer.zero_grad()
        loss.backward()  # spits out error
        self.optimizer.step()
        trace(loss.item(), reset_on_parent_change=False, key='loss')
        # trace((reward < 0).float().mean(0).item(), reset_on_parent_change=False, key='frame weight')
def profile():
    """Print every live torch tensor (type and size) to help hunt memory leaks."""
    print("----------------------------------------")
    print("PROFILING")
    print("----------------------------------------")
    for obj in gc.get_objects():
        # gc can hand back partially-initialized objects whose attribute
        # access raises; skip anything that errors.
        try:
            if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
                print(type(obj), obj.size())
        except Exception:  # was a bare `except:`, which also swallowed KeyboardInterrupt
            pass
if __name__ == '__main__':
    current_path = Path(__file__).absolute().parent
    sys.path.insert(0, str(current_path.parent.parent))  # this is for first process imports
    # Anomaly detection gives precise stack traces for NaN/inf gradients.
    torch.autograd.set_detect_anomaly(True)
    trainer = Trainer()
    trainer.train()
|
L0laapk3/RLBot-ReorientML | simulation_agent.py | <gh_stars>0
from rlbot.utils.game_state_util import GameState, Vector3, CarState, Physics, Rotator
from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket
from rlutilities.linear_algebra import euler_to_rotation, dot, transpose, look_at, vec2, vec3, norm, normalize, angle_between, orthogonalize
from rlutilities.simulation import Ball, Field, Game, Car
from rlutilities.mechanics import ReorientML
from policy import Policy
from simulation import Simulation
import math
import torch
import gc
from device import device
from random import random, seed
# Must match the architecture the checkpoint was trained with.
hidden_size = 32
hidden_size_2 = 32
model_name = f'2layer_{hidden_size}_{hidden_size_2}'
# Load the trained policy weights once at import time.
model = torch.load(model_name + '.mdl')
seed(0)  # deterministic target orientations across runs
class TestAgent(BaseAgent):
    """RLBot agent that flies the car with the trained reorientation policy
    and scores how quickly it reaches randomized target orientations."""

    def __init__(self, name, team, index):
        self.name = name
        self.index = index
        self.policy = Policy(hidden_size, hidden_size_2).to(device)
        self.policy.load_state_dict(model)
        self.simulation = Simulation(self.policy)
        self.controls = SimpleControllerState()
        self.finished = False
        self.FPS = 120
        # Tick bookkeeping (see handleTime()).
        self.lastTime = 0
        self.realLastTime = 0
        self.currentTick = 0
        self.skippedTicks = 0
        self.doneTicks = 0
        self.ticksNowPassed = 0
        # Dodge state fed to the policy.
        self.lastDodgeTick = -math.inf
        self.lastDodgePitch = 0
        self.lastDodgeRoll = 0
        self.lastReset = 0
        self.target = vec3(1, 0, 0)
        self.up = vec3(0, 0, 1)
        self.targetOrientation = look_at(self.target, self.up)
        self.lastDoneTick = 0
        self.totalScore = 0
        self.tests = 0
        self.stage = 0
        # BUG FIX: frames_done was only ever assigned inside get_output()
        # on the "error too large" branch, so the first tick with a small
        # error raised AttributeError. Initialize it here.
        self.frames_done = 0

    def initialize_agent(self):
        self.game = Game()
        self.game.set_mode("soccar")
        self.car = self.game.cars[self.index]
        self.reorientML = ReorientML(self.car)

    # Class-level default; never assigned on instances in this file.
    game_state = None

    def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
        self.renderer.begin_rendering()
        self.game.read_game_information(packet, self.get_rigid_body_tick(), self.get_field_info())

        if self.lastReset + 300 < self.currentTick:
            # 300 ticks (2.5 s) per trial: score this one and start the next.
            if self.tests > 0:
                score = min(300, self.currentTick - self.lastDoneTick)
                self.totalScore += score
                print(self.tests, score, round(self.totalScore / self.tests, 2))
            self.tests += 1
            self.lastReset = self.currentTick
            # New random target orientation; `up` is made orthogonal to `target`.
            self.target = vec3(2*random()-1, 2*random()-1, 2*random()-1)
            self.up = orthogonalize(vec3(2*random()-1, 2*random()-1, 2*random()-1), self.target)
            self.targetOrientation = look_at(self.target, self.up)
            # Teleport to the ground so the jump/dodge sequence can restart.
            car_state = CarState(physics=Physics(location=Vector3(0, 1000, 17), velocity=Vector3(0, 0, 0), rotation=Rotator(0, 0, 0), angular_velocity=Vector3(0, 0, 0)))
            self.set_game_state(GameState(cars={self.index: car_state}))
            self.stage = 0
            self.lastDodgeTick = -math.inf
            # print("TELEPORT TO GROUND")
            return self.controls
        else:
            # Pin the car at a fixed spot in the air while it reorients.
            car_state = CarState(physics=Physics(location=Vector3(0, 0, 400), velocity=Vector3(0, 0, 0)))
            self.set_game_state(GameState(cars={self.index: car_state}))

        if self.stage <= 5:
            self.stage += 1
        if self.stage == 6:
            # Start a dodge in a random left/right direction.
            self.dodgeDirection = normalize(vec2(0, 2*random()-1))
            self.controls.jump = True#random() > 0.5
            if self.controls.jump:
                self.lastDodgeTick = self.currentTick
                self.controls.roll, self.controls.pitch, self.controls.yaw = self.dodgeDirection[0], self.dodgeDirection[1], 0
                self.stage += 1
                return self.controls
        else:
            self.controls.jump = False

        self.packet = packet
        self.handleTime()

        car = packet.game_cars[self.index]
        position = vec3(car.physics.location.x, car.physics.location.y, car.physics.location.z)
        self.renderer.draw_line_3d(car.physics.location, position + 300 * normalize(self.target), self.renderer.yellow())
        self.renderer.draw_line_3d(car.physics.location, position + 300 * normalize(self.up), self.renderer.pink())

        carOrientation = rotationToOrientation(car.physics.rotation)
        ang = parseVector(car.physics.angular_velocity)

        # Track the last tick where the car was NOT within 1 degree of target.
        if angle_between(carOrientation, self.targetOrientation) > 1 / 180 * math.pi:
            self.lastDoneTick = self.currentTick

        # Express orientation and angular velocity relative to the target frame.
        o_rlu = dot(transpose(self.targetOrientation), carOrientation)
        w_rlu = dot(transpose(self.targetOrientation), ang)
        o = torch.tensor([[o_rlu[i, j] for j in range(3)] for i in range(3)])[None, :].float().to(device)
        w = torch.tensor([w_rlu[i] for i in range(3)])[None, :].float().to(device)

        # Remaining seconds of the post-dodge pitch lock (.95 s) and of the
        # dodge torque itself (.65 s).
        noPitchTime = max(0, (self.lastDodgeTick - self.currentTick)/120 + .95)
        dodgeTime = max(0, (self.lastDodgeTick - self.currentTick)/120 + .65)
        if dodgeTime == 0:
            self.dodgeDirection = vec2(0, 0)
        noPitchTime = torch.tensor([noPitchTime]).float().to(device)
        dodgeTime = torch.tensor([dodgeTime]).float().to(device)
        dodgeDirection = torch.tensor([self.dodgeDirection[i] for i in range(2)])[None, :].float().to(device)

        self.simulation.o = o
        self.simulation.w = w
        self.simulation.noPitchTime = noPitchTime
        self.simulation.dodgeTime = dodgeTime
        self.simulation.dodgeDirection = dodgeDirection

        if True:  # toggle: ML policy (True) vs RLUtilities ReorientML (False)
            rpy = self.policy(
                self.simulation.o.permute(0, 2, 1),
                self.simulation.w_local(),
                self.simulation.noPitchTime,
                self.simulation.dodgeTime,
                self.simulation.dodgeDirection
            )[0]
            self.controls.roll, self.controls.pitch, self.controls.yaw = rpy
        else:
            self.reorientML.target_orientation = self.targetOrientation
            self.reorientML.step(1/self.FPS)
            self.controls.roll, self.controls.pitch, self.controls.yaw = self.reorientML.controls.roll, self.reorientML.controls.pitch, self.reorientML.controls.yaw

        # Declare success after 10 consecutive frames with a tiny error.
        if self.simulation.error()[0].item() < 0.01:
            self.frames_done += 1
        else:
            self.frames_done = 0
        if self.frames_done >= 10:
            self.finished = True

        self.renderer.end_rendering()
        return self.controls

    def get_mechanic_controls(self):
        return self.mechanic.step(self.info)

    def handleTime(self):
        # this is the most conservative possible approach, but it could lead to having a "backlog" of ticks if seconds_elapsed
        # isnt perfectly accurate.
        if not self.lastTime:
            self.lastTime = self.packet.game_info.seconds_elapsed
        else:
            # Same timestamp as last call: no new tick arrived.
            if self.realLastTime == self.packet.game_info.seconds_elapsed:
                return
            # Once per second, report how many ticks were processed vs skipped.
            if int(self.lastTime) != int(self.packet.game_info.seconds_elapsed):
                if self.skippedTicks > 0:
                    print(f"did {self.doneTicks}, skipped {self.skippedTicks}")
                self.skippedTicks = self.doneTicks = 0
            self.ticksNowPassed = round(max(1, (self.packet.game_info.seconds_elapsed - self.lastTime) * self.FPS))
            # NOTE(review): adds a tick count to a value in seconds; looks like
            # it should be `+ self.ticksNowPassed / self.FPS` — confirm.
            self.lastTime = min(self.packet.game_info.seconds_elapsed, self.lastTime + self.ticksNowPassed)
            self.realLastTime = self.packet.game_info.seconds_elapsed
            self.currentTick += self.ticksNowPassed
            if self.ticksNowPassed > 1:
                #print(f"Skipped {ticksPassed - 1} ticks!")
                self.skippedTicks += self.ticksNowPassed - 1
            self.doneTicks += 1
def parseVector(u):
    """Convert an RLBot Vector3-like struct to an RLUtilities vec3."""
    return vec3(u.x, u.y, u.z)
def rotationToOrientation(rotation):
    """Build a 3x3 orientation matrix from RLBot Euler angles.

    euler_to_rotation expects components in (pitch, yaw, roll) order.
    """
    return euler_to_rotation(vec3(
        rotation.pitch,
        rotation.yaw,
        rotation.roll
    ))
|
L0laapk3/RLBot-ReorientML | policy.py | <gh_stars>0
import torch
from torch import Tensor
from torch.nn import Module, Linear, ReLU
import sys
class Actor(Module):
    """Two-hidden-layer MLP mapping the 16-value flight state to 3 controls.

    Input features: flattened 3x3 relative orientation (9), local angular
    velocity (3), two dodge timers (2) and the dodge direction (2) = 16.
    Output: raw (roll, pitch, yaw) control values.
    """

    def __init__(self, hidden_size, hidden_size_2):
        super().__init__()
        self.linear1 = Linear(16, hidden_size)
        self.linear2 = Linear(hidden_size, hidden_size_2)
        self.linear3 = Linear(hidden_size_2, 3)
        # NOTE: attribute is historically named "softsign" but holds a ReLU;
        # kept for compatibility with existing code.
        self.softsign = ReLU()

    def forward(self, o: Tensor, w: Tensor, noPitchTime: Tensor, dodgeTime: Tensor, dodgeDirection: Tensor):
        # Concatenate all state tensors into one flat feature vector per row.
        features = torch.cat(
            (o.flatten(1, 2), w, noPitchTime[:, None], dodgeTime[:, None], dodgeDirection),
            1,
        )
        hidden = self.softsign(self.linear1(features))
        hidden = self.softsign(self.linear2(hidden))
        return self.linear3(hidden)
class Policy(Module):
    """Wraps `Actor`, optionally averaging over 4 reflection symmetries.

    Aerial control is symmetric under mirroring the car's frame across two
    axes.  With `symmetry` enabled, the input is mirrored into 4 variants,
    the actor is evaluated on all of them in one batch, each output is
    mirrored back, and the 4 results are averaged.
    """

    def __init__(self, hidden_size, hidden_size_2):
        super().__init__()
        self.actor = Actor(hidden_size, hidden_size_2)
        self.symmetry = True

    def forward(self, o: Tensor, w: Tensor, noPitchTime: Tensor, dodgeTime: Tensor, dodgeDirection: Tensor):
        if self.symmetry:
            # Replicate every input 4 times along a new symmetry axis.
            o = o[:, None, :, :].repeat(1, 4, 1, 1)
            w = w[:, None, :].repeat(1, 4, 1)
            noPitchTime = noPitchTime[:, None].repeat(1, 4)
            dodgeTime = dodgeTime[:, None].repeat(1, 4)
            dodgeDirection = dodgeDirection[:, None, :].repeat(1, 4, 1)
            # BUG FIX: the `1::2` slices below had been mangled into
            # IPv6-address-like text (`1::2` parses as an IPv6 shorthand, so
            # an anonymizing scrubber replaced it).  Restored from the mirror
            # pattern: copies 0,1 reflect axis 0 (D = diag(-1,1,1)); copies
            # 1,3 reflect axis 1 (D = diag(1,-1,1)).
            # Orientation: conjugate by D (negate the axis' column and row).
            o[:, 0:2, :, 0].neg_()
            o[:, 1::2, :, 1].neg_()
            o[:, 0:2, 0].neg_()
            o[:, 1::2, 1].neg_()
            # Angular velocity is a pseudo-vector: the non-reflected
            # components flip sign.
            w[:, 0:2, 1].neg_()
            w[:, 1::2, 0].neg_()
            w[:, 0:2, 2].neg_()
            w[:, 1::2, 2].neg_()
            # Dodge direction is (roll, pitch).
            dodgeDirection[:, 0:2, 1].neg_()
            dodgeDirection[:, 1::2, 0].neg_()
            # Evaluate the actor on all 4 variants as one flat batch.
            rpy: Tensor = self.actor(o.flatten(0, 1), w.flatten(0, 1), noPitchTime.flatten(0, 1), dodgeTime.flatten(0, 1), dodgeDirection.flatten(0, 1)).view(-1, 4, 3)
            # Mirror each output back into the original frame.
            rpy[:, 0:2, 1].neg_()
            rpy[:, 1::2, 0].neg_()
            rpy[:, 0:2, 2].neg_()
            rpy[:, 1::2, 2].neg_()
            return torch.clamp(rpy.mean(1), -1, 1)
        else:
            rpy: Tensor = self.actor(o, w, noPitchTime, dodgeTime, dodgeDirection)
            return torch.clamp(rpy, -1, 1)
|
L0laapk3/RLBot-ReorientML | device.py | <gh_stars>0
import torch
import __main__
# Train on the GPU; everything else (inference inside the bot process) on CPU.
# NOTE(review): compares the raw __main__.__file__ string, which only matches
# when training is launched from the repo directory (no path prefix) — verify.
if __main__.__file__ == "train_aerial_turn.py":
    device = torch.device("cuda:0")
else:
    device = torch.device("cpu")
L0laapk3/RLBot-ReorientML | torch_script.py | <reponame>L0laapk3/RLBot-ReorientML
import torch
from torch.onnx.utils import export
import sys
from pathlib import Path
import numpy
if __name__ == "__main__":
    current_path = Path(__file__).absolute().parent
    sys.path.insert(0, str(current_path.parent.parent))  # this is for first process imports
    from mechanic.aerial_turn_ml.policy import Policy

    # NOTE(review): this Policy is imported from mechanic.aerial_turn_ml and
    # constructed with a single hidden size; the local policy.py takes two —
    # confirm which module is intended.
    policy = Policy(20)
    policy.load_state_dict(torch.load("full_rotation_20.mdl"))

    # print(policy.actor.linear1.weight[3, 2])
    # print(policy.actor.linear1.bias[4])
    # print(policy.actor.linear2.weight[2, 6])
    #
    # print(torch.mm(policy.actor.linear1.weight, torch.ones(12, 1)))

    # Sanity check: run the policy once on dummy inputs.
    print(policy(torch.ones(1,3,3), torch.ones(1,3)))

    # example = torch.rand(1, 3, 3), torch.rand(1, 3)
    # torch_script_module = torch.jit.trace(policy, example)
    #
    # torch_script_module.save('orientation.pt')
    # export(policy, example, "full_rotation_20.onnx", verbose=True)
    # data = [
    #     policy.actor.linear1.weight.detach().numpy().flatten('F'),
    #     policy.actor.linear1.bias.detach().numpy().flatten('F'),
    #     policy.actor.linear2.weight.detach().numpy().flatten('F'),
    # ]
    #
    # data = numpy.concatenate(data)
    #
    # numpy.memmap('orientation.bin', dtype=numpy.float32, mode='w+', shape=data.shape)[:] = data[:]
    # print(data.shape)
|
L0laapk3/RLBot-ReorientML | optimizer.py | import torch
from torch.optim.optimizer import Optimizer
class Yeet(Optimizer):
    """Implements Yeet algorithm by Hytak.

    A quasi-Newton-flavored first-order method: each parameter's step is the
    gradient scaled by an elementwise secant estimate of the inverse Hessian
    diagonal, limited by a trust region derived from the previous step.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        rho (float, optional): rho = 1 gives SGD, rho = 0 is a bit like newtons method (default: 0.5)
        lr (float, optional): the initial learning rate (default: 1e-6)
    """

    def __init__(self, params, lr=1e-6, rho=0.5):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= rho <= 1.0:
            raise ValueError("Invalid rho value: {}".format(rho))
        defaults = dict(lr=lr, rho=rho)
        super(Yeet, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        with torch.no_grad():
            for group in self.param_groups:
                for p in group['params']:
                    if p.grad is None:
                        continue
                    if p.grad.is_sparse:
                        raise RuntimeError('Yeet does not support sparse gradients')
                    state = self.state[p]
                    rho, lr = group['rho'], group['lr']
                    # State initialization: remember the previous gradient and
                    # parameter values so a secant estimate is possible.
                    if len(state) == 0:
                        state['previous_g'] = p.grad.clone()
                        state['previous_p'] = p.clone()
                    # Elementwise secant estimate of the inverse Hessian
                    # diagonal: |delta p| / |delta g|.
                    delta_g = (p.grad - state['previous_g']).abs()
                    delta_p = (p - state['previous_p']).abs()
                    inv_hessian = delta_p / delta_g
                    # 0/0 (no movement since last step) gives NaN; treat that
                    # as "no curvature information".
                    inv_hessian = torch.where(torch.isnan(inv_hessian), torch.zeros_like(p), inv_hessian)
                    step = p.grad * (inv_hessian + lr)
                    # Trust region: cap each component at 1/rho times the
                    # previous movement plus an lr-scaled gradient term.
                    if rho != 0:
                        max_step = delta_p / rho + lr * p.grad.abs()
                        step = torch.where(step < -max_step, -max_step, step)
                        step = torch.where(step > max_step, max_step, step)
                    state['previous_g'] = p.grad.clone()
                    state['previous_p'] = p.clone()
                    p.sub_(step)
        return loss
def andt(*t: torch.Tensor):
    """Soft elementwise AND across tensors.

    Per element: 1 when every input is positive, the single non-positive
    value when exactly one input fails, and 0 when several fail.
    """
    stacked = torch.stack(t)
    failing = stacked <= 0
    num_failing = failing.sum(0)
    # Sum of only the failing values (zeros elsewhere).
    failing_sum = torch.where(failing, stacked, torch.zeros_like(stacked)).sum(0)
    return failing_sum * (num_failing == 1).float() + (num_failing == 0).float()
|
L0laapk3/RLBot-ReorientML | base_mechanic.py | <gh_stars>0
from rlbot.agents.base_agent import SimpleControllerState
class BaseMechanic:
    """Common interface for car mechanics: holds the current controller state
    and a flag set once the mechanic has completed."""

    def __init__(self):
        self.controls = SimpleControllerState()
        self.finished = False  # subclasses set True when the mechanic is done

    def step(self, *args) -> SimpleControllerState:
        """Advance one tick and return updated controls; subclasses override."""
        raise NotImplementedError
|
L0laapk3/RLBot-ReorientML | simulation.py | import torch
from torch import Tensor
from torch.distributions.normal import Normal
import gc
from policy import Policy
from device import device
# ??  (original author note) — moment-of-inertia divisor used in step().
j = 10.5
# air control torque coefficients
t = torch.tensor([-400.0, -130.0, 95.0], dtype=torch.float, device=device)
# Boolean 3x3 identity mask; used in error() to select the matrix diagonal.
m = torch.diag(torch.ones(3)).bool().to(device)
identity = torch.diag(torch.ones(3)).float()[None, :, :].to(device)
# Rocket League's angular speed cap (rad/s).
w_max = 5.5
batch_size = 5000
# Keeps acos arguments strictly inside (-1, 1) to avoid infinite gradients.
meps = 1 - 1e-5
class Simulation:
    """Differentiable batched simulation of Rocket League aerial rotation.

    State is held as batched tensors: `o` is (batch, 3, 3) orientation
    matrices relative to the target frame, `w` is (batch, 3) angular
    velocities in world coordinates.
    """

    o: Tensor = None
    w: Tensor = None

    def __init__(self, policy: Policy):
        self.policy = policy

    def random_state(self):
        """Sample a fresh random batch of orientations, spins and dodge states."""
        # Random orthonormal frame via two cross products.
        x_axis = Normal(0, 1).sample((batch_size, 3)).to(device)
        y_axis = Normal(0, 1).sample((batch_size, 3)).to(device)
        z_axis = torch.cross(x_axis, y_axis, dim=1)
        y_axis = torch.cross(z_axis, x_axis, dim=1)
        self.o = torch.stack((x_axis, y_axis, z_axis), dim=1)
        self.o = self.o / torch.norm(self.o, dim=2, keepdim=True)
        # Random spin direction with magnitude uniform in [0, w_max).
        self.w = Normal(0, 1).sample((batch_size, 3)).to(device)
        self.w = self.w / torch.norm(self.w, dim=1, keepdim=True)
        self.w = self.w * torch.rand((batch_size, 1), device=device) * w_max
        # Half the batch starts right after a dodge: .95 s pitch lock with
        # .65 s of dodge torque remaining.
        willDodge = torch.randint(2, (batch_size,), device=device)
        self.noPitchTime = (willDodge == 1) * .95
        self.dodgeTime = (self.noPitchTime - .3).clamp(min=0)
        # Dodge direction (roll, pitch): mode 0 = pure roll sign, mode 1 =
        # pure pitch sign, modes 2-3 = random unit direction.
        dodgeMode = torch.randint(4, (batch_size,), device=device)
        self.dodgeDirection = Normal(0, 1).sample((batch_size, 2)).to(device)  # roll pitch
        self.dodgeDirection /= torch.norm(self.dodgeDirection, dim=1, keepdim=True)
        self.dodgeDirection[:, 0] = self.dodgeDirection[:, 0] * (dodgeMode >= 2) + (dodgeMode == 0) * self.dodgeDirection[:, 0].sign()
        self.dodgeDirection[:, 1] = self.dodgeDirection[:, 1] * (dodgeMode >= 2) + (dodgeMode == 1) * self.dodgeDirection[:, 1].sign()
        # No dodge in progress -> zero direction.
        self.dodgeDirection *= (self.dodgeTime > 0.01/120)[:, None]

    def simulate(self, steps: int, dt: float):
        """Run `steps` policy-controlled physics steps of length `dt`."""
        for _ in range(steps):
            self.step(dt)

    def w_local(self):
        """Angular velocity expressed in the car's local frame (o^T w)."""
        return torch.sum(self.o * self.w[:, :, None], 1)

    def step(self, dt):
        """Advance the whole batch one tick using the policy's controls."""
        w_local = self.w_local()
        rpy = self.policy(self.o.permute(0, 2, 1), w_local, self.noPitchTime, self.dodgeTime, self.dodgeDirection)
        # air damping torque coefficients; pitch/yaw damping shrinks while
        # the corresponding control input is saturated.
        h = torch.stack((
            torch.full_like(rpy[:, 0], -50.0),
            -30.0 * (1.0 - rpy[:, 1].abs()),
            -20.0 * (1.0 - rpy[:, 2].abs())
        ), dim=1)
        angularAcc = t[None, :] * rpy
        # Pitch input is ignored while the post-dodge pitch lock is active.
        angularAcc[:, 1] *= self.noPitchTime < 0.01/120
        dodge = self.dodgeTime > 0.01/120
        # Holding pitch against the dodge direction cancels the dodge torque.
        cancel = 1 - (rpy[:, 1] * -self.dodgeDirection[:, 1].sign()).clamp(min=0)
        angularAcc[:, 0] += self.dodgeDirection[:, 0] * 260 * dodge
        angularAcc[:, 1] += self.dodgeDirection[:, 1] * 224 * dodge * cancel
        # Integrate: local-frame torques rotated to world, then rotate o.
        self.w = self.w + torch.sum(self.o * (angularAcc + h * w_local)[:, None, :], 2) * (dt / j)
        self.o = torch.sum(self.o[:, None, :, :] * axis_to_rotation(self.w * dt)[:, :, :, None], 2)
        # Clamp |w| to the game's angular speed cap.
        self.w = self.w / torch.clamp_min(torch.norm(self.w, dim=1) / w_max, 1)[:, None]
        self.noPitchTime -= dt
        self.noPitchTime.clamp_(min=0)
        self.dodgeTime -= dt
        self.dodgeTime.clamp_(min=0)
        self.dodgeDirection *= (self.dodgeTime > 0.01/120)[:, None]

    def error(self):
        """Rotation angle (radians) between the current orientation and the
        target (identity), via the trace: angle = acos((tr(o) - 1) / 2)."""
        # BUG FIX: removed a duplicated statement that recomputed the trace
        # selection and discarded the result (dead code, wasted work).
        return torch.acos(meps * 0.5 * (torch.sum(torch.sum(self.o[:, :, None, :] *
                                                            identity[:, None, :, :], 3)[:, m], 1) - 1.0))
def axis_to_rotation(omega: Tensor):
    """Batched Rodrigues formula: map rotation vectors (axis * angle) to
    3x3 rotation matrices.

    NOTE(review): a zero rotation vector gives norm 0 and a NaN matrix —
    callers appear to guarantee nonzero |omega|; confirm.
    """
    norm_omega = torch.norm(omega, dim=1)
    u = omega / norm_omega[:, None]  # unit rotation axis
    c = torch.cos(norm_omega)
    s = torch.sin(norm_omega)
    # R = (1 - c) u u^T + c I + s [u]x  (the skew term is built via torch.cross
    # of the scaled identity rows with the axis).
    result = u[:, :, None] * u[:, None, :] * (-c[:, None, None] + 1.0)
    result += c[:, None, None] * torch.diag(torch.ones(3, device=device))[None, :, :]
    result += torch.cross(s[:, None, None] * torch.diag(torch.ones(3, device=device))[None, :, :],
                          u[:, None, :].repeat(1, 3, 1), dim=2)
    return result
def profile():
    """Print every live torch tensor (type and size) to help hunt memory leaks."""
    print("----------------------------------------")
    print("PROFILING")
    print("----------------------------------------")
    for obj in gc.get_objects():
        # gc can hand back partially-initialized objects whose attribute
        # access raises; skip anything that errors.
        try:
            if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
                print(type(obj), obj.size())
        except Exception:  # was a bare `except:`, which also swallowed KeyboardInterrupt
            pass
johnjasa/ORBIT | tests/api/wisdem/test_fixed_wisdem_api.py | """Tests for the Monopile Wisdem API"""
__author__ = ["<NAME>"]
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import openmdao.api as om
from ORBIT.api.wisdem import OrbitWisdemFixed
def test_wisdem_monopile_api_default():
    """Smoke test: the ORBIT WISDEM fixed-bottom component sets up and runs
    with all-default inputs."""
    prob = om.Problem()
    prob.model = OrbitWisdemFixed()
    prob.setup()
    prob.run_driver()
    # Listing inputs/outputs exercises the I/O declarations without asserting
    # on specific values.
    prob.model.list_inputs()
    prob.model.list_outputs()
|
johnjasa/ORBIT | tests/phases/design/test_cable.py | <filename>tests/phases/design/test_cable.py
"""Provides a testing framework for the `Cable` class."""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import copy
import itertools
import numpy as np
import pytest
from ORBIT.phases.design._cables import Cable, Plant
# Cable specs keyed by scenario: "empty" must fail validation, "passes" is a
# complete, valid 33 kV array cable definition.
cables = {
    "empty": {},
    "passes": {
        "conductor_size": 400,
        "current_capacity": 610,
        "rated_voltage": 33,
        "ac_resistance": 0.06,
        "inductance": 0.375,
        "capacitance": 225,
        "linear_density": 35,
        "cost_per_km": 300000,
        "name": "passes",
    },
}

# Plant configs. "*_spacing" values are rotor-diameter multiples that the
# model converts to km; "*_distance" values are already in km.
plant_config_calculate_all_ring = {
    "site": {"depth": 20},
    "plant": {
        "layout": "ring",
        "row_spacing": 7,
        "turbine_spacing": 5,
        "num_turbines": 40,
    },
    "turbine": {"rotor_diameter": 154, "turbine_rating": 10},
}

plant_config_calculate_all_grid = {
    "site": {"depth": 20},
    "plant": {
        "layout": "grid",
        "row_spacing": 7,
        "turbine_spacing": 5,
        "num_turbines": 40,
    },
    "turbine": {"rotor_diameter": 154, "turbine_rating": 10},
}

plant_config_distance_provided_ring = {
    "site": {"depth": 20},
    "plant": {
        "layout": "ring",
        "row_distance": 0.4,
        "turbine_distance": 0.1,
        "num_turbines": 40,
        "substation_distance": 0.2,
    },
    "turbine": {"rotor_diameter": 154, "turbine_rating": 10},
}

plant_config_distance_provided_grid = {
    "site": {"depth": 20},
    "plant": {
        "layout": "grid",
        "row_distance": 0.4,
        "turbine_distance": 0.1,
        "num_turbines": 40,
        "substation_distance": 0.2,
    },
    "turbine": {"rotor_diameter": 154, "turbine_rating": 10},
}

# "custom" layouts skip distance derivation entirely.
plant_config_custom = {
    "site": {"depth": 20},
    "plant": {
        "layout": "custom",
        "row_distance": 0.4,
        "turbine_distance": 0.1,
        "num_turbines": 40,
        "substation_distance": 0.2,
    },
    "turbine": {"rotor_diameter": 154, "turbine_rating": 10},
}
def test_cable_creation():
    """A fully-specified cable instantiates and stores each required input."""
    cable = Cable(cables["passes"])

    assert cable
    for r in cable.required:
        assert getattr(cable, r, None) == cables["passes"][r]
def test_cable_required_inputs():
    """An empty cable spec must be rejected with ValueError."""
    with pytest.raises(ValueError):
        Cable(cables["empty"])
def test_power_factor():
    """Sweep cable electrical parameters and check the derived power factor
    is always a valid value in [0, 1]."""
    c = copy.deepcopy(cables["passes"])
    results = []
    for i in itertools.product(
        range(100, 1001, 150),  # conductor size
        np.arange(0.01, 0.91, 0.1),  # ac_resistance
        np.arange(0, 1, 0.15),  # inductance
        range(100, 1001, 150),  # capacitance
    ):
        c["conductor_size"] = i[0]
        c["ac_resistance"] = i[1]
        c["inductance"] = i[2]
        c["capacitance"] = i[3]
        cable = Cable(c)
        results.append(cable.power_factor)

    # A pytest assert gives a useful failure report; the original raised a
    # bare Exception on failure.
    assert all(0 <= a <= 1 for a in results), "Invalid Power Factor."
@pytest.mark.parametrize(
    "config",
    (
        plant_config_calculate_all_ring,
        plant_config_calculate_all_grid,
        plant_config_distance_provided_ring,
        plant_config_distance_provided_grid,
    ),
    ids=["calculate_ring", "calculate_grid", "provided_ring", "provided_grid"],
)
def test_plant_creation(config):
    """`Plant` derives turbine/row/substation distances correctly whether
    spacings (rotor-diameter multiples) or distances (km) are provided."""
    plant = Plant(config)

    assert plant.turbine_rating == config["turbine"]["turbine_rating"]
    assert plant.site_depth == config["site"]["depth"] / 1000.0  # m -> km
    assert plant.layout == config["plant"]["layout"]
    assert plant.num_turbines == config["plant"]["num_turbines"]

    # Expected turbine distance: spacing is in rotor diameters, distance in km.
    if "turbine_spacing" in config["plant"]:
        td = (
            config["turbine"]["rotor_diameter"]
            * config["plant"]["turbine_spacing"]
            / 1000.0
        )
    else:
        td = config["plant"]["turbine_distance"]
    assert plant.turbine_distance == td

    if "row_spacing" in config["plant"]:
        if config["plant"]["layout"] == "grid":
            rd = (
                config["turbine"]["rotor_diameter"]
                * config["plant"]["row_spacing"]
                / 1000.0
            )
        if config["plant"]["layout"] == "ring":
            # Ring layouts ignore row spacing; rows collapse to turbine distance.
            rd = td
    else:
        rd = config["plant"]["row_distance"]
    assert plant.row_distance == rd

    if "substation_distance" in config["plant"]:
        sd = config["plant"]["substation_distance"]
    else:
        sd = td  # defaults to the turbine distance
    assert plant.substation_distance == sd
def test_custom_plant_creation():
    """A 'custom' layout computes no distances; the attributes stay None."""
    plant = Plant(plant_config_custom)

    for attr in ("row_distance", "turbine_distance", "substation_distance"):
        assert getattr(plant, attr, None) is None
|
johnjasa/ORBIT | tests/phases/install/turbine_install/test_turbine_install.py | <gh_stars>0
"""Tests for the `MonopileInstallation` class without feeder barges."""
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from copy import deepcopy
import pandas as pd
import pytest
from tests.data import test_weather
from ORBIT.library import extract_library_specs
from ORBIT.core._defaults import process_times as pt
from ORBIT.phases.install import TurbineInstallation
# Shared library configs: WTIV-only, long mobilization, and feeder-barge setups.
config_wtiv = extract_library_specs("config", "turbine_install_wtiv")
config_long_mobilize = extract_library_specs(
    "config", "turbine_install_long_mobilize"
)
config_wtiv_feeder = extract_library_specs("config", "turbine_install_feeder")
config_wtiv_multi_feeder = deepcopy(config_wtiv_feeder)
config_wtiv_multi_feeder["num_feeders"] = 2
@pytest.mark.parametrize(
    "config",
    (config_wtiv, config_wtiv_feeder, config_wtiv_multi_feeder),
    ids=["wtiv_only", "single_feeder", "multi_feeder"],
)
def test_simulation_setup(config):
    """Port is stocked with one tower section, one nacelle and three blades
    per turbine."""
    sim = TurbineInstallation(config)
    assert sim.config == config
    assert sim.env
    assert sim.port.crane.capacity == config["port"]["num_cranes"]
    assert sim.num_turbines == config["plant"]["num_turbines"]

    t = len([i for i in sim.port.items if i.type == "TowerSection"])
    assert t == sim.num_turbines

    n = len([i for i in sim.port.items if i.type == "Nacelle"])
    assert n == sim.num_turbines

    b = len([i for i in sim.port.items if i.type == "Blade"])
    assert b == sim.num_turbines * 3
@pytest.mark.parametrize(
    "config",
    (config_wtiv, config_wtiv_feeder, config_wtiv_multi_feeder),
    ids=["wtiv_only", "single_feeder", "multi_feeder"],
)
def test_vessel_creation(config):
    """The WTIV (and any feeders) are created with jacking system, crane and
    storage."""
    sim = TurbineInstallation(config)
    assert sim.wtiv
    assert sim.wtiv.jacksys
    assert sim.wtiv.crane
    assert sim.wtiv.storage

    if config.get("feeder", None) is not None:
        assert len(sim.feeders) == config["num_feeders"]

        for feeder in sim.feeders:
            assert feeder.jacksys
            assert feeder.storage
@pytest.mark.parametrize(
    "config, expected", [(config_wtiv, 72), (config_long_mobilize, 14 * 24)]
)
def test_vessel_mobilize(config, expected):
    """The logged mobilization duration (hours) honors the config override."""
    sim = TurbineInstallation(config)
    assert sim.wtiv

    mobilize = [a for a in sim.env.actions if a["action"] == "Mobilize"][0]
    assert mobilize["duration"] == expected
@pytest.mark.parametrize(
    "config",
    (config_wtiv, config_wtiv_feeder, config_wtiv_multi_feeder),
    ids=["wtiv_only", "single_feeder", "multi_feeder"],
)
@pytest.mark.parametrize(
    "weather", (None, test_weather), ids=["no_weather", "test_weather"]
)
def test_for_complete_logging(weather, config):
    """The action log is gapless per agent (time deltas match durations) and
    every action carries a cost."""
    sim = TurbineInstallation(config, weather=weather)
    sim.run()

    df = pd.DataFrame(sim.env.actions)
    df = df.assign(shift=(df["time"] - df["time"].shift(1)))

    for vessel in df["agent"].unique():
        _df = df[df["agent"] == vessel].copy()
        _df = _df.assign(shift=(_df["time"] - _df["time"].shift(1)))
        assert (_df["shift"] - _df["duration"]).abs().max() < 1e-9

    assert ~df["cost"].isnull().any()

    # Smoke-check the summary properties as well.
    _ = sim.agent_efficiencies
    _ = sim.detailed_output
@pytest.mark.parametrize(
    "config",
    (config_wtiv, config_wtiv_feeder, config_wtiv_multi_feeder),
    ids=["wtiv_only", "single_feeder", "multi_feeder"],
)
def test_for_complete_installation(config):
    """Every turbine ends the simulation with a nacelle attached."""
    sim = TurbineInstallation(config)
    sim.run()

    installed_nacelles = len(
        [a for a in sim.env.actions if a["action"] == "Attach Nacelle"]
    )
    assert installed_nacelles == sim.num_turbines
def test_kwargs():
    """Increasing any process-time keyword must lengthen the simulation."""
    sim = TurbineInstallation(config_wtiv)
    sim.run()
    baseline = sim.total_phase_time

    keywords = [
        "tower_section_fasten_time",
        "tower_section_release_time",
        "tower_section_attach_time",
        "nacelle_fasten_time",
        "nacelle_release_time",
        "nacelle_attach_time",
        "blade_fasten_time",
        "blade_release_time",
        "blade_attach_time",
        "site_position_time",
        "crane_reequip_time",
    ]

    failed = []
    for kw in keywords:
        # Bump the default by 2 hours and re-run; total time must grow.
        default = pt[kw]
        kwargs = {kw: default + 2}

        new_sim = TurbineInstallation(config_wtiv, **kwargs)
        new_sim.run()
        new_time = new_sim.total_phase_time

        # Original used `if new_time > baseline: pass / else: append`;
        # inverted to the direct condition and replaced the bare Exception
        # with a pytest assert.
        if new_time <= baseline:
            failed.append(kw)

    assert not failed, f"'{failed}' not affecting results."
def test_multiple_tower_sections():
    """Doubling tower sections doubles 'Attach Tower Section' actions, and
    the action log stays gapless."""
    sim = TurbineInstallation(config_wtiv)
    sim.run()
    baseline = len(
        [a for a in sim.env.actions if a["action"] == "Attach Tower Section"]
    )

    two_sections = deepcopy(config_wtiv)
    two_sections["turbine"]["tower"]["sections"] = 2

    sim2 = TurbineInstallation(two_sections)
    sim2.run()
    new = len(
        [a for a in sim2.env.actions if a["action"] == "Attach Tower Section"]
    )

    assert new == 2 * baseline

    # NOTE(review): the gap check below runs on `sim` (one section), not
    # `sim2` — possibly meant to validate the two-section log; confirm.
    df = pd.DataFrame(sim.env.actions)
    for vessel in df["agent"].unique():
        vl = df[df["agent"] == vessel].copy()
        vl = vl.assign(shift=(vl["time"] - vl["time"].shift(1)))
        assert (vl["shift"] - vl["duration"]).abs().max() < 1e-9
|
johnjasa/ORBIT | ORBIT/phases/install/mooring_install/mooring.py | <filename>ORBIT/phases/install/mooring_install/mooring.py
"""Installation strategies for mooring systems."""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from marmot import process
from ORBIT.core import Cargo, Vessel
from ORBIT.core.logic import position_onsite, get_list_of_items_from_port
from ORBIT.core._defaults import process_times as pt
from ORBIT.phases.install import InstallPhase
from ORBIT.core.exceptions import ItemNotFound
class MooringSystemInstallation(InstallPhase):
    """Module to model the installation of mooring systems at sea."""

    phase = "Mooring System Installation"

    #:
    expected_config = {
        "mooring_install_vessel": "dict | str",
        "site": {"depth": "m", "distance": "km"},
        "plant": {"num_turbines": "int"},
        "mooring_system": {
            "num_lines": "int",
            "line_mass": "t",
            "anchor_mass": "t",
            "anchor_type": "str (optional, default: 'Suction Pile')",
        },
    }

    def __init__(self, config, weather=None, **kwargs):
        """
        Creates an instance of `MooringSystemInstallation`.

        Parameters
        ----------
        config : dict
            Simulation specific configuration.
        weather : np.array
            Weather data at site.
        """

        super().__init__(weather, **kwargs)

        config = self.initialize_library(config, **kwargs)
        self.config = self.validate_config(config)
        self.extract_defaults()

        self.setup_simulation(**kwargs)

    def setup_simulation(self, **kwargs):
        """
        Sets up the required simulation infrastructure:
        - initializes port
        - initializes installation vessel
        - initializes mooring systems at port.
        """

        self.initialize_port()
        self.initialize_installation_vessel()
        self.initialize_components()

        depth = self.config["site"]["depth"]
        distance = self.config["site"]["distance"]

        # Registers the main installation process on the simulation env.
        install_mooring_systems(
            self.vessel,
            self.port,
            distance,
            depth,
            self.number_systems,
            **kwargs,
        )

    def initialize_installation_vessel(self):
        """Initializes the mooring system installation vessel."""

        vessel_specs = self.config.get("mooring_install_vessel", None)
        name = vessel_specs.get("name", "Mooring System Installation Vessel")

        vessel = Vessel(name, vessel_specs)
        self.env.register(vessel)

        vessel.initialize()
        # Vessel starts the simulation docked at port.
        vessel.at_port = True
        vessel.at_site = False
        self.vessel = vessel

    def initialize_components(self):
        """Initializes the Cargo components at port."""

        system = MooringSystem(**self.config["mooring_system"])
        self.number_systems = self.config["plant"]["num_turbines"]

        # NOTE(review): the same `MooringSystem` instance is placed in the
        # port once per turbine; items appear to be treated as
        # interchangeable downstream — confirm sharing one object is safe.
        for _ in range(self.number_systems):
            self.port.put(system)

    @property
    def detailed_output(self):
        """Detailed outputs of the mooring system installation."""

        outputs = {self.phase: {**self.agent_efficiencies}}

        return outputs
@process
def install_mooring_systems(vessel, port, distance, depth, systems, **kwargs):
    """
    Logic for the Mooring System Installation Vessel.

    Repeatedly loads mooring systems at port, transits to site and installs
    one full system (survey, anchor and line per mooring line) until
    `systems` installations are complete or the port runs out of items.

    Parameters
    ----------
    vessel : Vessel
        Mooring System Installation Vessel
    port : Port
    distance : int | float
        Distance between port and site (km).
    depth : int | float
        Depth at site (m).
    systems : int
        Total systems to install.
    """

    n = 0
    while n < systems:
        if vessel.at_port:
            try:
                # Get mooring systems from port.
                yield get_list_of_items_from_port(
                    vessel, port, ["MooringSystem"], **kwargs
                )

            except ItemNotFound:
                # If no items are at port and vessel.storage.items is empty,
                # the job is done
                if not vessel.storage.items:
                    vessel.submit_debug_log(
                        message="Item not found. Shutting down."
                    )
                    break

            # Transit to site
            vessel.update_trip_data()
            vessel.at_port = False
            yield vessel.transit(distance)
            vessel.at_site = True

        if vessel.at_site:
            if vessel.storage.items:
                system = yield vessel.get_item_from_storage(
                    "MooringSystem", **kwargs
                )

                # One survey/anchor/line sequence per mooring line.
                for _ in range(system.num_lines):
                    yield position_onsite(vessel, **kwargs)
                    yield perform_mooring_site_survey(vessel, **kwargs)
                    yield install_mooring_anchor(
                        vessel, depth, system.anchor_type, **kwargs
                    )
                    yield install_mooring_line(vessel, depth, **kwargs)

                # A complete mooring system has been installed.
                n += 1

            else:
                # Transit to port
                vessel.at_site = False
                yield vessel.transit(distance)
                vessel.at_port = True

    vessel.submit_debug_log(message="Mooring systems installation complete!")
@process
def perform_mooring_site_survey(vessel, **kwargs):
    """
    Task representing the time required to survey a mooring site.

    Parameters
    ----------
    vessel : Vessel
        Vessel to perform action.

    Yields
    ------
    vessel.task representing time to "Perform Mooring Site Survey".
    """

    duration = kwargs.get(
        "mooring_site_survey_time", pt["mooring_site_survey_time"]
    )
    yield vessel.task(
        "Perform Mooring Site Survey",
        duration,
        constraints=vessel.transit_limits,
        **kwargs,
    )
@process
def install_mooring_anchor(vessel, depth, _type, **kwargs):
    """
    Calculates time required to install a mooring system anchor.

    Parameters
    ----------
    vessel : Vessel
        Vessel to perform action.
    depth : int | float
        Depth at site (m).
    _type : str
        Anchor type. 'Suction Pile' or 'Drag Embedment'.

    Yields
    ------
    vessel.task representing time to install mooring anchor.

    Raises
    ------
    ValueError
        If `_type` is not a recognized anchor type.
    """

    if _type == "Suction Pile":
        key = "suction_pile_install_time"
        task = "Install Suction Pile Anchor"

    elif _type == "Drag Embedment":
        key = "drag_embed_install_time"
        task = "Install Drag Embedment Anchor"

    else:
        raise ValueError(
            f"Mooring System Anchor Type: {_type} not recognized."
        )

    # The default lookup was previously duplicated in both branches;
    # hoisted here so it is defined in exactly one place.
    fixed = kwargs.get(key, pt[key])

    # Install time scales linearly with depth on top of the fixed time.
    install_time = fixed + 0.005 * depth
    yield vessel.task(
        task, install_time, constraints=vessel.transit_limits, **kwargs
    )
@process
def install_mooring_line(vessel, depth, **kwargs):
    """
    Task representing the time required to install one mooring line.

    Parameters
    ----------
    vessel : Vessel
        Vessel to perform action.
    depth : int | float
        Depth at site (m).

    Yields
    ------
    vessel.task representing time to install mooring line.
    """

    # Line installation time scales linearly with site depth.
    duration = 0.005 * depth

    yield vessel.task(
        "Install Mooring Line",
        duration,
        constraints=vessel.transit_limits,
        **kwargs,
    )
class MooringSystem(Cargo):
    """Cargo representing one complete turbine mooring system."""

    def __init__(
        self,
        num_lines=None,
        line_mass=None,
        anchor_mass=None,
        anchor_type="Suction Pile",
        **kwargs,
    ):
        """Creates an instance of MooringSystem"""

        self.num_lines = num_lines
        self.line_mass = line_mass
        self.anchor_mass = anchor_mass
        self.anchor_type = anchor_type

        # Mooring systems are modeled as taking no deck space.
        self.deck_space = 0

    @property
    def mass(self):
        """Returns total system mass in t."""

        return self.num_lines * (self.line_mass + self.anchor_mass)

    @staticmethod
    def fasten(**kwargs):
        """Dummy method to work with `get_list_of_items_from_port`."""

        duration = kwargs.get(
            "mooring_system_load_time", pt["mooring_system_load_time"]
        )
        return "Load Mooring System", duration

    @staticmethod
    def release(**kwargs):
        """Dummy method to work with `get_list_of_items_from_port`."""

        return "", 0

    def anchor_install_time(self, depth):
        """
        Returns time to install anchor. Varies by depth.

        Parameters
        ----------
        depth : int | float
            Depth at site (m).
        """

        fixed = {"Suction Pile": 11, "Drag Embedment": 5}.get(
            self.anchor_type
        )
        if fixed is None:
            raise ValueError(
                f"Mooring System Anchor Type: {self.anchor_type} not recognized."
            )

        return fixed + 0.005 * depth
|
johnjasa/ORBIT | ORBIT/phases/design/__init__.py | <reponame>johnjasa/ORBIT
"""The design package contains `DesignPhase` and its subclasses."""
# Package metadata for the design sub-package.
__author__ = ["<NAME>", "<NAME>"]
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = ["<NAME>", "<NAME>"]
# Fixed: a missing comma between the two strings previously triggered
# implicit string-literal concatenation, collapsing both addresses into a
# single-element list.
__email__ = ["<EMAIL>", "<EMAIL>"]
from .design_phase import DesignPhase # isort:skip
from .oss_design import OffshoreSubstationDesign
from .spar_design import SparDesign
from .monopile_design import MonopileDesign
from .array_system_design import ArraySystemDesign, CustomArraySystemDesign
from .project_development import ProjectDevelopment
from .export_system_design import ExportSystemDesign
from .mooring_system_design import MooringSystemDesign
from .scour_protection_design import ScourProtectionDesign
from .semi_submersible_design import SemiSubmersibleDesign
|
johnjasa/ORBIT | ORBIT/core/port.py | <filename>ORBIT/core/port.py
"""Provides the `Port` class."""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import simpy
from ORBIT.core.exceptions import ItemNotFound
class Port(simpy.FilterStore):
    """A FilterStore representing a port's component laydown area."""

    def __init__(self, env, **kwargs):
        """
        Creates an instance of Port.

        Parameters
        ----------
        env : simpy.Environment
            SimPy environment that simulation runs on.
        """

        super().__init__(env, kwargs.get("capacity", float("inf")))

    def get_item(self, _type):
        """
        Checks self.items for an item satisfying `item.type = _type`, otherwise
        raises `ItemNotFound`.

        Parameters
        ----------
        _type : str
            Type of item to match. Checks `item.type`.

        Returns
        -------
        res.value : FilterStoreGet.value
            Returned item.

        Raises
        ------
        ItemNotFound
        """

        target = None
        for candidate in self.items:
            # Entries without a `type` attribute (e.g. raw dicts) are
            # silently skipped.
            if hasattr(candidate, "type") and candidate.type == _type:
                target = candidate
                break

        if not target:
            raise ItemNotFound(_type)

        return self.get(lambda x: x == target).value
class WetStorage(simpy.Store):
    """Storage infrastructure for floating substructures."""

    def __init__(self, env, capacity):
        """
        Creates an instance of WetStorage.

        Parameters
        ----------
        env : simpy.Environment
            SimPy environment that simulation runs on.
        capacity : int
            Number of substructures or assemblies that can be stored.
        """

        super().__init__(env, capacity)
|
johnjasa/ORBIT | tests/test_project_manager.py | <reponame>johnjasa/ORBIT
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from copy import deepcopy
import pandas as pd
import pytest
from ORBIT import ProjectManager
from tests.data import test_weather
from ORBIT.library import extract_library_specs
from ORBIT.manager import ProjectProgress
from ORBIT.core.exceptions import (
MissingInputs,
PhaseNotFound,
WeatherProfileError,
PhaseDependenciesInvalid,
)
# Shared module-level fixtures: an hourly weather profile indexed by
# datetime, the baseline project configuration, and a fully-specified
# "complete" project configuration from the test library.
weather_df = pd.DataFrame(test_weather).set_index("datetime")
config = extract_library_specs("config", "project_manager")
complete_project = extract_library_specs("config", "complete_project")
### Top Level
@pytest.mark.parametrize("weather", (None, weather_df))
def test_complete_run(weather):
    """End-to-end run with and without weather; both phases must log actions."""

    project = ProjectManager(config, weather=weather)
    project.run_project()

    logged_phases = list(pd.DataFrame(project.project_actions)["phase"])
    for expected in ("MonopileInstallation", "TurbineInstallation"):
        assert expected in logged_phases
### Module Integrations
def test_for_required_phase_structure():
    """
    Automated integration test to verify that all classes listed in
    ProjectManager.possible_phases are structured correctly.
    """

    for phase_cls in ProjectManager._install_phases:
        assert isinstance(phase_cls.expected_config, dict)

    for phase_cls in ProjectManager._design_phases:
        assert isinstance(phase_cls.expected_config, dict)
        assert isinstance(phase_cls.output_config, dict)
# TODO: Expand these tests
### Config Management
def test_phase_specific_definitions():
    """Phase-specific config entries must override the shared values."""

    project = ProjectManager(config)

    mp_config = project.create_config_for_phase("MonopileInstallation")
    assert mp_config["wtiv"]["name"] == "Phase Specific WTIV"
    assert mp_config["site"]["distance"] == 500

    tu_config = project.create_config_for_phase("TurbineInstallation")
    assert tu_config["wtiv"]["name"] == "Example WTIV"
    assert tu_config["site"]["distance"] == 50

    project.run_project()
def test_expected_config_merging():
    """
    Tests for merging of expected configs
    """

    first = {
        "site": {"distance": "float", "depth": "float"},
        "plant": {"num_turbines": "int"},
    }
    second = {
        "site": {"distance": "float", "wave_height": "float"},
        "monopile": {"diameter": "float"},
    }

    merged = ProjectManager.merge_dicts(first, second)

    assert merged == {
        "site": {
            "distance": "float",
            "depth": "float",
            "wave_height": "float",
        },
        "plant": {"num_turbines": "int"},
        "monopile": {"diameter": "float"},
    }
def test_find_key_match():
    """`find_key_match` must resolve suffixed/spaced phase name variants."""

    class SpecificTurbineInstallation:
        expected_config = {}

    TestProjectManager = deepcopy(ProjectManager)
    TestProjectManager._install_phases.append(SpecificTurbineInstallation)

    assert "SpecificTurbineInstallation" in TestProjectManager.phase_dict().keys()

    matches = [
        ("TurbineInstallation", "TurbineInstallation"),
        ("TurbineInstallation_Test", "TurbineInstallation"),
        ("TurbineInstallation Test", "TurbineInstallation"),
        ("TurbineInstallation Test_1", "TurbineInstallation"),
        ("SpecificTurbineInstallation", "SpecificTurbineInstallation"),
        ("SpecificTurbineInstallation_Test", "SpecificTurbineInstallation"),
        ("SpecificTurbineInstallation Test", "SpecificTurbineInstallation"),
        ("SpecificTurbineInstallation Test_1", "SpecificTurbineInstallation"),
    ]
    for name, expected in matches:
        assert TestProjectManager.find_key_match(name).__name__ == expected

    non_matches = [
        "DifferentTurbineInstallation",
        "Other TurbineInstallation",
        "Extra Different TurbineInstallation_1",
    ]
    for name in non_matches:
        assert TestProjectManager.find_key_match(name) is None
### Overlapping Install Phases
def test_install_phase_start_parsing():
    """Mixed start formats parse into defined starts plus dependencies."""

    mixed = deepcopy(config)
    mixed["install_phases"] = {
        "MonopileInstallation": 0,
        "TurbineInstallation": "10/22/2009",
        "ArrayCableInstallation": ("MonopileInstallation", 0.5),
    }

    project = ProjectManager(mixed, weather=weather_df)
    defined, depends = project._parse_install_phase_values(
        mixed["install_phases"]
    )

    assert len(defined) == 2
    assert len(depends) == 1
    assert defined["MonopileInstallation"] == 0
    # The date string resolves to an index into the weather profile.
    assert defined["TurbineInstallation"] == 1
def test_chained_dependencies():
    """Dependent phases start at the configured fraction of their parent."""

    chained = deepcopy(config)
    chained["spi_vessel"] = "test_scour_protection_vessel"
    chained["scour_protection"] = {"tons_per_substructure": 200}
    chained["install_phases"] = {
        "ScourProtectionInstallation": 0,
        "MonopileInstallation": ("ScourProtectionInstallation", 0.1),
        "TurbineInstallation": ("MonopileInstallation", 0.5),
    }

    project = ProjectManager(chained)
    project.run_project()
    df = pd.DataFrame(project.project_actions)

    def phase_times(name):
        # All logged action times for one phase.
        return list(df.loc[df["phase"] == name]["time"])

    sp = phase_times("ScourProtectionInstallation")
    mp = phase_times("MonopileInstallation")
    tu = phase_times("TurbineInstallation")

    assert min(sp) == 0
    assert min(mp) == (max(sp) - min(sp)) * 0.1
    assert min(tu) == (max(mp) - min(mp)) * 0.5 + min(mp)
@pytest.mark.parametrize(
    "m_start, t_start", [(0, 0), (0, 100), (100, 100), (100, 200)]
)
def test_index_starts(m_start, t_start):
    """
    Tests functionality related to passing index starts into 'install_phases' sub-dict.
    """

    indexed = deepcopy(config)
    indexed["install_phases"] = {
        "MonopileInstallation": m_start,
        "TurbineInstallation": t_start,
    }

    project = ProjectManager(indexed)
    project.run_project()
    df = pd.DataFrame(project.project_actions)

    first_mono = df.loc[df["phase"] == "MonopileInstallation"].iloc[0]
    first_turb = df.loc[df["phase"] == "TurbineInstallation"].iloc[0]

    # Compare phase start times (first action time minus its duration).
    observed = (first_turb["time"] - first_turb["duration"]) - (
        first_mono["time"] - first_mono["duration"]
    )
    assert observed == t_start - m_start
@pytest.mark.parametrize(
    "m_start, t_start, expected",
    [
        (0, 0, 0),
        (0, 1000, 1000),
        (0, "05/01/2010", 4585),
        ("03/01/2010", "03/01/2010", 0),
        ("03/01/2010", "05/01/2010", 1464),
    ],
)
def test_start_dates_with_weather(m_start, t_start, expected):
    """Date-string starts resolve to the expected hourly offsets."""

    dated = deepcopy(config)
    dated["install_phases"] = {
        "MonopileInstallation": m_start,
        "TurbineInstallation": t_start,
    }

    project = ProjectManager(dated, weather=weather_df)
    project.run_project()
    df = pd.DataFrame(project.project_actions)

    first_mono = df.loc[df["phase"] == "MonopileInstallation"].iloc[0]
    first_turb = df.loc[df["phase"] == "TurbineInstallation"].iloc[0]

    # Compare phase start times (first action time minus its duration).
    observed = (first_turb["time"] - first_turb["duration"]) - (
        first_mono["time"] - first_mono["duration"]
    )
    assert observed == expected
def test_duplicate_phase_definitions():
    """Suffixed duplicates of a phase run independently with own configs."""

    duplicated = deepcopy(config)
    duplicated["MonopileInstallation_1"] = {"plant": {"num_turbines": 5}}
    duplicated["MonopileInstallation_2"] = {
        "plant": {"num_turbines": 5},
        "site": {"distance": 100},
    }
    duplicated["install_phases"] = {
        "MonopileInstallation_1": 0,
        "MonopileInstallation_2": 800,
        "TurbineInstallation": 1600,
    }

    project = ProjectManager(duplicated)
    project.run_project()

    counts = (
        pd.DataFrame(project.project_actions)
        .groupby(["phase", "action"])
        .count()["time"]
    )
    assert counts.loc[("MonopileInstallation_1", "Drive Monopile")] == 5
    assert counts.loc[("MonopileInstallation_2", "Drive Monopile")] == 5
    assert counts.loc[("TurbineInstallation", "Attach Tower Section")] == 10
### Design Phase Interactions
def test_design_phases():
    """A design phase must populate its output into the project config."""

    design_config = deepcopy(config)

    # Add MonopileDesign plus its required inputs, and drop the
    # pre-computed monopile sub-dictionary so the design phase must
    # produce it.
    design_config["design_phases"] = ["MonopileDesign"]
    design_config["site"]["mean_windspeed"] = 9
    design_config["turbine"]["rotor_diameter"] = 200
    design_config["turbine"]["rated_windspeed"] = 10
    design_config["monopile_design"] = {}
    design_config.pop("monopile")

    project = ProjectManager(design_config)
    project.run_project()
    assert isinstance(project.config["monopile"], dict)

    # A second instantiation from the same config must also run cleanly.
    project = ProjectManager(design_config)
    project.run_project()
### Outputs
def test_resolve_project_capacity():
    """`resolve_project_capacity` derives any one of capacity, rating or
    turbine count from the other two, and rejects inconsistent inputs."""

    # Missing turbine rating
    case1 = {"plant": {"capacity": 600, "num_turbines": 40}}
    res1 = ProjectManager.resolve_project_capacity(case1)
    assert res1["plant"]["capacity"] == case1["plant"]["capacity"]
    assert res1["plant"]["num_turbines"] == case1["plant"]["num_turbines"]
    assert res1["turbine"]["turbine_rating"] == 15

    # Missing plant capacity
    case2 = {
        "plant": {"num_turbines": 40},
        "turbine": {"turbine_rating": 15},
    }
    res2 = ProjectManager.resolve_project_capacity(case2)
    assert res2["plant"]["capacity"] == 600
    assert res2["plant"]["num_turbines"] == case2["plant"]["num_turbines"]
    assert (
        res2["turbine"]["turbine_rating"]
        == case2["turbine"]["turbine_rating"]
    )

    # Missing number of turbines
    case3 = {"plant": {"capacity": 600}, "turbine": {"turbine_rating": 15}}
    res3 = ProjectManager.resolve_project_capacity(case3)
    assert res3["plant"]["capacity"] == case3["plant"]["capacity"]
    assert res3["plant"]["num_turbines"] == 40
    assert (
        res3["turbine"]["turbine_rating"]
        == case3["turbine"]["turbine_rating"]
    )

    # Test for float precision
    case4 = {
        "plant": {"capacity": 600, "num_turbines": 40},
        "turbine": {"turbine_rating": 15.0},
    }
    res4 = ProjectManager.resolve_project_capacity(case4)
    assert res4["plant"]["capacity"] == case4["plant"]["capacity"]
    assert res4["plant"]["num_turbines"] == case4["plant"]["num_turbines"]
    assert (
        res4["turbine"]["turbine_rating"]
        == case4["turbine"]["turbine_rating"]
    )

    # Non matching calculated value
    case5 = {
        "plant": {"capacity": 700, "num_turbines": 40},
        "turbine": {"turbine_rating": 15.0},
    }
    with pytest.raises(AttributeError):
        ProjectManager.resolve_project_capacity(case5)

    # Not enough information to derive the remaining values
    case6 = {"plant": {"capacity": 600}}
    res6 = ProjectManager.resolve_project_capacity(case6)
    assert res6["plant"]["capacity"] == case6["plant"]["capacity"]

    with pytest.raises(KeyError):
        _ = res6["turbine"]["turbine_rating"]

    with pytest.raises(KeyError):
        _ = res6["plant"]["num_turbines"]
### Exceptions
def test_incomplete_config():
    """Removing a required input must raise MissingInputs."""

    broken = deepcopy(config)
    del broken["site"]["depth"]

    with pytest.raises(MissingInputs):
        ProjectManager(broken).run_project()
def test_wrong_phases():
    """An unknown install phase name must raise PhaseNotFound."""

    bad_phases = deepcopy(config)
    bad_phases["install_phases"].append("IncorrectPhaseName")

    with pytest.raises(PhaseNotFound):
        ProjectManager(bad_phases).run_project()
def test_bad_dates():
    """Start dates outside the weather profile raise WeatherProfileError."""

    bad_dates = deepcopy(config)
    bad_dates["install_phases"] = {
        "MonopileInstallation": "03/01/2015",
        "TurbineInstallation": "05/01/2015",
    }

    with pytest.raises(WeatherProfileError):
        ProjectManager(bad_dates, weather=weather_df).run_project()
def test_no_defined_start():
    """At least one phase must have a concrete (non-dependent) start."""

    missing_start = deepcopy(config)
    missing_start["install_phases"] = {
        "MonopileInstallation": ("TurbineInstallation", 0.1),
        "TurbineInstallation": ("MonopileInstallation", 0.1),
    }

    with pytest.raises(ValueError):
        ProjectManager(missing_start).run_project()
def test_circular_dependencies():
    """Mutually dependent phases raise PhaseDependenciesInvalid."""

    circular = deepcopy(config)
    circular["spi_vessel"] = "test_scour_protection_vessel"
    circular["scour_protection"] = {"tons_per_substructure": 200}
    circular["install_phases"] = {
        "ScourProtectionInstallation": 0,
        "MonopileInstallation": ("TurbineInstallation", 0.1),
        "TurbineInstallation": ("MonopileInstallation", 0.1),
    }

    with pytest.raises(PhaseDependenciesInvalid):
        ProjectManager(circular).run_project()
def test_dependent_phase_ordering():
    """Dependency resolution must not rely on declaration order."""

    out_of_order = deepcopy(config)
    out_of_order["spi_vessel"] = "test_scour_protection_vessel"
    out_of_order["scour_protection"] = {"tons_per_substructure": 200}
    out_of_order["install_phases"] = {
        "ScourProtectionInstallation": ("TurbineInstallation", 0.1),
        "TurbineInstallation": ("MonopileInstallation", 0.1),
        "MonopileInstallation": 0,
    }

    project = ProjectManager(out_of_order)
    project.run_project()

    assert len(project.phase_times) == 3
def test_ProjectProgress():
    """Exercise ProjectProgress log parsing and derived milestones."""

    data = [
        ("Export System", 10),
        ("Offshore Substation", 20),
        ("Array String", 15),
        ("Array String", 25),
        ("Turbine", 5),
        ("Turbine", 10),
        ("Turbine", 15),
        ("Turbine", 20),
        ("Turbine", 25),
        ("Substructure", 6),
        ("Substructure", 9),
        ("Substructure", 14),
        ("Substructure", 22),
        ("Substructure", 26),
    ]
    progress = ProjectProgress(data)

    assert progress.parse_logs("Export System") == [10]

    turbine_times = progress.parse_logs("Turbine")
    assert len(turbine_times) == 5

    # chunk_max yields the completion time of each group of 2 turbines.
    chunked = list(progress.chunk_max(turbine_times, 2))
    assert chunked[0] == 10
    assert chunked[1] == 20
    assert chunked[2] == 25

    assert progress.complete_export_system == 20

    string_times, _ = progress.complete_array_strings
    assert string_times == [15, 26]

    energize_times, energized_turbines = progress.energize_points
    assert energize_times == [20, 26]
    assert sum(energized_turbines) == 5
def test_ProjectProgress_with_incomplete_project():
    """Milestone properties raise until the enabling phases have run."""

    project = ProjectManager(config)
    project.run_project()

    _ = project.progress.parse_logs("Substructure")
    _ = project.progress.parse_logs("Turbine")

    with pytest.raises(ValueError):
        project.progress.complete_export_system

    with pytest.raises(ValueError):
        project.progress.complete_array_strings
def test_ProjectProgress_with_complete_project():
    """All progress accessors work once every phase is installed."""

    project = ProjectManager(complete_project)
    project.run_project()

    for key in (
        "Substructure",
        "Turbine",
        "Array String",
        "Export System",
        "Offshore Substation",
    ):
        _ = project.progress.parse_logs(key)

    _ = project.progress.complete_export_system
    _ = project.progress.complete_array_strings
    _ = project.progress.energize_points

    # Uneven strings
    uneven = deepcopy(complete_project)
    uneven["plant"]["num_turbines"] = 61

    project = ProjectManager(uneven)
    project.run_project()
    _ = project.progress.energize_points
def test_monthly_expenses():
    """Monthly expenses are reported even for incomplete projects."""

    project = ProjectManager(complete_project)
    project.run_project()
    _ = project.monthly_expenses

    # Still report expenses for "incomplete" project
    partial = deepcopy(complete_project)
    partial["install_phases"].pop("TurbineInstallation")

    partial_project = ProjectManager(partial)
    partial_project.run_project()
    _ = partial_project.monthly_expenses
def test_monthly_revenue():
    """Revenue requires a complete project; incomplete ones must raise."""

    project = ProjectManager(complete_project)
    project.run_project()
    _ = project.monthly_revenue

    # Can't generate revenue with "incomplete" project
    partial = deepcopy(complete_project)
    partial["install_phases"].pop("TurbineInstallation")

    partial_project = ProjectManager(partial)
    partial_project.run_project()
    with pytest.raises(ValueError):
        _ = partial_project.monthly_revenue
def test_cash_flow():
    """Cash flow is reported even without revenue (all entries <= 0)."""

    project = ProjectManager(complete_project)
    project.run_project()
    _ = project.cash_flow

    # Revenue is unavailable for an "incomplete" project, but cash flow
    # is still reported — it then consists solely of expenses.
    partial = deepcopy(complete_project)
    partial["install_phases"].pop("TurbineInstallation")

    partial_project = ProjectManager(partial)
    partial_project.run_project()
    assert all(v <= 0 for v in partial_project.cash_flow.values())
def test_npv():
    """NPV must respond to every financial input parameter."""

    project = ProjectManager(complete_project)
    project.run_project()
    baseline = project.npv

    # Each override below must move the NPV away from the baseline.
    overrides = [
        ("ncf", 0.35),
        ("offtake_price", 70),
        ("project_lifetime", 30),
        ("discount_rate", 0.03),
        ("opex_rate", 120),
    ]
    for key, value in overrides:
        modified = deepcopy(complete_project)
        modified[key] = value

        project = ProjectManager(modified)
        project.run_project()
        assert project.npv != baseline
|
johnjasa/ORBIT | tests/core/test_port.py | <gh_stars>10-100
"""Tests for the `Port` class."""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import pytest
from marmot import Environment
from ORBIT.core import Port, Cargo
from ORBIT.core.exceptions import ItemNotFound
class SampleItem(Cargo):
    # Minimal Cargo subclass used as a stand-in port item. The tests match
    # it via `item.type == "SampleItem"` — presumably `Cargo` derives
    # `type` from the class name; TODO confirm against ORBIT.core.Cargo.
    def __init__(self):
        pass
def test_port_creation():
    """Items put into the port are retrievable by their `type` attribute."""

    port = Port(Environment())
    cargo = SampleItem()

    port.put(cargo)
    port.put(cargo)

    matching = [i for i in port.items if i.type == "SampleItem"]
    assert len(matching) == 2
def test_get_item():
    """get_item removes a matching item; missing types raise ItemNotFound."""

    port = Port(Environment())
    cargo = SampleItem()

    for _ in range(2):
        port.put(cargo)

    assert port.get_item("SampleItem") == cargo
    assert len(port.items) == 1

    # Entries without a `.type` attribute must be skipped safely.
    port.put({"type": "Not type Cargo"})
    with pytest.raises(ItemNotFound):
        _ = port.get_item("WrongItem")

    _ = port.get_item("SampleItem")
    with pytest.raises(ItemNotFound):
        _ = port.get_item("SampleItem")
|
johnjasa/ORBIT | ORBIT/core/components.py | <filename>ORBIT/core/components.py
"""Provides the `Crane` class."""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import simpy
from ORBIT.core._defaults import process_times as pt
from ORBIT.core.exceptions import (
ItemNotFound,
CargoMassExceeded,
DeckSpaceExceeded,
InsufficientCable,
)
# TODO: __str__ methods for Components
class Crane:
    """Base Crane Class"""

    def __init__(self, crane_specs):
        """
        Creates an instance of Crane.

        Parameters
        ----------
        crane_specs : dict
            Dictionary containing crane system specifications.
        """

        self.extract_crane_specs(crane_specs)

    def extract_crane_specs(self, crane_specs):
        """
        Extracts and defines crane specifications.

        Parameters
        ----------
        crane_specs : dict
            Dictionary of crane specifications.
        """

        # Physical dimensions (None when unspecified).
        self.boom_length = crane_specs.get("boom_length", None)
        self.radius = crane_specs.get("radius", None)

        # Operational parameters.
        self.max_lift = crane_specs.get("max_lift", None)
        self.max_hook_height = crane_specs.get("max_hook_height", None)
        self.max_windspeed = crane_specs.get("max_windspeed", 99)
        self._crane_rate = crane_specs.get("crane_rate", 100)

    def crane_rate(self, **kwargs):
        """Returns `self._crane_rate`."""

        return self._crane_rate

    @staticmethod
    def reequip(**kwargs):
        """
        Calculates time taken to change crane equipment.

        Parameters
        ----------
        crane_reequip_time : int | float
            Time required to change crane equipment (h).

        Returns
        -------
        reequip_time : float
            Time required to change crane equipment (h).
        """

        return kwargs.get("crane_reequip_time", pt["crane_reequip_time"])
class JackingSys:
    """Base Jacking System Class"""

    def __init__(self, jacksys_specs):
        """
        Creates an instance of JackingSys.

        Parameters
        ----------
        jacksys_specs : dict
            Dictionary containing jacking system specifications.
        """

        self.extract_jacksys_specs(jacksys_specs)

    def extract_jacksys_specs(self, jacksys_specs):
        """
        Extracts and defines jacking system specifications.

        Parameters
        ----------
        jacksys_specs : dict
            Dictionary containing jacking system specifications.
        """

        # Physical dimensions (None when unspecified).
        self.num_legs = jacksys_specs.get("num_legs", None)
        self.leg_length = jacksys_specs.get("leg_length", None)
        self.air_gap = jacksys_specs.get("air_gap", None)
        self.leg_pen = jacksys_specs.get("leg_pen", None)

        # Operational parameters.
        self.max_depth = jacksys_specs.get("max_depth", None)
        self.max_extension = jacksys_specs.get("max_extension", None)
        self.speed_below_depth = jacksys_specs.get("speed_below_depth", None)
        self.speed_above_depth = jacksys_specs.get("speed_above_depth", None)

    def jacking_time(self, extension, depth):
        """
        Calculates jacking time for a given depth.

        Parameters
        ----------
        extension : int | float
            Height to jack-up to or jack-down from (m).
        depth : int | float
            Depth at jack-up location (m).

        Returns
        -------
        extension_time : float
            Time required to jack-up to given extension (h).
        """

        # Guard clauses: operating envelope checks in original order.
        if extension > self.max_extension:
            raise Exception(
                f"{extension} extension is greater than {self.max_extension} maximum"
            )

        if depth > self.max_depth:
            raise Exception(
                f"{depth} is beyond the operating depth {self.max_depth}"
            )

        if depth > extension:
            raise Exception("Extension must be greater than depth")

        # Legs travel at different speeds below and above the waterline;
        # speeds are in m/min, so divide by 60 to return hours.
        submerged = depth / self.speed_below_depth
        exposed = (extension - depth) / self.speed_above_depth
        return (submerged + exposed) / 60
class VesselStorage(simpy.FilterStore):
    """FilterStore enforcing cargo mass and deck space limits on a vessel."""

    required_keys = ["type", "mass", "deck_space"]

    def __init__(
        self, env, max_cargo, max_deck_space, max_deck_load, **kwargs
    ):
        """
        Creates an instance of VesselStorage.

        Parameters
        ----------
        env : simpy.Environment
            SimPy environment that simulation runs on.
        max_cargo : int | float
            Maximum mass the storage system can carry (t).
        max_deck_space : int | float
            Maximum deck space the storage system can use (m2).
        max_deck_load : int | float
            Maximum deck load that the storage system can apply (t/m2).
        """

        super().__init__(env, kwargs.get("capacity", float("inf")))

        self.max_cargo_mass = max_cargo
        self.max_deck_space = max_deck_space
        self.max_deck_load = max_deck_load

    @property
    def current_cargo_mass(self):
        """Returns current cargo mass in tons."""

        return sum(item.mass for item in self.items)

    @property
    def current_deck_space(self):
        """Returns current deck space used in m2."""

        return sum(item.deck_space for item in self.items)

    def put_item(self, item):
        """
        Checks VesselStorage specific constraints and triggers self.put()
        if successful.

        Items put into the instance should expose `mass` (t) and
        `deck_space` (m2) attributes.

        Parameters
        ----------
        item : dict
            Dictionary of item properties.

        Raises
        ------
        DeckSpaceExceeded
        CargoMassExceeded
        """

        # NOTE: required-key validation is currently disabled upstream:
        # if any(x not in item.keys() for x in self.required_keys):
        #     raise ItemPropertyNotDefined(item, self.required_keys)

        if self.current_deck_space + item.deck_space > self.max_deck_space:
            raise DeckSpaceExceeded(
                self.max_deck_space, self.current_deck_space, item
            )

        if self.current_cargo_mass + item.mass > self.max_cargo_mass:
            raise CargoMassExceeded(
                self.max_cargo_mass, self.current_cargo_mass, item
            )

        self.put(item)

    def get_item(self, _type):
        """
        Checks `self.items` for an item satisfying `item.type = _type`.
        Returns the item if found, otherwise raises `ItemNotFound`.

        Parameters
        ----------
        _type : str
            Type of item to retrieve.
        """

        match = next((i for i in self.items if i.type == _type), None)
        if not match:
            raise ItemNotFound(_type)

        return self.get(lambda x: x == match).value

    def any_remaining(self, _type):
        """
        Returns True if any item in `self.items` satisfies
        `item.type == _type`. Used to trigger vessel release if empty
        without having to wait for the next self.get_item() iteration.

        Parameters
        ----------
        _type : str
            Type of item to check for.

        Returns
        -------
        resp : bool
            Indicates if any items in self.items satisfy `_type`.
        """

        return any(i.type == _type for i in self.items)
class ScourProtectionStorage(simpy.Container):
    """Container tracking bulk scour protection material on a vessel."""

    def __init__(self, env, max_mass, **kwargs):
        """
        Creates an instance of ScourProtectionStorage.

        Parameters
        ----------
        env : simpy.Environment
            SimPy environment that simulation runs on.
        max_mass : int | float
            Maximum mass the storage system can carry (t).
        """

        self.max_mass = max_mass
        super().__init__(env, self.max_mass)

    @property
    def available_capacity(self):
        """Returns available cargo capacity."""

        return self.max_mass - self.level
class CableCarousel(simpy.Container):
    """Cable Storage Class.

    `self.level` stores cable *length*; capacity is enforced by *mass*
    using the loaded cable's `linear_density`.
    """

    def __init__(self, env, max_mass, **kwargs):
        """
        Creates an instance of CableCarousel.

        Parameters
        ----------
        env : simpy.Environment
            SimPy environment that simulation runs on.
        max_mass : int | float
            Maximum mass the storage system can carry (t).
        """
        # No cable type loaded yet; set by `load_cable()`.
        self.cable = None
        self.max_mass = max_mass
        # Container capacity is left unbounded; mass limits are enforced
        # manually in `load_cable()`.
        super().__init__(env)

    @property
    def available_mass(self):
        """Returns available cargo mass capacity (t)."""
        return self.max_mass - self.current_mass

    @property
    def current_mass(self):
        """Returns current cargo mass (t); 0 if no cable is loaded."""
        try:
            mass = self.level * self.cable.linear_density
            return mass
        except AttributeError:
            # self.cable is None — nothing loaded.
            return 0

    def available_length(self, cable):
        """Returns available length capacity based on input linear density."""
        return self.available_mass / cable.linear_density

    def reset(self):
        """Resets `self.cable` and empties `self.level`."""
        if self.level != 0.0:
            _ = self.get(self.level)
        self.cable = None

    def load_cable(self, cable, length=None):
        """
        Loads input `cable` type onto `self.level`. If `length` isn't passed,
        defaults to maximum amount of cable that can be loaded.

        Parameters
        ----------
        cable : Cable | SimpleCable
        length : int | float

        Raises
        ------
        AttributeError
            If a different cable type is already loaded.
        ValueError
            If the requested length exceeds the remaining mass capacity.
        """
        if self.cable and self.cable != cable:
            raise AttributeError("Carousel already has a cable type.")
        self.cable = cable
        if length is None:
            # Load maximum amount
            length = self.available_length(self.cable)
            self.put(length)
        else:
            # Load length of cable
            proposed = length * cable.linear_density
            if proposed > self.available_mass:
                raise ValueError(
                    f"Length {length} of {cable} can't be loaded."
                )
            self.put(length)

    def get_cable(self, length):
        """
        Retrieves `length` of cable from `self.level`.

        Parameters
        ----------
        length : int | float
            Length of cable to retrieve.

        Raises
        ------
        AttributeError
            If no cable type has been loaded.
        InsufficientCable
            If more cable is requested than is stored.
        """
        if self.cable is None:
            raise AttributeError("Carousel doesn't have any cable.")
        if length > self.level:
            raise InsufficientCable(self.level, length)
        else:
            return self.get(length).amount
|
johnjasa/ORBIT | ORBIT/core/_defaults.py | <reponame>johnjasa/ORBIT<gh_stars>0
"""
<NAME>
National Renewable Energy Lab
07/11/2019
This module contains default vessel process times.
"""
# Default vessel process durations (hr) and rates (km/hr, m/hr); any entry
# may be overridden by passing the same key through a phase's kwargs.
process_times = {
    # Export Cable Installation
    "onshore_construction_time": 48,  # hr
    "trench_dig_speed": 0.1,  # km/hr
    "pull_winch_speed": 5,  # km/hr
    "tow_plow_speed": 5,  # km/hr
    # Array Cable Installation
    # General Cable Installation
    "plgr_speed": 1,  # km/hr
    "cable_load_time": 6,  # hr
    "cable_prep_time": 1,  # hr
    "cable_lower_time": 1,  # hr
    "cable_pull_in_time": 5.5,  # hr
    "cable_termination_time": 5.5,  # hr
    "cable_lay_speed": 1,  # km/hr
    "cable_lay_bury_speed": 0.3,  # km/hr
    "cable_bury_speed": 0.5,  # km/hr
    "cable_splice_time": 48,  # hr
    "cable_raise_time": 0.5,  # hr
    # Offshore Substation
    "topside_fasten_time": 12,  # hr
    "topside_release_time": 2,  # hr
    "topside_attach_time": 6,  # hr
    # Monopiles
    "mono_embed_len": 30,  # m
    "mono_drive_rate": 20,  # m/hr
    "mono_fasten_time": 12,  # hr
    "mono_release_time": 3,  # hr
    "tp_fasten_time": 8,  # hr
    "tp_release_time": 2,  # hr
    "tp_bolt_time": 4,  # hr
    "grout_cure_time": 24,  # hr
    "grout_pump_time": 2,  # hr
    # Scour Protection
    "drop_rocks_time": 10,  # hr
    "load_rocks_time": 4,  # hr
    # Turbines
    "tower_section_fasten_time": 4,  # hr, applies to all sections
    "tower_section_release_time": 3,  # hr, applies to all sections
    "tower_section_attach_time": 6,  # hr, applies to all sections
    "nacelle_fasten_time": 4,  # hr
    "nacelle_release_time": 3,  # hr
    "nacelle_attach_time": 6,  # hr
    "blade_fasten_time": 1.5,  # hr
    "blade_release_time": 1,  # hr
    "blade_attach_time": 3.5,  # hr
    # Mooring System
    "mooring_system_load_time": 5,  # hr
    "mooring_site_survey_time": 4,  # hr
    "suction_pile_install_time": 11,  # hr
    "drag_embed_install_time": 5,  # hr
    # Misc.
    "site_position_time": 2,  # hr
    "rov_survey_time": 1,  # hr
    "crane_reequip_time": 1,  # hr
}
|
johnjasa/ORBIT | library/__init__.py | __author__ = ["<NAME>", "<NAME>"]
# Package metadata for the ORBIT data library.
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = ["<EMAIL>", "<EMAIL>"]
__status__ = "Development"
|
johnjasa/ORBIT | ORBIT/api/wisdem/fixed.py | """WISDEM Monopile API"""
__author__ = ["<NAME>"]
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import os
import yaml
import openmdao.api as om
from ORBIT import ProjectManager
class Orbit(om.Group):
    """OpenMDAO group wrapping `OrbitWisdemFixed` with project defaults."""

    def setup(self):
        """Register default input values and attach the ORBIT component."""
        # (name, default value, units) triples; units=None means unitless
        # or discrete-style string inputs.
        defaults = (
            ('wtiv', 'example_wtiv', None),
            ('feeder', 'example_feeder', None),
            ('oss_install_vessel', 'example_heavy_lift_vessel', None),
            ('site_distance_to_landfall', 40.0, 'km'),
            ('interconnection_distance', 40.0, 'km'),
            ('plant_turbine_spacing', 7, None),
            ('plant_row_spacing', 7, None),
            ('plant_substation_distance', 1, 'km'),
            ('port_cost_per_month', 2e6, 'USD/mo'),
            ('commissioning_pct', 0.01, None),
            ('decommissioning_pct', 0.15, None),
            ('site_auction_price', 100e6, 'USD'),
            ('site_assessment_plan_cost', 1e6, 'USD'),
            ('site_assessment_cost', 25e6, 'USD'),
            ('construction_operations_plan_cost', 2.5e6, 'USD'),
            ('design_install_plan_cost', 2.5e6, 'USD'),
        )
        for name, value, units in defaults:
            self.set_input_defaults(name, value, units=units)
        self.add_subsystem('orbit', OrbitWisdemFixed(), promotes=['*'])
class OrbitWisdemFixed(om.ExplicitComponent):
    """ORBIT-WISDEM Fixed Substructure API"""

    def setup(self):
        """Declare all continuous and discrete inputs/outputs of the component."""
        # Inputs
        # self.add_discrete_input('weather_file', 'block_island', desc='Weather file to use for installation times.')
        # Vessels
        self.add_discrete_input('wtiv', 'example_wtiv', desc='Vessel configuration to use for installation of foundations and turbines.')
        # NOTE(review): default 'future_feeder' here differs from the
        # 'example_feeder' default set in the `Orbit` group — confirm intended.
        self.add_discrete_input('feeder', 'future_feeder', desc='Vessel configuration to use for (optional) feeder barges.')
        self.add_discrete_input('num_feeders', 0, desc='Number of feeder barges to use for installation of foundations and turbines.')
        self.add_discrete_input('oss_install_vessel', 'example_heavy_lift_vessel', desc='Vessel configuration to use for installation of offshore substations.')
        # Site
        self.add_input('site_depth', 40., units='m', desc='Site depth.')
        self.add_input('site_distance', 40., units='km', desc='Distance from site to installation port.')
        self.add_input('site_distance_to_landfall', 50., units='km', desc='Distance from site to landfall for export cable.')
        self.add_input('interconnection_distance', 3., units='km', desc='Distance from landfall to interconnection.')
        self.add_input('site_mean_windspeed', 9., units='m/s', desc='Mean windspeed of the site.')
        # Plant
        self.add_discrete_input('number_of_turbines', 60, desc='Number of turbines.')
        self.add_input('plant_turbine_spacing', 7, desc='Turbine spacing in rotor diameters.')
        self.add_input('plant_row_spacing', 7, desc='Row spacing in rotor diameters. Not used in ring layouts.')
        self.add_input('plant_substation_distance', 1, units='km', desc='Distance from first turbine in string to substation.')
        # Turbine
        self.add_input('turbine_rating', 8., units='MW', desc='Rated capacity of a turbine.')
        self.add_input('turbine_rated_windspeed', 11., units='m/s', desc='Rated windspeed of the turbine.')
        self.add_input('turbine_capex', 1100, units='USD/kW', desc='Turbine CAPEX')
        self.add_input('hub_height', 100., units='m', desc='Turbine hub height.')
        self.add_input('turbine_rotor_diameter', 130, units='m', desc='Turbine rotor diameter.')
        self.add_input('tower_mass', 400., units='t', desc='mass of the total tower.')
        self.add_input('tower_length', 100., units='m', desc='Total length of the tower.')
        self.add_input('tower_deck_space', 0., units='m**2', desc='Deck space required to transport the tower. Defaults to 0 in order to not be a constraint on installation.')
        self.add_input('nacelle_mass', 500., units='t', desc='mass of the rotor nacelle assembly (RNA).')
        self.add_input('nacelle_deck_space', 0., units='m**2', desc='Deck space required to transport the rotor nacelle assembly (RNA). Defaults to 0 in order to not be a constraint on installation.')
        self.add_discrete_input('number_of_blades', 3, desc='Number of blades per turbine.')
        self.add_input('blade_mass', 50., units='t', desc='mass of an individual blade.')
        self.add_input('blade_deck_space', 0., units='m**2', desc='Deck space required to transport a blade. Defaults to 0 in order to not be a constraint on installation.')
        # Port
        self.add_input('port_cost_per_month', 2e6, units='USD/mo', desc='Monthly port costs.')
        # Monopile
        self.add_input('monopile_length', 100., units='m', desc='Length of monopile.')
        self.add_input('monopile_diameter', 7., units='m', desc='Diameter of monopile.')
        self.add_input('monopile_mass', 900., units='t', desc='mass of an individual monopile.')
        self.add_input('monopile_deck_space', 0., units='m**2', desc='Deck space required to transport a monopile. Defaults to 0 in order to not be a constraint on installation.')
        self.add_input('transition_piece_mass', 250., units='t', desc='mass of an individual transition piece.')
        self.add_input('transition_piece_deck_space', 0., units='m**2', desc='Deck space required to transport a transition piece. Defaults to 0 in order to not be a constraint on installation.')
        # Other
        self.add_input('commissioning_pct', 0.01, desc="Commissioning percent.")
        self.add_input('decommissioning_pct', 0.15, desc="Decommissioning percent.")
        # Outputs
        # Totals
        self.add_output('bos_capex', 0.0, units='USD', desc='Total BOS CAPEX not including commissioning or decommissioning.')
        self.add_output('total_capex', 0.0, units='USD', desc='Total BOS CAPEX including commissioning and decommissioning.')
        self.add_output('total_capex_kW', 0.0, units='USD/kW', desc='Total BOS CAPEX including commissioning and decommissioning.')
        self.add_output('installation_time', 0.0, units='h', desc='Total balance of system installation time.')
        self.add_output('installation_capex', 0.0, units='USD', desc='Total balance of system installation cost.')

    def compile_orbit_config_file(self, inputs, outputs, discrete_inputs, discrete_outputs):
        """Translate OpenMDAO inputs into an ORBIT `ProjectManager` config dict."""
        config = {
            # Vessels
            'wtiv': discrete_inputs['wtiv'],
            'feeder': discrete_inputs['feeder'],
            'num_feeders': discrete_inputs['num_feeders'],
            'spi_vessel': 'example_scour_protection_vessel',
            'array_cable_install_vessel': 'example_cable_lay_vessel',
            'array_cable_bury_vessel': 'example_cable_lay_vessel',
            'export_cable_install_vessel': 'example_cable_lay_vessel',
            'export_cable_bury_vessel': 'example_cable_lay_vessel',
            # Site/plant
            # float() unwraps the single-element OpenMDAO input arrays.
            'site': {
                'depth': float(inputs['site_depth']),
                'distance': float(inputs['site_distance']),
                'distance_to_landfall': float(inputs['site_distance_to_landfall']),
                'mean_windspeed': float(inputs['site_mean_windspeed'])
            },
            'landfall': {
                'interconnection_distance': float(inputs['interconnection_distance']),
            },
            'plant': {
                'layout': 'grid',
                'num_turbines': discrete_inputs['number_of_turbines'],
                'row_spacing': float(inputs['plant_row_spacing']),
                'turbine_spacing': float(inputs['plant_turbine_spacing']),
                'substation_distance': float(inputs['plant_substation_distance'])
            },
            'port': {
                'num_cranes': 1,
                'monthly_rate': float(inputs['port_cost_per_month'])
            },
            # Turbine + components
            'turbine': {
                'hub_height': float(inputs['hub_height']),
                'rotor_diameter': float(inputs['turbine_rotor_diameter']),
                'turbine_rating': float(inputs['turbine_rating']),
                'rated_windspeed': float(inputs['turbine_rated_windspeed']),
                'tower': {
                    'type': 'Tower',
                    'deck_space': float(inputs['tower_deck_space']),
                    'mass': float(inputs['tower_mass']),
                    'length': float(inputs['tower_length'])
                },
                'nacelle': {
                    'type': 'Nacelle',
                    'deck_space': float(inputs['nacelle_deck_space']),
                    'mass': float(inputs['nacelle_mass'])
                },
                'blade': {
                    'type': 'Blade',
                    'number': float(discrete_inputs['number_of_blades']),
                    'deck_space': float(inputs['blade_deck_space']),
                    'mass': float(inputs['blade_mass'])
                }
            },
            # Substructure components
            'monopile': {
                'type': 'Monopile',
                'length': float(inputs['monopile_length']),
                'diameter': float(inputs['monopile_diameter']),
                'deck_space': float(inputs['monopile_deck_space']),
                'mass': float(inputs['monopile_mass'])
            },
            'transition_piece': {
                'type': 'Transition Piece',
                'deck_space': float(inputs['transition_piece_deck_space']),
                'mass': float(inputs['transition_piece_mass'])
            },
            'scour_protection_design': {
                'cost_per_tonne': 20,
            },
            # Electrical
            'array_system_design': {
                'cables': ['XLPE_400mm_33kV', 'XLPE_630mm_33kV']
            },
            'export_system_design': {
                'cables': 'XLPE_500mm_132kV',
                'percent_added_length': .1
            },
            # Phase Specific
            "OffshoreSubstationInstallation": {
                "oss_install_vessel": 'example_heavy_lift_vessel',
                "feeder": "future_feeder",
                "num_feeders": 1
            },
            # Other
            "commissioning": float(inputs["commissioning_pct"]),
            # NOTE(review): "decomissioning" (one 'm') — confirm this is the
            # exact key spelling ORBIT's ProjectManager expects.
            "decomissioning": float(inputs["decommissioning_pct"]),
            "turbine_capex": float(inputs["turbine_capex"]),
            # Phases
            'design_phases': [
                "MonopileDesign",
                "ScourProtectionDesign",
                "ArraySystemDesign",
                "ExportSystemDesign",
                "OffshoreSubstationDesign"
            ],
            'install_phases': [
                'MonopileInstallation',
                'ScourProtectionInstallation',
                'TurbineInstallation',
                'ArrayCableInstallation',
                'ExportCableInstallation',
                "OffshoreSubstationInstallation",
            ]
        }
        self._orbit_config = config
        return config

    def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
        """Build the ORBIT config, run the project, and copy out cost/time results."""
        config = self.compile_orbit_config_file(inputs, outputs,
                                                discrete_inputs,
                                                discrete_outputs)
        project = ProjectManager(config)
        project.run_project()
        outputs['bos_capex'] = project.bos_capex
        outputs['total_capex'] = project.total_capex
        outputs['total_capex_kW'] = project.total_capex_per_kw
        outputs['installation_time'] = project.installation_time
        outputs['installation_capex'] = project.installation_capex
if __name__ == "__main__":
    # Smoke test: run the component standalone with its declared defaults
    # and print the resulting inputs/outputs.
    prob = om.Problem()
    prob.model = OrbitWisdemFixed()
    prob.setup()
    prob.run_driver()
    prob.model.list_inputs()
    prob.model.list_outputs()
|
johnjasa/ORBIT | tests/test_design_install_phase_interactions.py | <reponame>johnjasa/ORBIT
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from copy import deepcopy
from ORBIT import ProjectManager
# Shared baseline config: both a monopile and transition piece are defined so
# the tests below can check which one the design phase overwrites.
config = {
    "wtiv": "test_wtiv",
    "site": {"depth": 20, "distance": 20, "mean_windspeed": 9},
    "plant": {"num_turbines": 20},
    "turbine": {
        "hub_height": 130,
        "rotor_diameter": 154,
        "rated_windspeed": 11,
    },
    "port": {"num_cranes": 1, "monthly_rate": 2e6},
    "monopile": {
        "type": "Monopile",
        "length": 60,
        "diameter": 8,
        "deck_space": 0,
        "mass": 600,
    },
    "transition_piece": {
        "type": "Transition Piece",
        "deck_space": 0,
        "mass": 500,
    },
    "monopile_design": {},
    "design_phases": ["MonopileDesign"],
    "install_phases": ["MonopileInstallation"],
}
def test_monopile_definition():
    """User-supplied monopile data is kept; the transition piece is designed."""
    modified = deepcopy(config)
    modified.pop("transition_piece")
    project = ProjectManager(modified)
    project.run_project()
    for key, value in config["monopile"].items():
        if key != "type":
            assert project.config["monopile"][key] == value
    for key, value in config["transition_piece"].items():
        if key != "type":
            assert project.config["transition_piece"][key] != value
def test_transition_piece_definition():
    """User-supplied transition piece data is kept; the monopile is designed."""
    modified = deepcopy(config)
    modified.pop("monopile")
    project = ProjectManager(modified)
    project.run_project()
    for key, value in config["monopile"].items():
        if key != "type":
            assert project.config["monopile"][key] != value
    for key, value in config["transition_piece"].items():
        if key != "type":
            assert project.config["transition_piece"][key] == value
def test_mono_and_tp_definition():
    """When both components are supplied, neither is overwritten by design."""
    project = ProjectManager(deepcopy(config))
    project.run_project()
    for component in ("monopile", "transition_piece"):
        for key, value in config[component].items():
            if key != "type":
                assert project.config[component][key] == value
|
johnjasa/ORBIT | docs/conf.py | """
<NAME>
National Renewable Energy Lab
09/13/2019
Configuration file for the Sphinx documentation builder.
"""
# -- Path setup --------------------------------------------------------------
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
import ORBIT
# -- Project information -----------------------------------------------------
project = "ORBIT"
copyright = "2020, National Renewable Energy Lab"
author = "<NAME>, <NAME>, <NAME>"
# Version string is pulled from the package itself so docs stay in sync.
release = ORBIT.__version__
# -- General configuration ---------------------------------------------------
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.coverage",
    "sphinx.ext.napoleon",
    "sphinx.ext.autosectionlabel",
]
master_doc = "contents"
autodoc_member_order = "bysource"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
html_theme = "sphinx_rtd_theme"
html_static_path = ["_static"]
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes"]
html_theme_options = {"display_version": True, "body_max_width": "70%"}
# Napoleon options (NumPy-style docstrings)
napoleon_google_docstring = False
napoleon_use_param = False
napoleon_use_ivar = True
|
johnjasa/ORBIT | tests/phases/install/test_install_phase.py | """Tests for the `InstallPhase` class."""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import pandas as pd
import pytest
from marmot import Environment
from ORBIT.phases.install import InstallPhase
class BadInstallPhase(InstallPhase):
    """Deliberately incomplete subclass; abstract methods are left undefined."""

    def __init__(self, config, **kwargs):
        """Store the config and create a simulation environment."""
        self.env = Environment()
        self.config = config
class SampleInstallPhase(InstallPhase):
    """Minimal concrete InstallPhase used for testing."""

    phase = "SampleInstallPhase"

    def __init__(self, config, **kwargs):
        """Store the config and create a simulation environment."""
        self.env = Environment()
        self.config = config

    def detailed_output(self):
        """No-op; satisfies the InstallPhase interface."""
        pass

    def setup_simulation(self):
        """No-op; satisfies the InstallPhase interface."""
        pass
# Minimal port configuration shared by the tests below.
base_config = {"port": {"num_cranes": 1, "name": "TEST_PORT"}}
def test_abstract_methods():
    """A subclass missing the abstract methods must fail to instantiate."""
    with pytest.raises(TypeError):
        BadInstallPhase(base_config)
    # The complete subclass instantiates without error.
    SampleInstallPhase(base_config)
def test_run():
    """`run(until=...)` advances the simulation clock to the requested time."""
    phase = SampleInstallPhase(base_config)
    phase.run(until=10)
    assert phase.env.now == 10
|
johnjasa/ORBIT | tests/phases/install/monopile_install/test_monopile_tasks.py | """
Testing framework for common monopile installation tasks.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import pytest
from ORBIT.core.exceptions import MissingComponent
from ORBIT.phases.install.monopile_install.common import (
drive_monopile,
lower_monopile,
upend_monopile,
bolt_transition_piece,
lower_transition_piece,
cure_transition_piece_grout,
pump_transition_piece_grout,
)
@pytest.mark.parametrize(
    "task, log, args",
    [
        (upend_monopile, "Upend Monopile", [100]),
        (lower_monopile, "Lower Monopile", []),
        (drive_monopile, "Drive Monopile", []),
        (lower_transition_piece, "Lower TP", []),
        (bolt_transition_piece, "Bolt TP", []),
        (pump_transition_piece_grout, "Pump TP Grout", []),
        (cure_transition_piece_grout, "Cure TP Grout", []),
    ],
)
def test_task(env, wtiv, task, log, args):
    """Each monopile task logs its expected action on a capable vessel."""
    env.register(wtiv)
    wtiv.initialize(mobilize=False)
    task(wtiv, *args, site_depth=10)
    env.run()
    logged = {entry["action"] for entry in env.actions}
    assert log in logged
@pytest.mark.parametrize(
    "task, log, args",
    [
        (upend_monopile, "Upend Monopile", [100]),
        (lower_monopile, "Lower Monopile", []),
        (drive_monopile, "Drive Monopile", []),
        (lower_transition_piece, "Lower TP", []),
    ],
)
def test_task_fails(env, feeder, task, log, args):
    """Tasks raise MissingComponent on a vessel lacking the required gear."""
    env.register(feeder)
    feeder.initialize(mobilize=False)
    with pytest.raises(MissingComponent):
        task(feeder, *args, site_depth=10)
    env.run()
    logged = {entry["action"] for entry in env.actions}
    assert log not in logged
|
johnjasa/ORBIT | ORBIT/phases/install/cable_install/common.py | """Common processes and cargo types for Cable Installations."""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from marmot import process
from ORBIT.core.logic import position_onsite
from ORBIT.core._defaults import process_times as pt
class SimpleCable:
    """Minimal cable representation carrying only its linear density."""

    def __init__(self, linear_density):
        """
        Create a SimpleCable.

        Parameters
        ----------
        linear_density : int | float
            Cable mass per unit length.
        """
        self.linear_density = linear_density
@process
def load_cable_on_vessel(vessel, cable, constraints=None, **kwargs):
    """
    Subprocess for loading `cable` onto the configured `vessel`.

    Parameters
    ----------
    vessel : Vessel
        Performing vessel. Required to have configured `cable_storage`.
    cable : SimpleCable | Cable
        Cable type.
    constraints : dict, optional
        Constraints to be applied to the cable loading subprocess.
        Defaults to no constraints.
    """
    # Avoid a shared mutable default argument; build a fresh dict per call.
    if constraints is None:
        constraints = {}
    key = "cable_load_time"
    load_time = kwargs.get(key, pt[key])
    vessel.cable_storage.load_cable(cable)
    yield vessel.task(
        "Load Cable", load_time, constraints=constraints, **kwargs
    )
@process
def landfall_tasks(vessel, trench_length, **kwargs):
    """
    List of tasks that must be completed at landfall at the beginning of the
    export system installation process.

    The subtasks run strictly in sequence: tow plow, pull winch, then the
    cable prep/pull-in/terminate/lower sequence.

    Parameters
    ----------
    vessel : Vessel
        Performing vessel. Required to have configured `cable_storage`.
    trench_length : int | float
        Length of trench that is dug through the beach (km).
    """
    yield tow_plow(vessel, trench_length, **kwargs)
    yield pull_winch(vessel, trench_length, **kwargs)
    yield prep_cable(vessel, **kwargs)
    yield pull_in_cable(vessel, **kwargs)
    yield terminate_cable(vessel, **kwargs)
    yield lower_cable(vessel, **kwargs)
@process
def prep_cable(vessel, **kwargs):
    """
    Charge the time required to prepare the cable for pull-in.

    Parameters
    ----------
    vessel : Vessel
        Performing vessel. Requires configured `transit_limits`.
    """
    # NOTE(review): this task uses `transit_limits` while sibling tasks use
    # `operational_limits` — confirm intentional.
    duration = kwargs.get("cable_prep_time", pt["cable_prep_time"])
    yield vessel.task(
        "Prepare Cable", duration, constraints=vessel.transit_limits, **kwargs
    )
@process
def lower_cable(vessel, **kwargs):
    """
    Charge the time required to lower the cable to the seafloor.

    Parameters
    ----------
    vessel : Vessel
        Performing vessel. Requires configured `operational_limits`.
    """
    duration = kwargs.get("cable_lower_time", pt["cable_lower_time"])
    yield vessel.task(
        "Lower Cable",
        duration,
        constraints=vessel.operational_limits,
        **kwargs,
    )
@process
def pull_in_cable(vessel, **kwargs):
    """
    Charge the time required to pull the cable into an offshore substructure
    or an onshore trench.

    Parameters
    ----------
    vessel : Vessel
        Performing vessel. Requires configured `operational_limits`.
    """
    duration = kwargs.get("cable_pull_in_time", pt["cable_pull_in_time"])
    yield vessel.task(
        "Pull In Cable",
        duration,
        constraints=vessel.operational_limits,
        **kwargs,
    )
@process
def terminate_cable(vessel, **kwargs):
    """
    Charge the time required to terminate and test the cable connection.

    Parameters
    ----------
    vessel : Vessel
        Performing vessel. Requires configured `operational_limits`.
    """
    duration = kwargs.get(
        "cable_termination_time", pt["cable_termination_time"]
    )
    yield vessel.task(
        "Terminate Cable",
        duration,
        constraints=vessel.operational_limits,
        **kwargs,
    )
@process
def lay_bury_cable(vessel, distance, **kwargs):
    """
    Charge the time required to simultaneously lay and bury a cable section.

    Parameters
    ----------
    vessel : Vessel
        Performing vessel. Requires configured `operational_limits`.
    distance : int | float
        Length of the cable section (km).
    cable_lay_bury_speed : int | float
        Maximum speed at which cable is dispensed (km/hr).
    """
    # Vessel transport specs take precedence over caller-supplied kwargs.
    kwargs = {**kwargs, **getattr(vessel, "_transport_specs", {})}
    speed = kwargs.get("cable_lay_bury_speed", pt["cable_lay_bury_speed"])
    yield vessel.task(
        "Lay/Bury Cable",
        distance / speed,
        constraints=vessel.operational_limits,
        suspendable=True,
        **kwargs,
    )
@process
def lay_cable(vessel, distance, **kwargs):
    """
    Charge the time required to lay a cable section.

    Parameters
    ----------
    vessel : Vessel
        Performing vessel. Requires configured `operational_limits`.
    distance : int | float
        Length of the cable section (km).
    cable_lay_speed : int | float
        Maximum speed at which cable is dispensed (km/hr).
    """
    # Vessel transport specs take precedence over caller-supplied kwargs.
    kwargs = {**kwargs, **getattr(vessel, "_transport_specs", {})}
    speed = kwargs.get("cable_lay_speed", pt["cable_lay_speed"])
    yield vessel.task(
        "Lay Cable",
        distance / speed,
        constraints=vessel.operational_limits,
        suspendable=True,
        **kwargs,
    )
@process
def bury_cable(vessel, distance, **kwargs):
    """
    Charge the time required to bury a cable section.

    Parameters
    ----------
    vessel : Vessel
        Performing vessel. Requires configured `operational_limits`.
    distance : int | float
        Length of the cable section (km).
    cable_bury_speed : int | float
        Maximum speed at which cable is buried (km/hr).
    """
    # Vessel transport specs take precedence over caller-supplied kwargs.
    kwargs = {**kwargs, **getattr(vessel, "_transport_specs", {})}
    speed = kwargs.get("cable_bury_speed", pt["cable_bury_speed"])
    yield vessel.task(
        "Bury Cable",
        distance / speed,
        constraints=vessel.operational_limits,
        suspendable=True,
        **kwargs,
    )
@process
def splice_cable(vessel, **kwargs):
    """
    Charge the time required to splice two cable ends together at sea.

    Parameters
    ----------
    vessel : Vessel
        Performing vessel. Requires configured `operational_limits`.
    cable_splice_time : int | float
        Time required to splice two cable ends together (h).
    """
    duration = kwargs.get("cable_splice_time", pt["cable_splice_time"])
    yield vessel.task(
        "Splice Cable",
        duration,
        constraints=vessel.operational_limits,
        **kwargs,
    )
@process
def raise_cable(vessel, **kwargs):
    """
    Charge the time required to raise the unspliced cable from the seafloor.

    Parameters
    ----------
    vessel : Vessel
        Performing vessel. Requires configured `operational_limits`.
    cable_raise_time : int | float
        Time required to raise the cable from the seafloor (h).
    """
    duration = kwargs.get("cable_raise_time", pt["cable_raise_time"])
    yield vessel.task(
        "Raise Cable",
        duration,
        constraints=vessel.operational_limits,
        **kwargs,
    )
@process
def splice_process(vessel, **kwargs):
    """
    Run the full at-sea cable splicing sequence: position on site, raise the
    cable, splice, then lower it back to the seafloor.

    Parameters
    ----------
    vessel : Vessel
        Performing vessel. Requires configured `operational_limits`.
    """
    yield position_onsite(vessel)
    yield raise_cable(vessel, **kwargs)
    yield splice_cable(vessel, **kwargs)
    yield lower_cable(vessel, **kwargs)
@process
def tow_plow(vessel, distance, **kwargs):
    """
    Charge the time required to tow the plow at the landfall site.

    Parameters
    ----------
    vessel : Vessel
        Performing vessel. Requires configured `operational_limits`.
    distance : int | float
        Distance between cable laying vessel and onshore construction (km).
    tow_plow_speed : float
        Towing speed (km/h).
    """
    speed = kwargs.get("tow_plow_speed", pt["tow_plow_speed"])
    yield vessel.task(
        "Tow Plow",
        distance / speed,
        constraints=vessel.operational_limits,
        **kwargs,
    )
@process
def pull_winch(vessel, distance, **kwargs):
    """
    Charge the time required to pull the cable onshore through the
    previously dug trench.

    Parameters
    ----------
    vessel : Vessel
        Performing vessel. Requires configured `operational_limits`.
    distance : int | float
        Distance the winch wire must travel to reach the plow (km).
    pull_winch_speed : int | float
        Speed at which the winch travels (km/h).
    """
    speed = kwargs.get("pull_winch_speed", pt["pull_winch_speed"])
    yield vessel.task(
        "Pull Winch",
        distance / speed,
        constraints=vessel.operational_limits,
        **kwargs,
    )
@process
def dig_trench(vessel, distance, **kwargs):
    """
    Charge the time required to dig a trench prior to cable lay and burial.

    Parameters
    ----------
    vessel : Vessel
        Performing vessel. Requires configured `operational_limits`.
    distance : int | float
        Length of trench, equal to the length of the cable section (km).
    trench_dig_speed : int | float
        Speed at which the trench is dug (km/hr).
    """
    # Vessel transport specs take precedence over caller-supplied kwargs.
    kwargs = {**kwargs, **getattr(vessel, "_transport_specs", {})}
    speed = kwargs.get("trench_dig_speed", pt["trench_dig_speed"])
    yield vessel.task(
        "Dig Trench",
        distance / speed,
        constraints=vessel.operational_limits,
        suspendable=True,
        **kwargs,
    )
|
johnjasa/ORBIT | tests/phases/design/test_project_development.py | """Tests for the `ProjectDevelopment` class."""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import os
from copy import deepcopy
import pytest
from ORBIT.library import extract_library_specs
from ORBIT.phases.design import ProjectDevelopment
# Fully-specified development config; `test_defaults` removes one key at a
# time to exercise the library-default fallback.
base = {
    "project_development": {
        "site_auction_cost": 100e6,  # USD
        "site_auction_duration": 0,  # hrs
        "site_assessment_plan_cost": 0.5e6,  # USD
        "site_assessment_plan_duration": 8760,  # hrs
        "site_assessment_cost": 50e6,  # USD
        "site_assessment_duration": 43800,  # hrs
        "construction_operations_plan_cost": 1e6,  # USD
        "construction_operations_plan_duration": 43800,  # hrs
        "boem_review_cost": 0,  # No cost to developer
        "boem_review_duration": 8760,  # hrs
        "design_install_plan_cost": 0.25e6,  # USD
        "design_install_plan_duration": 8760,  # hrs
    }
}
def test_run():
    """ProjectDevelopment runs cleanly on the fully-specified base config."""
    phase = ProjectDevelopment(base)
    phase.run()
def test_defaults():
    """Dropping any config key makes ProjectDevelopment use library defaults."""
    for key in base["project_development"]:
        trimmed = deepcopy(base)
        trimmed["project_development"].pop(key)
        dev = ProjectDevelopment(trimmed, library_path=pytest.library)
        dev.run()
        defaults = extract_library_specs("defaults", "project")
        # Keys look like "<phase name>_<cost|duration>"; split on the last "_".
        name, kind = key.rsplit("_", 1)
        assert dev._outputs[name][kind] == defaults[key]
|
johnjasa/ORBIT | tests/phases/design/test_monopile_design.py | """Tests for the `MonopileDesign` class."""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from copy import deepcopy
from itertools import product
import pytest
from ORBIT.phases.design import MonopileDesign
# Baseline design config used by the kwargs tests below.
base = {
    "site": {"depth": 50, "mean_windspeed": 9},
    "plant": {"num_turbines": 20},
    "turbine": {
        "rotor_diameter": 150,
        "hub_height": 110,
        "rated_windspeed": 11,
    },
}
# Representative turbine definitions of increasing size for the sweep test.
turbines = [
    {"rotor_diameter": 155, "hub_height": 100, "rated_windspeed": 12},
    {"rotor_diameter": 180, "hub_height": 112, "rated_windspeed": 12},
    {"rotor_diameter": 205, "hub_height": 125, "rated_windspeed": 12},
    {"rotor_diameter": 222, "hub_height": 136, "rated_windspeed": 12},
    {"rotor_diameter": 248, "hub_height": 149, "rated_windspeed": 12},
]
@pytest.mark.parametrize(
    "depth,mean_ws,turbine",
    product(range(10, 51, 10), range(8, 13, 1), turbines),
)
def test_paramater_sweep(depth, mean_ws, turbine):
    """MonopileDesign outputs stay within physically sensible bounds."""
    # NOTE: name keeps the historical "paramater" spelling so pytest
    # selections (-k) keep matching.
    config = {
        "site": {"depth": depth, "mean_windspeed": mean_ws},
        "plant": {"num_turbines": 20},
        "turbine": turbine,
    }
    design = MonopileDesign(config)
    design.run()
    monopile = design._outputs["monopile"]
    # Valid monopile length (m)
    assert 10 < monopile["length"] < 130
    # Valid monopile diameter (m)
    assert 4 < monopile["diameter"] < 13
    # Valid monopile mass (t)
    assert 200 < monopile["mass"] < 2500
    # Valid transition piece diameter (m)
    assert 4 < design._outputs["transition_piece"]["diameter"] < 14
def test_monopile_kwargs():
    """Every design kwarg should perturb the monopile results."""
    overrides = {
        "yield_stress": 400000000,
        "load_factor": 1.25,
        "material_factor": 1.2,
        "monopile_density": 9000,
        "monopile_modulus": 220e9,
        "soil_coefficient": 4500000,
        "air_density": 1.125,
        "weibull_scale_factor": 10,
        "weibull_shape_factor": 1.87,
        "turb_length_scale": 350,
    }

    baseline = MonopileDesign(base)
    baseline.run()
    base_results = baseline._outputs["monopile"]

    for name, value in overrides.items():
        config = deepcopy(base)
        config["monopile_design"] = {name: value}

        design = MonopileDesign(config)
        design.run()
        assert design._outputs["monopile"] != base_results
def test_transition_piece_kwargs():
    """Every transition piece kwarg should perturb the TP results."""
    overrides = {
        # Transition piece specific
        "monopile_tp_connection_thickness": 0.005,
        "transition_piece_density": 8200,
        "transition_piece_thickness": 0.12,
        "transition_piece_length": 30,
    }

    baseline = MonopileDesign(base)
    baseline.run()
    base_results = baseline._outputs["transition_piece"]

    for name, value in overrides.items():
        config = deepcopy(base)
        config["monopile_design"] = {name: value}

        design = MonopileDesign(config)
        design.run()
        assert design._outputs["transition_piece"] != base_results
|
johnjasa/ORBIT | ORBIT/phases/install/scour_protection_install/standard.py | """`ScourProtectionInstallation` and related processes."""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from math import ceil
import simpy
from marmot import process
from ORBIT.core import Vessel
from ORBIT.core._defaults import process_times as pt
from ORBIT.phases.install import InstallPhase
from ORBIT.core.exceptions import CargoMassExceeded, InsufficientAmount
class ScourProtectionInstallation(InstallPhase):
    """Scour protection installation simulation using a single vessel."""

    #:
    expected_config = {
        "spi_vessel": "dict | str",
        "site": {"distance": "int"},
        "plant": {
            "num_turbines": "int",
            "turbine_spacing": "rotor diameters",
            "turbine_distance": "km (optional)",
        },
        "turbine": {"rotor_diameter": "m"},
        "port": {
            "monthly_rate": "USD/mo (optional)",
            "name": "str (optional)",
        },
        "scour_protection": {"tons_per_substructure": "float"},
    }

    phase = "Scour Protection Installation"

    def __init__(self, config, weather=None, **kwargs):
        """
        Creates an instance of `ScourProtectionInstallation`.

        Parameters
        ----------
        config : dict
            Simulation specific configuration.
        weather : np.ndarray
            Weather profile at site.
        """

        super().__init__(weather, **kwargs)

        # Resolve library references, validate against `expected_config`, and
        # fill in defaults before building the simulation.
        config = self.initialize_library(config, **kwargs)
        self.config = self.validate_config(config)
        self.extract_defaults()

        self.setup_simulation(**kwargs)

    def setup_simulation(self, **kwargs):
        """
        Sets up the required simulation infrastructure:
        - creates a port
        - initializes a scour protection installation vessel
        - initializes vessel storage
        """

        self.initialize_port()
        self.initialize_spi_vessel()

        self.num_turbines = self.config["plant"]["num_turbines"]
        site_distance = self.config["site"]["distance"]
        rotor_diameter = self.config["turbine"]["rotor_diameter"]

        # Fall back to turbine spacing (given in rotor diameters, converted
        # to km) when an explicit inter-turbine distance is not provided.
        turbine_distance = self.config["plant"].get("turbine_distance", None)
        if turbine_distance is None:
            turbine_distance = (
                rotor_diameter
                * self.config["plant"]["turbine_spacing"]
                / 1000.0
            )

        self.tons_per_substructure = ceil(
            self.config["scour_protection"]["tons_per_substructure"]
        )

        # Kick off the installation process (marmot schedules it on self.env).
        install_scour_protection(
            self.spi_vessel,
            port=self.port,
            site_distance=site_distance,
            turbines=self.num_turbines,
            turbine_distance=turbine_distance,
            tons_per_substructure=self.tons_per_substructure,
            **kwargs,
        )

    def initialize_port(self):
        """
        Initializes a Port object with a simpy.Container of scour protection
        material.
        """

        self.port = simpy.Container(self.env)

    def initialize_spi_vessel(self):
        """
        Creates the scour protection installation (SPI) vessel.
        """

        spi_specs = self.config["spi_vessel"]
        name = spi_specs.get("name", "SPI Vessel")
        spi_vessel = Vessel(name, spi_specs)
        self.env.register(spi_vessel)

        spi_vessel.initialize()
        spi_vessel.at_port = True
        spi_vessel.at_site = False
        self.spi_vessel = spi_vessel

    @property
    def detailed_output(self):
        """Detailed outputs of the scour protection installation."""

        outputs = {self.phase: {**self.agent_efficiencies}}

        return outputs
@process
def install_scour_protection(
    vessel,
    port,
    site_distance,
    turbines,
    turbine_distance,
    tons_per_substructure,
    **kwargs,
):
    """
    Installs the scour protection, shuttling between port and site as the
    vessel's rock supply allows.

    Parameters
    ----------
    vessel : Vessel
        Scour protection installation vessel.
    port : simpy.Container
        Port rock supply.
        NOTE(review): currently unused in this process — confirm intent.
    site_distance : int | float
        Distance (km) between site and the port.
    turbines : int
        Number of turbines where scour protection must be installed.
    turbine_distance : int | float
        Distance between any two turbines.
        For now this assumes it traverses an edge and not a diagonal.
    tons_per_substructure : int
        Number of tons required to be installed at each substructure.
    """

    while turbines > 0:
        if vessel.at_port:
            # Load scour protection material
            yield load_material(
                vessel, vessel.rock_storage.available_capacity, **kwargs
            )

            # Transit to site
            vessel.at_port = False
            yield vessel.transit(site_distance)
            vessel.at_site = True

        elif vessel.at_site:
            if vessel.rock_storage.level >= tons_per_substructure:
                # Drop scour protection material
                yield drop_material(vessel, tons_per_substructure, **kwargs)
                turbines -= 1

                # Transit to another turbine
                if (
                    vessel.rock_storage.level >= tons_per_substructure
                    and turbines > 0
                ):
                    yield vessel.transit(turbine_distance)

                else:
                    # Transit back to port
                    vessel.at_site = False
                    yield vessel.transit(site_distance)
                    vessel.at_port = True

            else:
                # Transit back to port
                vessel.at_site = False
                yield vessel.transit(site_distance)
                vessel.at_port = True

        else:
            # Defensive: flags should always mark the vessel as at port or
            # at site; anything else indicates corrupted state.
            raise Exception("Vessel is lost at sea.")

    vessel.submit_debug_log(message="Scour Protection Installation Complete!")
@process
def load_material(vessel, mass, **kwargs):
    """
    Loads scour protection material (rock) into the vessel's storage and logs
    the loading task.

    Parameters
    ----------
    vessel : Vessel
        Vessel with a ``rock_storage`` container to load into.
    mass : int | float
        Mass (t) of rock to load.

    Raises
    ------
    CargoMassExceeded
        If loading *mass* would exceed the vessel's rock storage capacity.
    """

    if vessel.rock_storage.level + mass > vessel.rock_storage.max_mass:
        raise CargoMassExceeded(
            vessel.rock_storage.max_mass,
            vessel.rock_storage.level,
            "Scour Protection",
        )

    # Task duration can be overridden via kwargs; defaults to process_times.
    key = "load_rocks_time"
    load_time = kwargs.get(key, pt[key])

    vessel.rock_storage.put(mass)
    yield vessel.task(
        "Load SP Material",
        load_time,
        constraints=vessel.transit_limits,
        **kwargs,
    )
@process
def drop_material(vessel, mass, **kwargs):
    """
    Drops scour protection material at the current substructure and logs the
    task.

    Parameters
    ----------
    vessel : Vessel
        Vessel with a ``rock_storage`` container to draw from.
    mass : int | float
        Mass (t) of rock to drop.

    Raises
    ------
    InsufficientAmount
        If the vessel does not currently hold *mass* tonnes of rock.
    """

    if vessel.rock_storage.level < mass:
        raise InsufficientAmount(
            vessel.rock_storage.level, "Scour Protection", mass
        )

    # Task duration can be overridden via kwargs; defaults to process_times.
    key = "drop_rocks_time"
    drop_time = kwargs.get(key, pt[key])

    _ = vessel.rock_storage.get(mass)
    yield vessel.task(
        "Drop SP Material",
        drop_time,
        constraints=vessel.transit_limits,
        **kwargs,
    )
|
johnjasa/ORBIT | tests/phases/install/mooring_install/test_mooring_install.py | """
Testing framework for the `MooringSystemInstallation` class.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from copy import deepcopy
import pandas as pd
import pytest
from tests.data import test_weather
from ORBIT.library import extract_library_specs
from ORBIT.core._defaults import process_times as pt
from ORBIT.phases.install import MooringSystemInstallation
config = extract_library_specs("config", "mooring_system_install")
def test_simulation_creation():
    """The simulation wires up its config, environment, port, and vessel."""
    sim = MooringSystemInstallation(config)
    assert sim.config == config
    for attr in ("env", "port", "vessel", "number_systems"):
        assert getattr(sim, attr)
@pytest.mark.parametrize(
    "weather", (None, test_weather), ids=["no_weather", "test_weather"]
)
def test_full_run_logging(weather):
    """A full run logs one install action per mooring line, with consistent
    timestamps and no missing costs."""
    sim = MooringSystemInstallation(config, weather=weather)
    sim.run()

    expected_lines = (
        config["plant"]["num_turbines"] * config["mooring_system"]["num_lines"]
    )

    actions = pd.DataFrame(sim.env.actions)
    actions["shift"] = actions.time - actions.time.shift(1)

    # Each action's duration matches the gap to the previous timestamp.
    assert (actions.duration - actions["shift"]).fillna(0.0).abs().max() < 1e-9
    assert actions[actions.action == "Install Mooring Line"].shape[0] == expected_lines
    assert ~actions["cost"].isnull().any()

    # Smoke-check the derived output properties.
    _ = sim.agent_efficiencies
    _ = sim.detailed_output
@pytest.mark.parametrize(
    "anchor, key",
    [
        ("Suction Pile", "suction_pile_install_time"),
        ("Drag Embedment", "drag_embed_install_time"),
    ],
)
def test_kwargs(anchor, key):
    """Increasing each process-time kwarg should slow the simulation down."""
    modified = deepcopy(config)
    modified["mooring_system"]["anchor_type"] = anchor

    base_sim = MooringSystemInstallation(modified)
    base_sim.run()
    baseline = base_sim.total_phase_time

    failed = []
    for kw in ["mooring_system_load_time", "mooring_site_survey_time", key]:
        sim = MooringSystemInstallation(modified, **{kw: pt[kw] + 2})
        sim.run()
        if not sim.total_phase_time > baseline:
            failed.append(kw)

    if failed:
        raise Exception(f"'{failed}' not affecting results.")
|
johnjasa/ORBIT | tests/core/test_environment.py | <gh_stars>10-100
"""Tests for the `Vessel` class."""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import pandas as pd
import pytest
from marmot import le
from ORBIT.core import Environment
from tests.data import test_weather as _weather
# Weather data with 'windspeed' column
simple_weather = _weather.copy()

# Weather data with windspeed heights and complex varied column name precision
weather = pd.DataFrame(_weather)
weather["windspeed_10m"] = weather["windspeed"].copy()
weather = weather.drop("windspeed", axis=1)
# Synthetic shear profile: faster aloft, slower near the surface.
weather["windspeed_100.0m"] = weather["windspeed_10m"].copy() * 1.3
weather["windspeed_1.2m"] = weather["windspeed_10m"].copy() * 0.7
# Environment expects a numpy recarray, not a DataFrame.
weather = weather.to_records()
def test_simple_inputs():
    """A plain 'windspeed' column passes through unchanged."""
    env = Environment(state=simple_weather)
    for column in ("waveheight", "windspeed"):
        assert column in env.state.dtype.names
def test_inputs():
    """Height-suffixed windspeed columns are normalized; kwargs are stored."""
    env = Environment(state=weather)
    names = env.state.dtype.names
    assert "waveheight" in names
    assert "windspeed" not in names
    # "windspeed_100.0m" is normalized to integer-precision "windspeed_100m".
    for column in ("windspeed_10m", "windspeed_100m", "windspeed_1.2m"):
        assert column in names

    env2 = Environment(state=weather, ws_alpha=0.12, ws_default_height=20)
    assert env2.alpha == 0.12
    assert env2.default_height == 20
def test_simple_constraint_application():
    """Without height columns, height-suffixed constraints map to 'windspeed'."""
    env = Environment(state=simple_weather)

    constraints = {"windspeed": le(10)}
    assert env._find_valid_constraints(**constraints) == constraints

    valid = env._find_valid_constraints(**{"windspeed_100m": le(10)})
    assert "windspeed" in valid

    # Multiple height-specific constraints can't be reconciled to one column.
    with pytest.raises(ValueError):
        env._find_valid_constraints(
            **{"windspeed_100m": le(10), "windspeed_10m": le(8)}
        )
def test_constraint_application():
    """Constraints resolve to existing columns, creating new heights on demand."""
    env = Environment(state=weather)

    valid = env._find_valid_constraints(waveheight=le(2), windspeed=le(10))
    assert "windspeed_10m" in valid

    valid = env._find_valid_constraints(waveheight=le(2), windspeed_10m=le(10))
    assert "waveheight" in valid
    assert "windspeed_10m" in valid

    # Requesting an unseen intermediate height adds a column to the state.
    assert "windspeed_20m" not in env.state.dtype.names
    valid = env._find_valid_constraints(waveheight=le(2), windspeed_20m=le(10))
    assert "windspeed_20m" in valid
    assert "windspeed_20m" in env.state.dtype.names

    # Same for a height above the available data.
    assert "windspeed_120m" not in env.state.dtype.names
    valid = env._find_valid_constraints(waveheight=le(2), windspeed_120m=le(10))
    assert "windspeed_120m" in valid
    assert "windspeed_120m" in env.state.dtype.names
def test_interp():
    """An intermediate height is interpolated between the known columns."""
    env = Environment(state=weather)
    assert "windspeed_20m" not in env.state.dtype.names

    env._find_valid_constraints(waveheight=le(2), windspeed_20m=le(10))
    assert "windspeed_20m" in env.state.dtype.names
    assert (env.state["windspeed_10m"] < env.state["windspeed_20m"]).all()
    assert (env.state["windspeed_20m"] < env.state["windspeed_100m"]).all()
def test_extrap():
    """Heights above the data are extrapolated; larger alpha steepens the profile."""
    env = Environment(state=weather)
    assert "windspeed_120m" not in env.state.dtype.names
    env._find_valid_constraints(waveheight=le(2), windspeed_120m=le(10))
    assert "windspeed_120m" in env.state.dtype.names
    assert (env.state["windspeed_120m"] > env.state["windspeed_100m"]).all()

    env2 = Environment(state=weather, ws_alpha=0.12)
    assert "windspeed_120m" not in env2.state.dtype.names
    env2._find_valid_constraints(waveheight=le(2), windspeed_120m=le(10))
    # Alpha only affects extrapolated columns, not the measured ones.
    assert (env.state["windspeed_100m"] == env2.state["windspeed_100m"]).all()
    assert (env.state["windspeed_120m"] < env2.state["windspeed_120m"]).all()
|
johnjasa/ORBIT | ORBIT/phases/install/__init__.py | """The install package contains `InstallPhase` and its subclasses."""
__author__ = ["<NAME>", "<NAME>"]
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = ["<NAME>", "<NAME>"]
__email__ = ["<EMAIL>" "<EMAIL>"]
from .install_phase import InstallPhase # isort:skip
from .oss_install import OffshoreSubstationInstallation
from .cable_install import ArrayCableInstallation, ExportCableInstallation
from .mooring_install import MooringSystemInstallation
from .turbine_install import TurbineInstallation
from .monopile_install import MonopileInstallation
from .quayside_assembly_tow import (
MooredSubInstallation,
GravityBasedInstallation,
)
from .scour_protection_install import ScourProtectionInstallation
|
johnjasa/ORBIT | tests/phases/design/test_export_system_design.py | <gh_stars>0
"""Tests for the `ExportSystemDesign` class."""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from copy import deepcopy
import pytest
from ORBIT.library import extract_library_specs
from ORBIT.phases.design import ExportSystemDesign
config = extract_library_specs("config", "export_design")
def test_export_system_creation():
    """Running the design populates all cable-related outputs."""
    export = ExportSystemDesign(config)
    export.run()

    for attr in (
        "num_cables",
        "length",
        "mass",
        "cable",
        "total_length",
        "total_mass",
    ):
        assert getattr(export, attr)
def test_number_cables():
    """Eleven export cables are required for the test configuration."""
    design = ExportSystemDesign(config)
    design.run()
    assert design.num_cables == 11
def test_cable_length():
    """Section length is the route length plus 1% excess."""
    design = ExportSystemDesign(config)
    design.run()
    assert design.length == (0.02 + 3 + 30) * 1.01
def test_cable_mass():
    """Cable mass is its length times the cable's linear density."""
    design = ExportSystemDesign(config)
    design.run()

    expected_length = (0.02 + 3 + 30) * 1.01
    assert design.mass == expected_length * design.cable.linear_density
def test_total_cable():
    """Totals scale the single-cable length and mass by the cable count."""
    design = ExportSystemDesign(config)
    design.run()

    length = 0.02 + 3 + 30
    length += length * 0.01
    mass = length * design.cable.linear_density

    assert design.total_mass == pytest.approx(mass * 11, abs=1e-10)
    assert design.total_length == pytest.approx(length * 11, abs=1e-10)
def test_cables_property():
    """Every section is assigned the selected cable type."""
    design = ExportSystemDesign(config)
    design.run()

    matches = design.sections_cables == design.cable.name
    assert matches.sum() == design.num_cables
def test_cable_lengths_property():
    """Each cable of the selected type gets the computed section length."""
    design = ExportSystemDesign(config)
    design.run()

    lengths = design.cable_lengths_by_type[design.cable.name]
    assert (lengths == design.length).sum() == design.num_cables
def test_total_cable_len_property():
    """Per-type total length matches the overall total length."""
    design = ExportSystemDesign(config)
    design.run()

    per_type = design.total_cable_length_by_type[design.cable.name]
    assert per_type == pytest.approx(design.total_length, abs=1e-10)
def test_design_result():
    """The design result exposes section lengths, count, and linear density."""
    design = ExportSystemDesign(config)
    design.run()

    _ = design.cable.name
    cables = design.design_result["export_system"]["cable"]
    assert cables["sections"] == [design.length]
    assert cables["number"] == 11
    assert cables["linear_density"] == design.cable.linear_density
def test_floating_length_calculations():
    """Default catenary modeling shortens cable vs. zero touchdown distance."""
    no_catenary = deepcopy(config)
    no_catenary["site"]["depth"] = 250
    no_catenary["export_system_design"]["touchdown_distance"] = 0

    baseline = ExportSystemDesign(no_catenary)
    baseline.run()

    with_catenary = deepcopy(config)
    with_catenary["site"]["depth"] = 250

    catenary = ExportSystemDesign(with_catenary)
    catenary.run()

    assert catenary.total_length < baseline.total_length
|
johnjasa/ORBIT | ORBIT/phases/design/design_phase.py | """Provides the base `DesignPhase` class."""
__author__ = ["<NAME>", "<NAME>"]
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from abc import abstractmethod
from ORBIT.phases import BasePhase
class DesignPhase(BasePhase):
    """BasePhase subclass for design modules."""

    # Configuration schema consumed by the phase; defined by subclasses.
    expected_config = None
    # Schema of the configuration the phase produces; defined by subclasses.
    output_config = None

    @property
    @abstractmethod
    def design_result(self):
        """
        Returns result of DesignPhase to be passed into config and consumed by
        InstallPhase.

        Returns
        -------
        dict
            Dictionary of design results.
        """

        return {}
|
johnjasa/ORBIT | tests/conftest.py | """Shared pytest settings and fixtures."""
import os
import pytest
from marmot import Environment
from ORBIT.core import Vessel
from tests.data import test_weather
from ORBIT.library import initialize_library, extract_library_specs
from ORBIT.phases.install.cable_install import SimpleCable
def pytest_configure():
    """Point ``pytest.library`` at the test data library and initialize it."""
    test_dir = os.path.dirname(os.path.abspath(__file__))
    pytest.library = os.path.join(test_dir, "data", "library")
    initialize_library(pytest.library)
@pytest.fixture()
def env():
    """Marmot environment driven by the canned test weather profile."""
    environment = Environment("Test Environment", state=test_weather)
    return environment
@pytest.fixture()
def wtiv():
    """Wind turbine installation vessel built from the test library."""
    return Vessel("Test WTIV", extract_library_specs("wtiv", "test_wtiv"))
@pytest.fixture()
def feeder():
    """Feeder barge built from the test library."""
    return Vessel("Test Feeder", extract_library_specs("feeder", "test_feeder"))
@pytest.fixture()
def cable_vessel():
    """Array cable installation vessel built from the test library."""
    specs = extract_library_specs(
        "array_cable_install_vessel", "test_cable_lay_vessel"
    )
    return Vessel("Test Cable Vessel", specs)
@pytest.fixture()
def heavy_lift():
    """Heavy lift (OSS installation) vessel built from the test library."""
    specs = extract_library_specs(
        "oss_install_vessel", "test_heavy_lift_vessel"
    )
    return Vessel("Test Heavy Vessel", specs)
@pytest.fixture()
def spi_vessel():
    """Scour protection installation vessel built from the test library."""
    specs = extract_library_specs("spi_vessel", "test_scour_protection_vessel")
    return Vessel("Test SPI Vessel", specs)
@pytest.fixture()
def simple_cable():
    """Minimal cable object with a fixed linear density."""
    return SimpleCable(linear_density=50.0)
|
johnjasa/ORBIT | ORBIT/phases/install/quayside_assembly_tow/__init__.py | """Quayside assembly and tow-out modules."""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from .moored import MooredSubInstallation
from .gravity_base import GravityBasedInstallation
|
johnjasa/ORBIT | tests/phases/design/test_spar_design.py | <filename>tests/phases/design/test_spar_design.py
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from copy import deepcopy
from itertools import product
import pytest
from ORBIT.phases.design import SparDesign
# Baseline configuration used by the cost-rate kwarg test below.
base = {
    "site": {"depth": 500},
    "plant": {"num_turbines": 50},
    "turbine": {"turbine_rating": 12},
    "spar_design": {},
}
@pytest.mark.parametrize(
    "depth,turbine_rating", product(range(100, 1201, 200), range(3, 15, 1))
)
def test_parameter_sweep(depth, turbine_rating):
    """All mass outputs stay positive across depth/rating combinations."""
    design = SparDesign(
        {
            "site": {"depth": depth},
            "plant": {"num_turbines": 50},
            "turbine": {"turbine_rating": turbine_rating},
            "substation_design": {},
        }
    )
    design.run()

    for output in (
        "stiffened_column_mass",
        "tapered_column_mass",
        "ballast_mass",
        "secondary_steel_mass",
    ):
        assert design.detailed_output[output] > 0
def test_design_kwargs():
    """Each cost-rate kwarg should change the total phase cost."""
    overrides = {
        "stiffened_column_CR": 3000,
        "tapered_column_CR": 4000,
        "ballast_material_CR": 200,
        "secondary_steel_CR": 7000,
    }

    baseline = SparDesign(base)
    baseline.run()
    base_cost = baseline.total_phase_cost

    for name, value in overrides.items():
        config = deepcopy(base)
        config["spar_design"] = {name: value}

        design = SparDesign(config)
        design.run()
        assert design.total_phase_cost != base_cost
|
johnjasa/ORBIT | ORBIT/phases/install/oss_install/standard.py | """`OffshoreSubstationInstallation` and related processes."""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import simpy
from marmot import process
from ORBIT.core import Vessel
from ORBIT.core.logic import shuttle_items_to_queue, prep_for_site_operations
from ORBIT.phases.install import InstallPhase
from ORBIT.phases.install.monopile_install.common import (
Monopile,
upend_monopile,
install_monopile,
)
from .common import Topside, install_topside
class OffshoreSubstationInstallation(InstallPhase):
    """
    Offshore Substation (OSS) installation process using a single heavy lift
    vessel and feeder barge.
    """

    phase = "Offshore Substation Installation"

    #:
    expected_config = {
        "num_substations": "int",
        "oss_install_vessel": "dict | str",
        "num_feeders": "int",
        "feeder": "dict | str",
        "site": {"distance": "km", "depth": "m"},
        "port": {
            "num_cranes": "int (optional, default: 1)",
            "monthly_rate": "USD/mo (optional)",
            "name": "str (optional)",
        },
        "offshore_substation_topside": {"deck_space": "m2", "mass": "t"},
        "offshore_substation_substructure": {
            "type": "Monopile",
            "deck_space": "m2",
            "mass": "t",
            "length": "m",
        },
    }

    def __init__(self, config, weather=None, **kwargs):
        """
        Creates an instance of OffshoreSubstationInstallation.

        Parameters
        ----------
        config : dict
            Simulation specific configuration.
        weather : np.ndarray
            Weather profile at site.
        """

        super().__init__(weather, **kwargs)

        # Resolve library references, validate against `expected_config`, and
        # fill in defaults before building the simulation.
        config = self.initialize_library(config, **kwargs)
        self.config = self.validate_config(config)
        self.extract_defaults()

        self.initialize_port()
        self.setup_simulation(**kwargs)

    def setup_simulation(self, **kwargs):
        """
        Initializes required objects for simulation.
        - Creates port + crane
        - Creates monopile and topside
        - Creates heavy lift vessel and feeder
        """

        self.initialize_topsides_and_substructures()
        self.initialize_oss_install_vessel()
        self.initialize_feeders()
        self.initialize_queue()

        site_distance = self.config["site"]["distance"]
        site_depth = self.config["site"]["depth"]
        # NOTE(review): local name is misspelled ("subsations").
        num_subsations = self.config["num_substations"]

        # Kick off the heavy lift vessel's installation process.
        install_oss_from_queue(
            self.oss_vessel,
            queue=self.active_feeder,
            site_depth=site_depth,
            distance=site_distance,
            substations=num_subsations,
            **kwargs,
        )

        component_list = ["Monopile", "Topside"]

        # Each feeder shuttles substructure/topside pairs from port to the
        # shared site queue.
        for feeder in self.feeders:
            shuttle_items_to_queue(
                feeder,
                port=self.port,
                queue=self.active_feeder,
                distance=site_distance,
                items=component_list,
                **kwargs,
            )

    def initialize_topsides_and_substructures(self):
        """
        Creates offshore substation objects at port.
        """

        top = Topside(**self.config["offshore_substation_topside"])
        sub = Monopile(**self.config["offshore_substation_substructure"])
        self.num_substations = self.config["num_substations"]

        # NOTE(review): the same Topside/Monopile instances are put into the
        # port once per substation — confirm shared instances are intended.
        for _ in range(self.num_substations):
            self.port.put(sub)
            self.port.put(top)

    def initialize_oss_install_vessel(self):
        """
        Creates the offshore substation installation vessel object.
        """

        oss_vessel_specs = self.config.get("oss_install_vessel", None)
        name = oss_vessel_specs.get("name", "Heavy Lift Vessel")
        oss_vessel = Vessel(name, oss_vessel_specs)
        self.env.register(oss_vessel)

        oss_vessel.initialize()
        oss_vessel.at_port = True
        oss_vessel.at_site = False
        self.oss_vessel = oss_vessel

    def initialize_feeders(self):
        """
        Initializes feeder barge objects.
        """

        number = self.config.get("num_feeders", None)
        feeder_specs = self.config.get("feeder", None)

        self.feeders = []
        for n in range(number):
            # TODO: Add in option for named feeders.
            name = "Feeder {}".format(n)

            feeder = Vessel(name, feeder_specs)
            self.env.register(feeder)

            feeder.initialize()
            feeder.at_port = True
            feeder.at_site = False
            self.feeders.append(feeder)

    def initialize_queue(self):
        """
        Creates the queue that feeders will join at site. Limited to one active
        feeder at a time.
        """

        self.active_feeder = simpy.Resource(self.env, capacity=1)
        self.active_feeder.vessel = None
        self.active_feeder.activate = self.env.event()

    @property
    def detailed_output(self):
        """Returns detailed outputs of the oss installation."""

        # Feeders carry the components; if there are none, the heavy lift
        # vessel is the transport vessel.
        if self.feeders:
            transport_vessels = [*self.feeders]

        else:
            transport_vessels = [self.oss_vessel]

        outputs = {
            self.phase: {
                **self.agent_efficiencies,
                **self.get_max_cargo_mass_utilzations(transport_vessels),
                **self.get_max_deck_space_utilzations(transport_vessels),
            }
        }

        return outputs
@process
def install_oss_from_queue(vessel, queue, substations, distance, **kwargs):
    """
    Installs offshore substations and their substructures from a queue of
    feeder barges.

    Parameters
    ----------
    vessel : Vessel
        Heavy lift vessel performing the installations.
    queue : simpy.Resource
        Queue of active feeder barges to take components from.
    substations : int
        Number of substations to install.
    distance : int | float
        Distance from site to port (km).
    """

    n = 0
    while n < substations:
        if vessel.at_port:
            # Transit to site
            vessel.at_port = False
            yield vessel.transit(distance)
            vessel.at_site = True

        if vessel.at_site:

            if queue.vessel:

                # Prep for monopile install
                yield prep_for_site_operations(
                    vessel, survey_required=True, **kwargs
                )

                # Get monopile
                monopile = yield vessel.get_item_from_storage(
                    "Monopile", vessel=queue.vessel, **kwargs
                )

                yield upend_monopile(vessel, monopile.length, **kwargs)
                yield install_monopile(vessel, monopile, **kwargs)

                # Get topside
                topside = yield vessel.get_item_from_storage(
                    "Topside", vessel=queue.vessel, release=True, **kwargs
                )
                yield install_topside(vessel, topside, **kwargs)
                n += 1

            else:
                # No active feeder at site; wait until one arrives and log
                # the idle time as a delay.
                start = vessel.env.now
                yield queue.activate
                delay_time = vessel.env.now - start
                vessel.submit_action_log("Delay", delay_time, location="Site")

    # Transit to port
    vessel.at_site = False
    yield vessel.transit(distance)
    vessel.at_port = True

    # Fix: the message previously read "Monopile installation complete!",
    # copied from the monopile module; this phase installs substations.
    vessel.submit_debug_log(
        message="Offshore substation installation complete!",
        progress="Offshore Substation",
    )
|
dinarosv/fasttext-model | data/english_tweets/preprocess_tweets.py | <filename>data/english_tweets/preprocess_tweets.py
import csv
import re
import string
def remove_chars(text):
    """Strip URLs, @-mentions, punctuation, and newlines from *text*."""
    url_pattern = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    text = re.sub(url_pattern, '', text)   # drop links
    text = re.sub(r'\w*@\w*', '', text)    # drop @handles
    # drop punctuation
    text = text.translate(str.maketrans('', '', string.punctuation))
    # drop newlines and trim surrounding whitespace
    return text.replace("\n", "").strip()
if __name__ == "__main__":
    # Build a class-balanced dataset: up to 2363 positive and 2363
    # non-positive tweets.
    pos = 2363
    neg = 2363
    with open('Tweets.csv', mode='r') as csv_file:
        with open('even_dataset.txt', "w") as train_file:
            csv_reader = csv.DictReader(csv_file)
            line_count = 0
            for row in csv_reader:
                if line_count == 0:
                    print(f'Column names are {", ".join(row)}')
                    line_count += 1
                text = row["text"]
                text = remove_chars(text)
                # __label__2 = positive; __label__1 = everything else
                # (NOTE(review): neutral tweets count toward the "negative"
                # quota here — confirm that is intended).
                if row["airline_sentiment"] == "positive" and pos > 0:
                    pos -= 1
                    train_file.write(f'__label__2 {text}' + "\n")
                elif neg > 0:
                    neg -= 1
                    train_file.write(f'__label__1 {text}' + "\n")
                line_count += 1
                # Stop once both quotas are filled.
                if neg == 0 and pos == 0:
                    break
    print(f'Processed {line_count} lines.')
dinarosv/fasttext-model | data/norec/norec_prep.py | from scripts import load, html_to_text
import os
import string
import re
def get_stop_words():
    """Read the stop-word list (one word per line) from the shared file."""
    with open("../stopwords/stopwords.txt") as infile:
        return [line.split("\n")[0] for line in infile]
def remove_stop_words(querytext, stop_words):
    """Return *querytext* with every word in *stop_words* removed.

    Matching is case-insensitive; the original casing and ordering of the
    remaining words are preserved.

    Bug fix: the previous version ignored the ``stop_words`` argument and
    re-read the list from disk on every call via ``get_stop_words()``. The
    caller-provided list is now respected.

    Parameters
    ----------
    querytext : str
        Text to filter.
    stop_words : list[str]
        Lower-cased words to remove.
    """
    kept = [
        word for word in querytext.split() if word.lower() not in stop_words
    ]
    return ' '.join(kept)
def remove_spaces(text):
    """Replace newlines, hyphens, and non-breaking spaces with plain spaces."""
    for ch in ('\n', '-', '\u00A0'):
        text = text.replace(ch, ' ')
    return text
def remove_numbers(text):
    """Strip every decimal digit from *text*."""
    digits = re.compile(r"\d")
    return digits.sub("", text)
# Prep does the same preprocessing as fastText in their examples
def prep(text):
    """Lowercase, blank out fastText punctuation, and squeeze space runs."""
    table = str.maketrans({ch: " " for ch in ".,/'\"()!?;:"})
    text = text.lower().translate(table)
    return re.sub(' +', ' ', text)
def preprocessing(subset, output, testset):
    """Convert a NoReC subset to fastText format and write it to *output*.

    Ratings > 4 map to ``__label__2`` (positive), ratings < 3 to
    ``__label__0`` (negative), and everything else to ``__label__1``
    (neutral). Positive examples are also echoed to stdout, matching the
    original behavior.

    Fixes: the output file is now managed with a context manager (it was
    previously left open if conversion raised) and the loop variable no
    longer shadows the builtin ``set``.

    Parameters
    ----------
    subset : str
        NoReC subset name ("train", "dev", or "test").
    output : str
        Path of the fastText-formatted output file.
    testset : bool
        If True, append to *output* (dev and test are combined into one
        file); otherwise overwrite it.

    Returns
    -------
    int
        Number of examples written.
    """
    t_data = load("html.tar.gz", subset=subset)

    # Convert html to text and pair it with the review's rating.
    examples = [(html_to_text(html), metadata['rating'])
                for html, metadata in t_data]

    # We combine validation and test sets into one test file, giving a
    # 20/80 split of the data — hence append mode for test sets.
    mode = "a" if testset else "w"
    with open(output, mode) as train_file:
        for raw_text, rating in examples:
            text = prep(remove_spaces(raw_text))
            if rating > 4:
                train_file.write('__label__' + str(2) + ' ' + text + '\n')
                print(str(rating) + " - " + text)
            elif rating < 3:
                train_file.write('__label__' + str(0) + ' ' + text + '\n')
            else:
                train_file.write('__label__' + str(1) + ' ' + text + '\n')

    return len(examples)
if __name__ == "__main__":
    # Remove any stale combined test file; the dev and test subsets below
    # are both appended into test.txt.
    if os.path.isfile('test.txt'):
        os.remove('test.txt')
    train_length = preprocessing("train", "train.txt", False)
    dev_length = preprocessing("dev", "test.txt", True)
    test_length = preprocessing("test", "test.txt", True)
    print("Total:\t\t" + str(dev_length + test_length + train_length))
    print("Test set:\t" + str(dev_length + test_length))
    print("Training set:\t" + str(train_length))
dinarosv/fasttext-model | testengmodel.py | <reponame>dinarosv/fasttext-model
from fastText import load_model
# Load the trained fastText sentiment model.
m = load_model("models/english_model.bin")

# Each line starts with "__label__<digit>" (9 chars of prefix), so the gold
# label digit sits at index 9 and the tweet text follows from index 10.
with open("watson/examplesforwatson.txt") as test:
    for line in test:
        correct = line[9:10]
        predict = line[10:].replace("\n", "")
        prediciton = m.predict(predict, 1)
        # Predicted label string is "__label__<digit>"; extract the digit.
        value = prediciton[0][0].split('__')[2]
        # Print only misclassified examples with their confidence score.
        if value != correct:
            print(value + " " + str(prediciton[1][0]) + " " + line)
|
dinarosv/fasttext-model | data/tn_mixed/shuffleset.py | <filename>data/tn_mixed/shuffleset.py
import random

# Decorate-sort-undecorate shuffle: pair each line with a random key, sort
# on the key, then write the lines out in the resulting (shuffled) order.
with open('mixed.txt','r') as source:
    data = [ (random.random(), line) for line in source]
data.sort()
with open('mixed_shuffled.txt','w') as target:
    for _, line in data:
        target.write( line )
dinarosv/fasttext-model | data/twitter/fasttext_prep.py | <filename>data/twitter/fasttext_prep.py<gh_stars>0
import re
# Prep does the same preprocessing as fastText in their examples
def prep(text):
    """Lowercase, blank punctuation, squeeze spaces, strip, add newline."""
    table = str.maketrans({ch: " " for ch in ".,/'\"()!?;:"})
    cleaned = re.sub(' +', ' ', text.lower().translate(table)).strip()
    return cleaned + "\n"
# Split twitter dataset in training and testing with ratio 80/20
with open("ns_shuffled_dataset.txt") as infile:
    with open("ns_train.txt", "w") as train:
        with open("ns_test.txt", "w") as test:
            for index, line in enumerate(infile):
                # Skip the header row; data lines are "<label>;<text>".
                if index != 0:
                    # 110602 is presumably the dataset size — TODO confirm.
                    if index < round(110602 * 0.8):
                        train.write("__label__" + line.split(";")[0] + " " + line.split(";")[1] + "\n")
                    else:
                        test.write("__label__" + line.split(";")[0] + " " + line.split(";")[1] + "\n")
dinarosv/fasttext-model | data/ft/preprocess.py |
# Convert "label;text" lines to fastText format and split 80/20 into
# train/test files. 114742 is presumably the dataset size — TODO confirm.
with open("two_labels.txt") as dataset:
    with open("twotrain.txt", "w") as train:
        with open("twotest.txt", "w") as test:
            for index, line in enumerate(dataset):
                # Skip the header row.
                if index > 0:
                    text = "__label__" + line.split(";")[0] + " " + line.split(";")[1] + "\n"
                    if index < (114742*0.8):
                        train.write(text)
                    else:
                        test.write(text)
dinarosv/fasttext-model | data/norec/fasttext_prep.py | with open("dataset.txt") as dataset:
with open("train.txt", "w") as train:
with open("test.txt", "w") as test:
for index, line in enumerate(dataset):
text = "__label__" + line[0] + line[1:]
if index < (35189*0.8):
train.write(text)
else:
test.write(text) |
dinarosv/fasttext-model | data/norec/scripts/misc.py | <filename>data/norec/scripts/misc.py
# Copyright 2017 <NAME> <<EMAIL>>
# This file is part of norec.
# norec is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# norec is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with norec. If not, see <http://www.gnu.org/licenses/>.
import tarfile
import locale
import codecs
from io import BytesIO
class TarFile(tarfile.TarFile):
    """Subclass of tarfile.TarFile adding some read abstractions."""

    def get_files(self, encoding=None):
        """Yield ``(member name, readable file object)`` for every member.

        Each yielded file object is only valid until the generator is
        advanced again, because the ``with`` block below closes it then.
        """
        # tarfile's next() returns None when the archive is exhausted,
        # which is exactly what iter()'s sentinel form expects.
        for member in iter(self.next, None):
            with self.open_file(member, encoding=encoding) as fd:
                yield member.name, fd

    def open_file(self, member, mode="r", encoding=None):
        """Return a file object for a file in the archive.

        Text mode without an explicit encoding falls back to the
        locale's preferred encoding.
        """
        if "b" not in mode and not encoding:
            encoding = locale.getpreferredencoding(False)
        return self.readfile(member, encoding)

    def readfile(self, member, encoding):
        """Provide a file object for *reading* a file in the archive."""
        fd = self.extractfile(member)
        if encoding:
            # File object from tarfile returns bytes, so wrap a decoder
            # to read strings.
            return codecs.getreader(encoding)(fd)
        return fd
|
dinarosv/fasttext-model | data/tn_mixed/fasttext_prep.py | <filename>data/tn_mixed/fasttext_prep.py<gh_stars>0
# Convert the unstemmed "label;text" dataset to fastText format and
# split it 80/20 into train/test files.
with open('not_stemmed.txt') as readfile, \
        open('train.txt', 'w') as trainfile, \
        open('test.txt', 'w') as testfile:
    for index, line in enumerate(readfile):
        parts = line.split(";")
        txt = "__label__" + parts[0] + " " + parts[1] + "\n"
        target = trainfile if index < (207302*0.8) else testfile
        target.write(txt)
dinarosv/fasttext-model | data/imdb/fasttext_prep.py | import re
import string
# URL matcher compiled once at module load so remove_chars() stays cheap
# when called per line in a preprocessing loop.
_URL_RE = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
# Translation table that deletes every ASCII punctuation character.
_PUNCT_TABLE = str.maketrans('', '', string.punctuation)


def remove_chars(text):
    """Strip links, punctuation and newlines from *text* and trim the ends."""
    text = _URL_RE.sub('', text)          # remove links
    text = text.translate(_PUNCT_TABLE)   # remove punctuation
    return text.replace("\n", "").strip()  # remove newlines and strip text
# Convert the IMDB dump to fastText format with an 80/20 train/test
# split, counting how many positive/negative labels were seen.
pos = 0
neg = 0
with open("dataset.txt") as dataset, \
        open("train.txt", "w") as trainfile, \
        open("test.txt", "w") as testfile:
    for index, line in enumerate(dataset):
        if index == 0:
            continue  # header row
        value = int(line.split(" ")[1])+1  # raw labels 0/1 become 1/2
        if value == 2:
            pos += 1
        if value == 1:
            neg += 1
        nline = "__label__" + str(value) + " " + remove_chars(line[11:])
        target = trainfile if index < round(25000*0.8) else testfile
        target.write(nline + "\n")
print(pos)
print(neg)
|
dinarosv/fasttext-model | data/twitter/countlabels.py |
# Count the sentiment labels in dataset.txt; the label is the first
# character of each line (0, 1 or 2).
pos = 0
neu = 0
neg = 0
with open("dataset.txt") as f:
    for line in f:
        label = line[0]
        if label == "0":
            neg += 1
        elif label == "1":
            neu += 1
        elif label == "2":
            pos += 1
print(pos)
print(neu)
print(neg)
dinarosv/fasttext-model | data/english_tweets/fasttext_prep.py | # Split twitter dataset in training and testing with ratio 80/20
# Split the (already labelled) English tweets 80/20 into train/test
# files; line 0 is a header and is skipped.
with open("snewdataset.txt") as infile, \
        open("train.txt", "w") as train, \
        open("test.txt", "w") as test:
    for index, line in enumerate(infile):
        if index == 0:
            continue
        target = train if index < round(4724 * 0.8) else test
        target.write(line)
dinarosv/fasttext-model | data/norec/scripts/conllu.py | <gh_stars>0
# Copyright 2017 <NAME> <<EMAIL>>
# This file is part of norec.
# norec is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# norec is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with norec. If not, see <http://www.gnu.org/licenses/>.
# Column names of the ten standard CoNLL-U fields, in file order.
CONLLU_FIELDS = ('id', 'form', 'lemma', 'upostag', 'xpostag', 'feats',
                 'head', 'deprel', 'deps', 'misc')


def parse_conllu_line(line):
    """Map one tab-separated CoNLL-U token line onto its field names."""
    # zip() stops at the shorter sequence, so lines with fewer columns
    # simply produce a dict with fewer keys.
    return {field: value for field, value in zip(CONLLU_FIELDS, line.split("\t"))}
def empty_line(line):
    """Check whether *line* is empty or whitespace-only."""
    # `not` already yields a bool, so the original bool() wrapper was
    # redundant; the docstring typo ("emtpy") is fixed as well.
    return not line.strip()
def is_comment(line):
    """Check whether *line* is a CoNLL-U comment (starts with '#')."""
    return line[:1] == "#"
def parse_conllu(conllu):
    """Parse a CoNLL-U document, yielding one sentence at a time.

    A sentence is a list of token dicts as produced by
    parse_conllu_line(); a blank line ends a sentence.
    """
    sentence = []
    for line in conllu.split("\n"):
        if is_comment(line):
            continue  # comments carry no token data
        if empty_line(line):
            # Blank line marks end of sentence; note an empty sentence is
            # still yielded, matching the original behaviour.
            yield sentence
            sentence = []
            continue
        word = parse_conllu_line(line)
        if word:
            sentence.append(word)
    if sentence:
        # Flush a trailing sentence not followed by a blank line.
        yield sentence
|
dinarosv/fasttext-model | data/norec/scripts/__init__.py | <reponame>dinarosv/fasttext-model
from .main import load, html_to_text, conllu_to_tokens, load_metadata
|
dinarosv/fasttext-model | data/norec/scripts/main.py | # Copyright 2017 <NAME> <<EMAIL>>
# This file is part of norec.
# norec is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# norec is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with norec. If not, see <http://www.gnu.org/licenses/>.
import json
import os.path
import lxml.html
from .conllu import parse_conllu
from .misc import TarFile
def load_metadata(filename):
    """Read a UTF-8 JSON file and return the decoded object."""
    with open(filename, encoding="utf-8") as handle:
        return json.load(handle)
def get_split_and_id(filename):
    """Extract the split name (parent directory) and file id (stem)."""
    directory, basename = os.path.split(filename)
    stem, _extension = os.path.splitext(basename)
    return os.path.basename(directory), stem
def load(filename, subset=None):
    """Load a subset from a NoReC archive, yielding (content, metadata).

    *filename* points at a .tar.gz archive; its metadata.json sibling
    maps file ids to metadata records.
    """
    # Metadata lives next to the archive itself.
    metadata = load_metadata(os.path.join(os.path.dirname(filename),
                                          "metadata.json"))
    # Stream the gzipped tar and walk its members.
    with TarFile.open(filename, "r|gz") as archive:
        for member_name, handle in archive.get_files(encoding="utf-8"):
            split, ident = get_split_and_id(member_name)
            if subset and split != subset:
                continue  # only yield files for the requested subset
            yield handle.read(), metadata[ident]
def html_to_text(html):
    """Convert HTML to text, stripping all tags and dropping any content
    enclosed in a <remove> tag."""
    fragments = lxml.html.fragments_fromstring(html)
    kept = (node.text_content() for node in fragments if node.tag != "remove")
    return "\n\n".join(kept)
def conllu_to_tokens(conllu):
    """Extract tokens from CoNLL-U, flattening sentences into one stream."""
    for sentence in parse_conllu(conllu):
        yield from sentence
|
dinarosv/fasttext-model | watson/watsontranslate.py | import json
from watson_developer_cloud import NaturalLanguageUnderstandingV1, LanguageTranslatorV3
from apikeys import apikeytrans
if __name__ == "__main__":
    # Translate the Norwegian validation set to English with Watson
    # Language Translator, sending the lines in a few large batches.
    translator = LanguageTranslatorV3(
        version='2018-05-01',
        iam_apikey=apikeytrans,
        url='https://gateway-lon.watsonplatform.net/language-translator/api')

    with open('./text/en_validation.txt', 'w') as outfile:
        with open("./text/validation.txt") as infile:
            table = []
            for index, line in enumerate(infile):
                # Split on '__' and take the third chunk minus its first
                # character, i.e. drop the fastText label prefix.
                data = line.split('__')[2][1:]
                table.append(data)
                # NOTE(review): the flush points (35/70/105) and the final
                # break are hard-coded to this particular file's length —
                # confirm before reusing with other inputs.
                if index == 35 or index == 70 or index == 105:
                    raw_translations = translator.translate(
                        text=table,
                        model_id='nb-en').get_result()
                    table = []
                    for item in raw_translations["translations"]:
                        outfile.write(item["translation"])
                if index == 105:
                    break
|
dinarosv/fasttext-model | watson/watsonvalidate.py | <reponame>dinarosv/fasttext-model
import json
import os
from watson_developer_cloud import NaturalLanguageUnderstandingV1, LanguageTranslatorV3
from watson_developer_cloud.natural_language_understanding_v1 import Features, SentimentOptions
from apikeys import apikeyvalid
import time
if __name__ == "__main__":
    # Label Amazon reviews with Watson NLU sentiment and append them in
    # fastText format ("__label__2" positive, "__label__1" otherwise).
    nlu = NaturalLanguageUnderstandingV1(
        version='2018-11-16',
        iam_apikey=apikeyvalid,
        url='https://gateway-lon.watsonplatform.net/natural-language-understanding/api'
    )

    with open('../data/amazon/testfile.txt') as infile:
        with open('../data/amazon/results.txt', 'a') as outfile:
            # Skip the header row and cap the run at 4000 lines.
            for index, line in enumerate(infile):
                if index > 0 and index < 4000:
                    # Strip the leading label token from the line.
                    # NOTE(review): str.replace removes *every* occurrence
                    # of the token, not just the leading one — verify the
                    # label string never appears inside the review text.
                    val = line.replace(line.split(' ')[0], "")
                    val = val[1:]
                    response = nlu.analyze(
                        text=val,
                        features=Features(sentiment=SentimentOptions()),
                        language='en').get_result()
                    score = response["sentiment"]["document"]["score"]
                    # Watson returns a signed score; > 0 means positive.
                    if score > 0:
                        outfile.write("__label__2 "+val)
                    else:
                        outfile.write("__label__1 "+val)
dinarosv/fasttext-model | watson/watsontesting.py |
if __name__ == "__main__":
    # Compare Watson's predicted labels against the gold labels line by
    # line and report accuracy. The label digit sits at offset 9, right
    # after the "__label__" prefix.
    corrects = 0
    total = 0
    with open("../data/amazon/result_wat.txt") as watsonfile, open("../data/amazon/cuttest.ft.txt") as validationfile:
        for predicted_raw, expected_raw in zip(watsonfile, validationfile):
            predicted = predicted_raw.strip()
            expected = expected_raw.strip()
            if predicted[9:10] == expected[9:10]:
                corrects += 1
            else:
                # Show each mismatching pair for inspection.
                print(predicted)
                print(expected)
            total += 1
    print(total)
    print("Accuracy Watson: " + str(round((corrects/total * 100),2)) + "%")
    # Result: 85.68% for 30 000 lines
dinarosv/fasttext-model | trainmodel.py | <reponame>dinarosv/fasttext-model
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import fastText
from fastText import train_supervised
import os
import time
import sys
# Arguments: Trainfile, testfile, model, bestparameters
# Hyper parameters passed straight to fastText's train_supervised().
# Values in {braces} note defaults/tried values; [brackets] note the
# explored range.
hyper_params = {
    "epoch": 25,  # number of loops through same example {5} [5 - 50]
    "lr": 0.05,  # learning rate {0.05, 0.1, 0.25, 0.5} [0 - 1] {0.05}
    "wordNgrams": 3,  # relation to surrounding words [1 - 5]
    "minCount": 3,  # minimal number of word occurrences {5}
    "dim": 100,  # dimension of vectors {100}
    "bucket": 2000000,  # number of buckets {2000000}
    "loss": "softmax",  # loss function {ns, hs, softmax} [ns]
    "ws": 128,  # window size {5}
    "minn": 3,  # min length of char ngram [3]
    "maxn": 7  # max length of char ngram [6]
}
# Print the test-set size and precision; when precision exceeds 50%, the
# hyper parameters are appended to the parameter log file.
def print_results(N, p, r):
    """Report fastText test() results (N examples, precision p, recall r)."""
    print("Examples:\t" + str(N))
    print("Precision:\t" + str(round(p*100, 2)) + " %")
    if len(sys.argv) > 4:
        paramfile = sys.argv[4]
    else:
        paramfile = "data/norec/bestparams"
    if p > 0.50:
        with open(paramfile, 'a') as log:
            log.write(str(round(p, 3)) + " " + str(hyper_params) + "\n")
if __name__ == "__main__":
    # Train a fastText supervised model and report precision/time.
    start = time.time()

    # Optional CLI overrides: trainfile, testfile, modelfile.
    length = len(sys.argv)
    trainfile = sys.argv[1] if length > 1 else "data/norec/train.txt"
    testfile = sys.argv[2] if length > 2 else "data/norec/test.txt"
    modelfile = sys.argv[3] if length > 3 else "models/model.bin"

    # Path to train and test data (DATADIR env var is an optional prefix).
    train_data = os.path.join(os.getenv("DATADIR", ''), trainfile)
    valid_data = os.path.join(os.getenv("DATADIR", ''), testfile)

    # Print the hyper parameters in bold via ANSI escape codes.
    print('\033[1m'+str(hyper_params).strip("{}").replace("'", "")+'\033[0m')

    # Fasttext's supervised training function
    model = train_supervised(
        input=train_data,
        thread=3,
        **hyper_params
    )

    print_results(*model.test(valid_data))

    # print("Quantizing: ")
    # model.quantize(input=train_data, qnorm=True, retrain=True, cutoff=100000)

    model.save_model(modelfile)

    end = time.time()
    total = end-start
    print("Time training:\t" + str(round(total,1)) + "s")
|
dinarosv/fasttext-model | data/norec/countlabels.py |
# Tally the sentiment labels in dataset.txt; the label is the first
# character of each line (0, 1 or 2).
neg = 0
neu = 0
pos = 0
with open("dataset.txt") as readfile:
    for line in readfile:
        label = line[0]
        if label == "0":
            neg += 1
        elif label == "1":
            neu += 1
        elif label == "2":
            pos += 1
print(neg)
print(neu)
print(pos)
dinarosv/fasttext-model | testmodel.py | <reponame>dinarosv/fasttext-model
from fastText import load_model
import sys
import os
from trainmodel import print_results
import time
def stripName(text):
    """Return the label digit from a fastText predict() result tuple."""
    # text looks like (('__label__X',), (probability,)); splitting the
    # first label on '__' leaves the bare label value at index 2.
    label = text[0][0]
    return label.split('__')[2]
def strip_prob(text):
    """Return the probability from a fastText predict() result tuple."""
    probabilities = text[1]
    return probabilities[0]
def count_labels():
    """Count the negative/neutral/positive labels in a fastText train file.

    The file path is taken from sys.argv[2] when present, otherwise a
    default NoReC path is used. Prints one count per label class.
    """
    pos = 0
    neg = 0
    neu = 0
    # Bug fix: the guard was `len(sys.argv) > 1` while indexing argv[2],
    # which raised IndexError when exactly one extra argument was given.
    trainfile = sys.argv[2] if len(sys.argv) > 2 else 'data/norec/train.txt'
    with open(trainfile) as textfile:
        for line in textfile:
            # First token is "__label__X"; keep only X.
            val = line.split(' ')[0].split('__label__')[1]
            if val == '6':
                pos += 1
            elif val == '3':
                neu += 1
            elif val == '1':
                neg += 1
    print("negative: " + str(neg))
    print("neutral: " + str(neu))
    print("positive: " + str(pos))
if __name__ == "__main__":
    # Interactive sentiment console: load a fastText model, optionally
    # report precision on a test set, then classify stdin lines until "q".
    #
    # Bug fixes: the argv guards were off by one — `len(sys.argv) > 0` is
    # always true (IndexError with no arguments), and sys.argv[3] needs
    # `len(sys.argv) > 3`, not `> 2`.
    model = sys.argv[1] if len(sys.argv) > 1 else "models/model.bin"
    m = load_model(model)
    if len(sys.argv) > 3:
        test_data = os.path.join(os.getenv("DATADIR", ''), sys.argv[3])
        print_results(*m.test(test_data))

    text = ""
    # --- Input from console ---
    while text != "q":
        print("Type a sentence to analyze and then press ENTER...")
        text = sys.stdin.readline().replace("\n", "")
        prediction = m.predict(text, 1)
        value = stripName(prediction)
        prob = strip_prob(prediction)
        if value == "1":
            print("Value: Negative, Probability: " + str(round(prob, 4)))
        elif value == "2":
            print("Value: Positive, Probability: " + str(round(prob, 4)))
        else:
            print("Value: Neutral, Probability: " + str(round(prob, 4)))
|
sotte/great_expectations | tests/test_pandas_dataset.py | from __future__ import division
import unittest
import json
import datetime
import pandas as pd
import great_expectations as ge
from .test_utils import assertDeepAlmostEqual
class TestPandasDataset(unittest.TestCase):
def run_encapsulated_test(self, expectation_name, filename):
    """Run the JSON-defined test suite in *filename* against the named
    expectation on a PandasDataset built from the suite's "dataset" key.

    Each entry of T["tests"] supplies 'in' kwargs and an expected 'out'.
    """
    with open(filename) as f:
        T = json.load(f)

    D = ge.dataset.PandasDataset(T["dataset"])
    # NOTE(review): other tests in this file use "result_format"; confirm
    # "output_format" is the intended argument name here.
    D.set_default_expectation_argument("output_format", "COMPLETE")

    self.maxDiff = None

    for t in T["tests"]:
        if "title" in t:
            print(t["title"])
        else:
            print("WARNING: test set has no `title` field. In future versions of Great Expectations, this will be required.")

        expectation = getattr(D, expectation_name)
        out = expectation(**t['in'])
        # Round-trip through JSON so comparison ignores non-JSON types.
        out = json.loads(json.dumps(out))

        self.assertEqual(out, t['out'])
# def test_expect_column_values_to_be_between(self):
# """
# """
# with open("./tests/test_sets/expect_column_values_to_be_between_test_set.json") as f:
# fixture = json.load(f)
# dataset = fixture["dataset"]
# tests = fixture["tests"]
# D = ge.dataset.PandasDataset(dataset)
# D.set_default_expectation_argument("result_format", "COMPLETE")
# self.maxDiff = None
# for t in tests:
# out = D.expect_column_values_to_be_between(**t['in'])
# # print '-'*80
# print(t)
# # print(json.dumps(out, indent=2))
# if 'out' in t:
# self.assertEqual(t['out']['success'], out['success'])
# if 'unexpected_index_list' in t['out']:
# self.assertEqual(t['out']['unexpected_index_list'], out['result']['unexpected_index_list'])
# if 'unexpected_list' in t['out']:
# self.assertEqual(t['out']['unexpected_list'], out['result']['unexpected_list'])
# if 'error' in t:
# self.assertEqual(out['exception_info']['raised_exception'], True)
# self.assertIn(t['error']['traceback_substring'], out['exception_info']['exception_traceback'])
# def test_expect_column_values_to_match_regex_list(self):
# with open("./tests/test_sets/expect_column_values_to_match_regex_list_test_set.json") as f:
# J = json.load(f)
# D = ge.dataset.PandasDataset(J["dataset"])
# D.set_default_expectation_argument("result_format", "COMPLETE")
# T = J["tests"]
# self.maxDiff = None
# for t in T:
# out = D.expect_column_values_to_match_regex_list(**t['in'])
# self.assertEqual(t['out']['success'], out['success'])
# if 'unexpected_index_list' in t['out']:
# self.assertEqual(t['out']['unexpected_index_list'], out['result']['unexpected_index_list'])
# if 'unexpected_list' in t['out']:
# self.assertEqual(t['out']['unexpected_list'], out['result']['unexpected_list'])
def test_expect_column_values_to_match_strftime_format(self):
    """strftime-format matching: valid date strings pass, malformed
    dates are flagged as unexpected, and non-string values raise
    TypeError (captured via catch_exceptions)."""
    D = ge.dataset.PandasDataset({
        'x' : [1,2,4],
        'us_dates' : ['4/30/2017','4/30/2017','7/4/1776'],
        'us_dates_type_error' : ['4/30/2017','4/30/2017', 5],
        'almost_iso8601' : ['1977-05-25T00:00:00', '1980-05-21T13:47:59', '2017-06-12T23:57:59'],
        'almost_iso8601_val_error' : ['1977-05-55T00:00:00', '1980-05-21T13:47:59', '2017-06-12T23:57:59'],
        'already_datetime' : [datetime.datetime(2015,1,1), datetime.datetime(2016,1,1), datetime.datetime(2017,1,1)]
    })
    D.set_default_expectation_argument("result_format", "COMPLETE")

    # Each case supplies 'in' kwargs plus either an expected 'out' result
    # or an 'error' whose traceback substring must appear.
    T = [
            {
                'in':{'column':'us_dates', 'strftime_format':'%m/%d/%Y'},
                'out':{'success':True, 'unexpected_index_list':[], 'unexpected_list':[]}
            },
            {
                'in':{'column':'us_dates_type_error','strftime_format':'%m/%d/%Y', 'mostly': 0.5, 'catch_exceptions': True},
                # 'out':{'success':True, 'unexpected_index_list':[2], 'unexpected_list':[5]}},
                'error':{
                    'traceback_substring' : 'TypeError'
                },
            },
            {
                'in':{'column':'us_dates_type_error','strftime_format':'%m/%d/%Y', 'catch_exceptions': True},
                'error':{
                    'traceback_substring' : 'TypeError'
                }
            },
            {
                'in':{'column':'almost_iso8601','strftime_format':'%Y-%m-%dT%H:%M:%S'},
                'out':{'success':True,'unexpected_index_list':[], 'unexpected_list':[]}},
            {
                'in':{'column':'almost_iso8601_val_error','strftime_format':'%Y-%m-%dT%H:%M:%S'},
                'out':{'success':False,'unexpected_index_list':[0], 'unexpected_list':['1977-05-55T00:00:00']}},
            {
                'in':{'column':'already_datetime','strftime_format':'%Y-%m-%d', 'catch_exceptions':True},
                # 'out':{'success':False,'unexpected_index_list':[0], 'unexpected_list':['1977-05-55T00:00:00']},
                'error':{
                    'traceback_substring' : 'TypeError: Values passed to expect_column_values_to_match_strftime_format must be of type string.'
                },
            }
    ]

    for t in T:
        out = D.expect_column_values_to_match_strftime_format(**t['in'])
        if 'out' in t:
            self.assertEqual(t['out']['success'], out['success'])
            if 'unexpected_index_list' in t['out']:
                self.assertEqual(t['out']['unexpected_index_list'], out['result']['unexpected_index_list'])
            if 'unexpected_list' in t['out']:
                self.assertEqual(t['out']['unexpected_list'], out['result']['unexpected_list'])
        elif 'error' in t:
            self.assertEqual(out['exception_info']['raised_exception'], True)
            self.assertIn(t['error']['traceback_substring'], out['exception_info']['exception_traceback'])
def test_expect_column_values_to_be_dateutil_parseable(self):
    """dateutil parseability: parseable strings pass, unparseable
    strings are flagged, and non-string values raise TypeError."""
    D = ge.dataset.PandasDataset({
        'c1':['03/06/09','23 April 1973','January 9, 2016'],
        'c2':['9/8/2012','covfefe',25],
        'c3':['Jared','June 1, 2013','July 18, 1976'],
        'c4':['1', '2', '49000004632'],
        'already_datetime' : [datetime.datetime(2015,1,1), datetime.datetime(2016,1,1), datetime.datetime(2017,1,1)],
    })
    D.set_default_expectation_argument("result_format", "COMPLETE")

    # Same 'in'/'out'/'error' case structure as the strftime test above.
    T = [
            {
                'in':{'column': 'c1'},
                'out':{'success':True, 'unexpected_list':[], 'unexpected_index_list': []}},
            {
                'in':{"column":'c2', "catch_exceptions":True},
                # 'out':{'success':False, 'unexpected_list':['covfefe', 25], 'unexpected_index_list': [1, 2]}},
                'error':{ 'traceback_substring' : 'TypeError: Values passed to expect_column_values_to_be_dateutil_parseable must be of type string' },
            },
            {
                'in':{"column":'c3'},
                'out':{'success':False, 'unexpected_list':['Jared'], 'unexpected_index_list': [0]}},
            {
                'in':{'column': 'c3', 'mostly':.5},
                'out':{'success':True, 'unexpected_list':['Jared'], 'unexpected_index_list': [0]}
            },
            {
                'in':{'column': 'c4'},
                'out':{'success':False, 'unexpected_list':['49000004632'], 'unexpected_index_list': [2]}
            },
            {
                'in':{'column':'already_datetime', 'catch_exceptions':True},
                'error':{ 'traceback_substring' : 'TypeError: Values passed to expect_column_values_to_be_dateutil_parseable must be of type string' },
            }
    ]

    for t in T:
        out = D.expect_column_values_to_be_dateutil_parseable(**t['in'])
        if 'out' in t:
            self.assertEqual(t['out']['success'], out['success'])
            self.assertEqual(t['out']['unexpected_index_list'], out['result']['unexpected_index_list'])
            self.assertEqual(t['out']['unexpected_list'], out['result']['unexpected_list'])
        elif 'error' in t:
            self.assertEqual(out['exception_info']['raised_exception'], True)
            self.assertIn(t['error']['traceback_substring'], out['exception_info']['exception_traceback'])
def test_expect_column_values_to_be_json_parseable(self):
    """JSON parseability: JSON strings pass; plain ints, Python dicts
    and malformed strings are reported as unexpected."""
    d1 = json.dumps({'i':[1,2,3],'j':35,'k':{'x':'five','y':5,'z':'101'}})
    d2 = json.dumps({'i':1,'j':2,'k':[3,4,5]})
    d3 = json.dumps({'i':'a', 'j':'b', 'k':'c'})
    d4 = json.dumps({'i':[4,5], 'j':[6,7], 'k':[8,9], 'l':{4:'x', 5:'y', 6:'z'}})

    D = ge.dataset.PandasDataset({
        'json_col':[d1,d2,d3,d4],
        'not_json':[4,5,6,7],
        'py_dict':[{'a':1, 'out':1},{'b':2, 'out':4},{'c':3, 'out':9},{'d':4, 'out':16}],
        'most':[d1,d2,d3,'d4']
    })
    D.set_default_expectation_argument("result_format", "COMPLETE")

    T = [
            {
                'in':{'column':'json_col'},
                'out':{'success':True, 'unexpected_index_list':[], 'unexpected_list':[]}},
            {
                'in':{'column':'not_json'},
                'out':{'success':False, 'unexpected_index_list':[0,1,2,3], 'unexpected_list':[4,5,6,7]}},
            {
                'in':{'column':'py_dict'},
                'out':{'success':False, 'unexpected_index_list':[0,1,2,3], 'unexpected_list':[{'a':1, 'out':1},{'b':2, 'out':4},{'c':3, 'out':9},{'d':4, 'out':16}]}},
            {
                'in':{'column':'most'},
                'out':{'success':False, 'unexpected_index_list':[3], 'unexpected_list':['d4']}},
            {
                'in':{'column':'most', 'mostly':.75},
                'out':{'success':True, 'unexpected_index_list':[3], 'unexpected_list':['d4']}}
    ]

    for t in T:
        out = D.expect_column_values_to_be_json_parseable(**t['in'])
        self.assertEqual(t['out']['success'], out['success'])
        self.assertEqual(t['out']['unexpected_index_list'], out['result']['unexpected_index_list'])
        self.assertEqual(t['out']['unexpected_list'], out['result']['unexpected_list'])
# def test_expect_column_values_to_match_json_schema(self):
# with open("./tests/test_sets/expect_column_values_to_match_json_schema_test_set.json") as f:
# J = json.load(f)
# D = ge.dataset.PandasDataset(J["dataset"])
# D.set_default_expectation_argument("result_format", "COMPLETE")
# T = J["tests"]
# self.maxDiff = None
# for t in T:
# out = D.expect_column_values_to_match_json_schema(**t['in'])#, **t['kwargs'])
# self.assertEqual(t['out']['success'], out['success'])
# if 'unexpected_index_list' in t['out']:
# self.assertEqual(t['out']['unexpected_index_list'], out['result']['unexpected_index_list'])
# if 'unexpected_list' in t['out']:
# self.assertEqual(t['out']['unexpected_list'], out['result']['unexpected_list'])
def test_expectation_decorator_summary_mode(self):
    """SUMMARY result_format should report counts/percentages and only
    *partial* unexpected lists (not the full COMPLETE payload)."""
    df = ge.dataset.PandasDataset({
        'x' : [1,2,3,4,5,6,7,7,None,None],
    })
    df.set_default_expectation_argument("result_format", "COMPLETE")

    # print '&'*80
    # print json.dumps(df.expect_column_values_to_be_between('x', min_value=1, max_value=5, result_format="SUMMARY"), indent=2)

    self.maxDiff = None
    self.assertEqual(
        df.expect_column_values_to_be_between('x', min_value=1, max_value=5, result_format="SUMMARY"),
        {
            "success" : False,
            "result" : {
                "element_count" : 10,
                "missing_count" : 2,
                "missing_percent" : .2,
                "unexpected_count" : 3,
                "partial_unexpected_counts": [
                    {"value": 7.0,
                        "count": 2},
                    {"value": 6.0,
                        "count": 1}
                ],
                "unexpected_percent": 0.3,
                "unexpected_percent_nonmissing": 0.375,
                "partial_unexpected_list" : [6.0,7.0,7.0],
                "partial_unexpected_index_list": [5,6,7],
            }
        }
    )

    self.assertEqual(
        df.expect_column_mean_to_be_between("x", 3, 7, result_format="SUMMARY"),
        {
            'success': True,
            'result': {
                'observed_value': 4.375,
                'element_count': 10,
                'missing_count': 2,
                'missing_percent': .2
            },
        }
    )
def test_positional_arguments(self):
    """Expectations should accept their arguments positionally (column,
    min, max, ...) as well as by keyword, including `mostly`."""
    df = ge.dataset.PandasDataset({
        'x':[1,3,5,7,9],
        'y':[2,4,6,8,10],
        'z':[None,'a','b','c','abc']
    })
    df.set_default_expectation_argument('result_format', 'COMPLETE')

    self.assertEqual(
        df.expect_column_mean_to_be_between('x',4,6),
        {'success':True, 'result': {'observed_value': 5, 'element_count': 5,
            'missing_count': 0,
            'missing_percent': 0.0}}
    )

    out = df.expect_column_values_to_be_between('y',1,6)
    t = {'out': {'success':False, 'unexpected_list':[8,10], 'unexpected_index_list': [3,4]}}
    if 'out' in t:
        self.assertEqual(t['out']['success'], out['success'])
        if 'unexpected_index_list' in t['out']:
            self.assertEqual(t['out']['unexpected_index_list'], out['result']['unexpected_index_list'])
        if 'unexpected_list' in t['out']:
            self.assertEqual(t['out']['unexpected_list'], out['result']['unexpected_list'])

    out = df.expect_column_values_to_be_between('y',1,6,mostly=.5)
    t = {'out': {'success':True, 'unexpected_list':[8,10], 'unexpected_index_list':[3,4]}}
    if 'out' in t:
        self.assertEqual(t['out']['success'], out['success'])
        if 'unexpected_index_list' in t['out']:
            self.assertEqual(t['out']['unexpected_index_list'], out['result']['unexpected_index_list'])
        if 'unexpected_list' in t['out']:
            self.assertEqual(t['out']['unexpected_list'], out['result']['unexpected_list'])

    out = df.expect_column_values_to_be_in_set('z',['a','b','c'])
    t = {'out': {'success':False, 'unexpected_list':['abc'], 'unexpected_index_list':[4]}}
    if 'out' in t:
        self.assertEqual(t['out']['success'], out['success'])
        if 'unexpected_index_list' in t['out']:
            self.assertEqual(t['out']['unexpected_index_list'], out['result']['unexpected_index_list'])
        if 'unexpected_list' in t['out']:
            self.assertEqual(t['out']['unexpected_list'], out['result']['unexpected_list'])

    out = df.expect_column_values_to_be_in_set('z',['a','b','c'],mostly=.5)
    t = {'out': {'success':True, 'unexpected_list':['abc'], 'unexpected_index_list':[4]}}
    if 'out' in t:
        self.assertEqual(t['out']['success'], out['success'])
        if 'unexpected_index_list' in t['out']:
            self.assertEqual(t['out']['unexpected_index_list'], out['result']['unexpected_index_list'])
        if 'unexpected_list' in t['out']:
            self.assertEqual(t['out']['unexpected_list'], out['result']['unexpected_list'])
def test_result_format_argument_in_decorators(self):
    """result_format=None should fall back to the configured default,
    and an unknown format string should raise ValueError."""
    df = ge.dataset.PandasDataset({
        'x':[1,3,5,7,9],
        'y':[2,4,6,8,10],
        'z':[None,'a','b','c','abc']
    })
    df.set_default_expectation_argument('result_format', 'COMPLETE')

    # Test explicit Nones in result_format
    self.assertEqual(
        df.expect_column_mean_to_be_between('x',4,6, result_format=None),
        {'success':True, 'result': {'observed_value': 5, 'element_count': 5,
            'missing_count': 0,
            'missing_percent': 0.0
        }}
    )

    self.assertEqual(
        df.expect_column_values_to_be_between('y',1,6, result_format=None),
        {'result': {'element_count': 5,
            'missing_count': 0,
            'missing_percent': 0.0,
            'partial_unexpected_counts': [{'count': 1, 'value': 8},
                {'count': 1, 'value': 10}],
            'partial_unexpected_index_list': [3, 4],
            'partial_unexpected_list': [8, 10],
            'unexpected_count': 2,
            'unexpected_index_list': [3, 4],
            'unexpected_list': [8, 10],
            'unexpected_percent': 0.4,
            'unexpected_percent_nonmissing': 0.4},
            'success': False}
    )

    # Test unknown output format
    with self.assertRaises(ValueError):
        df.expect_column_values_to_be_between('y',1,6, result_format="QUACK")
    with self.assertRaises(ValueError):
        df.expect_column_mean_to_be_between('x',4,6, result_format="QUACK")
def test_from_pandas(self):
    """ge.from_pandas should wrap a DataFrame in a ge Dataset exposing
    the same columns and column data."""
    pd_df = pd.DataFrame({
        'x':[1,3,5,7,9],
        'y':[2,4,6,8,10],
        'z':[None,'a','b','c','abc']
    })

    ge_df = ge.from_pandas(pd_df)
    self.assertIsInstance(ge_df, ge.dataset.Dataset)
    # assertEquals is a deprecated alias (removed in Python 3.12); use
    # assertEqual instead.
    self.assertEqual(list(ge_df.columns), ['x', 'y', 'z'])
    self.assertEqual(list(ge_df['x']), list(pd_df['x']))
    self.assertEqual(list(ge_df['y']), list(pd_df['y']))
    self.assertEqual(list(ge_df['z']), list(pd_df['z']))
def test_from_pandas_expectations_config(self):
    """from_pandas with an expectations_config should validate the
    Titanic fixture and match the stored expected results."""
    # Logic mostly copied from TestValidation.test_validate
    def load_ge_config(file):
        with open(file) as f:
            return json.load(f)

    my_expectations_config = load_ge_config("./tests/test_sets/titanic_expectations.json")
    pd_df = pd.read_csv("./tests/test_sets/Titanic.csv")
    my_df = ge.from_pandas(pd_df, expectations_config=my_expectations_config)

    my_df.set_default_expectation_argument("result_format", "COMPLETE")
    results = my_df.validate(catch_exceptions=False)

    expected_results = load_ge_config("./tests/test_sets/expected_results_20180303.json")
    self.maxDiff = None
    # Deep comparison tolerating float rounding differences.
    assertDeepAlmostEqual(self, results, expected_results)
def test_ge_pandas_concatenating(self):
    """pd.concat of two PandasDatasets yields a PandasDataset carrying
    only the default (column-exists) expectations.

    NOTE(review): a method with this exact name is redefined later in
    the class, so this copy is shadowed and never runs; the duplicate
    should be removed.
    """
    df1 = ge.dataset.PandasDataset({
        'A': ['A0', 'A1', 'A2'],
        'B': ['B0', 'B1', 'B2']
    })
    df1.expect_column_values_to_match_regex('A', '^A[0-2]$')
    df1.expect_column_values_to_match_regex('B', '^B[0-2]$')

    df2 = ge.dataset.PandasDataset({
        'A': ['A3', 'A4', 'A5'],
        'B': ['B3', 'B4', 'B5']
    })
    df2.expect_column_values_to_match_regex('A', '^A[3-5]$')
    df2.expect_column_values_to_match_regex('B', '^B[3-5]$')

    df = pd.concat([df1, df2])

    exp_c = [
        {'expectation_type': 'expect_column_to_exist',
            'kwargs': {'column': 'A'}},
        {'expectation_type': 'expect_column_to_exist',
            'kwargs': {'column': 'B'}}
    ]

    # The concatenated data frame will:
    #
    #   1. Be a ge.dataset.PandaDataSet
    #   2. Only have the default expectations
    self.assertIsInstance(df, ge.dataset.PandasDataset)
    self.assertEqual(df.find_expectations(), exp_c)
def test_ge_pandas_joining(self):
    """df1.join(df2) yields a PandasDataset carrying only the default
    (column-exists) expectations.

    NOTE(review): a method with this exact name is redefined later in
    the class, so this copy is shadowed and never runs; the duplicate
    should be removed.
    """
    df1 = ge.dataset.PandasDataset({
        'A': ['A0', 'A1', 'A2'],
        'B': ['B0', 'B1', 'B2']},
        index=['K0', 'K1', 'K2'])
    df1.expect_column_values_to_match_regex('A', '^A[0-2]$')
    df1.expect_column_values_to_match_regex('B', '^B[0-2]$')

    df2 = ge.dataset.PandasDataset({
        'C': ['C0', 'C2', 'C3'],
        'D': ['C0', 'D2', 'D3']},
        index=['K0', 'K2', 'K3'])
    df2.expect_column_values_to_match_regex('C', '^C[0-2]$')
    df2.expect_column_values_to_match_regex('D', '^D[0-2]$')

    df = df1.join(df2)

    exp_j = [
        {'expectation_type': 'expect_column_to_exist',
            'kwargs': {'column': 'A'}},
        {'expectation_type': 'expect_column_to_exist',
            'kwargs': {'column': 'B'}},
        {'expectation_type': 'expect_column_to_exist',
            'kwargs': {'column': 'C'}},
        {'expectation_type': 'expect_column_to_exist',
            'kwargs': {'column': 'D'}}
    ]

    # The joined data frame will:
    #
    #   1. Be a ge.dataset.PandaDataSet
    #   2. Only have the default expectations
    self.assertIsInstance(df, ge.dataset.PandasDataset)
    self.assertEqual(df.find_expectations(), exp_j)
def test_ge_pandas_merging(self):
    """df1.merge(df2) yields a PandasDataset carrying only the default
    (column-exists) expectations.

    NOTE(review): a method with this exact name is redefined at the end
    of the class, so this copy is shadowed. Also, the salary regex
    '^[0-9]{4,6]$' looks like a typo for '^[0-9]{4,6}$' — confirm before
    fixing.
    """
    df1 = ge.dataset.PandasDataset({
        'id': [1, 2, 3, 4],
        'name': ['a', 'b', 'c', 'd']
    })
    df1.expect_column_values_to_match_regex('name', '^[A-Za-z ]+$')

    df2 = ge.dataset.PandasDataset({
        'id': [1, 2, 3, 4],
        'salary': [57000, 52000, 59000, 65000]
    })
    df2.expect_column_values_to_match_regex('salary', '^[0-9]{4,6]$')

    df = df1.merge(df2, on='id')

    exp_m = [
        {'expectation_type': 'expect_column_to_exist',
            'kwargs': {'column': 'id'}},
        {'expectation_type': 'expect_column_to_exist',
            'kwargs': {'column': 'name'}},
        {'expectation_type': 'expect_column_to_exist',
            'kwargs': {'column': 'salary'}}
    ]

    # The merged data frame will:
    #
    #   1. Be a ge.dataset.PandaDataSet
    #   2. Only have the default expectations
    self.assertIsInstance(df, ge.dataset.PandasDataset)
    self.assertEqual(df.find_expectations(), exp_m)
def test_ge_pandas_sampling(self):
    """df.sample() should return a PandasDataset that inherits all the
    parent's non-failing expectations; failing ones are dropped."""
    df = ge.dataset.PandasDataset({
        'A': [1, 2, 3, 4],
        'B': [5, 6, 7, 8],
        'C': ['a', 'b', 'c', 'd'],
        'D': ['e', 'f', 'g', 'h']
    })

    # Put some simple expectations on the data frame
    df.expect_column_values_to_be_in_set("A", [1, 2, 3, 4])
    df.expect_column_values_to_be_in_set("B", [5, 6, 7, 8])
    df.expect_column_values_to_be_in_set("C", ['a', 'b', 'c', 'd'])
    df.expect_column_values_to_be_in_set("D", ['e', 'f', 'g', 'h'])

    exp1 = df.find_expectations()

    # The sampled data frame should:
    #
    #   1. Be a ge.dataset.PandaDataSet
    #   2. Inherit ALL the non-failing expectations of the parent data frame
    samp1 = df.sample(n=2)
    self.assertIsInstance(samp1, ge.dataset.PandasDataset)
    self.assertEqual(samp1.find_expectations(), exp1)

    samp1 = df.sample(frac=0.25, replace=True)
    self.assertIsInstance(samp1, ge.dataset.PandasDataset)
    self.assertEqual(samp1.find_expectations(), exp1)

    # Change expectation on column "D", sample, and check expectations.
    # The failing expectation on column "D" is automatically dropped in
    # the sample.
    df.expect_column_values_to_be_in_set("D", ['e', 'f', 'g', 'x'])
    samp1 = df.sample(n=2)
    exp1 = [
        {'expectation_type': 'expect_column_to_exist',
            'kwargs': {'column': 'A'}},
        {'expectation_type': 'expect_column_to_exist',
            'kwargs': {'column': 'B'}},
        {'expectation_type': 'expect_column_to_exist',
            'kwargs': {'column': 'C'}},
        {'expectation_type': 'expect_column_to_exist',
            'kwargs': {'column': 'D'}},
        {'expectation_type': 'expect_column_values_to_be_in_set',
            'kwargs': {'column': 'A', 'value_set': [1, 2, 3, 4]}},
        {'expectation_type': 'expect_column_values_to_be_in_set',
            'kwargs': {'column': 'B', 'value_set': [5, 6, 7, 8]}},
        {'expectation_type': 'expect_column_values_to_be_in_set',
            'kwargs': {'column': 'C', 'value_set': ['a', 'b', 'c', 'd']}}
    ]
    self.assertEqual(samp1.find_expectations(), exp1)
def test_ge_pandas_concatenating(self):
    """pd.concat of two PandasDatasets keeps the subclass but resets
    the expectation suite to the default column-existence expectations."""
    top = ge.dataset.PandasDataset({
        'A': ['A0', 'A1', 'A2'],
        'B': ['B0', 'B1', 'B2']
    })
    top.expect_column_values_to_match_regex('A', '^A[0-2]$')
    top.expect_column_values_to_match_regex('B', '^B[0-2]$')

    bottom = ge.dataset.PandasDataset({
        'A': ['A3', 'A4', 'A5'],
        'B': ['B3', 'B4', 'B5']
    })
    bottom.expect_column_values_to_match_regex('A', '^A[3-5]$')
    bottom.expect_column_values_to_match_regex('B', '^B[3-5]$')

    combined = pd.concat([top, bottom])

    # Only the default (column-existence) expectations survive concatenation.
    expected = [
        {'expectation_type': 'expect_column_to_exist',
         'kwargs': {'column': name}}
        for name in ('A', 'B')
    ]
    self.assertIsInstance(combined, ge.dataset.PandasDataset)
    self.assertEqual(combined.find_expectations(), expected)
def test_ge_pandas_joining(self):
    """join() keeps the PandasDataset subclass but resets to default expectations."""
    left = ge.dataset.PandasDataset({
        'A': ['A0', 'A1', 'A2'],
        'B': ['B0', 'B1', 'B2']},
        index=['K0', 'K1', 'K2'])
    left.expect_column_values_to_match_regex('A', '^A[0-2]$')
    left.expect_column_values_to_match_regex('B', '^B[0-2]$')

    right = ge.dataset.PandasDataset({
        'C': ['C0', 'C2', 'C3'],
        'D': ['C0', 'D2', 'D3']},
        index=['K0', 'K2', 'K3'])
    right.expect_column_values_to_match_regex('C', '^C[0-2]$')
    # NOTE(review): column "D" contains the value 'C0', so this expectation
    # fails on the fixture data -- presumably irrelevant here, since the join
    # below discards every non-default expectation anyway. Confirm intent.
    right.expect_column_values_to_match_regex('D', '^D[0-2]$')

    joined = left.join(right)

    # Only the default (column-existence) expectations survive the join.
    expected = [
        {'expectation_type': 'expect_column_to_exist',
         'kwargs': {'column': name}}
        for name in ('A', 'B', 'C', 'D')
    ]
    self.assertIsInstance(joined, ge.dataset.PandasDataset)
    self.assertEqual(joined.find_expectations(), expected)
def test_ge_pandas_merging(self):
    """merge() keeps the PandasDataset subclass but resets to default expectations."""
    df1 = ge.dataset.PandasDataset({
        'id': [1, 2, 3, 4],
        'name': ['a', 'b', 'c', 'd']
    })
    df1.expect_column_values_to_match_regex('name', '^[A-Za-z ]+$')
    df2 = ge.dataset.PandasDataset({
        'id': [1, 2, 3, 4],
        'salary': [57000, 52000, 59000, 65000]
    })
    # Fixed regex: previously written '^[0-9]{4,6]$' with a mismatched
    # bracket, which re treats as one digit followed by the literal text
    # "{4,6]" rather than a 4-to-6-digit repetition.
    df2.expect_column_values_to_match_regex('salary', '^[0-9]{4,6}$')
    df = df1.merge(df2, on='id')
    exp_m = [
        {'expectation_type': 'expect_column_to_exist',
         'kwargs': {'column': 'id'}},
        {'expectation_type': 'expect_column_to_exist',
         'kwargs': {'column': 'name'}},
        {'expectation_type': 'expect_column_to_exist',
         'kwargs': {'column': 'salary'}}
    ]
    # The merged data frame will:
    #
    # 1. Be a ge.dataset.PandasDataset
    # 2. Only have the default (column-existence) expectations
    self.assertIsInstance(df, ge.dataset.PandasDataset)
    self.assertEqual(df.find_expectations(), exp_m)
def test_ge_pandas_sampling(self):
    """Sampling a PandasDataset keeps the subclass and ALL its expectations.

    NOTE(review): this redefines ``test_ge_pandas_sampling`` from earlier in
    this class (shadowing it) and asserts the opposite behavior for failing
    expectations -- here they are NOT dropped from the sample. Confirm which
    version matches the library's intended default.
    """
    df = ge.dataset.PandasDataset({
        'A': [1, 2, 3, 4],
        'B': [5, 6, 7, 8],
        'C': ['a', 'b', 'c', 'd'],
        'D': ['e', 'f', 'g', 'h']
    })
    # Put some simple expectations on the data frame
    df.expect_column_values_to_be_in_set("A", [1, 2, 3, 4])
    df.expect_column_values_to_be_in_set("B", [5, 6, 7, 8])
    df.expect_column_values_to_be_in_set("C", ['a', 'b', 'c', 'd'])
    df.expect_column_values_to_be_in_set("D", ['e', 'f', 'g', 'h'])
    exp1 = df.find_expectations()
    # The sampled data frame should:
    #
    # 1. Be a ge.dataset.PandasDataset
    # 2. Inherit ALL the expectations of the parent data frame
    samp1 = df.sample(n=2)
    self.assertIsInstance(samp1, ge.dataset.PandasDataset)
    self.assertEqual(samp1.find_expectations(), exp1)
    samp1 = df.sample(frac=0.25, replace=True)
    self.assertIsInstance(samp1, ge.dataset.PandasDataset)
    self.assertEqual(samp1.find_expectations(), exp1)
    # Change expectation on column "D", sample, and check expectations.
    # The failing expectation on column "D" is NOT automatically dropped
    # in the sample.
    df.expect_column_values_to_be_in_set("D", ['e', 'f', 'g', 'x'])
    samp1 = df.sample(n=2)
    # Full suite expected, including the now-failing "D" value-set expectation.
    exp1 = [
        {'expectation_type': 'expect_column_to_exist',
         'kwargs': {'column': 'A'}},
        {'expectation_type': 'expect_column_to_exist',
         'kwargs': {'column': 'B'}},
        {'expectation_type': 'expect_column_to_exist',
         'kwargs': {'column': 'C'}},
        {'expectation_type': 'expect_column_to_exist',
         'kwargs': {'column': 'D'}},
        {'expectation_type': 'expect_column_values_to_be_in_set',
         'kwargs': {'column': 'A', 'value_set': [1, 2, 3, 4]}},
        {'expectation_type': 'expect_column_values_to_be_in_set',
         'kwargs': {'column': 'B', 'value_set': [5, 6, 7, 8]}},
        {'expectation_type': 'expect_column_values_to_be_in_set',
         'kwargs': {'column': 'C', 'value_set': ['a', 'b', 'c', 'd']}},
        {'expectation_type': 'expect_column_values_to_be_in_set',
         'kwargs': {'column': 'D', 'value_set': ['e', 'f', 'g', 'x']}}
    ]
    self.assertEqual(samp1.find_expectations(), exp1)
def test_ge_pandas_subsetting(self):
    """Subsetting keeps both the PandasDataset subclass and all expectations.

    NOTE(review): this method name is reused later in this class; the later
    definition shadows this one under unittest -- confirm which is intended.
    """
    df = ge.dataset.PandasDataset({
        'A': [1, 2, 3, 4],
        'B': [5, 6, 7, 8],
        'C': ['a', 'b', 'c', 'd'],
        'D': ['e', 'f', 'g', 'h']
    })
    # Put some simple expectations on the data frame
    df.expect_column_values_to_be_in_set("A", [1, 2, 3, 4])
    df.expect_column_values_to_be_in_set("B", [5, 6, 7, 8])
    df.expect_column_values_to_be_in_set("C", ['a', 'b', 'c', 'd'])
    df.expect_column_values_to_be_in_set("D", ['e', 'f', 'g', 'h'])

    # Every subsetting operation below should return a PandasDataset that
    # inherits ALL the expectations of the parent data frame.
    expected = df.find_expectations()
    subsets = (
        df[['A', 'D']],
        df[['A']],
        df[:3],
        df[1:2],
        df[:-1],
        df[-1:],
        df.iloc[:3, 1:4],
        df.loc[0:, 'A':'B'],
    )
    for subset in subsets:
        self.assertIsInstance(subset, ge.dataset.PandasDataset)
        self.assertEqual(subset.find_expectations(), expected)
def test_ge_pandas_automatic_failure_removal(self):
    """Failing expectations survive sampling/subsetting by default, and are
    dropped once ``discard_subset_failing_expectations`` is set to True."""
    df = ge.dataset.PandasDataset({
        'A': [1, 2, 3, 4],
        'B': [5, 6, 7, 8],
        'C': ['a', 'b', 'c', 'd'],
        'D': ['e', 'f', 'g', 'h']
    })
    # Put some simple expectations on the data frame
    df.expect_column_values_to_be_in_set("A", [1, 2, 3, 4])
    df.expect_column_values_to_be_in_set("B", [5, 6, 7, 8])
    df.expect_column_values_to_be_in_set("C", ['w', 'x', 'y', 'z'])
    df.expect_column_values_to_be_in_set("D", ['e', 'f', 'g', 'h'])
    # First check that failing expectations are NOT automatically
    # dropped when sampling.
    # For this data frame, the expectation on column "C" above fails.
    exp1 = [
        {'expectation_type': 'expect_column_to_exist',
         'kwargs': {'column': 'A'}},
        {'expectation_type': 'expect_column_to_exist',
         'kwargs': {'column': 'B'}},
        {'expectation_type': 'expect_column_to_exist',
         'kwargs': {'column': 'C'}},
        {'expectation_type': 'expect_column_to_exist',
         'kwargs': {'column': 'D'}},
        {'expectation_type': 'expect_column_values_to_be_in_set',
         'kwargs': {'column': 'A', 'value_set': [1, 2, 3, 4]}},
        {'expectation_type': 'expect_column_values_to_be_in_set',
         'kwargs': {'column': 'B', 'value_set': [5, 6, 7, 8]}},
        {'expectation_type': 'expect_column_values_to_be_in_set',
         'kwargs': {'column': 'C', 'value_set': ['w', 'x', 'y', 'z']}},
        {'expectation_type': 'expect_column_values_to_be_in_set',
         'kwargs': {'column': 'D', 'value_set': ['e', 'f', 'g', 'h']}}
    ]
    samp1 = df.sample(n=2)
    self.assertEqual(samp1.find_expectations(), exp1)
    # Now check subsetting to verify that failing expectations are NOT
    # automatically dropped when subsetting.
    sub1 = df[['A', 'D']]
    self.assertEqual(sub1.find_expectations(), exp1)
    # Set property/attribute so that failing expectations are
    # automatically removed when sampling or subsetting.
    df.discard_subset_failing_expectations = True
    # With the flag on, sampling drops only the failing "C" value-set
    # expectation (the column-existence defaults remain).
    exp_samp = [
        {'expectation_type': 'expect_column_to_exist',
         'kwargs': {'column': 'A'}},
        {'expectation_type': 'expect_column_to_exist',
         'kwargs': {'column': 'B'}},
        {'expectation_type': 'expect_column_to_exist',
         'kwargs': {'column': 'C'}},
        {'expectation_type': 'expect_column_to_exist',
         'kwargs': {'column': 'D'}},
        {'expectation_type': 'expect_column_values_to_be_in_set',
         'kwargs': {'column': 'A', 'value_set': [1, 2, 3, 4]}},
        {'expectation_type': 'expect_column_values_to_be_in_set',
         'kwargs': {'column': 'B', 'value_set': [5, 6, 7, 8]}},
        {'expectation_type': 'expect_column_values_to_be_in_set',
         'kwargs': {'column': 'D', 'value_set': ['e', 'f', 'g', 'h']}}
    ]
    samp2 = df.sample(n=2)
    self.assertEqual(samp2.find_expectations(), exp_samp)
    # Now check subsetting. In addition to the failure on column "C",
    # the expectations on column "B" now fail since column "B" doesn't
    # exist in the subset.
    sub2 = df[['A', 'D']]
    exp_sub = [
        {'expectation_type': 'expect_column_to_exist',
         'kwargs': {'column': 'A'}},
        {'expectation_type': 'expect_column_to_exist',
         'kwargs': {'column': 'D'}},
        {'expectation_type': 'expect_column_values_to_be_in_set',
         'kwargs': {'column': 'A', 'value_set': [1, 2, 3, 4]}},
        {'expectation_type': 'expect_column_values_to_be_in_set',
         'kwargs': {'column': 'D', 'value_set': ['e', 'f', 'g', 'h']}}
    ]
    self.assertEqual(sub2.find_expectations(), exp_sub)
def test_ge_pandas_subsetting(self):
    """Each subsetting operation yields a PandasDataset carrying every
    expectation of its parent.

    NOTE(review): this redefines (and shadows) an earlier method of the same
    name in this class -- confirm the duplication is intended.
    """
    df = ge.dataset.PandasDataset({
        'A': [1, 2, 3, 4],
        'B': [5, 6, 7, 8],
        'C': ['a', 'b', 'c', 'd'],
        'D': ['e', 'f', 'g', 'h']
    })
    # Attach one value-set expectation per column.
    df.expect_column_values_to_be_in_set("A", [1, 2, 3, 4])
    df.expect_column_values_to_be_in_set("B", [5, 6, 7, 8])
    df.expect_column_values_to_be_in_set("C", ['a', 'b', 'c', 'd'])
    df.expect_column_values_to_be_in_set("D", ['e', 'f', 'g', 'h'])

    inherited = df.find_expectations()
    for view in (
        df[['A', 'D']],
        df[['A']],
        df[:3],
        df[1:2],
        df[:-1],
        df[-1:],
        df.iloc[:3, 1:4],
        df.loc[0:, 'A':'B'],
    ):
        self.assertIsInstance(view, ge.dataset.PandasDataset)
        self.assertEqual(view.find_expectations(), inherited)
def test_subclass_pandas_subset_retains_subclass(self):
    """A subclass of PandasDataset should still be that subclass after a Pandas subsetting operation"""
    class CustomPandasDataset(ge.dataset.PandasDataset):
        @ge.dataset.MetaPandasDataset.column_map_expectation
        def expect_column_values_to_be_odd(self, column):
            # Truthy (1) for odd values, falsy (0) for even ones.
            return column.map(lambda x: x % 2 )
        @ge.dataset.MetaPandasDataset.column_map_expectation
        def expectation_that_crashes_on_sixes(self, column):
            # Deliberately raises ZeroDivisionError for every value;
            # presumably used to exercise exception handling in
            # expectations -- TODO confirm, it is never invoked here.
            return column.map(lambda x: (x-6)/0 != "duck")
    df = CustomPandasDataset({
        'all_odd': [1, 3, 5, 5, 5, 7, 9, 9, 9, 11],
        'mostly_odd': [1, 3, 5, 7, 9, 2, 4, 1, 3, 5],
        'all_even': [2, 4, 4, 6, 6, 6, 8, 8, 8, 8],
        'odd_missing': [1, 3, 5, None, None, None, None, 1, 3, None],
        'mixed_missing': [1, 3, 5, None, None, 2, 4, 1, 3, None],
        'all_missing': [None, None, None, None, None, None, None, None, None, None]
    })
    # Sampling must preserve the *exact* subclass, not just PandasDataset.
    df2 = df.sample(frac=0.5)
    self.assertTrue(type(df2) == type(df))
def test_pandas_deepcopy():
    """deepcopy must fully detach the copy from later mutations of the original."""
    import copy
    original = ge.dataset.PandasDataset({"a": [1, 2, 3]})
    clone = copy.deepcopy(original)
    original["a"] = [2, 3, 4]
    # The copied dataframe must not be affected by mutating the original.
    assert clone.expect_column_to_exist("a")["success"] == True
    assert list(original["a"]) == [2, 3, 4]
    assert list(clone["a"]) == [1, 2, 3]
if __name__ == "__main__":
    # Allow running this test module directly: discovers and runs all
    # TestCase methods defined above.
    unittest.main()
|
sotte/great_expectations | great_expectations/data_context/__init__.py | from .pandas_context import PandasCSVDataContext
from .sqlalchemy_context import SqlAlchemyDataContext
def get_data_context(context_type, options):
    """Return a data_context object which exposes options to list datasets and get a dataset from
    that context. This is a new API in Great Expectations 0.4, and is subject to rapid change.

    :param context_type: (string) one of "SqlAlchemy" or "PandasCSV"
    :param options: options to be passed to the data context's connect method.
    :return: a new DataContext object
    :raises ValueError: if `context_type` is not a recognized context name.
    """
    # Guard-clause dispatch: return as soon as a known type matches.
    if context_type == "SqlAlchemy":
        return SqlAlchemyDataContext(options)
    if context_type == "PandasCSV":
        return PandasCSVDataContext(options)
    raise ValueError("Unknown data context.")
sotte/great_expectations | tests/test_data_contexts/test_data_contexts.py | import pytest
import os
import sqlalchemy as sa
import pandas as pd
from great_expectations import get_data_context
from great_expectations.dataset import PandasDataset, SqlAlchemyDataset
@pytest.fixture(scope="module")
def test_db_connection_string(tmpdir_factory):
    """Create a throwaway two-table SQLite database; return its connection string."""
    first = pd.DataFrame({'col_1': [1, 2, 3, 4, 5], 'col_2': ['a', 'b', 'c', 'd', 'e']})
    second = pd.DataFrame({'col_1': [0, 1, 2, 3, 4], 'col_2': ['b', 'c', 'd', 'e', 'f']})

    db_file = tmpdir_factory.mktemp("db_context").join("test.db")
    url = 'sqlite:///' + str(db_file)
    engine = sa.create_engine(url)
    first.to_sql('table_1', con=engine, index=True)
    second.to_sql('table_2', con=engine, index=True)

    # Return a connection string to this newly-created db
    return url
@pytest.fixture(scope="module")
def test_folder_connection_path(tmpdir_factory):
    """Write one CSV file into a fresh temp folder; return the folder path."""
    frame = pd.DataFrame({'col_1': [1, 2, 3, 4, 5], 'col_2': ['a', 'b', 'c', 'd', 'e']})
    folder = tmpdir_factory.mktemp("csv_context")
    frame.to_csv(folder.join("test.csv"))
    return str(folder)
def test_invalid_data_context():
    """Requesting an unrecognized context type must raise ValueError."""
    with pytest.raises(ValueError) as excinfo:
        context = get_data_context('what_a_ridiculous_name', None)
    assert "Unknown data context." in str(excinfo)
def test_sqlalchemy_data_context(test_db_connection_string):
    """The SqlAlchemy context lists both fixture tables and returns SqlAlchemyDataset objects."""
    context = get_data_context('SqlAlchemy', test_db_connection_string)

    assert context.list_datasets() == ['table_1', 'table_2']
    ds = context.get_dataset('table_1')
    assert isinstance(ds, SqlAlchemyDataset)
def test_pandas_data_context(test_folder_connection_path):
    """The PandasCSV context lists the fixture CSV and returns PandasDataset objects."""
    context = get_data_context('PandasCSV', test_folder_connection_path)

    assert context.list_datasets() == ['test.csv']
    ds = context.get_dataset('test.csv')
    assert isinstance(ds, PandasDataset)
|
sotte/great_expectations | great_expectations/dataset/base.py | <gh_stars>0
from __future__ import division
import json
import inspect
import copy
from functools import wraps
import traceback
import warnings
from six import string_types
from collections import namedtuple
from collections import (
Counter,
defaultdict
)
from ..version import __version__
from .util import DotDict, recursively_convert_to_json_serializable, parse_result_format
class Dataset(object):
def __init__(self, *args, **kwargs):
    """Initialize the dataset and create its (empty) default expectations config.

    All positional/keyword arguments are forwarded unchanged to the next
    class in the MRO (for mixin-style subclasses such as PandasDataset).
    """
    super(Dataset, self).__init__(*args, **kwargs)
    self._initialize_expectations()
@classmethod
def expectation(cls, method_arg_names):
    """Manages configuration and running of expectation objects.

    Expectation builds and saves a new expectation configuration to the Dataset object. It is the core decorator \
    used by great expectations to manage expectation configurations.

    Args:
        method_arg_names (List) : An ordered list of the arguments used by the method implementing the expectation \
            (typically the result of inspection). Positional arguments are explicitly mapped to \
            keyword arguments when the expectation is run.

    Notes:
        Intermediate decorators that call the core @expectation decorator will most likely need to pass their \
        decorated methods' signature up to the expectation decorator. For example, the MetaPandasDataset \
        column_map_expectation decorator relies on the Dataset expectation decorator, but will pass through the \
        signature from the implementing method.

        @expectation intercepts and takes action based on the following parameters:
            * include_config (boolean or None) : \
                If True, then include the generated expectation config as part of the result object. \
                For more detail, see :ref:`include_config`.
            * catch_exceptions (boolean or None) : \
                If True, then catch exceptions and include them as part of the result object. \
                For more detail, see :ref:`catch_exceptions`.
            * result_format (str or None) : \
                Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
                For more detail, see :ref:`result_format <result_format>`.
            * meta (dict or None): \
                A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
                For more detail, see :ref:`meta`.
    """
    def outer_wrapper(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):

            # Get the name of the method
            method_name = func.__name__

            # Combine all arguments into a single new "kwargs"
            all_args = dict(zip(method_arg_names, args))
            all_args.update(kwargs)

            # Unpack display parameters; remove them from all_args if appropriate
            if "include_config" in kwargs:
                include_config = kwargs["include_config"]
                del all_args["include_config"]
            else:
                include_config = self.default_expectation_args["include_config"]

            if "catch_exceptions" in kwargs:
                catch_exceptions = kwargs["catch_exceptions"]
                del all_args["catch_exceptions"]
            else:
                catch_exceptions = self.default_expectation_args["catch_exceptions"]

            # NOTE: unlike include_config/catch_exceptions, result_format is
            # intentionally left inside all_args here; it is removed below
            # only if the implementing function does not accept it.
            if "result_format" in kwargs:
                result_format = kwargs["result_format"]
            else:
                result_format = self.default_expectation_args["result_format"]

            # Extract the meta object for use as a top-level expectation_config holder
            if "meta" in kwargs:
                meta = kwargs["meta"]
                del all_args["meta"]
            else:
                meta = None

            # This intends to get the signature of the inner wrapper, if there is one.
            # NOTE(review): inspect.getargspec is deprecated since Python 3.0
            # (removed in 3.11) -- consider inspect.getfullargspec.
            if "result_format" in inspect.getargspec(func)[0][1:]:
                all_args["result_format"] = result_format
            else:
                if "result_format" in all_args:
                    del all_args["result_format"]

            all_args = recursively_convert_to_json_serializable(all_args)

            # Patch in PARAMETER args, and remove locally-supplied arguments
            expectation_args = copy.deepcopy(all_args)  # This will become the stored config
            if "evaluation_parameters" in self._expectations_config:
                evaluation_args = self._build_evaluation_parameters(expectation_args,
                    self._expectations_config["evaluation_parameters"])  # This will be passed to the evaluation
            else:
                evaluation_args = self._build_evaluation_parameters(expectation_args, None)

            # Construct the expectation_config object
            expectation_config = DotDict({
                "expectation_type": method_name,
                "kwargs": expectation_args
            })

            # Add meta to our expectation_config
            if meta is not None:
                expectation_config["meta"] = meta

            raised_exception = False
            exception_traceback = None
            exception_message = None

            # Finally, execute the expectation method itself
            try:
                return_obj = func(self, **evaluation_args)
            except Exception as err:
                if catch_exceptions:
                    raised_exception = True
                    exception_traceback = traceback.format_exc()
                    exception_message = str(err)
                    # A caught exception is reported as a plain failure result.
                    return_obj = {
                        "success": False
                    }
                else:
                    raise(err)

            # Append the expectation to the config.
            self._append_expectation(expectation_config)

            if include_config:
                return_obj["expectation_config"] = copy.deepcopy(expectation_config)

            if catch_exceptions:
                return_obj["exception_info"] = {
                    "raised_exception": raised_exception,
                    "exception_message": exception_message,
                    "exception_traceback": exception_traceback
                }

            # Add a "success" object to the config.  Note this mutates the
            # config AFTER it was appended above, so the stored expectation
            # records the outcome of this run.
            expectation_config["success_on_last_run"] = return_obj["success"]

            return_obj = recursively_convert_to_json_serializable(return_obj)
            return return_obj

        return wrapper

    return outer_wrapper
@classmethod
def column_map_expectation(cls, func):
    """Constructs an expectation using column-map semantics.

    The column_map_expectation decorator handles boilerplate issues surrounding the common pattern of evaluating
    truthiness of some condition on a per-row basis.

    Args:
        func (function): \
            The function implementing a row-wise expectation. The function should take a column of data and \
            return an equally-long column of boolean values corresponding to the truthiness of the \
            underlying expectation.

    Notes:
        column_map_expectation intercepts and takes action based on the following parameters:
        mostly (None or a float between 0 and 1): \
            Return `"success": True` if at least mostly percent of values match the expectation. \
            For more detail, see :ref:`mostly`.

        column_map_expectation *excludes null values* from being passed to the function

        Depending on the `result_format` selected, column_map_expectation can add additional data to a return object, \
        including `element_count`, `nonnull_values`, `nonnull_count`, `success_count`, `unexpected_list`, and \
        `unexpected_index_list`. See :func:`_format_column_map_output <great_expectations.dataset.base.Dataset._format_column_map_output>`

    See also:
        :func:`expect_column_values_to_be_unique <great_expectations.dataset.base.Dataset.expect_column_values_to_be_unique>` \
        for an example of a column_map_expectation
    """
    # Abstract here; backend-specific subclasses (e.g. MetaPandasDataset,
    # used as a decorator elsewhere in this codebase) provide the
    # concrete implementation.
    raise NotImplementedError
@classmethod
def column_aggregate_expectation(cls, func):
    """Constructs an expectation using column-aggregate semantics.

    The column_aggregate_expectation decorator handles boilerplate issues surrounding the common pattern of \
    evaluating truthiness of some condition on an aggregated-column basis.

    Args:
        func (function): \
            The function implementing an expectation using an aggregate property of a column. \
            The function should take a column of data and return the aggregate value it computes.

    Notes:
        column_aggregate_expectation *excludes null values* from being passed to the function

    See also:
        :func:`expect_column_mean_to_be_between <great_expectations.dataset.base.Dataset.expect_column_mean_to_be_between>` \
        for an example of a column_aggregate_expectation
    """
    # Abstract here; backend-specific subclasses provide the concrete
    # implementation.
    raise NotImplementedError
def _initialize_expectations(self, config=None, name=None):
    """Instantiates `_expectations_config` as empty by default or with a specified expectation `config`.

    In addition, this always sets the `default_expectation_args` to:
        `include_config`: False,
        `catch_exceptions`: False,
        `result_format`: 'BASIC'

    Args:
        config (json): \
            A json-serializable expectation config. \
            If None, creates default `_expectations_config` with an empty list of expectations and \
            key value `dataset_name` as `name`.

        name (string): \
            The name to assign to `_expectations_config.dataset_name` if `config` is not provided.
    """
    # Pandas incorrectly interprets attribute assignment on a DataFrame
    # subclass as an attempt to create a column and emits a UserWarning.
    # Suppress it (once, for all assignments below) since we are
    # deliberately subclassing.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UserWarning)

        if config is not None:
            # !!! Should validate the incoming config with jsonschema here.
            # Copy the original so that we don't overwrite it by accident.
            self._expectations_config = DotDict(copy.deepcopy(config))
        else:
            self._expectations_config = DotDict({
                "dataset_name": name,
                "meta": {
                    "great_expectations.__version__": __version__
                },
                "expectations": []
            })

        self.default_expectation_args = {
            "include_config": False,
            "catch_exceptions": False,
            "result_format": 'BASIC',
        }
def _append_expectation(self, expectation_config):
"""Appends an expectation to `DataSet._expectations_config` and drops existing expectations of the same type.
If `expectation_config` is a column expectation, this drops existing expectations that are specific to \
that column and only if it is the same expectation type as `expectation_config`. Otherwise, if it's not a \
column expectation, this drops existing expectations of the same type as `expectation config`. \
After expectations of the same type are dropped, `expectation_config` is appended to `DataSet._expectations_config`.
Args:
expectation_config (json): \
The JSON-serializable expectation to be added to the DataSet expectations in `_expectations_config`.
Notes:
May raise future errors once json-serializable tests are implemented to check for correct arg formatting
"""
expectation_type = expectation_config['expectation_type']
#Test to ensure the new expectation is serializable.
#FIXME: If it's not, are we sure we want to raise an error?
#FIXME: Should we allow users to override the error?
#FIXME: Should we try to convert the object using something like recursively_convert_to_json_serializable?
json.dumps(expectation_config)
#Drop existing expectations with the same expectation_type.
#For column_expectations, _append_expectation should only replace expectations
# where the expectation_type AND the column match
#!!! This is good default behavior, but
#!!! it needs to be documented, and
#!!! we need to provide syntax to override it.
if 'column' in expectation_config['kwargs']:
column = expectation_config['kwargs']['column']
self._expectations_config.expectations = [f for f in filter(
lambda exp: (exp['expectation_type'] != expectation_type) or ('column' in exp['kwargs'] and exp['kwargs']['column'] != column),
self._expectations_config.expectations
)]
else:
self._expectations_config.expectations = [f for f in filter(
lambda exp: exp['expectation_type'] != expectation_type,
self._expectations_config.expectations
)]
self._expectations_config.expectations.append(expectation_config)
def _copy_and_clean_up_expectation(self,
expectation,
discard_result_format_kwargs=True,
discard_include_configs_kwargs=True,
discard_catch_exceptions_kwargs=True,
):
"""Returns copy of `expectation` without `success_on_last_run` and other specified key-value pairs removed
Returns a copy of specified expectation will not have `success_on_last_run` key-value. The other key-value \
pairs will be removed by default but will remain in the copy if specified.
Args:
expectation (json): \
The expectation to copy and clean.
discard_result_format_kwargs (boolean): \
if True, will remove the kwarg `output_format` key-value pair from the copied expectation.
discard_include_configs_kwargs (boolean):
if True, will remove the kwarg `include_configs` key-value pair from the copied expectation.
discard_catch_exceptions_kwargs (boolean):
if True, will remove the kwarg `catch_exceptions` key-value pair from the copied expectation.
Returns:
A copy of the provided expectation with `success_on_last_run` and other specified key-value pairs removed
"""
new_expectation = copy.deepcopy(expectation)
if "success_on_last_run" in new_expectation:
del new_expectation["success_on_last_run"]
if discard_result_format_kwargs:
if "result_format" in new_expectation["kwargs"]:
del new_expectation["kwargs"]["result_format"]
# discards["result_format"] += 1
if discard_include_configs_kwargs:
if "include_configs" in new_expectation["kwargs"]:
del new_expectation["kwargs"]["include_configs"]
# discards["include_configs"] += 1
if discard_catch_exceptions_kwargs:
if "catch_exceptions" in new_expectation["kwargs"]:
del new_expectation["kwargs"]["catch_exceptions"]
# discards["catch_exceptions"] += 1
return new_expectation
def _copy_and_clean_up_expectations_from_indexes(
self,
match_indexes,
discard_result_format_kwargs=True,
discard_include_configs_kwargs=True,
discard_catch_exceptions_kwargs=True,
):
"""Copies and cleans all expectations provided by their index in DataSet._expectations_config.expectations.
Applies the _copy_and_clean_up_expectation method to multiple expectations, provided by their index in \
`DataSet,_expectations_config.expectations`. Returns a list of the copied and cleaned expectations.
Args:
match_indexes (List): \
Index numbers of the expectations from `expectation_config.expectations` to be copied and cleaned.
discard_result_format_kwargs (boolean): \
if True, will remove the kwarg `output_format` key-value pair from the copied expectation.
discard_include_configs_kwargs (boolean):
if True, will remove the kwarg `include_configs` key-value pair from the copied expectation.
discard_catch_exceptions_kwargs (boolean):
if True, will remove the kwarg `catch_exceptions` key-value pair from the copied expectation.
Returns:
A list of the copied expectations with `success_on_last_run` and other specified \
key-value pairs removed.
See also:
_copy_and_clean_expectation
"""
rval = []
for i in match_indexes:
rval.append(
self._copy_and_clean_up_expectation(
self._expectations_config.expectations[i],
discard_result_format_kwargs,
discard_include_configs_kwargs,
discard_catch_exceptions_kwargs,
)
)
return rval
def find_expectation_indexes(self,
    expectation_type=None,
    column=None,
    expectation_kwargs=None
):
    """Find matching expectations within _expectation_config.

    Args:
        expectation_type=None : The name of the expectation type to be matched.
        column=None : The name of the column to be matched.
        expectation_kwargs=None : A dictionary of kwargs to match against.

    Returns:
        A list of indexes for matching expectation objects.
        If there are no matches, the list will be empty.

    Raises:
        ValueError: if `column` conflicts with `expectation_kwargs["column"]`.
    """
    if expectation_kwargs is None:
        expectation_kwargs = {}

    if "column" in expectation_kwargs and column is not None and column != expectation_kwargs["column"]:
        raise ValueError("Conflicting column names in remove_expectation: %s and %s" % (column, expectation_kwargs["column"]))

    # Work on a copy so the caller's kwargs dict is never mutated
    # (previously `column` was written into the dict passed in).
    expectation_kwargs = dict(expectation_kwargs)
    if column is not None:
        expectation_kwargs["column"] = column

    match_indexes = []
    for i, exp in enumerate(self._expectations_config.expectations):
        if expectation_type is not None and exp['expectation_type'] != expectation_type:
            continue
        # Every requested kwarg must be present with an equal value.
        if all(k in exp['kwargs'] and exp['kwargs'][k] == v
               for k, v in expectation_kwargs.items()):
            match_indexes.append(i)

    return match_indexes
def find_expectations(self,
expectation_type=None,
column=None,
expectation_kwargs=None,
discard_result_format_kwargs=True,
discard_include_configs_kwargs=True,
discard_catch_exceptions_kwargs=True,
):
"""Find matching expectations within _expectation_config.
Args:
expectation_type=None : The name of the expectation type to be matched.
column=None : The name of the column to be matched.
expectation_kwargs=None : A dictionary of kwargs to match against.
discard_result_format_kwargs=True : In returned expectation object(s), suppress the `result_format` parameter.
discard_include_configs_kwargs=True : In returned expectation object(s), suppress the `include_configs` parameter.
discard_catch_exceptions_kwargs=True : In returned expectation object(s), suppress the `catch_exceptions` parameter.
Returns:
A list of matching expectation objects.
If there are no matches, the list will be empty.
"""
match_indexes = self.find_expectation_indexes(
expectation_type,
column,
expectation_kwargs,
)
return self._copy_and_clean_up_expectations_from_indexes(
match_indexes,
discard_result_format_kwargs,
discard_include_configs_kwargs,
discard_catch_exceptions_kwargs,
)
def remove_expectation(self,
expectation_type=None,
column=None,
expectation_kwargs=None,
remove_multiple_matches=False,
dry_run=False,
):
"""Remove matching expectation(s) from _expectation_config.
Args:
expectation_type=None : The name of the expectation type to be matched.
column=None : The name of the column to be matched.
expectation_kwargs=None : A dictionary of kwargs to match against.
remove_multiple_matches=False : Match multiple expectations
dry_run=False : Return a list of matching expectations without removing
Returns:
None, unless dry_run=True.
If dry_run=True and remove_multiple_matches=False then return the expectation that *would be* removed.
If dry_run=True and remove_multiple_matches=True then return a list of expectations that *would be* removed.
Note:
If remove_expectation doesn't find any matches, it raises a ValueError.
If remove_expectation finds more than one matches and remove_multiple_matches!=True, it raises a ValueError.
If dry_run=True, then `remove_expectation` acts as a thin layer to find_expectations, with the default values for discard_result_format_kwargs, discard_include_configs_kwargs, and discard_catch_exceptions_kwargs
"""
match_indexes = self.find_expectation_indexes(
expectation_type,
column,
expectation_kwargs,
)
if len(match_indexes) == 0:
raise ValueError('No matching expectation found.')
elif len(match_indexes) > 1:
if not remove_multiple_matches:
raise ValueError('Multiple expectations matched arguments. No expectations removed.')
else:
if not dry_run:
self._expectations_config.expectations = [i for j, i in enumerate(self._expectations_config.expectations) if j not in match_indexes]
else:
return self._copy_and_clean_up_expectations_from_indexes(match_indexes)
else: #Exactly one match
expectation = self._copy_and_clean_up_expectation(
self._expectations_config.expectations[match_indexes[0]]
)
if not dry_run:
del self._expectations_config.expectations[match_indexes[0]]
else:
if remove_multiple_matches:
return [expectation]
else:
return expectation
def discard_failing_expectations(self):
res = self.validate(only_return_failures=True).get('results')
if any(res):
for item in res:
self.remove_expectation(expectation_type=item['expectation_config']['expectation_type'],
expectation_kwargs=item['expectation_config']['kwargs'])
# print("WARNING: Removed %s expectations that were 'False'" % len(res))
warnings.warn("Removed %s expectations that were 'False'" % len(res))
def get_default_expectation_arguments(self):
"""Fetch default expectation arguments for this dataset
Returns:
A dictionary containing all the current default expectation arguments for a dataset
Ex::
{
"include_config" : False,
"catch_exceptions" : False,
"result_format" : 'BASIC'
}
See also:
set_default_expectation_arguments
"""
return self.default_expectation_args
def set_default_expectation_argument(self, argument, value):
"""Set a default expectation argument for this dataset
Args:
argument (string): The argument to be replaced
value : The New argument to use for replacement
Returns:
None
See also:
get_default_expectation_arguments
"""
#!!! Maybe add a validation check here?
self.default_expectation_args[argument] = value
def get_expectations_config(self,
discard_failed_expectations=True,
discard_result_format_kwargs=True,
discard_include_configs_kwargs=True,
discard_catch_exceptions_kwargs=True,
suppress_warnings=False
):
"""Returns _expectation_config as a JSON object, and perform some cleaning along the way.
Args:
discard_failed_expectations=True : Only include expectations with success_on_last_run=True in the exported config.
discard_result_format_kwargs=True : In returned expectation objects, suppress the `result_format` parameter.
discard_include_configs_kwargs=True : In returned expectation objects, suppress the `include_configs` parameter.
discard_catch_exceptions_kwargs=True : In returned expectation objects, suppress the `catch_exceptions` parameter.
Returns:
An expectation config.
Note:
get_expectations_config does not affect the underlying config at all. The returned config is a copy of _expectations_config, not the original object.
"""
config = dict(self._expectations_config)
config = copy.deepcopy(config)
expectations = config["expectations"]
discards = defaultdict(int)
if discard_failed_expectations:
new_expectations = []
for expectation in expectations:
#Note: This is conservative logic.
#Instead of retaining expectations IFF success==True, it discard expectations IFF success==False.
#In cases where expectation["success"] is missing or None, expectations are *retained*.
#Such a case could occur if expectations were loaded from a config file and never run.
if "success_on_last_run" in expectation and expectation["success_on_last_run"] == False:
discards["failed_expectations"] += 1
else:
new_expectations.append(expectation)
expectations = new_expectations
for expectation in expectations:
#FIXME: Factor this out into a new function. The logic is duplicated in remove_expectation, which calls _copy_and_clean_up_expectation
if "success_on_last_run" in expectation:
del expectation["success_on_last_run"]
if discard_result_format_kwargs:
if "result_format" in expectation["kwargs"]:
del expectation["kwargs"]["result_format"]
discards["result_format"] += 1
if discard_include_configs_kwargs:
if "include_configs" in expectation["kwargs"]:
del expectation["kwargs"]["include_configs"]
discards["include_configs"] += 1
if discard_catch_exceptions_kwargs:
if "catch_exceptions" in expectation["kwargs"]:
del expectation["kwargs"]["catch_exceptions"]
discards["catch_exceptions"] += 1
if not suppress_warnings:
"""
WARNING: get_expectations_config discarded
12 failing expectations
44 result_format kwargs
0 include_config kwargs
1 catch_exceptions kwargs
If you wish to change this behavior, please set discard_failed_expectations, discard_result_format_kwargs, discard_include_configs_kwargs, and discard_catch_exceptions_kwargs appropirately.
"""
if any([discard_failed_expectations, discard_result_format_kwargs, discard_include_configs_kwargs, discard_catch_exceptions_kwargs]):
print ("WARNING: get_expectations_config discarded")
if discard_failed_expectations:
print ("\t%d failing expectations" % discards["failed_expectations"])
if discard_result_format_kwargs:
print ("\t%d result_format kwargs" % discards["result_format"])
if discard_include_configs_kwargs:
print ("\t%d include_configs kwargs" % discards["include_configs"])
if discard_catch_exceptions_kwargs:
print ("\t%d catch_exceptions kwargs" % discards["catch_exceptions"])
print ("If you wish to change this behavior, please set discard_failed_expectations, discard_result_format_kwargs, discard_include_configs_kwargs, and discard_catch_exceptions_kwargs appropirately.")
config["expectations"] = expectations
return config
def save_expectations_config(
self,
filepath=None,
discard_failed_expectations=True,
discard_result_format_kwargs=True,
discard_include_configs_kwargs=True,
discard_catch_exceptions_kwargs=True,
suppress_warnings=False
):
"""Writes ``_expectation_config`` to a JSON file.
Writes the DataSet's expectation config to the specified JSON ``filepath``. Failing expectations \
can be excluded from the JSON expectations config with ``discard_failed_expectations``. The kwarg key-value \
pairs :ref:`result_format`, :ref:`include_config`, and :ref:`catch_exceptions` are optionally excluded from the JSON \
expectations config.
Args:
filepath (string): \
The location and name to write the JSON config file to.
discard_failed_expectations (boolean): \
If True, excludes expectations that do not return ``success = True``. \
If False, all expectations are written to the JSON config file.
discard_result_format_kwargs (boolean): \
If True, the :ref:`result_format` attribute for each expectation is not written to the JSON config file. \
discard_include_configs_kwargs (boolean): \
If True, the :ref:`include_config` attribute for each expectation is not written to the JSON config file.\
discard_catch_exceptions_kwargs (boolean): \
If True, the :ref:`catch_exceptions` attribute for each expectation is not written to the JSON config \
file.
suppress_warnings (boolean): \
It True, all warnings raised by Great Expectations, as a result of dropped expectations, are \
suppressed.
"""
if filepath==None:
#FIXME: Fetch the proper filepath from the project config
pass
expectations_config = self.get_expectations_config(
discard_failed_expectations,
discard_result_format_kwargs,
discard_include_configs_kwargs,
discard_catch_exceptions_kwargs,
suppress_warnings
)
expectation_config_str = json.dumps(expectations_config, indent=2)
open(filepath, 'w').write(expectation_config_str)
    def validate(self, expectations_config=None, evaluation_parameters=None, catch_exceptions=True, result_format=None, only_return_failures=False):
        """Generates a JSON-formatted report describing the outcome of all expectations.

        Use the default expectations_config=None to validate the expectations config associated with the DataSet.

        Args:
            expectations_config (json or None): \
                If None, uses the expectations config generated with the Dataset during the current session. \
                If a JSON file path, validates those expectations.
            evaluation_parameters (dict or None): \
                If None, uses the evaluation_parameters from the expectations_config provided or as part of the dataset.
                If a dict, uses the evaluation parameters in the dictionary.
            catch_exceptions (boolean): \
                If True, exceptions raised by tests will not end validation and will be described in the returned report.
            result_format (string or None): \
                If None, uses the default value ('BASIC' or as specified). \
                If string, the returned expectation output follows the specified format ('BOOLEAN_ONLY','BASIC', etc.).
            only_return_failures (boolean): \
                If True, expectation results are only returned when ``success = False``.

        Returns:
            A JSON-formatted dictionary containing a list of the validation results. \
            An example of the returned format::

                {
                  "results": [
                    {
                      "unexpected_list": [unexpected_value_1, unexpected_value_2],
                      "expectation_type": "expect_*",
                      "kwargs": {
                        "column": "Column_Name",
                        "output_format": "SUMMARY"
                      },
                      "success": true,
                      "raised_exception": false,
                      "exception_traceback": null
                    },
                    {
                      ... (Second expectation results)
                    },
                    ... (More expectations results)
                  ],
                  "success": true,
                  "statistics": {
                    "evaluated_expectations": n,
                    "successful_expectations": m,
                    "unsuccessful_expectations": n - m,
                    "success_percent": m / n
                  }
                }

        Notes:
            Warns (does not fail) if the configuration object was built with a different version of \
            great_expectations than the current environment, or if no version was found in the \
            configuration file.

        Raises:
            AttributeError - if 'catch_exceptions'=None and an expectation throws an AttributeError
        """
        results = []

        # Resolve the config: default to this dataset's own config (keeping ALL
        # kwargs so expectations re-run exactly as recorded), or load from a
        # JSON file when a path string is given.
        if expectations_config is None:
            expectations_config = self.get_expectations_config(
                discard_failed_expectations=False,
                discard_result_format_kwargs=False,
                discard_include_configs_kwargs=False,
                discard_catch_exceptions_kwargs=False,
            )
        elif isinstance(expectations_config, string_types):
            expectations_config = json.load(open(expectations_config, 'r'))

        if evaluation_parameters is None:
            # Use evaluation parameters from the (maybe provided) config
            if "evaluation_parameters" in expectations_config:
                evaluation_parameters = expectations_config["evaluation_parameters"]

        # Warn if our version is different from the version in the configuration
        try:
            if expectations_config['meta']['great_expectations.__version__'] != __version__:
                warnings.warn("WARNING: This configuration object was built using a different version of great_expectations than is currently validating it.")
        except KeyError:
            warnings.warn("WARNING: No great_expectations version found in configuration object.")

        # Run each expectation in order; each expectation_type names a method
        # on this dataset.
        for expectation in expectations_config['expectations']:
            try:
                expectation_method = getattr(self, expectation['expectation_type'])

                # A caller-supplied result_format overrides whatever the
                # config recorded.
                if result_format is not None:
                    expectation['kwargs'].update({"result_format": result_format})

                # A missing parameter should raise a KeyError
                evaluation_args = self._build_evaluation_parameters(expectation['kwargs'], evaluation_parameters)

                result = expectation_method(
                    catch_exceptions=catch_exceptions,
                    **evaluation_args
                )

            except Exception as err:
                # With catch_exceptions, an exception becomes a failed result
                # carrying its traceback; otherwise it propagates to the caller.
                if catch_exceptions:
                    raised_exception = True
                    exception_traceback = traceback.format_exc()

                    result = {
                        "success": False,
                        "exception_info": {
                            "raised_exception": raised_exception,
                            "exception_traceback": exception_traceback,
                            "exception_message": str(err)
                        }
                    }

                else:
                    raise(err)

            #if include_config:
            result["expectation_config"] = copy.deepcopy(expectation)

            # Add an empty exception_info object if no exception was caught
            if catch_exceptions and ('exception_info' not in result):
                result["exception_info"] = {
                    "raised_exception": False,
                    "exception_traceback": None,
                    "exception_message": None
                }

            results.append(result)

        statistics = _calc_validation_statistics(results)

        if only_return_failures:
            abbrev_results = []
            for exp in results:
                if exp["success"]==False:
                    abbrev_results.append(exp)
            results = abbrev_results

        result = {
            "results": results,
            "success": statistics.success,
            "statistics": {
                "evaluated_expectations": statistics.evaluated_expectations,
                "successful_expectations": statistics.successful_expectations,
                "unsuccessful_expectations": statistics.unsuccessful_expectations,
                "success_percent": statistics.success_percent,
            }
        }

        # Echo back the parameters that were actually used, so the report is
        # self-describing.
        if evaluation_parameters is not None:
            result.update({"evaluation_parameters": evaluation_parameters})

        return result
def get_evaluation_parameter(self, parameter_name, default_value=None):
"""Get an evaluation parameter value that has been stored in meta.
Args:
parameter_name (string): The name of the parameter to store.
default_value (any): The default value to be returned if the parameter is not found.
Returns:
The current value of the evaluation parameter.
"""
if "evaluation_parameters" in self._expectations_config and \
parameter_name in self._expectations_config['evaluation_parameters']:
return self._expectations_config['evaluation_parameters'][parameter_name]
else:
return default_value
def set_evaluation_parameter(self, parameter_name, parameter_value):
"""Provide a value to be stored in the dataset evaluation_parameters object and used to evaluate
parameterized expectations.
Args:
parameter_name (string): The name of the kwarg to be replaced at evaluation time
parameter_value (any): The value to be used
"""
if 'evaluation_parameters' not in self._expectations_config:
self._expectations_config['evaluation_parameters'] = {}
self._expectations_config['evaluation_parameters'].update({parameter_name: parameter_value})
def _build_evaluation_parameters(self, expectation_args, evaluation_parameters):
"""Build a dictionary of parameters to evaluate, using the provided evaluation_paramters,
AND mutate expectation_args by removing any parameter values passed in as temporary values during
exploratory work.
"""
evaluation_args = copy.deepcopy(expectation_args)
# Iterate over arguments, and replace $PARAMETER-defined args with their
# specified parameters.
for key, value in evaluation_args.items():
if isinstance(value, dict) and '$PARAMETER' in value:
# First, check to see whether an argument was supplied at runtime
# If it was, use that one, but remove it from the stored config
if "$PARAMETER." + value["$PARAMETER"] in value:
evaluation_args[key] = evaluation_args[key]["$PARAMETER." + value["$PARAMETER"]]
del expectation_args[key]["$PARAMETER." + value["$PARAMETER"]]
elif evaluation_parameters is not None and value["$PARAMETER"] in evaluation_parameters:
evaluation_args[key] = evaluation_parameters[value['$PARAMETER']]
else:
raise KeyError("No value found for $PARAMETER " + value["$PARAMETER"])
return evaluation_args
##### Output generation #####
def _format_column_map_output(self,
result_format, success,
element_count, nonnull_count,
unexpected_list, unexpected_index_list
):
"""Helper function to construct expectation result objects for column_map_expectations.
Expectations support four result_formats: BOOLEAN_ONLY, BASIC, SUMMARY, and COMPLETE.
In each case, the object returned has a different set of populated fields.
See :ref:`result_format` for more information.
This function handles the logic for mapping those fields for column_map_expectations.
"""
# Retain support for string-only output formats:
result_format = parse_result_format(result_format)
# Incrementally add to result and return when all values for the specified level are present
return_obj = {
'success': success
}
if result_format['result_format'] == 'BOOLEAN_ONLY':
return return_obj
missing_count = element_count - nonnull_count
unexpected_count = len(unexpected_list)
if element_count > 0:
unexpected_percent = unexpected_count / element_count
missing_percent = missing_count / element_count
if nonnull_count > 0:
unexpected_percent_nonmissing = unexpected_count / nonnull_count
else:
unexpected_percent_nonmissing = None
else:
missing_percent = None
unexpected_percent = None
unexpected_percent_nonmissing = None
return_obj['result'] = {
'element_count': element_count,
'missing_count': missing_count,
'missing_percent': missing_percent,
'unexpected_count': unexpected_count,
'unexpected_percent': unexpected_percent,
'unexpected_percent_nonmissing': unexpected_percent_nonmissing,
'partial_unexpected_list': unexpected_list[:result_format['partial_unexpected_count']]
}
if result_format['result_format'] == 'BASIC':
return return_obj
# Try to return the most common values, if possible.
try:
partial_unexpected_counts = [
{'value': key, 'count': value}
for key, value
in sorted(
Counter(unexpected_list).most_common(result_format['partial_unexpected_count']),
key=lambda x: (-x[1], x[0]))
]
except TypeError:
partial_unexpected_counts = ['partial_exception_counts requires a hashable type']
return_obj['result'].update(
{
'partial_unexpected_index_list': unexpected_index_list[:result_format['partial_unexpected_count']] if unexpected_index_list is not None else None,
'partial_unexpected_counts': partial_unexpected_counts
}
)
if result_format['result_format'] == 'SUMMARY':
return return_obj
return_obj['result'].update(
{
'unexpected_list': unexpected_list,
'unexpected_index_list': unexpected_index_list
}
)
if result_format['result_format'] == 'COMPLETE':
return return_obj
raise ValueError("Unknown result_format %s." % (result_format['result_format'],))
def _calc_map_expectation_success(self, success_count, nonnull_count, mostly):
"""Calculate success and percent_success for column_map_expectations
Args:
success_count (int): \
The number of successful values in the column
nonnull_count (int): \
The number of nonnull values in the column
mostly (float or None): \
A value between 0 and 1 (or None), indicating the percentage of successes required to pass the expectation as a whole\
If mostly=None, then all values must succeed in order for the expectation as a whole to succeed.
Returns:
success (boolean), percent_success (float)
"""
if nonnull_count > 0:
# percent_success = float(success_count)/nonnull_count
percent_success = success_count / nonnull_count
if mostly != None:
success = bool(percent_success >= mostly)
else:
success = bool(nonnull_count-success_count == 0)
else:
success = True
percent_success = None
return success, percent_success
##### Iterative testing for custom expectations #####
def test_expectation_function(self, function, *args, **kwargs):
"""Test a generic expectation function
Args:
function (func): The function to be tested. (Must be a valid expectation function.)
*args : Positional arguments to be passed the the function
**kwargs : Keyword arguments to be passed the the function
Returns:
A JSON-serializable expectation result object.
Notes:
This function is a thin layer to allow quick testing of new expectation functions, without having to define custom classes, etc.
To use developed expectations from the command-line tool, you'll still need to define custom classes, etc.
Check out :ref:`custom_expectations` for more information.
"""
new_function = self.expectation(inspect.getargspec(function)[0][1:])(function)
return new_function(self, *args, **kwargs)
def test_column_map_expectation_function(self, function, *args, **kwargs):
"""Test a column map expectation function
Args:
function (func): The function to be tested. (Must be a valid column_map_expectation function.)
*args : Positional arguments to be passed the the function
**kwargs : Keyword arguments to be passed the the function
Returns:
A JSON-serializable expectation result object.
Notes:
This function is a thin layer to allow quick testing of new expectation functions, without having to define custom classes, etc.
To use developed expectations from the command-line tool, you'll still need to define custom classes, etc.
Check out :ref:`custom_expectations` for more information.
"""
new_function = self.column_map_expectation( function )
return new_function(self, *args, **kwargs)
def test_column_aggregate_expectation_function(self, function, *args, **kwargs):
"""Test a column aggregate expectation function
Args:
function (func): The function to be tested. (Must be a valid column_aggregate_expectation function.)
*args : Positional arguments to be passed the the function
**kwargs : Keyword arguments to be passed the the function
Returns:
A JSON-serializable expectation result object.
Notes:
This function is a thin layer to allow quick testing of new expectation functions, without having to define custom classes, etc.
To use developed expectations from the command-line tool, you'll still need to define custom classes, etc.
Check out :ref:`custom_expectations` for more information.
"""
new_function = self.column_aggregate_expectation( function )
return new_function(self, *args, **kwargs)
##### Table shape expectations #####
def expect_column_to_exist(
self, column, column_index=None, result_format=None, include_config=False,
catch_exceptions=None, meta=None
):
"""Expect the specified column to exist.
expect_column_to_exist is a :func:`expectation <great_expectations.dataset.base.Dataset.expectation>`, not a \
`column_map_expectation` or `column_aggregate_expectation`.
Args:
column (str): \
The column name.
Other Parameters:
column_index (int or None): \
If not None, checks the order of the columns. The expectation will fail if the \
column is not in location column_index (zero-indexed).
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
For more detail, see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
raise NotImplementedError
def expect_table_columns_to_match_ordered_list(self,
column_list,
result_format=None, include_config=False, catch_exceptions=None, meta=None
):
"""Expect the columns to exactly match a specified list.
expect_table_columns_to_match_ordered_list is a :func:`expectation <great_expectations.dataset.base.DataSet.expectation>`, not a \
`column_map_expectation` or `column_aggregate_expectation`.
Args:
column_list (list of str): \
The column names, in the correct order.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
For more detail, see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
raise NotImplementedError
def expect_table_row_count_to_be_between(self,
min_value=0,
max_value=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None
):
"""Expect the number of rows to be between two values.
expect_table_row_count_to_be_between is a :func:`expectation <great_expectations.dataset.base.Dataset.expectation>`, \
not a `column_map_expectation` or `column_aggregate_expectation`.
Keyword Args:
min_value (int or None): \
The minimum number of rows, inclusive.
max_value (int or None): \
The maximum number of rows, inclusive.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
For more detail, see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
* min_value and max_value are both inclusive.
* If min_value is None, then max_value is treated as an upper bound, and the number of acceptable rows has no minimum.
* If max_value is None, then min_value is treated as a lower bound, and the number of acceptable rows has no maximum.
See Also:
expect_table_row_count_to_equal
"""
raise NotImplementedError
def expect_table_row_count_to_equal(self,
value,
result_format=None, include_config=False, catch_exceptions=None, meta=None
):
"""Expect the number of rows to equal a value.
expect_table_row_count_to_equal is a basic :func:`expectation <great_expectations.dataset.base.Dataset.expectation>`, \
not a `column_map_expectation` or `column_aggregate_expectation`.
Args:
value (int): \
The expected number of rows.
Other Parameters:
result_format (string or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
For more detail, see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
expect_table_row_count_to_be_between
"""
raise NotImplementedError
##### Missing values, unique values, and types #####
def expect_column_values_to_be_unique(self,
column,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None
):
"""Expect each column value to be unique.
This expectation detects duplicates. All duplicated values are counted as exceptions.
For example, `[1, 2, 3, 3, 3]` will return `[3, 3, 3]` in `result.exceptions_list`, with `unexpected_percent=0.6.`
expect_column_values_to_be_unique is a :func:`column_map_expectation <great_expectations.dataset.base.Dataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly percent of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
For more detail, see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
raise NotImplementedError
def expect_column_values_to_not_be_null(self,
column,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None
):
"""Expect column values to not be null.
To be counted as an exception, values must be explicitly null or missing, such as a NULL in PostgreSQL or an np.NaN in pandas.
Empty strings don't count as null unless they have been coerced to a null type.
expect_column_values_to_not_be_null is a :func:`column_map_expectation <great_expectations.dataset.base.Dataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly percent of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
For more detail, see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
expect_column_values_to_be_null
"""
raise NotImplementedError
def expect_column_values_to_be_null(self,
column,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None
):
"""Expect column values to be null.
expect_column_values_to_be_null is a :func:`column_map_expectation <great_expectations.dataset.base.Dataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly percent of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
For more detail, see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
expect_column_values_to_not_be_null
"""
raise NotImplementedError
def expect_column_values_to_be_of_type(
        self,
        column,
        type_,
        mostly=None,
        result_format=None, include_config=False,
        catch_exceptions=None, meta=None):
    """Expect each column entry to be of the specified data type.

    expect_column_values_to_be_of_type is a :func:`column_map_expectation
    <great_expectations.dataset.base.Dataset.column_map_expectation>`.

    Args:
        column (str): The column name.
        type\_ (str): A string naming the data type each entry should have;
            for example, "double integer" refers to an integer with double
            precision.

    Keyword Args:
        mostly (None or a float between 0 and 1): Return ``"success": True``
            when at least this fraction of values meet the expectation
            (see :ref:`mostly`).

    Other Parameters:
        result_format (str or None): One of `BOOLEAN_ONLY`, `BASIC`,
            `COMPLETE`, or `SUMMARY` (see :ref:`result_format <result_format>`).
        include_config (boolean): If True, include the expectation config
            in the result object (see :ref:`include_config`).
        catch_exceptions (boolean or None): If True, catch exceptions and
            report them in the result object (see :ref:`catch_exceptions`).
        meta (dict or None): JSON-serializable metadata (nesting allowed)
            copied into the output unmodified (see :ref:`meta`).

    Returns:
        A JSON-serializable expectation result object; its exact fields
        depend on ``result_format``, ``include_config``,
        ``catch_exceptions``, and ``meta``.

    Warning:
        expect_column_values_to_be_of_type is slated for major changes in
        future versions of great_expectations. As of v0.3,
        great_expectations is exclusively based on pandas, which handles
        typing in its own peculiar way. Future versions will allow Datasets
        in SQL, spark, etc., and breaking changes are expected in parts of
        the codebase that depend strongly on pandas notions of typing.

    See also:
        expect_column_values_to_be_in_type_list
    """
    # Abstract stub: concrete Dataset backends supply the implementation.
    raise NotImplementedError
def expect_column_values_to_be_in_type_list(
        self,
        column,
        type_list,
        mostly=None,
        result_format=None, include_config=False,
        catch_exceptions=None, meta=None):
    """Expect each column entry to match one of a list of data types.

    expect_column_values_to_be_in_type_list is a
    :func:`column_map_expectation
    <great_expectations.dataset.base.Dataset.column_map_expectation>`.

    Args:
        column (str): The column name.
        type_list (list of str): Strings naming the acceptable data types
            for column entries; for example, "double integer" refers to an
            integer with double precision.

    Keyword Args:
        mostly (None or a float between 0 and 1): Return ``"success": True``
            when at least this fraction of values meet the expectation
            (see :ref:`mostly`).

    Other Parameters:
        result_format (str or None): One of `BOOLEAN_ONLY`, `BASIC`,
            `COMPLETE`, or `SUMMARY` (see :ref:`result_format <result_format>`).
        include_config (boolean): If True, include the expectation config
            in the result object (see :ref:`include_config`).
        catch_exceptions (boolean or None): If True, catch exceptions and
            report them in the result object (see :ref:`catch_exceptions`).
        meta (dict or None): JSON-serializable metadata (nesting allowed)
            copied into the output unmodified (see :ref:`meta`).

    Returns:
        A JSON-serializable expectation result object; its exact fields
        depend on ``result_format``, ``include_config``,
        ``catch_exceptions``, and ``meta``.

    Warning:
        expect_column_values_to_be_in_type_list is slated for major changes
        in future versions of great_expectations. As of v0.3,
        great_expectations is exclusively based on pandas, which handles
        typing in its own peculiar way. Future versions will allow Datasets
        in SQL, spark, etc., and breaking changes are expected in parts of
        the codebase that depend strongly on pandas notions of typing.

    See also:
        expect_column_values_to_be_of_type
    """
    # Abstract stub: concrete Dataset backends supply the implementation.
    raise NotImplementedError
##### Sets and ranges #####
def expect_column_values_to_be_in_set(self,
                                      column,
                                      value_set,
                                      mostly=None,
                                      result_format=None, include_config=False,
                                      catch_exceptions=None, meta=None):
    """Expect each column value to be a member of the given set.

    For example:
    ::

        # my_df.my_col = [1,2,2,3,3,3]
        >>> my_df.expect_column_values_to_be_in_set(
            "my_col",
            [2,3]
        )
        {
          "success": false
          "result": {
            "unexpected_count": 1
            "unexpected_percent": 0.16666666666666666,
            "unexpected_percent_nonmissing": 0.16666666666666666,
            "partial_unexpected_list": [
              1
            ],
          },
        }

    expect_column_values_to_be_in_set is a :func:`column_map_expectation
    <great_expectations.dataset.base.Dataset.column_map_expectation>`.

    Args:
        column (str): The column name.
        value_set (set-like): Objects the column entries are compared
            against.

    Keyword Args:
        mostly (None or a float between 0 and 1): Return ``"success": True``
            when at least this fraction of values meet the expectation
            (see :ref:`mostly`).

    Other Parameters:
        result_format (str or None): One of `BOOLEAN_ONLY`, `BASIC`,
            `COMPLETE`, or `SUMMARY` (see :ref:`result_format <result_format>`).
        include_config (boolean): If True, include the expectation config
            in the result object (see :ref:`include_config`).
        catch_exceptions (boolean or None): If True, catch exceptions and
            report them in the result object (see :ref:`catch_exceptions`).
        meta (dict or None): JSON-serializable metadata (nesting allowed)
            copied into the output unmodified (see :ref:`meta`).

    Returns:
        A JSON-serializable expectation result object; its exact fields
        depend on ``result_format``, ``include_config``,
        ``catch_exceptions``, and ``meta``.

    See Also:
        expect_column_values_to_not_be_in_set
    """
    # Abstract stub: concrete Dataset backends supply the implementation.
    raise NotImplementedError
def expect_column_values_to_not_be_in_set(self,
                                          column,
                                          value_set,
                                          mostly=None,
                                          result_format=None, include_config=False,
                                          catch_exceptions=None, meta=None):
    """Expect column entries to NOT be members of the given set.

    For example:
    ::

        # my_df.my_col = [1,2,2,3,3,3]
        >>> my_df.expect_column_values_to_not_be_in_set(
            "my_col",
            [1,2]
        )
        {
          "success": false
          "result": {
            "unexpected_count": 3
            "unexpected_percent": 0.5,
            "unexpected_percent_nonmissing": 0.5,
            "partial_unexpected_list": [
              1, 2, 2
            ],
          },
        }

    expect_column_values_to_not_be_in_set is a :func:`column_map_expectation
    <great_expectations.dataset.base.Dataset.column_map_expectation>`.

    Args:
        column (str): The column name.
        value_set (set-like): Objects the column entries are compared
            against.

    Keyword Args:
        mostly (None or a float between 0 and 1): Return ``"success": True``
            when at least this fraction of values meet the expectation
            (see :ref:`mostly`).

    Other Parameters:
        result_format (str or None): One of `BOOLEAN_ONLY`, `BASIC`,
            `COMPLETE`, or `SUMMARY` (see :ref:`result_format <result_format>`).
        include_config (boolean): If True, include the expectation config
            in the result object (see :ref:`include_config`).
        catch_exceptions (boolean or None): If True, catch exceptions and
            report them in the result object (see :ref:`catch_exceptions`).
        meta (dict or None): JSON-serializable metadata (nesting allowed)
            copied into the output unmodified (see :ref:`meta`).

    Returns:
        A JSON-serializable expectation result object; its exact fields
        depend on ``result_format``, ``include_config``,
        ``catch_exceptions``, and ``meta``.

    See Also:
        expect_column_values_to_be_in_set
    """
    # Abstract stub: concrete Dataset backends supply the implementation.
    raise NotImplementedError
def expect_column_values_to_be_between(self,
                                       column,
                                       min_value=None,
                                       max_value=None,
                                       allow_cross_type_comparisons=None,
                                       parse_strings_as_datetimes=None,
                                       mostly=None,
                                       result_format=None, include_config=False,
                                       catch_exceptions=None, meta=None):
    """Expect column entries to lie between a minimum and a maximum value
    (inclusive).

    expect_column_values_to_be_between is a :func:`column_map_expectation
    <great_expectations.dataset.base.Dataset.column_map_expectation>`.

    Args:
        column (str): The column name.
        min_value (comparable type or None): The minimum value for a
            column entry.
        max_value (comparable type or None): The maximum value for a
            column entry.

    Keyword Args:
        allow_cross_type_comparisons (boolean or None): If True, allow
            comparisons between types (e.g. integer and string); otherwise
            attempting such comparisons raises an exception.
        parse_strings_as_datetimes (boolean or None): If True, parse
            min_value, max_value, and all non-null column values to
            datetimes before comparing.
        mostly (None or a float between 0 and 1): Return ``"success": True``
            when at least this fraction of values meet the expectation
            (see :ref:`mostly`).

    Other Parameters:
        result_format (str or None): One of `BOOLEAN_ONLY`, `BASIC`,
            `COMPLETE`, or `SUMMARY` (see :ref:`result_format <result_format>`).
        include_config (boolean): If True, include the expectation config
            in the result object (see :ref:`include_config`).
        catch_exceptions (boolean or None): If True, catch exceptions and
            report them in the result object (see :ref:`catch_exceptions`).
        meta (dict or None): JSON-serializable metadata (nesting allowed)
            copied into the output unmodified (see :ref:`meta`).

    Returns:
        A JSON-serializable expectation result object; its exact fields
        depend on ``result_format``, ``include_config``,
        ``catch_exceptions``, and ``meta``.

    Notes:
        * min_value and max_value are both inclusive.
        * If min_value is None, max_value acts as a sole upper bound.
        * If max_value is None, min_value acts as a sole lower bound.

    See Also:
        expect_column_value_lengths_to_be_between
    """
    # Abstract stub: concrete Dataset backends supply the implementation.
    raise NotImplementedError
def expect_column_values_to_be_increasing(self,
                                          column,
                                          strictly=None,
                                          parse_strings_as_datetimes=None,
                                          mostly=None,
                                          result_format=None, include_config=False,
                                          catch_exceptions=None, meta=None):
    """Expect column values to be increasing.

    By default this expectation only works for numeric or datetime data;
    with `parse_strings_as_datetimes=True` it can also parse strings to
    datetimes. When `strictly=True`, the expectation is only satisfied if
    each consecutive value is strictly increasing -- equal values are
    treated as failures.

    expect_column_values_to_be_increasing is a :func:`column_map_expectation
    <great_expectations.dataset.base.Dataset.column_map_expectation>`.

    Args:
        column (str): The column name.

    Keyword Args:
        strictly (Boolean or None): If True, values must be strictly
            greater than previous values.
        parse_strings_as_datetimes (boolean or None): If True, parse all
            non-null column values to datetimes before comparing.
        mostly (None or a float between 0 and 1): Return ``"success": True``
            when at least this fraction of values meet the expectation
            (see :ref:`mostly`).

    Other Parameters:
        result_format (str or None): One of `BOOLEAN_ONLY`, `BASIC`,
            `COMPLETE`, or `SUMMARY` (see :ref:`result_format <result_format>`).
        include_config (boolean): If True, include the expectation config
            in the result object (see :ref:`include_config`).
        catch_exceptions (boolean or None): If True, catch exceptions and
            report them in the result object (see :ref:`catch_exceptions`).
        meta (dict or None): JSON-serializable metadata (nesting allowed)
            copied into the output unmodified (see :ref:`meta`).

    Returns:
        A JSON-serializable expectation result object; its exact fields
        depend on ``result_format``, ``include_config``,
        ``catch_exceptions``, and ``meta``.

    See Also:
        expect_column_values_to_be_decreasing
    """
    # Abstract stub: concrete Dataset backends supply the implementation.
    raise NotImplementedError
def expect_column_values_to_be_decreasing(self,
                                          column,
                                          strictly=None,
                                          parse_strings_as_datetimes=None,
                                          mostly=None,
                                          result_format=None, include_config=False,
                                          catch_exceptions=None, meta=None):
    """Expect column values to be decreasing.

    By default this expectation only works for numeric or datetime data;
    with `parse_strings_as_datetimes=True` it can also parse strings to
    datetimes. When `strictly=True`, the expectation is only satisfied if
    each consecutive value is strictly decreasing -- equal values are
    treated as failures.

    expect_column_values_to_be_decreasing is a :func:`column_map_expectation
    <great_expectations.dataset.base.Dataset.column_map_expectation>`.

    Args:
        column (str): The column name.

    Keyword Args:
        strictly (Boolean or None): If True, values must be strictly less
            than previous values.
        parse_strings_as_datetimes (boolean or None): If True, parse all
            non-null column values to datetimes before comparing.
        mostly (None or a float between 0 and 1): Return ``"success": True``
            when at least this fraction of values meet the expectation
            (see :ref:`mostly`).

    Other Parameters:
        result_format (str or None): One of `BOOLEAN_ONLY`, `BASIC`,
            `COMPLETE`, or `SUMMARY` (see :ref:`result_format <result_format>`).
        include_config (boolean): If True, include the expectation config
            in the result object (see :ref:`include_config`).
        catch_exceptions (boolean or None): If True, catch exceptions and
            report them in the result object (see :ref:`catch_exceptions`).
        meta (dict or None): JSON-serializable metadata (nesting allowed)
            copied into the output unmodified (see :ref:`meta`).

    Returns:
        A JSON-serializable expectation result object; its exact fields
        depend on ``result_format``, ``include_config``,
        ``catch_exceptions``, and ``meta``.

    See Also:
        expect_column_values_to_be_increasing
    """
    # Abstract stub: concrete Dataset backends supply the implementation.
    raise NotImplementedError
##### String matching #####
def expect_column_value_lengths_to_be_between(self,
                                              column,
                                              min_value=None,
                                              max_value=None,
                                              mostly=None,
                                              result_format=None, include_config=False,
                                              catch_exceptions=None, meta=None):
    """Expect column entries to be strings whose length lies between a
    minimum and a maximum value (inclusive).

    This expectation only works for string-type values; invoking it on
    ints or floats will raise a TypeError.

    expect_column_value_lengths_to_be_between is a
    :func:`column_map_expectation
    <great_expectations.dataset.base.Dataset.column_map_expectation>`.

    Args:
        column (str): The column name.

    Keyword Args:
        min_value (int or None): The minimum length for a column entry.
        max_value (int or None): The maximum length for a column entry.
        mostly (None or a float between 0 and 1): Return ``"success": True``
            when at least this fraction of values meet the expectation
            (see :ref:`mostly`).

    Other Parameters:
        result_format (str or None): One of `BOOLEAN_ONLY`, `BASIC`,
            `COMPLETE`, or `SUMMARY` (see :ref:`result_format <result_format>`).
        include_config (boolean): If True, include the expectation config
            in the result object (see :ref:`include_config`).
        catch_exceptions (boolean or None): If True, catch exceptions and
            report them in the result object (see :ref:`catch_exceptions`).
        meta (dict or None): JSON-serializable metadata (nesting allowed)
            copied into the output unmodified (see :ref:`meta`).

    Returns:
        A JSON-serializable expectation result object; its exact fields
        depend on ``result_format``, ``include_config``,
        ``catch_exceptions``, and ``meta``.

    Notes:
        * min_value and max_value are both inclusive.
        * If min_value is None, max_value acts as a sole upper bound.
        * If max_value is None, min_value acts as a sole lower bound.

    See Also:
        expect_column_value_lengths_to_equal
    """
    # Abstract stub: concrete Dataset backends supply the implementation.
    raise NotImplementedError
def expect_column_value_lengths_to_equal(self,
                                         column,
                                         value,
                                         mostly=None,
                                         result_format=None, include_config=False,
                                         catch_exceptions=None, meta=None):
    """Expect column entries to be strings with length equal to the provided value.

    This expectation only works for string-type values. Invoking it on
    ints or floats will raise a TypeError.

    expect_column_value_lengths_to_equal is a :func:`column_map_expectation
    <great_expectations.dataset.base.Dataset.column_map_expectation>`.

    Args:
        column (str): \
            The column name.
        value (int or None): \
            The expected value for a column entry length.

    Keyword Args:
        mostly (None or a float between 0 and 1): \
            Return `"success": True` if at least mostly percent of values match the expectation. \
            For more detail, see :ref:`mostly`.

    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, then include the expectation config as part of the result object. \
            For more detail, see :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, then catch exceptions and include them as part of the result object. \
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
            For more detail, see :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object.
        Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
        :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.

    See Also:
        expect_column_value_lengths_to_be_between
    """
    # Fix: this abstract stub previously fell through and implicitly
    # returned None, unlike every sibling expectation stub in this class.
    # It must raise so that subclasses are forced to implement it.
    # (The docstring also mis-identified the method as
    # expect_column_values_to_be_between; corrected above.)
    raise NotImplementedError
def expect_column_values_to_match_regex(self,
                                        column,
                                        regex,
                                        mostly=None,
                                        result_format=None, include_config=False,
                                        catch_exceptions=None, meta=None):
    """Expect column entries to be strings matching a given regular
    expression.

    Valid matches may occur anywhere in the string: for example, "[at]+"
    identifies "cat", "hat", "aa", "a", and "t" as expected, and "fish"
    and "dog" as unexpected.

    expect_column_values_to_match_regex is a :func:`column_map_expectation
    <great_expectations.dataset.base.Dataset.column_map_expectation>`.

    Args:
        column (str): The column name.
        regex (str): The regular expression the column entries should
            match.

    Keyword Args:
        mostly (None or a float between 0 and 1): Return ``"success": True``
            when at least this fraction of values meet the expectation
            (see :ref:`mostly`).

    Other Parameters:
        result_format (str or None): One of `BOOLEAN_ONLY`, `BASIC`,
            `COMPLETE`, or `SUMMARY` (see :ref:`result_format <result_format>`).
        include_config (boolean): If True, include the expectation config
            in the result object (see :ref:`include_config`).
        catch_exceptions (boolean or None): If True, catch exceptions and
            report them in the result object (see :ref:`catch_exceptions`).
        meta (dict or None): JSON-serializable metadata (nesting allowed)
            copied into the output unmodified (see :ref:`meta`).

    Returns:
        A JSON-serializable expectation result object; its exact fields
        depend on ``result_format``, ``include_config``,
        ``catch_exceptions``, and ``meta``.

    See Also:
        expect_column_values_to_not_match_regex
        expect_column_values_to_match_regex_list
    """
    # Abstract stub: concrete Dataset backends supply the implementation.
    raise NotImplementedError
def expect_column_values_to_not_match_regex(self,
                                            column,
                                            regex,
                                            mostly=None,
                                            result_format=None, include_config=False,
                                            catch_exceptions=None, meta=None):
    """Expect column entries to be strings that do NOT match a given
    regular expression.

    The regex must not match any portion of the string: for example,
    "[at]+" identifies "fish" and "dog" as expected, and "cat" and "hat"
    as unexpected.

    expect_column_values_to_not_match_regex is a
    :func:`column_map_expectation
    <great_expectations.dataset.base.Dataset.column_map_expectation>`.

    Args:
        column (str): The column name.
        regex (str): The regular expression the column entries should
            NOT match.

    Keyword Args:
        mostly (None or a float between 0 and 1): Return ``"success": True``
            when at least this fraction of values meet the expectation
            (see :ref:`mostly`).

    Other Parameters:
        result_format (str or None): One of `BOOLEAN_ONLY`, `BASIC`,
            `COMPLETE`, or `SUMMARY` (see :ref:`result_format <result_format>`).
        include_config (boolean): If True, include the expectation config
            in the result object (see :ref:`include_config`).
        catch_exceptions (boolean or None): If True, catch exceptions and
            report them in the result object (see :ref:`catch_exceptions`).
        meta (dict or None): JSON-serializable metadata (nesting allowed)
            copied into the output unmodified (see :ref:`meta`).

    Returns:
        A JSON-serializable expectation result object; its exact fields
        depend on ``result_format``, ``include_config``,
        ``catch_exceptions``, and ``meta``.

    See Also:
        expect_column_values_to_match_regex
        expect_column_values_to_match_regex_list
    """
    # Abstract stub: concrete Dataset backends supply the implementation.
    raise NotImplementedError
def expect_column_values_to_match_regex_list(self,
                                             column,
                                             regex_list,
                                             match_on="any",
                                             mostly=None,
                                             result_format=None, include_config=False,
                                             catch_exceptions=None, meta=None):
    """Expect column entries to be strings matching any of (or all of) a
    list of regular expressions.

    Matches may occur anywhere in the string.

    expect_column_values_to_match_regex_list is a
    :func:`column_map_expectation
    <great_expectations.dataset.base.Dataset.column_map_expectation>`.

    Args:
        column (str): The column name.
        regex_list (list): The regular expressions the column entries
            should match.

    Keyword Args:
        match_on= (string): "any" or "all". Use "any" when a value must
            match at least one regex in the list; use "all" when it must
            match every regex in the list.
        mostly (None or a float between 0 and 1): Return ``"success": True``
            when at least this fraction of values meet the expectation
            (see :ref:`mostly`).

    Other Parameters:
        result_format (str or None): One of `BOOLEAN_ONLY`, `BASIC`,
            `COMPLETE`, or `SUMMARY` (see :ref:`result_format <result_format>`).
        include_config (boolean): If True, include the expectation config
            in the result object (see :ref:`include_config`).
        catch_exceptions (boolean or None): If True, catch exceptions and
            report them in the result object (see :ref:`catch_exceptions`).
        meta (dict or None): JSON-serializable metadata (nesting allowed)
            copied into the output unmodified (see :ref:`meta`).

    Returns:
        A JSON-serializable expectation result object; its exact fields
        depend on ``result_format``, ``include_config``,
        ``catch_exceptions``, and ``meta``.

    See Also:
        expect_column_values_to_match_regex
        expect_column_values_to_not_match_regex
    """
    # Abstract stub: concrete Dataset backends supply the implementation.
    raise NotImplementedError
def expect_column_values_to_not_match_regex_list(self, column, regex_list,
                                                 mostly=None,
                                                 result_format=None, include_config=False,
                                                 catch_exceptions=None, meta=None):
    """Expect column entries to be strings that match none of a list of
    regular expressions.

    Matches may occur anywhere in the string.

    expect_column_values_to_not_match_regex_list is a
    :func:`column_map_expectation
    <great_expectations.dataset.base.Dataset.column_map_expectation>`.

    Args:
        column (str): The column name.
        regex_list (list): The regular expressions the column entries
            should not match.

    Keyword Args:
        mostly (None or a float between 0 and 1): Return ``"success": True``
            when at least this fraction of values meet the expectation
            (see :ref:`mostly`).

    Other Parameters:
        result_format (str or None): One of `BOOLEAN_ONLY`, `BASIC`,
            `COMPLETE`, or `SUMMARY` (see :ref:`result_format <result_format>`).
        include_config (boolean): If True, include the expectation config
            in the result object (see :ref:`include_config`).
        catch_exceptions (boolean or None): If True, catch exceptions and
            report them in the result object (see :ref:`catch_exceptions`).
        meta (dict or None): JSON-serializable metadata (nesting allowed)
            copied into the output unmodified (see :ref:`meta`).

    Returns:
        A JSON-serializable expectation result object; its exact fields
        depend on ``result_format``, ``include_config``,
        ``catch_exceptions``, and ``meta``.

    See Also:
        expect_column_values_to_match_regex_list
    """
    # Abstract stub: concrete Dataset backends supply the implementation.
    raise NotImplementedError
##### Datetime and JSON parsing #####
def expect_column_values_to_match_strftime_format(self,
                                                  column,
                                                  strftime_format,
                                                  mostly=None,
                                                  result_format=None, include_config=False,
                                                  catch_exceptions=None, meta=None):
    """Expect column entries to be strings representing a date or time
    with the given format.

    expect_column_values_to_match_strftime_format is a
    :func:`column_map_expectation
    <great_expectations.dataset.base.Dataset.column_map_expectation>`.

    Args:
        column (str): The column name.
        strftime_format (str): The strftime format string to match
            against.

    Keyword Args:
        mostly (None or a float between 0 and 1): Return ``"success": True``
            when at least this fraction of values meet the expectation
            (see :ref:`mostly`).

    Other Parameters:
        result_format (str or None): One of `BOOLEAN_ONLY`, `BASIC`,
            `COMPLETE`, or `SUMMARY` (see :ref:`result_format <result_format>`).
        include_config (boolean): If True, include the expectation config
            in the result object (see :ref:`include_config`).
        catch_exceptions (boolean or None): If True, catch exceptions and
            report them in the result object (see :ref:`catch_exceptions`).
        meta (dict or None): JSON-serializable metadata (nesting allowed)
            copied into the output unmodified (see :ref:`meta`).

    Returns:
        A JSON-serializable expectation result object; its exact fields
        depend on ``result_format``, ``include_config``,
        ``catch_exceptions``, and ``meta``.
    """
    # Abstract stub: concrete Dataset backends supply the implementation.
    raise NotImplementedError
def expect_column_values_to_be_dateutil_parseable(self,
                                                  column,
                                                  mostly=None,
                                                  result_format=None, include_config=False,
                                                  catch_exceptions=None, meta=None):
    """Expect column entries to be parseable using dateutil.

    expect_column_values_to_be_dateutil_parseable is a
    :func:`column_map_expectation
    <great_expectations.dataset.base.Dataset.column_map_expectation>`.

    Args:
        column (str): The column name.

    Keyword Args:
        mostly (None or a float between 0 and 1): Return ``"success": True``
            when at least this fraction of values meet the expectation
            (see :ref:`mostly`).

    Other Parameters:
        result_format (str or None): One of `BOOLEAN_ONLY`, `BASIC`,
            `COMPLETE`, or `SUMMARY` (see :ref:`result_format <result_format>`).
        include_config (boolean): If True, include the expectation config
            in the result object (see :ref:`include_config`).
        catch_exceptions (boolean or None): If True, catch exceptions and
            report them in the result object (see :ref:`catch_exceptions`).
        meta (dict or None): JSON-serializable metadata (nesting allowed)
            copied into the output unmodified (see :ref:`meta`).

    Returns:
        A JSON-serializable expectation result object; its exact fields
        depend on ``result_format``, ``include_config``,
        ``catch_exceptions``, and ``meta``.
    """
    # Abstract stub: concrete Dataset backends supply the implementation.
    raise NotImplementedError
def expect_column_values_to_be_json_parseable(self,
                                              column,
                                              mostly=None,
                                              result_format=None, include_config=False,
                                              catch_exceptions=None, meta=None):
    """Expect column entries to be data written in JavaScript Object
    Notation.

    expect_column_values_to_be_json_parseable is a
    :func:`column_map_expectation
    <great_expectations.dataset.base.Dataset.column_map_expectation>`.

    Args:
        column (str): The column name.

    Keyword Args:
        mostly (None or a float between 0 and 1): Return ``"success": True``
            when at least this fraction of values meet the expectation
            (see :ref:`mostly`).

    Other Parameters:
        result_format (str or None): One of `BOOLEAN_ONLY`, `BASIC`,
            `COMPLETE`, or `SUMMARY` (see :ref:`result_format <result_format>`).
        include_config (boolean): If True, include the expectation config
            in the result object (see :ref:`include_config`).
        catch_exceptions (boolean or None): If True, catch exceptions and
            report them in the result object (see :ref:`catch_exceptions`).
        meta (dict or None): JSON-serializable metadata (nesting allowed)
            copied into the output unmodified (see :ref:`meta`).

    Returns:
        A JSON-serializable expectation result object; its exact fields
        depend on ``result_format``, ``include_config``,
        ``catch_exceptions``, and ``meta``.

    See Also:
        expect_column_values_to_match_json_schema
    """
    # Abstract stub: concrete Dataset backends supply the implementation.
    raise NotImplementedError
def expect_column_values_to_match_json_schema(self,
                                              column,
                                              json_schema,
                                              mostly=None,
                                              result_format=None, include_config=False,
                                              catch_exceptions=None, meta=None):
    """Expect column entries to be JSON objects matching a given JSON
    schema.

    expect_column_values_to_match_json_schema is a
    :func:`column_map_expectation
    <great_expectations.dataset.base.Dataset.column_map_expectation>`.

    Args:
        column (str): The column name.
        json_schema: The JSON schema that column entries should validate
            against.

    Keyword Args:
        mostly (None or a float between 0 and 1): Return ``"success": True``
            when at least this fraction of values meet the expectation
            (see :ref:`mostly`).

    Other Parameters:
        result_format (str or None): One of `BOOLEAN_ONLY`, `BASIC`,
            `COMPLETE`, or `SUMMARY` (see :ref:`result_format <result_format>`).
        include_config (boolean): If True, include the expectation config
            in the result object (see :ref:`include_config`).
        catch_exceptions (boolean or None): If True, catch exceptions and
            report them in the result object (see :ref:`catch_exceptions`).
        meta (dict or None): JSON-serializable metadata (nesting allowed)
            copied into the output unmodified (see :ref:`meta`).

    Returns:
        A JSON-serializable expectation result object; its exact fields
        depend on ``result_format``, ``include_config``,
        ``catch_exceptions``, and ``meta``.

    See Also:
        expect_column_values_to_be_json_parseable
        The JSON-schema docs at: http://json-schema.org/
    """
    # Abstract stub: concrete Dataset backends supply the implementation.
    raise NotImplementedError
##### Aggregate functions #####
def expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than(self,
                                                                                column, distribution,
                                                                                p_value=0.05, params=None,
                                                                                result_format=None,
                                                                                include_config=False,
                                                                                catch_exceptions=None, meta=None):
    """
    Expect the column values to be distributed similarly to a scipy distribution. \

    This expectation compares the provided column to the specified continuous distribution with a parametric \
    Kolmogorov-Smirnov test. The K-S test compares the provided column to the cumulative density function (CDF) of \
    the specified scipy distribution. If you don't know the desired distribution shape parameters, use the \
    `ge.dataset.util.infer_distribution_parameters()` utility function to estimate them.

    It returns 'success'=True if the p-value from the K-S test is greater than or equal to the provided p-value.

    expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than is a \
    :func:`column_aggregate_expectation <great_expectations.dataset.base.Dataset.column_aggregate_expectation>`.

    Args:
        column (str): \
            The column name.
        distribution (str): \
            The scipy distribution name. See: https://docs.scipy.org/doc/scipy/reference/stats.html
        p_value (float): \
            The threshold p-value for a passing test. Default is 0.05.
        params (dict or list) : \
            A dictionary or positional list of shape parameters that describe the distribution you want to test the\
            data against. Include key values specific to the distribution from the appropriate scipy \
            distribution CDF function. 'loc' and 'scale' are used as translational parameters.\
            See https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions

    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, then include the expectation config as part of the result object. \
            For more detail, see :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, then catch exceptions and include them as part of the result object. \
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
            For more detail, see :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object.
        Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
        :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.

    Notes:
        These fields in the result object are customized for this expectation:
        ::

            {
                "details":
                    "expected_params" (dict): The specified or inferred parameters of the distribution to test against
                    "ks_results" (dict): The raw result of stats.kstest()
            }

        * The Kolmogorov-Smirnov test's null hypothesis is that the column is similar to the provided distribution.
        * Supported scipy distributions:

          -norm
          -beta
          -gamma
          -uniform
          -chi2
          -expon
    """
    raise NotImplementedError
def expect_column_mean_to_be_between(
    self,
    column,
    min_value=None,
    max_value=None,
    result_format=None,
    include_config=False,
    catch_exceptions=None,
    meta=None,
):
    """Expect the column mean to fall within the inclusive range [min_value, max_value].

    expect_column_mean_to_be_between is a \
    :func:`column_aggregate_expectation <great_expectations.dataset.base.Dataset.column_aggregate_expectation>`.

    Args:
        column (str): \
            The column name.
        min_value (float or None): \
            The minimum value for the column mean. If None, max_value acts as a pure upper bound.
        max_value (float or None): \
            The maximum value for the column mean. If None, min_value acts as a pure lower bound.

    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            See :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, include the expectation config in the result object. See :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, catch exceptions and include them in the result object. See :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) included in the output unmodified. See :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object; exact fields depend on result_format,
        include_config, catch_exceptions, and meta.

    Notes:
        The result object additionally carries:
        ::

            {
                "observed_value": (float) The true mean for the column
            }

        Both bounds are inclusive.

    See Also:
        expect_column_median_to_be_between
        expect_column_stdev_to_be_between
    """
    raise NotImplementedError
def expect_column_median_to_be_between(
    self,
    column,
    min_value=None,
    max_value=None,
    result_format=None,
    include_config=False,
    catch_exceptions=None,
    meta=None,
):
    """Expect the column median to fall within the inclusive range [min_value, max_value].

    expect_column_median_to_be_between is a \
    :func:`column_aggregate_expectation <great_expectations.dataset.base.Dataset.column_aggregate_expectation>`.

    Args:
        column (str): \
            The column name.
        min_value (int or None): \
            The minimum value for the column median. If None, max_value acts as a pure upper bound.
        max_value (int or None): \
            The maximum value for the column median. If None, min_value acts as a pure lower bound.

    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            See :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, include the expectation config in the result object. See :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, catch exceptions and include them in the result object. See :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) included in the output unmodified. See :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object; exact fields depend on result_format,
        include_config, catch_exceptions, and meta.

    Notes:
        The result object additionally carries:
        ::

            {
                "observed_value": (float) The true median for the column
            }

        Both bounds are inclusive.

    See Also:
        expect_column_mean_to_be_between
        expect_column_stdev_to_be_between
    """
    raise NotImplementedError
def expect_column_stdev_to_be_between(
    self,
    column,
    min_value=None,
    max_value=None,
    result_format=None,
    include_config=False,
    catch_exceptions=None,
    meta=None,
):
    """Expect the column standard deviation to fall within the inclusive range [min_value, max_value].

    expect_column_stdev_to_be_between is a \
    :func:`column_aggregate_expectation <great_expectations.dataset.base.Dataset.column_aggregate_expectation>`.

    Args:
        column (str): \
            The column name.
        min_value (float or None): \
            The minimum value for the column standard deviation. If None, max_value acts as a pure upper bound.
        max_value (float or None): \
            The maximum value for the column standard deviation. If None, min_value acts as a pure lower bound.

    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            See :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, include the expectation config in the result object. See :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, catch exceptions and include them in the result object. See :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) included in the output unmodified. See :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object; exact fields depend on result_format,
        include_config, catch_exceptions, and meta.

    Notes:
        The result object additionally carries:
        ::

            {
                "observed_value": (float) The true standard deviation for the column
            }

        Both bounds are inclusive.

    See Also:
        expect_column_mean_to_be_between
        expect_column_median_to_be_between
    """
    raise NotImplementedError
def expect_column_unique_value_count_to_be_between(
    self,
    column,
    min_value=None,
    max_value=None,
    result_format=None,
    include_config=False,
    catch_exceptions=None,
    meta=None,
):
    """Expect the count of distinct column values to fall within the inclusive range [min_value, max_value].

    expect_column_unique_value_count_to_be_between is a \
    :func:`column_aggregate_expectation <great_expectations.dataset.base.Dataset.column_aggregate_expectation>`.

    Args:
        column (str): \
            The column name.
        min_value (int or None): \
            The minimum number of unique values allowed. If None, max_value acts as a pure upper bound.
        max_value (int or None): \
            The maximum number of unique values allowed. If None, min_value acts as a pure lower bound.

    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            See :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, include the expectation config in the result object. See :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, catch exceptions and include them in the result object. See :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) included in the output unmodified. See :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object; exact fields depend on result_format,
        include_config, catch_exceptions, and meta.

    Notes:
        The result object additionally carries:
        ::

            {
                "observed_value": (int) The number of unique values in the column
            }

        Both bounds are inclusive.

    See Also:
        expect_column_proportion_of_unique_values_to_be_between
    """
    raise NotImplementedError
def expect_column_proportion_of_unique_values_to_be_between(self,
                                                            column,
                                                            min_value=0,
                                                            max_value=1,
                                                            result_format=None, include_config=False, catch_exceptions=None, meta=None
                                                            ):
    """Expect the proportion of unique values to be between a minimum value and a maximum value.

    expect_column_proportion_of_unique_values_to_be_between is a \
    :func:`column_aggregate_expectation <great_expectations.dataset.base.Dataset.column_aggregate_expectation>`.

    For example, in a column containing [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], there are 4 unique values and 10 total \
    values for a proportion of 0.4.

    Args:
        column (str): \
            The column name.
        min_value (float or None): \
            The minimum proportion of unique values. (Proportions are on the range 0 to 1)
        max_value (float or None): \
            The maximum proportion of unique values. (Proportions are on the range 0 to 1)

    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, then include the expectation config as part of the result object. \
            For more detail, see :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, then catch exceptions and include them as part of the result object. \
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
            For more detail, see :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object.
        Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
        :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.

    Notes:
        These fields in the result object are customized for this expectation:
        ::

            {
                "observed_value": (float) The proportion of unique values in the column
            }

        * min_value and max_value are both inclusive.
        * If min_value is None, then max_value is treated as an upper bound
        * If max_value is None, then min_value is treated as a lower bound

    See Also:
        expect_column_unique_value_count_to_be_between
    """
    raise NotImplementedError
def expect_column_most_common_value_to_be_in_set(
    self,
    column,
    value_set,
    ties_okay=None,
    result_format=None,
    include_config=False,
    catch_exceptions=None,
    meta=None,
):
    """Expect the column's most common value to be one of the designated values.

    expect_column_most_common_value_to_be_in_set is a \
    :func:`column_aggregate_expectation <great_expectations.dataset.base.Dataset.column_aggregate_expectation>`.

    Args:
        column (str): \
            The column name
        value_set (set-like): \
            A list of potential values to match

    Keyword Args:
        ties_okay (boolean or None): \
            If True, the expectation still succeeds when values outside the designated set are as common \
            (but not more common) than designated values

    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            See :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, include the expectation config in the result object. See :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, catch exceptions and include them in the result object. See :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) included in the output unmodified. See :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object; exact fields depend on result_format,
        include_config, catch_exceptions, and meta.

    Notes:
        The result object additionally carries:
        ::

            {
                "observed_value": (list) The most common values in the column
            }

        `observed_value` is a list: usually a single element, but when several values tie for most
        common it contains one copy of each tied value.
    """
    raise NotImplementedError
def expect_column_sum_to_be_between(self,
                                    column,
                                    min_value=None,
                                    max_value=None,
                                    result_format=None, include_config=False, catch_exceptions=None, meta=None
                                    ):
    """Expect the column sum to be between a minimum value and a maximum value.

    expect_column_sum_to_be_between is a \
    :func:`column_aggregate_expectation <great_expectations.dataset.base.Dataset.column_aggregate_expectation>`.

    Args:
        column (str): \
            The column name
        min_value (comparable type or None): \
            The minimum value allowed for the column sum.
        max_value (comparable type or None): \
            The maximum value allowed for the column sum.

    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, then include the expectation config as part of the result object. \
            For more detail, see :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, then catch exceptions and include them as part of the result object. \
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
            For more detail, see :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object.
        Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
        :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.

    Notes:
        These fields in the result object are customized for this expectation:
        ::

            {
                "observed_value": (comparable type) The actual column sum
            }

        * min_value and max_value are both inclusive.
        * If min_value is None, then max_value is treated as an upper bound
        * If max_value is None, then min_value is treated as a lower bound
    """
    raise NotImplementedError
def expect_column_min_to_be_between(self,
                                    column,
                                    min_value=None,
                                    max_value=None,
                                    parse_strings_as_datetimes=None,
                                    output_strftime_format=None,
                                    result_format=None, include_config=False, catch_exceptions=None, meta=None
                                    ):
    """Expect the column minimum to be between a minimum value and a maximum value.

    expect_column_min_to_be_between is a \
    :func:`column_aggregate_expectation <great_expectations.dataset.base.Dataset.column_aggregate_expectation>`.

    Args:
        column (str): \
            The column name
        min_value (comparable type or None): \
            The minimum value allowed for the column minimum.
        max_value (comparable type or None): \
            The maximum value allowed for the column minimum.

    Keyword Args:
        parse_strings_as_datetimes (Boolean or None): \
            If True, parse min_value, max_values, and all non-null column values to datetimes before making comparisons.
        output_strftime_format (str or None): \
            A valid strftime format for datetime output. Only used if parse_strings_as_datetimes=True.

    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, then include the expectation config as part of the result object. \
            For more detail, see :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, then catch exceptions and include them as part of the result object. \
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
            For more detail, see :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object.
        Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
        :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.

    Notes:
        These fields in the result object are customized for this expectation:
        ::

            {
                "observed_value": (comparable type) The actual column min
            }

        * min_value and max_value are both inclusive.
        * If min_value is None, then max_value is treated as an upper bound
        * If max_value is None, then min_value is treated as a lower bound
    """
    raise NotImplementedError
def expect_column_max_to_be_between(self,
                                    column,
                                    min_value=None,
                                    max_value=None,
                                    parse_strings_as_datetimes=None,
                                    output_strftime_format=None,
                                    result_format=None, include_config=False, catch_exceptions=None, meta=None
                                    ):
    """Expect the column maximum to be between a minimum value and a maximum value.

    expect_column_max_to_be_between is a \
    :func:`column_aggregate_expectation <great_expectations.dataset.base.Dataset.column_aggregate_expectation>`.

    Args:
        column (str): \
            The column name
        min_value (comparable type or None): \
            The minimum value allowed for the column maximum.
        max_value (comparable type or None): \
            The maximum value allowed for the column maximum.

    Keyword Args:
        parse_strings_as_datetimes (Boolean or None): \
            If True, parse min_value, max_values, and all non-null column values to datetimes before making comparisons.
        output_strftime_format (str or None): \
            A valid strftime format for datetime output. Only used if parse_strings_as_datetimes=True.

    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, then include the expectation config as part of the result object. \
            For more detail, see :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, then catch exceptions and include them as part of the result object. \
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
            For more detail, see :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object.
        Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
        :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.

    Notes:
        These fields in the result object are customized for this expectation:
        ::

            {
                "observed_value": (comparable type) The actual column max
            }

        * min_value and max_value are both inclusive.
        * If min_value is None, then max_value is treated as an upper bound
        * If max_value is None, then min_value is treated as a lower bound
    """
    raise NotImplementedError
### Distributional expectations
def expect_column_chisquare_test_p_value_to_be_greater_than(
    self,
    column,
    partition_object=None,
    p=0.05,
    tail_weight_holdout=0,
    result_format=None,
    include_config=False,
    catch_exceptions=None,
    meta=None,
):
    """Expect the column's categorical distribution to match the provided partition (Chi-squared test).

    The comparison is a Chi-squared test between the observed category frequencies and those implied by \
    `partition_object`; `success=True` when the test's p-value exceeds the threshold `p`.

    expect_column_chisquare_test_p_value_to_be_greater_than is a \
    :func:`column_aggregate_expectation <great_expectations.dataset.base.Dataset.column_aggregate_expectation>`.

    Args:
        column (str): \
            The column name.
        partition_object (dict): \
            The expected partition object (see :ref:`partition_object`).
        p (float): \
            The p-value threshold for rejecting the null hypothesis of the Chi-Squared test. \
            Below this threshold the expectation returns `success=False`, rejecting the null \
            hypothesis that the distributions are the same. Defaults to 0.05.

    Keyword Args:
        tail_weight_holdout (float between 0 and 1 or None): \
            Weight split uniformly between values observed in the data but absent from the provided \
            partition. This relaxes the test by assigning positive weights to unknown observed values.

    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            See :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, include the expectation config in the result object. See :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, catch exceptions and include them in the result object. See :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) included in the output unmodified. See :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object; exact fields depend on result_format,
        include_config, catch_exceptions, and meta.

    Notes:
        The result object additionally carries:
        ::

            {
                "observed_value": (float) The true p-value of the Chi-squared test
                "details": {
                    "observed_partition" (dict):
                        The partition observed in the data.
                    "expected_partition" (dict):
                        The partition expected from the data, after including tail_weight_holdout
                }
            }
    """
    raise NotImplementedError
def expect_column_bootstrapped_ks_test_p_value_to_be_greater_than(
    self,
    column,
    partition_object=None,
    p=0.05,
    bootstrap_samples=None,
    bootstrap_sample_size=None,
    result_format=None,
    include_config=False,
    catch_exceptions=None,
    meta=None,
):
    """Expect the column's continuous distribution to match the provided partition (bootstrapped K-S test).

    The comparison uses a bootstrapped Kolmogorov-Smirnov test and returns `success=True` when the \
    column values match the distribution implied by `partition_object`. The expected cumulative density \
    function (CDF) is built by linear interpolation between the partition's bins using the provided \
    weights, i.e. the test expects a piecewise uniform distribution over those bins.

    expect_column_bootstrapped_ks_test_p_value_to_be_greater_than is a \
    :func:`column_aggregate_expectation <great_expectations.dataset.base.Dataset.column_aggregate_expectation>`.

    Args:
        column (str): \
            The column name.
        partition_object (dict): \
            The expected partition object (see :ref:`partition_object`).
        p (float): \
            The p-value threshold for the Kolmogorov-Smirnov test. Below this threshold the \
            expectation returns `success=False`, rejecting the null hypothesis that the \
            distributions are the same. Defaults to 0.05.

    Keyword Args:
        bootstrap_samples (int): \
            The number of bootstrap rounds. Defaults to 1000.
        bootstrap_sample_size (int): \
            The number of samples drawn from the column per bootstrap round; larger samples increase \
            the specificity of the test. Defaults to 2 * len(partition_object['weights'])

    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            See :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, include the expectation config in the result object. See :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, catch exceptions and include them in the result object. See :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) included in the output unmodified. See :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object; exact fields depend on result_format,
        include_config, catch_exceptions, and meta.

    Notes:
        The result object additionally carries:
        ::

            {
                "observed_value": (float) The true p-value of the KS test
                "details": {
                    "bootstrap_samples": The number of bootstrap rounds used
                    "bootstrap_sample_size": The number of samples taken from
                        the column in each bootstrap round
                    "observed_cdf": The cumulative density function observed
                        in the data, a dict containing 'x' values and cdf_values
                        (suitable for plotting)
                    "expected_cdf" (dict):
                        The cumulative density function expected based on the
                        partition object, a dict containing 'x' values and
                        cdf_values (suitable for plotting)
                    "observed_partition" (dict):
                        The partition observed on the data, using the provided
                        bins but also expanding from min(column) to max(column)
                    "expected_partition" (dict):
                        The partition expected from the data. For KS test,
                        this will always be the partition_object parameter
                }
            }
    """
    raise NotImplementedError
def expect_column_kl_divergence_to_be_less_than(self,
                                                column,
                                                partition_object=None,
                                                threshold=None,
                                                tail_weight_holdout=0,
                                                internal_weight_holdout=0,
                                                result_format=None, include_config=False, catch_exceptions=None, meta=None):
    """Expect the Kullback-Leibler (KL) divergence (relative entropy) of the specified column with respect to the \
    partition object to be lower than the provided threshold.

    KL divergence compares two distributions. The higher the divergence value (relative entropy), the larger the \
    difference between the two distributions. A relative entropy of zero indicates that the data are \
    distributed identically, `when binned according to the provided partition`.

    In many practical contexts, choosing a value between 0.5 and 1 will provide a useful test.

    This expectation works on both categorical and continuous partitions. See notes below for details.

    expect_column_kl_divergence_to_be_less_than is a \
    :func:`column_aggregate_expectation <great_expectations.dataset.base.Dataset.column_aggregate_expectation>`.

    Args:
        column (str): \
            The column name.
        partition_object (dict): \
            The expected partition object (see :ref:`partition_object`).
        threshold (float): \
            The maximum KL divergence to for which to return `success=True`. If KL divergence is larger than the\
            provided threshold, the test will return `success=False`.

    Keyword Args:
        internal_weight_holdout (float between 0 and 1 or None): \
            The amount of weight to split uniformly among zero-weighted partition bins. internal_weight_holdout \
            provides a mechanism to make the test less strict by assigning positive weights to values observed in \
            the data for which the partition explicitly expected zero weight. With no internal_weight_holdout, \
            any value observed in such a region will cause KL divergence to rise to +Infinity.\
            Defaults to 0.
        tail_weight_holdout (float between 0 and 1 or None): \
            The amount of weight to add to the tails of the histogram. Tail weight holdout is split evenly between\
            (-Infinity, min(partition_object['bins'])) and (max(partition_object['bins']), +Infinity). \
            tail_weight_holdout provides a mechanism to make the test less strict by assigning positive weights to \
            values observed in the data that are not present in the partition. With no tail_weight_holdout, \
            any value observed outside the provided partition_object will cause KL divergence to rise to +Infinity.\
            Defaults to 0.

    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, then include the expectation config as part of the result object. \
            For more detail, see :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, then catch exceptions and include them as part of the result object. \
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
            For more detail, see :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object.
        Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
        :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.

    Notes:
        These fields in the result object are customized for this expectation:
        ::

            {
                "observed_value": (float) The true KL divergence (relative entropy)
                "details": {
                    "observed_partition": (dict) The partition observed in the data
                    "expected_partition": (dict) The partition against which the data were compared,
                        after applying specified weight holdouts.
                }
            }

        If the partition_object is categorical, this expectation will expect the values in column to also be \
        categorical.

        * If the column includes values that are not present in the partition, the tail_weight_holdout will be \
          equally split among those values, providing a mechanism to weaken the strictness of the expectation \
          (otherwise, relative entropy would immediately go to infinity).
        * If the partition includes values that are not present in the column, the test will simply include \
          zero weight for that value.

        If the partition_object is continuous, this expectation will discretize the values in the column according \
        to the bins specified in the partition_object, and apply the test to the resulting distribution.

        * The internal_weight_holdout and tail_weight_holdout parameters provide a mechanism to weaken the \
          expectation, since an expected weight of zero would drive relative entropy to be infinite if any data \
          are observed in that interval.
        * If internal_weight_holdout is specified, that value will be distributed equally among any intervals \
          with weight zero in the partition_object.
        * If tail_weight_holdout is specified, that value will be appended to the tails of the bins \
          ((-Infinity, min(bins)) and (max(bins), Infinity)).

    See also:
        expect_column_chisquare_test_p_value_to_be_greater_than
        expect_column_bootstrapped_ks_test_p_value_to_be_greater_than
    """
    raise NotImplementedError
### Column pairs ###
def expect_column_pair_values_to_be_equal(self,
                                          column_A,
                                          column_B,
                                          ignore_row_if="both_values_are_missing",
                                          result_format=None, include_config=False, catch_exceptions=None, meta=None
                                          ):
    """Expect the values in column A to be the same as column B.

    Args:
        column_A (str): The first column name
        column_B (str): The second column name

    Keyword Args:
        ignore_row_if (str): One of "both_values_are_missing",
            "either_value_is_missing", or "neither"; controls which rows
            are excluded from the comparison.

    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, then include the expectation config as part of the result object. \
            For more detail, see :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, then catch exceptions and include them as part of the result object. \
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
            For more detail, see :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object.

        Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
        :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
    """
    # Abstract here; concrete Dataset backends provide the implementation.
    raise NotImplementedError
def expect_column_pair_values_A_to_be_greater_than_B(self,
                                                     column_A,
                                                     column_B,
                                                     or_equal=None,
                                                     parse_strings_as_datetimes=None,
                                                     allow_cross_type_comparisons=None,
                                                     ignore_row_if="both_values_are_missing",
                                                     result_format=None, include_config=False, catch_exceptions=None, meta=None
                                                     ):
    """Expect values in column A to be greater than column B.

    Args:
        column_A (str): The first column name
        column_B (str): The second column name
        or_equal (boolean or None): If True, then values can be equal, not strictly greater

    Keyword Args:
        parse_strings_as_datetimes (boolean or None): If True, parse string values as datetimes \
            before comparing.
        allow_cross_type_comparisons (boolean or None): If True, allow comparisons between types (e.g. integer and \
            string). Otherwise, attempting such comparisons will raise an exception.
        ignore_row_if (str): One of "both_values_are_missing",
            "either_value_is_missing", or "neither"; controls which rows
            are excluded from the comparison.

    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, then include the expectation config as part of the result object. \
            For more detail, see :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, then catch exceptions and include them as part of the result object. \
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
            For more detail, see :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object.

        Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
        :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
    """
    # Abstract here; concrete Dataset backends provide the implementation.
    raise NotImplementedError
def expect_column_pair_values_to_be_in_set(self,
                                           column_A,
                                           column_B,
                                           value_pairs_set,
                                           ignore_row_if="both_values_are_missing",
                                           result_format=None, include_config=False, catch_exceptions=None, meta=None
                                           ):
    """Expect paired values from columns A and B to belong to a set of valid pairs.

    Args:
        column_A (str): The first column name
        column_B (str): The second column name
        value_pairs_set (list of tuples): All the valid pairs to be matched

    Keyword Args:
        ignore_row_if (str): One of "both_values_are_missing",
            "either_value_is_missing", or "neither"; controls which rows
            are excluded from the comparison.

    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, then include the expectation config as part of the result object. \
            For more detail, see :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, then catch exceptions and include them as part of the result object. \
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
            For more detail, see :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object.

        Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
        :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
    """
    # Abstract here; concrete Dataset backends provide the implementation.
    raise NotImplementedError
ValidationStatistics = namedtuple("ValidationStatistics", [
"evaluated_expectations",
"successful_expectations",
"unsuccessful_expectations",
"success_percent",
"success",
])
def _calc_validation_statistics(validation_results):
"""
Calculate summary statistics for the validation results and
return ``ExpectationStatistics``.
"""
# calc stats
successful_expectations = sum(exp["success"] for exp in validation_results)
evaluated_expectations = len(validation_results)
unsuccessful_expectations = evaluated_expectations - successful_expectations
success = successful_expectations == evaluated_expectations
try:
success_percent = successful_expectations / evaluated_expectations * 100
except ZeroDivisionError:
success_percent = float("nan")
return ValidationStatistics(
successful_expectations=successful_expectations,
evaluated_expectations=evaluated_expectations,
unsuccessful_expectations=unsuccessful_expectations,
success=success,
success_percent=success_percent,
)
|
sotte/great_expectations | tests/test_utils.py | from __future__ import division
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
from great_expectations.dataset import PandasDataset, SqlAlchemyDataset
## Taken from the following stackoverflow: https://stackoverflow.com/questions/23549419/assert-that-two-dictionaries-are-almost-equal
def assertDeepAlmostEqual(test_case, expected, actual, *args, **kwargs):
    """
    Assert that two complex structures have almost equal contents.

    Compares lists, dicts and tuples recursively. Checks numeric values
    using test_case's :py:meth:`unittest.TestCase.assertAlmostEqual` and
    checks all other values with :py:meth:`unittest.TestCase.assertEqual`.
    Accepts additional positional and keyword arguments and passes those
    intact to assertAlmostEqual() (that's how you specify comparison
    precision).

    Adapted from: https://stackoverflow.com/questions/23549419

    :param test_case: TestCase object on which we can call all of the basic
        'assert' methods.
    :type test_case: :py:class:`unittest.TestCase` object
    :param expected: the expected structure.
    :param actual: the structure compared against ``expected``.
    :raises AssertionError: if the structures differ; the root-level failure
        message includes a trace of the path to the first mismatch.
    """
    # '__trace' is only present on recursive calls; its absence marks the root.
    is_root = '__trace' not in kwargs
    trace = kwargs.pop('__trace', 'ROOT')
    try:
        # bool is an int subclass, so booleans also take the numeric path;
        # assertAlmostEqual handles exact equality for them.
        if isinstance(expected, (int, float, complex)):
            test_case.assertAlmostEqual(expected, actual, *args, **kwargs)
        elif isinstance(expected, (list, tuple, np.ndarray)):
            test_case.assertEqual(len(expected), len(actual))
            for index, (v1, v2) in enumerate(zip(expected, actual)):
                assertDeepAlmostEqual(test_case, v1, v2,
                                      *args, __trace=repr(index), **kwargs)
        elif isinstance(expected, dict):
            test_case.assertEqual(set(expected), set(actual))
            for key in expected:
                assertDeepAlmostEqual(test_case, expected[key], actual[key],
                                      *args, __trace=repr(key), **kwargs)
        else:
            test_case.assertEqual(expected, actual)
    except AssertionError as exc:
        # Record the path element at each recursion level so the root can
        # report where in the nested structure the mismatch occurred.
        exc.__dict__.setdefault('traces', []).append(trace)
        if is_root:
            trace = ' -> '.join(reversed(exc.traces))
            exc = AssertionError("%s\nTRACE: %s" % (str(exc), trace))
        raise exc
def get_dataset(dataset_type, data):
    """Build a Dataset of the requested backend type from in-memory data.

    For Pandas, ``data`` should be either a DataFrame or a dictionary that
    can be instantiated as a DataFrame. For SQL, the data is loaded into a
    fresh in-memory SQLite database as a table named ``test_data``.

    :param dataset_type: either 'PandasDataset' or 'SqlAlchemyDataset'.
    :param data: the tabular data to wrap.
    :raises ValueError: for any other ``dataset_type``.
    """
    if dataset_type == 'PandasDataset':
        return PandasDataset(data)
    if dataset_type == 'SqlAlchemyDataset':
        # Spin up a throwaway in-memory SQLite database, load the data into
        # it as a new table, then wrap that table in a dataset.
        engine = create_engine('sqlite://')
        pd.DataFrame(data).to_sql(name='test_data', con=engine, index=False)
        return SqlAlchemyDataset('test_data', engine=engine)
    raise ValueError("Unknown dataset_type " + str(dataset_type))
def candidate_test_is_on_temporary_notimplemented_list(context, expectation_type):
    """Return True when the expectation is known-unimplemented for the context.

    Only the SqlAlchemyDataset backend currently has a not-implemented list;
    every other context always yields False. Tests for listed expectations
    are expected to raise NotImplementedError.
    """
    if context != "SqlAlchemyDataset":
        return False
    # Expectations SqlAlchemyDataset does not implement yet. Implemented
    # ones (e.g. expect_column_to_exist, the row-count/bounds checks) were
    # removed from this list as support landed.
    not_implemented = {
        "expect_column_values_to_be_unique",
        "expect_column_values_to_be_of_type",
        "expect_column_values_to_be_in_type_list",
        "expect_column_values_to_be_increasing",
        "expect_column_values_to_be_decreasing",
        "expect_column_values_to_match_regex",
        "expect_column_values_to_not_match_regex",
        "expect_column_values_to_match_regex_list",
        "expect_column_values_to_not_match_regex_list",
        "expect_column_values_to_match_strftime_format",
        "expect_column_values_to_be_dateutil_parseable",
        "expect_column_values_to_be_json_parseable",
        "expect_column_values_to_match_json_schema",
        "expect_column_stdev_to_be_between",
        "expect_column_most_common_value_to_be_in_set",
        "expect_column_chisquare_test_p_value_to_be_greater_than",
        "expect_column_bootstrapped_ks_test_p_value_to_be_greater_than",
        "expect_column_kl_divergence_to_be_less_than",
        "expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than",
        "expect_column_pair_values_to_be_equal",
        "expect_column_pair_values_A_to_be_greater_than_B",
        "expect_column_pair_values_to_be_in_set",
    }
    return expectation_type in not_implemented
def evaluate_json_test(dataset, expectation_type, test):
    """
    Evaluate the result of a test built using the Great Expectations json test format.

    :param dataset: (Dataset) A great expectations Dataset
    :param expectation_type: (string) the name of the expectation to be run using the test input
    :param test: (dict) a dictionary containing information for the test to be run. The dictionary must include:
        - title: (string) the name of the test
        - exact_match_out: (boolean) If true, match the 'out' dictionary exactly against the result of the expectation
        - in: (dict or list) a dictionary of keyword arguments to use to evaluate the expectation or a list of positional arguments
        - out: (dict) the dictionary keys against which to make assertions. Unless exact_match_out is true, keys must\
            come from the following list:
              - success
              - observed_value
              - unexpected_index_list
              - unexpected_list
              - details
              - traceback_substring (if present, the string value will be expected as a substring of the exception_traceback)
    :return: None. asserts correctness of results.
    :raises ValueError: when a required test key is missing or 'out' holds an unknown key.
    """
    # COMPLETE gives the richest result payload; the assertions below rely on it.
    dataset.set_default_expectation_argument('result_format', 'COMPLETE')
    if 'title' not in test:
        raise ValueError("Invalid test configuration detected: 'title' is required.")
    if 'exact_match_out' not in test:
        raise ValueError("Invalid test configuration detected: 'exact_match_out' is required.")
    if 'in' not in test:
        raise ValueError("Invalid test configuration detected: 'in' is required.")
    if 'out' not in test:
        raise ValueError("Invalid test configuration detected: 'out' is required.")
    # Pass the test if we are in a test condition that is a known exception
    # Known condition: SqlAlchemy does not support parse_strings_as_datetimes
    if 'parse_strings_as_datetimes' in test['in'] and isinstance(dataset, SqlAlchemyDataset):
        return
    # Known condition: SqlAlchemy does not support allow_cross_type_comparisons
    if 'allow_cross_type_comparisons' in test['in'] and isinstance(dataset, SqlAlchemyDataset):
        return
    try:
        # Support tests with positional arguments
        if isinstance(test['in'], list):
            result = getattr(dataset, expectation_type)(*test['in'])
        # As well as keyword arguments
        else:
            result = getattr(dataset, expectation_type)(**test['in'])
    except NotImplementedError:
        # Note: This method of checking does not look for false negatives: tests that are incorrectly on the notimplemented_list
        assert candidate_test_is_on_temporary_notimplemented_list(dataset.__class__.__name__, expectation_type), "Error: this test was supposed to return NotImplementedError"
        return
    # Check results
    if test['exact_match_out'] is True:
        assert test['out'] == result
    else:
        for key, value in test['out'].items():
            # Apply our great expectations-specific test logic
            if key == 'success':
                assert result['success'] == value
            elif key == 'observed_value':
                # assert np.allclose(result['result']['observed_value'], value)
                assert value == result['result']['observed_value']
            elif key == 'unexpected_index_list':
                # SqlAlchemy results carry no stable row indexes, so skip this check there.
                if isinstance(dataset, SqlAlchemyDataset):
                    pass
                else:
                    assert result['result']['unexpected_index_list'] == value
            elif key == 'unexpected_list':
                assert result['result']['unexpected_list'] == value, "expected " + str(value) + " but got " + str(result['result']['unexpected_list'])
            elif key == 'details':
                assert result['result']['details'] == value
            elif key == 'traceback_substring':
                # Only meaningful when the expectation raised and caught an exception.
                assert result['exception_info']['raised_exception']
                assert value in result['exception_info']['exception_traceback'], "expected to find " + value + " in " + result['exception_info']['exception_traceback']
            else:
                raise ValueError("Invalid test specification: unknown key " + key + " in 'out'")
|
ash2shukla/mongodb-timepass | src/pages/update.py | from json.decoder import JSONDecodeError
import streamlit as st
from ..utils import Page
import streamlit as st
from ..db import MongoDBClient
import json
class UpdatePage(Page):
    """Streamlit page for renaming collections and bulk-updating documents."""

    RENAME_COLL_OPT = "Update Collection Name"
    UPDATE_DOCS_OPT = "Update Documents"

    def __init__(self, state):
        self.state = state
        # Shared MongoDB client created by main() and kept on session state.
        self.db_client: MongoDBClient = self.state.db_client

    def write(self):
        """Render the page and perform the selected update action."""
        st.title("Update")
        opt = st.selectbox(
            "Select Update Type",
            [UpdatePage.RENAME_COLL_OPT, UpdatePage.UPDATE_DOCS_OPT],
        )
        if opt == UpdatePage.RENAME_COLL_OPT:
            db_name = st.selectbox(
                "Select Database", self.db_client.get_database_names()
            )
            coll_from = st.selectbox(
                "Select Collection Name To Update",
                self.db_client.get_collection_names(db_name=db_name),
            )
            coll_to = st.text_input("Enter New Collection Name")
            # Reject an empty name or one containing a forbidden token.
            # (The previous `== "" and not any(...)` condition could only
            # ever flag empty names, never ones containing '$'/'system.'.)
            if coll_to == "" or any(token in coll_to for token in ("$", "system.")):
                st.info(
                    "Collection Names cant be empty. cant have '$' and cant have 'system.'"
                )
                st.stop()
            if st.button("Update"):
                self.db_client.rename_collection(
                    db_name=db_name, coll_from=coll_from, coll_to=coll_to
                )
        elif opt == UpdatePage.UPDATE_DOCS_OPT:
            db_name = st.selectbox(
                "Select Database", self.db_client.get_database_names()
            )
            coll_name = st.selectbox(
                "Select Collection",
                self.db_client.get_collection_names(db_name=db_name),
            )
            _filter = st.text_area("Filter")
            try:
                _filter = json.loads(_filter)
            except JSONDecodeError:
                # An empty text area is also invalid JSON; the page halts
                # here until the user types a parseable filter.
                st.warning("Must be a valid JSON.")
                st.stop()
            see_filter = st.checkbox("See Filter ?")
            if see_filter:
                st.write(_filter)
            see_how_many = st.checkbox("See How Many Match ?")
            if see_how_many:
                count = self.db_client.get_collection_count(db_name=db_name, coll_name=coll_name, _filter=_filter)
                st.write(count)
            update = st.text_input("Update")
            try:
                update = json.loads(update)
            except JSONDecodeError:
                st.warning("Must be a valid JSON.")
                st.stop()
            see_update = st.checkbox("See update ?")
            if see_update:
                st.write(update)
            do_upsert = st.checkbox("Upsert? (Update if found else insert)")
            if st.button("Update"):
                self.db_client.update_documents(
                    db_name=db_name,
                    coll_name=coll_name,
                    _filter=_filter,
                    update=update,
                    upsert=do_upsert,
                )
|
ash2shukla/mongodb-timepass | src/db.py | <gh_stars>0
import streamlit as st
import pymongo
from bson import json_util
import json
from functools import lru_cache
class MongoDBClient:
    """Thin convenience wrapper around a pymongo MongoClient."""

    def __init__(self, CONN_URI):
        # pymongo connects lazily; connection errors surface on first use.
        self.conn = pymongo.MongoClient(CONN_URI)

    def get_database_names(self):
        """Return the names of all databases on the server."""
        return self.conn.list_database_names()

    def get_collection_names(self, db_name):
        """Return the collection names of the given database."""
        db = self.conn[db_name]
        return db.list_collection_names()

    def get_collection_count(self, db_name, coll_name, _filter=None):
        """Return the number of documents in a collection matching `_filter`."""
        if _filter is None:
            _filter = {}
        doc_count = self.conn[db_name][coll_name].count_documents(_filter)
        return doc_count

    def collect_documents(
        self, db_name, coll_name, page, query, projection, page_size, page_number
    ):
        """Fetch one page of documents as plain JSON-compatible dicts.

        `query`/`projection` arrive as JSON strings (callers dump them so the
        arguments stay hashable for caching) and are parsed here.

        NOTE(review): `page_number` is unused — pagination uses `page`;
        the caller appears to pass the same value for both. Confirm before
        removing the parameter.
        """
        query = json.loads(query)
        projection = json.loads(projection)
        result = []
        # After json.loads, projection is "" (from an empty input) or a
        # dict; only a non-empty projection is forwarded to find().
        if projection != "":
            data = list(
                self.conn[db_name][coll_name]
                .find(query, projection)
                .skip((page - 1) * page_size)
                .limit(page_size)
            )
        else:
            data = list(
                self.conn[db_name][coll_name]
                .find(query)
                .skip((page - 1) * page_size)
                .limit(page_size)
            )
        # Round-trip through bson's json_util so ObjectId/date values become
        # JSON-serializable structures streamlit can display.
        for record in data:
            json_str = json_util.dumps(record)
            result.append(json.loads(json_str))
        return result

    def create_collection(self, db_name, coll_name):
        """Create and return a new collection."""
        return self.conn[db_name].create_collection(coll_name)

    def insert_docs(self, db_name, coll_name, document):
        """Insert one dict or a list of dicts; returns pymongo's result.

        Any other payload type is silently ignored (returns None).
        """
        if isinstance(document, dict):
            return self.conn[db_name][coll_name].insert_one(document)
        elif isinstance(document, list):
            return self.conn[db_name][coll_name].insert_many(document)

    def rename_collection(self, db_name, coll_from, coll_to):
        """Rename a collection within the same database."""
        return self.conn[db_name][coll_from].rename(coll_to)

    def update_documents(self, db_name, coll_name, _filter, update, upsert):
        """Apply `update` to every document matching `_filter`."""
        return self.conn[db_name][coll_name].update_many(_filter, update, upsert=upsert)

    def drop_database(self, db_name):
        """Drop an entire database."""
        return self.conn.drop_database(db_name)

    def drop_collection(self, db_name, coll_name):
        """Drop a single collection."""
        return self.conn[db_name].drop_collection(coll_name)

    def remove_documents(self, db_name, coll_name, _filter):
        """Delete every document matching `_filter`."""
        return self.conn[db_name][coll_name].delete_many(_filter)

    def __del__(self):
        # Best-effort cleanup when the client object is garbage-collected.
        self.conn.close()
|
ash2shukla/mongodb-timepass | src/pages/explore.py | <filename>src/pages/explore.py<gh_stars>0
import streamlit as st
from ..utils import Page
import streamlit as st
from math import ceil
from json import loads, JSONDecodeError
import json
class ExplorePage(Page):
    """Streamlit page for browsing a collection with find/projection and paging."""

    def __init__(self, state):
        self.state = state
        # Shared MongoDB client created by main() and kept on session state.
        self.db_client = self.state.db_client

    def write(self):
        """Render the sidebar controls and the currently selected result page."""
        st.title("Explore")
        db_name = st.sidebar.selectbox(
            "Select database: ", self.db_client.get_database_names()
        )
        coll_name = st.sidebar.selectbox(
            "Select collection: ", self.db_client.get_collection_names(db_name=db_name)
        )
        # Placeholders are created first so the widgets keep their sidebar
        # position even though their values are filled in below.
        total_documents = st.sidebar.empty()
        document_count = self.db_client.get_collection_count(
            db_name=db_name, coll_name=coll_name
        )
        find_query = st.sidebar.empty()
        find_warning = st.sidebar.empty()
        projection = st.sidebar.empty()
        projection_warning = st.sidebar.empty()
        st.sidebar.markdown("---")
        page_size = st.sidebar.number_input(
            "Page Size", min_value=1, max_value=1000, value=10
        )
        page_number = st.number_input(
            label="Page Number",
            min_value=1,
            # Guard against empty collections: ceil(0 / page_size) == 0 would
            # make max_value < min_value and crash the number_input widget.
            max_value=max(1, ceil(document_count / page_size)),
            step=1,
        )
        total_documents.markdown(f"Total Documents: {document_count}")
        find_str = find_query.text_input(
            "Find Query to Run on selected Collection", value="{}"
        )
        try:
            find_str = loads(find_str)
        except JSONDecodeError:
            find_warning.warning("Find string should be a valid JSON")
            st.stop()
        project_str = projection.text_input(
            "Projection to Filter on selected Collection", value=""
        )
        try:
            # An empty projection means "no projection"; keep it as "".
            if project_str != "":
                project_str = loads(project_str)
        except JSONDecodeError:
            projection_warning.warning("Projection String should be a valid JSON")
            st.stop()
        # dump back to string to avoid unhashable type dict in LRU cache
        st.write(
            self.db_client.collect_documents(
                db_name=db_name,
                coll_name=coll_name,
                page=page_number,
                query=json.dumps(find_str),
                projection=json.dumps(project_str),
                page_size=page_size,
                page_number=page_number,
            )
        )
|
ash2shukla/mongodb-timepass | src/pages/delete.py | from json.decoder import JSONDecodeError
import streamlit as st
from ..utils import Page
import streamlit as st
from ..db import MongoDBClient
import json
class DeletePage(Page):
    """Streamlit page for destructive operations: drop DBs/collections, delete docs."""

    DROP_DB_OPT = "Drop database"
    DROP_COLL_OPT = "Drop collection"
    DELETE_DOC_OPT = "Remove document(s)"

    def __init__(self, state):
        self.state = state
        # Shared MongoDB client created by main() and kept on session state.
        self.db_client: MongoDBClient = self.state.db_client

    def write(self):
        """Render the page and perform the selected removal action."""
        st.title("Remove")
        obj_type = st.selectbox("Select object to remove", [DeletePage.DROP_DB_OPT, DeletePage.DROP_COLL_OPT, DeletePage.DELETE_DOC_OPT])
        if obj_type == DeletePage.DROP_DB_OPT:
            db_name = st.selectbox("Select Database Name", self.db_client.get_database_names())
            st.write("You sure about dropping the db ? 😯")
            if st.button("DROP DATABASE"):
                self.db_client.drop_database(db_name=db_name)
        elif obj_type == DeletePage.DROP_COLL_OPT:
            db_name = st.selectbox("Select Database Name", self.db_client.get_database_names())
            coll_name = st.selectbox("Select Collection Name", self.db_client.get_collection_names(db_name=db_name))
            st.write("You sure about dropping the collection ? 😯")
            if st.button("DROP COLLECTION"):
                self.db_client.drop_collection(db_name=db_name, coll_name=coll_name)
        elif obj_type == DeletePage.DELETE_DOC_OPT:
            db_name = st.selectbox("Select Database Name", self.db_client.get_database_names())
            coll_name = st.selectbox("Select Collection Name", self.db_client.get_collection_names(db_name=db_name))
            _filter = st.text_area("Filter")
            try:
                _filter = json.loads(_filter)
            except JSONDecodeError:
                # An empty text area is also invalid JSON; the page halts
                # here until the user types a parseable filter.
                st.warning("Must be a valid JSON.")
                st.stop()
            see_filter = st.checkbox("See Filter ?")
            if see_filter:
                st.write(_filter)
            see_how_many = st.checkbox("See How Many Match ?")
            if see_how_many:
                count = self.db_client.get_collection_count(db_name=db_name, coll_name=coll_name, _filter=_filter)
                st.write(count)
            if st.button("Delete"):
                self.db_client.remove_documents(db_name=db_name, coll_name=coll_name, _filter=_filter)
|
ash2shukla/mongodb-timepass | src/pages/create.py | from json.decoder import JSONDecodeError
import streamlit as st
from ..utils import Page
import streamlit as st
from ..db import MongoDBClient
import json
class CreatePage(Page):
    """Streamlit page for creating collections and inserting documents."""

    CREATE_NEW_DB_OPT = "CREATE NEW DATABASE"
    CREATE_NEW_COLL_OPT = "CREATE NEW COLLECTION"
    COLL_OPT = "collection"
    DOC_OPT = "document(s)"

    def __init__(self, state):
        self.state = state
        # Shared MongoDB client created by main() and kept on session state.
        self.db_client: MongoDBClient = self.state.db_client

    @staticmethod
    def _require_valid_db_name(db_name):
        """Show an info message and halt the script run if db_name is invalid.

        MongoDB database names cannot be empty, must be shorter than 64
        characters, and cannot contain '$', '\\', '/', '.', spaces or quotes.
        (The previous `== "" and not any(...) and len(...) < 64` condition
        could only ever flag empty names.)
        """
        forbidden = {"$", "\\", "/", ".", " ", '"'}
        if db_name == "" or any(char in db_name for char in forbidden) or len(db_name) >= 64:
            st.info(
                "Database Names cant be empty. cant have '$', '\\', '/', '.', 'space', '\"' (quotes)' and length must be less than 64"
            )
            st.stop()

    @staticmethod
    def _require_valid_coll_name(coll_name):
        """Show an info message and halt the script run if coll_name is invalid.

        Collection names cannot be empty and cannot contain '$' or 'system.'.
        """
        if coll_name == "" or any(token in coll_name for token in ("$", "system.")):
            st.info(
                "Collection Names cant be empty. cant have '$' and cant have 'system.'"
            )
            st.stop()

    def write(self):
        """Render the page and perform the selected create action."""
        st.title("Create")
        obj_type = st.selectbox(
            "Select object to create", [CreatePage.COLL_OPT, CreatePage.DOC_OPT]
        )
        if obj_type == CreatePage.COLL_OPT:
            db_name = st.selectbox(
                "Select Database",
                self.db_client.get_database_names() + [CreatePage.CREATE_NEW_DB_OPT],
            )
            if db_name == CreatePage.CREATE_NEW_DB_OPT:
                db_name = st.text_input("Database Name ?")
                self._require_valid_db_name(db_name)
            coll_name = st.text_input("Collection Name ?")
            self._require_valid_coll_name(coll_name)
            if st.button("Create"):
                self.db_client.create_collection(db_name=db_name, coll_name=coll_name)
        elif obj_type == CreatePage.DOC_OPT:
            db_name = st.selectbox(
                "Select Database",
                self.db_client.get_database_names() + [CreatePage.CREATE_NEW_DB_OPT],
            )
            coll_name = None
            if db_name == CreatePage.CREATE_NEW_DB_OPT:
                # A brand-new database has no collections yet, so the
                # collection name is typed rather than picked from a list.
                db_name = st.text_input("Database Name ?")
                self._require_valid_db_name(db_name)
                coll_name = st.text_input("Collection Name ?")
                self._require_valid_coll_name(coll_name)
            else:
                coll_name = st.selectbox(
                    "Select Collection",
                    self.db_client.get_collection_names(db_name=db_name)
                    + [CreatePage.CREATE_NEW_COLL_OPT],
                )
                if coll_name == CreatePage.CREATE_NEW_COLL_OPT:
                    coll_name = st.text_input("Collection Name ?")
                    self._require_valid_coll_name(coll_name)
            document = st.text_area("Document(s) data ?")
            try:
                document = json.loads(document)
            except JSONDecodeError:
                st.warning("Must be a valid JSON.")
                st.stop()
            if st.button("Create"):
                self.db_client.insert_docs(db_name, coll_name, document)
            see_doc = st.checkbox("See Document(s) ?")
            if see_doc:
                st.write(document)
|
ash2shukla/mongodb-timepass | main.py | <gh_stars>0
import streamlit as st
from src.utils import Page, provide_state, get_base_64_img, _skip_hash
from src.pages import ExplorePage, CreatePage, UpdatePage, DeletePage
from pathlib import Path
from src.db import MongoDBClient
import pymongo
from typing import Dict, Type
# Maps each sidebar radio label to the page class that renders it.
PAGE_MAP: Dict[str, Type[Page]] = {
    "Explore": ExplorePage,
    "Create": CreatePage,
    "Update": UpdatePage,
    "Delete": DeletePage
}


@provide_state(hash_funcs={MongoDBClient: _skip_hash})
def main(state=None):
    """Entry point: configure the app, collect a connection URI, route to a page."""
    # NOTE(review): st.beta_set_page_config is the pre-1.0 streamlit API;
    # newer releases rename it to st.set_page_config — confirm the pinned
    # streamlit version before changing.
    st.beta_set_page_config(
        page_title="MongoDB TimePass",
        layout="centered",
        initial_sidebar_state="auto",
    )
    logo_uri = get_base_64_img(Path(__file__).parent / "assets" / "timepass-logo.png")
    st.sidebar.markdown(logo_uri, unsafe_allow_html=True)
    CONN_URI = st.sidebar.text_input("Connection URI")
    if CONN_URI == "":
        st.sidebar.info("Input a connection URI")
        st.stop()
    current_page = st.sidebar.radio("Go To", list(PAGE_MAP))
    # Rebuild the client when first used or when the URI changes;
    # different sessions can have different DB connections.
    if state.db_client is None or state.CONN_URI != CONN_URI:
        state.db_client = MongoDBClient(CONN_URI)
        state.CONN_URI = CONN_URI
    PAGE_MAP[current_page](state=state).write()


if __name__ == "__main__":
    main()
|
ash2shukla/mongodb-timepass | src/pages/__init__.py | from .explore import ExplorePage
from .create import CreatePage
from .update import UpdatePage
from .delete import DeletePage
__all__ = ["ExplorePage", "CreatePage", "UpdatePage", "DeletePage"]
|
adriengentil/assisted-test-infra | src/update_assisted_service_cm.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Idea is to pass os environments to assisted-service config map, to make an easy way to configure assisted-service
Note: defaulting an env var to "" in Makefile, will result in an empty string value in the configmap.
E.g.
Makefile:
MY_VAR := $(or $(MY_VAR), "")
configmap:
MY_VAR: ""
Hence, in order to support unset env vars, avoid override in Makefile.
"""
import json
import os
import yaml
from deprecated_utils import warn_deprecate
warn_deprecate()
# Path (relative to the working directory) of the configmap being patched.
CM_PATH = "assisted-service/deploy/assisted-service-configmap.yaml"

# (env var name, default) pairs copied into the configmap's data section when
# set. A literal '""' value (a Makefile-defaulted empty string) is written as
# an empty string; unset variables are omitted.
ENVS = [
    ("INSTALLER_IMAGE", ""),
    ("CONTROLLER_IMAGE", ""),
    ("SERVICE_BASE_URL", ""),
    ("IMAGE_SERVICE_BASE_URL", ""),
    ("AGENT_DOCKER_IMAGE", ""),
    ("BASE_DNS_DOMAINS", ""),
    ("IMAGE_BUILDER", ""),
    ("OCM_BASE_URL", ""),
    ("AGENT_TIMEOUT_START", ""),
    ("PUBLIC_CONTAINER_REGISTRIES", ""),
    ("CHECK_CLUSTER_VERSION", ""),
    ("HW_VALIDATOR_REQUIREMENTS", ""),
    ("HW_VALIDATOR_MIN_CPU_CORES_SNO", ""),
    ("HW_VALIDATOR_MIN_RAM_GIB_SNO", ""),
]

# Hardware requirement defaults injected into the "default" version entry of
# HW_VALIDATOR_REQUIREMENTS (see update_requirements).
DEFAULT_MASTER_REQUIREMENTS = {
    "cpu_cores": 4,
    "ram_mib": 8192,
    "disk_size_gb": 10,
    "installation_disk_speed_threshold_ms": 10,
}
DEFAULT_WORKER_REQUIREMENTS = {
    "cpu_cores": 2,
    "ram_mib": 3072,
    "disk_size_gb": 10,
    "installation_disk_speed_threshold_ms": 10,
}
# Single-node OpenShift (SNO) combines master and worker roles, hence the
# larger defaults.
DEFAULT_SNO_REQUIREMENTS = {
    "cpu_cores": 8,
    "ram_mib": 32768,
    "disk_size_gb": 10,
    "installation_disk_speed_threshold_ms": 10,
}
DEFAULT_REQUIREMENTS = [
    {
        "version": "default",
        "master": DEFAULT_MASTER_REQUIREMENTS,
        "worker": DEFAULT_WORKER_REQUIREMENTS,
        "sno": DEFAULT_SNO_REQUIREMENTS,
    }
]
def _read_yaml():
    """Load and return the configmap YAML at CM_PATH, or None if it is absent."""
    if not os.path.exists(CM_PATH):
        return None
    # Read-only access is sufficient here; the file is rewritten separately
    # by set_envs_to_service_cm (the previous "r+" mode was unnecessary).
    with open(CM_PATH, "r") as cm_file:
        return yaml.safe_load(cm_file)
def _get_relevant_envs():
    """Collect the configured ENVS from the process environment.

    Returns a dict of env name -> value for every variable that resolves to
    something truthy. A literal '""' value (a Makefile-defaulted empty
    string) is mapped to a real empty string; unset/empty vars are omitted.
    """
    data = {}
    for name, default in ENVS:
        value = os.getenv(name, default)
        # Set value as empty if variable is an empty string (e.g. defaulted in Makefile)
        if value == '""':
            data[name] = ""
        elif value:
            data[name] = value
    return data
def update_requirements(requirements_json):
    """Return the HW validator requirements JSON with defaults applied.

    An empty or placeholder value yields the default requirements verbatim;
    otherwise the "default" version entry has its master/worker/sno sections
    overwritten with the default requirement dictionaries.
    """
    if requirements_json in ("", "REPLACE_HW_VALIDATOR_REQUIREMENTS"):
        return json.dumps(DEFAULT_REQUIREMENTS)
    requirements = json.loads(requirements_json)
    for entry in requirements:
        if entry["version"] == "default":
            entry["master"] = DEFAULT_MASTER_REQUIREMENTS
            entry["worker"] = DEFAULT_WORKER_REQUIREMENTS
            entry["sno"] = DEFAULT_SNO_REQUIREMENTS
    return json.dumps(requirements)
def set_envs_to_service_cm():
    """Merge the relevant env vars into the assisted-service configmap and save it."""
    cm_data = _read_yaml()
    if not cm_data:
        raise Exception(f"{CM_PATH} must exists before setting envs to it")
    data = cm_data["data"]
    data.update(_get_relevant_envs())
    # Normalize HW_VALIDATOR_REQUIREMENTS, falling back to the defaults when
    # the env left it empty or as the placeholder value.
    data["HW_VALIDATOR_REQUIREMENTS"] = update_requirements(
        data.get("HW_VALIDATOR_REQUIREMENTS", "")
    )
    with open(CM_PATH, "w") as cm_file:
        yaml.dump(cm_data, cm_file)


if __name__ == "__main__":
    set_envs_to_service_cm()
|
adriengentil/assisted-test-infra | src/triggers/olm_operators_trigger.py | <gh_stars>0
from contextlib import suppress
from typing import Callable
from assisted_test_infra.test_infra.utils.operators_utils import resource_param
from consts import OperatorResource
from triggers.env_trigger import Trigger, Triggerable
class OlmOperatorsTrigger(Trigger):
    """Trigger that applies OLM-operator-specific resource requirements.

    When the condition matches, each OperatorResource key is resolved for
    this operator via ``resource_param`` and pushed onto the config.
    """

    def __init__(self, condition: Callable[[Triggerable], bool], operator: str):
        # The operator name is forwarded to the base Trigger (it becomes part
        # of the condition description) and kept locally for resolution.
        super().__init__(condition=condition, operator=operator)
        self._operator = operator

    def handle(self, config: Triggerable):
        """Resolve this operator's resource values and apply them to config."""
        variables_to_set = self.get_olm_variables(config)
        config.handle_trigger(self._conditions_string, variables_to_set)

    def get_olm_variables(self, config: Triggerable) -> dict:
        """Map each OperatorResource key to its value for this operator.

        Keys the config object does not define are silently skipped
        (AttributeError from getattr is suppressed), so only attributes the
        config actually carries end up in the returned dict.
        """
        operator_variables = {}
        for key in OperatorResource.get_resource_dict().keys():
            with suppress(AttributeError):
                operator_variables[key] = resource_param(getattr(config, key), key, self._operator)
        return operator_variables
|
adriengentil/assisted-test-infra | src/assisted_test_infra/test_infra/controllers/ipxe_controller/ipxe_controller.py | import os
import shutil
import consts
from assisted_test_infra.test_infra import utils
from service_client import InventoryClient, log
class IPXEController:
    """Manages a containerized iPXE boot server for an infra-env.

    Downloads the infra-env's generated iPXE script, builds a server image
    with podman, and runs it as a host-networked container.
    """

    def __init__(
        self,
        api_client: InventoryClient,
        name: str = None,
        port: int = consts.DEFAULT_IPXE_SERVER_PORT,
        ip: str = consts.DEFAULT_IPXE_SERVER_IP,
    ):
        # `name` doubles as both the container name and the image tag.
        self._name = name
        self._ip = ip
        self._port = port
        self._api_client = api_client
        self._dir = os.path.dirname(os.path.realpath(__file__))
        # Downloaded iPXE scripts are served from this folder by the container.
        self._ipxe_scripts_folder = f"{self._dir}/server/ipxe_scripts"

    def remove(self):
        """Stop/remove the server container and delete downloaded scripts."""
        log.info(f"Removing iPXE Server {self._name}")
        utils.remove_running_container(container_name=self._name)
        self._remove_ipxe_scripts_folder()

    def start(self, infra_env_id: str, cluster_name: str):
        """Download the iPXE script, build the server image, then run it."""
        log.info("Preparing iPXE server")
        self._download_ipxe_script(infra_env_id=infra_env_id, cluster_name=cluster_name)
        self._build_server_image()
        self.run_ipxe_server()

    def run_ipxe_server(self):
        """Run the previously built server image as a detached container."""
        log.info(f"Running iPXE Server {self._name}")
        run_flags = [
            "-d",
            "--network=host",
            f"--publish {self._port}:{self._port}",
        ]
        utils.run_container(container_name=self._name, image=self._name, flags=run_flags)

    def _build_server_image(self):
        """Build the iPXE server container image via podman."""
        log.info(f"Creating Image for iPXE Server {self._name}")
        build_flags = f"--build-arg SERVER_IP={self._ip} --build-arg SERVER_PORT={self._port}"
        utils.run_command(f"podman {consts.PODMAN_FLAGS} build {self._dir}/server -t {self._name} {build_flags}")

    def _download_ipxe_script(self, infra_env_id: str, cluster_name: str):
        """Fetch the infra-env's iPXE script into the scripts folder."""
        log.info(f"Downloading iPXE script to {self._ipxe_scripts_folder}")
        utils.recreate_folder(self._ipxe_scripts_folder, force_recreate=False)
        self._api_client.download_and_save_infra_env_file(
            infra_env_id=infra_env_id, file_name="ipxe-script", file_path=f"{self._ipxe_scripts_folder}/{cluster_name}"
        )

    def _remove_ipxe_scripts_folder(self):
        """Delete the scripts folder if it exists (no-op otherwise)."""
        log.info(f"Removing iPXE scripts folder {self._ipxe_scripts_folder}")
        if os.path.exists(self._ipxe_scripts_folder):
            path = os.path.abspath(self._ipxe_scripts_folder)
            shutil.rmtree(path)
|
adriengentil/assisted-test-infra | src/assisted_test_infra/test_infra/utils/operators_utils.py | import os
from typing import List
import waiting
from assisted_service_client import MonitoredOperator
import consts
from service_client import InventoryClient, log
def get_env(env, default=None):
    """Read environment variable `env`, stripped of surrounding whitespace.

    An unset variable, an empty value, or the literal two-character string '""'
    all count as "not set" and yield `default`.
    """
    value = os.environ.get(env, "").strip()
    return value if value and value != '""' else default
def parse_olm_operators_from_env():
    """Return the OLM operator names from $OLM_OPERATORS, lowercased and
    split on whitespace (empty list when unset)."""
    raw = get_env("OLM_OPERATORS", default="")
    return raw.lower().split()
def _are_operators_in_status(
    cluster_id: str,
    client: InventoryClient,
    operators: List[MonitoredOperator],
    operators_count: int,
    statuses: List[str],
    fall_on_error_status: bool,
) -> bool:
    """Return True once at least `operators_count` operators are in one of `statuses`.

    With `fall_on_error_status`, a FAILED operator raises its operator-specific
    exception immediately instead of waiting further.
    """
    log.info(
        "Asked operators to be in one of the statuses from %s and currently operators statuses are %s",
        statuses,
        [(operator.name, operator.status, operator.status_info) for operator in operators],
    )

    if fall_on_error_status:
        for operator in operators:
            if operator.status != consts.OperatorStatus.FAILED:
                continue
            _Exception = consts.olm_operators.get_exception_factory(operator.name)
            raise _Exception(f"Operator {operator.name} status is failed with info {operator.status_info}")

    # Log cluster progress purely for debuggability of slow installations.
    cluster = client.cluster_get(cluster_id=cluster_id).to_dict()
    log.info("Cluster %s progress info: %s", cluster_id, cluster["progress"])

    matched = sum(1 for operator in operators if operator.status in statuses)
    return matched >= operators_count
def is_operator_in_status(operators: List[MonitoredOperator], operator_name: str, status: str) -> bool:
    """Return True if the operator named `operator_name` currently reports `status`."""
    log.info(
        "Asked operator %s to be in status: %s, and currently operators statuses are %s",
        operator_name,
        status,
        [(operator.name, operator.status, operator.status_info) for operator in operators],
    )
    named = (operator for operator in operators if operator.name == operator_name)
    return any(operator.status == status for operator in named)
def wait_till_all_operators_are_in_status(
    client,
    cluster_id,
    operators_count,
    operator_types,
    statuses,
    timeout=consts.CLUSTER_INSTALLATION_TIMEOUT,
    fall_on_error_status=False,
    interval=10,
):
    """Poll until `operators_count` operators of `operator_types` reach one of `statuses`.

    On timeout or any other failure, the full operator list is logged before
    the exception is re-raised.
    """
    log.info(f"Wait till {operators_count} {operator_types} operators are in one of the statuses {statuses}")

    def _check():
        # Re-fetch and re-filter the operator list on every poll.
        current = filter_operators_by_type(client.get_cluster_operators(cluster_id), operator_types)
        return _are_operators_in_status(
            cluster_id,
            client,
            current,
            operators_count,
            statuses,
            fall_on_error_status,
        )

    try:
        waiting.wait(
            _check,
            timeout_seconds=timeout,
            sleep_seconds=interval,
            waiting_for=f"Monitored {operator_types} operators to be in of the statuses {statuses}",
        )
    except BaseException:
        # Dump the complete operator state to aid post-mortem, then re-raise.
        log.info("All operators: %s", client.get_cluster_operators(cluster_id))
        raise
def filter_operators_by_type(operators: List[MonitoredOperator], operator_types: List[str]) -> List[MonitoredOperator]:
    """Keep only the operators whose `operator_type` is in `operator_types`."""
    return [operator for operator in operators if operator.operator_type in operator_types]
def resource_param(base_value: int, resource_name: str, operator: str):
    """Return `base_value`, bumped by the operator's resource requirement when
    the base alone does not exceed it.

    Raises:
        ValueError: when the operator (or resource key) is unknown.
    """
    try:
        required = consts.OperatorResource.values()[operator][resource_name]
    except KeyError as e:
        raise ValueError(f"Unknown operator name {e.args[0]}") from e
    # Only add headroom when the requested base does not already exceed the
    # operator's requirement.
    return base_value + required if base_value <= required else base_value
|
adriengentil/assisted-test-infra | src/triggers/env_trigger.py | import inspect
import re
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List
from assisted_test_infra.test_infra.utils import EnvVar
from service_client import log
class DataPool(ABC):
    """Abstract source of EnvVar definitions backing a Triggerable config."""

    @classmethod
    @abstractmethod
    def get_env(cls, item) -> EnvVar:
        """Return the EnvVar object registered for `item`."""
        pass
class Triggerable(ABC):
    """Base for configuration objects whose fields a Trigger may set —
    unless the user already set them explicitly via the environment."""

    @abstractmethod
    def _get_data_pool(self) -> DataPool:
        pass

    @abstractmethod
    def _set(self, key: str, value: Any):
        pass

    def _is_set(self, var, expected_value):
        # Compare the pool's current value for `var` against `expected_value`.
        return getattr(self._get_data_pool(), var, None) == expected_value

    def is_user_set(self, item: str):
        """True when the env var behind `item` was explicitly set by the user."""
        try:
            return self._get_data_pool().get_env(item).is_user_set
        except AttributeError:
            return False

    def handle_trigger(self, conditions_string: List[str], values: Dict[str, Any]) -> None:
        """Apply trigger values: skip unknown attributes and user-set ones."""
        for k, v in values.items():
            if not hasattr(self, k):
                continue
            if self.is_user_set(k):
                log.warning(f"Skipping setting {k} to value {v} due that it already been set by the user")
            else:
                log.debug(f"{self.__class__.__name__} - Trigger set `{k}` to `{v}`, Condition: {conditions_string}")
                self._set(k, v)
class Trigger:
    """Mechanism for applying pre-known configurations if a given trigger condition was met"""

    def __init__(self, *, condition: Callable[[Triggerable], bool], **kwargs):
        self._condition = condition
        self._variables_to_set = kwargs
        # Human-readable rendering of the lambda source, used in log messages.
        self._conditions_string = re.findall(r"(lambda.*),", str(inspect.getsourcelines(condition)[0]))

    def is_condition_met(self, config: Triggerable):
        """Evaluate the condition; a missing attribute counts as 'not met'."""
        try:
            return self._condition(config)
        except AttributeError:
            return False

    def handle(self, config: Triggerable):
        config.handle_trigger(self._conditions_string, self._variables_to_set)

    @classmethod
    def trigger_configurations(cls, configs: List[Triggerable], default_triggers: dict):
        """Collect every trigger whose condition holds for at least one config,
        then apply each collected trigger to every config."""
        met_triggers = {
            trigger_name: trigger
            for trigger_name, trigger in default_triggers.items()
            if any(trigger.is_condition_met(config) for config in configs)
        }
        for trigger_name, trigger in met_triggers.items():
            for config in configs:
                log.info(f"Handling {trigger_name} trigger")
                trigger.handle(config)
|
adriengentil/assisted-test-infra | src/assisted_test_infra/test_infra/helper_classes/infra_env.py | <reponame>adriengentil/assisted-test-infra<filename>src/assisted_test_infra/test_infra/helper_classes/infra_env.py
import json
import os
from pathlib import Path
from typing import List, Optional
from assisted_service_client import models
from junit_report import JunitTestCase
import consts
from assisted_test_infra.test_infra import BaseInfraEnvConfig, utils
from assisted_test_infra.test_infra.helper_classes.entity import Entity
from assisted_test_infra.test_infra.helper_classes.nodes import Nodes
from assisted_test_infra.test_infra.utils.waiting import wait_till_all_infra_env_hosts_are_in_status
from service_client import InventoryClient, log
class InfraEnv(Entity):
    """Wrapper around an assisted-service infra-env: handles creation, discovery
    ISO download, host discovery waiting and per-host operations, delegating the
    actual work to the inventory REST client."""

    _config: BaseInfraEnvConfig

    def __init__(self, api_client: InventoryClient, config: BaseInfraEnvConfig, nodes: Optional[Nodes] = None):
        super().__init__(api_client, config, nodes)

    @property
    def id(self):
        # The infra-env UUID assigned by the service, stored on the config.
        return self._config.infra_env_id

    def update_existing(self) -> str:
        # Nothing to reconcile for an already-registered infra-env; report its id.
        return self.id

    def _create(self):
        """Register a new infra-env with the service and remember its id on the config."""
        if self._config.ignition_config_override:
            # The API expects the override as a JSON string, not a dict.
            ignition_config_override = json.dumps(self._config.ignition_config_override)
        else:
            ignition_config_override = None

        infra_env = self.api_client.create_infra_env(
            self._config.entity_name.get(),
            pull_secret=self._config.pull_secret,
            ssh_public_key=self._config.ssh_public_key,
            openshift_version=self._config.openshift_version,
            cluster_id=self._config.cluster_id,
            static_network_config=self._config.static_network_config,
            ignition_config_override=ignition_config_override,
            proxy=self._config.proxy,
            image_type=self._config.iso_image_type,
        )
        self._config.infra_env_id = infra_env.id
        return infra_env.id

    @JunitTestCase()
    def download_image(self, iso_download_path: str = None) -> Path:
        """Download the discovery ISO to `iso_download_path` (default: configured path)."""
        iso_download_url = self.get_details().download_url
        iso_download_path = iso_download_path or self._config.iso_download_path

        # ensure file path exists before downloading
        if not os.path.exists(iso_download_path):
            utils.recreate_folder(os.path.dirname(iso_download_path), force_recreate=False)

        log.info(f"Downloading image {iso_download_url} to {iso_download_path}")
        return utils.download_file(iso_download_url, iso_download_path, self._config.verify_download_iso_ssl)

    @JunitTestCase()
    def wait_until_hosts_are_discovered(self, nodes_count: int, allow_insufficient=False):
        """Block until `nodes_count` hosts register as known-unbound
        (optionally also accepting insufficient-unbound)."""
        statuses = [consts.NodesStatus.KNOWN_UNBOUND]
        if allow_insufficient:
            statuses.append(consts.NodesStatus.INSUFFICIENT_UNBOUND)
        wait_till_all_infra_env_hosts_are_in_status(
            client=self.api_client,
            infra_env_id=self.id,
            nodes_count=nodes_count,
            statuses=statuses,
            timeout=consts.NODES_REGISTERED_TIMEOUT,
        )

    def update_host(
        self,
        host_id: str,
        host_role: Optional[str] = None,
        host_name: Optional[str] = None,
        node_labels: Optional[List[dict]] = None,
    ):
        """Set role, name and/or labels on a discovered host."""
        self.api_client.update_host(
            infra_env_id=self.id, host_id=host_id, host_role=host_role, host_name=host_name, node_labels=node_labels
        )

    def bind_host(self, host_id: str, cluster_id: str) -> None:
        self.api_client.bind_host(infra_env_id=self.id, host_id=host_id, cluster_id=cluster_id)

    def unbind_host(self, host_id: str) -> None:
        self.api_client.unbind_host(infra_env_id=self.id, host_id=host_id)

    def delete_host(self, host_id: str) -> None:
        self.api_client.deregister_host(infra_env_id=self.id, host_id=host_id)

    def get_discovery_ignition(self) -> str:
        return self.api_client.get_discovery_ignition(infra_env_id=self.id)

    def patch_discovery_ignition(self, ignition_info: str) -> None:
        self.api_client.patch_discovery_ignition(infra_env_id=self.id, ignition_info=ignition_info)

    def get_details(self) -> models.infra_env.InfraEnv:
        """Fetch the infra-env record from the service."""
        return self.api_client.get_infra_env(infra_env_id=self.id)

    def update_proxy(self, proxy: models.Proxy) -> None:
        """Persist the proxy in the local config, then push it to the service."""
        self.update_config(proxy=proxy)
        infra_env_update_params = models.InfraEnvUpdateParams(proxy=self._config.proxy)
        self.api_client.update_infra_env(infra_env_id=self.id, infra_env_update_params=infra_env_update_params)

    def update_static_network_config(self, static_network_config: List[dict]) -> None:
        """Persist the static network config locally, then push it to the service."""
        self.update_config(static_network_config=static_network_config)
        infra_env_update_params = models.InfraEnvUpdateParams(static_network_config=static_network_config)
        self.api_client.update_infra_env(infra_env_id=self.id, infra_env_update_params=infra_env_update_params)

    def select_host_installation_disk(self, host_id: str, disk_paths: List[dict]) -> None:
        self.api_client.select_installation_disk(infra_env_id=self.id, host_id=host_id, disk_paths=disk_paths)

    def deregister(self, deregister_hosts=True):
        """Delete the infra-env from the service; by default deregister its hosts first."""
        log.info(f"Deregister infra env with id: {self.id}")
        if deregister_hosts:
            for host in self.api_client.client.v2_list_hosts(self.id):
                log.info(f"Deregister infra_env host with id: {host['id']}")
                self.api_client.client.v2_deregister_host(infra_env_id=self.id, host_id=host["id"])
        self.api_client.client.deregister_infra_env(self.id)
        # Clear the cached id so the wrapper cannot be reused by accident.
        self._config.infra_env_id = None
|
adriengentil/assisted-test-infra | src/assisted_test_infra/test_infra/controllers/ipxe_controller/server/local_ipxe_server.py | <filename>src/assisted_test_infra/test_infra/controllers/ipxe_controller/server/local_ipxe_server.py<gh_stars>0
#!/usr/bin/python3
import os
from http.server import CGIHTTPRequestHandler, HTTPServer
# Serve the iPXE scripts directory over HTTP on the configured address.
ip = os.getenv("SERVER_IP", "192.168.122.1")
port = int(os.getenv("SERVER_PORT", 8500))

# Make sure the server is hosting the iPXE scripts directory.
# Renamed from `dir`, which shadowed the builtin of the same name.
scripts_dir = f"{os.getcwd()}/ipxe_scripts"
os.chdir(scripts_dir)

# Create server object
server_object = HTTPServer(server_address=(ip, port), RequestHandlerClass=CGIHTTPRequestHandler)

# Start the web server (blocks forever).
server_object.serve_forever()
|
adriengentil/assisted-test-infra | src/assisted_test_infra/test_infra/helper_classes/config/nodes_config.py | <reponame>adriengentil/assisted-test-infra
from abc import ABC
from dataclasses import dataclass, field
from typing import Dict, List
from munch import Munch
from .controller_config import BaseNodeConfig
@dataclass
class BaseTerraformConfig(BaseNodeConfig, ABC):
    """
    Define all configuration variables needed by Nodes during their execution.
    All arguments must default to None and carry a type hint.
    """

    single_node_ip: str = None
    dns_records: Dict[str, str] = field(default_factory=dict)
    libvirt_master_ips: List[str] = None
    libvirt_secondary_master_ips: List[str] = None
    libvirt_worker_ips: List[str] = None
    libvirt_secondary_worker_ips: List[str] = None
    ingress_dns: bool = False
    net_asset: Munch = None
    # NOTE(review): tf_folder presumably points at the Terraform working
    # directory for this deployment — confirm against the controller.
    tf_folder: str = None
    network_name: str = None
    storage_pool_path: str = None

    def __post_init__(self):
        # Defer to BaseNodeConfig's post-init processing.
        super().__post_init__()
|
adriengentil/assisted-test-infra | scripts/override_release_images.py | <reponame>adriengentil/assisted-test-infra
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import json
import os
import shlex
import subprocess
import tempfile
from argparse import ArgumentParser
from contextlib import contextmanager
import semver
@contextmanager
def pull_secret_file():
    """Yield the path of a temporary file containing $PULL_SECRET.

    The file is deleted when the context exits.

    Raises:
        ValueError: if PULL_SECRET is unset or is not valid JSON.
            (Previously an unset variable surfaced as an opaque TypeError
            from json.loads(None).)
    """
    pull_secret = os.environ.get("PULL_SECRET")
    if pull_secret is None:
        raise ValueError("PULL_SECRET environment variable is not set")
    try:
        json.loads(pull_secret)
    except json.JSONDecodeError as e:
        raise ValueError("Value of PULL_SECRET environment variable is not a valid JSON payload") from e

    with tempfile.NamedTemporaryFile(mode="w") as f:
        f.write(pull_secret)
        # Flush so readers of f.name see the full content while we yield.
        f.flush()
        yield f.name
def run_command(command, shell=False, raise_errors=True, env=None):
    """Run a command and return (stdout, stderr, returncode), both streams stripped.

    Without `shell`, the command string is tokenized with shlex. With
    `raise_errors` (default), a nonzero exit raises RuntimeError.
    """
    command = command if shell else shlex.split(command)
    process = subprocess.run(
        command, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, universal_newlines=True
    )

    def _as_text(buf):
        # Defensive: subprocess already decodes via universal_newlines, but a
        # file-like buffer would still be read and decoded here.
        return buf.read().decode() if hasattr(buf, "read") else buf

    out = _as_text(process.stdout).strip()
    err = _as_text(process.stderr).strip()
    if raise_errors and process.returncode != 0:
        raise RuntimeError(f"command: {command} exited with an error: {err} " f"code: {process.returncode}")
    return out, err, process.returncode
def get_full_openshift_version_from_release(release_image: str) -> str:
    """Ask `oc adm release info` for the exact version string of a release image."""
    with pull_secret_file() as pull_secret:
        command = (
            f"oc adm release info '{release_image}' --registry-config '{pull_secret}' -o json |"
            f" jq -r '.metadata.version'"
        )
        stdout, _, _ = run_command(command, shell=True)
    return stdout
def get_release_image(release_images, ocp_version, cpu_architecture="x86_64"):
    """Find the first release image matching both architecture and version.

    Falls back to a stub entry carrying only the architecture when no match
    exists (the caller then fills in the remaining fields).
    """
    matches = (
        image
        for image in release_images
        if image.get("cpu_architecture") == cpu_architecture and image.get("openshift_version") == ocp_version
    )
    return next(matches, {"cpu_architecture": cpu_architecture})
def set_release_image(release_image: dict, release_images: list, ocp_version, ocp_full_version):
    """Fill `release_image` from the environment and insert/update it in
    `release_images` in place.

    A stub entry (no "openshift_version" key yet) is appended; an existing
    entry is overwritten at its current position.
    """
    is_new_entry = "openshift_version" not in release_image
    index = -1 if is_new_entry else release_images.index(release_image)

    release_image["openshift_version"] = ocp_version
    release_image["url"] = os.getenv("OPENSHIFT_INSTALL_RELEASE_IMAGE")
    release_image["version"] = ocp_full_version

    if index == -1:
        release_images.append(release_image)
    else:
        release_images[index] = release_image
def main():
    """Merge the release image named by $OPENSHIFT_INSTALL_RELEASE_IMAGE into the
    list loaded from --src, then dump the result to stdout as compact JSON."""
    # Load default release images
    with open(args.src, "r") as f:
        release_images: list = json.load(f)

    override_image = os.getenv("OPENSHIFT_INSTALL_RELEASE_IMAGE")
    ocp_full_version = get_full_openshift_version_from_release(override_image)
    ocp_semver = semver.VersionInfo.parse(ocp_full_version)
    ocp_version = f"{ocp_semver.major}.{ocp_semver.minor}"

    # Find relevant release image (or a fresh stub) and fill it in.
    entry = get_release_image(release_images, ocp_version)
    set_release_image(entry, release_images, ocp_version, ocp_full_version)

    # Store release images
    json.dump(release_images, os.sys.stdout, separators=(",", ":"))
if __name__ == "__main__":
    # CLI entry point: --src names the default release-images JSON file;
    # main() writes the merged list to stdout.
    parser = ArgumentParser()
    parser.add_argument("--src", type=str, help="Release images list file path")
    args = parser.parse_args()
    main()
|
adriengentil/assisted-test-infra | src/assisted_test_infra/test_infra/helper_classes/config/controller_config.py | import warnings
from abc import ABC
from dataclasses import dataclass
from pathlib import Path
from typing import List
import consts
from .base_config import BaseConfig
@dataclass
class BaseNodeConfig(BaseConfig, ABC):
    """Common node/VM sizing and platform configuration shared by node controllers."""

    platform: str = None
    is_ipv4: bool = None
    is_ipv6: bool = None
    bootstrap_in_place: bool = None
    private_ssh_key_path: Path = None
    working_dir: str = consts.WORKING_DIR

    master_memory: int = None
    master_vcpu: int = None
    masters_count: int = None
    master_cpu_mode: str = None
    master_disk: int = None  # disk size in MB.
    master_disk_size_gib: str = None  # disk size in GB.
    master_disk_count: int = None  # number of disks to create
    master_boot_devices: List[str] = None  # order of boot devices to use

    worker_memory: int = None
    worker_vcpu: int = None
    workers_count: int = None
    worker_cpu_mode: str = None
    worker_disk: int = None
    worker_disk_size_gib: str = None  # disk size in GB.
    worker_disk_count: int = None
    worker_boot_devices: List[str] = None

    network_mtu: int = None

    @property
    def nodes_count(self):
        # Derived: total nodes = masters + workers, but only when both counts
        # are known; otherwise report 0 rather than guessing.
        if self.workers_count is not None and self.masters_count is not None:
            return self.masters_count + self.workers_count
        return 0

    @nodes_count.setter
    def nodes_count(self, nodes_count: int):
        # Intentionally a no-op: kept only so legacy assignments warn
        # instead of crashing.
        warnings.warn(
            "Setting nodes_count is deprecated. nodes_count value is taken from masters_count plus"
            " workers_count instead.",
            DeprecationWarning,
        )
|
nshaikh1/DesignAndMaintenance | design-patterns/flyweight-python/flyweight.py | import json
from typing import Dict
class Flyweight():
    """
    Stores intrinsic state — the portion of state common to many business
    entities. Extrinsic (per-entity) state is supplied to `operation` by the
    caller at use time.
    """

    def __init__(self, shared_state: str) -> None:
        self._shared_state = shared_state

    def operation(self, unique_state: str) -> None:
        """Render both the shared (intrinsic) and unique (extrinsic) state."""
        s, u = json.dumps(self._shared_state), json.dumps(unique_state)
        print(f"Flyweight: Displaying shared ({s}) and unique ({u}) state.", end="")
class FlyweightFactory():
    """
    The Flyweight Factory creates and manages the Flyweight objects. It ensures
    that flyweights are shared correctly. When the client requests a flyweight,
    the factory either returns an existing instance or creates a new one, if it
    doesn't exist yet.
    """

    def __init__(self, initial_flyweights: Dict) -> None:
        # Fix: keep the cache per instance. The original used a mutable
        # *class* attribute, so every FlyweightFactory shared one dict and
        # state from one factory leaked into all others.
        self._flyweights: Dict[str, Flyweight] = {}
        for state in initial_flyweights:
            self._flyweights[self.get_key(state)] = Flyweight(state)

    def get_key(self, state: Dict) -> str:
        """
        Returns a Flyweight's string hash for a given state.
        """
        return "_".join(sorted(state))

    def get_flyweight(self, shared_state: Dict) -> Flyweight:
        """
        Returns an existing Flyweight with a given state or creates a new one.
        """
        key = self.get_key(shared_state)

        if not self._flyweights.get(key):
            print("FlyweightFactory: Can't find a flyweight, creating new one.")
            self._flyweights[key] = Flyweight(shared_state)
        else:
            print("FlyweightFactory: Reusing existing flyweight.")

        return self._flyweights[key]

    def list_flyweights(self) -> None:
        """Print a summary of every cached flyweight key."""
        count = len(self._flyweights)
        print(f"FlyweightFactory: I have {count} flyweights:")
        print("\n".join(map(str, self._flyweights.keys())), end="")
def add_account_to_bank_database(
    factory: FlyweightFactory, account_number: str, owner: str,
    account_type: str, card_type: str, facility: str
) -> None:
    """Fetch (or create) the shared account-type flyweight, then render the
    account with its unique (extrinsic) state."""
    print("\n\nClient: Adding a customer account to database.")
    shared_state = [account_type, card_type, facility]
    unique_state = [account_number, owner]
    # The client code either stores or calculates extrinsic state and passes it
    # to the flyweight's methods.
    factory.get_flyweight(shared_state).operation(unique_state)
if __name__ == "__main__":
    """
    The client code usually creates a bunch of pre-populated flyweights in the
    initialization stage of the application.
    """

    # Shared (intrinsic) states: [account type, card type, facility].
    factory = FlyweightFactory([
        ["Checking Account", "Credit Card", "Net Banking"],
        ["Savings Account", "Debit Card", "Mobile Banking"],
        ["Interest Bearing Checking Accounts", "Credit Card", "Net Banking"],
        ["Brokerage_Accounts", "Credit_Card", "Mobile_Banking"],
    ])
    factory.list_flyweights()

    # This combination was pre-populated — the flyweight is reused.
    add_account_to_bank_database(
        factory, "010424454", "<NAME>", "Checking Account", "Credit Card", "Net Banking")

    # This combination is new — a flyweight is created on demand.
    add_account_to_bank_database(
        factory, "010424459", "<NAME>", "Investment Retirement Accounts", "Debit Card", "Net Banking")

    print("\n")
    factory.list_flyweights()
nshaikh1/DesignAndMaintenance | design-patterns/adapter-python/Adapter.py |
class OnePlus:
    """Adaptee: a phone reporting its battery capacity via the vendor-specific `O`."""

    def __init__(self):
        self.name = "OnePlus"

    def O(self):
        # Battery capacity in mAh, as a string.
        return "2400"
class Apple:
    """Adaptee: a phone reporting its battery capacity via the vendor-specific `A`."""

    def __init__(self):
        self.name = "Apple"

    def A(self):
        # Battery capacity in mAh, as a string.
        return "1750"
class Samsung:
    """Adaptee: a phone reporting its battery capacity via the vendor-specific `S`."""

    def __init__(self):
        self.name = "Samsung"

    def S(self):
        # Battery capacity in mAh, as a string.
        return "2000"
class Adapter:
    """Object adapter: exposes renamed methods on top of a wrapped object while
    delegating every other attribute access to it."""

    def __init__(self, obj, **adapted_methods):
        # Install the adapted callables straight into the instance dict, so
        # they take precedence over __getattr__ delegation.
        self.obj = obj
        self.__dict__.update(adapted_methods)

    def __getattr__(self, attr):
        # Fallback: anything not adapted is fetched from the adaptee.
        return getattr(self.obj, attr)

    def original_dict(self):
        """Return the adaptee's raw attribute dict."""
        return self.obj.__dict__
""" main method """
if __name__ == "__main__":
"""list to store objects"""
objects = []
oneplus = OnePlus()
objects.append(Adapter(oneplus, battery = oneplus.O))
apple = Apple()
objects.append(Adapter(apple, battery = apple.A))
samsung = Samsung()
objects.append(Adapter(samsung, battery = samsung.S))
for obj in objects:
print("{0} has {1}Mah battery".format(obj.name, obj.battery()))
|
nshaikh1/DesignAndMaintenance | design-patterns/proxy-python/proxy.py | <reponame>nshaikh1/DesignAndMaintenance
"""Interface for Proxy and real project."""
class Shape_Interface:
    """Subject interface shared by the real area calculator and its proxy.

    Every method must be overridden; calling one here raises NotImplementedError.
    """

    def triangle(self, height, base):
        raise NotImplementedError()

    def parallelo_gram(self, height, base):
        raise NotImplementedError()

    def rectangle(self, height, base):
        raise NotImplementedError()

    def trapezium(self, height, base, side):
        raise NotImplementedError()
class area_Finder(Shape_Interface):
    """Real subject: computes the area of each supported shape."""

    def triangle(self, height, base):
        # Area = (height * base) / 2
        return (height * base) / 2

    def parallelo_gram(self, height, base):
        return height * base

    def rectangle(self, height, base):
        return height * base

    def trapezium(self, height, base, side):
        # Area = ((side + base) / 2) * height
        return (side + base) / 2 * height
class Proxy_area(Shape_Interface):
    """Proxy: forwards every call to area_Finder, validating trapezium input first."""

    def __init__(self):
        self.area_Finder = area_Finder()

    def triangle(self, height, base):
        return self.area_Finder.triangle(height, base)

    def parallelo_gram(self, height, base):
        return self.area_Finder.parallelo_gram(height, base)

    def rectangle(self, height, base):
        return self.area_Finder.rectangle(height, base)

    def trapezium(self, height, base, side):
        # Guard clause: a zero side is rejected (implicitly returns None).
        if side == 0:
            print("Side length can't be none.")
            return None
        return self.area_Finder.trapezium(height, base, side)
# Demo: compute each area through the proxy with fixed dimensions.
proxy = Proxy_area()
height, base, side = 4, 2, 5
print('Area Of Triangle = ' + str(proxy.triangle(height, base)))
print('Area Of Parallelogram = ' + str(proxy.parallelo_gram(height, base)))
# Fixed label: this line prints the *rectangle* area (was mislabelled "triangle").
print('Area of rectangle = ' + str(proxy.rectangle(height, base)))
print('Area of trapezium = ' + str(proxy.trapezium(height, base, side)))
nshaikh1/DesignAndMaintenance | design-patterns/builder-python/builder.py | <reponame>nshaikh1/DesignAndMaintenance
# Director: returns assembled plane
class Director(object):
    """Director: drives whichever builder is set to assemble a complete Plane."""

    plane_builder = None

    def setBuilder(self, builder):
        self.plane_builder = builder

    def getPlane(self):
        """Assemble a plane: one body and engine, 2 wings, 4 wheels, 2 propellers."""
        plane = Plane()
        plane.setBody(self.plane_builder.getBody())
        plane.setEngine(self.plane_builder.getEngine())
        for _ in range(2):
            plane.attachWing(self.plane_builder.getWing())
        for _ in range(4):
            plane.attachWheel(self.plane_builder.getWheel())
        for _ in range(2):
            plane.attachPropeller(self.plane_builder.getPropeller())
        return plane
# Product: contains different parts
class Plane(object):
    """Product: a plane assembled part-by-part by the Director."""

    def __init__(self):
        # Part containers, filled via the attach*/set* methods.
        self.__wings = []
        self.__wheels = []
        self.__propeller = []
        self.__engine = None
        self.__body = None

    def attachWing(self, wing):
        self.__wings.append(wing)

    def attachWheel(self, wheel):
        self.__wheels.append(wheel)

    def attachPropeller(self, propeller):
        self.__propeller.append(propeller)

    def setEngine(self, engine):
        self.__engine = engine

    def setBody(self, body):
        self.__body = body

    def specification(self):
        """Print a spec sheet based on the first of each attached part."""
        print('body: %s' %self.__body.shape)
        print('engine hoorsepower: %d' %self.__engine.horsepower)
        print('wing length: %d ft.' %self.__wings[0].length)
        print('tire size: %d in.' %self.__wheels[0].size)
        print('Propeller diameter : %d cm.' %self.__propeller[0].diameter)
# Product parts
class Wing(object):
    # Wing length in feet; populated by a builder.
    length = None
class Wheel(object):
    # Tire size in inches; populated by a builder.
    size = None
class Engine(object):
    # Engine horsepower; populated by a builder.
    horsepower = None
class Body(object):
    # Body shape name; populated by a builder.
    shape = None
class Propeller(object):
    # Propeller diameter in centimetres; populated by a builder.
    diameter = None
# Builder Interface
class PlaneBuilderInterface(object):
    """Abstract builder: each getter returns one plane part.

    Defaults return None; concrete builders override every method.
    """
    def getBody(self): pass
    def getEngine(self): pass
    def getWing(self): pass
    def getWheel(self): pass
    def getPropeller(self): pass
# Builder: returns different product parts
class PlaneBuilder1(PlaneBuilderInterface):
    """Concrete builder #1: 'Aer0' body, 100000 hp engine, 231 ft wings,
    49 in wheels, 200 cm propellers."""

    def getBody(self):
        part = Body()
        part.shape = 'Aer0'
        return part

    def getEngine(self):
        part = Engine()
        part.horsepower = 100000
        return part

    def getWing(self):
        part = Wing()
        part.length = 231
        return part

    def getWheel(self):
        part = Wheel()
        part.size = 49
        return part

    def getPropeller(self):
        part = Propeller()
        part.diameter = 200
        return part
# Builder: returns different product parts
class PlaneBuilder2(PlaneBuilderInterface):
    """Concrete builder #2: 'Ballistic' body, 120000 hp engine, 197 ft wings,
    50 in wheels, 320 cm propellers."""

    def getBody(self):
        part = Body()
        part.shape = 'Ballistic'
        return part

    def getEngine(self):
        part = Engine()
        part.horsepower = 120000
        return part

    def getWing(self):
        part = Wing()
        part.length = 197
        return part

    def getWheel(self):
        part = Wheel()
        part.size = 50
        return part

    def getPropeller(self):
        part = Propeller()
        part.diameter = 320
        return part
if __name__ == '__main__':
    # Demo: one Director assembles two different planes by swapping builders.
    director = Director()

    print('*'*30)
    print("Plane1")
    director.setBuilder(PlaneBuilder1())
    plane1 = director.getPlane()
    plane1.specification()

    print('*'*30)
    print("Plane2")
    director.setBuilder(PlaneBuilder2())
    plane2 = director.getPlane()
    plane2.specification()
    print('*'*30)
nshaikh1/DesignAndMaintenance | design-patterns/factory-method-python/factory.py | <reponame>nshaikh1/DesignAndMaintenance<gh_stars>1-10
# Python Code for factory method
# it comes under the creational
# Design Pattern
class HexadecimalLocalizer:
    """Concrete product: renders a non-negative integer as a hexadecimal string."""

    def localize(self, value):
        """Return the lowercase hexadecimal representation of `value`.

        Fixes the zero case: the original digit-accumulation loop returned an
        empty string for 0; format() correctly yields "0". Also drops the
        hand-rolled digit table and unused loop counters.
        """
        return format(value, "x")
class BinaryLocalizer:
    """Concrete product: renders a non-negative integer's binary digits
    as a base-10 int (e.g. 5 -> 101)."""

    def localize(self, value):
        """Return the binary representation of `value`, encoded as an int of
        0/1 digits (matching the original's return type).

        Replaces the manual divide-by-2 loop (which carried unused locals and
        never terminated for negative input) with format().
        """
        return int(format(value, "b"))
class OctalLocalizer:
    """Concrete product: renders a non-negative integer's octal digits
    as a base-10 int (e.g. 8 -> 10)."""

    def localize(self, value):
        """Return the octal representation of `value`, encoded as an int of
        octal digits (matching the original's return type).

        Replaces the manual divide-by-8 loop (which carried unused locals and
        never terminated for negative input) with format().
        """
        return int(format(value, "o"))
def Factory(conversions ="Hexadecimal"):
    """Factory Method: instantiate the localizer registered for `conversions`.

    Raises KeyError for an unknown conversion name.
    """
    registry = {
        "Hexadecimal": HexadecimalLocalizer,
        "Binary": BinaryLocalizer,
        "Octal": OctalLocalizer,
    }
    return registry[conversions]()
if __name__ == "__main__":
    # Demo: convert a few sample values with each localizer.
    h = Factory("Hexadecimal")
    b = Factory("Binary")
    o = Factory("Octal")

    # Renamed from `input`, which shadowed the builtin of the same name.
    values = [7, 73, 178]
    for i in values:
        print("Hexadecimal of {} is {}".format(i, h.localize(i)))
        print("Binary of {} is {}".format(i, b.localize(i)))
        print("Octal of {} is {}".format(i, o.localize(i)))
|
nshaikh1/DesignAndMaintenance | design-patterns/decorator-python/decorator.py | <reponame>nshaikh1/DesignAndMaintenance
class distance_units:
    """Base component: holds a length in kilometres and returns it unchanged."""

    def __init__(self, length):
        self._length = length

    def convert(self):
        # Identity conversion: the stored value itself.
        return self._length
class decorator(distance_units):
    """Base decorator: mirrors the component interface, holding a raw length."""

    def __init__(self, length):
        self._length = length

    def convert(self):
        return self._length
class in_meter(distance_units):
    """Decorator: kilometres -> metres (multiply by 1000)."""

    def __init__(self, decorate):
        self._decorate = decorate

    def convert(self):
        return self._decorate * 1000
class in_centimeter(distance_units):
    """Decorator: metres -> centimetres (multiply by 100)."""

    def __init__(self, decorate):
        self._decorate = decorate

    def convert(self):
        return self._decorate * 100
class in_miles(distance_units):
    """Decorator: kilometres -> miles (divide by 1.609)."""

    def __init__(self, decorate):
        self._decorate = decorate

    def convert(self):
        return self._decorate / 1.609
class nautical_miles(distance_units):
    """Decorator: divides the wrapped value by 1.852 (km -> nautical miles)."""

    def __init__(self, decorate):
        self._decorate = decorate

    def convert(self):
        return self._decorate / 1.852
if __name__ == '__main__':
    # Interactive demo: read a distance in km and chain the conversions.
    distance = input("Enter distance in kilometers: ")
    in_kilometer = distance_units(int(distance))

    meter = (in_meter(in_kilometer.convert()))
    final_val = in_centimeter(meter.convert())  # cm derived from the metre value
    miles = (in_miles(in_kilometer.convert()))
    # NOTE(review): nautical miles are derived from the *miles* value here
    # (km / 1.609 / 1.852); deriving directly from kilometres (km / 1.852)
    # may have been intended — confirm.
    nautical = nautical_miles(miles.convert())

    print("Meter :", meter.convert(),"m")
    print("Centimeter :", (final_val.convert()),"cm")
    print("Miles :",round(miles.convert(),2),"mi")
    print("Nautical :",round(nautical.convert(),2),"nmi")
|
nshaikh1/DesignAndMaintenance | design-patterns/singleton-python/singleton.py | def singleton(myClass):
instances = {} #dictonary of instances
def getInstance(*args, **kwargs):
if myClass not in instances: #if instance of myClass is not already there in dictonary of instances
instances[myClass] = myClass(*args, **kwargs) #Create new instance
return instances[myClass] #return newly created instance
return getInstance #return function
@singleton  # decorator: every PhoneBook() call yields the same shared instance
class PhoneBook(object):
    """Singleton phone book backed by a name -> number dict."""

    def __init__(self):
        self.database = {}

    def AddContact(self, Name, Number):
        """Store (or overwrite) a contact."""
        self.database[Name] = Number
        print("One new contact added to Phone Book")

    def GetContact(self, Name):
        """Look up a contact's number by name (KeyError if absent)."""
        return self.database[Name]
if __name__ == "__main__":
    # Demo: every PhoneBook() call returns the same shared instance.
    x = PhoneBook()  # creates the very first instance of the singleton class
    x.AddContact("Jayesh", 6124697466)
    x.AddContact("Nomaan", 5307176298)
    x.AddContact("Diksha", 5309335595)
    # y points to the same instance created above as x, so it sees x's contacts.
    y = PhoneBook()
    print("Contact Jayesh: ", y.GetContact("Jayesh"))
    print("Contact Nomaan: ", y.GetContact("Nomaan"))
    print("Contact Diksha: ", y.GetContact("Diksha"))
|
nshaikh1/DesignAndMaintenance | design-patterns/bridge-python/bridge.py | <filename>design-patterns/bridge-python/bridge.py
from __future__ import annotations
from abc import ABC, abstractmethod
class Ford_Mustang:
    """
    Abstraction side of the Bridge: a Mustang trim holding a reference to an
    Engine_Type implementor and delegating engine pricing/specs to it.
    """

    def __init__(self, engine_Type: Engine_Type) -> None:
        self.engine_Type = engine_Type

    def Base_Model_Name(self) -> str:
        return "EcoBoost Fastback\n"

    def Base_Price(self) -> int:
        # Trim base price plus the engine variant's surcharge.
        return 26670 + self.engine_Type.Engine_Variant()

    def Engine_Specs(self) -> str:
        return self.engine_Type.Variant_Name()
class Eco(Ford_Mustang):
    """Refined abstraction: the Premium Fastback trim (base $31,685)."""

    def Base_Model_Name(self) -> str:
        return "EcoBoost Premium Fastback\n"

    def Base_Price(self) -> int:
        return 31685 + self.engine_Type.Engine_Variant()

    def Engine_Specs(self) -> str:
        return self.engine_Type.Variant_Name()
class Convertible(Ford_Mustang):
    """Refined abstraction: the Premium Convertible trim (base $37,185)."""

    def Base_Model_Name(self) -> str:
        return "EcoBoost Premium Convertible\n"

    def Base_Price(self) -> int:
        return 37185 + self.engine_Type.Engine_Variant()

    def Engine_Specs(self) -> str:
        return self.engine_Type.Variant_Name()
class GT(Ford_Mustang):
    """Refined abstraction: the GT Fastback trim (base $37,185)."""

    def Base_Model_Name(self) -> str:
        return "GT Fastback\n"

    def Base_Price(self) -> int:
        return 37185 + self.engine_Type.Engine_Variant()

    def Engine_Specs(self) -> str:
        return self.engine_Type.Variant_Name()
class Engine_Type(ABC):
    """
    Implementor interface of the Bridge: each concrete engine reports a price
    surcharge and a printable variant name. It deliberately differs from the
    Ford_Mustang abstraction's higher-level interface.
    """

    @abstractmethod
    def Engine_Variant(self) -> int:
        """Surcharge (USD) this engine adds to the trim's base price."""
        pass

    @abstractmethod
    def Variant_Name(self) -> str:
        """Printable engine description, newline-terminated."""
        pass
"""
Each concrete implementations of Engine_Type corresponds to a specific platform and implements
the implementation interface Engine_Type using that platform's API.
"""
class Default(Engine_Type):
    """Concrete implementor: stock 2.3L EcoBoost, no price surcharge."""

    def Engine_Variant(self) -> int:
        return 0

    def Variant_Name(self) -> str:
        return "Default - 2.3L EcoBoost® Engine\n"
class V6(Engine_Type):
    """Concrete implementor: 3.0L V6, 3780 surcharge."""

    def Engine_Variant(self) -> int:
        return 3780

    def Variant_Name(self) -> str:
        return "3.0L Ti-VCT V6 Engine (BULLITT™)\n"
class V8(Engine_Type):
    """Concrete implementor: 5.0L V8, 6899 surcharge."""

    def Engine_Variant(self) -> int:
        return 6899

    def Variant_Name(self) -> str:
        return "5.0L Ti-VCT V8 Engine (BULLITT™)\n"
class V12(Engine_Type):
    """Concrete implementor: 7.0L V12, 9780 surcharge."""

    def Engine_Variant(self) -> int:
        return 9780

    def Variant_Name(self) -> str:
        return "7.0L Ti-VCT V12 Engine (BULLITT™)\n"
def client_code(ford_Mustang: Ford_Mustang) -> None:
    """
    Exercise a car through the abstraction interface only.

    Works for any abstraction/implementor pairing chosen at construction
    time; prints name, price and engine specs to stdout.
    """
    print(ford_Mustang.Base_Model_Name(), end="")
    print("Price: ", ford_Mustang.Base_Price(), end="\n")
    print(ford_Mustang.Engine_Specs(), end="")
if __name__ == "__main__":
"""
The client code should be able to work with any pre-configured abstraction-
implementation combination.
"""
print("\n")
engine_Type = Default()
ford_Mustang = Ford_Mustang(engine_Type)
client_code(ford_Mustang)
print("\n")
engine_Type = V12()
ford_Mustang = Convertible(engine_Type)
client_code(ford_Mustang)
print("\n")
engine_Type = V6()
ford_Mustang = Ford_Mustang(engine_Type)
client_code(ford_Mustang)
print("\n")
engine_Type = V8()
ford_Mustang = GT(engine_Type)
client_code(ford_Mustang)
print("\n") |
nshaikh1/DesignAndMaintenance | design-patterns/composite-python/Composite.py | class CompositeInterface:
def showDetails(self):
    """Component interface hook: concrete elements override this to print themselves."""
    pass
class LeafElement(CompositeInterface):
    """Terminal node of the hierarchy: prints its label prefixed with a dash."""

    def __init__(self, *args):
        # First positional argument is this leaf's label ("position").
        self.position = args[0]

    def showDetails(self):
        """Print this leaf as '-<position>'."""
        print(f"-{self.position}")
class CompositeElement:
    """
    Non-terminal node of the hierarchy tree.  Holds a label and a list of
    children, and supports adding/removing them.
    """

    def __init__(self, *args):
        # First positional argument is this node's label ("position").
        self.position = args[0]
        self.children = []

    def add(self, child):
        """Attach `child` under this node."""
        self.children.append(child)

    def remove(self, child):
        """Detach `child` from this node."""
        self.children.remove(child)

    def showDetails(self):
        """Print this node's label, then each child prefixed with a dash."""
        print(self.position)
        for member in self.children:
            print("-", end="")
            member.showDetails()
"""main method"""
if __name__ == "__main__":
topLevelMenu = CompositeElement("Grocery Store")
subMenuItem1 = CompositeElement("Department 1")
subMenuItem2 = CompositeElement("Department 2")
subMenuItem11 = LeafElement("Item 1")
subMenuItem12 = LeafElement("Item 2")
subMenuItem21 = LeafElement("Item 3")
subMenuItem22 = LeafElement("Item 4")
subMenuItem1.add(subMenuItem11)
subMenuItem1.add(subMenuItem12)
subMenuItem2.add(subMenuItem22)
subMenuItem2.add(subMenuItem22)
topLevelMenu.add(subMenuItem1)
topLevelMenu.add(subMenuItem2)
topLevelMenu.showDetails()
|
nshaikh1/DesignAndMaintenance | design-patterns/prototype-python/prototype.py | #shallow copy : copy.copy()
#Shallow copy creates a copy of that object but references each element of that object.
#deep copy : copy.deepcopy()
#The objects that support cloning are called prototypes.
from copy import deepcopy
class body_Temp(object):
    """
    Prototype-pattern example: a body-temperature reading that can be cloned.

    Attributes:
        centigrade: temperature in degrees Celsius.
        Fahrenheit: temperature in degrees Fahrenheit.
    """

    def __init__(self, centigrade, Fahrenheit):
        self.centigrade = centigrade
        self.Fahrenheit = Fahrenheit

    def temp_Rise(self, centigrade, Fahrenheit):
        """Raise both readings by the given deltas."""
        self.centigrade += centigrade
        self.Fahrenheit += Fahrenheit

    def current_bodyTemp(self):
        """Classify the temperature, re-derive Fahrenheit from Celsius, print both."""
        if self.centigrade > 39.5 and self.Fahrenheit > 103.1:
            print("High Fever")
        elif self.centigrade > 38 and self.Fahrenheit > 100.4:
            print("Fever")
        else:
            print("Normal Body Temperature")
        # BUG FIX: the original's normal-temperature branch stored the
        # recomputed value in `self.Fahrenheit1` (typo), so a stale Fahrenheit
        # was printed.  The recompute+print is identical in all branches, so
        # it is hoisted out of the if/elif/else.
        self.Fahrenheit = (self.centigrade * 9 / 5) + 32
        print(f'{self.centigrade} *C, {self.Fahrenheit} *F')

    def clone(self):
        """Return a deep copy of this reading (the prototype operation)."""
        return deepcopy(self)
#clone function is used for deepcopying the current object
class prototype(object):
    """Minimal cloneable base class: clone() returns a deep copy of the instance."""

    def clone(self):
        return deepcopy(self)
if __name__ == '__main__':
    # Demo: clone a normal reading twice, then mutate only the clones.
    p1 = body_Temp(37, 98.6)
    p1.current_bodyTemp()
    fever = p1.clone()
    high_Fever = p1.clone()
    fever.temp_Rise(1.5, 2.9)
    high_Fever.temp_Rise(3.0, 6.9)
    fever.current_bodyTemp()
    high_Fever.current_bodyTemp()
    print("No change is made to the prototype object even after cloning(deep copy)")
    p1.current_bodyTemp()
|
nshaikh1/DesignAndMaintenance | design-patterns/abstract-factory-python/abstract_factory_method.py | # Python Code for object
# oriented concepts using
# the abstract factory
# design pattern
import random
class Cars_avaliable:
    """
    Holds a car factory callable; show_car() builds one car from it and
    prints its name and hourly fee.  (Class name typo kept: public interface.)
    """

    def __init__(self, Car=None):
        self.cars = Car

    def show_car(self):
        """Instantiate one car via the stored factory and print its details."""
        rental = self.cars()
        print(f'Name of car {rental}')
        print(f'Its priced at {rental.Fee()} /hour')
class BMW_330Ci:
    """Concrete product: BMW 330Ci, 56/hour."""

    def Fee(self):
        return 56

    def __str__(self):
        return "BMW_330Ci"
class Audi_A4:
    """Concrete product: Audi A4, 30/hour."""

    def Fee(self):
        return 30

    def __str__(self):
        return "Audi_A4"
class Merc_S550:
    """Concrete product: Mercedes S550, 90/hour."""

    def Fee(self):
        return 90

    def __str__(self):
        return 'Merc_S550'
def r_choice():
    """Factory: pick one of the three car classes at random and instantiate it."""
    car_cls = random.choice([BMW_330Ci, Audi_A4, Merc_S550])
    return car_cls()
if __name__ == "__main__":
rental = Cars_avaliable(r_choice)
for x in range(7):
rental.show_car() |
lereldarion/slam | slam/plugins/backlight.py | # Copyright (c) 2013-2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Backlight management plugin
"""
from .. import util
logger = util.setup_logger (__name__)
# Make the prop object available in Layout.Output object ? (maybe subclass of a layout.Properties)
# Initialize watched props by edid (from layout) + ones declared by plugins
#
# Callbacks on plugins for modifs.
# Add dbus interface (with list of interfaces defined by core + plugins)
#
# Split management from layout ?
|
lereldarion/slam | slam/__init__.py | <filename>slam/__init__.py
# Copyright (c) 2013-2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
Daemon to manage multi monitors
Frontend
'''
# Config and start
def default_configuration (config_dict):
    """
    Complete `config_dict` in place with default settings and normalize path
    entries to pathlib.Path.

    Keys handled: log_file, log_level, db_file, backend_module, backend_args,
    oneshot.  For the path keys (log_file, db_file): a missing key receives
    its default under ~/.config/slam; an explicit None is kept as None
    (feature disabled); any other value is converted to Path.  Parent
    directories of non-None paths are created.
    """
    from pathlib import Path
    import logging
    from . import xcb_backend
    default_working_dir = Path.home ().joinpath (".config", "slam")
    def normalize_or_default_path (key, default_path):
        # BUG FIX: the original only acted when the key was present, so a
        # missing key stayed missing and start () later raised KeyError; it
        # also replaced an explicit None with the default, contradicting its
        # own "None is propagated as is" comment.
        f = config_dict.get (key, default_path)
        if f is not None:
            f = Path (f) # Normalize (also applies to the default)
            f.parent.mkdir (parents=True, exist_ok=True)
        # None is propagated as is
        config_dict[key] = f
    # Logging
    normalize_or_default_path ("log_file", default_working_dir.joinpath ("log"))
    config_dict.setdefault ("log_level", logging.INFO)
    # Database
    normalize_or_default_path ("db_file", default_working_dir.joinpath ("database"))
    # Backend
    config_dict.setdefault ("backend_module", xcb_backend)
    config_dict.setdefault ("backend_args", {})
    # Oneshot mode (start, apply config, stop)
    config_dict.setdefault ("oneshot", False)
def start (**config):
    """
    Start the daemon.
    Config parameters : see slam.default_configuration
    """
    from . import util
    from . import layout
    # Fill defaults / normalize paths before anything reads config keys
    default_configuration (config)
    logger = util.setup_root_logging (config["log_file"], config["log_level"])
    logger.info ("SESSION START")
    # Try loading database file.
    # On failure we will just have an empty database, and start from zero.
    config_manager = layout.Manager (config["db_file"])
    # Launch backend and event loop
    # db_file is written at each modification of database to avoid failures
    try:
        backend = config["backend_module"].Backend (**config["backend_args"])
        try:
            config_manager.start (backend)
            if not config["oneshot"]:
                util.Daemon.event_loop (backend)
        except Exception:
            # Log backend detailed state in case of error
            logger.error ("logging backend state:\n" + backend.dump ())
            raise
        finally:
            # Always release the X connection, even on error
            backend.cleanup ()
    except Exception:
        # Log all top level errors
        # NOTE(review): the exception is swallowed here — start () returns
        # normally even after a fatal error; confirm this is intended.
        logger.exception ("fatal error")
    finally:
        logger.info ("SESSION END")
lereldarion/slam | setup.py | # Copyright (c) 2013-2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from setuptools import setup, Extension
import io
# Read the long description up front with a context manager: the original
# called io.open (...).read () inline and left the file handle to be
# reclaimed by the garbage collector.
with io.open ("Readme.md", encoding = "utf-8") as readme:
    long_description = readme.read ()

setup (
    # Base info
    name = "slam",
    version = "0.4.3",
    author = "<NAME>",
    author_email = "<EMAIL>",
    # Code content
    packages = ["slam"],
    ext_modules = [
        Extension ("slam.ext",
            libraries = ["isl", "boost_python3"],
            sources = ["ext/boost_wrapper.cpp", "ext/screen_layout.cpp"])
    ],
    # Metadata
    description = "Screen layout manager",
    long_description = long_description,
    url = "https://github.com/lereldarion/slam",
    license = "MIT",
    # Classification
    classifiers = [
        "Development Status :: 3 - Alpha",
        "Environment :: No Input/Output (Daemon)",
        "Intended Audience :: End Users/Desktop",
        "License :: OSI Approved :: MIT License",
        "Operating System :: Unix",
        "Programming Language :: Python :: 3",
        "Topic :: Desktop Environment",
        "Topic :: Desktop Environment :: Window Managers"
    ]
)
|
lereldarion/slam | slam/xcb_backend.py | # Copyright (c) 2013-2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
XCB interface part of the daemon.
- Keeps a valid copy of the xrandr state (updating it on events)
- Can generate and apply layouts from this state
- Signal the config manager when the current state changed
"""
import operator
import functools
import struct
import xcffib, xcffib.xproto, xcffib.randr
from . import util
from . import layout
from .util import Pair
from .layout import BackendError, BackendFatalError
logger = util.setup_logger (__name__)
class Backend (util.Daemon):
    """
    XRandR backend: keeps a mirror of the X screen state (refreshed on randr
    events), converts it to/from layout.ConcreteLayout, and pushes layouts
    chosen by the manager back to X.  Driven as a file-descriptor daemon
    (see util.Daemon).
    """
    ##################
    # Main Interface #
    ##################
    def __init__ (self, **kwd):
        """
        Backend init. Optional arguments :
        dpi :
            By default X11 forces a 96 dpi to not bother with it. It affects the reported size of the virtual screen.
            if set to a value, force this value
            if not set (default), use 96
            This value doesn't make sense anyway when more than 1 screen exists
        screen, display :
            override X11 default connect information
        """
        self.dpi = kwd.get ("dpi", 96)
        # Placeholder callback until a manager attaches (see attach ())
        self.update_callback = (lambda _: 0)
        self.init_randr_connection (**kwd)
    def cleanup (self):
        """ Close the X connection. """
        self.conn.disconnect ()
    def fileno (self):
        """ File descriptor of the X connection, polled by the event loop. """
        return self.conn.get_file_descriptor ()
    def activate (self):
        """ Daemon callback """
        # Flush all events
        if self.flush_notify ():
            # If one of them was from Randr, update the state, and notify manager
            try:
                self.reload_state ()
            except BackendError as e:
                # Failure means another change arrived during reload. So reload again
                # (logger.warn is a deprecated alias of logger.warning)
                logger.warning ("reload state failed: {}".format (e))
                self.activate_manually ()
                return True
            self.update_callback (self.to_concrete_layout ())
        # Tell event loop to continue
        return True
    def dump (self):
        """ Returns internal state debug info as a string """
        acc = "Screen: {:s}\n".format (self.screen_size)
        acc += "Modes\n"
        for mode in self.screen_res.modes:
            acc += "\t{0}\t{1[0]:s} {1[1]}Hz\n".format (mode.id, mode_info (mode))
        acc += "CRTCs\n"
        for c in self.screen_res.crtcs:
            info = self.crtcs[c]
            acc += "\t{}\t{:s}+{:s}\n".format (c, Pair.from_size (info), Pair.from_struct (info))
            acc += "\t|\tOutput[active]: {}\n".format (util.sequence_stringify (
                info.possible, highlight = (lambda o: o in info.outputs)))
            acc += "\t|\tRotations[current]: {}\n".format (info.transform)
            acc += "\t\\\tMode: {}\n".format (info.mode)
        acc += "Outputs\n"
        for o in self.screen_res.outputs:
            info = self.outputs[o]
            if self.is_connected (o):
                acc += "\t{}\t{}\tConnected\n".format (o, info.name)
                acc += "\t|\tSize: {:p}\n".format (Pair.from_size (info, "mm_{}"))
                acc += "\t|\tCrtcs[active]: {}\n".format (util.sequence_stringify (
                    info.crtcs, highlight = (lambda c: c == info.crtc)))
                acc += "\t|\tClones: {}\n".format (util.sequence_stringify (info.clones))
                acc += "\t|\tModes[pref]: {}\n".format (util.sequence_stringify (enumerate (info.modes),
                    highlight = (lambda t: t[0] < info.num_preferred), stringify = (lambda t: t[1])))
                acc += "\t\\\tProperties:\n"
                for name, prop in info.props.items ():
                    acc += "\t\t\t{}: {}\n".format (name, prop)
            else:
                acc += "\t{}\t{}\tDisconnected\n".format (o, info.name)
        return acc
    ############################
    # Layout Manager Interface #
    ############################
    def attach (self, callback):
        """ Register the callback from the manager """
        self.update_callback = callback
        callback (self.to_concrete_layout ()) # initial call to let the manager update itself
    def apply_concrete_layout (self, concrete):
        """ Set up a concretelayout from the manager in X """
        # Apply may generate new notifications from X, so reactivate us to handle them
        self._apply_concrete_layout (concrete)
        self.activate_manually ()
    ####################
    # XRandR internals #
    ####################
    randr_version = Pair (1, 3)
    def init_randr_connection (self, **kwd):
        """ Starts connection, construct an initial state, setup events. """
        # Connection
        self.conn = xcffib.connect (display = kwd.get ("display"))
        # Randr init
        self.conn.randr = self.conn (xcffib.randr.key)
        version = Pair.from_struct (self.conn.randr.QueryVersion (*Backend.randr_version).reply (), "major_version", "minor_version")
        if (not version >= Backend.randr_version):
            # BUG FIX: this message referenced the undefined name `Client`, so
            # a version mismatch raised NameError instead of BackendFatalError.
            raise BackendFatalError ("version: requested >= {}, got {}".format (Backend.randr_version, version))
        # Properties query object
        self.prop_manager = PropertyQuery (self.conn) # TODO
        # Internal state
        screen_setup = self.conn.setup.roots[kwd.get ("screen", self.conn.pref_screen)]
        self.root = screen_setup.root
        limits = self.conn.randr.GetScreenSizeRange (self.root).reply ()
        self.screen_limit_min = Pair.from_size (limits, "min_{}")
        self.screen_limit_max = Pair.from_size (limits, "max_{}")
        self.reload_state ()
        # Randr register for events
        masks = xcffib.randr.NotifyMask.ScreenChange | xcffib.randr.NotifyMask.CrtcChange
        masks |= xcffib.randr.NotifyMask.OutputChange | xcffib.randr.NotifyMask.OutputProperty
        self.conn.randr.SelectInput (self.root, masks)
        self.conn.flush ()
    def reload_state (self):
        """ Updates the state by reloading everything """
        # Get screen ressources and screen size (using geometry of root window)
        cookie_res = self.conn.randr.GetScreenResources (self.root)
        cookie_size = self.conn.core.GetGeometry (self.root)
        self.screen_res = cookie_res.reply ()
        self.screen_size = Pair.from_size (cookie_size.reply ())
        # Send queries for Crtc and Output info
        crtc_req, output_req = {}, {}
        for c in self.screen_res.crtcs:
            crtc_req[c] = self.conn.randr.GetCrtcInfo (c, self.screen_res.config_timestamp)
        for o in self.screen_res.outputs:
            output_req[o] = self.conn.randr.GetOutputInfo (o, self.screen_res.config_timestamp)
        # Get Crtc info
        self.crtcs = {}
        for c in self.screen_res.crtcs:
            self.crtcs[c] = check_reply (crtc_req[c].reply ())
            self.crtcs[c].transform = XcbTransform.from_xcffib_struct (self.crtcs[c])
        # Get output info
        self.outputs = {}
        for o in self.screen_res.outputs:
            self.outputs[o] = check_reply (output_req[o].reply ())
            self.outputs[o].name = bytes (self.outputs[o].name).decode ()
            if self.is_connected (o):
                self.outputs[o].props = self.prop_manager.get_properties (o)
    def flush_notify (self):
        """ Discards all events, returns True if one was from Randr """
        had_randr_event = False
        ev = self.conn.poll_for_event ()
        while ev:
            # Detect if we received at least one randr event
            if isinstance (ev, (xcffib.randr.ScreenChangeNotifyEvent, xcffib.randr.NotifyEvent)):
                had_randr_event = True
            # Print debug info for each randr event
            if isinstance (ev, xcffib.randr.ScreenChangeNotifyEvent):
                logger.debug ("[notify] ScreenChange = {:s}, {:p} | {}".format (Pair.from_size (ev), Pair.from_size (ev, "m{}"),
                    XcbTransform (ev.rotation)))
            if isinstance (ev, xcffib.randr.NotifyEvent):
                if ev.subCode == xcffib.randr.Notify.CrtcChange:
                    logger.debug ("[notify] CrtcChange[{}] = {:s}+{:s} | {}".format (ev.u.cc.crtc,
                        Pair.from_size (ev.u.cc), Pair.from_struct (ev.u.cc), XcbTransform (ev.u.cc.rotation)))
                if ev.subCode == xcffib.randr.Notify.OutputChange:
                    logger.debug ("[notify] OutputChange[{}] = crtc[{}]".format (ev.u.oc.output, ev.u.oc.crtc))
                if ev.subCode == xcffib.randr.Notify.OutputProperty:
                    logger.debug ("[notify] OutputProperty[{}]".format (ev.u.op.output))
            ev = self.conn.poll_for_event ()
        return had_randr_event
    def to_concrete_layout (self):
        """ Convert current X state into ConcreteLayout """
        def find_best_mode_size (o_data):
            # Lexicographic order, so biggest and then fastest mode
            return max (map (self.mode_by_id, self.preferred_mode_ids (o_data))) [0]
        def make_output_entry (o_id):
            xcb_o_data = self.outputs[o_id]
            layout_output = layout.ConcreteLayout.Output (edid = xcb_o_data.props["edid"], preferred_size = find_best_mode_size (xcb_o_data))
            crtc = self.crtcs.get (xcb_o_data.crtc, None)
            if crtc and self.mode_exists (crtc.mode):
                layout_output.enabled = True
                layout_output.base_size = self.mode_by_id (crtc.mode) [0]
                layout_output.position = Pair.from_struct (crtc)
                layout_output.transform = crtc.transform.to_slam ()
            return (xcb_o_data.name, layout_output)
        return layout.ConcreteLayout (
            outputs = dict (map (make_output_entry, filter (self.is_connected, self.outputs))),
            vs_size = self.screen_size, vs_min = self.screen_limit_min, vs_max = self.screen_limit_max)
    def _apply_concrete_layout (self, concrete):
        """ Internal function that push a ConcreteLayout to X """
        output_id_by_name = {self.outputs[o].name: o for o in self.outputs}
        enabled_outputs = [n for n in concrete.outputs if concrete.outputs[n].enabled]
        new_output_by_crtc = dict.fromkeys (self.crtcs)
        ### Compute crtc <-> output mapping ###
        unallocated = set (enabled_outputs)
        def try_allocate_crtc (c_id, o_name):
            # Test if crtc / output not already allocated
            if new_output_by_crtc[c_id] is None and o_name in unallocated:
                # Does it fits into the Crtc ?
                transform = XcbTransform.from_slam (concrete.outputs[o_name].transform, self.crtcs[c_id].rotations)
                if transform.valid () and output_id_by_name[o_name] in self.crtcs[c_id].possible:
                    new_output_by_crtc[c_id] = o_name
                    unallocated.remove (o_name)
        # Outputs already enabled may keep the same crtc if not clones
        for o_name in enabled_outputs:
            for c_id in self.crtcs:
                if output_id_by_name[o_name] in self.crtcs[c_id].outputs:
                    try_allocate_crtc (c_id, o_name)
        # Map remaning outputs
        for o_name in enabled_outputs:
            if o_name in unallocated:
                for c_id in self.crtcs:
                    try_allocate_crtc (c_id, o_name)
        if len (unallocated) > 0:
            raise BackendError ("crtc allocation (tmp = {}) failed for outputs {}".format (new_output_by_crtc, list (unallocated)))
        ### Utility functions to wrap Xcb calls ###
        timestamp = self.screen_res.timestamp
        c_timestamp = self.screen_res.config_timestamp
        def resize_screen (virtual_size):
            # The dpi is used to compute the physical size of the virtual screen (required by X when we resize)
            # Old Gui programs might read this size and compute the dpi from it.
            # Newer program should infer per-screen dpi and ignore this value...
            mm_per_inch = 25.4
            phy = Pair (map (lambda pixels: int (pixels * mm_per_inch / self.dpi), virtual_size))
            logger.debug ("[send] SetScreenSize = {:s}, {:p}".format (virtual_size, phy))
            self.conn.randr.SetScreenSize (self.root, virtual_size.w, virtual_size.h, phy.w, phy.h, is_checked = True).check ()
        # Crtc setup wrapper are simple, and just do some data formatting from slam to xcb.
        # set_crtc is the lowest level
        def set_crtc (t, c_id, pos, mode, tr, outputs):
            logger.debug ("[send] SetCrtcConfig[{}] = {} | {}".format (c_id, outputs, tr))
            request = self.conn.randr.SetCrtcConfig (c_id, t, c_timestamp, pos.x, pos.y, mode, tr.mask, len (outputs), outputs)
            return check_reply (request.reply ()).timestamp
        def disable_crtc (t, c_id):
            # BUG FIX: originally ignored its `t` parameter and closed over the
            # local `timestamp` instead (equivalent at all current call sites,
            # which always pass the current `timestamp`, but fragile).
            return set_crtc (t, c_id, Pair (0, 0), 0, XcbTransform (), [])
        def assign_crtc (t, c_id, o_name):
            o_data, o_id = concrete.outputs[o_name], output_id_by_name[o_name]
            return set_crtc (t, c_id, o_data.position, self.find_mode_id (o_data.base_size, o_id), XcbTransform.from_slam (o_data.transform), [o_id])
        ### Push new layout to X ###
        # We grab the server to make all requests appear 'atomic' to other listeners of xrandr events.
        # It will force X to ignore other clients until we ungrab.
        # Ungrabing the server is ensured whatever happens with a 'finally' bloc
        self.conn.core.GrabServer (is_checked = True).check ()
        try:
            # SetCrtc will fail if the new crtc doesn't fit into the current virtual screen.
            # So resize the virtual screen to max (before, after) sizes to avoid this problem.
            before, after = self.screen_size, concrete.virtual_screen_size
            temporary = Pair (map (max, before, after))
            resize_screen (temporary)
            # Crtc changes are sequential and each intermediate state must be valid.
            # Temporarily mapping the same output to two Crtc would be an error.
            # We avoid problematic case by :
            # - avoiding output exchange between two Crtcs in the mapping algorithm (swap is annoying)
            # - if an output goes from a cloned Crtc (>1 outputs) to an empty one, remove it
            # before setting the new Crtc
            # Keep in mind the Manager doesn't create cloned outputs
            # Disable newly unused Crtcs
            for c_id in self.crtcs:
                if new_output_by_crtc[c_id] is None and self.crtcs[c_id].num_outputs > 0:
                    timestamp = disable_crtc (timestamp, c_id)
            # Update cloned Crtcs first
            for c_id in self.crtcs:
                if new_output_by_crtc[c_id] is not None and self.crtcs[c_id].num_outputs > 1:
                    timestamp = assign_crtc (timestamp, c_id, new_output_by_crtc[c_id])
            # Setup unused or single output Crtcs
            for c_id in self.crtcs:
                if new_output_by_crtc[c_id] is not None and self.crtcs[c_id].num_outputs <= 1:
                    timestamp = assign_crtc (timestamp, c_id, new_output_by_crtc[c_id])
            # TODO disable stupid modes (panning, crtctransform, etc) ?
            # After all Crtc modifications, set the final virtual screen size if needed
            if temporary != after:
                resize_screen (after)
        except BackendError:
            # Settings crtcs may fail due to invisible constraints (like limited clocks generators)
            # If a SetCrtc request failed in a clean way (reported an error), try to restore old config
            # However, X protocol errors are treated as fatal (ie resetting the crtc will probably fail horribly too)
            logger.info ("restoring state")
            # Clean state : disable all crtcs and reset screen size
            for c_id in self.crtcs:
                timestamp = disable_crtc (timestamp, c_id)
            resize_screen (before)
            # Restore crtcs
            for c_id, d in self.crtcs.items ():
                timestamp = set_crtc (timestamp, c_id, Pair.from_struct (d), d.mode, d.transform, d.outputs)
            raise
        finally:
            self.conn.core.UngrabServer (is_checked = True).check ()
    ###########
    # Helpers #
    ###########
    def is_connected (self, o_id):
        # Due to observed strange states, do not trust the connected flag from X
        # Also check that we have modes and possible crtcs
        o_data = self.outputs[o_id]
        return (o_data.connection == xcffib.randr.Connection.Connected and
                len (o_data.modes) > 0 and
                len (o_data.crtcs) > 0)
    def mode_by_id (self, m_id):
        try:
            return mode_info ([m for m in self.screen_res.modes if m.id == m_id][0])
        except IndexError:
            # Mode not found indicates some corruption in X data, bail out
            raise BackendFatalError ("mode {} not found".format (m_id))
    def mode_exists (self, m_id):
        return len ([m for m in self.screen_res.modes if m.id == m_id]) == 1
    def preferred_mode_ids (self, o_data):
        # First num_preferred entries of o_data.modes are the preferred ones
        if o_data.num_preferred > 0:
            return (o_data.modes[i] for i in range (o_data.num_preferred))
        else:
            return o_data.modes
    def find_mode_id (self, size, o_id):
        preferred_ids = self.preferred_mode_ids (self.outputs[o_id])
        matching_size_ids = (m_id for m_id in preferred_ids if self.mode_by_id (m_id) [0] == size)
        try:
            return max (matching_size_ids, key = self.mode_by_id)
        except ValueError:
            # We should have found this mode since it was extracted from this list in the first place
            raise BackendFatalError ("no matching mode for size {} and output {}".format (size, self.outputs[o_id].name))
def mode_info (mode):
    """ Return (size Pair, refresh rate in Hz) extracted from an X mode record. """
    if mode.htotal > 0 and mode.vtotal > 0:
        freq = int (mode.dot_clock / (mode.htotal * mode.vtotal))
    else:
        freq = 0
    return (Pair.from_size (mode), freq)
def check_reply (reply):
    """
    Validate an xcb randr reply's status field.

    Returns the reply unchanged on success; raises BackendError otherwise.
    Timestamp errors are phrased separately because they are transient and
    the manager can recover by retrying.
    """
    # BUG FIX: the original interpolated the class object itself, producing
    # "<class '...'>" noise in error messages; use the class name instead.
    req_name = reply.__class__.__name__
    e = xcffib.randr.SetConfig
    if reply.status == e.Success:
        return reply
    # Invalid timing error should be temporary, let the manager recover
    if reply.status == e.InvalidConfigTime:
        raise BackendError ("invalid config timestamp ({})".format (req_name))
    if reply.status == e.InvalidTime:
        raise BackendError ("invalid timestamp ({})".format (req_name))
    # Other errors may indicate a bigger problem
    raise BackendError ("request failed ({})".format (req_name))
class XcbTransform (object):
    """
    Stores X rotation & rotation capability masks.
    Helps to make conversions
    X format : reflections (xy), then trigo rotation : (rx, ry, rot), as a bitmask (xcffib.randr.Rotation)
    """
    class StaticData (object):
        # Lookup tables over xcffib.randr.Rotation flags, computed once.
        def __init__ (self):
            self.cls = xcffib.randr.Rotation
            # name -> flag value for every Rotation class attribute
            self.flags_by_name = util.class_attributes (self.cls)
            # Union of all rotation/reflection flags
            self.all_flags = functools.reduce (operator.__or__, self.flags_by_name.values ())
            # slam rotation value (from layout.Transform.rotations) -> Rotate_<n> flag
            self.flags_by_rotation_value = {rot: self.flags_by_name["Rotate_" + str (rot)] for rot in layout.Transform.rotations}
    # Shared table instance, built at class-definition time; the constructor
    # defaults below are bound from it at def time (safe: defined just above).
    static = StaticData ()
    # Constructors
    def __init__ (self, mask = static.cls.Rotate_0, allowed_masks = static.all_flags):
        """ Init with explicit masks """
        self.mask = mask
        self.allowed_masks = allowed_masks
    @staticmethod
    def from_xcffib_struct (st):
        """ Extract masks from xcffib struct """
        return XcbTransform (st.rotation, st.rotations)
    @staticmethod
    def from_slam (tr, allowed_masks = static.all_flags):
        """ Build from Slam rotation """
        st = XcbTransform.static
        return XcbTransform (st.flags_by_rotation_value[tr.rotation] | (st.cls.Reflect_X if tr.reflect else 0), allowed_masks)
    # Conversion, validity, pretty print
    def to_slam (self):
        """ Convert to slam Transform """
        try:
            # Exactly one Rotate_<n> flag must be set; unpacking enforces it
            [rot] = (r for r, mask in self.static.flags_by_rotation_value.items () if mask & self.mask)
        except ValueError:
            raise BackendFatalError ("xcffib transformation has 0 or >1 rotation flags")
        tr = layout.Transform ()
        if self.mask & self.static.cls.Reflect_X: tr = tr.reflectx ()
        if self.mask & self.static.cls.Reflect_Y: tr = tr.reflecty ()
        return tr.rotate (rot)
    def valid (self):
        """ Check if current mask is within the capability mask """
        return self.allowed_masks & self.mask == self.mask
    def __str__ (self):
        # List allowed flags, highlighting those currently set
        allowed_flags = ((n, f) for n, f in sorted (self.static.flags_by_name.items ()) if f & self.allowed_masks)
        return util.sequence_stringify (allowed_flags, highlight = lambda t: t[1] & self.mask, stringify = lambda t: t[0])
##################
# Xcb properties #
##################
class Fail (Exception):
    """ Module-local exception signalling a property query/parse failure. """
class PropertyQuery:
    """ Allow to query/change property values of RandR outputs (EDID, backlight) """
    class XInterface:
        """ Wraps the X11 atom naming system and the raw property requests """
        def __init__ (self, conn):
            self.conn = conn
            self.atoms = {}  # cache: property name -> interned atom id
        def atom (self, name):
            # Intern the atom on first use, then serve it from the cache
            if name not in self.atoms:
                self.atoms[name] = self.conn.core.InternAtom (False, len (name), name).reply ().atom
            return self.atoms[name]
        def get_property (self, output, name):
            """ Raw property read; raises Fail if the property does not exist """
            data = self.conn.randr.GetOutputProperty (output, self.atom (name), xcffib.xproto.GetPropertyType.Any, 0, 10000, False, False).reply ()
            # An all-zero reply is how randr reports a missing property
            if data.format == 0 and data.type == xcffib.xproto.Atom._None and data.bytes_after == 0 and data.num_items == 0:
                raise Fail ("property '{}' not found".format (name))
            return data
        def set_property (self, output, name, type, format, elem, data):
            """ Raw property write (checked request; raises on X error) """
            self.conn.randr.ChangeOutputProperty (output, self.atom (name), type, format, xcffib.xproto.PropMode.Replace, elem, data, is_checked = True).check ()
        def property_config (self, output, name):
            # Property metadata (value range, validity)
            return self.conn.randr.QueryOutputProperty (output, self.atom (name)).reply ()
    class Base:
        """ Property base class: stores the XInterface used for requests """
        def __init__ (self, x):
            self.x = x
    class Edid (Base):
        """
        EDID (unique device identifier) Xcb property (str)
        The bytes 8-15 are enough for identification, the rest is mode data
        """
        name = "EDID"
        def __init__ (self, *args):
            super ().__init__ (*args)
        def get (self, output):
            """ Return the EDID identifier as a hex string, or None if absent/malformed """
            try:
                data = self.x.get_property (output, self.name)
                if not (data.format == 8 and data.type == xcffib.xproto.Atom.INTEGER and data.bytes_after == 0 and data.num_items > 0): raise Fail ("invalid 'edid' value formatting")
                if data.data[:8] != [0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00]: raise Fail ("'edid' lacks 1.3 constant header")
                # Identification bytes (8-15), formatted as uppercase hex
                return ''.join (map ("{:02X}".format, data.data[8:16]))
            except Fail as e:
                logger.info (e)
                return None
    class Backlight (Base):
        """
        Backlight Xcb property (value, lowest, highest)
        """
        name = "BACKLIGHT"
        def __init__ (self, *args):
            super ().__init__ (*args)
        def get (self, output):
            """ Return (value, lowest, highest), or None if absent/malformed """
            try: data = self.x.get_property (output, self.name)
            except Fail: return None # no backlight interface is not an error
            try:
                # Data : backlight value
                if not (data.format > 0 and data.type == xcffib.xproto.Atom.INTEGER and data.bytes_after == 0 and data.num_items == 1): raise Fail ("invalid 'backlight' value formatting")
                (value,) = struct.unpack_from ({ 8: "b", 16: "h", 32: "i" } [data.format], bytes (data.data))
                # Config : backlight value range
                config = self.x.property_config (output, self.name)
                if not (config.range and len (config.validValues) == 2): raise Fail ("invalid 'backlight' config")
                lowest, highest = config.validValues
                if not (lowest <= value and value <= highest): raise Fail ("'backlight' value out of bounds")
                return (value, lowest, highest)
            except Fail as e:
                logger.info (e)
                return None
        def set (self, output, value):
            # NOTE(review): writes atom "backlight" while get () reads "BACKLIGHT";
            # X atoms are case-sensitive — confirm this mismatch is intended
            self.x.set_property (output, "backlight", xcffib.xproto.Atom.INTEGER, 32, 1, struct.pack ("=i", value))
    def __init__ (self, conn):
        x = self.XInterface (conn)
        # lowercase property name -> handler instance
        self.properties = dict ((cls.name.lower (), cls (x)) for cls in [self.Edid, self.Backlight])
    def get_properties (self, output):
        """ Query every known property for <output>; absent/malformed ones map to None """
        return dict ((name, prop.get (output)) for name, prop in self.properties.items ())
    def set_property (self, output, name, value):
        """ Set property <name> on <output>; raises BackendError if unknown or read-only """
        try: return self.properties[name.lower ()].set (output, value)
        # Handlers without a set () method (e.g. Edid) raise AttributeError
        except Fail as e: raise BackendError (e)
        except AttributeError: raise BackendError ("property {} cannot be set".format (name.lower ()))
|
lereldarion/slam | slam/layout.py | # Copyright (c) 2013-2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Layout manager
"""
import itertools
import collections
import pickle
from . import ext
from . import util
from .util import Pair
logger = util.setup_logger (__name__)
### Utils ###
class LayoutError (Exception):
    """ Recoverable layout error (caught by the manager, triggers a fallback) """
class LayoutFatalError (Exception):
    """ Unrecoverable layout error (invalid program state) """
class BackendError (Exception):
    """ Recoverable backend error (aborts the current change) """
class BackendFatalError (Exception):
    """ Unrecoverable backend error """
# Directions
class Dir:
    """ Direction constants; convention shared between the c++ extension and python """
    # values (order matters: must mirror the c++ convention)
    none, left, right, above, under = range (5)

    @staticmethod
    def iter ():
        """ Iterate over every direction value, including none """
        return range (5)

    # utils delegated to the c++ extension
    @staticmethod
    def invert (d):
        """ Opposite direction of <d> """
        return ext.Dir_invert (d)

    @staticmethod
    def str (d):
        """ Human readable name of <d> """
        return ext.Dir_str (d)
# Transformation
class Transform (util.AttributeEquality):
    """
    Transformation is internally a reflection on x coordinates followed by a trigonometric rotation
    Externally, rotate(), reflectx/y() return a new transformation based on the current one
    Not modifiable, only creates new instances
    """
    # BUGFIX: the docstring above was previously placed after the 'rotations'
    # attribute, making it a discarded string expression instead of __doc__.
    # rotation angle -> True if the rotation swaps width and height
    rotations = { 0: False, 90: True, 180: False, 270: True }
    def __init__ (self, rx = False, rot = 0):
        self.reflect = rx
        self.rotation = rot
    # Dump / load (plain-tuple representation used by the database)
    @staticmethod
    def load (data): return Transform (*data)
    def dump (self): return (self.reflect, self.rotation)
    # Derived transformation generators
    def rotate (self, rot):
        """ New Transform rotated by <rot> degrees (must be a multiple of 90) """
        if (rot % 360) not in Transform.rotations:
            raise LayoutFatalError ("unsupported rotation")
        return Transform (self.reflect, (self.rotation + rot) % 360)
    def reflectx (self):
        # Toggle the reflection; compensate rotation by 180 when axes are swapped
        return Transform (not self.reflect, (self.rotation + 180) % 360 if self.inverted () else self.rotation)
    def reflecty (self):
        # Toggle the reflection; rotation compensation is the opposite of reflectx ()
        return Transform (not self.reflect, self.rotation if self.inverted () else (self.rotation + 180) % 360)
    # Misc
    def inverted (self):
        """ True if the rotation swaps the x and y axes (90 / 270 degrees) """
        return Transform.rotations[self.rotation]
    def rectangle_size (self, size):
        """ Size (Pair) of a <size>-sized rectangle after applying this transform """
        return size.swap () if self.inverted () else size
    def __str__ (self):
        return ("R" if self.reflect else "") + str (self.rotation)
    def __hash__ (self):
        # Make object hashable (for Database.generate_statistical_layout)
        return hash ((self.reflect, self.rotation))
### AbstractLayout ###
class AbstractLayout (object):
    """
    Abstract Layout model used in the database.
    A layout is a set of outputs (represented by their EDID), their transformations, and relations between them.
    It can represent multiple physical layouts if same outputs screens are plugged into different plugs.
    Relations are duplicated (a < b && b > a).
    """
    class Output (object):
        # Per-output data: transformation, and relations to the other outputs
        def __init__ (self, **kwd):
            self.transform = kwd.get ("transform", Transform ())
            self.neighbours = kwd.get ("neighbours", {})  # edid -> Dir value
        # Load / dump (plain-data form for pickling in the database)
        @staticmethod
        def load (data): return AbstractLayout.Output (transform = Transform.load (data[0]), neighbours = data[1])
        def dump (self): return (self.transform.dump (), self.neighbours)
        def rel (self, neighbour):
            # Relation of this output towards <neighbour> edid, Dir.none if unrelated
            return self.neighbours.get (neighbour, Dir.none)
    def __init__ (self, **kwd):
        self.outputs = kwd.get ("outputs", {})  # edid -> Output
    # Load / dump
    @staticmethod
    def load (data): return AbstractLayout (outputs = {edid: AbstractLayout.Output.load (d) for edid, d in data.items ()})
    def dump (self): return {edid: output.dump () for edid, output in self.outputs.items ()}
    def set_relation (self, edid_a, rel, edid_b):
        # Store the relation in both directions to keep the duplicated-relation invariant
        self.outputs[edid_a].neighbours[edid_b] = rel
        self.outputs[edid_b].neighbours[edid_a] = Dir.invert (rel)
    def key (self):
        """ Key for Database is set of edid """
        return frozenset (self.outputs.keys ())
### ConcreteLayout ###
class ConcreteLayout (util.AttributeEquality):
    """
    Concrete layout representing a simplified backend state.
    It is an interface between abstract layouts and backend, and can be converted to/from both.
    A layout is a set of output (by output name), that may be enabled (actively used) or not.
    Each output has sizes and absolute positions (only meaningful if enabled).
    Some non-layout additionnal info from the backend is stored, like preferred sizes and EDID.
    """
    class Output (util.AttributeEquality):
        def __init__ (self, **kwd):
            # Layout info by output
            self.enabled = kwd.get ("enabled", False)
            self.transform = kwd.get ("transform", Transform ())
            self.base_size = kwd.get ("base_size", Pair (0, 0))
            self.position = kwd.get ("position", Pair (0, 0))
            # Additionnal data from backend
            self.preferred_size = kwd.get ("preferred_size", Pair (0, 0))
            self.edid = kwd.get ("edid", None)
        def size (self):
            # Footprint on the virtual screen: base size with the transform applied
            return self.transform.rectangle_size (self.base_size)
        def __str__ (self):
            return "{}: {}, tr({}), pos({:s}), size({:s}|{:s})".format (
                self.edid, {False:"D",True:"E"}[self.enabled],
                self.transform, self.position,
                self.base_size, self.preferred_size)
    def __init__ (self, **kwd):
        # Local import: sys.maxsize is the default "no limit" virtual screen bound
        import sys
        # Layout data
        self.outputs = kwd.get ("outputs", {})  # name -> Output
        self.virtual_screen_size = kwd.get ("vs_size", Pair (0, 0))
        # Additionnal info : screen size limits
        self.virtual_screen_min = kwd.get ("vs_min", Pair (0, 0))
        self.virtual_screen_max = kwd.get ("vs_max", Pair (sys.maxsize, sys.maxsize))
    # State checks
    def manual (self):
        """
        Returns True if this layout cannot be represented by an AbstractLayout.
        Reasons are: disabled outputs, invalid Edid data, non-preferred mode, mirroring / overlapping
        """
        if any (not o.enabled for o in self.outputs.values ()):
            return True
        if not self.edid_valid ():
            return True
        if any (o.preferred_size != o.base_size for o in self.outputs.values ()):
            return True
        # Check for overlap (and mirroring that is included in overlap)
        for oa, ob in itertools.combinations (self.outputs.values (), 2):
            oa_corner, ob_corner = oa.position + oa.size (), ob.position + ob.size ()
            # Rectangles overlap unless one is entirely left of / above the other
            if not (ob.position.x >= oa_corner.x or ob.position.y >= oa_corner.y or oa.position.x >= ob_corner.x or oa.position.y >= ob_corner.y):
                return True
        return False
    def edid_valid (self):
        """
        Returns True if the set of connected output has sufficient Edid data for the manager
        That means that each output has a unique Edid
        """
        edid_list = [o.edid for o in self.outputs.values ()]
        if None in edid_list:
            return False
        if len (edid_list) != len (frozenset (edid_list)):
            return False # Collision test
        return True
    # Edid / name info
    def connected_edids (self):
        """
        Returns set of connected outputs Edid
        Ignores outputs without Edid, and merge duplicates
        """
        return frozenset (o.edid for o in self.outputs.values () if o.edid is not None)
    def edid (self, name):
        # Edid of the output plugged on connector <name>
        return self.outputs[name].edid
    def name_map (self):
        # Reverse lookup: edid -> connector name (assumes edid_valid ())
        return {o.edid: name for name, o in self.outputs.items ()}
    # Pretty print
    def __str__ (self):
        outputs = (map ("\t{0[0]} ({0[1]})\n".format, self.outputs.items ()))
        return "ConcreteLayout(vss={:s}, vs_min={:s}, vs_max={:s}){{\n{}}}".format (
            self.virtual_screen_size, self.virtual_screen_min, self.virtual_screen_max,
            "".join (outputs))
    # Import/export
    def from_abstract (self, abstract):
        """
        Builds a new backend layout object from an abstract layout and current additionnal info
        Absolute layout positionning uses the c++ isl extension
        It assumes the ConcreteLayout base object has correct Edid (bijection name <-> edid)
        """
        names = self.name_map ()
        def make_entry (edid, o):
            # Enable each output at its preferred size, with the abstract transform
            size = self.outputs[names[edid]].preferred_size
            output = ConcreteLayout.Output (enabled = True, transform = o.transform, base_size = size, edid = edid, preferred_size = size)
            return (names[edid], output)
        concrete = ConcreteLayout (vs_min = self.virtual_screen_min, vs_max = self.virtual_screen_max,
            outputs = dict (make_entry (*entry) for entry in abstract.outputs.items ()))
        # Compute absolute layout
        edids = abstract.outputs.keys ()
        constraints = [[abstract.outputs[ea].rel (eb) for eb in edids] for ea in edids]
        sizes = [concrete.outputs[names[e]].size () for e in edids]
        result = ext.screen_layout (self.virtual_screen_min, self.virtual_screen_max, sizes, constraints)
        if result is None:
            raise LayoutError ("unable to compute concrete positions")
        # Fill result: (virtual screen size, list of per-output positions)
        concrete.virtual_screen_size = Pair (result[0])
        for i, edid in enumerate (edids):
            concrete.outputs[names[edid]].position = Pair (result[1][i])
        return concrete
    def to_abstract (self):
        """
        Build an AbstractLayout from a ConcreteLayout.
        Two screen are considered related if their borders are touching in the ConcreteLayout
        """
        if self.manual ():
            raise LayoutFatalError ("cannot abstract manual ConcreteLayout in manual")
        outputs = self.outputs.values ()
        abstract = AbstractLayout (outputs = {o.edid: AbstractLayout.Output (transform = o.transform) for o in outputs})
        # Extract neighbouring relations for each pair of outputs
        for oa, ob in itertools.permutations (outputs, 2):
            oa_corner, ob_corner = oa.position + oa.size (), ob.position + ob.size ()
            # Touching right border with vertical overlap -> oa is left of ob
            if oa_corner.x == ob.position.x and oa.position.y < ob_corner.y and oa_corner.y > ob.position.y:
                abstract.set_relation (oa.edid, Dir.left, ob.edid)
            # Touching bottom border with horizontal overlap -> oa is above ob
            if oa_corner.y == ob.position.y and oa.position.x < ob_corner.x and oa_corner.x > ob.position.x:
                abstract.set_relation (oa.edid, Dir.above, ob.edid)
        return abstract
### Database ###
class Database (object):
    """
    Database of AbstractLayout
    Can be stored/loaded from/to files
    Format v4 is:
    * int : version number
    * list of abstractlayout object dumps : layouts
    * relation_counters dict : (output_nameA, relation, output_nameB) for every pair of outputs
    """
    # BUGFIX: the docstring above was previously placed after 'version = 4',
    # making it a discarded string expression instead of __doc__.
    version = 4  # on-disk format version
    def __init__ (self, db_file):
        """ Create the database and load it from <db_file> (path object) if present """
        # Database : frozenset(edids) -> AbstractLayout ()
        self.layouts = {}
        # Relation usage counters : (nameA, rel, nameB) -> int | with nameA < nameB
        self.relation_counters = collections.defaultdict (int)
        # Load from database file
        self.db_file = db_file
        self.load_database ()
    # database access and update
    def get_layout (self, key):
        """ Stored AbstractLayout for <key> (frozenset of edids); raises LayoutError if unknown """
        try:
            return self.layouts[key]
        except KeyError:
            raise LayoutError ("layout for [{}] not found in database".format (",".join (key))) from None
    def successfully_applied (self, abstract, concrete):
        """ Record a successfully applied layout, update statistics, and persist """
        # update database
        self.layouts[abstract.key ()] = abstract
        # increment statistics counters
        for na, nb in itertools.permutations (concrete.outputs, 2):
            # increment relation usage counter
            relation = abstract.outputs[concrete.edid (na)].rel (concrete.edid (nb))
            self.relation_counters[(na, relation, nb)] += 1
        self.store_database ()
    # default
    def generate_statistical_layout (self, concrete, edid_set):
        """ Generates a layout using statistics """
        abstract = self.generate_default_layout (edid_set)
        # Set relation between two outputs screens as the most frequently used between the two outputs plugs
        for na, nb in itertools.combinations (concrete.outputs, 2):
            # Find relation with max use (counting both stored orientations of the pair).
            def count (d):
                return self.relation_counters[na, d, nb] + self.relation_counters[nb, Dir.invert (d), na]
            most_used = max (Dir.iter (), key = count)
            if count (most_used) > 0:
                abstract.set_relation (concrete.edid (na), most_used, concrete.edid (nb))
        # For each known Edid, set transformation as the most frequent in the database
        for edid in abstract.outputs:
            transform_frequency = collections.defaultdict (int)
            for key in self.layouts:
                if edid in key:
                    transform_frequency[self.layouts[key].outputs[edid].transform] += 1
            abstract.outputs[edid].transform = max (transform_frequency, default = Transform (), key = transform_frequency.get)
        return abstract
    def generate_default_layout (self, edid_set):
        """ Generates a default layout with no relations or transformation """
        return AbstractLayout (outputs = {edid: AbstractLayout.Output () for edid in edid_set})
    # store / load buffer version
    def load (self, buf):
        """ Read the database with layouts from buf (pickle format) """
        # check version
        version = pickle.load (buf)
        if not isinstance (version, int):
            raise ValueError ("incorrect database format : version = {}".format (version))
        if version != Database.version:
            raise ValueError ("incorrect database version : {} (expected {})".format (version, Database.version))
        # get layout database
        layout_dump_list = pickle.load (buf)
        for layout_dump in layout_dump_list:
            layout = AbstractLayout.load (layout_dump)
            self.layouts[layout.key ()] = layout
        # get relation_counters
        self.relation_counters = collections.defaultdict (int, pickle.load (buf))
    def store (self, buf):
        """ Outputs manager database into buffer object (pickle format) """
        # version
        pickle.dump (int (Database.version), buf)
        # database
        layout_dump_list = [abstract.dump () for abstract in self.layouts.values ()]
        pickle.dump (layout_dump_list, buf)
        # relation_counters
        pickle.dump (dict (self.relation_counters), buf)
    # store / load file version
    def load_database (self):
        """ Load the database file; a missing or corrupt file is logged, not fatal """
        try:
            with self.db_file.open ("rb") as db:
                self.load (db)
            logger.info ("loaded database from '{}'".format (self.db_file))
        except FileNotFoundError:
            # was logger.warn, a deprecated alias of warning
            logger.warning ("database file '{}' not found".format (self.db_file))
        except Exception as e:
            logger.error ("unable to load database file '{}': {}".format (self.db_file, e))
    def store_database (self):
        """ Store the database: write a temp file first, then rename over the target """
        # Write to a temporary file
        temp_file = self.db_file.with_suffix (".temp")
        with temp_file.open ("wb") as db:
            self.store (db)
        # On success copy it to new position
        temp_file.rename (self.db_file)
        logger.info ("stored database into '{}'".format (self.db_file))
### Manager ###
class Manager (Database):
    """
    Glue between Database and backend.
    Receive and handle events from the backend.
    """
    def __init__ (self, *args, **kwd):
        super ().__init__ (*args, **kwd)
    def start (self, backend):
        """ Attach to <backend>; attaching triggers an initial backend_changed call """
        # Init with default empty layout
        self.current_concrete_layout = ConcreteLayout ()
        # Attach to backend, will force an update of the current_concrete_layout
        self.backend = backend
        self.backend.attach (lambda concrete: self.backend_changed (concrete))
    # Callback
    def backend_changed (self, new_concrete_layout):
        """ Backend callback, called for each hardware state change. """
        logger.info ("backend changed")
        logger.debug ("current " + str (self.current_concrete_layout))
        logger.debug ("new " + str (new_concrete_layout))
        if new_concrete_layout == self.current_concrete_layout:
            return self.action_same_as_before ()
        if not new_concrete_layout.edid_valid ():
            return self.action_manual (new_concrete_layout, " (wrong or missing Edid data)")
        edid_set = new_concrete_layout.connected_edids ()
        if edid_set != self.current_concrete_layout.connected_edids ():
            # New output set, apply a layout
            self.action_apply_from_table (new_concrete_layout, edid_set)
        else:
            # Same output set
            if new_concrete_layout.manual ():
                self.action_manual (new_concrete_layout)
            else:
                self.action_store_and_normalize (new_concrete_layout)
    # do nothing actions
    def action_same_as_before (self):
        # We are being notified of our last update to backend
        logger.info ("do nothing (same layout as before)")
    def action_manual (self, new_concrete_layout, postfix = ""):
        # Entering manual mode, just keep current_concrete_layout updated
        logger.info ("do nothing, manual mode{}".format (postfix))
        self.current_concrete_layout = new_concrete_layout
    # apply config actions
    # Failure management discussion, by exception type:
    #
    # LayoutError:
    # * layout not found > use default
    # * layout with stupid relations > use default
    # * screen limits so tight it will never fit > using default will fail too anyway, so use default
    # BackendError:
    # * invalid time > x state changed, abort modification. event_loop will reload state and see what to do then
    # * crtc allocation error > crtc shortage. using default state will also fail, so abort modification
    # * x request error > abort modification
    # (Layout|Backend)FatalError:
    # * invalid program state, bail out, do not catch
    # <other, like xcb badmatch>:
    # * Badmatch should be avoided by backend, so abort if one goes through
    def helper_apply_abstract (self, abstract, new_concrete_layout):
        """ Convert <abstract> to concrete, apply it to the backend, record success """
        # Compute ConcreteLayout and apply it to backend
        concrete = new_concrete_layout.from_abstract (abstract)
        self.backend.apply_concrete_layout (concrete)
        # Update manager data on success
        self.current_concrete_layout = concrete
        self.successfully_applied (abstract, concrete)
    def action_apply_from_table (self, new_concrete_layout, edid_set):
        """ Try the stored layout; fall back to statistics on LayoutError """
        # Try to apply stored layout
        logger.info ("apply from table [{}]".format (",".join (edid_set)))
        try:
            return self.helper_apply_abstract (self.get_layout (edid_set), new_concrete_layout)
        except LayoutError as e:
            logger.info ("unable apply from table: {}".format (e))
        except BackendError as e:
            logger.error ("unable to apply to backend, abort change: {}".format (e))
            return # Abort change
        return self.action_apply_statistical_layout (new_concrete_layout, edid_set)
    def action_apply_statistical_layout (self, new_concrete_layout, edid_set):
        """ Try a statistics-based layout; fall back to the default on LayoutError """
        # Build a default config with no relation
        logger.info ("apply statistical layout [{}]".format (",".join (new_concrete_layout.outputs)))
        try:
            return self.helper_apply_abstract (self.generate_statistical_layout (new_concrete_layout, edid_set), new_concrete_layout)
        except LayoutError as e:
            logger.info ("unable to apply statistical layout: {}".format (e))
        except BackendError as e:
            logger.error ("unable to apply to backend, abort change: {}".format (e))
            return # Abort change
        return self.action_apply_default_layout (new_concrete_layout, edid_set)
    def action_apply_default_layout (self, new_concrete_layout, edid_set):
        """ Last resort: apply a relation-less default layout """
        # Build a default config with no relation
        logger.info ("apply default layout")
        try:
            self.helper_apply_abstract (self.generate_default_layout (edid_set), new_concrete_layout)
        except (LayoutError, BackendError) as e:
            # Provide detailed error if we failed with this default one, as it should only fail in the backend
            logger.exception ("unable to apply default layout, abort change: {}".format (e))
    def action_store_and_normalize (self, new_concrete_layout):
        """ Store the user-made layout in the database and re-apply its normalized form """
        # Update database
        logger.info ("store and normalize")
        try:
            self.helper_apply_abstract (new_concrete_layout.to_abstract (), new_concrete_layout)
        except BackendError as e:
            # Should not fail due to layout problems, so let these exceptions through
            logger.error ("unable to apply to backend, abort change: {}".format (e))
|
lereldarion/slam | slam/util.py | # Copyright (c) 2013-2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Utilities
"""
import logging, logging.handlers
# Logging
def setup_root_logging (filename, level):
    """
    Configure the root logger: level, message format, and output.
    With a filename, log to a rotating file; otherwise log to stderr.
    Returns the root logger.
    """
    if filename:
        handler = logging.handlers.RotatingFileHandler (filename, "a", 1000000, 1)
    else:
        handler = logging.StreamHandler ()
    handler.setLevel (level)
    handler.setFormatter (logging.Formatter (style = "{", fmt = "{asctime} :: {levelname} :: {name} :: {message}"))
    root = logging.getLogger ()
    root.setLevel (level)
    root.addHandler (handler)
    return root
def setup_logger (module_name):
    """ Return the logger for <module_name> (typically called with __name__) """
    return logging.getLogger (module_name)
logger = setup_logger (__name__)
# Attribute equality helper class
class AttributeEquality (object):
    """ Mixin giving equality based on class and instance attribute dictionaries """
    def __eq__ (self, other):
        if not isinstance (other, self.__class__):
            return False
        return self.__dict__ == other.__dict__
    def __ne__ (self, other):
        return not self.__eq__ (other)
# Pair
class Pair (tuple):
    """
    Utility type for an immutable pair of values (x/y or w/h).
    Supports element-wise addition/subtraction/negation and custom format flags.
    """
    def __new__ (cls, a, b = None):
        """ Takes a pair of values, or a single iterable of two values """
        # 'is not None' (not '!= None') so custom __eq__ types cannot confuse the test
        if b is not None: a = (a, b)
        return super ().__new__ (cls, a)
    @classmethod
    def from_struct (cls, struct, xkey = "x", ykey = "y"):
        """ Construct a Pair by querying xkey/ykey (default x/y) fields in a structure """
        return cls (getattr (struct, xkey), getattr (struct, ykey))
    @classmethod
    def from_size (cls, struct, formatting = "{}"):
        """ Construct a Pair by taking (optionnaly formatted) width/height fields in the given class """
        return cls.from_struct (struct, formatting.format ("width"), formatting.format ("height"))
    def __getattr__ (self, attr):
        """ Provide x/y/w/h quick access to the two elements """
        if attr in ("x", "w"): return self[0]
        elif attr in ("y", "h"): return self[1]
        else: raise AttributeError ("Pair doesn't support '{}' attr (only x/y/w/h)".format (attr))
    def copy (self): return Pair (self)
    def swap (self): return Pair (self.y, self.x)
    # Element-wise arithmetic
    def __add__ (self, other): return Pair (self.x + other.x, self.y + other.y)
    def __neg__ (self): return Pair (-self.x, -self.y)
    def __sub__ (self, other): return self + (-other)
    def __format__ (self, spec):
        """ Pretty printing, with two str.format flags: 's' for WxH sizes, 'p' for physical mm sizes """
        if spec == "s": return "{}x{}".format (self.x, self.y)
        elif spec == "p": return "{}mm x {}mm".format (self.x, self.y)
        else: return str (self)
# Daemon
class Daemon (object):
    """
    Daemon objects are objects that can activated when some conditions happen in an event_loop.
    They can be activated if:
    1/ New data is available on a file descriptor.
    To enable this behavior, fileno() must return a descriptor integer instead of None
    This integer must be constant for the event_loop.
    2/ The wait in the event_loop timeouts.
    To enable this, timeout() must return an integer >= 0 (in seconds) instead of None
    3/ The daemon is activated manually.
    During execution, some code calls d.activate_manually() on the daemon to make it activate.
    This is useful to reactivate a daemon event if no new data is available.
    A counter ensure that reactivations does not loop undefinitely (it triggers an error).
    Finally, an activate() callback function must be implemented.
    It must return a bool indicating if the event loop should continue.
    During its execution, d.activation_reason() gives the reason for activation.
    """
    # Activation reason constants
    NOT_ACTIVATED = 0
    ACTIVATED_MANUAL = 1
    ACTIVATED_TIMEOUT = 2
    ACTIVATED_DATA = 3
    # Default version of API (override in subclasses)
    def fileno (self):
        # Return a file descriptor int to enable data-driven activation
        return None
    def timeout (self):
        # Return a delay in seconds to enable timeout-driven activation
        return None
    def activate (self):
        # Callback: must return whether the event loop should continue
        raise NotImplementedError
    # Methods provided to subclasses
    def __init__ (self):
        """ Creates internal variables """
        self._activation_reason = self.NOT_ACTIVATED
        self._current_activation_reason = self.NOT_ACTIVATED
        self._activation_counter = 0
    def activate_manually (self):
        """ Ask the event loop to activate us again """
        self._activation_reason = self.ACTIVATED_MANUAL
    def activation_reason (self):
        """ Gives us the activation reason for this call of activate() """
        return self._current_activation_reason
    # Internal stuff
    def _is_activated (self):
        return self._activation_reason != self.NOT_ACTIVATED
    def _activate (self):
        # Detect possible activate_manually () loop
        self._activation_counter += 1
        if self._activation_counter > 100:
            raise RuntimeError ("Daemon.event_loop: reactivation loop detected")
        # Set context for activate (), then clean
        self._current_activation_reason = self._activation_reason
        self._activation_reason = self.NOT_ACTIVATED
        continue_event_loop = self.activate ()
        self._current_activation_reason = self.NOT_ACTIVATED
        return continue_event_loop
    # Top level event_loop system
    @staticmethod
    def event_loop (*daemons):
        """
        Take a list of daemons as input, handle their activation in an event loop.
        fileno(): is supposed constant (only read once).
        timeout(): read at each cycle ; only the smallest timeout daemon is activated for timeout.
        """
        # Quit nicely on SIGTERM
        import signal
        def sigterm_handler (sig, stack):
            import sys
            sys.exit ()
        signal.signal (signal.SIGTERM, sigterm_handler)
        # Event loop setup : use selector library
        import selectors
        selector_device = selectors.DefaultSelector ()
        try:
            # Only daemons exposing a descriptor are registered for data events
            for d in daemons:
                if d.fileno () is not None:
                    selector_device.register (d, selectors.EVENT_READ)
            while True:
                # Activate deamons until no one has the activation flag raised
                # (the per-cycle counter reset bounds activate_manually () chains)
                for d in daemons:
                    d._activation_counter = 0
                while any (map (Daemon._is_activated, daemons)):
                    d = next (filter (Daemon._is_activated, daemons))
                    if d._activate () == False:
                        return
                # First determine if a timeout is used, and which daemons will timeout first
                timeout = None
                lowest_timeout_daemons = []
                for d, t in ((d, d.timeout()) for d in daemons):
                    if t is not None:
                        if timeout is None or t < timeout:
                            timeout = t
                            lowest_timeout_daemons = [d]
                        elif t == timeout:
                            lowest_timeout_daemons.append (d)
                # Check for input data using select
                activated_daemons = selector_device.select (timeout)
                if len (activated_daemons) > 0:
                    for key, _ in activated_daemons:
                        key.fileobj._activation_reason = Daemon.ACTIVATED_DATA
                else:
                    # Timeout
                    for d in lowest_timeout_daemons:
                        d._activation_reason = Daemon.ACTIVATED_TIMEOUT
        finally:
            selector_device.close ()
# Class introspection and pretty print
def class_attributes (cls):
    """ Return all non-callable, non-dunder class attributes (usually class constants) """
    # BUGFIX: was 'not callable (attr)' which tested the attribute NAME (a str,
    # never callable), so methods were not filtered out; test the value instead.
    return {attr: getattr (cls, attr) for attr in dir (cls) if not callable (getattr (cls, attr)) and not attr.startswith ("__")}
def sequence_stringify (iterable, highlight = lambda t: False, stringify = str):
    """ Join all elements of <iterable> with spaces, bracketing those matched by <highlight : obj -> bool> """
    parts = []
    for element in iterable:
        text = stringify (element)
        parts.append ("[{}]".format (text) if highlight (element) else text)
    return " ".join (parts)
|
editorconfig/editorconfig-sublime | editorconfig/__init__.py | """EditorConfig Python Core"""
from editorconfig.versiontools import join_version
VERSION = (0, 11, 1, "final")
__all__ = ['get_properties', 'EditorConfigError', 'exceptions']
__version__ = join_version(VERSION)
def get_properties(filename):
    """Locate and parse EditorConfig files for the given filename"""
    return EditorConfigHandler(filename).get_configurations()
from editorconfig.handler import EditorConfigHandler
from editorconfig.exceptions import *
|
editorconfig/editorconfig-sublime | EditorConfig.py | <filename>EditorConfig.py
import sublime_plugin
from editorconfig import get_properties, EditorConfigError
# EditorConfig 'end_of_line' value -> Sublime Text line-ending name
LINE_ENDINGS = {
    'lf': 'unix',
    'crlf': 'windows',
    'cr': 'cr'
}

# EditorConfig 'charset' value -> Sublime Text encoding name
# NOTE(review): Sublime encoding names are usually capitalized ("UTF-8 with BOM");
# confirm these lowercase names are accepted by view.set_encoding
CHARSETS = {
    'latin1': 'Western (ISO 8859-1)',
    'utf-8': 'utf-8',
    'utf-8-bom': 'utf-8 with bom',
    'utf-16be': 'utf-16 be',
    'utf-16le': 'utf-16 le'
}
class EditorConfig(sublime_plugin.EventListener):
    """Sublime Text event listener that applies EditorConfig properties to views."""
    def on_load(self, view):
        # Apply the full configuration when a file is opened
        self.init(view, False)
    def on_pre_save(self, view):
        # Only enforce the charset right before saving
        self.init(view, True)
    def init(self, view, pre_save):
        """Fetch EditorConfig properties for the view's file and apply them."""
        path = view.file_name()
        if not path:
            return
        try:
            config = get_properties(path)
        except EditorConfigError:
            # BUGFIX: was a python-2-only print statement; the call form works in 2 and 3
            print('Error occurred while getting EditorConfig properties')
        else:
            if config:
                if pre_save:
                    self.apply_charset(view, config)
                else:
                    self.apply_config(view, config)
    def apply_charset(self, view, config):
        """Set the view encoding from the 'charset' property, if recognized."""
        charset = config.get('charset')
        if charset in CHARSETS:
            view.set_encoding(CHARSETS[charset])
    def apply_config(self, view, config):
        """Apply indent, line-ending, trailing-whitespace and final-newline properties."""
        settings = view.settings()
        indent_style = config.get('indent_style')
        indent_size = config.get('indent_size')
        end_of_line = config.get('end_of_line')
        trim_trailing_whitespace = config.get('trim_trailing_whitespace')
        insert_final_newline = config.get('insert_final_newline')
        if indent_style == 'space':
            settings.set('translate_tabs_to_spaces', True)
        elif indent_style == 'tab':
            settings.set('translate_tabs_to_spaces', False)
        if indent_size:
            try:
                settings.set('tab_size', int(indent_size))
            except ValueError:
                # non-integer values (e.g. 'tab') are ignored
                pass
        if end_of_line in LINE_ENDINGS:
            view.set_line_endings(LINE_ENDINGS[end_of_line])
        if trim_trailing_whitespace == 'true':
            settings.set('trim_trailing_white_space_on_save', True)
        elif trim_trailing_whitespace == 'false':
            settings.set('trim_trailing_white_space_on_save', False)
        if insert_final_newline == 'true':
            settings.set('ensure_newline_at_eof_on_save', True)
        elif insert_final_newline == 'false':
            settings.set('ensure_newline_at_eof_on_save', False)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.