repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
bAmpT/muzero-pytorch | core/mcts.py | <reponame>bAmpT/muzero-pytorch<filename>core/mcts.py
import math
import numpy as np
import torch
class MinMaxStats(object):
    """Holds the running min/max value bounds of the search tree.

    Used to normalize Q-values into [0, 1] inside the pUCT formula
    (MuZero pseudocode, Schrittwieser et al., 2020).
    """

    def __init__(self, min_value_bound=None, max_value_bound=None):
        # Bug fix: the bounds were crossed (the min bound seeded `maximum`
        # and vice versa), and the truthiness test `if bound` silently
        # discarded a legitimate bound of 0. Use `is not None` and assign
        # each bound to the matching attribute.
        self.maximum = max_value_bound if max_value_bound is not None else -float('inf')
        self.minimum = min_value_bound if min_value_bound is not None else float('inf')

    def update(self, value: float):
        """Fold a newly observed value into the running bounds."""
        self.maximum = max(self.maximum, value)
        self.minimum = min(self.minimum, value)

    def normalize(self, value: float) -> float:
        """Rescale `value` into [0, 1] once real bounds have been observed."""
        if self.maximum > self.minimum:
            # We normalize only when we have set the maximum and minimum values.
            return (value - self.minimum) / (self.maximum - self.minimum)
        return value
class Node(object):
def __init__(self, prior: float):
self.visit_count = 0
self.to_play = -1
self.prior = prior
self.value_sum = 0
self.children = {}
self.hidden_state = None
self.reward = 0
def expanded(self) -> bool:
return len(self.children) > 0
def value(self) -> float:
if self.visit_count == 0:
return 0
return self.value_sum / self.visit_count
def expand(self, to_play, actions, network_output):
self.to_play = to_play
self.hidden_state = network_output.hidden_state
self.reward = network_output.reward
# softmax over policy logits
policy = {a: math.exp(network_output.policy_logits[0][a.index]) for a in actions}
policy_sum = sum(policy.values())
for action, p in policy.items():
self.children[action] = Node(p / policy_sum)
def add_exploration_noise(self, dirichlet_alpha, exploration_fraction):
actions = list(self.children.keys())
noise = np.random.dirichlet([dirichlet_alpha] * len(actions))
frac = exploration_fraction
for a, n in zip(actions, noise):
self.children[a].prior = self.children[a].prior * (1 - frac) + n * frac
class MCTS(object):
    """Monte-Carlo Tree Search over the learned model (MuZero, Appendix B)."""

    def __init__(self, config):
        self.config = config

    def run(self, root, action_history, model):
        """Run `config.num_simulations` simulations from an already-expanded root.

        Mutates the tree rooted at `root` in place; returns nothing.
        """
        min_max_stats = MinMaxStats()
        for _ in range(self.config.num_simulations):
            history = action_history.clone()
            node = root
            search_path = [node]
            # Selection: descend via pUCT until an unexpanded leaf is reached.
            while node.expanded():
                action, node = self.select_child(node, min_max_stats)
                history.add_action(action)
                search_path.append(node)
            # Inside the search tree we use the dynamics function to obtain the next
            # hidden state given an action and the previous hidden state.
            parent = search_path[-2]
            network_output = model.recurrent_inference(parent.hidden_state,
                                                       torch.tensor([[history.last_action().index]],
                                                                    device=parent.hidden_state.device))
            node.expand(history.to_play(), history.action_space(), network_output)
            self.backpropagate(search_path, network_output.value.item(), history.to_play(), min_max_stats)

    def select_child(self, node, min_max_stats):
        """Return the (action, child) pair maximizing the UCB score."""
        # Ties on the score fall through to comparing actions, never Node objects,
        # because actions (the dict keys) are unique.
        _, action, child = max((self.ucb_score(node, child, min_max_stats), action, child)
                               for action, child in node.children.items())
        return action, child

    def ucb_score(self, parent, child, min_max_stats) -> float:
        """pUCT score: prior-weighted exploration term + normalized value term."""
        pb_c = math.log(
            (parent.visit_count + self.config.pb_c_base + 1) / self.config.pb_c_base) + self.config.pb_c_init
        pb_c *= math.sqrt(parent.visit_count) / (child.visit_count + 1)
        prior_score = pb_c * child.prior
        if child.visit_count > 0:
            # One-step lookahead: immediate reward plus discounted normalized value.
            value_score = child.reward + self.config.discount * min_max_stats.normalize(child.value())
        else:
            value_score = 0
        return prior_score + value_score

    def backpropagate(self, search_path, value, to_play, min_max_stats):
        """Propagate the leaf evaluation up the path, flipping sign per player."""
        for node in reversed(search_path):
            node.value_sum += value if node.to_play == to_play else -value
            node.visit_count += 1
            min_max_stats.update(node.value())
            # Discounted return accumulated toward the root.
            value = node.reward + self.config.discount * value
|
bAmpT/muzero-pytorch | core/train.py | import logging
import ray
import torch
import torch.optim as optim
from torch.nn import L1Loss
from .mcts import MCTS, Node
from .replay_buffer import ReplayBuffer
from .test import test
from .utils import select_action
import time
train_logger = logging.getLogger('train')
test_logger = logging.getLogger('train_test')
def soft_update(target, source, tau):
    """Polyak-average `source` parameters into `target` in place.

    Each target parameter becomes (1 - tau) * target + tau * source.
    """
    for tgt_p, src_p in zip(target.parameters(), source.parameters()):
        blended = tgt_p.data * (1.0 - tau) + src_p.data * tau
        tgt_p.data.copy_(blended)
def _log(config, step_count, log_data, model, replay_buffer, lr, worker_logs, summary_writer):
    """Write one training step's metrics to the loggers and tensorboard.

    `log_data` is the (loss_data, td_data, priority_data) triple produced by
    update_weights(); `worker_logs` is the averaged tuple returned by
    SharedStorage.get_worker_logs().
    """
    loss_data, td_data, priority_data = log_data
    weighted_loss, loss, policy_loss, reward_loss, value_loss = loss_data
    target_reward, target_value, trans_target_reward, trans_target_value, target_reward_phi, target_value_phi, \
        pred_reward, pred_value, target_policies, predicted_policies = td_data
    batch_weights, batch_indices = priority_data
    worker_reward, worker_eps_len, test_score, temperature, visit_entropy = worker_logs
    replay_episodes_collected = ray.get(replay_buffer.episodes_collected.remote())
    replay_buffer_size = ray.get(replay_buffer.size.remote())
    _msg = '#{:<10} Loss: {:<8.3f} [weighted Loss:{:<8.3f} Policy Loss: {:<8.3f} Value Loss: {:<8.3f} ' \
           'Reward Loss: {:<8.3f} ] Replay Episodes Collected: {:<10d} Buffer Size: {:<10d} Lr: {:<8.3f}'
    _msg = _msg.format(step_count, loss, weighted_loss, policy_loss, value_loss, reward_loss,
                       replay_episodes_collected, replay_buffer_size, lr)
    train_logger.info(_msg)
    if test_score is not None:
        test_msg = '#{:<10} Test Score: {:<10}'.format(step_count, test_score)
        test_logger.info(test_msg)
    if summary_writer is not None:
        if config.debug:
            # Per-parameter gradient and weight histograms (expensive; debug only).
            for name, W in model.named_parameters():
                summary_writer.add_histogram('after_grad_clip' + '/' + name + '_grad', W.grad.data.cpu().numpy(),
                                             step_count)
                summary_writer.add_histogram('network_weights' + '/' + name, W.data.cpu().numpy(), step_count)
            pass
        # Replay-buffer and sampled-batch distributions.
        summary_writer.add_histogram('replay_data/replay_buffer_priorities',
                                     ray.get(replay_buffer.get_priorities.remote()),
                                     step_count)
        summary_writer.add_histogram('replay_data/batch_weight', batch_weights, step_count)
        summary_writer.add_histogram('replay_data/batch_indices', batch_indices, step_count)
        # Target vs. predicted distributions for this batch.
        summary_writer.add_histogram('train_data_dist/target_reward', target_reward.flatten(), step_count)
        summary_writer.add_histogram('train_data_dist/target_value', target_value.flatten(), step_count)
        summary_writer.add_histogram('train_data_dist/transformed_target_reward', trans_target_reward.flatten(),
                                     step_count)
        summary_writer.add_histogram('train_data_dist/transformed_target_value', trans_target_value.flatten(),
                                     step_count)
        summary_writer.add_histogram('train_data_dist/target_reward_phi', target_reward_phi.unique().flatten(),
                                     step_count)
        summary_writer.add_histogram('train_data_dist/target_value_phi', target_value_phi.unique().flatten(),
                                     step_count)
        summary_writer.add_histogram('train_data_dist/pred_reward', pred_reward.flatten(), step_count)
        summary_writer.add_histogram('train_data_dist/pred_value', pred_value.flatten(), step_count)
        summary_writer.add_histogram('train_data_dist/pred_policies', predicted_policies.flatten(), step_count)
        summary_writer.add_histogram('train_data_dist/target_policies', target_policies.flatten(), step_count)
        # Scalar curves.
        summary_writer.add_scalar('train/loss', loss, step_count)
        summary_writer.add_scalar('train/weighted_loss', weighted_loss, step_count)
        summary_writer.add_scalar('train/policy_loss', policy_loss, step_count)
        summary_writer.add_scalar('train/value_loss', value_loss, step_count)
        summary_writer.add_scalar('train/reward_loss', reward_loss, step_count)
        summary_writer.add_scalar('train/episodes_collected', ray.get(replay_buffer.episodes_collected.remote()),
                                  step_count)
        summary_writer.add_scalar('train/replay_buffer_len', ray.get(replay_buffer.size.remote()), step_count)
        summary_writer.add_scalar('train/lr', lr, step_count)
        # Worker stats are None when no episode finished since the last log.
        if worker_reward is not None:
            summary_writer.add_scalar('workers/reward', worker_reward, step_count)
            summary_writer.add_scalar('workers/eps_len', worker_eps_len, step_count)
            summary_writer.add_scalar('workers/temperature', temperature, step_count)
            summary_writer.add_scalar('workers/visit_entropy', visit_entropy, step_count)
        if test_score is not None:
            summary_writer.add_scalar('train/test_score', test_score, step_count)
@ray.remote
class SharedStorage(object):
    """Ray actor holding the latest model weights plus rolling worker metrics."""

    def __init__(self, model):
        self.step_counter = 0
        self.model = model
        # Rolling episode logs; drained (averaged and reset) by get_worker_logs().
        self.reward_log = []
        self.test_log = []
        self.eps_lengths = []
        self.temperature_log = []
        self.visit_entropies_log = []

    def get_weights(self):
        return self.model.get_weights()

    def set_weights(self, weights):
        return self.model.set_weights(weights)

    def incr_counter(self):
        self.step_counter += 1

    def get_counter(self):
        return self.step_counter

    def set_data_worker_logs(self, eps_len, eps_reward, temperature, visit_entropy):
        """Record one finished self-play episode's statistics."""
        self.eps_lengths.append(eps_len)
        self.reward_log.append(eps_reward)
        self.temperature_log.append(temperature)
        self.visit_entropies_log.append(visit_entropy)

    def add_test_log(self, score):
        self.test_log.append(score)

    def get_worker_logs(self):
        """Average and clear the accumulated logs; None where nothing was logged."""
        if len(self.reward_log) > 0:
            reward = sum(self.reward_log) / len(self.reward_log)
            eps_lengths = sum(self.eps_lengths) / len(self.eps_lengths)
            temperature = sum(self.temperature_log) / len(self.temperature_log)
            visit_entropy = sum(self.visit_entropies_log) / len(self.visit_entropies_log)
            self.reward_log = []
            self.eps_lengths = []
            self.temperature_log = []
            self.visit_entropies_log = []
        else:
            reward = None
            eps_lengths = None
            temperature = None
            visit_entropy = None
        if len(self.test_log) > 0:
            test_score = sum(self.test_log) / len(self.test_log)
            self.test_log = []
        else:
            test_score = None
        return reward, eps_lengths, test_score, temperature, visit_entropy
@ray.remote
class DataWorker(object):
    """Ray actor that plays self-play episodes with MCTS and feeds the replay buffer."""

    def __init__(self, rank, config, shared_storage, replay_buffer):
        self.rank = rank                      # used to derive a per-worker env seed
        self.config = config
        self.shared_storage = shared_storage
        self.replay_buffer = replay_buffer

    def run(self):
        """Loop until training finishes: pull latest weights, play one episode, save it."""
        model = self.config.get_uniform_network()
        with torch.no_grad():
            while ray.get(self.shared_storage.get_counter.remote()) < self.config.training_steps:
                model.set_weights(ray.get(self.shared_storage.get_weights.remote()))
                model.eval()
                env = self.config.new_game(self.config.seed + self.rank)
                obs = env.reset()
                done = False
                priorities = []
                eps_reward, eps_steps, visit_entropies = 0, 0, 0
                trained_steps = ray.get(self.shared_storage.get_counter.remote())
                # Temperature is fixed for the whole episode from the current step count.
                _temperature = self.config.visit_softmax_temperature_fn(num_moves=len(env.history),
                                                                        trained_steps=trained_steps)
                while not done and eps_steps <= self.config.max_moves:
                    root = Node(0)
                    obs = torch.tensor(obs, dtype=torch.float32).unsqueeze(0)
                    network_output = model.initial_inference(obs)
                    root.expand(env.to_play(), env.legal_actions(), network_output)
                    root.add_exploration_noise(dirichlet_alpha=self.config.root_dirichlet_alpha,
                                               exploration_fraction=self.config.root_exploration_fraction)
                    MCTS(self.config).run(root, env.action_history(), model)
                    # Sample (non-deterministic) from the visit-count distribution.
                    action, visit_entropy = select_action(root, temperature=_temperature, deterministic=False)
                    obs, reward, done, info = env.step(action.index)
                    env.store_search_stats(root)
                    eps_reward += reward
                    eps_steps += 1
                    visit_entropies += visit_entropy
                    if not self.config.use_max_priority:
                        # Initial priority = |predicted value - MCTS root value| + eps.
                        error = L1Loss(reduction='none')(network_output.value,
                                                         torch.tensor([[root.value()]])).item()
                        priorities.append(error + 1e-5)
                env.close()
                self.replay_buffer.save_game.remote(env,
                                                    priorities=None if self.config.use_max_priority else priorities)
                # Todo: refactor with env attributes to reduce variables
                visit_entropies /= eps_steps
                self.shared_storage.set_data_worker_logs.remote(eps_steps, eps_reward, _temperature, visit_entropies)
def update_weights(model, target_model, optimizer, replay_buffer, config):
    """Run one optimization step on a prioritized batch.

    Returns (loss_data, td_data, priority_data) tuples consumed by _log().
    Also pushes refreshed per-sample priorities back to the replay buffer.
    """
    batch = ray.get(replay_buffer.sample_batch.remote(config.num_unroll_steps, config.td_steps,
                                                      model=target_model if config.use_target_model else None,
                                                      config=config))
    obs_batch, action_batch, target_reward, target_value, target_policy, indices, weights = batch
    obs_batch = obs_batch.to(config.device)
    action_batch = action_batch.to(config.device).unsqueeze(-1)
    target_reward = target_reward.to(config.device)
    target_value = target_value.to(config.device)
    target_policy = target_policy.to(config.device)
    weights = weights.to(config.device)
    # transform targets to categorical representation
    # Reference: Appendix F
    # NOTE(review): value_phi/reward_phi clamp their input in place (clamp_),
    # so the transformed_target_* tensors logged later may already be clamped
    # to the support range — confirm this is intended.
    transformed_target_reward = config.scalar_transform(target_reward)
    target_reward_phi = config.reward_phi(transformed_target_reward)
    transformed_target_value = config.scalar_transform(target_value)
    target_value_phi = config.value_phi(transformed_target_value)
    value, _, policy_logits, hidden_state = model.initial_inference(obs_batch)
    scaled_value = config.inverse_value_transform(value)
    # Note: Following line is just for logging.
    predicted_values, predicted_rewards, predicted_policies = scaled_value, None, torch.softmax(policy_logits, dim=1)
    # Reference: Appendix G
    # New priorities = |predicted scalar value - TD value target| + eps.
    new_priority = L1Loss(reduction='none')(scaled_value.squeeze(-1), target_value[:, 0])
    new_priority += 1e-5
    new_priority = new_priority.data.cpu().numpy()
    value_loss = config.scalar_value_loss(value, target_value_phi[:, 0])
    policy_loss = -(torch.log_softmax(policy_logits, dim=1) * target_policy[:, 0]).sum(1)
    reward_loss = torch.zeros(config.batch_size, device=config.device)
    gradient_scale = 1 / config.num_unroll_steps
    for step_i in range(config.num_unroll_steps):
        value, reward, policy_logits, hidden_state = model.recurrent_inference(hidden_state, action_batch[:, step_i])
        policy_loss += -(torch.log_softmax(policy_logits, dim=1) * target_policy[:, step_i + 1]).sum(1)
        value_loss += config.scalar_value_loss(value, target_value_phi[:, step_i + 1])
        reward_loss += config.scalar_reward_loss(reward, target_reward_phi[:, step_i])
        # Halve the gradient flowing back through the dynamics function (Appendix G).
        hidden_state.register_hook(lambda grad: grad * 0.5)
        # collected for logging
        predicted_values = torch.cat((predicted_values, config.inverse_value_transform(value)))
        scaled_rewards = config.inverse_reward_transform(reward)
        predicted_rewards = scaled_rewards if predicted_rewards is None else torch.cat((predicted_rewards,
                                                                                       scaled_rewards))
        predicted_policies = torch.cat((predicted_policies, torch.softmax(policy_logits, dim=1)))
    # optimize
    loss = (policy_loss + config.value_loss_coeff * value_loss + reward_loss)
    weighted_loss = (weights * loss).mean()
    # Scale the overall gradient by 1/num_unroll_steps via a backward hook.
    weighted_loss.register_hook(lambda grad: grad * gradient_scale)
    loss = loss.mean()
    optimizer.zero_grad()
    weighted_loss.backward()
    torch.nn.utils.clip_grad_norm_(model.parameters(), config.max_grad_norm)
    optimizer.step()
    # update priorities
    replay_buffer.update_priorities.remote(indices, new_priority)
    # packing data for logging
    loss_data = (weighted_loss.item(), loss.item(), policy_loss.mean().item(), reward_loss.mean().item(),
                 value_loss.mean().item())
    td_data = (target_reward, target_value, transformed_target_reward, transformed_target_value,
               target_reward_phi, target_value_phi, predicted_rewards, predicted_values,
               target_policy, predicted_policies)
    priority_data = (weights, indices)
    return loss_data, td_data, priority_data
def adjust_lr(config, optimizer, step_count):
    """Exponentially decay the learning rate and apply it to the optimizer.

    Returns the learning rate actually set (floored at 0.001).
    """
    decayed = config.lr_init * config.lr_decay_rate ** (step_count / config.lr_decay_steps)
    lr = decayed if decayed > 0.001 else 0.001
    for group in optimizer.param_groups:
        group['lr'] = lr
    return lr
def _train(config, shared_storage, replay_buffer, summary_writer):
    """Main learner loop: sample batches, update weights, sync checkpoints."""
    model = config.get_uniform_network().to(config.device)
    model.train()
    optimizer = optim.SGD(model.parameters(), lr=config.lr_init, momentum=config.momentum,
                          weight_decay=config.weight_decay)
    # Slow-moving target network used to re-estimate targets on the CPU.
    target_model = config.get_uniform_network().to('cpu')
    target_model.eval()
    # wait for replay buffer to be non-empty (busy-wait)
    while ray.get(replay_buffer.size.remote()) == 0:
        pass
    for step_count in range(config.training_steps):
        shared_storage.incr_counter.remote()
        lr = adjust_lr(config, optimizer, step_count)
        # Push fresh weights to the self-play actors at checkpoint intervals.
        if step_count % config.checkpoint_interval == 0:
            shared_storage.set_weights.remote(model.get_weights())
        log_data = update_weights(model, target_model, optimizer, replay_buffer, config)
        # softly update target model
        if config.use_target_model:
            soft_update(target_model, model, tau=1e-2)
            target_model.eval()
        _log(config, step_count, log_data, model, replay_buffer, lr,
             ray.get(shared_storage.get_worker_logs.remote()), summary_writer)
        # Periodically trim the replay buffer back to its soft capacity.
        if step_count % 50 == 0:
            replay_buffer.remove_to_fit.remote()
    # Final weight push after training completes.
    shared_storage.set_weights.remote(model.get_weights())
@ray.remote
def _test(config, shared_storage):
    """Periodically evaluate the latest weights on CPU; checkpoint the best model."""
    test_model = config.get_uniform_network().to('cpu')
    best_test_score = float('-inf')
    while ray.get(shared_storage.get_counter.remote()) < config.training_steps:
        test_model.set_weights(ray.get(shared_storage.get_weights.remote()))
        test_model.eval()
        test_score = test(config, test_model, config.test_episodes, 'cpu', False)
        # Save whenever the score matches or beats the best seen so far.
        if test_score >= best_test_score:
            best_test_score = test_score
            torch.save(test_model.state_dict(), config.model_path)
        shared_storage.add_test_log.remote(test_score)
        time.sleep(30)
def train(config, summary_writer=None):
    """Top-level entry point: spawn self-play actors and tester, run the learner.

    Returns the result of loading the final weights into a fresh network.
    """
    storage = SharedStorage.remote(config.get_uniform_network())
    replay_buffer = ReplayBuffer.remote(batch_size=config.batch_size, capacity=config.window_size,
                                        prob_alpha=config.priority_prob_alpha)
    workers = [DataWorker.remote(rank, config, storage, replay_buffer).run.remote()
               for rank in range(0, config.num_actors)]
    workers += [_test.remote(config, storage)]
    # The learner runs in this process; actors run remotely.
    _train(config, storage, replay_buffer, summary_writer)
    # NOTE(review): ray.wait's second positional argument is num_returns; newer
    # ray versions require it as a keyword — confirm against the pinned version.
    ray.wait(workers, len(workers))
    return config.get_uniform_network().set_weights(ray.get(storage.get_weights.remote()))
|
bAmpT/muzero-pytorch | core/model.py | import typing
from typing import Dict, List
import torch
import torch.nn as nn
from .game import Action
class NetworkOutput(typing.NamedTuple):
    """Bundle returned by initial_inference / recurrent_inference.

    In training mode `value`/`reward` are raw categorical logits; in eval
    mode they are inverse-transformed scalars (see BaseMuZeroNet). For
    initial_inference the reward slot is fixed to 0.
    """
    value: float
    reward: float
    policy_logits: Dict[Action, float]
    hidden_state: List[float]
class BaseMuZeroNet(nn.Module):
    """Abstract MuZero network bundling the representation (h), dynamics (g)
    and prediction (f) functions. Sub-classes implement the three heads.
    """

    def __init__(self, inverse_value_transform, inverse_reward_transform):
        super(BaseMuZeroNet, self).__init__()
        # Callables mapping categorical value/reward logits back to scalars.
        self.inverse_value_transform = inverse_value_transform
        self.inverse_reward_transform = inverse_reward_transform

    def prediction(self, state):
        """f(state) -> (policy_logits, value). Implemented by sub-classes."""
        raise NotImplementedError

    def representation(self, obs_history):
        """h(observations) -> hidden state. Implemented by sub-classes."""
        raise NotImplementedError

    def dynamics(self, state, action):
        """g(state, action) -> (next state, reward). Implemented by sub-classes."""
        raise NotImplementedError

    def initial_inference(self, obs) -> NetworkOutput:
        """Embed `obs` and predict policy/value; the reward slot is fixed to 0."""
        hidden = self.representation(obs)
        policy_logits, value = self.prediction(hidden)
        if self.training:
            return NetworkOutput(value, 0, policy_logits, hidden)
        # Eval mode: convert the categorical value head into a scalar.
        return NetworkOutput(self.inverse_value_transform(value), 0, policy_logits, hidden)

    def recurrent_inference(self, hidden_state, action) -> NetworkOutput:
        """Advance the model one step and predict policy/value for the new state."""
        next_state, reward = self.dynamics(hidden_state, action)
        policy_logits, value = self.prediction(next_state)
        if not self.training:
            value = self.inverse_value_transform(value)
            reward = self.inverse_reward_transform(reward)
        return NetworkOutput(value, reward, policy_logits, next_state)

    def get_weights(self):
        """State dict with every tensor moved to CPU (safe for ray transfer)."""
        return {name: tensor.cpu() for name, tensor in self.state_dict().items()}

    def set_weights(self, weights):
        return self.load_state_dict(weights)

    def get_gradients(self):
        """Per-parameter gradients as numpy arrays (None where absent)."""
        return [None if p.grad is None else p.grad.data.cpu().numpy()
                for p in self.parameters()]

    def set_gradients(self, gradients):
        """Install numpy gradients onto the matching parameters."""
        for g, p in zip(gradients, self.parameters()):
            if g is not None:
                p.grad = torch.from_numpy(g)
|
bAmpT/muzero-pytorch | core/config.py | import os
import torch
from .game import Game
class DiscreteSupport:
    """Closed integer support [min, max] used by the categorical value and
    reward heads; size = max - min + 1 atoms.
    """

    def __init__(self, min: int, max: int):
        # Parameter names shadow builtins but are kept: they are part of the
        # public keyword interface (DiscreteSupport(min=..., max=...)).
        assert min < max
        self.min = min
        self.max = max
        self.size = max - min + 1
        self.range = range(min, max + 1)
class BaseMuZeroConfig(object):
    """Hyper-parameter container shared by every MuZero game configuration.

    Sub-classes supply the game (`set_game`/`new_game`), the network
    (`get_uniform_network`) and the exploration temperature schedule.
    """

    def __init__(self,
                 training_steps: int,
                 test_interval: int,
                 test_episodes: int,
                 checkpoint_interval: int,
                 max_moves: int,
                 discount: float,
                 dirichlet_alpha: float,
                 num_simulations: int,
                 batch_size: int,
                 td_steps: int,
                 num_actors: int,
                 lr_init: float,
                 lr_decay_rate: float,
                 lr_decay_steps: float,
                 window_size: int = int(1e6),
                 value_loss_coeff: float = 1,
                 value_support: DiscreteSupport = None,
                 reward_support: DiscreteSupport = None):
        # Self-Play
        self.action_space_size = None  # filled in by set_game()
        self.num_actors = num_actors
        self.max_moves = max_moves
        self.num_simulations = num_simulations
        self.discount = discount
        self.max_grad_norm = 5

        # testing arguments
        self.test_interval = test_interval
        self.test_episodes = test_episodes

        # Root prior exploration noise.
        self.root_dirichlet_alpha = dirichlet_alpha
        self.root_exploration_fraction = 0.25

        # UCB formula
        self.pb_c_base = 19652
        self.pb_c_init = 1.25

        # If we already have some information about which values occur in the environment, we can use them to
        # initialize the rescaling. This is not strictly necessary, but establishes identical behaviour to
        # AlphaZero in board games.
        self.max_value_bound = None
        self.min_value_bound = None

        # Training
        self.training_steps = training_steps
        self.checkpoint_interval = checkpoint_interval
        self.window_size = window_size
        self.batch_size = batch_size
        self.num_unroll_steps = 5
        self.td_steps = td_steps
        self.value_loss_coeff = value_loss_coeff
        self.device = 'cpu'
        self.exp_path = None  # experiment path
        self.debug = False
        self.model_path = None
        self.seed = None
        self.value_support = value_support
        self.reward_support = reward_support

        # optimization control
        self.weight_decay = 1e-4
        self.momentum = 0.9
        self.lr_init = lr_init
        self.lr_decay_rate = lr_decay_rate
        self.lr_decay_steps = lr_decay_steps

        # replay buffer
        self.priority_prob_alpha = 1
        self.use_target_model = True
        self.revisit_policy_search_rate = 0
        self.use_max_priority = None

    def visit_softmax_temperature_fn(self, num_moves, trained_steps):
        """Return the visit-count softmax temperature for action selection."""
        raise NotImplementedError

    def set_game(self, env_name):
        raise NotImplementedError

    def new_game(self, seed=None, save_video=False, save_path=None, video_callable=None, uid=None) -> Game:
        """ returns a new instance of the game"""
        raise NotImplementedError

    def get_uniform_network(self):
        raise NotImplementedError

    def scalar_loss(self, prediction, target):
        raise NotImplementedError

    @staticmethod
    def scalar_transform(x):
        """h(x) = sign(x) * (sqrt(|x| + 1) - 1 + eps * x).

        Reference : Appendix F => Network Architecture
        & Appendix A : Proposition A.2 in https://arxiv.org/pdf/1805.11593.pdf (Page-11)
        """
        epsilon = 0.001
        sign = torch.ones(x.shape).float().to(x.device)
        sign[x < 0] = -1.0
        output = sign * (torch.sqrt(torch.abs(x) + 1) - 1 + epsilon * x)
        return output

    def inverse_reward_transform(self, reward_logits):
        return self.inverse_scalar_transform(reward_logits, self.reward_support)

    def inverse_value_transform(self, value_logits):
        return self.inverse_scalar_transform(value_logits, self.value_support)

    def inverse_scalar_transform(self, logits, scalar_support):
        """Expectation over the categorical support, then the inverse of h(x).

        Reference : Appendix F => Network Architecture
        & Appendix A : Proposition A.2 in https://arxiv.org/pdf/1805.11593.pdf (Page-11)
        """
        value_probs = torch.softmax(logits, dim=1)
        value_support = torch.ones(value_probs.shape)
        value_support[:, :] = torch.tensor([x for x in scalar_support.range])
        value_support = value_support.to(device=value_probs.device)
        value = (value_support * value_probs).sum(1, keepdim=True)
        epsilon = 0.001
        sign = torch.ones(value.shape).float().to(value.device)
        sign[value < 0] = -1.0
        output = (((torch.sqrt(1 + 4 * epsilon * (torch.abs(value) + 1 + epsilon)) - 1) / (2 * epsilon)) ** 2 - 1)
        output = sign * output
        return output

    def value_phi(self, x):
        return self._phi(x, self.value_support.min, self.value_support.max, self.value_support.size)

    def reward_phi(self, x):
        return self._phi(x, self.reward_support.min, self.reward_support.max, self.reward_support.size)

    @staticmethod
    def _phi(x, min, max, set_size: int):
        """Two-hot projection of scalars onto the categorical support.

        Each scalar is clamped to [min, max] and split between its two
        neighbouring integer atoms with linear weights (Appendix F).

        Bug fix: the previous in-place `clamp_` silently clobbered the
        caller's tensor (the transformed targets that update_weights logs
        afterwards); clamp into a new tensor instead.
        """
        x = x.clamp(min, max)
        x_low = x.floor()
        x_high = x.ceil()
        p_high = (x - x_low)
        p_low = 1 - p_high
        target = torch.zeros(x.shape[0], x.shape[1], set_size).to(x.device)
        x_high_idx, x_low_idx = x_high - min, x_low - min
        target.scatter_(2, x_high_idx.long().unsqueeze(-1), p_high.unsqueeze(-1))
        # Same index when x is integral: the second scatter overwrites with p_low = 1.
        target.scatter_(2, x_low_idx.long().unsqueeze(-1), p_low.unsqueeze(-1))
        return target

    def get_hparams(self):
        """Non-None, non-path hyper-parameters as a dict (for tensorboard hparams)."""
        hparams = {}
        for k, v in self.__dict__.items():
            if 'path' not in k and (v is not None):
                hparams[k] = v
        return hparams

    def set_config(self, args):
        """Apply parsed CLI args and derive the experiment/model paths.

        Returns the experiment path.
        """
        self.set_game(args.env)
        self.seed = args.seed
        self.priority_prob_alpha = 1 if args.use_priority else 0
        self.use_target_model = args.use_target_model
        self.debug = args.debug
        self.device = args.device
        # Max-priority mode only makes sense when prioritization is on at all.
        self.use_max_priority = (args.use_max_priority and args.use_priority)
        if args.value_loss_coeff is not None:
            self.value_loss_coeff = args.value_loss_coeff
        if args.revisit_policy_search_rate is not None:
            self.revisit_policy_search_rate = args.revisit_policy_search_rate
        self.exp_path = os.path.join(args.result_dir, args.case, args.env,
                                     'revisit_rate_{}'.format(self.revisit_policy_search_rate),
                                     'val_coeff_{}'.format(self.value_loss_coeff),
                                     'with_target' if self.use_target_model else 'no_target',
                                     'with_prio' if args.use_priority else 'no_prio',
                                     'max_prio' if self.use_max_priority else 'no_max_prio',
                                     'seed_{}'.format(self.seed))
        self.model_path = os.path.join(self.exp_path, 'model.p')
        return self.exp_path
|
bAmpT/muzero-pytorch | config/classic_control/__init__.py | import gym
import torch
from core.config import BaseMuZeroConfig, DiscreteSupport
from .env_wrapper import ClassicControlWrapper
from .model import MuZeroNet
class ClassicControlConfig(BaseMuZeroConfig):
    """MuZero hyper-parameters for gym classic-control environments."""

    def __init__(self):
        super(ClassicControlConfig, self).__init__(
            training_steps=20000,
            test_interval=100,
            test_episodes=5,
            checkpoint_interval=20,
            max_moves=1000,
            discount=0.997,
            dirichlet_alpha=0.25,
            num_simulations=50,
            batch_size=128,
            td_steps=5,
            num_actors=32,
            lr_init=0.05,
            lr_decay_rate=0.01,
            lr_decay_steps=10000,
            window_size=1000,
            value_loss_coeff=1,
            value_support=DiscreteSupport(-20, 20),
            reward_support=DiscreteSupport(-5, 5))

    def visit_softmax_temperature_fn(self, num_moves, trained_steps):
        """Anneal the sampling temperature 1.0 -> 0.5 -> 0.25 over training."""
        if trained_steps < 0.5 * self.training_steps:
            return 1.0
        elif trained_steps < 0.75 * self.training_steps:
            return 0.5
        else:
            return 0.25

    def set_game(self, env_name, save_video=False, save_path=None, video_callable=None):
        """Bind the gym env name and derive observation/action space sizes."""
        self.env_name = env_name
        game = self.new_game()
        # Flat observation vector length (classic-control obs are 1-D).
        self.obs_shape = game.reset().shape[0]
        self.action_space_size = game.action_space_size

    def get_uniform_network(self):
        return MuZeroNet(self.obs_shape, self.action_space_size, self.reward_support.size, self.value_support.size,
                         self.inverse_value_transform, self.inverse_reward_transform)

    def new_game(self, seed=None, save_video=False, save_path=None, video_callable=None, uid=None):
        """Build a (optionally seeded / video-recorded) wrapped gym env."""
        env = gym.make(self.env_name)
        if seed is not None:
            env.seed(seed)
        if save_video:
            from gym.wrappers import Monitor
            env = Monitor(env, directory=save_path, force=True, video_callable=video_callable, uid=uid)
        return ClassicControlWrapper(env, discount=self.discount, k=4)

    def scalar_reward_loss(self, prediction, target):
        # Cross-entropy between predicted logits and the two-hot target.
        return -(torch.log_softmax(prediction, dim=1) * target).sum(1)

    def scalar_value_loss(self, prediction, target):
        # Cross-entropy between predicted logits and the two-hot target.
        return -(torch.log_softmax(prediction, dim=1) * target).sum(1)


# Module-level singleton picked up by the training entry point.
muzero_config = ClassicControlConfig()
|
bAmpT/muzero-pytorch | core/utils.py | import logging
import os
import shutil
import numpy as np
from scipy.stats import entropy
def make_results_dir(exp_path, args):
    """Create the experiment directory tree and return (exp_path, log_path).

    For a 'train' run an already-populated experiment directory is refused
    unless args.force is set, in which case it is wiped and recreated.

    Raises:
        FileExistsError: training into a non-empty directory without --force.
    """
    os.makedirs(exp_path, exist_ok=True)
    # makedirs above guarantees existence, so the former os.path.exists()
    # check was always true; only emptiness needs checking.
    if args.opr == 'train' and os.listdir(exp_path):
        if not args.force:
            raise FileExistsError('{} is not empty. Please use --force to overwrite it'.format(exp_path))
        shutil.rmtree(exp_path)
        os.makedirs(exp_path)
    log_path = os.path.join(exp_path, 'logs')
    os.makedirs(log_path, exist_ok=True)
    return exp_path, log_path
def init_logger(base_path):
    """Attach console and per-mode file handlers (DEBUG level) to the project loggers."""
    formatter = logging.Formatter(
        '[%(asctime)s][%(name)s][%(levelname)s][%(filename)s>%(funcName)s] ==> %(message)s')
    for mode in ['train', 'test', 'train_test', 'root']:
        logger = logging.getLogger(mode)
        handlers = [
            logging.StreamHandler(),
            logging.FileHandler(os.path.join(base_path, mode + '.log'), mode='a'),
        ]
        for handler in handlers:
            handler.setFormatter(formatter)
            logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
def select_action(node, temperature=1, deterministic=True):
    """Pick an action from the root's children by visit count.

    Deterministic mode takes the arg-max visit count; otherwise samples from
    the temperature-scaled visit distribution. Returns
    (action, entropy_of_visit_distribution_in_bits).
    """
    children = list(node.children.items())
    counts = [child.visit_count for _, child in children]
    scores = [c ** (1 / temperature) for c in counts]
    total = sum(scores)
    probs = [s / total for s in scores]
    if deterministic:
        pos = int(np.argmax(counts))
    else:
        pos = np.random.choice(len(children), p=probs)
    return children[pos][0], entropy(probs, base=2)
|
bAmpT/muzero-pytorch | core/replay_buffer.py | import numpy as np
import ray
import torch
@ray.remote
class ReplayBuffer(object):
    """Prioritized replay buffer of whole self-play games.

    Reference : DISTRIBUTED PRIORITIZED EXPERIENCE REPLAY
    Algo. 1 and Algo. 2 in Page-3 of (https://arxiv.org/pdf/1803.00933.pdf
    """

    def __init__(self, capacity, batch_size, prob_alpha=1):
        self.soft_capacity = capacity   # max number of games kept (soft limit)
        self.batch_size = batch_size
        self.buffer = []                # list of Game objects
        self.priorities = []            # per-step priorities (np.ndarray after first save)
        self.game_look_up = []          # flat step index -> (absolute game id, step_pos)
        self._eps_collected = 0
        self.base_idx = 0               # number of games evicted so far (id offset)
        self.prob_alpha = prob_alpha    # prioritization exponent alpha

    def save_game(self, game, priorities=None):
        """Append a game; priorities=None gives every step the current max priority."""
        if priorities is None:
            max_prio = self.priorities.max() if self.buffer else 1
            self.priorities = np.concatenate((self.priorities, [max_prio for _ in range(len(game))]))
        else:
            assert len(game) == len(priorities), " priorities should be of same length as the game steps"
            self.priorities = np.concatenate((self.priorities, priorities))
        self.buffer.append(game)
        self.game_look_up += [(self.base_idx + len(self.buffer) - 1, step_pos) for step_pos in range(len(game))]
        self._eps_collected += 1

    def sample_batch(self, num_unroll_steps: int, td_steps: int, beta: float = 1, model=None, config=None):
        """Sample a prioritized batch of transitions with importance-sampling weights."""
        obs_batch, action_batch, reward_batch, value_batch, policy_batch = [], [], [], [], []
        probs = np.array(self.priorities) ** self.prob_alpha
        probs /= probs.sum()
        indices = np.random.choice(len(self.priorities), self.batch_size, p=probs)
        # Importance-sampling weights, normalized by the batch maximum.
        total = len(self.priorities)
        weights = (total * probs[indices]) ** (-beta)
        weights /= weights.max()
        indices = torch.tensor(indices)
        weights = torch.tensor(weights).float()
        for idx in indices:
            game_id, game_pos = self.game_look_up[idx]
            # Convert the absolute game id into a position in the live buffer.
            game_id -= self.base_idx
            game = self.buffer[game_id]
            _actions = game.history[game_pos:game_pos + num_unroll_steps]
            # random action selection to complete num_unroll_steps
            _actions += [np.random.randint(0, game.action_space_size)
                         for _ in range(num_unroll_steps - len(_actions))]
            obs_batch.append(game.obs(game_pos))
            action_batch.append(_actions)
            value, reward, policy = game.make_target(game_pos, num_unroll_steps, td_steps, model, config)
            reward_batch.append(reward)
            value_batch.append(value)
            policy_batch.append(policy)
        obs_batch = torch.tensor(obs_batch).float()
        action_batch = torch.tensor(action_batch).long()
        reward_batch = torch.tensor(reward_batch).float()
        value_batch = torch.tensor(value_batch).float()
        policy_batch = torch.tensor(policy_batch).float()
        return obs_batch, action_batch, reward_batch, value_batch, policy_batch, indices, weights

    def update_priorities(self, batch_indices, batch_priorities):
        """Overwrite the priorities of previously sampled transitions."""
        for idx, prio in zip(batch_indices, batch_priorities):
            self.priorities[idx] = prio

    def remove_to_fit(self):
        """Evict the oldest games until the buffer is back at its soft capacity."""
        if self.size() > self.soft_capacity:
            num_excess_games = self.size() - self.soft_capacity
            excess_games_steps = sum([len(game) for game in self.buffer[:num_excess_games]])
            del self.buffer[:num_excess_games]
            self.priorities = self.priorities[excess_games_steps:]
            del self.game_look_up[:excess_games_steps]
            self.base_idx += num_excess_games

    def size(self):
        """Number of games currently stored (not transitions)."""
        return len(self.buffer)

    def episodes_collected(self):
        return self._eps_collected

    def get_priorities(self):
        return self.priorities
|
stirlab/poor-mans-nagios | poor_mans_mailer.py | <reponame>stirlab/poor-mans-nagios<gh_stars>0
import logging
import smtplib
from smtplib import SMTPRecipientsRefused, SMTPHeloError, SMTPSenderRefused, SMTPDataError, SMTPNotSupportedError
from email.mime.text import MIMEText
POOR_MANS_NAGIOS_GIT_URL = "https://github.com/stirlab/poor-mans-nagios"
class PoorMansMailer(object):
    """Sends PROBLEM/RECOVERY alert mail through the local SMTP server."""

    def __init__(self, email_from, logger):
        self.email_from = email_from
        self.logger = logger
        # Mirror SMTP protocol chatter to stderr when the logger is at DEBUG.
        self.debug = self.logger.level == logging.DEBUG

    def alert_problem(self, alert_emails, host, check_command):
        """Mail a '[PROBLEM]' notification; returns True on delivery."""
        subject = '[PROBLEM] %s failed on %s' % (check_command, host)
        return self.send(subject, alert_emails)

    def alert_recovery(self, alert_emails, host, check_command):
        """Mail a '[RECOVERY]' notification; returns True on delivery."""
        subject = '[RECOVERY] %s succeeded on %s' % (check_command, host)
        return self.send(subject, alert_emails)

    def send(self, subject, recipients):
        """Send `subject` to `recipients` via localhost SMTP.

        Returns True on success, False when the server rejects the message.
        """
        with smtplib.SMTP("localhost") as server:
            if self.debug:
                server.set_debuglevel(1)
            msg = MIMEText("Sent from poor-mans-nagios: %s" % POOR_MANS_NAGIOS_GIT_URL)
            msg['Subject'] = subject
            msg['From'] = self.email_from
            msg['To'] = ", ".join(recipients)
            try:
                server.sendmail(self.email_from, recipients, msg.as_string())
                return True
            except (SMTPRecipientsRefused, SMTPHeloError, SMTPSenderRefused, SMTPDataError, SMTPNotSupportedError) as err:
                # Bug fix: `message` was extracted but never used — the bare
                # exception object was logged instead. Log the extracted text.
                message = err.message if hasattr(err, 'message') else str(err)
                self.logger.error("Mailer exception: %s" % message)
                return False
|
stirlab/poor-mans-nagios | poor_mans_nagios.py | <reponame>stirlab/poor-mans-nagios
#!/usr/bin/env python3
import os
import time
import subprocess
import yaml
import logging
import pprint
from poor_mans_mailer import PoorMansMailer
logging.basicConfig(level=logging.INFO)
pp = pprint.PrettyPrinter(indent=4)
DEFAULT_SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
DEFAULT_CONFIG_FILE = "%s/config.yaml" % DEFAULT_SCRIPT_DIR
# Intervals are expressed in minutes (converted to seconds before sleeping).
DEFAULT_CHECK_INTERVAL = 5
DEFAULT_RETRY_INTERVAL = 1
# Consecutive failures required before an alert email is sent.
DEFAULT_FAILURE_THRESHOLD = 5
DEFAULT_NRPE_BINARY = "/usr/lib/nagios/plugins/check_nrpe"
class PoorMansNagios(object):
    """Poll a single NRPE check and email alerts on failure and recovery.

    While healthy, the check runs every ``check_interval`` minutes; once it
    starts failing it is retried every ``retry_interval`` minutes, and a
    problem alert is sent after ``failure_threshold`` consecutive failures.
    """

    def __init__(self, config_file=None, args=None):
        """
        :param config_file: YAML config path (defaults to DEFAULT_CONFIG_FILE)
        :param args: extra settings merged over the file config (e.g. CLI flags)
        """
        self.config = self.parse_config(config_file or DEFAULT_CONFIG_FILE)
        # Bug fix: avoid a mutable default argument for ``args``.
        self.config.update(args or {})
        self.quiet = 'quiet' in self.config and self.config['quiet']
        self.debug = 'debug' in self.config and self.config['debug']
        self.logger = logging.getLogger(self.__class__.__name__)
        # Remember the initial level so default_loglevel() can restore it.
        self.log_level = self.logger.level
        if self.quiet:
            self.enable_quiet()
        if self.debug:
            self.enable_debug()
        self.logger.debug('Config:')
        pp.pprint(self.config)
        self.build_configuration()
        self.mailer = PoorMansMailer(self.email_from, self.logger)
        self.reset_on_check_ok()

    def default_loglevel(self):
        """Restore the log level captured at construction time."""
        self.logger.setLevel(self.log_level)

    def enable_debug(self):
        """Lower the log level to DEBUG."""
        self.logger.setLevel(logging.DEBUG)

    def enable_quiet(self):
        """Raise the log level so only errors are emitted."""
        self.logger.setLevel(logging.ERROR)

    def parse_config(self, config_file):
        """Load and return the YAML config; raise RuntimeError on failure."""
        with open(config_file, 'r') as stream:
            try:
                config = yaml.safe_load(stream)
            except yaml.YAMLError as err:
                raise RuntimeError("Could not load config file %s: %s" % (config_file, err))
        return config

    def build_configuration(self):
        """Copy settings from the parsed config onto attributes, with defaults."""
        pmn_config = self.config['poor-mans-nagios']
        nrpe_config = self.config['nrpe']
        self.nrpe_binary = pmn_config.get('nrpe-binary', DEFAULT_NRPE_BINARY)
        self.check_interval = pmn_config.get('check-interval', DEFAULT_CHECK_INTERVAL)
        self.retry_interval = pmn_config.get('retry-interval', DEFAULT_RETRY_INTERVAL)
        self.failure_threshold = pmn_config.get('failure-threshold', DEFAULT_FAILURE_THRESHOLD)
        self.alert_on_recovery = pmn_config.get('alert-on-recovery', True)
        self.alert_emails = pmn_config.get('alert-emails', [])
        # No defaults for these: a missing key is a fatal configuration
        # error (KeyError), matching the original behavior.
        self.email_from = pmn_config['email-from']
        self.checked_host = nrpe_config['host']
        self.check_command = nrpe_config['command']

    def set_sleep_seconds(self, minutes):
        """Convert an interval in minutes into seconds for time.sleep()."""
        self.sleep_seconds = minutes * 60

    def reset_on_check_ok(self):
        """Clear failure tracking and fall back to the normal check interval."""
        self.logger.info("Resetting tracking on recovery")
        self.fail_count = 0
        self.alert_sent = False
        self.set_sleep_seconds(self.check_interval)

    def check_failure_threshold(self):
        """Return True once consecutive failures reach the alert threshold."""
        return self.fail_count >= self.failure_threshold

    def handle_failure(self):
        """Record a failed check and send a problem alert once over threshold."""
        self.fail_count += 1
        self.logger.debug("Current fail count: %d, failure threshold: %d" % (self.fail_count, self.failure_threshold))
        if self.check_failure_threshold():
            self.logger.warning("Check %s for host %s over failure threshold" % (self.check_command, self.checked_host))
            if self.alert_sent:
                self.logger.debug("Alerts already sent for this failure, skipping")
            else:
                # Only mark the alert as sent when the mailer succeeded, so a
                # failed send is retried on the next failing check.
                if self.send_problem_alert():
                    self.alert_sent = True

    def handle_recovery(self):
        """If a problem alert was sent, reset state and send a recovery notice."""
        if self.alert_sent:
            self.logger.info("Service recovered")
            self.reset_on_check_ok()
            self.send_recovery_alert()

    def send_problem_alert(self):
        """Send the problem alert email; return the mailer's success bool."""
        self.logger.warning("Sending problem alert to: %s" % ", ".join(self.alert_emails))
        return self.mailer.alert_problem(self.alert_emails, self.checked_host, self.check_command)

    def send_recovery_alert(self):
        """Send the recovery email when alert-on-recovery is enabled."""
        if self.alert_on_recovery:
            self.logger.info("Sending recovery alert to: %s" % ", ".join(self.alert_emails))
            return self.mailer.alert_recovery(self.alert_emails, self.checked_host, self.check_command)

    def run_shell_command(self, command, capture_output=True):
        """Run *command* (an argv list); return (returncode, stdout, stderr)."""
        kwargs = {}
        if capture_output:
            kwargs.update({
                'stdout': subprocess.PIPE,
                'stderr': subprocess.PIPE,
                'universal_newlines': True,
            })
        try:
            proc = subprocess.Popen(command, **kwargs)
            stdout, stderr = proc.communicate()
            returncode = proc.returncode
        except Exception as e:
            # A failure to launch (e.g. missing binary) is reported like a
            # failed check rather than crashing the monitor loop.
            stdout = ''
            stderr = str(e)
            returncode = 1
        return returncode, stdout, stderr

    def build_command_args(self):
        """Build the check_nrpe argv list from the 'nrpe' config section.

        Keys become ``--key`` flags; a value of True means a bare flag,
        anything else is appended as the flag's argument.
        """
        args = [
            self.nrpe_binary,
        ]
        for arg, val in self.config['nrpe'].items():
            args.append("--%s" % arg)
            if val is not True:
                # Bug fix: YAML values may be ints (e.g. port numbers); argv
                # entries must be strings for subprocess and " ".join().
                args.append(str(val))
        return args

    def execute_check(self):
        """Run the check once, update failure/recovery state, return success."""
        command = self.build_command_args()
        self.logger.debug("Running check command: %s" % " ".join(command))
        returncode, stdout, stderr = self.run_shell_command(command)
        if returncode == 0:
            self.logger.debug("Check %s on host %s succeeded" % (self.check_command, self.checked_host))
            self.handle_recovery()
            return True
        self.logger.warning("Check %s on host %s failed, stdout: %s, stderr: %s" % (self.check_command, self.checked_host, stdout, stderr))
        self.handle_failure()
        return False

    def configure_next_action(self, success):
        """Pick the next sleep interval: normal on success, retry on failure."""
        if success:
            self.set_sleep_seconds(self.check_interval)
        else:
            self.set_sleep_seconds(self.retry_interval)
        self.logger.debug("Set interval to %d seconds" % self.sleep_seconds)

    def monitor(self):
        """Main loop: run the check forever, sleeping between runs."""
        self.logger.info("Starting poor-mans-nagios with check_interval: %d, retry_interval: %d, failure_threshold: %d" % (self.check_interval, self.retry_interval, self.failure_threshold))
        try:
            while True:
                success = self.execute_check()
                self.configure_next_action(success)
                self.logger.debug("Sleeping %d seconds" % self.sleep_seconds)
                time.sleep(self.sleep_seconds)
        except KeyboardInterrupt:
            self.logger.warning('Process interrupted')
|
stirlab/poor-mans-nagios | poor-mans-nagios-cli.py | <gh_stars>0
#!/usr/bin/env python3
import argparse
from poor_mans_nagios import PoorMansNagios, DEFAULT_CONFIG_FILE
def main():
    """Parse command-line options and start the monitoring loop."""
    arg_parser = argparse.ArgumentParser(description="Run poor-mans-nagios from CLI")
    arg_parser.add_argument("--debug", action='store_true', help="Enable debugging")
    arg_parser.add_argument("--quiet", action='store_true', help="Silence output except for errors")
    arg_parser.add_argument(
        "--config-file", type=str, metavar="FILE", default=DEFAULT_CONFIG_FILE,
        help="Configuration filepath, default: %s" % DEFAULT_CONFIG_FILE)
    options = vars(arg_parser.parse_args())
    # The config path is consumed here; everything else is passed through.
    config_file = options.pop('config_file')
    PoorMansNagios(config_file, options).monitor()


if __name__ == "__main__":
    main()
|
TobyChen320/CSPT15_TreeTraversals_GP | src/demos/demo1.py | <gh_stars>0
"""
You are given a binary tree.
Write a function that can return the inorder traversal of node values.
Example:
Input:
3
\
1
/
5
Output: [3,5,1]
"""
# Definition for a binary tree node.
class TreeNode:
    """A binary tree node holding a value and optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
# a recursive solution
def inorder_traversal_r(root):
    """Return the inorder (left, node, right) values of a binary tree, recursively."""
    values = []

    def visit(node):
        # Falsy (None) nodes terminate the recursion.
        if not node:
            return
        visit(node.left)
        values.append(node.val)
        visit(node.right)

    visit(root)
    return values
# iterative solution
def inorder_traversal_i(root):
    """Return the inorder values of a binary tree using an explicit stack."""
    values = []
    pending = []
    node = root
    # Loop while there is either a subtree left to descend into or a
    # node waiting on the stack to be emitted.
    while pending or node:
        # Push the entire left spine of the current subtree.
        while node:
            pending.append(node)
            node = node.left
        # Emit the deepest unvisited node, then walk its right subtree.
        node = pending.pop()
        values.append(node.val)
        node = node.right
    return values
# Allison's in order recursive solution
def inorder_traversal_a(root):
if not root:
return []
left = inorder_traversal_a(root.left)
right = inorder_traversal_a(root.right)
return left + [root.val] + right
# Build the example tree from the module docstring: 3 with right child 1,
# whose left child is 5 (inorder: [3, 5, 1]).
t1 = TreeNode(3)
t1.right = TreeNode(1)
t1.right.left = TreeNode(5)
# NOTE(review): no function named `inorder_traversal` exists here — the
# implementations are inorder_traversal_r/_i/_a.
# print(inorder_traversal(t1))
TobyChen320/CSPT15_TreeTraversals_GP | src/demos/demo2.py | <filename>src/demos/demo2.py
"""
You are given the values from a preorder and an inorder tree traversal. Write a
function that can take those inputs and output a binary tree.
*Note: assume that there will not be any duplicates in the tree.*
Example:
Inputs:
preorder = [5,7,22,13,9]
inorder = [7,5,13,22,9]
Output:
5
/ \
7 22
/ \
13 9
"""
# Sample traversals for the example tree shown in the module docstring.
pre_order = [5, 7, 22, 13, 9]
in_order = [7, 5, 13, 22, 9]
# Definition for a binary tree node.
class TreeNode:
    """A binary tree node with a value and optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


def build_tree(preorder, inorder):
    """Reconstruct a binary tree from its preorder and inorder traversals.

    Assumes the tree contains no duplicate values.
    """
    # Map each value to its position in the inorder sequence so each
    # left/right split is an O(1) lookup.
    inorder_index_of = {value: pos for pos, value in enumerate(inorder)}
    # One-element list used as a mutable cursor over the preorder sequence.
    cursor = [0]

    def build(lo, hi):
        # Build the subtree covering inorder[lo:hi]; an empty range is no node.
        if lo == hi:
            return None
        # The next unconsumed preorder element is this subtree's root.
        root_value = preorder[cursor[0]]
        cursor[0] += 1
        node = TreeNode(root_value)
        split = inorder_index_of[root_value]
        # Everything left of the split in inorder belongs to the left subtree.
        node.left = build(lo, split)
        node.right = build(split + 1, hi)
        return node

    return build(0, len(inorder))
# Rebuild the example tree and signal that the script ran to completion.
tree1 = build_tree(pre_order, in_order)
print("Done!")
|
iciubotarasu/phantom-apps | Apps/phbox/box_connector.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# -----------------------------------------
# Phantom sample App Connector python file
# -----------------------------------------
# Python 3 Compatibility imports
from __future__ import print_function, unicode_literals
# Phantom App imports
import phantom.app as phantom
from phantom.base_connector import BaseConnector
from phantom.action_result import ActionResult
from phantom.vault import Vault
# Usage of the consts file is recommended
# from box_consts import *
import os, ast, json
from boxsdk import OAuth2
from boxsdk import Client
import sqlite3
from sqlite3 import Error
from os import path
from datetime import datetime
class RetVal(tuple):
    """A (status, value) pair that unpacks like a tuple; value defaults to None."""

    def __new__(cls, val1, val2=None):
        return super().__new__(cls, (val1, val2))
class BoxConnector(BaseConnector):
    """Phantom app connector for Box.

    OAuth access/refresh tokens are persisted in a per-asset SQLite database
    so they survive across action runs; boxsdk refreshes them automatically
    and calls :meth:`_store_tokens` with each new pair.
    """

    def __init__(self):
        super(BoxConnector, self).__init__()
        # Loaded in initialize(), persisted in finalize().
        self._state = None
        self._base_url = None

    def _create_sql_connection(self):
        """Open (creating on first use) the per-asset SQLite token database.

        Sets the module-level ``conn`` global used by the token helpers and
        also returns the connection.
        """
        global conn
        asset_id = self.get_asset_id()
        app_dir = os.path.split(__file__)[0]
        # The app id is the trailing '_'-separated component of the install dir.
        app_id = (app_dir.split('/')[-1].split('_')[-1])
        app_state_location = '/opt/phantom/local_data/app_states/{0}/{1}_box_tokens.db'.format(app_id, asset_id)
        conn = None
        db_exist = path.exists(app_state_location)
        if db_exist:
            try:
                conn = sqlite3.connect(app_state_location)
            except Error as e:
                print(e)
        else:
            # First run for this asset: create the database and token table.
            create_table_sql = '''CREATE TABLE IF NOT EXISTS box (id integer PRIMARY KEY AUTOINCREMENT,
                token text NOT NULL,refresh_token text NOT NULL,date text);'''
            try:
                conn = sqlite3.connect(app_state_location)
                c = conn.cursor()
                c.execute(create_table_sql)
            except Error as e:
                print(e)
        return conn

    def _store_tokens(self, access_token, refresh_token):
        """Append a token pair to the DB.

        Also used as the boxsdk ``store_tokens`` callback so refreshed tokens
        are persisted automatically.
        """
        self.debug_print('BOX tokens store:{},{} '.format(access_token, refresh_token))
        self._create_sql_connection()
        date = datetime.now().strftime("%Y-%m-%d %I:%M:%S %p %Z")
        data = [access_token, refresh_token, date]
        sql = '''INSERT INTO box(token,refresh_token,date) VALUES(?,?,?);'''
        cur = conn.cursor()
        cur.execute(sql, data)
        conn.commit()
        conn.close()
        return

    def _read_tokens(self):
        """Return the most recently stored (access_token, refresh_token) pair."""
        self._create_sql_connection()
        cur = conn.cursor()
        cur.execute("SELECT * FROM box ORDER BY id DESC LIMIT 1;")
        rows = cur.fetchall()
        # Row layout: (id, token, refresh_token, date).
        auth_token = rows[0][1]
        refresh_token = rows[0][2]
        conn.close()
        self.debug_print('BOX tokens read:{},{} '.format(auth_token, refresh_token))
        return auth_token, refresh_token

    def _authenticate(self):
        """Build a boxsdk Client from the stored tokens.

        Sets the module-level ``client`` global and returns it.
        """
        global client
        config = self.get_config()
        client_id = config['client_id']
        client_secret = config['client_secret']
        access_token, refresh_token = self._read_tokens()
        oauth = OAuth2(
            client_id=client_id,
            client_secret=client_secret,
            access_token=access_token,
            refresh_token=refresh_token,
            store_tokens=self._store_tokens,
        )
        client = Client(oauth)
        return client

    def _handle_test_connectivity(self, param):
        """Validate connectivity, seeding the DB with the configured initial
        tokens on the first run (or when the stored tokens are invalid)."""
        action_result = self.add_action_result(ActionResult(dict(param)))
        self.save_progress("Connecting to Box")
        config = self.get_config()
        access_token = config['initial_token']
        refresh_token = config['initial_refresh_token']
        self._authenticate()
        try:
            client.user(user_id='me').get()
        except Exception:
            # Stored tokens are missing or invalid: fall back to the initial
            # tokens from the asset configuration and retry.
            self._store_tokens(access_token, refresh_token)
            self.save_progress("Initial Tokens were stored.")
            self._authenticate()
        try:
            current_user = client.user(user_id='me').get()
            if current_user.name:
                self.save_progress("Connected to BOX")
                self.save_progress("Test Connectivity Passed.")
                return action_result.set_status(phantom.APP_SUCCESS, 'Connection established and tokens stored.')
            # NOTE(review): falls through (returns None) when the user has no
            # name — confirm this is intended.
        except Exception as e:
            self.debug_print('BOX error: ', str(e))
            self.save_progress("Test Connectivity Failed. {}".format(str(e)))
            self.save_progress("If you haven't used the app in the last 60 days please use the Test Conectivity with newly generated tokens.")
            self.save_progress("Please check if tokens were changed and update them into the app config.")
            # Bug fix: this failure path previously reported the success
            # message ('Connection established and tokens stored.').
            return action_result.set_status(phantom.APP_ERROR, 'Could not connect to Box; check the configured tokens.')

    def _handle_refresh_token(self, param):
        """Force a round-trip to Box so boxsdk refreshes and stores new tokens."""
        self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
        action_result = self.add_action_result(ActionResult(dict(param)))
        self._authenticate()
        try:
            current_user = client.user(user_id='me').get()
            self.save_progress('Box User: {}'.format(current_user.name))
            self.save_progress('Tokens refreshed successfully')
            return action_result.set_status(phantom.APP_SUCCESS, 'Successfully refreshed tokens')
        except Exception as e:
            self.debug_print('BOX error: ', str(e))
            self.save_progress('Error refreshing token:{}'.format(str(e)))
            # Bug fix: stray leading apostrophe removed from the status message.
            return action_result.set_status(phantom.APP_ERROR, "Error refreshing tokens")

    def _handle_create_folder(self, param):
        """Create a subfolder under ``root_folder_id`` unless it already exists."""
        self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
        action_result = self.add_action_result(ActionResult(dict(param)))
        folder_name = param['folder_name']
        root_folder = param['root_folder_id']
        self._authenticate()
        try:
            items = client.folder(folder_id=root_folder).get_items()
            existing_folders = [item.name for item in items]
            if folder_name not in existing_folders:
                subfolder = client.folder(root_folder).create_subfolder(folder_name)
                self.save_progress('Created subfolder with ID {0}'.format(subfolder.id))
                folder = {'created_folder_name': folder_name,
                          'created_folder_id': subfolder.id,
                          'root_folder': root_folder}
                action_result.add_data(folder)
            else:
                self.save_progress('Folder {0} already exists in this location'.format(folder_name))
            return action_result.set_status(phantom.APP_SUCCESS, 'Successfully created folder')
        except Exception as e:
            self.debug_print('BOX error: ', str(e))
            self.save_progress("Client error: {0}".format(str(e)))
            # Bug fix: stray leading apostrophe removed from the status message.
            return action_result.set_status(phantom.APP_ERROR, "Error creating folder")

    def _handle_upload_file(self, param):
        """Upload one or more vault files to a Box folder, skipping duplicates.

        Files larger than 50 MB use Box's chunked upload API.
        """
        self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
        action_result = self.add_action_result(ActionResult(dict(param)))
        # 'vault_id' arrives as the string form of a Python list.
        vault_ids = ast.literal_eval(param['vault_id'])
        folder_id = param['box_folder_id']
        self._authenticate()
        try:
            items = client.folder(folder_id=folder_id).get_items()
            existing_files = [item.name for item in items]
            count = 0
            for item in vault_ids:
                count += 1
                self.save_progress('File "{0}" out of {1} is uploading'.format(count, len(vault_ids)))
                file_info = Vault.get_file_info(item)
                file_size = file_info[0]['size']
                file_path = file_info[0]['path']
                file_name = file_info[0]['name']
                if file_name not in existing_files:
                    if file_size > 50000000:
                        # Chunked upload for large files; rename afterwards
                        # since the upload uses the vault path's basename.
                        chunked_uploader = client.folder(folder_id).get_chunked_uploader(file_path)
                        uploaded_file = chunked_uploader.start()
                        updated_file = client.file(uploaded_file.id).update_info({'name': file_name})
                        self.save_progress('File "{0}" uploaded to Box with file ID {1}'.format(updated_file.name, uploaded_file.id))
                    else:
                        new_file = client.folder(folder_id).upload(file_path)
                        updated_file = client.file(new_file.id).update_info({'name': file_name})
                        self.save_progress('File "{0}" uploaded to Box with file ID {1}'.format(updated_file.name, new_file.id))
                else:
                    self.save_progress('File {0} already exists in this location'.format(file_name))
            return action_result.set_status(phantom.APP_SUCCESS, 'Successfully uploaded files')
        except Exception as e:
            self.save_progress("Client error: {0}".format(str(e)))
            # Bug fix: stray leading apostrophe removed from the status message.
            return action_result.set_status(phantom.APP_ERROR, "Error uploading files")

    def handle_action(self, param):
        """Dispatch an incoming action to its handler by identifier."""
        ret_val = phantom.APP_SUCCESS
        action_id = self.get_action_identifier()
        self.debug_print("action_id", self.get_action_identifier())
        if action_id == 'test_connectivity':
            ret_val = self._handle_test_connectivity(param)
        elif action_id == 'refresh_token':
            ret_val = self._handle_refresh_token(param)
        elif action_id == 'upload_file':
            ret_val = self._handle_upload_file(param)
        elif action_id == 'create_folder':
            ret_val = self._handle_create_folder(param)
        return ret_val

    def initialize(self):
        """Load persisted state and configuration before handling actions."""
        self._state = self.load_state()
        config = self.get_config()
        self._base_url = config.get('base_url')
        return phantom.APP_SUCCESS

    def finalize(self):
        """Persist state after all actions have been handled."""
        self.save_state(self._state)
        return phantom.APP_SUCCESS
def main():
    """CLI test harness: run a single connector action from a JSON input file."""
    import pudb
    import argparse
    # Bug fix: requests is used below but was never imported anywhere in
    # this module.
    import requests

    pudb.set_trace()

    argparser = argparse.ArgumentParser()
    argparser.add_argument('input_test_json', help='Input Test JSON file')
    argparser.add_argument('-u', '--username', help='username', required=False)
    argparser.add_argument('-p', '--password', help='password', required=False)
    args = argparser.parse_args()

    session_id = None
    # Bug fix: restore the credential handling that had been replaced by
    # <PASSWORD> placeholders (which were syntax errors).
    username = args.username
    password = args.password

    if username is not None and password is None:
        # Prompt without echoing when only a username was supplied.
        import getpass
        password = getpass.getpass("Password: ")

    if username and password:
        try:
            login_url = BoxConnector._get_phantom_base_url() + '/login'
            print("Accessing the Login page")
            r = requests.get(login_url, verify=False)
            csrftoken = r.cookies['csrftoken']
            data = dict()
            data['username'] = username
            data['password'] = password
            data['csrfmiddlewaretoken'] = csrftoken
            headers = dict()
            headers['Cookie'] = 'csrftoken=' + csrftoken
            headers['Referer'] = login_url
            print("Logging into Platform to get the session id")
            r2 = requests.post(login_url, verify=False, data=data, headers=headers)
            session_id = r2.cookies['sessionid']
        except Exception as e:
            print("Unable to get session id from the platform. Error: " + str(e))
            exit(1)

    with open(args.input_test_json) as f:
        in_json = f.read()
        in_json = json.loads(in_json)
        print(json.dumps(in_json, indent=4))

        connector = BoxConnector()
        connector.print_progress_message = True

        if session_id is not None:
            in_json['user_session_token'] = session_id
            connector._set_csrf_info(csrftoken, headers['Referer'])

        ret_val = connector._handle_action(json.dumps(in_json), None)
        print(json.dumps(json.loads(ret_val), indent=4))

    exit(0)


if __name__ == '__main__':
    main()
|
atalax/python-adf4351 | adf4351/__init__.py | # The MIT License (MIT)
#
# Copyright (C) 2016 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import spidev
try:
from math import gcd
except ImportError:
from fractions import gcd
def bit(x):
    """Return an integer with only bit *x* set."""
    return 1 << x
def bits(n):
    """Return a mask with the lowest *n* bits set."""
    return (1 << n) - 1
def insert_val(n, v, o, m):
    """Return *n* with the field selected by mask *m* replaced by *v* shifted to offset *o*."""
    cleared = n & ~m
    shifted = (v << o) & m
    return cleared | shifted
class Register:
    """Descriptor mirroring one ADF4351 register.

    The low three bits of every word carry the register address, so the
    cached value keeps those bits cleared and the address (``bits``) is
    OR-ed back in on every read and write.
    """

    def __init__(self, bits):
        self.bits = bits          # 3-bit register address (control bits)
        self.val = 0x00000000     # cached payload with the address bits cleared

    def __get__(self, obj, obtype=None):
        if obj is None:
            return self
        if self.val is None:
            return None
        return self.val | self.bits

    def __set__(self, obj, val):
        # Cache the payload without the control bits, then push the full
        # word (payload | address) out through the owner's write().
        self.val = val & ~0b111
        obj.write(val | self.bits)
class RegBit:
    """Descriptor exposing a single flag bit of a backing register as a bool."""

    def __init__(self, reg, bit):
        self.reg = reg   # backing Register descriptor
        self.bit = bit   # mask with exactly one bit set

    def __get__(self, obj, obtype=None):
        if obj is None:
            return self
        return bool(self.reg.__get__(obj) & self.bit)

    def __set__(self, obj, val):
        # Read-modify-write the backing register so other fields survive.
        current = self.reg.__get__(obj)
        current = (current | self.bit) if val else (current & ~self.bit)
        self.reg.__set__(obj, current)
class RegVal:
    """Descriptor exposing a multi-bit field of a backing register as an int."""

    def __init__(self, reg, off, width):
        self.reg = reg
        self.off = off
        self.mask = ((1 << width) - 1) << self.off

    def __get__(self, obj, obtype=None):
        if obj is None:
            return self
        return (self.reg.__get__(obj) & self.mask) >> self.off

    def __set__(self, obj, val):
        # Clear the field, then merge in the new value (masked to width).
        updated = self.reg.__get__(obj) & ~self.mask
        updated |= (val << self.off) & self.mask
        self.reg.__set__(obj, updated)
class ADF4351:
    """Driver for the Analog Devices ADF4351 wideband PLL synthesizer over SPI.

    The six 32-bit registers are mirrored by ``Register`` descriptors
    (r0-r5); assigning to a register, or to one of the ``RegBit``/``RegVal``
    fields layered on top of it, immediately shifts the full word out to the
    chip.
    """

    # R0: INT / FRAC dividers.
    R0_FRAC_OFF = 3
    R0_FRAC_MASK = bits(12) << R0_FRAC_OFF
    R0_INT_OFF = 15
    R0_INT_MASK = bits(16) << R0_INT_OFF

    # R1: modulus, phase, prescaler.
    R1_MOD_OFF = 3
    R1_MOD_MASK = bits(12) << R1_MOD_OFF
    R1_PHASE_OFF = 15
    R1_PHASE_MASK = bits(12) << R1_PHASE_OFF
    R1_PRESCALER = bit(27)
    R1_PHASE_ADJUST = bit(28)

    # R2: charge pump, reference path, muxout.
    R2_COUNTER_RESET_ENABLE = bit(3)
    R2_CP_THREE_STATE_ENABLE = bit(4)
    R2_POWER_DOWN = bit(5)
    R2_PD_POLARITY_POSITIVE = bit(6)
    R2_LDP_6NS = bit(7)
    R2_LDF_INTN = bit(8)
    R2_CHARGE_PUMP_CURRENT_OFF = 9
    R2_DOUBLE_BUFFER_ENABLE = bit(13)
    R2_R_OFF = 14
    R2_R_MASK = bits(10) << R2_R_OFF
    R2_RDIV2_ENABLE = bit(24)
    R2_REFERENCE_DOUBLER_ENABLE = bit(25)
    R2_MUXOUT_OFF = 26
    R2_MUXOUT_THREE_STATE = 0 << R2_MUXOUT_OFF
    R2_MUXOUT_DVDD = 1 << R2_MUXOUT_OFF
    R2_MUXOUT_DGND = 2 << R2_MUXOUT_OFF
    R2_MUXOUT_R = 3 << R2_MUXOUT_OFF
    R2_MUXOUT_N = 4 << R2_MUXOUT_OFF
    R2_MUXOUT_ALD = 5 << R2_MUXOUT_OFF
    R2_MUXOUT_DLD = 6 << R2_MUXOUT_OFF
    R2_LOW_NOISE_SPUR_OFF = 29
    R2_LOW_NOISE_SPUR_MASK = bits(2) << R2_LOW_NOISE_SPUR_OFF

    # R3: clock divider, band select mode.
    R3_CLK_DIV_OFF = 3
    R3_CLK_DIV_MASK = bits(12) << R3_CLK_DIV_OFF
    R3_CLK_DIV_MODE_OFF = 15
    R3_CLK_DIV_MODE_DISABLED = 0b00 << R3_CLK_DIV_MODE_OFF
    R3_CLK_DIV_MODE_FAST_LOCK = 0b01 << R3_CLK_DIV_MODE_OFF
    R3_CLK_DIV_MODE_RESYNC_ENABLE = 0b10 << R3_CLK_DIV_MODE_OFF
    R3_CSR = bit(18)
    R3_CHARGE_CANCEL = bit(21)
    R3_ABP = bit(22)
    R3_BAND_SELECT_MODE = bit(23)

    # R4: output stage, RF divider, band select clock divider.
    R4_OUTPUT_POWER_OFF = 3
    R4_OUTPUT_POWER__4DBM = 0b00 << R4_OUTPUT_POWER_OFF
    R4_OUTPUT_POWER__1DBM = 0b01 << R4_OUTPUT_POWER_OFF
    R4_OUTPUT_POWER_2DBM = 0b10 << R4_OUTPUT_POWER_OFF
    R4_OUTPUT_POWER_5DBM = 0b11 << R4_OUTPUT_POWER_OFF
    R4_RF_OUTPUT_ENABLE = bit(5)
    R4_AUX_OUTPUT_POWER_OFF = 6
    R4_AUX_OUTPUT_POWER__4DBM = 0b00 << R4_AUX_OUTPUT_POWER_OFF
    R4_AUX_OUTPUT_POWER__1DBM = 0b01 << R4_AUX_OUTPUT_POWER_OFF
    R4_AUX_OUTPUT_POWER_2DBM = 0b10 << R4_AUX_OUTPUT_POWER_OFF
    R4_AUX_OUTPUT_POWER_5DBM = 0b11 << R4_AUX_OUTPUT_POWER_OFF
    R4_AUX_OUTPUT_ENABLE = bit(8)
    R4_AUX_OUTPUT_SELECT = bit(9)
    R4_MTLD = bit(10)
    R4_VCO_POWER_DOWN = bit(11)
    R4_BAND_SELECT_CLOCK_DIV_OFF = 12
    R4_BAND_SELECT_CLOCK_DIV_MASK = bits(8) << R4_BAND_SELECT_CLOCK_DIV_OFF
    R4_DIVIDER_SELECT_OFF = 20
    R4_DIVIDER_SELECT_MASK = 0b111 << R4_DIVIDER_SELECT_OFF
    R4_DIVIDER_SELECT_1 = 0b000 << R4_DIVIDER_SELECT_OFF
    R4_DIVIDER_SELECT_2 = 0b001 << R4_DIVIDER_SELECT_OFF
    R4_DIVIDER_SELECT_4 = 0b010 << R4_DIVIDER_SELECT_OFF
    R4_DIVIDER_SELECT_8 = 0b011 << R4_DIVIDER_SELECT_OFF
    R4_DIVIDER_SELECT_16 = 0b100 << R4_DIVIDER_SELECT_OFF
    R4_DIVIDER_SELECT_32 = 0b101 << R4_DIVIDER_SELECT_OFF
    R4_DIVIDER_SELECT_64 = 0b110 << R4_DIVIDER_SELECT_OFF
    R4_FEEDBACK_SELECT = bit(23)

    # R5: lock-detect pin behavior.
    R5_LD_PIN_MODE_OFF = 22
    R5_LD_PIN_MODE_LOW = 0b00 << R5_LD_PIN_MODE_OFF
    R5_LD_PIN_MODE_LOCK_DETECT = 0b01 << R5_LD_PIN_MODE_OFF
    R5_LD_PIN_MODE_HIGH = 0b11 << R5_LD_PIN_MODE_OFF

    # Register mirrors; the argument is the 3-bit register address.
    r0 = Register(0b000)
    r1 = Register(0b001)
    r2 = Register(0b010)
    r3 = Register(0b011)
    r4 = Register(0b100)
    r5 = Register(0b101)

    # Named fields layered over the registers. Note: ``int`` shadows the
    # builtin only as a class attribute; method bodies still see the builtin.
    int = RegVal(r0, R0_INT_OFF, 16)
    frac = RegVal(r0, R0_FRAC_OFF, 12)
    mod = RegVal(r1, R1_MOD_OFF, 12)
    phase = RegVal(r1, R1_PHASE_OFF, 12)
    prescaler_89 = RegBit(r1, R1_PRESCALER)
    phase_adj = RegBit(r1, R1_PHASE_ADJUST)
    couter_reset = RegBit(r2, R2_COUNTER_RESET_ENABLE)
    # Correctly spelled alias; the misspelled name above is kept so existing
    # callers keep working.
    counter_reset = couter_reset
    cp_three_state = RegBit(r2, R2_CP_THREE_STATE_ENABLE)
    power_down = RegBit(r2, R2_POWER_DOWN)
    pd_polarity_positive = RegBit(r2, R2_PD_POLARITY_POSITIVE)
    ldp_6ns = RegBit(r2, R2_LDP_6NS)
    ldf_intn = RegBit(r2, R2_LDF_INTN)
    charge_pump_current = RegVal(r2, R2_CHARGE_PUMP_CURRENT_OFF, 4)
    double_buffer = RegBit(r2, R2_DOUBLE_BUFFER_ENABLE)
    r_counter = RegVal(r2, R2_R_OFF, 10)
    ref_div2 = RegBit(r2, R2_RDIV2_ENABLE)
    ref_doubler = RegBit(r2, R2_REFERENCE_DOUBLER_ENABLE)
    muxout = RegVal(r2, R2_MUXOUT_OFF, 3)

    @property
    def low_spur(self):
        """True when the low-spur (as opposed to low-noise) mode bits are set."""
        return bool((self.r2 >> ADF4351.R2_LOW_NOISE_SPUR_OFF) & 0b11)

    @low_spur.setter
    def low_spur(self, val):
        if val:
            self.r2 |= ADF4351.R2_LOW_NOISE_SPUR_MASK
        else:
            self.r2 &= ~ADF4351.R2_LOW_NOISE_SPUR_MASK

    clock_divider_val = RegVal(r3, R3_CLK_DIV_OFF, 12)
    clock_divider_mode = RegVal(r3, R3_CLK_DIV_MODE_OFF, 2)
    cycle_slip_reduction = RegBit(r3, R3_CSR)
    charge_cancelation = RegBit(r3, R3_CHARGE_CANCEL)
    antibacklash_pulse_3ns = RegBit(r3, R3_ABP)
    band_select_high = RegBit(r3, R3_BAND_SELECT_MODE)
    output_power = RegVal(r4, R4_OUTPUT_POWER_OFF, 2)
    output_enable = RegBit(r4, R4_RF_OUTPUT_ENABLE)
    aux_output_power = RegVal(r4, R4_AUX_OUTPUT_POWER_OFF, 2)
    aux_output_enable = RegBit(r4, R4_AUX_OUTPUT_ENABLE)
    aux_output_fundamental = RegBit(r4, R4_AUX_OUTPUT_SELECT)
    mute_till_lock_detect = RegBit(r4, R4_MTLD)
    vco_power_down = RegBit(r4, R4_VCO_POWER_DOWN)
    band_select_clock_div = RegVal(r4, R4_BAND_SELECT_CLOCK_DIV_OFF, 8)
    rf_divider = RegVal(r4, R4_DIVIDER_SELECT_OFF, 3)
    feedback_fundamental = RegBit(r4, R4_FEEDBACK_SELECT)
    ld_pin_mode = RegVal(r5, R5_LD_PIN_MODE_OFF, 2)

    OUTPUT_DIVIDER_1 = 0b000
    OUTPUT_DIVIDER_2 = 0b001
    OUTPUT_DIVIDER_4 = 0b010
    OUTPUT_DIVIDER_8 = 0b011
    OUTPUT_DIVIDER_16 = 0b100
    OUTPUT_DIVIDER_32 = 0b101
    OUTPUT_DIVIDER_64 = 0b110

    def __init__(self, bus, dev, refclk):
        """
        :param bus: SPI bus number passed to spidev
        :param dev: SPI device (chip select) number
        :param refclk: reference clock frequency in Hz (used as the PFD frequency)
        """
        # TODO SPI
        self.spi = spidev.SpiDev(bus, dev)
        # Technically, the maximum speed is 20MHz, but my logic analyzer does
        # not go that high
        self.spi.max_speed_hz = 5000000
        # CPOL = 0, CPHA = 0
        self.spi.mode = 0b00
        self.refclk = refclk
        # Initialize to some sort of known state
        self.init()

    def init(self):
        """Program a known default state, writing registers r5 down to r0."""
        self.r5 = ADF4351.R5_LD_PIN_MODE_LOCK_DETECT
        self.r4 = ADF4351.R4_MTLD
        self.feedback_fundamental = True
        self.output_enable = True
        self.output_power = 3
        self.r3 = 0x00000000
        self.clock_divider_val = 150
        self.r2 = 0x00000000
        self.r_counter = 1
        self.charge_pump_current = 0b0111
        self.pd_polarity_positive = True
        self.r1 = 0x00000000
        self.r0 = 0x00000000

    def write(self, val):
        """Shift a 32-bit register word out over SPI, MSB-first, as four bytes."""
        self.spi.xfer([(val >> x) & 0xff for x in range(24, -1, -8)])

    def set_frequency(self, fout, spacing = 100e3):
        """Program the synthesizer to output *fout* Hz with channel *spacing* Hz."""
        # Based on https://ez.analog.com/thread/13743
        # TODO: Reference doubler/divider
        fpfd = self.refclk
        outdivval = 0
        outdiv = 1
        # Pick the smallest RF output divider that keeps the VCO at or
        # above 2.2 GHz.
        while fout * outdiv < 2200e6:
            outdiv *= 2
            outdivval += 1
        if self.feedback_fundamental:
            N = fout * outdiv / fpfd
        else:
            N = fout / fpfd
        INT = int(N)
        MOD = int(fpfd / spacing)
        FRAC = int((N - INT) * MOD)
        # Reduce FRAC/MOD to lowest terms; the chip requires MOD >= 2.
        div = gcd(MOD, FRAC)
        MOD //= div
        FRAC //= div
        if MOD == 1:
            MOD = 2
        # TODO: PDF max error check
        # Band select clock divider: divide the PFD down, rounding up; the
        # high band-select mode permits a 4x faster band select clock.
        fpfdm = fpfd / 1e6
        if self.band_select_high:
            bandseldiv = int(2 * fpfdm)
            if 2 * fpfdm - bandseldiv > 0:
                bandseldiv += 1
        else:
            bandseldiv = int(8 * fpfdm)
            # Bug fix: this previously compared against ``8 * fpfd`` (Hz)
            # instead of ``8 * fpfdm`` (MHz), so the divider was always
            # bumped by one even for exact divisions.
            if 8 * fpfdm - bandseldiv > 0:
                bandseldiv += 1
        bandseldiv = min(bandseldiv, 255)
        # Write the register values
        r4 = insert_val(self.r4, bandseldiv,
                        ADF4351.R4_BAND_SELECT_CLOCK_DIV_OFF,
                        ADF4351.R4_BAND_SELECT_CLOCK_DIV_MASK)
        r4 = insert_val(r4, outdivval,
                        ADF4351.R4_DIVIDER_SELECT_OFF,
                        ADF4351.R4_DIVIDER_SELECT_MASK)
        r1 = insert_val(self.r1, MOD,
                        ADF4351.R1_MOD_OFF,
                        ADF4351.R1_MOD_MASK)
        r1 = insert_val(r1, 0b0001,
                        ADF4351.R1_PHASE_OFF,
                        ADF4351.R1_PHASE_MASK)
        r0 = insert_val(self.r0, FRAC,
                        ADF4351.R0_FRAC_OFF,
                        ADF4351.R0_FRAC_MASK)
        r0 = insert_val(r0, INT,
                        ADF4351.R0_INT_OFF,
                        ADF4351.R0_INT_MASK)
        self.r4 = r4
        self.r1 = r1
        self.r0 = r0

    def close(self):
        """Release the SPI device."""
        self.spi.close()
|
atalax/python-adf4351 | setup.py | <reponame>atalax/python-adf4351<gh_stars>1-10
#! /usr/bin/env python3
# Packaging script for the adf4351 library.

try:
    from setuptools import setup
except ImportError:
    # Bug fix: this was a bare ``except:``, which would also swallow
    # KeyboardInterrupt/SystemExit; only a missing setuptools should
    # trigger the distutils fallback.
    from distutils.core import setup

setup(
    name = "adf4351",
    version = "0.1",
    packages = ["adf4351"],
    description = "Python ADF4351 library",
    author = "<NAME>",
    author_email = "<EMAIL>",
    url = "https://github.com/atalax/python-adf4351",
    license = "MIT",
    classifiers = [
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Topic :: Utilities",
    ]
)
|
ardyflora/crossfitTimeTable | crossfitTimeTable.py | from bs4 import BeautifulSoup
# NOTE: this script is Python 2 (statement-form `print` below, urllib2).
# Scrapes the public Wodify schedule and renders it with PrettyTable.
import urllib2,re, requests, os
from prettytable import PrettyTable

x = PrettyTable()
html = urllib2.urlopen("https://app.wodify.com/Schedule/PublicCalendarListView.aspx?tenant=3920").read()
soup = BeautifulSoup(html, "lxml")
table = soup.find('table', attrs={'class': 'TableRecords'})
table_body = table.find('tbody')
FinalData = []
FinalData2 = []
for row in table_body.find_all("tr"):
    for stat in row.find_all("td", attrs={'class': ['TableRecords_EvenLine', 'TableRecords_OddLine']}):
        dictContent = {}
        dict2Content = {}
        z = {}
        for spn in stat.find_all("span"):
            if spn.has_attr("class"):
                # An 'h3' span carries the day name and the date.
                if 'h3' in spn["class"]:
                    x.field_names = [str(spn.contents[0]), str(spn.contents[2])]
                    print x.get_string()
                    dictContent["Day"] = str(spn.contents[0])
                    dictContent["Date"] = str(spn.contents[2])
            elif spn.has_attr("title"):
                # Class entries; titles starting with a digit carry a time.
                if 'Olympic Weightlifting' in spn["title"]:
                    if re.match(r'^[0-9]', spn["title"][0]):
                        x.add_row([str(spn["title"]), ""])
                        dict2Content["Title"] = str(spn["title"])
                elif 'CrossFit' in spn["title"]:
                    if re.match(r'^[0-9]', spn["title"][0]):
                        x.add_row([str(spn["title"]), ""])
                        dict2Content["Title"] = str(spn["title"])
                elif 'Open Gym' in spn["title"]:
                    if re.match(r'^[0-9]', spn["title"][0]):
                        x.add_row([str(spn["title"]), ""])
                        dict2Content["Title"] = str(spn["title"])
                elif 'Athletic Conditioning' in spn["title"]:
                    # NOTE(review): unlike the branches above, add_row here
                    # runs before the digit check -- confirm if intended.
                    x.add_row([str(spn["title"]), ""])
                    if re.match(r'^[0-9]', spn["title"][0]):
                        dict2Content["Title"] = str(spn["title"])
        if len(dictContent) > 0:
            FinalData.append(dictContent)
        if len(dict2Content) > 0:
            FinalData2.append(dict2Content)
        print("Final data val:", FinalData)
        print("Final data 2val:", FinalData2)
        if len(FinalData2) > 0:
            # NOTE(review): dict() over a list of dicts raises TypeError;
            # this line most likely fails at runtime -- confirm intent.
            z = dict(FinalData + FinalData2)
            print("the val of z: ", z)
print("THe final data is:", FinalData)
print("THe final data 2 is:", FinalData2)
# Dead experimentation code kept for reference:
'''
elif spn.has_attr("class"):
if 'h3' in spn["class"]:
print(spn.contents)
elif spn.has_attr("style"):
print(spn.text)
'''
|
loganFortune/mvm | src/gen.py | <reponame>loganFortune/mvm
"""
Copyright <NAME>
MIT License
"""
import random
import os
class Instruction:
    """A single core-level instruction and its assembly rendering."""

    def __init__(self, name):
        self.name = name
        # Destination and address registers both default to t0.
        self.register_dest = "t0"
        self.register_addr = "t0"
        # Offset added to the address register.
        self.immediate = "0"
        # Assembly mnemonic; "nop" until a real opcode is assigned.
        self.asm_code = "nop"

    def showInstruction(self):
        """Print a one-line debug description of this instruction."""
        description = f"- {self.name} => (asm_code) {self.asm_code}"
        print(description)
class MemoryInstructionL1:
    """A named sequence of core instructions produced by the L1 cache."""

    def __init__(self, name):
        self.name = name
        self.seq_instructions = []

    def addInstruction(self, instruction):
        """Append one core instruction to this memory instruction."""
        self.seq_instructions.append(instruction)

    def showMemInstr(self):
        """Print the instruction sequence, one cycle per line."""
        print(f"- {self.name} :")
        for cycle, instr in enumerate(self.seq_instructions):
            print(f" cycle {cycle} --> {instr.name} @A/B (immediate:{instr.immediate})")
class GeneratorMemoryInst:
    """Collects the memory instructions used to generate a test suite."""

    def __init__(self, test_name):
        self.test_name = test_name
        self.memory_instr = []

    def addMemInstruction(self, instruction):
        """Register one memory instruction with this generator."""
        self.memory_instr.append(instruction)
class MultiCoreModel:
    """Timing model of the multi-core target used during generation."""

    def __init__(self, test_name):
        self.test_name = test_name
        # Penalty, in cycles, charged for a read miss (name suggests L1).
        self.miss_read_penalty = 20
        # Extra scheduling slack used when sizing generated tests.
        self.precision = 20
def initializeGeneratorF(test_name):
    """Build the generator for functional (non-coherence) memory tests.

    Constructs the core instructions (nop/load/store), composes them into
    L1-level memory instructions (miss/hit/write-back variants), prints a
    debug summary, and returns a GeneratorMemoryInst holding the subset of
    memory instructions used for pairing in generated tests.
    """
    print("Multi-Core Verification Methodology \n")
    # Core Instruction
    print("Core Instructions (triggering memory instructions)")
    # Init instructions
    nop_instruction = Instruction("NOP")
    # asm already implemented
    lw_instruction = Instruction("LOAD")
    lw_instruction.asm_code = "lw"
    sw_instruction = Instruction("STORE")
    sw_instruction.asm_code = "sw"
    # Show instructions (Debug)
    nop_instruction.showInstruction()
    lw_instruction.showInstruction()
    sw_instruction.showInstruction()
    # Memory Instructions (sorted by length)
    print("\nMemory Instructions (triggered by the core) ")
    # Get Memory Mapping
    # TODO
    print("@A = @functData")
    print("@B = @functData +X*(Line_Size)")
    # Address stride between cache lines (16*8) -- units presumed bytes;
    # TODO confirm against the target's actual line size.
    cache_Line_Size = 16 * 8
    ##
    miss_read = MemoryInstructionL1("MISS_READ")
    miss_read.addInstruction(lw_instruction)
    miss_write = MemoryInstructionL1("MISS_WRITE")
    miss_write.addInstruction(sw_instruction)
    # Load first so the line is resident, then the store hits.
    write_hit = MemoryInstructionL1("WRITE_HIT")
    write_hit.addInstruction(lw_instruction)
    write_hit.addInstruction(sw_instruction)
    hit_read = MemoryInstructionL1("HIT_READ")
    hit_read.addInstruction(lw_instruction)
    hit_read.addInstruction(lw_instruction)
    # Dirty the line, then load three other lines (offsets 1..3 line
    # strides away) to pressure the set, then load the original again.
    write_back = MemoryInstructionL1("WRITE_BACK")
    write_back.addInstruction(sw_instruction)
    lw1_instruction = Instruction("LOAD")
    lw1_instruction.asm_code = "lw"
    lw1_instruction.immediate = str(cache_Line_Size)
    write_back.addInstruction(lw1_instruction)
    lw2_instruction = Instruction("LOAD")
    lw2_instruction.asm_code = "lw"
    lw2_instruction.immediate = str(cache_Line_Size * 2)
    write_back.addInstruction(lw2_instruction)
    lw3_instruction = Instruction("LOAD")
    lw3_instruction.asm_code = "lw"
    lw3_instruction.immediate = str(cache_Line_Size * 3)
    write_back.addInstruction(lw3_instruction)
    write_back.addInstruction(lw_instruction)
    load = MemoryInstructionL1("LOAD_MEMORY_MAPPED (bypass caches)")
    load.addInstruction(lw_instruction)
    write = MemoryInstructionL1("WRITE_MEMORY_MAPPED (bypass caches)")
    write.addInstruction(sw_instruction)
    # Show Memory Instructions
    miss_read.showMemInstr()
    miss_write.showMemInstr()
    write_hit.showMemInstr()
    hit_read.showMemInstr()
    write_back.showMemInstr()
    load.showMemInstr()
    write.showMemInstr()
    # (sorted by length)
    gen_multicore = GeneratorMemoryInst(test_name)
    gen_multicore.addMemInstruction(write_back)
    gen_multicore.addMemInstruction(write_hit)
    gen_multicore.addMemInstruction(miss_read)
    gen_multicore.addMemInstruction(miss_write)
    return gen_multicore
def show_instr(seqInstr):
    """Print all entries of *seqInstr* on one line, space separated.

    A trailing newline is emitted after the sequence.
    """
    # Idiom fix: iterate the sequence directly instead of indexing
    # with range(len(...)).
    for instr in seqInstr:
        print(f"{instr} ", end="")
    print("")
def createFileTest(skeleton_seqInstr_for_test_CoreOne, seqInstr_for_testTwo, id):
    """Instantiate template.cpp with the two cores' instruction sequences.

    Finds the "#1FG#" and "#2FG" markers in the template, inserts the asm
    lines (as C string literals) right after each marker, and writes the
    result to ./hw-unit-tests/template_<id>.cpp.
    """
    with open('template.cpp') as f:
        datafile = f.readlines()
    for line in range(0, len(datafile)):
        if "#1FG#" in datafile[line]:
            # do something
            # NOTE(review): the loop stops at len-1, so the final entry
            # (the "INF-LOOP" sentinel) is skipped -- confirm intended.
            for i in range(0, len(skeleton_seqInstr_for_test_CoreOne) - 1):
                datafile.insert(line + 1 + i, "\t\t\"" + skeleton_seqInstr_for_test_CoreOne[i] + " \\n\\t\"\n")
    for line in range(0, len(datafile)):
        if "#2FG" in datafile[line]:
            # do something
            for i in range(0, len(seqInstr_for_testTwo) - 1):
                datafile.insert(line + 1 + i, "\t\t\"" + seqInstr_for_testTwo[i] + " \\n\\t\"\n")
    outF = open("./hw-unit-tests/template_" + id + ".cpp", "w")
    outF.writelines(datafile)
    outF.close()
def createInstrSeq(gen_multicore, multicore_model):
    """Generate one C++ unit test per (instruction, challenger, offset).

    Core one runs a fixed instruction sequence; core two runs the
    challenger sequence shifted to every possible cycle offset, so each
    generated file exercises one relative timing of the two cores.
    """
    print("Creating Sequence of Instructions...")
    # Create Directory to compile all tests
    os.system("rm -rf hw-unit-tests")
    os.system("mkdir hw-unit-tests")
    # Generate Tests
    for elem in range(0, len(gen_multicore.memory_instr)):
        # Upper bound on the cycles core one may take, used to size the
        # NOP padding on core two.
        total_test_time = (multicore_model.precision + multicore_model.miss_read_penalty) * len(gen_multicore.memory_instr[elem].seq_instructions)
        for challenger in range(elem, len(gen_multicore.memory_instr)):
            print(f"{gen_multicore.memory_instr[elem].name} vs {gen_multicore.memory_instr[challenger].name}")
            skeleton_seqInstr_for_test_CoreOne = ["NOP", "NOP"]
            for instr in gen_multicore.memory_instr[elem].seq_instructions:
                skeleton_seqInstr_for_test_CoreOne.append(instr.asm_code + " " + instr.register_dest + "," + instr.immediate + "(%[input0])")
            skeleton_seqInstr_for_test_CoreOne.append("INF-LOOP")
            skeleton_seqInstr_for_testTwo = ["NOP"]
            print("Core One:", end="")
            show_instr(skeleton_seqInstr_for_test_CoreOne)
            # Slide the challenger sequence over every start cycle i.
            for i in range(1, total_test_time):
                seqInstr_for_testTwo = skeleton_seqInstr_for_testTwo.copy()
                for j in range(1, total_test_time):
                    if (j == i):
                        for instr in gen_multicore.memory_instr[challenger].seq_instructions:
                            seqInstr_for_testTwo.append(instr.asm_code + " " + instr.register_dest + "," + instr.immediate + "(%[input0])")
                    else:
                        seqInstr_for_testTwo.append("NOP")
                seqInstr_for_testTwo.append("INF-LOOP")
                # NOTE(review): j here is the inner loop's final value
                # (total_test_time - 1), identical for every file -- is it
                # meant to be part of the id?
                idfile = str(elem) + "_" + str(challenger) + "_" + str(i) + "_" + str(j)
                createFileTest(skeleton_seqInstr_for_test_CoreOne, seqInstr_for_testTwo, idfile)
def initializeGeneratorC(test_name):
    """Build the generator for coherence (litmus) memory tests.

    Currently only creates an empty generator; the coherence memory
    instructions still need to be added.
    """
    gen_multicore = GeneratorMemoryInst(test_name)
    # todo: coherent memory test - coherent tests = litmus test
    # FIX: the generator was created but never returned, so callers always
    # received None; return it, mirroring initializeGeneratorF.
    return gen_multicore
if __name__ == "__main__":
    # Build the functional-test generator and emit one C++ test file per
    # instruction pairing and timing offset.
    gen_multicore = initializeGeneratorF("functional test")
    multicore_model = MultiCoreModel("model one: no data dependency")
    createInstrSeq(gen_multicore, multicore_model)
brokeyourbike/astrolords_dayleak | setup.py | <reponame>brokeyourbike/astrolords_dayleak
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""cx_Freeze build script for the Astro Lords - DayLeak tool."""
import sys
from cx_Freeze import setup, Executable

# Win32GUI base hides the console window for GUI apps on Windows.
base = 'Win32GUI' if sys.platform == 'win32' else None

# Dependencies are automatically detected, but it might need
# fine tuning.
buildOptions = dict(
    packages=[],
    excludes=[],
    includes=[],
    include_files=[]
)

executables = [
    Executable('core.pyw', base=base, icon='data/rocket.ico')
]

setup(
    name="Astro Lords - DayLeak",
    version="1.0",
    description="Developed by <NAME>",
    options=dict(build_exe=buildOptions),
    executables=executables
)

# Keeps the console window open after the build finishes.
# NOTE(review): this runs on *every* execution of setup.py (including
# automated builds) and will block them -- confirm it is wanted here.
input("Press Enter")
brokeyourbike/astrolords_dayleak | core.pyw | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import time
import math
import codecs
import locale
import astro
from contextlib import redirect_stdout
from PyQt4 import QtCore, QtGui, uic
"""
@author: <NAME>
@contact: brokeyourbike.com
Copyright (C) 2017
"""
__author__ = '<NAME>'
__version__ = '0.1'
__email__ = '<EMAIL>'
__contact__ = 'www.brokeyourbike.com'
class Main_Window(QtGui.QWidget):
    """Main application window; wires the UI buttons to astro module actions."""

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        # Widget tree is defined in Qt Designer and loaded at runtime.
        uic.loadUi("data/Main.ui", self)
        self.setWindowIcon(QtGui.QIcon("data/rocket.png"))
        love = QtGui.QPixmap("data/love_8px.png")
        astro_logo = QtGui.QPixmap("data/logo.png")
        self.love_label.setPixmap(love)
        self.logo_label.setPixmap(astro_logo)
        self.broky_label.setOpenExternalLinks(True)
        # Old-style PyQt4 signal/slot connections.
        self.connect(self.button_open, QtCore.SIGNAL("clicked()"),
                     self.connect_astro)
        self.connect(self.button_go, QtCore.SIGNAL("clicked()"),
                     self.run_algo)
        self.connect(self.button_x2, QtCore.SIGNAL("clicked()"),
                     self.set_x2)
        self.connect(self.button_x5, QtCore.SIGNAL("clicked()"),
                     self.set_x5)

    def connect_astro(self):
        """Attach to a running game process; on success enable the run button."""
        if astro.run():
            self.button_open.setEnabled(False)
            self.button_open.setText('Подключено')
            self.button_go.setEnabled(True)
            self.button_go.setText('Запуск алгоритма')

    def set_x5(self):
        """Switch the click target to the x5 coordinates."""
        self.button_x5.setEnabled(False)
        self.button_x2.setEnabled(True)
        astro.cords = astro.x5_cords

    def set_x2(self):
        """Switch the click target to the x2 coordinates."""
        self.button_x5.setEnabled(True)
        self.button_x2.setEnabled(False)
        astro.cords = astro.x2_cords

    def run_algo(self):
        """Run the pixel-watching algorithm; reset UI if the game is gone."""
        if not astro.run():
            self.button_open.setEnabled(True)
            self.button_open.setText('Подключить игру')
            self.button_go.setEnabled(False)
            self.button_go.setText('Ожидаю ..')
        else:
            # NOTE(review): astro.run() is called twice here (once for the
            # check, once for the argument) -- confirm that is intended.
            fl = astro.core(astro.run())
if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    # app.setQuitOnLastWindowClosed(False)  # Disable automatic exit when the last window is closed
    window = Main_Window()
    window.show()
    sys.exit(app.exec_())
|
brokeyourbike/astrolords_dayleak | astro.py | #!/usr/bin/env python3
"""
Some automation for Astrolords.
"""
import sys
import time
import win32api
import win32con
from PIL import ImageGrab
from pywinauto import application, mouse
# Screen coordinates (x, y) used as the mouse click target.
# `cords` defaults to the x2 target; the GUI swaps it to x5_cords on demand.
cords = (1050, 585)
x2_cords = (1050, 585)
x5_cords = (1050, 640)
def run():
    """Try to attach to a running Astro Lords process.

    Returns the connected pywinauto Application on success, or False when
    no matching process is found.
    """
    path_to_astro = r'C:\Program Files (x86)\Astro Lords\Astrolords.exe'
    app = application.Application()
    try:
        app.connect(path=path_to_astro, title="Astro Lords")
        sep = '-' * 30
        print(sep)
        print('Connected to Astrolords.')
        print(sep)
        return app
    except application.ProcessNotFoundError:
        print('Can\'t connect to Astrolords :(')
        return False
def core(app):
    """Watch one screen pixel and click the target when its color changes.

    Focuses/positions the game window, samples a reference pixel color,
    then busy-polls until all three RGB channels differ from the reference
    and clicks `cords`. Returns True after clicking; loops forever otherwise.
    """
    app.AstroLords.set_focus()
    app.AstroLords.draw_outline()
    app.AstroLords.move_window(x=200, y=200)
    mouse.move(coords=cords)
    # Screen region (left, top, right, bottom) containing the watched pixel.
    get_box = (906, 641, 910, 644)
    # color_1 = [x for x in range(4, 5)]
    # color_2 = [x for x in range(150, 180)]
    # color_3 = [x for x in range(20, 40)]
    for i in range(10):
        # NOTE(review): tmp is re-created every iteration, so len(tmp) > 2
        # is never true and the early break can never fire -- the loop just
        # samples 10 times and keeps the last color. Confirm intent.
        tmp = []
        image = ImageGrab.grab(get_box)
        pre_color = image.getpixel((3, 1))
        tmp.append(pre_color[0])
        if len(tmp) > 2:
            if pre_color[0] == tmp[-1]:
                break
    print('TARGET RGB =', pre_color[0], pre_color[1], pre_color[2])
    while True:
        image = ImageGrab.grab(get_box)
        color = image.getpixel((3, 1))
        # Click only when every channel differs from the reference color.
        if color[0] != pre_color[0]:
            if color[1] != pre_color[1]:
                if color[2] != pre_color[2]:
                    click(cords[0], cords[1])
                    print('NOT RGB =', color[0], color[1], color[2])
                    return True
                    # NOTE(review): unreachable after return.
                    break
def click(x, y):
    """Move the cursor to (x, y) and synthesize a left-button click."""
    win32api.SetCursorPos((x, y))
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)
if __name__ == "__main__":
    # Standalone mode: attach to the game and run the watcher directly.
    core(run())
|
emrakul/kamni | main.py | from argparse import ArgumentError
from navec import Navec
from scipy.spatial.distance import cosine
import tqdm
import numpy as np
class Bot():
    """Word-guessing bot backed by Navec word embeddings.

    Holds a secret word; guesses are scored by cosine distance between
    their embedding and the secret's embedding.
    """

    def __init__(self, secret=None):
        # Quantized 300-d Navec embeddings (literature corpus archive).
        path = 'hudlit_12B_500K_300d_100q.tar'
        self.en_alphabet = [chr(ord('a') + i) for i in range(0, 26)]
        self.ru_alphabet = [chr(ord('а') + i) for i in range(0, 32)] + ['ё']
        self.model = Navec.load(path)
        # NOTE(review): start(None) would fail in detect_language; the
        # default secret=None seems unusable -- confirm callers always
        # pass a secret.
        self.start(secret)

    def detect_language(self, word) -> str:
        """Return 'en' if every character is a Latin letter, else 'ru'."""
        if all([c in self.en_alphabet for c in word]):
            return 'en'
        else:
            return 'ru'

    def start(self, secret: str):
        """Set a new secret word and precompute the candidate ranking."""
        self.secret = secret
        self.lang = self.detect_language(secret)
        self.emb = self.model[self.secret]
        # Restrict the playable vocabulary to the secret's language.
        self.vocab = list(filter(lambda x:
                                 self.detect_language(x) == self.lang, self.model.vocab.words))
        # FIX: removed a duplicated, dead re-assignment of self.secret and
        # self.emb that immediately followed (same values assigned twice).
        # generation: the 1000 words closest to the secret by cosine distance.
        self.top_words = sorted(self.vocab,
                                key=lambda x:
                                cosine(self.model[x], self.emb))[:1000]

    def printout(self, distance, top=None) -> str:
        """Format a distance; append a rank progress bar for top-1000 words."""
        rounded = np.round(distance, 3)
        if top is not None:
            # Reuse tqdm's meter rendering for a "rank out of 1000" bar.
            progress = tqdm.tqdm.format_meter(top, 1000, 500)
            return str(rounded) + " " + progress[progress.index('%') + 1:progress.index('[')]
        else:
            return str(rounded)

    def guess(self, guess: str) -> str:
        """Score one guess against the secret and return the reply string."""
        if guess not in self.vocab:
            return "такого слова НЕТ"
        if guess == self.secret:
            return "УГАДАНО"
        vector = self.model[guess]
        if guess in self.top_words:
            distance = cosine(vector, self.emb)
            rank = self.top_words.index(guess)
            return self.printout(distance, rank)
        else:
            distance = cosine(vector, self.emb)
            return self.printout(distance)
|
DDuygu/DiyetListesii | kalorisayaci/diyetlistesiii.py | import time
# Interactive calorie counter; all user-facing strings are Turkish and
# are kept verbatim (they are program output).
print("******kalori sayacina hosgeldiniz*****")
yas = int(input('yasinizi girin: '))
# Daily calorie budget and a sample menu chosen by age group.
# Note: adjacent string literals concatenate -- only explicit commas
# actually split these lists into separate items.
if yas <= 8:
    print('en fazla 1000 kalori alabilirsiniz')
    kalori1000 = ['sabah:'
                  '1 ince dilim kepek ekmek, 1 karper dilimi kadar beyaz peynir, domates, salatalık '
                  'ogle:'
                  '8 kaşık sebze yemeği, Yumurta büyüklüğünde 2 adet köfte ya da tavuk,salata ',
                  'aksam:'
                  '8 kaşık sebze yemeği, 1 ince dilim kepek ekmek, Salata, yogurt'
                  ]
    print(kalori1000)
elif 9 <= yas <= 13:
    print('en fazla 1600 kalori alabilirsiniz')
    kalori1600 = ['sabah:'
                  'Açık çay, 2 dilim ekmek, 6 zeytin, 3 kibrit kutusu peynir, istenilen kadar cig sebze'
                  'ogle:'
                  '6 kaşık sebze yemeği, 1 kase çorba, 2 dilim ekmek, istenildiği kadar yağsız salata'
                  'aksam:'
                  '1 kase çorba, 2 dilim ekmek ,istenildiği kadar yağsız salata'
                  ]
    print(kalori1600)
elif 14 <= yas <= 18:
    print('en fazla 1800 kalori alabilirsiniz')
    kalori1800 = ['sabah:'
                  'sekersiz cay, 2 k.k beyaz peynir, domates, salatalık, 2 dilim ekmek'
                  'ogle:'
                  '100 gr Haşlanmış tavuk, yoğurt (1 tabak 200 gr), yağsız salata'
                  'aksam:'
                  'Sebze yemeği (8 yemek kaşığı), kaymaksız 1 kase yoğurt, yağsız salata'
                  ]
    print(kalori1800)
elif 19 <= yas <= 30:
    print('en fazla 2000 kalori alabilirsiniz')
    kalori2000 = ['sabah:'
                  'Çay veya kahve (şekersiz), 1 su bardağı 200ml süt, 2 kibrit kutusu az yağlı beyaz peynir, salatalık , domates, 2 ince dilim kepekli ekmek'
                  'ogle:'
                  ' 100 g tavuk (ızgara veya haşlanmış), 1 kase yoğurt (kaymaksız,200 g), yağsız salata, 1 porsiyon meyve'
                  'aksam:'
                  '8 yemek kaşığı sebze yemeği, 1 kase kaymaksız yoğurt, yağsız salata, 6 yemek kaşığı makarna'
                  ]
    print(kalori2000)
elif 31 <= yas <= 50:
    print('en fazla 2200 kalori alabilirsiniz')
    kalori2200 = ['sabah:'
                  'taze sıkılmış portakal suyu, 5 adet zeytin, 1 adet haşlanmış yumurta, 2 dilim kaşar peyniri, bir adet domates'
                  'ogle:'
                  ' bir kâse lahana çorbası, bir kâse yoğurt ve mevsim salata '
                  'aksam:'
                  '13 adet köfte, bir kâse yoğurt, mevsim salata'
                  ]
    print(kalori2200)
elif yas > 50:
    # NOTE(review): the message says 1600 kalori but the variable is
    # named kalori1400 -- confirm which number is intended.
    print('en fazla 1600 kalori alabilirsiniz')
    kalori1400 = ['sabah:'
                  'Açık çay, 2 dilim ekmek, 6 zeytin, 3 kibrit kutusu peynir, istenilen kadar cig sebze'
                  'ogle:'
                  '6 kaşık sebze yemeği, 1 kase çorba, 2 dilim ekmek, istenildiği kadar yağsız salata'
                  'aksam:'
                  '1 kase çorba, 2 dilim ekmek ,istenildiği kadar yağsız salata'
                  ]
    print(kalori1400)

toplamkalori = 0
# NOTE(review): `x` is both the continue-flag of the while loop and the
# loop variable when listing dishes below; the dict iteration overwrites
# the flag before the input() at the end of each branch restores it.
x = 'e'
while (x == 'e'):
    print("protein icin 1 basin")
    print("k.hidrat icin 2 basin")
    print("sebze icin 3 basin")
    print("programı kapatmak icin 4 basın")
    secim = int(input("menuden bir secenek sec"))
    # PROTEIN ICIN
    if (secim == 1):
        protein = {
            # '0': 0 ,
            'et': 100,
            'yumurta': 50,
            'sut': 150
        }
        for x in protein:
            print(x)
        secilenyemek = str(input("sectiğiniz yemeğin adini girin: "))
        if (secilenyemek == 'et'):
            print('et seçtiniz: 100 kalori')
            toplamkalori = toplamkalori + 100
        elif (secilenyemek == 'yumurta'):
            print('yumurta seçtiniz: 50 kalori')
            toplamkalori = toplamkalori + 50
        elif (secilenyemek == 'sut'):
            print('sut seçtiniz: 150 kalori')
            toplamkalori = toplamkalori + 150
        x = str(input("başka yemek seçmek için e toplam kaloriyi görmek için h basın "))
    # KARBONHIDRAT
    elif (secim == 2):
        karbonhidrat = {
            # '0': 0 ,
            'makarna': 100,
            'pilav': 50,
            'patates kızartması': 150
        }
        for x in karbonhidrat:
            print(x)
        secilenyemek = str(input("sectiğiniz yemeğin adini girin: "))
        if (secilenyemek == 'makarna'):
            print('makarna seçtiniz 100 kalori')
            toplamkalori = toplamkalori + 100
        elif (secilenyemek == 'pilav'):
            print('pilav seçtiniz 50 kalori')
            toplamkalori = toplamkalori + 50
        elif (secilenyemek == 'patates kizartmasi'):
            # NOTE(review): the menu lists 150 kalori for fries but only
            # 50 is added; also the dict key uses dotless-i spelling
            # ('patates kızartması') so this comparison may never match.
            print('patates kizartmasi seçtiniz 50 kalori')
            toplamkalori = toplamkalori + 50
        x = str(input("başka yemek seçmek için e toplam kaloriyi görmek için h basın "))
    # SEBZE
    elif (secim == 3):
        sebze = {
            # '0': 0 ,
            'ıspanak': 100,
            'fasulye': 500,
            'patlıcan yemegi': 150
        }
        for x in sebze:
            print(x)
        secilenyemek = str(input("sectiğiniz yemeğin adini girin: "))
        if (secilenyemek == 'ıspanak'):
            print('ıspanak seçtiniz 100 kalori')
            toplamkalori = toplamkalori + 100
        elif (secilenyemek == 'fasulye'):
            # NOTE(review): prints 'domates ... 500 kalori' but adds 50,
            # while the dict says fasulye is 500 -- inconsistent.
            print('domates seçtiniz 500 kalori')
            toplamkalori = toplamkalori + 50
        elif (secilenyemek == 'patlican yemegi'):
            print('patlican yemegi seçtiniz 150 kalori')
            toplamkalori = toplamkalori + 150
        x = str(input("başka yemek seçmek için e toplam kaloriyi görmek için h basın "))
    elif (secim == 4):
        print('cikis yapiliyor')
        time.sleep(1)
        exit()
    else:
        # Any other menu number shows the running total.
        print('seçimleriniz toplam', toplamkalori, 'kalori ')
yimuw/yimu-blog | random/visitor/simple/visitor_simple.py | <reponame>yimuw/yimu-blog
from abc import ABC, abstractmethod
import json
from collections import deque
class Traversable(ABC):
    """Interface for objects that expose their fields to a visitor.

    NOTE(review): concrete implementations below take a `visitor`
    argument, while this abstract signature takes none -- confirm.
    """

    @abstractmethod
    def traverse(self):
        pass
class A(Traversable):
    """Leaf-level example object holding a number, a string and a list."""

    def __init__(self):
        self.a = 1
        self.b = "whatever"
        self.c = [1, 2, 3]

    def traverse(self, visitor):
        """Feed each field, in declaration order, to the visitor."""
        for field_name in ('a', 'b', 'c'):
            visitor.visit(field_name, getattr(self, field_name))
class B(Traversable):
    """Example object that nests an instance of A under a scalar field."""

    def __init__(self):
        self.b1 = 100
        self.instance_of_A = A()

    def traverse(self, visitor):
        """Feed the scalar field, then the nested object, to the visitor."""
        for field_name in ('b1', 'instance_of_A'):
            visitor.visit(field_name, getattr(self, field_name))
class VisitorBase(ABC):
    """Recursive dispatcher over an object graph.

    Lists are walked through on_enter_list/on_leave_list, Traversable
    objects through on_enter_level/on_leave_level (delegating the field
    enumeration to the object's traverse), and every other value is
    reported via on_leaf.
    """

    def __init__(self):
        pass

    @abstractmethod
    def on_leaf(self, name, obj):
        pass

    @abstractmethod
    def on_enter_level(self, name):
        pass

    @abstractmethod
    def on_leave_level(self):
        pass

    @abstractmethod
    def on_enter_list(self, name, obj):
        pass

    @abstractmethod
    def on_leave_list(self):
        pass

    def visit(self, name, obj):
        """Dispatch *obj* (and recursively its children) to the hooks."""
        if isinstance(obj, list):
            self.on_enter_list(name, obj)
            for e in obj:
                # List elements are anonymous: no field name.
                self.visit(name=None, obj=e)
            self.on_leave_list()
        elif isinstance(obj, Traversable):
            self.on_enter_level(name)
            obj.traverse(self)
            self.on_leave_level()
        else:
            self.on_leaf(name, obj)
class JsonDump(VisitorBase):
    """Visitor that renders the object graph as JSON-like indented text."""

    def __init__(self):
        self.result = ''
        self.level = 0
        # Spaces added per nesting level.
        self.indent = 2

    def on_leaf(self, name, obj):
        # Strings are quoted; everything else relies on str().
        obj_str = str(obj) if not isinstance(obj, str) else '"{}"'.format(obj)
        if name == None:
            self.result += ' ' * self.level + obj_str + ',\n'
        else:
            self.result += ' ' * self.level + '"' + name + '"' + ':' + obj_str + ',\n'

    def on_enter_level(self, name):
        if name == None:
            self.result += ' ' * self.level + '{\n'
        else:
            self.result += ' ' * self.level + '"' + name + '"' + ':' + '{\n'
        self.level += self.indent

    def on_leave_level(self):
        # NOTE(review): unlike on_leave_list, this does not strip the
        # trailing ',' of the last member, so the output is not strictly
        # valid JSON -- confirm whether that is acceptable here.
        self.level -= self.indent
        self.result += ' ' * self.level + '}\n'

    def on_enter_list(self, name, obj):
        if name == None:
            self.result += ' ' * self.level + '[\n'
        else:
            self.result += ' ' * self.level + '"' + name + '"' + ':' + '[\n'
        self.level += self.indent

    def on_leave_list(self):
        # remove the last ','
        self.result = self.result[:-2] + '\n'
        self.level -= self.indent
        self.result += ' ' * self.level + ']\n'
class JsonLoad(VisitorBase):
    """Visitor that walks previously dumped text alongside an object graph.

    NOTE(review): work in progress -- on_leaf and on_enter_list rebind the
    local `obj` only, so parsed values are printed but never written back
    into the visited object.
    """

    def __init__(self, dumped):
        # Consume the dump line by line from the left.
        self.dumped = deque(dumped.split('\n'))

    def on_leaf(self, name, obj):
        line = self.dumped.popleft()
        line = line.strip().rstrip(',')
        print('line:', line, obj)
        value_str = line
        if ':' in line:
            _, value_str = line.split(':')
        # Quoted values are strings; everything else is parsed as float.
        if value_str[0] == '"':
            obj = value_str.strip('"')
        else:
            obj = float(value_str)
        print('obj:', obj)

    def on_enter_level(self, name):
        # Skip the opening '{' line.
        self.dumped.popleft()

    def on_leave_level(self):
        # Skip the closing '}' line.
        self.dumped.popleft()

    def on_enter_list(self, name, obj):
        cur = self.dumped.popleft()

        def indent(line):
            return len(line) - len(line.lstrip())
        # Count the lines more indented than '[' to infer the list length.
        list_start_indent = indent(cur)
        idx = 0
        while indent(self.dumped[idx]) != list_start_indent:
            idx += 1
        list_size = idx
        obj = [object] * list_size

    def on_leave_list(self):
        # Skip the closing ']' line.
        self.dumped.popleft()
if __name__ == "__main__":
    # Demo: dump a nested B/A object graph as JSON-like text.
    b = B()
    jsonDump = JsonDump()
    jsonDump.visit('obj_b', b)
    print(jsonDump.result)
|
yimuw/yimu-blog | random/lp_ip/lp_ip.py | # Import packages.
import cvxpy as cp
import numpy as np
import random
from collections import defaultdict
class AssociationGraph:
    """Random multi-layer data-association graph solved by LP relaxation.

    Nodes are arranged in layers; each edge between consecutive layers
    carries an association reward (high on the diagonal, noise elsewhere).
    The association problem is posed as an integer program and solved via
    its linear relaxation with CVXPY.
    """

    def __init__(self):
        self.layer_sizes = None
        # random experiment
        RANDOM_SIZE = True
        if RANDOM_SIZE:
            size = random.randint(2, 10)
            self.layer_sizes = [random.randint(2, 30) for i in range(size)]
        else:
            self.layer_sizes = np.array([3, 4, 3, 4])
        self.len_layers = len(self.layer_sizes)
        # association reward
        self.weights = []
        # map edges in the graph to a linear array
        self.weights_linear_map = [0]
        for i in range(self.len_layers - 1):
            # noise for association reward
            w = np.random.uniform(
                0.0, 0.1, (self.layer_sizes[i], self.layer_sizes[i+1]))
            # Associated nodes.
            # Without loss of generality, put associated cost on diag.
            min_len = min(self.layer_sizes[i], self.layer_sizes[i+1])
            diag_w = np.random.uniform(0.5, 1., min_len)
            # TODO: function? diag?
            for j in range(min_len):
                w[j][j] = diag_w[j]
            self.weights.append(w)
            self.weights_linear_map.append(w.size)
        # NOTE(review): uniform(0.2, 0.2) is just the constant 0.2 --
        # presumably a placeholder for a tunable reward range.
        self.start_reward = np.random.uniform(0.2, 0.2)
        self.end_reward = np.random.uniform(0.2, 0.2)
        # Prefix sums give each layer's offset into the flat variable array.
        self.weights_linear_map = np.cumsum(self.weights_linear_map)

    # l1<-layer l2
    # ..u. ..v.
    #
    def edge_map(self, layer, u_idx, v_idx):
        """Linear variable index of edge u->v between `layer` and `layer`+1."""
        offset = self.weights_linear_map[layer]
        adj_mat_size = self.weights[layer].shape
        rows, cols = adj_mat_size
        return offset + u_idx * cols + v_idx

    # map the start edge for a node in a layer to a linear idx
    def start_edge_map(self, layer, u_idx):
        start_idx = self.weights_linear_map[-1] + sum(self.layer_sizes[:layer])
        result = start_idx + u_idx
        return result

    # map the end edge for a node in a layer to a linear idx
    def end_edge_map(self, layer, u_idx):
        start_idx = self.weights_linear_map[-1] + \
            sum(self.layer_sizes) + sum(self.layer_sizes[:layer])
        result = start_idx + u_idx
        return result

    def constraint_lp(self):
        """Assemble the LP data: flow constraints A x <= b and rewards c."""
        num_vars = 0
        # a variable for a association
        for i in range(self.len_layers - 1):
            num_vars += self.layer_sizes[i] * self.layer_sizes[i+1]
        # a variable for the creation & the ending for each node
        num_vars += 2 * sum(self.layer_sizes)
        # num of constraint, 2 for each graph node
        A = np.zeros((2 * sum(self.layer_sizes), num_vars))
        print('num layers:', self.len_layers, ' num nodes:', sum(self.layer_sizes), 'num edges: ',
              num_vars, ' num graph constraint:', A.shape[0])
        constraint_idx = 0
        c = np.zeros(num_vars)
        for u_layer in range(len(self.layer_sizes) - 1):
            w = self.weights[u_layer]
            rows, cols = w.shape
            # update c for each edge
            for u in range(rows):
                for v in range(cols):
                    idx = self.edge_map(u_layer, u, v)
                    c[idx] = w[u][v]
            # constraints for out going edges:
            # each node u uses at most one outgoing edge or its end edge.
            for u in range(rows):
                for v in range(cols):
                    idx = self.edge_map(u_layer, u, v)
                    A[constraint_idx][idx] = 1.
                end_edge_idx = self.end_edge_map(u_layer, u)
                A[constraint_idx][end_edge_idx] = 1.
                constraint_idx += 1
            # constraints for in going edges:
            # each node v uses at most one incoming edge or its start edge.
            for v in range(cols):
                for u in range(rows):
                    idx = self.edge_map(u_layer, u, v)
                    A[constraint_idx][idx] = 1.
                start_edge_idx = self.start_edge_map(u_layer + 1, v)
                A[constraint_idx][start_edge_idx] = 1.
                constraint_idx += 1
        # update c for start & end
        for layer in range(self.len_layers):
            layer_size = self.layer_sizes[layer]
            for u in range(layer_size):
                start_edge_idx = self.start_edge_map(layer, u)
                end_edge_idx = self.end_edge_map(layer, u)
                c[start_edge_idx] = self.start_reward
                c[end_edge_idx] = self.end_reward
        # start edges for the first layer
        for u in range(self.layer_sizes[0]):
            start_edge_idx = self.start_edge_map(0, u)
            A[constraint_idx][start_edge_idx] = 1.
            constraint_idx += 1
        # end edges for the last layer
        for u in range(self.layer_sizes[-1]):
            end_edge_idx = self.end_edge_map(len(self.layer_sizes) - 1, u)
            A[constraint_idx][end_edge_idx] = 1.
            constraint_idx += 1
        assert(constraint_idx == A.shape[0])
        b = np.ones(constraint_idx)
        if False:
            print('A:\n', A)
        # TODO: check if every row of A is not perpendicular to c?
        return A, b, c, num_vars

    def lp(self):
        """Solve the LP relaxation; return (solution x, is_integral)."""
        A, b, c, num_vars = self.constraint_lp()
        # Define and solve the CVXPY problem (maximize reward c.T @ x).
        x = cp.Variable(num_vars)
        prob = cp.Problem(cp.Minimize(- c.T@x),
                          [A @ x <= b, x >= 0, x <= 1])
        prob.solve(solver=cp.OSQP, verbose=False, eps_abs=1e-8, eps_rel=1e-8)
        # Print result.
        if False:
            print("\nThe optimal value is", prob.value)
            print("A solution x is")
            print(x.value)
        # TODO: what's the precision for cvxpy?
        # Count variables numerically at 0 or 1 (within epsilon).
        epsilon = 1e-7
        integral_mask = (np.abs(x.value-0) <
                         epsilon) | (np.abs(x.value-1) < epsilon)
        num_integral = sum(integral_mask)
        print("Num integral:", num_integral)
        relaxation_success = False
        if num_integral == num_vars:
            print("Integer Programming == Linear Programming !!")
            relaxation_success = True
        else:
            print("Linear relaxation != Integer Programming")
            relaxation_success = False
        if True and relaxation_success is False:
            # Show the fractional variables for debugging.
            for i in range(len(integral_mask)):
                if not integral_mask[i]:
                    print('i:', i, ' v:', x.value[i])
        return x.value, relaxation_success

    def build_graph(self, lp_result_x):
        """Build a backward adjacency list weighted by the LP solution."""
        # adjacent list
        direct_graph = defaultdict(list)
        # build the layer to layer graph (each node points to its
        # predecessors in the previous layer).
        for u_layer in range(len(self.layer_sizes) - 1):
            w = self.weights[u_layer]
            rows, cols = w.shape
            for v in range(cols):
                v_name = 'layer:{} node:{}'.format(u_layer + 1, v)
                for u in range(rows):
                    u_name = 'layer:{} node:{}'.format(u_layer, u)
                    x_idx = self.edge_map(u_layer, u, v)
                    direct_graph[v_name].append((u_name, lp_result_x[x_idx]))
        # connect to start
        start_name = 'start'
        for layer in range(self.len_layers):
            layer_size = self.layer_sizes[layer]
            for u in range(layer_size):
                u_name = 'layer:{} node:{}'.format(layer, u)
                x_idx = self.start_edge_map(layer, u)
                direct_graph[u_name].append((start_name, lp_result_x[x_idx]))
        # 'start' terminates the backward walk.
        direct_graph['start'] = [(None, 0.)]
        if False:
            for k, v in direct_graph.items():
                print('key: ', k)
                print(v)
        return direct_graph

    def find_association_greedy(self, lp_result_x):
        """Trace tracks backwards from the last layer, greedily following
        the highest-weight predecessor until 'start' is reached."""
        graph = self.build_graph(lp_result_x)
        result = []
        # just need to consider current node
        for end_node in range(self.layer_sizes[-1]):
            node = 'layer:{} node:{}'.format(self.len_layers - 1, end_node)
            path = ['end']
            while(node is not None):
                path.append(node)
                # [(node, weight), .....]
                next_nodes = graph[node]
                next_with_largest_weight = sorted(
                    next_nodes, key=lambda x: x[1], reverse=True)[0]
                node = next_with_largest_weight[0]
            path.reverse()
            result.append(path)
        return result
def relaxation_experiment():
    """Measure how often the LP relaxation yields an integral solution.

    Builds many random association graphs, solves each LP, and tallies
    integral optima versus solver failures.
    """
    total_try = 10000
    success_count = 0
    solver_failure = 0
    for i in range(total_try):
        try:
            print('iter:', i)
            association = AssociationGraph()
            x, relaxation_success = association.lp()
            if relaxation_success:
                success_count += 1
        except Exception:
            # FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit, making the long experiment
            # impossible to interrupt; count only real solver errors.
            solver_failure += 1
    print('total try:', total_try,
          'relaxation success count:', success_count,
          'solver failures:', solver_failure)
def run_single():
    """Solve one random association graph and print the greedy tracks."""
    association = AssociationGraph()
    x, relaxation_success = association.lp()
    result = association.find_association_greedy(x)
    for p in result:
        print('path: ', p)
if __name__ == "__main__":
    # Toggle between a single demo run and the large relaxation study.
    # run_single()
    relaxation_experiment()
|
yimuw/yimu-blog | random/res/try2.py | # -*- coding: utf-8 -*-
import torch
import math
# All tensors in this experiment live on CPU in float32.
dtype = torch.float
device = torch.device("cpu")
def gt(x):
    """Ground-truth target function.

    The original expression sums, over every non-empty subset of the
    fixed weights (2, 2, -2, 2.) plus the bare x term, the subset product
    times x -- which is algebraically prod(1 + w_i) * x.
    """
    weights = (2, 2, -2, 2.)
    scale = 1.
    for w in weights:
        scale *= (1. + w)
    return scale * x
def f1(x, W):
    """Depth-1 surrogate: (1 + sum of the four weights) * x."""
    w1, w2, w3, w4 = W
    return (w1 + w2 + w3 + w4 + 1) * x
def f2(x, W):
    """Depth-2 surrogate: pairwise products, singles and the identity term."""
    w1, w2, w3, w4 = W
    pairs = w1*w2 + w1*w3 + w1*w4 + w2*w3 + w2*w4 + w3*w4
    singles = w1 + w2 + w3 + w4
    return (pairs + singles + 1) * x
def f3(x, W):
    """Depth-3 surrogate: triple and pair products, singles and identity."""
    w1, w2, w3, w4 = W
    triples = w1*w2*w3 + w1*w2*w4 + w1*w3*w4 + w2*w3*w4
    pairs = w1*w2 + w1*w3 + w1*w4 + w2*w3 + w2*w4 + w3*w4
    singles = w1 + w2 + w3 + w4
    return (triples + pairs + singles + 1) * x
def f4(x, W):
    """Full-depth model: multiply x by (1 + w) for every weight in W."""
    result = x
    for weight in W:
        result = (1. + weight) * result
    return result
# Initial weights, training inputs, targets (ground truth plus a 0.1
# offset so the fit is not exact), and the gradient-descent step size.
INIT_WEIGHTS = [0.01, -0.01, 0.01, -0.01]
X = [1, 2, 3, 4, 5]
Y = [gt(x) + 0.1 for x in X]
learning_rate = 1e-6
def test2():
    """Fit the four weights with the full product model from the start.

    Plain gradient descent on the squared error over (X, Y) for 2000
    iterations, then prints predictions next to the targets.
    """
    print("test2 start!")
    weights = [torch.tensor([[INIT_WEIGHTS[i]]], device=device,
                            dtype=dtype, requires_grad=True) for i in range(4)]

    def f(x):
        # Full-depth model (same as f4), closing over `weights`.
        z = x
        for w in weights:
            z = (w + 1.) * z
        y_pred = z
        return y_pred

    for t in range(2000):
        loss = 0
        for x, y in zip(X, Y):
            y_pred = f(x)
            loss += (y_pred - y).pow(2)
        if t % 50 == 0:
            print('iter:', t, 'loss:', loss.item())
        loss.backward()
        # Manual SGD step; gradients are zeroed after each update.
        with torch.no_grad():
            for _, w in enumerate(weights):
                w -= learning_rate * w.grad
                w.grad.zero_()
    for x, y in zip(X, Y):
        y_pred = f(x)
        print('y:', y, ' y_pred:', y_pred.item())
def test3():
    """Curriculum training: fit with progressively deeper surrogates.

    Uses f1 for the first 2000 iterations, then f2, then f3, and the full
    product model f4 from iteration 6000 on; prints predictions at the end.
    """
    print("test2 start!")
    weights = [torch.tensor([[INIT_WEIGHTS[i]]], device=device,
                            dtype=dtype, requires_grad=True) for i in range(4)]
    for t in range(10000):
        loss = 0
        for x, y in zip(X, Y):
            if t < 2000:
                f = f1
            elif 2000 <= t < 4000:
                f = f2
            elif 4000 <= t < 6000:
                # FIX: the boundary was `4000 < t < 6000`, so iteration
                # t == 4000 fell through to f4 for one step before
                # returning to f3 at t == 4001 -- an off-by-one in the
                # curriculum schedule.
                f = f3
            else:
                f = f4
            y_pred = f(x, weights)
            loss += (y_pred - y).pow(2)
        if t % 50 == 0:
            print('iter:', t, 'loss:', loss.item())
        loss.backward()
        # Manual SGD step; gradients are zeroed after each update.
        with torch.no_grad():
            for _, w in enumerate(weights):
                w -= learning_rate * w.grad
                w.grad.zero_()
    for x, y in zip(X, Y):
        y_pred = f(x, weights)
        print('y:', y, ' y_pred:', y_pred.item())
if __name__ == "__main__":
    # Run the curriculum-training experiment.
    test3()
|
yimuw/yimu-blog | least_squares/fix_lag_smoothing/fix_lag_smoother.py | <gh_stars>1-10
import scipy.linalg as linalg
import numpy as np
import profiler
import fix_lag_types as t
import copy
# lag = 1.
# Similar to Kalman filter without noise
class FixLagSmoother:
    """Fixed-lag smoother (lag = 1) over 2-D states with distance factors.

    Maintains a Gaussian summary (hessian + information vector) of the
    current state and marginalizes out the previous state after each new
    distance measurement, either via an explicit Gaussian/LU step or via
    the Schur complement.
    """

    def __init__(self, prior):
        self.state = prior.variables.copy()
        # Because I didn't specify weight, so weights are identity
        self.state_hessian = np.identity(2)
        # Should be J * W * prior, but J = W = I(2)
        self.state_b = prior.variables.copy().reshape([2, 1])
        # History of all smoothed states, newest last.
        self.all_states = [self.state]

    def get_all_states(self):
        """Return the state history stacked as a single array."""
        return np.vstack(self.all_states)

    def construct_linear_system(self, distance_measurement):
        """Build the 4x4 normal equations over [x_i, x_i+1].

        Combines the prior summary on the current state with the
        linearized distance measurement between consecutive states.
        """
        # x_i, x_i+1
        size_variables = 4
        # Hessian: A.T * W * A
        lhs = np.zeros([size_variables, size_variables])
        # A.T * W * r
        rhs = np.zeros([size_variables, 1])
        # Prior information lives in the first (older) state slot.
        lhs[0:2, 0:2] += self.state_hessian
        rhs[0:2] += self.state_b
        assert(distance_measurement.state1_index +
               1 == distance_measurement.state2_index)
        # Linearize the measurement at the current state estimate.
        dm = t.DistanceMeasurement(t.State(self.state.copy()), t.State(
            self.state.copy()), distance_measurement.distance)
        jacobi_wrt_s1 = dm.jacobi_wrt_state1()
        jacobi_wrt_s2 = dm.jacobi_wrt_state2()
        jacobi_dm = np.hstack([jacobi_wrt_s1, jacobi_wrt_s2])
        lhs[0:4, 0:4] += jacobi_dm.T @ jacobi_dm
        rhs[0:4] += jacobi_dm.T @ dm.residual()
        return lhs, rhs

    @profiler.time_it
    def optimize_for_new_measurement(self, distance_measurement):
        """Incorporate one measurement; dispatch to the chosen backend."""
        if False:
            return self.optimize_for_new_measurement_gaussian_impl(distance_measurement)
        else:
            return self.optimize_for_new_measurement_schur_impl(distance_measurement)

    @profiler.time_it
    def optimize_for_new_measurement_gaussian_impl(self, distance_measurement):
        """Solve the full 4x4 system, then marginalize the old state."""
        lhs, rhs = self.construct_linear_system(distance_measurement)
        # one step for linear system
        lhs_LU = linalg.lu_factor(lhs)
        x = linalg.lu_solve(lhs_LU, -rhs)
        # Keep only the newer state (last two components).
        self.state = x[2:4].reshape([2])
        self.all_states.append(self.state)
        self.marginalization_guassian_impl(lhs_LU, rhs)
        return x

    def marginalization_guassian_impl(self, lhs_LU, rhs):
        """
        factor-graph-for-robot-perception, page 68
        """
        hessian_LU = lhs_LU
        # By definition, covariance = inv(hessian)
        # A * A-1 = I, reuse LU
        covariance = linalg.lu_solve(hessian_LU, np.identity(4))
        mean_covariance_form = covariance @ rhs
        self.state_hessian = np.linalg.inv(covariance[2:4, 2:4])
        # This is tricky
        # get it by equating ||Ax + b||^2 = ||x + mu ||^2_W
        self.state_b = self.state_hessian @ mean_covariance_form[2:4]
        # Sanity check: the summarized system reproduces the solved state.
        x_test = np.linalg.solve(self.state_hessian, -self.state_b)
        np.testing.assert_array_almost_equal(x_test.reshape([2]), self.state)

    @profiler.time_it
    def optimize_for_new_measurement_schur_impl(self, distance_measurement):
        """
        Force schur ordering.
        """
        lhs, rhs = self.construct_linear_system(distance_measurement)
        # one step for linear system
        # lhs = [A, B; C, D]
        A = lhs[:-2, :-2]
        B = lhs[:-2, -2:]
        C = lhs[-2:, :-2]
        D = lhs[-2:, -2:]
        # rhs = [b1, b2]
        b1 = rhs[:-2]
        b2 = rhs[-2:]
        # Schur complement marginalizes the older state in one step.
        A_inv = np.linalg.inv(A)
        self.state_hessian = D - C @ A_inv @ B
        self.state_b = b2 - C @ A_inv @ b1
        x = np.linalg.solve(self.state_hessian, -self.state_b)
        self.state = x.reshape([2])
        self.all_states.append(self.state)
        return x
|
yimuw/yimu-blog | random/optimal_matrix_multiplcation/cpp/code_gen.py | import timeit
import numpy as np
import inspect
class Node:
    """Base class for expression-tree nodes."""

    def __init__(self, type):
        # Metadata filled in later by the tree builder / evaluator.
        self.expression_string = None
        self.shape = None
        self.node_type = type
class OpNode(Node):
    """Internal node: a binary operator applied to two subtrees."""

    def __init__(self, op):
        super().__init__('OpNode')
        self.op = op
        # Subtrees are attached by the tree builder.
        self.left = None
        self.right = None
class VarNode(Node):
    """Leaf node wrapping a single matrix variable."""

    def __init__(self, var):
        super().__init__('VarNode')
        self.var = var
        # A leaf's expression is just the variable name.
        self.expression_string = var.name
class Variable():
    """A named matrix placeholder used when generating expressions."""

    def __init__(self, name):
        self.name = name
def print_tree(root):
    '''
    Pretty-print a binary expression tree as ASCII art.

    Adapted from:
    https://stackoverflow.com/questions/34012886/print-binary-tree-level-by-level-in-python
    '''
    def _display_aux(root):
        # Returns (lines, width, height, horizontal middle of the root label).
        # No child.
        if root.node_type == 'VarNode':
            line = root.var.name
            width = len(line)
            height = 1
            middle = width // 2
            return [line], width, height, middle
        # Two children.
        left, n, p, x = _display_aux(root.left)
        right, m, q, y = _display_aux(root.right)
        s = root.op
        u = len(s)
        # Operator label line with underscores reaching over both subtrees,
        # then the / \ connector line.
        first_line = (x + 1) * ' ' + (n - x - 1) * \
            '_' + s + y * '_' + (m - y) * ' '
        second_line = x * ' ' + '/' + \
            (n - x - 1 + u + y) * ' ' + '\\' + (m - y - 1) * ' '
        # Pad the shorter subtree so both render the same number of rows.
        if p < q:
            left += [n * ' '] * (q - p)
        elif q < p:
            right += [m * ' '] * (p - q)
        zipped_lines = zip(left, right)
        lines = [first_line, second_line] + \
            [a + u * ' ' + b for a, b in zipped_lines]
        return lines, n + m + u, max(p, q) + 2, n + u // 2
    lines, _, _, _ = _display_aux(root)
    for line in lines:
        print(line)
def build_all_expression_tree(vars):
    '''
    Enumerate every full parenthesization of `vars` as an expression tree.
    '''
    if len(vars) == 1:
        return [VarNode(vars[0])]
    all_trees = []
    # Try every split point, then combine every left tree with every right tree.
    for split in range(1, len(vars)):
        for left_tree in build_all_expression_tree(vars[:split]):
            for right_tree in build_all_expression_tree(vars[split:]):
                node = OpNode('Prod')
                node.left = left_tree
                node.right = right_tree
                node.expression_string = \
                    'Prod<{},{}>'.format(left_tree.expression_string,
                                         right_tree.expression_string)
                all_trees.append(node)
    return all_trees
def gen_test_data():
    """Return seven placeholder matrix variables M1..M7."""
    return [Variable('M{}'.format(i)) for i in range(1, 8)]
def min_opetation_for_expressions(expressions):
    """Fold expressions into a right-nested Min<...> template expression."""
    # Iterative right fold: Min<e1,\nMin<e2,\n...e_n>>
    result = expressions[-1]
    for expr in reversed(expressions[:-1]):
        result = 'Min<{},\n{}>'.format(expr, result)
    return result
def gen_all_expression():
    """Print a C++ MinExpression alias for each chain length from 3 up."""
    variables = gen_test_data()
    print('============ all expressions =============')
    for size in range(3, len(variables)):
        candidates = [tree.expression_string
                      for tree in build_all_expression_tree(variables[:size])]
        min_expr = min_opetation_for_expressions(candidates)
        print('using MinExpression{} = {};'.format(size, min_expr))
def test_all_tree():
    """Smoke test: print every expression tree for the first 3 variables."""
    vars = gen_test_data()
    vars = vars[:3]
    trees = build_all_expression_tree(vars)
    for t in trees:
        print('for tree:', t.expression_string)
        print_tree(t)
    expressions = [t.expression_string for t in trees]
    print('All expressions:')
    # NOTE: removed the unused local `var_size` from the original.
    print('using MinExpression = {};'.format(min_opetation_for_expressions(expressions)))
if __name__ == "__main__":
    # Demo: print trees for 3 variables, then emit the C++ type aliases.
    test_all_tree()
    gen_all_expression()
|
yimuw/yimu-blog | least_squares/ceres-from-scratch/linear_regression.py | from number_forward_flow import *
def linear_case():
    """Solve r = Ax - b with one Gauss-Newton step (exact for a linear residual)."""
    print('=============== linear_case ==============')
    A = np.random.rand(6, 5)
    x_gt = np.ones(5, dtype='float64')
    b = A @ x_gt

    def residual_test(vars):
        """r = Ax - b"""
        return A @ vars - b

    x0 = np.random.rand(5, 1)
    r, J = ResidualBlock(residual_test, x0).evaluate()
    print('r:', r)
    print('J:', J)
    print('A:', A)
    J = np.array(J)
    r = np.array(r)
    # Normal equations: (J^T J) dx = -J^T r
    dx = np.linalg.solve(J.T @ J, -J.T @ r)
    print('x0:', x0.T)
    print('dx:', dx.T)
    print('solver res:', (x0 + dx).T)
    print('x_gt:', x_gt.T)
if __name__ == "__main__":
    # Demo of the from-scratch autodiff solver on a linear least-squares fit.
    linear_case()
|
yimuw/yimu-blog | least_squares/pca/utils.py | <gh_stars>1-10
import numpy as np
# from scipy.linalg import logm, expm
from math import cos, sin, pi
import matplotlib.pyplot as plt
def euler_angle_to_rotation(yaw, pitch, roll):
    """Compose R = Rz(yaw) @ Ry(pitch) @ Rx(roll) from Euler angles (radians)."""
    cy, sy = cos(yaw), sin(yaw)
    cp, sp = cos(pitch), sin(pitch)
    cr, sr = cos(roll), sin(roll)
    rot_z = np.array([
        [cy, -sy, 0.],
        [sy, cy, 0.],
        [0., 0., 1.],
    ])
    rot_y = np.array([
        [cp, 0., sp],
        [0., 1., 0.],
        [-sp, 0., cp],
    ])
    rot_x = np.array([
        [1., 0., 0.],
        [0., cr, -sr],
        [0., sr, cr],
    ])
    return rot_z @ rot_y @ rot_x
def skew(w):
    """Return the 3x3 skew-symmetric (hat) matrix of a 3-vector."""
    wx, wy, wz = w
    mat = np.zeros((3, 3))
    mat[0, 1], mat[0, 2] = -wz, wy
    mat[1, 0], mat[1, 2] = wz, -wx
    mat[2, 0], mat[2, 1] = -wy, wx
    return mat
def so3_exp(w):
    """Exponential map from so(3) (axis-angle vector) to a rotation matrix."""
    theta = np.linalg.norm(w)
    # First-order approximation for tiny angles (avoids division by ~0).
    if abs(theta) < 1e-8:
        return np.identity(3) + skew(w)
    axis = w / theta
    K = skew(axis)
    # Rodrigues' rotation formula.
    R = np.identity(3) + sin(theta) * K + (1 - cos(theta)) * K @ K
    # Sanity check: R must be orthogonal.
    np.testing.assert_almost_equal(R @ R.transpose(), np.identity(3))
    return R
yimuw/yimu-blog | least_squares/icp/icp_main.py | <gh_stars>1-10
import numpy as np
# from scipy.linalg import logm, expm
from math import cos, sin, pi
import matplotlib.pyplot as plt
import icp_euler
import icp_so3
def generate_point_cloud():
    """Return a small 3xN test point cloud (last point is far away on purpose)."""
    points = np.array([
        [0, 0, 1],
        [1, 0, 0],
        [0, 1, 0],
        [1, 1, 0],
        [0, 1, 1],
        [1, 0, 1],
        [0, 1e1, 1e5],
    ])
    return points.T
def generate_simulation_data():
    """Rotate the source cloud by a known rotation to build the target cloud."""
    point_src = generate_point_cloud()
    R_gt = icp_euler.euler_angle_to_rotation(0.84, pi, pi)
    return point_src, R_gt @ point_src, R_gt
def main():
    """Run the three ICP rotation solvers on the same simulated data."""
    point_src, point_target, R_gt = generate_simulation_data()
    print('point_src', point_src)

    print('icp euler start...')
    icp_euler.icp_yaw_pitch_roll_numirical(point_src, point_target)

    print('icp so3 start...')
    icp_so3.icp_so3_numirical(point_src, point_target)

    print('icp local so3 start...')
    icp_so3.icp_local_so3_numirical(point_src, point_target)
if __name__ == "__main__":
    # Compare euler-angle, so3, and local-so3 ICP on simulated data.
    main()
|
yimuw/yimu-blog | least_squares/ceres-from-scratch/number_forward_flow.py | import numpy as np
import math
from collections import defaultdict
import itertools
import numbers
class JetMapImpl:
    """Forward-mode autodiff number ("jet"): a value plus partial derivatives.

    Partials are stored sparsely in a defaultdict keyed by variable id, so
    reading a variable the jet does not depend on yields 0.0.
    """

    def __init__(self, value, var_id=None):
        self.value = value
        self.derivative_wrt_variables = defaultdict(float)
        self.var_id = var_id
        if var_id is not None:
            # d(x)/d(x) = 1. Keep the defaultdict: the original replaced it
            # with a plain dict here, so missing-key reads raised KeyError
            # inconsistently with jets produced by arithmetic.
            self.derivative_wrt_variables[var_id] = 1.

    def __repr__(self):
        s = 'value:' + str(self.value) + ' '
        for p in self.derivative_wrt_variables:
            s += 'id:' + str(p) + ' :' + str(self.derivative_wrt_variables[p]) + ' '
        return s

    def __add__(self, other):
        if isinstance(other, numbers.Number):
            other = JetMapImpl(other)
        ret = JetMapImpl(value=self.value + other.value)
        # d(a+b) = da + db
        for var in self.derivative_wrt_variables:
            ret.derivative_wrt_variables[var] += self.derivative_wrt_variables[var]
        for var in other.derivative_wrt_variables:
            ret.derivative_wrt_variables[var] += other.derivative_wrt_variables[var]
        return ret

    def __radd__(self, other):
        if isinstance(other, numbers.Number):
            other = JetMapImpl(other)
        return other + self

    def __mul__(self, other):
        if isinstance(other, numbers.Number):
            other = JetMapImpl(other)
        ret = JetMapImpl(value=self.value * other.value)
        # Product rule: d(a*b) = da * b + a * db
        for var in self.derivative_wrt_variables:
            ret.derivative_wrt_variables[var] += self.derivative_wrt_variables[var] * other.value
        for var in other.derivative_wrt_variables:
            ret.derivative_wrt_variables[var] += self.value * other.derivative_wrt_variables[var]
        return ret

    def __rmul__(self, other):
        if isinstance(other, numbers.Number):
            other = JetMapImpl(other)
        return other * self

    def __sub__(self, other):
        if isinstance(other, numbers.Number):
            other = JetMapImpl(other)
        ret = JetMapImpl(value=self.value - other.value)
        # d(a-b) = da - db
        for var in self.derivative_wrt_variables:
            ret.derivative_wrt_variables[var] += self.derivative_wrt_variables[var]
        for var in other.derivative_wrt_variables:
            ret.derivative_wrt_variables[var] -= other.derivative_wrt_variables[var]
        return ret

    def __neg__(self):
        ret = JetMapImpl(value=-self.value)
        for var in self.derivative_wrt_variables:
            ret.derivative_wrt_variables[var] = - self.derivative_wrt_variables[var]
        return ret
def cosine(num):
    """cos() that works on plain numbers and on JetMapImpl jets."""
    if isinstance(num, numbers.Number):
        return math.cos(num)
    result = JetMapImpl(value=math.cos(num.value))
    result.derivative_wrt_variables = defaultdict(float)
    # Chain rule: d cos(u) = -sin(u) * du
    for var, partial in num.derivative_wrt_variables.items():
        result.derivative_wrt_variables[var] -= math.sin(num.value) * partial
    return result
def sine(num):
    """sin() that works on plain numbers and on JetMapImpl jets."""
    if isinstance(num, numbers.Number):
        return math.sin(num)
    result = JetMapImpl(value=math.sin(num.value))
    result.derivative_wrt_variables = defaultdict(float)
    # Chain rule: d sin(u) = cos(u) * du
    for var, partial in num.derivative_wrt_variables.items():
        result.derivative_wrt_variables[var] += math.cos(num.value) * partial
    return result
class ResidualBlock:
    """Evaluates a residual function and its Jacobian via forward-mode autodiff."""

    def __init__(self, residual_function, init_vars):
        self.residual_function = residual_function
        # Seed one jet per variable, each with d(var_i)/d(var_i) = 1.
        self.jets = np.array([JetMapImpl(v, var_id='var{}'.format(i))
                              for i, v in enumerate(init_vars)])

    def evaluate(self):
        """Return (residual_vector, jacobian_matrix) at the init point."""
        residual_jets = self.residual_function(self.jets)
        num_res, num_vars = len(residual_jets), len(self.jets)
        jacobian = np.zeros([num_res, num_vars])
        for row in range(num_res):
            partials = residual_jets[row].derivative_wrt_variables
            for col in range(num_vars):
                jacobian[row, col] = partials[self.jets[col].var_id]
        residual = np.array([jet.value for jet in residual_jets])
        return residual, jacobian
|
yimuw/yimu-blog | least_squares/kalman/main.py | <reponame>yimuw/yimu-blog
import numpy as np
import matplotlib.pyplot as plt
import extented_kalman_filter
import kalman_least_sqr
import batch_least_sqr
import model
def plot_all():
    """Run all estimators on one simulated dataset and show their plots."""
    prior = np.array([4.0, 0.1, 1, 0, 0.5])
    states_gt, measurements_gt = model.generate_gt_data(prior)

    print('=========run_kalman_filter==========')
    extented_kalman_filter.run_extented_kalman_filter(states_gt, measurements_gt)

    print('=========run_kalman_least_sqr, iter : 1 ==========')
    kalman_least_sqr.run_kalman_least_sqr(states_gt, measurements_gt, max_iter = 1)

    print('=========run_kalman_least_sqr, iter : 10 ==========')
    kalman_least_sqr.run_kalman_least_sqr(states_gt, measurements_gt, max_iter = 10)

    print('==========run_graph_filter===========')
    batch_least_sqr.run_batch_least_sqr(states_gt, measurements_gt)

    plt.show()
if __name__ == '__main__':
    np.set_printoptions(precision=4, suppress=True)
    # Fixed seed so the simulated data is reproducible across runs.
    np.random.seed(0)
    plot_all()
yimuw/yimu-blog | least_squares/land_rocket/art.py | import os
from math import cos, sin
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
import numpy as np
import pylab
import argparse
def save_cur_figure(dir, file_name):
    """Save the current matplotlib figure as dir/file_name.

    Creates the directory (including parents) if it does not exist yet.
    """
    # makedirs + exist_ok avoids the check-then-create race of the original
    # os.path.exists/os.mkdir pair and also handles nested directories.
    os.makedirs(dir, exist_ok=True)
    path = os.path.join(dir, file_name)
    plt.savefig(path)
class Artist:
    """Draws a rocket sprite with a thrust flame onto the current figure."""

    def __init__(self):
        import pathlib
        current_file_path = pathlib.Path(__file__).parent.absolute()
        fire_im = plt.imread(os.path.join(current_file_path, 'art_files/blue_fire.png'))
        # may need filter
        # Downsample by 5 in both image dimensions to keep drawing cheap.
        # NOTE: the checked-in line was corrupted (an IPv6-looking token where
        # the second slice belongs); restored to the symmetric stride.
        self.fire_im = fire_im[::5, ::5, :]
        self.earth_im = plt.imread(os.path.join(current_file_path, 'art_files/rocket.jpg'))

    def draw_at(self, x, y, theta_raw, fire_len_scale, scale=1.):
        """Draw the rocket at (x, y) heading theta_raw (radians).

        fire_len_scale stretches the flame; scale resizes the whole sprite.
        """
        # Sprites are authored pointing "up"; shift so 0 rad points along +x.
        theta = theta_raw - np.pi / 2.

        im_earth = plt.imshow(self.earth_im)
        elen_y, elen_x, _ = self.earth_im.shape
        # Center the sprite, shrink it, rotate, then move to (x, y).
        trans_data = \
            mtransforms.Affine2D().translate(-elen_x / 2., -elen_y / 2.) \
            + mtransforms.Affine2D().scale(0.1 * scale) \
            + mtransforms.Affine2D().rotate(theta + np.pi) \
            + mtransforms.Affine2D().translate(x, y)
        trans0 = im_earth.get_transform()
        trans_data = trans_data + trans0
        im_earth.set_transform(trans_data)

        im_fire = plt.imshow(self.fire_im)
        flen_y, flen_x, _ = self.fire_im.shape
        trans_data = mtransforms.Affine2D().translate(-flen_x / 2., -flen_y * 0.8) \
            + mtransforms.Affine2D().scale(0.25 * scale, 0.5 * scale * fire_len_scale) \
            + mtransforms.Affine2D().rotate(theta) \
            + mtransforms.Affine2D().translate(x, y)
        trans0 = im_fire.get_transform()
        trans_data = trans_data + trans0
        im_fire.set_transform(trans_data)
def movie(trajectory_mat):
    """
    Animate a rocket trajectory frame by frame (trajectory plot + sprite).

    trajectory_mat: (num_states, 8) array; columns are
    dt, x, y, heading, vx, vy, accl, heading_dot.
    """
    DT_IDX = 0
    X_IDX = 1
    Y_IDX = 2
    HEADING_IDX = 3
    VX_IDX = 4
    VY_IDX = 5
    ACCL_IDX = 6
    HEADING_DOT_IDX = 7
    # dt, x, y, heading, vx, vy, accl, heading_dot
    max_x = np.max(trajectory_mat[:, X_IDX])
    max_y = np.max(trajectory_mat[:, Y_IDX])
    min_x = np.min(trajectory_mat[:, X_IDX])
    min_y = np.min(trajectory_mat[:, Y_IDX])
    max_diff = max(max_x - min_x, max_y - min_y)
    max_thurst = max(trajectory_mat[:, ACCL_IDX])
    speed = np.linalg.norm(trajectory_mat[:, VX_IDX:(VY_IDX+1)], axis=1)
    max_speed = max(speed)
    scale = 1  # (states[-1] - states[0])[0] / 30
    num_states, state_size = trajectory_mat.shape
    assert state_size == 8
    time = 0.
    for i in range(num_states):
        dt, x, y, theta, v_x, v_y, thrust, theta_dot = trajectory_mat[i]
        plt.clf()
        time += dt
        # Thrust vector in the world frame.
        thrust_x = cos(theta) * thrust
        thrust_y = sin(theta) * thrust
        ax1 = plt.subplot(1, 1, 1)
        plt.plot(trajectory_mat[:, X_IDX], trajectory_mat[:, Y_IDX], '-o', alpha=0.2)
        # plt.axis('equal')
        x_max = max(trajectory_mat[:, X_IDX])
        x_min = min(trajectory_mat[:, X_IDX])
        y_max = max(trajectory_mat[:, Y_IDX])
        y_min = min(trajectory_mat[:, Y_IDX])
        margin = 3
        # Fixed axis limits so the animation does not jump between frames.
        plt.xlim([x_min - margin, x_max + margin])
        plt.ylim([y_min - margin, y_max + margin])
        plt.autoscale(False)
        if abs(thrust) > 0:
            plt.arrow(x - thrust_x * scale, y - thrust_y * scale,
                      thrust_x * scale, thrust_y * scale,
                      width=0.05)
        artist = Artist()
        artist.draw_at(x,y,theta, thrust, scale=0.03)
        plt.title('time :{:10.4f}sec, pos:({:10.4f}{:10.4f})'\
            .format(time, x, y))
        # save_cur_figure('test', '{}.png'.format(i))
        plt.pause(0.01)
    plt.show()
def movie_and_plots(trajectory_mat):
    """
    Animate a rocket trajectory with side panels for controls, speed,
    thrust/gravity and heading.

    trajectory_mat: (num_states, 8) array; columns are
    dt, x, y, heading, vx, vy, accl, heading_dot.
    """
    DT_IDX = 0
    X_IDX = 1
    Y_IDX = 2
    HEADING_IDX = 3
    VX_IDX = 4
    VY_IDX = 5
    ACCL_IDX = 6
    HEADING_DOT_IDX = 7
    # dt, x, y, heading, vx, vy, accl, heading_dot
    max_x = np.max(trajectory_mat[:, X_IDX])
    max_y = np.max(trajectory_mat[:, Y_IDX])
    min_x = np.min(trajectory_mat[:, X_IDX])
    min_y = np.min(trajectory_mat[:, Y_IDX])
    max_diff = max(max_x - min_x, max_y - min_y)
    max_thurst = max(trajectory_mat[:, ACCL_IDX])
    speed = np.linalg.norm(trajectory_mat[:, VX_IDX:(VY_IDX+1)], axis=1)
    max_speed = max(speed)
    scale = 1  # (states[-1] - states[0])[0] / 30
    num_states, state_size = trajectory_mat.shape
    assert state_size == 8
    time = 0.
    for i in range(num_states):
        dt, x, y, theta, v_x, v_y, thrust, theta_dot = trajectory_mat[i]
        plt.clf()
        time += dt
        # Thrust vector in the world frame.
        thrust_x = cos(theta) * thrust
        thrust_y = sin(theta) * thrust
        # Panel 1: trajectory with rocket sprite.
        ax1 = plt.subplot(2, 3, 1)
        plt.plot(trajectory_mat[:, X_IDX], trajectory_mat[:, Y_IDX], '-o', alpha=0.2)
        # plt.axis('equal')
        x_max = max(trajectory_mat[:, X_IDX])
        x_min = min(trajectory_mat[:, X_IDX])
        y_max = max(trajectory_mat[:, Y_IDX])
        y_min = min(trajectory_mat[:, Y_IDX])
        margin = 3
        plt.xlim([x_min - margin, x_max + margin])
        plt.ylim([y_min - margin, y_max + margin])
        plt.autoscale(False)
        if abs(thrust) > 0:
            plt.arrow(x - thrust_x * scale, y - thrust_y * scale,
                      thrust_x * scale, thrust_y * scale,
                      width=0.05)
        artist = Artist()
        artist.draw_at(x,y,theta, thrust, scale=0.03)
        plt.title('time :{:10.4f}sec, pos:({:10.4f}{:10.4f})'\
            .format(time, x, y))
        # Panel 2: control inputs over time (full history faded, past solid).
        plt.subplot(2, 3, 2)
        plt.plot(trajectory_mat[:, ACCL_IDX:(HEADING_DOT_IDX+1)], 'g', alpha=0.1)
        plt.plot(trajectory_mat[:i+1, HEADING_DOT_IDX], 'r', label='theta_dot')
        plt.plot(trajectory_mat[:i+1, ACCL_IDX], 'b', label='thurst')
        pylab.legend(loc='upper left')
        plt.xlabel('index')
        plt.ylabel('thrust / theta_dot')
        plt.title('controls,theta:{:10.4f} thrust:{:10.4f}'.format(trajectory_mat[i, HEADING_DOT_IDX], trajectory_mat[i, ACCL_IDX]))
        # Panel 3: velocity components over time.
        plt.subplot(2, 3, 3)
        plt.plot(trajectory_mat[:, VX_IDX:(VY_IDX+1)], 'g', alpha=0.1)
        plt.plot(trajectory_mat[:i+1, VX_IDX], 'r', label='vx')
        plt.plot(trajectory_mat[:i+1, VY_IDX], 'b', label='vy')
        pylab.legend(loc='upper left')
        plt.title('speed')
        plt.xlabel('idx')
        plt.ylabel('meter/sec')
        # Unit gravity vector used in panels 4 and 5.
        gx = 0
        gy = -1
        plt.subplot(2, 3, 4)
        plt.plot([0, thrust_x], [0, thrust_y], alpha=0.5, label='thrust')
        plt.scatter(thrust_x, thrust_y)
        plt.plot([0, 0], [gx, gy], alpha=0.5, color='g', label='gravitation')
        plt.scatter(gx, gy, color='g')
        max_acc = max(max_thurst, 1)
        pylab.legend(loc='upper left')
        plt.xlim([-max_acc, max_acc])
        plt.ylim([-max_acc, max_acc])
        plt.title('thrust and gravitation')
        # Panel 5: net acceleration = thrust + gravity.
        plt.subplot(2, 3, 5)
        plt.plot([0, thrust_x + gx], [0, thrust_y + gy], alpha=0.5, label='accl')
        plt.scatter(thrust_x + gx, thrust_y + gy)
        max_acc = max(max_thurst, 1)
        plt.xlim([-max_acc, max_acc])
        plt.ylim([-max_acc, max_acc])
        plt.title('accl=thrust+g, ({:10.4f},{:10.4f})'.format(thrust_x + gx, thrust_y + gy))
        # Panel 6: velocity vector vs heading direction.
        plt.subplot(2, 3, 6)
        plt.plot([0, v_x], [0, v_y], color='b', alpha=0.5)
        plt.scatter(v_x, v_y, color='b', label='speed')
        plt.plot([0, cos(theta)], [0, sin(theta)], color='g', alpha=0.5)
        plt.scatter(cos(theta), sin(theta), color='g', label='heading')
        plt.xlim([-max_speed, max_speed])
        plt.ylim([-max_speed, max_speed])
        plt.title('speed vector, heading vector')
        pylab.legend(loc='upper left')
        plt.xlabel('vx')
        plt.ylabel('vy')
        plt.pause(0.01)
        # save_cur_figure('test', '{}.png'.format(i))
    plt.show()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='plot the rocket trajectory')
    parser.add_argument('--path', help='path to rocket csv file')
    args = parser.parse_args()
    # Space-separated rows of: dt, x, y, heading, vx, vy, accl, heading_dot.
    trajectory_mat = np.genfromtxt(args.path, delimiter=' ')
    movie_and_plots(trajectory_mat)
    movie(trajectory_mat)
yimuw/yimu-blog | deep-learning/tensorflow-from-scratch/variables_tree_flow.py | <reponame>yimuw/yimu-blog
import math
from collections import defaultdict
import numbers
import numpy as np
import utils
class Node:
    """Base graph node; tracks operators above and below it."""

    def __init__(self, id):
        self.id = id
        self.parents = []   # parent operators
        self.children = []  # child operators
class Variable(Node):
    """A scalar value in the computation graph.

    Arithmetic operators create Operator nodes (Plus/Mul/Neg), wire them into
    the graph, and return the operator's result Variable, so writing an
    expression builds the graph as a side effect.
    """

    def __init__(self, value=0, id='', ntype='opt-varible'):
        super().__init__(id)
        self.value = value
        self.grad = 0
        # "opt-varible", "const", "intermediate"
        self.ntype = ntype

    def __add__(self, other):
        # Promote plain numbers to constant graph nodes.
        if isinstance(other, numbers.Number):
            other = Variable(value=other, ntype='const')
        plus = Plus(self, other)
        # GLOBAL_VARS.operators.append(plus)
        self.parents.append(plus)
        other.parents.append(plus)
        plus.result.children = [plus]
        return plus.result

    def __radd__(self, other):
        if isinstance(other, numbers.Number):
            other = Variable(value=other, ntype='const')
        return other + self

    def __sub__(self, other):
        # Subtraction is implemented as self + (-other).
        if isinstance(other, numbers.Number):
            other = Variable(value=other, ntype='const')
        neg_other = - other
        return self + neg_other

    def __rsub__(self, other):
        if isinstance(other, numbers.Number):
            other = Variable(value=other, ntype='const')
        return other - self

    def __mul__(self, other):
        if isinstance(other, numbers.Number):
            other = Variable(value=other, ntype='const')
        mul = Mul(self, other)
        self.parents.append(mul)
        other.parents.append(mul)
        mul.result.children = [mul]
        return mul.result

    def __rmul__(self, other):
        if isinstance(other, numbers.Number):
            other = Variable(value=other, ntype='const')
        return other * self

    def __neg__(self):
        neg = Neg(self)
        self.parents.append(neg)
        neg.result.children = [neg]
        return neg.result

    def __str__(self):
        return 'value:{} grad:{} #children:{} #parents:{}'.format(self.value, self.grad, len(self.children),
                                                                  len(self.parents))
def ntf_sigmoid(number):
    """Sigmoid that works on graph Variables and on plain numbers."""
    if isinstance(number, Variable):
        op = Sigmoid(number)
        number.parents.append(op)
        op.result.children = [op]
        return op.result
    # Plain number: clamp so exp() cannot overflow.
    if abs(number) > 50:
        number = np.sign(number) * 50
    expo = math.exp(number)
    return expo / (1 + expo)
def ntf_log(number):
    """Natural log that works on graph Variables and on plain numbers."""
    if isinstance(number, Variable):
        op = Log(number)
        number.parents.append(op)
        op.result.children = [op]
        return op.result
    return math.log(number)
class Operator(Node):
    """Abstract graph operator; subclasses implement forward/backward."""

    def __init__(self, id):
        super().__init__(id)

    def forward(self):
        raise NotImplementedError("Should have implemented this")

    def backward(self):
        raise NotImplementedError("Should have implemented this")
class Plus(Operator):
    """Graph operator computing result = a + b."""

    def __init__(self, a: Variable, b: Variable):
        super().__init__("({})+({})".format(a.id, b.id))
        self.a = a
        self.b = b
        self.result = Variable(ntype="intermediate",
                               id="res:{}".format(self.id))
        self.children = [a, b]
        self.parents = [self.result]

    def forward(self):
        self.result.value = self.a.value + self.b.value

    def backward(self):
        upstream = self.result.grad
        if self.a is self.b:
            # x + x: both operands are the same node, so the gradient doubles.
            self.a.grad += 2 * upstream
        else:
            self.a.grad += upstream
            self.b.grad += upstream
class Mul(Operator):
    """Graph operator computing result = a * b."""

    def __init__(self, a: Variable, b: Variable):
        super().__init__("({})*({})".format(a.id, b.id))
        self.a = a
        self.b = b
        self.result = Variable(ntype="intermediate",
                               id="res:{}".format(self.id))
        self.children = [a, b]
        self.parents = [self.result]

    def forward(self):
        self.result.value = self.a.value * self.b.value

    def backward(self):
        upstream = self.result.grad
        if self.a is self.b:
            # x * x: d(x^2)/dx = 2x.
            self.a.grad += 2 * upstream * self.a.value
        else:
            # Product rule.
            self.a.grad += upstream * self.b.value
            self.b.grad += upstream * self.a.value
class Neg(Operator):
    """Graph operator computing result = -a."""

    def __init__(self, a: Variable):
        super().__init__("-({})".format(a.id))
        self.a = a
        self.result = Variable(ntype="intermediate",
                               id="res:{}".format(self.id))
        self.children = [a]
        self.parents = [self.result]

    def forward(self):
        self.result.value = - self.a.value

    def backward(self):
        self.a.grad += - self.result.grad
class Sigmoid(Operator):
    """Graph operator computing result = sigmoid(a)."""

    def __init__(self, a: Variable):
        super().__init__("sigmoid({})".format(a.id))
        self.a = a
        self.result = Variable(ntype="intermediate",
                               id="res:{}".format(self.id))
        self.children = [a]
        self.parents = [self.result]

    def forward(self):
        # Clamp the input (in place) so exp() cannot overflow.
        if abs(self.a.value) > 50:
            self.a.value = np.sign(self.a.value) * 50
        expo = math.exp(self.a.value)
        self.result.value = expo / (1 + expo)

    def backward(self):
        expo = math.exp(self.a.value)
        sig = expo / (1 + expo)
        # d(sigmoid)/dx = sigmoid * (1 - sigmoid)
        self.a.grad += self.result.grad * sig * (1 - sig)
class Log(Operator):
    """Graph operator computing result = log(a)."""

    def __init__(self, a: Variable):
        super().__init__("log({})".format(a.id))
        self.a = a
        self.result = Variable(ntype="intermediate",
                               id="res:{}".format(self.id))
        self.children = [a]
        self.parents = [self.result]

    def forward(self):
        # utils.traverse_tree(self)
        self.result.value = math.log(self.a.value)

    def backward(self):
        # d(log x)/dx = 1/x
        self.a.grad += self.result.grad * (1. / self.a.value)
class NumberFlowCore:
    """Engine that evaluates and differentiates a built computation graph.

    method='iter' precomputes a topological order and runs forward/backward
    iteratively; method='recur' traverses the graph recursively on demand.
    """

    def __init__(self, cost_node, method='iter'):
        self.cost_node = cost_node
        self.all_nodes, self.varible_nodes, self.const_nodes = self.__get_all_nodes(
            cost_node)
        # remove duplication
        self.all_nodes = list(set(self.all_nodes))
        self.varible_nodes = list(set(self.varible_nodes))
        self.const_nodes = list(set(self.const_nodes))
        self.method = method
        if self.method == 'iter':
            self.__topological_sort()

    def __get_all_nodes(self, node):
        # Recursively collect (all nodes, optimizable variables, constants).
        # Duplicates are possible for shared subgraphs; caller dedups.
        allnodes = [node]
        all_leaf_nodes = [node] if (isinstance(
            node, Variable) and node.ntype == 'opt-varible') else []
        all_const_nodes = [node] if (isinstance(
            node, Variable) and node.ntype == 'const') else []
        for c in node.children:
            sub_allnodes, sub_all_leaf_nodes, sub_all_const_nodes = self.__get_all_nodes(
                c)
            allnodes += sub_allnodes
            all_leaf_nodes += sub_all_leaf_nodes
            all_const_nodes += sub_all_const_nodes
        return allnodes, all_leaf_nodes, all_const_nodes

    def __topological_sort(self):
        # Kahn's algorithm: repeatedly peel nodes whose children are done.
        zero_degree_nodes = []
        indegree = defaultdict(int)
        for node in self.all_nodes:
            indegree[node] = len(node.children)
            if len(node.children) == 0:
                zero_degree_nodes.append(node)
        topo_order = []
        while zero_degree_nodes:
            next_zero_degree_nodes = []
            for znode in zero_degree_nodes:
                topo_order.append(znode)
                for p in znode.parents:
                    indegree[p] -= 1
                    if indegree[p] == 0:
                        next_zero_degree_nodes.append(p)
            zero_degree_nodes = next_zero_degree_nodes
        # A cycle leaves some nodes with nonzero indegree.
        if len(topo_order) != len(self.all_nodes):
            raise RuntimeError("cycle found!")
        self.topologic_order = topo_order

    def forward(self):
        """Evaluate every operator so cost_node.value is up to date."""
        if self.method == 'iter':
            self.__forward_iterative()
        elif self.method == 'recur':
            self.__forward_recursive()

    def __forward_iterative(self):
        for node in self.topologic_order:
            if isinstance(node, Operator):
                node.forward()

    def __forward_recursive(self):
        # `states` doubles as a memo table and a cycle detector.
        states = {}
        def evaluate(node):
            if node in states:
                if states[node] == 'visiting':
                    raise RuntimeError("cycle found!")
                else:
                    return states[node]
            states[node] = 'visiting'
            for child in node.children:
                evaluate(child)
            if isinstance(node, Operator):
                node.forward()
            if isinstance(node, Variable):
                states[node] = node.value
        evaluate(self.cost_node)

    def backward(self):
        """Backpropagate d(cost)/d(node) into every node's .grad."""
        if self.method == 'recur':
            return self.__backward_recur()
        elif self.method == 'iter':
            return self.__backward_iter()

    def __backward_recur(self):
        # NOTE(review): this DFS may call an operator's backward() before all
        # of its parents have contributed gradient when subgraphs are shared;
        # the iterative (reverse topological) path does not have this issue.
        visited = set()
        def backward_dfs(node):
            if node in visited: return
            if isinstance(node, Operator):
                node.backward()
            visited.add(node)
            for child in node.children:
                backward_dfs(child)
        self.cost_node.grad = 1.
        backward_dfs(self.cost_node)

    def __backward_iter(self):
        # Seed d(cost)/d(cost) = 1, then sweep in reverse topological order.
        self.cost_node.grad = 1.
        for node in reversed(self.topologic_order):
            if isinstance(node, Operator):
                node.backward()

    def clear_grad(self):
        """Zero the .grad of every Variable reachable from the cost node."""
        def clear_grad_dfs(node):
            if isinstance(node, Variable):
                node.grad = 0
            for child in node.children:
                clear_grad_dfs(child)
        clear_grad_dfs(self.cost_node)

    def gradient_desent(self, rate=0.01):
        """Take one gradient-descent step on all optimizable variables."""
        for var in self.varible_nodes:
            var.value -= rate * var.grad
|
yimuw/yimu-blog | least_squares/icp/icp_se3_main.py | import numpy as np
# from scipy.linalg import logm, expm
from math import cos, sin, pi
import matplotlib.pyplot as plt
import icp_so3_and_t
import icp_se3
import utils
def generate_point_cloud():
    """Return a small 3xN test point cloud (last point is far away on purpose)."""
    points = np.array([
        [0, 0, 1],
        [1, 0, 0],
        [0, 1, 0],
        [1, 1, 0],
        [0, 1, 1],
        [1, 0, 1],
        [0, 1e1, 1e5],
    ])
    return points.T
def generate_simulation_data():
    """Apply a known rotation and translation to build the target cloud."""
    point_src = generate_point_cloud()
    R_gt = utils.euler_angle_to_rotation(0.84, pi, pi)
    T_gt = np.array([[1, 10, 100]]).transpose()
    # Note: only R_gt is returned as ground truth; T_gt stays internal.
    return point_src, R_gt @ point_src + T_gt, R_gt
def main():
    """Run both SE(3)-style ICP solvers on the same simulated data."""
    point_src, point_target, R_gt = generate_simulation_data()
    print('point_src', point_src)

    print('icp (so3 + translation) start...')
    icp_so3_and_t.icp_so3_and_translation_numirical(point_src, point_target)

    print('icp se3 start...')
    icp_se3.icp_se3_numirical(point_src, point_target)
if __name__ == "__main__":
    # Compare (so3 + translation) vs full se3 ICP on simulated data.
    main()
|
yimuw/yimu-blog | least_squares/time_calibration/time_calibration.py | import numpy as np
import matplotlib.pyplot as plt
from collections import namedtuple
# A sampled 1-D signal: parallel arrays of timestamps and values.
Function = namedtuple('Function', ['time', 'values'])
def generate_data(show=False):
    """Simulate two identical signals offset in time.

    Returns (func1, func2, time_offset) where func1 lags func2 by
    time_offset seconds. Pass show=True to plot the ground truth
    (this path was previously dead code behind `if False:`).
    """
    time = np.linspace(0., 20., 1000)
    values = np.cos(time) * time

    time_offset = 1.54321
    func1 = Function(time=time + time_offset, values=values)
    func2 = Function(time=time, values=values)

    if show:
        plt.plot(func1.time, func1.values)
        plt.plot(func2.time, func2.values)
        plt.title('function. ground truth')
        plt.show()

    return func1, func2, time_offset
class TimeCalibration:
    '''
    Estimate a time offset between two sampled copies of the same signal.

    want:
        minimize_dt = sum_t ||func1(t + dt) - func2(t)||^2
    '''

    def __init__(self):
        # states
        self.variable_time = 0.
        # config
        self.max_iteration = 7

    def least_squares_calibrate(self, func1, func2):
        """
        It is the least square (Gauss-Newton) loop over the scalar offset dt.
        """
        for iteration in range(self.max_iteration):
            self.plot(func1, func2, 'fun1 vs fun2 at iter:{}, dt:{:.5f}'.format(
                iteration, self.variable_time))
            cost = self.compute_cost(func1, func2)
            print('iteration:{} cost:{}'.format(iteration, cost))
            jacobian = self.compute_jacobian(func1)
            b = self.compute_b(func1, func2)
            delta = self.solve_normal_equation(jacobian, b)
            self.variable_time += delta

    def compute_cost(self, func1, func2):
        # Sum of squared residuals at the current offset estimate.
        t = func1.time
        diff = np.interp(t + self.variable_time, func1.time, func1.values) \
            - np.interp(t, func2.time, func2.values)
        return diff.T @ diff

    def compute_jacobian(self, func1):
        """
        Compute the derivative of residual w.r.t dt
        """
        # compute derivative (central difference via convolution)
        dt = func1.time[1] - func1.time[0]
        # np.convolve 'same' has boundary effect.
        value_padded = np.concatenate(
            [[func1.values[0]], func1.values, [func1.values[-1]]])
        dfunc1_dt = Function(func1.time, np.convolve(
            value_padded, [0.5 / dt, 0, - 0.5 / dt], 'valid'))
        SHOW_DEVRIVATIVE = False
        if SHOW_DEVRIVATIVE:
            plt.plot(dfunc1_dt.time, dfunc1_dt.values, 'r')
            plt.plot(func1.time, func1.values, 'g')
            plt.show()
        # r(dt) = func1(t+dt) - func2(t)
        # drdt(dt) = dfun1_dt(t + dt)
        jacobian = np.interp(func1.time + self.variable_time,
                             dfunc1_dt.time, dfunc1_dt.values)
        return jacobian

    def compute_b(self, func1, func2):
        """
        compute residual evaluated at current variable
        """
        # r(dt) = func1(t+dt) - func2(t)
        t = func1.time
        b = np.interp(t + self.variable_time, func1.time, func1.values) \
            - np.interp(t, func2.time, func2.values)
        return b

    def solve_normal_equation(self, jacobian, b):
        """
        solve the normal equation (scalar case, so it is a plain division)
        """
        lhs = jacobian.T @ jacobian
        rhs = -jacobian.T @ b
        # NOTE: np.linalg.solve(rhs, lhs) doesn't work for 1d
        delta = rhs / lhs
        return delta

    def plot(self, func1, func2, title='func1 vs func2'):
        """
        Plot func1 (shifted by the current dt estimate) and func2
        """
        t = func1.time
        v1 = np.interp(t + self.variable_time, func1.time, func1.values)
        v2 = np.interp(t, func2.time, func2.values)
        fig, ax = plt.subplots()
        ax.plot(t, v1, label='func1(t + dt)')
        ax.plot(t, v2, label='func2(t)')
        ax.legend()
        plt.title(title)
        plt.show()
def main():
    """Calibrate the time offset and compare against the ground truth."""
    f1, f2, gt_time_offset = generate_data()
    print('gt_time_offset:', gt_time_offset)

    calib = TimeCalibration()
    calib.least_squares_calibrate(f1, f2)

    print('gt_time_offset:', gt_time_offset)
    print('estimated_time_offset:', calib.variable_time)
if __name__ == "__main__":
    # Time-offset calibration demo.
    main()
|
yimuw/yimu-blog | random/thin_hessian/expr1.py | """
1. solve for a,b using grad
2. running avg + prior
3. gd
4. x* = -b / 2a
5. x -= 0.01 * (df / a)
"""
import numpy as np
import matplotlib.pyplot as plt
import math
import argparse
# Optimization method chosen from the command line: 'c', 'gd', or 's'.
METHOD = None
class function1:
    """Quadratic f(x) = a x^2 + b x + c with a noisy gradient oracle."""

    def __init__(self):
        self.a = 0.1
        self.b = 2
        self.c = 10

    def print_gt(self):
        """Print the analytic minimizer -b / (2a)."""
        print('x*-gt:', -self.b / (2*self.a))

    def f(self, x):
        a, b, c = self.a, self.b, self.c
        return a * x * x + b * x + c

    def df(self, x):
        """Gradient plus Gaussian noise proportional to its magnitude."""
        a, b, c = self.a, self.b, self.c
        grad = 2 * a * x + b
        noise = np.random.normal(0, abs(grad) / 5)
        return grad + noise
class function2:
    """
    a convex function:
        (1.01 * x + math.cos(x)) ** 2
    with a noisy gradient oracle.
    """

    def __init__(self):
        pass

    def f(self, x):
        inner = 1.01 * x + math.cos(x)
        return inner * inner

    def df(self, x):
        inner = 1.01 * x + math.cos(x)
        grad = 2 * inner * (1.01 - math.sin(x))
        noise = np.random.normal(0, abs(grad) / 10)
        return grad + noise
class Solver:
    """Fits a local quadratic model (a, b) from noisy gradient samples and
    steps x with the strategy selected by the module-level METHOD global."""

    def __init__(self,func, x0):
        self.func = func
        self.x = x0
        # Running estimate of the local quadratic d(x) = 2*a*x + b.
        self.a = 1
        self.b = 1
        self.iteration = 0

    def converge(self):
        # NOTE(review): the gradient oracle is noisy, so this threshold is
        # only reached when the (noise-scaled) gradient itself is tiny.
        d = self.func.df(self.x)
        return np.linalg.norm(d) < 1e-8

    def iter(self):
        x0 = self.x
        d0 = self.func.df(x0)
        # important. Make sure the sample points are far in normalized space
        direction = - d0 / abs(self.a)
        alpha = 0.1
        x1 = x0 + alpha * direction
        d1 = self.func.df(x1)

        # Two gradient samples give two equations in (a, b): d = 2*a*x + b.
        b = np.array([d0, d1])
        A = np.array([
            [2*x0, 1],
            [2*x1, 1],
        ])
        # prior
        K = 1
        prior_b = np.array([ + K * self.a, + K * self.b])
        prior_A = ([
            [K, 0],
            [0, K],
        ])
        # p = np.linalg.solve(A , b) # solve for para directly
        p = np.linalg.solve(A.T @ A + prior_A, A.T @ b + prior_b) # least squares
        res_a, res_b = p
        # Exponential moving average keeps the model estimate stable.
        update_weight = 0.9
        self.a = update_weight * self.a + (1- update_weight) * res_a
        self.b = update_weight * self.b + (1- update_weight) * res_b

        method = METHOD
        if method == 'c': # center
            # Step toward the estimated quadratic minimizer -b / (2a).
            x_star_est = - self.b / (2 * self.a)
            self.x = x0 + 0.1 * (x_star_est - x0)
        elif method == 'gd':
            print('d0:', d0)
            self.x = x0 - 0.01 * d0
        elif method == 's': # scale
            # Gradient descent with curvature-normalized step length.
            normalized_d0 = d0 / abs(self.a)
            print("d0, d0_norm: ", d0, normalized_d0)
            self.x = x0 - 0.01 * normalized_d0
        else:
            raise
        print('a b:', self.a, self.b)
        print('x*: ', - self.b / (2 * self.a))
        print('x:', self.x)
def test1():
    """Fit quadratic parameters from two nearby gradient samples at many
    starting points, printing the recovered parameters each time."""
    def fit_two_point(objective, x_start):
        g_start = objective.df(x_start)
        x_next = x_start - 0.001 * g_start
        g_next = objective.df(x_next)
        # df(x) = 2*a*x + c  =>  two samples pin down (a, c)
        rhs = np.array([g_start, g_next])
        design = np.array([
            [2*x_start, 1],
            [2*x_next, 1],
        ])
        params = np.linalg.solve(design, rhs)
        print("x0:", x_start)
        print("a, c : ", params)
    objective = function1()
    for x_start in np.linspace(-100, 100, 200):
        fit_two_point(objective, x_start)
def test2():
    """Run the Solver on the noisy quadratic, starting far from the optimum."""
    objective = function1()
    solver = Solver(objective, 100)
    for i in range(2000):
        print('i:', i)
        solver.iter()
        if solver.converge():
            print('converge!')
            break
    objective.print_gt()
def test3():
    """Run the Solver on the convex non-quadratic objective."""
    objective = function2()
    solver = Solver(objective, 10)
    for i in range(2000):
        print('i:', i)
        solver.iter()
        if solver.converge():
            print('converge!')
            break
if __name__ == "__main__":
    # the positional CLI argument selects the Solver's update rule
    cli = argparse.ArgumentParser()
    cli.add_argument('opt_method', type=str, help='c: quad center; gd: gradient descent; s: scaled gd')
    parsed = cli.parse_args()
    METHOD = parsed.opt_method
    # test1()
    # test2()
    test3()
|
yimuw/yimu-blog | least_squares/pca/jacobian_check.py | <filename>least_squares/pca/jacobian_check.py
import numpy as np
# from scipy.linalg import logm, expm
from math import cos, sin, pi
import matplotlib.pyplot as plt
def skew(w):
    """Return the 3x3 skew-symmetric (cross-product) matrix of vector w."""
    x, y, z = w
    return np.array([
        [0., -z, y],
        [z, 0., -x],
        [-y, x, 0.],
    ])
class JacobianCheck:
    """Verify d(A @ skew(w))/dw at w = 0: numeric vs. analytic Jacobian."""
    def __init__(self):
        self.A = np.random.rand(3, 3)    # arbitrary constant matrix
        self.var = np.array([0, 0, 0.])  # linearization point w = 0
    @staticmethod
    def function(A, w):
        """The map being differentiated: f(w) = A @ skew(w)."""
        return A @ skew(w)
    def df_dvar(self):
        """Central-difference Jacobian, shape (3, 3, 3): df/dw_k in [:, :, k]."""
        jacobian = np.zeros([3, 3, 3])
        cur_var = self.var.copy()
        DELTA = 1e-6
        for var_idx in range(3):
            var_plus = cur_var.copy()
            var_plus[var_idx] += DELTA
            f_plus = JacobianCheck.function(self.A, var_plus)
            var_minus = cur_var.copy()
            var_minus[var_idx] -= DELTA
            f_minus = JacobianCheck.function(self.A, var_minus)
            jacobian[:, :, var_idx] = (f_plus - f_minus) / (2 * DELTA)
            print('var_idx:', var_idx)
            print('j:', jacobian[:, :, var_idx])
        return jacobian
    def df_dvar_analytic(self):
        """Closed-form Jacobian: d(A @ skew(w))/dw_k = A @ G_k, with G_k the
        so(3) generators. Now returns the Jacobian (was print-only) so it can
        be compared programmatically with df_dvar()."""
        jacobian = np.zeros([3, 3, 3])
        G1 = np.array([
            [0, 0, 0],
            [0, 0, -1],
            [0, 1, 0.],
        ])
        G2 = np.array([
            [0, 0, 1],
            [0, 0, 0],
            [-1, 0, 0.],
        ])
        G3 = np.array([
            [0, -1, 0],
            [1, 0, 0],
            [0, 0, 0.],
        ])
        jacobian[:, :, 0] = self.A @ G1
        jacobian[:, :, 1] = self.A @ G2
        jacobian[:, :, 2] = self.A @ G3
        for var_idx in range(3):
            print('var_idx:', var_idx)
            print('j:', jacobian[:, :, var_idx])
        return jacobian
class JacobianCheckLeftSkew:
    """Verify d(skew(w) @ A)/dw at w = 0: numeric vs. analytic Jacobian."""
    def __init__(self):
        self.A = np.random.rand(3, 3)    # arbitrary constant matrix
        self.var = np.array([0, 0, 0.])  # linearization point w = 0
    @staticmethod
    def function(A, w):
        """The map being differentiated: f(w) = skew(w) @ A."""
        return skew(w) @ A
    def df_dvar(self):
        """Central-difference Jacobian, shape (3, 3, 3): df/dw_k in [:, :, k]."""
        jacobian = np.zeros([3, 3, 3])
        cur_var = self.var.copy()
        DELTA = 1e-6
        for var_idx in range(3):
            var_plus = cur_var.copy()
            var_plus[var_idx] += DELTA
            f_plus = JacobianCheckLeftSkew.function(self.A, var_plus)
            var_minus = cur_var.copy()
            var_minus[var_idx] -= DELTA
            f_minus = JacobianCheckLeftSkew.function(self.A, var_minus)
            jacobian[:, :, var_idx] = (f_plus - f_minus) / (2 * DELTA)
            print('var_idx:', var_idx)
            print('j:', jacobian[:, :, var_idx])
        return jacobian
    def df_dvar_analytic(self):
        """Closed-form Jacobian via d(skew(w) @ A)/dw_k = G_k @ A
        = (A^T @ G_k^T)^T. Now returns the Jacobian (was print-only) so it
        can be compared programmatically with df_dvar()."""
        jacobian = np.zeros([3, 3, 3])
        G1 = np.array([
            [0, 0, 0],
            [0, 0, -1],
            [0, 1, 0.],
        ])
        G2 = np.array([
            [0, 0, 1],
            [0, 0, 0],
            [-1, 0, 0.],
        ])
        G3 = np.array([
            [0, -1, 0],
            [1, 0, 0],
            [0, 0, 0.],
        ])
        jacobian[:, :, 0] = (self.A.transpose() @ G1.transpose()).transpose()
        jacobian[:, :, 1] = (self.A.transpose() @ G2.transpose()).transpose()
        jacobian[:, :, 2] = (self.A.transpose() @ G3.transpose()).transpose()
        for var_idx in range(3):
            print('var_idx:', var_idx)
            print('j:', jacobian[:, :, var_idx])
        return jacobian
def main():
    """Run numeric-vs-analytic Jacobian checks for both skew placements."""
    print('=================== CHECK 1 ========================')
    checker = JacobianCheck()
    checker.df_dvar()
    checker.df_dvar_analytic()
    print('=================== CHECK 1 ========================')
    left_checker = JacobianCheckLeftSkew()
    left_checker.df_dvar()
    left_checker.df_dvar_analytic()
if __name__ == "__main__":
    main()
|
yimuw/yimu-blog | least_squares/icp/icp_se3.py | <filename>least_squares/icp/icp_se3.py
import numpy as np
# from scipy.linalg import logm, expm
from math import cos, sin, pi
from copy import deepcopy
import utils
def V_operator(w):
    """Left Jacobian V of SO(3): the translation factor of the SE(3) exp map.

    Falls back to a first-order expansion near theta = 0 to avoid dividing
    by a vanishing angle.
    """
    w_skew = utils.skew(w)
    theta = np.linalg.norm(w)
    if abs(theta) < 1e-7:
        # small-angle approximation
        return np.identity(3) + w_skew / 2.
    theta_sq = theta * theta
    return np.identity(3) \
        + (1. - cos(theta)) / theta_sq * w_skew \
        + (theta - sin(theta)) / (theta_sq * theta) * w_skew @ w_skew
def se3_exp(se3):
    """Exponential map se(3) -> SE(3); input is [w(3); t(3)] as a 6-vector."""
    rot_part = se3[:3]
    trans_part = se3[3:].reshape([3, 1])
    SE3_mat = np.identity(4)
    SE3_mat[:3, :3] = utils.so3_exp(rot_part)
    # flatten: numpy wants a (3,) slice on the left, not (3, 1)
    SE3_mat[:3, 3] = (V_operator(rot_part) @ trans_part).flatten()
    return SE3_mat
class SE3:
    """Rigid transform stored as a 4x4 homogeneous matrix."""
    def __init__(self):
        self.T = np.identity(4)
    def right_add(self, se3):
        """Right-multiplicative update T * exp(se3); returns a new SE3."""
        assert se3.size == 6
        updated = SE3()
        updated.T = self.T @ se3_exp(se3)
        return updated
def icp_se3_residual(point_src, point_target, variables_SE3):
    """ICP residual r = R @ p_src - p_target + t, flattened column-major
    to [p1_x, p1_y, p1_z, p2_x, p2_y, p2_z, ...]."""
    T = variables_SE3.T
    R = T[:3, :3]
    translation = T[:3, 3].reshape([3, 1])
    return (R @ point_src - point_target + translation).flatten('F')
# Can do a lambda to reduce code length
def compute_se3_jacobian_numurical(point_src, point_target, variables_SE3):
DELTA = 1e-6
num_residuals = point_src.size
num_variables = 6
jacobian = np.zeros([num_residuals, num_variables])
se3 = np.array([0, 0, 0, 0, 0, 0.])
curret_variables = deepcopy(variables_SE3)
for p_idx in range(6):
variables_plus = deepcopy(curret_variables)
delta_vector = se3.copy()
delta_vector[p_idx] += DELTA
variables_plus = variables_plus.right_add(delta_vector)
residual_plus = icp_se3_residual(point_src, point_target, variables_plus)
variables_minus = deepcopy(curret_variables)
delta_vector = se3.copy()
delta_vector[p_idx] -= DELTA
variables_minus = variables_minus.right_add(delta_vector)
residual_minus = icp_se3_residual(point_src, point_target, variables_minus)
dr_dpidx = (residual_plus - residual_minus) / (2. * DELTA)
jacobian[:, p_idx] = dr_dpidx
residual_cur_variables = icp_se3_residual(point_src, point_target, curret_variables)
return jacobian, residual_cur_variables
def icp_se3_numirical(point_src, point_target):
    """Gauss-Newton ICP on SE(3) using a numerically-differenced Jacobian."""
    pose = SE3()
    for step_idx in range(10):
        # linearize the residual on the se(3) tangent space
        J, residual = compute_se3_jacobian_numurical(point_src, point_target, pose)
        # normal equations: (J^T J) delta = -J^T r
        delta = np.linalg.solve(J.transpose() @ J, -J.transpose() @ residual)
        # retract the tangent-space step back onto the SE(3) manifold
        pose = pose.right_add(delta)
        print('iter: ', step_idx, ' cost:', residual.transpose() @ residual)
#print('current variables: ', w_so3) |
yimuw/yimu-blog | least_squares/fix_lag_smoothing/profiler.py | <reponame>yimuw/yimu-blog
import time
from collections import defaultdict
from functools import wraps

import numpy as np
# function name -> list of elapsed wall-clock durations (seconds)
time_map = defaultdict(list)


def time_it(func):
    """Decorator recording each call's wall-clock duration in time_map,
    keyed by the wrapped function's name."""
    @wraps(func)  # preserve __name__/__doc__ of the wrapped function
    def inner(*args, **kwargs):
        time_start = time.time()
        res = func(*args, **kwargs)
        elapsed_time = time.time() - time_start
        time_map[func.__name__].append(elapsed_time)
        return res
    return inner
def print_time_map():
    """Print mean duration, call count, and max duration for every function
    profiled via the @time_it decorator."""
    for k, v in time_map.items():
print('{}: mean time: {}, # called: {}, max: {}'.format(k, np.mean(v), len(v), max(v))) |
yimuw/yimu-blog | random/visitor/vistor_ref_version/visitor_ref.py | from abc import ABC, abstractmethod
import json
from collections import deque
class RefObj:
    """Mutable box holding one value, so visitors can rebind it in place."""
    def __init__(self, val):
        self.val = val
class Traversable(ABC):
    """Base for objects a visitor can walk; every instance carries a version."""
    def __init__(self):
        # wrapped in RefObj so loaders can assign through the reference
        self.version = RefObj(1)
    @abstractmethod
    def traverse(self, visitor):
        # subclasses should call super().traverse(visitor) to emit version
        visitor.visit('version', self.version)
class VisitorBase(ABC):
    """Abstract visitor dispatching on node type: lists, nested Traversable
    levels, and RefObj leaves."""
    def __init__(self):
        pass
    @abstractmethod
    def on_leaf(self, name, obj):
        pass
    @abstractmethod
    def on_enter_level(self, name):
        pass
    @abstractmethod
    def on_leave_level(self):
        pass
    @abstractmethod
    def on_enter_list(self, name, obj):
        pass
    @abstractmethod
    def on_leave_list(self):
        pass
    def visit(self, name, obj):
        """Recursively dispatch obj to the matching enter/leave/leaf hooks."""
        if isinstance(obj, list):
            self.on_enter_list(name, obj)
            for element in obj:
                self.visit(None, element)
            self.on_leave_list()
        elif isinstance(obj, Traversable):
            self.on_enter_level(name)
            obj.traverse(self)
            self.on_leave_level()
        elif isinstance(obj, RefObj):
            self.on_leaf(name, obj)
        # anything else is silently ignored
|
yimuw/yimu-blog | random/lyapunov/global_nonlinear_opt_sym.py | # Import packages.
import cvxpy as cp
import numpy as np
import sympy
class GlobalPolynomialOptimization:
    """Globally lower-bound a bivariate polynomial via a sum-of-squares (SOS)
    relaxation solved as a semidefinite program (SDP).

    Target polynomial (six-hump-camel style):
        f(x, y) = 4x^2 - 2.1x^4 + (1/3)x^6 + xy - 4y^2 + 4y^4
    We seek the largest gamma with f - gamma = w^T Q w and Q >= 0, where w
    is a fixed monomial basis.
    """
    def __init__(self):
        pass
    def coefficient_symbolic_match(self):
        """Symbolically expand w^T Q w and derive the linear constraints on Q
        matching f's coefficients (and zeroing all other monomials).

        Returns (constraint_list_poly, constraint_list_zero) as strings; the
        printed output was pasted into solve_sos_as_sdp() below.
        """
        x, y, gamma = sympy.symbols('x y gamma')
        # f(x, y) = 4 x^2 - 21/10* x^4 + 1/3 x^6 + xy - 4y^2 + 4y^4
        f_monomials = [x**2, x**4, x**6, x*y, y**2, y**4]
        f_coeffs = [4., -21/10., 1/3., 1., -4., 4.]
        # SOS form: b^T Q b over the monomial basis w
        w = sympy.Matrix([1, x, x**2, x**3, y, y**2, y**3, x*y, x*y*y, x*x*y])
        Q = sympy.MatrixSymbol('Q', 10, 10)
        V_dot_SOS = (w.T @ Q @ w).as_explicit()
        V_dot_SOS_poly = sympy.Poly(V_dot_SOS[0], x, y)
        print('V_dot_SOS_poly:', V_dot_SOS_poly)
        # equality constraints tying Q entries to f's nonzero coefficients
        constraint_list_poly = []
        for f_monomial, f_coeff in zip(f_monomials, f_coeffs):
            Q_coeff = V_dot_SOS_poly.coeff_monomial(f_monomial)
            constrain = '{}=={}'.format(Q_coeff, f_coeff)
            print('constrain:', constrain)
            constraint_list_poly.append(constrain)
        MAX_ORDER = 10
        constraint_list_zero = []
        for x_order in range(0, MAX_ORDER + 1):
            for y_order in range(0, MAX_ORDER + 1):
                # skip symmetry. not sure how to do it.
                # having duplicate constraints seem ok :)
                # skip constant, gamma will do it
                if y_order == 0 and x_order == 0:
                    continue
                monomial = x**x_order * y ** y_order
                # skip monomials with non-zero coefficients (handled above)
                if monomial in f_monomials:
                    continue
                coeff = V_dot_SOS_poly.coeff_monomial(monomial)
                if not coeff is sympy.S.Zero:
                    constrain = '{} == 0'.format(coeff)
                    print('constrain:', constrain, 'for coef:',
                          x**x_order * y ** y_order)
                    constraint_list_zero.append(constrain)
        print('constraint_poly:', ','.join(constraint_list_poly))
        print('constraint_zero:', ','.join(constraint_list_zero))
        return constraint_list_poly, constraint_list_zero
    def solve_sos_as_sdp(self):
        """Solve max gamma s.t. Q >= 0 and the coefficient-matching
        constraints (hard-coded from coefficient_symbolic_match's output)."""
        num_var_w = 10
        Q = cp.Variable((num_var_w, num_var_w), symmetric=True)
        gamma = cp.Variable()
        # sufficient condition: Q positive semidefinite (Epsilon = 0)
        Epsilon = 0
        constraints = [Q >> Epsilon * np.identity(num_var_w)]
        constraints += [Q[0, 0] == -gamma]
        # constraints matching f's nonzero coefficients
        constraints += [Q[0, 2] + Q[1, 1] + Q[2, 0] == 4.0, Q[1, 3] + Q[2, 2] + Q[3, 1] == -2.1, Q[3, 3] == 0.3333333333333333,
                        Q[0, 7] + Q[1, 4] + Q[4, 1] + Q[7, 0] == 1.0, Q[0, 5] + Q[4, 4] + Q[5, 0] == -4.0, Q[4, 6] + Q[5, 5] + Q[6, 4] == 4.0]
        # constraints zeroing every other monomial's coefficient
        constraints += [Q[0, 4] + Q[4, 0] == 0, Q[0, 6] + Q[4, 5] + Q[5, 4] + Q[6, 0] == 0, Q[5, 6] + Q[6, 5] == 0, Q[6, 6] == 0, Q[0, 1] + Q[1, 0] == 0, Q[0, 8] + Q[1, 5] + Q[4, 7] + Q[5, 1] + Q[7, 4] + Q[8, 0] == 0, Q[1, 6] + Q[4, 8] + Q[5, 7] + Q[6, 1] + Q[7, 5] + Q[8, 4] == 0, Q[5, 8] + Q[6, 7] + Q[7, 6] + Q[8, 5] == 0, Q[6, 8] + Q[8, 6] == 0, Q[0, 9] + Q[1, 7] + Q[2, 4] + Q[4, 2] + Q[7, 1] + Q[9, 0] == 0, Q[1, 8] + Q[2, 5] + Q[4, 9] + Q[5, 2] + Q[7, 7] + Q[8,
                        1] + Q[9, 4] == 0, Q[2, 6] + Q[5, 9] + Q[6, 2] + Q[7, 8] + Q[8, 7] + Q[9, 5] == 0, Q[6, 9] + Q[8, 8] + Q[9, 6] == 0, Q[0, 3] + Q[1, 2] + Q[2, 1] + Q[3, 0] == 0, Q[1, 9] + Q[2, 7] + Q[3, 4] + Q[4, 3] + Q[7, 2] + Q[9, 1] == 0, Q[2, 8] + Q[3, 5] + Q[5, 3] + Q[7, 9] + Q[8, 2] + Q[9, 7] == 0, Q[3, 6] + Q[6, 3] + Q[8, 9] + Q[9, 8] == 0, Q[2, 9] + Q[3, 7] + Q[7, 3] + Q[9, 2] == 0, Q[3, 8] + Q[8, 3] + Q[9, 9] == 0, Q[2, 3] + Q[3, 2] == 0, Q[3, 9] + Q[9, 3] == 0]
        prob = cp.Problem(cp.Minimize(-gamma),
                          constraints)
        prob.solve(verbose=True)
        # Print result.
        print("status:", prob.status)
        print("The optimal value is", prob.value)
        print("The low bound is", gamma.value)
def main():
    """Derive SDP constraints symbolically, then solve the SOS relaxation."""
    optimizer = GlobalPolynomialOptimization()
    optimizer.coefficient_symbolic_match()
    optimizer.solve_sos_as_sdp()
if __name__ == "__main__":
    main()
|
yimuw/yimu-blog | deep-learning/tensorflow-from-scratch/logistic_regression.py | <filename>deep-learning/tensorflow-from-scratch/logistic_regression.py<gh_stars>1-10
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
import numpy as np
import variables_tree_flow as vtf
class MyLogisticRegression:
    """Multiclass logistic regression (one-vs-rest) trained with the
    variables_tree_flow (vtf) autodiff engine via gradient descent.

    Mirrors sklearn's estimator interface (fit/predict, coef_).
    """
    def fit(self, X, y):
        """Train on data X (n_samples, n_features) and integer labels y.

        Assumes y contains class ids 0..K-1 — TODO confirm against callers.
        """
        return self.__fit_numpy_impl(X, y)
    def __fit_numpy_impl(self, X, y):
        assert len(X) == len(y)
        # append a constant-1 column so the bias is learned as the last theta row
        ones = np.ones((X.shape[0], 1))
        X = np.hstack([X, ones])
        self.data_num = len(X)
        self.num_classes = max(y) + 1
        self.len_theta = X.shape[1]
        # one vtf autodiff variable per weight, all initialized to 0
        theta = np.array([vtf.Variable(value=0., id='t{}'.format(i))
                          for i in range(self.len_theta * self.num_classes)]).reshape([self.len_theta, self.num_classes])
        linear_comb = X @ theta
        # element-wise sigmoid over the vtf expression graph
        pred = np.vectorize(vtf.ntf_sigmoid)(linear_comb)
        # binary cross-entropy summed over K independent one-vs-rest problems
        cost = 0
        for pred_row, gt_class in zip(pred, y):
            for class_idx, pred_class in enumerate(pred_row):
                # idx encoding
                # multiple logistic regression to handle multiclasses.
                if class_idx == gt_class:
                    cost += - vtf.ntf_log(pred_class)
                else:
                    cost += - vtf.ntf_log(1 - pred_class)
        cost = cost * (1 / self.data_num)
        # gradient-descent training loop on the vtf computation graph
        core = vtf.NumberFlowCore(cost)
        for i in range(5000):
            core.forward()
            if i % 200 == 0:
                print("cost.val:", cost.value, " iter:", i)
            core.clear_grad()
            core.backward()
            core.gradient_desent(rate=0.1)
        for var in set(core.varible_nodes):
            print(var.id, var.value)
        # extract plain floats out of the vtf Variables into coef_
        self.coef_ = np.array([[theta.value for theta in row]
                               for row in theta])
    def predict(self, X):
        """Return the argmax class for each row of X (bias row appended
        during fit is applied as the last row of coef_)."""
        res = X @ self.coef_[:-1, :] + self.coef_[-1, :]
        return np.argmax(res, axis=1)
if __name__ == "__main__":
    # train and evaluate on the iris dataset (training accuracy only)
    X, y = load_iris(return_X_y=True)
    model = MyLogisticRegression()
    model.fit(X, y)
    predictions = model.predict(X)
    print('pred:', predictions)
    print('y_gt:', y)
    print('train accuracy:', np.sum((y == predictions)) / len(y))
|
yimuw/yimu-blog | least_squares/ddp/ddp_optimization.py | import scipy.linalg as linalg
import numpy as np
from numpy.linalg import inv
import ddp_types
#Dynamic = ddp_types.LinearDynamic
Dynamic = ddp_types.NonlinearDynamic
class QuadraticCost:
    """Quadratic cost ||x - mean||^2 weighted by a hessian matrix."""
    def __init__(self, mean, hessian):
        self.mean = mean
        self.hessian = hessian
    def eval(self, x):
        """Cost value (x - mean)^T H (x - mean)."""
        offset = x - self.mean
        return offset.T @ self.hessian @ offset
    def grad(self, x):
        """Gradient 2 H (x - mean).

        NOTE(review): exact only for symmetric H — confirm callers pass one.
        """
        offset = x - self.mean
        return 2 * self.hessian @ offset
class ControlLaw:
    """Affine feedback law: du = constant + feedback @ dx."""
    def __init__(self, feedback, constant):
        self.feedback = feedback
        self.constant = constant
class DDP_optimization_perspective:
    """Differential Dynamic Programming viewed as iterated least squares.

    Each outer iteration: roll the dynamics forward, sweep backward solving
    per-step quadratic subproblems to get affine control laws, then apply
    those laws forward with a damped step.

    NOTE(review): throughout this class the names `rhs`/`lhs` are inverted
    relative to convention — `rhs` holds the normal-equation MATRIX and
    `lhs` the VECTOR (solve is called as solve(rhs, lhs)). The math is
    consistent; only the naming is swapped.
    """
    def initialize(self):
        """Return (num_controls, initial_state, zero init controls, target)."""
        initial_state = np.array([0.1, 0.1])
        num_controls = 10
        init_controls = [np.array([0, 0.]) for i in range(num_controls)]
        target_state = np.array([2., 2.])
        return num_controls, initial_state, init_controls, target_state
    def forward_pass(self, num_controls, initial_state, init_controls):
        """Integrate the dynamics; returns num_controls + 1 states."""
        state = initial_state.copy()
        forward_pass_states = [state]
        for i in range(num_controls):
            next_state = Dynamic().f_function(
                state, init_controls[i])
            forward_pass_states.append(next_state)
            state = next_state
        return forward_pass_states
    def compute_ddp_subproblem_normal_equation(self, marginal_cost, xi_current, ui_current):
        """Build the 4x4 normal equation for one step's (dxi, dui) subproblem.

        Returns (matrix, vector) — named (rhs, lhs) here, see class NOTE.
        """
        SIZE_X = 2
        SIZE_U = 2
        system_size = SIZE_U + SIZE_X
        # variable order within the 4-vector: [dxi (2), dui (2)]
        rhs = np.zeros([system_size, system_size])
        lhs = np.zeros(system_size)
        # marginal cost: V(xj) = ||xj - mean||^2_w , V(xj = Ai * dxi + Bi * dui + f(xi, ui))
        residual_marginal = Dynamic().f_function(xi_current, ui_current) - marginal_cost.mean
        Ai = Dynamic().jacobi_wrt_state(xi_current, ui_current)
        Bi = Dynamic().jacobi_wrt_controls(xi_current, ui_current)
        jacobian_marginal_cost_wrt_to_xiui = np.zeros([SIZE_X, system_size])
        jacobian_marginal_cost_wrt_to_xiui[:, 0:2] = Ai
        jacobian_marginal_cost_wrt_to_xiui[:, 2:4] = Bi
        weight_marginal_cost = marginal_cost.hessian
        rhs += jacobian_marginal_cost_wrt_to_xiui.T @ weight_marginal_cost @ jacobian_marginal_cost_wrt_to_xiui
        lhs += - jacobian_marginal_cost_wrt_to_xiui.T @ weight_marginal_cost @ residual_marginal
        # small regularization on the controls: ||ui + dui||^2
        weight_u = 0.5 * 1e-6
        rhs[2:4, 2:4] += 2 * weight_u * np.identity(2)
        lhs[2:4] += -2 * weight_u * ui_current
        return rhs, lhs
    def solve_ddp_subproblem(self, marginal_cost, xi_current, ui_current):
        """Eliminate dui by Schur complement; return the new marginal cost on
        xi and the affine control law for ui."""
        rhs, lhs = self.compute_ddp_subproblem_normal_equation(marginal_cost, xi_current, ui_current)
        # |A1 A2| xi = b1
        # |A3 A4| ui   b2
        # note: A2 = A3.T
        # 1. elminate ui.
        #    (A1 - A2 * inv(A4) * A3) xi = b1 - A2 * inv(A4) * b2
        # 2. Gievn xi, ui is
        #    A3*xi + A4*ui = b2
        #    ui = inv(A4)*(b2 - A3*xi) = inv(A4)*b2 - inv(A4)*A3 * xi
        A1 = rhs[0:2, 0:2]
        A2 = rhs[0:2, 2:4]
        A3 = rhs[2:4, 0:2]
        A4 = rhs[2:4, 2:4]
        b1 = lhs[0:2]
        b2 = lhs[2:4]
        A4_inv = np.linalg.inv(A4)
        rhs_xi = A1 - A2 @ A4_inv @ A3
        lhs_xi = b1 - A2 @ A4_inv @ b2
        xi_star = np.linalg.solve(rhs_xi, lhs_xi)
        # the nonlinear derivation is very very trick! check notes.
        xi_marginal_cost = QuadraticCost(mean=xi_star + xi_current, hessian=rhs_xi)
        # print('mean:', xi_marginal_cost.mean)
        # print('w:', xi_marginal_cost.hessian)
        ui_control_law = ControlLaw(constant = A4_inv @ b2, feedback= - A4_inv @ A3)
        return xi_marginal_cost, ui_control_law
    def backward_pass(self, num_controls, forward_pass_states, init_controls,
                      final_cost):
        """Sweep from the final cost backward, producing one control law
        per step."""
        marginal_cost = QuadraticCost(final_cost.quad_mean(), final_cost.quad_weight())
        feedback_laws = [None] * num_controls
        # iterate [n-1, 0] to compute the control law
        for i in range(num_controls - 1, -1, -1):
            state_i = forward_pass_states[i]
            control_i = init_controls[i]
            marginal_cost, feedback_law = self.solve_ddp_subproblem(marginal_cost, state_i, control_i)
            feedback_laws[i] = feedback_law
        return feedback_laws
    def apply_control_law(self, num_controls, init_controls, forward_pass_states, feedback_laws):
        """Roll the dynamics forward applying the damped feedback laws.

        Returns (new_controls, new_states)."""
        new_cur_state = forward_pass_states[0].copy()
        new_states = [new_cur_state]
        new_controls = []
        for i in range(num_controls):
            feedback_law = feedback_laws[i]
            dx = new_cur_state - forward_pass_states[i]
            # the argmin_u Q(u, x)
            du = feedback_law.constant + feedback_law.feedback @ dx
            step = 0.5
            control = init_controls[i] + step * du
            new_cur_state = Dynamic().f_function(new_cur_state, control)
            new_controls.append(control)
            new_states.append(new_cur_state)
        return new_controls, new_states
    def check_dynamic(self, num_controls, states, controls):
        """Assert that the (states, controls) pair is dynamically feasible."""
        state0 = states[0]
        integrated_states = self.forward_pass(num_controls, state0, controls)
        diff = np.stack(integrated_states) - np.stack(states)
        assert np.allclose(np.sum(diff), 0)
        # print('integrated_states - ddp_states: ', diff)
    def run(self):
        """Run 10 DDP iterations and print convergence diagnostics."""
        num_controls, initial_state, controls, target_state = self.initialize(
        )
        print('initial_state:', initial_state)
        print('target_state:', target_state)
        print('num_states:', num_controls + 1)
        for iter in range(10):
            forward_pass_states = self.forward_pass(num_controls, initial_state,
                                                    controls)
            # print('forward_pass_states:', forward_pass_states)
            final_state = forward_pass_states[-1]
            final_state_init_cost = ddp_types.TargetCost(final_state, target_state)
            feedback_laws = self.backward_pass(num_controls, forward_pass_states,
                                               controls, final_state_init_cost)
            controls, new_states = self.apply_control_law(
                num_controls, controls, forward_pass_states, feedback_laws)
            final_state_end_cost = ddp_types.TargetCost(
                new_states[-1], target_state)
            print('final_state_end_cost:', final_state_end_cost.cost())
            print('----------------------------------')
        print('new_controls:\n', controls)
        print('new_states:\n', new_states)
        self.check_dynamic(num_controls, new_states, controls)
def main():
    """Entry point: run the DDP demo."""
    solver = DDP_optimization_perspective()
    solver.run()
if __name__ == "__main__":
    main()
|
yimuw/yimu-blog | least_squares/ddp/dirct_eliminate.py | import scipy.linalg as linalg
import numpy as np
from numpy.linalg import inv
import matplotlib.pyplot as plt
import ddp_types
Dynamic = ddp_types.NonlinearDynamic
# Dynamic = ddp_types.LinearDynamic
# cost (u1, u2, u3) = k||u1||^2 + k||u2||^2 + k||u3||^3 + ||x4 - target||^2
# subject to: x_i+1 = A_i * x_i + B_i * u_i
# x_0 = start
class direct_DDP:
    """Solve the DDP target-reaching problem directly: eliminate the states
    via the dynamics constraints and do least squares over all controls at
    once (no backward recursion)."""
    def initialize(self):
        """Return (num_controls, initial_state, init controls, target)."""
        initial_state = np.array([0., 0.])
        num_controls = 3
        init_controls = [np.array([1, 1.]) for i in range(num_controls)]
        target_state = np.array([2., 2.])
        return num_controls, initial_state, init_controls, target_state
    def forward_pass(self, num_controls, initial_state, controls):
        """Integrate the dynamics; returns num_controls + 1 states."""
        state = initial_state.copy()
        forward_pass_states = [state]
        for i in range(num_controls):
            next_state = Dynamic().f_function(
                state, controls[i])
            forward_pass_states.append(next_state)
            state = next_state
        return forward_pass_states
    def almost_ddp(self, num_controls, initial_state, init_controls, target_state):
        """One Gauss-Newton step over the stacked control vector; returns the
        updated controls."""
        jacobian, residual = self.using_equality_constraints_for_target_residual(
            num_controls, initial_state, init_controls, target_state)
        lhs = jacobian.T @ jacobian
        rhs = - jacobian.T @ residual
        lhs, rhs = self.add_controls_cost(
            lhs, rhs, num_controls, init_controls)
        if False:
            # flip to True to visualize the normal-equation sparsity
            plt.spy(lhs, precision=0.1, markersize=5)
            plt.show()
        controls_delta = np.linalg.solve(lhs, rhs)
        controls_delta = controls_delta.reshape(num_controls, 2)
        print('controls_delta,', controls_delta)
        STEP_SIZE = 1.
        controls_result = [init_controls[i] + STEP_SIZE *
                           controls_delta[i] for i in range(num_controls)]
        return controls_result
    def add_controls_cost(self, lhs, rhs, num_controls, init_controls):
        """Add the regularizer 0.5 * K * ||u_i||^2 for every control into the
        normal equation; returns the updated (lhs, rhs)."""
        K = 1e-6
        for i in range(num_controls):
            var_idx = i * 2
            lhs[var_idx: var_idx + 2, var_idx: var_idx + 2] += 2 * \
                K * np.identity(2)
            rhs[var_idx: var_idx + 2, :] += - 2 * \
                K * init_controls[i].reshape([2, 1])
        return lhs, rhs
    def using_equality_constraints_for_target_residual(self, num_controls, initial_state, init_controls, target_state):
        """Linearize the final state w.r.t. all controls by chaining the
        per-step dynamics Jacobians; returns (jacobian, residual)."""
        SIZE_OF_CONTROL = 2
        jacobian = np.zeros([2, num_controls * SIZE_OF_CONTROL])
        states = self.forward_pass(num_controls, initial_state, init_controls)
        print("predicted states:", states)
        mul_A = np.identity(SIZE_OF_CONTROL)
        # last block: B_{n-1} * u_{n-1}
        state_idx = num_controls - 1
        var_idx = SIZE_OF_CONTROL * state_idx
        B_last = Dynamic().jacobi_wrt_controls(states[-2], init_controls[-1])
        jacobian[:, var_idx:] += B_last
        # earlier blocks: (prod_{j=i+1}^{n-1} A_j) * B_i * u_i
        for state_idx in range(num_controls-2, -1, -1):
            var_idx = SIZE_OF_CONTROL * state_idx
            A_i = Dynamic().jacobi_wrt_state(
                states[state_idx], init_controls[state_idx])
            mul_A = mul_A @ A_i
            B_i = Dynamic().jacobi_wrt_controls(
                states[state_idx], init_controls[state_idx])
            jacobian[:, var_idx:var_idx+SIZE_OF_CONTROL] += mul_A @ B_i
        # check DDP note
        # r = g(u1, u2, u3) - t
        # r = g(0,0,0) - t + dgdu * u
        # g(0,0,0) = (\prod A_i) * x_0
        last_state = states[-1]
        residual = (last_state - target_state).reshape([2, 1])
        return jacobian, residual
    def check_dynamic(self, num_controls, states, controls):
        """Assert that the (states, controls) pair is dynamically feasible."""
        state0 = states[0]
        integrated_states = self.forward_pass(num_controls, state0, controls)
        diff = np.stack(integrated_states) - np.stack(states)
        assert np.allclose(np.sum(diff), 0)
        print('integrated_states - ddp_states: ', diff)
    def run(self):
        """Run 10 Gauss-Newton iterations and print diagnostics."""
        num_controls, initial_state, controls, target_state = self.initialize()
        for iter in range(10):
            controls = self.almost_ddp(
                num_controls, initial_state, controls, target_state)
            result_states = self.forward_pass(
                num_controls, initial_state, controls)
            final_state_end_cost = ddp_types.TargetCost(
                result_states[-1], target_state)
            print('final_state_end_cost:', final_state_end_cost.cost())
            print('----------------------------------')
        print('final controls:', controls)
        print('final states:', result_states)
        self.check_dynamic(num_controls, result_states, controls)
def main():
    """Entry point: run the direct-elimination DDP demo."""
    solver = direct_DDP()
    solver.run()
if __name__ == "__main__":
    main()
|
yimuw/yimu-blog | random/01_22_2029/ode_simulation.py | import numpy as np
from matplotlib import pyplot as plt
class DiseaseModel:
    """Discrete-time SIR-like compartment model with an hourly update.

    State vector: [susceptible, infected, immune, dead].
    """
    def __init__(self, infection_para_per_hour):
        self.infection_para_per_hour = infection_para_per_hour
        self.recover_para_per_hour = 0.005
        self.dead_para_per_hour = 0.0005
    def dynamic(self, state):
        """Advance the state by one hour.

        Returns the next state, or a copy of the current state when the
        susceptible compartment would go negative (epidemic saturated).
        """
        susceptible, infected, immune, dead = state
        # how many people one infected person infects per hour
        infection_rate = self.infection_para_per_hour * susceptible
        # The linear term infection_para * susceptible is crude: with e.g.
        # susceptible = 100000 and para = 0.0001 it would mean one infected
        # person infecting 100 people per hour, which is unrealistic.
        # Cap the rate — in reality a person only contacts a limited
        # number of people per day.
        if self.infection_para_per_hour * susceptible > 0.5:
            infection_rate = 0.5
        new_infected = infection_rate * infected
        print('new_infected:', new_infected)
        recovered = self.recover_para_per_hour * infected
        died = self.dead_para_per_hour * infected
        next_state = np.array([
            susceptible - new_infected,
            infected + new_infected - recovered - died,
            immune + recovered,
            dead + died,
        ])
        if next_state[0] < 0:
            # nobody left to infect; freeze the state
            return state.copy()
        return next_state
def simulation(init_susceptible, init_infected, infection_para_per_hour, total_hours):
    """Integrate the DiseaseModel for total_hours hourly steps and plot the
    four compartments over time (x-axis in days).

    Population units follow the y-axis label "X10000 people" — TODO confirm.
    Blocks on plt.show().
    """
    init_recover = 0
    init_dead = 0
    init_state = np.array(
        [init_susceptible, init_infected, init_recover, init_dead])
    disease_model = DiseaseModel(infection_para_per_hour)
    # integrate the discrete dynamics hour by hour
    all_states = [init_state]
    cur_state = init_state
    for i in range(total_hours):
        next_state = disease_model.dynamic(cur_state)
        all_states.append(next_state)
        cur_state = next_state
    all_states = np.vstack(all_states)
    print('final state: susceptible, infected, immune, dead', cur_state)
    # convert the hourly sample index to days for plotting
    time = np.linspace(0, total_hours / 24, total_hours + 1)
    plt.plot(time, all_states[:, 0], "r", label=u"Susceptible")
    plt.plot(time, all_states[:, 1], "g", label=u"Infective")
    plt.plot(time, all_states[:, 2], "b", label=u"Immune")
    plt.plot(time, all_states[:, 3], "m", label=u"Dead")
    plt.title('disease model')
    plt.xlabel('time(days)')
    plt.ylabel('X10000 people')
    # NOTE(review): this second plt.title overrides the 'disease model' title
    plt.title('init-state: infection para: {}, (susceptible:{:.2f}, infective:{:.2f})\n\
        end-state: susceptible:{:.2f}, infective:{:.2f}, covered:{:.2f}, dead:{:.2f}'.format(
        infection_para_per_hour, init_susceptible, init_infected, \
        cur_state[0], cur_state[1], cur_state[2], cur_state[3]))
    plt.legend()
    plt.show()
if __name__ == "__main__":
    # Three scenarios with the same population but decreasing per-hour
    # infectiousness: a fast epidemic vs. slower, longer ones.
    simulation(init_susceptible=1000, init_infected=0.1,
               infection_para_per_hour=0.0001, total_hours=24 * 60)
    simulation(init_susceptible=1000, init_infected=0.1,
               infection_para_per_hour=0.00001, total_hours=24 * 180)
    simulation(init_susceptible=1000, init_infected=0.1,
               infection_para_per_hour=0.000005, total_hours=24 * 180)
|
yimuw/yimu-blog | random/visitor/vistor_ref_version/binary_visitor.py | from abc import ABC, abstractmethod
import json
from collections import deque
import visitor_ref
RefObj = visitor_ref.RefObj
class BinDumper(visitor_ref.VisitorBase):
    """Serialize a Traversable tree into a flat '|'-separated string.

    Strings are double-quoted; list lengths are written before the list
    elements so the loader can reconstruct list sizes.
    """
    def __init__(self):
        self.result = ''
        self.seperator = '|'
    def on_leaf(self, name, obj):
        if isinstance(obj.val, str):
            token = '"{}"'.format(obj.val)
        else:
            token = str(obj.val)
        self.result += token + self.seperator
    def on_enter_level(self, name):
        pass
    def on_leave_level(self):
        pass
    def on_enter_list(self, name, obj):
        # record the length so the loader knows how many elements follow
        self.result += str(len(obj)) + self.seperator
    def on_leave_list(self):
        pass
class BinLoader(visitor_ref.VisitorBase):
    """Deserialize a BinDumper string back into a Traversable tree in place."""
    def __init__(self, dumped):
        self.seperator = '|'
        self.dumped = deque(dumped.split(self.seperator))
    def on_leaf(self, name, obj):
        token = self.dumped.popleft()
        # quoted tokens are strings; everything else parses as float
        if token[0] == '"':
            obj.val = token.strip('"')
        else:
            obj.val = float(token)
    def on_enter_level(self, name):
        pass
    def on_leave_level(self):
        pass
    def on_enter_list(self, name, obj):
        list_size = int(self.dumped.popleft())
        # rebuild the list in place so the owner's reference stays valid
        obj.clear()
        for _ in range(list_size):
            obj.append(visitor_ref.RefObj(None))
    def on_leave_list(self):
        pass
|
yimuw/yimu-blog | deep-learning/tensorflow-from-scratch/utils.py | import variables_tree_flow
def traverse_tree(root, level=0):
    """Pretty-print a variables_tree_flow computation tree as indented JSON.

    The `level` parameter is unused but kept for interface compatibility.
    """
    import json
    def to_dict(node):
        child_dicts = [to_dict(c) for c in node.children]
        if isinstance(node, variables_tree_flow.Variable):
            return {node.id: {'val': node.value, 'grad': node.grad,
                              'children': child_dicts}}
        return {node.id: {'children': child_dicts}}
    print(json.dumps(to_dict(root), indent=2))
|
yimuw/yimu-blog | least_squares/fix_lag_smoothing/main.py | import scipy.linalg as linalg
import numpy as np
import fix_lag_types as t
import batch_optimizer
import fix_lag_smoother
import profiler
def simulate_data(num_states):
    """Build ground-truth states, noisy relative-distance measurements, and
    an initial guess for a 2-d chain of states."""
    # constant per-step displacement: x_{i+1} - x_i
    step = np.array([2., 1.])
    states_gt = [t.State(i * step) for i in range(num_states)]
    distrance_measurements = [
        t.DistanceBetweenStates(state1_index=i, state2_index=i + 1,
                                distance=step + 5 * np.random.rand())
        for i in range(num_states - 1)
    ]
    state_guess = [t.State(step) for i in range(num_states)]
    return states_gt, distrance_measurements, state_guess
def fix_lag_smoothing_demo():
    """Compare batch least-squares smoothing against fixed-lag smoothing on
    the same simulated measurements, then print timing stats."""
    NUM_STATES = 20
    states_gt, distrance_measurements, state_guess = simulate_data(NUM_STATES)
    # full batch solve over all states at once
    batch = batch_optimizer.BatchOptimization(
        state_guess, distrance_measurements)
    x = batch.optimize()
    batch_result = x.reshape(NUM_STATES, 2)
    print('batch :\n', x.reshape(NUM_STATES, 2))
    # incremental fixed-lag solve, one measurement at a time
    smoother = fix_lag_smoother.FixLagSmoother(states_gt[0])
    for distance in distrance_measurements:
        smoother.optimize_for_new_measurement(distance)
    fixed_lag_result = smoother.get_all_states()
    print('fixed lag :\n', fixed_lag_result)
    print('diff :\n', fixed_lag_result - batch_result)
    profiler.print_time_map()
if __name__ == "__main__":
    fix_lag_smoothing_demo()
|
yimuw/yimu-blog | least_squares/pca/pca_nd.py | # from scipy.linalg import logm, expm
from math import cos, pi, sin
from scipy.linalg import expm
import matplotlib.pyplot as plt
import numpy as np
"""
Problem
cost(R, P) = || D - take_first_col(R) * P ||^2
=>
cost(w, P) = || D - take_first_col(R @ exp(W)) * P ||
cost(w, P) = || D - R @ take_first_col(exp(W)) * P ||
First order approximation, exp(W) = I + W
cost(W, P) = ||D - R @ take_first_col(I + W) * P||
Only need to solve for n-1 parameters
cost(W_c1, P) = ||D - R @ ([1,w1,w2,w3,..]) * P||
=> get W_c1
update: R = R * exp([W_c1,0,..])
"""
DIM = 10
def generate_data(dim, num_point=500):
    """Sample zero-centered Gaussian points whose covariance has one dominant
    principal axis. Returns an array of shape (dim, num_point). dim >= 3."""
    mean = np.zeros(dim)
    # the matrix exponential of a skew-symmetric matrix is a rotation in SO(n)
    random_mat = np.random.rand(dim, dim)
    W = random_mat.transpose() - random_mat
    R = expm(W)
    np.testing.assert_almost_equal(R.transpose() @ R, np.identity(dim))
    # rotate an axis-aligned covariance with a few strong directions
    principal_axis = 0.01 * np.ones(dim)
    principal_axis[0] = 1.
    principal_axis[1] = 0.3
    principal_axis[2] = 0.2
    cov = R @ np.diag(principal_axis) @ R.transpose()
    points = np.random.multivariate_normal(mean, cov, size=num_point)
    # center the data so PCA needs no mean term
    points = points - np.mean(points, axis=0)
    return points.transpose()
def take_first_cols(R):
    """Return the first column of R as an (n, 1) matrix."""
    first_col = R[:, 0:1]
    return first_col
class PCAHighDimFirstPrincipleComponent:
    """Gauss-Newton PCA for the first principal component in high dimension.

    Minimizes cost(R, P) = || points - first_col(R) * P ||^2 over a rotation
    R in SO(dim) and per-point projections P, alternating between a closed-form
    projection update and a Gauss-Newton step on the first column of R.
    See: https://stats.stackexchange.com/questions/10251/what-is-the-objective-function-of-pca
    """
    def __init__(self, points, dim):
        """points: (dim, n) array; dim: ambient dimension."""
        self.num_data = points.shape[1]
        self.num_residuals = points.size
        # Center the data so the fitted subspace passes through the origin.
        points_mean = np.mean(points, axis=1)
        self.points = (points.transpose() - points_mean).transpose()
        self.dim = dim
        # Optimization variables: a rotation and the 1-D projections.
        self.var_SO = np.identity(dim)
        self.var_projection = np.zeros([1, self.num_data])
    def residaul(self, var_SO, var_projection):
        """Stacked residuals r_i = p_i - first_col(R) * w_i, flattened column-major.

        (sic: 'residaul' spelling kept — callers throughout this class use it.)
        """
        # self.point.shape == (3, n)
        r = self.points - take_first_cols(var_SO) @ var_projection
        # make r col major
        return r.transpose().flatten()
    def compute_projection(self):
        """Closed-form optimal projections: w = first_col(R)^T @ points."""
        self.var_projection = take_first_cols(self.var_SO).transpose() @ self.points
    def hat_local_so_first_col(self, local_params):
        """Hat operator for the dim-1 local parameters perturbing the first column.

        Take SO3 as example
        W = [[0  -w1 -w2],
             [w1  0  -w3],
             [w2  w3  0 ]]
        local_params = [w1, w2]
        Only the first row/column of W is populated; the other skew entries do
        not move the first column to first order.
        """
        W = np.zeros([self.dim, self.dim])
        for i in range(1, self.dim):
            W[i, 0] = local_params[i - 1]
            W[0, i] = -local_params[i - 1]
        return W
    def local_so_first_col_to_SO(self, local_params):
        """Map local parameters to a rotation via the matrix exponential."""
        W = self.hat_local_so_first_col(local_params)
        return expm(W)
    def numerical_jacobi_wrt_first_col(self):
        """Central-difference Jacobian of the residual w.r.t. the local params.

        Used only as a check against the analytic jacobi_wrt_so().
        """
        DELTA = 1e-8
        num_local_variables = self.dim - 1
        jacobian = np.zeros([self.num_residuals, num_local_variables])
        w_so_local_first_col = np.zeros(num_local_variables)
        curret_params = w_so_local_first_col.copy()
        for p_idx in range(num_local_variables):
            params_plus = curret_params.copy()
            params_plus[p_idx] += DELTA
            residual_plus = self.residaul(self.var_SO @ self.local_so_first_col_to_SO(params_plus), self.var_projection)
            params_minus = curret_params.copy()
            params_minus[p_idx] -= DELTA
            residual_minus = self.residaul(self.var_SO @ self.local_so_first_col_to_SO(params_minus), self.var_projection)
            dr_dpidx = (residual_plus - residual_minus) / (2. * DELTA)
            jacobian[:, p_idx] = dr_dpidx
        return jacobian
    def jacobi_wrt_so(self):
        """Analytic Jacobian of the stacked residual w.r.t. the local SO params.

        The derivation is here,
        r_i(w) = p_i - first_k_cols(R) * proj
               = p_i - first_k_cols(R * exp(w)) * proj
        First order, the derivative of the first column w.r.t. local param j is
        column j+1 of R, so the per-point Jacobian is -proj_i * R[:, 1:].
        """
        num_variables = self.dim - 1
        f2_t = np.zeros([self.dim,num_variables])
        for var_idx in range(num_variables):
            f2_t[:, var_idx] = self.var_SO[:, var_idx + 1]
        f2_t = f2_t.reshape(1, self.dim, num_variables)
        jacobi_all_points = np.zeros([self.num_data, self.dim, num_variables])
        for i in range(num_variables):
            # ((n,3), 3) = (n, k) @ ((k, 3), 3)
            jacobi_all_points[:, :, i] = - (self.var_projection.transpose() @ f2_t[:,:, i])
        jacobi = jacobi_all_points.reshape(self.num_data * self.dim, num_variables)
        return jacobi
    def solve_normal_equation_and_update_wrt_so(self):
        """One Gauss-Newton step: solve (J^T J) d = -J^T r, retract onto SO(dim).

        Returns the local step d (used by minimize() as a convergence signal).
        """
        jacobi = self.jacobi_wrt_so()
        if False:
            # Flip to True to cross-check against the numerical Jacobian.
            jacobi_nu = self.numerical_jacobi_wrt_first_col()
            print('jacobi checking:', jacobi_nu - jacobi)
        # print('jacobian', jacobi)
        r = self.residaul(self.var_SO, self.var_projection)
        # NOTE(review): 'rhs'/'lhs' names are swapped relative to convention;
        # rhs here is the normal-equation matrix, lhs the right-hand vector.
        rhs = jacobi.transpose() @ jacobi
        lhs = - jacobi.transpose() @ r
        # print('rhs:', rhs)
        # print('lhs:', lhs)
        delta = np.linalg.solve(rhs, lhs)
        self.var_SO = self.var_SO @ self.local_so_first_col_to_SO(delta)
        return delta
    def cost(self):
        """Sum of squared residuals at the current variables."""
        r = self.residaul(self.var_SO, self.var_projection)
        return r.transpose() @ r
    def print_variable(self):
        """Print cost, the current principal vector, and the projected variance."""
        print('cost:', self.cost())
        np.testing.assert_almost_equal(self.var_SO.transpose() @ self.var_SO, np.identity(self.dim))
        print('Principal vector:', take_first_cols(self.var_SO).transpose())
        p = self.var_projection[0, :]
        print(p.transpose() @ p / self.num_data)
    def minimize(self):
        """Alternate projection and Gauss-Newton updates until the step is small."""
        for iter in range(10):
            print('iter:', iter)
            self.print_variable()
            self.compute_projection()
            delta = self.solve_normal_equation_and_update_wrt_so()
            if np.linalg.norm(delta) < 1e-4:
                break
def point_statis(points):
    """Print the dominant eigenvalue/eigenvector of the sample covariance of points."""
    sample_cov = points @ points.transpose() / points.shape[1]
    eigvals, eigvecs = np.linalg.eig(sample_cov)
    # Sort the spectrum in descending order so index 0 is the dominant mode.
    order = np.argsort(eigvals)[::-1]
    eigvals = eigvals[order]
    eigvecs = eigvecs[:, order]
    print('largest e(cov_stats):', eigvals[0])
    print('largest v(cov_stats):', eigvecs[:, 0].transpose())
def main():
    """Run the high-dimensional first-principal-component PCA demo."""
    dim = 100
    cloud = generate_data(dim)
    solver = PCAHighDimFirstPrincipleComponent(cloud, dim)
    print("pca.cost():", solver.cost())
    print("pca jocibian:", solver.numerical_jacobi_wrt_first_col())
    solver.minimize()
    # Compare against the statistical (eigen-decomposition) answer.
    point_statis(cloud)
if __name__ == "__main__":
    main()
|
yimuw/yimu-blog | random/lyapunov/lyapunov_polynomial_simple.py | <filename>random/lyapunov/lyapunov_polynomial_simple.py
# Import packages.
import cvxpy as cp
import numpy as np
import sympy
# linear case
# given a polynomial dynamic system dx = - 0.5 y
# dy = - 0.5 x
# Find a quadratic Lyapunov function of z, V(z) = z^T Q z
# where z is polynomial basis of (x,y). z = [x, x^2, y, y^2]
#
# We can express [dx, dy] in the polynomial basis
# dxy = A z
#
# want: V(z) > 0 => Q >> 0
#
# dot V(x) < 0 =>
# dot V(x) = dVdz * dzdx * dxdt
# dVdz = 2 z^T Q
# dzdx = [1, 2x, 0, 0] ^T = B
# [0, 0 , 1, 2y]
# dxdt is the fx
# 2 z^T Q B A z < 0 => - z^T Q B A z is S.O.S
class LinearSystemLyapunov:
    """Find a quadratic Lyapunov function for a linear system via an SOS/SDP.

    polynomial_arrangement() symbolically derives the coefficient-matching
    constraints between -dV/dt and an SOS form; the printed constraint string
    is then pasted (manually) into solve_sos_as_sdp(), which solves the SDP.
    """
    def __init__(self):
        pass
    def polynomial_arrangement(self):
        """Derive and print the coefficient-matching constraints with sympy."""
        x, y = sympy.symbols('x y')
        z = sympy.Matrix([[x, y]]).transpose()
        Q = sympy.MatrixSymbol('Q', 2, 2)
        # fx = sympy.Matrix([
        #     [-y - 3/2*x**2 - 1/2*x**3],
        #     [3*x - y],
        # ])
        # Dynamics dx/dt = fx (stable diagonal linear system).
        fx = sympy.Matrix([
            [-0.9 * x],
            [-0.5 * y],
        ])
        V = (z.T @ Q @ z).as_explicit()
        V_poly = sympy.Poly(V[0], x, y)
        print('V_poly:', V_poly)
        # -dV/dt = -2 z^T Q (dz/dt); must be SOS for V to decrease.
        V_dot = (- 2 * z.T @ Q @ fx).as_explicit()
        V_dot_poly = sympy.Poly(V_dot[0], x, y)
        print('V_dot_poly:', V_dot_poly)
        w = sympy.Matrix([x, y])
        G = sympy.MatrixSymbol('G', 2, 2)
        SOS_of_V_dot = (w.T @ G @ w).as_explicit()
        SOS_of_V_dot_poly = sympy.Poly(SOS_of_V_dot[0], x, y)
        # print('SOS_of_V_dot_poly:', SOS_of_V_dot_poly)
        constraint_list = []
        # Match coefficients monomial-by-monomial up to total order 9.
        for max_order in range(10):
            for x_order in range(0, max_order + 1):
                y_order = max_order - x_order
                monomial = x ** x_order * y ** y_order
                SOS_coeff = SOS_of_V_dot_poly.coeff_monomial(
                    monomial)
                # coeff_monomial returns the S.Zero singleton for absent terms,
                # so identity comparison is valid here.
                if SOS_coeff is sympy.S.Zero:
                    continue
                V_dot_coeff = V_dot_poly.coeff_monomial(monomial)
                if V_dot_coeff is not sympy.S.Zero:
                    constrain = '{}=={}'.format(V_dot_coeff, SOS_coeff)
                    print('constrain:', constrain, " of ", monomial)
                    constraint_list.append(constrain)
                else:
                    constrain = '{}==0.'.format(SOS_coeff)
                    print('constrain:', constrain, " of ", monomial)
                    constraint_list.append(constrain)
        # The printed string is hand-pasted into solve_sos_as_sdp() below.
        print('Constraints (copy this!):', ','.join(constraint_list))
    def solve_sos_as_sdp(self):
        """Solve the SOS feasibility problem as an SDP with cvxpy."""
        num_var_q = 2
        Q = cp.Variable((num_var_q, num_var_q), symmetric=True)
        # assuming w is [1, x, x^2, x^3, y, y^2, y^3]
        num_var_w = 2
        G = cp.Variable((num_var_w, num_var_w), symmetric=True)
        # Q.value = np.identity(num_var)
        # Strict positive-definiteness enforced via an epsilon margin.
        Epsilon = 1e-4
        constraints = [Q >> Epsilon * np.identity(num_var_q)]
        constraints += [G >> Epsilon * np.identity(num_var_w)]
        # Pasted from polynomial_arrangement(); regenerate if fx changes.
        constraints += [1.0*Q[1, 1]==G[1, 1],1.0*Q[0, 1] + 1.8*Q[1, 0]==G[0, 1] + G[1, 0],1.8*Q[0, 0]==G[0, 0]]
        prob = cp.Problem(cp.Maximize(1),
                          constraints)
        prob.solve(verbose=False)
        # Print result.
        print("status:", prob.status)
        print("The optimal value is", prob.value)
        print("A solution Q is")
        print(Q.value)
def draw_dynamic():
    """Placeholder for plotting the system dynamics.

    The original definition had no body at all, which is a SyntaxError and made
    the whole module unimportable; `pass` restores a valid (no-op) stub.
    """
    pass
def main():
    """Derive the SOS constraints symbolically, then solve the resulting SDP."""
    lyapunov = LinearSystemLyapunov()
    lyapunov.polynomial_arrangement()
    lyapunov.solve_sos_as_sdp()
if __name__ == "__main__":
    main()
|
yimuw/yimu-blog | least_squares/kalman/kalman_least_sqr.py | import math
import matplotlib.pyplot as plt
import numpy as np
from model import *
class KalmanLeastSqaures:
    """Kalman filter update expressed as an iterated nonlinear least-squares solve.

    cost(x) = ||F^-1 x - x_pre||^2_{FWF^T + Q}^{-1} + ||z - Hx||^2_R^{-1}

    Each filter() call fuses the propagated prior with one measurement by
    Gauss-Newton on the joint prior + observation residual.
    """
    def __init__(self, prior_state, prior_cov, max_iter = 10):
        """prior_state/prior_cov: initial belief; max_iter: GN iterations per update."""
        self.state = prior_state
        self.cov = prior_cov
        self.max_iter = max_iter
    def filter(self, measurement):
        """Advance the belief by one step using a single measurement."""
        prior_state = self.state
        prior_cov = self.cov
        self.least_sqr_optimization(prior_state, prior_cov, measurement)
    def compute_jacobian_and_residual(self, current_varialbes,
                                      prior_state, prior_cov, measurement):
        """Stack the prior and observation residuals, Jacobians, and weights.

        Returns (jacobian, residual, weight) for the combined system; the
        prior block is weighted by the inverse of the propagated covariance
        F P F^T + Q, the observation block by the model's measurement weight.
        """
        model = get_model()
        num_state = model.NUM_STATES
        num_variables = num_state
        num_measurements = model.NUM_OBSERVATION
        num_prior_equations = model.NUM_STATES
        num_observation_equations = model.NUM_OBSERVATION
        num_equations = num_prior_equations + num_observation_equations
        jacobian = np.zeros([num_equations, num_variables])
        weight = np.zeros([num_equations, num_equations])
        residual = np.zeros([num_equations, 1]).squeeze()
        n = 0
        f_jacobian_prev = model.f_jacobian(prior_state)
        # prior
        jacobian[n: num_prior_equations, :] = np.identity(num_state)
        residual[n:num_prior_equations] = current_varialbes - model.f(prior_state)
        weight[n:num_prior_equations, n:num_prior_equations] \
            = np.linalg.inv(f_jacobian_prev @ prior_cov @ f_jacobian_prev.T + model.f_cov())
        n += num_prior_equations
        # observation
        jacobian[n: n + num_observation_equations, :] = model.h_jacobian(current_varialbes)
        residual[n: n + num_observation_equations] = model.h(current_varialbes) - measurement
        weight[n: n + num_observation_equations, n: n + num_observation_equations] = model.h_weight()
        n += num_observation_equations
        assert n == num_equations
        return jacobian, residual, weight
    def construct_normal_equation(self, jacobian, residual, weight):
        """Form the weighted normal equations (J^T W J) dx = -J^T W r."""
        J_transpose_dot_weight = np.dot(jacobian.T, weight)
        norm_equation_left = np.dot(J_transpose_dot_weight, jacobian)
        norm_equation_right = - np.dot(J_transpose_dot_weight, residual)
        return norm_equation_left, norm_equation_right
    def least_sqr_optimization(self, prior_state, prior_cov, measurement):
        """Gauss-Newton on the joint cost; writes back self.state and self.cov.

        The final normal-equation matrix is the Hessian of the least-squares
        cost, so its inverse is used as the posterior covariance.
        """
        model = get_model()
        variables = model.f(prior_state)
        norm_equation_left = prior_cov.copy()
        for iter in range(self.max_iter):
            jacobian, residual, weight \
                = self.compute_jacobian_and_residual(variables, prior_state, prior_cov, measurement)
            norm_equation_left, norm_equation_right \
                = self.construct_normal_equation(jacobian, residual, weight)
            dx = np.linalg.solve(norm_equation_left, norm_equation_right)
            LAMBDA = 1.
            variables += LAMBDA * dx
            # if np.linalg.norm(dx) < 1e-8:
            #     break
        self.state = variables
        self.cov = np.linalg.inv(norm_equation_left)
def run_kalman_least_sqr(gt_states, gt_measurements, max_iter=10):
    """Filter every measurement with the least-squares Kalman variant, then plot."""
    initial_state = np.array([0.5, 0.5, 0, 0, 0])
    initial_cov = np.diag([1, 1, 1, 1., 1.])
    kf = KalmanLeastSqaures(initial_state, initial_cov, max_iter)
    states = []
    covs = []
    for measurement in gt_measurements:
        kf.filter(measurement)
        states.append(kf.state)
        covs.append(kf.cov)
    result_comparison(gt_states, states, covs,
                      'kalman least sqaures, iter:' + str(max_iter))
|
yimuw/yimu-blog | random/gimbal_lock/gimbal_lock.py | import numpy as np
from math import cos, sin
from scipy.linalg import logm, expm
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
class Arrow3D(FancyArrowPatch):
    """A 2-D FancyArrowPatch whose endpoints are re-projected from 3-D at draw time.

    NOTE(review): relies on the matplotlib-internal projection matrix
    `renderer.M`; newer matplotlib versions moved this to the Axes3D object —
    confirm against the installed matplotlib.
    """
    def __init__(self, xs, ys, zs, *args, **kwargs):
        # Initialize the 2-D patch at a dummy position; real position is
        # computed in draw() once the projection is known.
        FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
        self._verts3d = xs, ys, zs
    def draw(self, renderer):
        # Project the stored 3-D endpoints into display coordinates.
        xs3d, ys3d, zs3d = self._verts3d
        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
        self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
        FancyArrowPatch.draw(self, renderer)
def rotate_lock(a):
    """Rotation used in the gimbal-lock demo, parameterized by angle a."""
    c, s = cos(a), sin(a)
    return np.array([
        [0., 0., 1.],
        [s, c, 0.],
        [-c, s, 0.],
    ])
def rotate_x(a):
    """Return a rotation by angle a.

    NOTE(review): despite the name, this is a rotation about the z-axis
    ([[c,-s,0],[s,c,0],[0,0,1]]); name kept so existing callers still work —
    confirm the intended axis.
    """
    c, s = cos(a), sin(a)
    return np.array([
        [c, -s, 0.],
        [s, c, 0.],
        [0., 0., 1.],
    ])
def solve_axis_svd(R):
    """Rotation axis of R: the null vector of (R - I), sign-aligned with +x."""
    _, _, vt = np.linalg.svd(R - np.identity(3))
    # Smallest singular value's right singular vector spans the null space.
    axis = vt[2, :]
    # Resolve the sign ambiguity of the null space deterministically.
    if axis @ np.array([1, 0, 0.]) < 0:
        axis = -axis
    return axis
def solve_axis_log(R):
    """Rotation vector of R read off from its matrix logarithm."""
    log_R = logm(R)
    # logm of a rotation is skew-symmetric; extract the three free entries.
    return np.array([log_R[2, 1], log_R[0, 2], log_R[1, 0]])
def try_plot(R, fig):
    """Redraw fig with the rotated frame R, a reference frame, and R's rotation axis.

    Columns of R are drawn as the new x/y/z axes (red/blue/green); the world
    axes are drawn semi-transparent; the rotation axis (from solve_axis_svd)
    is drawn in magenta.
    """
    fig.clf()
    ax1 = fig.add_subplot(111, projection='3d')
    # ax1.view_init(1, 1)
    ax1.set_xlabel('X')
    ax1.set_xlim(-1, 1)
    ax1.set_ylabel('Y')
    ax1.set_ylim(-1, 1)
    ax1.set_zlabel('Z')
    ax1.set_zlim(-1, 1)
    # Here we create the arrows:
    arrow_prop_dict = dict(
        mutation_scale=20, arrowstyle='->', shrinkA=0, shrinkB=0)
    # Rotated frame: each column of R is one basis vector.
    x1, y1, z1 = R[:, 0]
    a = Arrow3D([0, x1], [0, y1], [0, z1], **arrow_prop_dict, color='r')
    ax1.add_artist(a)
    x2, y2, z2 = R[:, 1]
    a = Arrow3D([0, x2], [0, y2], [0, z2], **arrow_prop_dict, color='b')
    ax1.add_artist(a)
    x3, y3, z3 = R[:, 2]
    a = Arrow3D([0, x3], [0, y3], [0, z3], **arrow_prop_dict, color='g')
    ax1.add_artist(a)
    # Give them a name:
    ax1.text(0.0, 0.0, -0.1, r'$0$')
    ax1.text(x1, y1, z1, r'$x-new$')
    ax1.text(x2, y2, z2, r'$y-new$')
    ax1.text(x3, y3, z3, r'$z-new$')
    # Reference (world) frame, drawn faded for comparison.
    a = Arrow3D([0, 1], [0, 0], [0, 0], **
                arrow_prop_dict, color='r', alpha=0.3)
    ax1.add_artist(a)
    a = Arrow3D([0, 0], [0, 1], [0, 0], **
                arrow_prop_dict, color='b', alpha=0.3)
    ax1.add_artist(a)
    a = Arrow3D([0, 0], [0, 0], [0, 1], **
                arrow_prop_dict, color='g', alpha=0.3)
    ax1.add_artist(a)
    # Give them a name:
    ax1.text(1.1, 0, 0, r'$x-ref$')
    ax1.text(0, 1.1, 0, r'$y-ref$')
    ax1.text(0, 0, 1.1, r'$z-ref$')
    # rotation_axis = solve_axis_log(R)
    rotation_axis = solve_axis_svd(R)
    w1, w2, w3 = rotation_axis
    a = Arrow3D([0, w1], [0, w2], [0, w3], **
                arrow_prop_dict, color='m', alpha=0.5)
    ax1.text(w1, w2, w3, r'$w$')
    ax1.add_artist(a)
    # plt.show()
def main():
    """Animate a gimbal-locked rotation and save each frame under res/.

    Fix: the original computed `R = rotate_x(a)` and immediately overwrote it
    with `rotate_lock(a)`; the dead call is removed.
    """
    fig = plt.figure()
    for i, a in enumerate(np.linspace(0, 2 * np.pi, 60)):
        # Use R = rotate_x(a) here instead to animate the plain rotation.
        R = rotate_lock(a)
        try_plot(R, fig)
        plt.pause(0.1)
        plt.savefig('res/{0:03d}.png'.format(i))
if __name__ == "__main__":
    main()
|
yimuw/yimu-blog | least_squares/pca/pca_3d.py | <gh_stars>1-10
import numpy as np
# from scipy.linalg import logm, expm
from math import cos, sin, pi
import matplotlib.pyplot as plt
import utils
def generate_point_cloud():
    """Sample a centered 3-D point cloud with a known anisotropic covariance.

    Returns a (3, 100) array; the ground-truth rotation, covariance, and its
    eigen-decomposition are printed for comparison with the PCA result.
    """
    center = np.array([0, 0, 0.])
    # R = utils.euler_angle_to_rotation(0.4, -1., 6.6666)
    R = utils.euler_angle_to_rotation(0.3, 0.5, 0.0)
    # Rotate an axis-aligned covariance into the world frame.
    cov = R @ np.diag([0.001, 1, 9.]) @ R.transpose()
    num_point = 100
    samples = np.random.multivariate_normal(center, cov, size=num_point)
    centroid = np.mean(samples, axis=0)
    print('points_mean:', centroid)
    samples = samples - centroid
    np.testing.assert_almost_equal(R.transpose() @ R, np.identity(3))
    print('R_gt: ', R)
    print('cov_gt:', cov)
    print('numpy.linalg.eig(cov):', np.linalg.eig(cov))
    return samples.transpose()
def take_n_cols(R, n):
    """Return the first n columns of R."""
    return R[:, :n]
class PCA_SO3_projection_minimization:
    """Rank-1 PCA in 3-D posed as Gauss-Newton optimization over SO(3).

    Minimizes || points - first_k_cols(R) * proj ||^2 over a rotation R and
    per-point projections, alternating closed-form projection updates with
    Gauss-Newton steps on the so(3) tangent space.
    See: https://stats.stackexchange.com/questions/10251/what-is-the-objective-function-of-pca
    """
    def __init__(self, points):
        """points: (3, n) array of samples."""
        self.num_data = points.shape[1]
        self.num_residuals = points.size
        # Center the data so the fitted subspace passes through the origin.
        points_mean = np.mean(points, axis=1)
        self.points = (points.transpose() - points_mean).transpose()
        self.points_covariance = points @ points.transpose() / self.num_data
        # print(self.points_covariance)
        # Optimization variables: rotation and rank-1 projections.
        self.var_SO3 = np.identity(3)
        self.rank = 1
        self.var_projection = np.zeros([self.rank, self.num_data])
    def residaul(self, var_SO3, var_projection):
        """Stacked residuals r_i = p_i - first_k_cols(R) * w_i, flattened.

        (sic: 'residaul' spelling kept — callers in this class use it.)
        """
        # self.point.shape == (3, n)
        r = self.points - take_n_cols(var_SO3, self.rank) @ var_projection
        # make r col major
        return r.transpose().flatten()
    def compute_projection(self):
        """Closed-form optimal projections given the current rotation."""
        self.var_projection = take_n_cols(self.var_SO3, self.rank).transpose() @ self.points
    def numerical_jacobi_wrt_so3(self):
        """Central-difference Jacobian w.r.t. the so(3) local parameters.

        Kept as a check against the analytic jacobi_wrt_so3().
        """
        DELTA = 1e-8
        jacobian = np.zeros([self.num_residuals, 3])
        w_so3_local = np.array([0, 0, 0.])
        curret_params = w_so3_local.copy()
        for p_idx in range(3):
            params_plus = curret_params.copy()
            params_plus[p_idx] += DELTA
            residual_plus = self.residaul(self.var_SO3 @ utils.so3_exp(params_plus), self.var_projection)
            params_minus = curret_params.copy()
            params_minus[p_idx] -= DELTA
            residual_minus = self.residaul(self.var_SO3 @ utils.so3_exp(params_minus), self.var_projection)
            dr_dpidx = (residual_plus - residual_minus) / (2. * DELTA)
            jacobian[:, p_idx] = dr_dpidx
        return jacobian
    def jacobi_wrt_so3(self):
        """Analytic Jacobian of the stacked residual w.r.t. so(3) parameters.

        The derivation is here,
        r_i(w) = p_i - first_k_cols(R) * proj
               = p_i - first_k_cols(R * exp(w)) * proj
        df1/dw = [R * G1, R * G2, R * G3] where G1..G3 are the so(3)
        generators; chain rule through the column selection and the
        multiplication by proj yields the per-point Jacobians below.
        """
        # so(3) generators (basis of skew-symmetric matrices).
        G1 = np.array([
            [0, 0, 0],
            [0, 0, -1],
            [0, 1, 0.],
        ])
        G2 = np.array([
            [0, 0, 1],
            [0, 0, 0],
            [-1, 0, 0.],
        ])
        G3 = np.array([
            [0, -1, 0],
            [1, 0, 0],
            [0, 0, 0.],
        ])
        f1 = np.zeros([3,3,3])
        f1[:, :, 0] = self.var_SO3 @ G1
        f1[:, :, 1] = self.var_SO3 @ G2
        f1[:, :, 2] = self.var_SO3 @ G3
        # ((k, 3), 3)
        f2_t = np.zeros([self.rank, 3, 3])
        for i in range(3):
            f2_t[:, :, i] = f1[:, :self.rank, i].transpose()
        jacobi_all_points = np.zeros([self.num_data, 3, 3])
        for i in range(3):
            # ((n,3), 3) = (n, k) @ ((k, 3), 3)
            jacobi_all_points[:, :, i] = - (self.var_projection.transpose() @ f2_t[:, :, i])
        jacobi = jacobi_all_points.reshape(self.num_data * 3, 3)
        return jacobi
    def solve_normal_equation_and_update_wrt_so3(self):
        """One damped Gauss-Newton step; retracts the update onto SO(3)."""
        jacobi = self.jacobi_wrt_so3()
        # jacobi = self.numerical_jacobi_wrt_so3()
        # print('jacobian', jacobi)
        r = self.residaul(self.var_SO3, self.var_projection)
        # rhs is invertable when rank == 1
        # Tikhonov regularization keeps the 3x3 system solvable (the rank-1
        # cost leaves a rotation about the principal axis unconstrained).
        regulization = 1e-6
        rhs = jacobi.transpose() @ jacobi + regulization * np.identity(3)
        lhs = - jacobi.transpose() @ r
        # print('rhs:', rhs)
        # print('lhs:', lhs)
        delta_so3 = np.linalg.solve(rhs, lhs)
        print('delta_so3:', delta_so3)
        self.var_SO3 = self.var_SO3 @ utils.so3_exp(delta_so3)
    def cost(self):
        """Sum of squared residuals at the current variables."""
        r = self.residaul(self.var_SO3, self.var_projection)
        return r.transpose() @ r
    def print_variable(self):
        """Print cost, the current principal direction(s), and projected variance."""
        print('cost:', self.cost())
        np.testing.assert_almost_equal(self.var_SO3.transpose() @ self.var_SO3, np.identity(3))
        print('R_k:', take_n_cols(self.var_SO3, self.rank).transpose())
        for i in range(self.rank):
            p = self.var_projection[i, :]
            print(p.transpose() @ p / self.num_data)
    def minimize(self):
        """Alternate projection and Gauss-Newton updates, up to 10 iterations."""
        for iter in range(10):
            if self.cost() < 1e-8:
                break
            self.print_variable()
            self.compute_projection()
            self.solve_normal_equation_and_update_wrt_so3()
        np.testing.assert_almost_equal(self.var_SO3.transpose() @ self.var_SO3, np.identity(3))
def main():
    """Compare Gauss-Newton PCA on SO(3) against the eigen-decomposition answer."""
    cloud = generate_point_cloud()
    # Gauss-Newton variant.
    gn_pca = PCA_SO3_projection_minimization(cloud)
    gn_pca.compute_projection()
    print("pca_2.cost()", gn_pca.cost())
    gn_pca.minimize()
    print('finial so3:', gn_pca.var_SO3)
    # Reference: eigen-decomposition of the sample covariance.
    sample_cov = cloud @ cloud.transpose() / cloud.shape[1]
    eigvals, eigvecs = np.linalg.eig(sample_cov)
    order = np.argsort(eigvals)[::-1]
    eigvals = eigvals[order]
    eigvecs = eigvecs[:, order]
    print('e(cov_stats):', eigvals)
    print('v(cov_stats):', eigvecs)
    # Evaluate the eigenvector basis under the same projection cost.
    eig_pca = PCA_SO3_projection_minimization(cloud)
    eig_pca.var_SO3 = eigvecs
    eig_pca.compute_projection()
    eig_pca.print_variable()
    print("pca_using_eig.cost() by svd:", eig_pca.cost())
if __name__ == "__main__":
    main()
|
yimuw/yimu-blog | least_squares/kalman/batch_least_sqr.py | <reponame>yimuw/yimu-blog
import math
import matplotlib.pyplot as plt
import numpy as np
from model import *
class BatchLeastSqaures:
    """
    cost(x1, x2, ..., xn) = sum ||x - f(x-1)||^2_{f_weight} + sum ||z - h(x)||^2_weight
    Note: it is not the equality contrainted version

    Batch (graph) smoother: all states in the trajectory are solved jointly by
    Gauss-Newton over one stacked system of odometry and observation factors.
    """
    def __init__(self, prior_state=None, prior_cov=None):
        """
        technically, only prior is need for graph based filter
        :param prior_state:
        :param prior_cov: NOTE(review): accepted but never stored/used.
        """
        self.prior_state = prior_state
    @staticmethod
    def get_state_i(varibles_vector, idx):
        """Slice the idx-th state out of the stacked variable vector."""
        model = get_model()
        size_state = model.NUM_STATES
        return varibles_vector[idx * size_state: (idx + 1) * size_state]
    def construct_linear_system(self, current_variables, measurements):
        """Assemble the Gauss-Newton normal equations (A, b) for the whole graph.

        One odometry residual links each consecutive state pair; one
        observation residual ties each state to its measurement.
        """
        model = get_model()
        # size of a state.
        size_state = model.NUM_STATES
        # number of nodes (states) in a graph
        num_nodes = len(measurements)
        # total number of variables for all nodes
        num_varibles = size_state * num_nodes
        # size of a measurement vector
        size_measurement = model.NUM_OBSERVATION
        # total number of measurements
        num_measurements = size_measurement * len(measurements)
        # there is a kinematic link between 2 measurements. So number is n -1
        # It is a full constrain for all state since kinematics model is given
        odometry_equ_size = size_state
        num_odometry_equations = (num_nodes - 1) * odometry_equ_size
        observation_equ_size = size_measurement
        num_observation_equations = len(measurements) * observation_equ_size
        # For asserts
        num_equations = num_odometry_equations + num_observation_equations
        # Jacobian. row is number of equations (residuals). Cols is the number of variables
        J = np.zeros([num_equations, num_varibles])
        # Weight. weight for residuals. Typically very sparse or diagonal.
        W = np.zeros([num_equations, num_equations])
        # Residual of the least sqr system
        r = np.zeros([num_equations, 1]).squeeze()
        # keep track of equations, a better implementation is associated a index for every factor.
        n = 0
        # update odometry equations
        for nidx in range(len(measurements) - 1):
            this_node_varible_idx = nidx * size_state
            next_node_varible_idx = (nidx + 1) * size_state
            assert nidx + 1 < len(measurements)
            this_state = self.get_state_i(current_variables, nidx)
            next_staet = self.get_state_i(current_variables, nidx + 1)
            # residual is "r = f(x1) - x2" => derivatives are "dr/dx1 = df(x1)", "dr/dx2 = -I"
            # model constrain
            # df / dx1
            J[n:n + odometry_equ_size, this_node_varible_idx:this_node_varible_idx + size_state] \
                = model.f_jacobian(this_state)
            # df / dx2
            J[n:n + odometry_equ_size, next_node_varible_idx: next_node_varible_idx + size_state] \
                = -np.identity(size_state)
            r[n:n + odometry_equ_size] = model.f(this_state) - next_staet
            W[n:n + odometry_equ_size, n:n + odometry_equ_size] = model.f_weight()
            n += odometry_equ_size
        assert n == num_odometry_equations
        for midx, measurement in enumerate(measurements):
            this_state = self.get_state_i(current_variables, midx)
            variable_idx = midx * size_state
            # observation
            J[n: n + observation_equ_size, variable_idx: variable_idx + size_state] += model.h_jacobian(this_state)
            r[n: n + observation_equ_size] = model.h(this_state) - measurement
            W[n: n + observation_equ_size, n: n + observation_equ_size] = model.h_weight()
            n += observation_equ_size
        assert n == num_equations
        # Weighted normal equations: A = J^T W J, b = J^T W r.
        J_transpose_dot_weight = np.dot(J.T, W)
        A = np.dot(J_transpose_dot_weight, J)
        b = np.dot(J_transpose_dot_weight, r)
        SPY_MATRIX = False
        if SPY_MATRIX:
            # Visualize the sparsity structure of the system (debug aid).
            plt.spy(J, precision=1e-2)
            plt.title('J')
            plt.figure()
            plt.spy(A, precision=1e-2)
            plt.title('A.T * A')
            plt.figure()
            plt.spy(W)
            plt.title('Weight')
            plt.show()
        return A, b
    def init_variables(self, num_nodes):
        """Initialize all states by forward-propagating the prior through f()."""
        model = get_model()
        # total number of variables for all nodes
        state_size = model.NUM_STATES
        num_variables = state_size * num_nodes
        variables = np.zeros([num_variables, 1]).squeeze()
        state_pred = self.prior_state
        for i in range(num_nodes):
            var_idx = i * state_size
            state_pred = model.f(state_pred)
            variables[var_idx: var_idx + state_size] = state_pred
        return variables
    def filter(self, measurements):
        """
        Assume there is a kinematic link between 2 measurements
        :param measurements:
        :return: (states, covs) — smoothed states and their marginal covariances.
        """
        variables = self.init_variables(len(measurements))
        hessian = None
        for newton_iter in range(50):
            # print(('newton iter :', newton_iter))
            A, b = self.construct_linear_system(variables, measurements)
            dx = np.linalg.solve(A, b)
            LAMBDA = 1
            variables -= LAMBDA * dx
            # by construction, A is the hessian for least sqr problem
            hessian = A
            if np.linalg.norm(dx) < 1e-8:
                break
        optimized_states = variables
        full_covariance = np.linalg.inv(hessian)
        return self.format_to_states_and_covs(optimized_states, full_covariance)
    def format_to_states_and_covs(self, optimized_states, full_covariance):
        """Split the stacked solution into per-node states and covariance blocks."""
        model = get_model()
        state_size = model.NUM_STATES
        num_nodes = int(optimized_states.shape[0] / state_size)
        states = []
        covs = []
        for i in range(num_nodes):
            this_state = self.get_state_i(optimized_states, i)
            states.append(this_state)
            index_start = i * state_size
            cov = full_covariance[index_start: index_start + state_size, index_start: index_start + state_size]
            covs.append(cov)
        return states, covs
def run_batch_least_sqr(gt_states, gt_measurements):
    """Solve the whole trajectory as one batch least-squares problem and plot."""
    initial_state = np.array([0.5, 0.5, 0.1, 0, 0])
    # prior_state = init_state
    initial_cov = np.diag([1, 1, 1, 1., 1.])
    smoother = BatchLeastSqaures(initial_state, initial_cov)
    states, covs = smoother.filter(gt_measurements)
    result_comparison(gt_states, states, covs, 'Graph Optimization')
|
yimuw/yimu-blog | least_squares/ceres-from-scratch/icp_se3.py | <gh_stars>1-10
from number_forward_flow import *
def euler_angle_to_rotation(yaw, pitch, roll):
    """Rotation matrix R = Rz(yaw) @ Ry(pitch) @ Rx(roll)."""
    from math import cos, sin
    cz, sz = cos(yaw), sin(yaw)
    cy, sy = cos(pitch), sin(pitch)
    cx, sx = cos(roll), sin(roll)
    Rz = np.array([
        [cz, -sz, 0.],
        [sz, cz, 0.],
        [0, 0, 1.],
    ])
    Ry = np.array([
        [cy, -0., sy],
        [0., 1., 0.],
        [-sy, 0, cy],
    ])
    Rx = np.array([
        [1., 0., 0.],
        [0., cx, -sx],
        [0, sx, cx],
    ])
    return Rz @ Ry @ Rx
def skew(w):
    """3x3 skew-symmetric matrix [w]_x such that [w]_x @ v == cross(w, v)."""
    wx, wy, wz = w
    return np.array([
        [0, -wz, wy],
        [wz, 0, -wx],
        [-wy, wx, 0.],
    ])
def so3_exp(w):
    """Exponential map so(3) -> SO(3) via the Rodrigues formula."""
    from math import cos, sin
    theta = np.linalg.norm(w)
    if abs(theta) < 1e-8:
        # First-order approximation near the identity.
        return np.identity(3) + skew(w)
    K = skew(w / theta)
    # Rodrigues rotation formula.
    R = np.identity(3) + sin(theta) * K + (1 - cos(theta)) * K @ K
    np.testing.assert_almost_equal(R @ R.transpose(), np.identity(3))
    return R
def V_operator(w):
    """The V operator of se(3): maps the translation parameters into SE(3)."""
    from math import cos, sin
    w_skew = skew(w)
    theta = np.linalg.norm(w)
    if abs(theta) < 1e-7:
        # Small-angle approximation.
        return np.identity(3) + w_skew / 2.
    coeff_a = (1. - cos(theta)) / (theta * theta)
    coeff_b = (theta - sin(theta)) / (theta * theta * theta)
    return np.identity(3) + coeff_a * w_skew + coeff_b * (w_skew @ w_skew)
def se3_exp(se3):
    """Exponential map se(3) -> SE(3); se3 = [w (3,), t (3,)] concatenated."""
    w = se3[:3]
    t = se3[3:].reshape([3, 1])
    T = np.identity(4)
    T[:3, :3] = so3_exp(w)
    # flatten() because a (3,1) product must be assigned into a (3,) slice.
    T[:3, 3] = (V_operator(w) @ t).flatten()
    return T
def icp_se3():
    """Point-to-point ICP with known correspondences, solved by Gauss-Newton on SE(3).

    Builds a ground-truth transform T, generates target = T @ src, then
    recovers T_var by iterating linearized SE(3) updates; residual Jacobians
    come from the auto-diff ResidualBlock (number_forward_flow).
    """
    print('=============== icp_se3 ==============')
    T = np.identity(4)
    T[:3,:3] = euler_angle_to_rotation(0.2, 0.1, 0.3)
    T[:3, 3] = np.array([1., 2., 3.])
    src = np.array([
        [0, 0, 1],
        [1, 0, 0],
        [0, 1, 0],
        [1, 1, 0],
        [0, 1, 1],
        [1, 0, 1],
        [0, 2, -1],
    ])
    # homogenous coordinate
    src = np.hstack([src, np.ones([src.shape[0],1])]).T
    target = T @ src
    def icp_residual_i(se3_para, T, src_i, target_i):
        # this is the true function
        # return exp(se3_para) @ T @ src_i - target_i
        # but need to implement a couple of operators for so3_exp
        # First-order approximation of exp(se3) so the auto-diff stays simple.
        wx,wy,wz,tx,ty,tz = se3_para
        SE3_approx = np.array([
            [1., -wz, wy, tx],
            [wz, 1., -wx, ty],
            [-wy, wx, 1., tz],
            [0., 0., 0., 1.],
        ])
        return (SE3_approx @ T @ src_i - target_i)[:3]
    T_var = np.identity(4)
    for iter in range(20):
        # Linearize around zero local parameters at the current estimate.
        local_se3 = np.array([0., 0., 0., 0., 0., 0.]).T
        lhs = np.zeros([6, 6])
        rhs = np.zeros(6)
        cost = 0
        for i in range(src.shape[1]):
            src_i = src[:, i]
            target_i = target[:, i]
            r, J = ResidualBlock(lambda param: icp_residual_i(
                param, T_var, src_i, target_i), local_se3).evaluate()
            # Accumulate the Gauss-Newton normal equations factor by factor.
            lhs += J.T @ J
            rhs -= J.T @ r
            cost += np.linalg.norm(r)
        print('iter', iter, 'cost:', cost)
        # Damped step (0.8), retracted onto SE(3) via the exact exponential.
        local_se3_delta = 0.8 * np.linalg.solve(lhs, rhs)
        T_var = se3_exp(local_se3_delta) @ T_var
    print('T_var:', T_var)
    print('T_gt: ', T)
if __name__ == "__main__":
    icp_se3()
|
yimuw/yimu-blog | random/thin_hessian/thin_hessian.py | import numpy as np
import torch
from torch.autograd import Variable
import matplotlib.pyplot as plt
# create dummy data for training
x_values = [i for i in range(11)]
x_train = np.array(x_values, dtype=np.float32)
x_train = x_train.reshape(-1, 1)
y_values = [2*i + 1 for i in x_values]
y_train = np.array(y_values, dtype=np.float32)
y_train = y_train.reshape(-1, 1)
class linearRegression(torch.nn.Module):
    """A single linear layer y = Wx + b wrapped as an nn.Module."""
    def __init__(self, inputSize, outputSize):
        super().__init__()
        self.linear = torch.nn.Linear(inputSize, outputSize)
    def forward(self, x):
        """Apply the linear layer to x."""
        return self.linear(x)
# Train the linear model with SGD on the synthetic y = 2x + 1 data, then plot.
# NOTE(review): torch.autograd.Variable is a deprecated no-op wrapper in
# modern PyTorch; tensors can be used directly.
inputDim = 1        # takes variable 'x'
outputDim = 1       # takes variable 'y'
learningRate = 0.01
epochs = 200
model = linearRegression(inputDim, outputDim)
##### For GPU #######
if torch.cuda.is_available():
    model.cuda()
else:
    print('cuda not available!')
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learningRate)
for epoch in range(epochs):
    # Converting inputs and labels to Variable
    if torch.cuda.is_available():
        inputs = Variable(torch.from_numpy(x_train).cuda())
        labels = Variable(torch.from_numpy(y_train).cuda())
    else:
        inputs = Variable(torch.from_numpy(x_train))
        labels = Variable(torch.from_numpy(y_train))
    # Clear gradient buffers because we don't want any gradient from previous epoch to carry forward, dont want to cummulate gradients
    optimizer.zero_grad()
    # get output from the model, given the inputs
    outputs = model(inputs)
    # get loss for the predicted output
    loss = criterion(outputs, labels)
    print(loss)
    # get gradients w.r.t to parameters
    loss.backward()
    # update parameters
    optimizer.step()
    print('epoch {}, loss {}'.format(epoch, loss.item()))
with torch.no_grad(): # we don't need gradients in the testing phase
    if torch.cuda.is_available():
        predicted = model(Variable(torch.from_numpy(x_train).cuda())).cpu().data.numpy()
    else:
        predicted = model(Variable(torch.from_numpy(x_train))).data.numpy()
    print(predicted)
# Visualize ground truth vs. the fitted line.
plt.clf()
plt.plot(x_train, y_train, 'go', label='True data', alpha=0.5)
plt.plot(x_train, predicted, '--', label='Predictions', alpha=0.5)
plt.legend(loc='best')
plt.show()
yimuw/yimu-blog | random/visitor/vistor_ref_version/example.py | from json_visitor import JsonDumper, JsonLoader
from binary_visitor import BinDumper, BinLoader
from visitor_ref import RefObj, Traversable
class TypeA(Traversable):
    """Example traversable type with a versioned field set.

    Fields are RefObj-wrapped so visitors (dump/load) can mutate them in place;
    v2_var is only visited when the instance's version is >= 2.
    """
    def __init__(self):
        super().__init__()
        # Schema version controls which fields traverse() exposes.
        self.version = RefObj(2)
        self.a = RefObj(1)
        self.b = RefObj("whatever")
        self.c = [RefObj(1), RefObj(-1), RefObj(2.2)]
        self.v2_var = RefObj('version 2!')
    def traverse(self, visitor):
        """Visit each field by name; skip v2_var for pre-v2 instances."""
        super().traverse(visitor)
        visitor.visit('a', self.a)
        visitor.visit('b', self.b)
        visitor.visit('c', self.c)
        if self.version.val >= 2:
            visitor.visit('v2_var', self.v2_var)
class TypeB(Traversable):
    """Example traversable type that nests a TypeA instance."""
    def __init__(self):
        super().__init__()
        self.b1 = RefObj(123)
        # Nested traversable — visitors recurse into it via visit().
        self.instance_of_A = TypeA()
    def traverse(self, visitor):
        """Visit the scalar field and recurse into the nested TypeA."""
        super().traverse(visitor)
        visitor.visit('b1', self.b1)
        visitor.visit('instance_of_A', self.instance_of_A)
def json_example():
    """Round-trip TypeB through the JSON visitors: dump b1, load into b2, re-dump.

    Fix: the banner string said "binary_example" (copy-paste from the sibling
    function); it now names this function.
    """
    print("============= json_example ================")
    # Serialize b1, downgraded to version 1 so the v2-only field is skipped.
    b1 = TypeB()
    b1.instance_of_A.version = RefObj(1)
    json_dumper = JsonDumper()
    json_dumper.visit('b1', b1)
    print('b1 dump:', json_dumper.result, sep='\n')
    b1_dump = json_dumper.result
    # b2 starts with all leaf values cleared.
    b2 = TypeB()
    b2.b1 = RefObj(None)
    b2.instance_of_A.a = RefObj(None)
    b2.instance_of_A.b = RefObj(None)
    b2.instance_of_A.c = []
    json_dumper = JsonDumper()
    json_dumper.visit('b2', b2)
    print('b2 dump:', json_dumper.result, sep='\n')
    # Deserialize b1's dump into b2 ...
    loader = JsonLoader(b1_dump)
    loader.visit('b2', b2)
    # ... and dump b2 again to confirm it now matches b1.
    json_dumper = JsonDumper()
    json_dumper.visit('b2', b2)
    print('b2 dump after load b1:', json_dumper.result, sep='\n')
def binary_example():
    """Round-trip TypeB through the binary visitors: dump b1, load into b2, re-dump.

    Fix: the final dumper local was named `jsonDump` (copy-paste from
    json_example) although it is a BinDumper; renamed for consistency.
    """
    print("============= binary_example ================")
    # Serialize b1 with all default field values.
    b1 = TypeB()
    bin_dumper = BinDumper()
    bin_dumper.visit('no-name', b1)
    print('b1 dump:', bin_dumper.result, sep='\n')
    b1_dump = bin_dumper.result
    # b2 starts with all leaf values cleared.
    b2 = TypeB()
    b2.b1 = RefObj(None)
    b2.instance_of_A.a = RefObj(None)
    b2.instance_of_A.b = RefObj(None)
    b2.instance_of_A.c = []
    bin_dumper = BinDumper()
    bin_dumper.visit('no-name', b2)
    print('b2 dump:', bin_dumper.result, sep='\n')
    # Deserialize b1's dump into b2 ...
    loader = BinLoader(b1_dump)
    loader.visit('no-name', b2)
    # ... and dump b2 again to confirm it now matches b1.
    bin_dumper = BinDumper()
    bin_dumper.visit('no-name', b2)
    print('b2 dump after load b1:', bin_dumper.result, sep='\n')
if __name__ == "__main__":
    # binary_example()
    json_example()
yimuw/yimu-blog | least_squares/fix_lag_smoothing_v2/fix_lag_types.py | <gh_stars>1-10
import scipy.linalg as linalg
import numpy as np
import profiler
class State:
    """Constant-velocity 2-D state holding [x, y, vx, vy]."""
    def __init__(self, variables):
        self.variables = variables
    def unpack_state(self):
        """Return the components as a (x, y, vx, vy) tuple."""
        x, y, vx, vy = self.variables
        return x, y, vx, vy
    def predict(self):
        """One step of the constant-velocity motion model, as a new State."""
        x, y, vx, vy = self.variables
        return State(np.array([x + vx, y + vy, vx, vy]))
    def __repr__(self):
        return self.variables.__repr__()
    @staticmethod
    def size():
        """Number of scalar variables in a state."""
        return 4
class OdometryMeasurement:
    """
    Help the mapping between a state and the state's index in the Hessian of the optimization problem.
    """
    def __init__(self, state1_index, state2_index):
        # Dangeours
        # Stores window indices of the two states joined by this factor.
        self.state1_index = state1_index
        self.state2_index = state2_index
    def __repr__(self):
        return '({}, {})'.format(self.state1_index, self.state2_index)
class OdometryCost:
    """Least-squares factor tying two states through the constant-velocity model."""
    def __init__(self, state1, state2):
        # Holds references (not copies) to the two State endpoints.
        self.state1 = state1
        self.state2 = state2
    def residual(self):
        """4x1 residual: state2 minus the constant-velocity prediction of state1."""
        x1, y1, vx1, vy1 = self.state1.unpack_state()
        x2, y2, vx2, vy2 = self.state2.unpack_state()
        return np.array([
            [x2 - x1 - vx1],
            [y2 - y1 - vy1],
            [vx2 - vx1],
            [vy2 - vy1],
        ])
    def jacobi_wrt_state1(self):
        """d(residual)/d(state1): -I plus the velocity-position coupling terms."""
        jac = -np.identity(4)
        jac[0, 2] = jac[1, 3] = -1.
        return jac
    def jacobi_wrt_state2(self):
        """d(residual)/d(state2) is the identity."""
        return np.identity(4)
    @staticmethod
    def residual_size():
        return 4
    @staticmethod
    def variable_size():
        return 4
class GPSMeasurement:
    """A GPS fix attached to a particular state index."""

    def __init__(self, state_index, gps):
        self.state_index = state_index
        # defensive copy so later mutation by the caller is not seen
        self.gps = gps.copy()

    def __repr__(self):
        return f'({self.state_index}-{self.gps})'
class GPSCost:
    """Least-squares cost pulling a state's position toward a GPS fix."""

    def __init__(self, state, gps):
        self.state = state
        self.gps = gps.copy()

    def residual(self):
        """r = position(state) - gps, as a 2x1 column vector."""
        x, y, _, _ = self.state.unpack_state()
        gx, gy = self.gps
        return np.array([x - gx, y - gy]).reshape(2, 1)

    def jacobi_wrt_state(self):
        """Only the two position components are observed."""
        return np.hstack([np.identity(2), np.zeros([2, 2])])

    @staticmethod
    def residual_size():
        return 2

    @staticmethod
    def variable_size():
        return 4
class PriorCost:
    """Least-squares cost anchoring a state to a prior estimate."""

    def __init__(self, state, prior):
        self.state = state
        self.prior = prior.copy()

    def residual(self):
        """r = state - prior, as a 4x1 column vector."""
        diff = self.state.variables - self.prior
        return diff.reshape([4, 1])

    def jacobi_wrt_state(self):
        return np.identity(4)

    @staticmethod
    def residual_size():
        return 4

    @staticmethod
    def variable_size():
        return 4
|
yimuw/yimu-blog | least_squares/manipulator/manipulator.py | import numpy as np
# from scipy.linalg import logm, expm
from math import cos, sin, pi
import matplotlib.pyplot as plt
def mkdir(dir_name):
    """Create *dir_name* if it does not exist yet.

    Fix: uses makedirs(..., exist_ok=True) instead of an
    exists()/mkdir() pair, which had a race window where the directory
    could appear between the check and the creation. As a bonus, missing
    parent directories are created too.
    """
    import os
    os.makedirs(dir_name, exist_ok=True)
def SO2_expm(theta):
    """Rotation matrix for a planar rotation by *theta* radians."""
    c, s = cos(theta), sin(theta)
    return np.array([[c, -s],
                     [s, c]])
def SO2_log(R):
    """Recover the rotation angle from a 2x2 rotation matrix."""
    return np.arctan2(R[1][0], R[0][0])
def vee(l1):
    """Map the 2-vector (a, b) to (b, -a)."""
    assert(l1.size == 2)
    first, second = l1
    return np.array([second, -first])
class ManipulatorOptimization:
    """Inverse kinematics of a 2-link planar arm, solved on the SO(2) manifold.

    The two joint rotations R1, R2 are the optimization variables; the cost
    is the distance between the end-effector and `target`.
    """

    def __init__(self, target):
        # small non-zero initial angles to avoid a degenerate start
        self.R1_init = SO2_expm(0.01)
        self.R2_init = SO2_expm(0.01)
        self.R1 = self.R1_init
        self.R2 = self.R2_init
        # link vectors in their local frames
        self.l1 = np.array([1., 0])
        self.l2 = np.array([2., 0])
        self.target = target

    def get_theta(self):
        """Return the two joint angles, checking R1/R2 are still rotations."""
        assert(abs(np.linalg.det(self.R1) - 1.) < 1e-8)
        assert(abs(np.linalg.det(self.R2) - 1.) < 1e-8)
        return SO2_log(self.R1), SO2_log(self.R2)

    def end_effector_position(self, R1, R2):
        """Forward kinematics: end-effector position for joint rotations R1, R2."""
        p1_position = R1 @ self.l1
        p2_position = p1_position + R2 @ R1 @ self.l2
        return p2_position

    def end_effector_position_jacobi_wrt_theta(self):
        """Analytic 2x2 jacobian of the residual w.r.t. local angle perturbations."""
        jacobi = np.zeros([2, 2])
        # dr / d w1
        jacobi[:, 0] = - self.R1 @ vee(self.l1) - \
            self.R2 @ self.R1 @ vee(self.l2)
        # dr / d w2
        jacobi[:, 1] = - self.R2 @ vee(self.R1 @ self.l2)
        return jacobi

    def gradient_checking(self):
        """Compare the analytic jacobian against central differences (debug aid)."""
        jacobi_analytic = self.end_effector_position_jacobi_wrt_theta()
        jacobi_nu = np.zeros([2, 2])
        DELTA = 1e-8
        jacobi_nu[:, 0] = (self.residual(self.R1 @ SO2_expm(DELTA), self.R2)
                           - self.residual(self.R1 @ SO2_expm(- DELTA), self.R2)) / (2 * DELTA)
        jacobi_nu[:, 1] = (self.residual(self.R1, self.R2 @ SO2_expm(DELTA))
                           - self.residual(self.R1, self.R2 @ SO2_expm(-DELTA))) / (2 * DELTA)
        print('jacobi_analytic:', jacobi_analytic)
        print('jacobi_nu      :', jacobi_nu)

    def residual(self, R1, R2):
        """r = end_effector(R1, R2) - target."""
        return self.end_effector_position(R1, R2) - self.target

    def SO2_generalized_plus(self, delta_local_params):
        """Retraction: apply local angle increments (w1, w2) to R1, R2."""
        w1, w2 = delta_local_params
        self.R1 = self.R1 @ SO2_expm(w1)
        self.R2 = self.R2 @ SO2_expm(w2)

    def optimize(self):
        """Gauss-Newton iterations on the manifold until the cost is tiny."""
        for iters in range(20):
            # self.gradient_checking()
            jacobi = self.end_effector_position_jacobi_wrt_theta()
            b = self.residual(self.R1, self.R2)
            # normal equations: (J^T J) dw = -J^T b
            delta_local_params = np.linalg.solve(
                jacobi.T @ jacobi, -jacobi.T @ b)
            self.SO2_generalized_plus(delta_local_params)
            cost = b.T @ b
            print('cost: ', cost)
            if cost < 1e-8:
                print('converged at iteration: ', iters)
                break

    def interp_and_show_video(self):
        """Animate the arm from its initial pose to the optimized pose.

        Interpolates geodesically on SO(2) between initial and final joint
        rotations and saves one frame per step into ./res.
        """
        steps = 100
        dt = 0.02
        # basically so2: per-joint angular distance initial -> final
        R1_speed = SO2_log(self.R1_init.T @ self.R1)
        R2_speed = SO2_log(self.R2_init.T @ self.R2)
        mkdir('res')
        for i, k in enumerate(np.linspace(0, 1, 100)):
            R1_k = self.R1_init @ SO2_expm(k * R1_speed)
            R2_k = self.R2_init @ SO2_expm(k * R2_speed)
            p1_position = R1_k @ self.l1
            p2_position = p1_position + R2_k @ R1_k @ self.l2
            p1x, p1y = p1_position
            p2x, p2y = p2_position
            plt.cla()
            plt.plot([0, p1x, p2x], [0, p1y, p2y])
            plt.xlim([-3, 3])
            plt.ylim([-3, 3])
            plt.text(p2x, p2y, 'end-effector')
            plt.savefig('res/{0:03d}.png'.format(i))
            plt.pause(.001)
        plt.show()
def main():
    """Solve a 2-link IK problem and render the interpolated motion."""
    target = np.array([0, 1.2])
    solver = ManipulatorOptimization(target)
    solver.optimize()
    solver.interp_and_show_video()
if __name__ == "__main__":
    # script entry point
    main()
|
yimuw/yimu-blog | least_squares/lie_linear_residual/lie_linear_residaul.py | <filename>least_squares/lie_linear_residual/lie_linear_residaul.py
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt
def random_angle():
    """Draw a uniformly random angle in [-pi, pi]."""
    from random import uniform
    return uniform(-np.pi, np.pi)
def skew_symmetric(w):
    """
    Return the 3x3 cross-product (hat) matrix of the 3-vector w.
    """
    a, b, c = w
    return np.array([[0, -c, b],    # NOLINT
                     [c, 0, -a],    # NOLINT
                     [-b, a, 0]])
def unskew_symmetric(W):
    """Extract the 3-vector w from its skew-symmetric matrix W."""
    assert np.allclose(W, -W.T)
    return np.array([W[2][1], W[0][2], W[1][0]])
def skew_SO3_exp(w):
    """Exponential map: axis-angle vector w -> rotation matrix exp([w]_x)."""
    from scipy.linalg import expm
    rotation = expm(skew_symmetric(w))
    # sanity check: the result must be orthonormal
    assert np.allclose(rotation.T @ rotation, np.identity(3))
    return rotation
def SO3_log_unskew(R):
    """Log map: rotation matrix R -> 3-vector w with exp([w]_x) = R."""
    from scipy.linalg import logm
    # input must be a valid rotation
    assert np.allclose(R.T @ R, np.identity(3))
    if np.allclose(R, R.T):
        # break sysmetry
        # e.g. turning from 0 to pi, you can do clock-wise or counter-close-wise.
        # A symmetric rotation has two equivalent logs; nudge R slightly so
        # logm picks one deterministically.
        return SO3_log_unskew(R @ skew_SO3_exp([1e-6, 0, 0.]))
    return unskew_symmetric(logm(R))
class Problem:
    """A toy SO(3) least-squares problem: drive R toward a random target."""

    def __init__(self):
        # random ground-truth rotation
        self.R_target = skew_SO3_exp(
            [random_angle(), random_angle(),
             random_angle()])

    def residual_function(self, R):
        """
        A residual function r(R) = log(R_target.T @ R)
        """
        return SO3_log_unskew(self.R_target.T @ R)

    def cost(self, R):
        """
        cost(R) = ||residual(R)||^2
        """
        r = self.residual_function(R)
        return r.T @ r

    def numerical_jacobian(self, R):
        """
        Central differences: dr/dw = (r(w + dw) - r(w - dw)) / (2*dw)
        """
        DELTA = 1e-8
        # dr = jacobian @ dw
        jacobian = np.zeros([3, 3])
        for axis in range(3):
            step = np.array([0., 0., 0.])
            step[axis] = DELTA
            r_plus = self.residual_function(R @ skew_SO3_exp(step))
            r_minus = self.residual_function(R @ skew_SO3_exp(-step))
            jacobian[:, axis] = (r_plus - r_minus) / (2 * DELTA)
        return jacobian
def gaussian_newton():
    """A single Gauss-Newton step recovers the target exactly here."""
    R_variable = skew_SO3_exp([random_angle(), random_angle(), random_angle()])
    problem = Problem()
    print("R_init:", R_variable)
    print('cost:', problem.cost(R_variable))
    print('gaussian_newton...')
    residual = problem.residual_function(R_variable)
    jacobian = problem.numerical_jacobian(R_variable)
    # normal equations: (J^T J) dw = -J^T r
    dw = np.linalg.solve(jacobian.T @ jacobian, -jacobian.T @ residual)
    R_single_iteration = R_variable @ skew_SO3_exp(dw)
    print("R_single_iteration:", R_single_iteration)
    print("R_target:", problem.R_target)
    assert np.allclose(R_single_iteration, problem.R_target, 1e-4, 1e-6)
    print('single iter cost:', problem.cost(R_single_iteration))
def plot_cost_sqrt():
    """Plot cost and sqrt(cost) along a random direction through the target."""
    problem = Problem()
    direction = np.random.rand(3, 1)
    direction = direction / np.linalg.norm(direction)
    deltas = np.linspace(-np.pi, 2 * np.pi, 300)
    costs = [problem.cost(problem.R_target @ skew_SO3_exp(d * direction))
             for d in deltas]
    plt.subplot(2, 1, 1)
    plt.plot(deltas, costs)
    plt.title('change-along-a-direction vs cost')
    plt.subplot(2, 1, 2)
    plt.plot(deltas, [sqrt(c) for c in costs])
    plt.title('change-along-a-direction vs sqrt(cost)')
    plt.show()
def main():
    """Run the one-step Gauss-Newton demo, then the cost-shape plots."""
    gaussian_newton()
    plot_cost_sqrt()
if __name__ == "__main__":
main() |
yimuw/yimu-blog | least_squares/kalman/model.py | import math
import matplotlib.pyplot as plt
import numpy as np
# Toggle the quadratic (non-linear) measurement model in PointModel.h/h_jacobian.
NON_LINEAR_EXPERIMENT = True
# Number of simulation steps generated by generate_gt_data().
NUM_OF_SIM_DATA = 30
def plot_cov2(mu, cov):
    """Draw an ellipse-like contour of a 2x2 covariance, centered at mu.

    NOTE(review): the ellipse axes are scaled by the raw eigenvalues
    (w * v); a 1-sigma covariance ellipse usually uses sqrt(w) * v --
    confirm whether this scaling is intentional.
    """
    w, v = np.linalg.eig(cov)
    eclipse_axis = w * v
    # map the unit circle through the (scaled) eigenvector basis
    t = np.linspace(0, 2 * np.pi, 100)
    circle = np.vstack([np.cos(t), np.sin(t)])
    eclipse = np.dot(eclipse_axis, circle)
    plt.plot(eclipse[0, :] + mu[0], eclipse[1, :] + mu[1])
def result_comparison(gt_states, estimated_states, estimated_cov, string_info=''):
    """Plot estimation error and trajectories for a filter run.

    Left subplot: per-step euclidean error of the (x, y) estimate.
    Right subplot: ground-truth vs estimated trajectory, with a covariance
    contour drawn at each estimate.
    """
    gstates = np.vstack(gt_states)
    estates = np.vstack(estimated_states)
    # Fix: subtract the stacked array `gstates`, not the raw list
    # `gt_states` (the old code only worked via numpy's implicit coercion).
    xy_norm = np.linalg.norm((gstates - estates)[:, :2], axis=1)
    plt.figure()
    plt.subplot(1, 2, 1)
    plt.plot(xy_norm)
    plt.title('Method: {} norm(x_diff, y_diff)'.format(string_info))
    plt.subplot(1, 2, 2)
    plt.plot(gstates[:, 0], gstates[:, 1], '-o')
    plt.plot(estates[:, 0], estates[:, 1], '-*')
    for i, cov in enumerate(estimated_cov):
        mu = estimated_states[i][:2]
        plot_cov2(mu, cov[:2, :2])
    plt.title('Method: {} -o is the ground truth. -* is the estimated'.format(string_info))
    plt.axis('equal')
class PointModel:
    """
    Constant-turn-rate point model:
    x = f(x)
    y = h(x)
    with jacobians and (inverse-)weight matrices for filtering.
    """
    DT = 0.1
    NUM_STATES = 5
    NUM_OBSERVATION = 2
    STATE_NAME = ['x', 'y', 'v', 'theta', 'theta_dot']

    @staticmethod
    def unpack_state(state):
        """Split a state vector into (x, y, v, theta, theta_dot)."""
        x, y, v, theta, theta_dot = state
        return x, y, v, theta, theta_dot

    def f(self, state):
        """Propagate the state one step: x_next = f(x)."""
        x, y, v, theta, theta_dot = self.unpack_state(state)
        dt = self.DT
        return np.array([
            x + dt * math.cos(theta) * v,
            y + dt * math.sin(theta) * v,
            v,
            theta + dt * theta_dot,
            theta_dot,
        ])

    def f_jacobian(self, state):
        """df/dx evaluated at `state`."""
        _, _, v, theta, _ = self.unpack_state(state)
        dt = self.DT
        c, s = math.cos(theta), math.sin(theta)
        return np.array([
            [1, 0, dt * c, - dt * s * v, 0],
            [0, 1, dt * s, dt * c * v, 0],
            [0, 0, 1, 0, 0],
            [0, 0, 0, 1, dt],
            [0, 0, 0, 0, 1.],
        ])

    def f_weight(self):
        """Information (weight) matrix of the process model."""
        return np.diag([1, 1, 10, 10, 10])

    def f_cov(self):
        """Process covariance = inverse of the process weight."""
        return np.linalg.inv(self.f_weight())

    def h(self, state):
        """Measurement model z = h(x); quadratic when the experiment flag is on."""
        x, y, _, _, _ = self.unpack_state(state)
        if NON_LINEAR_EXPERIMENT:
            return np.array([x * x, y * y])
        return np.array([x, y])

    def h_jacobian(self, state):
        """dh/dx evaluated at `state`."""
        x, y, _, _, _ = self.unpack_state(state)
        if NON_LINEAR_EXPERIMENT:
            return np.array([
                [2 * x, 0, 0, 0, 0],
                [0, 2 * y, 0, 0, 0],
            ])
        return np.array([
            [1, 0, 0, 0, 0],
            [0, 1, 0, 0, 0],
        ])

    def h_weight(self):
        """Information (weight) matrix of the measurement model."""
        return np.array([
            [5., 0],
            [0, 5.]
        ])

    def h_cov(self):
        """Measurement covariance = inverse of the measurement weight."""
        return np.linalg.inv(self.h_weight())
def get_model():
    """Factory for the simulation model used across this script."""
    return PointModel()
def generate_gt_data(init_state):
    """Simulate noisy ground-truth states and measurements.

    Starting from `init_state`, rolls the model forward NUM_OF_SIM_DATA
    steps, adding small gaussian noise to both the dynamics and the
    measurements. Returns (states, measurements) as parallel lists.
    """
    model = get_model()
    measurement_size = model.NUM_OBSERVATION
    state_size = model.NUM_STATES
    gt_states = []
    gt_measurements = []
    state = init_state
    for _ in range(NUM_OF_SIM_DATA):
        state = model.f(state) + np.random.normal(0, 0.01, state_size)
        measurement = model.h(state) + np.random.normal(0, 0.1, measurement_size)
        gt_states.append(state)
        gt_measurements.append(measurement)
    PLOT_STATE = False
    if PLOT_STATE:
        # optional visual sanity check of the simulated trajectory
        states = np.vstack(gt_states)
        plt.plot(states[:, 0], states[:, 1], '-o')
        plt.title('gt states')
        plt.show()
    return gt_states, gt_measurements
yimuw/yimu-blog | least_squares/fix_lag_smoothing/batch_optimizer.py | <filename>least_squares/fix_lag_smoothing/batch_optimizer.py
import scipy.linalg as linalg
import numpy as np
import profiler
import fix_lag_types as t
class BatchOptimization:
    """Solves the full smoothing problem over all states in one batch.

    States here are 2-vectors; each distance measurement links two of them.
    """

    def __init__(self, states, distances):
        # one distance measurement between each pair of consecutive states
        assert(len(states) - 1 == len(distances))
        self.states = states
        self.distances = distances
        self.num_states = len(states)

    @profiler.time_it
    def optimize(self):
        """Solve the least-squares problem; the system is linear so a single
        normal-equation solve suffices."""
        jacobi, r = self.construct_linear_system()
        # one step for linear system
        x = np.linalg.solve(jacobi.T @ jacobi, - jacobi.T @ r)
        return x

    def construct_linear_system(self):
        """Stack the prior and distance residuals/jacobians into one system."""
        size_prior_residual = 2
        size_distance_residual = 2 * (self.num_states - 1)
        size_residual = size_distance_residual + size_prior_residual
        size_variables = 2 * self.num_states
        jacobi = np.zeros([size_residual, size_variables])
        residual = np.zeros([size_residual, 1])
        residual_index = 0
        # Add prior to first state. Not a good design.
        pm = t.PriorMeasurement(self.states[0], self.states[0].variables)
        jacobi[residual_index:residual_index +
               pm.residual_size(), 0:0 + pm.variable_size()] = pm.jacobi()
        residual[residual_index:residual_index +
                 pm.residual_size()] = pm.residual()
        residual_index += pm.residual_size()
        for distance_between in self.distances:
            state1_idx = distance_between.state1_index
            state1_var_idx = 2 * state1_idx
            state1 = self.states[state1_idx]
            state2_idx = distance_between.state2_index
            state2_var_idx = 2 * state2_idx
            state2 = self.states[state2_idx]
            distance = distance_between.distance
            dm = t.DistanceMeasurement(state1, state2, distance)
            # scatter the per-measurement jacobian blocks into the big matrix
            jacobi[residual_index:residual_index + dm.residual_size(),
                   state1_var_idx:state1_var_idx + dm.variable_size()] = dm.jacobi_wrt_state1()
            jacobi[residual_index:residual_index + dm.residual_size(),
                   state2_var_idx:state2_var_idx + dm.variable_size()] = dm.jacobi_wrt_state2()
            residual[residual_index:residual_index +
                     dm.residual_size()] = dm.residual()
            residual_index += dm.residual_size()
        # every residual row must have been filled
        assert(residual_index == self.num_states * 2)
        return jacobi, residual
|
yimuw/yimu-blog | random/thin_hessian/expr2.py | <gh_stars>1-10
import numpy as np
import matplotlib.pyplot as plt
import math
class function3:
    """Quadratic test function f(x) = x^T A x + B^T x + C."""

    def __init__(self):
        self.A = np.array([
            [0.1, 0.2],
            [0.2, 4.]
        ])
        self.B = np.array([2, 2.]).T
        self.C = 0

    def print_gt(self):
        """Print the analytic minimizer x* = -A^{-1} B / 2."""
        print('x*_gt:', np.linalg.inv(- self.A) @ self.B / 2 )

    def f(self, x):
        """Evaluate the quadratic at x."""
        A, B, C = self.A, self.B, self.C
        return x.T @ A @ x + B.T @ x + C

    def df(self, x):
        """Exact gradient: 2 A x + B.

        Fix: the original kept unreachable noise-injection code after the
        `return`; it is removed here (behavior unchanged). Re-add noise
        before the return if the noisy-gradient experiment is wanted again.
        """
        A, B, C = self.A, self.B, self.C
        grad = 2 * A @ x + B
        return grad
class Solver:
    """Experimental optimizer that fits a per-coordinate quadratic surrogate
    (grad_i ~ 2*A_i*x_i + B_i) to sampled gradients and steps toward the
    surrogate's center."""

    def __init__(self, func, x0):
        self.func = func
        # current iterate
        self.x = x0
        # surrogate parameters, one (A_i, B_i) pair per coordinate
        self.A = np.array([1. ,1.]).T
        self.B = np.array([0. ,0.]).T
        self.iteration = 0

    def iter(self):
        """One update: sample two gradients, refit (A, B), then move x."""
        x0 = self.x
        d0 = self.func.df(x0)
        # print("x0:", x0)
        # print("d0:", d0)
        # important. Make sure the sample points are far in normalized space
        direction = - d0
        alpha = 0.1
        x1 = x0 + alpha * direction
        d1 = self.func.df(x1)
        # stack the two gradient observations: solve A_mat @ [a; b] ~ [d0; d1]
        b = np.hstack([d0, d1])
        A = np.identity(4)
        A[0:2, 0:2] = np.diag(2 * x0)
        A[0:2, 2:4] = np.identity(2)
        A[2:4, 0:2] = np.diag(2 * x1)
        A[2:4, 2:4] = np.identity(2)
        # print('A, b:', A, b)
        # prior: pull the fit toward the previous (A, B) estimate
        K = 0.25
        prior_b = K * np.hstack([self.A, self.B])
        prior_A = K * np.identity(4)
        # p = np.linalg.solve(A , b) # solve for para directly
        p = np.linalg.solve(A.T @ A + prior_A, A.T @ b + prior_b) # least squares
        res_a, res_b = p[:2], p[2:4]
        # exponential moving average of the surrogate parameters
        update_weight = 0.9
        self.A = update_weight * self.A + (1- update_weight) * res_a
        self.B = update_weight * self.B + (1- update_weight) * res_b
        print('self.A', self.A)
        print('self.B', self.B)
        # self.x = x1
        method = 'c'
        if method == 'c': # center
            # step 10% of the way toward the surrogate's minimizer
            x_star_est = - self.B / self.A / 2 # since A is diag
            print('x_star_est:', x_star_est)
            self.x = x0 + 0.1 * (x_star_est - x0)
        elif method == 'gd':
            self.x = x0 - 0.01 * d0
        # elif method == 's': # scale
        #     normalized_d0 = d0 / abs(self.a)
        #     print("d0, d0_norm: ", d0, normalized_d0)
        #     self.x = x0 - 0.01 * normalized_d0
        else:
            # NOTE(review): bare `raise` with no active exception raises
            # RuntimeError; an explicit ValueError would be clearer.
            raise
        print('x:', self.x)
def test4():
    """Run the surrogate-model solver on the 2D quadratic for 300 iterations."""
    func = function3()
    solver = Solver(func, np.array([2, 3]))
    for i in range(300):
        print('i:', i)
        solver.iter()
    func.print_gt()
if __name__ == "__main__":
    # script entry point
    test4()
|
yimuw/yimu-blog | deep-learning/tensorflow-from-scratch/linear_regression.py | <filename>deep-learning/tensorflow-from-scratch/linear_regression.py
import variables_tree_flow as vtf
import numpy as np
class LinearRegression:
    """Linear regression trained by gradient descent on a vtf compute graph.

    Mimics the sklearn estimator interface: fit(X, y) sets coef_ and
    intercept_, predict(X) returns X @ coef_ + intercept_.
    """

    def __init__(self):
        pass

    def fit(self, X, y):
        # X: (n_samples, n_features), y: (n_samples,)
        return self.__fit_numpy_impl(X,y)

    def __fit_numpy_impl(self, X, y):
        """Build the mean-squared-error graph and run gradient descent."""
        assert len(X) == len(y)
        self.data_num = len(X)
        self.len_theta = X.shape[1]
        # one vtf variable per weight, all initialized to zero
        theta = np.array([vtf.Variable(value=0, id='t{}'.format(i))
                          for i in range(self.len_theta)])
        bias = vtf.Variable(value=0, id='b')
        pred = X @ theta + bias
        diff = pred - y
        # mean squared error over the training set
        cost = diff.T @ diff * (1 / len(X))
        core = vtf.NumberFlowCore(cost)
        for i in range(20000):
            core.forward()
            if i % 500 == 0:
                print("cost.val:", cost.value, " iter:", i)
            core.clear_grad()
            core.backward()
            core.gradient_desent(rate=0.1)
        for var in set(core.varible_nodes):
            print(var.id, var.value)
        # expose results in sklearn-style attributes
        self.coef_ = np.array([t.value for t in theta])
        self.intercept_ = bias.value

    def predict(self, X):
        """Predict targets for X using the fitted coefficients."""
        res = X @ self.coef_ + self.intercept_
        return res
# this is basically a sklearn linear regression example.
# change the METHOD to see difference.
from sklearn.metrics import mean_squared_error, r2_score
from sklearn import datasets, linear_model
import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
    # Demo: run the vtf-based regression on the sklearn diabetes dataset.
    # 'sklearn' or 'vtf'
    METHOD = 'vtf'
    # Create linear regression object
    if METHOD == 'sklearn':
        regr = linear_model.LinearRegression()
    elif METHOD == 'vtf':
        regr = LinearRegression()
    # Load the diabetes dataset
    diabetes_X, diabetes_y = datasets.load_diabetes(return_X_y=True)
    # Use only one feature
    diabetes_X = diabetes_X[:, np.newaxis, 2]
    # Split the data into training/testing sets
    diabetes_X_train = diabetes_X[:-400]
    diabetes_X_test = diabetes_X[-400:]
    # Split the targets into training/testing sets
    diabetes_y_train = diabetes_y[:-400]
    diabetes_y_test = diabetes_y[-400:]
    # Train the model using the training sets
    regr.fit(diabetes_X_train, diabetes_y_train)
    # Make predictions using the testing set
    diabetes_y_pred = regr.predict(diabetes_X_test)
    # The fitted coefficients (either backend exposes the same attributes)
    print('Coefficients:', regr.coef_)
    print('Intercept:', regr.intercept_)
    # Plot outputs
    plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
    plt.plot(diabetes_X_test, diabetes_y_pred, color='blue', linewidth=3)
    plt.xticks(())
    plt.yticks(())
    plt.show()
|
yimuw/yimu-blog | least_squares/fix_lag_smoothing_v2/fix_lag_smoother.py | <gh_stars>1-10
import scipy.linalg as linalg
import numpy as np
import profiler
import fix_lag_types as t
import copy
from collections import deque
# When True, solve_and_update() records (state + dx) as the result without
# writing dx back into the optimization variables.
UNBIAS_ESTIMATION = True
# Which marginalization implementation to use: 'schur' or 'gaussian'.
MARGINALIZATION_METHOD = 'gaussian'
class FixLagSmoother:
    """Sliding-window least-squares smoother.

    Keeps the last `window_size` states as optimization variables and folds
    older states into a gaussian prior via marginalization.
    """

    def __init__(self, prior_measurement, gps_measurement):
        # using deque (linked list) for head & tail lookup
        self.__state_opt_vars = deque([t.State(prior_measurement.copy())])
        # mantain a lookup between state idx and state in the deque
        self.__state_idx_offset = 0
        # make sure the fixed-lagged graph invariant hold at the beginning
        self.__odometry_measurements = deque()
        self.__gps_measurements = deque([gps_measurement])
        # For prior cost, Jacobian is I. Direct assign for simplicity
        # Assuming Identity weight matrix.
        self.__prior_hessian = np.identity(4)
        self.__prior_mean = np.zeros((4, 1))
        # config
        self.__window_size = 5
        # result for easy query
        self.__result = []
        self.__diag_covs = []

    def get_all_states(self):
        """All smoothed states stacked into one array."""
        return np.vstack(self.__result)

    def get_diag_cov(self):
        """Diagonal covariance entries of every state, stacked."""
        return np.vstack(self.__diag_covs)

    def state_index_lookup(self, idx):
        """Map a global state index to its position in the deque."""
        return idx - self.__state_idx_offset

    def extent_graph(self, odometry_measurement, gps_measurement):
        """Append a new state plus its odometry/GPS measurements."""
        # extent the graph
        self.__odometry_measurements.append(odometry_measurement)
        self.__gps_measurements.append(gps_measurement)
        # initialize the new state as a copy of the latest one
        self.__state_opt_vars.append(
            t.State(self.__state_opt_vars[-1].variables.copy()))

    def construct_linear_system(self):
        """
        Build the normal equations (lhs, rhs) over all in-window costs.

        Adding new measurements. Insert a new state implicitly
        """
        size_state = t.State.size()
        num_states = len(self.__state_opt_vars)
        size_opt_vars = size_state * num_states
        # normal equation
        lhs = np.zeros([size_opt_vars, size_opt_vars])
        rhs = np.zeros([size_opt_vars, 1])
        # Adding prior cost
        # update normal equation directly using quadratic intepretation.
        # otherwise, we need do to a Cholesky which is not efficient.
        lhs[:size_state, :size_state] += self.__prior_hessian
        rhs[:size_state] += self.__prior_mean
        # Odometry costs
        for odometry_measurement in self.__odometry_measurements:
            state1_idx = self.state_index_lookup(
                odometry_measurement.state1_index)
            state1_var_idx = size_state * state1_idx
            state1 = self.__state_opt_vars[state1_idx]
            state2_idx = self.state_index_lookup(
                odometry_measurement.state2_index)
            state2_var_idx = size_state * state2_idx
            state2 = self.__state_opt_vars[state2_idx]
            odometry_cost = t.OdometryCost(state1, state2)
            cur_jacobi = np.zeros(
                [odometry_cost.residual_size(), size_opt_vars])
            cur_jacobi[:, state1_var_idx:state1_var_idx +
                       odometry_cost.variable_size()] = odometry_cost.jacobi_wrt_state1()
            cur_jacobi[:, state2_var_idx:state2_var_idx +
                       odometry_cost.variable_size()] = odometry_cost.jacobi_wrt_state2()
            cur_residual = odometry_cost.residual()
            # Using the linear property of the least squares costs
            lhs += cur_jacobi.T @ cur_jacobi
            rhs += cur_jacobi.T @ cur_residual
        # GPS costs
        for gps_measurement in self.__gps_measurements:
            state_idx = self.state_index_lookup(gps_measurement.state_index)
            state_var_idx = size_state * state_idx
            state = self.__state_opt_vars[state_idx]
            gps_cost = t.GPSCost(state, gps_measurement.gps)
            cur_jacobi = np.zeros([gps_cost.residual_size(), size_opt_vars])
            cur_jacobi[:, state_var_idx:state_var_idx +
                       gps_cost.variable_size()] = gps_cost.jacobi_wrt_state()
            cur_residual = gps_cost.residual()
            # Using the linear property of the least squares costs
            lhs += cur_jacobi.T @ cur_jacobi
            rhs += cur_jacobi.T @ cur_residual
        return lhs, rhs

    def covariance_estimation(self, lhs):
        """Covariance = inverse of the normal-equation lhs."""
        return np.linalg.inv(lhs)

    def solve_and_update(self, lhs, rhs):
        """Solve the normal equations and write results/covariances back."""
        # one step for linear system
        # Using Nonlinear Gaussian-Newton formulation, the __result is dx
        num_states = len(self.__state_opt_vars)
        dx = np.linalg.solve(lhs, - rhs)
        dx = dx.reshape(num_states, t.State.size())
        # No practical for nonlinear case
        if UNBIAS_ESTIMATION:
            # record state + dx without mutating the optimization variables
            for i in range(len(self.__state_opt_vars)):
                state_idx = self.__state_idx_offset + i
                if state_idx == len(self.__result):
                    self.__result.append(None)
                self.__result[state_idx] = self.__state_opt_vars[i].variables + dx[i]
        else:
            # iterator for a deque
            for i, s in enumerate(self.__state_opt_vars):
                s.variables += dx[i]
                state_idx = self.__state_idx_offset + i
                if state_idx == len(self.__result):
                    self.__result.append(None)
                self.__result[state_idx] = self.__state_opt_vars[i].variables
        # store the per-state diagonal covariance blocks
        cov = self.covariance_estimation(lhs)
        for i in range(len(self.__state_opt_vars)):
            state_idx = self.__state_idx_offset + i
            if state_idx == len(self.__diag_covs):
                self.__diag_covs.append(None)
            size_state = t.State.size()
            self.__diag_covs[state_idx] = np.diag(
                cov[i*size_state:(i+1)*size_state, i*size_state:(i+1)*size_state])

    @profiler.time_it
    def optimize_for_new_measurement(self, distance_measurement, gps_measurement):
        """Add a measurement pair, re-optimize, and marginalize if the
        window grew beyond its size limit."""
        self.extent_graph(distance_measurement, gps_measurement)
        lhs, rhs = self.construct_linear_system()
        self.solve_and_update(lhs, rhs)
        if len(self.__state_opt_vars) > self.__window_size:
            if MARGINALIZATION_METHOD == 'schur':
                self.marginalization_schur_impl()
            elif MARGINALIZATION_METHOD == 'gaussian':
                self.marginalization_gaussian_impl()
            else:
                assert False, "unknow marginalization method"

    def construct_marginalization_equation(self):
        """
        get all costs connect to the state to be marginalized and construct the equation for marginalization.
        TODO: this can be a by-product from construct_linear_system.
        """
        size_state = t.State.size()
        # only the oldest state and its neighbor are involved
        num_states = 2
        size_opt_vars = size_state * num_states
        # normal equation
        lhs = np.zeros([size_opt_vars, size_opt_vars])
        rhs = np.zeros([size_opt_vars, 1])
        # remove data related to state to be marginalized
        # using prior knowledge. not general.
        gps_to_be_marginalized = self.__gps_measurements.popleft()
        odo_to_be_marginalized = self.__odometry_measurements.popleft()
        # Prior cost
        lhs[:size_state, :size_state] += self.__prior_hessian
        rhs[:size_state] += self.__prior_mean
        # Odometry cost
        state1_idx = self.state_index_lookup(
            odo_to_be_marginalized.state1_index)
        state1_var_idx = size_state * state1_idx
        state1 = self.__state_opt_vars[state1_idx]
        state2_idx = self.state_index_lookup(
            odo_to_be_marginalized.state2_index)
        state2_var_idx = size_state * state2_idx
        state2 = self.__state_opt_vars[state2_idx]
        odometry_cost = t.OdometryCost(state1, state2)
        cur_jacobi = np.zeros([odometry_cost.residual_size(), size_opt_vars])
        cur_jacobi[:, state1_var_idx:state1_var_idx +
                   odometry_cost.variable_size()] = odometry_cost.jacobi_wrt_state1()
        cur_jacobi[:, state2_var_idx:state2_var_idx +
                   odometry_cost.variable_size()] = odometry_cost.jacobi_wrt_state2()
        cur_residual = odometry_cost.residual()
        # Using the linear property of the least squares costs
        lhs += cur_jacobi.T @ cur_jacobi
        rhs += cur_jacobi.T @ cur_residual
        # GPS costs
        state_idx = self.state_index_lookup(gps_to_be_marginalized.state_index)
        state_var_idx = size_state * state_idx
        state = self.__state_opt_vars[state_idx]
        gps_cost = t.GPSCost(state, gps_to_be_marginalized.gps)
        cur_jacobi = np.zeros([gps_cost.residual_size(), size_opt_vars])
        cur_jacobi[:, state_var_idx:state_var_idx +
                   gps_cost.variable_size()] = gps_cost.jacobi_wrt_state()
        cur_residual = gps_cost.residual()
        # Using the linear property of the least squares costs
        lhs += cur_jacobi.T @ cur_jacobi
        rhs += cur_jacobi.T @ cur_residual
        return lhs, rhs

    @profiler.time_it
    def marginalization_schur_impl(self):
        """Marginalize the oldest state via the Schur complement."""
        lhs, rhs = self.construct_marginalization_equation()
        size_state = t.State.size()
        # lhs = [A, B; C, D]
        A = lhs[:size_state, :size_state]
        B = lhs[:size_state, size_state:]
        C = lhs[size_state:, :size_state]
        D = lhs[size_state:, size_state:]
        # rhs = [b1, b2]
        b1 = rhs[:size_state]
        b2 = rhs[size_state:]
        A_inv = np.linalg.inv(A)
        # schur
        self.__prior_hessian = D - C @ A_inv @ B
        self.__prior_mean = b2 - C @ A_inv @ b1
        # pop tail
        self.__state_opt_vars.popleft()
        self.__state_idx_offset += 1

    @profiler.time_it
    def marginalization_gaussian_impl(self):
        """Marginalize the oldest state by working in covariance form."""
        lhs, rhs = self.construct_marginalization_equation()
        size_state = t.State.size()
        lhs_LU = linalg.lu_factor(lhs)
        covariance = linalg.lu_solve(lhs_LU, np.identity(size_state * 2))
        mean_covariance_form = covariance @ rhs
        # marginal of a gaussian: take the sub-block of the covariance
        self.__prior_hessian = np.linalg.inv(
            covariance[size_state:, size_state:])
        # This is tricky
        # get it by equating ||Ax + b||^2 = ||x + mu ||^2_W
        self.__prior_mean = self.__prior_hessian @ mean_covariance_form[size_state:]
        # pop tail
        self.__state_opt_vars.popleft()
        self.__state_idx_offset += 1
|
yimuw/yimu-blog | least_squares/ceres-from-scratch/icp_so3.py | <filename>least_squares/ceres-from-scratch/icp_so3.py
from number_forward_flow import *
def euler_angle_to_rotation(yaw, pitch, roll):
    """Compose a rotation matrix R = Rz(yaw) @ Ry(pitch) @ Rx(roll)."""
    from math import cos, sin
    cz, sz = cos(yaw), sin(yaw)
    cy, sy = cos(pitch), sin(pitch)
    cx, sx = cos(roll), sin(roll)
    Rz = np.array([[cz, -sz, 0.],
                   [sz, cz, 0.],
                   [0., 0., 1.]])
    Ry = np.array([[cy, 0., sy],
                   [0., 1., 0.],
                   [-sy, 0., cy]])
    Rx = np.array([[1., 0., 0.],
                   [0., cx, -sx],
                   [0., sx, cx]])
    return Rz @ Ry @ Rx
def skew(w):
    """Return the 3x3 cross-product (hat) matrix of w = (wx, wy, wz)."""
    wx, wy, wz = w
    return np.array([[0., -wz, wy],
                     [wz, 0., -wx],
                     [-wy, wx, 0.]])
def so3_exp(w):
    """Rodrigues' formula: axis-angle vector w -> rotation matrix."""
    from math import cos, sin
    theta = np.linalg.norm(w)
    if(abs(theta) < 1e-8):
        # first-order approximation for tiny angles
        return np.identity(3) + skew(w)
    K = skew(w / theta)
    # Rodrigues
    R = np.identity(3) + sin(theta) * K + (1 - cos(theta)) * K @ K
    # sanity check: R must be orthonormal
    np.testing.assert_almost_equal(R @ R.transpose(), np.identity(3))
    return R
def icp_so3():
    """Point-to-point ICP on SO(3) x R^3 with an optional Cauchy robust loss.

    Builds a synthetic point set, transforms it with a known (R, t), then
    recovers the transform by damped Gauss-Newton on a local so(3)
    parameterization. Prints estimated vs ground-truth transform.
    """
    APPLY_CAUCHY_LOSS = True
    ADD_WRONG_ASSOCIATION = True
    print('=============== icp_so3 ==============')
    R = euler_angle_to_rotation(0.2, 0.1, 0.3)
    assert(abs(np.linalg.det(R) - 1) < 1e-4)
    t = np.array([1., 2., 3.]).T
    src = np.array([
        [0, 0, 1],
        [1, 0, 0],
        [0, 1, 0],
        [1, 1, 0],
        [0, 1, 1],
        [1, 0, 1],
        [0, 2, -1],
    ])

    def transform(p, R, t):
        # rigid transform of a single point
        return R @ p + t

    def transform_all(src, R, t):
        # rigid transform of all points (rows of src)
        return (R @ src.T + t[:, np.newaxis]).T
    target = transform_all(src, R, t)
    if ADD_WRONG_ASSOCIATION:
        # inject one outlier correspondence to exercise the robust loss
        src = np.vstack([src, np.array([1, 2, 3])])
        target = np.vstack([target, np.array([10, 20, 30])])

    def icp_residual_i(epsilonWithT, Rot, src_i, target_i):
        # epsilonWithT = [so(3) perturbation (3), translation (3)]
        E = skew(epsilonWithT[:3])
        t = epsilonWithT[3:]
        # this is the true function
        # return transform(src_i, so3_exp(epsilonWithT[:3]) @ R, t) - target_i
        # but need to implement a couple of operators for so3_exp
        return transform(src_i, (E + np.identity(3)) @ Rot, t) - target_i
    epsilonWithT_var = np.array([0., 0., 0., 0., 0., 0.]).T
    R_var = np.identity(3)
    for iter in range(20):
        lhs = np.zeros([6, 6])
        rhs = np.zeros(6)
        cost = 0
        for src_i, target_i in zip(src, target):
            r, J = ResidualBlock(lambda param: icp_residual_i(
                param, R_var, src_i, target_i), epsilonWithT_var).evaluate()
            if APPLY_CAUCHY_LOSS:
                def cauchy_dot(s):
                    # derivative of the cauchy loss log(1 + s)
                    return 1 / (1 + s)
                cauchy_weigth = cauchy_dot(r.T @ r)
                lhs += cauchy_weigth * J.T @ J
                rhs -= cauchy_weigth * J.T @ r
                cost += math.log(1 + np.linalg.norm(r))
            else:
                lhs += J.T @ J
                rhs -= J.T @ r
                cost += np.linalg.norm(r)
        print('iter:', iter, 'cost:', cost)
        # damped Gauss-Newton step; the rotation update is applied on the manifold
        delta = 0.8 * np.linalg.solve(lhs, rhs)
        R_var = so3_exp(delta[:3]) @ R_var
        epsilonWithT_var[3:] += delta[3:]
    print('t_est:', epsilonWithT_var[3:])
    print('t_gt: ', t)
    print('R_var:', R_var)
    print('R_gt: ', R)
if __name__ == "__main__":
    # run the ICP demo
    icp_so3()
|
yimuw/yimu-blog | least_squares/fix_lag_smoothing_v2/batch_optimizer.py | import scipy.linalg as linalg
import numpy as np
import profiler
import fix_lag_types as t
class BatchOptimization:
    """Batch least-squares smoother over all states at once.

    Builds the stacked jacobian/residual for the prior, odometry and GPS
    costs, runs one Gauss-Newton step (the problem is linear), and returns
    the updated states plus a covariance estimate.
    """

    def __init__(self, states, odometry_measurements, gps_measurements, prior_measurement):
        # one odometry link per consecutive pair, one GPS per state
        assert(len(states) - 1 == len(odometry_measurements))
        assert(len(states) == len(gps_measurements))
        self.__states = states
        self.__odometry_measurements = odometry_measurements
        self.__gps_measurements = gps_measurements
        self.__prior_measurement = prior_measurement
        self.__num_states = len(states)

    @profiler.time_it
    def optimize(self):
        """Run one Gauss-Newton step and apply dx to every state."""
        jacobi, r = self.construct_linear_system()
        # one step for linear system
        # Using Nonlinear Gaussian-Newton formulation, the result is dx
        lhs = jacobi.T @ jacobi
        rhs = jacobi.T @ r
        dx = np.linalg.solve(lhs, - rhs)
        dx = dx.reshape(self.__num_states, t.State.size())
        for i, s in enumerate(self.__states):
            s.variables += dx[i]
        return self.__states, self.covariance_estimation(lhs)

    def covariance_estimation(self, lhs):
        """Covariance of the estimate = inverse of the Gauss-Newton hessian."""
        return np.linalg.inv(lhs)

    def construct_linear_system(self):
        """Stack jacobians/residuals of all costs into one linear system.

        Perf fix: jacobian and residual rows are collected in lists and
        stacked once at the end. The previous np.vstack-per-cost loop
        re-copied the growing arrays each iteration and was quadratic in
        the number of measurements.
        """
        size_state = t.State.size()
        size_variables = size_state * self.__num_states
        jacobi_rows = []
        residual_rows = []
        # Prior cost anchors the first state
        prior_cost = t.PriorCost(
            self.__states[0], self.__prior_measurement)
        prior_jacobi = np.zeros([prior_cost.residual_size(), size_variables])
        prior_jacobi[:, 0:prior_cost.variable_size()] = prior_cost.jacobi_wrt_state()
        jacobi_rows.append(prior_jacobi)
        residual_rows.append(prior_cost.residual())
        # Odometry costs tie consecutive states together
        for odometry_measurement in self.__odometry_measurements:
            state1_idx = odometry_measurement.state1_index
            state1_var_idx = size_state * state1_idx
            state1 = self.__states[state1_idx]
            state2_idx = odometry_measurement.state2_index
            state2_var_idx = size_state * state2_idx
            state2 = self.__states[state2_idx]
            odometry_cost = t.OdometryCost(state1, state2)
            cur_jacobi = np.zeros(
                [odometry_cost.residual_size(), size_variables])
            cur_jacobi[:, state1_var_idx:state1_var_idx +
                       odometry_cost.variable_size()] = odometry_cost.jacobi_wrt_state1()
            cur_jacobi[:, state2_var_idx:state2_var_idx +
                       odometry_cost.variable_size()] = odometry_cost.jacobi_wrt_state2()
            jacobi_rows.append(cur_jacobi)
            residual_rows.append(odometry_cost.residual())
        # GPS costs pin individual states to their measurements
        for gps_measurement in self.__gps_measurements:
            state_idx = gps_measurement.state_index
            state_var_idx = size_state * state_idx
            state = self.__states[state_idx]
            gps_cost = t.GPSCost(state, gps_measurement.gps)
            cur_jacobi = np.zeros([gps_cost.residual_size(), size_variables])
            cur_jacobi[:, state_var_idx:state_var_idx +
                       gps_cost.variable_size()] = gps_cost.jacobi_wrt_state()
            jacobi_rows.append(cur_jacobi)
            residual_rows.append(gps_cost.residual())
        # stack once: O(total rows) instead of O(rows^2)
        return np.vstack(jacobi_rows), np.vstack(residual_rows)
|
yimuw/yimu-blog | random/optimal_matrix_multiplcation/matrix_mul.py | <gh_stars>1-10
import timeit
import numpy as np
import inspect
class Node:
    """Base node of the expression tree; tracks shape and a canonical string."""

    def __init__(self, type):
        self.node_type = type
        # matrix shape; filled in lazily for op nodes
        self.shape = None
        # canonical string, used as a memoization key
        self.expression_string = None
class OpNode(Node):
    """Interior node: a binary operator applied to two subtrees."""

    def __init__(self, op):
        super().__init__('OpNode')
        self.op = op
        # children are attached by the tree builders
        self.left = None
        self.right = None
class VarNode(Node):
    """Leaf node wrapping a Variable; shape and name come from its data."""

    def __init__(self, var):
        super().__init__('VarNode')
        self.var = var
        self.shape = var.data.shape
        self.expression_string = var.name
def print_tree(root):
    '''
    Print the expression tree as ASCII art, level by level.
    https://stackoverflow.com/questions/34012886/print-binary-tree-level-by-level-in-python
    '''
    def _display_aux(root):
        # Returns (lines, width, height, horizontal position of the root).
        # No child.
        if root.node_type == 'VarNode':
            line = root.var.name
            width = len(line)
            height = 1
            middle = width // 2
            return [line], width, height, middle
        # Two children.
        left, n, p, x = _display_aux(root.left)
        right, m, q, y = _display_aux(root.right)
        s = root.op
        u = len(s)
        # operator line with underscores reaching toward each child,
        # then the '/' and '\' connector line
        first_line = (x + 1) * ' ' + (n - x - 1) * \
            '_' + s + y * '_' + (m - y) * ' '
        second_line = x * ' ' + '/' + \
            (n - x - 1 + u + y) * ' ' + '\\' + (m - y - 1) * ' '
        # pad the shorter subtree so both sides have equal height
        if p < q:
            left += [n * ' '] * (q - p)
        elif q < p:
            right += [m * ' '] * (p - q)
        zipped_lines = zip(left, right)
        lines = [first_line, second_line] + \
            [a + u * ' ' + b for a, b in zipped_lines]
        return lines, n + m + u, max(p, q) + 2, n + u // 2
    lines, _, _, _ = _display_aux(root)
    for line in lines:
        print(line)
def count_num_muls(trees):
    """Yield the scalar-multiplication count of each expression tree.

    Costs of repeated sub-expressions are memoized by their expression
    string, so shared subtrees across trees are only costed once.
    """
    memo = {}

    def _count(node):
        key = node.expression_string
        if key in memo:
            cached_count, cached_shape = memo[key]
            # update the shape of mul
            node.shape = cached_shape
            return cached_count
        if node.node_type == 'VarNode':
            return 0
        assert node.node_type == 'OpNode'
        muls_left = _count(node.left)
        muls_right = _count(node.right)
        rows_l, cols_l = node.left.shape
        rows_r, cols_r = node.right.shape
        assert cols_l == rows_r, 'matrix dim mismatch'
        node.shape = (rows_l, cols_r)
        # (rows_l x cols_l) @ (cols_l x cols_r) costs rows_l*cols_l*cols_r
        total = muls_left + muls_right + cols_l * rows_l * cols_r
        memo[key] = (total, node.shape)
        return total

    for tree in trees:
        yield _count(tree)
class Variable():
    """A named matrix operand for the expression trees."""

    def __init__(self, name, data):
        self.name = name
        self.data = data
def build_expression_tree_simple(vars):
    '''
    Build one fully right-associated tree: A@(B@(C@...)).
    '''
    head, tail = vars[0], vars[1:]
    if not tail:
        return VarNode(head)
    node = OpNode('@')
    node.left = VarNode(head)
    node.right = build_expression_tree_simple(tail)
    node.expression_string = \
        '({}*{})'.format(node.left.expression_string,
                         node.right.expression_string)
    return node
def build_expression_tree_mid(vars):
    '''
    Split once at the middle; each half is right-associated.
    '''
    if len(vars) == 1:
        return VarNode(vars[0])
    mid = len(vars) // 2
    node = OpNode('@')
    node.left = build_expression_tree_simple(vars[:mid])
    node.right = build_expression_tree_simple(vars[mid:])
    node.expression_string = \
        '({}*{})'.format(node.left.expression_string,
                         node.right.expression_string)
    return node
def build_all_expression_tree(vars):
    '''
    Enumerate every full parenthesization of the matrix chain
    (Catalan-many trees; subtrees are shared between parents).
    '''
    if len(vars) == 1:
        return [VarNode(vars[0])]
    result = []
    for split in range(1, len(vars)):
        lefts = build_all_expression_tree(vars[:split])
        rights = build_all_expression_tree(vars[split:])
        for left_sub in lefts:
            for right_sub in rights:
                node = OpNode('@')
                node.left = left_sub
                node.right = right_sub
                node.expression_string = \
                    '({}@{})'.format(left_sub.expression_string,
                                     right_sub.expression_string)
                result.append(node)
    return result
def gen_test_data():
    """Chain of six matrices with compatible inner dimensions."""
    shapes = [('A', 20, 20), ('B', 20, 35), ('C', 35, 100),
              ('D', 100, 360), ('E', 360, 10), ('F', 10, 10)]
    return [Variable(name, np.ones([rows, cols]))
            for name, rows, cols in shapes]
def test_single_tree():
    """Print two hand-picked association orders of the test chain."""
    vars = gen_test_data()
    for builder in (build_expression_tree_simple, build_expression_tree_mid):
        tree = builder(vars)
        print('for tree:', tree.expression_string)
        print_tree(tree)
def test_all_tree():
    """Cost every parenthesization and report the cheapest one."""
    vars = gen_test_data()
    trees = build_all_expression_tree(vars)
    muls = list(count_num_muls(trees))
    for tree, count in zip(trees, muls):
        print('for tree:', tree.expression_string)
        print_tree(tree)
        print('# muls:', count)
        print('')
    best_tree, best_muls = min(zip(trees, muls), key=lambda pair: pair[1])
    print('optimal expression:', best_tree.expression_string)
    print_tree(best_tree)
    print('optimal # muls:', best_muls)
def time_expr():
    """Benchmark several association orders of the same 6-matrix product.

    Fix: timeit.repeat returns elapsed *seconds* for `number` executions;
    the original printed the value labeled as "ms". The four copy-pasted
    timing calls are also collapsed into one loop.
    """
    print('timming....')
    setup_code = '''
from __main__ import gen_test_data
vars = gen_test_data()
data = [v.data for v in vars]
A, B, C, D, E, F = data
'''
    test_code1 = '''
val = (A@(B@(C@(D@(E@F)))))
'''
    test_code2 = '''
val = A@B@C@D@E@F
'''
    test_code3 = '''
val = (((((A@B)@C)@D)@E)@F)
'''
    test_code_opt = '''
val = (A@((B@(C@(D@E)))@F))
'''
    for stmt in (test_code1, test_code2, test_code3, test_code_opt):
        times = timeit.repeat(setup=setup_code,
                              stmt=stmt,
                              number=10000)
        # min() of the repeats is the least-noisy estimate
        print('time for {} is {} s'.format(stmt, min(times)))
if __name__ == "__main__":
    # Enumerate every parenthesization first, then benchmark a few by hand.
    test_all_tree()
    time_expr()
|
yimuw/yimu-blog | random/res/try1.py | # -*- coding: utf-8 -*-
import torch
import math
import matplotlib.pyplot as plt
import numpy as np
dtype = torch.float
device = torch.device("cpu")
INIT_WEIGHTS = [0.01, -0.01, 0.01, -0.01]
x = 1
y = 2
learning_rate = 1e-2
def plot_loss():
    """Plot the loss landscape over w1 for the two parameterizations.

    loss1 uses the (w + 1) reparameterization, loss2 the raw product.
    Fix: loss2 previously read the module-level global `x` instead of its
    parameter `xx` (harmless here only because it is always called with x).
    """
    weights = INIT_WEIGHTS[:]

    def loss1(xx, W):
        # product of (w + 1) factors applied to the input
        z = xx
        for w in W:
            z = (w + 1) * z
        y_pred = z
        loss = (y_pred - y) ** 2
        return loss

    def loss2(xx, W):
        w1, w2, w3, w4 = W
        # bug fix: use the argument xx, not the global x
        y_pred = w1 * w2 * w3 * w4 * xx
        loss = (y_pred - y) ** 2
        return loss

    loss1_all = []
    loss2_all = []
    w1_range = np.linspace(-3, 3, 100)
    for w1 in w1_range:
        # sweep only the first weight; the rest stay at their init values
        weights[0] = w1
        loss1_all.append(loss1(x, weights))
        loss2_all.append(loss2(x, weights))
    plt.plot(w1_range, loss1_all, 'r')
    plt.plot(w1_range, loss2_all, 'b')
    plt.show()
def test1():
    """Gradient descent on y_pred = (w1*w2*w3*w4) * x (raw product form)."""
    print("test1 start!")
    weights = [torch.tensor([[INIT_WEIGHTS[i]]], device=device,
                            dtype=dtype, requires_grad=True) for i in range(4)]
    for step in range(500):
        pred = x
        for w in weights:
            pred = w * pred
        loss = (pred - y).pow(2).sum()
        if step % 50 == 0:
            print('iter:', step, 'loss:', loss.item(), 'y_pred:', pred.item())
        loss.backward()
        # manual SGD step, outside autograd
        with torch.no_grad():
            for w in weights:
                w -= learning_rate * w.grad
                w.grad.zero_()
def test2():
    """Same fit as test1 but with the (w + 1) reparameterization."""
    print("test2 start!")
    weights = [torch.tensor([[INIT_WEIGHTS[i]]], device=device,
                            dtype=dtype, requires_grad=True) for i in range(4)]
    for step in range(500):
        pred = x
        for w in weights:
            pred = (w + 1.) * pred
        loss = (pred - y).pow(2).sum()
        if step % 50 == 0:
            print('iter:', step, 'loss:', loss.item(), 'y_pred:', pred.item())
        loss.backward()
        # manual SGD step, outside autograd
        with torch.no_grad():
            for w in weights:
                w -= learning_rate * w.grad
                w.grad.zero_()
def test3():
    """Same model as test2, but prod(w_i + 1) * x is expanded symbolically
    into its 16 monomial terms so autograd differentiates a sum of
    products instead of a nested product."""
    print("test3 start!")
    weights = [torch.tensor([[INIT_WEIGHTS[i]]], device=device,
                            dtype=dtype, requires_grad=True) for i in range(4)]
    w1, w2, w3, w4 = weights
    for t in range(500):
        # Full expansion of (w1+1)(w2+1)(w3+1)(w4+1) * x.
        y_pred = w1*w2*w3*w4*x \
            + w1*w2*w3*x + w1*w2*w4*x + w1*w3*w4*x + w2*w3*w4*x\
            + w1*w2*x + w1*w3*x + w1*w4*x + w2*w3*x + w2*w4*x + w3*w4*x\
            + w1*x + w2*x + w3*x + w4*x + x
        loss = (y_pred - y).pow(2).sum()
        if t % 50 == 0:
            print('iter:', t, 'loss:', loss.item(), 'y_pred:', y_pred.item())
        loss.backward()
        # Manual SGD step, outside autograd (l is unused).
        with torch.no_grad():
            for l, w in enumerate(weights):
                w -= learning_rate * w.grad
                w.grad.zero_()
if __name__ == "__main__":
    # Visualize the two loss landscapes, then run the three fits.
    plot_loss()
    test1()
    test2()
    test3()
|
yimuw/yimu-blog | least_squares/kalman/extented_kalman_filter.py | import math
import matplotlib.pyplot as plt
import numpy as np
from model import *
class ExtentedKalmanFilter:
    """Extended Kalman filter over the model supplied by get_model().

    (Class name spelling kept as-is; callers import it.)
    """

    def __init__(self, prior_state, prior_cov):
        self.state = prior_state
        self.cov = prior_cov

    def predict(self):
        """Propagate state and covariance through the motion model.

        Fix: the Jacobian must be evaluated at the *prior* estimate,
        before propagation. The original propagated self.state first and
        then linearized at the propagated state, biasing the covariance.
        """
        model = get_model()
        f_jacobian = model.f_jacobian(self.state)
        self.state = model.f(self.state)
        self.cov = f_jacobian @ self.cov @ f_jacobian.T + model.f_cov()

    def update(self, measurement):
        """Standard EKF measurement update."""
        model = get_model()
        from numpy.linalg import inv
        innovation = measurement - model.h(self.state)
        h_jacobian = model.h_jacobian(self.state)
        # innovation covariance and Kalman gain
        Sk = h_jacobian @ self.cov @ h_jacobian.T + model.h_cov()
        K = self.cov @ h_jacobian.T @ inv(Sk)
        self.state = self.state + K @ innovation
        self.cov = (np.identity(model.NUM_STATES) - K @ h_jacobian) @ self.cov

    def filter(self, measurement):
        """One predict/update cycle."""
        self.predict()
        self.update(measurement)
def run_extented_kalman_filter(gt_states, gt_measurements):
    """Run the EKF over all measurements and compare with ground truth."""
    kf = ExtentedKalmanFilter(np.array([0.5, 0.5, 0, 0, 0]),
                              np.diag([1, 1, 1, 1., 1.]))
    estimated_states = []
    estimated_cov = []
    for measurement in gt_measurements:
        kf.filter(measurement)
        estimated_states.append(kf.state)
        estimated_cov.append(kf.cov)
    result_comparison(gt_states, estimated_states, estimated_cov, 'Kalman filter')
|
yimuw/yimu-blog | random/lyapunov/lyapunov_linear.py | <reponame>yimuw/yimu-blog
# Import packages.
import cvxpy as cp
import numpy as np
# linear case
# given a linear dynamic system f(x) = Ax
# Find a quadratic Lyapunov function V(x) = x^T Q x
# want: V(x) > 0 => Q >> 0
# dot V(x) < 0 => 2x^T Q A x < 0 => - Q A - A Q > 0
class LinearSystemLyapunov:
    """Search for a quadratic Lyapunov function V(x) = x^T Q x of the
    linear system x' = A x by solving a small SDP feasibility problem:
    Q >> 0 and -QA - A^T Q >> 0."""

    def __init__(self):
        pass

    def solve_for_quadratic_lyapunov(self, A):
        """Solve the SDP and print the certificate (or infeasibility)."""
        assert A.ndim == 2
        assert A.shape[0] == A.shape[1]
        dim = A.shape[0]
        Q = cp.Variable((dim, dim), symmetric=True)
        # strict definiteness enforced via a small margin
        margin = 1e-4 * np.identity(dim)
        psd_constraints = [Q >> margin,
                           -Q @ A - A.T @ Q >> margin]
        problem = cp.Problem(cp.Minimize(1), psd_constraints)
        problem.solve(verbose=False)
        print("test the stability of linear system A:\n", A)
        print("status:", problem.status)
        print("The optimal value is", problem.value)
        print("A solution Q is")
        print(Q.value)
def main():
    """Certify one stable system, then show failure on an unstable one."""
    checker = LinearSystemLyapunov()
    # stable: all eigenvalues negative
    A1 = np.array([
        [-0.5, 0, 0],
        [0, -0.5, 0],
        [0, 0, -0.5]
    ])
    checker.solve_for_quadratic_lyapunov(A1)
    # unstable: all eigenvalues positive, so the SDP is infeasible
    A2 = np.array([
        [2, 0, 0],
        [0, 2, 0],
        [0, 0, 2]
    ])
    checker.solve_for_quadratic_lyapunov(A2)


if __name__ == "__main__":
    main()
yimuw/yimu-blog | least_squares/ceres-from-scratch/mpc.py | from number_forward_flow import *
def motion_case():
    """Estimate two consecutive vehicle states (x, y, v, theta) from
    position observations plus a unicycle motion constraint, using
    Gauss-Newton over autodiff'ed residual blocks."""
    print('=============== motion (nonlinear) case ==============')

    def motion_func(state):
        # Unicycle model: move distance v along heading theta.
        x, y, v, theta = state
        xnext = x + cosine(theta) * v
        ynext = y + sine(theta) * v
        return [xnext, ynext, v, theta]

    def observation_func(state):
        # Only the position is observed.
        x, y, v, theta = state
        return x, y

    # Ground truth: one motion step from s0, plus both observations.
    s0 = [0., 0., 0.5, math.pi / 9]
    s1 = motion_func(s0)
    o0 = observation_func(s0)
    o1 = observation_func(s1)

    # Initial guess for [state0, state1] stacked as one 8-vector.
    v0 = [0., 0., 1., math.pi / 4]
    v1 = [0., 0., 0., 0.]
    states = v0 + v1

    def motion_residaul(state0andstate1):
        # state1 - f(state0): how much the guess violates the dynamics.
        state0 = state0andstate1[:4]
        state1 = state0andstate1[4:]
        pred = motion_func(state0)
        return [b - a for a, b in zip(pred, state1)]

    def observation_residual0(state0andstate1):
        state0 = state0andstate1[:4]
        pred = observation_func(state0)
        return [a - b for a, b in zip(pred, o0)]

    def observation_residual1(state0andstate1):
        state1 = state0andstate1[4:]
        pred = observation_func(state1)
        return [a - b for a, b in zip(pred, o1)]

    for iter in range(10):
        # Stack Jacobians/residuals of all three blocks, then solve the
        # normal equations and take a damped (0.8) Gauss-Newton step.
        full_jacobian = np.zeros([0, 8])
        residaul = np.zeros([0])
        r, J = ResidualBlock(observation_residual0, states).evaluate()
        r, J = np.array(r).T, np.array(J)
        residaul = np.hstack([residaul, r])
        full_jacobian = np.vstack([full_jacobian, J])
        r, J = ResidualBlock(observation_residual1, states).evaluate()
        r, J = np.array(r).T, np.array(J)
        residaul = np.hstack([residaul, r])
        full_jacobian = np.vstack([full_jacobian, J])
        r, J = ResidualBlock(motion_residaul, states).evaluate()
        r, J = np.array(r).T, np.array(J)
        residaul = np.hstack([residaul, r])
        full_jacobian = np.vstack([full_jacobian, J])
        delta = np.linalg.solve(
            full_jacobian.T @ full_jacobian, - full_jacobian.T @ residaul)
        states = [s + 0.8 * d for s, d in zip(states, delta)]
        print('cost:', residaul.T @ residaul)

    print('result s0:', states[:4])
    print('gt s0:', s0)
    print('result s1:', states[4:])
    print('gt s1:', s1)


if __name__ == "__main__":
    motion_case()
|
yimuw/yimu-blog | random/my_git/my_git.py | import os
import hashlib
import pickle
import argparse
import sys
import shutil
import copy
join = os.path.join
class TreeNode:
    """Node in the commit DAG: id (the commit message), blob hashes,
    parent ids, and child ids.

    Fix: `blobs=[]` / `parents=[]` were mutable default arguments shared
    across every instance created without explicit values; replaced with
    the None-sentinel idiom (backward compatible for all callers).
    """

    def __init__(self, id, blobs=None, parents=None):
        self.id = id
        self.blobs = [] if blobs is None else blobs
        self.parents = [] if parents is None else parents
        self.children = []
class Blob:
    """Content-addressed snapshot of one tracked file.

    Fix: the original read via `open(...).read()` and leaked the file
    handle; a `with` block closes it deterministically.
    """

    def __init__(self, file_path):
        self.file_path = file_path
        with open(self.file_path, 'r') as f:
            self.file_content = f.read()
        # sha1 of the content is the blob's identity
        self.hash_val = hashlib.sha1(
            self.file_content.encode('utf-8')).hexdigest()
class MyGit:
    """A minimal git clone.

    Commits form a DAG of pickled TreeNode objects under my_git/nodes,
    file snapshots are content-addressed pickled Blob objects under
    my_git/blobs, and the HEAD pointer plus the tracked-file set are
    pickled as a single States record under my_git/states.
    """

    class States:
        """Mutable repository state persisted between CLI invocations."""

        def __init__(self):
            self.head = None            # id of the commit HEAD points at
            self.tracked_files = set()  # working-tree paths under version control

    def __init__(self, dir='.'):
        self.git_dir = 'my_git'
        self.blobs_dir = join(dir, self.git_dir, 'blobs')
        self.nodes_dir = join(dir, self.git_dir, 'nodes')
        # NOTE(review): states_path is never read; load/save_states build
        # the path from self.git_dir directly, ignoring `dir` -- confirm.
        self.states_path = join(dir, self.git_dir, 'states')
        self.states = self.States()
        self.root_name = 'ROOT'

    def init(self):
        """Create the my_git/ directory layout with a single ROOT commit."""
        if os.path.exists(self.git_dir):
            print('git dir exist!')
            return
        os.mkdir(self.git_dir)
        os.mkdir(self.blobs_dir)
        os.mkdir(self.nodes_dir)
        root = TreeNode(self.root_name)
        self.states.head = root.id
        self.save_node(root)
        self.save_states()
        print('init my_git repo!')

    # --- pickle-backed persistence helpers ---

    def load_states(self):
        self.states = pickle.load(open(join(self.git_dir, 'states'), 'rb'))

    def save_states(self):
        pickle.dump(self.states, open(join(self.git_dir, 'states'), 'wb'))

    def load_node(self, node_id):
        return pickle.load(open(join(self.nodes_dir, node_id), 'rb'))

    def save_node(self, node):
        pickle.dump(node, open(join(self.nodes_dir, node.id), 'wb'))

    def load_blob(self, blob_hash):
        return pickle.load(open(join(self.blobs_dir, blob_hash), 'rb'))

    def save_blob(self, blob):
        pickle.dump(blob, open(join(self.blobs_dir, blob.hash_val), 'wb'))

    def status(self):
        """Print HEAD, the tracked set, and each tracked file's content."""
        self.load_states()
        print('HEAD: ', self.states.head)
        print('tracked files:', self.states.tracked_files)
        for f in self.states.tracked_files:
            print('content of f:', f)
            print(open(f, 'r').read())

    def add(self, files):
        """Start tracking the given paths (skips ones that don't exist)."""
        self.load_states()
        for f in files:
            if os.path.exists(f):
                self.states.tracked_files.add(f)
            else:
                print(f, 'does not exist!')
        self.save_states()

    def commit(self, message, verbose=True):
        """Snapshot all tracked files as a new child of HEAD.

        The commit message doubles as the new node's id.
        """
        self.load_states()
        print('current HEAD:', self.states.head)
        has_change = False
        all_blob = []
        for f in self.states.tracked_files:
            blob = Blob(f)
            all_blob.append(blob.hash_val)
            # a blob hash not yet on disk means the file changed
            if blob.hash_val not in os.listdir(self.blobs_dir):
                has_change = True
                self.save_blob(blob)
        # using hash to detect changes
        if has_change == False:
            print('nothing to commit!')
            return
        new_node = TreeNode(message, blobs=all_blob,
                            parents=[self.states.head])
        # update the children of current head node
        head_node = self.load_node(self.states.head)
        head_node.children.append(new_node.id)
        self.save_node(head_node)
        # head point to new node
        self.states.head = new_node.id
        self.save_states()
        # save the new node
        self.save_node(new_node)
        if verbose:
            print('commit success! ')
            print('new HEAD:', self.states.head)

    def __merge_commit(self, p1, p2):
        """Create a commit with two parents and point HEAD at it."""
        merge_node = TreeNode(
            'merge-{}-{}'.format(p1[:6], p2[:6]), parents=[p1, p2])
        # NOTE(review): hash_val is set ad hoc (TreeNode normally has no
        # such attribute) and duplicates merge_node.id -- confirm intent.
        merge_node.hash_val = 'merge-{}-{}'.format(p1[:6], p2[:6])
        # update the children of p1, p2 node
        p1_node = self.load_node(p1)
        p1_node.children.append(merge_node.hash_val)
        self.save_node(p1_node)
        p2_node = self.load_node(p2)
        p2_node.children.append(merge_node.hash_val)
        self.save_node(p2_node)
        # head point to new node
        self.states.head = merge_node.hash_val
        self.save_states()
        # save the new node
        self.save_node(merge_node)
        print('merged commit success! ')
        print('new HEAD:', self.states.head)

    def checkout(self, node_id, verbose=True):
        """Replace the working tree with the snapshot stored at node_id."""
        if node_id not in os.listdir(self.nodes_dir):
            print('commit doesn not exist')
            return
        self.load_states()
        # drop the current working copy of every tracked file
        for f in self.states.tracked_files:
            os.remove(f)
        self.states.tracked_files = set()
        node = self.load_node(node_id)
        for blob_hash in node.blobs:
            blob = self.load_blob(blob_hash)
            self.states.tracked_files.add(blob.file_path)
            with open(blob.file_path, 'w') as file:
                file.write(blob.file_content)
        self.states.head = node_id
        self.save_states()
        if verbose:
            print('checkout success!')
            print('new HEAD:', self.states.head)

    def __lowest_common_ancester_path(self, node1_id, node2_id):
        """Return the lowest common ancestor of two commits.

        Walks the DAG from ROOT; the deepest node whose subtree contains
        both targets is recorded in self.lca.
        """
        print('finding LCA path between', node1_id, node2_id)
        self.lca = None

        def recursion(node_id):
            node = self.load_node(node_id)
            value = 0
            for c in node.children:
                value += recursion(c)
            value += 1 if node.id == node1_id or node.id == node2_id else 0
            # first (deepest) node covering both targets is the LCA
            if self.lca is None and value == 2:
                self.lca = node.id
            return value
        recursion(self.root_name)
        return self.lca

    def __detect_diff3_conflict(self, path):
        # diff3 -m leaves '<<<<<<<' markers on conflicting hunks
        return '<<<<<<<' in open(path, 'r').read()

    def merge(self, node_id):
        """Three-way merge of HEAD with node_id.

        Fast-forwards when one side is an ancestor of the other;
        otherwise snapshots both sides plus the LCA into a scratch
        directory and merges file by file with `diff3`.
        """
        if node_id not in os.listdir(self.nodes_dir):
            print('commit doesn not exist')
            return
        self.load_states()
        lca = self.__lowest_common_ancester_path(self.states.head, node_id)
        print('LCA node:', lca)
        if lca == node_id or lca == self.states.head:
            print('fastforward...')
            self.checkout(node_id)
            return
        # scratch area holding the three snapshots needed by diff3
        merge_id = 'merge-{}-{}'.format(self.states.head, node_id)
        merge_dir = join(self.git_dir, merge_id)
        source_dir = join(merge_dir, 'source')
        target_dir = join(merge_dir, 'target')
        lca_dir = join(merge_dir, 'lca')
        if os.path.exists(merge_dir):
            shutil.rmtree(merge_dir)
        os.mkdir(merge_dir)
        os.mkdir(source_dir)
        os.mkdir(target_dir)
        os.mkdir(lca_dir)

        def snapshot(dir):
            # copy the current working tree into dir
            for f in self.states.tracked_files:
                shutil.copyfile(f, join(dir, f))
        cur_head = self.states.head
        snapshot(source_dir)
        source_files = copy.deepcopy(self.states.tracked_files)
        self.checkout(lca, verbose=False)
        snapshot(lca_dir)
        lca_files = copy.deepcopy(self.states.tracked_files)
        self.checkout(node_id, verbose=False)
        snapshot(target_dir)
        target_files = copy.deepcopy(self.states.tracked_files)
        self.checkout(cur_head, verbose=False)
        all_files = target_files.union(source_files).union(lca_files)
        for f in all_files:
            print('...merging...', f)
            sf = join(source_dir, f)
            tf = join(target_dir, f)
            lf = join(lca_dir, f)
            # 1. f in lca, source and target.
            #    do a diff3
            if f in source_files and f in target_files and f in lca_files:
                os.system('diff3 {} {} {} -m > {}'.format(sf, lf, tf, f))
                self.states.tracked_files.add(f)
                print('3 way merge')
                if self.__detect_diff3_conflict(f):
                    print(
                        '!!!! Merge conflict for {}!!!! please address in another commit!'.format(f))
            # 2. f in source and lca, but not target.
            #    take source
            elif f in source_files and f in lca_files:
                shutil.copyfile(sf, f)
                self.states.tracked_files.add(f)
                print('new change from source. take from source')
            # 3. f in target and lca, but not source.
            #    take target
            elif f in target_files and f in lca_files:
                shutil.copyfile(tf, f)
                self.states.tracked_files.add(f)
                print('new change from target. take from target')
            # 4. f in source or (in source and target).
            #    new file. take source
            elif f in source_files:
                shutil.copyfile(sf, f)
                self.states.tracked_files.add(f)
                print('new file. take from source')
            # 5. f only in target.
            #    new file. take target
            elif f in target_files:
                shutil.copyfile(tf, f)
                self.states.tracked_files.add(f)
                print('new file. take from target')
            # 6. f only in lca.
            #    file is removed. Do nothing
            elif f in lca_files:
                print('file is removed')
            else:
                print('I worte a bug!')
        # create a merge commit which has 2 parents
        self.__merge_commit(self.states.head, node_id)

    def log(self):
        """Print every commit reachable from HEAD by following parents."""
        def tree_recursion(node_id):
            if node_id is None:
                return
            node = self.load_node(node_id)
            print('======================')
            print('node id/message: ', node.id)
            print('blobs:', node.blobs)
            print('children: ', node.children)
            print('parents: ', node.parents)
            print('======================')
            for p in node.parents:
                tree_recursion(p)
        self.load_states()
        tree_recursion(self.states.head)

    def gitk(self):
        """Draw the whole commit DAG (plus a HEAD marker) with networkx."""
        import networkx as nx
        from matplotlib import pyplot as plt
        self.load_states()

        def node_info_str(node):
            return '{}'.format(node.id)

        def get_edges(node_hash):
            # edges from each node to all of its children, recursively
            node = self.load_node(node_hash)
            edges = [(node_info_str(node), node_info_str(self.load_node(c)))
                     for c in node.children]
            for c in node.children:
                edges += get_edges(c)
            return edges
        graph = nx.DiGraph()
        edges = get_edges(self.root_name)
        edges.append(('HEAD', node_info_str(self.load_node(self.states.head))))
        graph.add_edges_from(edges)
        plt.tight_layout()
        nx.draw_networkx(graph, arrows=True)
        plt.show()
if __name__ == '__main__':
    git = MyGit()
    # CLI: one subcommand per MyGit operation.
    argparser = argparse.ArgumentParser()
    argsubparsers = argparser.add_subparsers(title='Commands', dest='command')
    argsubparsers.required = True
    argsubparsers.add_parser('init')
    argsubparsers.add_parser('status')
    argsubparsers.add_parser('log')
    argsubparsers.add_parser('test')
    argsubparsers.add_parser('gitk')
    argsp = argsubparsers.add_parser('add')
    argsp.add_argument('files', nargs='+')
    argsp = argsubparsers.add_parser('merge')
    argsp.add_argument('id',
                       help='target commit')
    argsp = argsubparsers.add_parser('commit')
    argsp.add_argument('message',
                       help='file to commit')
    argsp = argsubparsers.add_parser('checkout')
    argsp.add_argument('id',
                       help='id is a hash or a branch')
    args = argparser.parse_args(sys.argv[1:])
    # Dispatch to the matching MyGit method.
    # NOTE(review): the 'test' subcommand is registered but never handled.
    if args.command == 'status':
        git.status()
    elif args.command == 'checkout':
        git.checkout(args.id)
    elif args.command == 'commit':
        git.commit(args.message)
    elif args.command == 'init':
        git.init()
    elif args.command == 'log':
        git.log()
    elif args.command == 'gitk':
        git.gitk()
    elif args.command == 'merge':
        git.merge(args.id)
    elif args.command == 'add':
        git.add(args.files)
|
yimuw/yimu-blog | comms/http_cam/server.py | import os
from flask import Flask, request
import cv2
import time
import os
from flask_sqlalchemy import SQLAlchemy
# create and configure the app
app = Flask(__name__, instance_relative_config=True)
# NOTE(review): four slashes makes this the absolute path '/data.db';
# 'sqlite:///data.db' (three slashes) would be relative -- confirm intent.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////data.db'
db = SQLAlchemy(app)
class Shot(db.Model):
    """One captured webcam frame: who requested it, when, and where the
    image was saved on disk."""
    __tablename__ = 'shots'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # timestamp assigned by the database at insert time
    created_date = db.Column(db.DateTime, server_default=db.func.now())
    request_ip = db.Column(db.String(80))
    im_path = db.Column(db.String(200))

    def __repr__(self):
        # NOTE(review): the label says 'request_id' but the value printed
        # is request_ip -- confirm whether the label should be fixed.
        return 'id:{} created_date:{} request_id:{} im_path:{}'.format(self.id, self.created_date, self.request_ip,
                                                                       self.im_path)
# Grab the default webcam once at import time.
cam = cv2.VideoCapture(0)
image_save_dir = 'captures'
if not os.path.exists(image_save_dir):
    os.makedirs(image_save_dir)
# ensure the instance folder exists
try:
    os.makedirs(app.instance_path)
except OSError:
    # already present (or not creatable) -- best effort
    pass
# a simple page that says hello
@app.route('/hello')
def hello():
    """Trivial liveness endpoint."""
    return 'Hello, World!'
@app.route('/tp', methods=['POST'])
def take_picture():
    """Grab a frame from the webcam, save it under captures/, and record
    the request (IP + image path) in the database."""
    return_value, image = cam.read()
    t = time.time()
    # image named by capture timestamp with millisecond precision
    im_save_path = os.path.join(image_save_dir, 'im_{:.3f}.png'.format(t))
    cv2.imwrite(im_save_path, image)
    request_ip = request.remote_addr
    print('request from: ', request.form)
    shot_record = Shot(request_ip=request_ip, im_path=im_save_path)
    db.session.add(shot_record)
    db.session.commit()
    # NOTE(review): some keys below carry a trailing ':' -- confirm the
    # clients expect these exact key names.
    return {'cv_return': return_value,
            'request_ip:': request_ip,
            'timestamp:': time.time(),
            'im_shape:': image.shape}
@app.route('/shots')
def get_shots():
    """Return every recorded Shot row as its repr string."""
    return {'shots': [str(s) for s in Shot.query.all()]}
|
yimuw/yimu-blog | least_squares/fix_lag_smoothing/fix_lag_types.py | import scipy.linalg as linalg
import numpy as np
import profiler
class State:
    """2-D position wrapper used by the smoothing demo."""

    def __init__(self, variables):
        self.variables = variables

    def unpack_state(self):
        """Return the (x, y) components of the state."""
        x_pos, y_pos = self.variables
        return x_pos, y_pos
class DistanceBetweenStates:
    """Index-based record of a relative measurement between two states."""

    def __init__(self, state1_index, state2_index, distance):
        self.state1_index = state1_index
        self.state2_index = state2_index
        # defensive copy so later mutation of the caller's vector is harmless
        self.distance = distance.copy()
class DistanceMeasurement:
    """Relative (dx, dy) observation between two State objects."""

    def __init__(self, state1, state2, distance):
        self.state1 = state1
        self.state2 = state2
        # defensive copy of the measurement vector
        self.distance = distance.copy()

    def residual(self):
        """2x1 column vector: (state2 - state1) - measured distance."""
        x1, y1 = self.state1.unpack_state()
        x2, y2 = self.state2.unpack_state()
        dx, dy = self.distance
        rx = x2 - x1 - dx
        ry = y2 - y1 - dy
        return np.array([[rx], [ry]])

    def jacobi_wrt_state1(self):
        """d residual / d state1 is -I."""
        return -np.identity(2)

    def jacobi_wrt_state2(self):
        """d residual / d state2 is +I."""
        return np.identity(2)

    def residual_size(self):
        return 2

    def variable_size(self):
        return 2
class PriorMeasurement:
    """Absolute prior on a single state's (x, y)."""

    def __init__(self, state, prior):
        self.state = state
        # defensive copy of the prior vector
        self.prior = prior.copy()

    def residual(self):
        """2x1 column vector: state - prior."""
        x, y = self.state.unpack_state()
        px, py = self.prior
        return np.array([[x - px], [y - py]])

    def jacobi(self):
        """d residual / d state is I."""
        return np.identity(2)

    def residual_size(self):
        return 2

    def variable_size(self):
        return 2
|
yimuw/yimu-blog | least_squares/icp/icp_euler.py | <gh_stars>1-10
import numpy as np
# from scipy.linalg import logm, expm
from math import cos, sin, pi
import matplotlib.pyplot as plt
from utils import euler_angle_to_rotation
def icp_residual_yaw_pitch_roll(point_src, point_target, yaw_pitch_roll):
    """Flattened per-point alignment error R @ src - target."""
    rotation = euler_angle_to_rotation(*yaw_pitch_roll)
    diff = rotation @ point_src - point_target
    # column-major flatten -> [p1_x, p1_y, p1_z, p2_x, p2_y, p2_z, ...]
    return diff.flatten('F')
def compute_yaw_pitch_roll_jacobian_numurical(point_src, point_target, yaw_pitch_roll):
    """Central-difference Jacobian of the ICP residual w.r.t. the three
    Euler angles; also returns the residual at the current angles."""
    STEP = 1e-6
    jacobian = np.zeros([point_src.size, 3])
    for axis in range(3):
        plus = yaw_pitch_roll.copy()
        plus[axis] += STEP
        residual_plus = icp_residual_yaw_pitch_roll(point_src, point_target, plus)
        minus = yaw_pitch_roll.copy()
        minus[axis] -= STEP
        residual_minus = icp_residual_yaw_pitch_roll(point_src, point_target, minus)
        # symmetric difference quotient
        jacobian[:, axis] = (residual_plus - residual_minus) / (2. * STEP)
    residual_cur = icp_residual_yaw_pitch_roll(point_src, point_target, yaw_pitch_roll)
    return jacobian, residual_cur
def icp_yaw_pitch_roll_numirical(point_src, point_target):
    """Gauss-Newton alignment over Euler angles.

    Fix: the original computed the estimate but fell off the end,
    implicitly returning None; the final angles are now returned
    (backward compatible for callers that ignored the result).
    """
    yaw_pitch_roll = np.array([0, 0, 0.])
    for iter in range(10):
        jacobi, b = compute_yaw_pitch_roll_jacobian_numurical(
            point_src, point_target, yaw_pitch_roll)
        # normal-equations Gauss-Newton step
        delta = np.linalg.solve(jacobi.transpose() @ jacobi,
                                -jacobi.transpose() @ b)
        yaw_pitch_roll += delta
        print('iter: ', iter, ' cost:', b.transpose() @ b)
    return yaw_pitch_roll
|
yimuw/yimu-blog | least_squares/pca/pca_4d.py | # from scipy.linalg import logm, expm
from math import cos, pi, sin
from scipy.linalg import expm
import matplotlib.pyplot as plt
import numpy as np
"""
Problem
cost(R, P) = || D - take_first_col(R) * P ||^2
=>
cost(w, P) = || D - take_first_col(R @ exp(W)) * P ||
cost(w, P) = || D - R @ take_first_col(exp(W)) * P ||
First order approximation, exp(W) = I + W
cost(W, P) = ||D - R @ take_first_col(I + W) * P||
Only need to solve for n-1 parameters
cost(W_c1, P) = ||D - R @ ([1,w1,w2,w3,..]) * P||
=> get W_c1
update: R = R * exp([W_c1,0,..])
"""
def generate_point_cloud():
    """Sample 500 zero-mean 4-D points whose covariance is a random
    rotation of diag([0.001, 1, 9, 0.01]); returns a (4, 500) array."""
    mean = np.array([0, 0, 0, 0.])
    Mat = np.random.rand(4, 4)
    # exp of a skew-symmetric matrix is a rotation
    W = Mat.transpose() - Mat
    R = expm(W)
    np.testing.assert_almost_equal(R.transpose() @ R, np.identity(4))
    # rotate the diagonal covariance into a general one
    cov = R @ np.diag([0.001, 1, 9., 0.01]) @ R.transpose()
    points = np.random.multivariate_normal(mean, cov, size=500)
    points_mean = np.mean(points, axis=0)
    print('points_mean:', points_mean)
    points = points - points_mean
    print('R_gt: ', R)
    print('cov_gt:', cov)
    print('numpy.linalg.eig(cov):', np.linalg.eig(cov))
    return points.transpose()
def take_first_cols(R):
    """First column of R, kept 2-D with shape (n, 1)."""
    return R[:, 0:1]
class PCA_SO4_first_principle_component:
    """
    Find the first principal component of 4-D data by minimizing the
    reconstruction error over SO(4), parameterized locally by the three
    so(4) generators that move the first column.
    https://stats.stackexchange.com/questions/10251/what-is-the-objective-function-of-pca
    """

    def __init__(self, points):
        # points: assumes a (4, n) array (columns are samples) -- TODO confirm
        self.num_data = points.shape[1]
        self.num_residuals = points.size
        points_mean = np.mean(points, axis=1)
        self.points = (points.transpose() - points_mean).transpose()
        self.points_covariance = points @ points.transpose() / self.num_data
        # print(self.points_covariance)
        self.var_SO4 = np.identity(4)        # current rotation estimate
        self.var_projection = np.zeros([1, self.num_data])  # 1-D coefficients

    def residaul(self, var_SO4, var_projection):
        """
        r_i = p_i - first_k_cols(R) * w
        """
        # self.point.shape == (3, n)
        r = self.points - take_first_cols(var_SO4) @ var_projection
        # make r col major
        return r.transpose().flatten()

    def compute_projection(self):
        # Closed-form optimal coefficients given the current rotation.
        self.var_projection = take_first_cols(self.var_SO4).transpose() @ self.points

    def hat_local_so4_first_col(self, local_params):
        # Hat operator for the 3 generators affecting the first column.
        W = np.zeros([4, 4])
        w1, w2, w3 = local_params
        W[1, 0] = w1
        W[2, 0] = w2
        W[3, 0] = w3
        W[0, 1] = -w1
        W[0, 2] = -w2
        W[0, 3] = -w3
        return W

    def local_so4_first_col_to_SO4(self, local_params):
        # Exponential map of the local tangent parameters.
        W = self.hat_local_so4_first_col(local_params)
        return expm(W)

    def numerical_jacobi_wrt_first_col(self):
        """
        Central-difference Jacobian of the residual w.r.t. the 3 local
        so(4) parameters, evaluated at the identity perturbation.

        r_i = p_i - first_k_cols(R) * w
        """
        DELTA = 1e-8
        jacobian = np.zeros([self.num_residuals, 3])
        w_so4_local_first_col = np.array([0, 0, 0.])
        curret_params = w_so4_local_first_col.copy()
        for p_idx in range(3):
            params_plus = curret_params.copy()
            params_plus[p_idx] += DELTA
            residual_plus = self.residaul(self.var_SO4 @ self.local_so4_first_col_to_SO4(params_plus), self.var_projection)
            params_minus = curret_params.copy()
            params_minus[p_idx] -= DELTA
            residual_minus = self.residaul(self.var_SO4 @ self.local_so4_first_col_to_SO4(params_minus), self.var_projection)
            dr_dpidx = (residual_plus - residual_minus) / (2. * DELTA)
            jacobian[:, p_idx] = dr_dpidx
        return jacobian

    def jacobi_wrt_so3(self):
        """
        Analytic Jacobian -- not implemented; the numerical one is used.
        """
        pass

    def solve_normal_equation_and_update_wrt_so3(self):
        """
        One Gauss-Newton step on the local rotation parameters.
        """
        # jacobi = self.jacobi_wrt_so3()
        jacobi = self.numerical_jacobi_wrt_first_col()
        # print('jacobian', jacobi)
        r = self.residaul(self.var_SO4, self.var_projection)
        # rhs is invertable when rank == 1
        regulization = 0
        rhs = jacobi.transpose() @ jacobi + regulization * np.identity(3)
        lhs = - jacobi.transpose() @ r
        # print('rhs:', rhs)
        # print('lhs:', lhs)
        delta = np.linalg.solve(rhs, lhs)
        print('delta:', delta)
        # retraction: right-multiply by the exponential of the step
        self.var_SO4 = self.var_SO4 @ self.local_so4_first_col_to_SO4(delta)

    def cost(self):
        # Sum of squared residuals at the current variables.
        r = self.residaul(self.var_SO4, self.var_projection)
        return r.transpose() @ r

    def print_variable(self):
        print('cost:', self.cost())
        np.testing.assert_almost_equal(self.var_SO4.transpose() @ self.var_SO4, np.identity(4))
        print('Principal vector:', take_first_cols(self.var_SO4).transpose())
        for i in range(1):
            # empirical variance captured by the principal direction
            p = self.var_projection[i, :]
            print(p.transpose() @ p / self.num_data)

    def minimize(self):
        """Alternate projection and rotation updates until converged."""
        for iter in range(10):
            if self.cost() < 1e-8:
                break
            self.print_variable()
            self.compute_projection()
            self.solve_normal_equation_and_update_wrt_so3()
def point_statis(points):
    """Print the eigen-decomposition of the sample covariance,
    sorted by descending eigenvalue (reference for the PCA result)."""
    cov_stats = points @ points.transpose() / points.shape[1]
    eigvals, eigvecs = np.linalg.eig(cov_stats)
    order = np.argsort(eigvals)[::-1]
    eigvals = eigvals[order]
    eigvecs = eigvecs[:, order]
    print('e(cov_stats):', eigvals)
    print('v(cov_stats):', eigvecs)
def main():
    """End-to-end demo: sample points, fit the first PC on SO(4), then
    compare against the plain eigen-decomposition."""
    cloud = generate_point_cloud()
    pca = PCA_SO4_first_principle_component(cloud)
    print("pca.cost():", pca.cost())
    print("pca jocibian:", pca.numerical_jacobi_wrt_first_col())
    pca.minimize()
    point_statis(cloud)


if __name__ == "__main__":
    main()
|
yimuw/yimu-blog | least_squares/fix_lag_smoothing_v2/main.py | import scipy.linalg as linalg
import numpy as np
import fix_lag_types
import batch_optimizer
import fix_lag_smoother
import profiler
def simulate_data(num_states):
    """Build ground-truth states plus noisy GPS/odometry measurements
    and a random initial guess for the optimizers."""
    # movement = x2 - x1
    initial = fix_lag_types.State(np.array([0., 0., 1., 2.]))
    states_gt = [initial]
    while len(states_gt) < num_states:
        states_gt.append(states_gt[-1].predict())
    states_gt = [s.variables for s in states_gt]
    odometry_measurements = [
        fix_lag_types.OdometryMeasurement(state1_index=idx, state2_index=idx + 1)
        for idx in range(num_states - 1)]
    gps_measurements = [
        fix_lag_types.GPSMeasurement(
            state_index=idx,
            gps=states_gt[idx][:2] + 0.5 * np.random.rand(2))
        for idx in range(num_states)]
    prior_measurement = np.array([0., 0., 1., 2.])
    state_init_guess = [fix_lag_types.State(np.random.random(4))
                        for _ in range(num_states)]
    return states_gt, state_init_guess, odometry_measurements, gps_measurements, prior_measurement
def fix_lag_smoothing_demo():
    """Compare batch least-squares against the fixed-lag smoother on the
    same simulated trajectory; their results should agree closely."""
    NUM_STATES = 30
    states_gt, state_init_guess, odometry_measurements, gps_measurements, prior_measurement = simulate_data(
        NUM_STATES)
    if False:
        # debug dump of the simulated problem
        print('states_gt :\n', states_gt)
        print('odometry_measurements:', odometry_measurements)
        print('gps_measurements:', gps_measurements)
        print('state_guess:', state_init_guess)
    # Reference solution: optimize over all states at once.
    batch_optimization = batch_optimizer.BatchOptimization(
        state_init_guess, odometry_measurements, gps_measurements, prior_measurement)
    batch_result, batch_cov = batch_optimization.optimize()
    # format data
    batch_result = np.vstack([b.variables for b in batch_result])
    batch_cov_diag = np.diag(batch_cov).reshape(-1, 4)
    states_gt = np.vstack(states_gt)
    print('states_gt :\n', states_gt)
    print('batch resutl:\n', batch_result)
    print('batch cov diag:\n', batch_cov_diag)
    fix_lag = fix_lag_smoother.FixLagSmoother(
        prior_measurement, gps_measurements[0])
    # observation 1 by 1. e.g. real-time simulation
    for i in range(len(odometry_measurements)):
        fix_lag.optimize_for_new_measurement(
            odometry_measurements[i], gps_measurements[i+1])
    fixed_lag_result = fix_lag.get_all_states()
    fixed_lag_cov_diag = fix_lag.get_diag_cov()
    print('fixed lag :\n', fixed_lag_result)
    print('fixed lag cov\n:', fixed_lag_cov_diag)
    # show diff.
    print('cov diff:\n', fixed_lag_cov_diag - batch_cov_diag)
    print('fixed lag - batch\n:', fixed_lag_result - batch_result)
    profiler.print_time_map()


if __name__ == "__main__":
    fix_lag_smoothing_demo()
|
yimuw/yimu-blog | random/visitor/vistor_ref_version/json_visitor.py | from abc import ABC, abstractmethod
from collections import deque
import json
import visitor_ref
RefObj = visitor_ref.RefObj
class JsonDumper(visitor_ref.VisitorBase):
    """Visitor that serializes a RefObj tree into a JSON-like string.

    The output accumulates in ``self.result``; ``self.level`` tracks the
    current indentation (in spaces) and grows by ``self.indent`` per nesting
    level.  The callback order must mirror the tree traversal so that the
    companion JsonLoader can consume the dump line by line.
    """
    def __init__(self):
        self.result = ''
        self.level = 0
        self.indent = 2
    def _keyed(self, name, payload):
        """Return one indented line; prefix '"name":' when a name is given.

        Fixes the original ``name == None`` comparisons (PEP 8: identity
        comparison with None should use ``is``) and removes the four-way
        duplication of this prefix logic.
        """
        if name is None:
            return ' ' * self.level + payload
        return ' ' * self.level + '"' + name + '"' + ':' + payload
    def on_leaf(self, name, obj):
        """Emit one scalar value line; string values are double-quoted."""
        obj_str = str(obj.val) if not isinstance(
            obj.val, str) else '"{}"'.format(obj.val)
        self.result += self._keyed(name, obj_str) + ',\n'
    def on_enter_level(self, name):
        """Open a '{' scope and increase the indentation."""
        self.result += self._keyed(name, '{\n')
        self.level += self.indent
    def on_leave_level(self):
        """Close the current '{' scope."""
        self.level -= self.indent
        self.result += ' ' * self.level + '}\n'
    def on_enter_list(self, name, obj):
        """Open a '[' scope and increase the indentation."""
        self.result += self._keyed(name, '[ \n')
        self.level += self.indent
    def on_leave_list(self):
        """Close the current '[' scope, dropping the trailing ','."""
        # remove the last ",".
        self.result = self.result[:-2] + '\n'
        self.level -= self.indent
        self.result += ' ' * self.level + ']\n'
class JsonLoader(visitor_ref.VisitorBase):
    """Visitor that re-populates a RefObj tree from a JsonDumper string.

    The dump is consumed line by line from a deque; each callback pops
    exactly the lines its JsonDumper counterpart wrote, so the traversal
    order must match the order used when dumping.
    """
    def __init__(self, dumped):
        # Queue of dump lines; popleft() consumes them in document order.
        self.dumped = deque(dumped.split('\n'))
    def on_leaf(self, name, obj):
        """Parse one scalar line into obj.val (string if quoted, else float)."""
        line = self.dumped.popleft()
        line = line.strip().rstrip(',')
        value_str = line
        # NOTE(review): a string value containing ':' would make this split
        # return extra pieces and raise -- confirm values never contain ':'.
        if ':' in line:
            _, value_str = line.split(':')
        if value_str[0] == '"':
            obj.val = value_str.strip('"')
        else:
            obj.val = float(value_str)
    def on_enter_level(self, name):
        # Consume the '{' line.
        self.dumped.popleft()
    def on_leave_level(self):
        # Consume the '}' line.
        self.dumped.popleft()
    def on_enter_list(self, name, obj):
        """Consume the '[' line and resize obj to the upcoming element count."""
        cur = self.dumped.popleft()
        def indent(line):
            # Number of leading spaces; the closing ']' shares the '[' indent.
            return len(line) - len(line.lstrip())
        list_start_indent = indent(cur)
        idx = 0
        # Count queued lines until the matching ']' at the same indentation.
        while indent(self.dumped[idx]) != list_start_indent:
            idx += 1
        list_size = idx
        # need to operate on the list object
        obj.clear()
        for _ in range(list_size):
            obj.append(visitor_ref.RefObj(None))
    def on_leave_list(self):
        # Consume the ']' line.
        self.dumped.popleft()
|
yimuw/yimu-blog | random/lyapunov/sos_s_procedure_simple.py | # Import packages.
import cvxpy as cp
import numpy as np
# verify p(x) >= 0 on g(x) > 0
# p(x) = (x-3)^2 - 1
# g(x) = 1 - x^2
#
# L = p(x) - l(x)g(x)
# l(x) := 1^T Q 1, sum of squares of 1
def solve_simple_constraint_sos_1():
    """S-procedure SOS feasibility check for p(x) = (x-3)^2 - 1 on g(x) = 1 - x^2.

    Searches for a PSD multiplier Q and a PSD Gram matrix `slack` whose
    entries match the coefficients of L = p(x) - l(x) g(x) in the basis
    [1, x]: expanding gives L = (1+Q) x^2 - 6 x + (8 - Q).
    """
    num_var = 1
    Q = cp.Variable((num_var, num_var), symmetric=True)
    slack = cp.Variable((2, 2), symmetric=True)
    # Q.value = np.identity(num_var)
    # sufficient condition
    Epsilon = 1e-4 * np.identity(num_var)
    # NOTE(review): Epsilon is num_var x num_var (1x1) while `slack` is 2x2;
    # confirm the intended shape of the PSD lower bound for `slack`.
    constraints = [Q >> Epsilon]
    constraints += [slack >> Epsilon]
    # Coefficient matching: constant term, cross term (halved), x^2 term.
    constraints += [-Q + 8 == slack[0,0]]
    constraints += [-3 == slack[1,0]]
    constraints += [1+Q == slack[1,1]]
    # Pure feasibility problem: minimize a constant.
    prob = cp.Problem(cp.Minimize(1),
                      constraints)
    prob.solve(verbose = False)
    # Print result.
    print("status:", prob.status)
    print("The optimal value is", prob.value)
    print("A solution Q is")
    print(Q.value)
# verify p(x) >= 0 on g(x) > 0
# p(x) = (x)^2 - 0.5
# g(x) = 1 - x^2
#
# L = p(x) - l(x)g(x)
# l(x) := 1^T Q 1, sum of squares of 1
def solve_simple_constraint_sos_2():
    """S-procedure SOS feasibility check for p(x) = x^2 - 0.5 on g(x) = 1 - x^2.

    Searches for a PSD multiplier Q and a PSD Gram matrix `slack` whose
    entries match the coefficients of L = p(x) - l(x) g(x) in the basis
    [1, x]: expanding gives L = (1+Q) x^2 - (0.5 + Q).
    """
    num_var = 1
    Q = cp.Variable((num_var, num_var), symmetric=True)
    slack = cp.Variable((2, 2), symmetric=True)
    # Q.value = np.identity(num_var)
    # sufficient condition
    Epsilon = 1e-4 * np.identity(num_var)
    # NOTE(review): Epsilon is 1x1 while `slack` is 2x2; confirm the intended
    # shape of the PSD lower bound for `slack`.
    constraints = [Q >> Epsilon]
    constraints += [slack >> Epsilon]
    # Coefficient matching: constant term, cross term, x^2 term.
    constraints += [-Q - 0.5 == slack[0,0]]
    constraints += [0 == slack[1,0]]
    constraints += [1+Q == slack[1,1]]
    prob = cp.Problem(cp.Minimize(1),
                      constraints)
    prob.solve(verbose = False)
    # Print result.
    print("status:", prob.status)
    print("The optimal value is", prob.value)
    print("A solution Q is")
    print(Q.value)
def main():
    """Run both S-procedure SOS feasibility checks in order."""
    demos = (('# problem 1', solve_simple_constraint_sos_1),
             ('# problem 2', solve_simple_constraint_sos_2))
    for label, solver in demos:
        print(label)
        solver()
if __name__ == "__main__":
    main()
yimuw/yimu-blog | least_squares/icp/icp_so3_and_t.py | import numpy as np
# from scipy.linalg import logm, expm
from math import cos, sin, pi
from copy import deepcopy
import utils
class SO3AndTranslation:
    """State holding a rotation R in SO(3) plus a 3x1 translation vector.

    The two parts are updated independently (SO(3) x R^3 composition: the
    translation update is plain vector addition, not an SE(3) action).
    """
    def __init__(self):
        # 3x3 rotation matrix, initialized to identity.
        self.R = np.identity(3)
        # 3x1 translation column vector.
        self.translation = np.array([[0, 0, 0.]]).transpose()
    def right_add(self, so3_local_and_translation):
        """Apply a 6-vector local update [w(0:3), t(3:6)] and return a NEW state.

        The rotation is right-multiplied by exp(w); the translation is
        incremented by t.  ``self`` is not modified.
        """
        assert(so3_local_and_translation.size == 6)
        w_so3_local = so3_local_and_translation[:3]
        translation = so3_local_and_translation[3:].reshape([3,1])
        ret = SO3AndTranslation()
        ret.R = self.R @ utils.so3_exp(w_so3_local)
        ret.translation = self.translation + translation
        assert(ret.translation.size == 3)
        return ret
def icp_so3_and_translation_residual(point_src, point_target, variables_SO3_and_translation):
    """Per-point ICP residual R @ src - target + t, flattened column-major.

    Returns a 1-D vector ordered [p1_x, p1_y, p1_z, p2_x, p2_y, p2_z, ...].
    """
    rotation = variables_SO3_and_translation.R
    offset = variables_SO3_and_translation.translation
    per_point_error = rotation @ point_src - point_target + offset
    # Fortran-order flatten groups the error per point.
    return per_point_error.flatten('F')
# Can do a lambda to reduce code length
def compute_so3_and_translation_jacobian_numurical(point_src, point_target, variables_SO3_and_translation):
    """Central-difference Jacobian of the ICP residual w.r.t. the 6-dof local update.

    Each of the six local coordinates (3 for so(3), 3 for translation) is
    perturbed by +/-DELTA through right_add and the residuals differenced.

    Returns:
        (jacobian, residual): the num_residuals x 6 Jacobian and the residual
        evaluated at the unperturbed variables.
    """
    DELTA = 1e-6
    num_residuals = point_src.size
    num_variables = 6
    jacobian = np.zeros([num_residuals, num_variables])
    # Zero local update; perturbations are applied around this point.
    so3_and_translation = np.array([0, 0, 0, 0, 0, 0.])
    curret_variables = deepcopy(variables_SO3_and_translation)
    for p_idx in range(6):
        variables_plus = deepcopy(curret_variables)
        delta_vector = so3_and_translation.copy()
        delta_vector[p_idx] += DELTA
        variables_plus = variables_plus.right_add(delta_vector)
        residual_plus = icp_so3_and_translation_residual(point_src, point_target, variables_plus)
        variables_minus = deepcopy(curret_variables)
        delta_vector = so3_and_translation.copy()
        delta_vector[p_idx] -= DELTA
        variables_minus = variables_minus.right_add(delta_vector)
        residual_minus = icp_so3_and_translation_residual(point_src, point_target, variables_minus)
        # Central difference: (r(x + d) - r(x - d)) / (2 d).
        dr_dpidx = (residual_plus - residual_minus) / (2. * DELTA)
        jacobian[:, p_idx] = dr_dpidx
    residual_cur_variables = icp_so3_and_translation_residual(point_src, point_target, curret_variables)
    return jacobian, residual_cur_variables
def icp_so3_and_translation_numirical(point_src, point_target):
    """Gauss-Newton ICP over SO(3) x R^3 with a numerical Jacobian.

    Runs a fixed 10 iterations; each step solves the normal equations for a
    6-dof local update and applies it via right_add.
    NOTE(review): the estimate is not returned -- callers only see the
    printed per-iteration cost.
    """
    variables = SO3AndTranslation()
    for iter in range(10):
        # Jocobi on so3
        jacobi, b = compute_so3_and_translation_jacobian_numurical(point_src, point_target, variables)
        # Normal equations: (J^T J) delta = -J^T r.
        delta = np.linalg.solve(jacobi.transpose() @ jacobi, -jacobi.transpose() @ b)
        # Update on SO3
        variables = variables.right_add(delta)
        #print('jocobian:', jacobi)
        #print('b: ', b)
        print('iter: ', iter, ' cost:', b.transpose() @ b)
        #print('current variables: ', w_so3)
yimuw/yimu-blog | deep-learning/tensorflow-from-scratch/neural_net.py | <reponame>yimuw/yimu-blog
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
import numpy as np
import variables_tree_flow as vtf
import matplotlib.pyplot as plt
import random
class NeuralNet:
    """Two-layer sigmoid network trained with the homemade autodiff in vtf.

    Weights are matrices of vtf.Variable graph nodes; the whole dataset loss
    is built as one computation graph and optimized by gradient descent.
    """
    def fit_and_test(self, images, labels):
        """Build the loss graph over (a subset of) the data, train it, then
        print predictions for the first examples."""
        assert len(images) == len(labels)
        num_data = len(labels)
        # Layer sizes: flattened 8x8 image (64) -> 10 hidden -> 10 classes.
        len0 = 64
        len1 = 10
        len2 = 10
        # +1 columns hold the bias weights.
        self.theta0 = np.array([vtf.Variable(value=random.gauss(0, 0.01), id='t{}'.format(i))
                                for i in range((len0 + 1) * len1)]).reshape([len1, (len0 + 1)])
        self.theta1 = np.array([vtf.Variable(value=random.gauss(0, 0.01), id='t{}'.format(i))
                                for i in range((len1 + 1) * len2)]).reshape([len2, (len1 + 1)])
        cost = 0
        for i, (im, label) in enumerate(zip(images, labels)):
            # NOTE(review): training is truncated to 2 samples, presumably to
            # keep the demo graph small -- confirm before reuse.
            if i == 2:
                break
            pred = self.__predict_func(im)
            assert(len(pred) == 10)
            cost_this = 0
            for idx, pred_val in enumerate(pred):
                # idx encoding
                # multiple logistic regression to handle multiclasses.
                if idx == label:
                    cost_this += - vtf.ntf_log(pred_val)
                else:
                    cost_this += - vtf.ntf_log(1 - pred_val)
            cost += cost_this * (1 / num_data)
        core = vtf.NumberFlowCore(cost)
        # Plain batch gradient descent on the fixed graph.
        for i in range(10000):
            core.forward()
            if i % 100 == 0:
                print("cost.val:", cost.value, " iter:", i)
            core.clear_grad()
            core.backward()
            core.gradient_desent(rate=0.05)
        # replace graph node type by np array
        self.theta0 = [[e.value for e in r] for r in self.theta0]
        self.theta1 = [[e.value for e in r] for r in self.theta1]
        for i in range(2):
            print(self.predict(images[i]))
            print('gt:', labels[i])
    def __predict_func(self, data):
        """Forward pass; returns the 10 sigmoid outputs (graph nodes during
        training, plain floats after the weight matrices are converted)."""
        x0 = data.reshape(-1)
        # Append the bias input.
        x0b = np.hstack([x0, 1])
        z0 = self.theta0 @ x0b
        x1 = np.vectorize(vtf.ntf_sigmoid)(z0)
        x1b = np.hstack([x1, 1])
        z1 = self.theta1 @ x1b
        x2 = np.vectorize(vtf.ntf_sigmoid)(z1)
        return x2
    def predict(self, data):
        """Return the argmax class index for one image (after training)."""
        print('========================')
        pred = self.__predict_func(data)
        print('pred:', pred)
        return np.argmax(pred, axis=0)
class LogisticRegression:
    """One-layer multi-output logistic classifier on the vtf autodiff graph.

    NOTE(review): this class shadows sklearn.linear_model.LogisticRegression
    imported at the top of the file -- consider renaming.
    """
    def fit_and_test(self, images, labels):
        """Build the loss over (a subset of) the data, train it, then print
        predictions for the first examples."""
        assert len(images) == len(labels)
        num_data = len(labels)
        len0 = 64
        len1 = 10
        len2 = 10
        self.theta0 = np.array([vtf.Variable(value=random.gauss(0, 0.01), id='t{}'.format(i))
                                for i in range((len0 + 1) * len1)]).reshape([len1, (len0 + 1)])
        # NOTE(review): theta1 is initialized but never used by this class's
        # forward pass -- likely copied over from NeuralNet.
        self.theta1 = np.array([vtf.Variable(value=random.gauss(0, 0.01), id='t{}'.format(i))
                                for i in range((len1 + 1) * len2)]).reshape([len2, (len1 + 1)])
        cost = 0
        for i, (im, label) in enumerate(zip(images, labels)):
            # NOTE(review): training truncated to 10 samples (demo-sized graph).
            if i == 10:
                break
            pred = self.__predict_func(im)
            assert(len(pred) == 10)
            cost_this = 0
            for idx, pred_val in enumerate(pred):
                # idx encoding
                # multiple logistic regression to handle multiclasses.
                if idx == label:
                    cost_this += - vtf.ntf_log(pred_val)
                else:
                    cost_this += - vtf.ntf_log(1 - pred_val)
            cost += cost_this * (1 / num_data)
        core = vtf.NumberFlowCore(cost)
        for i in range(1000):
            core.forward()
            if i % 100 == 0:
                print("cost.val:", cost.value, " iter:", i)
            core.clear_grad()
            core.backward()
            core.gradient_desent(rate=0.05)
        # replace graph node type by np array
        self.theta0 = [[e.value for e in r] for r in self.theta0]
        for i in range(10):
            print(self.predict(images[i]))
            print('gt:', labels[i])
    def __predict_func(self, data):
        """Single sigmoid layer: theta0 @ [x; 1] -> 10 outputs."""
        x0 = data.reshape(-1)
        x0b = np.hstack([x0, 1])
        z0 = self.theta0 @ x0b
        x1 = np.vectorize(vtf.ntf_sigmoid)(z0)
        return x1
    def predict(self, data):
        """Return the argmax class index for one image (after training)."""
        print('========================')
        pred = self.__predict_func(data)
        print('pred:', pred)
        return np.argmax(pred, axis=0)
if __name__ == "__main__":
    # The digits dataset
    digits = datasets.load_digits()
    # The data that we are interested in is made of 8x8 images of digits, let's
    # have a look at the first 4 images, stored in the `images` attribute of the
    # dataset. If we were working from image files, we could load them using
    # matplotlib.pyplot.imread. Note that each image must have the same size. For these
    # images, we know which digit they represent: it is given in the 'target' of
    # the dataset.
    _, axes = plt.subplots(2, 4)
    images_and_labels = list(zip(digits.images, digits.target))
    # Preview the first four training images with their labels.
    for ax, (image, label) in zip(axes[0, :], images_and_labels[:4]):
        ax.set_axis_off()
        ax.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
        ax.set_title('Training: %i' % label)
    plt.show()
    # Swap in NeuralNet here to train the two-layer model instead.
    # nn = NeuralNet()
    # nn.fit_and_test(digits.images, digits.target)
    lg = LogisticRegression()
    lg.fit_and_test(digits.images, digits.target)
yimuw/yimu-blog | least_squares/covariance1_gps/covariance1_gps.py | <filename>least_squares/covariance1_gps/covariance1_gps.py
import numpy as np
from matplotlib.patches import Ellipse
import matplotlib.pyplot as plt
from collections import namedtuple
SatelliteMeasurement = namedtuple('SatelliteMeasurement',
['position', 'distant', 'weight'])
# Change satellite position here!
def generate_data():
"""
Generate simulation data
"""
receiver_position = np.array([-10, 0.])
satelite_positions = [
np.array([20, 20]),
np.array([30, -30]),
np.array([-40, 0]),
]
# satelite_positions = [
# np.array([-2, 30]),
# np.array([2, 30]),
# ]
distance_noisy_std = 0.1
measurements = [
SatelliteMeasurement(
position=pos,
distant=np.linalg.norm(receiver_position - pos +
np.random.normal(0, distance_noisy_std, 1)),
weight=1) for pos in satelite_positions
]
return measurements
def plot_hessian_as_covarinace_2d(ax, xy, hessian, satelite_measurements):
    """
    Plot the inverse of the Gauss-Newton Hessian as covariance ellipses
    (scaled 1x/2x/3x) centered at xy, together with the satellite positions.

    https://stackoverflow.com/questions/20126061/creating-a-confidence-ellipses-in-a-sccatterplot-using-matplotlib
    you made a mistake,
    angle=np.rad2deg(np.arctan2(v[0, 1], v[-1, 0]))
    not
    angle=np.rad2deg(np.arccos(v[0, 0])))
    """
    x, y = xy
    # Covariance is the inverse of the Hessian.
    cov = np.linalg.inv(hessian)
    value, v = np.linalg.eig(cov)
    # Standard deviations along the principal axes.
    value = np.sqrt(value)
    for j in range(1, 4):
        SCALE = 3
        ell = Ellipse(xy=(np.mean(x), np.mean(y)),
                      width=value[0] * j * SCALE,
                      height=value[1] * j * SCALE,
                      angle=np.rad2deg(np.arctan2(v[0, 1], v[1, 0])),
                      facecolor='none',
                      edgecolor='red')
        ax.add_artist(ell)
    info_string = 'satellite position: '
    for s in satelite_measurements:
        plt.scatter(*s.position)
        info_string += str(s.position) + ', '
    ax.set_xlim(-50, 50)
    ax.set_ylim(-50, 50)
    plt.title(info_string)
    plt.scatter(x, y)
class GPS:
    """
    Nonlinear least-squares GPS receiver localization.

    want:
        minimize_xy = sum_i ||h_i(xy) - dist_i||^2
    where h_i(xy) = dist(xy, satelite_i)
    """
    def __init__(self, satelite_measurements):
        # states
        self.variables_xy = np.array([0., 0.])
        # data
        self.satelite_measurements = satelite_measurements
        # config
        self.max_iteration = 5
    def least_squares(self):
        """
        The nonlinear least squares iteration (fixed iteration count; also
        leaves the final Gauss-Newton Hessian in self.hessian).
        """
        for iteration in range(self.max_iteration):
            cost = self.compute_cost(self.satelite_measurements)
            jacobian = self.compute_jacobian(self.satelite_measurements)
            b = self.compute_b(self.satelite_measurements)
            W = self.compute_weights(self.satelite_measurements)
            delta = self.solve_normal_equation(jacobian, b, W)
            self.variables_xy += delta
            print('cost:', cost, ' position xy:', self.variables_xy)
    def h_function(self, satelite_measurement):
        """
        The distance observation function: Euclidean distance from the
        current estimate to the satellite.
        """
        diff = self.variables_xy - satelite_measurement.position
        dist = np.linalg.norm(diff)
        return dist
    def residual(self, satelite_measurement):
        """
        The residual function for GPS: predicted minus measured distance.
        """
        return self.h_function(
            satelite_measurement) - satelite_measurement.distant
    def compute_cost(self, satelite_measurements):
        """
        The (weighted, averaged) cost function for GPS.
        """
        cost = 0.
        for s in satelite_measurements:
            r = self.residual(s)
            cost += r * s.weight * r
        cost /= len(satelite_measurements)
        return cost
    def compute_jacobian(self, satelite_measurements):
        """
        Compute jacobian of residual function analytically.

        Each row is the unit vector from the satellite to the estimate.
        NOTE(review): the numeric gradient check runs on every call -- fine
        for this demo, but remove for performance-sensitive use.
        """
        num_residuals = len(satelite_measurements)
        jacobian = np.zeros([num_residuals, 2])
        for i, s in enumerate(satelite_measurements):
            f = self.variables_xy - s.position
            jacobian[i, :] = f / np.linalg.norm(f)
            np.testing.assert_allclose(
                jacobian[i, :],
                self.gradient_checking_simple(satelite_measurements[i]), 1e-4)
        return jacobian
    def compute_weights(self, satelite_measurements):
        """
        Format the weight into a block-diagonal matrix.
        """
        W_diag = [s.weight for s in satelite_measurements]
        W = np.diag(W_diag)
        return W
    def gradient_checking_simple(self, satelite_measurement):
        """
        Gradient checking by central differences; restores variables_xy
        before returning.
        """
        x_orig = np.copy(self.variables_xy)
        delta = 1e-6
        self.variables_xy = np.copy(x_orig)
        self.variables_xy += np.array([delta, 0])
        r_plus = self.residual(satelite_measurement)
        self.variables_xy = np.copy(x_orig)
        self.variables_xy += np.array([-delta, 0])
        r_minus = self.residual(satelite_measurement)
        grad_x = (r_plus - r_minus) / (2 * delta)
        self.variables_xy = np.copy(x_orig)
        self.variables_xy += np.array([0, delta])
        r_plus = self.residual(satelite_measurement)
        self.variables_xy = np.copy(x_orig)
        self.variables_xy += np.array([0, -delta])
        r_minus = self.residual(satelite_measurement)
        grad_y = (r_plus - r_minus) / (2 * delta)
        self.variables_xy = np.copy(x_orig)
        return [grad_x, grad_y]
    def compute_b(self, satelite_measurements):
        """
        residual function evaluated at current variables, as a column vector
        """
        num_residuals = len(satelite_measurements)
        b = np.zeros([num_residuals, 1])
        for i, s in enumerate(satelite_measurements):
            b[i, :] = self.residual(s)
        return b
    def solve_normal_equation(self, jacobian, b, W):
        """
        Solve the weighted normal equations J^T W J x = -J^T W b.
        Side effect: caches J^T W J in self.hessian for covariance plotting.
        """
        lhs = jacobian.T @ W @ jacobian
        rhs = -jacobian.T @ W @ b
        delta = np.linalg.solve(lhs, rhs)
        delta = delta.flatten()
        self.hessian = lhs
        return delta
    def plot_cost(self):
        """
        Plot the cost field over a [-50, 50]^2 grid.

        NOTE(review): this scan overwrites self.variables_xy and does not
        restore the estimate afterwards; call only after reading the result.
        """
        dx = dy = 0.5
        Y, X = np.mgrid[slice(-50, 50 + dy, dy), slice(-50, 50 + dx, dx)]
        costs = np.zeros_like(X)
        cols, rows = X.shape
        for col in range(cols):
            for row in range(rows):
                x = X[col, row]
                y = Y[col, row]
                self.variables_xy = np.array([x, y])
                costs[col, row] = self.compute_cost(self.satelite_measurements)
        im = plt.pcolormesh(X, Y, costs)
        plt.colorbar(im)
        plt.title('cost field')
        plt.show()
def main():
    """Simulate measurements, solve for the receiver, and visualize results."""
    satelite_measurements = generate_data()
    gps = GPS(satelite_measurements)
    gps.least_squares()
    # Left: covariance ellipses from the Hessian; right: the raw cost field.
    ax = plt.subplot(1, 2, 1, aspect='equal')
    plot_hessian_as_covarinace_2d(ax, gps.variables_xy, gps.hessian,
                                  gps.satelite_measurements)
    plt.subplot(1, 2, 2, aspect='equal')
    gps.plot_cost()
    plt.show()
if __name__ == "__main__":
    main()
|
yimuw/yimu-blog | least_squares/ddp/ddp_linear.py | <reponame>yimuw/yimu-blog<filename>least_squares/ddp/ddp_linear.py
import scipy.linalg as linalg
import numpy as np
from numpy.linalg import inv
import ddp_types
Dynamic = ddp_types.LinearDynamic
class DPPstate:
    """One backward-recursion step of DDP for a quadratic value function.

    Holds the quadratic value function V_{n+1} (weight matrix + mean) and,
    after compute(), the step's affine control law u = u_k1 + u_k2 @ x plus
    the eliminated quadratic V_n for the previous step.
    NOTE(review): the name looks like a typo for "DDPState"; kept for
    compatibility with callers.
    """
    def __init__(self, v_quadratic_weight, v_quadratic_mean):
        self.v_quadratic_mean = v_quadratic_mean.copy()
        self.v_quadratic_weight = v_quadratic_weight.copy()
    def compute_q(self, state, control):
        """Build the quadratic Q_n(x, u) = l_n(x, u) + V_{n+1}(f(x, u)).

        Stores the 4x4 Hessian and 4-vector gradient over [x; u] in
        self.q_hessian / self.q_grad.
        """
        dynamic = Dynamic()
        dynamic_jacobi_wrt_state = dynamic.jacobi_wrt_state(state, control)
        dynamic_jacobi_wrt_control = dynamic.jacobi_wrt_controls(
            state, control)
        # Stacked Jacobian of f with respect to [x; u].
        dynamic_jacobi = np.hstack(
            [dynamic_jacobi_wrt_state, dynamic_jacobi_wrt_control])
        # Q_n(x,u) = l_n(x, u) + V_n+1(f(x,u))
        q_hessian_v_term = dynamic_jacobi.T @ self.v_quadratic_weight @ dynamic_jacobi
        q_grad_v_term = dynamic_jacobi.T @ self.v_quadratic_weight @ (
            -self.v_quadratic_mean)
        # assume l(x, u) = 0.5 * k * u.T @ u
        k = 1e-6
        q_hessian_l_term = np.diag([0, 0, k, k])
        # q_grad_l_term = np.array([0, 0, 0, 0])
        # Q_n(x,u) = l_n(x, u) + V_n+1(f(x,u))
        self.q_hessian = q_hessian_v_term + q_hessian_l_term
        self.q_grad = -(q_grad_v_term )
        print("self.q_hessian:", self.q_hessian)
        print("self.q_grad:", self.q_grad)
    def compute(self, state, control):
        """Derive the control law and eliminate u to get V_n.

        Returns:
            (v_next_quad_weight, v_next_quad_mean): the quadratic value
            function to be used by the previous step.
        """
        self.compute_q(state, control)
        # The q system:
        # | A11 A12 | x = b1
        # | A21 A22 | u   b2
        # 1. u given x:
        #    u = inv(A22)* (b2 - A21 * x)
        #    u = inv(A22) * b2 - inv(A22) * A21 * x
        #        |                 |
        #        u_k1              u_k2
        #
        # 2. eliminate u
        #    u = inv(A22) * b2 - inv(A22) * A21 * x
        #
        #    A11 * x + A12 * u = b1
        #    plug in u,
        #    A11 * x + A12 * (inv(A22) * b2 - inv(A22) * A21 * x) = b1
        #    (A11 - A12 @ inv(A22) @ A21) x = b1 - A12 @ inv(A22) @ b2
        #
        A11 = self.q_hessian[0:2, 0:2]
        A12 = self.q_hessian[0:2, 2:4]
        A21 = self.q_hessian[2:4, 0:2]
        A22 = self.q_hessian[2:4, 2:4]
        b1 = self.q_grad[0:2]
        b2 = self.q_grad[2:4]
        # compute u given x.
        # u = u_k1 + u_k2 * x
        A22_inv = inv(A22)
        self.u_k1 = A22_inv @ b2
        self.u_k2 = -A22_inv @ A21
        # eliminate u for q function, the result is the next v function
        # (a Schur complement on the u block).
        self.v_next_quad_weight = A11 - A12 @ A22_inv @ A21
        self.v_next_quad_b = b1 - A12 @ A22_inv @ b2
        self.v_next_quad_mean = np.linalg.solve(self.v_next_quad_weight,
                                                self.v_next_quad_b)
        return self.v_next_quad_weight, self.v_next_quad_mean
class DDP:
    """Differential dynamic programming demo using the (linear) Dynamic model."""
    def initialize(self):
        """Return (num_controls, initial_state, init_controls, target_state)."""
        initial_state = np.array([0., 0.])
        num_controls = 3
        init_controls = [np.array([1, 1.]) for i in range(num_controls)]
        target_state = np.array([2., 2.])
        return num_controls, initial_state, init_controls, target_state
    def forward_pass(self, num_controls, initial_state, init_controls):
        """Roll the dynamics forward; returns num_controls + 1 states."""
        state = initial_state.copy()
        forward_pass_states = [state]
        for i in range(num_controls):
            next_state = Dynamic().f_function(
                state, init_controls[i])
            forward_pass_states.append(next_state)
            state = next_state
        return forward_pass_states
    def backward_pass(self, num_controls, forward_pass_states, init_controls,
                      final_cost):
        """Backward recursion from the final cost.

        Returns:
            list of DPPstate, one per step, each holding its control law.
        """
        v_quad_weight = final_cost.quad_weight()
        v_quad_mean = final_cost.quad_mean()
        ddp_states = [None] * num_controls
        # iterate [n-1, 0] to compute the control law
        for i in range(num_controls - 1, -1, -1):
            state = forward_pass_states[i]
            control = init_controls[i]
            ddp_state = DPPstate(v_quad_weight, v_quad_mean)
            v_quad_weight, v_quad_mean = ddp_state.compute(state, control)
            ddp_states[i] = ddp_state
        return ddp_states
    def apply_control_law(self, num_controls, initial_state, ddp_states):
        """Re-roll the dynamics applying u = u_k1 + u_k2 @ x at each step."""
        state = initial_state.copy()
        new_states = [state]
        new_controls = []
        for i in range(num_controls):
            ddp_state = ddp_states[i]
            # the argmin_u Q(u, x)
            print('const:', ddp_state.u_k1)
            print('feedbk:', ddp_state.u_k2)
            control = ddp_state.u_k1 + ddp_state.u_k2 @ state
            state = Dynamic().f_function(state, control)
            new_controls.append(control)
            new_states.append(state)
        return new_controls, new_states
    def check_dynamic(self, num_controls, states, controls):
        """Assert the returned states satisfy the dynamics under the controls."""
        state0 = states[0]
        integrated_states = self.forward_pass(num_controls, state0, controls)
        diff = np.stack(integrated_states) - np.stack(states)
        assert np.allclose(np.sum(diff), 0)
        print('integrated_states - ddp_states: ', diff)
    def run(self):
        """End-to-end demo: forward pass, backward pass, re-rollout, check."""
        num_controls, initial_state, init_controls, target_state = self.initialize(
        )
        print('initial_state:', initial_state)
        print('target_state:', target_state)
        print('num_states:', num_controls + 1)
        forward_pass_states = self.forward_pass(num_controls, initial_state,
                                                init_controls)
        print('forward_pass_states:', forward_pass_states)
        final_state = forward_pass_states[-1]
        final_state_init_cost = ddp_types.TargetCost(final_state, target_state)
        print('final_state_init_cost:', final_state_init_cost.cost())
        ddp_states = self.backward_pass(num_controls, forward_pass_states,
                                        init_controls, final_state_init_cost)
        new_controls, new_states = self.apply_control_law(
            num_controls, initial_state, ddp_states)
        print('----------------------------------')
        print('new_controls:', new_controls)
        print('new_states:', new_states)
        final_state_end_cost = ddp_types.TargetCost(
            new_states[-1], target_state)
        print('final_state_end_cost:', final_state_end_cost.cost())
        self.check_dynamic(num_controls, new_states, new_controls)
def main():
    """Entry point: build a DDP solver and run the linear-dynamics demo."""
    solver = DDP()
    solver.run()
if __name__ == "__main__":
    main()
|
yimuw/yimu-blog | least_squares/ddp/ddp_types.py | import scipy.linalg as linalg
import numpy as np
from math import cos, sin
class LinearDynamic:
    """Linear dynamics x' = x + 2*ux, y' = y + uy, with constant Jacobians."""
    # TODO: change the API
    def jacobi_wrt_state(self, state, controls):
        """d f / d state: identity (the state enters additively)."""
        return np.identity(2)
    def jacobi_wrt_controls(self, state, controls):
        """d f / d controls: diag(2, 1)."""
        return np.diag([2., 1.])
    def f_function(self, state, controls):
        """Advance the state one step under the given controls."""
        x, y = state
        ux, uy = controls
        next_x = x + 2 * ux
        next_y = y + uy
        return np.array([next_x, next_y])
class NonlinearDynamic:
    """Nonlinear dynamics x' = x + sin(ux), y' = y + uy."""
    # TODO: change the API
    def jacobi_wrt_state(self, state, controls):
        """d f / d state: identity (f is state plus a control-only term)."""
        return np.identity(2)
    def jacobi_wrt_controls(self, state, controls):
        """d f / d controls: diag(cos(ux), 1)."""
        ux, _uy = controls
        return np.diag([cos(ux), 1.0])
    def f_function(self, state, controls):
        """Advance the state one step under the given controls."""
        x, y = state
        ux, uy = controls
        return np.array([x + sin(ux),
                         y + uy])
class TargetCost:
    """Quadratic terminal cost ||state - prior||^2 with identity weight."""
    def __init__(self, state, prior):
        # Defensive copies so later caller-side mutation cannot leak in.
        self.state = state.copy()
        self.prior = prior.copy()
    def residual(self):
        """Return (state - prior) as a 2x1 column vector."""
        return (np.asarray(self.state) - np.asarray(self.prior)).reshape(2, 1)
    def cost(self):
        """Return r^T r as a 1x1 matrix."""
        r = self.residual()
        return r.T @ r
    def jacobi(self):
        """d residual / d state = identity."""
        return np.identity(2)
    def weight(self):
        """Residual weight matrix (identity)."""
        return np.identity(2)
    def quad_weight(self):
        """Gauss-Newton Hessian J^T W J."""
        jac = self.jacobi()
        wgt = self.weight()
        return jac.T @ wgt @ jac
    def quad_mean(self):
        """Mean of the quadratic value function: the prior itself."""
        return self.prior
|
yimuw/yimu-blog | deep-learning/tensorflow-from-scratch/test.py | <filename>deep-learning/tensorflow-from-scratch/test.py
from variables_tree_flow import *
from utils import *
def linear():
    """Fit the free variables of a linear residual y - (f.T x + e) to zero
    by gradient descent on the homemade autodiff graph."""
    a = Variable(1, 'a')
    b = Variable(2, 'b')
    c = Variable(3, 'c')
    d = Variable(4, 'd')
    e = Variable(5, 'e')
    # Fixed coefficients (marked 'const' so they are not updated).
    f1 = Variable(1, 'f1', 'const')
    f2 = Variable(1.1, 'f2', 'const')
    f3 = Variable(1.2, 'f3', 'const')
    f4 = Variable(1.3, 'f4', 'const')
    y = Variable(10, 'y')
    # NOTE(review): t is built but never used in the cost below.
    t = a * a
    temp = y - (f1*a + f2*b + f3*c + f4*d + e)
    cost = temp * temp
    core = NumberFlowCore(cost)
    # build the topological order. Ignore it is fine. Just want to copy tensorflow
    with core as graph:
        for i in range(1000):
            print("cost.val:", cost.value, " iter:", i)
            core.forward()
            core.clear_grad()
            core.backward()
            core.gradient_desent(rate=0.0001)
            if cost.value < 1e-8:
                break
    for var in core.varible_nodes:
        print(var.id, var.value)
    for const in core.const_nodes:
        print(const.id, const.value)
    # Recompute the residual from raw values to sanity-check the result.
    res = y.value - (f1.value * a.value + f2.value*b.value +
                     f3.value*c.value + f4.value*d.value + e.value)
    print("test res:", res * res)
def logistic():
    """Fit a one-feature logistic model so that sigmoid(a*f1 + b) -> 1
    (cross-entropy with the target fixed at 1)."""
    a = Variable(0, 'a')
    b = Variable(0, 'b')
    f1 = Variable(1, 'f1', 'const')
    pred = ntf_sigmoid(a * f1 + b)
    cost = - ntf_log(pred)
    core = NumberFlowCore(cost)
    # build the topological order. Ignore it is fine. Just want to copy tensorflow
    with core as graph:
        for i in range(2000):
            print("cost.val:", cost.value, " iter:", i)
            print('pred:', pred)
            # traverse_tree(cost)
            core.forward()
            core.clear_grad()
            core.backward()
            core.gradient_desent(rate=0.01)
            if cost.value < 1e-8:
                break
    for var in core.varible_nodes:
        print(var.id, var.value)
    for const in core.const_nodes:
        print(const.id, const.value)
def test_chain():
    """Chain two Variable matrices through @ and run a few descent steps
    to exercise gradients flowing through matrix products."""
    theta0 = np.array([Variable(value=1., id='t{}'.format(i))
                       for i in range(2 * 2)]).reshape([2, 2])
    theta1 = np.array([Variable(value=2., id='t{}'.format(i + 10))
                       for i in range(2 * 2)]).reshape([2, 2])
    f1 = np.array([3, 4.])
    temp = theta0 @ f1
    print(temp.shape)
    pred = theta1 @ temp
    # Scalar cost: sum of the two outputs.
    cost = pred[0] + pred[1]
    core = NumberFlowCore(cost)
    # build the topological order. Ignore it is fine. Just want to copy tensorflow
    with core as graph:
        for i in range(10):
            print("cost.val:", cost.value, " iter:", i)
            print('pred:', pred)
            # traverse_tree(cost)
            core.forward()
            core.clear_grad()
            core.backward()
            core.gradient_desent(rate=0.01)
            if cost.value < 1e-8:
                break
    for var in core.varible_nodes:
        print(var.id, var.value)
    for const in core.const_nodes:
        print(const.id, const.value)
def test_graph():
    """Minimal DAG where x appears twice (x + x*y) to exercise gradient
    accumulation on shared nodes."""
    x = Variable(value=1., id='x')
    y = Variable(value=1., id='y')
    c = x + x * y
    cost = c * c
    core = NumberFlowCore(cost)
    for i in range(100):
        if i % 10 == 0:
            print("cost.val:", cost.value, " iter:", i)
        core.forward()
        core.clear_grad()
        core.backward()
        core.gradient_desent(rate=0.01)
        if abs(cost.value) < 1e-8:
            print('break', cost.value)
            break
    for var in core.varible_nodes:
        print(var.id, var.value)
    for const in core.const_nodes:
        print(const.id, const.value)
# Uncomment individual demos as needed.
if __name__ == "__main__":
    # linear()
    # logistic()
    # test_chain()
    test_graph()
|
yimuw/yimu-blog | least_squares/icp/icp_so3.py | <filename>least_squares/icp/icp_so3.py
import numpy as np
# from scipy.linalg import logm, expm
from math import cos, sin, pi
import matplotlib.pyplot as plt
from utils import skew, so3_exp
def icp_residual_so3(point_src, point_target, w_so3):
    """ICP residual parameterized by a global so(3) vector w_so3.

    Returns a 1-D vector ordered [p1_x, p1_y, p1_z, p2_x, p2_y, p2_z, ...].
    """
    rotation = so3_exp(w_so3)
    per_point_error = rotation @ point_src - point_target
    # Fortran-order flatten groups the error per point.
    return per_point_error.flatten('F')
def compute_so3_jacobian_numurical(point_src, point_target, w_so3):
    """Central-difference Jacobian of the ICP residual w.r.t. the global
    so(3) vector w_so3.

    Returns:
        (jacobian, residual): the point_src.size x 3 Jacobian and the
        residual evaluated at w_so3.
    """
    DELTA = 1e-6
    num_residuals = point_src.size
    num_params = 3
    jacobian = np.zeros([num_residuals, num_params])
    curret_params = w_so3.copy()
    for p_idx in range(3):
        params_plus = curret_params.copy()
        params_plus[p_idx] += DELTA
        residual_plus = icp_residual_so3(point_src, point_target, params_plus)
        params_minus = curret_params.copy()
        params_minus[p_idx] -= DELTA
        residual_minus = icp_residual_so3(point_src, point_target, params_minus)
        # Central difference: (r(w + d) - r(w - d)) / (2 d).
        dr_dpidx = (residual_plus - residual_minus) / (2. * DELTA)
        jacobian[:, p_idx] = dr_dpidx
    residual_cur_params = icp_residual_so3(point_src, point_target, w_so3)
    return jacobian, residual_cur_params
def icp_so3_numirical(point_src, point_target):
    """Gauss-Newton ICP over a global so(3) parameterization.

    Runs a fixed 10 normal-equation iterations on the vector w_so3.
    NOTE(review): the estimate is not returned -- only per-iteration costs
    are printed.
    """
    w_so3 = np.array([0, 0, 0.])
    for iter in range(10):
        jacobi, b = compute_so3_jacobian_numurical(point_src, point_target, w_so3)
        # Normal equations: (J^T J) delta = -J^T r.
        delta = np.linalg.solve(jacobi.transpose() @ jacobi, -jacobi.transpose() @ b)
        w_so3 += delta
        #print('jocobian:', jacobi)
        #print('b: ', b)
        print('iter: ', iter, ' cost:', b.transpose() @ b)
        #print('current params: ', w_so3)
def icp_residual_local_so3(point_src, point_target, R_current, w_so3_local):
    """ICP residual with a local (right-multiplied) so(3) perturbation of
    R_current.

    Returns a 1-D vector ordered [p1_x, p1_y, p1_z, p2_x, p2_y, p2_z, ...].
    """
    rotation = R_current @ so3_exp(w_so3_local)
    per_point_error = rotation @ point_src - point_target
    # Fortran-order flatten groups the error per point.
    return per_point_error.flatten('F')
# Can do a lambda to reduce code length
def compute_local_so3_jacobian_numurical(point_src, point_target, R_current):
    """Central-difference Jacobian of the ICP residual w.r.t. the local
    so(3) perturbation, evaluated at the zero perturbation of R_current.

    Returns:
        (jacobian, residual): the point_src.size x 3 Jacobian and the
        residual at R_current.
    """
    DELTA = 1e-6
    num_residuals = point_src.size
    num_params = 3
    jacobian = np.zeros([num_residuals, num_params])
    # Perturbations are taken around the zero local update.
    w_so3_local = np.array([0, 0, 0.])
    curret_params = w_so3_local.copy()
    for p_idx in range(3):
        params_plus = curret_params.copy()
        params_plus[p_idx] += DELTA
        residual_plus = icp_residual_local_so3(point_src, point_target, R_current, params_plus)
        params_minus = curret_params.copy()
        params_minus[p_idx] -= DELTA
        residual_minus = icp_residual_local_so3(point_src, point_target, R_current, params_minus)
        # Central difference: (r(w + d) - r(w - d)) / (2 d).
        dr_dpidx = (residual_plus - residual_minus) / (2. * DELTA)
        jacobian[:, p_idx] = dr_dpidx
    residual_cur_params = icp_residual_local_so3(point_src, point_target, R_current, w_so3_local)
    return jacobian, residual_cur_params
def icp_local_so3_numirical(point_src, point_target):
    """Gauss-Newton ICP with a local so(3) parameterization updated on SO(3).

    Runs a fixed 10 iterations; each step solves the normal equations for a
    local perturbation and composes it onto R_current.
    NOTE(review): the estimate is not returned -- only per-iteration costs
    are printed.
    """
    w_so3_local = np.array([0, 0, 0.])
    R_current = np.identity(3)
    for iter in range(10):
        # Jocobi on so3
        jacobi, b = compute_local_so3_jacobian_numurical(point_src, point_target, R_current)
        # Normal equations: (J^T J) delta = -J^T r.
        delta = np.linalg.solve(jacobi.transpose() @ jacobi, -jacobi.transpose() @ b)
        # Update on SO3
        R_current = R_current @ so3_exp(delta)
        #print('jocobian:', jacobi)
        #print('b: ', b)
        print('iter: ', iter, ' cost:', b.transpose() @ b)
        #print('current params: ', w_so3)
|
yimuw/yimu-blog | least_squares/warm_up/qudratic_regression.py | <gh_stars>1-10
import numpy as np
import matplotlib.pyplot as plt
def generate_quadratic_data():
quadratic_a = 2.4
quadratic_b = -2.
quadratic_c = 1.
num_data = 30
noise = 2 * np.random.randn(num_data)
sampled_x = np.linspace(-10, 10., num_data)
sampled_y = quadratic_a * sampled_x * sampled_x + quadratic_b * sampled_x + quadratic_c + noise
return sampled_x, sampled_y
class LeastSquares:
    """Fit y = a*x^2 + b*x + c to data by normal-equation iterations.

    theta holds [a, b, c]; the design matrix (columns x^2, x, 1) is built
    once in __init__.  Because the model is linear in theta, the iteration
    converges in a single step and then stops on the tiny-update check.
    """
    def __init__(self, x, y):
        self.x = x
        self.y = y
        # Design matrix with one row per sample: [x^2, x, 1].
        self.data_mat = np.vstack(
            [self.x * self.x, self.x,
             np.ones_like(self.x)]).T
        self.theta = np.array([0, 0, 0.])
    def residual(self):
        """Prediction minus observation for the current theta."""
        return self.data_mat @ self.theta - self.y
    def cost(self):
        """Sum of squared residuals."""
        r = self.residual()
        return r.T @ r
    def compute_jacobian(self):
        """The model is linear in theta, so the Jacobian is the design matrix."""
        return self.data_mat
    def least_squares_solve(self):
        """Iterate Gauss-Newton steps; returns the fitted theta."""
        for iteration in range(10):
            print('iteration: {} cost: {}'.format(iteration, self.cost()))
            jac = self.compute_jacobian()
            res = self.residual()
            # Normal equations: (J^T J) delta = -J^T r.
            step = np.linalg.solve(jac.T @ jac, -jac.T @ res)
            self.theta += step
            if np.linalg.norm(step) < 1e-8:
                print('converged iteration: {} cost: {}'.format(
                    iteration, self.cost()))
                break
        return self.theta
def main():
    """Fit the quadratic to noisy samples and plot data vs. fitted curve."""
    x_data, y_data = generate_quadratic_data()
    solver = LeastSquares(x_data, y_data)
    theta = solver.least_squares_solve()
    # Evaluate the fitted polynomial on a denser, slightly wider grid.
    x = np.linspace(-12, 12., 100)
    a, b, c = theta
    print('fitted coefficient (a,b,c):', theta.transpose())
    pred_y = a * x * x + b * x + c
    p1 = plt.plot(x_data, y_data, '*r')
    p2 = plt.plot(x, pred_y, 'g')
    plt.legend((p1[0], p2[0]), ('sampled data', 'fitted function'))
    plt.title('Data points vs Fitted curve')
    plt.show()
if __name__ == "__main__":
    main()
|
ambidextrousTx/RNLTK | test/TestSimilarityUtils.py | '''
Test cases for the similarity utilities
'''
import sys
import unittest
sys.path.append('../src')
import SimilarityUtils
class TestSimilarityUtils(unittest.TestCase):
    ''' Main class that tests all similarity methods '''
    def test_bleu_needs_nonempty_input(self):
        ''' The BLEU method should not work on empty arguments '''
        bleu_scorer = SimilarityUtils.SimilarityMetrics()
        # Empty strings (either argument, or both) must raise TypeError.
        self.assertRaises(TypeError, bleu_scorer.bleu, '', '')
        self.assertRaises(TypeError, bleu_scorer.bleu, 'one', '')
if __name__ == '__main__':
    unittest.main()
|
ambidextrousTx/RNLTK | test/TestBaseUtils.py | <filename>test/TestBaseUtils.py<gh_stars>1-10
''' Tests for BaseUtils
'''
import unittest
import sys
sys.path.append('../src')
import BaseUtils
class TestBaseUtils(unittest.TestCase):
    ''' Main test class for the BaseUtils '''
    def test_word_segmenter_with_empty(self):
        ''' For an empty string, the segmenter returns
        just an empty list '''
        segments = BaseUtils.get_words('')
        self.assertEqual(segments, [])
    def test_word_segmenter(self):
        ''' The word segmenter returns the expected
        array of strings '''
        segments = BaseUtils.get_words('this is a random sentence')
        self.assertEqual(segments, ['this', 'is', 'a', 'random', 'sentence'])
    def test_ignoring_whitespace(self):
        ''' Whitespace in the input string is ignored
        in the input string '''
        # NOTE(review): input is identical to test_word_segmenter -- the extra
        # whitespace this test was meant to exercise appears to be missing.
        segments = BaseUtils.get_words('this is a random sentence')
        self.assertEqual(segments, ['this', 'is', 'a', 'random', 'sentence'])
    def test_ignoring_special_chars(self):
        ''' If there are special characters in the input,
        they are ignored as well '''
        segments = BaseUtils.get_words('this is $$%%a random --00sentence')
        self.assertEqual(segments, ['this', 'is', 'a', 'random', 'sentence'])
    def test_segmenter_common_delims(self):
        ''' The sentence segmenter is able to split sentences
        on ., !, ?, etc. '''
        segments = BaseUtils.get_sentences('Wow! Did you see this? Amazing.')
        self.assertEqual(segments, ['Wow', 'Did you see this', 'Amazing'])
    def test_ignore_whitespace_sent(self):
        ''' Whitespace in the sentences is also ignored '''
        # NOTE(review): same input as test_segmenter_common_delims; the extra
        # whitespace seems to have been lost.
        segments = BaseUtils.get_sentences('Wow! Did you see this? Amazing.')
        self.assertEqual(segments, ['Wow', 'Did you see this', 'Amazing'])
if __name__ == '__main__':
    unittest.main()
|
ambidextrousTx/RNLTK | src/SimilarityUtils.py | <gh_stars>1-10
'''
The place to hold all similarity metrics. Currently implements the following:
BLEU
'''
class SimilarityMetrics(object):
    '''Container for text-similarity scoring methods (currently BLEU only).'''

    def __init__(self):
        pass

    def bleu(self, phrase1, phrase2):
        '''Compute the BLEU score between two phrases.

        Raises TypeError when either phrase is the empty string. The
        score computation itself is not implemented yet, so valid input
        currently produces no return value.
        '''
        if '' in (phrase1, phrase2):
            raise TypeError('Received one or more empty arguments')
|
ambidextrousTx/RNLTK | test/TestNGrams.py | <reponame>ambidextrousTx/RNLTK
''' Main class for testing the NGrams module
'''
import unittest
import sys
sys.path.append('../src')
import NGrams
class TestNGrams(unittest.TestCase):
    '''Verifies the n-gram generator for several window sizes.'''

    def test_unigrams(self):
        '''A window of one wraps each token in its own list.'''
        sentence = 'this is a random piece of text'
        expected = [['this'], ['is'], ['a'], ['random'],
                    ['piece'], ['of'], ['text']]
        self.assertEqual(NGrams.generate_ngrams(sentence, 1), expected)

    def test_bigrams(self):
        '''A window of two slides pairwise across the tokens.'''
        sentence = 'this is a random piece of text'
        expected = [['this', 'is'], ['is', 'a'],
                    ['a', 'random'], ['random', 'piece'],
                    ['piece', 'of'], ['of', 'text']]
        self.assertEqual(NGrams.generate_ngrams(sentence, 2), expected)

    def test_fourgrams(self):
        '''A window of four takes four consecutive tokens at a time.'''
        sentence = 'this is a random piece of text'
        expected = [['this', 'is', 'a', 'random'],
                    ['is', 'a', 'random', 'piece'],
                    ['a', 'random', 'piece', 'of'],
                    ['random', 'piece', 'of', 'text']]
        self.assertEqual(NGrams.generate_ngrams(sentence, 4), expected)


if __name__ == '__main__':
    unittest.main()
|
ambidextrousTx/RNLTK | src/NGrams.py | ''' Methods (static for now) to do with NGram
creation and calculations '''
def generate_ngrams(text, num):
    '''Return every contiguous n-gram of *text* as a list of token lists.

    The text is tokenised by splitting on single spaces, then a sliding
    window of width ``num`` is moved across the tokens. If the text has
    fewer than ``num`` tokens, an empty list is returned.

    >>> generate_ngrams('this is a random piece', 2)
    [['this', 'is'], ['is', 'a'], ['a', 'random'], ['random', 'piece']]
    '''
    # NOTE: splitting on a literal space means runs of spaces produce
    # empty tokens; callers currently pass single-spaced text.
    tokens = text.split(' ')
    return [tokens[i:i + num] for i in range(len(tokens) - num + 1)]
|
ambidextrousTx/RNLTK | src/BaseUtils.py | <gh_stars>1-10
'''
Base NLP utilities
'''
import re
def get_words(sentence):
    '''Extract the words of *sentence*, dropping punctuation and digits.

    Only alphabetic characters and whitespace survive the cleaning pass;
    the remainder is split on single spaces and empty fragments are
    discarded.

    >>> get_words('a, most$ **interesting piece')
    ['a', 'most', 'interesting', 'piece']
    '''
    cleaned = ''.join(
        ch for ch in sentence if ch.isalpha() or ch.isspace())
    return [token for token in cleaned.split(' ') if token != '']
def get_sentences(phrase):
    '''Split *phrase* into sentences on ., ! or ? and trim each one.

    Fragments that are empty before trimming are dropped.

    >>> get_sentences('What an amazing opportunity! I am so glad.')
    ['What an amazing opportunity', 'I am so glad']
    '''
    # Filter *before* stripping, exactly like the original loop did, so
    # a purely-whitespace fragment still yields an empty entry.
    return [part.strip()
            for part in re.split(r'[?!.]', phrase)
            if part != '']
|
shreyanikkam11/fdb | bulk1.py | <filename>bulk1.py
# Dump every row of Consumer_Complaints.csv to stdout, one line per row.
import csv
import sys

# Complaint narratives can exceed csv's default 128 KB field limit.
csv.field_size_limit(sys.maxsize)

with open('Consumer_Complaints.csv') as csvfile:
    # NOTE(review): delimiter=' ' / quotechar='|' looks unusual for this
    # dataset (normally comma-separated) -- confirm against the real file.
    spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
    for row in spamreader:
        print(', '.join(row))
        #print(', '.join(col))
|
shreyanikkam11/fdb | bulk3.py | <filename>bulk3.py<gh_stars>0
# Read selected columns of Consumer_Complaints.csv, re-prompting for the
# file name until it can be opened.
import csv
import sys

# Complaint narratives can exceed csv's default 128 KB field limit.
csv.field_size_limit(sys.maxsize)

filename = 'Consumer_Complaints.csv'

# BUG FIX: the original referenced ``risk`` inside the except handler
# before it was ever assigned, so a failed open raised NameError instead
# of prompting. It also read the whole file without closing the handle.
while True:
    try:
        with open(filename) as probe:
            probe.read()
    except OSError:
        print("Could not open", filename, "file")
        filename = input("\nPlease try to open file again: ")
    else:
        break

with open(filename) as f:
    reader = csv.reader(f, delimiter=' ', quotechar='|')
    data = []
    for row in reader:
        # BUG FIX: the original appended the whole row plus the bare
        # index literals 1 and 18; the intent (per the trailing comment)
        # was to collect the cells in those columns.
        for col in (1, 18):
            if col < len(row):
                data.append(row[col])

for item in data:
    print(item)  # print the selected columns
|
shreyanikkam11/fdb | SchedulingTutorial.py | import itertools
import traceback
import fdb
import fdb.tuple
# Pin the client to API version 520 before any other fdb call.
fdb.api_version(520)

####################################
##        Initialization          ##
####################################

# Data model:
# ('attends', student, class) = ''
# ('class', class_name) = seats_left

db = fdb.open()  # connects using the default cluster file
scheduling = fdb.directory.create_or_open(db, ('scheduling',))
course = scheduling['class']     # subspace: per-class remaining-seat counts
attends = scheduling['attends']  # subspace: (student, class) enrolment keys
@fdb.transactional
def add_class(tr, c):
    """Create class *c* in the catalogue with 100 open seats."""
    tr[course.pack((c,))] = fdb.tuple.pack((100,))
# Generate 1,620 classes like '9:00 chem for dummies'
levels = ['intro', 'for dummies', 'remedial', '101',
          '201', '301', 'mastery', 'lab', 'seminar']
types = ['chem', 'bio', 'cs', 'geometry', 'calc',
         'alg', 'film', 'music', 'art', 'dance']
times = [str(h) + ':00' for h in range(2, 20)]  # '2:00' .. '19:00'
# 18 times x 10 types x 9 levels = 1,620 combinations.
class_combos = itertools.product(times, types, levels)
class_names = [' '.join(tup) for tup in class_combos]
@fdb.transactional
def init(tr):
    """Wipe the scheduling directory and recreate every class."""
    del tr[scheduling.range(())]  # Clear the directory
    for class_name in class_names:
        add_class(tr, class_name)
####################################
## Class Scheduling Functions ##
####################################
@fdb.transactional
def available_classes(tr):
    """Return the names of all classes that still have seats left."""
    return [course.unpack(k)[0] for k, v in tr[course.range(())]
            if fdb.tuple.unpack(v)[0]]
@fdb.transactional
def signup(tr, s, c):
    """Enrol student *s* in class *c*.

    A duplicate signup is a silent no-op. Raises when the class has no
    seats left or the student is already in 5 classes.
    """
    rec = attends.pack((s, c))
    if tr[rec].present(): return  # already signed up
    seats_left = fdb.tuple.unpack(tr[course.pack((c,))])[0]
    if not seats_left: raise Exception('No remaining seats')
    classes = tr[attends.range((s,))]
    if len(list(classes)) == 5: raise Exception('Too many classes')
    # Decrement the seat count and record the enrolment in one transaction.
    tr[course.pack((c,))] = fdb.tuple.pack((seats_left - 1,))
    tr[rec] = b''  # the key's presence is the record; the value is unused
@fdb.transactional
def drop(tr, s, c):
    """Remove student *s* from class *c*, returning the seat to the pool."""
    rec = attends.pack((s, c))
    if not tr[rec].present(): return  # not taking this class
    # Give the seat back, then delete the attendance record.
    tr[course.pack((c,))] = fdb.tuple.pack((fdb.tuple.unpack(tr[course.pack((c,))])[0] + 1,))
    del tr[rec]
@fdb.transactional
def switch(tr, s, old_c, new_c):
    """Move student *s* from *old_c* to *new_c*.

    Both steps share one transaction, so a failed signup also rolls
    back the drop.
    """
    drop(tr, s, old_c)
    signup(tr, s, new_c)
####################################
## Testing ##
####################################
import random
import threading
def indecisive_student(i, ops):
    """Simulate one student issuing *ops* random scheduling transactions.

    i   -- student index, used only to build the student ID.
    ops -- number of add/drop/switch operations to attempt.
    """
    student_ID = 's{:d}'.format(i)
    all_classes = class_names
    my_classes = []
    # BUG FIX: the original reused ``i`` as the loop variable, shadowing
    # the student-index parameter.
    for _ in range(ops):
        class_count = len(my_classes)
        # Pick an action that is legal for the current enrolment count.
        moods = []
        if class_count: moods.extend(['drop', 'switch'])
        if class_count < 5: moods.append('add')
        mood = random.choice(moods)
        try:
            if not all_classes:
                all_classes = available_classes(db)
            if mood == 'add':
                c = random.choice(all_classes)
                signup(db, student_ID, c)
                my_classes.append(c)
            elif mood == 'drop':
                c = random.choice(my_classes)
                drop(db, student_ID, c)
                my_classes.remove(c)
            elif mood == 'switch':
                old_c = random.choice(my_classes)
                new_c = random.choice(all_classes)
                switch(db, student_ID, old_c, new_c)
                my_classes.remove(old_c)
                my_classes.append(new_c)
        except Exception:
            # A failed transaction (class full, too many classes) just
            # forces a refresh of the cached availability list.
            traceback.print_exc()
            print("Need to recheck available classes.")
            all_classes = []
def run(students, ops_per_student):
    """Spawn one thread per student and wait for all of them to finish."""
    workers = []
    for idx in range(students):
        workers.append(threading.Thread(
            target=indecisive_student, args=(idx, ops_per_student)))
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print("Ran {} transactions".format(students * ops_per_student))
if __name__ == "__main__":
    init(db)  # wipe and repopulate the class catalogue
    print("initialized")
    run(10, 10)  # 10 students x 10 operations each
|
shreyanikkam11/fdb | setup.py | <filename>setup.py<gh_stars>0
from distutils.core import setup

# Prefer the long description from README.rst; fall back to an empty
# string when the file is missing (e.g. a bare source checkout).
# BUG FIX: the original bare ``except:`` also swallowed KeyboardInterrupt
# and SystemExit; only a failed open/read should be tolerated here.
try:
    with open("README.rst") as f:
        long_desc = f.read()
except OSError:
    long_desc = ""

setup(name="foundationdb",
      version="5.2.5",
      author="FoundationDB",
      author_email="<EMAIL>",
      description="Python bindings for the FoundationDB database",
      url="https://www.foundationdb.org",
      packages=['fdb'],
      package_data={'fdb': ["fdb/*.py"]},
      long_description=long_desc,
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Intended Audience :: Developers',
          # BUG FIX: 'Apache v2 License' is not a valid trove classifier;
          # the registered name is 'Apache Software License'.
          'License :: OSI Approved :: Apache Software License',
          'Operating System :: MacOS :: MacOS X',
          'Operating System :: Microsoft :: Windows',
          'Operating System :: POSIX :: Linux',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.0',
          'Programming Language :: Python :: 3.1',
          'Programming Language :: Python :: 3.2',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: Implementation :: CPython',
          'Topic :: Database',
          'Topic :: Database :: Front-Ends'
      ]
      )
|
aldookware/auto-send-tweets | kinesis_write.py | <filename>kinesis_write.py<gh_stars>0
import boto3
import json
import uuid
import time

# Target Kinesis stream; must already exist in this region.
stream_name = 'twitter-stream'
kinesis_client = boto3.client('kinesis', region_name='eu-west-2')

# Sample payloads to push onto the stream.
records = [
    {
        'age': 29,
        'stack': 'python'
    },
    {
        'age': 30,
        'stack': 'reactjs'
    },
    {
        'age': 32,
        'stack': 'Java'
    },
    {
        'age': 40,
        'stack': 'Scalar'
    }
]

# One random partition key shared by every record, so all of these
# records are routed to the same shard.
partition_key = str(uuid.uuid4())
def put_to_stream(records, partition_key):
    """Write each record to the Kinesis stream as a JSON blob.

    records       -- iterable of JSON-serialisable dicts.
    partition_key -- partition key shared by every record.
    """
    for record in records:
        # BUG FIX: the original was missing the comma after ``Data=...``,
        # which made this call a SyntaxError.
        kinesis_client.put_record(
            StreamName=stream_name,
            Data=json.dumps(record),
            PartitionKey=partition_key,
        )
        time.sleep(2)  # crude throttle between puts
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.