# serve_policy.py — from xcdata/code; uploaded via huggingface_hub (rev f9c42e5).
import dataclasses
import enum
import logging
import os
import pickle
import socket
import sys
import time
import traceback
import dill
import hydra
import numpy as np
import omegaconf
import torch
import torch.nn.functional as F
import tyro
from omegaconf import open_dict
from openpi.policies import policy as _policy
from openpi.policies import policy_config as _policy_config
from openpi.serving import websocket_policy_server
from openpi.training import config as _config
from openpi.training.config import get_data_config
from unified_video_action.common.pytorch_util import dict_apply
from unified_video_action.policy.base_image_policy import BaseImagePolicy
from unified_video_action.workspace.base_workspace import BaseWorkspace
from umi.real_world.real_inference_util import get_real_obs_resolution
# Precomputed language-conditioning latents keyed by task keyword
# ("cup", "towel", "mouse"), looked up in EvalRealPolicyAdapter.infer.
# NOTE(review): pickle.load can execute arbitrary code — only ship trusted files.
with open("prepared_data/language_latents.pkl", "rb") as _latents_file:
    language_latents = pickle.load(_latents_file)
def echo_exception():
    """Format the exception currently being handled as a full traceback string."""
    return "".join(traceback.format_exception(*sys.exc_info()))
def smooth_action(act_out, window_size=3, pad_size=1):
    """Smooth an action trajectory with a per-dimension moving average.

    Args:
        act_out: Tensor of shape (batch, timesteps, action_dim).
        window_size: Length of the moving-average kernel.
        pad_size: Replicate padding added on each side of the time axis.
            With the defaults (window_size == 2 * pad_size + 1) the output
            keeps the input's timestep count.

    Returns:
        Tensor of shape (batch, timesteps + 2*pad_size - window_size + 1,
        action_dim).
    """
    kernel = torch.ones(1, 1, window_size, device=act_out.device) / window_size
    # Replicate-pad the time axis (dim 1) so edge steps are averaged with
    # copies of themselves instead of zeros.
    act_out_padded = F.pad(act_out, (0, 0, pad_size, pad_size), mode="replicate")
    batch_size, timesteps, action_dim = act_out_padded.shape
    # Fold (batch, action_dim) into one conv batch so each action dimension is
    # smoothed independently by the single 1-D kernel.
    act_out_padded = act_out_padded.permute(0, 2, 1)
    act_out_padded = act_out_padded.reshape(-1, 1, timesteps)
    smoothed_act_out = F.conv1d(act_out_padded, kernel, padding=0)
    # Valid-convolution output length. (Was hard-coded as
    # timesteps - 2 * pad_size, which is only correct when
    # window_size == 2 * pad_size + 1.)
    out_len = timesteps - window_size + 1
    smoothed_act_out = smoothed_act_out.reshape(batch_size, action_dim, out_len)
    smoothed_act_out = smoothed_act_out.permute(0, 2, 1)
    return smoothed_act_out
class EvalRealPolicyAdapter:
    """Adapter to wrap eval_real.py PolicyInferenceNode as a Policy interface.

    Loads a unified_video_action workspace checkpoint and exposes the
    ``metadata`` property and ``infer`` method that the WebSocket policy
    server expects.
    """
    def __init__(self, ckpt_path: str, device: str, output_dir: str):
        """Load the checkpoint, build the workspace, and prepare the policy.

        Args:
            ckpt_path: A ``.ckpt`` file, or a run directory containing
                ``checkpoints/latest.ckpt``.
            device: Torch device string (e.g. ``"cuda"``).
            output_dir: Working directory passed to the workspace constructor.
        """
        self.ckpt_path = ckpt_path
        # A run directory was given; fall back to its conventional latest checkpoint.
        if not self.ckpt_path.endswith(".ckpt"):
            self.ckpt_path = os.path.join(self.ckpt_path, "checkpoints", "latest.ckpt")
        # NOTE(review): torch.load with dill can execute arbitrary code — only
        # load trusted checkpoints. File handle is left for the GC to close.
        payload = torch.load(open(self.ckpt_path, "rb"), map_location="cpu", pickle_module=dill)
        self.cfg = payload["cfg"]
        # Override the sampling-step count for autoregressive models at serve time.
        with open_dict(self.cfg):
            if "autoregressive_model_params" in self.cfg.model.policy:
                self.cfg.model.policy.autoregressive_model_params.num_sampling_steps = "100"
                print("-----------------------------------------------")
                print("num_sampling_steps", self.cfg.model.policy.autoregressive_model_params.num_sampling_steps)
                print("-----------------------------------------------")
        # Export the (possibly modified) config next to the checkpoint for reference.
        cfg_path = self.ckpt_path.replace(".ckpt", ".yaml")
        with open(cfg_path, "w") as f:
            f.write(omegaconf.OmegaConf.to_yaml(self.cfg))
        print(f"Exported config to {cfg_path}")
        print(f"Loading configure: {self.cfg.task.name}, workspace: {self.cfg.model._target_}, policy: {self.cfg.model.policy._target_}")
        # Observation resolution derived from the task's shape metadata; also
        # advertised to clients via `metadata`.
        self.obs_res = get_real_obs_resolution(self.cfg.task.shape_meta)
        self.device = torch.device(device)
        cls = hydra.utils.get_class(self.cfg.model._target_)
        self.workspace = cls(self.cfg, output_dir=output_dir)
        self.workspace: BaseWorkspace
        self.workspace.load_payload(payload, exclude_keys=None, include_keys=None)
        self.policy: BaseImagePolicy = self.workspace.model
        # Prefer the EMA weights when the run was trained with EMA enabled.
        if self.cfg.training.use_ema:
            self.policy = self.workspace.ema_model
            print("Using EMA model")
        self.policy.eval().to(self.device)
        self.policy.reset()
        # Note: past_action_list is shared across connections, but since WebSocket
        # processing is sequential per connection, this should work in practice
        self.past_action_list = []
        self._metadata = {"obs_resolution": self.obs_res}
    @property
    def metadata(self):
        """Static serving metadata sent to clients (observation resolution)."""
        return self._metadata
    def infer(self, obs: dict) -> dict:
        """Infer action from observation. Returns dict with 'actions' key.

        Args:
            obs: Mapping of observation names to numpy arrays for one step (a
                leading batch dimension is added here). May carry an optional
                "task_name" string used to pick a language latent.

        Returns:
            ``{"actions": np.ndarray}`` — the predicted action sequence,
            smoothed when the config name is "uva".
        """
        obs_dict_np = obs.copy()
        task_name = None
        # "task_name" is metadata, not a tensor — pop it before dict_apply.
        if "task_name" in obs_dict_np:
            task_name = obs_dict_np["task_name"]
            print("task_name", obs_dict_np["task_name"])
            del obs_dict_np["task_name"]
        # Select a precomputed language latent by keyword match on the task name.
        if self.cfg.task.dataset.language_emb_model is not None and task_name:
            if "cup" in task_name:
                language_goal = language_latents["cup"]
            elif "towel" in task_name:
                language_goal = language_latents["towel"]
            elif "mouse" in task_name:
                language_goal = language_latents["mouse"]
            else:
                language_goal = None
            if language_goal is not None:
                language_goal = torch.tensor(language_goal).to(self.device)
                # Add the batch dimension expected by predict_action.
                language_goal = language_goal.unsqueeze(0)
            print("task_name", task_name)
        else:
            language_goal = None
        with torch.no_grad():
            # numpy -> torch with a leading batch dimension, on the target device.
            obs_dict = dict_apply(
                obs_dict_np, lambda x: torch.from_numpy(x).unsqueeze(0).to(self.device)
            )
            if self.cfg.name == "uva":
                result = self.policy.predict_action(
                    obs_dict=obs_dict, language_goal=language_goal
                )
                # Keep only the two most recent predictions as history.
                self.past_action_list.append(np.array(result["action"][0].cpu()))
                if len(self.past_action_list) > 2:
                    self.past_action_list.pop(0)
                # Moving-average smooth the full predicted trajectory on CPU.
                action = smooth_action(result["action_pred"].detach().to("cpu")).numpy()[0]
            else:
                result = self.policy.predict_action(
                    obs_dict, language_goal=language_goal
                )
                action = result["action_pred"][0].detach().to("cpu").numpy()
        print("action")
        # Drop references to large tensors before returning.
        del result
        del obs_dict
        return {"actions": action}
class EnvMode(enum.Enum):
    """Supported environments.

    Only consulted when serving a default policy (see ``Args.env``).
    """
    ALOHA = "aloha"
    ALOHA_SIM = "aloha_sim"
    DROID = "droid"
    LIBERO = "libero"
    REAL = "real"
@dataclasses.dataclass
class Checkpoint:
    """Load a policy from a trained checkpoint."""
    # Data config name resolved via get_data_config (e.g., "pi0_aloha_sim").
    data_config: str
    # Checkpoint directory (e.g., "checkpoints/pi0_aloha_sim/exp/10000").
    dir: str | None = None
@dataclasses.dataclass
class EvalRealCheckpoint:
    """Load a policy from eval_real.py style checkpoint (see EvalRealPolicyAdapter)."""
    # Checkpoint path (directory or .ckpt file).
    dir: str
    # Device to run on.
    device: str = "cuda"
    # Output directory passed to the workspace constructor.
    output_dir: str = "."
@dataclasses.dataclass
class Default:
    """Use the default policy for the given environment (currently unsupported)."""
@dataclasses.dataclass
class Args:
    """Arguments for the serve_policy script (parsed from the CLI by tyro)."""
    # Environment to serve the policy for. This is only used when serving default policies.
    env: EnvMode = EnvMode.ALOHA_SIM
    # If provided, will be used in case the "prompt" key is not present in the data, or if the model doesn't have a default
    # prompt.
    default_prompt: str | None = None
    # Port to serve the policy on.
    port: int = 8012
    # Record the policy's behavior for debugging.
    record: bool = False
    # Specifies how to load the policy. If not provided, the default policy for the environment will be used.
    policy: Checkpoint | EvalRealCheckpoint | Default = dataclasses.field(default_factory=Default)
    # use_vllm: bool = False
# Default checkpoints that should be used for each environment.
# DEFAULT_CHECKPOINT: dict[EnvMode, Checkpoint] = {
# EnvMode.ALOHA: Checkpoint(
# config="pi0_aloha",
# dir="s3://openpi-assets/checkpoints/pi0_base",
# ),
# EnvMode.ALOHA_SIM: Checkpoint(
# config="pi0_aloha_sim",
# dir="s3://openpi-assets/checkpoints/pi0_aloha_sim",
# ),
# EnvMode.DROID: Checkpoint(
# config="pi0_fast_droid",
# dir="s3://openpi-assets/checkpoints/pi0_fast_droid",
# ),
# EnvMode.LIBERO: Checkpoint(
# config="pi0_fast_libero",
# dir="s3://openpi-assets/checkpoints/pi0_fast_libero",
# ),
# }
# def create_default_policy(env: EnvMode, *, default_prompt: str | None = None) -> _policy.Policy:
# """Create a default policy for the given environment."""
# if checkpoint := DEFAULT_CHECKPOINT.get(env):
# return _policy_config.create_trained_policy(
# _config.get_config(checkpoint.config), checkpoint.dir, default_prompt=default_prompt
# )
# raise ValueError(f"Unsupported environment mode: {env}")
def create_policy(args: Args):
"""Create a policy from the given arguments."""
match args.policy:
case EvalRealCheckpoint():
return EvalRealPolicyAdapter(
ckpt_path=args.policy.dir,
device=args.policy.device,
output_dir=args.policy.output_dir,
)
case Checkpoint():
import pathlib
import openpi.shared.normalize as _normalize
# _train_config = _config.get_config(args.policy.config)
_data_config: _config.DataConfig = get_data_config(args.policy.data_config)
norm_stats = _data_config.norm_stats
return _policy_config.create_trained_policy(
_data_config, args.policy.dir, default_prompt=args.default_prompt, norm_stats=norm_stats, use_vllm=_data_config.inference_use_vllm
)
case Default():
raise NotImplementedError("Default policies are not yet supported.")
# return create_default_policy(args.env, default_prompt=args.default_prompt)
def main(args: Args) -> None:
    """Build the policy selected by ``args`` and serve it over WebSocket."""
    policy = create_policy(args)
    # Capture metadata before any wrapping so clients see the policy's own values.
    policy_metadata = policy.metadata
    # Record the policy's behavior.
    if args.record:
        policy = _policy.PolicyRecorder(policy, "policy_records")
    # hostname = socket.gethostname()
    # local_ip = socket.gethostbyname(hostname)
    # logging.info("Creating server (host: %s, ip: %s)", hostname, local_ip)
    # Bind on all interfaces so remote robot clients can connect; blocks forever.
    server = websocket_policy_server.WebsocketPolicyServer(
        policy=policy,
        host="0.0.0.0",
        port=args.port,
        metadata=policy_metadata,
    )
    server.serve_forever()
if __name__ == "__main__":
    # force=True replaces any logging handlers installed by imported libraries.
    logging.basicConfig(level=logging.INFO, force=True)
    main(tyro.cli(Args))