repo
stringlengths
2
99
file
stringlengths
13
225
code
stringlengths
0
18.3M
file_length
int64
0
18.3M
avg_line_length
float64
0
1.36M
max_line_length
int64
0
4.26M
extension_type
stringclasses
1 value
pointnerf
pointnerf-master/data/data_utils.py
import numpy as np


def get_cv_raydir(pixelcoords, height, width, focal, rot):
    """Per-pixel ray directions for an OpenCV-convention camera (+x right, +y down, +z forward).

    Args:
        pixelcoords: H x W x 2 array of (x, y) pixel coordinates.
        height, width: image size in pixels.
        focal: scalar focal length, or (fx, fy) pair.
        rot: 3x3 camera-to-world rotation matrix.

    Returns:
        H x W x 3 array of unit-length world-space ray directions.
    """
    if isinstance(focal, float):
        focal = [focal, focal]
    x = (pixelcoords[..., 0] - width / 2.0) / focal[0]
    y = (pixelcoords[..., 1] - height / 2.0) / focal[1]
    z = np.ones_like(x)
    dirs = np.stack([x, y, z], axis=-1)
    # 1*1*3*3 x h*w*3*1: rotate each camera-space direction into world space
    dirs = np.sum(rot[None, None, :, :] * dirs[..., None], axis=-2)
    # + 1e-5 guards against division by zero for degenerate directions
    dirs = dirs / (np.linalg.norm(dirs, axis=-1, keepdims=True) + 1e-5)
    return dirs


def get_camera_rotation(eye, center, up):
    """Build a look-at camera-to-world rotation.

    Args:
        eye, center, up: 3-vectors (float np.ndarray) — camera position,
            look-at target, and approximate up direction.

    Returns:
        3x3 rotation whose columns are (right, up, backward), i.e. a
        Blender/OpenGL-style c2w rotation (camera looks down -z).
    """
    nz = center - eye
    nz /= np.linalg.norm(nz)
    x = np.cross(nz, up)
    x /= np.linalg.norm(x)
    y = np.cross(x, nz)
    return np.array([x, y, -nz]).T


def get_blender_raydir(pixelcoords, height, width, focal, rot, dir_norm):
    """Per-pixel ray directions for a Blender/OpenGL-convention camera (+x right, +y up, -z forward).

    Pixel centers are offset by +0.5 before normalization.

    Args:
        pixelcoords: H x W x 2 array of (x, y) pixel coordinates.
        height, width: image size in pixels.
        focal: scalar focal length.
        rot: 3x3 camera-to-world rotation matrix.
        dir_norm: if truthy, normalize directions to (near-)unit length.

    Returns:
        H x W x 3 array of world-space ray directions.
    """
    x = (pixelcoords[..., 0] + 0.5 - width / 2.0) / focal
    y = (pixelcoords[..., 1] + 0.5 - height / 2.0) / focal
    z = np.ones_like(x)
    dirs = np.stack([x, -y, -z], axis=-1)
    # h*w*1*3 x 3*3: equivalent to applying rot to each direction
    dirs = np.sum(dirs[..., None, :] * rot[:, :], axis=-1)
    if dir_norm:
        dirs = dirs / (np.linalg.norm(dirs, axis=-1, keepdims=True) + 1e-5)
    return dirs


def get_dtu_raydir(pixelcoords, intrinsic, rot, dir_norm):
    """Per-pixel ray directions from a full intrinsic matrix; rot is c2w.

    Pixel centers are offset by +0.5 before applying the inverse intrinsics.

    NOTE: the ``dir_norm`` flag is currently ignored — normalization is
    always applied (the original guard is commented out below). Kept as-is
    because existing callers may depend on unconditionally normalized rays.

    Args:
        pixelcoords: ... x 2 array of (x, y) pixel coordinates.
        intrinsic: 3x3 intrinsic matrix (fx, fy, cx, cy read from it).
        rot: 3x3 camera-to-world rotation matrix.
        dir_norm: unused (see NOTE).

    Returns:
        ... x 3 array of (near-)unit world-space ray directions.
    """
    x = (pixelcoords[..., 0] + 0.5 - intrinsic[0, 2]) / intrinsic[0, 0]
    y = (pixelcoords[..., 1] + 0.5 - intrinsic[1, 2]) / intrinsic[1, 1]
    z = np.ones_like(x)
    dirs = np.stack([x, y, z], axis=-1)
    dirs = dirs @ rot[:, :].T
    # if dir_norm:  # original guard disabled upstream; always normalize
    dirs = dirs / (np.linalg.norm(dirs, axis=-1, keepdims=True) + 1e-5)
    return dirs


def get_optix_raydir(pixelcoords, height, width, focal, eye, center, up, dir_norm=True):
    """Ray directions for a look-at camera (eye/center/up), Blender convention.

    BUG FIX: the original forwarded only five arguments to
    ``get_blender_raydir``, which requires six (``dir_norm``), so every call
    raised TypeError. ``dir_norm`` is now exposed as a backward-compatible
    keyword defaulting to True (normalized rays).
    """
    c2w = get_camera_rotation(eye, center, up)
    return get_blender_raydir(pixelcoords, height, width, focal, c2w, dir_norm)


def flip_z(poses):
    """Negate the z-axis of a batch of 4x4 poses (right-multiply by diag(1,1,-1,1)).

    Args:
        poses: N x 4 x 4 array of pose matrices.

    Returns:
        N x 4 x 4 array with the third column negated.
    """
    z_flip_matrix = np.eye(4, dtype=np.float32)
    z_flip_matrix[2, 2] = -1.0
    return np.matmul(poses, z_flip_matrix[None, ...])


def triangluation_bpa(pnts, test_pnts=None, full_comb=False):
    """Triangulate a point cloud with Open3D's ball-pivoting algorithm.

    Open3D is imported lazily here (its only use in this module) so that the
    ray-direction helpers above remain importable without it installed.

    Args:
        pnts: N x >=3 array; columns 0:3 are xyz. Normals are approximated as
            the normalized point positions (i.e. assumes a roughly
            origin-centered surface — TODO confirm with callers).
        test_pnts: unused; kept for interface compatibility.
        full_comb: if True, also append the two other cyclic/flipped vertex
            orderings of every triangle (3x as many faces).

    Returns:
        M x 3 int32 array of triangle vertex indices.
    """
    import open3d as o3d

    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(pnts[:, :3])
    pcd.normals = o3d.utility.Vector3dVector(
        pnts[:, :3] / np.linalg.norm(pnts[:, :3], axis=-1, keepdims=True))
    # ball radii derived from the average nearest-neighbor spacing
    distances = pcd.compute_nearest_neighbor_distance()
    avg_dist = np.mean(distances)
    radius = 3 * avg_dist
    dec_mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_ball_pivoting(
        pcd, o3d.utility.DoubleVector([radius, radius * 2]))
    triangles = np.asarray(dec_mesh.triangles, dtype=np.int32)
    if full_comb:
        q, w, e = triangles[..., 0], triangles[..., 1], triangles[..., 2]
        triangles2 = np.stack([w, q, e], axis=-1)
        triangles3 = np.stack([e, q, w], axis=-1)
        triangles = np.concatenate([triangles, triangles2, triangles3], axis=0)
    return triangles
4,744
37.893443
128
py
pointnerf
pointnerf-master/data/scannet_ft_dataset.py
from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
import models.mvs.mvs_utils as mvs_utils
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
# import torch.nn.functional as F
from .data_utils import get_dtu_raydir
from plyfile import PlyData, PlyElement

# Constant rotation that negates the z-axis (Blender/OpenCV handedness flip).
FLIP_Z = np.asarray([
    [1,0,0],
    [0,1,0],
    [0,0,-1],
], dtype=np.float32)

def colorjitter(img, factor):
    """Apply saturation and hue jitter to a PIL image.

    ``factor`` is indexed as brightness/contrast/saturation/hue, but only
    saturation (factor[2]) and hue (factor[3]-1.0) are applied here; the
    brightness/contrast lines are deliberately disabled below.
    """
    # brightness_factor,contrast_factor,saturation_factor,hue_factor
    # img = F.adjust_brightness(img, factor[0])
    # img = F.adjust_contrast(img, factor[1])
    img = F.adjust_saturation(img, factor[2])
    img = F.adjust_hue(img, factor[3]-1.0)
    return img

def get_rays(directions, c2w):
    """
    Get ray origin and normalized directions in world coordinate for all pixels in one image.
    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
               ray-tracing-generating-camera-rays/standard-coordinate-systems
    Inputs:
        directions: (H, W, 3) precomputed ray directions in camera coordinate
        c2w: (3, 4) transformation matrix from camera coordinate to world coordinate
    Outputs:
        rays_o: (H*W, 3), the origin of the rays in world coordinate
        rays_d: (H*W, 3), the normalized direction of the rays in world coordinate
    """
    # Rotate ray directions from camera coordinate to the world coordinate
    c2w = torch.FloatTensor(c2w)
    rays_d = directions @ c2w[:3, :3].T  # (H, W, 3)
    # rays_d = rays_d / torch.norm(rays_d, dim=-1, keepdim=True)
    # The origin of all rays is the camera origin in world coordinate
    rays_o = c2w[:3, 3].expand(rays_d.shape)  # (H, W, 3)

    rays_d = rays_d.view(-1, 3)
    rays_o = rays_o.view(-1, 3)

    return rays_o, rays_d

def get_ray_directions(H, W, focal, center=None):
    """
    Get ray directions for all pixels in camera coordinate.
    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
               ray-tracing-generating-camera-rays/standard-coordinate-systems
    Inputs:
        H, W, focal: image height, width and focal length
    Outputs:
        directions: (H, W, 3), the direction of the rays in camera coordinate
    """
    grid = create_meshgrid(H, W, normalized_coordinates=False)[0]
    i, j = grid.unbind(-1)
    # the direction here is without +0.5 pixel centering as calibration is not so accurate
    # see https://github.com/bmild/nerf/issues/24
    cent = center if center is not None else [W / 2, H / 2]
    directions = torch.stack([(i - cent[0]) / focal[0], (j - cent[1]) / focal[1], torch.ones_like(i)], -1)  # (H, W, 3)

    return directions

class ScannetFtDataset(BaseDataset):
    """Fine-tuning dataset for ScanNet scenes exported by the ScanNet reader.

    Expects ``<data_root>/<scan>/exported/`` containing color/, pose/, depth/,
    intrinsic/ subfolders. Yields per-view ray/pixel samples for training and
    full-image samples for evaluation.
    """

    def initialize(self, opt, img_wh=[800,800], downSample=1.0, max_len=-1, norm_w2c=None, norm_c2w=None):
        """Set up paths, background color, view id lists, and scaled intrinsics.

        NOTE(review): ``img_wh``/``norm_w2c``/``norm_c2w`` parameters are
        accepted but unused here — image size comes from ``opt.img_wh`` and the
        normalization matrices are fixed to identity below.
        """
        self.opt = opt
        self.data_dir = opt.data_root
        self.scan = opt.scan
        self.split = opt.split
        self.img_wh = (int(opt.img_wh[0] * downSample), int(opt.img_wh[1] * downSample))
        self.downSample = downSample
        self.scale_factor = 1.0 / 1.0
        self.max_len = max_len
        self.near_far = [opt.near_plane, opt.far_plane]
        self.blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
        self.height, self.width = int(self.img_wh[1]), int(self.img_wh[0])
        # Background color: named presets, 'random', or a "r,g,b" float triple.
        if not self.opt.bg_color or self.opt.bg_color == 'black':
            self.bg_color = (0, 0, 0)
        elif self.opt.bg_color == 'white':
            self.bg_color = (1, 1, 1)
        elif self.opt.bg_color == 'red':
            self.bg_color = (1, 0, 0)
        elif self.opt.bg_color == 'random':
            self.bg_color = 'random'
        else:
            self.bg_color = [float(one) for one in self.opt.bg_color.split(",")]
        self.define_transforms()
        self.build_init_metas()
        # Normalization transforms fixed to identity (normview logic disabled below).
        self.norm_w2c, self.norm_c2w = torch.eye(4, device="cuda", dtype=torch.float32), torch.eye(4, device="cuda", dtype=torch.float32)
        # if opt.normview > 0:
        #     _, _ , w2cs, c2ws = self.build_proj_mats(list=torch.load('../data/dtu_configs/pairs.th')[f'{self.scan}_test'])
        #     norm_w2c, norm_c2w = self.normalize_cam(w2cs, c2ws)
        # if opt.normview >= 2:
        #     self.norm_w2c, self.norm_c2w = torch.as_tensor(norm_w2c, device="cuda", dtype=torch.float32), torch.as_tensor(norm_c2w, device="cuda", dtype=torch.float32)
        #     norm_w2c, norm_c2w = None, None
        # self.proj_mats, self.intrinsics, self.world2cams, self.cam2worlds = self.build_proj_mats()
        self.intrinsic = np.loadtxt(os.path.join(self.data_dir, self.scan, "exported/intrinsic/intrinsic_color.txt")).astype(np.float32)[:3,:3]
        self.depth_intrinsic = np.loadtxt(
            os.path.join(self.data_dir, self.scan, "exported/intrinsic/intrinsic_depth.txt")).astype(np.float32)[:3, :3]
        # Rescale color intrinsics from the original image size to self.img_wh.
        img = Image.open(self.image_paths[0])
        ori_img_shape = list(self.transform(img).shape)  # (4, h, w)
        self.intrinsic[0, :] *= (self.width / ori_img_shape[2])
        self.intrinsic[1, :] *= (self.height / ori_img_shape[1])
        # print(self.intrinsic)
        self.total = len(self.id_list)
        print("dataset total:", self.split, self.total)

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Register dataset-specific command-line options on ``parser``."""
        # ['random', 'random2', 'patch'], default: no random sample
        parser.add_argument('--random_sample', type=str, default='none', help='random sample pixels')
        parser.add_argument('--random_sample_size', type=int, default=1024, help='number of random samples')
        parser.add_argument('--init_view_num', type=int, default=3, help='number of random samples')
        parser.add_argument('--edge_filter', type=int, default=3, help='number of random samples')
        parser.add_argument('--shape_id', type=int, default=0, help='shape id')
        parser.add_argument('--trgt_id', type=int, default=0, help='shape id')
        parser.add_argument('--num_nn', type=int, default=1, help='number of nearest views in a batch')
        parser.add_argument(
            '--near_plane',
            type=float,
            default=0.5,
            help=
            'Near clipping plane, by default it is computed according to the distance of the camera '
        )
        parser.add_argument(
            '--far_plane',
            type=float,
            default=5.0,
            help=
            'Far clipping plane, by default it is computed according to the distance of the camera '
        )
        parser.add_argument(
            '--bg_color',
            type=str,
            default="white",
            help=
            'background color, white|black(None)|random|rgb (float, float, float)'
        )
        parser.add_argument(
            '--scan',
            type=str,
            default="scan1",
            help=''
        )
        parser.add_argument('--inverse_gamma_image', type=int, default=-1, help='de-gamma correct the input image')
        parser.add_argument('--pin_data_in_memory', type=int, default=-1, help='load whole data in memory')
        parser.add_argument('--normview', type=int, default=0, help='load whole data in memory')
        parser.add_argument(
            '--id_range',
            type=int,
            nargs=3,
            default=(0, 385, 1),
            help=
            'the range of data ids selected in the original dataset. The default is range(0, 385). If the ids cannot be generated by range, use --id_list to specify any ids.'
        )
        parser.add_argument(
            '--id_list',
            type=int,
            nargs='+',
            default=None,
            help=
            'the list of data ids selected in the original dataset. The default is range(0, 385).'
        )
        parser.add_argument(
            '--split',
            type=str,
            default="train",
            help=
            'train, val, test'
        )
        parser.add_argument("--half_res", action='store_true', help='load blender synthetic data at 400x400 instead of 800x800')
        parser.add_argument("--testskip", type=int, default=8, help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels')
        parser.add_argument('--dir_norm', type=int, default=0, help='normalize the ray_dir to unit length or not, default not')
        parser.add_argument('--train_load_num', type=int, default=0, help='normalize the ray_dir to unit length or not, default not')
        parser.add_argument(
            '--img_wh',
            type=int,
            nargs=2,
            default=(640, 480),
            help='resize target of the image'
        )
        return parser

    def normalize_cam(self, w2cs, c2ws):
        """Pick the reference camera pair used for normalization (index 0)."""
        index = 0
        return w2cs[index], c2ws[index]

    def define_transforms(self):
        # NOTE(review): this method is defined twice in this class; the later
        # definition (identical body) overrides this one.
        self.transform = T.ToTensor()

    def variance_of_laplacian(self, image):
        # compute the Laplacian of the image and then return the focus
        # measure, which is simply the variance of the Laplacian
        return cv2.Laplacian(image, cv2.CV_64F).var()

    def detect_blurry(self, list):
        """Print the ids of the 150 least-focused frames (diagnostic only)."""
        blur_score = []
        for id in list:
            image_path = os.path.join(self.data_dir, self.scan, "exported/color/{}.jpg".format(id))
            image = cv2.imread(image_path)
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            fm = self.variance_of_laplacian(gray)
            blur_score.append(fm)
        blur_score = np.asarray(blur_score)
        ids = blur_score.argsort()[:150]  # 150 lowest focus scores
        allind = np.asarray(list)
        print("most blurry images", allind[ids])

    def remove_blurry(self, list):
        """Drop ids listed in exported/blur_list.txt, if that file exists."""
        blur_path = os.path.join(self.data_dir, self.scan, "exported/blur_list.txt")
        if os.path.exists(blur_path):
            blur_lst = []
            with open(blur_path) as f:
                lines = f.readlines()
                print("blur files", len(lines))
                for line in lines:
                    info = line.strip()
                    blur_lst.append(int(info))
            return [i for i in list if i not in blur_lst]
        else:
            print("no blur list detected, use all training frames!")
            return list

    def build_init_metas(self):
        """Enumerate frames and split them into train/test id lists."""
        colordir = os.path.join(self.data_dir, self.scan, "exported/color")
        self.image_paths = [f for f in os.listdir(colordir) if os.path.isfile(os.path.join(colordir, f))]
        # Re-derive paths by index: frames are named 0.jpg, 1.jpg, ...
        self.image_paths = [os.path.join(self.data_dir, self.scan, "exported/color/{}.jpg".format(i)) for i in range(len(self.image_paths))]
        self.all_id_list = self.filter_valid_id(list(range(len(self.image_paths))))
        if len(self.all_id_list) > 2900:
            # neural point-based graphics' configuration: every 100th frame is
            # test; train takes positions 20-80 within each hundred.
            self.test_id_list = self.all_id_list[::100]
            self.train_id_list = [self.all_id_list[i] for i in range(len(self.all_id_list)) if (((i % 100) > 19) and ((i % 100) < 81 or (i//100+1)*100>=len(self.all_id_list)))]
        else:
            # nsvf configuration: every 5th frame trains, the rest test.
            step=5
            self.train_id_list = self.all_id_list[::step]
            self.test_id_list = [self.all_id_list[i] for i in range(len(self.all_id_list)) if (i % step) !=0] if self.opt.test_num_step != 1 else self.all_id_list
        print("all_id_list",len(self.all_id_list))
        print("test_id_list",len(self.test_id_list), self.test_id_list)
        print("train_id_list",len(self.train_id_list))
        self.train_id_list = self.remove_blurry(self.train_id_list)
        self.id_list = self.train_id_list if self.split=="train" else self.test_id_list
        self.view_id_list=[]

    def filter_valid_id(self, id_list):
        """Keep only frames whose pose matrix has no entry >= 30 in magnitude
        (ScanNet export marks invalid poses with -inf/huge values)."""
        empty_lst=[]
        for id in id_list:
            c2w = np.loadtxt(os.path.join(self.data_dir, self.scan, "exported/pose", "{}.txt".format(id))).astype(np.float32)
            if np.max(np.abs(c2w)) < 30:
                empty_lst.append(id)
        return empty_lst

    def get_campos_ray(self):
        """Return (camera positions, center-pixel ray directions) for all ids,
        as CUDA float tensors."""
        centerpixel=np.asarray(self.img_wh).astype(np.float32)[None,:] // 2
        camposes=[]
        centerdirs=[]
        for id in self.id_list:
            c2w = np.loadtxt(os.path.join(self.data_dir, self.scan, "exported/pose", "{}.txt".format(id))).astype(np.float32) #@ self.blender2opencv
            campos = c2w[:3, 3]
            camrot = c2w[:3,:3]
            raydir = get_dtu_raydir(centerpixel, self.intrinsic, camrot, True)
            camposes.append(campos)
            centerdirs.append(raydir)
        camposes=np.stack(camposes, axis=0)  # 2091, 3
        centerdirs=np.concatenate(centerdirs, axis=0)  # 2091, 3
        # print("camposes", camposes.shape, centerdirs.shape)
        return torch.as_tensor(camposes, device="cuda", dtype=torch.float32), torch.as_tensor(centerdirs, device="cuda", dtype=torch.float32)

    def build_proj_mats(self, list=None, norm_w2c=None, norm_c2w=None):
        """Build per-view projection/intrinsic/extrinsic matrices.

        NOTE(review): reads ``self.meta`` (camera_angle_x, frames), which is
        never assigned anywhere in this file — this method appears to be
        copied from a blender-synthetic dataset and is only reachable via the
        disabled normview path in ``initialize``; verify before use.
        """
        proj_mats, intrinsics, world2cams, cam2worlds = [], [], [], []
        list = self.id_list if list is None else list
        focal = 0.5 * 800 / np.tan(0.5 * self.meta['camera_angle_x'])  # original focal length
        focal *= self.img_wh[0] / 800  # modify focal length to match size self.img_wh
        self.focal = focal
        self.near_far = np.array([2.0, 6.0])
        for vid in list:
            frame = self.meta['frames'][vid]
            c2w = np.array(frame['transform_matrix'])  # @ self.blender2opencv
            if norm_w2c is not None:
                c2w = norm_w2c @ c2w
            w2c = np.linalg.inv(c2w)
            cam2worlds.append(c2w)
            world2cams.append(w2c)
            intrinsic = np.array([[focal, 0, self.width / 2], [0, focal, self.height / 2], [0, 0, 1]])
            intrinsics.append(intrinsic.copy())
            # multiply intrinsics and extrinsics to get projection matrix
            proj_mat_l = np.eye(4)
            intrinsic[:2] = intrinsic[:2] / 4  # quarter-resolution projection
            proj_mat_l[:3, :4] = intrinsic @ w2c[:3, :4]
            proj_mats += [(proj_mat_l, self.near_far)]
        proj_mats, intrinsics = np.stack(proj_mats), np.stack(intrinsics)
        world2cams, cam2worlds = np.stack(world2cams), np.stack(cam2worlds)
        return proj_mats, intrinsics, world2cams, cam2worlds

    def define_transforms(self):
        # Duplicate definition (see NOTE on the first one); this one wins.
        self.transform = T.ToTensor()

    def parse_mesh(self):
        """Convert the scan's *_vh_clean.ply mesh vertices into exported/pcd.ply
        (xyz + rgb only, binary PLY)."""
        points_path = os.path.join(self.data_dir, self.scan, "exported/pcd.ply")
        mesh_path = os.path.join(self.data_dir, self.scan, self.scan + "_vh_clean.ply")
        plydata = PlyData.read(mesh_path)
        print("plydata 0", plydata.elements[0], plydata.elements[0].data["blue"].dtype)
        vertices = np.empty(len(plydata.elements[0].data["blue"]), dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
        vertices['x'] = plydata.elements[0].data["x"].astype('f4')
        vertices['y'] = plydata.elements[0].data["y"].astype('f4')
        vertices['z'] = plydata.elements[0].data["z"].astype('f4')
        vertices['red'] = plydata.elements[0].data["red"].astype('u1')
        vertices['green'] = plydata.elements[0].data["green"].astype('u1')
        vertices['blue'] = plydata.elements[0].data["blue"].astype('u1')
        # save as ply
        ply = PlyData([PlyElement.describe(vertices, 'vertex')], text=False)
        ply.write(points_path)

    def load_init_points(self):
        """Load the initial point cloud (xyz) as a CUDA tensor, generating
        exported/pcd.ply from the mesh on first use and optionally cropping to
        ``opt.ranges``."""
        points_path = os.path.join(self.data_dir, self.scan, "exported/pcd.ply")
        # points_path = os.path.join(self.data_dir, self.scan, "exported/pcd_te_1_vs_0.01_jit.ply")
        if not os.path.exists(points_path):
            # NOTE(review): redundant double-check of the same condition.
            if not os.path.exists(points_path):
                self.parse_mesh()
        plydata = PlyData.read(points_path)
        # plydata (PlyProperty('x', 'double'), PlyProperty('y', 'double'), PlyProperty('z', 'double'), PlyProperty('nx', 'double'), PlyProperty('ny', 'double'), PlyProperty('nz', 'double'), PlyProperty('red', 'uchar'), PlyProperty('green', 'uchar'), PlyProperty('blue', 'uchar'))
        x,y,z=torch.as_tensor(plydata.elements[0].data["x"].astype(np.float32), device="cuda", dtype=torch.float32), torch.as_tensor(plydata.elements[0].data["y"].astype(np.float32), device="cuda", dtype=torch.float32), torch.as_tensor(plydata.elements[0].data["z"].astype(np.float32), device="cuda", dtype=torch.float32)
        points_xyz = torch.stack([x,y,z], dim=-1)
        if self.opt.ranges[0] > -99.0:
            # Crop to the axis-aligned box [ranges[:3], ranges[3:]].
            ranges = torch.as_tensor(self.opt.ranges, device=points_xyz.device, dtype=torch.float32)
            mask = torch.prod(torch.logical_and(points_xyz >= ranges[None, :3], points_xyz <= ranges[None, 3:]), dim=-1) > 0
            points_xyz = points_xyz[mask]
        # np.savetxt(os.path.join(self.data_dir, self.scan, "exported/pcd.txt"), points_xyz.cpu().numpy(), delimiter=";")
        return points_xyz

    def read_depth(self, filepath):
        """Read a 16-bit depth PNG in millimeters; zero out depths outside
        [0.3, 8.0] meters."""
        depth_im = cv2.imread(filepath, -1).astype(np.float32)
        depth_im /= 1000  # mm -> m
        depth_im[depth_im > 8.0] = 0
        depth_im[depth_im < 0.3] = 0
        return depth_im

    def load_init_depth_points(self, device="cuda", vox_res=0):
        """Back-project every valid depth pixel of every frame into world space.

        Depth frames are assumed 640x480 (hard-coded grid below). Optionally
        voxel-downsamples each frame's points (``vox_res``) and crops the
        union to ``opt.ranges``.
        """
        py, px = torch.meshgrid(
            torch.arange(0, 480, dtype=torch.float32, device=device),
            torch.arange(0, 640, dtype=torch.float32, device=device))
        # print("max py, px", torch.max(py), torch.max(px))
        # print("min py, px", torch.min(py), torch.min(px))
        img_xy = torch.stack([px, py], dim=-1)  # [480, 640, 2]
        # print(img_xy.shape, img_xy[:10])
        reverse_intrin = torch.inverse(torch.as_tensor(self.depth_intrinsic)).t().to(device)
        world_xyz_all = torch.zeros([0,3], device=device, dtype=torch.float32)
        for i in tqdm(range(len(self.all_id_list))):
            id = self.all_id_list[i]
            c2w = torch.as_tensor(np.loadtxt(os.path.join(self.data_dir, self.scan, "exported/pose", "{}.txt".format(id))).astype(np.float32), device=device, dtype=torch.float32)  #@ self.blender2opencv
            # 480, 640, 1
            depth = torch.as_tensor(self.read_depth(os.path.join(self.data_dir, self.scan, "exported/depth/{}.png".format(id))), device=device)[..., None]
            cam_xy = img_xy * depth
            cam_xyz = torch.cat([cam_xy, depth], dim=-1)
            cam_xyz = cam_xyz @ reverse_intrin
            cam_xyz = cam_xyz[cam_xyz[...,2] > 0,:]  # drop invalid (zeroed) depths
            cam_xyz = torch.cat([cam_xyz, torch.ones_like(cam_xyz[...,:1])], dim=-1)
            world_xyz = (cam_xyz.view(-1,4) @ c2w.t())[...,:3]
            # print("cam_xyz", torch.min(cam_xyz, dim=-2)[0], torch.max(cam_xyz, dim=-2)[0])
            # print("world_xyz", world_xyz.shape)
            if vox_res > 0:
                world_xyz = mvs_utils.construct_vox_points_xyz(world_xyz, vox_res)
                # print("world_xyz", world_xyz.shape)
            world_xyz_all = torch.cat([world_xyz_all, world_xyz], dim=0)
        if self.opt.ranges[0] > -99.0:
            ranges = torch.as_tensor(self.opt.ranges, device=world_xyz_all.device, dtype=torch.float32)
            mask = torch.prod(torch.logical_and(world_xyz_all >= ranges[None, :3], world_xyz_all <= ranges[None, 3:]), dim=-1) > 0
            world_xyz_all = world_xyz_all[mask]
        return world_xyz_all

    def __len__(self):
        # NOTE(review): both branches are identical; the split check is a no-op.
        if self.split == 'train':
            return len(self.id_list) if self.max_len <= 0 else self.max_len
        return len(self.id_list) if self.max_len <= 0 else self.max_len

    def name(self):
        # NOTE(review): returns the NeRF-synthetic dataset's name — looks like
        # a copy-paste remnant; confirm nothing keys on this string.
        return 'NerfSynthFtDataset'

    def __del__(self):
        print("end loading")

    def normalize_rgb(self, data):
        """Normalize a (C, H, W) image with ImageNet mean/std."""
        # to unnormalize image for visualization
        # data C, H, W
        C, H, W = data.shape
        mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(3, 1, 1)
        std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(3, 1, 1)
        return (data - mean) / std

    def get_init_item(self, idx, crop=False):
        """Assemble an MVS initialization sample for the view group at ``idx``.

        NOTE(review): relies on self.view_id_list/view_id_dict/blackimgs/
        whiteimgs/depths/alphas/proj_mats being populated elsewhere (not in
        this file's visible code path) — verify the caller prepares them.
        Returns a dict of batched (unsqueezed) tensors.
        """
        sample = {}
        init_view_num = self.opt.init_view_num
        view_ids = self.view_id_list[idx]
        if self.split == 'train':
            view_ids = view_ids[:init_view_num]

        affine_mat, affine_mat_inv = [], []
        mvs_images, imgs, depths_h, alphas = [], [], [], []
        proj_mats, intrinsics, w2cs, c2ws, near_fars = [], [], [], [], []  # record proj mats between views
        for i in view_ids:
            vid = self.view_id_dict[i]
            # mvs_images += [self.normalize_rgb(self.blackimgs[vid])]
            # mvs_images += [self.whiteimgs[vid]]
            mvs_images += [self.blackimgs[vid]]
            imgs += [self.whiteimgs[vid]]
            proj_mat_ls, near_far = self.proj_mats[vid]
            intrinsics.append(self.intrinsics[vid])
            w2cs.append(self.world2cams[vid])
            c2ws.append(self.cam2worlds[vid])

            affine_mat.append(proj_mat_ls)
            affine_mat_inv.append(np.linalg.inv(proj_mat_ls))
            depths_h.append(self.depths[vid])
            alphas.append(self.alphas[vid])
            near_fars.append(near_far)

        # Pairwise relative projection matrices between the selected views.
        for i in range(len(affine_mat)):
            view_proj_mats = []
            ref_proj_inv = affine_mat_inv[i]
            for j in range(len(affine_mat)):
                if i == j:  # reference view
                    view_proj_mats += [np.eye(4)]
                else:
                    view_proj_mats += [affine_mat[j] @ ref_proj_inv]
            # view_proj_mats: 4, 4, 4
            view_proj_mats = np.stack(view_proj_mats)
            proj_mats.append(view_proj_mats[:, :3])
        # (4, 4, 3, 4)
        proj_mats = np.stack(proj_mats)
        imgs = np.stack(imgs).astype(np.float32)
        mvs_images = np.stack(mvs_images).astype(np.float32)
        depths_h = np.stack(depths_h)
        alphas = np.stack(alphas)
        affine_mat, affine_mat_inv = np.stack(affine_mat), np.stack(affine_mat_inv)
        intrinsics, w2cs, c2ws, near_fars = np.stack(intrinsics), np.stack(w2cs), np.stack(c2ws), np.stack(near_fars)

        sample['images'] = imgs  # (V, 3, H, W)
        sample['mvs_images'] = mvs_images  # (V, 3, H, W)
        sample['depths_h'] = depths_h.astype(np.float32)  # (V, H, W)
        sample['alphas'] = alphas.astype(np.float32)  # (V, H, W)
        sample['w2cs'] = w2cs.astype(np.float32)  # (V, 4, 4)
        sample['c2ws'] = c2ws.astype(np.float32)  # (V, 4, 4)
        sample['near_fars'] = near_fars.astype(np.float32)
        sample['proj_mats'] = proj_mats.astype(np.float32)
        sample['intrinsics'] = intrinsics.astype(np.float32)  # (V, 3, 3)
        sample['view_ids'] = np.array(view_ids)
        sample['affine_mat'] = affine_mat
        sample['affine_mat_inv'] = affine_mat_inv
        # Convert everything non-string to a tensor with a leading batch dim.
        for key, value in sample.items():
            if not isinstance(value, str):
                if not torch.is_tensor(value):
                    value = torch.as_tensor(value)
                sample[key] = value.unsqueeze(0)
        return sample

    def __getitem__(self, id, crop=False, full_img=False):
        """Load one frame and sample rays/pixels from it.

        Returns a dict with camera pose, ray directions (via get_dtu_raydir),
        sampled gt pixel colors, and near/far planes. ``full_img`` additionally
        attaches the whole image tensor under 'images'.
        """
        item = {}
        vid = self.id_list[id]
        image_path = os.path.join(self.data_dir, self.scan, "exported/color/{}.jpg".format(vid))
        # print("vid",vid)
        img = Image.open(image_path)
        img = img.resize(self.img_wh, Image.LANCZOS)
        img = self.transform(img)  # (4, h, w)
        c2w = np.loadtxt(os.path.join(self.data_dir, self.scan, "exported/pose", "{}.txt".format(vid))).astype(np.float32)
        # w2c = np.linalg.inv(c2w)
        intrinsic = self.intrinsic
        # print("gt_image", gt_image.shape)
        width, height = img.shape[2], img.shape[1]
        camrot = (c2w[0:3, 0:3])
        campos = c2w[0:3, 3]
        # print("camrot", camrot, campos)
        item["intrinsic"] = intrinsic
        # item["intrinsic"] = sample['intrinsics'][0, ...]
        item["campos"] = torch.from_numpy(campos).float()
        item["c2w"] = torch.from_numpy(c2w).float()
        item["camrotc2w"] = torch.from_numpy(camrot).float()  # @ FLIP_Z
        item['lightpos'] = item["campos"]
        dist = np.linalg.norm(campos)

        middle = dist + 0.7
        item['middle'] = torch.FloatTensor([middle]).view(1, 1)
        item['far'] = torch.FloatTensor([self.near_far[1]]).view(1, 1)
        item['near'] = torch.FloatTensor([self.near_far[0]]).view(1, 1)
        item['h'] = height
        item['w'] = width
        item['id'] = id
        item['vid'] = vid
        # bounding box
        margin = self.opt.edge_filter  # pixels excluded at each image border
        if full_img:
            item['images'] = img[None,...].clone()
        gt_image = np.transpose(img, (1, 2, 0))
        subsamplesize = self.opt.random_sample_size
        if self.opt.random_sample == "patch":
            indx = np.random.randint(margin, width - margin - subsamplesize + 1)
            indy = np.random.randint(margin, height - margin - subsamplesize + 1)
            px, py = np.meshgrid(
                np.arange(indx, indx + subsamplesize).astype(np.float32),
                np.arange(indy, indy + subsamplesize).astype(np.float32))
        elif self.opt.random_sample == "random":
            px = np.random.randint(margin, width-margin, size=(subsamplesize, subsamplesize)).astype(np.float32)
            py = np.random.randint(margin, height-margin, size=(subsamplesize, subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "random2":
            px = np.random.uniform(margin, width - margin - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32)
            py = np.random.uniform(margin, height - margin - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "proportional_random":
            raise Exception("no gt_mask, no proportional_random !!!")
        else:
            # Default: every in-margin pixel.
            px, py = np.meshgrid(
                np.arange(margin, width - margin).astype(np.float32),
                np.arange(margin, height- margin).astype(np.float32))
        pixelcoords = np.stack((px, py), axis=-1).astype(np.float32)  # H x W x 2
        # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot)
        item["pixel_idx"] = pixelcoords
        # print("pixelcoords", pixelcoords.reshape(-1,2)[:10,:])
        raydir = get_dtu_raydir(pixelcoords, item["intrinsic"], camrot, self.opt.dir_norm > 0)
        raydir = np.reshape(raydir, (-1, 3))
        item['raydir'] = torch.from_numpy(raydir).float()
        gt_image = gt_image[py.astype(np.int32), px.astype(np.int32)]
        # gt_mask = gt_mask[py.astype(np.int32), px.astype(np.int32), :]
        gt_image = np.reshape(gt_image, (-1, 3))
        item['gt_image'] = gt_image

        if self.bg_color:
            if self.bg_color == 'random':
                # Randomly pick white or black per sample.
                val = np.random.rand()
                if val > 0.5:
                    item['bg_color'] = torch.FloatTensor([1, 1, 1])
                else:
                    item['bg_color'] = torch.FloatTensor([0, 0, 0])
            else:
                item['bg_color'] = torch.FloatTensor(self.bg_color)
        return item

    def get_item(self, idx, crop=False, full_img=False):
        """__getitem__ plus a leading batch dimension on every tensor value."""
        item = self.__getitem__(idx, crop=crop, full_img=full_img)

        for key, value in item.items():
            if not isinstance(value, str):
                if not torch.is_tensor(value):
                    value = torch.as_tensor(value)
                item[key] = value.unsqueeze(0)
        return item

    def get_dummyrot_item(self, idx, crop=False):
        """Build a camera-only item from ``self.render_poses[idx]`` (no gt image).

        NOTE(review): calls ``get_blender_raydir``, which is not imported in
        this module (only get_dtu_raydir is), and the 'proportional_random'
        branch references an undefined ``gt_mask`` — both would raise at
        runtime if reached; also requires self.render_poses/self.focal to be
        set elsewhere. Verify before using this path.
        """
        item = {}
        width, height = self.width, self.height

        transform_matrix = self.render_poses[idx]
        camrot = (transform_matrix[0:3, 0:3])
        campos = transform_matrix[0:3, 3]
        focal = self.focal

        item["focal"] = focal
        item["campos"] = torch.from_numpy(campos).float()
        item["camrotc2w"] = torch.from_numpy(camrot).float()
        item['lightpos'] = item["campos"]
        dist = np.linalg.norm(campos)
        # near far
        if self.opt.near_plane is not None:
            near = self.opt.near_plane
        else:
            near = max(dist - 1.5, 0.02)
        if self.opt.far_plane is not None:
            far = self.opt.far_plane  # near +
        else:
            far = dist + 0.7
        middle = dist + 0.7
        item['middle'] = torch.FloatTensor([middle]).view(1, 1)
        item['far'] = torch.FloatTensor([far]).view(1, 1)
        item['near'] = torch.FloatTensor([near]).view(1, 1)
        item['h'] = self.height
        item['w'] = self.width

        subsamplesize = self.opt.random_sample_size
        if self.opt.random_sample == "patch":
            indx = np.random.randint(0, width - subsamplesize + 1)
            indy = np.random.randint(0, height - subsamplesize + 1)
            px, py = np.meshgrid(
                np.arange(indx, indx + subsamplesize).astype(np.float32),
                np.arange(indy, indy + subsamplesize).astype(np.float32))
        elif self.opt.random_sample == "random":
            px = np.random.randint(0, width, size=(subsamplesize, subsamplesize)).astype(np.float32)
            py = np.random.randint(0, height, size=(subsamplesize, subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "random2":
            px = np.random.uniform(0, width - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32)
            py = np.random.uniform(0, height - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "proportional_random":
            px, py = self.proportional_select(gt_mask)
        else:
            px, py = np.meshgrid(
                np.arange(width).astype(np.float32),
                np.arange(height).astype(np.float32))

        pixelcoords = np.stack((px, py), axis=-1).astype(np.float32)  # H x W x 2
        # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot)
        raydir = get_blender_raydir(pixelcoords, self.height, self.width, focal, camrot, self.opt.dir_norm > 0)
        item["pixel_idx"] = pixelcoords
        raydir = np.reshape(raydir, (-1, 3))
        item['raydir'] = torch.from_numpy(raydir).float()

        if self.bg_color:
            if self.bg_color == 'random':
                val = np.random.rand()
                if val > 0.5:
                    item['bg_color'] = torch.FloatTensor([1, 1, 1])
                else:
                    item['bg_color'] = torch.FloatTensor([0, 0, 0])
            else:
                item['bg_color'] = torch.FloatTensor(self.bg_color)

        for key, value in item.items():
            if not torch.is_tensor(value):
                value = torch.as_tensor(value)
            item[key] = value.unsqueeze(0)
        return item
32,944
43.162198
321
py
pointnerf
pointnerf-master/data/nerf_synth360_ft_dataset.py
from models.mvs.mvs_utils import read_pfm import os import numpy as np import cv2 from PIL import Image import torch from torchvision import transforms as T import torchvision.transforms.functional as F from kornia import create_meshgrid import time import json from . import data_utils from plyfile import PlyData, PlyElement from torch.utils.data import Dataset, DataLoader import torch import h5py from data.base_dataset import BaseDataset import configparser from os.path import join import cv2 # import torch.nn.functional as F from .data_utils import get_dtu_raydir FLIP_Z = np.asarray([ [1,0,0], [0,1,0], [0,0,-1], ], dtype=np.float32) def colorjitter(img, factor): # brightness_factor,contrast_factor,saturation_factor,hue_factor # img = F.adjust_brightness(img, factor[0]) # img = F.adjust_contrast(img, factor[1]) img = F.adjust_saturation(img, factor[2]) img = F.adjust_hue(img, factor[3]-1.0) return img def pose_spherical(theta, phi, radius): c2w = trans_t(radius) c2w = rot_phi(phi/180.*np.pi) @ c2w c2w = rot_theta(theta/180.*np.pi) @ c2w c2w = np.array([[-1,0,0,0],[0,0,1,0],[0,1,0,0],[0,0,0,1]]) @ c2w c2w = c2w #@ np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]) return c2w trans_t = lambda t : np.asarray([ [1,0,0,0], [0,1,0,0], [0,0,1,t], [0,0,0,1], ], dtype=np.float32) rot_phi = lambda phi : np.asarray([ [1,0,0,0], [0,np.cos(phi),-np.sin(phi),0], [0,np.sin(phi), np.cos(phi),0], [0,0,0,1], ], dtype=np.float32) rot_theta = lambda th : np.asarray([ [np.cos(th),0,-np.sin(th),0], [0,1,0,0], [np.sin(th),0, np.cos(th),0], [0,0,0,1], ], dtype=np.float32) def get_rays(directions, c2w): """ Get ray origin and normalized directions in world coordinate for all pixels in one image. 
Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: directions: (H, W, 3) precomputed ray directions in camera coordinate c2w: (3, 4) transformation matrix from camera coordinate to world coordinate Outputs: rays_o: (H*W, 3), the origin of the rays in world coordinate rays_d: (H*W, 3), the normalized direction of the rays in world coordinate """ # Rotate ray directions from camera coordinate to the world coordinate c2w = torch.FloatTensor(c2w) rays_d = directions @ c2w[:3, :3].T # (H, W, 3) # rays_d = rays_d / torch.norm(rays_d, dim=-1, keepdim=True) # The origin of all rays is the camera origin in world coordinate rays_o = c2w[:3, 3].expand(rays_d.shape) # (H, W, 3) rays_d = rays_d.view(-1, 3) rays_o = rays_o.view(-1, 3) return rays_o, rays_d def get_ray_directions(H, W, focal, center=None): """ Get ray directions for all pixels in camera coordinate. Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: H, W, focal: image height, width and focal length Outputs: directions: (H, W, 3), the direction of the rays in camera coordinate """ grid = create_meshgrid(H, W, normalized_coordinates=False)[0] i, j = grid.unbind(-1) # the direction here is without +0.5 pixel centering as calibration is not so accurate # see https://github.com/bmild/nerf/issues/24 cent = center if center is not None else [W / 2, H / 2] directions = torch.stack([(i - cent[0]) / focal[0], (j - cent[1]) / focal[1], torch.ones_like(i)], -1) # (H, W, 3) return directions class NerfSynth360FtDataset(BaseDataset): def initialize(self, opt, img_wh=[800,800], downSample=1.0, max_len=-1, norm_w2c=None, norm_c2w=None): self.opt = opt self.data_dir = opt.data_root self.scan = opt.scan self.split = opt.split self.img_wh = (int(800 * downSample), int(800 * downSample)) self.downSample = downSample self.scale_factor = 1.0 / 1.0 self.max_len 
= max_len self.blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]) self.height, self.width = int(self.img_wh[1]), int(self.img_wh[0]) if not self.opt.bg_color or self.opt.bg_color == 'black': self.bg_color = (0, 0, 0) elif self.opt.bg_color == 'white': self.bg_color = (1, 1, 1) elif self.opt.bg_color == 'random': self.bg_color = 'random' else: self.bg_color = [float(one) for one in self.opt.bg_color.split(",")] self.define_transforms() meta_split = "train" if self.split == "render" else self.split with open(os.path.join(self.data_dir, self.scan, f'transforms_{meta_split}.json'), 'r') as f: self.meta = json.load(f) with open(os.path.join(self.data_dir, self.scan, f'transforms_test.json'), 'r') as f: self.testmeta = json.load(f) self.id_list = [i for i in range(len(self.meta["frames"]))] self.test_id_list = [i for i in range(len(self.testmeta["frames"]))] self.norm_w2c, self.norm_c2w = torch.eye(4, device="cuda", dtype=torch.float32), torch.eye(4, device="cuda", dtype=torch.float32) if opt.normview > 0: _, _ , w2cs, c2ws = self.build_proj_mats(list=self.test_id_list) norm_w2c, norm_c2w = self.normalize_cam(w2cs, c2ws) if opt.normview >= 2: self.norm_w2c, self.norm_c2w = torch.as_tensor(norm_w2c, device="cuda", dtype=torch.float32), torch.as_tensor(norm_c2w, device="cuda", dtype=torch.float32) norm_w2c, norm_c2w = None, None self.proj_mats, self.intrinsics, self.world2cams, self.cam2worlds = self.build_proj_mats(norm_w2c=norm_w2c, norm_c2w=norm_c2w) if self.split != "render": self.build_init_metas() self.read_meta() self.total = len(self.id_list) print("dataset total:", self.split, self.total) else: self.get_render_poses() print("render only, pose total:", self.total) def get_render_poses(self): stride = 20 #self.opt.render_stride radius = 4 #self.opt.render_radius self.render_poses = np.stack([pose_spherical(angle, -30.0, radius) @ self.blender2opencv for angle in np.linspace(-180, 180, stride + 1)[:-1]], 0) self.total = 
len(self.render_poses) @staticmethod def modify_commandline_options(parser, is_train): # ['random', 'random2', 'patch'], default: no random sample parser.add_argument('--random_sample', type=str, default='none', help='random sample pixels') parser.add_argument('--random_sample_size', type=int, default=1024, help='number of random samples') parser.add_argument('--init_view_num', type=int, default=3, help='number of random samples') parser.add_argument('--shape_id', type=int, default=0, help='shape id') parser.add_argument('--trgt_id', type=int, default=0, help='shape id') parser.add_argument('--num_nn', type=int, default=1, help='number of nearest views in a batch') parser.add_argument( '--near_plane', type=float, default=2.125, help= 'Near clipping plane, by default it is computed according to the distance of the camera ' ) parser.add_argument( '--far_plane', type=float, default=4.525, help= 'Far clipping plane, by default it is computed according to the distance of the camera ' ) parser.add_argument( '--bg_color', type=str, default="white", help= 'background color, white|black(None)|random|rgb (float, float, float)' ) parser.add_argument( '--bg_filtering', type=int, default=0, help= '0 for alpha channel filtering, 1 for background color filtering' ) parser.add_argument( '--scan', type=str, default="scan1", help='' ) parser.add_argument( '--full_comb', type=int, default=0, help='' ) parser.add_argument('--inverse_gamma_image', type=int, default=-1, help='de-gamma correct the input image') parser.add_argument('--pin_data_in_memory', type=int, default=-1, help='load whole data in memory') parser.add_argument('--normview', type=int, default=0, help='load whole data in memory') parser.add_argument( '--id_range', type=int, nargs=3, default=(0, 385, 1), help= 'the range of data ids selected in the original dataset. The default is range(0, 385). If the ids cannot be generated by range, use --id_list to specify any ids.' 
) parser.add_argument( '--id_list', type=int, nargs='+', default=None, help= 'the list of data ids selected in the original dataset. The default is range(0, 385).' ) parser.add_argument( '--split', type=str, default="train", help= 'train, val, test' ) parser.add_argument("--half_res", action='store_true', help='load blender synthetic data at 400x400 instead of 800x800') parser.add_argument("--testskip", type=int, default=8, help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels') parser.add_argument('--dir_norm', type=int, default=0, help='normalize the ray_dir to unit length or not, default not') parser.add_argument('--train_load_num', type=int, default=0, help='normalize the ray_dir to unit length or not, default not') return parser def normalize_cam(self, w2cs, c2ws): # cam_xyz = c2ws[..., :3, 3] # rtp = self.bcart2sphere(cam_xyz) # print(rtp.shape) # rtp = np.mean(rtp, axis=0) # avg_xyz = self.sphere2cart(rtp) # euler_lst = [] # for i in range(len(c2ws)): # euler_angles = self.matrix2euler(c2ws[i][:3,:3]) # print("euler_angles", euler_angles) # euler_lst += [euler_angles] # euler = np.mean(np.stack(euler_lst, axis=0), axis=0) # print("euler mean ",euler) # M = self.euler2matrix(euler) # norm_c2w = np.eye(4) # norm_c2w[:3,:3] = M # norm_c2w[:3,3] = avg_xyz # norm_w2c = np.linalg.inv(norm_c2w) # return norm_w2c, norm_c2w index = 0 return w2cs[index], c2ws[index] def define_transforms(self): self.transform = T.ToTensor() def get_campos_ray(self): centerpixel = np.asarray(self.img_wh).astype(np.float32)[None, :] // 2 camposes = [] centerdirs = [] for i, idx in enumerate(self.id_list): c2w = self.cam2worlds[i].astype(np.float32) campos = c2w[:3, 3] camrot = c2w[:3, :3] raydir = get_dtu_raydir(centerpixel, self.intrinsics[0].astype(np.float32), camrot, True) camposes.append(campos) centerdirs.append(raydir) camposes = np.stack(camposes, axis=0) # 2091, 3 centerdirs = np.concatenate(centerdirs, axis=0) # 2091, 3 # print("camposes", 
camposes.shape, centerdirs.shape) return torch.as_tensor(camposes, device="cuda", dtype=torch.float32), torch.as_tensor(centerdirs, device="cuda", dtype=torch.float32) def build_init_metas(self): self.view_id_list = [] cam_xyz_lst = [c2w[:3,3] for c2w in self.cam2worlds] _, _, w2cs, c2ws = self.build_proj_mats(meta=self.testmeta, list=self.test_id_list) test_cam_xyz_lst = [c2w[:3,3] for c2w in c2ws] if self.split=="train": cam_xyz = np.stack(cam_xyz_lst, axis=0) test_cam_xyz = np.stack(test_cam_xyz_lst, axis=0) triangles = data_utils.triangluation_bpa(cam_xyz, test_pnts=test_cam_xyz, full_comb=self.opt.full_comb>0) self.view_id_list = [triangles[i] for i in range(len(triangles))] if self.opt.full_comb<0: with open(f'../data/nerf_synth_configs/list/lego360_init_pairs.txt') as f: for line in f: str_lst = line.rstrip().split(',') src_views = [int(x) for x in str_lst] self.view_id_list.append(src_views) def load_init_points(self): points_path = os.path.join(self.data_dir, self.scan, "colmap_results/dense/fused.ply") # points_path = os.path.join(self.data_dir, self.scan, "exported/pcd_te_1_vs_0.01_jit.ply") assert os.path.exists(points_path) plydata = PlyData.read(points_path) # plydata (PlyProperty('x', 'double'), PlyProperty('y', 'double'), PlyProperty('z', 'double'), PlyProperty('nx', 'double'), PlyProperty('ny', 'double'), PlyProperty('nz', 'double'), PlyProperty('red', 'uchar'), PlyProperty('green', 'uchar'), PlyProperty('blue', 'uchar')) print("plydata", plydata.elements[0]) x,y,z=torch.as_tensor(plydata.elements[0].data["x"].astype(np.float32), device="cuda", dtype=torch.float32), torch.as_tensor(plydata.elements[0].data["y"].astype(np.float32), device="cuda", dtype=torch.float32), torch.as_tensor(plydata.elements[0].data["z"].astype(np.float32), device="cuda", dtype=torch.float32) points_xyz = torch.stack([x,y,z], dim=-1).to(torch.float32) # np.savetxt(os.path.join(self.data_dir, self.scan, "exported/pcd.txt"), points_xyz.cpu().numpy(), delimiter=";") if 
self.opt.comb_file is not None: file_points = np.loadtxt(self.opt.comb_file, delimiter=";") print("file_points", file_points.shape) comb_xyz = torch.as_tensor(file_points[...,:3].astype(np.float32), device=points_xyz.device, dtype=points_xyz.dtype) points_xyz = torch.cat([points_xyz, comb_xyz], dim=0) # np.savetxt("/home/xharlie/user_space/codes/testNr/checkpoints/pcolallship360_load_confcolordir_KNN8_LRelu_grid320_333_agg2_prl2e3_prune1e4/points/save.txt", points_xyz.cpu().numpy(), delimiter=";") return points_xyz def build_proj_mats(self, meta=None, list=None, norm_w2c=None, norm_c2w=None): proj_mats, intrinsics, world2cams, cam2worlds = [], [], [], [] list = self.id_list if list is None else list meta = self.meta if meta is None else meta focal = 0.5 * 800 / np.tan(0.5 * self.meta['camera_angle_x']) # original focal length focal *= self.img_wh[0] / 800 # modify focal length to match size self.img_wh self.focal = focal self.near_far = np.array([2.0, 6.0]) for vid in list: frame = meta['frames'][vid] c2w = np.array(frame['transform_matrix']) @ self.blender2opencv if norm_w2c is not None: c2w = norm_w2c @ c2w w2c = np.linalg.inv(c2w) cam2worlds.append(c2w) world2cams.append(w2c) intrinsic = np.array([[focal, 0, self.width / 2], [0, focal, self.height / 2], [0, 0, 1]]) intrinsics.append(intrinsic.copy().astype(np.float32)) # multiply intrinsics and extrinsics to get projection matrix proj_mat_l = np.eye(4) intrinsic[:2] = intrinsic[:2] / 4 proj_mat_l[:3, :4] = intrinsic @ w2c[:3, :4] proj_mats += [(proj_mat_l, self.near_far)] proj_mats, intrinsics = np.stack(proj_mats), np.stack(intrinsics) world2cams, cam2worlds = np.stack(world2cams), np.stack(cam2worlds) return proj_mats, intrinsics, world2cams, cam2worlds def define_transforms(self): self.transform = T.ToTensor() def read_meta(self): w, h = self.img_wh self.image_paths = [] self.poses = [] self.all_rays = [] self.mvsimgs = [] self.render_gtimgs = [] self.depths = [] self.alphas = [] self.view_id_dict = {} 
self.directions = get_ray_directions(h, w, [self.focal, self.focal]) # (h, w, 3) count = 0 for i, idx in enumerate(self.id_list): frame = self.meta['frames'][idx] image_path = os.path.join(self.data_dir, self.scan, f"{frame['file_path']}.png") self.image_paths += [image_path] img = Image.open(image_path) img = img.resize(self.img_wh, Image.Resampling.LANCZOS) img = self.transform(img) # (4, h, w) self.depths += [(img[-1:, ...] > 0.1).numpy().astype(np.float32)] self.mvsimgs += [img[:3] * img[-1:]] self.render_gtimgs += [img[:3] * img[-1:] + (1 - img[-1:])] if self.opt.bg_filtering: self.alphas += [ (torch.norm(self.mvsimgs[-1][:3], dim=0, keepdim=True) > 1e-6).numpy().astype(np.float32)] else: self.alphas += [img[-1:].numpy().astype(np.float32)] # ray directions for all pixels, same for all images (same H, W, focal) # rays_o, rays_d = get_rays(self.directions, self.cam2worlds[i]) # both (h*w, 3) # # self.all_rays += [torch.cat([rays_o, rays_d, # self.near_far[0] * torch.ones_like(rays_o[:, :1]), # self.near_far[1] * torch.ones_like(rays_o[:, :1])], 1)] # (h*w, 8) self.view_id_dict[idx] = i self.poses = self.cam2worlds def __len__(self): if self.split == 'train': return len(self.id_list) if self.max_len <= 0 else self.max_len return len(self.id_list) if self.max_len <= 0 else self.max_len def name(self): return 'NerfSynthFtDataset' def __del__(self): print("end loading") def normalize_rgb(self, data): # to unnormalize image for visualization # data C, H, W C, H, W = data.shape mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(3, 1, 1) std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(3, 1, 1) return (data - mean) / std def get_init_item(self, idx, crop=False): sample = {} init_view_num = self.opt.init_view_num view_ids = self.view_id_list[idx] if self.split == 'train': view_ids = view_ids[:init_view_num] affine_mat, affine_mat_inv = [], [] mvs_images, imgs, depths_h, alphas = [], [], [], [] proj_mats, intrinsics, w2cs, c2ws, near_fars = 
[], [], [], [], [] # record proj mats between views for i in view_ids: vid = self.view_id_dict[i] # mvs_images += [self.normalize_rgb(self.mvsimgs[vid])] # mvs_images += [self.render_gtimgs[vid]] mvs_images += [self.mvsimgs[vid]] imgs += [self.render_gtimgs[vid]] proj_mat_ls, near_far = self.proj_mats[vid] intrinsics.append(self.intrinsics[vid]) w2cs.append(self.world2cams[vid]) c2ws.append(self.cam2worlds[vid]) affine_mat.append(proj_mat_ls) affine_mat_inv.append(np.linalg.inv(proj_mat_ls)) depths_h.append(self.depths[vid]) alphas.append(self.alphas[vid]) near_fars.append(near_far) for i in range(len(affine_mat)): view_proj_mats = [] ref_proj_inv = affine_mat_inv[i] for j in range(len(affine_mat)): if i == j: # reference view view_proj_mats += [np.eye(4)] else: view_proj_mats += [affine_mat[j] @ ref_proj_inv] # view_proj_mats: 4, 4, 4 view_proj_mats = np.stack(view_proj_mats) proj_mats.append(view_proj_mats[:, :3]) # (4, 4, 3, 4) proj_mats = np.stack(proj_mats) imgs = np.stack(imgs).astype(np.float32) mvs_images = np.stack(mvs_images).astype(np.float32) depths_h = np.stack(depths_h) alphas = np.stack(alphas) affine_mat, affine_mat_inv = np.stack(affine_mat), np.stack(affine_mat_inv) intrinsics, w2cs, c2ws, near_fars = np.stack(intrinsics), np.stack(w2cs), np.stack(c2ws), np.stack(near_fars) # view_ids_all = [target_view] + list(src_views) if type(src_views[0]) is not list else [j for sub in src_views for j in sub] # c2ws_all = self.cam2worlds[self.remap[view_ids_all]] sample['images'] = imgs # (V, 3, H, W) sample['mvs_images'] = mvs_images # (V, 3, H, W) sample['depths_h'] = depths_h.astype(np.float32) # (V, H, W) sample['alphas'] = alphas.astype(np.float32) # (V, H, W) sample['w2cs'] = w2cs.astype(np.float32) # (V, 4, 4) sample['c2ws'] = c2ws.astype(np.float32) # (V, 4, 4) sample['near_fars_depth'] = near_fars.astype(np.float32)[0] sample['near_fars'] = np.tile(self.near_far.astype(np.float32)[None,...],(len(near_fars),1)) sample['proj_mats'] = 
proj_mats.astype(np.float32) sample['intrinsics'] = intrinsics.astype(np.float32) # (V, 3, 3) sample['view_ids'] = np.array(view_ids) # sample['light_id'] = np.array(light_idx) sample['affine_mat'] = affine_mat sample['affine_mat_inv'] = affine_mat_inv # sample['scan'] = scan # sample['c2ws_all'] = c2ws_all.astype(np.float32) for key, value in sample.items(): if not isinstance(value, str): if not torch.is_tensor(value): value = torch.as_tensor(value) sample[key] = value.unsqueeze(0) return sample def __getitem__(self, id, crop=False, full_img=False): item = {} img = self.render_gtimgs[id] w2c = self.world2cams[id] c2w = self.cam2worlds[id] intrinsic = self.intrinsics[id] proj_mat_ls, near_far = self.proj_mats[id] gt_image = np.transpose(img, (1,2,0)) # print("gt_image", gt_image.shape) width, height = gt_image.shape[1], gt_image.shape[0] camrot = (c2w[0:3, 0:3]) campos = c2w[0:3, 3] # print("camrot", camrot, campos) item["intrinsic"] = intrinsic # item["intrinsic"] = sample['intrinsics'][0, ...] item["campos"] = torch.from_numpy(campos).float() item["camrotc2w"] = torch.from_numpy(camrot).float() # @ FLIP_Z item["c2w"] = torch.from_numpy(c2w).float() item['lightpos'] = item["campos"] dist = np.linalg.norm(campos) middle = dist + 0.7 item['middle'] = torch.FloatTensor([middle]).view(1, 1) item['far'] = torch.FloatTensor([near_far[1]]).view(1, 1) item['near'] = torch.FloatTensor([near_far[0]]).view(1, 1) item['h'] = height item['w'] = width item['depths_h'] = self.depths[id] # bounding box if full_img: item['images'] = img[None,...] 
subsamplesize = self.opt.random_sample_size if self.opt.random_sample == "patch": indx = np.random.randint(0, width - subsamplesize + 1) indy = np.random.randint(0, height - subsamplesize + 1) px, py = np.meshgrid( np.arange(indx, indx + subsamplesize).astype(np.float32), np.arange(indy, indy + subsamplesize).astype(np.float32)) elif self.opt.random_sample == "random": px = np.random.randint(0, width, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.randint(0, height, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "random2": px = np.random.uniform(0, width - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.uniform(0, height - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "proportional_random": raise Exception("no gt_mask, no proportional_random !!!") else: px, py = np.meshgrid( np.arange(width).astype(np.float32), np.arange(height).astype(np.float32)) pixelcoords = np.stack((px, py), axis=-1).astype(np.float32) # H x W x 2 # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot) item["pixel_idx"] = pixelcoords # print("pixelcoords", pixelcoords.reshape(-1,2)[:10,:]) raydir = get_dtu_raydir(pixelcoords, item["intrinsic"], camrot, self.opt.dir_norm > 0) raydir = np.reshape(raydir, (-1, 3)) item['raydir'] = torch.from_numpy(raydir).float() gt_image = gt_image[py.astype(np.int32), px.astype(np.int32)] # gt_mask = gt_mask[py.astype(np.int32), px.astype(np.int32), :] gt_image = np.reshape(gt_image, (-1, 3)) item['gt_image'] = gt_image item['id'] = id if self.bg_color: if self.bg_color == 'random': val = np.random.rand() if val > 0.5: item['bg_color'] = torch.FloatTensor([1, 1, 1]) else: item['bg_color'] = torch.FloatTensor([0, 0, 0]) else: item['bg_color'] = torch.FloatTensor(self.bg_color) return item def get_item(self, idx, crop=False, full_img=False): item = self.__getitem__(idx, crop=crop, full_img=full_img) for 
key, value in item.items(): if not isinstance(value, str): if not torch.is_tensor(value): value = torch.as_tensor(value) item[key] = value.unsqueeze(0) return item def get_dummyrot_item(self, idx, crop=False): item = {} width, height = self.width, self.height transform_matrix = self.render_poses[idx] camrot = transform_matrix[0:3, 0:3] campos = transform_matrix[0:3, 3] focal = self.focal item["focal"] = focal item["campos"] = torch.from_numpy(campos).float() item["camrotc2w"] = torch.from_numpy(camrot).float() item['lightpos'] = item["campos"] item['intrinsic'] = self.intrinsics[0] # near far item['far'] = torch.FloatTensor([self.opt.far_plane]).view(1, 1) item['near'] = torch.FloatTensor([self.opt.near_plane]).view(1, 1) item['h'] = self.height item['w'] = self.width subsamplesize = self.opt.random_sample_size if self.opt.random_sample == "patch": indx = np.random.randint(0, width - subsamplesize + 1) indy = np.random.randint(0, height - subsamplesize + 1) px, py = np.meshgrid( np.arange(indx, indx + subsamplesize).astype(np.float32), np.arange(indy, indy + subsamplesize).astype(np.float32)) elif self.opt.random_sample == "random": px = np.random.randint(0, width, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.randint(0, height, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "random2": px = np.random.uniform(0, width - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.uniform(0, height - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "proportional_random": raise Exception("no gt_mask, no proportional_random !!!") else: px, py = np.meshgrid( np.arange(width).astype(np.float32), np.arange(height).astype(np.float32)) pixelcoords = np.stack((px, py), axis=-1).astype(np.float32) # H x W x 2 # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot) item["pixel_idx"] = pixelcoords # print("pixelcoords", 
pixelcoords.reshape(-1,2)[:10,:]) raydir = get_dtu_raydir(pixelcoords, self.intrinsics[0], camrot, self.opt.dir_norm > 0) raydir = np.reshape(raydir, (-1, 3)) item['raydir'] = torch.from_numpy(raydir).float() item['id'] = idx if self.bg_color: if self.bg_color == 'random': val = np.random.rand() if val > 0.5: item['bg_color'] = torch.FloatTensor([1, 1, 1]) else: item['bg_color'] = torch.FloatTensor([0, 0, 0]) else: item['bg_color'] = torch.FloatTensor(self.bg_color) for key, value in item.items(): if not torch.is_tensor(value): value = torch.as_tensor(value) item[key] = value.unsqueeze(0) return item
30,908
40.488591
321
py
pointnerf
pointnerf-master/data/__init__.py
import importlib import torch.utils.data import sys sys.path.append("../") from utils.ncg_string import underscore2camelcase from .base_dataset import BaseDataset import numpy as np import time def find_dataset_class_by_name(name): ''' Input name: string with underscore representation Output dataset: a dataset class with class name {camelcase(name)}Dataset Searches for a dataset module with name {name}_dataset in current directory, returns the class with name {camelcase(name)}Dataset found in the module. ''' cls_name = underscore2camelcase(name) + 'Dataset' filename = "data.{}_dataset".format(name) module = importlib.import_module(filename) assert cls_name in module.__dict__, 'Cannot find dataset class name "{}" in "{}"'.format( cls_name, filename) cls = module.__dict__[cls_name] assert issubclass(cls, BaseDataset), 'Dataset class "{}" must inherit from BaseDataset'.format(cls_name) return cls def get_option_setter(dataset_name): dataset_class = find_dataset_class_by_name(dataset_name) return dataset_class.modify_commandline_options def create_dataset(opt): dataset = find_dataset_class_by_name(opt.dataset_name) instance = dataset() instance.initialize(opt) print("dataset [{}] was created".format(instance.name())) return instance def create_data_loader(opt, dataset=None): data_loader = DefaultDataLoader() data_loader.initialize(opt, dataset=dataset) return data_loader def worker_init_fn(worker_id): # np.random.seed(np.random.get_state()[1][0] + worker_id) np.random.seed((worker_id + torch.initial_seed() + np.floor(time.time()).astype(np.int64)) % np.iinfo(np.int32).max) class DefaultDataLoader: def name(self): return self.__class__.name def initialize(self, opt, dataset=None): assert opt.batch_size >= 1 assert opt.n_threads >= 0 assert opt.max_dataset_size >= 1 self.opt = opt self.dataset = create_dataset(opt) if dataset is None else dataset self.dataloader = torch.utils.data.DataLoader(self.dataset, batch_size=opt.batch_size, shuffle=not opt.serial_batches, 
num_workers=int(opt.n_threads), worker_init_fn=worker_init_fn) def load_data(self): return self.dataset def __len__(self): return min(len(self.dataset), self.opt.max_dataset_size) def __iter__(self): for i, data in enumerate(self.dataloader): if i * self.opt.batch_size >= self.opt.max_dataset_size: break yield data def get_item(self, index): return self.dataset.get_item(index)
2,880
31.738636
120
py
pointnerf
pointnerf-master/data/load_blender.py
import os import numpy as np import imageio import json import torch import pickle, random # trans_t = lambda t : tf.convert_to_tensor([ # [1,0,0,0], # [0,1,0,0], # [0,0,1,t], # [0,0,0,1], # ], dtype=tf.float32) # # rot_phi = lambda phi : tf.convert_to_tensor([ # [1,0,0,0], # [0,tf.cos(phi),-tf.sin(phi),0], # [0,tf.sin(phi), tf.cos(phi),0], # [0,0,0,1], # ], dtype=tf.float32) # # rot_theta = lambda th : tf.convert_to_tensor([ # [tf.cos(th),0,-tf.sin(th),0], # [0,1,0,0], # [tf.sin(th),0, tf.cos(th),0], # [0,0,0,1], # ], dtype=tf.float32) trans_t = lambda t : np.asarray([ [1,0,0,0], [0,1,0,0], [0,0,1,t], [0,0,0,1], ], dtype=np.float32) rot_phi = lambda phi : np.asarray([ [1,0,0,0], [0,np.cos(phi),-np.sin(phi),0], [0,np.sin(phi), np.cos(phi),0], [0,0,0,1], ], dtype=np.float32) rot_theta = lambda th : np.asarray([ [np.cos(th),0,-np.sin(th),0], [0,1,0,0], [np.sin(th),0, np.cos(th),0], [0,0,0,1], ], dtype=np.float32) def pose_spherical(theta, phi, radius): c2w = trans_t(radius) c2w = rot_phi(phi/180.*np.pi) @ c2w c2w = rot_theta(theta/180.*np.pi) @ c2w c2w = np.array([[-1,0,0,0],[0,0,1,0],[0,1,0,0],[0,0,0,1]]) @ c2w return c2w blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]) def load_blender_data(basedir, splits, half_res=False, testskip=1): splits = ['train', 'val', 'test'] if splits is None else splits metas = {} for s in splits: with open(os.path.join(basedir, 'transforms_{}.json'.format(s)), 'r') as fp: metas[s] = json.load(fp) all_imgs = [] all_poses = [] counts = [0] for s in splits: meta = metas[s] imgs = [] poses = [] if s=='train' or testskip==0: skip = 1 else: skip = testskip for frame in meta['frames'][::skip]: fname = os.path.join(basedir, frame['file_path'] + '.png') imgs.append(imageio.imread(fname)) poses.append(np.array(frame['transform_matrix']) @ blender2opencv) imgs = (np.array(imgs) / 255.).astype(np.float32) # keep all 4 channels (RGBA) poses = np.array(poses).astype(np.float32) counts.append(counts[-1] + 
imgs.shape[0]) all_imgs.append(imgs) all_poses.append(poses) i_split = [np.arange(counts[i], counts[i+1]) for i in range(len(splits))] imgs = np.concatenate(all_imgs, 0) poses = np.concatenate(all_poses, 0) H, W = imgs[0].shape[:2] camera_angle_x = float(meta['camera_angle_x']) focal = .5 * W / np.tan(.5 * camera_angle_x) stride = 20 render_poses = np.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180, 180, stride+1)[:-1]],0) if half_res: imgs = tf.image.resize_area(imgs, [400, 400]).numpy() H = H//2 W = W//2 focal = focal/2. intrinsic = np.asarray([[focal, 0, W/2], [0, focal, H/2], [0,0,1]]) return imgs, poses, render_poses, [H, W, focal], i_split, intrinsic def load_blender_cloud(point_path, point_num): point_norms = None with open(point_path, 'rb') as f: print("point_file_path ################", point_path) all_infos = pickle.load(f) point_xyz = all_infos["point_xyz"] if "point_face_normal" in all_infos: point_norms = all_infos["point_face_normal"] print("surface point cloud ",len(point_xyz), "mean pos:", np.mean(point_xyz, axis=0), "min pos:",np.min(point_xyz, axis=0), "mean max:",np.max(point_xyz, axis=0)) if point_num < len(point_xyz): inds = np.asarray(random.choices(range(len(point_xyz)), k=point_num)) point_norms = point_norms[inds, :] if point_norms is not None else None return point_xyz[inds, :], point_norms else: return point_xyz, point_norms
3,956
29.206107
166
py
pointnerf
pointnerf-master/data/tt_ft_dataset.py
from models.mvs.mvs_utils import read_pfm import os import numpy as np import cv2 from PIL import Image import torch from torchvision import transforms as T import torchvision.transforms.functional as F from kornia import create_meshgrid import time import json from . import data_utils from plyfile import PlyData, PlyElement import copy from torch.utils.data import Dataset, DataLoader import torch import os from PIL import Image import h5py from data.base_dataset import BaseDataset import configparser from os.path import join import cv2 # import torch.nn.functional as F from .data_utils import get_dtu_raydir FLIP_Z = np.asarray([ [1,0,0], [0,1,0], [0,0,-1], ], dtype=np.float32) def colorjitter(img, factor): # brightness_factor,contrast_factor,saturation_factor,hue_factor # img = F.adjust_brightness(img, factor[0]) # img = F.adjust_contrast(img, factor[1]) img = F.adjust_saturation(img, factor[2]) img = F.adjust_hue(img, factor[3]-1.0) return img def pose_spherical(theta, phi, radius): c2w = trans_t(radius) c2w = rot_phi(phi/180.*np.pi) @ c2w # c2w = rot_theta(theta/180.*np.pi) @ c2w c2w = rot_beta(theta/180.*np.pi) @ c2w # c2w = rot_beta(90/180.*np.pi) @ c2w c2w = np.array([[-1,0,0,0],[0,0,1,0],[0,1,0,0],[0,0,0,1]]) @ c2w c2w = c2w #@ np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]) return c2w trans_t = lambda t : np.asarray([ [1,0,0,0], [0,1,0,0], [0,0,1,t], [0,0,0,1], ], dtype=np.float32) rot_phi = lambda phi : np.asarray([ [1,0,0,0], [0,np.cos(phi),-np.sin(phi),0], [0,np.sin(phi), np.cos(phi),0], [0,0,0,1], ], dtype=np.float32) rot_theta = lambda th : np.asarray([ [np.cos(th),0,-np.sin(th),0], [0,1,0,0], [np.sin(th),0, np.cos(th),0], [0,0,0,1], ], dtype=np.float32) rot_beta = lambda th : np.asarray([ [np.cos(th),-np.sin(th), 0, 0], [np.sin(th),np.cos(th), 0, 0], [0,0,1,0], [0,0,0,1], ], dtype=np.float32) def get_rays(directions, c2w): """ Get ray origin and normalized directions in world coordinate for all pixels in one image. 
Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: directions: (H, W, 3) precomputed ray directions in camera coordinate c2w: (3, 4) transformation matrix from camera coordinate to world coordinate Outputs: rays_o: (H*W, 3), the origin of the rays in world coordinate rays_d: (H*W, 3), the normalized direction of the rays in world coordinate """ # Rotate ray directions from camera coordinate to the world coordinate c2w = torch.FloatTensor(c2w) rays_d = directions @ c2w[:3, :3].T # (H, W, 3) # rays_d = rays_d / torch.norm(rays_d, dim=-1, keepdim=True) # The origin of all rays is the camera origin in world coordinate rays_o = c2w[:3, 3].expand(rays_d.shape) # (H, W, 3) rays_d = rays_d.view(-1, 3) rays_o = rays_o.view(-1, 3) return rays_o, rays_d def get_ray_directions(H, W, focal, center=None): """ Get ray directions for all pixels in camera coordinate. Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: H, W, focal: image height, width and focal length Outputs: directions: (H, W, 3), the direction of the rays in camera coordinate """ grid = create_meshgrid(H, W, normalized_coordinates=False)[0] i, j = grid.unbind(-1) # the direction here is without +0.5 pixel centering as calibration is not so accurate # see https://github.com/bmild/nerf/issues/24 cent = center if center is not None else [W / 2, H / 2] directions = torch.stack([(i - cent[0]) / focal[0], (j - cent[1]) / focal[1], torch.ones_like(i)], -1) # (H, W, 3) return directions class TtFtDataset(BaseDataset): def initialize(self, opt, img_wh=[1920,1080], downSample=1.0, max_len=-1, norm_w2c=None, norm_c2w=None): self.opt = opt self.data_dir = opt.data_root self.scan = opt.scan self.split = opt.split self.img_wh = (int(opt.img_wh[0] * downSample), int(opt.img_wh[1] * downSample)) self.downSample = downSample self.alphas=None 
self.scale_factor = 1.0 / 1.0 self.max_len = max_len # self.cam_trans = np.diag(np.array([1, -1, -1, 1], dtype=np.float32)) self.cam_trans = np.diag(np.array([-1, 1, 1, 1], dtype=np.float32)) self.blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]) self.height, self.width = int(self.img_wh[1]), int(self.img_wh[0]) if not self.opt.bg_color or self.opt.bg_color == 'black': self.bg_color = (0, 0, 0) elif self.opt.bg_color == 'white': self.bg_color = (1, 1, 1) elif self.opt.bg_color == 'random': self.bg_color = 'random' else: self.bg_color = [float(one) for one in self.opt.bg_color.split(",")] self.define_transforms() self.build_init_metas() self.norm_w2c, self.norm_c2w = torch.eye(4, device="cuda", dtype=torch.float32), torch.eye(4, device="cuda", dtype=torch.float32) self.near_far = np.array([opt.near_plane, opt.far_plane]) self.intrinsic = self.get_instrinsic() img = Image.open(self.image_paths[0]) self.ori_img_shape = list(self.transform(img).shape) # (4, h, w) self.intrinsic[0, :] *= (self.width / self.ori_img_shape[2]) self.intrinsic[1, :] *= (self.height / self.ori_img_shape[1]) self.proj_mats, self.intrinsics, self.world2cams, self.cam2worlds = self.build_proj_mats() if self.split != "render": self.build_init_view_lst() self.total = len(self.id_list) print("dataset total:", self.split, self.total) else: self.get_render_poses() print("render only, pose total:", self.total) def get_render_poses(self): # print("pose file", os.path.join(self.data_dir, self.scan, "test_traj.txt")) # self.render_poses = np.loadtxt(os.path.join(self.data_dir, self.scan, "test_traj.txt")).reshape(-1,4,4) # print("self.render_poses", self.render_poses) # self.total = len(self.render_poses) stride = 100 # self.opt.render_stride # radius = 1.6 # self.opt.render_radius @ self.blender2opencv parameters = {"Ignatius": [1.7, 1.7, -87.0], "Truck": [2.5, 1.5, 91.0], "Caterpillar": [2.2, 2.2, -89.0], "Family": [0.9, 0.9, -91.0] , "Barn": [2.5, 2.5, 88.0]} a, b, 
phi = parameters[self.opt.scan] # self.opt.render_radius @ self.blender2opencv self.render_poses = np.stack([pose_spherical(angle, phi, self.radius_func(angle, a, b)) @ self.blender2opencv for angle in np.linspace(-180, 180, stride + 1)[:-1]], 0) # print("self.render_poses", self.render_poses[0]) self.total = len(self.render_poses) def radius_func(self, angle, a, b): # return 1.2 + abs(np.cos((180 + angle - 36) * np.pi / 180) * radius) theta = (angle - (36-180)) * np.pi / 180 return a * b / np.sqrt(a*a*np.sin(theta)**2 + b*b*np.cos(theta)**2) def get_instrinsic(self): filepath = os.path.join(self.data_dir, self.scan, "intrinsics.txt") try: intrinsic = np.loadtxt(filepath).astype(np.float32)[:3, :3] return intrinsic except ValueError: pass # Get camera intrinsics with open(filepath, 'r') as file: f, cx, cy, _ = map(float, file.readline().split()) fy=fx = f # Build the intrinsic matrices intrinsic = np.array([[fx, 0., cx], [0., fy, cy], [0., 0, 1]]) return intrinsic @staticmethod def modify_commandline_options(parser, is_train): # ['random', 'random2', 'patch'], default: no random samplec parser.add_argument('--random_sample', type=str, default='none', help='random sample pixels') parser.add_argument('--random_sample_size', type=int, default=1024, help='number of random samples') parser.add_argument('--init_view_num', type=int, default=3, help='number of random samples') parser.add_argument('--shape_id', type=int, default=0, help='shape id') parser.add_argument('--trgt_id', type=int, default=0, help='shape id') parser.add_argument('--num_nn', type=int, default=1, help='number of nearest views in a batch') parser.add_argument( '--near_plane', type=float, default=2.125, help= 'Near clipping plane, by default it is computed according to the distance of the camera ' ) parser.add_argument( '--far_plane', type=float, default=4.525, help= 'Far clipping plane, by default it is computed according to the distance of the camera ' ) parser.add_argument( '--bg_color', type=str, 
default="white", help= 'background color, white|black(None)|random|rgb (float, float, float)' ) parser.add_argument( '--scan', type=str, default="scan1", help='' ) parser.add_argument( '--full_comb', type=int, default=0, help='' ) parser.add_argument('--inverse_gamma_image', type=int, default=-1, help='de-gamma correct the input image') parser.add_argument('--pin_data_in_memory', type=int, default=-1, help='load whole data in memory') parser.add_argument('--normview', type=int, default=0, help='load whole data in memory') parser.add_argument( '--id_range', type=int, nargs=3, default=(0, 385, 1), help= 'the range of data ids selected in the original dataset. The default is range(0, 385). If the ids cannot be generated by range, use --id_list to specify any ids.' ) parser.add_argument( '--id_list', type=int, nargs='+', default=None, help= 'the list of data ids selected in the original dataset. The default is range(0, 385).' ) parser.add_argument( '--split', type=str, default="train", help= 'train, val, test' ) parser.add_argument("--half_res", action='store_true', help='load blender synthetic data at 400x400 instead of 800x800') parser.add_argument("--testskip", type=int, default=8, help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels') parser.add_argument('--dir_norm', type=int, default=0, help='normalize the ray_dir to unit length or not, default not') parser.add_argument('--train_load_num', type=int, default=0, help='normalize the ray_dir to unit length or not, default not') parser.add_argument( '--img_wh', type=int, nargs=2, default=(1920, 1080), # default=(1088, 640), help='resize target of the image' ) parser.add_argument( '--mvs_img_wh', type=int, nargs=2, # default=(1920, 1080), 1590, 960 default=(1088, 640), help='resize target of the image' ) return parser def build_init_metas(self): colordir = os.path.join(self.data_dir, self.scan, "rgb") train_image_paths = [f for f in os.listdir(colordir) if 
os.path.isfile(os.path.join(colordir, f)) and f.startswith("0")] test_image_paths = [f for f in os.listdir(colordir) if os.path.isfile(os.path.join(colordir, f)) and f.startswith("1")] self.train_id_list = list(range(len(train_image_paths))) self.test_id_list = list(range(len(test_image_paths))) self.train_image_paths = ["" for i in self.train_id_list] self.test_image_paths = ["" for i in self.test_id_list] self.train_pos_paths = ["" for i in self.train_id_list] self.test_pos_paths = ["" for i in self.test_id_list] for train_path in train_image_paths: id = int(train_path.split("_")[1]) self.train_image_paths[id] = os.path.join(self.data_dir, self.scan, "rgb/{}".format(train_path)) self.train_pos_paths[id] = os.path.join(self.data_dir, self.scan, "pose/{}.txt".format(train_path[:-4])) for test_path in test_image_paths: id = int(test_path.split("_")[1]) self.test_image_paths[id] = os.path.join(self.data_dir, self.scan, "rgb/{}".format(test_path)) self.test_pos_paths[id] = os.path.join(self.data_dir, self.scan, "pose/{}.txt".format(test_path[:-4])) self.id_list = self.train_id_list if self.split=="train" else self.test_id_list self.pos_paths = self.train_pos_paths if self.split=="train" else self.test_pos_paths self.image_paths = self.train_image_paths if self.split=="train" else self.test_image_paths if self.opt.ranges[0] > -90.0: self.spacemin, self.spacemax = torch.as_tensor(self.opt.ranges[:3]), torch.as_tensor(self.opt.ranges[3:6]) else: minmax = np.loadtxt(os.path.join(self.data_dir, self.scan, "bbox.txt")).astype(np.float32)[:6] self.spacemin, self.spacemax = torch.as_tensor(minmax[:3]), torch.as_tensor(minmax[3:6]) def build_init_view_lst(self): self.view_id_list = [] cam_xyz_lst = [c2w[:3,3] for c2w in self.cam2worlds] # _, _, w2cs, c2ws = self.build_proj_mats(meta=self.testmeta, list=self.test_id_list) # test_cam_xyz_lst = [c2w[:3,3] for c2w in c2ws] cam_points = [np.array([[0, 0, 0.1]], dtype=np.float32) @ c2w[:3, :3].T for c2w in self.cam2worlds] if 
self.split=="train": cam_xyz = np.stack(cam_xyz_lst, axis=0) cam_points = np.concatenate(cam_points, axis=0) + cam_xyz # test_cam_xyz = np.stack(test_cam_xyz_lst, axis=0) print("cam_points", cam_points.shape, cam_xyz.shape, np.linalg.norm(cam_xyz, axis=-1)) triangles = data_utils.triangluation_bpa(cam_xyz, test_pnts=cam_points, full_comb=self.opt.full_comb>0) self.view_id_list = [triangles[i] for i in range(len(triangles))] def define_transforms(self): self.transform = T.ToTensor() def build_proj_mats(self): proj_mats, world2cams, cam2worlds, intrinsics = [], [], [], [] list = self.id_list dintrinsic = self.get_instrinsic() dintrinsic[0, :] *= (self.opt.mvs_img_wh[0] / self.ori_img_shape[2]) dintrinsic[1, :] *= (self.opt.mvs_img_wh[1] / self.ori_img_shape[1]) for vid in list: c2w = np.loadtxt(self.pos_paths[vid]) # @ self.cam_trans w2c = np.linalg.inv(c2w) cam2worlds.append(c2w) world2cams.append(w2c) intrinsics.append(dintrinsic) proj_mat_l = np.eye(4) downintrinsic = copy.deepcopy(dintrinsic) downintrinsic[:2] = downintrinsic[:2] / 4 proj_mat_l[:3, :4] = downintrinsic @ w2c[:3, :4] proj_mats += [(proj_mat_l, self.near_far)] proj_mats = np.stack(proj_mats) intrinsics = np.stack(intrinsics) world2cams, cam2worlds = np.stack(world2cams), np.stack(cam2worlds) return proj_mats, intrinsics, world2cams, cam2worlds def define_transforms(self): self.transform = T.ToTensor() def read_meta(self): w, h = self.img_wh self.image_paths = [] self.poses = [] self.all_rays = [] self.blackimgs = [] self.whiteimgs = [] self.depths = [] self.alphas = [] self.view_id_dict = {} self.directions = get_ray_directions(h, w, [self.focal, self.focal]) # (h, w, 3) count = 0 for i, idx in enumerate(self.id_list): frame = self.meta['frames'][idx] image_path = os.path.join(self.data_dir, self.scan, f"{frame['file_path']}.png") self.image_paths += [image_path] img = Image.open(image_path) img = img.resize(self.img_wh, Image.LANCZOS) img = self.transform(img) # (4, h, w) self.depths += [(img[-1:, 
...] > 0.1).numpy().astype(np.float32)] self.alphas += [img[-1:].numpy().astype(np.float32)] self.blackimgs += [img[:3] * img[-1:]] self.whiteimgs += [img[:3] * img[-1:] + (1 - img[-1:])] # ray directions for all pixels, same for all images (same H, W, focal) # rays_o, rays_d = get_rays(self.directions, self.cam2worlds[i]) # both (h*w, 3) # # self.all_rays += [torch.cat([rays_o, rays_d, # self.near_far[0] * torch.ones_like(rays_o[:, :1]), # self.near_far[1] * torch.ones_like(rays_o[:, :1])], 1)] # (h*w, 8) self.view_id_dict[idx] = i self.poses = self.cam2worlds def __len__(self): if self.split == 'train': return len(self.id_list) if self.max_len <= 0 else self.max_len return len(self.id_list) if self.max_len <= 0 else self.max_len def name(self): return 'NerfSynthFtDataset' def __del__(self): print("end loading") def normalize_rgb(self, data): # to unnormalize image for visualization # data C, H, W C, H, W = data.shape mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(3, 1, 1) std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(3, 1, 1) return (data - mean) / std def read_img_path(self, image_path, img_wh, black=False): img = Image.open(image_path) img = img.resize(img_wh, Image.LANCZOS) img = self.transform(img) # (4, h, w) if img.shape[0] == 4: alpha = img[-1:].numpy().astype(np.float32) blackimg = img[:3] * img[-1:] whiteimg = img[:3] * img[-1:] + (1 - img[-1:]) return blackimg, whiteimg, alpha[0,...] > 0 # print("img",img) alpha = torch.norm(1.0 - img, dim=0) > 0.0001 blackimg = None if black: blackimg = img[:3] * alpha[None, ...] 
# print("alpha", torch.sum(alpha)) return blackimg, img, alpha def get_init_alpha(self): self.alphas = [] for i in self.id_list: vid = i _, _, alpha = self.read_img_path(self.image_paths[vid], self.opt.mvs_img_wh) self.alphas += [alpha[None, ...]] # self.alphas = np.stack(self.alphas).astype(np.float32) # (V, H, W) def get_init_item(self, idx, crop=False): if self.alphas is None: self.get_init_alpha() sample = {} init_view_num = self.opt.init_view_num view_ids = self.view_id_list[idx] if self.split == 'train': view_ids = view_ids[:init_view_num] affine_mat, affine_mat_inv = [], [] mvs_images, imgs, depths_h, alphas = [], [], [], [] proj_mats, intrinsics, w2cs, c2ws, near_fars = [], [], [], [], [] # record proj mats between views for i in view_ids: vid = i blackimg, img, alpha = self.read_img_path(self.image_paths[vid], self.opt.mvs_img_wh, black=True) mvs_images += [blackimg] alphas+= [alpha] imgs += [img] proj_mat_ls, near_far = self.proj_mats[vid] intrinsics.append(self.intrinsics[vid]) w2cs.append(self.world2cams[vid]) c2ws.append(self.cam2worlds[vid]) affine_mat.append(proj_mat_ls) affine_mat_inv.append(np.linalg.inv(proj_mat_ls)) near_fars.append(near_far) # print("idx",idx, vid, self.image_paths[vid]) for i in range(len(affine_mat)): view_proj_mats = [] ref_proj_inv = affine_mat_inv[i] for j in range(len(affine_mat)): if i == j: # reference view view_proj_mats += [np.eye(4)] else: view_proj_mats += [affine_mat[j] @ ref_proj_inv] # view_proj_mats: 4, 4, 4 view_proj_mats = np.stack(view_proj_mats) proj_mats.append(view_proj_mats[:, :3]) # (4, 4, 3, 4) proj_mats = np.stack(proj_mats) imgs = np.stack(imgs).astype(np.float32) mvs_images = np.stack(mvs_images).astype(np.float32) affine_mat, affine_mat_inv = np.stack(affine_mat), np.stack(affine_mat_inv) intrinsics, w2cs, c2ws, near_fars = np.stack(intrinsics), np.stack(w2cs), np.stack(c2ws), np.stack(near_fars) # view_ids_all = [target_view] + list(src_views) if type(src_views[0]) is not list else [j for sub in 
src_views for j in sub] # c2ws_all = self.cam2worlds[self.remap[view_ids_all]] sample['images'] = imgs # (V, 3, H, W) sample['mvs_images'] = mvs_images # (V, 3, H, W) # sample['depths_h'] = depths_h.astype(np.float32) # (V, H, W) sample['alphas'] = np.stack(alphas).astype(np.float32) # (V, H, W) sample['w2cs'] = w2cs.astype(np.float32) # (V, 4, 4) sample['c2ws'] = c2ws.astype(np.float32) # (V, 4, 4) sample['near_fars_depth'] = near_fars.astype(np.float32)[0] sample['near_fars'] = np.tile(self.near_far.astype(np.float32)[None,...],(len(near_fars),1)) sample['proj_mats'] = proj_mats.astype(np.float32) sample['intrinsics'] = intrinsics.astype(np.float32) # (V, 3, 3) sample['view_ids'] = np.array(view_ids) # sample['light_id'] = np.array(light_idx) sample['affine_mat'] = affine_mat sample['affine_mat_inv'] = affine_mat_inv # sample['scan'] = scan # sample['c2ws_all'] = c2ws_all.astype(np.float32) for key, value in sample.items(): if not isinstance(value, str): if not torch.is_tensor(value): value = torch.as_tensor(value) sample[key] = value.unsqueeze(0) return sample def __getitem__(self, id, crop=False, full_img=False): item = {} _, img, _ = self.read_img_path(self.image_paths[id], self.img_wh) w2c = self.world2cams[id] c2w = self.cam2worlds[id] intrinsic = self.intrinsic _, near_far = self.proj_mats[id] gt_image = np.transpose(img, (1,2,0)) # print("gt_image", gt_image.shape) width, height = gt_image.shape[1], gt_image.shape[0] camrot = (c2w[0:3, 0:3]) campos = c2w[0:3, 3] # print("camrot", camrot, campos) item["intrinsic"] = intrinsic # item["intrinsic"] = sample['intrinsics'][0, ...] 
item["campos"] = torch.from_numpy(campos).float() item["camrotc2w"] = torch.from_numpy(camrot).float() # @ FLIP_Z item["c2w"] = torch.from_numpy(c2w).float() item['lightpos'] = item["campos"] dist = np.linalg.norm(campos) middle = dist + 0.7 item['middle'] = torch.FloatTensor([middle]).view(1, 1) item['far'] = torch.FloatTensor([near_far[1]]).view(1, 1) item['near'] = torch.FloatTensor([near_far[0]]).view(1, 1) item['h'] = height item['w'] = width # item['depths_h'] = self.depths[id] # bounding box if full_img: item['images'] = img[None,...] subsamplesize = self.opt.random_sample_size if self.opt.random_sample == "patch": indx = np.random.randint(0, width - subsamplesize + 1) indy = np.random.randint(0, height - subsamplesize + 1) px, py = np.meshgrid( np.arange(indx, indx + subsamplesize).astype(np.float32), np.arange(indy, indy + subsamplesize).astype(np.float32)) elif self.opt.random_sample == "random": px = np.random.randint(0, width, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.randint(0, height, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "random2": px = np.random.uniform(0, width - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.uniform(0, height - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "proportional_random": raise Exception("no gt_mask, no proportional_random !!!") else: px, py = np.meshgrid( np.arange(width).astype(np.float32), np.arange(height).astype(np.float32)) pixelcoords = np.stack((px, py), axis=-1).astype(np.float32) # H x W x 2 # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot) item["pixel_idx"] = pixelcoords # print("pixelcoords", pixelcoords.reshape(-1,2)[:10,:]) raydir = get_dtu_raydir(pixelcoords, item["intrinsic"], camrot, self.opt.dir_norm > 0) raydir = np.reshape(raydir, (-1, 3)) item['raydir'] = torch.from_numpy(raydir).float() gt_image = 
gt_image[py.astype(np.int32), px.astype(np.int32)] # gt_mask = gt_mask[py.astype(np.int32), px.astype(np.int32), :] gt_image = np.reshape(gt_image, (-1, 3)) item['gt_image'] = gt_image item['id'] = id if self.bg_color: if self.bg_color == 'random': val = np.random.rand() if val > 0.5: item['bg_color'] = torch.FloatTensor([1, 1, 1]) else: item['bg_color'] = torch.FloatTensor([0, 0, 0]) else: item['bg_color'] = torch.FloatTensor(self.bg_color) return item def get_item(self, idx, crop=False, full_img=False): item = self.__getitem__(idx, crop=crop, full_img=full_img) for key, value in item.items(): if not isinstance(value, str): if not torch.is_tensor(value): value = torch.as_tensor(value) item[key] = value.unsqueeze(0) return item def get_dummyrot_item(self, idx, crop=False): item = {} width, height = self.width, self.height transform_matrix = self.render_poses[idx] camrot = transform_matrix[0:3, 0:3] campos = transform_matrix[0:3, 3] # focal = self.focal # item["focal"] = focal item["campos"] = torch.from_numpy(campos).float() item["camrotc2w"] = torch.from_numpy(camrot).float() item['lightpos'] = item["campos"] item['intrinsic'] = self.intrinsic # near far item['far'] = torch.FloatTensor([self.opt.far_plane]).view(1, 1) item['near'] = torch.FloatTensor([self.opt.near_plane]).view(1, 1) item['h'] = self.height item['w'] = self.width subsamplesize = self.opt.random_sample_size if self.opt.random_sample == "patch": indx = np.random.randint(0, width - subsamplesize + 1) indy = np.random.randint(0, height - subsamplesize + 1) px, py = np.meshgrid( np.arange(indx, indx + subsamplesize).astype(np.float32), np.arange(indy, indy + subsamplesize).astype(np.float32)) elif self.opt.random_sample == "random": px = np.random.randint(0, width, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.randint(0, height, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "random2": px = np.random.uniform(0, width - 1e-5, 
size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.uniform(0, height - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "proportional_random": raise Exception("no gt_mask, no proportional_random !!!") else: px, py = np.meshgrid( np.arange(width).astype(np.float32), np.arange(height).astype(np.float32)) pixelcoords = np.stack((px, py), axis=-1).astype(np.float32) # H x W x 2 # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot) item["pixel_idx"] = pixelcoords # print("pixelcoords", pixelcoords.reshape(-1,2)[:10,:]) raydir = get_dtu_raydir(pixelcoords, self.intrinsic, camrot, self.opt.dir_norm > 0) raydir = np.reshape(raydir, (-1, 3)) item['raydir'] = torch.from_numpy(raydir).float() item['id'] = idx if self.bg_color: if self.bg_color == 'random': val = np.random.rand() if val > 0.5: item['bg_color'] = torch.FloatTensor([1, 1, 1]) else: item['bg_color'] = torch.FloatTensor([0, 0, 0]) else: item['bg_color'] = torch.FloatTensor(self.bg_color) for key, value in item.items(): if not torch.is_tensor(value): value = torch.as_tensor(value) item[key] = value.unsqueeze(0) return item
31,438
39.82987
175
py
NLRN
NLRN-master/__init__.py
0
0
0
py
NLRN
NLRN-master/trainer.py
"""Trainer """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import argparse import importlib import tensorflow as tf if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--dataset', help='Dataset name', default=None, type=str, ) parser.add_argument( '--model', help='Model name', default=None, type=str, ) parser.add_argument( '--job-dir', help='GCS location to write checkpoints and export models', required=True) # Experiment arguments parser.add_argument( '--save-checkpoints-steps', help='Number of steps to save checkpoint', default=1000, type=int) parser.add_argument( '--train-steps', help='Number of steps to run training totally', default=None, type=int) parser.add_argument( '--eval-steps', help='Number of steps to run evaluation for at each checkpoint', default=None, type=int) parser.add_argument( '--save-summary-steps', help='Number of steps to save summary', default=100, type=int) parser.add_argument( '--random-seed', help='Random seed for TensorFlow', default=None, type=int) # Performance tuning parameters parser.add_argument( '--allow-growth', help='Whether to enable allow_growth in GPU_Options', default=False, action='store_true') parser.add_argument( '--xla', help='Whether to enable XLA auto-jit compilation', default=False, action='store_true') parser.add_argument( '--save-profiling-steps', help='Number of steps to save profiling', default=None, type=int) # Argument to turn on all logging parser.add_argument( '--verbosity', choices=['DEBUG', 'ERROR', 'FATAL', 'INFO', 'WARN'], default='INFO', help='Set logging verbosity') # Parse arguments args, _ = parser.parse_known_args() dataset_module = importlib.import_module('datasets.' + args.dataset if args.dataset else 'datasets') dataset_module.update_argparser(parser) model_module = importlib.import_module('models.' 
+ args.model if args.model else 'models') model_module.update_argparser(parser) hparams = parser.parse_args() print(hparams) # Set python level verbosity tf.logging.set_verbosity(hparams.verbosity) # Set C++ Graph Execution level verbosity os.environ['TF_CPP_MIN_LOG_LEVEL'] = str( tf.logging.__dict__[hparams.verbosity] / 10) # Run the training job model_fn = getattr(model_module, 'model_fn') input_fn = getattr(dataset_module, 'input_fn') train_input_fn = lambda: input_fn( mode=tf.estimator.ModeKeys.TRAIN, params=hparams, ) eval_input_fn = lambda: input_fn( mode=tf.estimator.ModeKeys.EVAL, params=hparams, ) predict_input_fn = getattr(dataset_module, 'predict_input_fn', None) session_config = tf.ConfigProto() session_config.gpu_options.allow_growth = hparams.allow_growth if hparams.xla: session_config.graph_options.optimizer_options.global_jit_level = ( tf.OptimizerOptions.ON_1) run_config = tf.estimator.RunConfig( model_dir=hparams.job_dir, tf_random_seed=hparams.random_seed, save_summary_steps=hparams.save_summary_steps, save_checkpoints_steps=hparams.save_checkpoints_steps, session_config=session_config, ) estimator = tf.estimator.Estimator( model_fn=model_fn, config=run_config, params=hparams, ) hooks = [] if hparams.save_profiling_steps: hooks.append( tf.train.ProfilerHook( save_steps=hparams.save_profiling_steps, output_dir=hparams.job_dir, )) train_spec = tf.estimator.TrainSpec( input_fn=train_input_fn, max_steps=hparams.train_steps, hooks=hooks, ) eval_spec = tf.estimator.EvalSpec( input_fn=eval_input_fn, steps=hparams.eval_steps, exporters=tf.estimator.LatestExporter( name='Servo', serving_input_receiver_fn=predict_input_fn) if predict_input_fn else None, ) tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
4,328
28.053691
75
py
NLRN
NLRN-master/common/layers.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf class Conv2DWeightNorm(tf.layers.Conv2D): def build(self, input_shape): self.wn_g = self.add_weight( name='wn_g', shape=(self.filters,), dtype=self.dtype, initializer=tf.initializers.ones, trainable=True, ) super(Conv2DWeightNorm, self).build(input_shape) square_sum = tf.reduce_sum( tf.square(self.kernel), [0, 1, 2], keepdims=False) inv_norm = tf.rsqrt(square_sum) self.kernel = self.kernel * (inv_norm * self.wn_g) def conv2d_weight_norm(inputs, filters, kernel_size, strides=(1, 1), padding='valid', data_format='channels_last', dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer=None, bias_initializer=tf.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, reuse=None): layer = Conv2DWeightNorm( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, dtype=inputs.dtype.base_dtype, _reuse=reuse, _scope=name) return layer.apply(inputs)
2,141
31.454545
63
py
NLRN
NLRN-master/common/__init__.py
0
0
0
py
NLRN
NLRN-master/models/__init__.py
"""Basic Model """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf def update_argparser(parser): parser.add_argument( '--learning_rate', help='Learning rate', default=0.001, ) def model_fn(features, labels, mode, params, config): predictions = None loss = None train_op = None eval_metric_ops = None export_outputs = None x = features['feature'] x = tf.layers.dense(x, 10, activation=tf.nn.relu) x = tf.layers.dense(x, 1, activation=None) predictions = x if mode == tf.estimator.ModeKeys.PREDICT: export_outputs = { tf.saved_model.signature_constants.REGRESS_METHOD_NAME: tf.estimator.export.RegressionOutput(predictions) } else: labels = labels['label'] loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions) if mode == tf.estimator.ModeKeys.EVAL: eval_metric_ops = { 'MSE': tf.metrics.mean_squared_error( labels=labels, predictions=predictions), } if mode == tf.estimator.ModeKeys.TRAIN: train_op = tf.train.AdamOptimizer(params.learning_rate).minimize( loss, global_step=tf.train.get_or_create_global_step(), ) return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions, loss=loss, train_op=train_op, eval_metric_ops=eval_metric_ops, export_outputs=export_outputs, )
1,510
24.610169
79
py
NLRN
NLRN-master/models/nlrn.py
"""NLRN model for denoise dataset """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import models def update_argparser(parser): models.update_argparser(parser) args, _ = parser.parse_known_args() parser.add_argument( '--num-steps', help='Number of steps in recurrent networks', default=12, type=int) parser.add_argument( '--num-filters', help='Number of filters in networks', default=128, type=int) parser.add_argument( '--non-local-field-size', help='Size of receptive field in non-local blocks', default=35, type=int) parser.add_argument( '--init-ckpt', help='Checkpoint path to initialize', default=None, type=str, ) parser.set_defaults( train_steps=500000, learning_rate=((100000, 200000, 300000, 400000, 450000), (1e-3, 5e-4, 2.5e-4, 1.25e-4, 6.25e-5, 3.125e-5)), save_checkpoints_steps=20000, save_summary_steps=1000, ) def model_fn(features, labels, mode, params, config): predictions = None loss = None train_op = None eval_metric_ops = None export_outputs = None scaffold = None sources = features['source'] net = _nlrn(sources, mode, params) predictions = tf.clip_by_value(net, 0.0, 1.0) if mode == tf.estimator.ModeKeys.PREDICT: export_outputs = { tf.saved_model.signature_constants.PREDICT_METHOD_NAME: tf.estimator.export.PredictOutput(predictions) } else: targets = labels['target'] def central_crop(x, size=5): x_shape = tf.shape(x) x = tf.slice( x, [0, x_shape[1] // 2 - size // 2, x_shape[2] // 2 - size // 2, 0], [-1, size, size, -1]) return x loss = tf.losses.mean_squared_error( labels=central_crop(targets), predictions=central_crop(net)) if mode == tf.estimator.ModeKeys.EVAL: def _ignore_boundary(images): boundary_size = 16 images = images[:, boundary_size:-boundary_size, boundary_size: -boundary_size, :] return images def _float32_to_uint8(images): images = images * 255.0 images = tf.round(images) images = tf.saturate_cast(images, tf.uint8) return images psnr = tf.image.psnr( 
_float32_to_uint8(_ignore_boundary(targets)), _float32_to_uint8(_ignore_boundary(predictions)), max_val=255, ) eval_metric_ops = { 'PSNR': tf.metrics.mean(psnr), } if mode == tf.estimator.ModeKeys.TRAIN: global_step = tf.train.get_or_create_global_step() learning_rate = tf.train.piecewise_constant( global_step, params.learning_rate[0], params.learning_rate[1]) opt = tf.train.AdamOptimizer(learning_rate) update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): gvs = opt.compute_gradients(loss) capped_gvs = [(tf.clip_by_norm(grad, 2.5), var) for grad, var in gvs] train_op = opt.apply_gradients(capped_gvs, global_step=global_step) stats = tf.profiler.profile() print("Total parameters:", stats.total_parameters) if params.init_ckpt: init_fn = tf.contrib.framework.assign_from_checkpoint_fn( params.init_ckpt, tf.global_variables(), ignore_missing_vars=True) scaffold = tf.train.Scaffold(init_fn=init_fn) return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions, loss=loss, train_op=train_op, eval_metric_ops=eval_metric_ops, export_outputs=export_outputs, ) def _nlrn(x, mode, params): training = mode == tf.estimator.ModeKeys.TRAIN skip = x x = tf.layers.batch_normalization(x, training=training) x = tf.layers.conv2d( x, params.num_filters, 3, padding='same', activation=None, name='conv1') y = x with tf.variable_scope("rnn"): for i in range(params.num_steps): if i == 0: x = _residual_block( x, y, params.num_filters, training, name='RB1', reuse=False) else: x = _residual_block( x, y, params.num_filters, training, name='RB1', reuse=True) x = tf.layers.batch_normalization(x, training=training) x = tf.nn.relu(x) x = tf.layers.conv2d( x, params.num_channels, 3, padding='same', activation=None, name='conv_end') return x + skip def _residual_block(x, y, filter_num, training, name, reuse): x = tf.layers.batch_normalization(x, training=training) x = tf.nn.relu(x) x = _non_local_block(x, 64, 128, training, 35, name='non_local', 
reuse=reuse) x = tf.layers.batch_normalization(x, training=training) x = tf.layers.conv2d( x, filter_num, 3, padding='same', activation=None, name=name + '_a', reuse=reuse) x = tf.layers.batch_normalization(x, training=training) x = tf.nn.relu(x) x = tf.layers.conv2d( x, filter_num, 3, padding='same', activation=None, name=name + '_b', reuse=reuse) x = tf.add(x, y) return x def _non_local_block(x, filter_num, output_filter_num, training, field_size, name, reuse=False): x_theta = tf.layers.conv2d( x, filter_num, 1, padding='same', activation=None, name=name + '_theta', reuse=reuse) x_phi = tf.layers.conv2d( x, filter_num, 1, padding='same', activation=None, name=name + '_phi', reuse=reuse) x_g = tf.layers.conv2d( x, output_filter_num, 1, padding='same', activation=None, name=name + '_g', reuse=reuse, kernel_initializer=tf.zeros_initializer()) if True: x_theta_reshaped = tf.reshape(x_theta, [ tf.shape(x_theta)[0], tf.shape(x_theta)[1] * tf.shape(x_theta)[2], tf.shape(x_theta)[3] ]) x_phi_reshaped = tf.reshape(x_phi, [ tf.shape(x_phi)[0], tf.shape(x_phi)[1] * tf.shape(x_phi)[2], tf.shape(x_phi)[3] ]) x_phi_permuted = tf.transpose(x_phi_reshaped, perm=[0, 2, 1]) x_mul1 = tf.matmul(x_theta_reshaped, x_phi_permuted) x_mul1_softmax = tf.nn.softmax( x_mul1, axis=-1) # normalization for embedded Gaussian x_g_reshaped = tf.reshape(x_g, [ tf.shape(x_g)[0], tf.shape(x_g)[1] * tf.shape(x_g)[2], tf.shape(x_g)[3] ]) x_mul2 = tf.matmul(x_mul1_softmax, x_g_reshaped) x_mul2_reshaped = tf.reshape(x_mul2, [ tf.shape(x_mul2)[0], tf.shape(x_phi)[1], tf.shape(x_phi)[2], output_filter_num ]) else: x_theta = tf.expand_dims(x_theta, -2) x_phi_patches = tf.image.extract_image_patches( x_phi, [1, field_size, field_size, 1], [1, 1, 1, 1], [1, 1, 1, 1], padding='SAME') x_phi_patches = tf.reshape(x_phi_patches, [ tf.shape(x_phi)[0], tf.shape(x_phi)[1], tf.shape(x_phi)[2], field_size * field_size, tf.shape(x_phi)[3], ]) x_mul1 = tf.matmul(x_theta, x_phi_patches, transpose_b=True) x_mul1_softmax = 
tf.nn.softmax(x_mul1, axis=-1) x_g_patches = tf.image.extract_image_patches( x_g, [1, field_size, field_size, 1], [1, 1, 1, 1], [1, 1, 1, 1], padding='SAME') x_g_patches = tf.reshape(x_g_patches, [ tf.shape(x_g)[0], tf.shape(x_g)[1], tf.shape(x_g)[2], field_size * field_size, tf.shape(x_g)[3], ]) x_mul2 = tf.matmul(x_mul1_softmax, x_g_patches) x_mul2_reshaped = tf.reshape( x_mul2, [tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[2], output_filter_num]) return tf.add(x, x_mul2_reshaped)
7,920
27.595668
79
py
NLRN
NLRN-master/datasets/denoise.py
"""DIV2K dataset """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import argparse from hashlib import sha256 from PIL import Image import numpy as np import tensorflow as tf import datasets NUM_CHANNELS = 1 def update_argparser(parser): datasets.update_argparser(parser) parser.add_argument( '--noise-sigma', help='Scale for image super-resolution', default=25, type=float) parser.add_argument( '--train-patch-size', help='Number of pixels in height or width of patches', default=43, type=int) parser.add_argument( '--eval-patch-size', help='Number of pixels in height or width of patches', default=43, type=int) parser.add_argument( '--train-flist', help='GCS location to write checkpoints and export models', type=str, required=True) parser.add_argument( '--eval-flist', help='GCS location to write checkpoints and export models', type=str, required=True) parser.set_defaults( num_channels=NUM_CHANNELS, train_batch_size=16, eval_batch_size=1, shuffle_buffer_size=800, ) def _extract(mode, params): flist = { tf.estimator.ModeKeys.TRAIN: params.train_flist, tf.estimator.ModeKeys.EVAL: params.eval_flist, }[mode] with open(flist) as f: image_files = f.read().splitlines() dataset = tf.data.Dataset.from_tensor_slices((image_files,)) dataset = dataset.map( tf.read_file, num_parallel_calls=params.num_data_threads, ) dataset = dataset.cache() def _decode_image(image_file): image = tf.image.decode_png(image_file, channels=params.num_channels) return image dataset = dataset.map( _decode_image, num_parallel_calls=params.num_data_threads, ) return dataset def _transform(dataset, mode, params): if mode == tf.estimator.ModeKeys.TRAIN: dataset = dataset.shuffle(params.shuffle_buffer_size) dataset = dataset.repeat() def _preprocess(target): if mode == tf.estimator.ModeKeys.TRAIN: target = tf.image.random_crop( target, [params.train_patch_size, params.train_patch_size, 1]) target = tf.image.random_flip_left_right(target) 
target = tf.image.random_flip_up_down(target) pred = tf.less( tf.random_uniform(shape=[], minval=0., maxval=1., dtype=tf.float32), 0.5) target = tf.cond(pred, lambda: tf.image.rot90(target), lambda: target) else: target = tf.image.resize_image_with_crop_or_pad( target, params.eval_patch_size, params.eval_patch_size) target = tf.image.convert_image_dtype(target, tf.float32) source = target + tf.random.normal( tf.shape(target), mean=0, stddev=params.noise_sigma / 255.0, seed=None if mode == tf.estimator.ModeKeys.TRAIN else 0) return {'source': source}, {'target': target} dataset = dataset.map( _preprocess, num_parallel_calls=params.num_data_threads, ) batch_size = { tf.estimator.ModeKeys.TRAIN: params.train_batch_size, tf.estimator.ModeKeys.EVAL: params.eval_batch_size, }[mode] drop_remainder = { tf.estimator.ModeKeys.TRAIN: True, tf.estimator.ModeKeys.EVAL: False, }[mode] dataset = dataset.batch(batch_size, drop_remainder=drop_remainder) return dataset input_fn = lambda mode, params: ( datasets.input_fn_tplt(mode, params, extract=_extract, transform=_transform)) def predict_input_fn(): input_tensor = tf.placeholder( dtype=tf.float32, shape=[None, None, None, NUM_CHANNELS], name='input_tensor') features = {'source': input_tensor} return tf.estimator.export.ServingInputReceiver( features=features, receiver_tensors={ tf.saved_model.signature_constants.PREDICT_INPUTS: input_tensor }) def test_saved_model(): parser = argparse.ArgumentParser() parser.add_argument( '--model-dir', help='GCS location to load exported model', required=True) parser.add_argument( '--input-dir', help='GCS location to load input images', required=True) parser.add_argument( '--output-dir', help='GCS location to load output images', required=True) parser.add_argument( '--noise-sigma', help='Scale for image super-resolution', default=25, type=float) parser.add_argument( '--patch-size', help='Number of pixels in height or width of patches', default=43, type=int) args = parser.parse_args() with 
tf.Session(graph=tf.Graph()) as sess: metagraph_def = tf.saved_model.loader.load( sess, [tf.saved_model.tag_constants.SERVING], args.model_dir) signature_def = metagraph_def.signature_def[ tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] input_tensor = sess.graph.get_tensor_by_name( signature_def.inputs['inputs'].name) output_tensor = sess.graph.get_tensor_by_name( signature_def.outputs['output'].name) if not os.path.isdir(args.output_dir): os.makedirs(args.output_dir) psnr_list = [] for input_file in os.listdir(args.input_dir): print(input_file) sha = sha256(input_file.encode('utf-8')) seed = np.frombuffer(sha.digest(), dtype='uint32') rstate = np.random.RandomState(seed) output_file = os.path.join(args.output_dir, input_file) input_file = os.path.join(args.input_dir, input_file) input_image = np.asarray(Image.open(input_file)) input_image = input_image.astype(np.float32) / 255.0 def forward_images(images): images = output_tensor.eval(feed_dict={input_tensor: images}) return images stride = 7 h_idx_list = list( range(0, input_image.shape[0] - args.patch_size, stride)) + [input_image.shape[0] - args.patch_size] w_idx_list = list( range(0, input_image.shape[1] - args.patch_size, stride)) + [input_image.shape[1] - args.patch_size] output_image = np.zeros(input_image.shape) overlap = np.zeros(input_image.shape) noise_image = input_image + rstate.normal(0, args.noise_sigma / 255.0, input_image.shape) for h_idx in h_idx_list: for w_idx in w_idx_list: # print(h_idx, w_idx) input_patch = noise_image[h_idx:h_idx + args.patch_size, w_idx: w_idx + args.patch_size] input_patch = np.expand_dims(input_patch, axis=-1) input_patch = np.expand_dims(input_patch, axis=0) output_patch = forward_images(input_patch) output_patch = output_patch[0, :, :, 0] output_image[h_idx:h_idx + args.patch_size, w_idx: w_idx + args.patch_size] += output_patch overlap[h_idx:h_idx + args.patch_size, w_idx: w_idx + args.patch_size] += 1 output_image /= overlap def psnr(im1, im2): 
im1_uint8 = np.rint(np.clip(im1 * 255, 0, 255)) im2_uint8 = np.rint(np.clip(im2 * 255, 0, 255)) diff = np.abs(im1_uint8 - im2_uint8).flatten() rmse = np.sqrt(np.mean(np.square(diff))) psnr = 20 * np.log10(255.0 / rmse) print(psnr) return psnr psnr_list.append(psnr(output_image, input_image)) output_image = np.around(output_image * 255.0).astype(np.uint8) output_image = Image.fromarray(output_image) output_image.save(output_file) print('PSNR: ', np.average(np.array(psnr_list))) if __name__ == '__main__': test_saved_model()
7,652
31.565957
81
py
NLRN
NLRN-master/datasets/__init__.py
"""Basic Dataset """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import random import tensorflow as tf def update_argparser(parser): parser.add_argument( '--train-batch-size', help='Batch size for training steps', type=int, default=32) parser.add_argument( '--eval-batch-size', help='Batch size for evaluation steps', type=int, default=32) parser.add_argument( '--num-data-threads', help='Number of threads for data transformation', type=int, default=8) parser.add_argument( '--shuffle-buffer-size', help='Buffer size for data shuffling', type=int, default=10000) parser.add_argument( '--prefetch-buffer-size', help='Buffer size for batch prefetching', type=int, default=2) def _extract(mode, params): dataset = tf.data.Dataset.range(10000) dataset = tf.data.Dataset.zip((dataset, dataset)) return dataset def _transform(dataset, mode, params): if mode == tf.estimator.ModeKeys.TRAIN: dataset = dataset.shuffle(params.shuffle_buffer_size) dataset = dataset.repeat() def _preprocess(feature, label): feature = tf.to_float(feature) label = tf.to_float(label) if mode == tf.estimator.ModeKeys.TRAIN: feature += tf.random_normal([]) label += tf.random_normal([]) feature = tf.expand_dims(feature, -1) label = tf.expand_dims(label, -1) return {'feature': feature}, {'label': label} dataset = dataset.map( _preprocess, num_parallel_calls=params.num_data_threads, ) batch_size = { tf.estimator.ModeKeys.TRAIN: params.train_batch_size, tf.estimator.ModeKeys.EVAL: params.eval_batch_size, }[mode] drop_remainder = { tf.estimator.ModeKeys.TRAIN: True, tf.estimator.ModeKeys.EVAL: False, }[mode] dataset = dataset.batch(batch_size, drop_remainder=drop_remainder) return dataset def _load(dataset, mode, params): dataset = dataset.prefetch(params.prefetch_buffer_size) iterator = dataset.make_one_shot_iterator() features, labels = iterator.get_next() return features, labels def input_fn_tplt(mode, params, extract=_extract, 
transform=_transform, load=_load): dataset = extract(mode, params) dataset = transform(dataset, mode, params) dataset = load(dataset, mode, params) return dataset input_fn = lambda mode, params: ( input_fn_tplt(mode, params, _extract, _transform, _load)) def predict_input_fn(): serialized_tf_example = tf.placeholder( dtype=tf.string, shape=[None], name='input_example_tensor') feature_spec = { 'feature': tf.FixedLenFeature(shape=[1], dtype=tf.float32), } features = tf.parse_example(serialized_tf_example, feature_spec) return tf.estimator.export.ServingInputReceiver( features=features, receiver_tensors={ tf.saved_model.signature_constants.REGRESS_INPUTS: serialized_tf_example }) def _create_tf_example(): feature = tf.train.Feature( float_list=tf.train.FloatList(value=[random.randrange(10000)])) example = tf.train.Example( features=tf.train.Features(feature={'feature': feature})) print(example) return example.SerializeToString() def test_saved_model(): parser = argparse.ArgumentParser() parser.add_argument( '--model-dir', help='GCS location to load exported model', required=True) args = parser.parse_args() with tf.Session(graph=tf.Graph()) as sess: metagraph_def = tf.saved_model.loader.load( sess, [tf.saved_model.tag_constants.SERVING], args.model_dir) signature_def = metagraph_def.signature_def[ tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] input_tensor = sess.graph.get_tensor_by_name( signature_def.inputs['inputs'].name) output_tensor = sess.graph.get_tensor_by_name( signature_def.outputs['outputs'].name) print(output_tensor.eval(feed_dict={input_tensor: [_create_tf_example()]})) if __name__ == '__main__': test_saved_model()
4,176
28.006944
79
py
NLRN
NLRN-master/datasets/div2k.py
"""DIV2K dataset """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import argparse from PIL import Image import numpy as np import tensorflow as tf import datasets REMOTE_URL = 'http://data.vision.ee.ethz.ch/cvl/DIV2K/' TRAIN_LR_ARCHIVE_NAME = lambda s: 'DIV2K_train_LR_bicubic_X{}.zip'.format(s) TRAIN_HR_ARCHIVE_NAME = 'DIV2K_train_HR.zip' EVAL_LR_ARCHIVE_NAME = lambda s: 'DIV2K_valid_LR_bicubic_X{}.zip'.format(s) EVAL_HR_ARCHIVE_NAME = 'DIV2K_valid_HR.zip' LOCAL_DIR = 'data/DIV2K/' TRAIN_LR_DIR = lambda s: LOCAL_DIR + 'DIV2K_train_LR_bicubic/X{}/'.format(s) TRAIN_HR_DIR = LOCAL_DIR + 'DIV2K_train_HR/' EVAL_LR_DIR = lambda s: LOCAL_DIR + 'DIV2K_valid_LR_bicubic/X{}/'.format(s) EVAL_HR_DIR = LOCAL_DIR + 'DIV2K_valid_HR/' NUM_CHANNELS = 3 def update_argparser(parser): datasets.update_argparser(parser) parser.add_argument( '--scale', help='Scale for image super-resolution', default=2, type=int) parser.add_argument( '--lr-patch-size', help='Number of pixels in height or width of LR patches', default=48, type=int) parser.set_defaults( num_channels=NUM_CHANNELS, train_batch_size=16, eval_batch_size=1, shuffle_buffer_size=800, ) def _extract(mode, params): lr_dir = { tf.estimator.ModeKeys.TRAIN: TRAIN_LR_DIR(params.scale), tf.estimator.ModeKeys.EVAL: EVAL_LR_DIR(params.scale), }[mode] #lr_dir = os.path.expanduser(lr_dir) hr_dir = { tf.estimator.ModeKeys.TRAIN: TRAIN_HR_DIR, tf.estimator.ModeKeys.EVAL: EVAL_HR_DIR, }[mode] def list_files(d): files = sorted(os.listdir(d)) files = [os.path.join(d, f) for f in files] return files lr_files = list_files(lr_dir) hr_files = list_files(hr_dir) dataset = tf.data.Dataset.from_tensor_slices((lr_files, hr_files)) def _read_image(lr_file, hr_file): lr_image = tf.image.decode_png(tf.read_file(lr_file), channels=NUM_CHANNELS) hr_image = tf.image.decode_png(tf.read_file(hr_file), channels=NUM_CHANNELS) return lr_image, hr_image dataset = dataset.map( _read_image, 
num_parallel_calls=params.num_data_threads, ) dataset = dataset.cache() return dataset def _transform(dataset, mode, params): if mode == tf.estimator.ModeKeys.TRAIN: dataset = dataset.shuffle(params.shuffle_buffer_size) dataset = dataset.repeat() def _preprocess(lr, hr): if mode == tf.estimator.ModeKeys.TRAIN: lr_shape = tf.shape(lr) lr_up = tf.random_uniform( shape=[], minval=0, maxval=lr_shape[0] - params.lr_patch_size, dtype=tf.int32) lr_left = tf.random_uniform( shape=[], minval=0, maxval=lr_shape[1] - params.lr_patch_size, dtype=tf.int32) lr = tf.slice(lr, [lr_up, lr_left, 0], [params.lr_patch_size, params.lr_patch_size, -1]) hr_up = lr_up * params.scale hr_left = lr_left * params.scale hr_patch_size = params.lr_patch_size * params.scale hr = tf.slice(hr, [hr_up, hr_left, 0], [hr_patch_size, hr_patch_size, -1]) def _to_be_or_not_to_be(values, fn): def _to_be(): return [fn(v) for v in values] def _not_to_be(): return values pred = tf.less( tf.random_uniform(shape=[], minval=0., maxval=1., dtype=tf.float32), 0.5) values = tf.cond(pred, _to_be, _not_to_be) return values lr, hr = _to_be_or_not_to_be([lr, hr], tf.image.flip_left_right) lr, hr = _to_be_or_not_to_be([lr, hr], tf.image.flip_up_down) lr, hr = _to_be_or_not_to_be([lr, hr], tf.image.rot90) lr = tf.image.convert_image_dtype(lr, tf.float32) hr = tf.image.convert_image_dtype(hr, tf.float32) return {'source': lr}, {'target': hr} dataset = dataset.map( _preprocess, num_parallel_calls=params.num_data_threads, ) batch_size = { tf.estimator.ModeKeys.TRAIN: params.train_batch_size, tf.estimator.ModeKeys.EVAL: params.eval_batch_size, }[mode] drop_remainder = { tf.estimator.ModeKeys.TRAIN: True, tf.estimator.ModeKeys.EVAL: False, }[mode] dataset = dataset.batch(batch_size, drop_remainder=drop_remainder) return dataset input_fn = lambda mode, params: ( datasets.input_fn_tplt(mode, params, extract=_extract, transform=_transform)) def predict_input_fn(): input_tensor = tf.placeholder( dtype=tf.float32, shape=[None, 
None, None, 3], name='input_tensor') features = {'source': input_tensor} return tf.estimator.export.ServingInputReceiver( features=features, receiver_tensors={ tf.saved_model.signature_constants.PREDICT_INPUTS: input_tensor }) def test_saved_model(): parser = argparse.ArgumentParser() parser.add_argument( '--model-dir', help='GCS location to load exported model', required=True) parser.add_argument( '--input-dir', help='GCS location to load input images', required=True) parser.add_argument( '--output-dir', help='GCS location to load output images', required=True) parser.add_argument( '--ensemble', help='Whether to ensemble with 8x rotation and flip', default=False, action='store_true') args = parser.parse_args() with tf.Session(graph=tf.Graph()) as sess: metagraph_def = tf.saved_model.loader.load( sess, [tf.saved_model.tag_constants.SERVING], args.model_dir) signature_def = metagraph_def.signature_def[ tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] input_tensor = sess.graph.get_tensor_by_name( signature_def.inputs['inputs'].name) output_tensor = sess.graph.get_tensor_by_name( signature_def.outputs['output'].name) if not os.path.isdir(args.output_dir): os.mkdir(args.output_dir) for input_file in os.listdir(args.input_dir): print(input_file) output_file = os.path.join(args.output_dir, input_file) input_file = os.path.join(args.input_dir, input_file) input_image = np.asarray(Image.open(input_file)) def forward_images(images): images = images.astype(np.float32) / 255.0 images = output_tensor.eval(feed_dict={input_tensor: images}) return images if args.ensemble: def flip(image): images = [image] images.append(image[::-1, :, :]) images.append(image[:, ::-1, :]) images.append(image[::-1, ::-1, :]) images = np.stack(images) return images def mean_of_flipped(images): image = (images[0] + images[1, ::-1, :, :] + images[2, :, ::-1, :] + images[3, ::-1, ::-1, :]) * 0.25 return image rotate = lambda images: np.swapaxes(images, 1, 2) input_images = 
flip(input_image) output_image1 = mean_of_flipped(forward_images(input_images)) output_image2 = mean_of_flipped( rotate(forward_images(rotate(input_images)))) output_image = (output_image1 + output_image2) * 0.5 else: input_images = np.expand_dims(input_image, axis=0) output_images = forward_images(input_images) output_image = output_images[0] output_image = np.around(output_image * 255.0).astype(np.uint8) output_image = Image.fromarray(output_image, 'RGB') output_image.save(output_file) if __name__ == '__main__': test_saved_model()
7,489
31.707424
81
py
variational_dropout
variational_dropout-master/train.py
"""Train an MNIST classifier in one of three modes: plain MLP, binary
dropout, or variational dropout (with KL warm-up)."""
import argparse

import torch as t
import torch.nn as nn
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
from torch.autograd import Variable
from torch.optim import Adam
from torchvision import datasets

from models import *

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='train')
    parser.add_argument('--num-epochs', type=int, default=60, metavar='NI',
                        help='num epochs (default: 10)')
    parser.add_argument('--batch-size', type=int, default=70, metavar='BS',
                        help='batch size (default: 70)')
    parser.add_argument('--use-cuda', type=bool, default=False, metavar='CUDA',
                        help='use cuda (default: False)')
    parser.add_argument('--learning-rate', type=float, default=0.0005, metavar='LR',
                        help='learning rate (default: 0.0005)')
    parser.add_argument('--mode', type=str, default='vardropout', metavar='M',
                        help='training mode (default: simple)')
    args = parser.parse_args()

    writer = SummaryWriter(args.mode)

    assert args.mode in ['simple', 'dropout', 'vardropout'], 'Invalid mode, should be in [simple, dropout, vardropout]'
    # Map the mode flag to its model class.
    Model = {
        'simple': SimpleModel,
        'dropout': DropoutModel,
        'vardropout': VariationalDropoutModel
    }
    Model = Model[args.mode]

    dataset = datasets.MNIST(root='data/',
                             transform=transforms.Compose([
                                 transforms.ToTensor()]),
                             download=True,
                             train=True)
    train_dataloader = t.utils.data.DataLoader(dataset, batch_size=args.batch_size, shuffle=True)

    dataset = datasets.MNIST(root='data/',
                             transform=transforms.Compose([
                                 transforms.ToTensor()]),
                             download=True,
                             train=False)
    test_dataloader = t.utils.data.DataLoader(dataset, batch_size=args.batch_size, shuffle=True, drop_last=True)

    model = Model()
    if args.use_cuda:
        model.cuda()

    optimizer = Adam(model.parameters(), args.learning_rate, eps=1e-6)
    # BUG FIX: removed the unused (and misspelled) `cross_enropy_averaged`
    # criterion -- each model computes its own loss via `model.loss`.

    for epoch in range(args.num_epochs):
        for iteration, (input, target) in enumerate(train_dataloader):

            # Flatten 28x28 images into 784-vectors.
            input = Variable(input).view(-1, 784)
            target = Variable(target)

            if args.use_cuda:
                input, target = input.cuda(), target.cuda()

            optimizer.zero_grad()

            loss = None
            if args.mode == 'simple':
                loss = model.loss(input=input, target=target, average=True)
            elif args.mode == 'dropout':
                loss = model.loss(input=input, target=target, p=0.4, average=True)
            else:
                likelihood, kld = model.loss(input=input, target=target, train=True, average=True)
                # Linearly anneal the KL weight over the first 40 epochs.
                coef = min(epoch / 40., 1.)
                loss = likelihood + kld * coef

            loss.backward()
            optimizer.step()

            if iteration % 50 == 0:
                print('train epoch {}, iteration {}, loss {}'.format(epoch, iteration, loss.cpu().data.numpy()[0]))

            if iteration % 100 == 0:
                # Evaluate the summed loss over the whole test set.
                loss = 0
                for input, target in test_dataloader:
                    input = Variable(input).view(-1, 784)
                    target = Variable(target)

                    if args.use_cuda:
                        input, target = input.cuda(), target.cuda()

                    if args.mode == 'simple':
                        loss += model.loss(input=input, target=target, average=False).cpu().data.numpy()[0]
                    elif args.mode == 'dropout':
                        loss += model.loss(input=input, target=target, p=0., average=False).cpu().data.numpy()[0]
                    else:
                        loss += model.loss(input=input, target=target, train=False, average=False).cpu().data.numpy()[0]

                loss = loss / (args.batch_size * len(test_dataloader))

                print('_____________')
                print('valid epoch {}, iteration {}'.format(epoch, iteration))
                print('_____________')
                print(loss)
                print('_____________')

                writer.add_scalar('data/loss', loss, epoch * len(train_dataloader) + iteration)

    writer.close()
4,518
39.348214
120
py
variational_dropout
variational_dropout-master/models/dropout_model.py
import torch.nn as nn
import torch.nn.functional as F


class DropoutModel(nn.Module):
    """Three-layer MLP classifier (784 -> 500 -> 50 -> 10) with binary
    dropout applied between the ELU-activated layers."""

    def __init__(self):
        super(DropoutModel, self).__init__()

        self.fc = nn.ModuleList([
            nn.Linear(784, 500),
            nn.Linear(500, 50),
            nn.Linear(50, 10)
        ])

    def forward(self, input, p=0):
        """
        :param input: An float tensor with shape of [batch_size, 784]
        :param p: An float value in [0, 1.] with probability of elements to be zeroed
        :return: An float tensor with shape of [batch_size, 10] filled with logits of likelihood
        """
        out = input
        last = len(self.fc) - 1
        for idx, fc in enumerate(self.fc):
            # NOTE(review): ELU is also applied to the final layer, so the
            # returned "logits" are ELU-squashed -- kept as in the original.
            out = F.elu(fc(out))
            # Dropout only between layers, never on the output.
            if idx < last:
                out = F.dropout(out, p, training=True)
        return out

    def loss(self, **kwargs):
        logits = self(kwargs['input'], kwargs['p'])
        return F.cross_entropy(logits, kwargs['target'], size_average=kwargs['average'])
1,009
27.857143
96
py
variational_dropout
variational_dropout-master/models/simple_model.py
import torch.nn as nn
import torch.nn.functional as F


class SimpleModel(nn.Module):
    """Plain three-layer MLP baseline (784 -> 500 -> 50 -> 10), no dropout."""

    def __init__(self):
        super(SimpleModel, self).__init__()

        # Hidden layers use ELU; the output layer emits raw logits.
        self.fc = nn.Sequential(
            nn.Linear(784, 500),
            nn.ELU(),
            nn.Linear(500, 50),
            nn.ELU(),
            nn.Linear(50, 10)
        )

    def forward(self, input):
        """
        :param input: An float tensor with shape of [batch_size, 784]
        :return: An float tensor with shape of [batch_size, 10] filled with logits of likelihood
        """
        return self.fc(input)

    def loss(self, **kwargs):
        logits = self(kwargs['input'])
        return F.cross_entropy(logits, kwargs['target'], size_average=kwargs['average'])
744
24.689655
96
py
variational_dropout
variational_dropout-master/models/variational_dropout_model.py
import torch.nn as nn
import torch.nn.functional as F

from variational_dropout.variational_dropout import VariationalDropout


class VariationalDropoutModel(nn.Module):
    """MLP classifier whose hidden layers use variational dropout.

    In training mode `forward`/`loss` additionally return the summed KL
    divergence of the variational-dropout layers, to be annealed into the
    training objective by the caller.
    """

    def __init__(self):
        super(VariationalDropoutModel, self).__init__()

        self.fc = nn.ModuleList([
            VariationalDropout(784, 500),
            VariationalDropout(500, 50),
            nn.Linear(50, 10)
        ])

    def forward(self, input, train=False):
        """
        :param input: An float tensor with shape of [batch_size, 784]
        :param train: An boolean value indicating whether forward propagation called when training is performed
        :return: An float tensor with shape of [batch_size, 10] filled with logits of likelihood and kld estimation
        """
        # NOTE(review): layers are invoked as `layer(result, train)`, but
        # VariationalDropout.forward (variational_dropout.py) accepts a single
        # `input` argument and routes on `self.training` -- confirm the
        # intended layer API before relying on this path.
        result = input

        if train:
            total_kld = 0
            for i, layer in enumerate(self.fc):
                if i != len(self.fc) - 1:
                    result, kld = layer(result, train)
                    result = F.elu(result)
                    # BUG FIX: accumulate the KL term across all layers.
                    # Previously the accumulator was clobbered by the layer's
                    # kld and then doubled (`kld += kld`), so only twice the
                    # last layer's KL was returned.
                    total_kld = total_kld + kld

            return self.fc[-1](result), total_kld

        for i, layer in enumerate(self.fc):
            if i != len(self.fc) - 1:
                result = F.elu(layer(result, train))

        return self.fc[-1](result)

    def loss(self, **kwargs):
        if kwargs['train']:
            out, kld = self(kwargs['input'], kwargs['train'])
            return F.cross_entropy(out, kwargs['target'], size_average=kwargs['average']), kld

        out = self(kwargs['input'], kwargs['train'])
        return F.cross_entropy(out, kwargs['target'], size_average=kwargs['average'])
1,648
31.333333
111
py
variational_dropout
variational_dropout-master/models/__init__.py
from .simple_model import SimpleModel from .dropout_model import DropoutModel from .variational_dropout_model import VariationalDropoutModel
141
34.5
62
py
variational_dropout
variational_dropout-master/variational_dropout/variational_dropout.py
import math

import torch as t
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.parameter import Parameter


class VariationalDropout(nn.Module):
    """Fully-connected layer with variational dropout.

    Learns one log-variance per weight; at evaluation time weights whose
    log-alpha exceeds `threshold` are pruned to exactly zero.
    """

    def __init__(self, input_size, out_size, log_sigma2=-10, threshold=3):
        """
        :param input_size: An int of input size
        :param log_sigma2: Initial value of log sigma ^ 2.
               It is crusial for training since it determines initial value of alpha
        :param threshold: Value for thresholding of validation. If log_alpha > threshold, then weight is zeroed
        :param out_size: An int of output size
        """
        super(VariationalDropout, self).__init__()

        self.input_size = input_size
        self.out_size = out_size

        self.theta = Parameter(t.FloatTensor(input_size, out_size))
        self.bias = Parameter(t.Tensor(out_size))

        self.log_sigma2 = Parameter(t.FloatTensor(input_size, out_size).fill_(log_sigma2))

        self.reset_parameters()

        # Coefficients of the sigmoid-based approximation of the KL term.
        self.k = [0.63576, 1.87320, 1.48695]

        self.threshold = threshold

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(out_size), 1/sqrt(out_size)]."""
        stdv = 1. / math.sqrt(self.out_size)

        self.theta.data.uniform_(-stdv, stdv)
        self.bias.data.uniform_(-stdv, stdv)

    @staticmethod
    def clip(input, to=8):
        """Clamp `input` elementwise to [-to, to] for numerical stability."""
        input = input.masked_fill(input < -to, -to)
        input = input.masked_fill(input > to, to)

        return input

    def kld(self, log_alpha):
        """Approximate negative KL(q || log-uniform prior), averaged per weight."""
        # t.sigmoid: F.sigmoid is deprecated in favor of torch.sigmoid.
        first_term = self.k[0] * t.sigmoid(self.k[1] + self.k[2] * log_alpha)
        second_term = 0.5 * t.log(1 + t.exp(-log_alpha))

        return -(first_term - second_term - self.k[0]).sum() / (self.input_size * self.out_size)

    def forward(self, input):
        """
        :param input: An float tensor with shape of [batch_size, input_size]
        :return: An float tensor with shape of [batch_size, out_size] and negative layer-kld estimation
        """
        # log alpha = log sigma^2 - log theta^2  (alpha = sigma^2 / theta^2)
        log_alpha = self.clip(self.log_sigma2 - t.log(self.theta ** 2))

        kld = self.kld(log_alpha)

        if not self.training:
            # Deterministic path: prune high-alpha (i.e. noise-dominated) weights.
            mask = log_alpha > self.threshold
            return t.addmm(self.bias, input, self.theta.masked_fill(mask, 0))

        # Local reparameterization trick: sample activations, not weights.
        mu = t.mm(input, self.theta)
        std = t.sqrt(t.mm(input ** 2, self.log_sigma2.exp()) + 1e-6)

        eps = Variable(t.randn(*mu.size()))
        if input.is_cuda:
            eps = eps.cuda()

        return std * eps + mu + self.bias, kld

    def max_alpha(self):
        """Largest dropout rate alpha = exp(log sigma^2 - log theta^2) in the layer."""
        # BUG FIX: previously computed `log_sigma2 - theta ** 2`, missing the
        # log on theta^2 and thus disagreeing with `forward`'s log_alpha.
        log_alpha = self.log_sigma2 - t.log(self.theta ** 2)
        return t.max(log_alpha.exp())
2,575
31.2
111
py
variational_dropout
variational_dropout-master/variational_dropout/__init__.py
0
0
0
py
P3O
P3O-main/plot_log.py
"""Plot smoothed loss curves parsed from a pipe-delimited training log."""

# Path of the training log to visualise.
logp = '/home/chenxing/extra_test/BeamRider/ddpo_4/log.txt'

with open(logp, 'r') as f:
    lines = f.read().split('\n')

# Collect "| key | value |" rows containing 'loss' into {key: [values...]}.
data = {}
for line in lines:
    if 'loss' not in line:
        continue
    fields = line.split('|')
    key = fields[1].strip()
    data.setdefault(key, []).append(float(fields[2]))

from matplotlib import pyplot as plt
import numpy as np

plt.figure(figsize=(12, 8))
for idx, (key, series) in enumerate(data.items(), start=1):
    # Exponentially-weighted smoothing over a 10-sample window.
    window = np.exp(np.linspace(0, 1, 10))
    window = window / np.sum(window)
    smoothed = np.convolve(window, series, mode='valid')
    plt.subplot(2, 3, idx)
    plt.title(key)
    plt.plot(smoothed)

# Final panel: label the figure with the run directory name.
plt.subplot(2, 3, len(data) + 1)
plt.title(logp.split('/')[-2])
plt.show()
761
22.090909
59
py
P3O
P3O-main/test.py
"""Smoke-test a MuJoCo Gym environment by stepping it with random actions."""
import gym

env = gym.make("Walker2d-v2")
# env = gym.make("Ant-v2")
env.reset()

# Take 20 uniformly sampled actions, restarting episodes as they end.
for _step in range(20):
    # env.render()
    action = env.action_space.sample()  # User-defined policy function
    obs, reward, done, info = env.step(action)
    print(info, reward)
    if done:
        env.reset()

env.close()
307
22.692308
69
py
P3O
P3O-main/run_experiments.py
"""Emit shell commands to launch baselines training runs over several seeds."""

# Environment-setup lines printed before every command.
a = 'export PYTHONPATH=/home/chenxing/tmp/baselines'
b = 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/chenxing/.mujoco/mujoco200/bin'

for seed in (1, 2, 3, 4):
    algo = 'spg'
    game = 'HalfCheetah-v2'
    rename = 'spg+5rvkl'
    # Full training command for this seed; logs go to a per-seed directory.
    cmd = [
        "/home/chenxing/env/bin/python",
        "-m",
        "baselines.run",
        "--alg=" + algo,
        "--seed=" + str(seed),
        "--beta=1",
        "--env=" + game,
        "--num_timesteps=3e6",
        "--log_path=/home/chenxing/tmp/logs/" + game + "/" + rename + "_" + str(seed),
    ]
    print(a)
    print(b)
    print(' '.join(cmd))
    print()
593
22.76
82
py
P3O
P3O-main/plot_progress.py
"""Plot smoothed training curves from a baselines progress.csv file."""

# logp = '/home/chenxing/extra_test/BeamRider/ppo2_1/progress.csv'
logp = '/home/chenxing/extra_test/BeamRider/ddpo_1/progress.csv'
with open(logp, 'r') as f:
    logs = f.read().split('\n')

# Parse the CSV header, then collect one float per column per row.
data = {}
keys = logs[0].split(',')
for key in keys:
    data[key] = []
for log in logs[1:]:
    if len(log) < 2:
        continue
    values = log.split(',')
    # BUG FIX: removed the unused debugging assignment
    # `a, b = keys[:10], values[:10]` and the unused `wanted` list below.
    for key, value in zip(keys, values):
        data[key].append(float(value))

from matplotlib import pyplot as plt
import numpy as np

# Drop columns that are not interesting to plot.  pop with a default so a
# missing column does not crash the script on logs from other algorithms.
unwanted = ['eplenmean', 'fps', ]
for u in unwanted:
    data.pop(u, None)

i = 1
fig = plt.figure(figsize=(9, 9))
for key, value in data.items():
    # Only the first nine columns fit in the 3x3 grid.
    if i > 9:
        continue
    # Exponentially-weighted smoothing over a 10-sample window.
    N = 10
    weights = np.exp(np.linspace(0, 1, N))
    weights = weights / np.sum(weights)
    value = np.convolve(weights, value, mode='valid')
    plt.subplot(3, 3, i)
    plt.title(key)
    plt.plot(value)
    i += 1

# Derive a human-readable figure title from the log path.
env_name = logp.split('/')[-3]
method_name = logp.split('/')[-2]
method_name = method_name.replace('ddpo', 'P3O').replace('ppo2', 'PPO').replace('vpgdualclip', 'Dual-clip PPO')
figname = method_name + ' Analysis in ' + env_name
fig.suptitle(figname)
fig = plt.gcf()
plt.show()
# fig.savefig('test/'+figname+'.pdf',bbox_inches='tight',dpi=300, backend='pdf')
1,338
22.086207
108
py
P3O
P3O-main/plot.py
import os import numpy as np import matplotlib # matplotlib.use('TkAgg') # Can change to 'Agg' for non-interactive mode # matplotlib.use('Agg') import matplotlib.pyplot as plt # plt.rcParams['svg.fonttype'] = 'none' # plt.rcParams["font.family"] = "Times New Roman" # plt.rcParams['xtick.direction'] = 'in' # plt.rcParams['ytick.direction'] = 'in' # plt.rc('ytick', labelsize=8) # plt.rc('xtick', labelsize=8) # plt.rcParams['legend.title_fontsize'] = 12 # plt.rc('font', size=12) # rc_fonts = { # 'xtick.direction': 'in', # 'ytick.direction': 'in', # 'xtick.labelsize':16, # 'ytick.labelsize':16, # "font.family": "serif", # "font.size": 16, # 'axes.titlesize':16, # "legend.fontsize":13, # 'figure.figsize': (12, 12/3.0*0.75*2), # # 'figure.figsize': (7, 7/2.0*0.75), # "text.usetex": True, # # 'text.latex.preview': True, # 'text.latex.preamble': # r""" # \usepackage{times} # \usepackage{helvet} # \usepackage{courier} # """, # } # matplotlib.rcParams.update(rc_fonts) plt.style.use('seaborn') from baselines.common import plot_util X_TIMESTEPS = 'timesteps' X_EPISODES = 'episodes' X_WALLTIME = 'walltime_hrs' Y_REWARD = 'reward' Y_TIMESTEPS = 'timesteps' POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME] EPISODES_WINDOW = 100 COLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'purple', 'pink', 'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise', 'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue'] def rolling_window(a, window): shape = a.shape[:-1] + (a.shape[-1] - window + 1, window) strides = a.strides + (a.strides[-1],) return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides) def window_func(x, y, window, func): yw = rolling_window(y, window) yw_func = func(yw, axis=-1) return x[window-1:], yw_func def ts2xy(ts, xaxis, yaxis): if xaxis == X_TIMESTEPS: x = np.cumsum(ts.l.values) elif xaxis == X_EPISODES: x = np.arange(len(ts)) elif xaxis == X_WALLTIME: x = ts.t.values / 3600. 
else: raise NotImplementedError if yaxis == Y_REWARD: y = ts.r.values elif yaxis == Y_TIMESTEPS: y = ts.l.values else: raise NotImplementedError return x, y def plot_curves(xy_list, xaxis, yaxis, title): fig = plt.figure(figsize=(8,2)) maxx = max(xy[0][-1] for xy in xy_list) minx = 0 for (i, (x, y)) in enumerate(xy_list): color = COLORS[i % len(COLORS)] plt.scatter(x, y, s=2) x, y_mean = window_func(x, y, EPISODES_WINDOW, np.mean) #So returns average of last EPISODE_WINDOW episodes plt.plot(x, y_mean, color=color) plt.xlim(minx, maxx) plt.title(title) plt.xlabel(xaxis) plt.ylabel(yaxis) plt.tight_layout() fig.canvas.mpl_connect('resize_event', lambda event: plt.tight_layout()) plt.grid(True) def group_by_seed(taskpath): return taskpath.dirname.split(os.sep)[-1].split('_')[0] def group_by_name(taskpath): return taskpath.dirname.split(os.sep)[-2] def plot_results(dirs, num_timesteps=10e6, xaxis=X_TIMESTEPS, yaxis=Y_REWARD, title='',row=1, inches=7): results = plot_util.load_results(dirs, enable_monitor=False, enable_progress=True) # plot_util.plot_results(results, xy_fn=lambda r: ts2xy(r.monitor, xaxis, yaxis), split_fn=split_fn, average_group=True, resample=int(1e6)) plot_util.plot_results(results, split_fn=group_by_name, group_fn=group_by_seed, average_group=True, shaded_std=True,shaded_err=False, xlabel=X_TIMESTEPS, ylabel=Y_REWARD,row=row) def main(): import argparse parser = argparse.ArgumentParser() path = [r"C:\Users\chenxing\extra_test"] key = path parser.add_argument('--dirs', help='List of log directories', nargs = '*', default=key) parser.add_argument('--num_timesteps', type=int, default=int(10e6)) parser.add_argument('--xaxis', help = 'Varible on X-axis', default = X_TIMESTEPS) parser.add_argument('--yaxis', help = 'Varible on Y-axis', default = Y_REWARD) parser.add_argument('--task_name', help = 'Title of plot', default = 'Breakout') args = parser.parse_args() # args.dirs = [os.path.abspath(dir) for dir in args.dirs] plot_results(args.dirs, 
args.num_timesteps, args.xaxis, args.yaxis, args.task_name, row=1) plt.show() if __name__ == '__main__': main() # paper_image()
4,543
33.424242
143
py
P3O
P3O-main/baselines/results_plotter.py
import numpy as np import matplotlib matplotlib.use('TkAgg') # Can change to 'Agg' for non-interactive mode import matplotlib.pyplot as plt plt.rcParams['svg.fonttype'] = 'none' from baselines.common import plot_util X_TIMESTEPS = 'timesteps' X_EPISODES = 'episodes' X_WALLTIME = 'walltime_hrs' Y_REWARD = 'reward' Y_TIMESTEPS = 'timesteps' POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME] EPISODES_WINDOW = 100 COLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'purple', 'pink', 'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise', 'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue'] def rolling_window(a, window): shape = a.shape[:-1] + (a.shape[-1] - window + 1, window) strides = a.strides + (a.strides[-1],) return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides) def window_func(x, y, window, func): yw = rolling_window(y, window) yw_func = func(yw, axis=-1) return x[window-1:], yw_func def ts2xy(ts, xaxis, yaxis): if xaxis == X_TIMESTEPS: x = np.cumsum(ts.l.values) elif xaxis == X_EPISODES: x = np.arange(len(ts)) elif xaxis == X_WALLTIME: x = ts.t.values / 3600. 
else: raise NotImplementedError if yaxis == Y_REWARD: y = ts.r.values elif yaxis == Y_TIMESTEPS: y = ts.l.values else: raise NotImplementedError return x, y def plot_curves(xy_list, xaxis, yaxis, title): fig = plt.figure(figsize=(8,2)) maxx = max(xy[0][-1] for xy in xy_list) minx = 0 for (i, (x, y)) in enumerate(xy_list): color = COLORS[i % len(COLORS)] plt.scatter(x, y, s=2) x, y_mean = window_func(x, y, EPISODES_WINDOW, np.mean) #So returns average of last EPISODE_WINDOW episodes plt.plot(x, y_mean, color=color) plt.xlim(minx, maxx) plt.title(title) plt.xlabel(xaxis) plt.ylabel(yaxis) plt.tight_layout() fig.canvas.mpl_connect('resize_event', lambda event: plt.tight_layout()) plt.grid(True) def split_by_task(taskpath): return taskpath['dirname'].split('/')[-1].split('-')[0] def plot_results(dirs, num_timesteps=10e6, xaxis=X_TIMESTEPS, yaxis=Y_REWARD, title='', split_fn=split_by_task): results = plot_util.load_results(dirs) plot_util.plot_results(results, xy_fn=lambda r: ts2xy(r['monitor'], xaxis, yaxis), split_fn=split_fn, average_group=True, resample=int(1e6)) # Example usage in jupyter-notebook # from baselines.results_plotter import plot_results # %matplotlib inline # plot_results("./log") # Here ./log is a directory containing the monitor.csv files def main(): import argparse import os parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--dirs', help='List of log directories', nargs = '*', default=['./log']) parser.add_argument('--num_timesteps', type=int, default=int(10e6)) parser.add_argument('--xaxis', help = 'Varible on X-axis', default = X_TIMESTEPS) parser.add_argument('--yaxis', help = 'Varible on Y-axis', default = Y_REWARD) parser.add_argument('--task_name', help = 'Title of plot', default = 'Breakout') args = parser.parse_args() args.dirs = [os.path.abspath(dir) for dir in args.dirs] plot_results(args.dirs, args.num_timesteps, args.xaxis, args.yaxis, args.task_name) plt.show() if __name__ == 
'__main__': main()
3,455
35.378947
144
py
P3O
P3O-main/baselines/logger.py
import os import sys import shutil import os.path as osp import json import time import datetime import tempfile from collections import defaultdict from contextlib import contextmanager DEBUG = 10 INFO = 20 WARN = 30 ERROR = 40 DISABLED = 50 class KVWriter(object): def writekvs(self, kvs): raise NotImplementedError class SeqWriter(object): def writeseq(self, seq): raise NotImplementedError class HumanOutputFormat(KVWriter, SeqWriter): def __init__(self, filename_or_file): #update needed self.k2v = { 'misc/serial_timesteps':"srl_tss", 'misc/nupdates':"npdt", 'misc/total_timesteps':"ttl_tss", 'fps':"fps", 'misc/explained_variance':"ev", 'eprewmean':"rew", 'eplenmean':"len", 'misc/time_elapsed':"time", 'loss/policy_loss':"p_loss", 'loss/value_loss':"v_loss", 'loss/policy_entropy':"etp", 'loss/approxkl':"kl", 'loss/clipfrac':"clipf", 'loss/rAt':'rAt', 'loss/unnormal_pt':"u_pt", 'loss/unnormal_nt':"u_nt" } if isinstance(filename_or_file, str): self.file = open(filename_or_file, 'wt') self.own_file = True else: assert hasattr(filename_or_file, 'read'), 'expected file or str, got %s'%filename_or_file self.file = filename_or_file self.own_file = False def writekvs(self, kvs): # Create strings for printing key2str = {} for (key, val) in sorted(kvs.items()): if hasattr(val, '__float__'): valstr = '%-8.3g' % val else: valstr = str(val) key2str[self._truncate(key)] = self._truncate(valstr) # Find max widths if len(key2str) == 0: print('WARNING: tried to write empty key-value dict') return else: keywidth = max(map(len, key2str.keys())) valwidth = max(map(len, key2str.values())) # Write out the data lens = [] line1 = "" line2 = "" for k,v in key2str.items(): if k in self.k2v.keys(): cpk = self.k2v[k] else: cpk = k v = v.strip() ocu = max(len(cpk), len(v))+3 lens.append(ocu) line1+= cpk+' '*(ocu -len(cpk)) line2 += v + ' ' * (ocu - len(v)) # dashes = '-' * (keywidth + valwidth + 7) dashes = '-'*sum(lens) lines = [dashes] # for (key, val) in sorted(key2str.items(), key=lambda kv: 
kv[0].lower()): # lines.append('| %s%s | %s%s |' % ( # key, # ' ' * (keywidth - len(key)), # val, # ' ' * (valwidth - len(val)), # )) lines.append(line1) lines.append(line2) lines.append(dashes) self.file.write('\n'.join(lines) + '\n') # Flush the output to the file self.file.flush() def _truncate(self, s): maxlen = 30 return s[:maxlen-3] + '...' if len(s) > maxlen else s def writeseq(self, seq): seq = list(seq) for (i, elem) in enumerate(seq): self.file.write(elem) if i < len(seq) - 1: # add space unless this is the last one self.file.write(' ') self.file.write('\n') self.file.flush() def close(self): if self.own_file: self.file.close() class JSONOutputFormat(KVWriter): def __init__(self, filename): self.file = open(filename, 'wt') def writekvs(self, kvs): for k, v in sorted(kvs.items()): if hasattr(v, 'dtype'): kvs[k] = float(v) self.file.write(json.dumps(kvs) + '\n') self.file.flush() def close(self): self.file.close() class CSVOutputFormat(KVWriter): def __init__(self, filename): self.file = open(filename, 'w+t') self.keys = [] self.sep = ',' def writekvs(self, kvs): # Add our current row to the history extra_keys = list(kvs.keys() - self.keys) extra_keys.sort() if extra_keys: self.keys.extend(extra_keys) self.file.seek(0) lines = self.file.readlines() self.file.seek(0) for (i, k) in enumerate(self.keys): if i > 0: self.file.write(',') self.file.write(k) self.file.write('\n') for line in lines[1:]: self.file.write(line[:-1]) self.file.write(self.sep * len(extra_keys)) self.file.write('\n') for (i, k) in enumerate(self.keys): if i > 0: self.file.write(',') v = kvs.get(k) if v is not None: self.file.write(str(v)) self.file.write('\n') self.file.flush() def close(self): self.file.close() class TensorBoardOutputFormat(KVWriter): """ Dumps key/value pairs into TensorBoard's numeric format. 
""" def __init__(self, dir): os.makedirs(dir, exist_ok=True) self.dir = dir self.step = 1 prefix = 'events' path = osp.join(osp.abspath(dir), prefix) import tensorflow as tf from tensorflow.python import pywrap_tensorflow from tensorflow.core.util import event_pb2 from tensorflow.python.util import compat self.tf = tf self.event_pb2 = event_pb2 self.pywrap_tensorflow = pywrap_tensorflow self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path)) def writekvs(self, kvs): def summary_val(k, v): kwargs = {'tag': k, 'simple_value': float(v)} return self.tf.Summary.Value(**kwargs) summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()]) event = self.event_pb2.Event(wall_time=time.time(), summary=summary) event.step = self.step # is there any reason why you'd want to specify the step? self.writer.WriteEvent(event) self.writer.Flush() self.step += 1 def close(self): if self.writer: self.writer.Close() self.writer = None def make_output_format(format, ev_dir, log_suffix=''): os.makedirs(ev_dir, exist_ok=True) if format == 'stdout': return HumanOutputFormat(sys.stdout) elif format == 'log': return HumanOutputFormat(osp.join(ev_dir, 'log%s.txt' % log_suffix)) elif format == 'json': return JSONOutputFormat(osp.join(ev_dir, 'progress%s.json' % log_suffix)) elif format == 'csv': return CSVOutputFormat(osp.join(ev_dir, 'progress%s.csv' % log_suffix)) elif format == 'tensorboard': return TensorBoardOutputFormat(osp.join(ev_dir, 'tb%s' % log_suffix)) else: raise ValueError('Unknown format specified: %s' % (format,)) # ================================================================ # API # ================================================================ def logkv(key, val): """ Log a value of some diagnostic Call this once for each diagnostic quantity, each iteration If called many times, last value will be used. """ get_current().logkv(key, val) def logkv_mean(key, val): """ The same as logkv(), but if called many times, values averaged. 
""" get_current().logkv_mean(key, val) def logkvs(d): """ Log a dictionary of key-value pairs """ for (k, v) in d.items(): logkv(k, v) def dumpkvs(): """ Write all of the diagnostics from the current iteration """ return get_current().dumpkvs() def getkvs(): return get_current().name2val def log(*args, level=INFO): """ Write the sequence of args, with no separators, to the console and output files (if you've configured an output file). """ get_current().log(*args, level=level) def debug(*args): log(*args, level=DEBUG) def info(*args): log(*args, level=INFO) def warn(*args): log(*args, level=WARN) def error(*args): log(*args, level=ERROR) def set_level(level): """ Set logging threshold on current logger. """ get_current().set_level(level) def set_comm(comm): get_current().set_comm(comm) def get_dir(): """ Get directory that log files are being written to. will be None if there is no output directory (i.e., if you didn't call start) """ return get_current().get_dir() record_tabular = logkv dump_tabular = dumpkvs @contextmanager def profile_kv(scopename): logkey = 'wait_' + scopename tstart = time.time() try: yield finally: get_current().name2val[logkey] += time.time() - tstart def profile(n): """ Usage: @profile("my_func") def my_func(): code """ def decorator_with_name(func): def func_wrapper(*args, **kwargs): with profile_kv(n): return func(*args, **kwargs) return func_wrapper return decorator_with_name # ================================================================ # Backend # ================================================================ def get_current(): if Logger.CURRENT is None: _configure_default_logger() return Logger.CURRENT class Logger(object): DEFAULT = None # A logger with no output files. 
(See right below class definition) # So that you can still log to the terminal without setting up any output files CURRENT = None # Current logger being used by the free functions above def __init__(self, dir, output_formats, comm=None): self.name2val = defaultdict(float) # values this iteration self.name2cnt = defaultdict(int) self.level = INFO self.dir = dir self.output_formats = output_formats self.comm = comm # Logging API, forwarded # ---------------------------------------- def logkv(self, key, val): self.name2val[key] = val def logkv_mean(self, key, val): oldval, cnt = self.name2val[key], self.name2cnt[key] self.name2val[key] = oldval*cnt/(cnt+1) + val/(cnt+1) self.name2cnt[key] = cnt + 1 def dumpkvs(self): if self.comm is None: d = self.name2val else: from baselines.common import mpi_util d = mpi_util.mpi_weighted_mean(self.comm, {name : (val, self.name2cnt.get(name, 1)) for (name, val) in self.name2val.items()}) if self.comm.rank != 0: d['dummy'] = 1 # so we don't get a warning about empty dict out = d.copy() # Return the dict for unit testing purposes for fmt in self.output_formats: if isinstance(fmt, KVWriter): fmt.writekvs(d) self.name2val.clear() self.name2cnt.clear() return out def log(self, *args, level=INFO): if self.level <= level: self._do_log(args) # Configuration # ---------------------------------------- def set_level(self, level): self.level = level def set_comm(self, comm): self.comm = comm def get_dir(self): return self.dir def close(self): for fmt in self.output_formats: fmt.close() # Misc # ---------------------------------------- def _do_log(self, args): for fmt in self.output_formats: if isinstance(fmt, SeqWriter): fmt.writeseq(map(str, args)) def get_rank_without_mpi_import(): # check environment variables here instead of importing mpi4py # to avoid calling MPI_Init() when this module is imported for varname in ['PMI_RANK', 'OMPI_COMM_WORLD_RANK']: if varname in os.environ: return int(os.environ[varname]) return 0 def 
configure(dir=None, format_strs=None, comm=None, log_suffix=''): """ If comm is provided, average all numerical stats across that comm """ if dir is None: dir = os.getenv('OPENAI_LOGDIR') if dir is None: dir = osp.join(tempfile.gettempdir(), datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f")) assert isinstance(dir, str) dir = os.path.expanduser(dir) os.makedirs(os.path.expanduser(dir), exist_ok=True) rank = get_rank_without_mpi_import() if rank > 0: log_suffix = log_suffix + "-rank%03i" % rank if format_strs is None: if rank == 0: format_strs = os.getenv('OPENAI_LOG_FORMAT', 'stdout,log,csv').split(',') else: format_strs = os.getenv('OPENAI_LOG_FORMAT_MPI', 'log').split(',') format_strs = filter(None, format_strs) output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs] Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm) if output_formats: log('Logging to %s'%dir) def _configure_default_logger(): configure() Logger.DEFAULT = Logger.CURRENT def reset(): if Logger.CURRENT is not Logger.DEFAULT: Logger.CURRENT.close() Logger.CURRENT = Logger.DEFAULT log('Reset logger') @contextmanager def scoped_configure(dir=None, format_strs=None, comm=None): prevlogger = Logger.CURRENT configure(dir=dir, format_strs=format_strs, comm=comm) try: yield finally: Logger.CURRENT.close() Logger.CURRENT = prevlogger # ================================================================ def _demo(): info("hi") debug("shouldn't appear") set_level(DEBUG) debug("should appear") dir = "/tmp/testlogging" if os.path.exists(dir): shutil.rmtree(dir) configure(dir=dir) logkv("a", 3) logkv("b", 2.5) dumpkvs() logkv("b", -2.5) logkv("a", 5.5) dumpkvs() info("^^^ should see a = 5.5") logkv_mean("b", -22.5) logkv_mean("b", -44.4) logkv("a", 5.5) dumpkvs() info("^^^ should see b = -33.3") logkv("b", -2.5) dumpkvs() logkv("a", "longasslongasslongasslongasslongasslongassvalue") dumpkvs() # 
================================================================ # Readers # ================================================================ def read_json(fname): import pandas ds = [] with open(fname, 'rt') as fh: for line in fh: ds.append(json.loads(line)) return pandas.DataFrame(ds) def read_csv(fname): import pandas return pandas.read_csv(fname, index_col=None, comment='#') def read_tb(path): """ path : a tensorboard file OR a directory, where we will find all TB files of the form events.* """ import pandas import numpy as np from glob import glob import tensorflow as tf if osp.isdir(path): fnames = glob(osp.join(path, "events.*")) elif osp.basename(path).startswith("events."): fnames = [path] else: raise NotImplementedError("Expected tensorboard file or directory containing them. Got %s"%path) tag2pairs = defaultdict(list) maxstep = 0 for fname in fnames: for summary in tf.train.summary_iterator(fname): if summary.step > 0: for v in summary.summary.value: pair = (summary.step, v.simple_value) tag2pairs[v.tag].append(pair) maxstep = max(summary.step, maxstep) data = np.empty((maxstep, len(tag2pairs))) data[:] = np.nan tags = sorted(tag2pairs.keys()) for (colidx,tag) in enumerate(tags): pairs = tag2pairs[tag] for (step, value) in pairs: data[step-1, colidx] = value return pandas.DataFrame(data, columns=tags) if __name__ == "__main__": _demo()
15,956
28.38674
122
py
P3O
P3O-main/baselines/run_test.py
import sys import re import multiprocessing import os.path as osp import gym from collections import defaultdict import tensorflow as tf import numpy as np from baselines.common.vec_env import VecFrameStack, VecNormalize, VecEnv from baselines.common.vec_env.vec_video_recorder import VecVideoRecorder from baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env, make_env from baselines.common.tf_util import get_session from baselines import logger from importlib import import_module try: from mpi4py import MPI except ImportError: MPI = None try: import pybullet_envs except ImportError: pybullet_envs = None try: import roboschool except ImportError: roboschool = None _game_envs = defaultdict(set) for env in gym.envs.registry.all(): # TODO: solve this with regexes env_type = env.entry_point.split(':')[0].split('.')[-1] _game_envs[env_type].add(env.id) # reading benchmark names directly from retro requires # importing retro here, and for some reason that crashes tensorflow # in ubuntu _game_envs['retro'] = { 'BubbleBobble-Nes', 'SuperMarioBros-Nes', 'TwinBee3PokoPokoDaimaou-Nes', 'SpaceHarrier-Nes', 'SonicTheHedgehog-Genesis', 'Vectorman-Genesis', 'FinalFight-Snes', 'SpaceInvaders-Snes', } def train(args, extra_args): env_type, env_id = get_env_type(args) print('env_type: {}'.format(env_type)) total_timesteps = int(args.num_timesteps) seed = args.seed learn = get_learn_function(args.alg) alg_kwargs = get_learn_function_defaults(args.alg, env_type) alg_kwargs.update(extra_args) env = build_env(args) if args.save_video_interval != 0: env = VecVideoRecorder(env, osp.join(logger.get_dir(), "videos"), record_video_trigger=lambda x: x % args.save_video_interval == 0, video_length=args.save_video_length) if args.network: alg_kwargs['network'] = args.network else: if alg_kwargs.get('network') is None: alg_kwargs['network'] = get_default_network(env_type) print('Training {} on {}:{} with arguments \n{}'.format(args.alg, env_type, env_id, 
alg_kwargs)) model = learn( env=env, seed=seed, total_timesteps=total_timesteps, **alg_kwargs ) return model, env def build_env(args): ncpu = multiprocessing.cpu_count() if sys.platform == 'darwin': ncpu //= 2 nenv = args.num_env or ncpu alg = args.alg seed = args.seed env_type, env_id = get_env_type(args) if env_type in {'atari', 'retro'}: if alg == 'deepq': env = make_env(env_id, env_type, seed=seed, wrapper_kwargs={'frame_stack': True}) elif alg == 'trpo_mpi': env = make_env(env_id, env_type, seed=seed) else: frame_stack_size = 4 env = make_vec_env(env_id, env_type, nenv, seed, gamestate=args.gamestate, reward_scale=args.reward_scale) env = VecFrameStack(env, frame_stack_size) else: config = tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) config.gpu_options.allow_growth = True get_session(config=config) flatten_dict_observations = alg not in {'her'} env = make_vec_env(env_id, env_type, args.num_env or 1, seed, reward_scale=args.reward_scale, flatten_dict_observations=flatten_dict_observations) if env_type == 'mujoco': env = VecNormalize(env, use_tf=True) return env def get_env_type(args): env_id = args.env if args.env_type is not None: return args.env_type, env_id # Re-parse the gym registry, since we could have new envs since last time. 
for env in gym.envs.registry.all(): env_type = env.entry_point.split(':')[0].split('.')[-1] _game_envs[env_type].add(env.id) # This is a set so add is idempotent if env_id in _game_envs.keys(): env_type = env_id env_id = [g for g in _game_envs[env_type]][0] else: env_type = None for g, e in _game_envs.items(): if env_id in e: env_type = g break if ':' in env_id: env_type = re.sub(r':.*', '', env_id) assert env_type is not None, 'env_id {} is not recognized in env types'.format(env_id, _game_envs.keys()) return env_type, env_id def get_default_network(env_type): if env_type in {'atari', 'retro'}: return 'cnn' else: return 'mlp' def get_alg_module(alg, submodule=None): submodule = submodule or alg try: # first try to import the alg module from baselines alg_module = import_module('.'.join(['baselines', alg, submodule])) except ImportError: # then from rl_algs alg_module = import_module('.'.join(['rl_' + 'algs', alg, submodule])) return alg_module def get_learn_function(alg): return get_alg_module(alg).learn def get_learn_function_defaults(alg, env_type): try: alg_defaults = get_alg_module(alg, 'defaults') kwargs = getattr(alg_defaults, env_type)() except (ImportError, AttributeError): kwargs = {} return kwargs def parse_cmdline_kwargs(args): ''' convert a list of '='-spaced command-line arguments to a dictionary, evaluating python objects when possible ''' def parse(v): assert isinstance(v, str) try: return eval(v) except (NameError, SyntaxError): return v return {k: parse(v) for k,v in parse_unknown_args(args).items()} def configure_logger(log_path, **kwargs): if log_path is not None: logger.configure(log_path) else: logger.configure(**kwargs) def main(args): # configure logger, disable logging in child MPI processes (with rank > 0) arg_parser = common_arg_parser() args, unknown_args = arg_parser.parse_known_args(args) extra_args = parse_cmdline_kwargs(unknown_args) if MPI is None or MPI.COMM_WORLD.Get_rank() == 0: rank = 0 configure_logger(args.log_path) else: rank 
= MPI.COMM_WORLD.Get_rank() configure_logger(args.log_path, format_strs=[]) model, env = train(args, extra_args) if args.save_path is not None and rank == 0: save_path = osp.expanduser(args.save_path) model.save(save_path) if args.play: with open('/home/dachuang/workspace/extra_test/models/first_log','r') as f: play = eval(f.read()) replay = [] for i in range(len(play)): obs = play[i][0] actions, _, _, _ = model.step(np.array(obs)) replay.append([obs, actions.tolist()]) with open('/home/dachuang/workspace/extra_test/models/second_log','w') as f: f.write(str(replay)) env.close() return model if __name__ == '__main__': main(sys.argv)
6,988
28.242678
176
py
P3O
P3O-main/baselines/run.py
import sys import re import multiprocessing import os.path as osp import gym from collections import defaultdict import tensorflow as tf import numpy as np from baselines.common.vec_env import VecFrameStack, VecNormalize, VecEnv from baselines.common.vec_env.vec_video_recorder import VecVideoRecorder from baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env, make_env from baselines.common.tf_util import get_session from baselines import logger from importlib import import_module from termcolor import colored # tf.keras.backend.set_floatx('float64') try: from mpi4py import MPI except ImportError: MPI = None try: import pybullet_envs except ImportError: pybullet_envs = None try: import roboschool except ImportError: roboschool = None _game_envs = defaultdict(set) for env in gym.envs.registry.all(): # TODO: solve this with regexes env_type = env.entry_point.split(':')[0].split('.')[-1] _game_envs[env_type].add(env.id) # reading benchmark names directly from retro requires # importing retro here, and for some reason that crashes tensorflow # in ubuntu _game_envs['retro'] = { 'BubbleBobble-Nes', 'SuperMarioBros-Nes', 'TwinBee3PokoPokoDaimaou-Nes', 'SpaceHarrier-Nes', 'SonicTheHedgehog-Genesis', 'Vectorman-Genesis', 'FinalFight-Snes', 'SpaceInvaders-Snes', } def train(args, extra_args): env_type, env_id = get_env_type(args) print('env_type: {}'.format(env_type)) total_timesteps = int(args.num_timesteps) seed = args.seed learn = get_learn_function(args.alg) alg_kwargs = get_learn_function_defaults(args.alg, env_type) alg_kwargs.update(extra_args) env = build_env(args) if args.save_video_interval != 0: env = VecVideoRecorder(env, osp.join(logger.get_dir(), "videos"), record_video_trigger=lambda x: x % args.save_video_interval == 0, video_length=args.save_video_length) if args.network: alg_kwargs['network'] = args.network else: if alg_kwargs.get('network') is None: alg_kwargs['network'] = get_default_network(env_type) print('Training {} on 
{}:{} with arguments \n{}'.format(args.alg, env_type, env_id, alg_kwargs)) # eval_env = build_env(args) model = learn( env=env, seed=seed, total_timesteps=total_timesteps, # eval_env=eval_env, **alg_kwargs ) return model, env def build_env(args): ncpu = multiprocessing.cpu_count() if sys.platform == 'darwin': ncpu //= 2 nenv = args.num_env or ncpu alg = args.alg seed = args.seed env_type, env_id = get_env_type(args) if env_type in {'atari', 'retro'}: if alg == 'deepq': env = make_env(env_id, env_type, seed=seed, wrapper_kwargs={'frame_stack': True}) elif alg == 'trpo_mpi': env = make_env(env_id, env_type, seed=seed) else: frame_stack_size = 4 env = make_vec_env(env_id, env_type, nenv, seed, gamestate=args.gamestate, reward_scale=args.reward_scale) env = VecFrameStack(env, frame_stack_size) else: config = tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) config.gpu_options.allow_growth = True get_session(config=config) flatten_dict_observations = alg not in {'her'} env = make_vec_env(env_id, env_type, args.num_env or 1, seed, reward_scale=args.reward_scale, flatten_dict_observations=flatten_dict_observations) if env_type == 'mujoco': env = VecNormalize(env, use_tf=True) return env def get_env_type(args): env_id = args.env if args.env_type is not None: return args.env_type, env_id # Re-parse the gym registry, since we could have new envs since last time. 
for env in gym.envs.registry.all(): env_type = env.entry_point.split(':')[0].split('.')[-1] _game_envs[env_type].add(env.id) # This is a set so add is idempotent if env_id in _game_envs.keys(): env_type = env_id env_id = [g for g in _game_envs[env_type]][0] else: env_type = None for g, e in _game_envs.items(): if env_id in e: env_type = g break if ':' in env_id: env_type = re.sub(r':.*', '', env_id) assert env_type is not None, 'env_id {} is not recognized in env types'.format(env_id, _game_envs.keys()) return env_type, env_id def get_default_network(env_type): if env_type in {'atari', 'retro'}: return 'cnn' else: return 'mlp' def get_alg_module(alg, submodule=None): submodule = submodule or alg try: # first try to import the alg module from baselines alg_module = import_module('.'.join(['baselines', alg, submodule])) except ImportError: # then from rl_algs alg_module = import_module('.'.join(['rl_' + 'algs', alg, submodule])) return alg_module def get_learn_function(alg): return get_alg_module(alg).learn def get_learn_function_defaults(alg, env_type): try: alg_defaults = get_alg_module(alg, 'defaults') kwargs = getattr(alg_defaults, env_type)() except (ImportError, AttributeError): kwargs = {} return kwargs def parse_cmdline_kwargs(args): ''' convert a list of '='-spaced command-line arguments to a dictionary, evaluating python objects when possible ''' def parse(v): assert isinstance(v, str) try: return eval(v) except (NameError, SyntaxError): return v return {k: parse(v) for k,v in parse_unknown_args(args).items()} def configure_logger(log_path, **kwargs): if log_path is not None: logger.configure(log_path) else: logger.configure(**kwargs) def main(args): # configure logger, disable logging in child MPI processes (with rank > 0) arg_parser = common_arg_parser() args, unknown_args = arg_parser.parse_known_args(args) extra_args = parse_cmdline_kwargs(unknown_args) if MPI is None or MPI.COMM_WORLD.Get_rank() == 0: rank = 0 configure_logger(args.log_path) else: rank 
= MPI.COMM_WORLD.Get_rank() configure_logger(args.log_path, format_strs=[]) model, env = train(args, extra_args) if args.save_path is not None and rank == 0: save_path = osp.expanduser(args.save_path) model.save(save_path) if args.play: # replay = [] logger.log("Running trained model") obs = env.reset() state = model.initial_state if hasattr(model, 'initial_state') else None dones = np.zeros((1,)) # episode_rew = np.zeros(env.num_envs) if isinstance(env, VecEnv) else np.zeros(1) episode_num = 0 re = [] episode_rewards = [] test_run = 10 while test_run>0: if state is not None: actions, _, state, _ = model.step(obs,S=state, M=dones) else: actions, _, _, _ = model.step(obs) # replay.append([obs.tolist(),actions.tolist()]) obs, rew, done, info = env.step(actions) # this rew is modified by RunningMeanStd in the vec_normalize.py , use info.get('episode')['r'] to get episode reward # env.render() re.append(rew) done_any = done.any() if isinstance(done, np.ndarray) else done if done_any: for i in np.nonzero(done)[0]: episode_reward = info[i].get('episode')['r'] # print('episode_rew={}'.format(episode_reward)) episode_rewards.append(episode_reward) test_run -= 1 print(sum(re), len(re)) print("episode rewards",episode_rewards) print("average rewards",sum(episode_rewards) / len(episode_rewards)) # from os.path import expanduser # np.savez('/home/dachuang/workspace/extra_test/models/log_p3o',*replay) # with open(expanduser("~")+'/workspace/extra_test/models/first_log','w') as f: # f.write(str(replay)) env.close() return model if __name__ == '__main__': main(sys.argv)
8,357
29.50365
176
py
P3O
P3O-main/baselines/__init__.py
0
0
0
py
P3O
P3O-main/baselines/vpg/vpg.py
import os import random import time import numpy as np import os.path as osp from baselines import logger from collections import deque from baselines.common import explained_variance, set_global_seeds from baselines.common.policies import build_policy try: from mpi4py import MPI except ImportError: MPI = None from baselines.vpg.runner import Runner def constfn(val): def f(_): return val return f def betafn(val): def f(frac): return val*frac return f def learn(*, network, env, total_timesteps, eval_env = None, seed=None, nsteps=2048, ent_coef=0.0, lr=3e-4, vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95, log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2, beta=15, save_interval=0, load_path=None, model_fn=None, update_fn=None, init_fn=None, mpi_rank_weight=1, comm=None, **network_kwargs): ''' Learn policy using SPG algorithm Parameters: ---------- network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list) specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets. See common/models.py/lstm for more details on using recurrent nets in policies env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation. The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class. nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where nenv is number of environment copies simulated in parallel) total_timesteps: int number of timesteps (i.e. 
number of actions taken in the environment) ent_coef: float policy entropy coefficient in the optimization objective lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the training and 0 is the end of the training. vf_coef: float value function loss coefficient in the optimization objective max_grad_norm: float or None gradient norm clipping coefficient gamma: float discounting factor lam: float advantage estimation discounting factor (lambda in the paper) log_interval: int number of timesteps between logging events nminibatches: int number of training minibatches per update. For recurrent policies, should be smaller or equal than number of environments run in parallel. noptepochs: int number of training epochs per update cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training and 0 is the end of the training save_interval: int number of timesteps between saving events load_path: str path to load the model from **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network For instance, 'mlp' network architecture has arguments num_hidden and num_layers. 
''' set_global_seeds(seed) if isinstance(lr, float): lr = constfn(lr) else: assert callable(lr) if isinstance(cliprange, float): cliprange = constfn(cliprange) else: assert callable(cliprange) if isinstance(beta, int): beta = betafn(beta) else: assert callable(beta) total_timesteps = int(total_timesteps) policy = build_policy(env, network, **network_kwargs) # Get the nb of env nenvs = env.num_envs # Get state_space and action_space ob_space = env.observation_space ac_space = env.action_space # Calculate the batch_size nbatch = nenvs * nsteps nbatch_train = nbatch // nminibatches is_mpi_root = (MPI is None or MPI.COMM_WORLD.Get_rank() == 0) # Instantiate the model object (that creates act_model and train_model) if model_fn is None: from baselines.vpg.model import Model model_fn = Model model = model_fn(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, comm=comm, mpi_rank_weight=mpi_rank_weight) if load_path is not None: model.load(load_path) # Instantiate the runner object runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam) if eval_env is not None: eval_runner = Runner(env = eval_env, model = model, nsteps = nsteps, gamma = gamma, lam= lam) epinfobuf = deque(maxlen=100) if eval_env is not None: eval_epinfobuf = deque(maxlen=100) if init_fn is not None: init_fn() # Start total timer tfirststart = time.perf_counter() # TODO ing back_trail = [] off_update = False nupdates = total_timesteps//nbatch for update in range(1, nupdates+1): assert nbatch % nminibatches == 0 # Start timer tstart = time.perf_counter() frac = 1.0 - (update - 1.0) / nupdates # Calculate the learning rate lrnow = lr(frac) # Calculate the cliprange cliprangenow = cliprange(frac) betanow = beta(frac) if update % log_interval == 0 and is_mpi_root: logger.info('Stepping environment...') # Get minibatch obs, returns, masks, actions, values, neglogpacs, 
states, epinfos = runner.run() #pylint: disable=E0632 if eval_env is not None: eval_obs, eval_returns, eval_masks, eval_actions, eval_values, eval_neglogpacs, eval_states, eval_epinfos = eval_runner.run() #pylint: disable=E0632 if update % log_interval == 0 and is_mpi_root: logger.info('Done.') epinfobuf.extend(epinfos) if eval_env is not None: eval_epinfobuf.extend(eval_epinfos) # Here what we're going to do is for each minibatch calculate the loss and append it. mblossvals = [] # if len(back_trail)<20: # back_trail.append((obs, returns, masks, actions, values, neglogpacs)) # else: # lendx = (update-1)%20 # back_trail[lendx] = (obs, returns, masks, actions, values, neglogpacs) if states is None: # nonrecurrent version # Index of each element of batch_size # Create the indices array inds = np.arange(nbatch) model.assign_v() for _ in range(noptepochs): # Randomize the indexes np.random.shuffle(inds) # 0 to batch_size with batch_train_size step for start in range(0, nbatch, nbatch_train): end = start + nbatch_train mbinds = inds[start:end] slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs)) # print(lrnow, cliprangenow) res = model.train(lrnow, cliprangenow, betanow, *slices) # print(res[4].item()) mblossvals.append(res) if off_update: num = min(1, len(back_trail)) uu = random.sample(back_trail, num) for u in uu: inds = np.arange(nbatch) for _ in range(noptepochs): np.random.shuffle(inds) for start in range(0, nbatch, nbatch_train): end = start + nbatch_train mbinds = inds[start:end] slices = (arr[mbinds] for arr in u) model.train(lrnow, cliprangenow, betanow, *slices) else: # recurrent version assert nenvs % nminibatches == 0 envsperbatch = nenvs // nminibatches envinds = np.arange(nenvs) flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps) for _ in range(noptepochs): np.random.shuffle(envinds) for start in range(0, nenvs, envsperbatch): end = start + envsperbatch mbenvinds = envinds[start:end] mbflatinds = 
flatinds[mbenvinds].ravel() slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs)) mbstates = states[mbenvinds] mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates)) # Feedforward --> get losses --> update lossvals = np.mean(mblossvals, axis=0) # End timer tnow = time.perf_counter() # Calculate the fps (frame per second) fps = int(nbatch / (tnow - tstart)) if update_fn is not None: update_fn(update) if update % log_interval == 0 or update == 1: # Calculates if value function is a good predicator of the returns (ev > 1) # or if it's just worse than predicting nothing (ev =< 0) ev = explained_variance(values, returns) logger.logkv("misc/serial_timesteps", update*nsteps) logger.logkv("misc/nupdates", update) logger.logkv("misc/total_timesteps", update*nbatch) logger.logkv("fps", fps) logger.logkv("misc/explained_variance", float(ev)) logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf])) logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf])) if eval_env is not None: logger.logkv('eval_eprewmean', safemean([epinfo['r'] for epinfo in eval_epinfobuf]) ) logger.logkv('eval_eplenmean', safemean([epinfo['l'] for epinfo in eval_epinfobuf]) ) logger.logkv('misc/time_elapsed', tnow - tfirststart) for (lossval, lossname) in zip(lossvals, model.loss_names): logger.logkv('loss/' + lossname, lossval) logger.dumpkvs() return model # Avoid division error when calculate the mean (in our case if epinfo is empty returns np.nan, not return an error) def safemean(xs): return np.nan if len(xs) == 0 else np.mean(xs)
11,131
42.826772
184
py
P3O
P3O-main/baselines/vpg/model.py
import tensorflow as tf import functools from baselines.common.tf_util import get_session, save_variables, load_variables from baselines.common.tf_util import initialize from baselines.common.input import observation_placeholder try: from baselines.common.mpi_adam_optimizer import MpiAdamOptimizer from mpi4py import MPI from baselines.common.mpi_util import sync_from_root except ImportError: MPI = None class Model(object): """ We use this object to : __init__: - Creates the step_model - Creates the train_model train(): - Make the training part (feedforward and retropropagation of gradients) save/load(): - Save load the model """ def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train, nsteps, ent_coef, vf_coef, max_grad_norm, mpi_rank_weight=1, comm=None, microbatch_size=None): self.sess = sess = get_session() if MPI is not None and comm is None: comm = MPI.COMM_WORLD # observation_placeholder(ob_space, batch_size=nbatch_train) with tf.variable_scope('vpg_model', reuse=tf.AUTO_REUSE): # CREATE OUR TWO MODELS # act_model that is used for sampling act_model = policy(nbatch_act, 1, sess) # Train model for training if microbatch_size is None: train_model = policy(nbatch_train, nsteps, sess) else: train_model = policy(microbatch_size, nsteps, sess) with tf.variable_scope("oldpi", reuse=tf.AUTO_REUSE): oldpi = policy(nbatch_train, nsteps, sess,observ_placeholder=train_model.X) # CREATE THE PLACEHOLDERS self.A = A = train_model.pdtype.sample_placeholder([None]) self.ADV = ADV = tf.placeholder(tf.float32, [None]) self.R = R = tf.placeholder(tf.float32, [None]) # Keep track of old actor self.OLDNEGLOGPAC = OLDNEGLOGPAC = tf.placeholder(tf.float32, [None]) # Keep track of old critic self.LR = LR = tf.placeholder(tf.float32, []) # Cliprange self.CLIPRANGE = CLIPRANGE = tf.placeholder(tf.float32, []) self.OLDVPRED = OLDVPRED = tf.placeholder(tf.float32, [None]) self.BETA = BETA = tf.placeholder(tf.float32, []) self.assign = [tf.assign(oldv, newv) for (oldv, 
newv) in zip(get_variables("oldpi"), get_variables("vpg_model"))] neglogpac = train_model.pd.neglogp(A) # Calculate the entropy # Entropy is used to improve exploration by limiting the premature convergence to suboptimal policy. entropy = tf.reduce_mean(train_model.pd.entropy()) # CALCULATE THE LOSS # Total loss = Policy gradient loss - entropy * entropy coefficient + Value coefficient * value loss # Clip the value to reduce variability during Critic training # Get the predicted value vpred = train_model.vf vpredclipped = OLDVPRED + tf.clip_by_value(train_model.vf - OLDVPRED, - CLIPRANGE, CLIPRANGE) # Unclipped value vf_losses1 = tf.square(vpred - R) # Clipped value # vf_losses2 = tf.square(vpredclipped - R) # vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2)) vf_loss = .5 * tf.reduce_mean(vf_losses1) # Calculate ratio (pi current policy / pi old policy) ratio = tf.exp(OLDNEGLOGPAC - neglogpac) # stop_ratio = tf.stop_gradient(ratio) # Defining Loss = - J is equivalent to max J # padv = (tf.math.sign(ADV)+1)/2*ADV # nadv = (-1*tf.math.sign(ADV)+1)/2*ADV # # psadv = (tf.math.sign(ADV)+1)/2 # fr_kl_loss = tf.abs(ADV)*oldpi.pd.kl(train_model.pd) # rv_kl_loss = train_model.pd.kl(oldpi.pd) pg_losses2 = -ADV*ratio pg_loss = tf.reduce_mean(pg_losses2) # pg_loss += tf.reduce_mean(fr_kl_loss) approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC)) test = tf.reduce_mean(pg_loss) # Total loss loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef # UPDATE THE PARAMETERS USING LOSS # 1. Get the model parameters params = tf.trainable_variables('vpg_model') # 2. Build our trainer if comm is not None and comm.Get_size() > 1: self.trainer = MpiAdamOptimizer(comm, learning_rate=LR, mpi_rank_weight=mpi_rank_weight, epsilon=1e-5) else: self.trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5) # 3. 
Calculate the gradients grads_and_var = self.trainer.compute_gradients(loss, params) grads, var = zip(*grads_and_var) if max_grad_norm is not None: # Clip the gradients (normalize) grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm) grads_and_var = list(zip(grads, var)) # zip aggregate each gradient with parameters associated # For instance zip(ABCD, xyza) => Ax, By, Cz, Da self.grads = grads self.var = var self._train_op = self.trainer.apply_gradients(grads_and_var) self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'test'] self.stats_list = [pg_loss, vf_loss, entropy, approxkl, test] self.train_model = train_model self.act_model = act_model self.step = act_model.step self.value = act_model.value self.initial_state = act_model.initial_state self.save = functools.partial(save_variables, sess=sess) self.load = functools.partial(load_variables, sess=sess) initialize() global_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="") if MPI is not None: sync_from_root(sess, global_variables, comm=comm) #pylint: disable=E1101 def train(self, lr, cliprange, beta, obs, returns, masks, actions, values, neglogpacs, states=None): # Here we calculate advantage A(s,a) = R + yV(s') - V(s) # Returns = R + yV(s') advs = returns - values # Normalize the advantages advs = (advs - advs.mean()) / (advs.std() + 1e-8) td_map = { self.train_model.X: obs, self.A: actions, self.ADV : advs, self.R : returns, self.LR : lr, self.CLIPRANGE: cliprange, self.BETA: beta, self.OLDNEGLOGPAC : neglogpacs, self.OLDVPRED : values } if states is not None: td_map[self.train_model.S] = states td_map[self.train_model.M] = masks return self.sess.run( self.stats_list + [self._train_op], td_map )[:-1] def assign_v(self): self.sess.run(self.assign) def get_variables(scope): return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope)
6,912
38.502857
114
py
P3O
P3O-main/baselines/vpg/defaults.py
def mujoco(): return dict( nsteps=2048, nminibatches=32, lam=0.95, gamma=0.99, noptepochs=10, log_interval=1, ent_coef=0.0, lr=lambda f: 1e-4 * 1, cliprange=0.2, value_network='copy' ) def atari(): return dict( nsteps=128, nminibatches=4, lam=0.95, gamma=0.99, noptepochs=4, log_interval=1, ent_coef=.01, lr=lambda f : f * 2.5e-4, cliprange=0.1, # value_network='copy' ) def retro(): return atari()
549
19.37037
59
py
P3O
P3O-main/baselines/vpg/runner.py
import numpy as np from baselines.common.runners import AbstractEnvRunner class Runner(AbstractEnvRunner): """ We use this object to make a mini batch of experiences __init__: - Initialize the runner run(): - Make a mini batch """ def __init__(self, *, env, model, nsteps, gamma, lam): super().__init__(env=env, model=model, nsteps=nsteps) # Lambda used in GAE (General Advantage Estimation) self.lam = lam # Discount rate self.gamma = gamma def run(self): # Here, we init the lists that will contain the mb of experiences mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[] mb_states = self.states epinfos = [] # For n in range number of steps for _ in range(self.nsteps): # Given observations, get action value and neglopacs # We already have self.obs because Runner superclass run self.obs[:] = env.reset() on init actions, values, self.states, neglogpacs = self.model.step(self.obs, S=self.states, M=self.dones) mb_obs.append(self.obs.copy()) mb_actions.append(actions) mb_values.append(values) mb_neglogpacs.append(neglogpacs) mb_dones.append(self.dones) # Take actions in env and look the results # Infos contains a ton of useful informations self.obs[:], rewards, self.dones, infos = self.env.step(actions) for info in infos: maybeepinfo = info.get('episode') if maybeepinfo: epinfos.append(maybeepinfo) mb_rewards.append(rewards) #batch of steps to batch of rollouts mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype) mb_rewards = np.asarray(mb_rewards, dtype=np.float32) mb_actions = np.asarray(mb_actions) mb_values = np.asarray(mb_values, dtype=np.float32) mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32) mb_dones = np.asarray(mb_dones, dtype=np.bool) last_values = self.model.value(self.obs, S=self.states, M=self.dones) # discount/bootstrap off value fn mb_returns = np.zeros_like(mb_rewards) mb_advs = np.zeros_like(mb_rewards) lastgaelam = 0 for t in reversed(range(self.nsteps)): if t == self.nsteps - 1: nextnonterminal = 1.0 - 
self.dones nextvalues = last_values else: nextnonterminal = 1.0 - mb_dones[t+1] nextvalues = mb_values[t+1] delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t] mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam mb_returns = mb_advs + mb_values return (*map(sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)), mb_states, epinfos) # obs, returns, masks, actions, values, neglogpacs, states = runner.run() def sf01(arr): """ swap and then flatten axes 0 and 1 """ s = arr.shape return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
3,194
40.493506
109
py
P3O
P3O-main/baselines/vpg/__init__.py
0
0
0
py
P3O
P3O-main/baselines/common/mpi_adam.py
import baselines.common.tf_util as U import tensorflow as tf import numpy as np try: from mpi4py import MPI except ImportError: MPI = None class MpiAdam(object): def __init__(self, var_list, *, beta1=0.9, beta2=0.999, epsilon=1e-08, scale_grad_by_procs=True, comm=None): self.var_list = var_list self.beta1 = beta1 self.beta2 = beta2 self.epsilon = epsilon self.scale_grad_by_procs = scale_grad_by_procs size = sum(U.numel(v) for v in var_list) self.m = np.zeros(size, 'float32') self.v = np.zeros(size, 'float32') self.t = 0 self.setfromflat = U.SetFromFlat(var_list) self.getflat = U.GetFlat(var_list) self.comm = MPI.COMM_WORLD if comm is None and MPI is not None else comm def update(self, localg, stepsize): if self.t % 100 == 0: self.check_synced() localg = localg.astype('float32') if self.comm is not None: globalg = np.zeros_like(localg) self.comm.Allreduce(localg, globalg, op=MPI.SUM) if self.scale_grad_by_procs: globalg /= self.comm.Get_size() else: globalg = np.copy(localg) self.t += 1 a = stepsize * np.sqrt(1 - self.beta2**self.t)/(1 - self.beta1**self.t) self.m = self.beta1 * self.m + (1 - self.beta1) * globalg self.v = self.beta2 * self.v + (1 - self.beta2) * (globalg * globalg) step = (- a) * self.m / (np.sqrt(self.v) + self.epsilon) self.setfromflat(self.getflat() + step) def sync(self): if self.comm is None: return theta = self.getflat() self.comm.Bcast(theta, root=0) self.setfromflat(theta) def check_synced(self): if self.comm is None: return if self.comm.Get_rank() == 0: # this is root theta = self.getflat() self.comm.Bcast(theta, root=0) else: thetalocal = self.getflat() thetaroot = np.empty_like(thetalocal) self.comm.Bcast(thetaroot, root=0) assert (thetaroot == thetalocal).all(), (thetaroot, thetalocal) @U.in_session def test_MpiAdam(): np.random.seed(0) tf.set_random_seed(0) a = tf.Variable(np.random.randn(3).astype('float32')) b = tf.Variable(np.random.randn(2,5).astype('float32')) loss = tf.reduce_sum(tf.square(a)) + tf.reduce_sum(tf.sin(b)) stepsize 
= 1e-2 update_op = tf.train.AdamOptimizer(stepsize).minimize(loss) do_update = U.function([], loss, updates=[update_op]) tf.get_default_session().run(tf.global_variables_initializer()) losslist_ref = [] for i in range(10): l = do_update() print(i, l) losslist_ref.append(l) tf.set_random_seed(0) tf.get_default_session().run(tf.global_variables_initializer()) var_list = [a,b] lossandgrad = U.function([], [loss, U.flatgrad(loss, var_list)]) adam = MpiAdam(var_list) losslist_test = [] for i in range(10): l,g = lossandgrad() adam.update(g, stepsize) print(i,l) losslist_test.append(l) np.testing.assert_allclose(np.array(losslist_ref), np.array(losslist_test), atol=1e-4) if __name__ == '__main__': test_MpiAdam()
3,296
30.701923
112
py
P3O
P3O-main/baselines/common/cg.py
import numpy as np def cg(f_Ax, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10): """ Demmel p 312 """ p = b.copy() r = b.copy() x = np.zeros_like(b) rdotr = r.dot(r) fmtstr = "%10i %10.3g %10.3g" titlestr = "%10s %10s %10s" if verbose: print(titlestr % ("iter", "residual norm", "soln norm")) for i in range(cg_iters): if callback is not None: callback(x) if verbose: print(fmtstr % (i, rdotr, np.linalg.norm(x))) z = f_Ax(p) v = rdotr / p.dot(z) x += v*p r -= v*z newrdotr = r.dot(r) mu = newrdotr/rdotr p = r + mu*p rdotr = newrdotr if rdotr < residual_tol: break if callback is not None: callback(x) if verbose: print(fmtstr % (i+1, rdotr, np.linalg.norm(x))) # pylint: disable=W0631 return x
897
24.657143
88
py
P3O
P3O-main/baselines/common/runners.py
import numpy as np from abc import ABC, abstractmethod class AbstractEnvRunner(ABC): def __init__(self, *, env, model, nsteps): self.env = env self.model = model self.nenv = nenv = env.num_envs if hasattr(env, 'num_envs') else 1 self.batch_ob_shape = (nenv*nsteps,) + env.observation_space.shape self.obs = np.zeros((nenv,) + env.observation_space.shape, dtype=env.observation_space.dtype.name) self.obs[:] = env.reset() self.nsteps = nsteps self.states = model.initial_state self.dones = [False for _ in range(nenv)] @abstractmethod def run(self): raise NotImplementedError
670
32.55
106
py
P3O
P3O-main/baselines/common/distributions.py
import tensorflow as tf import numpy as np import baselines.common.tf_util as U from baselines.a2c.utils import fc from tensorflow.python.ops import math_ops class Pd(object): """ A particular probability distribution """ def flatparam(self): raise NotImplementedError def mode(self): raise NotImplementedError def neglogp(self, x): # Usually it's easier to define the negative logprob raise NotImplementedError def kl(self, other): raise NotImplementedError def entropy(self): raise NotImplementedError def sample(self): raise NotImplementedError def logp(self, x): return - self.neglogp(x) def get_shape(self): return self.flatparam().shape @property def shape(self): return self.get_shape() def __getitem__(self, idx): return self.__class__(self.flatparam()[idx]) class PdType(object): """ Parametrized family of probability distributions """ def pdclass(self): raise NotImplementedError def pdfromflat(self, flat): return self.pdclass()(flat) def pdfromlatent(self, latent_vector, init_scale, init_bias): raise NotImplementedError def param_shape(self): raise NotImplementedError def sample_shape(self): raise NotImplementedError def sample_dtype(self): raise NotImplementedError def param_placeholder(self, prepend_shape, name=None): return tf.placeholder(dtype=tf.float32, shape=prepend_shape+self.param_shape(), name=name) def sample_placeholder(self, prepend_shape, name=None): return tf.placeholder(dtype=self.sample_dtype(), shape=prepend_shape+self.sample_shape(), name=name) def __eq__(self, other): return (type(self) == type(other)) and (self.__dict__ == other.__dict__) class CategoricalPdType(PdType): def __init__(self, ncat): self.ncat = ncat def pdclass(self): return CategoricalPd def pdfromlatent(self, latent_vector, init_scale=1.0, init_bias=0.0): pdparam = _matching_fc(latent_vector, 'pi', self.ncat, init_scale=init_scale, init_bias=init_bias) return self.pdfromflat(pdparam), pdparam def param_shape(self): return [self.ncat] def sample_shape(self): return [] def 
sample_dtype(self): return tf.int32 class MultiCategoricalPdType(PdType): def __init__(self, nvec): self.ncats = nvec.astype('int32') assert (self.ncats > 0).all() def pdclass(self): return MultiCategoricalPd def pdfromflat(self, flat): return MultiCategoricalPd(self.ncats, flat) def pdfromlatent(self, latent, init_scale=1.0, init_bias=0.0): pdparam = _matching_fc(latent, 'pi', self.ncats.sum(), init_scale=init_scale, init_bias=init_bias) return self.pdfromflat(pdparam), pdparam def param_shape(self): return [sum(self.ncats)] def sample_shape(self): return [len(self.ncats)] def sample_dtype(self): return tf.int32 class DiagGaussianPdType(PdType): def __init__(self, size): self.size = size def pdclass(self): return DiagGaussianPd def pdfromlatent(self, latent_vector, init_scale=1.0, init_bias=0.0): mean = _matching_fc(latent_vector, 'pi', self.size, init_scale=init_scale, init_bias=init_bias) # logstd = _matching_fc(latent_vector, 'pi/logstd', self.size, init_scale=init_scale, init_bias=init_bias) logstd = tf.get_variable(name='pi/logstd', shape=[1, self.size], initializer=tf.zeros_initializer()) pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1) return self.pdfromflat(pdparam), mean def param_shape(self): return [2*self.size] def sample_shape(self): return [self.size] def sample_dtype(self): return tf.float32 class BernoulliPdType(PdType): def __init__(self, size): self.size = size def pdclass(self): return BernoulliPd def param_shape(self): return [self.size] def sample_shape(self): return [self.size] def sample_dtype(self): return tf.int32 def pdfromlatent(self, latent_vector, init_scale=1.0, init_bias=0.0): pdparam = _matching_fc(latent_vector, 'pi', self.size, init_scale=init_scale, init_bias=init_bias) return self.pdfromflat(pdparam), pdparam # WRONG SECOND DERIVATIVES # class CategoricalPd(Pd): # def __init__(self, logits): # self.logits = logits # self.ps = tf.nn.softmax(logits) # @classmethod # def fromflat(cls, flat): # return cls(flat) # def 
flatparam(self): # return self.logits # def mode(self): # return U.argmax(self.logits, axis=-1) # def logp(self, x): # return -tf.nn.sparse_softmax_cross_entropy_with_logits(self.logits, x) # def kl(self, other): # return tf.nn.softmax_cross_entropy_with_logits(other.logits, self.ps) \ # - tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps) # def entropy(self): # return tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps) # def sample(self): # u = tf.random_uniform(tf.shape(self.logits)) # return U.argmax(self.logits - tf.log(-tf.log(u)), axis=-1) class CategoricalPd(Pd): def __init__(self, logits): self.logits = logits def flatparam(self): return self.logits def mode(self): return tf.argmax(self.logits, axis=-1) @property def mean(self): return tf.nn.softmax(self.logits) def neglogp(self, x): # return tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=x) # Note: we can't use sparse_softmax_cross_entropy_with_logits because # the implementation does not allow second-order derivatives... 
if x.dtype in {tf.uint8, tf.int32, tf.int64}: # one-hot encoding x_shape_list = x.shape.as_list() logits_shape_list = self.logits.get_shape().as_list()[:-1] for xs, ls in zip(x_shape_list, logits_shape_list): if xs is not None and ls is not None: assert xs == ls, 'shape mismatch: {} in x vs {} in logits'.format(xs, ls) x = tf.one_hot(x, self.logits.get_shape().as_list()[-1]) else: # already encoded assert x.shape.as_list() == self.logits.shape.as_list() return tf.nn.softmax_cross_entropy_with_logits_v2( logits=self.logits, labels=x) def kl(self, other): a0 = self.logits - tf.reduce_max(self.logits, axis=-1, keepdims=True) a1 = other.logits - tf.reduce_max(other.logits, axis=-1, keepdims=True) ea0 = tf.exp(a0) ea1 = tf.exp(a1) z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True) z1 = tf.reduce_sum(ea1, axis=-1, keepdims=True) p0 = ea0 / z0 return tf.reduce_sum(p0 * (a0 - tf.log(z0) - a1 + tf.log(z1)), axis=-1) def entropy(self): a0 = self.logits - tf.reduce_max(self.logits, axis=-1, keepdims=True) ea0 = tf.exp(a0) z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True) p0 = ea0 / z0 return tf.reduce_sum(p0 * (tf.log(z0) - a0), axis=-1) def sample(self): u = tf.random_uniform(tf.shape(self.logits), dtype=self.logits.dtype) return tf.argmax(self.logits - tf.log(-tf.log(u)), axis=-1) @classmethod def fromflat(cls, flat): return cls(flat) class MultiCategoricalPd(Pd): def __init__(self, nvec, flat): self.flat = flat self.categoricals = list(map(CategoricalPd, tf.split(flat, np.array(nvec, dtype=np.int32), axis=-1))) def flatparam(self): return self.flat def mode(self): return tf.cast(tf.stack([p.mode() for p in self.categoricals], axis=-1), tf.int32) def neglogp(self, x): return tf.add_n([p.neglogp(px) for p, px in zip(self.categoricals, tf.unstack(x, axis=-1))]) def kl(self, other): return tf.add_n([p.kl(q) for p, q in zip(self.categoricals, other.categoricals)]) def entropy(self): return tf.add_n([p.entropy() for p in self.categoricals]) def sample(self): return 
tf.cast(tf.stack([p.sample() for p in self.categoricals], axis=-1), tf.int32) @classmethod def fromflat(cls, flat): raise NotImplementedError class DiagGaussianPd(Pd): def __init__(self, flat): self.flat = flat mean, logstd = tf.split(axis=len(flat.shape)-1, num_or_size_splits=2, value=flat) self.mean = mean self.logstd = logstd self.std = tf.exp(logstd) def flatparam(self): return self.flat def mode(self): return self.mean def neglogp(self, x): pre_sum = 0.5 * (((x - self.mean) / (self.std+1e-8)) ** 2 + 2 * self.logstd + np.log(2 * np.pi)) return tf.reduce_sum(pre_sum, axis=1) def kl(self, other): assert isinstance(other, DiagGaussianPd) return tf.reduce_sum(other.logstd - self.logstd + (tf.square(self.std) + tf.square(self.mean - other.mean)) / (2.0 * tf.square(other.std)) - 0.5, axis=-1) def entropy(self): return tf.reduce_sum(self.logstd + .5 * np.log(2.0 * np.pi * np.e), axis=-1) def sample(self): return self.mean + self.std * tf.random_normal(tf.shape(self.mean)) @classmethod def fromflat(cls, flat): return cls(flat) # class SquashDiagGaussianPd(Pd): # def __init__(self, flat): # self.flat = flat # mean, logstd = tf.split(axis=len(flat.shape)-1, num_or_size_splits=2, value=flat) # self.mean = mean # self.logstd = logstd # self.std = tf.exp(logstd) # def flatparam(self): # return self.flat # def mode(self): # return self.mean # def neglogp(self, x): # return 0.5 * tf.reduce_sum(tf.square((x - self.mean) / self.std), axis=-1) \ # + 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[-1]) \ # + tf.reduce_sum(self.logstd, axis=-1) # def SquashDiagGaussianPd(self, x): # return 0.5 * tf.reduce_sum(tf.square((x - self.mean) / self.std), axis=-1) \ # + 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[-1]) \ # + tf.reduce_sum(self.logstd, axis=-1) # def kl(self, other): # assert isinstance(other, SquashDiagGaussianPd) # return tf.reduce_sum(other.logstd - self.logstd + (tf.square(self.std) + tf.square(self.mean - other.mean)) / (2.0 * tf.square(other.std)) - 0.5, 
axis=-1) # def entropy(self): # return tf.reduce_sum(self.logstd + .5 * np.log(2.0 * np.pi * np.e), axis=-1) # def sample(self): # return self.mean + self.std * tf.random_normal(tf.shape(self.mean)) # @classmethod # def fromflat(cls, flat): # return cls(flat) class BernoulliPd(Pd): def __init__(self, logits): self.logits = logits self.ps = tf.sigmoid(logits) def flatparam(self): return self.logits @property def mean(self): return self.ps def mode(self): return tf.round(self.ps) def neglogp(self, x): return tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=tf.to_float(x)), axis=-1) def kl(self, other): return tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=other.logits, labels=self.ps), axis=-1) - tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.ps), axis=-1) def entropy(self): return tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.ps), axis=-1) def sample(self): u = tf.random_uniform(tf.shape(self.ps)) return tf.to_float(math_ops.less(u, self.ps)) @classmethod def fromflat(cls, flat): return cls(flat) def make_pdtype(ac_space,squash=False): from gym import spaces if isinstance(ac_space, spaces.Box): assert len(ac_space.shape) == 1 if squash: return DiagGaussianPdType(ac_space.shape[0]) else: return DiagGaussianPdType(ac_space.shape[0]) elif isinstance(ac_space, spaces.Discrete): return CategoricalPdType(ac_space.n) elif isinstance(ac_space, spaces.MultiDiscrete): return MultiCategoricalPdType(ac_space.nvec) elif isinstance(ac_space, spaces.MultiBinary): return BernoulliPdType(ac_space.n) else: raise NotImplementedError def shape_el(v, i): maybe = v.get_shape()[i] if maybe is not None: return maybe else: return tf.shape(v)[i] @U.in_session def test_probtypes(): np.random.seed(0) pdparam_diag_gauss = np.array([-.2, .3, .4, -.5, .1, -.5, .1, 0.8]) diag_gauss = DiagGaussianPdType(pdparam_diag_gauss.size // 2) #pylint: disable=E1101 
validate_probtype(diag_gauss, pdparam_diag_gauss) pdparam_categorical = np.array([-.2, .3, .5]) categorical = CategoricalPdType(pdparam_categorical.size) #pylint: disable=E1101 validate_probtype(categorical, pdparam_categorical) nvec = [1,2,3] pdparam_multicategorical = np.array([-.2, .3, .5, .1, 1, -.1]) multicategorical = MultiCategoricalPdType(nvec) #pylint: disable=E1101 validate_probtype(multicategorical, pdparam_multicategorical) pdparam_bernoulli = np.array([-.2, .3, .5]) bernoulli = BernoulliPdType(pdparam_bernoulli.size) #pylint: disable=E1101 validate_probtype(bernoulli, pdparam_bernoulli) def validate_probtype(probtype, pdparam): N = 100000 # Check to see if mean negative log likelihood == differential entropy Mval = np.repeat(pdparam[None, :], N, axis=0) M = probtype.param_placeholder([N]) X = probtype.sample_placeholder([N]) pd = probtype.pdfromflat(M) calcloglik = U.function([X, M], pd.logp(X)) calcent = U.function([M], pd.entropy()) Xval = tf.get_default_session().run(pd.sample(), feed_dict={M:Mval}) logliks = calcloglik(Xval, Mval) entval_ll = - logliks.mean() #pylint: disable=E1101 entval_ll_stderr = logliks.std() / np.sqrt(N) #pylint: disable=E1101 entval = calcent(Mval).mean() #pylint: disable=E1101 assert np.abs(entval - entval_ll) < 3 * entval_ll_stderr # within 3 sigmas # Check to see if kldiv[p,q] = - ent[p] - E_p[log q] M2 = probtype.param_placeholder([N]) pd2 = probtype.pdfromflat(M2) q = pdparam + np.random.randn(pdparam.size) * 0.1 Mval2 = np.repeat(q[None, :], N, axis=0) calckl = U.function([M, M2], pd.kl(pd2)) klval = calckl(Mval, Mval2).mean() #pylint: disable=E1101 logliks = calcloglik(Xval, Mval2) klval_ll = - entval - logliks.mean() #pylint: disable=E1101 klval_ll_stderr = logliks.std() / np.sqrt(N) #pylint: disable=E1101 assert np.abs(klval - klval_ll) < 3 * klval_ll_stderr # within 3 sigmas print('ok on', probtype, pdparam) def _matching_fc(tensor, name, size, init_scale, init_bias): if tensor.shape[-1] == size: return tensor 
else: return fc(tensor, name, size, init_scale=init_scale, init_bias=init_bias)
15,187
37.94359
217
py
P3O
P3O-main/baselines/common/mpi_util.py
from collections import defaultdict import os, numpy as np import platform import shutil import subprocess import warnings import sys try: from mpi4py import MPI except ImportError: MPI = None def sync_from_root(sess, variables, comm=None): """ Send the root node's parameters to every worker. Arguments: sess: the TensorFlow session. variables: all parameter variables including optimizer's """ if comm is None: comm = MPI.COMM_WORLD import tensorflow as tf values = comm.bcast(sess.run(variables)) sess.run([tf.assign(var, val) for (var, val) in zip(variables, values)]) def gpu_count(): """ Count the GPUs on this machine. """ if shutil.which('nvidia-smi') is None: return 0 output = subprocess.check_output(['nvidia-smi', '--query-gpu=gpu_name', '--format=csv']) return max(0, len(output.split(b'\n')) - 2) def setup_mpi_gpus(): """ Set CUDA_VISIBLE_DEVICES to MPI rank if not already set """ if 'CUDA_VISIBLE_DEVICES' not in os.environ: if sys.platform == 'darwin': # This Assumes if you're on OSX you're just ids = [] # doing a smoke test and don't want GPUs else: lrank, _lsize = get_local_rank_size(MPI.COMM_WORLD) ids = [lrank] os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, ids)) def get_local_rank_size(comm): """ Returns the rank of each process on its machine The processes on a given machine will be assigned ranks 0, 1, 2, ..., N-1, where N is the number of processes on this machine. 
Useful if you want to assign one gpu per machine """ this_node = platform.node() ranks_nodes = comm.allgather((comm.Get_rank(), this_node)) node2rankssofar = defaultdict(int) local_rank = None for (rank, node) in ranks_nodes: if rank == comm.Get_rank(): local_rank = node2rankssofar[node] node2rankssofar[node] += 1 assert local_rank is not None return local_rank, node2rankssofar[this_node] def share_file(comm, path): """ Copies the file from rank 0 to all other ranks Puts it in the same place on all machines """ localrank, _ = get_local_rank_size(comm) if comm.Get_rank() == 0: with open(path, 'rb') as fh: data = fh.read() comm.bcast(data) else: data = comm.bcast(None) if localrank == 0: os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, 'wb') as fh: fh.write(data) comm.Barrier() def dict_gather(comm, d, op='mean', assert_all_have_data=True): """ Perform a reduction operation over dicts """ if comm is None: return d alldicts = comm.allgather(d) size = comm.size k2li = defaultdict(list) for d in alldicts: for (k,v) in d.items(): k2li[k].append(v) result = {} for (k,li) in k2li.items(): if assert_all_have_data: assert len(li)==size, "only %i out of %i MPI workers have sent '%s'" % (len(li), size, k) if op=='mean': result[k] = np.mean(li, axis=0) elif op=='sum': result[k] = np.sum(li, axis=0) else: assert 0, op return result def mpi_weighted_mean(comm, local_name2valcount): """ Perform a weighted average over dicts that are each on a different node Input: local_name2valcount: dict mapping key -> (value, count) Returns: key -> mean """ all_name2valcount = comm.gather(local_name2valcount) if comm.rank == 0: name2sum = defaultdict(float) name2count = defaultdict(float) for n2vc in all_name2valcount: for (name, (val, count)) in n2vc.items(): try: val = float(val) except ValueError: if comm.rank == 0: warnings.warn('WARNING: tried to compute mean on non-float {}={}'.format(name, val)) else: name2sum[name] += val * count name2count[name] += count return {name : 
name2sum[name] / name2count[name] for name in name2sum} else: return {}
4,259
30.791045
108
py
P3O
P3O-main/baselines/common/schedules.py
"""This file is used for specifying various schedules that evolve over time throughout the execution of the algorithm, such as: - learning rate for the optimizer - exploration epsilon for the epsilon greedy exploration strategy - beta parameter for beta parameter in prioritized replay Each schedule has a function `value(t)` which returns the current value of the parameter given the timestep t of the optimization procedure. """ class Schedule(object): def value(self, t): """Value of the schedule at time t""" raise NotImplementedError() class ConstantSchedule(object): def __init__(self, value): """Value remains constant over time. Parameters ---------- value: float Constant value of the schedule """ self._v = value def value(self, t): """See Schedule.value""" return self._v def linear_interpolation(l, r, alpha): return l + alpha * (r - l) class PiecewiseSchedule(object): def __init__(self, endpoints, interpolation=linear_interpolation, outside_value=None): """Piecewise schedule. endpoints: [(int, int)] list of pairs `(time, value)` meanining that schedule should output `value` when `t==time`. All the values for time must be sorted in an increasing order. When t is between two times, e.g. `(time_a, value_a)` and `(time_b, value_b)`, such that `time_a <= t < time_b` then value outputs `interpolation(value_a, value_b, alpha)` where alpha is a fraction of time passed between `time_a` and `time_b` for time `t`. interpolation: lambda float, float, float: float a function that takes value to the left and to the right of t according to the `endpoints`. Alpha is the fraction of distance from left endpoint to right endpoint that t has covered. See linear_interpolation for example. outside_value: float if the value is requested outside of all the intervals sepecified in `endpoints` this value is returned. If None then AssertionError is raised when outside value is requested. 
""" idxes = [e[0] for e in endpoints] assert idxes == sorted(idxes) self._interpolation = interpolation self._outside_value = outside_value self._endpoints = endpoints def value(self, t): """See Schedule.value""" for (l_t, l), (r_t, r) in zip(self._endpoints[:-1], self._endpoints[1:]): if l_t <= t and t < r_t: alpha = float(t - l_t) / (r_t - l_t) return self._interpolation(l, r, alpha) # t does not belong to any of the pieces, so doom. assert self._outside_value is not None return self._outside_value class LinearSchedule(object): def __init__(self, schedule_timesteps, final_p, initial_p=1.0): """Linear interpolation between initial_p and final_p over schedule_timesteps. After this many timesteps pass final_p is returned. Parameters ---------- schedule_timesteps: int Number of timesteps for which to linearly anneal initial_p to final_p initial_p: float initial output value final_p: float final output value """ self.schedule_timesteps = schedule_timesteps self.final_p = final_p self.initial_p = initial_p def value(self, t): """See Schedule.value""" fraction = min(float(t) / self.schedule_timesteps, 1.0) return self.initial_p + fraction * (self.final_p - self.initial_p)
3,702
36.03
90
py
P3O
P3O-main/baselines/common/atari_wrappers.py
import numpy as np import os os.environ.setdefault('PATH', '') from collections import deque import gym from gym import spaces import cv2 cv2.ocl.setUseOpenCL(False) from .wrappers import TimeLimit class NoopResetEnv(gym.Wrapper): def __init__(self, env, noop_max=30): """Sample initial states by taking random number of no-ops on reset. No-op is assumed to be action 0. """ gym.Wrapper.__init__(self, env) self.noop_max = noop_max self.override_num_noops = None self.noop_action = 0 assert env.unwrapped.get_action_meanings()[0] == 'NOOP' def reset(self, **kwargs): """ Do no-op action for a number of steps in [1, noop_max].""" self.env.reset(**kwargs) if self.override_num_noops is not None: noops = self.override_num_noops else: noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101 assert noops > 0 obs = None for _ in range(noops): obs, _, done, _ = self.env.step(self.noop_action) if done: obs = self.env.reset(**kwargs) return obs def step(self, ac): return self.env.step(ac) class FireResetEnv(gym.Wrapper): def __init__(self, env): """Take action on reset for environments that are fixed until firing.""" gym.Wrapper.__init__(self, env) assert env.unwrapped.get_action_meanings()[1] == 'FIRE' assert len(env.unwrapped.get_action_meanings()) >= 3 def reset(self, **kwargs): self.env.reset(**kwargs) obs, _, done, _ = self.env.step(1) if done: self.env.reset(**kwargs) obs, _, done, _ = self.env.step(2) if done: self.env.reset(**kwargs) return obs def step(self, ac): return self.env.step(ac) class EpisodicLifeEnv(gym.Wrapper): def __init__(self, env): """Make end-of-life == end-of-episode, but only reset on true game over. Done by DeepMind for the DQN and co. since it helps value estimation. 
""" gym.Wrapper.__init__(self, env) self.lives = 0 self.was_real_done = True def step(self, action): obs, reward, done, info = self.env.step(action) self.was_real_done = done # check current lives, make loss of life terminal, # then update lives to handle bonus lives lives = self.env.unwrapped.ale.lives() if lives < self.lives and lives > 0: # for Qbert sometimes we stay in lives == 0 condition for a few frames # so it's important to keep lives > 0, so that we only reset once # the environment advertises done. done = True self.lives = lives return obs, reward, done, info def reset(self, **kwargs): """Reset only when lives are exhausted. This way all states are still reachable even though lives are episodic, and the learner need not know about any of this behind-the-scenes. """ if self.was_real_done: obs = self.env.reset(**kwargs) else: # no-op step to advance from terminal/lost life state obs, _, _, _ = self.env.step(0) self.lives = self.env.unwrapped.ale.lives() return obs class MaxAndSkipEnv(gym.Wrapper): def __init__(self, env, skip=4): """Return only every `skip`-th frame""" gym.Wrapper.__init__(self, env) # most recent raw observations (for max pooling across time steps) self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8) self._skip = skip def step(self, action): """Repeat action, sum reward, and max over last observations.""" total_reward = 0.0 done = None for i in range(self._skip): obs, reward, done, info = self.env.step(action) if i == self._skip - 2: self._obs_buffer[0] = obs if i == self._skip - 1: self._obs_buffer[1] = obs total_reward += reward if done: break # Note that the observation on the done=True frame # doesn't matter max_frame = self._obs_buffer.max(axis=0) return max_frame, total_reward, done, info def reset(self, **kwargs): return self.env.reset(**kwargs) class ClipRewardEnv(gym.RewardWrapper): def __init__(self, env): gym.RewardWrapper.__init__(self, env) def reward(self, reward): """Bin reward to {+1, 0, -1} by 
its sign.""" return np.sign(reward) class WarpFrame(gym.ObservationWrapper): def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None): """ Warp frames to 84x84 as done in the Nature paper and later work. If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which observation should be warped. """ super().__init__(env) self._width = width self._height = height self._grayscale = grayscale self._key = dict_space_key if self._grayscale: num_colors = 1 else: num_colors = 3 new_space = gym.spaces.Box( low=0, high=255, shape=(self._height, self._width, num_colors), dtype=np.uint8, ) if self._key is None: original_space = self.observation_space self.observation_space = new_space else: original_space = self.observation_space.spaces[self._key] self.observation_space.spaces[self._key] = new_space assert original_space.dtype == np.uint8 and len(original_space.shape) == 3 def observation(self, obs): if self._key is None: frame = obs else: frame = obs[self._key] if self._grayscale: frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) frame = cv2.resize( frame, (self._width, self._height), interpolation=cv2.INTER_AREA ) if self._grayscale: frame = np.expand_dims(frame, -1) if self._key is None: obs = frame else: obs = obs.copy() obs[self._key] = frame return obs class FrameStack(gym.Wrapper): def __init__(self, env, k): """Stack k last frames. Returns lazy array, which is much more memory efficient. 
See Also -------- baselines.common.atari_wrappers.LazyFrames """ gym.Wrapper.__init__(self, env) self.k = k self.frames = deque([], maxlen=k) shp = env.observation_space.shape self.observation_space = spaces.Box(low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)), dtype=env.observation_space.dtype) def reset(self): ob = self.env.reset() for _ in range(self.k): self.frames.append(ob) return self._get_ob() def step(self, action): ob, reward, done, info = self.env.step(action) self.frames.append(ob) return self._get_ob(), reward, done, info def _get_ob(self): assert len(self.frames) == self.k return LazyFrames(list(self.frames)) class ScaledFloatFrame(gym.ObservationWrapper): def __init__(self, env): gym.ObservationWrapper.__init__(self, env) self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32) def observation(self, observation): # careful! This undoes the memory optimization, use # with smaller replay buffers only. return np.array(observation).astype(np.float32) / 255.0 class LazyFrames(object): def __init__(self, frames): """This object ensures that common frames between the observations are only stored once. It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay buffers. This object should only be converted to numpy array before being passed to the model. 
You'd not believe how complex the previous solution was.""" self._frames = frames self._out = None def _force(self): if self._out is None: self._out = np.concatenate(self._frames, axis=-1) self._frames = None return self._out def __array__(self, dtype=None): out = self._force() if dtype is not None: out = out.astype(dtype) return out def __len__(self): return len(self._force()) def __getitem__(self, i): return self._force()[i] def count(self): frames = self._force() return frames.shape[frames.ndim - 1] def frame(self, i): return self._force()[..., i] def make_atari(env_id, max_episode_steps=None): env = gym.make(env_id) assert 'NoFrameskip' in env.spec.id env = NoopResetEnv(env, noop_max=30) env = MaxAndSkipEnv(env, skip=4) if max_episode_steps is not None: env = TimeLimit(env, max_episode_steps=max_episode_steps) return env def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False): """Configure environment for DeepMind-style Atari. """ if episode_life: env = EpisodicLifeEnv(env) if 'FIRE' in env.unwrapped.get_action_meanings(): env = FireResetEnv(env) env = WarpFrame(env) if scale: env = ScaledFloatFrame(env) if clip_rewards: env = ClipRewardEnv(env) if frame_stack: env = FrameStack(env, 4) return env
9,686
32.28866
130
py
P3O
P3O-main/baselines/common/mpi_running_mean_std.py
try: from mpi4py import MPI except ImportError: MPI = None import tensorflow as tf, baselines.common.tf_util as U, numpy as np class RunningMeanStd(object): # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm def __init__(self, epsilon=1e-2, shape=()): self._sum = tf.get_variable( dtype=tf.float64, shape=shape, initializer=tf.constant_initializer(0.0), name="runningsum", trainable=False) self._sumsq = tf.get_variable( dtype=tf.float64, shape=shape, initializer=tf.constant_initializer(epsilon), name="runningsumsq", trainable=False) self._count = tf.get_variable( dtype=tf.float64, shape=(), initializer=tf.constant_initializer(epsilon), name="count", trainable=False) self.shape = shape self.mean = tf.to_float(self._sum / self._count) self.std = tf.sqrt( tf.maximum( tf.to_float(self._sumsq / self._count) - tf.square(self.mean) , 1e-2 )) newsum = tf.placeholder(shape=self.shape, dtype=tf.float64, name='sum') newsumsq = tf.placeholder(shape=self.shape, dtype=tf.float64, name='var') newcount = tf.placeholder(shape=[], dtype=tf.float64, name='count') self.incfiltparams = U.function([newsum, newsumsq, newcount], [], updates=[tf.assign_add(self._sum, newsum), tf.assign_add(self._sumsq, newsumsq), tf.assign_add(self._count, newcount)]) def update(self, x): x = x.astype('float64') n = int(np.prod(self.shape)) totalvec = np.zeros(n*2+1, 'float64') addvec = np.concatenate([x.sum(axis=0).ravel(), np.square(x).sum(axis=0).ravel(), np.array([len(x)],dtype='float64')]) if MPI is not None: MPI.COMM_WORLD.Allreduce(addvec, totalvec, op=MPI.SUM) self.incfiltparams(totalvec[0:n].reshape(self.shape), totalvec[n:2*n].reshape(self.shape), totalvec[2*n]) @U.in_session def test_runningmeanstd(): for (x1, x2, x3) in [ (np.random.randn(3), np.random.randn(4), np.random.randn(5)), (np.random.randn(3,2), np.random.randn(4,2), np.random.randn(5,2)), ]: rms = RunningMeanStd(epsilon=0.0, shape=x1.shape[1:]) U.initialize() x = np.concatenate([x1, x2, x3], axis=0) 
ms1 = [x.mean(axis=0), x.std(axis=0)] rms.update(x1) rms.update(x2) rms.update(x3) ms2 = [rms.mean.eval(), rms.std.eval()] assert np.allclose(ms1, ms2) @U.in_session def test_dist(): np.random.seed(0) p1,p2,p3=(np.random.randn(3,1), np.random.randn(4,1), np.random.randn(5,1)) q1,q2,q3=(np.random.randn(6,1), np.random.randn(7,1), np.random.randn(8,1)) # p1,p2,p3=(np.random.randn(3), np.random.randn(4), np.random.randn(5)) # q1,q2,q3=(np.random.randn(6), np.random.randn(7), np.random.randn(8)) comm = MPI.COMM_WORLD assert comm.Get_size()==2 if comm.Get_rank()==0: x1,x2,x3 = p1,p2,p3 elif comm.Get_rank()==1: x1,x2,x3 = q1,q2,q3 else: assert False rms = RunningMeanStd(epsilon=0.0, shape=(1,)) U.initialize() rms.update(x1) rms.update(x2) rms.update(x3) bigvec = np.concatenate([p1,p2,p3,q1,q2,q3]) def checkallclose(x,y): print(x,y) return np.allclose(x,y) assert checkallclose( bigvec.mean(axis=0), rms.mean.eval(), ) assert checkallclose( bigvec.std(axis=0), rms.std.eval(), ) if __name__ == "__main__": # Run with mpirun -np 2 python <filename> test_dist()
3,706
31.80531
126
py
P3O
P3O-main/baselines/common/test_mpi_util.py
from baselines.common import mpi_util from baselines import logger from baselines.common.tests.test_with_mpi import with_mpi try: from mpi4py import MPI except ImportError: MPI = None @with_mpi() def test_mpi_weighted_mean(): comm = MPI.COMM_WORLD with logger.scoped_configure(comm=comm): if comm.rank == 0: name2valcount = {'a' : (10, 2), 'b' : (20,3)} elif comm.rank == 1: name2valcount = {'a' : (19, 1), 'c' : (42,3)} else: raise NotImplementedError d = mpi_util.mpi_weighted_mean(comm, name2valcount) correctval = {'a' : (10 * 2 + 19) / 3.0, 'b' : 20, 'c' : 42} if comm.rank == 0: assert d == correctval, '{} != {}'.format(d, correctval) for name, (val, count) in name2valcount.items(): for _ in range(count): logger.logkv_mean(name, val) d2 = logger.dumpkvs() if comm.rank == 0: assert d2 == correctval
986
31.9
68
py
P3O
P3O-main/baselines/common/misc_util.py
import gym import numpy as np import os import pickle import random import tempfile import zipfile def zipsame(*seqs): L = len(seqs[0]) assert all(len(seq) == L for seq in seqs[1:]) return zip(*seqs) class EzPickle(object): """Objects that are pickled and unpickled via their constructor arguments. Example usage: class Dog(Animal, EzPickle): def __init__(self, furcolor, tailkind="bushy"): Animal.__init__() EzPickle.__init__(furcolor, tailkind) ... When this object is unpickled, a new Dog will be constructed by passing the provided furcolor and tailkind into the constructor. However, philosophers are still not sure whether it is still the same dog. This is generally needed only for environments which wrap C/C++ code, such as MuJoCo and Atari. """ def __init__(self, *args, **kwargs): self._ezpickle_args = args self._ezpickle_kwargs = kwargs def __getstate__(self): return {"_ezpickle_args": self._ezpickle_args, "_ezpickle_kwargs": self._ezpickle_kwargs} def __setstate__(self, d): out = type(self)(*d["_ezpickle_args"], **d["_ezpickle_kwargs"]) self.__dict__.update(out.__dict__) def set_global_seeds(i): try: import MPI rank = MPI.COMM_WORLD.Get_rank() except ImportError: rank = 0 myseed = i + 1000 * rank if i is not None else None try: import tensorflow as tf tf.set_random_seed(myseed) except ImportError: pass np.random.seed(myseed) random.seed(myseed) def pretty_eta(seconds_left): """Print the number of seconds in human readable format. Examples: 2 days 2 hours and 37 minutes less than a minute Paramters --------- seconds_left: int Number of seconds to be converted to the ETA Returns ------- eta: str String representing the pretty ETA. 
""" minutes_left = seconds_left // 60 seconds_left %= 60 hours_left = minutes_left // 60 minutes_left %= 60 days_left = hours_left // 24 hours_left %= 24 def helper(cnt, name): return "{} {}{}".format(str(cnt), name, ('s' if cnt > 1 else '')) if days_left > 0: msg = helper(days_left, 'day') if hours_left > 0: msg += ' and ' + helper(hours_left, 'hour') return msg if hours_left > 0: msg = helper(hours_left, 'hour') if minutes_left > 0: msg += ' and ' + helper(minutes_left, 'minute') return msg if minutes_left > 0: return helper(minutes_left, 'minute') return 'less than a minute' class RunningAvg(object): def __init__(self, gamma, init_value=None): """Keep a running estimate of a quantity. This is a bit like mean but more sensitive to recent changes. Parameters ---------- gamma: float Must be between 0 and 1, where 0 is the most sensitive to recent changes. init_value: float or None Initial value of the estimate. If None, it will be set on the first update. """ self._value = init_value self._gamma = gamma def update(self, new_val): """Update the estimate. Parameters ---------- new_val: float new observated value of estimated quantity. """ if self._value is None: self._value = new_val else: self._value = self._gamma * self._value + (1.0 - self._gamma) * new_val def __float__(self): """Get the current estimate""" return self._value def boolean_flag(parser, name, default=False, help=None): """Add a boolean flag to argparse parser. 
Parameters ---------- parser: argparse.Parser parser to add the flag to name: str --<name> will enable the flag, while --no-<name> will disable it default: bool or None default value of the flag help: str help string for the flag """ dest = name.replace('-', '_') parser.add_argument("--" + name, action="store_true", default=default, dest=dest, help=help) parser.add_argument("--no-" + name, action="store_false", dest=dest) def get_wrapper_by_name(env, classname): """Given an a gym environment possibly wrapped multiple times, returns a wrapper of class named classname or raises ValueError if no such wrapper was applied Parameters ---------- env: gym.Env of gym.Wrapper gym environment classname: str name of the wrapper Returns ------- wrapper: gym.Wrapper wrapper named classname """ currentenv = env while True: if classname == currentenv.class_name(): return currentenv elif isinstance(currentenv, gym.Wrapper): currentenv = currentenv.env else: raise ValueError("Couldn't find wrapper named %s" % classname) def relatively_safe_pickle_dump(obj, path, compression=False): """This is just like regular pickle dump, except from the fact that failure cases are different: - It's never possible that we end up with a pickle in corrupted state. - If a there was a different file at the path, that file will remain unchanged in the even of failure (provided that filesystem rename is atomic). - it is sometimes possible that we end up with useless temp file which needs to be deleted manually (it will be removed automatically on the next function call) The indended use case is periodic checkpoints of experiment state, such that we never corrupt previous checkpoints if the current one fails. 
Parameters ---------- obj: object object to pickle path: str path to the output file compression: bool if true pickle will be compressed """ temp_storage = path + ".relatively_safe" if compression: # Using gzip here would be simpler, but the size is limited to 2GB with tempfile.NamedTemporaryFile() as uncompressed_file: pickle.dump(obj, uncompressed_file) uncompressed_file.file.flush() with zipfile.ZipFile(temp_storage, "w", compression=zipfile.ZIP_DEFLATED) as myzip: myzip.write(uncompressed_file.name, "data") else: with open(temp_storage, "wb") as f: pickle.dump(obj, f) os.rename(temp_storage, path) def pickle_load(path, compression=False): """Unpickle a possible compressed pickle. Parameters ---------- path: str path to the output file compression: bool if true assumes that pickle was compressed when created and attempts decompression. Returns ------- obj: object the unpickled object """ if compression: with zipfile.ZipFile(path, "r", compression=zipfile.ZIP_DEFLATED) as myzip: with myzip.open("data") as f: return pickle.load(f) else: with open(path, "rb") as f: return pickle.load(f)
7,166
28.372951
97
py
P3O
P3O-main/baselines/common/mpi_fork.py
import os, subprocess, sys def mpi_fork(n, bind_to_core=False): """Re-launches the current script with workers Returns "parent" for original parent, "child" for MPI children """ if n<=1: return "child" if os.getenv("IN_MPI") is None: env = os.environ.copy() env.update( MKL_NUM_THREADS="1", OMP_NUM_THREADS="1", IN_MPI="1" ) args = ["mpirun", "-np", str(n)] if bind_to_core: args += ["-bind-to", "core"] args += [sys.executable] + sys.argv subprocess.check_call(args, env=env) return "parent" else: return "child"
667
26.833333
66
py
P3O
P3O-main/baselines/common/dataset.py
import numpy as np class Dataset(object): def __init__(self, data_map, deterministic=False, shuffle=True): self.data_map = data_map self.deterministic = deterministic self.enable_shuffle = shuffle self.n = next(iter(data_map.values())).shape[0] self._next_id = 0 self.shuffle() def shuffle(self): if self.deterministic: return perm = np.arange(self.n) np.random.shuffle(perm) for key in self.data_map: self.data_map[key] = self.data_map[key][perm] self._next_id = 0 def next_batch(self, batch_size): if self._next_id >= self.n and self.enable_shuffle: self.shuffle() cur_id = self._next_id cur_batch_size = min(batch_size, self.n - self._next_id) self._next_id += cur_batch_size data_map = dict() for key in self.data_map: data_map[key] = self.data_map[key][cur_id:cur_id+cur_batch_size] return data_map def iterate_once(self, batch_size): if self.enable_shuffle: self.shuffle() while self._next_id <= self.n - batch_size: yield self.next_batch(batch_size) self._next_id = 0 def subset(self, num_elements, deterministic=True): data_map = dict() for key in self.data_map: data_map[key] = self.data_map[key][:num_elements] return Dataset(data_map, deterministic) def iterbatches(arrays, *, num_batches=None, batch_size=None, shuffle=True, include_final_partial_batch=True): assert (num_batches is None) != (batch_size is None), 'Provide num_batches or batch_size, but not both' arrays = tuple(map(np.asarray, arrays)) n = arrays[0].shape[0] assert all(a.shape[0] == n for a in arrays[1:]) inds = np.arange(n) if shuffle: np.random.shuffle(inds) sections = np.arange(0, n, batch_size)[1:] if num_batches is None else num_batches for batch_inds in np.array_split(inds, sections): if include_final_partial_batch or len(batch_inds) == batch_size: yield tuple(a[batch_inds] for a in arrays)
2,132
33.967213
110
py
P3O
P3O-main/baselines/common/math_util.py
import numpy as np import scipy.signal def discount(x, gamma): """ computes discounted sums along 0th dimension of x. inputs ------ x: ndarray gamma: float outputs ------- y: ndarray with same shape as x, satisfying y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ... + gamma^k x[t+k], where k = len(x) - t - 1 """ assert x.ndim >= 1 return scipy.signal.lfilter([1],[1,-gamma],x[::-1], axis=0)[::-1] def explained_variance(ypred,y): """ Computes fraction of variance that ypred explains about y. Returns 1 - Var[y-ypred] / Var[y] interpretation: ev=0 => might as well have predicted zero ev=1 => perfect prediction ev<0 => worse than just predicting zero """ assert y.ndim == 1 and ypred.ndim == 1 vary = np.var(y) return np.nan if vary==0 else 1 - np.var(y-ypred)/vary def explained_variance_2d(ypred, y): assert y.ndim == 2 and ypred.ndim == 2 vary = np.var(y, axis=0) out = 1 - np.var(y-ypred)/vary out[vary < 1e-10] = 0 return out def ncc(ypred, y): return np.corrcoef(ypred, y)[1,0] def flatten_arrays(arrs): return np.concatenate([arr.flat for arr in arrs]) def unflatten_vector(vec, shapes): i=0 arrs = [] for shape in shapes: size = np.prod(shape) arr = vec[i:i+size].reshape(shape) arrs.append(arr) i += size return arrs def discount_with_boundaries(X, New, gamma): """ X: 2d array of floats, time x features New: 2d array of bools, indicating when a new episode has started """ Y = np.zeros_like(X) T = X.shape[0] Y[T-1] = X[T-1] for t in range(T-2, -1, -1): Y[t] = X[t] + gamma * Y[t+1] * (1 - New[t+1]) return Y def test_discount_with_boundaries(): gamma=0.9 x = np.array([1.0, 2.0, 3.0, 4.0], 'float32') starts = [1.0, 0.0, 0.0, 1.0] y = discount_with_boundaries(x, starts, gamma) assert np.allclose(y, [ 1 + gamma * 2 + gamma**2 * 3, 2 + gamma * 3, 3, 4 ])
2,094
23.360465
75
py
P3O
P3O-main/baselines/common/tf_util.py
import numpy as np import tensorflow as tf # pylint: ignore-module import copy import os import functools import collections import multiprocessing def switch(condition, then_expression, else_expression): """Switches between two operations depending on a scalar value (int or bool). Note that both `then_expression` and `else_expression` should be symbolic tensors of the *same shape*. # Arguments condition: scalar tensor. then_expression: TensorFlow operation. else_expression: TensorFlow operation. """ x_shape = copy.copy(then_expression.get_shape()) x = tf.cond(tf.cast(condition, 'bool'), lambda: then_expression, lambda: else_expression) x.set_shape(x_shape) return x # ================================================================ # Extras # ================================================================ def lrelu(x, leak=0.2): f1 = 0.5 * (1 + leak) f2 = 0.5 * (1 - leak) return f1 * x + f2 * abs(x) # ================================================================ # Mathematical utils # ================================================================ def huber_loss(x, delta=1.0): """Reference: https://en.wikipedia.org/wiki/Huber_loss""" return tf.where( tf.abs(x) < delta, tf.square(x) * 0.5, delta * (tf.abs(x) - 0.5 * delta) ) # ================================================================ # Global session # ================================================================ def get_session(config=None): """Get default session or create one with a given config""" sess = tf.get_default_session() if sess is None: sess = make_session(config=config, make_default=True) return sess def make_session(config=None, num_cpu=None, make_default=False, graph=None): """Returns a session that will use <num_cpu> CPU's only""" if num_cpu is None: num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count())) if config is None: config = tf.ConfigProto( allow_soft_placement=True, inter_op_parallelism_threads=num_cpu, intra_op_parallelism_threads=num_cpu) 
config.gpu_options.allow_growth = True if make_default: return tf.InteractiveSession(config=config, graph=graph) else: return tf.Session(config=config, graph=graph) def single_threaded_session(): """Returns a session which will only use a single CPU""" return make_session(num_cpu=1) def in_session(f): @functools.wraps(f) def newfunc(*args, **kwargs): with tf.Session(): f(*args, **kwargs) return newfunc ALREADY_INITIALIZED = set() def initialize(): """Initialize all the uninitialized variables in the global scope.""" new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED get_session().run(tf.variables_initializer(new_variables)) ALREADY_INITIALIZED.update(new_variables) # ================================================================ # Model components # ================================================================ def normc_initializer(std=1.0, axis=0): def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613 out = np.random.randn(*shape).astype(dtype.as_numpy_dtype) out *= std / np.sqrt(np.square(out).sum(axis=axis, keepdims=True)) return tf.constant(out) return _initializer def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None, summary_tag=None): with tf.variable_scope(name): stride_shape = [1, stride[0], stride[1], 1] filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters] # there are "num input feature maps * filter height * filter width" # inputs to each hidden unit fan_in = intprod(filter_shape[:3]) # each unit in the lower layer receives a gradient from: # "num output feature maps * filter height * filter width" / # pooling size fan_out = intprod(filter_shape[:2]) * num_filters # initialize weights with random weights w_bound = np.sqrt(6. 
/ (fan_in + fan_out)) w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound), collections=collections) b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.zeros_initializer(), collections=collections) if summary_tag is not None: tf.summary.image(summary_tag, tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]), [2, 0, 1, 3]), max_images=10) return tf.nn.conv2d(x, w, stride_shape, pad) + b # ================================================================ # Theano-like Function # ================================================================ def function(inputs, outputs, updates=None, givens=None): """Just like Theano function. Take a bunch of tensorflow placeholders and expressions computed based on those placeholders and produces f(inputs) -> outputs. Function f takes values to be fed to the input's placeholders and produces the values of the expressions in outputs. Input values can be passed in the same order as inputs or can be provided as kwargs based on placeholder name (passed to constructor or accessible via placeholder.op.name). Example: x = tf.placeholder(tf.int32, (), name="x") y = tf.placeholder(tf.int32, (), name="y") z = 3 * x + 2 * y lin = function([x, y], z, givens={y: 0}) with single_threaded_session(): initialize() assert lin(2) == 6 assert lin(x=3) == 9 assert lin(2, 2) == 10 assert lin(x=2, y=3) == 12 Parameters ---------- inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method] list of input arguments outputs: [tf.Variable] or tf.Variable list of outputs or a single output to be returned from function. Returned value will also have the same shape. updates: [tf.Operation] or tf.Operation list of update functions or single update function that will be run whenever the function is called. The return is ignored. 
""" if isinstance(outputs, list): return _Function(inputs, outputs, updates, givens=givens) elif isinstance(outputs, (dict, collections.OrderedDict)): f = _Function(inputs, outputs.values(), updates, givens=givens) return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs))) else: f = _Function(inputs, [outputs], updates, givens=givens) return lambda *args, **kwargs: f(*args, **kwargs)[0] class _Function(object): def __init__(self, inputs, outputs, updates, givens): for inpt in inputs: if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0): assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method" self.inputs = inputs self.input_names = {inp.name.split("/")[-1].split(":")[0]: inp for inp in inputs} updates = updates or [] self.update_group = tf.group(*updates) self.outputs_update = list(outputs) + [self.update_group] self.givens = {} if givens is None else givens def _feed_input(self, feed_dict, inpt, value): if hasattr(inpt, 'make_feed_dict'): feed_dict.update(inpt.make_feed_dict(value)) else: feed_dict[inpt] = adjust_shape(inpt, value) def __call__(self, *args, **kwargs): assert len(args) + len(kwargs) <= len(self.inputs), "Too many arguments provided" feed_dict = {} # Update feed dict with givens. 
for inpt in self.givens: feed_dict[inpt] = adjust_shape(inpt, feed_dict.get(inpt, self.givens[inpt])) # Update the args for inpt, value in zip(self.inputs, args): self._feed_input(feed_dict, inpt, value) for inpt_name, value in kwargs.items(): self._feed_input(feed_dict, self.input_names[inpt_name], value) results = get_session().run(self.outputs_update, feed_dict=feed_dict)[:-1] return results # ================================================================ # Flat vectors # ================================================================ def var_shape(x): out = x.get_shape().as_list() assert all(isinstance(a, int) for a in out), \ "shape function assumes that shape is fully known" return out def numel(x): return intprod(var_shape(x)) def intprod(x): return int(np.prod(x)) def flatgrad(loss, var_list, clip_norm=None): grads = tf.gradients(loss, var_list) if clip_norm is not None: grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads] return tf.concat(axis=0, values=[ tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)]) for (v, grad) in zip(var_list, grads) ]) class SetFromFlat(object): def __init__(self, var_list, dtype=tf.float32): assigns = [] shapes = list(map(var_shape, var_list)) total_size = np.sum([intprod(shape) for shape in shapes]) self.theta = theta = tf.placeholder(dtype, [total_size]) start = 0 assigns = [] for (shape, v) in zip(shapes, var_list): size = intprod(shape) assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape))) start += size self.op = tf.group(*assigns) def __call__(self, theta): tf.get_default_session().run(self.op, feed_dict={self.theta: theta}) class GetFlat(object): def __init__(self, var_list): self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list]) def __call__(self): return tf.get_default_session().run(self.op) def flattenallbut0(x): return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])]) # 
============================================================= # TF placeholders management # ============================================================ _PLACEHOLDER_CACHE = {} # name -> (placeholder, dtype, shape) def get_placeholder(name, dtype, shape): if name in _PLACEHOLDER_CACHE: out, dtype1, shape1 = _PLACEHOLDER_CACHE[name] if out.graph == tf.get_default_graph(): assert dtype1 == dtype and shape1 == shape, \ 'Placeholder with name {} has already been registered and has shape {}, different from requested {}'.format(name, shape1, shape) return out out = tf.placeholder(dtype=dtype, shape=shape, name=name) _PLACEHOLDER_CACHE[name] = (out, dtype, shape) return out def get_placeholder_cached(name): return _PLACEHOLDER_CACHE[name][0] # ================================================================ # Diagnostics # ================================================================ def display_var_info(vars): from baselines import logger count_params = 0 for v in vars: name = v.name if "/Adam" in name or "beta1_power" in name or "beta2_power" in name: continue v_params = np.prod(v.shape.as_list()) count_params += v_params if "/b:" in name or "/bias" in name: continue # Wx+b, bias is not interesting to look at => count params, but not print logger.info(" %s%s %i params %s" % (name, " "*(55-len(name)), v_params, str(v.shape))) logger.info("Total model parameters: %0.2f million" % (count_params*1e-6)) def get_available_gpus(session_config=None): # based on recipe from https://stackoverflow.com/a/38580201 # Unless we allocate a session here, subsequent attempts to create one # will ignore our custom config (in particular, allow_growth=True will have # no effect). 
if session_config is None: session_config = get_session()._config from tensorflow.python.client import device_lib local_device_protos = device_lib.list_local_devices(session_config) return [x.name for x in local_device_protos if x.device_type == 'GPU'] # ================================================================ # Saving variables # ================================================================ def load_state(fname, sess=None): from baselines import logger logger.warn('load_state method is deprecated, please use load_variables instead') sess = sess or get_session() saver = tf.train.Saver() saver.restore(tf.get_default_session(), fname) def save_state(fname, sess=None): from baselines import logger logger.warn('save_state method is deprecated, please use save_variables instead') sess = sess or get_session() dirname = os.path.dirname(fname) if any(dirname): os.makedirs(dirname, exist_ok=True) saver = tf.train.Saver() saver.save(tf.get_default_session(), fname) # The methods above and below are clearly doing the same thing, and in a rather similar way # TODO: ensure there is no subtle differences and remove one def save_variables(save_path, variables=None, sess=None): import joblib sess = sess or get_session() variables = variables or tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) ps = sess.run(variables) save_dict = {v.name: value for v, value in zip(variables, ps)} dirname = os.path.dirname(save_path) if any(dirname): os.makedirs(dirname, exist_ok=True) joblib.dump(save_dict, save_path) def load_variables(load_path, variables=None, sess=None): import joblib sess = sess or get_session() variables = variables or tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) loaded_params = joblib.load(os.path.expanduser(load_path)) print(os.path.abspath(load_path)) restores = [] if isinstance(loaded_params, list): assert len(loaded_params) == len(variables), 'number of variables loaded mismatches len(variables)' for d, v in zip(loaded_params, variables): 
restores.append(v.assign(d)) else: for v in variables: restores.append(v.assign(loaded_params[v.name])) sess.run(restores) # ================================================================ # Shape adjustment for feeding into tf placeholders # ================================================================ def adjust_shape(placeholder, data): ''' adjust shape of the data to the shape of the placeholder if possible. If shape is incompatible, AssertionError is thrown Parameters: placeholder tensorflow input placeholder data input data to be (potentially) reshaped to be fed into placeholder Returns: reshaped data ''' if not isinstance(data, np.ndarray) and not isinstance(data, list): return data if isinstance(data, list): data = np.array(data) placeholder_shape = [x or -1 for x in placeholder.shape.as_list()] assert _check_shape(placeholder_shape, data.shape), \ 'Shape of data {} is not compatible with shape of the placeholder {}'.format(data.shape, placeholder_shape) return np.reshape(data, placeholder_shape) def _check_shape(placeholder_shape, data_shape): ''' check if two shapes are compatible (i.e. 
differ only by dimensions of size 1, or by the batch dimension)''' return True squeezed_placeholder_shape = _squeeze_shape(placeholder_shape) squeezed_data_shape = _squeeze_shape(data_shape) for i, s_data in enumerate(squeezed_data_shape): s_placeholder = squeezed_placeholder_shape[i] if s_placeholder != -1 and s_data != s_placeholder: return False return True def _squeeze_shape(shape): return [x for x in shape if x != 1] # ================================================================ # Tensorboard interfacing # ================================================================ def launch_tensorboard_in_background(log_dir): ''' To log the Tensorflow graph when using rl-algs algorithms, you can run the following code in your main script: import threading, time def start_tensorboard(session): time.sleep(10) # Wait until graph is setup tb_path = osp.join(logger.get_dir(), 'tb') summary_writer = tf.summary.FileWriter(tb_path, graph=session.graph) summary_op = tf.summary.merge_all() launch_tensorboard_in_background(tb_path) session = tf.get_default_session() t = threading.Thread(target=start_tensorboard, args=([session])) t.start() ''' import subprocess subprocess.Popen(['tensorboard', '--logdir', log_dir])
17,008
37.136771
144
py
P3O
P3O-main/baselines/common/tile_images.py
import numpy as np def tile_images(img_nhwc): """ Tile N images into one big PxQ image (P,Q) are chosen to be as close as possible, and if N is square, then P=Q. input: img_nhwc, list or array of images, ndim=4 once turned into array n = batch index, h = height, w = width, c = channel returns: bigim_HWc, ndarray with ndim=3 """ img_nhwc = np.asarray(img_nhwc) N, h, w, c = img_nhwc.shape H = int(np.ceil(np.sqrt(N))) W = int(np.ceil(float(N)/H)) img_nhwc = np.array(list(img_nhwc) + [img_nhwc[0]*0 for _ in range(N, H*W)]) img_HWhwc = img_nhwc.reshape(H, W, h, w, c) img_HhWwc = img_HWhwc.transpose(0, 2, 1, 3, 4) img_Hh_Ww_c = img_HhWwc.reshape(H*h, W*w, c) return img_Hh_Ww_c
763
30.833333
80
py
P3O
P3O-main/baselines/common/running_mean_std.py
import tensorflow as tf import numpy as np from baselines.common.tf_util import get_session class RunningMeanStd(object): # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm def __init__(self, epsilon=1e-4, shape=()): self.mean = np.zeros(shape, 'float64') self.var = np.ones(shape, 'float64') self.count = epsilon def update(self, x): batch_mean = np.mean(x, axis=0) batch_var = np.var(x, axis=0) batch_count = x.shape[0] self.update_from_moments(batch_mean, batch_var, batch_count) def update_from_moments(self, batch_mean, batch_var, batch_count): self.mean, self.var, self.count = update_mean_var_count_from_moments( self.mean, self.var, self.count, batch_mean, batch_var, batch_count) def update_mean_var_count_from_moments(mean, var, count, batch_mean, batch_var, batch_count): delta = batch_mean - mean tot_count = count + batch_count new_mean = mean + delta * batch_count / tot_count m_a = var * count m_b = batch_var * batch_count M2 = m_a + m_b + np.square(delta) * count * batch_count / tot_count new_var = M2 / tot_count new_count = tot_count return new_mean, new_var, new_count class TfRunningMeanStd(object): # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm ''' TensorFlow variables-based implmentation of computing running mean and std Benefit of this implementation is that it can be saved / loaded together with the tensorflow model ''' def __init__(self, epsilon=1e-4, shape=(), scope=''): sess = get_session() self._new_mean = tf.placeholder(shape=shape, dtype=tf.float64) self._new_var = tf.placeholder(shape=shape, dtype=tf.float64) self._new_count = tf.placeholder(shape=(), dtype=tf.float64) with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): self._mean = tf.get_variable('mean', initializer=np.zeros(shape, 'float64'), dtype=tf.float64) self._var = tf.get_variable('std', initializer=np.ones(shape, 'float64'), dtype=tf.float64) self._count = tf.get_variable('count', initializer=np.full((), epsilon, 
'float64'), dtype=tf.float64) self.update_ops = tf.group([ self._var.assign(self._new_var), self._mean.assign(self._new_mean), self._count.assign(self._new_count) ]) sess.run(tf.variables_initializer([self._mean, self._var, self._count])) self.sess = sess self._set_mean_var_count() def _set_mean_var_count(self): self.mean, self.var, self.count = self.sess.run([self._mean, self._var, self._count]) def update(self, x): batch_mean = np.mean(x, axis=0) batch_var = np.var(x, axis=0) batch_count = x.shape[0] new_mean, new_var, new_count = update_mean_var_count_from_moments(self.mean, self.var, self.count, batch_mean, batch_var, batch_count) self.sess.run(self.update_ops, feed_dict={ self._new_mean: new_mean, self._new_var: new_var, self._new_count: new_count }) self._set_mean_var_count() def test_runningmeanstd(): for (x1, x2, x3) in [ (np.random.randn(3), np.random.randn(4), np.random.randn(5)), (np.random.randn(3,2), np.random.randn(4,2), np.random.randn(5,2)), ]: rms = RunningMeanStd(epsilon=0.0, shape=x1.shape[1:]) x = np.concatenate([x1, x2, x3], axis=0) ms1 = [x.mean(axis=0), x.var(axis=0)] rms.update(x1) rms.update(x2) rms.update(x3) ms2 = [rms.mean, rms.var] np.testing.assert_allclose(ms1, ms2) def test_tf_runningmeanstd(): for (x1, x2, x3) in [ (np.random.randn(3), np.random.randn(4), np.random.randn(5)), (np.random.randn(3,2), np.random.randn(4,2), np.random.randn(5,2)), ]: rms = TfRunningMeanStd(epsilon=0.0, shape=x1.shape[1:], scope='running_mean_std' + str(np.random.randint(0, 128))) x = np.concatenate([x1, x2, x3], axis=0) ms1 = [x.mean(axis=0), x.var(axis=0)] rms.update(x1) rms.update(x2) rms.update(x3) ms2 = [rms.mean, rms.var] np.testing.assert_allclose(ms1, ms2) def profile_tf_runningmeanstd(): import time from baselines.common import tf_util tf_util.get_session( config=tf.ConfigProto( inter_op_parallelism_threads=1, intra_op_parallelism_threads=1, allow_soft_placement=True )) x = np.random.random((376,)) n_trials = 10000 rms = RunningMeanStd() tfrms = 
TfRunningMeanStd() tic1 = time.time() for _ in range(n_trials): rms.update(x) tic2 = time.time() for _ in range(n_trials): tfrms.update(x) tic3 = time.time() print('rms update time ({} trials): {} s'.format(n_trials, tic2 - tic1)) print('tfrms update time ({} trials): {} s'.format(n_trials, tic3 - tic2)) tic1 = time.time() for _ in range(n_trials): z1 = rms.mean tic2 = time.time() for _ in range(n_trials): z2 = tfrms.mean assert z1 == z2 tic3 = time.time() print('rms get mean time ({} trials): {} s'.format(n_trials, tic2 - tic1)) print('tfrms get mean time ({} trials): {} s'.format(n_trials, tic3 - tic2)) ''' options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) #pylint: disable=E1101 run_metadata = tf.RunMetadata() profile_opts = dict(options=options, run_metadata=run_metadata) from tensorflow.python.client import timeline fetched_timeline = timeline.Timeline(run_metadata.step_stats) #pylint: disable=E1101 chrome_trace = fetched_timeline.generate_chrome_trace_format() outfile = '/tmp/timeline.json' with open(outfile, 'wt') as f: f.write(chrome_trace) print('Successfully saved profile to {}. Exiting.'.format(outfile)) exit(0) ''' if __name__ == '__main__': profile_tf_runningmeanstd()
6,081
31.351064
142
py
P3O
P3O-main/baselines/common/retro_wrappers.py
from collections import deque import cv2 cv2.ocl.setUseOpenCL(False) from .atari_wrappers import WarpFrame, ClipRewardEnv, FrameStack, ScaledFloatFrame from .wrappers import TimeLimit import numpy as np import gym class StochasticFrameSkip(gym.Wrapper): def __init__(self, env, n, stickprob): gym.Wrapper.__init__(self, env) self.n = n self.stickprob = stickprob self.curac = None self.rng = np.random.RandomState() self.supports_want_render = hasattr(env, "supports_want_render") def reset(self, **kwargs): self.curac = None return self.env.reset(**kwargs) def step(self, ac): done = False totrew = 0 for i in range(self.n): # First step after reset, use action if self.curac is None: self.curac = ac # First substep, delay with probability=stickprob elif i==0: if self.rng.rand() > self.stickprob: self.curac = ac # Second substep, new action definitely kicks in elif i==1: self.curac = ac if self.supports_want_render and i<self.n-1: ob, rew, done, info = self.env.step(self.curac, want_render=False) else: ob, rew, done, info = self.env.step(self.curac) totrew += rew if done: break return ob, totrew, done, info def seed(self, s): self.rng.seed(s) class PartialFrameStack(gym.Wrapper): def __init__(self, env, k, channel=1): """ Stack one channel (channel keyword) from previous frames """ gym.Wrapper.__init__(self, env) shp = env.observation_space.shape self.channel = channel self.observation_space = gym.spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] + k - 1), dtype=env.observation_space.dtype) self.k = k self.frames = deque([], maxlen=k) shp = env.observation_space.shape def reset(self): ob = self.env.reset() assert ob.shape[2] > self.channel for _ in range(self.k): self.frames.append(ob) return self._get_ob() def step(self, ac): ob, reward, done, info = self.env.step(ac) self.frames.append(ob) return self._get_ob(), reward, done, info def _get_ob(self): assert len(self.frames) == self.k return np.concatenate([frame if i==self.k-1 else 
frame[:,:,self.channel:self.channel+1] for (i, frame) in enumerate(self.frames)], axis=2) class Downsample(gym.ObservationWrapper): def __init__(self, env, ratio): """ Downsample images by a factor of ratio """ gym.ObservationWrapper.__init__(self, env) (oldh, oldw, oldc) = env.observation_space.shape newshape = (oldh//ratio, oldw//ratio, oldc) self.observation_space = gym.spaces.Box(low=0, high=255, shape=newshape, dtype=np.uint8) def observation(self, frame): height, width, _ = self.observation_space.shape frame = cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA) if frame.ndim == 2: frame = frame[:,:,None] return frame class Rgb2gray(gym.ObservationWrapper): def __init__(self, env): """ Downsample images by a factor of ratio """ gym.ObservationWrapper.__init__(self, env) (oldh, oldw, _oldc) = env.observation_space.shape self.observation_space = gym.spaces.Box(low=0, high=255, shape=(oldh, oldw, 1), dtype=np.uint8) def observation(self, frame): frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) return frame[:,:,None] class MovieRecord(gym.Wrapper): def __init__(self, env, savedir, k): gym.Wrapper.__init__(self, env) self.savedir = savedir self.k = k self.epcount = 0 def reset(self): if self.epcount % self.k == 0: self.env.unwrapped.movie_path = self.savedir else: self.env.unwrapped.movie_path = None self.env.unwrapped.movie = None self.epcount += 1 return self.env.reset() class AppendTimeout(gym.Wrapper): def __init__(self, env): gym.Wrapper.__init__(self, env) self.action_space = env.action_space self.timeout_space = gym.spaces.Box(low=np.array([0.0]), high=np.array([1.0]), dtype=np.float32) self.original_os = env.observation_space if isinstance(self.original_os, gym.spaces.Dict): import copy ordered_dict = copy.deepcopy(self.original_os.spaces) ordered_dict['value_estimation_timeout'] = self.timeout_space self.observation_space = gym.spaces.Dict(ordered_dict) self.dict_mode = True else: self.observation_space = gym.spaces.Dict({ 'original': 
self.original_os, 'value_estimation_timeout': self.timeout_space }) self.dict_mode = False self.ac_count = None while 1: if not hasattr(env, "_max_episode_steps"): # Looking for TimeLimit wrapper that has this field env = env.env continue break self.timeout = env._max_episode_steps def step(self, ac): self.ac_count += 1 ob, rew, done, info = self.env.step(ac) return self._process(ob), rew, done, info def reset(self): self.ac_count = 0 return self._process(self.env.reset()) def _process(self, ob): fracmissing = 1 - self.ac_count / self.timeout if self.dict_mode: ob['value_estimation_timeout'] = fracmissing else: return { 'original': ob, 'value_estimation_timeout': fracmissing } class StartDoingRandomActionsWrapper(gym.Wrapper): """ Warning: can eat info dicts, not good if you depend on them """ def __init__(self, env, max_random_steps, on_startup=True, every_episode=False): gym.Wrapper.__init__(self, env) self.on_startup = on_startup self.every_episode = every_episode self.random_steps = max_random_steps self.last_obs = None if on_startup: self.some_random_steps() def some_random_steps(self): self.last_obs = self.env.reset() n = np.random.randint(self.random_steps) #print("running for random %i frames" % n) for _ in range(n): self.last_obs, _, done, _ = self.env.step(self.env.action_space.sample()) if done: self.last_obs = self.env.reset() def reset(self): return self.last_obs def step(self, a): self.last_obs, rew, done, info = self.env.step(a) if done: self.last_obs = self.env.reset() if self.every_episode: self.some_random_steps() return self.last_obs, rew, done, info def make_retro(*, game, state=None, max_episode_steps=4500, **kwargs): import retro if state is None: state = retro.State.DEFAULT env = retro.make(game, state, **kwargs) env = StochasticFrameSkip(env, n=4, stickprob=0.25) if max_episode_steps is not None: env = TimeLimit(env, max_episode_steps=max_episode_steps) return env def wrap_deepmind_retro(env, scale=True, frame_stack=4): """ Configure 
environment for retro games, using config similar to DeepMind-style Atari in wrap_deepmind """ env = WarpFrame(env) env = ClipRewardEnv(env) if frame_stack > 1: env = FrameStack(env, frame_stack) if scale: env = ScaledFloatFrame(env) return env class SonicDiscretizer(gym.ActionWrapper): """ Wrap a gym-retro environment and make it use discrete actions for the Sonic game. """ def __init__(self, env): super(SonicDiscretizer, self).__init__(env) buttons = ["B", "A", "MODE", "START", "UP", "DOWN", "LEFT", "RIGHT", "C", "Y", "X", "Z"] actions = [['LEFT'], ['RIGHT'], ['LEFT', 'DOWN'], ['RIGHT', 'DOWN'], ['DOWN'], ['DOWN', 'B'], ['B']] self._actions = [] for action in actions: arr = np.array([False] * 12) for button in action: arr[buttons.index(button)] = True self._actions.append(arr) self.action_space = gym.spaces.Discrete(len(self._actions)) def action(self, a): # pylint: disable=W0221 return self._actions[a].copy() class RewardScaler(gym.RewardWrapper): """ Bring rewards to a reasonable scale for PPO. This is incredibly important and effects performance drastically. """ def __init__(self, env, scale=0.01): super(RewardScaler, self).__init__(env) self.scale = scale def reward(self, reward): return reward * self.scale class AllowBacktracking(gym.Wrapper): """ Use deltas in max(X) as the reward, rather than deltas in X. This way, agents are not discouraged too heavily from exploring backwards if there is no way to advance head-on in the level. """ def __init__(self, env): super(AllowBacktracking, self).__init__(env) self._cur_x = 0 self._max_x = 0 def reset(self, **kwargs): # pylint: disable=E0202 self._cur_x = 0 self._max_x = 0 return self.env.reset(**kwargs) def step(self, action): # pylint: disable=E0202 obs, rew, done, info = self.env.step(action) self._cur_x += rew rew = max(0, self._cur_x - self._max_x) self._max_x = max(self._max_x, self._cur_x) return obs, rew, done, info
9,752
33.708185
107
py
P3O
P3O-main/baselines/common/wrappers.py
import gym class TimeLimit(gym.Wrapper): def __init__(self, env, max_episode_steps=None): super(TimeLimit, self).__init__(env) self._max_episode_steps = max_episode_steps self._elapsed_steps = 0 def step(self, ac): observation, reward, done, info = self.env.step(ac) self._elapsed_steps += 1 if self._elapsed_steps >= self._max_episode_steps: done = True info['TimeLimit.truncated'] = True return observation, reward, done, info def reset(self, **kwargs): self._elapsed_steps = 0 return self.env.reset(**kwargs) class ClipActionsWrapper(gym.Wrapper): def step(self, action): import numpy as np action = np.nan_to_num(action) action = np.clip(action, self.action_space.low, self.action_space.high) return self.env.step(action) def reset(self, **kwargs): return self.env.reset(**kwargs)
946
30.566667
79
py
P3O
P3O-main/baselines/common/segment_tree.py
import operator class SegmentTree(object): def __init__(self, capacity, operation, neutral_element): """Build a Segment Tree data structure. https://en.wikipedia.org/wiki/Segment_tree Can be used as regular array, but with two important differences: a) setting item's value is slightly slower. It is O(lg capacity) instead of O(1). b) user has access to an efficient ( O(log segment size) ) `reduce` operation which reduces `operation` over a contiguous subsequence of items in the array. Paramters --------- capacity: int Total size of the array - must be a power of two. operation: lambda obj, obj -> obj and operation for combining elements (eg. sum, max) must form a mathematical group together with the set of possible values for array elements (i.e. be associative) neutral_element: obj neutral element for the operation above. eg. float('-inf') for max and 0 for sum. """ assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2." self._capacity = capacity self._value = [neutral_element for _ in range(2 * capacity)] self._operation = operation def _reduce_helper(self, start, end, node, node_start, node_end): if start == node_start and end == node_end: return self._value[node] mid = (node_start + node_end) // 2 if end <= mid: return self._reduce_helper(start, end, 2 * node, node_start, mid) else: if mid + 1 <= start: return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end) else: return self._operation( self._reduce_helper(start, mid, 2 * node, node_start, mid), self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end) ) def reduce(self, start=0, end=None): """Returns result of applying `self.operation` to a contiguous subsequence of the array. self.operation(arr[start], operation(arr[start+1], operation(... 
arr[end]))) Parameters ---------- start: int beginning of the subsequence end: int end of the subsequences Returns ------- reduced: obj result of reducing self.operation over the specified range of array elements. """ if end is None: end = self._capacity if end < 0: end += self._capacity end -= 1 return self._reduce_helper(start, end, 1, 0, self._capacity - 1) def __setitem__(self, idx, val): # index of the leaf idx += self._capacity self._value[idx] = val idx //= 2 while idx >= 1: self._value[idx] = self._operation( self._value[2 * idx], self._value[2 * idx + 1] ) idx //= 2 def __getitem__(self, idx): assert 0 <= idx < self._capacity return self._value[self._capacity + idx] class SumSegmentTree(SegmentTree): def __init__(self, capacity): super(SumSegmentTree, self).__init__( capacity=capacity, operation=operator.add, neutral_element=0.0 ) def sum(self, start=0, end=None): """Returns arr[start] + ... + arr[end]""" return super(SumSegmentTree, self).reduce(start, end) def find_prefixsum_idx(self, prefixsum): """Find the highest index `i` in the array such that sum(arr[0] + arr[1] + ... + arr[i - i]) <= prefixsum if array values are probabilities, this function allows to sample indexes according to the discrete probability efficiently. Parameters ---------- perfixsum: float upperbound on the sum of array prefix Returns ------- idx: int highest index satisfying the prefixsum constraint """ assert 0 <= prefixsum <= self.sum() + 1e-5 idx = 1 while idx < self._capacity: # while non-leaf if self._value[2 * idx] > prefixsum: idx = 2 * idx else: prefixsum -= self._value[2 * idx] idx = 2 * idx + 1 return idx - self._capacity class MinSegmentTree(SegmentTree): def __init__(self, capacity): super(MinSegmentTree, self).__init__( capacity=capacity, operation=min, neutral_element=float('inf') ) def min(self, start=0, end=None): """Returns min(arr[start], ..., arr[end])""" return super(MinSegmentTree, self).reduce(start, end)
4,899
32.561644
109
py
P3O
P3O-main/baselines/common/policies.py
import tensorflow as tf from baselines.common import tf_util from baselines.a2c.utils import fc from baselines.common.distributions import make_pdtype from baselines.common.input import observation_placeholder, encode_observation from baselines.common.tf_util import adjust_shape from baselines.common.mpi_running_mean_std import RunningMeanStd from baselines.common.models import get_network_builder import gym class PolicyWithValue(object): """ Encapsulates fields and methods for RL policy and value function estimation with shared parameters """ def __init__(self, env, observations, latent, estimate_q=False, vf_latent=None, vf_latent2=None, sess=None, **tensors): """ Parameters: ---------- env RL environment observations tensorflow placeholder in which the observations will be fed latent latent state from which policy distribution parameters should be inferred vf_latent latent state from which value function should be inferred (if None, then latent is used) sess tensorflow session to run calculations in (if None, default session is used) **tensors tensorflow tensors for additional attributes such as state or mask """ self.X = observations self.state = tf.constant([]) self.initial_state = None self.__dict__.update(tensors) vf_latent = vf_latent if vf_latent is not None else latent vf_latent = tf.layers.flatten(vf_latent) vf_latent2 = tf.layers.flatten(vf_latent2) latent = tf.layers.flatten(latent) # Based on the action space, will select what probability distribution type self.pdtype = make_pdtype(env.action_space) self.pd, self.pi = self.pdtype.pdfromlatent(latent, init_scale=0.01) if "squash" in tensors.keys(): action = self.pd.sample() neglogp = self.pd.neglogp(action) self.neglogp = neglogp + tf.reduce_sum(2 * (tf.log(2.0) - action - tf.nn.softplus(-2 * action)), axis=1) # Squash those unbounded actions! 
self.action = tf.tanh(action)*env.action_space.high else: # Take an action self.action = self.pd.sample() # Calculate the neg log of our probability self.neglogp = self.pd.neglogp(self.action) self.sess = sess or tf.get_default_session() if estimate_q: assert isinstance(env.action_space, gym.spaces.Discrete) self.q = fc(vf_latent, 'q', env.action_space.n) self.vf = self.q # self.q2 = fc(vf_latent2, 'q2', env.action_space.n) # self.vf2 = self.q2 else: self.vf = fc(vf_latent, 'vf', 1) self.vf = self.vf[:,0] # self.vf2 = fc(vf_latent2, 'vf2', 1) # self.vf2 = self.vf2[:, 0] def _evaluate(self, variables, observation, **extra_feed): sess = self.sess feed_dict = {self.X: adjust_shape(self.X, observation)} for inpt_name, data in extra_feed.items(): if inpt_name in self.__dict__.keys(): inpt = self.__dict__[inpt_name] if isinstance(inpt, tf.Tensor) and inpt._op.type == 'Placeholder': feed_dict[inpt] = adjust_shape(inpt, data) return sess.run(variables, feed_dict) def step(self, observation, **extra_feed): """ Compute next action(s) given the observation(s) Parameters: ---------- observation observation data (either single or a batch) **extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__) Returns: ------- (action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple """ a, v, state, neglogp = self._evaluate([self.action, self.vf, self.state, self.neglogp], observation, **extra_feed) if state.size == 0: state = None return a, v, state, neglogp def value(self, ob, *args, **kwargs): """ Compute value estimate(s) given the observation(s) Parameters: ---------- observation observation data (either single or a batch) **extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__) Returns: ------- value estimate """ return self._evaluate(self.vf, ob, *args, **kwargs) def save(self, save_path): 
tf_util.save_state(save_path, sess=self.sess) def load(self, load_path): tf_util.load_state(load_path, sess=self.sess) def build_policy(env, policy_network, value_network=None, normalize_observations=False, estimate_q=False, **policy_kwargs): if isinstance(policy_network, str): network_type = policy_network policy_network = get_network_builder(network_type)(**policy_kwargs) def policy_fn(nbatch=None, nsteps=None, sess=None, observ_placeholder=None): ob_space = env.observation_space X = observ_placeholder if observ_placeholder is not None else observation_placeholder(ob_space, batch_size=nbatch) extra_tensors = {} if "squash" in policy_kwargs.keys(): extra_tensors['squash'] = policy_kwargs['squash'] if normalize_observations and X.dtype == tf.float32: encoded_x, rms = _normalize_clip_observation(X) extra_tensors['rms'] = rms else: encoded_x = X encoded_x = encode_observation(ob_space, encoded_x) with tf.variable_scope('pi', reuse=tf.AUTO_REUSE): policy_latent = policy_network(encoded_x) if isinstance(policy_latent, tuple): policy_latent, recurrent_tensors = policy_latent if recurrent_tensors is not None: # recurrent architecture, need a few more steps nenv = nbatch // nsteps assert nenv > 0, 'Bad input for recurrent policy: batch size {} smaller than nsteps {}'.format(nbatch, nsteps) policy_latent, recurrent_tensors = policy_network(encoded_x, nenv) extra_tensors.update(recurrent_tensors) _v_net = value_network if _v_net is None or _v_net == 'shared': vf_latent = policy_latent else: if _v_net == 'copy': _v_net = policy_network _v_net2 = policy_network else: assert callable(_v_net) with tf.variable_scope('vf', reuse=tf.AUTO_REUSE): # TODO recurrent architectures are not supported with value_network=copy yet vf_latent = _v_net(encoded_x) with tf.variable_scope('vf2', reuse=tf.AUTO_REUSE): # TODO recurrent architectures are not supported with value_network=copy yet vf_latent2 = _v_net2(encoded_x) policy = PolicyWithValue( env=env, observations=X, latent=policy_latent, 
vf_latent=vf_latent, vf_latent2=vf_latent2, sess=sess, estimate_q=estimate_q, **extra_tensors ) return policy return policy_fn def _normalize_clip_observation(x, clip_range=[-5.0, 5.0]): rms = RunningMeanStd(shape=x.shape[1:]) norm_x = tf.clip_by_value((x - rms.mean) / rms.std, min(clip_range), max(clip_range)) return norm_x, rms
7,682
35.240566
137
py
P3O
P3O-main/baselines/common/models.py
import numpy as np import tensorflow as tf from baselines.a2c import utils from baselines.a2c.utils import conv, fc, conv_to_fc, batch_to_seq, seq_to_batch from baselines.common.mpi_running_mean_std import RunningMeanStd mapping = {} def register(name): def _thunk(func): mapping[name] = func return func return _thunk def nature_cnn(unscaled_images, **conv_kwargs): """ CNN from Nature paper. """ scaled_images = tf.cast(unscaled_images, tf.float32) / 255. activ = tf.nn.relu h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2), **conv_kwargs)) h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs)) h3 = activ(conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs)) h3 = conv_to_fc(h3) return activ(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2))) def build_impala_cnn(unscaled_images, depths=[16,32,32], **conv_kwargs): """ Model used in the paper "IMPALA: Scalable Distributed Deep-RL with Importance Weighted Actor-Learner Architectures" https://arxiv.org/abs/1802.01561 """ layer_num = 0 def get_layer_num_str(): nonlocal layer_num num_str = str(layer_num) layer_num += 1 return num_str def conv_layer(out, depth): return tf.layers.conv2d(out, depth, 3, padding='same', name='layer_' + get_layer_num_str()) def residual_block(inputs): depth = inputs.get_shape()[-1].value out = tf.nn.relu(inputs) out = conv_layer(out, depth) out = tf.nn.relu(out) out = conv_layer(out, depth) return out + inputs def conv_sequence(inputs, depth): out = conv_layer(inputs, depth) out = tf.layers.max_pooling2d(out, pool_size=3, strides=2, padding='same') out = residual_block(out) out = residual_block(out) return out out = tf.cast(unscaled_images, tf.float32) / 255. 
for depth in depths: out = conv_sequence(out, depth) out = tf.layers.flatten(out) out = tf.nn.relu(out) out = tf.layers.dense(out, 256, activation=tf.nn.relu, name='layer_' + get_layer_num_str()) return out @register("mlp") def mlp(num_layers=2, num_hidden=64, activation=tf.tanh, layer_norm=False,**kwargs): """ Stack of fully-connected layers to be used in a policy / q-function approximator Parameters: ---------- num_layers: int number of fully-connected layers (default: 2) num_hidden: int size of fully-connected layers (default: 64) activation: activation function (default: tf.tanh) Returns: ------- function that builds fully connected network with a given input tensor / placeholder """ def network_fn(X): h = tf.layers.flatten(X) for i in range(num_layers): h = fc(h, 'mlp_fc{}'.format(i), nh=num_hidden, init_scale=np.sqrt(2)) if layer_norm: h = tf.contrib.layers.layer_norm(h, center=True, scale=True) h = activation(h) return h return network_fn @register("squash_mlp") def squash_mlp(num_layers=2, num_hidden=64, activation=tf.nn.relu, layer_norm=False): """ Stack of fully-connected layers to be used in a policy / q-function approximator Parameters: ---------- num_layers: int number of fully-connected layers (default: 2) num_hidden: int size of fully-connected layers (default: 64) activation: activation function (default: tf.tanh) Returns: ------- function that builds fully connected network with a given input tensor / placeholder """ def network_fn(X): h = tf.layers.flatten(X) for i in range(num_layers): h = fc(h, 'mlp_fc{}'.format(i), nh=num_hidden, init_scale=np.sqrt(2)) if layer_norm: h = tf.contrib.layers.layer_norm(h, center=True, scale=True) h = activation(h) return h return network_fn @register("cnn") def cnn(**conv_kwargs): def network_fn(X): return nature_cnn(X, **conv_kwargs) return network_fn @register("impala_cnn") def impala_cnn(**conv_kwargs): def network_fn(X): return build_impala_cnn(X) return network_fn @register("cnn_small") def 
cnn_small(**conv_kwargs): def network_fn(X): h = tf.cast(X, tf.float32) / 255. activ = tf.nn.relu h = activ(conv(h, 'c1', nf=8, rf=8, stride=4, init_scale=np.sqrt(2), **conv_kwargs)) h = activ(conv(h, 'c2', nf=16, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs)) h = conv_to_fc(h) h = activ(fc(h, 'fc1', nh=128, init_scale=np.sqrt(2))) return h return network_fn @register("lstm") def lstm(nlstm=128, layer_norm=False): """ Builds LSTM (Long-Short Term Memory) network to be used in a policy. Note that the resulting function returns not only the output of the LSTM (i.e. hidden state of lstm for each step in the sequence), but also a dictionary with auxiliary tensors to be set as policy attributes. Specifically, S is a placeholder to feed current state (LSTM state has to be managed outside policy) M is a placeholder for the mask (used to mask out observations after the end of the episode, but can be used for other purposes too) initial_state is a numpy array containing initial lstm state (usually zeros) state is the output LSTM state (to be fed into S at the next call) An example of usage of lstm-based policy can be found here: common/tests/test_doc_examples.py/test_lstm_example Parameters: ---------- nlstm: int LSTM hidden state size layer_norm: bool if True, layer-normalized version of LSTM is used Returns: ------- function that builds LSTM with a given input tensor / placeholder """ def network_fn(X, nenv=1): nbatch = X.shape[0] nsteps = nbatch // nenv h = tf.layers.flatten(X) M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1) S = tf.placeholder(tf.float32, [nenv, 2*nlstm]) #states xs = batch_to_seq(h, nenv, nsteps) ms = batch_to_seq(M, nenv, nsteps) if layer_norm: h5, snew = utils.lnlstm(xs, ms, S, scope='lnlstm', nh=nlstm) else: h5, snew = utils.lstm(xs, ms, S, scope='lstm', nh=nlstm) h = seq_to_batch(h5) initial_state = np.zeros(S.shape.as_list(), dtype=float) return h, {'S':S, 'M':M, 'state':snew, 'initial_state':initial_state} return network_fn 
@register("cnn_lstm") def cnn_lstm(nlstm=128, layer_norm=False, conv_fn=nature_cnn, **conv_kwargs): def network_fn(X, nenv=1): nbatch = X.shape[0] nsteps = nbatch // nenv h = conv_fn(X, **conv_kwargs) M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1) S = tf.placeholder(tf.float32, [nenv, 2*nlstm]) #states xs = batch_to_seq(h, nenv, nsteps) ms = batch_to_seq(M, nenv, nsteps) if layer_norm: h5, snew = utils.lnlstm(xs, ms, S, scope='lnlstm', nh=nlstm) else: h5, snew = utils.lstm(xs, ms, S, scope='lstm', nh=nlstm) h = seq_to_batch(h5) initial_state = np.zeros(S.shape.as_list(), dtype=float) return h, {'S':S, 'M':M, 'state':snew, 'initial_state':initial_state} return network_fn @register("impala_cnn_lstm") def impala_cnn_lstm(): return cnn_lstm(nlstm=256, conv_fn=build_impala_cnn) @register("cnn_lnlstm") def cnn_lnlstm(nlstm=128, **conv_kwargs): return cnn_lstm(nlstm, layer_norm=True, **conv_kwargs) @register("conv_only") def conv_only(convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)], **conv_kwargs): ''' convolutions-only net Parameters: ---------- conv: list of triples (filter_number, filter_size, stride) specifying parameters for each layer. Returns: function that takes tensorflow tensor as input and returns the output of the last convolutional layer ''' def network_fn(X): out = tf.cast(X, tf.float32) / 255. 
with tf.variable_scope("convnet"): for num_outputs, kernel_size, stride in convs: out = tf.contrib.layers.convolution2d(out, num_outputs=num_outputs, kernel_size=kernel_size, stride=stride, activation_fn=tf.nn.relu, **conv_kwargs) return out return network_fn def _normalize_clip_observation(x, clip_range=[-5.0, 5.0]): rms = RunningMeanStd(shape=x.shape[1:]) norm_x = tf.clip_by_value((x - rms.mean) / rms.std, min(clip_range), max(clip_range)) return norm_x, rms def get_network_builder(name): """ If you want to register your own network outside models.py, you just need: Usage Example: ------------- from baselines.common.models import register @register("your_network_name") def your_network_define(**net_kwargs): ... return network_fn """ if callable(name): return name elif name in mapping: return mapping[name] else: raise ValueError('Unknown network type: {}'.format(name))
9,515
30.098039
140
py
P3O
P3O-main/baselines/common/mpi_adam_optimizer.py
import numpy as np import tensorflow as tf from baselines.common import tf_util as U from baselines.common.tests.test_with_mpi import with_mpi from baselines import logger try: from mpi4py import MPI except ImportError: MPI = None class MpiAdamOptimizer(tf.train.AdamOptimizer): """Adam optimizer that averages gradients across mpi processes.""" def __init__(self, comm, grad_clip=None, mpi_rank_weight=1, **kwargs): self.comm = comm self.grad_clip = grad_clip self.mpi_rank_weight = mpi_rank_weight tf.train.AdamOptimizer.__init__(self, **kwargs) def compute_gradients(self, loss, var_list, **kwargs): grads_and_vars = tf.train.AdamOptimizer.compute_gradients(self, loss, var_list, **kwargs) grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None] flat_grad = tf.concat([tf.reshape(g, (-1,)) for g, v in grads_and_vars], axis=0) * self.mpi_rank_weight shapes = [v.shape.as_list() for g, v in grads_and_vars] sizes = [int(np.prod(s)) for s in shapes] total_weight = np.zeros(1, np.float32) self.comm.Allreduce(np.array([self.mpi_rank_weight], dtype=np.float32), total_weight, op=MPI.SUM) total_weight = total_weight[0] buf = np.zeros(sum(sizes), np.float32) countholder = [0] # Counts how many times _collect_grads has been called stat = tf.reduce_sum(grads_and_vars[0][1]) # sum of first variable def _collect_grads(flat_grad, np_stat): if self.grad_clip is not None: gradnorm = np.linalg.norm(flat_grad) if gradnorm > 1: flat_grad /= gradnorm logger.logkv_mean('gradnorm', gradnorm) logger.logkv_mean('gradclipfrac', float(gradnorm > 1)) self.comm.Allreduce(flat_grad, buf, op=MPI.SUM) np.divide(buf, float(total_weight), out=buf) if countholder[0] % 100 == 0: check_synced(np_stat, self.comm) countholder[0] += 1 return buf avg_flat_grad = tf.py_func(_collect_grads, [flat_grad, stat], tf.float32) avg_flat_grad.set_shape(flat_grad.shape) avg_grads = tf.split(avg_flat_grad, sizes, axis=0) avg_grads_and_vars = [(tf.reshape(g, v.shape), v) for g, (_, v) in zip(avg_grads, 
grads_and_vars)] return avg_grads_and_vars def check_synced(localval, comm=None): """ It's common to forget to initialize your variables to the same values, or (less commonly) if you update them in some other way than adam, to get them out of sync. This function checks that variables on all MPI workers are the same, and raises an AssertionError otherwise Arguments: comm: MPI communicator localval: list of local variables (list of variables on current worker to be compared with the other workers) """ comm = comm or MPI.COMM_WORLD vals = comm.gather(localval) if comm.rank == 0: assert all(val==vals[0] for val in vals[1:]),\ 'MpiAdamOptimizer detected that different workers have different weights: {}'.format(vals) @with_mpi(timeout=5) def test_nonfreeze(): np.random.seed(0) tf.set_random_seed(0) a = tf.Variable(np.random.randn(3).astype('float32')) b = tf.Variable(np.random.randn(2,5).astype('float32')) loss = tf.reduce_sum(tf.square(a)) + tf.reduce_sum(tf.sin(b)) stepsize = 1e-2 # for some reason the session config with inter_op_parallelism_threads was causing # nested sess.run calls to freeze config = tf.ConfigProto(inter_op_parallelism_threads=1) sess = U.get_session(config=config) update_op = MpiAdamOptimizer(comm=MPI.COMM_WORLD, learning_rate=stepsize).minimize(loss) sess.run(tf.global_variables_initializer()) losslist_ref = [] for i in range(100): l,_ = sess.run([loss, update_op]) print(i, l) losslist_ref.append(l)
3,976
42.703297
117
py
P3O
P3O-main/baselines/common/__init__.py
# flake8: noqa F403 from baselines.common.console_util import * from baselines.common.dataset import Dataset from baselines.common.math_util import * from baselines.common.misc_util import *
191
31
44
py
P3O
P3O-main/baselines/common/mpi_moments.py
from mpi4py import MPI import numpy as np from baselines.common import zipsame def mpi_mean(x, axis=0, comm=None, keepdims=False): x = np.asarray(x) assert x.ndim > 0 if comm is None: comm = MPI.COMM_WORLD xsum = x.sum(axis=axis, keepdims=keepdims) n = xsum.size localsum = np.zeros(n+1, x.dtype) localsum[:n] = xsum.ravel() localsum[n] = x.shape[axis] # globalsum = np.zeros_like(localsum) # comm.Allreduce(localsum, globalsum, op=MPI.SUM) globalsum = comm.allreduce(localsum, op=MPI.SUM) return globalsum[:n].reshape(xsum.shape) / globalsum[n], globalsum[n] def mpi_moments(x, axis=0, comm=None, keepdims=False): x = np.asarray(x) assert x.ndim > 0 mean, count = mpi_mean(x, axis=axis, comm=comm, keepdims=True) sqdiffs = np.square(x - mean) meansqdiff, count1 = mpi_mean(sqdiffs, axis=axis, comm=comm, keepdims=True) assert count1 == count std = np.sqrt(meansqdiff) if not keepdims: newshape = mean.shape[:axis] + mean.shape[axis+1:] mean = mean.reshape(newshape) std = std.reshape(newshape) return mean, std, count def test_runningmeanstd(): import subprocess subprocess.check_call(['mpirun', '-np', '3', 'python','-c', 'from baselines.common.mpi_moments import _helper_runningmeanstd; _helper_runningmeanstd()']) def _helper_runningmeanstd(): comm = MPI.COMM_WORLD np.random.seed(0) for (triple,axis) in [ ((np.random.randn(3), np.random.randn(4), np.random.randn(5)),0), ((np.random.randn(3,2), np.random.randn(4,2), np.random.randn(5,2)),0), ((np.random.randn(2,3), np.random.randn(2,4), np.random.randn(2,4)),1), ]: x = np.concatenate(triple, axis=axis) ms1 = [x.mean(axis=axis), x.std(axis=axis), x.shape[axis]] ms2 = mpi_moments(triple[comm.Get_rank()],axis=axis) for (a1,a2) in zipsame(ms1, ms2): print(a1, a2) assert np.allclose(a1, a2) print("ok!")
2,018
31.564516
101
py
P3O
P3O-main/baselines/common/console_util.py
from __future__ import print_function from contextlib import contextmanager import numpy as np import time import shlex import subprocess # ================================================================ # Misc # ================================================================ def fmt_row(width, row, header=False): out = " | ".join(fmt_item(x, width) for x in row) if header: out = out + "\n" + "-"*len(out) return out def fmt_item(x, l): if isinstance(x, np.ndarray): assert x.ndim==0 x = x.item() if isinstance(x, (float, np.float32, np.float64)): v = abs(x) if (v < 1e-4 or v > 1e+4) and v > 0: rep = "%7.2e" % x else: rep = "%7.5f" % x else: rep = str(x) return " "*(l - len(rep)) + rep color2num = dict( gray=30, red=31, green=32, yellow=33, blue=34, magenta=35, cyan=36, white=37, crimson=38 ) def colorize(string, color='green', bold=False, highlight=False): attr = [] num = color2num[color] if highlight: num += 10 attr.append(str(num)) if bold: attr.append('1') return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string) def print_cmd(cmd, dry=False): if isinstance(cmd, str): # for shell=True pass else: cmd = ' '.join(shlex.quote(arg) for arg in cmd) print(colorize(('CMD: ' if not dry else 'DRY: ') + cmd)) def get_git_commit(cwd=None): return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'], cwd=cwd).decode('utf8') def get_git_commit_message(cwd=None): return subprocess.check_output(['git', 'show', '-s', '--format=%B', 'HEAD'], cwd=cwd).decode('utf8') def ccap(cmd, dry=False, env=None, **kwargs): print_cmd(cmd, dry) if not dry: subprocess.check_call(cmd, env=env, **kwargs) MESSAGE_DEPTH = 0 @contextmanager def timed(msg): global MESSAGE_DEPTH #pylint: disable=W0603 print(colorize('\t'*MESSAGE_DEPTH + '=: ' + msg, color='magenta')) tstart = time.time() MESSAGE_DEPTH += 1 yield MESSAGE_DEPTH -= 1 print(colorize('\t'*MESSAGE_DEPTH + "done in %.3f seconds"%(time.time() - tstart), color='magenta'))
2,179
25.91358
104
py
P3O
P3O-main/baselines/common/cmd_util.py
""" Helpers for scripts like run_atari.py. """ import os try: from mpi4py import MPI except ImportError: MPI = None import gym from gym.wrappers import FlattenObservation, FilterObservation from baselines import logger from baselines.bench import Monitor from baselines.common import set_global_seeds from baselines.common.atari_wrappers import make_atari, wrap_deepmind from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv from baselines.common.vec_env.dummy_vec_env import DummyVecEnv from baselines.common import retro_wrappers from baselines.common.wrappers import ClipActionsWrapper def make_vec_env(env_id, env_type, num_env, seed, wrapper_kwargs=None, env_kwargs=None, start_index=0, reward_scale=1.0, flatten_dict_observations=True, gamestate=None, initializer=None, force_dummy=False): """ Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo. """ wrapper_kwargs = wrapper_kwargs or {} env_kwargs = env_kwargs or {} mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0 seed = seed + 10000 * mpi_rank if seed is not None else None logger_dir = logger.get_dir() def make_thunk(rank, initializer=None): return lambda: make_env( env_id=env_id, env_type=env_type, mpi_rank=mpi_rank, subrank=rank, seed=seed, reward_scale=reward_scale, gamestate=gamestate, flatten_dict_observations=flatten_dict_observations, wrapper_kwargs=wrapper_kwargs, env_kwargs=env_kwargs, logger_dir=logger_dir, initializer=initializer ) set_global_seeds(seed) if not force_dummy and num_env > 1: return SubprocVecEnv([make_thunk(i + start_index, initializer=initializer) for i in range(num_env)]) else: return DummyVecEnv([make_thunk(i + start_index, initializer=None) for i in range(num_env)]) def make_env(env_id, env_type, mpi_rank=0, subrank=0, seed=None, reward_scale=1.0, gamestate=None, flatten_dict_observations=True, wrapper_kwargs=None, env_kwargs=None, logger_dir=None, initializer=None): if initializer is not None: initializer(mpi_rank=mpi_rank, subrank=subrank) wrapper_kwargs = 
wrapper_kwargs or {} env_kwargs = env_kwargs or {} if ':' in env_id: import re import importlib module_name = re.sub(':.*','',env_id) env_id = re.sub('.*:', '', env_id) importlib.import_module(module_name) if env_type == 'atari': env = make_atari(env_id) elif env_type == 'retro': import retro gamestate = gamestate or retro.State.DEFAULT env = retro_wrappers.make_retro(game=env_id, max_episode_steps=10000, use_restricted_actions=retro.Actions.DISCRETE, state=gamestate) else: env = gym.make(env_id, **env_kwargs) if flatten_dict_observations and isinstance(env.observation_space, gym.spaces.Dict): env = FlattenObservation(env) env.seed(seed + subrank if seed is not None else None) env = Monitor(env, logger_dir and os.path.join(logger_dir, str(mpi_rank) + '.' + str(subrank)), allow_early_resets=True) if env_type == 'atari': env = wrap_deepmind(env, **wrapper_kwargs) elif env_type == 'retro': if 'frame_stack' not in wrapper_kwargs: wrapper_kwargs['frame_stack'] = 1 env = retro_wrappers.wrap_deepmind_retro(env, **wrapper_kwargs) if isinstance(env.action_space, gym.spaces.Box): env = ClipActionsWrapper(env) if reward_scale != 1: env = retro_wrappers.RewardScaler(env, reward_scale) return env def make_mujoco_env(env_id, seed, reward_scale=1.0): """ Create a wrapped, monitored gym.Env for MuJoCo. """ rank = MPI.COMM_WORLD.Get_rank() myseed = seed + 1000 * rank if seed is not None else None set_global_seeds(myseed) env = gym.make(env_id) logger_path = None if logger.get_dir() is None else os.path.join(logger.get_dir(), str(rank)) env = Monitor(env, logger_path, allow_early_resets=True) env.seed(seed) if reward_scale != 1.0: from baselines.common.retro_wrappers import RewardScaler env = RewardScaler(env, reward_scale) return env def make_robotics_env(env_id, seed, rank=0): """ Create a wrapped, monitored gym.Env for MuJoCo. 
""" set_global_seeds(seed) env = gym.make(env_id) env = FlattenObservation(FilterObservation(env, ['observation', 'desired_goal'])) env = Monitor( env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)), info_keywords=('is_success',)) env.seed(seed) return env def arg_parser(): """ Create an empty argparse.ArgumentParser. """ import argparse return argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) def atari_arg_parser(): """ Create an argparse.ArgumentParser for run_atari.py. """ print('Obsolete - use common_arg_parser instead') return common_arg_parser() def mujoco_arg_parser(): print('Obsolete - use common_arg_parser instead') return common_arg_parser() def common_arg_parser(): """ Create an argparse.ArgumentParser for run_mujoco.py. """ parser = arg_parser() parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2') parser.add_argument('--env_type', help='type of environment, used when the environment type cannot be automatically determined', type=str) parser.add_argument('--seed', help='RNG seed', type=int, default=None) parser.add_argument('--alg', help='Algorithm', type=str, default='ppo2') parser.add_argument('--num_timesteps', type=float, default=1e6), parser.add_argument('--network', help='network type (mlp, cnn, lstm, cnn_lstm, conv_only)', default=None) parser.add_argument('--gamestate', help='game state to load (so far only used in retro games)', default=None) parser.add_argument('--num_env', help='Number of environment copies being run in parallel. When not specified, set to number of cpus for Atari, and to 1 for Mujoco', default=None, type=int) parser.add_argument('--reward_scale', help='Reward scale factor. 
Default: 1.0', default=1.0, type=float) parser.add_argument('--save_path', help='Path to save trained model to', default=None, type=str) parser.add_argument('--save_video_interval', help='Save video every x steps (0 = disabled)', default=0, type=int) parser.add_argument('--save_video_length', help='Length of recorded video. Default: 200', default=200, type=int) parser.add_argument('--log_path', help='Directory to save learning curve data.', default=None, type=str) parser.add_argument('--play', default=False, action='store_true') return parser def robotics_arg_parser(): """ Create an argparse.ArgumentParser for run_mujoco.py. """ parser = arg_parser() parser.add_argument('--env', help='environment ID', type=str, default='FetchReach-v0') parser.add_argument('--seed', help='RNG seed', type=int, default=None) parser.add_argument('--num-timesteps', type=int, default=int(1e6)) return parser def parse_unknown_args(args): """ Parse arguments not consumed by arg parser into a dictionary """ retval = {} preceded_by_key = False for arg in args: if arg.startswith('--'): if '=' in arg: key = arg.split('=')[0][2:] value = arg.split('=')[1] retval[key] = value else: key = arg[2:] preceded_by_key = True elif preceded_by_key: retval[key] = arg preceded_by_key = False return retval
7,922
37.275362
204
py
P3O
P3O-main/baselines/common/input.py
import numpy as np import tensorflow as tf from gym.spaces import Discrete, Box, MultiDiscrete def observation_placeholder(ob_space, batch_size=None, name='Ob'): ''' Create placeholder to feed observations into of the size appropriate to the observation space Parameters: ---------- ob_space: gym.Space observation space batch_size: int size of the batch to be fed into input. Can be left None in most cases. name: str name of the placeholder Returns: ------- tensorflow placeholder tensor ''' assert isinstance(ob_space, Discrete) or isinstance(ob_space, Box) or isinstance(ob_space, MultiDiscrete), \ 'Can only deal with Discrete and Box observation spaces for now' dtype = ob_space.dtype if dtype == np.int8: dtype = np.uint8 return tf.placeholder(shape=(batch_size,) + ob_space.shape, dtype=dtype, name=name) def observation_input(ob_space, batch_size=None, name='Ob'): ''' Create placeholder to feed observations into of the size appropriate to the observation space, and add input encoder of the appropriate type. ''' placeholder = observation_placeholder(ob_space, batch_size, name) return placeholder, encode_observation(ob_space, placeholder) def encode_observation(ob_space, placeholder): ''' Encode input in the way that is appropriate to the observation space Parameters: ---------- ob_space: gym.Space observation space placeholder: tf.placeholder observation input placeholder ''' if isinstance(ob_space, Discrete): return tf.to_float(tf.one_hot(placeholder, ob_space.n)) elif isinstance(ob_space, Box): return tf.to_float(placeholder) elif isinstance(ob_space, MultiDiscrete): placeholder = tf.cast(placeholder, tf.int32) one_hots = [tf.to_float(tf.one_hot(placeholder[..., i], ob_space.nvec[i])) for i in range(placeholder.shape[-1])] return tf.concat(one_hots, axis=-1) else: raise NotImplementedError
2,071
30.876923
121
py
P3O
P3O-main/baselines/common/plot_util.py
import matplotlib.pyplot as plt import os.path as osp import json import os import numpy as np import pandas from collections import defaultdict, namedtuple from baselines.bench import monitor from baselines.logger import read_json, read_csv def smooth(y, radius, mode='two_sided', valid_only=False): ''' Smooth signal y, where radius is determines the size of the window mode='twosided': average over the window [max(index - radius, 0), min(index + radius, len(y)-1)] mode='causal': average over the window [max(index - radius, 0), index] valid_only: put nan in entries where the full-sized window is not available ''' assert mode in ('two_sided', 'causal') if len(y) < 2*radius+1: return np.ones_like(y) * y.mean() elif mode == 'two_sided': convkernel = np.ones(2 * radius+1) out = np.convolve(y, convkernel,mode='same') / np.convolve(np.ones_like(y), convkernel, mode='same') if valid_only: out[:radius] = out[-radius:] = np.nan elif mode == 'causal': convkernel = np.ones(radius) out = np.convolve(y, convkernel,mode='full') / np.convolve(np.ones_like(y), convkernel, mode='full') out = out[:-radius+1] if valid_only: out[:radius] = np.nan return out def one_sided_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8): ''' perform one-sided (causal) EMA (exponential moving average) smoothing and resampling to an even grid with n points. Does not do extrapolation, so we assume xolds[0] <= low && high <= xolds[-1] Arguments: xolds: array or list - x values of data. Needs to be sorted in ascending order yolds: array of list - y values of data. Has to have the same length as xolds low: float - min value of the new x grid. By default equals to xolds[0] high: float - max value of the new x grid. By default equals to xolds[-1] n: int - number of points in new x grid decay_steps: float - EMA decay factor, expressed in new x grid steps. 
low_counts_threshold: float or int - y values with counts less than this value will be set to NaN Returns: tuple sum_ys, count_ys where xs - array with new x grid ys - array of EMA of y at each point of the new x grid count_ys - array of EMA of y counts at each point of the new x grid ''' low = xolds[0] if low is None else low high = xolds[-1] if high is None else high assert xolds[0] <= low, 'low = {} < xolds[0] = {} - extrapolation not permitted!'.format(low, xolds[0]) assert xolds[-1] >= high, 'high = {} > xolds[-1] = {} - extrapolation not permitted!'.format(high, xolds[-1]) assert len(xolds) == len(yolds), 'length of xolds ({}) and yolds ({}) do not match!'.format(len(xolds), len(yolds)) xolds = xolds.astype('float64') yolds = yolds.astype('float64') luoi = 0 # last unused old index sum_y = 0. count_y = 0. xnews = np.linspace(low, high, n) decay_period = (high - low) / (n - 1) * decay_steps interstep_decay = np.exp(- 1. / decay_steps) sum_ys = np.zeros_like(xnews) count_ys = np.zeros_like(xnews) for i in range(n): xnew = xnews[i] sum_y *= interstep_decay count_y *= interstep_decay while True: if luoi >= len(xolds): break xold = xolds[luoi] if xold <= xnew: decay = np.exp(- (xnew - xold) / decay_period) sum_y += decay * yolds[luoi] count_y += decay luoi += 1 else: break sum_ys[i] = sum_y count_ys[i] = count_y ys = sum_ys / count_ys ys[count_ys < low_counts_threshold] = np.nan return xnews, ys, count_ys def symmetric_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8): ''' perform symmetric EMA (exponential moving average) smoothing and resampling to an even grid with n points. Does not do extrapolation, so we assume xolds[0] <= low && high <= xolds[-1] Arguments: xolds: array or list - x values of data. Needs to be sorted in ascending order yolds: array of list - y values of data. Has to have the same length as xolds low: float - min value of the new x grid. 
By default equals to xolds[0] high: float - max value of the new x grid. By default equals to xolds[-1] n: int - number of points in new x grid decay_steps: float - EMA decay factor, expressed in new x grid steps. low_counts_threshold: float or int - y values with counts less than this value will be set to NaN Returns: tuple sum_ys, count_ys where xs - array with new x grid ys - array of EMA of y at each point of the new x grid count_ys - array of EMA of y counts at each point of the new x grid ''' xs, ys1, count_ys1 = one_sided_ema(xolds, yolds, low, high, n, decay_steps, low_counts_threshold=0) _, ys2, count_ys2 = one_sided_ema(-xolds[::-1], yolds[::-1], -high, -low, n, decay_steps, low_counts_threshold=0) ys2 = ys2[::-1] count_ys2 = count_ys2[::-1] count_ys = count_ys1 + count_ys2 ys = (ys1 * count_ys1 + ys2 * count_ys2) / count_ys ys[count_ys < low_counts_threshold] = np.nan return xs, ys, count_ys Result = namedtuple('Result', 'monitor progress dirname metadata') Result.__new__.__defaults__ = (None,) * len(Result._fields) def load_results(root_dir_or_dirs, enable_progress=True, enable_monitor=True, verbose=False): ''' load summaries of runs from a list of directories (including subdirectories) Arguments: enable_progress: bool - if True, will attempt to load data from progress.csv files (data saved by logger). Default: True enable_monitor: bool - if True, will attempt to load data from monitor.csv files (data saved by Monitor environment wrapper). Default: True verbose: bool - if True, will print out list of directories from which the data is loaded. 
Default: False Returns: List of Result objects with the following fields: - dirname - path to the directory data was loaded from - metadata - run metadata (such as command-line arguments and anything else in metadata.json file - monitor - if enable_monitor is True, this field contains pandas dataframe with loaded monitor.csv file (or aggregate of all *.monitor.csv files in the directory) - progress - if enable_progress is True, this field contains pandas dataframe with loaded progress.csv file ''' import re if isinstance(root_dir_or_dirs, str): rootdirs = [osp.expanduser(root_dir_or_dirs)] else: rootdirs = [osp.expanduser(d) for d in root_dir_or_dirs] allresults = [] for rootdir in rootdirs: assert osp.exists(rootdir), "%s doesn't exist"%rootdir for dirname, dirs, files in os.walk(rootdir): if '-proc' in dirname: files[:] = [] continue monitor_re = re.compile(r'(\d+\.)?(\d+\.)?monitor\.csv') if set(['metadata.json', 'monitor.json', 'progress.json', 'progress.csv']).intersection(files) or \ any([f for f in files if monitor_re.match(f)]): # also match monitor files like 0.1.monitor.csv # used to be uncommented, which means do not go deeper than current directory if any of the data files # are found # dirs[:] = [] result = {'dirname' : dirname} if "metadata.json" in files: with open(osp.join(dirname, "metadata.json"), "r") as fh: result['metadata'] = json.load(fh) progjson = osp.join(dirname, "progress.json") progcsv = osp.join(dirname, "progress.csv") if enable_progress: if osp.exists(progjson): result['progress'] = pandas.DataFrame(read_json(progjson)) elif osp.exists(progcsv): try: print(progcsv) result['progress'] = read_csv(progcsv) except pandas.errors.EmptyDataError: print('skipping progress file in ', dirname, 'empty data') else: if verbose: print('skipping %s: no progress file'%dirname) if enable_monitor: try: result['monitor'] = pandas.DataFrame(monitor.load_results(dirname)) except monitor.LoadMonitorResultsError: print('skipping %s: no monitor 
files'%dirname) except Exception as e: print('exception loading monitor file in %s: %s'%(dirname, e)) if result.get('monitor') is not None or result.get('progress') is not None: allresults.append(Result(**result)) if verbose: print('successfully loaded %s'%dirname) if verbose: print('loaded %i results'%len(allresults)) return allresults COLORS = ['blue', 'green', 'cyan', 'magenta', 'purple', 'orange', 'teal', 'turquoise', 'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue'] MARKERS=[".", ",", "^", "1", "s", "p", "*", "+", "x", "D"] LINESTYLE=['-', '--', '-.', ':'] def set_size(w,h, ax=None): """ w, h: width, height in inches """ if not ax: ax=plt.gca() l = ax.figure.subplotpars.left r = ax.figure.subplotpars.right t = ax.figure.subplotpars.top b = ax.figure.subplotpars.bottom figw = float(w)/(r-l) figh = float(h)/(t-b) ax.figure.set_size_inches(figw, figh) def default_xy_fn(r): try: x = np.cumsum(r.monitor.l) y = smooth(r.monitor.r, radius=10) except: y = smooth(r.progress['return-average'], radius=10) x = r.progress['total-samples'] return x,y def default_split_fn(r): import re # match name between slash and -<digits> at the end of the string # (slash in the beginning or -<digits> in the end or either may be missing) match = re.search(r'[^/-]+(?=(-\d+)?\Z)', r.dirname) if match: return match.group(0) def plot_results( allresults, *, xy_fn=default_xy_fn, split_fn=default_split_fn, group_fn=default_split_fn, average_group=False, shaded_std=True, shaded_err=True, shaded_line=False, legend_outside=False, resample=0, smooth_step=1.0, xlabel=None, ylabel=None, row=1 ): ''' Plot multiple Results objects xy_fn: function Result -> x,y - function that converts results objects into tuple of x and y values. By default, x is cumsum of episode lengths, and y is episode rewards split_fn: function Result -> hashable - function that converts results objects into keys to split curves into sub-panels by. 
That is, the results r for which split_fn(r) is different will be put on different sub-panels. By default, the portion of r.dirname between last / and -<digits> is returned. The sub-panels are stacked vertically in the figure. group_fn: function Result -> hashable - function that converts results objects into keys to group curves by. That is, the results r for which group_fn(r) is the same will be put into the same group. Curves in the same group have the same color (if average_group is False), or averaged over (if average_group is True). The default value is the same as default value for split_fn average_group: bool - if True, will average the curves in the same group and plot the mean. Enables resampling (if resample = 0, will use 512 steps) shaded_std: bool - if True (default), the shaded region corresponding to standard deviation of the group of curves will be shown (only applicable if average_group = True) shaded_err: bool - if True (default), the shaded region corresponding to error in mean estimate of the group of curves (that is, standard deviation divided by square root of number of curves) will be shown (only applicable if average_group = True) figsize: tuple or None - size of the resulting figure (including sub-panels). By default, width is 6 and height is 6 times number of sub-panels. legend_outside: bool - if True, will place the legend outside of the sub-panels. resample: int - if not zero, size of the uniform grid in x direction to resample onto. Resampling is performed via symmetric EMA smoothing (see the docstring for symmetric_ema). Default is zero (no resampling). Note that if average_group is True, resampling is necessary; in that case, default value is 512. smooth_step: float - when resampling (i.e. when resample > 0 or average_group is True), use this EMA decay parameter (in units of the new grid step). See docstrings for decay_steps in symmetric_ema or one_sided_ema functions. 
''' if split_fn is None: split_fn = lambda _ : '' if group_fn is None: group_fn = lambda _ : '' sk2r = defaultdict(list) # splitkey2results for result in allresults: splitkey = split_fn(result) sk2r[splitkey].append(result) assert len(sk2r) > 0 assert isinstance(resample, int), "0: don't resample. <integer>: that many samples" ll = len(sk2r) nrows=row ncols=ll//nrows # mycyler = plt.cycler(marker=MARKERS, # linestyle=LINESTYLE) # linecycle = cycler(linestyle='-', '--', '-.', ':') # figsize = (2.7 * ncols, 2 * nrows) # figsize = (7, 5.25) f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False) # f.set_size_inches(inches, inches*0.75/ncols) groups = list(set(group_fn(result) for result in allresults)) default_samples = 512 if average_group: resample = resample or default_samples fmts=['-x', '-+', '-.', '-s','-*', '-^', ] g2ls = [] g2cs = [] for (isplit, sk) in enumerate(sk2r.keys()): # for (isplit, sk) in enumerate(['Enduro', 'Breakout', 'BeamRider', 'Ant', 'HalfCheetah', 'Walker2d']): # plt.gca().set_prop_cycle(markercycle) g2l = {} g2c = defaultdict(int) sresults = sk2r[sk] gresults = defaultdict(list) idx_row = isplit // ncols idx_col = isplit % ncols ax = axarr[idx_row][idx_col] for result in sresults: group = group_fn(result) g2c[group] += 1 x, y = xy_fn(result) if x is None: x = np.arange(len(y)) x, y = map(np.asarray, (x, y)) if average_group: gresults[group].append((x,y)) else: if resample: x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step) l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)]) g2l[group] = l if average_group: # print(sorted(groups)) for idx, group in enumerate(sorted(groups)): xys = gresults[group] if not any(xys): continue if group=='ddpo': color = 'red' fmt = '-' else: color = COLORS[idx % len(COLORS)] fmt = fmts[idx % len(fmts)] # print(groups.index(group), idx) origxs = [xy[0] for xy in xys] minxlen = min(map(len, origxs)) def allequal(qs): return all((q==qs[0]).all() for q in 
qs[1:]) if resample: print(isplit, sk) low = max(x[0] for x in origxs) high = min(x[-1] for x in origxs) usex = np.linspace(low, high, resample) ys = [] for (x, y) in xys: ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1]) else: assert allequal([x[:minxlen] for x in origxs]),\ 'If you want to average unevenly sampled data, set resample=<number of samples you want>' usex = origxs[0] ys = [xy[1][:minxlen] for xy in xys] ymean = np.mean(ys, axis=0) ystd = np.std(ys, axis=0) ystderr = ystd / np.sqrt(len(ys)) # TODO need_point=5 # axarr[idx_row][idx_col].xaxis.set_major_locator(plt.MultipleLocator(1e6)) # #把x轴的主刻度设置为3的倍数 # axarr[idx_row][idx_col].yaxis.set_major_locator(plt.MultipleLocator(1e3)) # axarr[idx_row][idx_col].ticklabel_format(style='sci',scilimits=(0,0),axis='both') l, = axarr[idx_row][idx_col].plot(usex, ymean, fmt, color=color,markevery=default_samples//need_point) # set_size(4, 3,axarr[idx_row][idx_col]) g2l[group] = l if shaded_err: if shaded_line: ax.vlines(usex[::default_samples//20], ymean - ystderr, ymean + ystderr, color=color,alpha=.5) else: ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4) if shaded_std: if shaded_line: x = usex[::default_samples//need_point] ymin = ymean - ystd ymax = ymean + ystd ax.vlines(x, ymin[::default_samples//need_point], ymax[::default_samples//need_point], color=color,alpha=.5) else: ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2) # https://matplotlib.org/users/legend_guide.html plt.tight_layout() # if any(g2l.keys()): # ll = ax.legend( # g2l.values(), # # ['%s (%i)'%(g, g2c[g]) for g in g2l] if average_group else g2l.keys(), # [g.replace('vpgkl','').replace('vpgsigmoid','') for g in g2l] if average_group else g2l.keys(), # loc=2 if legend_outside else None, # bbox_to_anchor=(1,1) if legend_outside else None) # ll.get_frame().set_alpha(None) # ll.get_frame().set_facecolor((0, 0, 1, 0)) # ll.get_frame().set_edgecolor('white') # 
ax.set_title('('+chr(isplit+97)+') '+sk, y=-0.4) ax.set_title('('+chr(isplit+97)+') '+sk) # ax.set_title(sk) # add xlabels, but only to the bottom row if xlabel is not None: for ax in axarr.flatten(): plt.sca(ax) # plt.xlabel('('+chr(id+97)+') '+xlabel) plt.xlabel('timesteps') # add ylabels, but only to left column if ylabel is not None: for ax in axarr[:,0]: plt.sca(ax) plt.ylabel(ylabel) g2ls.append(g2l) tt= {'ppo2':'PPO','ddpo':'DDPO','vpgdualclip':'dual-clip PPO', 'acktr':'ACKTR','trpo':'TRPO','a2c':'A2C'} tt_s = g2ls[0] if 'ddpo' in tt_s.keys(): ad = {'ddpo':tt_s.pop('ddpo')} ad.update(tt_s) else: ad =tt_s # legend= f.legend(ad.values(), [tt[g] if g in tt.keys() else g for g in ad],bbox_to_anchor=(0.5,-0.03), loc="lower center",bbox_transform=f.transFigure, ncol=5,borderaxespad=0) legend= axarr[0][0].legend(ad.values(), [tt[g] if g in tt.keys() else g for g in ad],borderaxespad=0) legend.get_frame().set_alpha(None) legend.get_frame().set_facecolor((0, 0, 0, 0)) legend.get_frame().set_edgecolor((0, 0, 0, 0)) return f, axarr def regression_analysis(df): xcols = list(df.columns.copy()) xcols.remove('score') ycols = ['score'] import statsmodels.api as sm mod = sm.OLS(df[ycols], sm.add_constant(df[xcols]), hasconst=False) res = mod.fit() print(res.summary()) def test_smooth(): norig = 100 nup = 300 ndown = 30 xs = np.cumsum(np.random.rand(norig) * 10 / norig) yclean = np.sin(xs) ys = yclean + .1 * np.random.randn(yclean.size) xup, yup, _ = symmetric_ema(xs, ys, xs.min(), xs.max(), nup, decay_steps=nup/ndown) xdown, ydown, _ = symmetric_ema(xs, ys, xs.min(), xs.max(), ndown, decay_steps=ndown/ndown) xsame, ysame, _ = symmetric_ema(xs, ys, xs.min(), xs.max(), norig, decay_steps=norig/ndown) plt.plot(xs, ys, label='orig', marker='x') plt.plot(xup, yup, label='up', marker='x') plt.plot(xdown, ydown, label='down', marker='x') plt.plot(xsame, ysame, label='same', marker='x') plt.plot(xs, yclean, label='clean', marker='x') plt.legend() plt.show()
21,954
42.648111
181
py
P3O
P3O-main/baselines/common/tests/test_env_after_learn.py
import pytest import gym import tensorflow as tf from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv from baselines.run import get_learn_function from baselines.common.tf_util import make_session algos = ['a2c', 'acer', 'acktr', 'deepq', 'ppo2', 'trpo_mpi'] @pytest.mark.parametrize('algo', algos) def test_env_after_learn(algo): def make_env(): # acktr requires too much RAM, fails on travis env = gym.make('CartPole-v1' if algo == 'acktr' else 'PongNoFrameskip-v4') return env make_session(make_default=True, graph=tf.Graph()) env = SubprocVecEnv([make_env]) learn = get_learn_function(algo) # Commenting out the following line resolves the issue, though crash happens at env.reset(). learn(network='mlp', env=env, total_timesteps=0, load_path=None, seed=None) env.reset() env.close()
865
29.928571
96
py
P3O
P3O-main/baselines/common/tests/test_fetchreach.py
import pytest import gym from baselines.run import get_learn_function from baselines.common.tests.util import reward_per_episode_test from baselines.common.tests import mark_slow pytest.importorskip('mujoco_py') common_kwargs = dict( network='mlp', seed=0, ) learn_kwargs = { 'her': dict(total_timesteps=2000) } @mark_slow @pytest.mark.parametrize("alg", learn_kwargs.keys()) def test_fetchreach(alg): ''' Test if the algorithm (with an mlp policy) can learn the FetchReach task ''' kwargs = common_kwargs.copy() kwargs.update(learn_kwargs[alg]) learn_fn = lambda e: get_learn_function(alg)(env=e, **kwargs) def env_fn(): env = gym.make('FetchReach-v1') env.seed(0) return env reward_per_episode_test(env_fn, learn_fn, -15) if __name__ == '__main__': test_fetchreach('her')
860
20
65
py
P3O
P3O-main/baselines/common/tests/test_with_mpi.py
import os import sys import subprocess import cloudpickle import base64 import pytest from functools import wraps try: from mpi4py import MPI except ImportError: MPI = None def with_mpi(nproc=2, timeout=30, skip_if_no_mpi=True): def outer_thunk(fn): @wraps(fn) def thunk(*args, **kwargs): serialized_fn = base64.b64encode(cloudpickle.dumps(lambda: fn(*args, **kwargs))) subprocess.check_call([ 'mpiexec','-n', str(nproc), sys.executable, '-m', 'baselines.common.tests.test_with_mpi', serialized_fn ], env=os.environ, timeout=timeout) if skip_if_no_mpi: return pytest.mark.skipif(MPI is None, reason="MPI not present")(thunk) else: return thunk return outer_thunk if __name__ == '__main__': if len(sys.argv) > 1: fn = cloudpickle.loads(base64.b64decode(sys.argv[1])) assert callable(fn) fn()
997
24.589744
92
py
P3O
P3O-main/baselines/common/tests/test_tf_util.py
# tests for tf_util import tensorflow as tf from baselines.common.tf_util import ( function, initialize, single_threaded_session ) def test_function(): with tf.Graph().as_default(): x = tf.placeholder(tf.int32, (), name="x") y = tf.placeholder(tf.int32, (), name="y") z = 3 * x + 2 * y lin = function([x, y], z, givens={y: 0}) with single_threaded_session(): initialize() assert lin(2) == 6 assert lin(x=3) == 9 assert lin(2, 2) == 10 assert lin(x=2, y=3) == 12 def test_multikwargs(): with tf.Graph().as_default(): x = tf.placeholder(tf.int32, (), name="x") with tf.variable_scope("other"): x2 = tf.placeholder(tf.int32, (), name="x") z = 3 * x + 2 * x2 lin = function([x, x2], z, givens={x2: 0}) with single_threaded_session(): initialize() assert lin(2) == 6 assert lin(2, 2) == 10 if __name__ == '__main__': test_function() test_multikwargs()
1,072
23.953488
55
py
P3O
P3O-main/baselines/common/tests/test_schedules.py
import numpy as np from baselines.common.schedules import ConstantSchedule, PiecewiseSchedule def test_piecewise_schedule(): ps = PiecewiseSchedule([(-5, 100), (5, 200), (10, 50), (100, 50), (200, -50)], outside_value=500) assert np.isclose(ps.value(-10), 500) assert np.isclose(ps.value(0), 150) assert np.isclose(ps.value(5), 200) assert np.isclose(ps.value(9), 80) assert np.isclose(ps.value(50), 50) assert np.isclose(ps.value(80), 50) assert np.isclose(ps.value(150), 0) assert np.isclose(ps.value(175), -25) assert np.isclose(ps.value(201), 500) assert np.isclose(ps.value(500), 500) assert np.isclose(ps.value(200 - 1e-10), -50) def test_constant_schedule(): cs = ConstantSchedule(5) for i in range(-100, 100): assert np.isclose(cs.value(i), 5)
823
29.518519
101
py
P3O
P3O-main/baselines/common/tests/test_identity.py
import pytest from baselines.common.tests.envs.identity_env import DiscreteIdentityEnv, BoxIdentityEnv, MultiDiscreteIdentityEnv from baselines.run import get_learn_function from baselines.common.tests.util import simple_test from baselines.common.tests import mark_slow common_kwargs = dict( total_timesteps=30000, network='mlp', gamma=0.9, seed=0, ) learn_kwargs = { 'a2c' : {}, 'acktr': {}, 'deepq': {}, 'ddpg': dict(layer_norm=True), 'ppo2': dict(lr=1e-3, nsteps=64, ent_coef=0.0), 'trpo_mpi': dict(timesteps_per_batch=100, cg_iters=10, gamma=0.9, lam=1.0, max_kl=0.01) } algos_disc = ['a2c', 'acktr', 'deepq', 'ppo2', 'trpo_mpi'] algos_multidisc = ['a2c', 'acktr', 'ppo2', 'trpo_mpi'] algos_cont = ['a2c', 'acktr', 'ddpg', 'ppo2', 'trpo_mpi'] @mark_slow @pytest.mark.parametrize("alg", algos_disc) def test_discrete_identity(alg): ''' Test if the algorithm (with an mlp policy) can learn an identity transformation (i.e. return observation as an action) ''' kwargs = learn_kwargs[alg] kwargs.update(common_kwargs) learn_fn = lambda e: get_learn_function(alg)(env=e, **kwargs) env_fn = lambda: DiscreteIdentityEnv(10, episode_len=100) simple_test(env_fn, learn_fn, 0.9) @mark_slow @pytest.mark.parametrize("alg", algos_multidisc) def test_multidiscrete_identity(alg): ''' Test if the algorithm (with an mlp policy) can learn an identity transformation (i.e. return observation as an action) ''' kwargs = learn_kwargs[alg] kwargs.update(common_kwargs) learn_fn = lambda e: get_learn_function(alg)(env=e, **kwargs) env_fn = lambda: MultiDiscreteIdentityEnv((3,3), episode_len=100) simple_test(env_fn, learn_fn, 0.9) @mark_slow @pytest.mark.parametrize("alg", algos_cont) def test_continuous_identity(alg): ''' Test if the algorithm (with an mlp policy) can learn an identity transformation (i.e. 
return observation as an action) to a required precision ''' kwargs = learn_kwargs[alg] kwargs.update(common_kwargs) learn_fn = lambda e: get_learn_function(alg)(env=e, **kwargs) env_fn = lambda: BoxIdentityEnv((1,), episode_len=100) simple_test(env_fn, learn_fn, -0.1) if __name__ == '__main__': test_multidiscrete_identity('acktr')
2,304
28.935065
114
py
P3O
P3O-main/baselines/common/tests/test_segment_tree.py
import numpy as np from baselines.common.segment_tree import SumSegmentTree, MinSegmentTree def test_tree_set(): tree = SumSegmentTree(4) tree[2] = 1.0 tree[3] = 3.0 assert np.isclose(tree.sum(), 4.0) assert np.isclose(tree.sum(0, 2), 0.0) assert np.isclose(tree.sum(0, 3), 1.0) assert np.isclose(tree.sum(2, 3), 1.0) assert np.isclose(tree.sum(2, -1), 1.0) assert np.isclose(tree.sum(2, 4), 4.0) def test_tree_set_overlap(): tree = SumSegmentTree(4) tree[2] = 1.0 tree[2] = 3.0 assert np.isclose(tree.sum(), 3.0) assert np.isclose(tree.sum(2, 3), 3.0) assert np.isclose(tree.sum(2, -1), 3.0) assert np.isclose(tree.sum(2, 4), 3.0) assert np.isclose(tree.sum(1, 2), 0.0) def test_prefixsum_idx(): tree = SumSegmentTree(4) tree[2] = 1.0 tree[3] = 3.0 assert tree.find_prefixsum_idx(0.0) == 2 assert tree.find_prefixsum_idx(0.5) == 2 assert tree.find_prefixsum_idx(0.99) == 2 assert tree.find_prefixsum_idx(1.01) == 3 assert tree.find_prefixsum_idx(3.00) == 3 assert tree.find_prefixsum_idx(4.00) == 3 def test_prefixsum_idx2(): tree = SumSegmentTree(4) tree[0] = 0.5 tree[1] = 1.0 tree[2] = 1.0 tree[3] = 3.0 assert tree.find_prefixsum_idx(0.00) == 0 assert tree.find_prefixsum_idx(0.55) == 1 assert tree.find_prefixsum_idx(0.99) == 1 assert tree.find_prefixsum_idx(1.51) == 2 assert tree.find_prefixsum_idx(3.00) == 3 assert tree.find_prefixsum_idx(5.50) == 3 def test_max_interval_tree(): tree = MinSegmentTree(4) tree[0] = 1.0 tree[2] = 0.5 tree[3] = 3.0 assert np.isclose(tree.min(), 0.5) assert np.isclose(tree.min(0, 2), 1.0) assert np.isclose(tree.min(0, 3), 0.5) assert np.isclose(tree.min(0, -1), 0.5) assert np.isclose(tree.min(2, 4), 0.5) assert np.isclose(tree.min(3, 4), 3.0) tree[2] = 0.7 assert np.isclose(tree.min(), 0.7) assert np.isclose(tree.min(0, 2), 1.0) assert np.isclose(tree.min(0, 3), 0.7) assert np.isclose(tree.min(0, -1), 0.7) assert np.isclose(tree.min(2, 4), 0.7) assert np.isclose(tree.min(3, 4), 3.0) tree[2] = 4.0 assert np.isclose(tree.min(), 1.0) assert 
np.isclose(tree.min(0, 2), 1.0) assert np.isclose(tree.min(0, 3), 1.0) assert np.isclose(tree.min(0, -1), 1.0) assert np.isclose(tree.min(2, 4), 3.0) assert np.isclose(tree.min(2, 3), 4.0) assert np.isclose(tree.min(2, -1), 4.0) assert np.isclose(tree.min(3, 4), 3.0) if __name__ == '__main__': test_tree_set() test_tree_set_overlap() test_prefixsum_idx() test_prefixsum_idx2() test_max_interval_tree()
2,691
24.884615
72
py
P3O
P3O-main/baselines/common/tests/test_mnist.py
import pytest # from baselines.acer import acer_simple as acer from baselines.common.tests.envs.mnist_env import MnistEnv from baselines.common.tests.util import simple_test from baselines.run import get_learn_function from baselines.common.tests import mark_slow # TODO investigate a2c and ppo2 failures - is it due to bad hyperparameters for this problem? # GitHub issue https://github.com/openai/baselines/issues/189 common_kwargs = { 'seed': 0, 'network':'cnn', 'gamma':0.9, 'pad':'SAME' } learn_args = { 'a2c': dict(total_timesteps=50000), 'acer': dict(total_timesteps=20000), 'deepq': dict(total_timesteps=5000), 'acktr': dict(total_timesteps=30000), 'ppo2': dict(total_timesteps=50000, lr=1e-3, nsteps=128, ent_coef=0.0), 'trpo_mpi': dict(total_timesteps=80000, timesteps_per_batch=100, cg_iters=10, lam=1.0, max_kl=0.001) } #tests pass, but are too slow on travis. Same algorithms are covered # by other tests with less compute-hungry nn's and by benchmarks @pytest.mark.skip @mark_slow @pytest.mark.parametrize("alg", learn_args.keys()) def test_mnist(alg): ''' Test if the algorithm can learn to classify MNIST digits. Uses CNN policy. ''' learn_kwargs = learn_args[alg] learn_kwargs.update(common_kwargs) learn = get_learn_function(alg) learn_fn = lambda e: learn(env=e, **learn_kwargs) env_fn = lambda: MnistEnv(episode_len=100) simple_test(env_fn, learn_fn, 0.6) if __name__ == '__main__': test_mnist('acer')
1,515
29.32
104
py
P3O
P3O-main/baselines/common/tests/util.py
import tensorflow as tf import numpy as np from baselines.common.vec_env.dummy_vec_env import DummyVecEnv N_TRIALS = 10000 N_EPISODES = 100 _sess_config = tf.ConfigProto( allow_soft_placement=True, intra_op_parallelism_threads=1, inter_op_parallelism_threads=1 ) def simple_test(env_fn, learn_fn, min_reward_fraction, n_trials=N_TRIALS): def seeded_env_fn(): env = env_fn() env.seed(0) return env np.random.seed(0) env = DummyVecEnv([seeded_env_fn]) with tf.Graph().as_default(), tf.Session(config=_sess_config).as_default(): tf.set_random_seed(0) model = learn_fn(env) sum_rew = 0 done = True for i in range(n_trials): if done: obs = env.reset() state = model.initial_state if state is not None: a, v, state, _ = model.step(obs, S=state, M=[False]) else: a, v, _, _ = model.step(obs) obs, rew, done, _ = env.step(a) sum_rew += float(rew) print("Reward in {} trials is {}".format(n_trials, sum_rew)) assert sum_rew > min_reward_fraction * n_trials, \ 'sum of rewards {} is less than {} of the total number of trials {}'.format(sum_rew, min_reward_fraction, n_trials) def reward_per_episode_test(env_fn, learn_fn, min_avg_reward, n_trials=N_EPISODES): env = DummyVecEnv([env_fn]) with tf.Graph().as_default(), tf.Session(config=_sess_config).as_default(): model = learn_fn(env) N_TRIALS = 100 observations, actions, rewards = rollout(env, model, N_TRIALS) rewards = [sum(r) for r in rewards] avg_rew = sum(rewards) / N_TRIALS print("Average reward in {} episodes is {}".format(n_trials, avg_rew)) assert avg_rew > min_avg_reward, \ 'average reward in {} episodes ({}) is less than {}'.format(n_trials, avg_rew, min_avg_reward) def rollout(env, model, n_trials): rewards = [] actions = [] observations = [] for i in range(n_trials): obs = env.reset() state = model.initial_state if hasattr(model, 'initial_state') else None episode_rew = [] episode_actions = [] episode_obs = [] while True: if state is not None: a, v, state, _ = model.step(obs, S=state, M=[False]) else: a,v, _, _ = model.step(obs) 
obs, rew, done, _ = env.step(a) episode_rew.append(rew) episode_actions.append(a) episode_obs.append(obs) if done: break rewards.append(episode_rew) actions.append(episode_actions) observations.append(episode_obs) return observations, actions, rewards def smoketest(argstr, **kwargs): import tempfile import subprocess import os argstr = 'python -m baselines.run ' + argstr for key, value in kwargs: argstr += ' --{}={}'.format(key, value) tempdir = tempfile.mkdtemp() env = os.environ.copy() env['OPENAI_LOGDIR'] = tempdir subprocess.run(argstr.split(' '), env=env) return tempdir
3,181
33.215054
127
py
P3O
P3O-main/baselines/common/tests/test_plot_util.py
# smoke tests of plot_util from baselines.common import plot_util as pu from baselines.common.tests.util import smoketest def test_plot_util(): nruns = 4 logdirs = [smoketest('--alg=ppo2 --env=CartPole-v0 --num_timesteps=10000') for _ in range(nruns)] data = pu.load_results(logdirs) assert len(data) == 4 _, axes = pu.plot_results(data[:1]); assert len(axes) == 1 _, axes = pu.plot_results(data, tiling='vertical'); assert axes.shape==(4,1) _, axes = pu.plot_results(data, tiling='horizontal'); assert axes.shape==(1,4) _, axes = pu.plot_results(data, tiling='symmetric'); assert axes.shape==(2,2) _, axes = pu.plot_results(data, split_fn=lambda _: ''); assert len(axes) == 1
717
38.888889
101
py
P3O
P3O-main/baselines/common/tests/__init__.py
import os, pytest mark_slow = pytest.mark.skipif(not os.getenv('RUNSLOW'), reason='slow')
89
44
71
py
P3O
P3O-main/baselines/common/tests/test_doc_examples.py
import pytest try: import mujoco_py _mujoco_present = True except BaseException: mujoco_py = None _mujoco_present = False @pytest.mark.skipif( not _mujoco_present, reason='error loading mujoco - either mujoco / mujoco key not present, or LD_LIBRARY_PATH is not pointing to mujoco library' ) def test_lstm_example(): import tensorflow as tf from baselines.common import policies, models, cmd_util from baselines.common.vec_env.dummy_vec_env import DummyVecEnv # create vectorized environment venv = DummyVecEnv([lambda: cmd_util.make_mujoco_env('Reacher-v2', seed=0)]) with tf.Session() as sess: # build policy based on lstm network with 128 units policy = policies.build_policy(venv, models.lstm(128))(nbatch=1, nsteps=1) # initialize tensorflow variables sess.run(tf.global_variables_initializer()) # prepare environment variables ob = venv.reset() state = policy.initial_state done = [False] step_counter = 0 # run a single episode until the end (i.e. until done) while True: action, _, state, _ = policy.step(ob, S=state, M=done) ob, reward, done, _ = venv.step(action) step_counter += 1 if done: break assert step_counter > 5
1,351
26.591837
128
py
P3O
P3O-main/baselines/common/tests/test_serialization.py
import os import gym import tempfile import pytest import tensorflow as tf import numpy as np from baselines.common.tests.envs.mnist_env import MnistEnv from baselines.common.vec_env.dummy_vec_env import DummyVecEnv from baselines.run import get_learn_function from baselines.common.tf_util import make_session, get_session from functools import partial learn_kwargs = { 'deepq': {}, 'a2c': {}, 'acktr': {}, 'acer': {}, 'ppo2': {'nminibatches': 1, 'nsteps': 10}, 'trpo_mpi': {}, } network_kwargs = { 'mlp': {}, 'cnn': {'pad': 'SAME'}, 'lstm': {}, 'cnn_lnlstm': {'pad': 'SAME'} } @pytest.mark.parametrize("learn_fn", learn_kwargs.keys()) @pytest.mark.parametrize("network_fn", network_kwargs.keys()) def test_serialization(learn_fn, network_fn): ''' Test if the trained model can be serialized ''' if network_fn.endswith('lstm') and learn_fn in ['acer', 'acktr', 'trpo_mpi', 'deepq']: # TODO make acktr work with recurrent policies # and test # github issue: https://github.com/openai/baselines/issues/660 return def make_env(): env = MnistEnv(episode_len=100) env.seed(10) return env env = DummyVecEnv([make_env]) ob = env.reset().copy() learn = get_learn_function(learn_fn) kwargs = {} kwargs.update(network_kwargs[network_fn]) kwargs.update(learn_kwargs[learn_fn]) learn = partial(learn, env=env, network=network_fn, seed=0, **kwargs) with tempfile.TemporaryDirectory() as td: model_path = os.path.join(td, 'serialization_test_model') with tf.Graph().as_default(), make_session().as_default(): model = learn(total_timesteps=100) model.save(model_path) mean1, std1 = _get_action_stats(model, ob) variables_dict1 = _serialize_variables() with tf.Graph().as_default(), make_session().as_default(): model = learn(total_timesteps=0, load_path=model_path) mean2, std2 = _get_action_stats(model, ob) variables_dict2 = _serialize_variables() for k, v in variables_dict1.items(): np.testing.assert_allclose(v, variables_dict2[k], atol=0.01, err_msg='saved and loaded variable {} value mismatch'.format(k)) 
np.testing.assert_allclose(mean1, mean2, atol=0.5) np.testing.assert_allclose(std1, std2, atol=0.5) @pytest.mark.parametrize("learn_fn", learn_kwargs.keys()) @pytest.mark.parametrize("network_fn", ['mlp']) def test_coexistence(learn_fn, network_fn): ''' Test if more than one model can exist at a time ''' if learn_fn == 'deepq': # TODO enable multiple DQN models to be useable at the same time # github issue https://github.com/openai/baselines/issues/656 return if network_fn.endswith('lstm') and learn_fn in ['acktr', 'trpo_mpi', 'deepq']: # TODO make acktr work with recurrent policies # and test # github issue: https://github.com/openai/baselines/issues/660 return env = DummyVecEnv([lambda: gym.make('CartPole-v0')]) learn = get_learn_function(learn_fn) kwargs = {} kwargs.update(network_kwargs[network_fn]) kwargs.update(learn_kwargs[learn_fn]) learn = partial(learn, env=env, network=network_fn, total_timesteps=0, **kwargs) make_session(make_default=True, graph=tf.Graph()) model1 = learn(seed=1) make_session(make_default=True, graph=tf.Graph()) model2 = learn(seed=2) model1.step(env.observation_space.sample()) model2.step(env.observation_space.sample()) def _serialize_variables(): sess = get_session() variables = tf.trainable_variables() values = sess.run(variables) return {var.name: value for var, value in zip(variables, values)} def _get_action_stats(model, ob): ntrials = 1000 if model.initial_state is None or model.initial_state == []: actions = np.array([model.step(ob)[0] for _ in range(ntrials)]) else: actions = np.array([model.step(ob, S=model.initial_state, M=[False])[0] for _ in range(ntrials)]) mean = np.mean(actions, axis=0) std = np.std(actions, axis=0) return mean, std
4,273
29.528571
105
py
P3O
P3O-main/baselines/common/tests/test_cartpole.py
import pytest import gym from baselines.run import get_learn_function from baselines.common.tests.util import reward_per_episode_test from baselines.common.tests import mark_slow common_kwargs = dict( total_timesteps=30000, network='mlp', gamma=1.0, seed=0, ) learn_kwargs = { 'a2c' : dict(nsteps=32, value_network='copy', lr=0.05), 'acer': dict(value_network='copy'), 'acktr': dict(nsteps=32, value_network='copy', is_async=False), 'deepq': dict(total_timesteps=20000), 'ppo2': dict(value_network='copy'), 'trpo_mpi': {} } @mark_slow @pytest.mark.parametrize("alg", learn_kwargs.keys()) def test_cartpole(alg): ''' Test if the algorithm (with an mlp policy) can learn to balance the cartpole ''' kwargs = common_kwargs.copy() kwargs.update(learn_kwargs[alg]) learn_fn = lambda e: get_learn_function(alg)(env=e, **kwargs) def env_fn(): env = gym.make('CartPole-v0') env.seed(0) return env reward_per_episode_test(env_fn, learn_fn, 100) if __name__ == '__main__': test_cartpole('acer')
1,098
22.891304
67
py
P3O
P3O-main/baselines/common/tests/test_fixed_sequence.py
import pytest from baselines.common.tests.envs.fixed_sequence_env import FixedSequenceEnv from baselines.common.tests.util import simple_test from baselines.run import get_learn_function from baselines.common.tests import mark_slow common_kwargs = dict( seed=0, total_timesteps=50000, ) learn_kwargs = { 'a2c': {}, 'ppo2': dict(nsteps=10, ent_coef=0.0, nminibatches=1), # TODO enable sequential models for trpo_mpi (proper handling of nbatch and nsteps) # github issue: https://github.com/openai/baselines/issues/188 # 'trpo_mpi': lambda e, p: trpo_mpi.learn(policy_fn=p(env=e), env=e, max_timesteps=30000, timesteps_per_batch=100, cg_iters=10, gamma=0.9, lam=1.0, max_kl=0.001) } alg_list = learn_kwargs.keys() rnn_list = ['lstm'] @mark_slow @pytest.mark.parametrize("alg", alg_list) @pytest.mark.parametrize("rnn", rnn_list) def test_fixed_sequence(alg, rnn): ''' Test if the algorithm (with a given policy) can learn an identity transformation (i.e. return observation as an action) ''' kwargs = learn_kwargs[alg] kwargs.update(common_kwargs) env_fn = lambda: FixedSequenceEnv(n_actions=10, episode_len=5) learn = lambda e: get_learn_function(alg)( env=e, network=rnn, **kwargs ) simple_test(env_fn, learn, 0.7) if __name__ == '__main__': test_fixed_sequence('ppo2', 'lstm')
1,389
25.226415
165
py
P3O
P3O-main/baselines/common/tests/envs/mnist_env.py
import os.path as osp import numpy as np import tempfile from gym import Env from gym.spaces import Discrete, Box class MnistEnv(Env): def __init__( self, episode_len=None, no_images=None ): import filelock from tensorflow.examples.tutorials.mnist import input_data # we could use temporary directory for this with a context manager and # TemporaryDirecotry, but then each test that uses mnist would re-download the data # this way the data is not cleaned up, but we only download it once per machine mnist_path = osp.join(tempfile.gettempdir(), 'MNIST_data') with filelock.FileLock(mnist_path + '.lock'): self.mnist = input_data.read_data_sets(mnist_path) self.np_random = np.random.RandomState() self.observation_space = Box(low=0.0, high=1.0, shape=(28,28,1)) self.action_space = Discrete(10) self.episode_len = episode_len self.time = 0 self.no_images = no_images self.train_mode() self.reset() def reset(self): self._choose_next_state() self.time = 0 return self.state[0] def step(self, actions): rew = self._get_reward(actions) self._choose_next_state() done = False if self.episode_len and self.time >= self.episode_len: rew = 0 done = True return self.state[0], rew, done, {} def seed(self, seed=None): self.np_random.seed(seed) def train_mode(self): self.dataset = self.mnist.train def test_mode(self): self.dataset = self.mnist.test def _choose_next_state(self): max_index = (self.no_images if self.no_images is not None else self.dataset.num_examples) - 1 index = self.np_random.randint(0, max_index) image = self.dataset.images[index].reshape(28,28,1)*255 label = self.dataset.labels[index] self.state = (image, label) self.time += 1 def _get_reward(self, actions): return 1 if self.state[1] == actions else 0
2,110
28.319444
101
py
P3O
P3O-main/baselines/common/tests/envs/fixed_sequence_env.py
import numpy as np from gym import Env from gym.spaces import Discrete class FixedSequenceEnv(Env): def __init__( self, n_actions=10, episode_len=100 ): self.action_space = Discrete(n_actions) self.observation_space = Discrete(1) self.np_random = np.random.RandomState(0) self.episode_len = episode_len self.sequence = [self.np_random.randint(0, self.action_space.n) for _ in range(self.episode_len)] self.time = 0 def reset(self): self.time = 0 return 0 def step(self, actions): rew = self._get_reward(actions) self._choose_next_state() done = False if self.episode_len and self.time >= self.episode_len: done = True return 0, rew, done, {} def seed(self, seed=None): self.np_random.seed(seed) def _choose_next_state(self): self.time += 1 def _get_reward(self, actions): return 1 if actions == self.sequence[self.time] else 0
1,054
22.977273
71
py
P3O
P3O-main/baselines/common/tests/envs/identity_env.py
import numpy as np from abc import abstractmethod from gym import Env from gym.spaces import MultiDiscrete, Discrete, Box from collections import deque class IdentityEnv(Env): def __init__( self, episode_len=None, delay=0, zero_first_rewards=True ): self.observation_space = self.action_space self.episode_len = episode_len self.time = 0 self.delay = delay self.zero_first_rewards = zero_first_rewards self.q = deque(maxlen=delay+1) def reset(self): self.q.clear() for _ in range(self.delay + 1): self.q.append(self.action_space.sample()) self.time = 0 return self.q[-1] def step(self, actions): rew = self._get_reward(self.q.popleft(), actions) if self.zero_first_rewards and self.time < self.delay: rew = 0 self.q.append(self.action_space.sample()) self.time += 1 done = self.episode_len is not None and self.time >= self.episode_len return self.q[-1], rew, done, {} def seed(self, seed=None): self.action_space.seed(seed) @abstractmethod def _get_reward(self, state, actions): raise NotImplementedError class DiscreteIdentityEnv(IdentityEnv): def __init__( self, dim, episode_len=None, delay=0, zero_first_rewards=True ): self.action_space = Discrete(dim) super().__init__(episode_len=episode_len, delay=delay, zero_first_rewards=zero_first_rewards) def _get_reward(self, state, actions): return 1 if state == actions else 0 class MultiDiscreteIdentityEnv(IdentityEnv): def __init__( self, dims, episode_len=None, delay=0, ): self.action_space = MultiDiscrete(dims) super().__init__(episode_len=episode_len, delay=delay) def _get_reward(self, state, actions): return 1 if all(state == actions) else 0 class BoxIdentityEnv(IdentityEnv): def __init__( self, shape, episode_len=None, ): self.action_space = Box(low=-1.0, high=1.0, shape=shape, dtype=np.float32) super().__init__(episode_len=episode_len) def _get_reward(self, state, actions): diff = actions - state diff = diff[:] return -0.5 * np.dot(diff, diff)
2,444
25.868132
101
py
P3O
P3O-main/baselines/common/tests/envs/__init__.py
0
0
0
py
P3O
P3O-main/baselines/common/tests/envs/identity_env_test.py
from baselines.common.tests.envs.identity_env import DiscreteIdentityEnv def test_discrete_nodelay(): nsteps = 100 eplen = 50 env = DiscreteIdentityEnv(10, episode_len=eplen) ob = env.reset() for t in range(nsteps): action = env.action_space.sample() next_ob, rew, done, info = env.step(action) assert rew == (1 if action == ob else 0) if (t + 1) % eplen == 0: assert done next_ob = env.reset() else: assert not done ob = next_ob def test_discrete_delay1(): eplen = 50 env = DiscreteIdentityEnv(10, episode_len=eplen, delay=1) ob = env.reset() prev_ob = None for t in range(eplen): action = env.action_space.sample() next_ob, rew, done, info = env.step(action) if t > 0: assert rew == (1 if action == prev_ob else 0) else: assert rew == 0 prev_ob = ob ob = next_ob if t < eplen - 1: assert not done assert done
1,034
26.972973
72
py
P3O
P3O-main/baselines/common/vec_env/vec_video_recorder.py
import os from baselines import logger from baselines.common.vec_env import VecEnvWrapper from gym.wrappers.monitoring import video_recorder class VecVideoRecorder(VecEnvWrapper): """ Wrap VecEnv to record rendered image as mp4 video. """ def __init__(self, venv, directory, record_video_trigger, video_length=200): """ # Arguments venv: VecEnv to wrap directory: Where to save videos record_video_trigger: Function that defines when to start recording. The function takes the current number of step, and returns whether we should start recording or not. video_length: Length of recorded video """ VecEnvWrapper.__init__(self, venv) self.record_video_trigger = record_video_trigger self.video_recorder = None self.directory = os.path.abspath(directory) if not os.path.exists(self.directory): os.mkdir(self.directory) self.file_prefix = "vecenv" self.file_infix = '{}'.format(os.getpid()) self.step_id = 0 self.video_length = video_length self.recording = False self.recorded_frames = 0 def reset(self): obs = self.venv.reset() self.start_video_recorder() return obs def start_video_recorder(self): self.close_video_recorder() base_path = os.path.join(self.directory, '{}.video.{}.video{:06}'.format(self.file_prefix, self.file_infix, self.step_id)) self.video_recorder = video_recorder.VideoRecorder( env=self.venv, base_path=base_path, metadata={'step_id': self.step_id} ) self.video_recorder.capture_frame() self.recorded_frames = 1 self.recording = True def _video_enabled(self): return self.record_video_trigger(self.step_id) def step_wait(self): obs, rews, dones, infos = self.venv.step_wait() self.step_id += 1 if self.recording: self.video_recorder.capture_frame() self.recorded_frames += 1 if self.recorded_frames > self.video_length: logger.info("Saving video to ", self.video_recorder.path) self.close_video_recorder() elif self._video_enabled(): self.start_video_recorder() return obs, rews, dones, infos def close_video_recorder(self): if self.recording: self.video_recorder.close() 
self.recording = False self.recorded_frames = 0 def close(self): VecEnvWrapper.close(self) self.close_video_recorder() def __del__(self): self.close()
2,746
29.522222
130
py
P3O
P3O-main/baselines/common/vec_env/vec_normalize.py
from . import VecEnvWrapper import numpy as np class VecNormalize(VecEnvWrapper): """ A vectorized wrapper that normalizes the observations and returns from an environment. """ def __init__(self, venv, ob=True, ret=True, clipob=10., cliprew=10., gamma=0.99, epsilon=1e-8, use_tf=False): VecEnvWrapper.__init__(self, venv) if use_tf: from baselines.common.running_mean_std import TfRunningMeanStd self.ob_rms = TfRunningMeanStd(shape=self.observation_space.shape, scope='ob_rms') if ob else None self.ret_rms = TfRunningMeanStd(shape=(), scope='ret_rms') if ret else None else: from baselines.common.running_mean_std import RunningMeanStd self.ob_rms = RunningMeanStd(shape=self.observation_space.shape) if ob else None self.ret_rms = RunningMeanStd(shape=()) if ret else None self.clipob = clipob self.cliprew = cliprew self.ret = np.zeros(self.num_envs) self.gamma = gamma self.epsilon = epsilon def step_wait(self): obs, rews, news, infos = self.venv.step_wait() self.ret = self.ret * self.gamma + rews obs = self._obfilt(obs) if self.ret_rms: self.ret_rms.update(self.ret) rews = np.clip(rews / np.sqrt(self.ret_rms.var + self.epsilon), -self.cliprew, self.cliprew) self.ret[news] = 0. return obs, rews, news, infos def _obfilt(self, obs): if self.ob_rms: self.ob_rms.update(obs) obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob) return obs else: return obs def reset(self): self.ret = np.zeros(self.num_envs) obs = self.venv.reset() return self._obfilt(obs)
1,854
37.645833
120
py
P3O
P3O-main/baselines/common/vec_env/test_vec_env.py
""" Tests for asynchronous vectorized environments. """ import gym import numpy as np import pytest from .dummy_vec_env import DummyVecEnv from .shmem_vec_env import ShmemVecEnv from .subproc_vec_env import SubprocVecEnv from baselines.common.tests.test_with_mpi import with_mpi def assert_venvs_equal(venv1, venv2, num_steps): """ Compare two environments over num_steps steps and make sure that the observations produced by each are the same when given the same actions. """ assert venv1.num_envs == venv2.num_envs assert venv1.observation_space.shape == venv2.observation_space.shape assert venv1.observation_space.dtype == venv2.observation_space.dtype assert venv1.action_space.shape == venv2.action_space.shape assert venv1.action_space.dtype == venv2.action_space.dtype try: obs1, obs2 = venv1.reset(), venv2.reset() assert np.array(obs1).shape == np.array(obs2).shape assert np.array(obs1).shape == (venv1.num_envs,) + venv1.observation_space.shape assert np.allclose(obs1, obs2) venv1.action_space.seed(1337) for _ in range(num_steps): actions = np.array([venv1.action_space.sample() for _ in range(venv1.num_envs)]) for venv in [venv1, venv2]: venv.step_async(actions) outs1 = venv1.step_wait() outs2 = venv2.step_wait() for out1, out2 in zip(outs1[:3], outs2[:3]): assert np.array(out1).shape == np.array(out2).shape assert np.allclose(out1, out2) assert list(outs1[3]) == list(outs2[3]) finally: venv1.close() venv2.close() @pytest.mark.parametrize('klass', (ShmemVecEnv, SubprocVecEnv)) @pytest.mark.parametrize('dtype', ('uint8', 'float32')) def test_vec_env(klass, dtype): # pylint: disable=R0914 """ Test that a vectorized environment is equivalent to DummyVecEnv, since DummyVecEnv is less likely to be error prone. """ num_envs = 3 num_steps = 100 shape = (3, 8) def make_fn(seed): """ Get an environment constructor with a seed. 
""" return lambda: SimpleEnv(seed, shape, dtype) fns = [make_fn(i) for i in range(num_envs)] env1 = DummyVecEnv(fns) env2 = klass(fns) assert_venvs_equal(env1, env2, num_steps=num_steps) @pytest.mark.parametrize('dtype', ('uint8', 'float32')) @pytest.mark.parametrize('num_envs_in_series', (3, 4, 6)) def test_sync_sampling(dtype, num_envs_in_series): """ Test that a SubprocVecEnv running with envs in series outputs the same as DummyVecEnv. """ num_envs = 12 num_steps = 100 shape = (3, 8) def make_fn(seed): """ Get an environment constructor with a seed. """ return lambda: SimpleEnv(seed, shape, dtype) fns = [make_fn(i) for i in range(num_envs)] env1 = DummyVecEnv(fns) env2 = SubprocVecEnv(fns, in_series=num_envs_in_series) assert_venvs_equal(env1, env2, num_steps=num_steps) @pytest.mark.parametrize('dtype', ('uint8', 'float32')) @pytest.mark.parametrize('num_envs_in_series', (3, 4, 6)) def test_sync_sampling_sanity(dtype, num_envs_in_series): """ Test that a SubprocVecEnv running with envs in series outputs the same as SubprocVecEnv without running in series. """ num_envs = 12 num_steps = 100 shape = (3, 8) def make_fn(seed): """ Get an environment constructor with a seed. """ return lambda: SimpleEnv(seed, shape, dtype) fns = [make_fn(i) for i in range(num_envs)] env1 = SubprocVecEnv(fns) env2 = SubprocVecEnv(fns, in_series=num_envs_in_series) assert_venvs_equal(env1, env2, num_steps=num_steps) class SimpleEnv(gym.Env): """ An environment with a pre-determined observation space and RNG seed. 
""" def __init__(self, seed, shape, dtype): np.random.seed(seed) self._dtype = dtype self._start_obs = np.array(np.random.randint(0, 0x100, size=shape), dtype=dtype) self._max_steps = seed + 1 self._cur_obs = None self._cur_step = 0 # this is 0xFF instead of 0x100 because the Box space includes # the high end, while randint does not self.action_space = gym.spaces.Box(low=0, high=0xFF, shape=shape, dtype=dtype) self.observation_space = self.action_space def step(self, action): self._cur_obs += np.array(action, dtype=self._dtype) self._cur_step += 1 done = self._cur_step >= self._max_steps reward = self._cur_step / self._max_steps return self._cur_obs, reward, done, {'foo': 'bar' + str(reward)} def reset(self): self._cur_obs = self._start_obs self._cur_step = 0 return self._cur_obs def render(self, mode=None): raise NotImplementedError @with_mpi() def test_mpi_with_subprocvecenv(): shape = (2,3,4) nenv = 1 venv = SubprocVecEnv([lambda: SimpleEnv(0, shape, 'float32')] * nenv) ob = venv.reset() venv.close() assert ob.shape == (nenv,) + shape
5,162
31.471698
92
py
P3O
P3O-main/baselines/common/vec_env/vec_env.py
import contextlib import os from abc import ABC, abstractmethod from baselines.common.tile_images import tile_images class AlreadySteppingError(Exception): """ Raised when an asynchronous step is running while step_async() is called again. """ def __init__(self): msg = 'already running an async step' Exception.__init__(self, msg) class NotSteppingError(Exception): """ Raised when an asynchronous step is not running but step_wait() is called. """ def __init__(self): msg = 'not running an async step' Exception.__init__(self, msg) class VecEnv(ABC): """ An abstract asynchronous, vectorized environment. Used to batch data from multiple copies of an environment, so that each observation becomes an batch of observations, and expected action is a batch of actions to be applied per-environment. """ closed = False viewer = None metadata = { 'render.modes': ['human', 'rgb_array'] } def __init__(self, num_envs, observation_space, action_space): self.num_envs = num_envs self.observation_space = observation_space self.action_space = action_space @abstractmethod def reset(self): """ Reset all the environments and return an array of observations, or a dict of observation arrays. If step_async is still doing work, that work will be cancelled and step_wait() should not be called until step_async() is invoked again. """ pass @abstractmethod def step_async(self, actions): """ Tell all the environments to start taking a step with the given actions. Call step_wait() to get the results of the step. You should not call this if a step_async run is already pending. """ pass @abstractmethod def step_wait(self): """ Wait for the step taken with step_async(). Returns (obs, rews, dones, infos): - obs: an array of observations, or a dict of arrays of observations. - rews: an array of rewards - dones: an array of "episode done" booleans - infos: a sequence of info objects """ pass def close_extras(self): """ Clean up the extra resources, beyond what's in this base class. 
Only runs when not self.closed. """ pass def close(self): if self.closed: return if self.viewer is not None: self.viewer.close() self.close_extras() self.closed = True def step(self, actions): """ Step the environments synchronously. This is available for backwards compatibility. """ self.step_async(actions) return self.step_wait() def render(self, mode='human'): imgs = self.get_images() bigimg = tile_images(imgs) if mode == 'human': self.get_viewer().imshow(bigimg) return self.get_viewer().isopen elif mode == 'rgb_array': return bigimg else: raise NotImplementedError def get_images(self): """ Return RGB images from each environment """ raise NotImplementedError @property def unwrapped(self): if isinstance(self, VecEnvWrapper): return self.venv.unwrapped else: return self def get_viewer(self): if self.viewer is None: from gym.envs.classic_control import rendering self.viewer = rendering.SimpleImageViewer() return self.viewer class VecEnvWrapper(VecEnv): """ An environment wrapper that applies to an entire batch of environments at once. 
""" def __init__(self, venv, observation_space=None, action_space=None): self.venv = venv super().__init__(num_envs=venv.num_envs, observation_space=observation_space or venv.observation_space, action_space=action_space or venv.action_space) def step_async(self, actions): self.venv.step_async(actions) @abstractmethod def reset(self): pass @abstractmethod def step_wait(self): pass def close(self): return self.venv.close() def render(self, mode='human'): return self.venv.render(mode=mode) def get_images(self): return self.venv.get_images() def __getattr__(self, name): if name.startswith('_'): raise AttributeError("attempted to get missing private attribute '{}'".format(name)) return getattr(self.venv, name) class VecEnvObservationWrapper(VecEnvWrapper): @abstractmethod def process(self, obs): pass def reset(self): obs = self.venv.reset() return self.process(obs) def step_wait(self): obs, rews, dones, infos = self.venv.step_wait() return self.process(obs), rews, dones, infos class CloudpickleWrapper(object): """ Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle) """ def __init__(self, x): self.x = x def __getstate__(self): import cloudpickle return cloudpickle.dumps(self.x) def __setstate__(self, ob): import pickle self.x = pickle.loads(ob) @contextlib.contextmanager def clear_mpi_env_vars(): """ from mpi4py import MPI will call MPI_Init by default. If the child process has MPI environment variables, MPI will think that the child process is an MPI process just like the parent and do bad things such as hang. This context manager is a hacky way to clear those environment variables temporarily such as when we are starting multiprocessing Processes. """ removed_environment = {} for k, v in list(os.environ.items()): for prefix in ['OMPI_', 'PMI_']: if k.startswith(prefix): removed_environment[k] = v del os.environ[k] try: yield finally: os.environ.update(removed_environment)
6,195
26.660714
219
py
P3O
P3O-main/baselines/common/vec_env/vec_monitor.py
from . import VecEnvWrapper from baselines.bench.monitor import ResultsWriter import numpy as np import time from collections import deque class VecMonitor(VecEnvWrapper): def __init__(self, venv, filename=None, keep_buf=0, info_keywords=()): VecEnvWrapper.__init__(self, venv) self.eprets = None self.eplens = None self.epcount = 0 self.tstart = time.time() if filename: self.results_writer = ResultsWriter(filename, header={'t_start': self.tstart}, extra_keys=info_keywords) else: self.results_writer = None self.info_keywords = info_keywords self.keep_buf = keep_buf if self.keep_buf: self.epret_buf = deque([], maxlen=keep_buf) self.eplen_buf = deque([], maxlen=keep_buf) def reset(self): obs = self.venv.reset() self.eprets = np.zeros(self.num_envs, 'f') self.eplens = np.zeros(self.num_envs, 'i') return obs def step_wait(self): obs, rews, dones, infos = self.venv.step_wait() self.eprets += rews self.eplens += 1 newinfos = list(infos[:]) for i in range(len(dones)): if dones[i]: info = infos[i].copy() ret = self.eprets[i] eplen = self.eplens[i] epinfo = {'r': ret, 'l': eplen, 't': round(time.time() - self.tstart, 6)} for k in self.info_keywords: epinfo[k] = info[k] info['episode'] = epinfo if self.keep_buf: self.epret_buf.append(ret) self.eplen_buf.append(eplen) self.epcount += 1 self.eprets[i] = 0 self.eplens[i] = 0 if self.results_writer: self.results_writer.write_row(epinfo) newinfos[i] = info return obs, rews, dones, newinfos
1,971
34.214286
90
py
P3O
P3O-main/baselines/common/vec_env/dummy_vec_env.py
import numpy as np from .vec_env import VecEnv from .util import copy_obs_dict, dict_to_obs, obs_space_info class DummyVecEnv(VecEnv): """ VecEnv that does runs multiple environments sequentially, that is, the step and reset commands are send to one environment at a time. Useful when debugging and when num_env == 1 (in the latter case, avoids communication overhead) """ def __init__(self, env_fns): """ Arguments: env_fns: iterable of callables functions that build environments """ self.envs = [fn() for fn in env_fns] env = self.envs[0] VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space) obs_space = env.observation_space self.keys, shapes, dtypes = obs_space_info(obs_space) self.buf_obs = { k: np.zeros((self.num_envs,) + tuple(shapes[k]), dtype=dtypes[k]) for k in self.keys } self.buf_dones = np.zeros((self.num_envs,), dtype=np.bool) self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32) self.buf_infos = [{} for _ in range(self.num_envs)] self.actions = None self.spec = self.envs[0].spec def step_async(self, actions): listify = True try: if len(actions) == self.num_envs: listify = False except TypeError: pass if not listify: self.actions = actions else: assert self.num_envs == 1, "actions {} is either not a list or has a wrong size - cannot match to {} environments".format(actions, self.num_envs) self.actions = [actions] def step_wait(self): for e in range(self.num_envs): action = self.actions[e] # if isinstance(self.envs[e].action_space, spaces.Discrete): # action = int(action) obs, self.buf_rews[e], self.buf_dones[e], self.buf_infos[e] = self.envs[e].step(action) if self.buf_dones[e]: obs = self.envs[e].reset() self._save_obs(e, obs) return (self._obs_from_buf(), np.copy(self.buf_rews), np.copy(self.buf_dones), self.buf_infos.copy()) def reset(self): for e in range(self.num_envs): obs = self.envs[e].reset() self._save_obs(e, obs) return self._obs_from_buf() def _save_obs(self, e, obs): for k in self.keys: if k is None: 
self.buf_obs[k][e] = obs else: self.buf_obs[k][e] = obs[k] def _obs_from_buf(self): return dict_to_obs(copy_obs_dict(self.buf_obs)) def get_images(self): return [env.render(mode='rgb_array') for env in self.envs] def render(self, mode='human'): if self.num_envs == 1: return self.envs[0].render(mode=mode) else: return super().render(mode=mode)
2,923
34.658537
157
py
P3O
P3O-main/baselines/common/vec_env/util.py
""" Helpers for dealing with vectorized environments. """ from collections import OrderedDict import gym import numpy as np def copy_obs_dict(obs): """ Deep-copy an observation dict. """ return {k: np.copy(v) for k, v in obs.items()} def dict_to_obs(obs_dict): """ Convert an observation dict into a raw array if the original observation space was not a Dict space. """ if set(obs_dict.keys()) == {None}: return obs_dict[None] return obs_dict def obs_space_info(obs_space): """ Get dict-structured information about a gym.Space. Returns: A tuple (keys, shapes, dtypes): keys: a list of dict keys. shapes: a dict mapping keys to shapes. dtypes: a dict mapping keys to dtypes. """ if isinstance(obs_space, gym.spaces.Dict): assert isinstance(obs_space.spaces, OrderedDict) subspaces = obs_space.spaces elif isinstance(obs_space, gym.spaces.Tuple): assert isinstance(obs_space.spaces, tuple) subspaces = {i: obs_space.spaces[i] for i in range(len(obs_space.spaces))} else: subspaces = {None: obs_space} keys = [] shapes = {} dtypes = {} for key, box in subspaces.items(): keys.append(key) shapes[key] = box.shape dtypes[key] = box.dtype return keys, shapes, dtypes def obs_to_dict(obs): """ Convert an observation into a dict. """ if isinstance(obs, dict): return obs return {None: obs}
1,513
23.031746
82
py
P3O
P3O-main/baselines/common/vec_env/__init__.py
from .vec_env import AlreadySteppingError, NotSteppingError, VecEnv, VecEnvWrapper, VecEnvObservationWrapper, CloudpickleWrapper from .dummy_vec_env import DummyVecEnv from .shmem_vec_env import ShmemVecEnv from .subproc_vec_env import SubprocVecEnv from .vec_frame_stack import VecFrameStack from .vec_monitor import VecMonitor from .vec_normalize import VecNormalize from .vec_remove_dict_obs import VecExtractDictObs __all__ = ['AlreadySteppingError', 'NotSteppingError', 'VecEnv', 'VecEnvWrapper', 'VecEnvObservationWrapper', 'CloudpickleWrapper', 'DummyVecEnv', 'ShmemVecEnv', 'SubprocVecEnv', 'VecFrameStack', 'VecMonitor', 'VecNormalize', 'VecExtractDictObs']
668
59.818182
246
py
P3O
P3O-main/baselines/common/vec_env/subproc_vec_env.py
import multiprocessing as mp import numpy as np from .vec_env import VecEnv, CloudpickleWrapper, clear_mpi_env_vars def worker(remote, parent_remote, env_fn_wrappers): def step_env(env, action): ob, reward, done, info = env.step(action) if done: ob = env.reset() return ob, reward, done, info parent_remote.close() envs = [env_fn_wrapper() for env_fn_wrapper in env_fn_wrappers.x] try: while True: cmd, data = remote.recv() if cmd == 'step': remote.send([step_env(env, action) for env, action in zip(envs, data)]) elif cmd == 'reset': remote.send([env.reset() for env in envs]) elif cmd == 'render': remote.send([env.render(mode='rgb_array') for env in envs]) elif cmd == 'close': remote.close() break elif cmd == 'get_spaces_spec': remote.send(CloudpickleWrapper((envs[0].observation_space, envs[0].action_space, envs[0].spec))) else: raise NotImplementedError except KeyboardInterrupt: print('SubprocVecEnv worker: got KeyboardInterrupt') finally: for env in envs: env.close() class SubprocVecEnv(VecEnv): """ VecEnv that runs multiple environments in parallel in subproceses and communicates with them via pipes. Recommended to use when num_envs > 1 and step() can be a bottleneck. """ def __init__(self, env_fns, spaces=None, context='spawn', in_series=1): """ Arguments: env_fns: iterable of callables - functions that create environments to run in subprocesses. Need to be cloud-pickleable in_series: number of environments to run in series in a single process (e.g. 
when len(env_fns) == 12 and in_series == 3, it will run 4 processes, each running 3 envs in series) """ self.waiting = False self.closed = False self.in_series = in_series nenvs = len(env_fns) assert nenvs % in_series == 0, "Number of envs must be divisible by number of envs to run in series" self.nremotes = nenvs // in_series env_fns = np.array_split(env_fns, self.nremotes) ctx = mp.get_context(context) self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(self.nremotes)]) self.ps = [ctx.Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)] for p in self.ps: p.daemon = True # if the main process crashes, we should not cause things to hang with clear_mpi_env_vars(): p.start() for remote in self.work_remotes: remote.close() self.remotes[0].send(('get_spaces_spec', None)) observation_space, action_space, self.spec = self.remotes[0].recv().x self.viewer = None VecEnv.__init__(self, nenvs, observation_space, action_space) def step_async(self, actions): self._assert_not_closed() actions = np.array_split(actions, self.nremotes) for remote, action in zip(self.remotes, actions): remote.send(('step', action)) self.waiting = True def step_wait(self): self._assert_not_closed() results = [remote.recv() for remote in self.remotes] results = _flatten_list(results) self.waiting = False obs, rews, dones, infos = zip(*results) return _flatten_obs(obs), np.stack(rews), np.stack(dones), infos def reset(self): self._assert_not_closed() for remote in self.remotes: remote.send(('reset', None)) obs = [remote.recv() for remote in self.remotes] obs = _flatten_list(obs) return _flatten_obs(obs) def close_extras(self): self.closed = True if self.waiting: for remote in self.remotes: remote.recv() for remote in self.remotes: remote.send(('close', None)) for p in self.ps: p.join() def get_images(self): self._assert_not_closed() for pipe in self.remotes: pipe.send(('render', 
None)) imgs = [pipe.recv() for pipe in self.remotes] imgs = _flatten_list(imgs) return imgs def _assert_not_closed(self): assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()" def __del__(self): if not self.closed: self.close() def _flatten_obs(obs): assert isinstance(obs, (list, tuple)) assert len(obs) > 0 if isinstance(obs[0], dict): keys = obs[0].keys() return {k: np.stack([o[k] for o in obs]) for k in keys} else: return np.stack(obs) def _flatten_list(l): assert isinstance(l, (list, tuple)) assert len(l) > 0 assert all([len(l_) > 0 for l_ in l]) return [l__ for l_ in l for l__ in l_]
5,069
35.47482
128
py
P3O
P3O-main/baselines/common/vec_env/test_video_recorder.py
""" Tests for asynchronous vectorized environments. """ import gym import pytest import os import glob import tempfile from .dummy_vec_env import DummyVecEnv from .shmem_vec_env import ShmemVecEnv from .subproc_vec_env import SubprocVecEnv from .vec_video_recorder import VecVideoRecorder @pytest.mark.parametrize('klass', (DummyVecEnv, ShmemVecEnv, SubprocVecEnv)) @pytest.mark.parametrize('num_envs', (1, 4)) @pytest.mark.parametrize('video_length', (10, 100)) @pytest.mark.parametrize('video_interval', (1, 50)) def test_video_recorder(klass, num_envs, video_length, video_interval): """ Wrap an existing VecEnv with VevVideoRecorder, Make (video_interval + video_length + 1) steps, then check that the file is present """ def make_fn(): env = gym.make('PongNoFrameskip-v4') return env fns = [make_fn for _ in range(num_envs)] env = klass(fns) with tempfile.TemporaryDirectory() as video_path: env = VecVideoRecorder(env, video_path, record_video_trigger=lambda x: x % video_interval == 0, video_length=video_length) env.reset() for _ in range(video_interval + video_length + 1): env.step([0] * num_envs) env.close() recorded_video = glob.glob(os.path.join(video_path, "*.mp4")) # first and second step assert len(recorded_video) == 2 # Files are not empty assert all(os.stat(p).st_size != 0 for p in recorded_video)
1,467
28.36
130
py
P3O
P3O-main/baselines/common/vec_env/shmem_vec_env.py
""" An interface for asynchronous vectorized environments. """ import multiprocessing as mp import numpy as np from .vec_env import VecEnv, CloudpickleWrapper, clear_mpi_env_vars import ctypes from baselines import logger from .util import dict_to_obs, obs_space_info, obs_to_dict _NP_TO_CT = {np.float32: ctypes.c_float, np.int32: ctypes.c_int32, np.int8: ctypes.c_int8, np.uint8: ctypes.c_char, np.bool: ctypes.c_bool} class ShmemVecEnv(VecEnv): """ Optimized version of SubprocVecEnv that uses shared variables to communicate observations. """ def __init__(self, env_fns, spaces=None, context='spawn'): """ If you don't specify observation_space, we'll have to create a dummy environment to get it. """ ctx = mp.get_context(context) if spaces: observation_space, action_space = spaces else: logger.log('Creating dummy env object to get spaces') with logger.scoped_configure(format_strs=[]): dummy = env_fns[0]() observation_space, action_space = dummy.observation_space, dummy.action_space dummy.close() del dummy VecEnv.__init__(self, len(env_fns), observation_space, action_space) self.obs_keys, self.obs_shapes, self.obs_dtypes = obs_space_info(observation_space) self.obs_bufs = [ {k: ctx.Array(_NP_TO_CT[self.obs_dtypes[k].type], int(np.prod(self.obs_shapes[k]))) for k in self.obs_keys} for _ in env_fns] self.parent_pipes = [] self.procs = [] with clear_mpi_env_vars(): for env_fn, obs_buf in zip(env_fns, self.obs_bufs): wrapped_fn = CloudpickleWrapper(env_fn) parent_pipe, child_pipe = ctx.Pipe() proc = ctx.Process(target=_subproc_worker, args=(child_pipe, parent_pipe, wrapped_fn, obs_buf, self.obs_shapes, self.obs_dtypes, self.obs_keys)) proc.daemon = True self.procs.append(proc) self.parent_pipes.append(parent_pipe) proc.start() child_pipe.close() self.waiting_step = False self.viewer = None def reset(self): if self.waiting_step: logger.warn('Called reset() while waiting for the step to complete') self.step_wait() for pipe in self.parent_pipes: pipe.send(('reset', None)) 
return self._decode_obses([pipe.recv() for pipe in self.parent_pipes]) def step_async(self, actions): assert len(actions) == len(self.parent_pipes) for pipe, act in zip(self.parent_pipes, actions): pipe.send(('step', act)) self.waiting_step = True def step_wait(self): outs = [pipe.recv() for pipe in self.parent_pipes] self.waiting_step = False obs, rews, dones, infos = zip(*outs) return self._decode_obses(obs), np.array(rews), np.array(dones), infos def close_extras(self): if self.waiting_step: self.step_wait() for pipe in self.parent_pipes: pipe.send(('close', None)) for pipe in self.parent_pipes: pipe.recv() pipe.close() for proc in self.procs: proc.join() def get_images(self, mode='human'): for pipe in self.parent_pipes: pipe.send(('render', None)) return [pipe.recv() for pipe in self.parent_pipes] def _decode_obses(self, obs): result = {} for k in self.obs_keys: bufs = [b[k] for b in self.obs_bufs] o = [np.frombuffer(b.get_obj(), dtype=self.obs_dtypes[k]).reshape(self.obs_shapes[k]) for b in bufs] result[k] = np.array(o) return dict_to_obs(result) def _subproc_worker(pipe, parent_pipe, env_fn_wrapper, obs_bufs, obs_shapes, obs_dtypes, keys): """ Control a single environment instance using IPC and shared memory. 
""" def _write_obs(maybe_dict_obs): flatdict = obs_to_dict(maybe_dict_obs) for k in keys: dst = obs_bufs[k].get_obj() dst_np = np.frombuffer(dst, dtype=obs_dtypes[k]).reshape(obs_shapes[k]) # pylint: disable=W0212 np.copyto(dst_np, flatdict[k]) env = env_fn_wrapper.x() parent_pipe.close() try: while True: cmd, data = pipe.recv() if cmd == 'reset': pipe.send(_write_obs(env.reset())) elif cmd == 'step': obs, reward, done, info = env.step(data) if done: obs = env.reset() pipe.send((_write_obs(obs), reward, done, info)) elif cmd == 'render': pipe.send(env.render(mode='rgb_array')) elif cmd == 'close': pipe.send(None) break else: raise RuntimeError('Got unrecognized cmd %s' % cmd) except KeyboardInterrupt: print('ShmemVecEnv worker: got KeyboardInterrupt') finally: env.close()
5,178
35.471831
129
py
P3O
P3O-main/baselines/common/vec_env/vec_frame_stack.py
from .vec_env import VecEnvWrapper import numpy as np from gym import spaces class VecFrameStack(VecEnvWrapper): def __init__(self, venv, nstack): self.venv = venv self.nstack = nstack wos = venv.observation_space # wrapped ob space low = np.repeat(wos.low, self.nstack, axis=-1) high = np.repeat(wos.high, self.nstack, axis=-1) self.stackedobs = np.zeros((venv.num_envs,) + low.shape, low.dtype) observation_space = spaces.Box(low=low, high=high, dtype=venv.observation_space.dtype) VecEnvWrapper.__init__(self, venv, observation_space=observation_space) def step_wait(self): obs, rews, news, infos = self.venv.step_wait() self.stackedobs = np.roll(self.stackedobs, shift=-1, axis=-1) for (i, new) in enumerate(news): if new: self.stackedobs[i] = 0 self.stackedobs[..., -obs.shape[-1]:] = obs return self.stackedobs, rews, news, infos def reset(self): obs = self.venv.reset() self.stackedobs[...] = 0 self.stackedobs[..., -obs.shape[-1]:] = obs return self.stackedobs
1,150
36.129032
94
py
P3O
P3O-main/baselines/common/vec_env/vec_remove_dict_obs.py
from .vec_env import VecEnvObservationWrapper class VecExtractDictObs(VecEnvObservationWrapper): def __init__(self, venv, key): self.key = key super().__init__(venv=venv, observation_space=venv.observation_space.spaces[self.key]) def process(self, obs): return obs[self.key]
321
28.272727
70
py
P3O
P3O-main/baselines/clip/clip.py
import os import time import numpy as np import os.path as osp from baselines import logger from collections import deque from baselines.common import explained_variance, set_global_seeds from baselines.common.policies import build_policy try: from mpi4py import MPI except ImportError: MPI = None from baselines.clip.runner import Runner def constfn(val): def f(_): return val return f def learn(*, network, env, total_timesteps, eval_env = None, seed=None, nsteps=2048, ent_coef=0.0, lr=3e-4, vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95, log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2, save_interval=0, beta=15, load_path=None, model_fn=None, update_fn=None, init_fn=None, mpi_rank_weight=1, comm=None, **network_kwargs): ''' Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347) Parameters: ---------- network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list) specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets. See common/models.py/lstm for more details on using recurrent nets in policies env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation. The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class. nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where nenv is number of environment copies simulated in parallel) total_timesteps: int number of timesteps (i.e. 
number of actions taken in the environment) ent_coef: float policy entropy coefficient in the optimization objective lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the training and 0 is the end of the training. vf_coef: float value function loss coefficient in the optimization objective max_grad_norm: float or None gradient norm clipping coefficient gamma: float discounting factor lam: float advantage estimation discounting factor (lambda in the paper) log_interval: int number of timesteps between logging events nminibatches: int number of training minibatches per update. For recurrent policies, should be smaller or equal than number of environments run in parallel. noptepochs: int number of training epochs per update cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training and 0 is the end of the training save_interval: int number of timesteps between saving events load_path: str path to load the model from **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network For instance, 'mlp' network architecture has arguments num_hidden and num_layers. 
''' set_global_seeds(seed) if isinstance(lr, float): lr = constfn(lr) else: assert callable(lr) if isinstance(cliprange, float): cliprange = constfn(cliprange) else: assert callable(cliprange) total_timesteps = int(total_timesteps) policy = build_policy(env, network, **network_kwargs) # Get the nb of env nenvs = env.num_envs # Get state_space and action_space ob_space = env.observation_space ac_space = env.action_space # Calculate the batch_size nbatch = nenvs * nsteps nbatch_train = nbatch // nminibatches is_mpi_root = (MPI is None or MPI.COMM_WORLD.Get_rank() == 0) # Instantiate the model object (that creates act_model and train_model) if model_fn is None: from baselines.clip.model import Model model_fn = Model model = model_fn(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nsteps, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, comm=comm, mpi_rank_weight=mpi_rank_weight) if load_path is not None: model.load(load_path) # Instantiate the runner object runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam) if eval_env is not None: eval_runner = Runner(env = eval_env, model = model, nsteps = nsteps, gamma = gamma, lam= lam) epinfobuf = deque(maxlen=100) if eval_env is not None: eval_epinfobuf = deque(maxlen=100) if init_fn is not None: init_fn() # Start total timer tfirststart = time.perf_counter() nupdates = total_timesteps//nbatch for update in range(1, nupdates+1): assert nbatch % nminibatches == 0 # Start timer tstart = time.perf_counter() frac = 1.0 - (update - 1.0) / nupdates # Calculate the learning rate lrnow = lr(frac) # Calculate the cliprange cliprangenow = cliprange(frac) if update % log_interval == 0 and is_mpi_root: logger.info('Stepping environment...') # Get minibatch obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() #pylint: disable=E0632 if eval_env is not None: eval_obs, eval_returns, eval_masks, eval_actions, eval_values, 
eval_neglogpacs, eval_states, eval_epinfos = eval_runner.run() #pylint: disable=E0632 if update % log_interval == 0 and is_mpi_root: logger.info('Done.') epinfobuf.extend(epinfos) if eval_env is not None: eval_epinfobuf.extend(eval_epinfos) # Here what we're going to do is for each minibatch calculate the loss and append it. mblossvals = [] if states is None: # nonrecurrent version # Index of each element of batch_size # Create the indices array for _ in range(noptepochs): slices = (obs, returns, masks, actions, values, neglogpacs) mblossvals.append(model.train(lrnow, cliprangenow, *slices)) else: # recurrent version assert nenvs % nminibatches == 0 envsperbatch = nenvs // nminibatches envinds = np.arange(nenvs) flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps) for _ in range(noptepochs): np.random.shuffle(envinds) for start in range(0, nenvs, envsperbatch): end = start + envsperbatch mbenvinds = envinds[start:end] mbflatinds = flatinds[mbenvinds].ravel() slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs)) mbstates = states[mbenvinds] mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates)) # Feedforward --> get losses --> update lossvals = np.mean(mblossvals, axis=0) # End timer tnow = time.perf_counter() # Calculate the fps (frame per second) fps = int(nbatch / (tnow - tstart)) if update_fn is not None: update_fn(update) if update % log_interval == 0 or update == 1: # Calculates if value function is a good predicator of the returns (ev > 1) # or if it's just worse than predicting nothing (ev =< 0) ev = explained_variance(values, returns) logger.logkv("misc/serial_timesteps", update*nsteps) logger.logkv("misc/nupdates", update) logger.logkv("misc/total_timesteps", update*nbatch) logger.logkv("fps", fps) logger.logkv("misc/explained_variance", float(ev)) logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf])) logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf])) if 
eval_env is not None: logger.logkv('eval_eprewmean', safemean([epinfo['r'] for epinfo in eval_epinfobuf]) ) logger.logkv('eval_eplenmean', safemean([epinfo['l'] for epinfo in eval_epinfobuf]) ) logger.logkv('misc/time_elapsed', tnow - tfirststart) for (lossval, lossname) in zip(lossvals, model.loss_names): logger.logkv('loss/' + lossname, lossval) logger.dumpkvs() if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir() and is_mpi_root: checkdir = osp.join(logger.get_dir(), 'checkpoints') os.makedirs(checkdir, exist_ok=True) savepath = osp.join(checkdir, '%.5i'%update) print('Saving to', savepath) model.save(savepath) return model # Avoid division error when calculate the mean (in our case if epinfo is empty returns np.nan, not return an error) def safemean(xs): return np.nan if len(xs) == 0 else np.mean(xs)
9,868
44.270642
184
py
P3O
P3O-main/baselines/clip/model.py
import numpy as np
import tensorflow as tf
import functools

from baselines.common.tf_util import get_session, save_variables, load_variables
from baselines.common.tf_util import initialize

try:
    from baselines.common.mpi_adam_optimizer import MpiAdamOptimizer
    from mpi4py import MPI
    from baselines.common.mpi_util import sync_from_root
except ImportError:
    MPI = None


class Model(object):
    """
    We use this object to :
    __init__:
    - Creates the step_model
    - Creates the train_model

    train():
    - Make the training part (feedforward and retropropagation of gradients)

    save/load():
    - Save load the model
    """
    def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train,
                 nsteps, ent_coef, vf_coef, max_grad_norm, mpi_rank_weight=1,
                 comm=None, microbatch_size=None):
        """Build the TF1 graph for a PPO-clip model.

        policy            -- factory returning a policy/value network: policy(nbatch, nsteps, sess)
        ob_space/ac_space -- unused here directly; the policy factory closes over them
        nbatch_act        -- batch size of the sampling (act) model
        nbatch_train      -- batch size of the training model
        ent_coef/vf_coef  -- entropy bonus and value-loss weights in the total loss
        max_grad_norm     -- global-norm gradient clip threshold, or None to disable
        microbatch_size   -- if set, the train model is built at this smaller batch size
        """
        self.sess = sess = get_session()

        # Fall back to the world communicator when running under MPI.
        if MPI is not None and comm is None:
            comm = MPI.COMM_WORLD

        with tf.variable_scope('clip_model', reuse=tf.AUTO_REUSE):
            # CREATE OUR TWO MODELS
            # act_model that is used for sampling
            act_model = policy(nbatch_act, 1, sess)

            # Train model for training
            if microbatch_size is None:
                train_model = policy(nbatch_train, nsteps, sess)
            else:
                train_model = policy(microbatch_size, nsteps, sess)

        # CREATE THE PLACEHOLDERS
        self.A = A = train_model.pdtype.sample_placeholder([None])
        self.ADV = ADV = tf.placeholder(tf.float32, [None])
        self.R = R = tf.placeholder(tf.float32, [None])
        # Keep track of old actor
        self.OLDNEGLOGPAC = OLDNEGLOGPAC = tf.placeholder(tf.float32, [None])
        # Keep track of old critic
        self.OLDVPRED = OLDVPRED = tf.placeholder(tf.float32, [None])
        self.LR = LR = tf.placeholder(tf.float32, [])
        # Cliprange
        self.CLIPRANGE = CLIPRANGE = tf.placeholder(tf.float32, [])

        neglogpac = train_model.pd.neglogp(A)

        # Calculate the entropy
        # Entropy is used to improve exploration by limiting the premature
        # convergence to suboptimal policy.
        entropy = tf.reduce_mean(train_model.pd.entropy())

        # CALCULATE THE LOSS
        # Total loss = Policy gradient loss - entropy * entropy coefficient
        #              + Value coefficient * value loss

        # Clip the value to reduce variability during Critic training
        # Get the predicted value
        vpred = train_model.vf
        vpredclipped = OLDVPRED + tf.clip_by_value(train_model.vf - OLDVPRED, - CLIPRANGE, CLIPRANGE)
        # Unclipped value
        vf_losses1 = tf.square(vpred - R)
        # Clipped value
        vf_losses2 = tf.square(vpredclipped - R)
        vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))

        # Calculate ratio (pi current policy / pi old policy)
        ratio = tf.exp(OLDNEGLOGPAC - neglogpac)
        # Exposed for external inspection of the new policy's neg-log-probs.
        self.NEG = neglogpac

        # Defining Loss = - J is equivalent to max J
        pg_losses = -ADV * ratio
        pg_losses2 = -ADV * tf.clip_by_value(ratio, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE)

        # Final PG loss: pessimistic (max) of the clipped and unclipped objectives.
        pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
        approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC))
        clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), CLIPRANGE)))

        # DEON metric: fraction of samples whose ratio moved in the direction
        # OPPOSITE to the advantage sign (positive adv but ratio < 1, and vice
        # versa) -- a diagnostic of update drift, not part of the loss.
        ptadv = (tf.math.sign(ADV) + 1) / 2              # indicator: ADV > 0
        nta = (-1 * tf.math.sign(ratio - 1) + 1) / 2     # indicator: ratio < 1
        ntadv = (-1 * tf.math.sign(ADV) + 1) / 2         # indicator: ADV < 0
        pta = (tf.math.sign(ratio - 1) + 1) / 2          # indicator: ratio > 1
        unnormal_pt = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), 0)) * ptadv * nta)
        unnormal_nt = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), 0)) * ntadv * pta)

        # Total loss
        loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef

        # UPDATE THE PARAMETERS USING LOSS
        # 1. Get the model parameters
        params = tf.trainable_variables('clip_model')
        # 2. Build our trainer
        if comm is not None and comm.Get_size() > 1:
            self.trainer = MpiAdamOptimizer(comm, learning_rate=LR, mpi_rank_weight=mpi_rank_weight, epsilon=1e-5)
        else:
            self.trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5)
        # 3. Calculate the gradients
        grads_and_var = self.trainer.compute_gradients(loss, params)
        grads, var = zip(*grads_and_var)

        if max_grad_norm is not None:
            # Clip the gradients (normalize)
            grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
        grads_and_var = list(zip(grads, var))
        # zip aggregate each gradient with parameters associated
        # For instance zip(ABCD, xyza) => Ax, By, Cz, Da

        self.grads = grads
        self.var = var
        self._train_op = self.trainer.apply_gradients(grads_and_var)
        self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac', 'unnormal_pt', 'unnormal_nt']
        self.stats_list = [pg_loss, vf_loss, entropy, approxkl, clipfrac, unnormal_pt, unnormal_nt]

        self.train_model = train_model
        self.act_model = act_model
        self.step = act_model.step
        self.value = act_model.value
        self.initial_state = act_model.initial_state

        self.save = functools.partial(save_variables, sess=sess)
        self.load = functools.partial(load_variables, sess=sess)

        initialize()
        global_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="")
        if MPI is not None:
            sync_from_root(sess, global_variables, comm=comm)  #pylint: disable=E1101

    def train(self, lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None):
        """Run one gradient step and return the per-batch stats.

        Returns the values of ``self.stats_list`` (ordered as
        ``self.loss_names``) for this minibatch.
        """
        # Here we calculate advantage A(s,a) = R + yV(s') - V(s)
        # Returns = R + yV(s')
        advs = returns - values

        # Normalize the advantages
        # NOTE(review): normalization deliberately disabled; kept as a toggle.
        # advs = (advs - advs.mean()) / (advs.std() + 1e-8)

        td_map = {
            self.train_model.X: obs,
            self.A: actions,
            self.ADV: advs,
            self.R: returns,
            self.LR: lr,
            self.CLIPRANGE: cliprange,
            self.OLDNEGLOGPAC: neglogpacs,
            self.OLDVPRED: values
        }
        if states is not None:
            # Recurrent policies additionally feed hidden states and episode masks.
            td_map[self.train_model.S] = states
            td_map[self.train_model.M] = masks

        # Drop the last fetch (the train op's None) and return only the stats.
        res = self.sess.run(
            self.stats_list + [self._train_op],
            td_map
        )[:-1]
        return res
7,025
35.78534
126
py