File size: 15,512 Bytes
b506011
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
from collections import Counter
from functools import reduce
import logging
from operator import add
import os
from pathlib import Path
from typing import Any, Optional

import hydra
import numpy as np
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Callback, LightningModule, seed_everything, Trainer
from pytorch_lightning.plugins import DDPPlugin
from pytorch_lightning.utilities import rank_zero_only
import torch
import torch.distributed as dist
from torch.nn import Linear

import policy_models
from policy_training.training import is_multi_gpu_training, log_rank_0

"""This script will collect data snt store it with a fixed window size"""

logger = logging.getLogger(__name__)


def merge_data(list_of_data):
    merged_data = {
        "language": {"ann": [], "task": [], "emb": []},
        "info": {"episodes": [], "indx": []},
    }
    for d in list_of_data:
        for k in d:
            for k2, v2 in d[k].items():
                if isinstance(v2, list):
                    merged_data[k][k2] += v2
                elif isinstance(v2, np.ndarray) and len(merged_data[k][k2]) == 0:
                    merged_data[k][k2] = v2
                elif isinstance(v2, np.ndarray) and len(merged_data[k][k2]) != 0:
                    merged_data[k][k2] = np.concatenate((merged_data[k][k2], v2), axis=0)
                else:
                    print(type(v2))
                    raise ValueError
    return merged_data


class Annotator(Callback):
    def __init__(self, cfg):
        self.envs = None  # type: Any
        self.cfg = cfg
        self.device = None
        self.lang_folder = cfg.lang_folder
        self.tasks = hydra.utils.instantiate(cfg.callbacks.rollout.tasks)
        self.demo_task_counter_train = Counter()
        self.demo_task_counter_val = Counter()
        self.train_dataset = None
        self.val_dataset = None
        self.file_name = "auto_lang_ann.npy"  # + save_format
        self.train_lang_folder = None
        self.val_lang_folder = None
        self.collected_data_train = {
            "language": {"ann": [], "task": [], "emb": []},
            "info": {"episodes": [], "indx": []},
        }
        self.collected_data_val = {
            "language": {"ann": [], "task": [], "emb": []},
            "info": {"episodes": [], "indx": []},
        }
        self.lang_model = None
        self.num_samples_train = None
        self.num_samples_val = None
        self.finished_annotation_val = False
        self.scene_idx_info = None

    @rank_zero_only
    def create_folders(self):
        self.train_lang_folder = self.train_dataset.abs_datasets_dir / self.lang_folder
        self.train_lang_folder.mkdir(parents=True, exist_ok=True)

        self.val_lang_folder = self.val_dataset.abs_datasets_dir / self.lang_folder
        self.val_lang_folder.mkdir(parents=True, exist_ok=True)

    @rank_zero_only
    def compute_val_embeddings(self):
        val_sent = OmegaConf.load(Path(policy_models.__file__).parent / f"../conf/annotations/{self.cfg.rollout_sentences}.yaml")
        embeddings = {}
        for task, ann in val_sent.items():
            embeddings[task] = {}
            language_embedding = self.lang_model(list(ann))
            embeddings[task]["emb"] = language_embedding.cpu().numpy()
            embeddings[task]["ann"] = ann
        np.save(self.val_lang_folder / "embeddings", embeddings)
        logger.info("Done saving val language embeddings for Rollouts !")

    def init_vars(self, trainer, pl_module):
        self.device = pl_module.device
        self.val_dataset = trainer.val_dataloaders[0].dataset.datasets["vis"]  # type: ignore
        self.train_dataset = trainer.train_dataloader.dataset.datasets["vis"]
        self.scene_idx_info = np.load(self.train_dataset.abs_datasets_dir / "scene_info.npy", allow_pickle=True).item()

        self.envs = {
            scene: hydra.utils.instantiate(
                self.cfg.callbacks.rollout.env_cfg, self.val_dataset, pl_module.device, scene=scene, cameras=()
            )
            for scene, _ in self.scene_idx_info.items()
        }
        if self.cfg.validation_scene not in self.envs:
            self.envs[self.cfg.validation_scene] = hydra.utils.instantiate(
                self.cfg.callbacks.rollout.env_cfg,
                self.val_dataset,
                pl_module.device,
                scene=self.cfg.validation_scene,
                cameras=(),
            )

        self.create_folders()
        self.lang_model = hydra.utils.instantiate(self.cfg.model)
        self.compute_val_embeddings()
        self.num_samples_train = int(self.cfg.eps * len(self.train_dataset) / len(self.cfg.annotations.keys()))
        self.num_samples_val = int(self.cfg.eps * len(self.val_dataset) / len(self.cfg.annotations.keys()))

    def on_validation_start(self, trainer: Trainer, pl_module: LightningModule, dataloader_idx: int) -> None:
        """Called when the validation loop begins."""
        if self.envs is None:
            self.init_vars(trainer, pl_module)

    def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
        if self.envs is None:
            self.init_vars(trainer, pl_module)

    def on_validation_batch_end(
        self,
        trainer: Trainer,
        pl_module: LightningModule,
        outputs: Any,
        batch: Any,
        batch_idx: int,
        dataloader_idx: int,
    ) -> None:
        batch = batch["vis"] if isinstance(batch, dict) else batch
        self.collected_data_val, self.demo_task_counter_val, current_task_counter = self.annotate(
            batch,
            self.val_dataset,
            self.collected_data_val,
            self.demo_task_counter_val,
            self.num_samples_val,
        )
        if dist.is_available() and dist.is_initialized():
            global_counters = [None for _ in range(torch.distributed.get_world_size())]
            torch.distributed.all_gather_object(global_counters, current_task_counter)
            current_task_counter = reduce(add, global_counters)
        self.demo_task_counter_val += current_task_counter
        if self.check_done(
            self.demo_task_counter_val, self.num_samples_val, batch_idx, trainer.num_val_batches[0], "val"
        ):
            print()
            print()
            print()
            logger.info("Finished annotating val dataset")
            print()
            print()
            print()
            self.finished_annotation_val = True

    def on_train_batch_end(
        self,
        trainer: Trainer,
        pl_module: LightningModule,
        outputs: Any,
        batch: Any,
        batch_idx: int,
        dataloader_idx: int,
        unused: Optional[int] = 0,
    ) -> None:
        batch = batch["vis"] if isinstance(batch, dict) else batch

        self.collected_data_train, self.demo_task_counter_train, current_task_counter = self.annotate(
            batch, self.train_dataset, self.collected_data_train, self.demo_task_counter_train, self.num_samples_train
        )
        if dist.is_available() and dist.is_initialized():
            global_counters = [None for _ in range(torch.distributed.get_world_size())]
            torch.distributed.all_gather_object(global_counters, current_task_counter)
            current_task_counter = reduce(add, global_counters)
        self.demo_task_counter_train += current_task_counter
        if self.check_done(
            self.demo_task_counter_train, self.num_samples_train, batch_idx, trainer.num_training_batches, "train"
        ):
            print()
            print()
            print()
            log_rank_0("Finished annotating train dataset")
            print()
            print()
            print()
            pl_module.finished_annotation_train = True  # type: ignore

    def on_train_epoch_end(self, trainer: Trainer, pl_module: LightningModule, unused: Optional[int] = None) -> None:
        self.save_and_postprocess(self.collected_data_train, self.train_lang_folder, "train", len(self.train_dataset))

    def on_validation_epoch_end(self, trainer: Trainer, pl_module: LightningModule, dataloader_idx: int) -> None:
        self.save_and_postprocess(self.collected_data_val, self.val_lang_folder, "val", len(self.val_dataset))

    def save_and_postprocess(self, collected_data, lang_folder, mod, length):
        if dist.is_available() and dist.is_initialized():
            global_collected_data = [None for _ in range(dist.get_world_size())]
            torch.distributed.all_gather_object(global_collected_data, collected_data)
            if dist.get_rank() == 0:
                global_collected_data = merge_data(global_collected_data)
                np.save("lang_ann", global_collected_data)
        else:
            np.save("lang_ann", collected_data)
        if self.cfg.postprocessing:
            language = collected_data["language"]["ann"]
            language_embedding = self.lang_model(language)
            collected_data["language"]["emb"] = language_embedding.cpu().numpy()
            logger.info(f"Done extracting {mod} language embeddings !")

        if dist.is_available() and dist.is_initialized():
            global_collected_data = [None for _ in range(dist.get_world_size())]
            torch.distributed.all_gather_object(global_collected_data, collected_data)
            if dist.get_rank() != 0:
                return
            collected_data = merge_data(global_collected_data)

        np.save(self.file_name, collected_data)
        np.save(lang_folder / self.file_name, collected_data)
        logger.info(f"Done saving {mod} language annotations !")

        lang_length = float(len(collected_data["language"]["ann"]))
        logger.info(
            f"\nVision Dataset contains  {length} datapoints "
            f"\nLanguage Dataset contains {lang_length} datapoints "
            f"\n    VISION --> {100.0 * length / (length + lang_length):.3f} %"
            f"\n    LANGUAGE --> {100.0 * lang_length / (length + lang_length):.3f} %"
        )

    def check_done(self, counter, num_samples, batch_idx, num_batches, mode):
        if batch_idx % 10 == 0:
            log_rank_0(f"{mode} Tasks Objective: {num_samples}")
            log_rank_0(f"Tasks Lang: {self.cfg.annotations.keys()}")
            log_rank_0(f"Tasks Annotations Progress: {counter}")
            log_rank_0(
                "Progress [ "
                + "=" * int(0.5 * 100 * batch_idx / num_batches)
                + ">"
                + "-" * int(0.5 * 100 * (num_batches - batch_idx) / num_batches)
                + str(round(100 * batch_idx / num_batches))
                + "%"
                + "]"
            )
        return len(counter.values()) >= len(self.cfg.annotations) and min(counter.values()) >= num_samples

    def select_env(self, dataset, idx):
        if "validation" in dataset.abs_datasets_dir.as_posix():
            return self.envs[self.cfg.validation_scene]
        seq_idx = dataset.episode_lookup[idx]
        for scene, interval in self.scene_idx_info.items():
            if interval[0] <= seq_idx <= interval[1]:
                return self.envs[scene]
        raise ValueError

    def annotate(self, episode, dataset, collected_data, global_task_counter, num_samples):
        state_obs, rgb_obs, depth_obs, actions, _, reset_info, idx = episode
        batch_size, seq_length = state_obs.shape[0], state_obs.shape[1]
        current_task_counter = Counter()
        for i in range(batch_size):
            env = self.select_env(dataset, idx[i])
            # reset env to state of last step in the episode (goal state)
            env.reset(reset_info, i, -1)
            goal_info = env.get_info()

            prior_steps = np.random.randint(16, 32)
            env.reset(reset_info, i, prior_steps)
            middle_info = env.get_info()

            env.reset(reset_info, i, seq_length - 16)
            close_to_end_info = env.get_info()

            # check if task was achieved in sequence
            task_info = self.tasks.get_task_info(middle_info, goal_info)
            if (
                len(task_info) != 1
                or not task_info <= self.cfg.annotations.keys()
                or len(self.tasks.get_task_info_for_set(middle_info, close_to_end_info, task_info))
            ):
                continue
            task = list(task_info)[0]
            if global_task_counter[task] + current_task_counter[task] >= num_samples:
                continue
            # reset self.env to state of first step in the episode
            env.reset(reset_info, i, 0)
            start_info = env.get_info()

            env.reset(reset_info, i, 32)
            middle_info2 = env.get_info()

            if len(self.tasks.get_task_info_for_set(start_info, goal_info, task_info)) and not len(
                self.tasks.get_task_info(start_info, middle_info2)
            ):
                start_idx = idx[i]
                window_size = seq_length
            else:
                start_idx = idx[i] + prior_steps
                window_size = seq_length - prior_steps

            # seq_length = torch.unique(actions[i], dim=0).shape[0]
            current_task_counter += Counter(task_info)
            collected_data = self.label_seq(collected_data, dataset, window_size, start_idx, task)
        return collected_data, global_task_counter, current_task_counter

    def label_seq(self, collected_data, dataset, seq_length, idx, task):
        seq_idx = dataset.episode_lookup[idx]
        collected_data["info"]["indx"].append((seq_idx, seq_idx + seq_length))
        task_lang = self.cfg.annotations[task]
        lang_ann = task_lang[np.random.randint(len(task_lang))]
        collected_data["language"]["ann"].append(lang_ann)
        collected_data["language"]["task"].append(task)
        return collected_data


class LangAnnotationModel(LightningModule):
    def __init__(self):
        super().__init__()
        self.finished_annotation_train = False
        self.dummy_net = Linear(1, 1)

    def on_train_batch_start(self, batch: Any, batch_idx: int, unused: Optional[int] = 0) -> None:
        if self.finished_annotation_train:
            return -1  # type: ignore

    def training_step(self, batch, batch_idx):
        return self.dummy_net(torch.Tensor([0.0]).to(self.device))

    def validation_step(self, *args, **kwargs):
        pass

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=0.02)


@hydra.main(config_path="../../conf", config_name="lang_ann.yaml")
def main(cfg: DictConfig) -> None:
    os.environ["TOKENIZERS_PARALLELISM"] = "true"
    # sets seeds for numpy, torch, python.random and PYTHONHASHSEED.
    seed_everything(cfg.seed)
    datamodule = hydra.utils.instantiate(cfg.datamodule)
    callbacks = Annotator(cfg)

    dummy_model = LangAnnotationModel()

    trainer_args = {
        **cfg.trainer,
        "callbacks": callbacks,
        "num_sanity_val_steps": 0,
        "max_epochs": 1,
        "progress_bar_refresh_rate": 0,
        "weights_summary": None,
    }
    # Configure multi-GPU training
    if is_multi_gpu_training(trainer_args["gpus"]):  # type: ignore
        trainer_args["accelerator"] = "ddp"
        trainer_args["plugins"] = DDPPlugin(find_unused_parameters=False)

    trainer = Trainer(**trainer_args)

    trainer.fit(dummy_model, datamodule=datamodule)
    trainer.validate(dummy_model, datamodule=datamodule)  # type: ignore


if __name__ == "__main__":
    main()