BiliSakura committed
Commit 0965e71 · verified · 1 Parent(s): ccc9716

Update all files for BitDance-ImageNet-diffusers

Files changed (1)
  1. BitDance_L_1x/transformer/dataset.py +240 -0
BitDance_L_1x/transformer/dataset.py ADDED
@@ -0,0 +1,240 @@
import contextlib
import io
import math
import os
import pickle
import tarfile
from functools import lru_cache

import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset
from torchvision.datasets import ImageFolder


@contextlib.contextmanager
def numpy_seed(seed, *addl_seeds):
    """Context manager which seeds the NumPy PRNG with the specified seed and
    restores the state afterward."""
    if seed is None:
        yield
        return

    def check_seed(s):
        assert isinstance(s, (int, np.int32, np.int64))

    check_seed(seed)
    if len(addl_seeds) > 0:
        for s in addl_seeds:
            check_seed(s)
        # Mix the additional seeds into a single deterministic seed.
        seed = int(hash((seed, *addl_seeds)) % 1e8)
    state = np.random.get_state()
    np.random.seed(seed)
    try:
        yield
    finally:
        np.random.set_state(state)

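# Illustrative sketch (not invoked by the training path): entering the context
# twice with the same seed tuple reproduces the same draws, and the global
# NumPy PRNG state is restored afterward. Seed values here are arbitrary.
def _demo_numpy_seed():
    with numpy_seed(42, 0, 17):
        a = np.random.rand(3)
    with numpy_seed(42, 0, 17):
        b = np.random.rand(3)
    assert np.allclose(a, b)  # identical seeds -> identical draws
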
def build_flat_index(outer_path: str, idx_path: str):
    if os.path.exists(idx_path):
        print(f"Index file {idx_path} already exists. Skipping index building.")
        with open(idx_path, "rb") as f:
            return pickle.load(f)
    entries = []  # (offset, size, label)
    cats = set()
    idx = 0
    with tarfile.open(outer_path, "r:") as outer:
        for sub in outer.getmembers():
            if not sub.isfile() or not sub.name.endswith(".tar"):
                continue
            outer_off = sub.offset_data
            sub_fobj = outer.extractfile(sub)
            with tarfile.open(fileobj=sub_fobj, mode="r:") as inner:
                for m in inner.getmembers():
                    if not m.isfile():
                        continue
                    # ImageNet names inner files like n01440764_10026.JPEG,
                    # so the prefix before "_" is the WordNet category id.
                    cat = m.name.split("_", 1)[0]
                    cats.add(cat)
                    # Absolute offset of the image bytes within the outer tar:
                    # data offset of the inner tar plus the member's offset
                    # inside that inner tar.
                    abs_off = outer_off + m.offset_data
                    entries.append((abs_off, m.size, cat))
                    if idx % 1000 == 1:
                        print(idx, m.name, abs_off, m.size, cat)
                    idx += 1
    sorted_cats = sorted(cats)
    cat2idx = {c: i for i, c in enumerate(sorted_cats)}

    flat = [(off, size, cat2idx[c]) for off, size, c in entries]

    # Guard against an empty dirname when idx_path has no directory component.
    os.makedirs(os.path.dirname(idx_path) or ".", exist_ok=True)
    with open(idx_path, "wb") as f:
        pickle.dump(flat, f)
    print(f"Built flat index with {len(flat)} images.")
    return flat

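# Illustrative sketch: the cached index is just a pickled list of
# (absolute_offset, size, label_index) triples, so it can be inspected
# directly. The path below is hypothetical.
def _peek_index(idx_path="/dev/shm/ILSVRC2012_img_train.tar.index"):
    with open(idx_path, "rb") as f:
        flat = pickle.load(f)
    print(len(flat), "images; first entry (offset, size, label):", flat[0])
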
class ImageNetTarDataset(Dataset):
    """
    ImageNet dataset stored in a tar file, avoiding the need to decompress the whole dataset.
    You can directly use the original tar file (ILSVRC2012_img_train.tar) downloaded from the official ImageNet website.
    The best practice is to first copy the tar file to the node's local disk or a ramdisk (like /dev/shm/) to avoid a remote I/O bottleneck.
    """

    def __init__(
        self,
        tar_file,
    ):
        self.tar_file = tar_file
        # Opened lazily in get_raw_image so each DataLoader worker gets its
        # own file handle.
        self.tar_handle = None
        self.files = build_flat_index(tar_file, tar_file + ".index")
        self.num_examples = len(self.files)

    def __len__(self):
        return self.num_examples

    def get_raw_image(self, index):
        if self.tar_handle is None:
            self.tar_handle = open(self.tar_file, "rb")

        offset, size, label = self.files[index]
        self.tar_handle.seek(offset)
        data = self.tar_handle.read(size)
        image = Image.open(io.BytesIO(data)).convert("RGB")
        return image, label

    # Small per-process cache; helps when the same index is fetched
    # repeatedly (e.g., repeated augmentations of one sample).
    @lru_cache(maxsize=16)
    def __getitem__(self, idx):
        return self.get_raw_image(idx)

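# Illustrative usage (a minimal sketch; the tar path is hypothetical):
def _demo_tar_dataset():
    ds = ImageNetTarDataset("/dev/shm/ILSVRC2012_img_train.tar")
    image, label = ds[0]  # PIL.Image in RGB mode, integer class index
    print(len(ds), image.size, label)
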
def center_crop_arr(pil_image, image_size):
    """
    Center cropping implementation from ADM.
    https://github.com/openai/guided-diffusion/blob/8fb3ad9197f16bbc40620447b2742e13458d2831/guided_diffusion/image_datasets.py#L126
    """
    while min(*pil_image.size) >= 2 * image_size:
        pil_image = pil_image.resize(
            tuple(x // 2 for x in pil_image.size), resample=Image.BOX
        )

    scale = image_size / min(*pil_image.size)
    pil_image = pil_image.resize(
        tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
    )

    arr = np.array(pil_image)
    crop_y = (arr.shape[0] - image_size) // 2
    crop_x = (arr.shape[1] - image_size) // 2
    return Image.fromarray(
        arr[crop_y : crop_y + image_size, crop_x : crop_x + image_size]
    )


def numpy_randrange(start, end):
    return int(np.random.randint(start, end))

def random_crop_arr(pil_image, image_size, min_crop_frac=0.8, max_crop_frac=1.0):
    # Sample the target size of the shorter side so that the final
    # image_size crop covers between min_crop_frac and max_crop_frac of it.
    min_smaller_dim_size = math.ceil(image_size / max_crop_frac)
    max_smaller_dim_size = math.ceil(image_size / min_crop_frac)
    smaller_dim_size = numpy_randrange(min_smaller_dim_size, max_smaller_dim_size + 1)

    # We are not on a new enough PIL to support the `reducing_gap`
    # argument, which uses BOX downsampling at powers of two first.
    # Thus, we do it by hand to improve downsample quality.
    while min(*pil_image.size) >= 2 * smaller_dim_size:
        pil_image = pil_image.resize(
            tuple(x // 2 for x in pil_image.size), resample=Image.BOX
        )

    scale = smaller_dim_size / min(*pil_image.size)
    pil_image = pil_image.resize(
        tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
    )

    arr = np.array(pil_image)
    crop_y = numpy_randrange(0, arr.shape[0] - image_size + 1)
    crop_x = numpy_randrange(0, arr.shape[1] - image_size + 1)
    return Image.fromarray(
        arr[crop_y : crop_y + image_size, crop_x : crop_x + image_size]
    )

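# Worked example of the crop-fraction bounds (illustrative): with
# image_size=256, min_crop_frac=0.8, max_crop_frac=1.0 the shorter side is
# resized to a value in [256, 320], so the 256-pixel crop covers 80-100% of it.
def _demo_crop_bounds():
    image_size, min_frac, max_frac = 256, 0.8, 1.0
    lo = math.ceil(image_size / max_frac)  # 256
    hi = math.ceil(image_size / min_frac)  # 320
    assert (lo, hi) == (256, 320)
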
def crop(pil_image, left, top, right, bottom):
    """
    Crop the image to the specified box.
    """
    return pil_image.crop((left, top, right, bottom))

class ImageCropDataset(Dataset):

    def __init__(
        self,
        raw_dataset,
        resolution,
        patch_size,
        seed=42,
    ):
        self.raw_dataset = raw_dataset
        self.resolution = resolution
        self.patch_size = patch_size
        self.aug_ratio = 1.0
        self.seed = seed
        # Default to epoch 0 so numpy_seed receives an int even before
        # set_epoch() is first called.
        self.epoch = 0

    def set_epoch(self, epoch):
        self.epoch = epoch

    def set_aug_ratio(self, aug_ratio):
        self.aug_ratio = aug_ratio

    def __len__(self):
        return len(self.raw_dataset)

    def crop_and_flip(self, image):
        is_aug = np.random.rand() < self.aug_ratio
        if not is_aug:
            image = center_crop_arr(image, self.resolution)
        else:
            image = random_crop_arr(image, self.resolution)

        arr = np.asarray(image)

        is_flip = int(np.random.randint(0, 2))
        if is_flip == 1:
            # horizontal flip
            arr = arr[:, ::-1, :]

        return arr.transpose(2, 0, 1)  # HWC to CHW

    def __getitem__(self, idx):
        # Seed all augmentation randomness with (seed, epoch, idx) so each
        # sample's crop and flip are reproducible within an epoch.
        with numpy_seed(self.seed, self.epoch, idx):
            image, label = self.raw_dataset[idx]
            samples = self.crop_and_flip(image)
            # to [-1, 1]
            samples = (samples.astype(np.float32) / 255.0 - 0.5) * 2.0
            samples = torch.from_numpy(samples).float()
            return (
                samples,
                torch.tensor(label).long(),
            )

def build_dataset(args):
    # use the tarred ImageNet dataset if data_path ends with .tar
    raw_dataset = (
        ImageNetTarDataset(args.data_path)
        if args.data_path.endswith(".tar")
        else ImageFolder(args.data_path)
    )
    return ImageCropDataset(
        raw_dataset,
        args.image_size,
        args.patch_size,
        seed=getattr(args, "global_seed", 42),
    )
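

# Illustrative end-to-end usage (a minimal sketch; attribute names follow
# what build_dataset reads from args, the path and values are hypothetical):
def _demo_build_dataset():
    from argparse import Namespace

    from torch.utils.data import DataLoader

    args = Namespace(
        data_path="/dev/shm/ILSVRC2012_img_train.tar",
        image_size=256,
        patch_size=16,
        global_seed=42,
    )
    dataset = build_dataset(args)
    dataset.set_epoch(0)  # keep augmentations reproducible per epoch
    loader = DataLoader(dataset, batch_size=8, num_workers=4)
    images, labels = next(iter(loader))  # images in [-1, 1], shape (8, 3, 256, 256)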