| | import os |
| | import json |
| | import torch |
| | import argparse |
| | from safetensors.torch import load_file |
| | from huggingface_hub import hf_hub_download, list_repo_files |
| | from transformers import AutoTokenizer, AutoModel |
| | from PIL import Image |
| | from torchvision import transforms as T |
| | from torchvision.transforms.functional import InterpolationMode |
| |
|
| | |
| | |
| | |
# CLI: which slice of the dataset this job captions, so several independent
# jobs can each process a disjoint segment.
parser = argparse.ArgumentParser()
for flag, opts in (
    ("--segment", dict(type=int, required=True, help="Segment index (0–7)")),
    ("--num_segments", dict(type=int, default=8, help="Total number of segments")),
):
    parser.add_argument(flag, **opts)
args = parser.parse_args()
| | |
| | |
| | |
# Pinned dataset revision so every rerun reads the exact same snapshot.
_DEFAULT_REVISION = "7bb7c7f3d379c5145bb06d2cf0949c66ac9a2c4e"


def load_safetensor_from_hf(repo_id, filename, repo_type="dataset",
                            revision=_DEFAULT_REVISION, local_files_only=True):
    """Load a safetensors file from the local Hugging Face Hub cache.

    Args:
        repo_id: Hub repository id (e.g. "MatchLab/PointMapVerse").
        filename: Path of the .safetensors file inside the repo.
        repo_type: Hub repo type; defaults to "dataset".
        revision: Commit hash/branch to resolve; defaults to the pinned
            snapshot used by this pipeline (previously hard-coded).
        local_files_only: When True (default, matching the original
            behavior) only the local cache is consulted — the file must
            have been downloaded beforehand or hf_hub_download raises.

    Returns:
        dict mapping tensor names to torch tensors, as produced by
        safetensors.torch.load_file.
    """
    cached_path = hf_hub_download(
        repo_id=repo_id,
        filename=filename,
        revision=revision,
        repo_type=repo_type,
        local_files_only=local_files_only,
    )
    return load_file(cached_path)
| |
|
| | |
| | |
| | |
# Instruction prompt sent with every image: requests exactly 10 factual,
# spatially-grounded, CLIP-style captions. The mandatory numbered-list output
# format ("1. ... 10.") keeps downstream parsing of the responses predictable.
PROMPT = """You are given an image of the indoor scene. Your task is to generate captions that are 100% accurate to the image, with no hallucination.

Instructions:
- Generate exactly 10 different CLIP-Style captions for the image.
- Captions must clearly describe the visible objects, their attributes, and their spatial relationships.
- Use spatial prepositions such as: on, under, next to, beside, behind, in front of, between, above, below.
- Focus only on what is visible in the image. Do not speculate or add details that are not present.
- Be precise and factual. Avoid opinions, emotions, or subjective language.

Examples:
1. Gray laptop centered on desk, coffee mug to the side, rolling chair positioned behind.
2. Light gray couch, wooden table placed in front, standing lamp on the left.
3. White bed with pillows, small wooden nightstand beside, bedside lamp on top.
4. Rectangular table with two chairs around, bright window in background, curtain partly covering.

Now, generate 10 captions following these rules. **Output must be a numbered list (1. ... 2. ... up to 10.).**
"""
| |
|
| |
|
| | |
| | |
| | |
# Load the InternVL3-38B vision-language model (custom modeling code pulled in
# via trust_remote_code) and its slow tokenizer.
# NOTE(review): both torch_dtype=bfloat16 and load_in_8bit=True are passed —
# presumably bitsandbytes 8-bit quantization governs the weights and the dtype
# applies to non-quantized modules; confirm against the transformers version in
# use (load_in_8bit is deprecated in newer releases in favor of
# quantization_config=BitsAndBytesConfig(...)).
# NOTE(review): no device_map is given — verify the quantized load places the
# model on GPU as expected, since later code calls .cuda() on inputs.
path = "OpenGVLab/InternVL3-38B"
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,
    load_in_8bit=True,
    low_cpu_mem_usage=True,
    use_flash_attn=True,
    trust_remote_code=True
).eval()
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
| |
|
| | |
| | |
| | |
# NOTE: `T` and `InterpolationMode` are already imported at the top of this
# file; the duplicate imports that previously sat here were removed.

# Standard ImageNet channel statistics used to normalize model inputs.
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)
| |
|
def build_transform(input_size=448):
    """Build the image preprocessing pipeline.

    Ensures RGB mode, bicubic-resizes to a square of side `input_size`,
    converts to a float tensor, and applies ImageNet normalization.
    """
    steps = [
        T.Lambda(lambda im: im if im.mode == 'RGB' else im.convert('RGB')),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
    ]
    return T.Compose(steps)
| |
|
def load_image(image_pil, input_size=448):
    """Preprocess one PIL image into a batched (1, C, H, W) tensor."""
    pipeline = build_transform(input_size=input_size)
    tensor = pipeline(image_pil)
    return tensor.unsqueeze(0)
| |
|
| | |
| | |
| | |
repo_id = "MatchLab/PointMapVerse"
all_files = list_repo_files(repo_id, repo_type="dataset")
safetensor_files = [f for f in all_files if f.endswith(".safetensors")]


def _segment_bounds(total, segment, num_segments):
    """Return (start, end) slice indices for this job's segment.

    Files are split into `num_segments` chunks of size total // num_segments;
    the last segment also absorbs any remainder so every file is covered.
    """
    size = total // num_segments
    lo = segment * size
    hi = total if segment >= num_segments - 1 else (segment + 1) * size
    return lo, hi


start, end = _segment_bounds(len(safetensor_files), args.segment, args.num_segments)
segment_files = safetensor_files[start:end]
| |
|
print(f"Segment {args.segment}: processing {len(segment_files)} files")

# Number of images captioned per batch_chat call.
BATCH_SIZE = 32

os.makedirs("captions", exist_ok=True)
out_file = f"captions/captions_segment{args.segment}.json"

# Resume support: reload any captions already written for this segment so a
# restarted job skips finished images. (The redundant double initialization
# of `results` in the original was collapsed into one.)
results = {}
if os.path.exists(out_file):
    try:
        with open(out_file, "r") as f:
            results = json.load(f)
        print(f"Resuming from {len(results)} existing results in {out_file}")
    except json.JSONDecodeError:
        # A truncated/corrupted checkpoint previously crashed the job here;
        # treat it as "no prior progress" and re-caption this segment.
        print(f"Could not parse {out_file}; starting this segment from scratch")
        results = {}
| | |
for st_file in segment_files:
    print(f"Processing {st_file} ...")
    data = load_safetensor_from_hf(repo_id, st_file)
    # assumes 'color_images' is a (N, H, W, 3) uint8-compatible tensor —
    # TODO confirm against the PointMapVerse safetensors schema.
    images = data['color_images']

    for b_start in range(0, len(images), BATCH_SIZE):
        batch_imgs = images[b_start:b_start + BATCH_SIZE]

        pixel_values_list, num_patches_list, keys_to_process = [], [], []
        for idx, img in enumerate(batch_imgs):
            # Key is "<file stem>_<global image index>", matching the keys
            # stored in the resume checkpoint.
            key = f"{st_file.split('.safetensors')[0]}_{b_start + idx}"

            # Already captioned in a previous run — skip.
            if key in results:
                continue

            image_pil = Image.fromarray((img.cpu().numpy()).astype("uint8"))
            pixel_tensor = load_image(image_pil).to(torch.bfloat16).cuda()
            num_patches_list.append(pixel_tensor.size(0))
            pixel_values_list.append(pixel_tensor)
            keys_to_process.append(key)

        if not keys_to_process:
            continue

        pixel_values = torch.cat(pixel_values_list, dim=0)

        questions = [f"<image>\n{PROMPT}"] * len(keys_to_process)
        generation_config = dict(max_new_tokens=512, do_sample=True)

        responses = model.batch_chat(
            tokenizer,
            pixel_values,
            num_patches_list=num_patches_list,
            questions=questions,
            generation_config=generation_config,
        )

        for key, response in zip(keys_to_process, responses):
            results[key] = response

        # Checkpoint after every batch (resume keys are per-image, so at most
        # one batch of work is lost on interruption). The write is atomic:
        # dump to a temp file and os.replace() it over the checkpoint, so a
        # crash mid-write can never leave a truncated JSON that would break
        # the resume logic above.
        tmp_file = out_file + ".tmp"
        with open(tmp_file, "w") as f:
            json.dump(results, f, indent=2)
        os.replace(tmp_file, out_file)

        print(f"Progress saved: {len(results)} results in {out_file}")