cpuai commited on
Commit
0846da3
·
verified ·
1 Parent(s): 377b8f0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +129 -189
app.py CHANGED
@@ -9,10 +9,8 @@ from datetime import datetime
9
  import gradio as gr
10
 
11
  try:
12
- # Hugging Face Spaces 环境
13
  import spaces
14
  except ImportError:
15
- # 本地运行时,提供一个空装饰器兼容
16
  class spaces:
17
  class GPU:
18
  def __init__(self, duration=60):
@@ -26,38 +24,31 @@ from flow.model import Model
26
  from flow.configs.schema import ModelConfig
27
  from flow.utils import get_random_color, recenter_foreground
28
  from vae.utils import postprocess_mesh
29
-
30
- # 下载模型权重
31
  from huggingface_hub import hf_hub_download
32
 
33
  # =========================
34
- # CPU 运行参数
35
  # =========================
36
  DEVICE = torch.device("cpu")
37
  DTYPE = torch.float32
38
 
39
- # 可根据 Hugging Face CPU 空间资源调整线程数
40
- # 一般 2~4 比较稳,过高不一定更快
41
  CPU_THREADS = int(os.environ.get("CPU_THREADS", "2"))
42
  torch.set_num_threads(CPU_THREADS)
43
- torch.set_num_interop_threads(max(1, min(CPU_THREADS, 2)))
44
-
45
- # 为了减少 CPU 空间内存压力,允许通过环境变量控制
46
- DEFAULT_NUM_STEPS = int(os.environ.get("DEFAULT_NUM_STEPS", "20"))
47
- DEFAULT_GRID_RES = int(os.environ.get("DEFAULT_GRID_RES", "256"))
48
- DEFAULT_CFG_SCALE = float(os.environ.get("DEFAULT_CFG_SCALE", "7.0"))
49
-
50
- flow_ckpt_path = hf_hub_download(repo_id="nvidia/PartPacker", filename="flow.pt")
51
- vae_ckpt_path = hf_hub_download(repo_id="nvidia/PartPacker", filename="vae.pt")
52
 
53
  TRIMESH_GLB_EXPORT = np.array(
54
  [[0, 1, 0], [0, 0, 1], [1, 0, 0]],
55
  dtype=np.float32
56
  )
57
-
58
  MAX_SEED = np.iinfo(np.int32).max
59
  bg_remover = rembg.new_session()
60
 
 
 
 
 
 
 
61
  # =========================
62
  # 模型配置
63
  # =========================
@@ -77,32 +68,58 @@ model_config = ModelConfig(
77
  )
78
 
79
  # =========================
80
- # 初始化模型(CPU)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
  # =========================
82
- print("正在加载模型到 CPU...")
83
- model = Model(model_config).eval().to(DEVICE, dtype=DTYPE)
 
 
84
 
85
- # 显式使用 CPU 加载权重
86
  ckpt_dict = torch.load(flow_ckpt_path, map_location=DEVICE, weights_only=True)
87
  model.load_state_dict(ckpt_dict, strict=True)
 
 
 
 
 
88
  print("模型加载完成。")
 
89
 
90
 
91
  def get_random_seed(randomize_seed, seed):
92
- """
93
- 获取随机种子。
94
- """
95
  if randomize_seed:
96
  seed = np.random.randint(0, MAX_SEED)
97
- return seed
98
 
99
 
100
  def process_image(image_path):
101
  """
102
  处理输入图片:
103
- 1. 读
104
- 2. 如果没有 alpha 通道则自动抠图
105
- 3. 主体重新居中
106
  4. 缩放到模型输入尺寸
107
  """
108
  if image_path is None:
@@ -113,15 +130,13 @@ def process_image(image_path):
113
  raise gr.Error("图片读取失败,请上传有效图片。")
114
 
115
  if image.ndim == 2:
116
- # 灰度图转 RGBA
117
  image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGBA)
118
 
119
  if image.shape[-1] == 4:
120
  image = cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA)
121
  else:
122
  image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
123
- # 没有 alpha 通道时自动去背景
124
- image = rembg.remove(image, session=bg_remover) # [H, W, 4]
125
 
126
  mask = image[..., -1] > 0
127
  image = recenter_foreground(image, mask, border_ratio=0.1)
@@ -131,55 +146,71 @@ def process_image(image_path):
131
 
132
  def process_3d(
133
  input_image,
134
- num_steps=DEFAULT_NUM_STEPS,
135
- cfg_scale=DEFAULT_CFG_SCALE,
136
- grid_res=DEFAULT_GRID_RES,
137
  seed=42,
138
- simplify_mesh=False,
139
- target_num_faces=50000
140
  ):
141
  """
142
- CPU 上执行 3D 生成
143
- 注意:
144
- - CPU 推理会很慢
145
- - 建议降低 num_steps 和 grid_res
146
  """
147
  if input_image is None:
148
- raise gr.Error("请先处理并确认输入图片。")
149
 
150
  try:
151
- # 设置随机种子
152
- kiui.seed_everything(seed)
153
 
154
- # 输出目录
155
  os.makedirs("output", exist_ok=True)
156
  output_glb_path = f"output/partpacker_{datetime.now().strftime('%Y%m%d_%H%M%S')}.glb"
157
 
158
- # 输入图像:RGBA uint8 -> float32
159
  image = input_image.astype(np.float32) / 255.0
160
-
161
- # 将透明背景混合到白底
162
- image = image[..., :3] * image[..., 3:4] + (1 - image[..., 3:4])
163
 
164
  image_tensor = (
165
  torch.from_numpy(image)
166
  .permute(2, 0, 1)
167
  .contiguous()
168
  .unsqueeze(0)
169
- .to(DEVICE, dtype=DTYPE)
170
  )
171
 
172
- data = {"cond_images": image_tensor}
 
 
 
 
 
 
173
 
174
- # 主模型推理
175
  with torch.inference_mode():
176
- results = model(data, num_steps=int(num_steps), cfg_scale=float(cfg_scale))
 
 
 
 
177
 
178
  latent = results["latent"]
179
 
180
- # 切分两个 part
181
- data_part0 = {"latent": latent[:, : model.config.latent_size, :]}
182
- data_part1 = {"latent": latent[:, model.config.latent_size:, :]}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
183
 
184
  with torch.inference_mode():
185
  results_part0 = model.vae(data_part0, resolution=int(grid_res))
@@ -190,14 +221,12 @@ def process_3d(
190
 
191
  parts = []
192
 
193
- # 处理第一部分 mesh
194
  vertices, faces = results_part0["meshes"][0]
195
  mesh_part0 = trimesh.Trimesh(vertices, faces, process=False)
196
  mesh_part0.vertices = mesh_part0.vertices @ TRIMESH_GLB_EXPORT.T
197
  mesh_part0 = postprocess_mesh(mesh_part0, int(target_num_faces))
198
  parts.extend(mesh_part0.split(only_watertight=False))
199
 
200
- # 处理第二部分 mesh
201
  vertices, faces = results_part1["meshes"][0]
202
  mesh_part1 = trimesh.Trimesh(vertices, faces, process=False)
203
  mesh_part1.vertices = mesh_part1.vertices @ TRIMESH_GLB_EXPORT.T
@@ -205,13 +234,11 @@ def process_3d(
205
  parts.extend(mesh_part1.split(only_watertight=False))
206
 
207
  if len(parts) == 0:
208
- raise gr.Error("模型生成失败:没得到有效网格。你可以尝试更换图片或降低参数。")
209
 
210
- # 给不同 part 赋不同颜色
211
  for j, part in enumerate(parts):
212
  part.visual.vertex_colors = get_random_color(j, use_float=True)
213
 
214
- # 导出为 GLB
215
  scene = trimesh.Scene(parts)
216
  scene.export(output_glb_path)
217
 
@@ -219,167 +246,92 @@ def process_3d(
219
 
220
  except Exception as e:
221
  raise gr.Error(
222
- f"CPU 生成失败:{str(e)}\n\n"
223
- f"建议尝试:\n"
224
- f"1. 将 Inference Steps 降到 10~20\n"
225
- f"2. Grid Resolution 降到 256\n"
226
- f"3. 勾选 Simplify Mesh\n"
227
- f"4. 使用主体清晰、背景简单的图片"
 
 
228
  )
229
 
230
 
231
- # =========================
232
- # Gradio UI
233
- # =========================
234
-
235
- _TITLE = "🎨 Image to 3D Model - CPU Version for Hugging Face Spaces"
236
 
237
  _DESCRIPTION = """
238
- <div style="text-align: center; margin-bottom: 20px;">
239
- <h3 style="color: #2e7d32;">✨ CPU 版本:将 2D 图片转换为 3D 模型 ✨</h3>
240
- </div>
241
-
242
- ### 🚀 说明
243
- 这是一个适配 **Hugging Face CPU Space** 的版本,已经移除了 GPU 强依赖。
244
-
245
- ### ⚠️ 注意
246
- CPU 推理会明显比 GPU 慢得多
247
- - 建议使用较低参数,避免超时或内存不足
248
- - 推荐从默认参数开始测试
249
-
250
- ### 📖 使用方法:
251
- 1. 上传图片
252
- 2. 可选调整参数
253
- 3. 点击生成
254
- 4. 等待生成 GLB 模型
255
-
256
- ### 💡 CPU 环境建议:
257
- - Inference Steps:建议 10~20
258
- - Grid Resolution:建议 256
259
- - 建议勾选 Simplify Mesh
260
  """
261
 
262
- block = gr.Blocks(title=_TITLE).queue(max_size=4)
263
 
264
  with block:
265
- with gr.Row():
266
- with gr.Column():
267
- gr.Markdown("# " + _TITLE)
268
-
269
  gr.Markdown(_DESCRIPTION)
270
 
271
  with gr.Row():
272
- with gr.Column(scale=1):
273
- with gr.Row():
274
- input_image = gr.Image(
275
- label="📷 Upload Image",
276
- type="filepath"
277
- )
278
-
279
- seg_image = gr.Image(
280
- label="🔍 Processed Image",
281
- type="numpy",
282
- interactive=False,
283
- image_mode="RGBA"
284
- )
285
-
286
- with gr.Accordion("⚙️ Advanced Settings", open=False):
287
- gr.Markdown("""
288
- ### 参数说明(CPU 推荐):
289
- - **Inference Steps**:步数越多越慢,CPU 建议 10~20
290
- - **CFG Scale**:控制生成贴合程度
291
- - **Grid Resolution**:越高越精细,但 CPU 更慢、更吃内存
292
- - **Random Seed**:固定后可复现结果
293
- - **Simplify Mesh**:建议开启,减少面数
294
- """)
295
 
 
296
  num_steps = gr.Slider(
297
  label="Inference Steps",
298
  minimum=1,
299
- maximum=50,
300
  step=1,
301
- value=DEFAULT_NUM_STEPS,
302
- info="CPU 推荐:10~20"
303
  )
304
-
305
  cfg_scale = gr.Slider(
306
  label="CFG Scale",
307
  minimum=2.0,
308
  maximum=10.0,
309
  step=0.1,
310
- value=DEFAULT_CFG_SCALE,
311
- info="推荐:6~8"
312
  )
313
-
314
  input_grid_res = gr.Slider(
315
  label="Grid Resolution",
316
- minimum=128,
317
- maximum=384,
318
  step=1,
319
- value=DEFAULT_GRID_RES,
320
- info="CPU 推荐:256"
321
  )
322
-
323
  with gr.Row():
324
- randomize_seed = gr.Checkbox(
325
- label="Randomize Seed",
326
- value=True,
327
- info="每次使用不同种子"
328
- )
329
-
330
- seed = gr.Slider(
331
- label="Seed Value",
332
- minimum=0,
333
- maximum=MAX_SEED,
334
- step=1,
335
- value=0
336
- )
337
 
338
  with gr.Row():
339
- simplify_mesh = gr.Checkbox(
340
- label="Simplify Mesh",
341
- value=True,
342
- info="CPU 环境建议开启"
343
- )
344
-
345
  target_num_faces = gr.Slider(
346
- label="Target Face Count",
347
  minimum=5000,
348
- maximum=100000,
349
  step=1000,
350
- value=50000,
351
- info="越低越轻量"
352
  )
353
 
354
- button_gen = gr.Button("🎯 Generate 3D Model", variant="primary", size="lg")
355
-
356
- with gr.Column(scale=1):
357
- output_model = gr.Model3D(
358
- label="🎭 3D Model Preview",
359
- height=512
360
- )
361
 
362
- gr.Markdown("""
363
- ### 📌 预览操作:
364
- - 🖱️ 左键拖动:旋转
365
- - 🖱️ 右键拖动:平移
366
- - 🖱️ 滚轮:缩放
367
- 📥 可下载生成的 GLB 文件
368
- """)
369
 
370
  with gr.Row():
371
- gr.Markdown("### 🖼️ Example Images (Click to Try):")
372
  gr.Examples(
373
  examples=[
374
  ["examples/rabbit.png"],
375
  ["examples/robot.png"],
376
  ["examples/teapot.png"],
377
- ["examples/barrel.png"],
378
- ["examples/cactus.png"],
379
- ["examples/cyan_car.png"],
380
- ["examples/pickup.png"],
381
- ["examples/swivelchair.png"],
382
- ["examples/warhammer.png"],
383
  ],
384
  fn=process_image,
385
  inputs=[input_image],
@@ -387,18 +339,6 @@ with block:
387
  cache_examples=False
388
  )
389
 
390
- gr.Markdown("""
391
- ---
392
- ### ⚠️ Important Notes:
393
- - 这是 CPU 版,速度会比较慢
394
- - 若 Hugging Face CPU Space 配置较低,可能会出现超时或内存不足
395
- - 最适合主体清晰、背景简单的图片
396
- - 如果失败,请先降低参数再试
397
-
398
- ### 🤝 Technical Support:
399
- Powered by NVIDIA PartPacker technology.
400
- """)
401
-
402
  button_gen.click(
403
  fn=process_image,
404
  inputs=[input_image],
 
9
  import gradio as gr
10
 
11
  try:
 
12
  import spaces
13
  except ImportError:
 
14
  class spaces:
15
  class GPU:
16
  def __init__(self, duration=60):
 
24
  from flow.configs.schema import ModelConfig
25
  from flow.utils import get_random_color, recenter_foreground
26
  from vae.utils import postprocess_mesh
 
 
27
  from huggingface_hub import hf_hub_download
28
 
29
  # =========================
30
+ # CPU 基础设置
31
  # =========================
32
  DEVICE = torch.device("cpu")
33
  DTYPE = torch.float32
34
 
 
 
35
  CPU_THREADS = int(os.environ.get("CPU_THREADS", "2"))
36
  torch.set_num_threads(CPU_THREADS)
37
+ torch.set_num_interop_threads(max(1, min(2, CPU_THREADS)))
 
 
 
 
 
 
 
 
38
 
39
  TRIMESH_GLB_EXPORT = np.array(
40
  [[0, 1, 0], [0, 0, 1], [1, 0, 0]],
41
  dtype=np.float32
42
  )
 
43
  MAX_SEED = np.iinfo(np.int32).max
44
  bg_remover = rembg.new_session()
45
 
46
+ # =========================
47
+ # 下载模型
48
+ # =========================
49
+ flow_ckpt_path = hf_hub_download(repo_id="nvidia/PartPacker", filename="flow.pt")
50
+ vae_ckpt_path = hf_hub_download(repo_id="nvidia/PartPacker", filename="vae.pt")
51
+
52
  # =========================
53
  # 模型配置
54
  # =========================
 
68
  )
69
 
70
  # =========================
71
+ # 工具函数:强制整个块转 float32
72
+ # =========================
73
+ def force_module_fp32(module: torch.nn.Module):
74
+ """
75
+ 递归把模块参数和 buffer 全部转成 float32。
76
+ 这一步是解决 CPU 下 bfloat16/float32 混用问题的关键。
77
+ """
78
+ module.to(device=DEVICE)
79
+ module.float()
80
+
81
+ for child in module.children():
82
+ force_module_fp32(child)
83
+
84
+ for name, buf in module.named_buffers(recurse=False):
85
+ if torch.is_floating_point(buf):
86
+ setattr(module, name, buf.to(device=DEVICE, dtype=torch.float32))
87
+
88
+ return module
89
+
90
+
91
+ # =========================
92
+ # 初始化模型(CPU + float32)
93
  # =========================
94
+ print("正在加载模型到 CPU ...")
95
+ model = Model(model_config)
96
+ model.eval()
97
+ model.to(DEVICE)
98
 
99
+ # 显式 CPU 加载权重
100
  ckpt_dict = torch.load(flow_ckpt_path, map_location=DEVICE, weights_only=True)
101
  model.load_state_dict(ckpt_dict, strict=True)
102
+
103
+ # 关键:再次强制整个模型为 float32
104
+ force_module_fp32(model)
105
+ model.eval()
106
+
107
  print("模型加载完成。")
108
+ print("主模型 dtype:", next(model.parameters()).dtype)
109
 
110
 
111
  def get_random_seed(randomize_seed, seed):
 
 
 
112
  if randomize_seed:
113
  seed = np.random.randint(0, MAX_SEED)
114
+ return int(seed)
115
 
116
 
117
  def process_image(image_path):
118
  """
119
  处理输入图片:
120
+ 1. 读图
121
+ 2. 没有 alpha 自动去背景
122
+ 3. 主体居中
123
  4. 缩放到模型输入尺寸
124
  """
125
  if image_path is None:
 
130
  raise gr.Error("图片读取失败,请上传有效图片。")
131
 
132
  if image.ndim == 2:
 
133
  image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGBA)
134
 
135
  if image.shape[-1] == 4:
136
  image = cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA)
137
  else:
138
  image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
139
+ image = rembg.remove(image, session=bg_remover)
 
140
 
141
  mask = image[..., -1] > 0
142
  image = recenter_foreground(image, mask, border_ratio=0.1)
 
146
 
147
  def process_3d(
148
  input_image,
149
+ num_steps=10,
150
+ cfg_scale=7.0,
151
+ grid_res=128,
152
  seed=42,
153
+ simplify_mesh=True,
154
+ target_num_faces=20000
155
  ):
156
  """
157
+ CPU 3D 生成
 
 
 
158
  """
159
  if input_image is None:
160
+ raise gr.Error("请先上传并处理图片。")
161
 
162
  try:
163
+ kiui.seed_everything(int(seed))
 
164
 
 
165
  os.makedirs("output", exist_ok=True)
166
  output_glb_path = f"output/partpacker_{datetime.now().strftime('%Y%m%d_%H%M%S')}.glb"
167
 
168
+ # RGBA -> float32
169
  image = input_image.astype(np.float32) / 255.0
170
+ image = image[..., :3] * image[..., 3:4] + (1.0 - image[..., 3:4])
 
 
171
 
172
  image_tensor = (
173
  torch.from_numpy(image)
174
  .permute(2, 0, 1)
175
  .contiguous()
176
  .unsqueeze(0)
177
+ .to(device=DEVICE, dtype=torch.float32)
178
  )
179
 
180
+ data = {
181
+ "cond_images": image_tensor.float()
182
+ }
183
+
184
+ # 再保险:推理前确保模型仍是 float32
185
+ force_module_fp32(model)
186
+ model.eval()
187
 
 
188
  with torch.inference_mode():
189
+ results = model(
190
+ data,
191
+ num_steps=int(num_steps),
192
+ cfg_scale=float(cfg_scale)
193
+ )
194
 
195
  latent = results["latent"]
196
 
197
+ # 关键:latent 强制 float32
198
+ if isinstance(latent, torch.Tensor):
199
+ latent = latent.to(device=DEVICE, dtype=torch.float32).contiguous()
200
+ else:
201
+ raise gr.Error("模型输出 latent 异常。")
202
+
203
+ # VAE 输入前再做 float32 保证
204
+ data_part0 = {
205
+ "latent": latent[:, : model.config.latent_size, :].float().contiguous()
206
+ }
207
+ data_part1 = {
208
+ "latent": latent[:, model.config.latent_size:, :].float().contiguous()
209
+ }
210
+
211
+ # 再保险:把 VAE 也强制成 float32
212
+ force_module_fp32(model.vae)
213
+ model.vae.eval()
214
 
215
  with torch.inference_mode():
216
  results_part0 = model.vae(data_part0, resolution=int(grid_res))
 
221
 
222
  parts = []
223
 
 
224
  vertices, faces = results_part0["meshes"][0]
225
  mesh_part0 = trimesh.Trimesh(vertices, faces, process=False)
226
  mesh_part0.vertices = mesh_part0.vertices @ TRIMESH_GLB_EXPORT.T
227
  mesh_part0 = postprocess_mesh(mesh_part0, int(target_num_faces))
228
  parts.extend(mesh_part0.split(only_watertight=False))
229
 
 
230
  vertices, faces = results_part1["meshes"][0]
231
  mesh_part1 = trimesh.Trimesh(vertices, faces, process=False)
232
  mesh_part1.vertices = mesh_part1.vertices @ TRIMESH_GLB_EXPORT.T
 
234
  parts.extend(mesh_part1.split(only_watertight=False))
235
 
236
  if len(parts) == 0:
237
+ raise gr.Error("没有生成有效网格,请换一张更清晰、背景更简单的图片。")
238
 
 
239
  for j, part in enumerate(parts):
240
  part.visual.vertex_colors = get_random_color(j, use_float=True)
241
 
 
242
  scene = trimesh.Scene(parts)
243
  scene.export(output_glb_path)
244
 
 
246
 
247
  except Exception as e:
248
  raise gr.Error(
249
+ "CPU 生成失败:"
250
+ + str(e)
251
+ + "\n\n建议:\n"
252
+ "1. Inference Steps 先设为 10\n"
253
+ "2. Grid Resolution 先设为 128\n"
254
+ "3. 勾选 Simplify Mesh\n"
255
+ "4. Target Face Count 设为 20000\n"
256
+ "5. 使用主体清晰、背景简单的 PNG 图片"
257
  )
258
 
259
 
260
+ _TITLE = "🎨 Image to 3D Model - CPU Version"
 
 
 
 
261
 
262
  _DESCRIPTION = """
263
+ ### CPU 版说明
264
+ 这是适配 Hugging Face CPU Space 的版本
265
+
266
+ ### 建议参数
267
+ - Inference Steps:10
268
+ - CFG Scale:7.0
269
+ - Grid Resolution:128
270
+ - Simplify Mesh:开启
271
+ - Target Face Count:20000
272
+
273
+ ### 注意
274
+ 该模型原本更适合 GPU,CPU 下会比较慢。
 
 
 
 
 
 
 
 
 
 
275
  """
276
 
277
+ block = gr.Blocks(title=_TITLE).queue(max_size=2)
278
 
279
  with block:
280
+ gr.Markdown("# " + _TITLE)
 
 
 
281
  gr.Markdown(_DESCRIPTION)
282
 
283
  with gr.Row():
284
+ with gr.Column():
285
+ input_image = gr.Image(label="上传图片", type="filepath")
286
+ seg_image = gr.Image(label="处理后图片", type="numpy", interactive=False, image_mode="RGBA")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
287
 
288
+ with gr.Accordion("高级设置", open=False):
289
  num_steps = gr.Slider(
290
  label="Inference Steps",
291
  minimum=1,
292
+ maximum=30,
293
  step=1,
294
+ value=10
 
295
  )
 
296
  cfg_scale = gr.Slider(
297
  label="CFG Scale",
298
  minimum=2.0,
299
  maximum=10.0,
300
  step=0.1,
301
+ value=7.0
 
302
  )
 
303
  input_grid_res = gr.Slider(
304
  label="Grid Resolution",
305
+ minimum=64,
306
+ maximum=256,
307
  step=1,
308
+ value=128
 
309
  )
 
310
  with gr.Row():
311
+ randomize_seed = gr.Checkbox(label="随机种子", value=True)
312
+ seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
 
 
 
 
 
 
 
 
 
 
 
313
 
314
  with gr.Row():
315
+ simplify_mesh = gr.Checkbox(label="简化网格", value=True)
 
 
 
 
 
316
  target_num_faces = gr.Slider(
317
+ label="目标面数",
318
  minimum=5000,
319
+ maximum=50000,
320
  step=1000,
321
+ value=20000
 
322
  )
323
 
324
+ button_gen = gr.Button("生成 3D 模型", variant="primary")
 
 
 
 
 
 
325
 
326
+ with gr.Column():
327
+ output_model = gr.Model3D(label="3D 预览", height=512)
 
 
 
 
 
328
 
329
  with gr.Row():
 
330
  gr.Examples(
331
  examples=[
332
  ["examples/rabbit.png"],
333
  ["examples/robot.png"],
334
  ["examples/teapot.png"],
 
 
 
 
 
 
335
  ],
336
  fn=process_image,
337
  inputs=[input_image],
 
339
  cache_examples=False
340
  )
341
 
 
 
 
 
 
 
 
 
 
 
 
 
342
  button_gen.click(
343
  fn=process_image,
344
  inputs=[input_image],