from transformers import BlipProcessor, BlipForConditionalGeneration
import torch
import gradio as gr

# Load the BLIP base captioning model and its processor once at startup.
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
model.eval()

def caption(img):
    # Preprocess the incoming PIL image into pixel values for the model.
    inputs = processor(images=img, return_tensors="pt")
    # No gradients are needed at inference time.
    with torch.no_grad():
        out = model.generate(**inputs)
    return processor.decode(out[0], skip_special_tokens=True)

demo = gr.Interface(fn=caption, inputs=gr.Image(type="pil"), outputs="text")
demo.launch()
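
# --- Usage note ---
# A minimal sketch for exercising caption() directly, without launching the
# Gradio UI; the path "example.jpg" is a hypothetical placeholder:
#
#   from PIL import Image
#   print(caption(Image.open("example.jpg")))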