text stringlengths 1 93.6k |
|---|
import math
import os
import subprocess
import time
from types import SimpleNamespace
from typing import Iterator

import torch

from cog import BasePredictor, Input, Path

from models.stmfnet import STMFNet
from utility import tensor2rgb
|
# Remote location of the pretrained ST-MFNet checkpoint.
STMFNET_WEIGHTS_URL = "https://weights.replicate.delivery/default/stmfnet/stmfnet.pth"
# Local path the checkpoint is downloaded to and loaded from during setup().
STMFNET_WEIGHTS_PATH = "stmfnet.pth"
|
def download_weights(url, dest):
    """Download model weights from *url* to *dest* using the ``pget`` tool.

    An extracting download (``pget -x``) is attempted first; if that exits
    non-zero, a plain (non-extracting) download is retried. Progress and
    elapsed time are reported on stdout.
    """
    started = time.time()
    print("downloading url: ", url)
    print("downloading to: ", dest)
    try:
        # -x asks pget to extract the downloaded payload in place.
        subprocess.check_call(["pget", "-x", url, dest], close_fds=False)
    except subprocess.CalledProcessError:
        # Fall back to a straight file download when extraction fails.
        print("Extraction with -x failed. Trying download without extraction...")
        subprocess.check_call(["pget", url, dest], close_fds=False)
    print("downloading took: ", time.time() - started)
|
class Predictor(BasePredictor):
|
def setup(self):
|
"""
|
Set up the prediction environment.
|
This method initializes the model, its parameters, and the GPU device for computation.
|
It also loads the STMFNet model using the specified checkpoint.
|
Lastly, it ensures there is an output directory for storing the enhanced videos.
|
"""
|
if not os.path.exists(STMFNET_WEIGHTS_PATH):
|
download_weights(STMFNET_WEIGHTS_URL, STMFNET_WEIGHTS_PATH)
|
args = SimpleNamespace(
|
**{
|
"gpu_id": (gpu_id := 0),
|
"net": (net := "STMFNet"),
|
"checkpoint": (checkpoint := STMFNET_WEIGHTS_PATH),
|
"size": (size := "1920x1080"),
|
"patch_size": (patch_size := None),
|
"overlap": (overlap := None),
|
"batch_size": (batch_size := None),
|
"out_fps": (out_fps := 144),
|
"out_dir": (out_dir := "."),
|
"featc": (featc := [64, 128, 256, 512]),
|
"featnet": (featnet := "UMultiScaleResNext"),
|
"featnorm": (featnorm := "batch"),
|
"kernel_size": (kernel_size := 5),
|
"dilation": (dilation := 1),
|
"finetune_pwc": (finetune_pwc := False),
|
}
|
)
|
torch.cuda.set_device(gpu_id)
|
self.net = net
|
self.size = size
|
self.model = STMFNet(args).cuda()
|
print("Loading the model...")
|
checkpoint = torch.load(checkpoint)
|
self.model.load_state_dict(checkpoint["state_dict"])
|
self.model.eval()
|
if not os.path.exists(out_dir):
|
os.makedirs(out_dir)
|
def predict(
|
self,
|
mp4: Path = Input(description="Upload an mp4 video file."),
|
framerate_multiplier: int = Input(
|
description="Determines how many intermediate frames to generate between original frames. E.g., a value of 2 will double the frame rate, and 4 will quadruple it, etc.",
|
default=2,
|
choices=[2, 4, 8, 16, 32],
|
),
|
keep_original_duration: bool = Input(
|
description="Should the enhanced video retain the original duration? If set to `True`, the model will adjust the frame rate to maintain the video's original duration after adding interpolated frames. If set to `False`, the frame rate will be set based on `custom_fps`.",
|
default=True,
|
),
|
custom_fps: float = Input(
|
description="Set `keep_original_duration` to `False` to use this! Desired frame rate (fps) for the enhanced video. This will only be considered if `keep_original_duration` is set to `False`.",
|
default=None,
|
ge=1,
|
le=240,
|
),
|
) -> Iterator[Path]:
|
"""
|
Enhance a video by increasing its frame rate using frame interpolation.
|
Parameters:
|
- mp4 (Path): Path to the video file.
|
- keep_original_duration (bool): Indicator to maintain the original video duration after frame interpolation.
|
- custom_fps (float): Target frame rate for the enhanced video when not maintaining the original duration.
|
- framerate_multiplier (int): Multiplier for the number of frames.
|
Returns:
|
Iterator[Path]: Paths to the generated enhanced video files.
|
"""
|
num_iterations = int(math.log2(framerate_multiplier))
|
original_seq_name = os.path.basename(mp4).split(".")[0]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.