from einops import rearrange
import gradio as gr
import torch
import torch.nn.functional as F
from PIL import Image, ImageOps
from transformers import AutoModel, CLIPImageProcessor

# Load NVIDIA's RADIO-L vision foundation model from the Hugging Face Hub.
hf_repo = "nvidia/RADIO-L"

# The repo bundles a CLIPImageProcessor config for resizing and normalizing inputs.
image_processor = CLIPImageProcessor.from_pretrained(hf_repo)

# trust_remote_code is required because RADIO ships its own modeling code.
model = AutoModel.from_pretrained(hf_repo, trust_remote_code=True)
model.eval().cuda()
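
# Example inference (a sketch, not part of the original file): per NVIDIA's RADIO
# model card, the forward pass returns a (summary, spatial_features) tuple. The
# image path "example.jpg" is a placeholder assumption.
image = Image.open("example.jpg").convert("RGB")
pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
with torch.no_grad():
    summary, spatial_features = model(pixel_values.cuda())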