# import streamlit as st
# import torch
# import torchvision.transforms as T
# from PIL import Image
#
# # Assuming the necessary packages (featup, clip, etc.) are installed and accessible
# from featup.util import norm, unnorm
# from featup.plotting import plot_feats
#
# # Setup - ensure the repository content is accessible in the environment
#
# # Streamlit UI
# st.title("Feature Upsampling Demo")
#
# # File uploader
# uploaded_file = st.file_uploader("Choose an image...", type=["png", "jpg", "jpeg"])
# if uploaded_file is not None:
#     image = Image.open(uploaded_file).convert("RGB")
#
#     # Image preprocessing
#     input_size = 224
#     transform = T.Compose([
#         T.Resize(input_size),
#         T.CenterCrop((input_size, input_size)),
#         T.ToTensor(),
#         norm
#     ])
#
#     image_tensor = transform(image).unsqueeze(0).cuda()  # assumes CUDA is available
#
#     # Model selection
#     model_option = st.selectbox(
#         'Choose a model for feature upsampling',
#         ('dino16', 'dinov2', 'clip', 'resnet50')
#     )
#
#     if st.button('Upsample Features'):
#         # Load the selected model
#         upsampler = torch.hub.load("mhamilton723/FeatUp", model_option).cuda()
#         hr_feats = upsampler(image_tensor)
#         lr_feats = upsampler.model(image_tensor)
#
#         # Plotting - adjust the plot_feats function or find an alternative to display images in Streamlit
#         # This step will likely need customization to display within Streamlit's interface
#         plot_feats(unnorm(image_tensor)[0], lr_feats[0], hr_feats[0])
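#
# Possible Streamlit display adaptation for the block above (untested sketch):
# if plot_feats draws onto the current matplotlib figure - an assumption about
# featup.plotting, not verified here - the figure could be captured and rendered
# with st.pyplot instead of relying on a blocking window:
#
#     import matplotlib.pyplot as plt
#     plot_feats(unnorm(image_tensor)[0], lr_feats[0], hr_feats[0])
#     st.pyplot(plt.gcf())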
import streamlit as st
import torch


def check_gpu_status():
    # Check if CUDA (GPU support) is available in PyTorch
    cuda_available = torch.cuda.is_available()
    gpu_count = torch.cuda.device_count()
    gpu_name = torch.cuda.get_device_name(0) if cuda_available else "Not Available"
    return cuda_available, gpu_count, gpu_name


# Streamlit page title
st.title("PyTorch GPU Availability Test")

# Checking the GPU status
cuda_available, gpu_count, gpu_name = check_gpu_status()

# Displaying the results
if cuda_available:
    st.success("GPU is available! 🎉")
    st.info(f"Number of GPUs available: {gpu_count}")
    st.info(f"GPU Name: {gpu_name}")
else:
    st.error("GPU is not available. 😢")
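# Re-enabling the FeatUp demo without hard-coding .cuda() (untested sketch):
# the cuda_available flag computed above could select the device, assuming the
# FeatUp upsampler also runs on CPU - an assumption not verified here:
#
#     device = "cuda" if cuda_available else "cpu"
#     upsampler = torch.hub.load("mhamilton723/FeatUp", model_option).to(device)
#     image_tensor = transform(image).unsqueeze(0).to(device)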