import streamlit as st
import pandas as pd

st.set_page_config(page_title="World Model Challenge", layout="wide")


st.sidebar.title("Navigation")
st.sidebar.markdown("[Overview](#overview)")
st.sidebar.markdown("[Motivation](#motivation)")
st.sidebar.markdown("[Challenges](#challenges)")
st.sidebar.markdown("[Datasets](#datasets)")
st.sidebar.markdown("[Scoring](#scoring)")


st.markdown("<h1 id='overview'>Overview</h1>", unsafe_allow_html=True)
st.header("Welcome")
st.write(
    "Welcome to the World Model Challenge server. This platform hosts three challenges designed to advance research in world models for robotics: "
    "Compression, Sampling, and Evaluation."
)
st.markdown("<h1 id='motivation'>Motivation</h1>", unsafe_allow_html=True)
st.write(
    "Real-world robotics faces a constant challenge: environments are dynamic and ever-changing, which makes it difficult to evaluate robot performance reliably. "
    "World models address this by learning to simulate complex interactions from raw sensor data. These learned simulators enable robust testing and continuous improvement of robot policies without the limitations of physical testing."
)
st.markdown("<h1 id='challenges'>Challenges</h1>", unsafe_allow_html=True)
st.subheader("Compression Challenge")
st.write(
    "Train a model to compress our robots' logs effectively while preserving the critical details needed to understand and predict future interactions. "
    "Performance is measured by the model's loss: the lower the loss, the better the model captures real-world complexity."
)
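# Illustrative only: the text above says compression is scored by the model's
# loss. The sketch below assumes a next-token model over tokenized log frames
# and shows an average cross-entropy loss; the official metric and data layout
# are defined by the challenge rules, not by this snippet.
st.code(
    """
import torch
import torch.nn.functional as F

def compression_loss(logits: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
    # logits: (batch, seq, vocab) predictions; targets: (batch, seq) token ids.
    # Lower average cross-entropy means the logs are compressed more effectively.
    # Illustrative example; see the Compression Challenge rules for official scoring.
    return F.cross_entropy(logits.reshape(-1, logits.size(-1)), targets.reshape(-1))
""",
    language="python",
)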
st.subheader("Sampling Challenge")
st.write(
    "Predict a video frame two seconds ahead given a short clip of robot interactions. The goal is to create a coherent and plausible continuation that accurately reflects scene dynamics. "
    "Submissions are judged by how closely they match the actual frame."
)
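# Illustrative only: this page does not name the similarity metric, so the
# sketch below uses per-pixel mean squared error between the predicted and the
# ground-truth frame purely as an example of "how closely they match".
st.code(
    """
import numpy as np

def frame_mse(predicted: np.ndarray, actual: np.ndarray) -> float:
    # Both frames as (H, W, 3) arrays with values in [0, 255]; lower is closer.
    # Example metric only; see the Sampling Challenge rules for official scoring.
    diff = predicted.astype(np.float64) - actual.astype(np.float64)
    return float(np.mean(diff ** 2))
""",
    language="python",
)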
st.subheader("Evaluation Challenge")
st.write(
    "Can you predict a robot's real-world performance without physically deploying it? You'll be given several policies for a specific task and must rank them by expected performance. "
    "Your ranking is then compared to the true ranking of the policies."
)
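# Illustrative only: the page says submitted rankings are compared to the true
# ranking but does not name the statistic, so this sketch uses Spearman rank
# correlation as one common way to score agreement between two rankings.
st.code(
    """
import numpy as np

def spearman_rho(submitted_ranks, true_ranks):
    # Spearman rank correlation for rankings without ties:
    # rho = 1 - 6 * sum(d_i ** 2) / (n * (n ** 2 - 1)); 1.0 means identical order.
    # Example statistic only; the challenge defines the official comparison.
    d = np.asarray(submitted_ranks) - np.asarray(true_ranks)
    n = len(d)
    return 1.0 - 6.0 * float(np.sum(d ** 2)) / (n * (n ** 2 - 1))

# Ranks assigned to four policies (1 = best); example values only.
print(spearman_rho([1, 3, 2, 4], [2, 3, 1, 4]))  # 0.8
""",
    language="python",
)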
st.markdown("<h1 id='datasets'>Datasets</h1>", unsafe_allow_html=True)
st.write(
    "We offer two key datasets for the 1X World Model Challenge:\n\n"
    "**Raw Data:** The [world_model_raw_data](https://huggingface.co/datasets/1x-technologies/world_model_raw_data) dataset provides raw sensor data, video logs, and annotated robot state sequences from diverse real-world scenarios. "
    "It is split into 100 shards, each containing a 512x512 MP4 video, a segment index mapping, and state arrays, and is licensed under CC-BY-NC-SA 4.0.\n\n"
    "**Tokenized Data:** The [world_model_tokenized_data](https://huggingface.co/datasets/1x-technologies/world_model_tokenized_data) dataset tokenizes the raw video sequences with the NVIDIA Cosmos Tokenizer. "
    "This compact representation is well suited to the Compression Challenge and is released under the Apache 2.0 license."
)
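# Illustrative only: a minimal sketch of pulling either dataset locally with the
# `huggingface_hub` client. The exact shard file names and layout follow the
# dataset cards on Hugging Face, not this snippet.
st.code(
    """
from huggingface_hub import snapshot_download

# Raw shards: MP4 videos, segment index mappings, and state arrays.
raw_dir = snapshot_download(
    repo_id="1x-technologies/world_model_raw_data", repo_type="dataset"
)

# Cosmos-tokenized variant used for the Compression Challenge.
tokenized_dir = snapshot_download(
    repo_id="1x-technologies/world_model_tokenized_data", repo_type="dataset"
)
print(raw_dir, tokenized_dir)
""",
    language="python",
)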
st.markdown("<h1 id='scoring'>Scoring</h1>", unsafe_allow_html=True)
st.write(
    "Our scoring system rewards performance in all three challenges, with extra emphasis on the Evaluation Challenge. "
    "A team's final rank is determined by the total points earned across the challenges."
)
st.subheader("Points Breakdown")
col1, col2, col3 = st.columns(3)
with col1:
    st.markdown('<h3 style="margin-left:20px;">Compression</h3>', unsafe_allow_html=True)
    st.markdown(
        "- **1st Place:** 10 points\n"
        "- **2nd Place:** 7 points\n"
        "- **3rd Place:** 5 points"
    )
with col2:
    st.markdown('<h3 style="margin-left:20px;">Sampling</h3>', unsafe_allow_html=True)
    st.markdown(
        "- **1st Place:** 10 points\n"
        "- **2nd Place:** 7 points\n"
        "- **3rd Place:** 5 points"
    )
with col3:
    st.markdown('<h3 style="margin-left:20px;">Evaluation</h3>', unsafe_allow_html=True)
    st.markdown(
        "- **1st Place:** 20 points\n"
        "- **2nd Place:** 14 points\n"
        "- **3rd Place:** 10 points"
    )
with st.expander("Tie-Breakers"):
    st.write(
        "If teams tie in total points, the following tie-breakers will be applied in order:\n"
        "1. Highest Evaluation Challenge score\n"
        "2. Highest Sampling Challenge score\n"
        "3. Highest Compression Challenge score"
    )
st.write(
    "The overall leaderboard, which shows the total points across all challenges, will go live on **March 10th**. "
    "Additionally, each challenge will have its own leaderboard on its respective Hugging Face submission server."
)
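# Illustrative only: a small sketch of how the points and tie-breakers described
# above could be combined into a final ordering. The team names and scores are
# made up; the official leaderboard computes the real ranking.
st.code(
    """
# Points earned per challenge by each (hypothetical) team.
teams = {
    "team_a": {"compression": 10, "sampling": 5, "evaluation": 14},
    "team_b": {"compression": 7, "sampling": 7, "evaluation": 20},
    "team_c": {"compression": 5, "sampling": 10, "evaluation": 14},
}

def leaderboard_key(scores):
    total = scores["compression"] + scores["sampling"] + scores["evaluation"]
    # Tie-breakers, in order: Evaluation, then Sampling, then Compression score.
    return (total, scores["evaluation"], scores["sampling"], scores["compression"])

ranking = sorted(teams, key=lambda name: leaderboard_key(teams[name]), reverse=True)
print(ranking)  # best team first: ['team_b', 'team_c', 'team_a']
""",
    language="python",
)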