import gradio as gr
import pandas as pd
import numpy as np
import onnxruntime as ort
import sys
from pathlib import Path
sys.path.append("rd2l_pred")
from training_data_prep import list_format, modification, league_money, df_gen
from feature_engineering import heroes, hero_information

# Global variables for the ONNX session and the expected input feature columns
MODEL = None
FEATURE_COLUMNS = None


def load_model():
    """Load the ONNX model and determine its input feature columns."""
    global MODEL, FEATURE_COLUMNS
    try:
        model_path = Path("model/rd2l_forest.onnx")
        if not model_path.exists():
            return "Model file not found at: " + str(model_path)
        MODEL = ort.InferenceSession(str(model_path))
        # Load the feature column order from a saved reference file - this needs
        # to be exported alongside the model during training
        try:
            FEATURE_COLUMNS = pd.read_csv("model/feature_columns.csv")["columns"].tolist()
        except Exception:
            # Fall back to a basic feature set if the reference file is not found
            FEATURE_COLUMNS = ["player_id", "mmr", "p1", "p2", "p3", "p4", "p5",
                               "total_games_played", "total_winrate"]
        return "Model loaded successfully"
    except Exception as e:
        return f"Error loading model: {str(e)}"


def process_player_data(player_id, mmr, comf_1, comf_2, comf_3, comf_4, comf_5):
    """Process player data the same way as the training pipeline."""
    try:
        # Extract the numeric player ID if a profile URL was pasted
        if "/" in player_id:
            player_id = player_id.split("/")[-1]

        # Build the initial player feature dictionary
        player_data = {
            "player_id": player_id,
            "mmr": float(mmr),
            "p1": int(comf_1),
            "p2": int(comf_2),
            "p3": int(comf_3),
            "p4": int(comf_4),
            "p5": int(comf_5)
        }

        # Get hero statistics from the OpenDota API and merge them in
        try:
            hero_stats = hero_information(player_id)
            player_data.update(hero_stats.to_dict())
        except Exception as e:
            print(f"Warning - Error fetching hero data: {str(e)}")
            # If the hero stats lookup fails, add placeholder values
            player_data.update({
                "total_games_played": 0,
                "total_winrate": 0.0
            })

        # Convert to a one-row DataFrame for consistency with training
        df = pd.DataFrame([player_data])

        # Ensure all required columns exist
        if FEATURE_COLUMNS:
            for col in FEATURE_COLUMNS:
                if col not in df.columns:
                    df[col] = 0
            # Reorder columns to match the model input
            df = df[FEATURE_COLUMNS]

        return df
    except Exception as e:
        return f"Error processing player data: {str(e)}"


def predict_cost(user_id, mmr, comf_1, comf_2, comf_3, comf_4, comf_5):
    """Main prediction function for the Gradio interface."""
    try:
        # Load the model on first use if it is not already loaded
        if MODEL is None:
            result = load_model()
            if not result.startswith("Model loaded"):
                return result

        # Process the input data into a one-row feature frame
        processed_data = process_player_data(user_id, mmr, comf_1, comf_2, comf_3, comf_4, comf_5)
        if isinstance(processed_data, str):  # process_player_data returned an error message
            return processed_data

        # Print debug information
        print("Processed data shape:", processed_data.shape)
        print("Processed data columns:", processed_data.columns.tolist())

        # Run the ONNX model on the prepared features
        try:
            input_name = MODEL.get_inputs()[0].name
            prediction = MODEL.run(None, {input_name: processed_data.values.astype(np.float32)})[0]
            predicted_cost = round(float(prediction[0]), 2)
        except Exception as e:
            return f"Error during prediction: {str(e)}\nProcessed data shape: {processed_data.shape}"

        return f"""Predicted Cost: {predicted_cost}

Player Details:
- MMR: {mmr}
- Position Comfort:
  * Pos 1: {comf_1}
  * Pos 2: {comf_2}
  * Pos 3: {comf_3}
  * Pos 4: {comf_4}
  * Pos 5: {comf_5}

Note: This prediction is based on historical data and player statistics from OpenDota."""
    except Exception as e:
        return f"Error in prediction pipeline: {str(e)}"

# Create the Gradio interface
demo = gr.Interface(
    fn=predict_cost,
    inputs=[
        gr.Textbox(label="Player ID or Link to OpenDota/Dotabuff",
                   placeholder="Enter player ID or full profile URL"),
        gr.Number(label="MMR", value=3000),
        gr.Slider(1, 5, value=3, step=1, label="Comfort (Pos 1)"),
        gr.Slider(1, 5, value=3, step=1, label="Comfort (Pos 2)"),
        gr.Slider(1, 5, value=3, step=1, label="Comfort (Pos 3)"),
        gr.Slider(1, 5, value=3, step=1, label="Comfort (Pos 4)"),
        gr.Slider(1, 5, value=3, step=1, label="Comfort (Pos 5)")
    ],
    examples=[
        ["https://www.dotabuff.com/players/188649776", 6812, 5, 5, 4, 2, 1]
    ],
    outputs=gr.Textbox(label="Prediction Results"),
    title="RD2L Player Cost Predictor",
    description="""This tool predicts the auction cost of RD2L players based on their MMR,
    position comfort levels, and historical performance data from OpenDota.
    Enter a player's OpenDota ID or profile URL along with their current stats
    to get a predicted cost.""",
    article="""### How it works
- The predictor uses a machine learning model trained on historical RD2L draft data
- Player statistics are fetched from the OpenDota API
- Position comfort levels range from 1 (least comfortable) to 5 (most comfortable)
- Predictions are based on both current stats and historical performance

### Notes
- MMR should be the player's current solo MMR
- Position comfort should reflect actual role experience
- Predictions are estimates and may vary from actual draft results"""
)
# Load model on startup
print(load_model())
if __name__ == "__main__":
    demo.launch()