import React, { useEffect, useState } from "react";
import { useRouter, usePathname, useSearchParams } from "next/navigation";
import { modelHubCall } from "./networking";
import { getConfigFieldSetting, updateConfigFieldSetting } from "./networking";
import {
  Card,
  Text,
  Title,
  Grid,
  Button,
  Badge,
  Tab,
  TabGroup,
  TabList,
  TabPanel,
  TabPanels,
} from "@tremor/react";
import { RightOutlined, CopyOutlined } from "@ant-design/icons";
import { Modal, Tooltip, message } from "antd";
import { Prism as SyntaxHighlighter } from "react-syntax-highlighter";

interface ModelHubProps {
  accessToken: string | null;
  publicPage: boolean;
  premiumUser: boolean;
}

interface ModelInfo {
  model_group: string;
  mode: string;
  supports_function_calling: boolean;
  supports_vision: boolean;
  max_input_tokens?: number;
  max_output_tokens?: number;
  input_cost_per_token?: number;
  output_cost_per_token?: number;
  supported_openai_params?: string[];
}

const ModelHub: React.FC<ModelHubProps> = ({
  accessToken,
  publicPage,
  premiumUser,
}) => {
  const [publicPageAllowed, setPublicPageAllowed] = useState<boolean>(false);
  const [modelHubData, setModelHubData] = useState<ModelInfo[] | null>(null);
  const [isModalVisible, setIsModalVisible] = useState(false);
  const [isPublicPageModalVisible, setIsPublicPageModalVisible] =
    useState(false);
  const [selectedModel, setSelectedModel] = useState<ModelInfo | null>(null);
  const router = useRouter();

  useEffect(() => {
    if (!accessToken) {
      return;
    }

    const fetchData = async () => {
      try {
        const _modelHubData = await modelHubCall(accessToken);
        console.log("ModelHubData:", _modelHubData);
        setModelHubData(_modelHubData.data);

        // Check whether the admin has enabled the public model hub page.
        getConfigFieldSetting(accessToken, "enable_public_model_hub")
          .then((data) => {
            console.log(`data: ${JSON.stringify(data)}`);
            if (data.field_value == true) {
              setPublicPageAllowed(true);
            }
          })
          .catch((error) => {
            // do nothing
          });
      } catch (error) {
        console.error("There was an error fetching the model data", error);
      }
    };

    fetchData();
  }, [accessToken, publicPage]);

  const showModal = (model: ModelInfo) => {
    setSelectedModel(model);
    setIsModalVisible(true);
  };

  const goToPublicModelPage = () => {
    router.replace(`/model_hub?key=${accessToken}`);
  };

  const handleMakePublicPage = async () => {
    if (!accessToken) {
      return;
    }
    updateConfigFieldSetting(accessToken, "enable_public_model_hub", true).then(
      (data) => {
        setIsPublicPageModalVisible(true);
      }
    );
  };

  const handleOk = () => {
    setIsModalVisible(false);
    setIsPublicPageModalVisible(false);
    setSelectedModel(null);
  };

  const handleCancel = () => {
    setIsModalVisible(false);
    setIsPublicPageModalVisible(false);
    setSelectedModel(null);
  };

  const copyToClipboard = (text: string) => {
    navigator.clipboard.writeText(text);
  };

  return (
    <div>
      <div className="w-full m-2 mt-2 p-8">
        {(publicPage && publicPageAllowed) || publicPage == false ? (
          <div className="w-full">
            <div className="flex justify-between items-center mb-4">
              <Title className="ml-8 text-center">Model Hub</Title>
              {publicPage == false ? (
                premiumUser ? (
                  <Button
                    className="ml-4"
                    onClick={() => handleMakePublicPage()}
                  >
                    ✨ Make Public
                  </Button>
                ) : (
                  <Button className="ml-4">
                    <a
                      href="https://forms.gle/W3U4PZpJGFHWtHyA9"
                      target="_blank"
                    >
                      ✨ Make Public
                    </a>
                  </Button>
                )
              ) : (
                <div className="flex justify-between items-center">
                  <p>Filter by key:</p>
                  <Text className="bg-gray-200 pr-2 pl-2 pt-1 pb-1 text-center">
                    {`/ui/model_hub?key=<YOUR_API_KEY>`}
                  </Text>
                </div>
              )}
            </div>
            <div className="grid grid-cols-2 gap-6 sm:grid-cols-3 lg:grid-cols-4 pr-8">
              {modelHubData &&
                modelHubData.map((model: ModelInfo) => (
                  <Card key={model.model_group} className="mt-5 mx-8">
                    <div className="flex justify-between">
                      <Title>{model.model_group}</Title>
                      <Tooltip title={model.model_group}>
                        <CopyOutlined
                          onClick={() => copyToClipboard(model.model_group)}
                          style={{ cursor: "pointer", marginRight: "10px" }}
                        />
                      </Tooltip>
                    </div>
                    <div className="my-5">
                      <Text>
                        Max Input Tokens:{" "}
                        {model?.max_input_tokens
                          ? model?.max_input_tokens
                          : "Unknown"}
                      </Text>
                      <Text>
                        Max Output Tokens:{" "}
                        {model?.max_output_tokens
                          ? model?.max_output_tokens
                          : "Unknown"}
                      </Text>
                      <Text>
                        Input Cost Per 1M Tokens (USD):{" "}
                        {model?.input_cost_per_token
                          ? `$${(model.input_cost_per_token * 1_000_000).toFixed(2)}`
                          : "Unknown"}
                      </Text>
                      <Text>
                        Output Cost Per 1M Tokens (USD):{" "}
                        {model?.output_cost_per_token
                          ? `$${(model.output_cost_per_token * 1_000_000).toFixed(2)}`
                          : "Unknown"}
                      </Text>
                    </div>
                    <div style={{ marginTop: "auto", textAlign: "right" }}>
                      <a
                        href="#"
                        onClick={() => showModal(model)}
                        style={{ color: "#1890ff", fontSize: "smaller" }}
                      >
                        View more <RightOutlined />
                      </a>
                    </div>
                  </Card>
                ))}
            </div>
          </div>
        ) : (
          <Card className="mx-auto max-w-xl mt-10">
            <Text className="text-xl text-center mb-2 text-black">
              Public Model Hub not enabled.
            </Text>
            <p className="text-base text-center text-slate-800">
              Ask your proxy admin to enable this on their Admin UI.
            </p>
          </Card>
        )}
      </div>
      <Modal
        title="Public Model Hub"
        width={600}
        visible={isPublicPageModalVisible}
        footer={null}
        onOk={handleOk}
        onCancel={handleCancel}
      >
        <div className="pt-5 pb-5">
          <div className="flex justify-between mb-4">
            <Text className="text-base mr-2">Shareable Link:</Text>
            <Text className="max-w-sm ml-2 bg-gray-200 pr-2 pl-2 pt-1 pb-1 text-center rounded">
              {`/ui/model_hub?key=<YOUR_API_KEY>`}
            </Text>
          </div>
          <div className="flex justify-end">
            <Button onClick={goToPublicModelPage}>See Page</Button>
          </div>
        </div>
      </Modal>
      <Modal
        title={
          selectedModel && selectedModel.model_group
            ? selectedModel.model_group
            : "Unknown Model"
        }
        width={800}
        visible={isModalVisible}
        footer={null}
        onOk={handleOk}
        onCancel={handleCancel}
      >
        {selectedModel && (
          <div>
            <p className="mb-4">
              <strong>Model Information & Usage</strong>
            </p>

            <TabGroup>
              <TabList>
                <Tab>Model Information</Tab>
                <Tab>OpenAI Python SDK</Tab>
                <Tab>Supported OpenAI Params</Tab>
                <Tab>LlamaIndex</Tab>
                <Tab>Langchain Py</Tab>
              </TabList>
              <TabPanels>
                <TabPanel>
                  <Text>Model Group:</Text>
                  <pre>{JSON.stringify(selectedModel, null, 2)}</pre>
                </TabPanel>
                <TabPanel>
                  <SyntaxHighlighter language="python">
                    {`
import openai

client = openai.OpenAI(
    api_key="your_api_key",
    base_url="http://0.0.0.0:4000" # LiteLLM Proxy is OpenAI compatible, Read More: https://docs.litellm.ai/docs/proxy/user_keys
)

response = client.chat.completions.create(
    model="${selectedModel.model_group}", # model to send to the proxy
    messages = [
        {
            "role": "user",
            "content": "this is a test request, write a short poem"
        }
    ]
)

print(response)
                    `}
                  </SyntaxHighlighter>
                </TabPanel>
                <TabPanel>
                  <SyntaxHighlighter language="python">
                    {`${selectedModel.supported_openai_params?.map((param) => `${param}\n`).join("")}`}
                  </SyntaxHighlighter>
                </TabPanel>
                <TabPanel>
                  <SyntaxHighlighter language="python">
                    {`
import os, dotenv

from llama_index.llms import AzureOpenAI
from llama_index.embeddings import AzureOpenAIEmbedding
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext

llm = AzureOpenAI(
    engine="${selectedModel.model_group}",  # model_name on litellm proxy
    temperature=0.0,
    azure_endpoint="http://0.0.0.0:4000",   # litellm proxy endpoint
    api_key="sk-1234",                      # litellm proxy API Key
    api_version="2023-07-01-preview",
)

embed_model = AzureOpenAIEmbedding(
    deployment_name="azure-embedding-model",
    azure_endpoint="http://0.0.0.0:4000",
    api_key="sk-1234",
    api_version="2023-07-01-preview",
)

documents = SimpleDirectoryReader("llama_index_data").load_data()
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
index = VectorStoreIndex.from_documents(documents, service_context=service_context)

query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(response)
                    `}
                  </SyntaxHighlighter>
                </TabPanel>
                <TabPanel>
                  <SyntaxHighlighter language="python">
                    {`
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain.schema import HumanMessage, SystemMessage

chat = ChatOpenAI(
    openai_api_base="http://0.0.0.0:4000",
    model = "${selectedModel.model_group}",
    temperature=0.1
)

messages = [
    SystemMessage(
        content="You are a helpful assistant that im using to make a test request to."
    ),
    HumanMessage(
        content="test from litellm. tell me why it's amazing in 1 sentence"
    ),
]
response = chat(messages)

print(response)
                    `}
                  </SyntaxHighlighter>
                </TabPanel>
              </TabPanels>
            </TabGroup>
            {/*
            <div>
              <Text>
                Additional Params: {JSON.stringify(selectedModel.litellm_params)}
              </Text>
            </div>
            */}
            {/* Add other model details here */}
          </div>
        )}
      </Modal>
    </div>
  );
};

export default ModelHub;
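
// Usage sketch (assumption, not part of this module): one way a Next.js page
// could render ModelHub. The import path, the page file location, and the
// query-param handling below are illustrative only — public model hub links
// pass the key via `?key=...` (see goToPublicModelPage above).
//
//   "use client";
//   import { useSearchParams } from "next/navigation";
//   import ModelHub from "@/components/model_hub"; // hypothetical path
//
//   export default function ModelHubPage() {
//     const searchParams = useSearchParams();
//     const accessToken = searchParams.get("key");
//     return (
//       <ModelHub accessToken={accessToken} publicPage={true} premiumUser={false} />
//     );
//   }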