Felix Zieger committed
Commit · 65676ec
Parent(s): 1a45d5d
update
Files changed:
- LICENSE +21 -0
- index.html +3 -3
- package-lock.json +0 -0
- package.json +6 -4
- scripts/update-model-names.js +111 -0
- src/components/GameContainer.tsx +42 -16
- src/components/admin/GameDetailsView.tsx +14 -4
- src/components/game/GuessDisplay.tsx +4 -1
- src/components/game/ModelSelector.tsx +122 -0
- src/components/game/SentenceBuilder.tsx +6 -1
- src/components/game/WelcomeScreen.tsx +12 -2
- src/components/game/guess-display/GuessDescription.tsx +28 -6
- src/components/game/guess-display/GuessResult.tsx +6 -3
- src/components/game/sentence-builder/InputForm.tsx +6 -1
- src/components/game/sentence-builder/SentenceDisplay.tsx +7 -2
- src/i18n/translations/de.ts +16 -3
- src/i18n/translations/en.ts +15 -2
- src/i18n/translations/es.ts +10 -3
- src/i18n/translations/fr.ts +11 -2
- src/i18n/translations/it.ts +16 -3
- src/i18n/translations/pt.ts +16 -4
- src/index.css +7 -2
- src/lib/modelNames.ts +300 -0
- src/lib/wordProcessing.ts +7 -1
- src/services/aiService.ts +6 -4
- supabase/functions/generate-word/index.ts +10 -10
- supabase/functions/guess-word/index.ts +10 -10
- vite.config.ts +1 -6
LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 Felix Zieger
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
index.html
CHANGED
@@ -1,8 +1,8 @@
 <!DOCTYPE html>
-<html lang="en">
+<html lang="en" style="height: 100%;">
 <head>
   <meta charset="UTF-8" />
-  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+  <meta name="viewport" content="width=device-width, initial-scale=1.0, interactive-widget=resizes-content" />
   <title>Think in Sync</title>
   <meta name="description" content="A word puzzle game." />
   <meta name="author" content="Team M1X" />
@@ -10,7 +10,7 @@
   <script defer data-domain="think-in-sync.com" src="https://plausible.sonnenhof-zieger.de/js/script.js"></script>
 </head>

-<body>
+<body style="height: 100%;">
   <div id="root"></div>
   <script src="https://cdn.gpteng.co/gptengineer.js" type="module"></script>
   <script type="module" src="/src/main.tsx"></script>
package-lock.json
CHANGED
The diff for this file is too large to render.
See raw diff
package.json
CHANGED
@@ -8,7 +8,9 @@
     "build": "vite build",
     "build:dev": "vite build --mode development",
     "lint": "eslint .",
-    "preview": "vite preview"
+    "preview": "vite preview",
+    "update-model-names": "node scripts/update-model-names.js",
+    "deploy-functions": "supabase functions deploy --use-api"
   },
   "dependencies": {
     "@hookform/resolvers": "^3.9.0",
@@ -72,17 +74,17 @@
     "@types/node": "^22.5.5",
     "@types/react": "^18.3.3",
     "@types/react-dom": "^18.3.0",
-    "@vitejs/plugin-react-swc": "^3.
+    "@vitejs/plugin-react-swc": "^3.8.1",
     "autoprefixer": "^10.4.20",
     "eslint": "^9.9.0",
     "eslint-plugin-react-hooks": "^5.1.0-rc.0",
     "eslint-plugin-react-refresh": "^0.4.9",
     "globals": "^15.9.0",
-    "lovable-tagger": "^1.0.19",
     "postcss": "^8.4.47",
     "tailwindcss": "^3.4.11",
+    "ts-node": "^10.9.2",
     "typescript": "^5.5.3",
     "typescript-eslint": "^8.0.1",
-    "vite": "^
+    "vite": "^6.2.3"
   }
 }
scripts/update-model-names.js
ADDED
@@ -0,0 +1,111 @@
+// ES Module version of the model names updater
+import axios from 'axios';
+import fs from 'fs';
+import path from 'path';
+import { fileURLToPath } from 'url';
+
+// Get the current file's directory
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = path.dirname(__filename);
+
+// Set up unhandled rejection handler
+process.on('unhandledRejection', (reason, promise) => {
+  console.error('Unhandled Rejection at:', promise, 'reason:', reason);
+  process.exit(1);
+});
+
+/**
+ * Clean up model name by removing ":", "(free)" and deduplicating words
+ * @param {string} name - Original model name
+ * @returns {string} - Cleaned model name
+ */
+function cleanModelName(name) {
+  // Remove ": " and " (free)" from name
+  let cleanedName = name.replace(/:\s*/g, ' ').replace(/\s*\(free\)/g, '');
+
+  // Split by space to check for duplicated words
+  const words = cleanedName.split(' ');
+  const result = [];
+
+  for (let i = 0; i < words.length; i++) {
+    // Skip this word if it's the same as the next word
+    if (i < words.length - 1 && words[i].toLowerCase() === words[i + 1].toLowerCase()) {
+      continue;
+    }
+    result.push(words[i]);
+  }
+
+  return result.join(' ').trim();
+}
+
+async function updateModelNames() {
+  try {
+    console.log('Fetching models from OpenRouter API...');
+    const response = await axios.get('https://openrouter.ai/api/v1/models');
+
+    if (!response.data || !response.data.data) {
+      console.error('Invalid response format:', JSON.stringify(response.data));
+      process.exit(1);
+    }
+
+    const models = response.data.data;
+    console.log(`Received ${models.length} models from API`);
+
+    const modelNames = {};
+
+    models.forEach((model) => {
+      modelNames[model.id] = cleanModelName(model.name);
+    });
+
+    // Get path to the modelNames.ts file
+    const modelNamesPath = path.join(__dirname, '..', 'src', 'lib', 'modelNames.ts');
+    console.log(`Updating file at: ${modelNamesPath}`);
+
+    try {
+      const currentContent = fs.readFileSync(modelNamesPath, 'utf-8');
+      console.log('Successfully read existing modelNames.ts file');
+
+      // Create the new content
+      const newContent = `export const modelNames: Record<string, string> = ${JSON.stringify(modelNames, null, 2)};
+
+export const getModelDisplayName = (modelId: string): string => {
+  return modelNames[modelId] || modelId;
+};`;
+
+      // Write the updated content back to the file
+      fs.writeFileSync(modelNamesPath, newContent);
+      console.log('Successfully updated model names!');
+    } catch (fileError) {
+      console.error('Error working with the file:', fileError);
+      process.exit(1);
+    }
+  } catch (error) {
+    console.error('Caught error in updateModelNames:');
+
+    if (axios.isAxiosError(error)) {
+      console.error('API Error:', {
+        status: error.response?.status,
+        statusText: error.response?.statusText,
+        data: error.response?.data,
+        message: error.message
+      });
+    } else if (error instanceof Error) {
+      console.error('Error details:', error.message, error.stack);
+    } else {
+      console.error('Unknown error type:', typeof error, JSON.stringify(error));
+    }
+
+    process.exit(1);
+  }
+}
+
+// Run the function using an immediately invoked async function expression (IIFE)
+(async () => {
+  try {
+    await updateModelNames();
+    console.log('Script completed successfully!');
+  } catch (err) {
+    console.error('Final error handler caught:', err);
+    process.exit(1);
+  }
+})();
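
A quick sketch of what the cleaner above produces, using an illustrative (made-up) OpenRouter-style name rather than one taken from the commit:

// cleanModelName("DeepSeek: DeepSeek Chat V3 (free)")
//   1. ": " becomes a space and " (free)" is dropped  -> "DeepSeek DeepSeek Chat V3"
//   2. the adjacent duplicate "DeepSeek" is collapsed  -> "DeepSeek Chat V3"
console.log(cleanModelName("DeepSeek: DeepSeek Chat V3 (free)")); // "DeepSeek Chat V3"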
src/components/GameContainer.tsx
CHANGED
@@ -7,6 +7,7 @@ import { getDailyGame } from "@/services/dailyGameService";
 import { useToast } from "@/components/ui/use-toast";
 import { WelcomeScreen } from "./game/WelcomeScreen";
 import { ThemeSelector } from "./game/ThemeSelector";
+import { ModelSelector } from "./game/ModelSelector";
 import { SentenceBuilder } from "./game/SentenceBuilder";
 import { GuessDisplay } from "./game/GuessDisplay";
 import { GameReview } from "./game/GameReview";
@@ -17,7 +18,12 @@ import { supabase } from "@/integrations/supabase/client";
 import { Language } from "@/i18n/translations";
 import { normalizeWord } from "@/lib/wordProcessing";

-type GameState = "welcome" | "theme-selection" | "building-sentence" | "showing-guess" | "game-review" | "invitation";
+type GameState = "welcome" | "theme-selection" | "model-selection" | "building-sentence" | "showing-guess" | "game-review" | "invitation";
+
+interface SentenceWord {
+  word: string;
+  provider: 'player' | 'ai';
+}

 export const GameContainer = () => {
   const [searchParams] = useSearchParams();
@@ -33,7 +39,7 @@ export const GameContainer = () => {
   const [words, setWords] = useState<string[]>([]);
   const [currentWordIndex, setCurrentWordIndex] = useState<number>(0);
   const [playerInput, setPlayerInput] = useState<string>("");
-  const [sentence, setSentence] = useState<
+  const [sentence, setSentence] = useState<SentenceWord[]>([]);
   const [isAiThinking, setIsAiThinking] = useState(false);
   const [aiGuess, setAiGuess] = useState<string>("");
   const [aiModel, setAiModel] = useState<string>("");
@@ -62,15 +68,20 @@ export const GameContainer = () => {

   useEffect(() => {
     if (location.pathname === '/' && gameId) {
-
-
+      if (gameState !== "model-selection") {
+        console.log("Location changed to root with active gameId, handling back navigation");
+        handleBack();
+      }
     }
-  }, [location.pathname, gameId]);
+  }, [location.pathname, gameId, gameState]);

   const handleStartDaily = async () => {
     try {
       const dailyGameId = await getDailyGame(language);
-
+      if (dailyGameId) {
+        setGameId(dailyGameId);
+        setGameState("model-selection");
+      }
     } catch (error) {
       console.error('Error starting daily game:', error);
       toast({
@@ -166,9 +177,21 @@ export const GameContainer = () => {

   const handleThemeSelect = async (theme: string) => {
     setCurrentTheme(theme);
+    setGameState("model-selection");
+  };
+
+  const handleModelSelect = async (model: string) => {
+    setAiModel(model);
     try {
-
-
+      let newGameId = gameId;
+      let newSessionId = "";
+
+      // If we don't have a gameId (not from daily challenge), create a new game
+      if (!newGameId) {
+        newGameId = await createGame(currentTheme, language);
+      }
+
+      newSessionId = await createSession(newGameId);

       const { data: gameData, error: gameError } = await supabase
         .from('games')
@@ -187,7 +210,7 @@ export const GameContainer = () => {
       setGameState("building-sentence");
       setSuccessfulRounds(0);
       setTotalWordsInSuccessfulRounds(0);
-      console.log("Game started with theme:",
+      console.log("Game started with theme:", currentTheme, "language:", language, "model:", model);
     } catch (error) {
       console.error('Error starting new game:', error);
       toast({
@@ -203,14 +226,14 @@ export const GameContainer = () => {
     if (!playerInput.trim()) return;

     const word = playerInput.trim();
-    const newSentence = [...sentence, word];
+    const newSentence: SentenceWord[] = [...sentence, { word, provider: 'player' as const }];
     setSentence(newSentence);
     setPlayerInput("");

     setIsAiThinking(true);
     try {
-      const aiWord = await generateAIResponse(currentWord, newSentence, language);
-      const newSentenceWithAi = [...newSentence, aiWord];
+      const aiWord = await generateAIResponse(currentWord, newSentence.map(w => w.word), language, aiModel);
+      const newSentenceWithAi: SentenceWord[] = [...newSentence, { word: aiWord, provider: 'ai' as const }];
       setSentence(newSentenceWithAi);
     } catch (error) {
       console.error('Error in AI turn:', error);
@@ -250,17 +273,17 @@ export const GameContainer = () => {
   const handleMakeGuess = async () => {
     setIsAiThinking(true);
     try {
-      let finalSentence = sentence;
+      let finalSentence: SentenceWord[] = sentence;
       if (playerInput.trim()) {
-        finalSentence = [...sentence, playerInput.trim()];
+        finalSentence = [...sentence, { word: playerInput.trim(), provider: 'player' as const }];
         setSentence(finalSentence);
         setPlayerInput("");
       }

       if (finalSentence.length === 0) return;

-      const sentenceString = finalSentence.join(' ');
-      const { guess, model } = await guessWord(sentenceString, language);
+      const sentenceString = finalSentence.map(w => w.word).join(' ');
+      const { guess, model } = await guessWord(sentenceString, language, aiModel);
       setAiGuess(guess);
       setAiModel(model);

@@ -350,6 +373,8 @@ export const GameContainer = () => {
         <WelcomeScreen onStartDaily={handleStartDaily} onStartNew={handleStart} />
       ) : gameState === "theme-selection" ? (
         <ThemeSelector onThemeSelect={handleThemeSelect} onBack={handleBack} />
+      ) : gameState === "model-selection" ? (
+        <ModelSelector onModelSelect={handleModelSelect} onBack={handleBack} />
       ) : gameState === "invitation" ? (
         <GameInvitation onContinue={handleInvitationContinue} onBack={handleBack} />
       ) : gameState === "building-sentence" ? (
@@ -379,6 +404,7 @@ export const GameContainer = () => {
           sessionId={sessionId}
           currentTheme={currentTheme}
           normalizeWord={(word: string) => normalizeWord(word, language)}
+          aiModel={aiModel}
         />
       ) : (
         <GameReview
src/components/admin/GameDetailsView.tsx
CHANGED
@@ -18,6 +18,11 @@ interface GameResult {
   is_correct: boolean;
 }

+interface SentenceWord {
+  word: string;
+  provider: 'player' | 'ai';
+}
+
 interface ComparisonDialogProps {
   isOpen: boolean;
   onClose: () => void;
@@ -28,6 +33,13 @@
 const ComparisonDialog = ({ isOpen, onClose, currentResult, friendResult }: ComparisonDialogProps) => {
   const t = useTranslation();

+  const convertToSentenceWords = (description: string): SentenceWord[] => {
+    return description.split(' ').map((word, index) => ({
+      word,
+      provider: index % 2 === 0 ? 'player' as const : 'ai' as const
+    }));
+  };
+
   return (
     <Dialog open={isOpen} onOpenChange={onClose}>
       <DialogContent>
@@ -42,8 +54,7 @@ const ComparisonDialog = ({ isOpen, onClose, currentResult, friendResult }: Comp
             <h3 className="font-semibold mb-2">{t.game.review.yourDescription}</h3>
           )}
           <GuessDescription
-            sentence={currentResult?.description
-            aiGuess={currentResult?.ai_guess || ''}
+            sentence={currentResult?.description ? convertToSentenceWords(currentResult.description) : []}
           />
           <p className="text-sm text-gray-600 mt-2">
             {t.guess.aiGuessedDescription}: <span className="font-medium">{currentResult?.ai_guess}</span>
@@ -53,8 +64,7 @@ const ComparisonDialog = ({ isOpen, onClose, currentResult, friendResult }: Comp
           <div>
             <h3 className="font-semibold mb-2">{t.game.review.friendDescription}</h3>
             <GuessDescription
-              sentence={friendResult.description
-              aiGuess={friendResult.ai_guess || ''}
+              sentence={friendResult.description ? convertToSentenceWords(friendResult.description) : []}
           />
           <p className="text-sm text-gray-600 mt-2">
             {t.guess.aiGuessedDescription}: <span className="font-medium">{friendResult.ai_guess}</span>
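
For orientation, the helper above assigns even-indexed words to the player and odd-indexed words to the AI. With a hypothetical stored description of four words:

// convertToSentenceWords("big yellow round fruit") returns:
// [
//   { word: "big",    provider: "player" },  // index 0 (even) -> player
//   { word: "yellow", provider: "ai"     },  // index 1 (odd)  -> ai
//   { word: "round",  provider: "player" },
//   { word: "fruit",  provider: "ai"     }
// ]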
src/components/game/GuessDisplay.tsx
CHANGED
@@ -19,6 +19,7 @@ interface GuessDisplayProps {
   onGameReview: () => void;
   onBack?: () => void;
   normalizeWord: (word: string) => string;
+  aiModel?: string;
 }

 export const GuessDisplay = ({
@@ -33,6 +34,7 @@ export const GuessDisplay = ({
   onBack,
   onGameReview,
   normalizeWord,
+  aiModel,
 }: GuessDisplayProps) => {
   const [showConfirmDialog, setShowConfirmDialog] = useState(false);
   const t = useTranslation();
@@ -73,12 +75,13 @@ export const GuessDisplay = ({

       <WordDisplay currentWord={currentWord} />

-      <GuessDescription sentence={sentence}
+      <GuessDescription sentence={sentence} model={aiModel} />

       <GuessResult
         aiGuess={aiGuess}
         isCorrect={isGuessCorrect()}
         onNextRound={onNextRound}
+        model={aiModel}
       />

       <ActionButtons
src/components/game/ModelSelector.tsx
ADDED
@@ -0,0 +1,122 @@
+import { useState, useEffect, useRef } from "react";
+import { Button } from "@/components/ui/button";
+import { motion } from "framer-motion";
+import { useTranslation } from "@/hooks/useTranslation";
+import { useContext } from "react";
+import { LanguageContext } from "@/contexts/LanguageContext";
+import { ArrowLeft } from "lucide-react";
+import { modelNames } from "@/lib/modelNames";
+
+interface ModelSelectorProps {
+  onModelSelect: (model: string) => void;
+  onBack: () => void;
+}
+
+// TODO: Once user authentication is implemented, this will be replaced with a dynamic list
+// based on the user's subscription level
+const AVAILABLE_MODELS = [
+  "google/gemini-2.0-flash-lite-001",
+  // "x-ai/grok-2-1212",
+  "deepseek/deepseek-chat:free",
+  "meta-llama/llama-3.3-70b-instruct:free",
+];
+
+export const ModelSelector = ({ onModelSelect, onBack }: ModelSelectorProps) => {
+  const [selectedModel, setSelectedModel] = useState<string>(AVAILABLE_MODELS[0]);
+  const [isGenerating, setIsGenerating] = useState(false);
+  const t = useTranslation();
+  const { language } = useContext(LanguageContext);
+
+  const handleSubmit = async () => {
+    if (!selectedModel) return;
+
+    setIsGenerating(true);
+    try {
+      await onModelSelect(selectedModel);
+    } finally {
+      setIsGenerating(false);
+    }
+  };
+
+  useEffect(() => {
+    const handleKeyPress = (e: KeyboardEvent) => {
+      if (e.target instanceof HTMLInputElement) return;
+
+      // Allow backspace to go back
+      if (e.key === 'backspace') {
+        e.preventDefault();
+        onBack();
+      }
+
+      // Allow enter to submit if a model is selected
+      if (e.key === 'enter' && selectedModel) {
+        handleSubmit();
+      }
+
+      // Model selection shortcuts
+      switch(e.key.toLowerCase()) {
+        case 'a':
+          setSelectedModel(AVAILABLE_MODELS[0]);
+          break;
+        case 'b':
+          setSelectedModel(AVAILABLE_MODELS[1]);
+          break;
+        case 'c':
+          setSelectedModel(AVAILABLE_MODELS[2]);
+          break;
+        case 'enter':
+          if (selectedModel) {
+            handleSubmit();
+          }
+          break;
+      }
+    };
+
+    window.addEventListener('keydown', handleKeyPress);
+    return () => window.removeEventListener('keydown', handleKeyPress);
+  }, [selectedModel, onBack, handleSubmit]);
+
+  return (
+    <motion.div
+      initial={{ opacity: 0 }}
+      animate={{ opacity: 1 }}
+      className="space-y-6"
+    >
+      <div className="flex items-center justify-between mb-4">
+        <Button
+          variant="ghost"
+          size="icon"
+          onClick={onBack}
+          className="hover:bg-gray-100"
+        >
+          <ArrowLeft className="h-4 w-4" />
+        </Button>
+        <h2 className="text-2xl font-bold text-gray-900">{t.models.title}</h2>
+        <div className="w-8" /> {/* Spacer for centering */}
+      </div>
+
+      <p className="text-gray-600 text-center">{t.models.subtitle}</p>
+
+      <div className="space-y-4">
+        {AVAILABLE_MODELS.map((modelId, index) => (
+          <Button
+            key={modelId}
+            variant={selectedModel === modelId ? "default" : "outline"}
+            className="w-full justify-between"
+            onClick={() => setSelectedModel(modelId)}
+          >
+            {modelNames[modelId]} <span className="text-sm opacity-50">{t.themes.pressKey} {String.fromCharCode(65 + index)}</span>
+          </Button>
+        ))}
+      </div>
+
+      <Button
+        onClick={handleSubmit}
+        className="w-full"
+        disabled={!selectedModel || isGenerating}
+      >
+        {isGenerating ? t.models.generating : `${t.models.continue} ⏎`}
+      </Button>
+    </motion.div>
+  );
+};
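
For orientation, the component is consumed from GameContainer.tsx as shown further up; a minimal sketch with placeholder handlers (not taken from the commit). Keys A/B/C select the three AVAILABLE_MODELS entries and Enter confirms the selection.

// Rendered while gameState === "model-selection"
<ModelSelector
  onModelSelect={(model) => startGameWith(model)}  // placeholder handler
  onBack={() => setGameState("theme-selection")}   // placeholder handler
/>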
src/components/game/SentenceBuilder.tsx
CHANGED
@@ -17,10 +17,15 @@ import { SentenceDisplay } from "./sentence-builder/SentenceDisplay";
 import { InputForm } from "./sentence-builder/InputForm";
 import { Button } from "@/components/ui/button";

+interface SentenceWord {
+  word: string;
+  provider: 'player' | 'ai';
+}
+
 interface SentenceBuilderProps {
   currentWord: string;
   successfulRounds: number;
-  sentence:
+  sentence: SentenceWord[];
   playerInput: string;
   isAiThinking: boolean;
   onInputChange: (value: string) => void;
src/components/game/WelcomeScreen.tsx
CHANGED
@@ -1,6 +1,5 @@
-
 import { motion } from "framer-motion";
-import { useState } from "react";
+import { useState, useEffect } from "react";
 import { HighScoreBoard } from "../HighScoreBoard";
 import { Dialog, DialogContent } from "@/components/ui/dialog";
 import { LanguageSelector } from "./LanguageSelector";
@@ -25,6 +24,17 @@ export const WelcomeScreen = ({ onStartDaily, onStartNew }: WelcomeScreenProps)
   const [showStats, setShowStats] = useState(false);
   const t = useTranslation();

+  useEffect(() => {
+    const handleKeyPress = (event: KeyboardEvent) => {
+      if (event.key === 'Enter') {
+        onStartDaily();
+      }
+    };
+
+    window.addEventListener('keydown', handleKeyPress);
+    return () => window.removeEventListener('keydown', handleKeyPress);
+  }, [onStartDaily]);
+
   return (
     <>
       <motion.div
src/components/game/guess-display/GuessDescription.tsx
CHANGED
@@ -1,19 +1,41 @@
 import { useTranslation } from "@/hooks/useTranslation";
+import { getModelDisplayName } from "@/lib/modelNames";
+
+interface SentenceWord {
+  word: string;
+  provider: 'player' | 'ai';
+}

 interface GuessDescriptionProps {
-  sentence:
-
+  sentence: SentenceWord[];
+  model?: string;
 }

-export const GuessDescription = ({ sentence,
+export const GuessDescription = ({ sentence, model }: GuessDescriptionProps) => {
   const t = useTranslation();

   return (
     <div className="space-y-2">
-      <p className="text-sm text-gray-600">
+      <p className="text-sm text-gray-600">
+        <span className="inline-block border-b-2 border-blue-500">{t.guess.you}</span>
+        {" "}{t.guess.and}{" "}
+        <span className="inline-block border-b-2 border-green-500">{model ? getModelDisplayName(model) : t.guess.aiModel}</span>
+        {" "}{t.guess.providedDescription}
+      </p>
       <div className="rounded-lg bg-gray-50">
-        <p className="p-4 text-2xl tracking-wider text-gray-800">
-          {sentence.
+        <p className="p-4 text-2xl tracking-wider text-gray-800 flex flex-wrap gap-1">
+          {sentence.map((wordObj, index) => (
+            <span
+              key={index}
+              className={`inline-block ${
+                wordObj.provider === 'player'
+                  ? "border-b-2 border-blue-500" // Player words
+                  : "border-b-2 border-green-500" // AI words
+              }`}
+            >
+              {wordObj.word}
+            </span>
+          ))}
         </p>
       </div>
     </div>
src/components/game/guess-display/GuessResult.tsx
CHANGED
@@ -1,19 +1,22 @@
 import { useTranslation } from "@/hooks/useTranslation";
-import {
+import { getModelDisplayName } from "@/lib/modelNames";

 interface GuessResultProps {
   aiGuess: string;
   isCorrect: boolean;
   onNextRound: () => void;
+  model?: string;
 }

-export const GuessResult = ({ aiGuess, isCorrect, onNextRound }: GuessResultProps) => {
+export const GuessResult = ({ aiGuess, isCorrect, onNextRound, model }: GuessResultProps) => {
   const t = useTranslation();

   return (
     <div className="space-y-4">
       <p className="text-sm text-gray-600">
-        {t.guess.aiGuessedDescription}
+        {t.guess.aiGuessedDescription.prefix}{" "}
+        {model ? getModelDisplayName(model) : t.guess.aiGuessedDescription.aiName}{" "}
+        {t.guess.aiGuessedDescription.suffix}
       </p>
       <div className={`rounded-lg ${isCorrect ? 'bg-green-50' : 'bg-red-50'}`}>
         <p className={`p-4 text-2xl font-bold tracking-wider ${isCorrect ? 'text-green-600' : 'text-red-600'}`}>
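
For orientation, with the restructured en.ts strings further down and a model id that resolves through getModelDisplayName, the paragraph above composes to something like (illustrative):

// prefix + display name + suffix, e.g. for model = "deepseek/deepseek-chat:free":
// "Based on this description, DeepSeek V3 guessed"
// and, when no model is set:
// "Based on this description, the AI guessed"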
src/components/game/sentence-builder/InputForm.tsx
CHANGED
@@ -3,6 +3,11 @@ import { Input } from "@/components/ui/input";
 import { Button } from "@/components/ui/button";
 import { useTranslation } from "@/hooks/useTranslation";

+interface SentenceWord {
+  word: string;
+  provider: 'player' | 'ai';
+}
+
 interface InputFormProps {
   playerInput: string;
   onInputChange: (value: string) => void;
@@ -13,7 +18,7 @@ interface InputFormProps {
   containsTargetWord: boolean;
   isTooLong: boolean;
   isValidInput: boolean;
-  sentence:
+  sentence: SentenceWord[];
 }

 export const InputForm = ({
src/components/game/sentence-builder/SentenceDisplay.tsx
CHANGED
@@ -1,7 +1,12 @@
 import { motion } from "framer-motion";

+interface SentenceWord {
+  word: string;
+  provider: 'player' | 'ai';
+}
+
 interface SentenceDisplayProps {
-  sentence:
+  sentence: SentenceWord[];
 }

 export const SentenceDisplay = ({ sentence }: SentenceDisplayProps) => {
@@ -14,7 +19,7 @@ export const SentenceDisplay = ({ sentence }: SentenceDisplayProps) => {
       className="mb-4 text-left p-3 rounded-lg bg-gray-50"
     >
       <p className="text-gray-700">
-        {sentence.join(" ")}
+        {sentence.map(w => w.word).join(" ")}
       </p>
     </motion.div>
   );
src/i18n/translations/de.ts
CHANGED
@@ -99,14 +99,21 @@ export const de = {
   guess: {
     title: "KI-Vermutung",
     goalDescription: "Dein Ziel war es folgendes Wort zu beschreiben",
-    providedDescription: "
-    aiGuessedDescription:
+    providedDescription: "beschriebt das Wort mit",
+    aiGuessedDescription: {
+      prefix: "Basierend auf dieser Beschreibung",
+      aiName: "die KI",
+      suffix: "hat geraten"
+    },
     correct: "Das ist richtig!",
     incorrect: "Das ist falsch.",
     nextRound: "Nächste Runde",
     playAgain: "Erneut spielen",
     viewLeaderboard: "In Bestenliste eintragen",
-    cheatingDetected: "Betrugsversuch erkannt!"
+    cheatingDetected: "Betrugsversuch erkannt!",
+    you: "Du",
+    and: "und",
+    aiModel: "KI-Modell"
   },
   themes: {
     title: "Wähle ein Thema",
@@ -180,5 +187,11 @@ export const de = {
       "Die KI wird nach jedem Satz versuchen, dein Wort zu erraten"
     ]
   }
+  },
+  models: {
+    title: "KI-Modell wählen",
+    subtitle: "Wähle das KI-Modell, das mit dir zusammen spielen wird",
+    continue: "Weiter",
+    generating: "Wird generiert..."
   }
 };
src/i18n/translations/en.ts
CHANGED
@@ -98,14 +98,21 @@ export const en = {
   guess: {
     title: "AI's Guess",
     goalDescription: "Your goal was to describe the word",
-    providedDescription: "
-    aiGuessedDescription:
+    providedDescription: "provided the description",
+    aiGuessedDescription: {
+      prefix: "Based on this description,",
+      aiName: "the AI",
+      suffix: "guessed"
+    },
     correct: "This is right!",
     incorrect: "This is wrong.",
     nextRound: "Next Round",
     playAgain: "Play Again",
     viewLeaderboard: "Save your score",
     cheatingDetected: "Cheating detected!",
+    you: "You",
+    and: "and",
+    aiModel: "AI Model"
   },
   themes: {
     title: "Choose a Theme",
@@ -179,5 +186,11 @@ export const en = {
       "The AI will try to guess your word after each sentence"
     ]
   }
+  },
+  models: {
+    title: "Choose an AI Model",
+    subtitle: "Select the AI model that will play together with you",
+    continue: "Continue",
+    generating: "Generating..."
   }
 };
src/i18n/translations/es.ts
CHANGED
@@ -99,14 +99,21 @@ export const es = {
   guess: {
     title: "Suposición de la IA",
     goalDescription: "Tu objetivo era describir la palabra",
-    providedDescription: "
-    aiGuessedDescription:
+    providedDescription: "proporcionaron la descripción",
+    aiGuessedDescription: {
+      prefix: "Basándose en esta descripción,",
+      aiName: "la IA",
+      suffix: "adivinó"
+    },
     correct: "¡Esto es correcto!",
     incorrect: "Esto es incorrecto.",
     nextRound: "Siguiente Ronda",
     playAgain: "Jugar de Nuevo",
     viewLeaderboard: "Ver Clasificación",
-    cheatingDetected: "¡Trampa detectada!"
+    cheatingDetected: "¡Trampa detectada!",
+    you: "Tú",
+    and: "y",
+    aiModel: "Modelo de IA"
   },
   themes: {
     title: "Elige un Tema",
src/i18n/translations/fr.ts
CHANGED
@@ -98,14 +98,17 @@ export const fr = {
   guess: {
     title: "Devinette de l'IA",
     goalDescription: "Votre objectif était de décrire le mot",
-    providedDescription: "
+    providedDescription: "ont fourni la description",
     aiGuessedDescription: "Sur la base de cette description, l'IA a deviné",
     correct: "C'est correct !",
     incorrect: "C'est incorrect.",
     nextRound: "Tour Suivant",
     playAgain: "Rejouer",
     viewLeaderboard: "Voir les Scores",
-    cheatingDetected: "Tentative de triche détectée !"
+    cheatingDetected: "Tentative de triche détectée !",
+    you: "Vous",
+    and: "et",
+    aiModel: "Modèle IA"
   },
   themes: {
     title: "Choisissez un Thème",
@@ -179,5 +182,11 @@ export const fr = {
       "L'IA essaiera de deviner votre mot après chaque phrase"
     ]
   }
+  },
+  models: {
+    title: "Choisissez un Modèle IA",
+    subtitle: "Sélectionnez le modèle IA qui jouera avec vous",
+    continue: "Continuer",
+    generating: "Génération en cours..."
   }
 };
src/i18n/translations/it.ts
CHANGED
@@ -100,14 +100,21 @@ export const it = {
     sentence: "La tua frase",
     aiGuessed: "L'IA ha indovinato",
     goalDescription: "Il tuo obiettivo era descrivere la parola",
-    providedDescription: "
-    aiGuessedDescription:
+    providedDescription: "hanno fornito la descrizione",
+    aiGuessedDescription: {
+      prefix: "Basandosi su questa descrizione,",
+      aiName: "l'IA",
+      suffix: "ha indovinato"
+    },
     correct: "Corretto! L'IA ha indovinato la parola!",
     incorrect: "Sbagliato. Riprova!",
     nextRound: "Prossimo Turno",
     playAgain: "Gioca Ancora",
     viewLeaderboard: "Vedi Classifica",
-    cheatingDetected: "Tentativo di imbroglio rilevato!"
+    cheatingDetected: "Tentativo di imbroglio rilevato!",
+    you: "Tu",
+    and: "e",
+    aiModel: "Modello IA"
   },
   themes: {
     title: "Scegli un Tema",
@@ -181,5 +188,11 @@ export const it = {
      "L'IA cercherà di indovinare la tua parola dopo ogni frase"
     ]
   }
+  },
+  models: {
+    title: "Scegli un Modello IA",
+    subtitle: "Seleziona il modello IA che giocherà insieme a te",
+    continue: "Continua",
+    generating: "Generazione in corso..."
   }
 };
src/i18n/translations/pt.ts
CHANGED
@@ -1,4 +1,3 @@
-
 export const pt = {
   game: {
     title: "Think in Sync",
@@ -100,14 +99,21 @@ export const pt = {
   guess: {
     title: "Suposição da IA",
     goalDescription: "Seu objetivo era descrever a palavra",
-    providedDescription: "
-    aiGuessedDescription:
+    providedDescription: "forneceram a descrição",
+    aiGuessedDescription: {
+      prefix: "Com base nesta descrição,",
+      aiName: "a IA",
+      suffix: "adivinhou"
+    },
     correct: "Isso está correto!",
     incorrect: "Isso está incorreto.",
     nextRound: "Próxima Rodada",
     playAgain: "Jogar Novamente",
     viewLeaderboard: "Ver Placar",
-    cheatingDetected: "Trapaça detectada!"
+    cheatingDetected: "Trapaça detectada!",
+    you: "Você",
+    and: "e",
+    aiModel: "Modelo de IA"
   },
   themes: {
     title: "Escolha um Tema",
@@ -181,5 +187,11 @@ export const pt = {
       "A IA tentará adivinhar sua palavra após cada frase"
     ]
   }
+  },
+  models: {
+    title: "Escolha um Modelo de IA",
+    subtitle: "Selecione o modelo de IA que jogará junto com você",
+    continue: "Continuar",
+    generating: "Gerando..."
   }
 };
src/index.css
CHANGED
@@ -37,7 +37,12 @@
 }

 @layer base {
-  body {
-
+  html, body {
+    height: 100%;
+    background: linear-gradient(to bottom right, rgb(249, 250, 251), rgb(243, 244, 246));
+  }
+
+  #root {
+    height: 100%;
   }
 }
src/lib/modelNames.ts
ADDED
@@ -0,0 +1,300 @@
+export const modelNames: Record<string, string> = {
+  "allenai/molmo-7b-d:free": "AllenAI Molmo 7B D",
+  "bytedance-research/ui-tars-72b:free": "Bytedance UI-TARS 72B",
+  "qwen/qwen2.5-vl-3b-instruct:free": "Qwen Qwen2.5 VL 3B Instruct",
+  "google/gemini-2.5-pro-exp-03-25:free": "Google Gemini Pro 2.5 Experimental",
+  "qwen/qwen2.5-vl-32b-instruct:free": "Qwen Qwen2.5 VL 32B Instruct",
+  "deepseek/deepseek-chat-v3-0324:free": "DeepSeek V3 0324",
+  "deepseek/deepseek-chat-v3-0324": "DeepSeek V3 0324",
+  "featherless/qwerky-72b:free": "Qwerky 72b",
+  "openai/o1-pro": "OpenAI o1-pro",
+  "mistralai/mistral-small-3.1-24b-instruct:free": "Mistral Small 3.1 24B",
+  "mistralai/mistral-small-3.1-24b-instruct": "Mistral Small 3.1 24B",
+  "open-r1/olympiccoder-7b:free": "OlympicCoder 7B",
+  "open-r1/olympiccoder-32b:free": "OlympicCoder 32B",
+  "steelskull/l3.3-electra-r1-70b": "SteelSkull L3.3 Electra R1 70B",
+  "allenai/olmo-2-0325-32b-instruct": "AllenAI Olmo 2 32B Instruct",
+  "google/gemma-3-1b-it:free": "Google Gemma 3 1B",
+  "google/gemma-3-4b-it:free": "Google Gemma 3 4B",
+  "ai21/jamba-1.6-large": "AI21 Jamba 1.6 Large",
+  "ai21/jamba-1.6-mini": "AI21 Jamba Mini 1.6",
+  "google/gemma-3-12b-it:free": "Google Gemma 3 12B",
+  "cohere/command-a": "Cohere Command A",
+  "openai/gpt-4o-mini-search-preview": "OpenAI GPT-4o-mini Search Preview",
+  "openai/gpt-4o-search-preview": "OpenAI GPT-4o Search Preview",
+  "tokyotech-llm/llama-3.1-swallow-70b-instruct-v0.3": "Swallow Llama 3.1 Swallow 70B Instruct V0.3",
+  "rekaai/reka-flash-3:free": "Reka Flash 3",
+  "google/gemma-3-27b-it:free": "Google Gemma 3 27B",
+  "google/gemma-3-27b-it": "Google Gemma 3 27B",
+  "thedrummer/anubis-pro-105b-v1": "TheDrummer Anubis Pro 105B V1",
+  "latitudegames/wayfarer-large-70b-llama-3.3": "LatitudeGames Wayfarer Large 70B Llama 3.3",
+  "thedrummer/skyfall-36b-v2": "TheDrummer Skyfall 36B V2",
+  "microsoft/phi-4-multimodal-instruct": "Microsoft Phi 4 Multimodal Instruct",
+  "perplexity/sonar-reasoning-pro": "Perplexity Sonar Reasoning Pro",
+  "perplexity/sonar-pro": "Perplexity Sonar Pro",
+  "perplexity/sonar-deep-research": "Perplexity Sonar Deep Research",
+  "deepseek/deepseek-r1-zero:free": "DeepSeek R1 Zero",
+  "qwen/qwq-32b:free": "Qwen QwQ 32B",
+  "qwen/qwq-32b": "Qwen QwQ 32B",
+  "qwen/qwen2.5-32b-instruct": "Qwen Qwen2.5 32B Instruct",
+  "moonshotai/moonlight-16b-a3b-instruct:free": "Moonshot AI Moonlight 16B A3B Instruct",
+  "nousresearch/deephermes-3-llama-3-8b-preview:free": "Nous DeepHermes 3 Llama 3 8B Preview",
+  "openai/gpt-4.5-preview": "OpenAI GPT-4.5 (Preview)",
+  "google/gemini-2.0-flash-lite-001": "Google Gemini 2.0 Flash Lite",
+  "anthropic/claude-3.7-sonnet:beta": "Anthropic Claude 3.7 Sonnet (self-moderated)",
+  "anthropic/claude-3.7-sonnet": "Anthropic Claude 3.7 Sonnet",
+  "anthropic/claude-3.7-sonnet:thinking": "Anthropic Claude 3.7 Sonnet (thinking)",
+  "perplexity/r1-1776": "Perplexity R1 1776",
+  "mistralai/mistral-saba": "Mistral Saba",
+  "cognitivecomputations/dolphin3.0-r1-mistral-24b:free": "Dolphin3.0 R1 Mistral 24B",
+  "cognitivecomputations/dolphin3.0-mistral-24b:free": "Dolphin3.0 Mistral 24B",
+  "meta-llama/llama-guard-3-8b": "Llama Guard 3 8B",
+  "openai/o3-mini-high": "OpenAI o3 Mini High",
+  "allenai/llama-3.1-tulu-3-405b": "Llama 3.1 Tulu 3 405B",
+  "deepseek/deepseek-r1-distill-llama-8b": "DeepSeek R1 Distill Llama 8B",
+  "google/gemini-2.0-flash-001": "Google Gemini Flash 2.0",
+  "google/gemini-2.0-flash-lite-preview-02-05:free": "Google Gemini Flash Lite 2.0 Preview",
+  "google/gemini-2.0-pro-exp-02-05:free": "Google Gemini Pro 2.0 Experimental",
+  "qwen/qwen-vl-plus": "Qwen VL Plus",
+  "aion-labs/aion-1.0": "AionLabs Aion-1.0",
+  "aion-labs/aion-1.0-mini": "AionLabs Aion-1.0-Mini",
+  "aion-labs/aion-rp-llama-3.1-8b": "AionLabs Aion-RP 1.0 (8B)",
+  "qwen/qwen-vl-max": "Qwen VL Max",
+  "qwen/qwen-turbo": "Qwen Qwen-Turbo",
+  "qwen/qwen2.5-vl-72b-instruct:free": "Qwen Qwen2.5 VL 72B Instruct",
+  "qwen/qwen2.5-vl-72b-instruct": "Qwen Qwen2.5 VL 72B Instruct",
+  "qwen/qwen-plus": "Qwen Qwen-Plus",
+  "qwen/qwen-max": "Qwen Qwen-Max",
+  "openai/o3-mini": "OpenAI o3 Mini",
+  "deepseek/deepseek-r1-distill-qwen-1.5b": "DeepSeek R1 Distill Qwen 1.5B",
+  "mistralai/mistral-small-24b-instruct-2501:free": "Mistral Small 3",
+  "mistralai/mistral-small-24b-instruct-2501": "Mistral Small 3",
+  "deepseek/deepseek-r1-distill-qwen-32b:free": "DeepSeek R1 Distill Qwen 32B",
+  "deepseek/deepseek-r1-distill-qwen-32b": "DeepSeek R1 Distill Qwen 32B",
+  "deepseek/deepseek-r1-distill-qwen-14b:free": "DeepSeek R1 Distill Qwen 14B",
+  "deepseek/deepseek-r1-distill-qwen-14b": "DeepSeek R1 Distill Qwen 14B",
+  "perplexity/sonar-reasoning": "Perplexity Sonar Reasoning",
+  "perplexity/sonar": "Perplexity Sonar",
+  "liquid/lfm-7b": "Liquid LFM 7B",
+  "liquid/lfm-3b": "Liquid LFM 3B",
+  "deepseek/deepseek-r1-distill-llama-70b:free": "DeepSeek R1 Distill Llama 70B",
+  "deepseek/deepseek-r1-distill-llama-70b": "DeepSeek R1 Distill Llama 70B",
+  "google/gemini-2.0-flash-thinking-exp:free": "Google Gemini 2.0 Flash Thinking Experimental 01-21",
+  "deepseek/deepseek-r1:free": "DeepSeek R1",
+  "deepseek/deepseek-r1": "DeepSeek R1",
+  "sophosympatheia/rogue-rose-103b-v0.2:free": "Rogue Rose 103B v0.2",
+  "minimax/minimax-01": "MiniMax MiniMax-01",
+  "mistralai/codestral-2501": "Mistral Codestral 2501",
+  "microsoft/phi-4": "Microsoft Phi 4",
+  "sao10k/l3.1-70b-hanami-x1": "Sao10K Llama 3.1 70B Hanami x1",
+  "deepseek/deepseek-chat:free": "DeepSeek V3",
+  "deepseek/deepseek-chat": "DeepSeek V3",
+  "google/gemini-2.0-flash-thinking-exp-1219:free": "Google Gemini 2.0 Flash Thinking Experimental",
+  "sao10k/l3.3-euryale-70b": "Sao10K Llama 3.3 Euryale 70B",
+  "openai/o1": "OpenAI o1",
+  "eva-unit-01/eva-llama-3.33-70b": "EVA Llama 3.33 70B",
+  "x-ai/grok-2-vision-1212": "xAI Grok 2 Vision 1212",
+  "x-ai/grok-2-1212": "xAI Grok 2 1212",
+  "cohere/command-r7b-12-2024": "Cohere Command R7B (12-2024)",
+  "google/gemini-2.0-flash-exp:free": "Google Gemini Flash 2.0 Experimental",
+  "meta-llama/llama-3.3-70b-instruct:free": "Meta Llama 3.3 70B Instruct",
+  "meta-llama/llama-3.3-70b-instruct": "Meta Llama 3.3 70B Instruct",
+  "amazon/nova-lite-v1": "Amazon Nova Lite 1.0",
+  "amazon/nova-micro-v1": "Amazon Nova Micro 1.0",
+  "amazon/nova-pro-v1": "Amazon Nova Pro 1.0",
+  "qwen/qwq-32b-preview:free": "Qwen QwQ 32B Preview",
+  "qwen/qwq-32b-preview": "Qwen QwQ 32B Preview",
+  "google/learnlm-1.5-pro-experimental:free": "Google LearnLM 1.5 Pro Experimental",
+  "eva-unit-01/eva-qwen-2.5-72b": "EVA Qwen2.5 72B",
+  "openai/gpt-4o-2024-11-20": "OpenAI GPT-4o (2024-11-20)",
+  "mistralai/mistral-large-2411": "Mistral Large 2411",
+  "mistralai/mistral-large-2407": "Mistral Large 2407",
+  "mistralai/pixtral-large-2411": "Mistral Pixtral Large 2411",
+  "x-ai/grok-vision-beta": "xAI Grok Vision Beta",
+  "infermatic/mn-inferor-12b": "Infermatic Mistral Nemo Inferor 12B",
+  "qwen/qwen-2.5-coder-32b-instruct:free": "Qwen2.5 Coder 32B Instruct",
+  "qwen/qwen-2.5-coder-32b-instruct": "Qwen2.5 Coder 32B Instruct",
+  "raifle/sorcererlm-8x22b": "SorcererLM 8x22B",
+  "eva-unit-01/eva-qwen-2.5-32b": "EVA Qwen2.5 32B",
+  "thedrummer/unslopnemo-12b": "Unslopnemo 12B",
+  "anthropic/claude-3.5-haiku:beta": "Anthropic Claude 3.5 Haiku (self-moderated)",
+  "anthropic/claude-3.5-haiku": "Anthropic Claude 3.5 Haiku",
+  "anthropic/claude-3.5-haiku-20241022:beta": "Anthropic Claude 3.5 Haiku (2024-10-22) (self-moderated)",
+  "anthropic/claude-3.5-haiku-20241022": "Anthropic Claude 3.5 Haiku (2024-10-22)",
+  "anthropic/claude-3.5-sonnet:beta": "Anthropic Claude 3.5 Sonnet (self-moderated)",
+  "anthropic/claude-3.5-sonnet": "Anthropic Claude 3.5 Sonnet",
+  "anthracite-org/magnum-v4-72b": "Magnum v4 72B",
+  "neversleep/llama-3.1-lumimaid-70b": "NeverSleep Lumimaid v0.2 70B",
+  "x-ai/grok-beta": "xAI Grok Beta",
+  "mistralai/ministral-3b": "Mistral Ministral 3B",
+  "mistralai/ministral-8b": "Mistral Ministral 8B",
+  "qwen/qwen-2.5-7b-instruct": "Qwen2.5 7B Instruct",
+  "nvidia/llama-3.1-nemotron-70b-instruct:free": "NVIDIA Llama 3.1 Nemotron 70B Instruct",
+  "nvidia/llama-3.1-nemotron-70b-instruct": "NVIDIA Llama 3.1 Nemotron 70B Instruct",
+  "inflection/inflection-3-pi": "Inflection 3 Pi",
+  "inflection/inflection-3-productivity": "Inflection 3 Productivity",
+  "google/gemini-flash-1.5-8b": "Google Gemini Flash 1.5 8B",
+  "liquid/lfm-40b": "Liquid LFM 40B MoE",
+  "thedrummer/rocinante-12b": "Rocinante 12B",
+  "anthracite-org/magnum-v2-72b": "Magnum v2 72B",
+  "meta-llama/llama-3.2-90b-vision-instruct": "Meta Llama 3.2 90B Vision Instruct",
+  "meta-llama/llama-3.2-1b-instruct:free": "Meta Llama 3.2 1B Instruct",
+  "meta-llama/llama-3.2-1b-instruct": "Meta Llama 3.2 1B Instruct",
+  "meta-llama/llama-3.2-3b-instruct:free": "Meta Llama 3.2 3B Instruct",
+  "meta-llama/llama-3.2-3b-instruct": "Meta Llama 3.2 3B Instruct",
+  "meta-llama/llama-3.2-11b-vision-instruct:free": "Meta Llama 3.2 11B Vision Instruct",
+  "meta-llama/llama-3.2-11b-vision-instruct": "Meta Llama 3.2 11B Vision Instruct",
|
147 |
+
"qwen/qwen-2.5-72b-instruct:free": "Qwen2.5 72B Instruct",
|
148 |
+
"qwen/qwen-2.5-72b-instruct": "Qwen2.5 72B Instruct",
|
149 |
+
"qwen/qwen-2.5-vl-72b-instruct": "Qwen Qwen2.5-VL 72B Instruct",
|
150 |
+
"neversleep/llama-3.1-lumimaid-8b": "NeverSleep Lumimaid v0.2 8B",
|
151 |
+
"openai/o1-preview-2024-09-12": "OpenAI o1-preview (2024-09-12)",
|
152 |
+
"openai/o1-mini": "OpenAI o1-mini",
|
153 |
+
"openai/o1-mini-2024-09-12": "OpenAI o1-mini (2024-09-12)",
|
154 |
+
"openai/o1-preview": "OpenAI o1-preview",
|
155 |
+
"mistralai/pixtral-12b": "Mistral Pixtral 12B",
|
156 |
+
"cohere/command-r-plus-08-2024": "Cohere Command R+ (08-2024)",
|
157 |
+
"cohere/command-r-08-2024": "Cohere Command R (08-2024)",
|
158 |
+
"sao10k/l3.1-euryale-70b": "Sao10K Llama 3.1 Euryale 70B v2.2",
|
159 |
+
"qwen/qwen-2.5-vl-7b-instruct:free": "Qwen Qwen2.5-VL 7B Instruct",
|
160 |
+
"qwen/qwen-2.5-vl-7b-instruct": "Qwen Qwen2.5-VL 7B Instruct",
|
161 |
+
"google/gemini-flash-1.5-8b-exp": "Google Gemini Flash 1.5 8B Experimental",
|
162 |
+
"ai21/jamba-1-5-large": "AI21 Jamba 1.5 Large",
|
163 |
+
"ai21/jamba-1-5-mini": "AI21 Jamba 1.5 Mini",
|
164 |
+
"microsoft/phi-3.5-mini-128k-instruct": "Microsoft Phi-3.5 Mini 128K Instruct",
|
165 |
+
"nousresearch/hermes-3-llama-3.1-70b": "Nous Hermes 3 70B Instruct",
|
166 |
+
"nousresearch/hermes-3-llama-3.1-405b": "Nous Hermes 3 405B Instruct",
|
167 |
+
"openai/chatgpt-4o-latest": "OpenAI ChatGPT-4o",
|
168 |
+
"aetherwiing/mn-starcannon-12b": "Aetherwiing Starcannon 12B",
|
169 |
+
"sao10k/l3-lunaris-8b": "Sao10K Llama 3 8B Lunaris",
|
170 |
+
"openai/gpt-4o-2024-08-06": "OpenAI GPT-4o (2024-08-06)",
|
171 |
+
"nothingiisreal/mn-celeste-12b": "Mistral Nemo 12B Celeste",
|
172 |
+
"meta-llama/llama-3.1-405b": "Meta Llama 3.1 405B (base)",
|
173 |
+
"perplexity/llama-3.1-sonar-large-128k-online": "Perplexity Llama 3.1 Sonar 70B Online",
|
174 |
+
"perplexity/llama-3.1-sonar-small-128k-online": "Perplexity Llama 3.1 Sonar 8B Online",
|
175 |
+
"meta-llama/llama-3.1-8b-instruct:free": "Meta Llama 3.1 8B Instruct",
|
176 |
+
"meta-llama/llama-3.1-8b-instruct": "Meta Llama 3.1 8B Instruct",
|
177 |
+
"meta-llama/llama-3.1-405b-instruct": "Meta Llama 3.1 405B Instruct",
|
178 |
+
"meta-llama/llama-3.1-70b-instruct": "Meta Llama 3.1 70B Instruct",
|
179 |
+
"mistralai/codestral-mamba": "Mistral Codestral Mamba",
|
180 |
+
"mistralai/mistral-nemo:free": "Mistral Nemo",
|
181 |
+
"mistralai/mistral-nemo": "Mistral Nemo",
|
182 |
+
"openai/gpt-4o-mini": "OpenAI GPT-4o-mini",
|
183 |
+
"openai/gpt-4o-mini-2024-07-18": "OpenAI GPT-4o-mini (2024-07-18)",
|
184 |
+
"qwen/qwen-2-7b-instruct:free": "Qwen 2 7B Instruct",
|
185 |
+
"qwen/qwen-2-7b-instruct": "Qwen 2 7B Instruct",
|
186 |
+
"google/gemma-2-27b-it": "Google Gemma 2 27B",
|
187 |
+
"alpindale/magnum-72b": "Magnum 72B",
|
188 |
+
"google/gemma-2-9b-it:free": "Google Gemma 2 9B",
|
189 |
+
"google/gemma-2-9b-it": "Google Gemma 2 9B",
|
190 |
+
"01-ai/yi-large": "01.AI Yi Large",
|
191 |
+
"ai21/jamba-instruct": "AI21 Jamba Instruct",
|
192 |
+
"anthropic/claude-3.5-sonnet-20240620:beta": "Anthropic Claude 3.5 Sonnet (2024-06-20) (self-moderated)",
|
193 |
+
"anthropic/claude-3.5-sonnet-20240620": "Anthropic Claude 3.5 Sonnet (2024-06-20)",
|
194 |
+
"sao10k/l3-euryale-70b": "Sao10k Llama 3 Euryale 70B v2.1",
|
195 |
+
"cognitivecomputations/dolphin-mixtral-8x22b": "Dolphin 2.9.2 Mixtral 8x22B 🐬",
|
196 |
+
"qwen/qwen-2-72b-instruct": "Qwen 2 72B Instruct",
|
197 |
+
"mistralai/mistral-7b-instruct-v0.3": "Mistral 7B Instruct v0.3",
|
198 |
+
"nousresearch/hermes-2-pro-llama-3-8b": "NousResearch Hermes 2 Pro - Llama-3 8B",
|
199 |
+
"mistralai/mistral-7b-instruct:free": "Mistral 7B Instruct",
|
200 |
+
"mistralai/mistral-7b-instruct": "Mistral 7B Instruct",
|
201 |
+
"microsoft/phi-3-mini-128k-instruct:free": "Microsoft Phi-3 Mini 128K Instruct",
|
202 |
+
"microsoft/phi-3-mini-128k-instruct": "Microsoft Phi-3 Mini 128K Instruct",
|
203 |
+
"microsoft/phi-3-medium-128k-instruct:free": "Microsoft Phi-3 Medium 128K Instruct",
|
204 |
+
"microsoft/phi-3-medium-128k-instruct": "Microsoft Phi-3 Medium 128K Instruct",
|
205 |
+
"neversleep/llama-3-lumimaid-70b": "NeverSleep Llama 3 Lumimaid 70B",
|
206 |
+
"google/gemini-flash-1.5": "Google Gemini Flash 1.5",
|
207 |
+
"openai/gpt-4o-2024-05-13": "OpenAI GPT-4o (2024-05-13)",
|
208 |
+
"meta-llama/llama-guard-2-8b": "Meta LlamaGuard 2 8B",
|
209 |
+
"openai/gpt-4o": "OpenAI GPT-4o",
|
210 |
+
"openai/gpt-4o:extended": "OpenAI GPT-4o (extended)",
|
211 |
+
"neversleep/llama-3-lumimaid-8b:extended": "NeverSleep Llama 3 Lumimaid 8B (extended)",
|
212 |
+
"neversleep/llama-3-lumimaid-8b": "NeverSleep Llama 3 Lumimaid 8B",
|
213 |
+
"sao10k/fimbulvetr-11b-v2": "Fimbulvetr 11B v2",
|
214 |
+
"meta-llama/llama-3-8b-instruct:free": "Meta Llama 3 8B Instruct",
|
215 |
+
"meta-llama/llama-3-8b-instruct": "Meta Llama 3 8B Instruct",
|
216 |
+
"meta-llama/llama-3-70b-instruct": "Meta Llama 3 70B Instruct",
|
217 |
+
"mistralai/mixtral-8x22b-instruct": "Mistral Mixtral 8x22B Instruct",
|
218 |
+
"microsoft/wizardlm-2-7b": "WizardLM-2 7B",
|
219 |
+
"microsoft/wizardlm-2-8x22b": "WizardLM-2 8x22B",
|
220 |
+
"google/gemini-pro-1.5": "Google Gemini Pro 1.5",
|
221 |
+
"openai/gpt-4-turbo": "OpenAI GPT-4 Turbo",
|
222 |
+
"cohere/command-r-plus": "Cohere Command R+",
|
223 |
+
"cohere/command-r-plus-04-2024": "Cohere Command R+ (04-2024)",
|
224 |
+
"sophosympatheia/midnight-rose-70b": "Midnight Rose 70B",
|
225 |
+
"cohere/command": "Cohere Command",
|
226 |
+
"cohere/command-r": "Cohere Command R",
|
227 |
+
"anthropic/claude-3-haiku:beta": "Anthropic Claude 3 Haiku (self-moderated)",
|
228 |
+
"anthropic/claude-3-haiku": "Anthropic Claude 3 Haiku",
|
229 |
+
"anthropic/claude-3-sonnet:beta": "Anthropic Claude 3 Sonnet (self-moderated)",
|
230 |
+
"anthropic/claude-3-sonnet": "Anthropic Claude 3 Sonnet",
|
231 |
+
"anthropic/claude-3-opus:beta": "Anthropic Claude 3 Opus (self-moderated)",
|
232 |
+
"anthropic/claude-3-opus": "Anthropic Claude 3 Opus",
|
233 |
+
"cohere/command-r-03-2024": "Cohere Command R (03-2024)",
|
234 |
+
"mistralai/mistral-large": "Mistral Large",
|
235 |
+
"google/gemma-7b-it": "Google Gemma 7B",
|
236 |
+
"openai/gpt-4-turbo-preview": "OpenAI GPT-4 Turbo Preview",
|
237 |
+
"openai/gpt-3.5-turbo-0613": "OpenAI GPT-3.5 Turbo (older v0613)",
|
238 |
+
"nousresearch/nous-hermes-2-mixtral-8x7b-dpo": "Nous Hermes 2 Mixtral 8x7B DPO",
|
239 |
+
"mistralai/mistral-tiny": "Mistral Tiny",
|
240 |
+
"mistralai/mistral-medium": "Mistral Medium",
|
241 |
+
"mistralai/mistral-small": "Mistral Small",
|
242 |
+
"mistralai/mistral-7b-instruct-v0.2": "Mistral 7B Instruct v0.2",
|
243 |
+
"cognitivecomputations/dolphin-mixtral-8x7b": "Dolphin 2.6 Mixtral 8x7B 🐬",
|
244 |
+
"google/gemini-pro": "Google Gemini Pro 1.0",
|
245 |
+
"google/gemini-pro-vision": "Google Gemini Pro Vision 1.0",
|
246 |
+
"mistralai/mixtral-8x7b": "Mistral Mixtral 8x7B (base)",
|
247 |
+
"mistralai/mixtral-8x7b-instruct": "Mistral Mixtral 8x7B Instruct",
|
248 |
+
"openchat/openchat-7b:free": "OpenChat 3.5 7B",
|
249 |
+
"openchat/openchat-7b": "OpenChat 3.5 7B",
|
250 |
+
"neversleep/noromaid-20b": "Noromaid 20B",
|
251 |
+
"anthropic/claude-2:beta": "Anthropic Claude v2 (self-moderated)",
|
252 |
+
"anthropic/claude-2": "Anthropic Claude v2",
|
253 |
+
"anthropic/claude-2.1:beta": "Anthropic Claude v2.1 (self-moderated)",
|
254 |
+
"anthropic/claude-2.1": "Anthropic Claude v2.1",
|
255 |
+
"teknium/openhermes-2.5-mistral-7b": "OpenHermes 2.5 Mistral 7B",
|
256 |
+
"alpindale/goliath-120b": "Goliath 120B",
|
257 |
+
"undi95/toppy-m-7b:free": "Toppy M 7B",
|
258 |
+
"undi95/toppy-m-7b": "Toppy M 7B",
|
259 |
+
"openrouter/auto": "Auto Router",
|
260 |
+
"openai/gpt-4-1106-preview": "OpenAI GPT-4 Turbo (older v1106)",
|
261 |
+
"openai/gpt-3.5-turbo-1106": "OpenAI GPT-3.5 Turbo 16k (older v1106)",
|
262 |
+
"google/palm-2-chat-bison-32k": "Google PaLM 2 Chat 32k",
|
263 |
+
"google/palm-2-codechat-bison-32k": "Google PaLM 2 Code Chat 32k",
|
264 |
+
"jondurbin/airoboros-l2-70b": "Airoboros 70B",
|
265 |
+
"xwin-lm/xwin-lm-70b": "Xwin 70B",
|
266 |
+
"openai/gpt-3.5-turbo-instruct": "OpenAI GPT-3.5 Turbo Instruct",
|
267 |
+
"mistralai/mistral-7b-instruct-v0.1": "Mistral 7B Instruct v0.1",
|
268 |
+
"pygmalionai/mythalion-13b": "Pygmalion Mythalion 13B",
|
269 |
+
"openai/gpt-4-32k-0314": "OpenAI GPT-4 32k (older v0314)",
|
270 |
+
"openai/gpt-3.5-turbo-16k": "OpenAI GPT-3.5 Turbo 16k",
|
271 |
+
"openai/gpt-4-32k": "OpenAI GPT-4 32k",
|
272 |
+
"nousresearch/nous-hermes-llama2-13b": "Nous Hermes 13B",
|
273 |
+
"mancer/weaver": "Mancer Weaver (alpha)",
|
274 |
+
"huggingfaceh4/zephyr-7b-beta:free": "Hugging Face Zephyr 7B",
|
275 |
+
"anthropic/claude-2.0:beta": "Anthropic Claude v2.0 (self-moderated)",
|
276 |
+
"anthropic/claude-2.0": "Anthropic Claude v2.0",
|
277 |
+
"undi95/remm-slerp-l2-13b": "ReMM SLERP 13B",
|
278 |
+
"google/palm-2-codechat-bison": "Google PaLM 2 Code Chat",
|
279 |
+
"google/palm-2-chat-bison": "Google PaLM 2 Chat",
|
280 |
+
"gryphe/mythomax-l2-13b:free": "MythoMax 13B",
|
281 |
+
"gryphe/mythomax-l2-13b": "MythoMax 13B",
|
282 |
+
"meta-llama/llama-2-70b-chat": "Meta Llama 2 70B Chat",
|
283 |
+
"meta-llama/llama-2-13b-chat": "Meta Llama 2 13B Chat",
|
284 |
+
"openai/gpt-4-0314": "OpenAI GPT-4 (older v0314)",
|
285 |
+
"openai/gpt-4": "OpenAI GPT-4",
|
286 |
+
"openai/gpt-3.5-turbo": "OpenAI GPT-3.5 Turbo",
|
287 |
+
"openai/gpt-3.5-turbo-0125": "OpenAI GPT-3.5 Turbo 16k"
|
288 |
+
};
|
289 |
+
|
290 |
+
export const customModelNames: Record<string, string> = {
|
291 |
+
"google/gemini-2.0-flash-exp:free": "Gemini",
|
292 |
+
"google/gemini-2.0-flash-lite-001": "Gemini",
|
293 |
+
"x-ai/grok-2-1212": "Grok",
|
294 |
+
"deepseek/deepseek-chat:free": "DeepSeek",
|
295 |
+
"meta-llama/llama-3.3-70b-instruct:free": "Llama"
|
296 |
+
};
|
297 |
+
|
298 |
+
export const getModelDisplayName = (modelId: string): string => {
|
299 |
+
return customModelNames[modelId] || modelNames[modelId] || modelId;
|
300 |
+
};
|
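A quick usage sketch of the lookup order (not part of the commit): entries in customModelNames win over the full OpenRouter name map, and unknown IDs fall back to the raw model ID. The "some/unknown-model" ID below is made up for illustration.

import { getModelDisplayName } from "@/lib/modelNames";

console.log(getModelDisplayName("x-ai/grok-2-1212"));        // "Grok" (customModelNames override)
console.log(getModelDisplayName("anthropic/claude-3-opus")); // "Anthropic Claude 3 Opus" (modelNames)
console.log(getModelDisplayName("some/unknown-model"));      // "some/unknown-model" (fallback to the ID)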
src/lib/wordProcessing.ts
CHANGED
@@ -1,4 +1,3 @@
-
 import nlp from 'compromise';
 
 export const normalizeWord = (word: string, language: string = 'en'): string => {
@@ -20,6 +19,13 @@ export const normalizeWord = (word: string, language: string = 'en'): string =>
     .normalize('NFD')
     .replace(/[\u0300-\u036f]/g, '')
     .toLowerCase()
+
+    // Handle German umlauts and their alternative spellings
+    .replace(/ü/g, 'ue')
+    .replace(/ä/g, 'ae')
+    .replace(/ö/g, 'oe')
+    .replace(/ß/g, 'ss')
+
     .replace(/[^a-z]/g, '')
     .trim();
 };
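A minimal sketch of the intended umlaut mapping in isolation (a hypothetical helper, not the project's normalizeWord, and it assumes the input still contains composed umlaut characters):

// Transliterate German umlauts to their common ASCII spellings.
const transliterateGermanUmlauts = (s: string): string =>
  s
    .toLowerCase()
    .replace(/ü/g, 'ue')
    .replace(/ä/g, 'ae')
    .replace(/ö/g, 'oe')
    .replace(/ß/g, 'ss');

console.log(transliterateGermanUmlauts('Straße')); // "strasse"
console.log(transliterateGermanUmlauts('Bär'));    // "baer"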
src/services/aiService.ts
CHANGED
@@ -1,14 +1,15 @@
 
 import { supabase } from "@/integrations/supabase/client";
 
-export const generateAIResponse = async (currentWord: string, currentSentence: string[], language: string = 'en'): Promise<string> => {
+export const generateAIResponse = async (currentWord: string, currentSentence: string[], language: string = 'en', model?: string): Promise<string> => {
   console.log('Calling generate-word function with:', { currentWord, currentSentence, language });
 
   const { data, error } = await supabase.functions.invoke('generate-word', {
     body: {
       currentWord,
       currentSentence: currentSentence.join(' '),
-      language
+      language,
+      model
     }
   });
 
@@ -29,7 +30,7 @@ export const generateAIResponse = async (currentWord: string, currentSentence: s
   return data.word;
 };
 
-export const guessWord = async (sentence: string, language: string): Promise<{ guess: string; model: string }> => {
+export const guessWord = async (sentence: string, language: string, model?: string): Promise<{ guess: string; model: string }> => {
   console.log('Processing guess for sentence:', sentence);
 
   const words = sentence.trim().split(/\s+/);
@@ -39,7 +40,8 @@ export const guessWord = async (sentence: string, language: string): Promise<{ g
   const { data, error } = await supabase.functions.invoke('guess-word', {
     body: {
       sentence,
-      language
+      language,
+      model
     }
   });
 
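A hypothetical call site for the new optional model argument (the word, sentence, and model ID below are made-up inputs; if the argument is omitted, the edge functions pick a model themselves):

import { generateAIResponse, guessWord } from "@/services/aiService";

const demo = async () => {
  // Ask the AI for the next word of the sentence, pinning a specific OpenRouter model.
  const nextWord = await generateAIResponse("BEACH", ["Sand", "and"], "en", "mistralai/mistral-nemo");
  // Have the AI guess the secret word from the finished sentence with the same model.
  const { guess, model } = await guessWord("Sand and waves all day", "en", "mistralai/mistral-nemo");
  console.log(nextWord, guess, model);
};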
supabase/functions/generate-word/index.ts
CHANGED
@@ -62,16 +62,16 @@ const openRouterModels = [
   'mistralai/mistral-nemo'
 ];
 
-async function generateWord(currentWord: string, existingSentence: string, language: string) {
+async function generateWord(currentWord: string, existingSentence: string, language: string, model?: string) {
   const openRouterKey = Deno.env.get('OPENROUTER_API_KEY');
   if (!openRouterKey) {
     throw new Error('OpenRouter API key not configured');
   }
 
   const prompts = languagePrompts[language as keyof typeof languagePrompts] || languagePrompts.en;
-  const
+  const selectedModel = model || openRouterModels[Math.floor(Math.random() * openRouterModels.length)];
 
-  console.log('Using OpenRouter with model:',
+  console.log('Using OpenRouter with model:', selectedModel);
 
   const response = await fetch("https://openrouter.ai/api/v1/chat/completions", {
     method: "POST",
@@ -82,7 +82,7 @@ async function generateWord(currentWord: string, existingSentence: string, langu
       "Content-Type": "application/json"
     },
     body: JSON.stringify({
-      model:
+      model: selectedModel,
       messages: [
         {
           role: "system",
@@ -119,7 +119,7 @@ async function generateWord(currentWord: string, existingSentence: string, langu
     .split(' ')[0]
     .replace(/[.,!?]$/, '');
 
-  return { word, model:
+  return { word, model: selectedModel };
 }
 
 serve(async (req) => {
@@ -128,16 +128,16 @@ serve(async (req) => {
   }
 
   try {
-    const { currentWord, currentSentence, language = 'en' } = await req.json();
-    console.log('Generating word for:', { currentWord, currentSentence, language });
+    const { currentWord, currentSentence, language = 'en', model } = await req.json();
+    console.log('Generating word for:', { currentWord, currentSentence, language, model });
 
     const existingSentence = currentSentence || '';
 
     try {
-      const { word, model } = await generateWord(currentWord, existingSentence, language);
-      console.log('Successfully generated word:', word, 'using model:',
+      const { word, model: usedModel } = await generateWord(currentWord, existingSentence, language, model);
+      console.log('Successfully generated word:', word, 'using model:', usedModel);
       return new Response(
-        JSON.stringify({ word, model }),
+        JSON.stringify({ word, model: usedModel }),
         { headers: { ...corsHeaders, 'Content-Type': 'application/json' } }
       );
     } catch (error) {
supabase/functions/guess-word/index.ts
CHANGED
@@ -54,11 +54,11 @@ const openRouterModels = [
   'mistralai/mistral-nemo'
 ];
 
-async function generateGuess(sentence: string, language: string) {
+async function generateGuess(sentence: string, language: string, model?: string) {
   const prompts = languagePrompts[language as keyof typeof languagePrompts] || languagePrompts.en;
-  const
+  const selectedModel = model || openRouterModels[Math.floor(Math.random() * openRouterModels.length)];
 
-  console.log('Using OpenRouter with model:',
+  console.log('Using OpenRouter with model:', selectedModel);
 
   try {
     const response = await fetch("https://openrouter.ai/api/v1/chat/completions", {
@@ -70,7 +70,7 @@ async function generateGuess(sentence: string, language: string) {
       "Content-Type": "application/json"
     },
     body: JSON.stringify({
-      model:
+      model: selectedModel,
       messages: [
         {
           role: "system",
@@ -102,7 +102,7 @@ async function generateGuess(sentence: string, language: string) {
     const data = await response.json();
     return {
       guess: data.choices[0].message.content.trim().toUpperCase(),
-      model:
+      model: selectedModel
     };
   } catch (error) {
     console.error('Error in generateGuess:', error);
@@ -120,14 +120,14 @@ serve(async (req) => {
   }
 
   try {
-    const { sentence, language = 'en' } = await req.json();
-    console.log('Trying to guess word from sentence:', sentence, 'language:', language);
+    const { sentence, language = 'en', model } = await req.json();
+    console.log('Trying to guess word from sentence:', sentence, 'language:', language, 'model:', model);
 
-    const { guess, model } = await generateGuess(sentence, language);
-    console.log('Successfully generated guess:', guess, 'using model:',
+    const { guess, model: usedModel } = await generateGuess(sentence, language, model);
+    console.log('Successfully generated guess:', guess, 'using model:', usedModel);
 
     return new Response(
-      JSON.stringify({ guess, model }),
+      JSON.stringify({ guess, model: usedModel }),
       { headers: { ...corsHeaders, 'Content-Type': 'application/json' } }
    );
  } catch (error) {
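Both edge functions now share the same selection rule: honor a caller-supplied model, otherwise fall back to a random entry from the local openRouterModels pool. A minimal sketch of that rule in isolation (pickModel is a hypothetical helper, not part of the commit):

// Use the requested model if given, otherwise pick one at random from the pool.
const pickModel = (pool: string[], requested?: string): string =>
  requested || pool[Math.floor(Math.random() * pool.length)];

// pickModel(openRouterModels)                  -> random entry from the pool
// pickModel(openRouterModels, "openai/gpt-4o") -> "openai/gpt-4o"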
vite.config.ts
CHANGED
@@ -1,7 +1,6 @@
 import { defineConfig } from "vite";
 import react from "@vitejs/plugin-react-swc";
 import path from "path";
-import { componentTagger } from "lovable-tagger";
 
 // https://vitejs.dev/config/
 export default defineConfig(({ mode }) => ({
@@ -9,11 +8,7 @@ export default defineConfig(({ mode }) => ({
     host: "::",
     port: 8080,
   },
-  plugins: [
-    react(),
-    mode === 'development' &&
-    componentTagger(),
-  ].filter(Boolean),
+  plugins: [react()],
   resolve: {
     alias: {
       "@": path.resolve(__dirname, "./src"),