Commit a425931 · 1 Parent(s): 8d1d1b5

testing continuous simulation

Changed files:
- api.py +2 -2
- api_core.py +40 -41
- api_session.py +1 -1
- build/web/flutter_bootstrap.js +1 -1
- build/web/flutter_service_worker.js +2 -2
- build/web/index.html +1 -1
- build/web/main.dart.js +0 -0
- docs/for-bots/flutter/flutter-videos/fvp_usage_example__multi_textures.dart +1 -1
- lib/main.dart +1 -1
- lib/screens/home_screen.dart +1 -1
- lib/screens/video_screen.dart +0 -1
- lib/services/chat_service.dart +38 -0
- lib/services/clip_queue/clip_generation_handler.dart +0 -1
- lib/services/clip_queue/clip_queue_manager.dart +20 -34
- lib/services/clip_queue/queue_stats_logger.dart +0 -2
- lib/services/clip_queue/video_clip.dart +0 -1
- lib/services/html_stub.dart +1 -0
- lib/services/websocket_api_service.dart +46 -21
- lib/services/websocket_core_interface.dart +2 -0
- lib/widgets/ai_content_disclaimer.dart +0 -1
- lib/widgets/maintenance_screen.dart +0 -1
- lib/widgets/video_card.dart +0 -1
- lib/widgets/video_player/buffer_manager.dart +0 -1
- lib/widgets/video_player/nano_clip_manager.dart +0 -1
- lib/widgets/video_player/playback_controller.dart +0 -1
- lib/widgets/video_player/video_player_widget.dart +5 -7
- lib/widgets/web_utils.dart +0 -1
api.py CHANGED
@@ -2,7 +2,7 @@ import asyncio
 import json
 import logging
 import os
-import …
+from pathlib import Path
 import time
 import uuid
 from aiohttp import web, WSMsgType
@@ -233,7 +233,7 @@ async def init_app() -> web.Application:
 
     # Set up static file serving
    # Define the path to the public directory
-    public_path = …
+    public_path = Path(__file__).parent / 'build' / 'web'
     if not public_path.exists():
         public_path.mkdir(parents=True, exist_ok=True)
 
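For context, a minimal sketch of how the new Path-based public directory is typically wired into an aiohttp app. Only the public_path lines above are part of this commit; the index handler, the add_static call and the rest of the scaffolding are assumptions.

# Sketch only: the routing below is an assumption, not part of this commit.
from pathlib import Path
from aiohttp import web

async def init_app() -> web.Application:
    app = web.Application()

    # Define the path to the public directory (from the diff above).
    public_path = Path(__file__).parent / 'build' / 'web'
    if not public_path.exists():
        public_path.mkdir(parents=True, exist_ok=True)

    # Assumed wiring: serve the Flutter web build.
    async def index(request: web.Request) -> web.FileResponse:
        return web.FileResponse(public_path / 'index.html')

    app.router.add_get('/', index)
    app.router.add_static('/assets/', path=str(public_path))
    return app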
api_core.py CHANGED
@@ -473,7 +473,7 @@ A video can be anything from a tutorial, webcam, trailer, movie, live stream etc
 # Determine if this is the first simulation
 is_first_simulation = evolution_count == 0 or not condensed_history
 
-logger.info(f"simulate(): is_first_simulation={is_first_simulation}")
+#logger.info(f"simulate(): is_first_simulation={is_first_simulation}")
 
 # Create an appropriate prompt based on whether this is the first simulation
 chat_section = ""
@@ -492,15 +492,21 @@ Original description:
 {chat_section}
 
 Instructions:
-1. Imagine the next logical scene or development that would follow
-2. …
-3. …
-4. …
-5. …
-
-Return …
-
-
+1. Imagine the next logical scene or development that would follow the current description.
+2. Consider the video context and recent events
+3. Create a natural progression from previous clips
+4. Take into account user suggestions (chat messages) into the scene
+5. IMPORTANT: viewers have shared messages, consider their input in priority to guide your story, and incorporate relevant suggestions or reactions into your narrative evolution.
+6. Keep visual consistency with previous clips (in most cases you should repeat the same exact description of the location, characters etc but only change a few elements. If this is a webcam scenario, don't touch the camera orientation or focus)
+7. Return ONLY the caption text, no additional formatting or explanation
+8. Write in English, about 200 words.
+9. Keep the visual style consistant, but content as well (repeat the style, character, locations, appearance etc..from the previous description, when it makes sense).
+10. Your caption must describe visual elements of the scene in details, including: camera angle and focus, people's appearance, age, look, costumes, clothes, the location visual characteristics and geometry, lighting, action, objects, weather, textures, lighting.
+11. Please write in the same style as the original description, by keeping things brief etc.
+
+Remember to obey to what users said in the chat history!!
+
+Now, you must write down the new scene description (don't write a long story! write a synthetic description!):"""
 else:
     prompt = f"""You are tasked with continuing to evolve the narrative for a video titled: "{original_title}"
@@ -516,14 +522,20 @@ Current description (most recent scene):
 
 Instructions:
 1. Imagine the next logical scene or development that would follow the current description.
-2. …
-3. …
-4. …
-5. …
-
-…
-EVOLVED_DESCRIPTION: [your new evolved description here]
-CONDENSED_HISTORY: [your updated scene history summary]"""
+2. Consider the video context and recent events
+3. Create a natural progression from previous clips
+4. Take into account user suggestions (chat messages) into the scene
+5. IMPORTANT: if viewers have shared messages, consider their input in priority to guide your story, and incorporate relevant suggestions or reactions into your narrative evolution.
+6. Keep visual consistency with previous clips (in most cases you should repeat the same exact description of the location, characters etc but only change a few elements. If this is a webcam scenario, don't touch the camera orientation or focus)
+7. Return ONLY the caption text, no additional formatting or explanation
+8. Write in English, about 200 words.
+9. Keep the visual style consistant, but content as well (repeat the style, character, locations, appearance etc..from the previous description, when it makes sense).
+10. Your caption must describe visual elements of the scene in details, including: camera angle and focus, people's appearance, age, look, costumes, clothes, the location visual characteristics and geometry, lighting, action, objects, weather, textures, lighting.
+11. Please write in the same style as the original description, by keeping things brief etc.
+
+Remember to obey to what users said in the chat history!!
 
+Now, you must write down the new scene description (don't write a long story! write a synthetic description!):"""
 
 # Generate the evolved description
 response = await asyncio.get_event_loop().run_in_executor(
@@ -531,45 +543,32 @@ CONDENSED_HISTORY: [your updated scene history summary]"""
     lambda: self.inference_client.text_generation(
         prompt,
         model=TEXT_MODEL,
-        max_new_tokens=…
-        temperature=0.…
+        max_new_tokens=280,
+        temperature=0.68
     )
 )
 
-# …
-evolved_description = …
-new_condensed_history = ""
-
-# Parse the response
-if "EVOLVED_DESCRIPTION:" in response and "CONDENSED_HISTORY:" in response:
-    parts = response.split("CONDENSED_HISTORY:")
-    if len(parts) >= 2:
-        desc_part = parts[0].strip()
-        if "EVOLVED_DESCRIPTION:" in desc_part:
-            evolved_description = desc_part.split("EVOLVED_DESCRIPTION:", 1)[1].strip()
-        new_condensed_history = parts[1].strip()
+# print("RAW RESPONSE: ", response)
 
-# If …
+# Just use the whole response as the evolved description
+evolved_description = response.strip()
+
+# If response is empty, use fallback
 if not evolved_description:
     evolved_description = current_description
-    logger.warning(f"…
-
-if not new_condensed_history and condensed_history:
-    new_condensed_history = condensed_history
-    logger.warning(f"Failed to parse condensed history, using current history as fallback")
-elif not new_condensed_history:
-    new_condensed_history = f"The video begins with {original_title}: {original_description[:100]}..."
+    logger.warning(f"Empty response, using current description as fallback")
 
+# Pass the condensed history through unchanged
 return {
     "evolved_description": evolved_description,
-    "condensed_history": …
+    "condensed_history": condensed_history
 }
 
 except Exception as e:
     logger.error(f"Error simulating video: {str(e)}")
     return {
         "evolved_description": current_description,
-        "condensed_history": condensed_history
+        "condensed_history": condensed_history
     }
 
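To summarize the behavioural change in simulate(): the model output is no longer parsed for EVOLVED_DESCRIPTION / CONDENSED_HISTORY markers; the raw completion becomes the evolved description and the condensed history is passed through unchanged. Below is a condensed sketch of the new flow; inference_client, TEXT_MODEL and the sampling settings come from the diff above, while the surrounding class scaffolding is assumed.

# Condensed sketch of the post-commit simulate() flow; class scaffolding is assumed.
import asyncio
import logging

logger = logging.getLogger(__name__)

async def simulate_scene(inference_client, prompt, current_description,
                         condensed_history, text_model):
    # New sampling settings from the diff: short, moderately creative completion.
    response = await asyncio.get_event_loop().run_in_executor(
        None,
        lambda: inference_client.text_generation(
            prompt,
            model=text_model,
            max_new_tokens=280,
            temperature=0.68,
        ),
    )

    # The whole response is the evolved description (no marker parsing anymore).
    evolved_description = response.strip()
    if not evolved_description:
        evolved_description = current_description
        logger.warning("Empty response, using current description as fallback")

    # The condensed history is passed through unchanged.
    return {
        "evolved_description": evolved_description,
        "condensed_history": condensed_history,
    }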
api_session.py CHANGED
@@ -286,7 +286,7 @@ class UserSession:
 evolution_count = data.get('evolution_count', 0)
 chat_messages = data.get('chat_messages', '')
 
-logger.info(f"Processing video simulation for user {self.user_id}, video_id={video_id}, evolution_count={evolution_count}")
+# logger.info(f"Processing video simulation for user {self.user_id}, video_id={video_id}, evolution_count={evolution_count}")
 
 # Validate required parameters
 if not original_title or not original_description or not current_description:
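The handler now receives chat messages alongside the other simulation parameters. A small sketch of the parameter handling around this hunk; the field names come from the diff, while the surrounding handler and its error response are assumptions.

# Sketch only: surrounding handler code is assumed; keys come from the diff above.
def parse_simulation_params(data: dict) -> dict:
    params = {
        "original_title": data.get("original_title", ""),
        "original_description": data.get("original_description", ""),
        "current_description": data.get("current_description", ""),
        "condensed_history": data.get("condensed_history", ""),
        "evolution_count": data.get("evolution_count", 0),
        "chat_messages": data.get("chat_messages", ""),
    }
    # Validate required parameters (mirrors the check in the diff).
    if not (params["original_title"] and params["original_description"]
            and params["current_description"]):
        raise ValueError("Missing required parameters for video simulation")
    return params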
build/web/flutter_bootstrap.js CHANGED
@@ -39,6 +39,6 @@ _flutter.buildConfig = {"engineRevision":"382be0028d370607f76215a9be322e5514b263
 
 _flutter.loader.load({
   serviceWorkerSettings: {
-    serviceWorkerVersion: "…
+    serviceWorkerVersion: "2697941998"
   }
 });
build/web/flutter_service_worker.js CHANGED
@@ -3,12 +3,12 @@ const MANIFEST = 'flutter-app-manifest';
 const TEMP = 'flutter-temp-cache';
 const CACHE_NAME = 'flutter-app-cache';
 
-const RESOURCES = {"flutter_bootstrap.js": "…
+const RESOURCES = {"flutter_bootstrap.js": "e872c517f2e5a256b7e7e90083844b1e",
 "version.json": "68350cac7987de2728345c72918dd067",
 "tikslop.png": "570e1db759046e2d224fef729983634e",
 "index.html": "3a7029b3672560e7938aab6fa4d30a46",
 "/": "3a7029b3672560e7938aab6fa4d30a46",
-"main.dart.js": "…
+"main.dart.js": "2b68b90804df2f31f56c7601f5c5048f",
 "tikslop.svg": "26140ba0d153b213b122bc6ebcc17f6c",
 "flutter.js": "83d881c1dbb6d6bcd6b42e274605b69c",
 "favicon.png": "c8a183c516004e648a7bac7497c89b97",
build/web/index.html CHANGED
@@ -156,7 +156,7 @@
 </script>
 
 <!-- Add version parameter for cache busting -->
-<script src="flutter_bootstrap.js?v=…
+<script src="flutter_bootstrap.js?v=1747835844" async></script>
 
 <!-- Add cache busting script -->
 <script>
build/web/main.dart.js CHANGED
The diff for this file is too large to render. See raw diff.
docs/for-bots/flutter/flutter-videos/fvp_usage_example__multi_textures.dart CHANGED
@@ -5,7 +5,7 @@ void main(List<String> args) async {
   runApp(const SinglePlayerMultipleVideoWidget());
 }
 class SinglePlayerMultipleVideoWidget extends StatefulWidget {
-  const SinglePlayerMultipleVideoWidget({…
+  const SinglePlayerMultipleVideoWidget({super.key});
 
   @override
   State<SinglePlayerMultipleVideoWidget> createState() =>
lib/main.dart CHANGED
@@ -91,7 +91,7 @@ void main() async {
   wsService.statusStream.listen((status) {
     if (status == ConnectionStatus.maintenance) {
       // Force update to maintenance screen if server goes into maintenance mode later
-      runApp(TikSlopApp(home: …
+      runApp(const TikSlopApp(home: MaintenanceScreen(error: null)));
     }
   });
 
lib/screens/home_screen.dart CHANGED
@@ -30,7 +30,7 @@ class HomeScreen extends StatefulWidget {
 class _HomeScreenState extends State<HomeScreen> {
   final _searchController = TextEditingController();
   final _websocketService = WebSocketApiService();
-  List<VideoResult> _results = [];
+  final List<VideoResult> _results = [];
   bool _isSearching = false;
   String? _currentSearchQuery;
   StreamSubscription? _searchSubscription;
lib/screens/video_screen.dart CHANGED
@@ -9,7 +9,6 @@ import 'package:tikslop/widgets/web_utils.dart';
 import 'package:flutter/foundation.dart';
 import 'package:flutter/material.dart';
 import 'package:flutter/services.dart';
-import 'package:universal_html/html.dart' if (dart.library.io) 'package:tikslop/services/html_stub.dart' as html;
 import '../config/config.dart';
 import '../models/video_result.dart';
 import '../services/websocket_api_service.dart';
lib/services/chat_service.dart CHANGED
@@ -20,6 +20,10 @@ class ChatService {
   final _chatController = StreamController<ChatMessage>.broadcast();
   Stream<ChatMessage> get chatStream => _chatController.stream;
 
+  // Store recent messages for each room
+  final Map<String, List<ChatMessage>> _recentMessages = {};
+  static const int _maxRecentMessages = 5;
+
   final WebSocketApiService _websocketService = WebSocketApiService();
   String? _userId;
   String? _username;
@@ -145,9 +149,43 @@ class ChatService {
     // Only add messages if they're for the current room
     if (message.videoId == _currentRoomId) {
       _chatController.add(message);
+
+      // Store this message in the recent messages for this room
+      if (!_recentMessages.containsKey(message.videoId)) {
+        _recentMessages[message.videoId] = [];
+      }
+
+      _recentMessages[message.videoId]!.add(message);
+
+      // Keep only most recent messages
+      if (_recentMessages[message.videoId]!.length > _maxRecentMessages) {
+        _recentMessages[message.videoId]!.removeAt(0);
+      }
+
       debugPrint('Received chat message: ${message.id} from ${message.username}');
     }
   }
+
+  /// Get recent messages for a video room
+  Future<List<ChatMessage>> getRecentMessages(String videoId) async {
+    // Initialize if needed
+    if (!_isInitialized) {
+      await initialize();
+    }
+
+    // If not in the requested room, try to join it
+    if (_currentRoomId != videoId) {
+      try {
+        await joinRoom(videoId);
+      } catch (e) {
+        debugPrint('Error joining room to get messages: $e');
+        return [];
+      }
+    }
+
+    // Return the stored messages or an empty list
+    return _recentMessages[videoId] ?? [];
+  }
 
   // This method is only for application shutdown
   // Individual widgets should use leaveRoom instead
lib/services/clip_queue/clip_generation_handler.dart CHANGED
@@ -1,7 +1,6 @@
 // lib/services/clip_queue/clip_generation_handler.dart
 
 import 'dart:async';
-import 'package:flutter/foundation.dart';
 import 'package:tikslop/config/config.dart';
 import '../websocket_api_service.dart';
 import '../../models/video_result.dart';
lib/services/clip_queue/clip_queue_manager.dart CHANGED
@@ -104,8 +104,8 @@ class ClipQueueManager {
 void _addChatMessage(ChatMessage message) {
   if (message.videoId == videoId) {
     _recentChatMessages.add(message);
-    // Keep only the …
-    if (_recentChatMessages.length > …
+    // Keep only the 5 most recent messages
+    if (_recentChatMessages.length > 5) {
       _recentChatMessages.removeAt(0);
     }
     ClipQueueConstants.logEvent('Added chat message: ${message.content.substring(0, min(20, message.content.length))}...');
@@ -172,7 +172,7 @@ class ClipQueueManager {
 if (_isDisposed) return;
 
 final newClip = VideoClip(
-  prompt: "${video.title}\n${video.description}",
+  prompt: "${video.title}\n${video.evolvedDescription.isEmpty ? video.description : video.evolvedDescription}",
   seed: video.useFixedSeed && video.seed > 0 ? video.seed : generateSeed(),
   orientation: _currentOrientation,
 );
@@ -248,7 +248,7 @@ class ClipQueueManager {
 _descriptionEvolutionTimer = Timer.periodic(
   Duration(seconds: checkInterval),
   (timer) async {
-    debugPrint('SIMULATION: Timer check triggered');
+    // debugPrint('SIMULATION: Timer check triggered');
     if (_isDisposed) {
       debugPrint('SIMULATION: Skipping because manager is disposed');
       return;
@@ -256,7 +256,7 @@ class ClipQueueManager {
 
 // Skip if simulation is paused (due to video playback being paused)
 if (_isSimulationPaused) {
-  debugPrint('SIMULATION: Skipping because it is paused');
+  // debugPrint('SIMULATION: Skipping because it is paused');
   ClipQueueConstants.logEvent('Skipping simulation because it is paused');
   return;
 }
@@ -265,7 +265,7 @@ class ClipQueueManager {
 // but since clip generation is constant, we'll now run them in parallel
 final isGenerating = _activeGenerations.isNotEmpty;
 if (isGenerating) {
-  debugPrint('SIMULATION: Continuing with simulation despite active generations');
+  // debugPrint('SIMULATION: Continuing with simulation despite active generations');
   ClipQueueConstants.logEvent('Running simulation in parallel with active generations');
   // We no longer return early here
 }
@@ -273,7 +273,7 @@ class ClipQueueManager {
 // Calculate time since last simulation
 final now = DateTime.now();
 final duration = now.difference(_lastDescriptionEvolutionTime);
-debugPrint('SIMULATION: Time since last simulation: ${duration.inSeconds}s (frequency: ${Configuration.instance.simLoopFrequencyInSec}s)');
+// debugPrint('SIMULATION: Time since last simulation: ${duration.inSeconds}s (frequency: ${Configuration.instance.simLoopFrequencyInSec}s)');
 
 // If we've waited long enough, simulate the video
 if (duration.inSeconds >= Configuration.instance.simLoopFrequencyInSec) {
@@ -282,7 +282,7 @@ class ClipQueueManager {
   await _evolveDescription();
   _lastDescriptionEvolutionTime = now;
 } else {
-  debugPrint('SIMULATION: Not enough time elapsed since last simulation');
+  // debugPrint('SIMULATION: Not enough time elapsed since last simulation');
 }
 },
 );
@@ -315,8 +315,10 @@ class ClipQueueManager {
 try {
   // Format recent chat messages as a string for the simulation prompt
   String chatMessagesString = getChatMessagesString();
+
   if (chatMessagesString.isNotEmpty) {
     ClipQueueConstants.logEvent('Including ${_recentChatMessages.length} chat messages in simulation');
+    debugPrint('SIMULATION: Including chat messages: $chatMessagesString');
   }
 
   // Use the WebSocketService to simulate the video
@@ -334,8 +336,8 @@ class ClipQueueManager {
 final newEvolvedDescription = result['evolved_description'] as String;
 final newCondensedHistory = result['condensed_history'] as String;
 
-debugPrint('SIMULATION: Received evolved description (${newEvolvedDescription.length} chars)');
-debugPrint('SIMULATION: First 100 chars: ${newEvolvedDescription.substring(0, min(100, newEvolvedDescription.length))}...');
+// debugPrint('SIMULATION: Received evolved description (${newEvolvedDescription.length} chars)');
+// debugPrint('SIMULATION: First 100 chars: ${newEvolvedDescription.substring(0, min(100, newEvolvedDescription.length))}...');
 
 video = video.copyWith(
   evolvedDescription: newEvolvedDescription,
@@ -343,11 +345,11 @@ class ClipQueueManager {
 );
 
 _evolutionCounter++;
-debugPrint('SIMULATION: Video simulated (iteration $_evolutionCounter)');
-ClipQueueConstants.logEvent('Video simulated (iteration $_evolutionCounter)');
+// debugPrint('SIMULATION: Video simulated (iteration $_evolutionCounter)');
+// ClipQueueConstants.logEvent('Video simulated (iteration $_evolutionCounter)');
 
 // Emit the updated video to the stream for subscribers
-debugPrint('SIMULATION: Emitting updated video to stream');
+// debugPrint('SIMULATION: Emitting updated video to stream');
 _videoUpdateController.add(video);
 
 onQueueUpdated?.call();
@@ -368,7 +370,7 @@ class ClipQueueManager {
 
 // If we've been successful before but failed now, we can continue using the last evolved description
 if (_evolutionCounter > 0) {
-  ClipQueueConstants.logEvent('Continuing with previous description');
+  // ClipQueueConstants.logEvent('Continuing with previous description');
 }
 }
 }
@@ -407,16 +409,8 @@ class ClipQueueManager {
 
 // First ensure we have the correct buffer size
 while (_clipBuffer.length < Configuration.instance.renderQueueBufferSize) {
-  // Determine which description to use for the prompt
-  String descriptionToUse = video.description;
-
-  // If we have an evolved description, use that instead
-  if (video.evolvedDescription.isNotEmpty) {
-    descriptionToUse = video.evolvedDescription;
-  }
-
   final newClip = VideoClip(
-    prompt: "${video.title}\n${…
+    prompt: "${video.title}\n${video.evolvedDescription.isEmpty ? video.description : video.evolvedDescription}",
     seed: video.useFixedSeed && video.seed > 0 ? video.seed : generateSeed(),
     orientation: _currentOrientation,
   );
@@ -435,8 +429,9 @@ class ClipQueueManager {
 for (final clip in failedClips) {
   _clipBuffer.remove(clip);
   final newClip = VideoClip(
-    prompt: "${video.title}\n${video.description}",
+    prompt: "${video.title}\n${video.evolvedDescription.isEmpty ? video.description : video.evolvedDescription}",
     seed: video.useFixedSeed && video.seed > 0 ? video.seed : generateSeed(),
+    orientation: _currentOrientation, // Use the current orientation here too
   );
   _clipBuffer.add(newClip);
 }
@@ -538,18 +533,9 @@ class ClipQueueManager {
 _clipBuffer.remove(clip);
 _clipHistory.add(clip);
 
-// Determine which description to use for the prompt
-String descriptionToUse = video.description;
-
-// If we have an evolved description, use that instead
-if (video.evolvedDescription.isNotEmpty) {
-  descriptionToUse = video.evolvedDescription;
-  ClipQueueConstants.logEvent('Using evolved description for new clip (evolution #$_evolutionCounter)');
-}
-
 // Add a new pending clip with current orientation
 final newClip = VideoClip(
-  prompt: "${video.title}\n${…
+  prompt: "${video.title}\n${video.evolvedDescription.isEmpty ? video.description : video.evolvedDescription}",
   seed: video.useFixedSeed && video.seed > 0 ? video.seed : generateSeed(),
   orientation: _currentOrientation,
 );
lib/services/clip_queue/queue_stats_logger.dart CHANGED
@@ -1,7 +1,5 @@
 // lib/services/clip_queue/queue_stats_logger.dart
 
-import 'dart:async';
-import 'package:flutter/foundation.dart';
 import 'package:collection/collection.dart';
 import 'video_clip.dart';
 import 'clip_states.dart';
lib/services/clip_queue/video_clip.dart CHANGED
@@ -2,7 +2,6 @@
 
 import 'dart:async';
 import 'package:uuid/uuid.dart';
-import 'package:flutter/foundation.dart';
 import 'clip_states.dart';
 import '../../models/video_orientation.dart';
 
lib/services/html_stub.dart CHANGED
@@ -1,5 +1,6 @@
 /// Stub implementation for dart:html when not on web platform
 /// This file is imported when dart.library.html is not available
+library;
 
 class Window {
   final Document document = Document();
lib/services/websocket_api_service.dart CHANGED
@@ -219,7 +219,7 @@ class WebSocketApiService {
 String get anonLimitMessage => _anonLimitMessage;
 
 // Message to display when device limit is exceeded
-String _deviceLimitMessage = 'Too many connections from this device. Please close other tabs running TikSlop.';
+final String _deviceLimitMessage = 'Too many connections from this device. Please close other tabs running TikSlop.';
 String get deviceLimitMessage => _deviceLimitMessage;
 
 // Stream to notify listeners when anonymous limit status changes
@@ -280,7 +280,7 @@ class WebSocketApiService {
 // Check if we're exceeding the limit, but only for non-anonymous users
 // For anonymous users, we rely on the server-side IP check
 if (_userRole != 'anon' && connections.length > _maxDeviceConnections) {
-  debugPrint('Device connection limit exceeded: ${connections.length} connections for $…
+  debugPrint('Device connection limit exceeded: ${connections.length} connections for $_userRole user');
   return false;
 }
 
@@ -386,18 +386,18 @@ class WebSocketApiService {
 // Prevent multiple simultaneous connection attempts
 return _connectionLock.synchronized(() async {
   if (_status == ConnectionStatus.connected) {
-    debugPrint('WebSocketApiService: Already connected, skipping connection attempt');
+    // debugPrint('WebSocketApiService: Already connected, skipping connection attempt');
     return;
   }
 
   if (_status == ConnectionStatus.connecting) {
-    debugPrint('WebSocketApiService: Connection already in progress, waiting...');
+    // debugPrint('WebSocketApiService: Connection already in progress, waiting...');
 
     // Wait for a short time to see if connection completes
     for (int i = 0; i < 10; i++) {
       await Future.delayed(const Duration(milliseconds: 200));
       if (_status == ConnectionStatus.connected) {
-        debugPrint('WebSocketApiService: Connection completed while waiting');
+        // debugPrint('WebSocketApiService: Connection completed while waiting');
         return;
       }
       if (_status == ConnectionStatus.error || _status == ConnectionStatus.maintenance) {
@@ -412,7 +412,7 @@ class WebSocketApiService {
 
 try {
   _setStatus(ConnectionStatus.connecting);
-  debugPrint('WebSocketApiService: Setting status to CONNECTING');
+  // debugPrint('WebSocketApiService: Setting status to CONNECTING');
 
   // Close existing channel if any
   await _channel?.sink.close();
@@ -632,7 +632,7 @@ class WebSocketApiService {
 }
 
 // Setup stream listener with error handling
-debugPrint('WebSocketApiService: Setting up stream listeners...');
+// debugPrint('WebSocketApiService: Setting up stream listeners...');
 _channel!.stream.listen(
   _handleMessage,
   onError: _handleError,
@@ -646,7 +646,7 @@ class WebSocketApiService {
   _startConnectionHeartbeat();
 }
 
-debugPrint('WebSocketApiService: Setting status to CONNECTED');
+// debugPrint('WebSocketApiService: Setting status to CONNECTED');
 _setStatus(ConnectionStatus.connected);
 _reconnectAttempts = 0;
 
@@ -678,7 +678,7 @@ class WebSocketApiService {
 
 void addSubscriber(String id) {
   _subscribers[id] = (_subscribers[id] ?? 0) + 1;
-  debugPrint('WebSocket subscriber added: $id (total: ${_subscribers[id]})');
+  // debugPrint('WebSocket subscriber added: $id (total: ${_subscribers[id]})');
 }
 
 void removeSubscriber(String id) {
@@ -687,15 +687,15 @@ class WebSocketApiService {
   if (_subscribers[id]! <= 0) {
     _subscribers.remove(id);
   }
-  debugPrint('WebSocket subscriber removed: $id (remaining: ${_subscribers[id] ?? 0})');
+  // debugPrint('WebSocket subscriber removed: $id (remaining: ${_subscribers[id] ?? 0})');
 }
 }
 
 Future<void> joinChatRoom(String videoId) async {
-  …
+  // debugPrint('WebSocketApiService: Attempting to join chat room: $videoId');
 
   if (!isConnected) {
-    debugPrint('WebSocketApiService: Not connected, connecting first...');
+    // debugPrint('WebSocketApiService: Not connected, connecting first...');
     await connect();
   }
 
@@ -708,7 +708,7 @@ class WebSocketApiService {
   timeout: const Duration(seconds: 10),
 );
 
-debugPrint('WebSocketApiService: Join chat room response received: $response');
+// debugPrint('WebSocketApiService: Join chat room response received: $response');
 
 if (!response['success']) {
   final error = response['error'] ?? 'Failed to join chat room';
@@ -721,7 +721,7 @@ class WebSocketApiService {
   _handleChatHistory(response);
 }
 
-debugPrint('WebSocketApiService: Successfully joined chat room: $videoId');
+// debugPrint('WebSocketApiService: Successfully joined chat room: $videoId');
 } catch (e) {
   debugPrint('WebSocketApiService: Error joining chat room: $e');
   rethrow;
@@ -740,7 +740,7 @@ class WebSocketApiService {
   ),
   timeout: const Duration(seconds: 5),
 );
-debugPrint('Successfully left chat room: $videoId');
+// debugPrint('Successfully left chat room: $videoId');
 } catch (e) {
   debugPrint('Failed to leave chat room: $e');
 }
@@ -800,13 +800,13 @@ class WebSocketApiService {
 _activeSearches[query] = false;
 
 if (_disposed) {
-  debugPrint('Search terminated: Service disposed');
+  // debugPrint('Search terminated: Service disposed');
 } else if (failedAttempts >= maxFailedAttempts) {
-  debugPrint('Search terminated: Max failures ($maxFailedAttempts) reached');
+  // debugPrint('Search terminated: Max failures ($maxFailedAttempts) reached');
 } else if ((_currentSearchState?.resultCount ?? 0) >= maxResults) {
-  debugPrint('Search terminated: Max results ($maxResults) reached');
+  // debugPrint('Search terminated: Max results ($maxResults) reached');
 } else {
-  debugPrint('Search terminated: Search cancelled');
+  // debugPrint('Search terminated: Search cancelled');
 }
 }
 
@@ -1182,7 +1182,7 @@ class WebSocketApiService {
 action: 'generate_video',
 params: {
   'title': video.title,
-  'description': video.description,
+  'description': video.evolvedDescription.isEmpty ? video.description : video.evolvedDescription,
   'video_prompt_prefix': settings.videoPromptPrefix,
   'options': {
     'enhance_prompt': enhancePrompt,
@@ -1249,6 +1249,31 @@ class WebSocketApiService {
 debugPrint('WebSocketApiService: Sending simulation request for video $videoId (evolution #$evolutionCount)');
 
 try {
+  // If chat messages are provided directly, use them; otherwise the default empty string is used
+  String formattedChatMessages = chatMessages;
+
+  // If no chat messages were provided but we have a chat stream, try to get recent messages
+  if (formattedChatMessages.isEmpty) {
+    // Check if we have any active chat messages in our stream
+    try {
+      // Get messages directly from the chatController's history
+      final List<ChatMessage> recentChatMessages = [];
+      // We'd ideally query for recent messages in a real implementation
+      // but to avoid circular dependencies we'll use any messages provided to us
+
+      if (recentChatMessages.isNotEmpty) {
+        formattedChatMessages = recentChatMessages.map((msg) =>
+          "${msg.username}: ${msg.content}"
+        ).join("\n");
+        debugPrint('WebSocketApiService: Including ${recentChatMessages.length} chat messages in simulation');
+      }
+    } catch (e) {
+      debugPrint('WebSocketApiService: Error getting chat messages: $e');
+    }
+  }
+
+  debugPrint('WebSocketApiService: Chat messages included: ${formattedChatMessages.isNotEmpty ? 'Yes' : 'No'}');
+
   final response = await _sendRequest(
     WebSocketRequest(
       action: 'simulate',
@@ -1259,7 +1284,7 @@ class WebSocketApiService {
 'current_description': currentDescription,
 'condensed_history': condensedHistory,
 'evolution_count': evolutionCount,
-'chat_messages': …
+'chat_messages': formattedChatMessages,
 },
 ),
 timeout: const Duration(seconds: 60),
lib/services/websocket_core_interface.dart CHANGED
@@ -2,6 +2,8 @@
 
 /// This file provides a subset of WebSocket functionality needed for the nano player
 /// It's a simplified version that avoids exposing the entire WebSocketApiService
+library;
+
 
 /// WebSocketRequest model
 class WebSocketRequest {
lib/widgets/ai_content_disclaimer.dart CHANGED
@@ -2,7 +2,6 @@
 
 import 'package:flutter/material.dart';
 import 'package:google_fonts/google_fonts.dart';
-import '../theme/colors.dart';
 
 class AiContentDisclaimer extends StatelessWidget {
   final bool isInteractive;
lib/widgets/maintenance_screen.dart CHANGED
@@ -1,6 +1,5 @@
 // lib/widgets/maintenance_screen.dart
 import 'package:flutter/material.dart';
-import 'package:tikslop/theme/colors.dart';
 
 class MaintenanceScreen extends StatelessWidget {
   final Exception? error;
lib/widgets/video_card.dart CHANGED
@@ -1,5 +1,4 @@
 import 'package:flutter/material.dart';
-import 'dart:convert';
 import '../theme/colors.dart';
 import '../models/video_result.dart';
 import './video_player/index.dart';
lib/widgets/video_player/buffer_manager.dart CHANGED
@@ -3,7 +3,6 @@
 import 'dart:async';
 import 'package:flutter/foundation.dart';
 import 'package:tikslop/config/config.dart';
-import 'package:tikslop/services/clip_queue/video_clip.dart';
 import 'package:tikslop/services/clip_queue/clip_queue_manager.dart';
 import 'package:video_player/video_player.dart';
 import 'package:tikslop/models/video_result.dart';
lib/widgets/video_player/nano_clip_manager.dart CHANGED
@@ -1,7 +1,6 @@
 // lib/widgets/video_player/nano_clip_manager.dart
 
 import 'dart:async';
-import 'dart:convert';
 import 'package:flutter/foundation.dart';
 import 'package:tikslop/models/video_result.dart';
 import 'package:tikslop/services/clip_queue/video_clip.dart';
lib/widgets/video_player/playback_controller.dart CHANGED
@@ -2,7 +2,6 @@
 
 import 'dart:async';
 import 'package:flutter/foundation.dart';
-import 'package:flutter/material.dart';
 import 'package:video_player/video_player.dart';
 import 'package:tikslop/config/config.dart';
 import 'package:tikslop/services/clip_queue/video_clip.dart';
lib/widgets/video_player/video_player_widget.dart CHANGED
@@ -302,11 +302,11 @@ class _VideoPlayerWidgetState extends State<VideoPlayerWidget> with WidgetsBindi
 VideoPlayerController? newController;
 
 if (_playbackController.nextController != null) {
-  debugPrint('Using preloaded controller for clip ${clip.seed}');
+  // debugPrint('Using preloaded controller for clip ${clip.seed}');
   newController = _playbackController.nextController;
   _playbackController.nextController = null;
 } else {
-  debugPrint('Creating new controller for clip ${clip.seed}');
+  // debugPrint('Creating new controller for clip ${clip.seed}');
   newController = VideoPlayerController.networkUrl(
     Uri.parse(clip.base64Data!),
   );
@@ -338,7 +338,7 @@ class _VideoPlayerWidgetState extends State<VideoPlayerWidget> with WidgetsBindi
 
 if (widget.autoPlay) {
   await newController.play();
-  debugPrint('Started playback of clip ${clip.seed}');
+  // debugPrint('Started playback of clip ${clip.seed}');
   _playbackController.startPlaybackTimer();
 }
 
@@ -444,10 +444,8 @@ class _VideoPlayerWidgetState extends State<VideoPlayerWidget> with WidgetsBindi
 _isDisposed = true;
 
 // Ensure simulation is paused when widget is disposed
-…
-…
-}
-
+_bufferManager.queueManager.setSimulationPaused(true);
+
 // Unregister the observer
 WidgetsBinding.instance.removeObserver(this);
 
lib/widgets/web_utils.dart CHANGED
@@ -1,4 +1,3 @@
-import 'dart:async';
 import 'package:flutter/foundation.dart';
 
 // Platform-specific imports handling