ruslanmv committed
Commit 0021fbd · 1 Parent(s): 55eedc0

Update generateStoryLines.ts

src/app/server/actions/generateStoryLines.ts CHANGED
@@ -1,160 +1,51 @@
-"use server";
-
-import "server-only";
-import type { TTSVoice, StoryLine } from "@/types";
-
-const BASE = (process.env.AI_STORY_API_GRADIO_URL || "").replace(/\/+$/, "");
-const SECRET = process.env.AI_STORY_API_SECRET_TOKEN || "";
-const DEBUG = (process.env.DEBUG_STORY_API || "").toLowerCase() === "true";
-const FN_INDEX = Number(process.env.AI_STORY_API_FN_INDEX ?? 0); // default 0
-
-function assertEnv() {
-  if (!BASE) throw new Error("Missing AI_STORY_API_GRADIO_URL");
-  if (!SECRET) throw new Error("Missing AI_STORY_API_SECRET_TOKEN");
-}
-
-function logInfo(...args: any[]) {
-  // Always log compact request summary
-  console.log("[story-api]", ...args);
-}
-function logDebug(...args: any[]) {
-  if (DEBUG) console.debug("[story-api:debug]", ...args);
-}
-function logError(...args: any[]) {
-  console.error("[story-api:error]", ...args);
-}
-
-function abbreviate(s: string, n = 200): string {
-  if (s == null) return String(s);
-  return s.length > n ? s.slice(0, n) + "…" : s;
-}
-
-function safePromptPreview(p: string) {
-  const cropped = (p || "").slice(0, 60).replace(/\s+/g, " ").trim();
-  return `${cropped}${cropped.length < (p || "").length ? "…" : ""}`;
-}
-
-function withTimeout<T>(p: Promise<T>, ms = 90_000) {
-  return Promise.race<T>([
-    p,
-    new Promise<T>((_, rej) =>
-      setTimeout(() => rej(new Error(`Request timed out after ${ms} ms`)), ms)
-    ) as Promise<T>,
-  ]);
-}
-
-async function timedFetch(url: string, init: RequestInit) {
-  const t0 = Date.now();
-  const res = await withTimeout(fetch(url, init));
-  const ms = Date.now() - t0;
-  return { res, ms };
-}
-
-async function postPredict(body: any) {
-  const headers = { "Content-Type": "application/json", Accept: "application/json" };
-  const endpoints = [`${BASE}/api/predict`, `${BASE}/run/predict`];
-
-  let lastErr: Error | null = null;
-
-  for (const url of endpoints) {
-    try {
-      logDebug("POST", url, "body:", abbreviate(JSON.stringify(body), 300));
-      const { res, ms } = await timedFetch(
-        url,
-        {
-          method: "POST",
-          headers,
-          body: JSON.stringify(body),
-          cache: "no-store",
-          // keepalive: true, // optional
-        }
-      );
-
-      const text = await res.text();
-      let json: any = null;
-      try {
-        json = text ? JSON.parse(text) : null;
-      } catch {
-        // non-JSON (HTML cold start page? proxy error?), keep raw text for logs
-      }
-
-      if (res.ok) {
-        logInfo(`OK ${res.status} in ${ms}ms @ ${url}`);
-        logDebug("response json:", abbreviate(JSON.stringify(json), 500));
-        return json;
-      }
-
-      // Surface meaningful backend errors (token, queue, NSFW, validation, etc.)
-      const detail =
-        json?.detail ??
-        json?.error ??
-        json?.message ??
-        text ??
-        "(empty body)";
-      const message = `HTTP ${res.status} ${res.statusText} @ ${url} in ${ms}ms — ${abbreviate(
-        String(detail),
-        1200
-      )}`;
-      logError(message);
-      lastErr = new Error(message);
-    } catch (e: any) {
-      const msg = `${url} network error: ${e?.message || e}`;
-      logError(msg);
-      lastErr = new Error(msg);
-    }
-  }
-
-  throw lastErr || new Error("All predict endpoints failed");
-}
+"use server"
+
+import { Story, StoryLine, TTSVoice } from "@/types"
+
+const instance = `${process.env.AI_STORY_API_GRADIO_URL || ""}`
+const secretToken = `${process.env.AI_STORY_API_SECRET_TOKEN || ""}`
 
 export async function generateStoryLines(prompt: string, voice: TTSVoice): Promise<StoryLine[]> {
-  assertEnv();
-
-  if (!prompt || prompt.trim().length < 4) {
-    throw new Error("Prompt is too short.");
+  if (!prompt?.length) {
+    throw new Error(`prompt is too short!`)
   }
 
-  // (Optional) lightweight prompt policy guard; adjust/remove per your policy
-  // const banned = /(sexual|sexy|porn|nsfw|explicit)/i;
-  // if (banned.test(prompt)) {
-  //   throw new Error("This demo does not support explicit content. Please try a different prompt.");
-  // }
-
-  logInfo(`user requested "${safePromptPreview(prompt)}"`, `(voice=${voice})`);
-
-  const body = {
-    fn_index: FN_INDEX, // must match your Space's endpoint index
-    data: [SECRET, prompt, voice],
-  };
-
-  const json = await postPredict(body);
-
-  // Gradio payloads are typically { data: [...] }
-  const data = json?.data;
-  if (!Array.isArray(data)) {
-    const s = abbreviate(JSON.stringify(json), 1000);
-    logError("Unexpected response shape:", s);
-    throw new Error(`Unexpected response shape from backend: ${s}`);
+  const cropped = prompt.slice(0, 30)
+  console.log(`user requested "${cropped}${cropped !== prompt ? "..." : ""}"`)
+
+  // positivePrompt = filterOutBadWords(positivePrompt)
+
+  const res = await fetch(instance + (instance.endsWith("/") ? "" : "/") + "api/predict", {
+    method: "POST",
+    headers: {
+      "Content-Type": "application/json",
+      // Authorization: `Bearer ${token}`,
+    },
+    body: JSON.stringify({
+      fn_index: 0, // <- important!
+      data: [
+        secretToken,
+        prompt,
+        voice,
+      ],
+    }),
+    cache: "no-store",
+    // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
+    // next: { revalidate: 1 }
+  })
+
+
+  const rawJson = await res.json()
+  const data = rawJson.data as StoryLine[][]
+
+  const stories = data?.[0] || []
+
+  if (res.status !== 200) {
+    throw new Error('Failed to fetch data')
   }
 
-  // Your Python backend returns an array in data[0], each item {text, audio}
-  const lines = (data[0] as StoryLine[]) || [];
-  if (!Array.isArray(lines)) {
-    const s = abbreviate(JSON.stringify(data[0]), 600);
-    logError("Unexpected payload in data[0]:", s);
-    throw new Error(`Unexpected payload in data[0]: ${s}`);
-  }
-
-  const cleaned: StoryLine[] = lines.map((l) => ({
-    text: (l.text || "")
-      .replaceAll(" .", ".")
-      .replaceAll(" ,", ",")
-      .replaceAll(" !", "!")
-      .replaceAll(" ?", "?")
-      .trim(),
-    audio: l.audio,
-  }));
-
-  logDebug(`returned ${cleaned.length} lines`);
-  return cleaned;
-}
+  return stories.map(line => ({
+    text: line.text.replaceAll(" .", ".").replaceAll(" ?", "?").replaceAll(" !", "!").trim(),
+    audio: line.audio
+  }))
+}
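
For reference, a minimal sketch of how the updated action could be consumed, assuming the `@/` alias resolves to `src/` and that a route handler such as `src/app/api/story/route.ts` exists; the route path, request shape, and error handling below are illustrative assumptions and not part of this commit.

// Hypothetical route handler (not in this commit): exposes generateStoryLines over HTTP.
import { NextResponse } from "next/server"
import type { TTSVoice } from "@/types"
import { generateStoryLines } from "@/app/server/actions/generateStoryLines"

export async function POST(req: Request) {
  // Assumed request body: { prompt: string, voice: TTSVoice }
  const { prompt, voice } = (await req.json()) as { prompt: string; voice: TTSVoice }
  try {
    // Each returned line carries { text, audio }, as produced by the Gradio Space.
    const lines = await generateStoryLines(prompt, voice)
    return NextResponse.json({ lines })
  } catch (err) {
    const message = err instanceof Error ? err.message : "story generation failed"
    return NextResponse.json({ error: message }, { status: 500 })
  }
}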