Commit b32fabf, committed by matt HOFFNER
Parent: b4aa6a7

initial support for direct url llm support

Files changed:
- components/Playground/index.tsx (+19 -3)
- pages/api/chat/index.ts (+13 -16)
components/Playground/index.tsx  CHANGED

```diff
@@ -37,6 +37,7 @@ const Playground = () => {
   const [isSystemInputVisible, setSystemInputVisible] = useState(false);
   const [isModelInputVisible, setModelInputVisible] = useState(false);
   const [aiProvider, setAIProvider] = useState<string>("openai");
+  const [urlOption, setUrlOption] = useState<string | any>(null);
 
   const [systemMessage, setSystemMessage] = useState(
     DEFAULT_PROMPT
@@ -48,8 +49,10 @@ const Playground = () => {
 
   const modifiedHandleSubmit = async (e: FormEvent<HTMLFormElement>, chatRequestOptions?: ChatRequestOptions) => {
     e.preventDefault();
-
-
+    await handleSubmit(e, {
+      ...chatRequestOptions,
+      aiProvider,
+    } as any);
   };
 
   useEffect(() => {
@@ -68,7 +71,8 @@ const Playground = () => {
   const { append, messages, input, setInput, handleSubmit, ...rest } = useChat({
     body: {
       systemMessage: systemMessage,
-      aiProvider: aiProvider
+      aiProvider: aiProvider,
+      url: urlOption
     },
     onError: (error) => {
       console.error(error);
@@ -156,7 +160,19 @@ const Playground = () => {
         >
           <option value="openai">gpt-4</option>
           <option value="meta-llama/Llama-2-70b-chat-hf">meta-llama/Llama-2-70b-chat-hf</option>
+          <label className="block text-gray-700 text-sm font-bold mb-2" htmlFor="url-option">
+            Model URL Option:
+          </label>
+
         </select>
+        <input
+          id="url-option"
+          type="url"
+          placeholder="Enter URL"
+          value={urlOption}
+          onChange={(e) => setUrlOption(e.target.value)}
+          className="border p-2 rounded-md shadow-sm w-full bg-transparent text-gray-700 mt-2"
+        />
       </div>
     )}
 
```
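Two details in the committed JSX are easy to miss: the new `<label>` sits inside the `<select>`, which is not valid HTML (browsers drop anything inside a `<select>` other than `<option>`/`<optgroup>`), and `urlOption` starts as `null`, so React will warn when the input later receives a string and switches from uncontrolled to controlled. A minimal sketch of the likely intent, not part of the commit, with the label hoisted out of the select and an empty-string default:

```tsx
import { useState } from "react";

// Sketch only: reuses the id and class names from the diff, but hoists the
// label out of the <select> and initializes the state to "" so the <input>
// is a controlled component from the first render.
export function ModelUrlField() {
  const [urlOption, setUrlOption] = useState<string>("");

  return (
    <>
      <label className="block text-gray-700 text-sm font-bold mb-2" htmlFor="url-option">
        Model URL Option:
      </label>
      <input
        id="url-option"
        type="url"
        placeholder="Enter URL"
        value={urlOption}
        onChange={(e) => setUrlOption(e.target.value)}
        className="border p-2 rounded-md shadow-sm w-full bg-transparent text-gray-700 mt-2"
      />
    </>
  );
}
```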
pages/api/chat/index.ts  CHANGED

```diff
@@ -2,21 +2,20 @@ import { OpenAIStream, StreamingTextResponse } from "ai";
 import { Configuration, OpenAIApi } from "openai-edge";
 import { HfInference } from '@huggingface/inference';
 import { HuggingFaceStream } from 'ai';
-import { experimental_buildLlama2Prompt } from 'ai/prompts'
-
-// Configurations for OpenAI
-const openaiConfig = new Configuration({
-  apiKey: process.env.OPENAI_API_KEY,
-});
-const openai = new OpenAIApi(openaiConfig);
-
-// Create a new HuggingFace Inference instance
-const Hf = new HfInference(process.env.HUGGINGFACE_API_KEY);
+import { experimental_buildLlama2Prompt } from 'ai/prompts';
 
 export const runtime = 'edge';
 
 export default async function(req: Request) {
-  let { messages, aiProvider = 'openai', systemMessage } = await req.json();
+  let { messages, aiProvider = 'openai', systemMessage, url } = await req.json();
+
+  // Set up configurations for OpenAI
+  const openaiConfig = new Configuration({
+    apiKey: process.env.OPENAI_API_KEY,
+    baseURL: url ? url : undefined, // Set baseURL to the provided 'url' if it exists
+  } as any);
+  const openai = new OpenAIApi(openaiConfig);
+  const Hf = new HfInference(process.env.HUGGINGFACE_API_KEY);
 
   // Prepend the system message if it's not already there
   if (messages.length === 0 || messages[0].role !== "system") {
@@ -25,14 +24,12 @@ export default async function(req: Request) {
       content: systemMessage
     }, ...messages];
   }
-
   if (aiProvider === 'openai') {
     const response = await openai.createChatCompletion({
       model: 'gpt-4',
       stream: true,
       messages
     });
-
     const stream = OpenAIStream(response);
     return new StreamingTextResponse(stream);
   } else {
@@ -41,10 +38,10 @@ export default async function(req: Request) {
       model: aiProvider,
       inputs: experimental_buildLlama2Prompt(messages),
       parameters: {
-
-
+        repetition_penalty: 1,
+        return_full_text: false
       }
-
+    });
 
     const stream = HuggingFaceStream(response);
     return new StreamingTextResponse(stream);
```