matt HOFFNER committed on
Commit
50e3782
·
1 Parent(s): 8eb7b1d

pass model directly

Browse files
components/Playground/index.tsx CHANGED
@@ -155,7 +155,7 @@ const Playground = () => {
155
  className="border p-2 rounded-md shadow-sm w-full bg-transparent text-gray-700 mt-2"
156
  >
157
  <option value="openai">gpt-4</option>
158
- <option value="huggingface">meta-llama/Llama-2-70b-chat-hf</option>
159
  </select>
160
  </div>
161
  )}
 
155
  className="border p-2 rounded-md shadow-sm w-full bg-transparent text-gray-700 mt-2"
156
  >
157
  <option value="openai">gpt-4</option>
158
+ <option value="meta-llama/Llama-2-70b-chat-hf">meta-llama/Llama-2-70b-chat-hf</option>
159
  </select>
160
  </div>
161
  )}
pages/api/chat/index.ts CHANGED
@@ -35,10 +35,10 @@ export default async function(req: Request) {
35
 
36
  const stream = OpenAIStream(response);
37
  return new StreamingTextResponse(stream);
38
- } else if (aiProvider === 'huggingface') {
39
  const response = Hf.textGenerationStream({
40
  // @ts-ignore
41
- model: 'meta-llama/Llama-2-70b-chat-hf',
42
  inputs: experimental_buildLlama2Prompt(messages),
43
  parameters: {
44
  max_new_tokens: 500,
@@ -50,7 +50,5 @@ export default async function(req: Request) {
50
 
51
  const stream = HuggingFaceStream(response);
52
  return new StreamingTextResponse(stream);
53
- } else {
54
- throw new Error(`Unsupported AI provider: ${aiProvider}`);
55
  }
56
  }
 
35
 
36
  const stream = OpenAIStream(response);
37
  return new StreamingTextResponse(stream);
38
+ } else {
39
  const response = Hf.textGenerationStream({
40
  // @ts-ignore
41
+ model: aiProvider,
42
  inputs: experimental_buildLlama2Prompt(messages),
43
  parameters: {
44
  max_new_tokens: 500,
 
50
 
51
  const stream = HuggingFaceStream(response);
52
  return new StreamingTextResponse(stream);
 
 
53
  }
54
  }