// The Hugging Face inference client is loaded as an ES module; Vue is
// expected as a global (e.g. from a CDN <script> tag on the host page).
import { HfInference } from "https://cdn.skypack.dev/@huggingface/inference@latest";

const { createApp, ref, onMounted } = Vue;
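// Text generation models the user can choose between.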
const textGenerationModels = [
  "mistralai/Mistral-7B-v0.1",
  "bigscience/bloom",
];
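// Maps the response length setting to a max_new_tokens budget.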
const responseLengthToTokenCount = {
  short: 100,
  medium: 250,
  long: 500,
};
const app = createApp({
  setup() {
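    // Reactive UI state; the Hugging Face API token is persisted to localStorage.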
    const token = ref(localStorage.getItem("token") || "");
    const userPrompt = ref("Write about the difference between Star Wars and Star Trek");
    const currentGeneratedText = ref("");
    const models = ref([]);
    const selectedModel = ref("");
    const isRunning = ref(false);
    const responseLength = ref("medium");
    let controller;
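    // Opens a streaming request against the selected model. use_cache is
    // disabled so identical prompts still reach the model, and the abort
    // signal lets stop() cancel an in-flight generation.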
    const createTextGenerationStream = (hfInstance, prompt, abortControllerSignal) => {
      return hfInstance.textGenerationStream(
        {
          model: selectedModel.value,
          inputs: prompt,
          parameters: { max_new_tokens: responseLengthToTokenCount[responseLength.value] },
        },
        {
          use_cache: false,
          signal: abortControllerSignal,
        }
      );
    };
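    // Async generator that accumulates streamed tokens and yields the full
    // text generated so far, so the template can render partial output.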
    const generateTextStream = async function* (hfInstance, abortSignal, prompt) {
      let generatedText = "";
      for await (const output of createTextGenerationStream(hfInstance, prompt, abortSignal)) {
        generatedText += output.token.text;
        yield generatedText;
      }
    };
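    // Starts a generation: persists the token, creates an inference client,
    // and streams the growing response into currentGeneratedText.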
    const run = async () => {
      isRunning.value = true;
      currentGeneratedText.value = "";
      controller = new AbortController();
      localStorage.setItem("token", token.value);
      const hfInstance = new HfInference(token.value);
      try {
        for await (const textStream of generateTextStream(
          hfInstance,
          controller.signal,
          userPrompt.value
        )) {
          currentGeneratedText.value = textStream;
        }
      } catch (e) {
        // Aborting the request rejects the stream; other errors are logged.
        console.error(e);
      } finally {
        // Reset the flag even when the stream finishes on its own,
        // otherwise the UI would stay stuck in the "running" state.
        isRunning.value = false;
      }
    };
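    // Cancels the in-flight request, if any, via the shared AbortController.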
    const stop = () => {
      if (controller) {
        controller.abort();
      }
      isRunning.value = false;
    };
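    // On mount, restore a previously saved token and populate the model picker.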
    onMounted(async () => {
      const localStorageToken = localStorage.getItem("token");
      if (localStorageToken) {
        token.value = localStorageToken;
      }
      models.value = textGenerationModels;
      selectedModel.value = textGenerationModels[0];
    });
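    // Everything returned here is accessible from the template.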
    return {
      token,
      userPrompt,
      currentGeneratedText,
      run,
      stop,
      models,
      selectedModel,
      isRunning,
      responseLength,
    };
  },
});
app.mount("#app");