Update app.py
app.py CHANGED
@@ -7,7 +7,7 @@ from transformers import AutoModel, AutoTokenizer
 from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDiscreteScheduler
 from parler_tts import ParlerTTSForConditionalGeneration
 import soundfile as sf
-from llama_index import
+from llama_index import GPTVectorStoreIndex, LLMPredictor, ServiceContext
 from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
 from llama_index.langchain_helpers.text_splitter import RecursiveCharacterTextSplitter
 from langchain import OpenAI
@@ -97,7 +97,7 @@ def handle_input(user_prompt, image=None, video=None, audio=None, doc=None):
 def create_rag_index(file_name, file_content):
     docs = SimpleDirectoryReader(file_name, file_content).load_data()
     service_context = ServiceContext.from_defaults(llm_predictor=LLMPredictor(llm=OpenAI(temperature=0)))
-    index =
+    index = GPTVectorStoreIndex.from_documents(docs, service_context=service_context)
     return index
 
 # Function to encode video
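For reference, below is a standalone sketch of the retrieval flow that the patched create_rag_index now completes: load documents, build a GPTVectorStoreIndex over an OpenAI-backed ServiceContext, and query it. It assumes a legacy llama_index release that still exports these names at the top level (matching the imports in app.py) and an OPENAI_API_KEY in the environment; the "sample_docs" directory and the question string are illustrative placeholders, not part of this commit.

# Hypothetical standalone sketch (not part of this commit) of the same RAG flow:
# load documents, build the index with an OpenAI-backed ServiceContext, query it.
from llama_index import GPTVectorStoreIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader
from langchain import OpenAI

# "sample_docs" is a placeholder directory containing the files to index.
docs = SimpleDirectoryReader("sample_docs").load_data()

# Same ServiceContext construction as in app.py: a deterministic OpenAI LLM.
service_context = ServiceContext.from_defaults(
    llm_predictor=LLMPredictor(llm=OpenAI(temperature=0))
)

# Build the vector index, mirroring the line added in this commit.
index = GPTVectorStoreIndex.from_documents(docs, service_context=service_context)

# Query through llama_index's standard query-engine interface.
query_engine = index.as_query_engine()
print(query_engine.query("Summarize the uploaded document."))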