Update app.py
app.py CHANGED

@@ -47,44 +47,11 @@ from tensorflow import keras
 from PIL import Image
 
 # Cell 1: Image Classification Model
-
-
-
-
-
-                'brown-glass',
-                'cardboard',
-                'clothes',
-                'green-glass',
-                'metal',
-                'paper',
-                'plastic',
-                'shoes',
-                'trash',
-                'white-glass']
-
-# Function to predict image label and score
-def predict_image(input):
-    # Resize the image to the size expected by the model
-    image = input.resize((244, 224))
-    # Convert the image to a NumPy array
-    image_array = tf.keras.preprocessing.image.img_to_array(image)
-    # Normalize the image
-    image_array /= 255.0
-    # Expand the dimensions to create a batch
-    image_array = tf.expand_dims(image_array, 0)
-    # Predict using the model
-    predictions = model1.predict(image_array)
-
-    # Get the predicted class label
-    predicted_class_index = tf.argmax(predictions, axis=1).numpy()[0]
-    predicted_class_label = class_labels[predicted_class_index]
-
-    # Get the confidence score of the predicted class
-    confidence_score = predictions[0][predicted_class_index]
-
-    # Return predicted class label and confidence score
-    return {predicted_class_label: confidence_score}
+pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
+
+def predict_image(image):
+    predictions = pipeline(image)
+    return {p["label"]: p["score"] for p in predictions}
 
 
 image_gradio_app = gr.Interface(
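An aside on this first hunk: the removed TensorFlow path resized inputs to (244, 224), which looks like a transposed (224, 224), and the replacement rebinds the name `pipeline` to the pipeline object itself, shadowing the `transformers` factory it was created with. A minimal standalone sketch of the new classifier, assuming the `pipeline` import sits outside the hunks shown and using a hypothetical local test image:

```python
from transformers import pipeline
from PIL import Image

# Same task/checkpoint as the committed code, bound to a distinct
# name so the `pipeline` factory function is not shadowed.
classifier = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")

def predict_image(image):
    # The pipeline returns a list of {"label": ..., "score": ...} dicts,
    # which maps directly onto the dict shape a gr.Label output accepts.
    predictions = classifier(image)
    return {p["label"]: p["score"] for p in predictions}

# Hypothetical smoke test with a local file:
print(predict_image(Image.open("example.jpg")))
```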
@@ -95,100 +62,12 @@ image_gradio_app = gr.Interface(
     theme=theme
 )
 
-
-
-# split documents
-text_splitter = RecursiveCharacterTextSplitter(
-    chunk_size=1024,
-    chunk_overlap=150,
-    length_function=len
-)
-docs = text_splitter.split_documents(data)
-# define embedding
-embeddings = HuggingFaceEmbeddings(model_name='thenlper/gte-small')
-# create vector database from data
-persist_directory = 'docs/chroma/'
-
-# Remove old database files if any
-shutil.rmtree(persist_directory, ignore_errors=True)
-vectordb = Chroma.from_documents(
-    documents=docs,
-    embedding=embeddings,
-    persist_directory=persist_directory
-)
-# define retriever
-retriever = vectordb.as_retriever(search_kwargs={"k": 2}, search_type="mmr")
-
-class FinalAnswer(BaseModel):
-    question: str = Field(description="the original question")
-    answer: str = Field(description="the extracted answer")
-
-# Assuming you have a parser for the FinalAnswer class
-parser = PydanticOutputParser(pydantic_object=FinalAnswer)
-
-template = """
-Your name is Greta and you are a recycling chatbot with the objective to answer questions from the user in English or Spanish /
-Use the following pieces of context to answer the question /
-If the question is English answer in English /
-If the question is Spanish answer in Spanish /
-Do not mention the word context when you answer a question /
-Answer the question fully and provide as much relevant detail as possible. Do not cut your response short /
-Context: {context}
-User: {question}
-{format_instructions}
-"""
-
-# Create the chat prompt templates
-sys_prompt = SystemMessagePromptTemplate.from_template(template)
-qa_prompt = ChatPromptTemplate(
-    messages=[
-        sys_prompt,
-        HumanMessagePromptTemplate.from_template("{question}")],
-    partial_variables={"format_instructions": parser.get_format_instructions()}
-)
-llm = HuggingFaceHub(
-    repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
-    task="text-generation",
-    model_kwargs={
-        "max_new_tokens": 2000,
-        "top_k": 30,
-        "temperature": 0.1,
-        "repetition_penalty": 1.03
-    },
-)
-
-qa_chain = ConversationalRetrievalChain.from_llm(
-    llm=llm,
-    memory=ConversationBufferMemory(llm=llm, memory_key="chat_history", input_key='question', output_key='output'),
-    retriever=retriever,
-    verbose=True,
-    combine_docs_chain_kwargs={'prompt': qa_prompt},
-    get_chat_history=lambda h: h,
-    rephrase_question=False,
-    output_key='output',
-)
-
-def chat_interface(question, history):
-    result = qa_chain.invoke({'question': question})
-    output_string = result['output']
-
-    # Find the index of the last occurrence of "answer": in the string
-    answer_index = output_string.rfind('"answer":')
-
-    # Extract the substring starting from the "answer": index
-    answer_part = output_string[answer_index + len('"answer":'):].strip()
-
-    # Find the next occurrence of a double quote to get the start of the answer value
-    quote_index = answer_part.find('"')
-
-    # Extract the answer value between double quotes
-    answer_value = answer_part[quote_index + 1:answer_part.find('"', quote_index + 1)]
-
-    return answer_value
+
+def echo(message, history):
+    return message
 
 
 chatbot_gradio_app = gr.ChatInterface(
-    fn=chat_interface,
+    fn=echo,
     title=custom_title
 )
 
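On the second hunk: the removed `chat_interface` recovered the answer with `rfind`/slice string surgery even though a `PydanticOutputParser` had already been built for exactly this job. For the record, a minimal sketch of the parser-based route the old code stopped short of; the `raw_output` string is a hypothetical model response that follows the format instructions:

```python
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field

class FinalAnswer(BaseModel):
    question: str = Field(description="the original question")
    answer: str = Field(description="the extracted answer")

parser = PydanticOutputParser(pydantic_object=FinalAnswer)

# Hypothetical raw model output matching the format instructions.
raw_output = '{"question": "How do I recycle glass?", "answer": "Rinse it and drop it in the glass container."}'

final = parser.parse(raw_output)  # raises OutputParserException on malformed output
print(final.answer)
```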
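Taken together, the commit strips the Space down to a stock image classifier plus an echo chatbot. A hedged sketch of how the two interfaces could be mounted side by side; `custom_title`, `theme`, the `gr.Interface` arguments, and the actual combining code all live outside the hunks shown, so every name below that the diff does not define (the `gr.TabbedInterface` wiring included) is an assumption:

```python
import gradio as gr
from transformers import pipeline

classifier = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")

def predict_image(image):
    predictions = classifier(image)
    return {p["label"]: p["score"] for p in predictions}

def echo(message, history):
    # Placeholder handler introduced by this commit: echoes the user's message.
    return message

# Stand-in for the Interface arguments hidden between the hunks.
image_gradio_app = gr.Interface(
    fn=predict_image,
    inputs=gr.Image(type="pil"),
    outputs=gr.Label(),
)

chatbot_gradio_app = gr.ChatInterface(
    fn=echo,
    title="Greta",  # stand-in for custom_title, which is defined outside the diff
)

# Hypothetical wiring; the diff never shows how (or whether) the apps are combined.
demo = gr.TabbedInterface(
    [image_gradio_app, chatbot_gradio_app],
    tab_names=["Image classification", "Chatbot"],
)

if __name__ == "__main__":
    demo.launch()
```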