# test-space-2 / app.py
import os
import sys
import shutil

import gradio as gr
import torch
import tensorflow as tf
from tensorflow import keras
from PIL import Image
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
from huggingface_hub import from_pretrained_keras
from pydantic import BaseModel, Field

# LangChain (NOTE: these imports are not used yet in this file; they appear to be
# staged for a retrieval-backed version of the chatbot -- see the sketch further down)
from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.prompts import (
    PromptTemplate,
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
)
from langchain.chains import RetrievalQA, LLMChain, ConversationalRetrievalChain
from langchain.schema import StrOutputParser
from langchain.schema.runnable import Runnable
from langchain.schema.runnable.config import RunnableConfig
from langchain.vectorstores import Chroma
from langchain.memory import ConversationBufferMemory
from langchain.document_loaders import PyPDFDirectoryLoader
from langchain.output_parsers import PydanticOutputParser
from langchain_community.llms import HuggingFaceHub
from langchain_community.document_loaders import WebBaseLoader

import theme

sys.path.append('../..')

# Custom Gradio theme and shared tab title
theme = theme.Theme()
custom_title = "<span style='color: rgb(243, 239, 224);'>Green Greta</span>"
# Cell 1: Image Classification Model
# Hotdog/not-hotdog classifier from the Hub, bound to its own name so it does not
# shadow the `pipeline` factory imported from transformers.
image_pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")

def predict_image(image):
    # Run the classifier and return a {label: score} dict for the gr.Label output
    predictions = image_pipeline(image)
    return {p["label"]: p["score"] for p in predictions}
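# Image tab: wire predict_image to an image input (upload or webcam) and a label output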
image_gradio_app = gr.Interface(
fn=predict_image,
inputs=gr.Image(label="Image", sources=['upload', 'webcam'], type="pil"),
outputs=[gr.Label(label="Result")],
title=custom_title,
theme=theme
)
def echo(message, history):
    # Placeholder chat handler: for now the bot simply echoes the user's message back
    return message
chatbot_gradio_app = gr.ChatInterface(
fn=echo,
title=custom_title
)
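# --- Sketch (not part of the running app): one way the LangChain imports above could be
# wired into a retrieval-augmented chat handler. This is a minimal, hypothetical example:
# the source URL, model id, and chunking parameters are placeholders, the function is never
# called here, and HuggingFaceHub would need a HUGGINGFACEHUB_API_TOKEN to run.
def build_qa_chain(source_url="https://example.com/recycling-guide"):
    # Load a web page and split it into overlapping chunks
    docs = WebBaseLoader(source_url).load()
    chunks = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_documents(docs)
    # Index the chunks in a Chroma vector store using Hugging Face sentence embeddings
    vectordb = Chroma.from_documents(chunks, HuggingFaceEmbeddings())
    # A hosted LLM plus conversation memory, combined into a conversational retrieval chain
    llm = HuggingFaceHub(repo_id="mistralai/Mistral-7B-Instruct-v0.2",
                         model_kwargs={"temperature": 0.5, "max_new_tokens": 512})
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    return ConversationalRetrievalChain.from_llm(llm=llm, retriever=vectordb.as_retriever(), memory=memory)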
# Combine both interfaces into a single app
app = gr.TabbedInterface(
[image_gradio_app, chatbot_gradio_app],
tab_names=["Green Greta Image Classification","Green Greta Chat"],
theme=theme
)
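# Enable the Gradio request queue, then start the app server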
app.queue()
app.launch()