File size: 2,438 Bytes
9cdf6e8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1a8d06e
 
 
 
 
 
9cdf6e8
cca430f
 
 
 
 
9cdf6e8
 
 
 
 
 
 
 
 
 
cca430f
 
9cdf6e8
 
 
cca430f
9cdf6e8
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
# --- Core UI / model dependencies -------------------------------------------
import gradio as gr
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import torch
import theme

# NOTE(review): rebinding `theme` to the instance shadows the `theme` module —
# any later `theme.<attr>` access hits the Theme object, not the module.
theme = theme.Theme()

import os
import sys
sys.path.append('../..')  # make the parent package importable (relative to CWD)

#langchain
# NOTE(review): several names below are imported more than once
# (PromptTemplate x3, ChatPromptTemplate x3, LLMChain x2, BaseModel x2) —
# harmless at runtime, but worth deduplicating.
from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.chains import RetrievalQA
from langchain.prompts import ChatPromptTemplate
from langchain.schema import StrOutputParser
from langchain.schema.runnable import Runnable
from langchain.schema.runnable.config import RunnableConfig
from langchain.chains import (
    LLMChain, ConversationalRetrievalChain)
from langchain.vectorstores import Chroma
from langchain.memory import ConversationBufferMemory
from langchain.chains import LLMChain
from langchain.prompts.prompt import PromptTemplate
from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate
from langchain.prompts import SystemMessagePromptTemplate, HumanMessagePromptTemplate, ChatPromptTemplate,  MessagesPlaceholder
from langchain.document_loaders import PyPDFDirectoryLoader
from pydantic import BaseModel, Field
from langchain.output_parsers import PydanticOutputParser
from langchain_community.llms import HuggingFaceHub
from langchain_community.document_loaders import WebBaseLoader

from pydantic import BaseModel
import shutil



# App title rendered as HTML so it picks up the custom color.
custom_title = "<span style='color: rgb(243, 239, 224);'>Green Greta</span>"


from huggingface_hub import from_pretrained_keras

import tensorflow as tf
from tensorflow import keras
from PIL import Image

# Cell 1: Image Classification Model
# NOTE(review): this rebinds the name `pipeline`, shadowing the
# `transformers.pipeline` factory imported above — after this line,
# `pipeline` is the image-classification callable, and the factory can no
# longer be used to build other pipelines in this module.
pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")

def predict_image(image):
    """Run the module-level image-classification pipeline on *image*.

    Returns a mapping of class label -> confidence score, the shape
    gr.Label expects for its output.
    """
    return {
        prediction["label"]: prediction["score"]
        for prediction in pipeline(image)
    }


# Tab 1: image classification UI backed by predict_image.
_image_input = gr.Image(label="Image", sources=['upload', 'webcam'], type="pil")
_result_output = gr.Label(label="Result")

image_gradio_app = gr.Interface(
    fn=predict_image,
    inputs=_image_input,
    outputs=[_result_output],
    title=custom_title,
    theme=theme,
)

def echo(message, history):
    """Placeholder chat handler: reply with the user's message unchanged.

    ``history`` is required by gr.ChatInterface's callback signature but
    is intentionally ignored here.
    """
    reply = message
    return reply


# Tab 2: chat UI; currently wired to the echo placeholder handler.
chatbot_gradio_app = gr.ChatInterface(
    title=custom_title,
    fn=echo,
)

# Combine both interfaces into a single tabbed app and serve it.
_tab_titles = ["Green Greta Image Classification", "Green Greta Chat"]

app = gr.TabbedInterface(
    [image_gradio_app, chatbot_gradio_app],
    tab_names=_tab_titles,
    theme=theme,
)

app.queue()   # enable request queueing so long-running calls don't time out
app.launch()  # blocking: starts the web server