import streamlit as st
import pandas as pd
import openai
import joblib
from PIL import Image
import requests
from io import BytesIO
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import LabelEncoder
from huggingface_hub import hf_hub_download
from transformers import AutoFeatureExtractor, AutoModelForImageClassification
import torch
from datetime import datetime

# Dataset loading function with caching
@st.cache_data
def load_datasets():
    try:
        with st.spinner('Loading dataset...'):
            original_data = pd.read_csv('CTP_Model1.csv', low_memory=False)
            return original_data
    except Exception as e:
        st.error(f"Error loading dataset: {str(e)}")
        raise e

def load_image(image_file):
    return Image.open(image_file)

@st.cache_resource
def load_classifier():
    # Cache the Hugging Face classifier so it is downloaded and loaded only once per session
    model_name = "dima806/car_models_image_detection"
    feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
    model = AutoModelForImageClassification.from_pretrained(model_name)
    return feature_extractor, model

def classify_image(image):
    try:
        # Reuse the cached model and feature extractor
        feature_extractor, model = load_classifier()

        # Preprocess the image
        inputs = feature_extractor(images=image, return_tensors="pt")

        # Perform inference
        with torch.no_grad():
            outputs = model(**inputs)

        # Get the predicted class index, its label, and the softmax confidence
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        predicted_class_label = model.config.id2label[predicted_class_idx]
        score = torch.nn.functional.softmax(logits, dim=-1)[0, predicted_class_idx].item()

        # Return the top prediction
        return [{'label': predicted_class_label, 'score': score}]

    except Exception as e:
        st.error(f"Classification error: {e}")
        return None
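
# Optional sketch (not wired into the UI below): a top-k variant of the classifier.
# It reuses load_classifier() above; taking torch.topk over the softmax scores is the
# only change. The function name and k=3 default are illustrative assumptions.
def classify_image_topk(image, k=3):
    feature_extractor, model = load_classifier()
    inputs = feature_extractor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    probs = torch.nn.functional.softmax(logits, dim=-1)[0]
    scores, indices = torch.topk(probs, k)
    return [{'label': model.config.id2label[i.item()], 'score': s.item()}
            for s, i in zip(scores, indices)]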

def get_car_overview(brand, model, year):
    # Uses the legacy ChatCompletion interface, which requires openai<1.0
    prompt = f"Provide an overview of the following car:\nYear: {year}\nMake: {brand}\nModel: {model}\n"
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}]
    )
    return response.choices[0].message['content']
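
# Alternative sketch for the openai>=1.0 SDK (not used above). It assumes the same
# GPT_TOKEN secret and gpt-3.5-turbo model; swap it in only if the newer openai
# package is installed, since ChatCompletion was removed in 1.0. Kept commented out
# so the app keeps using the legacy call above.
#
# from openai import OpenAI
#
# def get_car_overview_v1(brand, model, year):
#     client = OpenAI(api_key=st.secrets["GPT_TOKEN"])
#     prompt = f"Provide an overview of the following car:\nYear: {year}\nMake: {brand}\nModel: {model}\n"
#     response = client.chat.completions.create(
#         model="gpt-3.5-turbo",
#         messages=[{"role": "user", "content": prompt}],
#     )
#     return response.choices[0].message.content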

def load_model_and_encodings():
    try:
        with st.spinner('Loading model...'):
            model_content = hf_hub_download(repo_id="EdBoy2202/car_prediction_model", filename="car_price_modelv3.pkl")
            model = joblib.load(model_content)
        return model
    except Exception as e:
        st.error(f"Error loading model: {str(e)}")
        raise e

def predict_price(model, brand, model_name, year):
    # Vehicle age drives the mileage estimate and the age-based features
    current_year = datetime.now().year
    age = max(current_year - year, 0)

    # Create a dictionary with default values
    input_data = {
        'year': year,
        'make': brand,
        'model': model_name,
        'trim': 'Base',  # Default trim
        'condition': 'Used',  # Default condition
        'fuel': 'Gasoline',  # Default fuel type
        'odometer': age * 12000,  # Estimate: vehicle age times an average 12,000 miles per year
        'title_status': 'Clean',  # Default title status
        'transmission': 'Automatic',  # Default transmission
        'drive': 'Fwd',  # Default drive
        'size': 'Mid-Size',  # Default size
        'type': 'Sedan',  # Default type
        'paint_color': 'White',  # Default color
        'age': age,
        'age_squared': age ** 2
    }
    
    # Prepare the input for the model
    input_df = pd.DataFrame([input_data])
    
    # Make sure to only include columns that the model expects
    model_columns = model.feature_names_in_
    input_df = input_df[model_columns]
    
    # Predict the price
    predicted_price = model.predict(input_df)
    return predicted_price[0]
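
# Hypothetical sketch: if the downloaded regressor is a bare estimator rather than a
# preprocessing Pipeline, the string defaults above would need encoding first. The
# "label_encoders.pkl" filename is an assumption, not a confirmed artifact of the
# EdBoy2202/car_prediction_model repo, so this stays commented out.
#
# def encode_categoricals(input_df):
#     encoder_path = hf_hub_download(repo_id="EdBoy2202/car_prediction_model",
#                                    filename="label_encoders.pkl")  # assumed artifact
#     encoders = joblib.load(encoder_path)  # expected: dict of column -> fitted LabelEncoder
#     for col, le in encoders.items():
#         if col in input_df.columns:
#             input_df[col] = le.transform(input_df[col].astype(str))
#     return input_df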

# Streamlit App
st.title("Auto Appraise")
st.write("Upload a car image or take a picture to get its brand, model, overview, and expected price!")

# Load model and encodings
model = load_model_and_encodings()

# Initialize OpenAI API key
openai.api_key = st.secrets["GPT_TOKEN"]

# File uploader for image
uploaded_file = st.file_uploader("Choose a car image", type=["jpg", "jpeg", "png"])

# Camera input as an alternative (optional)
camera_image = st.camera_input("Or take a picture of the car")

# Process the image (either uploaded or from camera)
image = None
if uploaded_file is not None:
    image = Image.open(uploaded_file)
    st.write("Image uploaded successfully.")
elif camera_image is not None:
    image = Image.open(camera_image)
    st.write("Image captured successfully.")

if image is not None:
    st.image(image, caption='Processed Image', use_container_width=True)

    # Classify the car image
    with st.spinner('Analyzing image...'):
        car_classifications = classify_image(image)
    
    if car_classifications:
        st.write("Image classification successful.")
        st.subheader("Car Classification Results:")
        for classification in car_classifications:
            st.write(f"Model: {classification['label']}")
            st.write(f"Confidence: {classification['score']*100:.2f}%")
        
        # Use the top prediction for further processing; labels are expected as "<Make> <Model>"
        top_prediction = car_classifications[0]['label']
        parts = top_prediction.split(' ', 1)
        brand = parts[0]
        model_name = parts[1] if len(parts) > 1 else ''
        
        st.write(f"Identified Car: {brand} {model_name}")

        # Get additional information using GPT-3.5-turbo
        current_year = datetime.now().year
        overview = get_car_overview(brand, model_name, current_year)
        st.write("Car Overview:")
        st.write(overview)

        # Interactive Price Prediction
        st.subheader("Price Prediction Over Time")
        selected_years = st.slider("Select range of years for price prediction",
                                   min_value=2000, max_value=current_year, value=(2010, current_year))

        years = np.arange(selected_years[0], selected_years[1] + 1)
        predicted_prices = []

        for year in years:
            price = predict_price(model, brand, model_name, year)
            predicted_prices.append(price)

        # Plot the results on an explicit figure object (preferred by st.pyplot over global pyplot state)
        fig, ax = plt.subplots(figsize=(10, 5))
        ax.plot(years, predicted_prices, marker='o')
        ax.set_title(f"Predicted Price of {brand} {model_name} Over Time")
        ax.set_xlabel("Year")
        ax.set_ylabel("Predicted Price ($)")
        ax.grid(True)
        st.pyplot(fig)

    else:
        st.error("Could not classify the image. Please try again with a different image.")
else:
    st.write("Please upload an image or take a picture to proceed.")