Commit b2773c3
Parent(s): 660bb11
Data transformation completed

Files changed:
- .gitignore +2 -0
- Dockerfile +17 -0
- anime_recommender/constant/__pycache__/__init__.cpython-310.pyc +0 -0
- anime_recommender/entity/__pycache__/artifact_entity.cpython-310.pyc +0 -0
- anime_recommender/entity/__pycache__/config_entity.cpython-310.pyc +0 -0
- anime_recommender/entity/artifact_entity.py +3 -0
- anime_recommender/entity/config_entity.py +11 -0
- anime_recommender/source/data_transformation.py +115 -0
- app.py +232 -0
- run_pipeline.py +9 -9
.gitignore
CHANGED
@@ -2,3 +2,5 @@ ars/
 .env
 Artifacts/
 logs/
+__pycache__/
+model_trainer/
Dockerfile
ADDED
@@ -0,0 +1,17 @@
+# Use the official Python image as a base
+FROM python:3.9-slim
+
+# Set the working directory in the container
+WORKDIR /app
+
+# Copy the app files into the container
+COPY . .
+
+# Install required packages
+RUN pip install -r requirements.txt
+
+# Expose the port that Streamlit uses
+EXPOSE 8501
+
+# Run the Streamlit app
+CMD ["streamlit", "run", "app.py", "--server.port=8501", "--server.address=0.0.0.0"]
anime_recommender/constant/__pycache__/__init__.cpython-310.pyc
CHANGED
Binary files a/anime_recommender/constant/__pycache__/__init__.cpython-310.pyc and b/anime_recommender/constant/__pycache__/__init__.cpython-310.pyc differ
anime_recommender/entity/__pycache__/artifact_entity.cpython-310.pyc
CHANGED
Binary files a/anime_recommender/entity/__pycache__/artifact_entity.cpython-310.pyc and b/anime_recommender/entity/__pycache__/artifact_entity.cpython-310.pyc differ
anime_recommender/entity/__pycache__/config_entity.cpython-310.pyc
CHANGED
Binary files a/anime_recommender/entity/__pycache__/config_entity.cpython-310.pyc and b/anime_recommender/entity/__pycache__/config_entity.cpython-310.pyc differ
anime_recommender/entity/artifact_entity.py
CHANGED
@@ -5,3 +5,6 @@ from typing import Optional
 class DataIngestionArtifact:
     feature_store_anime_file_path:str
     feature_store_userrating_file_path:str
+@dataclass
+class DataTransformationArtifact:
+    merged_file_path:str
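For reference, the module after this change should look roughly like the sketch below (reconstructed from the hunk; the dataclass import and the decorator on DataIngestionArtifact are not visible in the diff and are assumed from context):

from dataclasses import dataclass

@dataclass
class DataIngestionArtifact:
    feature_store_anime_file_path: str
    feature_store_userrating_file_path: str

@dataclass
class DataTransformationArtifact:
    merged_file_path: str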
anime_recommender/entity/config_entity.py
CHANGED
@@ -29,3 +29,14 @@ class DataIngestionConfig:
         self.feature_store_userrating_file_path: str = os.path.join(self.data_ingestion_dir, DATA_INGESTION_FEATURE_STORE_DIR, RATING_FILE_NAME)
         self.anime_filepath: str = ANIME_FILE_PATH
         self.rating_filepath: str = RATING_FILE_PATH
+
+class DataTransformationConfig:
+    """
+    Configuration for data transformation, including paths for transformed data and preprocessing objects.
+    """
+    def __init__(self,training_pipeline_config:TrainingPipelineConfig):
+        """
+        Initialize data transformation paths.
+        """
+        self.data_transformation_dir:str = os.path.join(training_pipeline_config.artifact_dir,DATA_TRANSFORMATION_DIR)
+        self.merged_file_path:str = os.path.join(self.data_transformation_dir,DATA_TRANSFORMATION_TRANSFORMED_DATA_DIR,MERGED_FILE_NAME)
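DataTransformationConfig builds its output path from constants imported elsewhere in the package. A minimal sketch of how the merged-file path resolves; the constant values and the artifact directory below are illustrative assumptions, not values taken from this commit:

import os

# Illustrative stand-ins for the real constants in anime_recommender/constant.
DATA_TRANSFORMATION_DIR = "data_transformation"
DATA_TRANSFORMATION_TRANSFORMED_DATA_DIR = "transformed"
MERGED_FILE_NAME = "merged_anime_user_ratings.csv"

artifact_dir = "Artifacts/run_2025_01_01"  # assumed TrainingPipelineConfig.artifact_dir
merged_file_path = os.path.join(
    artifact_dir,
    DATA_TRANSFORMATION_DIR,
    DATA_TRANSFORMATION_TRANSFORMED_DATA_DIR,
    MERGED_FILE_NAME,
)
print(merged_file_path)
# e.g. Artifacts/run_2025_01_01/data_transformation/transformed/merged_anime_user_ratings.csv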
anime_recommender/source/data_transformation.py
ADDED
@@ -0,0 +1,115 @@
+import sys
+import numpy as np
+import pandas as pd
+
+from anime_recommender.loggers.logging import logging
+from anime_recommender.exception.exception import AnimeRecommendorException
+from anime_recommender.utils.main_utils.utils import export_data_to_dataframe
+from anime_recommender.constant import *
+from anime_recommender.entity.config_entity import DataTransformationConfig
+from anime_recommender.entity.artifact_entity import DataIngestionArtifact,DataTransformationArtifact
+
+class DataTransformation:
+    """
+    Class for handling data transformation for the anime recommender pipeline.
+    """
+    def __init__(self,data_ingestion_artifact:DataIngestionArtifact,data_transformation_config:DataTransformationConfig):
+        """
+        Initializes the DataTransformation class with the given data ingestion and configuration artifacts.
+
+        Args:
+            data_ingestion_artifact (DataIngestionArtifact): The artifact containing ingested data paths.
+            data_transformation_config (DataTransformationConfig): Configuration object for data transformation.
+        """
+        try:
+            self.data_ingestion_artifact = data_ingestion_artifact
+            self.data_transformation_config = data_transformation_config
+        except Exception as e:
+            raise AnimeRecommendorException(e,sys)
+
+    @staticmethod
+    def read_data(file_path)->pd.DataFrame:
+        """
+        Reads data from a CSV file.
+
+        Args:
+            file_path (str): Path to the CSV file.
+
+        Returns:
+            pd.DataFrame: The DataFrame containing the data from the CSV file.
+        """
+        try:
+            return pd.read_csv(file_path)
+        except Exception as e:
+            raise AnimeRecommendorException(e,sys)
+
+    @staticmethod
+    def merge_data(anime_df: pd.DataFrame, rating_df: pd.DataFrame) -> pd.DataFrame:
+        """
+        Merges the anime and rating DataFrames on 'anime_id'.
+
+        Args:
+            anime_df (pd.DataFrame): DataFrame containing anime information.
+            rating_df (pd.DataFrame): DataFrame containing user rating information.
+
+        Returns:
+            pd.DataFrame: Merged DataFrame on 'anime_id'.
+        """
+        try:
+            merged_df = pd.merge(rating_df, anime_df, on="anime_id", how="inner")
+            logging.info(f"Shape of the merged dataframe: {merged_df.shape}")
+            logging.info(f"Column names: {merged_df.columns}")
+            return merged_df
+        except Exception as e:
+            raise AnimeRecommendorException(e, sys)
+
+    @staticmethod
+    def clean_filter_data(merged_df: pd.DataFrame) -> pd.DataFrame:
+        """
+        Cleans the merged DataFrame by replacing 'UNKNOWN' with NaN, filling NaN values with the median, and filtering the data.
+
+        Args:
+            merged_df (pd.DataFrame): Merged DataFrame to clean and filter.
+
+        Returns:
+            pd.DataFrame: Cleaned and filtered DataFrame with NaN values handled.
+        """
+        try:
+            merged_df['average_rating'] = merged_df['average_rating'].replace('UNKNOWN', np.nan)
+            merged_df['average_rating'] = pd.to_numeric(merged_df['average_rating'], errors='coerce')
+            merged_df['average_rating'] = merged_df['average_rating'].fillna(merged_df['average_rating'].median())
+            merged_df = merged_df[merged_df['average_rating'] > 6]
+            cols_to_drop = ['username', 'overview', 'type', 'episodes', 'producers',
+                            'licensors', 'studios', 'source', 'rank', 'popularity',
+                            'favorites', 'scored by', 'members']
+            cleaned_df = merged_df.copy()
+            cleaned_df.drop(columns=cols_to_drop, inplace=True)
+            logging.info(f"Shape of the cleaned dataframe: {cleaned_df.shape}")
+            logging.info(f"Column names: {cleaned_df.columns}")
+            logging.info(f"Preview of the cleaned DataFrame:\n{cleaned_df.head()}")
+            return cleaned_df
+        except Exception as e:
+            raise AnimeRecommendorException(e, sys)
+
+    def initiate_data_transformation(self)->DataTransformationArtifact:
+        """
+        Initiates the data transformation process by reading, transforming, and saving the data.
+
+        Returns:
+            DataTransformationArtifact: The artifact containing paths to the transformed data.
+        """
+        logging.info("Entering initiate_data_transformation method of DataTransformation class.")
+        try:
+            anime_df = DataTransformation.read_data(self.data_ingestion_artifact.feature_store_anime_file_path)
+            rating_df = DataTransformation.read_data(self.data_ingestion_artifact.feature_store_userrating_file_path)
+            merged_df = DataTransformation.merge_data(anime_df, rating_df)
+            transformed_df = DataTransformation.clean_filter_data(merged_df)
+
+            export_data_to_dataframe(transformed_df, self.data_transformation_config.merged_file_path)
+            data_transformation_artifact = DataTransformationArtifact(
+                merged_file_path=self.data_transformation_config.merged_file_path
+            )
+
+            return data_transformation_artifact
+        except Exception as e:
+            raise AnimeRecommendorException(e,sys)
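The cleaning logic in clean_filter_data can be sanity-checked against a tiny in-memory frame before running the full pipeline (a self-contained sketch; the column name mirrors the one used above):

import numpy as np
import pandas as pd

toy = pd.DataFrame({
    "anime_id": [1, 2, 3],
    "average_rating": ["8.2", "UNKNOWN", "5.1"],
})

# Mirror the transformation: coerce ratings to numeric, impute the median,
# then keep only titles rated above 6. Note that replace() and fillna()
# return new Series, so the results must be assigned back.
toy["average_rating"] = toy["average_rating"].replace("UNKNOWN", np.nan)
toy["average_rating"] = pd.to_numeric(toy["average_rating"], errors="coerce")
toy["average_rating"] = toy["average_rating"].fillna(toy["average_rating"].median())
print(toy[toy["average_rating"] > 6])  # keeps the rows rated 8.2 and 6.65 (the imputed median)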
app.py
CHANGED
@@ -0,0 +1,232 @@
+import sys
+import pandas as pd
+import streamlit as st
+from anime_recommender.content_filtering_models import ContentBasedRecommender
+from anime_recommender.collaborative_filtering_models import CollaborativeAnimeRecommender
+from anime_recommender.popularity_based_filtering import PopularityBasedFiltering
+import joblib
+from huggingface_hub import hf_hub_download
+from datasets import load_dataset
+
+st.set_page_config(page_title="Anime Recommendation System", layout="wide")
+
+if "anime_data" not in st.session_state or "anime_user_ratings" not in st.session_state:
+    # Load datasets from Hugging Face (assuming no splits)
+    animedataset = load_dataset("krishnaveni76/Animes", split=None)
+    mergeddataset = load_dataset("krishnaveni76/Anime_UserRatings", split=None)
+
+    # Convert the datasets to Pandas DataFrames
+    st.session_state.anime_data = pd.DataFrame(animedataset["train"])
+    st.session_state.anime_user_ratings = pd.DataFrame(mergeddataset["train"])
+
+
+# Access the data from session state
+anime_data = st.session_state.anime_data
+anime_user_ratings = st.session_state.anime_user_ratings
+
+# Display dataset info
+st.write("Anime Data:")
+st.dataframe(anime_data)
+
+st.write("Anime User Ratings Data:")
+st.dataframe(anime_user_ratings)
+
+# Define the model repository name
+repo_name = "krishnaveni76/anime-recommendation-models"
+
+# Download models
+cosine_similarity_model_path = hf_hub_download(repo_name, "cosine_similarity.pkl")
+item_based_knn_model_path = hf_hub_download(repo_name, "itembasedknn.pkl")
+user_based_knn_model_path = hf_hub_download(repo_name, "userbasedknn.pkl")
+svd_model_path = hf_hub_download(repo_name, "svd.pkl")
+
+# # Load the models into memory
+# with open(cosine_similarity_model_path, "rb") as f:
+#     cosine_similarity_model = joblib.load(f)
+
+with open(item_based_knn_model_path, "rb") as f:
+    item_based_knn_model = joblib.load(f)
+
+with open(user_based_knn_model_path, "rb") as f:
+    user_based_knn_model = joblib.load(f)
+
+with open(svd_model_path, "rb") as f:
+    svd_model = joblib.load(f)
+
+# Now these models can be used for recommendations
+print("Models loaded successfully!")
+
+# Streamlit UI
+app_selector = st.sidebar.radio(
+    "Select App", ("Content-Based Recommender", "Collaborative Recommender", "Top Anime Recommender")
+)
+
+if app_selector == "Content-Based Recommender":
+    st.title("Content-Based Recommender System")
+    try:
+
+        anime_list = anime_data["name"].tolist()
+        anime_name = st.selectbox("Select an Anime", anime_list)
+
+        # Set number of recommendations
+        max_recommendations = min(len(anime_data), 100)
+        n_recommendations = st.slider("Number of Recommendations", 1, max_recommendations, 10)
+
+        # Inject custom CSS for anime name font size
+        st.markdown(
+            """
+            <style>
+            .anime-title {
+                font-size: 14px !important;
+                font-weight: bold;
+                text-align: center;
+                margin-top: 5px;
+            }
+            </style>
+            """,
+            unsafe_allow_html=True,
+        )
+        # Get Recommendations
+        if st.button("Get Recommendations"):
+            try:
+                recommender = ContentBasedRecommender(anime_data)
+                recommendations = recommender.get_rec_cosine(anime_name, n_recommendations=n_recommendations, model_path=cosine_similarity_model_path)
+
+                if isinstance(recommendations, str):
+                    st.warning(recommendations)
+                elif recommendations.empty:
+                    st.warning("No recommendations found.")
+                else:
+                    st.write(f"Here are the Content-based Recommendations for {anime_name}:")
+                    cols = st.columns(5)
+                    for i, row in enumerate(recommendations.iterrows()):
+                        col = cols[i % 5]
+                        with col:
+                            st.image(row[1]['Image URL'], use_container_width=True)
+                            st.markdown(
+                                f"<div class='anime-title'>{row[1]['Anime name']}</div>",
+                                unsafe_allow_html=True,
+                            )
+                            st.caption(f"Genres: {row[1]['Genres']} | Rating: {row[1]['Rating']}")
+            except Exception as e:
+                st.error(f"Unexpected error: {str(e)}")
+
+    except Exception as e:
+        st.error(f"Unexpected error: {str(e)}")
+
+elif app_selector == "Collaborative Recommender":
+    st.title("Collaborative Recommender System")
+
+    try:
+
+        # Sidebar for choosing the collaborative filtering method
+        collaborative_method = st.sidebar.selectbox(
+            "Choose a collaborative filtering method:",
+            ["SVD Collaborative Filtering", "User-Based Collaborative Filtering", "Anime-Based KNN Collaborative Filtering"]
+        )
+
+        # User input
+        if collaborative_method == "SVD Collaborative Filtering" or collaborative_method == "User-Based Collaborative Filtering":
+            user_ids = anime_user_ratings['user_id'].unique()  # Get unique user IDs
+            user_id = st.selectbox("Select a user ID", user_ids)
+            n_recommendations = st.slider("Number of Recommendations:", min_value=1, max_value=50, value=10)
+        elif collaborative_method == "Anime-Based KNN Collaborative Filtering":
+            anime_list = anime_user_ratings["name"].dropna().unique().tolist()  # Ensure no NaN values in anime names
+            anime_name = st.selectbox("Select an Anime", anime_list)
+            n_recommendations = st.slider("Number of Recommendations:", min_value=1, max_value=50, value=10)
+
+        # Get recommendations
+        if st.button("Get Recommendations"):
+            # Load the recommender
+            recommender = CollaborativeAnimeRecommender(anime_user_ratings)
+            if collaborative_method == "SVD Collaborative Filtering":
+                recommendations = recommender.get_svd_recommendations(user_id, n=n_recommendations, svd_model=svd_model)
+                # st.write(recommendations.head())
+            elif collaborative_method == "User-Based Collaborative Filtering":
+                recommendations = recommender.get_user_based_recommendations(user_id, n_recommendations=n_recommendations, knn_user_model=user_based_knn_model)
+            elif collaborative_method == "Anime-Based KNN Collaborative Filtering":
+                if anime_name:
+                    recommendations = recommender.get_item_based_recommendations(anime_name, n_recommendations=n_recommendations, knn_item_model=item_based_knn_model)
+                else:
+                    st.error("Invalid Anime Name. Please enter a valid anime title.")
+
+            if isinstance(recommendations, pd.DataFrame) and not recommendations.empty:
+                if len(recommendations) < n_recommendations:
+                    st.warning(f"Only {len(recommendations)} recommendations available, fewer than the requested {n_recommendations}.")
+                st.write("Here are the Collaborative Recommendations:")
+                cols = st.columns(5)
+                for i, row in enumerate(recommendations.iterrows()):
+                    col = cols[i % 5]
+                    with col:
+                        st.image(row[1]['Image URL'], use_container_width=True)
+                        st.markdown(
+                            f"<div class='anime-title'>{row[1]['Anime Name']}</div>",
+                            unsafe_allow_html=True,
+                        )
+                        st.caption(f"Genres: {row[1]['Genres']} | Rating: {row[1]['Rating']}")
+            else:
+                st.error("No recommendations found.")
+    except Exception as e:
+        st.error(f"An error occurred: {e}")
+
+elif app_selector == "Top Anime Recommender":
+    st.title("Top Anime Recommender System")
+
+    try:
+        # Sidebar for choosing the popularity-based filtering method
+        popularity_method = st.sidebar.selectbox(
+            "Choose a Popularity-Based Filtering method:",
+            [
+                "Popular Animes",
+                "Top Ranked Animes",
+                "Overall Top Rated Animes",
+                "Favorite Animes",
+                "Top Animes by Members",
+                "Popular Anime Among Members",
+                "Top Average Rated Animes",
+            ]
+        )
+
+        n_recommendations = st.slider("Number of Recommendations:", min_value=1, max_value=50, value=10)
+
+        if st.button("Get Top Anime"):
+            # Load the popularity-based recommender
+            recommender = PopularityBasedFiltering(anime_data)
+
+            # Get recommendations based on selected method
+            if popularity_method == "Popular Animes":
+                recommendations = recommender.popular_animes(n=n_recommendations)
+            elif popularity_method == "Top Ranked Animes":
+                recommendations = recommender.top_ranked_animes(n=n_recommendations)
+            elif popularity_method == "Overall Top Rated Animes":
+                recommendations = recommender.overall_top_rated_animes(n=n_recommendations)
+            elif popularity_method == "Favorite Animes":
+                recommendations = recommender.favorite_animes(n=n_recommendations)
+            elif popularity_method == "Top Animes by Members":
+                recommendations = recommender.top_animes_members(n=n_recommendations)
+            elif popularity_method == "Popular Anime Among Members":
+                recommendations = recommender.popular_anime_among_members(n=n_recommendations)
+            elif popularity_method == "Top Average Rated Animes":
+                recommendations = recommender.top_avg_rated(n=n_recommendations)
+            else:
+                st.error("Invalid selection. Please choose a valid method.")
+                recommendations = None
+
+            # Display recommendations
+            if isinstance(recommendations, pd.DataFrame) and not recommendations.empty:
+                st.write(f"Here are the {popularity_method}:")
+                cols = st.columns(5)
+                for i, (_, row) in enumerate(recommendations.iterrows()):
+                    col = cols[i % 5]
+                    with col:
+                        st.image(row['Image URL'], use_container_width=True)
+                        st.markdown(
+                            f"<div class='anime-title'>{row['Anime name']}</div>",
+                            unsafe_allow_html=True,
+                        )
+                        st.caption(f"Genres: {row['Genres']} | Rating: {row['Rating']}")
+            else:
+                st.error("No recommendations found.")
+    except Exception as e:
+        st.error(f"An error occurred: {e}")
+
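Because Streamlit reruns app.py on every interaction, the joblib.load calls above execute on each rerun (hf_hub_download itself reuses its local cache). If loading becomes a bottleneck, the models could be wrapped in Streamlit's st.cache_resource; a sketch under the assumption that the pickles stay joblib-loadable and the repository layout is unchanged:

import joblib
import streamlit as st
from huggingface_hub import hf_hub_download

@st.cache_resource
def load_model(repo_id: str, filename: str):
    # Download (or reuse the cached copy of) the file, then deserialize it
    # once instead of on every rerun.
    path = hf_hub_download(repo_id, filename)
    with open(path, "rb") as f:
        return joblib.load(f)

svd_model = load_model("krishnaveni76/anime-recommendation-models", "svd.pkl")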
run_pipeline.py
CHANGED
@@ -2,9 +2,9 @@ import sys
 from anime_recommender.loggers.logging import logging
 from anime_recommender.exception.exception import AnimeRecommendorException
 from anime_recommender.source.data_ingestion import DataIngestion
-from anime_recommender.entity.config_entity import TrainingPipelineConfig,DataIngestionConfig
+from anime_recommender.entity.config_entity import TrainingPipelineConfig,DataIngestionConfig,DataTransformationConfig
 # ,DataTransformationConfig,CollaborativeModelConfig,ContentBasedModelConfig
-
+from anime_recommender.source.data_transformation import DataTransformation
 # from anime_recommender.source.collaborative_recommenders import CollaborativeModelTrainer
 # from anime_recommender.source.content_based_recommenders import ContentBasedModelTrainer
 # from anime_recommender.source.popularity_based_recommenders import PopularityBasedRecommendor
@@ -19,13 +19,13 @@ if __name__ == "__main__":
         logging.info(f"Data ingestion completed.")
         print(data_ingestion_artifact)
 
-        #
-
-
-
-
-
-
+        # Data Transformation
+        data_transformation_config = DataTransformationConfig(training_pipeline_config)
+        data_transformation = DataTransformation(data_ingestion_artifact,data_transformation_config)
+        logging.info("Initiating Data Transformation.")
+        data_transformation_artifact = data_transformation.initiate_data_transformation()
+        logging.info("Data Transformation Completed.")
+        print(data_transformation_artifact)
 
         # # Collaborative Model Training
         # collaborative_model_trainer_config = CollaborativeModelConfig(training_pipeline_config)