Commit 6456e17a authored by Nathanaël Kindidi
modif class content based

parent d549c086
%% Cell type:markdown id:82d5ca82 tags:
# Packages
%% Cell type:code id:277473a3 tags:
``` python
%load_ext autoreload
%autoreload 2
import numpy as np
import pandas as pd
import random as rd
from surprise import AlgoBase
from surprise.prediction_algorithms.predictions import PredictionImpossible
from loaders import load_ratings
from loaders import load_items
from constants import Constant as C
# Regressors and helpers used by the ContentBased class below
from sklearn.base import clone
from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor, AdaBoostRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import SVR
from sklearn.feature_extraction.text import TfidfVectorizer
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
```
%% Output
The autoreload extension is already loaded. To reload it, use:
%reload_ext autoreload
%% Cell type:markdown id:a42c16bf tags:
# Explore and select content features
%% Cell type:code id:e8378976 tags:
``` python
# All the dataframes
df_items = load_items()
df_ratings = load_ratings()
df_tag = pd.read_csv(C.CONTENT_PATH/C.TAGS_FILENAME)
#df_genome_score = pd.read_csv("data/hackathon/content/genome-scores.csv")
# df_genome_tag = pd.read_csv("data/hackathon/content/genome-tags.csv")

# Example 1 : create title_length features
df_features = df_items[C.LABEL_COL].apply(lambda x: len(x)).to_frame('n_character_title')
display(df_features.head())

df_tag = pd.read_csv(C.CONTENT_PATH/C.TAGS_FILENAME)
df_features = df_tag[C.TAG]
display(df_features.head())

# (explore here other features)
```
%% Output
---------------------------------------------------------------------------
FileNotFoundError Traceback (most recent call last)
Cell In[16], line 2
1 # All the dataframes
----> 2 df_items = load_items()
3 df_ratings = load_ratings()
4 df_tag = pd.read_csv(C.CONTENT_PATH/C.TAGS_FILENAME)
File ~/Desktop/Université/Recommender Systems/recomsys/loaders.py:34, in load_items()
28 def load_items():
29 """Loads items data.
30
31 Returns:
32 DataFrame: Items data.
33 """
---> 34 df_items = pd.read_csv(C.CONTENT_PATH / C.ITEMS_FILENAME) # i.e. the contents of the movies csv
35 df_items = df_items.set_index(C.ITEM_ID_COL) # movie id
36 return df_items
File ~/.pyenv/versions/3.12.0/lib/python3.12/site-packages/pandas/io/parsers/readers.py:1026, in read_csv(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, skipfooter, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, date_format, dayfirst, cache_dates, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, doublequote, escapechar, comment, encoding, encoding_errors, dialect, on_bad_lines, delim_whitespace, low_memory, memory_map, float_precision, storage_options, dtype_backend)
1013 kwds_defaults = _refine_defaults_read(
1014 dialect,
1015 delimiter,
(...)
1022 dtype_backend=dtype_backend,
1023 )
1024 kwds.update(kwds_defaults)
-> 1026 return _read(filepath_or_buffer, kwds)
File ~/.pyenv/versions/3.12.0/lib/python3.12/site-packages/pandas/io/parsers/readers.py:620, in _read(filepath_or_buffer, kwds)
617 _validate_names(kwds.get("names", None))
619 # Create the parser.
--> 620 parser = TextFileReader(filepath_or_buffer, **kwds)
622 if chunksize or iterator:
623 return parser
File ~/.pyenv/versions/3.12.0/lib/python3.12/site-packages/pandas/io/parsers/readers.py:1620, in TextFileReader.__init__(self, f, engine, **kwds)
1617 self.options["has_index_names"] = kwds["has_index_names"]
1619 self.handles: IOHandles | None = None
-> 1620 self._engine = self._make_engine(f, self.engine)
File ~/.pyenv/versions/3.12.0/lib/python3.12/site-packages/pandas/io/parsers/readers.py:1880, in TextFileReader._make_engine(self, f, engine)
1878 if "b" not in mode:
1879 mode += "b"
-> 1880 self.handles = get_handle(
1881 f,
1882 mode,
1883 encoding=self.options.get("encoding", None),
1884 compression=self.options.get("compression", None),
1885 memory_map=self.options.get("memory_map", False),
1886 is_text=is_text,
1887 errors=self.options.get("encoding_errors", "strict"),
1888 storage_options=self.options.get("storage_options", None),
1889 )
1890 assert self.handles is not None
1891 f = self.handles.handle
File ~/.pyenv/versions/3.12.0/lib/python3.12/site-packages/pandas/io/common.py:873, in get_handle(path_or_buf, mode, encoding, compression, memory_map, is_text, errors, storage_options)
868 elif isinstance(handle, str):
869 # Check whether the filename is to be opened in binary mode.
870 # Binary mode does not support 'encoding' and 'newline'.
871 if ioargs.encoding and "b" not in ioargs.mode:
872 # Encoding
--> 873 handle = open(
874 handle,
875 ioargs.mode,
876 encoding=ioargs.encoding,
877 errors=errors,
878 newline="",
879 )
880 else:
881 # Binary mode
882 handle = open(handle, ioargs.mode)
FileNotFoundError: [Errno 2] No such file or directory: 'data/test/content/movies.csv'
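As a further exploration step, here is a small sketch of one more candidate feature, the number of genres per movie. It assumes `df_items` has loaded correctly and exposes the pipe-separated `genres` column used later in the notebook; the `df_n_genres` name is purely illustrative.

``` python
# Sketch of an additional feature to explore: how many genres each movie has
# (assumes df_items loaded correctly; df_n_genres is an illustrative name)
df_n_genres = df_items['genres'].str.split('|').apply(len).to_frame('n_genres')
display(df_n_genres.head())
```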
%% Cell type:markdown id:a2c9a2b6 tags:
# Build a content-based model
When ready, move the following class into the *models.py* script.
%% Cell type:code id:16b0a602 tags:
``` python
# ContentBased
class ContentBased(AlgoBase):
    def __init__(self, features_method, regressor_method):
        AlgoBase.__init__(self)
        self.regressor_method = regressor_method
        self.features_methods = features_method
        self.content_features = self.create_content_features(features_method)
        self.user_profile = {}
        self.user_profile_explain = {}
    def create_content_features(self, features_methods):
        """Content Analyzer"""
        df_items = load_items()
        df_ratings = load_ratings()
        df_tag = pd.read_csv(C.CONTENT_PATH/C.TAGS_FILENAME)
        df_genome_score = pd.read_csv("data/hackathon/content/genome-scores.csv")
        df_genome_tag = pd.read_csv("data/hackathon/content/genome-tags.csv")
        df_features = pd.DataFrame(index=df_items.index)

        for method in features_methods:
            if method == "title_length":
                df_title_length = df_items[C.LABEL_COL].apply(lambda x: len(x)).to_frame('title_length')
                df_features = pd.concat([df_features, df_title_length], axis=1)
            elif method == "movie_year":
                df_movie_year = df_items['title'].str.extract(r'\((\d{4})\)', expand=False).to_frame('movie_year')
                df_features = pd.concat([df_features, df_movie_year.astype(float).fillna(0)], axis=1)
            elif method == "genre":
                tfidf_vectorizer = TfidfVectorizer(tokenizer=lambda x: x.split('|'), token_pattern=None)
                tfidf_matrix = tfidf_vectorizer.fit_transform(df_items['genres'])
                df_tfidf_genres = pd.DataFrame(tfidf_matrix.toarray(), index=df_items.index, columns=tfidf_vectorizer.get_feature_names_out())
                df_features = pd.concat([df_features, df_tfidf_genres], axis=1)
            elif method == "avg_rating":
                df_avg_rating = df_ratings.groupby('movieId')['rating'].mean().to_frame('avg_rating')
                df_features = df_features.join(df_avg_rating, on='movieId')
            else:
                raise NotImplementedError(f'Feature method {method} not yet implemented')

        # Handle missing values in df_features
        df_features.fillna(0, inplace=True)

        # (implement other feature creations here)
        return df_features
    def fit(self, trainset):
        """Profile Learner"""
        AlgoBase.fit(self, trainset)

        # Preallocate user profiles
        self.user_profile = {u: None for u in trainset.all_users()}
        self.user_profile_explain = {}

        epsilon = 1e-10  # Small value to prevent division by zero

        # Explainable profile per user: rating-weighted average of the norm-scaled item features
        for u in trainset.all_users():
            raw_user_id = trainset.to_raw_uid(u)
            self.user_profile_explain[raw_user_id] = {}

            user_ratings = np.array([rating for (_, rating) in trainset.ur[u]])
            item_ids = [iid for (iid, _) in trainset.ur[u]]
            raw_item_ids = [trainset.to_raw_iid(iid) for iid in item_ids]

            feature_values = self.content_features.loc[raw_item_ids].values
            norms = np.linalg.norm(feature_values, axis=0) + epsilon
            weighted_features = feature_values / norms
            feature_importance = weighted_features.T @ user_ratings
            feature_importance /= np.sum(user_ratings)

            self.user_profile_explain[raw_user_id] = dict(zip(self.content_features.columns, feature_importance))
        if self.regressor_method == 'random_score':
            for u in self.user_profile:
                self.user_profile[u] = rd.uniform(0.5, 5)

        elif self.regressor_method == 'random_sample':
            for u in self.user_profile:
                self.user_profile[u] = [rating for (_, rating) in trainset.ur[u]]

        else:
            regressor_models = {
                'linear_regression': LinearRegression(fit_intercept=False),
                'svr_regression': SVR(kernel='rbf', C=10, epsilon=0.2),
                'gradient_boosting': GradientBoostingRegressor(n_estimators=100, learning_rate=0.1, max_depth=3),
                'random_forest': RandomForestRegressor(n_estimators=100),
                'lasso_regression': Lasso(alpha=0.1),
                'ridge_regression': Ridge(alpha=1.0),
                'elastic_net': ElasticNet(alpha=1.0, l1_ratio=0.5),
                'knn_regression': KNeighborsRegressor(n_neighbors=1),
                'decision_tree': DecisionTreeRegressor(max_depth=5),
                'adaboost': AdaBoostRegressor(n_estimators=50),
                'xgboost': XGBRegressor(n_estimators=100, learning_rate=0.1, max_depth=3),
                'lightgbm': LGBMRegressor(n_estimators=100, learning_rate=0.1, max_depth=3)
            }

            if self.regressor_method not in regressor_models:
                raise NotImplementedError(f'Regressor method {self.regressor_method} not yet implemented')

            # Fit one regressor per user on the content features of the items that user rated
            for u in self.user_profile:
                user_ratings = [rating for (_, rating) in trainset.ur[u]]
                item_ids = [iid for (iid, _) in trainset.ur[u]]
                raw_item_ids = [trainset.to_raw_iid(iid) for iid in item_ids]

                df_user = pd.DataFrame({'item_id': raw_item_ids, 'user_ratings': user_ratings})
                df_user = df_user.merge(self.content_features, left_on="item_id", right_index=True, how='left')

                X = df_user.drop(columns=['item_id', 'user_ratings'])
                y = df_user['user_ratings']

                # clone so each user gets their own independently fitted model
                regressor = clone(regressor_models[self.regressor_method])
                regressor.fit(X, y)

                self.user_profile[u] = regressor
    def estimate(self, u, i):
        """Scoring component used for item filtering"""
        # First, handle cases for unknown users and items
        if not (self.trainset.knows_user(u) and self.trainset.knows_item(i)):
            raise PredictionImpossible('User and/or item is unknown.')

        if self.regressor_method == 'random_score':
            return rd.uniform(0.5, 5)

        elif self.regressor_method == 'random_sample':
            return rd.choice(self.user_profile[u])

        else:
            raw_item_id = self.trainset.to_raw_iid(i)
            item_features = self.content_features.loc[raw_item_id, :].values.reshape(1, -1)
            regressor = self.user_profile[u]
            item_features_df = pd.DataFrame(item_features, columns=self.content_features.columns)
            return regressor.predict(item_features_df)[0]

    def explain(self, u):
        if u in self.user_profile_explain:
            return self.user_profile_explain[u]
        else:
            return None


# Example usage:
cb = ContentBased(["title_length", "movie_year", "genre", "avg_rating"], "ridge_regression")
surprise_data = load_ratings(surprise_format=True)
trainset = surprise_data.build_full_trainset()
testset = trainset.build_anti_testset()
cb.fit(trainset)

#print("RMSE: ", cb.rmse(testset))

# Example explanations for users:
print(cb.explain(11))
print(cb.explain(13))
print(cb.explain(17))
print(cb.explain(23))
print(cb.explain(27))
print(cb.explain(73))
```
%% Output
0
1
2
3
4
5
None
{'n_character_title': array([0.03019692])}
{'n_character_title': array([0.04098154])}
{'n_character_title': array([0.02942264])}
{'n_character_title': array([0.08196307])}
{'n_character_title': array([0.02798739])}
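To get a rough error estimate for the fitted model, a minimal evaluation sketch is shown below. It assumes the rating file referenced by `load_ratings` is available, and the `cb_eval`, `eval_trainset` and `eval_testset` names are illustrative; it relies on Surprise's `train_test_split` and `accuracy.rmse` helpers rather than the commented-out `cb.rmse` call above.

``` python
# Minimal evaluation sketch (assumes the rating data loads correctly)
from surprise.model_selection import train_test_split
from surprise import accuracy

sp_ratings = load_ratings(surprise_format=True)
eval_trainset, eval_testset = train_test_split(sp_ratings, test_size=0.25)

cb_eval = ContentBased(["title_length", "movie_year", "genre", "avg_rating"], "ridge_regression")
cb_eval.fit(eval_trainset)
predictions = cb_eval.test(eval_testset)
accuracy.rmse(predictions)
```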
%% Cell type:code id:baab88b7 tags:
``` python
from pprint import pprint

# Create a TfidfVectorizer instance for the genres
tfidf_vectorizer = TfidfVectorizer()

# Fit and transform to compute the TF-IDF matrix of the genres
tfidf_matrix = tfidf_vectorizer.fit_transform(df_items['genres'])

# Get the genre names (features)
genre_names = tfidf_vectorizer.get_feature_names_out()

# Build a DataFrame from the genres TF-IDF matrix
df_tfidf = pd.DataFrame(tfidf_matrix.toarray(), columns=genre_names)

print("Genres TF-IDF matrix:")
display(df_tfidf)
```
%% Output
Genres TF-IDF matrix:
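As a quick sanity check on the matrix above (a sketch assuming `df_tfidf` from the previous cell is still in memory; `first_movie` is an illustrative name), the non-zero genre weights of a single movie can be listed:

``` python
# Inspect the TF-IDF genre weights of the first movie (assumes df_tfidf from the cell above)
first_movie = df_tfidf.iloc[0]
pprint(first_movie[first_movie > 0].sort_values(ascending=False).to_dict())
```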
%% Cell type:markdown id:ffd75b7e tags:
The following script tests the ContentBased class.
%% Cell type:code id:69d12f7d tags:
``` python
def test_contentbased_class(feature_method, regressor_method):
    """Test the ContentBased class.
    Tries to make a prediction on the first (user, item) tuple of the anti_test_set.
    """
    sp_ratings = load_ratings(surprise_format=True)
    train_set = sp_ratings.build_full_trainset()
    content_algo = ContentBased(feature_method, regressor_method)
    content_algo.fit(train_set)
    anti_test_set_first = train_set.build_anti_testset()[0]
    prediction = content_algo.predict(anti_test_set_first[0], anti_test_set_first[1])
    print(prediction)

test_contentbased_class(["title_length", "movie_year","genre","avg_rating"], "ridge_regression")
# print("title_length :")
# test_contentbased_class(feature_method = "title_length" , regressor_method = "random_score")
# test_contentbased_class(feature_method = "title_length" , regressor_method = "random_sample")
# test_contentbased_class(feature_method = "title_length" , regressor_method = "linear_regression")
# test_contentbased_class(feature_method= "title_length", regressor_method= "svr_regression")
# test_contentbased_class(feature_method= "title_length", regressor_method= "gradient_boosting")
# test_contentbased_class(feature_method= "title_length", regressor_method= "random_forest")
# print("\n")
# print("movie_year : ")
# test_contentbased_class(feature_method= "movie_year", regressor_method= "random_score")
# test_contentbased_class(feature_method= "movie_year", regressor_method= "random_sample")
# test_contentbased_class(feature_method= "movie_year", regressor_method= "linear_regression")
# test_contentbased_class(feature_method= "movie_year", regressor_method= "svr_regression")
# test_contentbased_class(feature_method= "movie_year", regressor_method= "gradient_boosting")
# test_contentbased_class(feature_method= "movie_year", regressor_method= "random_forest")
# print("\n")
# print("relevance : ")
# test_contentbased_class(feature_method= "relevance", regressor_method= "random_score")
# test_contentbased_class(feature_method= "relevance", regressor_method= "random_sample")
# test_contentbased_class(feature_method= "relevance", regressor_method= "linear_regression")
# test_contentbased_class(feature_method= "relevance", regressor_method= "svr_regression")
# test_contentbased_class(feature_method= "relevance", regressor_method= "gradient_boosting")
# test_contentbased_class(feature_method= "relevance", regressor_method= "random_forest")
# print("\n")
# print("genres : ")
# test_contentbased_class(feature_method= "genres", regressor_method= "random_score")
# test_contentbased_class(feature_method= "genres", regressor_method= "random_sample")
# test_contentbased_class(feature_method= "genres", regressor_method= "linear_regression")
# test_contentbased_class(feature_method= "genres", regressor_method= "svr_regression")
# test_contentbased_class(feature_method= "genres", regressor_method= "gradient_boosting")
# test_contentbased_class(feature_method= "genres", regressor_method= "random_forest")
# print("\n")
# print("rating : ")
# test_contentbased_class(feature_method= "rating", regressor_method="random_score")
# test_contentbased_class(feature_method= "rating", regressor_method="random_sample")
# # test_contentbased_class(feature_method= "rating", regressor_method="linear_regression")
# #test_contentbased_class(feature_method="rating", regressor_method="svr_regression")
# #test_contentbased_class(feature_method="rating", regressor_method="gradient_boosting")
# #test_contentbased_class(feature_method="rating", regressor_method="random_forest")
# print("\n")
# print("tags : ")
# test_contentbased_class(feature_method="tags", regressor_method="random_score")
# test_contentbased_class(feature_method="tags", regressor_method="random_sample")
# #test_contentbased_class(feature_method="tags", regressor_method="linear_regression")
# # test_contentbased_class(feature_method="tags", regressor_method="svr_regression")
# # test_contentbased_class(feature_method="tags", regressor_method="gradient_boosting")
# # test_contentbased_class(feature_method="tags", regressor_method="random_forest")
# print("\n")
# print("tags_length : ")
# test_contentbased_class(feature_method="tags_length", regressor_method="random_score")
# test_contentbased_class(feature_method="tags_length", regressor_method="random_sample")
# test_contentbased_class(feature_method="tags_length", regressor_method="linear_regression")
# test_contentbased_class(feature_method="tags_length", regressor_method="svr_regression")
# test_contentbased_class(feature_method="tags_length", regressor_method="gradient_boosting")
# test_contentbased_class(feature_method="tags_length", regressor_method="random_forest")
# print("\n")
# print("combination : ")
# test_contentbased_class(feature_method="combination", regressor_method="random_score")
# test_contentbased_class(feature_method="combination", regressor_method="random_sample")
# test_contentbased_class(feature_method="combination", regressor_method="linear_regression")
# test_contentbased_class(feature_method="combination", regressor_method="svr_regression")
# test_contentbased_class(feature_method="combination", regressor_method="gradient_boosting")
# test_contentbased_class(feature_method="combination", regressor_method="random_forest")
```
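The same helper can also be pointed at other entries of the `regressor_models` dictionary defined in `ContentBased.fit`. A sketch, assuming the data files used by the loaders are present (each run refits one regressor per user, so this can take a while):

``` python
# Sketch: exercise a few of the other registered regressors
for regressor_method in ["linear_regression", "gradient_boosting", "random_forest"]:
    test_contentbased_class(["title_length", "movie_year", "genre", "avg_rating"], regressor_method)
```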