diff --git a/evaluator.ipynb b/evaluator.ipynb
index fd760dd3a1b7288f06a78fab469d3c99d94e4324..3a7e79f2d760a8472a1358d956cf96d23973ad84 100644
--- a/evaluator.ipynb
+++ b/evaluator.ipynb
@@ -59,7 +59,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": 2,
    "id": "d6d82188",
    "metadata": {},
    "outputs": [],
@@ -193,7 +193,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
+   "execution_count": 5,
    "id": "f1849e55",
    "metadata": {},
    "outputs": [],
@@ -246,51 +246,19 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 12,
+   "execution_count": 6,
    "id": "704f4d2a",
    "metadata": {},
    "outputs": [
     {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Handling model baseline_1\n",
-      "Training split predictions\n",
-      "- computing metric mae\n",
-      "- computing metric rmse\n",
-      "Training loo predictions\n",
-      "Training full predictions\n",
-      "Handling model baseline_2\n",
-      "Training split predictions\n",
-      "- computing metric mae\n",
-      "- computing metric rmse\n",
-      "Training loo predictions\n",
-      "Training full predictions\n",
-      "Handling model baseline_3\n",
-      "Training split predictions\n",
-      "- computing metric mae\n",
-      "- computing metric rmse\n",
-      "Training loo predictions\n",
-      "Training full predictions\n",
-      "Handling model baseline_4\n",
-      "Training split predictions\n",
-      "- computing metric mae\n",
-      "- computing metric rmse\n",
-      "Training loo predictions\n",
-      "Training full predictions\n",
-      "Handling model ContentBased\n"
-     ]
-    },
-    {
-     "ename": "TypeError",
-     "evalue": "ContentBased.__init__() missing 2 required positional arguments: 'features_method' and 'regressor_method'",
+     "ename": "NameError",
+     "evalue": "name 'accuracy' is not defined",
      "output_type": "error",
      "traceback": [
       "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
-      "\u001b[0;31mTypeError\u001b[0m                                 Traceback (most recent call last)",
-      "Cell \u001b[0;32mIn[12], line 19\u001b[0m\n\u001b[1;32m     17\u001b[0m sp_ratings \u001b[38;5;241m=\u001b[39m load_ratings(surprise_format\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[1;32m     18\u001b[0m precomputed_dict \u001b[38;5;241m=\u001b[39m precomputed_information(pd\u001b[38;5;241m.\u001b[39mread_csv(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdata/tiny/evidence/ratings.csv\u001b[39m\u001b[38;5;124m\"\u001b[39m))\n\u001b[0;32m---> 19\u001b[0m evaluation_report \u001b[38;5;241m=\u001b[39m \u001b[43mcreate_evaluation_report\u001b[49m\u001b[43m(\u001b[49m\u001b[43mEvalConfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msp_ratings\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mprecomputed_dict\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mAVAILABLE_METRICS\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m     20\u001b[0m export_evaluation_report(evaluation_report)\n",
-      "Cell \u001b[0;32mIn[10], line 81\u001b[0m, in \u001b[0;36mcreate_evaluation_report\u001b[0;34m(eval_config, sp_ratings, precomputed_dict, available_metrics)\u001b[0m\n\u001b[1;32m     79\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m model_name, model, arguments \u001b[38;5;129;01min\u001b[39;00m eval_config\u001b[38;5;241m.\u001b[39mmodels:\n\u001b[1;32m     80\u001b[0m     \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mHandling model \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mmodel_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m)\n\u001b[0;32m---> 81\u001b[0m     algo \u001b[38;5;241m=\u001b[39m \u001b[43mmodel\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43marguments\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m     82\u001b[0m     evaluation_dict[model_name] \u001b[38;5;241m=\u001b[39m {}\n\u001b[1;32m     84\u001b[0m     \u001b[38;5;66;03m# Type 1 : split evaluations\u001b[39;00m\n",
-      "\u001b[0;31mTypeError\u001b[0m: ContentBased.__init__() missing 2 required positional arguments: 'features_method' and 'regressor_method'"
+      "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
+      "Cell \u001b[0;32mIn[6], line 3\u001b[0m\n\u001b[1;32m      1\u001b[0m AVAILABLE_METRICS \u001b[38;5;241m=\u001b[39m {\n\u001b[1;32m      2\u001b[0m     \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msplit\u001b[39m\u001b[38;5;124m\"\u001b[39m: {\n\u001b[0;32m----> 3\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmae\u001b[39m\u001b[38;5;124m\"\u001b[39m: (\u001b[43maccuracy\u001b[49m\u001b[38;5;241m.\u001b[39mmae, {\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mverbose\u001b[39m\u001b[38;5;124m'\u001b[39m: \u001b[38;5;28;01mFalse\u001b[39;00m}),\n\u001b[1;32m      4\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrmse\u001b[39m\u001b[38;5;124m\"\u001b[39m: (accuracy\u001b[38;5;241m.\u001b[39mrmse, {\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mverbose\u001b[39m\u001b[38;5;124m'\u001b[39m: \u001b[38;5;28;01mFalse\u001b[39;00m})\n\u001b[1;32m      5\u001b[0m         \u001b[38;5;66;03m# Add new split metrics here if needed\u001b[39;00m\n\u001b[1;32m      6\u001b[0m     },\n\u001b[1;32m      7\u001b[0m     \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mloo\u001b[39m\u001b[38;5;124m\"\u001b[39m: {\n\u001b[1;32m      8\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mhit_rate\u001b[39m\u001b[38;5;124m\"\u001b[39m: (get_hit_rate, {}),\n\u001b[1;32m      9\u001b[0m         \u001b[38;5;66;03m# Add new loo metrics here if needed\u001b[39;00m\n\u001b[1;32m     10\u001b[0m     },\n\u001b[1;32m     11\u001b[0m     \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mfull\u001b[39m\u001b[38;5;124m\"\u001b[39m: {\n\u001b[1;32m     12\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mnovelty\u001b[39m\u001b[38;5;124m\"\u001b[39m: (get_novelty, {}),\n\u001b[1;32m     13\u001b[0m         \u001b[38;5;66;03m# Add new full metrics here if needed\u001b[39;00m\n\u001b[1;32m     14\u001b[0m     }\n\u001b[1;32m     15\u001b[0m }\n\u001b[1;32m     17\u001b[0m sp_ratings \u001b[38;5;241m=\u001b[39m load_ratings(surprise_format\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[1;32m     18\u001b[0m precomputed_dict \u001b[38;5;241m=\u001b[39m precomputed_information(pd\u001b[38;5;241m.\u001b[39mread_csv(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdata/tiny/evidence/ratings.csv\u001b[39m\u001b[38;5;124m\"\u001b[39m))\n",
+      "\u001b[0;31mNameError\u001b[0m: name 'accuracy' is not defined"
      ]
     }
    ],