diff --git a/tutorials/model_ensembling.ipynb b/tutorials/model_ensembling.ipynb
index 12e69820..43c7344e 100644
--- a/tutorials/model_ensembling.ipynb
+++ b/tutorials/model_ensembling.ipynb
@@ -166,7 +166,7 @@
},
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": 1,
"id": "bd266ee7",
"metadata": {},
"outputs": [
@@ -209,7 +209,7 @@
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": 2,
"id": "92a57076",
"metadata": {},
"outputs": [
@@ -262,7 +262,7 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": 3,
"id": "ea466b90",
"metadata": {},
"outputs": [
@@ -280,7 +280,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "Ranking: 100%|██████████| 940/940 [00:00<00:00, 2667.90it/s]\n"
+ "Ranking: 100%|██████████| 940/940 [00:00<00:00, 1933.61it/s]\n"
]
},
{
@@ -295,7 +295,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "100%|██████████| 300/300 [00:34<00:00, 8.78it/s, loss=173]\n"
+ "100%|██████████| 300/300 [00:17<00:00, 17.04it/s, loss=173]\n"
]
},
{
@@ -311,7 +311,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "Ranking: 100%|██████████| 940/940 [00:01<00:00, 838.15it/s]\n"
+ "Ranking: 100%|██████████| 940/940 [00:00<00:00, 1826.79it/s]"
]
},
{
@@ -321,10 +321,17 @@
"\n",
"TEST:\n",
"...\n",
- " | Precision@50 | Recall@50 | Train (s) | Test (s)\n",
- "--- + ------------ + --------- + --------- + --------\n",
- "BPR | 0.0985 | 0.4922 | 3.3865 | 0.3619\n",
- "WMF | 0.1133 | 0.5583 | 457.1005 | 1.1311\n",
+ " | Precision@100 | Recall@100 | Train (s) | Test (s)\n",
+ "--- + ------------- + ---------- + --------- + --------\n",
+ "BPR | 0.0706 | 0.6607 | 1.5255 | 0.4941\n",
+ "WMF | 0.0772 | 0.7208 | 322.8325 | 0.5201\n",
+ "\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
"\n"
]
}
@@ -334,7 +341,7 @@
"wmf_model = WMF(k=10, max_iter=300, a=1.0, b=0.1, learning_rate=0.001, lambda_u=0.01, lambda_v=0.01, seed=123) # Initialize WMF model\n",
"\n",
"models = [bpr_model, wmf_model]\n",
- "metrics = [Precision(k=50), Recall(k=50)] # Set metrics for experiment\n",
+ "metrics = [Precision(k=100), Recall(k=100)] # Set metrics for experiment\n",
"\n",
"experiment = Experiment(rs, models, metrics, user_based=True).run() # Run Experiment to compare BPR model to WMF model individually"
]
@@ -367,7 +374,7 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": 4,
"id": "6687eea0",
"metadata": {},
"outputs": [
@@ -576,12 +583,12 @@
"\n",
"But first, let's create a `training_data_df` dataframe with all training data.\n",
"\n",
- "The training data consists of 80000 triplets of **User Index**, **Item Index** and **Rating** rows as seen in the dataset summary in Section 2.1."
+ "The training data consists of 80000 triplets of **User Index**, **Item Index** and **Rating** rows as seen in the dataset summary in Section 1.2."
]
},
{
"cell_type": "code",
- "execution_count": 6,
+ "execution_count": 5,
"id": "139fc938",
"metadata": {},
"outputs": [
@@ -616,12 +623,12 @@
"\n",
"Let's filter based on a particular user to learn more about the user.\n",
"\n",
- "We set ``UIDX`` to user index **3**, and ``TOPK`` to **50**, to get the top 50 recommendations in each model for comparison."
+ "We set ``UIDX`` to user index **3**, and ``TOPK`` to **100**, to get the top 100 recommendations in each model for comparison."
]
},
{
"cell_type": "code",
- "execution_count": 7,
+ "execution_count": 6,
"id": "3c91c6ae",
"metadata": {},
"outputs": [
@@ -734,7 +741,7 @@
"source": [
"# Let's define the user index and top-k movies to be recommended\n",
"UIDX = 3\n",
- "TOPK = 50\n",
+ "TOPK = 100\n",
"\n",
"# Positively rated items by a user (rating >= 4.0 as rating_threshold used earlier, and user index = UIDX)\n",
"positively_rated_items = training_data_df[\n",
@@ -777,7 +784,7 @@
},
{
"cell_type": "code",
- "execution_count": 9,
+ "execution_count": 7,
"id": "72759171",
"metadata": {},
"outputs": [
@@ -1178,7 +1185,7 @@
},
{
"cell_type": "code",
- "execution_count": 12,
+ "execution_count": 8,
"id": "283ca840",
"metadata": {},
"outputs": [
@@ -1212,7 +1219,6 @@
" \n",
" \n",
" | \n",
- " Train Data % | \n",
" BPR % | \n",
" WMF % | \n",
"
\n",
@@ -1220,80 +1226,70 @@
"
\n",
" \n",
" Drama | \n",
- " 22.6 | \n",
- " 40.0 | \n",
- " 52.0 | \n",
+ " 43.0 | \n",
+ " 49.0 | \n",
"
\n",
" \n",
" Comedy | \n",
- " 13.9 | \n",
- " 32.0 | \n",
- " 42.0 | \n",
+ " 31.0 | \n",
+ " 40.0 | \n",
"
\n",
" \n",
" Romance | \n",
- " 10.8 | \n",
- " 42.0 | \n",
- " 36.0 | \n",
+ " 33.0 | \n",
+ " 32.0 | \n",
"
\n",
" \n",
" Action | \n",
- " 10.6 | \n",
- " 32.0 | \n",
+ " 31.0 | \n",
" 16.0 | \n",
"
\n",
" \n",
" Thriller | \n",
- " 9.7 | \n",
- " 30.0 | \n",
- " 14.0 | \n",
+ " 29.0 | \n",
+ " 12.0 | \n",
"
\n",
" \n",
" Adventure | \n",
- " 6.9 | \n",
- " 16.0 | \n",
- " 10.0 | \n",
+ " 17.0 | \n",
+ " 11.0 | \n",
"
\n",
" \n",
" Children's | \n",
- " 4.4 | \n",
- " 4.0 | \n",
" 4.0 | \n",
+ " 6.0 | \n",
"
\n",
" \n",
" War | \n",
- " 3.9 | \n",
- " 12.0 | \n",
+ " 11.0 | \n",
" 10.0 | \n",
"
\n",
" \n",
" Crime | \n",
- " 3.9 | \n",
- " 6.0 | \n",
- " 2.0 | \n",
+ " 9.0 | \n",
+ " 4.0 | \n",
"
\n",
" \n",
" Sci-Fi | \n",
- " 3.5 | \n",
- " 8.0 | \n",
- " 8.0 | \n",
+ " 11.0 | \n",
+ " 9.0 | \n",
"
\n",
" \n",
"\n",
""
],
"text/plain": [
- " Train Data % BPR % WMF %\n",
- "Drama 22.6 40.0 52.0\n",
- "Comedy 13.9 32.0 42.0\n",
- "Romance 10.8 42.0 36.0\n",
- "Action 10.6 32.0 16.0\n",
- "Thriller 9.7 30.0 14.0\n",
- "Adventure 6.9 16.0 10.0\n",
- "Children's 4.4 4.0 4.0\n",
- "War 3.9 12.0 10.0\n",
- "Crime 3.9 6.0 2.0\n",
- "Sci-Fi 3.5 8.0 8.0"
+ " BPR % WMF %\n",
+ "Drama 43.0 49.0\n",
+ "Comedy 31.0 40.0\n",
+ "Romance 33.0 32.0\n",
+ "Action 31.0 16.0\n",
+ "Thriller 29.0 12.0\n",
+ "Adventure 17.0 11.0\n",
+ "Children's 4.0 6.0\n",
+ "War 11.0 10.0\n",
+ "Crime 9.0 4.0\n",
+ "Sci-Fi 11.0 9.0"
]
},
"metadata": {},
@@ -1317,7 +1313,7 @@
"combined_df = combined_df.sort_values(\"Train Data %\", ascending=False)\n",
"\n",
"# Let's take a look at the genre distribution by percentages\n",
- "display(\"Train Data to Recommended % Distribution\", combined_df[['Train Data %', 'BPR %', 'WMF %']][:10])"
+ "display(\"Train Data to Recommended % Distribution\", combined_df[['BPR %', 'WMF %']][:10])"
]
},
{
@@ -1325,10 +1321,6 @@
"id": "c30fe92b",
"metadata": {},
"source": [
- "Note that many movies belong to multiple genres, so the sum of the genre counts may exceed the total number of recommendations.\n",
- "\n",
- "-------\n",
- "\n",
"Now that we have seen the distribution of individual models, we are curious about what kind of distribution we will get from ensembling these models.\n",
"\n",
"Let's see what happens when we ensemble these two models. "
@@ -1380,7 +1372,7 @@
},
{
"cell_type": "code",
- "execution_count": 16,
+ "execution_count": 22,
"id": "b349407b",
"metadata": {},
"outputs": [
@@ -1409,7 +1401,6 @@
" BPR Rank | \n",
" WMF Rank | \n",
" Borda Rank | \n",
- " Borda Count | \n",
" \n",
" \n",
" \n",
@@ -1419,7 +1410,6 @@
" 8 | \n",
" 1 | \n",
" 1 | \n",
- " 3293 | \n",
" \n",
" \n",
" 194 | \n",
@@ -1427,7 +1417,6 @@
" 7 | \n",
" 11 | \n",
" 2 | \n",
- " 3284 | \n",
"
\n",
" \n",
" 425 | \n",
@@ -1435,7 +1424,6 @@
" 15 | \n",
" 18 | \n",
" 3 | \n",
- " 3269 | \n",
"
\n",
" \n",
" 382 | \n",
@@ -1443,7 +1431,6 @@
" 27 | \n",
" 8 | \n",
" 4 | \n",
- " 3267 | \n",
"
\n",
" \n",
" 310 | \n",
@@ -1451,19 +1438,18 @@
" 26 | \n",
" 9 | \n",
" 4 | \n",
- " 3267 | \n",
"
\n",
" \n",
"\n",
""
],
"text/plain": [
- " ItemID BPR Rank WMF Rank Borda Rank Borda Count\n",
- "152 313 8 1 1 3293\n",
- "194 739 7 11 2 3284\n",
- "425 237 15 18 3 3269\n",
- "382 655 27 8 4 3267\n",
- "310 692 26 9 4 3267"
+ " ItemID BPR Rank WMF Rank Borda Rank\n",
+ "152 313 8 1 1\n",
+ "194 739 7 11 2\n",
+ "425 237 15 18 3\n",
+ "382 655 27 8 4\n",
+ "310 692 26 9 4"
]
},
"metadata": {},
@@ -1497,15 +1483,7 @@
"rank_df.sort_values(\"Borda Rank\", inplace=True)\n",
"\n",
"# Now let's take a look at the table with Borda Count \n",
- "display(rank_df[[\"ItemID\", \"BPR Rank\", \"WMF Rank\", \"Borda Rank\", \"Borda Count\"]].head(5))"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "90a095da",
- "metadata": {},
- "source": [
- "Now that we have Borda Count, let's rerank this list and to provide the ensembled model's recommendation."
+ "display(rank_df[[\"ItemID\", \"BPR Rank\", \"WMF Rank\", \"Borda Rank\"]].head(5))"
]
},
{
@@ -1524,14 +1502,14 @@
},
{
"cell_type": "code",
- "execution_count": 18,
+ "execution_count": 23,
"id": "ac86f568",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "'Borda Count Recommendations Distribution'"
+ "'BPR + WMF Borda Count Recommendations Distribution'"
]
},
"metadata": {},
@@ -1558,99 +1536,88 @@
" \n",
" \n",
" | \n",
- " Train Data % | \n",
" BPR % | \n",
" WMF % | \n",
- " Borda Count % | \n",
+ " BPR + WMF Borda Count % | \n",
"
\n",
" \n",
" \n",
" \n",
" Drama | \n",
- " 22.6 | \n",
- " 40.0 | \n",
- " 52.0 | \n",
- " 40.0 | \n",
+ " 43.0 | \n",
+ " 49.0 | \n",
+ " 51.0 | \n",
"
\n",
" \n",
" Comedy | \n",
- " 13.9 | \n",
+ " 31.0 | \n",
+ " 40.0 | \n",
" 32.0 | \n",
- " 42.0 | \n",
- " 44.0 | \n",
"
\n",
" \n",
" Romance | \n",
- " 10.8 | \n",
- " 42.0 | \n",
- " 36.0 | \n",
- " 44.0 | \n",
+ " 33.0 | \n",
+ " 32.0 | \n",
+ " 35.0 | \n",
"
\n",
" \n",
" Action | \n",
- " 10.6 | \n",
- " 32.0 | \n",
+ " 31.0 | \n",
" 16.0 | \n",
- " 24.0 | \n",
+ " 25.0 | \n",
"
\n",
" \n",
" Thriller | \n",
- " 9.7 | \n",
- " 30.0 | \n",
- " 14.0 | \n",
- " 18.0 | \n",
+ " 29.0 | \n",
+ " 12.0 | \n",
+ " 22.0 | \n",
"
\n",
" \n",
" Adventure | \n",
- " 6.9 | \n",
- " 16.0 | \n",
- " 10.0 | \n",
- " 16.0 | \n",
+ " 17.0 | \n",
+ " 11.0 | \n",
+ " 15.0 | \n",
"
\n",
" \n",
" Children's | \n",
- " 4.4 | \n",
- " 4.0 | \n",
" 4.0 | \n",
+ " 6.0 | \n",
" 4.0 | \n",
"
\n",
" \n",
" War | \n",
- " 3.9 | \n",
- " 12.0 | \n",
+ " 11.0 | \n",
" 10.0 | \n",
- " 14.0 | \n",
+ " 13.0 | \n",
"
\n",
" \n",
" Crime | \n",
- " 3.9 | \n",
- " 6.0 | \n",
- " 2.0 | \n",
+ " 9.0 | \n",
" 4.0 | \n",
+ " 6.0 | \n",
"
\n",
" \n",
" Sci-Fi | \n",
- " 3.5 | \n",
- " 8.0 | \n",
+ " 11.0 | \n",
+ " 9.0 | \n",
" 8.0 | \n",
- " 16.0 | \n",
"
\n",
" \n",
"\n",
""
],
"text/plain": [
- " Train Data % BPR % WMF % Borda Count %\n",
- "Drama 22.6 40.0 52.0 40.0\n",
- "Comedy 13.9 32.0 42.0 44.0\n",
- "Romance 10.8 42.0 36.0 44.0\n",
- "Action 10.6 32.0 16.0 24.0\n",
- "Thriller 9.7 30.0 14.0 18.0\n",
- "Adventure 6.9 16.0 10.0 16.0\n",
- "Children's 4.4 4.0 4.0 4.0\n",
- "War 3.9 12.0 10.0 14.0\n",
- "Crime 3.9 6.0 2.0 4.0\n",
- "Sci-Fi 3.5 8.0 8.0 16.0"
+ " BPR % WMF % BPR + WMF Borda Count %\n",
+ "Drama 43.0 49.0 51.0\n",
+ "Comedy 31.0 40.0 32.0\n",
+ "Romance 33.0 32.0 35.0\n",
+ "Action 31.0 16.0 25.0\n",
+ "Thriller 29.0 12.0 22.0\n",
+ "Adventure 17.0 11.0 15.0\n",
+ "Children's 4.0 6.0 4.0\n",
+ "War 11.0 10.0 13.0\n",
+ "Crime 9.0 4.0 6.0\n",
+ "Sci-Fi 11.0 9.0 8.0"
]
},
"metadata": {},
@@ -1659,19 +1626,19 @@
],
"source": [
"UIDX = 3\n",
- "TOPK = 50\n",
+ "TOPK = 100\n",
"\n",
- "borda_count_topk = rank_df[\"ItemID\"].values[:TOPK] # Get top K (50) Item IDs\n",
+ "borda_count_topk = rank_df[\"ItemID\"].values[:TOPK] # Get top K (100) Item IDs\n",
"\n",
"borda_df = item_df.loc[[int(i) for i in borda_count_topk]] # Filter genre data frame by the top item IDs\n",
"\n",
"# Add Borda Count results into 'combined_df' dataframe for comparison\n",
"combined_df[\"Borda Count Sum\"] = borda_df.select_dtypes(np.number).sum() # group by genre, and calculate sum of each genre\n",
- "combined_df[\"Borda Count %\"] = combined_df[\"Borda Count Sum\"] / TOPK * 100 # Calculate percentage of sum to total\n",
- "combined_df[\"Borda Count %\"] = combined_df[\"Borda Count %\"].round(1) # rounding for readability purposes\n",
+ "combined_df[\"BPR + WMF Borda Count %\"] = combined_df[\"Borda Count Sum\"] / TOPK * 100 # Calculate percentage of sum to total\n",
+ "combined_df[\"BPR + WMF Borda Count %\"] = combined_df[\"BPR + WMF Borda Count %\"].round(1) # rounding for readability purposes\n",
"\n",
"# Let's take a look at the genre distribution of train data, BPR, WMF and the newly added Borda Count\n",
- "display(\"Borda Count Recommendations Distribution\", combined_df[[\"Train Data %\", \"BPR %\", \"WMF %\", \"Borda Count %\"]][:10])"
+ "display(\"BPR + WMF Borda Count Recommendations Distribution\", combined_df[[\"BPR %\", \"WMF %\", \"BPR + WMF Borda Count %\"]][:10])"
]
},
{
@@ -1707,7 +1674,7 @@
},
{
"cell_type": "code",
- "execution_count": 28,
+ "execution_count": 25,
"id": "5ce879a6",
"metadata": {},
"outputs": [
@@ -1723,7 +1690,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "100%|██████████| 300/300 [00:17<00:00, 17.30it/s, loss=173]\n"
+ "100%|██████████| 300/300 [00:19<00:00, 15.19it/s, loss=173]\n"
]
},
{
@@ -1739,7 +1706,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "Ranking: 100%|██████████| 940/940 [00:00<00:00, 2085.29it/s]\n"
+ "Ranking: 100%|██████████| 940/940 [00:00<00:00, 1686.97it/s]\n"
]
},
{
@@ -1754,7 +1721,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "100%|██████████| 300/300 [00:17<00:00, 16.77it/s, loss=175]\n"
+ "100%|██████████| 300/300 [00:22<00:00, 13.31it/s, loss=175]\n"
]
},
{
@@ -1770,7 +1737,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "Ranking: 100%|██████████| 940/940 [00:00<00:00, 2573.25it/s]\n"
+ "Ranking: 100%|██████████| 940/940 [00:00<00:00, 1802.24it/s]\n"
]
},
{
@@ -1785,7 +1752,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "100%|██████████| 300/300 [00:17<00:00, 17.36it/s, loss=172]\n"
+ "100%|██████████| 300/300 [00:18<00:00, 16.14it/s, loss=172]\n"
]
},
{
@@ -1801,7 +1768,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "Ranking: 100%|██████████| 940/940 [00:00<00:00, 2610.95it/s]\n"
+ "Ranking: 100%|██████████| 940/940 [00:00<00:00, 2258.92it/s]\n"
]
},
{
@@ -1816,7 +1783,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "100%|██████████| 300/300 [00:17<00:00, 16.71it/s, loss=172]\n"
+ "100%|██████████| 300/300 [00:19<00:00, 15.44it/s, loss=172]\n"
]
},
{
@@ -1832,7 +1799,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "Ranking: 100%|██████████| 940/940 [00:00<00:00, 2892.63it/s]\n"
+ "Ranking: 100%|██████████| 940/940 [00:00<00:00, 2035.62it/s]\n"
]
},
{
@@ -1847,7 +1814,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "100%|██████████| 300/300 [00:18<00:00, 16.50it/s, loss=171]\n"
+ "100%|██████████| 300/300 [00:18<00:00, 16.61it/s, loss=171]\n"
]
},
{
@@ -1863,7 +1830,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "Ranking: 100%|██████████| 940/940 [00:00<00:00, 2258.68it/s]\n"
+ "Ranking: 100%|██████████| 940/940 [00:00<00:00, 1887.01it/s]\n"
]
},
{
@@ -1878,7 +1845,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "100%|██████████| 300/300 [00:19<00:00, 15.14it/s, loss=147]\n"
+ "100%|██████████| 300/300 [00:20<00:00, 14.82it/s, loss=147]\n"
]
},
{
@@ -1894,7 +1861,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "Ranking: 100%|██████████| 940/940 [00:00<00:00, 2439.94it/s]\n"
+ "Ranking: 100%|██████████| 940/940 [00:00<00:00, 2230.47it/s]\n"
]
},
{
@@ -1909,7 +1876,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "100%|██████████| 300/300 [00:20<00:00, 14.99it/s, loss=129]\n"
+ "100%|██████████| 300/300 [00:22<00:00, 13.39it/s, loss=129]\n"
]
},
{
@@ -1925,7 +1892,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "Ranking: 100%|██████████| 940/940 [-1:59:59<00:00, -604.26it/s]\n"
+ "Ranking: 100%|██████████| 940/940 [00:00<00:00, 2078.40it/s]\n"
]
},
{
@@ -1940,7 +1907,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "100%|██████████| 300/300 [00:20<00:00, 14.33it/s, loss=115] \n"
+ "100%|██████████| 300/300 [00:20<00:00, 14.64it/s, loss=115] \n"
]
},
{
@@ -1956,7 +1923,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "Ranking: 100%|██████████| 940/940 [00:00<00:00, 2769.90it/s]\n"
+ "Ranking: 100%|██████████| 940/940 [00:00<00:00, 2170.89it/s]\n"
]
},
{
@@ -1971,7 +1938,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "100%|██████████| 300/300 [00:25<00:00, 11.98it/s, loss=102] \n"
+ "100%|██████████| 300/300 [00:31<00:00, 9.43it/s, loss=102] \n"
]
},
{
@@ -1987,7 +1954,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "Ranking: 100%|██████████| 940/940 [00:00<00:00, 2749.19it/s]"
+ "Ranking: 100%|██████████| 940/940 [00:00<00:00, 1773.88it/s]"
]
},
{
@@ -1997,17 +1964,17 @@
"\n",
"TEST:\n",
"...\n",
- " | Precision@50 | Recall@50 | Train (s) | Test (s)\n",
- "------- + ------------ + --------- + --------- + --------\n",
- "WMF_123 | 0.1133 | 0.5583 | 17.7788 | 0.4557\n",
- "WMF_456 | 0.1099 | 0.5500 | 18.0442 | 0.3699\n",
- "WMF_789 | 0.1131 | 0.5604 | 17.4421 | 0.3643\n",
- "WMF_888 | 0.1126 | 0.5529 | 18.0820 | 0.3295\n",
- "WMF_999 | 0.1134 | 0.5597 | 18.3150 | 0.4208\n",
- "WMF_k20 | 0.1153 | 0.5736 | 19.9460 | 0.3897\n",
- "WMF_k30 | 0.1108 | 0.5530 | 20.1790 | -1.5478\n",
- "WMF_k40 | 0.1075 | 0.5419 | 21.0963 | 0.3441\n",
- "WMF_k50 | 0.1044 | 0.5290 | 25.1880 | 0.3466\n",
+ " | Precision@100 | Recall@100 | Train (s) | Test (s)\n",
+ "------- + ------------- + ---------- + --------- + --------\n",
+ "WMF_123 | 0.0772 | 0.7208 | 20.1040 | 0.5692\n",
+ "WMF_456 | 0.0756 | 0.7123 | 22.7402 | 0.5303\n",
+ "WMF_789 | 0.0772 | 0.7231 | 18.7722 | 0.4210\n",
+ "WMF_888 | 0.0769 | 0.7171 | 19.6074 | 0.4667\n",
+ "WMF_999 | 0.0773 | 0.7191 | 18.2538 | 0.5029\n",
+ "WMF_k20 | 0.0777 | 0.7294 | 20.4318 | 0.4263\n",
+ "WMF_k30 | 0.0749 | 0.7066 | 22.5763 | 0.4582\n",
+ "WMF_k40 | 0.0724 | 0.6874 | 20.7039 | 0.4384\n",
+ "WMF_k50 | 0.0706 | 0.6758 | 32.0025 | 0.5375\n",
"\n"
]
},
@@ -2034,6 +2001,8 @@
"\n",
"models = [wmf_model_123, wmf_model_456, wmf_model_789, wmf_model_888, wmf_model_999, wmf_model_k20, wmf_model_k30, wmf_model_k40, wmf_model_k50]\n",
"\n",
+ "metrics = [Precision(k=100), Recall(k=100)] # The same metrics as before\n",
+ "\n",
"# Let's run an experiment to take a look at how different these models are, with just different random seeds!\n",
"experiment = Experiment(rs, models, metrics, user_based=True).run()"
]
@@ -2085,76 +2054,76 @@
" \n",
" | \n",
" ItemID | \n",
- " WMF Borda Count | \n",
+ " WMF Family Borda Count | \n",
"
\n",
" \n",
" \n",
" \n",
- " 152 | \n",
- " 313 | \n",
- " 3293 | \n",
+ " 37 | \n",
+ " 318 | \n",
+ " 14758 | \n",
"
\n",
" \n",
- " 194 | \n",
- " 739 | \n",
- " 3284 | \n",
+ " 152 | \n",
+ " 313 | \n",
+ " 14708 | \n",
"
\n",
" \n",
- " 425 | \n",
- " 237 | \n",
- " 3269 | \n",
+ " 197 | \n",
+ " 191 | \n",
+ " 14660 | \n",
"
\n",
" \n",
- " 310 | \n",
- " 692 | \n",
- " 3267 | \n",
- "
\n",
- " \n",
- " 382 | \n",
- " 655 | \n",
- " 3267 | \n",
+ " 132 | \n",
+ " 272 | \n",
+ " 14633 | \n",
"
\n",
" \n",
" 156 | \n",
" 64 | \n",
- " 3266 | \n",
+ " 14632 | \n",
"
\n",
" \n",
- " 511 | \n",
- " 471 | \n",
- " 3264 | \n",
+ " 61 | \n",
+ " 204 | \n",
+ " 14603 | \n",
"
\n",
" \n",
- " 422 | \n",
- " 282 | \n",
- " 3263 | \n",
+ " 279 | \n",
+ " 402 | \n",
+ " 14598 | \n",
"
\n",
" \n",
- " 188 | \n",
- " 294 | \n",
- " 3260 | \n",
+ " 305 | \n",
+ " 181 | \n",
+ " 14582 | \n",
"
\n",
" \n",
- " 8 | \n",
- " 15 | \n",
- " 3257 | \n",
+ " 405 | \n",
+ " 22 | \n",
+ " 14581 | \n",
+ "
\n",
+ " \n",
+ " 604 | \n",
+ " 215 | \n",
+ " 14541 | \n",
"
\n",
" \n",
"\n",
""
],
"text/plain": [
- " ItemID WMF Borda Count\n",
- "152 313 3293\n",
- "194 739 3284\n",
- "425 237 3269\n",
- "310 692 3267\n",
- "382 655 3267\n",
- "156 64 3266\n",
- "511 471 3264\n",
- "422 282 3263\n",
- "188 294 3260\n",
- "8 15 3257"
+ " ItemID WMF Family Borda Count\n",
+ "37 318 14758\n",
+ "152 313 14708\n",
+ "197 191 14660\n",
+ "132 272 14633\n",
+ "156 64 14632\n",
+ "61 204 14603\n",
+ "279 402 14598\n",
+ "305 181 14582\n",
+ "405 22 14581\n",
+ "604 215 14541"
]
},
"metadata": {},
@@ -2168,7 +2137,7 @@
"})\n",
"\n",
"# Add a column named 'Ensembled WMF Model'\n",
- "rank_2_df[\"WMF Borda Count\"] = 0\n",
+ "rank_2_df[\"WMF Family Borda Count\"] = 0\n",
"\n",
"# Calculate the points (inverse of rank) for each of the models and accumulate them into the 'WMF Borda Count' column\n",
"# We use the same formula as the 'Borda Count' calculation\n",
@@ -2178,23 +2147,15 @@
" rank_2_df[name + \"_score\"] = scores\n",
" rank_2_df[name + \"_rank\"] = rank_2_df[name + \"_score\"].rank(ascending=False).astype(int)\n",
" rank_2_df[name + \"_points\"] = total_items - rank_2_df[name + \"_rank\"]\n",
- " rank_2_df[\"WMF Borda Count\"] = rank_2_df[\"WMF Borda Count\"] + rank_2_df[name + \"_points\"]\n",
+ " rank_2_df[\"WMF Family Borda Count\"] = rank_2_df[\"WMF Family Borda Count\"] + rank_2_df[name + \"_points\"]\n",
"\n",
"# Let's sort and view the top recommendations!\n",
- "display(\"Top 10 Recommendations for WMF Borda Count\", rank_2_df[[\"ItemID\", \"WMF Borda Count\"]].sort_values(\"WMF Borda Count\", ascending=False).head(10))"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "361d3fd9",
- "metadata": {},
- "source": [
- ">> TODO: Add the different variants - WMF Rank, Borda Rank, Borda Count"
+ "display(\"Top 10 Recommendations for WMF Borda Count\", rank_2_df[[\"ItemID\", \"WMF Family Borda Count\"]].sort_values(\"WMF Family Borda Count\", ascending=False).head(10))"
]
},
{
"cell_type": "code",
- "execution_count": 24,
+ "execution_count": 27,
"id": "8224e10e",
"metadata": {},
"outputs": [
@@ -2228,77 +2189,88 @@
" \n",
" \n",
" | \n",
- " Train Data % | \n",
- " WMF Borda Count % | \n",
+ " WMF % | \n",
+ " BPR + WMF Borda Count % | \n",
+ " WMF Family Borda Count % | \n",
"
\n",
" \n",
" \n",
" \n",
" Drama | \n",
- " 22.6 | \n",
- " 40.0 | \n",
+ " 49.0 | \n",
+ " 51.0 | \n",
+ " 53.0 | \n",
"
\n",
" \n",
" Comedy | \n",
- " 13.9 | \n",
- " 44.0 | \n",
+ " 40.0 | \n",
+ " 32.0 | \n",
+ " 29.0 | \n",
"
\n",
" \n",
" Romance | \n",
- " 10.8 | \n",
- " 44.0 | \n",
+ " 32.0 | \n",
+ " 35.0 | \n",
+ " 33.0 | \n",
"
\n",
" \n",
" Action | \n",
- " 10.6 | \n",
- " 24.0 | \n",
+ " 16.0 | \n",
+ " 25.0 | \n",
+ " 20.0 | \n",
"
\n",
" \n",
" Thriller | \n",
- " 9.7 | \n",
+ " 12.0 | \n",
+ " 22.0 | \n",
" 18.0 | \n",
"
\n",
" \n",
" Adventure | \n",
- " 6.9 | \n",
- " 16.0 | \n",
+ " 11.0 | \n",
+ " 15.0 | \n",
+ " 13.0 | \n",
"
\n",
" \n",
" Children's | \n",
- " 4.4 | \n",
+ " 6.0 | \n",
" 4.0 | \n",
+ " 8.0 | \n",
"
\n",
" \n",
" War | \n",
- " 3.9 | \n",
- " 14.0 | \n",
+ " 10.0 | \n",
+ " 13.0 | \n",
+ " 11.0 | \n",
"
\n",
" \n",
" Crime | \n",
- " 3.9 | \n",
" 4.0 | \n",
+ " 6.0 | \n",
+ " 7.0 | \n",
"
\n",
" \n",
" Sci-Fi | \n",
- " 3.5 | \n",
- " 16.0 | \n",
+ " 9.0 | \n",
+ " 8.0 | \n",
+ " 10.0 | \n",
"
\n",
" \n",
"\n",
""
],
"text/plain": [
- " Train Data % WMF Borda Count %\n",
- "Drama 22.6 40.0\n",
- "Comedy 13.9 44.0\n",
- "Romance 10.8 44.0\n",
- "Action 10.6 24.0\n",
- "Thriller 9.7 18.0\n",
- "Adventure 6.9 16.0\n",
- "Children's 4.4 4.0\n",
- "War 3.9 14.0\n",
- "Crime 3.9 4.0\n",
- "Sci-Fi 3.5 16.0"
+ " WMF % BPR + WMF Borda Count % WMF Family Borda Count %\n",
+ "Drama 49.0 51.0 53.0\n",
+ "Comedy 40.0 32.0 29.0\n",
+ "Romance 32.0 35.0 33.0\n",
+ "Action 16.0 25.0 20.0\n",
+ "Thriller 12.0 22.0 18.0\n",
+ "Adventure 11.0 15.0 13.0\n",
+ "Children's 6.0 4.0 8.0\n",
+ "War 10.0 13.0 11.0\n",
+ "Crime 4.0 6.0 7.0\n",
+ "Sci-Fi 9.0 8.0 10.0"
]
},
"metadata": {},
@@ -2307,15 +2279,15 @@
],
"source": [
"# Now, let's add them to the combined dataframe for comparison with earlier models\n",
- "wmf_borda_count_topk = rank_2_df.sort_values(\"WMF Borda Count\", ascending=False)[\"ItemID\"].values[:TOPK]\n",
+ "wmf_borda_count_topk = rank_2_df.sort_values(\"WMF Family Borda Count\", ascending=False)[\"ItemID\"].values[:TOPK]\n",
"wmf_borda_df = item_df.loc[[int(i) for i in wmf_borda_count_topk]]\n",
"\n",
- "combined_df[\"WMF Borda Count Sum\"] = wmf_borda_df.select_dtypes(np.number).sum()\n",
- "combined_df[\"WMF Borda Count %\"] = combined_df[\"WMF Borda Count Sum\"] / TOPK * 100\n",
- "combined_df[\"WMF Borda Count %\"] = combined_df[\"WMF Borda Count %\"].round(1)\n",
+ "combined_df[\"WMF Family Borda Count Sum\"] = wmf_borda_df.select_dtypes(np.number).sum()\n",
+ "combined_df[\"WMF Family Borda Count %\"] = combined_df[\"WMF Family Borda Count Sum\"] / TOPK * 100\n",
+ "combined_df[\"WMF Family Borda Count %\"] = combined_df[\"WMF Family Borda Count %\"].round(1)\n",
"\n",
"# Let's compare the recommendation distribution\n",
- "display(\"Combined Recommendations Distribution\", combined_df[[\"Train Data %\", \"WMF Borda Count %\"]][:10])"
+ "display(\"Combined Recommendations Distribution\", combined_df[[\"WMF %\", \"BPR + WMF Borda Count %\", \"WMF Family Borda Count %\"]][:10])"
]
},
{
@@ -2368,7 +2340,7 @@
},
{
"cell_type": "code",
- "execution_count": 32,
+ "execution_count": 28,
"id": "380223e2",
"metadata": {},
"outputs": [
@@ -2376,7 +2348,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "100%|██████████| 9/9 [02:30<00:00, 16.68s/it]\n"
+ "100%|██████████| 9/9 [03:29<00:00, 23.28s/it]\n"
]
},
{
@@ -2644,7 +2616,7 @@
},
{
"cell_type": "code",
- "execution_count": 33,
+ "execution_count": 29,
"id": "16a564bd",
"metadata": {},
"outputs": [
@@ -2661,7 +2633,7 @@
],
"source": [
"UIDX = 3\n",
- "TOPK = 50\n",
+ "TOPK = 100\n",
"\n",
"# Let's now fit into a Linear Regression model\n",
"regr = linear_model.LinearRegression(fit_intercept=False) # force model to only use predictions from WMF models\n",
@@ -2693,9 +2665,7 @@
"id": "048c684f",
"metadata": {},
"source": [
- "Coefficients of the Linear Regression model indicate the importance of each base model in the ensemble. A higher coefficient suggests that the model plays a more significant role in the final prediction.\n",
- "\n",
- "The coefficients from the model indicate that the `WMF_k50` model received the highest weight, suggesting it plays a significant role in the final predictions.\n",
+ "Coefficients of the Linear Regression model indicate the contributions of each base model in the ensemble.\n",
"\n",
"We have successfully trained a **Linear Regression** model using the predictions from the 9 WMF base models, which included variations with different seeds and latent factors.\n",
"\n",
@@ -2716,7 +2686,7 @@
},
{
"cell_type": "code",
- "execution_count": 52,
+ "execution_count": 30,
"id": "0fe095ce",
"metadata": {},
"outputs": [
@@ -2750,9 +2720,8 @@
" \n",
" \n",
" | \n",
- " Train Data % | \n",
" WMF % | \n",
- " WMF Borda Count % | \n",
+ " WMF Family Borda Count % | \n",
" WMF Linear Regression % | \n",
" WMF Random Forest % | \n",
"
\n",
@@ -2760,112 +2729,102 @@
" \n",
" \n",
" Drama | \n",
- " 22.6 | \n",
- " 52.0 | \n",
+ " 49.0 | \n",
+ " 53.0 | \n",
+ " 58.0 | \n",
" 40.0 | \n",
- " 54.0 | \n",
- " 60.0 | \n",
"
\n",
" \n",
" Comedy | \n",
- " 13.9 | \n",
- " 42.0 | \n",
- " 44.0 | \n",
- " 24.0 | \n",
+ " 40.0 | \n",
+ " 29.0 | \n",
" 26.0 | \n",
+ " 35.0 | \n",
"
\n",
" \n",
" Romance | \n",
- " 10.8 | \n",
- " 36.0 | \n",
- " 44.0 | \n",
- " 24.0 | \n",
+ " 32.0 | \n",
+ " 33.0 | \n",
" 24.0 | \n",
+ " 35.0 | \n",
"
\n",
" \n",
" Action | \n",
- " 10.6 | \n",
" 16.0 | \n",
+ " 20.0 | \n",
+ " 20.0 | \n",
" 24.0 | \n",
- " 26.0 | \n",
- " 22.0 | \n",
"
\n",
" \n",
" Thriller | \n",
- " 9.7 | \n",
- " 14.0 | \n",
- " 18.0 | \n",
- " 24.0 | \n",
+ " 12.0 | \n",
" 18.0 | \n",
+ " 23.0 | \n",
+ " 21.0 | \n",
"
\n",
" \n",
" Adventure | \n",
- " 6.9 | \n",
- " 10.0 | \n",
- " 16.0 | \n",
- " 18.0 | \n",
- " 16.0 | \n",
+ " 11.0 | \n",
+ " 13.0 | \n",
+ " 11.0 | \n",
+ " 14.0 | \n",
"
\n",
" \n",
" Children's | \n",
- " 4.4 | \n",
- " 4.0 | \n",
- " 4.0 | \n",
- " 4.0 | \n",
- " 2.0 | \n",
+ " 6.0 | \n",
+ " 8.0 | \n",
+ " 5.0 | \n",
+ " 6.0 | \n",
"
\n",
" \n",
" War | \n",
- " 3.9 | \n",
" 10.0 | \n",
- " 14.0 | \n",
- " 8.0 | \n",
- " 6.0 | \n",
+ " 11.0 | \n",
+ " 9.0 | \n",
+ " 5.0 | \n",
"
\n",
" \n",
" Crime | \n",
- " 3.9 | \n",
- " 2.0 | \n",
" 4.0 | \n",
+ " 7.0 | \n",
+ " 9.0 | \n",
" 8.0 | \n",
- " 6.0 | \n",
"
\n",
" \n",
" Sci-Fi | \n",
- " 3.5 | \n",
- " 8.0 | \n",
- " 16.0 | \n",
+ " 9.0 | \n",
+ " 10.0 | \n",
" 6.0 | \n",
- " 4.0 | \n",
+ " 9.0 | \n",
"
\n",
" \n",
"\n",
""
],
"text/plain": [
- " Train Data % WMF % WMF Borda Count % WMF Linear Regression % \\\n",
- "Drama 22.6 52.0 40.0 54.0 \n",
- "Comedy 13.9 42.0 44.0 24.0 \n",
- "Romance 10.8 36.0 44.0 24.0 \n",
- "Action 10.6 16.0 24.0 26.0 \n",
- "Thriller 9.7 14.0 18.0 24.0 \n",
- "Adventure 6.9 10.0 16.0 18.0 \n",
- "Children's 4.4 4.0 4.0 4.0 \n",
- "War 3.9 10.0 14.0 8.0 \n",
- "Crime 3.9 2.0 4.0 8.0 \n",
- "Sci-Fi 3.5 8.0 16.0 6.0 \n",
+ " WMF % WMF Family Borda Count % WMF Linear Regression % \\\n",
+ "Drama 49.0 53.0 58.0 \n",
+ "Comedy 40.0 29.0 26.0 \n",
+ "Romance 32.0 33.0 24.0 \n",
+ "Action 16.0 20.0 20.0 \n",
+ "Thriller 12.0 18.0 23.0 \n",
+ "Adventure 11.0 13.0 11.0 \n",
+ "Children's 6.0 8.0 5.0 \n",
+ "War 10.0 11.0 9.0 \n",
+ "Crime 4.0 7.0 9.0 \n",
+ "Sci-Fi 9.0 10.0 6.0 \n",
"\n",
" WMF Random Forest % \n",
- "Drama 60.0 \n",
- "Comedy 26.0 \n",
- "Romance 24.0 \n",
- "Action 22.0 \n",
- "Thriller 18.0 \n",
- "Adventure 16.0 \n",
- "Children's 2.0 \n",
- "War 6.0 \n",
- "Crime 6.0 \n",
- "Sci-Fi 4.0 "
+ "Drama 40.0 \n",
+ "Comedy 35.0 \n",
+ "Romance 35.0 \n",
+ "Action 24.0 \n",
+ "Thriller 21.0 \n",
+ "Adventure 14.0 \n",
+ "Children's 6.0 \n",
+ "War 5.0 \n",
+ "Crime 8.0 \n",
+ "Sci-Fi 9.0 "
]
},
"metadata": {},
@@ -2874,10 +2833,10 @@
],
"source": [
"UIDX = 3\n",
- "TOPK = 50\n",
+ "TOPK = 100\n",
"\n",
"# Let's now train a Random Forest model\n",
- "randomforest_model = RandomForestRegressor(n_estimators=30, random_state=42) \n",
+ "randomforest_model = RandomForestRegressor(n_estimators=50, max_depth=2, random_state=42) \n",
"randomforest_model.fit(X_train, y_train) # Train the model\n",
"\n",
"# Input: 5 base model predicted ratings. Output: final predicted rating based on random forest\n",
@@ -2897,7 +2856,7 @@
"combined_df[\"WMF Random Forest %\"] = combined_df[\"WMF Random Forest %\"].round(1) # round values for readability\n",
"\n",
"# Now let's take a look at how the genre distribution is\n",
- "display(\"Combined Recommendations Distribution\", combined_df[[\"Train Data %\", \"WMF %\", \"WMF Borda Count %\", \"WMF Linear Regression %\", \"WMF Random Forest %\"]][:10])"
+ "display(\"Combined Recommendations Distribution\", combined_df[[\"WMF %\", \"WMF Family Borda Count %\", \"WMF Linear Regression %\", \"WMF Random Forest %\"]][:10])"
]
},
{
@@ -2929,7 +2888,7 @@
"id": "2d02a252",
"metadata": {},
"source": [
- "In the beginning, we have split the dataset into training and testing sets. Now, we will evaluate the performance of the ensemble models using **Precision@50** and **Recall@50** metrics.\n",
+ "In the beginning, we have split the dataset into training and testing sets. Now, we will evaluate the performance of the ensemble models using **Precision@100** and **Recall@100** metrics.\n",
"\n",
"We will use the test set to evaluate the models. \n",
"\n",
@@ -2938,7 +2897,7 @@
},
{
"cell_type": "code",
- "execution_count": 35,
+ "execution_count": 31,
"id": "553f4f32",
"metadata": {},
"outputs": [
@@ -2953,7 +2912,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "100%|██████████| 11/11 [03:33<00:00, 19.39s/it]\n"
+ "100%|██████████| 11/11 [04:42<00:00, 25.70s/it]\n"
]
}
],
@@ -2993,27 +2952,27 @@
"\n",
"Once we have the scores calculated, we will sum them up according to the Borda Count formula outlined in Sections 2 and 3.\n",
"\n",
- "**Simple Borda Count**: \n",
+ "**BPR + WMF Borda Count**: \n",
"To clarify, our basic Borda Count model includes the **BPR Model** and the **WMF Model**.\n",
"\n",
- "**WMF Borda Count**: \n",
- "The `WMF Borda Count` model, on the other hand, consists of multiple variations:\n",
+ "**WMF Family Borda Count**: \n",
+ "The `WMF Family Borda Count` model, on the other hand, consists of multiple variations:\n",
"- Models initialized with different random seeds: **wmf_model_123**, **wmf_model_456**, **wmf_model_789**, **wmf_model_888**, and **wmf_model_999**.\n",
"- Models with different latent factors: **wmf_model_k20**, **wmf_model_k30**, **wmf_model_k40**, and **wmf_model_k50**."
]
},
{
"cell_type": "code",
- "execution_count": 36,
+ "execution_count": 32,
"id": "240fec5f",
"metadata": {},
"outputs": [],
"source": [
"borda_count_models = [bpr_model, wmf_model]\n",
- "rank_df[\"Borda Count\"] = rank_df[[model.name + \"_points\" for model in borda_count_models]].sum(axis=1) # Sum up points of BPR and WMF\n",
+ "rank_df[\"BPR + WMF Borda Count\"] = rank_df[[model.name + \"_points\" for model in borda_count_models]].sum(axis=1) # Sum up points of BPR and WMF\n",
"\n",
"wmf_borda_count_models = [wmf_model_123, wmf_model_456, wmf_model_789, wmf_model_888, wmf_model_999, wmf_model_k20, wmf_model_k30, wmf_model_k40, wmf_model_k50]\n",
- "rank_df[\"WMF Borda Count\"] = rank_df[[model.name + \"_points\" for model in wmf_borda_count_models]].sum(axis=1) # Sum up points of all WMF models\n",
+ "rank_df[\"WMF Family Borda Count\"] = rank_df[[model.name + \"_points\" for model in wmf_borda_count_models]].sum(axis=1) # Sum up points of all WMF models\n",
"\n",
"# Now, lets add them into the `all_df` dataframe for comparison\n",
"all_df.sort_values(by=[\"user_idx\", \"item_idx\"], inplace=True) # ensure that the dataframe is sorted by user index and item index\n",
@@ -3021,8 +2980,8 @@
"all_df[\"BPR_score\"] = rank_df[\"BPR_score\"].values\n",
"all_df[\"WMF_score\"] = rank_df[\"WMF_score\"].values\n",
"\n",
- "all_df[\"Borda Count\"] = rank_df[\"Borda Count\"].values\n",
- "all_df[\"WMF Borda Count\"] = rank_df[\"WMF Borda Count\"].values"
+ "all_df[\"BPR + WMF Borda Count\"] = rank_df[\"BPR + WMF Borda Count\"].values\n",
+ "all_df[\"WMF Family Borda Count\"] = rank_df[\"WMF Family Borda Count\"].values"
]
},
{
@@ -3036,12 +2995,12 @@
"\n",
"### 5.2 Results for Borda Count of BPR and WMF\n",
"\n",
- "We calculate the **Precision@50** and **Recall@50** values for the Borda Count model, which combines the BPR and WMF models."
+ "We calculate the **Precision@100** and **Recall@100** values for the BPR + WMF Borda Count model, which combines the BPR and WMF models."
]
},
{
"cell_type": "code",
- "execution_count": 38,
+ "execution_count": 33,
"id": "916b390b",
"metadata": {},
"outputs": [
@@ -3049,13 +3008,13 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "100%|██████████| 3/3 [00:14<00:00, 4.86s/it]\n"
+ "100%|██████████| 3/3 [00:20<00:00, 6.81s/it]\n"
]
},
{
"data": {
"text/plain": [
- "'Base BPR, WMF comparison with Borda Count (BPR + WMF)'"
+ "'Base BPR and Base WMF in comparison with BPR + WMF Borda Count'"
]
},
"metadata": {},
@@ -3085,32 +3044,32 @@
" Metrics | \n",
" BPR_score | \n",
" WMF_score | \n",
- " Borda Count | \n",
+ " BPR + WMF Borda Count | \n",
" \n",
" \n",
" \n",
" \n",
" 0 | \n",
- " Precision@50 | \n",
- " 0.099574 | \n",
- " 0.103213 | \n",
- " 0.103043 | \n",
+ " Precision@100 | \n",
+ " 0.083255 | \n",
+ " 0.084979 | \n",
+ " 0.086021 | \n",
"
\n",
" \n",
" 1 | \n",
- " Recall@50 | \n",
- " 0.363850 | \n",
- " 0.372319 | \n",
- " 0.374389 | \n",
+ " Recall@100 | \n",
+ " 0.545496 | \n",
+ " 0.558411 | \n",
+ " 0.565059 | \n",
"
\n",
" \n",
"\n",
""
],
"text/plain": [
- " Metrics BPR_score WMF_score Borda Count\n",
- "0 Precision@50 0.099574 0.103213 0.103043\n",
- "1 Recall@50 0.363850 0.372319 0.374389"
+ " Metrics BPR_score WMF_score BPR + WMF Borda Count\n",
+ "0 Precision@100 0.083255 0.084979 0.086021\n",
+ "1 Recall@100 0.545496 0.558411 0.565059"
]
},
"metadata": {},
@@ -3118,10 +3077,10 @@
}
],
"source": [
- "models = [\"BPR_score\", \"WMF_score\", \"Borda Count\"]\n",
+ "models = [\"BPR_score\", \"WMF_score\", \"BPR + WMF Borda Count\"]\n",
"\n",
"result_data = {\n",
- " \"Metrics\": [\"Precision@50\", \"Recall@50\"],\n",
+ " \"Metrics\": [\"Precision@100\", \"Recall@100\"],\n",
"}\n",
"\n",
"test_users = set(test_set.uir_tuple[0])\n",
@@ -3145,7 +3104,15 @@
"# Now let's take a look at the results\n",
"result_df = pd.DataFrame(result_data)\n",
"\n",
- "display(\"Base BPR, WMF comparison with Borda Count (BPR + WMF)\", result_df[[\"Metrics\", \"BPR_score\", \"WMF_score\", \"Borda Count\"]])"
+ "display(\"Base BPR and Base WMF in comparison with BPR + WMF Borda Count\", result_df[[\"Metrics\", \"BPR_score\", \"WMF_score\", \"BPR + WMF Borda Count\"]])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "581d7584",
+ "metadata": {},
+ "source": [
+ "We observe better recall performance in Borda Count compared to the individual models."
]
},
{
@@ -3155,12 +3122,12 @@
"source": [
"### 5.3 Results for WMF Related Models\n",
"\n",
- "We calculate the **Precision@50** and **Recall@50** values for the WMF related models."
+ "We calculate the **Precision@100** and **Recall@100** values for the WMF related models."
]
},
{
"cell_type": "code",
- "execution_count": 53,
+ "execution_count": 34,
"id": "7188ca7d",
"metadata": {},
"outputs": [
@@ -3168,7 +3135,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "100%|██████████| 3/3 [00:13<00:00, 4.44s/it]\n"
+ "100%|██████████| 3/3 [00:15<00:00, 5.11s/it]\n"
]
},
{
@@ -3202,7 +3169,7 @@
" \n",
" | \n",
" Metrics | \n",
- " WMF Borda Count | \n",
+ " WMF Family Borda Count | \n",
" WMF Linear Regression | \n",
" WMF Random Forest | \n",
"
\n",
@@ -3210,26 +3177,30 @@
" \n",
" \n",
" 0 | \n",
- " Precision@50 | \n",
- " 0.099894 | \n",
- " 0.075894 | \n",
- " 0.06917 | \n",
+ " Precision@100 | \n",
+ " 0.083596 | \n",
+ " 0.067691 | \n",
+ " 0.065957 | \n",
"
\n",
" \n",
" 1 | \n",
- " Recall@50 | \n",
- " 0.379790 | \n",
- " 0.312299 | \n",
- " 0.28381 | \n",
+ " Recall@100 | \n",
+ " 0.567710 | \n",
+ " 0.488312 | \n",
+ " 0.441106 | \n",
"
\n",
" \n",
"\n",
""
],
"text/plain": [
- " Metrics WMF Borda Count WMF Linear Regression WMF Random Forest\n",
- "0 Precision@50 0.099894 0.075894 0.06917\n",
- "1 Recall@50 0.379790 0.312299 0.28381"
+ " Metrics WMF Family Borda Count WMF Linear Regression \\\n",
+ "0 Precision@100 0.083596 0.067691 \n",
+ "1 Recall@100 0.567710 0.488312 \n",
+ "\n",
+ " WMF Random Forest \n",
+ "0 0.065957 \n",
+ "1 0.441106 "
]
},
"metadata": {},
@@ -3237,10 +3208,10 @@
}
],
"source": [
- "models = [\"WMF Borda Count\", \"WMF Linear Regression\", \"WMF Random Forest\"]\n",
+ "models = [\"WMF Family Borda Count\", \"WMF Linear Regression\", \"WMF Random Forest\"]\n",
"\n",
"result_data = {\n",
- " \"Metrics\": [\"Precision@50\", \"Recall@50\"],\n",
+ " \"Metrics\": [\"Precision@100\", \"Recall@100\"],\n",
"}\n",
"\n",
"test_users = set(test_set.uir_tuple[0])\n",
@@ -3264,7 +3235,7 @@
"# Now let's take a look at the results\n",
"result_df = pd.DataFrame(result_data)\n",
"\n",
- "display(\"WMF Models Comparison\", result_df[[\"Metrics\", \"WMF Borda Count\", \"WMF Linear Regression\", \"WMF Random Forest\"]])"
+ "display(\"WMF Models Comparison\", result_df[[\"Metrics\", \"WMF Family Borda Count\", \"WMF Linear Regression\", \"WMF Random Forest\"]])"
]
},
{
@@ -3272,7 +3243,9 @@
"id": "42885a8b",
"metadata": {},
"source": [
- "With the results in hand, we can now compare the performance of the ensemble models against the individual base models.\n",
+ "However, we also observe that performance varies, and may not always provide an improvement over the individual models.\n",
+ "\n",
+ "One of the other ways that could be explored will be to create an new ensemble, utilizing the many different base models that Cornac supports.\n",
"\n",
"During the development of these models, we find that there are many ways to experiment about to improve the models. However, there is also a risk of overfitting the model to the training data.\n",
"\n",
@@ -3286,29 +3259,21 @@
"source": [
"## 6. Conclusion\n",
"\n",
- "Our results show that **Precision@50** and **Recall@50** can vary significantly across models and ensembling techniques. There’s no one-size-fits-all solution—different datasets may favor different models or ensembles.\n",
+ "Our results show that there’s no one-size-fits-all solution.\n",
"\n",
- "### Key Takeaways\n",
+ "### Which models and configurations perform best?\n",
"\n",
"Testing multiple models and ensemble techniques helps find the best approach for each dataset. While ensembling can improve accuracy, results will depend on how well models complement each other.\n",
"\n",
"- **Try Different Base Models**: Cornac offers a variety of models; experimenting with each helps reveal what works best.\n",
- "- **Explore Ensembling Methods**: Beyond Borda Count, techniques like averaging or stacking may yield better results.\n",
"- **Adjust Model Parameters**: Tuning settings can optimize individual models and enhance ensemble performance.\n",
- "- **Balance Diversity and Simplicity**: Borda Count’s ability to combine diverse predictions can help, but may not always outperform simpler methods.\n",
"\n",
- "### Considerations for Model Ensembling\n",
+ "### Is Ensembling Always Better?\n",
"\n",
"- **Performance vs. Resources**: Ensembles often require more computation, so it’s important to balance resource use with performance gains.\n",
"- **Know When Not to Ensemble**: In some cases, a single well-tuned model may work as well as, or even better than, an ensemble.\n",
"\n",
- "### Questions for Further Exploration\n",
- "\n",
- "- Which base models and configurations perform best?\n",
- "- What makes an ensemble effective?\n",
- "- When should simpler, single models be preferred?\n",
- "\n",
- "These questions guide future experiments as we continue improving recommendation systems."
+ "These questions guide future experiments as we continue experimenting towards better recommendation systems."
]
}
],