@@ -156,23 +156,25 @@ class IALSRecommender(
156
156
BaseRecommenderWithUserEmbedding ,
157
157
BaseRecommenderWithItemEmbedding ,
158
158
):
159
- r"""Implementation of Implicit Alternating Least Squares(IALS ) or Weighted Matrix Factorization(WMF).
159
+ r"""Implementation of implicit Alternating Least Squares (iALS) or Weighted Matrix Factorization (WMF).
160
160
161
161
By default, it tries to minimize the following loss:
162
162
163
163
.. math ::
164
164
165
- \frac{1}{2} \sum _{u, i \in S} X_ {ui} (\mathbf{u}_u \cdot \mathbf{v}_i - 1) ^ 2
166
- + \frac{\alpha_0}{2} \sum_{u, i} X_{ui} (\mathbf{u}_u \cdot \mathbf{v}_i) ^ 2 +
165
+ \frac{1}{2} \sum_{u, i \in S} c_{ui} (\mathbf{u}_u \cdot \mathbf{v}_i - 1)^2
166
+ + \frac{\alpha_0}{2} \sum_{u, i} (\mathbf{u}_u \cdot \mathbf{v}_i) ^ 2 +
167
167
\frac{\text{reg}}{2} \left( \sum_u (\alpha_0 I + N_u) ^ \nu || \mathbf{u}_u || ^2 + \sum_i (\alpha_0 U + N_i) ^ \nu || \mathbf{v}_i || ^2 \right)
168
168
169
+ where :math:`S` denotes the set of all pairs where :math:`X_{ui}` is non-zero.
170
+
169
171
See the seminal paper:
170
172
171
173
- `Collaborative filtering for implicit feedback datasets
172
174
<http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.167.5120&rep=rep1&type=pdf>`_
173
175
174
176
175
- To speed up the learning procedure, we have also implemented the conjugate gradient descent version following :
177
+ By default it uses a conjugate gradient descent version:
176
178
177
179
- `Applications of the conjugate gradient method for implicit feedback collaborative filtering
178
180
<https://dl.acm.org/doi/abs/10.1145/2043932.2043987>`_
@@ -188,20 +190,26 @@ class IALSRecommender(
188
190
n_components (int, optional):
189
191
The dimension for latent factor. Defaults to 20.
190
192
alpha0 (float, optional):
191
- The "unovserved " weight
193
+ The "unobserved " weight.
192
194
reg (float, optional) :
193
195
Regularization coefficient for both user & item factors. Defaults to 1e-3.
194
196
nu (float, optional) :
195
197
Controls frequency regularization introduced in the paper,
196
198
"Revisiting the Performance of iALS on Item Recommendation Benchmarks".
197
199
confidence_scaling (str, optional) :
198
200
Specifies how to scale the confidence :math:`c_{ui}`. Must be either "none" or "log".
199
- If "none", the non-zero "rating" :math:`r_{ui}` yields
201
+ If "none", the non-zero (not-necessarily 1) :math:`X_{ui}` yields
202
+
200
203
.. math ::
201
- c_{ui} = 1 + \alpha r_{ui}
204
+ c_{ui} = A + X_{ui}
205
+
202
206
If "log",
207
+
203
208
.. math ::
204
- c_{ui} = 1 + \alpha \log (1 + r_{ui} / \epsilon )
209
+ c_{ui} = A + \log (1 + X_{ui} / \epsilon )
210
+
211
+ The constant :math:`A` above will be 0 if ``loss_type`` is ``"IALSPP"``, :math:`\alpha_0` if ``loss_type`` is ``"ORIGINAL"``.
212
+
205
213
Defaults to "none".
206
214
epsilon (float, optional):
207
215
The :math:`\epsilon` parameter for log-scaling described above.
@@ -240,6 +248,18 @@ class IALSRecommender(
240
248
Maximal number of conjugate gradient descent steps during the prediction time,
241
249
i.e., the case when a user unseen at the training time is given as a history matrix.
242
250
Defaults to 5.
251
+
252
+ Examples:
253
+
254
+ >>> from irspack import IALSRecommender, rowwise_train_test_split, Evaluator
255
+ >>> from irspack.utils.sample_data import mf_example_data
256
+ >>> X = mf_example_data(100, 30, random_state=1)
257
+ >>> X_train, X_test = rowwise_train_test_split(X, random_state=0)
258
+ >>> rec = IALSRecommender(X_train)
259
+ >>> rec.learn()
260
+ >>> evaluator=Evaluator(X_test)
261
+ >>> print(evaluator.get_scores(rec, [20]))
262
+ OrderedDict([('hit@20', 1.0), ('recall@20', 0.9003412698412698), ('ndcg@20', 0.6175493479217139), ('map@20', 0.3848785870622406), ('precision@20', 0.3385), ('gini_index@20', 0.0814), ('entropy@20', 3.382497875272383), ('appeared_item@20', 30.0)])
243
263
"""
244
264
245
265
config_class = IALSConfig
0 commit comments