@@ -173,11 +173,11 @@ void ENSEMBLE::train(
     }
     pred.setConstant(this->initialPred);
     pred += offset;
-    this->initial_score = loss(y, pred, loss_function, w, this);
+    this->initial_score = loss(y, pred, loss_function, w, extra_param);
 
     // First tree
-    g = dloss(y, pred, loss_function, this) * w;
-    h = ddloss(y, pred, loss_function, this) * w;
+    g = dloss(y, pred, loss_function, extra_param) * w;
+    h = ddloss(y, pred, loss_function, extra_param) * w;
     this->first_tree = new GBTREE;
     this->first_tree->train(g, h, X, cir_sim, greedy_complexities, learning_rate);
     GBTREE* current_tree = this->first_tree;
@@ -187,7 +187,7 @@ void ENSEMBLE::train(
         verbose,
         1,
         current_tree->getNumLeaves(),
-        loss(y, pred, loss_function, w, this),
+        loss(y, pred, loss_function, w, extra_param),
         this->estimate_generalization_loss(1)
     );
 
@@ -197,8 +197,8 @@ void ENSEMBLE::train(
         if (i % 1 == 0)
            Rcpp::checkUserInterrupt();
         // Calculate gradients
-        g = dloss(y, pred, loss_function, this) * w;
-        h = ddloss(y, pred, loss_function, this) * w;
+        g = dloss(y, pred, loss_function, extra_param) * w;
+        h = ddloss(y, pred, loss_function, extra_param) * w;
         // Check for perfect fit
         if (((g.array())/h.array()).matrix().maxCoeff() < 1e-12){
             // Every perfect step is below threshold
@@ -212,7 +212,7 @@ void ENSEMBLE::train(
         // Calculate expected generalization loss for tree
         expected_loss = tree_expected_test_reduction(new_tree, learning_rate);
         // Update ensemble training loss and ensemble optimism for iteration k-1
-        ensemble_training_loss = loss(y, pred, loss_function, w, this);
+        ensemble_training_loss = loss(y, pred, loss_function, w, extra_param);
         ensemble_approx_training_loss = this->estimate_training_loss(i-1) +
             new_tree->getTreeScore() * (-2)*learning_rate*(learning_rate/2 - 1);
         ensemble_optimism = this->estimate_optimism(i-1) +
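Every hunk in this commit makes the same mechanical substitution: the loss helpers `loss`, `dloss`, and `ddloss` stop receiving the ensemble itself (`this`) and instead take `extra_param` directly, so they no longer need to reach into `ENSEMBLE` for that one value. The declarations are not part of this diff; the sketch below is an assumed reconstruction of the new signatures, with parameter types guessed from the call sites (`Tvec` is the vector alias used throughout the file):

```cpp
#include <Eigen/Dense>
#include <string>

// Assumed alias, matching the Tvec<double> usage in the hunks above.
template <class T>
using Tvec = Eigen::Matrix<T, Eigen::Dynamic, 1>;

// Assumed new declarations: a plain double replaces the old ENSEMBLE* argument.
double loss(Tvec<double>& y, Tvec<double>& pred, std::string loss_function,
            Tvec<double>& w, double extra_param);
Tvec<double> dloss(Tvec<double>& y, Tvec<double>& pred, std::string loss_function,
                   double extra_param);
Tvec<double> ddloss(Tvec<double>& y, Tvec<double>& pred, std::string loss_function,
                    double extra_param);
```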
@@ -253,18 +253,18 @@ void ENSEMBLE::train_from_preds(Tvec<double> &pred, Tvec<double> &y, Tmat<double
     Tvec<double> g(n), h(n);
 
     // Initial prediction
-    g = dloss(y, pred, loss_function, this)*w;
-    h = ddloss(y, pred, loss_function, this)*w;
+    g = dloss(y, pred, loss_function, extra_param)*w;
+    h = ddloss(y, pred, loss_function, extra_param)*w;
     this->initialPred = -g.sum() / h.sum();
     pred = pred.array() + this->initialPred;
-    this->initial_score = loss(y, pred, loss_function, w, this); // (y - pred).squaredNorm() / n;
+    this->initial_score = loss(y, pred, loss_function, w, extra_param); // (y - pred).squaredNorm() / n;
 
     // Prepare cir matrix
     Tmat<double> cir_sim = cir_sim_mat(100, 100);
 
     // First tree
-    g = dloss(y, pred, loss_function, this)*w;
-    h = ddloss(y, pred, loss_function, this)*w;
+    g = dloss(y, pred, loss_function, extra_param)*w;
+    h = ddloss(y, pred, loss_function, extra_param)*w;
     this->first_tree = new GBTREE;
     this->first_tree->train(g, h, X, cir_sim, greedy_complexities, learning_rate_set);
     GBTREE* current_tree = this->first_tree;
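A side note on the `initialPred` line in the hunk above: it is the closed-form second-order (Newton) step for the best constant offset. With per-observation gradients $g_i$ and Hessians $h_i$ of the loss at the current `pred` (weights `w` already folded in), the constant $\eta$ minimizing the quadratic approximation of the total loss is

$$\hat{\eta} = \arg\min_{\eta} \sum_{i=1}^{n} \left( g_i \eta + \tfrac{1}{2} h_i \eta^2 \right) = -\frac{\sum_{i=1}^{n} g_i}{\sum_{i=1}^{n} h_i},$$

which is exactly `-g.sum() / h.sum()`.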
@@ -277,7 +277,7 @@ void ENSEMBLE::train_from_preds(Tvec<double> &pred, Tvec<double> &y, Tmat<double
             std::setprecision(4) <<
             "it: " << 1 <<
             " | n-leaves: " << current_tree->getNumLeaves() <<
-            " | tr loss: " << loss(y, pred, loss_function, w, this) <<
+            " | tr loss: " << loss(y, pred, loss_function, w, extra_param) <<
             " | gen loss: " << this->estimate_generalization_loss(1) <<
             std::endl;
     }
@@ -292,8 +292,8 @@ void ENSEMBLE::train_from_preds(Tvec<double> &pred, Tvec<double> &y, Tmat<double
 
         // TRAINING
         GBTREE* new_tree = new GBTREE();
-        g = dloss(y, pred, loss_function, this)*w;
-        h = ddloss(y, pred, loss_function, this)*w;
+        g = dloss(y, pred, loss_function, extra_param)*w;
+        h = ddloss(y, pred, loss_function, extra_param)*w;
         new_tree->train(g, h, X, cir_sim, greedy_complexities, learning_rate_set);
 
         // EXPECTED LOSS
@@ -310,7 +310,7 @@ void ENSEMBLE::train_from_preds(Tvec<double> &pred, Tvec<double> &y, Tmat<double
                 std::setprecision(4) <<
                 "it: " << i <<
                 " | n-leaves: " << current_tree->getNumLeaves() <<
-                " | tr loss: " << loss(y, pred, loss_function, w, this) <<
+                " | tr loss: " << loss(y, pred, loss_function, w, extra_param) <<
                 " | gen loss: " << this->estimate_generalization_loss(i-1) + expected_loss <<
                 std::endl;
 
@@ -504,7 +504,7 @@ Tvec<double> ENSEMBLE::convergence(Tvec<double> &y, Tmat<double> &X){
     w.setOnes();
 
     // After each update (tree), compute loss
-    loss_val[0] = loss(y, pred, this->loss_function, w, this);
+    loss_val[0] = loss(y, pred, this->loss_function, w, extra_param);
 
     GBTREE* current = this->first_tree;
     for (int k=1; k<(K+1); k++)
@@ -513,7 +513,7 @@ Tvec<double> ENSEMBLE::convergence(Tvec<double> &y, Tmat<double> &X){
         pred = pred + (this->learning_rate) * (current->predict_data(X));
 
         // Compute loss
-        loss_val[k] = loss(y, pred, this->loss_function, w, this);
+        loss_val[k] = loss(y, pred, this->loss_function, w, extra_param);
 
         // Update to next tree
         current = current->next_tree;
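For orientation, the two `convergence` hunks walk the ensemble's linked list of trees, adding each tree's contribution scaled by the learning rate and recording the training loss after every update. Below is a minimal self-contained sketch of that traversal pattern; the `Tree` struct, the `convergence_trace` name, and the squared-error stand-in for the dispatched `loss` call are all illustrative, not the library's API:

```cpp
#include <Eigen/Dense>
#include <vector>

// Illustrative stand-in for GBTREE: one contribution vector per tree,
// linked like first_tree -> next_tree in the real code.
struct Tree {
    Eigen::VectorXd contribution;  // what predict_data(X) would return for a fixed X
    const Tree* next = nullptr;
};

// Mirrors the convergence loop: start from the initial constant prediction,
// add one tree at a time scaled by the learning rate, and record the loss
// after each update (loss_val[0] is the loss before any tree is applied).
std::vector<double> convergence_trace(const Eigen::VectorXd& y,
                                      double initial_pred,
                                      double learning_rate,
                                      const Tree* first_tree) {
    Eigen::VectorXd pred = Eigen::VectorXd::Constant(y.size(), initial_pred);
    auto mse = [&](const Eigen::VectorXd& p) {
        // Stand-in loss; the real loss() dispatches on loss_function and extra_param.
        return (y - p).squaredNorm() / static_cast<double>(y.size());
    };
    std::vector<double> loss_val{mse(pred)};
    for (const Tree* t = first_tree; t != nullptr; t = t->next) {
        pred += learning_rate * t->contribution;
        loss_val.push_back(mse(pred));
    }
    return loss_val;
}
```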