@@ -915,10 +915,13 @@ def _init_fb_params(self):
        self._cs[:] = 1.0

    def _init_random_responsibility(self, x):
-        self.xi_mats[:] = self.rng.dirichlet(np.ones(self.c_num_classes**2), self.xi_mats.shape[0]).reshape(self.xi_mats.shape)
-        self.xi_mats[0] = 0.0
-        self.gamma_vecs[:] = self.xi_mats.sum(axis=1)
-        self.gamma_vecs[0] = self.xi_mats[1].sum(axis=1)
+        if self._length == 1:
+            self.gamma_vecs[0] = self.rng.dirichlet(np.ones(self.c_num_classes))
+        else:
+            self.xi_mats[:] = self.rng.dirichlet(np.ones(self.c_num_classes**2), self.xi_mats.shape[0]).reshape(self.xi_mats.shape)
+            self.xi_mats[0] = 0.0
+            self.gamma_vecs[:] = self.xi_mats.sum(axis=1)
+            self.gamma_vecs[0] = self.xi_mats[1].sum(axis=1)
        self._calc_n_m_x_bar_s(x)

    def _init_subsampling(self, x):
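
The new branch guards against length-1 sequences: with a single observation there are no transitions, so xi_mats carries no usable mass and gamma_vecs must be drawn directly from a Dirichlet distribution. A minimal standalone sketch of the two paths, where rng, c_num_classes, and length stand in for the corresponding attributes:

    import numpy as np

    rng = np.random.default_rng(0)
    c_num_classes, length = 3, 5

    if length == 1:
        # no transitions exist; draw the single state distribution directly
        gamma_vecs = rng.dirichlet(np.ones(c_num_classes))[np.newaxis, :]
    else:
        # one joint distribution over (previous state, current state) per step
        xi_mats = rng.dirichlet(np.ones(c_num_classes**2), length).reshape(
            length, c_num_classes, c_num_classes)
        xi_mats[0] = 0.0                         # no transition into t=0
        gamma_vecs = xi_mats.sum(axis=1)         # marginalize the source state
        gamma_vecs[0] = xi_mats[1].sum(axis=1)   # t=0 marginal from xi at t=1
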
@@ -1195,27 +1198,30 @@ def visualize_posterior(self):
        >>> learn_model.update_posterior(x)
        >>> learn_model.visualize_posterior()
        hn_alpha_vec:
-        [103.89444088  97.10555912]
+        [153.65657765  47.34342235]
        E[pi_vec]:
-        [0.51688777 0.48311223]
+        [0.76446059 0.23553941]
        hn_zeta_vecs:
-        [[0.90892737 0.09107263]
-         [0.08810659 0.91189341]]
+        [[147.64209251   5.51848792]
+         [  5.51448518  42.3249344 ]]
+        E[a_mat]:
+        [[0.96396927 0.03603073]
+         [0.11527074 0.88472926]]
        hn_m_vecs (equivalent to E[mu_vecs]):
-        [[-2.00135008]
-         [ 1.97461369]]
+        [[ 1.99456861]
+         [-2.15581846]]
        hn_kappas:
-        [104.39444088  97.60555912]
+        [154.15657765  47.84342235]
        hn_nus:
-        [104.39444088  97.60555912]
+        [154.15657765  47.84342235]
        hn_w_mats:
-        [[[0.00890667]]
+        [[[0.00525177]]

-         [[0.01009789]]]
+         [[0.02569298]]]
        E[lambda_mats]=
-        [[[0.92980714]]
+        [[[0.8095951 ]]

-         [[0.98560985]]]
+         [[1.22924015]]]

        .. image:: ./images/hiddenmarkovnormal_posterior.png
        """
@@ -1224,6 +1230,8 @@ def visualize_posterior(self):
        print("E[pi_vec]:")
        print(f"{self.hn_eta_vec / self.hn_eta_vec.sum()}")
        print("hn_zeta_vecs:")
+        print(f"{self.hn_zeta_vecs}")
+        print("E[a_mat]:")
        print(f"{self.hn_zeta_vecs / self.hn_zeta_vecs.sum(axis=1, keepdims=True)}")
        print("hn_m_vecs (equivalent to E[mu_vecs]):")
        print(f"{self.hn_m_vecs}")
@@ -1285,47 +1293,107 @@ def get_p_params(self):
        Returns
        -------
        p_params : dict of {str: numpy.ndarray}
-            * ``"xxx"`` : the value of ``self.xxx``
+            * ``"p_a_mat"`` : the value of ``self.p_a_mat``
+            * ``"p_mu_vecs"`` : the value of ``self.p_mu_vecs``
+            * ``"p_nus"`` : the value of ``self.p_nus``
+            * ``"p_lambda_mats"`` : the value of ``self.p_lambda_mats``
        """
-        return {'p_mu_vecs': self.p_mu_vecs,
+        return {'p_a_mat': self.p_a_mat,
+                'p_mu_vecs': self.p_mu_vecs,
                'p_nus': self.p_nus,
                'p_lambda_mats': self.p_lambda_mats}

    def calc_pred_dist(self):
        """Calculate the parameters of the predictive distribution."""
-        pass
+        self.p_a_mat[:] = self.hn_zeta_vecs / self.hn_zeta_vecs.sum(axis=1, keepdims=True)
+        self.p_mu_vecs[:] = self.hn_m_vecs
+        self.p_nus[:] = self.hn_nus - self.c_degree + 1
+        self.p_lambda_mats[:] = (self.hn_kappas * self.p_nus / (self.hn_kappas + 1))[:, np.newaxis, np.newaxis] * self.hn_w_mats
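
These are the standard Student's t predictive parameters of a Gauss-Wishart posterior: degrees of freedom hn_nus - c_degree + 1 and precision hn_kappas * p_nus / (hn_kappas + 1) * hn_w_mats. A small standalone check under assumed toy hyperparameter values (scipy parameterizes the t by a shape matrix, i.e. the inverse of this precision):

    import numpy as np
    from scipy.stats import multivariate_t

    c_degree = 1
    hn_kappa, hn_nu = 154.16, 154.16          # toy posterior hyperparameters
    hn_m = np.array([1.99])
    hn_w = np.array([[0.00525]])

    p_nu = hn_nu - c_degree + 1
    p_lambda = (hn_kappa * p_nu / (hn_kappa + 1)) * hn_w  # t precision matrix

    # density of the predictive t at its own mode, as used by the 0-1 loss below
    val = multivariate_t.pdf(hn_m, loc=hn_m,
                             shape=np.linalg.inv(p_lambda), df=p_nu)
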

    def make_prediction(self, loss="squared"):
        """Predict a new data point under the given criterion.

        Parameters
        ----------
        loss : str, optional
-            Loss function underlying the Bayes risk function, by default \"xxx\".
-            This function supports \"xxx\" and \"xxx\".
+            Loss function underlying the Bayes risk function, by default \"squared\".
+            This function supports \"squared\" and \"0-1\".

        Returns
        -------
        predicted_value : {float, numpy.ndarray}
            The predicted value under the given loss function.
        """
-        pass
+        if loss == "squared":
+            return np.sum((self.gamma_vecs[-1] @ self.p_a_mat)[:, np.newaxis] * self.p_mu_vecs, axis=0)
+        elif loss == "0-1":
+            tmp_max = -1.0
+            tmp_argmax = np.empty([self.c_degree])
+            for k in range(self.c_num_classes):
+                val = ss_multivariate_t.pdf(x=self.p_mu_vecs[k],
+                                            loc=self.p_mu_vecs[k],
+                                            shape=np.linalg.inv(self.p_lambda_mats[k]),
+                                            df=self.p_nus[k])
+                if val * (self.gamma_vecs[-1] @ self.p_a_mat)[k] > tmp_max:
+                    tmp_argmax[:] = self.p_mu_vecs[k]
+                    tmp_max = val * (self.gamma_vecs[-1] @ self.p_a_mat)[k]
+            return tmp_argmax
+        else:
+            raise(CriteriaError(f"loss={loss} is unsupported. "
+                                + "This function supports \"squared\" and \"0-1\"."))

-    def pred_and_update(self, x, loss="squared"):
+    def pred_and_update(
+        self,
+        x,
+        loss="squared",
+        max_itr=100,
+        num_init=10,
+        tolerance=1.0E-8,
+        init_type='random_responsibility'
+    ):
        """Predict a new data point and update the posterior sequentially.

        h0_params will be overwritten by current hn_params
        before updating hn_params by x.

        Parameters
        ----------
+        x : numpy.ndarray
+            It must be a `c_degree`-dimensional vector.
+        loss : str, optional
+            Loss function underlying the Bayes risk function, by default \"squared\".
+            This function supports \"squared\" and \"0-1\".
+        max_itr : int, optional
+            maximum number of iterations, by default 100
+        num_init : int, optional
+            number of initializations, by default 10
+        tolerance : float, optional
+            convergence criterion of the variational lower bound, by default 1.0E-8
+        init_type : str, optional
+            type of initialization, by default 'random_responsibility'
+            * 'random_responsibility': randomly assign responsibility to xi_mats and gamma_vecs
+            * 'subsampling': for each latent class, extract a subsample whose size is int(np.sqrt(x.shape[0])),
+              and use its mean and covariance matrix as initial values of hn_m_vecs and hn_lambda_mats.

        Returns
        -------
        predicted_value : {float, numpy.ndarray}
            The predicted value under the given loss function.
        """
-        pass
+        _check.float_vec(x, 'x', DataFormatError)
+        if x.shape != (self.c_degree,):
+            raise(DataFormatError(f"x must be a 1-dimensional float array whose size is c_degree: {self.c_degree}."))
+        self.calc_pred_dist()
+        prediction = self.make_prediction(loss=loss)
+        self.overwrite_h0_params()
+        self.update_posterior(
+            x[np.newaxis, :],
+            max_itr=max_itr,
+            num_init=num_init,
+            tolerance=tolerance,
+            init_type=init_type
+        )
+        return prediction
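
A hedged usage sketch of the new sequential interface, assuming this module's LearnModel constructor accepts c_num_classes and c_degree as suggested by the docstring examples above:

    import numpy as np
    from bayesml import hiddenmarkovnormal

    learn_model = hiddenmarkovnormal.LearnModel(c_num_classes=2, c_degree=1)
    stream = np.random.default_rng(1).normal(size=(20, 1))
    for x_t in stream:
        # predict the next point, then fold it into the posterior
        pred = learn_model.pred_and_update(x_t, loss="squared", num_init=3)
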

    def estimate_latent_vars(self, x, loss=''):
        """Estimate latent variables under the given criterion.