
Commit 0693d5e: update docs
Parent: 0c7632a

173 files changed: +1048 additions, -866 deletions


adapt/base.py

Lines changed: 234 additions & 56 deletions
Large diffs are not rendered by default.

adapt/feature_based/_adda.py

Lines changed: 5 additions & 7 deletions
@@ -123,11 +123,11 @@ class ADDA(BaseAdaptDeep):
 >>> ys = 0.2 * Xs[:, 0]
 >>> yt = 0.2 * Xt[:, 0]
 >>> model = ADDA(random_state=0)
->>> model.fit(Xs, ys, Xt, yt, epochs=100, verbose=0)
->>> model.history_src_["task_t"][-1]
-0.0234...
->>> model.history_["task_t"][-1]
-0.0009...
+>>> model.fit(Xs, ys, Xt, epochs=100, verbose=0)
+>>> np.abs(model.predict_task(Xt, domain="src").ravel() - yt).mean()
+0.1531...
+>>> np.abs(model.predict_task(Xt, domain="tgt").ravel() - yt).mean()
+0.0227...

 See also
 --------

@@ -242,8 +242,6 @@ def train_step(self, data):
 disc_loss += sum(self.discriminator_.losses)
 enc_loss += sum(self.encoder_.losses)

-print(task_loss.shape, enc_loss.shape, disc_loss.shape)
-
 # Compute gradients
 trainable_vars_task = self.task_.trainable_variables
 trainable_vars_enc = self.encoder_.trainable_variables

adapt/feature_based/_cdan.py

Lines changed: 11 additions & 11 deletions
@@ -43,10 +43,10 @@ class CDAN(BaseAdaptDeep):
 .. math::

 \min_{\phi, F} & \; \mathcal{L}_{task}(F(\phi(X_S)), y_S) -
-\lambda \\left( \log(1 - D(\phi(X_S) \\bigotimes F(X_S)) + \\\\
-\log(D(\phi(X_T) \\bigotimes F(X_T)) \\right) \\\\
-\max_{D} & \; \log(1 - D(\phi(X_S) \\bigotimes F(X_S)) + \\\\
-\log(D(\phi(X_T) \\bigotimes F(X_T))
+\lambda \\left( \log(1 - D(\phi(X_S) \\otimes F(X_S)) +
+\log(D(\phi(X_T) \\otimes F(X_T)) \\right) \\\\
+\max_{D} & \; \log(1 - D(\phi(X_S) \\otimes F(X_S)) +
+\log(D(\phi(X_T) \\otimes F(X_T))

 Where:

@@ -55,7 +55,7 @@ class CDAN(BaseAdaptDeep):
 - :math:`\phi, F, D` are respectively the **encoder**, the **task**
 and the **discriminator** networks
 - :math:`\lambda` is the trade-off parameter.
-- :math:`\phi(X_S) \\bigotimes F(X_S)` is the multilinear map between
+- :math:`\phi(X_S) \\otimes F(X_S)` is the multilinear map between
 the encoded sources and the task predictions.

 In CDAN+E, an entropy regularization is added to prioritize the

@@ -65,15 +65,15 @@ class CDAN(BaseAdaptDeep):
 .. math::

 \min_{\phi, F} & \; \mathcal{L}_{task}(F(\phi(X_S)), y_S) -
-\lambda \\left( \log(1 - W_S D(\phi(X_S) \\bigotimes F(X_S)) + \\\\
-W_T \log(D(\phi(X_T) \\bigotimes F(X_T)) \\right) \\\\
-\max_{D} & \; \log(1 - W_S D(\phi(X_S) \\bigotimes F(X_S)) + \\\\
-W_T \log(D(\phi(X_T) \\bigotimes F(X_T))
+\lambda \\left( \log(1 - W_S D(\phi(X_S) \\otimes F(X_S)) +
+W_T \log(D(\phi(X_T) \\otimes F(X_T)) \\right) \\\\
+\max_{D} & \; \log(1 - W_S D(\phi(X_S) \\otimes F(X_S)) +
+W_T \log(D(\phi(X_T) \\otimes F(X_T))

 Where:

-- :math:`W_S = 1+\exp{-\\text{entropy}(F(X_S))}`
-- :math:`\\text{entropy}(F(X_S)) = - \sum_{i < C} F(X_S)_i \log(F(X_S)_i)`
+- :math:`W_S = 1+\exp^{-\\text{ent}(F(X_S))}`
+- :math:`\\text{ent}(F(X_S)) = - \sum_{i < C} F(X_S)_i \log(F(X_S)_i)`
 with :math:`C` the number of classes.

 .. figure:: ../_static/images/cdan.png
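
A side note on the objective above: the discriminator D does not see the encoded features alone but the multilinear map phi(X) ⊗ F(X) between the encoding and the task predictions, and CDAN+E additionally weights each sample by W = 1 + exp(-ent(F(X))). The NumPy sketch below only illustrates these two formulas; multilinear_map and entropy_weights are hypothetical helper names, not part of ADAPT's API.

import numpy as np

def multilinear_map(features, predictions):
    # Outer product between encoded features (n, d) and task
    # predictions (n, C), flattened to (n, d * C): the discriminator
    # input described in the docstring above.
    return np.einsum("nd,nc->ndc", features, predictions).reshape(len(features), -1)

def entropy_weights(predictions, eps=1e-12):
    # ent(F(X)) = -sum_i F(X)_i log(F(X)_i) per sample, then
    # W = 1 + exp(-ent): confident predictions get larger weights.
    ent = -np.sum(predictions * np.log(predictions + eps), axis=1)
    return 1.0 + np.exp(-ent)

# Toy check: 4 samples, 3-dimensional encoding, 2-class softmax output.
rng = np.random.default_rng(0)
phi_X = rng.normal(size=(4, 3))
F_X = rng.dirichlet(np.ones(2), size=4)
print(multilinear_map(phi_X, F_X).shape)  # (4, 6)
print(entropy_weights(F_X))               # values in (1, 2]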

adapt/feature_based/_coral.py

Lines changed: 7 additions & 7 deletions
@@ -41,8 +41,8 @@ class CORAL(BaseAdaptEstimator):
 and the features transformation can be computed through this
 four steps algorithm:

-- :math:`C_S = \\lambda Cov(X_S) + I_p`
-- :math:`C_T = \\lambda Cov(X_T) + I_p`
+- :math:`C_S = Cov(X_S) + \\lambda I_p`
+- :math:`C_T = Cov(X_T) + \\lambda I_p`
 - :math:`X_S = X_S C_S^{-\\frac{1}{2}}`
 - :math:`X_S^{enc} = X_S C_T^{\\frac{1}{2}}`

@@ -80,22 +80,22 @@ class CORAL(BaseAdaptEstimator):
 >>> yt = np.zeros(100)
 >>> ys[Xs[:, 1]>0] = 1
 >>> yt[(Xt[:, 1]-0.5*Xt[:, 0])>0] = 1
->>> model = CORAL(lambda_=0.)
+>>> model = CORAL(lambda_=1000.)
 >>> model.fit(Xs, ys, Xt);
 Covariance Matrix alignement...
 Previous covariance difference: 0.258273
-New covariance difference: 0.258273
+New covariance difference: 0.258072
 Fit estimator...
 >>> model.estimator_.score(Xt, yt)
 0.5750...
->>> model = CORAL(lambda_=100.)
+>>> model = CORAL(lambda_=0.)
 >>> model.fit(Xs, ys, Xt);
 Covariance Matrix alignement...
 Previous covariance difference: 0.258273
-New covariance difference: 0.040564
+New covariance difference: 0.000000
 Fit estimator...
 >>> model.estimator_.score(Xt, yt)
-0.5992...
+0.5717...

 See also
 --------
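
For context on the corrected steps above: the covariances are regularized with lambda * I_p (rather than scaled by lambda), the sources are whitened with C_S^{-1/2} and then re-colored with C_T^{1/2}. The sketch below is a plain NumPy illustration of this four-step transformation under those assumptions; coral_transform and sym_matrix_power are made-up names and do not reflect ADAPT's internal code.

import numpy as np

def sym_matrix_power(C, power):
    # Matrix power of a symmetric positive-definite matrix
    # through its eigendecomposition.
    vals, vecs = np.linalg.eigh(C)
    return (vecs * vals ** power) @ vecs.T

def coral_transform(Xs, Xt, lambda_=1.0):
    # Steps 1-2: regularized covariances C = Cov(X) + lambda * I_p.
    p = Xs.shape[1]
    Cs = np.cov(Xs, rowvar=False) + lambda_ * np.eye(p)
    Ct = np.cov(Xt, rowvar=False) + lambda_ * np.eye(p)
    # Step 3: whiten the sources with C_S^{-1/2}.
    # Step 4: re-color them with C_T^{1/2}.
    return Xs @ sym_matrix_power(Cs, -0.5) @ sym_matrix_power(Ct, 0.5)

# Toy check: after alignment the source covariance should be close
# to the target covariance (up to the lambda_ regularization).
rng = np.random.default_rng(0)
Xs = rng.normal(size=(200, 2)) @ np.array([[1.0, 0.0], [0.3, 0.5]])
Xt = rng.normal(size=(200, 2))
Xs_enc = coral_transform(Xs, Xt, lambda_=1e-3)
print(np.round(np.cov(Xs_enc, rowvar=False), 3))
print(np.round(np.cov(Xt, rowvar=False), 3))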

adapt/feature_based/_dann.py

Lines changed: 6 additions & 8 deletions
@@ -96,13 +96,13 @@ class DANN(BaseAdaptDeep):
 >>> ys = 0.2 * Xs[:, 0]
 >>> yt = 0.2 * Xt[:, 0]
 >>> model = DANN(lambda_=0., random_state=0)
->>> model.fit(Xs, ys, Xt, yt, epochs=100, verbose=0)
->>> model.history_["task_t"][-1]
-0.0240...
+>>> model.fit(Xs, ys, Xt, epochs=100, verbose=0)
+>>> model.score_estimator(Xt, yt)
+0.0231...
 >>> model = DANN(lambda_=0.1, random_state=0)
->>> model.fit(Xs, ys, Xt, yt, epochs=100, verbose=0)
->>> model.history_["task_t"][-1]
-0.0022...
+>>> model.fit(Xs, ys, Xt, epochs=100, verbose=0)
+>>> model.score_estimator(Xt, yt)
+0.0010...

 See also
 --------

@@ -185,7 +185,6 @@ def train_step(self, data):
 disc_loss += sum(self.discriminator_.losses)
 enc_loss += sum(self.encoder_.losses)

-print(task_loss.shape, enc_loss.shape, disc_loss.shape)

 # Compute gradients
 trainable_vars_task = self.task_.trainable_variables

@@ -209,7 +208,6 @@ def train_step(self, data):
 disc_metrics = self._get_disc_metrics(ys_disc, yt_disc)
 logs.update({"disc_loss": disc_loss})
 logs.update(disc_metrics)
-logs.update({"lambda": _lambda_})
 return logs

adapt/feature_based/_deepcoral.py

Lines changed: 7 additions & 9 deletions
@@ -6,7 +6,7 @@
 import tensorflow as tf

 from adapt.base import BaseAdaptDeep, make_insert_doc
-from adapt.utils import check_network
+from adapt.utils import check_network, get_default_encoder, get_default_task

 EPS = np.finfo(np.float32).eps

@@ -93,13 +93,13 @@ class DeepCORAL(BaseAdaptDeep):
 >>> ys[Xs[:, 1]>0] = 1
 >>> yt[(Xt[:, 1]-0.5*Xt[:, 0])>0] = 1
 >>> model = DeepCORAL(lambda_=0., random_state=0)
->>> model.fit(Xs, ys, Xt, yt, epochs=500, batch_size=100, verbose=0)
->>> model.history_["task_t"][-1]
-1.30188e-05
+>>> model.fit(Xs, ys, Xt, epochs=500, batch_size=100, verbose=0)
+>>> model.score_estimator(Xt, yt)
+0.0574...
 >>> model = DeepCORAL(lambda_=1., random_state=0)
->>> model.fit(Xs, ys, Xt, yt, epochs=500, batch_size=100, verbose=0)
->>> model.history_["task_t"][-1]
-5.4704474e-06
+>>> model.fit(Xs, ys, Xt, epochs=500, batch_size=100, verbose=0)
+>>> model.score_estimator(Xt, yt)
+0.0649...

 See also
 --------

@@ -200,8 +200,6 @@ def train_step(self, data):

 loss += sum(self.task_.losses) + sum(self.encoder_.losses)

-print(cov_src.shape, cov_tgt.shape, disc_loss.shape)
-
 # Compute gradients
 trainable_vars = self.task_.trainable_variables + self.encoder_.trainable_variables

adapt/feature_based/_fe.py

Lines changed: 2 additions & 2 deletions
@@ -80,9 +80,9 @@ class FE(BaseAdaptEstimator):
 Previous shape: (100, 1)
 New shape: (100, 3)
 Fit estimator...
->>> np.abs(model.predict(Xt, "src") - yt).mean()
+>>> np.abs(model.predict(Xt, domain="src") - yt).mean()
 0.9846...
->>> np.abs(model.predict(Xt, "tgt") - yt).mean()
+>>> np.abs(model.predict(Xt, domain="tgt") - yt).mean()
 0.1010...

 References

adapt/feature_based/_mcd.py

Lines changed: 0 additions & 1 deletion
@@ -174,7 +174,6 @@ def train_step(self, data):
 disc_loss += sum(self.discriminator_.losses)
 enc_loss += sum(self.encoder_.losses)

-print(discrepancy.shape, enc_loss.shape, disc_loss.shape)

 # Compute gradients
 trainable_vars_task = self.task_.trainable_variables

adapt/feature_based/_mdd.py

Lines changed: 0 additions & 2 deletions
@@ -120,7 +120,6 @@ def train_step(self, data):

 # Compute Disc
 if name == "categorical_crossentropy":
-print("ok")
 argmax_src = tf.one_hot(tf.math.argmax(ys_pred, -1),
 tf.shape(ys_pred)[1])
 argmax_tgt = tf.one_hot(tf.math.argmax(yt_pred, -1),

@@ -147,7 +146,6 @@ def train_step(self, data):
 disc_loss += sum(self.discriminator_.losses)
 enc_loss += sum(self.encoder_.losses)

-print(task_loss.shape, enc_loss.shape, disc_loss.shape)

 # Compute gradients
 trainable_vars_task = self.task_.trainable_variables

adapt/feature_based/_wdgrl.py

Lines changed: 4 additions & 6 deletions
@@ -90,12 +90,12 @@ class WDGRL(BaseAdaptDeep):
 >>> yt = 0.2 * Xt[:, 0]
 >>> model = WDGRL(lambda_=0., random_state=0)
 >>> model.fit(Xs, ys, Xt, yt, epochs=100, verbose=0)
->>> model.history_["task_t"][-1]
-0.0223...
+>>> model.score_estimator(Xt, yt)
+0.0231...
 >>> model = WDGRL(lambda_=1, random_state=0)
 >>> model.fit(Xs, ys, Xt, yt, epochs=100, verbose=0)
->>> model.history_["task_t"][-1]
-0.0044...
+>>> model.score_estimator(Xt, yt)
+0.0014...

 See also
 --------

@@ -181,8 +181,6 @@ def train_step(self, data):
 disc_loss += sum(self.discriminator_.losses)
 enc_loss += sum(self.encoder_.losses)

-print(task_loss.shape, enc_loss.shape, disc_loss.shape)
-
 # Compute gradients
 trainable_vars_task = self.task_.trainable_variables
 trainable_vars_enc = self.encoder_.trainable_variables
