Commit a0ea711

Add tests
1 parent 008c6d8 commit a0ea711

4 files changed: +59 -29 lines changed


adapt/parameter_based/_regular.py

Lines changed: 3 additions & 28 deletions

@@ -17,31 +17,6 @@
                          check_network)
 
 
-def get_zeros_network(name=None):
-    """
-    Return a tensorflow Model of two hidden layers
-    with 10 neurons each and relu activations. The
-    last layer is composed of one neuron with linear
-    activation.
-
-    Returns
-    -------
-    tensorflow Model
-    """
-    model = Sequential(name=name)
-    model.add(Flatten())
-    model.add(Dense(10, activation="relu",
-                    kernel_initializer="zeros",
-                    bias_initializer="zeros"))
-    model.add(Dense(10, activation="relu",
-                    kernel_initializer="zeros",
-                    bias_initializer="zeros"))
-    model.add(Dense(1, activation=None,
-                    kernel_initializer="zeros",
-                    bias_initializer="zeros"))
-    return model
-
-
 @make_insert_doc(supervised=True)
 class RegularTransferLR(BaseAdaptEstimator):
     """
@@ -183,20 +158,20 @@ def fit(self, Xt=None, yt=None, **fit_params):
             yt_ndim_below_one_ = True
 
         if beta_src.ndim <= 1:
-            beta_src.reshape(1, -1)
+            beta_src = beta_src.reshape(1, -1)
 
         if beta_src.shape[0] != yt.shape[1]:
             raise ValueError("The number of features of `yt`"
                              " does not match the number of coefs in 'estimator', "
-                             "expected %i, got %i"(beta_src.shape[0], yt.shape[1]))
+                             "expected %i, got %i"%(beta_src.shape[0], yt.shape[1]))
 
         if beta_src.shape[1] != Xt.shape[1]:
             beta_shape = beta_src.shape[1]; Xt_shape = Xt.shape[1]
             if self.estimator_.fit_intercept:
                 beta_shape -= 1; Xt_shape -= 1
             raise ValueError("The number of features of `Xt`"
                              " does not match the number of coefs in 'estimator', "
-                             "expected %i, got %i"(beta_shape, Xt_shape))
+                             "expected %i, got %i"%(beta_shape, Xt_shape))
 
         beta_tgt = []
         for i in range(yt.shape[1]):
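
Note on the two string-formatting fixes above: in the old code the missing % operator makes Python try to call the format string like a function, so a TypeError is raised before the intended ValueError message can be built. A minimal standalone sketch of the difference (illustrative values, not part of the commit):

beta_shape, Xt_shape = 5, 4   # illustrative values only

# Old form: a str object is not callable, so this raises
# "TypeError: 'str' object is not callable" instead of formatting.
try:
    "expected %i, got %i"(beta_shape, Xt_shape)
except TypeError as err:
    print(err)

# Fixed form: the % operator interpolates the values as intended.
print("expected %i, got %i" % (beta_shape, Xt_shape))   # -> expected 5, got 4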

tests/test_finetuning.py

Lines changed: 6 additions & 0 deletions

@@ -56,3 +56,9 @@ def test_finetune():
 
     assert np.abs(fine_tuned.encoder_.get_weights()[0] - model.encoder_.get_weights()[0]).sum() == 0.
     assert np.abs(fine_tuned.encoder_.get_weights()[-1] - model.encoder_.get_weights()[-1]).sum() == 0
+
+
+def test_finetune_pretrain():
+    model = FineTuning(encoder=encoder, task=task, pretrain=True, pretrain__epochs=2,
+                       loss="bce", optimizer="adam", random_state=0)
+    model.fit(Xs, ys, epochs=1, verbose=0)
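
The new test_finetune_pretrain reuses module-level objects (encoder, task, Xs, ys) defined earlier in tests/test_finetuning.py and not shown in this diff. A minimal sketch of what such fixtures could look like, assuming small Keras models and a binary target to match the "bce" loss (hypothetical, not the actual fixtures):

import numpy as np
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense

np.random.seed(0)
Xs = np.random.randn(100, 10)                   # source inputs
ys = (Xs[:, 0] > 0).astype("float32")           # binary labels for "bce"

encoder = Sequential([Dense(10, activation="relu")])    # feature extractor
task = Sequential([Dense(1, activation="sigmoid")])     # classification head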

tests/test_fmmd.py

Lines changed: 17 additions & 1 deletion

@@ -1,6 +1,8 @@
 import numpy as np
+import tensorflow as tf
 
 from adapt.feature_based import fMMD
+from adapt.feature_based._fmmd import _get_optim_function
 
 np.random.seed(0)
 n = 50
@@ -24,4 +26,18 @@ def test_fmmd():
 
     fmmd.set_params(kernel="poly", degree=2, gamma=0.1)
     fmmd.fit_transform(Xs, Xt);
-    assert fmmd.features_scores_[-2:].sum() > 10 * fmmd.features_scores_[:-2].sum()
+    assert fmmd.features_scores_[-2:].sum() > 10 * fmmd.features_scores_[:-2].sum()
+
+
+def test_kernel_fct():
+    fct = _get_optim_function(Xs, Xt, kernel="linear")
+    fct(tf.identity(np.ones(6)))
+
+    fct = _get_optim_function(Xs, Xt, kernel="rbf")
+    fct(tf.identity(np.ones(6)))
+
+    fct = _get_optim_function(Xs, Xt, kernel="poly")
+    fct(tf.identity(np.ones(6)))
+
+
+
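
For context, test_kernel_fct calls the private helper _get_optim_function directly with each supported kernel; the public path is through the fMMD estimator, as test_fmmd does above. A minimal usage sketch, with array shapes chosen here for illustration only:

import numpy as np
from adapt.feature_based import fMMD

np.random.seed(0)
Xs = np.random.randn(50, 6)    # source domain samples
Xt = np.random.randn(50, 6)    # target domain samples

fmmd = fMMD(kernel="linear")
Xs_sel = fmmd.fit_transform(Xs, Xt)    # feature selection driven by the MMD criterion
print(fmmd.features_scores_)           # per-feature scores, as asserted on in test_fmmd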

tests/test_regular.py

Lines changed: 33 additions & 0 deletions

@@ -2,6 +2,7 @@
 Test functions for regular module.
 """
 
+import pytest
 import numpy as np
 from sklearn.linear_model import LinearRegression, LogisticRegression
 import tensorflow as tf
@@ -81,6 +82,26 @@ def test_regularlr_multioutput():
     assert np.all(model.coef_.shape == (2, 5))
     assert np.all(model.intercept_.shape == (2,))
     assert model.score(X, y) > 0.9
+
+
+def test_regularlr_error():
+    np.random.seed(0)
+    Xs = np.random.randn(100, 5)
+    Xt = np.random.randn(100, 5)
+    ys = np.random.randn(100)
+    yt = np.random.randn(100)
+    lr = LinearRegression()
+    lr.fit(Xs, ys)
+    model = RegularTransferLR(lr, lambda_=1.)
+    model.fit(Xt, yt)
+
+    with pytest.raises(ValueError) as excinfo:
+        model.fit(np.random.randn(100, 4), yt)
+    assert "expected 5, got 4" in str(excinfo.value)
+
+    with pytest.raises(ValueError) as excinfo:
+        model.fit(Xt, np.random.randn(100, 2))
+    assert "expected 1, got 2" in str(excinfo.value)
 
 
 def test_regularlc_fit():
@@ -135,3 +156,15 @@ def test_regularnn_fit():
     assert np.sum(np.abs(network.get_weights()[0] - model.get_weights()[0])) < 0.001
     assert np.abs(model.predict(Xt) - yt_reg).sum() > 10
 
+
+def test_regularnn_reg():
+    tf.random.set_seed(0)
+    np.random.seed(0)
+    network = _get_network()
+    network.fit(Xs, ys_reg, epochs=100, batch_size=100, verbose=0)
+    model = RegularTransferNN(network, regularizer="l1")
+    model.fit(Xt, yt_reg, epochs=100, batch_size=100, verbose=0)
+
+    with pytest.raises(ValueError) as excinfo:
+        model = RegularTransferNN(network, regularizer="l3")
+    assert "l1' or 'l2', got, l3" in str(excinfo.value)
