Skip to content

Commit 71f839c

Browse files
Update docs
1 parent 27e1320 commit 71f839c

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

43 files changed

+283
-105
lines changed

.gitignore

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,4 +17,6 @@ var/
1717
*.egg-info/
1818
.installed.cfg
1919
*.egg
20-
docs_build/
20+
docs_build/
21+
docs/html/
22+
docs/doctrees/

adapt/base.py

Lines changed: 82 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -59,8 +59,12 @@
5959
"""
6060
)
6161

62+
base_doc_Xt = """
63+
Xt : numpy array (default=None)
64+
Target input data.
65+
"""
6266

63-
base_doc_1 = """
67+
base_doc_Xt_yt = """
6468
Xt : numpy array (default=None)
6569
Target input data.
6670
@@ -86,8 +90,53 @@
8690
``_get_legal_params(params)``.
8791
"""
8892

93+
base_doc_other_params="""
94+
Yields
95+
------
96+
optimizer : str or instance of tf.keras.optimizers (default="rmsprop")
97+
Optimizer for the task. It should be an
98+
instance of tf.keras.optimizers as:
99+
``tf.keras.optimizers.SGD(0.001)`` or
100+
``tf.keras.optimizers.Adam(lr=0.001, beta_1=0.5)``.
101+
A string can also be given as ``"adam"``.
102+
Default optimizer is ``rmsprop``.
103+
104+
loss : str or instance of tf.keras.losses (default="mse")
105+
Loss for the task. It should be an
106+
instance of tf.keras.losses as:
107+
``tf.keras.losses.MeanSquaredError()`` or
108+
``tf.keras.losses.CategoricalCrossentropy()``.
109+
A string can also be given as ``"mse"`` or
110+
``categorical_crossentropy``.
111+
Default loss is ``mse``.
112+
113+
metrics : list of str or list of tf.keras.metrics.Metric instance
114+
List of metrics to be evaluated by the model during training
115+
and testing. Typically you will use ``metrics=['accuracy']``.
116+
117+
optimizer_enc : str or instance of tf.keras.optimizers
118+
If the Adapt Model has an ``encoder`` attribute,
119+
a specific optimizer for the ``encoder`` network can
120+
be given. Typically, this parameter can be used to
121+
give a smaller learning rate to the encoder.
122+
If not specified, ``optimizer_enc=optimizer``.
123+
124+
optimizer_disc : str or instance of tf.keras.optimizers
125+
If the Adapt Model has a ``discriminator`` attribute,
126+
a specific optimizer for the ``discriminator`` network can
127+
be given. If not specified, ``optimizer_disc=optimizer``.
128+
129+
kwargs : key, value arguments
130+
Any arguments of the ``fit`` method from the Tensorflow
131+
Model can be given, as ``epochs`` and ``batch_size``.
132+
Specific arguments from ``optimizer`` can also be given
133+
as ``learning_rate`` or ``beta_1`` for ``Adam``.
134+
This allows to perform ``GridSearchCV`` from scikit-learn
135+
on these arguments.
136+
"""
137+
89138

90-
def make_insert_doc(estimators=["estimator"]):
139+
def make_insert_doc(estimators=["estimator"], supervised=False):
91140
"""
92141
Abstract for adding common parameters
93142
to the docstring
@@ -101,8 +150,15 @@ def make_insert_doc(estimators=["estimator"]):
101150
-------
102151
func
103152
"""
104-
105153
def insert_base_doc(func):
154+
# Change signature of Deep Model
155+
if "BaseAdaptDeep" in func.__bases__[0].__name__:
156+
sign = inspect.signature(func.__init__)
157+
parameters = dict(sign.parameters)
158+
parameters.pop("self", None)
159+
sign = sign.replace(parameters=list(parameters.values()))
160+
func.__signature__ = sign
161+
106162
doc = func.__doc__
107163
if "Parameters" in doc:
108164
splits = doc.split("Parameters")
@@ -129,11 +185,21 @@ def insert_base_doc(func):
129185
doc_est = ""
130186
for est in estimators:
131187
doc_est += base_doc_est[est]
188+
189+
if supervised:
190+
doc_1 = base_doc_Xt_yt
191+
else:
192+
doc_1 = base_doc_Xt
193+
194+
doc_2 = base_doc_2
195+
if "BaseAdaptDeep" in func.__bases__[0].__name__:
196+
doc_2 += base_doc_other_params
197+
132198
splits[1] = (
133199
splits[1][:i-1]+
134-
doc_est+base_doc_1+
200+
doc_est+doc_1+
135201
splits[1][i-1:j+1]+
136-
base_doc_2+
202+
doc_2+
137203
splits[1][j+1:]
138204
)
139205
new_doc = splits[0]+"Parameters"+splits[1]
@@ -362,11 +428,11 @@ def fit(self, X, y, Xt=None, yt=None, domains=None, **fit_params):
362428
363429
For feature-based models, the transformation of the
364430
input features ``Xs`` and ``Xt`` is first fitted. In a second
365-
stage, the ``estimator`` is fitted on the transformed features.
431+
stage, the ``estimator_`` is fitted on the transformed features.
366432
367433
For instance-based models, source importance weights are
368434
first learned based on ``Xs, ys`` and ``Xt``. In a second
369-
stage, the ``estimator`` is fitted on ``Xs, ys`` with the learned
435+
stage, the ``estimator_`` is fitted on ``Xs, ys`` with the learned
370436
importance weights.
371437
372438
Parameters
@@ -382,8 +448,9 @@ def fit(self, X, y, Xt=None, yt=None, domains=None, **fit_params):
382448
given in `init` is used.
383449
384450
yt : array (default=None)
385-
Target input data. If None, the `Xt` argument
386-
given in `init` is used.
451+
Target input data. Only needed for supervised
452+
and semi-supervised Adapt model.
453+
If None, the `yt` argument given in `init` is used.
387454
388455
domains : array (default=None)
389456
Vector giving the domain for each source
@@ -851,6 +918,9 @@ def fit(self, X, y=None, Xt=None, yt=None, domains=None, **fit_params):
851918
"""
852919
Fit Model. Note that ``fit`` does not reset
853920
the model but extend the training.
921+
922+
Notice also that the compile method will be called
923+
if the model has not been compiled yet.
854924
855925
Parameters
856926
----------
@@ -865,8 +935,9 @@ def fit(self, X, y=None, Xt=None, yt=None, domains=None, **fit_params):
865935
given in `init` is used.
866936
867937
yt : array (default=None)
868-
Target input data. If None, the `Xt` argument
869-
given in `init` is used.
938+
Target input data. Only needed for supervised
939+
and semi-supervised Adapt model.
940+
If None, the `yt` argument given in `init` is used.
870941
871942
domains : array (default=None)
872943
Vector giving the domain for each source

adapt/feature_based/_adda.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -145,7 +145,6 @@ def __init__(self,
145145
task=None,
146146
discriminator=None,
147147
Xt=None,
148-
yt=None,
149148
pretrain=True,
150149
tol=0.001,
151150
copy=True,

adapt/feature_based/_ccsa.py

Lines changed: 13 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -8,25 +8,27 @@
88
EPS = np.finfo(np.float32).eps
99

1010
def pairwise_y(X, Y):
11-
batch_size = tf.shape(X)[0]
11+
batch_size_x = tf.shape(X)[0]
12+
batch_size_y = tf.shape(Y)[0]
1213
dim = tf.reduce_prod(tf.shape(X)[1:])
13-
X = tf.reshape(X, (batch_size, dim))
14-
Y = tf.reshape(Y, (batch_size, dim))
15-
X = tf.tile(tf.expand_dims(X, -1), [1, 1, batch_size])
16-
Y = tf.tile(tf.expand_dims(Y, -1), [1, 1, batch_size])
14+
X = tf.reshape(X, (batch_size_x, dim))
15+
Y = tf.reshape(Y, (batch_size_y, dim))
16+
X = tf.tile(tf.expand_dims(X, -1), [1, 1, batch_size_y])
17+
Y = tf.tile(tf.expand_dims(Y, -1), [1, 1, batch_size_x])
1718
return tf.reduce_sum(tf.abs(X-tf.transpose(Y)), 1)/2
1819

1920
def pairwise_X(X, Y):
20-
batch_size = tf.shape(X)[0]
21+
batch_size_x = tf.shape(X)[0]
22+
batch_size_y = tf.shape(Y)[0]
2123
dim = tf.reduce_prod(tf.shape(X)[1:])
22-
X = tf.reshape(X, (batch_size, dim))
23-
Y = tf.reshape(Y, (batch_size, dim))
24-
X = tf.tile(tf.expand_dims(X, -1), [1, 1, batch_size])
25-
Y = tf.tile(tf.expand_dims(Y, -1), [1, 1, batch_size])
24+
X = tf.reshape(X, (batch_size_x, dim))
25+
Y = tf.reshape(Y, (batch_size_y, dim))
26+
X = tf.tile(tf.expand_dims(X, -1), [1, 1, batch_size_y])
27+
Y = tf.tile(tf.expand_dims(Y, -1), [1, 1, batch_size_x])
2628
return tf.reduce_sum(tf.square(X-tf.transpose(Y)), 1)
2729

2830

29-
@make_insert_doc(["encoder", "task"])
31+
@make_insert_doc(["encoder", "task"], supervised=True)
3032
class CCSA(BaseAdaptDeep):
3133
"""
3234
CCSA :

adapt/feature_based/_coral.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -111,7 +111,6 @@ class CORAL(BaseAdaptEstimator):
111111
def __init__(self,
112112
estimator=None,
113113
Xt=None,
114-
yt=None,
115114
lambda_=1e-5,
116115
copy=True,
117116
verbose=1,

adapt/feature_based/_dann.py

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -58,13 +58,8 @@ class DANN(BaseAdaptDeep):
5858
5959
Parameters
6060
----------
61-
lambda_ : float (default=0.1)
61+
lambda_ : float or tensorflow Variable (default=0.1)
6262
Trade-off parameter.
63-
64-
gamma : float (default=10.0)
65-
Increase rate parameter.
66-
Give the increase rate of the trade-off parameter if
67-
``lambda_`` is set to ``None``.
6863
6964
Attributes
7065
----------

adapt/feature_based/_deepcoral.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@ class DeepCORAL(BaseAdaptDeep):
5757
5858
Parameters
5959
----------
60-
lambda_ : float (default=1.)
60+
lambda_ : float or tensorflow Variable (default=1.)
6161
Trade-off parameter.
6262
6363
match_mean : bool (default=False)
@@ -116,7 +116,6 @@ def __init__(self,
116116
encoder=None,
117117
task=None,
118118
Xt=None,
119-
yt=None,
120119
lambda_=1.,
121120
match_mean=False,
122121
copy=True,

adapt/feature_based/_fe.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
from adapt.utils import check_arrays
1212

1313

14-
@make_insert_doc()
14+
@make_insert_doc(supervised=True)
1515
class FE(BaseAdaptEstimator):
1616
"""
1717
FE: Frustratingly Easy Domain Adaptation.

adapt/feature_based/_fmmd.py

Lines changed: 17 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -8,12 +8,13 @@
88

99

1010
def pairwise_X(X, Y):
11-
batch_size = tf.shape(X)[0]
11+
batch_size_x = tf.shape(X)[0]
12+
batch_size_y = tf.shape(Y)[0]
1213
dim = tf.reduce_prod(tf.shape(X)[1:])
13-
X = tf.reshape(X, (batch_size, dim))
14-
Y = tf.reshape(Y, (batch_size, dim))
15-
X = tf.tile(tf.expand_dims(X, -1), [1, 1, batch_size])
16-
Y = tf.tile(tf.expand_dims(Y, -1), [1, 1, batch_size])
14+
X = tf.reshape(X, (batch_size_x, dim))
15+
Y = tf.reshape(Y, (batch_size_y, dim))
16+
X = tf.tile(tf.expand_dims(X, -1), [1, 1, batch_size_y])
17+
Y = tf.tile(tf.expand_dims(Y, -1), [1, 1, batch_size_x])
1718
return tf.reduce_sum(tf.square(X-tf.transpose(Y)), 1)
1819

1920

@@ -110,8 +111,17 @@ class fMMD(BaseAdaptEstimator):
110111
Choose the kernel between
111112
['linear', 'rbf', 'poly'].
112113
The kernels are computed as follows:
113-
``rbf(X, Y) = exp(gamma * ||X-Y||^2)``
114-
``poly(X, Y) = (gamma * <X, Y> + coef)^degree``
114+
- kernel = linear::
115+
116+
k(X, Y) = <X, Y>
117+
118+
- kernel = rbf::
119+
120+
k(X, Y) = exp(gamma * ||X-Y||^2)
121+
122+
- kernel = poly::
123+
124+
poly(X, Y) = (gamma * <X, Y> + coef)^degree
115125
116126
gamma : float (default=1.)
117127
Gamma multiplier for the 'rbf' and
@@ -143,7 +153,6 @@ class fMMD(BaseAdaptEstimator):
143153
def __init__(self,
144154
estimator=None,
145155
Xt=None,
146-
yt=None,
147156
threshold="auto",
148157
kernel="linear",
149158
gamma=1.,

adapt/feature_based/_mcd.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,6 @@ def __init__(self,
7676
encoder=None,
7777
task=None,
7878
Xt=None,
79-
yt=None,
8079
pretrain=True,
8180
n_steps=1,
8281
copy=True,

0 commit comments

Comments
 (0)