Commit b54dfdd

Add 6 new methods
1 parent e919420 commit b54dfdd

17 files changed, 1072 insertions(+), 3 deletions(-)

adapt/base.py

Lines changed: 3 additions & 0 deletions
@@ -141,6 +141,9 @@ def insert_base_doc(func):
         new_doc = doc
 
         func.__doc__ = new_doc
+
+        if str(inspect.signature(func)) == "(*args, **kwargs)":
+            func.__signature__ = inspect.signature(func.__init__)
 
         return func
     return insert_base_doc
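
The added guard copies the signature from ``__init__`` whenever the decorated object reports the opaque ``(*args, **kwargs)``, which keeps ``inspect``-based tooling (``help()``, generated docs) informative. A minimal sketch of the same logic on a hypothetical class, not from the commit:

import inspect

class Dummy:
    def __init__(self, margin=1., gamma=0.5):
        pass

# Mirror the added guard: fall back to the __init__ signature only when
# the class itself reports an opaque one.
if str(inspect.signature(Dummy)) == "(*args, **kwargs)":
    Dummy.__signature__ = inspect.signature(Dummy.__init__)

print(inspect.signature(Dummy))  # (margin=1.0, gamma=0.5)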

adapt/feature_based/__init__.py

Lines changed: 5 additions & 1 deletion
@@ -11,5 +11,9 @@
 from ._mdd import MDD
 from ._wdgrl import WDGRL
 from ._cdan import CDAN
+from ._sa import SA
+from ._fmmd import fMMD
+from ._ccsa import CCSA
 
-__all__ = ["FE", "CORAL", "DeepCORAL", "ADDA", "DANN", "MCD", "MDD", "WDGRL", "CDAN"]
+__all__ = ["FE", "CORAL", "DeepCORAL", "ADDA", "DANN",
+           "MCD", "MDD", "WDGRL", "CDAN", "SA", "fMMD", "CCSA"]

adapt/feature_based/_ccsa.py

Lines changed: 128 additions & 0 deletions
import numpy as np
import tensorflow as tf

from adapt.base import BaseAdaptDeep, make_insert_doc
from adapt.utils import set_random_seed


EPS = np.finfo(np.float32).eps


def pairwise_y(X, Y):
    # Pairwise label disagreement: for one-hot labels, entry (i, j) is
    # 1 if X[i] and Y[j] belong to different classes, 0 otherwise.
    batch_size = tf.shape(X)[0]
    dim = tf.reduce_prod(tf.shape(X)[1:])
    X = tf.reshape(X, (batch_size, dim))
    Y = tf.reshape(Y, (batch_size, dim))
    X = tf.tile(tf.expand_dims(X, -1), [1, 1, batch_size])
    Y = tf.tile(tf.expand_dims(Y, -1), [1, 1, batch_size])
    return tf.reduce_sum(tf.abs(X - tf.transpose(Y)), 1) / 2


def pairwise_X(X, Y):
    # Pairwise squared Euclidean distances between rows of X and Y.
    batch_size = tf.shape(X)[0]
    dim = tf.reduce_prod(tf.shape(X)[1:])
    X = tf.reshape(X, (batch_size, dim))
    Y = tf.reshape(Y, (batch_size, dim))
    X = tf.tile(tf.expand_dims(X, -1), [1, 1, batch_size])
    Y = tf.tile(tf.expand_dims(Y, -1), [1, 1, batch_size])
    return tf.reduce_sum(tf.square(X - tf.transpose(Y)), 1)


@make_insert_doc(["encoder", "task"])
class CCSA(BaseAdaptDeep):
    """
    CCSA : Classification and Contrastive Semantic Alignment

    Parameters
    ----------
    margin : float (default=1.)
        Margin for the inter-class separation.
        The higher the margin, the more the classes
        will be separated in the encoded space.

    gamma : float (default=0.5)
        Trade-off parameter, ``0 < gamma < 1``.
        If ``gamma`` is close to 1, more importance
        is given to the task. If ``gamma`` is close to 0,
        more importance is given to the contrastive loss.

    Attributes
    ----------
    encoder_ : tensorflow Model
        Encoder network.

    task_ : tensorflow Model
        Task network.

    history_ : dict
        History of the losses and metrics across the epochs.
        If ``yt`` is given in the ``fit`` method, target metrics
        and losses are recorded too.
    """

    def __init__(self,
                 encoder=None,
                 task=None,
                 Xt=None,
                 yt=None,
                 margin=1.,
                 gamma=0.5,
                 copy=True,
                 verbose=1,
                 random_state=None,
                 **params):

        names = self._get_param_names()
        kwargs = {k: v for k, v in locals().items() if k in names}
        kwargs.update(params)
        super().__init__(**kwargs)

    def train_step(self, data):
        # Unpack the data.
        Xs, Xt, ys, yt = self._unpack_data(data)

        # loss
        with tf.GradientTape() as task_tape, tf.GradientTape() as enc_tape:
            # Forward pass
            Xs_enc = self.encoder_(Xs, training=True)
            ys_pred = self.task_(Xs_enc, training=True)

            Xt_enc = self.encoder_(Xt, training=True)

            dist_y = pairwise_y(ys, yt)
            dist_X = pairwise_X(Xs_enc, Xt_enc)

            # Contrastive semantic alignment: push different-class pairs
            # (dist_y == 1) at least `margin` apart and pull same-class
            # pairs (dist_y == 0) together in the encoded space.
            contrastive_loss = tf.reduce_sum(
                dist_y * tf.maximum(0., self.margin - dist_X), 1) / (
                tf.reduce_sum(dist_y, 1) + EPS)
            contrastive_loss += tf.reduce_sum(
                (1 - dist_y) * dist_X, 1) / (
                tf.reduce_sum(1 - dist_y, 1) + EPS)
            contrastive_loss = tf.reduce_mean(contrastive_loss)
            contrastive_loss *= 0.5

            # Reshape
            ys_pred = tf.reshape(ys_pred, tf.shape(ys))

            # Compute the loss value
            task_loss = tf.reduce_mean(self.task_loss_(ys, ys_pred))

            enc_loss = self.gamma * task_loss + (1 - self.gamma) * contrastive_loss

            task_loss += sum(self.task_.losses)
            enc_loss += sum(self.encoder_.losses)

        # Compute gradients
        trainable_vars_task = self.task_.trainable_variables
        trainable_vars_enc = self.encoder_.trainable_variables

        gradients_task = task_tape.gradient(task_loss, trainable_vars_task)
        gradients_enc = enc_tape.gradient(enc_loss, trainable_vars_enc)

        # Update weights
        self.optimizer.apply_gradients(zip(gradients_task, trainable_vars_task))
        self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))

        # Update metrics
        self.compiled_metrics.update_state(ys, ys_pred)
        self.compiled_loss(ys, ys_pred)
        # Return a dict mapping metric names to current value
        logs = {m.name: m.result() for m in self.metrics}
        logs.update({"contrast": contrastive_loss})
        return logs
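
For orientation, a minimal usage sketch, not from the commit. CCSA is supervised on the target, so a few labeled target samples (``Xt``, ``yt``) are needed, and ``pairwise_y`` assumes one-hot labels; the ``loss`` and ``optimizer`` keywords are assumed to be forwarded to ``compile`` as in the other deep models:

import numpy as np
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from adapt.feature_based import CCSA

# Toy two-class problem with a shifted target domain.
np.random.seed(0)
Xs = np.random.randn(100, 2)
Xt = np.random.randn(20, 2) + 1.
ys = np.eye(2)[np.random.randint(2, size=100)]  # one-hot source labels
yt = np.eye(2)[np.random.randint(2, size=20)]   # few labeled target samples

model = CCSA(encoder=Sequential([Dense(8, activation="relu")]),
             task=Sequential([Dense(2, activation="softmax")]),
             Xt=Xt, yt=yt, margin=1., gamma=0.5,
             loss="categorical_crossentropy", optimizer="adam",
             random_state=0)
model.fit(Xs, ys, epochs=10, verbose=0)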

adapt/feature_based/_fe.py

Lines changed: 5 additions & 0 deletions
@@ -59,6 +59,11 @@ class FE(BaseAdaptEstimator):
 
     Attributes
     ----------
+    estimator_ : object
+        Fitted estimator.
+
+    n_domains_ : int
+        Number of domains given in fit.
 
     See also
     --------

adapt/feature_based/_fmmd.py

Lines changed: 221 additions & 0 deletions
import numpy as np
import tensorflow as tf
from sklearn.base import check_array
from cvxopt import solvers, matrix

from adapt.base import BaseAdaptEstimator, make_insert_doc
from adapt.utils import set_random_seed


def pairwise_X(X, Y):
    # Pairwise squared Euclidean distances between rows of X and Y.
    batch_size = tf.shape(X)[0]
    dim = tf.reduce_prod(tf.shape(X)[1:])
    X = tf.reshape(X, (batch_size, dim))
    Y = tf.reshape(Y, (batch_size, dim))
    X = tf.tile(tf.expand_dims(X, -1), [1, 1, batch_size])
    Y = tf.tile(tf.expand_dims(Y, -1), [1, 1, batch_size])
    return tf.reduce_sum(tf.square(X - tf.transpose(Y)), 1)


def _get_optim_function(Xs, Xt, kernel="linear", gamma=1., degree=2, coef=1.):

    n = len(Xs)
    m = len(Xt)

    # Coefficient matrix L such that trace(K @ L) is the squared MMD.
    Lxx = tf.ones((n, n), dtype=tf.float64) * (1. / (n ** 2))
    Lxy = tf.ones((n, m), dtype=tf.float64) * (-1. / (n * m))
    Lyy = tf.ones((m, m), dtype=tf.float64) * (1. / (m ** 2))
    Lyx = tf.ones((m, n), dtype=tf.float64) * (-1. / (n * m))

    L = tf.concat((Lxx, Lxy), axis=1)
    L = tf.concat((L, tf.concat((Lyx, Lyy), axis=1)), axis=0)

    if kernel == "linear":

        @tf.function
        def func(W):
            Kxx = tf.matmul(tf.matmul(Xs, tf.linalg.diag(W)), tf.transpose(Xs))
            Kyy = tf.matmul(tf.matmul(Xt, tf.linalg.diag(W)), tf.transpose(Xt))
            Kxy = tf.matmul(tf.matmul(Xs, tf.linalg.diag(W)), tf.transpose(Xt))

            # Full kernel matrix [[Kxx, Kxy], [Kyx, Kyy]].
            K = tf.concat((Kxx, Kxy), axis=1)
            K = tf.concat((K, tf.concat((tf.transpose(Kxy), Kyy), axis=1)), axis=0)

            f = -tf.linalg.trace(tf.matmul(K, L))
            Df = tf.gradients(f, W)
            H = tf.hessians(f, W)
            return f, Df, H

    elif kernel == "rbf":

        @tf.function
        def func(W):
            Kxx = pairwise_X(tf.matmul(Xs, tf.linalg.diag(W)), Xs)
            Kyy = pairwise_X(tf.matmul(Xt, tf.linalg.diag(W)), Xt)
            Kxy = pairwise_X(tf.matmul(Xs, tf.linalg.diag(W)), Xt)

            # Full kernel matrix [[Kxx, Kxy], [Kyx, Kyy]].
            K = tf.concat((Kxx, Kxy), axis=1)
            K = tf.concat((K, tf.concat((tf.transpose(Kxy), Kyy), axis=1)), axis=0)
            K = tf.exp(-gamma * K)

            f = -tf.linalg.trace(tf.matmul(K, L))
            Df = tf.gradients(f, W)
            H = tf.hessians(f, W)
            return f, Df, H

    elif kernel == "poly":

        @tf.function
        def func(W):
            Kxx = tf.matmul(tf.matmul(Xs, tf.linalg.diag(W)), tf.transpose(Xs))
            Kyy = tf.matmul(tf.matmul(Xt, tf.linalg.diag(W)), tf.transpose(Xt))
            Kxy = tf.matmul(tf.matmul(Xs, tf.linalg.diag(W)), tf.transpose(Xt))

            # Full kernel matrix [[Kxx, Kxy], [Kyx, Kyy]].
            K = tf.concat((Kxx, Kxy), axis=1)
            K = tf.concat((K, tf.concat((tf.transpose(Kxy), Kyy), axis=1)), axis=0)
            K = (gamma * K + coef) ** degree

            f = -tf.linalg.trace(tf.matmul(K, L))
            Df = tf.gradients(f, W)
            H = tf.hessians(f, W)
            return f, Df, H

    else:
        raise ValueError("kernel param should be in ['linear', 'rbf', 'poly']")

    return func


@make_insert_doc()
class fMMD(BaseAdaptEstimator):
    """
    fMMD : Feature Selection with MMD

    fMMD selects input features in order to minimize the
    maximum mean discrepancy (MMD) between the source and
    the target data.

    Parameters
    ----------
    threshold : float or 'auto' (default='auto')
        Threshold on ``features_scores_``: all
        features with a score above the threshold
        will be removed.
        If 'auto', the threshold is chosen to maximize
        the difference between the scores of the
        selected features and the removed ones.

    kernel : str (default='linear')
        Choose the kernel between
        ['linear', 'rbf', 'poly'].
        The kernels are computed as follows:
        ``rbf(X, Y) = exp(-gamma * ||X-Y||^2)``
        ``poly(X, Y) = (gamma * <X, Y> + coef)^degree``

    gamma : float (default=1.)
        Gamma multiplier for the 'rbf' and
        'poly' kernels.

    degree : int (default=2)
        Degree of the 'poly' kernel.

    coef : float (default=1.)
        Coef of the 'poly' kernel.

    Attributes
    ----------
    estimator_ : object
        Fitted estimator.

    selected_features_ : numpy array
        Boolean mask of the selected features.

    features_scores_ : numpy array
        The score attributed to each feature.

    See also
    --------
    CORAL
    FE
    """

    def __init__(self,
                 estimator=None,
                 Xt=None,
                 yt=None,
                 threshold="auto",
                 kernel="linear",
                 gamma=1.,
                 degree=2,
                 coef=1.,
                 copy=True,
                 verbose=1,
                 random_state=None,
                 **params):

        names = self._get_param_names()
        kwargs = {k: v for k, v in locals().items() if k in names}
        kwargs.update(params)
        super().__init__(**kwargs)

    def fit_transform(self, Xs, Xt, **fit_params):
        Xs = check_array(Xs)
        Xt = check_array(Xt)
        set_random_seed(self.random_state)

        p = Xs.shape[1]

        optim_func = _get_optim_function(tf.identity(Xs),
                                         tf.identity(Xt),
                                         self.kernel,
                                         self.gamma,
                                         self.degree,
                                         self.coef)

        # cvxopt nonlinear solver interface: F() gives the number of
        # nonlinear constraints and a starting point, F(x) the objective
        # value and gradient, F(x, z) adds the z-weighted Hessian.
        def F(x=None, z=None):
            if x is None:
                return 0, matrix(1.0, (p, 1))
            x = tf.identity(np.array(x).ravel())
            f, Df, H = optim_func(x)
            f = f.numpy()
            Df = Df[0].numpy().reshape(1, -1)
            H = H[0].numpy()
            if z is None:
                return matrix(f), matrix(Df)
            return matrix(f), matrix(Df), matrix(z[0] * H)

        # Constraints: W >= 0 (componentwise) and ||W||_2 <= 1
        # (second-order cone of dimension p+1).
        linear_const_G = -np.eye(p)
        squared_constraint_G = np.concatenate((np.zeros((1, p)), -np.eye(p)), axis=0)

        linear_const_h = np.zeros(p)
        squared_constraint_h = np.concatenate((np.ones(1), np.zeros(p)))

        G = matrix(np.concatenate((linear_const_G, squared_constraint_G)))
        h = matrix(np.concatenate((linear_const_h, squared_constraint_h)))
        dims = {'l': p, 'q': [p + 1], 's': []}
        sol = solvers.cp(F, G, h, dims)

        W = np.array(sol["x"]).ravel()

        self.selected_features_ = np.zeros(p, dtype=bool)

        if self.threshold == "auto":
            # Cut at the largest gap in the sorted scores.
            args = np.argsort(W).ravel()
            max_diff_arg = np.argmax(np.diff(W[args]))
            threshold = W[args[max_diff_arg]]
            self.selected_features_[W <= threshold] = True
        else:
            self.selected_features_[W <= self.threshold] = True

        if np.sum(self.selected_features_) == 0:
            raise Exception("No features selected")

        self.features_scores_ = W
        return Xs[:, self.selected_features_]

    def transform(self, X):
        X = check_array(X)
        return X[:, self.selected_features_]
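
For orientation, a hypothetical end-to-end check, not from the commit. The solver pushes the weights ``W`` toward the features that carry the discrepancy (the objective maximizes the MMD under ``W >= 0`` and ``||W|| <= 1``), and high-score features are then dropped:

import numpy as np
from adapt.feature_based import fMMD

np.random.seed(0)
Xs = np.random.randn(100, 3)
Xt = np.random.randn(100, 3)
Xt[:, 1] += 2.   # domain shift concentrated on feature 1

selector = fMMD(kernel="linear", threshold="auto", random_state=0)
Xs_selected = selector.fit_transform(Xs, Xt)

print(selector.features_scores_)    # highest score expected on feature 1
print(selector.selected_features_)  # boolean mask, feature 1 dropped
print(Xs_selected.shape)            # expect (100, 2)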
