diff --git a/.github/workflows/check-docs.yml b/.github/workflows/check-docs.yml
index f0a4bb1..7a9576f 100644
--- a/.github/workflows/check-docs.yml
+++ b/.github/workflows/check-docs.yml
@@ -36,7 +36,7 @@ jobs:
mv -v docs/html/* docs/
sudo rm -r -f docs/html
touch .nojekyll
- - uses: actions/upload-artifact@v1
+ - uses: actions/upload-artifact@v3
with:
name: DocumentationHTML
path: docs/
diff --git a/.github/workflows/run-test.yml b/.github/workflows/run-test.yml
index d4bac54..d8d96b3 100644
--- a/.github/workflows/run-test.yml
+++ b/.github/workflows/run-test.yml
@@ -13,7 +13,7 @@ jobs:
build:
strategy:
matrix:
- python-version: ['3.8', '3.9', '3.10', '3.11']
+ python-version: ['3.9', '3.10', '3.11', '3.12']
os: [ubuntu-latest, windows-latest, macos-latest]
runs-on: ${{ matrix.os }}
steps:
diff --git a/README.md b/README.md
index b03345e..f74709a 100644
--- a/README.md
+++ b/README.md
@@ -296,3 +296,4 @@ If you use this library in your research, please cite ADAPT using the following
This work has been funded by Michelin and the Industrial Data Analytics and Machine Learning chair from ENS Paris-Saclay, Borelli center.
[](https://www.michelin.com/) [](https://centreborelli.ens-paris-saclay.fr/fr/chaire-idaml) [](https://centreborelli.ens-paris-saclay.fr/fr)
+
diff --git a/adapt/base.py b/adapt/base.py
index e953955..cf2ff59 100644
--- a/adapt/base.py
+++ b/adapt/base.py
@@ -18,13 +18,14 @@
except:
from scikeras.wrappers import KerasClassifier, KerasRegressor
try:
- from tensorflow.keras.optimizers.legacy import RMSprop
-except:
from tensorflow.keras.optimizers import RMSprop
+except:
+ from tensorflow.keras.optimizers.legacy import RMSprop
from adapt.utils import (check_estimator,
check_network,
+ check_if_compiled,
check_arrays,
set_random_seed,
check_sample_weight,
@@ -560,7 +561,7 @@ def fit_estimator(self, X, y, sample_weight=None,
if isinstance(self.estimator_, Model):
compile_params = self._filter_params(self.estimator_.compile)
if not "loss" in compile_params:
- if estimator._is_compiled:
+ if hasattr(estimator, "loss"):
compile_params["loss"] = deepcopy(estimator.loss)
else:
raise ValueError("The given `estimator` argument"
@@ -568,7 +569,7 @@ def fit_estimator(self, X, y, sample_weight=None,
"Please give a compiled estimator or "
"give a `loss` and `optimizer` arguments.")
if not "optimizer" in compile_params:
- if estimator._is_compiled:
+ if hasattr(estimator, "optimizer"):
compile_params["optimizer"] = deepcopy(estimator.optimizer)
else:
if not isinstance(compile_params["optimizer"], str):
@@ -889,6 +890,12 @@ def _from_config_keras_model(self, dict_):
return model
+def _ensure_compiled(method):
+ def wrapper(self, *args, **kwargs):
+ if not check_if_compiled(self):
+ self.compile()
+ return method(self, *args, **kwargs)
+ return wrapper
class BaseAdaptDeep(Model, BaseAdapt):
@@ -925,8 +932,8 @@ def __init__(self,
setattr(self, key, value)
self._self_setattr_tracking = True
-
-
+
+
def fit(self, X, y=None, Xt=None, yt=None, domains=None, **fit_params):
"""
Fit Model. Note that ``fit`` does not reset
@@ -1016,6 +1023,10 @@ def fit(self, X, y=None, Xt=None, yt=None, domains=None, **fit_params):
dataset_src = tf.data.Dataset.zip((dataset_Xs, dataset_ys))
else:
+ if self._check_for_batch(X):
+ raise ValueError(
+ "X is already divided in batches. Please pass a full Dataset instead,"
+ " and use the `batch_size` argument for batch division.")
dataset_src = X
### 2.2 Target
@@ -1036,30 +1047,18 @@ def fit(self, X, y=None, Xt=None, yt=None, domains=None, **fit_params):
dataset_tgt = tf.data.Dataset.zip((dataset_Xt, dataset_yt))
else:
+ if self._check_for_batch(Xt):
+ raise ValueError(
+ "Xt is already divided in batches. Please pass a full Dataset instead,"
+ " and use the `batch_size` argument for batch division.")
dataset_tgt = Xt
-
- # 3. Initialize networks
- if not hasattr(self, "_is_fitted"):
- self._is_fitted = True
- self._initialize_networks()
- if isinstance(Xt, tf.data.Dataset):
- first_elem = next(iter(Xt))
- if not isinstance(first_elem, tuple):
- shape = first_elem.shape
- else:
- shape = first_elem[0].shape
- if self._check_for_batch(Xt):
- shape = shape[1:]
- else:
- shape = Xt.shape[1:]
- self._initialize_weights(shape)
-
+
# 3.5 Get datasets length
self.length_src_ = self._get_length_dataset(dataset_src, domain="src")
self.length_tgt_ = self._get_length_dataset(dataset_tgt, domain="tgt")
-
+
# 4. Prepare validation dataset
if validation_data is None and validation_split>0.:
if shuffle:
@@ -1110,7 +1109,7 @@ def fit(self, X, y=None, Xt=None, yt=None, domains=None, **fit_params):
if self.pretrain_:
- if self._is_compiled:
+ if check_if_compiled(self):
warnings.warn("The model has already been compiled. "
"To perform pretraining, the model will be "
"compiled again. Please make sure to pass "
@@ -1118,6 +1117,22 @@ def fit(self, X, y=None, Xt=None, yt=None, domains=None, **fit_params):
compile_params = self._filter_params(super().compile, prefix="pretrain")
self.compile(**compile_params)
+
+ # 5.5 Initialize networks for pretraining
+ if not hasattr(self, "_is_fitted"):
+ self._is_fitted = True
+ self._initialize_networks()
+ if isinstance(Xt, tf.data.Dataset):
+ first_elem = next(iter(Xt))
+ if not isinstance(first_elem, tuple):
+ shape = first_elem.shape
+ else:
+ shape = first_elem[0].shape
+ if self._check_for_batch(Xt):
+ shape = shape[1:]
+ else:
+ shape = Xt.shape[1:]
+ self._initialize_weights(shape)
if not hasattr(self, "pretrain_history_"):
self.pretrain_history_ = {}
@@ -1132,8 +1147,7 @@ def fit(self, X, y=None, Xt=None, yt=None, domains=None, **fit_params):
# !!! shuffle is already done
dataset = tf.data.Dataset.zip((dataset_src, dataset_tgt))
- if not self._check_for_batch(dataset):
- dataset = dataset.batch(pre_batch_size)
+ dataset = dataset.batch(pre_batch_size)
hist = super().fit(dataset, validation_data=validation_data,
epochs=pre_epochs, verbose=pre_verbose, **prefit_params)
@@ -1142,19 +1156,34 @@ def fit(self, X, y=None, Xt=None, yt=None, domains=None, **fit_params):
self.pretrain_history_[k] = self.pretrain_history_.get(k, []) + v
self._initialize_pretain_networks()
-
- # 6. Compile
- if (not self._is_compiled) or (self.pretrain_):
+
+ # 6 Compile
+ if (not check_if_compiled(self)) or (self.pretrain_):
self.compile()
-
+
if not hasattr(self, "history_"):
self.history_ = {}
+ # 6.5 Initialize networks
+ if not hasattr(self, "_is_fitted"):
+ self._is_fitted = True
+ self._initialize_networks()
+ if isinstance(Xt, tf.data.Dataset):
+ first_elem = next(iter(Xt))
+ if not isinstance(first_elem, tuple):
+ shape = first_elem.shape
+ else:
+ shape = first_elem[0].shape
+ if self._check_for_batch(Xt):
+ shape = shape[1:]
+ else:
+ shape = Xt.shape[1:]
+ self._initialize_weights(shape)
+
# .7 Training
dataset = tf.data.Dataset.zip((dataset_src, dataset_tgt))
- if not self._check_for_batch(dataset):
- dataset = dataset.batch(batch_size)
+ dataset = dataset.batch(batch_size)
self.pretrain_ = False
@@ -1275,31 +1304,35 @@ def compile(self,
metrics_task = metrics
if metrics_disc is None:
metrics_disc = []
-
- self.disc_metrics = [tf.keras.metrics.get(m) for m in metrics_disc]
- for metric, i in zip(self.disc_metrics,
- range(len(self.disc_metrics))):
- if hasattr(metric, "name"):
- name = metric.name
- elif hasattr(metric, "__name__"):
- name = metric.__name__
- elif hasattr(metric, "__class__"):
- name = metric.__class__.__name__
- else:
- name = "met"
- if "_" in name:
- new_name = ""
- for split in name.split("_"):
- if len(split) > 0:
- new_name += split[0]
- name = new_name
- else:
- name = name[:3]
- metric.name = name
-
- if metric.name in ["acc", "Acc", "accuracy", "Accuracy"]:
- self.disc_metrics[i] = accuracy
- self.disc_metrics[i].name = "acc"
+
+ if not hasattr(self, "disc_metrics"):
+ for i in range(len(metrics_disc)):
+ if metrics_disc[i] == "acc":
+ metrics_disc[i] = "accuracy"
+ self.disc_metrics = [tf.keras.metrics.get(m) for m in metrics_disc]
+ for metric, i in zip(self.disc_metrics,
+ range(len(self.disc_metrics))):
+ if hasattr(metric, "name"):
+ name = metric.name
+ elif hasattr(metric, "__name__"):
+ name = metric.__name__
+ elif hasattr(metric, "__class__"):
+ name = metric.__class__.__name__
+ else:
+ name = "met"
+ if "_" in name:
+ new_name = ""
+ for split in name.split("_"):
+ if len(split) > 0:
+ new_name += split[0]
+ name = new_name
+ else:
+ name = name[:3]
+ metric.name = name
+
+ if metric.name in ["acc", "Acc", "accuracy", "Accuracy"]:
+ self.disc_metrics[i] = accuracy
+ self.disc_metrics[i].name = "acc"
compile_params = dict(
optimizer=optimizer,
@@ -1330,7 +1363,7 @@ def compile(self,
optimizer = compile_params["optimizer"]
compile_params["optimizer"] = optimizer
- if not "loss" in compile_params:
+ if not "loss" in compile_params or compile_params["loss"] is None:
compile_params["loss"] = "mse"
self.task_loss_ = tf.keras.losses.get(compile_params["loss"])
@@ -1341,9 +1374,14 @@ def compile(self,
# Set optimizer for encoder and discriminator
if not hasattr(self, "optimizer_enc"):
- self.optimizer_enc = self.optimizer
+ self.optimizer_enc = self.optimizer.__class__.from_config(self.optimizer.get_config())
if not hasattr(self, "optimizer_disc"):
- self.optimizer_disc = self.optimizer
+ self.optimizer_disc = self.optimizer.__class__.from_config(self.optimizer.get_config())
+
+ if self.pretrain_:
+ self.pretrain_optimizer = self.optimizer.__class__.from_config(self.optimizer.get_config())
+ self.pretrain_optimizer_enc = self.optimizer_enc.__class__.from_config(self.optimizer_enc.get_config())
+ self.pretrain_optimizer_disc = self.optimizer_disc.__class__.from_config(self.optimizer_disc.get_config())
def call(self, inputs):
@@ -1358,23 +1396,18 @@ def train_step(self, data):
# Run forward pass.
with tf.GradientTape() as tape:
y_pred = self(Xs, training=True)
- loss = self.compiled_loss(
- ys, y_pred, regularization_losses=self.losses)
+ if hasattr(self, "_compile_loss") and self._compile_loss is not None:
+ loss = self._compile_loss(ys, y_pred)
+ else:
+ loss = self.compiled_loss(ys, y_pred)
+
loss = tf.reduce_mean(loss)
+ loss += sum(self.losses)
# Run backwards pass.
gradients = tape.gradient(loss, self.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
- self.compiled_metrics.update_state(ys, y_pred)
- # Collect metrics to return
- return_metrics = {}
- for metric in self.metrics:
- result = metric.result()
- if isinstance(result, dict):
- return_metrics.update(result)
- else:
- return_metrics[metric.name] = result
- return return_metrics
+ return self._update_logs(ys, y_pred)
def predict(self,
@@ -1382,10 +1415,7 @@ def predict(self,
batch_size=None,
verbose=0,
steps=None,
- callbacks=None,
- max_queue_size=10,
- workers=1,
- use_multiprocessing=False):
+ callbacks=None):
"""
Generates output predictions for the input samples.
@@ -1423,25 +1453,6 @@ def predict(self,
List of callbacks to apply during prediction.
See [callbacks](/api_docs/python/tf/keras/callbacks).
- max_queue_size: int (default=10)
- Used for generator or `keras.utils.Sequence`
- input only. Maximum size for the generator queue.
- If unspecified, `max_queue_size` will default to 10.
-
- workers: int (default=1)
- Used for generator or `keras.utils.Sequence` input
- only. Maximum number of processes to spin up when using
- process-based threading. If unspecified, `workers` will default
- to 1.
-
- use_multiprocessing: bool (default=False)
- Used for generator or `keras.utils.Sequence` input only.
- If `True`, use process-based
- threading. If unspecified, `use_multiprocessing` will default to
- `False`. Note that because this implementation relies on
- multiprocessing, you should not pass non-picklable arguments to
- the generator as they can't be passed easily to children processes.
-
Returns
-------
y_pred : array
@@ -1451,10 +1462,7 @@ def predict(self,
batch_size=batch_size,
verbose=verbose,
steps=steps,
- callbacks=callbacks,
- max_queue_size=max_queue_size,
- workers=workers,
- use_multiprocessing=use_multiprocessing)
+ callbacks=callbacks)
def transform(self, X):
@@ -1601,11 +1609,13 @@ def _get_legal_params(self, params):
def _initialize_weights(self, shape_X):
- self(np.zeros((1,) + shape_X))
if hasattr(self, "encoder_"):
- X_enc = self.encoder_(np.zeros((1,) + shape_X))
+ self.encoder_.build((None,) + shape_X)
if hasattr(self, "discriminator_"):
- self.discriminator_(X_enc)
+ self.discriminator_.build(self.encoder_.output_shape)
+ if hasattr(self, "task_"):
+ self.task_.build(self.encoder_.output_shape)
+ self.build((None,) + shape_X)
def _get_length_dataset(self, dataset, domain="src"):
@@ -1677,6 +1687,26 @@ def _initialize_networks(self):
self.discriminator_ = check_network(self.discriminator,
copy=self.copy,
name="discriminator")
-
+
+
def _initialize_pretain_networks(self):
pass
+
+
+ def _update_logs(self, y, y_pred):
+ version_str = tf.__version__
+ version_tuple = tuple(map(int, version_str.split('.')))
+ if version_tuple >= (2, 16, 0):
+ for metric in self.metrics:
+ metric.update_state(y, y_pred)
+ else:
+ self.compiled_metrics.update_state(y, y_pred)
+ # Collect metrics to return
+ return_metrics = {}
+ for metric in self.metrics:
+ result = metric.result()
+ if isinstance(result, dict):
+ return_metrics.update(result)
+ else:
+ return_metrics[metric.name] = result
+ return return_metrics
\ No newline at end of file
diff --git a/adapt/feature_based/_adda.py b/adapt/feature_based/_adda.py
index e95622b..1dec07e 100644
--- a/adapt/feature_based/_adda.py
+++ b/adapt/feature_based/_adda.py
@@ -183,14 +183,11 @@ def pretrain_step(self, data):
gradients_enc = enc_tape.gradient(enc_loss, trainable_vars_enc)
# Update weights
- self.optimizer.apply_gradients(zip(gradients_task, trainable_vars_task))
- self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))
+ self.pretrain_optimizer.apply_gradients(zip(gradients_task, trainable_vars_task))
+ self.pretrain_optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))
# Update metrics
- self.compiled_metrics.update_state(ys, ys_pred)
- self.compiled_loss(ys, ys_pred)
- # Return a dict mapping metric names to current value
- logs = {m.name: m.result() for m in self.metrics}
+ logs = self._update_logs(ys, ys_pred)
return logs
@@ -211,6 +208,8 @@ def train_step(self, data):
else:
# encoder src is not needed if pretrain=False
Xs_enc = Xs
+
+ ys_pred = self.task_(Xs_enc, training=False)
ys_disc = self.discriminator_(Xs_enc, training=True)
@@ -245,7 +244,8 @@ def train_step(self, data):
# self.compiled_loss(ys, ys_pred)
# Return a dict mapping metric names to current value
# logs = {m.name: m.result() for m in self.metrics}
- logs = self._get_disc_metrics(ys_disc, yt_disc)
+ logs = self._update_logs(ys, ys_pred)
+ logs.update(self._get_disc_metrics(ys_disc, yt_disc))
return logs
@@ -262,15 +262,11 @@ def _get_disc_metrics(self, ys_disc, yt_disc):
))
return disc_dict
-
- def _initialize_weights(self, shape_X):
- # Init weights encoder
- self(np.zeros((1,) + shape_X))
-
- # Set same weights to encoder_src
+
+ def _initialize_networks(self):
+ super()._initialize_networks()
if self.pretrain:
# encoder src is not needed if pretrain=False
- self.encoder_(np.zeros((1,) + shape_X))
self.encoder_src_ = check_network(self.encoder_,
copy=True,
name="encoder_src")
diff --git a/adapt/feature_based/_ccsa.py b/adapt/feature_based/_ccsa.py
index eb23257..a12d34d 100644
--- a/adapt/feature_based/_ccsa.py
+++ b/adapt/feature_based/_ccsa.py
@@ -190,9 +190,6 @@ def train_step(self, data):
self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))
# Update metrics
- self.compiled_metrics.update_state(ys, ys_pred)
- self.compiled_loss(ys, ys_pred)
- # Return a dict mapping metric names to current value
- logs = {m.name: m.result() for m in self.metrics}
+ logs = self._update_logs(ys, ys_pred)
logs.update({"contrast": contrastive_loss})
return logs
\ No newline at end of file
diff --git a/adapt/feature_based/_cdan.py b/adapt/feature_based/_cdan.py
index f65f947..0b47956 100644
--- a/adapt/feature_based/_cdan.py
+++ b/adapt/feature_based/_cdan.py
@@ -278,14 +278,12 @@ def train_step(self, data):
# Update weights
self.optimizer.apply_gradients(zip(gradients_task, trainable_vars_task))
- self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))
self.optimizer_disc.apply_gradients(zip(gradients_disc, trainable_vars_disc))
+ if len(gradients_enc) > 0:
+ self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))
# Update metrics
- self.compiled_metrics.update_state(ys, ys_pred)
- self.compiled_loss(ys, ys_pred)
- # Return a dict mapping metric names to current value
- logs = {m.name: m.result() for m in self.metrics}
+ logs = self._update_logs(ys, ys_pred)
disc_metrics = self._get_disc_metrics(ys_disc, yt_disc)
logs.update({"disc_loss": disc_loss})
logs.update(disc_metrics)
@@ -303,19 +301,19 @@ def _get_disc_metrics(self, ys_disc, yt_disc):
def _initialize_weights(self, shape_X):
- self(np.zeros((1,) + shape_X))
- Xs_enc = self.encoder_(np.zeros((1,) + shape_X), training=True)
- ys_pred = self.task_(Xs_enc, training=True)
- if Xs_enc.get_shape()[1] * ys_pred.get_shape()[1] > self.max_features:
+ self.encoder_.build((None,) + shape_X)
+ self.task_.build(self.encoder_.output_shape)
+ if self.encoder_.output_shape[1] * self.task_.output_shape[1] > self.max_features:
self.is_overloaded_ = True
- self._random_task = tf.random.normal([ys_pred.get_shape()[1],
- self.max_features])
- self._random_enc = tf.random.normal([Xs_enc.get_shape()[1],
- self.max_features])
- self.discriminator_(np.zeros((1, self.max_features)))
+ self._random_task = tf.random.normal([self.task_.output_shape[1],
+ self.max_features])
+ self._random_enc = tf.random.normal([self.encoder_.output_shape[1],
+ self.max_features])
+ self.discriminator_.build((None, self.max_features))
else:
self.is_overloaded_ = False
- self.discriminator_(np.zeros((1, Xs_enc.get_shape()[1] * ys_pred.get_shape()[1])))
+ self.discriminator_.build((None, self.encoder_.output_shape[1] * self.task_.output_shape[1]))
+ self.build((None,) + shape_X)
def _initialize_networks(self):
@@ -337,21 +335,6 @@ def _initialize_networks(self):
self.discriminator_ = check_network(self.discriminator,
copy=self.copy,
name="discriminator")
-
-
-
- # def _initialize_networks(self, shape_Xt):
- # Call predict to avoid strange behaviour with
- # Sequential model whith unspecified input_shape
- # zeros_enc_ = self.encoder_.predict(np.zeros((1,) + shape_Xt));
- # zeros_task_ = self.task_.predict(zeros_enc_);
- # if zeros_task_.shape[1] * zeros_enc_.shape[1] > self.max_features:
- # self.discriminator_.predict(np.zeros((1, self.max_features)))
- # else:
- # zeros_mapping_ = np.matmul(np.expand_dims(zeros_enc_, 2),
- # np.expand_dims(zeros_task_, 1))
- # zeros_mapping_ = np.reshape(zeros_mapping_, (1, -1))
- # self.discriminator_.predict(zeros_mapping_);
def predict_disc(self, X):
diff --git a/adapt/feature_based/_dann.py b/adapt/feature_based/_dann.py
index 614a924..23fe8f3 100644
--- a/adapt/feature_based/_dann.py
+++ b/adapt/feature_based/_dann.py
@@ -2,6 +2,7 @@
DANN
"""
+import inspect
import warnings
import numpy as np
import tensorflow as tf
@@ -170,10 +171,13 @@ def train_step(self, data):
self.optimizer_disc.apply_gradients(zip(gradients_disc, trainable_vars_disc))
# Update metrics
- self.compiled_metrics.update_state(ys, ys_pred)
- self.compiled_loss(ys, ys_pred)
+ #for metric in self.metrics:
+ # metric.update_state(ys, ys_pred)
+ #self.compiled_loss(ys, ys_pred)
# Return a dict mapping metric names to current value
- logs = {m.name: m.result() for m in self.metrics}
+ #logs = {m.name: m.result() for m in self.metrics}
+
+ logs = self._update_logs(ys, ys_pred)
disc_metrics = self._get_disc_metrics(ys_disc, yt_disc)
logs.update({"disc_loss": disc_loss})
logs.update(disc_metrics)
diff --git a/adapt/feature_based/_deepcoral.py b/adapt/feature_based/_deepcoral.py
index faadce7..555de63 100644
--- a/adapt/feature_based/_deepcoral.py
+++ b/adapt/feature_based/_deepcoral.py
@@ -190,10 +190,7 @@ def train_step(self, data):
self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))
# Update metrics
- self.compiled_metrics.update_state(ys, ys_pred)
- self.compiled_loss(ys, ys_pred)
- # Return a dict mapping metric names to current value
- logs = {m.name: m.result() for m in self.metrics}
+ logs = self._update_logs(ys, ys_pred)
logs.update({"disc_loss": disc_loss})
return logs
diff --git a/adapt/feature_based/_mcd.py b/adapt/feature_based/_mcd.py
index c548520..4cbd6a2 100644
--- a/adapt/feature_based/_mcd.py
+++ b/adapt/feature_based/_mcd.py
@@ -122,15 +122,12 @@ def pretrain_step(self, data):
gradients_disc = disc_tape.gradient(disc_loss, trainable_vars_disc)
# Update weights
- self.optimizer.apply_gradients(zip(gradients_task, trainable_vars_task))
- self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))
- self.optimizer_disc.apply_gradients(zip(gradients_disc, trainable_vars_disc))
+ self.pretrain_optimizer.apply_gradients(zip(gradients_task, trainable_vars_task))
+ self.pretrain_optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))
+ self.pretrain_optimizer_disc.apply_gradients(zip(gradients_disc, trainable_vars_disc))
# Update metrics
- self.compiled_metrics.update_state(ys, ys_pred)
- self.compiled_loss(ys, ys_pred)
- # Return a dict mapping metric names to current value
- logs = {m.name: m.result() for m in self.metrics}
+ logs = self._update_logs(ys, ys_pred)
return logs
@@ -162,7 +159,7 @@ def train_step(self, data):
# Compute gradients
trainable_vars_enc = self.encoder_.trainable_variables
gradients_enc = enc_tape.gradient(enc_loss, trainable_vars_enc)
- self.optimizer.apply_gradients(zip(gradients_enc, trainable_vars_enc))
+ self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))
# loss
with tf.GradientTape() as task_tape, tf.GradientTape() as enc_tape, tf.GradientTape() as disc_tape:
@@ -212,10 +209,7 @@ def train_step(self, data):
self.optimizer_disc.apply_gradients(zip(gradients_disc, trainable_vars_disc))
# Update metrics
- self.compiled_metrics.update_state(ys, ys_pred)
- self.compiled_loss(ys, ys_pred)
- # Return a dict mapping metric names to current value
- logs = {m.name: m.result() for m in self.metrics}
+ logs = self._update_logs(ys, ys_pred)
logs.update({"disc_loss": discrepancy})
return logs
@@ -264,12 +258,7 @@ def _initialize_networks(self):
def _initialize_weights(self, shape_X):
- # Init weights encoder
- self(np.zeros((1,) + shape_X))
- X_enc = self.encoder_(np.zeros((1,) + shape_X))
- self.task_(X_enc)
- self.discriminator_(X_enc)
-
+ super()._initialize_weights(shape_X)
# Add noise to discriminator in order to
# differentiate from task
weights = self.discriminator_.get_weights()
diff --git a/adapt/feature_based/_mdd.py b/adapt/feature_based/_mdd.py
index 121d5a3..81b33b7 100644
--- a/adapt/feature_based/_mdd.py
+++ b/adapt/feature_based/_mdd.py
@@ -157,11 +157,7 @@ def train_step(self, data):
self.optimizer_disc.apply_gradients(zip(gradients_disc, trainable_vars_disc))
# Update metrics
- self.compiled_metrics.update_state(ys, ys_pred)
- self.compiled_loss(ys, ys_pred)
- # Return a dict mapping metric names to current value
- logs = {m.name: m.result() for m in self.metrics}
- # disc_metrics = self._get_disc_metrics(ys_disc, yt_disc)
+ logs = self._update_logs(ys, ys_pred)
logs.update({"disc_loss": disc_loss})
return logs
@@ -189,11 +185,7 @@ def _initialize_networks(self):
def _initialize_weights(self, shape_X):
# Init weights encoder
- self(np.zeros((1,) + shape_X))
- X_enc = self.encoder_(np.zeros((1,) + shape_X))
- self.task_(X_enc)
- self.discriminator_(X_enc)
-
+ super()._initialize_weights(shape_X)
# Add noise to discriminator in order to
# differentiate from task
weights = self.discriminator_.get_weights()
diff --git a/adapt/feature_based/_wdgrl.py b/adapt/feature_based/_wdgrl.py
index 3ac4d87..519096f 100644
--- a/adapt/feature_based/_wdgrl.py
+++ b/adapt/feature_based/_wdgrl.py
@@ -183,10 +183,7 @@ def train_step(self, data):
self.optimizer_disc.apply_gradients(zip(gradients_disc, trainable_vars_disc))
# Update metrics
- self.compiled_metrics.update_state(ys, ys_pred)
- self.compiled_loss(ys, ys_pred)
- # Return a dict mapping metric names to current value
- logs = {m.name: m.result() for m in self.metrics}
+ logs = self._update_logs(ys, ys_pred)
disc_metrics = self._get_disc_metrics(ys_disc, yt_disc)
logs.update(disc_metrics)
logs.update({"gp": penalty})
diff --git a/adapt/instance_based/_iwn.py b/adapt/instance_based/_iwn.py
index 3fd9ce3..e51da10 100644
--- a/adapt/instance_based/_iwn.py
+++ b/adapt/instance_based/_iwn.py
@@ -12,7 +12,7 @@
from adapt.base import BaseAdaptDeep, make_insert_doc
from adapt.utils import (check_arrays, check_network, get_default_task,
- set_random_seed, check_estimator, check_sample_weight)
+ set_random_seed, check_estimator, check_sample_weight, check_if_compiled)
EPS = np.finfo(np.float32).eps
@@ -141,8 +141,21 @@ def _initialize_networks(self):
name="weighter")
self.sigma_ = tf.Variable(self.sigma_init,
trainable=self.update_sigma)
-
-
+
+ if not hasattr(self, "estimator_"):
+ self.estimator_ = check_estimator(self.estimator,
+ copy=self.copy,
+ force_copy=True)
+
+
+ def _initialize_weights(self, shape_X):
+ if hasattr(self, "weighter_"):
+ self.weighter_.build((None,) + shape_X)
+ self.build((None,) + shape_X)
+ if isinstance(self.estimator_, Model):
+ self.estimator_.build((None,) + shape_X)
+
+
def pretrain_step(self, data):
# Unpack the data.
Xs, Xt, ys, yt = self._unpack_data(data)
@@ -163,7 +176,7 @@ def pretrain_step(self, data):
gradients = tape.gradient(loss, trainable_vars)
# Update weights
- self.optimizer.apply_gradients(zip(gradients, trainable_vars))
+ self.pretrain_optimizer.apply_gradients(zip(gradients, trainable_vars))
logs = {"loss": loss}
return logs
@@ -200,7 +213,7 @@ def train_step(self, data):
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
- self.optimizer.apply_gradients(zip(gradients_sigma, [self.sigma_]))
+ self.optimizer_sigma.apply_gradients(zip(gradients_sigma, [self.sigma_]))
# Return a dict mapping metric names to current value
logs = {"loss": loss, "sigma": self.sigma_}
@@ -214,6 +227,26 @@ def fit(self, X, y=None, Xt=None, yt=None, domains=None,
return self
+ def compile(self,
+ optimizer=None,
+ loss=None,
+ metrics=None,
+ loss_weights=None,
+ weighted_metrics=None,
+ run_eagerly=None,
+ steps_per_execution=None,
+ **kwargs):
+ super().compile(optimizer=optimizer,
+ loss=loss,
+ metrics=metrics,
+ loss_weights=loss_weights,
+ weighted_metrics=weighted_metrics,
+ run_eagerly=run_eagerly,
+ steps_per_execution=steps_per_execution,
+ **kwargs)
+ self.optimizer_sigma = self.optimizer.__class__.from_config(self.optimizer.get_config())
+
+
def fit_weights(self, Xs, Xt, **fit_params):
"""
Fit importance weighting.
@@ -276,22 +309,23 @@ def fit_estimator(self, X, y, sample_weight=None,
X, y = check_arrays(X, y, accept_sparse=True)
set_random_seed(random_state)
- if (not warm_start) or (not hasattr(self, "estimator_")):
- estimator = self.estimator
- self.estimator_ = check_estimator(estimator,
+ if not hasattr(self, "estimator_"):
+ self.estimator_ = check_estimator(self.estimator,
copy=self.copy,
force_copy=True)
- if isinstance(self.estimator_, Model):
- compile_params = {}
- if estimator._is_compiled:
- compile_params["loss"] = deepcopy(estimator.loss)
- compile_params["optimizer"] = deepcopy(estimator.optimizer)
- else:
- raise ValueError("The given `estimator` argument"
- " is not compiled yet. "
- "Please give a compiled estimator or "
- "give a `loss` and `optimizer` arguments.")
- self.estimator_.compile(**compile_params)
+
+ estimator = self.estimator
+ if isinstance(self.estimator_, Model):
+ compile_params = {}
+ if check_if_compiled(estimator):
+ compile_params["loss"] = deepcopy(estimator.loss)
+ compile_params["optimizer"] = deepcopy(estimator.optimizer)
+ else:
+ raise ValueError("The given `estimator` argument"
+ " is not compiled yet. "
+ "Please give a compiled estimator or "
+ "give a `loss` and `optimizer` arguments.")
+ self.estimator_.compile(**compile_params)
fit_args = [
p.name
diff --git a/adapt/instance_based/_wann.py b/adapt/instance_based/_wann.py
index c070609..b71648a 100644
--- a/adapt/instance_based/_wann.py
+++ b/adapt/instance_based/_wann.py
@@ -116,6 +116,16 @@ def _initialize_networks(self):
name="discriminator")
+ def _initialize_weights(self, shape_X):
+ if hasattr(self, "weighter_"):
+ self.weighter_.build((None,) + shape_X)
+ if hasattr(self, "task_"):
+ self.task_.build((None,) + shape_X)
+ if hasattr(self, "discriminator_"):
+ self.discriminator_.build((None,) + shape_X)
+ self.build((None,) + shape_X)
+
+
def _add_regularization(self, weighter):
for i in range(len(weighter.layers)):
if hasattr(weighter.layers[i], "kernel_constraint"):
@@ -149,7 +159,7 @@ def pretrain_step(self, data):
gradients = tape.gradient(loss, trainable_vars)
# Update weights
- self.optimizer.apply_gradients(zip(gradients, trainable_vars))
+ self.pretrain_optimizer.apply_gradients(zip(gradients, trainable_vars))
logs = {"loss": loss}
return logs
@@ -217,15 +227,33 @@ def train_step(self, data):
# Update weights
self.optimizer.apply_gradients(zip(gradients_task, trainable_vars_task))
- self.optimizer.apply_gradients(zip(gradients_weight, trainable_vars_weight))
- self.optimizer.apply_gradients(zip(gradients_disc, trainable_vars_disc))
+ self.optimizer_weight.apply_gradients(zip(gradients_weight, trainable_vars_weight))
+ self.optimizer_disc.apply_gradients(zip(gradients_disc, trainable_vars_disc))
# Update metrics
- self.compiled_metrics.update_state(ys, ys_pred)
- self.compiled_loss(ys, ys_pred)
- # Return a dict mapping metric names to current value
- logs = {m.name: m.result() for m in self.metrics}
+ logs = self._update_logs(ys, ys_pred)
return logs
+
+
+ def compile(self,
+ optimizer=None,
+ loss=None,
+ metrics=None,
+ loss_weights=None,
+ weighted_metrics=None,
+ run_eagerly=None,
+ steps_per_execution=None,
+ **kwargs):
+ super().compile(optimizer=optimizer,
+ loss=loss,
+ metrics=metrics,
+ loss_weights=loss_weights,
+ weighted_metrics=weighted_metrics,
+ run_eagerly=run_eagerly,
+ steps_per_execution=steps_per_execution,
+ **kwargs)
+ self.optimizer_weight = self.optimizer.__class__.from_config(self.optimizer.get_config())
+ self.optimizer_disc = self.optimizer.__class__.from_config(self.optimizer.get_config())
def predict_weights(self, X):
diff --git a/adapt/parameter_based/_finetuning.py b/adapt/parameter_based/_finetuning.py
index a625bca..eefe924 100644
--- a/adapt/parameter_based/_finetuning.py
+++ b/adapt/parameter_based/_finetuning.py
@@ -146,10 +146,7 @@ def pretrain_step(self, data):
self.optimizer.apply_gradients(zip(gradients_task, trainable_vars_task))
# Update metrics
- self.compiled_metrics.update_state(ys, ys_pred)
- self.compiled_loss(ys, ys_pred)
- # Return a dict mapping metric names to current value
- logs = {m.name: m.result() for m in self.metrics}
+ logs = self._update_logs(ys, ys_pred)
return logs
@@ -185,13 +182,11 @@ def train_step(self, data):
# Update weights
self.optimizer.apply_gradients(zip(gradients_task, trainable_vars_task))
- self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))
+ if len(trainable_vars_enc) > 0:
+ self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))
# Update metrics
- self.compiled_metrics.update_state(ys, ys_pred)
- self.compiled_loss(ys, ys_pred)
- # Return a dict mapping metric names to current value
- logs = {m.name: m.result() for m in self.metrics}
+ logs = self._update_logs(ys, ys_pred)
return logs
diff --git a/adapt/parameter_based/_regular.py b/adapt/parameter_based/_regular.py
index 0ed1f2c..4d8197e 100644
--- a/adapt/parameter_based/_regular.py
+++ b/adapt/parameter_based/_regular.py
@@ -412,41 +412,65 @@ def _initialize_networks(self):
else:
self.task_ = check_network(self.task,
copy=self.copy,
+ force_copy=True,
name="task")
+
+
+ def _initialize_weights(self, shape_X):
+ if hasattr(self, "task_"):
+ self.task_.build((None,) + shape_X)
+ self.build((None,) + shape_X)
self._add_regularization()
- def _get_regularizer(self, old_weight, weight, lambda_=1.):
+ def _get_regularizer(self, old_weight, weight, lambda_):
if self.regularizer == "l2":
- def regularizer():
- return lambda_ * tf.reduce_mean(tf.square(old_weight - weight))
+ return lambda_ * tf.reduce_mean(tf.square(old_weight - weight))
if self.regularizer == "l1":
- def regularizer():
- return lambda_ * tf.reduce_mean(tf.abs(old_weight - weight))
+ return lambda_ * tf.reduce_mean(tf.abs(old_weight - weight))
return regularizer
+ def train_step(self, data):
+ # Unpack the data.
+ Xs, Xt, ys, yt = self._unpack_data(data)
+
+ # Run forward pass.
+ with tf.GradientTape() as tape:
+ y_pred = self.task_(Xt, training=True)
+ if hasattr(self, "_compile_loss") and self._compile_loss is not None:
+ loss = self._compile_loss(yt, y_pred)
+ else:
+ loss = self.compiled_loss(yt, y_pred)
+
+ loss = tf.reduce_mean(loss)
+ loss += sum(self.losses)
+ reg_loss = 0.
+ for i in range(len(self.task_.trainable_variables)):
+ reg_loss += self._get_regularizer(self.old_weights_[i],
+ self.task_.trainable_variables[i],
+ self.lambdas_[i])
+ loss += reg_loss
+
+ # Run backwards pass.
+ gradients = tape.gradient(loss, self.task_.trainable_variables)
+ self.optimizer.apply_gradients(zip(gradients, self.task_.trainable_variables))
+ return self._update_logs(yt, y_pred)
+
+
def _add_regularization(self):
- i = 0
+ self.old_weights_ = []
if not hasattr(self.lambdas, "__iter__"):
- lambdas = [self.lambdas]
+ self.lambdas_ = [self.lambdas] * len(self.task_.weights)
else:
- lambdas = self.lambdas
+ self.lambdas_ = (self.lambdas +
+ [self.lambdas[-1]] * (len(self.task_.weights) - len(self.lambdas)))
+ self.lambdas_ = self.lambdas_[::-1]
- for layer in reversed(self.task_.layers):
- if (hasattr(layer, "weights") and
- layer.weights is not None and
- len(layer.weights) != 0):
- if i >= len(lambdas):
- lambda_ = lambdas[-1]
- else:
- lambda_ = lambdas[i]
- for weight in reversed(layer.weights):
- old_weight = tf.identity(weight)
- old_weight.trainable = False
- self.add_loss(self._get_regularizer(
- old_weight, weight, lambda_))
- i += 1
+ for weight in self.task_.trainable_variables:
+ old_weight = tf.identity(weight)
+ old_weight.trainable = False
+ self.old_weights_.append(old_weight)
def call(self, inputs):
diff --git a/adapt/utils.py b/adapt/utils.py
index c6f37a1..495ebcc 100644
--- a/adapt/utils.py
+++ b/adapt/utils.py
@@ -16,7 +16,6 @@
except:
from scikeras.wrappers import KerasClassifier, KerasRegressor
import tensorflow as tf
-import tensorflow.keras.backend as K
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Layer, Dense, Flatten, Input
from tensorflow.keras.models import clone_model
@@ -88,24 +87,25 @@ def accuracy(y_true, y_pred):
Boolean Tensor
"""
# TODO: accuracy can't handle 1D ys.
- multi_columns_t = K.cast(K.greater(K.shape(y_true)[1], 1),
- "float32")
- binary_t = K.reshape(K.sum(K.cast(K.greater(y_true, 0.5),
- "float32"), axis=-1), (-1,))
- multi_t = K.reshape(K.cast(K.argmax(y_true, axis=-1),
- "float32"), (-1,))
+ dtype = y_pred.dtype
+ multi_columns_t = tf.cast(tf.greater(tf.shape(y_true)[1], 1),
+ dtype)
+ binary_t = tf.reshape(tf.reduce_sum(tf.cast(tf.greater(y_true, 0.5),
+ dtype), axis=-1), (-1,))
+ multi_t = tf.reshape(tf.cast(tf.math.argmax(y_true, axis=-1),
+ dtype), (-1,))
y_true = ((1 - multi_columns_t) * binary_t +
multi_columns_t * multi_t)
- multi_columns_p = K.cast(K.greater(K.shape(y_pred)[1], 1),
- "float32")
- binary_p = K.reshape(K.sum(K.cast(K.greater(y_pred, 0.5),
- "float32"), axis=-1), (-1,))
- multi_p = K.reshape(K.cast(K.argmax(y_pred, axis=-1),
- "float32"), (-1,))
+ multi_columns_p = tf.cast(tf.greater(tf.shape(y_pred)[1], 1),
+ dtype)
+ binary_p = tf.reshape(tf.reduce_sum(tf.cast(tf.greater(y_pred, 0.5),
+ dtype), axis=-1), (-1,))
+ multi_p = tf.reshape(tf.cast(tf.math.argmax(y_pred, axis=-1),
+ dtype), (-1,))
y_pred = ((1 - multi_columns_p) * binary_p +
- multi_columns_p * multi_p)
- return tf.keras.metrics.get("acc")(y_true, y_pred)
+ multi_columns_p * multi_p)
+ return tf.cast(tf.math.equal(y_true, y_pred), dtype)
def predict(self, x, **kwargs):
@@ -259,11 +259,11 @@ def check_network(network, copy=True,
# but no input_shape
if hasattr(network, "input_shape"):
shape = network.input_shape[1:]
- new_network = clone_model(network, input_tensors=Input(shape))
+ new_network = clone_model(network)
new_network.set_weights(network.get_weights())
elif network.built:
shape = network._build_input_shape[1:]
- new_network = clone_model(network, input_tensors=Input(shape))
+ new_network = clone_model(network)
new_network.set_weights(network.get_weights())
else:
new_network = clone_model(network)
@@ -284,7 +284,7 @@ def check_network(network, copy=True,
new_network._name = name
# Override the predict method to speed the prediction for small dataset
- new_network.predict = predict.__get__(new_network)
+ # new_network.predict = predict.__get__(new_network)
return new_network
@@ -366,62 +366,6 @@ def get_default_discriminator(name=None, state=None):
return model
-@tf.custom_gradient
-def _grad_handler(x, lambda_):
- y = tf.identity(x)
- def custom_grad(dy):
- return (lambda_ * dy, 0. * lambda_)
- return y, custom_grad
-
-class GradientHandler(Layer):
- """
- Multiply gradients with a scalar during backpropagation.
-
- Act as identity in forward step.
-
- Parameters
- ----------
- lambda_init : float (default=1.)
- Scalar multiplier
- """
- def __init__(self, lambda_init=1., name="g_handler"):
- super().__init__(name=name)
- self.lambda_init=lambda_init
- self.lambda_ = tf.Variable(lambda_init,
- trainable=False,
- dtype="float32")
-
- def call(self, x):
- """
- Call gradient handler.
-
- Parameters
- ----------
- x: object
- Inputs
-
- Returns
- -------
- x, custom gradient function
- """
- return _grad_handler(x, self.lambda_)
-
-
- def get_config(self):
- """
- Return config dictionnary.
-
- Returns
- -------
- dict
- """
- config = super().get_config().copy()
- config.update({
- 'lambda_init': self.lambda_init
- })
- return config
-
-
def make_classification_da(n_samples=100,
n_features=2,
random_state=2):
@@ -638,8 +582,18 @@ def check_fitted_network(estimator):
if isinstance(estimator, Model):
estimator.__deepcopy__ = __deepcopy__.__get__(estimator)
return estimator
-
-
+
+
+def check_if_compiled(network):
+ """
+ Check if the network is compiled.
+ """
+ if hasattr(network, "compiled") and network.compiled:
+ return True
+ elif hasattr(network, "_is_compiled") and network._is_compiled:
+ return True
+ else:
+ return False
# Try to save the initial estimator if it is a Keras Model
# This is required for cloning the adapt method.
diff --git a/setup.py b/setup.py
index a6fac40..4a733c9 100644
--- a/setup.py
+++ b/setup.py
@@ -1,8 +1,9 @@
+import os
from setuptools import setup, find_packages
-from pathlib import Path
-this_directory = Path(__file__).parent
-long_description = (this_directory / "README.md").read_text()
+ROOT = os.path.abspath(os.path.dirname(__file__))
+with open(os.path.join(ROOT, 'README.md'), encoding="utf-8") as f:
+ long_description = f.read()
setup(
name='adapt',
diff --git a/tests/test_adda.py b/tests/test_adda.py
index 512d482..a5712a3 100644
--- a/tests/test_adda.py
+++ b/tests/test_adda.py
@@ -6,12 +6,9 @@
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential, Model
-from tensorflow.keras.layers import Dense
+from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.initializers import GlorotUniform
-try:
- from tensorflow.keras.optimizers.legacy import Adam
-except:
- from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.optimizers import Adam
from adapt.feature_based import ADDA
@@ -29,7 +26,8 @@
def _get_encoder(input_shape=Xs.shape[1:]):
model = Sequential()
- model.add(Dense(1, input_shape=input_shape,
+ model.add(Input(shape=input_shape))
+ model.add(Dense(1,
kernel_initializer="ones",
use_bias=False))
model.compile(loss="mse", optimizer="adam")
@@ -38,8 +36,8 @@ def _get_encoder(input_shape=Xs.shape[1:]):
def _get_discriminator(input_shape=(1,)):
model = Sequential()
+ model.add(Input(shape=input_shape))
model.add(Dense(10,
- input_shape=input_shape,
kernel_initializer=GlorotUniform(seed=0),
activation="elu"))
model.add(Dense(1,
@@ -51,10 +49,10 @@ def _get_discriminator(input_shape=(1,)):
def _get_task(input_shape=(1,), output_shape=(1,)):
model = Sequential()
+ model.add(Input(shape=input_shape))
model.add(Dense(np.prod(output_shape),
use_bias=False,
- kernel_initializer=GlorotUniform(seed=0),
- input_shape=input_shape))
+ kernel_initializer=GlorotUniform(seed=0)))
model.compile(loss="mse", optimizer=Adam(0.1))
return model
diff --git a/tests/test_base.py b/tests/test_base.py
index 71fc9c1..3d90cbd 100644
--- a/tests/test_base.py
+++ b/tests/test_base.py
@@ -2,17 +2,15 @@
Test base
"""
+import os
import copy
import shutil
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras import Sequential, Model
-from tensorflow.keras.layers import Dense
-try:
- from tensorflow.keras.optimizers.legacy import Adam
-except:
- from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.layers import Dense, Input
+from tensorflow.keras.optimizers import Adam
from sklearn.utils.estimator_checks import check_estimator
from sklearn.base import clone
@@ -32,7 +30,7 @@
def _custom_metric(yt, yp):
- return tf.shape(yt)[0]
+ return tf.ones_like(yt) * tf.cast(tf.shape(yt)[0], yt.dtype)
class DummyFeatureBased(BaseAdaptEstimator):
@@ -104,7 +102,8 @@ def test_base_adapt_score():
def test_base_adapt_keras_estimator():
est = Sequential()
- est.add(Dense(1, input_shape=Xs.shape[1:]))
+ est.add(Input(shape=Xs.shape[1:]))
+ est.add(Dense(1))
est.compile(loss="mse", optimizer=Adam(0.01))
model = BaseAdaptEstimator(est, Xt=Xt)
model.fit(Xs, ys)
@@ -126,7 +125,8 @@ def test_base_adapt_keras_estimator():
assert not isinstance(model.estimator_.optimizer, Adam)
est = Sequential()
- est.add(Dense(1, input_shape=Xs.shape[1:]))
+ est.add(Input(shape=Xs.shape[1:]))
+ est.add(Dense(1))
model = BaseAdaptEstimator(est, Xt=Xt, loss="mae",
optimizer=Adam(0.01, beta_1=0.5),
learning_rate=0.1)
@@ -159,18 +159,15 @@ def test_base_adapt_deep():
ypt = model.predict_task(Xt)
ypd = model.predict_disc(Xt)
- new_model = clone(model)
+ model.save_weights("./model.weights.h5")
+ new_model = BaseAdaptDeep(Xt=Xt, loss="mse",
+ epochs=0,
+ optimizer=Adam(),
+ learning_rate=0.1,
+ random_state=0)
new_model.fit(Xs, ys)
- yp2 = new_model.predict(Xt)
- score2 = new_model.score(Xt, yt)
- score_adapt2 = new_model.unsupervised_score(Xs, Xt)
- X_enc2 = new_model.transform(Xs)
- ypt2 = new_model.predict_task(Xt)
- ypd2 = new_model.predict_disc(Xt)
-
- model.save("model.tf", save_format="tf")
- new_model = tf.keras.models.load_model("model.tf")
- shutil.rmtree("model.tf")
+ new_model.load_weights("./model.weights.h5")
+ os.remove("./model.weights.h5")
yp3 = new_model.predict(Xt)
assert isinstance(model.optimizer, Adam)
@@ -178,6 +175,31 @@ def test_base_adapt_deep():
assert hasattr(model, "encoder_")
assert hasattr(model, "task_")
assert hasattr(model, "discriminator_")
+ assert np.mean(np.abs(yp - yp3)) < 1e-6
+
+
+def test_base_adapt_deep_clone():
+ model = BaseAdaptDeep(Xt=Xt, loss="mse",
+ epochs=2,
+ optimizer=Adam(),
+ learning_rate=0.1,
+ random_state=0)
+ model.fit(Xs, ys)
+ yp = model.predict(Xt)
+ score = model.score(Xt, yt)
+ score_adapt = model.unsupervised_score(Xs, Xt)
+ X_enc = model.transform(Xs)
+ ypt = model.predict_task(Xt)
+ ypd = model.predict_disc(Xt)
+
+ new_model = clone(model)
+ new_model.fit(Xs, ys)
+ yp2 = new_model.predict(Xt)
+ score2 = new_model.score(Xt, yt)
+ score_adapt2 = new_model.unsupervised_score(Xs, Xt)
+ X_enc2 = new_model.transform(Xs)
+ ypt2 = new_model.predict_task(Xt)
+ ypd2 = new_model.predict_disc(Xt)
assert np.all(yp == yp2)
assert score == score2
@@ -185,7 +207,6 @@ def test_base_adapt_deep():
assert np.all(ypt == ypt2)
assert np.all(ypd == ypd2)
assert np.all(X_enc == X_enc2)
- assert np.mean(np.abs(yp - yp3)) < 1e-6
def test_base_deep_validation_data():
@@ -267,9 +288,9 @@ def test_complete_batch():
model.fit(dataset, batch_size=32, validation_data=dataset)
assert model.history_["cm"][0] == 32
- model = BaseAdaptDeep(Xt=Xtt.batch(32), metrics=[_custom_metric])
- model.fit(dataset.batch(32), batch_size=48, validation_data=dataset.batch(32))
- assert model.history_["cm"][0] == 25
+ model = BaseAdaptDeep(Xt=Xtt, metrics=[_custom_metric])
+ model.fit(dataset, batch_size=48, validation_data=dataset.batch(32))
+ assert model.history_["cm"][0] == 48
def gens():
for i in range(40):
@@ -289,11 +310,30 @@ def gent():
model = BaseAdaptDeep(metrics=[_custom_metric])
model.fit(dataset, Xt=dataset2, validation_data=dataset, batch_size=22)
- assert model.history_["cm"][0] == 22
assert model.total_steps_ == 3
assert model.length_src_ == 40
assert model.length_tgt_ == 50
+ assert model.history_["cm"][0] == 22
model.fit(dataset, Xt=dataset2, validation_data=dataset, batch_size=32)
assert model.total_steps_ == 2
- assert model.history_["cm"][-1] == 32
\ No newline at end of file
+ assert model.history_["cm"][-1] == 32
+
+
+def test_batch_error():
+ dataset = tf.data.Dataset.zip((tf.data.Dataset.from_tensor_slices(Xs),
+ tf.data.Dataset.from_tensor_slices(ys.reshape(-1,1))
+ ))
+ Xtt = tf.data.Dataset.from_tensor_slices(Xt)
+
+ model = BaseAdaptDeep(Xt=Xtt, metrics=[_custom_metric])
+
+ with pytest.raises(ValueError) as excinfo:
+ model.fit(dataset.batch(32), batch_size=48, validation_data=dataset.batch(32))
+ assert "X is already divided" in str(excinfo.value)
+
+ model = BaseAdaptDeep(Xt=Xtt.batch(32), metrics=[_custom_metric])
+
+ with pytest.raises(ValueError) as excinfo:
+ model.fit(dataset, batch_size=48, validation_data=dataset.batch(32))
+ assert "Xt is already divided" in str(excinfo.value)
\ No newline at end of file
diff --git a/tests/test_ccsa.py b/tests/test_ccsa.py
index eddb323..fc048fe 100644
--- a/tests/test_ccsa.py
+++ b/tests/test_ccsa.py
@@ -4,10 +4,7 @@
from adapt.utils import make_classification_da
from adapt.feature_based import CCSA
from tensorflow.keras.initializers import GlorotUniform
-try:
- from tensorflow.keras.optimizers.legacy import Adam
-except:
- from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.optimizers import Adam
np.random.seed(0)
tf.random.set_seed(0)
diff --git a/tests/test_cdan.py b/tests/test_cdan.py
index d91e441..c62912c 100644
--- a/tests/test_cdan.py
+++ b/tests/test_cdan.py
@@ -5,11 +5,8 @@
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential, Model
-from tensorflow.keras.layers import Dense
-try:
- from tensorflow.keras.optimizers.legacy import Adam
-except:
- from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.layers import Dense, Input
+from tensorflow.keras.optimizers import Adam
from tensorflow.keras.initializers import GlorotUniform
from adapt.feature_based import CDAN
@@ -30,7 +27,8 @@ def _entropy(x):
def _get_encoder(input_shape=Xs.shape[1:], units=10):
model = Sequential()
- model.add(Dense(units, input_shape=input_shape,
+ model.add(Input(shape=input_shape))
+ model.add(Dense(units,
kernel_initializer=GlorotUniform(seed=0),))
model.compile(loss="mse", optimizer="adam")
return model
@@ -38,8 +36,8 @@ def _get_encoder(input_shape=Xs.shape[1:], units=10):
def _get_discriminator(input_shape=(10*2,)):
model = Sequential()
+ model.add(Input(shape=input_shape))
model.add(Dense(10,
- input_shape=input_shape,
kernel_initializer=GlorotUniform(seed=0),
activation="relu"))
model.add(Dense(1, activation="sigmoid", kernel_initializer=GlorotUniform(seed=0)))
@@ -49,9 +47,9 @@ def _get_discriminator(input_shape=(10*2,)):
def _get_task(input_shape=(10,)):
model = Sequential()
+ model.add(Input(shape=input_shape))
model.add(Dense(2,
kernel_initializer=GlorotUniform(seed=0),
- input_shape=input_shape,
activation="softmax"))
model.compile(loss="mse", optimizer=Adam(0.1))
return model
@@ -66,8 +64,8 @@ def test_fit_lambda_zero():
random_state=0, validation_data=(Xt, ytt))
model.fit(Xs, yss, Xt, ytt,
epochs=300, verbose=0)
- assert model.history_['acc'][-1] > 0.9
- assert model.history_['val_acc'][-1] < 0.9
+ assert model.history_['accuracy'][-1] > 0.9
+ assert model.history_['val_accuracy'][-1] < 0.9
def test_fit_lambda_one_no_entropy():
@@ -79,8 +77,8 @@ def test_fit_lambda_one_no_entropy():
random_state=0, validation_data=(Xt, ytt))
model.fit(Xs, yss, Xt, ytt,
epochs=300, verbose=0)
- assert model.history_['acc'][-1] > 0.8
- assert model.history_['val_acc'][-1] > 0.8
+ assert model.history_['accuracy'][-1] > 0.8
+ assert model.history_['val_accuracy'][-1] > 0.8
def test_fit_lambda_entropy():
diff --git a/tests/test_dann.py b/tests/test_dann.py
index 8b31bf2..65760df 100644
--- a/tests/test_dann.py
+++ b/tests/test_dann.py
@@ -6,11 +6,8 @@
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential, Model
-from tensorflow.keras.layers import Dense
-try:
- from tensorflow.keras.optimizers.legacy import Adam
-except:
- from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.layers import Dense, Input
+from tensorflow.keras.optimizers import Adam, SGD
from adapt.feature_based import DANN
from adapt.utils import UpdateLambda
@@ -30,7 +27,8 @@
def _get_encoder(input_shape=Xs.shape[1:]):
model = Sequential()
- model.add(Dense(1, input_shape=input_shape,
+ model.add(Input(shape=input_shape))
+ model.add(Dense(1,
kernel_initializer="ones",
use_bias=False))
model.compile(loss="mse", optimizer="adam")
@@ -39,8 +37,8 @@ def _get_encoder(input_shape=Xs.shape[1:]):
def _get_discriminator(input_shape=(1,)):
model = Sequential()
+ model.add(Input(shape=input_shape))
model.add(Dense(10,
- input_shape=input_shape,
kernel_initializer=GlorotUniform(seed=0),
activation="elu"))
model.add(Dense(1,
@@ -52,10 +50,10 @@ def _get_discriminator(input_shape=(1,)):
def _get_task(input_shape=(1,), output_shape=(1,)):
model = Sequential()
+ model.add(Input(shape=input_shape))
model.add(Dense(np.prod(output_shape),
kernel_initializer=GlorotUniform(seed=0),
- use_bias=False,
- input_shape=input_shape))
+ use_bias=False))
model.compile(loss="mse", optimizer=Adam(0.1))
return model
@@ -64,10 +62,10 @@ def test_fit_lambda_zero():
tf.random.set_seed(0)
np.random.seed(0)
model = DANN(_get_encoder(), _get_task(), _get_discriminator(),
- lambda_=0, loss="mse", optimizer=Adam(0.01), metrics=["mae"],
+ lambda_=0., loss="mse", optimizer=Adam(0.01), metrics=["mae"],
random_state=0)
model.fit(Xs, ys, Xt=Xt, yt=yt,
- epochs=200, batch_size=32, verbose=0)
+ epochs=400, batch_size=32, verbose=0)
assert isinstance(model, Model)
assert model.encoder_.get_weights()[0][1][0] == 1.0
assert np.sum(np.abs(model.predict(Xs) - ys)) < 0.01
@@ -78,9 +76,9 @@ def test_fit_lambda_one():
tf.random.set_seed(0)
np.random.seed(0)
model = DANN(_get_encoder(), _get_task(), _get_discriminator(),
- lambda_=1, loss="mse", optimizer=Adam(0.01), random_state=0)
+ lambda_=1., loss="mse", optimizer=Adam(0.01), random_state=0)
model.fit(Xs, ys, Xt, yt,
- epochs=100, batch_size=32, verbose=0)
+ epochs=200, batch_size=32, verbose=0)
assert isinstance(model, Model)
assert np.abs(model.encoder_.get_weights()[0][1][0] /
model.encoder_.get_weights()[0][0][0]) < 0.15
@@ -101,7 +99,7 @@ def test_fit_lambda_update():
model.encoder_.get_weights()[0][0][0]) < 0.2
assert np.sum(np.abs(model.predict(Xs) - ys)) < 1
assert np.sum(np.abs(model.predict(Xt) - yt)) < 5
- assert model.lambda_.numpy() == 1
+ assert np.abs(model.lambda_.numpy() - 1.) < 0.01
def test_optimizer_enc_disc():
diff --git a/tests/test_finetuning.py b/tests/test_finetuning.py
index 5a45ccf..a4ec112 100644
--- a/tests/test_finetuning.py
+++ b/tests/test_finetuning.py
@@ -5,10 +5,7 @@
from adapt.utils import make_classification_da
from adapt.parameter_based import FineTuning
from tensorflow.keras.initializers import GlorotUniform
-try:
- from tensorflow.keras.optimizers.legacy import Adam
-except:
- from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.optimizers import Adam
np.random.seed(0)
tf.random.set_seed(0)
@@ -44,7 +41,7 @@ def test_finetune():
loss="bce", optimizer=Adam(), random_state=0)
fine_tuned.fit(Xt[ind], yt[ind], epochs=100, verbose=0)
- assert np.abs(fine_tuned.encoder_.get_weights()[0] - model.encoder_.get_weights()[0]).sum() > 1.
+ assert np.abs(fine_tuned.encoder_.get_weights()[0] - model.encoder_.get_weights()[0]).sum() > 0.5
assert np.mean((fine_tuned.predict(Xt).ravel()>0.5) == yt) > 0.9
fine_tuned = FineTuning(encoder=model.encoder_, task=model.task_,
@@ -53,7 +50,7 @@ def test_finetune():
fine_tuned.fit(Xt[ind], yt[ind], epochs=100, verbose=0)
assert np.abs(fine_tuned.encoder_.get_weights()[0] - model.encoder_.get_weights()[0]).sum() == 0.
- assert np.abs(fine_tuned.encoder_.get_weights()[-1] - model.encoder_.get_weights()[-1]).sum() > 1.
+ assert np.abs(fine_tuned.encoder_.get_weights()[-1] - model.encoder_.get_weights()[-1]).sum() > .5
fine_tuned = FineTuning(encoder=model.encoder_, task=model.task_,
training=[False],
diff --git a/tests/test_iwc.py b/tests/test_iwc.py
index 2fee586..fb1d413 100644
--- a/tests/test_iwc.py
+++ b/tests/test_iwc.py
@@ -7,10 +7,7 @@
from adapt.utils import make_classification_da
from adapt.instance_based import IWC
from adapt.utils import get_default_discriminator
-try:
- from tensorflow.keras.optimizers.legacy import Adam
-except:
- from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.optimizers import Adam
Xs, ys, Xt, yt = make_classification_da()
diff --git a/tests/test_iwn.py b/tests/test_iwn.py
index 3276187..4d6bb9e 100644
--- a/tests/test_iwn.py
+++ b/tests/test_iwn.py
@@ -7,10 +7,7 @@
from adapt.instance_based import IWN
from adapt.utils import get_default_task
from sklearn.neighbors import KNeighborsClassifier
-try:
- from tensorflow.keras.optimizers.legacy import Adam
-except:
- from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.optimizers import Adam
Xs, ys, Xt, yt = make_classification_da()
diff --git a/tests/test_mcd.py b/tests/test_mcd.py
index b7740e5..0b0d593 100644
--- a/tests/test_mcd.py
+++ b/tests/test_mcd.py
@@ -5,11 +5,8 @@
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential, Model
-from tensorflow.keras.layers import Dense
-try:
- from tensorflow.keras.optimizers.legacy import Adam
-except:
- from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.layers import Dense, Input
+from tensorflow.keras.optimizers import Adam
from tensorflow.keras.initializers import GlorotUniform
from adapt.feature_based import MCD
@@ -28,7 +25,8 @@
def _get_encoder(input_shape=Xs.shape[1:]):
model = Sequential()
- model.add(Dense(1, input_shape=input_shape,
+ model.add(Input(shape=input_shape))
+ model.add(Dense(1,
kernel_initializer="ones",
use_bias=False))
model.compile(loss="mse", optimizer="adam")
@@ -37,8 +35,8 @@ def _get_encoder(input_shape=Xs.shape[1:]):
def _get_discriminator(input_shape=(1,)):
model = Sequential()
+ model.add(Input(shape=input_shape))
model.add(Dense(10,
- input_shape=input_shape,
kernel_initializer=GlorotUniform(seed=0),
activation="relu"))
model.add(Dense(1,
@@ -50,10 +48,10 @@ def _get_discriminator(input_shape=(1,)):
def _get_task(input_shape=(1,), output_shape=(1,)):
model = Sequential()
+ model.add(Input(shape=input_shape))
model.add(Dense(np.prod(output_shape),
kernel_initializer=GlorotUniform(seed=0),
- use_bias=False,
- input_shape=input_shape))
+ use_bias=False))
model.compile(loss="mse", optimizer=Adam(0.1))
return model
diff --git a/tests/test_mdd.py b/tests/test_mdd.py
index 294c06e..d1ff96f 100644
--- a/tests/test_mdd.py
+++ b/tests/test_mdd.py
@@ -5,11 +5,8 @@
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential, Model
-from tensorflow.keras.layers import Dense
-try:
- from tensorflow.keras.optimizers.legacy import Adam
-except:
- from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.layers import Dense, Input
+from tensorflow.keras.optimizers import Adam
from tensorflow.keras.initializers import GlorotUniform
from adapt.feature_based import MDD
@@ -28,8 +25,8 @@
def _get_encoder(input_shape=Xs.shape[1:]):
model = Sequential()
- model.add(Dense(1, input_shape=input_shape,
- kernel_initializer="ones",
+ model.add(Input(shape=input_shape))
+ model.add(Dense(1, kernel_initializer="ones",
use_bias=False))
model.compile(loss="mse", optimizer="adam")
return model
@@ -37,8 +34,8 @@ def _get_encoder(input_shape=Xs.shape[1:]):
def _get_discriminator(input_shape=(1,)):
model = Sequential()
+ model.add(Input(shape=input_shape))
model.add(Dense(10,
- input_shape=input_shape,
kernel_initializer=GlorotUniform(seed=0),
activation="relu"))
model.add(Dense(1,
@@ -50,10 +47,10 @@ def _get_discriminator(input_shape=(1,)):
def _get_task(input_shape=(1,), output_shape=(1,)):
model = Sequential()
+ model.add(Input(shape=input_shape))
model.add(Dense(np.prod(output_shape),
use_bias=False,
- kernel_initializer=GlorotUniform(seed=0),
- input_shape=input_shape))
+ kernel_initializer=GlorotUniform(seed=0)))
model.compile(loss="mse", optimizer=Adam(0.1))
return model
diff --git a/tests/test_regular.py b/tests/test_regular.py
index 32ad2a2..ce50eb4 100644
--- a/tests/test_regular.py
+++ b/tests/test_regular.py
@@ -10,11 +10,8 @@
from sklearn.base import clone
import tensorflow as tf
from tensorflow.keras import Sequential, Model
-from tensorflow.keras.layers import Dense
-try:
- from tensorflow.keras.optimizers.legacy import Adam
-except:
- from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.layers import Dense, Input
+from tensorflow.keras.optimizers import Adam
from tensorflow.keras.initializers import GlorotUniform
from adapt.utils import make_classification_da, make_regression_da
@@ -43,10 +40,10 @@
def _get_network(input_shape=(1,), output_shape=(1,)):
model = Sequential()
+ model.add(Input(shape=input_shape))
model.add(Dense(np.prod(output_shape),
- input_shape=input_shape,
kernel_initializer=GlorotUniform(seed=0),
- use_bias=False))
+ use_bias=True))
model.compile(loss="mse", optimizer=Adam(0.1))
return model
@@ -153,17 +150,20 @@ def test_regularnn_fit():
tf.random.set_seed(0)
np.random.seed(0)
network = _get_network()
+ print(network.get_weights())
network.fit(Xs, ys_reg, epochs=100, batch_size=100, verbose=0)
- model = RegularTransferNN(network, lambdas=0., optimizer=Adam(0.1))
+ print(network.get_weights())
+ model = RegularTransferNN(network, lambdas=0., optimizer=Adam(0.1), loss="mse")
model.fit(Xt, yt_reg, epochs=100, batch_size=100, verbose=0)
+ print(model.task_.get_weights())
# assert np.abs(network.predict(Xs) - ys_reg).sum() < 1
- assert np.sum(np.abs(network.get_weights()[0] - model.get_weights()[0])) > 4.
+ assert np.sum(np.abs(network.get_weights()[0] - model.task_.get_weights()[0])) > 4.
assert np.abs(model.predict(Xt) - yt_reg).sum() < 10
model = RegularTransferNN(network, lambdas=10000000., optimizer=Adam(0.1))
model.fit(Xt, yt_reg, epochs=100, batch_size=100, verbose=0)
- assert np.sum(np.abs(network.get_weights()[0] - model.get_weights()[0])) < 0.001
+ assert np.sum(np.abs(network.get_weights()[0] - model.task_.get_weights()[0])) < 0.001
assert np.abs(model.predict(Xt) - yt_reg).sum() > 10
diff --git a/tests/test_tradaboost.py b/tests/test_tradaboost.py
index 2c8623c..4fc1c72 100644
--- a/tests/test_tradaboost.py
+++ b/tests/test_tradaboost.py
@@ -8,10 +8,7 @@
from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge, RidgeClassifier
from sklearn.metrics import r2_score, accuracy_score
import tensorflow as tf
-try:
- from tensorflow.keras.optimizers.legacy import Adam
-except:
- from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.optimizers import Adam
from adapt.instance_based import (TrAdaBoost,
TrAdaBoostR2,
diff --git a/tests/test_utils.py b/tests/test_utils.py
index c781fc1..18c259f 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -6,7 +6,6 @@
import numpy as np
import pytest
import tensorflow as tf
-import tensorflow.keras.backend as K
from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
@@ -19,7 +18,6 @@
# from tensorflow.keras.wrappers.scikit_learn import KerasClassifier, KerasRegressor
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Input, Dense, Flatten, Reshape
-from tensorflow.python.keras.engine.input_layer import InputLayer
from adapt.utils import *
@@ -50,7 +48,7 @@ def is_equal_estimator(v1, v2):
elif isinstance(v1, Tree):
pass # TODO create a function to check if two tree are equal
else:
- if not "input" in str(v1):
+ if not "input" in str(v1) and not "input" in str(v2):
assert v1 == v2
return True
@@ -80,13 +78,10 @@ def __deepcopy__(self):
raise ValueError("Can not be deep copied!")
-def _get_model_Model(compiled=True, custom_loss=False):
+def _get_model_Model(compiled=True):
inputs = Input((10,))
output = Dense(1)(inputs)
model = Model(inputs, output)
- if custom_loss:
- loss = K.mean(output)
- model.add_loss(loss)
if compiled:
model.compile(loss="mse", optimizer="adam")
return model
@@ -128,11 +123,11 @@ def test_check_arrays_no_array():
networks = [
- _get_model_Model(compiled=True, custom_loss=False),
+ _get_model_Model(compiled=True),
_get_model_Sequential(compiled=True, input_shape=(10,)),
_get_model_Sequential(compiled=True, input_shape=None),
- _get_model_Model(compiled=False, custom_loss=False),
- _get_model_Model(compiled=False, custom_loss=True),
+ _get_model_Model(compiled=False),
+ _get_model_Model(compiled=False),
_get_model_Sequential(compiled=False, input_shape=(10,)),
_get_model_Sequential(compiled=False, input_shape=None)
]
@@ -347,22 +342,6 @@ def test_get_default_discriminator():
assert model.layers[3].get_config()["activation"] == "sigmoid"
-scales = [-1, 0, 1., 0.1]
-
-@pytest.mark.parametrize("lambda_", scales)
-def test_gradienthandler(lambda_):
- grad_handler = GradientHandler(lambda_)
- inputs = K.variable([1, 2, 3])
- assert np.all(grad_handler(inputs) == inputs)
- with tf.GradientTape() as tape:
- gradient = tape.gradient(grad_handler(inputs),
- inputs)
- assert np.all(gradient == lambda_ * np.ones(3))
- config = grad_handler.get_config()
- assert config['lambda_init'] == lambda_
-
-
-
def test_make_classification_da():
Xs, ys, Xt, yt = make_classification_da()
assert Xs.shape == (100, 2)
@@ -413,11 +392,13 @@ def test_accuracy():
def test_updatelambda():
up = UpdateLambda()
- dummy = DummyModel()
- dummy.lambda_ = tf.Variable(0.)
- up.model = dummy
- for _ in range(1000):
- up.on_batch_end(0, None)
+ dummy = Sequential()
+ dummy.add(Dense(1))
+ dummy.compile(loss="mse", optimizer="adam")
+ dummy.lambda_ = tf.Variable(0., trainable=False)
+ dummy.fit(np.zeros((100, 1)), np.zeros((100,)),
+ batch_size=1, epochs=10, verbose=0,
+ callbacks=[up])
assert dummy.lambda_.numpy() == 1.
diff --git a/tests/test_wann.py b/tests/test_wann.py
index 7286d8e..6d7671a 100644
--- a/tests/test_wann.py
+++ b/tests/test_wann.py
@@ -4,10 +4,7 @@
import numpy as np
from sklearn.linear_model import LinearRegression
-try:
- from tensorflow.keras.optimizers.legacy import Adam
-except:
- from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.optimizers import Adam
import tensorflow as tf
from adapt.instance_based import WANN
diff --git a/tests/test_wdgrl.py b/tests/test_wdgrl.py
index 652d6a1..ffb095e 100644
--- a/tests/test_wdgrl.py
+++ b/tests/test_wdgrl.py
@@ -6,10 +6,7 @@
import tensorflow as tf
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Dense
-try:
- from tensorflow.keras.optimizers.legacy import Adam
-except:
- from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.optimizers import Adam
from tensorflow.keras.initializers import GlorotUniform
from adapt.feature_based import WDGRL
@@ -82,6 +79,6 @@ def test_fit_lambda_one():
epochs=300, verbose=0)
assert isinstance(model, Model)
assert np.abs(model.encoder_.get_weights()[0][1][0] /
- model.encoder_.get_weights()[0][0][0]) < 0.2
+ model.encoder_.get_weights()[0][0][0]) < 0.3
assert np.sum(np.abs(model.predict(Xs).ravel() - ys)) < 2
assert np.sum(np.abs(model.predict(Xt).ravel() - yt)) < 5