update to new tf version
antoinedemathelin committed Oct 29, 2024
1 parent 3d06483 commit fc8d586
Showing 12 changed files with 283 additions and 293 deletions.
238 changes: 134 additions & 104 deletions adapt/base.py

Large diffs are not rendered by default.
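The adapt/base.py diff is too large to render here, but the files below all switch from the old inline metric bookkeeping (self.compiled_metrics.update_state, self.compiled_loss, then a dict of metric results) to a new _update_logs helper defined in base.py. Its actual implementation is not shown; the following is only a plausible sketch, assuming it follows the standard custom train_step pattern of recent Keras versions, where compiled_metrics and compiled_loss are no longer exposed:

    def _update_logs(self, ys, ys_pred):
        # Assumed sketch, not the actual base.py code: update each compiled
        # metric directly and return the current results as a logs dict.
        for metric in self.metrics:
            if metric.name == "loss":
                metric.update_state(self.compute_loss(y=ys, y_pred=ys_pred))
            else:
                metric.update_state(ys, ys_pred)
        return {m.name: m.result() for m in self.metrics}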

24 changes: 10 additions & 14 deletions adapt/feature_based/_adda.py
@@ -183,14 +183,11 @@ def pretrain_step(self, data):
gradients_enc = enc_tape.gradient(enc_loss, trainable_vars_enc)

# Update weights
self.optimizer.apply_gradients(zip(gradients_task, trainable_vars_task))
self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))
self.pretrain_optimizer.apply_gradients(zip(gradients_task, trainable_vars_task))
self.pretrain_optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))

# Update metrics
self.compiled_metrics.update_state(ys, ys_pred)
self.compiled_loss(ys, ys_pred)
# Return a dict mapping metric names to current value
logs = {m.name: m.result() for m in self.metrics}
logs = self._update_logs(ys, ys_pred)
return logs


@@ -211,6 +208,8 @@ def train_step(self, data):
else:
# encoder src is not needed if pretrain=False
Xs_enc = Xs

ys_pred = self.task_(Xs_enc, training=False)

ys_disc = self.discriminator_(Xs_enc, training=True)

@@ -245,7 +244,8 @@ def train_step(self, data):
# self.compiled_loss(ys, ys_pred)
# Return a dict mapping metric names to current value
# logs = {m.name: m.result() for m in self.metrics}
logs = self._get_disc_metrics(ys_disc, yt_disc)
logs = self._update_logs(ys, ys_pred)
logs.update(self._get_disc_metrics(ys_disc, yt_disc))
return logs


@@ -262,15 +262,11 @@ def _get_disc_metrics(self, ys_disc, yt_disc):
))
return disc_dict


def _initialize_weights(self, shape_X):
# Init weights encoder
self(np.zeros((1,) + shape_X))

# Set same weights to encoder_src

def _initialize_networks(self):
super()._initialize_networks()
if self.pretrain:
# encoder src is not needed if pretrain=False
self.encoder_(np.zeros((1,) + shape_X))
self.encoder_src_ = check_network(self.encoder_,
copy=True,
name="encoder_src")
5 changes: 1 addition & 4 deletions adapt/feature_based/_ccsa.py
@@ -190,9 +190,6 @@ def train_step(self, data):
self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))

# Update metrics
self.compiled_metrics.update_state(ys, ys_pred)
self.compiled_loss(ys, ys_pred)
# Return a dict mapping metric names to current value
logs = {m.name: m.result() for m in self.metrics}
logs = self._update_logs(ys, ys_pred)
logs.update({"contrast": contrastive_loss})
return logs
25 changes: 11 additions & 14 deletions adapt/feature_based/_cdan.py
@@ -282,10 +282,7 @@ def train_step(self, data):
self.optimizer_disc.apply_gradients(zip(gradients_disc, trainable_vars_disc))

# Update metrics
self.compiled_metrics.update_state(ys, ys_pred)
self.compiled_loss(ys, ys_pred)
# Return a dict mapping metric names to current value
logs = {m.name: m.result() for m in self.metrics}
logs = self._update_logs(ys, ys_pred)
disc_metrics = self._get_disc_metrics(ys_disc, yt_disc)
logs.update({"disc_loss": disc_loss})
logs.update(disc_metrics)
@@ -303,19 +300,19 @@ def _get_disc_metrics(self, ys_disc, yt_disc):


def _initialize_weights(self, shape_X):
self(np.zeros((1,) + shape_X))
Xs_enc = self.encoder_(np.zeros((1,) + shape_X), training=True)
ys_pred = self.task_(Xs_enc, training=True)
if Xs_enc.get_shape()[1] * ys_pred.get_shape()[1] > self.max_features:
self.encoder_.build((None,) + shape_X)
self.task_.build(self.encoder_.output_shape)
if self.encoder_.output_shape[1] * self.task_.output_shape[1] > self.max_features:
self.is_overloaded_ = True
self._random_task = tf.random.normal([ys_pred.get_shape()[1],
self.max_features])
self._random_enc = tf.random.normal([Xs_enc.get_shape()[1],
self.max_features])
self.discriminator_(np.zeros((1, self.max_features)))
self._random_task = tf.random.normal([self.task_.output_shape[1],
self.max_features])
self._random_enc = tf.random.normal([self.encoder_.output_shape[1],
self.max_features])
self.discriminator_.build((None, self.max_features))
else:
self.is_overloaded_ = False
self.discriminator_(np.zeros((1, Xs_enc.get_shape()[1] * ys_pred.get_shape()[1])))
self.discriminator_.build((None, self.encoder_.output_shape[1] * self.task_.output_shape[1]))
self.build((None,) + shape_X)
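The rewritten _initialize_weights above replaces dummy forward passes on np.zeros arrays with explicit build() calls, so shapes are read from encoder_.output_shape and task_.output_shape rather than from returned tensors. A small standalone illustration of that pattern on a toy model (not code from this repository):

    import tensorflow as tf

    encoder = tf.keras.Sequential([tf.keras.layers.Dense(16), tf.keras.layers.Dense(8)])
    encoder.build((None, 4))       # creates the weights for 4 input features
    print(encoder.output_shape)    # (None, 8), known without running a forward pass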


def _initialize_networks(self):
10 changes: 7 additions & 3 deletions adapt/feature_based/_dann.py
@@ -2,6 +2,7 @@
DANN
"""

import inspect
import warnings
import numpy as np
import tensorflow as tf
@@ -170,10 +171,13 @@ def train_step(self, data):
self.optimizer_disc.apply_gradients(zip(gradients_disc, trainable_vars_disc))

# Update metrics
self.compiled_metrics.update_state(ys, ys_pred)
self.compiled_loss(ys, ys_pred)
#for metric in self.metrics:
# metric.update_state(ys, ys_pred)
#self.compiled_loss(ys, ys_pred)
# Return a dict mapping metric names to current value
logs = {m.name: m.result() for m in self.metrics}
#logs = {m.name: m.result() for m in self.metrics}

logs = self._update_logs(ys, ys_pred)
disc_metrics = self._get_disc_metrics(ys_disc, yt_disc)
logs.update({"disc_loss": disc_loss})
logs.update(disc_metrics)
106 changes: 30 additions & 76 deletions adapt/utils.py
@@ -16,7 +16,6 @@
except:
from scikeras.wrappers import KerasClassifier, KerasRegressor
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Layer, Dense, Flatten, Input
from tensorflow.keras.models import clone_model
@@ -88,24 +87,25 @@ def accuracy(y_true, y_pred):
Boolean Tensor
"""
# TODO: accuracy can't handle 1D ys.
multi_columns_t = K.cast(K.greater(K.shape(y_true)[1], 1),
"float32")
binary_t = K.reshape(K.sum(K.cast(K.greater(y_true, 0.5),
"float32"), axis=-1), (-1,))
multi_t = K.reshape(K.cast(K.argmax(y_true, axis=-1),
"float32"), (-1,))
dtype = y_pred.dtype
multi_columns_t = tf.cast(tf.greater(tf.shape(y_true)[1], 1),
dtype)
binary_t = tf.reshape(tf.reduce_sum(tf.cast(tf.greater(y_true, 0.5),
dtype), axis=-1), (-1,))
multi_t = tf.reshape(tf.cast(tf.math.argmax(y_true, axis=-1),
dtype), (-1,))
y_true = ((1 - multi_columns_t) * binary_t +
multi_columns_t * multi_t)

multi_columns_p = K.cast(K.greater(K.shape(y_pred)[1], 1),
"float32")
binary_p = K.reshape(K.sum(K.cast(K.greater(y_pred, 0.5),
"float32"), axis=-1), (-1,))
multi_p = K.reshape(K.cast(K.argmax(y_pred, axis=-1),
"float32"), (-1,))
multi_columns_p = tf.cast(tf.greater(tf.shape(y_pred)[1], 1),
dtype)
binary_p = tf.reshape(tf.reduce_sum(tf.cast(tf.greater(y_pred, 0.5),
dtype), axis=-1), (-1,))
multi_p = tf.reshape(tf.cast(tf.math.argmax(y_pred, axis=-1),
dtype), (-1,))
y_pred = ((1 - multi_columns_p) * binary_p +
multi_columns_p * multi_p)
return tf.keras.metrics.get("acc")(y_true, y_pred)
multi_columns_p * multi_p)
return tf.cast(tf.math.equal(y_true, y_pred), dtype)
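The accuracy helper above now relies on plain tf ops instead of keras.backend and returns a per-sample correctness tensor rather than delegating to tf.keras.metrics.get("acc"). A quick illustrative check with made-up values, assuming the function is imported from adapt.utils:

    import tensorflow as tf
    from adapt.utils import accuracy

    y_true = tf.constant([[0., 1.], [1., 0.]])      # one-hot labels
    y_pred = tf.constant([[0.2, 0.8], [0.6, 0.4]])  # soft predictions
    print(accuracy(y_true, y_pred))                 # [1., 1.]: both samples correct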


def predict(self, x, **kwargs):
@@ -259,11 +259,11 @@ def check_network(network, copy=True,
# but no input_shape
if hasattr(network, "input_shape"):
shape = network.input_shape[1:]
new_network = clone_model(network, input_tensors=Input(shape))
new_network = clone_model(network)
new_network.set_weights(network.get_weights())
elif network.built:
shape = network._build_input_shape[1:]
new_network = clone_model(network, input_tensors=Input(shape))
new_network = clone_model(network)
new_network.set_weights(network.get_weights())
else:
new_network = clone_model(network)
@@ -284,7 +284,7 @@
new_network._name = name

# Override the predict method to speed the prediction for small dataset
new_network.predict = predict.__get__(new_network)
# new_network.predict = predict.__get__(new_network)
return new_network


@@ -366,62 +366,6 @@ def get_default_discriminator(name=None, state=None):
return model


@tf.custom_gradient
def _grad_handler(x, lambda_):
y = tf.identity(x)
def custom_grad(dy):
return (lambda_ * dy, 0. * lambda_)
return y, custom_grad

class GradientHandler(Layer):
"""
Multiply gradients with a scalar during backpropagation.
Act as identity in forward step.
Parameters
----------
lambda_init : float (default=1.)
Scalar multiplier
"""
def __init__(self, lambda_init=1., name="g_handler"):
super().__init__(name=name)
self.lambda_init=lambda_init
self.lambda_ = tf.Variable(lambda_init,
trainable=False,
dtype="float32")

def call(self, x):
"""
Call gradient handler.
Parameters
----------
x: object
Inputs
Returns
-------
x, custom gradient function
"""
return _grad_handler(x, self.lambda_)


def get_config(self):
"""
Return config dictionnary.
Returns
-------
dict
"""
config = super().get_config().copy()
config.update({
'lambda_init': self.lambda_init
})
return config
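This commit deletes the _grad_handler function and the GradientHandler layer from adapt/utils.py. Where gradient scaling or reversal is still needed, the same behaviour can be written directly with tf.custom_gradient; a standalone sketch, not part of this commit:

    import tensorflow as tf

    @tf.custom_gradient
    def scale_grad(x, lambda_):
        # Identity in the forward pass; gradients multiplied by lambda_ on the way back.
        def grad(dy):
            return lambda_ * dy, tf.zeros_like(lambda_)
        return tf.identity(x), grad

    x = tf.constant([1.0, 2.0])
    with tf.GradientTape() as tape:
        tape.watch(x)
        y = tf.reduce_sum(scale_grad(x, tf.constant(-1.0)))  # lambda_ = -1 reverses gradients
    print(tape.gradient(y, x))  # [-1., -1.]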


def make_classification_da(n_samples=100,
n_features=2,
random_state=2):
@@ -638,8 +582,18 @@ def check_fitted_network(estimator):
if isinstance(estimator, Model):
estimator.__deepcopy__ = __deepcopy__.__get__(estimator)
return estimator




def check_if_compiled(network):
"""
Check if the network is compiled.
"""
if hasattr(network, "compiled") and network.compiled:
return True
elif hasattr(network, "_is_compiled") and network._is_compiled:
return True
else:
return False
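A possible call site for the new helper (hypothetical usage, assuming a plain Keras model):

    import tensorflow as tf
    from adapt.utils import check_if_compiled

    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
    if not check_if_compiled(model):
        model.compile(optimizer="adam", loss="mse")  # compile only when not already compiled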

# Try to save the initial estimator if it is a Keras Model
# This is required for cloning the adapt method.
5 changes: 1 addition & 4 deletions tests/test_adda.py
@@ -8,10 +8,7 @@
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.initializers import GlorotUniform
try:
from tensorflow.keras.optimizers.legacy import Adam
except:
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.optimizers import Adam

from adapt.feature_based import ADDA
