Commit

Sync OSS keras to head.
PiperOrigin-RevId: 364350506
qlzh727 authored and tensorflower-gardener committed Mar 22, 2021
1 parent 7fc995e commit 2f1cc10
Showing 64 changed files with 1,011 additions and 496 deletions.
1 change: 1 addition & 0 deletions keras/api/api_init_files.bzl
@@ -53,6 +53,7 @@ KERAS_API_INIT_FILES = [
     "keras/preprocessing/text/__init__.py",
     "keras/regularizers/__init__.py",
     "keras/utils/__init__.py",
+    "keras/utils/experimental/__init__.py",
     "keras/wrappers/__init__.py",
     "keras/wrappers/scikit_learn/__init__.py",
 ]
17 changes: 17 additions & 0 deletions keras/applications/efficientnet.py
@@ -739,6 +739,23 @@ def EfficientNetB7(include_top=True,

 @keras_export('keras.applications.efficientnet.preprocess_input')
 def preprocess_input(x, data_format=None):  # pylint: disable=unused-argument
+  """A placeholder method for backward compatibility.
+
+  The preprocessing logic has been included in the EfficientNet model
+  implementation. Users are no longer required to call this method to
+  normalize the input data. This method does nothing and is kept only as a
+  placeholder to align the API surface between the old and new versions of
+  the model.
+
+  Args:
+    x: A floating point `numpy.array` or a `tf.Tensor`.
+    data_format: Optional data format of the image tensor/array. Defaults to
+      None, in which case the global setting
+      `tf.keras.backend.image_data_format()` is used (unless you changed it,
+      it defaults to "channels_last").{mode}
+
+  Returns:
+    Unchanged `numpy.array` or `tf.Tensor`.
+  """
+  return x
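Since the preprocessing now happens inside the EfficientNet models themselves, `preprocess_input` is a pure identity function. A minimal sketch of the resulting no-op behavior (illustrative, not part of this diff):

>>> import numpy as np
>>> import tensorflow as tf
>>> images = np.random.rand(1, 224, 224, 3).astype('float32')
>>> out = tf.keras.applications.efficientnet.preprocess_input(images)
>>> np.array_equal(out, images)  # the input comes back unchanged
True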
21 changes: 21 additions & 0 deletions keras/callbacks.py
@@ -592,6 +592,27 @@ def __iter__(self):

 class Callback(object):
   """Abstract base class used to build new callbacks.
+
+  Callbacks can be passed to keras methods such as `fit`, `evaluate`, and
+  `predict` in order to hook into the various stages of the model training
+  and inference lifecycle.
+
+  To create a custom callback, subclass `keras.callbacks.Callback` and
+  override the method associated with the stage of interest. See
+  https://www.tensorflow.org/guide/keras/custom_callback for more information.
+
+  Example:
+
+  >>> training_finished = False
+  >>> class MyCallback(tf.keras.callbacks.Callback):
+  ...   def on_train_end(self, logs=None):
+  ...     global training_finished
+  ...     training_finished = True
+  >>> model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))])
+  >>> model.compile(loss='mean_squared_error')
+  >>> model.fit(tf.constant([[1.0]]), tf.constant([[1.0]]),
+  ...           callbacks=[MyCallback()])
+  >>> assert training_finished == True

   Attributes:
     params: Dict. Training parameters
       (e.g. verbosity, batch size, number of epochs...).
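The docstring example above uses the `on_train_end` hook; the other lifecycle hooks are overridden the same way. An illustrative sketch (hypothetical `LossLogger`, not part of this commit; it assumes the standard `on_epoch_end(self, epoch, logs)` signature and reuses the `model` from the doctest above):

>>> epoch_losses = []
>>> class LossLogger(tf.keras.callbacks.Callback):
...   def on_epoch_end(self, epoch, logs=None):
...     # logs carries the metric results for the epoch just finished
...     epoch_losses.append((logs or {}).get('loss'))
>>> model.fit(tf.constant([[1.0]]), tf.constant([[1.0]]),
...           epochs=2, verbose=0, callbacks=[LossLogger()])
>>> len(epoch_losses)
2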
42 changes: 42 additions & 0 deletions keras/constraints.py
@@ -31,11 +31,53 @@

 @keras_export('keras.constraints.Constraint')
 class Constraint(object):
+  """Base class for weight constraints.
+
+  A `Constraint` instance works like a stateless function. Users who subclass
+  this class should override the `__call__` method, which takes a single
+  weight parameter and returns a projected version of that parameter
+  (e.g. normalized or clipped). Constraints can be used with various Keras
+  layers via the `kernel_constraint` or `bias_constraint` arguments.
+
+  Here's a simple example of a non-negative weight constraint:
+
+  >>> class NonNegative(tf.keras.constraints.Constraint):
+  ...
+  ...   def __call__(self, w):
+  ...     return w * tf.cast(tf.math.greater_equal(w, 0.), w.dtype)
+
+  >>> weight = tf.constant((-1.0, 1.0))
+  >>> NonNegative()(weight)
+  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0., 1.], dtype=float32)>
+
+  >>> tf.keras.layers.Dense(4, kernel_constraint=NonNegative())
+  """

   def __call__(self, w):
+    """Applies the constraint to the input weight variable.
+
+    By default, the input weight variable is not modified. Users should
+    override this method to implement their own projection function.
+
+    Args:
+      w: Input weight variable.
+
+    Returns:
+      Projected variable (by default, returns unmodified inputs).
+    """
     return w

   def get_config(self):
+    """Returns a Python dict of the object config.
+
+    A constraint config is a Python dictionary (JSON-serializable) that can
+    be used to reinstantiate the same object.
+
+    Returns:
+      Python dict containing the configuration of the constraint object.
+    """
     return {}
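Because `get_config` drives serialization, a constraint with constructor arguments should return them from `get_config` so the object can be reinstantiated later. A minimal sketch (hypothetical `ClipToInterval` constraint, not part of this commit):

>>> class ClipToInterval(tf.keras.constraints.Constraint):
...   def __init__(self, low=-1.0, high=1.0):
...     self.low = low
...     self.high = high
...   def __call__(self, w):
...     # project weights into [low, high]
...     return tf.clip_by_value(w, self.low, self.high)
...   def get_config(self):
...     return {'low': self.low, 'high': self.high}
>>> ClipToInterval(0.0, 0.5)(tf.constant([-2.0, 0.25, 2.0])).numpy()
array([0.  , 0.25, 0.5 ], dtype=float32)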
2 changes: 1 addition & 1 deletion keras/distribute/mirrored_strategy_test.py
@@ -120,7 +120,7 @@ def step_fn(inputs):
     num_epochs = 4
     num_steps = 7
     for _ in range(num_epochs):
-      accuracy.reset_states()
+      accuracy.reset_state()
       for _ in range(num_steps):
         train_step(distributed_iterator)
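The rename from `reset_states` to `reset_state` recurs throughout this commit. A minimal sketch of the metric lifecycle these tests exercise (assuming a Keras version where `Metric.reset_state` exists, as after this sync):

>>> accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
>>> accuracy.update_state([1], [[0.1, 0.9]])
>>> accuracy.result().numpy()
1.0
>>> accuracy.reset_state()  # clears the accumulators between epochs
>>> accuracy.result().numpy()
0.0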
2 changes: 1 addition & 1 deletion keras/distribute/multi_worker_test.py
@@ -292,7 +292,7 @@ def step_fn(inputs):
     num_epochs = 4
     num_steps = 7
     for _ in range(num_epochs):
-      accuracy.reset_states()
+      accuracy.reset_state()
       for _ in range(num_steps):
         train_step(distributed_iterator)
2 changes: 1 addition & 1 deletion keras/distribute/parameter_server_training_test.py
@@ -167,7 +167,7 @@ def replica_fn(iterator):
     distributed_dataset = self.coordinator.create_per_worker_dataset(dataset_fn)
     distributed_iterator = iter(distributed_dataset)
     for _ in range(4):
-      accuracy.reset_states()
+      accuracy.reset_state()
      for _ in range(7):
        self.coordinator.schedule(worker_fn, args=(distributed_iterator,))
      self.coordinator.join()
68 changes: 34 additions & 34 deletions keras/engine/base_layer.py
@@ -1683,48 +1683,48 @@ def add_update(self, updates, inputs=None):
       update()  # pylint: disable=not-callable

   def set_weights(self, weights):
-    """Sets the weights of the layer, from Numpy arrays.
+    """Sets the weights of the layer, from NumPy arrays.

     The weights of a layer represent the state of the layer. This function
     sets the weight values from numpy arrays. The weight values should be
     passed in the order they are created by the layer. Note that the layer's
-    weights must be instantiated before calling this function by calling
+    weights must be instantiated before calling this function, by calling
     the layer.

-    For example, a Dense layer returns a list of two values-- per-output
-    weights and the bias value. These can be used to set the weights of another
-    Dense layer:
+    For example, a `Dense` layer returns a list of two values: the kernel matrix
+    and the bias vector. These can be used to set the weights of another
+    `Dense` layer:

-    >>> a = tf.keras.layers.Dense(1,
+    >>> layer_a = tf.keras.layers.Dense(1,
     ...   kernel_initializer=tf.constant_initializer(1.))
-    >>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))
-    >>> a.get_weights()
+    >>> a_out = layer_a(tf.convert_to_tensor([[1., 2., 3.]]))
+    >>> layer_a.get_weights()
     [array([[1.],
            [1.],
            [1.]], dtype=float32), array([0.], dtype=float32)]
-    >>> b = tf.keras.layers.Dense(1,
+    >>> layer_b = tf.keras.layers.Dense(1,
     ...   kernel_initializer=tf.constant_initializer(2.))
-    >>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))
-    >>> b.get_weights()
+    >>> b_out = layer_b(tf.convert_to_tensor([[10., 20., 30.]]))
+    >>> layer_b.get_weights()
     [array([[2.],
            [2.],
            [2.]], dtype=float32), array([0.], dtype=float32)]
-    >>> b.set_weights(a.get_weights())
-    >>> b.get_weights()
+    >>> layer_b.set_weights(layer_a.get_weights())
+    >>> layer_b.get_weights()
     [array([[1.],
            [1.],
            [1.]], dtype=float32), array([0.], dtype=float32)]

     Args:
-      weights: a list of Numpy arrays. The number
-        of arrays and their shape must match
-        number of the dimensions of the weights
-        of the layer (i.e. it should match the
-        output of `get_weights`).
+      weights: a list of NumPy arrays. The number
+        of arrays and their shape must match
+        number of the dimensions of the weights
+        of the layer (i.e. it should match the
+        output of `get_weights`).

     Raises:
-      ValueError: If the provided weights list does not match the
-        layer's specifications.
+      ValueError: If the provided weights list does not match the
+        layer's specifications.
     """
     params = self.weights

@@ -1763,39 +1763,39 @@ def set_weights(self, weights):
     backend.batch_set_value(weight_value_tuples)

   def get_weights(self):
-    """Returns the current weights of the layer.
+    """Returns the current weights of the layer, as NumPy arrays.

     The weights of a layer represent the state of the layer. This function
     returns both trainable and non-trainable weight values associated with this
-    layer as a list of Numpy arrays, which can in turn be used to load state
+    layer as a list of NumPy arrays, which can in turn be used to load state
     into similarly parameterized layers.

-    For example, a Dense layer returns a list of two values-- per-output
-    weights and the bias value. These can be used to set the weights of another
-    Dense layer:
+    For example, a `Dense` layer returns a list of two values: the kernel matrix
+    and the bias vector. These can be used to set the weights of another
+    `Dense` layer:

-    >>> a = tf.keras.layers.Dense(1,
+    >>> layer_a = tf.keras.layers.Dense(1,
     ...   kernel_initializer=tf.constant_initializer(1.))
-    >>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))
-    >>> a.get_weights()
+    >>> a_out = layer_a(tf.convert_to_tensor([[1., 2., 3.]]))
+    >>> layer_a.get_weights()
     [array([[1.],
            [1.],
            [1.]], dtype=float32), array([0.], dtype=float32)]
-    >>> b = tf.keras.layers.Dense(1,
+    >>> layer_b = tf.keras.layers.Dense(1,
     ...   kernel_initializer=tf.constant_initializer(2.))
-    >>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))
-    >>> b.get_weights()
+    >>> b_out = layer_b(tf.convert_to_tensor([[10., 20., 30.]]))
+    >>> layer_b.get_weights()
     [array([[2.],
            [2.],
            [2.]], dtype=float32), array([0.], dtype=float32)]
-    >>> b.set_weights(a.get_weights())
-    >>> b.get_weights()
+    >>> layer_b.set_weights(layer_a.get_weights())
+    >>> layer_b.get_weights()
     [array([[1.],
            [1.],
            [1.]], dtype=float32), array([0.], dtype=float32)]

     Returns:
-      Weights values as a list of numpy arrays.
+      Weights values as a list of NumPy arrays.
     """
     weights = self.weights
     output_weights = []
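A typical use of this `get_weights`/`set_weights` pair is copying state between identically structured models. An illustrative sketch (hypothetical `make_model` helper, assuming `numpy as np` and `tensorflow as tf` are imported):

>>> def make_model():
...   return tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(3,))])
>>> src, dst = make_model(), make_model()
>>> dst.set_weights(src.get_weights())  # dst now mirrors src exactly
>>> all(np.array_equal(a, b)
...     for a, b in zip(src.get_weights(), dst.get_weights()))
True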
14 changes: 13 additions & 1 deletion keras/engine/base_preprocessing_layer.py
@@ -43,7 +43,19 @@

 @keras_export('keras.layers.experimental.preprocessing.PreprocessingLayer')
 @six.add_metaclass(abc.ABCMeta)
 class PreprocessingLayer(Layer):
-  """Base class for PreprocessingLayers.
+  """Base class for Preprocessing Layers.
+
+  **Don't use this class directly: it's an abstract base class!** You may
+  be looking for one of the many built-in
+  [preprocessing layers](https://keras.io/guides/preprocessing_layers/)
+  instead.
+
+  Preprocessing layers are layers whose state gets computed before model
+  training starts. They do not get updated during training. Most
+  preprocessing layers implement an `adapt()` method for state computation.
+
+  The `PreprocessingLayer` class is the base class you would subclass to
+  implement your own preprocessing layers.

   Attributes:
     stateful: Whether the layer contains state that needs to be adapted via
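The `adapt()` workflow the docstring describes looks like this in practice. A minimal sketch using the built-in `Normalization` layer from the same experimental namespace (layer path as of the TF 2.4/2.5 era this commit targets; it later moved to `tf.keras.layers`):

>>> data = np.array([[0.], [2.], [4.]], dtype='float32')
>>> norm = tf.keras.layers.experimental.preprocessing.Normalization()
>>> norm.adapt(data)  # computes mean and variance once, before training
>>> norm(data).numpy().round(2)
array([[-1.22],
       [ 0.  ],
       [ 1.22]], dtype=float32)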
8 changes: 4 additions & 4 deletions keras/engine/compile_utils.py
@@ -251,14 +251,14 @@ def __call__(self,
       # Ok for a model to have no compiled loss.
       return tf.zeros(shape=())

-  def reset_states(self):
+  def reset_state(self):
     """Resets the state of loss metrics."""
     if not self._built:
       return
     metrics = [self._loss_metric] + tf.nest.flatten(self._per_output_metrics)
     for metric_obj in metrics:
       if metric_obj is not None:
-        metric_obj.reset_states()
+        metric_obj.reset_state()

   def _get_loss_object(self, loss):
     """Returns a `Loss` object.

@@ -466,7 +466,7 @@ def update_state(self, y_true, y_pred, sample_weight=None):
         continue
       weighted_metric_obj.update_state(y_t, y_p, sample_weight=sw)

-  def reset_states(self):
+  def reset_state(self):
     """Resets the state of all `Metric`s in this container."""
     if self._built:
       metrics = self._metrics_in_order

@@ -479,7 +479,7 @@ def reset_states(self):
     for metric_obj in metrics:
       if isinstance(metric_obj, metrics_mod.Metric):
-        metric_obj.reset_states()
+        metric_obj.reset_state()

   def _get_metric_objects(self, metrics, y_t, y_p):
     """Convert user-supplied metrics to `Metric` objects."""
16 changes: 8 additions & 8 deletions keras/engine/compile_utils_test.py
@@ -42,7 +42,7 @@ def test_single_loss(self):
     self.assertEqual(loss_metric.name, 'loss')
     self.assertEqual(loss_metric.result().numpy(), 1.)

-    loss_container.reset_states()
+    loss_container.reset_state()
     self.assertEqual(loss_metric.result().numpy(), 0.)

   def test_loss_list(self):

@@ -71,7 +71,7 @@ def test_loss_list(self):
     self.assertEqual(output_2_metric.name, 'output_2_loss')
     self.assertEqual(output_2_metric.result().numpy(), 0.5)

-    loss_container.reset_states()
+    loss_container.reset_state()
     self.assertEqual(loss_metric.result().numpy(), 0)
     self.assertEqual(output_1_metric.result().numpy(), 0)
     self.assertEqual(output_2_metric.result().numpy(), 0)

@@ -108,7 +108,7 @@ def test_loss_dict(self):
     self.assertEqual(out2_metric.name, 'out2_loss')
     self.assertEqual(out2_metric.result().numpy(), 0.5)

-    loss_container.reset_states()
+    loss_container.reset_state()
     self.assertEqual(loss_metric.result().numpy(), 0)
     self.assertEqual(out1_metric.result().numpy(), 0)
     self.assertEqual(out2_metric.result().numpy(), 0)

@@ -405,7 +405,7 @@ def test_single_metric(self):
     self.assertEqual(metric.name, 'mse')
     self.assertEqual(metric.result().numpy(), 1.)

-    metric_container.reset_states()
+    metric_container.reset_state()
     self.assertEqual(metric.result().numpy(), 0.)

   def test_list_of_metrics_one_output(self):

@@ -422,7 +422,7 @@ def test_list_of_metrics_one_output(self):
     self.assertEqual(mae_metric.name, 'mae')
     self.assertEqual(mae_metric.result().numpy(), 2.)

-    metric_container.reset_states()
+    metric_container.reset_state()
     self.assertEqual(mse_metric.result().numpy(), 0.)
     self.assertEqual(mae_metric.result().numpy(), 0.)

@@ -507,7 +507,7 @@ def test_metric_dict(self):
     self.assertEqual(weighted_mae_metric.name, 'out2_weighted_mae')
     self.assertEqual(weighted_mae_metric.result().numpy(), 2.)

-    metric_container.reset_states()
+    metric_container.reset_state()
     self.assertEqual(mse_metric.result().numpy(), 0.)
     self.assertEqual(weighted_mse_metric.result().numpy(), 0.)
     self.assertEqual(mae_metric.result().numpy(), 0.)

@@ -782,13 +782,13 @@ def __call__(self, y_true, y_pred):
     self.assertEqual(metric_container.metrics[0].name, 'custom_metric_fn')
     self.assertEqual(metric_container.metrics[1].name, 'custom_metric_class')

-  def test_reset_states_existing_metric_before_built(self):
+  def test_reset_state_existing_metric_before_built(self):
     metric = metrics_mod.Mean()
     metric.update_state([2.0, 4.0])
     self.assertEqual(metric.result().numpy(), 3.0)

     metric_container = compile_utils.MetricsContainer(metric)
-    metric_container.reset_states()
+    metric_container.reset_state()
     self.assertEqual(metric.result().numpy(), 0.0)
2 changes: 1 addition & 1 deletion keras/engine/training.py
@@ -1704,7 +1704,7 @@ def reset_metrics(self):
     """
     for m in self.metrics:
-      m.reset_states()
+      m.reset_state()

   def train_on_batch(self,
                      x,