| python_code | repo_name | file_path |
|---|---|---|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main entry point for annealed flow transport and baselines."""
from typing import Sequence
from absl import app
from absl import flags
from annealed_flow_transport import train
from ml_collections.config_flags import config_flags
FLAGS = flags.FLAGS
config_flags.DEFINE_config_file('config',
'./configs/single_normal.py',
'Training configuration.')
def main(argv: Sequence[str]) -> None:
config = FLAGS.config
info = 'Displaying config '+str(config)
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
train.run_experiment(config)
if __name__ == '__main__':
app.run(main)
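# Editor's illustrative note (not part of the original file): the 'config'
# flag defined above is a standard ml_collections config_flags flag, so the
# entry point is typically launched with a config file override, e.g.
#   python main.py --config=./configs/single_normal.py
# Individual config fields can also be overridden from the command line using
# the usual dotted syntax, e.g. --config.<field>=<value>.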
| annealed_flow_transport-master | main.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for train_vae.py."""
import ml_collections
ConfigDict = ml_collections.ConfigDict
def get_config():
"""Returns a train_vae config as ConfigDict."""
config = ConfigDict()
config.random_seed = 1
config.batch_size = 100
config.num_latents = 30 # Number of latents for VAE.
config.step_size = 0.00005 # ADAM optimizer step size.
# Base directory for output of results. If falsey don't store files.
config.output_dir_stub = '/tmp/aft_vae/'
config.train_iters = 500001
config.report_period = 5000
return config
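# Editor's illustrative sketch (not part of the original file): the returned
# object is a plain ml_collections ConfigDict, so fields can be inspected and
# overridden with attribute access before being passed to the training loop.
if __name__ == '__main__':
  demo_config = get_config()
  demo_config.train_iters = 1000  # Shorten a local debug run.
  print(demo_config.num_latents, demo_config.batch_size, demo_config.step_size)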
| annealed_flow_transport-master | train_vae_configs/vae_config.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for loading and saving model parameters."""
import pickle
def load_checkpoint(filename):
with open(filename, "rb") as f:
state = pickle.load(f)
return state
def save_checkpoint(filename, state):
with open(filename, "wb") as f:
pickle.dump(state, f)
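# Editor's illustrative sketch (not part of the original file): the helpers
# above pickle arbitrary Python objects, so a checkpoint round trip looks like
# this. The file path is a placeholder.
if __name__ == '__main__':
  demo_state = {'step': 0, 'params': [1.0, 2.0, 3.0]}
  save_checkpoint('/tmp/aft_demo_checkpoint.pkl', demo_state)
  restored_state = load_checkpoint('/tmp/aft_demo_checkpoint.pkl')
  assert restored_state == demo_state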
| annealed_flow_transport-master | annealed_flow_transport/serialize.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for probability densities."""
import abc
import pickle
from annealed_flow_transport import vae as vae_lib
import annealed_flow_transport.aft_types as tp
import annealed_flow_transport.cox_process_utils as cp_utils
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import jax.scipy.linalg as slinalg
from jax.scipy.special import logsumexp
from jax.scipy.stats import multivariate_normal
from jax.scipy.stats import norm
import numpy as np
import tensorflow_datasets as tfds
# TypeDefs
NpArray = np.ndarray
Array = tp.Array
ConfigDict = tp.ConfigDict
Samples = tp.Samples
SampleShape = tp.SampleShape
assert_trees_all_equal = chex.assert_trees_all_equal
class LogDensity(metaclass=abc.ABCMeta):
"""Abstract base class from which all log densities should inherit."""
def __init__(self, config: ConfigDict, sample_shape: SampleShape):
self._check_constructor_inputs(config, sample_shape)
self._config = config
self._sample_shape = sample_shape
@abc.abstractmethod
def _check_constructor_inputs(self, config: ConfigDict,
sample_shape: SampleShape):
"""Check the config and sample shape of the class.
Will typically raise assertion-like errors.
Args:
config: Configuration for the log density.
sample_shape: Shape expected for the density.
"""
def __call__(self, x: Samples) -> Array:
"""Evaluate the log density with automatic shape checking.
This calls evaluate_log_density which needs to be implemented
in derived classes.
Args:
x: input Samples.
Returns:
Array of shape (num_batch,) with corresponding log densities.
"""
self._check_input_shape(x)
output = self.evaluate_log_density(x)
self._check_output_shape(x, output)
return output
@abc.abstractmethod
def evaluate_log_density(self, x: Samples) -> Array:
"""Evaluate the log density.
Args:
x: Samples.
Returns:
Array of shape (num_batch,) containing values of log densities.
"""
def _check_input_shape(self, x_in: Samples):
should_be_tree_shape = jax.tree_map(lambda x: x.shape[1:], x_in)
chex.assert_trees_all_equal(self._sample_shape, should_be_tree_shape)
def get_first_leaf(tree):
return jax.tree_util.tree_leaves(tree)[0]
first_batch_size = np.shape(get_first_leaf(x_in))[0]
chex.assert_tree_shape_prefix(x_in, (first_batch_size,))
def _check_output_shape(self, x_in: Samples, x_out: Samples):
batch_sizes = jax.tree_util.tree_map(lambda x: np.shape(x)[0],
x_in)
def get_first_leaf(tree):
return jax.tree_util.tree_leaves(tree)[0]
first_batch_size = get_first_leaf(batch_sizes)
chex.assert_shape(x_out, (first_batch_size,))
def _check_members_types(self, config: ConfigDict, expected_members_types):
for elem, elem_type in expected_members_types:
if elem not in config:
raise ValueError("LogDensity config element not found: ", elem)
if not isinstance(config[elem], elem_type):
msg = "LogDensity config element " + elem + " is not of type " + str(
elem_type)
raise TypeError(msg)
class NormalDistribution(LogDensity):
"""A univariate normal distribution with configurable scale and location.
num_dim should be 1 and config should include scalars "loc" and "scale"
"""
def _check_constructor_inputs(self, config: ConfigDict,
sample_shape: SampleShape):
assert_trees_all_equal(sample_shape, (1,))
expected_members_types = [("loc", float),
("scale", float),
]
self._check_members_types(config, expected_members_types)
def evaluate_log_density(self, x: Samples) -> Array:
output = norm.logpdf(x,
loc=self._config.loc,
scale=self._config.scale)[:, 0]
return output
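def _normal_density_usage_example() -> Array:
  """Editor's illustrative sketch (not part of the original file).

  Constructs and evaluates the NormalDistribution above. Only the "loc" and
  "scale" fields required by _check_constructor_inputs are set; the values and
  the batch size are placeholders.
  """
  import ml_collections
  cfg = ml_collections.ConfigDict()
  cfg.loc = 0.0
  cfg.scale = 1.0
  density = NormalDistribution(cfg, sample_shape=(1,))
  samples = jnp.zeros((4, 1))  # A batch of four one-dimensional samples.
  return density(samples)  # Shape (4,): log N(0 | loc=0, scale=1) per sample.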
class MultivariateNormalDistribution(LogDensity):
"""A normalized multivariate normal distribution.
Each element of the mean vector has the same value config.shared_mean
Each element of the diagonal covariance matrix has value config.diagonal_cov
"""
def _check_constructor_inputs(self, config: ConfigDict,
sample_shape: SampleShape):
expected_members_types = [("shared_mean", float), ("diagonal_cov", float)]
assert len(sample_shape) == 1
self._check_members_types(config, expected_members_types)
def evaluate_log_density(self, x: Array) -> Array:
num_dim = np.shape(x)[1]
mean = jnp.ones(num_dim) * self._config.shared_mean
cov = jnp.diag(jnp.ones(num_dim) * self._config.diagonal_cov)
output = multivariate_normal.logpdf(x,
mean=mean,
cov=cov)
return output
class FunnelDistribution(LogDensity):
"""The funnel distribution from https://arxiv.org/abs/physics/0009028.
num_dim should be 10. config is unused in this case.
"""
def _check_constructor_inputs(self, config: ConfigDict,
sample_shape: SampleShape):
del config
assert_trees_all_equal(sample_shape, (10,))
def evaluate_log_density(self, x: Array) -> Array:
num_dim = self._sample_shape[0]
def unbatched(x):
v = x[0]
log_density_v = norm.logpdf(v,
loc=0.,
scale=3.)
variance_other = jnp.exp(v)
other_dim = num_dim - 1
cov_other = jnp.eye(other_dim) * variance_other
mean_other = jnp.zeros(other_dim)
log_density_other = multivariate_normal.logpdf(x[1:],
mean=mean_other,
cov=cov_other)
chex.assert_equal_shape([log_density_v, log_density_other])
return log_density_v + log_density_other
output = jax.vmap(unbatched)(x)
return output
class LogGaussianCoxPines(LogDensity):
"""Log Gaussian Cox process posterior in 2D for pine saplings data.
This follows Heng et al 2020 https://arxiv.org/abs/1708.08396 .
config.file_path should point to a csv file of num_points columns
and 2 rows containing the Finnish pines data.
config.use_whitened is a boolean specifying whether or not to use a
reparameterization in terms of the Cholesky decomposition of the prior.
See Section G.4 of https://arxiv.org/abs/2102.07501 for more detail.
The experiments in the paper have this set to False.
num_dim should be the square of the lattice sites per dimension.
So for a 40 x 40 grid num_dim should be 1600.
"""
def __init__(self,
config: ConfigDict,
sample_shape: SampleShape):
super().__init__(config, sample_shape)
# Discretization is as in Controlled Sequential Monte Carlo
# by Heng et al 2017 https://arxiv.org/abs/1708.08396
num_dim = sample_shape[0]
self._num_latents = num_dim
self._num_grid_per_dim = int(np.sqrt(num_dim))
bin_counts = jnp.array(
cp_utils.get_bin_counts(self.get_pines_points(config.file_path),
self._num_grid_per_dim))
self._flat_bin_counts = jnp.reshape(bin_counts, (self._num_latents))
# This normalizes by the number of elements in the grid
self._poisson_a = 1./self._num_latents
# Parameters for LGCP are as estimated in Moller et al, 1998
# "Log Gaussian Cox processes" and are also used in Heng et al.
self._signal_variance = 1.91
self._beta = 1./33
self._bin_vals = cp_utils.get_bin_vals(self._num_grid_per_dim)
def short_kernel_func(x, y):
return cp_utils.kernel_func(x, y, self._signal_variance,
self._num_grid_per_dim, self._beta)
self._gram_matrix = cp_utils.gram(short_kernel_func, self._bin_vals)
self._cholesky_gram = jnp.linalg.cholesky(self._gram_matrix)
self._white_gaussian_log_normalizer = -0.5 * self._num_latents * jnp.log(
2. * jnp.pi)
half_log_det_gram = jnp.sum(jnp.log(jnp.abs(jnp.diag(self._cholesky_gram))))
self._unwhitened_gaussian_log_normalizer = -0.5 * self._num_latents * jnp.log(
2. * jnp.pi) - half_log_det_gram
# The mean function is a constant with value mu_zero.
self._mu_zero = jnp.log(126.) - 0.5*self._signal_variance
if self._config.use_whitened:
self._posterior_log_density = self.whitened_posterior_log_density
else:
self._posterior_log_density = self.unwhitened_posterior_log_density
def _check_constructor_inputs(self, config: ConfigDict,
sample_shape: SampleShape):
expected_members_types = [("use_whitened", bool)]
self._check_members_types(config, expected_members_types)
num_dim = sample_shape[0]
assert_trees_all_equal(sample_shape, (num_dim,))
num_grid_per_dim = int(np.sqrt(num_dim))
if num_grid_per_dim * num_grid_per_dim != num_dim:
msg = ("num_dim needs to be a square number for LogGaussianCoxPines "
"density.")
raise ValueError(msg)
if not config.file_path:
msg = "Please specify a path in config for the Finnish pines data csv."
raise ValueError(msg)
def get_pines_points(self, file_path):
"""Get the pines data points."""
with open(file_path, "rt") as input_file:
b = np.genfromtxt(input_file, delimiter=",")
return b
def whitened_posterior_log_density(self, white: Array) -> Array:
quadratic_term = -0.5 * jnp.sum(white**2)
prior_log_density = self._white_gaussian_log_normalizer + quadratic_term
latent_function = cp_utils.get_latents_from_white(white, self._mu_zero,
self._cholesky_gram)
log_likelihood = cp_utils.poisson_process_log_likelihood(
latent_function, self._poisson_a, self._flat_bin_counts)
return prior_log_density + log_likelihood
def unwhitened_posterior_log_density(self, latents: Array) -> Array:
white = cp_utils.get_white_from_latents(latents, self._mu_zero,
self._cholesky_gram)
prior_log_density = -0.5 * jnp.sum(
white * white) + self._unwhitened_gaussian_log_normalizer
log_likelihood = cp_utils.poisson_process_log_likelihood(
latents, self._poisson_a, self._flat_bin_counts)
return prior_log_density + log_likelihood
def evaluate_log_density(self, x: Array) -> Array:
return jax.vmap(self._posterior_log_density)(x)
class ChallengingTwoDimensionalMixture(LogDensity):
"""A challenging mixture of Gaussians in two dimensions.
num_dim should be 2. config is unused in this case.
"""
def _check_constructor_inputs(self, config: ConfigDict,
sample_shape: SampleShape):
del config
assert_trees_all_equal(sample_shape, (2,))
def raw_log_density(self, x: Array) -> Array:
"""A raw log density that we will then symmetrize."""
mean_a = jnp.array([3.0, 0.])
mean_b = jnp.array([-2.5, 0.])
mean_c = jnp.array([2.0, 3.0])
means = jnp.stack((mean_a, mean_b, mean_c), axis=0)
cov_a = jnp.array([[0.7, 0.], [0., 0.05]])
cov_b = jnp.array([[0.7, 0.], [0., 0.05]])
cov_c = jnp.array([[1.0, 0.95], [0.95, 1.0]])
covs = jnp.stack((cov_a, cov_b, cov_c), axis=0)
log_weights = jnp.log(jnp.array([1./3, 1./3., 1./3.]))
l = jnp.linalg.cholesky(covs)
y = slinalg.solve_triangular(l, x[None, :] - means, lower=True, trans=0)
mahalanobis_term = -1/2 * jnp.einsum("...i,...i->...", y, y)
n = means.shape[-1]
normalizing_term = -n / 2 * np.log(2 * np.pi) - jnp.log(
l.diagonal(axis1=-2, axis2=-1)).sum(axis=1)
individual_log_pdfs = mahalanobis_term + normalizing_term
mixture_weighted_pdfs = individual_log_pdfs + log_weights
return logsumexp(mixture_weighted_pdfs)
def make_2d_invariant(self, log_density, x: Array) -> Array:
density_a = log_density(x)
density_b = log_density(np.flip(x))
return jnp.logaddexp(density_a, density_b) - jnp.log(2)
def evaluate_log_density(self, x: Array) -> Array:
density_func = lambda x: self.make_2d_invariant(self.raw_log_density, x)
return jax.vmap(density_func)(x)
class AutoEncoderLikelihood(LogDensity):
"""Generative decoder log p(x,z| theta) as a function of latents z.
This evaluates log p(x,z| theta) = log p(x, z| theta ) + log p(z) for a VAE.
Here x is an binarized MNIST Image, z are real valued latents, theta denotes
the generator neural network parameters.
Since x is fixed and z is a random variable this is the log of an unnormalized
z density p(x, z | theta)
The normalizing constant is a marginal p(x | theta) = int p(x, z | theta) dz.
The normalized target density is the posterior over latents p(z|x, theta).
The likelihood uses a pretrained generator neural network.
It is contained in a pickle file specifed by config.params_filesname
A script producing such a pickle file can be found in train_vae.py
The resulting pretrained network used in the AFT paper
can be found at data/vae.pickle
The binarized MNIST test set image used is specfied by config.image_index
"""
def __init__(self, config: ConfigDict, sample_shape: SampleShape):
super().__init__(config, sample_shape)
self._num_dim = sample_shape[0]
self._vae_params = self._get_vae_params(config.params_filename)
test_batch_size = 1
test_ds = vae_lib.load_dataset(tfds.Split.TEST, test_batch_size)
for unused_index in range(self._config.image_index):
unused_batch = next(test_ds)
self._test_image = next(test_ds)["image"]
assert self._test_image.shape[0] == 1 # Batch size needs to be 1.
assert self._test_image.shape[1:] == vae_lib.MNIST_IMAGE_SHAPE
self.entropy_eval = hk.transform(self.cross_entropy_eval_func)
def _check_constructor_inputs(self, config: ConfigDict,
sample_shape: SampleShape):
assert_trees_all_equal(sample_shape, (30,))
num_mnist_test = 10000
in_range = config.image_index >= 0 and config.image_index < num_mnist_test
if not in_range:
msg = "VAE image_index must be greater than or equal to zero "
msg += "and strictly less than "+str(num_mnist_test)+"."
raise ValueError(msg)
def _get_vae_params(self, ckpt_filename):
with open(ckpt_filename, "rb") as f:
vae_params = pickle.load(f)
return vae_params
def cross_entropy_eval_func(self, data: Array, latent: Array) -> Array:
"""Evaluate the binary cross entropy for given latent and data.
Needs to be called within a Haiku transform.
Args:
data: Array of shape (1, image_shape)
latent: Array of shape (num_latent_dim,)
Returns:
Array, value of binary cross entropy for single data point in question.
"""
chex.assert_rank(latent, 1)
chex.assert_rank(data, 4) # Shape should be (1, 28, 28, 1) hence rank 4.
vae = vae_lib.ConvVAE()
# New axis here required for batch size = 1 for VAE compatibility.
batch_latent = latent[None, :]
logits = vae.decoder(batch_latent)
chex.assert_equal_shape([logits, data])
return vae_lib.binary_cross_entropy_from_logits(logits, data)
def log_prior(self, latent: Array) -> Array:
"""Latent shape (num_dim,) -> standard multivariate log density."""
chex.assert_rank(latent, 1)
log_norm_gaussian = -0.5*self._num_dim * jnp.log(2.*jnp.pi)
data_term = - 0.5 * jnp.sum(jnp.square(latent))
return data_term + log_norm_gaussian
def total_log_probability(self, latent: Array) -> Array:
chex.assert_rank(latent, 1)
log_prior = self.log_prior(latent)
dummy_rng_key = 0
# Data point log likelihood is negative of loss for batch size of 1.
log_likelihood = -1. * self.entropy_eval.apply(
self._vae_params, dummy_rng_key, self._test_image, latent)
total_log_probability = log_prior + log_likelihood
return total_log_probability
def evaluate_log_density(self, x: Array) -> Array:
return jax.vmap(self.total_log_probability)(x)
def phi_four_log_density(x: Array,
mass_squared: Array,
bare_coupling: Array) -> Array:
"""Evaluate the phi_four_log_density.
Args:
x: Array of size (L_x, L_y)- values on 2D lattice.
mass_squared: Scalar representing bare mass squared.
bare_coupling: Scalar representing bare coupling.
Returns:
Scalar corresponding to log_density.
"""
chex.assert_rank(x, 2)
chex.assert_rank(mass_squared, 0)
chex.assert_rank(bare_coupling, 0)
mass_term = mass_squared * jnp.sum(jnp.square(x))
quadratic_term = bare_coupling * jnp.sum(jnp.power(x, 4))
roll_x_plus = jnp.roll(x, shift=1, axis=0)
roll_x_minus = jnp.roll(x, shift=-1, axis=0)
roll_y_plus = jnp.roll(x, shift=1, axis=1)
roll_y_minus = jnp.roll(x, shift=-1, axis=1)
# D'alembertian operator acting on field phi.
dalembert_phi = 4.*x - roll_x_plus - roll_x_minus - roll_y_plus - roll_y_minus
kinetic_term = jnp.sum(x * dalembert_phi)
action_density = kinetic_term + mass_term + quadratic_term
return -action_density
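def _phi_four_log_density_example() -> Array:
  """Editor's illustrative sketch (not part of the original file).

  Evaluates the phi^4 action above on a small 4 x 4 lattice with placeholder
  couplings, returning a scalar log density.
  """
  field = jnp.zeros((4, 4))  # A 2D lattice configuration.
  mass_squared = jnp.array(1.0)  # Placeholder bare mass squared.
  bare_coupling = jnp.array(0.1)  # Placeholder bare coupling.
  return phi_four_log_density(field, mass_squared, bare_coupling)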
class PhiFourTheory(LogDensity):
"""Log density for phi four field theory in two dimensions."""
def __init__(self,
config: ConfigDict,
sample_shape: SampleShape):
super().__init__(config, sample_shape)
self._num_grid_per_dim = int(np.sqrt(sample_shape[0]))
def _check_constructor_inputs(self, config: ConfigDict,
sample_shape: SampleShape):
expected_members_types = [("mass_squared", float),
("bare_coupling", float)]
num_dim = sample_shape[0]
self._check_members_types(config, expected_members_types)
num_grid_per_dim = int(np.sqrt(num_dim))
if num_grid_per_dim * num_grid_per_dim != num_dim:
msg = ("num_dim needs to be a square number for PhiFourTheory "
"density.")
raise ValueError(msg)
def reshape_and_call(self, x: Array) -> Array:
return phi_four_log_density(jnp.reshape(x, (self._num_grid_per_dim,
self._num_grid_per_dim)),
self._config.mass_squared,
self._config.bare_coupling)
def evaluate_log_density(self, x: Array) -> Array:
return jax.vmap(self.reshape_and_call)(x)
class ManyWell(LogDensity):
"""Many well log density.
See:
Midgley, Stimper et al. Flow Annealed Importance Sampling Bootstrap. 2022.
Wu et al. Stochastic Normalizing Flows. 2020.
"""
def __init__(self, config: ConfigDict, sample_shape: SampleShape):
super().__init__(config, sample_shape)
self._num_dim = sample_shape[0]
def _check_constructor_inputs(self, config: ConfigDict,
sample_shape: SampleShape):
num_dim = sample_shape[0]
if num_dim % 2 != 0:
msg = ("sample_shape[0] needs to be even.")
raise ValueError(msg)
def single_well_log_density(self, x) -> Array:
chex.assert_shape(x, (2,))
# Here we index from 0 instead of 1 which differs from the cited papers.
x_zero_term = (
-1.0 * jnp.power(x[0], 4) + 6.0 * jnp.power(x[0], 2) + 0.5 * x[0]
)
x_one_term = -0.5 * jnp.power(x[1], 2)
return x_zero_term + x_one_term
def many_well_log_density(self, x: Array) -> Array:
chex.assert_rank(x, 2)
per_group_log_densities = jax.vmap(self.single_well_log_density)(x)
return jnp.sum(per_group_log_densities)
def evaluate_log_density(self, x: Array) -> Array:
chex.assert_rank(x, 2)
(num_batch, num_dim) = x.shape
reshaped_x = jnp.reshape(x, (num_batch, num_dim//2, 2))
return jax.vmap(self.many_well_log_density)(reshaped_x)
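def _many_well_usage_example() -> Array:
  """Editor's illustrative sketch (not part of the original file).

  Evaluates the ManyWell density above on a batch of samples whose dimension
  is even, as required by _check_constructor_inputs. The config is unused by
  this density, so an empty ConfigDict is passed.
  """
  import ml_collections
  density = ManyWell(ml_collections.ConfigDict(), sample_shape=(6,))
  samples = jnp.zeros((3, 6))  # A batch of three six-dimensional samples.
  return density(samples)  # Shape (3,): one log density per sample.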
| annealed_flow_transport-master | annealed_flow_transport/densities.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared math functions for flow transport SMC algorithms."""
from typing import Any, Tuple, Union
from annealed_flow_transport import resampling
import annealed_flow_transport.aft_types as tp
import chex
import jax
import jax.numpy as jnp
from jax.scipy.special import logsumexp
Array = tp.Array
FlowApply = tp.FlowApply
FlowParams = tp.FlowParams
LogDensityByStep = tp.LogDensityByStep
LogDensityNoStep = tp.LogDensityNoStep
MarkovKernelApply = tp.MarkovKernelApply
AcceptanceTuple = tp.AcceptanceTuple
RandomKey = tp.RandomKey
Samples = tp.Samples
assert_equal_shape = chex.assert_equal_shape
assert_trees_all_equal_shapes = chex.assert_trees_all_equal_shapes
class GeometricAnnealingSchedule(object):
"""Container computing a geometric annealing schedule between log densities."""
def __init__(self,
initial_log_density: LogDensityNoStep,
final_log_density: LogDensityNoStep,
num_temps: int):
self._initial_log_density = initial_log_density
self._final_log_density = final_log_density
self._num_temps = num_temps
def get_beta(self,
step):
final_step = self._num_temps-1
beta = step / final_step
return beta
def __call__(self,
step: int,
samples: Samples):
log_densities_final = self._final_log_density(samples)
log_densities_initial = self._initial_log_density(samples)
beta = self.get_beta(step)
interpolated_densities = (
1. - beta) * log_densities_initial + beta * log_densities_final
return interpolated_densities
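def _geometric_annealing_example() -> Array:
  """Editor's illustrative sketch (not part of the original file).

  Shows the geometric interpolation implemented above: with num_temps
  temperatures, step t uses beta = t / (num_temps - 1), so step 0 recovers the
  initial log density and the final step recovers the target. The two Gaussian
  style log densities below are placeholders.
  """
  initial_log_density = lambda x: -0.5 * jnp.sum(x**2, axis=-1)
  final_log_density = lambda x: -0.5 * jnp.sum((x - 2.)**2, axis=-1)
  schedule = GeometricAnnealingSchedule(initial_log_density,
                                        final_log_density,
                                        num_temps=11)
  samples = jnp.zeros((5, 3))  # A batch of five three-dimensional samples.
  return schedule(5, samples)  # beta = 0.5 at the middle temperature.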
def get_delta_no_flow(samples: Samples,
log_density: LogDensityByStep,
step: int) -> Array:
log_density_values_current = log_density(step, samples)
log_density_values_previous = log_density(step-1, samples)
assert_equal_shape([log_density_values_current, log_density_values_previous])
deltas = log_density_values_previous - log_density_values_current
return deltas
def get_delta(samples: Samples,
flow_apply: FlowApply,
flow_params: FlowParams,
log_density: LogDensityByStep,
step: int) -> Array:
"""Get density difference between current target and push forward of previous.
Args:
samples: Array containing samples of shape (batch,) + sample_shape.
flow_apply: function that applies the flow.
flow_params: parameters of the flow.
log_density: function returning the log_density of a sample at given step.
step: current step.
Returns:
deltas: an array containing the difference for each sample.
"""
transformed_samples, log_det_jacs = flow_apply(flow_params, samples)
assert_trees_all_equal_shapes(transformed_samples, samples)
log_density_values_current = log_density(step, transformed_samples)
log_density_values_previous = log_density(step-1, samples)
assert_equal_shape([log_density_values_current, log_density_values_previous])
assert_equal_shape([log_density_values_previous, log_det_jacs])
deltas = log_density_values_previous - log_density_values_current - log_det_jacs
return deltas
def get_delta_path_grad(samples: Samples,
flow_apply: FlowApply,
inv_flow_apply: FlowApply,
flow_params: FlowParams,
log_density: LogDensityByStep,
step: int) -> Array:
"""Like get_delta above but with gradient changed to use path estimator.
See https://arxiv.org/abs/1703.09194 and https://arxiv.org/abs/2207.08219
Args:
samples: Array containing samples of shape (batch,) + sample_shape.
flow_apply: function that applies the flow.
inv_flow_apply: function that applies the inverse flow.
flow_params: parameters of the flow.
log_density: function returning the log_density of a sample at given step.
step: current step.
Returns:
deltas: an array containing the difference for each sample.
"""
transformed_samples, _ = flow_apply(flow_params, samples)
assert_trees_all_equal_shapes(transformed_samples, samples)
log_density_target = log_density(step, transformed_samples)
def variational_density(params, input_samples):
initial_samples, log_det_jacs = inv_flow_apply(params, input_samples)
assert_trees_all_equal_shapes(initial_samples, input_samples)
log_density_base = log_density(step-1, initial_samples)
assert_equal_shape([log_density_base, log_det_jacs])
return log_density_base + log_det_jacs
log_density_q = variational_density(jax.lax.stop_gradient(flow_params),
transformed_samples)
assert_equal_shape([log_density_target, log_density_q])
return log_density_q - log_density_target
def get_batch_parallel_free_energy_increment(samples: Samples,
flow_apply: FlowApply,
flow_params: FlowParams,
log_density: LogDensityByStep,
step: int) -> Array:
"""Get the log normalizer increments in case where there is no resampling.
Args:
samples: (num_batch, num_dim)
flow_apply: Apply the flow.
flow_params: Parameters of the flow.
log_density: Value of the log density.
step: Step of the algorithm.
Returns:
Scalar array containing the increment.
"""
deltas = get_delta(samples, flow_apply, flow_params, log_density, step)
chex.assert_rank(deltas, 1)
# The mean takes the average over the batch. This is equivalent to delaying
# the average until all temperatures have been accumulated.
return jnp.mean(deltas)
def transport_free_energy_estimator(samples: Samples,
log_weights: Array,
flow_apply: FlowApply,
inv_flow_apply: Union[FlowApply, Any],
flow_params: FlowParams,
log_density: LogDensityByStep,
step: int,
use_path_gradient: bool) -> Array:
"""Compute an estimate of the free energy.
Args:
samples: Array representing samples (batch,) + sample_shape
log_weights: Array of log weights for the samples, of shape (batch,).
flow_apply: function that applies the flow.
inv_flow_apply: function that applies the inverse flow or None.
flow_params: parameters of the flow.
log_density: function returning the log_density of a sample at given step.
step: current step
use_path_gradient: Whether or not to modify gradients to use path estimator.
Returns:
Estimate of the free_energy.
"""
if not use_path_gradient:
deltas = get_delta(samples,
flow_apply,
flow_params,
log_density,
step)
else:
deltas = get_delta_path_grad(samples,
flow_apply,
inv_flow_apply,
flow_params,
log_density,
step)
assert_equal_shape([deltas, log_weights])
return jnp.sum(jax.nn.softmax(log_weights) * deltas)
def get_log_normalizer_increment_no_flow(deltas: Array,
log_weights: Array) -> Array:
assert_equal_shape([deltas, log_weights])
normalized_log_weights = jax.nn.log_softmax(log_weights)
total_terms = normalized_log_weights - deltas
assert_equal_shape([normalized_log_weights, log_weights, total_terms])
increment = logsumexp(total_terms)
return increment
def get_log_normalizer_increment(samples: Samples,
log_weights: Array,
flow_apply: FlowApply,
flow_params: FlowParams,
log_density: LogDensityByStep,
step: int) -> Array:
"""Get the increment in the log of the normalizing constant estimate.
Args:
samples: Array representing samples (batch,) + sample_shape
log_weights: Array of log weights for the samples, of shape (batch,).
flow_apply: function that applies the flow.
flow_params: parameters of the flow.
log_density: function returning the log_density of a sample at given step.
step: current step
Returns:
Scalar Array, logarithm of normalizing constant increment.
"""
deltas = get_delta(samples,
flow_apply,
flow_params,
log_density,
step)
increment = get_log_normalizer_increment_no_flow(deltas, log_weights)
return increment
def reweight_no_flow(log_weights_old: Array,
deltas: Array) -> Array:
log_weights_new_unorm = log_weights_old - deltas
log_weights_new = jax.nn.log_softmax(log_weights_new_unorm)
return log_weights_new
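def _reweight_no_flow_example() -> Array:
  """Editor's illustrative sketch (not part of the original file).

  Demonstrates the importance weight update above: the deltas are subtracted
  from the old log weights and the result is renormalized with log_softmax, so
  the new weights always sum to one in probability space. The numbers below
  are placeholders.
  """
  log_weights_old = jnp.log(jnp.array([0.25, 0.25, 0.5]))
  deltas = jnp.array([0.1, -0.2, 0.3])
  return reweight_no_flow(log_weights_old, deltas)  # Normalized log weights.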
def reweight(log_weights_old: Array,
samples: Samples,
flow_apply: FlowApply,
flow_params: FlowParams,
log_density: LogDensityByStep,
step: int) -> Array:
"""Compute the new weights from the old ones and the deltas.
Args:
log_weights_old: Array of previous log weights, of shape (batch,).
samples: Array representing samples (batch,) + sample_shape
flow_apply: function that applies the flow.
flow_params: parameters of the flow.
log_density: function returning the log_density of a sample at given step.
step: current step
Returns:
logarithm of new weights.
"""
deltas = get_delta(samples,
flow_apply,
flow_params,
log_density,
step)
log_weights_new = reweight_no_flow(log_weights_old, deltas)
return log_weights_new
def update_samples_log_weights(
flow_apply: FlowApply, markov_kernel_apply: MarkovKernelApply,
flow_params: FlowParams, samples: Samples, log_weights: Array,
key: RandomKey, log_density: LogDensityByStep, step: int,
use_resampling: bool, use_markov: bool,
resample_threshold: float) -> Tuple[Array, Array, AcceptanceTuple]:
"""Update samples and log weights once the flow has been learnt."""
transformed_samples, _ = flow_apply(flow_params, samples)
assert_trees_all_equal_shapes(transformed_samples, samples)
log_weights_new = reweight(log_weights, samples, flow_apply, flow_params,
log_density, step)
assert_equal_shape([log_weights_new, log_weights])
if use_resampling:
subkey, key = jax.random.split(key)
resampled_samples, log_weights_resampled = resampling.optionally_resample(
subkey, log_weights_new, transformed_samples, resample_threshold)
assert_trees_all_equal_shapes(resampled_samples, transformed_samples)
assert_equal_shape([log_weights_resampled, log_weights_new])
else:
resampled_samples = transformed_samples
log_weights_resampled = log_weights_new
if use_markov:
markov_samples, acceptance_tuple = markov_kernel_apply(
step, key, resampled_samples)
else:
markov_samples = resampled_samples
acceptance_tuple = (1., 1., 1.)
return markov_samples, log_weights_resampled, acceptance_tuple
| annealed_flow_transport-master | annealed_flow_transport/flow_transport.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for annealed_flow_transport.flows."""
from absl.testing import absltest
from absl.testing import parameterized
from annealed_flow_transport import flows
import haiku as hk
import jax
import jax.numpy as jnp
import ml_collections
ConfigDict = ml_collections.ConfigDict
def _assert_equal_vec(tester, v1, v2, **kwargs):
tester.assertTrue(jnp.allclose(v1, v2, **kwargs))
class DiagonalAffineTest(parameterized.TestCase):
def test_identity_init(self):
# Only the sample shape needs to be set in the flow config here.
flow_config = ConfigDict()
flow_config.sample_shape = (3,)
def compute_flow(x_loc):
diagonal_affine_flow = flows.DiagonalAffine(flow_config)
return diagonal_affine_flow.transform_and_log_abs_det_jac(x_loc)
x_in = jnp.array([1., 2., 3.])
flow_func = hk.without_apply_rng(hk.transform(compute_flow))
dummy_key = jax.random.PRNGKey(13)
init_params = flow_func.init(dummy_key, x_in)
x_out, log_det_abs_jac = flow_func.apply(init_params, x_in)
_assert_equal_vec(self, x_out, x_in)
_assert_equal_vec(self, log_det_abs_jac, 0.)
# Now test the inverse.
def compute_inverse_flow(x_loc):
diagonal_affine_flow = flows.DiagonalAffine(flow_config)
return diagonal_affine_flow.inv_transform_and_log_abs_det_jac(x_loc)
inv_flow_func = hk.without_apply_rng(hk.transform(compute_inverse_flow))
x_inv, log_det_abs_jac_inv = inv_flow_func.apply(init_params, x_in)
_assert_equal_vec(self, x_in, x_inv)
_assert_equal_vec(self, log_det_abs_jac_inv, 0.)
def test_non_identity(self):
flow_config = ConfigDict()
flow_config.sample_shape = (3,)
# As above, only the sample shape is set in the flow config.
def compute_flow(x_loc):
diagonal_affine_flow = flows.DiagonalAffine(flow_config)
return diagonal_affine_flow.transform_and_log_abs_det_jac(x_loc)
x_in = jnp.array([1., 2., 3.])
flow_func = hk.without_apply_rng(hk.transform(compute_flow))
dummy_key = jax.random.PRNGKey(13)
init_params = flow_func.init(dummy_key, x_in)
shift = -0.3
num_dim = 3
new_params = jax.tree_map(lambda x: x+shift, init_params)
x_out, log_det_abs_jac = flow_func.apply(new_params, x_in)
validation_x_out = x_in * jnp.exp(shift) + shift
_assert_equal_vec(self, validation_x_out, x_out)
_assert_equal_vec(self, log_det_abs_jac, num_dim*shift)
def short_func(x_loc):
return flow_func.apply(new_params, x_loc)[0]
numerical_jacobian = jax.jacobian(short_func)(x_in)
numerical_log_abs_det = jnp.linalg.slogdet(numerical_jacobian)[1]
_assert_equal_vec(self, log_det_abs_jac, numerical_log_abs_det)
# Now test the inverse.
def compute_inverse_flow(x_loc):
diagonal_affine_flow = flows.DiagonalAffine(flow_config)
return diagonal_affine_flow.inv_transform_and_log_abs_det_jac(x_loc)
inv_flow_func = hk.without_apply_rng(hk.transform(compute_inverse_flow))
x_inv, log_det_abs_jac_inv = inv_flow_func.apply(new_params, x_out)
_assert_equal_vec(self, x_in, x_inv)
_assert_equal_vec(self, log_det_abs_jac, -1. * log_det_abs_jac_inv)
class SplinesTest(parameterized.TestCase):
def _get_non_identity_monotone_spline(self):
bin_positions = jnp.linspace(-3., 3., 10)
bin_heights = jax.nn.softplus(bin_positions)
derivatives = jax.nn.sigmoid(bin_positions)
def spline_func(x):
return flows.rational_quadratic_spline(x,
bin_positions,
bin_heights,
derivatives)
return spline_func
def test_identity(self):
bin_positions = jnp.linspace(-3., 3., 10)
bin_heights = bin_positions
derivatives = jnp.ones_like(bin_heights)
x = jnp.array(1.77)
output, output_deriv = flows.rational_quadratic_spline(x,
bin_positions,
bin_heights,
derivatives)
_assert_equal_vec(self, x, output)
_assert_equal_vec(self, output_deriv, 1.)
def test_jacobian(self):
# Test jacobian against numerical value for non-identity transformation.
x = jnp.array(1.77)
spline_func = self._get_non_identity_monotone_spline()
_, output_deriv = spline_func(x)
curry = lambda input: spline_func(input)[0]
grad_func = jax.grad(curry)
grad_val = grad_func(x)
_assert_equal_vec(self, grad_val, output_deriv)
def test_monotonic(self):
# Test that the function is monotonic and has positive derivatives.
x = jnp.linspace(-2.7, 2.7, 17)
spline_func_bat = jax.vmap(self._get_non_identity_monotone_spline())
spline_vals, spline_derivs = spline_func_bat(x)
self.assertTrue((jnp.diff(spline_vals) > 0.).all())
self.assertTrue((jnp.diff(spline_derivs) > 0.).all())
class AutoregressiveMLPTest(parameterized.TestCase):
def _get_transformed(self, zero_init):
def forward(x):
mlp = flows.AutoregressiveMLP([3, 1],
False,
jax.nn.leaky_relu,
zero_init,
True)
return mlp(x)
forward_fn = hk.without_apply_rng(hk.transform(forward))
return forward_fn
def test_zero_init(self):
forward_fn = self._get_transformed(True)
x = jnp.array([1., 2., 3.])
key = jax.random.PRNGKey(13)
params = forward_fn.init(key,
x)
output = forward_fn.apply(params, x)
_assert_equal_vec(self, output, jnp.zeros_like(output))
def test_autoregressive(self):
forward_fn = self._get_transformed(False)
x = jnp.array([1., 2., 3.])
key = jax.random.PRNGKey(13)
params = forward_fn.init(key,
x)
curry = lambda u: forward_fn.apply(params, u)[:, 0]
jacobian = jax.jacobian(curry)(x)
lower_triangle = jnp.tril(jacobian, k=0)
_assert_equal_vec(self, lower_triangle, jnp.zeros_like(lower_triangle))
class SplineInverseAutoregressiveFlowTest(parameterized.TestCase):
def _get_config(self, identity_init):
flow_config = ConfigDict()
flow_config.num_spline_bins = 10
flow_config.intermediate_hids_per_dim = 30
flow_config.num_layers = 3
flow_config.identity_init = identity_init
flow_config.lower_lim = -4.
flow_config.upper_lim = 4.
flow_config.min_bin_size = 1e-4
flow_config.min_derivative = 1e-4
flow_config.bias_last = True
return flow_config
def _get_transformed(self, identity_init):
def forward(x):
config = self._get_config(identity_init)
flow = flows.SplineInverseAutoregressiveFlow(config)
return flow.transform_and_log_abs_det_jac(x)
forward_fn = hk.without_apply_rng(hk.transform(forward))
return forward_fn
def test_identity_init(self):
forward_fn = self._get_transformed(True)
x = jnp.array([1., 2., 3.])
key = jax.random.PRNGKey(13)
params = forward_fn.init(key,
x)
output, log_det_jac = forward_fn.apply(params, x)
_assert_equal_vec(self, output, x)
_assert_equal_vec(self, 0., log_det_jac, atol=1e-6)
def test_jacobian(self):
# Compare the numerical Jacobian with computed value.
forward_fn = self._get_transformed(False)
x = jnp.array([1., 2., 3.])
key = jax.random.PRNGKey(13)
params = forward_fn.init(key,
x)
curry_val = lambda x: forward_fn.apply(params, x)[0]
curry_jac = lambda x: forward_fn.apply(params, x)[1]
jac_func = jax.jacobian(curry_val)
jac = jac_func(x)
target_log_det_jac = jnp.sum(jnp.log(jnp.abs(jnp.diag(jac))))
test_log_det_jac = curry_jac(x)
_assert_equal_vec(self, target_log_det_jac, test_log_det_jac)
lower_triangle = jnp.tril(jac, k=-1)
_assert_equal_vec(self, lower_triangle, jnp.zeros_like(lower_triangle))
class AffineInverseAutoregressiveFlowTest(parameterized.TestCase):
def _get_config(self, identity_init):
flow_config = ConfigDict()
flow_config.intermediate_hids_per_dim = 30
flow_config.num_layers = 3
flow_config.identity_init = identity_init
flow_config.bias_last = True
return flow_config
def _get_transformed(self, identity_init):
def forward(x):
config = self._get_config(identity_init)
flow = flows.AffineInverseAutoregressiveFlow(config)
return flow.transform_and_log_abs_det_jac(x)
forward_fn = hk.without_apply_rng(hk.transform(forward))
return forward_fn
def test_identity_init(self):
forward_fn = self._get_transformed(True)
x = jnp.array([1., 2., 3.])
key = jax.random.PRNGKey(13)
params = forward_fn.init(key,
x)
output, log_det_jac = forward_fn.apply(params, x)
_assert_equal_vec(self, output, x)
_assert_equal_vec(self, 0., log_det_jac, atol=1e-6)
def test_jacobian(self):
# Compare the numerical Jacobian with computed value.
forward_fn = self._get_transformed(False)
x = jnp.array([1., 2., 3.])
key = jax.random.PRNGKey(13)
params = forward_fn.init(key,
x)
curry_val = lambda x: forward_fn.apply(params, x)[0]
curry_jac = lambda x: forward_fn.apply(params, x)[1]
jac_func = jax.jacobian(curry_val)
jac = jac_func(x)
target_log_det_jac = jnp.sum(jnp.log(jnp.abs(jnp.diag(jac))))
test_log_det_jac = curry_jac(x)
_assert_equal_vec(self, target_log_det_jac, test_log_det_jac)
lower_triangle = jnp.tril(jac, k=-1)
_assert_equal_vec(self, lower_triangle, jnp.zeros_like(lower_triangle))
class RationalQuadraticSplineFlowTest(parameterized.TestCase):
"""This just tests that the flow constructs and gives right shape.
The math functions are separately tested by SplinesTest.
"""
def _get_config(self):
flow_config = ConfigDict()
flow_config.num_bins = 5
flow_config.lower_lim = -3.
flow_config.upper_lim = 3.
flow_config.min_bin_size = 1e-2
flow_config.min_derivative = 1e-2
return flow_config
def test_shape(self):
def forward(x):
config = self._get_config()
flow = flows.RationalQuadraticSpline(config)
return flow.transform_and_log_abs_det_jac(x)
forward_fn = hk.without_apply_rng(hk.transform(forward))
x = jnp.array([1., 2., 3.])
key = jax.random.PRNGKey(13)
params = forward_fn.init(key,
x)
output, log_det_jac = forward_fn.apply(params, x)
self.assertEqual(x.shape, output.shape)
self.assertEqual(log_det_jac.shape, ())
class ComposedFlowsTest(parameterized.TestCase):
def _get_individual_config(self, is_identity: bool):
flow_config = ConfigDict()
flow_config.type = 'SplineInverseAutoregressiveFlow'
flow_config.num_spline_bins = 10
flow_config.intermediate_hids_per_dim = 30
flow_config.num_layers = 3
flow_config.identity_init = is_identity
flow_config.lower_lim = -4.
flow_config.upper_lim = 4.
flow_config.min_bin_size = 1e-4
flow_config.min_derivative = 1e-4
flow_config.bias_last = True
return flow_config
def _get_overall_config(self, is_identity: bool):
flow_config = ConfigDict()
# A flow based on two flows composed.
flow_config.flow_configs = [self._get_individual_config(is_identity)] * 2
return flow_config
def _get_transformed(self, is_identity):
def forward(x):
config = self._get_overall_config(is_identity=is_identity)
flow = flows.ComposedFlows(config)
return flow(x)
forward_fn = hk.without_apply_rng(hk.transform(forward))
return forward_fn
def test_identity(self):
# Test that two identity flows composed gives an identity flow.
forward_fn = self._get_transformed(is_identity=True)
x = jnp.array([[1., 2., 3.]])
key = jax.random.PRNGKey(13)
params = forward_fn.init(key,
x)
output, log_det_jac = forward_fn.apply(params, x)
_assert_equal_vec(self, x, output, atol=1e-6)
_assert_equal_vec(self, log_det_jac, jnp.array([0.]), atol=1e-6)
def test_jacobian(self):
# Test the numerical Jacobian of the composition of two non-identity flows.
forward_fn = self._get_transformed(is_identity=False)
x = jnp.array([[1., 2., 3.]])
key = jax.random.PRNGKey(13)
params = forward_fn.init(key,
x)
curry_val = lambda x: forward_fn.apply(params, x[None])[0][0, :]
curry_jac = lambda x: forward_fn.apply(params, x)[1][0]
jac_func = jax.jacobian(curry_val)
jac = jac_func(x[0])
print(jac)
target_log_det_jac = jnp.linalg.slogdet(jac)[1]
test_log_det_jac = curry_jac(x)
_assert_equal_vec(self, target_log_det_jac, test_log_det_jac, atol=1e-6)
class CheckerBoardMaskTest(parameterized.TestCase):
def test_checkerboard(self):
target_a = jnp.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]])
target_b = jnp.array([[1, 0, 1], [0, 1, 0]])
test_a = flows.get_checkerboard_mask((3, 3), 0)
test_b = flows.get_checkerboard_mask((2, 3), 1)
_assert_equal_vec(self, target_a, test_a)
_assert_equal_vec(self, target_b, test_b)
class TestFullyConvolutionalNetwork(parameterized.TestCase):
def test_net(self):
num_middle_channels = 3
num_middle_layers = 2
num_final_channels = 2
image_shape = (7, 9)
kernel_shape = (4, 3)
def forward(x):
net = flows.FullyConvolutionalNetwork(
num_middle_channels=num_middle_channels,
num_middle_layers=num_middle_layers,
num_final_channels=num_final_channels,
kernel_shape=kernel_shape,
zero_final=True)
return net(x)
forward_fn = hk.without_apply_rng(hk.transform(forward))
key = jax.random.PRNGKey(1)
subkey, key = jax.random.split(key)
random_input = jax.random.normal(subkey, image_shape)
params = forward_fn.init(key, random_input)
output = forward_fn.apply(params, random_input)
self.assertEqual(output.shape, image_shape+(2,))
_assert_equal_vec(self, output, jnp.zeros_like(output))
def test_translation_symmetry(self):
num_middle_channels = 3
num_middle_layers = 2
num_final_channels = 2
image_shape = (7, 9)
kernel_shape = (3, 3)
def forward(x):
net = flows.FullyConvolutionalNetwork(
num_middle_channels=num_middle_channels,
num_middle_layers=num_middle_layers,
num_final_channels=num_final_channels,
kernel_shape=kernel_shape,
zero_final=False,
is_torus=True)
return net(x)
forward_fn = hk.without_apply_rng(hk.transform(forward))
key = jax.random.PRNGKey(1)
subkey, key = jax.random.split(key)
random_input = jax.random.normal(subkey, image_shape)
params = forward_fn.init(key, random_input)
output = forward_fn.apply(params, random_input)
def roll_array(array_in):
return jnp.roll(array_in,
shift=(2, 2),
axis=(0, 1))
translated_output = forward_fn.apply(params,
roll_array(random_input))
_assert_equal_vec(self,
translated_output,
roll_array(output))
class TestConvAffineCoupling(parameterized.TestCase):
def test_identity_init(self):
image_shape = (3, 3)
kernel_shape = (2, 2)
num_middle_channels = 3
num_middle_layers = 3
mask = jnp.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]])
def forward(x):
flow = flows.ConvAffineCoupling(
mask=mask,
conv_num_middle_channels=num_middle_channels,
conv_num_middle_layers=num_middle_layers,
conv_kernel_shape=kernel_shape,
identity_init=True)
return flow(x)
forward_fn = hk.without_apply_rng(hk.transform(forward))
key = jax.random.PRNGKey(1)
subkey, key = jax.random.split(key)
random_input = jax.random.normal(subkey, shape=image_shape)
params = forward_fn.init(key, random_input)
output, log_det_jac = forward_fn.apply(params, random_input)
_assert_equal_vec(self, output, random_input)
_assert_equal_vec(self, log_det_jac, 0.)
def test_jacobian(self):
num_middle_channels = 3
num_middle_layers = 5
image_shape = (3, 2)
kernel_shape = (3, 3)
mask = jnp.array([[1, 1], [1, 0], [0, 0]])
def forward(x):
flow = flows.ConvAffineCoupling(
mask=mask,
conv_num_middle_channels=num_middle_channels,
conv_num_middle_layers=num_middle_layers,
conv_kernel_shape=kernel_shape,
identity_init=False)
return flow(x)
forward_fn = hk.without_apply_rng(hk.transform(forward))
key = jax.random.PRNGKey(2)
subkey, key = jax.random.split(key)
random_input = jax.random.normal(subkey, shape=image_shape)
params = forward_fn.init(key, random_input)
apply = jax.jit(forward_fn.apply)
curry_val = lambda x: apply(params, x)[0].reshape((6,))
curry_jac = lambda x: apply(params, x)[1]
jac_func = jax.jit(jax.jacobian(curry_val))
jac = jac_func(random_input).reshape((6, 6))
print('NVP Jacobian \n', jac)
target_log_det_jac = jnp.sum(jnp.log(jnp.abs(jnp.diag(jac))))
test_log_det_jac = curry_jac(random_input)
_assert_equal_vec(self, target_log_det_jac, test_log_det_jac)
upper_triangle = jnp.triu(jac, k=1)
_assert_equal_vec(self, upper_triangle, jnp.zeros_like(upper_triangle))
def test_inverse(self):
num_middle_channels = 3
num_middle_layers = 5
image_shape = (3, 2)
kernel_shape = (3, 3)
mask = jnp.array([[1, 1], [1, 0], [0, 0]])
def forward_and_inverse(x):
flow = flows.ConvAffineCoupling(
mask=mask,
conv_num_middle_channels=num_middle_channels,
conv_num_middle_layers=num_middle_layers,
conv_kernel_shape=kernel_shape,
identity_init=False)
y, fw_ldj = flow(x)
recons_x, bw_ldj = flow.inverse(y)
return y, fw_ldj, recons_x, bw_ldj
forward_fn = hk.without_apply_rng(hk.transform(forward_and_inverse))
key = jax.random.PRNGKey(2)
subkey, key = jax.random.split(key)
random_input = jax.random.normal(subkey, shape=image_shape)
params = forward_fn.init(key, random_input)
unused_y, fw_ldj, recons_x, bw_ldj = forward_fn.apply(params, random_input)
_assert_equal_vec(self, recons_x, random_input)
_assert_equal_vec(self, fw_ldj, -1.*bw_ldj)
class TestHaikuParameterShapes(parameterized.TestCase):
def test_diagonal_affine(self):
flow_config = ConfigDict()
flow_config.sample_shape = (2,)
num_dim = 2
num_batch = 3
def run_flow(x):
flow = flows.DiagonalAffine(config=flow_config)
return flow(x)
x_in = jnp.zeros((num_batch, num_dim))
forward_fn = hk.without_apply_rng(hk.transform(run_flow))
key = jax.random.PRNGKey(1)
params = forward_fn.init(key, x_in)
bias_shape = params['diagonal_affine']['bias'].shape
unconst_diag_shape = params['diagonal_affine']['unconst_diag'].shape
self.assertEqual(bias_shape, (num_dim,))
self.assertEqual(unconst_diag_shape, (num_dim,))
x_out, log_abs_det = forward_fn.apply(params, x_in)
self.assertEqual(x_out.shape, x_in.shape)
self.assertEqual(log_abs_det.shape, (num_batch,))
if __name__ == '__main__':
absltest.main()
| annealed_flow_transport-master | annealed_flow_transport/flows_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Continual Repeated Annealed Flow Transport (CRAFT) Monte Carlo algorithm."""
import time
from typing import Any, Tuple, Union
from absl import logging
from annealed_flow_transport import flow_transport
import annealed_flow_transport.aft_types as tp
import jax
import jax.numpy as jnp
import optax
Array = tp.Array
Samples = tp.Samples
OptState = tp.OptState
UpdateFn = tp.UpdateFn
FlowParams = tp.FlowParams
FlowApply = tp.FlowApply
LogDensityNoStep = tp.LogDensityNoStep
InitialSampler = tp.InitialSampler
RandomKey = tp.RandomKey
SamplesTuple = tp.SamplesTuple
FreeEnergyAndGrad = tp.FreeEnergyAndGrad
MarkovKernelApply = tp.MarkovKernelApply
LogDensityByStep = tp.LogDensityByStep
AcceptanceTuple = tp.AcceptanceTuple
LogWeightsTuple = tp.LogWeightsTuple
AlgoResultsTuple = tp.AlgoResultsTuple
ParticleState = tp.ParticleState
def inner_step_craft(
key: RandomKey, free_energy_and_grad: FreeEnergyAndGrad,
flow_params: FlowParams,
flow_apply: FlowApply, markov_kernel_apply: MarkovKernelApply,
samples: Array, log_weights: Array, log_density: LogDensityByStep,
step: int, config
) -> Tuple[FlowParams, Array, Array, Samples, Array, AcceptanceTuple]:
"""A temperature step of CRAFT.
Args:
key: A JAX random key.
free_energy_and_grad: function giving estimate of free energy and gradient.
flow_params: parameters of the flow.
flow_apply: function that applies the flow.
markov_kernel_apply: function that applies the Markov transition kernel.
samples: input samples.
log_weights: Array of log weights for the current samples, of shape (batch,).
log_density: function returning the log_density of a sample at given step.
step: int giving current step of algorithm.
config: experiment configuration.
Returns:
flow_grads: Gradient with respect to parameters of flow.
vfe: Value of the objective for this temperature.
log_normalizer_increment: Scalar log of normalizing constant increment.
next_samples: samples after temperature step has been performed.
new_log_weights: log_weights after temperature step has been performed.
acceptance_tuple: Acceptance rate of the Markov kernels used.
"""
vfe, flow_grads = free_energy_and_grad(flow_params,
samples,
log_weights,
step)
log_normalizer_increment = flow_transport.get_log_normalizer_increment(
samples, log_weights, flow_apply, flow_params, log_density, step)
next_samples, next_log_weights, acceptance_tuple = flow_transport.update_samples_log_weights(
flow_apply=flow_apply, markov_kernel_apply=markov_kernel_apply,
flow_params=flow_params, samples=samples, log_weights=log_weights,
key=key, log_density=log_density, step=step,
use_resampling=config.use_resampling, use_markov=config.use_markov,
resample_threshold=config.resample_threshold)
return flow_grads, vfe, log_normalizer_increment, next_samples, next_log_weights, acceptance_tuple
def inner_loop_craft(key: RandomKey, free_energy_and_grad: FreeEnergyAndGrad,
opt_update: UpdateFn, opt_states: OptState,
transition_params: FlowParams, flow_apply: FlowApply,
markov_kernel_apply: MarkovKernelApply,
initial_sampler: InitialSampler,
log_density: LogDensityByStep, config,
axis_name=None):
"""Inner loop of CRAFT training.
Uses Scan step requiring trees that have the same structure as the base input
but with each leaf extended with an extra array index of size num_transitions.
We call this an extended tree.
Args:
key: A JAX random key.
free_energy_and_grad: function giving estimate of free energy and gradient.
opt_update: function that updates the state of flow based on gradients etc.
opt_states: Extended tree of optimizer states.
transition_params: Extended tree of flow parameters.
flow_apply: function that applies the flow.
markov_kernel_apply: function that applies the Markov transition kernel.
initial_sampler: A function that produces the initial samples.
log_density: A function evaluating the log density for each step.
config: A ConfigDict containing the configuration.
axis_name: None or string for gradient sync when using pmap only.
Returns:
final_samples: final samples.
final_log_weights: Array of final log_weights.
final_transition_params: Extended tree of updated flow params.
final_opt_states: Extended tree of updated optimizer parameters.
overall_free_energy: Total variational free energy.
log_normalizer_estimate: Estimate of the log normalizer.
"""
subkey, key = jax.random.split(key)
initial_samples = initial_sampler(subkey, config.craft_batch_size,
config.sample_shape)
initial_log_weights = -jnp.log(config.craft_batch_size) * jnp.ones(
config.craft_batch_size)
def scan_step(passed_state, per_step_input):
samples, log_weights = passed_state
flow_params, key, inner_step = per_step_input
flow_grads, vfe, log_normalizer_increment, next_samples, next_log_weights, acceptance_tuple = inner_step_craft(
key=key,
free_energy_and_grad=free_energy_and_grad,
flow_params=flow_params,
flow_apply=flow_apply,
markov_kernel_apply=markov_kernel_apply,
samples=samples,
log_weights=log_weights,
log_density=log_density,
step=inner_step,
config=config)
next_passed_state = (next_samples, next_log_weights)
per_step_output = (flow_grads, vfe,
log_normalizer_increment, acceptance_tuple)
return next_passed_state, per_step_output
initial_state = (initial_samples, initial_log_weights)
inner_steps = jnp.arange(1, config.num_temps)
keys = jax.random.split(key, config.num_temps-1)
per_step_inputs = (transition_params, keys, inner_steps)
final_state, per_step_outputs = jax.lax.scan(scan_step, initial_state,
per_step_inputs)
final_samples, final_log_weights = final_state
flow_grads, free_energies, log_normalizer_increments, unused_acceptance_tuples = per_step_outputs
if axis_name:
flow_grads = jax.lax.pmean(flow_grads, axis_name=axis_name)
def per_step_update(input_tuple):
(step_grad, step_opt, step_params) = input_tuple
step_updates, new_opt_state = opt_update(step_grad,
step_opt)
new_step_params = optax.apply_updates(step_params,
step_updates)
return new_step_params, new_opt_state
final_transition_params, final_opt_states = jax.lax.map(
per_step_update, (flow_grads, opt_states, transition_params))
overall_free_energy = jnp.sum(free_energies)
log_normalizer_estimate = jnp.sum(log_normalizer_increments)
return final_samples, final_log_weights, final_transition_params, final_opt_states, overall_free_energy, log_normalizer_estimate
def craft_evaluation_loop(key: RandomKey, transition_params: FlowParams,
flow_apply: FlowApply,
markov_kernel_apply: MarkovKernelApply,
initial_sampler: InitialSampler,
log_density: LogDensityByStep,
config) -> ParticleState:
"""A single pass of CRAFT with fixed flows.
Uses Scan step requiring trees that have the same structure as the base input
but with each leaf extended with an extra array index of size num_transitions.
We call this an extended tree.
Args:
key: A JAX random key.
transition_params: Extended tree of flow parameters.
flow_apply: function that applies the flow.
markov_kernel_apply: function that applies the Markov transition kernel.
initial_sampler: A function that produces the initial samples.
log_density: A function evaluating the log density for each step.
config: A ConfigDict containing the configuration.
Returns:
ParticleState containing samples, log_weights, log_normalizer_estimate.
"""
subkey, key = jax.random.split(key)
initial_samples = initial_sampler(subkey, config.craft_batch_size,
config.sample_shape)
initial_log_weights = -jnp.log(config.craft_batch_size) * jnp.ones(
config.craft_batch_size)
def scan_step(passed_state, per_step_input):
samples, log_weights = passed_state
flow_params, key, inner_step = per_step_input
log_normalizer_increment = flow_transport.get_log_normalizer_increment(
samples, log_weights, flow_apply, flow_params, log_density, inner_step)
next_samples, next_log_weights, acceptance_tuple = flow_transport.update_samples_log_weights(
flow_apply=flow_apply, markov_kernel_apply=markov_kernel_apply,
flow_params=flow_params, samples=samples, log_weights=log_weights,
key=key, log_density=log_density, step=inner_step,
use_resampling=config.use_resampling, use_markov=config.use_markov,
resample_threshold=config.resample_threshold)
next_passed_state = (next_samples, next_log_weights)
per_step_output = (log_normalizer_increment, acceptance_tuple)
return next_passed_state, per_step_output
initial_state = (initial_samples, initial_log_weights)
inner_steps = jnp.arange(1, config.num_temps)
keys = jax.random.split(key, config.num_temps-1)
per_step_inputs = (transition_params, keys, inner_steps)
final_state, per_step_outputs = jax.lax.scan(scan_step, initial_state,
per_step_inputs)
final_samples, final_log_weights = final_state
log_normalizer_increments, unused_acceptance_tuples = per_step_outputs
log_normalizer_estimate = jnp.sum(log_normalizer_increments)
particle_state = ParticleState(
samples=final_samples,
log_weights=final_log_weights,
log_normalizer_estimate=log_normalizer_estimate)
return particle_state
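# Illustrative usage sketch (added comment, not part of the original module).
# It assumes `transition_params` restored from a saved CRAFT checkpoint and a
# `config` with the fields read above, mirroring evaluation.get_particle_propose:
#
#   particle_state = craft_evaluation_loop(
#       jax.random.PRNGKey(0), transition_params, flow_apply,
#       markov_kernel_apply, initial_sampler, density_by_step, config)
#   log_z = particle_state.log_normalizer_estimate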
def outer_loop_craft(opt_update: UpdateFn,
opt_init_state: OptState,
flow_init_params: FlowParams,
flow_apply: FlowApply,
flow_inv_apply: Union[FlowApply, Any],
density_by_step: LogDensityByStep,
markov_kernel_by_step: MarkovKernelApply,
initial_sampler: InitialSampler,
key: RandomKey,
config,
log_step_output,
save_checkpoint) -> AlgoResultsTuple:
"""Outer loop for CRAFT training.
Args:
opt_update: function that updates the state of flow based on gradients etc.
opt_init_state: initial state variables of the optimizer.
flow_init_params: initial state of the flow.
flow_apply: function that applies the flow.
flow_inv_apply: function that applies the inverse flow or None.
density_by_step: The log density for different annealing temperatures.
markov_kernel_by_step: Markov kernel for different annealing temperatures.
initial_sampler: A function that produces the initial samples.
key: A Jax random key.
config: A ConfigDict containing the configuration.
log_step_output: Callable that logs the step output.
save_checkpoint: None or function that takes params and saves them.
Returns:
An AlgoResults tuple containing a summary of the results.
"""
num_temps = config.num_temps
def free_energy_short(flow_params: FlowParams,
samples: Samples,
log_weights: Array,
step: int) -> Array:
return flow_transport.transport_free_energy_estimator(
samples, log_weights, flow_apply, flow_inv_apply, flow_params,
density_by_step, step, config.use_path_gradient)
free_energy_and_grad = jax.value_and_grad(free_energy_short)
def short_inner_loop(rng_key: RandomKey,
curr_opt_states: OptState,
curr_transition_params):
return inner_loop_craft(key=rng_key,
free_energy_and_grad=free_energy_and_grad,
opt_update=opt_update,
opt_states=curr_opt_states,
transition_params=curr_transition_params,
flow_apply=flow_apply,
markov_kernel_apply=markov_kernel_by_step,
initial_sampler=initial_sampler,
log_density=density_by_step,
config=config)
inner_loop_jit = jax.jit(short_inner_loop)
repeater = lambda x: jnp.repeat(x[None], num_temps-1, axis=0)
opt_states = jax.tree_util.tree_map(repeater, opt_init_state)
transition_params = jax.tree_util.tree_map(repeater, flow_init_params)
logging.info('Performing initial step redundantly for accurate timing...')
initial_start_time = time.time()
inner_loop_jit(key, opt_states, transition_params)
initial_finish_time = time.time()
initial_time_diff = initial_finish_time - initial_start_time
logging.info('Initial step time / seconds %f: ', initial_time_diff)
logging.info('Launching training...')
start_time = time.time()
for step in range(config.craft_num_iters):
with jax.profiler.StepTraceAnnotation('train', step_num=step):
key, subkey = jax.random.split(key)
final_samples, final_log_weights, transition_params, opt_states, overall_free_energy, log_normalizer_estimate = inner_loop_jit(
subkey, opt_states, transition_params)
if step % config.report_step == 0:
if log_step_output is not None:
delta_time = time.time()-start_time
log_step_output(step=step,
training_objective=overall_free_energy,
log_normalizer_estimate=log_normalizer_estimate,
delta_time=delta_time,
samples=final_samples,
log_weights=final_log_weights)
logging.info(
'Step %05d: Free energy %f Log Normalizer estimate %f',
step, overall_free_energy, log_normalizer_estimate
)
finish_time = time.time()
delta_time = finish_time - start_time
logging.info('Delta time / seconds %f: ', delta_time)
logging.info('Log normalizer estimate %f: ', log_normalizer_estimate)
if save_checkpoint:
save_checkpoint(transition_params)
results = AlgoResultsTuple(
test_samples=final_samples,
test_log_weights=final_log_weights,
log_normalizer_estimate=log_normalizer_estimate,
delta_time=delta_time,
initial_time_diff=initial_time_diff)
return results
|
annealed_flow_transport-master
|
annealed_flow_transport/craft.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code related to resampling of weighted samples."""
from typing import Tuple
import annealed_flow_transport.aft_types as tp
import chex
import jax
import jax.numpy as jnp
Array = tp.Array
RandomKey = tp.RandomKey
Samples = tp.Samples
assert_trees_all_equal_shapes = chex.assert_trees_all_equal_shapes
def log_effective_sample_size(log_weights: Array) -> Array:
"""Numerically stable computation of log of effective sample size.
ESS := (sum_i weight_i)^2 / (sum_i weight_i^2) and so working in terms of logs
  log ESS = 2 log sum_i exp(log weight_i) - log sum_i exp(2 log weight_i)
Args:
log_weights: Array of shape (num_batch). log of normalized weights.
Returns:
Scalar log ESS.
"""
chex.assert_rank(log_weights, 1)
first_term = 2.*jax.scipy.special.logsumexp(log_weights)
second_term = jax.scipy.special.logsumexp(2.*log_weights)
chex.assert_equal_shape([first_term, second_term])
return first_term-second_term
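# Worked example (added comment): for four uniformly weighted samples,
# log_weights = -jnp.log(4.) * jnp.ones(4) gives first_term = 2 * log(4 * 1/4) = 0
# and second_term = log(4 * 1/16) = -log(4), so log ESS = log(4) and the ESS
# equals the number of samples, as expected for uniform weights.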
def simple_resampling(key: RandomKey, log_weights: Array,
samples: Array) -> Tuple[Array, Array]:
"""Simple resampling of log_weights and samples pair.
Randomly select possible samples with replacement proportionally to
softmax(log_weights).
Args:
key: A Jax Random Key.
log_weights: An array of size (num_batch,) containing the log weights.
    samples: An array of size (num_batch, num_dim) containing the samples.
Returns:
New samples of shape (num_batch, num_dim) and weights of shape (num_batch,)
"""
chex.assert_rank(log_weights, 1)
num_batch = log_weights.shape[0]
indices = jax.random.categorical(key, log_weights,
shape=(num_batch,))
take_lambda = lambda x: jnp.take(x, indices, axis=0)
resamples = jax.tree_util.tree_map(take_lambda, samples)
log_weights_new = -jnp.log(log_weights.shape[0])*jnp.ones_like(log_weights)
chex.assert_equal_shape([log_weights, log_weights_new])
assert_trees_all_equal_shapes(resamples, samples)
return resamples, log_weights_new
def optionally_resample(key: RandomKey, log_weights: Array, samples: Samples,
resample_threshold: Array) -> Tuple[Array, Array]:
"""Call simple_resampling on log_weights/samples if ESS is below threshold.
  The resample_threshold is interpreted as a fraction of the total number of
  samples. For example, a resample_threshold of 0.3 corresponds to an ESS
  threshold of 0.3 * num_batch samples.
Args:
key: Jax Random Key.
log_weights: Array of shape (num_batch,)
samples: Array of shape (num_batch, num_dim)
    resample_threshold: scalar controlling the fraction of the total sample
      size used as the ESS threshold.
Returns:
    New samples of shape (num_batch, num_dim) and log weights of shape
    (num_batch,).
"""
# In the case where we don't resample we just return the current
# samples and weights.
# lamdba_no_resample will do that on the tuple given to jax.lax.cond below.
lambda_no_resample = lambda x: (x[2], x[1])
lambda_resample = lambda x: simple_resampling(*x)
threshold_sample_size = log_weights.shape[0] * resample_threshold
log_ess = log_effective_sample_size(log_weights)
return jax.lax.cond(log_ess < jnp.log(threshold_sample_size), lambda_resample,
lambda_no_resample, (key, log_weights, samples))
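# Illustrative usage sketch (added comment with hypothetical shapes): with
# num_batch = 100 and resample_threshold = 0.3, resampling triggers whenever
# the ESS drops below 30 particles.
#
#   key = jax.random.PRNGKey(0)
#   log_weights = -jnp.log(100.) * jnp.ones(100)  # uniform weights, ESS = 100.
#   samples = jnp.zeros((100, 2))
#   new_samples, new_log_weights = optionally_resample(
#       key, log_weights, samples, 0.3)  # ESS above threshold: no resampling.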
|
annealed_flow_transport-master
|
annealed_flow_transport/resampling.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for annealed_flow_transport.flow_transport."""
from absl.testing import absltest
from absl.testing import parameterized
from annealed_flow_transport import cox_process_utils
import jax.numpy as jnp
import numpy as np
def _assert_equal_vec(tester, v1, v2, **kwargs):
tester.assertTrue(jnp.allclose(v1, v2, **kwargs))
class CoxProcessUtilsTest(parameterized.TestCase):
def test_get_bin_vals(self):
bins_per_dim = 30
bin_vals = cox_process_utils.get_bin_vals(bins_per_dim)
self.assertEqual(bin_vals.shape, (bins_per_dim*bins_per_dim, 2))
first_bin_vals = bin_vals[0, :]
self.assertEqual(list(first_bin_vals), [0, 0])
second_bin_vals = bin_vals[1, :]
self.assertEqual(list(second_bin_vals), [0, 1])
final_bin_vals = bin_vals[-1, :]
self.assertEqual(list(final_bin_vals),
[bins_per_dim-1, bins_per_dim-1])
def test_whites_and_latents(self):
lower_triangular_matrix = jnp.array([[1., 0.], [-1., 2.]])
constant_mean = 1.7
latents = jnp.array([5.5, 3.6])
test_white = cox_process_utils.get_white_from_latents(
latents, constant_mean, lower_triangular_matrix)
test_latents = cox_process_utils.get_latents_from_white(
test_white, constant_mean, lower_triangular_matrix)
_assert_equal_vec(self, latents, test_latents)
def test_gram(self):
def pairwise_function(x, y):
return jnp.sqrt((x[0]-y[0])**2 + (x[1]-y[1])**2)
test_points = jnp.array([[1., 2.], [5.2, 6.1], [7.2, 3.6]])
test_gram_matrix = cox_process_utils.gram(pairwise_function,
test_points)
validation_gram_matrix = np.zeros((3, 3))
for row_index in range(3):
for col_index in range(3):
pair_val = pairwise_function(test_points[row_index, :],
test_points[col_index, :])
validation_gram_matrix[row_index, col_index] = pair_val
_assert_equal_vec(self, test_gram_matrix, validation_gram_matrix)
def test_bin_counts(self):
num_bins_per_dim = 2
test_array = jnp.array([[0.25, 0.25], # in bin [0, 0]
[0.75, 0.75], # in bin [1, 1]
[0.0, 0.0], # in bin [0, 0]
[0.0, 1.0], # in bin [0, 1] an edge case
[1.0, 1.0], # in bin [1, 1] a corner case
[0.22, 0.22]]) # in bin [0, 0]
test_bin_counts = cox_process_utils.get_bin_counts(test_array,
num_bins_per_dim)
validation_bin_counts = jnp.array([[3, 1], [0, 2]])
_assert_equal_vec(self, test_bin_counts, validation_bin_counts)
if __name__ == '__main__':
absltest.main()
|
annealed_flow_transport-master
|
annealed_flow_transport/cox_process_utils_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared custom defined types used in more than one source file."""
from typing import Any, Callable, Mapping, NamedTuple, Tuple
import chex
import ml_collections
import numpy as np
import optax
VaeBatch = Mapping[str, np.ndarray]
ConfigDict = ml_collections.ConfigDict
Array = Any
Samples = chex.ArrayTree
SampleShape = Any
LogDensityByStep = Any
RandomKey = Array
AcceptanceTuple = Tuple[Array, Array, Array]
MarkovKernelApply = Callable[[int, RandomKey, Samples],
Tuple[Samples, AcceptanceTuple]]
OptState = optax.OptState
UpdateFn = optax.TransformUpdateFn
FlowParams = Any
FlowApply = Callable[[FlowParams, Samples], Tuple[Samples, Array]]
LogDensityNoStep = Callable[[Samples], Array]
InitialSampler = Callable[[RandomKey, int, Tuple[int]], Samples]
FreeEnergyAndGrad = Callable[[FlowParams, Array, Array, int], Tuple[Array,
Array]]
FreeEnergyEval = Callable[[FlowParams, Array, Array, int], Array]
MNIST_IMAGE_SHAPE = (28, 28, 1)
class SamplesTuple(NamedTuple):
train_samples: Array
validation_samples: Array
test_samples: Array
class LogWeightsTuple(NamedTuple):
train_log_weights: Array
validation_log_weights: Array
test_log_weights: Array
class VfesTuple(NamedTuple):
train_vfes: Array
validation_vfes: Array
class AlgoResultsTuple(NamedTuple):
test_samples: Samples
test_log_weights: Array
log_normalizer_estimate: Array
delta_time: float
initial_time_diff: float
class ParticleState(NamedTuple):
samples: Samples
log_weights: Array
log_normalizer_estimate: Array
class VAEResult(NamedTuple):
sample_image: Array
reconst_sample: Array
latent_mean: Array
latent_std: Array
logits: Array
ParticlePropose = Callable[[RandomKey], ParticleState]
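# Example of the FlowApply contract (added comment): given flow parameters and
# a batch of samples, a FlowApply returns the transformed samples together with
# the per-sample log determinant of the flow Jacobian,
#   transformed_samples, log_det_jacs = flow_apply(flow_params, samples)
# as used, for instance, in vi.vfe_naive_importance.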
|
annealed_flow_transport-master
|
annealed_flow_transport/aft_types.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for annealed_flow_transport.train_vae."""
from absl.testing import parameterized
from annealed_flow_transport import vae
import jax
import jax.numpy as jnp
def _assert_equal_vec(tester, v1, v2, **kwargs):
tester.assertTrue(jnp.allclose(v1, v2, **kwargs))
class TestKLDivergence(parameterized.TestCase):
def reference_kl_divergence(self, mean, std):
def termwise_kl(single_mean, single_std):
term_a = -jnp.log(single_std)
term_b = 0.5 * single_std**2
term_c = 0.5 * single_mean**2
term_d = -0.5
return term_a + term_b + term_c + term_d
return jnp.sum(jax.vmap(termwise_kl)(mean, std))
def test_kl_divergence(self):
num_dim = 4
mean = jnp.zeros(num_dim)
std = jnp.ones(num_dim)
test_kl = vae.kl_divergence_standard_gaussian(mean, std)
_assert_equal_vec(self, test_kl, 0.)
mean_b = jnp.arange(4)
std_b = jnp.array([1.3, 1.7, 1.8, 2.0])
test_kl_b = vae.kl_divergence_standard_gaussian(mean_b, std_b)
reference_kl_b = self.reference_kl_divergence(mean_b, std_b)
_assert_equal_vec(self, test_kl_b, reference_kl_b)
def test_batch_kl_divergence(self):
num_dim = 5
num_batch = 3
total_points = num_dim * num_batch
means = jnp.arange(total_points).reshape((num_batch, num_dim))
stds = jnp.arange(total_points).reshape((num_batch, num_dim))+1.5
total_reference_kl_divergence = 0.
for batch_index in range(num_batch):
total_reference_kl_divergence += self.reference_kl_divergence(
means[batch_index], stds[batch_index])
reference_mean_kl = total_reference_kl_divergence/num_batch
test_mean_kl = vae.batch_kl_divergence_standard_gaussian(means,
stds)
_assert_equal_vec(self, reference_mean_kl, test_mean_kl)
class TestBinaryCrossEntropy(parameterized.TestCase):
def reference_binary_cross_entropy(self, logits, labels):
def single_binary_cross_entropy(logit, label):
h = label * jax.nn.softplus(-logit) + (1 - label) * jax.nn.softplus(logit)
return h
accumulator = 0.
(num_batch, num_dim_a, num_dim_b) = logits.shape
for batch_index in range(num_batch):
for dim_a in range(num_dim_a):
for dim_b in range(num_dim_b):
accumulator += single_binary_cross_entropy(
logits[batch_index, dim_a, dim_b], labels[batch_index, dim_a,
dim_b])
return accumulator/num_batch
def test_binary_cross_entropy(self):
num_batch = 7
num_pixel_per_image_dim = 3
total_elements = num_batch * num_pixel_per_image_dim * num_pixel_per_image_dim
trial_logits = jnp.arange(total_elements).reshape(
(num_batch, num_pixel_per_image_dim, num_pixel_per_image_dim)) - 10.
sequence = jnp.arange(total_elements).reshape(
(num_batch, num_pixel_per_image_dim, num_pixel_per_image_dim))
trial_labels = jnp.mod(sequence, 2)
test_loss = vae.binary_cross_entropy_from_logits(trial_logits,
trial_labels)
reference_loss = self.reference_binary_cross_entropy(trial_logits,
trial_labels)
_assert_equal_vec(self, test_loss, reference_loss)
|
annealed_flow_transport-master
|
annealed_flow_transport/vae_test.py
|
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stochastic Normalizing Flows as implemented in Wu et al. 2020.
For fixed flows this is equivalent to Annealed Importance Sampling with flows,
and without resampling.
Training is then based on the corresponding ELBO.
This is not reparameterizable using a continuous function, but Wu et al.
proceed as if it were, using a "straight through" gradient estimator.
"""
import time
from absl import logging
from annealed_flow_transport import flow_transport
import annealed_flow_transport.aft_types as tp
import jax
import jax.numpy as jnp
import optax
Array = tp.Array
OptState = tp.OptState
UpdateFn = tp.UpdateFn
FlowParams = tp.FlowParams
FlowApply = tp.FlowApply
LogDensityNoStep = tp.LogDensityNoStep
InitialSampler = tp.InitialSampler
RandomKey = tp.RandomKey
SamplesTuple = tp.SamplesTuple
FreeEnergyAndGrad = tp.FreeEnergyAndGrad
MarkovKernelApply = tp.MarkovKernelApply
LogDensityByStep = tp.LogDensityByStep
AcceptanceTuple = tp.AcceptanceTuple
LogWeightsTuple = tp.LogWeightsTuple
AlgoResultsTuple = tp.AlgoResultsTuple
def inner_loop_snf(key: RandomKey,
transition_params: FlowParams, flow_apply: FlowApply,
markov_kernel_apply: MarkovKernelApply,
initial_sampler: InitialSampler,
log_density: LogDensityByStep, config):
"""Inner loop of Stochastic Normalizing Flows.
  Uses a scan step that requires trees with the same structure as the base
  input but with each leaf extended by a leading axis of size num_transitions.
  We call this an extended tree.
Args:
key: A JAX random key.
transition_params: Extended tree of flow parameters.
flow_apply: function that applies the flow.
markov_kernel_apply: function that applies the Markov transition kernel.
initial_sampler: A function that produces the initial samples.
log_density: A function evaluating the log density for each step.
config: A ConfigDict containing the configuration.
Returns:
vfe: variational free energy.
"""
subkey, key = jax.random.split(key)
initial_samples = initial_sampler(subkey, config.snf_batch_size,
config.sample_shape)
initial_log_weights = -jnp.log(config.snf_batch_size) * jnp.ones(
config.snf_batch_size)
def scan_step(passed_state, per_step_input):
samples, log_weights = passed_state
flow_params, key, inner_step = per_step_input
vfe_increment = flow_transport.get_batch_parallel_free_energy_increment(
samples=samples,
flow_apply=flow_apply,
flow_params=flow_params,
log_density=log_density,
step=inner_step)
next_samples, next_log_weights, acceptance_tuple = flow_transport.update_samples_log_weights(
flow_apply=flow_apply, markov_kernel_apply=markov_kernel_apply,
flow_params=flow_params, samples=samples, log_weights=log_weights,
key=key, log_density=log_density, step=inner_step, use_resampling=False,
use_markov=config.use_markov,
resample_threshold=config.resample_threshold)
next_passed_state = (next_samples, next_log_weights)
per_step_output = (vfe_increment, acceptance_tuple)
return next_passed_state, per_step_output
initial_state = (initial_samples, initial_log_weights)
inner_steps = jnp.arange(1, config.num_temps)
keys = jax.random.split(key, config.num_temps-1)
per_step_inputs = (transition_params, keys, inner_steps)
unused_final_state, per_step_outputs = jax.lax.scan(scan_step, initial_state,
per_step_inputs)
vfe_increments, unused_acceptance_tuples = per_step_outputs
vfe = jnp.sum(vfe_increments)
return vfe
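# Added note: the summed per-temperature increments form a stochastic estimate
# of the negative ELBO, so -vfe is the lower-bound log normalizer estimate
# reported by outer_loop_snf below.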
def outer_loop_snf(flow_init_params: FlowParams,
flow_apply: FlowApply,
density_by_step: LogDensityByStep,
markov_kernel_by_step: MarkovKernelApply,
initial_sampler: InitialSampler,
key: RandomKey,
opt,
config,
log_step_output,
save_checkpoint):
"""Outer loop for Stochastic Normalizing Flows.
Args:
flow_init_params: initial state of the flow.
flow_apply: function that applies the flow.
density_by_step: The log density for different annealing temperatures.
markov_kernel_by_step: Markov kernel for different annealing temperatures.
initial_sampler: A function that produces the initial samples.
key: A Jax random key.
opt: An Optax optimizer.
config: A ConfigDict containing the configuration.
log_step_output: Callable that logs the step output.
save_checkpoint: None or function that takes params and saves them.
Returns:
An AlgoResults tuple containing a summary of the results.
"""
num_temps = config.num_temps
def short_inner_loop(curr_transition_params,
rng_key: RandomKey):
return inner_loop_snf(rng_key,
curr_transition_params, flow_apply,
markov_kernel_by_step, initial_sampler,
density_by_step, config)
repeater = lambda x: jnp.repeat(x[None], num_temps-1, axis=0)
transition_params = jax.tree_util.tree_map(repeater, flow_init_params)
opt_state = opt.init(transition_params)
def vi_update(curr_key, curr_transition_params, curr_opt_state):
subkey, curr_key = jax.random.split(curr_key)
objective, flow_grads = jax.value_and_grad(short_inner_loop)(
curr_transition_params, subkey)
updates, new_opt_state = opt.update(flow_grads,
curr_opt_state)
new_transition_params = optax.apply_updates(curr_transition_params,
updates)
return curr_key, new_transition_params, objective, new_opt_state
jit_vi_update = jax.jit(vi_update)
logging.info('Performing initial step redundantly for accurate timing...')
initial_start_time = time.time()
jit_vi_update(key, transition_params, opt_state)
initial_finish_time = time.time()
initial_time_diff = initial_finish_time - initial_start_time
logging.info('Initial step time / seconds %f: ', initial_time_diff)
logging.info('Launching training...')
place_holder_array = jnp.array([0.])
start_time = time.time()
for step in range(config.snf_num_iters):
key, transition_params, vfe, opt_state = jit_vi_update(
key, transition_params, opt_state)
if step % config.report_step == 0:
if log_step_output is not None:
delta_time = time.time()-start_time
log_step_output(step=step,
training_objective=vfe,
log_normalizer_estimate=-1.*vfe,
delta_time=delta_time,
samples=place_holder_array,
log_weights=place_holder_array)
logging.info(
'Step %05d: Free energy %f',
step, vfe
)
finish_time = time.time()
delta_time = finish_time - start_time
final_log_normalizer_estimate = -1.*vfe
logging.info('Delta time / seconds %f: ', delta_time)
logging.info('Log normalizer estimate %f: ', final_log_normalizer_estimate)
if save_checkpoint:
save_checkpoint(transition_params)
results = AlgoResultsTuple(
test_samples=place_holder_array,
test_log_weights=place_holder_array,
log_normalizer_estimate=final_log_normalizer_estimate,
delta_time=delta_time,
initial_time_diff=initial_time_diff)
return results
|
annealed_flow_transport-master
|
annealed_flow_transport/snf.py
|
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation code for launching PIMH and final target density MCMC."""
from absl import logging
from annealed_flow_transport import craft
from annealed_flow_transport import densities
from annealed_flow_transport import expectations
from annealed_flow_transport import flow_transport
from annealed_flow_transport import flows
from annealed_flow_transport import markov_kernel
from annealed_flow_transport import mcmc
from annealed_flow_transport import pimh
from annealed_flow_transport import samplers
from annealed_flow_transport import serialize
from annealed_flow_transport import smc
from annealed_flow_transport import vi
import annealed_flow_transport.aft_types as tp
import chex
import haiku as hk
import jax
# Type defs.
Array = tp.Array
OptState = tp.OptState
UpdateFn = tp.UpdateFn
FlowParams = tp.FlowParams
FlowApply = tp.FlowApply
LogDensityByStep = tp.LogDensityByStep
RandomKey = tp.RandomKey
AcceptanceTuple = tp.AcceptanceTuple
FreeEnergyAndGrad = tp.FreeEnergyAndGrad
FreeEnergyEval = tp.FreeEnergyEval
MarkovKernelApply = tp.MarkovKernelApply
SamplesTuple = tp.SamplesTuple
LogWeightsTuple = tp.LogWeightsTuple
VfesTuple = tp.VfesTuple
InitialSampler = tp.InitialSampler
LogDensityNoStep = tp.LogDensityNoStep
assert_equal_shape = chex.assert_equal_shape
AlgoResultsTuple = tp.AlgoResultsTuple
ParticleState = tp.ParticleState
ParticlePropose = tp.ParticlePropose
class OnlineMovingAverage():
"""Numerically stable implementation of a moving average."""
def __init__(self, label: str):
self.label = label
self._num_vals = 0
# pytype: disable=attribute-error
def update(self, val):
self._num_vals += 1
if self._num_vals == 1:
self._average = val
else:
delta = (val - self._average)
self._average = self._average + delta/self._num_vals
def get_value(self):
return self._average
# pytype: enable=attribute-error
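# Worked example (added comment): after
#   avg = OnlineMovingAverage(label='mean_estimate')
#   avg.update(1.)
#   avg.update(3.)
# the incremental update gives avg.get_value() == 2., the running mean of the
# two values, without accumulating a potentially large running sum.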
class ExpectationLogger(object):
"""Compute and log expectations based on particles."""
def __init__(self, expectation_config, num_dim: int):
self._expectation_config = expectation_config
self._step = 0
self._expectations = []
for config in self._expectation_config.configurations:
exp_class = getattr(expectations, config.name)
self._expectations.append(jax.jit(exp_class(config, num_dim)))
names = []
self._labels = []
for config in self._expectation_config.configurations:
name = config.name
index = len([elem for elem in names if elem == name])
label = name+'_'+str(index)
names.append(name)
self._labels.append(label)
self._averages = []
for config, label in zip(self._expectation_config.configurations,
self._labels):
self._averages.append(OnlineMovingAverage(label=label))
def record_expectations(self, particle_state: ParticleState):
"""Record expectations based on particle state."""
for index, expectation in enumerate(self._expectations):
expectation_val = expectation(particle_state.samples,
particle_state.log_weights)
average = self._averages[index]
average.update(expectation_val)
if self._step % self._expectation_config.expectation_report_step == 0:
logging.info('Step %05d :', self._step)
msg = ''
for average in self._averages:
msg += average.label + ' '
msg += str(average.get_value()) + ', '
logging.info(msg)
self._step += 1
def is_flow_algorithm(algo_name):
return algo_name in ('craft', 'vi')
def is_annealing_markov_kernel_algorithm(algo_name):
return algo_name in ('smc', 'craft')
def get_particle_propose(config) -> ParticlePropose:
"""Get a function that proposes particles and log normalizer."""
log_density_initial = getattr(densities, config.initial_config.density)(
config.initial_config, config.sample_shape)
log_density_final = getattr(densities, config.final_config.density)(
config.final_config, config.sample_shape)
initial_sampler = getattr(samplers,
config.initial_sampler_config.initial_sampler)(
config.initial_sampler_config)
if is_annealing_markov_kernel_algorithm(config.algo):
density_by_step = flow_transport.GeometricAnnealingSchedule(
log_density_initial, log_density_final, config.num_temps)
markov_kernel_by_step = markov_kernel.MarkovTransitionKernel(
config.mcmc_config, density_by_step, config.num_temps)
if is_flow_algorithm(config.algo):
def flow_func(x):
flow = getattr(flows, config.flow_config.type)(config.flow_config)
return flow(x)
flow_forward_fn = hk.without_apply_rng(hk.transform(flow_func))
flow_params = serialize.load_checkpoint(config.params_filename)
if config.algo == 'smc':
@jax.jit
def particle_propose(loc_key: RandomKey):
return smc.fast_outer_loop_smc(density_by_step,
initial_sampler,
markov_kernel_by_step,
loc_key,
config)
elif config.algo == 'craft':
@jax.jit
def particle_propose(loc_key: RandomKey):
return craft.craft_evaluation_loop(loc_key,
flow_params,
flow_forward_fn.apply,
markov_kernel_by_step,
initial_sampler,
density_by_step,
config)
elif config.algo == 'vi':
@jax.jit
def particle_propose(loc_key: RandomKey):
return vi.vfe_naive_importance(initial_sampler,
log_density_initial,
log_density_final,
flow_forward_fn.apply,
flow_params,
loc_key,
config)
else:
raise NotImplementedError
return particle_propose
def get_expectation_logger(expectation_config,
num_dim: int) -> ExpectationLogger:
return ExpectationLogger(expectation_config, num_dim)
def run_experiment(config):
"""Run a SMC flow experiment.
Args:
config: experiment configuration.
Returns:
An AlgoResultsTuple containing the experiment results.
"""
random_key = jax.random.PRNGKey(config.evaluation_seed)
expectation_logger = get_expectation_logger(config.expectation_config,
config.sample_shape[0])
if config.evaluation_algo == 'pimh':
particle_propose = get_particle_propose(config)
logging.info('Draw initial samples redundantly for accurate timing...')
particle_propose(random_key)
logging.info('Starting PIMH algorithm.')
pimh.particle_metropolis_loop(random_key,
particle_propose,
config.num_evaluation_samples,
expectation_logger.record_expectations)
elif config.evaluation_algo == 'mcmc_final':
mcmc.outer_loop_mcmc(random_key,
config.num_evaluation_samples,
expectation_logger.record_expectations,
config)
else:
raise NotImplementedError
|
annealed_flow_transport-master
|
annealed_flow_transport/evaluation.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for variational inference (VI) with normalizing flows.
For background see:
Rezende and Mohamed. 2015. Variational Inference with Normalizing Flows.
International Conference of Machine Learning.
"""
from absl import logging
import annealed_flow_transport.aft_types as tp
import chex
import jax
import jax.numpy as jnp
from jax.scipy.special import logsumexp
import numpy as np
import optax
Array = jnp.ndarray
UpdateFn = tp.UpdateFn
OptState = tp.OptState
FlowParams = tp.FlowParams
FlowApply = tp.FlowApply
LogDensityNoStep = tp.LogDensityNoStep
InitialSampler = tp.InitialSampler
RandomKey = tp.RandomKey
assert_equal_shape = chex.assert_equal_shape
AlgoResultsTuple = tp.AlgoResultsTuple
ParticleState = tp.ParticleState
assert_trees_all_equal_shapes = chex.assert_trees_all_equal_shapes
def vfe_naive_importance(initial_sampler: InitialSampler,
initial_density: LogDensityNoStep,
final_density: LogDensityNoStep,
flow_apply: FlowApply,
flow_params: FlowParams,
key: RandomKey,
config) -> ParticleState:
"""Estimate log normalizing constant using naive importance sampling."""
samples = initial_sampler(key,
config.batch_size,
config.sample_shape)
transformed_samples, log_det_jacs = flow_apply(flow_params, samples)
assert_trees_all_equal_shapes(transformed_samples, samples)
log_density_target = final_density(transformed_samples)
log_density_initial = initial_density(samples)
assert_equal_shape([log_density_initial, log_density_target])
log_density_approx = log_density_initial - log_det_jacs
log_importance_weights = log_density_target - log_density_approx
log_normalizer_estimate = logsumexp(log_importance_weights) - np.log(
config.batch_size)
particle_state = ParticleState(
samples=transformed_samples,
log_weights=log_importance_weights,
log_normalizer_estimate=log_normalizer_estimate)
return particle_state
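# Added note: the estimator above is standard importance sampling,
#   log Z_hat = logsumexp_i(log w_i) - log(batch_size),
# with log w_i = log p_target(T(x_i)) - (log q_0(x_i) - log|det J_T(x_i)|),
# where T is the flow and q_0 the initial density.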
def vi_free_energy(flow_params: FlowParams,
key: RandomKey,
initial_sampler: InitialSampler,
initial_density: LogDensityNoStep,
final_density: LogDensityNoStep,
flow_apply: FlowApply,
config):
"""The variational free energy used in VI with normalizing flows."""
samples = initial_sampler(key,
config.batch_size,
config.sample_shape)
transformed_samples, log_det_jacs = flow_apply(flow_params, samples)
assert_trees_all_equal_shapes(transformed_samples, samples)
log_density_target = final_density(transformed_samples)
log_density_initial = initial_density(samples)
assert_equal_shape([log_density_initial, log_density_target])
log_density_approx = log_density_initial - log_det_jacs
assert_equal_shape([log_density_approx, log_density_initial])
free_energies = log_density_approx - log_density_target
free_energy = jnp.mean(free_energies)
return free_energy
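# Added note: this is a Monte Carlo estimate of E_q[log q(x) - log p_target(x)]
# for the flow-transformed distribution q, so its negative is the ELBO that
# outer_loop_vi reports as the log normalizer estimate when
# config.vi_estimator == 'elbo'.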
def outer_loop_vi(initial_sampler: InitialSampler,
opt_update: UpdateFn,
opt_init_state: OptState,
flow_init_params: FlowParams,
flow_apply: FlowApply,
key: RandomKey,
initial_log_density: LogDensityNoStep,
final_log_density: LogDensityNoStep,
config,
save_checkpoint) -> AlgoResultsTuple:
"""The training loop for variational inference with normalizing flows.
Args:
initial_sampler: Produces samples from the base distribution.
opt_update: Optax update function for the optimizer.
opt_init_state: Optax initial state for the optimizer.
flow_init_params: Initial params for the flow.
flow_apply: A callable that evaluates the flow for given params and samples.
key: A Jax random Key.
initial_log_density: Function that evaluates the base density.
final_log_density: Function that evaluates the target density.
config: configuration ConfigDict.
save_checkpoint: None or function that takes params and saves them.
Returns:
An AlgoResults tuple containing a summary of the results.
"""
def vi_free_energy_short(loc_flow_params,
loc_key):
return vi_free_energy(loc_flow_params,
loc_key,
initial_sampler,
initial_log_density,
final_log_density,
flow_apply,
config)
free_energy_and_grad = jax.jit(jax.value_and_grad(vi_free_energy_short))
def short_vfe_naive_importance(loc_flow_params, loc_key):
return vfe_naive_importance(initial_sampler, initial_log_density,
final_log_density, flow_apply, loc_flow_params,
loc_key, config).log_normalizer_estimate
jit_vfe_naive_importance = jax.jit(short_vfe_naive_importance)
flow_params = flow_init_params
opt_state = opt_init_state
def vi_update(curr_key, curr_flow_params, curr_opt_state):
subkey, curr_key = jax.random.split(curr_key)
new_free_energy, flow_grads = free_energy_and_grad(curr_flow_params,
subkey)
updates, new_opt_state = opt_update(flow_grads,
curr_opt_state)
new_flow_params = optax.apply_updates(curr_flow_params,
updates)
return curr_key, new_flow_params, new_free_energy, new_opt_state
jit_vi_update = jax.jit(vi_update)
step = 0
while step < config.vi_iters:
with jax.profiler.StepTraceAnnotation('train', step_num=step):
key, flow_params, curr_free_energy, opt_state = jit_vi_update(key,
flow_params,
opt_state)
if step % config.vi_report_step == 0:
logging.info('Step %05d: free_energy %f:', step, curr_free_energy)
if config.vi_estimator == 'elbo':
log_normalizer_estimate = -1.*curr_free_energy
elif config.vi_estimator == 'importance':
subkey, key = jax.random.split(key, 2)
log_normalizer_estimate = jit_vfe_naive_importance(flow_params,
subkey)
else:
raise NotImplementedError
logging.info('Log normalizer estimate %f:', log_normalizer_estimate)
step += 1
if save_checkpoint:
save_checkpoint(flow_params)
particle_state = vfe_naive_importance(initial_sampler, initial_log_density,
final_log_density, flow_apply,
flow_params, key, config)
results = AlgoResultsTuple(
test_samples=particle_state.samples,
test_log_weights=particle_state.log_weights,
log_normalizer_estimate=particle_state.log_normalizer_estimate,
delta_time=0., # These are currently set with placeholders.
initial_time_diff=0.)
return results
|
annealed_flow_transport-master
|
annealed_flow_transport/vi.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for annealed_flow_transport.flow_transport."""
import hashlib
import os.path
from absl.testing import absltest
from absl.testing import parameterized
from annealed_flow_transport.densities import AutoEncoderLikelihood
from annealed_flow_transport.densities import ChallengingTwoDimensionalMixture
from annealed_flow_transport.densities import FunnelDistribution
from annealed_flow_transport.densities import MultivariateNormalDistribution
from annealed_flow_transport.densities import NormalDistribution
from annealed_flow_transport.densities import phi_four_log_density
from annealed_flow_transport.densities import PhiFourTheory
import jax
import jax.numpy as jnp
from jax.scipy.stats import norm
import ml_collections
import numpy as np
ConfigDict = ml_collections.ConfigDict
join = os.path.join
dirname = os.path.dirname
def _assert_equal_vec(tester, v1, v2, **kwargs):
tester.assertTrue(jnp.allclose(v1, v2, **kwargs))
def get_normal_config():
config = ConfigDict()
config.loc = 1.
config.scale = 1.
return config
def get_multivariate_normal_config():
config = ConfigDict()
config.shared_mean = 1.
config.diagonal_cov = 1.
return config
class BasicShapesTest(parameterized.TestCase):
@parameterized.named_parameters(
('Normal', NormalDistribution, 1, get_normal_config()),
('MultivariateNormal', MultivariateNormalDistribution, 2,
get_multivariate_normal_config()),
('TwoDimensionalMixture', ChallengingTwoDimensionalMixture, 2, None),
('FunnelDistribution', FunnelDistribution, 10, None),)
def test_shapes(self, test_class, num_dim: int, config):
num_batch = 7
test_matrix = jnp.arange(num_dim * num_batch).reshape((num_batch, num_dim))
if not config:
config = ConfigDict()
test_density = test_class(config, (num_dim,))
output_log_densities = test_density(test_matrix)
self.assertEqual(output_log_densities.shape, (num_batch,))
class VAETest(parameterized.TestCase):
@parameterized.named_parameters(
('First digit', 0, '9ea2704b4fafa24f97c5e330506bd2c9'),
('Tenth digit', 9, '67a03001cd0eadedff8300f2b6cb7f03'),
('Main paper digit', 3689, 'c4ae91223d22b0ed227b401d18237e65'),)
def test_digit_ordering(self, digit_index, target_md5_hash):
"""Confirm the digit determined by index has not changed using a hash."""
config = ConfigDict()
config.params_filename = join(dirname(__file__), 'data/vae.pickle')
config.image_index = digit_index
vae_density = AutoEncoderLikelihood(config, (30,))
hex_digit_hash = hashlib.md5(vae_density._test_image).hexdigest()
self.assertEqual(hex_digit_hash, target_md5_hash)
def test_density_shape(self):
num_batch = 7
num_dim = 30
total_points = num_batch * num_dim
config = ConfigDict()
config.params_filename = join(dirname(__file__), 'data/vae.pickle')
config.image_index = 0
vae_density = AutoEncoderLikelihood(config, (num_dim,))
test_input = (jnp.arange(total_points).reshape(num_batch, num_dim) -
100.) / 100.
test_output = vae_density(test_input)
self.assertEqual(test_output.shape, (num_batch,))
def test_log_prior(self):
num_dim = 30
config = ConfigDict()
config.params_filename = join(dirname(__file__), 'data/vae.pickle')
config.image_index = 0
vae_density = AutoEncoderLikelihood(config, (num_dim,))
test_input = (jnp.arange(num_dim)-num_dim)/num_dim
test_output = vae_density.log_prior(test_input)
reference_output = jnp.sum(jax.vmap(norm.logpdf)(test_input))
_assert_equal_vec(self, reference_output, test_output)
def test_batching_consistency(self):
"""Paranoid test to check there is nothing wrong with batching/averages."""
num_batch = 7
num_dim = 30
total_points = num_batch * num_dim
config = ConfigDict()
config.params_filename = join(dirname(__file__), 'data/vae.pickle')
config.image_index = 0
vae_density = AutoEncoderLikelihood(config, (num_dim,))
test_input = (jnp.arange(total_points).reshape(num_batch, num_dim) -
100.) / 100.
test_output = vae_density(test_input)
for batch_index in range(num_batch):
current_log_density = vae_density.total_log_probability(
test_input[batch_index, :])
_assert_equal_vec(self, test_output[batch_index], current_log_density)
class PhiFourTest(parameterized.TestCase):
def test_batched_configurable(self):
config = ConfigDict()
config.mass_squared = -4.
config.bare_coupling = 5.1
batch_size = 7
num_dim = 16
trial_values = jnp.linspace(-2., 2., batch_size * num_dim).reshape(
(batch_size, num_dim))
log_density = PhiFourTheory(config, (num_dim,))
log_density_val = log_density(trial_values)
    self.assertEqual(log_density_val.shape, (batch_size,))
def test_zero(self):
lattice_shape = (8, 6)
trial_lattice_values = jnp.zeros(lattice_shape)
mass_squared = -4.
bare_coupling = 5.1
trial_log_density = phi_four_log_density(trial_lattice_values,
mass_squared,
bare_coupling)
_assert_equal_vec(self, trial_log_density, 0.)
def test_reflection_symmetry(self):
lattice_shape = (8, 6)
lattice_size = np.prod(lattice_shape)
trial_lattice_values = jnp.linspace(-2., 2.,
lattice_size).reshape(lattice_shape)
reflected_trial_lattice_values = -1.*trial_lattice_values
mass_squared = -4.
bare_coupling = 5.1
trial_log_density = phi_four_log_density(trial_lattice_values,
mass_squared,
bare_coupling)
reflected_trial_log_density = phi_four_log_density(
reflected_trial_lattice_values, mass_squared, bare_coupling)
_assert_equal_vec(self, trial_log_density, reflected_trial_log_density)
def test_translation_symmetry(self):
lattice_shape = (8, 6)
lattice_size = np.prod(lattice_shape)
trial_lattice_values = jnp.linspace(-2., 2.,
lattice_size).reshape(lattice_shape)
translated_lattice_values = jnp.roll(trial_lattice_values, shift=(1, 2),
axis=(0, 1))
mass_squared = -4.
bare_coupling = 5.1
trial_log_density = phi_four_log_density(trial_lattice_values,
mass_squared,
bare_coupling)
translated_log_density = phi_four_log_density(translated_lattice_values,
mass_squared,
bare_coupling)
_assert_equal_vec(self, trial_log_density, translated_log_density)
if __name__ == '__main__':
absltest.main()
|
annealed_flow_transport-master
|
annealed_flow_transport/densities_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for annealed_flow_transport.aft."""
from absl.testing import absltest
from absl.testing import parameterized
from annealed_flow_transport import aft
import annealed_flow_transport.aft_types as tp
import jax
import jax.numpy as jnp
import optax
def _assert_equal_vec(tester, v1, v2, **kwargs):
tester.assertTrue(jnp.allclose(v1, v2, **kwargs))
class AftTest(parameterized.TestCase):
def setUp(self):
super().setUp()
num_dim = 1
num_samples = 4
self._train_samples = jnp.zeros((num_samples, num_dim))
self._train_log_weights = -jnp.log(num_samples)*jnp.ones((num_samples,))
self._validation_samples = self._train_samples
self._validation_log_weights = self._train_log_weights
self._true_target = jnp.ones((num_dim,))
self._initial_mean = jnp.zeros((num_dim,))
self._opt = optax.adam(1e-2)
self._opt_init_state = self._opt.init(self._initial_mean)
self.dummy_free_energy_and_grad = jax.value_and_grad(self.dummy_free_energy)
self._dummy_outer_step = 0
self._iterations = 500
def dummy_free_energy(self, mean, samples, log_weights, unused_step):
integrands = jnp.square(samples + mean - self._true_target[None, :])[:, 0]
return jnp.sum(jax.nn.softmax(log_weights) * integrands)
def test_early_stopping(self):
best_mean_greedy, unused_opt_values = aft.optimize_free_energy(
opt_update=self._opt.update,
opt_init_state=self._opt_init_state,
flow_init_params=self._initial_mean,
free_energy_and_grad=self.dummy_free_energy_and_grad,
free_energy_eval=self.dummy_free_energy,
train_samples=self._train_samples,
train_log_weights=self._train_log_weights,
validation_samples=self._validation_samples,
validation_log_weights=self._validation_log_weights,
outer_step=self._dummy_outer_step,
opt_iters=self._iterations,
stopping_criterion='greedy_time')
best_mean_time, opt_values = aft.optimize_free_energy(
opt_update=self._opt.update,
opt_init_state=self._opt_init_state,
flow_init_params=self._initial_mean,
free_energy_and_grad=self.dummy_free_energy_and_grad,
free_energy_eval=self.dummy_free_energy,
train_samples=self._train_samples,
train_log_weights=self._train_log_weights,
validation_samples=self._validation_samples,
validation_log_weights=self._validation_log_weights,
outer_step=self._dummy_outer_step,
opt_iters=self._iterations,
stopping_criterion='time')
_assert_equal_vec(self, best_mean_greedy, self._true_target, atol=1e-5)
_assert_equal_vec(self, best_mean_time, self._true_target, atol=1e-5)
min_train_vfe = jnp.min(opt_values.train_vfes)
min_validation_vfe = jnp.min(opt_values.validation_vfes)
    true_minimum = 0.
    _assert_equal_vec(self, min_train_vfe, true_minimum, atol=1e-5)
    _assert_equal_vec(self, min_validation_vfe, true_minimum, atol=1e-5)
def test_opt_step(self):
initial_vfes = tp.VfesTuple(jnp.zeros(self._iterations),
jnp.zeros(self._iterations))
initial_loop_state = aft.OptimizationLoopState(
self._opt_init_state,
self._initial_mean,
inner_step=0,
opt_vfes=initial_vfes,
best_params=self._initial_mean,
best_validation_vfe=jnp.inf,
best_index=-1)
loop_state_b = aft.flow_estimate_step(
loop_state=initial_loop_state,
free_energy_and_grad=self.dummy_free_energy_and_grad,
train_samples=self._train_samples,
train_log_weights=self._train_log_weights,
outer_step=self._dummy_outer_step,
validation_samples=self._validation_samples,
validation_log_weights=self._validation_log_weights,
free_energy_eval=self.dummy_free_energy,
opt_update=self._opt.update)
self.assertEqual(loop_state_b.inner_step, 1)
array_one = jnp.array(1.)
_assert_equal_vec(self, loop_state_b.opt_vfes.train_vfes[0], array_one)
_assert_equal_vec(self, loop_state_b.opt_vfes.validation_vfes[0], array_one)
_assert_equal_vec(self, loop_state_b.best_params, self._initial_mean)
_assert_equal_vec(self, loop_state_b.best_validation_vfe, array_one)
self.assertEqual(loop_state_b.best_index, 0)
loop_state_c = aft.flow_estimate_step(
loop_state=loop_state_b,
free_energy_and_grad=self.dummy_free_energy_and_grad,
train_samples=self._train_samples,
train_log_weights=self._train_log_weights,
outer_step=self._dummy_outer_step,
validation_samples=self._validation_samples,
validation_log_weights=self._validation_log_weights,
free_energy_eval=self.dummy_free_energy,
opt_update=self._opt.update)
self.assertEqual(loop_state_c.inner_step, 2)
self.assertLess(loop_state_c.best_validation_vfe, array_one)
self.assertEqual(loop_state_c.best_index, 1)
if __name__ == '__main__':
absltest.main()
|
annealed_flow_transport-master
|
annealed_flow_transport/aft_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train a convolutional variational autoencoder for likelihood experiments.
Some Jax/Haiku programming idioms inspired by OSS Apache 2.0 Haiku vae example.
A pretrained version of this model is already included in data/vae.pickle.
To run one of the sampling algorithms on that trained model use configs/vae.py
This training script is included for full reproducibility.
"""
import os
import pickle
import time
from typing import Any, Tuple
from absl import app
from absl import flags
from absl import logging
from annealed_flow_transport import vae
import annealed_flow_transport.aft_types as tp
import haiku as hk
import jax
from matplotlib import pylab as plt
from ml_collections.config_flags import config_flags
import optax
import tensorflow_datasets as tfds
Array = tp.Array
Batch = tp.VaeBatch
MNIST_IMAGE_SHAPE = tp.MNIST_IMAGE_SHAPE
RandomKey = tp.RandomKey
OptState = tp.OptState
Params = Any
UpdateFn = tp.UpdateFn
VAEResult = tp.VAEResult
def train_step(vae_apply,
vae_params,
opt_state: OptState,
opt_update: UpdateFn,
batch: Batch,
random_key: RandomKey) -> Tuple[Params, OptState, Array]:
"""A single step of training for the VAE."""
def params_loss(loc_params):
scaled_image = batch['image']
output: VAEResult = vae_apply(loc_params, random_key, scaled_image)
loss = vae.vae_loss(batch['image'], output.logits, output.latent_mean,
output.latent_std)
return loss
value, grads = jax.value_and_grad(params_loss)(vae_params)
updates, new_opt_state = opt_update(grads, opt_state)
new_params = optax.apply_updates(vae_params, updates)
return new_params, new_opt_state, value
def save_image(reconst_image: Array, train_image: Array, sample_image: Array,
num_plot: int, opt_iter: int, output_directory: str):
"""Show image plots."""
overall_size = 1.5
unused_fig, axes = plt.subplots(
3, num_plot, figsize=(overall_size*num_plot, overall_size*3))
def plot_image(curr_plot_index, sub_index, data):
axes[sub_index, curr_plot_index].imshow(data, cmap='gray', vmin=0., vmax=1.)
for plot_index in range(num_plot):
for (sub_index,
datum) in zip(range(3), (train_image, reconst_image, sample_image)):
plot_image(plot_index, sub_index, datum[plot_index, :, :, 0])
plt.savefig(output_directory+str(opt_iter)+'.png')
plt.close()
def train_vae(batch_size: int,
num_latents: int,
random_seed: int,
step_size: float,
output_dir_stub,
train_iters: int,
report_period: int
):
"""Train the VAE on binarized MNIST.
Args:
batch_size: Batch size for training and validation.
num_latents: Number of latents for VAE latent space.
random_seed: Random seed for training.
step_size: Step size for ADAM optimizer.
    output_dir_stub: Directory stub for output files if truthy; otherwise no
      files are stored.
train_iters: Number of iterations to run training for.
report_period: Period between reporting losses and storing files.
"""
train_dataset = vae.load_dataset(tfds.Split.TRAIN, batch_size)
validation_dataset = vae.load_dataset(tfds.Split.TEST, batch_size)
def call_vae(x):
res_vae = vae.ConvVAE(num_latents=num_latents)
return res_vae(x)
vae_fn = hk.transform(call_vae)
rng_seq = hk.PRNGSequence(random_seed)
vae_params = vae_fn.init(
next(rng_seq), next(train_dataset)['image'])
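  # Added comment: the optimizer chain below clips gradients by global norm,
  # applies Adam moment rescaling, and finally multiplies by -step_size so
  # that optax.apply_updates performs gradient descent.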
opt = optax.chain(
optax.clip_by_global_norm(1e5),
optax.scale_by_adam(b1=0.9, b2=0.999, eps=1e-8),
optax.scale(-step_size)
)
opt_state = opt.init(vae_params)
opt_update = opt.update
def train_step_short(curr_params, curr_opt_state, curr_batch, curr_key):
return train_step(vae_fn.apply, curr_params, curr_opt_state, opt_update,
curr_batch, curr_key)
def compute_validation(curr_params, curr_batch, curr_key):
output: VAEResult = vae_fn.apply(curr_params, curr_key, curr_batch['image'])
loss = vae.vae_loss(curr_batch['image'], output.logits,
output.latent_mean, output.latent_std)
return loss, output.reconst_sample, curr_batch['image'], output.sample_image
train_step_jit = jax.jit(train_step_short)
compute_validation_jit = jax.jit(compute_validation)
if output_dir_stub:
output_directory = output_dir_stub + time.strftime('%a_%d_%b_%Y_%H:%M:%S/',
time.gmtime())
if not os.path.exists(output_directory):
os.makedirs(output_directory)
for opt_iter in range(train_iters):
vae_params, opt_state, train_loss = train_step_jit(
vae_params, opt_state, next(train_dataset), next(rng_seq))
if opt_iter % report_period == 0:
validation_loss, reconst_sample, example, sample = compute_validation_jit(
vae_params, next(validation_dataset), next(rng_seq))
logging.info('Step: %5d: Training VFE: %.3f', opt_iter, train_loss)
logging.info('Step: %5d: Validation VFE: %.3f', opt_iter, validation_loss)
if output_dir_stub:
save_image(reconst_sample, example, sample, 8, opt_iter,
output_directory)
if output_dir_stub:
save_result(vae_params, output_directory)
def get_checkpoint_filename():
return 'vae.pickle'
def save_result(state, output_directory: str):
ckpt_filename = os.path.join(output_directory, get_checkpoint_filename())
with open(ckpt_filename, 'wb') as f:
pickle.dump(state, f)
def main(argv):
config = FLAGS.train_vae_config
  logging.info('Displaying config %s', config)
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
train_vae(
batch_size=config.batch_size,
num_latents=config.num_latents,
random_seed=config.random_seed,
step_size=config.step_size,
output_dir_stub=config.output_dir_stub,
train_iters=config.train_iters,
report_period=config.report_period)
if __name__ == '__main__':
FLAGS = flags.FLAGS
config_flags.DEFINE_config_file('train_vae_config',
'./train_vae_configs/vae_config.py',
'VAE training configuration.')
app.run(main)
|
annealed_flow_transport-master
|
annealed_flow_transport/train_vae.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sequential Monte Carlo (SMC) sampler algorithm.
For background see:
Del Moral, Doucet and Jasra. 2006. Sequential Monte Carlo samplers.
Journal of the Royal Statistical Society B.
"""
import time
from typing import Tuple
from absl import logging
from annealed_flow_transport import flow_transport
from annealed_flow_transport import resampling
import annealed_flow_transport.aft_types as tp
import chex
import jax
import jax.numpy as jnp
import numpy as np
Array = tp.Array
LogDensityNoStep = tp.LogDensityNoStep
InitialSampler = tp.InitialSampler
RandomKey = tp.RandomKey
MarkovKernelApply = tp.MarkovKernelApply
LogDensityByStep = tp.LogDensityByStep
assert_equal_shape = chex.assert_equal_shape
assert_trees_all_equal_shapes = chex.assert_trees_all_equal_shapes
AlgoResultsTuple = tp.AlgoResultsTuple
ParticleState = tp.ParticleState
def inner_loop(
key: RandomKey,
markov_kernel_apply: MarkovKernelApply,
samples: Array, log_weights: Array,
log_density: LogDensityByStep, step: int, config
) -> Tuple[Array, Array, Array, Array]:
"""Inner loop of the algorithm.
Args:
key: A JAX random key.
markov_kernel_apply: functional that applies the Markov transition kernel.
samples: Array containing samples.
log_weights: Array containing log_weights.
log_density: function returning the log_density of a sample at given step.
step: int giving current step of algorithm.
config: experiment configuration.
Returns:
samples_final: samples after the full inner loop has been performed.
log_weights_final: log_weights after the full inner loop has been performed.
log_normalizer_increment: Scalar log of normalizing constant increment.
    acceptance_tuple: Acceptance rates of the Markov transition kernels.
"""
deltas = flow_transport.get_delta_no_flow(samples, log_density, step)
log_normalizer_increment = flow_transport.get_log_normalizer_increment_no_flow(
deltas, log_weights)
log_weights_new = flow_transport.reweight_no_flow(log_weights, deltas)
if config.use_resampling:
subkey, key = jax.random.split(key)
resampled_samples, log_weights_resampled = resampling.optionally_resample(
subkey, log_weights_new, samples, config.resample_threshold)
assert_trees_all_equal_shapes(resampled_samples, samples)
assert_equal_shape([log_weights_resampled, log_weights_new])
else:
resampled_samples = samples
log_weights_resampled = log_weights_new
markov_samples, acceptance_tuple = markov_kernel_apply(
step, key, resampled_samples)
return markov_samples, log_weights_resampled, log_normalizer_increment, acceptance_tuple
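# Illustrative sketch (added commentary, not part of the original algorithm;
# the helper name is hypothetical). This shows, in isolation, the standard
# reweighting identities the inner loop relies on: with self-normalized log
# weights and incremental log weights log_g = log gamma_t(x) - log gamma_{t-1}(x),
# the ratio of normalizing constants is estimated by
# logsumexp(log_weights + log_g) and the weights are renormalized afterwards.
# The sign convention of `deltas` in flow_transport may differ; this is only
# a conceptual reference.
def _sketch_smc_reweight(log_weights: Array,
                         log_g: Array) -> Tuple[Array, Array]:
  """Return renormalized log weights and the log normalizer increment."""
  log_z_increment = jax.scipy.special.logsumexp(log_weights + log_g)
  new_log_weights = jax.nn.log_softmax(log_weights + log_g)
  return new_log_weights, log_z_increment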
def get_short_inner_loop(markov_kernel_by_step: MarkovKernelApply,
density_by_step: LogDensityByStep,
config):
"""Get a short version of inner loop."""
def short_inner_loop(rng_key: RandomKey,
loc_samples: Array,
loc_log_weights: Array,
loc_step: int):
return inner_loop(rng_key,
markov_kernel_by_step,
loc_samples,
loc_log_weights,
density_by_step,
loc_step,
config)
return short_inner_loop
def fast_outer_loop_smc(density_by_step: LogDensityByStep,
initial_sampler: InitialSampler,
markov_kernel_by_step: MarkovKernelApply,
key: RandomKey,
config) -> ParticleState:
"""A fast SMC loop for evaluation or use inside other algorithms."""
key, subkey = jax.random.split(key)
samples = initial_sampler(subkey, config.batch_size, config.sample_shape)
log_weights = -jnp.log(config.batch_size) * jnp.ones(config.batch_size)
short_inner_loop = get_short_inner_loop(markov_kernel_by_step,
density_by_step, config)
keys = jax.random.split(key, config.num_temps-1)
def scan_step(passed_state, per_step_input):
samples, log_weights = passed_state
current_step, current_key = per_step_input
new_samples, new_log_weights, log_z_increment, _ = short_inner_loop(
current_key, samples, log_weights, current_step)
new_passed_state = (new_samples, new_log_weights)
return new_passed_state, log_z_increment
init_state = (samples, log_weights)
per_step_inputs = (np.arange(1, config.num_temps), keys)
final_state, log_normalizer_increments = jax.lax.scan(scan_step,
init_state,
per_step_inputs
)
log_normalizer_estimate = jnp.sum(log_normalizer_increments)
particle_state = ParticleState(
samples=final_state[0],
log_weights=final_state[1],
log_normalizer_estimate=log_normalizer_estimate)
return particle_state
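# Note (added commentary): jax.lax.scan above carries (samples, log_weights)
# across annealing temperatures 1..num_temps-1, each step consuming its own
# random key, and stacks the per-temperature log normalizer increments, which
# are then summed into a single log normalizer estimate.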
def outer_loop_smc(density_by_step: LogDensityByStep,
initial_sampler: InitialSampler,
markov_kernel_by_step: MarkovKernelApply,
key: RandomKey,
config) -> AlgoResultsTuple:
"""The outer loop for Annealed Flow Transport Monte Carlo.
Args:
density_by_step: The log density for each annealing step.
initial_sampler: A function that produces the initial samples.
markov_kernel_by_step: Markov transition kernel for each annealing step.
key: A Jax random key.
config: A ConfigDict containing the configuration.
Returns:
An AlgoResults tuple containing a summary of the results.
"""
num_temps = config.num_temps
key, subkey = jax.random.split(key)
initial_sampler_start = time.time()
samples = initial_sampler(subkey, config.batch_size, config.sample_shape)
initial_sampler_finish = time.time()
initial_sampler_time_diff = initial_sampler_finish - initial_sampler_start
logging.info('Initial sampler time / seconds %f: ',
initial_sampler_time_diff)
log_weights = -jnp.log(config.batch_size) * jnp.ones(config.batch_size)
logging.info('Jitting step...')
inner_loop_jit = jax.jit(
get_short_inner_loop(markov_kernel_by_step, density_by_step, config))
logging.info('Performing initial step redundantly for accurate timing...')
initial_start_time = time.time()
inner_loop_jit(key, samples, log_weights, 1)
initial_finish_time = time.time()
initial_time_diff = initial_finish_time - initial_start_time
logging.info('Initial step time / seconds %f: ', initial_time_diff)
  logging.info('Launching SMC...')
log_normalizer_estimate = 0.
start_time = time.time()
for step in range(1, num_temps):
subkey, key = jax.random.split(key)
samples, log_weights, log_normalizer_increment, acceptance = inner_loop_jit(
subkey, samples, log_weights, step)
acceptance_hmc = float(np.asarray(acceptance[0]))
acceptance_rwm = float(np.asarray(acceptance[1]))
log_normalizer_estimate += log_normalizer_increment
if step % config.report_step == 0:
beta = density_by_step.get_beta(step) # pytype: disable=attribute-error
logging.info(
'Step %05d: beta %f Acceptance rate HMC %f Acceptance rate RWM %f',
step, beta, acceptance_hmc, acceptance_rwm)
finish_time = time.time()
delta_time = finish_time - start_time
logging.info('Delta time / seconds %f: ', delta_time)
logging.info('Log normalizer estimate %f: ', log_normalizer_estimate)
results = AlgoResultsTuple(
test_samples=samples,
test_log_weights=log_weights,
log_normalizer_estimate=log_normalizer_estimate,
delta_time=delta_time,
initial_time_diff=initial_time_diff)
return results
|
annealed_flow_transport-master
|
annealed_flow_transport/smc.py
|
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Particle independent Metropolis-Hasting algorithm code."""
import annealed_flow_transport.aft_types as tp
import jax
import jax.numpy as jnp
Array = tp.Array
ParticleState = tp.ParticleState
RandomKey = tp.RandomKey
def particle_metropolis_step(key: RandomKey,
current_particle_state: ParticleState,
proposed_particle_state) -> ParticleState:
"""A Particle independent Metropolis Hasting step.
Accept the proposed particles with probability a(x', x_t)
where x' is the proposed particle state, x_t is the current state and
a(x', x_t) = min( 1, Z(x')/Z(x_t)) with Z being the normalizing constant
estimate for that particle state.
  For numerical stability we carry out the acceptance test in log space, i.e.
    u ~ Uniform[0, 1], accept if a > u
  becomes
    log u ~ -Exponential(1), accept if log a > log u.
For more background see Andrieu, Doucet and Holenstein: 2010
"Particle Markov chain Monte Carlo Methods" JRSS B.
Args:
key: A Jax random key.
current_particle_state: Corresponds to x_t
proposed_particle_state: Corresponds to x'
Returns:
next_particle_state: Results of the update step.
"""
log_u = -1.*jax.random.exponential(key)
log_a = proposed_particle_state.log_normalizer_estimate - current_particle_state.log_normalizer_estimate
accept = log_a > log_u
next_samples = jnp.where(accept,
proposed_particle_state.samples,
current_particle_state.samples)
next_log_weights = jnp.where(accept,
proposed_particle_state.log_weights,
current_particle_state.log_weights)
next_log_z = jnp.where(accept,
proposed_particle_state.log_normalizer_estimate,
current_particle_state.log_normalizer_estimate)
next_particle_state = ParticleState(samples=next_samples,
log_weights=next_log_weights,
log_normalizer_estimate=next_log_z)
return next_particle_state
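# Minimal sketch (added for illustration, not part of the original module;
# the helper name is hypothetical): the log-space acceptance test above in
# isolation. Since -log(U) follows an Exponential(1) distribution when
# U ~ Uniform[0, 1], drawing log_u = -Exponential(1) and accepting when
# log_a > log_u is equivalent to accepting with probability min(1, exp(log_a)).
def _sketch_log_space_accept(key: RandomKey,
                             log_z_proposed: Array,
                             log_z_current: Array) -> Array:
  """Return a boolean: accept with probability min(1, Z'/Z)."""
  log_u = -1. * jax.random.exponential(key)
  log_a = log_z_proposed - log_z_current
  return log_a > log_u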
def particle_metropolis_loop(key: RandomKey,
particle_propose,
num_samples: int,
record_expectations,
):
"""Run a particle independent Metropolis-Hastings chain.
Args:
key: A Jax random key.
particle_propose: Takes a RandomKey and returns a ParticleState.
num_samples: Number of iterations to run for.
record_expectations: Takes a ParticleState and logs required expectations.
"""
subkey, key = jax.random.split(key)
particle_state = particle_propose(subkey)
record_expectations(particle_state)
for unused_sample_index in range(num_samples):
subkey, key = jax.random.split(key)
proposed_particle_state = particle_propose(subkey)
subkey, key = jax.random.split(key)
particle_state = particle_metropolis_step(subkey,
particle_state,
proposed_particle_state)
record_expectations(particle_state)
|
annealed_flow_transport-master
|
annealed_flow_transport/pimh.py
|
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code from running standard MCMC at the final target temperature."""
import time
from absl import logging
from annealed_flow_transport import densities
from annealed_flow_transport import markov_kernel
from annealed_flow_transport import samplers
import annealed_flow_transport.aft_types as tp
import chex
import jax
import jax.numpy as jnp
import numpy as np
Array = tp.Array
LogDensityNoStep = tp.LogDensityNoStep
InitialSampler = tp.InitialSampler
RandomKey = tp.RandomKey
MarkovKernelApply = tp.MarkovKernelApply
assert_equal_shape = chex.assert_equal_shape
AlgoResultsTuple = tp.AlgoResultsTuple
ParticleState = tp.ParticleState
def outer_loop_mcmc(key: RandomKey,
num_iters: int,
record_expectations,
config) -> AlgoResultsTuple:
"""The outer loop for Annealed Flow Transport Monte Carlo.
Args:
key: A Jax random key.
num_iters: Number of iterations of MCMC to run.
record_expectations: Function for recording values of expectations.
config: A ConfigDict containing the configuration.
Returns:
An AlgoResults tuple containing a summary of the results.
"""
final_log_density = getattr(densities, config.final_config.density)(
config.final_config, config.sample_shape[0])
initial_sampler = getattr(samplers,
config.initial_sampler_config.initial_sampler)(
config.initial_sampler_config)
num_temps = 2
key, subkey = jax.random.split(key)
samples = initial_sampler(subkey, config.batch_size, config.sample_shape)
log_weights = -jnp.log(config.batch_size) * jnp.ones(config.batch_size)
dummy_density_by_step = lambda unused_step, x: final_log_density(x)
final_step = 1
markov_kernel_dummy_step = markov_kernel.MarkovTransitionKernel(
config.mcmc_config, dummy_density_by_step, num_temps)
logging.info('Jitting step...')
fast_markov_kernel = jax.jit(
lambda x, y: markov_kernel_dummy_step(final_step, x, y))
logging.info('Performing initial step redundantly for accurate timing...')
initial_start_time = time.time()
fast_markov_kernel(key, samples)
initial_finish_time = time.time()
initial_time_diff = initial_finish_time - initial_start_time
logging.info('Initial step time / seconds %f: ', initial_time_diff)
  logging.info('Launching MCMC...')
log_normalizer_estimate = 0.
start_time = time.time()
for step in range(num_iters):
subkey, key = jax.random.split(key)
samples, acceptance = fast_markov_kernel(subkey, samples)
acceptance_nuts = float(np.asarray(acceptance[0]))
acceptance_hmc = float(np.asarray(acceptance[1]))
particle_state = ParticleState(
samples=samples,
log_weights=log_weights,
log_normalizer_estimate=log_normalizer_estimate)
record_expectations(particle_state)
if step % config.report_step == 0:
logging.info(
'Step %05d: Acceptance rate NUTS %f Acceptance rate HMC %f',
step, acceptance_nuts, acceptance_hmc
)
finish_time = time.time()
delta_time = finish_time - start_time
logging.info('Delta time / seconds %f: ', delta_time)
logging.info('Log normalizer estimate %f: ', log_normalizer_estimate)
results = AlgoResultsTuple(
test_samples=samples,
test_log_weights=log_weights,
log_normalizer_estimate=log_normalizer_estimate,
delta_time=delta_time,
initial_time_diff=initial_time_diff)
return results
|
annealed_flow_transport-master
|
annealed_flow_transport/mcmc.py
|
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Expectations to be estimated from samples and log weights."""
import math
from annealed_flow_transport import qft_observables
import annealed_flow_transport.aft_types as tp
import jax
import jax.numpy as jnp
import numpy as np
ConfigDict = tp.ConfigDict
Array = tp.Array
ParticleState = tp.ParticleState
class SingleComponentMean():
"""Simple computation of the expectation of one component of the vector."""
def __init__(self, config, num_dim: int):
del num_dim
self._config = config
def __call__(self,
samples: Array,
log_weights: Array) -> Array:
normalized_weights = jax.nn.softmax(log_weights)
component_values = samples[:, self._config.component_index]
return jnp.sum(normalized_weights * component_values)
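# Note (added commentary): the pattern above is self-normalized importance
# sampling. Given samples x_i and log weights l_i, an expectation E[f(X)]
# under the target is estimated as sum_i softmax(l)_i * f(x_i). For example,
# a hypothetical squared-norm observable would be estimated as
#   jnp.sum(jax.nn.softmax(log_weights)
#           * jnp.sum(jnp.square(samples), axis=1))
# with `samples` of shape (num_batch, num_dim) and `log_weights` of shape
# (num_batch,).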
class TwoPointSusceptibility():
"""A wrapper for the two point susceptibility observable."""
def __init__(self, config, num_dim: int):
self._config = config
self._num_grid_per_dim = int(math.sqrt(num_dim))
assert self._num_grid_per_dim ** 2 == num_dim
def __call__(self,
samples: Array,
log_weights: Array) -> Array:
num_batch = np.shape(samples)[0]
reshaped_samples = jnp.reshape(samples, (num_batch,
self._num_grid_per_dim,
self._num_grid_per_dim))
return qft_observables.estimate_two_point_susceptibility(
reshaped_samples, log_weights, self._num_grid_per_dim)
class IsingEnergyDensity():
"""A wrapper for the Ising energy density observable."""
def __init__(self, config, num_dim: int):
self._config = config
self._num_grid_per_dim = int(math.sqrt(num_dim))
assert self._num_grid_per_dim ** 2 == num_dim
def __call__(self,
samples: Array,
log_weights: Array) -> Array:
num_batch = np.shape(samples)[0]
reshaped_samples = jnp.reshape(samples, (num_batch,
self._num_grid_per_dim,
self._num_grid_per_dim))
return qft_observables.estimate_ising_energy_density(
reshaped_samples, log_weights)
|
annealed_flow_transport-master
|
annealed_flow_transport/expectations.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Annealed Flow Transport (AFT) Monte Carlo algorithm.
For more detail see:
Arbel, Matthews and Doucet. 2021. Annealed Flow Transport Monte Carlo.
International Conference on Machine Learning.
"""
import time
from typing import NamedTuple, Tuple
from absl import logging
from annealed_flow_transport import flow_transport
import annealed_flow_transport.aft_types as tp
import jax
import jax.numpy as jnp
import numpy as np
import optax
Array = tp.Array
UpdateFn = tp.UpdateFn
OptState = tp.OptState
FlowParams = tp.FlowParams
FlowApply = tp.FlowApply
LogDensityNoStep = tp.LogDensityNoStep
InitialSampler = tp.InitialSampler
RandomKey = tp.RandomKey
SamplesTuple = tp.SamplesTuple
FreeEnergyAndGrad = tp.FreeEnergyAndGrad
MarkovKernelApply = tp.MarkovKernelApply
FreeEnergyEval = tp.FreeEnergyEval
VfesTuple = tp.VfesTuple
LogDensityByStep = tp.LogDensityByStep
AcceptanceTuple = tp.AcceptanceTuple
LogWeightsTuple = tp.LogWeightsTuple
AlgoResultsTuple = tp.AlgoResultsTuple
def get_initial_samples_log_weight_tuples(
initial_sampler: InitialSampler, key: RandomKey,
config) -> Tuple[SamplesTuple, LogWeightsTuple]:
"""Get initial train/validation/test state depending on config."""
batch_sizes = (config.estimation_batch_size,
config.estimation_batch_size,
config.batch_size)
subkeys = jax.random.split(key, 3)
samples_tuple = SamplesTuple(*[
initial_sampler(elem, batch, config.sample_shape)
for elem, batch in zip(subkeys, batch_sizes)
])
log_weights_tuple = LogWeightsTuple(*[-jnp.log(batch) * jnp.ones(
batch) for batch in batch_sizes])
return samples_tuple, log_weights_tuple
def update_tuples(
samples_tuple: SamplesTuple, log_weights_tuple: LogWeightsTuple,
key: RandomKey, flow_apply: FlowApply, flow_params: FlowParams,
markov_kernel_apply: MarkovKernelApply, log_density: LogDensityByStep,
step: int, config) -> Tuple[SamplesTuple, LogWeightsTuple, AcceptanceTuple]:
"""Update the samples and log weights and return diagnostics."""
samples_list = []
log_weights_list = []
acceptance_tuple_list = []
subkeys = jax.random.split(key, 3)
for curr_samples, curr_log_weights, subkey in zip(samples_tuple,
log_weights_tuple,
subkeys):
new_samples, new_log_weights, acceptance_tuple = flow_transport.update_samples_log_weights(
flow_apply=flow_apply,
markov_kernel_apply=markov_kernel_apply,
flow_params=flow_params,
samples=curr_samples,
log_weights=curr_log_weights,
key=subkey,
log_density=log_density,
step=step,
use_resampling=config.use_resampling,
use_markov=config.use_markov,
resample_threshold=config.resample_threshold)
samples_list.append(new_samples)
log_weights_list.append(new_log_weights)
acceptance_tuple_list.append(acceptance_tuple)
samples_tuple = SamplesTuple(*samples_list)
log_weights_tuple = LogWeightsTuple(*log_weights_list)
test_acceptance_tuple = acceptance_tuple_list[-1]
return samples_tuple, log_weights_tuple, test_acceptance_tuple
class OptimizationLoopState(NamedTuple):
opt_state: OptState
flow_params: FlowParams
inner_step: int
opt_vfes: VfesTuple
best_params: FlowParams
best_validation_vfe: Array
best_index: int
def flow_estimate_step(loop_state: OptimizationLoopState,
free_energy_and_grad: FreeEnergyAndGrad,
train_samples: Array, train_log_weights: Array,
outer_step: int, validation_samples: Array,
validation_log_weights: Array,
free_energy_eval: FreeEnergyEval,
opt_update: UpdateFn) -> OptimizationLoopState:
"""A single step of the flow estimation loop."""
# Evaluate the flow on train and validation particles.
train_vfe, flow_grads = free_energy_and_grad(loop_state.flow_params,
train_samples,
train_log_weights,
outer_step)
validation_vfe = free_energy_eval(loop_state.flow_params,
validation_samples,
validation_log_weights,
outer_step)
# Update the best parameters, best validation vfe and index
# if the measured validation vfe is better.
validation_vfe_is_better = validation_vfe < loop_state.best_validation_vfe
new_best_params = jax.lax.cond(validation_vfe_is_better,
lambda _: loop_state.flow_params,
lambda _: loop_state.best_params,
operand=None)
new_best_validation_vfe = jnp.where(validation_vfe_is_better,
validation_vfe,
loop_state.best_validation_vfe)
new_best_index = jnp.where(validation_vfe_is_better,
loop_state.inner_step,
loop_state.best_index)
# Update the logs of train and validation vfes.
new_train_vfes = loop_state.opt_vfes.train_vfes.at[loop_state.inner_step].set(
train_vfe)
new_validation_vfes = loop_state.opt_vfes.validation_vfes.at[
loop_state.inner_step].set(validation_vfe)
new_opt_vfes = VfesTuple(train_vfes=new_train_vfes,
validation_vfes=new_validation_vfes)
# Apply gradients ready for next round of flow evaluations in the next step.
updates, new_opt_state = opt_update(flow_grads,
loop_state.opt_state)
new_flow_params = optax.apply_updates(loop_state.flow_params,
updates)
new_inner_step = loop_state.inner_step + 1
# Pack everything into the next loop state.
new_state_tuple = OptimizationLoopState(new_opt_state, new_flow_params,
new_inner_step, new_opt_vfes,
new_best_params,
new_best_validation_vfe,
new_best_index)
return new_state_tuple
def flow_estimation_should_continue(loop_state: OptimizationLoopState,
opt_iters: int,
stopping_criterion: str) -> bool:
"""Based on stopping criterion control termination of flow estimation."""
if stopping_criterion == 'time':
return loop_state.inner_step < opt_iters
elif stopping_criterion == 'greedy_time':
index = loop_state.inner_step
best_index = loop_state.best_index
return jnp.logical_and(best_index == index-1, index < opt_iters)
else:
raise NotImplementedError
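# Illustration (added commentary): with stopping_criterion='time' the
# optimization always runs for opt_iters steps. With 'greedy_time' it stops
# as soon as the most recent step failed to improve the best validation VFE:
# for validation VFEs [3., 2., 2.5, ...] the loop evaluates steps 0, 1 and 2,
# then halts because best_index (1) no longer equals inner_step - 1 (now 2).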
def optimize_free_energy(
opt_update: UpdateFn, opt_init_state: OptState,
flow_init_params: FlowParams, free_energy_and_grad: FreeEnergyAndGrad,
free_energy_eval: FreeEnergyEval, train_samples: Array,
train_log_weights: Array, validation_samples: Array,
validation_log_weights: Array, outer_step: int, opt_iters: int,
stopping_criterion: str) -> Tuple[FlowParams, VfesTuple]:
"""Optimize an estimate of the free energy.
Args:
opt_update: function that updates the state of flow based on gradients etc.
opt_init_state: initial state variables of the optimizer.
flow_init_params: initial parameters of the flow.
free_energy_and_grad: function giving estimate of free energy and gradient.
free_energy_eval: function giving estimate of free energy only.
train_samples: Array of shape (batch,)+sample_shape
train_log_weights: Array of shape (batch,)
    validation_samples: Array of shape (batch,)+sample_shape
validation_log_weights: Array of shape (batch,)
outer_step: int giving current outer step of algorithm.
opt_iters: number of flow estimation iters.
    stopping_criterion: One of 'time' or 'greedy_time'.
Returns:
flow_params: optimized flow parameters.
free_energies: array containing all estimates of free energy.
"""
opt_state = opt_init_state
flow_params = flow_init_params
train_vfes = jnp.zeros(opt_iters)
validation_vfes = jnp.zeros(opt_iters)
opt_vfes = VfesTuple(train_vfes, validation_vfes)
def body_fun(loop_state: OptimizationLoopState) -> OptimizationLoopState:
return flow_estimate_step(loop_state, free_energy_and_grad, train_samples,
train_log_weights, outer_step, validation_samples,
validation_log_weights, free_energy_eval,
opt_update)
def cond_fun(loop_state: OptimizationLoopState) -> bool:
return flow_estimation_should_continue(loop_state, opt_iters,
stopping_criterion)
initial_loop_state = OptimizationLoopState(opt_state, flow_params, 0,
opt_vfes, flow_params, jnp.inf, -1)
final_loop_state = jax.lax.while_loop(cond_fun,
body_fun,
initial_loop_state)
return final_loop_state.best_params, final_loop_state.opt_vfes
def inner_loop(
key: RandomKey, free_energy_and_grad: FreeEnergyAndGrad,
free_energy_eval: FreeEnergyEval, opt_update: UpdateFn,
opt_init_state: OptState, flow_init_params: FlowParams,
flow_apply: FlowApply, markov_kernel_apply: MarkovKernelApply,
samples_tuple: SamplesTuple, log_weights_tuple: LogWeightsTuple,
log_density: LogDensityByStep, step: int, config
) -> Tuple[SamplesTuple, LogWeightsTuple, VfesTuple, Array, AcceptanceTuple]:
"""Inner loop of the algorithm.
Args:
key: A JAX random key.
free_energy_and_grad: function giving estimate of free energy and gradient.
free_energy_eval: function giving estimate of free energy only.
opt_update: function that updates the state of flow based on gradients etc.
opt_init_state: initial state variables of the optimizer.
flow_init_params: initial parameters of the flow.
flow_apply: function that applies the flow.
markov_kernel_apply: functional that applies the Markov transition kernel.
samples_tuple: Tuple containing train/validation/test samples.
log_weights_tuple: Tuple containing train/validation/test log_weights.
log_density: function returning the log_density of a sample at given step.
step: int giving current step of algorithm.
config: experiment configuration.
Returns:
    samples_tuple: samples after the full inner loop has been performed.
    log_weights_tuple: log weights after the full inner loop has been performed.
    vfes_tuple: train and validation free energy estimates from flow optimization.
    log_normalizer_increment: Scalar log of normalizing constant increment.
    test_acceptance_tuple: Acceptance rates of the Markov kernels on test particles.
"""
flow_params, vfes_tuple = optimize_free_energy(
opt_update=opt_update,
opt_init_state=opt_init_state,
flow_init_params=flow_init_params,
free_energy_and_grad=free_energy_and_grad,
free_energy_eval=free_energy_eval,
train_samples=samples_tuple.train_samples,
train_log_weights=log_weights_tuple.train_log_weights,
validation_samples=samples_tuple.validation_samples,
validation_log_weights=log_weights_tuple.validation_log_weights,
outer_step=step,
opt_iters=config.optimization_config.free_energy_iters,
stopping_criterion=config.stopping_criterion)
log_normalizer_increment = flow_transport.get_log_normalizer_increment(
samples_tuple.test_samples, log_weights_tuple.test_log_weights,
flow_apply, flow_params, log_density, step)
samples_tuple, log_weights_tuple, test_acceptance_tuple = update_tuples(
samples_tuple=samples_tuple,
log_weights_tuple=log_weights_tuple,
key=key,
flow_apply=flow_apply,
flow_params=flow_params,
markov_kernel_apply=markov_kernel_apply,
log_density=log_density,
step=step,
config=config)
return samples_tuple, log_weights_tuple, vfes_tuple, log_normalizer_increment, test_acceptance_tuple
def outer_loop_aft(opt_update: UpdateFn,
opt_init_state: OptState,
flow_init_params: FlowParams,
flow_apply: FlowApply,
density_by_step: LogDensityByStep,
markov_kernel_by_step: MarkovKernelApply,
initial_sampler: InitialSampler,
key: RandomKey,
config,
log_step_output) -> AlgoResultsTuple:
"""The outer loop for Annealed Flow Transport Monte Carlo.
Args:
opt_update: A Optax optimizer update function.
opt_init_state: Optax initial state.
flow_init_params: Initial parameters for the flow.
flow_apply: Function that evaluates flow on parameters and samples.
density_by_step: The log density for different annealing temperatures.
markov_kernel_by_step: Markov kernel for different annealing temperatures.
initial_sampler: A function that produces the initial samples.
key: A Jax random key.
config: A ConfigDict containing the configuration.
log_step_output: Function to log step output or None.
Returns:
An AlgoResults tuple containing a summary of the results.
"""
num_temps = config.num_temps
def free_energy_short(flow_params: FlowParams,
samples: Array,
log_weights: Array,
step: int) -> Array:
return flow_transport.transport_free_energy_estimator(
samples, log_weights, flow_apply, None, flow_params, density_by_step,
step, False)
free_energy_eval = jax.jit(free_energy_short)
free_energy_and_grad = jax.value_and_grad(free_energy_short)
key, subkey = jax.random.split(key)
samples_tuple, log_weights_tuple = get_initial_samples_log_weight_tuples(
initial_sampler, subkey, config)
def short_inner_loop(rng_key: RandomKey,
loc_samples_tuple: SamplesTuple,
loc_log_weights_tuple: LogWeightsTuple,
loc_step: int):
return inner_loop(key=rng_key,
free_energy_and_grad=free_energy_and_grad,
free_energy_eval=free_energy_eval,
opt_update=opt_update,
opt_init_state=opt_init_state,
flow_init_params=flow_init_params,
flow_apply=flow_apply,
markov_kernel_apply=markov_kernel_by_step,
samples_tuple=loc_samples_tuple,
log_weights_tuple=loc_log_weights_tuple,
log_density=density_by_step,
step=loc_step,
config=config)
logging.info('Jitting step...')
inner_loop_jit = jax.jit(short_inner_loop)
opt_iters = config.optimization_config.free_energy_iters
if log_step_output is not None:
zero_vfe_tuple = VfesTuple(train_vfes=jnp.zeros(opt_iters),
validation_vfes=jnp.zeros(opt_iters))
log_step_output(samples_tuple, log_weights_tuple, zero_vfe_tuple, 0., 1.,
1.)
logging.info('Performing initial step redundantly for accurate timing...')
initial_start_time = time.time()
inner_loop_jit(key, samples_tuple, log_weights_tuple, 1)
initial_finish_time = time.time()
initial_time_diff = initial_finish_time - initial_start_time
logging.info('Initial step time / seconds %f: ', initial_time_diff)
logging.info('Launching training...')
log_normalizer_estimate = 0.
start_time = time.time()
for step in range(1, num_temps):
subkey, key = jax.random.split(key)
samples_tuple, log_weights_tuple, vfes_tuple, log_normalizer_increment, test_acceptance = inner_loop_jit(
subkey, samples_tuple, log_weights_tuple, step)
acceptance_hmc = float(np.asarray(test_acceptance[0]))
acceptance_rwm = float(np.asarray(test_acceptance[1]))
log_normalizer_estimate += log_normalizer_increment
if step % config.report_step == 0:
beta = density_by_step.get_beta(step) # pytype: disable=attribute-error
logging.info(
'Step %05d: beta %f Acceptance rate HMC %f Acceptance rate RWM %f',
step, beta, acceptance_hmc, acceptance_rwm
)
if log_step_output is not None:
log_step_output(samples_tuple, log_weights_tuple,
vfes_tuple, log_normalizer_increment, acceptance_rwm,
acceptance_hmc)
finish_time = time.time()
delta_time = finish_time - start_time
logging.info('Delta time / seconds %f: ', delta_time)
logging.info('Log normalizer estimate %f: ', log_normalizer_estimate)
results = AlgoResultsTuple(
test_samples=samples_tuple.test_samples,
test_log_weights=log_weights_tuple.test_log_weights,
log_normalizer_estimate=log_normalizer_estimate,
delta_time=delta_time,
initial_time_diff=initial_time_diff)
return results
|
annealed_flow_transport-master
|
annealed_flow_transport/aft.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training for all SMC and flow algorithms."""
from typing import Callable, Tuple
from annealed_flow_transport import aft
from annealed_flow_transport import craft
from annealed_flow_transport import densities
from annealed_flow_transport import flow_transport
from annealed_flow_transport import flows
from annealed_flow_transport import markov_kernel
from annealed_flow_transport import samplers
from annealed_flow_transport import serialize
from annealed_flow_transport import smc
from annealed_flow_transport import snf
from annealed_flow_transport import vi
import annealed_flow_transport.aft_types as tp
import chex
import haiku as hk
import jax
import optax
# Type defs.
Array = tp.Array
OptState = tp.OptState
UpdateFn = tp.UpdateFn
FlowParams = tp.FlowParams
FlowApply = tp.FlowApply
LogDensityByStep = tp.LogDensityByStep
RandomKey = tp.RandomKey
AcceptanceTuple = tp.AcceptanceTuple
FreeEnergyAndGrad = tp.FreeEnergyAndGrad
FreeEnergyEval = tp.FreeEnergyEval
MarkovKernelApply = tp.MarkovKernelApply
SamplesTuple = tp.SamplesTuple
LogWeightsTuple = tp.LogWeightsTuple
VfesTuple = tp.VfesTuple
InitialSampler = tp.InitialSampler
LogDensityNoStep = tp.LogDensityNoStep
assert_equal_shape = chex.assert_equal_shape
AlgoResultsTuple = tp.AlgoResultsTuple
def get_optimizer(initial_learning_rate: float,
boundaries_and_scales):
"""Get an optimizer possibly with learning rate schedule."""
if boundaries_and_scales is None:
return optax.adam(initial_learning_rate)
else:
schedule_fn = optax.piecewise_constant_schedule(
initial_learning_rate,
boundaries_and_scales[0])
opt = optax.chain(optax.scale_by_adam(),
optax.scale_by_schedule(schedule_fn), optax.scale(-1.))
return opt
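# Usage sketch (illustrative, not part of the original module). The
# boundaries_and_scales argument is a sequence whose first element is a dict
# mapping step number to a multiplicative factor, as accepted by
# optax.piecewise_constant_schedule. For example, a hypothetical call
#   opt = get_optimizer(1e-3, ({1000: 0.1, 2000: 0.1},))
# gives Adam with initial learning rate 1e-3, scaled by a further factor of
# 0.1 at steps 1000 and 2000.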
def value_or_none(value: str,
config):
if value in config:
return config[value]
else:
return None
def prepare_outer_loop(initial_sampler: InitialSampler,
initial_log_density: Callable[[Array], Array],
final_log_density: Callable[[Array], Array],
flow_func: Callable[[Array], Tuple[Array, Array]],
config) -> AlgoResultsTuple:
"""Shared code outer loops then calls the outer loops themselves.
Args:
initial_sampler: Function for producing initial sample.
initial_log_density: Function for evaluating initial log density.
final_log_density: Function for evaluating final log density.
flow_func: Flow function to pass to Haiku transform.
config: experiment configuration.
Returns:
An AlgoResultsTuple containing the experiment results.
"""
num_temps = config.num_temps
if is_annealing_algorithm(config.algo):
density_by_step = flow_transport.GeometricAnnealingSchedule(
initial_log_density, final_log_density, num_temps)
if is_markov_algorithm(config.algo):
markov_kernel_by_step = markov_kernel.MarkovTransitionKernel(
config.mcmc_config, density_by_step, num_temps)
key = jax.random.PRNGKey(config.seed)
flow_forward_fn = hk.without_apply_rng(hk.transform(flow_func))
key, subkey = jax.random.split(key)
single_normal_sample = initial_sampler(subkey,
config.batch_size,
config.sample_shape)
key, subkey = jax.random.split(key)
flow_init_params = flow_forward_fn.init(subkey,
single_normal_sample)
if value_or_none('save_checkpoint', config):
def save_checkpoint(params):
return serialize.save_checkpoint(config.params_filename, params)
else:
save_checkpoint = None
if config.algo == 'vi':
# Add a save_checkpoint function here to enable saving final state.
opt = get_optimizer(
config.optimization_config.vi_step_size,
None)
opt_init_state = opt.init(flow_init_params)
results = vi.outer_loop_vi(initial_sampler=initial_sampler,
opt_update=opt.update,
opt_init_state=opt_init_state,
flow_init_params=flow_init_params,
flow_apply=flow_forward_fn.apply,
key=key,
initial_log_density=initial_log_density,
final_log_density=final_log_density,
config=config,
save_checkpoint=save_checkpoint)
elif config.algo == 'smc':
results = smc.outer_loop_smc(density_by_step=density_by_step,
initial_sampler=initial_sampler,
markov_kernel_by_step=markov_kernel_by_step,
key=key,
config=config)
elif config.algo == 'snf':
opt = get_optimizer(
config.optimization_config.snf_step_size,
value_or_none('snf_boundaries_and_scales',
config.optimization_config))
log_step_output = None
results = snf.outer_loop_snf(flow_init_params=flow_init_params,
flow_apply=flow_forward_fn.apply,
density_by_step=density_by_step,
markov_kernel_by_step=markov_kernel_by_step,
initial_sampler=initial_sampler,
key=key,
opt=opt,
config=config,
log_step_output=log_step_output,
save_checkpoint=save_checkpoint)
elif config.algo == 'aft':
opt = get_optimizer(
config.optimization_config.aft_step_size,
None)
opt_init_state = opt.init(flow_init_params)
# Add a log_step_output function here to enable non-trivial step logging.
log_step_output = None
results = aft.outer_loop_aft(opt_update=opt.update,
opt_init_state=opt_init_state,
flow_init_params=flow_init_params,
flow_apply=flow_forward_fn.apply,
density_by_step=density_by_step,
markov_kernel_by_step=markov_kernel_by_step,
initial_sampler=initial_sampler,
key=key,
config=config,
log_step_output=log_step_output)
elif config.algo == 'craft':
opt = get_optimizer(
config.optimization_config.craft_step_size,
value_or_none('craft_boundaries_and_scales',
config.optimization_config))
opt_init_state = opt.init(flow_init_params)
log_step_output = None
results = craft.outer_loop_craft(
opt_update=opt.update,
opt_init_state=opt_init_state,
flow_init_params=flow_init_params,
flow_apply=flow_forward_fn.apply,
flow_inv_apply=None,
density_by_step=density_by_step,
markov_kernel_by_step=markov_kernel_by_step,
initial_sampler=initial_sampler,
key=key,
config=config,
log_step_output=log_step_output,
save_checkpoint=save_checkpoint)
else:
raise NotImplementedError
return results
def is_flow_algorithm(algo_name):
return algo_name in ('aft', 'vi', 'craft', 'snf')
def is_markov_algorithm(algo_name):
return algo_name in ('aft', 'craft', 'snf', 'smc')
def is_annealing_algorithm(algo_name):
return algo_name in ('aft', 'craft', 'snf', 'smc')
def run_experiment(config) -> AlgoResultsTuple:
"""Run a SMC flow experiment.
Args:
config: experiment configuration.
Returns:
An AlgoResultsTuple containing the experiment results.
"""
log_density_initial = getattr(densities, config.initial_config.density)(
config.initial_config, config.sample_shape)
log_density_final = getattr(densities, config.final_config.density)(
config.final_config, config.sample_shape)
initial_sampler = getattr(samplers,
config.initial_sampler_config.initial_sampler)(
config.initial_sampler_config)
def flow_func(x):
if is_flow_algorithm(config.algo):
flow = getattr(flows, config.flow_config.type)(config.flow_config)
return flow(x)
else:
return None
results = prepare_outer_loop(initial_sampler, log_density_initial,
log_density_final, flow_func, config)
return results
|
annealed_flow_transport-master
|
annealed_flow_transport/train.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for annealed_flow_transport.flow_transport."""
from absl.testing import absltest
from absl.testing import parameterized
from annealed_flow_transport.flow_transport import GeometricAnnealingSchedule
from annealed_flow_transport.flow_transport import get_delta
from annealed_flow_transport.flow_transport import get_delta_path_grad
from annealed_flow_transport.flow_transport import transport_free_energy_estimator
from annealed_flow_transport.flows import DiagonalAffine
import haiku as hk
import jax
import jax.numpy as jnp
from jax.scipy.stats import norm
import ml_collections
ConfigDict = ml_collections.ConfigDict
def _assert_equal_vec(tester, v1, v2, **kwargs):
tester.assertTrue(jnp.allclose(v1, v2, **kwargs))
class FlowTransportTest(parameterized.TestCase):
def test_transport_free_energy_estimator(self):
# parameters for various normal distribution.
mean_a = 0.
mean_b = -1.
mean_c = 2.
mean_d = 0.1
var_a = 1.
var_b = 4.
var_c = 5.
var_d = 1.
num_samples = 10000
dimension = 1
key = jax.random.PRNGKey(1)
# First we sample from normal distribution d
# and then perform an importance correction to normal distribution a.
samples = jax.random.normal(key, shape=(num_samples, dimension))+mean_d
def log_importance_correction(samples):
first_terms = norm.logpdf(
samples, loc=mean_a, scale=jnp.sqrt(var_a)).flatten()
second_terms = norm.logpdf(
samples, loc=mean_d, scale=jnp.sqrt(var_d)).flatten()
return first_terms-second_terms
log_weights = log_importance_correction(samples)
# this will change the normal distribution a to normal distribution b
# because it is an affine transformation.
def flow_apply(unused_params, samples):
return 2.*samples-1., jnp.log(2)*jnp.ones((num_samples,))
def analytic_gauss_kl(mean0, var0, mean1, var1):
return 0.5 * (
var0 / var1 + jnp.square(mean1 - mean0) / var1 - 1. + jnp.log(var1) -
jnp.log(var0))
def initial_density(x):
return norm.logpdf(x, loc=mean_a, scale=jnp.sqrt(var_a)).flatten()
def final_density(x):
return norm.logpdf(x, loc=mean_c, scale=jnp.sqrt(var_c)).flatten()
def step_density(step, x):
if step == 0:
return initial_density(x)
if step == 1:
return final_density(x)
estimator_value = transport_free_energy_estimator(samples=samples,
log_weights=log_weights,
flow_apply=flow_apply,
inv_flow_apply=None,
flow_params=None,
log_density=step_density,
step=1,
use_path_gradient=False)
# the target KL is analytically tractable as it is between two Gaussians.
# the target KL is between normal b and c.
analytic_value = analytic_gauss_kl(mean0=mean_b,
var0=var_b,
mean1=mean_c,
var1=var_c)
_assert_equal_vec(self, estimator_value, analytic_value, atol=1e-2)
def test_geometric_annealing_schedule(self):
def initial_density(x):
return norm.logpdf(x, loc=-1., scale=2.).flatten()
def final_density(x):
return norm.logpdf(x, loc=1.5, scale=3.).flatten()
num_temps = 5.
annealing_schedule = GeometricAnnealingSchedule(initial_density,
final_density,
num_temps)
num_samples = 10000
dimension = 1
key = jax.random.PRNGKey(1)
samples = jax.random.normal(key, shape=(num_samples, dimension))
interpolated_densities_initial = annealing_schedule(0, samples)
test_densities_initial = initial_density(samples)
interpolated_densities_final = annealing_schedule(4, samples)
test_densities_final = final_density(samples)
_assert_equal_vec(self,
interpolated_densities_initial,
test_densities_initial)
_assert_equal_vec(self,
interpolated_densities_final,
test_densities_final)
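# Note (added commentary): the geometric schedule interpolates the two
# densities in log space, log pi_beta(x) = (1 - beta) * log pi_0(x)
# + beta * log pi_T(x), with beta rising from 0 to 1 across the temperatures,
# which is why step 0 recovers the initial density and the final step
# recovers the target in the test above.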
class PathGradientTest(parameterized.TestCase):
def test_forward_consistency(self):
def initial_density(x):
return norm.logpdf(x, loc=-1., scale=2.).flatten()
def final_density(x):
return norm.logpdf(x, loc=1.5, scale=3.).flatten()
num_temps = 5.
annealing_schedule = GeometricAnnealingSchedule(initial_density,
final_density,
num_temps)
key = jax.random.PRNGKey(13)
num_batch = 7
num_dim = 1
subkey, key = jax.random.split(key)
samples = jax.random.normal(subkey, shape=(num_batch, num_dim))
temp_index = 2
flow_config = ConfigDict()
flow_config.sample_shape = (num_dim,)
def flow_func(x):
flow = DiagonalAffine(flow_config)
return flow(x)
def inv_flow_func(x):
flow = DiagonalAffine(flow_config)
return flow.inverse(x)
flow_fn = hk.without_apply_rng(hk.transform(flow_func))
inv_flow_fn = hk.without_apply_rng(hk.transform(inv_flow_func))
flow_init_params = flow_fn.init(key,
samples)
shift = 0.3
new_params = jax.tree_util.tree_map(lambda x: x+shift, flow_init_params)
flow_apply = flow_fn.apply
inv_flow_apply = inv_flow_fn.apply
delta_original = get_delta(samples,
flow_apply,
new_params,
annealing_schedule,
temp_index)
delta_path = get_delta_path_grad(samples,
flow_apply,
inv_flow_apply,
new_params,
annealing_schedule,
temp_index)
print('delta_original ', delta_original)
print('delta_path ', delta_path)
_assert_equal_vec(self,
delta_original,
delta_path)
def test_optimal_gradient(self):
pass
if __name__ == '__main__':
absltest.main()
|
annealed_flow_transport-master
|
annealed_flow_transport/flow_transport_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for normalizing flows.
For a review of normalizing flows see: https://arxiv.org/abs/1912.02762
The abstract base class ConfigurableFlow demonstrates our minimal interface.
Although the standard change of variables formula requires that
normalizing flows are invertible, none of the algorithms in train.py
require evaluating that inverse explicitly so inverses are not implemented.
"""
import abc
from typing import Callable, List, Tuple
import annealed_flow_transport.aft_types as tp
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
Array = tp.Array
Samples = tp.Samples
ConfigDict = tp.ConfigDict
class ConfigurableFlow(hk.Module, abc.ABC):
"""Abstract base clase for configurable normalizing flows.
This is the interface expected by all flow based algorithms called in train.py
"""
def __init__(self, config: ConfigDict):
super().__init__()
self._check_configuration(config)
self._config = config
def _check_input(self, x: Samples):
chex.assert_rank(x, 2)
def _check_outputs(self, x: Samples, transformed_x: Samples,
log_abs_det_jac: Array):
chex.assert_rank(x, 2)
chex.assert_equal_shape([x, transformed_x])
num_batch = x.shape[0]
chex.assert_shape(log_abs_det_jac, (num_batch,))
def _check_members_types(self, config: ConfigDict, expected_members_types):
for elem, elem_type in expected_members_types:
if elem not in config:
raise ValueError('Flow config element not found: ', elem)
if not isinstance(config[elem], elem_type):
msg = 'Flow config element '+elem+' is not of type '+str(elem_type)
raise TypeError(msg)
def __call__(self, x: Samples) -> Tuple[Samples, Array]:
"""Call transform_and_log abs_det_jac with automatic shape checking.
This calls transform_and_log_abs_det_jac which needs to be implemented
in derived classes.
Args:
x: input samples to flow.
Returns:
output samples and (num_batch,) log abs det Jacobian.
"""
self._check_input(x)
vmapped = hk.vmap(self.transform_and_log_abs_det_jac, split_rng=False)
output, log_abs_det_jac = vmapped(x)
self._check_outputs(x, output, log_abs_det_jac)
return output, log_abs_det_jac
def inverse(self, x: Samples) -> Tuple[Samples, Array]:
"""Call transform_and_log abs_det_jac with automatic shape checking.
This calls transform_and_log_abs_det_jac which needs to be implemented
in derived classes.
Args:
x: input to flow
Returns:
output and (num_batch,) log abs det Jacobian.
"""
self._check_input(x)
vmapped = hk.vmap(self.inv_transform_and_log_abs_det_jac, split_rng=False)
output, log_abs_det_jac = vmapped(x)
self._check_outputs(x, output, log_abs_det_jac)
return output, log_abs_det_jac
@abc.abstractmethod
def transform_and_log_abs_det_jac(self, x: Array) -> Tuple[Array, Array]:
"""Transform x through the flow and compute log abs determinant of Jacobian.
Args:
x: (num_dim,) input to the flow.
Returns:
Array size (num_dim,) containing output and Scalar log abs det Jacobian.
"""
def inv_transform_and_log_abs_det_jac(self, x: Array) -> Tuple[Array, Array]:
"""Transform x through inverse and compute log abs determinant of Jacobian.
Args:
x: (num_dim,) input to the flow.
Returns:
Array size (num_dim,) containing output and Scalar log abs det Jacobian.
"""
raise NotImplementedError
@abc.abstractmethod
def _check_configuration(self, config: ConfigDict):
"""Check the configuration includes the necessary fields.
Will typically raise Assertion like errors.
Args:
config: A ConfigDict include the fields required by the flow.
"""
class DiagonalAffine(ConfigurableFlow):
"""An affine transformation with a positive diagonal matrix."""
def __init__(self, config: ConfigDict):
super().__init__(config)
num_elem = config.sample_shape[0]
unconst_diag_init = hk.initializers.Constant(jnp.zeros((num_elem,)))
bias_init = hk.initializers.Constant(jnp.zeros((num_elem,)))
self._unconst_diag = hk.get_parameter(
'unconst_diag',
shape=[num_elem],
dtype=jnp.float32, # TODO(alexmatthews) nicer way to infer dtype
init=unconst_diag_init)
self._bias = hk.get_parameter(
'bias',
shape=[num_elem],
dtype=jnp.float32, # TODO(alexmatthews) nicer way to infer dtype
init=bias_init)
def _check_configuration(self, unused_config: ConfigDict):
pass
def transform_and_log_abs_det_jac(self, x: Array) -> Tuple[Array, Array]:
output = jnp.exp(self._unconst_diag)*x + self._bias
log_abs_det = jnp.sum(self._unconst_diag)
return output, log_abs_det
def inv_transform_and_log_abs_det_jac(self, x: Array) -> Tuple[Array, Array]:
output = jnp.exp(-self._unconst_diag)*(x - self._bias)
log_abs_det = -1.*jnp.sum(self._unconst_diag)
return output, log_abs_det
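# Usage sketch (illustrative, not part of the original module): flows are
# Haiku modules, so they are built inside a transformed function, e.g.
#   def flow_func(x):
#     return DiagonalAffine(config)(x)
#   flow_fn = hk.without_apply_rng(hk.transform(flow_func))
#   params = flow_fn.init(key, samples)  # samples: (num_batch, num_dim)
#   transformed, log_abs_det_jac = flow_fn.apply(params, samples)
# where `config` has sample_shape = (num_dim,). This mirrors how train.py
# constructs flow_forward_fn from flow_func.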
def rational_quadratic_spline(x: Array,
bin_positions: Array,
bin_heights: Array,
derivatives: Array) -> Tuple[Array, Array]:
"""Compute a rational quadratic spline.
See https://arxiv.org/abs/1906.04032
Args:
x: A single real number.
bin_positions: A sorted array of bin positions of length num_bins+1.
bin_heights: An array of bin heights of length num_bins+1.
derivatives: An array of derivatives at bin positions of length num_bins+1.
Returns:
Value of the rational quadratic spline at x.
Derivative with respect to x of rational quadratic spline at x.
"""
bin_index = jnp.searchsorted(bin_positions, x)
array_index = bin_index % len(bin_positions)
lower_x = bin_positions[array_index-1]
upper_x = bin_positions[array_index]
lower_y = bin_heights[array_index-1]
upper_y = bin_heights[array_index]
lower_deriv = derivatives[array_index-1]
upper_deriv = derivatives[array_index]
delta_x = upper_x - lower_x
delta_y = upper_y - lower_y
slope = delta_y / delta_x
alpha = (x - lower_x)/delta_x
alpha_squared = jnp.square(alpha)
beta = alpha * (1.-alpha)
gamma = jnp.square(1.-alpha)
epsilon = upper_deriv+lower_deriv -2. *slope
numerator_quadratic = delta_y * (slope*alpha_squared + lower_deriv*beta)
denominator_quadratic = slope + epsilon*beta
interp_x = lower_y + numerator_quadratic/denominator_quadratic
# now compute derivative
numerator_deriv = jnp.square(slope) * (
upper_deriv * alpha_squared + 2. * slope * beta + lower_deriv * gamma)
sqrt_denominator_deriv = slope + epsilon*beta
denominator_deriv = jnp.square(sqrt_denominator_deriv)
deriv = numerator_deriv / denominator_deriv
return interp_x, deriv
def identity_padded_rational_quadratic_spline(
x: Array, bin_positions: Array, bin_heights: Array,
derivatives: Array) -> Tuple[Array, Array]:
"""An identity padded rational quadratic spline.
Args:
x: the value to evaluate the spline at.
bin_positions: sorted values of bin x positions of length num_bins+1.
    bin_heights: absolute heights at the internal bin edges, length num_bins-1.
    derivatives: derivatives at the internal bin edges, length num_bins-1.
Returns:
The value of the spline at x.
The derivative with respect to x of the spline at x.
"""
lower_limit = bin_positions[0]
upper_limit = bin_positions[-1]
bin_height_sequence = (jnp.atleast_1d(jnp.array(lower_limit)),
bin_heights,
jnp.atleast_1d(jnp.array(upper_limit)))
full_bin_heights = jnp.concatenate(bin_height_sequence)
derivative_sequence = (jnp.ones((1,)),
derivatives,
jnp.ones((1,)))
full_derivatives = jnp.concatenate(derivative_sequence)
in_range = jnp.logical_and(jnp.greater(x, lower_limit),
jnp.less(x, upper_limit))
multiplier = in_range*1.
multiplier_complement = jnp.logical_not(in_range)*1.
spline_val, spline_deriv = rational_quadratic_spline(x,
bin_positions,
full_bin_heights,
full_derivatives)
identity_val = x
identity_deriv = 1.
val = spline_val*multiplier + multiplier_complement*identity_val
deriv = spline_deriv*multiplier + multiplier_complement*identity_deriv
return val, deriv
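# Note (added commentary): outside the interval
# [bin_positions[0], bin_positions[-1]] the multiplier above is zero, so the
# padded transform reduces to the identity with unit derivative, and the
# boundary derivatives are pinned to one so the overall map remains
# continuously differentiable at the edges of the active region.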
class AutoregressiveMLP(hk.Module):
"""An MLP which is constrained to have autoregressive dependency."""
def __init__(self,
num_hiddens_per_input_dim: List[int],
include_self_links: bool,
non_linearity,
zero_final: bool,
bias_last: bool,
name=None):
super().__init__(name=name)
self._num_hiddens_per_input_dim = num_hiddens_per_input_dim
self._include_self_links = include_self_links
self._non_linearity = non_linearity
self._zero_final = zero_final
self._bias_last = bias_last
def __call__(self, x: Array) -> Array:
input_dim = x.shape[0]
hidden_representation = jnp.atleast_2d(x).T
prev_hid_per_dim = 1
num_hidden_layers = len(self._num_hiddens_per_input_dim)
final_index = num_hidden_layers-1
for layer_index in range(num_hidden_layers):
is_last_layer = (final_index == layer_index)
hid_per_dim = self._num_hiddens_per_input_dim[layer_index]
name_stub = '_'+str(layer_index)
layer_shape = (input_dim,
prev_hid_per_dim,
input_dim,
hid_per_dim)
in_degree = prev_hid_per_dim * input_dim
if is_last_layer and self._zero_final:
w_init = jnp.zeros
else:
w_init = hk.initializers.TruncatedNormal(1. / np.sqrt(in_degree))
bias_init = hk.initializers.Constant(jnp.zeros((input_dim, hid_per_dim,)))
weights = hk.get_parameter(name='weights'+name_stub,
shape=layer_shape,
dtype=x.dtype,
init=w_init)
if is_last_layer and not self._bias_last:
biases = jnp.zeros((input_dim, hid_per_dim,))
else:
biases = hk.get_parameter(name='biases'+name_stub,
shape=(input_dim, hid_per_dim),
dtype=x.dtype,
init=bias_init)
if not(self._include_self_links) and is_last_layer:
k = -1
else:
k = 0
mask = jnp.tril(jnp.ones((input_dim, input_dim)),
k=k)
masked_weights = mask[:, None, :, None] * weights
new_hidden_representation = jnp.einsum('ijkl,ij->kl',
masked_weights,
hidden_representation) + biases
prev_hid_per_dim = hid_per_dim
if not is_last_layer:
hidden_representation = self._non_linearity(new_hidden_representation)
else:
hidden_representation = new_hidden_representation
return hidden_representation
class InverseAutogressiveFlow(object):
"""A generic inverse autoregressive flow.
See https://arxiv.org/abs/1606.04934
Takes two functions as input.
1) autoregressive_func takes array of (num_dim,)
  and returns an array of shape (num_dim, num_features);
  it is autoregressive in the sense that the output[i, :]
depends only on the input[:i]. This is not checked.
2) transform_func takes array of (num_dim, num_features) and
an array of (num_dim,) and returns output of shape (num_dim,)
  and a single log_det_jacobian value. This represents the transformation
acting on the inputs with given parameters.
"""
def __init__(self,
autoregressive_func: Callable[[Array], Array],
transform_func: Callable[[Array, Array], Tuple[Array, Array]]):
self._autoregressive_func = autoregressive_func
self._transform_func = transform_func
def __call__(self, x: Array) -> Tuple[Array, Array]:
"""x is of shape (num_dim,)."""
transform_features = self._autoregressive_func(x)
output, log_abs_det = self._transform_func(transform_features, x)
return output, log_abs_det
class SplineInverseAutoregressiveFlow(ConfigurableFlow):
"""An inverse autoregressive flow with spline transformer.
config must contain the following fields:
num_spline_bins: Number of bins for rational quadratic spline.
    intermediate_hids_per_dim: See AutoregressiveMLP.
    num_layers: Number of layers for AutoregressiveMLP.
    identity_init: Whether to initialize the flow to the identity.
    bias_last: Whether to include biases on the last layer of AutoregressiveMLP.
lower_lim: Lower limit of active region for rational quadratic spline.
upper_lim: Upper limit of active region for rational quadratic spline.
min_bin_size: Minimum bin size for rational quadratic spline.
min_derivative: Minimum derivative for rational quadratic spline.
"""
def __init__(self,
config: ConfigDict):
super().__init__(config)
self._num_spline_bins = config.num_spline_bins
num_spline_parameters = 3 * config.num_spline_bins - 1
num_hids_per_input_dim = [config.intermediate_hids_per_dim
] * config.num_layers + [
num_spline_parameters
]
self._autoregressive_mlp = AutoregressiveMLP(
num_hids_per_input_dim,
include_self_links=False,
non_linearity=jax.nn.leaky_relu,
zero_final=config.identity_init,
bias_last=config.bias_last)
self._lower_lim = config.lower_lim
self._upper_lim = config.upper_lim
self._min_bin_size = config.min_bin_size
self._min_derivative = config.min_derivative
def _check_configuration(self, config: ConfigDict):
expected_members_types = [
('num_spline_bins', int),
('intermediate_hids_per_dim', int),
('num_layers', int),
('identity_init', bool),
('bias_last', bool),
('lower_lim', float),
('upper_lim', float),
('min_bin_size', float),
('min_derivative', float)
]
self._check_members_types(config, expected_members_types)
def _unpack_spline_params(self, raw_param_vec) -> Tuple[Array, Array, Array]:
unconst_bin_size_x = raw_param_vec[:self._num_spline_bins]
unconst_bin_size_y = raw_param_vec[self._num_spline_bins:2 *
self._num_spline_bins]
unconst_derivs = raw_param_vec[2 * self._num_spline_bins:(
3 * self._num_spline_bins - 1)]
return unconst_bin_size_x, unconst_bin_size_y, unconst_derivs
def _transform_raw_to_spline_params(
self, raw_param_vec: Array) -> Tuple[Array, Array, Array]:
unconst_bin_size_x, unconst_bin_size_y, unconst_derivs = (
self._unpack_spline_params(raw_param_vec)
)
def normalize_bin_sizes(unconst_bin_sizes: Array) -> Array:
bin_range = self._upper_lim - self._lower_lim
reduced_bin_range = (
bin_range - self._num_spline_bins * self._min_bin_size)
return jax.nn.softmax(
unconst_bin_sizes) * reduced_bin_range + self._min_bin_size
bin_size_x = normalize_bin_sizes(unconst_bin_size_x)
bin_size_y = normalize_bin_sizes(unconst_bin_size_y)
# get the x bin positions.
array_sequence = (jnp.ones((1,))*self._lower_lim, bin_size_x)
x_bin_pos = jnp.cumsum(jnp.concatenate(array_sequence))
# get the y bin positions, ignoring redundant terms.
stripped_y_bin_pos = self._lower_lim + jnp.cumsum(bin_size_y[:-1])
def forward_positive_transform(unconst_value: Array,
min_value: Array) -> Array:
return jax.nn.softplus(unconst_value) + min_value
def inverse_positive_transform(const_value: Array,
min_value: Array) -> Array:
return jnp.log(jnp.expm1(const_value-min_value))
inverted_one = inverse_positive_transform(1., self._min_derivative)
derivatives = forward_positive_transform(unconst_derivs + inverted_one,
self._min_derivative)
return x_bin_pos, stripped_y_bin_pos, derivatives
def _get_spline_values(self,
raw_parameters: Array,
x: Array) -> Tuple[Array, Array]:
bat_get_parameters = jax.vmap(self._transform_raw_to_spline_params)
bat_x_bin_pos, bat_stripped_y, bat_derivatives = bat_get_parameters(
raw_parameters)
# Vectorize spline over data and parameters.
bat_get_spline_vals = jax.vmap(identity_padded_rational_quadratic_spline,
in_axes=[0, 0, 0, 0])
spline_vals, derivs = bat_get_spline_vals(x, bat_x_bin_pos, bat_stripped_y,
bat_derivatives)
log_abs_det = jnp.sum(jnp.log(jnp.abs(derivs)))
return spline_vals, log_abs_det
def transform_and_log_abs_det_jac(self, x: Array) -> Tuple[Array, Array]:
iaf = InverseAutogressiveFlow(self._autoregressive_mlp,
self._get_spline_values)
return iaf(x)
class AffineInverseAutoregressiveFlow(ConfigurableFlow):
"""An inverse autoregressive flow with affine transformer.
config must contain the following fields:
    intermediate_hids_per_dim: See AutoregressiveMLP.
    num_layers: Number of layers for AutoregressiveMLP.
    identity_init: Whether to initialize the flow to the identity.
    bias_last: Whether to include biases on the last layer of AutoregressiveMLP.
"""
def __init__(self,
config: ConfigDict):
super().__init__(config)
num_affine_params = 2
num_hids_per_input_dim = [config.intermediate_hids_per_dim
] * config.num_layers + [num_affine_params]
self._autoregressive_mlp = AutoregressiveMLP(
num_hids_per_input_dim,
include_self_links=False,
non_linearity=jax.nn.leaky_relu,
zero_final=config.identity_init,
bias_last=config.bias_last)
def _check_configuration(self, config: ConfigDict):
expected_members_types = [('intermediate_hids_per_dim', int),
('num_layers', int),
('identity_init', bool),
('bias_last', bool)
]
self._check_members_types(config, expected_members_types)
def _get_affine_transformation(self,
raw_parameters: Array,
x: Array) -> Tuple[Array, Array]:
shifts = raw_parameters[:, 0]
scales = raw_parameters[:, 1] + jnp.ones_like(raw_parameters[:, 1])
log_abs_det = jnp.sum(jnp.log(jnp.abs(scales)))
output = x * scales + shifts
return output, log_abs_det
def transform_and_log_abs_det_jac(self, x: Array) -> Tuple[Array, Array]:
iaf = InverseAutogressiveFlow(self._autoregressive_mlp,
self._get_affine_transformation)
return iaf(x)
def affine_transformation(params: Array,
x: Array) -> Tuple[Array, Array]:
shift = params[0]
# Assuming params start as zero adding 1 to scale gives identity transform.
scale = params[1] + 1.
output = x * scale + shift
return output, jnp.log(jnp.abs(scale))
def inverse_affine_transformation(params: Array,
y: Array) -> Tuple[Array, Array]:
shift = params[0]
# Assuming params start as zero adding 1 to scale gives identity transform.
scale = params[1] + 1.
output = (y - shift) / scale
return output, -1.*jnp.log(jnp.abs(scale))
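# Illustrative sketch: affine_transformation and inverse_affine_transformation
# compose to the identity and their log abs det Jacobians cancel. The values
# below are arbitrary.
def _example_affine_round_trip() -> Tuple[Array, Array]:
  params = jnp.array([0.7, 1.3])  # shift = 0.7, scale = 1.3 + 1. = 2.3.
  x = jnp.array(2.0)
  y, log_det_forward = affine_transformation(params, x)
  x_back, log_det_inverse = inverse_affine_transformation(params, y)
  # x_back is approximately x and the two log dets sum to approximately zero.
  return x_back, log_det_forward + log_det_inverse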
class AffineTransformer:
def __call__(self, params: Array, x: Array) -> Tuple[Array, Array]:
vectorized_affine = jnp.vectorize(affine_transformation,
signature='(k),()->(),()')
return vectorized_affine(params, x)
def inverse(self, params: Array, y: Array) -> Tuple[Array, Array]:
vectorized_affine = jnp.vectorize(inverse_affine_transformation,
signature='(k),()->(),()')
return vectorized_affine(params, y)
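# Illustrative sketch: AffineTransformer maps per-dimension parameters of
# shape (num_dim, 2) and inputs of shape (num_dim,) to outputs and elementwise
# log abs det terms, both of shape (num_dim,). Zero parameters give identity.
def _example_affine_transformer() -> Tuple[Array, Array]:
  transformer = AffineTransformer()
  params = jnp.zeros((4, 2))
  x = jnp.arange(4.)
  y, log_dets = transformer(params, x)
  # y equals x and log_dets is a vector of zeros of shape (4,).
  return y, log_dets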
class RationalQuadraticSpline(ConfigurableFlow):
"""A learnt monotonic rational quadratic spline with identity padding.
Each input dimension is operated on by a separate spline.
The spline is initialized to the identity.
config must contain the following fields:
num_bins: Number of bins for rational quadratic spline.
lower_lim: Lower limit of active region for rational quadratic spline.
upper_lim: Upper limit of active region for rational quadratic spline.
min_bin_size: Minimum bin size for rational quadratic spline.
min_derivative: Minimum derivative for rational quadratic spline.
"""
def __init__(self,
config: ConfigDict):
super().__init__(config)
self._num_bins = config.num_bins
self._lower_lim = config.lower_lim
self._upper_lim = config.upper_lim
self._min_bin_size = config.min_bin_size
self._min_derivative = config.min_derivative
def _check_configuration(self, config: ConfigDict):
expected_members_types = [
('num_bins', int),
('lower_lim', float),
('upper_lim', float),
('min_bin_size', float),
('min_derivative', float)
]
self._check_members_types(config, expected_members_types)
def transform_and_log_abs_det_jac(self, x: Array) -> Tuple[Array, Array]:
"""Apply the spline transformation.
Args:
x: (num_dim,) DeviceArray representing flow input.
Returns:
output: (num_dim,) transformed sample through flow.
      log_abs_det_jac: Scalar log absolute determinant of the Jacobian.
"""
num_dim = x.shape[0]
bin_parameter_shape = (num_dim, self._num_bins)
# Setup the bin position and height parameters.
bin_init = hk.initializers.Constant(jnp.ones(bin_parameter_shape))
unconst_bin_size_x = hk.get_parameter(
'unconst_bin_size_x',
shape=bin_parameter_shape,
dtype=x.dtype,
init=bin_init)
unconst_bin_size_y = hk.get_parameter(
'unconst_bin_size_y',
shape=bin_parameter_shape,
dtype=x.dtype,
init=bin_init)
def normalize_bin_sizes(unconst_bin_sizes):
bin_range = self._upper_lim - self._lower_lim
reduced_bin_range = (bin_range - self._num_bins * self._min_bin_size)
return jax.nn.softmax(
unconst_bin_sizes) * reduced_bin_range + self._min_bin_size
batched_normalize = jax.vmap(normalize_bin_sizes)
bin_size_x = batched_normalize(unconst_bin_size_x)
bin_size_y = batched_normalize(unconst_bin_size_y)
array_sequence = (jnp.ones((num_dim, 1)) * self._lower_lim, bin_size_x)
bin_positions = jnp.cumsum(jnp.concatenate(array_sequence, axis=1), axis=1)
# Don't include the redundant bin heights.
stripped_bin_heights = self._lower_lim + jnp.cumsum(
bin_size_y[:, :-1], axis=1)
# Setup the derivative parameters.
def forward_positive_transform(unconst_value, min_value):
return jax.nn.softplus(unconst_value) + min_value
def inverse_positive_transform(const_value, min_value):
return jnp.log(jnp.expm1(const_value - min_value))
deriv_parameter_shape = (num_dim, self._num_bins - 1)
inverted_one = inverse_positive_transform(1., self._min_derivative)
deriv_init = hk.initializers.Constant(
jnp.ones(deriv_parameter_shape) * inverted_one)
unconst_deriv = hk.get_parameter(
'unconst_deriv',
shape=deriv_parameter_shape,
dtype=x.dtype,
init=deriv_init)
batched_positive_transform = jax.vmap(
forward_positive_transform, in_axes=[0, None])
deriv = batched_positive_transform(unconst_deriv, self._min_derivative)
# Setup batching then apply the spline.
batch_padded_rq_spline = jax.vmap(
identity_padded_rational_quadratic_spline, in_axes=[0, 0, 0, 0])
output, jac_terms = batch_padded_rq_spline(x, bin_positions,
stripped_bin_heights, deriv)
log_abs_det_jac = jnp.sum(jnp.log(jac_terms))
return output, log_abs_det_jac
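# Illustrative sketch: using RationalQuadraticSpline inside hk.transform on a
# single unbatched input, with arbitrary config values chosen to satisfy the
# fields listed in the class docstring.
def _example_rational_quadratic_spline() -> Tuple[Array, Array]:
  import ml_collections  # Local import to keep the sketch self-contained.
  config = ml_collections.ConfigDict()
  config.num_bins = 4
  config.lower_lim = -3.
  config.upper_lim = 3.
  config.min_bin_size = 1e-2
  config.min_derivative = 1e-2
  def forward(x: Array) -> Tuple[Array, Array]:
    return RationalQuadraticSpline(config).transform_and_log_abs_det_jac(x)
  transformed = hk.transform(forward)
  x = jnp.array([0.5, -0.25, 2.0])
  params = transformed.init(jax.random.PRNGKey(0), x)
  y, log_abs_det = transformed.apply(params, None, x)
  # At initialization the spline is the identity, so y is approximately x
  # and log_abs_det is approximately zero.
  return y, log_abs_det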
def expand_periodic_dim(x: Array, num_extra_vals: int):
if num_extra_vals == 0:
return x
first = x[-num_extra_vals:, :]
last = x[:num_extra_vals, :]
return jnp.vstack([first, x, last])
def pad_periodic_2d(x: Array, kernel_shape) -> Array:
"""Pad x to be have the required extra terms at the edges."""
assert len(kernel_shape) == 2
chex.assert_rank(x, 2)
# this code is unbatched
# we require that kernel shape has odd rows/cols.
is_even = False
for elem in kernel_shape:
is_even = is_even or (elem % 2 == 0)
if is_even:
raise ValueError('kernel_shape is assumed to have odd rows and cols')
# calculate num extra rows/cols each side.
num_extra_row = (kernel_shape[0] - 1) // 2
num_extra_col = (kernel_shape[1] -1) // 2
row_expanded_x = expand_periodic_dim(x,
num_extra_row)
col_expanded_x = expand_periodic_dim(row_expanded_x.T,
num_extra_col).T
return col_expanded_x
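# Illustrative sketch: periodic padding of a 3x3 grid for a 3x3 kernel wraps
# one row and one column around each edge, giving a 5x5 array.
def _example_pad_periodic_2d() -> Array:
  x = jnp.arange(9.).reshape((3, 3))
  padded = pad_periodic_2d(x, (3, 3))
  chex.assert_shape(padded, (5, 5))
  # The first padded row equals the last row of x and vice versa.
  return padded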
def batch_pad_periodic_2d(x: Array, kernel_shape) -> Array:
assert len(kernel_shape) == 2
chex.assert_rank(x, 4)
batch_func = jax.vmap(pad_periodic_2d, in_axes=(0, None))
batch_channel_func = jax.vmap(batch_func, in_axes=(3, None), out_axes=3)
return batch_channel_func(x, kernel_shape)
class Conv2DTorus(hk.Conv2D):
"""Convolution in 2D with periodic boundary conditions.
Strides are ignored and this is not checked.
  kernel_shape is a tuple (a, b) where a and b are odd positive integers.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, padding='VALID', **kwargs)
def __call__(self, x: Array) -> Array:
padded_x = batch_pad_periodic_2d(x, self.kernel_shape)
return super().__call__(padded_x)
class FullyConvolutionalNetwork(hk.Module):
"""A fully convolutional network with ResNet middle layers."""
def __init__(self,
num_middle_channels: int = 5,
num_middle_layers: int = 2,
num_final_channels: int = 2,
kernel_shape: Tuple[int] = (3, 3),
zero_final: bool = True,
is_torus: bool = False): # pytype: disable=annotation-type-mismatch
super().__init__()
self._num_middle_channels = num_middle_channels
self._num_middle_layers = num_middle_layers
self._num_final_channels = num_final_channels
self._kernel_shape = kernel_shape
self._zero_final = zero_final
self._is_torus = is_torus
def __call__(self,
x: Array):
"""Call the residual network on x.
Args:
x: is of shape (length_a, length_b)
Returns:
Array of shape (length_a, length_b, num_channels[-1])
"""
chex.assert_rank(x, 2)
length_a, length_b = jnp.shape(x)
non_linearity = jax.nn.relu
if self._is_torus:
conv_two_d = Conv2DTorus
else:
conv_two_d = hk.Conv2D
# Cast to batch size of one and one channel in last index.
representation = x[None, :, :, None]
for middle_layer_index in range(self._num_middle_layers):
if middle_layer_index == 0:
representation = conv_two_d(
output_channels=self._num_middle_channels,
stride=1,
kernel_shape=self._kernel_shape,
with_bias=True)(representation)
representation = non_linearity(representation)
else:
conv_result = conv_two_d(
output_channels=self._num_middle_channels,
stride=1,
kernel_shape=self._kernel_shape,
with_bias=True)(representation)
representation = representation + non_linearity(conv_result)
if self._zero_final:
representation = conv_two_d(
output_channels=self._num_final_channels,
stride=1,
kernel_shape=self._kernel_shape,
with_bias=True,
w_init=jnp.zeros,
b_init=jnp.zeros)(representation)
else:
representation = conv_two_d(
output_channels=self._num_final_channels,
stride=1,
kernel_shape=self._kernel_shape,
with_bias=True)(representation)
chex.assert_shape(representation,
[1, length_a, length_b, self._num_final_channels])
# Remove extraneous batch index of size 1.
representation = representation[0, :, :, :]
return representation
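# Illustrative sketch: running FullyConvolutionalNetwork under hk.transform on
# an arbitrary 6x6 lattice with periodic (torus) boundary conditions.
def _example_fully_convolutional_network() -> Array:
  def forward(x: Array) -> Array:
    net = FullyConvolutionalNetwork(num_middle_channels=4,
                                    num_middle_layers=2,
                                    num_final_channels=2,
                                    kernel_shape=(3, 3),
                                    is_torus=True)
    return net(x)
  transformed = hk.transform(forward)
  x = jnp.zeros((6, 6))
  params = transformed.init(jax.random.PRNGKey(0), x)
  out = transformed.apply(params, None, x)
  chex.assert_shape(out, (6, 6, 2))
  return out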
class CouplingLayer(object):
"""A generic coupling layer.
Takes the following functions as inputs.
1) A conditioner network mapping from event_shape->event_shape + (num_params,)
2) Mask of shape event_shape.
3) transformer A map from event_shape -> event_shape that acts elementwise on
the terms to give a diagonal Jacobian expressed as shape event_shape and in
abs-log space.
It is parameterised by parameters of shape params_shape.
"""
def __init__(self, conditioner_network: Callable[[Array], Array], mask: Array,
transformer):
self._conditioner_network = conditioner_network
self._mask = mask
self._transformer = transformer
def __call__(self, x):
"""Transform x with coupling layer.
Args:
x: event_shape Array.
Returns:
output_x: event_shape Array corresponding to the output.
log_abs_det: scalar corresponding to the log abs det Jacobian.
"""
mask_complement = 1. - self._mask
masked_x = x * self._mask
chex.assert_equal_shape([masked_x, x])
transformer_params = self._conditioner_network(masked_x)
transformed_x, log_abs_dets = self._transformer(transformer_params, x)
output_x = masked_x + mask_complement * transformed_x
chex.assert_equal_shape([transformed_x,
output_x,
x,
log_abs_dets])
log_abs_det = jnp.sum(log_abs_dets * mask_complement)
return output_x, log_abs_det
def inverse(self, y):
"""Transform y with inverse coupling layer.
Args:
y: event_shape Array.
Returns:
output_y: event_shape Array corresponding to the output.
log_abs_det: scalar corresponding to the log abs det Jacobian.
"""
mask_complement = 1. - self._mask
masked_y = y * self._mask
chex.assert_equal_shape([masked_y, y])
transformer_params = self._conditioner_network(masked_y)
transformed_y, log_abs_dets = self._transformer.inverse(transformer_params,
y)
output_y = masked_y + mask_complement * transformed_y
chex.assert_equal_shape([transformed_y,
output_y,
y,
log_abs_dets])
log_abs_det = jnp.sum(log_abs_dets * mask_complement)
return output_y, log_abs_det
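# Illustrative sketch: a CouplingLayer with a toy zero-valued conditioner and
# the AffineTransformer defined above. With zero parameters the layer is the
# identity and both log abs det Jacobians are zero. The conditioner here is a
# hypothetical stand-in for the conv net used below.
def _example_coupling_layer() -> Tuple[Array, Array]:
  mask = jnp.array([1., 0., 1., 0.])
  def toy_conditioner(masked_x: Array) -> Array:
    # Maps event_shape -> event_shape + (num_params,) with num_params = 2.
    return jnp.zeros(masked_x.shape + (2,))
  layer = CouplingLayer(toy_conditioner, mask, AffineTransformer())
  x = jnp.arange(4.)
  y, log_abs_det = layer(x)
  x_back, unused_inverse_log_abs_det = layer.inverse(y)
  # y equals x and x_back recovers x.
  return y, log_abs_det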
class ConvAffineCoupling(CouplingLayer):
"""A convolutional affine coupling layer."""
def __init__(self,
mask: Array,
conv_num_middle_channels: int = 5,
conv_num_middle_layers: int = 2,
conv_kernel_shape: Tuple[int] = (3, 3),
identity_init: bool = True,
is_torus: bool = False): # pytype: disable=annotation-type-mismatch
conv_net = FullyConvolutionalNetwork(
num_middle_channels=conv_num_middle_channels,
num_middle_layers=conv_num_middle_layers,
num_final_channels=2,
kernel_shape=conv_kernel_shape,
zero_final=identity_init,
is_torus=is_torus)
vectorized_affine = AffineTransformer()
super().__init__(conv_net,
mask,
vectorized_affine)
def get_checkerboard_mask(overall_shape: Tuple[int, int],
period: int):
range_a = jnp.arange(overall_shape[0])
range_b = jnp.arange(overall_shape[1])
def modulo_func(index_a, index_b):
return jnp.mod(index_a+index_b+period, 2)
func = lambda y: jax.vmap(modulo_func, in_axes=[0, None])(range_a, y)
vals = func(range_b)
chex.assert_shape(vals, overall_shape)
return vals
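# Illustrative sketch: checkerboard masks for successive coupling layers.
# Alternating the period between layers flips the mask so that every lattice
# site gets transformed.
def _example_checkerboard_masks() -> Tuple[Array, Array]:
  mask_even = get_checkerboard_mask((2, 2), 0)
  mask_odd = get_checkerboard_mask((2, 2), 1)
  # mask_even is [[0, 1], [1, 0]] and mask_odd is its complement.
  return mask_even, mask_odd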
class ConvAffineCouplingStack(ConfigurableFlow):
"""A stack of convolutional affine coupling layers."""
def __init__(self, config: ConfigDict):
super().__init__(config)
num_elem = config.num_elem
num_grid_per_dim = int(np.sqrt(num_elem))
assert num_grid_per_dim * num_grid_per_dim == num_elem
self._true_shape = (num_grid_per_dim, num_grid_per_dim)
self._coupling_layers = []
for index in range(self._config.num_coupling_layers):
mask = get_checkerboard_mask(self._true_shape, index)
coupling_layer = ConvAffineCoupling(
mask,
conv_kernel_shape=self._config.conv_kernel_shape,
conv_num_middle_layers=self._config.conv_num_middle_layers,
conv_num_middle_channels=self._config.conv_num_middle_channels,
is_torus=self._config.is_torus,
identity_init=self._config.identity_init
)
self._coupling_layers.append(coupling_layer)
def _check_configuration(self, config: ConfigDict):
expected_members_types = [
('conv_kernel_shape', list),
('conv_num_middle_layers', int),
('conv_num_middle_channels', int),
('is_torus', bool),
('identity_init', bool)
]
self._check_members_types(config, expected_members_types)
def transform_and_log_abs_det_jac(self, x: Array) -> Tuple[Array, Array]:
reshaped_x = jnp.reshape(x, self._true_shape)
transformed_x = reshaped_x
log_abs_det = 0.
for index in range(self._config.num_coupling_layers):
coupling_layer = self._coupling_layers[index]
transformed_x, log_det_increment = coupling_layer(transformed_x)
chex.assert_equal_shape([transformed_x, reshaped_x])
log_abs_det += log_det_increment
restored_x = jnp.reshape(transformed_x, x.shape)
return restored_x, log_abs_det
  def inv_transform_and_log_abs_det_jac(self, x: Array) -> Tuple[Array, Array]:
reshaped_x = jnp.reshape(x, self._true_shape)
transformed_x = reshaped_x
log_abs_det = 0.
for index in range(self._config.num_coupling_layers-1, -1, -1):
coupling_layer = self._coupling_layers[index]
transformed_x, log_det_increment = coupling_layer.inverse(transformed_x)
chex.assert_equal_shape([transformed_x, reshaped_x])
log_abs_det += log_det_increment
restored_x = jnp.reshape(transformed_x, x.shape)
return restored_x, log_abs_det
class ComposedFlows():
"""Class to compose flows based on a list of configs.
config should contain flow_configs a list of flow configs to compose.
"""
def __init__(self, config: ConfigDict):
self._config = config
self._flows = []
for flow_config in self._config.flow_configs:
base_flow_class = globals()[flow_config.type]
flow = base_flow_class(flow_config)
self._flows.append(flow)
def __call__(self, x: Samples) -> Tuple[Samples, Array]:
log_abs_det = jnp.zeros(x.shape[0])
progress = x
for flow in self._flows:
progress, log_abs_det_increment = flow(progress)
log_abs_det += log_abs_det_increment
chex.assert_equal_shape((x, progress))
chex.assert_shape(log_abs_det, (x.shape[0],))
return progress, log_abs_det
|
annealed_flow_transport-master
|
annealed_flow_transport/flows.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for annealed_flow_transport.train_vae."""
from absl.testing import absltest
from absl.testing import parameterized
from annealed_flow_transport import train_vae
class TestEntryPoint(parameterized.TestCase):
"""Test that the main VAE training loop runs on a tiny example."""
def test_entry_point(self):
random_seed = 1
batch_size = 2
num_latents = 5
    step_size = 0.00005
output_dir_stub = False
train_iters = 7
report_period = 3
train_vae.train_vae(batch_size=batch_size,
num_latents=num_latents,
random_seed=random_seed,
step_size=step_size,
output_dir_stub=output_dir_stub,
train_iters=train_iters,
report_period=report_period)
if __name__ == '__main__':
absltest.main()
|
annealed_flow_transport-master
|
annealed_flow_transport/train_vae_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for annealed_flow_transport.resampling."""
from absl.testing import absltest
from absl.testing import parameterized
from annealed_flow_transport import resampling
import chex
import jax
import jax.numpy as jnp
def _assert_equal_vec(tester, v1, v2, **kwargs):
tester.assertTrue(jnp.allclose(v1, v2, **kwargs))
class ResamplingTest(parameterized.TestCase):
def test_ess(self):
# Test equal unnormalized weights come out correctly.
num_samples = 32
arbitrary_number = -3.7
log_weights = -arbitrary_number*jnp.ones(num_samples)
test_log_ess = resampling.log_effective_sample_size(log_weights)
true_log_ess = jnp.log(num_samples)
_assert_equal_vec(self, test_log_ess, true_log_ess)
# Test an arbitrary simple case.
weights = jnp.array([0.3, 0.2, 0.5])
true_log_ess_b = jnp.log(1./jnp.sum(weights**2))
log_weights_b = jnp.log(weights) + arbitrary_number
test_log_ess_b = resampling.log_effective_sample_size(log_weights_b)
_assert_equal_vec(self, true_log_ess_b, test_log_ess_b)
def test_simple_resampling(self):
arbitrary_number = -5.2
num_samples = 10000
key = jax.random.PRNGKey(1)
    # Draw samples from a standard normal, then attach importance weights
    # concentrated on the first three samples to define the target.
dimension = 1
key, subkey = jax.random.split(key, 2)
samples = jax.random.normal(subkey, shape=(num_samples, dimension))
weights = jnp.array([0.5, 0.25, 0.25]+[0.]*(num_samples-3))
log_unnormalized_weights = jnp.log(weights)+arbitrary_number
target_mean = 0.5*samples[0] + 0.25*samples[1] + 0.25*samples[2]
target_variance = 0.5 * (samples[0] - target_mean)**2 + 0.25 * (
samples[1] - target_mean)**2 + 0.25 * (samples[2] - target_mean)**2
target_weights = -1.*jnp.ones(num_samples)*jnp.log(num_samples)
resampled, log_weights_new = resampling.simple_resampling(
key, log_unnormalized_weights, samples)
empirical_mean = jnp.mean(resampled)
empirical_variance = jnp.var(resampled)
_assert_equal_vec(self, empirical_mean, target_mean, atol=1e-2)
_assert_equal_vec(self, empirical_variance, target_variance, atol=1e-2)
_assert_equal_vec(self, log_weights_new, target_weights)
def test_optionally_resample(self):
num_samples = 100
dimension = 2
key = jax.random.PRNGKey(1)
key, subkey = jax.random.split(key, 2)
samples = jax.random.normal(subkey, shape=(num_samples, dimension))
key, subkey = jax.random.split(key, 2)
log_weights = jax.random.normal(subkey, shape=(num_samples,))
log_ess = resampling.log_effective_sample_size(log_weights)
resamples, log_uniform_weights = resampling.simple_resampling(key,
log_weights,
samples)
threshold_lower = 0.9/num_samples*jnp.exp(log_ess)
threshold_upper = 1.1/num_samples*jnp.exp(log_ess)
should_be_samples, should_be_log_weights = resampling.optionally_resample(
key, log_weights, samples, threshold_lower)
should_be_resamples, should_be_uniform_weights = resampling.optionally_resample(
key, log_weights, samples, threshold_upper)
_assert_equal_vec(self, should_be_samples, samples)
_assert_equal_vec(self, should_be_resamples, resamples)
_assert_equal_vec(self, log_uniform_weights, should_be_uniform_weights)
_assert_equal_vec(self, should_be_log_weights, log_weights)
def test_tree_resampling(self):
log_weights = jnp.array([0, -jnp.inf, -jnp.inf])
tree_component = jnp.arange(6).reshape((3, 2))
expected_tree_component = jnp.repeat(jnp.atleast_2d(tree_component[0, :]),
3, axis=0)
tree = (tree_component, (tree_component, tree_component))
expected_tree = (expected_tree_component, (expected_tree_component,
expected_tree_component))
key = jax.random.PRNGKey(1)
resampled_tree = resampling.simple_resampling(key,
log_weights,
tree)[0]
chex.assert_trees_all_equal(expected_tree, resampled_tree)
if __name__ == '__main__':
absltest.main()
|
annealed_flow_transport-master
|
annealed_flow_transport/resampling_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for cox process density utilities."""
import itertools
import annealed_flow_transport.aft_types as tp
import chex
import jax
import jax.numpy as jnp
import jax.scipy.linalg as slinalg
import numpy as np
# TypeDefs
NpArray = np.ndarray
Array = tp.Array
def get_bin_counts(array_in: NpArray,
num_bins_per_dim: int) -> NpArray:
"""Divide two dimensional input space into a grid and count points in each.
  Points on the upper edge, which do occur in the data, go into the lower bin.
The occurrence of these points is an artefact of the rescaling done on data.
Args:
array_in: (num_points,2) containing points in square [0,1]^2
num_bins_per_dim: the number of bins per dimension for the grid.
Returns:
    Numpy array of shape (num_bins_per_dim, num_bins_per_dim) containing counts.
"""
chex.assert_rank(array_in, 2)
scaled_array = array_in * num_bins_per_dim
counts = np.zeros((num_bins_per_dim, num_bins_per_dim))
for elem in scaled_array:
    flt_row, flt_col = np.floor(elem)
    row = int(flt_row)
    col = int(flt_col)
# Deal with the case where the point lies exactly on upper/rightmost edge.
if row == num_bins_per_dim:
row -= 1
if col == num_bins_per_dim:
col -= 1
counts[row, col] += 1
return counts
def get_bin_vals(num_bins: int) -> NpArray:
grid_indices = jnp.arange(num_bins)
bin_vals = jnp.array([
jnp.array(elem) for elem in itertools.product(grid_indices, grid_indices)
])
return bin_vals
def gram(kernel, xs: Array) -> Array:
"""Given a kernel function and an array of points compute a gram matrix."""
return jax.vmap(lambda x: jax.vmap(lambda y: kernel(x, y))(xs))(xs)
def kernel_func(x: Array,
y: Array,
signal_variance: Array,
num_grid_per_dim: int,
raw_length_scale: Array) -> Array:
"""Compute covariance/kernel function.
K(m,n) = signal_variance * exp(-|m-n|/(num_grid_per_dim*raw_length_scale))
Args:
x: First point shape (num_spatial_dim,)
y: Second point shape (num_spatial_dim,)
signal_variance: non-negative scalar.
num_grid_per_dim: Number of grid points per spatial dimension.
raw_length_scale: Length scale of the undiscretized process.
Returns:
Scalar value of covariance function.
"""
chex.assert_equal_shape([x, y])
chex.assert_rank(x, 1)
normalized_distance = jnp.linalg.norm(x - y, 2) / (
num_grid_per_dim * raw_length_scale)
return signal_variance * jnp.exp(-normalized_distance)
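# Illustrative sketch: building the Gaussian process gram matrix over the
# lattice bin centres. The hyperparameter values below are arbitrary.
def _example_gram_matrix() -> Array:
  num_bins = 3
  bin_vals = get_bin_vals(num_bins).astype(jnp.float32)  # Shape (9, 2).
  kernel = lambda x, y: kernel_func(x, y,
                                    signal_variance=1.,
                                    num_grid_per_dim=num_bins,
                                    raw_length_scale=0.5)
  gram_matrix = gram(kernel, bin_vals)
  chex.assert_shape(gram_matrix, (num_bins**2, num_bins**2))
  return gram_matrix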
def poisson_process_log_likelihood(latent_function: Array,
bin_area: Array,
flat_bin_counts: Array) -> Array:
"""Discretized Poisson process log likelihood.
Args:
latent_function: Intensity per unit area of shape (total_dimensions,)
bin_area: Scalar bin_area.
flat_bin_counts: Non negative integer counts of shape (total_dimensions,)
Returns:
Total log likelihood of points.
"""
chex.assert_rank([latent_function, bin_area], [1, 0])
chex.assert_equal_shape([latent_function, flat_bin_counts])
first_term = latent_function * flat_bin_counts
second_term = -bin_area * jnp.exp(latent_function)
return jnp.sum(first_term+second_term)
def get_latents_from_white(white: Array, const_mean: Array,
cholesky_gram: Array) -> Array:
"""Get latents from whitened representation.
Let f = L e + mu where e is distributed as standard multivariate normal.
Then Cov[f] = LL^T .
In the present case L is assumed to be lower triangular and is given by
the input cholesky_gram.
  mu is a constant vector with mu_i = const_mean for all i.
Args:
white: shape (total_dimensions,) e.g. (900,) for a 30x30 grid.
const_mean: scalar.
cholesky_gram: shape (total_dimensions, total_dimensions)
Returns:
    the latent function values of shape (total_dimensions,)
"""
chex.assert_rank([white, const_mean, cholesky_gram], [1, 0, 2])
latent_function = jnp.matmul(cholesky_gram, white) + const_mean
chex.assert_equal_shape([latent_function, white])
return latent_function
def get_white_from_latents(latents: Array,
const_mean: Array,
cholesky_gram: Array) -> Array:
"""Get whitened representation from function representation.
Let f = L e + mu where e is distributed as standard multivariate normal.
Then Cov[f] = LL^T and e = L^-1(f-mu).
In the present case L is assumed to be lower triangular and is given by
the input cholesky_gram.
  mu is a constant vector with mu_i = const_mean for all i.
Args:
latents: shape (total_dimensions,) e.g. (900,) for a 30x30 grid.
const_mean: scalar.
cholesky_gram: shape (total_dimensions, total_dimensions)
Returns:
points in the whitened space of shape (total_dimensions,)
"""
chex.assert_rank([latents, const_mean, cholesky_gram], [1, 0, 2])
white = slinalg.solve_triangular(
cholesky_gram, latents - const_mean, lower=True)
chex.assert_equal_shape([latents, white])
return white
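# Illustrative sketch: get_white_from_latents inverts get_latents_from_white.
# The Cholesky factor and values below are arbitrary.
def _example_whitening_round_trip() -> Array:
  cholesky_gram = jnp.array([[1.0, 0.0], [0.5, 2.0]])
  const_mean = jnp.array(0.3)
  white = jnp.array([1.0, -2.0])
  latents = get_latents_from_white(white, const_mean, cholesky_gram)
  white_back = get_white_from_latents(latents, const_mean, cholesky_gram)
  # white_back recovers white up to numerical precision.
  return white_back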
|
annealed_flow_transport-master
|
annealed_flow_transport/cox_process_utils.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convolutional variational autoencoder code for likelihood experiments.
Some Jax/Haiku programming idioms inspired by OSS Apache 2.0 Haiku vae example.
A pretrained version of this model is already included in data/vae.pickle.
To run one of the sampling algorithms on that trained model use configs/vae.py
This training script is included for full reproducibility.
"""
from typing import Any, Tuple
import annealed_flow_transport.aft_types as tp
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import tensorflow_datasets as tfds
Array = tp.Array
MNIST_IMAGE_SHAPE = tp.MNIST_IMAGE_SHAPE
Batch = tp.VaeBatch
RandomKey = tp.RandomKey
OptState = tp.OptState
Params = Any
UpdateFn = tp.UpdateFn
VAEResult = tp.VAEResult
def kl_divergence_standard_gaussian(mean, std) -> Array:
"""KL divergence from diagonal Gaussian with mean std to standard normal.
Independence means the KL is a sum of KL divergences for each dimension.
expectation_{q(x)}log prod_i q_i(x_i)/p_i(x_i)
= sum_{i=1}^{N} expectation_{q_i(x_i)} log q_i(x_i) / p_i(x_i)
So we have a sum of KL divergence between univariate Gaussians where
  p_i(x_i) is a standard normal.
  So each term is 0.5 * ((std)^2 + (mean)^2 - 1 - 2 ln (std) )
  Args:
    mean: Array of shape (ndim,)
    std: Array of shape (ndim,)
Returns:
KL-divergence Array of shape ().
"""
chex.assert_rank([mean, std], [1, 1])
terms = 0.5 * (jnp.square(std) + jnp.square(mean) - 1. - 2. * jnp.log(std))
return jnp.sum(terms)
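# Illustrative sketch: the KL divergence from a standard normal to itself is
# zero, since each term 0.5 * (std^2 + mean^2 - 1 - 2 ln std) vanishes.
def _example_kl_divergence_is_zero() -> Array:
  mean = jnp.zeros(5)
  std = jnp.ones(5)
  return kl_divergence_standard_gaussian(mean, std)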
def batch_kl_divergence_standard_gaussian(mean, std) -> Array:
"""Mean KL divergence diagonal Gaussian with mean std to standard normal.
Works for batches of mean/std.
Independence means the KL is a sum of KL divergences for each dimension.
expectation_{q(x)}log prod_i q_i(x_i)/p_i(x_i)
= sum_{i=1}^{N} expectation_{q_i(x_i)} log q_i(x_i) / p_i(x_i)
So we have a sum of KL divergence between univariate Gaussians where
  p_i(x_i) is a standard normal.
  So each term is 0.5 * ((std)^2 + (mean)^2 - 1 - 2 ln (std) )
  Args:
    mean: Array of shape (batch, ndim)
    std: Array of shape (batch, ndim)
Returns:
KL-divergence Array of shape ().
"""
chex.assert_rank([mean, std], [2, 2])
chex.assert_equal_shape([mean, std])
batch_kls = jax.vmap(kl_divergence_standard_gaussian)(mean, std)
return jnp.mean(batch_kls)
def generate_binarized_images(key: RandomKey, logits: Array) -> Array:
return jax.random.bernoulli(key, jax.nn.sigmoid(logits))
def load_dataset(split: str, batch_size: int):
"""Load the dataset."""
read_config = tfds.ReadConfig(shuffle_seed=1)
ds = tfds.load(
'binarized_mnist',
split=split,
shuffle_files=True,
read_config=read_config)
ds = ds.shuffle(buffer_size=10 * batch_size, seed=1)
ds = ds.batch(batch_size)
ds = ds.prefetch(buffer_size=5)
ds = ds.repeat()
return iter(tfds.as_numpy(ds))
class ConvEncoder(hk.Module):
"""A residual network encoder with mean stdev outputs."""
def __init__(self,
num_latents: int = 20):
super().__init__()
self._num_latents = num_latents
def __call__(self, x: Array) -> Tuple[Array, Array]:
conv_a = hk.Conv2D(kernel_shape=(4, 4),
stride=(2, 2),
output_channels=16,
padding='valid')
conv_b = hk.Conv2D(kernel_shape=(4, 4),
stride=(2, 2),
output_channels=32,
padding='valid')
flatten = hk.Flatten()
sequential = hk.Sequential([conv_a,
jax.nn.relu,
conv_b,
jax.nn.relu,
flatten])
progress = sequential(x)
def get_output_params(progress_in, name=None):
flat_output = hk.Linear(self._num_latents, name=name)(progress_in)
flat_output = hk.LayerNorm(create_scale=True,
create_offset=True,
axis=1)(flat_output)
return flat_output
latent_mean = get_output_params(progress)
unconst_std_dev = get_output_params(progress)
latent_std = jax.nn.softplus(unconst_std_dev)
return latent_mean, latent_std
class ConvDecoder(hk.Module):
"""A residual network decoder with logit outputs."""
def __init__(self, image_shape: Tuple[int, int, int] = MNIST_IMAGE_SHAPE):
super().__init__()
self._image_shape = image_shape
def __call__(self,
               z: Array) -> Array:
linear_features = 7 * 7 * 32
linear = hk.Linear(linear_features)
progress = linear(z)
    # Apply layer normalization to the linear features before reshaping.
    progress = hk.LayerNorm(create_scale=True,
                            create_offset=True,
                            axis=1)(progress)
progress = jnp.reshape(progress, (-1, 7, 7, 32))
deconv_a = hk.Conv2DTranspose(
kernel_shape=(3, 3), stride=(2, 2), output_channels=64)
deconv_b = hk.Conv2DTranspose(
kernel_shape=(3, 3), stride=(2, 2), output_channels=32)
deconv_c = hk.Conv2DTranspose(
kernel_shape=(3, 3), stride=(1, 1), output_channels=1)
sequential = hk.Sequential([deconv_a,
jax.nn.relu,
deconv_b,
jax.nn.relu,
deconv_c])
progress = sequential(progress)
return progress
class ConvVAE(hk.Module):
"""A VAE with residual nets, diagonal normal q and logistic mixture output."""
def __init__(self, num_latents: int = 30,
output_shape: Tuple[int, int, int] = MNIST_IMAGE_SHAPE):
super().__init__()
self._num_latents = num_latents
self._output_shape = output_shape
self.encoder = ConvEncoder(self._num_latents)
self.decoder = ConvDecoder()
def __call__(self, x: Array) -> VAEResult:
x = x.astype(jnp.float32)
latent_mean, latent_std = self.encoder(x)
latent = latent_mean + latent_std * jax.random.normal(
hk.next_rng_key(), latent_mean.shape)
free_latent = jax.random.normal(hk.next_rng_key(), latent_mean.shape)
logits = self.decoder(latent)
free_logits = self.decoder(free_latent)
reconst_sample = jax.nn.sigmoid(logits)
sample_image = jax.nn.sigmoid(free_logits)
return VAEResult(sample_image, reconst_sample, latent_mean, latent_std,
logits)
def binary_cross_entropy_from_logits(logits: Array, labels: Array) -> Array:
"""Numerically stable implementation of binary cross entropy with logits.
For an individual term we follow a standard manipulation of the loss:
H = -label * log sigmoid(logit) - (1-label) * log (1-sigmoid(logit))
= logit - label * logit + log(1+exp(-logit))
or for logit < 0 we take a different version for numerical stability.
= - label * logit + log(1+exp(logit))
combining to avoid a conditional.
= max(logit, 0) - label * logit + log(1+exp(-abs(logit)))
Args:
logits: (batch, sample_shape) containing logits of class probs.
labels: (batch, sample_shape) containing {0, 1} class labels.
Returns:
sum of loss over all shape indices then mean of loss over batch index.
"""
chex.assert_equal_shape([logits, labels])
max_logits_zero = jax.nn.relu(logits)
negative_abs_logits = -jnp.abs(logits)
terms = max_logits_zero - logits*labels + jax.nn.softplus(negative_abs_logits)
return jnp.sum(jnp.mean(terms, axis=0))
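# Illustrative sketch: with zero logits every Bernoulli probability is 0.5, so
# each pixel contributes ln 2 and the loss sums this over the sample shape
# after averaging over the batch.
def _example_binary_cross_entropy() -> Array:
  logits = jnp.zeros((3, 4))
  labels = jnp.ones((3, 4))
  # The result is 4 * ln 2 for this (batch=3, sample_shape=4) example.
  return binary_cross_entropy_from_logits(logits, labels)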
def vae_loss(target: Array, logits: Array, latent_mean: Array,
latent_std: Array) -> Array:
log_loss = binary_cross_entropy_from_logits(logits, target)
kl_term = batch_kl_divergence_standard_gaussian(latent_mean, latent_std)
free_energy = log_loss + kl_term
return free_energy
|
annealed_flow_transport-master
|
annealed_flow_transport/vae.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for exact sampling from initial distributions."""
from typing import Tuple
import annealed_flow_transport.aft_types as tp
import jax
RandomKey = tp.RandomKey
Array = tp.Array
class NormalDistribution(object):
"""A wrapper for the univariate normal sampler."""
def __init__(self, config):
self._config = config
def __call__(self,
key: RandomKey,
num_samples: int,
sample_shape: Tuple[int]) -> Array:
batched_sample_shape = (num_samples,) + sample_shape
return jax.random.normal(key,
shape=batched_sample_shape)
class MultivariateNormalDistribution(object):
"""A wrapper for the multivariate normal sampler."""
def __init__(self, config):
self._config = config
def __call__(self, key: RandomKey, num_samples: int,
sample_shape: Tuple[int]) -> Array:
batched_sample_shape = (num_samples,) + sample_shape
return jax.random.normal(key, shape=batched_sample_shape)
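# Illustrative sketch: drawing a batch of initial samples. The config argument
# is unused by this sampler so None is passed here.
def _example_initial_sampler() -> Array:
  sampler = MultivariateNormalDistribution(config=None)
  samples = sampler(jax.random.PRNGKey(0), num_samples=4, sample_shape=(3,))
  # samples has shape (4, 3).
  return samples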
|
annealed_flow_transport-master
|
annealed_flow_transport/samplers.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for Markov transition kernels."""
from typing import Tuple
import annealed_flow_transport.aft_types as tp
import chex
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow_probability.substrates.jax as tfp
mcmc = tfp.mcmc
ConfigDict = tp.ConfigDict
Array = tp.Array
LogDensityByStep = tp.LogDensityByStep
RandomKey = tp.RandomKey
MarkovKernelApply = tp.MarkovKernelApply
Samples = tp.Samples
assert_trees_all_equal_shapes = chex.assert_trees_all_equal_shapes
class InterpolatedStepSize(object):
"""Interpolate MCMC step sizes."""
def __init__(self,
config: ConfigDict,
total_num_time_steps: int):
self._config = config
self._total_num_time_steps = total_num_time_steps
def __call__(self, time_step: int):
final_step = self._total_num_time_steps-1.
beta = time_step/final_step
return jnp.interp(beta,
jnp.array(self._config.step_times),
jnp.array(self._config.step_sizes))
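# Illustrative sketch: InterpolatedStepSize linearly interpolates between the
# configured step sizes as the annealing time runs from 0 to 1. The values
# below are arbitrary; the config fields mirror those used in configs/.
def _example_interpolated_step_size():
  import ml_collections  # Local import to keep the sketch self-contained.
  step_config = ml_collections.ConfigDict()
  step_config.step_times = [0., 0.5, 1.]
  step_config.step_sizes = [0.3, 0.2, 0.1]
  step_size = InterpolatedStepSize(step_config, total_num_time_steps=11)
  # Time steps 0, 5 and 10 give 0.3, 0.2 and 0.1 respectively.
  return step_size(0), step_size(5), step_size(10)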
def tree_add(tree_a, tree_b):
assert_trees_all_equal_shapes(tree_a, tree_b)
return jax.tree_map(lambda a, b: a+b, tree_a, tree_b)
def tree_scalar_mul(tree, scalar):
res = jax.tree_map(lambda x: x * scalar, tree)
assert_trees_all_equal_shapes(tree, res)
return res
def random_walk_metropolis(samples_in: Array, proposal_scale: Array,
log_density_by_step: LogDensityByStep,
temp_step: int, num_mh_steps: int,
key: RandomKey) -> Tuple[Array, Array]:
"""Corrected random walk Metropolis-Hastings algorithm.
Args:
samples_in: (num_batch, num_dim)
proposal_scale: Scalar representing scale of isotropic normal proposal.
log_density_by_step: Target log density.
temp_step: Step of outer annealing algorithm.
num_mh_steps: Number of Metropolis-Hastings steps.
key: Jax Random Key.
Returns:
samples_out: (num_batch, num_dim)
acceptance: Average acceptance rate of chains.
"""
chex.assert_rank(proposal_scale, 0)
num_batch = np.shape(jax.tree_util.tree_leaves(samples_in)[0])[0]
def rwm_step(previous_samples: Array, curr_key: RandomKey):
normal_key, acceptance_key = jax.random.split(curr_key)
standard_normal_tree = random_normal_like_tree(normal_key, previous_samples)
normal_deltas = tree_scalar_mul(standard_normal_tree, proposal_scale)
exponential_rvs = jax.random.exponential(key=acceptance_key,
shape=(num_batch,))
proposed_samples = tree_add(previous_samples, normal_deltas)
assert_trees_all_equal_shapes(previous_samples, proposed_samples)
log_density_proposed = log_density_by_step(temp_step, proposed_samples)
log_density_previous = log_density_by_step(temp_step, previous_samples)
delta_log_prob = log_density_proposed - log_density_previous
chex.assert_shape(delta_log_prob, (num_batch,))
is_accepted = jnp.greater(delta_log_prob, -1.*exponential_rvs)
chex.assert_shape(is_accepted, (num_batch,))
step_acceptance_rate = jnp.mean(is_accepted * 1.)
samples_next = jnp.where(is_accepted[:, None], proposed_samples,
previous_samples)
return samples_next, step_acceptance_rate
keys = jax.random.split(key, num_mh_steps)
samples_out, acceptance_rates = jax.lax.scan(rwm_step,
samples_in,
keys)
acceptance_rate = jnp.mean(acceptance_rates)
chex.assert_equal_shape((samples_out, samples_in))
chex.assert_rank(acceptance_rate, 0)
return samples_out, acceptance_rate
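# Illustrative sketch: running random_walk_metropolis on a standard normal
# target. log_density_by_step normally interpolates between densities; here a
# single fixed density is used for every annealing step.
def _example_random_walk_metropolis() -> Tuple[Array, Array]:
  def log_density_by_step(unused_step, x):
    return -0.5 * jnp.sum(jnp.square(x), axis=1)
  init_key, mcmc_key = jax.random.split(jax.random.PRNGKey(0))
  samples_in = jax.random.normal(init_key, shape=(16, 2))
  samples_out, acceptance_rate = random_walk_metropolis(
      samples_in,
      proposal_scale=jnp.array(0.5),
      log_density_by_step=log_density_by_step,
      temp_step=0,
      num_mh_steps=3,
      key=mcmc_key)
  chex.assert_equal_shape((samples_out, samples_in))
  return samples_out, acceptance_rate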
def momentum_step(samples_in: Array,
momentum_in: Array,
step_coefficient: Array,
epsilon: Array,
grad_log_density) -> Array:
"""A momentum update with variable momentum step_coefficient.
Args:
samples_in: (num_batch, num_dim)
momentum_in: (num_batch, num_dim)
step_coefficient: A Scalar which is typically either 0.5 (half step) or 1.0
epsilon: A Scalar representing the constant step size.
grad_log_density: (num_batch, num_dim) -> (num_batch, num_dim)
Returns:
momentum_out (num_batch, num_dim)
"""
chex.assert_rank((step_coefficient, epsilon), (0, 0))
assert_trees_all_equal_shapes(samples_in, momentum_in)
gradient_val = grad_log_density(samples_in)
momentum_out = tree_add(
momentum_in, tree_scalar_mul(gradient_val, step_coefficient * epsilon))
assert_trees_all_equal_shapes(momentum_in, momentum_out)
return momentum_out
def leapfrog_step(samples_in: Array,
momentum_in: Array,
step_coefficient: Array,
epsilon: Array,
grad_log_density) -> Tuple[Array, Array]:
"""A step of the Leapfrog iteration with variable momentum step_coefficient.
Args:
samples_in: (num_batch, num_dim)
momentum_in: (num_batch, num_dim)
step_coefficient: A Scalar which is typically either 0.5 (half step) or 1.0
epsilon: A Scalar representing the constant step size.
grad_log_density: (num_batch, num_dim) -> (num_batch, num_dim)
Returns:
samples_out: (num_batch, num_dim)
momentum_out (num_batch, num_dim)
"""
chex.assert_rank((step_coefficient, epsilon), (0, 0))
assert_trees_all_equal_shapes(samples_in, momentum_in)
samples_out = tree_add(samples_in, tree_scalar_mul(momentum_in, epsilon))
momentum_out = momentum_step(samples_out, momentum_in, step_coefficient,
epsilon, grad_log_density)
assert_trees_all_equal_shapes(samples_in, samples_out)
return samples_out, momentum_out
def random_normal_like_tree(key, tree):
tree_struct = jax.tree_util.tree_structure(tree)
split_keys = jax.random.split(key, tree_struct.num_leaves)
tree_keys = jax.tree_util.tree_unflatten(tree_struct, split_keys)
tree_normals = jax.tree_util.tree_map(
lambda x, y: jax.random.normal(key=y, shape=x.shape), tree, tree_keys)
return tree_normals
def hmc_step(samples_in: Array,
key: RandomKey,
epsilon: Array,
log_density,
grad_log_density,
num_leapfrog_iters: int) -> Tuple[Array, Array]:
"""A single step of Hamiltonian Monte Carlo.
Args:
samples_in: (num_batch, num_dim)
key: A Jax random key.
epsilon: A Scalar representing the constant step size.
log_density: (num_batch, num_dim) -> (num_batch,)
grad_log_density: (num_batch, num_dim) -> (num_batch, num_dim)
num_leapfrog_iters: Number of leapfrog iterations.
Returns:
samples_out: (num_batch, num_dim)
"""
chex.assert_rank(epsilon, 0)
samples_state = samples_in
momentum_key, acceptance_key = jax.random.split(key)
initial_momentum = random_normal_like_tree(momentum_key, samples_in)
# A half momentum step.
momentum_state = momentum_step(samples_state, initial_momentum,
step_coefficient=0.5,
epsilon=epsilon,
grad_log_density=grad_log_density)
def scan_step(passed_state, unused_input):
pos, mom = passed_state
new_pos, new_mom = leapfrog_step(pos, mom, step_coefficient=1.0,
epsilon=epsilon,
grad_log_density=grad_log_density)
return (new_pos, new_mom), None
state_in = (samples_state, momentum_state)
scan_length = num_leapfrog_iters - 1
# (num_leapfrog_iters - 1) whole position and momentum steps.
new_state, _ = jax.lax.scan(
scan_step, state_in, [None] * scan_length, length=scan_length)
samples_state, momentum_state = new_state
# A whole position step and half momentum step.
samples_state, momentum_state = leapfrog_step(
samples_state,
momentum_state,
step_coefficient=0.5,
epsilon=epsilon,
grad_log_density=grad_log_density)
# We don't negate the momentum here because it has no effect.
# This would be required if momentum was used other than for just the energy.
# Decide if we accept the proposed update using Metropolis correction.
def get_combined_log_densities(pos, mom):
pos_log_densities = log_density(pos)
def leaf_log_density(x):
summation_axes = tuple(range(1, len(np.shape(x))))
return -0.5 * jnp.sum(jnp.square(x), axis=summation_axes)
per_leaf_mom_log_densities = jax.tree_util.tree_map(leaf_log_density,
mom)
mom_log_densities = jax.tree_util.tree_reduce(
jnp.add, per_leaf_mom_log_densities)
chex.assert_equal_shape((pos_log_densities, mom_log_densities))
return pos_log_densities + mom_log_densities
current_log_densities = get_combined_log_densities(samples_in,
initial_momentum)
proposed_log_densities = get_combined_log_densities(samples_state,
momentum_state)
num_batch = np.shape(current_log_densities)[0]
exponential_rvs = jax.random.exponential(key=acceptance_key,
shape=(num_batch,))
delta_log_prob = proposed_log_densities - current_log_densities
chex.assert_shape(delta_log_prob, (num_batch,))
is_accepted = jnp.greater(delta_log_prob, -1.*exponential_rvs)
chex.assert_shape(is_accepted, (num_batch,))
step_acceptance_rate = jnp.mean(is_accepted * 1.)
def acceptance(a, b):
broadcast_axes = tuple(range(1, len(a.shape)))
broadcast_is_accepted = jnp.expand_dims(is_accepted,
axis=broadcast_axes)
return jnp.where(broadcast_is_accepted, a, b)
samples_next = jax.tree_util.tree_map(acceptance,
samples_state,
samples_in)
return samples_next, step_acceptance_rate
def hmc(samples_in: Array,
key: RandomKey,
epsilon: Array,
log_density,
grad_log_density,
num_leapfrog_iters: int,
num_hmc_iters: int) -> Tuple[Array, Array]:
"""Hamiltonian Monte Carlo as described in Neal 2011.
Args:
samples_in: (num_batch, num_dim)
key: A Jax random key.
epsilon: A Scalar representing the constant step size.
log_density: (num_batch, num_dim) -> (num_batch,)
grad_log_density: (num_batch, num_dim) -> (num_batch, num_dim)
num_leapfrog_iters: Number of leapfrog iterations.
num_hmc_iters: Number of steps of Hamiltonian Monte Carlo.
Returns:
samples_out: (num_batch, num_dim)
"""
step_keys = jax.random.split(key, num_hmc_iters)
def short_hmc_step(loc_samples, loc_key):
return hmc_step(loc_samples,
loc_key,
epsilon=epsilon,
log_density=log_density,
grad_log_density=grad_log_density,
num_leapfrog_iters=num_leapfrog_iters)
samples_final, acceptance_rates = jax.lax.scan(short_hmc_step, samples_in,
step_keys)
  return samples_final, jnp.mean(acceptance_rates)
def hmc_wrapped(samples_in: Samples,
key: RandomKey,
epsilon: Array,
log_density_by_step: LogDensityByStep,
temp_step: int,
num_leapfrog_iters: int,
num_hmc_iters: int
) -> Tuple[Array, Array]:
"""A wrapper for HMC that deals with all the interfacing with the codebase.
Args:
samples_in: Samples.
key: A Jax random key.
epsilon: Scalar step size.
log_density_by_step: Density at a given temperature.
temp_step: Specifies the current temperature.
num_leapfrog_iters: Number of leapfrog iterations.
num_hmc_iters: Number of Hamiltonian Monte Carlo iterations.
  Returns:
    samples_out: (num_batch, num_dim)
    acceptance: Average acceptance rate of the HMC chains.
"""
log_density = lambda x: log_density_by_step(temp_step, x)
def unbatched_log_density(unbatched_tree_in):
# Takes an unbatched tree and returns a single scalar value.
batch_one_tree = jax.tree_util.tree_map(lambda x: x[None],
unbatched_tree_in)
return log_density(batch_one_tree)[0]
grad_log_density = jax.vmap(jax.grad(unbatched_log_density))
samples_out, acceptance = hmc(
samples_in,
key=key,
epsilon=epsilon,
log_density=log_density,
grad_log_density=grad_log_density,
num_leapfrog_iters=num_leapfrog_iters,
num_hmc_iters=num_hmc_iters)
return samples_out, acceptance
class MarkovTransitionKernel(object):
"""Wraps TFP slice sampling and NUTS allowing configuration/composition."""
def __init__(self,
config: ConfigDict,
density_by_step: LogDensityByStep,
total_time_steps: int):
self._config = config
self._density_by_step = density_by_step
if hasattr(config, 'hmc_step_config'):
self._hmc_step_size = InterpolatedStepSize(
config.hmc_step_config,
total_time_steps)
if hasattr(config, 'rwm_step_config'):
self._rwm_step_size = InterpolatedStepSize(
config.rwm_step_config,
total_time_steps)
  def __call__(self, step: int, key: RandomKey,
               samples: Samples) -> Tuple[Samples, Tuple[Array, Array]]:
    """A single step of random walk Metropolis followed by HMC.
    Args:
      step: The time step of the overall algorithm.
      key: A JAX random key.
      samples: The current samples.
    Returns:
      New samples and a tuple of (HMC, RWM) average acceptance rates.
    """
if self._config.rwm_steps_per_iter != 0:
subkey, key = jax.random.split(key)
samples, rwm_acc = random_walk_metropolis(
samples, self._rwm_step_size(step), self._density_by_step,
step, self._config.rwm_steps_per_iter, subkey)
else:
rwm_acc = 1.
if self._config.hmc_steps_per_iter != 0:
samples, hmc_acc = hmc_wrapped(samples, key, self._hmc_step_size(step),
self._density_by_step, step,
self._config.hmc_num_leapfrog_steps,
self._config.hmc_steps_per_iter)
else:
hmc_acc = 1.
acceptance_tuple = (hmc_acc, rwm_acc)
return samples, acceptance_tuple
|
annealed_flow_transport-master
|
annealed_flow_transport/markov_kernel.py
|
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantum field theory (QFT) observables, particularly phi^four theory."""
from typing import Tuple
import annealed_flow_transport.aft_types as tp
import chex
import jax
import jax.numpy as jnp
import numpy as np
Array = tp.Array
def estimate_two_point_green(offset_x: Tuple[int, int],
samples: Array,
log_weights: Array) -> Array:
"""Estimate the connected two point Green's function from weighted samples.
This equals 1/V sum_y ( <phi(y) phi(y+x) > - <phi(y)><phi(y+x)>).
Where V is the lattice volume. phi are the values of the field on the lattice.
For more details see:
Equation 22. Albergo, Kanwar and Shanahan (2019) Phys. Rev. D
''Flow-based generative models for Markov chain Monte Carlo
in lattice field theory.''
Args:
offset_x: 2-tuple containing lattice offset x.
samples: Array of size (num_batch, L_x, L_y)- particle values on 2D lattice.
log_weights: Array of size (num_batch,) - particle log weights.
Returns:
Scalar estimate of two point greens function at offset_x.
"""
chex.assert_rank([samples, log_weights], [3, 1])
offset_samples = jnp.roll(samples, shift=offset_x, axis=(1, 2))
normalized_log_weights = jax.nn.softmax(log_weights)
# In this case means are all taken to be zero by symmetry.
covariance = jnp.sum(
normalized_log_weights[:, None, None] * samples * offset_samples, axis=0)
# take spatial mean 1/V sum_y ...
two_point_green = jnp.mean(covariance)
return two_point_green
def estimate_zero_momentum_green(samples: Array,
log_weights: Array,
time_offset: int) -> Array:
"""Estimate the momentum space two point Green's function at momentum zero.
We adopt the convention that the first grid axis corresponds to space.
  Usually it doesn't matter which one you choose as long as you are consistent.
It is important not to mix up lattice sizes when they are unequal.
For more details see:
Equation 23. Albergo, Kanwar and Shanahan (2019) Phys. Rev. D
''Flow-based generative models for Markov chain Monte Carlo
in lattice field theory.''
Args:
samples: Array of size (num_batch, L_x, L_y)- particle values on 2D lattice.
log_weights: Array of size (num_batch,) - particle log weights.
time_offset: Offset in lattice units in the time dimension.
Returns:
Scalar estimate of the zero momentum Green's function.
"""
chex.assert_rank([samples, log_weights], [3, 1])
num_space_indices = np.shape(samples)[2]
offset_indices = [(elem, time_offset) for elem in range(num_space_indices)]
# The complex exponential term is 1 because of zero momentum assumption.
running_total = 0.
for offset in offset_indices:
running_total += estimate_two_point_green(offset, samples, log_weights)
return running_total/num_space_indices
def estimate_time_vals(samples: Array,
log_weights: Array,
num_time_indices: int) -> Array:
"""Estimate zero momentum Green for a range of different time offsets."""
time_vals = np.zeros(num_time_indices)
for time_offset in range(num_time_indices):
time_vals[time_offset] = estimate_zero_momentum_green(samples,
log_weights,
time_offset)
return time_vals
def estimate_two_point_susceptibility(samples: Array,
log_weights: Array,
num_grid_per_dim: int) -> Array:
"""Estimate the two point susceptibility."""
total = 0.
for row_index in range(num_grid_per_dim):
for col_index in range(num_grid_per_dim):
offset = (row_index, col_index)
total += estimate_two_point_green(offset, samples, log_weights)
return total
def estimate_ising_energy_density(samples: Array,
log_weights: Array) -> Array:
"""Estimate Ising energy density."""
total = 0.
unit_displacements = [(0, 1), (1, 0)]
for offset in unit_displacements:
total += estimate_two_point_green(offset, samples, log_weights)
return total/len(unit_displacements)
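# Illustrative sketch: computing lattice observables from weighted samples.
# Random fields with uniform weights stand in for the output of a sampler on
# an 8x8 lattice.
def _example_observables() -> Tuple[Array, Array]:
  num_batch, num_grid_per_dim = 32, 8
  samples = jax.random.normal(
      jax.random.PRNGKey(0), (num_batch, num_grid_per_dim, num_grid_per_dim))
  log_weights = jnp.zeros((num_batch,))  # Uniform unnormalized log weights.
  susceptibility = estimate_two_point_susceptibility(samples, log_weights,
                                                     num_grid_per_dim)
  energy_density = estimate_ising_energy_density(samples, log_weights)
  return susceptibility, energy_density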
|
annealed_flow_transport-master
|
annealed_flow_transport/qft_observables.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An experiment setup for the phi^four theory experiment."""
import ml_collections
ConfigDict = ml_collections.ConfigDict
def get_config():
"""Returns a standard normal experiment config as ConfigDict."""
config = ConfigDict()
config.seed = 1
config.evaluation_seed = 1
config.batch_size = 2000
config.estimation_batch_size = 2000
config.sample_shape = (14*14,)
config.report_step = 1
config.vi_report_step = 100
config.num_layers = 1
config.step_logging = True
config.num_temps = 11
config.resample_threshold = 0.3
config.write_samples = True
config.stopping_criterion = 'time'
config.use_resampling = True
config.use_markov = True
config.algo = 'craft'
config.use_path_gradient = False
config.evaluation_algo = 'pimh'
config.num_evaluation_samples = 1000
config.optim_markov = False
config.craft_num_iters = 1000
config.craft_batch_size = 2000
config.vi_iters = 30000
config.checkpoint_interval = 200000
config.vi_estimator = 'importance'
optimization_config = ConfigDict()
optimization_config.free_energy_iters = 100
optimization_config.aft_step_size = 1e-3
optimization_config.craft_step_size = 1e-3
optimization_config.vi_step_size = 1e-4
config.optimization_config = optimization_config
initial_config = ConfigDict()
initial_config.density = 'MultivariateNormalDistribution'
initial_config.shared_mean = 0.
initial_config.diagonal_cov = 1.
config.initial_config = initial_config
final_config = ConfigDict()
final_config.density = 'PhiFourTheory'
final_config.bare_coupling = 5.1
final_config.mass_squared = -4.75
config.final_config = final_config
flow_config = ConfigDict()
flow_config.type = 'ConvAffineCouplingStack'
flow_config.conv_kernel_shape = [3, 3]
flow_config.conv_num_middle_layers = 1
flow_config.conv_num_middle_channels = 10
flow_config.num_coupling_layers = 2
flow_config.is_torus = True
flow_config.identity_init = True
flow_config.num_elem = config.sample_shape[0]
config.flow_config = flow_config
initial_sampler_config = ConfigDict()
initial_sampler_config.initial_sampler = 'MultivariateNormalDistribution'
config.initial_sampler_config = initial_sampler_config
mcmc_config = ConfigDict()
hmc_step_config = ConfigDict()
  # Parameters for a 14 x 14 lattice with m^2 = -4.75.
hmc_step_config.step_times = [0., 0.3, 1.]
hmc_step_config.step_sizes = [0.3, 0.15, 0.1]
nuts_step_config = ConfigDict()
nuts_step_config.step_times = [0., 0.25, 0.5, 1.]
nuts_step_config.step_sizes = [0.7, 0.7, 0.5, 0.5]
mcmc_config.hmc_step_config = hmc_step_config
mcmc_config.slice_step_config = hmc_step_config
mcmc_config.nuts_step_config = nuts_step_config
mcmc_config.hmc_steps_per_iter = 10
mcmc_config.rwm_steps_per_iter = 0
mcmc_config.hmc_num_leapfrog_steps = 10
mcmc_config.slice_steps_per_iter = 0
mcmc_config.nuts_steps_per_iter = 0
mcmc_config.slice_max_doublings = 5
mcmc_config.nuts_max_tree_depth = 4
mcmc_config.iters = 1
config.mcmc_config = mcmc_config
config.save_params = True
config.params_filename = ''
expectation_config = ConfigDict()
expectation_config.expectation_report_step = 50
two_point_config = ConfigDict()
two_point_config.name = 'TwoPointSusceptibility'
ising_config = ConfigDict()
ising_config.name = 'IsingEnergyDensity'
one_site_config = ConfigDict()
one_site_config.name = 'SingleComponentMean'
one_site_config.component_index = 0
expectation_config.configurations = [two_point_config,
ising_config]
config.expectation_config = expectation_config
return config
|
annealed_flow_transport-master
|
configs/phi_four_theory.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An experiment setup for the many well example."""
import ml_collections
ConfigDict = ml_collections.ConfigDict
def get_config():
"""Returns a many well experiment config as ConfigDict."""
config = ConfigDict()
config.seed = 1
config.batch_size = 2000
config.estimation_batch_size = 2000
config.sample_shape = (32,)
config.report_step = 1
config.vi_report_step = 100
config.use_x64 = False
config.num_layers = 1
config.step_logging = True
config.num_temps = 11
config.resample_threshold = 0.3
config.write_samples = False
config.stopping_criterion = 'time'
config.use_resampling = True
config.use_markov = True
config.algo = 'craft'
config.optim_markov = False
config.craft_num_iters = 200
config.snf_num_iters = 1000
config.fcraft_num_iters = 500
config.craft_batch_size = 2000
config.snf_batch_size = 2000
config.fcraft_batch_size = 2000
config.vi_iters = 100000
config.checkpoint_interval = 200000
config.vi_estimator = 'importance'
config.use_path_gradient = False
optimization_config = ConfigDict()
optimization_config.free_energy_iters = 500
optimization_config.aft_step_size = 1e-2
optimization_config.craft_step_size = 1e-4
optimization_config.vi_step_size = 1e-4
config.optimization_config = optimization_config
initial_config = ConfigDict()
initial_config.density = 'MultivariateNormalDistribution'
initial_config.shared_mean = 0.
initial_config.diagonal_cov = 1.
config.initial_config = initial_config
final_config = ConfigDict()
final_config.density = 'ManyWell'
config.final_config = final_config
flow_config = ConfigDict()
flow_config.type = 'SplineInverseAutoregressiveFlow'
flow_config.num_spline_bins = 10
flow_config.lower_lim = -3.
flow_config.intermediate_hids_per_dim = 30
flow_config.num_layers = 3
flow_config.identity_init = True
flow_config.bias_last = True
flow_config.upper_lim = 3.
flow_config.min_bin_size = 1e-2
flow_config.min_derivative = 1e-2
flow_config.num_elem = config.sample_shape[0]
flow_config.sample_shape = config.sample_shape
config.flow_config = flow_config
initial_sampler_config = ConfigDict()
initial_sampler_config.initial_sampler = 'MultivariateNormalDistribution'
config.initial_sampler_config = initial_sampler_config
mcmc_config = ConfigDict()
hmc_step_config = ConfigDict()
hmc_step_config.step_times = [0., 0.25, 0.5, 1.]
hmc_step_config.step_sizes = [0.3, 0.3, 0.2, 0.2]
nuts_step_config = ConfigDict()
nuts_step_config.step_times = [0., 0.25, 0.5, 1.]
nuts_step_config.step_sizes = [0.7, 0.7, 0.5, 0.5]
rwm_step_config = ConfigDict()
rwm_step_config.step_times = [0., 0.25, 0.5, 1.]
rwm_step_config.step_sizes = [0.03, 0.03, 0.03, 0.03]
mcmc_config.hmc_step_config = hmc_step_config
mcmc_config.slice_step_config = hmc_step_config
mcmc_config.nuts_step_config = nuts_step_config
mcmc_config.rwm_step_config = rwm_step_config
mcmc_config.hmc_steps_per_iter = 1
mcmc_config.use_jax_hmc = True
mcmc_config.rwm_steps_per_iter = 0
mcmc_config.hmc_num_leapfrog_steps = 10
mcmc_config.slice_steps_per_iter = 0
mcmc_config.nuts_steps_per_iter = 0
mcmc_config.slice_max_doublings = 5
mcmc_config.nuts_max_tree_depth = 4
mcmc_config.iters = 1
config.mcmc_config = mcmc_config
config.save_params = False
return config
|
annealed_flow_transport-master
|
configs/many_well.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An experiment setup for the standard normal distribution."""
import ml_collections
ConfigDict = ml_collections.ConfigDict
def get_config():
"""Returns a standard normal experiment config as ConfigDict."""
config = ConfigDict()
config.seed = 1
config.evaluation_seed = 1
config.batch_size = 2000
config.estimation_batch_size = 2000
config.sample_shape = (1,)
config.report_step = 1
config.vi_report_step = 50
config.checkpoint_interval = 500
config.num_temps = 10
config.step_logging = False
config.resample_threshold = 0.3
config.stopping_criterion = 'time'
config.use_resampling = True
config.use_markov = True
config.use_path_gradient = False
config.algo = 'craft'
config.evaluation_algo = 'pimh'
config.num_evaluation_samples = 1000
config.vi_estimator = 'importance'
config.vi_iters = 1000
config.craft_num_iters = 1000
config.snf_num_iters = 1000
config.craft_batch_size = 2000
config.snf_batch_size = 2000
optimization_config = ConfigDict()
optimization_config.free_energy_iters = 100
optimization_config.aft_step_size = 1e-2
optimization_config.craft_step_size = 1e-2
optimization_config.snf_step_size = 1e-2
optimization_config.vi_step_size = 1e-2
config.optimization_config = optimization_config
initial_config = ConfigDict()
initial_config.density = 'NormalDistribution'
initial_config.loc = 0.
initial_config.scale = 1.
config.initial_config = initial_config
final_config = ConfigDict()
final_config.density = 'NormalDistribution'
final_config.loc = 5.
final_config.scale = 1.
config.final_config = final_config
flow_config = ConfigDict()
flow_config.type = 'DiagonalAffine'
flow_config.sample_shape = config.sample_shape
config.flow_config = flow_config
mcmc_config = ConfigDict()
hmc_step_config = ConfigDict()
hmc_step_config.step_times = [0., 0.25, 0.5, 1.]
hmc_step_config.step_sizes = [1.5, 1.5, 1.5, 1.5]
mcmc_config.hmc_step_config = hmc_step_config
nuts_step_config = ConfigDict()
nuts_step_config.step_times = [0., 0.25, 0.5, 1.]
nuts_step_config.step_sizes = [2.0, 2.0, 2.0, 2.0]
mcmc_config.hmc_steps_per_iter = 10
mcmc_config.rwm_steps_per_iter = 0
mcmc_config.hmc_num_leapfrog_steps = 10
mcmc_config.use_jax_hmc = True
mcmc_config.slice_steps_per_iter = 0
mcmc_config.nuts_steps_per_iter = 0
mcmc_config.slice_max_doublings = 5
mcmc_config.nuts_max_tree_depth = 4
config.mcmc_config = mcmc_config
initial_sampler_config = ConfigDict()
initial_sampler_config.initial_sampler = 'NormalDistribution'
config.initial_sampler_config = initial_sampler_config
expectation_config = ConfigDict()
expectation_config.expectation_report_step = 50
mean_config = ConfigDict()
mean_config.name = 'SingleComponentMean'
mean_config.component_index = 0
expectation_config.configurations = [mean_config]
config.expectation_config = expectation_config
return config
|
annealed_flow_transport-master
|
configs/single_normal.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An experiment setup for a two dimensional distribution."""
import ml_collections
ConfigDict = ml_collections.ConfigDict
def get_config():
"""Returns a standard normal experiment config as ConfigDict."""
config = ConfigDict()
num_dim = 2
config.seed = 1
config.batch_size = 2000
config.craft_batch_size = 2000
config.estimation_batch_size = 2000
config.sample_shape = (num_dim,)
config.report_step = 1
config.vi_report_step = 50
config.num_temps = 5
config.step_logging = False
config.resample_threshold = 0.3
config.stopping_criterion = 'time'
config.use_resampling = True
config.use_markov = True
config.use_path_gradient = False
config.algo = 'craft'
config.vi_iters = 1000
config.craft_num_iters = 1000
config.vi_estimator = 'importance'
optimization_config = ConfigDict()
optimization_config.free_energy_iters = 1000
optimization_config.aft_step_size = 1e-3
optimization_config.vi_step_size = 1e-3
optimization_config.craft_step_size = 1e-2
config.optimization_config = optimization_config
initial_config = ConfigDict()
initial_config.density = 'MultivariateNormalDistribution'
initial_config.shared_mean = 0.
initial_config.diagonal_cov = 1.
config.initial_config = initial_config
final_config = ConfigDict()
final_config.density = 'ChallengingTwoDimensionalMixture'
config.final_config = final_config
flow_config = ConfigDict()
flow_config.type = 'SplineInverseAutoregressiveFlow'
flow_config.num_spline_bins = 10
flow_config.num_bins = 10
flow_config.intermediate_hids_per_dim = 30
flow_config.num_layers = 3
flow_config.identity_init = True
flow_config.lower_lim = -4.
flow_config.upper_lim = 4.
flow_config.min_bin_size = 1e-4
flow_config.min_derivative = 1e-4
flow_config.bias_last = True
config.flow_config = flow_config
mcmc_config = ConfigDict()
hmc_step_config = ConfigDict()
hmc_step_config.step_times = [0., 0.25, 0.5, 1.]
hmc_step_config.step_sizes = [0.5, 0.5, 0.5, 0.3]
mcmc_config.hmc_step_config = hmc_step_config
nuts_step_config = ConfigDict()
nuts_step_config.step_times = [0., 0.25, 0.5, 1.]
nuts_step_config.step_sizes = [2.0, 2.0, 2.0, 2.0]
mcmc_config.hmc_steps_per_iter = 10
mcmc_config.hmc_num_leapfrog_steps = 10
mcmc_config.rwm_steps_per_iter = 0
mcmc_config.slice_steps_per_iter = 0
mcmc_config.nuts_steps_per_iter = 0
mcmc_config.slice_max_doublings = 5
mcmc_config.nuts_max_tree_depth = 4
config.mcmc_config = mcmc_config
initial_sampler_config = ConfigDict()
initial_sampler_config.initial_sampler = 'MultivariateNormalDistribution'
config.initial_sampler_config = initial_sampler_config
return config
|
annealed_flow_transport-master
|
configs/two_dimensional_challenging.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An experiment setup for the funnel distribution."""
import ml_collections
ConfigDict = ml_collections.ConfigDict
def get_config():
"""Returns a standard normal experiment config as ConfigDict."""
config = ConfigDict()
num_dim = 10
config.seed = 1
config.batch_size = 2000
config.craft_batch_size = 2000
config.estimation_batch_size = 6000
config.sample_shape = (num_dim,)
config.report_step = 1
config.vi_report_step = 10
config.num_temps = 5
config.resample_threshold = 0.3
config.stopping_criterion = 'time'
config.write_samples = False
config.use_resampling = True
config.use_markov = True
config.use_path_gradient = False
config.algo = 'craft'
config.vi_iters = 2000
config.craft_num_iters = 2000
config.vi_estimator = 'importance'
optimization_config = ConfigDict()
optimization_config.free_energy_iters = 4000
optimization_config.aft_step_size = 1e-3
optimization_config.vi_step_size = 1e-3
optimization_config.craft_step_size = 1e-3
config.optimization_config = optimization_config
initial_config = ConfigDict()
initial_config.density = 'MultivariateNormalDistribution'
initial_config.shared_mean = 0.
initial_config.diagonal_cov = 1.
config.initial_config = initial_config
final_config = ConfigDict()
final_config.density = 'FunnelDistribution'
config.final_config = final_config
base_flow_config = ConfigDict()
base_flow_config.type = 'AffineInverseAutoregressiveFlow'
base_flow_config.intermediate_hids_per_dim = 30
base_flow_config.num_layers = 3
base_flow_config.identity_init = True
base_flow_config.bias_last = True
flow_config = ConfigDict()
flow_config.type = 'ComposedFlows'
num_stack = 1 # When comparing to VI raise this to match expressivity of AFT.
flow_config.flow_configs = [base_flow_config] * num_stack
config.flow_config = flow_config
initial_sampler_config = ConfigDict()
initial_sampler_config.initial_sampler = 'MultivariateNormalDistribution'
config.initial_sampler_config = initial_sampler_config
mcmc_config = ConfigDict()
hmc_step_config = ConfigDict()
hmc_step_config.step_times = [0., 0.25, 0.5, 0.75, 1.]
hmc_step_config.step_sizes = [0.9, 0.7, 0.6, 0.5, 0.4]
mcmc_config.hmc_step_config = hmc_step_config
nuts_step_config = ConfigDict()
nuts_step_config.step_times = [0., 0.25, 0.5, 1.]
nuts_step_config.step_sizes = [2.0, 2.0, 2.0, 2.0]
mcmc_config.hmc_steps_per_iter = 1
mcmc_config.hmc_num_leapfrog_steps = 10
slice_step_config = ConfigDict()
slice_step_config.step_times = [0., 0.25, 0.5, 0.75, 1.]
slice_step_config.step_sizes = [0.9, 0.7, 0.6, 0.5, 0.4]
mcmc_config.slice_step_config = slice_step_config
mcmc_config.slice_steps_per_iter = 0
mcmc_config.nuts_steps_per_iter = 0
mcmc_config.slice_max_doublings = 5
mcmc_config.nuts_max_tree_depth = 4
mcmc_config.rwm_steps_per_iter = 0
mcmc_config.iters = 1
config.mcmc_config = mcmc_config
return config
|
annealed_flow_transport-master
|
configs/funnel.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An experiment setup for the Cox process posterior distribution."""
import ml_collections
ConfigDict = ml_collections.ConfigDict
def get_config():
"""Returns a standard normal experiment config as ConfigDict."""
config = ConfigDict()
config.seed = 1
config.batch_size = 2000
config.estimation_batch_size = 2000
config.sample_shape = (32*32,)
config.report_step = 1
config.vi_report_step = 100
config.num_layers = 1
config.step_logging = False
config.num_temps = 11
config.resample_threshold = 0.3
config.write_samples = True
config.stopping_criterion = 'time'
config.use_resampling = True
config.use_markov = True
config.use_path_gradient = False
config.algo = 'craft'
config.craft_num_iters = 200
config.snf_num_iters = 1000
config.craft_batch_size = 2000
config.snf_batch_size = 2000
config.vi_iters = 100000
config.checkpoint_interval = 200000
config.vi_estimator = 'importance'
optimization_config = ConfigDict()
optimization_config.free_energy_iters = 500
optimization_config.aft_step_size = 1e-2
optimization_config.craft_step_size = 5e-2
optimization_config.craft_boundaries_and_scales = ({100: 1e-2},)
optimization_config.snf_step_size = 0.1
optimization_config.snf_boundaries_and_scales = ({70: 5e-2,
100: 1e-2},)
optimization_config.vi_step_size = 1e-4
config.optimization_config = optimization_config
initial_config = ConfigDict()
initial_config.density = 'MultivariateNormalDistribution'
initial_config.shared_mean = 0.
initial_config.diagonal_cov = 1.
config.initial_config = initial_config
final_config = ConfigDict()
final_config.density = 'LogGaussianCoxPines'
final_config.use_whitened = False
final_config.file_path = ''
config.final_config = final_config
flow_config = ConfigDict()
flow_config.type = 'DiagonalAffine'
flow_config.sample_shape = config.sample_shape
config.flow_config = flow_config
initial_sampler_config = ConfigDict()
initial_sampler_config.initial_sampler = 'MultivariateNormalDistribution'
config.initial_sampler_config = initial_sampler_config
mcmc_config = ConfigDict()
hmc_step_config = ConfigDict()
hmc_step_config.step_times = [0., 0.25, 0.5, 1.]
hmc_step_config.step_sizes = [0.3, 0.3, 0.2, 0.2]
nuts_step_config = ConfigDict()
nuts_step_config.step_times = [0., 0.25, 0.5, 1.]
nuts_step_config.step_sizes = [0.7, 0.7, 0.5, 0.5]
mcmc_config.hmc_step_config = hmc_step_config
mcmc_config.slice_step_config = hmc_step_config
mcmc_config.nuts_step_config = nuts_step_config
mcmc_config.hmc_steps_per_iter = 1
mcmc_config.use_jax_hmc = True
mcmc_config.rwm_steps_per_iter = 0
mcmc_config.hmc_num_leapfrog_steps = 10
mcmc_config.slice_steps_per_iter = 0
mcmc_config.nuts_steps_per_iter = 0
mcmc_config.slice_max_doublings = 5
mcmc_config.nuts_max_tree_depth = 4
config.mcmc_config = mcmc_config
config.save_params = False
return config
|
annealed_flow_transport-master
|
configs/lgcp_pines.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An experiment setup for the VAE distribution."""
import ml_collections
ConfigDict = ml_collections.ConfigDict
def get_config():
"""Returns a standard normal experiment config as ConfigDict."""
config = ConfigDict()
config.seed = 1
config.batch_size = 100
config.estimation_batch_size = 100
config.sample_shape = (30,)
config.report_step = 1
config.vi_report_step = 10
config.num_layers = 1
config.step_logging = True
config.num_temps = 3
config.resample_threshold = 0.3
config.write_samples = False
config.stopping_criterion = 'time'
config.use_resampling = True
config.use_markov = True
config.algo = 'craft'
config.vi_iters = 100000
config.vi_estimator = 'importance'
config.checkpoint_interval = 200000
config.craft_num_iters = 100
config.snf_num_iters = 500
config.craft_batch_size = 100
config.snf_batch_size = 100
optimization_config = ConfigDict()
optimization_config.free_energy_iters = 1000
optimization_config.vi_step_size = 1e-4
optimization_config.aft_step_size = 1e-3
optimization_config.craft_step_size = 1e-2
optimization_config.snf_step_size = 1e-3
  optimization_config.snf_boundaries_and_scales = ({200: 5e-4},)
  config.optimization_config = optimization_config
initial_config = ConfigDict()
initial_config.density = 'MultivariateNormalDistribution'
initial_config.shared_mean = 0.
initial_config.diagonal_cov = 1.
config.initial_config = initial_config
final_config = ConfigDict()
final_config.density = 'AutoEncoderLikelihood'
final_config.params_filename = 'annealed_flow_transport/data/vae.pickle'
final_config.image_index = 3689
config.final_config = final_config
flow_config = ConfigDict()
flow_config.type = 'DiagonalAffine'
flow_config.sample_shape = config.sample_shape
config.flow_config = flow_config
initial_sampler_config = ConfigDict()
initial_sampler_config.initial_sampler = 'MultivariateNormalDistribution'
config.initial_sampler_config = initial_sampler_config
mcmc_config = ConfigDict()
hmc_step_config = ConfigDict()
hmc_step_config.step_times = [0., 0.25, 0.5, 1.]
hmc_step_config.step_sizes = [0.15, 0.1, 0.1, 0.05]
nuts_step_config = ConfigDict()
nuts_step_config.step_times = [0., 0.25, 0.5, 1.]
nuts_step_config.step_sizes = [0.7, 0.7, 0.5, 0.5]
mcmc_config.hmc_step_config = hmc_step_config
mcmc_config.slice_step_config = hmc_step_config
mcmc_config.nuts_step_config = nuts_step_config
mcmc_config.hmc_steps_per_iter = 2
mcmc_config.use_jax_hmc = True
mcmc_config.hmc_num_leapfrog_steps = 10
mcmc_config.rwm_steps_per_iter = 0
mcmc_config.slice_steps_per_iter = 0
mcmc_config.nuts_steps_per_iter = 0
mcmc_config.slice_max_doublings = 5
mcmc_config.nuts_max_tree_depth = 4
config.mcmc_config = mcmc_config
config.save_params = False
return config
|
annealed_flow_transport-master
|
configs/vae.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script to install the dm_nevis library."""
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
with open("requirements.txt", "r") as f:
dependencies = list(map(lambda x: x.strip(), f.readlines()))
setuptools.setup(
name="dm_nevis",
version="0.1",
author="DeepMind Nevis Team",
author_email="See authors emails in paper",
description="A benchmark for continual learning.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/deepmind/dm_nevis",
packages=setuptools.find_packages(),
scripts=["dm_nevis/datasets_storage/download_dataset.py"],
classifiers=[
"Programming Language :: Python :: 3",
],
python_requires=">=3.8",
install_requires=dependencies)
|
dm_nevis-master
|
setup.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
dm_nevis-master
|
dm_nevis/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
dm_nevis-master
|
dm_nevis/benchmarker/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Aggregation of metrics in the benchmarker.
The ultimate goal of running the benchmarker is to publish metrics computed
from the prediction events from a task stream. This package provides a uniform
interface for defining the metrics that may be computed, and also provides
standard implementations for common metrics that may be used.
Metrics in the benchmarker are manipulated using a stateless dataclass of
pure functions called the MetricsAggregator. Each time a prediction event
is encountered by the environment, the predictions are fed to the metrics
aggregator, along with the prior state stored by the metrics aggregator.
This allows the metrics aggregator to keep track of all statistics over all
prediction tasks that the environment has encountered.
At the end of a benchmark run, the metrics aggregator's "compute_results"
function will be called with the state that the metrics aggregator has
accumulated up to the current point. It is in the compute_results function
that the metrics aggregator may compute and return a "final" statistical
summary over every prediction event that occurred in the whole task stream.
For full generality, the result is allowed to be any pytree.
The environment commits to running aggregate exactly once for each prediction
event that is encountered, and the metrics aggregator is allowed to log any
intermediate metrics that it wishes, to allow "online" debugging of model
progress and visualization of intermediate results before a full task stream
run has completed.
"""
import dataclasses
from typing import Callable, Iterator
import chex
from dm_nevis.benchmarker.datasets import streams
from dm_nevis.benchmarker.learners import learner_interface
State = chex.ArrayTree
Results = chex.ArrayTree
InitFn = Callable[[], State]
AggregateTrainEventFn = Callable[
[State, streams.TrainingEvent, learner_interface.ResourceUsage], State]
AggregatePredictEventFn = Callable[
[State, streams.PredictionEvent, Iterator[learner_interface.Predictions]],
State]
ComputeResultsFn = Callable[[State], Results]
@dataclasses.dataclass
class MetricsAggregator:
"""Metrics class collecting together pure functions for manipulating metrics.
Similarly to other JAX libraries, this class does not contain state
  internally, but rather provides pure functions that explicitly manipulate the
state. The state may be initialized by calling the init() function.
Attributes:
init: A function to initialize the metrics state.
    aggregate_train_event: A function to combine an existing state with a
      given training event and the resources used while training on it.
    aggregate_predict_event: A function to combine an existing state with the
      predictions made for a given prediction event.
    compute_results: A function to compute the results given the state
      observed up to this point. It is assumed to be called once all data has
      been added to the state.
"""
init: InitFn
aggregate_train_event: AggregateTrainEventFn
aggregate_predict_event: AggregatePredictEventFn
compute_results: ComputeResultsFn
def noop_metrics_aggregator() -> MetricsAggregator:
"""Creates a metrics aggregator that does nothing."""
def init():
return None
def aggregate_train_event(state, event, resources_used):
del event, resources_used
return state
def aggregate_predict_event(state, event, predictions):
del event
for _ in predictions:
continue
return state
def compute_results(state):
del state
return {}
return MetricsAggregator(init, aggregate_train_event, aggregate_predict_event,
compute_results)
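# The aggregator below is an illustrative sketch only (not part of the
# original library): it shows how the pure-function interface above can be
# assembled into a custom aggregator. The counted statistics are arbitrary.
def example_event_counting_aggregator() -> MetricsAggregator:
  """Creates an aggregator that simply counts the events it has seen."""
  def init():
    return {'train_events': 0, 'predict_events': 0}
  def aggregate_train_event(state, event, resources_used):
    del event, resources_used
    return {**state, 'train_events': state['train_events'] + 1}
  def aggregate_predict_event(state, event, predictions):
    del event
    # As in the no-op aggregator, the predictions iterator must be consumed.
    for _ in predictions:
      continue
    return {**state, 'predict_events': state['predict_events'] + 1}
  def compute_results(state):
    return dict(state)
  return MetricsAggregator(init, aggregate_train_event, aggregate_predict_event,
                           compute_results)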
|
dm_nevis-master
|
dm_nevis/benchmarker/metrics/metrics_aggregators.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module implementing standard metrics for multi-label tasks."""
from typing import Iterable, NamedTuple
from dm_nevis.benchmarker.learners import learner_interface
import numpy as np
import sklearn.metrics
class MultiLabelClassificationMetrics(NamedTuple):
"""Common classification metrics.
Attributes:
num_examples: The total number of examples used to compute the metrics.
mean_average_precision: Mean average precision.
"""
num_examples: int
mean_average_precision: float
def compute_metrics(
predictions: Iterable[learner_interface.Predictions]
) -> MultiLabelClassificationMetrics:
"""Computes standard multi-label classification metrics over predictions.
Args:
    predictions: Predictions consist of the input batch and the learner's
      output on the given input. The input batch must contain one-hot encoded
      targets in its multi_label_one_hot field, corresponding to a multi-label
      classification task. The outputs are expected to contain per-class
      probability arrays with values in the range [0, 1].
Returns:
A dataclass of classification metrics for multi-label binary classification
tasks.
"""
all_probs = []
all_targets = []
for prediction in predictions:
(multi_label_one_hot, probs) = (prediction.batch.multi_label_one_hot,
prediction.output)
probs = np.stack(probs, axis=1)
if np.amin(probs) < 0.0 or np.amax(probs) > 1.0:
raise ValueError('Probabilities must be in the range [0, 1].')
all_probs.append(probs)
all_targets.append(multi_label_one_hot)
if not all_probs:
return MultiLabelClassificationMetrics(
num_examples=0,
mean_average_precision=np.nan,
)
all_probs = np.concatenate(all_probs, axis=0)
all_targets = np.concatenate(all_targets, axis=0)
mean_average_precision = sklearn.metrics.average_precision_score(
all_targets,
all_probs,
)
return MultiLabelClassificationMetrics(
num_examples=all_probs.shape[0],
mean_average_precision=mean_average_precision,
)
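# The function below is an illustrative sketch only (not part of the original
# module). The batch contents are arbitrary placeholders; outputs are
# per-class probability arrays of shape (batch_size,), as validated above.
def _example_compute_metrics_usage() -> MultiLabelClassificationMetrics:
  """Builds a tiny two-example prediction and computes its metrics."""
  # Local import used only by this sketch.
  from dm_nevis.benchmarker.datasets import datasets
  batch = datasets.MiniBatch(
      image=None,
      label=None,
      multi_label_one_hot=np.array([[1, 0], [0, 1]]),
  )
  output = [np.array([0.9, 0.2]), np.array([0.1, 0.8])]
  prediction = learner_interface.Predictions(batch=batch, output=output)
  return compute_metrics([prediction])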
|
dm_nevis-master
|
dm_nevis/benchmarker/metrics/multi_label_classification_metrics.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module implementing standard metrics for unary classification tasks."""
from typing import Iterable, NamedTuple
import chex
from dm_nevis.benchmarker.learners import learner_interface
import jax
import numpy as np
import optax
class ClassificationMetrics(NamedTuple):
"""Common classification metrics.
Attributes:
num_examples: The total number of examples used to compute the metrics.
cross_entropy: The computed categorical cross entropy.
top_one_accuracy: The number of examples that predicted the correct class
first, normalized to [0, 1] by the number of examples.
top_five_accuracy: The number of examples that predicted the correct class
in the top five most likely classes, normalized to [0, 1] by the number of
examples.
top_one_correct: The integer number of predictions where the correct class
was predicted first.
top_five_correct: The integer number of predictions where the correct class
was predicted in the top five predictions.
"""
num_examples: np.ndarray
cross_entropy: np.ndarray
top_one_accuracy: np.ndarray
top_five_accuracy: np.ndarray
top_one_correct: np.ndarray
top_five_correct: np.ndarray
def compute_metrics(
predictions: Iterable[learner_interface.Predictions]
) -> ClassificationMetrics:
"""Computes standard unary classification metrics over predictions.
Args:
    predictions: Predictions consist of the input batch and the learner's
      output on the given input. The input batch must contain a label field
      holding single integer labels corresponding to a single label multinomial
      classification task. The outputs are expected to contain unnormalized
      logits, such as the output of a linear layer with no activations.
Returns:
A dataclass of classification metrics for single label multinomial
classification tasks.
"""
top_one_correct, top_five_correct, cross_entropy = 0.0, 0.0, 0.0
num_examples = 0
for prediction in predictions:
label, logits = prediction.batch.label, prediction.output[0]
chex.assert_rank(label, 1)
chex.assert_rank(logits, 2)
num_examples += logits.shape[0]
cross_entropy += _softmax_cross_entropy(logits, label).sum()
top_one_correct += _top_n_correct(logits, label, n=1)
top_five_correct += _top_n_correct(logits, label, n=5)
if num_examples:
top_one_accuracy = top_one_correct / num_examples
top_five_accuracy = top_five_correct / num_examples
cross_entropy = cross_entropy / num_examples
else:
top_one_accuracy = np.nan
top_five_accuracy = np.nan
cross_entropy = np.nan
return ClassificationMetrics(
num_examples=np.array(num_examples, dtype=int),
cross_entropy=np.array(cross_entropy),
top_one_accuracy=np.array(top_one_accuracy),
top_five_accuracy=np.array(top_five_accuracy),
top_one_correct=np.array(top_one_correct),
top_five_correct=np.array(top_five_correct),
)
def _top_n_correct(logits: np.ndarray, targets: np.ndarray, *,
n: int) -> np.ndarray:
"""Returns the number of predictions that predict the correct class in top n.
Args:
logits: Unnormalized logits of shape (<batch size>, <num classes>).
targets: Unary class labels of shape (<batch size>), of integer type.
    n: The maximum index of the correct prediction in the sorted logits. If n is
greater than or equal to the number of classes, then this function will
return the batch size.
Returns:
    The number of correct predictions (between 0 and <batch size>). A
    prediction is counted as correct when the target class falls within the
    top n largest logit values.
"""
if n < 1:
raise ValueError(f"n must be larger than 0, got {n}")
targets = targets.reshape((*targets.shape, 1))
top_n_predictions = np.argsort(logits, axis=-1)[:, -n:]
return np.sum(targets == top_n_predictions)
def _softmax_cross_entropy(logits: np.ndarray,
targets: np.ndarray) -> np.ndarray:
"""Computes the softmax cross entropy for unnormalized logits.
Note: This function uses jax internally, and will thus use hardware
  acceleration, if an accelerator is available.
Args:
logits: Unnormalized logits of shape (<batch size>, <num classes>). These
are interpreted as log probabilities, and could for example come from the
output of a linear layer with no activations.
targets: Unary class labels of shape (<batch size>), of integer type.
Returns:
The cross entropy computed between the softmax over the logits and the
one-hot targets.
"""
chex.assert_rank(targets, 1)
batch_size, num_classes = logits.shape
targets_one_hot = jax.nn.one_hot(targets, num_classes)
cross_entropy = optax.softmax_cross_entropy(logits, targets_one_hot)
chex.assert_shape(cross_entropy, (batch_size,))
return np.array(cross_entropy)
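# The function below is an illustrative sketch only (not part of the original
# module). It mirrors the 'correct_prediction_not_on_array_boundary' case in
# classification_metrics_test.py: the largest logit (5) sits at index 2, which
# matches the target class, so the prediction counts as correct for n=1.
def _example_top_n_correct_usage() -> np.ndarray:
  """Returns 1: the single illustrative prediction is correct at n=1."""
  return _top_n_correct(np.array([[0., 1., 5., 3.]]), np.array([2]), n=1)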
|
dm_nevis-master
|
dm_nevis/benchmarker/metrics/classification_metrics.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_nevis.benchmarker.metrics.classification_metrics."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.learners import learner_interface
from dm_nevis.benchmarker.metrics import classification_metrics
import numpy as np
def _prediction(label, logits):
"""Creates a prediction fixture for testing."""
return learner_interface.Predictions(
batch=datasets.MiniBatch(
image=None,
label=np.array(label, dtype=np.int32),
multi_label_one_hot=None,
),
output=[np.array(logits, dtype=np.float32)],
)
class ImageClassificationTest(parameterized.TestCase):
@parameterized.named_parameters(
{
'testcase_name': 'uneven_batches_all_predictions_incorrect',
'predictions': [
_prediction([0, 1], [[0.1, 0.9], [0.9, 0.1]]),
_prediction([0, 1], [[0.4, 0.6], [0.9, 0.1]]),
_prediction([1], [[1000, 0]]),
],
'expected_top_one_accuracy': 0.0,
'expected_top_five_accuracy': 1.0,
'expected_cross_entropy': 200.86228814,
'expected_top_one_correct': 0,
'expected_top_five_correct': 5,
'expected_num_examples': 5,
},
{
'testcase_name': 'correct_prediction_not_on_array_boundary',
'predictions': [_prediction([2], [[0, 1, 5, 3]])],
'expected_top_one_accuracy': 1.0,
'expected_top_five_accuracy': 1.0,
# verified: -log(exp(5) / (1 + exp(1) + exp(5) + exp(3)))
'expected_cross_entropy': 0.14875513,
'expected_top_one_correct': 1,
'expected_top_five_correct': 1,
'expected_num_examples': 1,
},
{
          'testcase_name': 'mixed_results_within_a_single_batch',
'predictions': [
_prediction([2, 3], [[0, -1, 5, -3], [0, 1, 5, 3]]),
_prediction([3], [[0, 0, 0, 1]]),
],
'expected_top_one_accuracy': 2 / 3,
'expected_top_five_accuracy': 1.0,
'expected_cross_entropy': 0.96731003,
'expected_top_one_correct': 2,
'expected_top_five_correct': 3,
'expected_num_examples': 3,
},
{
'testcase_name': 'top_five_edge_case_1',
'predictions': [_prediction([0], [[0, 5, 1, 3, 2, 4]])],
'expected_top_one_accuracy': 0.0,
'expected_top_five_accuracy': 0.0,
'expected_cross_entropy': 5.45619345,
'expected_top_one_correct': 0,
'expected_top_five_correct': 0,
'expected_num_examples': 1,
},
{
'testcase_name': 'top_five_edge_case_2',
'predictions': [_prediction([2], [[0, 5, 1, 3, 2, 4]])],
'expected_top_one_accuracy': 0.0,
'expected_top_five_accuracy': 1.0,
'expected_cross_entropy': 4.45619345,
'expected_top_one_correct': 0,
'expected_top_five_correct': 1,
'expected_num_examples': 1,
},
{
'testcase_name': 'no_predictions',
'predictions': [],
'expected_top_one_accuracy': np.nan,
'expected_top_five_accuracy': np.nan,
'expected_cross_entropy': np.nan,
'expected_top_one_correct': 0,
'expected_top_five_correct': 0,
'expected_num_examples': 0,
},
)
def test_compute_metrics(self, predictions, expected_top_one_accuracy,
expected_top_five_accuracy, expected_cross_entropy,
expected_top_one_correct, expected_top_five_correct,
expected_num_examples):
m = classification_metrics.compute_metrics(predictions)
np.testing.assert_allclose(m.top_one_accuracy, expected_top_one_accuracy)
np.testing.assert_allclose(m.top_five_accuracy, expected_top_five_accuracy)
np.testing.assert_allclose(m.cross_entropy, expected_cross_entropy)
np.testing.assert_allclose(m.num_examples, expected_num_examples)
np.testing.assert_allclose(m.top_one_correct, expected_top_one_correct)
np.testing.assert_allclose(m.top_five_correct, expected_top_five_correct)
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
dm_nevis/benchmarker/metrics/classification_metrics_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
dm_nevis-master
|
dm_nevis/benchmarker/metrics/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_nevis.benchmarker.metrics.multi_label_classification_metrics."""
import math
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import tasks
from dm_nevis.benchmarker.learners import learner_interface
from dm_nevis.benchmarker.metrics import multi_label_classification_metrics
import numpy as np
TASK = tasks.TaskKey('task', tasks.TaskKind.MULTI_LABEL_CLASSIFICATION,
tasks.MultiLabelClassificationMetadata(num_classes=4))
class MultiLabelClassificationMetrics(parameterized.TestCase):
def test_mean_average_precision(self):
gen = np.random.default_rng(0)
num_classes = 2
batches = [
datasets.MiniBatch(
image=gen.standard_normal(size=(1000, 4, 4, 3)),
multi_label_one_hot=gen.integers(
size=(1000, 4), low=0, high=num_classes),
label=None,
),
datasets.MiniBatch(
image=gen.standard_normal(size=(300, 4, 4, 3)),
multi_label_one_hot=gen.integers(
size=(300, 4), low=0, high=num_classes),
label=None,
),
]
predictions = []
for batch in batches:
output = [
np.random.uniform(size=(batch.image.shape[0],))
for _ in range(num_classes)
]
predictions.append(
learner_interface.Predictions(batch=batch, output=output))
metrics = multi_label_classification_metrics.compute_metrics(predictions)
self.assertEqual(metrics.num_examples, 1300)
# The mAP should be approximately 0.5 for randomly sampled data.
self.assertAlmostEqual(metrics.mean_average_precision, 0.5, delta=0.05)
def test_empty_predictions(self):
metrics = multi_label_classification_metrics.compute_metrics([])
self.assertEqual(metrics.num_examples, 0)
self.assertTrue(math.isnan(metrics.mean_average_precision))
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
dm_nevis/benchmarker/metrics/multi_label_classification_metrics_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define interface for streams.
Streams are defined to be sequences of datasets containing either train or
predict entries. Each of these has a description, and a key which may be used
to instantiate the relevant dataset.
"""
from typing import Any, Callable, Iterable, Iterator, Mapping, NamedTuple, Sequence, Union, Protocol
from absl import logging
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import tasks
DatasetKey = str
class TrainingEvent(NamedTuple):
"""A stream event corresponding to training a learner with new data.
Attributes:
train_dataset_key: The main default dataset to use for training. This does
not include data from the `dev_dataset_key` dataset given below.
train_and_dev_dataset_key: This is the full training dataset to be used by
learners that wish to use their own logic to define a dev / train split.
dev_dataset_key: A predefined dev (validation) dataset, to be used within a
learner's train step for hyperparameter searching.
"""
train_dataset_key: DatasetKey
train_and_dev_dataset_key: DatasetKey
dev_dataset_key: DatasetKey
class PredictionEvent(NamedTuple):
"""A stream event corresponding to running prediction with a learner.
Attributes:
dataset_key: The dataset key corresponding to either the `test` or
`dev-test` split of the data (depending on the stream being executed).
These events are used to measure the performance of learners on unseen
data.
"""
dataset_key: DatasetKey
Event = Union[TrainingEvent, PredictionEvent]
class Stream(Protocol):
def get_dataset_by_key(self, dataset_key: str) -> datasets.Dataset:
...
def events(self) -> Iterator[Event]:
...
class FilteredStream:
"""A stream for wrapping other streams, and removing unsupported tasks.
This is provided to test learners that do not support all tasks in the input
stream.
"""
def __init__(
self,
stream_ctor: Callable[..., Stream],
supported_task_kinds: Iterable[tasks.TaskKind],
**stream_kwargs: Mapping[str, Any],
):
self._stream = stream_ctor(**stream_kwargs)
self._supported_task_kinds = set(supported_task_kinds)
def get_dataset_by_key(self, dataset_key: str) -> datasets.Dataset:
result = self._stream.get_dataset_by_key(dataset_key)
assert result.task_key.kind in self._supported_task_kinds
return result
def events(self) -> Iterator[Event]:
"""Returns an iterator over the (filtered) stream events."""
for event in self._stream.events():
# This relies on datasets having consistent task_keys
# across all of the keys. If this assumption were to fail, an assertion
# error would be raised in get_dataset_by_key() above.
if isinstance(event, PredictionEvent):
dataset = self._stream.get_dataset_by_key(event.dataset_key)
else:
dataset = self._stream.get_dataset_by_key(event.train_dataset_key)
if dataset.task_key.kind not in self._supported_task_kinds:
logging.warning("Skipping unsupported event: %s, task key: %s", event,
dataset.task_key)
continue
yield event
def all_dataset_keys(event: Event) -> Sequence[DatasetKey]:
"""Returns all dataset keys for an event."""
if isinstance(event, TrainingEvent):
return [
event.train_dataset_key,
event.train_and_dev_dataset_key,
event.dev_dataset_key,
]
elif isinstance(event, PredictionEvent):
return [event.dataset_key]
raise ValueError(f"Unknown event type: {type(event)}")
|
dm_nevis-master
|
dm_nevis/benchmarker/datasets/streams.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module to define the available tasks."""
import enum
from typing import Any, Dict, NamedTuple, Union
class TaskKind(enum.Enum):
CLASSIFICATION = 0
MULTI_LABEL_CLASSIFICATION = 1
class ClassificationMetadata(NamedTuple):
num_classes: int
class MultiLabelClassificationMetadata(NamedTuple):
num_classes: int
TaskMetadata = Union[ClassificationMetadata, MultiLabelClassificationMetadata]
class TaskKey(NamedTuple):
"""A hashable key to uniquely identify a task.
Task keys must uniquely define a task, and provide the minimal information
required to initialize a prediction head.
Attributes:
name: The (unique) name of the task, which may be shared across multiple
datasets if they define matching tasks.
kind: The kind of task (such as classification).
metadata: The metadata of the task.
"""
name: str
kind: TaskKind
metadata: TaskMetadata
@classmethod
def from_dict(cls, d: Dict[str, Any]) -> "TaskKey":
"""Deserializes a dict into a TaskKey."""
if d["kind"] == "Classification":
return cls(
name=d["name"],
kind=TaskKind.CLASSIFICATION,
metadata=ClassificationMetadata(num_classes=d["num_classes"]))
elif d["kind"] == "MultiLabelClassification":
return cls(
name=d["name"],
kind=TaskKind.MULTI_LABEL_CLASSIFICATION,
metadata=MultiLabelClassificationMetadata(
num_classes=d["num_classes"]))
else:
raise ValueError("Deserialization failed")
def to_dict(self) -> Dict[str, Any]:
"""Serializes a TaskKey into a dictionary."""
if self.kind == TaskKind.CLASSIFICATION:
return {
"kind": "Classification",
"name": self.name,
"num_classes": self.metadata.num_classes,
}
elif self.kind == TaskKind.MULTI_LABEL_CLASSIFICATION:
return {
"kind": "MultiLabelClassification",
"name": self.name,
"num_classes": self.metadata.num_classes,
}
else:
raise ValueError("Unknown TaskKind")
|
dm_nevis-master
|
dm_nevis/benchmarker/datasets/tasks.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_nevis.benchmarker.datasets.dataset_builders."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.benchmarker.datasets import dataset_builders
class DatasetBuildersTest(parameterized.TestCase):
@parameterized.parameters([
dict(
outer_start=0,
inner_start=0,
outer_end=0,
inner_end=0,
expected=(0, 0)),
dict(
outer_start=None,
inner_start=None,
outer_end=0,
inner_end=0,
expected=(None, 0)),
dict(
outer_start=None,
inner_start=None,
outer_end=None,
inner_end=None,
expected=(None, None)),
dict(
outer_start=5,
inner_start=5,
outer_end=20,
inner_end=10,
expected=(10, 15)),
dict(
outer_start=3,
inner_start=5,
outer_end=30,
inner_end=10,
expected=(8, 13)),
dict(
outer_start=3,
inner_start=5,
outer_end=12,
inner_end=10,
expected=(8, 12)),
dict(
outer_start=3,
inner_start=5,
outer_end=6,
inner_end=10,
expected=(6, 6))
])
def test_combine_indices(self, outer_start, inner_start, outer_end, inner_end,
expected):
start, end = dataset_builders.combine_indices(
outer_start=outer_start,
inner_start=inner_start,
outer_end=outer_end,
inner_end=inner_end)
self.assertEqual(start, expected[0])
self.assertEqual(end, expected[1])
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
dm_nevis/benchmarker/datasets/dataset_builders_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers to unify the interface of existing datasets."""
import collections
import contextlib
import threading
from typing import Any, Callable, Iterator, NamedTuple, Optional, Tuple
from absl import logging
from dm_nevis.benchmarker.datasets import datasets
import tensorflow as tf
import tensorflow_datasets as tfds
_LOCK = threading.RLock()
_DATASET_NAME_TO_BUILDER_LOCK = collections.defaultdict(threading.RLock)
class TFDSMetadata(NamedTuple):
num_examples: Optional[int]
def tfds_dataset_builder_fn(
tfds_name: str,
*,
split: str,
start: Optional[int],
end: Optional[int],
shuffle_buffer_size: int,
to_minibatch_fn: Callable[[Any], datasets.MiniBatch],
) -> Tuple[datasets.DatasetBuilderFn, TFDSMetadata]:
"""Provides a standard way to initialize a builder_fn from a tfds dataset.
TODO: Explicit test for this function that uses a mock dataset.
  For efficiency, slicing indices are materialized and combined lazily when
  the dataset is actually constructed; this allows the dataset to be
  constructed as efficiently as possible.
Args:
tfds_name: The dataset name to load from tfds.
split: The name of the split to load.
start: The start index in the underlying data.
end: The maximum end index in the underlying data.
shuffle_buffer_size: The size of the shuffle buffer to use.
to_minibatch_fn: A function to be mapped to the underlying dataset and
create a datasets.MiniBatch matching the other datasets.
Returns:
a dataset builder function, along with metadata about the dataset.
"""
builder = tfds.builder(tfds_name)
metadata = _metadata_from_tfds_info(split, start, end, builder.info)
outer_start, outer_end = start, end
del start, end
def builder_fn(shuffle: bool,
start: Optional[int] = None,
end: Optional[int] = None) -> tf.data.Dataset:
indices = combine_indices(outer_start, start, outer_end, end)
split_with_indices = _slice_str(split, *indices)
with _lock_dataset_builder(tfds_name):
builder.download_and_prepare()
ds = builder.as_dataset(split=split_with_indices, shuffle_files=shuffle)
if shuffle:
ds = ds.shuffle(shuffle_buffer_size)
return ds.map(
to_minibatch_fn,
num_parallel_calls=tf.data.experimental.AUTOTUNE,
deterministic=not shuffle)
return builder_fn, metadata
def combine_indices(
outer_start: Optional[int], inner_start: Optional[int],
outer_end: Optional[int],
inner_end: Optional[int]) -> Tuple[Optional[int], Optional[int]]:
"""Combine starts and ends together.
For an underlying sequence, this function combines an outer set of start:end
and an inner set of start:end indices together, so that they may be applied
in a single operation.
Semantically: first the outer_start:outer_end sequence is selected,
and then the inner_start:inner_end sequence is sampled from the result.
In the diagram below, the returned (start, end) region is represented by
the X's.
|==============================================|
^ ^
|================XXXXXXXXXX=========|
| ^ ^ |
` outer_start | | ` outer_end
| |
| |
inner_start ' ` inner_end
Args:
outer_start: Optional start index in input sequence.
inner_start: Optional start index in input sequence, relative to
outer_start.
outer_end: Optional end index in input sequence.
inner_end: Optional end index in input sequence, relative to outer_start.
Returns:
    The region combining the start and end indices together.
"""
# TODO: Support negative indices.
assert (outer_start or 0) >= 0 and (inner_start or 0) >= 0
assert (outer_end or 0) >= 0 and (inner_end or 0) >= 0
combined_start = None
if outer_start is not None or inner_start is not None:
combined_start = (outer_start or 0) + (inner_start or 0)
ends = []
if outer_end is not None:
ends.append(outer_end)
if inner_end is not None:
ends.append((outer_start or 0) + inner_end)
if ends:
combined_end = min(ends)
else:
combined_end = None
if combined_end is not None and combined_start is not None:
combined_start = min(combined_start, combined_end)
return combined_start, combined_end
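# The function below is an illustrative sketch only (not part of the original
# module). It mirrors a case from dataset_builders_test.py: selecting inner
# indices 5:10 within an outer window starting at 3 and capped at 12 yields
# the combined region 8:12 in the underlying sequence.
def _example_combine_indices_usage() -> Tuple[Optional[int], Optional[int]]:
  """Returns (8, 12) for the worked example described above."""
  return combine_indices(
      outer_start=3, inner_start=5, outer_end=12, inner_end=10)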
def _metadata_from_tfds_info(split: str, start: int, end: int,
info: tfds.core.DatasetInfo) -> TFDSMetadata:
try:
split_info = info.splits[_slice_str(split, start, end)]
except KeyError:
logging.warning("Cannot extract info for split. Is this mock data?")
return TFDSMetadata(num_examples=None)
return TFDSMetadata(num_examples=split_info.num_examples)
def _slice_str(split: str, start: Optional[int], end: Optional[int]) -> str:
if start is None and end is None:
return split
return f"{split}[{start or ''}:{'' if end is None else end}]"
@contextlib.contextmanager
def _lock_dataset_builder(dataset_name: str) -> Iterator[None]:
"""Provides a process-level mutex around tfds dataset builders.
tfds download_and_prepare() appears to give errors when called concurrently.
This mutex provides a process-level lock for each dataset, so that
single-host single-process binaries can avoid this problem. In the case
of multi-process experiments, this strategy may no longer be sufficient.
Args:
dataset_name: The name of the dataset to acquire a lock for.
Yields:
A context manager to protect access to the given resource.
"""
with _LOCK:
lock = _DATASET_NAME_TO_BUILDER_LOCK[dataset_name]
try:
lock.acquire()
yield
finally:
lock.release()
|
dm_nevis-master
|
dm_nevis/benchmarker/datasets/dataset_builders.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface to access datasets."""
from typing import Any, NamedTuple, Optional, Union, Protocol
import chex
from dm_nevis.benchmarker.datasets import tasks
import numpy as np
import tensorflow as tf
Array = Union[tf.Tensor, np.ndarray]
class DatasetBuilderFn(Protocol):
def __call__(self,
*,
shuffle: bool,
start: Optional[int] = None,
end: Optional[int] = None) -> tf.data.Dataset:
"""A function to build a tf.data.Dataset with an offset and max length.
Args:
shuffle: Whether or not to shuffle the data within the sampled region.
start: Start at this index in the underlying dataset sequence
end: Stop reading if the index in the underlying stream reaches this
value.
Returns:
      A tf.data.Dataset over ``datasets.MiniBatch`` objects, each holding a
      single unbatched example.
"""
class Dataset(NamedTuple):
"""A handle for building a tf.data.Dataset.
Attributes:
    builder_fn: A pure function to instantiate the dataset. Allows specifying
      basic operations, such as enabling shuffling and reading from specific
sub-sequences of the underlying data. Each time the builder is called, a
new separate dataset reader context is created, so users may call this
multiple times to obtain separate dataset readers.
    task_key: Identifies the task that is contained in this dataset.
    num_examples: If available, provides the number of examples in the
      iterable dataset constructed by the builder_fn.
"""
builder_fn: DatasetBuilderFn
task_key: tasks.TaskKey
num_examples: Optional[int]
@chex.dataclass
class MiniBatch:
"""A shared MiniBatch representation for all tasks and datasets.
By definition, a minibatch may have any number of batch dimensions, including
zero batch dimensions (which is the default case for datasets returned by
dataset builder functions).
Attributes:
image: If the dataset has an image, it will be stored here.
label: The task specific label, if available.
multi_label_one_hot: Multi-label in a one-hot format, if available.
"""
image: Optional[Array]
label: Optional[Any]
multi_label_one_hot: Optional[Any]
def __repr__(self):
return _batch_repr(self)
def _batch_repr(batch: MiniBatch) -> str:
"""Writes a human-readable representation of a batch to a string."""
feats = []
if batch.image is not None:
feats.append(f"image ({batch.image.shape})")
parts = [f"features: {', '.join(feats)}"]
if batch.label is not None:
parts.append(f"label: {batch.label}")
if batch.multi_label_one_hot is not None:
parts.append(f"multi_label_one_hot: {batch.multi_label_one_hot}")
return f"<MiniBatch: {', '.join(parts)}>"
|
dm_nevis-master
|
dm_nevis/benchmarker/datasets/datasets.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
dm_nevis-master
|
dm_nevis/benchmarker/datasets/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_nevis.benchmarker.datasets.tasks."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.benchmarker.datasets import tasks
class TasksTest(parameterized.TestCase):
@parameterized.parameters([
dict(task_key=tasks.TaskKey(
name="task1", kind=tasks.TaskKind.CLASSIFICATION,
metadata=tasks.ClassificationMetadata(num_classes=10))),
dict(task_key=tasks.TaskKey(
name="task2", kind=tasks.TaskKind.MULTI_LABEL_CLASSIFICATION,
metadata=tasks.MultiLabelClassificationMetadata(num_classes=10))),
])
def test_serialization_roundtrip(self, task_key):
d = task_key.to_dict()
task_key_restored = tasks.TaskKey.from_dict(d)
self.assertEqual(task_key, task_key_restored)
if __name__ == "__main__":
absltest.main()
|
dm_nevis-master
|
dm_nevis/benchmarker/datasets/tasks_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_nevis.benchmarker.streams.streams."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.benchmarker.datasets import streams
from dm_nevis.benchmarker.datasets import tasks
from dm_nevis.benchmarker.datasets import test_stream
class StreamsTest(parameterized.TestCase):
def test_filtered_stream(self):
stream = streams.FilteredStream(
test_stream.TestStream,
supported_task_kinds=[tasks.TaskKind.CLASSIFICATION])
self.assertLen(list(stream.events()), 4)
stream = streams.FilteredStream(
test_stream.TestStream,
supported_task_kinds=[tasks.TaskKind.MULTI_LABEL_CLASSIFICATION])
self.assertEmpty(list(stream.events()))
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
dm_nevis/benchmarker/datasets/streams_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_nevis.streams.test_stream."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.benchmarker.datasets import streams
from dm_nevis.benchmarker.datasets import test_stream
import tensorflow_datasets as tfds
class TestStreamTest(parameterized.TestCase):
def test_test_stream(self):
stream = test_stream.TestStream()
for event in stream.events():
for key in streams.all_dataset_keys(event):
dataset = stream.get_dataset_by_key(key)
ds = dataset.builder_fn(shuffle=False)
examples = list(tfds.as_numpy(ds))
self.assertLen(examples, dataset.num_examples)
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
dm_nevis/benchmarker/datasets/test_stream_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines a stream where all data is constructed at runtime.
This dataset has been created for use in unit tests.
"""
from typing import Iterator
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import streams
from dm_nevis.benchmarker.datasets.builders import test_dataset
class TestStream:
"""An stream over in-memory test datasets."""
def __init__(self):
self._datasets_by_key = {
'train_0_10':
test_dataset.get_dataset(split='train', start=0, end=5),
'test':
test_dataset.get_dataset(split='test', start=0, end=5),
'train_10_20':
test_dataset.get_dataset(split='train', start=2, end=7),
}
self._events = [
streams.TrainingEvent('train_0_10', 'train_0_10', 'train_0_10'),
streams.PredictionEvent('test'),
streams.TrainingEvent('train_10_20', 'train_10_20', 'train_10_20'),
streams.PredictionEvent('test'),
]
def get_dataset_by_key(self,
dataset_key: streams.DatasetKey) -> datasets.Dataset:
return self._datasets_by_key[dataset_key]
def events(self) -> Iterator[streams.Event]:
return iter(self._events)
|
dm_nevis-master
|
dm_nevis/benchmarker/datasets/test_stream.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset builders for COIL100."""
from typing import Optional
from dm_nevis.benchmarker.datasets import dataset_builders
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import tasks
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
DATASET_NAME = "coil100"
TASK_METADATA = tasks.ClassificationMetadata(num_classes=100)
TASK_KEY = tasks.TaskKey(DATASET_NAME, tasks.TaskKind.CLASSIFICATION,
TASK_METADATA)
SHUFFLE_BUFFER_SIZE = 10_000
# Angle ranges are between 0 and 71, since each unit corresponds to an increment
# of 5 degrees (360/5=72). For instance, 0 means between 0 and 4, 1 between 5
# and 9, etc.
# We take frontal views (around 0 degrees) for training, side views for dev
# and dev-test (on the opposite side), and use all the remaining views for
# test.
NUM_ANGLE_RANGES = 72
NUM_OBJS = 100
SPLIT_ANGLE_RANGES = {"train": [70, 71, 0, 1, 2], "dev": [16, 17, 18, 19, 20],
"dev_test": [50, 51, 52, 53, 54]}
def _keep(i: int) -> bool:
return (i not in SPLIT_ANGLE_RANGES["train"] and
(i not in SPLIT_ANGLE_RANGES["dev"])
and (i not in SPLIT_ANGLE_RANGES["dev_test"]))
SPLIT_ANGLE_RANGES["test"] = [i for i in range(NUM_ANGLE_RANGES) if _keep(i)]
SPLIT_ANGLE_RANGES["train_and_dev"] = SPLIT_ANGLE_RANGES[
"train"] + SPLIT_ANGLE_RANGES["dev"]
def get_dataset(split: str,
*,
outer_start: Optional[int] = None,
outer_end: Optional[int] = None) -> datasets.Dataset:
"""Get the COIL100 dataset."""
builder = tfds.builder(DATASET_NAME)
  # Since there are 100 images per angle range (one per object), we compute
  # the number of samples for each split.
# TODO: Support use of start and stop indexes.
num_samples = NUM_OBJS * len(SPLIT_ANGLE_RANGES[split])
metadata = dataset_builders.TFDSMetadata(num_samples)
def builder_fn(shuffle: bool,
start: Optional[int] = None,
end: Optional[int] = None) -> tf.data.Dataset:
# start/end are used to slice a dataset in the stream (done by
# the learner), while outer_start/outer_end are used to slice a dataset
# while constructing a stream (done by the designer of the stream).
builder.download_and_prepare()
ds = builder.as_dataset(split="train", shuffle_files=False)
split_array = np.zeros((NUM_ANGLE_RANGES,)) # (0, …,0)
# Turn the list of angle ranges into a binary vector with 1's indicating
# which angle ranges we select for that split.
# For instance, there is going to be a 1 in the first position if there are
# images with views between [0, 4] degrees.
np.put(split_array, SPLIT_ANGLE_RANGES[split], 1) # (0,0, 1, 0, 0, 0, 1..)
def _filter_fn(example):
angle = example["angle_label"] # integer from 0 to NUM_ANGLE_RANGES - 1
result = tf.gather(tf.convert_to_tensor(split_array), angle) # -> 0 | 1
return tf.cast(result, tf.bool)
ds = ds.filter(_filter_fn) # leave the elements with desired angle
# Slice if needed.
indices = dataset_builders.combine_indices(outer_start, start, outer_end,
end)
# NOTE: Do not shuffle the data. Dataset needs to be deterministic for this
# construction to work.
if indices[0] is not None:
ds = ds.skip(indices[0])
if indices[1] is not None:
ds = ds.take(indices[1] - (indices[0] or 0))
if shuffle:
# Note: We entirely rely on the shuffle buffer to randomize the order.
ds = ds.shuffle(SHUFFLE_BUFFER_SIZE)
def _to_minibatch_fn(data) -> datasets.MiniBatch:
return datasets.MiniBatch(
image=data["image"],
label=data["object_id"],
multi_label_one_hot=None,
)
return ds.map(_to_minibatch_fn)
return datasets.Dataset(
builder_fn=builder_fn,
task_key=TASK_KEY,
num_examples=metadata.num_examples)
|
dm_nevis-master
|
dm_nevis/benchmarker/datasets/builders/coil100.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_nevis.benchmarker.datasets.builders.test_dataset."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.benchmarker.datasets.builders import test_dataset
import tensorflow_datasets as tfds
class TestDatasetTest(parameterized.TestCase):
def test_get_test_dataset(self):
with self.subTest('test_split'):
dataset = test_dataset.get_dataset(split='test')
self.assertEqual(dataset.num_examples,
test_dataset.SPLIT_DEFAULTS['test']['num_examples'])
ds = dataset.builder_fn(shuffle=True, start=3, end=9)
ds = ds.batch(batch_size=2)
batches = list(tfds.as_numpy(ds))
self.assertLen(batches, 3)
with self.subTest('train_split'):
dataset = test_dataset.get_dataset(split='train')
ds = dataset.builder_fn(shuffle=False)
examples = list(tfds.as_numpy(ds))
self.assertLen(examples,
test_dataset.SPLIT_DEFAULTS['train']['num_examples'])
with self.subTest('val_sub_split'):
dataset = test_dataset.get_dataset(split='val', start=3, end=10)
self.assertEqual(dataset.num_examples, 10 - 3)
elements = list(tfds.as_numpy(dataset.builder_fn(shuffle=False)))
self.assertLen(elements, dataset.num_examples)
ds = dataset.builder_fn(shuffle=True, start=1, end=3)
examples = list(tfds.as_numpy(ds))
self.assertLen(examples, 2)
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
dm_nevis/benchmarker/datasets/builders/test_dataset_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
dm_nevis-master
|
dm_nevis/benchmarker/datasets/builders/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset builders for DomainNet."""
from typing import List, Optional, Tuple
from dm_nevis.benchmarker.datasets import dataset_builders
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import tasks
from dm_nevis.benchmarker.datasets.builders import tfds_builder
import tensorflow as tf
import tensorflow_datasets as tfds
DATASET_NAME = 'domainnet'
DEFAULT_SHUFFLE_BUFFER_SIZE = 5_000
TASK_METADATA = tasks.ClassificationMetadata(num_classes=345)
TRAIN_DOMAINS = ['real', 'painting', 'clipart', 'quickdraw', 'infograph']
TEST_DOMAIN_DATASET = DATASET_NAME + '/sketch'
TASK_KEY = tasks.TaskKey(DATASET_NAME, tasks.TaskKind.CLASSIFICATION,
TASK_METADATA)
def merge_datasets(all_datasets: List[tf.data.Dataset]) -> tf.data.Dataset:
assert all_datasets, 'The list of datasets to be merged is empty!'
ds = all_datasets[0]
for ds_i in all_datasets[1:]:
ds = ds.concatenate(ds_i)
return ds
def get_dataset_from_domains(
domain_split_ls: List[Tuple[str, str]]) -> datasets.Dataset:
"""Gets a dataset given a list of (domain, split) tuples."""
label_key = 'label'
to_minibatch_fn = lambda x: _to_minibatch_single_label(x, label_key)
num_examples = 0
num_examples_ls = []
builder_ls = []
# Compute the num_examples and collect builders
for curr_domain, curr_split in domain_split_ls:
domain_dataset = f'{DATASET_NAME}/{curr_domain}'
builder = tfds.builder(domain_dataset)
builder_ls.append(builder)
num_examples_ls.append(builder.info.splits[curr_split].num_examples)
num_examples += builder.info.splits[curr_split].num_examples
def builder_fn(shuffle: bool,
start: Optional[int] = None,
end: Optional[int] = None) -> tf.data.Dataset:
all_datasets = []
start = 0 if start is None else start
end = num_examples if end is None else end
for count, (_, curr_split) in enumerate(domain_split_ls):
curr_domain_start = sum(num_examples_ls[:count])
curr_domain_end = sum(num_examples_ls[:count + 1])
# if the indices overlap with the current domain
if start < curr_domain_end and end > curr_domain_start:
local_start = max(start, curr_domain_start) - curr_domain_start
local_end = min(end, curr_domain_end) - curr_domain_start
indices = dataset_builders.combine_indices(None, local_start, None,
local_end)
split_with_indices = _slice_str(curr_split, *indices)
builder_ls[count].download_and_prepare()
all_datasets.append(builder_ls[count].as_dataset(
split=split_with_indices, shuffle_files=shuffle))
# concatenate the datasets from different domains
ds = merge_datasets(all_datasets)
if shuffle:
ds = ds.shuffle(DEFAULT_SHUFFLE_BUFFER_SIZE)
return ds.map(to_minibatch_fn)
return datasets.Dataset(
builder_fn=builder_fn, task_key=TASK_KEY, num_examples=num_examples)
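# Worked example of the index arithmetic above, added for illustration: with
# per-domain sizes num_examples_ls = [100, 50] and a call to
# builder_fn(shuffle=False, start=80, end=120), the first domain contributes
# its local slice [80:100] and the second domain contributes [0:20]; the two
# slices are then concatenated in order.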
def get_dataset(split: str) -> datasets.Dataset:
"""Gets the DomainNet dataset."""
if split == 'dev_test':
dataset = tfds_builder.get_dataset(TEST_DOMAIN_DATASET, split='train')
return dataset._replace(task_key=TASK_KEY)
elif split == 'test':
dataset = tfds_builder.get_dataset(TEST_DOMAIN_DATASET, split='test')
return dataset._replace(task_key=TASK_KEY)
elif split == 'dev':
domain_splits = [(domain, 'test') for domain in TRAIN_DOMAINS]
return get_dataset_from_domains(domain_splits)
elif split == 'train':
domain_splits = [(domain, 'train') for domain in TRAIN_DOMAINS]
return get_dataset_from_domains(domain_splits)
else:
train_domain_splits = [(domain, 'train') for domain in TRAIN_DOMAINS]
test_domain_splits = [(domain, 'test') for domain in TRAIN_DOMAINS]
# Interleave the two (domain, split) lists
domain_splits = []
for train_domain_split, test_domain_split in zip(train_domain_splits,
test_domain_splits):
domain_splits.append(train_domain_split)
domain_splits.append(test_domain_split)
return get_dataset_from_domains(domain_splits)
def _slice_str(split: str, start: Optional[int], end: Optional[int]) -> str:
if start is None and end is None:
return split
return f"{split}[{start or ''}:{'' if end is None else end}]"
def _to_minibatch_single_label(data, label_key) -> datasets.MiniBatch:
return datasets.MiniBatch(
image=data['image'],
label=data[label_key],
multi_label_one_hot=None,
)
|
dm_nevis-master
|
dm_nevis/benchmarker/datasets/builders/domainnet.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A test dataset that may be constructed entirely in-memory.
The test dataset consists of images. The task is to predict the dominant color
(red, green, or blue).
"""
import functools
from typing import Iterator, Optional, Tuple
from dm_nevis.benchmarker.datasets import dataset_builders
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import tasks
import numpy as np
import tensorflow as tf
DATASET_NAME = 'test_dataset'
DEFAULT_IMAGE_SIZE = 32
DEFAULT_SHUFFLE_BUFFER_SIZE = 10
SPLIT_DEFAULTS = {
'test': {
'noise_level': 64,
'num_examples': 100,
'seed': 0,
},
'train': {
'noise_level': 32,
'num_examples': 1000,
'seed': 1,
},
'val': {
'noise_level': 32,
'num_examples': 100,
'seed': 2,
}
}
def get_dataset(
split: str,
*,
start: Optional[int] = None,
end: Optional[int] = None,
image_size: int = DEFAULT_IMAGE_SIZE,
task_kind: tasks.TaskKind = tasks.TaskKind.CLASSIFICATION
) -> datasets.Dataset:
"""Gets the test dataset.
The task is to classify the color of a flood filled image as Red, Green,
or Blue. The images in the dataset have been flood filled to one of these
colors, and then perturbed with uniform noise.
Args:
split: One of ["test", "train", "val"].
start: An optional offset from the beginning of the dataset to start at.
    end: An optional offset from the beginning of the dataset to end at.
image_size: Select the size of the image to be returned.
task_kind: The task kind to use.
Returns:
A dataset containing at most end - start images.
"""
kwargs = SPLIT_DEFAULTS[split]
builder_fn = _make_builder_fn(
outer_start=start,
outer_end=end,
image_size=image_size,
task_kind=task_kind,
**kwargs,
)
return datasets.Dataset(
task_key=_task_key(task_kind),
builder_fn=builder_fn,
num_examples=_compute_num_examples(kwargs['num_examples'], start, end))
def _make_builder_fn(
outer_start: Optional[int],
outer_end: Optional[int],
image_size: int,
seed: int,
num_examples: int,
noise_level: int,
task_kind: tasks.TaskKind,
) -> datasets.DatasetBuilderFn:
"""Constructs a builder function for the test dataset."""
def builder_fn(*,
shuffle: bool,
start: Optional[int] = None,
end: Optional[int] = None) -> tf.data.Dataset:
start, end = dataset_builders.combine_indices(outer_start, start, outer_end,
end)
def gen():
yield from _generate_data(
seed,
num_examples,
image_size,
noise_level,
)
ds = tf.data.Dataset.from_generator(
gen,
output_signature=(tf.TensorSpec(
shape=(image_size, image_size, 3),
dtype=tf.uint8), tf.TensorSpec(shape=(), dtype=tf.int32)))
if start is not None:
ds = ds.skip(start)
if end is not None:
ds = ds.take(end - (start or 0))
if shuffle:
ds = ds.shuffle(buffer_size=DEFAULT_SHUFFLE_BUFFER_SIZE)
ds = ds.map(functools.partial(_to_minibatch, task_kind=task_kind))
return ds
return builder_fn
def _generate_data(seed: int, num_examples: int, image_size: int,
noise_level: int) -> Iterator[Tuple[tf.Tensor, tf.Tensor]]:
"""Generates data for the dataset.
Args:
seed: A seed ensures that each run produces deterministic output.
num_examples: The number of examples that the generator should produce.
image_size: The image size of the resulting data.
noise_level: Noise added to the images, to make the task a little more
challenging.
Yields:
A generator over (image, label) tuples. The image is of shape
(image_size, image_size, 3) and in the range [0, 255].
Each image is generated by flood filling a single channel (R, G, or B)
and the associated label is the channel that was flood filled.
Noise in the range (-noise_level, noise_level) is added to the image,
to make the classification task a little more challenging.
The label is a value from {0, 1, 2} representing the dominant color
of the returned image.
"""
gen = np.random.default_rng(seed=seed)
for _ in range(num_examples):
color = gen.integers(0, 3)
img = np.zeros((image_size, image_size, 3))
img[:, :, color] = 255
if noise_level > 0:
img += gen.integers(
-noise_level, noise_level, size=(image_size, image_size, 3))
img = img.clip(0, 255)
yield tf.constant(img, dtype=tf.uint8), color
def _to_minibatch(
image: tf.Tensor,
label: tf.Tensor,
task_kind: tasks.TaskKind,
) -> datasets.MiniBatch:
"""Create a minibatch from the generated example."""
if task_kind is tasks.TaskKind.CLASSIFICATION:
return datasets.MiniBatch(
image=image,
label=label,
multi_label_one_hot=None,
)
elif task_kind is tasks.TaskKind.MULTI_LABEL_CLASSIFICATION:
label_one_hot = tf.one_hot([label], depth=3)[0]
return datasets.MiniBatch(
image=image,
label=None,
multi_label_one_hot=label_one_hot,
)
else:
raise ValueError(f'Unsupported task kind: {task_kind}')
def _compute_num_examples(n: int, start: Optional[int],
end: Optional[int]) -> int:
"""Computes number of examples from known length and optional offsets."""
start = start or 0
end = end or n
return min(n, end - start)
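# For example (illustrative): _compute_num_examples(100, 3, 10) == 7 and
# _compute_num_examples(100, None, None) == 100, matching the number of
# elements the builder_fn yields for the same offsets.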
def _task_key(task_kind: tasks.TaskKind) -> tasks.TaskKey:
"""Creates a task key given the task kind."""
if task_kind is tasks.TaskKind.MULTI_LABEL_CLASSIFICATION:
return tasks.TaskKey(
DATASET_NAME,
tasks.TaskKind.MULTI_LABEL_CLASSIFICATION,
tasks.MultiLabelClassificationMetadata(num_classes=3),
)
elif task_kind is tasks.TaskKind.CLASSIFICATION:
return tasks.TaskKey(
DATASET_NAME,
tasks.TaskKind.CLASSIFICATION,
tasks.ClassificationMetadata(num_classes=3),
)
else:
raise ValueError(f'Unsupported task kind: {task_kind}')
|
dm_nevis-master
|
dm_nevis/benchmarker/datasets/builders/test_dataset.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset builders for Small Norb."""
from typing import Optional
from dm_nevis.benchmarker.datasets import dataset_builders
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import tasks
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
DATASET_NAME = 'smallnorb'
SHUFFLE_BUFFER_SIZE = 10_000
TASK_METADATA = tasks.ClassificationMetadata(num_classes=5)
TASK_KEY = tasks.TaskKey(DATASET_NAME, tasks.TaskKind.CLASSIFICATION,
TASK_METADATA)
# pylint:disable=missing-function-docstring
def get_dataset(split: str,
*,
outer_start: Optional[int] = None,
outer_end: Optional[int] = None) -> datasets.Dataset:
"""Small Norb dataset."""
# There are 5 object categories, and each category has 10 object instances.
# 5 instances are used for testing, and 5 for training in the original
# dataset. We are going to split the original training set into: train, dev
# and dev-test. We are going to use images from a particular instance for dev,
# and images from another particular instance for dev-test. Images from the
# remaining 3 instances are used for the train split. This way we guarantee
# that we test for actual generalization.
def _to_minibatch_fn(data) -> datasets.MiniBatch:
return datasets.MiniBatch(
image=data['image'],
label=data['label_category'],
multi_label_one_hot=None,
)
if split == 'test': # We can use original test split.
builder_fn, metadata = dataset_builders.tfds_dataset_builder_fn(
tfds_name='smallnorb',
shuffle_buffer_size=SHUFFLE_BUFFER_SIZE,
split=split,
start=None,
end=None,
to_minibatch_fn=_to_minibatch_fn)
return datasets.Dataset(
builder_fn=builder_fn,
task_key=TASK_KEY,
num_examples=metadata.num_examples)
builder = tfds.builder(DATASET_NAME)
builder.download_and_prepare()
ds = builder.as_dataset(split='train')
instance_ids = [4, 6, 7, 8, 9] # Instances present in original train split
ids = dict()
ids['dev'] = [instance_ids[0]]
ids['dev_test'] = [instance_ids[1]]
ids['train'] = instance_ids[2:]
ids['train_and_dev'] = ids['train'] + ids['dev']
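  # With the instance ids above, this resolves to dev = [4], dev_test = [6] and
  # train = [7, 8, 9] (note added for illustration; the per-split counts are
  # still measured from the data below).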
num_samples = {'dev': 0, 'dev_test': 0, 'train': 0}
for el in ds.as_numpy_iterator():
el_id = el['instance']
if el_id in ids['dev']:
num_samples['dev'] += 1
elif el_id in ids['dev_test']:
num_samples['dev_test'] += 1
elif el_id in ids['train']:
num_samples['train'] += 1
else:
raise ValueError(f'Unknown instance id {el_id}')
num_samples['train_and_dev'] = num_samples['train'] + num_samples['dev']
metadata = dataset_builders.TFDSMetadata(num_samples[split])
def builder_fn_train(shuffle: bool,
start: Optional[int] = None,
end: Optional[int] = None) -> tf.data.Dataset:
# start/end are used to slice a dataset in the stream (done by
# the learner), while outer_start/outer_end are used to slice a dataset
# while constructing a stream (done by the designer of the stream).
builder.download_and_prepare()
ds = builder.as_dataset(split='train', shuffle_files=False)
split_array = np.zeros((10,)) # (0, …,0)
np.put(split_array, ids[split], 1) # (0,0, 1, 0, 0, 0, 1..)
def _filter_fn(example):
instance_id = example['instance']
result = tf.gather(tf.convert_to_tensor(split_array), instance_id)
return tf.cast(result, tf.bool)
ds = ds.filter(_filter_fn) # leave the elements with desired instance id
# Slice if needed.
indices = dataset_builders.combine_indices(outer_start, start, outer_end,
end)
if indices[0] is not None:
ds = ds.skip(indices[0])
if indices[1] is not None:
ds = ds.take(indices[1] - (indices[0] or 0))
if shuffle:
ds = ds.shuffle(SHUFFLE_BUFFER_SIZE)
return ds.map(_to_minibatch_fn)
return datasets.Dataset(
builder_fn=builder_fn_train,
task_key=TASK_KEY,
num_examples=metadata.num_examples)
|
dm_nevis-master
|
dm_nevis/benchmarker/datasets/builders/smallnorb.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_nevis.benchmarker.datasets.builders.tfds_builder."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.benchmarker.datasets.builders import tfds_builder
import tensorflow_datasets as tfds
class TFDSBuilderTest(parameterized.TestCase):
# We can include any dataset that has a fixture writer.
@parameterized.parameters(tfds_builder.SUPPORTED_DATASETS)
def test_build_dataset(self, dataset_name):
with tfds.testing.mock_data(num_examples=10):
dataset = tfds_builder.get_dataset(dataset_name, split="train")
ds = dataset.builder_fn(shuffle=False, end=10)
self.assertLen(list(ds), 10)
if __name__ == "__main__":
absltest.main()
|
dm_nevis-master
|
dm_nevis/benchmarker/datasets/builders/tfds_builder_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset builders for TFDS."""
from typing import Optional
from dm_nevis.benchmarker.datasets import dataset_builders
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import tasks
import tensorflow as tf
import tensorflow_datasets as tfds
DEFAULT_SHUFFLE_BUFFER_SIZE = 5_000
SINGLE_LABEL_DATASETS = [
"caltech101",
"cifar10",
"cifar100",
"caltech_birds2011",
"dtd",
"emnist/balanced",
"fashion_mnist",
"food101",
"imagenet2012",
"mnist",
"oxford_flowers102",
"oxford_iiit_pet",
"patch_camelyon",
"stanford_dogs",
"stl10",
"sun397",
"svhn_cropped",
"smallnorb",
"domainnet/sketch",
]
MULTI_LABEL_DATASETS = [
"voc/2012",
"voc/2007",
"celeb_a",
# "lfw",
# "coco", # Add support for this multi-label dataset.
]
SUPPORTED_DATASETS = SINGLE_LABEL_DATASETS + MULTI_LABEL_DATASETS
_CELEB_A_ATTRIBUTES = [
"5_o_Clock_Shadow",
"Arched_Eyebrows",
"Attractive",
"Bags_Under_Eyes",
"Bald",
"Bangs",
"Big_Lips",
"Big_Nose",
"Black_Hair",
"Blond_Hair",
"Blurry",
"Brown_Hair",
"Bushy_Eyebrows",
"Chubby",
"Double_Chin",
"Eyeglasses",
"Goatee",
"Gray_Hair",
"Heavy_Makeup",
"High_Cheekbones",
"Male",
"Mouth_Slightly_Open",
"Mustache",
"Narrow_Eyes",
"No_Beard",
"Oval_Face",
"Pale_Skin",
"Pointy_Nose",
"Receding_Hairline",
"Rosy_Cheeks",
"Sideburns",
"Smiling",
"Straight_Hair",
"Wavy_Hair",
"Wearing_Earrings",
"Wearing_Hat",
"Wearing_Lipstick",
"Wearing_Necklace",
"Wearing_Necktie",
"Young",
]
def get_single_label_dataset(dataset_name: str, split: str, start: int,
end: int) -> datasets.Dataset:
"""Gets single class tfds dataset."""
dataset_info = tfds.builder(dataset_name).info
label_key = "label"
if dataset_name == "smallnorb":
label_key = "label_category"
num_classes = dataset_info.features[label_key].num_classes
task_name = dataset_name.translate(str.maketrans(" -/", "___"))
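  # For example, "voc/2012" becomes "voc_2012" and "emnist/balanced" becomes
  # "emnist_balanced" (note added for illustration).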
task_key = tasks.TaskKey(
task_name, tasks.TaskKind.CLASSIFICATION,
tasks.ClassificationMetadata(num_classes=num_classes))
builder_fn, metadata = dataset_builders.tfds_dataset_builder_fn(
tfds_name=dataset_name,
shuffle_buffer_size=DEFAULT_SHUFFLE_BUFFER_SIZE,
split=split,
start=start,
end=end,
to_minibatch_fn=lambda x: _to_minibatch_single_label(x, label_key))
return datasets.Dataset(
builder_fn=builder_fn,
task_key=task_key,
num_examples=metadata.num_examples)
def _to_minibatch_single_label(data, label_key) -> datasets.MiniBatch:
return datasets.MiniBatch(
image=data["image"],
label=data[label_key],
multi_label_one_hot=None,
)
def _to_minibatch_multi_label(data, multi_label_key,
num_classes) -> datasets.MiniBatch:
image = data["image"]
multi_label = data[multi_label_key]
multi_label_one_hot = tf.reduce_sum(
tf.one_hot(multi_label, num_classes), axis=0)
return datasets.MiniBatch(
image=image,
label=None,
multi_label_one_hot=multi_label_one_hot,
)
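# For instance (illustrative): with num_classes = 5 and multi_label = [1, 3],
# tf.one_hot gives [[0, 1, 0, 0, 0], [0, 0, 0, 1, 0]], and the reduce_sum
# collapses this to the multi-hot vector [0., 1., 0., 1., 0.].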
def _to_minibatch_celeb_a(data) -> datasets.MiniBatch:
image = data["image"]
attributes = []
for attr in _CELEB_A_ATTRIBUTES:
attributes.append(tf.cast(data["attributes"][attr], tf.float32))
attributes = tf.stack(attributes, axis=0)
return datasets.MiniBatch(
image=image,
label=None,
multi_label_one_hot=attributes,
)
def get_multi_label_dataset(dataset_name: str, split: str, start: int,
end: int) -> datasets.Dataset:
"""Gets multi label tfds dataset."""
dataset_info = tfds.builder(dataset_name).info
task_name = dataset_name.translate(str.maketrans(" -/", "___"))
if dataset_name == "celeb_a":
num_classes = len(dataset_info.features["attributes"])
to_minibatch_fn = _to_minibatch_celeb_a
elif dataset_name == "voc/2012" or dataset_name == "voc/2007":
multi_labels_key = "labels"
# pylint: disable=g-long-lambda
num_classes = dataset_info.features[multi_labels_key].num_classes
to_minibatch_fn = lambda x: _to_minibatch_multi_label(
x, multi_labels_key, num_classes)
else:
raise ValueError(f"Unsupported dataset: {dataset_name}")
task_key = tasks.TaskKey(
task_name, tasks.TaskKind.MULTI_LABEL_CLASSIFICATION,
tasks.MultiLabelClassificationMetadata(num_classes=num_classes))
builder_fn, metadata = dataset_builders.tfds_dataset_builder_fn(
tfds_name=dataset_name,
shuffle_buffer_size=DEFAULT_SHUFFLE_BUFFER_SIZE,
split=split,
start=start,
end=end,
to_minibatch_fn=to_minibatch_fn)
return datasets.Dataset(
builder_fn=builder_fn,
task_key=task_key,
num_examples=metadata.num_examples)
def get_dataset(dataset_name: str,
split: str,
*,
start: Optional[int] = None,
end: Optional[int] = None) -> datasets.Dataset:
"""Gets tensorflow dataset."""
if dataset_name not in SUPPORTED_DATASETS:
raise ValueError(f"Unsupported dataset: {dataset_name}")
if dataset_name in SINGLE_LABEL_DATASETS:
return get_single_label_dataset(dataset_name, split, start, end)
elif dataset_name in MULTI_LABEL_DATASETS:
return get_multi_label_dataset(dataset_name, split, start, end)
else:
raise ValueError(f"Unsupported dataset: {dataset_name}")
|
dm_nevis-master
|
dm_nevis/benchmarker/datasets/builders/tfds_builder.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
dm_nevis-master
|
dm_nevis/benchmarker/learners/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for learners."""
import dataclasses
from typing import Any, Callable, Iterator, NamedTuple, Optional, Tuple, Protocol
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import streams
LearnerState = Any
Checkpoint = Any
CheckpointFn = Callable[[Checkpoint], None]
@dataclasses.dataclass
class ResourceUsage:
"""Estimated resources used by a learner.
All attributes are optional and default to None, since they may be either
inappropriate or unavailable depending on the specific implementation details
of the learner.
Floating point values are used to measure flops since these may overflow
  integers. Since these are typically estimates, the inherent imprecision
may be ignored.
Attributes:
floating_point_operations: An estimate of the number of floating point
operations used, of any precision. Sometimes referred to as FLOPs. We
avoid this acronym due to the ambiguity with FLOPS (floating point
operations per second).
peak_parameter_count: The peak number of parameters used by the learner.
peak_parameter_size_bytes: The peak number of bytes used to store the
learner's parameters.
"""
floating_point_operations: Optional[float] = None
peak_parameter_count: Optional[int] = None
peak_parameter_size_bytes: Optional[int] = None
def combine(self, other: 'ResourceUsage') -> 'ResourceUsage':
"""Combines with other resource usage dataclasses and return a new one..
Args:
other: Resources to be combined with `self`.
Returns:
      Accumulated resource usage. If a value is None in either input, the
      corresponding output value will be None.
"""
def add_or_none(x, y):
if x is None or y is None:
return None
return x + y
def max_or_none(x, y):
if x is None or y is None:
return None
return max(x, y)
return ResourceUsage(
floating_point_operations=add_or_none(self.floating_point_operations,
other.floating_point_operations),
peak_parameter_count=max_or_none(self.peak_parameter_count,
other.peak_parameter_count),
peak_parameter_size_bytes=max_or_none(self.peak_parameter_size_bytes,
other.peak_parameter_size_bytes),
)
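# A small sketch of how usage accumulates, added for illustration and not part
# of the original module:
#
#   a = ResourceUsage(floating_point_operations=1e9, peak_parameter_count=10)
#   b = ResourceUsage(floating_point_operations=2e9, peak_parameter_count=25)
#   a.combine(b)
#   # -> ResourceUsage(floating_point_operations=3e9, peak_parameter_count=25,
#   #                  peak_parameter_size_bytes=None)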
class Predictions(NamedTuple):
"""Input batch and resulting learner predictions.
TODO: Implement a specific type for the returned output.
In all cases, the batch and output are assumed to have a single batch
  dimension that is identical between the batch and output attributes.
Attributes:
batch: The verbatim input batch used to compute the predictions.
output: The outputs resulting from running prediction on the batch.
"""
batch: datasets.MiniBatch
output: Any
class InitFn(Protocol):
def __call__(self) -> LearnerState:
"""Initializes a learner's state."""
class TrainFn(Protocol):
def __call__(
self,
event: streams.TrainingEvent,
state: LearnerState,
write_checkpoint: CheckpointFn,
*,
checkpoint_to_resume: Optional[Checkpoint] = None,
) -> Tuple[LearnerState, ResourceUsage]:
"""Trains a learner with the given state, and returns updated state."""
class PredictFn(Protocol):
def __call__(
self,
event: streams.PredictionEvent,
state: LearnerState,
) -> Iterator[Predictions]:
"""Computes predictions."""
class Learner(NamedTuple):
init: InitFn
train: TrainFn
predict: PredictFn
|
dm_nevis-master
|
dm_nevis/benchmarker/learners/learner_interface.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
dm_nevis-master
|
dm_nevis/benchmarker/environment/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the interface for a checkpointer."""
from typing import Optional, TypeVar, Protocol
CheckpointableState = TypeVar("CheckpointableState")
class Checkpointer(Protocol[CheckpointableState]):
"""The interface that checkpoints must satisfy to work with the benchmarker.
The checkpointer must support reading and writing checkpointable state.
"""
def write(self, state: CheckpointableState) -> None:
"""Writes a checkpoint.
Args:
state: Arbitrary checkpointable state
"""
def restore(self) -> Optional[CheckpointableState]:
"""Restores the most recent checkpointed state.
Returns:
The most recent checkpoint that was successfully written using write,
or None if no checkpoint state is available.
"""
|
dm_nevis-master
|
dm_nevis/benchmarker/environment/checkpointer_interface.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the interface for a data writer."""
from typing import TypeVar, Protocol
MetricsData = TypeVar("MetricsData")
class DataWriter(Protocol[MetricsData]):
"""The interface that checkpoints must satisfy to work with the benchmarker.
The checkpointer must support reading and writing checkpointable state.
"""
def write(self, metrics_data: MetricsData) -> None:
"""Writes metrics to persistent state."""
def flush(self) -> None:
"""Flushes the buffer and ensure data is actually written."""
def close(self) -> None:
"""Closes metrics writer and free whatever was allocated."""
|
dm_nevis-master
|
dm_nevis/benchmarker/environment/datawriter_interface.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the environment which is used to run a benchmark."""
import dataclasses
import datetime
import time
from typing import Callable, NamedTuple, Optional, Tuple
from absl import logging
import chex
from dm_nevis.benchmarker.datasets import streams
from dm_nevis.benchmarker.learners import learner_interface
from dm_nevis.benchmarker.metrics import metrics_aggregators
@chex.dataclass(frozen=True)
class EnvironmentState:
"""Represents the state of the environment.
The environment state stores the synchronized model checkpoint and current
position in the stream.
Attributes:
number_of_completed_events: The number of events completed when this state
was written.
within_training_event_checkpoint: If the learner writes a checkpoint during
a train event, then we write an environment checkpoint with this field set
to that checkpoint.
learner_state: The state of the learner.
metrics_state: The accumulated metrics computed by the environment.
train_resources_used: The accumulated resource usage during training.
"""
number_of_completed_events: int
within_training_event_checkpoint: Optional[learner_interface.Checkpoint]
learner_state: learner_interface.LearnerState
metrics_state: metrics_aggregators.State
train_resources_used: learner_interface.ResourceUsage
CheckpointWriterFn = Callable[[EnvironmentState], None]
class RunResult(NamedTuple):
results: metrics_aggregators.Results
train_resources_used: learner_interface.ResourceUsage
final_learner_state: learner_interface.LearnerState
def no_op_checkpointer(state: EnvironmentState) -> None:
"""A checkpointer function that ignores the state."""
del state
def run(
learner: learner_interface.Learner,
stream: streams.Stream,
metrics: metrics_aggregators.MetricsAggregator,
*,
write_checkpoint: CheckpointWriterFn = no_op_checkpointer,
checkpoint_to_resume: Optional[EnvironmentState] = None,
) -> RunResult:
"""Runs the interaction of a learner with a stream and computes metrics.
Args:
learner: The learner that will be exposed to the datasets in the stream.
stream: A stream containing an iterable sequence of events to feed to the
learner. To support resuming an environment from a checkpoint, the event
      sequence returned by the stream, up to the resumed point, must be
      deterministic and identical across future runs.
metrics: Defines the metrics aggregator that will be used to compute and
      publish the metrics resulting from this benchmarker run.
write_checkpoint: A callable that stores intermediate environment state to a
checkpoint.
checkpoint_to_resume: If provided, the environment run is resumed from the
given checkpointed state.
Returns:
The result of the metrics aggregator applied to the accumulated state
computed across all prediction tasks, along with the resource usage
during the run.
"""
if not checkpoint_to_resume:
state = EnvironmentState(
number_of_completed_events=0,
within_training_event_checkpoint=None,
learner_state=learner.init(),
metrics_state=metrics.init(),
train_resources_used=learner_interface.ResourceUsage(
floating_point_operations=0.0,
peak_parameter_count=0,
peak_parameter_size_bytes=0))
else:
logging.info("Restoring run from checkpoint...")
state = checkpoint_to_resume
for index_in_stream, event in enumerate(stream.events()):
if index_in_stream < state.number_of_completed_events:
logging.info("Skipping step %d: %s", index_in_stream, event)
continue
step_start_time = time.monotonic()
logging.info("Step %d: %s", index_in_stream, event)
if isinstance(event, streams.TrainingEvent):
learner_state, resources = _train(state, event, learner, write_checkpoint)
metrics_state = metrics.aggregate_train_event(state.metrics_state, event,
resources)
state = dataclasses.replace(
state,
metrics_state=metrics_state,
learner_state=learner_state,
train_resources_used=state.train_resources_used.combine(resources),
)
elif isinstance(event, streams.PredictionEvent):
predictions = learner.predict(event, state.learner_state)
metrics_state = metrics.aggregate_predict_event(state.metrics_state,
event, predictions)
state = dataclasses.replace(state, metrics_state=metrics_state)
else:
raise ValueError(f"Unknown stream task type {type(event)}")
state = dataclasses.replace(
state,
within_training_event_checkpoint=None,
number_of_completed_events=index_in_stream + 1,
)
write_checkpoint(state)
logging.info(
"Completed step %d: %s in %s",
index_in_stream,
event,
datetime.timedelta(seconds=time.monotonic() - step_start_time),
)
return RunResult(
results=metrics.compute_results(state.metrics_state),
train_resources_used=state.train_resources_used,
final_learner_state=state.learner_state,
)
def _train(
state: EnvironmentState,
event: streams.TrainingEvent,
learner: learner_interface.Learner,
write_checkpoint: CheckpointWriterFn,
) -> Tuple[learner_interface.LearnerState, learner_interface.ResourceUsage]:
"""Runs a train dataset."""
def write_train_event_checkpoint(learner_train_checkpoint):
write_checkpoint(
dataclasses.replace(
state, within_training_event_checkpoint=learner_train_checkpoint))
learner_state, resources_used = learner.train(
event,
state.learner_state,
write_checkpoint=write_train_event_checkpoint,
checkpoint_to_resume=state.within_training_event_checkpoint)
logging.info("Resources used during train event: %s", resources_used)
return learner_state, resources_used
|
dm_nevis-master
|
dm_nevis/benchmarker/environment/environment.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_nevis.benchmarker.environment.environment."""
import copy
from typing import Optional
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.benchmarker.datasets import streams
from dm_nevis.benchmarker.datasets import test_stream
from dm_nevis.benchmarker.environment import environment
from dm_nevis.benchmarker.learners import learner_interface
from dm_nevis.benchmarker.metrics import metrics_aggregators
class EnvironmentTest(parameterized.TestCase):
def test_run(self):
checkpointer = _InMemoryCheckpointer()
stream = test_stream.TestStream()
metrics = metrics_aggregators.noop_metrics_aggregator()
learner = _build_test_learner()
environment.run(
learner, stream, metrics, write_checkpoint=checkpointer.write)
self.assertNotEmpty(checkpointer.checkpoints)
train_events = [
event for event in stream.events()
if isinstance(event, streams.TrainingEvent)
]
for checkpoint in checkpointer.checkpoints:
result = environment.run(
learner, stream, metrics, checkpoint_to_resume=checkpoint)
expected = {
'seen_train_events': train_events,
'values_of_x': [sum(range(20)), sum(range(20))],
}
self.assertEqual(expected, result.final_learner_state)
def _build_test_learner() -> learner_interface.Learner:
def init():
return {
'seen_train_events': [],
'values_of_x': [],
}
def train(event, state, write_checkpoint, *, checkpoint_to_resume=None):
if checkpoint_to_resume:
step, x, checkpoint_event = checkpoint_to_resume
assert checkpoint_event == event
else:
x = step = 0
for i in range(step, 20):
if i % 3 == 0:
write_checkpoint((i, x, event))
x += i
# Add to the learner state the value of x we computed, along with the
    # train event that we used to compute it. In all cases, the value of x
    # will be sum(range(20)), if checkpointing is working correctly!
state = {
'seen_train_events': [*state['seen_train_events'], event],
'values_of_x': [*state['values_of_x'], x],
}
return state, learner_interface.ResourceUsage()
def predict(event, state):
del event, state
return []
return learner_interface.Learner(init, train, predict)
class _InMemoryCheckpointer:
"""A checkpointer that stores every checkpoint written in a list."""
def __init__(self):
self.checkpoints = []
def write(self, ckpt: environment.EnvironmentState) -> None:
self.checkpoints.append(copy.deepcopy(ckpt))
def restore(self) -> Optional[environment.EnvironmentState]:
if not self.checkpoints:
return None
return self.checkpoints[-1]
def learner_checkpoints(self):
return [ckpt.learner_state for ckpt in self.checkpoints]
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
dm_nevis/benchmarker/environment/environment_test.py
|
"""Utils for creating logger."""
import datetime
import os
from typing import Optional, Mapping, Any
from dm_nevis.benchmarker.environment import tensorboard_writer
def generate_tensorboard_log_root() -> str:
"""Generates log root for tensorboard."""
log_dir = os.environ.get('TENSORBOARD_LOG_DIR', '/tmp/tensorboard')
folder_name = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
return os.path.join(log_dir, folder_name)
def get_metrics_writer(
tensorboard_log_root: str,
logger_name: str,
index_of_training_event: Optional[int] = None,
overrides: Optional[Mapping[str, Any]] = None,
) -> tensorboard_writer.TensorBoardWriter:
"""Gets metrics writer by name."""
if logger_name == 'benchmarker':
return tensorboard_writer.TensorBoardWriter(
logdir=os.path.join(tensorboard_log_root, 'benchmark_metrics'),
prefix='benchmark_metrics',
prefix_fields=['data_split'],
step_field='index_of_most_recent_train_event',
)
elif logger_name in ['learner_train', 'learner_eval']:
metric_prefix = f'train_event_{index_of_training_event}'
logdir = os.path.join(tensorboard_log_root, metric_prefix)
if overrides is not None:
overrides_str = ','.join([f'{k}={v}' for k, v in overrides.items()])
logdir = os.path.join(logdir, overrides_str)
logdir = os.path.join(logdir, logger_name.split('_')[1])
return tensorboard_writer.TensorBoardWriter(
logdir=logdir,
prefix=metric_prefix,
)
elif logger_name == 'finetuning':
return tensorboard_writer.TensorBoardWriter(
logdir=os.path.join(tensorboard_log_root, 'finetuning'),
prefix='finetuning',
step_field='index_of_train_event',
)
else:
raise NotImplementedError(f'Unknown logger_name {logger_name}.')
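# Illustrative usage sketch (not part of the original module). The training
# event index and override values below are hypothetical; they only show how
# the tensorboard log directories are assembled for each logger kind, e.g.
# <log_root>/train_event_3/learning_rate=0.1/train for the learner writer.
def _example_get_metrics_writer_usage() -> None:
  log_root = generate_tensorboard_log_root()
  # Benchmark-level metrics, stepped by `index_of_most_recent_train_event`.
  benchmark_writer = get_metrics_writer(log_root, 'benchmarker')
  # Per-train-event learner metrics; overrides become part of the logdir path.
  train_writer = get_metrics_writer(
      log_root,
      'learner_train',
      index_of_training_event=3,
      overrides={'learning_rate': 0.1})
  benchmark_writer.close()
  train_writer.close()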
|
dm_nevis-master
|
dm_nevis/benchmarker/environment/logger_utils.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A datawriter logging on stdout."""
from typing import Any, Mapping
from absl import logging
class LoggingWriter:
"""A datawriter logging on stdout."""
def __init__(self, prefix: str = ""):
self.prefix = f"{prefix}: "
def write(self, metrics_data: Mapping[str, Any]) -> None:
"""Writes metrics data on stdout.
Args:
      metrics_data: A mapping of metric names to metric values to log.
"""
message = self.prefix + "\n".join(
[f"{k}: {v}" for k, v in metrics_data.items()])
logging.info(message)
def flush(self) -> None:
"""Flushes the buffer and ensure data is actually written."""
logging.flush()
def close(self) -> None:
"""Closes logging writer."""
|
dm_nevis-master
|
dm_nevis/benchmarker/environment/logging_writer.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A writer that logs metrics to tensorboard."""
import numbers
from typing import Any, Sequence
from absl import logging
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
try:
import torch # pylint: disable=g-import-not-at-top
except ModuleNotFoundError:
torch = None
class TensorBoardWriter:
"""A writer that logs metrics to tensorboard."""
def __init__(
self,
logdir: str,
prefix: str = "",
prefix_fields: Sequence[str] = (),
step_field: str = "step",
):
"""Constructs a writer that logs metrics to tensorboard.
Args:
logdir: Logging directory used to instantiate `tf.summary.SummaryWriter`.
      prefix: Hardcoded prefix to add to each metric.
      prefix_fields: A sequence of metric names whose values are used as
        additional prefixes for the metric. They appear after the hardcoded
        prefix.
      step_field: The name of the metric used as the `step` argument when
        logging to tensorboard. It becomes the x-axis on the tensorboard
        dashboard.
"""
self._prefix = prefix
logging.info("Created TensorBoardWriter with logdir=%s", logdir)
self._file_writer = tf.summary.create_file_writer(logdir)
self._prefix_fields = prefix_fields
self._step_field = step_field
def write(self, metrics_data: dict[str, Any]) -> None:
"""Write metrics data on stdout.
Args:
metrics_data: A mapping of metrics name to metrics value to log.
"""
# Extract logging step from metrics.
if self._step_field not in metrics_data:
raise ValueError("metrics_data doesn't contain the field that is"
f" used as step, metrics_data={metrics_data},"
f" step_field={self._step_field}")
step = metrics_data.pop(self._step_field)
# Construct prefix for metric.
prefix = self._construct_prefix(metrics_data)
for metric_name, value in metrics_data.items():
self._log_one_metric(prefix, metric_name, value, step)
def _construct_prefix(self, metrics_data: dict[str, Any]) -> str:
"""Constructs prefix for each metric name."""
prefixes = [self._prefix] if self._prefix else []
for field in self._prefix_fields:
val = metrics_data.pop(field, None)
if val is not None:
prefixes.append(val)
prefix = "/".join(prefixes)
return prefix
def _log_one_metric(
self,
prefix: str,
metric_name: str,
value: Any,
step: int,
) -> None:
"""Logs one metric value."""
tf_metric_name = f"{prefix}/{metric_name}"
if torch is not None and isinstance(value, torch.Tensor):
if torch.numel(value) == 1:
value = value.item()
else:
logging.warning(
"%sTrying to log %s with shape %s: %s,"
" while only scalar is supported.", "*" * 50, type(value),
value.shape, value)
return
if isinstance(value, jnp.ndarray) or isinstance(value, np.ndarray):
if value.size == 1:
value = value.item()
else:
logging.warning(
"%sTrying to log %s with shape %s: %s,"
" while only scalar is supported.", "*" * 50, type(value),
value.shape, value)
return
with self._file_writer.as_default():
if isinstance(value, numbers.Number):
tf.summary.scalar(tf_metric_name, value, step=step)
elif isinstance(value, str):
tf.summary.text(tf_metric_name, value, step=step)
else:
logging.warning(
"%sCan't handle metric '%s' which has type %s: %s",
"*" * 50 + "\n",
metric_name,
type(value),
value,
)
def flush(self) -> None:
"""Flushes the buffer and ensure data is actually written."""
logging.info("flush tensorboard metrics")
self._file_writer.flush()
logging.flush()
def close(self) -> None:
"""Closes logging writer."""
logging.info("close tensorboard writer")
self._file_writer.close()
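# Illustrative usage sketch (not part of the original module). The logdir and
# metric values are hypothetical; they show how `step_field` and
# `prefix_fields` are consumed from the metrics mapping before logging.
def _example_tensorboard_writer_usage() -> None:
  writer = TensorBoardWriter(
      logdir="/tmp/tb_example",
      prefix="benchmark_metrics",
      prefix_fields=["data_split"],
      step_field="step",
  )
  # "step" is popped and used as the x-axis value, "data_split" is popped into
  # the tag prefix, so this logs a scalar "benchmark_metrics/test/accuracy".
  writer.write({"step": 10, "data_split": "test", "accuracy": 0.91})
  writer.flush()
  writer.close()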
|
dm_nevis-master
|
dm_nevis/benchmarker/environment/tensorboard_writer.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_nevis.streams.example_stream."""
from absl.testing import absltest
from dm_nevis.benchmarker.datasets import streams
from dm_nevis.streams import example_stream
import tensorflow_datasets as tfds
class ExampleStreamTest(absltest.TestCase):
def test_get_stream(self):
with tfds.testing.mock_data(num_examples=1):
stream = example_stream.ExampleStream()
for e in stream.events():
if isinstance(e, streams.PredictionEvent):
dataset = stream.get_dataset_by_key(e.dataset_key)
else:
dataset = stream.get_dataset_by_key(e.train_and_dev_dataset_key)
ds = dataset.builder_fn(shuffle=False)
self.assertLen(ds, 1)
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
dm_nevis/streams/example_stream_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_nevis.streams.nevis_stream."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.streams import nevis_stream
class NevisStreamTest(parameterized.TestCase):
@parameterized.named_parameters(
("spaces", "some data key", "some_data_key"),
("everything", " /-~", "____"),
)
def test_canonicalize_name(self, name, expected):
self.assertEqual(nevis_stream._canonicalize_name(name), expected)
@parameterized.named_parameters(
("full_stream", nevis_stream.NevisStreamVariant.FULL),
("short_tream", nevis_stream.NevisStreamVariant.SHORT),
)
def test_datasets_in_stream(self, stream_variant):
n1 = nevis_stream.datasets_in_stream(stream_variant, remove_duplicates=True)
n2 = nevis_stream.datasets_in_stream(
stream_variant, remove_duplicates=False)
self.assertSameElements(n1, n2)
if __name__ == "__main__":
absltest.main()
|
dm_nevis-master
|
dm_nevis/streams/nevis_stream_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_nevis.streams.nevis."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.streams import nevis
import numpy as np
import tensorflow as tf
class NevisTest(parameterized.TestCase):
@parameterized.parameters([
{
'labels': [
[0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1.],
[0., 1., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 1., 0.],
[0., 1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0., 0., 1., 0., 0.],
[0., 0., 1., 0., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0., 0.],
[0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0., 2., 0., 0., 0., 0., 0., 0., 0., 0., 1.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0., 0., 1., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 1.],
[0., 0., 1., 0., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0., 0.],
],
'expected': [
[0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1.],
[0., 1., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 1., 0.],
[0., 1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0., 0., 1., 0., 0.],
[0., 0., 1., 0., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0., 0.],
[0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1.],
[0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 1.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0., 0., 1., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 1.],
[0., 0., 1., 0., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0., 0.],
]
},
{
'labels': [
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 1., 0., 0.],
[0., 0., 0., 0., 0., 2., 0., 0., 0., 0., 0., 0., 0., 0., 1.],
[1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
],
'expected': [
[1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 1., 0., 0.],
[0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 1.],
[1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 1.],
]
},
])
def test_patch_biwi_minibatch(self, labels, expected):
for label, expected in zip(labels, expected):
example = datasets.MiniBatch(
multi_label_one_hot=tf.constant(label), image=None, label=None)
result = nevis._patch_biwi_minibatch(example)
np.testing.assert_allclose(result.multi_label_one_hot.numpy(),
np.array(expected))
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
dm_nevis/streams/nevis_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
dm_nevis-master
|
dm_nevis/streams/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Defines the NEVIS main stream.
To check that this stream is working as intended, a binary is provided at
iterate_nevis_stream.py in this directory. Running this binary will iterate
the stream and print the first example in every (successfully fetched) dataset.
"""
import collections
from concurrent import futures
import enum
from typing import Iterator, List, Mapping, NamedTuple, Optional, Sequence, Tuple, Union
from absl import logging
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import streams
from dm_nevis.benchmarker.datasets.builders import coil100
from dm_nevis.benchmarker.datasets.builders import domainnet
from dm_nevis.benchmarker.datasets.builders import smallnorb
from dm_nevis.benchmarker.datasets.builders import tfds_builder
from dm_nevis.datasets_storage import dataset_loader
from dm_nevis.streams import nevis as nevis_dataset_loader
import numpy as np
import tensorflow_datasets as tfds
from dm_nevis.datasets_storage import paths
NEVIS_DATA_DIR = paths.NEVIS_DATA_DIR
# Years up to (but not including) 2020 are used for development and
# hyperparameter selection. Only at the very end do we run on the remaining
# years (2020 and beyond).
DEFAULT_NEVIS_STOP_YEAR = 2020
DEFAULT_NUM_THREADPOOL_WORKERS = 10
class NevisStreamVariant(enum.Enum):
"""Known task streams."""
FULL = 'FULL'
SHORT = 'SHORT'
TINY = 'TINY'
DEBUG = 'DEBUG'
IMAGENET_ONLY = 'IMAGENET_ONLY'
MAJOR_DOMAIN_ONLY = 'MAJOR_DOMAIN_ONLY'
LARGE_DATASET_ONLY = 'LARGE_DATASET_ONLY'
class AblationStreamVariant(enum.Enum):
"""Known cripple streams."""
REMOVE_FIRST_30_TASKS = 'REMOVE_FIRST_30_TASKS'
REMOVE_LAST_30_TASKS = 'REMOVE_LAST_30_TASKS'
NO_IMAGENET = 'NO_IMAGENET'
RANDOM_DATASET = 'RANDOM_DATASET'
class Split(enum.Enum):
"""Data split names."""
TRAIN = 'train'
DEV = 'dev'
DEV_TEST = 'dev_test'
TEST = 'test'
NEVIS_STREAM_PER_YEAR: Mapping[NevisStreamVariant, Mapping[int, Sequence[str]]] = {
NevisStreamVariant.DEBUG: {
1999: ('Pascal 2007',),
2000: ('COIL 20',),
},
NevisStreamVariant.FULL: {
1989: (),
1992:
('Magellan Venus Volcanoes', 'Aberdeen face database.'),
1998: ('LandSat UCI repo', 'Brodatz', 'Olivetti Face Dataset'
),
2000: ('COIL 20',),
2001: ('COIL 100', 'MPEG-7'
),
2002: (),
2003: (),
2004:
('Butterfly dataset', 'MNIST'),
2005: ('Caltech 101', 'UMIST', 'CMU AMP expression'),
2006: ('Pascal 2005', 'Caltech cars, motorbikes', 'UIUC cars', 'ALOI'),
2007: ('8 Scene Dataset',),
2008: ('15 Scenes',),
2009: ('Pascal 2006', 'Extended YaleB'
),
2010: ('Pascal 2007', 'Graz-02', 'Olivetti Face Dataset', 'PPMI'),
2011:
('Caltech 256', 'ImageNet', 'LFW', 'Oxford Flowers',
'Flicker Material Dataset', 'Oxford Flowers 102',
'Belgium Traffic Sign Dataset',
'German Traffic Sign Recognition Benchmark', 'Brodatz', 'VisTex'),
2012: ('UMD', 'KTH-TIPS', 'UIUC texture', 'KTH-TIPS2-b', 'CVC-MUSCIMA'),
2013: ('IAPRTC-12', 'sketch dataset', 'KTH-TIPS2-a', 'Pascal 2012',
'NORB'),
2014: ('Wikipaintings',),
2015:
('MNIST', 'MIT Scenes', 'SUN 397', 'CIFAR 10'),
2016: ('CUB 200', 'Stanford Cars', 'FGVC Aircraft', 'DTD', 'MS COCO',
'Caltech 101', 'Oxford IIIT Pets', 'Stanford Dogs', 'ANIMAL',
'Caltech 101 Silhouettes', 'Interact', 'VOC Actions', 'SVHN',
'Chars74K'),
2017: ('CelebA', 'LFWA', 'SUN Attribute', 'CIFAR 100'
),
2018: ('TID2008', 'TID2013', 'USPS', 'Semeion', 'MNIST-m',
'Office Caltech', 'PACS', 'Caltech Camera Traps',
'EMNIST Balanced', 'CASIA-HWDB1.1',
'ISBI-ISIC 2017 melanoma classification challenge'),
2019:
('Trancos', 'Mall dataset', 'Fashion MNIST', 'NotMNIST',
'Tiny Imagenet', 'STL10', 'Food 101', 'MNIST-rot', 'AWA2',
'15 Scenes', 'Food 101 N', 'COIL 20', 'COIL 100'),
2020: ('ShanghaiTech', 'AnimalWeb', 'BIWI'
),
2021: ('ImageNet', 'Oxford Flowers 102', 'DomainNet-Real',
'Pneumonia Chest X-ray', 'Path MNIST', 'NIH Chest X-ray',
'Tubercolosis',
'covid-19 x-ray', 'PatchCamelyon', 'DDSM',
'Synthetic COVID-19 Chest X-ray Dataset'),
},
NevisStreamVariant.SHORT: {
2004: ('COIL 100', 'MNIST'
),
2006: ('Pascal 2005', 'Caltech cars, motorbikes', 'UIUC cars'),
2009: ('Pascal 2006',),
2010: ('Caltech 101',),
2011: (
'Graz-02',
'15 Scenes',
'Pascal 2007',
'LFW',
),
2013: ('sketch dataset', 'Brodatz'),
2014: ('ImageNet', 'Pascal 2012', 'Caltech 256'
),
2018: (
'CIFAR 100',
'CIFAR 10',
'USPS',
'MNIST',
'MNIST-m',
'Office Caltech',
'PACS',
'ISBI-ISIC 2017 melanoma classification challenge',
),
2019: ('Fashion MNIST',),
2020: ('Stanford Dogs', 'CUB 200', 'Stanford Cars', 'FGVC Aircraft'),
},
NevisStreamVariant.TINY: {
2004: ('MNIST',),
2010: ('Caltech 101',),
2014: ('Pascal 2012',),
2018: (
'CIFAR 100',
'MNIST',
'ISBI-ISIC 2017 melanoma classification challenge',
),
2020: ('CUB 200',),
},
NevisStreamVariant.IMAGENET_ONLY: {
2014: ('ImageNet',
),
},
NevisStreamVariant.MAJOR_DOMAIN_ONLY: {
# Exclude satellite, face, texture, shape, ocr, quality, medical
1989: (),
1992: (),
1998: (),
2000: ('COIL 20',),
2001: ('COIL 100',),
2002: (),
2003: (),
2004: ('Butterfly dataset',),
2005: ('Caltech 101',),
2006: ('Pascal 2005', 'Caltech cars, motorbikes', 'UIUC cars', 'ALOI'),
2007: ('8 Scene Dataset',),
2008: ('15 Scenes',),
2009: ('Pascal 2006',),
2010: ('Pascal 2007', 'Graz-02', 'PPMI'),
2011: ('Caltech 256', 'ImageNet', 'Oxford Flowers',
'Oxford Flowers 102', 'Belgium Traffic Sign Dataset',
'German Traffic Sign Recognition Benchmark'),
2012: (),
2013: ('IAPRTC-12', 'sketch dataset', 'Pascal 2012', 'NORB'),
2014: ('Wikipaintings',),
2015: ('MIT Scenes', 'SUN 397', 'CIFAR 10'),
2016: ('CUB 200', 'Stanford Cars', 'FGVC Aircraft', 'MS COCO',
'Caltech 101', 'Oxford IIIT Pets', 'Stanford Dogs', 'Interact',
'VOC Actions', 'SVHN'),
2017: ('SUN Attribute', 'CIFAR 100'),
2018: ('Office Caltech', 'PACS', 'Caltech Camera Traps'),
2019: ('Trancos', 'Mall dataset', 'Fashion MNIST', 'NotMNIST',
'Tiny Imagenet', 'STL10', 'Food 101', 'MNIST-rot', 'AWA2',
'15 Scenes', 'Food 101 N', 'COIL 20', 'COIL 100'),
2020: ('ShanghaiTech', 'AnimalWeb', 'BIWI'),
2021: ('ImageNet', 'Oxford Flowers 102', 'DomainNet-Real',
'Pneumonia Chest X-ray', 'Path MNIST', 'NIH Chest X-ray',
'Tubercolosis', 'covid-19 x-ray', 'PatchCamelyon', 'DDSM',
'Synthetic COVID-19 Chest X-ray Dataset'),
},
NevisStreamVariant.LARGE_DATASET_ONLY: {
# Exclude datasets with less than 10_000 samples,
# all splits combined.
1989: (),
1992: (),
1998: (),
2000: (),
2001: (),
2002: (),
2003: (),
2004: ('MNIST',),
2005: ('Caltech 101',),
2006: ('ALOI',),
2007: (),
2008: (),
2009: ('Extended YaleB',),
2010: ('Pascal 2007',),
2011: ('Caltech 256', 'ImageNet', 'LFW', 'Belgium Traffic Sign Dataset',
'German Traffic Sign Recognition Benchmark'),
2012: (),
2013: ('IAPRTC-12', 'sketch dataset', 'Pascal 2012', 'NORB'),
2014: ('Wikipaintings',),
2015: ('MNIST', 'MIT Scenes', 'SUN 397', 'CIFAR 10'),
2016: ('CUB 200', 'Stanford Cars', 'FGVC Aircraft', 'MS COCO',
'Caltech 101', 'Oxford IIIT Pets', 'Stanford Dogs',
'Caltech 101 Silhouettes', 'VOC Actions', 'SVHN', 'Chars74K'),
2017: ('CelebA', 'LFWA', 'SUN Attribute', 'CIFAR 100'),
2018: ('USPS', 'MNIST-m', 'PACS', 'Caltech Camera Traps',
'EMNIST Balanced', 'CASIA-HWDB1.1'),
2019: ('Trancos', 'Mall dataset', 'Fashion MNIST', 'NotMNIST',
'Tiny Imagenet', 'STL10', 'Food 101', 'MNIST-rot', 'AWA2',
'15 Scenes', 'Food 101 N', 'COIL 20', 'COIL 100'),
2020: ('ShanghaiTech', 'AnimalWeb', 'BIWI'),
2021: ('ImageNet', 'Oxford Flowers 102', 'DomainNet-Real',
'Pneumonia Chest X-ray', 'Path MNIST', 'NIH Chest X-ray',
'Tubercolosis', 'covid-19 x-ray', 'PatchCamelyon', 'DDSM',
'Synthetic COVID-19 Chest X-ray Dataset')
},
}
# List of datasets for parallel runs. These should be automatically derived from
# the NevisStream; unfortunately this requires reverse-mapping names and opening
# the stream. To keep things simple we use a static list for now; should be
# changed when we refactor for the OSS release.
# All datasets in the SHORT stream, including held-out years
PARALLEL_DATASETS_SHORT = [
'COIL 100', 'MNIST', 'Pascal 2005', 'Caltech cars, motorbikes', 'UIUC cars',
'Pascal 2006', 'Caltech 101', 'Graz-02', '15 Scenes', 'Pascal 2007', 'LFW',
'sketch dataset', 'Brodatz', 'ImageNet', 'Pascal 2012', 'Caltech 256',
'CIFAR 100', 'CIFAR 10', 'USPS', 'MNIST', 'MNIST-m', 'Office Caltech',
'PACS', 'ISBI-ISIC 2017 melanoma classification challenge', 'Fashion MNIST',
'Stanford Dogs', 'CUB 200', 'Stanford Cars', 'FGVC Aircraft'
]
# All datasets in the FULL stream, including held-out years
PARALLEL_DATASETS = [
'Magellan Venus Volcanoes', 'Aberdeen face database.', 'LandSat UCI repo',
'Brodatz', 'Olivetti Face Dataset', 'COIL 20', 'COIL 100', 'MPEG-7',
'Butterfly dataset', 'MNIST', 'Caltech 101', 'UMIST', 'CMU AMP expression',
'Pascal 2005', 'Caltech cars, motorbikes', 'UIUC cars', 'ALOI',
'8 Scene Dataset', '15 Scenes', 'Pascal 2006', 'Extended YaleB',
'Pascal 2007', 'Graz-02', 'Olivetti Face Dataset', 'PPMI', 'Caltech 256',
'ImageNet', 'LFW', 'Oxford Flowers', 'Flicker Material Dataset',
'Oxford Flowers 102', 'Belgium Traffic Sign Dataset',
'German Traffic Sign Recognition Benchmark', 'Brodatz', 'VisTex', 'UMD',
'KTH-TIPS', 'UIUC texture', 'KTH-TIPS2-b', 'CVC-MUSCIMA', 'IAPRTC-12',
'sketch dataset', 'KTH-TIPS2-a', 'Pascal 2012', 'NORB', 'Wikipaintings',
'MNIST', 'MIT Scenes', 'SUN 397', 'CIFAR 10', 'CUB 200', 'Stanford Cars',
'FGVC Aircraft', 'DTD', 'MS COCO', 'Caltech 101', 'Oxford IIIT Pets',
'Stanford Dogs', 'ANIMAL', 'Caltech 101 Silhouettes', 'Interact',
'VOC Actions', 'SVHN', 'Chars74K', 'CelebA', 'LFWA', 'SUN Attribute',
'CIFAR 100', 'TID2008', 'TID2013', 'USPS', 'Semeion', 'MNIST-m',
'Office Caltech', 'PACS', 'Caltech Camera Traps', 'EMNIST Balanced',
'CASIA-HWDB1.1', 'ISBI-ISIC 2017 melanoma classification challenge',
'Trancos', 'Mall dataset', 'Fashion MNIST', 'NotMNIST', 'Tiny Imagenet',
'STL10', 'Food 101', 'MNIST-rot', 'AWA2', '15 Scenes', 'Food 101 N',
'COIL 20', 'COIL 100', 'ShanghaiTech', 'AnimalWeb', 'BIWI', 'ImageNet',
'Oxford Flowers 102', 'DomainNet-Real', 'Pneumonia Chest X-ray',
'Path MNIST', 'NIH Chest X-ray', 'Tubercolosis', 'covid-19 x-ray',
'PatchCamelyon', 'DDSM', 'Synthetic COVID-19 Chest X-ray Dataset'
]
DEFAULT_THREADPOOL_WORKERS = 30
class NevisSource(NamedTuple):
"""Represents a dataset implemented in nevis."""
name: str
class TFDSSource(NamedTuple):
"""Represents a dataset implemented in tfds."""
name: str
class KeyAndDataset(NamedTuple):
key: streams.DatasetKey
dataset: datasets.Dataset
class DatasetSplits(NamedTuple):
train: KeyAndDataset
dev: KeyAndDataset
train_and_dev: KeyAndDataset
dev_test: KeyAndDataset
test: KeyAndDataset
# pylint: disable=line-too-long
# pyformat: disable
DATASET_NAME_TO_SOURCE = {
'15 Scenes': NevisSource('scenes15'),
'8 Scene Dataset': NevisSource('scenes8'),
'Aberdeen face database.': NevisSource('aberdeen'),
'ALOI': NevisSource('aloi'),
'ANIMAL': NevisSource('animal'),
'AnimalWeb': NevisSource('animal_web'),
'AWA2': NevisSource('awa2'),
'Belgium Traffic Sign Dataset': NevisSource('belgium_tsc'),
'BIWI': NevisSource('biwi'),
'Brodatz': NevisSource('brodatz'),
'Butterfly dataset': NevisSource('butterflies'),
'Caltech 101 Silhouettes': NevisSource('silhouettes_28'),
'Caltech 101': TFDSSource('caltech101'),
'Caltech 256': NevisSource('caltech256'),
'Caltech Camera Traps': NevisSource('caltech_camera_traps'),
'Caltech cars, motorbikes': NevisSource('caltech_categories'),
'CASIA-HWDB1.1': NevisSource('casia_hwdb'),
'CelebA': TFDSSource('celeb_a'),
'Chars74K': NevisSource('chars74k'),
'CIFAR 10': TFDSSource('cifar10'),
'CIFAR 100': TFDSSource('cifar100'),
'CMU AMP expression': NevisSource('cmu_amp_expression'),
'COIL 100': TFDSSource('coil100'),
'COIL 20': NevisSource('coil20'),
'covid-19 x-ray': NevisSource('covid_19_xray'),
'CUB 200': TFDSSource('caltech_birds2011'),
'CVC-MUSCIMA': NevisSource('cvc_muscima'),
'DDSM': NevisSource('ddsm'),
'DomainNet-Real': TFDSSource('domainnet'),
'DTD': TFDSSource('dtd'),
'EMNIST Balanced': TFDSSource('emnist/balanced'),
'Extended YaleB': NevisSource('extended_yaleb'),
'Fashion MNIST': TFDSSource('fashion_mnist'),
'FGVC Aircraft': NevisSource('fgvc_aircraft_family'),
'Flicker Material Dataset': NevisSource('flickr_material_database'),
'Food 101 N': NevisSource('food101n'),
'Food 101': NevisSource('food101'),
'German Traffic Sign Recognition Benchmark': NevisSource('german_tsr'),
'Graz-02': NevisSource('ig02'),
'IAPRTC-12': NevisSource('iaprtc12'),
'ImageNet': TFDSSource('imagenet2012'),
'Interact': NevisSource('interact'),
'ISBI-ISIC 2017 melanoma classification challenge': NevisSource('melanoma'),
'KTH-TIPS': NevisSource('kth_tips'),
'KTH-TIPS2-a': NevisSource('kth_tips_2a'),
'KTH-TIPS2-b': NevisSource('kth_tips_2b'),
'LandSat UCI repo': NevisSource('landsat'),
'LFW': NevisSource('lfw'),
'LFWA': NevisSource('lfwa'),
'Magellan Venus Volcanoes': NevisSource('magellan_venus_volcanoes'),
'Mall dataset': NevisSource('mall'),
'MIT Scenes': NevisSource('mit_scenes'),
'MNIST-m': NevisSource('mnist_m'),
'MNIST-rot': NevisSource('mnist_rotation'),
'MNIST': TFDSSource('mnist'),
'MPEG-7': NevisSource('mpeg7'),
'MS COCO': NevisSource('coco_single_label'),
'NIH Chest X-ray': NevisSource('nih_chest_xray'),
'NORB': TFDSSource('smallnorb'),
'NotMNIST': NevisSource('not_mnist'),
'Office 31 amazon': NevisSource('office31_amazon'),
'Office 31 dslr': NevisSource('office31_dslr'),
'Office 31 webcam': NevisSource('office31_webcam'),
'Office Caltech': NevisSource('office_caltech_10'),
'Olivetti Face Dataset': NevisSource('olivetti_face'),
'Oxford Flowers 102': TFDSSource('oxford_flowers102'),
'Oxford Flowers': NevisSource('oxford_flowers_17'),
'Oxford IIIT Pets': TFDSSource('oxford_iiit_pet'),
'PACS': NevisSource('pacs'),
'Pascal 2005': NevisSource('pascal_voc2005'),
'Pascal 2006': NevisSource('pascal_voc2006'),
'Pascal 2007': TFDSSource('voc/2007'),
'Pascal 2012': TFDSSource('voc/2012'),
'PatchCamelyon': TFDSSource('patch_camelyon'),
'Path MNIST': NevisSource('path_mnist'),
'Pneumonia Chest X-ray': NevisSource('pneumonia_chest_xray'),
'PPMI': NevisSource('ppmi'),
'Semeion': NevisSource('semeion'),
'ShanghaiTech': NevisSource('shanghai_tech'),
'sketch dataset': NevisSource('sketch'),
'Stanford Cars': NevisSource('stanford_cars'),
'Stanford Dogs': TFDSSource('stanford_dogs'),
'STL10': TFDSSource('stl10'),
'SUN 397': TFDSSource('sun397'),
'SUN Attribute': NevisSource('sun_attributes'),
'SVHN': TFDSSource('svhn_cropped'),
'Synthetic COVID-19 Chest X-ray Dataset': NevisSource('synthetic_covid19_xray'),
'TID2008': NevisSource('tid2008'),
'TID2013': NevisSource('tid2013'),
'Tiny Imagenet': NevisSource('tiny_imagenet'),
'Trancos': NevisSource('trancos'),
'Tubercolosis': NevisSource('tubercolosis'),
'UIUC cars': NevisSource('uiuc_cars'),
'UIUC texture': NevisSource('uiuc_texture'),
'UMD': NevisSource('umd'),
'UMIST': NevisSource('umist'),
'USPS': NevisSource('usps'),
'VisTex': NevisSource('vistex'),
'VOC Actions': NevisSource('voc_actions'),
'Wikipaintings': NevisSource('wiki_paintings_style'),
}
# pyformat: enable
# pylint: enable=line-too-long
class NevisStream:
"""The NEVIS benchmark stream.
The stream adds a train event for each instance of the train data in the
stream.
Additionally, a predict event is added containing the test dataset after
every instance of a train dataset.
Once the stream is complete, a further predict event is added for every
seen train event. This makes it possible to compare the performance on tasks
from train time to the end of the stream.
"""
def __init__(
self,
stream_variant: NevisStreamVariant = NevisStreamVariant.FULL,
stop_year: int = DEFAULT_NEVIS_STOP_YEAR,
*,
predict_event_splits: Sequence[Split] = (Split.DEV_TEST,),
shuffle_seed: int = 1,
shuffle_within_year: bool = False,
shuffle_datasets_order: bool = False,
):
"""Instantiates a NEVIS task stream.
Args:
stream_variant: Which of the streams to use (see `NevisStreamVariant`).
stop_year: The stream will only include tasks before the given year.
predict_event_splits: Sequence of splits to use for prediction.
shuffle_seed: An integer denoting a seed for shuffling logic when
        `shuffle_within_year` or `shuffle_datasets_order` is active.
shuffle_within_year: Whether to shuffle the order of datasets within a
year.
shuffle_datasets_order: Whether to shuffle the order of datasets randomly
across years.
"""
logging.info('Reading NEVIS stream from NEVIS_STREAM_PER_YEAR.')
self._events, self._datasets_by_key = _get_events_and_lookup(
NEVIS_STREAM_PER_YEAR[stream_variant],
stop_year,
predict_event_splits=predict_event_splits,
shuffle_seed=shuffle_seed,
shuffle_within_year=shuffle_within_year,
shuffle_datasets_order=shuffle_datasets_order)
def get_dataset_by_key(self,
dataset_key: streams.DatasetKey) -> datasets.Dataset:
return self._datasets_by_key[dataset_key]
def events(self) -> Iterator[streams.Event]:
return iter(self._events)
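# Illustrative usage sketch (not part of the original module): iterate the
# stream and resolve each event's dataset key. Which datasets actually resolve
# depends on local data availability, so this only shows the call pattern.
def _example_nevis_stream_usage() -> None:
  stream = NevisStream(NevisStreamVariant.SHORT)
  for event in stream.events():
    if isinstance(event, streams.TrainingEvent):
      dataset = stream.get_dataset_by_key(event.train_and_dev_dataset_key)
    else:
      dataset = stream.get_dataset_by_key(event.dataset_key)
    logging.info('Event %s resolves to task %s', event, dataset.task_key)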
class IndividualDatasetStream:
"""A train and predict event for an individual, or a pair of datasets."""
def __init__(
self,
dataset_name: str,
second_dataset_name: Optional[str] = None,
predict_event_splits: Sequence[Split] = (Split.DEV_TEST,),
):
"""A stream with a train and predict event for an individual dataset.
Args:
dataset_name: One of the dataset names from `DATASET_NAME_TO_SOURCE`.
second_dataset_name: Optional second dataset in the stream. If it is
either None or equal to the first dataset, it will not be added.
predict_event_splits: Sequence of splits to use for prediction.
"""
dataset_split = _get_splits_for_dataset_name(dataset_name)
if dataset_split is None:
logging.warning('Skipping `%s`', dataset_name)
self._events = []
self._datasets_by_key = {}
else:
self._events = [
streams.TrainingEvent(
train_dataset_key=dataset_split.train.key,
dev_dataset_key=dataset_split.dev.key,
train_and_dev_dataset_key=dataset_split.train_and_dev.key),
]
for split in predict_event_splits:
self._events.append(
streams.PredictionEvent(split_to_key(split, dataset_split)))
self._datasets_by_key = {
dataset_split.train.key: dataset_split.train.dataset,
dataset_split.dev.key: dataset_split.dev.dataset,
dataset_split.train_and_dev.key: dataset_split.train_and_dev.dataset,
dataset_split.test.key: dataset_split.test.dataset,
dataset_split.dev_test.key: dataset_split.dev_test.dataset,
}
if second_dataset_name is None:
return
dataset_split = _get_splits_for_dataset_name(second_dataset_name)
if dataset_split is None:
raise ValueError('Could not find second dataset `%s`' %
second_dataset_name)
else:
self._events.append(
streams.TrainingEvent(
train_dataset_key=dataset_split.train.key,
dev_dataset_key=dataset_split.dev.key,
train_and_dev_dataset_key=dataset_split.train_and_dev.key))
for split in predict_event_splits:
self._events.append(
streams.PredictionEvent(split_to_key(split, dataset_split)))
self._datasets_by_key.update({
dataset_split.train.key: dataset_split.train.dataset,
dataset_split.dev.key: dataset_split.dev.dataset,
dataset_split.train_and_dev.key: dataset_split.train_and_dev.dataset,
dataset_split.test.key: dataset_split.test.dataset,
dataset_split.dev_test.key: dataset_split.dev_test.dataset,
})
def get_dataset_by_key(self,
dataset_key: streams.DatasetKey) -> datasets.Dataset:
return self._datasets_by_key[dataset_key]
def events(self) -> Iterator[streams.Event]:
return iter(self._events)
class AblationStream:
"""The NEVIS benchmark ablation stream."""
def __init__(self,
stream_variant: AblationStreamVariant,
meta_train_stop_year: int = DEFAULT_NEVIS_STOP_YEAR,
stop_year: int = DEFAULT_NEVIS_STOP_YEAR + 2,
*,
predict_event_splits: Sequence[Split] = (Split.DEV_TEST,),
**kwargs):
"""Instantiates a NEVIS ablation stream."""
logging.info('Reading NEVIS ablation stream.')
assert stop_year > meta_train_stop_year, ('Full stream stop year needs to '
'be larger than meta_train stop '
'year')
self._meta_train_stop_year = meta_train_stop_year
self._stop_year = stop_year
datasets_by_year = NEVIS_STREAM_PER_YEAR[NevisStreamVariant.FULL]
if stream_variant is AblationStreamVariant.REMOVE_FIRST_30_TASKS:
filtered_datasets_by_year = self._remove_k_datasets_from_stream(
datasets_by_year=datasets_by_year, k=30)
elif stream_variant is AblationStreamVariant.REMOVE_LAST_30_TASKS:
filtered_datasets_by_year = self._remove_k_datasets_from_stream(
datasets_by_year=datasets_by_year, k=30, reverse=True)
elif stream_variant is AblationStreamVariant.NO_IMAGENET:
filtered_datasets_by_year = remove_imagenet_from_stream(
datasets_by_year=datasets_by_year,
stop_year=self._meta_train_stop_year)
elif stream_variant is AblationStreamVariant.RANDOM_DATASET:
      assert 'num_random_datasets' in kwargs, (
          'num_random_datasets needed for defining random dataset stream')
num_random_datasets = kwargs['num_random_datasets']
random_seed = kwargs.get('random_seed', 0)
filtered_datasets_by_year = self._get_random_dataset_from_stream(
datasets_by_year=datasets_by_year,
num_random_datasets=num_random_datasets,
random_seed=random_seed)
else:
raise ValueError('Ablation stream variant not defined')
self._events, self._datasets_by_key = _get_events_and_lookup(
filtered_datasets_by_year,
stop_year,
predict_event_splits=predict_event_splits)
def get_dataset_by_key(self,
dataset_key: streams.DatasetKey) -> datasets.Dataset:
return self._datasets_by_key[dataset_key]
def events(self) -> Iterator[streams.Event]:
return iter(self._events)
def _get_random_dataset_from_stream(
self, datasets_by_year: Mapping[int,
Sequence[str]], num_random_datasets: int,
random_seed: int) -> Mapping[int, Sequence[str]]:
"""Randomly picks datasets from a stream."""
rng = np.random.default_rng(random_seed)
train_stream_tasks = []
test_stream_tasks = []
for year, dataset_names_by_year in datasets_by_year.items():
if year >= self._meta_train_stop_year:
test_stream_tasks += [
(year, dataset_name) for dataset_name in dataset_names_by_year
]
else:
train_stream_tasks += [
(year, dataset_name) for dataset_name in dataset_names_by_year
]
assert num_random_datasets > len(
test_stream_tasks), 'Need at least one dataset for train stream.'
    # Only shuffle tasks in the training stream.
rng.shuffle(train_stream_tasks)
random_stream_tasks = test_stream_tasks + train_stream_tasks
random_stream_tasks = random_stream_tasks[:num_random_datasets]
result_datasets_by_year = collections.defaultdict(list)
for year, dataset_names_by_year in datasets_by_year.items():
# Retain datasets in the random stream and follow the within-year order.
for dataset_name in dataset_names_by_year:
if (year, dataset_name) in random_stream_tasks:
result_datasets_by_year[year].append(dataset_name)
filtered_datasets_by_year = {}
for year, dataset_names_by_year in result_datasets_by_year.items():
filtered_datasets_by_year[year] = tuple(
dataset for dataset in dataset_names_by_year)
return filtered_datasets_by_year
def _remove_k_datasets_from_stream(
self,
datasets_by_year: Mapping[int, Sequence[str]],
k: int,
reverse=False) -> Mapping[int, Sequence[str]]:
"""Removes k tasks from stream.
Args:
datasets_by_year: A stream of datasets by year.
k: number of tasks to remove from the stream.
      reverse: If False, remove the first k datasets from the stream; if True,
        remove the last k datasets instead.
Returns:
A stream of datasets with k datasets removed.
"""
filtered_datasets_by_year = {}
dataset_index = 0
for year, dataset_names_by_year in sorted(
datasets_by_year.items(), reverse=reverse):
if year >= self._meta_train_stop_year or dataset_index >= k:
filtered_datasets_by_year[year] = dataset_names_by_year
else:
num_skipped_dataset = min(len(dataset_names_by_year), k - dataset_index)
task_list = dataset_names_by_year[num_skipped_dataset:]
dataset_index += len(dataset_names_by_year)
if task_list:
filtered_datasets_by_year[year] = task_list
return filtered_datasets_by_year
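# Worked example (illustrative, not part of the original module) of
# AblationStream._remove_k_datasets_from_stream with meta_train_stop_year=2020,
# k=2 and reverse=False. Given
#   {2018: ('A', 'B', 'C'), 2019: ('D',), 2020: ('E',)}
# the first two pre-2020 datasets ('A', 'B') are dropped and the rest kept:
#   {2018: ('C',), 2019: ('D',), 2020: ('E',)}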
def remove_imagenet_from_stream(
datasets_by_year: Mapping[int, Sequence[str]],
stop_year: int = DEFAULT_NEVIS_STOP_YEAR) -> Mapping[int, Sequence[str]]:
"""Removes ImageNet from stream."""
filtered_datasets_by_year = {}
for year, dataset_names_by_year in datasets_by_year.items():
if year >= stop_year:
filtered_datasets_by_year[year] = dataset_names_by_year
else:
filtered_datasets_by_year[year] = tuple(
dataset_name for dataset_name in dataset_names_by_year
if dataset_name != 'ImageNet')
return filtered_datasets_by_year
def datasets_in_stream(
stream_variant: NevisStreamVariant = NevisStreamVariant.FULL,
stop_year: int = DEFAULT_NEVIS_STOP_YEAR,
remove_duplicates: bool = True,
check_availability: bool = False,
) -> Sequence[str]:
"""Returns the list of datasets in the stream.
Args:
stream_variant: Which of the streams to use (see `NevisStreamVariant`).
stop_year: Only include datasets before the given year.
remove_duplicates: Remove duplicate datasets or not.
check_availability: Only include datasets that are available.
Returns:
    A list of dataset names.
"""
dataset_names = []
for year, datasets_in_year in NEVIS_STREAM_PER_YEAR[stream_variant].items():
if year >= stop_year:
break
dataset_names.extend(datasets_in_year)
if remove_duplicates:
# Remove duplicates while preserving order
dataset_names = list(dict.fromkeys(dataset_names))
if check_availability:
# Filter out datasets for which we can't load split information.
with futures.ThreadPoolExecutor(max_workers=10) as executor:
# pylint: disable=g-long-lambda
dataset_names = executor.map(
lambda name: name
if _get_splits_for_dataset_name(name) else None, dataset_names)
dataset_names = list(filter(None, dataset_names))
return dataset_names
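# Illustrative usage sketch (not part of the original module): list the unique
# dataset names of the SHORT stream up to the default development stop year.
def _example_datasets_in_stream_usage() -> None:
  names = datasets_in_stream(
      NevisStreamVariant.SHORT,
      stop_year=DEFAULT_NEVIS_STOP_YEAR,
      remove_duplicates=True,
      check_availability=False)
  logging.info('SHORT stream contains %d unique datasets.', len(names))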
def _filter_to_datasets_that_are_available(
dataset_names: List[str]) -> List[str]:
"""Returns datasets with unavailable datasets filtered."""
def dataset_name_or_none(dataset_name):
    if _get_splits_for_dataset_name(dataset_name) is not None:
return dataset_name
return None
# This operation is slow, so we use a thread pool to parallelize the IO.
with futures.ThreadPoolExecutor(
max_workers=DEFAULT_NUM_THREADPOOL_WORKERS) as pool:
dataset_names = pool.map(dataset_name_or_none, dataset_names)
return [name for name in dataset_names if name is not None]
def split_to_key(split: Split,
dataset_split: DatasetSplits) -> streams.DatasetKey:
if split is Split.DEV:
return dataset_split.dev.key
elif split is Split.DEV_TEST:
return dataset_split.dev_test.key
elif split is Split.TEST:
return dataset_split.test.key
else:
raise ValueError(f'Unsupported split: {split}')
def _get_events_and_lookup(
datasets_by_year: Mapping[int, Sequence[str]],
stop_year: int,
*,
predict_event_splits: Sequence[Split] = (Split.DEV_TEST,),
shuffle_seed: int = 1,
shuffle_within_year: bool = False,
shuffle_datasets_order: bool = False,
) -> Tuple[Sequence[streams.Event], Mapping[streams.DatasetKey,
datasets.Dataset]]:
"""Constructs a sequence of events and a dataset lookup."""
events = []
lookup = {}
datasets_by_key = {}
dataset_names = set()
for dataset_names_by_year in datasets_by_year.values():
dataset_names.update(dataset_names_by_year)
lookup = _build_lookup(sorted(dataset_names))
rng = np.random.default_rng(shuffle_seed)
iterable_datasets_by_year = sorted(datasets_by_year.items())
if shuffle_datasets_order:
rng.shuffle(iterable_datasets_by_year)
for year, dataset_names in iterable_datasets_by_year:
if year >= stop_year:
break
if shuffle_within_year:
dataset_names = list(dataset_names)
rng.shuffle(dataset_names)
for dataset_name in dataset_names:
result = lookup[dataset_name]
if result is None:
logging.warning('Skipping for %d: `%s`', year, dataset_name)
continue
train_event = streams.TrainingEvent(
train_dataset_key=result.train.key,
dev_dataset_key=result.dev.key,
train_and_dev_dataset_key=result.train_and_dev.key)
events.append(train_event)
for split in predict_event_splits:
events.append(streams.PredictionEvent(split_to_key(split, result)))
datasets_by_key[result.train.key] = result.train.dataset
datasets_by_key[result.test.key] = result.test.dataset
datasets_by_key[result.dev.key] = result.dev.dataset
datasets_by_key[result.dev_test.key] = result.dev_test.dataset
datasets_by_key[result.train_and_dev.key] = result.train_and_dev.dataset
total_available_datasets = sum(1 for x in lookup.values() if x is not None)
logging.info('Total available datasets: %d/%d', total_available_datasets,
len(lookup.keys()))
return events, datasets_by_key
def _get_splits_for_dataset_name(dataset_name: str) -> Optional[DatasetSplits]:
"""Gets train and test datasets for a dataset by name."""
if dataset_name not in DATASET_NAME_TO_SOURCE:
raise ValueError(f'Unknown source for dataset named: `{dataset_name}`')
source = DATASET_NAME_TO_SOURCE.get(dataset_name)
if source is None:
logging.warning('Source not yet available for `%s`', dataset_name)
return None
try:
result = _get_splits_for_source(source)
except dataset_loader.DatasetNotReadyError:
logging.warning('Dataset found but not yet ready: %s', dataset_name)
return None
if result is None:
logging.warning('Dataset found but not yet available: `%s`', dataset_name)
return None
return result
def _get_splits_for_source(
source: Union[NevisSource, TFDSSource]) -> DatasetSplits:
"""Constructs the keys and datasets for the given source."""
if isinstance(source, NevisSource):
return _dataset_splits_for_nevis(source)
elif isinstance(source, TFDSSource):
return _dataset_splits_for_tfds(source)
raise ValueError(f'Unknown source type: {type(source)}')
_TFDS_DATASETS_TRAIN_TEST_DATASETS = [
'mnist',
'cifar10',
'cifar100',
'caltech101',
'caltech_birds2011',
'emnist/balanced',
'fashion_mnist',
'oxford_iiit_pet',
'stanford_dogs',
'stl10',
'svhn_cropped',
]
_TFDS_DATASETS_TRAIN_VALIDATION_TEST_DATASETS = [
'dtd',
'oxford_flowers102',
'voc/2007',
'patch_camelyon',
'sun397',
'celeb_a',
]
def _dataset_splits_for_tfds(source: TFDSSource) -> DatasetSplits:
"""Constructs key and dataset for tfds dataset."""
dataset_name = source.name
dataset_key_prefix = _canonicalize_name(dataset_name)
test_key = f'{dataset_key_prefix}_test'
dev_test_key = f'{dataset_key_prefix}_dev_test'
train_key = f'{dataset_key_prefix}_train'
train_and_dev_key = f'{dataset_key_prefix}_train_and_dev'
dev_key = f'{dataset_key_prefix}_dev'
dataset_info = tfds.builder(dataset_name).info
if dataset_name in _TFDS_DATASETS_TRAIN_TEST_DATASETS:
train_fraction = 0.7
dev_fraction = 0.15
num_examples = dataset_info.splits['train'].num_examples
num_train_examples = int(num_examples * train_fraction)
num_dev_examples = int(num_examples * dev_fraction)
train_dataset = tfds_builder.get_dataset(
dataset_name, split='train', end=num_train_examples)
dev_dataset = tfds_builder.get_dataset(
dataset_name,
split='train',
start=num_train_examples,
end=num_train_examples + num_dev_examples)
train_and_dev_dataset = tfds_builder.get_dataset(
dataset_name, split='train', end=num_train_examples + num_dev_examples)
dev_test_dataset = tfds_builder.get_dataset(
dataset_name,
split='train',
start=num_train_examples + num_dev_examples)
test_dataset = tfds_builder.get_dataset(dataset_name, split='test')
elif dataset_name in _TFDS_DATASETS_TRAIN_VALIDATION_TEST_DATASETS:
train_fraction = 0.8
dev_fraction = 0.2
num_examples = dataset_info.splits['train'].num_examples
num_train_examples = int(num_examples * train_fraction)
train_dataset = tfds_builder.get_dataset(
dataset_name, split='train', end=num_train_examples)
dev_dataset = tfds_builder.get_dataset(
dataset_name, split='train', start=num_train_examples)
train_and_dev_dataset = tfds_builder.get_dataset(
dataset_name, split='train')
dev_test_dataset = tfds_builder.get_dataset(
dataset_name, split='validation')
test_dataset = tfds_builder.get_dataset(dataset_name, split='test')
elif dataset_name == 'coil100':
train_dataset = coil100.get_dataset(split='train')
dev_dataset = coil100.get_dataset(split='dev')
dev_test_dataset = coil100.get_dataset(split='dev_test')
test_dataset = coil100.get_dataset(split='test')
train_and_dev_dataset = coil100.get_dataset(split='train_and_dev')
elif dataset_name == 'domainnet':
train_dataset = domainnet.get_dataset(split='train')
dev_dataset = domainnet.get_dataset(split='dev')
dev_test_dataset = domainnet.get_dataset(split='dev_test')
test_dataset = domainnet.get_dataset(split='test')
train_and_dev_dataset = domainnet.get_dataset(split='train_and_dev')
elif dataset_name == 'smallnorb':
train_dataset = smallnorb.get_dataset(split='train')
dev_dataset = smallnorb.get_dataset(split='dev')
dev_test_dataset = smallnorb.get_dataset(split='dev_test')
test_dataset = smallnorb.get_dataset(split='test')
train_and_dev_dataset = smallnorb.get_dataset(split='train_and_dev')
elif dataset_name == 'imagenet2012':
train_fraction = 0.9
dev_fraction = 0.05
num_examples = dataset_info.splits['train'].num_examples
num_train_examples = int(num_examples * train_fraction)
num_dev_examples = int(num_examples * dev_fraction) # 64_058 images
train_dataset = tfds_builder.get_dataset(
dataset_name, split='train', end=num_train_examples)
dev_dataset = tfds_builder.get_dataset(
dataset_name,
split='train',
start=num_train_examples,
end=num_train_examples + num_dev_examples)
train_and_dev_dataset = tfds_builder.get_dataset(
dataset_name, split='train', end=num_train_examples + num_dev_examples)
dev_test_dataset = tfds_builder.get_dataset(
dataset_name,
split='train',
start=num_train_examples + num_dev_examples)
# Use provided validation split for actual testing.
test_dataset = tfds_builder.get_dataset(dataset_name, split='validation')
elif dataset_name == 'voc/2012':
train_fraction = 0.8
dev_test_fraction = 0.5
num_examples = dataset_info.splits['train'].num_examples
num_train_examples = int(num_examples * train_fraction) # 4_574 images
num_val_examples = dataset_info.splits['validation'].num_examples
num_dev_test_examples = int(num_val_examples *
dev_test_fraction) # 2_911 images
train_dataset = tfds_builder.get_dataset(
dataset_name, split='train', end=num_train_examples)
dev_dataset = tfds_builder.get_dataset(
dataset_name, split='train', start=num_train_examples)
train_and_dev_dataset = tfds_builder.get_dataset(
dataset_name, split='train')
dev_test_dataset = tfds_builder.get_dataset(
dataset_name, split='validation', end=num_dev_test_examples)
# Use provided validation split for actual testing.
test_dataset = tfds_builder.get_dataset(
dataset_name, split='validation', start=num_dev_test_examples)
else:
raise NotImplementedError(f'TFDS dataset {dataset_name} not available')
return DatasetSplits(
train=KeyAndDataset(train_key, train_dataset),
dev=KeyAndDataset(dev_key, dev_dataset),
train_and_dev=KeyAndDataset(train_and_dev_key, train_and_dev_dataset),
dev_test=KeyAndDataset(dev_test_key, dev_test_dataset),
test=KeyAndDataset(test_key, test_dataset),
)
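# Illustrative helper (not part of the original module) spelling out the split
# arithmetic used above for TRAIN_TEST datasets. Assuming a TFDS train split
# with 60_000 examples (as for MNIST), the 0.7/0.15 fractions yield 42_000
# train, 9_000 dev and the remaining 9_000 dev_test examples; the official
# 'test' split stays as the held-out test set.
def _example_tfds_split_sizes(
    num_examples: int = 60_000) -> Tuple[int, int, int]:
  num_train = int(num_examples * 0.7)
  num_dev = int(num_examples * 0.15)
  num_dev_test = num_examples - num_train - num_dev
  return num_train, num_dev, num_dev_test  # (42_000, 9_000, 9_000) for MNIST.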
def _dataset_splits_for_nevis(source: NevisSource) -> DatasetSplits:
"""Constructs key and dataset for nevis dataset."""
dataset_key_prefix = _canonicalize_name(source.name)
train_key = f'{dataset_key_prefix}_train'
test_key = f'{dataset_key_prefix}_test'
dev_test_key = f'{dataset_key_prefix}_dev_test'
train_and_dev_key = f'{dataset_key_prefix}_train_and_dev'
dev_key = f'{dataset_key_prefix}_dev'
train_dataset = nevis_dataset_loader.get_dataset(
source.name, 'train', root_dir=NEVIS_DATA_DIR)
dev_test_dataset = nevis_dataset_loader.get_dataset(
source.name, 'dev-test', root_dir=NEVIS_DATA_DIR)
test_dataset = nevis_dataset_loader.get_dataset(
source.name, 'test', root_dir=NEVIS_DATA_DIR)
dev_dataset = nevis_dataset_loader.get_dataset(
source.name, 'dev', root_dir=NEVIS_DATA_DIR)
train_and_dev_dataset = nevis_dataset_loader.get_dataset(
source.name, 'train_and_dev', root_dir=NEVIS_DATA_DIR)
return DatasetSplits(
train=KeyAndDataset(train_key, train_dataset),
dev=KeyAndDataset(dev_key, dev_dataset),
train_and_dev=KeyAndDataset(train_and_dev_key, train_and_dev_dataset),
dev_test=KeyAndDataset(dev_test_key, dev_test_dataset),
test=KeyAndDataset(test_key, test_dataset),
)
def _build_lookup(
dataset_names: Sequence[str]) -> Mapping[str, Optional[DatasetSplits]]:
"""Creates a lookup for given dataset names."""
with futures.ThreadPoolExecutor(
max_workers=DEFAULT_THREADPOOL_WORKERS) as executor:
result = list(executor.map(_get_splits_for_dataset_name, dataset_names))
return dict(zip(dataset_names, result))
def _canonicalize_name(s: str) -> str:
"""Translates special characters in datasets names to underscores."""
return s.translate(str.maketrans(' -/~', '____'))
|
dm_nevis-master
|
dm_nevis/streams/nevis_stream.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builders for Nevis datasets."""
import dataclasses
import os
from typing import Callable, Optional
from absl import logging
from dm_nevis.benchmarker.datasets import dataset_builders
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import tasks
from dm_nevis.datasets_storage import dataset_loader
from dm_nevis.datasets_storage import encoding
import tensorflow as tf
_MULTI_LABEL_DATASETS = [
"sun_attributes",
"voc_actions",
"pascal_voc2007",
"iaprtc12",
"nih_chest_xray",
"awa2",
"biwi",
]
def _nevis_builder_fn(
name: str,
split: str,
version: str,
start: Optional[int],
end: Optional[int],
to_minibatch_fn: Callable[..., datasets.MiniBatch],
path: Optional[str] = None,
) -> datasets.DatasetBuilderFn:
"""Builds a builder_fn for nevis datasets."""
outer_start, outer_end = start, end
del start, end
def builder_fn(shuffle: bool,
start: Optional[int] = None,
end: Optional[int] = None) -> tf.data.Dataset:
# Combine inner and outer interval boundaries
start, end = dataset_builders.combine_indices(outer_start, start, outer_end,
end)
if path:
dataset = dataset_loader.load_dataset_from_path(path, split)
else:
dataset = dataset_loader.load_dataset(name, split, version)
ds = dataset.builder_fn(shuffle=shuffle, start=start, end=end)
return ds.map(
to_minibatch_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return builder_fn
def get_dataset(dataset: str,
split: str,
*,
root_dir: Optional[str] = None,
version: str = "stable",
start: Optional[int] = None,
end: Optional[int] = None) -> datasets.Dataset:
"""Get a Nevis dataset.
Args:
dataset: The name of the dataset to load.
split: The name of the split to load.
root_dir: If provided, loads data from this root directory, instead of from
the default paths.
version: If no root_dir is provided, load the version specified by this
name.
start: Offset from the start of the dataset to load.
    end: Offset from the start of the dataset at which loading stops.
Returns:
A dataset in the form of a datasets.Dataset.
"""
# TODO: Consider only supporting loading by path.
path = os.path.join(root_dir, dataset) if root_dir else None
if path:
metadata = dataset_loader.get_metadata_from_path(path)
else:
metadata = dataset_loader.get_metadata(dataset, version=version)
available_splits = metadata.additional_metadata["splits"]
num_classes = metadata.num_classes
if split not in available_splits:
raise ValueError(f"Requested nevis dataset `{dataset}`, split `{split}` "
f"but available splits are {available_splits}")
max_end = metadata.additional_metadata["num_data_points_per_split"][split]
num_examples = (end or max_end) - (start or 0)
if dataset in _MULTI_LABEL_DATASETS:
task_metadata = tasks.MultiLabelClassificationMetadata(
num_classes=num_classes)
task_key = tasks.TaskKey(dataset, tasks.TaskKind.MULTI_LABEL_CLASSIFICATION,
task_metadata)
def to_minibatch(data) -> datasets.MiniBatch:
multi_label_one_hot = tf.reduce_sum(
tf.one_hot(data["multi_label"], num_classes), axis=0)
return datasets.MiniBatch(
image=data[encoding.DEFAULT_IMAGE_FEATURE_NAME],
label=None,
multi_label_one_hot=multi_label_one_hot,
)
else:
task_metadata = tasks.ClassificationMetadata(num_classes=num_classes)
task_key = tasks.TaskKey(dataset, tasks.TaskKind.CLASSIFICATION,
task_metadata)
def to_minibatch(data) -> datasets.MiniBatch:
return datasets.MiniBatch(
image=data[encoding.DEFAULT_IMAGE_FEATURE_NAME],
label=data[encoding.DEFAULT_LABEL_FEATURE_NAME],
multi_label_one_hot=None,
)
# TODO: Remove this workaround.
if dataset == "biwi":
logging.warning("Applying patch to BIWI labels")
patched_to_minibatch = lambda x: _patch_biwi_minibatch(to_minibatch(x))
else:
patched_to_minibatch = to_minibatch
builder_fn = _nevis_builder_fn(
name=dataset,
split=split,
version=version,
start=start,
end=end,
to_minibatch_fn=patched_to_minibatch,
path=path)
return datasets.Dataset(
builder_fn=builder_fn, task_key=task_key, num_examples=num_examples)
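# Illustrative usage sketch (not part of the original module). The root
# directory below is hypothetical; it only shows how a Nevis dataset split is
# loaded and inspected through the returned `datasets.Dataset`.
def _example_get_dataset_usage() -> None:
  dataset = get_dataset("coil20", "train", root_dir="/tmp/nevis_data")
  logging.info("Loaded %d examples for task %s", dataset.num_examples,
               dataset.task_key)
  for example in dataset.builder_fn(shuffle=False).take(1):
    logging.info("First image has shape %s", example.image.shape)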
@tf.function
def _patch_biwi_minibatch(batch: datasets.MiniBatch) -> datasets.MiniBatch:
"""Fix an off-by-one-error in BIWI.
TODO: Fix this in the underlying data.
Due to a boundary error, angles that should have been [0,0,0,0,1] were
assigned the value [0,0,0,0,0], and the 1 was added to the following bucket.
This function fixes the bug.
Args:
batch: The batch to fix.
Returns:
A batch with the fix in place.
"""
def fix_angle(angle):
if tf.reduce_max(angle) == 0:
return tf.constant([0, 0, 0, 0, 1], dtype=angle.dtype)
elif tf.reduce_sum(angle) == 2:
return angle - tf.constant([1, 0, 0, 0, 0], dtype=angle.dtype)
else:
return angle
label = batch.multi_label_one_hot
a1 = fix_angle(label[0:5])
a2 = fix_angle(label[5:10])
a3 = fix_angle(label[10:15])
fixed_label = tf.concat([a1, a2, a3], axis=0)
return dataclasses.replace(batch, multi_label_one_hot=fixed_label)
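# Worked example (illustrative, not part of the original module) of the patch
# above on a single 15-dimensional BIWI label:
#   [0,0,0,0,0, 2,0,0,0,0, 1,0,0,0,0]
# The first angle block has max 0 and becomes [0,0,0,0,1]; the second block
# sums to 2 and loses the spurious count in its first bucket, giving
# [1,0,0,0,0]; the third block is already valid and is left unchanged:
#   [0,0,0,0,1, 1,0,0,0,0, 1,0,0,0,0]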
|
dm_nevis-master
|
dm_nevis/streams/nevis.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines a stream where all data is available in tfds."""
from typing import Iterator
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import streams
from dm_nevis.benchmarker.datasets.builders import tfds_builder
DEV_SIZE = 1_000
class ExampleStream:
"""An example stream over tfds datasets."""
def __init__(self):
# These datasets do not have predefined dev splits, so we create
# our own splits, where the dev set is assigned `DEV_SIZE` examples.
self._datasets_by_key = {
# MNIST
'mnist_train_and_dev':
tfds_builder.get_dataset(dataset_name='mnist', split='train'),
'mnist_train':
tfds_builder.get_dataset(
dataset_name='mnist', split='train', start=DEV_SIZE),
'mnist_dev':
tfds_builder.get_dataset(
dataset_name='mnist', split='train', end=DEV_SIZE),
'mnist_test':
tfds_builder.get_dataset(dataset_name='mnist', split='test'),
# CIFAR10
'cifar10_train_and_dev':
tfds_builder.get_dataset(dataset_name='cifar10', split='train'),
'cifar10_train':
tfds_builder.get_dataset(
dataset_name='cifar10', split='train', start=DEV_SIZE),
'cifar10_dev':
tfds_builder.get_dataset(
dataset_name='cifar10', split='train', end=DEV_SIZE),
'cifar10_test':
tfds_builder.get_dataset(dataset_name='cifar10', split='test'),
# CIFAR100
'cifar100_train_and_dev':
tfds_builder.get_dataset(dataset_name='cifar100', split='train'),
'cifar100_train':
tfds_builder.get_dataset(
dataset_name='cifar100', split='train', start=DEV_SIZE),
'cifar100_dev':
tfds_builder.get_dataset(
dataset_name='cifar100', split='train', end=DEV_SIZE),
'cifar100_test':
tfds_builder.get_dataset(dataset_name='cifar100', split='test'),
}
self._events = [
# MNIST
streams.TrainingEvent('mnist_train', 'mnist_train_and_dev',
'mnist_dev'),
streams.PredictionEvent('mnist_test'),
# CIFAR10
streams.TrainingEvent('cifar10_train', 'cifar10_train_and_dev',
'cifar10_dev'),
streams.PredictionEvent('cifar10_test'),
# CIFAR100
streams.TrainingEvent('cifar100_train', 'cifar100_train_and_dev',
'cifar100_dev'),
streams.PredictionEvent('cifar100_test'),
]
def get_dataset_by_key(self,
dataset_key: streams.DatasetKey) -> datasets.Dataset:
return self._datasets_by_key[dataset_key]
def events(self) -> Iterator[streams.Event]:
return iter(self._events)
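# Usage sketch (not part of the original module): iterate the stream's events and
# report dataset sizes. It assumes the tfds datasets above are available locally.
if __name__ == '__main__':
  example_stream = ExampleStream()
  for event in example_stream.events():
    if isinstance(event, streams.TrainingEvent):
      train = example_stream.get_dataset_by_key(event.train_dataset_key)
      print(f'train event: {event.train_dataset_key} '
            f'({train.num_examples} examples)')
    elif isinstance(event, streams.PredictionEvent):
      test = example_stream.get_dataset_by_key(event.dataset_key)
      print(f'prediction event: {event.dataset_key} '
            f'({test.num_examples} examples)')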
|
dm_nevis-master
|
dm_nevis/streams/example_stream.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Split-Imagenet stream that consists of 100 ten-way classification tasks."""
from concurrent import futures
from typing import Dict, Iterator, Mapping, Sequence, Tuple
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import streams
from dm_nevis.benchmarker.datasets.builders import split_imagenet
from dm_nevis.streams import nevis_stream
import numpy as np
DEFAULT_THREADPOOL_WORKERS = 30
class SplitImagenetStream:
"""The split ImageNet benchmark stream.
  The stream adds a training event for each split-ImageNet task. After each
  training event, a prediction event is added for every split requested in
  `predict_event_splits`.
"""
def __init__(
self,
*,
predict_event_splits: Sequence[nevis_stream.Split] = (
nevis_stream.Split.DEV_TEST,),
shuffle_seed: int = 1,
shuffle_tasks_order: bool = False,
):
"""Instantiate a Split-Imagenet stream.
    ImageNet has 1000 classes; we split them into 100 disjoint subsets of
    10 classes each to create 100 ten-way classification tasks.
    Args:
      predict_event_splits: Sequence of splits to use for prediction.
      shuffle_seed: An integer seed for the shuffling logic, used when
        `shuffle_tasks_order` is enabled.
      shuffle_tasks_order: Whether to shuffle the order of the tasks.
"""
self._events, self._datasets_by_key = _get_events_and_lookup(
predict_event_splits=predict_event_splits,
shuffle_seed=shuffle_seed,
shuffle_datasets_order=shuffle_tasks_order)
def get_dataset_by_key(self,
dataset_key: streams.DatasetKey) -> datasets.Dataset:
return self._datasets_by_key[dataset_key]
def events(self) -> Iterator[streams.Event]:
return iter(self._events)
def _get_events_and_lookup(
*,
predict_event_splits: Sequence[nevis_stream.Split] = (
nevis_stream.Split.DEV_TEST,),
shuffle_seed: int = 1,
shuffle_datasets_order: bool = False,
) -> Tuple[Sequence[streams.Event], Mapping[streams.DatasetKey,
datasets.Dataset]]:
"""Constructs a sequence of events and a dataset lookup."""
  events = []
  datasets_by_key = {}
  task_indices = list(range(split_imagenet.N_TASKS))
  lookup = _build_lookup(task_indices)
rng = np.random.default_rng(shuffle_seed)
if shuffle_datasets_order:
task_indices = list(task_indices)
rng.shuffle(task_indices)
for task_index in task_indices:
result = lookup[task_index]
if result is None:
raise ValueError(f'Unable to read dataset for task_index = {task_index}')
train_event = streams.TrainingEvent(
train_dataset_key=result.train.key,
dev_dataset_key=result.dev.key,
train_and_dev_dataset_key=result.train_and_dev.key)
events.append(train_event)
for split in predict_event_splits:
events.append(
streams.PredictionEvent(nevis_stream.split_to_key(split, result)))
datasets_by_key[result.train.key] = result.train.dataset
datasets_by_key[result.test.key] = result.test.dataset
datasets_by_key[result.dev.key] = result.dev.dataset
datasets_by_key[result.dev_test.key] = result.dev_test.dataset
datasets_by_key[result.train_and_dev.key] = result.train_and_dev.dataset
return events, datasets_by_key
def _build_lookup(
task_indices: Sequence[int]) -> Dict[int, nevis_stream.DatasetSplits]:
"""Creates a lookup for given dataset names."""
with futures.ThreadPoolExecutor(
max_workers=DEFAULT_THREADPOOL_WORKERS) as executor:
    result = executor.map(_get_dataset_splits_by_task, task_indices)
return dict(zip(task_indices, result))
def _get_dataset_splits_by_task(task_index: int) -> nevis_stream.DatasetSplits:
"""Construct key and dataset for tfds dataset."""
train_dataset = split_imagenet.get_dataset(
task_index=task_index, split='train')
dev_dataset = split_imagenet.get_dataset(task_index=task_index, split='dev')
train_and_dev_dataset = split_imagenet.get_dataset(
task_index=task_index, split='train_and_dev')
dev_test_dataset = split_imagenet.get_dataset(
task_index=task_index, split='dev_test')
test_dataset = split_imagenet.get_dataset(task_index=task_index, split='test')
dataset_key_prefix = train_dataset.task_key.name
train_key = f'{dataset_key_prefix}_train'
dev_key = f'{dataset_key_prefix}_dev'
train_and_dev_key = f'{dataset_key_prefix}_train_and_dev'
dev_test_key = f'{dataset_key_prefix}_dev_test'
test_key = f'{dataset_key_prefix}_test'
return nevis_stream.DatasetSplits(
train=nevis_stream.KeyAndDataset(train_key, train_dataset),
dev=nevis_stream.KeyAndDataset(dev_key, dev_dataset),
train_and_dev=nevis_stream.KeyAndDataset(train_and_dev_key,
train_and_dev_dataset),
dev_test=nevis_stream.KeyAndDataset(dev_test_key, dev_test_dataset),
test=nevis_stream.KeyAndDataset(test_key, test_dataset),
)
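# Usage sketch (not part of the original module): build a task-shuffled stream
# and print the first few events. It assumes the split-ImageNet data is available.
if __name__ == '__main__':
  demo_stream = SplitImagenetStream(shuffle_tasks_order=True, shuffle_seed=7)
  for i, event in enumerate(demo_stream.events()):
    print(event)
    if i >= 5:
      break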
|
dm_nevis-master
|
dm_nevis/streams/split_imagenet_stream.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A binary to iterate through the nevis stream and verify that all data can be read.
This binary is for testing that all of the datasets can successfully be
located and accessed.
"""
from typing import Sequence
from absl import app
from absl import flags
from absl import logging
from dm_nevis.benchmarker.datasets import streams
from dm_nevis.streams import nevis_stream
from tensorflow.io import gfile
import tensorflow_datasets as tfds
_OUTPATH = flags.DEFINE_string('outpath', '/tmp/output.txt',
'Destination to write to.')
_STREAM_VARIANT = flags.DEFINE_enum_class('stream_variant',
nevis_stream.NevisStreamVariant.FULL,
nevis_stream.NevisStreamVariant,
'Stream to iterate')
_STREAM_STOP_YEAR = flags.DEFINE_integer(
    'stream_stop_year', 2022, 'The year at which to stop the stream (exclusive).')
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
logging.info('General statistics about datasets for streams:')
datasets = nevis_stream.DATASET_NAME_TO_SOURCE
num_available_datasets = len(
[k for k, v in datasets.items() if v is not None])
num_tfds_datasets = len([
k for k, v in datasets.items() if isinstance(v, nevis_stream.TFDSSource)
])
num_nevis_datasets = len([
k for k, v in datasets.items() if isinstance(v, nevis_stream.NevisSource)
])
logging.info('Num known datasets %d', len(datasets))
logging.info('Num available datasets %d', num_available_datasets)
logging.info('Num tfds datasets %d', num_tfds_datasets)
logging.info('Num nevis datasets %d', num_nevis_datasets)
logging.info('Iterating Nevis %s stream up to year %d', _STREAM_VARIANT.value,
_STREAM_STOP_YEAR.value)
logging.info('Writing output to %s', _OUTPATH.value)
total_test_images = 0
total_train_images = 0
total_dev_images = 0
stream = nevis_stream.NevisStream(
stream_variant=_STREAM_VARIANT.value, stop_year=_STREAM_STOP_YEAR.value)
with gfile.GFile(_OUTPATH.value, 'wt') as f:
f.write('=== Nevis stream ===\n')
num_train_datasets = 0
for event in stream.events():
logging.info('Reading datasets for %s', event)
if isinstance(event, streams.PredictionEvent):
test_dataset = stream.get_dataset_by_key(event.dataset_key)
total_test_images += test_dataset.num_examples
elif isinstance(event, streams.TrainingEvent):
train_dataset = stream.get_dataset_by_key(event.train_dataset_key)
dev_dataset = stream.get_dataset_by_key(event.dev_dataset_key)
total_dev_images += dev_dataset.num_examples
total_train_images += train_dataset.num_examples
num_train_datasets += 1
for key in streams.all_dataset_keys(event):
dataset = stream.get_dataset_by_key(key)
assert dataset.num_examples is not None
f.write('---\n')
f.write(f' Key: {key}, num examples: {dataset.num_examples}\n')
        ds = dataset.builder_fn(shuffle=False).batch(1)
        # Write the first few examples from this dataset.
        for batch in tfds.as_numpy(ds.take(10)):
          f.write(f'  Example: {batch}\n')
f.write('\n===\n')
    f.write(f'Num train images: {total_train_images}\n')
    f.write(f'Num dev images: {total_dev_images}\n')
    f.write(f'Num test images: {total_test_images}\n')
    f.write(f'Total training datasets in stream: {num_train_datasets}\n')
if __name__ == '__main__':
app.run(main)
|
dm_nevis-master
|
dm_nevis/streams/iterate_nevis_stream.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Script to download all the Nevis datasets.
"""
import os
import shutil
from typing import Sequence
from absl import app
from absl import flags
from absl import logging
from dm_nevis.datasets_storage import download_util as du
from dm_nevis.datasets_storage import handlers
from dm_nevis.datasets_storage import preprocessing
from dm_nevis.datasets_storage import version_control as vc
from dm_nevis.streams import nevis_stream
from tensorflow.io import gfile
# pylint:disable=line-too-long
_NUM_SHARDS = flags.DEFINE_integer(
'num_shards', default=10, help='Number of shards to write.')
_SHUFFLE_BUFFER_SIZE = flags.DEFINE_integer(
'shuffle_buffer_size', default=10_000, help='Shuffle buffer size.')
_DATASET = flags.DEFINE_string(
'dataset',
default=None,
    help='Dataset name. Exactly one of --dataset or --stream must be set.')
_STREAM = flags.DEFINE_string(
'stream',
default=None,
    help='Stream name. Exactly one of --dataset or --stream must be set.')
_LOCAL_DOWNLOAD_DIR = flags.DEFINE_string(
'local_download_dir',
default='/tmp/datasets',
help='Local directory used for downloading.')
_FORCE_DOWNLOADING = flags.DEFINE_boolean(
'force_downloading',
default=False,
    help='Whether to download the dataset even if it is already available.')
_FORCE_EXTRACTION = flags.DEFINE_boolean(
    'force_extraction',
    default=False,
    help='Whether to extract the dataset even if it is already extracted.')
_WRITE_STABLE_VERSION = flags.DEFINE_boolean(
'write_stable_version',
default=False,
help='Whether to write a stable version of the dataset or `tmp_version`.')
_TRY_DOWNLOAD_ARTIFACTS_FROM_URLS = flags.DEFINE_boolean(
'try_download_artifacts_from_urls',
default=True,
help='Whether to try to download artifacts from URLs.')
_TMP_VERSION = flags.DEFINE_string(
'tmp_version',
default='temp',
help='Name of temporary version of the dataset.')
_CLEAN_ARTIFACTS_ON_COMPLETION = flags.DEFINE_boolean(
'clean_artifacts_on_completion',
default=False,
help='Whether to clean up downloaded artifacts on completion.')
_COMPUTE_STATISTICS = flags.DEFINE_boolean(
'compute_statistics',
default=False,
    help='Whether to compute statistics and add them to the metadata. '
    'This is not supported for multi-label datasets.')
def _gather_dataset_names(dataset_name: str, stream_name: str) -> list[str]:
"""Returns a list of dataset names for a given dataset or stream."""
if dataset_name:
return [dataset_name.lower()]
stream = nevis_stream.NevisStreamVariant[stream_name.upper()]
dataset_names = []
for pretty_names in nevis_stream.NEVIS_STREAM_PER_YEAR[stream].values():
for dataset_pretty_name in pretty_names:
source = nevis_stream.DATASET_NAME_TO_SOURCE[dataset_pretty_name]
if isinstance(source, nevis_stream.NevisSource):
dataset_names.append(source.name)
return dataset_names
def _download_and_prepare_dataset(
dataset_name: str,
artifacts_path: str,
version: str,
try_download_artifacts_from_urls: bool,
force_extraction: bool,
compute_statistics: bool,
num_shards: int,
shuffle_buffer_size: int) -> None:
"""Downloads dataset, extracts it, and creates shards."""
if try_download_artifacts_from_urls:
du.lazily_download_artifacts(dataset_name, artifacts_path)
# Extracting part.
output_path = vc.get_dataset_path(dataset_name, version)
gfile.makedirs(output_path)
logging.info('Extracting dataset %s from dataset artifacts to %s.',
dataset_name, output_path)
if du.try_read_status(
output_path) == du.DatasetDownloadStatus.READY and not force_extraction:
logging.info('Dataset `%s` already extracted (skipping).', dataset_name)
return
downloadable_dataset = handlers.get_dataset(dataset_name)
du.extract_dataset_and_update_status(
downloadable_dataset,
artifacts_path,
output_path,
num_shards,
shuffle_buffer_size,
compute_statistics=compute_statistics,
preprocess_image_fn=preprocessing.preprocess_image_fn,
preprocess_metadata_fn=preprocessing.preprocess_metadata_fn)
def main(argv: Sequence[str]) -> None:
del argv
if not gfile.exists(vc.get_nevis_data_dir()):
gfile.makedirs(vc.get_nevis_data_dir())
num_shards = _NUM_SHARDS.value
shuffle_buffer_size = _SHUFFLE_BUFFER_SIZE.value
compute_statistics = _COMPUTE_STATISTICS.value
force_downloading = _FORCE_DOWNLOADING.value
force_extraction = _FORCE_EXTRACTION.value
try_download_artifacts_from_urls = _TRY_DOWNLOAD_ARTIFACTS_FROM_URLS.value
  version = 'stable' if _WRITE_STABLE_VERSION.value else _TMP_VERSION.value
dataset_name = _DATASET.value
stream_name = _STREAM.value
if dataset_name and stream_name:
raise ValueError('Exactly one of --dataset or --stream should be set.')
if not dataset_name and not stream_name:
raise ValueError('Provide dataset name or stream name to download.')
dataset_names = _gather_dataset_names(dataset_name, stream_name)
logging.info('Datasets: %s', dataset_names)
logging.info('%d datasets will be downloaded and prepared.',
len(dataset_names))
# Downloading part.
for i, dataset_name in enumerate(dataset_names, start=1):
logging.info('%d/%d: downloading dataset `%s`', i, len(dataset_names),
dataset_name)
du.check_dataset_supported_or_exit(dataset_name)
artifacts_path = os.path.join(_LOCAL_DOWNLOAD_DIR.value, dataset_name)
if force_downloading:
du.clean_artifacts_path(artifacts_path)
_download_and_prepare_dataset(
dataset_name=dataset_name,
artifacts_path=artifacts_path,
version=version,
try_download_artifacts_from_urls=try_download_artifacts_from_urls,
compute_statistics=compute_statistics,
force_extraction=force_extraction,
num_shards=num_shards,
shuffle_buffer_size=shuffle_buffer_size)
if _CLEAN_ARTIFACTS_ON_COMPLETION.value:
logging.info('Cleaning up artifacts directory at `%s`', artifacts_path)
shutil.rmtree(artifacts_path)
if __name__ == '__main__':
app.run(main)
|
dm_nevis-master
|
dm_nevis/datasets_storage/download_dataset.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoding and decoding of dataset examples.
Examples are represented as instances of the `Example` dataclass.
These dataclasses are serialized to tf.train.Example protobuf containers, which
can be serialized to the wire. When loading datasets, the dataset_loader
deserializes data into a tf.train.Example protobuf and parses it given the
features description provided by the dataset metadata.
"""
import io
import os
import queue
import threading
import types
from typing import Any, Callable, Optional, Type
from dm_nevis.datasets_storage.handlers import types as handlers_types
import PIL.Image as pil_image
import tensorflow as tf
import tensorflow_datasets as tfds
DEFAULT_IMAGE_FEATURE_NAME = 'png_encoded_image'
DEFAULT_LABEL_FEATURE_NAME = 'label'
DEFAULT_MULTI_LABEL_FEATURE_NAME = 'multi_label'
SUPPORTED_FEATURES = frozenset([
DEFAULT_IMAGE_FEATURE_NAME, DEFAULT_LABEL_FEATURE_NAME,
DEFAULT_MULTI_LABEL_FEATURE_NAME
])
RecordWriter = Callable[[str], Any]
_DEFAULT_MAX_WRITER_QUEUE_SIZE = 100
def get_default_features(num_channels: int,
num_classes: int) -> tfds.features.FeaturesDict:
"""Creates a default single class features specification."""
return tfds.features.FeaturesDict({
DEFAULT_IMAGE_FEATURE_NAME:
tfds.features.Image(shape=(None, None, num_channels)),
DEFAULT_LABEL_FEATURE_NAME:
tfds.features.ClassLabel(num_classes=num_classes)
})
def build_example_encoder(
features: tfds.features.FeaturesDict
) -> Callable[[handlers_types.Example], tf.train.Example]:
"""Returns a function to create tf.train.Example."""
if set(features.keys()) - SUPPORTED_FEATURES:
raise ValueError('features contains features which are not supported.')
example_serializer = tfds.core.example_serializer.ExampleSerializer(
features.get_serialized_info())
def encode(example: handlers_types.Example) -> tf.train.Example:
"""Creates a tf.train.Example. Ignored fields not in `features`."""
example_to_serialize = {}
for key, value in example._asdict().items():
if key == 'image':
example_to_serialize[DEFAULT_IMAGE_FEATURE_NAME] = _encoded_png_feature(
value)
elif key in features:
example_to_serialize[key] = value
return example_serializer.get_tf_example(example_to_serialize)
return encode
def build_example_decoder(
features: tfds.features.FeaturesDict
) -> Callable[[tf.Tensor], tf.train.Example]:
"""Returns feature decoder."""
example_parser = tfds.core.example_parser.ExampleParser(
features.get_serialized_info())
def decoder(encoded_tf_example: tf.Tensor) -> tf.train.Example:
"""Decodes the example into tf.train.Example proto."""
return features.decode_example(
example_parser.parse_example(encoded_tf_example))
return decoder
class ThreadpoolExampleWriter:
"""A class for encoding and writing examples to shards."""
_STOP_QUEUE = object()
def __init__(self,
num_shards: int,
example_encoder: Callable[[handlers_types.Example],
tf.train.Example],
*,
output_dir: str = '',
record_writer: RecordWriter = tf.io.TFRecordWriter) -> None:
"""Creates the writer class, with the given number of write shards.
Each shard is assigned a worker thread, which handles encoding and protobuf
serializing. This class is designed to be used as a context manager. Writing
happens asynchronously and is only completed once the context manager exits
successfully.
Args:
num_shards: The number of shards to write.
example_encoder: Function which encodes examples into tf.train.Example
protobuf.
output_dir: The directory name to join to the shard path.
record_writer: The class used to write records.
"""
self._example_encoder = example_encoder
self._queues = []
self._threads = []
def worker(q, path):
writer = record_writer(path)
while True:
example = q.get()
if example is self._STOP_QUEUE:
break
writer.write(self._example_encoder(example).SerializeToString())
for i in range(num_shards):
q = queue.Queue(maxsize=_DEFAULT_MAX_WRITER_QUEUE_SIZE)
path = os.path.join(output_dir, f'data-{i:05d}-of-{num_shards:05d}')
thread = threading.Thread(target=worker, args=(q, path), daemon=True)
thread.start()
self._queues.append(q)
self._threads.append(thread)
def __enter__(self):
return self
def __exit__(self, exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[types.TracebackType]) -> None:
"""Blocks until writing the shards is complete."""
del exc_type, exc_val, exc_tb
for q in self._queues:
q.put(self._STOP_QUEUE)
for thread in self._threads:
thread.join()
def write(self, shard: int, example: handlers_types.Example) -> None:
"""Asynchronously writes the given example to the given shard index."""
self._queues[shard].put(example)
def _encoded_png_feature(image: handlers_types.ImageLike) -> bytes:
"""Encodes an image to bytes."""
if not isinstance(image, pil_image.Image):
raise ValueError('Encoder only works with PIL images.')
buffer = io.BytesIO()
# We strip any ICC profile to avoid decoding warnings caused by invalid ICC
# profiles in some datasets.
image.save(buffer, format='PNG', icc_profile=b'')
buffer = buffer.getvalue()
return buffer
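# Usage sketch (not part of the original module), mirroring the pattern used in
# encoding_test.py: encode a few synthetic examples and write them to two shards
# under /tmp (the output directory is illustrative).
if __name__ == '__main__':
  import numpy as np
  rng = np.random.default_rng(0)
  features = get_default_features(num_channels=3, num_classes=10)
  with ThreadpoolExampleWriter(
      num_shards=2,
      output_dir='/tmp',
      example_encoder=build_example_encoder(features)) as writer:
    for i in range(4):
      img = pil_image.fromarray(
          rng.integers(0, 255, size=(32, 32, 3), dtype=np.uint8))
      example = handlers_types.Example(
          image=img, label=i % 10, multi_label=None)
      writer.write(i % 2, example)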
|
dm_nevis-master
|
dm_nevis/datasets_storage/encoding.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File containing all the paths."""
# pylint: disable=line-too-long
import os
NEVIS_DATA_DIR = os.environ.get('NEVIS_DATA_DIR', '/tmp/nevis_data_dir')
NEVIS_RAW_DATA_DIR = os.environ.get('NEVIS_RAW_DATA_DIR', '/tmp/nevis_raw_data_dir')
METADATA_FNAME = 'metadata'
STATUS_FNAME = 'status'
STABLE_DIR_NAME = 'stable'
|
dm_nevis-master
|
dm_nevis/datasets_storage/paths.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Download utils."""
import codecs
import collections
import enum
import hashlib
import os
import re
import shutil
from typing import Any, Callable, Dict, Iterable, Iterator, Optional, TypeVar
import urllib
from absl import logging
import dill
from dm_nevis.datasets_storage import encoding
from dm_nevis.datasets_storage import handlers
from dm_nevis.datasets_storage import paths
from dm_nevis.datasets_storage import statistics
from dm_nevis.datasets_storage.handlers import kaggle_utils
from dm_nevis.datasets_storage.handlers import types
import numpy as np
from PIL import Image as pil_image
import requests
from tensorflow.io import gfile
import tqdm
DEFAULT_CHUNK_SIZE = 1024
DEFAULT_MAX_QUEUE_SIZE = 100
DEFAULT_SHUFFLE_BUFFER_LOG_SECONDS = 10
PreprocessImageFn = Callable[[pil_image.Image, str, Optional[Any], int],
pil_image.Image]
PreprocessMetadataFn = Callable[[types.DatasetMetaData], types.DatasetMetaData]
class DatasetDownloadStatus(enum.Enum):
NOT_READY = 'not_ready'
READY = 'ready'
def maybe_unpack_archive(link, cur_link_file, ds_path):
if link.endswith('.zip') or link.endswith('.tar'):
shutil.unpack_archive(cur_link_file, extract_dir=ds_path)
os.remove(cur_link_file)
def write_status(dataset_path, status):
status_file_path = os.path.join(dataset_path, paths.STATUS_FNAME)
with gfile.GFile(status_file_path, 'w') as f:
f.write(status.value)
def try_read_status(dataset_path):
status_file_path = os.path.join(dataset_path, paths.STATUS_FNAME)
if not gfile.exists(status_file_path):
return None
with gfile.GFile(status_file_path, 'r') as f:
status = DatasetDownloadStatus(f.readline())
return status
def download_data_from_links(links, local_ds_path):
"""Donwload data artefacts from a list of urls or kaggle dataset/competition).
"""
for link in links:
if isinstance(link, types.DownloadableArtefact):
artefact_path = lazily_download_file(link.url, local_ds_path)
elif isinstance(link, types.KaggleDataset):
artefact_path = download_data_from_kaggle(
link.dataset_name,
local_ds_path,
kaggle_origin=kaggle_utils.KaggleOrigin.DATASETS)
elif isinstance(link, types.KaggleCompetition):
artefact_path = download_data_from_kaggle(
link.competition_name,
local_ds_path,
kaggle_origin=kaggle_utils.KaggleOrigin.COMPETITIONS)
else:
raise TypeError(f'Unknown type link: {type(link)}')
if link.checksum is not None:
validate_artefact_checksum(artefact_path, link.checksum)
def download_data_from_kaggle(data_name, local_ds_path, kaggle_origin):
"""Downloads dataset from kaggle website."""
artefact_path = os.path.join(local_ds_path, f'{data_name.split("/")[-1]}.zip')
if gfile.exists(artefact_path):
logging.info('Found existing file at `%s` (skipping download)',
artefact_path)
else:
kaggle_utils.download_kaggle_data(
data_name, local_ds_path, kaggle_origin=kaggle_origin)
return artefact_path
def validate_artefact_checksum(artefact_path: str, checksum: str) -> None:
"""Compares read checksum to expected checksum."""
hash_fn = hashlib.md5()
buffer_size = 128 * hash_fn.block_size
logging.info('Validating checksum of artefact `%s`', artefact_path)
with gfile.GFile(artefact_path, 'rb') as f:
while True:
data = f.read(buffer_size)
if not data:
break
hash_fn.update(data)
read_checksum = hash_fn.hexdigest()
if read_checksum != checksum:
raise ValueError(
        f'{artefact_path} checksum ({read_checksum}) does not match expected checksum ({checksum}). '
'This error can be silenced by removing the MD5 hash in the download '
'URL of the failing dataset.')
def lazily_download_file(url: str, output_dir: str) -> str:
"""Lazily fetch a file and write it into the output dir."""
  # Try to infer the filename from the URL; this does not work for Google Drive
  # links.
tentative_filename = _get_filename_from_url(url)
  if _is_among_allowed_extensions(tentative_filename):
path = os.path.join(output_dir, tentative_filename)
if gfile.exists(path):
logging.info('Found existing file at `%s` (skipping download)', path)
return path
with requests.get(url, stream=True, allow_redirects=True) as r:
# If an HTTP error occurred, it will be raised as an exception here.
r.raise_for_status()
filename = _get_filename_from_headers(r.headers) or _get_filename_from_url(
r.url)
path = os.path.join(output_dir, filename)
if gfile.exists(path):
logging.info('Found existing file at `%s` (skipping download)', path)
return path
partial_path = f'{path}.part'
with gfile.GFile(partial_path, 'wb') as w:
filesize = _content_length_from_headers(r.headers)
logging.info('Downloading `%s` to `%s`', url, path)
with tqdm.tqdm(
unit='B',
unit_scale=True,
unit_divisor=1024,
total=filesize,
desc=filename) as progress:
for chunk in r.iter_content(chunk_size=DEFAULT_CHUNK_SIZE):
w.write(chunk)
progress.update(n=len(chunk))
# Atomically move to destination, to avoid caching a partial file on failure.
os.rename(partial_path, path)
return path
def _get_filename_from_url(url: str) -> str:
o = urllib.parse.urlparse(url)
return o.path.split('/')[-1]
def _is_among_allowed_extensions(filename: str) -> bool:
ext = filename.split('.')[-1]
return ext in ('png', 'jpg', 'jpeg', 'gif', 'zip', 'tar', 'gz', 'rar')
def _get_filename_from_headers(headers: Dict[str, Any]) -> Optional[str]:
"""Extracts the filename from HTTP headers, if present."""
# TODO: This can be vastly more complicated, but this logic should
# work for most cases we will encounter.
content = headers.get('Content-Disposition')
if not content:
return None
match = re.findall('filename="(.+)"', content)
if match:
return match[0]
match = re.findall('filename=(.+)', content)
if match:
return match[0]
return None
def _content_length_from_headers(headers: Dict[str, Any]) -> Optional[int]:
if 'Content-Length' not in headers:
return None
return int(headers['Content-Length'])
T = TypeVar('T')
def shuffle_generator_with_buffer(gen: Iterable[T],
buffer_size: int,
seed: int = 0) -> Iterator[T]:
"""Shuffles `gen` using a shuffle buffer."""
rng = np.random.default_rng(seed)
buffer = []
for example in gen:
if len(buffer) < buffer_size:
buffer.append(example)
logging.log_every_n_seconds(logging.INFO,
'Filling shuffle buffer: %d/%d...',
DEFAULT_SHUFFLE_BUFFER_LOG_SECONDS,
len(buffer), buffer_size)
continue
idx = rng.integers(0, buffer_size)
result, buffer[idx] = buffer[idx], example
yield result
rng.shuffle(buffer)
yield from buffer
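# Illustrative sketch: only `buffer_size` elements are held in memory at a time,
# and the output is a permutation of the input, e.g.
#   list(shuffle_generator_with_buffer(range(6), buffer_size=3, seed=0))
#   -> a shuffled version of [0, 1, 2, 3, 4, 5] (exact order depends on the seed).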
def _optionally_convert_to_example(example):
"""Converts example tuple into types.Example."""
if isinstance(example, types.Example):
return example
assert len(example) == 2
image, label = example[:2]
return types.Example(image=image, label=label, multi_label=None)
def _extract_dataset(
downloable_dataset: handlers.types.DownloadableDataset,
artifacts_path: str,
output_path: str,
num_shards: int,
shuffle_buffer_size: int,
compute_statistics: bool,
preprocess_image_fn: PreprocessImageFn,
preprocess_metadata_fn: PreprocessMetadataFn,
seed: int = 0,
) -> None:
"""Converts dataset to tfrecord examples."""
logging.info('Extracting dataset %s from dataset artifacts.',
downloable_dataset.name)
metadata, splits_generators = downloable_dataset.handler(artifacts_path)
metadata = preprocess_metadata_fn(metadata)
num_channels = metadata.num_channels
num_classes = metadata.num_classes
  # This is required for backwards compatibility. Later on, we will remove the
  # need for default features.
features = metadata.features or encoding.get_default_features(
num_channels, num_classes)
if compute_statistics:
statistics_calculator = statistics.StatisticsCalculator(
list(splits_generators.keys()), metadata)
num_data_points_per_split = collections.defaultdict(int)
for split, split_gen in splits_generators.items():
logging.info('Writing split: `%s`', split)
description = f'Writing examples for `{split}`'
gen = shuffle_generator_with_buffer(split_gen, shuffle_buffer_size)
rng = np.random.default_rng(seed=seed)
split_dir = os.path.join(output_path, split)
if not gfile.exists(split_dir):
gfile.makedirs(split_dir)
with encoding.ThreadpoolExampleWriter(
num_shards,
output_dir=split_dir,
example_encoder=encoding.build_example_encoder(features)) as writer:
with tqdm.tqdm(total=None, desc=description, unit=' examples') as pbar:
for i, example in enumerate(gen):
converted_example = _optionally_convert_to_example(example)
          # NamedTuple._replace returns a new instance, so re-assign it to keep
          # the preprocessed image.
          converted_example = converted_example._replace(
              image=preprocess_image_fn(
                  converted_example.image, metadata.preprocessing, rng, 0))
if compute_statistics:
statistics_calculator.accumulate(converted_example.image,
converted_example.label, split)
shard_index = i % num_shards
writer.write(shard_index, converted_example)
num_data_points_per_split[split] += 1
pbar.update(1)
additional_metadata = metadata.additional_metadata
additional_metadata['num_data_points_per_split'] = num_data_points_per_split
additional_metadata['splits'] = sorted(splits_generators.keys())
if compute_statistics:
additional_metadata['statistics'] = statistics_calculator.merge_statistics()
with gfile.GFile(os.path.join(output_path, paths.METADATA_FNAME), 'wb') as f:
# See https://stackoverflow.com/a/67171328
pickled_object = codecs.encode(
dill.dumps(metadata, protocol=dill.HIGHEST_PROTOCOL),
'base64').decode()
f.write(pickled_object)
def extract_dataset_and_update_status(
downloadable_dataset: handlers.types.DownloadableDataset,
artifacts_path: str, output_path: str, num_shards: int,
shuffle_buffer_size: int, compute_statistics: bool,
preprocess_image_fn: PreprocessImageFn,
preprocess_metadata_fn: PreprocessMetadataFn) -> None:
"""Extracts dataset from artifacts_path and updates its status.
Args:
downloadable_dataset: The instance of `types.DownloadableDataset` describing
the dataset together with all the meta information.
artifacts_path: The directory where fetched artifacts are stored for this
dataset. These contain e.g. raw zip files and data downloaded from the
internet. If artifacts are found here matching the paths, they will not be
re-downloaded.
output_path: The destination where the final dataset will be written.
num_shards: The number of shards to write the dataset into.
shuffle_buffer_size: The size of the shuffle buffer.
compute_statistics: True if statistics should be computed.
preprocess_image_fn: Preprocessing function converting PIL.Image into numpy
array with consistent format.
preprocess_metadata_fn: Preprocessing function responsible for applying
consistent transformations to metadata.
"""
logging.info('Marking dataset `%s` as NOT_READY', downloadable_dataset.name)
write_status(output_path, DatasetDownloadStatus.NOT_READY)
_extract_dataset(
downloadable_dataset,
artifacts_path,
output_path,
num_shards,
shuffle_buffer_size,
compute_statistics=compute_statistics,
preprocess_image_fn=preprocess_image_fn,
preprocess_metadata_fn=preprocess_metadata_fn)
logging.info('Marking dataset `%s` as READY', downloadable_dataset.name)
write_status(output_path, DatasetDownloadStatus.READY)
def lazily_download_artifacts(dataset_name: str, artifacts_path: str) -> None:
"""Downloads artifacts (lazily) to the given path.
Args:
dataset_name: The name of the dataset to fetch the artifacts for.
    artifacts_path: The destination to write the fetched artifacts to.
"""
if not gfile.exists(artifacts_path):
gfile.makedirs(artifacts_path)
downloadable_dataset = handlers.get_dataset(dataset_name)
if downloadable_dataset.manual_download:
logging.info('Dataset `%s` has to be manually downloaded (skipping).',
dataset_name)
return
uris = downloadable_dataset.download_urls
logging.info('Fetching %d artifact(s) for %s to `%s`', len(uris),
dataset_name, artifacts_path)
download_data_from_links(uris, artifacts_path)
def check_dataset_supported_or_exit(dataset_name: str) -> None:
"""Checks whether given dataset has corresponding handler."""
if not handlers.is_dataset_available(dataset_name):
logging.error(
'The dataset `%s` is not recognized. The available datasets are %s',
dataset_name, ', '.join(handlers.dataset_names()))
exit(1)
def clean_artifacts_path(artifacts_path: str) -> None:
"""Cleans up `artifacts_path` directory."""
logging.info('Ensuring artifacts directory is empty at `%s`', artifacts_path)
if gfile.exists(artifacts_path):
shutil.rmtree(artifacts_path)
|
dm_nevis-master
|
dm_nevis/datasets_storage/download_util.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_nevis.datasets_storage.dataset_loader."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.datasets_storage import dataset_loader
from dm_nevis.datasets_storage import download_util
from dm_nevis.datasets_storage import handlers
from dm_nevis.datasets_storage import preprocessing
from dm_nevis.datasets_storage.handlers import types
DEFAULT_NUM_SHARDS = 5
DEFAULT_BUFFER_SIZE = 100
class DatasetLoaderTest(parameterized.TestCase):
# We can include any dataset that has a fixture writer.
@parameterized.parameters([
# dict(dataset_name='belgium_tsc'),
# dict(dataset_name='pacs'),
dict(dataset_name='caltech_categories'),
# dict(dataset_name='flickr_material_database'),
])
def test_load_dataset_from_path(self, dataset_name: str):
artifacts_path = self.create_tempdir().full_path
dataset_path = self.create_tempdir().full_path
downloadable_dataset = handlers.get_dataset(dataset_name)
self.assertIsNotNone(downloadable_dataset.fixture_writer)
_extract_dataset(downloadable_dataset, artifacts_path, dataset_path)
metadata = dataset_loader.get_metadata_from_path(dataset_path)
self.assertNotEmpty(metadata.additional_metadata['splits'])
for split in metadata.additional_metadata['splits']:
with self.subTest(split):
dataset = dataset_loader.load_dataset_from_path(dataset_path, split)
examples = list(dataset.builder_fn(shuffle=False))
self.assertNotEmpty(examples)
self.assertLen(examples, dataset.num_examples)
examples = list(dataset.builder_fn(shuffle=True))
self.assertNotEmpty(examples)
self.assertLen(examples, dataset.num_examples)
examples = list(dataset.builder_fn(shuffle=False, start=2))
self.assertLen(examples, dataset.num_examples - 2)
def _extract_dataset(downloadable_dataset: types.DownloadableDataset,
artifacts_path: str, output_path: str) -> None:
"""Writes a fixture and then extracts a dataset from the fixture."""
downloadable_dataset.fixture_writer(artifacts_path)
download_util.extract_dataset_and_update_status(
downloadable_dataset,
artifacts_path,
output_path,
DEFAULT_NUM_SHARDS,
DEFAULT_BUFFER_SIZE,
compute_statistics=True,
preprocess_image_fn=preprocessing.preprocess_image_fn,
preprocess_metadata_fn=preprocessing.preprocess_metadata_fn)
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
dm_nevis/datasets_storage/dataset_loader_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Information about Nevis datasets for the benchmark."""
# Datasets mapping from canonical name to tfds_name, available in tfds.
TFDS_DATASETS_MAPPING = {
"Caltech 101": "caltech101",
"CUB 200": "caltech_birds2011",
"CIFAR 10": "cifar10",
"CIFAR 100": "cifar100",
"COIL 100": "coil100",
"CUB 200 2011": "caltech_birds2011",
"DomainNet-Real": "domainnet",
"EMNIST Balanced": "emnist/balanced",
"FashionMNIST": "fashion_mnist",
"Food 101 N": "food101",
"ImageNet": "imagenet2012",
"iNaturalist": "i_naturalist2017",
"Oxford Flowers": "oxford_flowers102",
"Oxford Pets": "oxford_iiit_pet",
"Stanford Dogs": "stanford_dogs",
"SUN": "sun397",
}
|
dm_nevis-master
|
dm_nevis/datasets_storage/datasets.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
dm_nevis-master
|
dm_nevis/datasets_storage/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Script to download all the Nevis datasets.
"""
import os
from typing import Sequence
from absl import app
from absl import flags
from absl import logging
from dm_nevis.datasets_storage import dataset_loader
import tensorflow_datasets as tfds
_DATASET = flags.DEFINE_string('dataset', 'animal', 'Dataset name.')
_DATASET_VERSION = flags.DEFINE_string('dataset_version', 'temp',
'Dataset version.')
_DATASET_ROOT_DIR = flags.DEFINE_string('dataset_root_dir', '',
                                        'Dataset root directory.')
def main(argv: Sequence[str]) -> None:
del argv
if _DATASET_ROOT_DIR.value:
path = os.path.join(_DATASET_ROOT_DIR.value, _DATASET.value)
logging.info('Reading dataset from %s', path)
else:
path = None
if path:
metadata = dataset_loader.get_metadata_from_path(path)
else:
metadata = dataset_loader.get_metadata(
_DATASET.value, version=_DATASET_VERSION.value)
splits = metadata.additional_metadata['splits']
num_classes = metadata.num_classes
image_shape = metadata.image_shape
logging.info('metadata: %s', str(metadata))
logging.info('splits: %s', str(splits))
logging.info('num_classes: %d', num_classes)
logging.info('image_shape: %s', str(image_shape))
for split in splits:
logging.info('Trying split `%s`.', split)
if path:
dataset = dataset_loader.load_dataset_from_path(path, split)
else:
dataset = dataset_loader.load_dataset(
_DATASET.value, split, version=_DATASET_VERSION.value)
ds = iter(tfds.as_numpy(dataset.builder_fn(shuffle=True)))
elem = next(ds)
logging.info(elem)
logging.info('Checking the integrity of `%s`.', split)
for elem in ds:
pass
    logging.info('Checks for `%s` passed.', split)
if __name__ == '__main__':
app.run(main)
|
dm_nevis-master
|
dm_nevis/datasets_storage/play_with_dataset.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools to control data versionning."""
# pylint: disable=line-too-long
import os
from dm_nevis.datasets_storage import paths
def get_nevis_data_dir() -> str:
"""Prefix directory where all the data lives."""
return paths.NEVIS_DATA_DIR
def get_dataset_path(dataset: str, version: str = paths.STABLE_DIR_NAME) -> str:
"""Provides directory for the dataset and given version.
  Currently, we assume that the data lives in the `NEVIS_DATA_DIR` directory,
  followed by the version name. This means that if the version is `stable`, the
  dataset lives in the `NEVIS_DATA_DIR/stable/dataset` directory.
Args:
dataset: Name of the dataset
version: Name of the version of the dataset.
Returns:
Path to the dataset location for the given version.
"""
return os.path.join(get_nevis_data_dir(), version, dataset)
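# Example (illustrative dataset name): with the defaults in paths.py,
# get_dataset_path('mnist') resolves to '/tmp/nevis_data_dir/stable/mnist',
# unless NEVIS_DATA_DIR is overridden in the environment.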
|
dm_nevis-master
|
dm_nevis/datasets_storage/version_control.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset loader."""
import codecs
import functools
import os
from typing import Callable, NamedTuple, Optional
import dill
from dm_nevis.datasets_storage import download_util as du
from dm_nevis.datasets_storage import encoding
from dm_nevis.datasets_storage import handlers
from dm_nevis.datasets_storage import paths
from dm_nevis.datasets_storage import version_control as vc
from dm_nevis.datasets_storage.handlers import types
import tensorflow as tf
from tensorflow.io import gfile
MAXIMUM_DATA_FETCH_CONCURRENT_REQUESTS = 16
DEFAULT_BLOCK_LENGTH = 16
DEFAULT_SHUFFLE_BUFFER_SIZE = 5_000
DEFAULT_LRU_CACHE_SIZE = 1_000
class Dataset(NamedTuple):
builder_fn: Callable[..., tf.data.Dataset]
num_examples: int
class DatasetNotAvailableError(ValueError):
"""Raised when a dataset is not known or available."""
class DatasetNotReadyError(ValueError):
"""Raised when a dataset is known, but was not ready."""
def check_dataset_is_ready(dataset_name: str, version: str) -> None:
"""Raises an exception if the dataset is not found or not ready."""
if not handlers.is_dataset_available(dataset_name):
raise DatasetNotAvailableError(f'Dataset `{dataset_name}` not available.')
path = vc.get_dataset_path(dataset_name, version=version)
status = du.try_read_status(path)
if status != du.DatasetDownloadStatus.READY:
raise DatasetNotReadyError(
f'Dataset `{dataset_name}` not ready to be used.')
@functools.lru_cache(maxsize=DEFAULT_LRU_CACHE_SIZE)
def get_metadata_from_path(path: str) -> types.DatasetMetaData:
with gfile.GFile(os.path.join(path, paths.METADATA_FNAME), 'rb') as f:
    # tf.io.gfile has a bug with pickle, so the metadata is stored as a
    # base64-encoded buffer that we decode before unpickling.
# It's only metadata of limited size thus the overhead is acceptable.
# See https://stackoverflow.com/a/67171328
pickled_object = f.read()
return dill.loads(codecs.decode(pickled_object, 'base64'))
def get_metadata(dataset_name: str,
version: str = 'stable') -> types.DatasetMetaData:
check_dataset_is_ready(dataset_name, version=version)
path = vc.get_dataset_path(dataset_name, version=version)
return get_metadata_from_path(path)
def load_dataset(dataset_name: str,
split: str,
version: str = 'stable') -> Dataset:
"""Loads the dataset given a name and a split."""
check_dataset_is_ready(dataset_name, version=version)
path = vc.get_dataset_path(dataset_name, version=version)
return load_dataset_from_path(path, split)
def load_dataset_from_path(dataset_dir: str, split: str) -> Dataset:
"""Loads the dataset for a given split from a path."""
metadata = get_metadata_from_path(dataset_dir)
num_examples = _num_examples_from_metadata(split, metadata)
num_classes = metadata.num_classes
num_channels = metadata.num_channels
  # This is required for backwards compatibility. Later on, we will remove the
  # need for default features.
features = metadata.features or encoding.get_default_features(
num_channels, num_classes)
split_path = os.path.join(dataset_dir, split)
shards = gfile.glob(os.path.join(split_path, 'data-*-of-*'))
if not shards:
raise ValueError(f'No shards found for split `{split}` at {dataset_dir}')
def builder_fn(*,
shuffle: bool,
start: Optional[int] = None,
end: Optional[int] = None) -> tf.data.Dataset:
# TODO: It's possible to support this by carefully keeping track
# of the offsets and mapping them to the dataloaders, as tfds does.
# We opt not to do this for now, however.
if shuffle and (start is not None or end is not None):
raise NotImplementedError(
'Currently, offsets are not supported when shuffling')
ds = tf.data.Dataset.from_tensor_slices(shards)
if shuffle:
# Shuffling the shards helps randomize across the full dataset.
ds = ds.shuffle(buffer_size=len(shards))
ds = ds.interleave(
tf.data.TFRecordDataset,
cycle_length=MAXIMUM_DATA_FETCH_CONCURRENT_REQUESTS,
block_length=DEFAULT_BLOCK_LENGTH,
num_parallel_calls=tf.data.AUTOTUNE,
deterministic=not shuffle)
if start is not None:
ds = ds.skip(start)
if end is not None:
      # `end` is an absolute offset, so account for any skipped examples.
      ds = ds.take(end - (start or 0))
if shuffle:
ds = ds.shuffle(DEFAULT_SHUFFLE_BUFFER_SIZE)
example_decoder = encoding.build_example_decoder(features=features)
ds = ds.map(
example_decoder,
num_parallel_calls=tf.data.AUTOTUNE,
deterministic=not shuffle)
return ds
return Dataset(num_examples=num_examples, builder_fn=builder_fn)
def _num_examples_from_metadata(split: str,
metadata: types.DatasetMetaData) -> int:
points_per_split = metadata.additional_metadata['num_data_points_per_split']
try:
return points_per_split[split]
except KeyError as e:
raise ValueError(f'Split `{split}` not found in {points_per_split}') from e
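# Usage sketch (not part of the original module); the dataset directory below is
# a placeholder for a path produced by the download and extraction tooling.
if __name__ == '__main__':
  import tensorflow_datasets as tfds
  dataset_dir = '/tmp/nevis_data_dir/stable/<dataset_name>'  # placeholder
  demo = load_dataset_from_path(dataset_dir, 'train')
  print('num_examples:', demo.num_examples)
  for example in tfds.as_numpy(demo.builder_fn(shuffle=False).take(2)):
    print(sorted(example.keys()))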
|
dm_nevis-master
|
dm_nevis/datasets_storage/dataset_loader.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessing functions for data extraction."""
from typing import Any, Optional
from dm_nevis.datasets_storage.handlers import types
import numpy as np
STANDARD_IMAGE_SIZE = (64, 64)
def preprocess_image_fn(image: types.Image,
preprocessing: str,
rng: Optional[Any] = None,
seed: int = 0) -> types.Image:
"""Image preprocessing function."""
if not preprocessing:
pass
elif preprocessing == 'random_crop':
image = _random_crop_image(image, rng=rng, seed=seed)
else:
raise ValueError('Preprocessing: %s not supported' % preprocessing)
return image
def preprocess_metadata_fn(
metadata: types.DatasetMetaData) -> types.DatasetMetaData:
return metadata
def _random_crop_image(image, rng=None, seed=0):
"""Randomly crops the image to standard size."""
  # TODO: Consider cropping to a larger size to homogenize the resizing step
# across all datasets once we move it to the learner.
width, height = image.size
assert width - STANDARD_IMAGE_SIZE[0] > 0
assert height - STANDARD_IMAGE_SIZE[1] > 0
if rng is None:
rng = np.random.default_rng(seed=seed)
left = rng.integers(0, width - STANDARD_IMAGE_SIZE[0])
top = rng.integers(0, height - STANDARD_IMAGE_SIZE[1])
# Crops to STANDARD_IMAGE_SIZE[0]xSTANDARD_IMAGE_SIZE[1]
return image.crop(
(left, top, left + STANDARD_IMAGE_SIZE[0], top + STANDARD_IMAGE_SIZE[1]))
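# Usage sketch (not part of the original module): random-crop a synthetic PIL
# image that is larger than STANDARD_IMAGE_SIZE down to 64x64.
if __name__ == '__main__':
  import PIL.Image as pil_image
  rng = np.random.default_rng(0)
  img = pil_image.fromarray(
      rng.integers(0, 255, size=(100, 120, 3), dtype=np.uint8))
  cropped = preprocess_image_fn(img, 'random_crop', rng=rng)
  assert cropped.size == STANDARD_IMAGE_SIZE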
|
dm_nevis-master
|
dm_nevis/datasets_storage/preprocessing.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_nevis.datasets_storage.encoding."""
import collections
from typing import Iterable
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.datasets_storage import encoding
from dm_nevis.datasets_storage.handlers import types
import numpy as np
import PIL.Image as pil_image
import tensorflow as tf
class EncodingTest(parameterized.TestCase):
@parameterized.parameters([
{
'num_examples': 10,
'num_shards': 2
},
{
'num_examples': 7,
'num_shards': 1
},
{
'num_examples': 7,
'num_shards': 3
},
{
'num_examples': 1,
'num_shards': 5
},
{
'num_examples': 0,
'num_shards': 5
},
])
def test_threadpool_example_writer(self, num_examples, num_shards):
examples = list(_example_fixtures(n=num_examples))
features = encoding.get_default_features(num_channels=3, num_classes=10)
written_data = collections.defaultdict(list)
class MemoryRecordWriter:
def __init__(self, path: str) -> None:
self._path = path
def write(self, buffer: bytes) -> None:
written_data[self._path].append(buffer)
with encoding.ThreadpoolExampleWriter(
num_shards=num_shards,
record_writer=MemoryRecordWriter,
example_encoder=encoding.build_example_encoder(features)) as writer:
for i, example in enumerate(examples):
writer.write(i % num_shards, example)
total_written = sum(len(v) for v in written_data.values())
self.assertEqual(num_examples, total_written)
def test_encode_example(self):
example, *_ = _example_fixtures(n=1)
features = encoding.get_default_features(num_channels=3, num_classes=10)
encoder = encoding.build_example_encoder(features=features)
encoded = encoder(example)
decoder = encoding.build_example_decoder(features=features)
result = decoder(encoded.SerializeToString())
self.assertEqual(
[30, 60, 3],
result[encoding.DEFAULT_IMAGE_FEATURE_NAME].shape.as_list())
self.assertIn(result[encoding.DEFAULT_LABEL_FEATURE_NAME].numpy(),
set(range(10)))
self.assertIsInstance(result[encoding.DEFAULT_IMAGE_FEATURE_NAME],
tf.Tensor)
self.assertEqual(result[encoding.DEFAULT_IMAGE_FEATURE_NAME].dtype,
tf.uint8)
np.testing.assert_allclose(result[encoding.DEFAULT_IMAGE_FEATURE_NAME],
example.image)
def _example_fixtures(*, n: int) -> Iterable[types.Example]:
gen = np.random.default_rng(seed=0)
for _ in range(n):
img = pil_image.fromarray(
gen.integers(0, 255, size=(30, 60, 3), dtype=np.uint8))
yield types.Example(
image=img,
label=gen.integers(0, 10),
multi_label=None,
)
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
dm_nevis/datasets_storage/encoding_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accumulates and calculates statistics."""
import dataclasses
from typing import List
from dm_nevis.datasets_storage.handlers import types
import numpy as np
_FULL_DATASET_STATS_NAME = 'full_dataset_stats'
def _incremental_mean(x: np.ndarray, prev_mu: np.ndarray, n: int) -> np.ndarray:
return 1. / n * ((n - 1) * prev_mu + x)
def _incremental_sigma_sq_with_sq_diff(x: np.ndarray, prev_mu: np.ndarray,
mu: np.ndarray, prev_sq_diff: np.ndarray,
n: int):
sq_diff = prev_sq_diff + (x - prev_mu) * (x - mu)
sigma_sq = sq_diff / n
return sq_diff, sigma_sq
def _incremental_min(x: np.ndarray, prev_min: np.ndarray, n: int) -> np.ndarray:
if n == 1:
return x
return np.minimum(x, prev_min)
def _incremental_max(x: np.ndarray, prev_max: np.ndarray, n: int) -> np.ndarray:
if n == 1:
return x
return np.maximum(x, prev_max)
@dataclasses.dataclass
class StatisticsAccumulator:
"""Data structure to accumulate all the dataset statistics."""
mean_per_channel: np.ndarray
sigma_sq_per_channel: np.ndarray
sq_diff_per_channel: np.ndarray
max_per_channel: np.ndarray
min_per_channel: np.ndarray
num_examples_per_class: np.ndarray
num_examples: int
min_label: int
max_label: int
class StatisticsCalculator(object):
"""Aggregates statistics over the whole dataset for each split."""
def __init__(self, splits: List[str], metadata: types.DatasetMetaData):
self._accumulator = dict()
self._num_classes = metadata.num_classes
self._num_channels = metadata.num_channels
for split in tuple(splits) + (_FULL_DATASET_STATS_NAME,):
self._accumulator[split] = StatisticsAccumulator(
mean_per_channel=np.zeros((self._num_channels,), dtype=float),
sigma_sq_per_channel=np.zeros((self._num_channels,), dtype=float),
sq_diff_per_channel=np.zeros((self._num_channels,), dtype=float),
max_per_channel=np.zeros((self._num_channels,), dtype=float),
min_per_channel=np.zeros((self._num_channels,), dtype=float),
num_examples_per_class=np.zeros((self._num_classes,), dtype=int),
num_examples=0,
min_label=self._num_classes + 1,
max_label=-1)
def _accumulate_for_split(self, image, label, split_accumulator):
"""Accumulates the statistics and updates `split_accumulator`."""
split_accumulator.num_examples_per_class[label] += 1
split_accumulator.num_examples += 1
split_accumulator.min_label = min(label, split_accumulator.min_label)
split_accumulator.max_label = max(label, split_accumulator.max_label)
prev_mu = split_accumulator.mean_per_channel
flatten_image = np.copy(image).reshape((-1, self._num_channels))
x_mu = np.mean(flatten_image, axis=0)
mu = _incremental_mean(x_mu, prev_mu, split_accumulator.num_examples)
split_accumulator.mean_per_channel = mu
prev_sq_diff = split_accumulator.sq_diff_per_channel
    # The variance update uses the same observation (the per-image channel
    # mean) as the mean update, so reuse the value computed above.
    x_sigma = x_mu
sq_diff, sigma_sq = _incremental_sigma_sq_with_sq_diff(
x_sigma, prev_mu, mu, prev_sq_diff, split_accumulator.num_examples)
split_accumulator.sq_diff_per_channel = sq_diff
split_accumulator.sigma_sq_per_channel = sigma_sq
x_max = np.max(flatten_image, axis=0)
split_accumulator.max_per_channel = _incremental_max(
x_max, split_accumulator.max_per_channel,
split_accumulator.num_examples)
x_min = np.min(flatten_image, axis=0)
split_accumulator.min_per_channel = _incremental_min(
x_min, split_accumulator.min_per_channel,
split_accumulator.num_examples)
def accumulate(self, image, label, split):
"""Accumulates the statistics for the given split."""
self._accumulate_for_split(image, label, self._accumulator[split])
self._accumulate_for_split(image, label,
self._accumulator[_FULL_DATASET_STATS_NAME])
def merge_statistics(self):
"""Merges all the statistics together."""
merged_statistics = dict()
for split, split_accumulator in self._accumulator.items():
std_per_channel = np.sqrt(split_accumulator.sigma_sq_per_channel)
label_imbalance = np.max(
split_accumulator.num_examples_per_class) - np.min(
split_accumulator.num_examples_per_class)
dict_split_accumulator = dataclasses.asdict(split_accumulator)
dict_split_accumulator['std_per_channel'] = std_per_channel
dict_split_accumulator['label_imbalance'] = label_imbalance
merged_statistics[split] = dict_split_accumulator
return merged_statistics
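if __name__ == '__main__':
  # Minimal usage sketch added for illustration; it is not part of the
  # original module. `StatisticsCalculator` only reads `num_classes` and
  # `num_channels` from the metadata, so a small stand-in dataclass is used
  # here instead of constructing a real `types.DatasetMetaData`.
  @dataclasses.dataclass
  class _DemoMetaData:
    num_classes: int
    num_channels: int

  rng = np.random.default_rng(seed=0)
  calculator = StatisticsCalculator(
      splits=['train'], metadata=_DemoMetaData(num_classes=3, num_channels=3))
  images = rng.integers(0, 256, size=(5, 8, 8, 3)).astype(float)
  for i, image in enumerate(images):
    calculator.accumulate(image, label=i % 3, split='train')
  stats = calculator.merge_statistics()
  # With equally sized images, the incremental per-channel mean matches the
  # batch mean over all pixels. The incremental variance, by contrast, is the
  # variance of the per-image means, not of the individual pixels.
  np.testing.assert_allclose(
      stats['train']['mean_per_channel'],
      images.reshape(-1, 3).mean(axis=0))
  print('mean per channel:', stats['train']['mean_per_channel'])
  print('std per channel: ', stats['train']['std_per_channel'])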
|
dm_nevis-master
|
dm_nevis/datasets_storage/statistics.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_nevis.datasets_storage.download_util."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.datasets_storage import download_util as du
class DownloadUtilTest(parameterized.TestCase):
def test_get_filename_from_url(self):
url = """http://docs.python.org:80/3/library/foo.image.jpg?highlight=params#url-parsing"""
expected = 'foo.image.jpg'
self.assertEqual(du._get_filename_from_url(url), expected)
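    # For reference, a helper with the behaviour exercised above could be
    # built on urllib.parse (illustrative sketch only; the actual dm_nevis
    # implementation may differ):
    #   parsed = urllib.parse.urlparse(url)
    #   filename = posixpath.basename(parsed.path)  # -> 'foo.image.jpg'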
@parameterized.parameters([
(1000, 10),
(10, 1000),
(10000, 100),
])
def test_shuffle_generator_with_buffer(self, num_elements,
shuffle_buffer_size):
def make_gen():
for i in range(num_elements):
yield i
    # Materialize the shuffled output once: the shuffled generator can only be
    # consumed a single time, so listing it twice would compare against an
    # empty list and make the second assertion vacuous.
    shuffled_elements = list(
        du.shuffle_generator_with_buffer(make_gen(), shuffle_buffer_size))
    self.assertSameElements(shuffled_elements, list(make_gen()))
    self.assertNotEqual(shuffled_elements, list(make_gen()))
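    # A buffered shuffle of this kind is commonly implemented by filling a
    # buffer of `shuffle_buffer_size` items, then repeatedly yielding a
    # randomly chosen buffered item and refilling its slot from the input
    # stream. This is a general description of the technique, not necessarily
    # the exact dm_nevis implementation.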
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
dm_nevis/datasets_storage/download_util_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for stable datasets in Nevis dataset storage.
The purpose of these tests is to guarantee that a dataset specified in
`DATASETS_TO_RUN_INTEGRATION_TESTS_FOR` is available in the `stable` version
(see README.md) and have specified properties:
* A dataset can be read as TFRecordDataset.
* A dataset contains metadata.
* Each image in the dataset has `image_shape` and `num_channels` as specified in
metadata.
* Each pixel of the image is in range [0,255].
* Labels are in [0,num_classes - 1].
* Each class has at least one data point.
"""
import collections
import os
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.datasets_storage import dataset_loader
from dm_nevis.datasets_storage import download_util as du
from dm_nevis.datasets_storage import version_control as vc
import tensorflow as tf
DATASETS_TO_RUN_INTEGRATION_TESTS_FOR = [
'animal',
]
@absltest.skipThisClass('Timeout for OSS')
class StableDatasetsTest(tf.test.TestCase, parameterized.TestCase):
"""Tests on the `stable` version of Nevis datasets.
These tests go over all the specified datasets in
`DATASETS_TO_RUN_INTEGRATION_TESTS_FOR` in the `stable` dataset directory
and ensure that these datasets satisfy given (in the tests) constraints.
That will ensure that the datasets which are actually used in the benchmark
produce expected data.
"""
@parameterized.parameters(sorted(DATASETS_TO_RUN_INTEGRATION_TESTS_FOR))
def test_datasets_available(self, dataset_name):
data_dir = vc.get_nevis_stable_data_dir()
dataset_path = os.path.join(data_dir, dataset_name)
status = du.try_read_status(dataset_path)
self.assertEqual(status, du.DatasetDownloadStatus.READY)
@parameterized.parameters(sorted(DATASETS_TO_RUN_INTEGRATION_TESTS_FOR))
def test_dataset_splits_metadata_not_empty(self, dataset_name):
metadata = dataset_loader.get_metadata(dataset_name)
splits = metadata.additional_metadata['splits']
self.assertNotEmpty(splits)
@parameterized.parameters(sorted(DATASETS_TO_RUN_INTEGRATION_TESTS_FOR))
def test_datasets_each_class_has_at_least_one_element(self, dataset_name):
metadata = dataset_loader.get_metadata(dataset_name)
num_classes = metadata.num_classes
splits = metadata.additional_metadata['splits']
per_class_num_points = collections.defaultdict(int)
for split in splits:
ds = dataset_loader.load_dataset(dataset_name, split)
ds = ds.builder_fn().as_numpy_iterator()
for elem in ds:
label = elem.label
per_class_num_points[label] += 1
self.assertLen(per_class_num_points, num_classes)
@parameterized.parameters(sorted(DATASETS_TO_RUN_INTEGRATION_TESTS_FOR))
def test_datasets_elements_have_expected_shapes(self, dataset_name):
metadata = dataset_loader.get_metadata(dataset_name)
num_channels = metadata.num_channels
num_classes = metadata.num_classes
image_shape = metadata.image_shape
splits = metadata.additional_metadata['splits']
for split in splits:
ds = dataset_loader.load_dataset(dataset_name, split)
ds = ds.builder_fn().as_numpy_iterator()
for elem in ds:
label = elem.label
self.assertEqual(elem.image.shape, image_shape + (num_channels,))
self.assertAllInRange(label, 0, num_classes - 1)
        # TODO: Make sure that the test fails when image values are in (0, 1).
self.assertAllInRange(elem.image, 0, 255)
if __name__ == '__main__':
tf.test.main()
|
dm_nevis-master
|
dm_nevis/datasets_storage/integration_tests/stable_datasets_test.py
|