| python_code | repo_name | file_path |
|---|---|---|
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for evaluating shapes."""
from unittest import mock
from haiku._src import base
from haiku._src import basic
from haiku._src import stateful
import jax
import jax.numpy as jnp
def zeros_creator(next_creator, shape, dtype, init, context):
del context
init = jnp.zeros
return next_creator(shape, dtype, init)
def noop_dropout(rng, rate, x, broadcast_dims=()):
del rng, rate, broadcast_dims
return x
def fast_eval_shape(fun, *args, **kwargs):
"""Equivalent to ``eval_shape`` in JAX.
This utility is equivalent to ``eval_shape`` in JAX except that it avoids
running Haiku functions whose shapes are trivially known. This can avoid some
Python overheads in JAX which can accumulate for very large models.
Optimizations:
* All parameter/state initialisers replaced with zeros.
* ``hk.dropout`` replaced with identity.
* ``jax.random.fold_in`` replaced with identity.
Args:
fun: The function to trace.
*args: Positional arguments to ``fun``.
**kwargs: Keyword arguments to ``fun``.
Returns:
The shape produced by ``fun`` for the given args/kwargs.
"""
with base.custom_creator_unsafe(zeros_creator), \
mock.patch.object(basic, 'dropout_impl', noop_dropout), \
mock.patch.object(jax.random, 'fold_in', lambda key, data: key):
if base.inside_transform():
return stateful.eval_shape(fun, *args, **kwargs)
else:
return jax.eval_shape(fun, *args, **kwargs)
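# Illustrative usage sketch (not part of the original module), assuming
# ``import haiku as hk`` and ``import jax.numpy as jnp``: this function is
# exposed as ``hk.experimental.fast_eval_shape`` and can be used as a drop-in
# replacement for ``jax.eval_shape`` on transformed functions, e.g.:
#
#   f = hk.transform_with_state(lambda x: hk.Linear(10)(x))
#   rng = jax.random.PRNGKey(42)
#   x = jnp.ones([8, 28 * 28])
#   params, state = hk.experimental.fast_eval_shape(f.init, rng, x)
#   out, new_state = hk.experimental.fast_eval_shape(f.apply, params, state,
#                                                    rng, x)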
|
dm-haiku-main
|
haiku/_src/eval_shape.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.pool."""
import functools
from absl.testing import absltest
from haiku._src import pool
from haiku._src import test_utils
import jax
import jax.numpy as jnp
import numpy as np
class MaxPoolTest(absltest.TestCase):
def test_max_pool_basic(self):
x = np.arange(6, dtype=jnp.float32).reshape([6, 1])
x = np.broadcast_to(x, (2, 10, 6, 2))
window_shape = [1, 2, 2, 1]
result = pool.max_pool(
x, window_shape=window_shape, strides=window_shape, padding="VALID")
ground_truth = np.asarray([1., 3., 5.]).reshape([3, 1])
ground_truth = np.broadcast_to(ground_truth, (2, 5, 3, 2))
np.testing.assert_equal(result, ground_truth)
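    # Illustrative note (not part of the original test): x varies only along
    # the length-6 axis (values 0..5, constant elsewhere), so non-overlapping
    # 2x2 max pooling reduces that axis to max(0, 1)=1, max(2, 3)=3 and
    # max(4, 5)=5, which is the `ground_truth` above.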
def test_max_pool_unbatched(self):
x = np.arange(6, dtype=jnp.float32).reshape([6, 1])
leading_dims = (2, 3)
x = np.broadcast_to(x, leading_dims + (10, 6, 2))
window_shape = [2, 2, 1]
result = pool.max_pool(
x, window_shape=window_shape, strides=window_shape, padding="VALID")
ground_truth = np.asarray([1., 3., 5.]).reshape([3, 1])
ground_truth = np.broadcast_to(ground_truth, leading_dims + (5, 3, 2))
np.testing.assert_equal(result, ground_truth)
def test_max_pool_unbatched_vmapped(self):
x = np.arange(6, dtype=jnp.float32).reshape([6, 1])
leading_dims = (2, 3)
x = np.broadcast_to(x, leading_dims + (10, 6, 2))
window_shape = [2, 2, 1]
max_pool_fn = functools.partial(
pool.max_pool,
window_shape=window_shape,
strides=window_shape,
padding="VALID")
result = jax.vmap(jax.vmap(max_pool_fn))(x)
ground_truth = np.asarray([1., 3., 5.]).reshape([3, 1])
ground_truth = np.broadcast_to(ground_truth, leading_dims + (5, 3, 2))
np.testing.assert_equal(result, ground_truth)
def test_max_pool_batch_vs_vmap(self):
key = jax.random.PRNGKey(42)
batch = jax.random.normal(key, [8, 28, 28, 3])
p = functools.partial(pool.max_pool, window_shape=(4, 4, 1),
strides=(2, 2, 1), padding="VALID")
np.testing.assert_allclose(p(batch), jax.vmap(p)(batch))
def test_max_pool_overlapping_windows(self):
x = np.arange(12, dtype=jnp.float32).reshape([6, 2])
x = np.broadcast_to(x, (2, 10, 6, 2))
window_shape = [1, 5, 3, 2]
strides = [1, 1, 3, 2]
result = pool.max_pool(
x, window_shape=window_shape, strides=strides, padding="VALID")
ground_truth = np.asarray([5., 11.,]).reshape([2, 1])
ground_truth = np.broadcast_to(ground_truth, (2, 6, 2, 1))
np.testing.assert_equal(result, ground_truth)
def test_max_pool_same_padding(self):
x = np.arange(6, dtype=jnp.float32)
x = np.broadcast_to(x, (2, 3, 6))
window_shape = [1, 3, 3]
strides = [1, 1, 1]
result = pool.max_pool(
x, window_shape=window_shape, strides=strides, padding="SAME")
np.testing.assert_equal(result.shape, x.shape)
@test_utils.transform_and_run
def test_max_pool_same_padding_class(self):
x = np.arange(6, dtype=jnp.float32)
x = np.broadcast_to(x, (2, 3, 6))
window_shape = [1, 3, 3]
strides = [1, 1, 1]
max_pool = pool.MaxPool(
window_shape=window_shape, strides=strides, padding="SAME")
result = max_pool(x)
np.testing.assert_equal(result.shape, x.shape)
def test_max_pool_basic_with_inferred_shapes(self):
x = np.arange(6, dtype=jnp.float32).reshape([6, 1])
x = np.broadcast_to(x, (2, 10, 6, 2))
result = pool.max_pool(x, 2, 2, padding="VALID")
ground_truth = np.asarray([1., 3., 5.]).reshape([3, 1])
ground_truth = np.broadcast_to(ground_truth, (2, 5, 3, 2))
np.testing.assert_equal(result, ground_truth)
def test_max_pool_same_padding_with_inferred_shapes(self):
x = np.arange(6, dtype=jnp.float32)
x = np.broadcast_to(x, (2, 3, 6))
result = pool.max_pool(x, 3, 1, padding="SAME", channel_axis=None)
np.testing.assert_equal(result.shape, x.shape)
@test_utils.transform_and_run
def test_max_pool_same_padding_class_with_inferred_shapes(self):
x = np.arange(6, dtype=jnp.float32)
x = np.broadcast_to(x, (2, 3, 6))
max_pool = pool.MaxPool(3, 1, padding="SAME", channel_axis=None)
result = max_pool(x)
np.testing.assert_equal(result.shape, x.shape)
class AvgPoolTest(absltest.TestCase):
def test_avg_pool_basic(self):
x = np.arange(6, dtype=jnp.float32).reshape([6, 1])
x = np.broadcast_to(x, (2, 10, 6, 2))
window_shape = [1, 2, 2, 1]
result = pool.avg_pool(
x, window_shape=window_shape, strides=window_shape, padding="VALID")
ground_truth = np.asarray([0.5, 2.5, 4.5]).reshape([3, 1])
ground_truth = np.broadcast_to(ground_truth, (2, 5, 3, 2))
np.testing.assert_equal(result, ground_truth)
def test_avg_pool_unbatched(self):
x = np.arange(6, dtype=jnp.float32).reshape([6, 1])
leading_dims = (2, 3)
x = np.broadcast_to(x, leading_dims + (10, 6, 2))
window_shape = [2, 2, 1]
result = pool.avg_pool(
x, window_shape=window_shape, strides=window_shape, padding="VALID")
ground_truth = np.asarray([0.5, 2.5, 4.5]).reshape([3, 1])
ground_truth = np.broadcast_to(ground_truth, leading_dims + (5, 3, 2))
np.testing.assert_equal(result, ground_truth)
def test_avg_pool_unbatched_vmapped(self):
x = np.arange(6, dtype=jnp.float32).reshape([6, 1])
leading_dims = (2, 3)
x = np.broadcast_to(x, leading_dims + (10, 6, 2))
window_shape = [2, 2, 1]
avg_pool_fn = functools.partial(
pool.avg_pool,
window_shape=window_shape,
strides=window_shape,
padding="VALID")
result = jax.vmap(jax.vmap(avg_pool_fn))(x)
ground_truth = np.asarray([0.5, 2.5, 4.5]).reshape([3, 1])
ground_truth = np.broadcast_to(ground_truth, leading_dims + (5, 3, 2))
np.testing.assert_equal(result, ground_truth)
def test_avg_pool_batch_vs_vmap(self):
key = jax.random.PRNGKey(42)
batch = jax.random.normal(key, [8, 28, 28, 3])
p = functools.partial(pool.avg_pool, window_shape=(4, 4, 1),
strides=(2, 2, 1), padding="VALID")
np.testing.assert_allclose(p(batch), jax.vmap(p)(batch))
def test_avg_pool_overlapping_windows(self):
x = np.arange(12, dtype=jnp.float32).reshape([6, 2])
x = np.broadcast_to(x, (2, 10, 6, 2))
window_shape = [1, 5, 3, 2]
strides = [1, 1, 3, 2]
result = pool.avg_pool(
x, window_shape=window_shape, strides=strides, padding="VALID")
ground_truth = np.asarray([
2.5,
8.5,
]).reshape([2, 1])
ground_truth = np.broadcast_to(ground_truth, (2, 6, 2, 1))
np.testing.assert_almost_equal(result, ground_truth, decimal=5)
def test_avg_pool_same_padding(self):
x = np.ones((2, 3, 6))
window_shape = [1, 3, 3]
strides = [1, 1, 1]
result = pool.avg_pool(
x, window_shape=window_shape, strides=strides, padding="SAME")
np.testing.assert_equal(result.shape, x.shape)
# Since x is constant, its avg value should be itself.
np.testing.assert_equal(result, x)
@test_utils.transform_and_run
def test_avg_pool_same_padding_class(self):
x = np.ones((2, 3, 6))
window_shape = [1, 3, 3]
strides = [1, 1, 1]
avg_pool = pool.AvgPool(
window_shape=window_shape, strides=strides, padding="SAME")
result = avg_pool(x)
np.testing.assert_equal(result.shape, x.shape)
# Since x is constant, its avg value should be itself.
np.testing.assert_equal(result, x)
def test_avg_pool_basic_with_inferred_shapes(self):
x = np.arange(6, dtype=jnp.float32).reshape([6, 1])
x = np.broadcast_to(x, (2, 10, 6, 2))
result = pool.avg_pool(x, 2, 2, padding="VALID")
ground_truth = np.asarray([0.5, 2.5, 4.5]).reshape([3, 1])
ground_truth = np.broadcast_to(ground_truth, (2, 5, 3, 2))
np.testing.assert_equal(result, ground_truth)
def test_avg_pool_same_padding_with_inferred_shapes(self):
x = np.ones((2, 3, 6))
result = pool.avg_pool(x, 3, 1, padding="SAME", channel_axis=None)
np.testing.assert_equal(result.shape, x.shape)
# Since x is constant, its avg value should be itself.
np.testing.assert_equal(result, x)
@test_utils.transform_and_run
def test_avg_pool_same_padding_class_with_inferred_shapes(self):
x = np.ones((2, 3, 6))
result = pool.AvgPool(3, 1, padding="SAME", channel_axis=None)(x)
np.testing.assert_equal(result.shape, x.shape)
# Since x is constant, its avg value should be itself.
np.testing.assert_equal(result, x)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/pool_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests Haiku modules in half precision."""
from absl.testing import absltest
from haiku._src import test_utils
from haiku._src.integration import common
from haiku._src.integration import descriptors
import jax.numpy as jnp
ModuleFn = descriptors.ModuleFn
class Bfloat16Test(common.DTypeTestCase):
@test_utils.combined_named_parameters(descriptors.ALL_MODULES)
def test_float16(self, module_fn: descriptors.ModuleFn, shape, dtype):
self.assert_dtype(jnp.float16, module_fn, shape, dtype)
@test_utils.combined_named_parameters(descriptors.ALL_MODULES)
def test_bfloat16(self, module_fn: descriptors.ModuleFn, shape, dtype):
self.assert_dtype(jnp.bfloat16, module_fn, shape, dtype)
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
haiku/_src/integration/half_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku transforms."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
from haiku._src import stateful
from haiku._src import test_utils
from haiku._src.integration import descriptors
import jax
import jax.numpy as jnp
import numpy as np
ModuleFn = descriptors.ModuleFn
DEFAULT_ATOL = 1e-5
CUSTOM_ATOL = {hk.nets.ResNet: 0.05, hk.nets.MobileNetV1: 0.05,
hk.BatchNorm: 1e-4, hk.SeparableDepthwiseConv2D: 3e-3}
class HaikuTransformsTest(parameterized.TestCase):
@test_utils.combined_named_parameters(descriptors.ALL_MODULES,
test_utils.named_bools('init'))
def test_hk_jit(self, module_fn: ModuleFn, shape, dtype, init):
rng = jax.random.PRNGKey(42)
if jnp.issubdtype(dtype, jnp.integer):
x = jax.random.randint(rng, shape, 0, np.prod(shape), dtype)
else:
x = jax.random.uniform(rng, shape, dtype)
def g(x, jit=False):
mod = module_fn()
if jit:
mod = stateful.jit(mod)
return mod(x)
f = hk.transform_with_state(g)
assert_allclose = functools.partial(np.testing.assert_allclose, atol=1e-4)
# NOTE: We shard init/apply tests since some modules are expensive to jit
# (e.g. ResNet50 takes ~60s to compile and we compile it twice per test).
if init:
jax.tree_util.tree_map(
assert_allclose, jax.jit(f.init)(rng, x), f.init(rng, x, jit=True))
else:
params, state = f.init(rng, x)
jax.tree_util.tree_map(
assert_allclose,
jax.jit(f.apply)(params, state, rng, x),
f.apply(params, state, rng, x, jit=True))
@test_utils.combined_named_parameters(descriptors.ALL_MODULES,
test_utils.named_bools('init'))
def test_hk_scan(self, module_fn: descriptors.ModuleFn, shape, dtype, init):
rng = jax.random.PRNGKey(42)
if jnp.issubdtype(dtype, jnp.integer):
x = jax.random.randint(rng, shape, 0, np.prod(shape), dtype)
else:
x = jax.random.uniform(rng, shape, dtype)
def f(x):
mod = module_fn()
return mod(x)
def u_f(xs):
mod = module_fn()
def s(carry, x):
y = mod(x)
return carry, y
_, ys = hk.scan(s, (), xs)
return ys
u_f = hk.transform_with_state(u_f)
f = hk.transform_with_state(f)
assert_allclose = functools.partial(np.testing.assert_allclose, atol=1e-4)
xs = jnp.broadcast_to(x, (8,) + x.shape)
params, state = f.init(rng, x)
if init:
u_params, u_state = u_f.init(rng, xs)
jax.tree_util.tree_map(assert_allclose, u_params, params)
jax.tree_util.tree_map(assert_allclose, u_state, state)
return
def fun(state, x):
y, state = f.apply(params, state, rng, x)
return state, y
s_state, s_ys = jax.lax.scan(fun, state, xs)
u_ys, u_state = u_f.apply(params, state, rng, xs)
jax.tree_util.tree_map(assert_allclose, u_ys, s_ys)
jax.tree_util.tree_map(assert_allclose, u_state, s_state)
@test_utils.combined_named_parameters(
# TODO(tomhennigan) Enable once grad for _scan_transpose implemented.
set(descriptors.ALL_MODULES) - set(descriptors.RECURRENT_MODULES))
def test_hk_remat(self, module_fn: ModuleFn, shape, dtype):
rng = jax.random.PRNGKey(42)
if jnp.issubdtype(dtype, jnp.integer):
x = jax.random.randint(rng, shape, 0, np.prod(shape), dtype)
else:
x = jax.random.uniform(rng, shape, dtype)
def g(x, remat=False):
mod = module_fn()
if remat:
mod = hk.remat(mod)
out = mod(x)
if isinstance(out, dict):
out = out['loss']
return jnp.mean(out)
f = hk.transform_with_state(g)
assert_allclose = functools.partial(np.testing.assert_allclose, atol=1e-5)
grad_jax_remat = jax.grad(jax.remat(f.apply), has_aux=True)
grad_hk_remat = jax.grad(functools.partial(f.apply, remat=True),
has_aux=True)
params, state = f.init(rng, x)
jax.tree_util.tree_map(
assert_allclose, grad_jax_remat(params, state, rng, x),
grad_hk_remat(params, state, rng, x))
@test_utils.combined_named_parameters(descriptors.ALL_MODULES)
def test_optimize_rng_use_under_jit(self, module_fn: ModuleFn, shape, dtype):
rng = jax.random.PRNGKey(42)
if jnp.issubdtype(dtype, jnp.integer):
x = jax.random.randint(rng, shape, 0, np.prod(shape), dtype)
else:
x = jax.random.uniform(rng, shape, dtype)
def g(x):
return module_fn()(x)
f = hk.transform_with_state(hk.experimental.optimize_rng_use(g))
module_type = descriptors.module_type(module_fn)
atol = CUSTOM_ATOL.get(module_type, DEFAULT_ATOL)
assert_allclose = functools.partial(np.testing.assert_allclose, atol=atol)
params, state = jax.jit(f.init)(rng, x)
jax.tree_util.tree_map(assert_allclose, (params, state), f.init(rng, x))
if module_type in (hk.nets.VectorQuantizer, hk.nets.VectorQuantizerEMA):
# For stochastic modules just test apply runs.
jax.device_get(jax.jit(f.apply)(params, state, rng, x))
else:
jax.tree_util.tree_map(
assert_allclose,
jax.jit(f.apply)(params, state, rng, x),
f.apply(params, state, rng, x))
@test_utils.combined_named_parameters(descriptors.OPTIONAL_BATCH_MODULES)
def test_vmap(self, module_fn: ModuleFn, shape, dtype):
rng = jax.random.PRNGKey(42)
if jnp.issubdtype(dtype, jnp.integer):
x = jax.random.randint(rng, shape, 0, np.prod(shape), dtype)
else:
x = jax.random.uniform(rng, shape, dtype)
# Expand our input since we will map over it.
x = jnp.broadcast_to(x, (2,) + x.shape)
f = hk.transform_with_state(lambda x: module_fn()(x)) # pylint: disable=unnecessary-lambda
f_mapped = hk.transform_with_state(
lambda x: hk.vmap(lambda x: module_fn()(x), split_rng=False)(x)) # pylint: disable=unnecessary-lambda
params, state = f_mapped.init(rng, x)
# JAX vmap with explicitly unmapped params/state/rng. This should be
# equivalent to `f_mapped.apply(..)` (since by default hk.vmap does not map
# params/state/rng).
v_apply = jax.vmap(f.apply,
in_axes=(None, None, None, 0),
out_axes=(0, None))
module_type = descriptors.module_type(module_fn)
atol = CUSTOM_ATOL.get(module_type, DEFAULT_ATOL)
assert_allclose = functools.partial(np.testing.assert_allclose, atol=atol)
jax.tree_util.tree_map(
assert_allclose, f_mapped.apply(params, state, rng, x),
v_apply(params, state, rng, x))
@test_utils.combined_named_parameters(descriptors.ALL_MODULES)
def test_fast_eval_shape(self, module_fn: ModuleFn, shape, dtype):
rng = jax.random.PRNGKey(42)
if jnp.issubdtype(dtype, jnp.integer):
x = jax.random.randint(rng, shape, 0, np.prod(shape), dtype)
else:
x = jax.random.uniform(rng, shape, dtype)
def g(x):
return module_fn()(x)
f = hk.transform_with_state(g)
init_jax = jax.eval_shape(f.init, rng, x)
init_hk = hk.experimental.fast_eval_shape(f.init, rng, x)
self.assertEqual(init_jax, init_hk)
apply_jax = jax.eval_shape(f.apply, *init_jax, rng, x)
apply_hk = hk.experimental.fast_eval_shape(f.apply, *init_hk, rng, x)
self.assertEqual(apply_jax, apply_hk)
@test_utils.combined_named_parameters(descriptors.ALL_MODULES)
@test_utils.transform_and_run(run_apply=False)
def test_fast_eval_shape_inside_transform(self, module_fn: ModuleFn, shape,
dtype):
rng = jax.random.PRNGKey(42)
if jnp.issubdtype(dtype, jnp.integer):
x = jax.random.randint(rng, shape, 0, np.prod(shape), dtype)
else:
x = jax.random.uniform(rng, shape, dtype)
m = module_fn()
m_slow = hk.eval_shape(m, x)
m_fast = hk.experimental.fast_eval_shape(m, x)
self.assertEqual(m_slow, m_fast)
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
haiku/_src/integration/hk_transforms_test.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ensures that code samples in Haiku are accurate."""
import collections
import contextlib
import doctest
import inspect
import itertools
import types
import unittest
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import chex
import flax.linen as nn
import haiku as hk
from haiku._src import test_utils
import jax
import jax.numpy as jnp
import jmp
class DoctestTest(parameterized.TestCase):
@parameterized.named_parameters(test_utils.find_internal_python_modules(hk))
def test_doctest(self, module):
def run_test():
num_failed, num_attempted = doctest.testmod(
module,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
extraglobs={
"itertools": itertools,
"chex": chex,
"collections": collections,
"contextlib": contextlib,
"unittest": unittest,
"hk": hk,
"jnp": jnp,
"jax": jax,
"jmp": jmp,
"nn": nn,
})
tests_symbols = ", ".join(module.__test__.keys())
if num_attempted == 0:
logging.info("No doctests in %s", tests_symbols)
self.assertEqual(num_failed, 0, f"{num_failed} doctests failed")
logging.info("%s tests passed in %s", num_attempted, tests_symbols)
# `hk` et al import all dependencies from `src`, however doctest does not
# test imported deps so we must manually set `__test__` such that imported
# symbols are tested.
# See: docs.python.org/3/library/doctest.html#which-docstrings-are-examined
if not hasattr(module, "__test__") or not module.__test__:
module.__test__ = {}
# Many tests expect to be run as part of an `hk.transform`. We loop over all
# exported symbols and run them in their own `hk.transform` so parameter and
# module names don't clash.
for name in module.__all__:
test_names = []
value = getattr(module, name)
if inspect.ismodule(value):
continue
# Skip type annotations in Python 3.7.
if hasattr(value, "__origin__"):
continue
logging.info("Testing name: %r value: %r", name, value)
if inspect.isclass(value) and not isinstance(value, types.GenericAlias):
# Find unbound methods on classes, doctest doesn't seem to find them.
test_names.append(name)
module.__test__[name] = value
for attr_name in dir(value):
attr_value = getattr(value, attr_name)
if inspect.isfunction(attr_value):
test_name = name + "_" + attr_name
test_names.append(test_name)
module.__test__[test_name] = attr_value
elif (isinstance(value, str) or inspect.isfunction(value) or
inspect.ismethod(value) or inspect.isclass(value)):
test_names.append(name)
module.__test__[name] = value
elif hasattr(value, "__doc__"):
test_names.append(name)
module.__test__[name] = value.__doc__
else:
# This will probably fail, DocTestFinder.find: __test__ values must be
# strings, functions, methods, classes, or modules
test_names.append(name)
module.__test__[name] = value
init_fn, _ = hk.transform_with_state(run_test)
rng = jax.random.PRNGKey(42)
init_fn(rng)
for test_name in test_names:
del module.__test__[test_name]
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/integration/doctest_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Haiku to dot functionality."""
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
from haiku._src import test_utils
from haiku._src.integration import descriptors
import jax
import numpy as np
ModuleFn = descriptors.ModuleFn
class DotTest(parameterized.TestCase):
@test_utils.combined_named_parameters(descriptors.ALL_MODULES)
def test_abstract_to_dot(self, module_fn: ModuleFn, shape, dtype):
f = hk.transform_with_state(lambda x: module_fn()(x)) # pylint: disable=unnecessary-lambda
rng = jax.random.PRNGKey(42)
x = np.ones(shape, dtype)
params, state = jax.eval_shape(f.init, rng, x)
self.assertIsNotNone(
hk.experimental.abstract_to_dot(f.apply)(params, state, rng, x))
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
haiku/_src/integration/to_dot_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Haiku shim imports."""
import importlib
import types
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
def named_internal_modules():
for module_name in ("haiku._src", "haiku._src.nets"):
for name in dir(importlib.import_module(module_name)):
if not name.startswith("_"):
submodule_name = module_name + "." + name
yield submodule_name, importlib.import_module(submodule_name)
class ShimHkTest(parameterized.TestCase):
@parameterized.named_parameters(*named_internal_modules())
def test_hk_shim(self, module):
if not hasattr(module, "hk"):
self.skipTest(f"No `hk` in {module}")
shim_hk = module.hk
for name in dir(shim_hk):
if name.startswith("_"):
continue
shim_value = getattr(shim_hk, name)
if not hasattr(hk, name):
raise ValueError(f"`hk.{name}` is not part of the actual Haiku API")
actual_value = getattr(hk, name)
if isinstance(actual_value, types.ModuleType):
# Most shimmed submodules are instance of types.ModuleType, but some
# are nested classes, e.g. `hk.pad`.
self.assertIsInstance(shim_value, (type, types.ModuleType))
else:
self.assertEqual(actual_value, shim_value)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/integration/shim_hk_test.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Haiku modules running with leak checking."""
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
from haiku._src import test_utils
from haiku._src.integration import descriptors
import jax
import jax.numpy as jnp
ModuleFn = descriptors.ModuleFn
def get_module_cls(module_fn: ModuleFn) -> type[hk.Module]:
get_cls = lambda: type(descriptors.unwrap(module_fn()))
return hk.testing.transform_and_run(get_cls)()
class LeakCheckerTest(parameterized.TestCase):
def setUp(self):
super().setUp()
jax.config.update('jax_check_tracer_leaks', True)
def tearDown(self):
super().tearDown()
jax.config.update('jax_check_tracer_leaks', False)
@test_utils.combined_named_parameters(descriptors.ALL_MODULES)
def test_run(self, module_fn: ModuleFn, shape, dtype):
def g(x):
return module_fn()(x)
f = hk.transform_with_state(g)
def run():
rng = jax.random.PRNGKey(42)
x = jnp.zeros(shape, dtype)
params, state = f.init(rng, x)
return f.apply(params, state, rng, x)
jax.eval_shape(run)
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
haiku/_src/integration/check_tracer_leaks_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Running haiku._src.integration.common.DTypeTestCase with float64.
This is put into a separate file since it requires enabling float64 right after
loading jax.
"""
from absl.testing import absltest
from haiku._src import test_utils
from haiku._src.integration import common
from haiku._src.integration import descriptors
from jax.config import config
import jax.numpy as jnp
ModuleFn = descriptors.ModuleFn
class Float64Test(common.DTypeTestCase):
def setUp(self):
super().setUp()
config.update("jax_enable_x64", True)
def tearDown(self):
super().tearDown()
config.update("jax_enable_x64", False)
@test_utils.combined_named_parameters(descriptors.ALL_MODULES)
def test_float32(self, module_fn: ModuleFn, shape, dtype):
self.assertTrue(config.read("jax_enable_x64"))
self.assert_dtype(jnp.float32, module_fn, shape, dtype)
@test_utils.combined_named_parameters(descriptors.ALL_MODULES)
def test_float64(self, module_fn: ModuleFn, shape, dtype):
self.assertTrue(config.read("jax_enable_x64"))
self.assert_dtype(jnp.float64, module_fn, shape, dtype)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/integration/float64_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests whether modules produce similar output given np.ndarray inputs."""
import functools
import os
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
from haiku._src import test_utils
from haiku._src.integration import descriptors
import jax
import jax.numpy as jnp
import numpy as np
ModuleFn = descriptors.ModuleFn
def tree_assert_allclose(a, b, *, atol=1e-6):
jax.tree_util.tree_map(
functools.partial(np.testing.assert_allclose, atol=atol), a, b)
def get_module_cls(module_fn: ModuleFn) -> type[hk.Module]:
get_cls = lambda: type(descriptors.unwrap(module_fn()))
return hk.testing.transform_and_run(get_cls)()
class NumpyInputsTest(parameterized.TestCase):
@test_utils.combined_named_parameters(
descriptors.ALL_MODULES,
test_utils.named_bools('np_inputs'),
test_utils.named_bools('np_params'),
test_utils.named_bools('close_over_params'))
def test_numpy_and_jax_results_close(
self,
module_fn: ModuleFn,
shape: tuple[int, ...],
dtype: jnp.dtype,
np_params: bool,
np_inputs: bool,
close_over_params: bool,
):
if not (np_params or np_inputs):
self.skipTest('Pure JAX variants tested elsewhere')
# TODO(b/257921991): Fix the timeouts here.
if (close_over_params and 'UNITTEST_ON_FORGE' in os.environ):
module_cls = get_module_cls(module_fn)
if module_cls in (hk.nets.ResNet, hk.Conv2DTranspose, hk.LayerNorm,
hk.Conv2DLSTM, hk.IdentityCore):
self.skipTest('Close over tests for these modules take >5 minutes')
f = hk.transform_with_state(lambda x: module_fn()(x)) # pylint: disable=unnecessary-lambda
rng = jax.random.PRNGKey(42)
x = jnp.ones(shape, dtype)
params, state = f.init(rng, x)
if close_over_params:
apply_fn = functools.partial(f.apply, params, state)
out, new_state = jax.jit(apply_fn)(rng, x)
else:
out, new_state = jax.jit(f.apply)(params, state, rng, x)
if np_inputs:
rng, x = jax.device_get((rng, x))
with self.subTest('init'):
params2, state2 = f.init(rng, x)
tree_assert_allclose(params, params2)
tree_assert_allclose(state, state2)
with self.subTest('apply'):
if np_params:
params, state = jax.device_get((params, state))
if close_over_params:
apply_fn = functools.partial(f.apply, params, state)
out2, new_state2 = jax.jit(apply_fn)(rng, x)
else:
out2, new_state2 = jax.jit(f.apply)(params, state, rng, x)
tree_assert_allclose(out, out2)
tree_assert_allclose(new_state, new_state2)
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
haiku/_src/integration/numpy_inputs_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
|
dm-haiku-main
|
haiku/_src/integration/__init__.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Haiku checkpointing."""
import json
import os
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import test_utils
from haiku._src.integration import checkpoint_utils
from haiku._src.integration import descriptors
ModuleFn = descriptors.ModuleFn
HOW_TO_REGENERATE = """
You can regenerate checkpoints using the checkpoint_generate utility in this
folder. Set the --base_path flag to the checkpoint folder.
"""
class CheckpointTest(parameterized.TestCase):
@test_utils.combined_named_parameters(
descriptors.with_name(descriptors.ALL_MODULES))
def test_checkpoint_format(self, name, module_fn: ModuleFn, shape, dtype):
descriptor = descriptors.ModuleDescriptor(name, module_fn, shape, dtype)
cls = descriptors.module_type(descriptor.create)
expected = checkpoint_utils.summarize(descriptor)
file_path = os.path.join(
"haiku/_src/integration/checkpoints/",
descriptors.to_file_name(descriptor) + ".json")
if not os.path.exists(file_path):
expected_json = json.dumps(expected, indent=2)
raise ValueError(f"Missing checkpoint file: {file_path}\n\n"
f"Expected:\n\n{expected_json}")
with open(file_path) as fp:
actual = json.load(fp)
self.assertEqual(expected, actual, msg=HOW_TO_REGENERATE)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/integration/checkpoint_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.conformance.descriptors."""
from absl.testing import absltest
import haiku as hk
from haiku._src import test_utils
from haiku._src.integration import descriptors
ALL_MODULES = descriptors.ALL_MODULES
IGNORED_MODULES = descriptors.IGNORED_MODULES
class DescriptorsTest(absltest.TestCase):
@test_utils.transform_and_run
def test_coverage(self):
all_modules = frozenset(test_utils.find_subclasses(hk, hk.Module))
tested_modules = {type(descriptors.unwrap(d.create())) for d in ALL_MODULES}
self.assertEmpty(all_modules - (tested_modules | IGNORED_MODULES))
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
haiku/_src/integration/descriptors_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Haiku summarise functionality."""
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
from haiku._src import test_utils
from haiku._src.integration import descriptors
import jax.numpy as jnp
ModuleFn = descriptors.ModuleFn
class SummariseTest(parameterized.TestCase):
@test_utils.combined_named_parameters(descriptors.ALL_MODULES)
def test_eval_summary(self, module_fn: ModuleFn, shape, dtype):
f = lambda x: module_fn()(x) # pylint: disable=unnecessary-lambda
x = jnp.ones(shape, dtype)
self.assertIsNotNone(hk.experimental.eval_summary(f)(x))
@test_utils.combined_named_parameters(descriptors.ALL_MODULES)
def test_tabulate(self, module_fn: ModuleFn, shape, dtype):
f = lambda x: module_fn()(x) # pylint: disable=unnecessary-lambda
x = jnp.ones(shape, dtype)
self.assertIsNotNone(hk.experimental.tabulate(f)(x))
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
haiku/_src/integration/summarise_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.conformance.descriptors."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
from haiku._src import test_utils
from haiku._src.integration import descriptors
import jax
import jax.numpy as jnp
import numpy as np
ModuleFn = descriptors.ModuleFn
DEFAULT_ATOL = 1e-5
CUSTOM_ATOL = {hk.nets.ResNet: 0.05, hk.nets.MobileNetV1: 0.05,
hk.nets.VectorQuantizer: 0.05, hk.nets.VectorQuantizerEMA: 0.05,
hk.BatchNorm: 1e-4, hk.SeparableDepthwiseConv2D: 3e-3}
class JaxTransformsTest(parameterized.TestCase):
@test_utils.combined_named_parameters(descriptors.ALL_MODULES)
def test_jit(self, module_fn: ModuleFn, shape, dtype):
rng = jax.random.PRNGKey(42)
if jnp.issubdtype(dtype, jnp.integer):
x = jax.random.randint(rng, shape, 0, np.prod(shape), dtype)
else:
x = jax.random.uniform(rng, shape, dtype)
def g(x):
return module_fn()(x)
f = hk.transform_with_state(g)
atol = CUSTOM_ATOL.get(descriptors.module_type(module_fn), DEFAULT_ATOL)
assert_allclose = functools.partial(np.testing.assert_allclose, atol=atol)
# Ensure initialization under jit is the same.
jax.tree_util.tree_map(
assert_allclose, f.init(rng, x), jax.jit(f.init)(rng, x))
# Ensure application under jit is the same.
params, state = f.init(rng, x)
jax.tree_util.tree_map(
assert_allclose,
f.apply(params, state, rng, x),
jax.jit(f.apply)(params, state, rng, x),
)
@test_utils.combined_named_parameters(descriptors.OPTIONAL_BATCH_MODULES)
def test_vmap(self, module_fn: ModuleFn, shape, dtype):
batch_size, shape = shape[0], shape[1:]
rng = jax.random.PRNGKey(42)
if jnp.issubdtype(dtype, jnp.integer):
sample = jax.random.randint(rng, shape, 0, np.prod(shape), dtype)
else:
sample = jax.random.uniform(rng, shape, dtype)
batch = jnp.broadcast_to(sample, (batch_size,) + sample.shape)
def g(x):
return module_fn()(x)
f = hk.transform_with_state(g)
# Ensure application under vmap is the same.
params, state = f.init(rng, sample)
v_apply = jax.vmap(f.apply, in_axes=(None, None, None, 0))
jax.tree_util.tree_map(
lambda a, b: np.testing.assert_allclose(a, b, atol=DEFAULT_ATOL),
f.apply(params, state, rng, batch), v_apply(params, state, rng, batch))
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
haiku/_src/integration/jax_transforms_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module descriptors programatically describe how to use modules."""
from collections.abc import Sequence
from typing import Any, Callable, NamedTuple
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
ModuleFn = Callable[[], Callable[[jax.Array], jax.Array]]
class Wrapped(hk.Module):
def __init__(self, wrapped):
super().__init__()
self.wrapped = wrapped
class Training(Wrapped):
def __call__(self, x: jax.Array):
return self.wrapped(x, is_training=True)
class MultiInput(Wrapped):
def __init__(self, wrapped, num_inputs):
super().__init__(wrapped)
self.num_inputs = num_inputs
def __call__(self, x: jax.Array):
inputs = [x for _ in range(self.num_inputs)]
return self.wrapped(*inputs)
class Recurrent(Wrapped):
"""Unrolls a recurrent module."""
def __init__(self, module: hk.RNNCore, unroller=None):
super().__init__(module)
self.unroller = unroller
def __call__(self, x: jax.Array):
initial_state = jax.tree_util.tree_map(
lambda v: v.astype(x.dtype),
self.wrapped.initial_state(batch_size=x.shape[0]))
x = jnp.expand_dims(x, axis=0)
return self.unroller(self.wrapped, x, initial_state)
def unwrap(module):
while isinstance(module, Wrapped):
module = module.wrapped
return module
class ModuleDescriptor(NamedTuple):
name: Any
create: ModuleFn
shape: Sequence[int]
dtype: Any = jnp.float32
BATCH_SIZE = 8
# pylint: disable=unnecessary-lambda
# Modules that have equivalent behaviour with or without a batch dimension.
OPTIONAL_BATCH_MODULES = (
ModuleDescriptor(
name="Embed",
create=lambda: hk.Embed(vocab_size=6, embed_dim=12),
shape=(BATCH_SIZE,),
dtype=jnp.int32),
ModuleDescriptor(
name="Linear",
create=lambda: hk.Linear(10),
shape=(BATCH_SIZE, 1)),
ModuleDescriptor(
name="Sequential",
create=lambda: hk.Sequential([]),
shape=(BATCH_SIZE, 2, 2)),
ModuleDescriptor(
name="nets.MLP",
create=lambda: hk.nets.MLP([3, 4, 5]),
shape=(BATCH_SIZE, 3)),
ModuleDescriptor(
name="ConvND",
create=lambda: hk.ConvND(1, 3, 3),
shape=(BATCH_SIZE, 2, 2)),
ModuleDescriptor(
name="ConvNDTranspose",
create=lambda: hk.ConvNDTranspose(1, 3, 3),
shape=(BATCH_SIZE, 2, 2)),
ModuleDescriptor(
name="Conv1D",
create=lambda: hk.Conv1D(3, 3),
shape=(BATCH_SIZE, 2, 2)),
ModuleDescriptor(
name="Conv1DTranspose",
create=lambda: hk.Conv1DTranspose(3, 3),
shape=(BATCH_SIZE, 2, 2)),
ModuleDescriptor(
name="Conv2D",
create=lambda: hk.Conv2D(3, 3),
shape=(BATCH_SIZE, 2, 2, 2)),
ModuleDescriptor(
name="Conv2DTranspose",
create=lambda: hk.Conv2DTranspose(3, 3),
shape=(BATCH_SIZE, 2, 2, 2)),
ModuleDescriptor(
name="Conv3D",
create=lambda: hk.Conv3D(3, 3),
shape=(BATCH_SIZE, 2, 2, 2, 2)),
ModuleDescriptor(
name="Conv3DTranspose",
create=lambda: hk.Conv3DTranspose(3, 3),
shape=(BATCH_SIZE, 2, 2, 2, 2)),
)
# Modules that require input to have a batch dimension.
BATCH_MODULES = (
ModuleDescriptor(
name="BatchNorm",
create=lambda: Training(hk.BatchNorm(True, True, 0.9)),
shape=(BATCH_SIZE, 2, 2, 3)),
ModuleDescriptor(
name="Bias",
create=lambda: hk.Bias(),
shape=(BATCH_SIZE, 3, 3, 3)),
ModuleDescriptor(
name="Flatten",
create=lambda: hk.Flatten(),
shape=(BATCH_SIZE, 3, 3, 3)),
ModuleDescriptor(
name="InstanceNorm",
create=lambda: hk.InstanceNorm(True, True),
shape=(BATCH_SIZE, 3, 2)),
ModuleDescriptor(
name="GroupNorm",
create=lambda: hk.GroupNorm(5),
shape=(BATCH_SIZE, 4, 4, 10)),
ModuleDescriptor(
name="LayerNorm",
create=lambda: hk.LayerNorm(1, True, True, param_axis=-1),
shape=(BATCH_SIZE, 3, 2)),
ModuleDescriptor(
name="MultiHeadAttention",
create=lambda: MultiInput( # pylint: disable=g-long-lambda
hk.MultiHeadAttention(num_heads=8, key_size=64, w_init_scale=1.0),
num_inputs=3),
shape=(BATCH_SIZE, 3, 2)),
ModuleDescriptor(
name="RMSNorm",
create=lambda: hk.RMSNorm(1),
shape=(BATCH_SIZE, 3, 2)),
ModuleDescriptor(
name="SpectralNorm",
create=lambda: hk.SpectralNorm(),
shape=(BATCH_SIZE, 3, 2)),
ModuleDescriptor(
name="nets.ResNet",
create=lambda: Training(hk.nets.ResNet((3, 4, 6, 3), 1000)),
shape=(BATCH_SIZE, 3, 3, 2)),
# pylint: disable=g-long-lambda
ModuleDescriptor(
name="nets.MobileNetV1",
create=lambda: Training(hk.nets.MobileNetV1(num_classes=1000,
strides=(1, 1, 1),
channels=(16, 32, 64))),
shape=(BATCH_SIZE, 64, 64, 2)),
# pylint: enable=g-long-lambda
ModuleDescriptor(
name="nets.VectorQuantizer",
create=lambda: Training(hk.nets.VectorQuantizer(64, 512, 0.25)),
shape=(BATCH_SIZE, 64)),
ModuleDescriptor(
name="nets.VectorQuantizerEMA",
create=lambda: Training(hk.nets.VectorQuantizerEMA(64, 512, 0.25, 0.9)),
shape=(BATCH_SIZE, 64)),
# TODO(tomhennigan) Make these modules support unbatched input.
ModuleDescriptor(
name="DepthwiseConv1D",
create=lambda: hk.DepthwiseConv1D(1, 3),
shape=(BATCH_SIZE, 2, 2)),
ModuleDescriptor(
name="DepthwiseConv2D",
create=lambda: hk.DepthwiseConv2D(1, 3),
shape=(BATCH_SIZE, 2, 2, 2)),
ModuleDescriptor(
name="DepthwiseConv3D",
create=lambda: hk.DepthwiseConv3D(1, 3),
shape=(BATCH_SIZE, 2, 2, 2, 2)),
ModuleDescriptor(
name="SeparableDepthwiseConv2D",
create=lambda: hk.SeparableDepthwiseConv2D(1, 3),
shape=(BATCH_SIZE, 2, 2, 2)),
)
class DummyCore(hk.RNNCore):
def initial_state(self, batch_size):
if batch_size is not None:
return jnp.ones([batch_size, 128, 1])
else:
return jnp.ones([128, 1])
def __call__(self, inputs, state):
return inputs, state
class ResetCoreAdapter(Wrapped, hk.RNNCore):
def initial_state(self, batch_size):
return self.wrapped.initial_state(batch_size)
def __call__(self, inputs, state):
batch_size = inputs.shape[0]
resets = np.broadcast_to(True, (batch_size,))
return self.wrapped((inputs, resets), state)
# RNN cores. For shape, use the shape of a single example.
RNN_CORES = (
ModuleDescriptor(
name="ResetCore",
create=lambda: ResetCoreAdapter(hk.ResetCore(DummyCore())),
shape=(BATCH_SIZE, 128)),
ModuleDescriptor(
name="GRU",
create=lambda: hk.GRU(1),
shape=(BATCH_SIZE, 128)),
ModuleDescriptor(
name="IdentityCore",
create=lambda: hk.IdentityCore(),
shape=(BATCH_SIZE, 128)),
ModuleDescriptor(
name="LSTM",
create=lambda: hk.LSTM(1),
shape=(BATCH_SIZE, 128)),
ModuleDescriptor(
name="Conv1DLSTM",
create=lambda: hk.Conv1DLSTM([2], 3, 3),
shape=(BATCH_SIZE, 2, 2)),
ModuleDescriptor(
name="Conv2DLSTM",
create=lambda: hk.Conv2DLSTM([2, 2], 3, 3),
shape=(BATCH_SIZE, 2, 2, 2)),
ModuleDescriptor(
name="Conv3DLSTM",
create=lambda: hk.Conv3DLSTM([2, 2, 2], 3, 3),
shape=(BATCH_SIZE, 2, 2, 2, 2)),
ModuleDescriptor(
name="VanillaRNN",
create=lambda: hk.VanillaRNN(8),
shape=(BATCH_SIZE, 128)),
)
def recurrent_factory(
create_core: Callable[[], hk.RNNCore],
unroller,
) -> Callable[[], Recurrent]:
return lambda: Recurrent(create_core(), unroller)
def unroll_descriptors(descriptors, unroller):
"""Returns `Recurrent` wrapped descriptors with the given unroller applied."""
out = []
for name, create, shape, dtype in descriptors:
name = f"Recurrent({name}, {unroller.__name__})"
out.append(
ModuleDescriptor(name=name,
create=recurrent_factory(create, unroller),
shape=shape,
dtype=dtype))
return tuple(out)
def module_type(module_fn: ModuleFn) -> type[hk.Module]:
f = hk.transform_with_state(lambda: type(unwrap(module_fn())))
return f.apply(*f.init(jax.random.PRNGKey(42)), None)[0]
def with_name(descriptors: Sequence[ModuleDescriptor]):
return [[n, n, c, s, d] for n, c, s, d in descriptors]
def to_file_name(descriptor: ModuleDescriptor):
n = descriptor.name
return n.replace(" ", "-").replace("(", "-").replace(")", "").replace(",", "")
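# Illustrative example (not part of the original file): the chained replaces
# above turn a descriptor name such as "Recurrent(LSTM, dynamic_unroll)"
# (produced by `unroll_descriptors`) into a flat file name, e.g.:
#
#   d = ModuleDescriptor(name="Recurrent(LSTM, dynamic_unroll)",
#                        create=lambda: None, shape=(BATCH_SIZE, 128))
#   assert to_file_name(d) == "Recurrent-LSTM-dynamic_unroll"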
# Modules that require time then batch input.
RECURRENT_MODULES = (
unroll_descriptors(RNN_CORES, hk.dynamic_unroll) +
unroll_descriptors(RNN_CORES, hk.static_unroll))
STATEFUL_MODULES = (
ModuleDescriptor(
name="nets.VectorQuantizerEMA",
create=lambda: Training(hk.nets.VectorQuantizerEMA(64, 512, 0.25, 0.9)),
shape=(BATCH_SIZE, 64)),
ModuleDescriptor(
name="SpectralNorm",
create=lambda: hk.SpectralNorm(),
shape=(BATCH_SIZE, 3, 2)),
ModuleDescriptor(
name="BatchNorm",
create=lambda: Training(hk.BatchNorm(True, True, 0.9)),
shape=(BATCH_SIZE, 2, 2, 3)),
)
ALL_MODULES = OPTIONAL_BATCH_MODULES + BATCH_MODULES + RECURRENT_MODULES
# Modules that do not use get_parameter
NO_PARAM_MODULES = (
ModuleDescriptor(
name="Flatten",
create=lambda: hk.Flatten(),
shape=(BATCH_SIZE, 3, 3, 3)),
ModuleDescriptor(
name="SpectralNorm",
create=lambda: hk.SpectralNorm(),
shape=(BATCH_SIZE, 3, 2)),
ModuleDescriptor(
name="Sequential",
create=lambda: hk.Sequential([]),
shape=(BATCH_SIZE, 2, 2)),
)
# Modules that do not use "hk.next_rng_key"
NO_NEXT_RNG_KEY_MODULES = (
ModuleDescriptor(
name="Sequential",
create=lambda: hk.Sequential([]),
shape=(BATCH_SIZE, 2, 2)),
ModuleDescriptor(
name="RMSNorm",
create=lambda: hk.RMSNorm(1),
shape=(BATCH_SIZE, 3, 2)),
ModuleDescriptor(
name="InstanceNorm",
create=lambda: hk.InstanceNorm(True, True),
shape=(BATCH_SIZE, 3, 2)),
ModuleDescriptor(
name="GroupNorm",
create=lambda: hk.GroupNorm(5),
shape=(BATCH_SIZE, 4, 4, 10)),
ModuleDescriptor(
name="LayerNorm",
create=lambda: hk.LayerNorm(1, True, True, param_axis=-1),
shape=(BATCH_SIZE, 3, 2)),
ModuleDescriptor(
name="BatchNorm",
create=lambda: Training(hk.BatchNorm(True, True, 0.9)),
shape=(BATCH_SIZE, 2, 2, 3)),
ModuleDescriptor(
name="Bias",
create=lambda: hk.Bias(),
shape=(BATCH_SIZE, 3, 3, 3)),
ModuleDescriptor(
name="Flatten",
create=lambda: hk.Flatten(),
shape=(BATCH_SIZE, 3, 3, 3)),
) + RECURRENT_MODULES
NO_NEXT_RNG_KEY_NAMES = [d.name for d in NO_NEXT_RNG_KEY_MODULES]
NEXT_RNG_KEY_MODULES = [
d for d in ALL_MODULES if d.name not in NO_NEXT_RNG_KEY_NAMES
]
NOT_ONLY_PARAMS_MODULES = set(NO_PARAM_MODULES) | set(STATEFUL_MODULES) | set(
RECURRENT_MODULES)
NOT_ONLY_PARAMS_NAMES = [d.name for d in NOT_ONLY_PARAMS_MODULES]
ONLY_PARAMS_MODULES = [
d for d in ALL_MODULES if d.name not in NOT_ONLY_PARAMS_NAMES
]
IGNORED_MODULES = {
# Stateless or abstract.
hk.BatchApply,
hk.Module,
hk.Reshape,
hk.AvgPool,
hk.MaxPool,
hk.lift,
# Non-standard.
hk.EMAParamsTree,
hk.SNParamsTree,
# Metrics.
hk.ExponentialMovingAverage,
# Recurrent.
hk.DeepRNN,
hk.RNNCore,
# Tested transitively.
hk.nets.ResNet18,
hk.nets.ResNet34,
hk.nets.ResNet50,
hk.nets.ResNet101,
hk.nets.ResNet152,
hk.nets.ResNet200,
}
|
dm-haiku-main
|
haiku/_src/integration/descriptors.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for Haiku typing."""
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
from haiku._src import test_utils
from haiku._src.integration import descriptors
ModuleFn = descriptors.ModuleFn
class TypingTest(parameterized.TestCase):
@test_utils.combined_named_parameters(descriptors.ALL_MODULES)
@test_utils.transform_and_run
def test_protocols(self, module_fn: ModuleFn, shape, dtype):
del shape, dtype
module = descriptors.unwrap(module_fn())
self.assertIsInstance(module, hk.ModuleProtocol)
# NOTE: All current Haiku builtin modules are callable.
self.assertIsInstance(module, hk.SupportsCall)
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
haiku/_src/integration/typing_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utilities."""
from absl.testing import parameterized
import haiku as hk
from haiku._src.integration import descriptors
import jax
import jax.numpy as jnp
import numpy as np
import tree
ModuleFn = descriptors.ModuleFn
class DTypeTestCase(parameterized.TestCase):
"""Common base class for dtype tests."""
def assert_dtype(self, test_dtype, module_fn: ModuleFn, shape, input_dtype):
"""Checks that modules accepting float32 input_dtype output test_dtype."""
if input_dtype != jnp.float32:
self.skipTest('Skipping module with non-f32 input')
def ones_creator(next_creator, shape, dtype, init, context):
if context.full_name == 'vector_quantizer/embeddings':
# NOTE: vector_quantizer/embeddings is created using a ctor argument
# so dtype is not expected to follow input to __call__.
dtype = test_dtype
else:
self.assertEqual(dtype, test_dtype, msg=context.full_name)
# NOTE: We need to do this since some initializers (e.g. random.uniform)
# do not support <32bit dtypes. This also makes the test run a bit faster.
init = jnp.ones
return next_creator(shape, dtype, init)
def g(x):
with hk.custom_creator(ones_creator):
mod = module_fn()
return mod(x)
g = hk.transform_with_state(g)
# No custom creator for state so we need to do this manually.
def cast_if_floating(x):
if jnp.issubdtype(x.dtype, jnp.floating):
x = x.astype(test_dtype)
return x
def init_fn(rng, x):
params, state = g.init(rng, x)
state = jax.tree_util.tree_map(cast_if_floating, state)
return params, state
x = np.ones(shape, test_dtype)
rng = jax.random.PRNGKey(42)
params, state = jax.eval_shape(init_fn, rng, x)
for _ in range(2):
y, state = jax.eval_shape(g.apply, params, state, rng, x)
def assert_dtype(path, v):
if jnp.issubdtype(v.dtype, jnp.floating):
self.assertEqual(v.dtype, test_dtype, msg=path)
tree.map_structure_with_path(assert_dtype, y)
tree.map_structure_with_path(assert_dtype, state)
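# Minimal usage sketch (hypothetical test, not part of the real dtype suite):
# a concrete test subclasses DTypeTestCase and calls assert_dtype with the
# dtype under test.
class _ExampleBfloat16Test(DTypeTestCase):

  def test_linear_is_bf16(self):
    # hk.Linear derives its parameter dtype from its input, so bf16 inputs
    # should yield bf16 parameters and outputs.
    self.assert_dtype(jnp.bfloat16, lambda: hk.Linear(8), shape=(2, 4),
                      input_dtype=jnp.float32)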
|
dm-haiku-main
|
haiku/_src/integration/common.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests to ensure all modules work with jax_numpy_rank_promotion == 'raise'."""
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
from haiku._src import test_utils
from haiku._src.integration import descriptors
import jax
import jax.numpy as jnp
import numpy as np
ModuleFn = descriptors.ModuleFn
class RankPromotionTest(parameterized.TestCase):
def setUp(self):
super().setUp()
jax.config.update('jax_numpy_rank_promotion', 'raise')
def tearDown(self):
super().tearDown()
jax.config.update('jax_numpy_rank_promotion', 'warn')
@test_utils.combined_named_parameters(descriptors.ALL_MODULES)
def test_strict_promotion(self, module_fn: ModuleFn, shape, dtype):
f = hk.transform_with_state(lambda x: module_fn()(x)) # pylint: disable=unnecessary-lambda
rng = jax.random.PRNGKey(42)
x = np.ones(shape, dtype)
params, state = jax.eval_shape(f.init, rng, x)
self.assertIsNotNone(jax.eval_shape(f.apply, params, state, rng, x))
def test_rank_promotion_exception(self):
with self.assertRaises(ValueError) as cm:
_ = jnp.zeros((8, 1)) * jnp.zeros((8,))
self.assertIn('could not be broadcast together', str(cm.exception))
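# Sketch: the same strict-promotion check can be applied outside the test by
# flipping the flag around the code under inspection (values here mirror the
# setUp/tearDown above).
def _with_strict_promotion(fn, *args):
  jax.config.update('jax_numpy_rank_promotion', 'raise')
  try:
    return fn(*args)
  finally:
    jax.config.update('jax_numpy_rank_promotion', 'warn')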
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
haiku/_src/integration/rank_promotion_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for checkpoints."""
from collections.abc import Mapping
from typing import Any
import haiku as hk
from haiku._src import utils
from haiku._src.integration import descriptors
import jax
import jax.numpy as jnp
def format_tensor(tensor: jax.Array) -> str:
shape = list(tensor.shape)
dtype = utils.simple_dtype(tensor.dtype)
return f"{dtype}{shape}"
def module_name(d: descriptors.ModuleDescriptor):
name = hk.testing.transform_and_run(
lambda: str(descriptors.unwrap(d.create())))()
return name.split("\n")
def summarize(d: descriptors.ModuleDescriptor) -> Mapping[str, Any]:
"""Generates a summary of the given descriptor."""
f = hk.transform_with_state(lambda x: d.create()(x)) # pylint: disable=unnecessary-lambda
x = jnp.ones(d.shape, d.dtype)
rng = jax.random.PRNGKey(42)
params, state = map(hk.data_structures.to_mutable_dict,
jax.eval_shape(f.init, rng, x))
out = {"module": module_name(d), "input": format_tensor(x)}
if params:
out["param_size"] = int(hk.data_structures.tree_size(params))
out["param_bytes"] = int(hk.data_structures.tree_bytes(params))
out["params"] = jax.tree_util.tree_map(format_tensor, params)
if state:
out["state_size"] = int(hk.data_structures.tree_size(state))
out["state_bytes"] = int(hk.data_structures.tree_bytes(state))
out["state"] = jax.tree_util.tree_map(format_tensor, state)
return out
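# Usage sketch: summarising one descriptor yields a JSON-serialisable dict
# (checkpoint_generate.py writes one such file per descriptor).
def _example_summary() -> Mapping[str, Any]:
  descriptor = descriptors.ALL_MODULES[0]  # Any ModuleDescriptor will do.
  summary = summarize(descriptor)
  # Expected keys: "module", "input" and, when non-empty, "params"/"state"
  # plus their size/byte counts.
  return summary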
|
dm-haiku-main
|
haiku/_src/integration/checkpoint_utils.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.conformance.descriptors."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
from haiku._src import test_utils
from haiku._src.integration import descriptors
import jax
from jax.experimental import jax2tf
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
ModuleFn = descriptors.ModuleFn
DEFAULT_ATOL = 1e-2
CUSTOM_ATOL = {hk.nets.ResNet: 0.1}
# TODO(tomhennigan): Test with experimental_compile=True.
TF_TRANSFORM = (("identity", lambda f: f), ("tf.function", tf.function))
JAX_TRANSFORM = (("identity", lambda f: f), ("jax.jit", jax.jit))
class JaxToTfTest(parameterized.TestCase):
@test_utils.combined_named_parameters(descriptors.ALL_MODULES,
test_utils.named_bools("init"),
TF_TRANSFORM, JAX_TRANSFORM)
def test_convert(
self,
module_fn: ModuleFn,
shape,
dtype,
init: bool,
tf_transform,
jax_transform,
):
rng = jax.random.PRNGKey(42)
if jnp.issubdtype(dtype, jnp.integer):
x = jax.random.randint(rng, shape, 0, np.prod(shape), dtype)
else:
x = jax.random.uniform(rng, shape, dtype)
def g(x):
return module_fn()(x)
f = hk.transform_with_state(g)
atol = CUSTOM_ATOL.get(descriptors.module_type(module_fn), DEFAULT_ATOL)
assert_allclose = functools.partial(np.testing.assert_allclose, atol=atol)
get = lambda t: jax.tree_util.tree_map(lambda x: x.numpy(), t)
if init:
init_jax = jax_transform(f.init)
init_tf = tf_transform(jax2tf.convert(f.init))
jax.tree_util.tree_map(
assert_allclose, init_jax(rng, x), get(init_tf(rng, x)))
else:
params, state = f.init(rng, x)
apply_jax = jax_transform(f.apply)
apply_tf = tf_transform(jax2tf.convert(f.apply))
jax.tree_util.tree_map(
assert_allclose,
apply_jax(params, state, rng, x),
get(apply_tf(params, state, rng, x)),
)
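# Minimal sketch of the conversion pattern exercised above, applied to a single
# hand-written function rather than the full descriptor sweep.
def _example_convert_linear():
  f = hk.transform_with_state(lambda x: hk.Linear(4)(x))  # pylint: disable=unnecessary-lambda
  rng = jax.random.PRNGKey(0)
  x = np.ones([2, 3], np.float32)
  params, state = f.init(rng, x)
  y_jax, _ = f.apply(params, state, rng, x)
  y_tf, _ = tf.function(jax2tf.convert(f.apply))(params, state, rng, x)
  np.testing.assert_allclose(y_jax, y_tf.numpy(), atol=DEFAULT_ATOL)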
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/integration/jax2tf_test.py
|
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests to ensure all modules work with jaxpr_info."""
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
from haiku._src import test_utils
from haiku._src.integration import descriptors
import jax
import jax.numpy as jnp
ModuleFn = descriptors.ModuleFn
jaxpr_info = hk.experimental.jaxpr_info
class JaxprInfoTest(parameterized.TestCase):
@test_utils.combined_named_parameters(descriptors.ALL_MODULES)
def test_info_and_html(self, module_fn: ModuleFn, shape, dtype):
x = jnp.ones(shape, dtype)
f = hk.transform_with_state(lambda: module_fn()(x))
rng = jax.random.PRNGKey(42)
params, state = f.init(rng)
info = jaxpr_info.make_model_info(f.apply)(params, state, rng)
if descriptors.module_type(module_fn).__name__ != 'Sequential':
self.assertNotEmpty(info.expressions)
self.assertIsNotNone(jaxpr_info.as_html_page(info))
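# Minimal sketch of inspecting a single hand-written function with jaxpr_info,
# mirroring the pattern used in the test above.
def _example_model_info():
  f = hk.transform(lambda x: hk.Linear(4)(x))  # pylint: disable=unnecessary-lambda
  x = jnp.ones([2, 3])
  params = f.init(jax.random.PRNGKey(0), x)
  info = jaxpr_info.make_model_info(f.apply)(params, None, x)
  return jaxpr_info.as_html_page(info)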
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
haiku/_src/integration/jaxpr_info_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates checkpoint json files."""
import json
import os
from absl import app
from absl import flags
from haiku._src.integration import checkpoint_utils
from haiku._src.integration import descriptors
flags.DEFINE_string("base_dir", None, help="Base directory.")
FLAGS = flags.FLAGS
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
for descriptor in descriptors.ALL_MODULES:
cls = descriptors.module_type(descriptor.create)
file_name = descriptors.to_file_name(descriptor) + ".json"
summary = checkpoint_utils.summarize(descriptor)
with open(os.path.join(FLAGS.base_dir, file_name), "w") as fp:
fp.write(json.dumps(summary, indent=2))
fp.write("\n")
if __name__ == "__main__":
flags.mark_flag_as_required("base_dir")
app.run(main)
|
dm-haiku-main
|
haiku/_src/integration/checkpoint_generate.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.nets.vqvae."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import stateful
from haiku._src import test_utils
from haiku._src import transform
from haiku._src.nets import vqvae
import jax
import jax.numpy as jnp
import numpy as np
class VqvaeTest(parameterized.TestCase):
@parameterized.parameters((vqvae.VectorQuantizer, {
'embedding_dim': 4,
'num_embeddings': 8,
'commitment_cost': 0.25
}), (vqvae.VectorQuantizerEMA, {
'embedding_dim': 6,
'num_embeddings': 13,
'commitment_cost': 0.5,
'decay': 0.1
}))
@test_utils.transform_and_run
def testConstruct(self, constructor, kwargs):
vqvae_module = constructor(**kwargs)
# Batch of input vectors to quantize
inputs_np = np.random.randn(100, kwargs['embedding_dim']).astype(np.float32)
inputs = jnp.array(inputs_np)
# Set is_training to False, otherwise for the EMA case just evaluating the
# forward pass will change the embeddings, meaning that some of our computed
# closest embeddings will be incorrect.
vq_output = vqvae_module(inputs, is_training=False)
# Output shape is correct
self.assertEqual(vq_output['quantize'].shape, inputs.shape)
vq_output_np = jax.tree_util.tree_map(lambda t: t, vq_output)
embeddings_np = vqvae_module.embeddings
self.assertEqual(embeddings_np.shape,
(kwargs['embedding_dim'], kwargs['num_embeddings']))
# Check that each input was assigned to the embedding it is closest to.
distances = (jnp.square(inputs_np).sum(axis=1, keepdims=True) -
2 * np.dot(inputs_np, embeddings_np) +
jnp.square(embeddings_np).sum(axis=0, keepdims=True))
closest_index = np.argmax(-distances, axis=1)
# On TPU, distances can be different by ~1% due to precision. This can cause
# the distance to the closest embedding to flip, leading to a difference
# in the encoding indices tensor. First we check that the continuous
# distances are reasonably close, and then we only allow N differences in
# the encodings. For batch of 100, N == 3 seems okay (passed 1000x tests).
np.testing.assert_allclose(distances, vq_output_np['distances'], atol=5e-2)
num_differences_in_encodings = (closest_index !=
vq_output_np['encoding_indices']).sum()
num_differences_allowed = 3
self.assertLessEqual(num_differences_in_encodings, num_differences_allowed)
@parameterized.parameters((vqvae.VectorQuantizer, {
'embedding_dim': 4,
'num_embeddings': 8,
'commitment_cost': 0.25
}), (vqvae.VectorQuantizerEMA, {
'embedding_dim': 6,
'num_embeddings': 13,
'commitment_cost': 0.5,
'decay': 0.1
}))
@test_utils.transform_and_run
def testShapeChecking(self, constructor, kwargs):
vqvae_module = constructor(**kwargs)
wrong_shape_input = np.random.randn(100, kwargs['embedding_dim'] * 2)
with self.assertRaisesRegex(TypeError, 'total size must be unchanged'):
vqvae_module(
jnp.array(wrong_shape_input.astype(np.float32)), is_training=False)
@parameterized.parameters((vqvae.VectorQuantizer, {
'embedding_dim': 4,
'num_embeddings': 8,
'commitment_cost': 0.25
}), (vqvae.VectorQuantizerEMA, {
'embedding_dim': 6,
'num_embeddings': 13,
'commitment_cost': 0.5,
'decay': 0.1
}))
@test_utils.transform_and_run
def testNoneBatch(self, constructor, kwargs):
"""Check that vqvae can be built on input with a None batch dimension."""
vqvae_module = constructor(**kwargs)
inputs = jnp.zeros([0, 5, 5, kwargs['embedding_dim']])
vqvae_module(inputs, is_training=False)
@parameterized.parameters({'use_jit': True, 'dtype': jnp.float32},
{'use_jit': True, 'dtype': jnp.float64},
{'use_jit': False, 'dtype': jnp.float32},
{'use_jit': False, 'dtype': jnp.float64})
@test_utils.transform_and_run
def testEmaUpdating(self, use_jit, dtype):
if jax.local_devices()[0].platform == 'tpu' and dtype == jnp.float64:
self.skipTest('F64 not supported by TPU')
embedding_dim = 6
np_dtype = np.float64 if dtype is jnp.float64 else np.float32
decay = np.array(0.1, dtype=np_dtype)
vqvae_module = vqvae.VectorQuantizerEMA(
embedding_dim=embedding_dim,
num_embeddings=7,
commitment_cost=0.5,
decay=decay,
dtype=dtype)
if use_jit:
vqvae_f = stateful.jit(vqvae_module, static_argnums=1)
else:
vqvae_f = vqvae_module
batch_size = 16
prev_embeddings = vqvae_module.embeddings
# Embeddings should change with every forward pass if is_training == True.
for _ in range(10):
inputs = np.random.rand(batch_size, embedding_dim).astype(dtype)
vqvae_f(inputs, True)
current_embeddings = vqvae_module.embeddings
self.assertFalse((prev_embeddings == current_embeddings).all())
prev_embeddings = current_embeddings
# Forward passes with is_training == False don't change anything
for _ in range(10):
inputs = np.random.rand(batch_size, embedding_dim).astype(dtype)
vqvae_f(inputs, False)
current_embeddings = vqvae_module.embeddings
self.assertTrue((current_embeddings == prev_embeddings).all())
def testEmaCrossReplica(self):
embedding_dim = 6
batch_size = 16
inputs = np.random.rand(jax.local_device_count(), batch_size, embedding_dim)
embeddings = {}
perplexities = {}
for axis_name in [None, 'i']:
def my_function(x, axis_name):
decay = np.array(0.9, dtype=np.float32)
vqvae_module = vqvae.VectorQuantizerEMA(
embedding_dim=embedding_dim,
num_embeddings=7,
commitment_cost=0.5,
decay=decay,
cross_replica_axis=axis_name,
dtype=jnp.float32)
outputs = vqvae_module(x, is_training=True)
return vqvae_module.embeddings, outputs['perplexity']
vqvae_f = transform.transform_with_state(
functools.partial(my_function, axis_name=axis_name))
rng = jax.random.PRNGKey(42)
rng = jnp.broadcast_to(rng, (jax.local_device_count(), *rng.shape))
params, state = jax.pmap(
vqvae_f.init, axis_name='i')(rng, inputs)
update_fn = jax.pmap(vqvae_f.apply, axis_name='i')
for _ in range(10):
outputs, state = update_fn(params, state, None, inputs)
embeddings[axis_name], perplexities[axis_name] = outputs
# In the single-device case, specifying a cross_replica_axis should have
# no effect. Otherwise, it should!
if jax.device_count() == 1:
# Have to use assert_allclose here rather than checking exact matches to
# make the test pass on GPU, presumably because of nondeterministic
# reductions.
np.testing.assert_allclose(
embeddings[None], embeddings['i'], rtol=1e-6, atol=1e-6)
np.testing.assert_allclose(
perplexities[None], perplexities['i'], rtol=1e-6, atol=1e-6)
else:
self.assertFalse((embeddings[None] == embeddings['i']).all())
self.assertFalse((perplexities[None] == perplexities['i']).all())
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
haiku/_src/nets/vqvae_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.nets.mobilenetv1."""
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import test_utils
from haiku._src.nets import mobilenetv1
import jax.numpy as jnp
class MobileNetV1Test(parameterized.TestCase):
@parameterized.parameters(True, False)
@test_utils.transform_and_run
def test_simple(self, use_bn):
image = jnp.ones([2, 224, 224, 3])
model = mobilenetv1.MobileNetV1(
(1, 2, 2, 2, 2),
(16, 32, 64, 128, 256),
100,
use_bn=use_bn
)
logits = model(image, is_training=True)
self.assertIsNotNone(logits)
self.assertEqual(logits.shape, (2, 100))
@test_utils.transform_and_run
def test_error_incorrect_args_stride_list(self):
stride_list = (1, 2, 2, 2, 1, 2)
channel_list = (64, 128, 128, 256, 256, 512, 512, 512, 512,
512, 512, 1024, 1024)
with self.assertRaisesRegex(
ValueError, "`strides` and `channels` must have the same length."):
mobilenetv1.MobileNetV1(stride_list,
channel_list,
1000,
True)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/nets/mobilenetv1_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
|
dm-haiku-main
|
haiku/_src/nets/__init__.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MobileNet V1, from https://arxiv.org/abs/1704.04861.
Achieves ~71% top-1 performance on ImageNet.
Depending on the input size, you may want to adjust the strides from their
default configuration.
With a 32x32 input, the last block output is (N, 1, 1, 1024) before average
pooling; with a 224x224 input it is (N, 7, 7, 1024).
The average pooling is currently done via a mean over the spatial dimensions
and returns (N, 1024). If something different is desired, replace it with
AvgPool.
"""
from collections.abc import Sequence
from typing import Optional
from haiku._src import basic
from haiku._src import batch_norm
from haiku._src import conv
from haiku._src import depthwise_conv
from haiku._src import module
from haiku._src import reshape
import jax
import jax.numpy as jnp
# If you are forking replace this with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
Module = module.Module
BatchNorm = batch_norm.BatchNorm
Conv2D = conv.Conv2D
DepthwiseConv2D = depthwise_conv.DepthwiseConv2D
Flatten = reshape.Flatten
Linear = basic.Linear
# pylint: enable=invalid-name
del basic, batch_norm, conv, depthwise_conv, module, reshape
class MobileNetV1Block(hk.Module):
"""Block for MobileNetV1."""
def __init__(
self,
channels: int,
stride: int,
use_bn: bool = True,
name: Optional[str] = None,
):
super().__init__(name=name)
self.channels = channels
self.stride = stride
self.use_bn = use_bn
self.with_bias = not use_bn
def __call__(self, inputs: jax.Array, is_training: bool) -> jax.Array:
depthwise = hk.DepthwiseConv2D(
channel_multiplier=1,
kernel_shape=3,
stride=self.stride,
padding=((1, 1), (1, 1)),
with_bias=self.with_bias,
name="depthwise_conv")
pointwise = hk.Conv2D(
output_channels=self.channels,
kernel_shape=(1, 1),
stride=1,
padding="VALID",
with_bias=self.with_bias,
name="pointwise_conv")
out = depthwise(inputs)
if self.use_bn:
bn1 = hk.BatchNorm(create_scale=True, create_offset=True,
decay_rate=0.999)
out = bn1(out, is_training)
out = jax.nn.relu(out)
out = pointwise(out)
if self.use_bn:
bn2 = hk.BatchNorm(create_scale=True, create_offset=True,
decay_rate=0.999)
out = bn2(out, is_training)
out = jax.nn.relu(out)
return out
class MobileNetV1(hk.Module):
"""MobileNetV1 model."""
# TODO(jordanhoffmann) add width multiplier
def __init__(
self,
strides: Sequence[int] = (1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1),
channels: Sequence[int] = (64, 128, 128, 256, 256, 512, 512,
512, 512, 512, 512, 1024, 1024),
num_classes: int = 1000,
use_bn: bool = True,
name: Optional[str] = None,
):
"""Constructs a MobileNetV1 model.
Args:
strides: The stride to use in the depthwise convolution of each MobileNet
block.
channels: Number of output channels from the pointwise convolution to use
in each block.
num_classes: Number of classes.
use_bn: Whether or not to use batch normalization. Defaults to True. Biases
are only used when batch normalization is disabled.
name: Name of the module.
"""
super().__init__(name=name)
if len(strides) != len(channels):
raise ValueError("`strides` and `channels` must have the same length.")
self.strides = strides
self.channels = channels
self.use_bn = use_bn
self.with_bias = not use_bn
self.num_classes = num_classes
def __call__(self, inputs: jax.Array, is_training: bool) -> jax.Array:
initial_conv = hk.Conv2D(
output_channels=32,
kernel_shape=(3, 3),
stride=2,
padding="VALID",
with_bias=self.with_bias)
out = initial_conv(inputs)
if self.use_bn:
bn = hk.BatchNorm(create_scale=True, create_offset=True, decay_rate=0.999)
out = bn(out, is_training)
out = jax.nn.relu(out)
for i in range(len(self.strides)):
block = MobileNetV1Block(self.channels[i],
self.strides[i],
self.use_bn)
out = block(out, is_training)
out = jnp.mean(out, axis=(1, 2))
out = hk.Flatten()(out)
out = hk.Linear(self.num_classes, name="logits")(out)
return out
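# Usage sketch (assumes the public package, e.g. ``import haiku``, which cannot
# be imported from inside this file):
#
#   def forward(images, is_training):
#     return MobileNetV1(num_classes=1000)(images, is_training)
#
#   forward = haiku.transform_with_state(forward)
#   images = jnp.ones([2, 224, 224, 3])
#   params, state = forward.init(jax.random.PRNGKey(0), images, True)
#   logits, state = forward.apply(params, state, None, images, True)
#   # logits.shape == (2, 1000)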
|
dm-haiku-main
|
haiku/_src/nets/mobilenetv1.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A minimal interface mlp module."""
from collections.abc import Iterable
from typing import Callable, Optional
from haiku._src import base
from haiku._src import basic
from haiku._src import initializers
from haiku._src import module
import jax
# If you are forking replace this with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
Module = module.Module
initializers = initializers
get_parameter = base.get_parameter
PRNGSequence = base.PRNGSequence
Linear = basic.Linear
dropout = basic.dropout
# pylint: enable=invalid-name
del base, basic, module
class MLP(hk.Module):
"""A multi-layer perceptron module."""
def __init__(
self,
output_sizes: Iterable[int],
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None,
with_bias: bool = True,
activation: Callable[[jax.Array], jax.Array] = jax.nn.relu,
activate_final: bool = False,
name: Optional[str] = None,
):
"""Constructs an MLP.
Args:
output_sizes: Sequence of layer sizes.
w_init: Initializer for :class:`~haiku.Linear` weights.
b_init: Initializer for :class:`~haiku.Linear` bias. Must be ``None`` if
``with_bias=False``.
with_bias: Whether or not to apply a bias in each layer.
activation: Activation function to apply between :class:`~haiku.Linear`
layers. Defaults to ReLU.
activate_final: Whether or not to activate the final layer of the MLP.
name: Optional name for this module.
Raises:
ValueError: If ``with_bias`` is ``False`` and ``b_init`` is not ``None``.
"""
if not with_bias and b_init is not None:
raise ValueError("When with_bias=False b_init must not be set.")
super().__init__(name=name)
self.with_bias = with_bias
self.w_init = w_init
self.b_init = b_init
self.activation = activation
self.activate_final = activate_final
layers = []
output_sizes = tuple(output_sizes)
for index, output_size in enumerate(output_sizes):
layers.append(hk.Linear(output_size=output_size,
w_init=w_init,
b_init=b_init,
with_bias=with_bias,
name="linear_%d" % index))
self.layers = tuple(layers)
self.output_size = output_sizes[-1] if output_sizes else None
def __call__(
self,
inputs: jax.Array,
dropout_rate: Optional[float] = None,
rng=None,
) -> jax.Array:
"""Connects the module to some inputs.
Args:
inputs: A Tensor of shape ``[batch_size, input_size]``.
dropout_rate: Optional dropout rate.
rng: Optional RNG key. Required when using dropout.
Returns:
The output of the model of size ``[batch_size, output_size]``.
"""
if dropout_rate is not None and rng is None:
raise ValueError("When using dropout an rng key must be passed.")
elif dropout_rate is None and rng is not None:
raise ValueError("RNG should only be passed when using dropout.")
rng = hk.PRNGSequence(rng) if rng is not None else None
num_layers = len(self.layers)
out = inputs
for i, layer in enumerate(self.layers):
out = layer(out)
if i < (num_layers - 1) or self.activate_final:
# Only perform dropout if we are activating the output.
if dropout_rate is not None:
out = hk.dropout(next(rng), dropout_rate, out)
out = self.activation(out)
return out
def reverse(
self,
activate_final: Optional[bool] = None,
name: Optional[str] = None,
) -> "MLP":
"""Returns a new MLP which is the layer-wise reverse of this MLP.
NOTE: Since computing the reverse of an MLP requires knowing the input size
of each linear layer this method will fail if the module has not been called
at least once.
The contract of reverse is that the reversed module will accept the output
of the parent module as input and produce an output which is the input size
of the parent.
>>> mlp = hk.nets.MLP([1, 2, 3])
>>> mlp_in = jnp.ones([1, 2])
>>> y = mlp(mlp_in)
>>> rev = mlp.reverse()
>>> rev_mlp_out = rev(y)
>>> mlp_in.shape == rev_mlp_out.shape
True
Args:
activate_final: Whether the final layer of the MLP should be activated.
name: Optional name for the new module. The default name will be the name
of the current module with ``"_reversed"`` appended.
Returns:
An MLP instance which is the reverse of the current instance. Note these
instances do not share weights and, apart from being symmetric to each
other, are not coupled in any way.
"""
if activate_final is None:
activate_final = self.activate_final
if name is None:
name = self.name + "_reversed"
output_sizes = tuple(
layer.input_size
for layer in reversed(self.layers)
if layer.input_size is not None
)
if len(output_sizes) != len(self.layers):
raise ValueError("You cannot reverse an MLP until it has been called.")
return MLP(
output_sizes=output_sizes,
w_init=self.w_init,
b_init=self.b_init,
with_bias=self.with_bias,
activation=self.activation,
activate_final=activate_final,
name=name,
)
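# Usage sketch for the dropout path (assumes the public ``haiku`` package,
# which cannot be imported from inside this file):
#
#   def forward(x):
#     mlp = MLP([300, 100, 10])
#     return mlp(x, dropout_rate=0.5, rng=haiku.next_rng_key())
#
#   forward = haiku.transform(forward)
#   x = jax.numpy.ones([8, 28 * 28])
#   params = forward.init(jax.random.PRNGKey(42), x)
#   out = forward.apply(params, jax.random.PRNGKey(0), x)  # shape (8, 10)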
|
dm-haiku-main
|
haiku/_src/nets/mlp.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resnet."""
from collections.abc import Mapping, Sequence
from typing import Any, Optional, Union
from haiku._src import basic
from haiku._src import batch_norm
from haiku._src import conv
from haiku._src import module
from haiku._src import pool
import jax
import jax.numpy as jnp
# If you are forking replace this with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
Module = module.Module
BatchNorm = batch_norm.BatchNorm
Conv2D = conv.Conv2D
Linear = basic.Linear
max_pool = pool.max_pool
# pylint: enable=invalid-name
del basic, batch_norm, conv, module, pool
FloatStrOrBool = Union[str, float, bool]
class BlockV1(hk.Module):
"""ResNet V1 block with optional bottleneck."""
def __init__(
self,
channels: int,
stride: Union[int, Sequence[int]],
use_projection: bool,
bn_config: Mapping[str, FloatStrOrBool],
bottleneck: bool,
name: Optional[str] = None,
):
super().__init__(name=name)
self.use_projection = use_projection
bn_config = dict(bn_config)
bn_config.setdefault("create_scale", True)
bn_config.setdefault("create_offset", True)
bn_config.setdefault("decay_rate", 0.999)
if self.use_projection:
self.proj_conv = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=stride,
with_bias=False,
padding="SAME",
name="shortcut_conv")
self.proj_batchnorm = hk.BatchNorm(name="shortcut_batchnorm", **bn_config)
channel_div = 4 if bottleneck else 1
conv_0 = hk.Conv2D(
output_channels=channels // channel_div,
kernel_shape=1 if bottleneck else 3,
stride=1 if bottleneck else stride,
with_bias=False,
padding="SAME",
name="conv_0")
bn_0 = hk.BatchNorm(name="batchnorm_0", **bn_config)
conv_1 = hk.Conv2D(
output_channels=channels // channel_div,
kernel_shape=3,
stride=stride if bottleneck else 1,
with_bias=False,
padding="SAME",
name="conv_1")
bn_1 = hk.BatchNorm(name="batchnorm_1", **bn_config)
layers = ((conv_0, bn_0), (conv_1, bn_1))
if bottleneck:
conv_2 = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=1,
with_bias=False,
padding="SAME",
name="conv_2")
bn_2 = hk.BatchNorm(name="batchnorm_2", scale_init=jnp.zeros, **bn_config)
layers = layers + ((conv_2, bn_2),)
self.layers = layers
def __call__(self, inputs, is_training, test_local_stats):
out = shortcut = inputs
if self.use_projection:
shortcut = self.proj_conv(shortcut)
shortcut = self.proj_batchnorm(shortcut, is_training, test_local_stats)
for i, (conv_i, bn_i) in enumerate(self.layers):
out = conv_i(out)
out = bn_i(out, is_training, test_local_stats)
if i < len(self.layers) - 1: # Don't apply relu on last layer
out = jax.nn.relu(out)
return jax.nn.relu(out + shortcut)
class BlockV2(hk.Module):
"""ResNet V2 block with optional bottleneck."""
def __init__(
self,
channels: int,
stride: Union[int, Sequence[int]],
use_projection: bool,
bn_config: Mapping[str, FloatStrOrBool],
bottleneck: bool,
name: Optional[str] = None,
):
super().__init__(name=name)
self.use_projection = use_projection
bn_config = dict(bn_config)
bn_config.setdefault("create_scale", True)
bn_config.setdefault("create_offset", True)
if self.use_projection:
self.proj_conv = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=stride,
with_bias=False,
padding="SAME",
name="shortcut_conv")
channel_div = 4 if bottleneck else 1
conv_0 = hk.Conv2D(
output_channels=channels // channel_div,
kernel_shape=1 if bottleneck else 3,
stride=1 if bottleneck else stride,
with_bias=False,
padding="SAME",
name="conv_0")
bn_0 = hk.BatchNorm(name="batchnorm_0", **bn_config)
conv_1 = hk.Conv2D(
output_channels=channels // channel_div,
kernel_shape=3,
stride=stride if bottleneck else 1,
with_bias=False,
padding="SAME",
name="conv_1")
bn_1 = hk.BatchNorm(name="batchnorm_1", **bn_config)
layers = ((conv_0, bn_0), (conv_1, bn_1))
if bottleneck:
conv_2 = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=1,
with_bias=False,
padding="SAME",
name="conv_2")
# NOTE: Some implementations of ResNet50 v2 suggest initializing
# gamma/scale here to zeros.
bn_2 = hk.BatchNorm(name="batchnorm_2", **bn_config)
layers = layers + ((conv_2, bn_2),)
self.layers = layers
def __call__(self, inputs, is_training, test_local_stats):
x = shortcut = inputs
for i, (conv_i, bn_i) in enumerate(self.layers):
x = bn_i(x, is_training, test_local_stats)
x = jax.nn.relu(x)
if i == 0 and self.use_projection:
shortcut = self.proj_conv(x)
x = conv_i(x)
return x + shortcut
class BlockGroup(hk.Module):
"""Higher level block for ResNet implementation."""
def __init__(
self,
channels: int,
num_blocks: int,
stride: Union[int, Sequence[int]],
bn_config: Mapping[str, FloatStrOrBool],
resnet_v2: bool,
bottleneck: bool,
use_projection: bool,
name: Optional[str] = None,
):
super().__init__(name=name)
block_cls = BlockV2 if resnet_v2 else BlockV1
self.blocks = []
for i in range(num_blocks):
self.blocks.append(
block_cls(channels=channels,
stride=(1 if i else stride),
use_projection=(i == 0 and use_projection),
bottleneck=bottleneck,
bn_config=bn_config,
name="block_%d" % (i)))
def __call__(self, inputs, is_training, test_local_stats):
out = inputs
for block in self.blocks:
out = block(out, is_training, test_local_stats)
return out
def check_length(length, value, name):
if len(value) != length:
raise ValueError(f"`{name}` must be of length 4 not {len(value)}")
class ResNet(hk.Module):
"""ResNet model."""
CONFIGS = {
18: {
"blocks_per_group": (2, 2, 2, 2),
"bottleneck": False,
"channels_per_group": (64, 128, 256, 512),
"use_projection": (False, True, True, True),
},
34: {
"blocks_per_group": (3, 4, 6, 3),
"bottleneck": False,
"channels_per_group": (64, 128, 256, 512),
"use_projection": (False, True, True, True),
},
50: {
"blocks_per_group": (3, 4, 6, 3),
"bottleneck": True,
"channels_per_group": (256, 512, 1024, 2048),
"use_projection": (True, True, True, True),
},
101: {
"blocks_per_group": (3, 4, 23, 3),
"bottleneck": True,
"channels_per_group": (256, 512, 1024, 2048),
"use_projection": (True, True, True, True),
},
152: {
"blocks_per_group": (3, 8, 36, 3),
"bottleneck": True,
"channels_per_group": (256, 512, 1024, 2048),
"use_projection": (True, True, True, True),
},
200: {
"blocks_per_group": (3, 24, 36, 3),
"bottleneck": True,
"channels_per_group": (256, 512, 1024, 2048),
"use_projection": (True, True, True, True),
},
}
BlockGroup = BlockGroup # pylint: disable=invalid-name
BlockV1 = BlockV1 # pylint: disable=invalid-name
BlockV2 = BlockV2 # pylint: disable=invalid-name
def __init__(
self,
blocks_per_group: Sequence[int],
num_classes: int,
bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
resnet_v2: bool = False,
bottleneck: bool = True,
channels_per_group: Sequence[int] = (256, 512, 1024, 2048),
use_projection: Sequence[bool] = (True, True, True, True),
logits_config: Optional[Mapping[str, Any]] = None,
name: Optional[str] = None,
initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
strides: Sequence[int] = (1, 2, 2, 2),
):
"""Constructs a ResNet model.
Args:
blocks_per_group: A sequence of length 4 that indicates the number of
blocks created in each group.
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers. By default the
``decay_rate`` is ``0.9`` and ``eps`` is ``1e-5``.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
``False``.
bottleneck: Whether the block should bottleneck or not. Defaults to
``True``.
channels_per_group: A sequence of length 4 that indicates the number
of channels used for each block in each group.
use_projection: A sequence of length 4 that indicates whether each
residual block should use projection.
logits_config: A dictionary of keyword arguments for the logits layer.
name: Name of the module.
initial_conv_config: Keyword arguments passed to the constructor of the
initial :class:`~haiku.Conv2D` module.
strides: A sequence of length 4 that indicates the size of stride
of convolutions for each block in each group.
"""
super().__init__(name=name)
self.resnet_v2 = resnet_v2
bn_config = dict(bn_config or {})
bn_config.setdefault("decay_rate", 0.9)
bn_config.setdefault("eps", 1e-5)
bn_config.setdefault("create_scale", True)
bn_config.setdefault("create_offset", True)
logits_config = dict(logits_config or {})
logits_config.setdefault("w_init", jnp.zeros)
logits_config.setdefault("name", "logits")
# Number of blocks in each group for ResNet.
check_length(4, blocks_per_group, "blocks_per_group")
check_length(4, channels_per_group, "channels_per_group")
check_length(4, strides, "strides")
initial_conv_config = dict(initial_conv_config or {})
initial_conv_config.setdefault("output_channels", 64)
initial_conv_config.setdefault("kernel_shape", 7)
initial_conv_config.setdefault("stride", 2)
initial_conv_config.setdefault("with_bias", False)
initial_conv_config.setdefault("padding", "SAME")
initial_conv_config.setdefault("name", "initial_conv")
self.initial_conv = hk.Conv2D(**initial_conv_config)
if not self.resnet_v2:
self.initial_batchnorm = hk.BatchNorm(name="initial_batchnorm",
**bn_config)
self.block_groups = []
for i, stride in enumerate(strides):
self.block_groups.append(
BlockGroup(channels=channels_per_group[i],
num_blocks=blocks_per_group[i],
stride=stride,
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=bottleneck,
use_projection=use_projection[i],
name="block_group_%d" % (i)))
if self.resnet_v2:
self.final_batchnorm = hk.BatchNorm(name="final_batchnorm", **bn_config)
self.logits = hk.Linear(num_classes, **logits_config)
def __call__(self, inputs, is_training, test_local_stats=False):
out = inputs
out = self.initial_conv(out)
if not self.resnet_v2:
out = self.initial_batchnorm(out, is_training, test_local_stats)
out = jax.nn.relu(out)
out = hk.max_pool(out,
window_shape=(1, 3, 3, 1),
strides=(1, 2, 2, 1),
padding="SAME")
for block_group in self.block_groups:
out = block_group(out, is_training, test_local_stats)
if self.resnet_v2:
out = self.final_batchnorm(out, is_training, test_local_stats)
out = jax.nn.relu(out)
out = jnp.mean(out, axis=(1, 2))
return self.logits(out)
class ResNet18(ResNet):
"""ResNet18."""
def __init__(
self,
num_classes: int,
bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
resnet_v2: bool = False,
logits_config: Optional[Mapping[str, Any]] = None,
name: Optional[str] = None,
initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
strides: Sequence[int] = (1, 2, 2, 2),
):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to ``False``.
logits_config: A dictionary of keyword arguments for the logits layer.
name: Name of the module.
initial_conv_config: Keyword arguments passed to the constructor of the
initial :class:`~haiku.Conv2D` module.
strides: A sequence of length 4 that indicates the size of stride
of convolutions for each block in each group.
"""
super().__init__(num_classes=num_classes,
bn_config=bn_config,
initial_conv_config=initial_conv_config,
resnet_v2=resnet_v2,
strides=strides,
logits_config=logits_config,
name=name,
**ResNet.CONFIGS[18])
class ResNet34(ResNet):
"""ResNet34."""
def __init__(
self,
num_classes: int,
bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
resnet_v2: bool = False,
logits_config: Optional[Mapping[str, Any]] = None,
name: Optional[str] = None,
initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
strides: Sequence[int] = (1, 2, 2, 2),
):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to ``False``.
logits_config: A dictionary of keyword arguments for the logits layer.
name: Name of the module.
initial_conv_config: Keyword arguments passed to the constructor of the
initial :class:`~haiku.Conv2D` module.
strides: A sequence of length 4 that indicates the size of stride
of convolutions for each block in each group.
"""
super().__init__(num_classes=num_classes,
bn_config=bn_config,
initial_conv_config=initial_conv_config,
resnet_v2=resnet_v2,
strides=strides,
logits_config=logits_config,
name=name,
**ResNet.CONFIGS[34])
class ResNet50(ResNet):
"""ResNet50."""
def __init__(
self,
num_classes: int,
bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
resnet_v2: bool = False,
logits_config: Optional[Mapping[str, Any]] = None,
name: Optional[str] = None,
initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
strides: Sequence[int] = (1, 2, 2, 2),
):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to ``False``.
logits_config: A dictionary of keyword arguments for the logits layer.
name: Name of the module.
initial_conv_config: Keyword arguments passed to the constructor of the
initial :class:`~haiku.Conv2D` module.
strides: A sequence of length 4 that indicates the size of stride
of convolutions for each block in each group.
"""
super().__init__(num_classes=num_classes,
bn_config=bn_config,
initial_conv_config=initial_conv_config,
resnet_v2=resnet_v2,
strides=strides,
logits_config=logits_config,
name=name,
**ResNet.CONFIGS[50])
class ResNet101(ResNet):
"""ResNet101."""
def __init__(
self,
num_classes: int,
bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
resnet_v2: bool = False,
logits_config: Optional[Mapping[str, Any]] = None,
name: Optional[str] = None,
initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
strides: Sequence[int] = (1, 2, 2, 2),
):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to ``False``.
logits_config: A dictionary of keyword arguments for the logits layer.
name: Name of the module.
initial_conv_config: Keyword arguments passed to the constructor of the
initial :class:`~haiku.Conv2D` module.
strides: A sequence of length 4 that indicates the size of stride
of convolutions for each block in each group.
"""
super().__init__(num_classes=num_classes,
bn_config=bn_config,
initial_conv_config=initial_conv_config,
resnet_v2=resnet_v2,
strides=strides,
logits_config=logits_config,
name=name,
**ResNet.CONFIGS[101])
class ResNet152(ResNet):
"""ResNet152."""
def __init__(
self,
num_classes: int,
bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
resnet_v2: bool = False,
logits_config: Optional[Mapping[str, Any]] = None,
name: Optional[str] = None,
initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
strides: Sequence[int] = (1, 2, 2, 2),
):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to ``False``.
logits_config: A dictionary of keyword arguments for the logits layer.
name: Name of the module.
initial_conv_config: Keyword arguments passed to the constructor of the
initial :class:`~haiku.Conv2D` module.
strides: A sequence of length 4 that indicates the size of stride
of convolutions for each block in each group.
"""
super().__init__(num_classes=num_classes,
bn_config=bn_config,
initial_conv_config=initial_conv_config,
resnet_v2=resnet_v2,
strides=strides,
logits_config=logits_config,
name=name,
**ResNet.CONFIGS[152])
class ResNet200(ResNet):
"""ResNet200."""
def __init__(
self,
num_classes: int,
bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
resnet_v2: bool = False,
logits_config: Optional[Mapping[str, Any]] = None,
name: Optional[str] = None,
initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
strides: Sequence[int] = (1, 2, 2, 2),
):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to ``False``.
logits_config: A dictionary of keyword arguments for the logits layer.
name: Name of the module.
initial_conv_config: Keyword arguments passed to the constructor of the
initial :class:`~haiku.Conv2D` module.
strides: A sequence of length 4 that indicates the size of stride
of convolutions for each block in each group.
"""
super().__init__(num_classes=num_classes,
bn_config=bn_config,
initial_conv_config=initial_conv_config,
resnet_v2=resnet_v2,
strides=strides,
logits_config=logits_config,
name=name,
**ResNet.CONFIGS[200])
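# Usage sketch (assumes the public ``haiku`` package, which cannot be imported
# from inside this file):
#
#   def forward(images, is_training):
#     return ResNet50(num_classes=1000)(images, is_training)
#
#   forward = haiku.transform_with_state(forward)
#   images = jnp.ones([8, 224, 224, 3])
#   params, state = forward.init(jax.random.PRNGKey(0), images, True)
#   logits, state = forward.apply(params, state, None, images, False)
#   # logits.shape == (8, 1000); BatchNorm statistics live in ``state``.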
|
dm-haiku-main
|
haiku/_src/nets/resnet.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Haiku implementation of VQ-VAE https://arxiv.org/abs/1711.00937."""
from typing import Any, Optional
from haiku._src import base
from haiku._src import initializers
from haiku._src import module
from haiku._src import moving_averages
import jax
import jax.numpy as jnp
# If you are forking replace this with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
get_parameter = base.get_parameter
get_state = base.get_state
set_state = base.set_state
initializers = initializers
ExponentialMovingAverage = moving_averages.ExponentialMovingAverage
Module = module.Module
# pylint: enable=invalid-name
del base, initializers, module, moving_averages
class VectorQuantizer(hk.Module):
"""Haiku module representing the VQ-VAE layer.
Implements the algorithm presented in
"Neural Discrete Representation Learning" by van den Oord et al.
https://arxiv.org/abs/1711.00937
Input any tensor to be quantized. Last dimension will be used as space in
which to quantize. All other dimensions will be flattened and will be seen
as different examples to quantize.
The output tensor will have the same shape as the input.
For example a tensor with shape ``[16, 32, 32, 64]`` will be reshaped into
``[16384, 64]`` and all ``16384`` vectors (each of ``64`` dimensions) will be
quantized independently.
Attributes:
embedding_dim: integer representing the dimensionality of the tensors in the
quantized space. Inputs to the modules must be in this format as well.
num_embeddings: integer, the number of vectors in the quantized space.
commitment_cost: scalar which controls the weighting of the loss terms (see
equation 4 in the paper - this variable is Beta).
"""
def __init__(
self,
embedding_dim: int,
num_embeddings: int,
commitment_cost: float,
dtype: Any = jnp.float32,
name: Optional[str] = None,
cross_replica_axis: Optional[str] = None,
):
"""Initializes a VQ-VAE module.
Args:
embedding_dim: dimensionality of the tensors in the quantized space.
Inputs to the modules must be in this format as well.
num_embeddings: number of vectors in the quantized space.
commitment_cost: scalar which controls the weighting of the loss terms
(see equation 4 in the paper - this variable is Beta).
dtype: dtype for the embeddings variable, defaults to ``float32``.
name: name of the module.
cross_replica_axis: If not ``None``, it should be a string representing
the axis name over which this module is being run within a
:func:`jax.pmap`. Supplying this argument means that perplexity is
calculated across all replicas on that axis.
"""
super().__init__(name=name)
self.embedding_dim = embedding_dim
self.num_embeddings = num_embeddings
self.commitment_cost = commitment_cost
self.cross_replica_axis = cross_replica_axis
self._embedding_shape = [embedding_dim, num_embeddings]
self._embedding_dtype = dtype
@property
def embeddings(self):
initializer = hk.initializers.VarianceScaling(distribution="uniform")
return hk.get_parameter(
"embeddings",
self._embedding_shape,
self._embedding_dtype,
init=initializer)
def __call__(self, inputs, is_training):
"""Connects the module to some inputs.
Args:
inputs: Tensor, final dimension must be equal to ``embedding_dim``. All
other leading dimensions will be flattened and treated as a large batch.
is_training: boolean, whether this connection is to training data.
Returns:
dict: Dictionary containing the following keys and values:
* ``quantize``: Tensor containing the quantized version of the input.
* ``loss``: Tensor containing the loss to optimize.
* ``perplexity``: Tensor containing the perplexity of the encodings.
* ``encodings``: Tensor containing the discrete encodings, ie which
element of the quantized space each input element was mapped to.
* ``encoding_indices``: Tensor containing the discrete encoding indices,
ie which element of the quantized space each input element was mapped
to.
"""
flat_inputs = jnp.reshape(inputs, [-1, self.embedding_dim])
distances = (
jnp.sum(jnp.square(flat_inputs), 1, keepdims=True) -
2 * jnp.matmul(flat_inputs, self.embeddings) +
jnp.sum(jnp.square(self.embeddings), 0, keepdims=True))
encoding_indices = jnp.argmax(-distances, 1)
encodings = jax.nn.one_hot(encoding_indices,
self.num_embeddings,
dtype=distances.dtype)
# NB: if your code crashes with a reshape error on the line below about a
# Tensor containing the wrong number of values, then the most likely cause
# is that the input passed in does not have a final dimension equal to
# self.embedding_dim. Ideally we would catch this with an Assert but that
# creates various other problems related to device placement / TPUs.
encoding_indices = jnp.reshape(encoding_indices, inputs.shape[:-1])
quantized = self.quantize(encoding_indices)
e_latent_loss = jnp.mean(
jnp.square(jax.lax.stop_gradient(quantized) - inputs))
q_latent_loss = jnp.mean(
jnp.square(quantized - jax.lax.stop_gradient(inputs)))
loss = q_latent_loss + self.commitment_cost * e_latent_loss
# Straight Through Estimator
quantized = inputs + jax.lax.stop_gradient(quantized - inputs)
avg_probs = jnp.mean(encodings, 0)
if self.cross_replica_axis:
avg_probs = jax.lax.pmean(avg_probs, axis_name=self.cross_replica_axis)
perplexity = jnp.exp(-jnp.sum(avg_probs * jnp.log(avg_probs + 1e-10)))
return {
"quantize": quantized,
"loss": loss,
"perplexity": perplexity,
"encodings": encodings,
"encoding_indices": encoding_indices,
"distances": distances,
}
def quantize(self, encoding_indices):
"""Returns embedding tensor for a batch of indices."""
w = self.embeddings.swapaxes(1, 0)
w = jax.device_put(w) # Required when embeddings is a NumPy array.
return w[(encoding_indices,)]
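# Usage sketch (assumes the public ``haiku`` package, which cannot be imported
# from inside this file): the quantizer is applied to encoder outputs inside a
# transformed function, and ``loss`` is added to the reconstruction loss.
#
#   def forward(z_e, is_training):
#     vq = VectorQuantizer(embedding_dim=64, num_embeddings=512,
#                          commitment_cost=0.25)
#     return vq(z_e, is_training)
#
#   forward = haiku.transform(forward)
#   z_e = jnp.zeros([16, 32, 32, 64])
#   params = forward.init(jax.random.PRNGKey(0), z_e, True)
#   out = forward.apply(params, None, z_e, True)
#   # out["quantize"] has the same shape as z_e; gradients reach the encoder
#   # via the straight-through estimator in ``__call__`` above.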
class VectorQuantizerEMA(hk.Module):
r"""Haiku module representing the VQ-VAE layer.
Implements a slightly modified version of the algorithm presented in
"Neural Discrete Representation Learning" by van den Oord et al.
https://arxiv.org/abs/1711.00937
The difference between :class:`VectorQuantizerEMA` and
:class:`VectorQuantizer` is that this module uses
:class:`~haiku.ExponentialMovingAverage`\ s to update the embedding vectors
instead of an auxiliary loss. This has the advantage that the embedding
updates are independent of the choice of optimizer (SGD, RMSProp, Adam, K-Fac,
...) used for the encoder, decoder and other parts of the architecture. For
most experiments the EMA version trains faster than the non-EMA version.
Input any tensor to be quantized. Last dimension will be used as space in
which to quantize. All other dimensions will be flattened and will be seen
as different examples to quantize.
The output tensor will have the same shape as the input.
For example a tensor with shape ``[16, 32, 32, 64]`` will be reshaped into
``[16384, 64]`` and all ``16384`` vectors (each of 64 dimensions) will be
quantized independently.
Attributes:
embedding_dim: integer representing the dimensionality of the tensors in
the quantized space. Inputs to the modules must be in this format as well.
num_embeddings: integer, the number of vectors in the quantized space.
commitment_cost: scalar which controls the weighting of the loss terms
(see equation 4 in the paper).
decay: float, decay for the moving averages.
epsilon: small float constant to avoid numerical instability.
"""
def __init__(
self,
embedding_dim,
num_embeddings,
commitment_cost,
decay,
epsilon: float = 1e-5,
dtype: Any = jnp.float32,
cross_replica_axis: Optional[str] = None,
name: Optional[str] = None,
):
"""Initializes a VQ-VAE EMA module.
Args:
embedding_dim: integer representing the dimensionality of the tensors in
the quantized space. Inputs to the modules must be in this format as
well.
num_embeddings: integer, the number of vectors in the quantized space.
commitment_cost: scalar which controls the weighting of the loss terms
(see equation 4 in the paper - this variable is Beta).
decay: float between 0 and 1, controls the speed of the Exponential Moving
Averages.
epsilon: small constant to aid numerical stability, default ``1e-5``.
dtype: dtype for the embeddings variable, defaults to ``float32``.
cross_replica_axis: If not ``None``, it should be a string representing
the axis name over which this module is being run within a
:func:`jax.pmap`. Supplying this argument means that cluster statistics
and the perplexity are calculated across all replicas on that axis.
name: name of the module.
"""
super().__init__(name=name)
if not 0 <= decay <= 1:
raise ValueError("decay must be in range [0, 1]")
self.embedding_dim = embedding_dim
self.num_embeddings = num_embeddings
self.decay = decay
self.commitment_cost = commitment_cost
self.epsilon = epsilon
self.cross_replica_axis = cross_replica_axis
self._embedding_shape = [embedding_dim, num_embeddings]
self._dtype = dtype
self._ema_cluster_size = hk.ExponentialMovingAverage(
decay=self.decay, name="ema_cluster_size")
self._ema_dw = hk.ExponentialMovingAverage(decay=self.decay, name="ema_dw")
@property
def embeddings(self):
initializer = hk.initializers.VarianceScaling(distribution="uniform")
return hk.get_state(
"embeddings", self._embedding_shape, self._dtype, init=initializer)
@property
def ema_cluster_size(self):
self._ema_cluster_size.initialize([self.num_embeddings], self._dtype)
return self._ema_cluster_size
@property
def ema_dw(self):
self._ema_dw.initialize(self._embedding_shape, self._dtype)
return self._ema_dw
def __call__(self, inputs, is_training):
"""Connects the module to some inputs.
Args:
inputs: Tensor, final dimension must be equal to ``embedding_dim``. All
other leading dimensions will be flattened and treated as a large batch.
is_training: boolean, whether this connection is to training data. When
this is set to ``False``, the internal moving average statistics will
not be updated.
Returns:
dict: Dictionary containing the following keys and values:
* ``quantize``: Tensor containing the quantized version of the input.
* ``loss``: Tensor containing the loss to optimize.
* ``perplexity``: Tensor containing the perplexity of the encodings.
* ``encodings``: Tensor containing the discrete encodings, ie which
element of the quantized space each input element was mapped to.
* ``encoding_indices``: Tensor containing the discrete encoding indices,
ie which element of the quantized space each input element was mapped
to.
"""
flat_inputs = jnp.reshape(inputs, [-1, self.embedding_dim])
embeddings = self.embeddings
distances = (
jnp.sum(jnp.square(flat_inputs), 1, keepdims=True) -
2 * jnp.matmul(flat_inputs, embeddings) +
jnp.sum(jnp.square(embeddings), 0, keepdims=True))
encoding_indices = jnp.argmax(-distances, 1)
encodings = jax.nn.one_hot(encoding_indices,
self.num_embeddings,
dtype=distances.dtype)
# NB: if your code crashes with a reshape error on the line below about a
# Tensor containing the wrong number of values, then the most likely cause
# is that the input passed in does not have a final dimension equal to
# self.embedding_dim. Ideally we would catch this with an Assert but that
# creates various other problems related to device placement / TPUs.
encoding_indices = jnp.reshape(encoding_indices, inputs.shape[:-1])
quantized = self.quantize(encoding_indices)
e_latent_loss = jnp.mean(
jnp.square(jax.lax.stop_gradient(quantized) - inputs))
if is_training:
cluster_size = jnp.sum(encodings, axis=0)
if self.cross_replica_axis:
cluster_size = jax.lax.psum(
cluster_size, axis_name=self.cross_replica_axis)
updated_ema_cluster_size = self.ema_cluster_size(cluster_size)
dw = jnp.matmul(flat_inputs.T, encodings)
if self.cross_replica_axis:
dw = jax.lax.psum(dw, axis_name=self.cross_replica_axis)
updated_ema_dw = self.ema_dw(dw)
n = jnp.sum(updated_ema_cluster_size)
updated_ema_cluster_size = ((updated_ema_cluster_size + self.epsilon) /
(n + self.num_embeddings * self.epsilon) * n)
normalised_updated_ema_w = (
updated_ema_dw / jnp.reshape(updated_ema_cluster_size, [1, -1]))
hk.set_state("embeddings", normalised_updated_ema_w)
    loss = self.commitment_cost * e_latent_loss
# Straight Through Estimator
quantized = inputs + jax.lax.stop_gradient(quantized - inputs)
avg_probs = jnp.mean(encodings, 0)
if self.cross_replica_axis:
avg_probs = jax.lax.pmean(avg_probs, axis_name=self.cross_replica_axis)
perplexity = jnp.exp(-jnp.sum(avg_probs * jnp.log(avg_probs + 1e-10)))
return {
"quantize": quantized,
"loss": loss,
"perplexity": perplexity,
"encodings": encodings,
"encoding_indices": encoding_indices,
"distances": distances,
}
def quantize(self, encoding_indices):
"""Returns embedding tensor for a batch of indices."""
w = self.embeddings.swapaxes(1, 0)
w = jax.device_put(w) # Required when embeddings is a NumPy array.
return w[(encoding_indices,)]
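# A minimal usage sketch (illustrative only, not part of the library): shows
# how ``VectorQuantizerEMA`` might be connected inside a Haiku transform. The
# hyper-parameters and input shape below are arbitrary assumptions.
def _example_vector_quantizer_ema_usage():
  import haiku as hk_lib  # Full public Haiku API. pylint: disable=g-import-not-at-top
  def forward(x, is_training):
    vq = VectorQuantizerEMA(embedding_dim=64, num_embeddings=512,
                            commitment_cost=0.25, decay=0.99)
    return vq(x, is_training=is_training)
  forward = hk_lib.transform_with_state(forward)
  rng = jax.random.PRNGKey(42)
  x = jnp.ones([16, 32, 32, 64])  # Final dimension must equal embedding_dim.
  params, state = forward.init(rng, x, is_training=True)
  out, state = forward.apply(params, state, None, x, is_training=True)
  # ``out`` is the dict documented above (quantize, loss, perplexity, ...).
  return out["quantize"], out["loss"]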
|
dm-haiku-main
|
haiku/_src/nets/vqvae.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.nets.mlp."""
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import test_utils
from haiku._src.nets import mlp
import jax
import jax.numpy as jnp
class MLPTest(parameterized.TestCase):
@test_utils.transform_and_run
def test_b_init_when_with_bias_false(self):
with self.assertRaisesRegex(ValueError, "b_init must not be set"):
mlp.MLP([1], with_bias=False, b_init=lambda *_: _)
@parameterized.parameters(1, 2, 3)
@test_utils.transform_and_run
def test_layers(self, num_layers):
mod = mlp.MLP([1] * num_layers)
self.assertLen(mod.layers, num_layers)
@parameterized.parameters(1, 2, 3)
@test_utils.transform_and_run
def test_applies_activation(self, num_layers):
activation = CountingActivation()
mod = mlp.MLP([1] * num_layers, activation=activation)
mod(jnp.ones([1, 1]))
self.assertEqual(activation.count, num_layers - 1)
@parameterized.parameters(1, 2, 3)
@test_utils.transform_and_run
def test_activate_final(self, num_layers):
activation = CountingActivation()
mod = mlp.MLP([1] * num_layers, activate_final=True, activation=activation)
mod(jnp.ones([1, 1]))
self.assertEqual(activation.count, num_layers)
@parameterized.parameters(1, 2, 3)
@test_utils.transform_and_run
def test_adds_index_to_layer_names(self, num_layers):
mod = mlp.MLP([1] * num_layers)
for index, linear in enumerate(mod.layers):
self.assertEqual(linear.name, "linear_%d" % index)
@parameterized.parameters(False, True)
@test_utils.transform_and_run
def test_passes_with_bias_to_layers(self, with_bias):
mod = mlp.MLP([1, 1, 1], with_bias=with_bias)
for linear in mod.layers:
self.assertEqual(linear.with_bias, with_bias)
@test_utils.transform_and_run(run_apply=False)
def test_repeat_initializer(self):
w_init = CountingInitializer()
b_init = CountingInitializer()
mod = mlp.MLP([1, 1, 1], w_init=w_init, b_init=b_init)
mod(jnp.ones([1, 1]))
self.assertEqual(w_init.count, 3)
self.assertEqual(b_init.count, 3)
@test_utils.transform_and_run
def test_default_name(self):
mod = mlp.MLP([1])
self.assertEqual(mod.name, "mlp")
@test_utils.transform_and_run
def test_custom_name(self):
mod = mlp.MLP([1], name="foobar")
self.assertEqual(mod.name, "foobar")
@test_utils.transform_and_run
def test_reverse_default_name(self):
mod = reversed_mlp()
self.assertEqual(mod.name, "mlp_reversed")
@test_utils.transform_and_run
def test_reverse_custom_name(self):
mod = reversed_mlp(name="foobar")
self.assertEqual(mod.name, "foobar_reversed")
@test_utils.transform_and_run
def test_reverse_override_name(self):
mod = mlp.MLP([2, 3, 4])
mod(jnp.ones([1, 1]))
rev = mod.reverse(name="foobar")
self.assertEqual(rev.name, "foobar")
@test_utils.transform_and_run
def test_reverse(self):
mod = reversed_mlp()
self.assertEqual([l.output_size for l in mod.layers], [3, 2, 1])
@parameterized.parameters(True, False)
@test_utils.transform_and_run
def test_reverse_passed_with_bias(self, with_bias):
mod = reversed_mlp(with_bias=with_bias)
for linear in mod.layers:
self.assertEqual(linear.with_bias, with_bias)
@test_utils.transform_and_run
def test_reverse_w_init(self):
w_init = CountingInitializer()
mod = reversed_mlp(w_init=w_init)
for linear in mod.layers:
self.assertIs(linear.w_init, w_init)
@test_utils.transform_and_run
def test_reverse_b_init(self):
b_init = CountingInitializer()
mod = reversed_mlp(b_init=b_init)
for linear in mod.layers:
self.assertIs(linear.b_init, b_init)
@test_utils.transform_and_run
def test_reverse_activation(self):
activation = CountingActivation()
mod = reversed_mlp(activation=activation)
activation.count = 0
mod(jnp.ones([1, 1]))
self.assertEqual(activation.count, 2)
@test_utils.transform_and_run
def test_dropout_requires_key(self):
mod = mlp.MLP([1, 1])
with self.assertRaisesRegex(ValueError, "rng key must be passed"):
mod(jnp.ones([1, 1]), dropout_rate=0.5)
@test_utils.transform_and_run
def test_no_dropout_rejects_rng(self):
mod = mlp.MLP([1, 1])
with self.assertRaisesRegex(ValueError, "only.*when using dropout"):
mod(jnp.ones([1, 1]), rng=jax.random.PRNGKey(42))
@parameterized.parameters(False, True)
@test_utils.transform_and_run
def test_reverse_activate_final(self, activate_final):
activation = CountingActivation()
mod = reversed_mlp(activation=activation, activate_final=activate_final)
activation.count = 0
mod(jnp.ones([1, 1]))
self.assertEqual(activation.count, 3 if activate_final else 2)
@parameterized.parameters(False, True)
@test_utils.transform_and_run
def test_applies_activation_with_dropout(self, use_dropout):
if use_dropout:
dropout_rate = 0.5
rng = jax.random.PRNGKey(42)
else:
dropout_rate = rng = None
activation = CountingActivation()
mod = mlp.MLP([1, 1, 1], activation=activation)
mod(jnp.ones([1, 1]), dropout_rate, rng)
self.assertEqual(activation.count, 2)
@test_utils.transform_and_run
def test_repr(self):
mod = mlp.MLP([1, 2, 3])
for index, linear in enumerate(mod.layers):
self.assertEqual(
repr(linear),
f"Linear(output_size={index + 1}, name='linear_{index}')",
)
@parameterized.parameters(([30, 15],), ([8],), ([],))
@test_utils.transform_and_run
def test_output_size(self, output_sizes):
mod = mlp.MLP(output_sizes)
expected_output_size = output_sizes[-1] if output_sizes else None
self.assertEqual(mod.output_size, expected_output_size)
def reversed_mlp(**kwargs):
mod = mlp.MLP([2, 3, 4], **kwargs)
mod(jnp.ones([1, 1]))
return mod.reverse()
class CountingActivation:
def __init__(self):
self.count = 0
def __call__(self, x):
self.count += 1
return x
class CountingInitializer:
def __init__(self):
self.count = 0
def __call__(self, shape, dtype=jnp.float32):
self.count += 1
return jnp.ones(shape, dtype=dtype)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/nets/mlp_test.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.nets.resnet."""
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
from haiku._src import test_utils
from haiku._src import transform
from haiku._src.nets import resnet
import jax
import jax.numpy as jnp
import numpy as np
_RESNETS = [f"ResNet{i}" for i in (18, 34, 50, 101, 152, 200)]
_RESNET_NUM_PARAMS = [int(i * 1e6)
for i in (11.7, 21.8, 25.6, 44.5, 60.2, 64.7)]
_RESNET_HAS_PROJECTION = [False, False, True, True, True, True]
class ResnetTest(parameterized.TestCase):
@test_utils.combined_named_parameters(test_utils.named_bools("resnet_v2"),
test_utils.named_bools("bottleneck"))
@test_utils.transform_and_run
def test_simple(self, resnet_v2, bottleneck):
image = jnp.ones([2, 64, 64, 3])
model = resnet.ResNet([1, 1, 1, 1], 10,
resnet_v2=resnet_v2,
bottleneck=bottleneck)
for is_training in (True, False):
logits = model(image, is_training=is_training)
self.assertEqual(logits.shape, (2, 10))
@test_utils.combined_named_parameters(test_utils.named_bools("resnet_v2"),
test_utils.named_bools("bottleneck"))
def test_local_stats(self, resnet_v2, bottleneck):
def forward_fn(image):
model = resnet.ResNet([1, 1, 1, 1], 10,
resnet_v2=resnet_v2,
bottleneck=bottleneck)
return model(image, is_training=False, test_local_stats=True)
forward = transform.transform(forward_fn)
rng = jax.random.PRNGKey(42)
image = jnp.ones([2, 64, 64, 3])
params = forward.init(rng, image)
logits = forward.apply(params, None, image)
self.assertEqual(logits.shape, (2, 10))
@parameterized.parameters(3, 5)
@test_utils.transform_and_run
def test_error_incorrect_args_block_list(self, list_length):
block_list = [i for i in range(list_length)]
with self.assertRaisesRegex(
ValueError, "blocks_per_group` must be of length 4 not {}".format(
list_length)):
resnet.ResNet(block_list, 10, {"decay_rate": 0.9, "eps": 1e-5})
@parameterized.parameters(3, 5)
@test_utils.transform_and_run
def test_error_incorrect_args_channel_list(self, list_length):
channel_list = [i for i in range(list_length)]
with self.assertRaisesRegex(
ValueError,
"channels_per_group` must be of length 4 not {}".format(
list_length)):
resnet.ResNet([1, 1, 1, 1], 10, {"decay_rate": 0.9, "eps": 1e-5},
channels_per_group=channel_list)
@test_utils.combined_named_parameters(
[(i, (getattr(resnet, i), n))
for i, n in zip(_RESNETS, _RESNET_NUM_PARAMS)],
test_utils.named_bools("resnet_v2"),
)
def test_num_params(self, resnet_class_and_num_params, resnet_v2):
resnet_class, expected_num_params = resnet_class_and_num_params
def model_func(img):
model = resnet_class(1000, resnet_v2=resnet_v2)
return model(img, is_training=True)
model = hk.transform_with_state(model_func)
image = jnp.ones([2, 64, 64, 3])
rng = jax.random.PRNGKey(0)
params, _ = model.init(rng, image)
num_params = sum(
np.prod(p.shape).item() for p in jax.tree_util.tree_leaves(params))
self.assertGreater(num_params, int(0.998 * expected_num_params))
self.assertLess(num_params, int(1.002 * expected_num_params))
@test_utils.combined_named_parameters(
[(i, (getattr(resnet, i), p))
for i, p in zip(_RESNETS, _RESNET_HAS_PROJECTION)],
test_utils.named_bools("resnet_v2"),
)
@test_utils.transform_and_run
def test_has_projection(self, resnet_class_and_has_projection, resnet_v2):
resnet_class, has_projection = resnet_class_and_has_projection
model = resnet_class(1000, resnet_v2=resnet_v2)
for i, block_group in enumerate(model.block_groups):
if i == 0:
self.assertEqual(hasattr(block_group.blocks[0], "proj_conv"),
has_projection)
else:
self.assertTrue(hasattr(block_group.blocks[0], "proj_conv"))
for block in block_group.blocks[1:]:
self.assertFalse(hasattr(block, "proj_conv"))
@test_utils.combined_named_parameters(
[(i, getattr(resnet, i)) for i in _RESNETS],
test_utils.named_bools("resnet_v2"),
)
def test_logits_config(self, resnet_class, resnet_v2):
def model_func_logits_config_default(img):
model = resnet_class(1000, resnet_v2=resnet_v2)
return model(img, is_training=True)
def model_func_logits_config_modified(img):
model = resnet_class(1000, resnet_v2=resnet_v2,
logits_config=dict(w_init=jnp.ones))
return model(img, is_training=True)
image = jnp.ones([2, 64, 64, 3])
rng = jax.random.PRNGKey(0)
model = hk.transform_with_state(model_func_logits_config_default)
params, _ = model.init(rng, image)
logits_keys = [k for k in params.keys() if "/logits" in k]
self.assertLen(logits_keys, 1)
# Check logits params are zeros
w_logits = params[logits_keys[0]]["w"]
np.testing.assert_allclose(jnp.zeros_like(w_logits), w_logits)
model = hk.transform_with_state(model_func_logits_config_modified)
params, _ = model.init(rng, image)
# Check logits params are ones
w_logits = params[logits_keys[0]]["w"]
np.testing.assert_allclose(jnp.ones_like(w_logits), w_logits)
@test_utils.combined_named_parameters(
[(i, getattr(resnet, i)) for i in _RESNETS],
)
@test_utils.transform_and_run
def test_initial_conv_config(self, resnet_cls):
config = dict(name="custom_name", output_channels=32, kernel_shape=(3, 3),
stride=(1, 1), padding="VALID", with_bias=True)
net = resnet_cls(1000, initial_conv_config=config)
for key, value in config.items():
self.assertEqual(getattr(net.initial_conv, key), value)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/nets/resnet_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.flax.utils."""
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src.flax import utils
class UtilsTest(parameterized.TestCase):
def test_flatten_flax_to_haiku(self):
variables = {'params': {'mod1': {'mod2': {'w': 0}}, '~': {'w': 1}}}
hk_params = utils.flatten_flax_to_haiku(variables['params'])
self.assertEqual(hk_params, {'mod1/mod2': {'w': 0}, '~': {'w': 1}})
def test_flatten_flax_to_haiku_toplevel(self):
variables = {'params': {'w': 0}}
hk_params = utils.flatten_flax_to_haiku(variables['params'])
self.assertEqual(hk_params, {'~': {'w': 0}})
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
haiku/_src/flax/utils_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.flax.transform_flax."""
from absl.testing import absltest
from absl.testing import parameterized
import flax.errors
import flax.linen as nn
from haiku._src import base
from haiku._src import module
from haiku._src import transform
from haiku._src.flax import transform_flax
import jax
import jax.numpy as jnp
class Child(nn.Module):
@nn.compact
def __call__(self):
if self.is_initializing():
zeros = jnp.zeros([])
self.put_variable('params', 'w', zeros + 0)
self.put_variable('flim_state', 's', zeros + 1)
self.put_variable('flam_state', 's', zeros + 2)
w = self.get_variable('params', 'w')
flim = self.get_variable('flim_state', 's')
flam = self.get_variable('flam_state', 's')
return w, flim, flam
class Parent(nn.Module):
@nn.compact
def __call__(self):
Child()()
class Outer(nn.Module):
@nn.compact
def __call__(self):
Parent()()
class Empty(nn.Module):
def __call__(self):
pass
class OuterHk(module.Module):
def __call__(self):
mod = transform_flax.lift(Outer(), name='outer')
mod()
class ParamsRNG(nn.Module):
@nn.compact
def __call__(self):
return self.make_rng('params')
class UsesRNG(nn.Module):
@nn.compact
def __call__(self):
self.make_rng('foo')
self.param('w', nn.initializers.normal(), [], jnp.float32)
class Counter(nn.Module):
@nn.compact
def __call__(self):
if self.is_initializing():
c = jnp.zeros([])
self.put_variable('state', 'count', c)
return c
else:
c = self.get_variable('state', 'count')
self.put_variable('state', 'count', c + 1)
return c
class TransformFlaxTest(parameterized.TestCase):
def test_lift_empty(self):
def f():
mod = transform_flax.lift(Empty(), name='foo')
mod()
f = transform.transform_with_state(f)
params, state = f.init(jax.random.PRNGKey(42))
self.assertEmpty(params)
self.assertEmpty(state)
def test_lift_toplevel(self):
def f():
mod = transform_flax.lift(Child(), name='foo')
mod()
f = transform.transform_with_state(f)
params, state = f.init(jax.random.PRNGKey(42))
self.assertEqual(params, {'foo/~': {'w': 0}})
self.assertEqual(state, {'foo/flim_state/~': {'s': 1},
'foo/flam_state/~': {'s': 2}})
def test_lift(self):
def f():
mod = transform_flax.lift(Outer(), name='foo')
mod()
f = transform.transform_with_state(f)
rng = jax.random.PRNGKey(42)
params, state = f.init(rng)
self.assertEqual(params, {'foo/Parent_0/Child_0': {'w': 0}})
self.assertEqual(
state,
{
'foo/flim_state/Parent_0/Child_0': {'s': 1},
'foo/flam_state/Parent_0/Child_0': {'s': 2},
},
)
def test_lift_used_inside_module(self):
def f():
mod = OuterHk()
mod()
f = transform.transform_with_state(f)
rng = jax.random.PRNGKey(42)
params, state = f.init(rng)
self.assertEqual(params, {'outer_hk/outer/Parent_0/Child_0': {'w': 0}})
self.assertEqual(
state,
{
'outer_hk/outer/flim_state/Parent_0/Child_0': {'s': 1},
'outer_hk/outer/flam_state/Parent_0/Child_0': {'s': 2},
},
)
def test_lift_module_called_repeatedly(self):
def f():
mod = transform_flax.lift(Outer(), name='foo')
mod()
f = transform.transform_with_state(f)
rng = jax.random.PRNGKey(42)
params, state = f.init(rng)
self.assertEqual(params, {'foo/Parent_0/Child_0': {'w': 0}})
self.assertEqual(
state,
{
'foo/flim_state/Parent_0/Child_0': {'s': 1},
'foo/flam_state/Parent_0/Child_0': {'s': 2},
},
)
def test_stateful_equivalence(self):
def f():
mod = transform_flax.lift(counter, name='foo')
return mod()
counter = Counter()
f = transform.transform_with_state(f)
rng = jax.random.PRNGKey(42)
_, state = f.init(rng)
variables = counter.init(rng)
self.assertEqual(state, {'foo/state/~': {'count': 0}})
self.assertEqual(variables, {'state': {'count': 0}})
for i in range(10):
out_hk, state = f.apply({}, state, None)
out_flax, variables = counter.apply(variables, mutable=['state'])
self.assertEqual(out_hk, out_flax)
self.assertEqual(state, {'foo/state/~': {'count': i + 1}})
self.assertEqual(variables, {'state': {'count': i + 1}})
def test_uses_updated_params(self):
def f():
mod = transform_flax.lift(Child(), name='foo')
return mod()
f = transform.transform_with_state(f)
rng = jax.random.PRNGKey(42)
params, state = f.init(rng)
# Modify the parameters from their initial value.
params, state = jax.tree_map(lambda x: x + 1, (params, state))
(w_out, flim_out, flam_out), _ = f.apply(params, state, None)
w = params['foo/~']['w']
flim = state['foo/flim_state/~']['s']
flam = state['foo/flam_state/~']['s']
# We want to assert that the params/state passed in are literally what is
# returned, this ensures that initialisers did not re-run.
self.assertIs(w, w_out)
self.assertIs(flim, flim_out)
self.assertIs(flam, flam_out)
def test_with_explicit_rngs(self):
def f():
mod = transform_flax.lift(ParamsRNG(), name='foo')
return mod(rngs={'params': jax.random.PRNGKey(42)})
f = transform.transform(f)
params = f.init(None)
self.assertIsNotNone(f.apply(params, None))
@parameterized.parameters(True, False)
def test_with_keys_dict(self, with_params_rng: bool):
def f():
mod = transform_flax.lift(UsesRNG(), name='foo')
if with_params_rng:
rngs = {'params': base.next_rng_key(), 'foo': base.next_rng_key()}
else:
rngs = {'foo': base.next_rng_key()}
return mod(rngs=rngs)
f = transform.transform(f)
rng = jax.random.PRNGKey(42)
init_rng, apply_rng = jax.random.split(rng)
params = f.init(init_rng)
f.apply(params, apply_rng) # Does not fail.
def test_non_mapping_rngs(self):
def f():
mod = transform_flax.lift(ParamsRNG(), name='foo')
return mod(rngs=jax.random.PRNGKey(42))
f = transform.transform(f)
with self.assertRaisesRegex(
flax.errors.InvalidRngError, 'should be a dictionary'
):
f.init(None)
def test_lift_multiple_uses(self):
def f(x):
mod = transform_flax.lift(nn.Dense(1), name='mod')
x = mod(x)
x = mod(x)
return x
f = transform.transform(f)
rng = jax.random.PRNGKey(42)
x = jnp.ones([1, 1])
params = f.init(rng, x)
params = jax.tree_map(lambda x: (x.shape, x.dtype), params)
self.assertEqual(
params,
{
'mod/~': {
'bias': ((1,), jnp.float32),
'kernel': ((1, 1), jnp.float32),
}
},
)
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
haiku/_src/flax/transform_flax_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for working with Haiku and Flax code."""
from collections.abc import Mapping, MutableMapping, Sequence
from typing import Any, Union
from haiku._src import typing
# If you are forking replace this with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
Params = typing.Params
State = typing.State
# pylint: enable=invalid-name
del typing
FlaxCollection = Mapping[str, Any]
MutableFlaxCollection = MutableMapping[str, Any]
FlaxVariables = Mapping[str, FlaxCollection]
MutableFlaxVariables = MutableMapping[str, MutableFlaxCollection]
HaikuParamsOrState = Union[hk.Params, hk.State]
MutableHaikuParamsOrState = MutableMapping[str, MutableMapping[str, Any]]
def flatten_flax_to_haiku(collection: FlaxCollection) -> HaikuParamsOrState:
"""Flattens a Flax variable collection (e.g. params) to a Haiku dict."""
out = {}
for name, value in collection.items():
if not isinstance(value, Mapping):
if '~' not in out:
out['~'] = {}
out['~'][name] = value
else:
_flatten_flax_to_haiku_inner(value, out, (name,))
return out
def _flatten_flax_to_haiku_inner(
collection: FlaxCollection,
out: MutableHaikuParamsOrState,
prefix: Sequence[str],
) -> HaikuParamsOrState:
"""Recursive inner loop of `flatten_flax_to_haiku`."""
for name, value in collection.items():
if isinstance(value, Mapping):
_flatten_flax_to_haiku_inner(value, out=out, prefix=(*prefix, name))
else:
assert prefix
mod_name = '/'.join(prefix)
if mod_name not in out:
out[mod_name] = {}
out[mod_name][name] = value
return out
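# A small illustrative sketch (not used by the library) of the mapping that
# ``flatten_flax_to_haiku`` performs. The parameter names below are made up.
def _example_flatten_flax_to_haiku():
  flax_params = {'encoder': {'dense_0': {'kernel': 1.0, 'bias': 0.0}},
                 'w': 2.0}
  hk_params = flatten_flax_to_haiku(flax_params)
  # Nested Flax scopes become '/'-joined Haiku module names; top-level leaves
  # are grouped under the '~' module.
  assert hk_params == {'encoder/dense_0': {'kernel': 1.0, 'bias': 0.0},
                       '~': {'w': 2.0}}
  return hk_params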
|
dm-haiku-main
|
haiku/_src/flax/utils.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for converting Flax modules to use with Haiku."""
from collections.abc import Mapping
from typing import Any, Callable
import flax.errors
import flax.linen as nn
from haiku._src import base
from haiku._src import filtering
from haiku._src import lift as lift_lib
from haiku._src import transform
from haiku._src import typing
from haiku._src.flax import utils
# pylint: disable=invalid-name
# If you are forking replace this with `import haiku as hk`.
class hk:
lift_with_state = lift_lib.lift_with_state
maybe_next_rng_key = base.maybe_next_rng_key
Params = typing.Params
running_init = transform.running_init
State = typing.State
TransformedWithState = transform.TransformedWithState
# If you are forking replace this with `import haiku.data_structures as hkds`.
class hkds:
traverse = filtering.traverse
# pylint: enable=invalid-name
del typing, filtering, lift_lib
FlaxCollection = utils.FlaxCollection
FlaxVariables = utils.FlaxVariables
MutableFlaxVariables = utils.MutableFlaxVariables
def _from_haiku_params(params: hk.Params) -> FlaxCollection:
"""Converts Haiku parameters to a nested Flax collection."""
collection = {}
for mod_name, name, value in hkds.traverse(params):
if mod_name == '~':
collection[name] = value
else:
nested_collection = collection
for part in mod_name.split('/'):
if part not in nested_collection:
nested_collection[part] = {}
nested_collection = nested_collection[part]
nested_collection[name] = value
return collection
def _to_haiku_state(variables: FlaxVariables) -> hk.State:
"""Converts a nested Flax collection to a Haiku state dict."""
state = {}
for collection_name, collection in variables.items():
flat_collection = utils.flatten_flax_to_haiku(collection)
for mod_name, name, value in hkds.traverse(flat_collection):
mod_name = f'{collection_name}/{mod_name}'
if mod_name not in state:
state[mod_name] = {}
state[mod_name][name] = value
return state
def _from_haiku_state(state: hk.State) -> MutableFlaxVariables:
"""Converts a Haiku state dict to a nested Flax collection."""
variables = {}
for name, module_state in state.items():
collection_name, *mod_name, name = name.split('/')
if collection_name not in variables:
variables[collection_name] = {}
if not mod_name and name == '~':
variables[collection_name].update(module_state)
else:
nested_collection = variables[collection_name]
for part in mod_name:
if part not in nested_collection:
nested_collection[part] = {}
nested_collection = nested_collection[part]
nested_collection[name] = dict(module_state)
return variables
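# Illustrative sketch (not used by the library) of the naming convention the
# two converters above rely on: the Flax collection name becomes the leading
# component of the Haiku module name. 'batch_stats' and 'bn' are made-up
# example names.
def _example_state_naming_convention():
  variables = {'batch_stats': {'bn': {'mean': 0.0}}}
  hk_state = _to_haiku_state(variables)
  assert hk_state == {'batch_stats/bn': {'mean': 0.0}}
  assert _from_haiku_state(hk_state) == variables
  return hk_state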
def _flax_transform_with_state(mod: nn.Module) -> hk.TransformedWithState:
"""Transforms a Flax ``nn.Module`` into a Haiku transformed function.
Example usage:
>>> mod = nn.Dense(10)
>>> f = _flax_transform_with_state(mod)
>>> rng = jax.random.PRNGKey(42)
>>> x = jnp.ones([1, 1])
>>> params, state = f.init(rng, x)
  >>> out, state = f.apply(params, state, None, x)
Args:
mod: Any Flax ``nn.Module`` instance.
Returns:
A :class:`~haiku.TransformedWithState` instance (equivalent to the result of
calling :func:`transform_with_state`).
See also:
lift: Use a Flax module as part of an outer :func:`~haiku.transform` or
:func:`~haiku.transform_with_state`.
"""
def init_fn(rng, *args, **kwargs):
assert 'rngs' not in kwargs # Handled in `lift`.
variables = dict(mod.init(rng, *args, **kwargs))
params = utils.flatten_flax_to_haiku(variables.pop('params', {}))
state = _to_haiku_state(variables)
return params, state
def apply_fn(params, state, rng, *args, **kwargs):
if rng is not None:
raise ValueError(
'RNGs passed in apply must be passed in the rngs keyword argument'
)
variables = _from_haiku_state(state)
mutable = set(variables)
variables['params'] = _from_haiku_params(params)
out, variables = mod.apply(variables, *args, **kwargs, mutable=mutable)
state = _to_haiku_state(variables)
return out, state
return hk.TransformedWithState(init_fn, apply_fn)
def lift(
mod: nn.Module,
*,
name: str,
) -> Callable[..., Any]:
"""Lifts a flax nn.Module into a Haiku transformed function.
For a Flax Module (e.g. ``mod = nn.Dense(10)``), ``mod = lift(mod)`` allows
you to run the call method of the module as if the module was a regular Haiku
module.
Parameters and state from the Flax module are registered with Haiku and become
part of the params/state dictionaries (as returned from ``init``/``apply``).
>>> def f(x):
... # Create and "lift" a Flax module.
... mod = hk.experimental.flax.lift(nn.Dense(300), name='dense')
... x = mod(x) # Any params/state will be registered
... # with Haiku when applying the module.
... x = jax.nn.relu(x)
  ...   x = hk.nets.MLP([100, 10])(x)  # You can of course mix Haiku modules in.
... return x
>>> f = hk.transform(f)
>>> rng = jax.random.PRNGKey(42)
>>> x = jnp.ones([1, 1])
>>> params = f.init(rng, x)
>>> out = f.apply(params, None, x)
Args:
mod: Any Flax ``nn.Module`` instance.
name: Name scope to prefix entries in the outer params/state dict.
Returns:
    A function that, when called, runs the call method of the given Flax
    module and returns its output. As a side effect of calling the module,
    any module parameters and state variables are registered with Haiku.
"""
mod = _flax_transform_with_state(mod)
init_fn, updater = hk.lift_with_state(mod.init, name=name, allow_reuse=True)
def wrapped(*args, **kwargs):
init_kwargs = dict(kwargs)
if hk.running_init():
if 'rngs' not in init_kwargs:
init_rng = hk.maybe_next_rng_key()
elif isinstance(init_kwargs['rngs'], Mapping):
init_rng = dict(init_kwargs.pop('rngs'))
if 'params' not in init_rng:
rng = hk.maybe_next_rng_key()
if rng is not None:
init_rng['params'] = rng
else:
raise flax.errors.InvalidRngError(
'rngs should be a dictionary mapping strings to `jax.PRNGKey`.'
)
else:
init_rng = None
params, state = init_fn(init_rng, *args, **init_kwargs)
out, state = mod.apply(params, state, None, *args, **kwargs)
updater._used = False # pylint: disable=protected-access
updater.update(state)
return out
return wrapped
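# A minimal usage sketch (illustrative only): lifting a stateful Flax module.
# ``nn.BatchNorm`` keeps running statistics in the ``batch_stats`` collection,
# which is expected to surface in the outer Haiku state under the lifted
# module's name. Shapes and hyper-parameters below are arbitrary assumptions.
def _example_lift_stateful_module():
  import haiku as hk_lib  # pylint: disable=g-import-not-at-top
  import jax  # pylint: disable=g-import-not-at-top
  import jax.numpy as jnp  # pylint: disable=g-import-not-at-top
  def f(x):
    bn = lift(nn.BatchNorm(use_running_average=False), name='bn')
    return bn(x)
  f = hk_lib.transform_with_state(f)
  rng = jax.random.PRNGKey(42)
  x = jnp.ones([4, 8])
  params, state = f.init(rng, x)  # State expected under 'bn/batch_stats/~'.
  out, state = f.apply(params, state, None, x)
  return out, params, state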
|
dm-haiku-main
|
haiku/_src/flax/transform_flax.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for converting Haiku modules to Flax modules."""
from typing import TypeVar, Union
import flax.core
import flax.linen as nn
from haiku._src import filtering
from haiku._src import transform as transform_lib
from haiku._src import typing
from haiku._src.flax import utils
# pylint: disable=invalid-name
# If you are forking replace this with `import haiku as hk`.
class hk:
Params = typing.Params
State = typing.State
SupportsCall = typing.SupportsCall
transform = transform_lib.transform
transform_with_state = transform_lib.transform_with_state
Transformed = transform_lib.Transformed
TransformedWithState = transform_lib.TransformedWithState
with_empty_state = transform_lib.with_empty_state
# If you are forking replace this with `import haiku.data_structures as hkds`.
class hkds:
traverse = filtering.traverse
# pylint: enable=invalid-name
del filtering, transform_lib, typing
T = TypeVar('T')
FlaxCollection = utils.FlaxCollection
FlaxVariables = utils.FlaxVariables
HaikuParamsOrState = utils.HaikuParamsOrState
MutableHaikuParamsOrState = utils.MutableHaikuParamsOrState
def store_haiku_collections(
scope: flax.core.Scope,
**collections: HaikuParamsOrState,
):
"""Stores the given Haiku params/state in nested collections under scope."""
for collection_name, collection in collections.items():
for mod_name, name, value in hkds.traverse(collection):
subscope = scope
for part in mod_name.split('/'):
subscope = subscope.push(part, reuse=True)
subscope.put_variable(collection_name, name, value)
def only_changed_state(old_state: hk.State, new_state: hk.State) -> hk.State:
"""Returns the subset of new_state that has changed from old_state."""
updated_state = {}
for mod_name, name, value in hkds.traverse(new_state):
if mod_name in old_state and name in old_state[mod_name]:
if old_state[mod_name][name] is not value:
if mod_name not in updated_state:
updated_state[mod_name] = {}
updated_state[mod_name][name] = value
return updated_state
class Module(nn.Module):
"""A Flax ``nn.Module`` that runs a Haiku transformed function.
This type is designed to make it easy to take a Haiku transformed function
and/or a Haiku module and use it inside a program that otherwise uses Flax.
Given a Haiku transformed function
>>> def f(x):
... return hk.Linear(1)(x)
>>> f = hk.transform(f)
You can convert it into a Flax module using:
>>> mod = hk.experimental.flax.Module(f)
Calling this module is the same as calling any regular Flax module:
>>> rng = jax.random.PRNGKey(42)
>>> x = jnp.ones([1, 1])
>>> variables = mod.init(rng, x)
>>> out = mod.apply(variables, x)
If you just want to convert a Haiku module class such that it can be used
with Flax you can use the ``create`` class method:
>>> mod = hk.experimental.flax.Module.create(hk.Linear, 1)
>>> variables = mod.init(rng, x)
>>> out = mod.apply(variables, x)
"""
transformed: Union[hk.Transformed, hk.TransformedWithState]
def __post_init__(self):
super().__post_init__()
if isinstance(self.transformed, hk.Transformed):
self.transformed = hk.with_empty_state(self.transformed)
@classmethod
def create(
cls, hk_cls: type[hk.SupportsCall], *init_args, **init_kwargs
) -> 'Module':
"""Converts a given Haiku module into a Flax ``nn.Module``.
TODO(tomhennigan): Support multiple forward methods.
Example usage:
>>> mod = hk.experimental.flax.Module.create(hk.Linear, 1) # hk.Linear(1)
>>> rng = jax.random.PRNGKey(42)
>>> x = jnp.ones([1, 1])
>>> variables = mod.init(rng, x)
>>> out = mod.apply(variables, x)
For a stateful module like resnet, you need to also handle output state:
>>> mod = hk.experimental.flax.Module.create(hk.nets.ResNet50, 10)
>>> rng = jax.random.PRNGKey(42)
>>> x = jnp.ones([1, 224, 224, 3])
>>> variables = mod.init(rng, x, is_training=True)
>>> out, state_out = mod.apply(variables, x, is_training=True,
... mutable=['state'])
Args:
hk_cls: A Haiku module type (e.g. ``hk.Linear``).
*init_args: Positional arguments for the constructor.
**init_kwargs: Keyword arguments for the constructor.
Returns:
A Flax ``nn.Module`` wrapping the given class.
"""
def fn(*args, **kwargs):
mod = hk_cls(*init_args, **init_kwargs)
return mod(*args, **kwargs)
fn = hk.transform_with_state(fn)
return Module(fn)
@nn.compact
def __call__(self, *args, **kwargs):
if self.is_initializing():
rng = self.make_rng('params')
params, state = self.transformed.init(rng, *args, **kwargs)
store_haiku_collections(self.scope, params=params, state=state)
else:
params = utils.flatten_flax_to_haiku(self.variables.get('params', {}))
state = utils.flatten_flax_to_haiku(self.variables.get('state', {}))
rng = self.make_rng('apply') if self.has_rng('apply') else None
out, state_out = self.transformed.apply(params, state, rng, *args, **kwargs)
if not self.is_initializing():
updated_state = only_changed_state(state, state_out)
store_haiku_collections(self.scope, state=updated_state)
return out
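# A small composition sketch (illustrative only): the wrapper above is a
# regular Flax ``nn.Module``, so it can be nested inside a larger Flax
# network. ``OuterFlaxNet``, ``hk.Linear`` and the sizes are example choices.
def _example_compose_inside_flax():
  import haiku as hk_lib  # pylint: disable=g-import-not-at-top
  import jax  # pylint: disable=g-import-not-at-top
  import jax.numpy as jnp  # pylint: disable=g-import-not-at-top
  class OuterFlaxNet(nn.Module):
    @nn.compact
    def __call__(self, x):
      x = Module.create(hk_lib.Linear, 16)(x)  # Haiku layer as a submodule.
      x = nn.relu(x)
      return nn.Dense(1)(x)
  net = OuterFlaxNet()
  rng = jax.random.PRNGKey(42)
  x = jnp.ones([1, 8])
  variables = net.init(rng, x)
  return net.apply(variables, x)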
|
dm-haiku-main
|
haiku/_src/flax/flax_module.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.flax.flax_module."""
from absl.testing import absltest
from absl.testing import parameterized
import flax.linen as nn
from haiku._src import base
from haiku._src import filtering
from haiku._src import module
from haiku._src import transform
from haiku._src.flax import flax_module
from haiku._src.flax import utils
from haiku._src.nets import resnet
import jax
import jax.numpy as jnp
import numpy as np
class Counter(module.Module):
def __call__(self):
c = base.get_state('c', [], init=jnp.zeros)
base.set_state('c', c + 1)
return c
class FlaxModuleTest(parameterized.TestCase):
def test_transform(self):
def f():
w1 = base.get_parameter('w1', [], init=jnp.zeros)
w2 = base.get_parameter('w2', [], init=jnp.ones)
return w1, w2
f = transform.transform(f)
m = flax_module.Module(f)
rng = jax.random.PRNGKey(42)
variables = m.init(rng)
self.assertEqual(variables, {'params': {'~': {'w1': 0, 'w2': 1}}})
w1, w2 = m.apply(variables)
self.assertIs(w1, variables['params']['~']['w1'])
self.assertIs(w2, variables['params']['~']['w2'])
def test_transform_with_state(self):
def f():
s1 = base.get_state('s1', [], init=jnp.zeros)
s2 = base.get_state('s2', [], init=jnp.ones)
base.set_state('s1', s1 + 1)
base.set_state('s2', s2 + 1)
return s1, s2
f = transform.transform_with_state(f)
m = flax_module.Module(f)
rng = jax.random.PRNGKey(42)
variables = m.init(rng)
self.assertEqual(dict(variables), {'state': {'~': {'s1': 0, 's2': 1}}})
for i in range(5):
(s1, s2), variables = m.apply(variables, mutable=['state'])
self.assertEqual(s1, i)
self.assertEqual(variables['state']['~']['s1'], i + 1)
self.assertEqual(s2, i + 1)
self.assertEqual(variables['state']['~']['s2'], i + 2)
  def test_transform_with_state_not_mutating_state_after_init(self):
def f():
s1 = base.get_state('s1', [], init=jnp.zeros)
s2 = base.get_state('s2', [], init=jnp.ones)
return s1, s2
f = transform.transform_with_state(f)
m = flax_module.Module(f)
rng = jax.random.PRNGKey(42)
variables = m.init(rng)
self.assertEqual(variables, {'state': {'~': {'s1': 0, 's2': 1}}})
# NOTE: Intentionally not making state collection mutable.
s1, s2 = m.apply(variables)
self.assertIs(s1, variables['state']['~']['s1'])
self.assertIs(s2, variables['state']['~']['s2'])
def test_stateful_module(self):
c = flax_module.Module.create(Counter)
rng = jax.random.PRNGKey(42)
variables = c.init(rng)
self.assertEqual(variables, {'state': {'counter': {'c': 0}}})
for i in range(10):
out, variables = c.apply(variables, mutable=['state'])
self.assertEqual(out, i)
self.assertEqual(variables, {'state': {'counter': {'c': i + 1}}})
def test_resnet_50_init_equivalence_to_flax(self):
mod = flax_module.Module.create(resnet.ResNet50, 10)
rng = jax.random.PRNGKey(42)
x = jnp.ones([1, 224, 224, 3])
f_haiku = transform.transform_with_state(
lambda x: resnet.ResNet50(10)(x, is_training=True)
)
# We check that init is equivalent when passed the RNG used by Flax. There
# is no mechanism currently to test the inverse (init the Flax module to
# match Haiku init) because of how Flax and Haiku disagree on RNG key
# splitting.
hk_params, hk_state = f_haiku.init(flax_init_rng(rng), x)
variables = dict(mod.init(rng, x, is_training=True))
assert_matches(variables['params'], hk_params)
assert_matches(variables['state'], hk_state)
def test_resnet_50_apply_equivalence(self):
mod = flax_module.Module.create(resnet.ResNet50, 10)
rng = jax.random.PRNGKey(42)
x = jnp.ones([1, 224, 224, 3])
f_haiku = transform.transform_with_state(
lambda x: resnet.ResNet50(10)(x, is_training=True)
)
variables = dict(mod.init(rng, x, is_training=True))
# Haiku and Flax have very different RNG key implementations, so parameter
# initialisation does not match when using `flax_module`. There is no
    # mechanism to make Flax initialisation match Haiku initialisation.
hk_params = utils.flatten_flax_to_haiku(variables['params'])
hk_state = utils.flatten_flax_to_haiku(variables['state'])
for _ in range(5):
out_flax, state = mod.apply(
variables, x, is_training=True, mutable=['state']
)
out_hk, hk_state = f_haiku.apply(hk_params, hk_state, None, x)
np.testing.assert_array_equal(out_flax, out_hk)
variables = {**variables, **state}
assert_matches(variables['state'], hk_state)
def assert_matches(flax_collection, haiku_collection):
flax_collection = utils.flatten_flax_to_haiku(flax_collection)
for mod_name, name, value in filtering.traverse(haiku_collection):
# We expect equality (not close) because we are running the same
# operations in the same order on the same data in both cases.
np.testing.assert_array_equal(
flax_collection[mod_name][name],
value,
err_msg=f'{mod_name}/{name}',
)
class ParamsRngModule(nn.Module):
"""Module used to exfiltrate a key from the "params" collection."""
def __call__(self):
return self.make_rng('params')
def flax_init_rng(rng: jax.Array) -> jax.Array:
"""Returns the rng key that Flax will pass to the Haiku init function."""
return ParamsRngModule().apply({}, rngs={'params': rng})
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
haiku/_src/flax/flax_module_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark trace/compile/run timings of init functions."""
import google_benchmark
import haiku as hk
import jax
import jax.numpy as jnp
def init_benchmark(model):
"""Compile/Trace/Run init."""
input_shape = [100, 100, 100, 100]
init, _ = hk.transform_with_state(model)
@google_benchmark.register(name=f'trace_{model.__name__}')
def trace_bench(state):
"""Benchmark Jax trace of hk.init_fn of model."""
x = jnp.ones(input_shape).block_until_ready()
k = jax.random.PRNGKey(42)
while state:
jax.jit(init).lower(k, x).compiler_ir(dialect='hlo')
@google_benchmark.register(name=f'compile_{model.__name__}')
def compile_bench(state):
"""Benchmark Jax compile of hk.init_fn of model."""
x = jnp.ones(input_shape).block_until_ready()
k = jax.random.PRNGKey(42)
c = jax.jit(init).lower(k, x)
while state:
c.compile()
@google_benchmark.register(name=f'run_{model.__name__}')
def run_bench(state):
"""Benchmark runtime of compiled hk.init_fn of model."""
x = jnp.ones(input_shape).block_until_ready()
k = jax.random.PRNGKey(42)
jitted_init = jax.jit(init)
# run jit once to compile
jitted_init(k, x)
while state:
params, _ = jitted_init(k, x)
# block on computation to finish
jax.tree_util.tree_map(lambda x: x.block_until_ready(), params)
return trace_bench, compile_bench, run_bench
# Models to be benchmarked
@init_benchmark
def mlp(x):
return hk.nets.MLP([300, 100, 10])(x)
@init_benchmark
def resnet_50(x):
return hk.nets.ResNet50(num_classes=10)(x, is_training=True,
test_local_stats=True)
if __name__ == '__main__':
google_benchmark.main()
|
dm-haiku-main
|
haiku/benchmarks/init.py
|
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark eval_shape for Haiku models."""
import google_benchmark
import haiku as hk
import jax
import jax.numpy as jnp
def init_benchmark(model):
"""Compile/Trace/Run init."""
input_shape = [100, 100, 100, 100]
init, _ = hk.transform_with_state(model)
@google_benchmark.register(name=f'{model.__name__}_init')
def init_slow_bench(state):
"""Benchmark Jax trace of hk.init_fn of model."""
x = jnp.ones(input_shape)
k = jax.random.PRNGKey(42).block_until_ready()
while state:
jax.eval_shape(init, k, x)
@google_benchmark.register(name=f'{model.__name__}_init_fast')
def init_fast_bench(state):
"""Benchmark runtime of compiled hk.init_fn of model."""
x = jnp.ones(input_shape)
k = jax.random.PRNGKey(42).block_until_ready()
while state:
hk.experimental.fast_eval_shape(init, k, x)
return init_slow_bench, init_fast_bench
# Models to be benchmarked
@init_benchmark
def mlp(x):
return hk.nets.MLP([300, 100, 10])(x)
@init_benchmark
def resnet_50(x):
return hk.nets.ResNet50(num_classes=10)(x, is_training=True,
test_local_stats=True)
if __name__ == '__main__':
google_benchmark.main()
|
dm-haiku-main
|
haiku/benchmarks/eval_shape.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configuration file for the Sphinx documentation builder."""
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# pylint: disable=g-bad-import-order
# pylint: disable=g-import-not-at-top
import doctest
import inspect
import os
import sys
import typing
def _add_annotations_import(path):
"""Appends a future annotations import to the file at the given path."""
with open(path) as f:
contents = f.read()
if contents.startswith('from __future__ import annotations'):
# If we run sphinx multiple times then we will append the future import
# multiple times too.
return
assert contents.startswith('#'), (path, contents.split('\n')[0])
with open(path, 'w') as f:
    # NOTE: This is subtle and not unit tested: we prefix the first line of
    # each Python file with this future import. It is important to prefix the
    # existing line rather than insert a new line, so that source code
    # locations remain accurate (we link to GitHub). The assertion above
    # ensures that the first line in the file is a comment, so it is safe to
    # prefix it.
f.write('from __future__ import annotations ')
f.write(contents)
def _recursive_add_annotations_import():
for path, _, files in os.walk('../haiku/'):
for file in files:
if file.endswith('.py'):
_add_annotations_import(os.path.abspath(os.path.join(path, file)))
if 'READTHEDOCS' in os.environ:
_recursive_add_annotations_import()
typing.get_type_hints = lambda obj, *unused: obj.__annotations__
sys.path.insert(0, os.path.abspath('../'))
sys.path.append(os.path.abspath('ext'))
import haiku as hk
import sphinxcontrib.katex as katex
# -- Project information -----------------------------------------------------
project = 'Haiku'
copyright = '2019, DeepMind' # pylint: disable=redefined-builtin
author = 'Haiku Contributors'
# -- General configuration ---------------------------------------------------
master_doc = 'index'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.linkcode',
'sphinx.ext.napoleon',
'sphinxcontrib.bibtex',
'sphinxcontrib.katex',
'sphinx_autodoc_typehints',
'sphinx_book_theme',
'coverage_check',
'nbsphinx',
'IPython.sphinxext.ipython_console_highlighting',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for autodoc -----------------------------------------------------
autodoc_default_options = {
'member-order': 'bysource',
'special-members': True,
'exclude-members': '__repr__, __str__, __weakref__',
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_book_theme'
html_theme_options = {
'repository_url': 'https://github.com/deepmind/dm-haiku',
'use_repository_button': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# html_favicon = '_static/favicon.ico'
# -- Options for doctest -----------------------------------------------------
doctest_test_doctest_blocks = 'true'
doctest_global_setup = """
import collections
import itertools
import unittest
import chex
import flax.linen as nn
import haiku as hk
import jax
import jax.numpy as jnp
import jmp
# Equivalent to wrapping the whole files doctests in `hk.transform_with_state`.
from haiku._src import base
base.new_context(rng=jax.random.PRNGKey(42)).__enter__()
"""
doctest_default_flags = (
doctest.ELLIPSIS
| doctest.IGNORE_EXCEPTION_DETAIL
| doctest.DONT_ACCEPT_TRUE_FOR_1
| doctest.NORMALIZE_WHITESPACE)
# -- Options for katex ------------------------------------------------------
# See: https://sphinxcontrib-katex.readthedocs.io/en/0.4.1/macros.html
latex_macros = r"""
\def \d #1{\operatorname{#1}}
"""
# Translate LaTeX macros to KaTeX and add to options for HTML builder
katex_macros = katex.latex_defs_to_katex_macros(latex_macros)
katex_options = 'macros: {' + katex_macros + '}'
# Add LaTeX macros for LATEX builder
latex_elements = {'preamble': latex_macros}
# -- Source code links -------------------------------------------------------
def linkcode_resolve(domain, info):
"""Resolve a GitHub URL corresponding to Python object."""
if domain != 'py':
return None
try:
mod = sys.modules[info['module']]
  except KeyError:
return None
obj = mod
try:
for attr in info['fullname'].split('.'):
obj = getattr(obj, attr)
except AttributeError:
return None
else:
obj = inspect.unwrap(obj)
try:
filename = inspect.getsourcefile(obj)
except TypeError:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except OSError:
return None
# TODO(slebedev): support tags after we release an initial version.
  return 'https://github.com/deepmind/dm-haiku/blob/master/haiku/%s#L%d-L%d' % (
os.path.relpath(filename, start=os.path.dirname(
hk.__file__)), lineno, lineno + len(source) - 1)
# -- nbsphinx configuration --------------------------------------------------
# TODO(tomhennigan): Consider auto/always here.
nbsphinx_execute = 'never'
nbsphinx_codecell_lexer = 'ipython'
nbsphinx_kernel_name = 'python'
nbsphinx_timeout = 180
nbsphinx_prolog = r"""
{% set docname = 'docs/' + env.doc2path(env.docname, base=None) %}
.. only:: html
.. role:: raw-html(raw)
:format: html
.. nbinfo::
Interactive online version:
:raw-html:`<a href="https://colab.research.google.com/github/deepmind/dm-haiku/blob/master/{{ docname }}"><img alt="Open In Colab" src="https://colab.research.google.com/assets/colab-badge.svg" style="vertical-align:text-bottom"></a>`
__ https://github.com/deepmind/dm-haiku/blob/
{{ env.config.release }}/{{ docname }}
"""
# -- bibtex configuration ---------------------------------------------------
bibtex_bibfiles = ['references.bib']
|
dm-haiku-main
|
docs/conf.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Asserts all public symbols are covered in the docs."""
from collections.abc import Mapping
from typing import Any
import haiku as hk
from haiku._src import test_utils
from sphinx import application
from sphinx import builders
from sphinx import errors
HIDDEN_SYMBOLS = ("haiku.experimental.GetterContext",
"haiku.experimental.MethodContext",
"haiku.experimental.ParamContext",
"haiku.experimental.custom_creator",
"haiku.experimental.custom_getter",
"haiku.experimental.intercept_methods",
"haiku.experimental.lift")
def haiku_public_symbols():
names = set()
for module_name, module in test_utils.find_internal_python_modules(hk):
for name in module.__all__:
symbol_name = f"{module_name}.{name}"
if symbol_name not in HIDDEN_SYMBOLS:
names.add(symbol_name)
return names
class HaikuCoverageCheck(builders.Builder):
"""Builder that checks all public symbols are included."""
name = "coverage_check"
def get_outdated_docs(self) -> str:
return "coverage_check"
def write(self, *ignored: Any) -> None:
pass
def finish(self) -> None:
documented_objects = frozenset(self.env.domaindata["py"]["objects"])
undocumented_objects = haiku_public_symbols() - documented_objects
if undocumented_objects:
# Remove symbols that appear to have moved out of experimental.
for obj in tuple(undocumented_objects):
if obj.replace("haiku.experimental", "haiku") in documented_objects:
undocumented_objects.remove(obj)
if undocumented_objects:
undocumented_objects = tuple(sorted(undocumented_objects))
raise errors.SphinxError(
"All public symbols must be included in our documentation, did you "
"forget to add an entry to `api.rst`?\n"
f"Undocumented symbols: {undocumented_objects}")
def setup(app: application.Sphinx) -> Mapping[str, Any]:
app.add_builder(HaikuCoverageCheck)
return dict(version="0.0.1", parallel_read_safe=True)
|
dm-haiku-main
|
docs/ext/coverage_check.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
|
dm-haiku-main
|
examples/__init__.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku.examples.impala_lite."""
from absl.testing import absltest
from examples import impala_lite
class ImpalaLiteTest(absltest.TestCase):
def test_impala_integration(self):
impala_lite.run(trajectories_per_actor=2, num_actors=2, unroll_len=20)
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
examples/impala_lite_test.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple, single-process IMPALA in JAX with Haiku.
This is a simple, minimal implementation of IMPALA.
For a more full-fledged implementation, see examples/impala/README.md.
See: https://arxiv.org/abs/1802.01561
"""
import functools
import queue
import threading
from typing import Any, Callable, NamedTuple
from absl import app
from absl import logging
from bsuite.environments import catch
import dm_env
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
import rlax
class Transition(NamedTuple):
timestep: dm_env.TimeStep
action: int
agent_out: Any
class SimpleNet(hk.Module):
"""A simple network."""
def __init__(self, num_actions: int):
super().__init__()
self._num_actions = num_actions
def __call__(
self,
timestep: dm_env.TimeStep,
) -> tuple[jax.Array, jax.Array]:
"""Process a batch of observations."""
torso = hk.Sequential([hk.Flatten(),
hk.Linear(128), jax.nn.relu,
hk.Linear(64), jax.nn.relu])
hidden = torso(timestep.observation)
policy_logits = hk.Linear(self._num_actions)(hidden)
baseline = hk.Linear(1)(hidden)
baseline = jnp.squeeze(baseline, axis=-1)
return policy_logits, baseline
class Agent:
"""A simple, feed-forward agent."""
def __init__(self, net_apply):
self._net = net_apply
self._discount = 0.99
@functools.partial(jax.jit, static_argnums=0)
def step(
self,
params: hk.Params,
rng: jax.Array,
timestep: dm_env.TimeStep,
) -> tuple[jax.Array, jax.Array]:
"""Steps on a single observation."""
timestep = jax.tree_util.tree_map(lambda t: jnp.expand_dims(t, 0), timestep)
logits, _ = self._net(params, timestep)
logits = jnp.squeeze(logits, axis=0)
action = hk.multinomial(rng, logits, num_samples=1)
action = jnp.squeeze(action, axis=-1)
return action, logits
def loss(self, params: hk.Params, trajs: Transition) -> jax.Array:
"""Computes a loss of trajs wrt params."""
# Re-run the agent over the trajectories.
# Due to https://github.com/google/jax/issues/1459, we use hk.BatchApply
# instead of vmap.
# BatchApply turns the input tensors from [T, B, ...] into [T*B, ...].
    # We bind `params` with `functools.partial` so BatchApply does not reshape it.
net_curried = hk.BatchApply(functools.partial(self._net, params))
learner_logits, baseline_with_bootstrap = net_curried(trajs.timestep)
# Separate the bootstrap from the value estimates.
baseline = baseline_with_bootstrap[:-1]
baseline_tp1 = baseline_with_bootstrap[1:]
# Remove bootstrap timestep from non-observations.
_, actions, behavior_logits = jax.tree_util.tree_map(
lambda t: t[:-1], trajs)
learner_logits = learner_logits[:-1]
    # Shift step_type/reward/discount back by one, so that each action lines
    # up with the timestep it caused.
timestep = jax.tree_util.tree_map(lambda t: t[1:], trajs.timestep)
discount = timestep.discount * self._discount
# The step is uninteresting if we transitioned LAST -> FIRST.
mask = jnp.not_equal(timestep.step_type, int(dm_env.StepType.FIRST))
mask = mask.astype(jnp.float32)
# Compute v-trace returns.
vtrace_td_error_and_advantage = jax.vmap(
rlax.vtrace_td_error_and_advantage, in_axes=1, out_axes=1)
rhos = rlax.categorical_importance_sampling_ratios(learner_logits,
behavior_logits, actions)
vtrace_returns = vtrace_td_error_and_advantage(baseline, baseline_tp1,
timestep.reward, discount,
rhos)
# Note that we use mean here, rather than sum as in canonical IMPALA.
# Compute policy gradient loss.
pg_advantage = jax.lax.stop_gradient(vtrace_returns.pg_advantage)
tb_pg_loss_fn = jax.vmap(rlax.policy_gradient_loss, in_axes=1, out_axes=0)
pg_loss = tb_pg_loss_fn(learner_logits, actions, pg_advantage, mask)
pg_loss = jnp.mean(pg_loss)
# Baseline loss.
bl_loss = 0.5 * jnp.mean(jnp.square(vtrace_returns.errors) * mask)
# Entropy regularization.
ent_loss_fn = jax.vmap(rlax.entropy_loss, in_axes=1, out_axes=0)
ent_loss = ent_loss_fn(learner_logits, mask)
ent_loss = jnp.mean(ent_loss)
total_loss = pg_loss + 0.5 * bl_loss + 0.01 * ent_loss
return total_loss
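# A minimal sketch (not part of the original example) of what hk.BatchApply
# does in the loss above: it merges the leading [T, B] dimensions into a
# single [T * B] dimension before calling the wrapped function, then splits
# them back afterwards. The module and shapes below are illustrative only.
#
#   net = hk.without_apply_rng(hk.transform(lambda x: hk.Linear(4)(x)))
#   params = net.init(jax.random.PRNGKey(0), jnp.zeros([6, 8]))
#   y = hk.BatchApply(functools.partial(net.apply, params))(
#       jnp.zeros([3, 2, 8]))
#   assert y.shape == (3, 2, 4)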
def preprocess_step(ts: dm_env.TimeStep) -> dm_env.TimeStep:
# reward: None -> 0, discount: None -> 1,
# scalar -> np.array(), and StepType -> int.
if ts.reward is None:
ts = ts._replace(reward=0.)
if ts.discount is None:
ts = ts._replace(discount=1.)
return jax.tree_util.tree_map(np.asarray, ts)
def run_actor(
agent: Agent,
rng_key: jax.Array,
get_params: Callable[[], hk.Params],
enqueue_traj: Callable[[Transition], None],
unroll_len: int,
num_trajectories: int,
):
"""Runs an actor to produce num_trajectories trajectories."""
env = catch.Catch()
state = env.reset()
traj = []
for i in range(num_trajectories):
params = get_params()
# The first rollout is one step longer.
for _ in range(unroll_len + int(i == 0)):
rng_key, step_key = jax.random.split(rng_key)
state = preprocess_step(state)
action, logits = agent.step(params, step_key, state)
transition = Transition(state, action, logits)
traj.append(transition)
state = env.step(action)
if state.step_type == dm_env.StepType.LAST:
logging.log_every_n(logging.INFO, 'Episode ended with reward: %s', 5,
state.reward)
# Stack and send the trajectory.
stacked_traj = jax.tree_util.tree_map(lambda *ts: np.stack(ts), *traj)
enqueue_traj(stacked_traj)
# Reset the trajectory, keeping the last timestep.
traj = traj[-1:]
class Learner:
"""Slim wrapper around an agent/optimizer pair."""
def __init__(self, agent: Agent, opt_update):
self._agent = agent
self._opt_update = opt_update
@functools.partial(jax.jit, static_argnums=0)
def update(
self,
params: hk.Params,
opt_state: optax.OptState,
trajs: Transition,
) -> tuple[hk.Params, optax.OptState]:
g = jax.grad(self._agent.loss)(params, trajs)
updates, new_opt_state = self._opt_update(g, opt_state)
return optax.apply_updates(params, updates), new_opt_state
def run(*, trajectories_per_actor, num_actors, unroll_len):
"""Runs the example."""
# Construct the agent network. We need a sample environment for its spec.
env = catch.Catch()
num_actions = env.action_spec().num_values
net = hk.without_apply_rng(
hk.transform(lambda ts: SimpleNet(num_actions)(ts))) # pylint: disable=unnecessary-lambda
# Construct the agent and learner.
agent = Agent(net.apply)
opt = optax.rmsprop(5e-3, decay=0.99, eps=1e-7)
learner = Learner(agent, opt.update)
# Initialize the optimizer state.
sample_ts = env.reset()
sample_ts = preprocess_step(sample_ts)
ts_with_batch = jax.tree_util.tree_map(
lambda t: np.expand_dims(t, 0), sample_ts)
params = jax.jit(net.init)(jax.random.PRNGKey(428), ts_with_batch)
opt_state = opt.init(params)
# Create accessor and queueing functions.
current_params = lambda: params
batch_size = 2
q = queue.Queue(maxsize=batch_size)
def dequeue():
batch = []
for _ in range(batch_size):
batch.append(q.get())
batch = jax.tree_util.tree_map(lambda *ts: np.stack(ts, axis=1), *batch)
return jax.device_put(batch)
# Start the actors.
for i in range(num_actors):
key = jax.random.PRNGKey(i)
args = (agent, key, current_params, q.put, unroll_len,
trajectories_per_actor)
threading.Thread(target=run_actor, args=args).start()
# Run the learner.
num_steps = num_actors * trajectories_per_actor // batch_size
for i in range(num_steps):
traj = dequeue()
params, opt_state = learner.update(params, opt_state, traj)
def main(_):
run(trajectories_per_actor=500, num_actors=2, unroll_len=20)
if __name__ == '__main__':
app.run(main)
|
dm-haiku-main
|
examples/impala_lite.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MNIST classifier with pruning as in https://arxiv.org/abs/1710.01878 ."""
from collections.abc import Iterator, Mapping, Sequence
import functools
from typing import Callable
from absl import app
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
import tensorflow_datasets as tfds
Batch = Mapping[str, np.ndarray]
Predicate = Callable[[str, str, jax.Array], bool]
PredicateMap = Mapping[Predicate, jax.Array]
ModuleSparsity = Sequence[tuple[Predicate, jax.Array]]
def topk_mask(value: jax.Array, density_fraction: float) -> jax.Array:
"""Return a mask with 1s marking the top fraction of value.
  Note: This routine takes care to ensure that ties are broken without bias
  toward smaller indices. Such bias can be a problem when pruning large
  embedding matrices, or when doing global pruning where all parameters in
  the model are concatenated together and pruned at once.
Args:
value: An array. Must contain sortable values (i.e. not complex).
density_fraction: A float. What fraction of value should be kept.
Returns:
A mask containing 1s where the topk elements of value are. k is
determined based on density_fraction and the size of value.
"""
def topk_mask_internal(value):
assert value.ndim == 1
indices = jnp.argsort(value)
k = jnp.round(density_fraction * jnp.size(value)).astype(jnp.int32)
mask = jnp.greater_equal(np.arange(value.size), value.size - k)
mask = jnp.zeros_like(mask).at[indices].set(mask)
return mask.astype(np.int32)
# shuffle value so that identical values aren't always pruned
# with a bias to lower indices
orig_shape = value.shape
value = jnp.reshape(value, -1)
  shuffled_indices = jax.random.permutation(
      jax.random.PRNGKey(42), jnp.arange(0, jnp.size(value), dtype=jnp.int32))
shuffled_mask = topk_mask_internal(value[shuffled_indices])
mask = jnp.zeros_like(shuffled_mask).at[shuffled_indices].set(shuffled_mask)
mask = jnp.reshape(mask, orig_shape)
return mask
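# Illustrative usage sketch (the values are made up, not from this example):
# keeping the top half of four magnitudes yields a mask with exactly two ones.
#
#   topk_mask(jnp.array([0.1, 0.9, 0.3, 0.7]), density_fraction=0.5)
#   # -> a 0/1 array with ones at the positions of 0.9 and 0.7.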
def zhugupta_func(progress: float) -> float:
"""From 'To Prune or Not To Prune' :cite:`zhu2017prune`."""
return 1. - (1. - progress)**3
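# Worked example of the cubic schedule above (added for exposition): progress
# 0.0 -> 0.0, 0.5 -> 1 - 0.5**3 = 0.875, 1.0 -> 1.0, so most of the target
# sparsity is reached early in the pruning window.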
def _create_partitions(
module_sparsity: ModuleSparsity, params: hk.Params
) -> tuple[Sequence[hk.Params], Sequence[jax.Array], hk.Params]:
"""Partition params based on sparsity_predicate_map.
Args:
module_sparsity: A Sequence of (Predicate, float) pairs. Predicate
functions take module_name, name, value as arguments. The floats are the
sparsity level to apply to leaves matching Predicate.
    params: A Haiku params tree.
Returns:
A tuple containing:
- A list of len(module_sparsity), where each element is a disjoint subset
of the `params` to be pruned.
- A list of len(module_sparsity) where each element is the sparsity level.
- The remaining elements of `params` not being pruned such that the union
of the first list and this element contains the elements of `params`.
"""
list_of_trees = []
sparsity_list = []
tail = params
# Greedily match so that no parameter can be matched more than once
for predicate, sparsity in module_sparsity:
head, tail = hk.data_structures.partition(predicate, tail)
list_of_trees.append(head)
sparsity_list.append(sparsity)
return list_of_trees, sparsity_list, tail
def sparsity_ignore(m: str, n: str, v: jax.Array) -> bool:
"""Any parameter matching these conditions should generally not be pruned."""
# n == 'b' when param is a bias
return n == "b" or v.ndim == 1 or "batchnorm" in m or "batch_norm" in m
@functools.partial(jax.jit, static_argnums=2)
def apply_mask(params: hk.Params, masks: Sequence[hk.Params],
module_sparsity: ModuleSparsity) -> hk.Params:
"""Apply existing masks to params based on sparsity_predicate_map.
Some of params may not be masked depending on the content of
module_sparsity. masks must have the same structure as implied by
module_sparsity.
Args:
params: Tree to mask, can be a superset of masks.
masks: Tree of masks to apply to params. This must match the result of
applying module_sparsity to params.
    module_sparsity: A sequence of (predicate, sparsity) pairs. The predicates
      determine which leaves of `params` have a mask applied.
Returns:
A tree of masked params.
"""
params_to_prune, _, params_no_prune = _create_partitions(
module_sparsity, params)
pruned_params = []
for value, mask in zip(params_to_prune, masks):
pruned_params.append(
jax.tree_util.tree_map(lambda x, y: x * y, value, mask))
params = hk.data_structures.merge(*pruned_params, params_no_prune)
return params
@functools.partial(jax.jit, static_argnums=2)
def update_mask(params: hk.Params, sparsity_fraction: float,
module_sparsity: ModuleSparsity) -> Sequence[hk.Params]:
"""Generate masks based on module_sparsity and sparsity_fraction."""
params_to_prune, sparsities, _ = _create_partitions(module_sparsity, params)
masks = []
def map_fn(x: jax.Array, sparsity: float) -> jax.Array:
return topk_mask(jnp.abs(x), 1. - sparsity * sparsity_fraction)
for tree, sparsity in zip(params_to_prune, sparsities):
map_fn_sparsity = functools.partial(map_fn, sparsity=sparsity)
mask = jax.tree_util.tree_map(map_fn_sparsity, tree)
masks.append(mask)
return masks
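# Typical usage sketch of the two functions above, mirroring the training loop
# in `main` below (all names are from this file; only the flow is new):
#
#   masks = update_mask(params, sparsity_fraction, module_sparsity)
#   pruned_params = apply_mask(params, masks, module_sparsity)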
@jax.jit
def get_sparsity(params: hk.Params):
"""Calculate the total sparsity and tensor-wise sparsity of params."""
total_params = sum(jnp.size(x) for x in jax.tree_util.tree_leaves(params))
total_nnz = sum(jnp.sum(x != 0.) for x in jax.tree_util.tree_leaves(params))
leaf_sparsity = jax.tree_util.tree_map(
lambda x: jnp.sum(x == 0) / jnp.size(x), params)
return total_params, total_nnz, leaf_sparsity
def net_fn(batch: Batch) -> jax.Array:
"""Standard LeNet-300-100 MLP network."""
x = batch["image"].astype(jnp.float32) / 255.
mlp = hk.Sequential([
hk.Flatten(),
hk.Linear(300), jax.nn.relu,
hk.Linear(100), jax.nn.relu,
hk.Linear(10),
])
return mlp(x)
def load_dataset(
split: str,
*,
shuffle: bool,
batch_size: int,
) -> Iterator[Batch]:
"""Loads the dataset as a generator of batches."""
ds, ds_info = tfds.load("mnist:3.*.*", split=split, with_info=True)
  ds = ds.cache()
if shuffle:
ds = ds.shuffle(ds_info.splits[split].num_examples, seed=0)
ds = ds.repeat()
ds = ds.batch(batch_size)
return iter(tfds.as_numpy(ds))
def main(_):
# Make the network and optimiser.
net = hk.without_apply_rng(hk.transform(net_fn))
opt = optax.adam(1e-3)
# Define layerwise sparsities
def module_matching(s):
def match_func(m, n, k):
return m.endswith(s) and not sparsity_ignore(m, n, k)
return match_func
module_sparsity = ((module_matching("linear"), 0.98),
(module_matching("linear_1"), 0.9))
# Training loss (cross-entropy).
@jax.jit
def loss(params: hk.Params, batch: Batch) -> jax.Array:
"""Compute the loss of the network, including L2."""
logits = net.apply(params, batch)
labels = jax.nn.one_hot(batch["label"], 10)
l2_loss = 0.5 * sum(
jnp.sum(jnp.square(p)) for p in jax.tree_util.tree_leaves(params))
softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits))
softmax_xent /= labels.shape[0]
return softmax_xent + 1e-4 * l2_loss
# Evaluation metric (classification accuracy).
@jax.jit
def accuracy(params: hk.Params, batch: Batch) -> jax.Array:
predictions = net.apply(params, batch)
return jnp.mean(jnp.argmax(predictions, axis=-1) == batch["label"])
@jax.jit
def get_updates(
params: hk.Params,
opt_state: optax.OptState,
batch: Batch,
) -> tuple[hk.Params, optax.OptState]:
"""Learning rule (stochastic gradient descent)."""
grads = jax.grad(loss)(params, batch)
updates, opt_state = opt.update(grads, opt_state)
return updates, opt_state
# We maintain avg_params, the exponential moving average of the "live" params.
# avg_params is used only for evaluation (cf. https://doi.org/10.1137/0330046)
@jax.jit
def ema_update(params, avg_params):
return optax.incremental_update(params, avg_params, step_size=0.001)
# Make datasets.
train = load_dataset("train", shuffle=True, batch_size=1000)
train_eval = load_dataset("train", shuffle=False, batch_size=10000)
test_eval = load_dataset("test", shuffle=False, batch_size=10000)
  # Implementation note: It is possible to avoid pruned_params and just use
  # a single params which progressively gets pruned. The updates also don't
  # need to be masked in such an implementation. The current implementation
  # mimics the TF implementation, which allows previously deactivated
  # connections to become active again if currently active values drop below
  # their magnitude.
# Initialize network and optimiser; note we draw an input to get shapes.
pruned_params = params = avg_params = net.init(
jax.random.PRNGKey(42), next(train))
masks = update_mask(params, 0., module_sparsity)
opt_state = opt.init(params)
# Train/eval loop.
for step in range(10001):
if step % 1000 == 0:
# Periodically evaluate classification accuracy on train & test sets.
avg_params = apply_mask(avg_params, masks, module_sparsity)
train_accuracy = accuracy(avg_params, next(train_eval))
test_accuracy = accuracy(avg_params, next(test_eval))
total_params, total_nnz, per_layer_sparsities = get_sparsity(avg_params)
train_accuracy, test_accuracy, total_nnz, per_layer_sparsities = (
jax.device_get(
(train_accuracy, test_accuracy, total_nnz, per_layer_sparsities)))
print(f"[Step {step}] Train / Test accuracy: "
f"{train_accuracy:.3f} / {test_accuracy:.3f}.")
print(f"Non-zero params / Total: {total_nnz} / {total_params}; "
f"Total Sparsity: {1. - total_nnz / total_params:.3f}")
# Do SGD on a batch of training examples.
pruned_params = apply_mask(params, masks, module_sparsity)
updates, opt_state = get_updates(pruned_params, opt_state, next(train))
    # Applying a straight-through estimator here (i.e. not masking the
    # updates) leads to much worse performance.
updates = apply_mask(updates, masks, module_sparsity)
params = optax.apply_updates(params, updates)
    # We start pruning at iteration 1000; the schedule saturates at iteration 9000.
progress = min(max((step - 1000.) / 8000., 0.), 1.)
if step % 200 == 0:
sparsity_fraction = zhugupta_func(progress)
masks = update_mask(params, sparsity_fraction, module_sparsity)
avg_params = ema_update(params, avg_params)
print(per_layer_sparsities)
if __name__ == "__main__":
app.run(main)
|
dm-haiku-main
|
examples/mnist_pruning.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variational Autoencoder example on binarized MNIST dataset.
See "Auto-encoding variational Bayes" (Kingma & Welling, 2014) [0].
[0]https://arxiv.org/abs/1312.6114
"""
from collections.abc import Iterator, Sequence
import dataclasses
from typing import NamedTuple
from absl import app
from absl import flags
from absl import logging
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
import tensorflow_datasets as tfds
@dataclasses.dataclass
class Config:
batch_size: int = 128
learning_rate: float = 1e-3
training_steps: int = 5000
eval_every: int = 100
seed: int = 0
class Batch(NamedTuple):
image: jax.Array # [B, H, W, C]
def load_dataset(split: str, batch_size: int, seed: int) -> Iterator[Batch]:
ds = (
tfds.load("binarized_mnist", split=split)
.shuffle(buffer_size=10 * batch_size, seed=seed)
.batch(batch_size)
.prefetch(buffer_size=5)
.repeat()
.as_numpy_iterator()
)
return map(lambda x: Batch(x["image"]), ds)
@dataclasses.dataclass
class Encoder(hk.Module):
"""Encoder model."""
latent_size: int
hidden_size: int = 512
def __call__(self, x: jax.Array) -> tuple[jax.Array, jax.Array]:
"""Encodes an image as an isotropic Guassian latent code."""
x = hk.Flatten()(x)
x = hk.Linear(self.hidden_size)(x)
x = jax.nn.relu(x)
mean = hk.Linear(self.latent_size)(x)
log_stddev = hk.Linear(self.latent_size)(x)
stddev = jnp.exp(log_stddev)
return mean, stddev
@dataclasses.dataclass
class Decoder(hk.Module):
"""Decoder model."""
output_shape: Sequence[int]
hidden_size: int = 512
def __call__(self, z: jax.Array) -> jax.Array:
"""Decodes a latent code into Bernoulli log-odds over an output image."""
z = hk.Linear(self.hidden_size)(z)
z = jax.nn.relu(z)
logits = hk.Linear(np.prod(self.output_shape))(z)
logits = jnp.reshape(logits, (-1, *self.output_shape))
return logits
class VAEOutput(NamedTuple):
image: jax.Array
mean: jax.Array
variance: jax.Array
logits: jax.Array
@dataclasses.dataclass
class VariationalAutoEncoder(hk.Module):
"""Main VAE model class."""
encoder: Encoder
decoder: Decoder
def __call__(self, x: jax.Array) -> VAEOutput:
"""Forward pass of the variational autoencoder."""
x = x.astype(jnp.float32)
mean, stddev = self.encoder(x)
z = mean + stddev * jax.random.normal(hk.next_rng_key(), mean.shape)
logits = self.decoder(z)
p = jax.nn.sigmoid(logits)
image = jax.random.bernoulli(hk.next_rng_key(), p)
return VAEOutput(image, mean, jnp.square(stddev), logits)
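# Note added for exposition (not part of the original example): the sampling
# line above is the standard reparameterisation trick, z = mean + stddev * eps
# with eps ~ N(0, I), which keeps the sampling step differentiable with
# respect to the encoder outputs.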
class TrainingState(NamedTuple):
params: hk.Params
opt_state: optax.OptState
rng_key: jax.Array
def main(_):
flags.FLAGS.alsologtostderr = True
config = Config()
@hk.transform
def model(x):
vae = VariationalAutoEncoder(
encoder=Encoder(latent_size=10),
decoder=Decoder(output_shape=x.shape[1:]),
)
return vae(x)
@jax.jit
def loss_fn(params, rng_key, batch: Batch) -> jax.Array:
"""ELBO loss: E_p[log(x)] - KL(d||q), where p ~ Be(0.5) and q ~ N(0,1)."""
# Run the model on the inputs.
_, mean, var, logits = model.apply(params, rng_key, batch.image)
# Bernoulli log-likelihood (assumes `image` is binarised).
log_likelihood = jnp.einsum(
"b...->b", batch.image * logits - jnp.logaddexp(0., logits))
    # KL divergence between the Gaussians N(mean, stddev**2) and N(0, 1).
kl = 0.5 * jnp.sum(-jnp.log(var) - 1. + var + jnp.square(mean), axis=-1)
# Loss is the negative evidence lower-bound.
return -jnp.mean(log_likelihood - kl)
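  # For reference (added comment, not in the original example): the closed
  # form used above is
  #   KL(N(mean, var) || N(0, 1)) = 0.5 * sum(var + mean**2 - 1 - log(var)),
  # and the Bernoulli log-likelihood per pixel is
  #   x * logits - log(1 + exp(logits)) = x * logits - logaddexp(0, logits).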
optimizer = optax.adam(config.learning_rate)
@jax.jit
def update(state: TrainingState, batch: Batch) -> TrainingState:
"""Performs a single SGD step."""
rng_key, next_rng_key = jax.random.split(state.rng_key)
gradients = jax.grad(loss_fn)(state.params, rng_key, batch)
updates, new_opt_state = optimizer.update(gradients, state.opt_state)
new_params = optax.apply_updates(state.params, updates)
return TrainingState(new_params, new_opt_state, next_rng_key)
# Load datasets.
train_dataset = load_dataset("train", config.batch_size, config.seed)
eval_datasets = {
"train": load_dataset("train", config.batch_size, config.seed),
"valid": load_dataset("validation", config.batch_size, config.seed),
}
# Initialise the training state.
initial_rng_key = jax.random.PRNGKey(config.seed)
initial_params = model.init(initial_rng_key, next(train_dataset).image)
initial_opt_state = optimizer.init(initial_params)
state = TrainingState(initial_params, initial_opt_state, initial_rng_key)
# Run training and evaluation.
for step in range(config.training_steps):
state = update(state, next(train_dataset))
if step % config.eval_every == 0:
for split, ds in eval_datasets.items():
loss = loss_fn(state.params, state.rng_key, next(ds))
logging.info({
"step": step,
"split": split,
"elbo": -jax.device_get(loss).item(),
})
if __name__ == "__main__":
app.run(main)
|
dm-haiku-main
|
examples/vae.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A minimal MNIST classifier example."""
from collections.abc import Iterator
from typing import NamedTuple
from absl import app
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
import tensorflow_datasets as tfds
NUM_CLASSES = 10 # MNIST has 10 classes (hand-written digits).
class Batch(NamedTuple):
image: np.ndarray # [B, H, W, 1]
label: np.ndarray # [B]
class TrainingState(NamedTuple):
params: hk.Params
avg_params: hk.Params
opt_state: optax.OptState
def net_fn(images: jax.Array) -> jax.Array:
"""Standard LeNet-300-100 MLP network."""
x = images.astype(jnp.float32) / 255.
mlp = hk.Sequential([
hk.Flatten(),
hk.Linear(300), jax.nn.relu,
hk.Linear(100), jax.nn.relu,
hk.Linear(NUM_CLASSES),
])
return mlp(x)
def load_dataset(
split: str,
*,
shuffle: bool,
batch_size: int,
) -> Iterator[Batch]:
"""Loads the MNIST dataset."""
ds, ds_info = tfds.load("mnist:3.*.*", split=split, with_info=True)
  ds = ds.cache()
if shuffle:
ds = ds.shuffle(ds_info.splits[split].num_examples, seed=0)
ds = ds.repeat()
ds = ds.batch(batch_size)
ds = ds.map(lambda x: Batch(**x))
return iter(tfds.as_numpy(ds))
def main(_):
# First, make the network and optimiser.
network = hk.without_apply_rng(hk.transform(net_fn))
optimiser = optax.adam(1e-3)
def loss(params: hk.Params, batch: Batch) -> jax.Array:
"""Cross-entropy classification loss, regularised by L2 weight decay."""
batch_size, *_ = batch.image.shape
logits = network.apply(params, batch.image)
labels = jax.nn.one_hot(batch.label, NUM_CLASSES)
l2_regulariser = 0.5 * sum(
jnp.sum(jnp.square(p)) for p in jax.tree_util.tree_leaves(params))
log_likelihood = jnp.sum(labels * jax.nn.log_softmax(logits))
return -log_likelihood / batch_size + 1e-4 * l2_regulariser
@jax.jit
def evaluate(params: hk.Params, batch: Batch) -> jax.Array:
"""Evaluation metric (classification accuracy)."""
logits = network.apply(params, batch.image)
predictions = jnp.argmax(logits, axis=-1)
return jnp.mean(predictions == batch.label)
@jax.jit
def update(state: TrainingState, batch: Batch) -> TrainingState:
"""Learning rule (stochastic gradient descent)."""
grads = jax.grad(loss)(state.params, batch)
updates, opt_state = optimiser.update(grads, state.opt_state)
params = optax.apply_updates(state.params, updates)
# Compute avg_params, the exponential moving average of the "live" params.
# We use this only for evaluation (cf. https://doi.org/10.1137/0330046).
avg_params = optax.incremental_update(
params, state.avg_params, step_size=0.001)
return TrainingState(params, avg_params, opt_state)
# Make datasets.
train_dataset = load_dataset("train", shuffle=True, batch_size=1_000)
eval_datasets = {
split: load_dataset(split, shuffle=False, batch_size=10_000)
for split in ("train", "test")
}
# Initialise network and optimiser; note we draw an input to get shapes.
initial_params = network.init(
jax.random.PRNGKey(seed=0), next(train_dataset).image)
initial_opt_state = optimiser.init(initial_params)
state = TrainingState(initial_params, initial_params, initial_opt_state)
# Training & evaluation loop.
for step in range(3001):
if step % 100 == 0:
# Periodically evaluate classification accuracy on train & test sets.
# Note that each evaluation is only on a (large) batch.
for split, dataset in eval_datasets.items():
accuracy = np.array(evaluate(state.avg_params, next(dataset))).item()
print({"step": step, "split": split, "accuracy": f"{accuracy:.3f}"})
# Do SGD on a batch of training examples.
state = update(state, next(train_dataset))
if __name__ == "__main__":
app.run(main)
|
dm-haiku-main
|
examples/mnist.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
|
dm-haiku-main
|
examples/transformer/__init__.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Didactic example of an autoregressive Transformer-based language model.
Glossary of shapes:
- B: Batch size.
- T: Sequence length.
- D: Model embedding size.
- H: Number of attention heads.
- V: Vocabulary size.
"""
import dataclasses
from typing import Optional
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
def _layer_norm(x: jax.Array) -> jax.Array:
"""Applies a unique LayerNorm to `x` with default settings."""
ln = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)
return ln(x)
@dataclasses.dataclass
class Transformer(hk.Module):
"""A transformer stack."""
num_heads: int # Number of attention heads.
num_layers: int # Number of transformer (attention + MLP) layers to stack.
attn_size: int # Size of the attention (key, query, value) vectors.
dropout_rate: float # Probability with which to apply dropout.
widening_factor: int = 4 # Factor by which the MLP hidden layer widens.
name: Optional[str] = None # Optional identifier for the module.
def __call__(
self,
embeddings: jax.Array, # [B, T, D]
mask: jax.Array, # [B, T]
) -> jax.Array: # [B, T, D]
"""Transforms input embedding sequences to output embedding sequences."""
initializer = hk.initializers.VarianceScaling(2 / self.num_layers)
_, seq_len, model_size = embeddings.shape
# Compute causal mask for autoregressive sequence modelling.
mask = mask[:, None, None, :] # [B, H=1, T'=1, T]
causal_mask = np.tril(np.ones((1, 1, seq_len, seq_len))) # [B=1, H=1, T, T]
mask = mask * causal_mask # [B, H=1, T, T]
h = embeddings
for _ in range(self.num_layers):
# First the attention block.
attn_block = hk.MultiHeadAttention(
num_heads=self.num_heads,
key_size=self.attn_size,
model_size=model_size,
w_init=initializer,
)
h_norm = _layer_norm(h)
h_attn = attn_block(h_norm, h_norm, h_norm, mask=mask)
h_attn = hk.dropout(hk.next_rng_key(), self.dropout_rate, h_attn)
h = h + h_attn
# Then the dense block.
dense_block = hk.Sequential([
hk.Linear(self.widening_factor * model_size, w_init=initializer),
jax.nn.gelu,
hk.Linear(model_size, w_init=initializer),
])
h_norm = _layer_norm(h)
h_dense = dense_block(h_norm)
h_dense = hk.dropout(hk.next_rng_key(), self.dropout_rate, h_dense)
h = h + h_dense
return _layer_norm(h)
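# Illustrative note (not part of the original model code): for seq_len == 3,
# the causal mask built above is the lower-triangular matrix
#
#   [[1, 0, 0],
#    [1, 1, 0],
#    [1, 1, 1]]
#
# so position t can only attend to positions <= t.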
@dataclasses.dataclass
class LanguageModel(hk.Module):
"""An autoregressive transformer-based language model."""
transformer: Transformer
model_size: int # Embedding size.
vocab_size: int # Size of the vocabulary.
pad_token: int # Identity of the padding token (used for masking inputs).
name: Optional[str] = None # Optional identifier for the module.
def __call__(
self,
tokens: jax.Array, # Batch of sequences of input tokens, shape [B, T].
) -> jax.Array: # Batch of sequences of output token logits, shape [B, T, V].
"""Forward pass, producing a sequence of logits."""
input_mask = jnp.greater(tokens, self.pad_token)
unused_batch_size, seq_len = tokens.shape
# Embed the input tokens and positions.
embed_init = hk.initializers.TruncatedNormal(stddev=0.02)
token_embedding_map = hk.Embed(
self.vocab_size, embed_dim=self.model_size, w_init=embed_init)
token_embeddings = token_embedding_map(tokens)
positional_embeddings = hk.get_parameter(
'positional_embeddings', [seq_len, self.model_size], init=embed_init)
input_embeddings = token_embeddings + positional_embeddings # [B, T, D]
# Run the transformer over the inputs.
embeddings = self.transformer(input_embeddings, input_mask) # [B, T, D]
# Decode the embeddings (here, we use untied weights).
return hk.Linear(self.vocab_size)(embeddings) # [B, T, V]
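# Aside, added for exposition: "untied weights" above means the output
# projection is a fresh hk.Linear rather than a reuse of the token embedding
# matrix. A tied variant would decode with something like
# `jnp.dot(embeddings, token_embedding_map.embeddings.T)`; this sketch assumes
# hk.Embed exposes its matrix via `embeddings` and is not part of the example.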
|
dm-haiku-main
|
examples/transformer/model.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple example loader for an ASCII language-modelling dataset."""
from collections.abc import Iterable, Iterator
import itertools
import random
from typing import NamedTuple, TypeVar
import numpy as np
VOCAB_SIZE = 128 # Number of ASCII code points.
PAD_TOKEN = 0
_T = TypeVar('_T')
class Batch(NamedTuple):
inputs: np.ndarray # Integer tokens, shape [B, T].
targets: np.ndarray # Integer tokens, shape [B, T].
def repeat(dataset: Iterable[_T]) -> Iterator[_T]:
return itertools.cycle(dataset)
def shuffle(dataset: Iterator[_T], buffer_size: int) -> Iterator[_T]:
buffer = [next(dataset) for _ in range(buffer_size)]
random.shuffle(buffer)
for item in dataset:
idx = random.randint(0, buffer_size - 1) # Inclusive.
result = buffer[idx]
buffer[idx] = item
yield result
def load_ascii_dataset(
corpus: str,
*,
batch_size: int,
sequence_length: int,
num_shuffle_batches: int = 10,
) -> Iterator[Batch]:
"""Loads a single-file ASCII dataset in memory."""
if not corpus.isascii():
raise ValueError('Loaded corpus is not ASCII.')
if chr(PAD_TOKEN) in corpus: # Reserve 0 codepoint for pad token.
raise ValueError('Corpus must not contain the null byte.')
# Naively tokenise by taking ASCII codepoints.
corpus = np.array([ord(c) for c in corpus]).astype(np.int32)
assert np.max(corpus) < VOCAB_SIZE
crop_len = sequence_length + 1
num_batches, remainder = divmod(corpus.size, batch_size * crop_len)
if remainder:
corpus = corpus[:-remainder] # Drop remainder (incomplete) batch.
ds = corpus.reshape([-1, crop_len])
if num_batches < num_shuffle_batches:
raise ValueError(
f'Only {num_batches} batches in the dataset; consider using a shorter '
        'sequence length or a smaller batch size.',
)
ds = repeat(ds)
ds = shuffle(ds, buffer_size=batch_size * num_shuffle_batches)
while True:
batch = np.stack([next(ds) for _ in range(batch_size)])
yield Batch(inputs=batch[:, :-1], targets=batch[:, 1:])
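# Usage sketch (not part of the original loader): each yielded Batch pairs a
# token window with the same window shifted by one, e.g. for the corpus "abcd"
# with sequence_length=3, inputs encode "abc" and targets encode "bcd".
#
#   batches = load_ascii_dataset(corpus, batch_size=2, sequence_length=64)
#   first = next(batches)  # first.inputs.shape == (2, 64), as is targets.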
|
dm-haiku-main
|
examples/transformer/dataset.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Trains a transformer for language modeling on a small text dataset.
This example serves to demonstrate:
- A clean Haiku transformer implementation.
- An example minimal training loop around it.
This example runs on ASCII text files.
We have not tuned the hyperparameters at all.
Example, using Karpathy's tiny_shakespeare dataset:
$ wget -O /tmp/shakespeare.txt \
https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt
$ python3 examples/transformer/train.py \
--dataset_path=/tmp/shakespeare.txt --alsologtostderr
"""
from collections.abc import MutableMapping
import time
from typing import Any, NamedTuple, Union
from absl import app
from absl import flags
from absl import logging
import haiku as hk
from examples.transformer import dataset
from examples.transformer import model
import jax
import jax.numpy as jnp
import numpy as np
import optax
DATASET_PATH = flags.DEFINE_string(
'dataset_path', None, help='Path to raw dataset file', required=True)
# Training hyperparameters.
BATCH_SIZE = 2
SEQUENCE_LENGTH = 64
LEARNING_RATE = 3e-4
GRAD_CLIP_VALUE = 1
LOG_EVERY = 50
MAX_STEPS = 10**6
SEED = 0
# Model hyperparameters.
NUM_LAYERS = 6
NUM_HEADS = 8 # Number of attention heads.
MODEL_SIZE = 128
KEY_SIZE = 32
DROPOUT_RATE = 0.1
# Helpful type aliases.
_Batch = dataset.Batch
_Metrics = MutableMapping[str, Any]
class TrainingState(NamedTuple):
"""Container for the training state."""
params: hk.Params # Current network parameters.
opt_state: optax.OptState # Optimiser state (e.g. gradient moments).
rng_key: jax.Array # RNG used for e.g. dropout. Split on each update step.
step: jax.Array # Tracks the number of training steps.
def forward_pass(tokens: Union[np.ndarray, jax.Array]) -> jax.Array:
"""Defines the forward pass of the language model."""
lm = model.LanguageModel(
model_size=MODEL_SIZE,
vocab_size=dataset.VOCAB_SIZE,
pad_token=dataset.PAD_TOKEN,
transformer=model.Transformer(
num_heads=NUM_HEADS,
num_layers=NUM_LAYERS,
attn_size=KEY_SIZE,
dropout_rate=DROPOUT_RATE,
),
)
return lm(tokens) # Logits, shape [B, T, V].
def optimiser() -> optax.GradientTransformation:
return optax.chain(
optax.clip_by_global_norm(GRAD_CLIP_VALUE),
optax.adam(LEARNING_RATE, b1=0.9, b2=0.99),
)
@hk.transform
def loss_fn(data: _Batch) -> jax.Array:
"""Computes the (scalar) language modelling loss on `data` w.r.t. params."""
logits = forward_pass(data.inputs)
log_probs = jax.nn.log_softmax(logits) # [B, T, V]
onehot_targets = jax.nn.one_hot(data.targets, dataset.VOCAB_SIZE)
log_likelihood = jnp.sum(onehot_targets * log_probs, axis=-1) # [B, T]
# Loss is the average negative log-likelihood per (non-masked) token.
mask = jnp.not_equal(data.inputs, dataset.PAD_TOKEN) # [B, T]
return -jnp.sum(log_likelihood * mask) / jnp.sum(mask) # []
@jax.jit
def init(rng: jax.Array, data: _Batch) -> TrainingState:
"""Makes an initial training state (random parameters)."""
rng, init_rng = jax.random.split(rng)
initial_params = loss_fn.init(init_rng, data)
initial_opt_state = optimiser().init(initial_params)
return TrainingState(
params=initial_params,
opt_state=initial_opt_state,
rng_key=rng,
step=jnp.array(0),
)
@jax.jit
def update(
state: TrainingState, data: _Batch
) -> tuple[TrainingState, _Metrics]:
"""Does an SGD step, returning a new training state and metrics."""
rng, net_rng = jax.random.split(state.rng_key)
loss_and_grad_fn = jax.value_and_grad(loss_fn.apply)
loss, gradients = loss_and_grad_fn(state.params, net_rng, data)
updates, new_opt_state = optimiser().update(gradients, state.opt_state)
new_params = optax.apply_updates(state.params, updates)
new_state = TrainingState(
params=new_params,
opt_state=new_opt_state,
rng_key=rng,
step=state.step + 1,
)
metrics = {
'step': state.step,
'loss': loss,
}
return new_state, metrics
def main(_):
# Create the dataset.
with open(DATASET_PATH.value) as file:
train_dataset = dataset.load_ascii_dataset(
corpus=file.read(),
batch_size=BATCH_SIZE,
sequence_length=SEQUENCE_LENGTH,
)
# Initialise the model parameters.
rng = jax.random.PRNGKey(SEED)
data = next(train_dataset)
state = init(rng, data)
# Training loop (note we don't include any explicit eval in this example).
prev_time = time.time()
for step in range(MAX_STEPS):
state, metrics = update(state, data)
data = next(train_dataset)
# We use JAX runahead to mask data preprocessing and JAX dispatch overheads.
# Using values from state/metrics too often will block the runahead and can
# cause these overheads to become more prominent.
if step % LOG_EVERY == 0:
steps_per_sec = LOG_EVERY / (time.time() - prev_time)
prev_time = time.time()
metrics |= {'steps_per_sec': steps_per_sec}
logging.info({k: float(v) for k, v in metrics.items()})
if __name__ == '__main__':
app.run(main)
|
dm-haiku-main
|
examples/transformer/train.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Actor test."""
from unittest import mock
from absl.testing import absltest
from bsuite.environments import catch
import dm_env
from examples.impala import actor as actor_lib
from examples.impala import agent as agent_lib
from examples.impala import haiku_nets
from examples.impala import learner as learner_lib
from examples.impala import util
import jax
import numpy as np
import tree
class CatchTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.env = catch.Catch()
self.action_spec = self.env.action_spec()
self.num_actions = self.action_spec.num_values
self.obs_spec = self.env.observation_spec()
self.agent = agent_lib.Agent(
num_actions=self.num_actions,
obs_spec=self.obs_spec,
net_factory=haiku_nets.CatchNet,
)
self.key = jax.random.PRNGKey(42)
self.key, subkey = jax.random.split(self.key)
self.initial_params = self.agent.initial_params(subkey)
def test_unroll(self):
mock_learner = mock.MagicMock()
traj_len = 10
actor = actor_lib.Actor(
agent=self.agent,
env=self.env,
learner=mock_learner,
unroll_length=traj_len,
)
self.key, subkey = jax.random.split(self.key)
act_out = actor.unroll(
rng_key=subkey,
frame_count=0,
params=self.initial_params,
unroll_length=traj_len)
self.assertIsInstance(act_out, util.Transition)
self.assertIsInstance(act_out.timestep, dm_env.TimeStep)
self.assertLen(act_out.timestep.reward.shape, 1)
self.assertEqual(act_out.timestep.reward.shape, (traj_len + 1,))
self.assertLen(act_out.timestep.discount.shape, 1)
self.assertEqual(act_out.timestep.discount.shape, (traj_len + 1,))
self.assertLen(act_out.timestep.step_type.shape, 1)
self.assertEqual(act_out.timestep.step_type.shape, (traj_len + 1,))
self.assertLen(act_out.timestep.observation.shape, 3)
self.assertEqual(act_out.timestep.observation.shape,
(traj_len + 1,) + self.obs_spec.shape)
self.assertIsInstance(act_out.agent_out, agent_lib.AgentOutput)
self.assertLen(act_out.agent_out.action.shape, 1)
self.assertEqual(act_out.agent_out.action.shape, (traj_len + 1,))
self.assertLen(act_out.agent_out.policy_logits.shape, 2)
self.assertEqual(act_out.agent_out.policy_logits.shape,
(traj_len + 1, self.num_actions))
self.assertLen(act_out.agent_out.values.shape, 1)
self.assertEqual(act_out.agent_out.values.shape, (traj_len + 1,))
self.assertEqual(act_out.agent_state.shape, (traj_len + 1,))
def test_sync_params(self):
mock_learner = mock.MagicMock()
frame_count = 428
params = self.initial_params
mock_learner.params_for_actor.return_value = frame_count, params
traj_len = 10
actor = actor_lib.Actor(
agent=self.agent,
env=self.env,
learner=mock_learner,
unroll_length=traj_len,
)
received_frame_count, received_params = actor.pull_params()
self.assertEqual(received_frame_count, frame_count)
tree.assert_same_structure(received_params, params)
tree.map_structure(np.testing.assert_array_almost_equal, received_params,
params)
def test_unroll_and_push(self):
traj_len = 3
mock_learner = mock.create_autospec(learner_lib.Learner, instance=True)
actor = actor_lib.Actor(
agent=self.agent,
env=self.env,
learner=mock_learner,
unroll_length=traj_len,
)
actor.unroll_and_push(0, self.initial_params)
mock_learner.enqueue_traj.assert_called_once()
act_out = mock_learner.enqueue_traj.call_args[0][0]
self.assertIsInstance(act_out, util.Transition)
self.assertIsInstance(act_out.timestep, dm_env.TimeStep)
self.assertLen(act_out.timestep.reward.shape, 1)
self.assertEqual(act_out.timestep.reward.shape, (traj_len + 1,))
self.assertLen(act_out.timestep.discount.shape, 1)
self.assertEqual(act_out.timestep.discount.shape, (traj_len + 1,))
self.assertLen(act_out.timestep.step_type.shape, 1)
self.assertEqual(act_out.timestep.step_type.shape, (traj_len + 1,))
self.assertLen(act_out.timestep.observation.shape, 3)
self.assertEqual(act_out.timestep.observation.shape,
(traj_len + 1,) + self.obs_spec.shape)
self.assertIsInstance(act_out.agent_out, agent_lib.AgentOutput)
self.assertLen(act_out.agent_out.action.shape, 1)
self.assertEqual(act_out.agent_out.action.shape, (traj_len + 1,))
self.assertLen(act_out.agent_out.policy_logits.shape, 2)
self.assertEqual(act_out.agent_out.policy_logits.shape,
(traj_len + 1, self.num_actions))
self.assertLen(act_out.agent_out.values.shape, 1)
self.assertEqual(act_out.agent_out.values.shape, (traj_len + 1,))
self.assertEqual(act_out.agent_state.shape, (traj_len + 1,))
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
examples/impala/actor_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Single-process IMPALA wiring."""
import threading
from absl import app
from bsuite.environments import catch
from examples.impala import actor as actor_lib
from examples.impala import agent as agent_lib
from examples.impala import haiku_nets
from examples.impala import learner as learner_lib
from examples.impala import util
import jax
import optax
ACTION_REPEAT = 1
BATCH_SIZE = 2
DISCOUNT_FACTOR = 0.99
MAX_ENV_FRAMES = 20000
NUM_ACTORS = 2
UNROLL_LENGTH = 20
FRAMES_PER_ITER = ACTION_REPEAT * BATCH_SIZE * UNROLL_LENGTH
def run_actor(actor: actor_lib.Actor, stop_signal: list[bool]):
"""Runs an actor to produce num_trajectories trajectories."""
while not stop_signal[0]:
frame_count, params = actor.pull_params()
actor.unroll_and_push(frame_count, params)
def main(_):
# A thunk that builds a new environment.
# Substitute your environment here!
build_env = catch.Catch
# Construct the agent. We need a sample environment for its spec.
env_for_spec = build_env()
num_actions = env_for_spec.action_spec().num_values
agent = agent_lib.Agent(num_actions, env_for_spec.observation_spec(),
haiku_nets.CatchNet)
# Construct the optimizer.
max_updates = MAX_ENV_FRAMES / FRAMES_PER_ITER
opt = optax.rmsprop(5e-3, decay=0.99, eps=1e-7)
# Construct the learner.
learner = learner_lib.Learner(
agent,
jax.random.PRNGKey(428),
opt,
BATCH_SIZE,
DISCOUNT_FACTOR,
FRAMES_PER_ITER,
max_abs_reward=1.,
logger=util.AbslLogger(), # Provide your own logger here.
)
# Construct the actors on different threads.
# stop_signal in a list so the reference is shared.
actor_threads = []
stop_signal = [False]
for i in range(NUM_ACTORS):
actor = actor_lib.Actor(
agent,
build_env(),
UNROLL_LENGTH,
learner,
rng_seed=i,
logger=util.AbslLogger(), # Provide your own logger here.
)
args = (actor, stop_signal)
actor_threads.append(threading.Thread(target=run_actor, args=args))
# Start the actors and learner.
for t in actor_threads:
t.start()
learner.run(int(max_updates))
# Stop.
stop_signal[0] = True
for t in actor_threads:
t.join()
if __name__ == '__main__':
app.run(main)
|
dm-haiku-main
|
examples/impala/run_catch.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Util."""
import collections
from absl import logging
import dm_env
import numpy as np
import tree
# Can represent either a single transition, a trajectory, or a batch of
# trajectories.
Transition = collections.namedtuple('Transition',
['timestep', 'agent_out', 'agent_state'])
def _preprocess_none(t) -> np.ndarray:
if t is None:
return np.array(0., dtype=np.float32)
else:
return np.asarray(t)
def preprocess_step(timestep: dm_env.TimeStep) -> dm_env.TimeStep:
if timestep.discount is None:
timestep = timestep._replace(discount=1.)
return tree.map_structure(_preprocess_none, timestep)
class NullLogger:
"""Logger that does nothing."""
def write(self, _):
pass
def close(self):
pass
class AbslLogger:
"""Writes to logging.info."""
def write(self, d):
logging.info(d)
def close(self):
pass
|
dm-haiku-main
|
examples/impala/util.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common networks."""
import collections
import dm_env
import haiku as hk
import jax.nn
import jax.numpy as jnp
NetOutput = collections.namedtuple('NetOutput', ['policy_logits', 'value'])
class CatchNet(hk.RNNCore):
"""A simple neural network for catch."""
def __init__(self, num_actions, name=None):
super().__init__(name=name)
self._num_actions = num_actions
def initial_state(self, batch_size):
if batch_size is None:
shape = []
else:
shape = [batch_size]
return jnp.zeros(shape) # Dummy.
def __call__(self, x: dm_env.TimeStep, state):
torso_net = hk.Sequential(
[hk.Flatten(),
hk.Linear(128), jax.nn.relu,
hk.Linear(64), jax.nn.relu])
torso_output = torso_net(x.observation)
policy_logits = hk.Linear(self._num_actions)(torso_output)
value = hk.Linear(1)(torso_output)
value = jnp.squeeze(value, axis=-1)
return NetOutput(policy_logits=policy_logits, value=value), state
def unroll(self, x, state):
"""Unrolls more efficiently than dynamic_unroll."""
out, _ = hk.BatchApply(self)(x, None)
return out, state
class AtariShallowTorso(hk.Module):
"""Shallow torso for Atari, from the DQN paper."""
def __init__(self, name=None):
super().__init__(name=name)
def __call__(self, x):
torso_net = hk.Sequential([
lambda x: x / 255.,
hk.Conv2D(32, kernel_shape=[8, 8], stride=[4, 4], padding='VALID'),
jax.nn.relu,
hk.Conv2D(64, kernel_shape=[4, 4], stride=[2, 2], padding='VALID'),
jax.nn.relu,
hk.Conv2D(64, kernel_shape=[3, 3], stride=[1, 1], padding='VALID'),
jax.nn.relu,
hk.Flatten(),
hk.Linear(512),
jax.nn.relu,
])
return torso_net(x)
class ResidualBlock(hk.Module):
"""Residual block."""
def __init__(self, num_channels, name=None):
super().__init__(name=name)
self._num_channels = num_channels
def __call__(self, x):
main_branch = hk.Sequential([
jax.nn.relu,
hk.Conv2D(
self._num_channels,
kernel_shape=[3, 3],
stride=[1, 1],
padding='SAME'),
jax.nn.relu,
hk.Conv2D(
self._num_channels,
kernel_shape=[3, 3],
stride=[1, 1],
padding='SAME'),
])
return main_branch(x) + x
class AtariDeepTorso(hk.Module):
"""Deep torso for Atari, from the IMPALA paper."""
def __init__(self, name=None):
super().__init__(name=name)
def __call__(self, x):
torso_out = x / 255.
for i, (num_channels, num_blocks) in enumerate([(16, 2), (32, 2), (32, 2)]):
conv = hk.Conv2D(
num_channels, kernel_shape=[3, 3], stride=[1, 1], padding='SAME')
torso_out = conv(torso_out)
torso_out = hk.max_pool(
torso_out,
window_shape=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding='SAME',
)
for j in range(num_blocks):
block = ResidualBlock(num_channels, name=f'residual_{i}_{j}')
torso_out = block(torso_out)
torso_out = jax.nn.relu(torso_out)
torso_out = hk.Flatten()(torso_out)
torso_out = hk.Linear(256)(torso_out)
torso_out = jax.nn.relu(torso_out)
return torso_out
class AtariNet(hk.RNNCore):
"""Network for Atari."""
def __init__(self, num_actions, use_resnet, use_lstm, name=None):
super().__init__(name=name)
self._num_actions = num_actions
self._use_resnet = use_resnet
self._use_lstm = use_lstm
self._core = hk.ResetCore(hk.LSTM(256))
def initial_state(self, batch_size):
return self._core.initial_state(batch_size)
def __call__(self, x: dm_env.TimeStep, state):
x = jax.tree_util.tree_map(lambda t: t[None, ...], x)
return self.unroll(x, state)
def unroll(self, x, state):
"""Unrolls more efficiently than dynamic_unroll."""
if self._use_resnet:
torso = AtariDeepTorso()
else:
torso = AtariShallowTorso()
torso_output = hk.BatchApply(torso)(x.observation)
if self._use_lstm:
should_reset = jnp.equal(x.step_type, int(dm_env.StepType.FIRST))
core_input = (torso_output, should_reset)
core_output, state = hk.dynamic_unroll(self._core, core_input, state)
else:
core_output = torso_output
# state passes through.
return hk.BatchApply(self._head)(core_output), state
def _head(self, core_output):
policy_logits = hk.Linear(self._num_actions)(core_output)
value = hk.Linear(1)(core_output)
value = jnp.squeeze(value, axis=-1)
return NetOutput(policy_logits=policy_logits, value=value)
|
dm-haiku-main
|
examples/impala/haiku_nets.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
|
dm-haiku-main
|
examples/impala/__init__.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A stateless agent interface."""
import collections
import functools
from typing import Any, Callable, Optional
import dm_env
import haiku as hk
from examples.impala import util
import jax
import jax.numpy as jnp
import numpy as np
AgentOutput = collections.namedtuple("AgentOutput",
["policy_logits", "values", "action"])
Action = int
Nest = Any
NetFactory = Callable[[int], hk.RNNCore]
class Agent:
"""A stateless agent interface."""
def __init__(self, num_actions: int, obs_spec: Nest,
net_factory: NetFactory):
"""Constructs an Agent object.
Args:
num_actions: Number of possible actions for the agent. Assumes a flat,
discrete, 0-indexed action space.
obs_spec: The observation spec of the environment.
net_factory: A function from num_actions to a Haiku module representing
the agent. This module should have an initial_state() function and an
unroll function.
"""
self._obs_spec = obs_spec
net_factory = functools.partial(net_factory, num_actions)
# Instantiate two hk.transforms() - one for getting the initial state of the
# agent, another for actually initializing and running the agent.
_, self._initial_state_apply_fn = hk.without_apply_rng(
hk.transform(
lambda batch_size: net_factory().initial_state(batch_size)))
self._init_fn, self._apply_fn = hk.without_apply_rng(
hk.transform(lambda obs, state: net_factory().unroll(obs, state)))
@functools.partial(jax.jit, static_argnums=0)
def initial_params(self, rng_key):
"""Initializes the agent params given the RNG key."""
dummy_inputs = jax.tree_util.tree_map(
lambda t: np.zeros(t.shape, t.dtype), self._obs_spec)
dummy_inputs = util.preprocess_step(dm_env.restart(dummy_inputs))
dummy_inputs = jax.tree_util.tree_map(
lambda t: t[None, None, ...], dummy_inputs)
return self._init_fn(rng_key, dummy_inputs, self.initial_state(1))
@functools.partial(jax.jit, static_argnums=(0, 1))
def initial_state(self, batch_size: Optional[int]):
"""Returns agent initial state."""
# We expect that generating the initial_state does not require parameters.
return self._initial_state_apply_fn(None, batch_size)
@functools.partial(jax.jit, static_argnums=(0,))
def step(
self,
rng_key,
params: hk.Params,
timestep: dm_env.TimeStep,
state: Nest,
) -> tuple[AgentOutput, Nest]:
"""For a given single-step, unbatched timestep, output the chosen action."""
# Pad timestep, state to be [T, B, ...] and [B, ...] respectively.
timestep = jax.tree_util.tree_map(lambda t: t[None, None, ...], timestep)
state = jax.tree_util.tree_map(lambda t: t[None, ...], state)
net_out, next_state = self._apply_fn(params, timestep, state)
# Remove the padding from above.
net_out = jax.tree_util.tree_map(
lambda t: jnp.squeeze(t, axis=(0, 1)), net_out)
next_state = jax.tree_util.tree_map(
lambda t: jnp.squeeze(t, axis=0), next_state)
# Sample an action and return.
action = hk.multinomial(rng_key, net_out.policy_logits, num_samples=1)
action = jnp.squeeze(action, axis=-1)
return AgentOutput(net_out.policy_logits, net_out.value, action), next_state
def unroll(
self,
params: hk.Params,
trajectory: dm_env.TimeStep,
state: Nest,
) -> AgentOutput:
"""Unroll the agent along trajectory."""
net_out, _ = self._apply_fn(params, trajectory, state)
return AgentOutput(net_out.policy_logits, net_out.value, action=[])
|
dm-haiku-main
|
examples/impala/agent.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku.examples.impala.learner."""
from absl.testing import absltest
from bsuite.environments import catch
from examples.impala import actor as actor_lib
from examples.impala import agent as agent_lib
from examples.impala import haiku_nets
from examples.impala import learner as learner_lib
import jax
import optax
class LearnerTest(absltest.TestCase):
def test_integration(self):
env = catch.Catch()
action_spec = env.action_spec()
num_actions = action_spec.num_values
obs_spec = env.observation_spec()
agent = agent_lib.Agent(
num_actions=num_actions,
obs_spec=obs_spec,
net_factory=haiku_nets.CatchNet,
)
unroll_length = 20
learner = learner_lib.Learner(
agent=agent,
rng_key=jax.random.PRNGKey(42),
opt=optax.sgd(1e-2),
batch_size=1,
discount_factor=0.99,
frames_per_iter=unroll_length,
)
actor = actor_lib.Actor(
agent=agent,
env=env,
learner=learner,
unroll_length=unroll_length,
)
frame_count, params = actor.pull_params()
actor.unroll_and_push(frame_count=frame_count, params=params)
learner.run(max_iterations=1)
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
examples/impala/learner_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""IMPALA actor class."""
import dm_env
import haiku as hk
from examples.impala import agent as agent_lib
from examples.impala import learner as learner_lib
from examples.impala import util
import jax
import numpy as np
class Actor:
"""Manages the state of a single agent/environment interaction loop."""
def __init__(
self,
agent: agent_lib.Agent,
env: dm_env.Environment,
unroll_length: int,
learner: learner_lib.Learner,
rng_seed: int = 42,
logger=None,
):
self._agent = agent
self._env = env
self._unroll_length = unroll_length
self._learner = learner
self._timestep = env.reset()
self._agent_state = agent.initial_state(None)
self._traj = []
self._rng_key = jax.random.PRNGKey(rng_seed)
if logger is None:
logger = util.NullLogger()
self._logger = logger
self._episode_return = 0.
def unroll(self, rng_key, frame_count: int, params: hk.Params,
unroll_length: int) -> util.Transition:
"""Run unroll_length agent/environment steps, returning the trajectory."""
timestep = self._timestep
agent_state = self._agent_state
# Unroll one longer if trajectory is empty.
num_interactions = unroll_length + int(not self._traj)
subkeys = jax.random.split(rng_key, num_interactions)
for i in range(num_interactions):
timestep = util.preprocess_step(timestep)
agent_out, next_state = self._agent.step(subkeys[i], params, timestep,
agent_state)
transition = util.Transition(
timestep=timestep,
agent_out=agent_out,
agent_state=agent_state)
self._traj.append(transition)
agent_state = next_state
timestep = self._env.step(agent_out.action)
if timestep.last():
self._episode_return += timestep.reward
self._logger.write({
'num_frames': frame_count,
'episode_return': self._episode_return,
})
self._episode_return = 0.
else:
self._episode_return += timestep.reward or 0.
# Elide a manual agent_state reset on step_type.first(), as the ResetCore
# already takes care of this for us.
    # Pack the trajectory and update the actor's internal state for the next unroll.
trajectory = jax.device_get(self._traj)
trajectory = jax.tree_util.tree_map(lambda *xs: np.stack(xs), *trajectory)
self._timestep = timestep
self._agent_state = agent_state
# Keep the bootstrap timestep for next trajectory.
self._traj = self._traj[-1:]
return trajectory
def unroll_and_push(self, frame_count: int, params: hk.Params):
"""Run one unroll and send trajectory to learner."""
params = jax.device_put(params)
self._rng_key, subkey = jax.random.split(self._rng_key)
act_out = self.unroll(
rng_key=subkey,
frame_count=frame_count,
params=params,
unroll_length=self._unroll_length)
self._learner.enqueue_traj(act_out)
def pull_params(self):
return self._learner.params_for_actor()
|
dm-haiku-main
|
examples/impala/actor.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""IMPALA learner class."""
import functools
import itertools
import queue
import threading
import warnings
import dm_env
import haiku as hk
from examples.impala import agent as agent_lib
from examples.impala import util
import jax
from jax.example_libraries import optimizers
import jax.numpy as jnp
import numpy as np
import optax
import rlax
# The IMPALA paper sums losses, rather than taking the mean.
# We wrap rlax to do so as well.
def policy_gradient_loss(logits, *args):
"""rlax.policy_gradient_loss, but with sum(loss) and [T, B, ...] inputs."""
mean_per_batch = jax.vmap(rlax.policy_gradient_loss, in_axes=1)(logits, *args)
total_loss_per_batch = mean_per_batch * logits.shape[0]
return jnp.sum(total_loss_per_batch)
def entropy_loss(logits, *args):
"""rlax.entropy_loss, but with sum(loss) and [T, B, ...] inputs."""
mean_per_batch = jax.vmap(rlax.entropy_loss, in_axes=1)(logits, *args)
total_loss_per_batch = mean_per_batch * logits.shape[0]
return jnp.sum(total_loss_per_batch)
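# Shape sketch for the two wrappers above (illustrative, not exercised here):
# with logits of shape [T, B, A], the vmap over axis 1 produces a length-B
# vector of per-sequence means over T; multiplying by T recovers per-sequence
# sums, and the final jnp.sum adds them over the batch, giving the summed
# (rather than averaged) loss the IMPALA paper uses.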
class Learner:
"""Manages state and performs updates for IMPALA learner."""
def __init__(
self,
agent: agent_lib.Agent,
rng_key,
opt: optax.GradientTransformation,
batch_size: int,
discount_factor: float,
frames_per_iter: int,
max_abs_reward: float = 0,
logger=None,
):
if jax.device_count() > 1:
warnings.warn('Note: the impala example will only take advantage of a '
'single accelerator.')
self._agent = agent
self._opt = opt
self._batch_size = batch_size
self._discount_factor = discount_factor
self._frames_per_iter = frames_per_iter
self._max_abs_reward = max_abs_reward
# Data pipeline objects.
self._done = False
self._host_q = queue.Queue(maxsize=self._batch_size)
self._device_q = queue.Queue(maxsize=1)
# Prepare the parameters to be served to actors.
params = agent.initial_params(rng_key)
self._params_for_actor = (0, jax.device_get(params))
# Set up logging.
if logger is None:
logger = util.NullLogger()
self._logger = logger
def _loss(
self,
theta: hk.Params,
trajectories: util.Transition,
) -> tuple[jax.Array, dict[str, jax.Array]]:
"""Compute vtrace-based actor-critic loss."""
initial_state = jax.tree_util.tree_map(
lambda t: t[0], trajectories.agent_state)
learner_outputs = self._agent.unroll(theta, trajectories.timestep,
initial_state)
v_t = learner_outputs.values[1:]
    # Drop the bootstrap (final) step from the actor and learner outputs.
_, actor_out, _ = jax.tree_util.tree_map(lambda t: t[:-1], trajectories)
learner_outputs = jax.tree_util.tree_map(lambda t: t[:-1], learner_outputs)
v_tm1 = learner_outputs.values
# Get the discount, reward, step_type from the *next* timestep.
timestep = jax.tree_util.tree_map(lambda t: t[1:], trajectories.timestep)
discounts = timestep.discount * self._discount_factor
rewards = timestep.reward
if self._max_abs_reward > 0:
rewards = jnp.clip(rewards, -self._max_abs_reward, self._max_abs_reward)
# The step is uninteresting if we transitioned LAST -> FIRST.
# timestep corresponds to the *next* time step, so we filter for FIRST.
mask = jnp.not_equal(timestep.step_type, int(dm_env.StepType.FIRST))
mask = mask.astype(jnp.float32)
rhos = rlax.categorical_importance_sampling_ratios(
learner_outputs.policy_logits, actor_out.policy_logits,
actor_out.action)
# vmap vtrace_td_error_and_advantage to take/return [T, B, ...].
vtrace_td_error_and_advantage = jax.vmap(
rlax.vtrace_td_error_and_advantage, in_axes=1, out_axes=1)
vtrace_returns = vtrace_td_error_and_advantage(
v_tm1, v_t, rewards, discounts, rhos)
pg_advs = vtrace_returns.pg_advantage
pg_loss = policy_gradient_loss(learner_outputs.policy_logits,
actor_out.action, pg_advs, mask)
baseline_loss = 0.5 * jnp.sum(jnp.square(vtrace_returns.errors) * mask)
ent_loss = entropy_loss(learner_outputs.policy_logits, mask)
total_loss = pg_loss
total_loss += 0.5 * baseline_loss
total_loss += 0.01 * ent_loss
logs = {}
logs['PG_loss'] = pg_loss
logs['baseline_loss'] = baseline_loss
logs['entropy_loss'] = ent_loss
logs['total_loss'] = total_loss
return total_loss, logs
@functools.partial(jax.jit, static_argnums=0)
def update(self, params, opt_state, batch: util.Transition):
"""The actual update function."""
(_, logs), grads = jax.value_and_grad(
self._loss, has_aux=True)(params, batch)
grad_norm_unclipped = optimizers.l2_norm(grads)
updates, updated_opt_state = self._opt.update(grads, opt_state)
params = optax.apply_updates(params, updates)
weight_norm = optimizers.l2_norm(params)
logs.update({
'grad_norm_unclipped': grad_norm_unclipped,
'weight_norm': weight_norm,
})
return params, updated_opt_state, logs
def enqueue_traj(self, traj: util.Transition):
"""Enqueue trajectory."""
self._host_q.put(traj)
def params_for_actor(self) -> tuple[int, hk.Params]:
return self._params_for_actor
def host_to_device_worker(self):
"""Elementary data pipeline."""
batch = []
while not self._done:
# Try to get a batch. Skip the iteration if we couldn't.
try:
for _ in range(len(batch), self._batch_size):
# As long as possible while keeping learner_test time reasonable.
batch.append(self._host_q.get(timeout=10))
except queue.Empty:
continue
assert len(batch) == self._batch_size
# Prepare for consumption, then put batch onto device.
stacked_batch = jax.tree_util.tree_map(
lambda *xs: np.stack(xs, axis=1), *batch)
self._device_q.put(jax.device_put(stacked_batch))
# Clean out the built-up batch.
batch = []
def run(self, max_iterations: int = -1):
"""Runs the learner for max_iterations updates."""
# Start host-to-device transfer worker.
transfer_thread = threading.Thread(target=self.host_to_device_worker)
transfer_thread.start()
(num_frames, params) = self._params_for_actor
opt_state = self._opt.init(params)
steps = range(max_iterations) if max_iterations != -1 else itertools.count()
for _ in steps:
batch = self._device_q.get()
params, opt_state, logs = self.update(params, opt_state, batch)
num_frames += self._frames_per_iter
# Collect parameters to distribute to downstream actors.
self._params_for_actor = (num_frames, jax.device_get(params))
# Collect and write logs out.
logs = jax.device_get(logs)
logs.update({
'num_frames': num_frames,
})
self._logger.write(logs)
# Shut down.
self._done = True
self._logger.close()
transfer_thread.join()
|
dm-haiku-main
|
examples/impala/learner.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
|
dm-haiku-main
|
examples/imagenet/__init__.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ImageNet dataset with typical pre-processing."""
from collections.abc import Iterable, Iterator, Mapping, Sequence
import enum
import itertools as it
import types
from typing import Optional
import jax
import jax.numpy as jnp
import numpy as np
from packaging import version
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
Batch = Mapping[str, np.ndarray]
MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
class Split(enum.Enum):
"""Imagenet dataset split."""
TRAIN = 1
TRAIN_AND_VALID = 2
VALID = 3
TEST = 4
@classmethod
def from_string(cls, name: str) -> 'Split':
return {'TRAIN': Split.TRAIN, 'TRAIN_AND_VALID': Split.TRAIN_AND_VALID,
'VALID': Split.VALID, 'VALIDATION': Split.VALID,
'TEST': Split.TEST}[name.upper()]
@property
def num_examples(self):
return {Split.TRAIN_AND_VALID: 1281167, Split.TRAIN: 1271167,
Split.VALID: 10000, Split.TEST: 50000}[self]
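# A commented usage sketch of Split (the values come from the table above):
#   split = Split.from_string('TRAIN_AND_VALID')
#   assert split is Split.TRAIN_AND_VALID and split.num_examples == 1281167
#   assert Split.from_string('VALIDATION') is Split.VALID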
def _check_min_version(mod: types.ModuleType, min_ver: str):
actual_ver = getattr(mod, '__version__')
if version.parse(actual_ver) < version.parse(min_ver):
raise ValueError(
f'{mod.__name__} >= {min_ver} is required, you have {actual_ver}')
def check_versions():
_check_min_version(tf, '2.5.0')
_check_min_version(tfds, '4.2.0')
def load(
split: Split,
*,
is_training: bool,
batch_dims: Sequence[int],
dtype: jnp.dtype = jnp.float32,
transpose: bool = False,
zeros: bool = False,
) -> Iterator[Batch]:
"""Loads the given split of the dataset."""
if zeros:
h, w, c = 224, 224, 3
if transpose:
image_dims = (*batch_dims[:-1], h, w, c, batch_dims[0])
else:
image_dims = (*batch_dims, h, w, c)
batch = {'images': np.zeros(image_dims, dtype=dtype),
'labels': np.zeros(batch_dims, dtype=np.uint32)}
if is_training:
yield from it.repeat(batch)
else:
num_batches = split.num_examples // np.prod(batch_dims)
      yield from it.repeat(batch, num_batches)
    # Do not fall through to the real dataset when serving synthetic zeros.
    return
if is_training:
start, end = _shard(split, jax.process_index(), jax.process_count())
else:
start, end = _shard(split, 0, 1)
tfds_split = tfds.core.ReadInstruction(_to_tfds_split(split),
from_=start, to=end, unit='abs')
ds = tfds.load('imagenet2012:5.*.*', split=tfds_split,
decoders={'image': tfds.decode.SkipDecoding()})
total_batch_size = np.prod(batch_dims)
options = tf.data.Options()
options.experimental_threading.private_threadpool_size = 48
options.experimental_threading.max_intra_op_parallelism = 1
options.experimental_optimization.map_parallelization = True
if is_training:
options.experimental_deterministic = False
ds = ds.with_options(options)
if is_training:
if jax.process_count() > 1:
# Only cache if we are reading a subset of the dataset.
ds = ds.cache()
ds = ds.repeat()
ds = ds.shuffle(buffer_size=10 * total_batch_size, seed=0)
else:
if split.num_examples % total_batch_size != 0:
      raise ValueError(
          f'Test/valid split size ({split.num_examples}) must be divisible by '
          f'the total batch size ({total_batch_size}).')
def preprocess(example):
image = _preprocess_image(example['image'], is_training)
label = tf.cast(example['label'], tf.int32)
return {'images': image, 'labels': label}
ds = ds.map(preprocess, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def transpose_fn(batch):
# We use the "double transpose trick" to improve performance for TPUs. Note
# that this (typically) requires a matching HWCN->NHWC transpose in your
# model code. The compiler cannot make this optimization for us since our
# data pipeline and model are compiled separately.
batch = dict(**batch)
batch['images'] = tf.transpose(batch['images'], (1, 2, 3, 0))
return batch
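  # For reference, the matching model-side un-transpose (see train.py) is
  # roughly: images = jnp.transpose(images, (3, 0, 1, 2))  # HWCN -> NHWC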
def cast_fn(batch):
batch = dict(**batch)
batch['images'] = tf.cast(batch['images'], tf.dtypes.as_dtype(dtype))
return batch
for i, batch_size in enumerate(reversed(batch_dims)):
ds = ds.batch(batch_size)
if i == 0:
if transpose:
ds = ds.map(transpose_fn) # NHWC -> HWCN
# NOTE: You may be tempted to move the casting earlier on in the pipeline,
# but for bf16 some operations will end up silently placed on the TPU and
# this causes stalls while TF and JAX battle for the accelerator.
if dtype != jnp.float32:
ds = ds.map(cast_fn)
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
yield from tfds.as_numpy(ds)
def _device_put_sharded(sharded_tree, devices):
leaves, treedef = jax.tree_util.tree_flatten(sharded_tree)
n = leaves[0].shape[0]
return jax.device_put_sharded([
jax.tree_util.tree_unflatten(treedef, [l[i] for l in leaves])
for i in range(n)], devices)
def double_buffer(ds: Iterable[Batch]) -> Iterator[Batch]:
"""Keeps at least two batches on the accelerator.
The current GPU allocator design reuses previous allocations. For a training
loop this means batches will (typically) occupy the same region of memory as
the previous batch. An issue with this is that it means we cannot overlap a
host->device copy for the next batch until the previous step has finished and
the previous batch has been freed.
By double buffering we ensure that there are always two batches on the device.
This means that a given batch waits on the N-2'th step to finish and free,
meaning that it can allocate and copy the next batch to the accelerator in
parallel with the N-1'th step being executed.
Args:
ds: Iterable of batches of numpy arrays.
Yields:
Batches of sharded device arrays.
"""
batch = None
devices = jax.local_devices()
for next_batch in ds:
assert next_batch is not None
next_batch = _device_put_sharded(next_batch, devices)
if batch is not None:
yield batch
batch = next_batch
if batch is not None:
yield batch
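# A commented sketch of how double_buffer composes with load(); the batch size
# below is illustrative, not a recommended setting:
#   train_ds = load(Split.TRAIN_AND_VALID, is_training=True,
#                   batch_dims=[jax.local_device_count(), 128])
#   train_ds = double_buffer(train_ds)
#   batch = next(train_ds)  # Leaves of `batch` are already on-device, sharded.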
def _to_tfds_split(split: Split) -> tfds.Split:
"""Returns the TFDS split appropriately sharded."""
# NOTE: Imagenet did not release labels for the test split used in the
# competition, so it has been typical at DeepMind to consider the VALID
# split the TEST split and to reserve 10k images from TRAIN for VALID.
if split in (Split.TRAIN, Split.TRAIN_AND_VALID, Split.VALID):
return tfds.Split.TRAIN
else:
assert split == Split.TEST
return tfds.Split.VALIDATION
def _shard(split: Split, shard_index: int, num_shards: int) -> tuple[int, int]:
"""Returns [start, end) for the given shard index."""
assert shard_index < num_shards
arange = np.arange(split.num_examples)
shard_range = np.array_split(arange, num_shards)[shard_index]
start, end = shard_range[0], (shard_range[-1] + 1)
if split == Split.TRAIN:
# Note that our TRAIN=TFDS_TRAIN[10000:] and VALID=TFDS_TRAIN[:10000].
offset = Split.VALID.num_examples
start += offset
end += offset
return start, end
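# Worked example of the arithmetic above for a single host (num_shards=1),
# using the num_examples table in Split:
#   _shard(Split.VALID, 0, 1) == (0, 10000)        # VALID = TFDS_TRAIN[:10000]
#   _shard(Split.TRAIN, 0, 1) == (10000, 1281167)  # TRAIN = TFDS_TRAIN[10000:]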
def _preprocess_image(
image_bytes: tf.Tensor,
is_training: bool,
) -> tf.Tensor:
"""Returns processed and resized images."""
if is_training:
image = _decode_and_random_crop(image_bytes)
image = tf.image.random_flip_left_right(image)
else:
image = _decode_and_center_crop(image_bytes)
assert image.dtype == tf.uint8
# NOTE: Bicubic resize (1) casts uint8 to float32 and (2) resizes without
# clamping overshoots. This means values returned will be outside the range
# [0.0, 255.0] (e.g. we have observed outputs in the range [-51.1, 336.6]).
image = tf.image.resize(image, [224, 224], tf.image.ResizeMethod.BICUBIC)
image = _normalize_image(image)
return image
def _normalize_image(image: tf.Tensor) -> tf.Tensor:
"""Normalize the image to zero mean and unit variance."""
image -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=image.dtype)
image /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=image.dtype)
return image
def _distorted_bounding_box_crop(
image_bytes: tf.Tensor,
*,
jpeg_shape: tf.Tensor,
bbox: tf.Tensor,
min_object_covered: float,
aspect_ratio_range: tuple[float, float],
area_range: tuple[float, float],
max_attempts: int,
) -> tf.Tensor:
"""Generates cropped_image using one of the bboxes randomly distorted."""
bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(
jpeg_shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
return image
def _decode_and_random_crop(image_bytes: tf.Tensor) -> tf.Tensor:
"""Make a random crop of 224."""
jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
image = _distorted_bounding_box_crop(
image_bytes,
jpeg_shape=jpeg_shape,
bbox=bbox,
min_object_covered=0.1,
aspect_ratio_range=(3 / 4, 4 / 3),
area_range=(0.08, 1.0),
max_attempts=10)
if tf.reduce_all(tf.equal(jpeg_shape, tf.shape(image))):
# If the random crop failed fall back to center crop.
image = _decode_and_center_crop(image_bytes, jpeg_shape)
return image
def _decode_and_center_crop(
image_bytes: tf.Tensor,
jpeg_shape: Optional[tf.Tensor] = None,
) -> tf.Tensor:
"""Crops to center of image with padding then scales."""
if jpeg_shape is None:
jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
image_height = jpeg_shape[0]
image_width = jpeg_shape[1]
padded_center_crop_size = tf.cast(
((224 / (224 + 32)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)), tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = tf.stack([offset_height, offset_width,
padded_center_crop_size, padded_center_crop_size])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
return image
|
dm-haiku-main
|
examples/imagenet/dataset.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet50 on ImageNet2012."""
from collections.abc import Iterable, Mapping
import contextlib
import functools
import timeit
from typing import NamedTuple
from absl import app
from absl import flags
from absl import logging
import haiku as hk
from examples.imagenet import dataset
import jax
import jax.numpy as jnp
import jmp
import numpy as np
import optax
import tree
# Hyper parameters.
SPLITS = ('TRAIN', 'TRAIN_AND_VALID', 'VALID', 'TEST')
flags.DEFINE_integer('eval_batch_size', 1000, help='')
flags.DEFINE_enum('eval_split', 'TEST', SPLITS, help='')
flags.DEFINE_float('model_bn_decay', 0.9, help='')
flags.DEFINE_bool('model_resnet_v2', True, help='')
flags.DEFINE_float('optimizer_momentum', 0.9, help='')
flags.DEFINE_bool('optimizer_use_nesterov', True, help='')
flags.DEFINE_integer('train_device_batch_size', 128, help='')
flags.DEFINE_integer('train_eval_every', -1, help='')
flags.DEFINE_integer('train_init_random_seed', 42, help='')
flags.DEFINE_integer('train_log_every', 100, help='')
flags.DEFINE_integer('train_epochs', 90, help='')
flags.DEFINE_integer('train_lr_warmup_epochs', 5, help='')
flags.DEFINE_float('train_lr_init', 0.1, help='')
flags.DEFINE_float('train_smoothing', .1, lower_bound=0, upper_bound=1, help='')
flags.DEFINE_enum('train_split', 'TRAIN_AND_VALID', SPLITS, help='')
flags.DEFINE_float('train_weight_decay', 1e-4, help='')
flags.DEFINE_string('mp_policy', 'p=f32,c=f32,o=f32', help='')
flags.DEFINE_string('mp_bn_policy', 'p=f32,c=f32,o=f32', help='')
flags.DEFINE_enum('mp_scale_type', 'NoOp', ['NoOp', 'Static', 'Dynamic'],
help='')
flags.DEFINE_float('mp_scale_value', 2 ** 15, help='')
flags.DEFINE_bool('mp_skip_nonfinite', False, help='')
flags.DEFINE_bool('dataset_transpose', False, help='')
flags.DEFINE_bool('dataset_zeros', False, help='')
FLAGS = flags.FLAGS
Scalars = Mapping[str, jax.Array]
class TrainState(NamedTuple):
params: hk.Params
state: hk.State
opt_state: optax.OptState
loss_scale: jmp.LossScale
get_policy = lambda: jmp.get_policy(FLAGS.mp_policy)
get_bn_policy = lambda: jmp.get_policy(FLAGS.mp_bn_policy)
def get_initial_loss_scale() -> jmp.LossScale:
cls = getattr(jmp, f'{FLAGS.mp_scale_type}LossScale')
return cls(FLAGS.mp_scale_value) if cls is not jmp.NoOpLossScale else cls()
def _forward(
batch: dataset.Batch,
is_training: bool,
) -> jax.Array:
"""Forward application of the resnet."""
images = batch['images']
if FLAGS.dataset_transpose:
# See note in dataset.py if you are curious about this.
images = jnp.transpose(images, (3, 0, 1, 2)) # HWCN -> NHWC
net = hk.nets.ResNet50(1000,
resnet_v2=FLAGS.model_resnet_v2,
bn_config={'decay_rate': FLAGS.model_bn_decay})
return net(images, is_training=is_training)
# Transform our forwards function into a pair of pure functions.
forward = hk.transform_with_state(_forward)
def lr_schedule(step: jax.Array) -> jax.Array:
"""Cosine learning rate schedule."""
train_split = dataset.Split.from_string(FLAGS.train_split)
total_batch_size = FLAGS.train_device_batch_size * jax.device_count()
steps_per_epoch = train_split.num_examples / total_batch_size
warmup_steps = FLAGS.train_lr_warmup_epochs * steps_per_epoch
training_steps = FLAGS.train_epochs * steps_per_epoch
lr = FLAGS.train_lr_init * total_batch_size / 256
scaled_step = (jnp.maximum(step - warmup_steps, 0) /
(training_steps - warmup_steps))
lr *= 0.5 * (1.0 + jnp.cos(jnp.pi * scaled_step))
if warmup_steps:
lr *= jnp.minimum(step / warmup_steps, 1.0)
return lr
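# Rough shape of the schedule above under the default flags and an assumed
# 8-device setup: the total batch size is 128 * 8 = 1024, so the peak learning
# rate is 0.1 * 1024 / 256 = 0.4, reached after a 5-epoch linear warmup and
# then decayed along a half cosine towards zero by epoch 90.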
def make_optimizer() -> optax.GradientTransformation:
"""SGD with nesterov momentum and a custom lr schedule."""
return optax.chain(
optax.trace(
decay=FLAGS.optimizer_momentum,
nesterov=FLAGS.optimizer_use_nesterov),
optax.scale_by_schedule(lr_schedule), optax.scale(-1))
def l2_loss(params: Iterable[jax.Array]) -> jax.Array:
return 0.5 * sum(jnp.sum(jnp.square(p)) for p in params)
def loss_fn(
params: hk.Params,
state: hk.State,
loss_scale: jmp.LossScale,
batch: dataset.Batch,
) -> tuple[jax.Array, tuple[jax.Array, hk.State]]:
"""Computes a regularized loss for the given batch."""
logits, state = forward.apply(params, state, None, batch, is_training=True)
labels = jax.nn.one_hot(batch['labels'], 1000)
if FLAGS.train_smoothing:
labels = optax.smooth_labels(labels, FLAGS.train_smoothing)
loss = optax.softmax_cross_entropy(logits=logits, labels=labels).mean()
l2_params = [p for ((mod_name, _), p) in tree.flatten_with_path(params)
if 'batchnorm' not in mod_name]
loss = loss + FLAGS.train_weight_decay * l2_loss(l2_params)
return loss_scale.scale(loss), (loss, state)
@functools.partial(jax.pmap, axis_name='i', donate_argnums=(0,))
def train_step(
train_state: TrainState,
batch: dataset.Batch,
) -> tuple[TrainState, Scalars]:
"""Applies an update to parameters and returns new state."""
params, state, opt_state, loss_scale = train_state
grads, (loss, new_state) = (
jax.grad(loss_fn, has_aux=True)(params, state, loss_scale, batch))
# Grads are in "param_dtype" (likely F32) here. We cast them back to the
# compute dtype such that we do the all-reduce below in the compute precision
# (which is typically lower than the param precision).
policy = get_policy()
grads = policy.cast_to_compute(grads)
grads = loss_scale.unscale(grads)
# Taking the mean across all replicas to keep params in sync.
grads = jax.lax.pmean(grads, axis_name='i')
# We compute our optimizer update in the same precision as params, even when
# doing mixed precision training.
grads = policy.cast_to_param(grads)
# Compute and apply updates via our optimizer.
updates, new_opt_state = make_optimizer().update(grads, opt_state)
new_params = optax.apply_updates(params, updates)
if FLAGS.mp_skip_nonfinite:
grads_finite = jmp.all_finite(grads)
loss_scale = loss_scale.adjust(grads_finite)
new_params, new_state, new_opt_state = jmp.select_tree(
grads_finite,
(new_params, new_state, new_opt_state),
(params, state, opt_state))
# Scalars to log (note: we log the mean across all hosts/devices).
scalars = {'train_loss': loss, 'loss_scale': loss_scale.loss_scale}
if FLAGS.mp_skip_nonfinite:
scalars['grads_finite'] = grads_finite
new_state, scalars = jmp.cast_to_full((new_state, scalars))
scalars = jax.lax.pmean(scalars, axis_name='i')
train_state = TrainState(new_params, new_state, new_opt_state, loss_scale)
return train_state, scalars
def initial_state(rng: jax.Array, batch: dataset.Batch) -> TrainState:
"""Computes the initial network state."""
params, state = forward.init(rng, batch, is_training=True)
opt_state = make_optimizer().init(params)
loss_scale = get_initial_loss_scale()
return TrainState(params, state, opt_state, loss_scale)
# NOTE: We use `jit` not `pmap` here because we want to ensure that we see all
# eval data once and this is not easily satisfiable with pmap (e.g. n=3).
# TODO(tomhennigan) Find a solution to allow pmap of eval.
@jax.jit
def eval_batch(
params: hk.Params,
state: hk.State,
batch: dataset.Batch,
) -> jax.Array:
"""Evaluates a batch."""
logits, _ = forward.apply(params, state, None, batch, is_training=False)
predicted_label = jnp.argmax(logits, axis=-1)
correct = jnp.sum(jnp.equal(predicted_label, batch['labels']))
return correct.astype(jnp.float32)
def evaluate(
split: dataset.Split,
params: hk.Params,
state: hk.State,
) -> Scalars:
"""Evaluates the model at the given params/state."""
if split.num_examples % FLAGS.eval_batch_size:
    raise ValueError(f'{split} num examples ({split.num_examples}) must be '
                     f'divisible by the eval batch size {FLAGS.eval_batch_size}')
# Params/state are sharded per-device during training. We just need the copy
# from the first device (since we do not pmap evaluation at the moment).
params, state = jax.tree_util.tree_map(lambda x: x[0], (params, state))
test_dataset = dataset.load(split,
is_training=False,
batch_dims=[FLAGS.eval_batch_size],
transpose=FLAGS.dataset_transpose,
zeros=FLAGS.dataset_zeros)
correct = jnp.array(0)
total = 0
for batch in test_dataset:
correct += eval_batch(params, state, batch)
total += batch['labels'].shape[0]
assert total == split.num_examples, total
return {'top_1_acc': correct.item() / total}
@contextlib.contextmanager
def time_activity(activity_name: str):
logging.info('[Timing] %s start.', activity_name)
start = timeit.default_timer()
yield
duration = timeit.default_timer() - start
logging.info('[Timing] %s finished (Took %.2fs).', activity_name, duration)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
FLAGS.alsologtostderr = True
train_split = dataset.Split.from_string(FLAGS.train_split)
eval_split = dataset.Split.from_string(FLAGS.eval_split)
  # The total batch size is the batch size across all hosts and devices. In a
# multi-host training setup each host will only see a batch size of
# `total_train_batch_size / jax.host_count()`.
total_train_batch_size = FLAGS.train_device_batch_size * jax.device_count()
num_train_steps = (
(train_split.num_examples * FLAGS.train_epochs) // total_train_batch_size)
local_device_count = jax.local_device_count()
train_dataset = dataset.load(
train_split,
is_training=True,
batch_dims=[local_device_count, FLAGS.train_device_batch_size],
dtype=get_policy().compute_dtype,
transpose=FLAGS.dataset_transpose,
zeros=FLAGS.dataset_zeros)
# Assign mixed precision policies to modules. Note that when training in f16
# we keep BatchNorm in full precision. When training with bf16 you can often
# use bf16 for BatchNorm.
mp_policy = get_policy()
bn_policy = get_bn_policy().with_output_dtype(mp_policy.compute_dtype)
# NOTE: The order we call `set_policy` doesn't matter, when a method on a
# class is called the policy for that class will be applied, or it will
# inherit the policy from its parent module.
hk.mixed_precision.set_policy(hk.BatchNorm, bn_policy)
hk.mixed_precision.set_policy(hk.nets.ResNet50, mp_policy)
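  # As an example (not the defaults used here), half-precision training could
  # be requested with flags along the lines of --mp_policy=p=f32,c=f16,o=f32
  # and --mp_scale_type=Dynamic; BatchNorm would then still compute in f32
  # (per mp_bn_policy) while the rest of the ResNet computes in f16.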
if jax.default_backend() == 'gpu':
# TODO(tomhennigan): This could be removed if XLA:GPU's allocator changes.
train_dataset = dataset.double_buffer(train_dataset)
# For initialization we need the same random key on each device.
rng = jax.random.PRNGKey(FLAGS.train_init_random_seed)
rng = jnp.broadcast_to(rng, (local_device_count,) + rng.shape)
# Initialization requires an example input.
batch = next(train_dataset)
train_state = jax.pmap(initial_state)(rng, batch)
# Print a useful summary of the execution of our module.
summary = hk.experimental.tabulate(train_step)(train_state, batch)
for line in summary.split('\n'):
logging.info(line)
eval_every = FLAGS.train_eval_every
log_every = FLAGS.train_log_every
with time_activity('train'):
for step_num in range(num_train_steps):
# Take a single training step.
with jax.profiler.StepTraceAnnotation('train', step_num=step_num):
batch = next(train_dataset)
train_state, train_scalars = train_step(train_state, batch)
# By default we do not evaluate during training, but you can configure
# this with a flag.
if eval_every > 0 and step_num and step_num % eval_every == 0:
with time_activity('eval during train'):
eval_scalars = evaluate(eval_split,
train_state.params, train_state.state)
logging.info('[Eval %s/%s] %s', step_num, num_train_steps, eval_scalars)
# Log progress at fixed intervals.
if step_num and step_num % log_every == 0:
train_scalars = jax.tree_util.tree_map(
lambda v: np.mean(v).item(), jax.device_get(train_scalars))
logging.info('[Train %s/%s] %s',
step_num, num_train_steps, train_scalars)
# Once training has finished we run eval one more time to get final results.
with time_activity('final eval'):
eval_scalars = evaluate(eval_split, train_state.params, train_state.state)
logging.info('[Eval FINAL]: %s', eval_scalars)
if __name__ == '__main__':
dataset.check_versions()
app.run(main)
|
dm-haiku-main
|
examples/imagenet/train.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
|
dm-haiku-main
|
examples/rnn/__init__.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tiny Shakespeare as a language modelling dataset."""
from collections.abc import Iterator, Mapping
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
Batch = Mapping[str, np.ndarray]
NUM_CHARS = 128
def load(
split: tfds.Split,
*,
batch_size: int,
sequence_length: int,
) -> Iterator[Batch]:
"""Creates the Tiny Shakespeare dataset as a character modelling task."""
def preprocess_fn(x: Mapping[str, tf.Tensor]) -> Mapping[str, tf.Tensor]:
x = x['text']
x = tf.strings.unicode_split(x, 'UTF-8')
x = tf.squeeze(tf.io.decode_raw(x, tf.uint8), axis=-1)
x = tf.cast(x, tf.int32)
return {'input': x[:-1], 'target': x[1:]}
ds = tfds.load(name='tiny_shakespeare', split=split)
ds = ds.map(preprocess_fn)
ds = ds.unbatch()
ds = ds.batch(sequence_length, drop_remainder=True)
ds = ds.shuffle(100)
ds = ds.repeat()
ds = ds.batch(batch_size)
ds = ds.map(lambda b: tf.nest.map_structure(tf.transpose, b)) # Time major.
return iter(tfds.as_numpy(ds))
def decode(x: np.ndarray) -> str:
  return ''.join([chr(c) for c in x])
def encode(x: str) -> np.ndarray:
return np.array([ord(s) for s in x])
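# A commented round-trip sketch for the helpers above:
#   encode('hi')                 -> np.array([104, 105])
#   decode(np.array([104, 105])) -> 'hi'
# NUM_CHARS = 128 assumes the text stays within the 7-bit ASCII range.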
|
dm-haiku-main
|
examples/rnn/dataset.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Character-level language modelling with a recurrent network in JAX."""
from typing import Any, NamedTuple
from absl import app
from absl import flags
from absl import logging
import haiku as hk
from examples.rnn import dataset
import jax
from jax import lax
import jax.numpy as jnp
import numpy as np
import optax
import tensorflow_datasets as tfds
TRAIN_BATCH_SIZE = flags.DEFINE_integer('train_batch_size', 32, '')
EVAL_BATCH_SIZE = flags.DEFINE_integer('eval_batch_size', 1000, '')
SEQUENCE_LENGTH = flags.DEFINE_integer('sequence_length', 128, '')
HIDDEN_SIZE = flags.DEFINE_integer('hidden_size', 256, '')
SAMPLE_LENGTH = flags.DEFINE_integer('sample_length', 128, '')
LEARNING_RATE = flags.DEFINE_float('learning_rate', 1e-3, '')
TRAINING_STEPS = flags.DEFINE_integer('training_steps', 100_000, '')
EVALUATION_INTERVAL = flags.DEFINE_integer('evaluation_interval', 100, '')
SAMPLING_INTERVAL = flags.DEFINE_integer('sampling_interval', 100, '')
SEED = flags.DEFINE_integer('seed', 42, '')
class LoopValues(NamedTuple):
tokens: jax.Array
state: Any
rng_key: jax.Array
class TrainingState(NamedTuple):
params: hk.Params
opt_state: optax.OptState
def make_network() -> hk.RNNCore:
"""Defines the network architecture."""
model = hk.DeepRNN([
lambda x: jax.nn.one_hot(x, num_classes=dataset.NUM_CHARS),
hk.LSTM(HIDDEN_SIZE.value),
jax.nn.relu,
hk.LSTM(HIDDEN_SIZE.value),
hk.nets.MLP([HIDDEN_SIZE.value, dataset.NUM_CHARS]),
])
return model
def make_optimizer() -> optax.GradientTransformation:
"""Defines the optimizer."""
return optax.adam(LEARNING_RATE.value)
def sequence_loss(batch: dataset.Batch) -> jax.Array:
"""Unrolls the network over a sequence of inputs & targets, gets loss."""
# Note: this function is impure; we hk.transform() it below.
core = make_network()
sequence_length, batch_size = batch['input'].shape
initial_state = core.initial_state(batch_size)
logits, _ = hk.dynamic_unroll(core, batch['input'], initial_state)
log_probs = jax.nn.log_softmax(logits)
one_hot_labels = jax.nn.one_hot(batch['target'], num_classes=logits.shape[-1])
return -jnp.sum(one_hot_labels * log_probs) / (sequence_length * batch_size)
@jax.jit
def update(state: TrainingState, batch: dataset.Batch) -> TrainingState:
"""Does a step of SGD given inputs & targets."""
_, optimizer = make_optimizer()
_, loss_fn = hk.without_apply_rng(hk.transform(sequence_loss))
gradients = jax.grad(loss_fn)(state.params, batch)
updates, new_opt_state = optimizer(gradients, state.opt_state)
new_params = optax.apply_updates(state.params, updates)
return TrainingState(params=new_params, opt_state=new_opt_state)
def sample(
rng_key: jax.Array,
context: jax.Array,
sample_length: int,
) -> jax.Array:
"""Draws samples from the model, given an initial context."""
# Note: this function is impure; we hk.transform() it below.
assert context.ndim == 1 # No batching for now.
core = make_network()
def body_fn(t: int, v: LoopValues) -> LoopValues:
token = v.tokens[t]
next_logits, next_state = core(token, v.state)
key, subkey = jax.random.split(v.rng_key)
next_token = jax.random.categorical(subkey, next_logits, axis=-1)
new_tokens = v.tokens.at[t + 1].set(next_token)
return LoopValues(tokens=new_tokens, state=next_state, rng_key=key)
logits, state = hk.dynamic_unroll(core, context, core.initial_state(None))
key, subkey = jax.random.split(rng_key)
first_token = jax.random.categorical(subkey, logits[-1])
tokens = jnp.zeros(sample_length, dtype=np.int32)
tokens = tokens.at[0].set(first_token)
initial_values = LoopValues(tokens=tokens, state=state, rng_key=key)
values: LoopValues = lax.fori_loop(0, sample_length, body_fn, initial_values)
return values.tokens
def main(_):
flags.FLAGS.alsologtostderr = True
# Make training dataset.
train_data = dataset.load(
tfds.Split.TRAIN,
batch_size=TRAIN_BATCH_SIZE.value,
sequence_length=SEQUENCE_LENGTH.value)
# Make evaluation dataset(s).
eval_data = { # pylint: disable=g-complex-comprehension
split: dataset.load(
split,
batch_size=EVAL_BATCH_SIZE.value,
sequence_length=SEQUENCE_LENGTH.value)
for split in [tfds.Split.TRAIN, tfds.Split.TEST]
}
# Make loss, sampler, and optimizer.
params_init, loss_fn = hk.without_apply_rng(hk.transform(sequence_loss))
_, sample_fn = hk.without_apply_rng(hk.transform(sample))
opt_init, _ = make_optimizer()
loss_fn = jax.jit(loss_fn)
sample_fn = jax.jit(sample_fn, static_argnums=[3])
# Initialize training state.
rng = hk.PRNGSequence(SEED.value)
initial_params = params_init(next(rng), next(train_data))
initial_opt_state = opt_init(initial_params)
state = TrainingState(params=initial_params, opt_state=initial_opt_state)
# Training loop.
for step in range(TRAINING_STEPS.value + 1):
# Do a batch of SGD.
train_batch = next(train_data)
state = update(state, train_batch)
# Periodically generate samples.
if step % SAMPLING_INTERVAL.value == 0:
context = train_batch['input'][:, 0] # First element of training batch.
assert context.ndim == 1
rng_key = next(rng)
samples = sample_fn(state.params, rng_key, context, SAMPLE_LENGTH.value)
prompt = dataset.decode(context)
continuation = dataset.decode(samples)
logging.info('Prompt: %s', prompt)
logging.info('Continuation: %s', continuation)
# Periodically evaluate training and test loss.
if step % EVALUATION_INTERVAL.value == 0:
for split, ds in eval_data.items():
eval_batch = next(ds)
loss = loss_fn(state.params, eval_batch)
logging.info({
'step': step,
'loss': float(loss),
'split': split,
})
if __name__ == '__main__':
app.run(main)
|
dm-haiku-main
|
examples/rnn/train.py
|
#!/usr/bin/env python
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install script."""
import glob
import os
import pathlib
import setuptools
stub_files = (glob.glob('tensor_annotations/*.pyi') +
glob.glob('tensor_annotations/library_stubs/**/*.pyi',
recursive=True))
# package_data expects paths to be relative to the package directory, so strip
# 'tensor_annotations/' from start of paths
stub_files = [
os.path.join(*pathlib.Path(path).parts[1:])
for path in stub_files
]
with open('README.md', 'r') as f:
long_description = f.read()
setuptools.setup(
name='tensor_annotations',
version='2.0.3',
description=('Enables annotations of tensor shapes in numerical computing '
'libraries. Includes type stubs for TensorFlow and JAX '
'describing how library functions change shapes.'),
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/deepmind/tensor_annotations',
# Copybara takes care of moving files to 'tensor_annotations/'
packages=[
'tensor_annotations',
'tensor_annotations/tests',
],
package_data={'tensor_annotations': stub_files + ['py.typed']},
install_requires=['dm-tree'],
extras_require={'dev': [
'absl-py',
'pytype',
]})
|
tensor_annotations-master
|
setup.py
|
#!/usr/bin/env python
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install script for TensorFlow stubs."""
import os
import pathlib
import shutil
import tempfile
import setuptools
# Note: Copybara takes care of moving files to 'tensorflow-stubs/'.
setuptools.setup(
name='tensor-annotations-tensorflow-stubs',
version='2.0.3',
description='Shape-aware type stubs for TensorFlow.',
long_description='Shape-aware type stubs for TensorFlow. See the `tensor-annotations` package.',
long_description_content_type='text/markdown',
url='https://github.com/deepmind/tensor_annotations',
packages=['tensorflow-stubs'],
package_data={'tensorflow-stubs': ['*.pyi', '*/*.pyi']},
install_requires=['tensor-annotations'],
)
|
tensor_annotations-master
|
tensorflow-stubs/setup.py
|
tensor_annotations-master
|
tensorflow-stubs/tensorflow-stubs/__init__.py
|
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example demonstrating checking of Time/Batch consistency in TensorFlow."""
from typing import cast
from absl import app
from tensor_annotations import axes
from tensor_annotations import tensorflow as ttf
import tensorflow as tf
# pylint: disable=missing-function-docstring
float16 = ttf.float16
Batch = axes.Batch
Time = axes.Time
def sample_batch() -> ttf.Tensor2[float16, Time, Batch]:
  # tf.zeros((x, y)) returns a Tensor2[Any, Any], which is compatible
  # with Tensor2[float16, Time, Batch] => pytype accepts this return.
return tf.zeros((3, 5))
# An example of legacy code annotated with a conventional tensor type rather
# than the shape-annotated version.
def sample_batch_legacy() -> tf.Tensor:
# Even with our custom stubs, tf.zeros([...]) (with a list-shape!) returns an
# unspecific `Any` type, so the type-checker is happy interpreting it as
# tf.Tensor.
return tf.zeros([3, 5])
def train_batch(batch: ttf.Tensor2[float16, Batch, Time]):
b: ttf.Tensor1[float16, Batch] = tf.reduce_max(batch, axis=1)
del b # Unused
# `-> None` is necessary for Mypy to check this function
def transpose_example() -> None:
# From the signature of sample_batch(), x1 is inferred to be of type
  # Tensor2[float16, Time, Batch].
x1 = sample_batch()
# Using our custom stubs for tf.transpose(...), x2 is inferred to be of type
  # Tensor2[float16, Batch, Time].
x2 = tf.transpose(x1)
  # Tensor2[float16, Batch, Time] is compatible with the signature of
  # train_batch(), so we're good! :)
# Try changing `x2` to `x1` - you should find that this script no longer
# passes type check.
train_batch(x2)
# `-> None` is necessary for Mypy to check this function
def legacy_example() -> None:
# From the signature of sample_batch_legacy(), y is inferred to be of
# type tf.Tensor.
y = sample_batch_legacy()
# We explicitly cast it to the desired type. This is a no-op at runtime.
y2 = cast(ttf.Tensor2[float16, Batch, Time], y)
# Alternative syntax for casting; again a no-op.
y3: ttf.Tensor2[float16, Batch, Time] = y # type: ignore
train_batch(y2)
train_batch(y3)
def main(argv):
del argv
transpose_example()
legacy_example()
if __name__ == '__main__':
app.run(main)
|
tensor_annotations-master
|
examples/tensorflow_time_batch.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example demonstrating checking of Time/Batch consistency in JAX."""
from typing import cast
from absl import app
import jax.numpy as jnp
from tensor_annotations import axes
from tensor_annotations import jax as tjax
# pylint: disable=missing-function-docstring
Batch = axes.Batch
Time = axes.Time
float32 = tjax.float32
def sample_batch() -> tjax.Array2[float32, Time, Batch]:
  # jnp.zeros((x, y)) returns an Array2[float32, Any, Any], which is compatible
  # with Array2[float32, Time, Batch] => pytype accepts this return.
return jnp.zeros((3, 5))
# An example of legacy code annotated with a conventional tensor type rather
# than the shape-annotated version.
def sample_batch_legacy() -> jnp.ndarray:
# Even with our custom stubs, jnp.zeros([...]) (with a list-shape!) returns an
# unspecific `Any` type, so the type-checker is happy interpreting it as
# jnp.ndarray.
return jnp.zeros([3, 5])
def train_batch(batch: tjax.Array2[float32, Batch, Time]):
b: tjax.Array1[float32, Batch] = jnp.max(batch, axis=1)
del b # Unused
# `-> None` is necessary for Mypy to check this function
def transpose_example() -> None:
# From the signature of sample_batch(), x1 is inferred to be of type
  # Array2[float32, Time, Batch].
x1 = sample_batch()
# Using our custom stubs for jnp.transpose(...), x2 is inferred to be of type
  # Array2[float32, Batch, Time].
x2 = jnp.transpose(x1)
  # Array2[float32, Batch, Time] is compatible with the signature of
# train_batch(), so we're good! :)
# Try changing `x2` to `x1` - you should find that this script no longer
# passes type check.
train_batch(x2)
# `-> None` is necessary for Mypy to check this function
def legacy_example() -> None:
# From the signature of sample_batch_legacy(), y is inferred to be of
# type jnp.ndarray.
y1 = sample_batch_legacy()
# We explicitly cast it to the desired type. This is a no-op at runtime.
y2 = cast(tjax.Array2[float32, Batch, Time], y1)
# Alternative syntax for casting; again a no-op.
y3: tjax.Array2[float32, Batch, Time] = y1 # type: ignore
train_batch(y2)
train_batch(y3)
def main(argv):
del argv
transpose_example()
legacy_example()
if __name__ == '__main__':
app.run(main)
|
tensor_annotations-master
|
examples/jax_time_batch.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run pytype on Acme and see how long it takes.
Automatically repeats the test a number of times and reports the average and
minimum/maximum.
When run normally, reports the time to type-check without using our stubs.
To check the time *with* the stubs, install the stubs as in the main README.md
and then run `export TYPESHED_HOME="$HOME/typeshed"` before launching
the script.
"""
import datetime
import logging
import os
import shutil
import subprocess
import tempfile
from absl import app
from absl import flags
_NUM_RUNS = flags.DEFINE_integer('num_runs', default=3,
help='Number of times to repeat test')
def main(_):
with tempfile.TemporaryDirectory() as d:
os.chdir(d)
# ===== Download Acme =====
subprocess.run(['git', 'clone', 'https://github.com/deepmind/acme'],
check=True)
os.chdir('acme')
subprocess.run(['git', 'checkout', '4da30b8'], check=True)
os.chdir(d)
check_dir = os.path.join('acme', 'acme', 'agents', 'tf')
# ===== Time how long it takes to run pytype =====
times = []
for run_num in range(_NUM_RUNS.value):
logging.info('Test %d/%d', 1 + run_num, _NUM_RUNS.value)
t1 = datetime.datetime.now()
subprocess.run(['pytype', check_dir,
# Ignore dependencies. (I've tried installing dependencies
# to fix this, but it still chokes on trfl and reverb,
# so giving up for now.)
'--disable', 'import-error'],
check=True)
t2 = datetime.datetime.now()
shutil.rmtree('.pytype') # Remove pytype cache
delta = t2 - t1
times.append(delta)
logging.info('Test %d/%d: %d seconds',
1 + run_num, _NUM_RUNS.value, delta.total_seconds())
# ===== Print statistics =====
mean = sum(times, datetime.timedelta()).total_seconds() / _NUM_RUNS.value
logging.info('Average: %d seconds', mean)
logging.info('Minimum: %d seconds', min(times).total_seconds())
logging.info('Maximum: %d seconds', max(times).total_seconds())
logging.info('All times: %r', times)
if __name__ == '__main__':
app.run(main)
|
tensor_annotations-master
|
benchmarks/benchmark.py
|
#!/usr/bin/env python
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install script for JAX stubs."""
import os
import pathlib
import shutil
import tempfile
import setuptools
# Note: Copybara takes care of moving files to 'jax-stubs/'.
setuptools.setup(
name='tensor-annotations-jax-stubs',
version='2.0.3',
description='Shape-aware type stubs for JAX.',
    long_description='Shape-aware type stubs for JAX. See the `tensor-annotations` package.',
long_description_content_type='text/markdown',
url='https://github.com/deepmind/tensor_annotations',
packages=['jax-stubs'],
package_data={'jax-stubs': ['*.pyi', '*/*.pyi']},
install_requires=['tensor-annotations'],
)
|
tensor_annotations-master
|
jax-stubs/setup.py
|
tensor_annotations-master
|
jax-stubs/jax-stubs/__init__.py
|
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
|
tensor_annotations-master
|
tensor_annotations/__init__.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom tensor classes for NumPy supporting shape parameterisation.
Note that these should only be used for the purposes of type annotation and
should never be instantiated. (Certain IDEs may also use these for
autocompletion.)
Type annotations for these classes are maintained in a separate stubs file,
`numpy.pyi`.
"""
# LINT.IfChange
from typing import Any, Generic, TypeVar
from tensor_annotations import axes
A1 = TypeVar('A1', bound=axes.Axis)
A2 = TypeVar('A2', bound=axes.Axis)
A3 = TypeVar('A3', bound=axes.Axis)
A4 = TypeVar('A4', bound=axes.Axis)
A5 = TypeVar('A5', bound=axes.Axis)
A6 = TypeVar('A6', bound=axes.Axis)
A7 = TypeVar('A7', bound=axes.Axis)
A8 = TypeVar('A8', bound=axes.Axis)
# We need to define DTypes ourselves rather than use e.g. np.uint8 because
# pytype sees NumPy's own DTypes as `Any`.
# pylint: disable=invalid-name,multiple-statements,g-wrong-blank-lines
class DType: pass
class uint8(DType): pass
class uint16(DType): pass
class uint32(DType): pass
class uint64(DType): pass
class int8(DType): pass
class int16(DType): pass
class int32(DType): pass
class int64(DType): pass
class float16(DType): pass
class float32(DType): pass
class float64(DType): pass
class bfloat16(DType): pass
# pylint: enable=invalid-name, multiple-statements,g-wrong-blank-lines
# We want to have an `AnyDType` type that behaves like `Any` but for DTypes.
#
# Should `AnyDType` just be the parent class `DType` itself? No. Consider the
# following example:
#
# def takes_specific_type(x: uint8): ...
# def returns_nonspecific_type() -> DType: ...
# y = returns_nonspecific_type()
#   takes_specific_type(y)
#
# This doesn't type-check correctly. `DType` cannot be used in place of the
# more specific type `uint8`. We want our `AnyDType` type to have the property
# that it can be used *anywhere* - including as an argument to a function that
# takes a specific type. So using `DType` as our `AnyDType` won't work.
#
# What about a union of the dtypes above? Initially I thought no.
# Consider the following example:
#
# def takes_specific_type(x: uint8): ...
# y: Union[uint8, uint16]
#   takes_specific_type(y)
#
# I *thought* this would be a type error, because we can't guarantee that
# `y` is definitely uint8, but it turns out that both mypy and pytype are fine
# with it.
#
# But anyway, we can't do a union of the above types for another reason:
# pytype breaks if we do a union of too many types.
#
# So in the end, we just set this to be an alias of `Any`, so the meaning is
# clearer in code. Unfortunately, it still shows up as `Any` in pytype output.
# But hey, it's the best we can do.
AnyDType = Any
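#
# A short illustration of the consequence (a sketch only, mirroring the tests
# in `tensor_annotations/tests/numpy.py`): because `AnyDType` is just `Any`,
# it is accepted in both directions.
#
#   def wants_uint8(x: Array1[uint8, axes.Batch]): ...
#   def wants_any(x: Array1[AnyDType, axes.Batch]): ...
#
#   a: Array1[AnyDType, axes.Batch]
#   b: Array1[uint8, axes.Batch]
#   wants_uint8(a)  # OK: AnyDType (== Any) is compatible with uint8.
#   wants_any(b)    # OK: uint8 is compatible with AnyDType.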
DT = TypeVar('DT', bound=DType)
class _ArrayBase:
"""Base class for ArrayN classes containing common methods and attributes."""
# These are necessary so that type checkers know we have these methods.
__abs__: Any
  __add__: Any
__float__: Any
__floordiv__: Any
__ge__: Any
__gt__: Any
__le__: Any
__len__: Any
__lt__: Any
__matmul__: Any
__mul__: Any
  __neg__: Any
__pos__: Any
__pow__: Any
__rmatmul__: Any
__rmul__: Any
__sub__: Any
__truediv__: Any
shape: Any
dtype: Any
def __new__(cls, *args, **kwargs):
raise TypeError('tensor_annotations tensors should not be instantiated')
class Array0(Generic[DT], _ArrayBase):
"""An scalar array - from eg `np.zeros(())`."""
pass
class Array1(Generic[DT, A1], _ArrayBase):
"""An array of rank 1."""
pass
class Array2(Generic[DT, A1, A2], _ArrayBase):
"""An array of rank 2."""
pass
class Array3(Generic[DT, A1, A2, A3], _ArrayBase):
"""An array of rank 3."""
pass
class Array4(Generic[DT, A1, A2, A3, A4], _ArrayBase):
"""An array of rank 4."""
pass
class Array5(Generic[DT, A1, A2, A3, A4, A5], _ArrayBase):
"""An array of rank 5."""
pass
class Array6(Generic[DT, A1, A2, A3, A4, A5, A6], _ArrayBase):
"""An array of rank 6."""
pass
class Array7(Generic[DT, A1, A2, A3, A4, A5, A6, A7], _ArrayBase):
"""An array of rank 7."""
pass
class Array8(Generic[DT, A1, A2, A3, A4, A5, A6, A7, A8], _ArrayBase):
"""An array of rank 8."""
pass
# LINT.ThenChange(numpy.pyi)
|
tensor_annotations-master
|
tensor_annotations/numpy.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom tensor classes for JAX supporting shape parameterisation.
Note that these should only be used for the purposes of type annotation and
should never be instantiated. (Certain IDEs may also use these for
autocompletion.)
Type annotations for these classes are maintained in a separate stubs file,
`tensorflow.pyi`.
"""
# LINT.IfChange
from typing import Any, Generic, TypeVar
from tensor_annotations import axes
A1 = TypeVar('A1', bound=axes.Axis)
A2 = TypeVar('A2', bound=axes.Axis)
A3 = TypeVar('A3', bound=axes.Axis)
A4 = TypeVar('A4', bound=axes.Axis)
A5 = TypeVar('A5', bound=axes.Axis)
A6 = TypeVar('A6', bound=axes.Axis)
A7 = TypeVar('A7', bound=axes.Axis)
A8 = TypeVar('A8', bound=axes.Axis)
# We need to define DTypes ourselves rather than use e.g. tf.uint8 because
# according to typing.py, tf.uint8 etc aren't actually types, so they can't
# be used as type arguments.
# pylint: disable=invalid-name,multiple-statements,g-wrong-blank-lines
class DType: pass
class uint8(DType): pass
class uint16(DType): pass
class uint32(DType): pass
class uint64(DType): pass
class int8(DType): pass
class int16(DType): pass
class int32(DType): pass
class int64(DType): pass
class float16(DType): pass
class float32(DType): pass
class float64(DType): pass
class complex64(DType): pass
class complex128(DType): pass
class bfloat16(DType): pass
# Yup, these two definitely are native dtypes in TensorFlow:
# https://www.tensorflow.org/api_docs/python/tf/dtypes
class string(DType): pass
# TensorFlow's boolean dtype is definitely just 'bool'. It's a little annoying
# that it has the same name as the Python built-in, but let's stick with
# TensorFlow's naming.
class bool(DType): pass # pylint: disable=redefined-builtin
# pylint: enable=invalid-name, multiple-statements,g-wrong-blank-lines
# We want to have an `AnyDType` type that behaves like `Any` but for DTypes.
#
# Should `AnyDType` just be the parent class `DType` itself? No. Consider the
# following example:
#
# def takes_specific_type(x: uint8): ...
# def returns_nonspecific_type() -> DType: ...
# y = returns_nonspecific_type()
#   takes_specific_type(y)
#
# This doesn't type-check correctly. `DType` cannot be used in place of the
# more specific type `uint8`. We want our `AnyDType` type to have the property
# that it can be used *anywhere* - including as an argument to a function that
# takes a specific type. So using `DType` as our `AnyDType` won't work.
#
# What about a union of the dtypes above? Initially I thought no.
# Consider the following example:
#
# def takes_specific_type(x: uint8): ...
# y: Union[uint8, uint16]
#   takes_specific_type(y)
#
# I *thought* this would be a type error, because we can't guarantee that
# `y` is definitely uint8, but it turns out that both mypy and pytype are fine
# with it.
#
# But anyway, we can't do a union of the above types for another reason:
# pytype breaks if we do a union of too many types.
#
# So in the end, we just set this to be an alias of `Any`, so the meaning is
# clearer in code. Unfortunately, it still shows up as `Any` in pytype output.
# But hey, it's the best we can do.
AnyDType = Any
DT = TypeVar('DT', bound=DType)
class _TensorBase:
"""Base class for TensorN classes containing common methods and attributes."""
def __new__(cls, *args, **kwargs):
raise TypeError('tensor_annotations tensors should not be instantiated')
# These are necessary so that type checkers know we have these methods.
__abs__: Any
  __add__: Any
__float__: Any
__floordiv__: Any
__ge__: Any
__gt__: Any
__le__: Any
__len__: Any
__lt__: Any
__matmul__: Any
__mul__: Any
  __neg__: Any
__pos__: Any
__pow__: Any
__rmatmul__: Any
__rmul__: Any
__sub__: Any
__truediv__: Any
shape: Any
dtype: Any
def numpy(self) -> Any:
pass
class Tensor0(Generic[DT], _TensorBase):
"""A scalar - produced by e.g. tf.reduce_sum(tf.zeros((2, 3)))."""
pass
class Tensor1(Generic[DT, A1], _TensorBase):
"""A tensor of rank 1."""
pass
class Tensor2(Generic[DT, A1, A2], _TensorBase):
"""A tensor of rank 2."""
pass
class Tensor3(Generic[DT, A1, A2, A3], _TensorBase):
"""A tensor of rank 3."""
pass
class Tensor4(Generic[DT, A1, A2, A3, A4], _TensorBase):
"""A tensor of rank 4."""
pass
class Tensor5(Generic[DT, A1, A2, A3, A4, A5], _TensorBase):
"""A tensor of rank 5."""
pass
class Tensor6(Generic[DT, A1, A2, A3, A4, A5, A6], _TensorBase):
"""A tensor of rank 6."""
pass
class Tensor7(Generic[DT, A1, A2, A3, A4, A5, A6, A7], _TensorBase):
"""A tensor of rank 7."""
pass
class Tensor8(Generic[DT, A1, A2, A3, A4, A5, A6, A7, A8], _TensorBase):
"""A tensor of rank 8."""
pass
Tensor0AnyDType = Tensor0[AnyDType]
Tensor1AnyDType = Tensor1[AnyDType, A1]
Tensor2AnyDType = Tensor2[AnyDType, A1, A2]
Tensor3AnyDType = Tensor3[AnyDType, A1, A2, A3]
Tensor4AnyDType = Tensor4[AnyDType, A1, A2, A3, A4]
Tensor5AnyDType = Tensor5[AnyDType, A1, A2, A3, A4, A5]
Tensor6AnyDType = Tensor6[AnyDType, A1, A2, A3, A4, A5, A6]
Tensor7AnyDType = Tensor7[AnyDType, A1, A2, A3, A4, A5, A6, A7]
Tensor8AnyDType = Tensor8[AnyDType, A1, A2, A3, A4, A5, A6, A7, A8]
# LINT.ThenChange(tensorflow.pyi)
|
tensor_annotations-master
|
tensor_annotations/tensorflow.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom tensor classes for JAX supporting shape parameterisation.
Note that these should only be used for the purposes of type annotation and
should never be instantiated. (Certain IDEs may also use these for
autocompletion.)
Type annotations for these classes are maintained in a separate stubs file,
`jax.pyi`.
"""
# LINT.IfChange
from typing import Any, Generic, TypeVar, Union
from tensor_annotations import axes
A1 = TypeVar('A1', bound=axes.Axis)
A2 = TypeVar('A2', bound=axes.Axis)
A3 = TypeVar('A3', bound=axes.Axis)
A4 = TypeVar('A4', bound=axes.Axis)
A5 = TypeVar('A5', bound=axes.Axis)
A6 = TypeVar('A6', bound=axes.Axis)
A7 = TypeVar('A7', bound=axes.Axis)
A8 = TypeVar('A8', bound=axes.Axis)
# We need to define DTypes ourselves rather than use e.g. jnp.uint8 because
# pytype sees JAX's own DTypes as `Any`.
# pylint: disable=invalid-name,multiple-statements,g-wrong-blank-lines
class DType: pass
class uint8(DType): pass
class uint16(DType): pass
class uint32(DType): pass
class uint64(DType): pass
class int8(DType): pass
class int16(DType): pass
class int32(DType): pass
class int64(DType): pass
class float16(DType): pass
class float32(DType): pass
class float64(DType): pass
class bfloat16(DType): pass
# pylint: enable=invalid-name, multiple-statements,g-wrong-blank-lines
# We want to have an `AnyDType` type that behaves like `Any` but for DTypes.
#
# Should `AnyDType` just be the parent class `DType` itself? No. Consider the
# following example:
#
# def takes_specific_type(x: uint8): ...
# def returns_nonspecific_type() -> DType: ...
# y = returns_nonspecific_type()
#   takes_specific_type(y)
#
# This doesn't type-check correctly. `DType` cannot be used in place of the
# more specific type `uint8`. We want our `AnyDType` type to have the property
# that it can be used *anywhere* - including as an argument to a function that
# takes a specific type. So using `DType` as our `AnyDType` won't work.
#
# What about a union of the dtypes above? Initially I thought no.
# Consider the following example:
#
# def takes_specific_type(x: uint8): ...
# y: Union[uint8, uint16]
#   takes_specific_type(y)
#
# I *thought* this would be a type error, because we can't guarantee that
# `y` is definitely uint8, but it turns out that both mypy and pytype are fine
# with it.
#
# But anyway, we can't do a union of the above types for another reason:
# pytype breaks if we do a union of too many types.
#
# So in the end, we just set this to be an alias of `Any`, so the meaning is
# clearer in code. Unfortunately, it still shows up as `Any` in pytype output.
# But hey, it's the best we can do.
AnyDType = Any
DT = TypeVar('DT', bound=DType)
class _ArrayBase:
"""Base class for ArrayN classes containing common methods and attributes."""
def __new__(cls, *args, **kwargs):
raise TypeError('tensor_annotations tensors should not be instantiated')
# These are necessary so that type checkers know we have these methods.
__abs__: Any
  __add__: Any
__float__: Any
__floordiv__: Any
__ge__: Any
__gt__: Any
__le__: Any
__len__: Any
__lt__: Any
__matmul__: Any
__rmatmul__: Any
__mul__: Any
  __neg__: Any
__pos__: Any
__pow__: Any
__rmul__: Any
__sub__: Any
__truediv__: Any
shape: Any
T: Any
at: Any
dtype: Any
reshape: Any
astype: Any
ndim: Any
class Array0(Generic[DT], _ArrayBase):
"""A scalar - produced by e.g. jnp.sum(jnp.zeros((2, 3)))."""
  # Technically `item` exists on all JAX array instances, but it throws an
  # error for anything other than a scalar array (e.g. jnp.array(0)), so we
  # only declare it here on Array0.
def item(self) -> Union[int, float, bool, complex]:
pass
class Array1(Generic[DT, A1], _ArrayBase):
"""A tensor of rank 1."""
pass
class Array2(Generic[DT, A1, A2], _ArrayBase):
"""A tensor of rank 2."""
pass
class Array3(Generic[DT, A1, A2, A3], _ArrayBase):
"""A tensor of rank 3."""
pass
class Array4(Generic[DT, A1, A2, A3, A4], _ArrayBase):
"""A tensor of rank 4."""
pass
class Array5(Generic[DT, A1, A2, A3, A4, A5], _ArrayBase):
"""A tensor of rank 5."""
pass
class Array6(Generic[DT, A1, A2, A3, A4, A5, A6], _ArrayBase):
"""A tensor of rank 6."""
pass
class Array7(Generic[DT, A1, A2, A3, A4, A5, A6, A7], _ArrayBase):
"""A tensor of rank 7."""
pass
class Array8(Generic[DT, A1, A2, A3, A4, A5, A6, A7, A8], _ArrayBase):
"""A tensor of rank 8."""
pass
Array0AnyDType = Array0[AnyDType]
Array1AnyDType = Array1[AnyDType, A1]
Array2AnyDType = Array2[AnyDType, A1, A2]
Array3AnyDType = Array3[AnyDType, A1, A2, A3]
Array4AnyDType = Array4[AnyDType, A1, A2, A3, A4]
Array5AnyDType = Array5[AnyDType, A1, A2, A3, A4, A5]
Array6AnyDType = Array6[AnyDType, A1, A2, A3, A4, A5, A6]
Array7AnyDType = Array7[AnyDType, A1, A2, A3, A4, A5, A6, A7]
Array8AnyDType = Array8[AnyDType, A1, A2, A3, A4, A5, A6, A7, A8]
# LINT.ThenChange(jax.pyi)
|
tensor_annotations-master
|
tensor_annotations/jax.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Canonical source for common axis types.
We need to make sure that `Batch` in one module means the same as `Batch` in
another module. Since we verify equality of axis types based on the identity of
the type, that means that both modules need to import and use the same `Batch`
type. We therefore provide this file as the canonical reference for axis types
that are likely to be used widely.
"""
from typing import NewType
class Axis:
"""Base type for axis annotations.
User-defined axis types should subclass this.
"""
pass
Batch = NewType('Batch', Axis)
Channels = NewType('Channels', Axis)
Features = NewType('Features', Axis)
Time = NewType('Time', Axis)
Height = NewType('Height', Axis)
Width = NewType('Width', Axis)
Depth = NewType('Depth', Axis)
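# A usage sketch (illustrative; `Layers` is a hypothetical axis name, not part
# of the canonical set above): downstream code can declare additional axis
# types in the same way, e.g.
#
#   Layers = NewType('Layers', Axis)
#
# and then use them in annotations such as Array2[float32, Batch, Layers],
# where Array2 and float32 come from e.g. tensor_annotations.jax.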
|
tensor_annotations-master
|
tensor_annotations/axes.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Render Jinja template for JAX library type stubs."""
from absl import app
import jax
import jax.nn as jnn
import jax.numpy as jnp
import jinja2
from tensor_annotations.tools import templates
_JAX_TEMPLATE_PATH = 'templates/jax.pyi'
_JAX_NUMPY_TEMPLATE_PATH = 'templates/jax_numpy.pyi'
_JAX_NN_TEMPLATE_PATH = 'templates/jax_nn.pyi'
_JAX_STUBS_PATH = 'library_stubs/third_party/py/jax/__init__.pyi'
_JAX_NUMPY_STUBS_PATH = 'library_stubs/third_party/py/jax/numpy/__init__.pyi'
_JAX_NN_STUBS_PATH = 'library_stubs/third_party/py/jax/nn/__init__.pyi'
def main(argv):
del argv
# ===== Render stubs for jax.* =====
  # Currently we just use `Any` for everything in jax.*.
with open(_JAX_TEMPLATE_PATH, 'r') as f:
lines = f.readlines()
jax_template = jinja2.Template(
''.join(lines),
extensions=['jinja2.ext.do'],
)
jax_dir = dir(jax)
# We _don't_ want to stub `jax.numpy` as `Any`, because it would prevent
  # our stubs for jax.numpy.* from being used.
jax_dir.remove('numpy')
# Ditto `jax.nn`.
jax_dir.remove('nn')
# `jax.Array` is actually an important type, so we've added it as a class
# manually in the template, and don't need to stub it as `Any`.
jax_dir.remove('Array')
with open(_JAX_STUBS_PATH, 'w') as f:
f.write(jax_template.render(jax_dir=jax_dir))
# ===== Render stubs for jax.numpy.* =====
with open(_JAX_NUMPY_TEMPLATE_PATH, 'r') as f:
lines = f.readlines()
# Strip IfChange/ThenChange lines.
lines = [l for l in lines if not l.startswith('# LINT')]
jax_numpy_template = jinja2.Template(
''.join(lines),
extensions=['jinja2.ext.do'],
)
jax_numpy_template.globals['reduction_axes'] = templates.reduction_axes
jax_numpy_template.globals['transpose_axes'] = templates.transpose_axes
jax_numpy_template.globals['get_jax_array_type'] = templates.jax_array_type
jax_numpy_template.globals['get_axis_list'] = templates.axis_list
# We need to make sure that the library functions we _haven't_ annotated
# are still present in the type stubs or the type checker will think they
# don't exist at all. We do this in a bit of a hacky way: enumerating through
# `dir(jnp)` and adding an `Any` annotation for everything we find that's
# not currently annotated.
current_stubs = open(_JAX_NUMPY_STUBS_PATH).read()
jnp_dir = []
for x in dir(jnp):
if (x.startswith('_')
or f'def {x}(' in current_stubs
or f'class {x}:' in current_stubs):
continue
jnp_dir.append(x)
with open(_JAX_NUMPY_STUBS_PATH, 'w') as f:
f.write(jax_numpy_template.render(jnp_dir=jnp_dir))
# ===== Render stubs for jax.nn.* =====
with open(_JAX_NN_TEMPLATE_PATH, 'r') as f:
lines = f.readlines()
# Strip IfChange/ThenChange lines.
lines = [l for l in lines if not l.startswith('# LINT')]
jax_nn_template = jinja2.Template(
''.join(lines),
extensions=['jinja2.ext.do'],
)
current_stubs = open(_JAX_NN_STUBS_PATH).read()
jnn_dir = []
for x in dir(jnn):
if (
x.startswith('_')
or f'def {x}(' in current_stubs
or f'class {x}:' in current_stubs
):
continue
jnn_dir.append(x)
with open(_JAX_NN_STUBS_PATH, 'w') as f:
f.write(jax_nn_template.render(jnn_dir=jnn_dir))
if __name__ == '__main__':
app.run(main)
|
tensor_annotations-master
|
tensor_annotations/tools/render_jax_library_template.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Render Jinja template for custom tensor class type stubs."""
from absl import app
from absl import flags
import jinja2
from tensor_annotations.tools import templates
flags.DEFINE_string('template', default=None, help='Template file')
flags.DEFINE_string('out', default=None, help='Output file')
flags.DEFINE_string('vars', default=None, help='A comma-separated list of '
'template substitutions, '
'e.g. foo=1,bar=2')
FLAGS = flags.FLAGS
def main(argv):
del argv
with open(FLAGS.template, 'r') as f:
lines = f.readlines()
# Strip IfChange/ThenChange lines.
lines = [l for l in lines if not l.startswith('# LINT')]
template = jinja2.Template(''.join(lines), extensions=['jinja2.ext.do'])
template.globals['reduction_axes'] = templates.reduction_axes
substitutions = {}
if FLAGS.vars:
for kv in FLAGS.vars.split(','):
k, v = kv.split('=')
substitutions[k] = v
with open(FLAGS.out, 'w') as f:
f.write(template.render(**substitutions))
if __name__ == '__main__':
app.run(main)
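# Example invocation (the template path, output path and substitution values
# below are hypothetical; only the --template, --out and --vars flags defined
# above are real):
#
#   python render_tensor_template.py \
#     --template templates/tensors.pyi \
#     --out tensor_annotations/tensorflow.pyi \
#     --vars n_axes=4,dtype=float32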
|
tensor_annotations-master
|
tensor_annotations/tools/render_tensor_template.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for template rendering."""
import collections
import itertools
ReductionAxes = collections.namedtuple('ReductionAxes',
['n_axes', 'all_axes', 'reduction_axes',
'remaining_n_axes', 'remaining_axes'])
TransposeAxes = collections.namedtuple('TransposeAxes',
['n_axes', 'all_axes', 'transpose_axes',
'result_axes'])
def axis_list(n_axes: int, reverse=False) -> str:
"""Returns a comma-separated list of axis TypeVar short names.
Args:
n_axes: Maximum number of axes to include in the list.
n_axes=1 -> 'A1', n_axes=2 -> 'A1, A2', etc.
reverse: If False, the returned list starts from A1 and counts up to An.
If True, starts at An and counts down to A1.
Returns:
A string containing the list of axes.
For example, get_axis_list(2) -> 'A1, A2'.
"""
axes = range(1, n_axes + 1)
if reverse:
axes = reversed(axes)
return ', '.join(f'A{i}' for i in axes)
def jax_array_type(n_axes: int) -> str:
"""Returns the generic JAX array type, parameterised by a number of axes.
For example, get_jax_array_type(2) -> 'Array2[A1, A2]'.
Args:
n_axes: Rank of array type to return.
Returns:
A string containing array type.
"""
return f'Array{n_axes}[{axis_list(n_axes)}]'
def transpose_axes(n_axes: int):
"""A generator that yields input and output axes of transpose.
Args:
n_axes: Rank of array whose possible transposes to consider.
Yields:
A `TransposeAxes` object for each possible transpose.
  For example, transpose_axes(2) would yield `TransposeAxes` objects
encoding:
Transpose shape [A1, A2] with axes=[0, 1] -> Shape[A1, A2]
[1, 0] -> Shape[A2, A1]
"""
assert n_axes >= 1
# [A1, A2, ..., An]
all_axes = list(range(1, n_axes + 1))
all_axes_str = [f'A{i}' for i in all_axes]
all_axes_str = ', '.join(all_axes_str)
for transpose_axes in itertools.permutations(range(n_axes)):
transpose_axes_str = (f'L{i}' for i in transpose_axes)
transpose_axes_str = ', '.join(transpose_axes_str)
transpose_axes_str = f'Tuple[{transpose_axes_str}]'
result_axes = (all_axes[i] for i in transpose_axes)
if result_axes:
result_axes_str = (f'A{i}' for i in result_axes)
result_axes_str = ', '.join(result_axes_str)
else:
result_axes_str = ''
yield TransposeAxes(n_axes=n_axes,
all_axes=all_axes_str,
transpose_axes=transpose_axes_str,
result_axes=result_axes_str)
def reduction_axes(n_axes: int):
"""A generator that yields input and output axes of reduction operations.
Args:
n_axes: Rank of array whose possible reductions to consider.
Yields:
A `ReductionAxes` object for each possible reduction (where axes
increase in value - e.g. we don't consider `axes=(1, 0)` - to cut down
on the size of the stubs generated).
  For example, reduction_axes(2) would yield `ReductionAxes` objects
encoding:
Reduce shape [A1, A2] over axes 0 -> shape[A2]
1 -> shape[A1]
-1 -> shape[A1]
0, 1 -> shape[]
"""
assert n_axes >= 1
final_axis = n_axes - 1
# [A1, A2, ..., An]
all_axes_str = [f'A{i}' for i in range(1, n_axes + 1)]
all_axes_str = ', '.join(all_axes_str)
n_reduction_axes_iter = range(1, n_axes + 1)
for n_reduction_axes in n_reduction_axes_iter:
# First, make a list of all possible permutations of reduction axes.
reduction_axes_list = []
for reduction_axes_combo in itertools.combinations(
range(n_axes),
n_reduction_axes
):
# Thanks to `combinations`, `reduction_axes_combo` is already sorted.
reduction_axes_list.append(reduction_axes_combo)
# Also consider permutations where we refer to the final axis as '-1'.
if final_axis in reduction_axes_combo:
reduction_axes_list.append((*reduction_axes_combo[:-1], -1))
# Second, for each permutation of reduction axes, yield a ReductionAxes.
for reduction_axes in reduction_axes_list:
reduction_axes_str = (
f'L{i}' if i != -1 else 'LN1'
for i in reduction_axes
)
reduction_axes_str = ', '.join(reduction_axes_str)
if len(reduction_axes) != 1:
reduction_axes_str = f'Tuple[{reduction_axes_str}]'
concrete_reduction_axes = [
(n_axes - 1) if axis == -1 else axis
for axis in reduction_axes
]
remaining_axes = set(range(n_axes)) - set(concrete_reduction_axes)
remaining_axes = sorted(tuple(remaining_axes))
remaining_n_axes = len(remaining_axes)
if remaining_axes:
remaining_axes_str = (f'A{i + 1}' for i in remaining_axes)
remaining_axes_str = ', '.join(remaining_axes_str)
remaining_axes_str = ', ' + remaining_axes_str
else:
remaining_axes_str = ''
yield ReductionAxes(n_axes,
all_axes_str,
reduction_axes_str,
remaining_n_axes,
remaining_axes_str)
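# A minimal demo of how these generators are typically consumed (assumed
# usage; the real callers are the Jinja templates under `templates/`). Run
# this module directly to print the generated axis combinations for rank 2:
if __name__ == '__main__':
  for t in transpose_axes(2):
    print(f'transpose axes={t.transpose_axes} -> result=[{t.result_axes}]')
  for r in reduction_axes(2):
    print(f'reduce {r.reduction_axes} -> rank {r.remaining_n_axes}')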
|
tensor_annotations-master
|
tensor_annotations/tools/templates.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Render Jinja template for TensorFlow library type stubs."""
from absl import app
import jinja2
from tensor_annotations.tools import templates
_TEMPLATE_PATH = 'templates/tensorflow.pyi'
_STUBS_PATH = 'tensorflow_stubs.pyi'
def main(argv):
del argv
with open(_TEMPLATE_PATH, 'r') as f:
lines = f.readlines()
# Strip IfChange/ThenChange lines.
lines = [l for l in lines if not l.startswith('# LINT')]
template = jinja2.Template(''.join(lines), extensions=['jinja2.ext.do'])
template.globals['reduction_axes'] = templates.reduction_axes
template.globals['transpose_axes'] = templates.transpose_axes
template.globals['get_axis_list'] = templates.axis_list
with open(_STUBS_PATH, 'w') as f:
f.write(template.render())
if __name__ == '__main__':
app.run(main)
|
tensor_annotations-master
|
tensor_annotations/tools/render_tensorflow_library_template.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Render Jinja template for NumPy library type stubs."""
from absl import app
import jinja2
import numpy as np
from tensor_annotations.tools import templates
_NUMPY_TEMPLATE_PATH = 'templates/numpy.pyi'
_NUMPY_STUBS_PATH = 'library_stubs/third_party/py/numpy/__init__.pyi'
def main(argv):
del argv
with open(_NUMPY_TEMPLATE_PATH, 'r') as f:
lines = f.readlines()
# Strip IfChange/ThenChange lines.
lines = [l for l in lines if not l.startswith('# LINT')]
numpy_template = jinja2.Template(
''.join(lines),
extensions=['jinja2.ext.do'],
)
numpy_template.globals['reduction_axes'] = templates.reduction_axes
numpy_template.globals['transpose_axes'] = templates.transpose_axes
numpy_template.globals['get_axis_list'] = templates.axis_list
# We need to make sure that the library functions we _haven't_ annotated
# are still present in the type stubs or the type checker will think they
# don't exist at all. We do this in a bit of a hacky way: enumerating through
# `dir(np)` and adding an `Any` annotation for everything we find that's
# not currently annotated.
current_stubs = open(_NUMPY_STUBS_PATH).read()
np_dir = []
for x in dir(np):
if (x.startswith('_')
or f'def {x}(' in current_stubs
or f'class {x}:' in current_stubs):
continue
np_dir.append(x)
with open(_NUMPY_STUBS_PATH, 'w') as f:
f.write(numpy_template.render(np_dir=np_dir))
if __name__ == '__main__':
app.run(main)
|
tensor_annotations-master
|
tensor_annotations/tools/render_numpy_library_template.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for NumPy stubs."""
from typing import cast, NewType, SupportsFloat
from absl.testing import absltest
import numpy as np
from tensor_annotations import axes
from tensor_annotations.axes import Batch
from tensor_annotations.numpy import AnyDType
from tensor_annotations.numpy import Array0
from tensor_annotations.numpy import Array1
from tensor_annotations.numpy import Array2
from tensor_annotations.numpy import float32
from tensor_annotations.numpy import float64
from tensor_annotations.numpy import int16
from tensor_annotations.numpy import int8
from tensor_annotations.tests import utils
A1 = NewType('A1', axes.Axis)
A2 = NewType('A2', axes.Axis)
A3 = NewType('A3', axes.Axis)
# It's less than ideal that we have to repeat imports etc. here for pytype, but
# this seems like the best balance between readability and complexity.
_PREAMBLE = """
from typing import cast, NewType, SupportsFloat
import numpy as np
from tensor_annotations import axes
from tensor_annotations.axes import Batch
from tensor_annotations.numpy import AnyDType, float32, float64, int8, int16
from tensor_annotations.numpy import Array0, Array1, Array2
A1 = NewType('A1', axes.Axis)
A2 = NewType('A2', axes.Axis)
A3 = NewType('A3', axes.Axis)
"""
class NumPyStubTests(absltest.TestCase):
"""Tests for numpy.* stubs."""
def testTranspose_InferredShapeMatchesActualShape(self):
with utils.SaveCodeAsString() as code_saver:
x: Array2[AnyDType, A1, A2] = np.zeros((1, 2))
y = np.transpose(x)
y2 = x.T
inferred = utils.pytype_infer_shapes(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.y, y.shape)
self.assertEqual(inferred.y2, y2.shape)
def testUnaryOperator_ReturnCustomType(self):
"""Confirms that things like np.abs() don't change the shape."""
with utils.SaveCodeAsString() as code_saver:
x: Array1[AnyDType, A1] = np.zeros((1,))
# Let's just test a representative subset.
a = np.abs(x) # pylint: disable=unused-variable
b = np.sin(x) # pylint: disable=unused-variable
c = np.floor(x) # pylint: disable=unused-variable
d = np.ones_like(x) # pylint: disable=unused-variable
e = np.sign(x) # pylint: disable=unused-variable
f = np.round(x) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
expected = 'Array1[Any, A1]'
self.assertEqual(inferred.a, expected)
self.assertEqual(inferred.b, expected)
self.assertEqual(inferred.c, expected)
self.assertEqual(inferred.d, expected)
self.assertEqual(inferred.e, expected)
self.assertEqual(inferred.f, expected)
def testZerosOnes_ReturnsCorrectShape(self):
"""Confirms that np.zeros() returns a tensor_annotations type."""
with utils.SaveCodeAsString() as code_saver:
a = np.zeros(()) # pylint: disable=unused-variable
b = np.ones(()) # pylint: disable=unused-variable
c = np.zeros((1,)) # pylint: disable=unused-variable
d = np.ones((1,)) # pylint: disable=unused-variable
e = np.zeros((1, 1)) # pylint: disable=unused-variable
f = np.ones((1, 1)) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.a, 'Array0')
self.assertEqual(inferred.b, 'Array0')
self.assertEqual(inferred.c, 'Array1')
self.assertEqual(inferred.d, 'Array1')
self.assertEqual(inferred.e, 'Array2')
self.assertEqual(inferred.f, 'Array2')
def testSum_InferredMatchesActualShape(self):
"""Tests whether np.sum() return the right shapes."""
with utils.SaveCodeAsString() as code_saver:
x: Array2[float64, A1, A2] = np.zeros((1, 2))
y1 = np.sum(x, axis=0)
y2 = np.sum(x, axis=1)
y3 = np.sum(x, axis=(0, 1))
y4 = np.sum(x)
inferred_types = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
inferred_shapes = utils.pytype_infer_shapes(_PREAMBLE + code_saver.code)
self.assertEqual(inferred_shapes.y1, y1.shape)
self.assertEqual(inferred_shapes.y2, y2.shape)
# y3 and y4 should just be scalars.
self.assertEqual(type(y3), np.float64)
self.assertEqual(type(y4), np.float64)
self.assertEqual(inferred_types.y3, 'float64')
self.assertEqual(inferred_types.y4, 'float64')
def testSumKeepdimsTrue_ReturnsAny(self):
# We haven't got around to making stubs for keepdims=True yet;
# make sure the type reflects that.
with utils.SaveCodeAsString() as code_saver:
x: Array1[AnyDType, A1] = np.zeros((1,))
a = np.sum(x, axis=0, keepdims=True) # pylint: disable=unused-variable
b = np.sum(x, keepdims=True) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.a, 'Any')
self.assertEqual(inferred.b, 'Any')
def testTensorAdd_ReturnsCustomType(self):
with utils.SaveCodeAsString() as code_saver:
x: Array1[AnyDType, A1] = np.zeros((1,))
a = x + 1 # pylint: disable=unused-variable
b = x + x # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual('Array1[Any, A1]', inferred.a)
self.assertEqual('Array1[Any, A1]', inferred.b)
def testMatmul_InferredMatchesActualShape(self):
with utils.SaveCodeAsString() as code_saver:
x: Array2[AnyDType, A1, A2] = np.zeros((1, 2))
y: Array2[AnyDType, A2, A3] = np.zeros((2, 3))
xy = x @ y
inferred = utils.pytype_infer_shapes(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.xy, xy.shape)
def testArrayUnaryOp_ReturnsCorrectTypeAndShape(self):
"""Confirms that unary functions like abs() don't change the shape."""
with utils.SaveCodeAsString() as code_saver:
x0 = cast(Array0[AnyDType], np.array(()))
y1 = abs(x0) # pylint: disable=unused-variable
y2 = -x0 # pylint: disable=unused-variable
x1 = cast(Array1[AnyDType, A1], np.array([0]))
y3 = abs(x1) # pylint: disable=unused-variable
y4 = -x1 # pylint: disable=unused-variable
x2 = cast(Array2[AnyDType, A1, A2], np.array([[0]]))
y5 = abs(x2) # pylint: disable=unused-variable
y6 = -x2 # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual('Array0', inferred.y1)
self.assertEqual('Array0', inferred.y2)
self.assertEqual('Array1[Any, A1]', inferred.y3)
self.assertEqual('Array1[Any, A1]', inferred.y4)
self.assertEqual('Array2[Any, A1, A2]', inferred.y5)
self.assertEqual('Array2[Any, A1, A2]', inferred.y6)
def testBinaryOpWithScalar_InferredMatchesActualShape(self):
with utils.SaveCodeAsString() as code_saver:
x: Array2[AnyDType, A1, A2] = np.zeros((1, 2))
y1 = x + 1.0
y2 = x - 1.0
y3 = x / 1.0
y4 = x * 1.0
inferred = utils.pytype_infer_shapes(_PREAMBLE + code_saver.code)
self.assertEqual(y1.shape, inferred.y1)
self.assertEqual(y2.shape, inferred.y2)
self.assertEqual(y3.shape, inferred.y3)
self.assertEqual(y4.shape, inferred.y4)
def testBinaryOpWithBroadcast_InferredMatchesActualShape(self):
with utils.SaveCodeAsString() as code_saver:
a: Array2[AnyDType, A1, A2] = np.ones((1, 2))
b: Array1[AnyDType, A2] = np.ones((2,))
y1 = a + b
y2 = a - b
y3 = a / b
y4 = a * b
inferred = utils.pytype_infer_shapes(_PREAMBLE + code_saver.code)
self.assertEqual(y1.shape, inferred.y1)
self.assertEqual(y2.shape, inferred.y2)
self.assertEqual(y3.shape, inferred.y3)
self.assertEqual(y4.shape, inferred.y4)
def testBinaryOpWithSameShape_InferredMatchesActualShape(self):
with utils.SaveCodeAsString() as code_saver:
a: Array2[AnyDType, A1, A2] = np.ones((1, 2))
b: Array2[AnyDType, A1, A2] = np.ones((1, 2))
y1 = a + b
y2 = a - b
y3 = a / b
y4 = a * b
inferred = utils.pytype_infer_shapes(_PREAMBLE + code_saver.code)
self.assertEqual(y1.shape, inferred.y1)
self.assertEqual(y2.shape, inferred.y2)
self.assertEqual(y3.shape, inferred.y3)
self.assertEqual(y4.shape, inferred.y4)
def testShapeAttribute_HasCorrectLength(self):
with utils.SaveCodeAsString() as code_saver:
x0 = np.zeros(())
x1 = np.zeros((1,))
x2 = np.zeros((1, 2))
x3 = np.zeros((1, 2, 3))
x4 = np.zeros((1, 2, 3, 4))
x0_shape = x0.shape # pylint: disable=unused-variable
x1_shape = x1.shape # pylint: disable=unused-variable
x2_shape = x2.shape # pylint: disable=unused-variable
x3_shape = x3.shape # pylint: disable=unused-variable
x4_shape = x4.shape # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(x0_shape, ())
self.assertEqual(x1_shape, (1,))
self.assertEqual(x2_shape, (1, 2))
self.assertEqual(x3_shape, (1, 2, 3))
self.assertEqual(x4_shape, (1, 2, 3, 4))
self.assertEqual('Tuple[()]', inferred.x0_shape)
self.assertEqual('Tuple[int]', inferred.x1_shape)
self.assertEqual('Tuple[int, int]', inferred.x2_shape)
self.assertEqual('Tuple[int, int, int]', inferred.x3_shape)
self.assertEqual('Tuple[int, int, int, int]', inferred.x4_shape)
def testArray0Item_ReturnsIntFloatBoolComplexUnion(self):
with utils.SaveCodeAsString() as code_saver:
x = cast(Array0[AnyDType], np.zeros(()))
y = x.item() # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.y, 'Union[bool, complex, float, int]')
def testArray0_CanBeConvertedToFloat(self):
with utils.SaveCodeAsString() as code_saver:
x = np.zeros(())
y = float(x) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.y, 'float')
def testArray0_SupportsFloat(self):
with utils.SaveCodeAsString() as code_saver:
def foo(x: SupportsFloat):
return x
x = np.zeros(())
foo(x)
utils.assert_pytype_succeeds(_PREAMBLE + code_saver.code)
class NumPyDtypeTests(absltest.TestCase):
"""Tests for data types inferred from NumPy type stubs using pytype."""
def testTranspose_ReturnsSameDtypeAsInput(self):
"""Tests that np.transpose() doesn't change the dtype."""
with utils.SaveCodeAsString() as code_saver:
x8: Array2[int8, A1, A1] = np.array([[0]], dtype=np.int8)
x16: Array2[int16, A1, A1] = np.array([[0]], dtype=np.int16)
y8 = np.transpose(x8) # pylint: disable=unused-variable
y16 = np.transpose(x16) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.y8, 'Array2[int8, A1, A1]')
self.assertEqual(inferred.y16, 'Array2[int16, A1, A1]')
def testZerosOnes_ReturnsAnyDType(self):
"""Tests that np.zeros and np.ones returns AnyDType."""
with utils.SaveCodeAsString() as code_saver:
a = np.zeros(()) # pylint: disable=unused-variable
b = np.ones(()) # pylint: disable=unused-variable
c = np.zeros((1,)) # pylint: disable=unused-variable
d = np.ones((1,)) # pylint: disable=unused-variable
e = np.zeros((1, 1)) # pylint: disable=unused-variable
f = np.ones((1, 1)) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
    # These should be e.g. Array1[AnyDType, Any], but AnyDType is currently
    # aliased to `Any`, and pytype doesn't print type arguments at all when
    # they're all `Any`, so we just compare to e.g. Array1.
self.assertEqual(inferred.a, 'Array0')
self.assertEqual(inferred.b, 'Array0')
self.assertEqual(inferred.c, 'Array1')
self.assertEqual(inferred.d, 'Array1')
self.assertEqual(inferred.e, 'Array2')
self.assertEqual(inferred.f, 'Array2')
def testSum_ReturnsSameDtypeAsInput(self):
"""Tests that np.sum() doesn't change the dtype."""
with utils.SaveCodeAsString() as code_saver:
x32: Array1[float32, A1] = np.array([0.0], dtype=float32) # pylint: disable=unused-variable
x64: Array1[float64, A1] = np.array([0.0], dtype=float64) # pylint: disable=unused-variable
y32: Array2[float32, A1, A1] = np.array([[0.0]], dtype=float32) # pylint: disable=unused-variable
y64: Array2[float64, A1, A1] = np.array([[0.0]], dtype=float64) # pylint: disable=unused-variable
xsum32 = np.sum(x32, axis=0) # pylint: disable=unused-variable
xsum64 = np.sum(x64, axis=0) # pylint: disable=unused-variable
ysum32 = np.sum(y32, axis=0) # pylint: disable=unused-variable
ysum64 = np.sum(y64, axis=0) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.xsum32, 'float32')
self.assertEqual(inferred.xsum64, 'float64')
self.assertEqual(inferred.ysum32, 'Array1[float32, A1]')
self.assertEqual(inferred.ysum64, 'Array1[float64, A1]')
def testArrayAdd_ReturnsAnyDType(self):
"""Tests that e.g. `x + 1` has dtype AnyDType."""
with utils.SaveCodeAsString() as code_saver:
      x: Array1[int8, A1] = np.array([0], dtype=np.int8)
a = x + 1 # pylint: disable=unused-variable
b = x + x # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
# AnyDType is printed as Any in pytype output.
self.assertEqual(inferred.a, 'Array1[Any, A1]')
self.assertEqual(inferred.b, 'Array1[Any, A1]')
def testArrayUnaryOp_ReturnsSameDTypeAsInput(self):
"""Tests that e.g. `-x` has the same dtype as `x`."""
with utils.SaveCodeAsString() as code_saver:
a8: Array1[int8, A1] = np.array([0], dtype=np.int8)
b8 = abs(a8) # pylint: disable=unused-variable
c8 = -a8 # pylint: disable=unused-variable
a16: Array1[int16, A1] = np.array([0], dtype=np.int16)
b16 = abs(a16) # pylint: disable=unused-variable
c16 = -a16 # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.b8, 'Array1[int8, A1]')
self.assertEqual(inferred.c8, 'Array1[int8, A1]')
self.assertEqual(inferred.b16, 'Array1[int16, A1]')
self.assertEqual(inferred.c16, 'Array1[int16, A1]')
def testBinaryOpWithScalar_ReturnsAnyDType(self):
"""Tests that e.g. `x + 1` has dtype AnyDType."""
with utils.SaveCodeAsString() as code_saver:
x: Array1[int8, A1] = np.array([0], dtype=np.int8)
y1 = x + 1 # pylint: disable=unused-variable
y2 = x - 1 # pylint: disable=unused-variable
y3 = x / 1 # pylint: disable=unused-variable
y4 = x * 1 # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
# pytype displays AnyDType as Any.
self.assertEqual(inferred.y1, 'Array1[Any, A1]')
self.assertEqual(inferred.y2, 'Array1[Any, A1]')
self.assertEqual(inferred.y3, 'Array1[Any, A1]')
self.assertEqual(inferred.y4, 'Array1[Any, A1]')
def testBinaryOpWithArray_ReturnsAnyDType(self):
"""Tests that e.g. adding two arrays results in dtype AnyDType."""
with utils.SaveCodeAsString() as code_saver:
a: Array1[int8, A1] = np.array([0], dtype=np.int8)
b: Array1[int8, A1] = np.array([0], dtype=np.int8)
y1 = a + b # pylint: disable=unused-variable
y2 = a - b # pylint: disable=unused-variable
y3 = a / b # pylint: disable=unused-variable
y4 = a * b # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
# pytype displays AnyDType as Any.
self.assertEqual(inferred.y1, 'Array1[Any, A1]')
self.assertEqual(inferred.y2, 'Array1[Any, A1]')
self.assertEqual(inferred.y3, 'Array1[Any, A1]')
self.assertEqual(inferred.y4, 'Array1[Any, A1]')
def testFunctionWithInt8Argument_AcceptsInt8Value(self):
"""Tests whether a function will accept a value with the right dtype."""
with utils.SaveCodeAsString() as code_saver:
def foo(_: Array1[int8, Batch]):
pass
x = cast(Array1[int8, Batch], np.array([0], dtype=np.int8))
foo(x)
utils.assert_pytype_succeeds(_PREAMBLE + code_saver.code)
def testFunctionWithInt8Argument_RejectsInt16Value(self):
"""Tests whether a function will reject a value with the wrong dtype."""
with utils.SaveCodeAsString() as code_saver:
def foo(_: Array1[int8, Batch]):
pass
x = cast(Array1[int16, Batch], np.array([0], dtype=np.int16))
foo(x)
utils.assert_pytype_fails(_PREAMBLE + code_saver.code)
def testFunctionWithAnyDTypeArgument_AcceptsInt8Value(self):
"""Tests whether AnyDType makes a function argument compatible with all."""
with utils.SaveCodeAsString() as code_saver:
def foo(_: Array1[AnyDType, Batch]):
pass
x = cast(Array1[int8, Batch], np.array([0], dtype=np.int8))
foo(x)
utils.assert_pytype_succeeds(_PREAMBLE + code_saver.code)
def testFunctionWithInt8Argument_AcceptsAnyDTypeValue(self):
"""Tests whether AnyDType is compatible with an arbitrary argument dtype."""
with utils.SaveCodeAsString() as code_saver:
def foo(_: Array1[int8, Batch]):
pass
x = cast(Array1[AnyDType, Batch], np.array([0]))
foo(x)
utils.assert_pytype_succeeds(_PREAMBLE + code_saver.code)
if __name__ == '__main__':
absltest.main()
|
tensor_annotations-master
|
tensor_annotations/tests/numpy.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TensorFlow stubs."""
from typing import Any, NewType, SupportsFloat, TypeVar
from absl.testing import absltest # For sharded test support
from tensor_annotations import axes
from tensor_annotations.axes import Batch
from tensor_annotations.axes import Time
import tensor_annotations.tensorflow as ttf
from tensor_annotations.tensorflow import AnyDType
from tensor_annotations.tensorflow import float32
from tensor_annotations.tensorflow import float64
from tensor_annotations.tensorflow import int16
from tensor_annotations.tensorflow import int8
from tensor_annotations.tensorflow import Tensor0
from tensor_annotations.tensorflow import Tensor1
from tensor_annotations.tensorflow import Tensor1AnyDType
from tensor_annotations.tensorflow import Tensor2
from tensor_annotations.tests import utils
import tensorflow as tf
A1 = NewType('A1', axes.Axis)
A2 = NewType('A2', axes.Axis)
A3 = NewType('A3', axes.Axis)
AxisTypeVar = TypeVar('AxisTypeVar')
# It's less than ideal that we have to repeat imports etc. here for pytype, but
# this seems like the best balance between readability and complexity.
_PREAMBLE = """
from typing import Any, NewType, SupportsFloat, TypeVar
import tensorflow as tf
from tensor_annotations import axes
from tensor_annotations.axes import Batch, Time
import tensor_annotations.tensorflow as ttf
from tensor_annotations.tensorflow import AnyDType
from tensor_annotations.tensorflow import float32, float64, int8, int16
from tensor_annotations.tensorflow import Tensor0, Tensor1, Tensor1AnyDType, Tensor2
A1 = NewType('A1', axes.Axis)
A2 = NewType('A2', axes.Axis)
A3 = NewType('A3', axes.Axis)
AxisTypeVar = TypeVar('AxisTypeVar')
"""
class TensorFlowShapeTests(absltest.TestCase):
"""Tests for shapes inferred from TensorFlow type stubs using pytype."""
def testTranspose_InferredMatchesActualShape(self):
with utils.SaveCodeAsString() as code_saver:
x: Tensor2[Any, A1, A2] = tf.zeros((1, 2))
y = tf.transpose(x)
inferred = utils.pytype_infer_shapes(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.y, y.shape)
def testUnaryOperator_ReturnCustomType(self):
"""Tests that operators like `tf.sin` return tensor_annotations types."""
with utils.SaveCodeAsString() as code_saver:
x: Tensor1[Any, A1] = tf.zeros((1,))
# Let's just test a representative subset.
a = tf.abs(x) # pylint: disable=unused-variable
b = tf.sin(x) # pylint: disable=unused-variable
c = tf.floor(x) # pylint: disable=unused-variable
d = tf.ones_like(x) # pylint: disable=unused-variable
e = tf.round(x) # pylint: disable=unused-variable
f = tf.sign(x) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
# The `Any` dtype argument shows up verbatim in pytype's output.
expected = 'Tensor1[Any, A1]'
self.assertEqual(inferred.a, expected)
self.assertEqual(inferred.b, expected)
self.assertEqual(inferred.c, expected)
self.assertEqual(inferred.d, expected)
self.assertEqual(inferred.e, expected)
self.assertEqual(inferred.f, expected)
def testMathUnaryOperator_ReturnCustomType(self):
"""Tests that operators like `tf.math.sin` return the correct types."""
with utils.SaveCodeAsString() as code_saver:
x: Tensor1[Any, A1] = tf.zeros((1,))
# Let's just test a representative subset.
a = tf.math.abs(x) # pylint: disable=unused-variable
b = tf.math.sin(x) # pylint: disable=unused-variable
c = tf.math.floor(x) # pylint: disable=unused-variable
d = tf.math.round(x) # pylint: disable=unused-variable
e = tf.math.sign(x) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
# The `Any` dtype argument shows up verbatim in pytype's output.
expected = 'Tensor1[Any, A1]'
self.assertEqual(inferred.a, expected)
self.assertEqual(inferred.b, expected)
self.assertEqual(inferred.c, expected)
self.assertEqual(inferred.d, expected)
self.assertEqual(inferred.e, expected)
def testZerosOnes_ReturnsCorrectShape(self):
"""Tests that e.g. `tf.zeros` returns the correct types."""
with utils.SaveCodeAsString() as code_saver:
a = tf.zeros(()) # pylint: disable=unused-variable
b = tf.ones(()) # pylint: disable=unused-variable
c = tf.zeros((1,)) # pylint: disable=unused-variable
d = tf.ones((1,)) # pylint: disable=unused-variable
e = tf.zeros((1, 1)) # pylint: disable=unused-variable
f = tf.ones((1, 1)) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.a, 'Tensor0')
self.assertEqual(inferred.b, 'Tensor0')
self.assertEqual(inferred.c, 'Tensor1')
self.assertEqual(inferred.d, 'Tensor1')
self.assertEqual(inferred.e, 'Tensor2')
self.assertEqual(inferred.f, 'Tensor2')
def testSum_InferredMatchesActualShape(self):
"""Tests that `tf.reduce_sum` returns the correct types."""
with utils.SaveCodeAsString() as code_saver:
x: Tensor1[Any, A1] = tf.zeros((1,))
y: Tensor2[Any, A1, A2] = tf.zeros((1, 2))
x0 = tf.reduce_sum(x, axis=0)
y0 = tf.reduce_sum(y, axis=0)
y1 = tf.reduce_sum(y, axis=1)
yn1 = tf.reduce_sum(y, axis=-1)
inferred = utils.pytype_infer_shapes(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.x0, x0.shape)
self.assertEqual(inferred.y0, y0.shape)
self.assertEqual(inferred.y1, y1.shape)
self.assertEqual(inferred.yn1, yn1.shape)
def testMatmul_InferredMatchesActualShape(self):
"""Tests that `x @ y` returns the correct types."""
with utils.SaveCodeAsString() as code_saver:
x: Tensor2[Any, A1, A2] = tf.zeros((1, 2))
y: Tensor2[Any, A2, A3] = tf.zeros((2, 3))
xy = x @ y
inferred = utils.pytype_infer_shapes(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.xy, xy.shape)
def testTensorAdd_ReturnsCustomType(self):
"""Tests that addition returns the correct types."""
with utils.SaveCodeAsString() as code_saver:
x: Tensor1[Any, A1] = tf.zeros((1,))
a = x + 1 # pylint: disable=unused-variable
b = x + x # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
# The `Any` dtype argument shows up verbatim in pytype's output.
self.assertEqual(inferred.a, 'Tensor1[Any, A1]')
self.assertEqual(inferred.b, 'Tensor1[Any, A1]')
def testTensorUnaryOp_ReturnsCorrectTypeAndShape(self):
"""Tests that e.g. `-x` has the correct type."""
with utils.SaveCodeAsString() as code_saver:
x1: Tensor0[int16] = tf.zeros(())
y1 = abs(x1) # pylint: disable=unused-variable
y2 = -x1 # pylint: disable=unused-variable
x2: Tensor1[int16, A1] = tf.zeros((1,))
y3 = abs(x2) # pylint: disable=unused-variable
y4 = -x2 # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual('Tensor0[int16]', inferred.y1)
self.assertEqual('Tensor0[int16]', inferred.y2)
# The `Any` dtype argument shows up verbatim in pytype's output.
self.assertEqual('Tensor1[int16, A1]', inferred.y3)
self.assertEqual('Tensor1[int16, A1]', inferred.y4)
def testBinaryOpWithScalar_InferredMatchesActualShape(self):
"""Tests that e.g. `x + 1` has the correct type."""
with utils.SaveCodeAsString() as code_saver:
x: Tensor2[Any, A1, A2] = tf.zeros((1, 2))
y1 = x + 1.0
y2 = x - 1.0
y3 = x / 1.0
y4 = x * 1.0
inferred = utils.pytype_infer_shapes(_PREAMBLE + code_saver.code)
self.assertEqual(y1.shape, inferred.y1)
self.assertEqual(y2.shape, inferred.y2)
self.assertEqual(y3.shape, inferred.y3)
self.assertEqual(y4.shape, inferred.y4)
def testBinaryOpWithBroadcast_InferredMatchesActualShape(self):
"""Tests the result of e.g. adding two tensors with different shapes."""
with utils.SaveCodeAsString() as code_saver:
a: Tensor2[Any, A1, A2] = tf.zeros((1, 2))
b: Tensor1[Any, A2] = tf.zeros((2,))
y1 = a + b
y2 = a - b
y3 = a / b
y4 = a * b
inferred = utils.pytype_infer_shapes(_PREAMBLE + code_saver.code)
self.assertEqual(y1.shape, inferred.y1)
self.assertEqual(y2.shape, inferred.y2)
self.assertEqual(y3.shape, inferred.y3)
self.assertEqual(y4.shape, inferred.y4)
def testBinaryOpWithSameShape_InferredMatchesActualShape(self):
"""Tests the result of e.g. adding two tensors with the same shape."""
with utils.SaveCodeAsString() as code_saver:
a: Tensor2[Any, A1, A2] = tf.zeros((1, 2))
b: Tensor2[Any, A1, A2] = tf.zeros((1, 2))
y1 = a + b
y2 = a - b
y3 = a / b
y4 = a * b
inferred = utils.pytype_infer_shapes(_PREAMBLE + code_saver.code)
self.assertEqual(y1.shape, inferred.y1)
self.assertEqual(y2.shape, inferred.y2)
self.assertEqual(y3.shape, inferred.y3)
self.assertEqual(y4.shape, inferred.y4)
def testShapeAttribute_HasTypeTensorShape(self):
"""Tests that `x.shape` is a tensorflow.TensorShape."""
with utils.SaveCodeAsString() as code_saver:
x0 = tf.zeros(())
x1 = tf.zeros((1,))
x2 = tf.zeros((1, 2))
x3 = tf.zeros((1, 2, 3))
x4 = tf.zeros((1, 2, 3, 4))
x5 = tf.zeros((1, 2, 3, 4, 5))
x0_shape = x0.shape # pylint: disable=unused-variable
x1_shape = x1.shape # pylint: disable=unused-variable
x2_shape = x2.shape # pylint: disable=unused-variable
x3_shape = x3.shape # pylint: disable=unused-variable
x4_shape = x4.shape # pylint: disable=unused-variable
x5_shape = x5.shape # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual('tensorflow.TensorShape', inferred.x0_shape)
self.assertEqual('tensorflow.TensorShape', inferred.x1_shape)
self.assertEqual('tensorflow.TensorShape', inferred.x2_shape)
self.assertEqual('tensorflow.TensorShape', inferred.x3_shape)
self.assertEqual('tensorflow.TensorShape', inferred.x4_shape)
self.assertEqual('tensorflow.TensorShape', inferred.x5_shape)
def testShapeAttribute_HasLen(self):
with utils.SaveCodeAsString() as code_saver:
x = tf.zeros((1,))
rank = len(x.shape) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual('int', inferred.rank)
def testTensor0_CanBeConvertedToFloat(self):
with utils.SaveCodeAsString() as code_saver:
x = tf.zeros(())
y = float(x) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.y, 'float')
def testTensor0_SupportsFloat(self):
with utils.SaveCodeAsString() as code_saver:
def foo(x: SupportsFloat):
return x
x = tf.zeros(())
foo(x)
utils.assert_pytype_succeeds(_PREAMBLE + code_saver.code)
class TensorFlowDtypeTests(absltest.TestCase):
"""Tests for data types inferred from TensorFlow type stubs using pytype."""
def testTranspose_ReturnsSameDtypeAsInput(self):
"""Tests that tf.transpose() doesn't change the dtype."""
with utils.SaveCodeAsString() as code_saver:
x8: Tensor2[int8, A1, A1] = tf.constant([[0]], dtype=tf.int8)
x16: Tensor2[int16, A1, A1] = tf.constant([[0]], dtype=tf.int16)
y8 = tf.transpose(x8) # pylint: disable=unused-variable
y16 = tf.transpose(x16) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.y8, 'Tensor2[int8, A1, A1]')
self.assertEqual(inferred.y16, 'Tensor2[int16, A1, A1]')
def testUnaryFunctions_ReturnSameDtypeAsInput(self):
"""Tests that functions like `tf.sin` don't change the dtype."""
with utils.SaveCodeAsString() as code_saver:
x32: Tensor1[float32, A1] = tf.constant([0.0], dtype=tf.float32)
x64: Tensor1[float64, A1] = tf.constant([0.0], dtype=tf.float64)
# Let's just test a representative subset.
a32 = tf.abs(x32) # pylint: disable=unused-variable
a64 = tf.abs(x64) # pylint: disable=unused-variable
b32 = tf.sin(x32) # pylint: disable=unused-variable
b64 = tf.sin(x64) # pylint: disable=unused-variable
c32 = tf.floor(x32) # pylint: disable=unused-variable
c64 = tf.floor(x64) # pylint: disable=unused-variable
d32 = tf.round(x32) # pylint: disable=unused-variable
d64 = tf.round(x64) # pylint: disable=unused-variable
e32 = tf.sign(x32) # pylint: disable=unused-variable
e64 = tf.sign(x64) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.a32, 'Tensor1[float32, A1]')
self.assertEqual(inferred.a64, 'Tensor1[float64, A1]')
self.assertEqual(inferred.b32, 'Tensor1[float32, A1]')
self.assertEqual(inferred.b64, 'Tensor1[float64, A1]')
self.assertEqual(inferred.c32, 'Tensor1[float32, A1]')
self.assertEqual(inferred.c64, 'Tensor1[float64, A1]')
self.assertEqual(inferred.d32, 'Tensor1[float32, A1]')
self.assertEqual(inferred.d64, 'Tensor1[float64, A1]')
self.assertEqual(inferred.e32, 'Tensor1[float32, A1]')
self.assertEqual(inferred.e64, 'Tensor1[float64, A1]')
def testMathUnaryFunctions_ReturnSameDtypeAsInput(self):
"""Tests that functions like `tf.math.sin` don't change the dtype."""
with utils.SaveCodeAsString() as code_saver:
x32: Tensor1[float32, A1] = tf.constant([0.0], dtype=tf.float32)
x64: Tensor1[float64, A1] = tf.constant([0.0], dtype=tf.float64)
# Let's just test a representative subset.
a32 = tf.math.abs(x32) # pylint: disable=unused-variable
a64 = tf.math.abs(x64) # pylint: disable=unused-variable
b32 = tf.math.sin(x32) # pylint: disable=unused-variable
b64 = tf.math.sin(x64) # pylint: disable=unused-variable
c32 = tf.math.floor(x32) # pylint: disable=unused-variable
c64 = tf.math.floor(x64) # pylint: disable=unused-variable
d32 = tf.math.round(x32) # pylint: disable=unused-variable
d64 = tf.math.round(x64) # pylint: disable=unused-variable
e32 = tf.math.sign(x32) # pylint: disable=unused-variable
e64 = tf.math.sign(x64) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.a32, 'Tensor1[float32, A1]')
self.assertEqual(inferred.a64, 'Tensor1[float64, A1]')
self.assertEqual(inferred.b32, 'Tensor1[float32, A1]')
self.assertEqual(inferred.b64, 'Tensor1[float64, A1]')
self.assertEqual(inferred.c32, 'Tensor1[float32, A1]')
self.assertEqual(inferred.c64, 'Tensor1[float64, A1]')
self.assertEqual(inferred.d32, 'Tensor1[float32, A1]')
self.assertEqual(inferred.d64, 'Tensor1[float64, A1]')
self.assertEqual(inferred.e32, 'Tensor1[float32, A1]')
self.assertEqual(inferred.e64, 'Tensor1[float64, A1]')
def testZerosOnes_ReturnsAnyDType(self):
"""Tests that tf.zeros and tf.ones returns AnyDType."""
with utils.SaveCodeAsString() as code_saver:
a = tf.zeros(()) # pylint: disable=unused-variable
b = tf.ones(()) # pylint: disable=unused-variable
ref0: Tensor0[AnyDType] = tf.constant(0) # pylint: disable=unused-variable
c = tf.zeros((1,)) # pylint: disable=unused-variable
d = tf.ones((1,)) # pylint: disable=unused-variable
ref1: Tensor1[AnyDType, Any] = tf.constant([0]) # pylint: disable=unused-variable
e = tf.zeros((1, 1)) # pylint: disable=unused-variable
f = tf.ones((1, 1)) # pylint: disable=unused-variable
ref2: Tensor2[AnyDType, Any, Any] = tf.constant([[0]]) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
# We can't compare explicitly to e.g. Tensor0[AnyDType], because AnyDType
# is currently aliased to Any, and if all the type arguments are Any,
# pytype doesn't print the type arguments at all.
self.assertEqual(inferred.a, inferred.ref0)
self.assertEqual(inferred.b, inferred.ref0)
self.assertEqual(inferred.c, inferred.ref1)
self.assertEqual(inferred.d, inferred.ref1)
self.assertEqual(inferred.e, inferred.ref2)
self.assertEqual(inferred.f, inferred.ref2)
def testSum_ReturnsSameDtypeAsInput(self):
"""Tests that tf.reduce_sum() doesn't change the dtype."""
with utils.SaveCodeAsString() as code_saver:
x32: Tensor1[float32, A1] = tf.constant([0.0], dtype=tf.float32) # pylint: disable=unused-variable
x64: Tensor1[float64, A1] = tf.constant([0.0], dtype=tf.float64) # pylint: disable=unused-variable
y32: Tensor2[float32, A1, A1] = tf.constant([[0.0]], dtype=tf.float32) # pylint: disable=unused-variable
y64: Tensor2[float64, A1, A1] = tf.constant([[0.0]], dtype=tf.float64) # pylint: disable=unused-variable
xsum32 = tf.reduce_sum(x32, axis=0) # pylint: disable=unused-variable
xsum64 = tf.reduce_sum(x64, axis=0) # pylint: disable=unused-variable
ysum32 = tf.reduce_sum(y32, axis=0) # pylint: disable=unused-variable
ysum64 = tf.reduce_sum(y64, axis=0) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.xsum32, 'Tensor0[float32]')
self.assertEqual(inferred.xsum64, 'Tensor0[float64]')
self.assertEqual(inferred.ysum32, 'Tensor1[float32, A1]')
self.assertEqual(inferred.ysum64, 'Tensor1[float64, A1]')
def testTensorAdd_ReturnsAnyDType(self):
"""Tests that e.g. `x + 1` has dtype AnyDType."""
with utils.SaveCodeAsString() as code_saver:
x: Tensor1[int8, A1] = tf.constant([[0]])
a = x + 1 # pylint: disable=unused-variable
b = x + x # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
# AnyDType is printed as Any in pytype output.
self.assertEqual(inferred.a, 'Tensor1[Any, A1]')
self.assertEqual(inferred.b, 'Tensor1[Any, A1]')
def testTensorUnaryOp_ReturnsSameDTypeAsInput(self):
"""Tests that e.g. `-x` has the same dtype as `x`."""
with utils.SaveCodeAsString() as code_saver:
a8: Tensor0[int8] = tf.constant([[0]], dtype=tf.int8)
b8 = abs(a8) # pylint: disable=unused-variable
c8 = -a8 # pylint: disable=unused-variable
a16: Tensor0[int16] = tf.constant([[0]], dtype=tf.int16)
b16 = abs(a16) # pylint: disable=unused-variable
c16 = -a16 # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.b8, 'Tensor0[int8]')
self.assertEqual(inferred.c8, 'Tensor0[int8]')
self.assertEqual(inferred.b16, 'Tensor0[int16]')
self.assertEqual(inferred.c16, 'Tensor0[int16]')
def testBinaryOpWithScalar_ReturnsAnyDType(self):
"""Tests that e.g. `x + 1` has dtype AnyDType."""
with utils.SaveCodeAsString() as code_saver:
x: Tensor1[int8, A1] = tf.constant([0], dtype=tf.int8)
y1 = x + 1 # pylint: disable=unused-variable
y2 = x - 1 # pylint: disable=unused-variable
y3 = x / 1 # pylint: disable=unused-variable
y4 = x * 1 # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
# pytype displays AnyDType as Any.
self.assertEqual(inferred.y1, 'Tensor1[Any, A1]')
self.assertEqual(inferred.y2, 'Tensor1[Any, A1]')
self.assertEqual(inferred.y3, 'Tensor1[Any, A1]')
self.assertEqual(inferred.y4, 'Tensor1[Any, A1]')
def testBinaryOpWithArray_ReturnsAnyDType(self):
"""Tests that e.g. adding two arrays results in dtype AnyDType."""
with utils.SaveCodeAsString() as code_saver:
a: Tensor1[int8, A1] = tf.constant([0], dtype=tf.int8)
b: Tensor1[int8, A1] = tf.constant([0], dtype=tf.int8)
y1 = a + b # pylint: disable=unused-variable
y2 = a - b # pylint: disable=unused-variable
y3 = a / b # pylint: disable=unused-variable
y4 = a * b # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
# pytype displays AnyDType as Any.
self.assertEqual(inferred.y1, 'Tensor1[Any, A1]')
self.assertEqual(inferred.y2, 'Tensor1[Any, A1]')
self.assertEqual(inferred.y3, 'Tensor1[Any, A1]')
self.assertEqual(inferred.y4, 'Tensor1[Any, A1]')
def testFunctionWithInt8Argument_AcceptsInt8Value(self):
"""Tests whether a function will accept a value with the right dtype."""
with utils.SaveCodeAsString() as code_saver:
def foo(_: Tensor0[int8]):
pass
x: Tensor0[int8] = tf.constant([0], dtype=tf.int8)
foo(x)
utils.assert_pytype_succeeds(_PREAMBLE + code_saver.code)
def testFunctionWithInt8Argument_RejectsInt16Value(self):
"""Tests whether a function will reject a value with the wrong dtype."""
with utils.SaveCodeAsString() as code_saver:
def foo(_: Tensor0[int8]):
pass
x: Tensor0[int16] = tf.constant([0], dtype=tf.int16)
foo(x)
utils.assert_pytype_fails(_PREAMBLE + code_saver.code)
def testFunctionWithAnyDTypeArgument_AcceptsInt8Value(self):
"""Tests whether AnyDType makes a function argument compatible with all."""
with utils.SaveCodeAsString() as code_saver:
def foo(_: Tensor0[AnyDType]):
pass
x: Tensor0[int8] = tf.constant([0], dtype=tf.int8)
foo(x)
utils.assert_pytype_succeeds(_PREAMBLE + code_saver.code)
def testFunctionWithInt8Argument_AcceptsAnyDTypeValue(self):
"""Tests whether AnyDType is compatible with an arbitrary argument dtype."""
with utils.SaveCodeAsString() as code_saver:
def foo(_: Tensor0[int8]):
pass
x: Tensor0[AnyDType] = tf.constant([0])
foo(x)
utils.assert_pytype_succeeds(_PREAMBLE + code_saver.code)
def testFunctionWithBoolArgument_AcceptsBoolValue(self):
"""No problems with using 'bool' as a dtype name, right?"""
with utils.SaveCodeAsString() as code_saver:
def foo(_: Tensor0[ttf.bool]):
pass
x: Tensor0[ttf.bool] = tf.constant(False)
foo(x)
utils.assert_pytype_succeeds(_PREAMBLE + code_saver.code)
def testTensorShapeAttr_IsTensorShape(self):
with utils.SaveCodeAsString() as code_saver:
x: Tensor1[int8, A1] = tf.constant([0], dtype=tf.int8)
s = x.shape # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.s, 'tensorflow.TensorShape')
def testTensorShapeIndexedWithInt_IsInt(self):
with utils.SaveCodeAsString() as code_saver:
x: Tensor1[int8, A1] = tf.constant([0], dtype=tf.int8)
s = x.shape
s0 = s[0] # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.s0, 'int')
def testTensorShapeIndexedWithSlice_IsTensorShape(self):
with utils.SaveCodeAsString() as code_saver:
x: Tensor1[int8, A1] = tf.constant([0], dtype=tf.int8)
s = x.shape
s0 = s[:1] # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.s0, 'tensorflow.TensorShape')
def testTensorShapeAsList_IsListOfInt(self):
with utils.SaveCodeAsString() as code_saver:
x: Tensor1[int8, A1] = tf.constant([0], dtype=tf.int8)
s = x.shape
l = s.as_list() # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.l, 'List[int]')
class TensorFlowAnyDtypeAliasTests(absltest.TestCase):
"""Tests for backwards-compatible aliases that don't use DTypes."""
def testInt8Batch_AcceptsAnyDTypeBatch(self):
"""Is Tensor1AnyDType[Batch] compatible with Tensor1[int8, Batch]?"""
with utils.SaveCodeAsString() as code_saver:
def foo(_: Tensor1[int8, Batch]):
pass
x: Tensor1AnyDType[Batch] = tf.constant([[0]])
foo(x)
utils.assert_pytype_succeeds(_PREAMBLE + code_saver.code)
def testInt8Batch_RejectsAnyDTypeTime(self):
"""Is Tensor1AnyDType[Time] compatible with Tensor1[int8, Batch]?"""
with utils.SaveCodeAsString() as code_saver:
def foo(_: Tensor1[int8, Batch]):
pass
x: Tensor1AnyDType[Time] = tf.constant([[0]])
foo(x)
utils.assert_pytype_fails(_PREAMBLE + code_saver.code)
def testAnyDTypeBatch_AcceptsInt8Batch(self):
"""Is Tensor1[int8, Batch] compatible with Tensor1AnyDType[Batch]?"""
with utils.SaveCodeAsString() as code_saver:
def foo(_: Tensor1AnyDType[Batch]):
pass
x: Tensor1[int8, Batch] = tf.constant([[0]])
foo(x)
utils.assert_pytype_succeeds(_PREAMBLE + code_saver.code)
def testAnyDTypeBatch_RejectsInt8Time(self):
"""Is Tensor1[int8, Time] compatible with Tensor1AnyDType[Batch]?"""
with utils.SaveCodeAsString() as code_saver:
def foo(_: Tensor1AnyDType[Batch]):
pass
x: Tensor1[int8, Time] = tf.constant([[0]])
foo(x)
utils.assert_pytype_fails(_PREAMBLE + code_saver.code)
if __name__ == '__main__':
absltest.main()
|
tensor_annotations-master
|
tensor_annotations/tests/tensorflow.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for JAX stubs."""
from typing import cast, NewType, SupportsFloat, TypeVar
from absl.testing import absltest
import jax
import jax.numpy as jnp
from tensor_annotations import axes
from tensor_annotations.axes import Batch
from tensor_annotations.axes import Time
from tensor_annotations.jax import AnyDType
from tensor_annotations.jax import Array0
from tensor_annotations.jax import Array1
from tensor_annotations.jax import Array1AnyDType
from tensor_annotations.jax import Array2
from tensor_annotations.jax import float32
from tensor_annotations.jax import float64
from tensor_annotations.jax import int16
from tensor_annotations.jax import int8
from tensor_annotations.tests import utils
A1 = NewType('A1', axes.Axis)
A2 = NewType('A2', axes.Axis)
A3 = NewType('A3', axes.Axis)
AxisTypeVar = TypeVar('AxisTypeVar')
# It's less than ideal that we have to repeat imports etc. here for pytype, but
# this seems like the best balance between readability and complexity.
_PREAMBLE = """
from typing import Any, cast, NewType, SupportsFloat, TypeVar, Union
import jax
import jax.numpy as jnp
from tensor_annotations import axes
from tensor_annotations.axes import Batch, Time
from tensor_annotations.jax import AnyDType, float32, float64, int16, int8
from tensor_annotations.jax import Array0, Array1, Array1AnyDType, Array2
A1 = NewType('A1', axes.Axis)
A2 = NewType('A2', axes.Axis)
A3 = NewType('A3', axes.Axis)
AxisTypeVar = TypeVar('AxisTypeVar')
"""
class JAXStubTests(absltest.TestCase):
"""Tests for jax.* stubs."""
def test_custom_stubs_are_used_for_jax(self):
"""Tests whether eg a syntax error in jax.pyi prevents stubs being used."""
# _sentinel is a member that exists with a specific type in our stubs but
# not in the JAX library code itself (and would therefore normally be
# seen as `Any` by pytype).
code = _PREAMBLE + 's = jax._sentinel'
inferred = utils.pytype_infer_types(code)
self.assertEqual(inferred.s, 'int')
class JAXNumpyStubTests(absltest.TestCase):
"""Tests for jax.numpy.* stubs."""
def testTranspose_InferredShapeMatchesActualShape(self):
with utils.SaveCodeAsString() as code_saver:
x: Array2[AnyDType, A1, A2] = jnp.zeros((1, 2))
y = jnp.transpose(x)
y2 = x.T
inferred = utils.pytype_infer_shapes(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.y, y.shape)
self.assertEqual(inferred.y2, y2.shape)
def testUnaryOperator_ReturnCustomType(self):
with utils.SaveCodeAsString() as code_saver:
x: Array1[AnyDType, A1] = jnp.zeros((1,))
# Let's just test a representative subset.
a = jnp.abs(x) # pylint: disable=unused-variable
b = jnp.sin(x) # pylint: disable=unused-variable
c = jnp.floor(x) # pylint: disable=unused-variable
d = jnp.ones_like(x) # pylint: disable=unused-variable
e = jnp.round(x) # pylint: disable=unused-variable
f = jnp.sign(x) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
expected = 'Array1[Any, A1]'
self.assertEqual(inferred.a, expected)
self.assertEqual(inferred.b, expected)
self.assertEqual(inferred.c, expected)
self.assertEqual(inferred.d, expected)
self.assertEqual(inferred.e, expected)
self.assertEqual(inferred.f, expected)
def testZerosOnes_ReturnsCorrectShape(self):
with utils.SaveCodeAsString() as code_saver:
a = jnp.zeros(()) # pylint: disable=unused-variable
b = jnp.ones(()) # pylint: disable=unused-variable
c = jnp.zeros((1,)) # pylint: disable=unused-variable
d = jnp.ones((1,)) # pylint: disable=unused-variable
e = jnp.zeros((1, 1)) # pylint: disable=unused-variable
f = jnp.ones((1, 1)) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.a, 'Array0[float32]')
self.assertEqual(inferred.b, 'Array0[float32]')
self.assertEqual(inferred.c, 'Array1[float32, Any]')
self.assertEqual(inferred.d, 'Array1[float32, Any]')
self.assertEqual(inferred.e, 'Array2[float32, Any, Any]')
self.assertEqual(inferred.f, 'Array2[float32, Any, Any]')
def testSum_InferredMatchesActualShape(self):
with utils.SaveCodeAsString() as code_saver:
x: Array2[AnyDType, A1, A2] = jnp.zeros((1, 2))
y1 = jnp.sum(x, axis=0)
y2 = jnp.sum(x, axis=1)
y3 = jnp.sum(x, axis=(0, 1))
y4 = jnp.sum(x)
inferred = utils.pytype_infer_shapes(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.y1, y1.shape)
self.assertEqual(inferred.y2, y2.shape)
self.assertEqual(inferred.y3, y3.shape)
self.assertEqual(inferred.y4, y4.shape)
def testSumKeepdimsTrue_ReturnsAny(self):
# We haven't got around to making stubs for keepdims=True yet;
# make sure the type reflects that.
with utils.SaveCodeAsString() as code_saver:
x: Array1[AnyDType, A1] = jnp.zeros((1,))
a = jnp.sum(x, axis=0, keepdims=True) # pylint: disable=unused-variable
b = jnp.sum(x, keepdims=True) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.a, 'Any')
self.assertEqual(inferred.b, 'Any')
def testTensorAdd_ReturnsCustomType(self):
with utils.SaveCodeAsString() as code_saver:
x: Array1[AnyDType, A1] = jnp.zeros((1,))
a = x + 1 # pylint: disable=unused-variable
b = x + x # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual('Array1[Any, A1]', inferred.a)
self.assertEqual('Array1[Any, A1]', inferred.b)
def testMatmul_InferredMatchesActualShape(self):
with utils.SaveCodeAsString() as code_saver:
x: Array2[AnyDType, A1, A2] = jnp.zeros((1, 2))
y: Array2[AnyDType, A2, A3] = jnp.zeros((2, 3))
xy = x @ y
inferred = utils.pytype_infer_shapes(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.xy, xy.shape)
def testTensorUnaryOp_ReturnsCorrectTypeAndShape(self):
with utils.SaveCodeAsString() as code_saver:
x1: Array0 = jnp.zeros(())
y1 = abs(x1) # pylint: disable=unused-variable
y2 = -x1 # pylint: disable=unused-variable
x2: Array1[AnyDType, A1] = jnp.zeros((1,))
y3 = abs(x2) # pylint: disable=unused-variable
y4 = -x2 # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual('Array0[DType]', inferred.y1)
self.assertEqual('Array0[DType]', inferred.y2)
self.assertEqual('Array1[Any, A1]', inferred.y3)
self.assertEqual('Array1[Any, A1]', inferred.y4)
def testBinaryOpWithScalar_InferredMatchesActualShape(self):
with utils.SaveCodeAsString() as code_saver:
x: Array2[AnyDType, A1, A2] = jnp.zeros((1, 2))
y1 = x + 1.0
y2 = x - 1.0
y3 = x / 1.0
y4 = x * 1.0
inferred = utils.pytype_infer_shapes(_PREAMBLE + code_saver.code)
self.assertEqual(y1.shape, inferred.y1)
self.assertEqual(y2.shape, inferred.y2)
self.assertEqual(y3.shape, inferred.y3)
self.assertEqual(y4.shape, inferred.y4)
def testBinaryOpWithBroadcast_InferredMatchesActualShape(self):
with utils.SaveCodeAsString() as code_saver:
a: Array2[AnyDType, A1, A2] = jnp.zeros((1, 2))
b: Array1[AnyDType, A2] = jnp.zeros((2,))
y1 = a + b
y2 = a - b
y3 = a / b
y4 = a * b
inferred = utils.pytype_infer_shapes(_PREAMBLE + code_saver.code)
self.assertEqual(y1.shape, inferred.y1)
self.assertEqual(y2.shape, inferred.y2)
self.assertEqual(y3.shape, inferred.y3)
self.assertEqual(y4.shape, inferred.y4)
def testBinaryOpWithSameShape_InferredMatchesActualShape(self):
with utils.SaveCodeAsString() as code_saver:
a: Array2[AnyDType, A1, A2] = jnp.zeros((1, 2))
b: Array2[AnyDType, A1, A2] = jnp.zeros((1, 2))
y1 = a + b
y2 = a - b
y3 = a / b
y4 = a * b
inferred = utils.pytype_infer_shapes(_PREAMBLE + code_saver.code)
self.assertEqual(y1.shape, inferred.y1)
self.assertEqual(y2.shape, inferred.y2)
self.assertEqual(y3.shape, inferred.y3)
self.assertEqual(y4.shape, inferred.y4)
def testShapeAttribute_HasCorrectLength(self):
with utils.SaveCodeAsString() as code_saver:
x0 = jnp.zeros(())
x1 = jnp.zeros((1,))
x2 = jnp.zeros((1, 2))
x3 = jnp.zeros((1, 2, 3))
x4 = jnp.zeros((1, 2, 3, 4))
x0_shape = x0.shape # pylint: disable=unused-variable
x1_shape = x1.shape # pylint: disable=unused-variable
x2_shape = x2.shape # pylint: disable=unused-variable
x3_shape = x3.shape # pylint: disable=unused-variable
x4_shape = x4.shape # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(x0_shape, ())
self.assertEqual(x1_shape, (1,))
self.assertEqual(x2_shape, (1, 2))
self.assertEqual(x3_shape, (1, 2, 3))
self.assertEqual(x4_shape, (1, 2, 3, 4))
self.assertEqual('Tuple[()]', inferred.x0_shape)
self.assertEqual('Tuple[int]', inferred.x1_shape)
self.assertEqual('Tuple[int, int]', inferred.x2_shape)
self.assertEqual('Tuple[int, int, int]', inferred.x3_shape)
self.assertEqual('Tuple[int, int, int, int]', inferred.x4_shape)
def testArray0Item_ReturnsIntFloatBoolComplexUnion(self):
with utils.SaveCodeAsString() as code_saver:
x = jnp.zeros(())
y = x.item() # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.y, 'Union[bool, complex, float, int]')
def testArray0_CanBeConvertedToFloat(self):
with utils.SaveCodeAsString() as code_saver:
x = jnp.zeros(())
y = float(x) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.y, 'float')
def testArray0_SupportsFloat(self):
with utils.SaveCodeAsString() as code_saver:
def foo(x: SupportsFloat):
return x
x = jnp.zeros(())
foo(x)
utils.assert_pytype_succeeds(_PREAMBLE + code_saver.code)
class JAXDtypeTests(absltest.TestCase):
"""Tests for data types inferred from JAX type stubs using pytype."""
def testTranspose_ReturnsSameDtypeAsInput(self):
"""Tests that jnp.transpose() doesn't change the dtype."""
with utils.SaveCodeAsString() as code_saver:
x8: Array2[int8, A1, A1] = jnp.array([[0]], dtype=jnp.int8)
x16: Array2[int16, A1, A1] = jnp.array([[0]], dtype=jnp.int16)
y8 = jnp.transpose(x8) # pylint: disable=unused-variable
y16 = jnp.transpose(x16) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.y8, 'Array2[int8, A1, A1]')
self.assertEqual(inferred.y16, 'Array2[int16, A1, A1]')
def testUnaryFunctions_ReturnSameDtypeAsInput(self):
"""Tests that functions like `jnp.sin` don't change the dtype."""
with utils.SaveCodeAsString() as code_saver:
x32: Array1[float32, A1] = jnp.array([0.0], dtype=jnp.float32)
x64: Array1[float64, A1] = jnp.array([0.0], dtype=jnp.float64)
# Let's just test a representative subset.
a32 = jnp.abs(x32) # pylint: disable=unused-variable
a64 = jnp.abs(x64) # pylint: disable=unused-variable
b32 = jnp.sin(x32) # pylint: disable=unused-variable
b64 = jnp.sin(x64) # pylint: disable=unused-variable
c32 = jnp.floor(x32) # pylint: disable=unused-variable
c64 = jnp.floor(x64) # pylint: disable=unused-variable
d32 = jnp.round(x32) # pylint: disable=unused-variable
d64 = jnp.round(x64) # pylint: disable=unused-variable
e32 = jnp.sign(x32) # pylint: disable=unused-variable
e64 = jnp.sign(x64) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.a32, 'Array1[float32, A1]')
self.assertEqual(inferred.a64, 'Array1[float64, A1]')
self.assertEqual(inferred.b32, 'Array1[float32, A1]')
self.assertEqual(inferred.b64, 'Array1[float64, A1]')
self.assertEqual(inferred.c32, 'Array1[float32, A1]')
self.assertEqual(inferred.c64, 'Array1[float64, A1]')
self.assertEqual(inferred.d32, 'Array1[float32, A1]')
self.assertEqual(inferred.d64, 'Array1[float64, A1]')
self.assertEqual(inferred.e32, 'Array1[float32, A1]')
self.assertEqual(inferred.e64, 'Array1[float64, A1]')
def testZerosOnes_ReturnsCorrectDtype(self):
"""Tests that jnp.zeros and jnp.ones returns arrays with correct dtypes."""
with utils.SaveCodeAsString() as code_saver:
a = jnp.zeros(()) # pylint: disable=unused-variable
b = jnp.ones(()) # pylint: disable=unused-variable
c = jnp.zeros((), dtype=jnp.int8) # pylint: disable=unused-variable
d = jnp.ones((), dtype=jnp.int8) # pylint: disable=unused-variable
e = jnp.zeros((1,)) # pylint: disable=unused-variable
f = jnp.ones((1,)) # pylint: disable=unused-variable
g = jnp.zeros((1,), dtype=jnp.int8) # pylint: disable=unused-variable
h = jnp.ones((1,), dtype=jnp.int8) # pylint: disable=unused-variable
i = jnp.zeros((1, 1)) # pylint: disable=unused-variable
j = jnp.ones((1, 1)) # pylint: disable=unused-variable
k = jnp.zeros((1, 1), dtype=jnp.int8) # pylint: disable=unused-variable
l = jnp.ones((1, 1), dtype=jnp.int8) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.a, 'Array0[float32]')
self.assertEqual(inferred.b, 'Array0[float32]')
# These should be Array0[AnyDType], but AnyDType is currently aliased to
# `Any`, and pytype doesn't print type arguments at all when they're all
# `Any`, so we just compare to e.g. Array0.
# Ditto the assertions below.
self.assertEqual(inferred.c, 'Array0')
self.assertEqual(inferred.d, 'Array0')
self.assertEqual(inferred.e, 'Array1[float32, Any]')
self.assertEqual(inferred.f, 'Array1[float32, Any]')
self.assertEqual(inferred.g, 'Array1')
self.assertEqual(inferred.h, 'Array1')
self.assertEqual(inferred.i, 'Array2[float32, Any, Any]')
self.assertEqual(inferred.j, 'Array2[float32, Any, Any]')
self.assertEqual(inferred.k, 'Array2')
self.assertEqual(inferred.l, 'Array2')
def testSum_ReturnsSameDtypeAsInput(self):
"""Tests that jnp.sum() doesn't change the dtype."""
with utils.SaveCodeAsString() as code_saver:
x32: Array1[float32, A1] = jnp.array([0.0], dtype=jnp.float32) # pylint: disable=unused-variable
x64: Array1[float64, A1] = jnp.array([0.0], dtype=jnp.float64) # pylint: disable=unused-variable
y32: Array2[float32, A1, A1] = jnp.array([[0.0]], dtype=jnp.float32) # pylint: disable=unused-variable
y64: Array2[float64, A1, A1] = jnp.array([[0.0]], dtype=jnp.float64) # pylint: disable=unused-variable
xsum32 = jnp.sum(x32, axis=0) # pylint: disable=unused-variable
xsum64 = jnp.sum(x64, axis=0) # pylint: disable=unused-variable
ysum32 = jnp.sum(y32, axis=0) # pylint: disable=unused-variable
ysum64 = jnp.sum(y64, axis=0) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.xsum32, 'Array0[float32]')
self.assertEqual(inferred.xsum64, 'Array0[float64]')
self.assertEqual(inferred.ysum32, 'Array1[float32, A1]')
self.assertEqual(inferred.ysum64, 'Array1[float64, A1]')
def testArrayAdd_ReturnsAnyDType(self):
"""Tests that e.g. `x + 1` has dtype AnyDType."""
with utils.SaveCodeAsString() as code_saver:
x: Array1[int8, A1] = jnp.array([[0]])
a = x + 1 # pylint: disable=unused-variable
b = x + x # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
# AnyDType is printed as Any in pytype output.
self.assertEqual(inferred.a, 'Array1[Any, A1]')
self.assertEqual(inferred.b, 'Array1[Any, A1]')
def testArrayUnaryOp_ReturnsSameDTypeAsInput(self):
"""Tests that e.g. `-x` has the same dtype as `x`."""
with utils.SaveCodeAsString() as code_saver:
a8: Array0[int8] = jnp.array([[0]], dtype=jnp.int8)
b8 = abs(a8) # pylint: disable=unused-variable
c8 = -a8 # pylint: disable=unused-variable
a16: Array0[int16] = jnp.array([[0]], dtype=jnp.int16)
b16 = abs(a16) # pylint: disable=unused-variable
c16 = -a16 # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.b8, 'Array0[int8]')
self.assertEqual(inferred.c8, 'Array0[int8]')
self.assertEqual(inferred.b16, 'Array0[int16]')
self.assertEqual(inferred.c16, 'Array0[int16]')
def testBinaryOpWithScalar_ReturnsAnyDType(self):
"""Tests that e.g. `x + 1` has dtype AnyDType."""
with utils.SaveCodeAsString() as code_saver:
x: Array1[int8, A1] = jnp.array([0], dtype=jnp.int8)
y1 = x + 1 # pylint: disable=unused-variable
y2 = x - 1 # pylint: disable=unused-variable
y3 = x / 1 # pylint: disable=unused-variable
y4 = x * 1 # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
# pytype displays AnyDType as Any.
self.assertEqual(inferred.y1, 'Array1[Any, A1]')
self.assertEqual(inferred.y2, 'Array1[Any, A1]')
self.assertEqual(inferred.y3, 'Array1[Any, A1]')
self.assertEqual(inferred.y4, 'Array1[Any, A1]')
def testBinaryOpWithArray_ReturnsAnyDType(self):
"""Tests that e.g. adding two arrays results in dtype AnyDType."""
with utils.SaveCodeAsString() as code_saver:
a: Array1[int8, A1] = jnp.array([0], dtype=jnp.int8)
b: Array1[int8, A1] = jnp.array([0], dtype=jnp.int8)
y1 = a + b # pylint: disable=unused-variable
y2 = a - b # pylint: disable=unused-variable
y3 = a / b # pylint: disable=unused-variable
y4 = a * b # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
# pytype displays AnyDType as Any.
self.assertEqual(inferred.y1, 'Array1[Any, A1]')
self.assertEqual(inferred.y2, 'Array1[Any, A1]')
self.assertEqual(inferred.y3, 'Array1[Any, A1]')
self.assertEqual(inferred.y4, 'Array1[Any, A1]')
def testFunctionWithInt8Argument_AcceptsInt8Value(self):
"""Tests whether a function will accept a value with the right dtype."""
with utils.SaveCodeAsString() as code_saver:
def foo(_: Array0[int8]):
pass
x: Array0[int8] = jnp.array([0], dtype=jnp.int8)
foo(x)
utils.assert_pytype_succeeds(_PREAMBLE + code_saver.code)
def testFunctionWithInt8Argument_RejectsInt16Value(self):
"""Tests whether a function will reject a value with the wrong dtype."""
with utils.SaveCodeAsString() as code_saver:
def foo(_: Array0[int8]):
pass
x: Array0[int16] = jnp.array([0], dtype=jnp.int16)
foo(x)
utils.assert_pytype_fails(_PREAMBLE + code_saver.code)
def testFunctionWithAnyDTypeArgument_AcceptsInt8Value(self):
"""Tests whether AnyDType makes a function argument compatible with all."""
with utils.SaveCodeAsString() as code_saver:
def foo(_: Array0[AnyDType]):
pass
x: Array0[int8] = jnp.array([0], dtype=jnp.int8)
foo(x)
utils.assert_pytype_succeeds(_PREAMBLE + code_saver.code)
def testFunctionWithInt8Argument_AcceptsAnyDTypeValue(self):
"""Tests whether AnyDType is compatible with an arbitrary argument dtype."""
with utils.SaveCodeAsString() as code_saver:
def foo(_: Array0[int8]):
pass
x: Array0[AnyDType] = jnp.array([0])
foo(x)
utils.assert_pytype_succeeds(_PREAMBLE + code_saver.code)
class JAXAnyDtypeAliasTests(absltest.TestCase):
"""Tests for backwards-compatible aliases that don't use DTypes."""
def testInt8Batch_AcceptsAnyDTypeBatch(self):
"""Is Array1AnyDType[Batch] compatible with Array1[int8, Batch]?"""
with utils.SaveCodeAsString() as code_saver:
def foo(_: Array1[int8, Batch]):
pass
x: Array1AnyDType[Batch] = jnp.array([[0]])
foo(x)
utils.assert_pytype_succeeds(_PREAMBLE + code_saver.code)
def testInt8Batch_RejectsAnyDTypeTime(self):
"""Is Array1AnyDType[Time] compatible with Array1[int8, Batch]?"""
with utils.SaveCodeAsString() as code_saver:
def foo(_: Array1[int8, Batch]):
pass
x: Array1AnyDType[Time] = jnp.array([[0]])
foo(x)
utils.assert_pytype_fails(_PREAMBLE + code_saver.code)
def testAnyDTypeBatch_AcceptsInt8Batch(self):
"""Is Array1[int8, Batch] compatible with Array1AnyDType[Batch]?"""
with utils.SaveCodeAsString() as code_saver:
def foo(_: Array1AnyDType[Batch]):
pass
x: Array1[int8, Batch] = jnp.array([[0]])
foo(x)
utils.assert_pytype_succeeds(_PREAMBLE + code_saver.code)
def testAnyDTypeBatch_RejectsInt8Time(self):
"""Is Array1[int8, Time] compatible with Array1AnyDType[Batch]?"""
with utils.SaveCodeAsString() as code_saver:
def foo(_: Array1AnyDType[Batch]):
pass
x: Array1[int8, Time] = jnp.array([[0]])
foo(x)
utils.assert_pytype_fails(_PREAMBLE + code_saver.code)
class JAXArrayTests(absltest.TestCase):
"""Test for operations on official jax.Array class.
We need to do some explicit casting in these tests because, since we're
using our stubs, things like jnp.zeros returns an ArrayN, rather than
a jax.Array, as we want.
"""
def testArrayShape_HasInferredTypeTupleInt(self):
"""Tests that pytype infers tuple[int, ...] for jax.Array.shape."""
with utils.SaveCodeAsString() as code_saver:
x = cast(jax.Array, jnp.zeros(3))
s = x.shape # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.s, 'Tuple[int, ...]')
def testArrayFunctions_ReturnJaxArray(self):
"""Tests that the inferred types for eg jax.Array.astype() is are right."""
with utils.SaveCodeAsString() as code_saver:
a = cast(jax.Array, jnp.zeros(3))
b = a.astype(jnp.int64)
c = a + 1
d = 1 + a
e = a - 1
f = 1 - a
g = a * 2
h = 2 * a
i = a / 2
j = a // 2
k = a ** 2
l = a @ a
m = a[0]
n = a.T
o = a.at[0].set(1)
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
# These are all jax.Arrays, right?
# Or close enough - they'll actually be some subclass of jax.Array
# like tensorflow.compiler.xla.python.xla_extension.Array.
self.assertIsInstance(a, jax.Array)
self.assertIsInstance(b, jax.Array)
self.assertIsInstance(c, jax.Array)
self.assertIsInstance(d, jax.Array)
self.assertIsInstance(e, jax.Array)
self.assertIsInstance(f, jax.Array)
self.assertIsInstance(g, jax.Array)
self.assertIsInstance(h, jax.Array)
self.assertIsInstance(i, jax.Array)
self.assertIsInstance(j, jax.Array)
self.assertIsInstance(k, jax.Array)
self.assertIsInstance(l, jax.Array)
self.assertIsInstance(m, jax.Array)
self.assertIsInstance(n, jax.Array)
self.assertIsInstance(o, jax.Array)
# If all the variables are definitely jax.Arrays, then we should have
# inferred jax.Array types.
self.assertEqual(inferred.a, 'jax.Array')
self.assertEqual(inferred.b, 'jax.Array')
self.assertEqual(inferred.c, 'jax.Array')
self.assertEqual(inferred.d, 'jax.Array')
self.assertEqual(inferred.e, 'jax.Array')
self.assertEqual(inferred.f, 'jax.Array')
self.assertEqual(inferred.g, 'jax.Array')
self.assertEqual(inferred.h, 'jax.Array')
self.assertEqual(inferred.i, 'jax.Array')
self.assertEqual(inferred.j, 'jax.Array')
self.assertEqual(inferred.k, 'jax.Array')
self.assertEqual(inferred.l, 'jax.Array')
self.assertEqual(inferred.m, 'jax.Array')
self.assertEqual(inferred.n, 'jax.Array')
self.assertEqual(inferred.o, 'jax.Array')
def testUnaryFunction_ReturnsJaxArray(self):
with utils.SaveCodeAsString() as code_saver:
x = cast(jax.Array, jnp.zeros(3))
y = jnp.abs(x) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.y, 'jax.Array')
def testZerosLike_ReturnsJaxArray(self):
"""Tests that jnp.zeros_like(jax.Array) returns a jax.Array."""
with utils.SaveCodeAsString() as code_saver:
a = cast(jax.Array, jnp.zeros(3))
# pylint: disable=unused-variable
b = jnp.zeros_like(a)
c = jnp.zeros_like(a, dtype=jnp.uint8)
# pylint: enable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.b, 'jax.Array')
self.assertEqual(inferred.c, 'jax.Array')
def testRound_ReturnsJaxArray(self):
with utils.SaveCodeAsString() as code_saver:
x = cast(jax.Array, jnp.zeros(3))
y = jnp.round(x, 2) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.y, 'jax.Array')
def testSum_ReturnsJaxArray(self):
"""Tests that jnp.sum(jax.Array) returns a jax.Array."""
with utils.SaveCodeAsString() as code_saver:
a = cast(jax.Array, jnp.zeros(3))
# pylint: disable=unused-variable
b = jnp.sum(a)
c = jnp.sum(a, keepdims=True)
d = jnp.sum(a, axis=0)
e = jnp.sum(a, axis=0, keepdims=True)
# pylint: enable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.b, 'jax.Array')
self.assertEqual(inferred.c, 'jax.Array')
self.assertEqual(inferred.d, 'jax.Array')
self.assertEqual(inferred.e, 'jax.Array')
def testTranspose_ReturnsJaxArray(self):
with utils.SaveCodeAsString() as code_saver:
a = cast(jax.Array, jnp.zeros(3))
b = jnp.transpose(a) # pylint: disable=unused-variable
c = jnp.transpose(a, axes=(0,)) # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.b, 'jax.Array')
self.assertEqual(inferred.c, 'jax.Array')
def testArray_HasDtypeAttribute(self):
with utils.SaveCodeAsString() as code_saver:
a = cast(jax.Array, jnp.zeros(3))
b = a.dtype # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.b, 'numpy.dtype')
class JnpNdarrayTests(absltest.TestCase):
"""Tests for operations on the plain jnp.ndarray class.
Users might not have all their arrays typed as tensor_annotations types -
they may also have some plain jnp.ndarrays around. We need to make sure
those don't trigger spurious type errors.
"""
def testSlicingNdArray_ReturnsNdArray(self):
with utils.SaveCodeAsString() as code_saver:
a = cast(jnp.ndarray, jnp.zeros((2, 3)))
b = a[0] # pylint: disable=unused-variable
c = a[0:1] # pylint: disable=unused-variable
d = a[:, 2:] # pylint: disable=unused-variable
inferred = utils.pytype_infer_types(_PREAMBLE + code_saver.code)
self.assertEqual(inferred.b, 'jax.numpy.ndarray')
self.assertEqual(inferred.c, 'jax.numpy.ndarray')
self.assertEqual(inferred.d, 'jax.numpy.ndarray')
if __name__ == '__main__':
absltest.main()
|
tensor_annotations-master
|
tensor_annotations/tests/jax.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tools/templates.py."""
import unittest
from tensor_annotations.tools import templates
class TemplateTests(unittest.TestCase):
def test_axis_list(self):
self.assertEqual(
templates.axis_list(n_axes=1),
'A1'
)
self.assertEqual(
templates.axis_list(n_axes=2),
'A1, A2'
)
self.assertEqual(
templates.axis_list(n_axes=1, reverse=True),
'A1'
)
self.assertEqual(
templates.axis_list(n_axes=2, reverse=True),
'A2, A1'
)
def test_jax_array_type(self):
self.assertEqual(
'Array1[A1]',
templates.jax_array_type(n_axes=1)
)
self.assertEqual(
'Array2[A1, A2]',
templates.jax_array_type(n_axes=2)
)
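# Note: in the expectations below, L0/L1/LN1 are assumed to be the stub
# templates' literal axis-index markers (roughly Literal[0], Literal[1] and
# Literal[-1]); that naming convention comes from tools/templates.py, which
# these tests exercise.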
def test_transpose_axes(self):
self.assertEqual(
list(templates.transpose_axes(1)),
[
templates.TransposeAxes(
n_axes=1,
all_axes='A1',
transpose_axes='Tuple[L0]',
result_axes='A1'
)
]
)
self.assertEqual(
list(templates.transpose_axes(2)),
[
templates.TransposeAxes(
n_axes=2,
all_axes='A1, A2',
transpose_axes='Tuple[L0, L1]',
result_axes='A1, A2'
),
templates.TransposeAxes(
n_axes=2,
all_axes='A1, A2',
transpose_axes='Tuple[L1, L0]',
result_axes='A2, A1'
),
]
)
def test_reduction_axes(self):
self.assertEqual(
list(templates.reduction_axes(1)),
[
templates.ReductionAxes(
n_axes=1,
all_axes='A1',
reduction_axes='L0',
remaining_n_axes=0,
remaining_axes=''
),
templates.ReductionAxes(
n_axes=1,
all_axes='A1',
reduction_axes='LN1',
remaining_n_axes=0,
remaining_axes=''
)
]
)
self.assertEqual(
list(templates.reduction_axes(2)),
[
templates.ReductionAxes(
n_axes=2,
all_axes='A1, A2',
reduction_axes='L0',
remaining_n_axes=1,
remaining_axes=', A2'
),
templates.ReductionAxes(
n_axes=2,
all_axes='A1, A2',
reduction_axes='L1',
remaining_n_axes=1,
remaining_axes=', A1'
),
templates.ReductionAxes(
n_axes=2,
all_axes='A1, A2',
reduction_axes='LN1',
remaining_n_axes=1,
remaining_axes=', A1'
),
templates.ReductionAxes(
n_axes=2,
all_axes='A1, A2',
reduction_axes='Tuple[L0, L1]',
remaining_n_axes=0,
remaining_axes=''
),
templates.ReductionAxes(
n_axes=2,
all_axes='A1, A2',
reduction_axes='Tuple[L0, LN1]',
remaining_n_axes=0,
remaining_axes=''
),
]
)
if __name__ == '__main__':
unittest.main()
|
tensor_annotations-master
|
tensor_annotations/tests/templates.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests to confirm that Pytype helpers function correctly."""
import textwrap
import unittest
from tensor_annotations.tests import utils
_PREAMBLE = """
from typing import NewType
from tensor_annotations.jax import Array2, int8
from tensor_annotations import axes
A1 = NewType('A1', axes.Axis)
A2 = NewType('A2', axes.Axis)
"""
class PytypeTests(unittest.TestCase):
def testSimpleCorrectExample_PassesPytype(self):
code = """
def foo(x: Array2[int8, A1, A2]):
pass
x: Array2[int8, A1, A2] = Array2()
foo(x)
"""
code = _PREAMBLE + textwrap.dedent(code)
utils.assert_pytype_succeeds(code)
def testSimpleIncorrectExample_FailsPytype(self):
code = """
def foo(x: Array2[int8, A1, A2]):
pass
x: Array2[int8, A2, A1] = Array2()
foo(x)
"""
code = _PREAMBLE + textwrap.dedent(code)
utils.assert_pytype_fails(code)
if __name__ == '__main__':
unittest.main()
|
tensor_annotations-master
|
tensor_annotations/tests/pytype.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test helpers."""
import importlib
import inspect
import os
import pathlib
import re
import subprocess
import tempfile
import textwrap
import types
from typing import List
from typing import Optional
def run_pytype(code: str, check: bool) -> subprocess.CompletedProcess: # pylint: disable=g-doc-args
"""Runs pytype on the specified code.
Raises:
subprocess.CalledProcessError: if check=True and pytype's return code is
non-zero.
Returns:
A subprocess.CompletedProcess instance containing pytype's stdout and stderr.
"""
with tempfile.TemporaryDirectory() as tmp_dir:
code_filename = os.path.join(tmp_dir, 'test.py')
with open(code_filename, 'w') as f:
f.write(code)
pytype_path = pathlib.Path('pytype-single')
tensor_annotations_dir = pathlib.Path(__file__).parent.parent
stubs_dir = pathlib.Path(tmp_dir)
_link_stubs(tensor_annotations_dir, stubs_dir)
_generate_tensor_annotations_stubs(pytype_path, tensor_annotations_dir,
stubs_dir)
cmd = ([str(pytype_path), '--pythonpath', str(stubs_dir), code_filename])
if 'TENSOR_ANNOTATIONS_DEBUG' in os.environ:
input(f'About to run:\n{" ".join(cmd)}\nPress enter to continue: ')
proc = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=check, # Raise error if non-zero return code.
)
if 'TENSOR_ANNOTATIONS_DEBUG' in os.environ:
print('pytype stdout:')
print(proc.stdout.decode())
print('\npytype stderr:')
print(proc.stderr.decode())
return proc
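# Example usage (a rough sketch; any self-contained snippet works, provided
# the stub packages are installed so _link_stubs can find them):
#   proc = run_pytype("x: int = 'not an int'", check=False)
#   print(proc.stdout.decode())  # pytype's report, including any type errors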
def _link_stubs(tensor_annotations_dir: pathlib.Path, stubs_dir: pathlib.Path):
"""Link JAX/TensorFlow stubs to a place where pytype can find them."""
google_internal = False
if not google_internal:
jax_module = importlib.import_module('jax-stubs')
jax_stubs_dir = pathlib.Path(jax_module.__path__[0])
tf_module = importlib.import_module('tensorflow-stubs')
tf_stubs_dir = pathlib.Path(tf_module.__path__[0])
np_module = importlib.import_module('numpy-stubs')
np_stubs_dir = pathlib.Path(np_module.__path__[0])
for source, target in [
# Library functions, e.g. tf.reduce_sum.
(jax_stubs_dir, stubs_dir / 'jax'),
(tf_stubs_dir, stubs_dir / 'tensorflow'),
(np_stubs_dir, stubs_dir / 'numpy'),
# Tensor functions, e.g. Tensor.__add__.
(tensor_annotations_dir / 'jax.pyi',
stubs_dir / 'tensor_annotations' / 'jax.pyi'),
(tensor_annotations_dir / 'tensorflow.pyi',
stubs_dir / 'tensor_annotations' / 'tensorflow.pyi'),
(tensor_annotations_dir / 'numpy.pyi',
stubs_dir / 'tensor_annotations' / 'numpy.pyi')
]:
if not os.path.exists(source):
raise Exception(f"Stub file '{source}' does not exist")
target.parent.mkdir(parents=True, exist_ok=True)
target.symlink_to(source)
def _generate_tensor_annotations_stubs(pytype_path: pathlib.Path,
tensor_annotations_dir: pathlib.Path,
stubs_dir: pathlib.Path):
"""Generates stubs for tensor_annotations modules."""
path = tensor_annotations_dir / 'axes.py'
pyi_path = stubs_dir / 'tensor_annotations' / 'axes.pyi'
pyi_path.parent.mkdir(parents=True, exist_ok=True)
subprocess.run(
[
str(pytype_path),
'-o',
str(pyi_path),
str(path),
],
check=True, # Raise error if non-zero return code.
)
def assert_pytype_succeeds(code: str):
try:
process = run_pytype(code, check=True)
except subprocess.CalledProcessError as e:
print(e.stdout.decode())
print(e.stderr.decode())
raise e
def assert_pytype_fails(code: str):
try:
run_pytype(code, check=True)
except subprocess.CalledProcessError:
pass
else:
assert False, 'pytype did not raise error'
def pytype_infer_types(code: str) -> types.SimpleNamespace:
"""Runs pytype on `code`, returning inferred type of each variable.
Args:
code: Code to run pytype on. Should include assignment to one or more
variables, e.g. 'x = jnp.zeros((3, 1))'. We collect a list of
variables assigned to, and use pytype to infer type at the end of the
code.
Returns:
A SimpleNamespace whose attributes map from variable names to types.
Raises:
Exception: If types could not be parsed from pytype output.
For example, if `code` is
x = 'foo'
x = 2
y = 3.0
we return a SimpleNamespace `a` such that
a.x = int
a.y = float
"""
# This may contain duplicates, but that's fine.
var_names = re.findall(r'^([^: ]*).*=', code, re.MULTILINE)
var_names = [vn.strip() for vn in var_names] # Remove any newline prefixes
var_names = [vn for vn in var_names if not vn.startswith('#')] # Comments
for var in var_names:
code += f'\nreveal_type({var})'
process = run_pytype(code, check=False)
# We look at both stdout and stderr because pytype behaves differently
# depending on whether we run the Google-internal version or the normal
# version
lines = (process.stdout.decode() + process.stderr.decode()).split('\n')
return _parse_pytype_output(var_names, lines)
def _parse_pytype_output(var_names: List[str],
lines: List[str]) -> types.SimpleNamespace:
"""Parses the inferred type of each variable from pytype output."""
reveal_type_lines = [l for l in lines if '[reveal-type]' in l]
assert len(reveal_type_lines) == len(var_names)
types_dict = {}
for var, line in zip(var_names, reveal_type_lines):
match = re.search(r'File "[^"]*", line \d+, in [^:]*: '
r'(.*) \[reveal-type\]', line)
if match is None:
raise Exception(f"Couldn't parse type from line: {line}")
t = match.group(1)
# Simplifies e.g. `tensor_annotations.jax.Array0` to just `Array0`
t = re.sub(r'tensor_annotations.[^.]*\.', '', t)
types_dict[var] = t
return types.SimpleNamespace(**types_dict)
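# Illustration of the format being parsed (path and wording are illustrative;
# exact pytype output varies by version). A reveal-type line such as
#
#   File "/tmp/xyz/test.py", line 12, in <module>: tensor_annotations.jax.Array2[int8, A1, A2] [reveal-type]
#
# yields the captured type 'tensor_annotations.jax.Array2[int8, A1, A2]',
# which the prefix-stripping step above reduces to 'Array2[int8, A1, A2]'.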
def _parse_mypy_output(var_names: List[str],
lines: List[str]) -> types.SimpleNamespace:
"""Parses the inferred type of each variable from Mypy output."""
reveal_type_lines = [l for l in lines if 'Revealed type is' in l]
assert len(reveal_type_lines) == len(var_names)
types_dict = {}
for var, line in zip(var_names, reveal_type_lines):
match = re.search("Revealed type is '(.*)'", line)
if match is None:
raise Exception(f"Couldn't parse type from line: {line}")
t = match.group(1)
# Simplifies e.g. `tensor_annotations.jax.Array0` to just `Array0`
t = re.sub(r'tensor_annotations.[^.]*\.', '', t)
# Remove the '*' that Mypy suffixes types with if the types were inferred
# using type variable substitution.
t = t.replace('*', '')
# Mypy will format axis types as e.g. `test.A1`. Get rid of the `test.`.
t = re.sub(r'test.(A\d+)', r'\1', t)
# Mypy will format unparameterised generics as e.g. `Tensor1[Any]`, but
# we wrote tests assuming they'd be formatted as just `Tensor1`, so get
# rid of the `[Any]`.
t = t.replace('[Any]', '')
types_dict[var] = t
return types.SimpleNamespace(**types_dict)
def pytype_infer_shapes(
code: str,
) -> types.SimpleNamespace:
# pylint: disable=g-doc-args,g-doc-return-or-yield,g-doc-exception
"""Runs pytype on `code`, returning inferred shape of array/tensor variables.
Note that shapes are inferred based on the axis labels: axis label 'A1' is
assumed to represent a dimension of size 1, 'A2' a dimension of size 2, and so
on. For example, we assume that a tensor of type Tensor2[int8, A1, A2] has
shape (1, 2).
For example, if `code` is
x: tf.Tensor2[float32, A3, A5] = tf.zeros((3, 5))
y = tf.transpose(x) # tf.Tensor2[float32, A5, A3]
we return a SimpleNamespace `a` such that
a.x = (3, 5)
a.y = (5, 3)
This helper function exists so that we can easily compare the shape of the
real outputs of shape-changing functions (e.g. tf.transpose) to the shape
inferred by the type checker using our stubs, to confirm that our stubs
are correct.
See `pytype_infer_types` for more info.
Args:
code: The code to run pytype on.
"""
types_namespace = pytype_infer_types(code)
shapes_dict = {}
var_names = [d for d in dir(types_namespace)
if not d.startswith('_')]
for var in var_names:
var_type = getattr(types_namespace, var)
if var_type == 'Any':
shape = 'Any'
elif 'Array' not in var_type and 'Tensor' not in var_type:
continue
elif var_type.endswith('Array0') or var_type.endswith('Tensor0'):
shape = ()
else:
match = re.search(r'\[(.*)\]', var_type)
if match is None:
raise ValueError(f"Couldn't parse type '{var_type}'")
axis_types = match.group(1) # e.g. 'A1, A2'
axis_types_list = axis_types.split(', ')
unused_dtype, *shape_types = axis_types_list
shape_str_list = [t.replace('A', '') for t in shape_types]
shape = tuple(int(s) for s in shape_str_list)
shapes_dict[var] = shape
return types.SimpleNamespace(**shapes_dict)
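# Parsing sketch (type strings are illustrative): an inferred type of
# 'Array2[int8, A3, A5]' is split into the dtype 'int8' plus the axis labels
# ['A3', 'A5'], which map to the shape (3, 5); 'Array0' and 'Tensor0' map to
# the scalar shape ().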
class SaveCodeAsString:
r"""Saves code executed within the context manager.
Indentation is automatically adjusted such that the first line has no
indenation in the saved code. For example, if used as follows:
with SaveCodeString() as code_saver:
foo = 'bar'
f()
then `code_saver.code` would contain "foo = 'bar'\nf()".
"""
def __init__(self):
self._frame_where_entered = None
self._frame_where_exited = None
self.code: Optional[str] = None
def __enter__(self):
self._frame_where_entered = inspect.stack()[1]
return self
def __exit__(self, *_):
with open(self._frame_where_entered.filename, 'r') as f:
lines = f.readlines()
start_line_num = self._frame_where_entered.lineno - 1
start_line = lines[start_line_num]
start_indentation = len(start_line) - len(start_line.lstrip())
for line_num, line in enumerate(
lines[start_line_num + 1:],
start=start_line_num + 1,
):
if not line.strip():
continue
line_indentation = len(line) - len(line.lstrip())
if line_indentation <= start_indentation:
break
end_line_num = line_num
lines = lines[start_line_num + 1:end_line_num]
self.code = ''.join(lines).rstrip()
self.code = textwrap.dedent(self.code)
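# Minimal usage sketch (hypothetical, for illustration only; `_PREAMBLE` below
# stands for whatever import/type boilerplate a test would prepend). The
# context manager re-reads its caller's source file, so it must be used from
# an ordinary .py file:
#
#   with SaveCodeAsString() as code_saver:
#     x = jnp.zeros((3, 5))
#     y = jnp.transpose(x)
#   shapes = pytype_infer_shapes(_PREAMBLE + code_saver.code)
#   # shapes.x == (3, 5) and shapes.y == (5, 3) if the stubs are correct.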
|
tensor_annotations-master
|
tensor_annotations/tests/utils.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements experimental routines.
Experimental protocol corresponds to the one discussed in
"Kuzborskij, I., Vernade, C., Gyorgy, A., & Szepesvári, C. (2021, March).
Confident off-policy evaluation and selection through self-normalized importance
weighting. In International Conference on Artificial Intelligence and Statistics
(pp. 640-648). PMLR.".
function run_single_experiment(...) defines an experiment performed on a
single dataset given multiple estimators over multiple data splits.
function run_experiment_suite(...) generalizes the above to multiple datasets.
function print_results prints the results returned by run_experiment_suite.
"""
from typing import Dict, List, Tuple, Any
from absl import logging
import numpy as np
import tabulate
import offpolicy_selection_eslb.data as data
import offpolicy_selection_eslb.estimators as est
import offpolicy_selection_eslb.policies as policies
def run_single_experiment(
estimators: List[est.Estimator],
openml_id: int,
n_trials: int,
behavior_policy_temperature: float,
behavior_faulty_actions: List[int],
target_policy_specs: List[Tuple[str, Dict[str, Any]]],
reward_noise_p: float,
) -> Tuple[np.ndarray, np.ndarray, int]:
"""Returns scores of an experiment on a single dataset for all estimators.
Evaluates all estimators on a single dataset for a given number of trials,
given description of the behavior policy and specifications of target
policies.
Args:
estimators: A list of objects of a base class Estimator (imported as est).
openml_id: OpenML dataset id (integer).
n_trials: Number of experimental trials (data splits).
behavior_policy_temperature: Positive float controlling the temperature of a
Softmax behavior policy.
behavior_faulty_actions: List of labels on which the behavior policy makes
mistakes.
target_policy_specs: Tuple of target policy specifications consisting of
two-element
tuples("<policy class name>", <dict of arguments to be passed to the
constructor>) e.g. ( ("SoftmaxGAPolicy", dict( step_size=0.1, steps=1000,
temperature=0.1, obj_type=policies.TrainedPolicyObjType.IW)), ).
reward_noise_p: Probability of a Bernoulli noise added to the reward.
Returns:
Tuple (all_test_rewards, all_reference_rewards, dataset_name).
Here all_test_rewards is an np.array
of dimension (#estimators, n_test, n_trials) where each entry is a
test reward earned by the policy selected by a given estimator at a
particular trial (data split).
all_reference_rewards is an np.array
of dimension (n_test, n_trials) where each entry is a
test reward of the best policy in hindsight at a particular
trial (data split).
dataset_name is a human-readable OpenML dataset name.
"""
np.random.seed(1)
dataset = data.Dataset(
openml_id,
standardize=True,
log_frac=0.50,
subsample=1,
reward_noise=reward_noise_p,
random_state=0)
all_test_rewards = np.zeros((len(estimators), dataset.n_test, n_trials))
all_reference_rewards = np.zeros((dataset.n_test, n_trials))
contexts, labels = dataset.contexts_train, dataset.labels_train
test_contexts, test_labels = dataset.contexts_test, dataset.labels_test
action_set = dataset.get_action_set()
behavior_policy = policies.SoftmaxDataPolicy(
train_contexts=contexts,
train_labels=labels,
test_contexts=test_contexts,
test_labels=test_labels,
action_set=action_set,
temperature=behavior_policy_temperature,
faulty_actions=behavior_faulty_actions)
for i_trial in range(n_trials):
logging.info(
"\u001b[32mrun_single_experiment:: trial = %d/%d ... \u001b[0m",
i_trial + 1, n_trials)
np.random.seed(i_trial)
actions, _ = behavior_policy.query(contexts)
behavior_probs = behavior_policy.get_probs_by_actions(contexts, actions)
rewards = data.get_reward(actions, labels, reward_noise_p)
target_policies = []
for (t_pol_name, t_pol_params) in target_policy_specs:
if t_pol_name == "SoftmaxDataPolicy":
t_pol = policies.SoftmaxDataPolicy(
train_contexts=contexts,
train_labels=labels,
test_contexts=test_contexts,
test_labels=test_labels,
action_set=action_set,
**t_pol_params)
elif t_pol_name == "SoftmaxGAPolicy":
t_pol = policies.SoftmaxGAPolicy(action_set=action_set, **t_pol_params)
logging.debug("run_single_experiment:: training %s", str(t_pol))
t_pol.train(contexts, actions, rewards, behavior_probs)
target_policies.append(t_pol)
test_rewards, _, reference_test_rewards = est.evaluate_estimators(
contexts, actions, rewards, behavior_policy, target_policies,
estimators, dataset)
all_test_rewards[:, :, i_trial] = test_rewards
all_reference_rewards[:, i_trial] = reference_test_rewards
return all_test_rewards, all_reference_rewards, dataset.name
def run_experiment_suite(
list_data_ids: List[int],
n_trials: int,
behavior_policy_temperature: float,
behavior_faulty_actions: List[int],
target_policy_specs: List[Tuple[str, Dict[str, Any]]],
reward_noise_p: float,
estimators: List[est.Estimator],
):
"""Returns results of an experimental suite.
Evaluates all estimators on all datasets for a given number of trials,
given description of the behavior policy and specifications of target
policies.
Args:
list_data_ids: List of OpenML dataset IDs.
n_trials: Number of experimental trials (data splits).
behavior_policy_temperature: Positive float controlling the temperature of a
behavior Softmax policy.
behavior_faulty_actions: List of labels on which the behavior policy makes
mistakes.
target_policy_specs: Tuple of target policy specifications consisting of
two-element tuples ("<policy class name>", <dict of arguments to
be passed to the constructor>) e.g.
("SoftmaxGAPolicy",
dict(step_size=0.1, steps=1000, temperature=0.1,
obj_type=policies.TrainedPolicyObjType.IW))
reward_noise_p: Probability of a Bernoulli noise added to the reward.
estimators: List of objects of a base class Estimator.
Returns: A tuple (mean_test_rewards, std_test_rewards, mean_reference_rewards,
std_reference_rewards, dataset_names). Here mean_test_rewards and
std_test_rewards are np.array's of dimension (#estimators, #datasets).
mean_test_rewards stand for the average over data splits.
mean_reference_rewards is np.array of dimension #datasets, and stands for
the average reward of the best policy in hindsight, over data splits.
std_* stands for the standard deviation. dataset_names stands for
human-readable dataset names.
"""
mean_test_rewards = np.zeros((len(estimators), len(list_data_ids)))
std_test_rewards = np.zeros((len(estimators), len(list_data_ids)))
mean_reference_rewards = np.zeros(len(list_data_ids))
std_reference_rewards = np.zeros(len(list_data_ids))
dataset_names = []
for data_id_i, data_id in enumerate(list_data_ids):
logging.info("\u001b[32mrun_experiment_suite:: dataset = %d\u001b[0m",
data_id)
(test_rewards_for_dataset, reference_rewards_for_dataset,
dataset_name) = run_single_experiment(
estimators=estimators,
openml_id=data_id,
n_trials=n_trials,
behavior_policy_temperature=behavior_policy_temperature,
behavior_faulty_actions=behavior_faulty_actions,
target_policy_specs=target_policy_specs,
reward_noise_p=reward_noise_p)
mean_test_rewards[:, data_id_i] = np.array([
test_rewards_for_dataset[i, :, :].mean()
for i in range(test_rewards_for_dataset.shape[0])
])
std_test_rewards[:, data_id_i] = np.array([
test_rewards_for_dataset[i, :, :].std(axis=0).mean()
for i in range(test_rewards_for_dataset.shape[0])
])
mean_reference_rewards[data_id_i] = np.nanmean(
reference_rewards_for_dataset)
std_reference_rewards[data_id_i] = np.nanstd(reference_rewards_for_dataset)
dataset_names.append(dataset_name)
return (mean_test_rewards, std_test_rewards, mean_reference_rewards,
std_reference_rewards, dataset_names)
def print_results(
estimators: List[est.Estimator],
dataset_names: List[str],
mean_test_rewards: np.ndarray,
std_test_rewards: np.ndarray,
mean_reference_rewards: np.ndarray,
std_reference_rewards: np.ndarray,
table_format: str = "psql",
):
"""Prints results of run_experiment_suite(...) in a pretty table.
Printing routines are implemented by the tabulate package.
Args:
estimators: A list of n_est objects of a base class Estimator.
dataset_names: A list of strings, names of the corresponding ids.
mean_test_rewards: Returned by run_experiment_suite.
std_test_rewards: Returned by run_experiment_suite.
mean_reference_rewards: Returned by run_experiment_suite.
std_reference_rewards: Returned by run_experiment_suite.
table_format: Parameter tablefmt of tabulate(...).
"""
headers = [r"Estimator \ Dataset"] + dataset_names
rows = []
for (i, est_) in enumerate(estimators):
rows.append([est_.get_abbrev()] + list(
map(lambda x: "%.3f ± %.3f" % x,
zip(mean_test_rewards[i, :], std_test_rewards[i, :]))))
rows.append(["Best policy on the test set"] + list(
map(lambda x: "%.3f ± %.3f" % x,
zip(mean_reference_rewards, std_reference_rewards))))
print(tabulate.tabulate(rows, headers=headers, tablefmt=table_format))
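# End-to-end sketch (dataset IDs and hyper-parameters below are placeholders,
# not recommended settings):
#
#   estimators = est.get_estimators(
#       delta=0.05, eslb_iter=100, eslb_batch_size=100,
#       eslb_bias_type=est.ESLBBiasType.Bernstein)
#   results = run_experiment_suite(
#       list_data_ids=[...], n_trials=5,
#       behavior_policy_temperature=1.0, behavior_faulty_actions=[0],
#       target_policy_specs=[("SoftmaxGAPolicy",
#                             dict(temperature=0.1,
#                                  obj_type=policies.TrainedPolicyObjType.IW))],
#       reward_noise_p=0.1, estimators=estimators)
#   mean_r, std_r, mean_ref, std_ref, names = results
#   print_results(estimators, names, mean_r, std_r, mean_ref, std_ref)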
|
offpolicy_selection_eslb-main
|
experiment.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multiple sampling from simplex tool."""
import numpy as np
def sample_from_simplices_m_times(p: np.ndarray, m: int) -> np.ndarray:
"""Samples from each of n probability simplices for m times.
Args:
p: n-times-K matrix where each row describes a probability simplex
m: number of times to sample
Returns:
n-times-m matrix of indices of simplex corners.
"""
axis = 1
r = np.expand_dims(np.random.rand(p.shape[1 - axis], m), axis=axis)
p_ = np.expand_dims(p.cumsum(axis=axis), axis=2)
return (np.repeat(p_, m, axis=2) > r).argmax(axis=1)
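if __name__ == "__main__":
  # Self-contained demo (illustrative values only, not part of the library
  # API): draw m=4 corner indices from each of two 3-simplex rows of p.
  example_p = np.array([[0.1, 0.6, 0.3],
                        [0.8, 0.1, 0.1]])
  example_samples = sample_from_simplices_m_times(example_p, m=4)
  print(example_samples.shape)  # (2, 4); entries are indices in {0, 1, 2}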
|
offpolicy_selection_eslb-main
|
utils.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements several value estimator for contextual bandit off-policy problem.
All estimators are described in
"Kuzborskij, I., Vernade, C., Gyorgy, A., & Szepesvári, C. (2021, March).
Confident off-policy evaluation and selection through self-normalized importance
weighting. In International Conference on Artificial Intelligence and Statistics
(pp. 640-648). PMLR.".
In the following we occasionally refer to the statements in the paper
(e.g. Theorem 1, Proposition 1).
class ESLB implements an Efron-Stein high probability bound for off-policy
evaluation (Theorem 1 and Algorithm 1).
class IWEstimator implements the standard importance weighted estimator (IW).
class SNIWEstimator implements a self-normalized version of IW.
class IWLambdaEmpBernsteinEstimator implements a high probability empirical
Bernstein bound for λ-corrected IW (the estimator is stabilized by adding λ
to the denominator) with appropriate tuning of λ (see Proposition 1).
"""
import abc
import math
import enum
from typing import List
from absl import logging
import numpy as np
from offpolicy_selection_eslb import data
from offpolicy_selection_eslb import policies
from offpolicy_selection_eslb import utils
class Estimator(abc.ABC):
"""Abstract class for a value estimator."""
@abc.abstractmethod
def __call__(
self,
t_probs: np.ndarray,
b_probs: np.ndarray,
actions: np.ndarray,
rewards: np.ndarray,
):
"""Computes an estimate.
Here n is a sample size, while K is a number actions.
Args:
t_probs: n-times-K matrix, where i-th row corresponds to π_t(. | X_i)
(target probabilities under the target policy).
b_probs: n-times-K matrix, where i-th row corresponds to π_b(. | X_i)
(target probabilities under the behavior policy).
actions: n-sized vector of actions.
rewards: n-sized reward vector.
Returns:
Must return a Dict with "estimate" entry present.
"""
@abc.abstractmethod
def get_name(self) -> str:
"""Returns a the name of the estimator.
"""
@abc.abstractmethod
def get_abbrev(self) -> str:
"""Returns a shorter version of the name returned by get_name.
"""
class ESLBBiasType(enum.Enum):
"""Bias control type of ESLB estimator.
ESLBBiasType.MultOneHot = Multiplicative bias (only for `one-hot' rewards).
ESLBBiasType.Bernstein = Bernstein bias (for rewards in [0,1], looser than above).
"""
MultOneHot = "MultOneHot"
Bernstein = "Bernstein"
def __str__(self):
return str(self.value)
class ESLB(Estimator):
"""Implements a Semi-Empirical Efron-Stein bound for the SNIW (Self-normalized Importance Weighted estimator).
Attributes:
delta: Error probability in (0,1).
n_iterations: Number of Monte-Carlo simulation iterations for approximating
a multiplicative bias and a variance proxy.
n_batch_size: Monte-Carlo simulation batch size.
bias_type: type of bias control to use (see ESLBBiasType).
"""
def __init__(
self,
delta: float,
n_iterations: int,
n_batch_size: int,
bias_type: ESLBBiasType
):
"""Constructs an estimator.
The estimate holds with probability 1-delta.
Args:
delta: Error probability in (0,1) for the confidence interval.
n_iterations: Monte-Carlo simulation iterations.
n_batch_size: Monte-Carlo simulation batch size.
bias_type: type of bias control to use.
"""
self.delta = delta
self.n_iterations = n_iterations
self.n_batch_size = n_batch_size
self.bias_type = bias_type
def get_name(self):
"""Returns the long name of the estimator."""
return "Semi-Empirical Efron-Stein bound for the Self-normalized Estimator"
def get_abbrev(self):
"""Returns the short name of the estimator."""
return "ESLB"
def __call__(
self,
t_probs: np.ndarray,
b_probs: np.ndarray,
actions: np.ndarray,
rewards: np.ndarray,
):
"""Computes Efron-Stein lower bound of Theorem 1 as described in Algorithm 1.
Here n is a sample size, while K is a number actions.
Args:
t_probs: n-times-K matrix, where i-th row corresponds to π_t(. | X_i)
(target probabilities under the target policy).
b_probs: n-times-K matrix, where i-th row corresponds to π_b(. | X_i)
(target probabilities under the behavior policy).
actions: n-sized vector of actions.
rewards: n-sized reward vector.
Returns:
A dictionary with 8 entries:
lower_bound: Corresponds to the actual lower bound.
estimate: Same as lower_bound (required by select_policy(...)).
est_value: Empirical value.
concentration: Concentration (deviation) term.
mult_bias: Multiplicative bias.
concentration_of_contexts: Hoeffding term, concentration of contexts.
var_proxy: Variance proxy.
expected_var_proxy: Estimate of its expected counterpart.
"""
conf = math.log(2.0 / self.delta)
n = len(actions)
ix_1_n = np.arange(n)
# Importance weights
weights = t_probs[ix_1_n, actions] / b_probs[ix_1_n, actions]
weights_cumsum = weights.cumsum()
weights_cumsum = np.repeat(
np.expand_dims(weights_cumsum, axis=1), self.n_batch_size, axis=1)
weights_repeated = np.repeat(
np.expand_dims(weights, axis=1), self.n_batch_size, axis=1)
weight_table = t_probs / b_probs
var_proxy_unsumed = np.zeros((n,))
expected_var_proxy_unsumed = np.zeros((n,))
loo_expected_recip_weights = 0.0
are_rewards_binary = ((rewards==0) | (rewards==1)).all()
if self.bias_type == ESLBBiasType.MultOneHot and not are_rewards_binary:
raise Exception("""bias_type=ESLBBiasType.MultOneHot only supports one-hot rewards.
Consider using bias_type=ESLBBiasType.Bernstein""")
logging.debug(
"ESLB:: Running Monte-Carlo estimation of the variance proxy and bias")
logging.debug("ESLB:: iterations = %d, batch size = %d", self.n_iterations,
self.n_batch_size)
for i in range(self.n_iterations):
actions_sampled = utils.sample_from_simplices_m_times(
b_probs, self.n_batch_size)
weights_sampled = weight_table[ix_1_n, actions_sampled.T].T
weights_sampled_cumsum = weights_sampled[::-1, :].cumsum(axis=0)[::-1, :]
# Hybrid sums: sums of empirical and sampled weights
weights_hybrid_sums = np.copy(weights_cumsum)
weights_hybrid_sums[:-1, :] += weights_sampled_cumsum[1:, :]
# Computing variance proxy
weights_hybrid_sums_replace_k = weights_hybrid_sums - weights_repeated + weights_sampled
sn_weights = weights_repeated / weights_hybrid_sums
sn_weights_prime = weights_sampled / weights_hybrid_sums_replace_k
var_proxy_t = (sn_weights + sn_weights_prime)**2
var_proxy_new_item = var_proxy_t.mean(axis=1)
var_proxy_unsumed += (var_proxy_new_item - var_proxy_unsumed) / (i + 1)
actions_sampled_for_expected_var = utils.sample_from_simplices_m_times(
b_probs, self.n_batch_size)
weights_sampled_for_expected_var = weight_table[
ix_1_n, actions_sampled_for_expected_var.T].T
expected_var_proxy_new_item = (
(weights_sampled_for_expected_var /
weights_sampled_for_expected_var.sum(axis=0))**2).mean(axis=1)
expected_var_proxy_unsumed += (expected_var_proxy_new_item -
expected_var_proxy_unsumed) / (i + 1)
if self.bias_type == ESLBBiasType.MultOneHot:
# Computing bias (loo = leave-one-out)
# Rewards are `one-hot'
actions_sampled_for_bias = utils.sample_from_simplices_m_times(
b_probs, self.n_batch_size)
weights_sampled_for_bias = weight_table[ix_1_n,
actions_sampled_for_bias.T].T
loo_sum_weights = np.outer(
np.ones((n,)),
np.sum(weights_sampled_for_bias, axis=0)
) - weights_sampled_for_bias
loo_expected_recip_weights += (1 / np.min(loo_sum_weights, axis=0)).mean()
var_proxy = var_proxy_unsumed.sum()
expected_var_proxy = expected_var_proxy_unsumed.sum()
if self.bias_type == ESLBBiasType.MultOneHot:
loo_expected_recip_weights /= self.n_iterations
eff_sample_size = 1.0 / loo_expected_recip_weights
mult_bias = min(1.0, eff_sample_size / n)
add_bias = 0
elif self.bias_type == ESLBBiasType.Bernstein:
# Computing Bernstein bias control (based on lower tail Bernstein's inequality)
expected_sum_weights_sq = (t_probs**2 / b_probs).sum()
bias_x = math.log(n) / 2
mult_bias = 1 - math.sqrt(2 * expected_sum_weights_sq * bias_x) / n
mult_bias = max(0, mult_bias)
add_bias = math.exp(-bias_x)
concentration = math.sqrt(
2.0 * (var_proxy + expected_var_proxy) *
(conf + 0.5 * math.log(1 + var_proxy / expected_var_proxy)))
concentration_of_contexts = math.sqrt(conf / (2 * n))
est_value = weights.dot(rewards) / weights.sum()
lower_bound = mult_bias * (est_value
- concentration
- add_bias) - concentration_of_contexts
return dict(
estimate=max(0, lower_bound),
lower_bound=max(0, lower_bound),
est_value=est_value,
concentration=concentration,
mult_bias=mult_bias,
concentration_of_contexts=concentration_of_contexts,
var_proxy=var_proxy,
expected_var_proxy=expected_var_proxy)
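# Synthetic sanity-check sketch (all numbers are made up for illustration;
# with binary rewards either bias_type is admissible):
#
#   n, k = 1000, 4
#   b_probs = np.full((n, k), 1.0 / k)
#   t_probs = np.full((n, k), 1.0 / k)
#   actions = np.random.randint(k, size=n)
#   rewards = np.random.binomial(1, 0.7, size=n).astype(float)
#   eslb = ESLB(delta=0.05, n_iterations=50, n_batch_size=100,
#               bias_type=ESLBBiasType.MultOneHot)
#   bound = eslb(t_probs=t_probs, b_probs=b_probs,
#                actions=actions, rewards=rewards)["lower_bound"]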
class IWEstimator(Estimator):
"""Implements an importance-weighted estimator of the value."""
def __call__(
self,
t_probs: np.ndarray,
b_probs: np.ndarray,
actions: np.ndarray,
rewards: np.ndarray,
):
"""Computes an importance-weighted (IW) estimate.
Here n is a sample size, while K is a number actions.
Args:
t_probs: n-times-K matrix, where i-th row corresponds to π_t(. | X_i)
(target probabilities under the target policy).
b_probs: n-times-K matrix, where i-th row corresponds to π_b(. | X_i)
(target probabilities under the behavior policy).
actions: n-sized vector of actions.
rewards: n-sized reward vector.
Returns:
A dictionary with 1 entry:
estimate: Importance-weighted estimate (required by select_policy(...)).
"""
n = len(actions)
ix_1_n = np.arange(n)
# Importance weights
weights = t_probs[ix_1_n, actions] / b_probs[ix_1_n, actions]
estimate = rewards.dot(weights) / n
return dict(estimate=estimate)
def get_name(self):
"""Returns a long name of an estimator."""
return "Importance-weighted estimator"
def get_abbrev(self):
"""Returns a short name of an estimator."""
return "IW"
class SNIWEstimator(Estimator):
"""Implements a self-normalized importance-weighted estimator of the value."""
def get_name(self):
"""Returns a long name of an estimator."""
return "Self-normalized importance-weighted estimator"
def get_abbrev(self):
"""Returns a short name of an estimator."""
return "SNIW"
def __call__(
self,
t_probs: np.ndarray,
b_probs: np.ndarray,
actions: np.ndarray,
rewards: np.ndarray,
):
"""Computes a self-normalized importance-weighted (SNIW) estimate.
Here n is a sample size, while K is a number actions.
Args:
t_probs: n-times-K matrix, where i-th row corresponds to π_t(. | X_i)
(target probabilities under the target policy).
b_probs: n-times-K matrix, where i-th row corresponds to π_b(. | X_i)
(target probabilities under the behavior policy).
actions: n-sized vector of actions.
rewards: n-sized reward vector.
Returns:
A dictionary with 1 entry:
estimate: SNIW estimate (required by select_policy(...)).
"""
n = len(actions)
ix_1_n = np.arange(n)
# Importance weights
weights = t_probs[ix_1_n, actions] / b_probs[ix_1_n, actions]
estimate = rewards.dot(weights) / weights.sum()
return dict(estimate=estimate)
class IWLambdaEmpBernsteinEstimator(Estimator):
"""Implements an empirical Bernstein confidence bound for λ-corrected IW.
λ-corrected importance-weighted (IW) estimator is defined w.r.t. weights of a
form π_target(A|X) / (π_behavior(A|X) + λ), where λ=1/sqrt(n),
and the choice of λ ensures asymptotic convergence of the confidence bound
(see the accompanying paper for details).
Attributes:
delta: Error probability in (0,1).
"""
def __init__(self, delta: float):
"""Constructs an estimator.
The estimate holds with probability 1-delta.
Args:
delta: Error probability in (0,1).
"""
self.delta = delta
def get_name(self):
"""Returns a long name of an estimator."""
return ("Empirical Bernstein bound for λ-corrected importance-weighted "
"estimator (λ=1/sqrt(n))")
def get_abbrev(self):
"""Returns a short name of an estimator."""
return "Emp. Bernstein for λ-IW"
def __call__(
self,
t_probs: np.ndarray,
b_probs: np.ndarray,
actions: np.ndarray,
rewards: np.ndarray,
):
"""Computes Empirical Bernstein λ-IW estimate.
Computes an estimate according to the Empirical Bernstein bound for
λ-corrected importance-weighted (see Proposition 1).
Here n is a sample size, while K is a number actions.
Args:
t_probs: n-times-K matrix, where i-th row corresponds to π_t(. | X_i)
(target probabilities under the target policy).
b_probs: n-times-K matrix, where i-th row corresponds to π_b(. | X_i)
(target probabilities under the behavior policy).
actions: n-sized vector of actions.
rewards: n-sized reward vector.
Returns:
A dictionary with 7 entries:
lower_bound: Corresponds to the actual lower bound.
estimate: Same as lower_bound (required by select_policy(...)).
est_value: Empirical value.
concentration: Concentration term.
bias: Bias term.
concentration_of_contexts: Hoeffding term, concentration of contexts.
est_var: sample variance of the estimator.
"""
n = len(actions)
ix_1_n = np.arange(n)
conf = math.log(3.0 / self.delta)
lambda_corr = 1.0 / math.sqrt(n)
# Importance weights with lambda correction
weights = t_probs[ix_1_n, actions] / (
b_probs[ix_1_n, actions] + lambda_corr)
v_estimates = weights * rewards
est_value = np.mean(v_estimates)
est_var = np.var(v_estimates)
bias = 0.0
# Computing the bias term
for i_feature in range(n):
for k_action in range(b_probs.shape[1]):
t_prob_context_k = t_probs[i_feature, k_action]
b_prob_context_k = b_probs[i_feature, k_action]
bias += t_prob_context_k * abs(b_prob_context_k /
(b_prob_context_k + lambda_corr) - 1.0)
bias /= n
concentration = math.sqrt(
(2 * conf / n) * est_var) + (7 * conf) / (3 * lambda_corr * (n - 1))
concentration_of_contexts = math.sqrt(2 * conf / n)
lower_bound = est_value - concentration - bias - concentration_of_contexts
return dict(
estimate=max(0, lower_bound),
lower_bound=max(0, lower_bound),
est_value=est_value,
concentration=concentration,
bias=bias,
concentration_of_contexts=concentration_of_contexts,
est_var=est_var)
class SNIWChebyshevEstimator(Estimator):
"""Implements Chebyshev bound for SNIW estimator.
Attributes:
delta: Error probability in (0,1).
"""
def __init__(self, delta: float):
"""Constructs an estimator.
The estimate holds with probability 1-delta.
Args:
delta: Error probability in (0,1).
"""
self.delta = delta
def __call__(
self,
t_probs: np.ndarray,
b_probs: np.ndarray,
actions: np.ndarray,
rewards: np.ndarray,
):
"""Computes Chebyshev bound for SNIW estimate.
Computes an estimate according to the Chebyshev bound for
the self-normalized importance-weighted estimator (see Proposition 1).
Here n is a sample size, while K is a number actions.
Args:
t_probs: n-times-K matrix, where i-th row corresponds to π_t(. | X_i)
(target probabilities under the target policy).
b_probs: n-times-K matrix, where i-th row corresponds to π_b(. | X_i)
(target probabilities under the behavior policy).
actions: n-sized vector of actions.
rewards: n-sized reward vector.
Returns:
A dictionary with 7 entries:
lower_bound: Corresponds to the actual lower bound;
estimate: Same as lower_bound (required by select_policy(...))
est_value: Empirical value;
concentration: Concentration term;
mult_bias: Multiplicative bias term;
concentration_of_contexts: Hoeffding term, concentration of contexts;
est_var: Sample variance of the estimator;
"""
n = len(actions)
ix_1_n = np.arange(n)
conf = 3.0 / self.delta
ln_conf = math.log(3.0 / self.delta)
# Importance weights
weights = t_probs[ix_1_n, actions] / b_probs[ix_1_n, actions]
t_probs_all_actions = t_probs[ix_1_n, :]
b_probs_all_actions = b_probs[ix_1_n, :]
weights = weights.squeeze()
est_value = rewards.dot(weights) / weights.sum()
expected_sum_weights_sq = (t_probs_all_actions**2 /
b_probs_all_actions).sum()
eff_sample_size = max(
n - math.sqrt(2.0 * ln_conf * expected_sum_weights_sq), 0)
if eff_sample_size > 0:
est_var = expected_sum_weights_sq / eff_sample_size**2
concentration = math.sqrt(conf * est_var)
concentration_of_contexts = math.sqrt((2 * ln_conf) / n)
mult_bias = eff_sample_size / n
lower_bound = mult_bias * (est_value -
concentration) - concentration_of_contexts
else:
est_var = np.nan
concentration = np.nan
concentration_of_contexts = np.nan
mult_bias = np.nan
lower_bound = 0.0
return dict(
estimate=max(0, lower_bound),
lower_bound=max(0, lower_bound),
est_value=est_value,
concentration=concentration,
mult_bias=mult_bias,
concentration_of_contexts=concentration_of_contexts,
est_var=est_var)
def get_name(self):
"""Returns a long name of an estimator."""
return "Chebyshev bound for self-normalized importance-weighted estimator"
def get_abbrev(self):
"""Returns a short name of an estimator."""
return "Cheb-SNIW"
def select_policy(
contexts: np.ndarray,
actions: np.ndarray,
rewards: np.ndarray,
b_policy: policies.Policy,
t_policies: List[policies.Policy],
estimator: Estimator,
):
"""Selects a policy given an estimator.
Args:
contexts: A n x d matrix of n context vectors.
actions: A n-vector of actions.
rewards: A n-vector of rewards.
b_policy: Behavior policy implementing get_probs(...) method (see
SoftmaxDataPolicy in policies.py).
t_policies: A list of objects of implementing get_probs(...) method
(see SoftmaxGAPolicy).
estimator: An object of a base class Estimator.
Returns:
A tuple (estimate, policy) with the highest estimate.
"""
estimates_and_policies = []
b_probs = b_policy.get_probs(contexts)
for pol in t_policies:
t_probs = pol.get_probs(contexts)
result_dict = estimator(
t_probs=t_probs, b_probs=b_probs, actions=actions, rewards=rewards)
estimates_and_policies.append((result_dict["estimate"], pol))
ordered_estimates_and_policies = sorted(
estimates_and_policies, key=lambda x: x[0])
return ordered_estimates_and_policies[-1]
def evaluate_estimators(
contexts: np.ndarray,
actions: np.ndarray,
rewards: np.ndarray,
b_policy: policies.Policy,
t_policies: List[policies.Policy],
estimators: List[Estimator],
dataset: data.Dataset,
):
"""Evaluates multiple estimators based on their ability to select a policy.
Args:
contexts: A n x d matrix of n context vectors.
actions: A n-vector of actions.
rewards: A n-vector of rewards.
b_policy: Behavior policy implementing get_probs(...) method (see
SoftmaxDataPolicy).
t_policies: A list of n_pol objects of implementing get_probs(...) method
(see SoftmaxGAPolicy).
estimators: A list of n_est objects of a base class Estimator.
dataset: Object of the class Dataset.
Returns:
A tuple with three elements: (test_rewards, winners, reference_test_rewards)
where test_rewards is a (n_est x n_test) matrix of test rewards such
that n_test is the test sample size; winners is a list of size n_est holding
the policy selected by each estimator; reference_test_rewards
is a n_test-vector of test rewards of the best policy on the test set in hindsight.
"""
winners = [] # winner policies of each estimator
test_rewards = np.zeros((len(estimators), dataset.n_test))
for (est_i, est) in enumerate(estimators):
est_winner, pol_winner = select_policy(contexts, actions, rewards, b_policy,
t_policies, est)
winners.append(pol_winner)
if est_winner > 0:
_, _, pol_winner_test_rewards, _ = dataset.get_test(pol_winner)
test_rewards[est_i, :] = pol_winner_test_rewards
else:
test_rewards[est_i, :] = np.nan
logging.debug("evaluate_estimators:: est '%s' didn't score anything.",
est.get_abbrev())
# Getting test reward of the best policy (as a reference)
reference_test_rewards = []
for pol in t_policies:
_, _, reference_test_rewards_for_pol, _ = dataset.get_test(pol)
reference_test_rewards.append(reference_test_rewards_for_pol)
reference_test_rewards = sorted(reference_test_rewards, key=np.mean)[-1]
return test_rewards, winners, reference_test_rewards
def get_estimators(
delta,
eslb_iter: int,
eslb_batch_size: int,
eslb_bias_type: ESLBBiasType
):
"""Constructs estimators to be used in the benchmark.
Args:
delta: Error probability in (0,1).
eslb_iter: Monte-Carlo simulation iterations for ESLB estimator.
eslb_batch_size: Monte-Carlo simulation batch size for ESLB estimator.
eslb_bias_type: type of bias control to use (see ESLBBiasType).
Returns:
A list of Estimator objects; calling each one on logged data returns a
dictionary containing at least an "estimate" entry.
"""
estimators = [
IWEstimator(),
SNIWEstimator(),
SNIWChebyshevEstimator(delta=delta),
IWLambdaEmpBernsteinEstimator(delta=delta),
ESLB(delta=delta, n_iterations=eslb_iter, n_batch_size=eslb_batch_size,
bias_type=eslb_bias_type),
]
return estimators
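# Typical wiring (sketch; `contexts`, `actions`, `rewards`, `behavior_policy`,
# `target_policies` and `dataset` are placeholders produced by data.py,
# policies.py and the logging step in experiment.py):
#
#   ests = get_estimators(delta=0.05, eslb_iter=100, eslb_batch_size=100,
#                         eslb_bias_type=ESLBBiasType.Bernstein)
#   best_estimate, best_policy = select_policy(
#       contexts, actions, rewards, behavior_policy, target_policies, ests[0])
#   test_rewards, winners, reference = evaluate_estimators(
#       contexts, actions, rewards, behavior_policy, target_policies,
#       ests, dataset)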
|
offpolicy_selection_eslb-main
|
estimators.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Policies are functions mapping contexts to actions.
Policies are described in
"Kuzborskij, I., Vernade, C., Gyorgy, A., & Szepesvári, C. (2021, March).
Confident off-policy evaluation and selection through self-normalized importance
weighting. In International Conference on Artificial Intelligence and Statistics
(pp. 640-648). PMLR.".
class SoftmaxDataPolicy is a mock-up policy which can hold either training
sample or a testing sample (each of which consists of context and labels).
When either set of contexts is passed to the policy (get_probs(...))
it returns action probabilities associated with those contexts.
Note that this is a mock-up policy, so only one of the two samples is supported.
class SoftmaxGAPolicy implements a softmax policy with linear parameterized
potential, where parameters are fitted by the gradient ascent maximizing
either importance weighted or self-normalized importance weighted estimator.
"""
import abc
import enum
import math
from typing import Sequence, NamedTuple
from absl import logging
import jax
from jax import numpy as jnp
from jax import scipy as jsc
import numpy as np
import scipy
import sklearn.preprocessing as skl_prep
from offpolicy_selection_eslb.utils import sample_from_simplices_m_times
class Query(NamedTuple):
"""Actions generated by a (randomized) policy when given a set of contexts.
Attributes:
actions: n-times-1 Array -- chosen (sampled) actions
probabilities: n-times-1 Array -- corresponding probabilities
"""
actions: np.ndarray
probabilities: np.ndarray
def log_vhat_importance_weighting(
parameters: np.ndarray,
temperature: float,
contexts: np.ndarray,
actions: np.ndarray,
rewards: np.ndarray,
b_prob: np.ndarray,
) -> np.ndarray:
"""Returns the log of importance weighted estimator.
Returns the log of importance weighted estimator where each
importance weight is computed w.r.t. the softmax target policy defined
w.r.t. a linear model as defined in the description of a class.
Args:
parameters: Parameters of the linear model of a target policy.
temperature: Positive float controlling the temperature of a Softmax
policy.
contexts: Array of contexts (n-times-d, d=data dim., n=sample size).
actions: Actions (integers).
rewards: Rewards (float).
b_prob: Probabilities corresponding to (context, action) pairs
according to the behavior policy.
Returns: The logarithm of importance-weighted estimate.
"""
n, _ = contexts.shape
v = (1.0 / temperature) * contexts.dot(parameters)
pot = (1.0 / temperature) * (contexts *
parameters[:, actions].T).sum(axis=1)
a = jnp.log(rewards / (n * b_prob)) - jsc.special.logsumexp(v, axis=1)
rs = jsc.special.logsumexp(pot + a, axis=0)
return rs
def log_vhat_sn_importance_weighting(
parameters: np.ndarray,
temperature: float,
contexts: np.ndarray,
actions: np.ndarray,
rewards: np.ndarray,
b_prob: np.ndarray,
) -> np.ndarray:
"""Returns a log of self-normalized (SN) importance weighted estimator.
Returns a log of (SN) importance weighted estimator where each
importance weight is computed w.r.t. the softmax target policy defined
w.r.t. a linear model as defined in the description of a class.
Args:
parameters: Parameters of the linear model of a target policy.
temperature: Positive float controlling the temperature of a Softmax
policy.
contexts: Array of contexts (n-times-d, d=data dim., n=sample size).
actions: Actions (integers).
rewards: Rewards (float).
b_prob: Probabilities corresponding to (context, action) pairs
according to the behavior policy.
Returns: The logarithm of SN importance-weighted estimate.
"""
v = (1.0 / temperature) * contexts.dot(parameters)
pot = (1.0 / temperature) * (contexts *
parameters[:, actions].T).sum(axis=1)
a = jnp.log(rewards / b_prob) - jsc.special.logsumexp(v, axis=1)
ln_numer = jsc.special.logsumexp(pot + a, axis=0)
a = -jnp.log(b_prob) - jsc.special.logsumexp(v, axis=1)
ln_denom = jsc.special.logsumexp(pot + a, axis=0)
return ln_numer - ln_denom
class Policy(abc.ABC):
"""A Policy samples actions given contexts.
"""
@abc.abstractmethod
def query(self, contexts: np.ndarray) -> Query:
"""Returns actions and their probs sampled by Policy given the contexts.
Args:
contexts: Array of contexts (n-times-d, d=data dim., n=sample size), which
are either training or testing contexts provided during the
initialization.
Returns: A Tuple of arrays of actions (int) and corresponding probs (float)
"""
@abc.abstractmethod
def get_probs(self, contexts: np.ndarray) -> np.ndarray:
"""Returns probability distribution over actions for each context.
The softmax policy is defined as a probability vector
exp(alt_bin_labels / temp) / sum(exp(alt_bin_labels / temp))
where temp is a temperature of a policy and
alt_bin_labels is a binary encoding of labels altered by alter_labels(...)
Args:
contexts: Array of contexts (n-times-d, d=data dim., n=sample size), which
are either training or testing contexts provided during the
initialization.
Returns: Array of probabilities according to the policy, where K
is the number of actions (size n-times-K).
Raises:
NotImplementedError: when contexts is not training or testing contexts
"""
class TrainedPolicyObjType(enum.Enum):
"""Softmax gradient ascent fitted policy types with Objective function.
TrainedPolicyObjType.IW = importance-weighted estimator.
TrainedPolicyObjType.SNIW = self-normalized importance-weighted.
"""
IW = "IW"
SNIW = "SNIW"
def __str__(self):
return str(self.value)
class SoftmaxDataPolicy(Policy):
"""Memorization policy (using true labels).
This object can hold either training sample or a testing sample
(each of which consists of context and labels).
When either set of contexts is passed to the policy (get_probs(...))
it returns action probabilities associated with those contexts.
Note that this is a mock-up policy, so only one of the two samples is
supported.
Attributes:
action_set: A list of unique integer actions.
train_contexts: A n-times-d array of training contexts(d=data dim., n=sample
size).
train_labels: A n-array of training labels.
test_contexts: A n'-times-d array of test contexts (d=data dim., n'=sample
size).
test_labels: A n'-array of test labels.
temperature: A positive float controlling the temp. of a Softmax policy.
faulty_actions: A list of labels where the behavior policy makes mistakes.
rand: Random state of numpy.random.RandomState type.
"""
def __init__(
self,
train_contexts: np.ndarray,
train_labels: np.ndarray,
test_contexts: np.ndarray,
test_labels: np.ndarray,
action_set: Sequence[int],
temperature: float,
faulty_actions: Sequence[int],
):
"""Constructs a Policy.
Args:
train_contexts: Array of training contexts (n-times-d, d=data dim.,
n=sample size).
train_labels: Array of training labels (size n).
test_contexts: Array of test contexts (n'-times-d, d=data dim.,
n'=sample size).
test_labels: Array of test labels (size n').
action_set: List of unique integer actions.
temperature: Positive float controlling the temperature of a Softmax
policy.
faulty_actions: List of labels on which the behavior policy makes
mistakes.
"""
self.action_set = action_set
self.train_contexts = train_contexts
self.train_labels = train_labels
self.test_contexts = test_contexts
self.test_labels = test_labels
self.temperature = temperature
self.faulty_actions = set(faulty_actions)
self.reset_noise(0)
def reset_noise(self, seed: int):
"""Resets a random state given a seed.
Args:
seed: Integer seed for random state
"""
self.rand = np.random.RandomState(seed)
def alter_labels(self, labels: np.ndarray):
"""Returns altered labels according to the self.faulty_actions spec.
Labels are altered by shifting each label contained in self.faulty_action
to one forward (or to 0 if we have an overflow).
Args:
labels: Vector of labels (size n = sample size).
Returns:
A vector of the same size with all entries in self.faulty_actions shifted.
"""
num_actions = len(self.action_set)
fault = np.zeros(len(labels))
for i in range(len(labels)):
if labels[i] in self.faulty_actions:
fault[i] = 1
return (labels + fault) % num_actions # faulty actions get shifted by one
def get_probs(self, contexts: np.ndarray):
"""Returns probability distribution over actions for given contexts.
The softmax policy is defined as a probability vector
exp(alt_bin_labels / temp) / sum(exp(alt_bin_labels / temp))
where temp is a temperature of a policy and
alt_bin_labels is a binary encoding of labels altered by alter_labels(...)
Args:
contexts: Array of contexts (n-times-d, d=data dim., n=sample size), which
are either training or testing contexts provided during the
initialization.
Returns: Array of probabilities according to the policy, where K
is the number of actions (size n-times-K).
Raises:
NotImplementedError: when contexts is not training or testing contexts
"""
# predictions get altered by internal noise :
if contexts is self.train_contexts:
alt_labels = self.alter_labels(self.train_labels)
elif contexts is self.test_contexts:
alt_labels = self.alter_labels(self.test_labels)
else:
raise NotImplementedError
bin_alt_labels = skl_prep.label_binarize(
alt_labels, classes=self.action_set)
v = np.exp(bin_alt_labels / self.temperature)
v = v / v.sum(axis=1)[:, np.newaxis]
return v
def get_probs_by_actions(self, contexts: np.ndarray, actions: np.ndarray):
"""Returns probabilities for each given action in each given context.
Args:
contexts: Array of contexts (n-times-d, d=data dim., n=sample size), which
are either training or testing contexts provided during the
initialization.
actions: Array of actions (integers) for which probabilities are
requested.
Returns: Probabilities according to the policy.
"""
n = len(actions)
all_probs = self.get_probs(contexts)
probs = all_probs[np.arange(n), actions]
return probs
def query(self, contexts: np.ndarray) -> Query:
"""Returns actions and their probs sampled for the given contexts.
Args:
contexts: Array of contexts (n-times-d, d=data dim., n=sample size), which
are either training or testing contexts provided during the
initialization.
Returns: A Tuple of arrays of actions (int) and corresponding probs (float)
"""
probs = self.get_probs(contexts)
actions = [np.random.choice(self.action_set, p=pi) for pi in probs]
n = probs.shape[0]
# Probability of the sampled action in each row (row-wise fancy indexing).
probs_by_actions = probs[np.arange(n), actions]
return Query(np.array(actions), np.array(probs_by_actions))
def __str__(self):
"""Returns a string representation of a policy with parametrization."""
return f"SoftmaxDataPolicy(τ={self.temperature}, fauly_actions=[{str(self.faulty_actions)}])"
class SoftmaxGAPolicy(Policy):
"""Softmax gradient ascent fitted policy.
This softmax policy is defined as a probability vector
x |-> exp(<W,x> / temp) / sum(exp(<W,x> / temp))
where temp is a temperature of a policy and
W is a K-times-d matrix of parameters (here K is a number of actions
and d is a context dimension).
Parameters W are fitted by the gradient ascent either w.r.t. the
importance-weighted estimator or its self-normalized version.
Attributes:
n_actions: Number of actions.
temperature: Positive float controlling the temp. of a Softmax policy.
steps: Number of gradient ascent steps for fitting the policy
step_size: step size of the gradient ascent for fitting the policy.
obj_type: Objective type, TrainedPolicyObjType.IW = importance-weighted
estimator TrainedPolicyObjType.SNIW = self-normalized importance-weighted
estimator.
parameters: Parameters of the linear model in the softmax policy
ln_obj: Reference to a static method implementing the
log-objective function.
"""
def __init__(
self,
action_set: Sequence[int],
temperature: float,
steps: int = 10000,
step_size: float = 1e-2,
obj_type: TrainedPolicyObjType = TrainedPolicyObjType.IW,
):
"""Constructs a Softmax Gradient Ascent Policy.
Args:
action_set: List of unique integer actions.
temperature: Positive float controlling the temperature of a Softmax
policy.
steps: Number of gradient ascent steps for fitting the policy.
step_size: Step size of the gradient ascent for fitting the policy.
obj_type: Objective type, TrainedPolicyObjType.IW = importance-weighted
estimator TrainedPolicyObjType.SNIW = self-normalized
importance-weighted estimator.
"""
self.n_actions = len(action_set)
self.temperature = temperature
self.steps = steps
self.step_size = step_size
self.parameters = None
self.obj_type = obj_type
if obj_type == TrainedPolicyObjType.IW:
self.ln_obj = log_vhat_importance_weighting
elif obj_type == TrainedPolicyObjType.SNIW:
self.ln_obj = log_vhat_sn_importance_weighting
else:
raise NotImplementedError
def train(
self,
contexts: np.ndarray,
actions: np.ndarray,
rewards: np.ndarray,
b_prob: np.ndarray,
):
"""Fits the softmax policy according to the chosen objective.
Fits the softmax policy according to the objective chosen during
initialization. The gradient ascent is run for a fixed number of
steps and a step size (specified during initialization).
Gradient computation is done through autodiff jax library.
Args:
contexts: Array of contexts (n-times-d, d=data dim., n=sample size)
actions: Actions (integers).
rewards: Rewards (float).
b_prob: Probabilities corresponding to (context, action) pairs
according to the behavior policy.
"""
contexts = jnp.array(contexts)
actions = jnp.array(actions)
rewards = jnp.array(rewards)
b_prob = jnp.array(b_prob)
_, d = contexts.shape
grad_v = jax.jit(jax.grad(self.ln_obj))
obj_params = (self.temperature, contexts, actions, rewards, b_prob)
logging.debug("%s(softmax): iter\t\temp_value ", self.obj_type)
logging.debug("%s(softmax): --------------------------------- ",
self.obj_type)
def update_step_ga(_, parameters: np.ndarray):
"""Returns updated parameters after a single step of gradient ascent.
Args:
_: gradient ascent step
parameters: Parameters to be updated.
Returns: Updated parameters.
"""
g = grad_v(parameters, *obj_params)
parameters += self.step_size * g
return parameters
parameters_init = np.zeros(shape=(d, self.n_actions))
parameters_init = jnp.array(parameters_init)
self.parameters = jax.lax.fori_loop(0, self.steps, update_step_ga,
parameters_init)
logging.debug("%s(softmax): %d\t\t%.2f ", self.obj_type, self.steps,
math.exp(self.ln_obj(self.parameters, *obj_params)))
def get_probs(self, contexts: np.ndarray):
"""Returns probability distribution over actions for the given contexts.
The softmax policy is defined as a probability vector
exp(<W,x> / temp) / sum(exp(<W,x> / temp))
where temp is a temperature of a policy and
W is a K-times-d matrix of parameters (here K is a number of actions
and d is a context dimension) fitted by gradient ascent.
Args:
contexts: Array of contexts (n-times-d, d=data dim., n=sample size).
Returns: Array of probabilities according to the policy.
"""
return np.exp(self.get_logprobs(contexts))
def get_probs_by_actions(self, contexts, actions):
"""Returns probability for each given action in each given context.
Args:
contexts: Array of contexts (n-times-d, d=data dim., n=sample size).
actions: Actions (integers) for which probabilities are requested.
Returns: Probabilities according to the policy.
"""
return np.exp(self.get_logprobs_by_actions(contexts, actions))
def get_logprobs(self, contexts: np.ndarray):
"""Returns log-probabilities over actions for each given context.
The softmax policy is defined as a probability vector
log(exp(<W,x> / temp) / sum(exp(<W,x> / temp)))
where temp is a temperature of a policy and
W is a K-times-d matrix of parameters (here K is a number of actions
and d is a context dimension) fitted by gradient ascent.
Args:
contexts: Array of contexts (n-times-d, d=data dim., n=sample size).
Returns: Array of log-probabilities according to the policy (n-times-K).
"""
v = (1.0 / self.temperature) * contexts.dot(self.parameters)
logprob = v - np.expand_dims(scipy.special.logsumexp(v, axis=1), axis=1)
return logprob
def get_logprobs_by_actions(self, contexts, actions):
"""Returns log-probabilities for each given action and context.
Args:
contexts: Array of contexts (n-times-d, d=data dim., n=sample size).
actions: Actions (integers) for which probabilities are requested.
Returns: Log-probabilities according to the policy.
"""
v = (1.0 / self.temperature) * contexts.dot(self.parameters)
pot = (1.0 / self.temperature) * (contexts *
self.parameters[:, actions].T).sum(axis=1)
logprob = pot - scipy.special.logsumexp(v, axis=1)
return logprob
def query(self, contexts: np.ndarray) -> Query:
"""Returns actions and their probs sampled by the policy given the contexts.
Args:
contexts: Array of contexts (n-times-d, d=data dim., n=sample size), which
are either training or testing contexts provided during the
initialization.
Returns: Array integer actions and array of corresponding probabilities.
"""
n = len(contexts)
probs = self.get_probs(contexts)
actions = sample_from_simplices_m_times(p=probs, m=1).squeeze()
# Probability of the sampled action in each row (row-wise fancy indexing).
sample_probs = probs[np.arange(n), actions]
return Query(np.array(actions), np.array(sample_probs))
def query_many_times(self, contexts: np.ndarray, m_times: int):
"""Returns m_times actions sampled according to Policy for each context.
Samples actions m_times times efficiently.
Args:
contexts: Array of contexts (n-times-d, d=data dim., n=sample size).
m_times: Number of times to repeat the query.
    Returns: Array of integer actions (n-times-m_times) and an n-times-m_times
      array of corresponding probabilities.
"""
    n = len(contexts)
    probs = self.get_probs(contexts)
    actions = sample_from_simplices_m_times(probs, m_times)
    # Select, for each context, the probabilities of its m_times sampled
    # actions.
    sample_probs = probs[np.arange(n), actions.T].T
    return np.array(actions), np.array(sample_probs)
def __str__(self):
"""Returns a string representation of a policy with parametrization."""
return ("Softmax (linear potential): %s max`d by GA (T=%s, eta=%s)" %
(self.obj_type, self.steps, self.step_size))
|
offpolicy_selection_eslb-main
|
policies.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements a Dataset class which is an interface for OpenML dataset."""
from typing import NamedTuple
import numpy as np
import sklearn.datasets as skl_data
import sklearn.model_selection as skl_ms
import sklearn.preprocessing as skl_prep
from offpolicy_selection_eslb import policies
class FullInfoLoggedData(NamedTuple):
"""A dataset logged by a bandit policy and the true labels for testing.
Attributes:
contexts: n-times-d Array -- feature vectors for each entry
actions: n-times-1 Array -- action taken by logging policy
rewards: n-times-1 Array -- reward received
labels: n-times-1 Array -- True label
"""
contexts: np.ndarray
actions: np.ndarray
rewards: np.ndarray
labels: np.ndarray
def generate_binary_noise(
n: int,
p: float,
) -> np.ndarray:
"""Returns a Bernoulli-distributed noise vector.
Args:
n: Number of points to generate.
p: Bernoulli parameter (same for each point).
Returns: Binary vector of length n.
"""
return np.random.binomial(n=1, p=p, size=n)
def get_reward(
actions: np.ndarray,
labels: np.ndarray,
reward_noise_p: float = 0.1,
low_rew: float = 0.0,
high_rew: float = 1.,
):
"""Returns rewards and corrupted labels for matching actions.
Args:
    actions: An n-vector of actions (integers in {0, ..., nb_class - 1}).
    labels: An n-vector of labels.
    reward_noise_p: A noise-level parameter in (0, 1).
    low_rew: Reward for an incorrect action.
    high_rew: Reward for a correct action.
  Returns: An n-vector of rewards after adding noise and rescaling.
"""
rewards = np.equal(actions, labels)
rewards = (rewards + generate_binary_noise(rewards.size, reward_noise_p)) % 2
rewards = high_rew * rewards + low_rew * (1 - rewards)
return rewards
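# Illustrative behaviour of get_reward: matching action/label pairs receive
# high_rew and mismatches receive low_rew, with each reward independently
# flipped with probability reward_noise_p. For example, with the noise
# switched off:
#   get_reward(np.array([0, 1, 2]), np.array([0, 1, 1]), reward_noise_p=0.0)
#   -> array([1., 1., 0.])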
class Dataset:
"""Represents an OpenML dataset.
Attributes:
openml_id: OpenML id of the dataset (for loading).
log_frac: In (0,1), fraction of the data to be used as train data (logged
dataset).
reward_noise: In (0,1) noise level in the rewards obtained by a policy.
name: Name of a dataset according to OpenML.
encoder: Instance of scikit-learn LabelEncoder() preprocessing labels.
contexts_all: Full dataset contexts (unless subsample >1, then subsampled
contexts).
labels_all: Full dataset labels (unless subsample >1, then subsampled
labels).
contexts_train: Train data contexts.
    contexts_test: Test data contexts.
labels_train: Train data labels.
labels_test: Test data labels.
n_train: Train data size.
n_test: Test data size.
size: Total size of the dataset.
"""
def __init__(
self,
openml_id: int,
standardize: bool = True,
log_frac: float = 0.50,
subsample: int = 1,
random_state: int = 0,
reward_noise: float = 0.1,
):
"""Constructs Dataset object.
Args:
openml_id: OpenML id of the dataset (for loading).
standardize: Binary, use True to standardize dataset.
log_frac: In (0,1), fraction of the data to be used as train data
(logged dataset).
subsample: Subsample rate -- use only every "subsample" point from the
dataset.
random_state: Seed for train-test split (sklearn).
reward_noise: In (0,1) noise level in the rewards obtained by a policy.
"""
self.openml_id = openml_id
self.log_frac = log_frac
self.reward_noise = reward_noise
dataset = skl_data.fetch_openml(
data_id=openml_id, cache=True, as_frame=False)
data = dataset.data
target = dataset.target
self.name = dataset.details["name"]
self.encoder = skl_prep.LabelEncoder()
self.encoder.fit(target)
target = self.encoder.transform(target)
self.contexts_all = data[::subsample]
self.labels_all = target[::subsample]
if standardize:
scaler = skl_prep.StandardScaler()
scaler.fit(self.contexts_all)
self.contexts_all = scaler.transform(self.contexts_all)
(self.contexts_train, self.contexts_test, self.labels_train,
self.labels_test) = skl_ms.train_test_split(
self.contexts_all,
self.labels_all,
test_size=1 - self.log_frac,
shuffle=True,
random_state=random_state)
self.n_train = len(self.labels_train)
self.n_test = len(self.labels_test)
self.size = len(self.labels_all)
def get_test(self, policy: policies.Policy) -> FullInfoLoggedData:
"""Returns test data contexts, action, rewards, test labels.
Args:
policy: An object of class Policy
Returns: A tuple FullInfoLoggedData (contexts, actions, rewards, labels).
"""
actions, _ = policy.query(self.contexts_test)
rewards = get_reward(actions, self.labels_test, self.reward_noise)
return FullInfoLoggedData(self.contexts_test, actions, rewards,
self.labels_test)
def get_action_set(self):
"""Returns dictionary mapping labels to corresponding actions."""
return self.encoder.transform(self.encoder.classes_)
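# Minimal usage sketch (illustrative; `some_policy` is a hypothetical
# policies.Policy instance and fetching OpenML id 28 requires network access):
#   ds = Dataset(openml_id=28, log_frac=0.5, reward_noise=0.1)
#   print(ds.name, ds.n_train, ds.n_test, len(ds.get_action_set()))
#   contexts, actions, rewards, labels = ds.get_test(some_policy)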
|
offpolicy_selection_eslb-main
|
data.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements a benchmark executable.
The executable runs experimental suite by calling
run_experiment_suite(...) defined in experiment module.
The type of the experiment and its parameters are set by the flags
(see help for each flag).
The suite is described in
"Kuzborskij, I., Vernade, C., Gyorgy, A., & Szepesvári, C. (2021, March).
Confident off-policy evaluation and selection through self-normalized importance
weighting. In International Conference on Artificial Intelligence and Statistics
(pp. 640-648). PMLR."..
Examples:
# Run evaluation on all datasets considered in the paper for
# 10 trials (data splits) with error probability = 0.01
python3 benchmark.py --dataset_type=uci_all --n_trials=10 --delta=0.01
"""
from absl import app
from absl import flags
from absl import logging
import termcolor
from offpolicy_selection_eslb import estimators
from offpolicy_selection_eslb import experiment
from offpolicy_selection_eslb import policies
_DATASET_TYPE = flags.DEFINE_enum(
"dataset_type",
"demo",
["uci_small", "uci_medium", "uci_all", "demo"],
"UCI dataset subset.",
)
_N_TRIALS = flags.DEFINE_integer(
"n_trials",
10,
"Number of experimental trials (for sample statistics).",
lower_bound=0)
_FAULTY_ACTIONS = flags.DEFINE_list("faulty_actions", ["1", "2"],
"Faulty action indices.")
_DELTA = flags.DEFINE_float(
"delta",
0.05,
"Error probability delta (i.e. confidence intervals hold w.p. at least 1-delta).",
lower_bound=0,
upper_bound=1)
_BEHAVIOR_POL_TEMPERATURE = flags.DEFINE_float(
"behavior_policy_temperature",
0.2,
"Temperature of a softmax behavior policy (small = peaked actions).",
lower_bound=0)
_TARGET_POL_TEMPERATURE = flags.DEFINE_float(
"target_policy_temperature",
0.1,
"Temperature of a softmax target policy (small = peaked actions).",
lower_bound=0)
_REWARD_NOISE_P = flags.DEFINE_float(
"reward_noise_p",
0.1,
"Reward noise probability.",
lower_bound=0,
upper_bound=1)
_GA_STEP_SIZE = flags.DEFINE_float(
"GA_step_size",
0.01,
"Gradient Ascent step size for training softmax target policies.",
lower_bound=0)
_GA_ITER = flags.DEFINE_integer(
"GA_n_iter",
10000,
"Gradient Ascent steps for training softmax target policies.",
lower_bound=0)
_ESLB_ITER = flags.DEFINE_integer(
"eslb_n_iter",
10,
"Number of Monte-Carlo iterations for ESLB.",
lower_bound=0)
_ESLB_BATCH = flags.DEFINE_integer(
"eslb_batch", 1000, "Monte-Carlo batch size for ESLB.", lower_bound=0)
_ESLB_BIAS_TYPE = flags.DEFINE_enum(
"eslb_bias_type",
"mult_one_hot",
["mult_one_hot", "bernstein"],
"Bias control type for ESLB.",
)
_TABLE_FORMAT = flags.DEFINE_string(
"table_format", "psql",
"Result table format (e.g. psql, latex, html). See https://pypi.org/project/tabulate/"
)
green = lambda x: termcolor.colored(x, color="green")
def main(argv):
del argv # Unused.
logging.set_verbosity(logging.INFO)
# Datasets by OpenML IDs (see https://www.openml.org/search?type=data)
if _DATASET_TYPE.value == "uci_small":
dataset_ids = [39, 41, 54, 181, 30, 28, 182, 32]
elif _DATASET_TYPE.value == "uci_medium":
dataset_ids = [181, 30, 28, 182]
elif _DATASET_TYPE.value == "uci_all":
dataset_ids = [181, 30, 28, 182, 300, 32, 6, 184]
elif _DATASET_TYPE.value == "demo":
dataset_ids = [28, 30]
if _ESLB_BIAS_TYPE.value == "mult_one_hot":
eslb_bias_type = estimators.ESLBBiasType.MultOneHot
elif _ESLB_BIAS_TYPE.value == "bernstein":
eslb_bias_type = estimators.ESLBBiasType.Bernstein
logging.info(
green("running on '%s' dataset suite (openml ids: %s); see --help"),
_DATASET_TYPE.value, ", ".join(map(str, dataset_ids)))
logging.info(green("number of trials = %d"), _N_TRIALS.value)
logging.info(green("faulty action indices = %s"),
", ".join(map(str, _FAULTY_ACTIONS.value)))
logging.info(green("confidence bound failure probability (δ) = %f"),
_DELTA.value)
logging.info(green("behavior policy temperature = %f"),
_BEHAVIOR_POL_TEMPERATURE.value)
logging.info(green("target policy temperature = %f"),
_TARGET_POL_TEMPERATURE.value)
logging.info(green("reward noise prob. = %f"),
_REWARD_NOISE_P.value)
logging.info(green("steps of gradient ascent for fitting policies = %d"),
_GA_ITER.value)
logging.info(green("gradient ascent step size = %f"),
_GA_STEP_SIZE.value)
logging.info(green("ESLB estimator Monte-Carlo estimation steps = %d"),
_ESLB_ITER.value)
logging.info(green("ESLB estimator Monte-Carlo batch size = %d"),
_ESLB_BATCH.value)
logging.info(green("ESLB bias control type = %s"),
_ESLB_BIAS_TYPE.value)
estimators_ = estimators.get_estimators(
delta=_DELTA.value,
eslb_iter=_ESLB_ITER.value,
eslb_batch_size=_ESLB_BATCH.value,
eslb_bias_type=eslb_bias_type)
target_policy_specs = [
("SoftmaxGAPolicy",
dict(
step_size=_GA_STEP_SIZE.value,
steps=_GA_ITER.value,
temperature=_TARGET_POL_TEMPERATURE.value,
obj_type=policies.TrainedPolicyObjType.IW)),
("SoftmaxGAPolicy",
dict(
step_size=_GA_STEP_SIZE.value,
steps=_GA_ITER.value,
temperature=_TARGET_POL_TEMPERATURE.value,
obj_type=policies.TrainedPolicyObjType.SNIW)),
("SoftmaxDataPolicy",
dict(temperature=_TARGET_POL_TEMPERATURE.value, faulty_actions=[]))
]
behavior_faulty_actions = list(map(int, _FAULTY_ACTIONS.value))
(mean_test_rewards, std_test_rewards, mean_reference_rewards,
std_reference_rewards, dataset_names) = experiment.run_experiment_suite(
list_data_ids=dataset_ids,
n_trials=_N_TRIALS.value,
behavior_policy_temperature=_BEHAVIOR_POL_TEMPERATURE.value,
behavior_faulty_actions=behavior_faulty_actions,
target_policy_specs=target_policy_specs,
reward_noise_p=_REWARD_NOISE_P.value,
estimators=estimators_)
experiment.print_results(
estimators_,
dataset_names,
mean_test_rewards,
std_test_rewards,
mean_reference_rewards,
std_reference_rewards,
table_format=_TABLE_FORMAT.value)
if __name__ == "__main__":
app.run(main)
|
offpolicy_selection_eslb-main
|
demo/benchmark.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""HiP and Perceiver IO model templates."""
import sys
from typing import Any, Dict, Mapping, Optional, Sequence
from absl import logging
import chex
import haiku as hk
from jax import numpy as jnp
from hierarchical_perceiver import perceiver_blocks
from hierarchical_perceiver import perceiver_helpers
PERCEIVER_MODULE_NAME = 'perceiver'
# Perceiver model variants.
VARIANTS = {
'Mini': {
'num_groups': (16, 1, 16),
'num_self_attends_per_block': (2, 1, 1),
'z_index_dim': (128, 64, 128),
'num_z_channels': (128, 1024, 128),
'num_cross_attend_heads': (1, 1, 1),
'num_self_attend_heads': (4, 32, 4),
'cross_attend_widening_factor': (1, 1, 1),
'self_attend_widening_factor': (4, 4, 4),
'num_embedding_channels': 32,
},
'16': {
'num_groups': (16, 4, 1, 1, 1, 4, 16),
'num_self_attends_per_block': (2, 2, 18, 2, 1, 1, 1),
'z_index_dim': (128, 256, 256, 64, 256, 256, 128),
'num_z_channels': (128, 256, 512, 1024, 512, 256, 128),
'num_cross_attend_heads': (1, 1, 1, 1, 1, 1, 1),
'num_self_attend_heads': (4, 8, 16, 32, 16, 8, 4),
'cross_attend_widening_factor': (1, 1, 1, 1, 1, 1, 1),
'self_attend_widening_factor': (4, 4, 4, 4, 4, 4, 4),
'num_embedding_channels': 32,
},
'256': {
'num_groups': (256, 64, 16, 4, 1, 1, 1, 4, 16, 64, 256),
'num_self_attends_per_block': (1, 1, 2, 2, 18, 2, 1, 1, 1, 1, 1),
'z_index_dim': (32, 64, 128, 256, 256, 64, 256, 256, 128, 64, 32),
'num_z_channels': (64, 96, 128, 256, 512, 1024, 256, 128, 64, 32, 16),
'num_cross_attend_heads': (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
'num_self_attend_heads': (1, 2, 4, 8, 16, 32, 16, 8, 4, 2, 1),
'cross_attend_widening_factor': (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
'self_attend_widening_factor': (4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4),
'num_embedding_channels': 16,
},
'16x3': {
'num_groups': (16, 1, 16),
'num_self_attends_per_block': (2, 18, 2),
'z_index_dim': (128, 256, 128),
'num_z_channels': (128, 1024, 128),
'num_cross_attend_heads': (1, 1, 1),
'num_self_attend_heads': (4, 32, 4),
'cross_attend_widening_factor': (1, 1, 1),
'self_attend_widening_factor': (4, 4, 4),
'num_embedding_channels': 32,
},
# Perceiver IO
'io_mini': {
'num_self_attends_per_block': 2,
'z_index_dim': 128,
'num_z_channels': 128,
'num_cross_attend_heads': 1,
'num_self_attend_heads': 2,
'cross_attend_widening_factor': 1,
'self_attend_widening_factor': 2,
'num_embedding_channels': 128,
},
'io_c_50m': {
'num_self_attends_per_block': 8,
'z_index_dim': 1024,
'num_z_channels': 512,
'num_cross_attend_heads': 8,
'num_self_attend_heads': 8,
'cross_attend_widening_factor': 4,
'self_attend_widening_factor': 4,
'num_embedding_channels': 512,
},
'io_c_150m': {
'num_self_attends_per_block': 12,
'z_index_dim': 1024,
'num_z_channels': 896,
'num_cross_attend_heads': 16,
'num_self_attend_heads': 16,
'cross_attend_widening_factor': 4,
'self_attend_widening_factor': 4,
'num_embedding_channels': 896,
},
}
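# Note: each variant dict above is splatted into the corresponding model
# constructor by build_perceiver() below; e.g. 'io_mini' configures a small
# Perceiver IO, while the tuple-valued variants ('Mini', '16', '256', '16x3')
# configure per-block hyperparameters of a HiP.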
def _check_and_get_processor_idx(num_groups: Sequence[int]) -> int:
# The processor is the central block in a HiP.
# [enc_1, ..., enc_N, processor, dec_1, ..., dec_N]
processor_idx = len(num_groups) // 2
# The processor block has 1 group: it is essentially a Perceiver IO.
assert num_groups[processor_idx] == 1, 'The processor must use 1 group.'
return processor_idx
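# Example: for the '16' variant, num_groups == (16, 4, 1, 1, 1, 4, 16), so
# processor_idx == len(num_groups) // 2 == 3 and num_groups[3] == 1.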
class PerceiverIO(hk.Module):
"""Perceiver IO.
Perceiver IO is an anymodal, fully permutation-invariant model. It takes in
a (usually large) input sequence, maps them to a (smaller) sequence with
latent cross-attention, processes them with a homogeneous latent Transformer,
then maps them to a (usually large) output sequence again with latent
cross-attention. See https://arxiv.org/abs/2107.14795 for more details.
For compatibility with HiP, this Perceiver IO includes a singleton group
dimension: inputs are concatenated and newaxis'd to [B, 1, M, C] before
processing (where M is the summed index dim of all input modalities).
"""
def __init__(
self,
# Variant-specific hyperparams
num_self_attends_per_block: int,
z_index_dim: int,
num_z_channels: int,
num_cross_attend_heads: int,
num_self_attend_heads: int,
cross_attend_widening_factor: int,
self_attend_widening_factor: int,
num_embedding_channels: int,
*,
# Shared hyperparameters
num_position_encoding_channels: Optional[int] = None,
activation_name: str = 'sq_relu',
z_index_dim_train: Optional[int] = None,
z_index_dim_eval: Optional[int] = None,
dropout_prob: float = 0.0,
drop_path_rate: float = 0.0,
name: str = PERCEIVER_MODULE_NAME,
):
"""Constructs the model.
Args:
num_self_attends_per_block: The number of self-attention layers in each
block.
z_index_dim: The number of latents in each block.
num_z_channels: The number of channels in each block.
num_cross_attend_heads: The number of heads in cross-attention layers in
each block.
num_self_attend_heads: The number of heads in self-attention layers in
each block.
cross_attend_widening_factor: The MLP channel widening factor in
cross-attention layers in each block.
self_attend_widening_factor: The MLP channel widening factor in
self-attention layers in each block.
num_embedding_channels: The number of channels used to embed inputs to and
outputs from the model. Data from all modalities are projected to
`num_embedding_channels`.
num_position_encoding_channels: The number of channels of the raw position
encoding. If num_position_encoding_channels != num_embedding_channels,
position encodings are projected before adding to embedded inputs.
activation_name: Activation for HiPCrossAttention and SelfAttention.
z_index_dim_train: Optional train-time index dimension override.
z_index_dim_eval: Optional eval-time index dimension override.
dropout_prob: SelfAttention dropout probability.
drop_path_rate: SelfAttention drop path rate.
name: Haiku module name.
"""
super().__init__(name=name)
# Variant-specific hyperparams
self.num_self_attends_per_block = num_self_attends_per_block
self.z_index_dim = z_index_dim
self.num_z_channels = num_z_channels
self.num_cross_attend_heads = num_cross_attend_heads
self.num_self_attend_heads = num_self_attend_heads
self.cross_attend_widening_factor = cross_attend_widening_factor
self.self_attend_widening_factor = self_attend_widening_factor
self.num_embedding_channels = num_embedding_channels
# Shared hyperparameters
self.num_position_encoding_channels = num_position_encoding_channels
self.activation_name = activation_name
self.z_index_dim_train = z_index_dim_train
self.z_index_dim_eval = z_index_dim_eval
self.dropout_prob = dropout_prob
self.drop_path_rate = drop_path_rate
def __call__(self, dataset_name: str, inputs: Mapping[str, chex.Array], *,
is_training: bool) -> Dict[str, chex.Array]:
"""Computes a reconstruction of the inputs through the model.
Args:
dataset_name: The name of the dataset (ignored).
inputs: A dictionary of modality_name: value.
is_training: Is this a training step.
Returns:
The computed output.
"""
grouper = perceiver_blocks.ConcatenateGrouper()
embedder = perceiver_blocks.Embedder(
num_embedding_channels=self.num_embedding_channels)
z_0 = embedder.embed(inputs)
z, mae_query = perceiver_blocks.PositionEncoder(
num_position_encoding_channels=self.num_position_encoding_channels,
)(z_0)
z = grouper.group(z)
mae_query = grouper.group(mae_query)
z = perceiver_blocks.PerceiverBlock(
num_output_groups=1,
output_index_dim=self.z_index_dim,
num_output_channels=self.num_z_channels,
num_self_attend_layers=self.num_self_attends_per_block,
num_self_attend_heads=self.num_self_attend_heads,
self_attend_widening_factor=self.self_attend_widening_factor,
num_cross_attend_heads=self.num_cross_attend_heads,
cross_attend_widening_factor=self.cross_attend_widening_factor,
# Perceiver IO always uses a single group.
regroup_inputs=False,
regroup_type='', # Ignored
activation_name=self.activation_name,
output_index_dim_train=self.z_index_dim_train,
output_index_dim_eval=self.z_index_dim_eval,
dropout_prob=self.dropout_prob,
drop_path_rate=self.drop_path_rate,
name='block_0')(z, is_training=is_training)
reconstruction_z_out = perceiver_blocks.ReconstructionHead()(
z, mae_query=mae_query, is_training=is_training)
reconstruction_z_out = grouper.ungroup(reconstruction_z_out)
reconstruction_output = embedder.unembed(reconstruction_z_out)
z_out = grouper.ungroup(z)
output_keys = perceiver_helpers.ModelOutputKeys
return { # pytype: disable=bad-return-type # numpy-scalars
output_keys.INPUT_RECONSTRUCTION: reconstruction_output,
output_keys.LATENTS: z_out,
}
class HiP(hk.Module):
"""Hierarchical Perceiver.
See: https://arxiv.org/abs/2202.10890
"""
def __init__(
self,
# Variant-specific hyperparams (e.g. for HiP-16, HiP-256)
num_groups: Sequence[int],
num_self_attends_per_block: Sequence[int],
z_index_dim: Sequence[int],
num_z_channels: Sequence[int],
num_cross_attend_heads: Sequence[int],
num_self_attend_heads: Sequence[int],
cross_attend_widening_factor: Sequence[int],
self_attend_widening_factor: Sequence[int],
num_embedding_channels: int,
*,
# Shared hyperparameters
num_position_encoding_channels: Optional[int] = None,
regroup_type: str = 'reshape',
activation_name: str = 'sq_relu',
processor_index_dim_train: Optional[int] = None,
processor_index_dim_eval: Optional[int] = None,
dropout_prob: float = 0.0,
drop_path_rate: float = 0.0,
name: str = PERCEIVER_MODULE_NAME,
):
"""Constructs the model.
Args:
num_groups: The number of groups in each level of the HiP hierarchy.
num_self_attends_per_block: The number of self-attention layers in each
level of the HiP hierarchy.
z_index_dim: The number of latents in each level of the HiP hierarchy.
num_z_channels: The number of channels in each level of the HiP hierarchy.
num_cross_attend_heads: The number of heads in cross-attention layers in
each level of the HiP hierarchy.
num_self_attend_heads: The number of heads in self-attention layers in
each level of the HiP hierarchy.
cross_attend_widening_factor: The MLP channel widening factor in
cross-attention layers in each level of the HiP hierarchy.
self_attend_widening_factor: The MLP channel widening factor in
self-attention layers in each level of the HiP hierarchy.
num_embedding_channels: The number of channels used to embed inputs to and
outputs from the model. Data from all modalities are projected to
`num_embedding_channels`.
num_position_encoding_channels: The number of channels of the raw position
encoding. If num_position_encoding_channels != num_embedding_channels,
position encodings are projected before adding to embedded inputs.
regroup_type: The regrouping strategy to use.
activation_name: Activation for HiPCrossAttention and SelfAttention.
processor_index_dim_train: Optional train-time index dimension override
for the central processor block.
processor_index_dim_eval: Optional eval-time index dimension override
for the central processor block.
dropout_prob: SelfAttention dropout probability.
drop_path_rate: SelfAttention drop path rate.
name: Haiku module name.
"""
super().__init__(name=name)
# Variant-specific hyperparams (e.g. for HiP-16, HiP-256)
self.num_groups = num_groups
self.num_self_attends_per_block = num_self_attends_per_block
self.z_index_dim = z_index_dim
self.num_z_channels = num_z_channels
self.num_cross_attend_heads = num_cross_attend_heads
self.num_self_attend_heads = num_self_attend_heads
self.cross_attend_widening_factor = cross_attend_widening_factor
self.self_attend_widening_factor = self_attend_widening_factor
self.num_embedding_channels = num_embedding_channels
# Shared hyperparameters
self.num_position_encoding_channels = num_position_encoding_channels
self.regroup_type = regroup_type
self.activation_name = activation_name
self.processor_index_dim_train = processor_index_dim_train
self.processor_index_dim_eval = processor_index_dim_eval
self.dropout_prob = dropout_prob
self.drop_path_rate = drop_path_rate
self.num_blocks = len(self.num_groups)
assert self.num_blocks >= 3, (
'At least 3 blocks are needed for U-Net residuals.')
assert self.num_blocks % 2 == 1, (
'HiP assumes an odd number of blocks: any number of paired '
'encoder/decoder blocks plus 1 processor block.')
self.processor_block_idx = _check_and_get_processor_idx(self.num_groups)
def __call__(self, dataset_name: str, inputs: Mapping[str, chex.Array], *,
is_training: bool) -> Dict[str, chex.Array]:
"""Computes a reconstruction of the inputs through the HiP.
Args:
dataset_name: The name of the dataset (ignored).
inputs: A dictionary of modality_name: value.
is_training: Is this a training step.
Returns:
The computed output.
"""
grouper = perceiver_blocks.ConstNumGrouper(num_groups=self.num_groups[0])
embedder = perceiver_blocks.Embedder(
num_embedding_channels=self.num_embedding_channels)
z_0 = embedder.embed(inputs)
z, mae_query = perceiver_blocks.PositionEncoder(
num_position_encoding_channels=self.num_position_encoding_channels,
)(z_0)
z = grouper.group(z)
mae_query = grouper.group(mae_query)
hidden_z = []
for i in range(self.num_blocks):
# UNet skips between corresponding encoder and decoder blocks.
if i > self.processor_block_idx:
pre_attention_residual = hidden_z[self.num_blocks - i - 1]
else:
pre_attention_residual = None
if i == self.processor_block_idx:
# Allow overrides of the number of processor-block latents.
output_index_dim_train = self.processor_index_dim_train
output_index_dim_eval = self.processor_index_dim_eval
else:
# Always use the default number of latents for encoder/decoder blocks.
output_index_dim_train = None
output_index_dim_eval = None
z = perceiver_blocks.PerceiverBlock(
num_output_groups=self.num_groups[i],
output_index_dim=self.z_index_dim[i],
num_output_channels=self.num_z_channels[i],
num_self_attend_layers=self.num_self_attends_per_block[i],
num_self_attend_heads=self.num_self_attend_heads[i],
self_attend_widening_factor=self.self_attend_widening_factor[i],
num_cross_attend_heads=self.num_cross_attend_heads[i],
cross_attend_widening_factor=self.cross_attend_widening_factor[i],
# The grouper takes care of the initial re-grouping.
regroup_inputs=(i > 0),
regroup_type=self.regroup_type,
activation_name=self.activation_name,
output_index_dim_train=output_index_dim_train,
output_index_dim_eval=output_index_dim_eval,
dropout_prob=self.dropout_prob,
drop_path_rate=self.drop_path_rate,
name=f'block_{i}')(
z, is_training=is_training,
pre_attention_residual=pre_attention_residual)
hidden_z.append(z)
reconstruction_z_out = perceiver_blocks.ReconstructionHead()(
z, mae_query=mae_query, is_training=is_training)
reconstruction_z_out = grouper.ungroup(reconstruction_z_out)
reconstruction_output = embedder.unembed(reconstruction_z_out)
z_out = grouper.ungroup(z)
output_keys = perceiver_helpers.ModelOutputKeys
return { # pytype: disable=bad-return-type # numpy-scalars
output_keys.INPUT_RECONSTRUCTION: reconstruction_output,
output_keys.LATENTS: z_out,
}
class HiPClassBottleneck(hk.Module):
"""Hierarchical Perceiver with classes -> processor -> classes.
This template handles class labels by passing them into and reading them out
of the central processor block. All other modalities go through the encoder
and decoder.
See: https://arxiv.org/abs/2202.10890
"""
def __init__(
self,
# Variant-specific hyperparams (e.g. for HiP-16, HiP-256)
num_groups: Sequence[int],
num_self_attends_per_block: Sequence[int],
z_index_dim: Sequence[int],
num_z_channels: Sequence[int],
num_cross_attend_heads: Sequence[int],
num_self_attend_heads: Sequence[int],
cross_attend_widening_factor: Sequence[int],
self_attend_widening_factor: Sequence[int],
num_embedding_channels: int,
label_modalities: Sequence[str],
*,
# Shared hyperparameters
num_position_encoding_channels: Optional[int] = None,
regroup_type: str = 'reshape',
activation_name: str = 'sq_relu',
processor_index_dim_train: Optional[int] = None,
processor_index_dim_eval: Optional[int] = None,
dropout_prob: float = 0.0,
drop_path_rate: float = 0.0,
name: str = PERCEIVER_MODULE_NAME):
"""Constructs the model.
Args:
num_groups: The number of groups in each level of the HiP hierarchy.
num_self_attends_per_block: The number of self-attention layers in each
level of the HiP hierarchy.
z_index_dim: The number of latents in each level of the HiP hierarchy.
num_z_channels: The number of channels in each level of the HiP hierarchy.
num_cross_attend_heads: The number of heads in cross-attention layers in
each level of the HiP hierarchy.
num_self_attend_heads: The number of heads in self-attention layers in
each level of the HiP hierarchy.
cross_attend_widening_factor: The MLP channel widening factor in
cross-attention layers in each level of the HiP hierarchy.
self_attend_widening_factor: The MLP channel widening factor in
self-attention layers in each level of the HiP hierarchy.
num_embedding_channels: The number of channels used to embed inputs to and
outputs from the model. Data from all modalities are projected to
`num_embedding_channels`.
label_modalities: The names of modalities to be passed in to the
bottleneck.
num_position_encoding_channels: The number of channels of the raw position
encoding. If num_position_encoding_channels != num_embedding_channels,
position encodings are projected before adding to embedded inputs.
regroup_type: The regrouping strategy to use.
activation_name: Activation for HiPCrossAttention and SelfAttention.
processor_index_dim_train: Optional train-time index dimension override
for the central processor block.
processor_index_dim_eval: Optional eval-time index dimension override
for the central processor block.
dropout_prob: SelfAttention dropout probability.
drop_path_rate: SelfAttention drop path rate.
name: Haiku module name.
"""
super().__init__(name=name)
# Variant-specific hyperparams (e.g. for HiP-16, HiP-256)
self.num_groups = num_groups
self.num_self_attends_per_block = num_self_attends_per_block
self.z_index_dim = z_index_dim
self.num_z_channels = num_z_channels
self.num_cross_attend_heads = num_cross_attend_heads
self.num_self_attend_heads = num_self_attend_heads
self.cross_attend_widening_factor = cross_attend_widening_factor
self.self_attend_widening_factor = self_attend_widening_factor
self.num_embedding_channels = num_embedding_channels
# Shared hyperparameters
self.num_position_encoding_channels = num_position_encoding_channels
self.regroup_type = regroup_type
self.activation_name = activation_name
self.processor_index_dim_train = processor_index_dim_train
self.processor_index_dim_eval = processor_index_dim_eval
self.dropout_prob = dropout_prob
self.drop_path_rate = drop_path_rate
self.label_modalities = label_modalities
self.num_blocks = len(self.num_groups)
assert self.num_blocks >= 3, (
'At least 3 blocks are needed for U-Net residuals.')
assert self.num_blocks % 2 == 1, (
'HiP assumes an odd number of blocks: any number of paired '
'encoder/decoder blocks plus 1 processor block.')
# Embedded class labels are input to and decoded from this block:
self.processor_block_idx = _check_and_get_processor_idx(self.num_groups)
def __call__(self, dataset_name: str, inputs: Mapping[str, chex.Array], *,
is_training: bool) -> Dict[str, chex.Array]:
"""Computes a reconstruction of the inputs through the HiP.
Args:
dataset_name: The name of the dataset (ignored).
inputs: A dictionary of modality_name: value.
is_training: Is this a training step.
Returns:
The computed output.
"""
grouper = perceiver_blocks.ConstNumGrouper(num_groups=self.num_groups[0])
class_label_inputs = {k: v for k, v in inputs.items()
if k in self.label_modalities}
inputs = {k: v for k, v in inputs.items()
if k not in self.label_modalities}
# Embed, position, and group the non-class-label inputs.
embedder = perceiver_blocks.Embedder(
num_embedding_channels=self.num_embedding_channels)
z_0 = embedder.embed(inputs)
z, mae_query = perceiver_blocks.PositionEncoder(
num_position_encoding_channels=self.num_position_encoding_channels,
)(z_0)
z = grouper.group(z)
mae_query = grouper.group(mae_query)
num_blocks = len(self.num_groups)
assert num_blocks >= 3, 'At least 3 blocks are needed for U-Net residuals.'
hidden_z = []
for i in range(num_blocks):
# UNet skips between corresponding encoder and decoder blocks.
if i > self.processor_block_idx:
pre_attention_residual = hidden_z[num_blocks - i - 1]
else:
pre_attention_residual = None
if i > 0:
# Manually regroup the current latents to allow concatenation.
# The grouper takes care of the initial regroup.
z = perceiver_blocks.regroup(
inputs=z,
num_output_groups=self.num_groups[i],
regroup_type=self.regroup_type)
if i == self.processor_block_idx:
mae_query_class = {}
grouper_class = {}
embedder_class = {}
for k, v in class_label_inputs.items():
# Concatenate the class inputs to the latents.
assert z.shape[perceiver_blocks.GROUPS_DIM] == 1
grouper_class[k] = perceiver_blocks.ConstNumGrouper(num_groups=1)
# Embed and position encode class labels.
embedder_class[k] = perceiver_blocks.Embedder(
num_embedding_channels=z.shape[perceiver_blocks.CHANNELS_DIM])
z_class = embedder_class[k].embed({k: v})
z_class, mae_query_class[k] = perceiver_blocks.PositionEncoder(
# Position encoding matches the embedding size.
num_position_encoding_channels=z.shape[
perceiver_blocks.CHANNELS_DIM])(z_class)
z_class = grouper_class[k].group(z_class)
mae_query_class[k] = grouper_class[k].group(mae_query_class[k])
z = jnp.concatenate([z, z_class], axis=perceiver_blocks.INDEX_DIM)
# Allow overrides of the number of processor-block latents.
output_index_dim_train = self.processor_index_dim_train
output_index_dim_eval = self.processor_index_dim_eval
else:
# Always use the default number of latents for encoder/decoder blocks.
output_index_dim_train = None
output_index_dim_eval = None
z = perceiver_blocks.PerceiverBlock(
num_output_groups=self.num_groups[i],
output_index_dim=self.z_index_dim[i],
num_output_channels=self.num_z_channels[i],
num_self_attend_layers=self.num_self_attends_per_block[i],
num_self_attend_heads=self.num_self_attend_heads[i],
self_attend_widening_factor=self.self_attend_widening_factor[i],
num_cross_attend_heads=self.num_cross_attend_heads[i],
cross_attend_widening_factor=self.cross_attend_widening_factor[i],
# We've already re-grouped the latents: make sure they stay put!
regroup_inputs=False,
regroup_type=self.regroup_type, # Ignored.
activation_name=self.activation_name,
output_index_dim_train=output_index_dim_train,
output_index_dim_eval=output_index_dim_eval,
dropout_prob=self.dropout_prob,
drop_path_rate=self.drop_path_rate,
name=f'perceiver_block_{i}')(
z, is_training=is_training,
pre_attention_residual=pre_attention_residual)
hidden_z.append(z)
if i == self.processor_block_idx:
output_class = dict()
rh_class = perceiver_blocks.ReconstructionHead()
for k, v in mae_query_class.items():
# Reconstruct the class-label inputs
assert z.shape[perceiver_blocks.GROUPS_DIM] == 1 # pytype: disable=attribute-error # numpy-scalars
z_out_class = rh_class(z, mae_query=v, is_training=is_training)
z_out_class = grouper_class[k].ungroup(z_out_class)
output_class.update(embedder_class[k].unembed(z_out_class))
reconstruction_z_out = perceiver_blocks.ReconstructionHead()(
z, mae_query=mae_query, is_training=is_training)
reconstruction_z_out = grouper.ungroup(reconstruction_z_out)
reconstruction_output = embedder.unembed(reconstruction_z_out)
# Merge class-label and non-class-label reconstructions into a single dict.
reconstruction_output = {**reconstruction_output, **output_class}
z_out = grouper.ungroup(z)
output_keys = perceiver_helpers.ModelOutputKeys
return { # pytype: disable=bad-return-type # numpy-scalars
output_keys.INPUT_RECONSTRUCTION: reconstruction_output,
output_keys.LATENTS: z_out,
}
def build_perceiver(
model_base_name: str,
model_variant_name: Optional[str],
model_kwargs: Optional[Mapping[str, Any]] = None,
searched_modules: Sequence[Any] = (sys.modules[__name__],),
) -> hk.Module:
"""Construct a Perceiver instance.
Args:
model_base_name: Name of a HiP-like base model class (e.g., 'HiP').
model_variant_name: Name of a variant (e.g., '16'). Should be None for
model classes with a baked-in variant (e.g. templates.HiPUnrolled16).
model_kwargs: A dictionary of model kwargs. The key of the dictionary is a
base model name (e.g., 'HiP') and the value is a kwargs dictionary.
searched_modules: A list of modules to search for the given class.
Returns:
A constructed instance of the specified model.
"""
candidate = None
for module in searched_modules:
if hasattr(module, model_base_name):
candidate = getattr(module, model_base_name)
break
assert candidate is not None, (
f'Failed to find class {model_base_name} in provided modules.')
logging.info('Using Perceiver template: %s', model_base_name)
if model_kwargs is None:
model_kwargs = {}
if model_variant_name is None:
instance = candidate(
**model_kwargs,
name=PERCEIVER_MODULE_NAME)
else:
assert model_variant_name in VARIANTS, (
f'VARIANTS does not contain {model_variant_name}. '
'Please set variant to `None` if using a model with fixed variant.'
)
logging.info('Using Perceiver variant: %s', model_variant_name)
instance = candidate(
**model_kwargs,
**VARIANTS[model_variant_name],
name=PERCEIVER_MODULE_NAME)
return instance
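# Usage sketch (illustrative; `inputs` is assumed to be a dict mapping modality
# names to [batch, index, channels] arrays, and the forward function must be
# wrapped in hk.transform or hk.transform_with_state before use):
#   def forward(inputs):
#     model = build_perceiver('PerceiverIO', 'io_mini')
#     return model('my_dataset', inputs, is_training=True)
#   forward_fn = hk.transform(forward)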
|
hierarchical_perceiver-main
|
perceiver.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Blocks for the Perceiver IO and HiP."""
from typing import Dict, List, Mapping, Optional, Tuple
import chex
from einshape import jax_einshape as einshape
import haiku as hk
import jax
from jax import numpy as jnp
from hierarchical_perceiver import perceiver_helpers
BATCH_DIM = 0
GROUPS_DIM = 1
INDEX_DIM = -2
CHANNELS_DIM = -1
RECONSTRUCTION_HEAD_NAME = 'reconstruction_head'
def regroup(
inputs: chex.Array,
num_output_groups: int,
regroup_type: str,
) -> chex.Array:
"""Re-group an input array from [B, G, N, C] to [B, G', N', C].
Args:
inputs: the array to regroup.
num_output_groups: The number of output groups G'.
regroup_type: The regrouping strategy to use.
Returns:
The re-grouped array.
"""
batch_size = inputs.shape[BATCH_DIM]
num_input_groups = inputs.shape[GROUPS_DIM]
num_input_latents = inputs.shape[INDEX_DIM]
num_channels = inputs.shape[CHANNELS_DIM]
if regroup_type in ['reshape', 'transpose_reshape']:
new_index_dim = num_input_groups * num_input_latents // num_output_groups
if regroup_type == 'transpose_reshape':
# [B, G, N, C] -> [B, N, G, C]
# This leads to mixing between all input groups, rather than preferential
# mixing between neighboring groups.
inputs = jnp.swapaxes(inputs, 1, 2)
outputs = jnp.reshape(
inputs, (batch_size, num_output_groups, new_index_dim, num_channels))
else:
raise ValueError(f'Unknown regroup_type: {regroup_type}.')
return outputs
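# Shape example (illustrative): regrouping an array of shape
# [B=2, G=16, N=128, C=64] with num_output_groups=4 and regroup_type='reshape'
# yields [2, 4, 512, 64], since G * N = 16 * 128 = 2048 = 4 * 512.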
class HiPCrossAttention(hk.Module):
"""A HiPCrossAttention module, including a dense block.
Maps batched, grouped arrays of shape B x G x M x C to arrays of shape
B x G x N x D.
"""
def __init__(self,
output_index_dim_train: Optional[int] = None,
output_index_dim_eval: Optional[int] = None,
output_num_channels: Optional[int] = None,
activation_name: str = 'sq_relu',
widening_factor: int = 1,
num_heads: int = 8,
use_post_attention_residual: bool = False,
name: Optional[str] = None):
"""Constructs a new HiPCrossAttention.
Args:
output_index_dim_train: The output index dimension size at train. Ignored
if `query_inputs` is specified directly at call.
output_index_dim_eval: The output index dimension size at eval. Ignored
if `query_inputs` is specified directly at call.
output_num_channels: The number of output channels.
activation_name: The activation to use.
widening_factor: The widening factor to use in the output MLP.
num_heads: The number of heads to use in cross-attention.
use_post_attention_residual: Enable the post-attention residual
connection? This residual adds the query inputs to the output of
attention.
name: Haiku module name.
"""
super(HiPCrossAttention, self).__init__(name=name)
self._output_index_dim_train = output_index_dim_train
self._output_index_dim_eval = output_index_dim_eval
self._output_num_channels = output_num_channels
self._activation_name = activation_name
self._widening_factor = widening_factor
self._num_heads = num_heads
self._use_post_attention_residual = use_post_attention_residual
def _subsample_query_inputs(self, query_inputs):
"""Randomly subsample the number of query inputs.
Args:
query_inputs: An array of latent query inputs, of shape
B x G x N_eval x C
Returns:
A subsampled array of latent query inputs, of shape
B x G x N_train x C,
where N_train < N_eval.
"""
batch_size, num_groups, _, _ = query_inputs.shape
def get_weight_indices():
# Sample indices without replacement for each batch & group.
rng_keys = hk.next_rng_keys(batch_size * num_groups)
rng_keys = jnp.reshape(
jnp.asarray(rng_keys), [batch_size, num_groups, -1])
def get_per_group_weights(random_key):
# Get the weight indices for a single group in a single batch.
weight_indices = jnp.arange(0, self._output_index_dim_eval)
weight_indices = jax.random.shuffle(random_key, weight_indices)
return weight_indices
# Subsample outside of the vmap to avoid compiling to a while loop.
weight_indices = jax.vmap(jax.vmap(get_per_group_weights))(rng_keys)
# [B, G, train_index_dim]
return weight_indices[..., :self._output_index_dim_train]
weight_indices = get_weight_indices()
# One-hot index onto the full weights (note: uses a bfloat16 matmul).
# [B, G, train_index_dim] -> [B, G, train_index_dim, eval_index_dim]
one_hot_indices = jax.nn.one_hot(
weight_indices, num_classes=self._output_index_dim_eval)
# [B, G, train_index_dim, C]
query_inputs = jnp.einsum(
'bgMc,bgmM->bgmc', query_inputs, one_hot_indices)
return query_inputs
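  # Shape note (illustrative): with output_index_dim_eval=256 and
  # output_index_dim_train=64, `one_hot_indices` has shape [B, G, 64, 256] and
  # the einsum gathers 64 of the 256 latent queries per group, taking
  # query_inputs from [B, G, 256, C] to [B, G, 64, C].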
def __call__(
self,
inputs: chex.Array,
*,
query_inputs: Optional[chex.Array] = None,
pre_attention_residual: Optional[chex.Array] = None,
attention_mask: Optional[chex.Array] = None,
is_training: bool = True,
) -> chex.Array:
"""Calls the HiPCrossAttention.
Args:
inputs: An input array of shape B x G x M x C to cross-attend to.
query_inputs: Optional query inputs to the cross-attention. If provided,
learned latent queries will not be constructed. Typically used for
decoding from position encoding queries.
pre_attention_residual: An optional array that will be added to the
queries before the cross-attention. Used for U-Net-like skip connections
in HiP.
attention_mask: An optional mask for cross-attention.
is_training: Are we currently training, yes or no?
Returns:
The array after processing with the HiPCrossAttention.
"""
# Input shape is assumed to be
# [batch_size, num_groups, index_dim_per_group, num_channels]
batch_size, num_groups, _, _ = inputs.shape
# If explicit query_inputs are not provided, learn latent queries.
if query_inputs is None:
assert self._output_index_dim_train is not None
assert self._output_index_dim_eval is not None
assert self._output_index_dim_eval >= self._output_index_dim_train
assert self._output_num_channels is not None
query_inputs = perceiver_helpers.TrainablePositionEncoding(
# The underlying array contains all latents expected at eval time
index_dim=num_groups * self._output_index_dim_eval,
num_channels=self._output_num_channels,
name='query_inputs')(batch_size=batch_size)
# Fold groups into the batch dimension
query_inputs = einshape('b(gm)c->bgmc', query_inputs, g=num_groups)
if is_training and (
self._output_index_dim_train < self._output_index_dim_eval):
# Sample a random subset of latent queries for this training batch.
query_inputs = self._subsample_query_inputs(query_inputs)
output_index_dim = query_inputs.shape[-2]
output_num_channels = query_inputs.shape[-1]
if pre_attention_residual is not None:
assert pre_attention_residual.shape[-2] == output_index_dim
# Project pre_attention_residual to the correct shape.
residual_num_channels = pre_attention_residual.shape[-1]
if residual_num_channels != output_num_channels:
pre_attention_residual = perceiver_helpers.conv_1d(
output_channels=output_num_channels,
name='pre_attention_residual_linear')(pre_attention_residual)
query_inputs += pre_attention_residual
# -----------------------------------------
# ---------- Cross-attend -> MLP ----------
# -----------------------------------------
attention = perceiver_helpers.Attention(
num_heads=self._num_heads,
# KV input channels determine the dimension of the attention matmul.
qk_channels=inputs.shape[-1],
v_channels=inputs.shape[-1],
# (Latent) query channels determine the size of the output.
output_channels=query_inputs.shape[-1])(
inputs_q=perceiver_helpers.layer_norm(query_inputs),
inputs_kv=perceiver_helpers.layer_norm(inputs),
attention_mask=attention_mask)
if self._use_post_attention_residual:
attention += query_inputs
output = attention
output += perceiver_helpers.Dense(
widening_factor=self._widening_factor)(
perceiver_helpers.layer_norm(attention), is_training=is_training)
return output
class SelfAttention(hk.Module):
"""A self-attention module, including a dense block."""
def __init__(self,
widening_factor: int = 4,
dropout_prob: float = 0.0,
dropout_attn_prob: float = 0.0,
drop_path_rate: float = 0.0,
num_heads: int = 8,
att_init_scale: float = 1.0,
dense_init_scale: float = 1.0,
qk_channels: Optional[int] = None,
v_channels: Optional[int] = None,
activation_name: str = 'sq_relu',
name: Optional[str] = None):
super(SelfAttention, self).__init__(name=name)
self._widening_factor = widening_factor
self._dropout_prob = dropout_prob
self._dropout_attn_prob = dropout_attn_prob
self._num_heads = num_heads
self._att_init_scale = att_init_scale
self._dense_init_scale = dense_init_scale
self._qk_channels = qk_channels
self._v_channels = v_channels
self._activation_name = activation_name
if drop_path_rate > 0.:
self._drop_path = perceiver_helpers.StochasticDepth(drop_path_rate)
else:
self._drop_path = lambda x, _: x
def __call__(self,
inputs: chex.Array,
attention_mask: Optional[chex.Array] = None,
is_training: bool = True) -> chex.Array:
dropout_prob = self._dropout_prob if is_training else 0.0
x = inputs
dropout_attn_prob = self._dropout_attn_prob if is_training else 0.0
qkv_inputs = perceiver_helpers.layer_norm(inputs)
attention = perceiver_helpers.Attention(
num_heads=self._num_heads,
init_scale=self._att_init_scale,
qk_channels=self._qk_channels,
v_channels=self._v_channels,
dropout_prob=dropout_attn_prob)(qkv_inputs, qkv_inputs,
attention_mask=attention_mask)
attention = hk.dropout(hk.next_rng_key(), dropout_prob, attention)
x = x + self._drop_path(attention, is_training)
dense_layer = perceiver_helpers.Dense(
widening_factor=self._widening_factor,
dropout_prob=dropout_prob,
init_scale=self._dense_init_scale)
dense_out = dense_layer(perceiver_helpers.layer_norm(x),
is_training=is_training)
x += self._drop_path(dense_out, is_training)
return x
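  # Note: the call above follows the standard pre-norm Transformer layout,
  #   x = x + DropPath(Dropout(Attention(LayerNorm(x))))
  #   x = x + DropPath(MLP(LayerNorm(x)))
  # where DropPath is a no-op unless drop_path_rate > 0.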
class PerceiverBlock(hk.Module):
"""The PerceiverBlock combines regrouping, cross- and self-attention."""
def __init__(self,
*,
num_output_groups: int,
output_index_dim: int,
num_output_channels: int,
num_self_attend_layers: int,
num_self_attend_heads: int,
self_attend_widening_factor: int = 4,
num_cross_attend_heads: int = 1,
cross_attend_widening_factor: int = 1,
regroup_inputs: bool = True,
regroup_type: str = 'reshape',
activation_name: str = 'sq_relu',
use_post_attention_residual: bool = True,
output_index_dim_train: Optional[int] = None,
output_index_dim_eval: Optional[int] = None,
dropout_prob: float = 0.0,
drop_path_rate: float = 0.0,
name: str):
"""Constructs a new PerceiverBlock.
The constructed block will reshape inputs into num_output_groups, then pass
them through a cross-attention (HiPCrossAttention), followed by a series of
self-attentions (n = num_self_attend_layers). num_output_channels and
output_index_dim control the output size.
    The input must be (batch, groups, index_dim, channels), where
    groups * index_dim must be divisible by num_output_groups. The output shape
    will be (batch_size, output_groups, output_index_dim, num_output_channels),
    of which only batch_size is guaranteed to match the input.
Args:
num_output_groups: Number of groups for this block.
      output_index_dim: The default output index dimension size, used at both
        train and eval time unless overridden below.
num_output_channels: The number of output channels.
num_self_attend_layers: Number of self-attend layers.
num_self_attend_heads: Number of heads per self-attend layer.
self_attend_widening_factor: SelfAttention widening factor.
num_cross_attend_heads: Number of HiPCrossAttention heads.
cross_attend_widening_factor: HiPCrossAttention widening factor.
regroup_inputs: If True, the input array will be restructured to match the
number of output groups. If False, the input will be passed in as is. If
False, the number of input and output groups must match.
regroup_type: The regrouping strategy to use.
activation_name: Activation for HiPCrossAttention and SelfAttention.
use_post_attention_residual: Enable the post-attention residual
connection? This residual adds the query inputs to the output of
attention.
output_index_dim_train: The optional output index dimension size at train.
If specified, overrides output_index_dim at train time.
output_index_dim_eval: The optional output index dimension size at eval.
If specified, overrides output_index_dim at eval time.
dropout_prob: SelfAttention dropout probability.
drop_path_rate: SelfAttention drop path rate.
name: Haiku module name.
"""
super().__init__(name=name)
self.num_output_groups = num_output_groups
self.num_output_channels = num_output_channels
self.regroup_inputs = regroup_inputs
self.regroup_type = regroup_type
# Optionally specialize index dim for more compute at test time.
# Usage: override `output_index_dim_train` to subsample at train and use the
# default index dim at eval, override `output_index_dim_eval` to supersample
# at eval and use the default index dim at train.
if output_index_dim_train is not None and output_index_dim_eval is not None:
raise ValueError(
'Only one of `output_index_dim_train` and `output_index_dim_eval`'
'should be overridden.')
if output_index_dim_train is None:
assert output_index_dim is not None, (
'output_index_dim must be specified '
'if output_index_dim_train is None.')
self.output_index_dim_train = output_index_dim
else:
self.output_index_dim_train = output_index_dim_train
if output_index_dim_eval is None:
assert output_index_dim is not None, (
'output_index_dim must be specified '
'if output_index_dim_eval is None.')
self.output_index_dim_eval = output_index_dim
else:
self.output_index_dim_eval = output_index_dim_eval
assert self.output_index_dim_eval >= self.output_index_dim_train, (
f'output_index_dim_eval (got {self.output_index_dim_eval}) must be '
f'at least as big as output_index_dim_train '
        f'(got {self.output_index_dim_train}).')
assert (
num_output_channels % num_self_attend_heads == 0
    ), (f'num_self_attend_heads ({num_self_attend_heads}) '
f'should divide num_output_channels ({num_output_channels}) evenly')
assert (
num_output_channels % num_cross_attend_heads == 0
    ), (f'num_cross_attend_heads ({num_cross_attend_heads}) '
f'should divide num_output_channels ({num_output_channels}) evenly')
self.projector = HiPCrossAttention(
activation_name=activation_name,
num_heads=num_cross_attend_heads,
output_num_channels=num_output_channels,
use_post_attention_residual=use_post_attention_residual,
widening_factor=cross_attend_widening_factor,
output_index_dim_train=self.output_index_dim_train,
output_index_dim_eval=self.output_index_dim_eval,
)
self.self_attentions = []
for idx in range(num_self_attend_layers):
self.self_attentions.append(
SelfAttention(
activation_name=activation_name,
dropout_prob=dropout_prob,
drop_path_rate=drop_path_rate,
num_heads=num_self_attend_heads,
widening_factor=self_attend_widening_factor,
name=f'self_attend_id_{idx}'))
def __call__(self, inputs: chex.ArrayTree, *,
is_training: bool,
pre_attention_residual: Optional[chex.ArrayTree] = None,
attention_mask: Optional[chex.Array] = None) -> chex.ArrayTree:
assert len(inputs.shape) == 4 # (batch, groups, index, channels) # pytype: disable=attribute-error # numpy-scalars
if is_training:
output_index_dim = self.output_index_dim_train
else:
output_index_dim = self.output_index_dim_eval
# Optionally regroup (this often means "reshape") inputs to a different
# number of output groups. Elements in different groups can't interact.
# Typically used in HiP.
if self.regroup_inputs:
inputs = regroup(
inputs=inputs,
num_output_groups=self.num_output_groups,
regroup_type=self.regroup_type)
else:
chex.assert_equal(inputs.shape[GROUPS_DIM], self.num_output_groups) # pytype: disable=attribute-error # numpy-scalars
z = self.projector(
inputs=inputs,
pre_attention_residual=pre_attention_residual,
is_training=is_training,
attention_mask=attention_mask)
chex.assert_shape(z, (inputs.shape[BATCH_DIM], self.num_output_groups, # pytype: disable=attribute-error # numpy-scalars
output_index_dim, self.num_output_channels))
for self_attend in self.self_attentions:
z = self_attend(z, is_training=is_training)
chex.assert_shape(z, (inputs.shape[BATCH_DIM], self.num_output_groups, # pytype: disable=attribute-error # numpy-scalars
output_index_dim, self.num_output_channels))
return z
class Embedder(hk.Module):
"""Projects inputs to the target number of channels.
Inputs should be a dictionary of {modality_name:
(batch_size, index_dim, num_channels)}. The output format will be similar, but
with the new number of channels.
Note both inputs and outputs are ungrouped. Grouping is handled by the
Grouper module.
"""
def __init__(self,
*,
num_embedding_channels: int,
with_bias: bool = True,
name: str = 'embedder'):
super().__init__(name=name)
self.with_bias = with_bias
self.num_embedding_channels = num_embedding_channels
self._orig_channels = None
def embed(self, inputs: Mapping[str, chex.Array]) -> Dict[str, chex.Array]:
"""Takes raw inputs and embeds them to num_embedding_channels.
Args:
inputs: A dictionary of modality name and (batch, index, channels) value.
Returns:
A dictionary of modality name and (batch, index, num_embedding_channels).
"""
_assert_input_shapes(inputs, expected_rank=3)
self._orig_channels = {}
out = {}
for modality_name, value in inputs.items():
# value: (batch, index, channels)
self._orig_channels[modality_name] = value.shape[CHANNELS_DIM]
conv_1d_embed = hk.Linear(
output_size=self.num_embedding_channels,
with_bias=self.with_bias,
w_init=hk.initializers.VarianceScaling(1.0),
name=f'embed_{modality_name}')
out[modality_name] = conv_1d_embed(value)
return out
def unembed(self, inputs: Mapping[str,
chex.Array]) -> Dict[str, chex.Array]:
"""Reverses an embed operation, reproducing the shape of original inputs.
Args:
inputs: A dictionary of modality name and (batch, index,
num_embedding_channels).
Returns:
      A dictionary of modality name and (batch, index, original channels),
      matching the channel counts recorded by embed().
"""
_assert_input_shapes(inputs, expected_rank=3, constant_channels=True)
assert self._orig_channels is not None, 'Must call embed() first.'
assert (
list(inputs.keys()) == list(self._orig_channels.keys())
), (
f'Modality names must be consistent. '
f'Expected {self._orig_channels.keys()}; '
f'found {inputs.keys()}.')
out = {}
for modality_name, value in inputs.items():
# value: (batch, index, channels)
conv_1d_unembed = hk.Linear(
output_size=self._orig_channels[modality_name],
with_bias=self.with_bias,
w_init=hk.initializers.VarianceScaling(0.0),
name=f'unembed_{modality_name}')
out[modality_name] = conv_1d_unembed(value)
return out
class PositionEncoder(hk.Module):
"""Adds position encodings to input channels.
Inputs should be a dictionary of {modality_name:
(batch_size, index_dim, num_channels)}. The output format will be identical.
Note both inputs and outputs are ungrouped. Grouping is handled by the
Grouper module.
"""
def __init__(
self,
num_position_encoding_channels: Optional[int] = None,
name: str = 'position_encoder',
):
super().__init__(name=name)
self.num_position_encoding_channels = num_position_encoding_channels
def __call__(
self, inputs: Mapping[str, chex.Array]
) -> Tuple[Dict[str, chex.Array], Dict[str, chex.Array]]:
"""Adds position encodings to the inputs and also returns the encodings.
Args:
inputs: A dictionary of {modality_name:
(batch_size, index_dim, num_channels)} inputs.
Returns:
A tuple of the inputs with added position encodings, as well as the
raw encodings.
"""
_assert_input_shapes(inputs, expected_rank=3, constant_channels=True)
num_channels = next(iter(inputs.values())).shape[CHANNELS_DIM]
if self.num_position_encoding_channels is None:
num_position_encoding_channels = num_channels
else:
num_position_encoding_channels = self.num_position_encoding_channels
out = {}
pos_encodings = {}
for modality_name, value in inputs.items():
# value: (batch, index, channels)
pos_encodings_i = perceiver_helpers.TrainablePositionEncoding(
index_dim=value.shape[INDEX_DIM],
num_channels=value.shape[CHANNELS_DIM],
# Unshared between modalities.
name=f'pos_emb_mae_{modality_name}')(value.shape[BATCH_DIM])
if num_position_encoding_channels != num_channels:
# Project to required size.
conv_1d_encode = hk.Linear(
output_size=num_channels,
with_bias=False,
w_init=hk.initializers.VarianceScaling(1.0),
# Shared between modalities.
name='position_encoding_linear')
pos_encodings_i = conv_1d_encode(pos_encodings_i)
pos_encodings[modality_name] = pos_encodings_i
out[modality_name] = value + pos_encodings[modality_name]
return out, pos_encodings
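# Illustrative usage sketch (not part of the original module): applying
# `PositionEncoder` to a dict of hypothetical modalities. Outputs keep the
# input shapes and the raw encodings are returned alongside them. `jax` is
# imported locally in case the surrounding module does not already import it.
def _example_position_encoder():
  import jax  # Local import so the sketch stays self-contained.
  def fn(inputs):
    encoder = PositionEncoder(num_position_encoding_channels=128)
    return encoder(inputs)
  inputs = {
      'image': jnp.zeros((2, 64, 256)),  # (batch, index, channels)
      'audio': jnp.zeros((2, 32, 256)),  # Channels must match across modalities.
  }
  transformed = hk.transform_with_state(fn)
  params, state = transformed.init(jax.random.PRNGKey(0), inputs)
  (encoded, pos_encodings), _ = transformed.apply(
      params, state, jax.random.PRNGKey(0), inputs)
  # encoded['image']: (2, 64, 256); pos_encodings['image']: (2, 64, 256).
  return encoded, pos_encodings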
@chex.dataclass
class _GroupInfo:
modality_name: str
group_idx: List[int]
final_padding: int
group_padding: int
class ConstNumGrouper(hk.Module):
"""Groups inputs into a constant number of groups.
  The per-group index dimension (group size) is derived from the input index
  dimensions.
Inputs should be a dictionary of {modality_name:
(batch_size, index_dim, num_channels)}. The output format will be (batch_size,
num_groups, new_index_dim (computed), num_channels).
Notes: Inputs will be ordered based on the insertion order of the dict. Make
sure this is consistent across calls. batch_size and num_channels must be
constant across modalities.
The Grouper will ensure that multiple modalities are not mixed in a single
group. Padding will be added proportionally at the end of each group, with
extra padding at the last group of each modality.
"""
def __init__(
self,
*,
num_groups: int,
name: str = 'constant_number_grouper',
):
"""Builds the Grouper.
Args:
num_groups: The number of groups to create.
name: Haiku module name.
"""
super().__init__(name=name)
self.num_groups = num_groups
self._group_map = None
def _build_group_map(self, inputs: Mapping[str, chex.Array]):
index_dims = [v.shape[INDEX_DIM] for v in inputs.values()]
assign_groups_to_modalities = perceiver_helpers.assign_groups_to_modalities
num_groups_per_modality, index_dim_per_group = assign_groups_to_modalities(
self.num_groups, index_dims)
group_map = []
next_group_id = 0
for (name, value), num_modality_groups in zip(inputs.items(),
num_groups_per_modality):
index_dim = value.shape[INDEX_DIM]
assigned_groups = list(
range(next_group_id, next_group_id + num_modality_groups))
next_group_id += num_modality_groups
final_padding = perceiver_helpers.padding_to_make_divisible(
index_dim, num_modality_groups)
local_index_dim_per_group = (index_dim +
final_padding) // num_modality_groups
group_padding = index_dim_per_group - local_index_dim_per_group
group_map.append(
_GroupInfo(
modality_name=name,
group_idx=assigned_groups,
final_padding=final_padding,
group_padding=group_padding))
self._group_map = group_map
def group(self, inputs: Mapping[str, chex.Array]) -> chex.Array:
"""Groups a given input with the appropriate padding.
This method can be called multiple times on inputs that require similar
grouping and padding (e.g., a sample and its attention mask).
Args:
inputs: A dict of modality names and (batch, index, channel) values.
Returns:
A tensor of shape (batch, group, index, channel).
"""
_assert_input_shapes(inputs, expected_rank=3, constant_channels=True)
self._build_group_map(inputs)
grouped_inputs = []
for group_info, value in zip(self._group_map, inputs.values()):
x = jnp.pad(value, ((0, 0), (0, group_info.final_padding), (0, 0)))
x = einshape('b(gm)...->bgm...', x, g=len(group_info.group_idx))
x = jnp.pad(x, ((0, 0), (0, 0), (0, group_info.group_padding), (0, 0)))
grouped_inputs.append(x)
return jnp.concatenate(grouped_inputs, axis=1)
def ungroup(self, latents: chex.Array) -> Dict[str, chex.Array]:
"""Ungroups a given input into a dict of modalities and values.
Args:
latents: A tensor of (batch, group, index, channel).
Returns:
A dict of the original modality names and their values.
"""
assert len(latents.shape) == 4
out = {}
for group_info in self._group_map:
# Select only the relevant groups.
x = latents[:, group_info.group_idx, :, :]
# Remove per-group padding.
x = x[:, :, :x.shape[INDEX_DIM] - group_info.group_padding, :]
x = einshape('bgm...->b(gm)...', x, g=len(group_info.group_idx))
# Remove final padding.
x = x[:, :x.shape[INDEX_DIM] - group_info.final_padding, :]
out[group_info.modality_name] = x
return out
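# Illustrative usage sketch (not part of the original module): a group/ungroup
# round trip through `ConstNumGrouper`. Shapes are hypothetical; the per-group
# index dimension depends on how groups are assigned to modalities. `jax` is
# imported locally in case the surrounding module does not already import it.
def _example_const_num_grouper_roundtrip():
  import jax  # Local import so the sketch stays self-contained.
  def fn(inputs):
    grouper = ConstNumGrouper(num_groups=4)
    grouped = grouper.group(inputs)       # (batch, 4, per_group_index, channels)
    ungrouped = grouper.ungroup(grouped)  # Padding removed, original shapes back.
    return grouped, ungrouped
  inputs = {
      'image': jnp.zeros((2, 100, 8)),  # (batch, index, channels)
      'audio': jnp.zeros((2, 28, 8)),   # Channels must match across modalities.
  }
  transformed = hk.transform_with_state(fn)
  params, state = transformed.init(jax.random.PRNGKey(0), inputs)
  (grouped, ungrouped), _ = transformed.apply(params, state, None, inputs)
  # ungrouped['image']: (2, 100, 8); ungrouped['audio']: (2, 28, 8).
  return grouped, ungrouped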
class ConcatenateGrouper(hk.Module):
"""Concatenates inputs into a single group, shared across all modalities.
Inputs should be a dictionary of {modality_name:
(batch_size, index_dim, num_channels)}. The output format will be (batch_size,
num_groups, total_index_dim, num_channels), where
total_index_dim = sum_{modality_i} index_dim_i.
Notes: Inputs will be ordered based on the insertion order of the dict. Make
sure this is consistent across calls. batch_size and num_channels must be
constant across modalities.
"""
def __init__(
self,
*,
name: str = 'concatenate_grouper',
):
"""Builds the Grouper.
Args:
name: Haiku module name.
"""
super().__init__(name=name)
self._index_dims = None
self._input_names = None
def group(self, inputs: Mapping[str, chex.Array]) -> chex.Array:
"""Groups a given input.
This method can be called multiple times on inputs that require similar
grouping (e.g., a sample and its attention mask).
Args:
inputs: A dict of modality names and (batch, index, channel) values.
Returns:
A tensor of shape (batch, group, index, channel).
"""
_assert_input_shapes(inputs, expected_rank=3, constant_channels=True)
self._input_names = inputs.keys()
self._index_dims = [v.shape[INDEX_DIM] for v in inputs.values()]
# [B, (I_0 + I_1 + ... + I_N), C]
grouped = jnp.concatenate(list(inputs.values()), axis=1)
# Add a dummy group axis:
return grouped[:, None, ...]
def ungroup(self, latents: chex.Array) -> Dict[str, chex.Array]:
"""Ungroups a given input into a dict of modalities and values.
Args:
latents: A tensor of (batch, group, index, channel).
Returns:
A dict of the original modality names and their values.
"""
assert len(latents.shape) == 4
start_idx = 0
out = dict()
for name, index_dim in zip(self._input_names, self._index_dims):
end_idx = start_idx + index_dim
# [B, 1, (I_0 + I_1 + ... + I_N), C] -> [B, I_i, C]
out[name] = latents[:, 0, start_idx:end_idx, :]
start_idx = end_idx
return out
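# Illustrative usage sketch (not part of the original module):
# `ConcatenateGrouper` places all modalities into a single group whose index
# dimension is the sum of the per-modality index dimensions. Shapes are
# hypothetical; `jax` is imported locally in case the surrounding module does
# not already import it.
def _example_concatenate_grouper_roundtrip():
  import jax  # Local import so the sketch stays self-contained.
  def fn(inputs):
    grouper = ConcatenateGrouper()
    grouped = grouper.group(inputs)       # (batch, 1, 100 + 28, channels)
    ungrouped = grouper.ungroup(grouped)  # Original per-modality shapes back.
    return grouped, ungrouped
  inputs = {
      'image': jnp.zeros((2, 100, 8)),
      'audio': jnp.zeros((2, 28, 8)),
  }
  transformed = hk.transform_with_state(fn)
  params, state = transformed.init(jax.random.PRNGKey(0), inputs)
  (grouped, ungrouped), _ = transformed.apply(params, state, None, inputs)
  # grouped: (2, 1, 128, 8); ungrouped['image']: (2, 100, 8).
  return grouped, ungrouped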
class ReconstructionHead(hk.Module):
"""Produces a reconstruction from perceiver latents and an MAE query.
The reconstruction is in a grouped and embedded form, similar to the input
of a PerceiverBlock. It needs to be ungrouped and unembedded.
"""
def __init__(self,
*,
use_post_attention_residual: bool = False,
name: str = RECONSTRUCTION_HEAD_NAME):
super().__init__(name=name)
self._use_post_attention_residual = use_post_attention_residual
def __call__(self, latents: chex.Array, *, mae_query: chex.Array,
is_training: bool) -> chex.Array:
"""Given latents and an MAE query, builds the reconstruction.
Args:
latents: The output of a PerceiverBlock.
mae_query: MAE query - the second return value of PositionalEncoder.
is_training: Current execution mode.
Returns:
A grouped array of reconstructions for the query. The array will have
the shape of the MAE query.
"""
chex.assert_rank(latents, 4)
chex.assert_rank(mae_query, 4)
projector = HiPCrossAttention(
widening_factor=1,
num_heads=1,
use_post_attention_residual=self._use_post_attention_residual,
)
predictions = projector(
inputs=latents, query_inputs=mae_query, is_training=is_training)
return predictions
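# Illustrative usage sketch (not part of the original module): calling
# `ReconstructionHead` on hypothetical latents and an MAE query. Per the
# docstring above, the prediction takes the shape of `mae_query`. `jax` is
# imported locally in case the surrounding module does not already import it.
def _example_reconstruction_head():
  import jax  # Local import so the sketch stays self-contained.
  def fn(latents, mae_query):
    head = ReconstructionHead()
    return head(latents, mae_query=mae_query, is_training=False)
  latents = jnp.zeros((2, 4, 32, 128))     # (batch, group, index, channels)
  mae_query = jnp.zeros((2, 4, 100, 128))  # (batch, group, query_index, channels)
  transformed = hk.transform_with_state(fn)
  params, state = transformed.init(jax.random.PRNGKey(0), latents, mae_query)
  predictions, _ = transformed.apply(
      params, state, jax.random.PRNGKey(0), latents, mae_query)
  # predictions: (2, 4, 100, 128), i.e. the shape of `mae_query`.
  return predictions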
def _assert_input_shapes(inputs: Mapping[str, chex.Array],
*,
expected_rank: int,
constant_channels: bool = False):
"""Given an inputs dictionary, asserts all shapes are correct."""
batch_size = None
num_channels = None
for modality_name, values in inputs.items():
assert len(values.shape) == expected_rank
if batch_size is None:
batch_size = values.shape[BATCH_DIM]
num_channels = values.shape[CHANNELS_DIM]
else:
assert (batch_size == values.shape[BATCH_DIM]
), f'batch size is inconsistent for input {modality_name}'
if constant_channels:
      assert (num_channels == values.shape[CHANNELS_DIM]
), f'num channels is inconsistent for input {modality_name}'
return batch_size, (num_channels if constant_channels else None)
|
hierarchical_perceiver-main
|
perceiver_blocks.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Perceiver IO and HiP."""
import unittest
from absl import logging
import chex
import haiku as hk
import jax
from ml_collections import config_dict
import numpy as np
from parameterized import parameterized
from hierarchical_perceiver import perceiver
from hierarchical_perceiver import perceiver_helpers
def mock_data():
return {
'imagenet': {
'imagenet_image':
np.random.random((2, 1024, 3)),
'imagenet_label':
np.random.randint(low=0, high=1,
size=(2, 32, 1)).astype(np.float32),
},
'audioset': {
'audioset_audio':
np.random.random((2, 128, 16)),
'audioset_label':
np.random.randint(low=0, high=1,
size=(2, 16, 16)).astype(np.float32),
}
}
SAMPLE_LABEL_MODALITIES = ('audioset_label', 'coco_labels', 'imagenet_label',
'jft_label', 'multi_nli_labels', 'objects365_labels')
DEFAULT_MODEL_KWARGS = config_dict.ConfigDict({
# Canonical models:
'PerceiverIO': {
# The size of the raw ('latent') position encodings.
# If != the embedding size, will be projected.
'num_position_encoding_channels': 512,
'activation_name': 'sq_relu',
'dropout_prob': 0.0,
'drop_path_rate': 0.0,
},
'HiP': {
# The size of the raw ('latent') position encodings.
# If != the embedding size, will be projected.
'num_position_encoding_channels': 512,
'regroup_type': 'reshape',
'activation_name': 'sq_relu',
'dropout_prob': 0.0,
'drop_path_rate': 0.0,
# Optional index dimension overrides:
},
'HiPClassBottleneck': {
# The size of the raw ('latent') position encodings.
# If != the embedding size, will be projected.
'num_position_encoding_channels': 512,
'regroup_type': 'reshape',
'activation_name': 'sq_relu',
'dropout_prob': 0.0,
'drop_path_rate': 0.0,
'label_modalities': SAMPLE_LABEL_MODALITIES,
},
})
def select_mae_outputs(output_dict):
trimmed_output = {}
for dataset, dataset_outs in output_dict.items():
mae_key = perceiver_helpers.ModelOutputKeys.INPUT_RECONSTRUCTION
    trimmed_output[dataset] = dict(dataset_outs[mae_key])
return trimmed_output
class PerceiverTest(unittest.TestCase):
@parameterized.expand([
# Standard HiP
('HiP', '16', 99_224_996),
('HiP', '256', 95_585_188),
('HiP', 'Mini', 18_709_668),
('HiPClassBottleneck', 'Mini', 21_114_788),
])
def test_all_perceiver_models(
self, model_base_name, model_variant_name, expected_num_params):
"""Test creating Perceiver-like models, as well as their parameter counts.
Parameters are counted per key, with the expectation that shared bottleneck
models will have more parameters, because the encoder and decoder are not
shared.
Args:
model_base_name: The model's name. Corresponds to a class in `perceiver`.
model_variant_name: The model's variant.
expected_num_params: Expected number of parameters for the module.
"""
rng = jax.random.PRNGKey(4)
def haiku_fn(inputs):
out = {}
model = perceiver.build_perceiver(
model_base_name=model_base_name,
model_variant_name=model_variant_name,
model_kwargs=DEFAULT_MODEL_KWARGS[model_base_name])
for dataset_name, v in inputs.items():
out[dataset_name] = model(dataset_name, v, is_training=True)
return out
inputs = mock_data()
with chex.fake_jit():
transformed = hk.transform_with_state(haiku_fn)
params, state = transformed.init(rng, inputs)
outputs, _ = transformed.apply(params, state, rng, inputs)
outputs = select_mae_outputs(outputs)
chex.assert_trees_all_equal_shapes(outputs, inputs)
      _, treedef = jax.tree_util.tree_flatten(params)
num_params = hk.data_structures.tree_size(params)
# pylint: disable=g-generic-assert
logging.info('Checking parameter counts...')
self.assertEqual(
num_params, expected_num_params,
f'{treedef}: \nExpected {expected_num_params} params, '
f'got {num_params} for model {model_base_name}, '
f'variant {model_variant_name}'
)
# pylint: enable=g-generic-assert
@parameterized.expand([
# Supersample latents at eval time.
('HiP', 'Mini', None, 128),
# Subsample latents at train time.
('HiP', 'Mini', 32, None),
('HiPClassBottleneck', 'Mini', None, 128),
# Subsample latents at train time.
('HiPClassBottleneck', 'Mini', 32, None),
])
def test_processor_index_train_eval(
self,
model_base_name,
model_variant_name,
processor_index_dim_train,
processor_index_dim_eval):
"""Test HiP processor train-time and eval-time index dimension overrides.
Args:
model_base_name: The model's name. Corresponds to a class in `perceiver`.
model_variant_name: The model's variant.
processor_index_dim_train: Train-time index dimension override for the
processor block.
processor_index_dim_eval: Eval-time index dimension override for the
processor block.
"""
rng = jax.random.PRNGKey(4)
def haiku_fn(inputs, is_training):
out = {}
model_kwargs = DEFAULT_MODEL_KWARGS[model_base_name]
      # Override the processor_index_dim_{train,eval} config settings.
with model_kwargs.unlocked():
model_kwargs.processor_index_dim_train = processor_index_dim_train
model_kwargs.processor_index_dim_eval = processor_index_dim_eval
model = perceiver.build_perceiver(
model_base_name=model_base_name,
model_variant_name=model_variant_name,
model_kwargs=model_kwargs)
for dataset_name, v in inputs.items():
out[dataset_name] = model(dataset_name, v, is_training=is_training)
return out
inputs = mock_data()
with chex.fake_jit():
transformed = hk.transform_with_state(haiku_fn)
params, state = transformed.init(rng, inputs, is_training=True)
# Run as training
outputs_train, _ = transformed.apply(
params, state, rng, inputs, is_training=True)
# Run as eval
outputs_eval, _ = transformed.apply(
params, state, rng, inputs, is_training=False)
outputs_train = select_mae_outputs(outputs_train)
chex.assert_trees_all_equal_shapes(outputs_train, inputs)
outputs_eval = select_mae_outputs(outputs_eval)
chex.assert_trees_all_equal_shapes(outputs_eval, inputs)
@parameterized.expand([
# Supersample latents at eval time.
('PerceiverIO', 'io_mini', None, 256),
# Subsample latents at train time.
('PerceiverIO', 'io_mini', 64, None),
])
def test_z_index_train_eval(
self,
model_base_name,
model_variant_name,
z_index_dim_train,
z_index_dim_eval):
"""Test train-time and eval-time index dimension overrides.
Args:
model_base_name: The model's name. Corresponds to a class in `perceiver`.
model_variant_name: The model's variant.
z_index_dim_train: Optional train-time index dimension override.
z_index_dim_eval: Optional eval-time index dimension override.
"""
rng = jax.random.PRNGKey(4)
def haiku_fn(inputs, is_training):
out = {}
model_kwargs = DEFAULT_MODEL_KWARGS[model_base_name]
      # Override the z_index_dim_{train,eval} config settings.
with model_kwargs.unlocked():
model_kwargs.z_index_dim_train = z_index_dim_train
model_kwargs.z_index_dim_eval = z_index_dim_eval
model = perceiver.build_perceiver(
model_base_name=model_base_name,
model_variant_name=model_variant_name,
model_kwargs=model_kwargs)
for dataset_name, v in inputs.items():
out[dataset_name] = model(dataset_name, v, is_training=is_training)
return out
inputs = mock_data()
with chex.fake_jit():
transformed = hk.transform_with_state(haiku_fn)
params, state = transformed.init(rng, inputs, is_training=True)
# Run as training
outputs_train, _ = transformed.apply(
params, state, rng, inputs, is_training=True)
# Run as eval
outputs_eval, _ = transformed.apply(
params, state, rng, inputs, is_training=False)
outputs_train = select_mae_outputs(outputs_train)
chex.assert_trees_all_equal_shapes(outputs_train, inputs)
outputs_eval = select_mae_outputs(outputs_eval)
chex.assert_trees_all_equal_shapes(outputs_eval, inputs)
|
hierarchical_perceiver-main
|
perceiver_test.py
|