python_code (stringlengths 0-780k) | repo_name (stringlengths 7-38) | file_path (stringlengths 5-103)
|---|---|---|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements general sampling utils for MC and SWOR sampling with states."""
from __future__ import annotations
import functools
from typing import Any, Tuple
import equinox as eqx
import jax
import jax.numpy as jnp
Array = jax.Array
KeyArray = jax.random.KeyArray
class State(eqx.Module):
"""A class for representing a state of an ancestral sampler."""
def logprobs(self) -> Array:
raise NotImplementedError
def apply_transition(self, a: Array) -> State:
raise NotImplementedError
def is_finished(self) -> Array:
raise NotImplementedError
def ancestral_sampling(key: KeyArray, init_state: State, max_length: int,
k: int, unroll: int = 1) -> Tuple[Array, Array, Array]:
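  """Draws k independent ancestral samples of length max_length."""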
f = functools.partial(single_ancestral_sample, init_state=init_state,
max_length=max_length, unroll=unroll)
keys = jax.random.split(key, k)
return jax.vmap(f)(keys)
def single_ancestral_sample(key: KeyArray, init_state: State, max_length: int,
unroll: int) -> Tuple[Array, Array, Array]:
"""Expands the init_state to produce a single sample of length max_length.
Args:
key: KeyArray for producing a sample.
init_state: Initial sampling state.
    max_length: Maximum number of sampling steps.
    unroll: Number of jax.lax.scan steps to unroll.
  Returns:
    A triple of the final sampler state, the accumulated log probability and
    the sampled action sequence.
"""
def f(carry, key):
state: State
state_logprob: Array
state, state_logprob = carry
log_p = state.logprobs()
p = jnp.exp(log_p)
i = jax.random.choice(key, jnp.arange(len(p)), p=p)
new_state = state.apply_transition(i)
new_state_logprob = state_logprob + log_p[i]
return (new_state, new_state_logprob), i
keys = jax.random.split(key, max_length)
(final_state, logprob), sample = jax.lax.scan(
f, (init_state, jnp.zeros([])), keys, unroll=unroll)
return final_state, logprob, sample
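# Hedged usage sketch (added for illustration; not part of the original
# module). `_CategoricalToyState` and `_demo_ancestral_sampling` are
# hypothetical names: a minimal State subclass with fixed categorical logits
# showing the interface that ancestral_sampling expects.
class _CategoricalToyState(State):
  logits: Array

  def logprobs(self) -> Array:
    return jax.nn.log_softmax(self.logits)

  def apply_transition(self, a: Array) -> "_CategoricalToyState":
    del a  # The toy distribution does not change between steps.
    return _CategoricalToyState(logits=self.logits)

  def is_finished(self) -> Array:
    return jnp.asarray(False)

def _demo_ancestral_sampling():
  init = _CategoricalToyState(logits=jnp.zeros(5))
  _, logprobs, samples = ancestral_sampling(
      jax.random.PRNGKey(0), init_state=init, max_length=4, k=3)
  return logprobs, samples  # Shapes: (3,) and (3, 4).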
def beam_search(init_state: State, max_length: int, k: int,
unroll: int = 1) -> Tuple[State, Array]:
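  """Runs beam search with beam size k for max_length steps."""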
a = jax.eval_shape(init_state.logprobs).shape[-1]
sbs = _GeneralStochasticBeamSearch(k=k, a=a, is_regular_beam_search=True,
unroll=unroll)
beam_state, logprobs, _ = sbs.sample(key=None,
init_state=init_state,
max_length=max_length)
return beam_state, logprobs
def stochastic_beam_search(
key: KeyArray, init_state: State, max_length: int, k: int, unroll: int = 1
) -> Tuple[State, Array, Array]:
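  """Samples k sequences without replacement using Stochastic Beam Search."""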
a = jax.eval_shape(init_state.logprobs).shape[-1]
sbs = _GeneralStochasticBeamSearch(k=k, a=a, is_regular_beam_search=False,
unroll=unroll)
return sbs.sample(key=key, init_state=init_state, max_length=max_length)
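# Hedged usage sketch building on the toy state above (illustrative only):
# stochastic_beam_search draws k=3 length-4 sequences without replacement
# from the sequence distribution induced by the state.
def _demo_stochastic_beam_search():
  init = _CategoricalToyState(logits=jnp.zeros(5))
  beam_state, logprobs, gumbels = stochastic_beam_search(
      jax.random.PRNGKey(0), init, max_length=4, k=3)
  return logprobs, gumbels  # Both of shape (3,).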
class _GeneralStochasticBeamSearch:
"""Class containing logic for Stochastic Beam Search."""
def __init__(self,
k: int,
a: int,
*,
is_regular_beam_search: bool = False,
unroll: int = 1):
    self.k = k  # Number of samples to keep (the beam size).
    self.a = a  # Number of possible actions at each step.
self.is_regular_beam_search = is_regular_beam_search
self.unroll = unroll
def _expand_initial_state_to_beam(self,
init_state) -> Tuple[State, Array, Array]:
beam_state = jax.tree_util.tree_map(
lambda x: jax.lax.broadcast(x, (self.k,)), init_state)
return (beam_state,
jnp.full(self.k, -jnp.inf).at[0].set(0),
jnp.full(self.k, -jnp.inf).at[0].set(0))
def _beam_state_subselect(self, beam_state: State, indices: Array) -> State:
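    # `indices` index the flattened (k, a) matrix of scores, so `indices //
    # self.a` selects which existing hypothesis to copy and `indices % self.a`
    # selects which action to apply to that hypothesis.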
to_gather = indices//self.a
to_apply = indices % self.a
beam_state = jax.tree_util.tree_map(lambda x: x[to_gather], beam_state)
beam_state = jax.vmap(beam_state.__class__.apply_transition)(beam_state,
to_apply)
return beam_state
# pylint: disable=missing-function-docstring
def _stochastic_beam_search_loop_body(self, beam, key: KeyArray
) -> Tuple[Tuple[State, Any, Any], Any]:
beam_state: State
beam_state, phis, gs = beam
phis = (jnp.expand_dims(phis, -1) +
jax.vmap(beam_state.__class__.logprobs)(beam_state)) # (k, a)
if self.is_regular_beam_search:
gs = phis
else:
gs = _gumbel_with_maximum(key=key,
location=phis,
target_max=jnp.expand_dims(gs, -1))
_, best_indices = jax.lax.top_k(gs.reshape(-1), self.k)
new_beam = (
self._beam_state_subselect(beam[0], best_indices),
phis.reshape(-1)[best_indices],
gs.reshape(-1)[best_indices]
)
return new_beam, None # None is output that will be ignored by scan.
def sample(self, key: KeyArray, init_state,
max_length: int) -> Tuple[State, Array, Array]:
if self.is_regular_beam_search:
split_key = jnp.zeros(max_length)
else:
if key is None:
raise ValueError("SBS needs KeyArray")
split_key = jax.random.split(key, max_length)
return jax.lax.scan(self._stochastic_beam_search_loop_body,
self._expand_initial_state_to_beam(init_state),
split_key, unroll=self.unroll)[0]
def _gumbel_with_maximum(key: KeyArray,
location: Array,
target_max: Array,
axis: int = -1) -> Array:
"""Samples a set of gumbels with a given maximum and location.
Note:
    This function implements the numerically stable version of the truncated
    Gumbel distribution from Appendix B.3 of Kool et al. (2019).
References:
Kool et al, 2019 - Appendix B.3: https://arxiv.org/pdf/1903.06059.pdf#page=12
Args:
key: a KeyArray used as the random key.
    location: the location of the Gumbel distribution, e.g. the log
      probabilities of the partial sequence.
    target_max: The desired maximum sampled Gumbel, e.g. the previous
      perturbed log probabilities of the partial sequence.
    axis: The dimension with the maximum values.
Returns:
The sampled gumbels.
""" # pylint: disable=line-too-long
# Gumbels with location (e.g. `log_probabilities`, G_\phi_{S`} in the paper).
gumbels = location + jax.random.gumbel(key, location.shape)
# pylint: disable=invalid-name
# Use equations (23) and (24) in Appendix B.3.
T = target_max # G_\phi_{S}, previous perturbed log_probs of partial seq.
Z = jnp.max(gumbels, axis=axis, keepdims=True) # current maximums
# pylint: enable=invalid-name
# Shift gumbels.
v = T - gumbels + jnp.log1p(-jnp.exp(gumbels - Z))
return T - jax.nn.relu(v) - jnp.log1p(jnp.exp(-jnp.abs(v)))
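# Hedged verification sketch (illustrative only; `_demo_gumbel_with_maximum`
# is not part of the original module): the maximum of the returned Gumbels
# along `axis` should match `target_max` up to floating point error.
def _demo_gumbel_with_maximum():
  location = jnp.log(jnp.array([[0.1, 0.2, 0.3, 0.4]]))  # (1, 4) log-probs.
  target_max = jnp.zeros((1, 1))  # Desired maximum of the perturbed scores.
  gumbels = _gumbel_with_maximum(jax.random.PRNGKey(0), location, target_max)
  return jnp.max(gumbels, axis=-1)  # Approximately jnp.zeros((1,)).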
|
synjax-master
|
synjax/_src/utils/autoregressive_decoding.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for synjax._src.utils.semirings_dot_general."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import numpy as np
from synjax._src import constants
from synjax._src.utils import semirings_dot_general
# pylint: disable=g-complex-comprehension
class SemiringsTest(parameterized.TestCase):
def assert_allclose(self, x, y):
np.testing.assert_allclose(x, y, rtol=constants.TESTING_RELATIVE_TOLERANCE,
atol=constants.TESTING_ABSOLUTE_TOLERANCE)
def test_real_dot_general(self):
    lhs = jax.random.uniform(jax.random.PRNGKey(0), (4, 3))
rhs = jax.random.uniform(jax.random.PRNGKey(2), (3, 5))
dimension_numbers = (([1], [0]), ([], []))
x = jax.lax.dot_general(lhs, rhs, dimension_numbers)
real_dot_general = semirings_dot_general.build_dot_general(jnp.sum,
jnp.multiply)
y = real_dot_general(lhs, rhs, dimension_numbers)
self.assert_allclose(x, y)
def test_log_dot_general(self):
    lhs = jax.random.uniform(jax.random.PRNGKey(0), (4, 3))
rhs = jax.random.uniform(jax.random.PRNGKey(2), (3, 5))
dimension_numbers = (([1], [0]), ([], []))
dot_general_log = semirings_dot_general.build_dot_general(
jax.nn.logsumexp, jnp.add)
x = dot_general_log(lhs, rhs, dimension_numbers)
y = jnp.log(jax.lax.dot_general(jnp.exp(lhs), jnp.exp(rhs),
dimension_numbers))
self.assert_allclose(x, y)
if __name__ == "__main__":
absltest.main()
|
synjax-master
|
synjax/_src/utils/semirings_dot_general_test.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flexible dot_general that supports arbitrary semirings."""
import functools
import inspect
import operator
from typing import Tuple, Sequence, Optional
import jax
import jax.numpy as jnp
DotDimensionNumbers = Tuple[Tuple[Sequence[int], Sequence[int]],
Tuple[Sequence[int], Sequence[int]]]
def build_dot_general(sum_fn, mul_op):
"""Constructs a dot_general function from arbitrary sum and multiplication.
Note that this implementation will not optimize for using
matrix-multiplication cores.
Args:
sum_fn: Function with the same interface as jnp.sum except that it
optionally supports an additional key argument for randomness.
mul_op: Binary operator that multiplies two JAX arrays.
Returns:
Function with the same interface as jax.lax.dot_general.
"""
def add_unused_key_arg(fn):
def fn2(*args, key=None, **kwargs):
del key
return fn(*args, **kwargs)
return fn2
spec = inspect.getfullargspec(sum_fn)
if "key" not in (spec.args + spec.kwonlyargs):
sum_fn = add_unused_key_arg(sum_fn)
def matmul(lhs: jax.Array, rhs: jax.Array,
key: Optional[jax.random.KeyArray] = None) -> jax.Array:
return sum_fn(mul_op(lhs[..., None], rhs[..., None, :, :]),
axis=-2, key=key)
return dot_general_from_matmul(matmul)
def dot_general_from_matmul(matmul):
"""Constructs a dot_general function from matmul.
Args:
    matmul: Function with the same interface as jnp.matmul except for one
      additional key parameter for optional randomness.
  Returns:
    A new function with the same interface as jax.lax.dot_general.
"""
def dot_general(lhs: jax.Array, rhs: jax.Array,
dimension_numbers: DotDimensionNumbers, precision=None,
preferred_element_type=None,
key: Optional[jax.random.KeyArray] = None) -> jax.Array:
# This function will reorder axes of lhs and rhs so that they are
# (*batch_dims, *lhs_other_dims, *contracting_dims) for lhs and
# (*batch_dims, *contracting_dims, *rhs_other_dims) for rhs and
# after that flatten the other_dims and contracting_dims so that
# lhs has shape (*batch_dims, size(lhs_other_dims), size(contracting_dims))
# rhs has shape (*batch_dims, size(contracting_dims), size(rhs_other_dims)).
# This will make lhs and rhs in the right shape to apply matmul that will
# broadcast over *batch_dims and contract the contracting_dim so that
# result has shape (*batch_dims, size(lhs_other_dims), size(rhs_other_dims))
# which is unflattened before returning to
# (*batch_dims, *lhs_other_dims, *rhs_other_dims).
del precision
del preferred_element_type
((lhs_contracting_dims, rhs_contracting_dims),
(lhs_batch_dims, rhs_batch_dims)) = dimension_numbers
lhs_other_dims = [
x for x in range(lhs.ndim)
if x not in lhs_contracting_dims and x not in lhs_batch_dims]
rhs_other_dims = [
x for x in range(rhs.ndim)
if x not in rhs_contracting_dims and x not in rhs_batch_dims]
lhs_permutation = [*lhs_batch_dims, *lhs_other_dims, *lhs_contracting_dims]
rhs_permutation = [*rhs_batch_dims, *rhs_contracting_dims, *rhs_other_dims]
def product(xs):
return functools.reduce(operator.mul, xs, 1)
batch_shape = tuple(lhs.shape[x] for x in lhs_batch_dims)
lhs_other_shape = tuple(lhs.shape[x] for x in lhs_other_dims)
rhs_other_shape = tuple(rhs.shape[x] for x in rhs_other_dims)
contracting_size = product(lhs.shape[x] for x in lhs_contracting_dims)
lhs4matmul = jnp.transpose(lhs, lhs_permutation
).reshape(*batch_shape, -1, contracting_size)
rhs4matmul = jnp.transpose(rhs, rhs_permutation
).reshape(*batch_shape, contracting_size, -1)
res = matmul(lhs4matmul, rhs4matmul, key=key)
return res.reshape(*batch_shape, *lhs_other_shape, *rhs_other_shape)
return dot_general
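# Hedged usage sketch (illustrative; `_demo_tropical_dot_general` is not part
# of the original API): a max-plus ("tropical") matrix product obtained by
# plugging jnp.max and jnp.add into build_dot_general.
def _demo_tropical_dot_general():
  tropical_dot_general = build_dot_general(jnp.max, jnp.add)
  lhs = jnp.arange(6.0).reshape(2, 3)
  rhs = jnp.arange(12.0).reshape(3, 4)
  dimension_numbers = (([1], [0]), ([], []))  # Plain matrix-product layout.
  out = tropical_dot_general(lhs, rhs, dimension_numbers)
  return out  # out[i, j] == max_k (lhs[i, k] + rhs[k, j]); shape (2, 4).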
|
synjax-master
|
synjax/_src/utils/semirings_dot_general.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for synjax._src.utils.chart_struct."""
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as jnp
from synjax._src.utils import chart_struct
from synjax._src.utils import semirings
# pylint: disable=g-complex-comprehension
def create_numbered_chart(n):
  # Creates a chart with 4 axes (semiring, n, n, 2) where the last axis
  # contains a pair signifying the span that the node covers.
chart_table = jnp.zeros((1, n, n, 2))
chart_table = chart_table.at[0, :, :, 0].add(jnp.arange(n).reshape(n, 1))
chart_table = chart_table.at[0, :, :, 1].add(jnp.arange(n).reshape(1, n))
return chart_struct.from_cky_table(chart_table)
class ChartStructTest(parameterized.TestCase):
def assert_all(self, x, *, msg=""):
self.assertTrue(jnp.all(x), msg=msg)
@parameterized.parameters([
dict(d=d, n=n)
for n in [5] for d in range(1, n+1)
])
def test_get_entries(self, d, n):
chart = create_numbered_chart(n)
diagonal = chart.get_entries(d)
self.assertEqual(diagonal.shape, (1, n, 2))
self.assert_all(diagonal[0, :d, 0] == jnp.arange(d))
spans_count = n-d+1
valid_starts = diagonal[0, :spans_count, 0]
valid_ends = diagonal[0, :spans_count, 1]
invalid_starts = diagonal[0, spans_count:, 0]
invalid_ends = diagonal[0, spans_count:, 1]
self.assertTrue(jnp.all(valid_ends-valid_starts+1 == d))
self.assertTrue(jnp.all(invalid_ends-invalid_starts < 0))
@parameterized.parameters([
dict(d=d, n=n)
for n in [5] for d in range(1, n+1)
])
def test_mask(self, d, n):
chart = create_numbered_chart(n)
sr = semirings.LogSemiring()
# Testing mask without excluding word nodes.
mask = chart.mask(d, sr, exclude_word_nodes=False) > sr.zero()
split_points = jnp.sum(mask[0, :, :, 0], -1)
self.assertTrue(jnp.all(split_points[:-d+1] == d-1))
self.assertTrue(jnp.all(split_points[-d+1:] == 0))
spans_count = jnp.sum(mask[0, :, :, 0], -2)
self.assertTrue(jnp.all(spans_count[:d-1] == n-d+1))
self.assertTrue(jnp.all(spans_count[d-1:] == 0))
# Testing mask with excluding word nodes.
mask = chart.mask(d, sr, exclude_word_nodes=True) > sr.zero()
split_points = jnp.sum(mask[0, :, :, 0], -1)
if d >= 4:
self.assert_all(split_points[:-d+1] == d-3)
else:
self.assert_all(split_points[:-d+1] == 0)
self.assert_all(split_points[-d+1:] == 0)
spans_count = jnp.sum(mask[0, :, :, 0], -2)
if d >= 4:
self.assert_all(spans_count[1:d-2] == n-d+1)
else:
self.assert_all(spans_count[:-d+1] == 0)
self.assert_all(spans_count[d-1:] == 0)
# Testing that left and masked right match.
left_cut = chart.left()[0, :-d+1, :d-1]
sr = semirings.LogSemiring()
right_cut = chart.right(d, sr)[0, :-d+1, :d-1]
    # End of left matches beginning of right for all nodes.
self.assert_all(left_cut[:, :, 1]+1 == right_cut[:, :, 0])
# Difference between end of right and beginning of left is parent span size.
self.assertTrue(jnp.all(right_cut[:, :, 1] - left_cut[:, :, 0] + 1 == d))
if __name__ == "__main__":
absltest.main()
|
synjax-master
|
synjax/_src/utils/chart_struct_test.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for synjax._src.utils.general."""
import math
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import numpy as np
from synjax._src import constants
from synjax._src.utils import special
# pylint: disable=g-complex-comprehension
class GeneralTest(parameterized.TestCase):
def assert_allclose(self, x, y):
np.testing.assert_allclose(x, y, rtol=constants.TESTING_RELATIVE_TOLERANCE,
atol=constants.TESTING_ABSOLUTE_TOLERANCE)
def assert_all(self, x, *, msg=""):
self.assertTrue(all(map(jnp.all, jax.tree_util.tree_flatten(x)[0])),
msg=msg)
def test_log_comb(self):
self.assert_allclose(jnp.log(math.comb(32, 10)), special.log_comb(32, 10))
def test_log_catalan(self):
self.assert_allclose(special.log_catalan(jnp.arange(8)),
jnp.log(jnp.array([1, 1, 2, 5, 14, 42, 132, 429])))
def test_log_delannoy(self):
self.assert_allclose(special.log_delannoy(jnp.arange(7), jnp.arange(7),
max_input_value=10),
jnp.log(jnp.array([1, 3, 13, 63, 321, 1683, 8989])))
self.assert_allclose(special.log_delannoy(jnp.arange(7), jnp.arange(1, 8),
max_input_value=10),
jnp.log(jnp.array([1, 5, 25, 129, 681, 3653, 19825])))
def test_tpu_roll(self):
x = jnp.arange(7*7*7).reshape(7, 7, 7)
axis = -2
shift = 2
self.assert_allclose(special._tpu_roll(x, shift=shift, axis=axis),
jnp.roll(x, shift=shift, axis=axis))
def test_tpu_take(self):
n = 4
x = jnp.arange(n*n*n).reshape(n, n, n)
axis = -2
indices = jnp.arange(n-1, 1, -1)
self.assert_allclose(special._tpu_take(x, indices, axis=axis),
jnp.take(x, indices, axis=axis))
def test_inv(self):
grad = lambda ff, **kw: jax.grad(lambda x: jnp.sum(ff(x, **kw)[..., -1]))
n = 20
for method in ["qr", "solve"]:
with self.subTest(method):
matrix = jax.random.uniform(jax.random.PRNGKey(0), (n, n))
self.assert_allclose(special.inv(matrix, inv_method=method),
jnp.linalg.inv(matrix))
self.assert_allclose(grad(special.inv, inv_method=method)(matrix),
grad(jnp.linalg.inv)(matrix))
matrix = jnp.zeros((n, n))
self.assert_allclose(special.inv(matrix, inv_method=method), 0.)
self.assert_allclose(grad(special.inv, inv_method=method)(matrix), 0.)
self.assert_allclose(special.inv(matrix, inv_method=method,
test_invertability=True), 0.)
self.assert_allclose(grad(special.inv, inv_method=method,
test_invertability=True)(matrix), 0.)
self.assert_all(~jnp.isfinite(special.inv(matrix, inv_method=method,
test_invertability=False)))
self.assert_all(jnp.isnan(grad(special.inv, inv_method=method,
test_invertability=False)(matrix)))
matrix = jnp.ones((n, n))
self.assert_allclose(special.inv(matrix, inv_method=method), 0.)
self.assert_allclose(grad(special.inv, inv_method=method)(matrix), 0.)
def test_safe_slogdet(self):
grad = lambda ff: jax.grad(lambda x: ff(x)[1])
n = 20
matrix = jax.random.uniform(jax.random.PRNGKey(0), (n, n))
self.assert_allclose(special.safe_slogdet(matrix),
jnp.linalg.slogdet(matrix))
self.assert_allclose(grad(special.safe_slogdet)(matrix),
grad(jnp.linalg.slogdet)(matrix))
matrix = jnp.zeros((n, n))
self.assert_all(jnp.isfinite(special.safe_slogdet(matrix)[1]))
self.assert_allclose(grad(special.safe_slogdet)(matrix), 0)
matrix = jnp.ones((n, n))
self.assert_all(jnp.isfinite(special.safe_slogdet(matrix)[1]))
self.assert_allclose(grad(special.safe_slogdet)(matrix), 0)
def test_safe_log(self):
self.assert_all(jnp.exp(special.safe_log(0)) == 0)
self.assert_all(special.safe_log(-0.001) < -1e4)
self.assert_all(special.safe_log(0) < -1e4)
self.assert_all(special.safe_log(1) == 0)
self.assert_all(jax.grad(special.safe_log)(1.) == 1)
self.assert_all(jax.grad(special.safe_log)(0.) > 1e4)
if __name__ == "__main__":
absltest.main()
|
synjax-master
|
synjax/_src/utils/special_test.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for semirings."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import numpy as np
from synjax._src import constants
from synjax._src.utils import semirings
# pylint: disable=g-complex-comprehension
class SemiringsTest(parameterized.TestCase):
def assert_allclose(self, x, y):
np.testing.assert_allclose(x, y, rtol=constants.TESTING_RELATIVE_TOLERANCE,
atol=constants.TESTING_ABSOLUTE_TOLERANCE)
def test_sampling_semiring(self):
sr = semirings.SamplingSemiring()
a = jax.random.uniform(jax.random.PRNGKey(0), (34, 12, 32))
b = jax.random.uniform(jax.random.PRNGKey(1), (34, 12, 32))
# Test wrap/unwrap.
a_wrapped = sr.wrap(a)
self.assertEqual(a_wrapped.shape, (1,) + a.shape)
a_unwrapped = sr.unwrap(a_wrapped)
self.assertEqual(a_unwrapped.shape, a.shape)
# Test mul.
self.assert_allclose(sr.mul(a, b), a + b)
# Test sum.
logsumexp = jax.scipy.special.logsumexp
key = jax.random.PRNGKey(0)
self.assert_allclose(sr.sum(a, -1, key=key), logsumexp(a, -1))
self.assert_allclose(sr.sum(a, -2, key=key), logsumexp(a, -2))
self.assert_allclose(sr.sum(a, (-1, -2), key=key), logsumexp(a, (-1, -2)))
# Test backprop.
def f(a, key):
a = sr.wrap(a)
keys = jax.random.split(key, a.ndim-1)
for akey in keys:
a = sr.sum(a, -1, key=akey)
return sr.unwrap(a)
sample1 = jax.grad(f)(a, jax.random.PRNGKey(2))
sample2 = jax.grad(f)(a, jax.random.PRNGKey(3))
self.assertEqual(sample1.shape, a.shape)
self.assertEqual(sample2.shape, a.shape)
self.assertEqual(jnp.sum(sample1), 1)
self.assertEqual(jnp.sum(sample2), 1)
self.assertFalse(jnp.all(sample1 == sample2))
@parameterized.parameters(
[dict(approximate=approximate, k=k)
for approximate in [True, False]
for k in [1, 5]])
def test_kbest_semiring(self, approximate, k):
sr = semirings.KBestSemiring(k=k, approximate=approximate)
a = jax.random.uniform(jax.random.PRNGKey(0), (34, 12, 32))
def is_ordered(x):
return x.shape[0] == 1 or jnp.all(x[:-1] >= x[1:])
# Test wrap/unwrap.
a_wrapped = sr.wrap(a)
self.assertEqual(a_wrapped.shape, (k,) + a.shape)
a_unwrapped = sr.unwrap(a_wrapped)
self.assertEqual(a_unwrapped.shape, (k,) + a.shape)
self.assertTrue(is_ordered(a_unwrapped))
# Test mul.
x = jnp.log(jnp.arange(k, 0, -1, dtype=jnp.float32))
kmax = sr.mul(x, x)
if k >= 3:
self.assert_allclose(kmax[:3], jnp.array([2*x[0], x[0]+x[1], x[0]+x[1]]))
# Test sum.
for axis in [-1, -2]:
kbest = sr.sum(a_wrapped, axis)
target_shape = list(a_wrapped.shape)
target_shape.pop(axis)
self.assertEqual(kbest.shape, tuple(target_shape))
self.assertTrue(approximate or is_ordered(a_unwrapped))
# Test backprop.
def f(a):
a = sr.wrap(a)
while a.ndim > 1:
a = sr.sum(a, -1)
return sr.unwrap(a)
samples = jax.jacrev(f)(a)
self.assertEqual(samples.shape, (k,)+a.shape)
def flatten(x):
return x.reshape(x.shape[0], -1)
self.assertTrue(jnp.all(jnp.sum(flatten(samples), -1) == 1))
self.assert_allclose(jnp.sum(flatten(samples * a[None]), -1),
jax.lax.top_k(a.reshape(-1), k)[0])
def test_log_semiring(self):
sr = semirings.LogSemiring()
a = jax.random.uniform(jax.random.PRNGKey(0), (34, 12, 32))
b = jax.random.uniform(jax.random.PRNGKey(1), (34, 12, 32))
# Test wrap/unwrap.
a_wrapped = sr.wrap(a)
self.assertEqual(a_wrapped.shape, (1,) + a.shape)
a_unwrapped = sr.unwrap(a_wrapped)
self.assertEqual(a_unwrapped.shape, a.shape)
# Test mul.
self.assert_allclose(sr.mul(a, b), a + b)
# Test sum.
logsumexp = jax.scipy.special.logsumexp
self.assert_allclose(sr.sum(a, -1), logsumexp(a, -1))
self.assert_allclose(sr.sum(a, -2), logsumexp(a, -2))
if __name__ == "__main__":
absltest.main()
|
synjax-master
|
synjax/_src/utils/semirings_test.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generally useful small functions."""
import functools
import operator
from typing import Union, Tuple, Optional, Literal
import equinox as eqx
import jax
import jax.numpy as jnp
from synjax._src import constants
Array = jax.Array
Shape = Tuple[int, ...]
KeyArray = jax.random.KeyArray
EPS = constants.EPS
INF = constants.INF
############################################################################
#### Missing math utils.
############################################################################
def log_comb(n: Array, k: Array) -> Array:
"""Computes a logarithm of combination (n, k)."""
gammaln = jax.scipy.special.gammaln
return gammaln(n + 1)-gammaln(k + 1)-gammaln(n+1-k)
def log_catalan(n: Array) -> Array:
"""Computes the log of nth number in Catalan series."""
return log_comb(2*n, n) - jnp.log1p(n)
def log_delannoy(m: Array, n: Array, *, max_input_value: int) -> Array:
"""Computes a logarithm of a Delannoy number."""
m = jnp.asarray(m)
n = jnp.asarray(n)
k = jnp.arange(max_input_value)
res = log_comb(m[..., None], k) + log_comb(n[..., None], k) + k * jnp.log(2)
mask = jnp.arange(max_input_value) <= jnp.minimum(m, n)[..., None]
return jax.nn.logsumexp(jnp.where(mask, res, -jnp.inf), axis=-1)
############################################################################
#### Functions with gradients that are more numerically stable.
############################################################################
@jax.custom_gradient
def safe_log(x: Array) -> Array:
x = jnp.asarray(x, dtype=jnp.float32)
res = jnp.where(x < EPS, -INF, jnp.log(jnp.maximum(x, EPS)))
# pytype: disable=bad-return-type
return res, lambda g: g/jnp.maximum(x, EPS)
# pytype: enable=bad-return-type
InversionMethod = Literal["solve", "qr"]
def inv(x: Array, *, inv_method: Optional[InversionMethod] = None,
matmul_precision: Optional[jax.lax.Precision] = None,
test_invertability: bool = True):
"""Matrix inverse with controlable precision, algorithm and invertability."""
@jax.custom_jvp
def inv_fn(x):
if inv_method is None or inv_method == "solve":
inverse = jnp.linalg.solve(
x, jnp.broadcast_to(jnp.eye(x.shape[-1]), x.shape))
elif inv_method == "qr":
q, r = jnp.linalg.qr(x)
r_inv = jax.scipy.linalg.solve_triangular(
r, jnp.broadcast_to(jnp.eye(x.shape[-1]), x.shape), lower=False)
q_tr = jnp.swapaxes(q, -1, -2)
inverse = jnp.matmul(r_inv, q_tr, precision=matmul_precision)
else:
raise NotImplementedError
if test_invertability:
mask = jnp.isfinite(jnp.linalg.slogdet(x)[1])[..., None, None]
return jnp.where(mask, jnp.nan_to_num(inverse), 0.)
else:
return inverse
@inv_fn.defjvp
def inv_fn_jvp(primals, tangents): # pylint: disable=unused-variable
x = inv_fn(primals[0])
a = jnp.matmul(x, tangents[0], precision=matmul_precision)
a = jnp.matmul(a, x, precision=matmul_precision)
return x, -a
return inv_fn(x)
def safe_slogdet(
x: Array, *, logdet_method: Optional[Literal["lu", "qr"]] = None,
inv_method: Optional[InversionMethod] = None,
matmul_precision: Optional[Union[str, jax.lax.Precision]] = None,
test_invertability: bool = True):
"""Signed log determinant with more stable gradients."""
@jax.custom_vjp
def slogdet_fn(y):
sign, log_abs_det = jnp.linalg.slogdet(y, method=logdet_method)
return sign, jnp.clip(jnp.nan_to_num(log_abs_det, neginf=-INF, posinf=INF),
-INF, INF)
def slogdet_fn_fwd(y: Array) -> Tuple[Tuple[Array, Array], Array]:
return slogdet_fn(y), y
def slogdet_fn_bwd(y: Array, g) -> Tuple[Array]:
inverse = inv(y, inv_method=inv_method, matmul_precision=matmul_precision,
test_invertability=test_invertability)
return (jnp.einsum("...,...ij->...ji", g[1], inverse),)
slogdet_fn.defvjp(slogdet_fn_fwd, slogdet_fn_bwd)
return slogdet_fn(x)
############################################################################
#### Improved jnp.roll for TPU.
############################################################################
def is_tpu_machine() -> bool:
return any(dev.platform == "tpu" for dev in jax.devices())
def roll(x: Array, shift: Union[int, Array], axis: int = -1) -> Array:
if isinstance(shift, Array) and is_tpu_machine():
# This happens during vmap on TPU where it's better to roll with matmul.
return _tpu_roll(x, shift, axis)
else:
return jnp.roll(x, shift, axis)
def _tpu_roll(x: Array, shift: Union[int, Array], axis: int = -1) -> Array:
"""Significantly faster version of jnp.roll implemented with matmul on TPU."""
d = x.shape[axis]
permutation = (jnp.arange(d)-shift)%d
return _tpu_take(x, permutation, axis)
def _tpu_take(x: Array, indices: Array, axis: int = -1) -> Array:
if indices.ndim != 1:
raise ValueError("This function supports only 1-dim indices.")
dtype = x.dtype
axis %= x.ndim # Converts possibly negative axis into a positive one.
permutation_matrix = jax.nn.one_hot(indices, num_classes=x.shape[axis],
axis=-1)
x_pre = jnp.moveaxis(x, axis, -2)
x_post = jnp.matmul(permutation_matrix, x_pre)
return jnp.moveaxis(x_post, -2, axis).astype(dtype)
############################################################################
#### One-hot vector utils
############################################################################
def max_one_hot(x: Array, axis: Union[int, Tuple[int, ...]]) -> Array:
max_val = jnp.max(x, axis=axis, keepdims=True)
return jnp.where(x == max_val, x-jax.lax.stop_gradient(x)+1., 0.)
def sample_one_hot(logits: Array, *, key: KeyArray,
axis: Union[int, Tuple[int, ...]] = -1) -> Array:
noise = jax.random.gumbel(key, logits.shape)
return max_one_hot(logits + noise, axis)
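# Hedged usage sketch (`_demo_sample_one_hot` is illustrative only):
# sample_one_hot draws a one-hot vector whose hot index follows
# softmax(logits), via the Gumbel-max trick on top of max_one_hot.
def _demo_sample_one_hot():
  logits = jnp.log(jnp.array([0.1, 0.2, 0.7]))
  one_hot = sample_one_hot(logits, key=jax.random.PRNGKey(0))
  return one_hot  # Exactly one entry equals 1., the rest are 0.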
############################################################################
#### Shape utils
############################################################################
def split_key_for_shape(key: KeyArray, shape):
shape = asshape(shape)
keys = jax.random.split(key, shape_size(shape))
return keys.reshape(shape+key.shape)
def asshape(shape: Union[Shape, int]) -> Shape:
return (shape,) if isinstance(shape, int) else tuple(shape)
def shape_size(shape: Union[Shape, int]) -> int:
return functools.reduce(operator.mul, asshape(shape), 1)
def vmap_ndim(f, ndim: int):
for _ in range(ndim):
f = jax.vmap(f)
return f
def grad_ndim(f, ndim: int, has_aux: bool = False):
gf = eqx.filter_grad(f, has_aux=has_aux)
gf = vmap_ndim(gf, ndim)
return lambda *inputs: jax.tree_map(jnp.nan_to_num, gf(*inputs))
|
synjax-master
|
synjax/_src/utils/special.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reader for dataset used in the SLIM paper.
Example usage:
filenames, iterator, next_element = make_dataset(batch_size=16)
with tf.Session() as sess:
# Initialize `iterator` with train data.
# training_filenames = ["/var/data/train_1.tfrecord", ...]
sess.run(iterator.initializer, feed_dict={filenames: training_filenames})
ne_value = sess.run(next_element)
# Initialize `iterator` with validation data.
  # validation_filenames = ["/var/data/validation_1.tfrecord", ...]
# sess.run(iterator.initializer, feed_dict={filenames: validation_filenames})
ne_value = sess.run(next_element)
`next_element` is a tuple containing the query, the target, and the raw data.
The query is a tuple where the first element is the
sequence of 9 (images, cameras, captions) which can be given to the model
as context. The second element in the query is the camera angle of the
viewpoint to reconstruct. The target contains the image corresponding to the
queried viewpoint, the text description from that viewpoint and an image of
the scene viewed from above.
The raw data is a dictionary with all the fields as read from the tf.Record as
described in the documentation for `_parse_proto`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
_NUM_VIEWS = 10
_NUM_RAW_CAMERA_PARAMS = 3
_IMAGE_SCALE = 0.25
_USE_SIMPLIFIED_CAPTIONS = False
_PARSE_METADATA = False
def _parse_proto(buf):
"""Parse binary protocol buffer into tensors.
The protocol buffer is expected to contain the following fields:
* frames: 10 views of the scene rendered as images.
* top_down_frame: single view of the scene from above rendered as an image.
* cameras: 10 vectors describing the camera position from which the frames
    have been rendered.
* captions: A string description of the scene. For the natural language
dataset, contains descriptions written by human annotators. For
synthetic data contains a string describing each relation between
objects in the scene exactly once.
* simplified_captions: A string description of the scene. For the natural
language dataset contains a string describing each relation between
objects in the scene exactly once. For synthetic datasets contains
a string describing every possible pairwise relation between objects in
the scene.
* meta_shape: A vector of strings describing the object shapes.
* meta_color: A vector of strings describing the object colors.
* meta_size: A vector of strings describing the object sizes.
* meta_obj_positions: A matrix of floats describing the position of each
object in the scene.
* meta_obj_rotations: A matrix of floats describing the rotation of each
object in the scene.
  * meta_obj_colors: A matrix of floats describing the color of each
    object in the scene as RGBA in the range [0, 1].
Args:
buf: A string containing the serialized protocol buffer.
Returns:
A dictionary containing tensors for each of the fields in the protocol
buffer. If _PARSE_METADATA is False, will omit fields starting with 'meta_'.
"""
feature_map = {
"frames":
tf.FixedLenFeature(shape=[_NUM_VIEWS], dtype=tf.string),
"top_down_frame":
tf.FixedLenFeature(shape=[1], dtype=tf.string),
"cameras":
tf.FixedLenFeature(
shape=[_NUM_VIEWS * _NUM_RAW_CAMERA_PARAMS], dtype=tf.float32),
"captions":
tf.VarLenFeature(dtype=tf.string),
"simplified_captions":
tf.VarLenFeature(dtype=tf.string),
"meta_shape":
tf.VarLenFeature(dtype=tf.string),
"meta_color":
tf.VarLenFeature(dtype=tf.string),
"meta_size":
tf.VarLenFeature(dtype=tf.string),
"meta_obj_positions":
tf.VarLenFeature(dtype=tf.float32),
"meta_obj_rotations":
tf.VarLenFeature(dtype=tf.float32),
"meta_obj_colors":
tf.VarLenFeature(dtype=tf.float32),
}
example = tf.parse_single_example(buf, feature_map)
images = tf.concat(example["frames"], axis=0)
images = tf.map_fn(
tf.image.decode_jpeg,
tf.reshape(images, [-1]),
dtype=tf.uint8,
back_prop=False)
top_down = tf.image.decode_jpeg(tf.squeeze(example["top_down_frame"]))
cameras = tf.reshape(example["cameras"], shape=[-1, _NUM_RAW_CAMERA_PARAMS])
captions = tf.sparse_tensor_to_dense(example["captions"], default_value="")
simplified_captions = tf.sparse_tensor_to_dense(
example["simplified_captions"], default_value="")
meta_shape = tf.sparse_tensor_to_dense(
example["meta_shape"], default_value="")
meta_color = tf.sparse_tensor_to_dense(
example["meta_color"], default_value="")
meta_size = tf.sparse_tensor_to_dense(example["meta_size"], default_value="")
meta_obj_positions = tf.sparse_tensor_to_dense(
example["meta_obj_positions"], default_value=0)
meta_obj_positions = tf.reshape(meta_obj_positions, shape=[-1, 3])
meta_obj_rotations = tf.sparse_tensor_to_dense(
example["meta_obj_rotations"], default_value=0)
meta_obj_rotations = tf.reshape(meta_obj_rotations, shape=[-1, 4])
meta_obj_colors = tf.sparse_tensor_to_dense(
example["meta_obj_colors"], default_value=0)
meta_obj_colors = tf.reshape(meta_obj_colors, shape=[-1, 4])
data_tensors = {
"images": images,
"cameras": cameras,
"captions": captions,
"simplified_captions": simplified_captions,
"top_down": top_down
}
if _PARSE_METADATA:
data_tensors.update({
"meta_shape": meta_shape,
"meta_color": meta_color,
"meta_size": meta_size,
"meta_obj_positions": meta_obj_positions,
"meta_obj_rotations": meta_obj_rotations,
"meta_obj_colors": meta_obj_colors
})
return data_tensors
def _make_indices():
indices = tf.range(0, _NUM_VIEWS)
indices = tf.random_shuffle(indices)
return indices
def _convert_and_resize_images(images, old_size):
images = tf.image.convert_image_dtype(images, dtype=tf.float32)
new_size = tf.cast(old_size, tf.float32) * _IMAGE_SCALE
new_size = tf.cast(new_size, tf.int32)
images = tf.image.resize_images(images, new_size, align_corners=True)
return images
def _preprocess_images(images, indices):
images_processed = tf.gather(images, indices)
old_size = tf.shape(images_processed)[1:3]
images_processed = _convert_and_resize_images(images_processed, old_size)
return images_processed
def _preprocess_td(td_image):
old_size = tf.shape(td_image)[0:2]
td_image = _convert_and_resize_images(td_image, old_size)
return td_image
def _preprocess_cameras(raw_cameras, indices):
"""Apply a nonlinear transformation to the vector of camera angles."""
raw_cameras = tf.gather(raw_cameras, indices)
azimuth = raw_cameras[:, 0]
pos = raw_cameras[:, 1:]
cameras = tf.concat(
[
pos,
tf.expand_dims(tf.sin(azimuth), -1),
tf.expand_dims(tf.cos(azimuth), -1)
],
axis=1)
return cameras
def _preprocess_captions(raw_caption, indices):
return tf.gather(raw_caption, indices)
def _preprocess_data(raw_data):
"""Randomly shuffle viewpoints and apply preprocessing to each modality."""
indices = _make_indices()
images = _preprocess_images(raw_data["images"], indices)
cameras = _preprocess_cameras(raw_data["cameras"], indices)
top_down = _preprocess_td(raw_data["top_down"])
if _USE_SIMPLIFIED_CAPTIONS:
captions = _preprocess_captions(raw_data["simplified_captions"], indices)
else:
captions = _preprocess_captions(raw_data["captions"], indices)
return [images, cameras, top_down, captions]
def _split_scene(images, cameras, top_down, captions):
"""Splits scene into query and target.
Args:
images: A tensor containing images.
cameras: A tensor containing cameras.
top_down: A tensor containing the scene seen from top.
captions: A tensor containing captions.
Returns:
A tuple query, target. The query is a tuple where the first element is the
sequence of 9 (images, cameras, captions) which can be given to the model
as context. The second element in the query is the camera angle of the
viewpoint to reconstruct. The target contains the image corresponding to the
queried viewpoint, the text description from that viewpoint and an image of
the scene viewed from above.
"""
context_image = images[:-1, :, :, :]
context_camera = cameras[:-1, :]
context_caption = captions[:-1]
target_image = images[-1, :, :, :]
target_camera = cameras[-1, :]
target_caption = captions[-1]
query = ((context_image, context_camera, context_caption), target_camera)
target = (target_image, target_caption, top_down)
return query, target
def _parse_function(buf):
raw_data = _parse_proto(buf)
scene_data = _preprocess_data(raw_data)
query, target = _split_scene(*scene_data)
return query, target, raw_data
def make_dataset(batch_size):
"""Returns a tf.data.Dataset object with the dataset."""
filenames = tf.placeholder(tf.string, shape=[None])
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(_parse_function)
dataset = dataset.repeat()
dataset = dataset.shuffle(128)
dataset = dataset.batch(batch_size)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
return filenames, iterator, next_element
|
slim-dataset-master
|
reader.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Video utils."""
from typing import Sequence
import numpy as np
from PIL import Image
def video_reshaper(sample_video: np.ndarray) -> np.ndarray:
"""Reshape video into the correct shape."""
if sample_video.shape[0] == 1:
v = np.array(sample_video[0])
elif sample_video.shape[0] == 4:
# Make a grid of 2x2.
v = np.concatenate(
[np.concatenate([sample_video[0], sample_video[1]], axis=1),
np.concatenate([sample_video[2], sample_video[3]], axis=1)],
axis=2)
else:
# Concatenate all horizontally.
v = np.concatenate(sample_video, axis=2)
return v
def save_video(frame_generator: Sequence[np.ndarray], video_path: str):
"""Save video to the given path."""
first_frame = Image.fromarray(frame_generator[0])
first_frame.save(
video_path, save_all=True,
append_images=[Image.fromarray(frame)
for frame in frame_generator[1:]], duration=40, loop=0)
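# Hedged usage sketch (file name and shapes are illustrative only): tile four
# random uint8 clips into a 2x2 grid with video_reshaper and write the result
# as an animated image with save_video.
def _demo_save_video():
  clips = np.random.randint(0, 255, size=(4, 8, 64, 64, 3), dtype=np.uint8)
  tiled = video_reshaper(clips)  # Shape (8, 128, 128, 3).
  save_video(list(tiled), '/tmp/demo.gif')  # One PIL frame per time step.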
|
compressed_vision-main
|
utils/video_utils.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute metrics."""
from typing import Sequence
import chex
import jax.numpy as jnp
import numpy as np
def get_compression_rate(images: chex.Array, codes: Sequence[chex.Array],
bits_per_element: int) -> float:
codes_size = np.sum([np.prod(jnp.asarray(code).shape) for code in codes])
compression_rate = np.prod(images.shape) / codes_size
bit_ratio = 8 / np.log2(bits_per_element)
return compression_rate * bit_ratio
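# Hedged worked example (shapes and values are illustrative only): a uint8
# clip of shape [1, 16, 256, 256, 3] compressed to a single [1, 16, 32, 32]
# code array drawn from a 256-entry codebook.
def _demo_compression_rate():
  images = np.zeros((1, 16, 256, 256, 3), dtype=np.uint8)
  codes = [jnp.zeros((1, 16, 32, 32), dtype=jnp.int32)]
  # Element ratio: (256 * 256 * 3) / (32 * 32) = 192; with 8-bit pixels and
  # log2(256) = 8-bit codes the bit ratio is 1, so the result is 192.0.
  return get_compression_rate(images, codes, bits_per_element=256)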
|
compressed_vision-main
|
utils/metric_utils.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools for transforming the input data."""
import chex
import jax
def convert_im_to_codes(
codec_encoder,
codec_params,
codec_state,
images: chex.Array,
is_return_quantized: bool = False,
):
"""Converts input images to codes."""
codes, _ = codec_encoder(
codec_params,
codec_state,
jax.random.PRNGKey(42),
images,
is_return_quantized=is_return_quantized,
)
return codes
def convert_codes_to_im(
codec_decoder,
codec_params,
codec_state,
codes: chex.Array,
is_quantized: bool = False
):
"""Decode codes."""
images, _ = codec_decoder(
codec_params,
codec_state,
jax.random.PRNGKey(42),
codes,
is_quantized=is_quantized
)
return images
def encode_decode(
codec_encoder,
codec_decoder,
codec_params,
codec_state,
inputs: chex.Array,
):
"""Encode and decode video."""
codes = convert_im_to_codes(
codec_encoder,
codec_params,
codec_state,
inputs,
is_return_quantized=False,
)
outputs = convert_codes_to_im(
codec_decoder=codec_decoder,
codec_params=codec_params,
codec_state=codec_state,
codes=codes,
is_quantized=False)
return outputs, codes
|
compressed_vision-main
|
utils/data_utils.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A checkpoint loader."""
import pickle
from typing import Any, Mapping
_SAVED_VALUES = [
'params',
'state',
'config',
'augm_params',
'augm_state',
'augm_config'
]
def load_params_state(model_path) -> Mapping[str, Any]:
saved_params = pickle.load(model_path)
for param in _SAVED_VALUES:
assert param in saved_params.keys(), f'Checkpoint is missing key {param}.'
return {k: saved_params[k] for k in _SAVED_VALUES}
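# Hedged usage sketch: pickle.load expects an open file object, so the caller
# is assumed to pass a binary file handle rather than a path string
# ('/path/to/checkpoint.pkl' below is illustrative only).
#
#   with open('/path/to/checkpoint.pkl', 'rb') as f:
#     checkpoint = load_params_state(f)
#   params, state = checkpoint['params'], checkpoint['state']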
|
compressed_vision-main
|
utils/checkpoint_loader.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Models that apply a transformation on some group of codes."""
import functools
from typing import Optional, Sequence
from compressed_vision.models import transformer
import haiku as hk
import jax
import jax.numpy as jnp
class NeuralTransformation(hk.Module):
"""Uses a transformer to transform a set of latent codes."""
def __init__(self,
channels: Sequence[int] = (64, 64, 64),
hidden_size: int = 128,
num_hidden_layers: int = 2,
num_heads: int = 4,
num_attention_heads: int = 1,
key_size: int = 128,
value_size: int = 128,
intermediate_size: int = 128,
lambda_v: float = 0.0001,
output_video_shape: Optional[Sequence[int]] = None,
name: Optional[str] = None):
super().__init__(name=name)
self._lambda = lambda_v
self._output_video_shape = output_video_shape
self.channels = channels
self.neural_transformer = functools.partial(
transformer.SpatioTemporalTransformerXL,
num_layers=num_hidden_layers,
num_heads=num_attention_heads,
key_size=key_size,
value_size=value_size,
ffw_hidden_size=intermediate_size,
dropout_rate=0.0)
def __call__(self, video, augmentation):
batch_size = video.shape[0]
time = video.shape[1]
channels = video.shape[-1]
augmentation = augmentation.reshape((augmentation.shape[0], -1))
mlp = hk.nets.MLP(tuple(self.channels) + (channels,))
augmentation = mlp(augmentation).reshape(batch_size, 1, 1, channels)
video_btsc = video.reshape(batch_size, time, -1, channels)
augmentation = augmentation.repeat(video_btsc.shape[1], 1)
augmentation = augmentation.repeat(video_btsc.shape[2], 2)
all_inputs = jnp.concatenate((augmentation, video_btsc), -1)
result = self.neural_transformer(
absolute_position_length=(video_btsc.shape[1], video_btsc.shape[2]),
d_model=channels*2)(all_inputs)[:, :, :, -channels:]
result = result.reshape(video.shape)
if self._output_video_shape is not None:
result = jax.image.resize(
result,
shape=(batch_size, time) + self._output_video_shape,
method='nearest')
return result
def get_equivariant_network(network_name):
if network_name == 'transformer':
return NeuralTransformation
else:
raise ValueError(f'Unknown network: {network_name}.')
|
compressed_vision-main
|
models/equivariant_networks.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Haiku compression model."""
from typing import Sequence, Tuple, Union
import chex
from compressed_vision.models import i3d
from compressed_vision.models import normalization
import haiku as hk
import jax
import jax.numpy as jnp
class CompressionConvEncoderDecoder(hk.Module):
"""Model for VQGAN style encoder/decoder (with a transformer)."""
def __init__(self,
num_channels: int = 3,
bot_channels: int = 128,
bot_channels_code_index: int = 32,
stride=(1, 2, 2),
kernel_shape=(1, 4, 4),
num_conv_layers=1,
res_bot_mult=4.0,
extra_encode_conv_layers=2,
extra_encode_conv_kernel=(1, 3, 3),
extra_encode_conv_use_bn=False,
extra_decode_conv_layers=2,
extra_decode_conv_use_bn=False,
extra_decode_conv_kernel=(1, 3, 3),
use_multicodebook: bool = True,
use_ratio_for_codebooks: bool = False,
use_attention: bool = False,
use_skips: bool = False,
skips_one_embedding: bool = False,
attention_resolutions: Sequence[int] = (),
last_linearity: str = '',
vq_embedding_dims: int = 16,
vq_num_embeddings: int = 256,
vq_commitment_cost: float = 0.9,
vq_decay: float = 0.5,
padding: str = 'VALID',
use_normalized_embeddings: bool = False,
use_tanh: bool = False,
use_time_compression_2x: bool = False,
name: str = 'CompressionConvEncoderDecoder'):
"""Initializes the module."""
super().__init__(name=name)
self._num_channels = num_channels
self._bot_channels = bot_channels
self._res_bot_mult = res_bot_mult
self._stride = stride
self._kernel_shape = kernel_shape
self._num_conv_layers = num_conv_layers
assert not use_attention
self._attention_resolutions = attention_resolutions
assert not use_skips
assert not skips_one_embedding
self._padding = padding
self._use_tanh = use_tanh
self._use_time_compression_2x = use_time_compression_2x
# Used for the improved VQVAE setup from VIM.
self._bot_channels_code_index = bot_channels_code_index
assert not use_normalized_embeddings
self._extra_encode_conv_kernel = extra_encode_conv_kernel
self._extra_encode_conv_layers = extra_encode_conv_layers
self._extra_encode_conv_use_bn = extra_encode_conv_use_bn
self._extra_decode_conv_layers = extra_decode_conv_layers
self._extra_decode_conv_kernel = extra_decode_conv_kernel
self._extra_decode_conv_use_bn = extra_decode_conv_use_bn
self._activation_fn = jax.nn.relu
self._vq_embedding_dims = vq_embedding_dims
self._vq_num_embeddings = vq_num_embeddings
self._vq_commitment_cost = vq_commitment_cost
self._vq_decay = vq_decay
self._conv_layer_channels = [
bot_channels // pow(2, self._num_conv_layers - i - 1)
for i in range(self._num_conv_layers)
]
if use_multicodebook:
self._num_codebooks, remainder = divmod(bot_channels, vq_embedding_dims)
assert not remainder, remainder
else:
self._num_codebooks = 1
self._vq_embedding_dims = bot_channels
if last_linearity:
self._final_linearity = getattr(jax.nn, last_linearity)
else:
self._final_linearity = lambda x: x
vqvae_module = hk.nets.VectorQuantizerEMA
# Initialised here so they can be accessed by multiple methods.
self._all_vqs = []
self._all_codebooks = []
vqs = []
for i in range(self._num_codebooks):
vqs.append(
vqvae_module(
embedding_dim=self._vq_embedding_dims,
num_embeddings=self._vq_num_embeddings,
commitment_cost=self._vq_commitment_cost,
decay=self._vq_decay,
name=f'vqvae_{i}'))
self._all_vqs.append(vqs)
self._all_codebooks.append(self._num_codebooks)
im_size = 32
for idx, num_channels in enumerate(self._conv_layer_channels, 1):
vqs = []
ratio = (im_size // 8) ** 2
if use_ratio_for_codebooks:
num_codebooks = (self._num_codebooks // ratio)
else:
num_codebooks = self._num_codebooks
t_embedding_dim = num_channels // num_codebooks
im_size = im_size // 2
num_embeddings = self._vq_num_embeddings
for i in range(num_codebooks):
vqs.append(
vqvae_module(
embedding_dim=t_embedding_dim,
num_embeddings=num_embeddings,
commitment_cost=self._vq_commitment_cost,
decay=self._vq_decay,
name=f'vqvae_{idx}_{i}'))
self._all_vqs.append(vqs)
self._all_codebooks.append(num_codebooks)
def encode_embedding(
self,
inputs: chex.Array,
is_training: bool,
) -> chex.Array:
"""Encodes input."""
x = inputs # Expected shape [B, T, H, W, C].
chex.assert_rank(x, 5)
if x.shape[-1] != self._num_channels:
raise ValueError(f'Input shape: {x.shape} does not match the num_channels'
f' passed to the constructor: {self._num_channels}.')
for i, num_channels in enumerate(self._conv_layer_channels):
if i == self._num_conv_layers - 1:
activation = None
else:
activation = self._activation_fn
x = i3d.Unit3D(
output_channels=num_channels,
kernel_shape=self._kernel_shape,
stride=self._stride,
activation_fn=activation,
normalize_fn=None,
padding=self._padding,
name=f'conv_{i}')(x, is_training=is_training)
if self._use_time_compression_2x:
x = i3d.Unit3D(
output_channels=self._conv_layer_channels[-1],
kernel_shape=self._kernel_shape,
stride=(2, 1, 1),
activation_fn=activation,
normalize_fn=None,
padding=self._padding,
name='conv_temporal')(x, is_training=is_training)
if self._extra_encode_conv_use_bn:
norm_fn = normalization.get_normalize_fn('batch_norm')
else:
norm_fn = None
for i in range(self._extra_encode_conv_layers):
x_new = self._activation_fn(x)
x_new = i3d.Unit3D(
output_channels=int(self._bot_channels*self._res_bot_mult),
kernel_shape=self._extra_encode_conv_kernel,
stride=(1, 1, 1),
activation_fn=self._activation_fn,
normalize_fn=norm_fn,
padding=self._padding,
name=f'encoder_res_0_{i}')(x_new, is_training=is_training)
x_new = i3d.Unit3D(
output_channels=self._bot_channels,
kernel_shape=(1, 1, 1),
stride=(1, 1, 1),
activation_fn=None,
normalize_fn=norm_fn,
padding=self._padding,
name=f'encoder_res_1_{i}')(x_new, is_training=is_training)
x = x_new + x
if self._use_tanh:
x = jax.nn.tanh(x)
assert x.shape[-1] % self._vq_embedding_dims == 0, x.shape
return x
def decode_embedding(self,
inputs: chex.Array,
is_training: bool) -> chex.Array:
"""Decodes quantized embedding to logits."""
x = inputs # Assumed shape [B, T, H, W, C]
chex.assert_rank(x, 5)
if self._extra_encode_conv_use_bn:
norm_fn = normalization.get_normalize_fn('batch_norm')
else:
norm_fn = None
for i in range(self._extra_decode_conv_layers):
x_inputs = x
x = self._activation_fn(x)
x = i3d.Unit3D(
output_channels=int(self._bot_channels * self._res_bot_mult),
kernel_shape=self._extra_decode_conv_kernel,
stride=(1, 1, 1),
activation_fn=self._activation_fn,
normalize_fn=norm_fn,
padding=self._padding,
name=f'decoder_res_0_{i}')(x, is_training=is_training)
x = i3d.Unit3D(
output_channels=self._bot_channels,
kernel_shape=(1, 1, 1),
stride=(1, 1, 1),
activation_fn=None,
normalize_fn=norm_fn,
padding=self._padding,
name=f'decoder_res_1_{i}')(x, is_training=is_training)
x = x + x_inputs
if self._use_time_compression_2x:
x = hk.Conv3DTranspose(
self._conv_layer_channels[-1],
self._kernel_shape,
(2, 1, 1),
# target_shape,
name='decoder_temporal')(x)
output_channel_order = reversed(
[self._num_channels] + self._conv_layer_channels[:-1])
for i, num_channels in enumerate(output_channel_order):
_, time, height, width, _ = x.shape
target_shape = [stride*dim_size for stride, dim_size
in zip(self._stride, (time, height, width))]
x = hk.Conv3DTranspose(
num_channels,
self._kernel_shape,
self._stride,
target_shape,
name=f'decoder_{i}')(x)
if i < len(self._conv_layer_channels) - 1: # No activation if last layer.
x = self._activation_fn(x)
return self._final_linearity(x)
def encode(
self,
inputs: chex.Array,
is_return_quantized: bool = False,
) -> Union[chex.Array, Tuple[chex.Array, chex.Array]]:
"""Encodes to codes."""
embeddings = self.encode_embedding(
inputs,
is_training=False,
)
_, codes, _ = self.quantize(embeddings, is_training=False)
if is_return_quantized:
quantized = self.codes_to_quantize(codes)
return codes, quantized
else:
return codes
def codes_to_quantize(
self,
codes: chex.Array,
) -> chex.Array:
"""Encodes to quantize."""
chex.assert_rank(codes, 5)
quantized = [vq.quantize(codes[..., i])
for i, vq in enumerate(self._all_vqs[0])]
quantized = jnp.concatenate(quantized, axis=-1)
return quantized
def decode(
self,
inputs: Tuple[chex.Array, Sequence[chex.Array]],
is_quantized: bool = False
) -> chex.Array:
"""Decodes from codes."""
x = inputs # Assumed shape [B, T, H, W, C]
if is_quantized:
quantized = x
else:
quantized = self.codes_to_quantize(codes=x,)
return self.decode_embedding(
quantized,
is_training=False,
)
def quantize(
self,
inputs: chex.Array,
is_training: bool,
quantize_idx: int = 0,
) -> Tuple[chex.Array, chex.Array, chex.Array]:
x = inputs # Assumed shape [B, T, H, W, C]
chex.assert_rank(x, 5)
# [B, T, H, W, C] -> [B, T, H, W, C", D]
batch_size, time_size, height, width, channels = x.shape
x = x.reshape(
batch_size, time_size, height, width,
channels // self._all_codebooks[quantize_idx],
self._all_codebooks[quantize_idx])
vq_out = []
for i, vq in enumerate(self._all_vqs[quantize_idx]):
vq_out.append(vq(x[..., i], is_training))
quantized, partial_loss, codes = [
[out[value] for out in vq_out]
for value in ['quantize', 'loss', 'encoding_indices']]
quantized = jnp.concatenate(quantized, axis=4)
codes = jnp.stack(codes, axis=4)
loss = jnp.mean(jnp.asarray(partial_loss))
return quantized, codes, loss
def __call__(
self,
inputs: chex.Array,
is_training: bool,
) -> Tuple[chex.Array, Sequence[chex.Array], chex.Array, chex.Array]:
"""Runs the full embed+quantise+reconstruct model."""
embedding = self.encode_embedding(
inputs,
is_training,
)
quantized, codes, loss = self.quantize(embedding, is_training)
reconstruction = self.decode_embedding(
quantized,
is_training,
)
return quantized, [codes], loss, reconstruction
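# A standalone sketch (shapes and codebook count below are illustrative
# assumptions) of the channel split performed in `quantize` above: the channel
# axis is reshaped into (channels_per_codebook, num_codebooks) so that codebook
# i quantizes the slice x_split[..., i]. It runs only when executed directly.
if __name__ == '__main__':
  num_codebooks = 4
  x = jnp.zeros((2, 8, 4, 4, 256))  # [B, T, H, W, C]; C divisible by num_codebooks.
  b, t, h, w, c = x.shape
  x_split = x.reshape(b, t, h, w, c // num_codebooks, num_codebooks)
  assert x_split[..., 0].shape == (b, t, h, w, c // num_codebooks)
  # The per-codebook quantized outputs are concatenated back along axis 4 and
  # the integer codes are stacked along a new trailing axis.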
|
compressed_vision-main
|
models/encoder_decoder_unet.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transformer-XL implementation."""
import functools
from typing import Any, Callable, Mapping, Optional, Tuple
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
def _layer_norm(
x: jnp.ndarray,
use_bias: bool = True,
name: Optional[str] = None,
) -> jnp.ndarray:
ln = hk.LayerNorm(
axis=-1, create_scale=True, create_offset=use_bias, name=name)
return ln(x)
class DenseBlock(hk.Module):
"""Dense block."""
def __init__(
self,
*,
ffw_hidden_size: int,
dropout_rate: float,
init_scale: float,
final_init_scale_multiplier: float,
use_final_bias: bool,
activation: Callable[[jnp.ndarray], jnp.ndarray],
output_channels: Optional[int] = None,
name: Optional[str] = None,
):
super().__init__(name=name)
self._ffw_hidden_size = ffw_hidden_size
self._dropout_rate = dropout_rate
self._init_scale = init_scale
self._final_init_scale = init_scale * final_init_scale_multiplier
self._use_final_bias = use_final_bias
self._activation = activation
self._output_channels = output_channels
def __call__(self, x: jnp.ndarray, is_training: bool) -> jnp.ndarray:
d_model = x.shape[-1]
ffw_layer = hk.Linear(
self._ffw_hidden_size,
w_init=hk.initializers.VarianceScaling(self._init_scale))
x = ffw_layer(x)
x = self._activation(x)
if is_training:
x = hk.dropout(hk.next_rng_key(), self._dropout_rate, x)
final_layer = hk.Linear(
self._output_channels or d_model,
w_init=hk.initializers.VarianceScaling(self._final_init_scale),
with_bias=self._use_final_bias)
return final_layer(x)
class SpaceTimeMultiHeadAttention(hk.Module):
"""SpaceTimeMultihead attention module with memory."""
def __init__(
self,
*,
value_size: int,
key_size: int,
num_heads: int,
init_scale: float,
dropout_rate: float,
use_bias: bool,
use_final_bias: bool,
final_init_scale_multiplier: float,
space_time_mode: str,
name: str = 'spacetime_multihead_attention',
):
"""Initialises the SpaceTimeMultiHeadAttention module."""
super().__init__(name=name)
self._value_size = value_size
self._key_size = key_size
self._num_heads = num_heads
self._dropout_rate = dropout_rate
self._init_scale = init_scale
self._final_init_scale = final_init_scale_multiplier * init_scale
self._use_bias = use_bias
self._use_final_bias = use_final_bias
assert space_time_mode in ['space', 'time']
self._space_time_mode = space_time_mode
@hk.transparent
def _multihead_linear(self, inputs: jnp.ndarray, hidden_size: int, name: str):
linear = hk.Linear(
self._num_heads * hidden_size,
with_bias=self._use_bias,
w_init=hk.initializers.VarianceScaling(scale=self._init_scale),
name=name)
out = linear(inputs)
return jnp.reshape(out, inputs.shape[:-1] + (self._num_heads, hidden_size))
@hk.transparent
def _call_main(
self,
query: jnp.ndarray,
key: jnp.ndarray,
value: jnp.ndarray,
is_training: bool,
) -> jnp.ndarray:
batch_size, time_len, seq_len, embedding_size = query.shape
query_heads = self._multihead_linear(query, self._key_size, 'query')
key_heads = self._multihead_linear(key, self._key_size, 'key')
value_heads = self._multihead_linear(value, self._value_size, 'value')
if self._space_time_mode == 'time':
einsum_template = 'btlhd,bTlhd->blhtT'
else:
einsum_template = 'btlhd,btLhd->bthlL'
logits = jnp.einsum(einsum_template, query_heads, key_heads)
scaled_logits = logits * self._key_size**(-0.5)
weights = jax.nn.softmax(scaled_logits)
if is_training:
weights = hk.dropout(hk.next_rng_key(), self._dropout_rate, weights)
if self._space_time_mode == 'time':
einsum_template = 'blhtT,bTlhd->btlhd'
else:
einsum_template = 'bthlL,btLhd->btlhd'
attn_vec = jnp.einsum(einsum_template, weights, value_heads)
attn_vec = jnp.reshape(
attn_vec, [batch_size, time_len,
seq_len, self._num_heads * self._value_size])
final_linear = hk.Linear(
embedding_size,
w_init=hk.initializers.VarianceScaling(scale=self._final_init_scale),
with_bias=self._use_final_bias)
outputs = final_linear(attn_vec)
return outputs
def __call__(
self,
inputs: jnp.ndarray,
is_training: bool,
) -> jnp.ndarray:
"""Computes the attention values.
We use the following shape conventions: `B` for batch size, `T` for chunk
size, and `D` for the embedding dimension.
Args:
inputs: array of shape [B, T, L, D]
is_training: Whether to apply dropout
Returns:
An array of shape [B, T, D] the result of applying self-attention to
inputs.
"""
query = inputs
key = value = inputs
return self._call_main(
query, key, value, is_training=is_training)
def spatio_temporal_gpt2_block(
*,
layer: int,
mha_kwargs: Mapping[str, Any],
ffw_kwargs: Mapping[str, Any],
dropout_rate: float,
is_training: bool,
inputs: jnp.ndarray,
use_layer_norm_bias: bool,
final_residual: bool = True,
) -> jnp.ndarray:
"""Pure function for a single GPT-2 block."""
time_attn = SpaceTimeMultiHeadAttention(name=f'h{layer}_time_attn',
space_time_mode='time',
**mha_kwargs)
space_attn = SpaceTimeMultiHeadAttention(name=f'h{layer}_space_attn',
space_time_mode='space',
**mha_kwargs)
dense_block = DenseBlock(name=f'h{layer}_mlp', **ffw_kwargs)
ln_time = hk.LayerNorm(
axis=-1,
create_scale=True,
create_offset=use_layer_norm_bias,
name=f'h{layer}_ln_time')
ln_space = hk.LayerNorm(
axis=-1,
create_scale=True,
create_offset=use_layer_norm_bias,
name=f'h{layer}_ln_space')
attn_input = ln_time(inputs)
h_attention = time_attn(
inputs=attn_input,
is_training=is_training)
if is_training:
h_attention = hk.dropout(hk.next_rng_key(), dropout_rate, h_attention)
h = inputs + h_attention
attn_input = ln_space(h)
h_attention = space_attn(
inputs=attn_input,
is_training=is_training)
if is_training:
h_attention = hk.dropout(hk.next_rng_key(), dropout_rate, h_attention)
h = h + h_attention
h_ffw = dense_block(
_layer_norm(
h, name=f'h{layer}_ln_2', use_bias=use_layer_norm_bias),
is_training)
if is_training:
h_ffw = hk.dropout(hk.next_rng_key(), dropout_rate, h_ffw)
if final_residual:
return h + h_ffw
else:
return h_ffw
class SpatioTemporalTransformerXL(hk.Module):
"""TimeTransformer-XL implementation."""
def __init__(
self,
d_model: int,
num_layers: int,
num_heads: int,
key_size: int,
value_size: int,
ffw_hidden_size: int,
dropout_rate: float,
absolute_position_length: Tuple[int, int] = (0, 0),
use_layer_norm_bias: bool = True,
same_attention_length: bool = False,
use_attn_bias: bool = False,
remat: bool = False,
activation: Callable[[jnp.ndarray], jnp.ndarray] = jax.nn.gelu,
name: str = 'transformer_xl',
):
"""Initialises the module.
Args:
d_model: Size of the embeddings.
num_layers: Number of transformer block layers.
num_heads: Number of attention heads to use.
key_size: Size of key (and query) embedding for attention.
value_size: Size of value embedding for attention.
ffw_hidden_size: Hidden size for MLP that follows attention.
      dropout_rate: How much dropout to apply to the attention and MLP modules.
      absolute_position_length: A pair giving the maximum extents of the two
        middle input axes covered by the learned absolute position embeddings.
        Both entries must be positive when the module is called.
use_layer_norm_bias: Whether to use a learnable bias for layer norm.
same_attention_length: Whether each token attends over the same history
length.
use_attn_bias: Whether or not to use biases in attention linear layers.
remat: Whether to use gradient rematerialization.
activation: The nonlinearity to use in the DenseBlocks.
name: The Haiku name of the module.
"""
super().__init__(name=name)
self._d_model = d_model
self._num_layers = num_layers
self._dropout_rate = dropout_rate
self._remat = remat
self._absolute_position_length = absolute_position_length
self._use_layer_norm_bias = use_layer_norm_bias
self._same_attention_length = same_attention_length
self._mha_kwargs = dict(
value_size=value_size,
key_size=key_size,
num_heads=num_heads,
init_scale=2. / np.sqrt(self._num_layers),
dropout_rate=self._dropout_rate,
use_bias=use_attn_bias,
use_final_bias=True,
final_init_scale_multiplier=1.,
)
self._ffw_kwargs = dict(
ffw_hidden_size=ffw_hidden_size,
dropout_rate=self._dropout_rate,
init_scale=2. / np.sqrt(self._num_layers),
final_init_scale_multiplier=1.,
use_final_bias=True,
activation=activation,
)
def _get_position_embedding(
self,
      inputs: jnp.ndarray,
  ) -> jnp.ndarray:
"""Computes absolute positional embeddings.
Args:
inputs: input token embeddings array of shape [B, L, T, D]
Returns:
The absolute positional embeddings of shape [B, L, T, D].
"""
_, l, t, d = inputs.shape
position_embeddings = hk.get_parameter(
name='position_embedding',
shape=list(self._absolute_position_length) + [self._d_model],
init=hk.initializers.TruncatedNormal(stddev=0.02))
assert l <= self._absolute_position_length[0]
assert t <= self._absolute_position_length[1]
assert d == self._d_model
return position_embeddings[:l, :t]
def __call__(
self,
input_embeddings: jnp.ndarray,
is_training: bool = True,
) -> jnp.ndarray:
"""Computes the logits and next memory.
Args:
input_embeddings: array of shape [B, T, d_model]
is_training: Whether to use dropout.
Returns:
The final layer embeddings
"""
assert len(input_embeddings.shape) == 4
assert len(self._absolute_position_length) == 2
assert self._absolute_position_length[0] > 0
assert self._absolute_position_length[1] > 0
input_embeddings += self._get_position_embedding(input_embeddings)
h = input_embeddings
if is_training:
h = hk.dropout(hk.next_rng_key(), self._dropout_rate, h)
for i in range(self._num_layers):
# Parameterize function on options.
block = functools.partial(
spatio_temporal_gpt2_block,
layer=i,
mha_kwargs=self._mha_kwargs,
ffw_kwargs=self._ffw_kwargs,
dropout_rate=self._dropout_rate,
use_layer_norm_bias=self._use_layer_norm_bias,
is_training=is_training)
# Optionally rematerialize at the block level
if self._remat:
block = hk.remat(block)
h = block(inputs=h)
h = _layer_norm(h, name='ln_f', use_bias=self._use_layer_norm_bias)
return h
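# A minimal usage sketch (hyper-parameters and shapes below are illustrative
# assumptions, not taken from the original training setup); it runs only when
# this module is executed directly.
if __name__ == '__main__':
  def _forward(x: jnp.ndarray) -> jnp.ndarray:
    model = SpatioTemporalTransformerXL(
        d_model=64, num_layers=2, num_heads=4, key_size=16, value_size=16,
        ffw_hidden_size=128, dropout_rate=0.1,
        # Must cover the two middle axes of the 4-D input.
        absolute_position_length=(8, 16))
    return model(x, is_training=True)

  forward = hk.transform(_forward)
  x = jnp.zeros((2, 8, 16, 64))  # [B, L, T, d_model]
  params = forward.init(jax.random.PRNGKey(0), x)
  y = forward.apply(params, jax.random.PRNGKey(1), x)  # Same shape as x.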
|
compressed_vision-main
|
models/transformer.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Haiku I3D Unit.
The model is introduced in:
Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset
Joao Carreira, Andrew Zisserman
https://arxiv.org/pdf/1705.07750v1.pdf.
"""
from typing import Callable, Optional, Sequence
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
class _MaxPool(hk.MaxPool):
"""A `hk.MaxPool` accepting (and discarding) an `is_training` argument."""
def __call__(self,
x: chex.Array,
is_training: bool = True) -> jnp.ndarray:
del is_training # Unused.
return super().__call__(x)
class Unit3D(hk.Module):
"""Basic I3D unit containing Conv3D + Normalization + non-linearity."""
def __init__(self,
output_channels: int,
kernel_shape: Sequence[int] = (1, 1, 1),
stride: Sequence[int] = (1, 1, 1),
with_bias: bool = False,
normalize_fn: Optional[Callable[..., chex.Array]] = None,
activation_fn: Optional[Callable[[chex.Array],
chex.Array]] = jax.nn.relu,
padding: str = 'SAME',
name: str = 'Unit3D'):
"""Initializes the Unit3D module.
Args:
output_channels: Number of output channels.
kernel_shape: The shape of the kernel. A sequence of length 3.
stride: Stride for the kernel. A sequence of length 3.
with_bias: Whether to add a bias to the convolution.
normalize_fn: Function used for normalization.
activation_fn: Function used as non-linearity.
padding: Which type of padding to use (default `SAME`).
name: The name of the module.
Raises:
ValueError: If `kernel_shape` or `stride` has the wrong shape.
"""
super().__init__(name=name)
# Check args.
if len(kernel_shape) != 3:
raise ValueError(
'Given `kernel_shape` must have length 3 but has length '
f'{len(kernel_shape)}.')
if len(stride) != 3:
raise ValueError(
f'Given `stride` must have length 3 but has length {len(stride)}.')
self._output_channels = output_channels
self._kernel_shape = kernel_shape
self._stride = stride
self._with_bias = with_bias
self._normalize_fn = normalize_fn
self._activation_fn = activation_fn
self._padding = padding
def __call__(self,
inputs: chex.Array,
is_training: bool = True) -> jnp.ndarray:
"""Connects the module to inputs.
Args:
inputs: A 5-D float array of shape `[B, T, H, W, C]`.
is_training: Whether to use training mode.
Returns:
A 5-D float array of shape `[B, new_t, new_h, new_w, output_channels]`.
"""
if self._padding == 'VALID':
t = (self._kernel_shape[0] - 1) // 2
h = (self._kernel_shape[1] - 1) // 2
w = (self._kernel_shape[2] - 1) // 2
# Needs to be numpy so it does not become a traced array.
pad_dims = np.array([[0, 0], [t, t], [h, h], [w, w], [0, 0]])
inputs = jax.numpy.pad(inputs, pad_dims, mode='reflect')
out = hk.Conv3D(
output_channels=self._output_channels,
kernel_shape=self._kernel_shape,
stride=self._stride,
padding=self._padding,
with_bias=self._with_bias)(
inputs)
if self._normalize_fn is not None:
out = self._normalize_fn(out, is_training=is_training)
if self._activation_fn is not None:
out = self._activation_fn(out)
return out
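# A minimal usage sketch (shapes are illustrative assumptions); it runs only
# when this module is executed directly.
if __name__ == '__main__':
  def _forward(x: chex.Array) -> jnp.ndarray:
    return Unit3D(output_channels=16, kernel_shape=(3, 3, 3),
                  stride=(1, 2, 2))(x, is_training=True)

  forward = hk.transform(_forward)
  videos = jnp.zeros((2, 8, 32, 32, 3))  # [B, T, H, W, C]
  params = forward.init(jax.random.PRNGKey(0), videos)
  out = forward.apply(params, None, videos)  # -> [2, 8, 16, 16, 16] with SAME padding.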
|
compressed_vision-main
|
models/i3d.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Normalize functions constructors."""
from typing import Any, Dict, Optional
import chex
import haiku as hk
from jax import numpy as jnp
class _BatchNorm(hk.BatchNorm):
"""A `hk.BatchNorm` with adapted default arguments."""
def __init__(self,
create_scale: bool = True,
create_offset: bool = True,
decay_rate: float = 0.9,
eps: float = 1e-5,
test_local_stats: bool = False,
**kwargs):
# Check args.
if kwargs.get('cross_replica_axis', None) is not None:
raise ValueError(
'Attempting to use \'batch_norm\' normalizer, but specifying '
'`cross_replica_axis`. This is not supported.')
self._test_local_stats = test_local_stats
super().__init__(create_scale=create_scale,
create_offset=create_offset,
decay_rate=decay_rate,
eps=eps,
**kwargs)
def __call__(self,
x: chex.Array,
is_training: bool = True) -> jnp.ndarray:
return super().__call__(x, is_training,
test_local_stats=self._test_local_stats)
_NORMALIZER_NAME_TO_CLASS = {
'batch_norm': _BatchNorm,
}
def get_normalize_fn(
normalizer_name: str = 'batch_norm',
normalizer_kwargs: Optional[Dict[str, Any]] = None):
"""Handles NormalizeFn creation.
These functions are expected to be used as part of Haiku model. On each
application of the returned normalization_fn, a new Haiku layer will be added
to the model.
Args:
normalizer_name: The name of the normalizer to be constructed.
normalizer_kwargs: The kwargs passed to the normalizer constructor.
Returns:
A `NormalizeFn` that when applied will create a new layer.
Raises:
ValueError: If `normalizer_name` is unknown.
"""
# Check args.
if normalizer_name not in _NORMALIZER_NAME_TO_CLASS:
raise ValueError(f'Unrecognized `normalizer_name` {normalizer_name}.')
normalizer_class = _NORMALIZER_NAME_TO_CLASS[normalizer_name]
normalizer_kwargs = normalizer_kwargs or dict()
return lambda *a, **k: normalizer_class(**normalizer_kwargs)(*a, **k) # pylint: disable=unnecessary-lambda
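# A minimal usage sketch (layer sizes are illustrative assumptions); batch norm
# carries state, so the wrapping function uses hk.transform_with_state. It runs
# only when this module is executed directly.
if __name__ == '__main__':
  import jax  # Needed here only for the PRNG key.

  def _forward(x: chex.Array, is_training: bool) -> jnp.ndarray:
    normalize_fn = get_normalize_fn('batch_norm', dict(decay_rate=0.99))
    x = hk.Linear(8)(x)
    return normalize_fn(x, is_training=is_training)

  forward = hk.transform_with_state(_forward)
  inputs = jnp.zeros((4, 3))
  params, state = forward.init(jax.random.PRNGKey(0), inputs, is_training=True)
  out, state = forward.apply(params, state, None, inputs, is_training=True)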
|
compressed_vision-main
|
models/normalization.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install script for setuptools."""
import os
from setuptools import find_packages
from setuptools import setup
_CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
def _get_version():
with open(os.path.join(_CURRENT_DIR, 'tsuite', '__init__.py')) as fp:
for line in fp:
if line.startswith('__version__') and '=' in line:
version = line[line.find('=') + 1:].strip(' \'"\n')
if version:
return version
raise ValueError('`__version__` not defined in `tsuite/__init__.py`')
def _parse_requirements(path):
with open(os.path.join(_CURRENT_DIR, path)) as f:
return [
line.rstrip()
for line in f
if not (line.isspace() or line.startswith('#'))
]
setup(
name='tsuite',
version=_get_version(),
url='https://github.com/deepmind/tsuite',
license='Apache 2.0',
author='DeepMind',
description=('tsuite: Get your RL agent fixed today!'),
long_description=open(os.path.join(_CURRENT_DIR, 'README.md')).read(),
long_description_content_type='text/markdown',
author_email='tsuite-devs@google.com',
keywords='RL testing debugging python machine learning',
packages=find_packages(exclude=['*_test.py']),
install_requires=_parse_requirements(
os.path.join(_CURRENT_DIR, 'requirements', 'requirements.txt')),
tests_require=_parse_requirements(
os.path.join(_CURRENT_DIR, 'requirements', 'requirements-test.txt')),
zip_safe=False, # Required for full installation.
include_package_data=True,
python_requires='>=3.9',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development :: Testing :: Mocking',
'Topic :: Software Development :: Testing :: Unit',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
tsuite-main
|
setup.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tsuite."""
from absl.testing import absltest
from absl.testing import parameterized
import tsuite
class TsuiteTest(parameterized.TestCase):
"""Test tsuite can be imported correctly."""
def test_import(self):
self.assertTrue(hasattr(tsuite, 'TSuiteEnvironment'))
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/tsuite_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tsuite: Get your RL agent fixed today!"""
from tsuite._src.tsuite import list_test_tasks
from tsuite._src.tsuite import PROPERTY_BEST_ACTION
from tsuite._src.tsuite import PROPERTY_RANDOM_ACTION
from tsuite._src.tsuite import PROPERTY_WORST_ACTION
from tsuite._src.tsuite import TSuiteEnvironment
from tsuite._src.updater import mini_batch_generator
from tsuite._src.updater import Updater
__version__ = "1.0"
__all__ = (
"list_test_tasks",
"mini_batch_generator",
"Updater",
"PROPERTY_BEST_ACTION",
"PROPERTY_RANDOM_ACTION",
"PROPERTY_WORST_ACTION",
"TSuiteEnvironment",
)
# _________________________________________
# / Please don't use symbols in `_src`. They \
# \ are not part of the Tsuite public API. /
# ------------------------------------------
# \ ^__^
# \ (oo)\_______
# (__)\ )\/\
# ||----w |
# || ||
#
|
tsuite-main
|
tsuite/__init__.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for base."""
import enum
from absl.testing import absltest
from absl.testing import parameterized
from dm_env import specs
import numpy as np
from tsuite._src import base
class _StringLikeEnum(str, enum.Enum):
OBS = 'obs'
class _StringLikeEnum2(str, enum.Enum):
__str__ = str.__str__
OBS = 'obs'
class BaseTest(parameterized.TestCase):
def test_set_names_in_spec(self):
spec = {
'a': [specs.DiscreteArray(num_values=4)],
'b': specs.DiscreteArray(num_values=4, name='b')
}
expected_spec = {
'a': [specs.DiscreteArray(num_values=4, name="('a', 0)")],
'b': specs.DiscreteArray(num_values=4, name='b')
}
self.assertDictEqual(base.set_names_in_spec(spec), expected_spec)
def test_get_action(self):
action_spec = {
'a': specs.BoundedArray(
shape=(2,), dtype=np.float32, minimum=-1, maximum=1,
name='a'),
'b': specs.BoundedArray(
shape=(2,), dtype=np.float32, minimum=-1, maximum=1,
name='b1|b2'),
'c': specs.BoundedArray(
shape=(2, 1), dtype=np.float32, minimum=-1, maximum=1,
name='c'),
'd': specs.BoundedArray(
shape=(2,), dtype=np.int32, minimum=-1, maximum=1,
name='d'),
'e': specs.StringArray((), name='e'),
'f': specs.Array(shape=(2,), dtype=bool, name='f'),
}
action = {
'a': np.array([-1.0, 1.0]),
'b': np.array([0.0, 1.0]),
'c': np.array([[-1.0], [1.0]]),
'd': np.array([-1, 1]),
'e': np.array(base.ExternalStringAction.HIGH.value, dtype=np.str_),
'f': np.array([True, False]),
}
observation_spec = specs.BoundedArray(
shape=(2,), dtype=np.float32, minimum=-1, maximum=1, name='obs')
t1 = base.TestCase(
action_spec=action_spec,
observation_spec=observation_spec,
default_action_name='a_0')
self.assertEqual(t1.map_external_to_internal_action(action),
base.InternalAction.LOW)
t2 = base.TestCase(
action_spec=action_spec,
observation_spec=observation_spec,
default_action_name='a_1')
self.assertEqual(t2.map_external_to_internal_action(action),
base.InternalAction.HIGH)
t3 = base.TestCase(
action_spec=action_spec,
observation_spec=observation_spec,
default_action_name='b1')
self.assertEqual(t3.map_external_to_internal_action(action),
base.InternalAction.NOOP)
t4 = base.TestCase(
action_spec=action_spec,
observation_spec=observation_spec,
default_action_name='b2')
self.assertEqual(t4.map_external_to_internal_action(action),
base.InternalAction.HIGH)
t5 = base.TestCase(
action_spec=action_spec,
observation_spec=observation_spec,
default_action_name='c_0')
self.assertEqual(t5.map_external_to_internal_action(action),
base.InternalAction.LOW)
t6 = base.TestCase(
action_spec=action_spec,
observation_spec=observation_spec,
default_action_name='c_1')
self.assertEqual(t6.map_external_to_internal_action(action),
base.InternalAction.HIGH)
t7 = base.TestCase(
action_spec=action_spec,
observation_spec=observation_spec,
default_action_name='d_0')
self.assertEqual(t7.map_external_to_internal_action(action),
base.InternalAction.LOW)
t8 = base.TestCase(
action_spec=action_spec,
observation_spec=observation_spec,
default_action_name='d_1')
self.assertEqual(t8.map_external_to_internal_action(action),
base.InternalAction.HIGH)
t9 = base.TestCase(
action_spec=action_spec,
observation_spec=observation_spec,
default_action_name='e')
self.assertEqual(t9.map_external_to_internal_action(action),
base.InternalAction.HIGH)
t10 = base.TestCase(
action_spec=action_spec,
observation_spec=observation_spec,
default_action_name='f_0')
self.assertEqual(t10.map_external_to_internal_action(action),
base.InternalAction.HIGH)
t11 = base.TestCase(
action_spec=action_spec,
observation_spec=observation_spec,
default_action_name='f_1')
self.assertEqual(t11.map_external_to_internal_action(action),
base.InternalAction.LOW)
def test_get_inverse_action(self):
action_spec = {
'a': specs.BoundedArray(
shape=(2,), dtype=np.float32, minimum=-1, maximum=1, name='a'),
'b': specs.DiscreteArray(num_values=4, name='b'),
'c': specs.BoundedArray(
shape=(2, 3), dtype=np.float32, minimum=-1, maximum=1,
name='c'),
'd': specs.StringArray((), name='d'),
'e': specs.Array(shape=(2,), dtype=bool, name='e'),
}
observation_spec = specs.BoundedArray(
shape=(2,), dtype=np.float32, minimum=-1, maximum=1, name='obs')
for default_action_name in ['a_0', 'a_1', 'b', 'c_0', 'c_1', 'c_2',
'c_3', 'c_4', 'c_5', 'd']:
test_case = base.TestCase(
action_spec=action_spec,
observation_spec=observation_spec,
default_action_name=default_action_name)
for action in [base.InternalAction.LOW, base.InternalAction.HIGH,
base.InternalAction.NOOP]:
with self.subTest(f'{default_action_name}_{action}'):
self.assertEqual(
test_case.map_external_to_internal_action(
test_case.map_internal_to_external_action(action)),
action)
# Test boolean case.
for default_action_name in ['e_0', 'e_1']:
test_case = base.TestCase(
action_spec=action_spec,
observation_spec=observation_spec,
default_action_name=default_action_name)
for action in [base.InternalAction.LOW, base.InternalAction.HIGH]:
with self.subTest(f'{default_action_name}_{action}'):
self.assertEqual(
test_case.map_external_to_internal_action(
test_case.map_internal_to_external_action(action)),
action)
@parameterized.parameters(
dict(spec=specs.BoundedArray(
shape=(2,), dtype=np.float32, minimum=-1, maximum=1, name='obs'),
signal=np.array([1, 1]), no_signal=np.array([-1, -1])),
dict(spec=specs.Array(shape=(2,), dtype=np.bytes_, name='obs'),
signal=np.array([b'1', b'1']), no_signal=np.array([b'0', b'0'])),
dict(spec=specs.Array(shape=(2,), dtype=np.str_, name='obs'),
signal=np.array(['1', '1']), no_signal=np.array(['0', '0'])),
dict(spec=specs.Array(shape=(), dtype=np.float32, name='obs'),
signal=np.array(1), no_signal=np.array(0)),
dict(spec=specs.Array(shape=(2,), dtype=np.float32, name=b'obs'),
signal=np.array([1, 1]), no_signal=np.array([0, 0])),
dict(spec=specs.Array(
shape=(2,), dtype=np.float32, name='obs'),
signal=np.array([1, 1]), no_signal=np.array([0, 0])),
dict(spec=specs.Array(
shape=(2,), dtype=np.float32, name=_StringLikeEnum.OBS),
signal=np.array([1, 1]), no_signal=np.array([0, 0])),
dict(spec=specs.Array(
shape=(2,), dtype=np.float32, name=_StringLikeEnum2.OBS),
signal=np.array([1, 1]), no_signal=np.array([0, 0])),
)
def test_get_signal_injector_visitor_fn(
self, spec: specs.Array, signal: np.ndarray, no_signal: np.ndarray):
# Spec names are always converted to string in tsuite.
name = str(spec.name)
with self.subTest('InjectSignal'):
fn = base.make_signal_injector_visitor_fn([name])
np.testing.assert_array_equal(fn(spec), signal)
with self.subTest('NotInjectSignal'):
fn = base.make_signal_injector_visitor_fn([])
np.testing.assert_array_equal(fn(spec), no_signal)
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/base_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the latency of the agent-runloop."""
from collections.abc import Sequence
import time
import dm_env
from tsuite._src import base
class TestCase(base.TestCase):
"""Tests if the latency is below a given threshold.
This test returns a reward if the latency is below a certain threshold for all
steps in the episode.
The latency is specified in milliseconds.
This test ensures that the agent (including the entire runloop) can
fulfill latency guarantees.
"""
def __init__(self, latency_in_ms: str, episode_length: str, **kwargs):
"""Initializes a new LatencyTestCase.
Args:
latency_in_ms: latency threshold in milliseconds
episode_length: the length of each episode
**kwargs: additional keyword arguments forwarded to the base class.
"""
super().__init__(**kwargs)
self._latency_in_ms = int(latency_in_ms)
self._episode_length = int(episode_length)
if self._latency_in_ms <= 0:
raise ValueError("Latency threshold must be positive.")
if self._episode_length <= 0:
raise ValueError("Episode length must be positive.")
self._timestamps = []
def reset(self) -> dm_env.TimeStep:
self._timestamps = [time.time()]
return super().base_reset()
def step(self, action: base.ArrayTree) -> dm_env.TimeStep:
del action
self._timestamps.append(time.time())
# Reached episode end.
if self.step_counter + 1 == self._episode_length:
last_timestamp = self._timestamps[0]
for timestamp in self._timestamps[1:]:
if timestamp - last_timestamp > self._latency_in_ms / 1000.:
return super().base_step(success=False, terminate=True)
last_timestamp = timestamp
return super().base_step(success=True, terminate=True)
return super().base_step()
def list_test_tasks() -> Sequence[str]:
"""Returns available test-tasks of TestCase."""
# Test cases correspond to 100Hz, 30Hz and 8Hz.
# Each test runs for 128 timesteps by default, which corresponds to
# multiple seconds for most framerates.
return [f"latency@{n}@128" for n in [10, 34, 125]]
|
tsuite-main
|
tsuite/_src/latency.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for overfit."""
from absl.testing import absltest
import numpy as np
from tsuite._src import agent
from tsuite._src import base
from tsuite._src import overfit
from tsuite._src import test_utils
from tsuite._src import tsuite
SUCCESS = base.SUCCESS
FAIL = base.FAIL
class OverfitTest(test_utils.TSuiteTest):
def test_overfit_correct_behaviour(self):
env = tsuite.TSuiteEnvironment(
'overfit',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Plays one episode with the correct behaviour.
timestep = env.reset()
# The following action sequence corresponds to the "secret" in the
# overfit.TestCase class.
for discrete_action in [0, 3, 3, 0]:
action = ({'a': [np.array(discrete_action, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, SUCCESS)
def test_overfit_incorrect_behaviour(self):
env = tsuite.TSuiteEnvironment(
'overfit',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Plays one episode with the incorrect behaviour.
timestep = env.reset()
for discrete_action in [0, 3, 3, 2]:
action = ({'a': [np.array(discrete_action, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, FAIL)
def test_overfit_with_best_action(self):
env = tsuite.TSuiteEnvironment(
'overfit',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Runs 5 episodes with optimal action-sequence.
for _ in range(5):
timestep = env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION))
self.assertIsNone(timestep.reward)
for _ in range(4):
timestep = env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION))
self.assertEqual(timestep.reward, SUCCESS)
def test_with_agent(self):
logs = agent.fit_agent_to_tsuite_task(
'overfit',
early_stopping_mean_return=0.9)
self.assertGreater(logs[-1]['value'], 0.9)
def test_overfit_list_test_tasks(self):
self.assertSetEqual(
set(overfit.list_test_tasks()),
{'overfit'})
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/overfit_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for future_leakage."""
from absl.testing import absltest
import numpy as np
from tsuite._src import agent
from tsuite._src import base
from tsuite._src import future_leakage
from tsuite._src import test_utils
from tsuite._src import tsuite
SUCCESS = base.SUCCESS
FAIL = base.FAIL
class FutureLeakageTest(test_utils.TSuiteTest):
def test_future_leakage_correct_behaviour(self):
env = tsuite.TSuiteEnvironment(
'future_leakage',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Plays two episode with the correct behaviour.
for discrete_action in [0, 3]:
env.reset()
action = ({'a': [np.array(discrete_action, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, SUCCESS)
def test_future_leakage_incorrect_behaviour(self):
env = tsuite.TSuiteEnvironment(
'future_leakage',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Plays four episode with the incorrect behaviour.
for discrete_action in [3, 0, 1, 2]:
env.reset()
action = ({'a': [np.array(discrete_action, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, FAIL)
def test_future_leakage_with_best_action(self):
env = tsuite.TSuiteEnvironment(
'future_leakage',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Runs 5 episodes with optimal action-sequence.
for _ in range(5):
self.assertIsNone(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward)
self.assertEqual(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward,
SUCCESS)
def test_with_agent(self):
logs = agent.fit_agent_to_tsuite_task(
'future_leakage',
n_updates=50)
self.assertLess(logs[-1]['value'], 0.6)
self.assertGreater(logs[-1]['value'], 0.4)
def test_future_leakage_list_test_tasks(self):
self.assertSetEqual(
set(future_leakage.list_test_tasks()),
{'future_leakage'})
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/future_leakage_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests utils."""
from absl.testing import parameterized
from dm_env import specs
import numpy as np
class TSuiteTest(parameterized.TestCase):
"""Helper class which sets up action and obs spec."""
def setUp(self):
super().setUp()
self._action_spec = (
{'a': [specs.DiscreteArray(num_values=4, name='discrete')]},
specs.BoundedArray(
shape=(2,), dtype=np.float32, minimum=[-1, -1], maximum=1,
name='cont_up|cont_left'),
specs.BoundedArray(
shape=(2, 3),
dtype=np.float32,
minimum=np.zeros((2, 3)),
maximum=np.ones((2, 3)),
name='tensor',
),
)
self._observation_spec = {
'rgb': specs.BoundedArray(shape=(8, 8, 3), dtype=np.float32, minimum=-1,
maximum=1, name='rgb'),
'text': (specs.BoundedArray(shape=(5,), dtype=np.int32, minimum=0,
maximum=9, name='text'),
specs.DiscreteArray(num_values=5, name='text_length')),
'raw_text': specs.Array((), dtype=np.str_, name='raw_text'),
'dvector': specs.BoundedArray(shape=(3,), dtype=np.int32,
minimum=-1, maximum=1, name='dvector'),
'float': specs.BoundedArray(shape=(), minimum=-1, maximum=1,
dtype=np.float32, name='float'),
}
|
tsuite-main
|
tsuite/_src/test_utils.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the ability of the agent to deal with a slow environment."""
from collections.abc import Sequence
import time
import dm_env
from tsuite._src import base
from tsuite._src import overfit
class TestCase(overfit.TestCase):
"""Tests the capability of the agent to learn from a slow environment.
This is the same as the overfit test, except for an additional configurable
sleep parameter that simulates a long step time of the underlying environment.
"""
def __init__(self, delay_in_ms: str, **kwargs):
"""Initializes a new TestCase.
Args:
delay_in_ms: delay in milliseconds.
**kwargs: additional keyword arguments forwarded to the base class.
"""
super().__init__(**kwargs)
self._delay_in_ms = int(delay_in_ms)
if self._delay_in_ms <= 0:
raise ValueError("Delay must be positive.")
def reset(self) -> dm_env.TimeStep:
time.sleep(self._delay_in_ms / 1000.0)
return super().reset()
def step(self, action: base.ArrayTree) -> dm_env.TimeStep:
time.sleep(self._delay_in_ms / 1000.0)
return super().step(action)
def list_test_tasks() -> Sequence[str]:
"""Returns available test-tasks of TestCase."""
return ["slow_env@500"]
|
tsuite-main
|
tsuite/_src/slow_env.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests if the agent is robust against bad timesteps."""
from collections.abc import Sequence
import itertools
import dm_env
import numpy as np
from tsuite._src import base
class TestCase(base.TestCase):
"""Tests if the agent is robust against bad timesteps.
This TestCase simulates a broken environment; it is reasonable for an agent
to crash with a meaningful error message.
The agent receives bad input for the timestep variables `discount`, `reward`,
`step_type`. Depending on the mode, the agent receives:
- `negative`: a negative value,
- `oor`: a finite non-negative out of range value (only available for
discount and step_type),
- `nan`: not a number,
- `inf`: an infinite value.
"""
def __init__(self, timestep_var: str, mode: str, **kwargs):
super().__init__(**kwargs)
if mode == "nan":
self._timestep_update = {timestep_var: np.nan}
elif mode == "inf":
self._timestep_update = {timestep_var: np.inf}
elif mode == "negative":
# Integer type timestep variables
if timestep_var in ["step_type"]:
self._timestep_update = {timestep_var: -1}
else:
self._timestep_update = {timestep_var: -1.0}
elif mode == "oor":
if timestep_var == "discount":
self._timestep_update = dict(discount=1.1)
elif timestep_var == "step_type":
self._timestep_update = dict(step_type=3)
# In all other cases there is no finite non-negative out-of-range value.
else:
raise ValueError(
f"Timestep variable {timestep_var} does not support oor mode.")
else:
raise ValueError(f"Unknown mode {mode} passed to bad_timestep.TestCase.")
def step(self, action: base.ArrayTree) -> dm_env.TimeStep:
del action
if self.step_counter >= 1:
timestep = super().base_step(success=True, terminate=True)
else:
timestep = super().base_step(success=False, terminate=False)
timestep = timestep._replace(**self._timestep_update)
return timestep
def list_test_tasks() -> Sequence[str]:
"""Returns available test-tasks of TestCase."""
names = ["discount", "reward", "step_type"]
modes = ["nan", "inf", "negative"]
tasks = [f"bad_timestep@{name}@{mode}"
for name, mode in itertools.product(names, modes)]
tasks += ["bad_timestep@discount@oor", "bad_timestep@step_type@oor"]
return tasks
|
tsuite-main
|
tsuite/_src/bad_timestep.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for thread_safety."""
import concurrent.futures
from absl.testing import absltest
import numpy as np
from tsuite._src import base
from tsuite._src import test_utils
from tsuite._src import thread_safety
from tsuite._src import tsuite
SUCCESS = base.SUCCESS
class ThreadSafetyTest(test_utils.TSuiteTest):
def _run_single_env(self, n_steps: int):
env = tsuite.TSuiteEnvironment(
'thread_safety', self._action_spec, self._observation_spec,
default_action_name='discrete',
default_observation_name='float')
timestep = env.reset()
action = ({'a': [np.array(0, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
for _ in range(n_steps):
timestep = env.step(action)
return timestep
def test_thread_safety_single_env(self):
timestep = self._run_single_env(n_steps=1)
self.assertEqual(timestep.reward, SUCCESS)
def test_thread_safety_multi_env(self):
with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
futures = [executor.submit(self._run_single_env, n_steps=20)
for _ in range(20)]
with self.assertRaises(RuntimeError):
for f in futures:
f.result()
def test_thread_safety_list_test_tasks(self):
self.assertSetEqual(
set(thread_safety.list_test_tasks()),
{'thread_safety'})
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/thread_safety_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the reaction of the agent to user-provided inputs."""
from collections.abc import Mapping, Sequence
import pickle
from typing import TypeVar
import dm_env
import numpy as np
import tree
from tsuite._src import base
internal_open = open
ArrayT = TypeVar("ArrayT", bound=base.ArrayTree)
def tree_update(lhs: ArrayT, rhs: base.ArrayTree) -> ArrayT:
"""Returns updated array trees.
The lhs provides the default values, which are potentially overwritten by
the rhs. The right hand side does not need to be of the exact same type as
the lhs, as long as the path structure matches.
  e.g. lhs = dm_env.TimeStep(observation, reward, step_type, discount) can be
updated using a dictionary dict(reward=2).
Args:
lhs: left hand side that provides defaults.
rhs: right hand side that provides overrides.
"""
flat_lhs = tree.flatten_with_path(lhs)
flat_rhs = tree.flatten_with_path(rhs)
map_rhs = dict(flat_rhs)
updated_lhs = [map_rhs.get(path, value) for path, value in flat_lhs]
return tree.unflatten_as(lhs, updated_lhs)
def tree_intersection_compare(lhs: base.ArrayTree, rhs: base.ArrayTree) -> bool:
"""Returns whether the two given arrays trees are equal.
Only elements present in both structures need to be equal.
In particular tree_intersection_compare(AnyTree, {}) would always be True.
Args:
lhs: left hand side.
rhs: right hand side.
"""
flat_lhs = tree.flatten_with_path(lhs)
flat_rhs = tree.flatten_with_path(rhs)
map_rhs = dict(flat_rhs)
for path, lhs_value in flat_lhs:
if path in map_rhs:
rhs_value = map_rhs[path]
if np.any(lhs_value != rhs_value):
return False
return True
class TestCase(base.TestCase):
"""Tests the reaction of the agent to user-provided inputs.
  The agent receives a reward if it is able to output a high / low value
  for a given user-provided input.
This test enables users to create custom tsuite tests on the fly, by
providing observations and the expected action.
Custom user input format
========================
The user provides the custom input as a pickle file. The pickle file must
contain a Sequence of episodes.
Episode:
-------
  Each episode must consist of a sequence of Mappings, one per step, each with
  up to three optional entries: "timestep", "internal_action" and
  "external_action".
  If present, the value of the "timestep" entry must be an ArrayTree that can be
  used to update the default timestep returned by tsuite.
  If present, the value of "internal_action" or "external_action" is compared to
  the action returned by the agent for this timestep and determines the
  reward associated with the next default timestep returned by tsuite.
  "internal_action" has to be given in the tsuite-internal format,
  where 0 refers to a low action, 1 to a noop, and 2 to a high action.
  "external_action" has to be given in the format of the environment's
  action_spec; the external action is compared using `tree_intersection_compare`.
TSuite default timesteps:
------------------------
For the first timestep in the episode tsuite returns:
dm_env.TimeStep(observation=zero_obs_according_to_spec,
step_type=first
reward=None,
discount=None)
For the last timestep in the episode tsuite returns:
dm_env.TimeStep(observation=zero_obs_according_to_spec,
step_type=last
# determined by "internal_action" or "external_action"
reward=reward,
discount=0.0)
For all other timesteps in the episode tsuite returns:
dm_env.TimeStep(observation=zero_obs_according_to_spec,
step_type=mid
# determined by "internal_action" or "external_action"
reward=reward,
discount=1.0)
Example: Matching colors
------------------------
Given an observation spec: {'rgb': Array(shape=(96, 72, 3))},
and an action spec: {'color': DiscreteArray(num_values=3)}
  A valid custom test that checks whether the agent reacts with the correct color:
# Sequence of episodes
[
# First episode "red"
[
{
'timestep': dict(observation={'rgb': red_array}),
'external_action': {'color': np.array(0)}
},
{}, # Use default tsuite timestep for last-timestep.
],
# Second episode "green"
[
{
'timestep': dict(observation={'rgb': green_array}),
'external_action': {'color': np.array(1)}
},
{}, # Use default tsuite timestep for last-timestep.
],
# Third episode "blue"
[
{
'timestep': dict(observation={'rgb': blue_array}),
'external_action': {'color': np.array(2)}
},
{}, # Use default tsuite timestep for last-timestep.
]
]
"""
def __init__(self, path: str, **kwargs):
"""Initializes a new TestCase.
Args:
path: path to the user provided file in pickle format.
**kwargs: additional keyword arguments forwarded to the base class.
"""
super().__init__(**kwargs)
with internal_open(path, "rb") as f:
self._episodes = pickle.load(f)
if not isinstance(self._episodes, Sequence):
raise ValueError(f"Provided episodes are malformed: {self._episodes}")
for episode in self._episodes:
if not isinstance(episode, Sequence):
raise ValueError(f"Provided episode is malformed: {episode}")
if len(episode) < 2:
raise ValueError(f"Provided episode is too short: {len(episode)}")
for step in episode:
if not isinstance(step, Mapping):
raise ValueError(f"Provided episode is malformed: {episode}")
self._episode = self._episodes[0]
def reset(self) -> dm_env.TimeStep:
self._episode = self._episodes[
self.episode_counter % len(self._episodes)]
timestep = self.base_reset()
if "timestep" in self._episode[0]:
timestep = tree_update(timestep, self._episode[0]["timestep"])
return timestep
def step(self, action: base.ArrayTree) -> dm_env.TimeStep:
success = False
last_step = self._episode[self.step_counter]
if "internal_action" in last_step:
internal_action = self.map_external_to_internal_action(action)
success = internal_action == last_step["internal_action"]
elif "external_action" in last_step:
success = tree_intersection_compare(action, last_step["external_action"])
timestep = super().base_step(
success=success,
# Step counter is updated in base_step.
terminate=len(self._episode) == self.step_counter + 2)
if "timestep" in self._episode[self.step_counter]:
timestep = tree_update(
timestep, self._episode[self.step_counter]["timestep"])
return timestep
def best_next_internal_action(self) -> base.InternalAction:
"""Returns the best next action based on the current state of the env."""
if "internal_action" in self._episode[self.step_counter]:
return self._episode[self.step_counter]["internal_action"]
elif "external_action" in self._episode[self.step_counter]:
return self.map_external_to_internal_action(
self._episode[self.step_counter]["external_action"])
return base.InternalAction.NOOP
def list_test_tasks() -> Sequence[str]:
"""Returns available test-tasks of TestCase."""
  # There are no default custom test-cases. If one existed, it would look
  # like this:
# return ["custom@/home/user/custom.pickle"]
return []
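# A minimal sketch (path and actions below are hypothetical) of how a custom
# test file could be written; it mirrors the episode format documented in the
# TestCase docstring above and runs only when this module is executed directly.
if __name__ == "__main__":
  episodes = [
      [
          # A "timestep" entry (e.g. dict(observation=...)) could additionally
          # override the default observation here.
          {"internal_action": base.InternalAction.HIGH},
          {},  # Use the default tsuite timestep for the last step.
      ],
      [
          {"internal_action": base.InternalAction.LOW},
          {},
      ],
  ]
  with internal_open("/tmp/custom.pickle", "wb") as f:
    pickle.dump(episodes, f)
  # The corresponding test task would then be "custom@/tmp/custom.pickle".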
|
tsuite-main
|
tsuite/_src/custom.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the observation space of the agent."""
from collections.abc import Sequence
import dm_env
import tree
from tsuite._src import base
class TestCase(base.TestCase):
"""Tests the observation space of the agent.
The agent receives a reward if it's able to output a high / low value for the
default action depending on the value of a specific observation.
Every episode in this test case is of length 1. The observation in an episode
can either contain a signal or not. If the observation contains a signal, the
agent is expected to output the "high" action, otherwise the agent is expected
to output the "low" action to get reward.
This test ensures that the agent is sensitive to its observation space.
"""
def __init__(self, observation_name: str, **kwargs):
kwargs["default_observation_name"] = observation_name
super().__init__(**kwargs)
def reset(self) -> dm_env.TimeStep:
# The episode counter is not yet increased, so we check for == 1 here
# instead of == 0.
signal = (self.episode_counter % 2) == 1
return super().base_reset(observation=self.get_observation(signal))
def step(self, action: base.ArrayTree) -> dm_env.TimeStep:
internal_action = self.map_external_to_internal_action(action)
return super().base_step(
success=internal_action == self.best_next_internal_action(),
observation=self.get_observation(),
terminate=True)
def best_next_internal_action(self) -> base.InternalAction:
"""Returns the best next action based on the current state of the env."""
signal = (self.episode_counter % 2) == 0
return base.InternalAction.HIGH if signal else base.InternalAction.LOW
def list_test_tasks(observation_spec: base.SpecsTree) -> Sequence[str]:
"""Returns available test-tasks of TestCase.
Args:
observation_spec: defines the observations consumed by the agent.
"""
names = [spec.name for spec in tree.flatten(observation_spec)]
return [f"observation_space@{name}" for name in names]
|
tsuite-main
|
tsuite/_src/observation_space.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the ability of the agent to overfit to a sequence."""
from collections.abc import Sequence
import dm_env
from tsuite._src import base
class TestCase(base.TestCase):
"""Tests the capability of the agent to overfit to a sequence.
  The agent only receives rewards if it learns to output a certain sequence of
  high and low values for the default action:
  (1xL, 2xH, 1xL).
  The full sequence consists of only 4 actions, hence it is likely that the
  agent can discover the solution on its own through exploration.
"""
def __init__(self, **kwargs):
"""Initializes a new TestCase.
Args:
**kwargs: additional keyword arguments forwarded to the base class.
"""
super().__init__(**kwargs)
self._secret = [base.InternalAction.LOW,
base.InternalAction.HIGH, base.InternalAction.HIGH,
base.InternalAction.LOW]
self._sequence = []
def reset(self) -> dm_env.TimeStep:
self._sequence = []
return super().base_reset()
def step(self, action: base.ArrayTree) -> dm_env.TimeStep:
self._sequence.append(self.map_external_to_internal_action(action))
if len(self._sequence) < len(self._secret):
return super().base_step()
return super().base_step(
success=self._sequence == self._secret, terminate=True)
def best_next_internal_action(self) -> base.InternalAction:
"""Returns the best next action based on the current state of the env."""
n = len(self._sequence)
if n >= len(self._secret):
return self._secret[0]
return self._secret[n]
def list_test_tasks() -> Sequence[str]:
"""Returns available test-tasks of TestCase."""
return ["overfit"]
|
tsuite-main
|
tsuite/_src/overfit.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the memory of the agent."""
from collections.abc import Sequence
import dm_env
from tsuite._src import base
class TestCase(base.TestCase):
"""Tests the memory capabilities of the agent.
The agent receives a reward if it's able to output a high / low value for the
default action depending on the value of a specific observation. The agent is
required to output the action with a delay of `n_steps`.
This test ensures that the agent is able to memorize information during the
episode.
"""
def __init__(self, n_steps: str, **kwargs):
"""Initializes a new MemoryTestCase.
Args:
n_steps: delay between the observation of the signal and the action.
**kwargs: additional keyword arguments forwarded to the base class.
"""
super().__init__(**kwargs)
self._n_steps = int(n_steps)
def reset(self) -> dm_env.TimeStep:
# The episode counter is not yet increased, so we check for == 1 here
# instead of == 0.
signal = (self.episode_counter % 2) == 1
return super().base_reset(observation=self.get_observation(signal))
def step(self, action: base.ArrayTree) -> dm_env.TimeStep:
# Waits for n_steps before giving a reward for the correct action.
if self.step_counter < self._n_steps:
return super().base_step()
internal_action = self.map_external_to_internal_action(action)
return super().base_step(
success=internal_action == self.best_next_internal_action(),
observation=self.get_observation(),
terminate=True)
def best_next_internal_action(self) -> base.InternalAction:
"""Returns the best next action based on the current state of the env."""
if self.step_counter < self._n_steps:
return base.InternalAction.NOOP
signal = (self.episode_counter % 2) == 0
return base.InternalAction.HIGH if signal else base.InternalAction.LOW
def list_test_tasks() -> Sequence[str]:
"""Returns available test-tasks of TestCase."""
return [f"memory@{n}" for n in range(10)]
|
tsuite-main
|
tsuite/_src/memory.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for action_space."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tsuite._src import action_space
from tsuite._src import agent
from tsuite._src import base
from tsuite._src import test_utils
from tsuite._src import tsuite
SUCCESS = base.SUCCESS
FAIL = base.FAIL
class ActionSpaceTest(test_utils.TSuiteTest):
@parameterized.parameters(
('discrete@high', [SUCCESS, FAIL, FAIL]),
('discrete@low', [FAIL, FAIL, SUCCESS]),
('cont_up@high', [FAIL, FAIL, SUCCESS]),
('cont_up@low', [FAIL, SUCCESS, FAIL]),
('cont_left@high', [FAIL, SUCCESS, FAIL]),
('cont_left@low', [FAIL, FAIL, SUCCESS]),
('tensor_0@low', [FAIL, FAIL, SUCCESS]),
('tensor_0@high', [FAIL, SUCCESS, FAIL]),
('tensor_1@low', [FAIL, SUCCESS, FAIL]),
('tensor_1@high', [SUCCESS, FAIL, FAIL]),
('tensor_2@low', [SUCCESS, FAIL, FAIL]),
('tensor_2@high', [FAIL, FAIL, SUCCESS]),
('tensor_3@low', [SUCCESS, FAIL, FAIL]),
('tensor_3@high', [FAIL, SUCCESS, FAIL]),
('tensor_4@low', [FAIL, SUCCESS, FAIL]),
('tensor_4@high', [FAIL, FAIL, SUCCESS]),
('tensor_5@low', [FAIL, FAIL, SUCCESS]),
('tensor_5@high', [SUCCESS, FAIL, FAIL]),
)
def test_action_space(self, identifier, reward_sequence):
env = tsuite.TSuiteEnvironment(
f'action_space@{identifier}', self._action_spec, self._observation_spec)
action = ({'a': [np.array(3, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.array([[0.5, 1.0, 0.0], [0.0, 0.5, 1.0]], dtype=np.float32))
self.assertIsNone(env.step(action).reward)
self.assertEqual(env.step(action).reward, reward_sequence[0])
action = ({'a': [np.array(2, dtype=np.int32)]},
np.array([-0.9, 0.9], dtype=np.float32),
np.array([[1.0, 0.0, 0.5], [1.0, 0.0, 0.5]], dtype=np.float32))
self.assertIsNone(env.step(action).reward)
self.assertEqual(env.step(action).reward, reward_sequence[1])
action = ({'a': [np.array(0, dtype=np.int32)]},
np.array([0.9, -0.9], dtype=np.float32),
np.array([[0.0, 0.5, 1.0], [0.5, 1.0, 0.0]], dtype=np.float32))
self.assertIsNone(env.step(action).reward)
self.assertEqual(env.step(action).reward, reward_sequence[2])
@parameterized.parameters(
('discrete@high',),
('discrete@low',),
('cont_up@high',),
('cont_up@low',),
('cont_left@high',),
('cont_left@low',),
('tensor_0@low',),
('tensor_0@high',),
('tensor_1@low',),
('tensor_1@high',),
('tensor_2@low',),
('tensor_2@high',),
('tensor_3@low',),
('tensor_3@high',),
('tensor_4@low',),
('tensor_4@high',),
('tensor_5@low',),
('tensor_5@high',),
)
def test_action_space_with_best_action(self, identifier):
env = tsuite.TSuiteEnvironment(
f'action_space@{identifier}', self._action_spec, self._observation_spec)
# Runs 5 episodes with optimal action-sequence.
for _ in range(5):
self.assertIsNone(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward)
self.assertEqual(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward,
SUCCESS)
@parameterized.parameters(
('discrete@high',),
('discrete@low',),
)
def test_with_agent(self, identifier):
logs = agent.fit_agent_to_tsuite_task(
f'action_space@{identifier}',
early_stopping_mean_return=0.9)
self.assertGreater(logs[-1]['value'], 0.9)
def test_action_space_list_test_tasks(self):
self.assertSetEqual(
set(action_space.list_test_tasks(self._action_spec)),
{'action_space@discrete@high', 'action_space@discrete@low',
'action_space@cont_up@high', 'action_space@cont_up@low',
'action_space@cont_left@high', 'action_space@cont_left@low',
'action_space@tensor_0@low', 'action_space@tensor_0@high',
'action_space@tensor_1@low', 'action_space@tensor_1@high',
'action_space@tensor_2@low', 'action_space@tensor_2@high',
'action_space@tensor_3@low', 'action_space@tensor_3@high',
'action_space@tensor_4@low', 'action_space@tensor_4@high',
'action_space@tensor_5@low', 'action_space@tensor_5@high'})
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/action_space_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for bad_observation."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tsuite._src import bad_observation
from tsuite._src import test_utils
from tsuite._src import tsuite
class BadObservationTest(test_utils.TSuiteTest):
@parameterized.parameters(
('rgb', 'nan'),
('rgb', 'inf'),
('rgb', 'dtype'),
('dvector', 'dtype'),
('float', 'nan'),
('float', 'inf'),
('float', 'dtype'),
)
def test_bad_observations(self, identifier, mode):
env = tsuite.TSuiteEnvironment(
f'bad_observation@{identifier}@{mode}',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
timestep = env.reset()
if mode == 'nan':
self.assertTrue(np.all(np.isnan(timestep.observation[identifier])))
elif mode == 'inf':
self.assertTrue(np.all(np.isinf(timestep.observation[identifier])))
elif mode == 'dtype':
if identifier in ['rgb', 'float']:
self.assertEqual(timestep.observation[identifier].dtype, np.int32)
else:
self.assertEqual(timestep.observation[identifier].dtype, np.float32)
action = ({'a': [np.array(2, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
if mode == 'nan':
self.assertTrue(np.all(np.isnan(timestep.observation[identifier])))
elif mode == 'inf':
self.assertTrue(np.all(np.isinf(timestep.observation[identifier])))
elif mode == 'dtype':
if identifier in ['rgb', 'float']:
self.assertEqual(timestep.observation[identifier].dtype, np.int32)
else:
self.assertEqual(timestep.observation[identifier].dtype, np.float32)
def test_bad_observations_list_test_tasks(self):
self.assertSetEqual(
set(bad_observation.list_test_tasks(self._observation_spec)),
{'bad_observation@rgb@nan', 'bad_observation@text@nan',
'bad_observation@text_length@nan',
'bad_observation@dvector@nan', 'bad_observation@float@nan',
'bad_observation@rgb@inf', 'bad_observation@text@inf',
'bad_observation@text_length@inf',
'bad_observation@dvector@inf', 'bad_observation@float@inf',
'bad_observation@rgb@dtype', 'bad_observation@text@dtype',
'bad_observation@text_length@dtype',
'bad_observation@dvector@dtype', 'bad_observation@float@dtype',
'bad_observation@raw_text@inf', 'bad_observation@raw_text@nan',
'bad_observation@raw_text@dtype'})
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/bad_observation_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tsuite."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tsuite._src import test_utils
from tsuite._src import tsuite
class TsuiteTest(test_utils.TSuiteTest):
@parameterized.parameters([True, False])
def test_list_test_tasks(self, include_broken):
tsuite.list_test_tasks(
self._action_spec,
self._observation_spec,
include_broken_env_tasks=include_broken)
@parameterized.parameters([True, False])
def test_tsuite_environment(self, verbose):
test_tasks = tsuite.list_test_tasks(
self._action_spec,
self._observation_spec,
include_broken_env_tasks=False)
for test_task in test_tasks:
with self.subTest(f'{test_task}'):
env = tsuite.TSuiteEnvironment(
test_task=test_task,
action_spec=self._action_spec,
observation_spec=self._observation_spec,
verbose_logging=verbose)
ts = env.reset()
self.assertTrue(ts.first())
action = ({'a': [np.array(1, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
env.step(action)
action = env.read_property(tsuite.PROPERTY_BEST_ACTION)
env.step(action)
action = env.read_property(tsuite.PROPERTY_RANDOM_ACTION)
env.step(action)
action = env.read_property(tsuite.PROPERTY_WORST_ACTION)
env.step(action)
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/tsuite_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for bad_timestep."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tsuite._src import bad_timestep
from tsuite._src import test_utils
from tsuite._src import tsuite
class BadTimestepTest(test_utils.TSuiteTest):
@parameterized.parameters(
('discount', 'negative', -1.0),
('discount', 'oor', 1.1),
('discount', 'nan', np.nan),
('discount', 'inf', np.inf),
('reward', 'negative', -1.0),
('reward', 'nan', np.nan),
('reward', 'inf', np.inf),
('step_type', 'negative', -1),
('step_type', 'oor', 3),
('step_type', 'nan', np.nan),
('step_type', 'inf', np.inf),
)
def test_bad_timestep(self, identifier, mode, expected):
env = tsuite.TSuiteEnvironment(
f'bad_timestep@{identifier}@{mode}',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
env.reset()
action = ({'a': [np.array(2, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
var = getattr(timestep, identifier)
self.assertTrue(expected is var or expected == var)
def test_bad_timestep_list_test_tasks(self):
self.assertSetEqual(
set(bad_timestep.list_test_tasks()),
{'bad_timestep@discount@nan', 'bad_timestep@discount@inf',
'bad_timestep@discount@oor', 'bad_timestep@discount@negative',
'bad_timestep@reward@nan', 'bad_timestep@reward@inf',
'bad_timestep@reward@negative',
'bad_timestep@step_type@nan', 'bad_timestep@step_type@inf',
'bad_timestep@step_type@oor', 'bad_timestep@step_type@negative',})
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/bad_timestep_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sensitivity."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tsuite._src import agent
from tsuite._src import base
from tsuite._src import sensitivity
from tsuite._src import test_utils
from tsuite._src import tsuite
SUCCESS = base.SUCCESS
FAIL = base.FAIL
class SensitivitySpaceTest(test_utils.TSuiteTest):
# Test only float observations here, because integer observations cannot
# represent numbers smaller than 1, hence n<0 will fail.
@parameterized.product(
identifier=('rgb', 'float'),
n=(-2, -1, 0, 1, 2),
)
def test_sensitivity_correct_behaviour(self, identifier, n):
env = tsuite.TSuiteEnvironment(
f'sensitivity@{identifier}@{n}',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Plays two episodes with the correct behaviour.
for _ in range(2):
timestep = env.reset()
if np.mean(timestep.observation[identifier]) > 0.0:
discrete_action = 3
else:
discrete_action = 0
action = ({'a': [np.array(discrete_action, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, SUCCESS)
@parameterized.product(
identifier=('rgb', 'dvector', 'text', 'float'),
      n=(-2, -1, 0, 1, 2),
)
def test_sensitivity_incorrect_behaviour(self, identifier, n):
env = tsuite.TSuiteEnvironment(
f'sensitivity@{identifier}@{n}',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Plays two episodes with the incorrect behaviour.
for _ in range(2):
timestep = env.reset()
self.assertTrue(timestep.first())
action = ({'a': [np.array(2, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, FAIL)
@parameterized.product(
identifier=('rgb', 'dvector', 'text', 'float'),
      n=(-2, -1, 0, 1, 2),
)
def test_sensitivity_with_best_action(self, identifier, n):
env = tsuite.TSuiteEnvironment(
f'sensitivity@{identifier}@{n}',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Runs 5 episodes with optimal action-sequence.
for _ in range(5):
self.assertIsNone(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward)
self.assertEqual(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward,
SUCCESS)
def test_with_agent(self):
logs = agent.fit_agent_to_tsuite_task(
'sensitivity@float@0',
early_stopping_mean_return=0.9)
self.assertGreater(logs[-1]['value'], 0.9)
def test_sensitivity_list_test_tasks(self):
self.assertSetEqual(
set(sensitivity.list_test_tasks(self._observation_spec)),
{'sensitivity@rgb@-1', 'sensitivity@rgb@0', 'sensitivity@rgb@1',
'sensitivity@text_length@-1', 'sensitivity@text_length@0',
'sensitivity@text_length@1',
'sensitivity@text@-1', 'sensitivity@text@0',
'sensitivity@text@1',
'sensitivity@dvector@-1', 'sensitivity@dvector@0',
'sensitivity@dvector@1',
'sensitivity@float@-1', 'sensitivity@float@0', 'sensitivity@float@1'})
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/sensitivity_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
|
tsuite-main
|
tsuite/_src/__init__.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for custom."""
import os
import pickle
import tempfile
from absl.testing import absltest
import numpy as np
from tsuite._src import base
from tsuite._src import custom
from tsuite._src import test_utils
from tsuite._src import tsuite
SUCCESS = base.SUCCESS
FAIL = base.FAIL
def _get_episodes():
red_array = np.zeros((8, 8, 3))
red_array[..., 0] = 1
green_array = np.zeros((8, 8, 3))
green_array[..., 1] = 1
blue_array = np.zeros((8, 8, 3))
blue_array[..., 2] = 1
return [
[
{
'timestep': dict(observation={'rgb': red_array}),
'external_action': ({'a': [np.array(0)]},)
},
{}, # Use default tsuite timestep for last-timestep.
],
[
{
'timestep': dict(observation={'rgb': green_array}),
'external_action': ({'a': [np.array(1)]},)
},
{}, # Use default tsuite timestep for last-timestep.
],
[
{
'timestep': dict(observation={'rgb': blue_array}),
'external_action': ({'a': [np.array(3)]},)
},
{}, # Use default tsuite timestep for last-timestep.
]
]
class CustomTest(test_utils.TSuiteTest):
def setUp(self):
super().setUp()
fd, self._path = tempfile.mkstemp(suffix='.pickle')
os.close(fd)
with open(self._path, 'wb') as f:
pickle.dump(_get_episodes(), f)
def tearDown(self):
try:
os.remove(self._path)
except OSError:
pass
super().tearDown()
def test_custom(self):
env = tsuite.TSuiteEnvironment(
f'custom@{self._path}',
self._action_spec,
self._observation_spec,
default_action_name='discrete',
default_observation_name='float')
with self.subTest('red_episode_correct'):
timestep = env.reset()
np.testing.assert_array_almost_equal(
timestep.observation['rgb'].mean(axis=0).mean(axis=0),
np.array([1.0, 0.0, 0.0]))
action = ({'a': [np.array(0, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, SUCCESS)
with self.subTest('green_episode_correct'):
timestep = env.reset()
np.testing.assert_array_almost_equal(
timestep.observation['rgb'].mean(axis=0).mean(axis=0),
np.array([0.0, 1.0, 0.0]))
action = ({'a': [np.array(1, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, SUCCESS)
with self.subTest('blue_episode_correct'):
timestep = env.reset()
np.testing.assert_array_almost_equal(
timestep.observation['rgb'].mean(axis=0).mean(axis=0),
np.array([0.0, 0.0, 1.0]))
action = ({'a': [np.array(3, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, SUCCESS)
with self.subTest('red_episode_incorrect'):
timestep = env.reset()
np.testing.assert_array_almost_equal(
timestep.observation['rgb'].mean(axis=0).mean(axis=0),
np.array([1.0, 0.0, 0.0]))
action = ({'a': [np.array(1, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, FAIL)
with self.subTest('green_episode_incorrect'):
timestep = env.reset()
np.testing.assert_array_almost_equal(
timestep.observation['rgb'].mean(axis=0).mean(axis=0),
np.array([0.0, 1.0, 0.0]))
action = ({'a': [np.array(3, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, FAIL)
with self.subTest('blue_episode_incorrect'):
timestep = env.reset()
np.testing.assert_array_almost_equal(
timestep.observation['rgb'].mean(axis=0).mean(axis=0),
np.array([0.0, 0.0, 1.0]))
action = ({'a': [np.array(0, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, FAIL)
def test_custom_with_best_action(self):
env = tsuite.TSuiteEnvironment(
f'custom@{self._path}',
self._action_spec, self._observation_spec,
default_action_name='discrete',
default_observation_name='float')
# Runs 5 episodes with optimal action-sequence.
for _ in range(5):
self.assertIsNone(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward)
self.assertEqual(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward,
SUCCESS)
def test_custom_list_test_tasks(self):
self.assertEmpty(custom.list_test_tasks())
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/custom_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests if the agent is robust against bad observations."""
from collections.abc import Sequence
import itertools
import dm_env
import numpy as np
import tree
from tsuite._src import base
class TestCase(base.TestCase):
"""Tests if the agent is robust against bad observations.
This TestCase simulates a broken environment; it is reasonable for an agent
to crash with a meaningful error message.
The agent receives bad input for its observation. Depending on the mode, the
agent receives:
- `nan`: not a number,
- `inf`: infinity,
- `dtype`: the wrong dtype (int <-> float)
"""
def __init__(self, observation_name: str, mode: str, **kwargs):
kwargs["default_observation_name"] = observation_name
super().__init__(**kwargs)
if mode == "nan":
def visitor(path, node):
if path == self._default_observation_path:
node = np.full_like(node, np.nan)
return node
elif mode == "inf":
def visitor(path, node):
if path == self._default_observation_path:
node = np.full_like(node, np.inf)
return node
elif mode == "dtype":
def visitor(path, node):
if path == self._default_observation_path:
if np.issubdtype(node.dtype, np.integer):
node = node.astype(np.float32)
else:
node = node.astype(np.int32)
return node
else:
raise ValueError(
f"Unknown mode {mode} passed to bad_observation.TestCase.")
self._observations = tree.map_structure_with_path(
visitor, self.get_observation())
def reset(self) -> dm_env.TimeStep:
return super().base_reset(
observation=self._observations)
def step(self, action: base.ArrayTree) -> dm_env.TimeStep:
del action
return super().base_step(success=True,
terminate=True,
observation=self._observations)
def list_test_tasks(observation_spec: base.SpecsTree) -> Sequence[str]:
"""Returns available test-tasks of TestCase.
Args:
observation_spec: defines the observations consumed by the agent.
"""
names = [spec.name for spec in tree.flatten(observation_spec)]
modes = ["nan", "inf", "dtype"]
return [f"bad_observation@{name}@{mode}"
for name, mode in itertools.product(names, modes)]
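# Minimal standalone sketch of the corruption applied above for mode "nan"
# (the observation name "rgb" and the flat dict structure are assumptions):
#
#   observation = {"rgb": np.zeros((8, 8, 3), dtype=np.float32)}
#   corrupted = tree.map_structure_with_path(
#       lambda path, node: (np.full_like(node, np.nan)
#                           if path == ("rgb",) else node),
#       observation)
#   assert np.isnan(corrupted["rgb"]).all()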
|
tsuite-main
|
tsuite/_src/bad_observation.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests if agent works with zero discount."""
from collections.abc import Sequence
import dm_env
from tsuite._src import base
class TestCase(base.TestCase):
"""Tests if agent works with zero discount."""
def reset(self) -> dm_env.TimeStep:
return super().base_reset(
observation=self.get_observation(signal=(self.episode_counter % 2) == 0)
)
def step(self, action: base.ArrayTree) -> dm_env.TimeStep:
internal_action = self.map_external_to_internal_action(action)
timestep = super().base_step(
success=internal_action == self.best_next_internal_action(),
terminate=self.step_counter == 1)
timestep = timestep._replace(discount=0.0)
return timestep
def best_next_internal_action(self) -> base.InternalAction:
"""Returns the best next action based on the current state of the env."""
signal = (self.episode_counter % 2) == 0
return base.InternalAction.HIGH if signal else base.InternalAction.LOW
def list_test_tasks() -> Sequence[str]:
"""Returns available test-tasks of TestCase."""
return ["zero_discount"]
|
tsuite-main
|
tsuite/_src/zero_discount.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the sensitivity of the agent to small numerical observations."""
from collections.abc import Sequence
import itertools
import dm_env
import numpy as np
import tree
from tsuite._src import base
class TestCase(base.TestCase):
"""Tests the visual capabilities of the agent.
The agent receives a reward if it's able to output a high / low value for the
default action depending on the value of a numerical observation.
The signal-size of the numerical observation can be reduced in powers of ten.
A sensitivity of n means that the agent has to distinguish 0 and 10^n.
A sensitivity of 1 means that the agent has to distinguish 0 and 10.
A sensitivity of 0 means that the agent has to distinguish 0 and 1.
A sensitivity of -1 means that the agent has to distinguish 0 and 0.1.
A sensitivity of -n means that the agent has to distinguish 0 and 10^-n.
"""
def __init__(self, observation_name: str, sensitivity: str, **kwargs):
kwargs["default_observation_name"] = observation_name
super().__init__(**kwargs)
self._factor = 10**int(sensitivity)
def high_visitor(path, node):
if path == self._default_observation_path:
return np.full_like(node, self._factor)
return node
def low_visitor(path, node):
if path == self._default_observation_path:
return np.zeros_like(node)
return node
self._numeric_observations = [
tree.map_structure_with_path(low_visitor, self.get_observation()),
tree.map_structure_with_path(high_visitor, self.get_observation())
]
def reset(self) -> dm_env.TimeStep:
return super().base_reset(
observation=self._numeric_observations[self.episode_counter % 2])
def step(self, action: base.ArrayTree) -> dm_env.TimeStep:
internal_action = self.map_external_to_internal_action(action)
return super().base_step(
success=internal_action == self.best_next_internal_action(),
terminate=True)
def best_next_internal_action(self) -> base.InternalAction:
"""Returns the best next action based on the current state of the env."""
signal = (self.episode_counter % 2) == 0
return base.InternalAction.HIGH if signal else base.InternalAction.LOW
def list_test_tasks(observation_spec: base.SpecsTree) -> Sequence[str]:
"""Returns available test-tasks of TestCase.
Args:
observation_spec: defines the observations consumed by the agent.
"""
names = []
for node in tree.flatten(observation_spec):
if np.issubdtype(node.dtype, np.number):
names.append(node.name)
return [f"sensitivity@{name}@{n}"
for name, n in itertools.product(names, [-1, 0, 1])]
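# Worked example of the scaling above (the observation name "float" is an
# assumption): for the task "sensitivity@float@-2" the factor is 10**-2, so
# the agent has to distinguish an all-zeros observation from one filled
# with 0.01.
#
#   factor = 10**int("-2")   # == 0.01
#   high = np.full((4,), factor, dtype=np.float32)
#   low = np.zeros((4,), dtype=np.float32)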
|
tsuite-main
|
tsuite/_src/sensitivity.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for knows_prerecorded_sequence."""
from absl.testing import absltest
import numpy as np
from tsuite._src import agent
from tsuite._src import base
from tsuite._src import knows_prerecorded_sequence
from tsuite._src import test_utils
from tsuite._src import tsuite
SUCCESS = base.SUCCESS
FAIL = base.FAIL
class KnowsPrerecordedSequenceTest(test_utils.TSuiteTest):
def test_knows_prerecorded_sequence_correct_behaviour(self):
env = tsuite.TSuiteEnvironment(
'knows_prerecorded_sequence',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Plays one episode with the correct behaviour.
timestep = env.reset()
# The following action sequence corresponds to the "secret" in the
# knows_prerecorded_sequence.TestCase class.
for discrete_action in [0, 3, 3, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 3, 3, 0]:
action = ({'a': [np.array(discrete_action, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, SUCCESS)
def test_knows_prerecorded_sequence_incorrect_behaviour(self):
env = tsuite.TSuiteEnvironment(
'knows_prerecorded_sequence',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Plays one episode with the incorrect behaviour.
timestep = env.reset()
for discrete_action in [0, 3, 3, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 3, 3, 2]:
action = ({'a': [np.array(discrete_action, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, FAIL)
def test_knows_prerecorded_sequence_with_best_action(self):
env = tsuite.TSuiteEnvironment(
'knows_prerecorded_sequence',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Runs 5 episodes with optimal action-sequence.
for _ in range(5):
timestep = env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION))
self.assertIsNone(timestep.reward)
while not timestep.last():
timestep = env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION))
self.assertEqual(timestep.reward, SUCCESS)
def test_with_agent(self):
logs = agent.fit_agent_to_tsuite_task(
'knows_prerecorded_sequence',
n_updates=50)
self.assertLess(logs[-1]['value'], 0.1)
def test_knows_prerecorded_sequence_list_test_tasks(self):
self.assertSetEqual(
set(knows_prerecorded_sequence.list_test_tasks()),
{'knows_prerecorded_sequence'})
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/knows_prerecorded_sequence_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple updater using JAX ecosystem.
This updater can be used for supervised learning, unsupervised learning and
reinforcement learning. Within tsuite it is used to test the tsuite tasks with
a simple actor-critic agent in an RL setup (see agent.py).
"""
from collections.abc import Callable, Mapping, Sequence
from typing import Any, TypeVar, Union
from absl import logging
import chex
import haiku as hk
import jax
import numpy as np
import optax
T = TypeVar('T')
def _assert_no_jax_tracers(nest):
def _assert(x):
msg = 'Detected jax.core.Tracer! This code should not be jit-compiled.'
assert not isinstance(x, jax.core.Tracer), msg
jax.tree_map(_assert, nest)
class Updater():
"""Simple Updater using the JAX ecosystem."""
# pylint: disable=g-bare-generic
def __init__(
self,
optimizer: optax.GradientTransformation,
loss: Callable[..., Union[tuple, chex.Array]],
metrics: dict[str, Callable],
rng_key: chex.Array,
*loss_args,
**loss_kwargs):
(self._update_rng_key,
self._metric_rng_key,
self._transformed_rng_key,
init_rng_key) = jax.random.split(rng_key, 4)
loss_init, self._loss_apply = hk.transform_with_state(loss)
self._net_params, self._net_state = jax.jit(loss_init)(
init_rng_key, *loss_args, **loss_kwargs)
self._opt_state = optimizer.init(self._net_params)
self._step_counter = 0
self._loss = 0
self._loss_ema = 0
self._ema_decay = 0.99
self._metrics = {k: jax.jit(hk.transform_with_state(m).apply)
for k, m in metrics.items()}
self._logs = []
def _update(
opt_state: chex.ArrayTree,
net_params: chex.ArrayTree,
net_state: chex.ArrayTree,
rng_key: chex.Array,
*loss_args,
**loss_kwargs,
) -> tuple[chex.ArrayTree, chex.ArrayTree, chex.ArrayTree, chex.ArrayTree]:
def loss(net_params, net_state):
loss_output, net_state = self._loss_apply(
net_params, net_state, rng_key, *loss_args, **loss_kwargs)
if isinstance(loss_output, tuple):
return loss_output[0], (net_state, loss_output)
return loss_output, (net_state, (loss_output,))
gradients, (net_state, loss_output) = jax.grad(
loss, has_aux=True)(net_params, net_state)
updates, opt_state = optimizer.update(gradients, opt_state)
net_params = optax.apply_updates(net_params, updates)
return opt_state, net_params, net_state, loss_output
self._update = jax.jit(_update)
def __call__(
self, *loss_args, **loss_kwargs
) -> tuple[chex.ArrayTree, chex.ArrayTree, chex.ArrayTree]:
self._update_rng_key, rng_key = jax.random.split(self._update_rng_key)
(self._opt_state,
self._net_params,
self._net_state,
loss_output) = self._update(
self._opt_state, self._net_params, self._net_state, rng_key,
*loss_args, **loss_kwargs)
self._step_counter += 1
self._loss = float(loss_output[0])
if self._step_counter == 1:
self._loss_ema = self._loss
self._loss_ema *= self._ema_decay
self._loss_ema += (1 - self._ema_decay) * self._loss
return self._net_params, self._net_state, loss_output
def add_loss_and_ema_loss_to_log(self) -> None:
"""Returns logs and adds loss and ema loss to the logs of the updater."""
self._logs.append(dict(
label='Train',
name='loss',
step=self._step_counter,
value=self._loss))
self._logs.append(dict(
label='Train',
name='ema_loss',
step=self._step_counter,
value=self._loss_ema))
def add_metrics_to_log(
self, label: str, *args, **kwargs) -> None:
"""Returns metric logs and adds them to the logs of the updater."""
self._metric_rng_key, *rng_keys = jax.random.split(
self._metric_rng_key, len(self._metrics) + 1)
metric_logs = []
for (name, fn), rng_key in zip(
self._metrics.items(), rng_keys, strict=True):
value = fn(self._net_params, self._net_state, rng_key, *args, **kwargs)[0]
metric = dict(
label=label, name=name, step=self._step_counter, value=float(value))
log_msg = f"{metric['label']} {metric['name']} {metric['value']:.3f}"
logging.info(log_msg)
metric_logs.append(metric)
self._logs.extend(metric_logs)
def transform(
self, fn: Callable[..., T], jit: bool = False) -> Callable[..., T]:
"""Returns hk.transform_with_state (and optionally jitted) function."""
apply_fn = hk.transform_with_state(fn).apply
if jit:
apply_fn = jax.jit(apply_fn)
def transformed_fn(*args, **kwargs):
# Ensure that the user doesn't jit compile the transformed_fn, because
# otherwise the params, state and rng is fixed!
# We could have a more general solution here, but for now we just make
# sure that it does break with an assert instead of silently failing.
_assert_no_jax_tracers((args, kwargs))
self._transformed_rng_key, rng_key = jax.random.split(
self._transformed_rng_key)
output, unused_net_state = apply_fn(
self._net_params, self._net_state, rng_key, *args, **kwargs)
return output
return transformed_fn
@property
def logs(self) -> Sequence[Mapping[str, Any]]:
return self._logs
def mini_batch_generator(
dataset: chex.Array,
*additional_datasets,
batch_size: int,
n_epochs: int,
progress_callback_fn: Callable[[float], Any] = lambda x: None,
):
"""Yields mini-batches.
Usage in a colab:
.. code-block:: python
from colabtools.interactive_widgets import ProgressBar
def mini_batch_generator(*args, **kwargs):
progress_bar = ProgressBar()
progress_bar.Publish()
yield from updater.mini_batch_generator(
*args, **kwargs, progress_callback_fn=progress_bar.SetProgress)
progress_bar.Unpublish()
Usage for supervised learning on images with labels:
.. code-block:: python
for x, y in updater.mini_batch_generator(images, labels):
...
Args:
dataset: the dataset to split into mini-batches.
*additional_datasets: additional datasets to split into mini-batches.
batch_size: mini-batch size.
n_epochs: number of epochs.
progress_callback_fn: called once per mini-batch with a float indicating
the progress between 0 and 100.
"""
n_samples = len(dataset)
indices = np.arange(n_samples)
total = n_epochs * n_samples // batch_size
i = 0
for epoch in range(n_epochs):
np.random.shuffle(indices)
for batch in range(0, n_samples, batch_size):
progress_callback_fn(i / total * 100.0)
batch_indices = indices[batch:batch+batch_size]
additional_batch = tuple(d[batch_indices] for d in additional_datasets)
yield epoch, dataset[batch_indices], *additional_batch
i += 1
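# Illustrative end-to-end sketch, mirroring the pattern in updater_test.py
# (the network, loss, data arrays x_train / y_train and the jnp alias are
# assumptions):
#
#   def loss(x, y):
#     logits = hk.nets.MLP(output_sizes=[16, 2])(x)
#     return jnp.mean(optax.softmax_cross_entropy(
#         logits=logits, labels=jax.nn.one_hot(y, 2)))
#
#   my_updater = Updater(
#       optimizer=optax.adam(learning_rate=1e-3), loss=loss, metrics={},
#       rng_key=jax.random.PRNGKey(42), x=x_train[:1], y=y_train[:1])
#   for _, x, y in mini_batch_generator(
#       x_train, y_train, batch_size=16, n_epochs=1):
#     my_updater(x, y)
#   my_updater.add_loss_and_ema_loss_to_log()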
|
tsuite-main
|
tsuite/_src/updater.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for reward."""
from absl.testing import absltest
import numpy as np
from tsuite._src import base
from tsuite._src import reward
from tsuite._src import test_utils
from tsuite._src import tsuite
SUCCESS = base.SUCCESS
class RewardTest(test_utils.TSuiteTest):
def test_reward(self):
env = tsuite.TSuiteEnvironment(
'reward', self._action_spec, self._observation_spec,
default_action_name='discrete',
default_observation_name='float')
timestep = env.reset()
self.assertTrue(timestep.first())
action = ({'a': [np.array(0, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, SUCCESS)
def test_reward_with_best_action(self):
env = tsuite.TSuiteEnvironment(
'reward', self._action_spec, self._observation_spec,
default_action_name='discrete',
default_observation_name='float')
# Runs 5 episodes with optimal action-sequence.
for _ in range(5):
self.assertIsNone(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward)
self.assertEqual(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward,
SUCCESS)
def test_reward_list_test_tasks(self):
self.assertSetEqual(
set(reward.list_test_tasks()),
{'reward'})
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/reward_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for agent."""
from absl.testing import absltest
from absl.testing import parameterized
from tsuite._src import agent
class AgentTest(parameterized.TestCase):
def test_agent(self):
logs = agent.fit_agent_to_tsuite_task('overfit')
self.assertGreater(logs[-1]['value'], 0.9)
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/agent_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for zero_discount."""
from absl.testing import absltest
import numpy as np
from tsuite._src import agent
from tsuite._src import base
from tsuite._src import test_utils
from tsuite._src import tsuite
from tsuite._src import zero_discount
SUCCESS = base.SUCCESS
class ZeroDiscountTest(test_utils.TSuiteTest):
def test_zero_discount(self):
env = tsuite.TSuiteEnvironment(
'zero_discount', self._action_spec, self._observation_spec,
default_action_name='discrete',
default_observation_name='float')
env.reset()
action = ({'a': [np.array(0, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.discount, 0.0)
timestep = env.step(action)
self.assertEqual(timestep.reward, SUCCESS)
self.assertEqual(timestep.discount, 0.0)
def test_discount_with_best_action(self):
env = tsuite.TSuiteEnvironment(
'zero_discount', self._action_spec, self._observation_spec,
default_action_name='discrete',
default_observation_name='float')
# Runs 5 episodes with optimal action-sequence.
for _ in range(5):
self.assertIsNone(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward)
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION))
self.assertEqual(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward,
SUCCESS)
def test_with_agent(self):
logs = agent.fit_agent_to_tsuite_task(
'zero_discount',
early_stopping_mean_return=0.9)
self.assertGreater(logs[-1]['value'], 0.9)
def test_zero_discount_list_test_tasks(self):
self.assertSetEqual(
set(zero_discount.list_test_tasks()),
{'zero_discount'})
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/zero_discount_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Checks for violations in causality."""
from collections.abc import Sequence
import dm_env
from tsuite._src import base
class TestCase(base.TestCase):
"""Tests if information from the future is leaked to the agent.
This test can only be solved by leaking information from the future. If an
agent passes this test, it indicates a problem with causality in the agent. In
particular, the last observation contains the information necessary to choose
the correct action.
"""
def expected_reward_to_pass_test(self):
# We expect the agent to NOT learn to solve this task!
return (base.SUCCESS + base.FAIL) / 2
def step(self, action: base.ArrayTree) -> dm_env.TimeStep:
expected = self.best_next_internal_action()
    # The observation in the last time-step leaks the secret of the current
    # episode!
return super().base_step(
success=self.map_external_to_internal_action(action) == expected,
observation=self.get_observation(self._get_current_signal()),
terminate=True)
def _get_current_signal(self) -> bool:
return (self.episode_counter % 2) == 0
def best_next_internal_action(self) -> base.InternalAction:
"""Returns the best next action based on the current state of the env."""
if self._get_current_signal():
return base.InternalAction.HIGH
else:
return base.InternalAction.LOW
def list_test_tasks() -> Sequence[str]:
"""Returns available test-tasks of TestCase."""
return ["future_leakage"]
|
tsuite-main
|
tsuite/_src/future_leakage.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for updater."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
from tsuite._src import updater
class UpdaterTest(parameterized.TestCase):
def test_updater(self):
y_train = np.random.randint(2, size=1000)
x_train = np.random.normal(y_train, 0.5, size=1000)[..., None]
y_test = np.random.randint(2, size=1000)
x_test = np.random.normal(y_test, 0.5, size=1000)[..., None]
def network(x: chex.Array) -> chex.Array:
return hk.nets.MLP(output_sizes=[16, 2])(x)
def loss(x: chex.Array, y: chex.Array) -> chex.Array:
return jnp.mean(optax.softmax_cross_entropy(
logits=network(x), labels=jax.nn.one_hot(y, 2)))
def accuracy(x: chex.Array, y: chex.Array) -> chex.Array:
return jnp.mean(jnp.argmax(network(x), axis=1) == y)
my_updater = updater.Updater(
optimizer=optax.adam(learning_rate=1e-3),
loss=loss,
rng_key=jax.random.PRNGKey(42),
metrics=dict(accuracy=accuracy),
x=x_train[:1],
y=y_train[:1])
generator = updater.mini_batch_generator(
x_train, y_train, batch_size=16, n_epochs=1)
for _, x, y in generator:
my_updater(x, y)
my_updater.add_loss_and_ema_loss_to_log()
my_updater.add_metrics_to_log('Test', x_test, y_test)
self.assertGreater(my_updater.logs[-1]['value'], 0.7)
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/updater_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for language."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tsuite._src import base
from tsuite._src import language
from tsuite._src import test_utils
from tsuite._src import tsuite
SUCCESS = base.SUCCESS
FAIL = base.FAIL
class LanguageTest(test_utils.TSuiteTest):
@parameterized.parameters(
('raw_text', 'content',),
('raw_text', 'length',),
)
def test_language_correct_behaviour(self, observation_name, mode):
env = tsuite.TSuiteEnvironment(
f'language@{observation_name}@{mode}',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Plays two episodes with the correct behaviour.
for _ in range(2):
timestep = env.reset()
discrete_action = 2
if mode == 'content':
if timestep.observation[observation_name] == 'left':
discrete_action = 3
if timestep.observation[observation_name] == 'right':
discrete_action = 0
if mode == 'length':
if timestep.observation[observation_name] == 'no no':
discrete_action = 3
if timestep.observation[observation_name] == 'no no no':
discrete_action = 0
action = ({'a': [np.array(discrete_action, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, SUCCESS)
@parameterized.parameters(('raw_text', 'content',), ('raw_text', 'length',))
def test_language_incorrect_behaviour(self, observation_name, mode):
env = tsuite.TSuiteEnvironment(
f'language@{observation_name}@{mode}',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Plays two episodes with the incorrect behaviour.
for _ in range(2):
env.reset()
action = ({'a': [np.array(2, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, FAIL)
def test_language_with_best_action(self):
env = tsuite.TSuiteEnvironment(
'language@raw_text@length',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Runs 5 episodes with optimal action-sequence.
for _ in range(5):
self.assertIsNone(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward)
self.assertEqual(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward,
SUCCESS)
def test_language_list_test_tasks(self):
self.assertSetEqual(
set(language.list_test_tasks(self._observation_spec)),
{'language@raw_text@content', 'language@raw_text@length'})
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/language_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the causality inference of the agent."""
from collections.abc import Sequence
import random
import dm_env
import tree
from tsuite._src import base
class TestCase(base.TestCase):
"""Tests the causality inference of the agent.
The agent receives a reward if it's able to output a high / low value for the
default action depending on the value of a specific observation. A second
observation is provided which is merely correlated with the reward.
Every episode in this test case is of length 1. The observation in an episode
can either contain a signal or not. If the observation contains a signal, the
agent is expected to output the "high" action; otherwise the agent is expected
to output the "low" action to get the reward.
This test ensures that the agent infers the correct causal structure.
"""
def __init__(
self,
causal_observation_name: str,
correlated_observation_name: str,
correlation_percentage: str,
**kwargs):
kwargs["default_observation_name"] = causal_observation_name
self._causal_observation_name = causal_observation_name
self._correlated_observation_name = correlated_observation_name
self._correlation_percentage = int(correlation_percentage)
super().__init__(**kwargs)
def reset(self) -> dm_env.TimeStep:
# The episode counter is not yet increased, so we check for == 1 here
# instead of == 0.
signal_obs = []
if (self.episode_counter % 2) == 1:
signal_obs.append(self._causal_observation_name)
if (random.random() * 100) < self._correlation_percentage:
signal_obs.append(self._correlated_observation_name)
observation = tree.map_structure(
base.make_signal_injector_visitor_fn(signal_obs),
self._observation_spec)
return super().base_reset(observation=observation)
def step(self, action: base.ArrayTree) -> dm_env.TimeStep:
internal_action = self.map_external_to_internal_action(action)
return super().base_step(
success=internal_action == self.best_next_internal_action(),
observation=self.get_observation(),
terminate=True)
def best_next_internal_action(self) -> base.InternalAction:
"""Returns the best next action based on the current state of the env."""
signal = (self.episode_counter % 2) == 0
return base.InternalAction.HIGH if signal else base.InternalAction.LOW
def list_test_tasks(observation_spec: base.SpecsTree) -> Sequence[str]:
"""Returns available test-tasks of TestCase.
Args:
observation_spec: defines the observations consumed by the agent.
"""
names = [spec.name for spec in tree.flatten(observation_spec)]
tasks = []
for n1 in names:
for n2 in names:
if n1 != n2:
for percentage in [90, 99]:
tasks.append(f"causal@{n1}@{n2}@{percentage}")
return tasks
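# For example, observations named "float" and "rgb" yield task names such as
# "causal@float@rgb@90" and "causal@rgb@float@99".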
|
tsuite-main
|
tsuite/_src/causal.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the language capabilities of the agent."""
from collections.abc import Sequence
import itertools
import dm_env
import numpy as np
import tree
from tsuite._src import base
class TestCase(base.TestCase):
"""Tests the language capabilities of the agent.
The agent receives a reward if it's able to output a high / low value for the
default action depending on the value of a text-like observation. Depending
on the mode, the agent has to be able to distinguish:
- `content`: the word "left" from the word "right",
- `length`: the text "no no" from the text "no no no".
"""
def __init__(self, observation_name: str, mode: str, **kwargs):
kwargs["default_observation_name"] = observation_name
super().__init__(**kwargs)
if mode == "content":
def high_visitor(path, node):
if path == self._default_observation_path:
node = np.broadcast_to(np.array("left", np.str_), node.shape)
return node
def low_visitor(path, node):
if path == self._default_observation_path:
node = np.broadcast_to(np.array("right", np.str_), node.shape)
return node
elif mode == "length":
def high_visitor(path, node):
if path == self._default_observation_path:
node = np.broadcast_to(np.array("no no", np.str_), node.shape)
return node
def low_visitor(path, node):
if path == self._default_observation_path:
node = np.broadcast_to(np.array("no no no", np.str_), node.shape)
return node
else:
raise ValueError(f"Unknown mode {mode} passed to LanguageTestCase.")
self._text_observations = [
tree.map_structure_with_path(low_visitor, self.get_observation()),
tree.map_structure_with_path(high_visitor, self.get_observation())
]
def reset(self) -> dm_env.TimeStep:
return super().base_reset(
observation=self._text_observations[self.episode_counter % 2])
def step(self, action: base.ArrayTree) -> dm_env.TimeStep:
internal_action = self.map_external_to_internal_action(action)
return super().base_step(
success=internal_action == self.best_next_internal_action(),
terminate=True)
def best_next_internal_action(self) -> base.InternalAction:
"""Returns the best next action based on the current state of the env."""
signal = (self.episode_counter % 2) == 0
return base.InternalAction.HIGH if signal else base.InternalAction.LOW
def list_test_tasks(observation_spec: base.SpecsTree) -> Sequence[str]:
"""Returns available test-tasks of TestCase.
Args:
observation_spec: defines the observations consumed by the agent.
"""
names = []
for node in tree.flatten(observation_spec):
# Dtype indicates text observation.
if node.dtype.type in [np.bytes_, np.str_]:
names.append(node.name)
modes = ["content", "length"]
return [f"language@{name}@{mode}"
for name, mode in itertools.product(names, modes)]
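# For example, a text observation named "raw_text" yields the tasks
# "language@raw_text@content" and "language@raw_text@length".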
|
tsuite-main
|
tsuite/_src/language.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple Actor-critic agent.
The agent learns to output a binary action, and expects simple float-valued
observations.
It can be used to learn most of the tsuite-tasks by calling the
fit_agent_to_tsuite_task function.
The implementation is a simple actor-critic, as described e.g. here
https://arxiv.org/abs/1602.01783.
"""
from collections.abc import Mapping, Sequence
from typing import Any, Optional
import chex
import distrax
import dm_env
from dm_env import specs
import haiku as hk
import jax
from jax import numpy as jnp
import numpy as np
import optax
import rlax
from tsuite._src import tsuite
from tsuite._src import updater
_N_DISCRETE_ACTIONS = 2
_ACTION_SPEC = specs.DiscreteArray(
num_values=_N_DISCRETE_ACTIONS, name='discrete')
_OBSERVATION_NAME_1 = 'float'
_OBSERVATION_NAME_2 = 'float_2'
_OBSERVATION_SPEC = {
_OBSERVATION_NAME_1: specs.BoundedArray(
shape=(1,), minimum=-1, maximum=1,
dtype=np.float32, name=_OBSERVATION_NAME_1),
_OBSERVATION_NAME_2: specs.BoundedArray(
shape=(1,), minimum=-1, maximum=1,
dtype=np.float32, name=_OBSERVATION_NAME_2),
}
_AGENT_DISCOUNT = 0.99
_AGENT_LAMBDA = 0.9
_AGENT_ENTROPY_WEIGHT = 2e-2
expand_dim = rlax.tree_fn(lambda x: jnp.expand_dims(x, axis=0))
squeeze_dim = rlax.tree_fn(lambda x: jnp.squeeze(x, axis=0))
def _get_core() -> hk.RNNCore:
return hk.ResetCore(hk.LSTM(16))
def _initial_state(batch_size: int) -> chex.ArrayTree:
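# The initial state of the core has no trainable parameters, so the
# hk.transform'd function below can be applied with an empty params dict ({}).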
_, net_state_init = hk.without_apply_rng(
hk.transform(lambda bs: _get_core().initial_state(bs)))
return net_state_init({}, batch_size)
def _network(
timesteps: dm_env.TimeStep,
state: chex.ArrayTree,
) -> tuple[distrax.Distribution, chex.Array, chex.ArrayTree]:
"""Returns agent policy, value and state."""
# Network supports arbitrary observation-specs by:
# - flattening the observation tree
# - reshaping all observations to [T, B, C]
# - converting all observations to float32
# - concatenating all observations into a single array.
obs = jax.tree_util.tree_leaves(timesteps.observation)
obs = [x.reshape(x.shape[:2] + (-1,)) for x in obs]
obs = [x.astype(jnp.float32) for x in obs]
obs = jnp.concatenate(obs, axis=-1)
x = hk.BatchApply(hk.nets.MLP(output_sizes=[8, 8]))(obs)
x, state = hk.dynamic_unroll(
_get_core(), (x, timesteps.step_type == dm_env.StepType.FIRST), state)
x = hk.BatchApply(hk.nets.MLP(output_sizes=[8, _N_DISCRETE_ACTIONS]))(x)
v = hk.BatchApply(hk.nets.MLP(output_sizes=[8, 1]))(x)
return distrax.Softmax(x), jnp.squeeze(v, axis=-1), state
def _actor_step(
timesteps: dm_env.TimeStep,
state: chex.ArrayTree) -> tuple[chex.Array, chex.ArrayTree]:
"""Compute actions for an entire batch of environments."""
dist, unused_value, state = _network(expand_dim(timesteps), state)
return squeeze_dim(dist.sample(seed=hk.next_rng_key())), state
def _batch(trees: Sequence[chex.ArrayTree]) -> chex.ArrayTree:
return jax.tree_map(lambda *x: jnp.stack(x, axis=0), *trees)
def _mean_return(timesteps: dm_env.TimeStep) -> chex.Array:
n_episodes = jnp.sum(timesteps.step_type == dm_env.StepType.LAST)
n_episodes = jnp.maximum(n_episodes, 1)
return jnp.sum(timesteps.reward) / n_episodes
def _actor_critic_loss(
timesteps: dm_env.TimeStep,
actions: chex.Array,
state: chex.ArrayTree) -> chex.Array:
"""Returns actor-critic loss."""
# Mask, with 0s only on timesteps of type LAST.
mask_last = jnp.logical_not(timesteps.step_type == dm_env.StepType.LAST)
mask_tm1 = mask_last[:-1]
mask_t = mask_last[1:]
r_t = timesteps.reward[1:]
d_t = timesteps.discount[1:] * _AGENT_DISCOUNT * mask_t
dist, v, unused_state = _network(timesteps, state)
# Compute lambda return.
v_tm1 = v[:-1]
v_t = v[1:]
batch_lambda_returns = jax.vmap(
rlax.lambda_returns, in_axes=(1, 1, 1, None), out_axes=1)
returns = batch_lambda_returns(r_t, d_t, v_t, _AGENT_LAMBDA)
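# Lambda-return targets for v_tm1, computed backwards in time:
#   G[i] = r_t[i] + d_t[i] * ((1 - lambda) * v_t[i] + lambda * G[i + 1]),
# interpolating between one-step TD targets and Monte-Carlo returns.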
# Value loss.
delta_tm1 = jax.lax.stop_gradient(returns) - v_tm1
v_loss = jnp.mean(mask_tm1 * jnp.square(delta_tm1)) / 2.
# Policy gradient loss.
logpia_tm1 = dist.log_prob(actions)[:-1]
pi_loss = -jnp.mean(mask_tm1 * jax.lax.stop_gradient(delta_tm1) * logpia_tm1)
# Entropy loss
entropy_tm1 = dist.entropy()[:-1]
entropy_loss = -jnp.mean(mask_tm1 * entropy_tm1)
# Sum and weight losses.
total_loss = v_loss + pi_loss + _AGENT_ENTROPY_WEIGHT * entropy_loss
return total_loss
def fit_agent_to_tsuite_task(
tsuite_task: str,
batch_size: int = 16,
unroll_length: int = 16,
n_updates: int = 100,
early_stopping_mean_return: Optional[float] = None,
) -> Sequence[Mapping[str, Any]]:
"""Returns logs of training."""
envs = []
for _ in range(batch_size):
envs.append(tsuite.TSuiteEnvironment(
tsuite_task, _ACTION_SPEC, _OBSERVATION_SPEC, remove_nones=True))
timesteps_b = _batch([env.reset() for env in envs])
actions_b = _batch([
env.read_property(tsuite.PROPERTY_RANDOM_ACTION) for env in envs])
initial_state_b = _initial_state(batch_size)
my_updater = updater.Updater(
optimizer=optax.adam(learning_rate=1e-2),
loss=_actor_critic_loss,
rng_key=jax.random.PRNGKey(42),
metrics=dict(accuracy=_mean_return),
timesteps=_batch([timesteps_b] * unroll_length),
actions=_batch([actions_b] * unroll_length),
state=initial_state_b)
transformed_actor_step = my_updater.transform(_actor_step, jit=True)
state_b = initial_state_b
timesteps_tb = []
actions_tb = []
for _ in range(n_updates):
first_state_b = state_b
# Reset time-batch buffers, keeping the last item around, creating
# an overlap of one between subsequent unrolls.
# On the first iteration the buffers will be empty.
timesteps_tb = timesteps_tb[-1:]
actions_tb = actions_tb[-1:]
while len(timesteps_tb) < unroll_length:
actions_b, state_b = transformed_actor_step(timesteps_b, state_b)
timesteps_tb.append(timesteps_b)
actions_tb.append(actions_b)
timesteps_b = _batch(
[envs[i].step(actions_b[i]) for i in range(batch_size)])
my_updater(_batch(timesteps_tb), _batch(actions_tb), first_state_b)
my_updater.add_metrics_to_log('Test', _batch(timesteps_tb))
if early_stopping_mean_return is not None:
if my_updater.logs[-1]['value'] > early_stopping_mean_return:
break
return my_updater.logs
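# Illustrative usage (mirrors the unit tests; any tsuite test-task name works):
#   logs = fit_agent_to_tsuite_task('discount@0.99', early_stopping_mean_return=0.9)
#   print(logs[-1]['value'])  # the last logged metric value (mean return)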
|
tsuite-main
|
tsuite/_src/agent.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slow_env."""
import time
from absl.testing import absltest
import numpy as np
from tsuite._src import base
from tsuite._src import slow_env
from tsuite._src import test_utils
from tsuite._src import tsuite
SUCCESS = base.SUCCESS
FAIL = base.FAIL
class SlowEnvTest(test_utils.TSuiteTest):
def test_slow_env_correct_behaviour(self):
env = tsuite.TSuiteEnvironment(
'slow_env@100',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
start_time = time.time()
# Plays one episode with the correct behaviour.
timestep = env.reset()
# The following action sequence corresponds to the "secret" in the
# overfit.TestCase class.
for discrete_action in [0, 3, 3, 0]:
action = ({'a': [np.array(discrete_action, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
# With the slow_env, each reset and step takes at least 100 ms, hence the
# test should take at least 500 ms.
self.assertGreater(time.time() - start_time, 0.5)
self.assertEqual(timestep.reward, SUCCESS)
def test_slow_env_incorrect_behaviour(self):
env = tsuite.TSuiteEnvironment(
'slow_env@100',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
start_time = time.time()
# Plays one episode with the incorrect behaviour.
timestep = env.reset()
# The following action sequence does not correspond to the "secret" in the
# overfit.TestCase class.
for discrete_action in [0, 3, 3, 2]:
action = ({'a': [np.array(discrete_action, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
# With the slow_env, each reset and step takes at least 100 ms, hence the
# test should take at least 500 ms.
self.assertGreater(time.time() - start_time, 0.5)
self.assertEqual(timestep.reward, FAIL)
def test_slow_env_list_test_tasks(self):
self.assertSetEqual(
set(slow_env.list_test_tasks()),
{'slow_env@500'})
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/slow_env_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simulates an environment which is not thread safe."""
from collections.abc import Sequence
import threading
import time
import dm_env
from tsuite._src import base
# Global lock.
_lock = threading.Lock()
class TestCase(base.TestCase):
"""Simulates an environment which is not thread safe.
This TestCase simulates a broken environment; it is reasonable for an agent
to crash with a meaningful error message.
This test always returns a reward regardless of the agent input.
"""
def reset(self) -> dm_env.TimeStep:
if _lock.locked():
raise RuntimeError("Encountered thread safety issue!")
with _lock:
# Sleeps for 100ms to increase chance of collisions between threads.
time.sleep(0.1)
return super().base_reset()
def step(self, action: base.ArrayTree) -> dm_env.TimeStep:
if _lock.locked():
raise RuntimeError("Encountered thread safety issue!")
with _lock:
# Sleeps for 100ms to increase chance of collisions between threads.
time.sleep(0.1)
return super().base_step(success=True, terminate=True)
def list_test_tasks() -> Sequence[str]:
"""Returns available test-tasks of TestCase."""
return ["thread_safety"]
|
tsuite-main
|
tsuite/_src/thread_safety.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the visual capabilities of the agent."""
from collections.abc import Sequence
import itertools
import dm_env
import tree
from tsuite._src import base
class TestCase(base.TestCase):
"""Tests the visual capabilities of the agent.
The agent receives a reward if it's able to output a high / low value for the
default action depending on the value of an image-like observation. Depending
on the mode, the agent has to be able to distinguish:
- `color`: a signal in the red from a signal in the green channel.
- `size`: a signal in a small square from a signal in a large square.
- `vertical_position`: a signal in the upper half from a signal in the lower
half of the image.
- `horizontal_position`: a signal in the left half from a signal in the
right half of the image.
"""
def __init__(self, observation_name: str, mode: str, **kwargs):
kwargs["default_observation_name"] = observation_name
super().__init__(**kwargs)
if mode == "color":
def high_visitor(path, node):
if path == self._default_observation_path:
# Sets red channel to 1.
node[:, :, 0] = 1
return node
def low_visitor(path, node):
if path == self._default_observation_path:
# Sets green channel to 1.
node[:, :, 1] = 1
return node
elif mode == "size":
def high_visitor(path, node):
if path == self._default_observation_path:
# Creates a large square with 1.
width_div_2 = int(node.shape[0] / 2)
height_div_2 = int(node.shape[1] / 2)
width_div_4 = int(width_div_2 / 2)
height_div_4 = int(height_div_2 / 2)
node[width_div_2-width_div_4:width_div_2+width_div_4,
height_div_2-height_div_4:height_div_2+height_div_4, 0] = 1
return node
def low_visitor(path, node):
if path == self._default_observation_path:
# Creates a small square with 1.
width_div_2 = int(node.shape[0] / 2)
height_div_2 = int(node.shape[1] / 2)
width_div_8 = int(width_div_2 / 4)
height_div_8 = int(height_div_2 / 4)
node[width_div_2-width_div_8:width_div_2+width_div_8,
height_div_2-height_div_8:height_div_2+height_div_8, 0] = 1
return node
elif mode == "vertical_position":
def high_visitor(path, node):
if path == self._default_observation_path:
# Sets upper half to 1.
width_div_2 = int(node.shape[0] / 2)
node[:width_div_2, :, 0] = 1
return node
def low_visitor(path, node):
if path == self._default_observation_path:
# Sets lower half to 1.
width_div_2 = int(node.shape[0] / 2)
node[width_div_2:, :, 0] = 1
return node
elif mode == "horizontal_position":
def high_visitor(path, node):
if path == self._default_observation_path:
# Sets left half to 1.
height_div_2 = int(node.shape[1] / 2)
node[:, :height_div_2, 0] = 1
return node
def low_visitor(path, node):
if path == self._default_observation_path:
# Sets right half to 1.
height_div_2 = int(node.shape[1] / 2)
node[:, height_div_2:, 0] = 1
return node
else:
raise ValueError(f"Unknown mode {mode} passed to VisualTestCase.")
self._visual_observations = [
tree.map_structure_with_path(low_visitor, self.get_observation()),
tree.map_structure_with_path(high_visitor, self.get_observation())
]
def reset(self) -> dm_env.TimeStep:
return super().base_reset(
observation=self._visual_observations[self.episode_counter % 2])
def step(self, action: base.ArrayTree) -> dm_env.TimeStep:
internal_action = self.map_external_to_internal_action(action)
return super().base_step(
success=internal_action == self.best_next_internal_action(),
terminate=True)
def best_next_internal_action(self) -> base.InternalAction:
"""Returns the best next action based on the current state of the env."""
signal = (self.episode_counter % 2) == 0
return base.InternalAction.HIGH if signal else base.InternalAction.LOW
def list_test_tasks(observation_spec: base.SpecsTree) -> Sequence[str]:
"""Returns available test-tasks of TestCase.
Args:
observation_spec: defines the observations consumed by the agent.
"""
names = []
for node in tree.flatten(observation_spec):
# Shape indicates RGB or RGBA observation.
if len(node.shape) == 3 and node.shape[2] in [3, 4]:
names.append(node.name)
modes = ["color", "size", "vertical_position", "horizontal_position"]
return [f"visual@{name}@{mode}"
for name, mode in itertools.product(names, modes)]
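# For example, an RGB observation named "rgb" yields "visual@rgb@color",
# "visual@rgb@size", "visual@rgb@vertical_position" and
# "visual@rgb@horizontal_position".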
|
tsuite-main
|
tsuite/_src/visual.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Checks for cross contamination between episodes."""
from collections.abc import Sequence
import dm_env
from tsuite._src import base
class TestCase(base.TestCase):
"""Tests if episodes are cross contaminated.
This test can only be solved by leaking information between episodes. If an
agent passes this test, it indicates a problem with the resetting of its
memory. In particular, the last observation leaks into the next episode.
"""
def expected_reward_to_pass_test(self):
# We expect the agent to NOT learn to solve this task!
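# An agent guessing HIGH / LOW uniformly at random earns SUCCESS in half of
# the episodes and FAIL in the other half, i.e. chance level.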
return (base.SUCCESS + base.FAIL) / 2
def step(self, action: base.ArrayTree) -> dm_env.TimeStep:
expected = self.best_next_internal_action()
next_signal = (self.episode_counter + 1) % 2 == 0
# The observation in the last time-step leaks the secret of the next episode!
return super().base_step(
success=self.map_external_to_internal_action(action) == expected,
observation=self.get_observation(next_signal),
terminate=True)
def best_next_internal_action(self) -> base.InternalAction:
"""Returns the best next action based on the current state of the env."""
if (self.episode_counter % 2) == 0:
return base.InternalAction.HIGH
else:
return base.InternalAction.LOW
def list_test_tasks() -> Sequence[str]:
"""Returns available test-tasks of TestCase."""
return ["cross_contamination"]
|
tsuite-main
|
tsuite/_src/cross_contamination.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests if agent uses its discount correctly.
The overall undiscounted return of the agent is 1.0 if the agent uses its
discount rate correctly; otherwise it will be larger or smaller than 1.0.
"""
from collections.abc import Sequence
import dm_env
import numpy as np
from tsuite._src import base
N_OPTIMAL_STEPS = 9
# Approximation of the partial harmonic sum: H_n = sum_{k=1}^{n} 1/k
_HARMONIC_FACTOR = (
np.log(N_OPTIMAL_STEPS-1) + np.euler_gamma + 1 / (2 * (N_OPTIMAL_STEPS-1)))
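# Sanity check for N_OPTIMAL_STEPS = 9: the approximation gives
# H_8 ~= ln(8) + 0.5772 + 1/16 ~= 2.719, close to the exact partial sum ~2.718.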
class TestCase(base.TestCase):
"""Tests if agent takes its discount correctly into account."""
def __init__(self, target_discount: str, **kwargs):
"""Initializes a new ActionSpaceTestCase.
Args:
target_discount: discount-rate of the agent.
**kwargs: additional keyword arguments forwarded to the base class.
"""
target_discount = float(target_discount)
# The agent receives a decreasing intermediate reward alpha/step_count and a
# constant terminal reward. Alpha is chosen such that the discounted return
# is maximal if the agent terminates the episode after N_OPTIMAL_STEPS steps,
# and the overall undiscounted return is 1.0 in that case.
self._alpha = (1 - target_discount) * N_OPTIMAL_STEPS
self._terminal_reward = 1 / (1 + self._alpha * _HARMONIC_FACTOR)
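# With intermediate rewards terminal_reward * alpha / k for k = 1..N-1 plus the
# terminal reward itself, the undiscounted return at the optimal stopping point
# is terminal_reward * (1 + alpha * H_{N-1}), which the division above
# normalises to 1.0.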
super().__init__(**kwargs)
def _get_intermediate_reward(self) -> float:
return self._terminal_reward * self._alpha / self.step_counter
def step(self, action: base.ArrayTree) -> dm_env.TimeStep:
self.step_counter += 1
internal_action = self.map_external_to_internal_action(action)
observation = self.get_observation()
if internal_action == base.InternalAction.HIGH:
return dm_env.termination(
reward=self._terminal_reward, observation=observation)
else:
return dm_env.transition(
reward=self._get_intermediate_reward(), observation=observation)
def best_next_internal_action(self) -> base.InternalAction:
"""Returns the best next action based on the current state of the env."""
if (self.step_counter + 1) < N_OPTIMAL_STEPS:
return base.InternalAction.LOW
return base.InternalAction.HIGH
def list_test_tasks() -> Sequence[str]:
"""Returns available test-tasks of TestCase."""
return ["discount@0.99"]
|
tsuite-main
|
tsuite/_src/discount.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for crashing_env."""
from absl.testing import absltest
import numpy as np
from tsuite._src import crashing_env
from tsuite._src import test_utils
from tsuite._src import tsuite
class CrashingEnvTest(test_utils.TSuiteTest):
def test_env_does_not_crash_with_probability_0(self):
env = tsuite.TSuiteEnvironment(
'crashing_env@0',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
timestep = env.reset()
self.assertTrue(timestep.first())
action = ({'a': [np.array(1, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
# Ensure that environment does not crash for 100 steps.
for _ in range(100):
env.step(action)
def test_env_does_crash_with_probability_1(self):
env = tsuite.TSuiteEnvironment(
'crashing_env@100',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
with self.assertRaises(RuntimeError):
env.reset()
def test_crashing_env_list_test_tasks(self):
self.assertSetEqual(
set(crashing_env.list_test_tasks()),
{'crashing_env@1'})
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/crashing_env_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unittest Reinforcement Learning environment."""
from collections.abc import Sequence, Mapping
import copy
from typing import Any, Optional
from absl import logging
import dm_env
import numpy as np
import tree
from tsuite._src import action_space
from tsuite._src import bad_observation
from tsuite._src import bad_timestep
from tsuite._src import base
from tsuite._src import causal
from tsuite._src import crashing_env
from tsuite._src import cross_contamination
from tsuite._src import custom
from tsuite._src import discount
from tsuite._src import future_leakage
from tsuite._src import knows_prerecorded_sequence
from tsuite._src import language
from tsuite._src import latency
from tsuite._src import memory
from tsuite._src import observation_space
from tsuite._src import overfit
from tsuite._src import reward
from tsuite._src import sensitivity
from tsuite._src import slow_env
from tsuite._src import thread_safety
from tsuite._src import visual
from tsuite._src import zero_discount
PROPERTY_BEST_ACTION = "best_action"
PROPERTY_WORST_ACTION = "worst_action"
PROPERTY_RANDOM_ACTION = "random_action"
def list_test_tasks(
action_spec: base.ActionSpecsTree,
observation_spec: base.SpecsTree,
include_broken_env_tasks: bool = True,
) -> Sequence[str]:
"""Returns a list of all available test-tasks."""
action_spec = base.set_names_in_spec(action_spec)
observation_spec = base.set_names_in_spec(observation_spec)
tasks = [
*action_space.list_test_tasks(action_spec),
*discount.list_test_tasks(),
*causal.list_test_tasks(observation_spec),
*cross_contamination.list_test_tasks(),
*custom.list_test_tasks(),
*future_leakage.list_test_tasks(),
*knows_prerecorded_sequence.list_test_tasks(),
*latency.list_test_tasks(),
*language.list_test_tasks(observation_spec),
*memory.list_test_tasks(),
*observation_space.list_test_tasks(observation_spec),
*overfit.list_test_tasks(),
*reward.list_test_tasks(),
*sensitivity.list_test_tasks(observation_spec),
*slow_env.list_test_tasks(),
*visual.list_test_tasks(observation_spec),
*zero_discount.list_test_tasks(),
]
# The following tasks simulate a broken environment. It is reasonable for an
# agent to crash with a meaningful error message for these test tasks.
if include_broken_env_tasks:
tasks += [
*bad_observation.list_test_tasks(observation_spec),
*bad_timestep.list_test_tasks(),
*crashing_env.list_test_tasks(),
*thread_safety.list_test_tasks(),
]
return tasks
class TSuiteEnvironment(dm_env.Environment):
"""A test environment built on the dm_env.Environment class.
Supported test-cases are:
- action_space: tests the action space of the agent, see
`action_space.TestCase`.
- bad_observation: tests for bad observations, see
`bad_observation.TestCase`.
- bad_timestep: tests for bad timesteps, see `bad_timestep.TestCase`.
- causal: tests that an agent discovers the correct causal relationship, see
`causal.TestCase`.
- crashing_env: tests behaviour on environment errors,
see `crashing_env.TestCase`.
- cross_contamination: checks for cross contamination between episodes, see
`cross_contamination.TestCase`.
- custom: tests user-provided custom test-cases, see `custom.TestCase`.
- discount: tests that an agent uses its discount correctly, see
`discount.TestCase`.
- future_leakage: checks for violations in causality, see
`future_leakage.TestCase`.
- knows_prerecorded_sequence: tests the ability to learn a prerecorded
sequence e.g. from expert demonstrations, see
`knows_prerecorded_sequence.TestCase`.
- latency: tests the latency guarantees of the agent,
see `latency.TestCase`.
- language: tests the language capabilities of the agent, see
`language.TestCase`.
- memory: tests the memory of the agent, see `memory.TestCase`.
- observation_space: tests the observation space of the agent, see
`observation_space.TestCase`.
- overfit: tests the ability of the agent to overfit to a sequence, see
`overfit.TestCase`.
- reward: tests the reward processing, see `reward.TestCase`.
- sensitivity: tests the agent's sensitivity to differently scaled numerical
observations, see `sensitivity.TestCase`.
- slow_env: tests the ability of the agent to overfit to a sequence from a
slow environment, see `slow_env.TestCase`.
- thread_safety: test for threading issues if the environment is not thread
safe, see `thread_safety.TestCase`.
- visual: tests the visual capabilities of the agent, see
`visual.TestCase`.
- zero_discount: tests if the agent works with zero discount, see
`zero_discount.TestCase`.
Additional arguments required by the test-cases are encoded in the test-task
name and are separated by the '@' character.
For instance,
- to test if the agent can output the maximum value of a discrete action
named "discrete", the agent should be tested on the test task with the
name "action_space@discrete@high".
- to test if the agent can retain provided information for 5 environment
steps, the agent should be tested on the test task with the name
"memory@5".
"""
def __init__(self,
test_task: str,
action_spec: base.ActionSpecsTree,
observation_spec: base.SpecsTree,
default_action_name: Optional[str] = None,
default_observation_name: Optional[str] = None,
validate_action_spec: bool = True,
remove_nones: bool = False,
verbose_logging: bool = False):
"""Initializes a new unittest environment.
Args:
test_task: name of the test task.
action_spec: defines the action space of the agent.
observation_spec: defines the observations consumed by the agent.
default_action_name: name of the action in the action_spec, which is
used to check for a reaction from the agent by default.
default_observation_name: name of the observation in the observation_spec,
which is used to provide signals to the agent by default.
validate_action_spec: whether to validate the given action in the step
function against the action_spec.
remove_nones: whether to remove None values from the reward and discount
in the first timestep.
verbose_logging: whether to log additional information like actions,
timesteps, etc. This can be useful for debugging purposes.
"""
logging.info("Unittest test task %s", test_task)
logging.info("Unittest ActionSpec %s", action_spec)
logging.info("Unittest ObservationSpec %s", observation_spec)
logging.info("Unittest default action name %s", default_action_name)
logging.info("Unittest default observation name %s",
default_observation_name)
self._action_spec = copy.deepcopy(action_spec)
self._observation_spec = copy.deepcopy(observation_spec)
self._reset_next_step = True
self._validate_action_spec = validate_action_spec
self._remove_nones = remove_nones
self._verbose_logging = verbose_logging
impl_cls: Mapping[str, base.TestCaseCtor] = {
"action_space": action_space.TestCase,
"bad_observation": bad_observation.TestCase,
"bad_timestep": bad_timestep.TestCase,
"causal": causal.TestCase,
"crashing_env": crashing_env.TestCase,
"cross_contamination": cross_contamination.TestCase,
"custom": custom.TestCase,
"discount": discount.TestCase,
"future_leakage": future_leakage.TestCase,
"knows_prerecorded_sequence": knows_prerecorded_sequence.TestCase,
"latency": latency.TestCase,
"language": language.TestCase,
"memory": memory.TestCase,
"observation_space": observation_space.TestCase,
"overfit": overfit.TestCase,
"reward": reward.TestCase,
"sensitivity": sensitivity.TestCase,
"slow_env": slow_env.TestCase,
"thread_safety": thread_safety.TestCase,
"visual": visual.TestCase,
"zero_discount": zero_discount.TestCase,
}
case, *args = test_task.split("@")
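# For example "causal@float@rgb@90" splits into case="causal" and
# args=["float", "rgb", "90"], which are forwarded to the TestCase below.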
self._impl = impl_cls[case](
*args,
action_spec=self._action_spec,
observation_spec=self._observation_spec,
default_action_name=default_action_name,
default_observation_name=default_observation_name)
def read_property(self, key: str) -> Any:
self._verbose_log(f"Called read_property with key {key}.")
if key == PROPERTY_BEST_ACTION:
value = self._impl.map_internal_to_external_action(
self._impl.best_next_internal_action())
elif key == PROPERTY_WORST_ACTION:
internal_action = self._impl.best_next_internal_action()
if internal_action == base.InternalAction.LOW:
internal_action = base.InternalAction.HIGH
elif internal_action == base.InternalAction.HIGH:
internal_action = base.InternalAction.LOW
# Note: Leave internal_action == base.InternalAction.NOOP as it is.
value = self._impl.map_internal_to_external_action(internal_action)
elif key == PROPERTY_RANDOM_ACTION:
def random_action_from_spec(spec) -> base.ArrayTree:
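# Assumes bounded leaf specs, i.e. specs exposing `minimum` and `maximum`.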
if np.issubdtype(spec.dtype, np.integer):
return np.random.randint(
spec.minimum, spec.maximum + 1, dtype=spec.dtype)
elif np.issubdtype(spec.dtype, np.inexact):
return np.random.uniform(
spec.minimum, spec.maximum).astype(spec.dtype)
else:
raise ValueError(f"Unsupported dtype {spec.dtype} for action spec.")
value = tree.map_structure(random_action_from_spec, self._action_spec)
else:
value = ""
self._verbose_log(f"Finished read_property returning {value}.")
return value
def _verbose_log(self, log: str):
if self._verbose_logging:
logging.info(log)
def write_property(self, key: str, value: str) -> None:
logging.warning("Attempting to write property %s: %s", key, value)
pass
def _maybe_remove_none(self, timestep: dm_env.TimeStep) -> dm_env.TimeStep:
if self._remove_nones and timestep.first():
if timestep.reward is None:
self._verbose_log("Replacing None reward with 0.0.")
timestep = timestep._replace(reward=0.0)
if timestep.discount is None:
self._verbose_log("Replacing None discount with 1.0.")
timestep = timestep._replace(discount=1.0)
return timestep
def reset(self) -> dm_env.TimeStep:
"""Returns the first `TimeStep` of a new episode."""
self._verbose_log("Called reset.")
self._reset_next_step = False
ts = copy.deepcopy(self._maybe_remove_none(self._impl.reset()))
self._verbose_log(f"Finished reset returning timestep {ts}.")
return ts
def step(self, action: base.ArrayTree) -> dm_env.TimeStep:
"""Updates the environment according to the action."""
self._verbose_log(f"Called step with action {action}.")
if self._reset_next_step:
ts = self.reset()
self._verbose_log(f"Finished step returning timestep {ts}.")
return ts
if self._validate_action_spec:
self._verbose_log("Validating action.")
tree.map_structure(
lambda leaf_spec, leaf_action: leaf_spec.validate(leaf_action),
self._action_spec, action)
transition = self._impl.step(action)
if transition.last():
self._reset_next_step = True
ts = copy.deepcopy(self._maybe_remove_none(transition))
self._verbose_log(f"Finished step returning timestep {ts}.")
return ts
def observation_spec(self) -> base.SpecsTree:
"""Returns the observation spec."""
self._verbose_log("Called observation_spec.")
spec = copy.deepcopy(self._observation_spec)
self._verbose_log(f"Finished observation_spec returning spec {spec}.")
return spec
def action_spec(self) -> base.SpecsTree:
"""Returns the action spec."""
self._verbose_log("Called action_spec.")
spec = copy.deepcopy(self._action_spec)
self._verbose_log(f"Finished action_spec returning spec {spec}.")
return spec
|
tsuite-main
|
tsuite/_src/tsuite.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cross_contamination."""
from absl.testing import absltest
import numpy as np
from tsuite._src import agent
from tsuite._src import base
from tsuite._src import cross_contamination
from tsuite._src import test_utils
from tsuite._src import tsuite
SUCCESS = base.SUCCESS
FAIL = base.FAIL
class CrossContaminationTest(test_utils.TSuiteTest):
def test_cross_contamination_correct_behaviour(self):
env = tsuite.TSuiteEnvironment(
'cross_contamination',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Plays two episodes with the correct behaviour.
for discrete_action in [0, 3]:
env.reset()
action = ({'a': [np.array(discrete_action, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, SUCCESS)
def test_cross_contamination_incorrect_behaviour(self):
env = tsuite.TSuiteEnvironment(
'cross_contamination',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Plays four episodes with the incorrect behaviour.
for discrete_action in [3, 0, 1, 2]:
env.reset()
action = ({'a': [np.array(discrete_action, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, FAIL)
def test_cross_contamination_with_best_action(self):
env = tsuite.TSuiteEnvironment(
'cross_contamination',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Runs 5 episodes with optimal action-sequence.
for _ in range(5):
self.assertIsNone(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward)
self.assertEqual(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward,
SUCCESS)
def test_with_agent(self):
logs = agent.fit_agent_to_tsuite_task(
'cross_contamination',
n_updates=50)
self.assertLess(logs[-1]['value'], 0.6)
self.assertGreater(logs[-1]['value'], 0.4)
def test_cross_contamination_list_test_tasks(self):
self.assertSetEqual(
set(cross_contamination.list_test_tasks()),
{'cross_contamination'})
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/cross_contamination_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for discount."""
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import dm_env
import numpy as np
from tsuite._src import agent
from tsuite._src import base
from tsuite._src import discount
from tsuite._src import test_utils
from tsuite._src import tsuite
SUCCESS = base.SUCCESS
def _get_returns_for_n_step_episode(
env: dm_env.Environment,
n_steps: int,
gamma: float) -> tuple[float, float]:
env.reset()
low_action = ({'a': [np.array(0, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
high_action = ({'a': [np.array(3, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
total_return = 0.0
discounted_return = 0.0
total_gamma = 1.0
for i in range(n_steps):
timestep = env.step(high_action if i+1 == n_steps else low_action)
total_return += timestep.reward
discounted_return += total_gamma * timestep.reward
total_gamma *= gamma
return total_return, discounted_return
class DiscountTest(test_utils.TSuiteTest):
@parameterized.parameters(
(0.9,),
(0.95,),
(0.99,),
)
def test_discount(self, gamma):
env = tsuite.TSuiteEnvironment(
f'discount@{gamma}', self._action_spec, self._observation_spec,
default_action_name='discrete',
default_observation_name='float')
r_nm1, dr_nm1 = _get_returns_for_n_step_episode(
env, discount.N_OPTIMAL_STEPS-1, gamma)
r_n, dr_n = _get_returns_for_n_step_episode(
env, discount.N_OPTIMAL_STEPS, gamma)
r_np1, dr_np1 = _get_returns_for_n_step_episode(
env, discount.N_OPTIMAL_STEPS+1, gamma)
r_np2, dr_np2 = _get_returns_for_n_step_episode(
env, discount.N_OPTIMAL_STEPS+2, gamma)
# The undiscounted return is close to one for the optimal number of steps.
self.assertAlmostEqual(r_n, 1.0, places=3)
self.assertLess(r_nm1, 1.0)
self.assertGreater(r_np1, 1.0)
self.assertGreater(r_np2, r_np1)
# The discounted return has a maximum at the optimal number of steps.
self.assertLess(dr_nm1, dr_n)
self.assertLessEqual(dr_np1, dr_n)
self.assertLess(dr_np2, dr_n)
def test_discount_with_best_action(self):
env = tsuite.TSuiteEnvironment(
'discount@0.99', self._action_spec, self._observation_spec,
default_action_name='discrete',
default_observation_name='float')
# Runs 5 episodes with optimal action-sequence.
for _ in range(5):
ts = env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION))
self.assertIsNone(ts.reward)
total_return = 0.0
while not ts.last():
action = env.read_property(tsuite.PROPERTY_BEST_ACTION)
ts = env.step(action)
logging.info(ts.reward)
total_return += ts.reward
self.assertAlmostEqual(total_return, SUCCESS, places=2)
def test_with_agent(self):
logs = agent.fit_agent_to_tsuite_task(
'discount@0.99',
early_stopping_mean_return=0.9)
self.assertGreater(logs[-1]['value'], 0.9)
def test_discount_list_test_tasks(self):
self.assertSetEqual(
set(discount.list_test_tasks()),
{'discount@0.99'})
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/discount_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the ability of the agent to learn a prerecorded sequence."""
from collections.abc import Sequence
import dm_env
from tsuite._src import base
class TestCase(base.TestCase):
"""Tests the capability of the agent to learn a prerecorded sequence.
The agent only receives a reward if it learns to output a certain sequence of
high and low values for the default action:
(1xL, 2xH, 3xL, 4xH, 3xL, 2xH, 1xL).
In total there are 16 actions, hence it is extremely unlikely that the agent
can discover the solution on its own. This task should therefore only be
solvable if suitable expert demonstrations are provided.
"""
def __init__(self, **kwargs):
"""Initializes a new TestCase.
Args:
**kwargs: additional keyword arguments forwarded to the base class.
"""
super().__init__(**kwargs)
self._secret = [base.InternalAction.LOW,
base.InternalAction.HIGH,
base.InternalAction.HIGH,
base.InternalAction.LOW,
base.InternalAction.LOW,
base.InternalAction.LOW,
base.InternalAction.HIGH,
base.InternalAction.HIGH,
base.InternalAction.HIGH,
base.InternalAction.HIGH,
base.InternalAction.LOW,
base.InternalAction.LOW,
base.InternalAction.LOW,
base.InternalAction.HIGH,
base.InternalAction.HIGH,
base.InternalAction.LOW]
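# Run-length encoding of the secret: 1xL, 2xH, 3xL, 4xH, 3xL, 2xH, 1xL
# (16 actions in total), matching the sequence in the class docstring.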
self._sequence = []
def reset(self) -> dm_env.TimeStep:
self._sequence = []
return super().base_reset()
def step(self, action: base.ArrayTree) -> dm_env.TimeStep:
self._sequence.append(self.map_external_to_internal_action(action))
if len(self._sequence) < len(self._secret):
return super().base_step()
return super().base_step(
success=self._sequence == self._secret, terminate=True)
def best_next_internal_action(self) -> base.InternalAction:
"""Returns the best next action based on the current state of the env."""
n = len(self._sequence)
if n >= len(self._secret):
return self._secret[0]
return self._secret[n]
def list_test_tasks() -> Sequence[str]:
"""Returns available test-tasks of TestCase."""
return ["knows_prerecorded_sequence"]
|
tsuite-main
|
tsuite/_src/knows_prerecorded_sequence.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for visual."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tsuite._src import base
from tsuite._src import test_utils
from tsuite._src import tsuite
from tsuite._src import visual
SUCCESS = base.SUCCESS
FAIL = base.FAIL
class VisualTest(test_utils.TSuiteTest):
@parameterized.parameters(
('rgb', 'color',),
('rgb', 'vertical_position',),
('rgb', 'horizontal_position',),
('rgb', 'size',),
)
def test_visual_correct_behaviour(self, observation_name, mode):
env = tsuite.TSuiteEnvironment(
f'visual@{observation_name}@{mode}',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Plays two episodes with the correct behaviour.
for _ in range(2):
timestep = env.reset()
discrete_action = 2
if mode == 'color':
if np.mean(timestep.observation[observation_name][:, :, 0]) > 0.0:
discrete_action = 3
if np.mean(timestep.observation[observation_name][:, :, 1]) > 0.0:
discrete_action = 0
if mode == 'size':
if np.mean(timestep.observation[observation_name][:3, :, 0]) > -1.0:
discrete_action = 3
else:
discrete_action = 0
if mode == 'vertical_position':
if np.mean(timestep.observation[observation_name][:4, :, 0]) > 0.0:
discrete_action = 3
if np.mean(timestep.observation[observation_name][4:, :, 0]) > 0.0:
discrete_action = 0
if mode == 'horizontal_position':
if np.mean(timestep.observation[observation_name][:, :4, 0]) > 0.0:
discrete_action = 3
if np.mean(timestep.observation[observation_name][:, 4:, 0]) > 0.0:
discrete_action = 0
action = ({'a': [np.array(discrete_action, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, SUCCESS)
@parameterized.parameters(
('rgb', 'color',),
('rgb', 'vertical_position',),
('rgb', 'horizontal_position',),
('rgb', 'size',),
)
def test_visual_incorrect_behaviour(self, observation_name, mode):
env = tsuite.TSuiteEnvironment(
f'visual@{observation_name}@{mode}',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Plays two episodes with the incorrect behaviour.
for _ in range(2):
env.reset()
action = ({'a': [np.array(2, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, FAIL)
def test_visual_with_best_action(self):
env = tsuite.TSuiteEnvironment(
'visual@rgb@size',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Runs 5 episodes with optimal action-sequence.
for _ in range(5):
self.assertIsNone(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward)
self.assertEqual(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward,
SUCCESS)
def test_visual_list_test_tasks(self):
self.assertSetEqual(
set(visual.list_test_tasks(self._observation_spec)),
{'visual@rgb@color', 'visual@rgb@size', 'visual@rgb@vertical_position',
'visual@rgb@horizontal_position'})
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/visual_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simulates an environment which crashing with a certain probability."""
from collections.abc import Sequence
import random
import dm_env
from tsuite._src import base
class TestCase(base.TestCase):
"""Simulates an environment which crashes with a certain probability.
This TestCase simulates a broken environment; when it raises, it is reasonable
for an agent to crash with a meaningful error message.
If no crash is triggered, this test always returns a reward regardless of the
agent input.
"""
def __init__(self, crash_probability_in_percent: str, **kwargs):
"""Initializes a new TestCase.
Args:
crash_probability_in_percent: crash probability in percent.
**kwargs: additional keyword arguments forwarded to the base class.
"""
super().__init__(**kwargs)
self._crash_probability_in_percent = int(crash_probability_in_percent)
if self._crash_probability_in_percent < 0:
raise ValueError("crash_probability_in_percent must be positive.")
if self._crash_probability_in_percent > 100:
raise ValueError("crash_probability_in_percent must be <= 100.")
def reset(self) -> dm_env.TimeStep:
self._maybe_raise_intentional_error()
return super().base_reset()
def step(self, action: base.ArrayTree) -> dm_env.TimeStep:
self._maybe_raise_intentional_error()
return super().base_step(success=True, terminate=True)
def _maybe_raise_intentional_error(self):
if random.random() * 100 < self._crash_probability_in_percent:
raise RuntimeError("This is an intentional error for testing purposes.")
def list_test_tasks() -> Sequence[str]:
"""Returns available test-tasks of TestCase."""
return ["crashing_env@1"]
|
tsuite-main
|
tsuite/_src/crashing_env.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the reward processing."""
from collections.abc import Sequence
import dm_env
from tsuite._src import base
class TestCase(base.TestCase):
"""Tests if reward is correctly passed to the agent.
This test always returns a reward regardless of the agent input.
"""
def step(self, action: base.ArrayTree) -> dm_env.TimeStep:
del action
return super().base_step(
success=True,
terminate=True)
def list_test_tasks() -> Sequence[str]:
"""Returns available test-tasks of TestCase."""
return ["reward"]
|
tsuite-main
|
tsuite/_src/reward.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the action space of the agent."""
from collections.abc import Sequence
import itertools
import dm_env
import numpy as np
import tree
from tsuite._src import base
class TestCase(base.TestCase):
"""Tests the action space of the agent.
The agent receives a reward if it is able to output a high / low value
for a specific action.
This test ensures that the agent can take advantage of the entire action
space.
"""
def __init__(self, action_name: str, mode: str, **kwargs):
"""Initializes a new ActionSpaceTestCase.
Args:
action_name: name of the tested action.
mode: defines which output receives the reward, either high or low.
**kwargs: additional keyword arguments forwarded to the base class.
"""
kwargs["default_action_name"] = action_name
super().__init__(**kwargs)
if mode == "high":
self._expected = base.InternalAction.HIGH
elif mode == "low":
self._expected = base.InternalAction.LOW
else:
raise ValueError(f"Unknown mode {mode} passed to ActionSpaceTestCase.")
def step(self, action) -> dm_env.TimeStep:
internal_action = self.map_external_to_internal_action(action)
return super().base_step(success=internal_action == self._expected,
terminate=True)
def best_next_internal_action(self) -> base.InternalAction:
"""Returns the best next action based on the current state of the env."""
return self._expected
def list_test_tasks(action_spec: base.ActionSpecsTree) -> Sequence[str]:
"""Returns available test-tasks of TestCase.
Args:
action_spec: defines the action space of the agent.
"""
names = []
for node in tree.flatten(action_spec):
n_dimensions = len(node.shape)
if n_dimensions == 0:
names.append(node.name)
elif "|" in node.name:
names += list(node.name.split("|"))
else:
names += [node.name + f"_{index}"
for index in range(np.prod(node.shape))]
modes = ["high", "low"]
return [f"action_space@{name}@{mode}"
for name, mode in itertools.product(names, modes)]
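# Illustrative sketch, not part of the original module: shows how a scalar
# action and a shape-(2,) vector action expand into per-dimension test-task
# names such as "action_space@move_1@low". The spec names "jump" and "move"
# are hypothetical and chosen only for this example.
def _example_list_test_tasks() -> Sequence[str]:
  """Returns the test-task names derived from a small hypothetical spec."""
  from dm_env import specs  # Local import keeps the example self-contained.
  action_spec = {
      "jump": specs.BoundedArray(
          shape=(), dtype=np.int32, minimum=0, maximum=1, name="jump"),
      "move": specs.BoundedArray(
          shape=(2,), dtype=np.float32, minimum=-1.0, maximum=1.0,
          name="move"),
  }
  return list_test_tasks(action_spec)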
|
tsuite-main
|
tsuite/_src/action_space.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic definitions for the tsuite environment."""
from collections.abc import Callable, Iterable, Mapping, Sequence
import enum
import hashlib
from typing import Any, Optional, Union, Protocol
from absl import logging
import dm_env
from dm_env import specs
import numpy as np
import tree
ArrayTree = Union[np.ndarray, Iterable["ArrayTree"], Mapping[Any, "ArrayTree"]]
SpecsTree = Union[specs.Array, Iterable["SpecsTree"], Mapping[Any, "SpecsTree"]]
ActionSpecType = Union[specs.BoundedArray, specs.StringArray]
ActionSpecsTree = Union[
ActionSpecType,
Iterable["ActionSpecsTree"],
Mapping[Any, "ActionSpecsTree"]]
class InternalAction(enum.Enum):
"""Discretized action space used by the test-cases.
All test-cases assume that the agent is able to communicate three distinct
states with their default action.
See `TestCase.get_action` for details on how discrete and continuous actions
are mapped to these three states.
"""
LOW, NOOP, HIGH = list(range(3))
class ExternalStringAction(str, enum.Enum):
"""External actions of type string that match to the internal actions.
The strings have been manually chosen to map to 0, 1, 2 via the
map_external_to_internal_action method of the TestCase below.
"""
LOW = "red"
NOOP = "green"
HIGH = "blue"
# Rewards returned by TestCase for success and fail.
SUCCESS = 1.0
FAIL = 0.0
# Minimum and maximum values for observations.
OBSERVATION_MAX_DEFAULT = 1
OBSERVATION_MIN_DEFAULT = 0
def _fingerprint(x: Union[str, np.ndarray, bytes]) -> int:
"""Returns a fingerpint of a string."""
if isinstance(x, np.ndarray):
x = x.item()
if isinstance(x, str):
x = x.encode("utf-8")
assert isinstance(x, bytes), f"Expected bytes got {type(x)} with value {x}"
return int(hashlib.sha256(x).hexdigest(), 16)
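# Illustrative sketch, not part of the original module: taking the fingerprint
# modulo 3 yields a stable bucket per string; according to the
# ExternalStringAction docstring above, "red", "green" and "blue" land in
# buckets 0, 1 and 2, matching LOW, NOOP and HIGH.
def _example_string_buckets() -> dict[str, int]:
  """Returns the bucket assigned to each external string action."""
  return {action.value: _fingerprint(action.value) % 3
          for action in ExternalStringAction}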
def set_names_in_spec(nested_spec: SpecsTree) -> SpecsTree:
"""Returns spec with None replaced by a proper name.
If the name of an element in a nested spec is None, it is replaced
with the string representation of the path of this element in the nest.
Args:
nested_spec: nested spec.
"""
def visitor_function(path, element: specs.Array) -> specs.Array:
if element.name is None:
if isinstance(element, specs.BoundedArray):
element = specs.BoundedArray(
shape=element.shape,
dtype=element.dtype,
maximum=element.maximum,
minimum=element.minimum,
name="_".join(map(str, path)))
else:
element = specs.Array(
shape=element.shape,
dtype=element.dtype,
name="_".join(map(str, path)))
return element
return tree.map_structure_with_path(visitor_function, nested_spec)
def make_signal_injector_visitor_fn(
observation_names: Sequence[str],
) -> Callable[[specs.Array], np.ndarray]:
"""Returns function that injects signal into the given observations."""
def visitor_function(node: specs.Array) -> np.ndarray:
if str(node.name) in observation_names:
if node.dtype.type in [np.bytes_, np.str_]:
# Returns an array of bytes/strings filled with '1'.
return np.full(node.shape, OBSERVATION_MAX_DEFAULT, node.dtype)
else:
value = OBSERVATION_MAX_DEFAULT
if hasattr(node, "maximum"):
value = node.maximum
return np.asarray(np.full(node.shape, value, node.dtype), node.dtype)
else:
if node.dtype.type in [np.bytes_, np.str_]:
# Returns an array of bytes/strings filled with '0'.
return np.full(node.shape, OBSERVATION_MIN_DEFAULT, node.dtype)
else:
value = OBSERVATION_MIN_DEFAULT
if hasattr(node, "minimum"):
value = node.minimum
return np.asarray(np.full(node.shape, value, node.dtype), node.dtype)
return visitor_function
def _extract_scalar_bounds_spec_with_name(
element: specs.Array, index: tuple[int, ...], name: str
) -> specs.BoundedArray | specs.StringArray:
"""Returns a spec with scalar bounds and the given name."""
if isinstance(element, specs.BoundedArray):
# The rest of the code assumes that maximum and minimum are single
# values. A new BoundedArray spec is created with the maximum and
# minimum of the selected action.
if element.maximum.shape:
maximum = element.maximum[index]
else:
maximum = element.maximum
if element.minimum.shape:
minimum = element.minimum[index]
else:
minimum = element.minimum
element = specs.BoundedArray(
shape=element.shape,
dtype=element.dtype,
maximum=maximum,
minimum=minimum,
name=name)
elif element.dtype == bool:
element = specs.BoundedArray(
shape=element.shape,
dtype=element.dtype,
maximum=True,
minimum=False,
name=name)
elif isinstance(element, specs.StringArray):
pass
else:
raise ValueError(f"Unsupported action spec {element}.")
return element
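# Illustrative sketch, not part of the original module: extracts the scalar
# bounds of one entry of a hypothetical shape-(2,) action whose bounds differ
# per dimension. The returned spec keeps the original shape but carries the
# scalar minimum/maximum of the selected index.
def _example_extract_scalar_bounds() -> specs.BoundedArray:
  """Returns the spec for index (1,) of a hypothetical vector action."""
  spec = specs.BoundedArray(
      shape=(2,), dtype=np.float32,
      minimum=[0.0, -1.0], maximum=[1.0, 2.0], name="move")
  return _extract_scalar_bounds_spec_with_name(spec, (1,), "move_1")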
class TestCaseCtor(Protocol):
"""Protocol for the test-case implementations used by tsuite."""
def __call__(self,
*args: str,
action_spec: ActionSpecsTree,
observation_spec: SpecsTree,
default_action_name: Optional[str] = None,
default_observation_name: Optional[str] = None,
verbose_logging: bool = False):
"""Initializes a new TestCase.
Args:
*args: string arguments passed to the TestCase constructor.
action_spec: defines the action space of the agent.
observation_spec: defines the observations consumed by the agent.
default_action_name: name of the action in the action_spec, which is
used to check for a reaction from the agent by default.
If None, the first action in the action spec is used as default action.
default_observation_name: name of the observation in the observation_spec,
which is used to provide signals to the agent by default.
If None, the first observation in the observation spec is used as
default observation.
verbose_logging: whether to log additional information. This can be useful
for debugging purposes.
"""
class TestCase():
"""Base class for the test-case implementations used by the unittest env."""
def __init__(self,
*,
action_spec: ActionSpecsTree,
observation_spec: SpecsTree,
default_action_name: Optional[str] = None,
default_observation_name: Optional[str] = None,
verbose_logging: bool = False):
"""Initializes a new TestCase.
Args:
action_spec: defines the action space of the agent.
observation_spec: defines the observations consumed by the agent.
default_action_name: name of the action in the action_spec, which is
used to check for a reaction from the agent by default.
If None, the first action in the action spec is used as default action.
default_observation_name: name of the observation in the observation_spec,
which is used to provide signals to the agent by default.
If None, the first observation in the observation spec is used as
default observation.
verbose_logging: whether to log additional information. This can be useful
for debugging purposes.
"""
self._action_spec = set_names_in_spec(action_spec)
self._default_action_name = default_action_name
self._observation_spec = set_names_in_spec(observation_spec)
self._default_observation_name = default_observation_name
self._verbose_logging = verbose_logging
if self._default_action_name is None:
self._default_action_name = tree.flatten(self._action_spec)[0].name
# Ensures the action name is represented as a string.
# The action-spec may contain non-string names, but internally everything
# is represented and matched based on the string representation.
self._default_action_name = str(self._default_action_name)
self._default_action_path, self._default_action_spec = (
self._parse_action_spec())
if not isinstance(self._default_action_spec, ActionSpecType):
raise ValueError(f"Unsupported action spec {self._default_action_spec}.")
if self._default_observation_name is None:
self._default_observation_name = tree.flatten(observation_spec)[0].name
# Ensures the observation name is represented as a string.
# The observation-spec may contain non-string names, but internally everything
# is represented and matched based on the string representation.
self._default_observation_name = str(self._default_observation_name)
self._default_observation_path, self._default_observation_spec = (
self._parse_observation_spec())
self.step_counter = 0
self.episode_counter = 0
def _verbose_log(self, log: str):
if self._verbose_logging:
logging.info(log)
def expected_reward_to_pass_test(self) -> float:
"""Returns the expected reward required to pass the test.
Most tests require the agent to solve the given task and obtain a reward
of 1.0. On the other hand, some tests check if an agent learns in situations
where it should not learn anything. The expected reward for those tests is
usually 0.5.
"""
return SUCCESS
def _parse_action_spec(
self) -> tuple[tuple[Any, ...], ActionSpecType]:
"""Returns the path and spec of the default action."""
self._verbose_log(f"Called _parse_action_spec for {self._action_spec}")
for path, element in tree.flatten_with_path(self._action_spec):
self._verbose_log(f"Processing {path}, {element}.")
n_dimensions = len(element.shape)
# Ensures the element name is represented as a string.
# The action-spec may contain non-string names, but internally everything
# is represented and matched based on the string representation.
name = str(element.name)
if n_dimensions == 0:
self._verbose_log(f"{name} is dimension 0.")
if self._default_action_name == name:
self._verbose_log(
f"Finished _parse_action_spec returning {path}, {element}.")
return path, element
else:
# Assumes that the name of a tensor-like action encodes the names of the
# sub-actions in the form "action1|action2|...". The gotham environment
# fulfills this assumption.
if "|" in name:
names = name.split("|")
else:
names = [name + f"_{index}"
for index in range(np.prod(element.shape))]
self._verbose_log(
f"{name} is dimension {n_dimensions} and was parsed to {names}.")
for index, name in enumerate(names):
self._verbose_log(f"Inner processing {index}, {name}.")
if self._default_action_name == name:
if index >= np.prod(element.shape):
raise ValueError(
"ActionSpec name indicates more elements than the shape!")
index = np.unravel_index(index, element.shape)
element = _extract_scalar_bounds_spec_with_name(
element, index, name)
path = path + index
self._verbose_log(
f"Finished _parse_action_spec returning {path}, {element}.")
return path, element
else:
raise ValueError(
f"Could not find element named {self._default_action_name} "
f"in {self._action_spec}")
def _parse_observation_spec(self) -> tuple[tuple[Any, ...], specs.Array]:
"""Returns the path and spec of the default observation."""
self._verbose_log(
f"Called _parse_observation_spec for {self._observation_spec}")
for path, element in tree.flatten_with_path(self._observation_spec):
if self._default_observation_name == str(element.name):
self._verbose_log(
f"Finished _parse_observation_spec returning {path}, {element}.")
return path, element
else:
raise ValueError(
f"Could not find element named {self._default_observation_name} "
f"in {self._observation_spec}")
def base_reset(
self, observation: Optional[ArrayTree] = None) -> dm_env.TimeStep:
"""Returns timestep and resets the internal state of this test case.
This function should be used by subclasses to ensure that the episodes
and the steps are correctly counted.
Args:
observation: returned observation.
"""
self._verbose_log(f"Called base_reset with {observation}.")
self._verbose_log(
f"State step: {self.step_counter} episode: {self.episode_counter}.")
self.step_counter = 0
self.episode_counter += 1
if observation is None:
observation = self.get_observation()
ts = dm_env.restart(observation)
self._verbose_log(f"Finished base_reset returning {ts}.")
return ts
def reset(self) -> dm_env.TimeStep:
return self.base_reset()
def base_step(
self,
terminate: bool = False,
success: bool = False,
observation: Optional[ArrayTree] = None) -> dm_env.TimeStep:
"""Returns timestep.
This function should be used by subclasses to ensure that the steps are
correctly counted.
Args:
terminate: whether to terminate the episode.
success: whether to return a reward or not.
observation: returned observation.
"""
self._verbose_log(
f"Called base_reset with {terminate} {success} {observation}.")
self._verbose_log(
f"State step: {self.step_counter} episode: {self.episode_counter}.")
self.step_counter += 1
if observation is None:
observation = self.get_observation()
reward = SUCCESS if success else FAIL
if terminate:
ts = dm_env.termination(reward=reward, observation=observation)
else:
ts = dm_env.transition(reward=reward, observation=observation)
self._verbose_log(f"Finished base_step returning {ts}.")
return ts
def step(self, action: ArrayTree) -> dm_env.TimeStep:
del action
return self.base_step()
def best_next_internal_action(self) -> InternalAction:
"""Returns the best next action based on the current state of the env."""
self._verbose_log("Called best_next_internal_action returning NOOP.")
return InternalAction.NOOP
def map_external_to_internal_action(
self, external_action: ArrayTree) -> InternalAction:
"""Returns the action corresponding to the default action name.
Actions are discretized into LOW, NOOP, HIGH.
- If the default action is discrete, LOW corresponds to the minimum value,
HIGH to the maximum value, and NOOP to all other values.
- If the default action is continuous, LOW corresponds to values below 10%,
HIGH to values above 90%, and NOOP to all other values, where the
percentage is obtained by linear interpolation between the minimum (0%)
and maximum (100%) value of the action.
Args:
external_action: np.array with the structure defined by the action_spec.
"""
self._verbose_log(
f"Called map_external_to_internal_action with {external_action}.")
# Traverses the action structure to extract the desired default action.
for action_path in self._default_action_path:
external_action = external_action[action_path]
if (isinstance(self._default_action_spec, specs.BoundedArray) and
(np.issubdtype(self._default_action_spec.dtype, np.integer) or
self._default_action_spec.dtype == bool)):
if external_action == self._default_action_spec.maximum:
internal_action = InternalAction.HIGH
elif external_action == self._default_action_spec.minimum:
internal_action = InternalAction.LOW
else:
internal_action = InternalAction.NOOP
elif (isinstance(self._default_action_spec, specs.BoundedArray) and
np.issubdtype(self._default_action_spec.dtype, np.inexact)):
def linear_from_spec(x, spec):
return x * (spec.maximum - spec.minimum) + spec.minimum
if external_action > linear_from_spec(0.9, self._default_action_spec):
internal_action = InternalAction.HIGH
elif external_action < linear_from_spec(0.1, self._default_action_spec):
internal_action = InternalAction.LOW
else:
internal_action = InternalAction.NOOP
elif isinstance(self._default_action_spec, specs.StringArray):
internal_action = InternalAction(_fingerprint(external_action) % 3)
else:
raise ValueError(
f"Unsupported dtype {self._default_action_spec.dtype} "
"for action spec.")
self._verbose_log(
f"Finished map_external_to_internal_action returning {internal_action}."
)
return internal_action
def map_internal_to_external_action(
self, internal_action: InternalAction) -> ArrayTree:
"""Returns action in original action-space.
Args:
internal_action: Discretized action used by tsuite internally.
"""
self._verbose_log(
f"Called map_internal_to_external_action with {internal_action}.")
def _get_external_action(path, spec: specs.BoundedArray) -> ArrayTree:
array = spec.generate_value()
# The default_action_path includes the indices of the array for
# vector-like arrays. Hence, we only check for the same prefix here.
prefix = self._default_action_path[:len(path)]
postfix = self._default_action_path[len(path):]
if path == prefix:
if internal_action == InternalAction.NOOP:
override = _spec_noop_value(spec)
elif internal_action == InternalAction.LOW:
override = _spec_min_max_value(spec)[0]
elif internal_action == InternalAction.HIGH:
override = _spec_min_max_value(spec)[1]
else:
raise ValueError(f"Unknown action: {internal_action}")
if hasattr(override, "shape") and override.shape == array.shape:
array = override
else:
array[tuple(postfix)] = override
return array
external_action = tree.map_structure_with_path(
_get_external_action, self._action_spec)
self._verbose_log(
f"Finished map_internal_to_external_action returning {external_action}."
)
return external_action
def get_observation(self, signal: bool = False) -> ArrayTree:
"""Returns observation with the structure defined by the observation_spec.
The observation consists of the minimum value for each element in the
observation spec. An additional signal can be present, represented by the
maximum value for the element with the default observation name.
Args:
signal: whether to inject a signal into the default observation.
"""
signal_obs = []
if signal:
signal_obs.append(self._default_observation_name)
return tree.map_structure(
make_signal_injector_visitor_fn(signal_obs), self._observation_spec)
def _spec_min_max_value(spec: specs.Array):
if isinstance(spec, specs.BoundedArray):
return (spec.minimum, spec.maximum)
elif isinstance(spec, specs.StringArray):
return (ExternalStringAction.LOW.value, ExternalStringAction.HIGH.value)
elif spec.dtype == bool:
return (False, True)
else:
iinfo = np.iinfo(spec.dtype)
return (iinfo.min, iinfo.max)
def _spec_noop_value(spec: specs.Array):
"""Returns noop value for external action given its spec."""
if np.issubdtype(spec.dtype, np.integer):
min_value, max_value = _spec_min_max_value(spec)
return np.asarray((min_value + max_value) // 2).astype(spec.dtype)
elif np.issubdtype(spec.dtype, np.inexact):
min_value, max_value = _spec_min_max_value(spec)
return np.asarray((min_value + max_value) / 2.0).astype(spec.dtype)
elif isinstance(spec, specs.StringArray):
return np.asarray(
ExternalStringAction.NOOP.value).astype(spec.dtype)
elif spec.dtype == bool:
return False
else:
raise ValueError(f"Unsupported dtype {spec.dtype} for action spec.")
|
tsuite-main
|
tsuite/_src/base.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for causal."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tsuite._src import agent
from tsuite._src import base
from tsuite._src import causal
from tsuite._src import test_utils
from tsuite._src import tsuite
SUCCESS = base.SUCCESS
FAIL = base.FAIL
class CausalTest(test_utils.TSuiteTest):
@parameterized.parameters(
('text',),
('dvector',),
('float',),
)
def test_causal_correct_behaviour(self, identifier):
env = tsuite.TSuiteEnvironment(
f'causal@rgb@{identifier}@90',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Plays two episodes with the correct behaviour.
for _ in range(2):
timestep = env.reset()
if np.mean(timestep.observation['rgb']) > 0.0:
discrete_action = 3
else:
discrete_action = 0
action = ({'a': [np.array(discrete_action, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, SUCCESS)
@parameterized.parameters(
('text',),
('dvector',),
('float',),
)
def test_causal_incorrect_behaviour(self, identifier):
env = tsuite.TSuiteEnvironment(
f'causal@rgb@{identifier}@90',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Plays two episodes with the incorrect behaviour.
for _ in range(2):
env.reset()
action = ({'a': [np.array(2, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, FAIL)
def test_causal_with_best_action(self):
env = tsuite.TSuiteEnvironment(
'causal@rgb@float@90',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Runs 5 episodes with optimal action-sequence.
for _ in range(5):
self.assertIsNone(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward)
self.assertEqual(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward,
SUCCESS)
def test_with_agent(self):
logs = agent.fit_agent_to_tsuite_task(
'causal@float@float_2@90',
early_stopping_mean_return=0.9)
self.assertGreater(logs[-1]['value'], 0.9)
def test_causal_list_test_tasks(self):
self.assertSetEqual(
set(causal.list_test_tasks(self._observation_spec)),
{'causal@rgb@text@90', 'causal@rgb@text@99',
'causal@rgb@text_length@90', 'causal@rgb@text_length@99',
'causal@rgb@raw_text@90', 'causal@rgb@raw_text@99',
'causal@rgb@float@90', 'causal@rgb@float@99',
'causal@rgb@dvector@90', 'causal@rgb@dvector@99',
'causal@text@rgb@90', 'causal@text@rgb@99',
'causal@text@text_length@90', 'causal@text@text_length@99',
'causal@text@raw_text@90', 'causal@text@raw_text@99',
'causal@text@float@90', 'causal@text@float@99',
'causal@text@dvector@90', 'causal@text@dvector@99',
'causal@text_length@rgb@90', 'causal@text_length@rgb@99',
'causal@text_length@text@90', 'causal@text_length@text@99',
'causal@text_length@raw_text@90', 'causal@text_length@raw_text@99',
'causal@text_length@float@90', 'causal@text_length@float@99',
'causal@text_length@dvector@90', 'causal@text_length@dvector@99',
'causal@raw_text@rgb@90', 'causal@raw_text@rgb@99',
'causal@raw_text@text@90', 'causal@raw_text@text@99',
'causal@raw_text@text_length@90', 'causal@raw_text@text_length@99',
'causal@raw_text@float@90', 'causal@raw_text@float@99',
'causal@raw_text@dvector@90', 'causal@raw_text@dvector@99',
'causal@float@rgb@90', 'causal@float@rgb@99',
'causal@float@text@90', 'causal@float@text@99',
'causal@float@raw_text@90', 'causal@float@raw_text@99',
'causal@float@text_length@90', 'causal@float@text_length@99',
'causal@float@dvector@90', 'causal@float@dvector@99',
'causal@dvector@rgb@90', 'causal@dvector@rgb@99',
'causal@dvector@text@90', 'causal@dvector@text@99',
'causal@dvector@raw_text@90', 'causal@dvector@raw_text@99',
'causal@dvector@float@90', 'causal@dvector@float@99',
'causal@dvector@text_length@90', 'causal@dvector@text_length@99',
})
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/causal_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for observation_space."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tsuite._src import agent
from tsuite._src import base
from tsuite._src import observation_space
from tsuite._src import test_utils
from tsuite._src import tsuite
SUCCESS = base.SUCCESS
FAIL = base.FAIL
class ObservationSpaceTest(test_utils.TSuiteTest):
@parameterized.parameters(
('rgb',),
('text',),
('dvector',),
('float',),
)
def test_observation_space_correct_behaviour(self, identifier):
env = tsuite.TSuiteEnvironment(
f'observation_space@{identifier}',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Plays two episodes with the correct behaviour.
for _ in range(2):
timestep = env.reset()
if identifier == 'text':
if np.mean(timestep.observation[identifier][0]) > 0.0:
discrete_action = 3
else:
discrete_action = 0
else:
if np.mean(timestep.observation[identifier]) > 0.0:
discrete_action = 3
else:
discrete_action = 0
action = ({'a': [np.array(discrete_action, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, SUCCESS)
@parameterized.parameters(
('rgb',),
('text',),
('dvector',),
('float',),
)
def test_observation_space_incorrect_behaviour(self, identifier):
env = tsuite.TSuiteEnvironment(
f'observation_space@{identifier}',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Plays two episodes with the incorrect behaviour.
for _ in range(2):
timestep = env.reset()
self.assertTrue(timestep.first())
action = ({'a': [np.array(2, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, FAIL)
def test_observation_space_with_best_action(self):
env = tsuite.TSuiteEnvironment(
'observation_space@rgb',
self._action_spec,
self._observation_spec,
default_action_name='discrete')
# Runs 5 episodes with optimal action-sequence.
for _ in range(5):
self.assertIsNone(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward)
self.assertEqual(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward,
SUCCESS)
def test_with_agent(self):
logs = agent.fit_agent_to_tsuite_task(
'observation_space@float',
early_stopping_mean_return=0.9)
self.assertGreater(logs[-1]['value'], 0.9)
def test_observation_space_list_test_tasks(self):
self.assertSetEqual(
set(observation_space.list_test_tasks(self._observation_spec)),
{'observation_space@rgb', 'observation_space@text',
'observation_space@text_length', 'observation_space@raw_text',
'observation_space@dvector', 'observation_space@float'})
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/observation_space_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for memory."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tsuite._src import agent
from tsuite._src import base
from tsuite._src import memory
from tsuite._src import test_utils
from tsuite._src import tsuite
SUCCESS = base.SUCCESS
FAIL = base.FAIL
class MemoryTest(test_utils.TSuiteTest):
@parameterized.parameters(
('1',),
('2',),
('10',),
)
def test_memory_correct_behaviour(self, identifier):
env = tsuite.TSuiteEnvironment(
f'memory@{identifier}', self._action_spec, self._observation_spec,
default_action_name='discrete',
default_observation_name='float')
# Plays two episodes with the correct behaviour.
for _ in range(2):
timestep = env.reset()
if timestep.observation['float'] > 0.0:
discrete_action = 3
else:
discrete_action = 0
while not timestep.last():
action = ({'a': [np.array(discrete_action, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, SUCCESS)
@parameterized.parameters(
('1',),
('2',),
('10',),
)
def test_memory_incorrect_behaviour(self, identifier):
env = tsuite.TSuiteEnvironment(
f'memory@{identifier}', self._action_spec, self._observation_spec,
default_action_name='discrete',
default_observation_name='float')
# Plays two episodes with the incorrect behaviour.
for _ in range(2):
timestep = env.reset()
while not timestep.last():
action = ({'a': [np.array(2, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
timestep = env.step(action)
self.assertEqual(timestep.reward, FAIL)
def test_memory_incorrect_with_best_action(self):
env = tsuite.TSuiteEnvironment(
'memory@5',
self._action_spec,
self._observation_spec,
default_action_name='discrete',
default_observation_name='float')
# Runs 5 episodes with optimal action-sequence.
for _ in range(5):
timestep = env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION))
self.assertIsNone(timestep.reward)
while not timestep.last():
timestep = env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION))
self.assertEqual(timestep.reward, SUCCESS)
@parameterized.parameters(
('memory@1',),
('memory@2',),
)
def test_with_agent(self, identifier):
logs = agent.fit_agent_to_tsuite_task(
f'{identifier}',
early_stopping_mean_return=0.9)
self.assertGreater(logs[-1]['value'], 0.9)
def test_memory_list_test_tasks(self):
self.assertSetEqual(
set(memory.list_test_tasks()),
{'memory@0', 'memory@1', 'memory@2', 'memory@3', 'memory@4',
'memory@5', 'memory@6', 'memory@7', 'memory@8', 'memory@9'})
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/memory_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for latency."""
import time
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tsuite._src import base
from tsuite._src import latency
from tsuite._src import test_utils
from tsuite._src import tsuite
SUCCESS = base.SUCCESS
FAIL = base.FAIL
class LatencyTest(test_utils.TSuiteTest):
def test_input_validation(self):
with self.subTest('LatencyMustBePositive'):
with self.assertRaises(ValueError):
tsuite.TSuiteEnvironment(
'latency@-1@5',
self._action_spec, self._observation_spec,
default_action_name='discrete',
default_observation_name='float')
with self.subTest('LatencyCantBeZero'):
with self.assertRaises(ValueError):
tsuite.TSuiteEnvironment(
'latency@0@5',
self._action_spec, self._observation_spec,
default_action_name='discrete',
default_observation_name='float')
with self.subTest('EpisodeLengthMustBePositive'):
with self.assertRaises(ValueError):
tsuite.TSuiteEnvironment(
'latency@10@-1',
self._action_spec, self._observation_spec,
default_action_name='discrete',
default_observation_name='float')
with self.subTest('EpisodeLengthCantBeZero'):
with self.assertRaises(ValueError):
tsuite.TSuiteEnvironment(
'latency@10@0',
self._action_spec, self._observation_spec,
default_action_name='discrete',
default_observation_name='float')
@parameterized.parameters(
(100, 1),
(200, 1),
(100, 5),
(200, 5),
)
def test_latency_success(self, latency_in_ms, episode_length):
env = tsuite.TSuiteEnvironment(
f'latency@{latency_in_ms}@{episode_length}',
self._action_spec, self._observation_spec,
default_action_name='discrete',
default_observation_name='float')
timestep = env.reset()
self.assertTrue(timestep.first())
action = ({'a': [np.array(0, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
for i in range(episode_length):
timestep = env.step(action)
if i != episode_length - 1:
self.assertEqual(timestep.reward, 0)
self.assertTrue(timestep.mid())
self.assertEqual(timestep.reward, SUCCESS)
self.assertTrue(timestep.last())
@parameterized.parameters(
(100, 1),
(200, 1),
(100, 5),
(200, 5),
)
def test_latency_failure(self, latency_in_ms, episode_length):
env = tsuite.TSuiteEnvironment(
f'latency@{latency_in_ms}@{episode_length}',
self._action_spec, self._observation_spec,
default_action_name='discrete',
default_observation_name='float')
action = ({'a': [np.array(0, dtype=np.int32)]},
np.array([-0.5, 0.5], dtype=np.float32),
np.ones((2, 3), dtype=np.float32))
with self.subTest('FirstStepSlow'):
timestep = env.reset()
# Wait until latency constraint is violated.
time.sleep(latency_in_ms / 1000.)
self.assertTrue(timestep.first())
for i in range(episode_length):
timestep = env.step(action)
if i != episode_length - 1:
self.assertEqual(timestep.reward, 0)
self.assertTrue(timestep.mid())
self.assertEqual(timestep.reward, FAIL)
self.assertTrue(timestep.last())
with self.subTest('LastStepSlow'):
timestep = env.reset()
self.assertTrue(timestep.first())
for i in range(episode_length):
if i == episode_length - 1:
# Wait until latency constraint is violated.
time.sleep(latency_in_ms / 1000.)
timestep = env.step(action)
if i != episode_length - 1:
self.assertEqual(timestep.reward, 0)
self.assertTrue(timestep.mid())
self.assertEqual(timestep.reward, FAIL)
self.assertTrue(timestep.last())
def test_latency_with_best_action(self):
env = tsuite.TSuiteEnvironment(
'latency@100@1', self._action_spec, self._observation_spec,
default_action_name='discrete',
default_observation_name='float')
# Runs 5 episodes with optimal action-sequence.
for _ in range(5):
self.assertIsNone(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward)
self.assertEqual(
env.step(env.read_property(tsuite.PROPERTY_BEST_ACTION)).reward,
SUCCESS)
def test_reward_list_test_tasks(self):
self.assertSetEqual(
set(latency.list_test_tasks()),
{'latency@10@128', 'latency@34@128', 'latency@125@128'})
if __name__ == '__main__':
absltest.main()
|
tsuite-main
|
tsuite/_src/latency_test.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configuration file for the Sphinx documentation builder."""
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# pylint: disable=g-bad-import-order
# pylint: disable=g-import-not-at-top
import inspect
import os
import sys
import typing
def _add_annotations_import(path):
"""Appends a future annotations import to the file at the given path."""
with open(path) as f:
contents = f.read()
if contents.startswith('from __future__ import annotations'):
# If we run sphinx multiple times then we will append the future import
# multiple times too.
return
assert contents.startswith('#'), (path, contents.split('\n')[0])
with open(path, 'w') as f:
# NOTE: This is subtle and not unit tested: we prefix the first line of each
# Python file with this future import. It is important to prefix rather than
# insert a new line, so that source-code locations stay accurate (we link to
# GitHub). The assertion above ensures that the first line in the file is a
# comment, so it is safe to prefix it.
f.write('from __future__ import annotations ')
f.write(contents)
def _recursive_add_annotations_import():
for path, _, files in os.walk('../tsuite/'):
for file in files:
if file.endswith('.py'):
_add_annotations_import(os.path.abspath(os.path.join(path, file)))
if 'READTHEDOCS' in os.environ:
_recursive_add_annotations_import()
# TODO(b/254461517) Remove the annotation filtering when we drop Python 3.8
# support.
# We remove `None` type annotations as this breaks Sphinx under Python 3.7 and
# 3.8 with error `AssertionError: Invalid annotation [...] None is not a class.`
filter_nones = lambda x: dict((k, v) for k, v in x.items() if v is not None)
typing.get_type_hints = lambda obj, *unused: filter_nones(obj.__annotations__)
sys.path.insert(0, os.path.abspath('../'))
sys.path.append(os.path.abspath('ext'))
import tsuite
from sphinxcontrib import katex
# -- Project information -----------------------------------------------------
project = 'Tsuite'
copyright = '2023, DeepMind' # pylint: disable=redefined-builtin
author = 'Tsuite Contributors'
# -- General configuration ---------------------------------------------------
master_doc = 'index'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
'sphinx.ext.napoleon',
'sphinxcontrib.katex',
'sphinx_autodoc_typehints',
'sphinx_rtd_theme',
'coverage_check',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for autodoc -----------------------------------------------------
autodoc_default_options = {
'member-order': 'bysource',
'special-members': True,
'exclude-members': '__repr__, __str__, __weakref__',
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# html_favicon = '_static/favicon.ico'
# -- Options for katex ------------------------------------------------------
# See: https://sphinxcontrib-katex.readthedocs.io/en/0.4.1/macros.html
latex_macros = r"""
\def \d #1{\operatorname{#1}}
"""
# Translate LaTeX macros to KaTeX and add to options for HTML builder
katex_macros = katex.latex_defs_to_katex_macros(latex_macros)
katex_options = 'macros: {' + katex_macros + '}'
# Add LaTeX macros for LATEX builder
latex_elements = {'preamble': latex_macros}
# -- Source code links -------------------------------------------------------
def linkcode_resolve(domain, info):
"""Resolve a GitHub URL corresponding to Python object."""
if domain != 'py':
return None
try:
mod = sys.modules[info['module']]
except KeyError:
return None
obj = mod
try:
for attr in info['fullname'].split('.'):
obj = getattr(obj, attr)
except AttributeError:
return None
else:
obj = inspect.unwrap(obj)
try:
filename = inspect.getsourcefile(obj)
except TypeError:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except OSError:
return None
return 'https://github.com/deepmind/tsuite/tree/master/tsuite/%s#L%d-L%d' % (
os.path.relpath(filename, start=os.path.dirname(
tsuite.__file__)), lineno, lineno + len(source) - 1)
source_suffix = ['.rst', '.md', '.ipynb']
|
tsuite-main
|
docs/conf.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Asserts all public symbols are covered in the docs."""
import inspect
import types
from typing import Any, Mapping, Sequence, Tuple
from sphinx import application
from sphinx import builders
from sphinx import errors
import tsuite as _module
def find_internal_python_modules(
root_module: types.ModuleType,) -> Sequence[Tuple[str, types.ModuleType]]:
"""Returns `(name, module)` for all submodules under `root_module`."""
modules = set([(root_module.__name__, root_module)])
visited = set()
to_visit = [root_module]
while to_visit:
mod = to_visit.pop()
visited.add(mod)
for name in dir(mod):
obj = getattr(mod, name)
if inspect.ismodule(obj) and obj not in visited:
if obj.__name__.startswith(_module.__name__):
if '_src' not in obj.__name__:
to_visit.append(obj)
modules.add((obj.__name__, obj))
return sorted(modules)
def get_public_symbols() -> Sequence[Tuple[str, types.ModuleType]]:
names = set()
for module_name, module in find_internal_python_modules(_module):
for name in module.__all__:
names.add(module_name + '.' + name)
return tuple(names)
class CoverageCheck(builders.Builder):
"""Builder that checks all public symbols are included."""
name = 'coverage_check'
def get_outdated_docs(self) -> str:
return 'coverage_check'
def write(self, *ignored: Any) -> None:
pass
def finish(self) -> None:
documented_objects = frozenset(self.env.domaindata['py']['objects'])
undocumented_objects = set(get_public_symbols()) - documented_objects
# Exclude deprecated API symbols.
assertion_exceptions = ()
undocumented_objects -= {'tsuite.' + s for s in assertion_exceptions}
# Exclude pytypes.
pytypes_exceptions = ()
undocumented_objects -= {'tsuite.' + s for s in pytypes_exceptions}
if undocumented_objects:
undocumented_objects = tuple(sorted(undocumented_objects))
raise errors.SphinxError(
'All public symbols must be included in our documentation, did you '
'forget to add an entry to `api.rst`?\n'
f'Undocumented symbols: {undocumented_objects}')
def setup(app: application.Sphinx) -> Mapping[str, Any]:
app.add_builder(CoverageCheck)
return dict(version=_module.__version__, parallel_read_safe=True)
|
tsuite-main
|
docs/ext/coverage_check.py
|
#!/usr/bin/python
#
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
image_obfuscation_benchmark-master
|
image_obfuscation_benchmark/__init__.py
|
#!/usr/bin/python
#
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Write predictions from a model for the benchmark."""
import os
from typing import Sequence
from absl import app
from absl import flags
from absl import logging
from image_obfuscation_benchmark.eval import data_utils
from image_obfuscation_benchmark.eval import evaluate_lib
_DATASET_PATH = flags.DEFINE_string(
'dataset_path', None, 'Path to the dataset.', required=True)
_MODEL_PATH = flags.DEFINE_string(
'model_path', None, 'Path to the exported model. Can be a TF Hub address.',
required=True)
_EVALUATE_OBFUSCATION = flags.DEFINE_string(
'evaluate_obfuscation', None, 'On what obfuscation to evaluate on.',
required=True)
_NORMALIZATION = flags.DEFINE_enum_class(
'normalization', data_utils.Normalization.ZERO_ONE,
data_utils.Normalization,
'How to normalize the images. Either `zero_one` ([0, 1]), `minus_plus_one` '
'([-1, 1]) or `imagenet_channel_wise_norm` (using ImageNet mean and std).')
_OUTPUT_DIR = flags.DEFINE_string(
'output_dir', '/tmp/', 'Where to save the predictions.')
_USE_CLASS_GROUPS = flags.DEFINE_bool(
'use_class_groups', True,
'Whether to use the stylized imagenet / conflict stimuli class groups.')
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
dataset_path = _DATASET_PATH.value
model_path = _MODEL_PATH.value
obfuscation = _EVALUATE_OBFUSCATION.value
normalization = _NORMALIZATION.value
logging.info('Loading dataset for obfuscation `%s` from `%s`.',
obfuscation, dataset_path)
ds = data_utils.get_data(dataset_path=dataset_path,
obfuscation=obfuscation,
normalization=normalization)
logging.info('Loading model from `%s`.', model_path)
model = evaluate_lib.load_model(model_path)
image_ids, labels, predictions = evaluate_lib.predict(
ds, model, _USE_CLASS_GROUPS.value)
filename = os.path.join(_OUTPUT_DIR.value, f'{obfuscation}.csv')
logging.info('Saving predictions to `%s`.', filename)
evaluate_lib.save_predictions(image_ids, labels, predictions, filename)
if __name__ == '__main__':
app.run(main)
|
image_obfuscation_benchmark-master
|
image_obfuscation_benchmark/eval/predict.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for the dataset."""
import enum
from typing import Callable, MutableMapping, Optional, Sequence
import tensorflow as tf
import tensorflow_datasets as tfds
CLEAN = 'Clean'
TRAIN_OBFUSCATIONS = [
CLEAN,
'AdversarialPatches',
'BackgroundBlurComposition',
'ColorNoiseBlocks',
'Halftoning',
'HighContrastBorder',
'IconOverlay',
'ImageOverlay',
'Interleave',
'InvertLines',
'LineShift',
'PerspectiveTransform',
'PhotoComposition',
'RotateBlocks',
'RotateImage',
'StyleTransfer',
'SwirlWarp',
'TextOverlay',
'Texturize',
'WavyColorWarp',
]
HOLD_OUT_OBFUSCATIONS = [
'ColorPatternOverlay',
'LowContrastTriangles',
'PerspectiveComposition',
]
EVAL_OBFUSCATIONS = TRAIN_OBFUSCATIONS + HOLD_OUT_OBFUSCATIONS
_DATASET_NAME = 'obfuscated_imagenet'
_IMAGENET_MEAN = [0.485 * 255, 0.456 * 255, 0.406 * 255]
_IMAGENET_STD = [0.229 * 255, 0.224 * 255, 0.225 * 255]
# Type definitions.
_BATCH = MutableMapping[str, tf.Tensor]
_EVAL_SPLIT = 'validation'
class Normalization(enum.Enum):
"""What values the images are normalized to."""
ZERO_ONE = 'zero_one'
MINUS_PLUS_ONE = 'minus_plus_one'
IMAGENET_CHANNEL_WISE_NORM = 'imagenet_channel_wise_norm'
class Split(enum.Enum):
"""Imagenet dataset split."""
TRAIN = 1
TRAIN_AND_VALID = 2
VALID = 3
TEST = 4
@staticmethod
def from_string(name: str) -> 'Split':
return {
'TRAIN': Split.TRAIN,
'TRAIN_AND_VALID': Split.TRAIN_AND_VALID,
'VALID': Split.VALID,
'VALIDATION': Split.VALID,
'TEST': Split.TEST
}[name.upper()]
def get_obfuscations(split: Split) -> Sequence[str]:
return EVAL_OBFUSCATIONS if split == Split.TEST else TRAIN_OBFUSCATIONS
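# Illustrative sketch, not part of the original module: Split.from_string
# accepts either 'VALID' or 'VALIDATION', and only the TEST split exposes the
# hold-out obfuscations on top of the training ones.
def _example_obfuscation_counts() -> Sequence[int]:
  """Returns the number of obfuscations for the train and test splits."""
  return [len(get_obfuscations(Split.from_string('train'))),
          len(get_obfuscations(Split.from_string('test')))]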
def _uint8_to_unit_float(batch: _BATCH) -> _BATCH:
batch['image'] = tf.cast(batch['image'], tf.float32) / 255.0
return batch
def _uint8_to_neg1_pos1_float(batch: _BATCH) -> _BATCH:
batch['image'] = 2 * tf.cast(batch['image'], tf.float32) / 255.0 - 1
return batch
def _imagenet_channel_wise_norm(batch: _BATCH) -> _BATCH:
batch['image'] = (tf.cast(batch['image'], tf.float32) -
_IMAGENET_MEAN) / _IMAGENET_STD
return batch
def _get_normalize_fn(
normalization: Normalization) -> Callable[[_BATCH], _BATCH]:
if normalization == Normalization.ZERO_ONE:
return _uint8_to_unit_float
elif normalization == Normalization.MINUS_PLUS_ONE:
return _uint8_to_neg1_pos1_float
elif normalization == Normalization.IMAGENET_CHANNEL_WISE_NORM:
return _imagenet_channel_wise_norm
else:
raise ValueError(f'Unknown normalization: `{normalization}`.')
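# Illustrative sketch, not part of the original module: applies each of the
# three normalizations to a copy of the same all-255 uint8 batch to show the
# resulting value ranges.
def _example_normalizations() -> MutableMapping[str, tf.Tensor]:
  """Returns the normalized image tensor under each normalization scheme."""
  image = tf.cast(tf.fill([1, 2, 2, 3], 255), tf.uint8)
  return {
      norm.value: _get_normalize_fn(norm)({'image': image})['image']
      for norm in Normalization
  }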
def get_data(dataset_path: str,
obfuscation: str,
normalization: Normalization,
batch_size: int = 32,
dataset_version: Optional[str] = None,
num_samples: int = 0) -> tf.data.Dataset:
"""Builds and returns the dataset."""
if dataset_version:
dataset_name = f'{_DATASET_NAME}:{dataset_version}'
else:
dataset_name = _DATASET_NAME
ds = tfds.load(dataset_name,
data_dir=dataset_path,
split=f'{_EVAL_SPLIT}_{obfuscation}')
normalize_fn = _get_normalize_fn(normalization)
ds = ds.map(normalize_fn)
if num_samples:
ds = ds.take(num_samples)
ds = ds.batch(batch_size)
return ds
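# Illustrative usage, not part of the original module: the dataset path below
# is hypothetical and must point at a directory containing the
# `obfuscated_imagenet` TFDS dataset for this call to succeed.
def _example_get_data() -> tf.data.Dataset:
  """Returns two 8-image batches of the Halftoning validation split."""
  return get_data(
      dataset_path='/tmp/obfuscated_imagenet',  # Hypothetical path.
      obfuscation='Halftoning',
      normalization=Normalization.ZERO_ONE,
      batch_size=8,
      num_samples=16)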
|
image_obfuscation_benchmark-master
|
image_obfuscation_benchmark/eval/data_utils.py
|
#!/usr/bin/python
#
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gathers the results for all obfuscations and calculates metrics."""
import os
from typing import Sequence
from absl import app
from absl import flags
from absl import logging
from image_obfuscation_benchmark.eval import evaluate_lib
import tensorflow as tf
_LABEL_WEIGHTED = flags.DEFINE_bool(
'label_weighted', True,
'Whether accuracy should be weighted equally across the labels.')
_OUTPUT_DIR = flags.DEFINE_string(
'output_dir', None, 'Where to save the predictions.', required=True)
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
label_dict, predictions_dict = evaluate_lib.gather_predictions(
_OUTPUT_DIR.value)
metrics = evaluate_lib.calculate_metrics(
label_dict, predictions_dict, _LABEL_WEIGHTED.value)
  with tf.io.gfile.GFile(os.path.join(_OUTPUT_DIR.value, 'metrics.csv'), 'w') as f:
for name, value in metrics.items():
logging.info('%s: %.2f%%', name, 100 * value)
f.write(f'{name}, {value}\n')
if __name__ == '__main__':
app.run(main)
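# Example invocation (illustrative; the directory below is a placeholder). The
# script expects one `<obfuscation>.csv` file per test obfuscation inside
# --output_dir, as written by the prediction step, and writes `metrics.csv`
# next to them:
#   python gather_results.py --output_dir=/tmp/predictions --label_weighted=true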
|
image_obfuscation_benchmark-master
|
image_obfuscation_benchmark/eval/gather_results.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to evaluates a model for the benchmark."""
import functools
import os
from typing import Callable, Mapping, Sequence, Tuple
from absl import logging
from image_obfuscation_benchmark.eval import data_utils
import numpy as np
import scipy
import tensorflow as tf
import tensorflow_hub as hub
import tqdm
_IMAGE_SIZE = 224
_NUM_CHANNELS = 3
_TOP_N_METRICS = (1, 3, 5, 10)
_MAX_TOP_N = max(_TOP_N_METRICS)
_CONFLICT_STIMULI_GROUPS = [
[404],
[294, 295, 296, 297],
[444, 671],
[8, 10, 11, 12, 13, 14, 15, 16, 18, 19, 20, 22, 23, 24, 80, 81, 82, 83, 87,
88, 89, 90, 91, 92, 93, 94, 95, 96, 98, 99, 100, 127, 128, 129, 130, 131,
132, 133, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145],
[472, 554, 625, 814, 914],
[440, 720, 737, 898, 899, 901, 907],
[436, 511, 817],
[281, 282, 283, 284, 285, 286],
[423, 559, 765, 857],
[409, 530, 892],
[152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166,
167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181,
182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 193, 194, 195, 196, 197,
198, 199, 200, 201, 202, 203, 205, 206, 207, 208, 209, 210, 211, 212, 213,
214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 228, 229,
230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 243, 244, 245,
246, 247, 248, 249, 250, 252, 253, 254, 255, 256, 257, 259, 261, 262, 263,
265, 266, 267, 268],
[385, 386],
[508, 878],
[499],
[766],
[555, 569, 656, 675, 717, 734, 864, 867],
]
def _load_hub_model(model_path: str) -> tf.keras.Model:
model = tf.keras.Sequential([hub.KerasLayer(model_path)])
model.build([None, _IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS])
return model
def _arg_top_n(values, n):
return np.flip(np.argsort(values)[:, -n:], axis=-1)
def _group_logits(
logits: np.ndarray,
aggregate_fn: Callable[[np.ndarray], np.ndarray]) -> np.ndarray:
"""Groups logits together to use the 16 classes from Stylized ImageNet."""
new_logits = np.zeros((logits.shape[0], len(_CONFLICT_STIMULI_GROUPS)))
probabilities = scipy.special.softmax(logits, axis=-1)
for i, class_group in enumerate(_CONFLICT_STIMULI_GROUPS):
new_logits[:, i] = aggregate_fn(probabilities[:, class_group])
return new_logits
def _get_predictions(images: tf.Tensor, model: tf.keras.Model, top_n: int = 10,
use_class_groups: bool = False) -> np.ndarray:
# Some models return 1001 logits per example. The first one is for a
# background class, the rest are the 1000 ImageNet classes.
logits = model(images).numpy()[:, -1000:]
if use_class_groups:
logits = _group_logits(logits, functools.partial(np.mean, axis=-1))
return _arg_top_n(logits, top_n)
def _filter_class_groups(
image_ids: np.ndarray,
labels: np.ndarray,
predictions: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Filters by images that contain the class groups and adjusts."""
all_classes = []
for group in _CONFLICT_STIMULI_GROUPS:
for im_class in group:
all_classes.append(im_class)
all_classes = np.array(all_classes)
filter_ids = np.isin(labels, all_classes)
logging.info('Filtering images not in class groups:')
logging.info('Before: %d, after: %d.', len(filter_ids), np.sum(filter_ids))
labels = labels[filter_ids]
image_ids = image_ids[filter_ids]
predictions = predictions[filter_ids]
new_labels = -1 * np.ones_like(labels)
for i, class_group in enumerate(_CONFLICT_STIMULI_GROUPS):
new_labels[np.isin(labels, class_group)] = i
return image_ids, new_labels, predictions
def load_model(model_path: str) -> tf.keras.Model:
if model_path.startswith('http') or model_path.startswith('@'):
model = _load_hub_model(model_path)
else: # We assume it's a filepath.
model = tf.saved_model.load(model_path)
return model
def predict(dataset: tf.data.Dataset,
model: tf.keras.Model,
use_class_groups: bool = False,
top_n: int = _MAX_TOP_N
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Produces predictions for a specific dataset ordered by image_ids."""
image_ids = []
labels = []
predictions = []
for batch in tqdm.tqdm(dataset, desc='Making predictions'):
image_ids.extend(batch['file_name'].numpy().tolist())
labels.extend(list(batch['label']))
predictions.extend(
list(_get_predictions(batch['image'],
model,
top_n=top_n,
use_class_groups=use_class_groups)))
sort_idx = np.argsort(image_ids)
image_ids = np.array(image_ids)[sort_idx]
labels = np.array(labels)[sort_idx]
predictions = np.array(predictions)[sort_idx]
if use_class_groups:
image_ids, labels, predictions = _filter_class_groups(
image_ids, labels, predictions)
return image_ids, labels, predictions
def save_predictions(image_ids: np.ndarray,
labels: np.ndarray,
predictions: np.ndarray,
filename: str):
  if not tf.io.gfile.exists(os.path.dirname(filename)):
    tf.io.gfile.makedirs(os.path.dirname(filename))
  with tf.io.gfile.GFile(filename, 'w') as f:
for image_id, label, prediction in zip(image_ids, labels, predictions):
f.write(f'{image_id}, {label}, {prediction}\n')
def load_predictions(
filename: str,
dtype: np.dtype = np.int32) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Loads predictions from a csv file."""
image_ids = []
labels = []
predictions = []
  with tf.io.gfile.GFile(filename, 'r') as f:
for line in f:
line = line.rstrip('\n')
if line:
image_id, label, prediction = line.split(',')
image_ids.append(image_id)
labels.append(int(label))
predictions.append(
np.fromstring(prediction.strip(' []'), dtype=dtype, sep=' '))
return np.array(image_ids), np.array(labels), np.array(predictions)
def gather_predictions(
filedir: str,
dtype: np.dtype = np.int32) -> Tuple[Mapping[str, np.ndarray],
Mapping[str, np.ndarray]]:
"""Loads predictions for all obfuscations."""
image_id_dict = {}
label_dict = {}
predictions_dict = {}
for obfuscation in data_utils.get_obfuscations(data_utils.Split.TEST):
filename = os.path.join(filedir, f'{obfuscation}.csv')
logging.info('Loading predictions for obfuscation `%s` from `%s`.',
obfuscation, filename)
image_ids, labels, predictions = load_predictions(filename, dtype)
image_id_dict[obfuscation] = image_ids
label_dict[obfuscation] = labels
predictions_dict[obfuscation] = predictions
# Predictions should be sorted by image_ids so they should all match up.
assert data_utils.CLEAN in image_id_dict
for image_ids in image_id_dict.values():
assert (image_id_dict[data_utils.CLEAN] == image_ids).all()
return label_dict, predictions_dict
def _is_in_top_n(
labels: np.ndarray, predictions: np.ndarray, n: int) -> np.ndarray:
return (labels[..., None] == predictions[:, :n]).any(1)
def _calculate_mean(correctness: np.ndarray, labels: np.ndarray,
label_weighted: bool = False) -> float:
if not label_weighted:
return np.mean(correctness)
correctness = correctness.flatten()
labels = labels.flatten()
label_counts = np.bincount(labels)
weights = [1.0 / float(label_counts[label]) for label in labels]
return float(np.average(correctness, weights=weights))
def calculate_metrics(
label_dict: Mapping[str, np.ndarray],
predictions_dict: Mapping[str, np.ndarray],
label_weighted: bool = False,
top_ns: Sequence[int] = _TOP_N_METRICS) -> Mapping[str, float]:
"""Calculates the metrics from predictions and labels."""
metrics = {}
num_hold_out = len(data_utils.HOLD_OUT_OBFUSCATIONS)
for n in top_ns:
correctness_table = []
label_table = []
for obfuscation in data_utils.get_obfuscations(data_utils.Split.TEST):
correctness_table.append(_is_in_top_n(
label_dict[obfuscation], predictions_dict[obfuscation], n))
label_table.append(label_dict[obfuscation])
correctness_table = np.stack(correctness_table, axis=0)
label_table = np.stack(label_table, axis=0)
for i, obfuscation in enumerate(label_dict):
metrics[f'{obfuscation}-top-{n}'] = _calculate_mean(
correctness_table[i, :], label_table[i, :], label_weighted)
metrics[f'mean-training-top-{n}'] = _calculate_mean(
correctness_table[1:-num_hold_out, :],
label_table[1:-num_hold_out, :], label_weighted)
metrics[f'mean-hold-out-top-{n}'] = _calculate_mean(
correctness_table[-num_hold_out:, :],
label_table[-num_hold_out:, :], label_weighted)
metrics[f'mean-all-top-{n}'] = _calculate_mean(
correctness_table[1:, :], label_table[1:, :], label_weighted)
metrics[f'worst-training-top-{n}'] = _calculate_mean(
np.all(correctness_table[1:-num_hold_out, :], axis=0),
label_table[0, :], label_weighted)
metrics[f'worst-hold-out-top-{n}'] = _calculate_mean(
np.all(correctness_table[-num_hold_out:, :], axis=0),
label_table[0, :], label_weighted)
metrics[f'worst-all-top-{n}'] = _calculate_mean(
np.all(correctness_table[1:, :], axis=0),
label_table[0, :], label_weighted)
return metrics
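# Minimal round-trip sketch (not part of the original library): writes a tiny,
# made-up set of predictions with `save_predictions` and reads it back with
# `load_predictions`; the ids, labels and top-n values are illustrative only.
if __name__ == '__main__':
  import tempfile
  demo_file = os.path.join(tempfile.mkdtemp(), 'demo_predictions.csv')
  save_predictions(
      image_ids=np.array(['img_0', 'img_1']),
      labels=np.array([404, 499]),
      predictions=np.array([[404, 1, 2], [3, 499, 5]]),
      filename=demo_file)
  ids, labels, predictions = load_predictions(demo_file)
  print(ids, labels, predictions[:, 0])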
|
image_obfuscation_benchmark-master
|
image_obfuscation_benchmark/eval/evaluate_lib.py
|
#!/usr/bin/python
#
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the image obfuscation evaluation library."""
from absl.testing import absltest
from absl.testing import parameterized
from image_obfuscation_benchmark.eval import data_utils
from image_obfuscation_benchmark.eval import evaluate_lib
import mock
import numpy as np
import tensorflow as tf
class EvaluateLibTest(parameterized.TestCase):
def setUp(self):
super(EvaluateLibTest, self).setUp()
images = [0, 1, 2, 3, 4]
self._image_ids = [b'a', b'b', b'c', b'd', b'e']
self._labels = [11, 440, 409, 294, 15]
self._super_class_labels = [3, 5, 9, 1, 3]
self._top_n_predictions = {0: [11, 12, 9, 555, 766],
1: [720, 440, 436, 281, 424],
2: [404, 409, 152, 385, 386],
3: [1, 2, 8, 152, 404],
4: [1, 2, 15, 385, 294]}
self._dataset = tf.data.Dataset.from_tensor_slices(
{'file_name': self._image_ids, 'label': self._labels, 'image': images}
).batch(1)
def _logit_function(tensor):
logits = np.zeros((1, 1000))
for i, n in enumerate(self._top_n_predictions[tensor.numpy()[0]]):
logits[0, n] = 100 - i * 10
return tf.constant(logits, dtype=tf.float32)
self._model = mock.MagicMock(side_effect=_logit_function)
@parameterized.parameters([
dict(use_class_groups=False, top_n=1,
expected_predictions=[[11], [720], [404], [1], [1]]),
dict(use_class_groups=False, top_n=2,
expected_predictions=[[11, 12], [720, 440], [404, 409], [1, 2],
[1, 2]]),
dict(use_class_groups=False, top_n=3,
expected_predictions=[[11, 12, 9], [720, 440, 436], [404, 409, 152],
[1, 2, 8], [1, 2, 15]]),
dict(use_class_groups=True, top_n=1,
expected_predictions=[[3], [5], [0], [3], [3]]),
dict(use_class_groups=True, top_n=2,
expected_predictions=[[3, 15], [5, 6], [0, 9], [3, 10], [3, 11]]),
dict(use_class_groups=True, top_n=3,
expected_predictions=[[3, 15, 14], [5, 6, 7], [0, 9, 10], [3, 10, 0],
[3, 11, 1]]),
])
def test_predict(self, use_class_groups, top_n, expected_predictions):
image_ids, labels, predictions = evaluate_lib.predict(
self._dataset, self._model,
use_class_groups=use_class_groups,
top_n=top_n)
np.testing.assert_equal(image_ids, self._image_ids)
if use_class_groups:
np.testing.assert_equal(labels, self._super_class_labels)
else:
np.testing.assert_equal(labels, self._labels)
np.testing.assert_equal(predictions.shape,
(len(self._image_ids), top_n))
np.testing.assert_equal(predictions, expected_predictions)
def test_calculate_metrics(self):
label_dict = {}
predictions_dict = {}
for obfuscation in data_utils.EVAL_OBFUSCATIONS:
label_dict[obfuscation] = np.array(self._labels)
predictions_dict[obfuscation] = np.array(
list(self._top_n_predictions.values()))
metrics = evaluate_lib.calculate_metrics(
label_dict, predictions_dict, label_weighted=False, top_ns=(1, 2, 3))
top_1 = 0.2
top_2 = 0.6
top_3 = 0.8
expected_metrics = {}
for prefix in ('mean-training', 'mean-hold-out', 'mean-all',
'worst-training', 'worst-hold-out', 'worst-all'):
expected_metrics[f'{prefix}-top-1'] = top_1
expected_metrics[f'{prefix}-top-2'] = top_2
expected_metrics[f'{prefix}-top-3'] = top_3
for obfuscation in data_utils.EVAL_OBFUSCATIONS:
expected_metrics[f'{obfuscation}-top-1'] = top_1
expected_metrics[f'{obfuscation}-top-2'] = top_2
expected_metrics[f'{obfuscation}-top-3'] = top_3
self.assertEqual(metrics, expected_metrics)
if __name__ == '__main__':
absltest.main()
|
image_obfuscation_benchmark-master
|
image_obfuscation_benchmark/eval/evaluate_lib_test.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for length generalization tasks."""
import abc
from typing import TypedDict
import chex
import jax.nn as jnn
import jax.numpy as jnp
Batch = TypedDict('Batch', {'input': chex.Array, 'output': chex.Array})
class GeneralizationTask(abc.ABC):
"""A task for the generalization project.
Exposes a sample_batch method, and some details about input/output sizes,
losses and accuracies.
"""
@abc.abstractmethod
def sample_batch(self, rng: chex.PRNGKey, batch_size: int,
length: int) -> Batch:
"""Returns a batch of inputs/outputs."""
def pointwise_loss_fn(self, output: chex.Array,
target: chex.Array) -> chex.Array:
"""Returns the pointwise loss between an output and a target."""
return -target * jnn.log_softmax(output)
def accuracy_fn(self, output: chex.Array, target: chex.Array) -> chex.Array:
"""Returns the accuracy between an output and a target."""
return (jnp.argmax(output,
axis=-1) == jnp.argmax(target,
axis=-1)).astype(jnp.float32)
def accuracy_mask(self, target: chex.Array) -> chex.Array:
"""Returns a mask to compute the accuracies, to remove the superfluous ones."""
# Target is a shape of shape (B, T, C) where C is the number of classes.
# We want a mask per input (B, T), so we take this shape.
return jnp.ones(target.shape[:-1])
@property
@abc.abstractmethod
def input_size(self) -> int:
"""Returns the size of the input of the models trained on this task."""
@property
@abc.abstractmethod
def output_size(self) -> int:
"""Returns the size of the output of the models trained on this task."""
def output_length(self, input_length: int) -> int:
"""Returns the length of the output, given an input length."""
del input_length
return 1
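# Minimal sketch (not part of the original module): a toy constant task showing
# how a concrete subclass plugs into the base class; the class below is purely
# illustrative.
class _ConstantZeroTask(GeneralizationTask):
  """Toy task: every input and output token is the class `0`."""
  def sample_batch(self, rng: chex.PRNGKey, batch_size: int,
                   length: int) -> Batch:
    del rng
    return {
        'input': jnn.one_hot(jnp.zeros((batch_size, length)), self.input_size),
        'output': jnn.one_hot(jnp.zeros((batch_size,)), self.output_size),
    }
  @property
  def input_size(self) -> int:
    return 2
  @property
  def output_size(self) -> int:
    return 2
if __name__ == '__main__':
  demo_task = _ConstantZeroTask()
  demo_batch = demo_task.sample_batch(rng=None, batch_size=4, length=8)
  accuracy = demo_task.accuracy_fn(demo_batch['output'], demo_batch['output'])
  print(demo_batch['input'].shape, demo_batch['output'].shape, accuracy.mean())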
|
neural_networks_chomsky_hierarchy-main
|
tasks/task.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Add two binary numbers."""
import random
from typing import Sequence
import chex
import jax.nn as jnn
import jax.numpy as jnp
import numpy as np
from neural_networks_chomsky_hierarchy.tasks import task
def numbers_to_variable_length_binary(
numbers: Sequence[int],
lengths: Sequence[int],
little_endian: bool = True,
) -> list[list[int]]:
"""Returns the binary notation of a certain length for a sequence of numbers.
Args:
numbers: The numbers to be converted to binary.
lengths: The lengths of the binary representations (every number uses its
own length). This argument has no effect if the binary representation is
longer than the specified length.
little_endian: Whether to use little- or big-endian notation.
"""
binary_strings = [f'{num:b}'.zfill(len) for num, len in zip(numbers, lengths)]
if little_endian:
binary_strings = [bin[::-1] for bin in binary_strings]
return [list(map(int, bin)) for bin in binary_strings]
def numbers_to_fixed_length_binary(
numbers: Sequence[int],
length: int,
little_endian: bool = True,
) -> list[list[int]]:
"""Returns the binary notation of a certain length for a sequence of numbers.
Args:
numbers: The numbers to be converted to binary.
length: The length of the binary representations (all numbers use the same
length). This argument has no effect if the binary representation is
longer than the specified length.
little_endian: Whether to use little- or big-endian notation.
"""
return numbers_to_variable_length_binary(
numbers=numbers,
lengths=[length] * len(numbers),
little_endian=little_endian,
)
def expression_from_numbers(
numbers_n: Sequence[list[int]],
numbers_m: Sequence[list[int]],
) -> list[list[int]]:
"""Returns an expression with a placeholder value to denote the operation."""
return [n + [2] + m for n, m in zip(numbers_n, numbers_m)]
class BinaryAddition(task.GeneralizationTask):
"""A task with the goal of summing two numbers in binary (little-endian).
The input is a string of the form `first_number+second_number` in
(little-endian) binary notation (e.g., `01001+011`). The goal of the agent is
to output the result, also in (little-endian) binary form (i.e., in the
  example `18 + 6 = 24 = 00011`). The end of the sum is denoted with a
  termination token, and the output is padded with 0s up to the input length
  plus one (i.e., the output has values in `{0, 1, 2}`).
  Examples:
    001 + 01101 = 0101120000 (4 + 22 = 26)
    1001 + 000001 = 100101200000 (9 + 32 = 41)
"""
def _sample_expressions_and_results(
self,
batch_size: int,
length: int,
) -> tuple[Sequence[list[int]], Sequence[list[int]]]:
"""Samples pairs of numbers and sums them in (little-endian) binary.
    We use Python's bignums (arbitrary-precision integers) to add two
    potentially very large values (roughly of the size `2 ** (length // 2)`).
Args:
batch_size: The number of expressions and results to sample.
length: The length of the input expression containing the two numbers and
the separation token.
Returns:
The expression and the sum of the two numbers. The expression has the
format: `[first_number, 2, second_number]`, where the numbers are in
(little-endian) binary notation. The sum is also in (little-endian) binary
notation, without leading (i.e., ending) zeros.
"""
# If `length <= 2`, we just sample a binary value and return it (without
# leading zeros in little-endian notation).
if length <= 2:
      # Since `length <= 2`, we can use `np.random` without overflow errors.
numbers = np.random.randint(0, 2**length - 1, size=(batch_size))
expressions = numbers_to_fixed_length_binary(numbers, length)
results = numbers_to_fixed_length_binary(numbers, 0)
return expressions, results
# We only use `length - 1` tokens for the two values to account for the `+`.
length_n = np.random.randint(1, length - 1, size=(batch_size,))
length_m = length - 1 - length_n
integer_n = [random.randint(1, 2**int(len_n) - 1) for len_n in length_n]
integer_m = [random.randint(1, 2**int(len_m) - 1) for len_m in length_m]
binary_n = numbers_to_variable_length_binary(integer_n, length_n)
binary_m = numbers_to_variable_length_binary(integer_m, length_m)
expressions = expression_from_numbers(binary_n, binary_m)
integer_sum = list(map(sum, zip(integer_n, integer_m)))
results = numbers_to_fixed_length_binary(integer_sum, length=0)
return expressions, results
def sample_batch(
self,
rng: chex.PRNGKey,
batch_size: int,
length: int,
) -> task.Batch:
"""Returns a batch of binary additions and their results."""
del rng
expressions, results = self._sample_expressions_and_results(
batch_size=batch_size, length=length)
# Append the termination token to the result and pad the result with zeros
# to match the output length (accounting for the termination token).
results = [res + [2] + [0] * (length - len(res)) for res in results]
expressions = jnp.array(expressions, dtype=jnp.int32)
results = jnp.array(results, dtype=jnp.int32)
return {
'input': jnn.one_hot(expressions, self.input_size),
'output': jnn.one_hot(results, self.output_size),
}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return 3
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return 3
def output_length(self, input_length: int) -> int:
return input_length + 1
def accuracy_mask(self, target: chex.Array) -> chex.Array:
"""Computes a mask that ignores everything after the termination token.
Args:
target: Target tokens of shape `(batch_size, output_length, output_size)`.
Returns:
The mask of shape `(batch_size, output_length)`.
"""
batch_size, length, _ = target.shape
termination_indices = jnp.argmax(
jnp.argmax(target, axis=-1),
axis=-1,
keepdims=True,
)
indices = jnp.tile(jnp.arange(length), (batch_size, 1))
return indices <= termination_indices
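# Minimal usage sketch (not part of the original module): the helper turns 6
# into a length-5 little-endian binary list, and `sample_batch` returns one-hot
# inputs of shape (batch_size, length, 3) and outputs of shape
# (batch_size, length + 1, 3).
if __name__ == '__main__':
  print(numbers_to_fixed_length_binary([6], length=5))  # [[0, 1, 1, 0, 0]]
  demo_batch = BinaryAddition().sample_batch(rng=None, batch_size=4, length=9)
  print(demo_batch['input'].shape, demo_batch['output'].shape)  # (4, 9, 3) (4, 10, 3)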
|
neural_networks_chomsky_hierarchy-main
|
tasks/cs/binary_addition.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multiply two binary numbers."""
import random
from typing import Sequence
import chex
import jax.nn as jnn
import jax.numpy as jnp
import numpy as np
from neural_networks_chomsky_hierarchy.tasks import task
from neural_networks_chomsky_hierarchy.tasks.cs import binary_addition
class BinaryMultiplication(task.GeneralizationTask):
"""A task with the goal of multiplying two numbers in binary (little-endian).
  The input is a string of the form `first_number*second_number` in
(little-endian) binary notation (e.g., `01001*011`). The goal of the agent is
to output the result, also in (little-endian) binary form (i.e., in the
  example `18 * 6 = 108 = 0011011`). The output is padded with 0s to match the
input length, and the end of the product is denoted with a termination token
(i.e., the output has values in `{0, 1, 2}`).
Examples:
001 * 01101 = 000110120 (4 * 22 = 88)
1001 * 000001 = 00000100120 (9 * 32 = 288)
"""
def _sample_expressions_and_results(
self,
batch_size: int,
length: int,
) -> tuple[Sequence[list[int]], Sequence[list[int]]]:
"""Samples pairs of numbers and multiplies them in (little-endian) binary.
    We use Python's bignums (arbitrary-precision integers) to multiply two
    potentially very large values (roughly of the size `2 ** (length // 2)`).
Args:
batch_size: The number of expressions and results to sample.
length: The length of the input expression containing the two numbers and
the separation token.
Returns:
The expression and the product of the two numbers. The expression has the
format: `[first_number, 2, second_number]`, where the numbers are in
(little-endian) binary notation. The product is also in (little-endian)
binary notation, without leading (i.e., ending) zeros.
"""
# If `length <= 2`, we just sample a binary sequence for the expression and
# arbitrarily set the result to a fixed value (`[]` for `length == 1` and
# `[0]` for `length == 2`) to maintain the invariant that the result has
    # length at most `length - 1`.
if length <= 2:
      # Since `length <= 2`, we can use `np.random` without overflow errors.
numbers = np.random.randint(0, 2**length - 1, size=(batch_size))
expressions = binary_addition.numbers_to_fixed_length_binary(
numbers, length)
return expressions, [[0] * (length - 1)] * batch_size
# We only use `length - 1` tokens for the two values to account for the `*`.
length_n = np.random.randint(1, length - 1, size=(batch_size,))
length_m = length - 1 - length_n
integer_n = [random.randint(1, 2**int(len_n) - 1) for len_n in length_n]
integer_m = [random.randint(1, 2**int(len_m) - 1) for len_m in length_m]
binary_n = binary_addition.numbers_to_variable_length_binary(
integer_n, length_n)
binary_m = binary_addition.numbers_to_variable_length_binary(
integer_m, length_m)
expressions = binary_addition.expression_from_numbers(binary_n, binary_m)
integer_prod = [int_n * int_m for int_n, int_m in zip(integer_n, integer_m)]
results = binary_addition.numbers_to_fixed_length_binary(
integer_prod, length=0)
return expressions, results
def sample_batch(
self,
rng: chex.PRNGKey,
batch_size: int,
length: int,
) -> task.Batch:
"""Returns a batch of binary multiplications and their results."""
del rng
expressions, results = self._sample_expressions_and_results(
batch_size=batch_size, length=length)
# Append the termination token to the result and pad the result with zeros
# to match the output length (accounting for the termination token). The
# binary representation of the result will have at most length
# `#(first_number) + #(second_number)`, where #() denotes the number of
# digits of the binary notation. Since we use the token `2` to separate the
# two numbers in the expression, the result will have length at most
# `length - 1`, and thus by appending the termination token above it will
# have length at most `length`, as desired.
results = [res + [2] + [0] * (length - 1 - len(res)) for res in results]
expressions = jnp.array(expressions, dtype=jnp.int32)
results = jnp.array(results, dtype=jnp.int32)
return {
'input': jnn.one_hot(expressions, self.input_size),
'output': jnn.one_hot(results, self.output_size),
}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return 3
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return 3
def output_length(self, input_length: int) -> int:
return input_length
def accuracy_mask(self, target: chex.Array) -> chex.Array:
"""Computes a mask that ignores everything after the termination token.
Args:
target: Target tokens of shape `(batch_size, output_length, output_size)`.
Returns:
The mask of shape `(batch_size, output_length)`.
"""
batch_size, length, _ = target.shape
termination_indices = jnp.argmax(
jnp.argmax(target, axis=-1),
axis=-1,
keepdims=True,
)
indices = jnp.tile(jnp.arange(length), (batch_size, 1))
return indices <= termination_indices
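# Minimal usage sketch (not part of the original module): unlike addition, the
# output length equals the input length, and `accuracy_mask` keeps every
# position up to (and including) the first termination token `2`.
if __name__ == '__main__':
  demo_task = BinaryMultiplication()
  demo_batch = demo_task.sample_batch(rng=None, batch_size=4, length=9)
  print(demo_batch['input'].shape, demo_batch['output'].shape)  # (4, 9, 3) (4, 9, 3)
  print(demo_task.accuracy_mask(demo_batch['output']).shape)  # (4, 9)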
|
neural_networks_chomsky_hierarchy-main
|
tasks/cs/binary_multiplication.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sort tokens from a fixed alphabet (i.e., bucket sort)."""
import functools
import chex
import jax
from jax import nn as jnn
from jax import numpy as jnp
from jax import random as jrandom
from neural_networks_chomsky_hierarchy.tasks import task
class BucketSort(task.GeneralizationTask):
"""A task with the goal of sorting tokens from a fixed alphabet.
The input string is composed of tokens from a fixed-size alphabet, i.e.,
`{0, 1, ..., vocab_size - 1}`, and the goal is to return the sorted string (in
lexicographically increasing order).
Examples:
10204112 -> 00111224 (with `vocab_size = 5`)
1110001 -> 0001111 (with `vocab_size = 2`)
"""
def __init__(self, *args, vocab_size: int = 5, **kwargs) -> None:
"""Initializes the task.
Args:
*args: The args for the base task class.
vocab_size: The size of the alphabet.
**kwargs: The kwargs for the base task class.
"""
super().__init__(*args, **kwargs)
self._vocab_size = vocab_size
@functools.partial(jax.jit, static_argnums=(0, 2, 3))
def sample_batch(
self,
rng: chex.PRNGKey,
batch_size: int,
length: int,
) -> task.Batch:
"""Returns a batch of strings and tokens sorted by (inc.) occurrence."""
strings = jrandom.randint(
rng, shape=(batch_size, length), minval=0, maxval=self._vocab_size)
sorted_strings = jnp.sort(strings, axis=-1)
return {
'input': jnn.one_hot(strings, num_classes=self.input_size),
'output': jnn.one_hot(sorted_strings, num_classes=self.output_size),
}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return self._vocab_size
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return self._vocab_size
def output_length(self, input_length: int) -> int:
"""Returns the output length for a given input length."""
return input_length
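# Minimal usage sketch (not part of the original module): recovers the token
# ids from the one-hot encodings to show that each output row is the sorted
# version of the corresponding input row.
if __name__ == '__main__':
  demo_batch = BucketSort().sample_batch(jrandom.PRNGKey(0), 2, 8)
  print(jnp.argmax(demo_batch['input'], axis=-1))
  print(jnp.argmax(demo_batch['output'], axis=-1))  # Row-wise sorted.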
|
neural_networks_chomsky_hierarchy-main
|
tasks/cs/bucket_sort.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute the floor of the square root of a binary number."""
import math
import random
import chex
import jax.nn as jnn
import jax.numpy as jnp
from neural_networks_chomsky_hierarchy.tasks import task
from neural_networks_chomsky_hierarchy.tasks.cs import binary_addition
class ComputeSqrt(task.GeneralizationTask):
"""A task with the goal of computing the square root of a binary number.
The input is a number in binary (big-endian), and the output is the floor of
the square root of this number, also in binary.
  Note that the output length, i.e., the length of the square root in binary, is
  always ceil(input_length / 2) (because log(sqrt(x)) = 1/2 log(x)).
Examples:
    100101 = 37 -> square root is 6.08... -> floor(6.08) = 6 -> 110
111 = 7 -> square root is 2.64 -> floor(2.64) = 2 -> 10
"""
def sample_batch(self, rng: chex.PRNGKey, batch_size: int,
length: int) -> task.Batch:
"""Returns a batch of binary numbers and their square roots, in binary."""
del rng
numbers = [random.randint(1, 2**length - 1) for _ in range(batch_size)]
binary_numbers = binary_addition.numbers_to_fixed_length_binary(
numbers, length=length, little_endian=False)
sqrts = list(map(math.isqrt, numbers))
binary_sqrts = binary_addition.numbers_to_fixed_length_binary(
sqrts, length=self.output_length(length), little_endian=False)
binary_numbers = jnp.array(binary_numbers, jnp.int32)
binary_sqrts = jnp.array(binary_sqrts, jnp.int32)
inputs = jnn.one_hot(binary_numbers, self.input_size)
output = jnn.one_hot(binary_sqrts, self.output_size)
return {'input': inputs, 'output': output}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return 2
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return 2
def output_length(self, input_length: int) -> int:
return math.ceil(input_length / 2)
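# Minimal usage sketch (not part of the original module): for inputs of length
# 6 the targets have length ceil(6 / 2) = 3, e.g. math.isqrt(37) = 6 = 110 in
# big-endian binary.
if __name__ == '__main__':
  demo_batch = ComputeSqrt().sample_batch(rng=None, batch_size=2, length=6)
  print(demo_batch['input'].shape, demo_batch['output'].shape)  # (2, 6, 2) (2, 3, 2)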
|
neural_networks_chomsky_hierarchy-main
|
tasks/cs/compute_sqrt.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Duplicate a string."""
import functools
import jax
import jax.nn as jnn
import jax.numpy as jnp
import jax.random as jrandom
from neural_networks_chomsky_hierarchy.tasks import task
class DuplicateString(task.GeneralizationTask):
"""A task with the goal of duplicating a string.
The input is a string s_1 ... s_n composed of symbols from a finite set S. The
  output is the same string repeated twice, without any separator, i.e.:
s_1 ... s_n s_1 ... s_n
Examples:
101 -> 101 101
111111 -> 111111 111111
  In the paper, we use only binary strings (i.e., S = {0, 1}).
Note that the sampling is jittable so this task is fast.
"""
def __init__(self, vocab_size: int, *args, **kwargs):
"""Initializes the remember_string task.
Args:
vocab_size: The size of the alphabet.
*args: Args for the base task class.
**kwargs: Kwargs for the base task class.
"""
super().__init__(*args, **kwargs)
self._vocab_size = vocab_size
@functools.partial(jax.jit, static_argnums=(0, 2, 3))
def sample_batch(self, rng: jnp.ndarray, batch_size: int,
length: int) -> task.Batch:
"""Returns a batch of strings and their copies."""
strings = jrandom.randint(
rng, shape=(batch_size, length), minval=0, maxval=self._vocab_size)
one_hot_strings = jnn.one_hot(strings, num_classes=self._vocab_size)
output = jnp.concatenate([one_hot_strings, one_hot_strings], axis=1)
return {"input": one_hot_strings, "output": output}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return self._vocab_size
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return self._vocab_size
def output_length(self, input_length: int) -> int:
"""Returns the output length for a given input length."""
return 2 * input_length
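# Minimal usage sketch (not part of the original module): the output is simply
# the input repeated, so its one-hot tensor is twice as long.
if __name__ == "__main__":
  demo_batch = DuplicateString(vocab_size=2).sample_batch(
      jrandom.PRNGKey(0), 2, 4)
  print(demo_batch["input"].shape, demo_batch["output"].shape)  # (2, 4, 2) (2, 8, 2)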
|
neural_networks_chomsky_hierarchy-main
|
tasks/cs/duplicate_string.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sort a string by the parity of the indices (odd indices first)."""
import functools
import jax
import jax.nn as jnn
import jax.numpy as jnp
import jax.random as jrandom
from neural_networks_chomsky_hierarchy.tasks import task
class OddsFirst(task.GeneralizationTask):
"""A task with the goal of outputting a string's tokens at odd indices first.
The input is a string s_1 ... s_n composed of symbols from a finite set S. The
  output is the same string, but with the values at odd (0-based) indices put
  first: s_2 s_4 s_6 ... s_1 s_3 s_5 ...
  Examples:
    00110101 -> 0111 0100
    110 -> 1 10
  In the paper, we use only binary strings (i.e., S = {0, 1}).
Note that the sampling is jittable so this task is fast.
"""
def __init__(self, vocab_size: int, *args, **kwargs):
"""Initializes the odds_first task.
Args:
vocab_size: The size of the alphabet.
*args: Args for the base task class.
**kwargs: Kwargs for the base task class.
"""
super().__init__(*args, **kwargs)
self._vocab_size = vocab_size
@functools.partial(jax.jit, static_argnums=(0, 2, 3))
def sample_batch(self, rng: jnp.ndarray, batch_size: int,
length: int) -> task.Batch:
"""Returns a batch of strings and their outputs."""
strings = jrandom.randint(
rng, shape=(batch_size, length), minval=0, maxval=self._vocab_size)
one_hot_strings = jnn.one_hot(strings, num_classes=self._vocab_size)
output = jnp.concatenate(
[one_hot_strings[:, 1::2], one_hot_strings[:, ::2]], axis=1)
return {"input": one_hot_strings, "output": output}
@property
def input_size(self) -> int:
"""Returns the input size for the model."""
return self._vocab_size
@property
def output_size(self) -> int:
"""Returns the output size for the model."""
return self._vocab_size
def output_length(self, input_length: int) -> int:
"""Returns the output length for the model."""
return input_length
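# Minimal sketch (not part of the original module) of the index convention used
# above: positions 1, 3, 5, ... (0-based) come first, followed by positions
# 0, 2, 4, ...
if __name__ == "__main__":
  tokens = jnp.arange(6)
  print(jnp.concatenate([tokens[1::2], tokens[::2]]))  # [1 3 5 0 2 4]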
|
neural_networks_chomsky_hierarchy-main
|
tasks/cs/odds_first.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Predict the missing symbol in a duplicated string."""
import functools
import chex
import jax
import jax.nn as jnn
import jax.numpy as jnp
import jax.random as jrandom
from neural_networks_chomsky_hierarchy.tasks import task
class MissingDuplicateString(task.GeneralizationTask):
"""A task with the goal of finding the missing symbol in a duplicated string.
Given a binary string that is presented twice with exactly one element omitted
(denoted by the placeholder token `2`), predict the value of that element.
Thus, an agent trying to solve this task needs to recognize the underlying
duplicated string to be able to produce the correct output.
  If the length is odd, the concatenation of the two strings of length
  `length // 2` is padded with the empty token `3`.
  Examples:
    01100210 -> 1 (the substring is 0110, so the missing value is 1)
    1011213 -> 0 (the substring is 101, so the missing value is 0)
"""
@functools.partial(jax.jit, static_argnums=(0, 2, 3))
def sample_batch(
self,
rng: chex.PRNGKey,
batch_size: int,
length: int,
) -> task.Batch:
"""Returns a batch of strings and the expected class."""
# For `length == 1`, we cannot meaningfully define substrings of length
# `length // 2`, so we arbitrarily set the inputs and outputs to `1`.
if length == 1:
return {
'input':
jnn.one_hot(
jnp.ones((batch_size, length)), num_classes=self.input_size),
'output':
jnn.one_hot(
jnp.ones((batch_size,)), num_classes=self.output_size),
}
strings_rng, indices_rng = jrandom.split(rng)
strings = jrandom.randint(
strings_rng, shape=(batch_size, length // 2), minval=0, maxval=2)
duplicated_strings = jnp.concatenate((strings, strings), axis=-1)
indices = jrandom.randint(
indices_rng,
shape=(batch_size,),
minval=0,
maxval=duplicated_strings.shape[1])
output = jax.vmap(lambda x, y: x[y])(duplicated_strings, indices)
masked_strings = jax.vmap(lambda x, y: x.at[y].set(2))(duplicated_strings,
indices)
# If `length` is odd, we pad the strings with the empty token `3` at the end
# to ensure that the final input length is equal to `length` given the two
# substrings of length `length // 2`.
padding = jnp.full((batch_size, length % 2), fill_value=3)
padded_strings = jnp.concatenate((masked_strings, padding), axis=-1)
return {
'input': jnn.one_hot(padded_strings, num_classes=self.input_size),
'output': jnn.one_hot(output, num_classes=self.output_size)
}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return 4
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return 2
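# Minimal usage sketch (not part of the original module): the input is a
# one-hot string over the 4-token alphabet {0, 1, 2, 3} and the output is a
# single binary class per example.
if __name__ == '__main__':
  demo_batch = MissingDuplicateString().sample_batch(jrandom.PRNGKey(0), 2, 8)
  print(demo_batch['input'].shape, demo_batch['output'].shape)  # (2, 8, 4) (2, 2)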
|
neural_networks_chomsky_hierarchy-main
|
tasks/cs/missing_duplicate_string.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Modular arithmetic with brackets."""
import collections
from typing import Sequence
import jax.nn as jnn
import jax.numpy as jnp
import numpy as np
import tqdm
import tree
from neural_networks_chomsky_hierarchy.tasks import task
def generate_one_expression_and_result(
modulus: int, length: int, mult: bool = False
) -> tuple[str, int]:
"""Returns a modular arithmetic expression with brackets, and its result.
The values in the expression are in {0, 1, ..., modulus-1}. The allowed
operations are either {+, -} (mult=False) or {+, -, *} (mult=True).
Args:
modulus: The modulus to use for the expression.
length: The length of the expression.
mult: Whether to include the multiplication operator in the expressions.
Raises:
ValueError if length < 1.
"""
# Generates a terminal (digit).
def gen_terminal():
terminal = np.random.randint(low=0, high=modulus)
return str(terminal), terminal
# If length is less than 1, issue an error.
if length < 1:
raise ValueError(
f'Can\'t generate expressions of length < 1. Got {length}.')
# If length is less than 5, generate a digit d, -d, (d), or (-d).
if length == 1:
return gen_terminal()
elif length == 2:
term_str, term_val = gen_terminal()
return f'-{term_str}', -term_val % modulus
elif length == 3:
term_str, term_val = gen_terminal()
return f'({term_str})', term_val % modulus
elif length == 4:
term_str, term_val = gen_terminal()
return f'(-{term_str})', -term_val % modulus
# First split the length into a left and right part.
left_length = np.random.randint(low=1, high=length - 3)
right_length = length - (left_length + 3)
left_str, left_val = generate_one_expression_and_result(
modulus, left_length, mult=mult)
right_str, right_val = generate_one_expression_and_result(
modulus, right_length, mult=mult)
# Now sample an operator and return.
maxop = 3 if mult else 2
op = np.random.randint(low=0, high=maxop)
if op == 0:
return '(' + left_str + '+' + right_str + ')', (left_val +
right_val) % modulus
elif op == 1:
return '(' + left_str + '-' + right_str + ')', (left_val -
right_val) % modulus
else:
return '(' + left_str + '*' + right_str + ')', (left_val *
right_val) % modulus
def generate_raw_dataset(
n: int,
lengths: Sequence[int],
modulus: int,
mult: bool = False,
with_tqdm: bool = False,
) -> dict[int, dict[str, np.ndarray]]:
"""Generates a dataset of maths expressions with brackets, and their results.
Args:
n: The number of datapoints in the dataset.
lengths: The lengths of the sequences to generate. n is evenly distributed
over these lengths.
modulus: Modulus used to compute the expressions.
mult: Whether to include the multiplication operator in the expressions.
with_tqdm: As the computation might be long, whether to add a tqdm progress
bar or not.
Returns:
    A dict whose keys are the passed lengths, and whose values are dicts with
    keys 'expressions' and 'results', whose values are the data numpy arrays.
"""
alphabet_to_int = {
'+': modulus,
'-': modulus + 1,
'*': modulus + 2,
'(': modulus + 3,
')': modulus + 4,
'x': modulus + 5,
'=': modulus + 6,
}
for x in range(modulus):
alphabet_to_int[str(x)] = x
make_default_dict = lambda: {'expressions': [], 'results': []}
sequences = collections.defaultdict(make_default_dict)
range_lengths = tqdm.tqdm(lengths) if with_tqdm else lengths
for length in range_lengths:
for _ in range(n // len(lengths)):
seq, label = generate_one_expression_and_result(modulus, length, mult)
seq = [alphabet_to_int[x] for x in seq]
sequences[length]['expressions'].append(seq)
sequences[length]['results'].append(label)
sequences = tree.traverse(
lambda l: np.array(l, dtype=np.int32) if isinstance(l, list) else l,
sequences,
top_down=False,
)
return dict(sequences)
class ModularArithmeticBrackets(task.GeneralizationTask):
"""A task with the goal of reducing an arithmetic expression with brackets."""
def __init__(self, modulus: int, *args, mult: bool = False, **kwargs):
super().__init__(*args, **kwargs)
self._modulus = modulus
self._mult = mult
def sample_batch(self, rng: jnp.ndarray, batch_size: int,
length: int) -> task.Batch:
"""Returns a batch of inputs/outputs."""
del rng
batch = generate_raw_dataset(
batch_size, lengths=[length], modulus=self._modulus,
mult=self._mult)[length]
inputs = jnn.one_hot(batch['expressions'], self.input_size)
output = jnn.one_hot(batch['results'], self.output_size)
return {'input': inputs, 'output': output}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return self._modulus + 6
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return self._modulus
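# Minimal usage sketch (not part of the original module): the generators rely
# on NumPy/Python randomness rather than the JAX rng, so the seed below is only
# for reproducibility of this demo.
if __name__ == '__main__':
  np.random.seed(0)
  expression, result = generate_one_expression_and_result(modulus=5, length=7)
  print(expression, '=', result, '(mod 5)')
  demo_batch = ModularArithmeticBrackets(modulus=5).sample_batch(
      rng=None, batch_size=2, length=7)
  print(demo_batch['input'].shape, demo_batch['output'].shape)  # (2, 7, 11) (2, 5)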
|
neural_networks_chomsky_hierarchy-main
|
tasks/dcf/modular_arithmetic_brackets.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute the reverse of an input string."""
import functools
import jax
import jax.numpy as jnp
from neural_networks_chomsky_hierarchy.tasks import task
from neural_networks_chomsky_hierarchy.tasks.cs import duplicate_string
class ReverseString(duplicate_string.DuplicateString):
"""A task with the goal of reversing a given string.
The input is a string s_1 ... s_n composed of symbols from a finite set S. The
  output is the string, reversed, i.e., s_n ... s_1.
Examples:
011010 -> 010110
123021 -> 120321
  In the paper, we use only binary strings (i.e., S = {0, 1}).
Note that the sampling is jittable so this task is fast.
"""
@functools.partial(jax.jit, static_argnums=(0, 2, 3))
def sample_batch(self, rng: jnp.ndarray, batch_size: int,
length: int) -> task.Batch:
"""Returns a batch of strings and their reversed version."""
batch = super().sample_batch(rng, batch_size, length)
batch['output'] = jnp.flip(batch['input'], axis=1)
return batch
def output_length(self, input_length: int) -> int:
"""Returns the output length for a given input length."""
return input_length
|
neural_networks_chomsky_hierarchy-main
|
tasks/dcf/reverse_string.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Solve for the value of an unknown variable in an equation."""
import collections
from typing import Sequence
import jax.nn as jnn
import jax.numpy as jnp
import numpy as np
import tqdm
import tree
from neural_networks_chomsky_hierarchy.tasks import task
from neural_networks_chomsky_hierarchy.tasks.dcf import modular_arithmetic_brackets as mab
def generate_equation_and_solution(
modulus: int, length: int, mult: bool = False
) -> tuple[str, int]:
"""Returns a modular arithmetic equation with brackets, and its solution.
The values are in {0, 1, ..., modulus-1}, and the unknown
value is x. The allowed operations are either {+, -} (mult=False) or
{+, -, *} (mult=True).
Warning: if mult=True, x might have multiple valid solutions.
Args:
modulus: The modulus to use for the expression.
length: The length of the expression.
mult: Whether to include the multiplication operator in the expressions.
Raises:
ValueError if the length is < 3.
"""
# Generate the expression.
expr, val = mab.generate_one_expression_and_result(
modulus, length - 2, mult=mult)
# Replace random digit with 'x'.
idx = np.random.randint(low=0, high=len(expr))
digits = [str(n) for n in range(modulus)]
while expr[idx] not in digits:
idx = (idx + 1) % (length - 2)
solution = int(expr[idx])
equation = f'{expr[:idx]}x{expr[idx + 1:]}={val}'
return equation, solution
def generate_raw_dataset(
n: int,
lengths: Sequence[int],
modulus: int,
mult: bool = False,
with_tqdm: bool = False,
) -> dict[int, dict[str, np.ndarray]]:
"""Generates a dataset of equations and their solutions.
Args:
n: The number of datapoints in the dataset.
lengths: The lengths of the sequences to generate. n is evenly distributed
over these lengths.
modulus: Modulus used to compute the expressions.
mult: Whether to include the multiplication operator in the expressions.
with_tqdm: As the computation might be long, whether to add a tqdm progress
bar or not.
Returns:
    A dict whose keys are the passed lengths, and whose values are dicts with
    keys 'equations' and 'solutions', whose values are the data numpy arrays.
"""
alphabet_to_int = {
'+': modulus,
'-': modulus + 1,
'(': modulus + 2,
')': modulus + 3,
'x': modulus + 4,
'=': modulus + 5,
}
for x in range(modulus):
alphabet_to_int[str(x)] = x
sequences = collections.defaultdict(lambda: { # pylint: disable=g-long-lambda
'equations': [],
'solutions': []
})
range_lengths = tqdm.tqdm(lengths) if with_tqdm else lengths
for length in range_lengths:
for _ in range(n // len(lengths)):
seq, label = generate_equation_and_solution(modulus, length, mult=mult)
seq = [alphabet_to_int[x] for x in seq]
sequences[length]['equations'].append(seq)
sequences[length]['solutions'].append(label)
# Convert the list of numbers we have to arrays at the leaves.
sequences = tree.traverse(
lambda l: np.array(l, dtype=np.int32) if isinstance(l, list) else l,
sequences,
top_down=False,
)
return dict(sequences)
class SolveEquation(task.GeneralizationTask):
"""A task with the goal of solving an modular equation for an unknown."""
def __init__(self, modulus: int, *args, **kwargs):
super().__init__(*args, **kwargs)
self._modulus = modulus
def sample_batch(self, rng: jnp.ndarray, batch_size: int,
length: int) -> task.Batch:
"""Returns a batch of inputs/outputs."""
if length < 3:
return {
'input':
jnn.one_hot(
jnp.zeros((batch_size, length)), num_classes=self.input_size),
'output':
jnn.one_hot(
jnp.zeros((batch_size,)), num_classes=self.output_size)
}
batch = generate_raw_dataset(
batch_size, lengths=[length], modulus=self._modulus)[length]
inputs = jnn.one_hot(batch['equations'], self.input_size)
output = jnn.one_hot(batch['solutions'], self.output_size)
return {'input': inputs, 'output': output}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return self._modulus + 6
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return self._modulus
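# Minimal usage sketch (not part of the original module): equations look like
# '(2+x)=0' and are encoded over a `modulus + 6` token alphabet; the target is
# the one-hot value of x.
if __name__ == '__main__':
  np.random.seed(0)
  equation, solution = generate_equation_and_solution(modulus=5, length=7)
  print(equation, '-> x =', solution)
  demo_batch = SolveEquation(modulus=5).sample_batch(
      rng=None, batch_size=2, length=7)
  print(demo_batch['input'].shape, demo_batch['output'].shape)  # (2, 7, 11) (2, 5)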
|
neural_networks_chomsky_hierarchy-main
|
tasks/dcf/solve_equation.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Manipulate an input stack, using the input actions."""
import chex
import jax.nn as jnn
import jax.numpy as jnp
import numpy as np
from neural_networks_chomsky_hierarchy.tasks import task
class StackManipulation(task.GeneralizationTask):
"""A task with the goal of following instructions and returning the end stack.
The input is composed of a stack of 0s and 1s followed by a sequence of
instructions POP/PUSH 0/PUSH 1 (represented by 2s/3s/4s). The input stack is
given bottom-to-top, and the agent needs to execute the instructions given
(left-to-rigth) and output the final stack top-to-bottom (i.e., as if it were
popping the final stack). If a POP action is to be called on an empty stack,
the action is ignored. The output is padded with 0s to match the input length
+ 1 (to accommodate for the termination token), and the end of the final stack
is denoted with the termination symbol 2 (i.e., the output has values in {0,
1, 2}).
Examples:
0 1 1 0 PUSH 1 POP POP
initial 0 1 1 0 (the stack is received bottom-to-top)
PUSH 1 0 1 1 0 1
POP 0 1 1 0
POP 0 1 1
-> 1 1 0 2 0 0 0 0 (the stack is returned top-to-bottom)
1 1 0 POP POP POP
initial 1 1 0
POP 1 1
POP 1
POP
    -> 2 0 0 0 0 0 0 (the stack is empty and padded with zeros)
"""
def _sample_expression_and_result(
self, length: int
) -> tuple[np.ndarray, list[int]]:
"""Returns an expression with stack instructions, and the result stack."""
if length == 1:
value = np.random.randint(low=0, high=2, size=(1,))
return value, list(value)
# Initialize the stack content and the actions (POP/PUSH).
stack_length = np.random.randint(low=1, high=length)
stack = np.random.randint(low=0, high=2, size=(stack_length,))
actions = np.random.randint(low=2, high=5, size=(length - stack_length,))
# Apply the actions on the stack.
current_stack = list(stack)
for action in actions:
if action == 2: # POP
if current_stack:
current_stack.pop()
elif action in [3, 4]: # PUSH a 0 (case 3) or a 1 (case 4)
current_stack.append(action - 3)
return np.concatenate([stack, actions]), current_stack[::-1]
def sample_batch(self, rng: chex.PRNGKey, batch_size: int,
length: int) -> task.Batch:
"""Returns a batch of strings and the expected class."""
expressions, results = [], []
for _ in range(batch_size):
expression, result = self._sample_expression_and_result(length)
expressions.append(expression)
# Append the termination token to the result.
result += [self.output_size - 1]
# Pad the result with zeros to match the input length (accounting for the
# termination token).
result += [0] * (length + 1 - len(result))
results.append(result)
expressions = jnp.array(expressions)
results = jnp.array(results)
inputs = jnn.one_hot(expressions, self.input_size)
output = jnn.one_hot(results, self.output_size)
return {'input': inputs, 'output': output}
@property
def input_size(self) -> int:
"""Returns the input size for the models.
The value is 5 because we have two possible tokens in the stack (0, 1), plus
three tokens to describe the PUSH 0, PUSH 1, and POP actions.
"""
return 5
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return 3
def output_length(self, input_length: int) -> int:
"""Returns the output length of the task."""
return input_length + 1
def accuracy_mask(self, target: chex.Array) -> chex.Array:
"""Computes mask that ignores everything after the termination tokens.
Args:
target: Target tokens of shape `(batch_size, output_length, output_size)`.
Returns:
The mask of shape `(batch_size, output_length)`.
"""
batch_size, length, _ = target.shape
termination_indices = jnp.argmax(
jnp.argmax(target, axis=-1),
axis=-1,
keepdims=True,
)
indices = jnp.tile(jnp.arange(length), (batch_size, 1))
return indices <= termination_indices
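# Hedged usage sketch (an illustrative addition, not part of the original
# module): it samples a small batch to show the encoding described in the
# class docstring, i.e. stack tokens {0, 1} followed by action tokens
# {2: POP, 3: PUSH 0, 4: PUSH 1}, with outputs padded to length + 1.
if __name__ == '__main__':
  import jax.random as jrandom
  task = StackManipulation()
  batch = task.sample_batch(jrandom.PRNGKey(0), batch_size=2, length=8)
  # Inputs are one-hot over 5 input symbols, outputs over 3 output symbols
  # (0, 1 and the termination token 2).
  print(batch['input'].shape)   # (2, 8, 5)
  print(batch['output'].shape)  # (2, 9, 3)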
|
neural_networks_chomsky_hierarchy-main
|
tasks/dcf/stack_manipulation.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute whether the number of 01's and 10's is even."""
import functools
import jax
from jax import nn as jnn
from jax import numpy as jnp
from jax import random as jrandom
from neural_networks_chomsky_hierarchy.tasks import task
class EvenPairs(task.GeneralizationTask):
"""A task with the goal of checking whether the number of 01s and 10s is even.
  The input is a binary string, composed of 0s and 1s. If the total number of
  01 and 10 pairs is even, the class is 0; otherwise it is 1.
Examples:
001110 -> 1 '10' and 1 '01' -> class 0
0101001 -> 2 '10' and 3 '01' -> class 1
Note the sampling is jittable so this task is fast.
"""
@functools.partial(jax.jit, static_argnums=(0, 2, 3))
def sample_batch(self, rng: jnp.ndarray, batch_size: int,
length: int) -> task.Batch:
"""Returns a batch of strings and the expected class."""
strings = jrandom.randint(
rng,
shape=(batch_size, length),
minval=0,
maxval=2,
)
one_hot_strings = jnn.one_hot(strings, num_classes=2)
unequal_pairs = jnp.logical_xor(strings[:, :-1], strings[:, 1:])
odd_unequal_pairs = jnp.sum(unequal_pairs, axis=-1) % 2
return {
'input': one_hot_strings,
'output': jnn.one_hot(odd_unequal_pairs, num_classes=self.output_size),
}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return 2
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return 2
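# Hedged sanity-check sketch (an illustrative addition, not part of the
# original module): it verifies on the first docstring example that counting
# adjacent unequal bits with XOR matches counting the '01' and '10' substrings
# directly.
if __name__ == '__main__':
  import numpy as np
  bits = np.array([0, 0, 1, 1, 1, 0])  # the '001110' example above
  xor_count = int(np.sum(np.logical_xor(bits[:-1], bits[1:])))
  text = ''.join(str(int(b)) for b in bits)
  substring_count = text.count('01') + text.count('10')
  assert xor_count == substring_count == 2  # even count -> class 0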
|
neural_networks_chomsky_hierarchy-main
|
tasks/regular/even_pairs.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute whether the number of 1s in a string is even."""
import functools
import jax
import jax.nn as jnn
import jax.numpy as jnp
import jax.random as jrandom
from neural_networks_chomsky_hierarchy.tasks import task
class ParityCheck(task.GeneralizationTask):
"""A task with the goal of counting the number of '1' in a string, modulo 2.
  The input is a binary string, composed of 0s and 1s. If the number of 1s is
  even, the class is 0; otherwise it is 1.
Examples:
1010100 -> 3 1s (odd) -> class 1
01111 -> 4 1s (even) -> class 0
Note that the sampling is jittable so this task is fast.
"""
@functools.partial(jax.jit, static_argnums=(0, 2, 3))
def sample_batch(self, rng: jnp.ndarray, batch_size: int,
length: int) -> task.Batch:
"""Returns a batch of strings and the expected class."""
strings = jrandom.randint(
rng, shape=(batch_size, length), minval=0, maxval=2)
n_b = jnp.sum(strings, axis=1) % 2
n_b = jnn.one_hot(n_b, num_classes=2)
one_hot_strings = jnn.one_hot(strings, num_classes=2)
return {"input": one_hot_strings, "output": n_b}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return 2
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return 2
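# Hedged sanity-check sketch (an illustrative addition, not part of the
# original module): the docstring example '1010100' contains three 1s, so its
# parity class is 1, matching the label computed in `sample_batch`.
if __name__ == '__main__':
  bits = jnp.array([1, 0, 1, 0, 1, 0, 0])
  print(int(jnp.sum(bits) % 2))  # 1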
|
neural_networks_chomsky_hierarchy-main
|
tasks/regular/parity_check.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute the final state after randomly walking on a circle."""
import functools
import chex
import jax
import jax.nn as jnn
import jax.numpy as jnp
import jax.random as jrandom
from neural_networks_chomsky_hierarchy.tasks import task
class CycleNavigation(task.GeneralizationTask):
"""A task with the goal of computing the final state on a circle.
The input is a string of actions, composed of 0s, 1s or -1s. The actions give
directions to take on a finite length circle (0 is for stay, 1 is for right,
-1 is for left). The goal is to give the final position on the circle after
all the actions have been taken. The agent starts at position 0.
  By default, the length of the circle is 5.
Examples:
1 -1 0 -1 -1 -> -2 = class 3
1 1 1 -1 -> 2 = class 2
Note that the sampling is jittable so it is fast.
"""
@property
def _cycle_length(self) -> int:
"""Returns the cycle length, number of possible states."""
return 5
@functools.partial(jax.jit, static_argnums=(0, 2, 3))
def sample_batch(self, rng: chex.PRNGKey, batch_size: int,
length: int) -> task.Batch:
"""Returns a batch of strings and the expected class."""
actions = jrandom.randint(
rng, shape=(batch_size, length), minval=0, maxval=3)
final_states = jnp.sum(actions - 1, axis=1) % self._cycle_length
final_states = jnn.one_hot(final_states, num_classes=self.output_size)
one_hot_strings = jnn.one_hot(actions, num_classes=self.input_size)
return {"input": one_hot_strings, "output": final_states}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return 3
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return self._cycle_length
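# Hedged worked example (an illustrative addition, not part of the original
# module): the sampled tokens {0, 1, 2} are shifted by -1 to moves
# {-1, 0, +1}, summed, and reduced modulo the cycle length, matching the
# docstring example "1 -1 0 -1 -1 -> -2 = class 3".
if __name__ == '__main__':
  moves = jnp.array([1, -1, 0, -1, -1])
  print(int(jnp.sum(moves) % 5))  # (-2) mod 5 = 3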
|
neural_networks_chomsky_hierarchy-main
|
tasks/regular/cycle_navigation.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Modular arithmetic without brackets.
Note that this task generates samples with a jittable function, and is therefore
much faster than its 'brackets' counterpart, which requires simulating the full
context-free (CF) grammar and is not jittable.
"""
import functools
from typing import Optional, Sequence
import jax
import jax.nn as jnn
import jax.numpy as jnp
import jax.random as jrandom
from neural_networks_chomsky_hierarchy.tasks import task
# Public as this may be used to encode/decode strings of numbers/symbols.
OP_BY_CHARACTER = {'+': 0, '-': 1, '*': 2, '_': 3}
def _replace_subtractions(expression: jnp.ndarray, modulus: int) -> jnp.ndarray:
"""Replaces subtractions in an expression by additions with the inverse.
e.g. the expression [1, -, 3] results in [1, +, -3].
Args:
expression: Encoded expression (a 1D array of integers) in which to replace
subtractions.
modulus: The modulus to use for the modular arithmetic.
Returns:
The expression with all subtractions replaced by additions with the inverse.
"""
if expression.size < 2:
return expression
mask = (expression == modulus + OP_BY_CHARACTER['-'])
subtract_replaced = jnp.where(mask, modulus + OP_BY_CHARACTER['+'],
expression)
return subtract_replaced.at[2:].multiply(1 - 2 * mask[1:-1])
def _perform_multiplications(expression: jnp.ndarray,
modulus: int) -> jnp.ndarray:
"""Performs all multiplications in an expression containing only + and *.
  This is done at a fixed length, and the result is zero-padded to achieve it.
Since the result of performing multiplications is an expression containing
only + operators, the operators are dropped from the output. For example, the
expression [1, +, 3, *, 4] results in [1, 12, 0].
Args:
expression: Encoded expression in which to perform multiplications.
modulus: The modulus to use for the modular arithmetic.
Returns:
An array with the results of the multiplications (potentially zero-padded).
"""
term_ids = jnp.cumsum(expression == modulus + OP_BY_CHARACTER['+'])[::2]
# Segment_prod can only be jit-compiled with a fixed number of segments.
# Therefore, we have to set to the maximum number of terms possible and
# mask out superfluous segment results with zeros afterwards.
maximum_term_number = expression.shape[0] // 2 + 1
products = jax.ops.segment_prod(
expression[::2],
term_ids,
num_segments=maximum_term_number,
indices_are_sorted=True)
valid_segment_mask = jnp.arange(maximum_term_number) <= term_ids[-1]
return products * valid_segment_mask
def _replace_blanks(expression: jnp.ndarray, modulus: int) -> jnp.ndarray:
"""Replaces blank symbols in expression with either `+` or `0`.
Depending on whether the blank symbol is at the position of an operator or a
residual, the blank symbol is replaced with a `+` operator or a `0`.
Args:
expression: Encoded expression in which to replace blank symbols.
modulus: The modulus to use for the modular arithmetic.
Returns:
An array with blank symbols replaced by either `+` or `0`.
"""
mask = (expression == OP_BY_CHARACTER['_'] + modulus)
operator_mask = mask.at[::2].set(False)
residual_mask = mask.at[1::2].set(False)
blanks_replaced = jnp.where(operator_mask, OP_BY_CHARACTER['+'] + modulus,
expression)
blanks_replaced = jnp.where(residual_mask, 0, blanks_replaced)
return blanks_replaced
def _evaluate_expression(expression: jnp.ndarray, modulus: int) -> jnp.ndarray:
"""Returns the result of evaluating a modular arithmetic expression."""
expression = _replace_blanks(expression, modulus)
expression = _replace_subtractions(expression, modulus)
additive_terms = _perform_multiplications(expression, modulus)
return jnp.sum(additive_terms) % modulus
class ModularArithmetic(task.GeneralizationTask):
"""A task with the goal of reducing a simple arithmetic expression.
The input is a string, composed of numbers (in {0, ..., modulus-1}), and
operators (in {+, -, *}). The output is the reduced value of this expression,
which is also in {0, ..., modulus-1}.
Examples (modulo 5):
1 + 2 * 3 = 2
1 - 1 - 1 = 4
0 * 1 + 4 * 3 - 2 = 0
Note that the input strings are always of odd length.
"""
def __init__(self,
modulus: int,
*args,
operators: Optional[Sequence[str]] = None,
**kwargs):
"""Initializes the modular arithmetic task.
Args:
modulus: The modulus used for the computation.
*args: Args for the base task class.
operators: Operators to be used in the sequences. By default it's None,
meaning all operators available are used.
**kwargs: Kwargs for the base task class.
"""
super().__init__(*args, **kwargs)
self._modulus = modulus
if operators is None:
operators = ('+', '*', '-')
    # Materialize the operator codes as a tuple so they can be reused across
    # repeated calls to `sample_batch` (a generator would be exhausted).
    self._operators = tuple(OP_BY_CHARACTER[op] for op in operators)
@functools.partial(jax.jit, static_argnums=(0, 2, 3))
def sample_batch(
self,
rng: jnp.ndarray,
batch_size: int,
length: int,
) -> task.Batch:
"""Returns a batch of modular arithmetic expressions and their labels.
Args:
rng: The jax random number generator.
batch_size: The size of the batch returned.
length: The length of the sequence. As this length must be odd for the
        modular arithmetic dataset, if it's not, we force it to be odd by
        subtracting one from the length passed.
"""
    # Subtract one from the length if it's not odd already.
if length % 2 != 1:
length -= 1
batch = jnp.empty((batch_size, length), dtype=int)
rng1, rng2 = jax.random.split(rng)
remainders = jax.random.randint(rng1,
(batch_size, length // 2 + 1), 0,
self._modulus)
ops = self._modulus + jnp.array(list(self._operators))
operations = jrandom.choice(rng2, ops, (batch_size, length // 2))
batch = batch.at[:, ::2].set(remainders)
expressions = batch.at[:, 1::2].set(operations)
evaluate = functools.partial(_evaluate_expression, modulus=self._modulus)
labels = jax.vmap(evaluate)(expressions)
labels = jnn.one_hot(labels, self._modulus)
one_hot_expressions = jnn.one_hot(expressions,
self._modulus + len(OP_BY_CHARACTER))
return {'input': one_hot_expressions, 'output': labels}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return self._modulus + len(OP_BY_CHARACTER)
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return self._modulus
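# Hedged worked example (an illustrative addition, not part of the original
# module): residuals sit at even positions and operators are encoded as
# modulus + OP_BY_CHARACTER[op] at odd positions, so "1 + 2 * 3" with
# modulus 5 becomes [1, 5, 2, 7, 3] and evaluates to (1 + 2 * 3) % 5 = 2.
if __name__ == '__main__':
  modulus = 5
  encoded = jnp.array([1,
                       modulus + OP_BY_CHARACTER['+'],
                       2,
                       modulus + OP_BY_CHARACTER['*'],
                       3])
  print(int(_evaluate_expression(encoded, modulus)))  # 2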
|
neural_networks_chomsky_hierarchy-main
|
tasks/regular/modular_arithmetic.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Curricula over sequence lengths used to evaluate length generalization.
Allows sampling different sequence lengths during training. For instance, one
might want to start with length=1 and regularly increase the length by 1 every
50k steps.
"""
import abc
from collections.abc import Collection
import random
import numpy as np
class Curriculum(abc.ABC):
"""Curriculum to sample lengths."""
@abc.abstractmethod
def sample_sequence_length(self, step: int) -> int:
"""Samples a sequence length from the current distribution."""
class FixedCurriculum(Curriculum):
"""A fixed curriculum, always sampling the same sequence length."""
def __init__(self, sequence_length: int):
"""Initializes.
Args:
sequence_length: The sequence length to sample.
"""
super().__init__()
self._sequence_length = sequence_length
def sample_sequence_length(self, step: int) -> int:
"""Returns a fixed sequence length."""
del step
return self._sequence_length
class UniformCurriculum(Curriculum):
"""A uniform curriculum, sampling different sequence lengths."""
def __init__(self, values: Collection[int]):
"""Initializes.
Args:
values: The sequence lengths to sample.
"""
super().__init__()
self._values = tuple(values)
def sample_sequence_length(self, step: int) -> int:
"""Returns a sequence length sampled from a uniform distribution."""
del step
return random.choice(self._values)
class ReverseExponentialCurriculum(Curriculum):
"""A reverse exponential curriculum, sampling different sequence lengths."""
  def __init__(self, values: Collection[int], tau: float):
"""Initializes.
Args:
values: The sequence lengths to sample.
      tau: The exponential rate to use. The probability of sampling length v
        is proportional to tau**v.
"""
super().__init__()
self._values = tuple(values)
self._tau = tau
def sample_sequence_length(self, step: int) -> int:
"""Returns a length sampled from a reverse exponential distribution."""
del step
probs = self._tau**np.array(self._values)
probs = np.array(probs, dtype=np.float32)
probs = probs / np.sum(probs)
return np.random.choice(self._values, p=probs)
class RegularIncreaseCurriculum(Curriculum):
"""Curriculum for sequence lengths with a regular increase."""
def __init__(self, initial_sequence_length: int, increase_frequency: int,
increase_amount: int, sample_all_length: bool):
"""Initializes.
Args:
initial_sequence_length: The value of the sequence length at the beginning
of the curriculum.
increase_frequency: How often we increase the possible sequence length.
increase_amount: The amount of the increase in length.
      sample_all_length: Whether to sample from all lengths made available so
        far by the curriculum, or just return the current one.
"""
super().__init__()
self._initial_sequence_length = initial_sequence_length
self._increase_frequency = increase_frequency
self._increase_amount = increase_amount
self._sample_all_length = sample_all_length
def sample_sequence_length(self, step: int) -> int:
"""Returns a sequence length from the curriculum with the current step."""
if not self._sample_all_length:
return self._initial_sequence_length + self._increase_amount * (
step // self._increase_frequency
)
return (
self._initial_sequence_length
+ self._increase_amount
* np.random.randint(0, step // self._increase_frequency + 1)
)
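# Hedged usage sketch (an illustrative addition, not part of the original
# module): with an initial length of 1, an increase of 1 every 2 steps and
# sample_all_length=False, the sampled length grows deterministically with the
# training step.
if __name__ == '__main__':
  curriculum = RegularIncreaseCurriculum(
      initial_sequence_length=1,
      increase_frequency=2,
      increase_amount=1,
      sample_all_length=False,
  )
  print([curriculum.sample_sequence_length(step) for step in range(6)])
  # [1, 1, 2, 2, 3, 3]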
|
neural_networks_chomsky_hierarchy-main
|
experiments/curriculum.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Constants for our length generalization experiments."""
import functools
import haiku as hk
from neural_networks_chomsky_hierarchy.experiments import curriculum as curriculum_lib
from neural_networks_chomsky_hierarchy.models import ndstack_rnn
from neural_networks_chomsky_hierarchy.models import rnn
from neural_networks_chomsky_hierarchy.models import stack_rnn
from neural_networks_chomsky_hierarchy.models import tape_rnn
from neural_networks_chomsky_hierarchy.models import transformer
from neural_networks_chomsky_hierarchy.tasks.cs import binary_addition
from neural_networks_chomsky_hierarchy.tasks.cs import binary_multiplication
from neural_networks_chomsky_hierarchy.tasks.cs import bucket_sort
from neural_networks_chomsky_hierarchy.tasks.cs import compute_sqrt
from neural_networks_chomsky_hierarchy.tasks.cs import duplicate_string
from neural_networks_chomsky_hierarchy.tasks.cs import missing_duplicate_string
from neural_networks_chomsky_hierarchy.tasks.cs import odds_first
from neural_networks_chomsky_hierarchy.tasks.dcf import modular_arithmetic_brackets
from neural_networks_chomsky_hierarchy.tasks.dcf import reverse_string
from neural_networks_chomsky_hierarchy.tasks.dcf import solve_equation
from neural_networks_chomsky_hierarchy.tasks.dcf import stack_manipulation
from neural_networks_chomsky_hierarchy.tasks.regular import cycle_navigation
from neural_networks_chomsky_hierarchy.tasks.regular import even_pairs
from neural_networks_chomsky_hierarchy.tasks.regular import modular_arithmetic
from neural_networks_chomsky_hierarchy.tasks.regular import parity_check
MODEL_BUILDERS = {
'rnn':
functools.partial(rnn.make_rnn, rnn_core=hk.VanillaRNN),
'lstm':
functools.partial(rnn.make_rnn, rnn_core=hk.LSTM),
'stack_rnn':
functools.partial(
rnn.make_rnn,
rnn_core=stack_rnn.StackRNNCore,
inner_core=hk.VanillaRNN),
'ndstack_rnn':
functools.partial(
rnn.make_rnn,
rnn_core=ndstack_rnn.NDStackRNNCore,
inner_core=hk.VanillaRNN),
'stack_lstm':
functools.partial(
rnn.make_rnn, rnn_core=stack_rnn.StackRNNCore, inner_core=hk.LSTM),
'transformer_encoder':
transformer.make_transformer_encoder,
'transformer':
transformer.make_transformer,
'tape_rnn':
functools.partial(
rnn.make_rnn,
rnn_core=tape_rnn.TapeInputLengthJumpCore,
inner_core=hk.VanillaRNN),
}
CURRICULUM_BUILDERS = {
'fixed': curriculum_lib.FixedCurriculum,
'regular_increase': curriculum_lib.RegularIncreaseCurriculum,
'reverse_exponential': curriculum_lib.ReverseExponentialCurriculum,
'uniform': curriculum_lib.UniformCurriculum,
}
TASK_BUILDERS = {
'modular_arithmetic':
modular_arithmetic.ModularArithmetic,
'parity_check':
parity_check.ParityCheck,
'even_pairs':
even_pairs.EvenPairs,
'cycle_navigation':
cycle_navigation.CycleNavigation,
'modular_arithmetic_brackets':
functools.partial(
modular_arithmetic_brackets.ModularArithmeticBrackets, mult=True),
'reverse_string':
reverse_string.ReverseString,
'missing_duplicate_string':
missing_duplicate_string.MissingDuplicateString,
'duplicate_string':
duplicate_string.DuplicateString,
'binary_addition':
binary_addition.BinaryAddition,
'binary_multiplication':
binary_multiplication.BinaryMultiplication,
'compute_sqrt':
compute_sqrt.ComputeSqrt,
'odds_first':
odds_first.OddsFirst,
'solve_equation':
solve_equation.SolveEquation,
'stack_manipulation':
stack_manipulation.StackManipulation,
'bucket_sort':
bucket_sort.BucketSort,
}
TASK_LEVELS = {
'modular_arithmetic': 'regular',
'parity_check': 'regular',
'even_pairs': 'regular',
'cycle_navigation': 'regular',
'modular_arithmetic_brackets': 'dcf',
'reverse_string': 'dcf',
'stack_manipulation': 'dcf',
'solve_equation': 'dcf',
'missing_duplicate_string': 'cs',
'compute_sqrt': 'cs',
'duplicate_string': 'cs',
'binary_addition': 'cs',
'binary_multiplication': 'cs',
'odds_first': 'cs',
'bucket_sort': 'cs',
}
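# Hedged usage sketch (an illustrative addition, not part of the original
# module): tasks can be looked up by name in the dictionaries above; model
# builders are instantiated the same way (see `experiments/example.py` for the
# keyword arguments each architecture expects).
if __name__ == '__main__':
  task = TASK_BUILDERS['parity_check']()
  print(task.input_size, task.output_size, TASK_LEVELS['parity_check'])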
|
neural_networks_chomsky_hierarchy-main
|
experiments/constants.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluation of a network on sequences of different lengths."""
import dataclasses
import random
from typing import Any, Callable, Mapping
from absl import logging
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import tqdm
_Batch = Mapping[str, jnp.ndarray]
@dataclasses.dataclass
class EvaluationParams:
"""The parameters used for range evaluation of networks."""
model: hk.Transformed
params: hk.Params
accuracy_fn: Callable[[jnp.ndarray, jnp.ndarray], jnp.ndarray]
sample_batch: Callable[[jnp.ndarray, int, int], _Batch]
max_test_length: int
total_batch_size: int
sub_batch_size: int # We use this to avoid memory overflow.
is_autoregressive: bool = False
def range_evaluation(
eval_params: EvaluationParams,
use_tqdm: bool = False,
) -> list[Mapping[str, Any]]:
"""Evaluates the model on longer, never seen strings and log the results.
Args:
eval_params: The evaluation parameters, see above.
use_tqdm: Whether to use a progress bar with tqdm.
Returns:
The list of dicts containing the accuracies.
"""
model = eval_params.model
params = eval_params.params
random.seed(1)
np.random.seed(1)
rng_seq = hk.PRNGSequence(1)
if eval_params.is_autoregressive:
apply_fn = jax.jit(model.apply, static_argnames=('sample',))
else:
apply_fn = jax.jit(model.apply)
results = []
lengths = range(1, eval_params.max_test_length + 1)
if use_tqdm:
lengths = tqdm.tqdm(lengths)
for length in lengths:
    # We need to clear the cache of the jitted function to avoid memory
    # overflow, as we compile one version per length, which can be a lot.
apply_fn.clear_cache()
sub_accuracies = []
for _ in range(eval_params.total_batch_size // eval_params.sub_batch_size):
batch = eval_params.sample_batch(
next(rng_seq), eval_params.sub_batch_size, length)
if eval_params.is_autoregressive:
outputs = apply_fn(
params,
next(rng_seq),
batch['input'],
jnp.empty_like(batch['output']),
sample=True)
else:
outputs = apply_fn(params, next(rng_seq), batch['input'])
sub_accuracies.append(
float(np.mean(eval_params.accuracy_fn(outputs, batch['output']))))
log_data = {
'length': length,
'accuracy': np.mean(sub_accuracies),
}
logging.info(log_data)
results.append(log_data)
return results
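# Hedged usage sketch (an illustrative addition, kept as a comment because it
# needs a trained model): `range_evaluation` expects a transformed Haiku model,
# its trained parameters, and the task's sampling and accuracy functions. The
# names below are placeholders for objects produced by the training loop.
#
#   eval_params = EvaluationParams(
#       model=transformed_model,     # hk.Transformed
#       params=trained_params,       # hk.Params
#       accuracy_fn=task.accuracy_fn,
#       sample_batch=task.sample_batch,
#       max_test_length=100,
#       total_batch_size=512,
#       sub_batch_size=64,
#       is_autoregressive=False,
#   )
#   results = range_evaluation(eval_params, use_tqdm=True)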
|
neural_networks_chomsky_hierarchy-main
|
experiments/range_evaluation.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example script to train and evaluate a network."""
from absl import app
from absl import flags
import haiku as hk
import jax.numpy as jnp
import numpy as np
from neural_networks_chomsky_hierarchy.experiments import constants
from neural_networks_chomsky_hierarchy.experiments import curriculum as curriculum_lib
from neural_networks_chomsky_hierarchy.experiments import training
from neural_networks_chomsky_hierarchy.experiments import utils
_BATCH_SIZE = flags.DEFINE_integer(
'batch_size',
default=128,
help='Training batch size.',
lower_bound=1,
)
_SEQUENCE_LENGTH = flags.DEFINE_integer(
'sequence_length',
default=40,
help='Maximum training sequence length.',
lower_bound=1,
)
_TASK = flags.DEFINE_string(
'task',
default='even_pairs',
help='Length generalization task (see `constants.py` for other tasks).',
)
_ARCHITECTURE = flags.DEFINE_string(
'architecture',
default='tape_rnn',
help='Model architecture (see `constants.py` for other architectures).',
)
_IS_AUTOREGRESSIVE = flags.DEFINE_boolean(
'is_autoregressive',
default=False,
help='Whether to use autoregressive sampling or not.',
)
_COMPUTATION_STEPS_MULT = flags.DEFINE_integer(
'computation_steps_mult',
default=0,
help=(
'The amount of computation tokens to append to the input tape (defined'
' as a multiple of the input length)'
),
lower_bound=0,
)
# The architecture parameters depend on the architecture, so we cannot define
# them via flags. See `constants.py` for the required values.
_ARCHITECTURE_PARAMS = {
'hidden_size': 256,
'memory_cell_size': 8,
'memory_size': 40,
}
def main(unused_argv) -> None:
# Create the task.
curriculum = curriculum_lib.UniformCurriculum(
values=list(range(1, _SEQUENCE_LENGTH.value + 1))
)
task = constants.TASK_BUILDERS[_TASK.value]()
# Create the model.
single_output = task.output_length(10) == 1
model = constants.MODEL_BUILDERS[_ARCHITECTURE.value](
output_size=task.output_size,
return_all_outputs=True,
**_ARCHITECTURE_PARAMS,
)
if _IS_AUTOREGRESSIVE.value:
if 'transformer' not in _ARCHITECTURE.value:
model = utils.make_model_with_targets_as_input(
model, _COMPUTATION_STEPS_MULT.value
)
model = utils.add_sampling_to_autoregressive_model(model, single_output)
else:
model = utils.make_model_with_empty_targets(
model, task, _COMPUTATION_STEPS_MULT.value, single_output
)
model = hk.transform(model)
# Create the loss and accuracy based on the pointwise ones.
def loss_fn(output, target):
loss = jnp.mean(jnp.sum(task.pointwise_loss_fn(output, target), axis=-1))
return loss, {}
def accuracy_fn(output, target):
mask = task.accuracy_mask(target)
return jnp.sum(mask * task.accuracy_fn(output, target)) / jnp.sum(mask)
# Create the final training parameters.
training_params = training.ClassicTrainingParams(
seed=0,
model_init_seed=0,
training_steps=10_000,
log_frequency=100,
length_curriculum=curriculum,
batch_size=_BATCH_SIZE.value,
task=task,
model=model,
loss_fn=loss_fn,
learning_rate=1e-3,
accuracy_fn=accuracy_fn,
compute_full_range_test=True,
max_range_test_length=100,
range_test_total_batch_size=512,
range_test_sub_batch_size=64,
is_autoregressive=_IS_AUTOREGRESSIVE.value,
)
training_worker = training.TrainingWorker(training_params, use_tqdm=True)
_, eval_results, _ = training_worker.run()
# Gather results and print final score.
accuracies = [r['accuracy'] for r in eval_results]
score = np.mean(accuracies[_SEQUENCE_LENGTH.value + 1 :])
print(f'Network score: {score}')
if __name__ == '__main__':
app.run(main)
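# Hedged usage note (an illustrative addition): assuming the script is run from
# the repository root as a module, the flags defined above select the task and
# architecture, e.g.:
#
#   python -m neural_networks_chomsky_hierarchy.experiments.example \
#     --task=parity_check --architecture=stack_rnn --batch_size=128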
|
neural_networks_chomsky_hierarchy-main
|
experiments/example.py
|