python_code | repo_name | file_path
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple implementation of Bootstrapped DQN with prior networks.
References:
1. "Deep Exploration via Bootstrapped DQN" (Osband et al., 2016)
2. "Deep Exploration via Randomized Value Functions" (Osband et al., 2017)
3. "Randomized Prior Functions for Deep RL" (Osband et al, 2018)
Links:
1. https://arxiv.org/abs/1602.04621
2. https://arxiv.org/abs/1703.07608
3. https://arxiv.org/abs/1806.03335
Notes:
- This agent is implemented with TensorFlow 2 and Sonnet 2. For installation
instructions for these libraries, see the README.md in the parent folder.
- This implementation is potentially inefficient, as it does not parallelise
computation across the ensemble for simplicity and readability.
"""
import copy
from typing import Callable, NamedTuple, Optional, Sequence
from bsuite.baselines import base
from bsuite.baselines.utils import replay
import dm_env
from dm_env import specs
import numpy as np
import sonnet as snt
import tensorflow as tf
import tree
class BootstrappedDqn(base.Agent):
"""Bootstrapped DQN with additive prior functions."""
def __init__(
self,
obs_spec: specs.Array,
action_spec: specs.DiscreteArray,
ensemble: Sequence[snt.Module],
batch_size: int,
discount: float,
replay_capacity: int,
min_replay_size: int,
sgd_period: int,
target_update_period: int,
optimizer: snt.Optimizer,
mask_prob: float,
noise_scale: float,
epsilon_fn: Callable[[int], float] = lambda _: 0.,
seed: Optional[int] = None,
):
"""Bootstrapped DQN with additive prior functions."""
# Agent components.
self._ensemble = ensemble
self._forward = [tf.function(net) for net in ensemble]
self._target_ensemble = [copy.deepcopy(network) for network in ensemble]
self._num_ensemble = len(ensemble)
self._optimizer = optimizer
self._replay = replay.Replay(capacity=replay_capacity)
# Create variables for each network in the ensemble
for network in ensemble:
snt.build(network, (None, *obs_spec.shape))
# Agent hyperparameters.
self._num_actions = action_spec.num_values
self._batch_size = batch_size
self._sgd_period = sgd_period
self._target_update_period = target_update_period
self._min_replay_size = min_replay_size
self._epsilon_fn = epsilon_fn
self._mask_prob = mask_prob
self._noise_scale = noise_scale
self._rng = np.random.RandomState(seed)
self._discount = discount
# Agent state.
self._total_steps = tf.Variable(1)
self._active_head = 0
tf.random.set_seed(seed)
@tf.function
def _step(self, transitions: Sequence[tf.Tensor]):
"""Does a step of SGD for the whole ensemble over `transitions`."""
o_tm1, a_tm1, r_t, d_t, o_t, m_t, z_t = transitions
variables = tree.flatten(
[model.trainable_variables for model in self._ensemble])
with tf.GradientTape() as tape:
losses = []
for k in range(self._num_ensemble):
net = self._ensemble[k]
target_net = self._target_ensemble[k]
# Q-learning loss with added reward noise + half-in bootstrap.
q_values = net(o_tm1)
one_hot_actions = tf.one_hot(a_tm1, depth=self._num_actions)
train_value = tf.reduce_sum(q_values * one_hot_actions, axis=-1)
target_value = tf.stop_gradient(tf.reduce_max(target_net(o_t), axis=-1))
target_y = r_t + z_t[:, k] + self._discount * d_t * target_value
loss = tf.square(train_value - target_y) * m_t[:, k]
losses.append(loss)
loss = tf.reduce_mean(tf.stack(losses))
gradients = tape.gradient(loss, variables)
self._total_steps.assign_add(1)
self._optimizer.apply(gradients, variables)
# Periodically update the target network.
if tf.math.mod(self._total_steps, self._target_update_period) == 0:
for k in range(self._num_ensemble):
for src, dest in zip(self._ensemble[k].variables,
self._target_ensemble[k].variables):
dest.assign(src)
def select_action(self, timestep: dm_env.TimeStep) -> base.Action:
"""Select values via Thompson sampling, then use epsilon-greedy policy."""
if self._rng.rand() < self._epsilon_fn(self._total_steps.numpy()):
return self._rng.randint(self._num_actions)
# Greedy policy, breaking ties uniformly at random.
batched_obs = tf.expand_dims(timestep.observation, axis=0)
q_values = self._forward[self._active_head](batched_obs)[0].numpy()
action = self._rng.choice(np.flatnonzero(q_values == q_values.max()))
return int(action)
def update(
self,
timestep: dm_env.TimeStep,
action: base.Action,
new_timestep: dm_env.TimeStep,
):
"""Update the agent: add transition to replay and periodically do SGD."""
if new_timestep.last():
self._active_head = self._rng.randint(self._num_ensemble)
self._replay.add(
TransitionWithMaskAndNoise(
o_tm1=timestep.observation,
a_tm1=action,
r_t=np.float32(new_timestep.reward),
d_t=np.float32(new_timestep.discount),
o_t=new_timestep.observation,
m_t=self._rng.binomial(1, self._mask_prob,
self._num_ensemble).astype(np.float32),
z_t=self._rng.randn(self._num_ensemble).astype(np.float32) *
self._noise_scale,
))
if self._replay.size < self._min_replay_size:
return
if tf.math.mod(self._total_steps, self._sgd_period) == 0:
minibatch = self._replay.sample(self._batch_size)
minibatch = [tf.convert_to_tensor(x) for x in minibatch]
self._step(minibatch)
class TransitionWithMaskAndNoise(NamedTuple):
o_tm1: np.ndarray
a_tm1: base.Action
r_t: float
d_t: float
o_t: np.ndarray
m_t: np.ndarray
z_t: np.ndarray
class NetworkWithPrior(snt.Module):
"""Combines network with additive untrainable "prior network"."""
def __init__(self,
network: snt.Module,
prior_network: snt.Module,
prior_scale: float = 1.):
super().__init__(name='network_with_prior')
self._network = network
self._prior_network = prior_network
self._prior_scale = prior_scale
def __call__(self, inputs: tf.Tensor) -> tf.Tensor:
q_values = self._network(inputs)
prior_q_values = self._prior_network(inputs)
return q_values + self._prior_scale * tf.stop_gradient(prior_q_values)
def make_ensemble(num_actions: int,
num_ensemble: int = 20,
num_hidden_layers: int = 2,
num_units: int = 50,
prior_scale: float = 3.) -> Sequence[snt.Module]:
"""Convenience function to make an ensemble from flags."""
output_sizes = [num_units] * num_hidden_layers + [num_actions]
ensemble = []
for _ in range(num_ensemble):
network = snt.Sequential([
snt.Flatten(),
snt.nets.MLP(output_sizes),
])
prior_network = snt.Sequential([
snt.Flatten(),
snt.nets.MLP(output_sizes),
])
ensemble.append(NetworkWithPrior(network, prior_network, prior_scale))
return ensemble
def default_agent(
obs_spec: specs.Array,
action_spec: specs.DiscreteArray,
num_ensemble: int = 20,
) -> BootstrappedDqn:
"""Initialize a Bootstrapped DQN agent with default parameters."""
ensemble = make_ensemble(
num_actions=action_spec.num_values, num_ensemble=num_ensemble)
optimizer = snt.optimizers.Adam(learning_rate=1e-3)
return BootstrappedDqn(
obs_spec=obs_spec,
action_spec=action_spec,
ensemble=ensemble,
batch_size=128,
discount=.99,
replay_capacity=10000,
min_replay_size=128,
sgd_period=1,
target_update_period=4,
optimizer=optimizer,
mask_prob=0.5,
noise_scale=0.0,
epsilon_fn=lambda t: 10 / (10 + t),
seed=42,
)
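# --- Illustrative sketch (not part of the original file) ---
# A minimal NumPy demonstration of the per-transition bootstrap mask `m_t`
# and reward noise `z_t` used above, and of the per-head TD target computed
# in `_step`. All numbers below are hypothetical hyperparameters.
if __name__ == '__main__':
  import numpy as np
  rng = np.random.RandomState(0)
  num_ensemble, mask_prob, noise_scale = 5, 0.5, 0.1
  # Each ensemble member trains on this transition only where its mask is 1.
  m_t = rng.binomial(1, mask_prob, num_ensemble).astype(np.float32)
  # Each member also receives its own Gaussian perturbation of the reward.
  z_t = rng.randn(num_ensemble).astype(np.float32) * noise_scale
  r_t, d_t, discount, max_q_next = 1.0, 1.0, 0.99, 0.7
  target_y = r_t + z_t + discount * d_t * max_q_next  # one TD target per head
  print('mask:', m_t)
  print('per-head targets:', target_y)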
|
bsuite-master
|
bsuite/baselines/tf/boot_dqn/agent.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Basic test coverage for agent training."""
from absl.testing import absltest
from absl.testing import parameterized
from bsuite import bsuite
from bsuite import sweep
from bsuite.baselines import experiment
from bsuite.baselines.tf import boot_dqn
class RunTest(parameterized.TestCase):
@parameterized.parameters(*sweep.TESTING)
def test_run(self, bsuite_id: str):
env = bsuite.load_from_id(bsuite_id)
agent = boot_dqn.default_agent(
env.observation_spec(), env.action_spec(), num_ensemble=2)
experiment.run(
agent=agent,
environment=env,
num_episodes=5)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/baselines/tf/boot_dqn/run_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Run an actor-critic agent instance on a bsuite experiment."""
from absl import app
from absl import flags
import bsuite
from bsuite import sweep
from bsuite.baselines import experiment
from bsuite.baselines.tf import actor_critic_rnn
from bsuite.baselines.utils import pool
import sonnet as snt
# Internal imports.
# Experiment flags.
flags.DEFINE_string(
'bsuite_id', 'catch/0', 'BSuite identifier. '
'This global flag can be used to control which environment is loaded.')
flags.DEFINE_string('save_path', '/tmp/bsuite', 'where to save bsuite results')
flags.DEFINE_enum('logging_mode', 'csv', ['csv', 'sqlite', 'terminal'],
'which form of logging to use for bsuite results')
flags.DEFINE_boolean('overwrite', False, 'overwrite csv logging if found')
flags.DEFINE_integer('num_episodes', None, 'Overrides number of training eps.')
# algorithm
flags.DEFINE_integer('seed', 42, 'seed for random number generation')
flags.DEFINE_integer('num_hidden_layers', 3, 'number of hidden layers')
flags.DEFINE_integer('num_units', 64, 'number of units per hidden layer')
flags.DEFINE_float('learning_rate', 1e-3, 'the learning rate')
flags.DEFINE_integer('sequence_length', 32, 'number of transitions to batch')
flags.DEFINE_float('td_lambda', 0.9, 'mixing parameter for bootstrapping')
flags.DEFINE_float('discount', .99, 'discounting on the agent side')
flags.DEFINE_boolean('verbose', True, 'whether to log to std output')
FLAGS = flags.FLAGS
def run(bsuite_id: str) -> str:
"""Runs an A2C agent on a given bsuite environment, logging to CSV."""
env = bsuite.load_and_record(
bsuite_id=bsuite_id,
save_path=FLAGS.save_path,
logging_mode=FLAGS.logging_mode,
overwrite=FLAGS.overwrite,
)
obs_spec = env.observation_spec()
action_spec = env.action_spec()
num_actions = env.action_spec().num_values
hidden_sizes = [FLAGS.num_units] * FLAGS.num_hidden_layers
network = actor_critic_rnn.PolicyValueRNN(hidden_sizes, num_actions)
agent = actor_critic_rnn.ActorCriticRNN(
obs_spec=obs_spec,
action_spec=action_spec,
network=network,
optimizer=snt.optimizers.Adam(learning_rate=FLAGS.learning_rate),
max_sequence_length=FLAGS.sequence_length,
td_lambda=FLAGS.td_lambda,
discount=FLAGS.discount,
seed=FLAGS.seed,
)
num_episodes = FLAGS.num_episodes or getattr(env, 'bsuite_num_episodes')
experiment.run(
agent=agent,
environment=env,
num_episodes=num_episodes,
verbose=FLAGS.verbose)
return bsuite_id
def main(_):
# Parses whether to run a single bsuite_id, or multiprocess sweep.
bsuite_id = FLAGS.bsuite_id
if bsuite_id in sweep.SWEEP:
print(f'Running single experiment: bsuite_id={bsuite_id}.')
run(bsuite_id)
elif hasattr(sweep, bsuite_id):
bsuite_sweep = getattr(sweep, bsuite_id)
print(f'Running sweep over bsuite_id in sweep.{bsuite_sweep}')
FLAGS.verbose = False
pool.map_mpi(run, bsuite_sweep)
else:
raise ValueError(f'Invalid flag: bsuite_id={bsuite_id}.')
if __name__ == '__main__':
app.run(main)
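# --- Illustrative usage note (not part of the original file) ---
# Hypothetical command line, using the flags defined above:
#   python run.py --bsuite_id=catch/0 --save_path=/tmp/bsuite \
#       --logging_mode=csv --num_episodes=100
# Passing the name of a sweep instead (e.g. --bsuite_id=SWEEP) runs every
# bsuite_id in that sweep via `pool.map_mpi`.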
|
bsuite-master
|
bsuite/baselines/tf/actor_critic_rnn/run.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple TensorFlow 2-based implementation of a recurrent actor-critic."""
from bsuite.baselines.tf.actor_critic_rnn.agent import ActorCriticRNN
from bsuite.baselines.tf.actor_critic_rnn.agent import default_agent
from bsuite.baselines.tf.actor_critic_rnn.agent import PolicyValueRNN
|
bsuite-master
|
bsuite/baselines/tf/actor_critic_rnn/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple TensorFlow-based implementation of a recurrent actor-critic.
References:
1. "Simple Statistical Gradient-Following Algorithms for Connectionist
Reinforcement Learning" (Williams, 1992).
2. "Long Short-Term Memory" (Hochreiter, 1991).
Links:
1. http://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf.
2. https://www.bioinf.jku.at/publications/older/2604.pdf
"""
from typing import Sequence
from bsuite.baselines import base
from bsuite.baselines.utils import sequence
import dm_env
from dm_env import specs
import sonnet as snt
import tensorflow as tf
import tree
import trfl
class ActorCriticRNN(base.Agent):
"""A recurrent TensorFlow-based feedforward actor-critic implementation."""
def __init__(
self,
obs_spec: specs.Array,
action_spec: specs.Array,
network: 'PolicyValueRNN',
optimizer: snt.Optimizer,
max_sequence_length: int,
td_lambda: float,
discount: float,
seed: int,
entropy_cost: float = 0.,
):
"""A recurrent actor-critic agent."""
# Internalise network and optimizer.
self._forward = tf.function(network)
self._network = network
self._optimizer = optimizer
# Initialise recurrent state.
self._state = network.initial_state(1)
self._rollout_initial_state = network.initial_state(1)
# Set seed and internalise hyperparameters.
tf.random.set_seed(seed)
self._discount = discount
self._td_lambda = td_lambda
self._entropy_cost = entropy_cost
# Initialise rolling experience buffer.
self._buffer = sequence.Buffer(obs_spec, action_spec, max_sequence_length)
@tf.function
def _step(self, trajectory: sequence.Trajectory):
"""Do a batch of SGD on actor + critic loss on a sequence of experience."""
observations, actions, rewards, discounts = trajectory
# Add dummy batch dimensions.
actions = tf.expand_dims(actions, axis=-1) # [T, 1]
rewards = tf.expand_dims(rewards, axis=-1) # [T, 1]
discounts = tf.expand_dims(discounts, axis=-1) # [T, 1]
observations = tf.expand_dims(observations, axis=1) # [T+1, 1, ...]
# Extract final observation for bootstrapping.
observations, final_observation = observations[:-1], observations[-1]
with tf.GradientTape() as tape:
# Build actor and critic losses.
(logits, values), state = snt.dynamic_unroll(
self._network, observations, self._rollout_initial_state)
(_, bootstrap_value), state = self._network(final_observation, state)
values = tf.squeeze(values, axis=-1)
bootstrap_value = tf.squeeze(bootstrap_value, axis=-1)
critic_loss, (advantages, _) = trfl.td_lambda(
state_values=values,
rewards=rewards,
pcontinues=self._discount * discounts,
bootstrap_value=bootstrap_value,
lambda_=self._td_lambda)
actor_loss = trfl.discrete_policy_gradient_loss(
logits, actions, advantages)
entropy_loss = trfl.discrete_policy_entropy_loss(logits).loss
loss = actor_loss + critic_loss + self._entropy_cost * entropy_loss
loss = tf.reduce_mean(loss)
gradients = tape.gradient(loss, self._network.trainable_variables)
gradients, _ = tf.clip_by_global_norm(gradients, 5.)
self._optimizer.apply(gradients, self._network.trainable_variables)
return state
def select_action(self, timestep: dm_env.TimeStep) -> base.Action:
"""Selects actions according to the latest softmax policy."""
if timestep.first():
self._state = self._network.initial_state(1)
self._rollout_initial_state = self._network.initial_state(1)
observation = tf.expand_dims(timestep.observation, axis=0)
(logits, _), self._state = self._forward(observation, self._state)
return tf.random.categorical(logits, num_samples=1).numpy().squeeze()
def update(
self,
timestep: dm_env.TimeStep,
action: base.Action,
new_timestep: dm_env.TimeStep,
):
"""Receives a transition and performs a learning update."""
self._buffer.append(timestep, action, new_timestep)
if self._buffer.full() or new_timestep.last():
trajectory = self._buffer.drain()
trajectory = tree.map_structure(tf.convert_to_tensor, trajectory)
self._rollout_initial_state = self._step(trajectory)
class PolicyValueRNN(snt.RNNCore):
"""A recurrent multi-layer perceptron with a value and a policy head."""
def __init__(self, hidden_sizes: Sequence[int], num_actions: int):
super().__init__(name='policy_value_net')
self._torso = snt.nets.MLP(hidden_sizes, activate_final=True, name='torso')
self._core = snt.LSTM(hidden_sizes[-1], name='rnn')
self._policy_head = snt.Linear(num_actions, name='policy_head')
self._value_head = snt.Linear(1, name='value_head')
def __call__(self, inputs: tf.Tensor, state: snt.LSTMState):
flat_inputs = snt.Flatten()(inputs)
embedding = self._torso(flat_inputs)
lstm_output, next_state = self._core(embedding, state)
embedding += tf.nn.relu(lstm_output) # Note: skip connection.
logits = self._policy_head(embedding)
value = self._value_head(embedding)
return (logits, value), next_state
def initial_state(self, *args, **kwargs) -> snt.LSTMState:
"""Creates the core initial state."""
return self._core.initial_state(*args, **kwargs)
def default_agent(obs_spec: specs.Array,
action_spec: specs.DiscreteArray) -> base.Agent:
"""Initialize a DQN agent with default parameters."""
network = PolicyValueRNN(
hidden_sizes=[64, 64],
num_actions=action_spec.num_values,
)
return ActorCriticRNN(
obs_spec=obs_spec,
action_spec=action_spec,
network=network,
optimizer=snt.optimizers.Adam(learning_rate=3e-3),
max_sequence_length=32,
td_lambda=0.9,
discount=0.99,
seed=42,
)
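# --- Illustrative sketch (not part of the original file) ---
# A minimal NumPy illustration of the lambda-return targets that
# `trfl.td_lambda` computes in `_step` above, for a single un-batched
# sequence. The backward recursion is
#   G_t = r_t + p_t * ((1 - lambda) * V(s_{t+1}) + lambda * G_{t+1}),
# bootstrapped with `bootstrap_value` at the end of the sequence.
# All numbers are hypothetical.
if __name__ == '__main__':
  import numpy as np
  rewards = np.array([0.0, 0.0, 1.0], dtype=np.float32)
  pcontinues = np.array([0.99, 0.99, 0.99], dtype=np.float32)  # discount * d_t
  values = np.array([0.3, 0.5, 0.8], dtype=np.float32)
  bootstrap_value, lambda_ = np.float32(0.7), 0.9
  returns = np.zeros_like(rewards)
  next_return, next_value = bootstrap_value, bootstrap_value
  for t in reversed(range(len(rewards))):
    returns[t] = rewards[t] + pcontinues[t] * (
        (1.0 - lambda_) * next_value + lambda_ * next_return)
    next_return, next_value = returns[t], values[t]
  print('lambda-returns:', returns)
  print('advantages:', returns - values)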
|
bsuite-master
|
bsuite/baselines/tf/actor_critic_rnn/agent.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Basic test coverage for agent training."""
from absl.testing import absltest
from absl.testing import parameterized
from bsuite import bsuite
from bsuite import sweep
from bsuite.baselines import experiment
from bsuite.baselines.tf import actor_critic_rnn
class RunTest(parameterized.TestCase):
@parameterized.parameters(*sweep.TESTING)
def test_run(self, bsuite_id: str):
env = bsuite.load_from_id(bsuite_id)
agent = actor_critic_rnn.default_agent(
env.observation_spec(), env.action_spec())
experiment.run(
agent=agent,
environment=env,
num_episodes=5)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/baselines/tf/actor_critic_rnn/run_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Read functionality for local csv-based experiments."""
import glob
import os
from typing import List, Tuple
from bsuite import sweep
from bsuite.logging import csv_logging
from bsuite.logging import logging_utils
import pandas as pd
def load_one_result_set(results_dir: str) -> pd.DataFrame:
"""Returns a pandas DataFrame of bsuite results stored in results_dir."""
data = []
for file_path in glob.glob(os.path.join(results_dir, '*.csv')):
_, name = os.path.split(file_path)
# Rough and ready error-checking for only bsuite csv files.
if not name.startswith(csv_logging.BSUITE_PREFIX):
print('Warning - we recommend you use a fresh folder for bsuite results.')
continue
# Then we will assume that the file is actually a bsuite file
df = pd.read_csv(file_path)
# Use splitext rather than str.strip, which strips characters, not a suffix.
file_bsuite_id = os.path.splitext(name)[0].split(csv_logging.INITIAL_SEPARATOR)[1]
bsuite_id = file_bsuite_id.replace(csv_logging.SAFE_SEPARATOR,
sweep.SEPARATOR)
df['bsuite_id'] = bsuite_id
df['results_dir'] = results_dir
data.append(df)
df = pd.concat(data, sort=False)
return logging_utils.join_metadata(df)
def load_bsuite(
results_dirs: logging_utils.PathCollection
) -> Tuple[pd.DataFrame, List[str]]:
"""Returns a pandas DataFrame of bsuite results."""
return logging_utils.load_multiple_runs(
path_collection=results_dirs,
single_load_fn=load_one_result_set,
)
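# --- Illustrative sketch (not part of the original file) ---
# End-to-end demo: write a few rows with csv_logging.Logger into a temporary
# directory, then load them back with `load_one_result_set`. The bsuite_id
# 'catch/0' and the row contents are illustrative.
if __name__ == '__main__':
  import tempfile
  tmp_dir = tempfile.mkdtemp()
  logger = csv_logging.Logger(bsuite_id='catch/0', results_dir=tmp_dir)
  for episode in range(3):
    logger.write({'steps': episode * 7, 'episode': episode,
                  'episode_return': float(episode)})
  frame = load_one_result_set(tmp_dir)
  print(frame[['bsuite_id', 'episode', 'episode_return']])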
|
bsuite-master
|
bsuite/logging/csv_load.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple logger that pretty-prints to terminal."""
import logging as std_logging
import numbers
from typing import Any, Mapping
from absl import logging
from bsuite import environments
from bsuite.logging import base
from bsuite.utils import wrappers
import dm_env
def wrap_environment(env: environments.Environment,
pretty_print: bool = True,
log_every: bool = False,
log_by_step: bool = False) -> dm_env.Environment:
"""Returns a wrapped environment that logs to terminal."""
# Set logging up to show up in STDERR.
std_logging.getLogger().addHandler(logging.PythonHandler())
logger = Logger(pretty_print, absl_logging=True)
return wrappers.Logging(
env, logger, log_by_step=log_by_step, log_every=log_every)
class Logger(base.Logger):
"""Writes data to terminal."""
def __init__(self, pretty_print: bool = True, absl_logging: bool = False):
self._pretty_print = pretty_print
self._print_fn = logging.info if absl_logging else print
def write(self, data: Mapping[str, Any]):
"""Writes to terminal, pretty-printing the results."""
if self._pretty_print:
data = pretty_dict(data)
self._print_fn(data)
def pretty_dict(data: Mapping[str, Any]) -> str:
"""Prettifies a dictionary into a string as `k1 = v1 | ... | kn = vn`."""
msg = []
for key in sorted(data):
value = value_format(data[key])
msg_pair = f'{key} = {value}'
msg.append(msg_pair)
return ' | '.join(msg)
def value_format(value: Any) -> str:
"""Convenience function for string formatting."""
if isinstance(value, numbers.Integral):
return str(value)
if isinstance(value, numbers.Number):
return f'{value:0.4f}'
return str(value)
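# --- Illustrative sketch (not part of the original file) ---
# Example of the `k1 = v1 | ... | kn = vn` format produced by `pretty_dict`:
# keys are sorted, integers printed as-is, floats rounded to 4 decimal places.
if __name__ == '__main__':
  example = {'episode': 3, 'episode_return': 0.123456, 'agent': 'a2c'}
  print(pretty_dict(example))
  # -> agent = a2c | episode = 3 | episode_return = 0.1235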
|
bsuite-master
|
bsuite/logging/terminal_logging.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.utils.csv_load."""
import random
import sys
from absl import flags
from absl.testing import absltest
from bsuite.logging import csv_load
from bsuite.logging import csv_logging
FLAGS = flags.FLAGS
_NUM_WRITES = 10
def generate_results(bsuite_id, results_dir):
logger = csv_logging.Logger(bsuite_id, results_dir)
steps_per_episode = 7
total_return = 0.0
for i in range(_NUM_WRITES):
episode_return = random.random()
total_return += episode_return
data = dict(
steps=i * steps_per_episode,
episode=i,
total_return=total_return,
episode_len=steps_per_episode,
episode_return=episode_return,
extra=42,
)
logger.write(data)
class CsvLoadTest(absltest.TestCase):
def test_logger(self):
try:
flags.FLAGS.test_tmpdir
except flags.UnparsedFlagAccessError:
# Need to initialize flags when running `pytest`.
flags.FLAGS(sys.argv)
results_dir = self.create_tempdir().full_path
generate_results(bsuite_id='catch/0', results_dir=results_dir)
generate_results(bsuite_id='catch/1', results_dir=results_dir)
df = csv_load.load_one_result_set(results_dir=results_dir)
self.assertLen(df, _NUM_WRITES * 2)
# Check that sweep metadata is joined correctly.
# Catch includes a 'seed' parameter, so we expect to see it here.
self.assertIn('seed', df.columns)
self.assertIn('bsuite_id', df.columns)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/logging/csv_load_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Logging functionality for CSV-based experiments."""
import os
from typing import Any, Mapping
from bsuite import environments
from bsuite import sweep
from bsuite.logging import base
from bsuite.utils import wrappers
import dm_env
import pandas as pd
SAFE_SEPARATOR = '-'
INITIAL_SEPARATOR = '_-_'
BSUITE_PREFIX = 'bsuite_id' + INITIAL_SEPARATOR
def wrap_environment(env: environments.Environment,
bsuite_id: str,
results_dir: str,
overwrite: bool = False,
log_by_step: bool = False) -> dm_env.Environment:
"""Returns a wrapped environment that logs using CSV."""
logger = Logger(bsuite_id, results_dir, overwrite)
return wrappers.Logging(env, logger, log_by_step=log_by_step)
class Logger(base.Logger):
"""Saves data to a CSV file via Pandas.
In this simplified logger, each bsuite_id logs to its own CSV file, indexed
by bsuite_id. These files are all saved to a single results_dir per experiment.
We strongly suggest that you use a *fresh* folder for each bsuite run.
The write method rewrites the entire CSV file on each call. This is not
intended to be an optimized example. However, writes are infrequent due to
bsuite's logarithmically-spaced logging.
This logger, along with the corresponding load functionality, serves as a
simple, minimal example for users who need to implement logging to a different
storage system.
"""
def __init__(self,
bsuite_id: str,
results_dir: str = '/tmp/bsuite',
overwrite: bool = False):
"""Initializes a new CSV logger."""
if not os.path.exists(results_dir):
try:
os.makedirs(results_dir)
except OSError: # concurrent processes can makedir at same time
pass
# The default '/' symbol is dangerous for file systems!
safe_bsuite_id = bsuite_id.replace(sweep.SEPARATOR, SAFE_SEPARATOR)
filename = f'{BSUITE_PREFIX}{safe_bsuite_id}.csv'
save_path = os.path.join(results_dir, filename)
if os.path.exists(save_path) and not overwrite:
raise ValueError(
f'File {save_path} already exists. Specify a different '
'directory, or set overwrite=True to overwrite existing data.')
self._data = []
self._save_path = save_path
def write(self, data: Mapping[str, Any]):
"""Adds a row to the internal list of data and saves to CSV."""
self._data.append(data)
df = pd.DataFrame(self._data)
df.to_csv(self._save_path, index=False)
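# --- Illustrative sketch (not part of the original file) ---
# The filename scheme used above, assuming sweep.SEPARATOR == '/':
# bsuite_id 'catch/0' is written to '<results_dir>/bsuite_id_-_catch-0.csv'.
if __name__ == '__main__':
  safe_id = 'catch/0'.replace(sweep.SEPARATOR, SAFE_SEPARATOR)
  print(f'{BSUITE_PREFIX}{safe_id}.csv')  # bsuite_id_-_catch-0.csv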
|
bsuite-master
|
bsuite/logging/csv_logging.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/logging/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""An abstract base class for loggers."""
import abc
from typing import Any, Mapping
class Logger(abc.ABC):
"""A logger has a `write` method."""
@abc.abstractmethod
def write(self, data: Mapping[str, Any]):
"""Writes `data` to destination (file, terminal, database, etc)."""
|
bsuite-master
|
bsuite/logging/base.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Read functionality for local csv-based experiments."""
from collections import abc
import copy
from typing import Any, Callable, List, Mapping, Sequence, Tuple, Union
from bsuite import sweep
import pandas as pd
import six
def join_metadata(df: pd.DataFrame) -> pd.DataFrame:
"""Returns a DataFrame with bsuite sweep metadata joined on bsuite_id."""
# Assume we are loading in the settings via sweep.py, without any changes.
assert 'bsuite_id' in df.columns
metadata = copy.deepcopy(sweep.SETTINGS) # be careful not to change this
data = []
for bsuite_id, env_kwargs in metadata.items():
# Add environment and id to dataframe.
bsuite_env = bsuite_id.split(sweep.SEPARATOR)[0]
bsuite_params = {
'bsuite_id': bsuite_id,
'bsuite_env': bsuite_env,
}
bsuite_params.update(env_kwargs)
data.append(bsuite_params)
bsuite_df = pd.DataFrame(data)
return pd.merge(df, bsuite_df, on='bsuite_id')
PathCollection = Union[str, Sequence[str], Mapping[str, Any]]
SingleLoadFn = Callable[[str], pd.DataFrame]
def load_multiple_runs(
path_collection: PathCollection,
single_load_fn: SingleLoadFn) -> Tuple[pd.DataFrame, List[str]]:
"""Returns a pandas DataFrame of bsuite results.
Args:
path_collection: Paths to one or more locations of bsuite results. May be
given as one of: a string containing a single path; a sequence (e.g. list,
tuple) of paths; or a mapping from agent or algorithm name to path.
single_load_fn: A function that takes a single path (as specified in
`path_collection`) and loads the bsuite results for one agent run.
Returns:
A tuple of:
- A pandas DataFrame containing the bsuite results.
- A list of column names to group by, used in ipython notebook provided.
When grouping by these columns, each group corresponds to one set of
results.
"""
# Convert any inputs to dictionary format.
if isinstance(path_collection, six.string_types):
path_collection = {path_collection: path_collection}
if not isinstance(path_collection, abc.Mapping):
path_collection = {path: path for path in path_collection}
# Loop through multiple bsuite runs, and apply single_load_fn to each.
data = []
for name, path in path_collection.items():
df = single_load_fn(path)
df['agent_name'] = name
data.append(df)
sweep_vars = ['agent_name']
return pd.concat(data, sort=False), sweep_vars
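# --- Illustrative sketch (not part of the original file) ---
# The three accepted `path_collection` forms, with hypothetical paths and a
# dummy `single_load_fn` that just records which path it was asked to load.
if __name__ == '__main__':
  def fake_load(path: str) -> pd.DataFrame:
    return pd.DataFrame([{'bsuite_id': 'catch/0', 'path': path}])

  for collection in ('/tmp/run_a',                      # single string
                     ['/tmp/run_a', '/tmp/run_b'],      # sequence of paths
                     {'dqn': '/tmp/run_a'}):            # name -> path mapping
    df, sweep_vars = load_multiple_runs(collection, fake_load)
    print(sweep_vars, list(df['agent_name']))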
|
bsuite-master
|
bsuite/logging/logging_utils.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Setup for pip package."""
from setuptools import find_namespace_packages
from setuptools import setup
def _get_version():
with open('haiku/__init__.py') as fp:
for line in fp:
if line.startswith('__version__'):
g = {}
exec(line, g) # pylint: disable=exec-used
return g['__version__']
raise ValueError('`__version__` not defined in `haiku/__init__.py`')
def _parse_requirements(requirements_txt_path):
with open(requirements_txt_path) as fp:
return fp.read().splitlines()
_VERSION = _get_version()
setup(
name='dm-haiku',
version=_VERSION,
url='https://github.com/deepmind/dm-haiku',
license='Apache 2.0',
author='DeepMind',
description='Haiku is a library for building neural networks in JAX.',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
author_email='haiku-dev-os@google.com',
# Contained modules and scripts.
packages=find_namespace_packages(exclude=['*_test.py', 'examples']),
install_requires=_parse_requirements('requirements.txt'),
extras_require={'jax': _parse_requirements('requirements-jax.txt')},
tests_require=_parse_requirements('requirements-test.txt'),
python_requires='>=3.9',
include_package_data=True,
zip_safe=False,
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries',
],
)
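# --- Illustrative note (not part of the original file) ---
# `_get_version` above scans haiku/__init__.py for a line of the form
#     __version__ = "0.0.11.dev"
# and returns that string; `_parse_requirements` returns one requirement
# specifier per line of the given requirements file.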
|
dm-haiku-main
|
setup.py
|
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-importing-member
"""Configs."""
from haiku._src.config import context
from haiku._src.config import set # pylint: disable=redefined-builtin
__all__ = (
"context",
"set",
)
|
dm-haiku-main
|
haiku/config.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-importing-member
"""Utilities for mixed precision."""
from haiku._src.mixed_precision import clear_policy
from haiku._src.mixed_precision import current_policy
from haiku._src.mixed_precision import get_policy
from haiku._src.mixed_precision import push_policy
from haiku._src.mixed_precision import set_policy
__all__ = (
'clear_policy',
'current_policy',
'get_policy',
'push_policy',
'set_policy',
)
|
dm-haiku-main
|
haiku/mixed_precision.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-importing-member
"""Haiku is a neural network library for JAX."""
from haiku import config
from haiku import data_structures
from haiku import experimental
from haiku import initializers
from haiku import mixed_precision
from haiku import nets
from haiku import pad
from haiku import testing
from haiku._src.attention import MultiHeadAttention
from haiku._src.base import current_name
from haiku._src.base import custom_creator
from haiku._src.base import custom_getter
from haiku._src.base import custom_setter
from haiku._src.base import DO_NOT_STORE
from haiku._src.base import get_current_state
from haiku._src.base import get_initial_state
from haiku._src.base import get_parameter
from haiku._src.base import get_params
from haiku._src.base import get_state
from haiku._src.base import GetterContext
from haiku._src.base import maybe_get_rng_sequence_state
from haiku._src.base import maybe_next_rng_key
from haiku._src.base import next_rng_key
from haiku._src.base import next_rng_keys
from haiku._src.base import PRNGSequence
from haiku._src.base import replace_rng_sequence_state
from haiku._src.base import reserve_rng_keys
from haiku._src.base import set_state
from haiku._src.base import SetterContext
from haiku._src.base import with_rng
from haiku._src.basic import BatchApply
from haiku._src.basic import dropout
from haiku._src.basic import expand_apply
from haiku._src.basic import Linear
from haiku._src.basic import multinomial
from haiku._src.basic import one_hot
from haiku._src.basic import Sequential
from haiku._src.basic import to_module
from haiku._src.batch_norm import BatchNorm
from haiku._src.bias import Bias
from haiku._src.conv import Conv1D
from haiku._src.conv import Conv1DTranspose
from haiku._src.conv import Conv2D
from haiku._src.conv import Conv2DTranspose
from haiku._src.conv import Conv3D
from haiku._src.conv import Conv3DTranspose
from haiku._src.conv import ConvND
from haiku._src.conv import ConvNDTranspose
from haiku._src.deferred import Deferred
from haiku._src.depthwise_conv import DepthwiseConv1D
from haiku._src.depthwise_conv import DepthwiseConv2D
from haiku._src.depthwise_conv import DepthwiseConv3D
from haiku._src.depthwise_conv import SeparableDepthwiseConv2D
from haiku._src.dot import to_dot
from haiku._src.embed import Embed
from haiku._src.embed import EmbedLookupStyle
from haiku._src.group_norm import GroupNorm
from haiku._src.layer_norm import InstanceNorm
from haiku._src.layer_norm import LayerNorm
from haiku._src.layer_stack import layer_stack
from haiku._src.layer_stack import LayerStackTransparencyMapping
from haiku._src.lift import lift
from haiku._src.lift import lift_with_state
from haiku._src.lift import LiftWithStateUpdater
from haiku._src.lift import transparent_lift
from haiku._src.lift import transparent_lift_with_state
from haiku._src.module import force_name
from haiku._src.module import intercept_methods
from haiku._src.module import MethodContext
from haiku._src.module import Module
from haiku._src.module import name_like
from haiku._src.module import name_scope
from haiku._src.module import transparent
from haiku._src.moving_averages import EMAParamsTree
from haiku._src.moving_averages import ExponentialMovingAverage
from haiku._src.multi_transform import multi_transform
from haiku._src.multi_transform import multi_transform_with_state
from haiku._src.multi_transform import MultiTransformed
from haiku._src.multi_transform import MultiTransformedWithState
from haiku._src.multi_transform import without_apply_rng
from haiku._src.pool import avg_pool
from haiku._src.pool import AvgPool
from haiku._src.pool import max_pool
from haiku._src.pool import MaxPool
from haiku._src.recurrent import Conv1DLSTM
from haiku._src.recurrent import Conv2DLSTM
from haiku._src.recurrent import Conv3DLSTM
from haiku._src.recurrent import deep_rnn_with_skip_connections
from haiku._src.recurrent import DeepRNN
from haiku._src.recurrent import dynamic_unroll
from haiku._src.recurrent import GRU
from haiku._src.recurrent import IdentityCore
from haiku._src.recurrent import LSTM
from haiku._src.recurrent import LSTMState
from haiku._src.recurrent import ResetCore
from haiku._src.recurrent import RNNCore
from haiku._src.recurrent import static_unroll
from haiku._src.recurrent import VanillaRNN
from haiku._src.reshape import Flatten
from haiku._src.reshape import Reshape
from haiku._src.rms_norm import RMSNorm
from haiku._src.spectral_norm import SNParamsTree
from haiku._src.spectral_norm import SpectralNorm
from haiku._src.stateful import cond
from haiku._src.stateful import eval_shape
from haiku._src.stateful import fori_loop
from haiku._src.stateful import grad
from haiku._src.stateful import map # pylint: disable=redefined-builtin
from haiku._src.stateful import remat
from haiku._src.stateful import scan
from haiku._src.stateful import switch
from haiku._src.stateful import value_and_grad
from haiku._src.stateful import vmap
from haiku._src.stateful import while_loop
from haiku._src.transform import running_init
from haiku._src.transform import transform
from haiku._src.transform import transform_with_state
from haiku._src.transform import Transformed
from haiku._src.transform import TransformedWithState
from haiku._src.transform import with_empty_state
from haiku._src.transform import without_state
from haiku._src.typing import ModuleProtocol
from haiku._src.typing import MutableParams
from haiku._src.typing import MutableState
from haiku._src.typing import Params
from haiku._src.typing import State
from haiku._src.typing import SupportsCall
from haiku._src.utils import get_channel_index
__version__ = "0.0.11.dev"
__all__ = (
"AvgPool",
"BatchApply",
"BatchNorm",
"Bias",
"Conv1D",
"Conv1DLSTM",
"Conv1DTranspose",
"Conv2D",
"Conv2DLSTM",
"Conv2DTranspose",
"Conv3D",
"Conv3DLSTM",
"Conv3DTranspose",
"ConvND",
"ConvNDTranspose",
"DeepRNN",
"Deferred",
"DepthwiseConv1D",
"DepthwiseConv2D",
"DepthwiseConv3D",
"DO_NOT_STORE",
"EMAParamsTree",
"Embed",
"EmbedLookupStyle",
"ExponentialMovingAverage",
"Flatten",
"GetterContext",
"GRU",
"GroupNorm",
"IdentityCore",
"InstanceNorm",
"LayerStackTransparencyMapping",
"LiftWithStateUpdater",
"LSTM",
"LSTMState",
"LayerNorm",
"Linear",
"MaxPool",
"MethodContext",
"Module",
"ModuleProtocol",
"MultiHeadAttention",
"MultiTransformed",
"MultiTransformedWithState",
"MutableParams",
"MutableState",
"PRNGSequence",
"Params",
"RNNCore",
"ResetCore",
"Reshape",
"RMSNorm",
"SNParamsTree",
"SetterContext",
"Sequential",
"SpectralNorm",
"State",
"SupportsCall",
"Transformed",
"TransformedWithState",
"VanillaRNN",
"avg_pool",
"cond",
"config",
"eval_shape",
"current_name",
"custom_creator",
"custom_getter",
"custom_setter",
"data_structures",
"deep_rnn_with_skip_connections",
"dropout",
"dynamic_unroll",
"expand_apply",
"fori_loop",
"force_name",
"get_channel_index",
"get_current_state",
"get_initial_state",
"get_params",
"get_parameter",
"get_state",
"grad",
"initializers",
"intercept_methods",
"layer_stack",
"lift",
"lift_with_state",
"map",
"max_pool",
"maybe_get_rng_sequence_state",
"maybe_next_rng_key",
"mixed_precision",
"multi_transform",
"multi_transform_with_state",
"multinomial",
"name_like",
"name_scope",
"nets",
"next_rng_key",
"next_rng_keys",
"one_hot",
"pad",
"remat",
"replace_rng_sequence_state",
"reserve_rng_keys",
"running_init",
"scan",
"set_state",
"static_unroll",
"switch",
"testing",
"to_dot",
"to_module",
"transform",
"transform_with_state",
"transparent",
"transparent_lift",
"transparent_lift_with_state",
"value_and_grad",
"vmap",
"while_loop",
"with_empty_state",
"with_rng",
"without_apply_rng",
"without_state",
)
#  _________________________________________
# / Please don't use symbols in `_src` they \
# \ are not part of the Haiku public API.   /
#  -----------------------------------------
#        \   ^__^
#         \  (oo)\_______
#            (__)\       )\/\
#                ||----w |
#                ||     ||
#
try:
del _src # pylint: disable=undefined-variable
except NameError:
pass
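# --- Illustrative sketch (not part of the original file) ---
# A minimal example of the transform/init/apply workflow exposed above.
# Assumes `jax` is installed; shapes and sizes are arbitrary.
if __name__ == "__main__":
  import jax
  import jax.numpy as jnp
  import haiku as hk

  def forward(x):
    return hk.nets.MLP([64, 10])(x)

  forward_t = hk.transform(forward)
  rng = jax.random.PRNGKey(42)
  x = jnp.ones([8, 28 * 28])
  params = forward_t.init(rng, x)
  logits = forward_t.apply(params, rng, x)
  print(logits.shape)  # (8, 10)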
|
dm-haiku-main
|
haiku/__init__.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-importing-member
"""Paddings."""
from haiku._src.pad import causal
from haiku._src.pad import create_from_padfn
from haiku._src.pad import create_from_tuple
from haiku._src.pad import full
from haiku._src.pad import is_padfn
from haiku._src.pad import PadFn
from haiku._src.pad import reverse_causal
from haiku._src.pad import same
from haiku._src.pad import valid
create = create_from_padfn # Legacy alias.
__all__ = (
"PadFn",
"causal",
"create",
"create_from_padfn",
"create_from_tuple",
"full",
"is_padfn",
"reverse_causal",
"same",
"valid",
)
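# --- Illustrative sketch (not part of the original file) ---
# Each padding function above maps an effective kernel size to a
# (pad_before, pad_after) tuple. For a size-3 kernel I would expect roughly
# causal -> (2, 0), same -> (1, 1), full -> (2, 2), valid -> (0, 0)
# (treat these values as an assumption, not documentation).
if __name__ == "__main__":
  for fn in (causal, full, reverse_causal, same, valid):
    print(fn.__name__, fn(3))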
|
dm-haiku-main
|
haiku/pad.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-importing-member
"""Haiku is a neural network library for JAX."""
from haiku._src.initializers import Constant
from haiku._src.initializers import Identity
from haiku._src.initializers import Orthogonal
from haiku._src.initializers import RandomNormal
from haiku._src.initializers import RandomUniform
from haiku._src.initializers import TruncatedNormal
from haiku._src.initializers import UniformScaling
from haiku._src.initializers import VarianceScaling
from haiku._src.typing import Initializer
__all__ = (
"Constant",
"Orthogonal",
"Identity",
"Initializer",
"RandomNormal",
"RandomUniform",
"TruncatedNormal",
"UniformScaling",
"VarianceScaling",
)
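# --- Illustrative sketch (not part of the original file) ---
# Typical use of an initializer when creating a parameter inside a
# transformed function. Assumes `jax` is installed; sizes are arbitrary.
if __name__ == "__main__":
  import jax
  import jax.numpy as jnp
  import haiku as hk

  def f(x):
    w_init = hk.initializers.TruncatedNormal(stddev=0.01)
    w = hk.get_parameter("w", shape=[x.shape[-1], 4], dtype=x.dtype,
                         init=w_init)
    return x @ w

  params = hk.transform(f).init(jax.random.PRNGKey(0), jnp.ones([2, 3]))
  # Prints the parameter tree; 'w' should have shape (3, 4).
  print(jax.tree_util.tree_map(lambda a: a.shape, params))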
|
dm-haiku-main
|
haiku/initializers.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-importing-member
"""Public Haiku data structures."""
from haiku._src.data_structures import to_haiku_dict
from haiku._src.data_structures import to_immutable_dict
from haiku._src.data_structures import to_mutable_dict
from haiku._src.filtering import filter # pylint: disable=redefined-builtin
from haiku._src.filtering import is_subset
from haiku._src.filtering import map # pylint: disable=redefined-builtin
from haiku._src.filtering import merge
from haiku._src.filtering import partition
from haiku._src.filtering import partition_n
from haiku._src.filtering import traverse
from haiku._src.utils import tree_bytes
from haiku._src.utils import tree_size
__all__ = (
"is_subset",
"filter",
"map",
"merge",
"partition",
"partition_n",
"to_haiku_dict",
"to_mutable_dict",
"to_immutable_dict",
"traverse",
"tree_bytes",
"tree_size",
)
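# --- Illustrative sketch (not part of the original file) ---
# `partition` splits a two-level params mapping using a
# (module_name, name, value) predicate; a hand-built example (no JAX needed).
if __name__ == "__main__":
  import numpy as np

  params = to_haiku_dict({
      "linear": {"w": np.zeros([3, 4]), "b": np.zeros([4])},
      "layer_norm": {"scale": np.ones([4]), "offset": np.zeros([4])},
  })
  weights, rest = partition(lambda mod, name, value: name == "w", params)
  # `weights` holds only the 'w' entries; everything else lands in `rest`.
  print(sorted(weights), sorted(rest))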
|
dm-haiku-main
|
haiku/data_structures.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-importing-member
"""Type hints."""
from haiku._src.typing import MutableParams
from haiku._src.typing import MutableState
from haiku._src.typing import Params
from haiku._src.typing import State
__all__ = (
"MutableParams",
"MutableState",
"Params",
"State",
)
|
dm-haiku-main
|
haiku/typing.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-importing-member
"""Test utilities for Haiku."""
from haiku._src.test_utils import transform_and_run
__all__ = (
"transform_and_run",
)
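# --- Illustrative sketch (not part of the original file) ---
# Hypothetical test using `transform_and_run` as a decorator: the wrapped body
# may build Haiku modules directly, without writing transform/init/apply.
if __name__ == "__main__":
  import jax.numpy as jnp
  import haiku as hk

  @transform_and_run
  def mlp_output_shape_check():
    net = hk.nets.MLP([16, 4])
    out = net(jnp.ones([2, 8]))
    assert out.shape == (2, 4)

  mlp_output_shape_check()
  print("ok")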
|
dm-haiku-main
|
haiku/testing.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-importing-member
"""Common networks built using Haiku modules."""
from haiku._src.nets.mlp import MLP
from haiku._src.nets.mobilenetv1 import MobileNetV1
from haiku._src.nets.resnet import ResNet
from haiku._src.nets.resnet import ResNet101
from haiku._src.nets.resnet import ResNet152
from haiku._src.nets.resnet import ResNet18
from haiku._src.nets.resnet import ResNet200
from haiku._src.nets.resnet import ResNet34
from haiku._src.nets.resnet import ResNet50
from haiku._src.nets.vqvae import VectorQuantizer
from haiku._src.nets.vqvae import VectorQuantizerEMA
__all__ = (
"ResNet",
"ResNet18",
"ResNet34",
"ResNet50",
"ResNet101",
"ResNet152",
"ResNet200",
"MLP",
"MobileNetV1",
"VectorQuantizer",
"VectorQuantizerEMA",
)
|
dm-haiku-main
|
haiku/nets.py
|
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-importing-member
"""Tools for understanding JAX + Haiku programs."""
from haiku._src.jaxpr_info import as_html
from haiku._src.jaxpr_info import as_html_page
from haiku._src.jaxpr_info import css
from haiku._src.jaxpr_info import Expression
from haiku._src.jaxpr_info import format_module
from haiku._src.jaxpr_info import js
from haiku._src.jaxpr_info import make_model_info
from haiku._src.jaxpr_info import Module
__all__ = (
"as_html",
"as_html_page",
"css",
"Expression",
"format_module",
"js",
"make_model_info",
"Module",
)
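# Hedged usage sketch; the names come from the exports above, but the exact
# call pattern is an assumption, so check haiku/_src/jaxpr_info.py if it
# matters:
#
#   x = jnp.ones([8, 28 * 28])
#   f = hk.transform(lambda x: hk.nets.MLP([300, 10])(x))
#   params = f.init(jax.random.PRNGKey(0), x)
#   info = make_model_info(f.apply)(params, None, x)  # -> Module
#   print(format_module(info))                        # plain-text summary
#   html = as_html_page(info)                         # standalone HTML report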
|
dm-haiku-main
|
haiku/experimental/jaxpr_info.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-importing-member
"""Experimental features developed by the Haiku core team.
Features may be removed or modified at any time.
"""
from haiku._src.base import current_name
from haiku._src.base import custom_creator
from haiku._src.base import custom_getter
from haiku._src.base import DO_NOT_STORE
from haiku._src.base import get_current_state
from haiku._src.base import get_initial_state
from haiku._src.base import get_params
from haiku._src.base import GetterContext
from haiku._src.base import maybe_get_rng_sequence_state
from haiku._src.base import replace_rng_sequence_state
from haiku._src.config import check_jax_usage
from haiku._src.config import module_auto_repr
from haiku._src.config import rng_reserve_size
from haiku._src.dot import abstract_to_dot
from haiku._src.dot import to_dot
from haiku._src.eval_shape import fast_eval_shape
from haiku._src.layer_stack import layer_stack
from haiku._src.layer_stack import LayerStackTransparencyMapping
from haiku._src.lift import lift
from haiku._src.lift import lift_with_state
from haiku._src.lift import LiftWithStateUpdater
from haiku._src.lift import transparent_lift
from haiku._src.lift import transparent_lift_with_state
from haiku._src.module import force_name
from haiku._src.module import intercept_methods
from haiku._src.module import MethodContext
from haiku._src.module import name_like
from haiku._src.module import name_scope
from haiku._src.random import optimize_rng_use
from haiku._src.summarise import ArraySpec
from haiku._src.summarise import eval_summary
from haiku._src.summarise import MethodInvocation
from haiku._src.summarise import ModuleDetails
from haiku._src.summarise import tabulate
from haiku.experimental import flax
from haiku.experimental import jaxpr_info
# TODO(tomhennigan): Remove deprecated alias.
ParamContext = GetterContext
__all__ = (
"abstract_to_dot",
"ArraySpec",
"eval_summary",
"check_jax_usage",
"current_name",
"custom_creator",
"custom_getter",
"DO_NOT_STORE",
"force_name",
"intercept_methods",
"flax",
"get_current_state",
"get_initial_state",
"get_params",
"jaxpr_info",
"layer_stack",
"LayerStackTransparencyMapping",
"lift",
"lift_with_state",
"LiftWithStateUpdater",
"maybe_get_rng_sequence_state",
"module_auto_repr",
"MethodContext",
"MethodInvocation",
"ModuleDetails",
"name_like",
"name_scope",
"optimize_rng_use",
"GetterContext",
"ParamContext",
"replace_rng_sequence_state",
"rng_reserve_size",
"tabulate",
"to_dot",
"transparent_lift",
"transparent_lift_with_state",
)
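if __name__ == "__main__":
  # Hedged usage sketch (assumes the public `haiku` and `jax` packages are
  # installed; run this file directly to try it). `custom_creator` intercepts
  # parameter creation; here it forces zero initialization for every parameter
  # created inside the `with` block.
  import haiku as hk
  import jax
  import jax.numpy as jnp
  def forward(x):
    def zeros_creator(next_creator, shape, dtype, init, context):
      del init, context  # Ignore the module-provided initializer.
      return next_creator(shape, dtype, jnp.zeros)
    with custom_creator(zeros_creator):
      return hk.Linear(3)(x)
  f = hk.transform(forward)
  params = f.init(jax.random.PRNGKey(0), jnp.ones([1, 2]))
  print(jax.tree_util.tree_map(lambda p: float(p.sum()), params))  # All zeros.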
|
dm-haiku-main
|
haiku/experimental/__init__.py
|
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-importing-member
"""Tools for working with Haiku and Flax."""
from haiku._src.flax.flax_module import Module
from haiku._src.flax.transform_flax import lift
from haiku._src.flax.utils import flatten_flax_to_haiku
__all__ = (
'flatten_flax_to_haiku',
'Module',
'lift',
)
|
dm-haiku-main
|
haiku/experimental/flax.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.random."""
from collections.abc import Sequence
import functools
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import base
from haiku._src import random
from haiku._src import transform
import jax
from jax import prng
import jax.numpy as jnp
import numpy as np
class RandomTest(absltest.TestCase):
def test_optimize_rng_splitting(self):
def f():
k1 = base.next_rng_key()
k2 = base.next_rng_key()
return k1, k2
key = jax.random.PRNGKey(42)
assert_allclose = functools.partial(np.testing.assert_allclose, atol=1e-5)
# With optimize_rng_use the keys returned should be equal to split(n).
f_opt = transform.transform(random.optimize_rng_use(f))
jax.tree_util.tree_map(
assert_allclose,
f_opt.apply({}, key),
tuple(jax.random.split(key, 3))[1:],
)
# Without optimize_rng_use the keys should be equivalent to splitting in a
# loop.
f = transform.transform(f)
jax.tree_util.tree_map(
assert_allclose, f.apply({}, key), tuple(split_for_n(key, 2))
)
def test_rbg_default_impl(self):
with jax.default_prng_impl("rbg"):
key = jax.random.PRNGKey(42)
self.assertEqual(key.shape, (4,))
_, apply = transform.transform(base.next_rng_key)
out_key = apply({}, key)
self.assertEqual(out_key.shape, (4,))
def test_rbg_default_impl_invalid_key_shape(self):
with jax.default_prng_impl("rbg"):
key = jax.random.PRNGKey(42)[0:2]
self.assertEqual(key.shape, (2,))
init, _ = transform.transform(base.next_rng_key)
with self.assertRaisesRegex(
ValueError, "Init must be called with an RNG"
):
init(key)
def test_invalid_key(self):
init, _ = transform.transform(base.next_rng_key)
with self.assertRaisesRegex(ValueError, "Init must be called with an RNG"):
init([1, 2])
class CustomRNGTest(parameterized.TestCase):
def test_non_custom_key(self):
init, _ = transform.transform(base.next_rng_key)
init(jax.random.PRNGKey(42)) # does not crash
@parameterized.parameters(False, True)
def test_custom_key(self, do_jit):
if do_jit:
self.skipTest("init returns nothing, so compilation may DCE it")
count = 0
def count_splits(_, num):
nonlocal count
count += 1
num = tuple(num) if isinstance(num, Sequence) else (num,)
return jnp.zeros((*num, 13), np.uint32)
differently_shaped_prng_impl = prng.PRNGImpl(
# Testing a different key shape to make sure it's accepted by Haiku
key_shape=(13,),
seed=lambda _: jnp.zeros((13,), np.uint32),
split=count_splits,
random_bits=lambda *_, data: jnp.zeros(data, np.uint32),
fold_in=lambda key, _: key,
)
init, _ = transform.transform(base.next_rng_key)
if do_jit:
init = jax.jit(init)
key = prng.seed_with_impl(differently_shaped_prng_impl, 42)
init(key)
self.assertEqual(count, 1)
def split_for_n(key, n):
for _ in range(n):
key, subkey = jax.random.split(key)
yield subkey
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/random_test.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.base."""
import functools
import itertools as it
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
from haiku._src import base
from haiku._src import config
from haiku._src import test_utils
import jax
import jax.numpy as jnp
import numpy as np
# TODO(tomhennigan) Improve test coverage.
custom_state_creator = functools.partial(
base.custom_creator, params=False, state=True)
custom_state_getter = functools.partial(
base.custom_getter, params=False, state=True)
identity_carry = lambda f: lambda carry, x: (carry, f(x))
ignore_index = lambda f: lambda i, x: f(x)
def with_rng_example():
with base.with_rng(jax.random.PRNGKey(42)):
pass
def replace_rng_sequence_state_example():
base.replace_rng_sequence_state((jax.random.PRNGKey(42), tuple()))
# Methods in Haiku that mutate internal state.
SIDE_EFFECTING_FUNCTIONS = (
("get_parameter", lambda: base.get_parameter("w", [], init=jnp.zeros)),
("get_state", lambda: base.get_state("w", [], init=jnp.zeros)),
("set_state", lambda: base.set_state("w", 1)),
("next_rng_key", base.next_rng_key),
("next_rng_keys", lambda: base.next_rng_keys(2)),
("reserve_rng_keys", lambda: base.reserve_rng_keys(2)),
("with_rng", with_rng_example),
(
"replace_rng_sequence_state",
replace_rng_sequence_state_example,
),
)
# JAX transforms and control flow that need to be aware of Haiku internal
# state to operate unsurprisingly.
# pylint: disable=g-long-lambda
JAX_PURE_EXPECTING_FNS = (
# Just-in-time compilation.
("jit", jax.jit),
("make_jaxpr", jax.make_jaxpr),
("eval_shape", lambda f: (lambda x: jax.eval_shape(f, x))),
# Parallelization.
# TODO(tomhennigan): Add missing features (e.g. pjit,xmap).
("pmap", lambda f: jax.pmap(f, "i")),
# Vectorization.
("vmap", jax.vmap),
# Control flow.
# TODO(tomhennigan): Enable for associative_scan.
# ("associative_scan", lambda f:
# (lambda x: jax.lax.associative_scan(
# lambda a, b: [f(a + b), a + b][-1], jnp.stack([x, x, x, x])))),
("cond", lambda f: (lambda x: jax.lax.cond(True, f, f, x))),
(
"fori_loop",
lambda f:
(lambda x: jax.lax.fori_loop(0, 1, ignore_index(f), x))),
("map", lambda f: (lambda x: jax.lax.map(f, x))),
("scan", lambda f: (lambda x: jax.lax.scan(identity_carry(f), None, x))),
("switch", lambda f: (lambda x: jax.lax.switch(0, [f, f], x))),
("while_loop", lambda f: (lambda x: jax.lax.while_loop(
lambda xs: xs[0] == 0, lambda xs: [1, f(xs[1])], (0, x)))),
# Automatic differentiation.
# TODO(tomhennigan): Add missing features (e.g. custom_vjp, custom_jvp).
("grad", lambda f: jax.grad(lambda x: f(x).sum())),
("value_and_grad", lambda f: jax.value_and_grad(lambda x: f(x).sum())),
("checkpoint", jax.checkpoint), # aka. remat
)
# pylint: enable=g-long-lambda
class BaseTest(parameterized.TestCase):
@test_utils.transform_and_run
def test_parameter_reuse(self):
w1 = base.get_parameter("w", [], init=jnp.zeros)
w2 = base.get_parameter("w", [], init=jnp.zeros)
self.assertIs(w1, w2)
def test_params(self):
with base.new_context() as ctx:
w = base.get_parameter("w", [], init=jnp.zeros)
self.assertEqual(ctx.collect_params(), {"~": {"w": w}})
@test_utils.transform_and_run
def test_naked_get_parameter(self):
w1 = base.get_parameter("w", [], init=jnp.zeros)
w2 = base.get_parameter("w", [], init=jnp.zeros)
self.assertIs(w1, w2)
def test_naked_parameter_in_tilde_collection(self):
with base.new_context() as ctx:
w1 = base.get_parameter("w1", [], init=jnp.zeros)
w2 = base.get_parameter("w2", [], init=jnp.ones)
self.assertIsNot(w1, w2)
self.assertEqual(ctx.collect_params(), {"~": {"w1": w1, "w2": w2}})
@parameterized.parameters(({},), ({"~": {}},))
def test_parameter_in_immutable_ctx(self, params):
with base.new_context(params=params):
with self.assertRaisesRegex(
ValueError, "parameters must be created as part of `init`"):
base.get_parameter("w", [], init=jnp.zeros)
def test_get_parameter_rng_exception(self):
with base.new_context():
with self.assertRaisesRegex(
base.MissingRNGError, "pass a non-None PRNGKey to init"
):
base.get_parameter(
"w", [], init=lambda shape, dtype: base.next_rng_key()
)
def test_get_parameter_wrong_shape(self):
with base.new_context():
with self.assertRaisesRegex(ValueError, "does not match shape"):
base.get_parameter("w", (1,), init=jnp.zeros)
base.get_parameter("w", (2,), init=jnp.zeros)
def test_get_parameter_no_init(self):
with base.new_context():
with self.assertRaisesRegex(ValueError, "Initializer must be specified."):
base.get_parameter("w", [])
def test_get_parameter_no_init_during_init_second_call(self):
with base.new_context():
w = base.get_parameter("w", [], init=jnp.zeros)
self.assertIs(base.get_parameter("w", []), w)
def test_get_parameter_no_init_during_apply(self):
w = jnp.zeros([])
with base.new_context(params={"~": {"w": w}}):
self.assertIs(base.get_parameter("w", []), w)
@parameterized.parameters(base.next_rng_key, lambda: base.next_rng_keys(1))
def test_rng_no_transform(self, f):
with self.assertRaisesRegex(ValueError,
"must be used as part of an `hk.transform`"):
f()
@test_utils.transform_and_run
def test_rng(self):
a = base.next_rng_key()
b = base.next_rng_key()
self.assertIsNot(a, b)
@test_utils.transform_and_run
def test_rngs(self):
a, b = base.next_rng_keys(2)
c, d = base.next_rng_keys(2)
for l, r in it.permutations((a, b, c, d), 2):
self.assertIsNot(l, r)
@test_utils.transform_and_run(seed=None)
def test_no_rng(self):
with self.assertRaisesRegex(ValueError, "must pass a non-None PRNGKey"):
base.next_rng_key()
def test_invalid_rng(self):
with self.assertRaisesRegex(ValueError, "not a JAX PRNGKey"):
base.new_context(rng="nonsense") # type: ignore
def test_invalid_rng_none_ignored(self):
with base.new_context(rng=None):
pass
def test_maybe_rng_no_transform(self):
with self.assertRaisesRegex(ValueError,
"must be used as part of an `hk.transform`"):
base.maybe_next_rng_key()
@test_utils.transform_and_run(seed=None)
def test_maybe_no_rng(self):
self.assertIsNone(base.maybe_next_rng_key())
def test_maybe_rng_vs_not(self):
"""If we have an rng, then next_rng_key() == maybe_next_rng_key()."""
rngs = []
maybes = []
@test_utils.transform_and_run
def three():
for _ in range(3):
rngs.append(base.next_rng_key())
@test_utils.transform_and_run
def maybe_three():
for _ in range(3):
maybes.append(base.maybe_next_rng_key())
three()
maybe_three()
self.assertLen(rngs, 6)
self.assertTrue(jnp.all(jnp.array(rngs) == jnp.array(maybes)))
def test_maybe_get_rng_seq_state_no_transform(self):
with self.assertRaisesRegex(
ValueError, "must be used as part of an `hk.transform`"
):
base.maybe_get_rng_sequence_state()
@test_utils.transform_and_run(seed=None)
def test_maybe_get_rng_seq_state_no_rng(self):
self.assertIsNone(base.maybe_get_rng_sequence_state())
def test_maybe_get_rng_seq_state_vs_next_rng(self):
rngs_next = []
rngs_state = []
@test_utils.transform_and_run
def next_rng_three():
for _ in range(3):
rngs_next.append(base.next_rng_key())
@test_utils.transform_and_run
def get_state_three():
rng_state = base.maybe_get_rng_sequence_state()
for _ in range(3):
seq = hk.PRNGSequence(rng_state)
rng = next(seq)
rng_state = seq.internal_state
rngs_state.append(rng)
next_rng_three()
get_state_three()
self.assertLen(rngs_next, 6)
self.assertTrue(jnp.all(jnp.array(rngs_next) == jnp.array(rngs_state)))
def test_replace_rng_seq_state_no_transform(self):
with self.assertRaisesRegex(
ValueError, "must be used as part of an `hk.transform`"
):
base.replace_rng_sequence_state((jax.random.PRNGKey(42), tuple()))
@test_utils.transform_and_run(seed=None)
def test_replace_rng_seq_state_no_rng(self):
with self.assertRaisesRegex(
base.MissingRNGError,
"requires an RNG to be passed into the transformed function",
):
base.replace_rng_sequence_state((jax.random.PRNGKey(42), tuple()))
@test_utils.transform_and_run(seed=1)
def test_replace_then_get_rng_seq_state(self):
rng_state = (
jax.random.PRNGKey(123),
(jax.random.PRNGKey(234), jax.random.PRNGKey(345)),
)
base.replace_rng_sequence_state(rng_state)
self.assertEqual(base.maybe_get_rng_sequence_state(), rng_state)
def test_replace_get_rng_seq_state_vs_no_replace(self):
rngs_no_replace = []
rngs_replace = []
seed = 123
@test_utils.transform_and_run(seed=seed)
def no_replace_three():
for _ in range(3):
rngs_no_replace.append(base.next_rng_key())
@test_utils.transform_and_run(seed=1)
def replace_three():
if hk.running_init():
replace_seed = seed
else:
replace_seed = seed + 1
base.replace_rng_sequence_state(
(jax.random.PRNGKey(replace_seed), tuple())
)
for _ in range(3):
rngs_replace.append(base.next_rng_key())
no_replace_three()
replace_three()
self.assertLen(rngs_no_replace, 6)
self.assertTrue(
jnp.all(jnp.array(rngs_no_replace) == jnp.array(rngs_replace))
)
@parameterized.parameters(
(base.get_parameter, base.custom_creator, "collect_params"),
(base.get_state, custom_state_creator, "collect_state"))
def test_init_custom_creator(self, get_x, custom_x, collect_x):
def zeros_creator(next_creator, shape, dtype, init, context):
self.assertEqual(context.full_name, "~/w")
self.assertEqual(context.module_name, "~")
self.assertEqual(context.name, "w")
self.assertEqual(shape, [])
self.assertEqual(dtype, jnp.float32)
self.assertEqual(init, jnp.ones)
return next_creator(shape, dtype, jnp.zeros)
with base.new_context() as ctx:
with custom_x(zeros_creator):
get_x("w", [], init=jnp.ones)
self.assertEqual(getattr(ctx, collect_x)(), {"~": {"w": jnp.zeros([])}})
@parameterized.parameters((base.get_parameter, base.custom_creator),
(base.get_state, custom_state_creator))
def test_nested_creators(self, get_x, custom_x):
log = []
def logging_creator(log_msg):
def _logging_creator(next_creator, shape, dtype, init, context):
del context
log.append(log_msg)
return next_creator(shape, dtype, init)
return _logging_creator
with base.new_context():
with custom_x(logging_creator("a")), \
custom_x(logging_creator("b")), \
custom_x(logging_creator("c")):
get_x("w", [], init=jnp.ones)
self.assertEqual(log, ["a", "b", "c"])
@parameterized.parameters((base.get_parameter, base.custom_creator,
base.custom_getter, "collect_params"),
(base.get_state, custom_state_creator,
custom_state_getter, "collect_state"))
def test_original_dtype(self, get_x, custom_create_x, custom_get_x,
collect_x):
def dtype_cast_creator(next_creator, shape, dtype, init, context):
if context.original_dtype == jnp.bfloat16:
dtype = jnp.float32
return next_creator(shape, dtype, init)
def dtype_recast_getter(next_getter, value, context):
if context.original_dtype == jnp.bfloat16:
assert value.dtype == jnp.float32
value = value.astype(jnp.bfloat16)
return next_getter(value)
with base.new_context() as ctx:
with custom_create_x(dtype_cast_creator), \
custom_get_x(dtype_recast_getter):
value = get_x("w", [], jnp.bfloat16, jnp.ones)
orig_value = jax.tree_util.tree_leaves(getattr(ctx, collect_x)())[0]
assert value.dtype == jnp.bfloat16
assert orig_value.dtype == jnp.float32
@parameterized.parameters((base.get_parameter, base.custom_creator),
(base.get_state, custom_state_creator))
def test_original_shape(self, get_x, custom_x):
def new_shape_creator(next_creator, shape, dtype, init, context):
del shape
del context
new_shape = (1, 2, 3)
return next_creator(new_shape, dtype, init)
def original_shape_restorer(next_creator, shape, dtype, init, context):
assert shape == (1, 2, 3)
return next_creator(context.original_shape, dtype, init)
with base.new_context():
with custom_x(new_shape_creator):
with custom_x(original_shape_restorer):
value = get_x("w", [5], jnp.bfloat16, jnp.ones)
assert value.shape == (5,)
@parameterized.parameters(
(base.get_parameter, base.custom_getter, "collect_params"),
(base.get_state, custom_state_getter, "collect_state"))
def test_custom_getter_bf16(self, get_x, custom_x, collect_x):
def bf16_getter(next_getter, value, context):
del context
if value.dtype == jnp.float32:
value = value.astype(jnp.bfloat16)
return next_getter(value)
with base.new_context() as ctx:
with custom_x(bf16_getter):
f = get_x("f", [], jnp.float32, init=jnp.ones)
i = get_x("i", [], jnp.int32, init=jnp.ones)
collection = getattr(ctx, collect_x)()
self.assertEqual(collection["~"]["f"].dtype, jnp.float32)
self.assertEqual(f.dtype, jnp.bfloat16)
self.assertEqual(collection["~"]["i"].dtype, jnp.int32)
self.assertEqual(i.dtype, jnp.int32)
@parameterized.parameters((base.get_parameter, base.custom_getter),
(base.get_state, custom_state_getter))
def test_nested_getters(self, get_x, custom_x):
log = []
def logging_getter(log_msg, dtype_in, dtype_out):
def _logging_getter(next_getter, value, context):
del context
log.append(log_msg)
self.assertEqual(value.dtype, dtype_in)
value = value.astype(dtype_out)
return next_getter(value)
return _logging_getter
with base.new_context():
with custom_x(logging_getter("a", jnp.float32, jnp.bfloat16)), \
custom_x(logging_getter("b", jnp.bfloat16, jnp.int32)), \
custom_x(logging_getter("c", jnp.int32, jnp.int8)):
w = get_x("w", [], init=jnp.ones)
self.assertEqual(w.dtype, jnp.int8)
self.assertEqual(log, ["a", "b", "c"])
@parameterized.parameters(*it.permutations([True, False], 2))
def test_creator_types(self, params, state):
log = []
def logging_creator(next_creator, shape, dtype, init, context):
log.append(context.full_name)
return next_creator(shape, dtype, init)
with base.new_context():
with base.custom_creator(logging_creator, params=params, state=state):
base.get_parameter("params", [], init=jnp.zeros)
base.get_state("state", [], init=jnp.zeros)
self.assertLen(log, int(params) + int(state))
if params:
self.assertIn("~/params", log)
if state:
self.assertIn("~/state", log)
@parameterized.parameters(*it.permutations([True, False], 2))
def test_getter_types(self, params, state):
log = []
def logging_getter(next_getter, value, context):
log.append(context.full_name)
return next_getter(value)
with base.new_context():
with base.custom_getter(logging_getter, params=params, state=state):
base.get_parameter("params", [], init=jnp.zeros)
base.get_state("state", [], init=jnp.zeros)
self.assertLen(log, int(params) + int(state))
if params:
self.assertIn("~/params", log)
if state:
self.assertIn("~/state", log)
@parameterized.parameters(base.custom_creator, custom_state_creator)
def test_creator_requires_context(self, custom_x):
def my_creator(next_creator, shape, dtype, init, context):
del context
return next_creator(shape, dtype, init)
with self.assertRaisesRegex(ValueError,
"must be used as part of an `hk.transform`"):
with custom_x(my_creator):
pass
@parameterized.parameters(base.custom_getter, custom_state_getter)
def test_getter_requires_context(self, custom_x):
def my_getter(next_getter, value, context):
del context
return next_getter(value)
with self.assertRaisesRegex(ValueError,
"must be used as part of an `hk.transform`"):
with custom_x(my_getter):
pass
def test_setter_requires_context(self):
def my_setter(next_setter, value, context):
del context
return next_setter(value)
with self.assertRaisesRegex(ValueError,
"must be used as part of an `hk.transform`"):
with base.custom_setter(my_setter):
pass
def test_setter_array(self):
witness = []
x = jnp.ones([])
y = x + 1
def my_setter(next_setter, value, context):
self.assertIs(value, x)
self.assertEqual(context.original_shape, value.shape)
self.assertEqual(context.original_dtype, value.dtype)
self.assertEqual(context.full_name, "~/x")
self.assertEqual(context.name, "x")
self.assertIsNone(context.module)
witness.append(None)
del next_setter
return y
with base.new_context():
with base.custom_setter(my_setter):
base.set_state("x", x)
x = base.get_state("x")
self.assertIs(x, y)
self.assertNotEmpty(witness)
def test_setter_tree(self):
witness = []
x = {"a": jnp.ones([]), "b": jnp.zeros([123])}
y = jax.tree_util.tree_map(lambda x: x + 1, x)
def my_setter(next_setter, value, ctx):
self.assertIs(value, x)
self.assertEqual(ctx.original_shape, {"a": (), "b": (123,)})
self.assertEqual(ctx.original_dtype, {"a": jnp.float32, "b": jnp.float32})
self.assertEqual(ctx.full_name, "~/x")
self.assertEqual(ctx.name, "x")
self.assertIsNone(ctx.module)
witness.append(None)
del next_setter
return y
with base.new_context():
with base.custom_setter(my_setter):
base.set_state("x", x)
x = base.get_state("x")
self.assertIs(x, y)
self.assertNotEmpty(witness)
def test_get_state_no_init_raises(self):
with base.new_context():
with self.assertRaisesRegex(ValueError, "set an init function"):
base.get_state("i")
with base.new_context(state={"~": {}}):
with self.assertRaisesRegex(ValueError, "set an init function"):
base.get_state("i")
def test_get_state_no_shape_raises(self):
with base.new_context():
with self.assertRaisesRegex(ValueError, "provide shape and dtype"):
base.get_state("i", init=jnp.zeros)
with base.new_context(state={"~": {}}):
with self.assertRaisesRegex(ValueError, "provide shape and dtype"):
base.get_state("i", init=jnp.zeros)
def test_set_then_get(self):
with base.new_context() as ctx:
base.set_state("i", 1)
base.get_state("i")
self.assertEqual(ctx.collect_initial_state(), {"~": {"i": 1}})
for _ in range(10):
with ctx:
base.set_state("i", 1)
y = base.get_state("i")
self.assertEqual(y, 1)
self.assertEqual(ctx.collect_initial_state(), {"~": {"i": 1}})
def test_stateful(self):
with base.new_context() as ctx:
for _ in range(10):
count = base.get_state("count", (), jnp.int32, jnp.zeros)
base.set_state("count", count + 1)
self.assertEqual(ctx.collect_initial_state(), {"~": {"count": 0}})
self.assertEqual(ctx.collect_state(), {"~": {"count": 10}})
def test_new_state_in_apply(self):
with base.new_context(params={}, state={}) as ctx:
base.set_state("count", 1)
self.assertEqual(ctx.collect_initial_state(), {"~": {"count": 1}})
self.assertEqual(ctx.collect_state(), {"~": {"count": 1}})
@parameterized.product(
seed=[42, 28], wrap_seed=[True, False], jitted=[True, False])
def test_prng_sequence(self, seed, wrap_seed, jitted):
def create_random_values(key_or_seed):
key_seq = base.PRNGSequence(key_or_seed)
return (jax.random.normal(next(key_seq), []),
jax.random.normal(next(key_seq), []))
# Values using our sequence.
key_or_seed = jax.random.PRNGKey(seed) if wrap_seed else seed
seq_v1, seq_v2 = (jax.jit(create_random_values)(key_or_seed)
if jitted else create_random_values(key_or_seed))
# Generate values using manual splitting.
key = jax.random.PRNGKey(seed)
key, temp_key = jax.random.split(key)
raw_v1 = jax.random.normal(temp_key, [])
_, temp_key = jax.random.split(key)
raw_v2 = jax.random.normal(temp_key, [])
self.assertEqual(raw_v1, seq_v1)
self.assertEqual(raw_v2, seq_v2)
def test_prng_sequence_invalid_input(self):
with self.assertRaisesRegex(ValueError, "not a JAX PRNGKey"):
base.PRNGSequence("nonsense") # type: ignore
def test_prng_sequence_wrong_shape(self):
with self.assertRaisesRegex(ValueError,
"key did not have expected shape and/or dtype"):
base.PRNGSequence(jax.random.split(jax.random.PRNGKey(42), 2))
def test_prng_sequence_wrong_shape_custom_prng(self):
with self.assertRaisesRegex(ValueError,
"key did not have expected shape and/or dtype"):
with jax.enable_custom_prng():
base.PRNGSequence(jax.random.split(jax.random.PRNGKey(42), 2))
def test_prng_reserve(self):
k = jax.random.PRNGKey(42)
s = base.PRNGSequence(k)
s.reserve(10)
hk_keys = tuple(next(s) for _ in range(10))
jax_keys = tuple(jax.random.split(k, num=11)[1:])
jax.tree_util.tree_map(
np.testing.assert_array_equal, hk_keys, jax_keys)
def test_prng_reserve_twice(self):
k = jax.random.PRNGKey(42)
s = base.PRNGSequence(k)
s.reserve(2)
s.reserve(2)
hk_keys = tuple(next(s) for _ in range(4))
k, subkey1, subkey2 = tuple(jax.random.split(k, num=3))
_, subkey3, subkey4 = tuple(jax.random.split(k, num=3))
jax_keys = (subkey1, subkey2, subkey3, subkey4)
jax.tree_util.tree_map(
np.testing.assert_array_equal, hk_keys, jax_keys)
def test_prng_sequence_split(self):
k = jax.random.PRNGKey(42)
s = base.PRNGSequence(k)
hk_keys = s.take(10)
jax_keys = tuple(jax.random.split(k, num=11)[1:])
jax.tree_util.tree_map(
np.testing.assert_array_equal, hk_keys, jax_keys)
@parameterized.parameters(42, 28)
def test_with_rng(self, seed):
ctx_key = jax.random.PRNGKey(seed * 2 + 1)
key = jax.random.PRNGKey(seed)
_, next_key = jax.random.split(key)
expected_output = jax.random.uniform(next_key, ())
with base.new_context(rng=ctx_key):
without_decorator_out = jax.random.uniform(base.next_rng_key(), ()).item()
with base.new_context(rng=ctx_key):
with base.with_rng(key):
with_decorator_out = jax.random.uniform(base.next_rng_key(), ()).item()
self.assertNotEqual(without_decorator_out, expected_output)
self.assertEqual(with_decorator_out, expected_output)
def test_with_rng_no_transform(self):
with self.assertRaisesRegex(ValueError,
"must be used as part of an `hk.transform`"):
with base.with_rng(jax.random.PRNGKey(428)):
pass
def test_new_context(self):
with base.new_context() as ctx:
pass
self.assertEmpty(ctx.collect_params())
self.assertEmpty(ctx.collect_initial_state())
self.assertEmpty(ctx.collect_state())
def test_context_copies_input(self):
before = {"~": {"w": jnp.array(1.)}}
with base.new_context(params=before, state=before) as ctx:
base.get_parameter("w", [], init=jnp.ones)
base.set_state("w", jnp.array(2.))
self.assertEqual(ctx.collect_params(), {"~": {"w": jnp.array(1.)}})
self.assertIsNot(ctx.collect_initial_state(), before)
self.assertEqual(ctx.collect_initial_state(), before)
self.assertEqual(ctx.collect_state(), {"~": {"w": jnp.array(2.)}})
self.assertEqual(before, {"~": {"w": jnp.array(1.)}})
def test_assert_no_new_parameters(self):
with base.new_context():
base.get_parameter("w", [], init=jnp.zeros)
with base.assert_no_new_parameters():
# Should not raise, "w" already exists.
base.get_parameter("w", [], init=jnp.zeros)
with self.assertRaisesRegex(AssertionError,
"New parameters were created: .*x"):
with base.assert_no_new_parameters():
# Should raise, "x" does not exist.
base.get_parameter("x", [], init=jnp.zeros)
def test_context_cleanup_after_error(self):
with base.new_context():
with self.assertRaisesRegex(ValueError, "expected"):
raise ValueError("expected")
self.assertEmpty(base.frame_stack)
@test_utils.combined_named_parameters(SIDE_EFFECTING_FUNCTIONS,
JAX_PURE_EXPECTING_FNS)
@test_utils.transform_and_run
@test_utils.with_guardrails
def test_unsafe_use_of_jax(self, haiku_side_effect_fn, jax_fn):
    # Make `f` the identity function, with the side-effecting call included.
f = jax_fn(lambda x: [haiku_side_effect_fn(), x][1])
x = jnp.ones([1])
with self.assertRaises(base.JaxUsageError):
f(x)
def test_do_not_store(self):
def my_creator(next_creator, shape, dtype, init, context):
del next_creator, shape, dtype, init, context
return base.DO_NOT_STORE
def my_getter(next_getter, value, context):
assert value is base.DO_NOT_STORE
return next_getter(
context.original_init(context.original_shape, context.original_dtype))
def my_setter(next_setter, value, context):
del next_setter, value, context
return base.DO_NOT_STORE
with base.new_context() as ctx:
with base.custom_creator(my_creator, state=True), \
base.custom_getter(my_getter, state=True), \
base.custom_setter(my_setter):
self.assertEqual(base.get_parameter("w", [], init=jnp.ones), 1)
self.assertEqual(base.get_state("s1", [], init=jnp.ones), 1)
base.set_state("s2", jnp.ones([]))
self.assertEmpty(ctx.collect_params())
self.assertEmpty(ctx.collect_state())
def test_do_not_store_array_like(self):
with self.assertRaises(ValueError):
base.DO_NOT_STORE.shape # pylint: disable=pointless-statement # pytype: disable=attribute-error
with self.assertRaises(ValueError):
base.DO_NOT_STORE.dtype # pylint: disable=pointless-statement # pytype: disable=attribute-error
def test_current_name_no_transform(self):
with self.assertRaisesRegex(ValueError,
"must be used as part of an `hk.transform`"):
base.current_name()
@test_utils.transform_and_run(seed=123, run_apply=False)
def test_rng_reserve_size(self):
size = 5
with config.context(rng_reserve_size=size):
split_key = jax.random.PRNGKey(123)
for _ in range(2):
split_key, *expected_keys = jax.random.split(split_key, size+1)
hk_keys = hk.next_rng_keys(size)
np.testing.assert_array_equal(hk_keys, expected_keys)
@parameterized.parameters(
base.get_params, base.get_current_state, base.get_initial_state
)
def test_get_params_or_state_must_be_inside_transform(self, f):
with self.assertRaisesRegex(ValueError,
"must be used as part of an `hk.transform`"):
f()
def test_get_params_or_state_empty(self):
def f():
self.assertEmpty(base.get_params())
self.assertEmpty(base.get_initial_state())
self.assertEmpty(base.get_current_state())
test_utils.transform_and_run(f)
def test_get_params_or_state(self):
sidechannel = [({}, {}, {}), ({}, {}, {})]
def f():
sidechannel[0] = (
base.get_params(),
base.get_initial_state(),
base.get_current_state(),
)
base.get_parameter("w", [], init=jnp.ones)
x = base.get_state("x", [], init=jnp.zeros)
base.set_state("x", x + 1)
sidechannel[1] = (
base.get_params(),
base.get_initial_state(),
base.get_current_state(),
)
f = test_utils.transform.transform_with_state(f)
params, state = f.init(None)
(
(params_before, initial_state_before, current_state_before),
(params_after, initial_state_after, current_state_after),
) = sidechannel
# Initially params/state are empty.
self.assertEmpty(params_before)
self.assertEmpty(initial_state_before)
self.assertEmpty(current_state_before)
# At the end of the function the params and initial state should match the
# output of the init function.
self.assertEqual(params, params_after)
self.assertEqual(state, initial_state_after)
# The current state at the end of the function should have advanced.
self.assertEqual(current_state_after, {"~": {"x": 1}})
# The arrays at the leaves of the various dicts should alias.
self.assertIs(params["~"]["w"], params_after["~"]["w"])
self.assertIs(state["~"]["x"], initial_state_after["~"]["x"])
# But the dicts themselves should be different.
self.assertIsNot(params, params_after)
self.assertIsNot(params_before, params_after)
self.assertIsNot(params["~"], params_after["~"])
_, state = f.apply(params, state, None)
(
(params_before, initial_state_before, current_state_before),
(params_after, initial_state_after, current_state_after),
) = sidechannel
# The params should always match the parameters passed into the apply
# function.
self.assertEqual(params_before, params_after)
self.assertEqual(params, params_after)
# Initial state should not change during the apply function.
self.assertEqual(initial_state_before, initial_state_after)
self.assertEqual(initial_state_before, {"~": {"x": 0}})
# The current state at the end of the function should match the output of
# apply.
self.assertEqual(state, current_state_after)
# The arrays at the leaves of the various dicts should alias.
self.assertIs(params_before["~"]["w"], params_after["~"]["w"])
self.assertIs(params["~"]["w"], params_after["~"]["w"])
self.assertIs(state["~"]["x"], current_state_after["~"]["x"])
# But the dicts themselves should be different.
self.assertIsNot(params, params_after)
self.assertIsNot(params_before, params_after)
self.assertIsNot(params["~"], params_after["~"])
self.assertIsNot(params_before["~"], params_after["~"])
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/base_test.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Spectral Normalization tools.
This implementation follows the use in:
https://arxiv.org/abs/1802.05957
https://arxiv.org/abs/1805.08318
https://arxiv.org/abs/1809.11096
"""
import re
from typing import Optional
from haiku._src import base
from haiku._src import data_structures
from haiku._src import initializers
from haiku._src import module
import jax
import jax.lax
import jax.numpy as jnp
# If you are forking replace this block with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
initializers = initializers
data_structures = data_structures
get_parameter = base.get_parameter
get_state = base.get_state
set_state = base.set_state
Module = module.Module
# pylint: enable=invalid-name
del base, data_structures, module, initializers
def _l2_normalize(x, axis=None, eps=1e-12):
"""Normalizes along dimension `axis` using an L2 norm.
This specialized function exists for numerical stability reasons.
Args:
x: An input ndarray.
axis: Dimension along which to normalize, e.g. `1` to separately normalize
      vectors in a batch. Passing `None` views `x` as a flattened vector when
calculating the norm (equivalent to Frobenius norm).
eps: Epsilon to avoid dividing by zero.
Returns:
An array of the same shape as 'x' L2-normalized along 'axis'.
"""
return x * jax.lax.rsqrt((x * x).sum(axis=axis, keepdims=True) + eps)
class SpectralNorm(hk.Module):
"""Normalizes an input by its first singular value.
This module uses power iteration to calculate this value based on the
input and an internal hidden state.
"""
def __init__(
self,
eps: float = 1e-4,
n_steps: int = 1,
name: Optional[str] = None,
):
"""Initializes an SpectralNorm module.
Args:
eps: The constant used for numerical stability.
n_steps: How many steps of power iteration to perform to approximate the
singular value of the input.
name: The name of the module.
"""
super().__init__(name=name)
self.eps = eps
self.n_steps = n_steps
def __call__(
self,
value,
update_stats: bool = True,
error_on_non_matrix: bool = False,
) -> jax.Array:
"""Performs Spectral Normalization and returns the new value.
Args:
      value: The array-like object to apply spectral normalization to.
update_stats: A boolean defaulting to True. Regardless of this arg, this
function will return the normalized input. When
`update_stats` is True, the internal state of this object will also be
updated to reflect the input value. When `update_stats` is False the
internal stats will remain unchanged.
error_on_non_matrix: Spectral normalization is only defined on matrices.
        By default, this module will flatten higher-order tensors over their
        leading dimensions to form a matrix (scalar and vector inputs always
        raise an error). Setting this flag to True will instead raise an error
        for higher-order inputs rather than flattening them.
Returns:
      The input value normalized by its first singular value.
Raises:
ValueError: If `error_on_non_matrix` is True and `value` has ndims > 2.
"""
value = jnp.asarray(value)
value_shape = value.shape
# Handle scalars.
if value.ndim <= 1:
raise ValueError("Spectral normalization is not well defined for "
"scalar or vector inputs.")
# Handle higher-order tensors.
elif value.ndim > 2:
if error_on_non_matrix:
raise ValueError(
f"Input is {value.ndim}D but error_on_non_matrix is True")
else:
value = jnp.reshape(value, [-1, value.shape[-1]])
u0 = hk.get_state("u0", [1, value.shape[-1]], value.dtype,
init=hk.initializers.RandomNormal())
# Power iteration for the weight's singular value.
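    # Sketch of the update (hedged, for orientation): with u0 of shape [1, N]
    # and the (possibly reshaped) matrix `value` of shape [M, N], each step does
    #   v0 <- l2_normalize(u0 @ value^T)   # [1, M]
    #   u0 <- l2_normalize(v0 @ value)     # [1, N]
    # and the largest singular value is then estimated as
    #   sigma ~= (v0 @ value) @ u0^T.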
for _ in range(self.n_steps):
v0 = _l2_normalize(jnp.matmul(u0, value.transpose([1, 0])), eps=self.eps)
u0 = _l2_normalize(jnp.matmul(v0, value), eps=self.eps)
u0 = jax.lax.stop_gradient(u0)
v0 = jax.lax.stop_gradient(v0)
sigma = jnp.matmul(jnp.matmul(v0, value), jnp.transpose(u0))[0, 0]
value /= sigma
value_bar = value.reshape(value_shape)
if update_stats:
hk.set_state("u0", u0)
hk.set_state("sigma", sigma)
return value_bar
@property
def u0(self):
return hk.get_state("u0")
@property
def sigma(self):
return hk.get_state("sigma", shape=(), init=jnp.ones)
class SNParamsTree(hk.Module):
"""Applies Spectral Normalization to all parameters in a tree.
This is isomorphic to EMAParamsTree in moving_averages.py.
"""
def __init__(
self,
eps: float = 1e-4,
n_steps: int = 1,
ignore_regex: str = "",
name: Optional[str] = None,
):
"""Initializes an SNParamsTree module.
Args:
eps: The constant used for numerical stability.
n_steps: How many steps of power iteration to perform to approximate the
singular value of the input.
ignore_regex: A string. Any parameter in the tree whose name matches this
regex will not have spectral normalization applied to it. The empty
string means this module applies to all parameters.
name: The name of the module.
"""
super().__init__(name=name)
self.eps = eps
self.n_steps = n_steps
self.ignore_regex = ignore_regex
def __call__(self, tree, update_stats=True):
def maybe_sn(k, v):
if self.ignore_regex and re.match(self.ignore_regex, k):
return v
else:
sn_name = k.replace("/", "__").replace("~", "_tilde")
return SpectralNorm(self.eps, self.n_steps, name=sn_name)(
v, update_stats=update_stats)
# We want to potentially replace params with Spectral Normalized versions.
new_values = {}
for module_name, param_dict in tree.items():
new_values[module_name] = {
k: maybe_sn("/".join([module_name, k]), v)
for k, v in param_dict.items()
}
return hk.data_structures.to_haiku_dict(new_values)
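if __name__ == "__main__":
  # Hedged usage sketch (assumes the public `haiku` package is installed; run
  # this file directly to try it). Spectral normalization keeps state
  # (`u0`/`sigma`), so it must run inside `transform_with_state`.
  import haiku
  forward = haiku.transform_with_state(lambda w: SpectralNorm()(w))
  w = jnp.ones([4, 3])
  params, state = forward.init(jax.random.PRNGKey(0), w)
  w_bar, state = forward.apply(params, state, None, w)
  print(w_bar.shape)  # Same shape as the input, rescaled by ~1/sigma.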
|
dm-haiku-main
|
haiku/_src/spectral_norm.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bias module."""
from collections.abc import Sequence
from typing import Optional, Union
from haiku._src import base
from haiku._src import initializers
from haiku._src import module
from haiku._src import utils
import jax
import jax.numpy as jnp
# If you are forking replace this with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
get_parameter = base.get_parameter
initializers = initializers
Module = module.Module
# pylint: enable=invalid-name
del base, module
class Bias(hk.Module):
"""Adds a bias to inputs.
Example Usage:
>>> N, H, W, C = 1, 2, 3, 4
>>> x = jnp.ones([N, H, W, C])
>>> scalar_bias = hk.Bias(bias_dims=[])
>>> scalar_bias_output = scalar_bias(x)
>>> assert scalar_bias.bias_shape == ()
Create a bias over all non-minibatch dimensions:
>>> all_bias = hk.Bias()
>>> all_bias_output = all_bias(x)
>>> assert all_bias.bias_shape == (H, W, C)
Create a bias over the last non-minibatch dimension:
>>> last_bias = hk.Bias(bias_dims=[-1])
>>> last_bias_output = last_bias(x)
>>> assert last_bias.bias_shape == (C,)
Create a bias over the first non-minibatch dimension:
>>> first_bias = hk.Bias(bias_dims=[1])
>>> first_bias_output = first_bias(x)
>>> assert first_bias.bias_shape == (H, 1, 1)
Subtract and later add the same learned bias:
>>> bias = hk.Bias()
>>> h1 = bias(x, multiplier=-1)
>>> h2 = bias(x)
>>> h3 = bias(x, multiplier=-1)
>>> reconstructed_x = bias(h3)
>>> assert (x == reconstructed_x).all()
"""
def __init__(
self,
output_size: Optional[Sequence[int]] = None,
bias_dims: Optional[Sequence[int]] = None,
b_init: Optional[hk.initializers.Initializer] = None,
name: Optional[str] = None,
):
"""Constructs a ``Bias`` module that supports broadcasting.
Args:
output_size: Output size (output shape without batch dimension). If
        ``output_size`` is left as `None`, the size is inferred directly from
the input.
bias_dims: Sequence of which dimensions to retain from the input shape
when constructing the bias. The remaining dimensions will be broadcast
over (given size of 1), and leading dimensions will be removed
completely. See class doc for examples.
      b_init: Optional initializer for the bias. Defaults to zeros.
name: Name of the module.
"""
super().__init__(name=name)
self.output_size = output_size
self.bias_dims = bias_dims
self.b_init = b_init or jnp.zeros
self.bias_shape = None
def __call__(
self,
inputs: jax.Array,
multiplier: Optional[Union[float, jax.Array]] = None,
) -> jax.Array:
"""Adds bias to ``inputs`` and optionally multiplies by ``multiplier``.
Args:
inputs: A Tensor of size ``[batch_size, input_size1, ...]``.
multiplier: A scalar or Tensor which the bias term is multiplied by before
adding it to ``inputs``. Anything which works in the expression ``bias *
multiplier`` is acceptable here. This may be useful if you want to add a
bias in one place and subtract the same bias in another place via
``multiplier=-1``.
Returns:
A Tensor of size ``[batch_size, input_size1, ...]``.
"""
utils.assert_minimum_rank(inputs, 2)
if self.output_size is not None and self.output_size != inputs.shape[1:]:
raise ValueError(
f"Input shape must be {(-1,) + tuple(self.output_size)} not"
f" {inputs.shape}"
)
self.bias_shape = calculate_bias_shape(inputs.shape, self.bias_dims)
self.input_size = inputs.shape[1:]
b = hk.get_parameter("b", self.bias_shape, inputs.dtype, init=self.b_init)
b = jnp.broadcast_to(b, inputs.shape)
if multiplier is not None:
b = b * multiplier
return inputs + b
def calculate_bias_shape(input_shape: Sequence[int], bias_dims: Sequence[int]):
"""Calculate `bias_shape` based on the `input_shape` and `bias_dims`.
Args:
input_shape: Shape of the input being passed into the module. The leading
dimension is the mini-batch size.
bias_dims: The dimensions that bias should be applied over. The remaining
dimensions will be broadcast over.
Returns:
    bias_shape: Tuple giving the shape of the bias parameter to create.
Raises:
ValueError: If the user attempts to add bias over the mini-batch dimension,
e.g. `bias_dims=[0]`.
"""
input_rank = len(input_shape)
if bias_dims is None:
# If None, default is to use all dimensions.
return input_shape[1:]
elif not bias_dims:
# If empty list, use a scalar bias.
return ()
else:
# Otherwise, calculate bias_shape from bias_dims.
bias_shape = [1] * input_rank
# Populate bias dimensions.
for dim in bias_dims:
if dim < 0:
dim %= input_rank
if dim == 0:
raise ValueError("Cannot apply bias across the minibatch dimension.")
elif dim >= input_rank:
raise ValueError(
"Dimension %d (bias_dims=%r) out of range for input of rank %r." %
(dim, tuple(bias_dims), input_rank))
bias_shape[dim] = input_shape[dim]
# Strip leading unit dimensions.
start = input_rank
for dim in range(1, input_rank):
if bias_shape[dim] != 1:
start = dim
break
return tuple(bias_shape[start:]) # Do not apply across minibatch dimension.
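if __name__ == "__main__":
  # Worked examples for `calculate_bias_shape`, mirroring the class docstring
  # above; the input shape is [N, H, W, C] = [1, 2, 3, 4].
  input_shape = (1, 2, 3, 4)
  print(calculate_bias_shape(input_shape, None))  # (2, 3, 4): all non-batch dims.
  print(calculate_bias_shape(input_shape, []))    # (): a single scalar bias.
  print(calculate_bias_shape(input_shape, [-1]))  # (4,): last dimension only.
  print(calculate_bias_shape(input_shape, [1]))   # (2, 1, 1): first non-batch dim.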
|
dm-haiku-main
|
haiku/_src/bias.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Moving averages."""
import re
from typing import Optional, Union
import warnings
from haiku._src import base
from haiku._src import data_structures
from haiku._src import initializers
from haiku._src import module
import jax
import jax.numpy as jnp
# If you are forking replace this block with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
get_state = base.get_state
set_state = base.set_state
Module = module.Module
data_structures = data_structures
initializers = initializers
# pylint: enable=invalid-name
del base, data_structures, module, initializers
class ExponentialMovingAverage(hk.Module):
"""Maintains an exponential moving average.
This uses the Adam debiasing procedure.
See https://arxiv.org/pdf/1412.6980.pdf for details.
"""
def __init__(
self,
decay,
zero_debias: bool = True,
warmup_length: int = 0,
name: Optional[str] = None,
):
"""Initializes an ExponentialMovingAverage module.
Args:
      decay: The chosen decay. Must be in ``[0, 1)``. Values close to 1 result
        in slow decay; values close to ``0`` result in fast decay.
zero_debias: Whether to run with zero-debiasing.
      warmup_length: A non-negative integer. The EMA has no effect until the
        internal counter has reached ``warmup_length``, at which point the
        decaying average is initialized to the input value.
name: The name of the module.
"""
super().__init__(name=name)
self.decay = decay
self.warmup_length = warmup_length
self.zero_debias = zero_debias
if warmup_length < 0:
raise ValueError(
f"`warmup_length` is {warmup_length}, but should be non-negative.")
if warmup_length and zero_debias:
raise ValueError(
"Zero debiasing does not make sense when warming up the value of the "
"average to an initial value. Set zero_debias=False if setting "
"warmup_length to a non-zero value.")
def initialize(self, shape, dtype=jnp.float32):
"""If uninitialized sets the average to ``zeros`` of the given shape/dtype."""
if hasattr(shape, "shape"):
warnings.warn("Passing a value into initialize instead of a shape/dtype "
"is deprecated. Update your code to use: "
"`ema.initialize(v.shape, v.dtype)`.",
category=DeprecationWarning)
shape, dtype = shape.shape, shape.dtype
hk.get_state("hidden", shape, dtype, init=jnp.zeros)
hk.get_state("average", shape, dtype, init=jnp.zeros)
def __call__(
self,
value: Union[float, jax.Array],
update_stats: bool = True,
) -> jax.Array:
"""Updates the EMA and returns the new value.
Args:
      value: The array-like object to update the moving average with.
update_stats: A Boolean, whether to update the internal state
of this object to reflect the input value. When `update_stats` is False
the internal stats will remain unchanged.
Returns:
The exponentially weighted average of the input value.
"""
if not isinstance(value, jax.Array):
value = jnp.asarray(value)
counter = hk.get_state("counter", (), jnp.int32,
init=hk.initializers.Constant(-self.warmup_length))
counter = counter + 1
decay = jax.lax.convert_element_type(self.decay, value.dtype)
if self.warmup_length > 0:
decay = jax.lax.select(counter <= 0, 0.0, decay)
one = jnp.ones([], value.dtype)
hidden = hk.get_state("hidden", value.shape, value.dtype, init=jnp.zeros)
hidden = hidden * decay + value * (one - decay)
average = hidden
if self.zero_debias:
average /= (one - jnp.power(decay, counter))
if update_stats:
hk.set_state("counter", counter)
hk.set_state("hidden", hidden)
hk.set_state("average", average)
return average
@property
def average(self):
return hk.get_state("average")
class EMAParamsTree(hk.Module):
"""Maintains an exponential moving average for all parameters in a tree.
While ExponentialMovingAverage is meant to be applied to single parameters
within a function, this class is meant to be applied to the entire tree of
parameters for a function.
Given a set of parameters for some network:
>>> network_fn = lambda x: hk.Linear(10)(x)
>>> x = jnp.ones([1, 1])
>>> params = hk.transform(network_fn).init(jax.random.PRNGKey(428), x)
You might use the EMAParamsTree like follows:
>>> ema_fn = hk.transform_with_state(lambda x: hk.EMAParamsTree(0.2)(x))
>>> _, ema_state = ema_fn.init(None, params)
>>> ema_params, ema_state = ema_fn.apply(None, ema_state, None, params)
Here, we are transforming a Haiku function and constructing its parameters via
an init_fn as normal, but are creating a second transformed function which
expects a tree of parameters as input. This function is then called with
the current parameters as input, which then returns an identical tree with
every parameter replaced with its exponentially decayed average. This
ema_params object can then be passed into the `network_fn` as usual, and will
cause it to run with EMA weights.
"""
def __init__(
self,
decay,
zero_debias: bool = True,
warmup_length: int = 0,
ignore_regex: str = "",
name: Optional[str] = None,
):
"""Initializes an EMAParamsTree module.
Args:
      decay: The chosen decay. Must be in ``[0, 1)``. Values close to ``1``
        result in slow decay; values close to ``0`` result in fast decay.
zero_debias: Whether to run with zero-debiasing.
      warmup_length: A non-negative integer. The EMA has no effect until the
        internal counter has reached ``warmup_length``, at which point the
        decaying average is initialized to the input value.
ignore_regex: A string. Any parameter in the tree whose name matches this
regex will not have any moving average applied to it. The empty string
means this module will EMA all parameters.
name: The name of the module.
"""
super().__init__(name=name)
self.decay = decay
self.zero_debias = zero_debias
self.warmup_length = warmup_length
self.ignore_regex = ignore_regex
def __call__(self, tree, update_stats=True):
def maybe_ema(k, v):
if self.ignore_regex and re.match(self.ignore_regex, k):
return v
else:
ema_name = k.replace("/", "__").replace("~", "_tilde_")
return ExponentialMovingAverage(
self.decay, self.zero_debias, self.warmup_length, name=ema_name)(
v, update_stats=update_stats)
# We want to potentially replace params with EMA'd versions.
new_values = {}
for module_name, param_dict in tree.items():
new_values[module_name] = {
k: maybe_ema("/".join([module_name, k]), v)
for k, v in param_dict.items()
}
return hk.data_structures.to_haiku_dict(new_values)
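if __name__ == "__main__":
  # Hedged usage sketch (assumes the public `haiku` package is installed; run
  # this file directly to try it). With zero-debiasing the very first call
  # returns the input exactly, since hidden = (1 - d) * value and the debias
  # factor is (1 - d**1).
  import haiku
  ema_fn = haiku.transform_with_state(
      lambda x: ExponentialMovingAverage(0.9)(x))
  params, state = ema_fn.init(None, jnp.array(2.0))
  out, state = ema_fn.apply(params, state, None, jnp.array(2.0))
  print(out)  # ~2.0 on the first call.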
|
dm-haiku-main
|
haiku/_src/moving_averages.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""(Multi-Head) Attention module for use in Transformer architectures."""
from typing import Optional
import warnings
from haiku._src import basic
from haiku._src import initializers
from haiku._src import module
import jax
import jax.numpy as jnp
import numpy as np
# If you are forking replace this with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
Module = module.Module
Linear = basic.Linear
transparent = module.transparent
initializers = initializers
# pylint: enable=invalid-name
del basic, module, initializers
class MultiHeadAttention(hk.Module):
"""Multi-headed attention (MHA) module.
This module is intended for attending over sequences of vectors.
Rough sketch:
- Compute keys (K), queries (Q), and values (V) as projections of inputs.
- Attention weights are computed as W = softmax(QK^T / sqrt(key_size)).
- Output is another projection of WV.
For more detail, see the original Transformer paper:
"Attention is all you need" https://arxiv.org/abs/1706.03762.
Glossary of shapes:
- T: Sequence length.
- D: Vector (embedding) size.
- H: Number of attention heads.
"""
def __init__(
self,
num_heads: int,
key_size: int,
# TODO(b/240019186): Remove `w_init_scale`.
w_init_scale: Optional[float] = None,
*,
w_init: Optional[hk.initializers.Initializer] = None,
with_bias: bool = True,
b_init: Optional[hk.initializers.Initializer] = None,
value_size: Optional[int] = None,
model_size: Optional[int] = None,
name: Optional[str] = None,
):
"""Initialises the module.
Args:
num_heads: Number of independent attention heads (H).
key_size: The size of keys (K) and queries used for attention.
w_init_scale: DEPRECATED. Please use w_init instead.
w_init: Initialiser for weights in the linear map. Once `w_init_scale` is
fully deprecated `w_init` will become mandatory. Until then it has a
default value of `None` for backwards compatibility.
with_bias: Whether to add a bias when computing various linear
projections.
b_init: Optional initializer for bias. By default, zero.
value_size: Optional size of the value projection (V). If None, defaults
to the key size (K).
model_size: Optional size of the output embedding (D'). If None, defaults
to the key size multiplied by the number of heads (K * H).
name: Optional name for this module.
"""
super().__init__(name=name)
self.num_heads = num_heads
self.key_size = key_size
self.value_size = value_size or key_size
self.model_size = model_size or key_size * num_heads
# Backwards-compatibility for w_init_scale.
if w_init_scale is not None:
warnings.warn(
"w_init_scale is deprecated; please pass an explicit weight "
"initialiser instead.", DeprecationWarning)
if w_init and w_init_scale:
raise ValueError("Please provide only `w_init`, not `w_init_scale`.")
if w_init is None and w_init_scale is None:
raise ValueError("Please provide a weight initializer: `w_init`. "
"`w_init` will become mandatory once `w_init_scale` is "
"fully deprecated.")
if w_init is None:
w_init = hk.initializers.VarianceScaling(w_init_scale)
self.w_init = w_init
self.with_bias = with_bias
self.b_init = b_init
def __call__(
self,
query: jax.Array,
key: jax.Array,
value: jax.Array,
mask: Optional[jax.Array] = None,
) -> jax.Array:
"""Computes (optionally masked) MHA with queries, keys & values.
This module broadcasts over zero or more 'batch-like' leading dimensions.
Args:
query: Embeddings sequence used to compute queries; shape [..., T', D_q].
key: Embeddings sequence used to compute keys; shape [..., T, D_k].
value: Embeddings sequence used to compute values; shape [..., T, D_v].
mask: Optional mask applied to attention weights; shape [..., H=1, T', T].
Returns:
A new sequence of embeddings, consisting of a projection of the
attention-weighted value projections; shape [..., T', D'].
"""
# In shape hints below, we suppress the leading dims [...] for brevity.
# Hence e.g. [A, B] should be read in every case as [..., A, B].
*leading_dims, sequence_length, _ = query.shape
projection = self._linear_projection
# Compute key/query/values (overload K/Q/V to denote the respective sizes).
query_heads = projection(query, self.key_size, "query") # [T', H, Q=K]
key_heads = projection(key, self.key_size, "key") # [T, H, K]
value_heads = projection(value, self.value_size, "value") # [T, H, V]
# Compute attention weights.
attn_logits = jnp.einsum("...thd,...Thd->...htT", query_heads, key_heads)
attn_logits = attn_logits / np.sqrt(self.key_size).astype(key.dtype)
if mask is not None:
if mask.ndim != attn_logits.ndim:
raise ValueError(
f"Mask dimensionality {mask.ndim} must match logits dimensionality "
f"{attn_logits.ndim}."
)
attn_logits = jnp.where(mask, attn_logits, -1e30)
attn_weights = jax.nn.softmax(attn_logits) # [H, T', T]
# Weight the values by the attention and flatten the head vectors.
attn = jnp.einsum("...htT,...Thd->...thd", attn_weights, value_heads)
attn = jnp.reshape(attn, (*leading_dims, sequence_length, -1)) # [T', H*V]
# Apply another projection to get the final embeddings.
final_projection = hk.Linear(self.model_size, w_init=self.w_init,
with_bias=self.with_bias, b_init=self.b_init)
return final_projection(attn) # [T', D']
@hk.transparent
def _linear_projection(
self,
x: jax.Array,
head_size: int,
name: Optional[str] = None,
) -> jax.Array:
y = hk.Linear(self.num_heads * head_size, w_init=self.w_init,
with_bias=self.with_bias, b_init=self.b_init, name=name)(x)
*leading_dims, _ = x.shape
return y.reshape((*leading_dims, self.num_heads, head_size))
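# A minimal usage sketch (not part of the original file). It assumes the
# public `haiku as hk` API and made-up shapes; only the constructor
# arguments mirror the class above.
#
#   import haiku as hk
#   import jax
#   import jax.numpy as jnp
#
#   def self_attention(x):  # x: [B, T, D]
#     mha = hk.MultiHeadAttention(
#         num_heads=8, key_size=32,
#         w_init=hk.initializers.VarianceScaling(1.0))
#     return mha(query=x, key=x, value=x)
#
#   fwd = hk.transform(self_attention)
#   x = jnp.ones([2, 7, 64])
#   params = fwd.init(jax.random.PRNGKey(0), x)
#   out = fwd.apply(params, None, x)  # out.shape == (2, 7, 256), D' = H * K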
|
dm-haiku-main
|
haiku/_src/attention.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.recurrent."""
import itertools as it
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import basic
from haiku._src import recurrent
from haiku._src import test_utils
from haiku._src import transform
import jax
import jax.nn
import jax.numpy as jnp
import numpy as np
import tree
class DuplicateCore(recurrent.RNNCore):
"""A wrapper which duplicates the outputs of the wrapped :class:`RNNCore`."""
def __init__(self, base_core: recurrent.RNNCore):
super().__init__()
self.base_core = base_core
def __call__(self, inputs, prev_state):
"""See base class."""
outputs, next_state = self.base_core(inputs, prev_state)
return [outputs, outputs], next_state
def initial_state(self, batch_size):
"""See base class."""
return self.base_core.initial_state(batch_size)
def make_sequence(shape):
# Skips 0 for meaningful multiplicative interactions.
return np.arange(1, np.prod(shape) + 1, dtype=np.float32).reshape(shape)
class RecurrentTest(parameterized.TestCase):
UNROLLS = (recurrent.dynamic_unroll, recurrent.static_unroll)
CORES = (recurrent.VanillaRNN, recurrent.LSTM, recurrent.GRU)
UNROLL_KWARGS = ({}, {"unroll": 1}, {"unroll": 2}, {"unroll": 4})
def _skip_if_static_unroll(self, unroll, unroll_kwargs):
if unroll == recurrent.static_unroll and "unroll" in unroll_kwargs:
self.skipTest("static_unroll does not have an unroll parameter")
def test_add_batch(self):
sample_tree = dict(
a=[jnp.zeros([]), jnp.zeros([1])],
b=jnp.zeros([1, 1]),
)
batch_size = 2
out = recurrent.add_batch(sample_tree, batch_size)
tree.assert_same_structure(sample_tree, out)
flat_in = tree.flatten(sample_tree)
flat_out = tree.flatten(out)
for in_array, out_array in zip(flat_in, flat_out):
self.assertEqual(out_array.shape[0], batch_size)
self.assertEqual(out_array.shape[1:], in_array.shape)
# These two tests assume that the core takes argument hidden_size, and the
# output is a single tensor with the same size as hidden_size.
# They should be generalized when new cores are added.
@parameterized.parameters(*it.product(UNROLLS, CORES, UNROLL_KWARGS))
@test_utils.transform_and_run
def test_core_unroll_unbatched(self, unroll, core_cls, unroll_kwargs):
self._skip_if_static_unroll(unroll, unroll_kwargs)
seqs = make_sequence([8, 1]) # [T, F]
core = core_cls(hidden_size=4)
out, _ = unroll(core, seqs, core.initial_state(batch_size=None),
**unroll_kwargs)
self.assertEqual(out.shape, (8, 4))
@parameterized.parameters(*it.product(UNROLLS, CORES, UNROLL_KWARGS))
@test_utils.transform_and_run
def test_core_unroll_batched(self, unroll, core_cls, unroll_kwargs):
self._skip_if_static_unroll(unroll, unroll_kwargs)
seqs = make_sequence([4, 8, 1]) # [T, B, F]
core = core_cls(hidden_size=4)
batch_size = seqs.shape[1]
out, _ = unroll(core, seqs, core.initial_state(batch_size),
**unroll_kwargs)
self.assertEqual(out.shape, (4, 8, 4))
@parameterized.parameters(*it.product(UNROLLS, UNROLL_KWARGS))
@test_utils.transform_and_run
def test_core_unroll_nested(self, unroll, unroll_kwargs):
self._skip_if_static_unroll(unroll, unroll_kwargs)
seqs = make_sequence([4, 8, 1])
batch_size = seqs.shape[1]
core = DuplicateCore(recurrent.VanillaRNN(hidden_size=4))
outs, _ = unroll(core, seqs, core.initial_state(batch_size),
**unroll_kwargs)
self.assertLen(outs, 2)
for out in outs:
self.assertEqual(out.shape, (4, 8, 4))
@parameterized.parameters(*it.product(UNROLLS, UNROLL_KWARGS))
def test_unroll_outside_transform(self, unroll, unroll_kwargs):
self._skip_if_static_unroll(unroll, unroll_kwargs)
core = lambda x, s: (x + 1, s + 1)
seqs = jnp.arange(8)
outs, state = unroll(
core, seqs, 0, **unroll_kwargs)
np.testing.assert_allclose(outs, jnp.arange(9)[1:])
np.testing.assert_allclose(state, 8)
@parameterized.parameters(*it.product(CORES, UNROLL_KWARGS))
@test_utils.transform_and_run
def test_dynamic_unroll_all_states(self, core_cls, unroll_kwargs):
seqs = make_sequence([4, 8, 1]) # [T, B, F]
core = core_cls(hidden_size=4)
batch_size = seqs.shape[1]
initial_state = core.initial_state(batch_size)
out, all_states = recurrent.dynamic_unroll(
core, seqs, initial_state, return_all_states=True, **unroll_kwargs)
self.assertEqual(out.shape, (4, 8, 4))
tree.map_structure(
lambda array: self.assertEqual(array.shape[0], 4), all_states)
class VanillaRNNTest(absltest.TestCase):
@test_utils.transform_and_run
def test_double_bias_length_parameters(self):
double_bias = recurrent.VanillaRNN(1, double_bias=True)
double_bias(jnp.zeros([1]), double_bias.initial_state(None))
double_bias_params = jax.tree_util.tree_leaves(double_bias.params_dict())
vanilla = recurrent.VanillaRNN(1, double_bias=False)
vanilla(jnp.zeros([1]), vanilla.initial_state(None))
vanilla_params = jax.tree_util.tree_leaves(vanilla.params_dict())
self.assertLen(double_bias_params, len(vanilla_params) + 1)
class LSTMTest(absltest.TestCase):
@test_utils.transform_and_run
def test_lstm_raises(self):
core = recurrent.LSTM(4)
with self.assertRaisesRegex(ValueError, "rank-1 or rank-2"):
core(jnp.zeros([]), core.initial_state(None))
with self.assertRaisesRegex(ValueError, "rank-1 or rank-2"):
expanded_state = tree.map_structure(lambda x: jnp.expand_dims(x, 0),
core.initial_state(1))
core(jnp.zeros([1, 1, 1]), expanded_state)
class ConvLSTMTest(parameterized.TestCase):
@parameterized.parameters(1, 2, 3)
@test_utils.transform_and_run
def test_connect_conv_same(self, n):
batch_size = 2
input_shape = (16,) * n
input_shape_b = (batch_size,) + input_shape + (4,)
data = jnp.zeros(input_shape_b)
core = recurrent.ConvNDLSTM(
n, input_shape=input_shape, output_channels=3, kernel_shape=3)
state = core.initial_state(batch_size=batch_size)
out, state = core(data, state)
expected_output_shape = (batch_size,) + input_shape + (3,)
self.assertEqual(out.shape, expected_output_shape)
self.assertEqual(state[0].shape, expected_output_shape)
self.assertEqual(state[1].shape, expected_output_shape)
class GRUTest(absltest.TestCase):
@test_utils.transform_and_run
def test_gru_raises(self):
core = recurrent.GRU(4)
with self.assertRaisesRegex(ValueError, "rank-1 or rank-2"):
core(jnp.zeros([]), core.initial_state(None))
class _DummyCore(recurrent.RNNCore):
def __init__(self, state, name="dummy"):
super().__init__(name=name)
self._state = state
def __call__(self, inputs, prev_state):
return inputs, prev_state
def initial_state(self, batch_size):
return jax.tree_util.tree_map(jnp.zeros_like, self._state)
class _IncrementByOneCore(recurrent.RNNCore):
def __init__(self, state_size=4, name=None):
super().__init__(name=name)
self._state_size = state_size
def __call__(self, inputs, prev_state):
del inputs
state = prev_state + 1.
return state, state
def initial_state(self, batch_size):
if batch_size is not None:
return jnp.zeros((batch_size, self._state_size))
return jnp.zeros(self._state_size)
class _BatchedOnlyCore(recurrent.RNNCore):
def __call__(self, inputs, prev_state):
return inputs, prev_state
def initial_state(self, batch_size):
assert batch_size is not None
return jnp.zeros([batch_size])
def static_unroll_with_states(core, inputs, state):
outs = []
states = []
steps = tree.flatten(inputs)[0].shape[0]
for i in range(steps):
step_input = tree.map_structure(lambda x: x[i], inputs) # pylint: disable=cell-var-from-loop
out, state = core(step_input, state)
outs.append(out)
states.append(state)
outs = jnp.stack(outs, axis=0)
states = tree.map_structure(lambda *a: jnp.stack(a, axis=0), *states)
return outs, states
class ResetCoreTest(parameterized.TestCase):
UNROLLS = (recurrent.dynamic_unroll, recurrent.static_unroll)
UNROLL_KWARGS = ({}, {"unroll": 2})
@parameterized.parameters(*it.product(UNROLLS, UNROLL_KWARGS))
def test_resetting(self, unroll, unroll_kwargs):
if unroll == recurrent.static_unroll and unroll_kwargs.get("unroll", 1) > 1:
self.skipTest("static_unroll does not have an unroll parameter")
def net(seqs, should_reset):
# seqs is [T, B, F].
core = recurrent.LSTM(hidden_size=4)
reset_core = recurrent.ResetCore(core)
batch_size = seqs.shape[1]
# Statically unroll, collecting states.
core_outs, core_states = static_unroll_with_states(
core, seqs, core.initial_state(batch_size))
reset_outs, reset_states = static_unroll_with_states(
reset_core, (seqs, should_reset),
reset_core.initial_state(batch_size))
# Unroll without access to intermediate states.
dynamic_core_outs, dynamic_core_state = unroll(
core, seqs, core.initial_state(batch_size),
**unroll_kwargs)
dynamic_reset_outs, dynamic_reset_state = unroll(
reset_core, (seqs, should_reset),
reset_core.initial_state(batch_size),
**unroll_kwargs)
return dict(
core_outs=core_outs,
core_states=core_states,
reset_outs=reset_outs,
reset_states=reset_states,
dynamic_core_outs=dynamic_core_outs,
dynamic_core_state=dynamic_core_state,
dynamic_reset_outs=dynamic_reset_outs,
dynamic_reset_state=dynamic_reset_state,
)
batch_size = 4
# Reset one batch element on the second step.
resets = [[False] * batch_size, [True] + [False] * (batch_size - 1)]
resets = np.asarray(resets)
# Each sequence is the same input twice.
seqs = make_sequence([batch_size, 1])
seqs = np.stack([seqs, seqs], axis=0)
init_fn, apply_fn = transform.transform(net)
params = init_fn(jax.random.PRNGKey(428), seqs, resets)
result = apply_fn(params, None, seqs, resets)
# Verify dynamic and static unroll gave same outs and final states.
np.testing.assert_allclose(
result["core_outs"], result["dynamic_core_outs"], rtol=1e-6, atol=1e-6)
np.testing.assert_allclose(
result["reset_outs"],
result["dynamic_reset_outs"],
rtol=1e-6,
atol=1e-6)
for s, d in zip(result["core_states"], result["dynamic_core_state"]):
np.testing.assert_allclose(s[-1], d, rtol=1e-6, atol=1e-6)
for s, d in zip(result["reset_states"], result["dynamic_reset_state"]):
np.testing.assert_allclose(s[-1], d, rtol=1e-6, atol=1e-6)
# Now, test resetting behavior on static outputs.
core_outs = result["core_outs"]
core_states = result["core_states"]
reset_outs = result["reset_outs"]
reset_states = result["reset_states"]
# If no reset occurred, the reset core should do nothing.
np.testing.assert_allclose(
core_outs[0], reset_outs[0], rtol=1e-6, atol=1e-6)
for cs, rs in zip(core_states, reset_states):
np.testing.assert_allclose(cs[0], rs[0], rtol=1e-6, atol=1e-6)
np.testing.assert_allclose(
core_outs[1, 1:], reset_outs[1, 1:], rtol=1e-6, atol=1e-6)
for cs, rs in zip(core_states, reset_states):
np.testing.assert_allclose(cs[1, 1:], rs[1, 1:], rtol=1e-6, atol=1e-6)
# Check that the reset occurred where specified.
np.testing.assert_allclose(
core_outs[0, 0], reset_outs[1, 0], rtol=1e-6, atol=1e-6)
for cs, rs in zip(core_states, reset_states):
np.testing.assert_allclose(cs[0, 0], rs[1, 0], rtol=1e-6, atol=1e-6)
@parameterized.parameters(recurrent.dynamic_unroll, recurrent.static_unroll)
@test_utils.transform_and_run
def test_unbatched(self, unroll):
reset_time = 2
seq_len = 5
state_size = 4
core = recurrent.ResetCore(_IncrementByOneCore(state_size=state_size))
inputs = jnp.arange(0, seq_len)
batch_size = None # Unbatched.
should_reset = inputs == reset_time
initial_state = core.initial_state(batch_size)
result, _ = unroll(core, (inputs, should_reset), initial_state)
expected_result = np.array([ # seq_len x state_size
[1.0, 1.0, 1.0, 1.0],
[2.0, 2.0, 2.0, 2.0],
[1.0, 1.0, 1.0, 1.0], # reset_time = 2.
[2.0, 2.0, 2.0, 2.0],
[3.0, 3.0, 3.0, 3.0]
])
np.testing.assert_allclose(result, expected_result, rtol=1e-6, atol=1e-6)
@parameterized.parameters(*it.product((None, 3), UNROLL_KWARGS))
@test_utils.transform_and_run
def test_reversed_dynamic_unroll(self, batch_size, unroll_kwargs):
reset_time = 2
seq_len = 7
state_size = 4
core = recurrent.ResetCore(_IncrementByOneCore(state_size=state_size))
initial_state = core.initial_state(batch_size)
inputs = jnp.arange(0, seq_len) # seq_len
if batch_size is not None:
# seq_len x batch_size
inputs = jnp.stack([inputs] * batch_size, axis=1)
should_reset = inputs == reset_time
fwd_result, _ = recurrent.dynamic_unroll(
core, (inputs[::-1], should_reset[::-1]), initial_state, reverse=False,
**unroll_kwargs)
rev_result, _ = recurrent.dynamic_unroll(
core, (inputs, should_reset), initial_state, reverse=True,
**unroll_kwargs)
np.testing.assert_allclose(fwd_result[::-1], rev_result)
@test_utils.transform_and_run
def test_allow_batched_only_cores(self):
# Ensures batched-only cores can be wrapped with ResetCore.
core = recurrent.ResetCore(_BatchedOnlyCore())
batch_size = 5
inputs = jnp.ones((batch_size, 4))
prev_state = core.initial_state(batch_size)
should_reset = 0 * prev_state
core((inputs, should_reset), prev_state)
@parameterized.parameters(
(np.array((True, False)),
np.array(((0, 0), (0, 0)))),
(np.array((True, False)),
dict(core=np.array(((0, 0), (0, 0))))),
(np.array((True, False)),
np.array(((0, 0, 0, 0), (0, 0, 0, 0))).reshape((2, 1, 1, 4))),
(dict(core=np.array((True, False))),
dict(core=np.array(((0, 0), (0, 0))))),
)
@test_utils.transform_and_run
def test_input_conform(self, reset, state):
core = recurrent.ResetCore(core=_DummyCore(state=state))
core((state, reset), state)
@parameterized.parameters(
(np.array((True, False)).reshape((2, 1, 1)),
np.array(((0, 0), (0, 0)))),
(dict(core=np.array((True, False))),
dict(another_core=np.array(((0, 0), (0, 0))))),
)
@test_utils.transform_and_run
def test_input_conform_fails(self, reset, state):
core = recurrent.ResetCore(core=_DummyCore(state=state))
with self.assertRaises(ValueError):
core((state, reset), state)
class IdentityCoreTest(parameterized.TestCase):
@test_utils.transform_and_run
def test_identity_core_call(self):
core = recurrent.IdentityCore()
inputs, state_in = object(), object()
outputs, state_out = core(inputs, state_in)
self.assertIs(inputs, outputs)
self.assertIs(state_in, state_out)
@test_utils.transform_and_run
def test_identity_core_initial_state(self):
core = recurrent.IdentityCore()
self.assertEqual(core.initial_state(1), ())
class DeepRNNTest(parameterized.TestCase):
@test_utils.transform_and_run
def test_only_callables(self):
x = make_sequence([4, 3]) # [B, F]
core = recurrent.DeepRNN([jnp.tanh, jnp.square])
initial_state = core.initial_state(x.shape[0])
out, next_state = core(x, initial_state)
np.testing.assert_allclose(out, np.square(np.tanh(x)), rtol=1e-4)
self.assertEmpty(next_state)
self.assertEmpty(initial_state)
@test_utils.transform_and_run
def test_connection_and_shapes(self):
batch_size = 4
x = make_sequence([batch_size, 3]) # [B, F]
core = recurrent.DeepRNN([
recurrent.VanillaRNN(hidden_size=3),
basic.Linear(2),
jax.nn.relu,
recurrent.VanillaRNN(hidden_size=5),
jax.nn.relu,
])
initial_state = core.initial_state(x.shape[0])
out, next_state = core(x, initial_state)
self.assertEqual(out.shape, (batch_size, 5))
# Verifies that at least last layer of relu is applied.
self.assertTrue(np.all(out >= np.zeros([batch_size, 5])))
self.assertLen(next_state, 2)
self.assertEqual(initial_state[0].shape, (batch_size, 3))
self.assertEqual(initial_state[1].shape, (batch_size, 5))
self.assertLen(initial_state, 2)
np.testing.assert_allclose(initial_state[0], jnp.zeros([batch_size, 3]))
np.testing.assert_allclose(initial_state[1], jnp.zeros([batch_size, 5]))
@test_utils.transform_and_run
def test_skip_connections(self):
batch_size = 4
x = make_sequence([batch_size, 3]) # [B, F]
core = recurrent.deep_rnn_with_skip_connections([
recurrent.VanillaRNN(hidden_size=3),
recurrent.VanillaRNN(hidden_size=5),
])
initial_state = core.initial_state(x.shape[0])
out, _ = core(x, initial_state)
self.assertEqual(out.shape, (batch_size, 8))
# Previous tests test the correctness of state handling.
@test_utils.transform_and_run
def test_skip_validation(self):
with self.assertRaisesRegex(ValueError, "skip_connections requires"):
recurrent.deep_rnn_with_skip_connections([jax.nn.relu])
class BatchMajorUnrollTest(parameterized.TestCase):
@parameterized.parameters(recurrent.dynamic_unroll, recurrent.static_unroll)
@test_utils.transform_and_run
def test_batch_major(self, unroll):
core = recurrent.LSTM(4)
sequence_len, batch_size = 10, 5
inputs = np.random.randn(sequence_len, batch_size, 2)
batch_major_inputs = jnp.swapaxes(inputs, 0, 1)
initial_state = core.initial_state(batch_size)
time_major_outputs, time_major_unroll_state_out = unroll(
core, inputs, initial_state, time_major=True)
batch_major_outputs, batch_major_unroll_state_out = unroll(
core, batch_major_inputs, initial_state, time_major=False)
jax.tree_util.tree_map(
np.testing.assert_array_equal,
time_major_unroll_state_out, batch_major_unroll_state_out)
jax.tree_util.tree_map(
lambda x, y: np.testing.assert_array_equal(x, jnp.swapaxes(y, 0, 1)),
time_major_outputs, batch_major_outputs)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/recurrent_test.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing utilities for Haiku."""
from collections.abc import Generator, Sequence
import functools
import inspect
import itertools
import os
import types
from typing import Any, Callable, Optional, TypeVar
from absl.testing import parameterized
from haiku._src import config
from haiku._src import transform
import jax
T = TypeVar("T")
Fn = Callable[..., T]
Key = Any # NOTE: jax.random.PRNGKey is not actually a type.
def transform_and_run(
f: Optional[Fn] = None,
seed: Optional[int] = 42,
run_apply: bool = True,
jax_transform: Optional[Callable[[Fn], Fn]] = None,
*,
map_rng: Optional[Callable[[Key], Key]] = None,
) -> T:
r"""Transforms the given function and runs init then (optionally) apply.
Equivalent to:
>>> def f(x):
... return x
>>> x = jnp.ones([])
>>> rng = jax.random.PRNGKey(42)
>>> f = hk.transform_with_state(f)
>>> params, state = f.init(rng, x)
>>> out = f.apply(params, state, rng, x)
This function makes it very convenient to unit test Haiku:
>>> class MyTest(unittest.TestCase):
... @hk.testing.transform_and_run
... def test_linear_output(self):
... mod = hk.Linear(1)
... out = mod(jnp.ones([1, 1]))
... self.assertEqual(out.ndim, 2)
It can also be combined with ``chex`` to test all pure/jit/pmap versions of a
function:
>>> class MyTest(unittest.TestCase):
... @chex.all_variants
... def test_linear_output(self):
... @hk.testing.transform_and_run(jax_transform=self.variant)
... def f(inputs):
... mod = hk.Linear(1)
... return mod(inputs)
... out = f(jnp.ones([1, 1]))
... self.assertEqual(out.ndim, 2)
And can also be useful in an interactive environment like ipython, Jupyter or
Google Colaboratory:
>>> f = lambda x: hk.Bias()(x)
>>> print(hk.testing.transform_and_run(f)(jnp.ones([1, 1])))
[[1.]]
See :func:`transform` for more details.
To use this with `pmap` (without ``chex``) you need to additionally pass in a
function to map the init/apply rng keys. For example, if you want every
instance of your pmap to have the same key:
>>> def same_key_on_all_devices(key):
... return jnp.broadcast_to(key, (jax.local_device_count(), *key.shape))
>>> @hk.testing.transform_and_run(jax_transform=jax.pmap,
... map_rng=same_key_on_all_devices)
... def test_something():
... ...
Or you can use a different key:
>>> def different_key_on_all_devices(key):
... return jax.random.split(key, jax.local_device_count())
>>> @hk.testing.transform_and_run(jax_transform=jax.pmap,
... map_rng=different_key_on_all_devices)
... def test_something_else():
... ...
Args:
f: A function or method to transform.
seed: A seed to pass to init and apply.
run_apply: Whether to run apply as well as init. Defaults to true.
jax_transform: An optional jax transform to apply on the init and apply
functions.
map_rng: If set to a non-None value, a function applied to the init/apply
RNG keys (for example to broadcast or split them across devices) before
they are passed to ``init`` and ``apply``.
Returns:
A function that :func:`~haiku.transform`\ s ``f`` and runs ``init`` and
optionally ``apply``.
"""
if f is None:
return functools.partial(
transform_and_run,
seed=seed,
run_apply=run_apply,
jax_transform=jax_transform,
map_rng=map_rng)
@functools.wraps(f)
def wrapper(*a, **k):
"""Runs init and apply of f."""
if seed is not None:
init_rng, apply_rng = (jax.random.PRNGKey(seed),
jax.random.PRNGKey(seed + 1))
if map_rng is not None:
init_rng, apply_rng = map(map_rng, (init_rng, apply_rng))
else:
init_rng, apply_rng = None, None
init, apply = transform.transform_with_state(lambda: f(*a, **k))
if jax_transform:
init, apply = map(jax_transform, (init, apply))
params, state = init(init_rng)
if run_apply:
out, state = apply(params, state, apply_rng)
return out
return wrapper
def find_internal_python_modules(
root_module: types.ModuleType,
) -> Sequence[tuple[str, types.ModuleType]]:
"""Returns `(name, module)` for all Haiku submodules under `root_module`."""
modules = {(root_module.__name__, root_module)}
visited = set()
to_visit = [root_module]
while to_visit:
mod = to_visit.pop()
visited.add(mod)
for name in dir(mod):
obj = getattr(mod, name)
if inspect.ismodule(obj) and obj not in visited:
if obj.__name__.startswith("haiku"):
to_visit.append(obj)
modules.add((obj.__name__, obj))
return sorted(modules)
def find_subclasses(
root_python_module: types.ModuleType,
base_class: T,
) -> Generator[T, None, None]:
"""Recursively traverse modules finding subclasses of the given type."""
seen = set()
for _, module in find_internal_python_modules(root_python_module):
for _, value in module.__dict__.items():
if not inspect.isclass(value) or isinstance(value, types.GenericAlias):
continue
if issubclass(value, base_class) and value not in seen:
seen.add(value)
yield value
def combined_named_parameters(*parameters):
"""Combines multiple ``@parameterized.named_parameters`` compatible sequences.
>>> foos = ("a_for_foo", "a"), ("b_for_foo", "b")
>>> bars = ("c_for_bar", "c"), ("d_for_bar", "d")
>>> @named_parameters(foos)
... def testFoo(self, foo):
... assert foo in ("a", "b")
>>> @combined_named_parameters(foos, bars)
... def testFooBar(self, foo, bar):
... assert foo in ("a", "b")
... assert bar in ("c", "d")
Args:
*parameters: A sequence of parameters that will be combined and be passed
into ``parameterized.named_parameters``.
Returns:
A test generator to be handled by ``parameterized.TestGeneratorMetaclass``.
"""
combine = lambda a, b: ("_".join((a[0], b[0])),) + a[1:] + b[1:]
return parameterized.named_parameters(
functools.reduce(combine, r) for r in itertools.product(*parameters))
def named_bools(name) -> Sequence[tuple[str, bool]]:
"""Returns a pair of booleans suitable for use with ``named_parameters``."""
return (name, True), (f"not_{name}", False)
def named_range(name, stop: int) -> Sequence[tuple[str, int]]:
"""Equivalent to `range()` but suitable for use with ``named_parameters``."""
return tuple((f"{name}_{i}", i) for i in range(stop))
def with_environ(key: str, value: Optional[str]):
"""Runs the given test with environment variables set."""
def set_env(new_value):
if new_value is None:
if key in os.environ:
del os.environ[key]
else:
os.environ[key] = new_value
def decorator(f):
def wrapper(*a, **k):
value_before = os.environ.get(key, None)
set_env(value)
try:
return f(*a, **k)
finally:
set_env(value_before)
return wrapper
return decorator
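# A minimal usage sketch (not part of the original file); the environment
# variable name and the test are hypothetical.
#
#   @with_environ("HAIKU_SOME_FLAG", "1")
#   def test_reads_flag(self):
#     self.assertEqual(os.environ["HAIKU_SOME_FLAG"], "1")
#
# The previous value (or absence) of the variable is restored afterwards.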
def with_guardrails(f):
"""Runs the given test with JAX guardrails on."""
@functools.wraps(f)
def wrapper(*a, **k):
old = config.check_jax_usage(True)
try:
return f(*a, **k)
finally:
config.check_jax_usage(old)
return wrapper
|
dm-haiku-main
|
haiku/_src/test_utils.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.layer_norm."""
import functools
import itertools
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import initializers
from haiku._src import layer_norm
from haiku._src import test_utils
from haiku._src import transform
import jax
import jax.numpy as jnp
import numpy as np
def with_param_axis_error(f):
@functools.wraps(f)
def wrapper(*a, **k):
old = layer_norm.ERROR_IF_PARAM_AXIS_NOT_EXPLICIT
layer_norm.ERROR_IF_PARAM_AXIS_NOT_EXPLICIT = True
try:
return f(*a, **k)
finally:
layer_norm.ERROR_IF_PARAM_AXIS_NOT_EXPLICIT = old
return wrapper
class LayerNormTest(parameterized.TestCase):
@test_utils.transform_and_run
def test_connection(self):
data = jnp.zeros([2, 3, 4, 5])
normalize = (
lambda a: layer_norm.LayerNorm(a, True, True, param_axis=-1)(data))
normalize(0)
normalize(1)
normalize(2)
normalize(3)
normalize(slice(1, None))
normalize(slice(2, None))
normalize(slice(1, -1))
@parameterized.parameters(itertools.product([True, False], repeat=3))
def test_bf16(self, create_scale, create_offset, use_fast_variance):
"""For all configurations, ensure bf16 outputs from bf16 inputs."""
def f(x):
ln = layer_norm.LayerNorm(
axis=-1,
create_scale=create_scale,
create_offset=create_offset,
use_fast_variance=use_fast_variance,
param_axis=-1)
return ln(x)
fwd = transform.transform(f)
data = jnp.zeros([2, 3, 4, 5], dtype=jnp.bfloat16)
params = fwd.init(jax.random.PRNGKey(428), data)
bf16_params = jax.tree_util.tree_map(
lambda t: t.astype(jnp.bfloat16), params)
self.assertEqual(fwd.apply(bf16_params, None, data).dtype, jnp.bfloat16)
@parameterized.parameters(True, False)
@test_utils.transform_and_run
def test_simple_case(self, use_fast_variance):
layer = layer_norm.LayerNorm([1, 2],
create_scale=False,
create_offset=False,
use_fast_variance=use_fast_variance,
param_axis=-1)
inputs = np.ones([2, 3, 3, 5])
outputs = layer(inputs)
for x in np.nditer(outputs):
self.assertEqual(x, 0.0)
@parameterized.parameters(True, False)
@test_utils.transform_and_run
def test_simple_case_var(self, use_fast_variance):
layer = layer_norm.LayerNorm([1, 2],
create_scale=True,
create_offset=True,
scale_init=initializers.Constant(0.5),
offset_init=initializers.Constant(2.0),
use_fast_variance=use_fast_variance,
param_axis=-1)
inputs = np.ones([2, 3, 3, 5])
outputs = layer(inputs)
for x in np.nditer(outputs):
self.assertEqual(x, 2.0)
@test_utils.transform_and_run
def test_simple_case_tensor(self):
layer = layer_norm.LayerNorm([1, 2],
create_scale=False,
create_offset=False,
param_axis=-1)
inputs = np.ones([2, 3, 3, 5])
scale = np.full((5,), 0.5)
offset = np.full((5,), 2.0)
outputs = layer(inputs, scale, offset)
for x in np.nditer(outputs):
self.assertEqual(x, 2.0)
@parameterized.named_parameters(("String", "foo"), ("ListString", ["foo"]))
@test_utils.transform_and_run
def test_invalid_axis(self, axis):
with self.assertRaisesRegex(
ValueError, "`axis` should be an int, slice or iterable of ints."):
layer_norm.LayerNorm(axis, create_scale=False, create_offset=False)
@test_utils.transform_and_run
def test_no_scale_and_init_provided(self):
with self.assertRaisesRegex(
ValueError, "Cannot set `scale_init` if `create_scale=False`."):
layer_norm.LayerNorm(
3, create_scale=False, create_offset=True, scale_init=np.ones)
@test_utils.transform_and_run
def test_no_offset_beta_init_provided(self):
with self.assertRaisesRegex(
ValueError, "Cannot set `offset_init` if `create_offset=False`."):
layer_norm.LayerNorm(
3, create_scale=True, create_offset=False, offset_init=np.zeros)
@test_utils.transform_and_run
def test_create_scale_and_scale_provided(self):
layer = layer_norm.LayerNorm([2], create_scale=True, create_offset=False)
with self.assertRaisesRegex(
ValueError, "Cannot pass `scale` at call time if `create_scale=True`."):
layer(np.ones([2, 3, 4]), scale=np.ones([4]))
@test_utils.transform_and_run
def test_create_offset_and_offset_provided(self):
layer = layer_norm.LayerNorm([2], create_offset=True, create_scale=False)
with self.assertRaisesRegex(
ValueError,
"Cannot pass `offset` at call time if `create_offset=True`."):
layer(np.ones([2, 3, 4]), offset=np.ones([4]))
@parameterized.parameters(True, False)
@test_utils.transform_and_run
def test_slice_axis(self, use_fast_variance):
slice_layer = layer_norm.LayerNorm(
slice(1, -1),
create_scale=False,
create_offset=False,
use_fast_variance=use_fast_variance,
param_axis=-1)
axis_layer = layer_norm.LayerNorm((1, 2),
create_scale=False,
create_offset=False,
use_fast_variance=use_fast_variance,
param_axis=-1)
inputs = np.random.uniform(size=[3, 4, 4, 5], low=0, high=10)
scale = np.random.normal(size=(5,), loc=1.0)
offset = np.random.normal(size=(5,))
slice_outputs = slice_layer(inputs, scale, offset)
axis_outputs = axis_layer(inputs, scale, offset)
np.testing.assert_array_equal(slice_outputs, axis_outputs)
@test_utils.transform_and_run
def test_connection_instance_norm(self):
layer = layer_norm.InstanceNorm(create_scale=True, create_offset=True)
inputs = np.ones([3, 4, 5, 6])
result = layer(inputs)
self.assertEqual(result.shape, (3, 4, 5, 6))
@test_utils.transform_and_run
def test_param_axis_not_required_for_final_axis(self):
ln = layer_norm.LayerNorm(-1, True, True)
x = jnp.ones([3, 4, 5, 6])
ln(x)
self.assertEqual(ln.params_dict()["layer_norm/scale"].shape, (6,))
self.assertEqual(ln.params_dict()["layer_norm/offset"].shape, (6,))
@test_utils.transform_and_run
def test_error_prone_param_axis(self):
# NOTE: This test defends current, potentially error prone behaviour
# (passing axis!=-1 and not passing param_axis). It will be removed in a
# future version of Haiku.
ln = layer_norm.LayerNorm(1, True, True)
x = jnp.ones([3, 4, 5, 6])
ln(x)
self.assertEqual(ln.params_dict()["layer_norm/scale"].shape, (6,))
self.assertEqual(ln.params_dict()["layer_norm/offset"].shape, (6,))
@parameterized.parameters(0, 1, 2, ((0, 1),), ((0, 1, 2),), -2, -3, -4,
slice(0, 2))
@test_utils.transform_and_run
@with_param_axis_error
def test_param_axis_required_for_non_final_axis(self, axis):
ln = layer_norm.LayerNorm(axis, True, True)
x = jnp.ones([3, 4, 5, 6])
with self.assertRaisesRegex(ValueError, "pass.*param_axis.*in the ctor"):
ln(x)
@parameterized.parameters(
(-1, (6,)),
(-2, (1, 1, 5, 1)),
(-3, (1, 4, 1, 1)),
(-4, (3, 1, 1, 1)),
(0, (3, 1, 1, 1)),
(1, (1, 4, 1, 1)),
(2, (1, 1, 5, 1)),
(3, (6,)),
)
@test_utils.transform_and_run
def test_param_axis_sets_param_shape(self, param_axis, param_shape):
ln = layer_norm.LayerNorm(-1, True, True, param_axis=param_axis)
x = jnp.ones([3, 4, 5, 6])
ln(x)
self.assertEqual(ln.params_dict()["layer_norm/scale"].shape, param_shape)
self.assertEqual(ln.params_dict()["layer_norm/offset"].shape, param_shape)
@parameterized.parameters(
((0, 1, 2), (3, 4, 5, 1)),
((-4, -2, -3), (3, 4, 5, 1)),
((0, 1), (3, 4, 1, 1)),
((0, 3), (3, 1, 1, 6)),
((-4, -1), (3, 1, 1, 6)),
((-1, -4), (3, 1, 1, 6)),
)
@test_utils.transform_and_run
def test_multiple_param_axis(self, param_axis, param_shape):
ln = layer_norm.LayerNorm(-1, True, True, param_axis=param_axis)
x = jnp.ones([3, 4, 5, 6])
ln(x)
self.assertEqual(ln.params_dict()["layer_norm/scale"].shape, param_shape)
self.assertEqual(ln.params_dict()["layer_norm/offset"].shape, param_shape)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/layer_norm_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.dot."""
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import dot
from haiku._src import module
from haiku._src import test_utils
import jax
import jax.numpy as jnp
class DotTest(parameterized.TestCase):
def test_empty(self):
graph, args, out = dot.to_graph(lambda: None)()
self.assertEmpty(args)
self.assertIsNone(out)
self.assertEmpty(graph.nodes)
self.assertEmpty(graph.edges)
self.assertEmpty(graph.subgraphs)
@test_utils.transform_and_run
def test_add_module(self):
mod = AddModule()
a = b = jnp.ones([])
graph, args, c = dot.to_graph(mod)(a, b)
self.assertEqual(args, (a, b))
self.assertEqual(c, a + b)
self.assertEmpty(graph.edges)
add_graph, = graph.subgraphs
self.assertEqual(add_graph.title, "add_module")
self.assertEmpty(add_graph.subgraphs)
add_edge_a, add_edge_b = add_graph.edges
self.assertEqual(add_edge_a, (a, c))
self.assertEqual(add_edge_b, (b, c))
add_node, = add_graph.nodes
self.assertEqual(add_node.title, "add")
add_out, = add_node.outputs
self.assertEqual(add_out, c)
@test_utils.transform_and_run
def test_inline_jit_add_module(self):
mod = InlineJitAddModule()
a = b = jnp.ones([])
graph, args, c = dot.to_graph(mod)(a, b)
self.assertEqual(args, (a, b))
self.assertEqual(c, a + b)
self.assertEmpty(graph.edges)
add_graph, = graph.subgraphs
self.assertEqual(add_graph.title, "inline_jit_add_module")
self.assertEmpty(add_graph.subgraphs)
add_edge_a, add_edge_b = add_graph.edges
self.assertEqual(add_edge_a, (a, c))
self.assertEqual(add_edge_b, (b, c))
add_node, = add_graph.nodes
self.assertEqual(add_node.title, "add")
add_out, = add_node.outputs
self.assertEqual(add_out, c)
def test_call(self):
def my_function(x):
return x
graph, _, _ = dot.to_graph(jax.jit(my_function))(jnp.ones([]))
self.assertEmpty(graph.nodes)
self.assertEmpty(graph.edges)
jit, = graph.subgraphs
self.assertIn(jit.title, ("pjit (my_function)",))
def test_pmap(self):
def my_function(x):
return x
n = jax.local_device_count()
graph, _, _ = dot.to_graph(jax.pmap(my_function))(jnp.ones([n]))
self.assertEmpty(graph.nodes)
self.assertEmpty(graph.edges)
jit, = graph.subgraphs
self.assertEqual(jit.title, "xla_pmap (my_function)")
class AddModule(module.Module):
def __call__(self, a, b):
return a + b
class InlineJitAddModule(module.Module):
def __call__(self, a, b):
return jax.jit(lambda x, y: x + y, inline=True)(a, b)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/dot_test.py
|
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.deferred."""
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import deferred
from haiku._src import module
from haiku._src import test_utils
import jax.numpy as jnp
class DeferredTest(parameterized.TestCase):
@test_utils.transform_and_run
def test_target(self):
target = ExampleModule()
mod = deferred.Deferred(lambda: target)
self.assertIs(mod.target, target)
@test_utils.transform_and_run
def test_only_computes_target_once(self):
target = ExampleModule()
targets = [target]
mod = deferred.Deferred(targets.pop) # pytype: disable=wrong-arg-types
for _ in range(10):
# If target was recomputed more than once pop should fail.
self.assertIs(mod.target, target)
self.assertEmpty(targets)
@test_utils.transform_and_run
def test_attr_forwarding_fails_before_construction(self):
mod = deferred.Deferred(ExampleModule)
with self.assertRaises(AttributeError):
getattr(mod, "foo")
@test_utils.transform_and_run
def test_getattr(self):
mod = deferred.Deferred(ExampleModule)
mod()
self.assertIs(mod.w, mod.target.w) # pytype: disable=attribute-error
@test_utils.transform_and_run
def test_setattr(self):
mod = deferred.Deferred(ExampleModule)
mod()
new_w = jnp.ones_like(mod.w)
mod.w = new_w
self.assertIs(mod.w, new_w)
self.assertIs(mod.target.w, new_w) # pytype: disable=attribute-error
@test_utils.transform_and_run
def test_setattr_on_target(self):
mod = deferred.Deferred(ExampleModule)
mod()
w = jnp.ones_like(mod.w)
mod.w = None
# Assigning to the target directly should reflect in the parent.
mod.target.w = w
self.assertIs(mod.w, w)
self.assertIs(mod.target.w, w)
@test_utils.transform_and_run
def test_delattr(self):
mod = deferred.Deferred(ExampleModule)
mod()
self.assertTrue(hasattr(mod.target, "w"))
del mod.w
self.assertFalse(hasattr(mod.target, "w"))
@test_utils.transform_and_run
def test_alternative_forward(self):
mod = deferred.Deferred(AlternativeForwardModule, call_methods=("forward",))
self.assertEqual(mod.forward(), 42)
@test_utils.transform_and_run
def test_alternative_forward_call_type_error(self):
mod = deferred.Deferred(AlternativeForwardModule, call_methods=("forward",))
msg = "'AlternativeForwardModule' object is not callable"
with self.assertRaisesRegex(TypeError, msg):
mod()
@test_utils.transform_and_run
def test_str(self):
m = ExampleModule()
d = deferred.Deferred(lambda: m)
self.assertEqual("Deferred(%s)" % m, str(d))
@test_utils.transform_and_run
def test_repr(self):
m = ExampleModule()
d = deferred.Deferred(lambda: m)
self.assertEqual("Deferred(%r)" % m, repr(d))
@test_utils.transform_and_run
def test_deferred_naming_name_scope(self):
with module.name_scope("foo"):
d = deferred.Deferred(ExampleModule)
mod = d.target
self.assertEqual(mod.module_name, "foo/example_module")
@parameterized.parameters(True, False)
@test_utils.transform_and_run
def test_deferred_naming_outer_module(self, call_module):
outer = OuterModule()
if call_module:
outer()
mod = outer.deferred.target
self.assertEqual(mod.module_name, "outer/~/example_module")
class OuterModule(module.Module):
def __init__(self, name="outer"):
super().__init__(name=name)
self.deferred = deferred.Deferred(ExampleModule)
def __call__(self):
return self.deferred()
class ExampleModule(module.Module):
def __init__(self):
super().__init__()
self.w = jnp.ones([])
def __str__(self):
return "ExampleModuleStr"
def __repr__(self):
return "ExampleModuleRepr"
def __call__(self):
return self.w
class AlternativeForwardModule(module.Module):
def forward(self):
return 42
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/deferred_test.py
|
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configuration for Haiku."""
import contextlib
import dataclasses
import threading
from typing import Optional
@dataclasses.dataclass
class Config:
check_jax_usage: bool
module_auto_repr: bool
restore_flatmap: bool
rng_reserve_size: int
@classmethod
def default(cls) -> "Config":
return Config(
check_jax_usage=False,
module_auto_repr=True,
restore_flatmap=False,
rng_reserve_size=1,
)
def write(config, **overrides):
for name, value in overrides.items():
assert hasattr(config, name)
setattr(config, name, value)
filter_none_values = lambda d: {k: v for k, v in d.items() if v is not None}
# pylint: disable=redefined-outer-name,unused-argument
def context(
*,
check_jax_usage: Optional[bool] = None,
module_auto_repr: Optional[bool] = None,
restore_flatmap: Optional[bool] = None,
rng_reserve_size: Optional[int] = None,
):
"""Context manager for setting config options.
This context manager can be used to override config settings in a given
context; values that are not explicitly passed as keyword arguments retain
their current value:
>>> with hk.config.context(check_jax_usage=True):
... pass
Args:
check_jax_usage: Checks that jax transforms and control flow are used
appropriately in Haiku transformed functions.
module_auto_repr: Can be used to disable the "to string" functionality that
is part of Haiku's base constructor.
restore_flatmap: Whether legacy checkpoints should be restored in the old
FlatMap datatype (as returned by ``to_immutable_dict``); the default is to
restore these as plain dicts.
rng_reserve_size: Number of keys to reserve when splitting off a key
through ``next_rng_key()``, defaults to 1. Reserving larger blocks of keys
can improve compilation and run-time of your model. Changing the
reservation size will change RNG keys returned by ``next_rng_key``, and
will change the generated random numbers.
Returns:
Context manager that applies the given configs while active.
"""
return assign(**filter_none_values(locals()))
# pylint: enable=redefined-outer-name,unused-argument
# pylint: disable=redefined-outer-name,unused-argument,redefined-builtin
def set(
*,
check_jax_usage: Optional[bool] = None,
module_auto_repr: Optional[bool] = None,
restore_flatmap: Optional[bool] = None,
rng_reserve_size: Optional[int] = None,
):
"""Sets the given config option(s).
>>> hk.config.set(module_auto_repr=False)
>>> hk.Linear(1)
<...Linear object at ...>
>>> hk.config.set(module_auto_repr=True)
>>> hk.Linear(1)
Linear(output_size=1)
Args:
check_jax_usage: Checks that jax transforms and control flow are used
appropriately in Haiku transformed functions.
module_auto_repr: Can be used to disable the "to string" functionality that
is part of Haiku's base constructor.
restore_flatmap: Whether legacy checkpoints should be restored in the old
FlatMap datatype (as returned by ``to_immutable_dict``); the default is to
restore these as plain dicts.
rng_reserve_size: Number of keys to reserve when splitting off a key
through ``next_rng_key()``, defaults to 1. Reserving larger blocks of keys
can improve compilation and run-time of your model. Changing the
reservation size will change RNG keys returned by ``next_rng_key``, and
will change the generated random numbers.
"""
write(get_config(), **filter_none_values(locals()))
# pylint: enable=redefined-outer-name,unused-argument,redefined-builtin
@contextlib.contextmanager
def assign(**overrides):
"""Context manager used to override config settings."""
config = get_config()
previous = {name: getattr(config, name) for name in overrides}
write(config, **overrides)
try:
yield
finally:
write(config, **previous)
def with_config(**overrides):
"""Decorator used to run a wrapped function with overridden config."""
def decorator(f):
def wrapper(*args, **kwargs):
with assign(**overrides):
return f(*args, **kwargs)
return wrapper
return decorator
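# A minimal usage sketch (not part of the original file); the wrapped
# function is hypothetical.
#
#   @with_config(check_jax_usage=True)
#   def run_with_checks(fn, *args):
#     return fn(*args)  # the override is active only inside this call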
# We keep a reference to the Config for the importing thread (assumed to be the
# main thread in the process) such that other threads can inherit values set for
# it when they first request the config.
main_thread_config = Config.default()
class ThreadLocalStorage(threading.local):
def __init__(self):
super().__init__()
self.config = Config(**dataclasses.asdict(main_thread_config))
tls = ThreadLocalStorage()
tls.config = main_thread_config
def get_config() -> Config:
return tls.config
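# A minimal sketch (not part of the original file) of the inheritance
# behaviour described in the comment above; the worker function is
# hypothetical.
#
#   set(check_jax_usage=True)   # mutates the main thread's (shared) Config
#
#   def worker():
#     # The first access from a new thread copies main_thread_config, so the
#     # value set above is inherited here.
#     assert get_config().check_jax_usage
#
#   threading.Thread(target=worker).start()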
def module_auto_repr(enabled: bool) -> bool:
"""Disables automatically generating an implementation of Module.__repr__.
By default, Haiku will automatically generate a useful string representation
of modules for printing. For example:
>>> print(hk.Linear(1))
Linear(output_size=1)
In some cases, objects passed into module constructors may be slow to print,
for example very nested data structures, or you may be rapidly creating and
throwing away modules (e.g. in a test) and don't want to pay the overhead of
converting to string.
This config option enables users to disable the automatic repr feature
globally in Haiku:
>>> previous_value = hk.experimental.module_auto_repr(False)
>>> print(hk.Linear(1))
<...Linear object at ...>
>>> previous_value = hk.experimental.module_auto_repr(True)
>>> print(hk.Linear(1))
Linear(output_size=1)
To disable the feature on a per-subclass basis assign
``AUTO_REPR = False`` as a property on your class, for example:
>>> class NoAutoRepr(hk.Module):
... AUTO_REPR = False
>>> print(NoAutoRepr())
<...NoAutoRepr object at ...>
Args:
enabled: Boolean indicating whether the automatic repr should be enabled.
Returns:
The previous value of this config setting.
"""
config = get_config()
previous_value, config.module_auto_repr = config.module_auto_repr, enabled
return previous_value
def check_jax_usage(enabled: bool = True) -> bool:
"""Ensures JAX APIs (e.g. :func:`jax.vmap`) are used correctly with Haiku.
JAX transforms (like :func:`jax.vmap`) and control flow (e.g.
:func:`jax.lax.cond`) expect pure functions to be passed in. Some functions
in Haiku (for example :func:`~haiku.get_parameter`) have side effects and thus
functions using them are only pure after using :func:`~haiku.transform` (et
al).
Sometimes it is convenient to use JAX transforms or control flow before
transforming your function (for example, to :func:`~haiku.vmap` the
application of a module) but when doing so you need to be careful to use the
Haiku overloaded version of the underlying JAX function, which carefully makes
the function(s) you pass in pure functions before calling the underlying JAX
function.
:func:`check_jax_usage` enables checking raw JAX transforms are used
appropriately inside Haiku transformed functions. Incorrect usage of JAX
transforms will result in an error.
Consider the function below, it is not a pure function (a function of its
inputs with no side effects) because we call into a Haiku API
(:func:`~haiku.get_parameter`) which during init will create a parameter and
register it with Haiku.
>>> def f():
... return hk.get_parameter("some_param", [], init=jnp.zeros)
We should not use this with JAX APIs like :func:`jax.vmap` (because it is not
a pure function). :func:`check_jax_usage` allows you to tell Haiku to make
incorrect usages of JAX APIs an error:
>>> previous_value = hk.experimental.check_jax_usage(True)
>>> jax.vmap(f, axis_size=2)()
Traceback (most recent call last):
...
haiku.JaxUsageError: ...
Using the Haiku wrapped version works correctly:
>>> print(hk.vmap(f, axis_size=2, split_rng=False)())
[0. 0.]
Args:
enabled: Boolean indicating whether usage should be checked or not.
Returns:
Boolean with the previous value for this setting.
"""
config = get_config()
previous_value, config.check_jax_usage = config.check_jax_usage, enabled
return previous_value
def rng_reserve_size(size: int) -> int:
"""Change amount of RNG keys reserved when calling ``next_rng_key``.
Args:
size: Number of keys to reserve when splitting off a key
through ``next_rng_key()``, defaults to 1. Reserving larger blocks of keys
can improve compilation and run-time of your model. Changing the
reservation size will change RNG keys returned by ``next_rng_key``, and
will change the generated random numbers.
Returns:
The previous value of the rng_reserve_size setting.
"""
if size <= 0:
raise ValueError(f"RNG reserve size needs to be more than 0, got {size}.")
config = get_config()
before, config.rng_reserve_size = config.rng_reserve_size, size
return before
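# A minimal usage sketch (not part of the original file); the reserve size
# of 8 is an arbitrary illustrative value.
#
#   previous = rng_reserve_size(8)   # reserve 8 keys per `next_rng_key` split
#   try:
#     ...                            # run init/apply of the model
#   finally:
#     rng_reserve_size(previous)     # restore the previous setting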
|
dm-haiku-main
|
haiku/_src/config.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.utils."""
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import test_utils
from haiku._src import utils
import jax.numpy as jnp
import numpy as np
lines = lambda *a: "\n".join(a)
class UtilsTest(parameterized.TestCase):
def test_indent(self):
self.assertEqual(
lines(" foo",
" bar"),
utils.indent(2, lines("foo", "bar")))
def test_auto_repr(self):
self.assertEqual("SomeClass(a=1, b=2)",
utils.auto_repr(SomeClass, 1, 2))
self.assertEqual("SomeClass(a=1, b=2, c=3)",
utils.auto_repr(SomeClass, 1, 2, 3))
self.assertEqual("SomeClass(a=1, b=2, c=3)",
utils.auto_repr(SomeClass, 1, 2, c=3))
self.assertEqual("SomeClass(a=1, b=2, c=3)",
utils.auto_repr(SomeClass, 1, b=2, c=3))
self.assertEqual("SomeClass(a=1, b=2, c=3)",
utils.auto_repr(SomeClass, a=1, b=2, c=3))
SHAPES = (("r0", []), ("r1", [1]), ("r2", [200, 200]), ("r3", [2, 3, 4]),
("r1_empty", [0]))
DTYPES = (("f32", np.float32), ("f16", np.float16), ("s8", np.int8),
("bf16", jnp.bfloat16))
CONTAINERS = (("list", lambda x: [x]), ("tuple", lambda x: (x,)),
("dict", lambda x: {"a": x}))
@test_utils.combined_named_parameters(SHAPES, DTYPES, CONTAINERS)
def test_tree_size(self, shape, dtype, container):
x = np.ones(shape, dtype=dtype)
expected_size = np.prod(x.shape) if x.ndim else 1
self.assertEqual(utils.tree_size(container(x)), expected_size)
@test_utils.combined_named_parameters(SHAPES, DTYPES, CONTAINERS)
def test_tree_bytes(self, shape, dtype, container):
x = np.ones(shape, dtype=dtype)
expected_bytes = (np.prod(x.shape) if x.ndim else 1) * x.itemsize
self.assertEqual(utils.tree_bytes(container(x)), expected_bytes)
def test_format_array(self):
self.assertEqual(utils.format_array(np.ones([], np.float32)), "f32[]")
self.assertEqual(utils.format_array(np.ones([1, 2], np.int8)), "s8[1,2]")
self.assertEqual(utils.format_array(np.ones([], jnp.bfloat16)), "bf16[]")
def test_format_bytes(self):
self.assertEqual(utils.format_bytes(0), "0.00 B")
self.assertEqual(utils.format_bytes(999), "999.00 B")
self.assertEqual(utils.format_bytes(1234), "1.23 KB")
self.assertEqual(utils.format_bytes(1235), "1.24 KB")
self.assertEqual(utils.format_bytes(999010), "999.01 KB")
self.assertEqual(utils.format_bytes(1e3), "1.00 KB")
self.assertEqual(utils.format_bytes(2e6), "2.00 MB")
self.assertEqual(utils.format_bytes(3e9), "3.00 GB")
self.assertEqual(utils.format_bytes(4e12), "4.00 TB")
self.assertEqual(utils.format_bytes(5e20), "500000000.00 TB")
class ReplicateTest(parameterized.TestCase):
@parameterized.named_parameters(("Int", 42), ("String", "foo"),
("Callable", lambda a: a))
def testSingleValue(self, value):
result = utils.replicate(value, 3, "value")
self.assertLen(result, 3)
self.assertEqual(result, (value,) * 3)
@parameterized.named_parameters(("Int", 42), ("String", "foo"),
("Callable", lambda a: a))
def testListLengthOne(self, value):
result = utils.replicate([value], 3, "value")
self.assertLen(result, 3)
self.assertEqual(result, (value,) * 3)
@parameterized.named_parameters(("Int", 42), ("String", "foo"),
("Callable", lambda a: a))
def testTupleLengthN(self, value):
v = (value,) * 3
result = utils.replicate(v, 3, "value")
self.assertLen(result, 3)
self.assertEqual(result, (value,) * 3)
@parameterized.named_parameters(("Int", 42), ("String", "foo"),
("Callable", lambda a: a))
def testListLengthN(self, value):
v = list((value,) * 3)
result = utils.replicate(v, 3, "value")
self.assertLen(result, 3)
self.assertEqual(result, (value,) * 3)
def testIncorrectLength(self):
v = [2, 2]
with self.assertRaisesRegex(
TypeError,
r"must be a scalar or sequence of length 1 or sequence of length 3"):
utils.replicate(v, 3, "value")
class SomeClass:
def __init__(self, a, b, c=2):
pass
class ChannelIndexTest(parameterized.TestCase):
@parameterized.parameters("channels_first", "NCHW", "NC", "NCDHW")
def test_returns_index_channels_first(self, data_format):
self.assertEqual(utils.get_channel_index(data_format), 1)
@parameterized.parameters("channels_last", "NHWC", "NDHWC", "BTWHD", "TBD")
def test_returns_index_channels_last(self, data_format):
self.assertEqual(utils.get_channel_index(data_format), -1)
@parameterized.parameters("foo", "NCHC", "BTDTD", "chanels_first", "NHW")
def test_invalid_strings(self, data_format):
with self.assertRaisesRegex(
ValueError,
f"Unable to extract channel information from '{data_format}'.",
):
utils.get_channel_index(data_format)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/utils_test.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.layer_stack."""
import functools
import re
from typing import Optional
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import base
from haiku._src import basic
from haiku._src import config
from haiku._src import initializers
from haiku._src import layer_stack
from haiku._src import module
from haiku._src import multi_transform
from haiku._src import transform
from haiku._src import utils
import jax
import jax.numpy as jnp
import numpy as np
from scipy import stats
# Suffixes applied by Haiku for repeated module names.
suffixes = [""] + [f"_{i}" for i in range(1, 100)]
def _slice_layers_params(layers_params):
sliced_layers_params = {}
for k, v in layers_params.items():
for inner_k in v:
for var_slice, suffix in zip(v[inner_k], suffixes):
k_new = k.split("/")[-1] + suffix
if k_new not in sliced_layers_params:
sliced_layers_params[k_new] = {}
sliced_layers_params[k_new][inner_k] = var_slice
return sliced_layers_params
class LayerStackTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self._prev_check_jax_usage = config.check_jax_usage(True)
def tearDown(self):
super().tearDown()
config.check_jax_usage(self._prev_check_jax_usage)
@parameterized.parameters([1, 2, 4])
def test_layer_stack(self, unroll):
"""Compare layers_stack to the equivalent unrolled stack.
Tests that the layers_stack application of a Haiku layer function is
equivalent to repeatedly applying the layer function in an unrolled loop.
Args:
unroll: number of unrolled layers.
"""
num_layers = 20
def inner_fn(x):
x += basic.Linear(100, name="linear1")(x)
x += basic.Linear(100, name="linear2")(x)
x /= jnp.mean(x)
return x
def outer_fn_unrolled(x):
for _ in range(num_layers):
x = inner_fn(x)
return x
def outer_fn_layer_stack(x):
stack = layer_stack.layer_stack(num_layers, unroll=unroll)(inner_fn)
return stack(x)
unrolled_fn = transform.transform(outer_fn_unrolled)
layer_stack_fn = transform.transform(outer_fn_layer_stack)
x = jax.random.uniform(jax.random.PRNGKey(0), [10, 256, 100])
rng_init = jax.random.PRNGKey(42)
params = layer_stack_fn.init(rng_init, x)
sliced_params = _slice_layers_params(params)
unrolled_pred = unrolled_fn.apply(sliced_params, None, x)
layer_stack_pred = layer_stack_fn.apply(params, None, x)
np.testing.assert_allclose(unrolled_pred, layer_stack_pred, atol=1e-3)
def test_layer_stack_multi_args(self):
"""Compare layers_stack to the equivalent unrolled stack.
Similar to `test_layer_stack`, but use a function that takes more than one
argument.
"""
num_layers = 20
def inner_fn(x, y):
x_out = x + basic.Linear(100, name="linear1")(y)
y_out = y + basic.Linear(100, name="linear2")(x)
return x_out, y_out
def outer_fn_unrolled(x, y):
for _ in range(num_layers):
x, y = inner_fn(x, y)
return x, y
def outer_fn_layer_stack(x, y):
stack = layer_stack.layer_stack(num_layers)(inner_fn)
return stack(x, y)
unrolled_fn = transform.transform(outer_fn_unrolled)
layer_stack_fn = transform.transform(outer_fn_layer_stack)
x = jax.random.uniform(jax.random.PRNGKey(0), [10, 256, 100])
y = jax.random.uniform(jax.random.PRNGKey(1), [10, 256, 100])
rng_init = jax.random.PRNGKey(42)
params = layer_stack_fn.init(rng_init, x, y)
sliced_params = _slice_layers_params(params)
unrolled_x, unrolled_y = unrolled_fn.apply(sliced_params, None, x, y)
layer_stack_x, layer_stack_y = layer_stack_fn.apply(params, None, x, y)
np.testing.assert_allclose(unrolled_x, layer_stack_x, atol=1e-3)
np.testing.assert_allclose(unrolled_y, layer_stack_y, atol=1e-3)
def test_layer_stack_no_varargs(self):
"""Test an error is raised when using a function with varargs."""
class VarArgsModule(module.Module):
"""When used, this module should cause layer_stack to raise an Error."""
def __call__(self, *args):
return args
class NoVarArgsModule(module.Module):
"""This module should be fine to use with layer_stack."""
def __call__(self, x):
return x
def build_and_init_stack(module_class):
def stack_fn(x):
mod = module_class()
return layer_stack.layer_stack(1)(mod)(x)
stack = multi_transform.without_apply_rng(transform.transform(stack_fn))
stack.init(jax.random.PRNGKey(1729), jnp.ones([5]))
build_and_init_stack(NoVarArgsModule)
with self.assertRaisesRegex(
ValueError, "The function `f` should not have any `varargs`"):
build_and_init_stack(VarArgsModule)
def test_layer_stack_no_state_error(self):
def outer_fn_layer_stack(x):
stack = layer_stack.layer_stack(1)(lambda x: base.set_state("hi", x))
return stack(x)
layer_stack_fn = transform.transform_with_state(outer_fn_layer_stack)
x = jnp.ones((1,))
with self.assertRaisesRegex(layer_stack.LayerStackStateError,
"LayerStack.*state"):
layer_stack_fn.init(None, x)
@parameterized.parameters([1, 2, 4])
def test_layer_stack_grads(self, unroll):
"""Compare layers_stack gradients to the equivalent unrolled stack.
Tests that the layers_stack application of a Haiku layer function is
equivalent to repeatedly applying the layer function in an unrolled loop.
Args:
unroll: number of unrolled layers.
"""
num_layers = 20
def inner_fn(x):
x += basic.Linear(100, name="linear1")(x)
x += basic.Linear(100, name="linear2")(x)
x /= jnp.mean(x)
return x
def outer_fn_unrolled(x):
for _ in range(num_layers):
x = inner_fn(x)
return x
def outer_fn_layer_stack(x):
stack = layer_stack.layer_stack(num_layers, unroll=unroll)(inner_fn)
return stack(x)
unrolled_fn = transform.transform(outer_fn_unrolled)
layer_stack_fn = transform.transform(outer_fn_layer_stack)
x = jax.random.uniform(jax.random.PRNGKey(0), [10, 256, 100])
rng_init = jax.random.PRNGKey(42)
params = layer_stack_fn.init(rng_init, x)
sliced_params = _slice_layers_params(params)
unrolled_grad = jax.grad(
lambda p, x: jnp.mean(unrolled_fn.apply(p, None, x)))(sliced_params, x)
layer_stack_grad = jax.grad(
lambda p, x: jnp.mean(layer_stack_fn.apply(p, None, x)))(params, x)
assert_fn = functools.partial(
np.testing.assert_allclose, atol=1e-4, rtol=1e-4)
jax.tree_util.tree_map(
assert_fn, unrolled_grad, _slice_layers_params(layer_stack_grad))
def test_random(self):
"""Random numbers should be handled correctly."""
n = 100
@transform.transform
@layer_stack.layer_stack(n)
def add_random(x):
x = x + jax.random.normal(base.next_rng_key())
return x
# Evaluate a bunch of times
key, *keys = jax.random.split(jax.random.PRNGKey(7), 1024 + 1)
params = add_random.init(key, 0.)
apply_fn = jax.jit(add_random.apply)
values = [apply_fn(params, key, 0.) for key in keys]
# Should be roughly N(0, sqrt(n))
cdf = stats.norm(scale=np.sqrt(n)).cdf
_, p = stats.kstest(values, cdf) # pytype: disable=attribute-error
self.assertGreater(p, 0.1)
def test_threading(self):
"""Test @layer_stack when the function gets per-layer inputs."""
n = 5
@layer_stack.layer_stack(n, with_per_layer_inputs=True)
def f(x, y):
x = x + y * jax.nn.one_hot(y, len(x)) / 10
return x, 2 * y
@multi_transform.without_apply_rng
@transform.transform
def g(x, ys):
x, zs = f(x, ys)
# Check here to catch issues at init time
self.assertEqual(zs.shape, (n,))
return x, zs
rng = jax.random.PRNGKey(7)
x = np.zeros(n)
ys = np.arange(n).astype(np.float32)
params = g.init(rng, x, ys)
x, zs = g.apply(params, x, ys)
self.assertTrue(np.allclose(x, [0, .1, .2, .3, .4]))
self.assertTrue(np.all(zs == 2 * ys))
def test_nested_stacks(self):
def stack_fn(x):
def layer_fn(x):
return basic.Linear(100)(x)
outer_fn = layer_stack.layer_stack(10)(layer_fn)
layer_outer = layer_stack.layer_stack(20)(outer_fn)
return layer_outer(x)
hk_mod = transform.transform(stack_fn)
apply_rng, init_rng = jax.random.split(jax.random.PRNGKey(0))
params = hk_mod.init(init_rng, jnp.zeros([10, 100]))
hk_mod.apply(params, apply_rng, jnp.zeros([10, 100]))
p, = params.values()
assert p["w"].shape == (20, 10, 100, 100), p["w"].shape
assert p["b"].shape == (20, 10, 100), p["b"].shape
def test_with_per_layer_inputs_multi_args(self):
"""Test layer_stack with per-layer inputs with multiple arguments."""
width = 4
batch_size = 5
stack_height = 3
def f_with_multi_args(x, a, b):
return basic.Linear(
width, w_init=initializers.Constant(
jnp.eye(width)))(x) * a + b, None
@multi_transform.without_apply_rng
@transform.transform
def hk_fn(x):
return layer_stack.layer_stack(
stack_height, with_per_layer_inputs=True)(f_with_multi_args)(
x, jnp.full([stack_height], 2.), jnp.ones([stack_height]))
x = jnp.zeros([batch_size, width])
key_seq = base.PRNGSequence(19)
params = hk_fn.init(next(key_seq), x)
output, z = hk_fn.apply(params, x)
self.assertIsNone(z)
self.assertEqual(output.shape, (batch_size, width))
np.testing.assert_equal(output, np.full([batch_size, width], 7.))
def test_with_container_state(self):
width = 2
batch_size = 2
stack_height = 3
def f_with_container_state(x):
hk_layer = basic.Linear(
width, w_init=initializers.Constant(jnp.eye(width)))
layer_output = hk_layer(x)
layer_state = {
"raw_output": layer_output,
"output_projection": jnp.sum(layer_output)
}
return layer_output + jnp.ones_like(layer_output), layer_state
@multi_transform.without_apply_rng
@transform.transform
def hk_fn(x):
return layer_stack.layer_stack(
stack_height,
with_per_layer_inputs=True)(f_with_container_state)(x)
x = jnp.zeros([batch_size, width])
key_seq = base.PRNGSequence(19)
params = hk_fn.init(next(key_seq), x)
output, z = hk_fn.apply(params, x)
self.assertEqual(z["raw_output"].shape, (stack_height, batch_size, width))
self.assertEqual(output.shape, (batch_size, width))
self.assertEqual(z["output_projection"].shape, (stack_height,))
np.testing.assert_equal(np.sum(z["output_projection"]), np.array(12.))
np.testing.assert_equal(
np.all(z["raw_output"] == np.array([0., 1., 2.])[..., None, None]),
np.array(True))
@classmethod
def _compute_weights(cls, stack_height: int, alpha: jax.Array):
forward = [(alpha, alpha)]
backward = [(stack_height * alpha, stack_height * alpha)]
for i in range(2, stack_height + 1):
a, b = forward[-1]
forward.append((a * i * alpha, (b + 1) * i * alpha))
j = stack_height - i + 1
a, b = backward[-1]
backward.append((a * j * alpha, (b + 1) * j * alpha))
return forward, backward
def test_reverse(self):
# The layer stack below runs iteratively the update equation:
# x_n = n * alpha * (x_{n-1} + 1)
# with x_0 = 1, for n={1, ..., N}, where N = stack_height
# The reverse layer stack as a result runs the update equation:
# y_{n-1} = (N - n + 1) * alpha * (y_n + 1)
# with y_N = 1, for n={N-1, ..., 0}, where N = stack_height
width = 2
batch_size = 3
stack_height = 4
alpha = jnp.power(24, - 1. / stack_height)
forward, backward = self._compute_weights(stack_height, alpha)
def inner_fn(x):
# Here we initialize the layer to an identity + 1, while later we multiply
# each parameter by the index `n`.
return basic.Linear(
x.shape[1],
w_init=initializers.Constant(jnp.eye(x.shape[1])),
b_init=initializers.Constant(1.0),
)(x)
@multi_transform.without_apply_rng
@transform.transform
def hk_fn(x, reverse=False):
return layer_stack.layer_stack(stack_height)(inner_fn)(x, reverse=reverse)
key_seq = base.PRNGSequence(19)
init_value = 1 + jax.random.uniform(next(key_seq), [batch_size, width])
def mul_by_m(x):
m_x = jnp.arange(stack_height) + 1
while m_x.ndim < x.ndim:
m_x = m_x[..., None]
return x * m_x * alpha
params = jax.tree_util.tree_map(
mul_by_m, hk_fn.init(next(key_seq), init_value))
a, b = forward[-1]
x_n = hk_fn.apply(params, init_value)
np.testing.assert_allclose(x_n, a * init_value + b, rtol=1e-6)
a, b = backward[-1]
y_0 = hk_fn.apply(params, init_value, reverse=True)
np.testing.assert_allclose(y_0, a * init_value + b, rtol=1e-6)
def test_reverse_with_additional_inputs(self):
# The layer stack below runs iteratively the update equation:
# x_n = n * alpha * (x_{n-1} + 1)
# with x_0 = 1, for n={1, ..., N}, where N = stack_height
# The reverse layer stack as a result runs the update equation:
# y_{n-1} = (N - n + 1) * alpha * (y_n + 1)
# with y_N = 1, for n={N-1, ..., 0}, where N = stack_height
width = 2
batch_size = 3
stack_height = 4
total_multiplier = 24
alpha = jnp.power(total_multiplier, - 1. / stack_height)
forward, backward = self._compute_weights(stack_height, alpha)
def inner_fn(x, extra):
# Compared to previous test we pass in the `extra` argument as an
# additional input, in order to directly initialize the parameters to the
# index `n` of the iteration.
out = basic.Linear(
x.shape[1],
w_init=initializers.Constant(extra * jnp.eye(x.shape[1])),
b_init=initializers.Constant(extra),
)(x)
return out, out
@multi_transform.without_apply_rng
@transform.transform
def hk_fn(x, extra, reverse=False):
return layer_stack.layer_stack(
stack_height, with_per_layer_inputs=True
)(inner_fn)(x, extra, reverse=reverse)
extra = jnp.arange(stack_height) + 1
extra = extra * alpha
key_seq = base.PRNGSequence(19)
init_value = 1 + jax.random.uniform(next(key_seq), [batch_size, width])
params = hk_fn.init(next(key_seq), init_value, extra)
x_n, x_all = hk_fn.apply(params, init_value, extra)
self.assertEqual(x_all.shape[0], stack_height)
for x_t, (a, b) in zip(x_all, forward):
np.testing.assert_allclose(x_t, a * init_value + b, rtol=1e-6)
np.testing.assert_allclose(x_n, x_all[-1], rtol=1e-6)
y_0, y_all = hk_fn.apply(params, init_value, extra, reverse=True)
self.assertEqual(y_all.shape[0], stack_height)
for y_t, (a, b) in zip(y_all, reversed(backward)):
np.testing.assert_allclose(y_t, a * init_value + b, rtol=1e-6)
np.testing.assert_allclose(y_0, y_all[0], rtol=1e-6)
def test_reverse_with_pass_reverse_to_layer_fn(self):
# The layer stack below runs iteratively the update equation:
# x_n = n * alpha * (x_{n-1} + 1)
# with x_0 = 1, for n={1, ..., N}, where N = stack_height
# The reverse layer stack as a result runs the update equation:
# y_{n-1} = (N - n + 1) * alpha * (y_n + 1)
# with y_N = 1, for n={N-1, ..., 0}, where N = stack_height
# This test is equivalent to the previous one, but we nest the iterations in
# two layer stacks.
width = 2
batch_size = 3
stack_height = 4
total_multiplier = 24
alpha = jnp.power(total_multiplier, - 1. / stack_height)
forward, backward = self._compute_weights(stack_height, alpha)
def inner_fn(x, extra):
out = basic.Linear(
x.shape[1],
w_init=initializers.Constant(extra * jnp.eye(x.shape[1])),
b_init=initializers.Constant(extra),
)(x)
return out, out
def outer_fn(x, extra, reverse=False):
return layer_stack.layer_stack(
stack_height // 2, with_per_layer_inputs=True
)(inner_fn)(x, extra, reverse=reverse)
@multi_transform.without_apply_rng
@transform.transform
def hk_fn(x, extra, reverse=False):
return layer_stack.layer_stack(
2, with_per_layer_inputs=True, pass_reverse_to_layer_fn=True
)(outer_fn)(x, extra, reverse=reverse)
extra = jnp.arange(stack_height).reshape([2, stack_height // 2]) + 1
extra = extra * alpha
key_seq = base.PRNGSequence(19)
init_value = 1 + jax.random.uniform(next(key_seq), [batch_size, width])
params = hk_fn.init(next(key_seq), init_value, extra)
x_n, x_all = hk_fn.apply(params, init_value, extra)
self.assertEqual(x_all.shape[:2], (2, stack_height // 2))
x_all = x_all.reshape((stack_height, *x_all.shape[2:]))
for x_t, (a, b) in zip(x_all, forward):
np.testing.assert_allclose(x_t, a * init_value + b, rtol=1e-6)
np.testing.assert_allclose(x_n, x_all[-1], rtol=1e-6)
y_0, y_all = hk_fn.apply(params, init_value, extra, reverse=True)
self.assertEqual(y_all.shape[:2], (2, stack_height // 2))
y_all = y_all.reshape((stack_height, *y_all.shape[2:]))
for y_t, (a, b) in zip(y_all, reversed(backward)):
np.testing.assert_allclose(y_t, a * init_value + b, rtol=1e-6)
np.testing.assert_allclose(y_0, y_all[0], rtol=1e-6)
def test_parameter_reuse(self):
def block(x: jax.Array) -> jax.Array:
h = basic.Linear(output_size=x.shape[-1], with_bias=False)(x)
h = jax.nn.relu(h)
return h
class MLP(basic.hk.Module):
def __call__(self, x):
return layer_stack.layer_stack(5)(block)(x)
def f(x):
mlp = MLP()
return mlp(mlp(x))
x = jnp.ones((2, 2))
params = transform.transform(f).init(jax.random.PRNGKey(0), x)
param_size = utils.tree_size(params)
# 5 layers * (2 * 2 weights) = 20.
np.testing.assert_equal(param_size, 20)
def test_transparent(self):
num_layers = 3
class TransparencyMap(layer_stack.LayerStackTransparencyMapping):
def stacked_to_flat(self, stacked_module_name: str, scan_idx: int) -> str:
return stacked_module_name.replace("0", str(scan_idx))
def flat_to_stacked(
self, unstacked_module_name: str
) -> Optional[tuple[str, int]]:
idx = int(re.findall(r"\d+", unstacked_module_name)[0])
return unstacked_module_name.replace(str(idx), "0"), idx
def block(x: jax.Array, i: int) -> jax.Array:
return basic.Linear(output_size=x.shape[-1], name=f"linear_{i}")(x)
def looped(x: jax.Array) -> jax.Array:
for i in range(num_layers):
x = block(x, i)
return x
def stacked(x: jax.Array) -> jax.Array:
return layer_stack.layer_stack(
num_layers=3, transparent=True, transparency_map=TransparencyMap()
)(lambda y: block(y, 0))(x)
looped = transform.transform(looped)
stacked = transform.transform(stacked)
x = jnp.ones((2, 2))
rng = jax.random.PRNGKey(0)
looped_params = looped.init(rng, x)
stacked_params = stacked.init(rng, x)
self.assertEqual(
jax.tree_util.tree_structure(looped_params),
jax.tree_util.tree_structure(stacked_params),
)
# Use the same set of params for both calls, since stacked_params differ
# from looped_params because of differences in RNG splitting.
np.testing.assert_allclose(
looped.apply(looped_params, rng, x),
stacked.apply(looped_params, rng, x),
rtol=1e-6,
)
if __name__ == "__main__":
jax.config.update("jax_check_tracer_leaks", True)
jax.config.update("jax_default_matmul_precision", "float32")
absltest.main()
|
dm-haiku-main
|
haiku/_src/layer_stack_test.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.moving_averages."""
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import basic
from haiku._src import moving_averages
from haiku._src import multi_transform
from haiku._src import test_utils
from haiku._src import transform
import jax
import jax.numpy as jnp
import jax.random as random
import numpy as np
import tree
class MovingAveragesTest(parameterized.TestCase):
@test_utils.transform_and_run
def test_zero_decay(self):
ema = moving_averages.ExponentialMovingAverage(0.)
random_input = jax.random.uniform(jax.random.PRNGKey(428), shape=(2, 3, 4))
# The ema should be equal to the input with decay=0.
np.testing.assert_allclose(random_input[0], ema(random_input[0]))
np.testing.assert_allclose(random_input[1], ema(random_input[1]))
@test_utils.transform_and_run
def test_warmup(self):
ema = moving_averages.ExponentialMovingAverage(
0.5, warmup_length=2, zero_debias=False)
random_input = jax.random.uniform(jax.random.PRNGKey(428), shape=(2, 3, 4))
# The ema should be equal to the input for the first two calls.
np.testing.assert_allclose(random_input[0], ema(random_input[0]))
np.testing.assert_allclose(random_input[0], ema(random_input[0]))
# After the warmup period, with decay = 0.5 it should be halfway between the
# first two inputs and the new input.
np.testing.assert_allclose(
(random_input[0] + random_input[1]) / 2, ema(random_input[1]))
@test_utils.transform_and_run
def test_invalid_warmup_length(self):
with self.assertRaises(ValueError):
moving_averages.ExponentialMovingAverage(
0.5, warmup_length=-1, zero_debias=False)
@test_utils.transform_and_run
def test_warmup_length_and_zero_debias(self):
with self.assertRaises(ValueError):
moving_averages.ExponentialMovingAverage(
0.5, warmup_length=2, zero_debias=True)
@test_utils.transform_and_run
def test_call(self):
ema = moving_averages.ExponentialMovingAverage(0.5)
self.assertAlmostEqual(ema(3.), 3.)
self.assertAlmostEqual(ema(6.), 5.)
@test_utils.transform_and_run
def test_fast_slow_decay(self):
ema_fast = moving_averages.ExponentialMovingAverage(0.2)
ema_slow = moving_averages.ExponentialMovingAverage(0.8)
np.testing.assert_allclose(ema_fast(1.), ema_slow(1.), rtol=1e-4)
# Expect fast decay to increase more quickly than slow.
self.assertGreater(ema_fast(2.), ema_slow(2.))
@test_utils.transform_and_run
def test_fast_slow_decay_without_update(self):
ema_fast = moving_averages.ExponentialMovingAverage(0.5)
ema_slow = moving_averages.ExponentialMovingAverage(0.8)
# This shouldn't have an effect.
np.testing.assert_allclose(
ema_fast(1., update_stats=False),
ema_slow(1., update_stats=False),
rtol=1e-4)
np.testing.assert_allclose(ema_fast(1.), ema_slow(1.), rtol=1e-4)
self.assertGreater(ema_fast(2.), ema_slow(2.))
def test_ema_is_identity_on_unchanged_data(self):
def f(x):
return moving_averages.ExponentialMovingAverage(0.5)(x)
inp_value = 1.0
init_fn, apply_fn = multi_transform.without_apply_rng(
transform.transform_with_state(f))
_, params_state = init_fn(None, inp_value)
# The output should never change as long as the input doesn't.
value = inp_value
for _ in range(10):
value, params_state = apply_fn(None, params_state, value)
# Floating point error creeps up to 1e-7 (the default).
np.testing.assert_allclose(inp_value, value, rtol=1e-6)
@parameterized.parameters(True, False)
@test_utils.transform_and_run
def test_initialize(self, legacy_initialize):
ema = moving_averages.ExponentialMovingAverage(0.99)
if legacy_initialize:
ema.initialize(jnp.ones([]))
else:
ema.initialize([])
self.assertEqual(ema.average, 0.)
ema(jnp.array(100.))
# Matching the behavior of Sonnet 2, initialize only sets the value to zero
# if the EMA has not already been initialized.
if legacy_initialize:
ema.initialize(jnp.ones([]))
else:
ema.initialize([])
self.assertNotEqual(ema.average, 0.)
class EMAParamsTreeTest(absltest.TestCase):
def test_ema_naming_scheme(self):
ema_name = "this_is_a_wacky_but_valid_name"
linear_name = "so_is_this"
def f():
return basic.Linear(output_size=2, name=linear_name)(jnp.zeros([6]))
init_fn, _ = transform.transform(f)
params = init_fn(random.PRNGKey(428))
def g(x):
return moving_averages.EMAParamsTree(0.2, name=ema_name)(x)
init_fn, _ = transform.transform_with_state(g)
_, params_state = init_fn(None, params)
expected_ema_states = [f"{ema_name}/{linear_name}__{s}" for s in ["w", "b"]]
self.assertEqual(set(expected_ema_states), set(params_state.keys()))
def test_ema_on_changing_data(self):
def f():
return basic.Linear(output_size=2, b_init=jnp.ones)(jnp.zeros([6]))
init_fn, _ = transform.transform(f)
params = init_fn(random.PRNGKey(428))
def g(x):
return moving_averages.EMAParamsTree(0.2)(x)
init_fn, apply_fn = multi_transform.without_apply_rng(
transform.transform_with_state(g))
_, params_state = init_fn(None, params)
params, params_state = apply_fn(None, params_state, params)
# Let's modify our params.
changed_params = tree.map_structure(lambda t: 2. * t, params)
ema_params, params_state = apply_fn(None, params_state, changed_params)
# ema_params should be different from changed params!
tree.assert_same_structure(changed_params, ema_params)
for p1, p2 in zip(tree.flatten(params), tree.flatten(ema_params)):
self.assertEqual(p1.shape, p2.shape)
with self.assertRaisesRegex(AssertionError, "Not equal to tolerance"):
np.testing.assert_allclose(p1, p2, atol=1e-6)
def test_ignore_regex(self):
def f():
return basic.Linear(output_size=2, b_init=jnp.ones)(jnp.zeros([6]))
init_fn, _ = transform.transform(f)
params = init_fn(random.PRNGKey(428))
def g(x):
return moving_averages.EMAParamsTree(0.2, ignore_regex=".*w")(x)
init_fn, apply_fn = multi_transform.without_apply_rng(
transform.transform_with_state(g))
_, params_state = init_fn(None, params)
params, params_state = apply_fn(None, params_state, params)
# Let's modify our params.
changed_params = tree.map_structure(lambda t: 2. * t, params)
ema_params, params_state = apply_fn(None, params_state, changed_params)
# b (not matched by ignore_regex) should have changed...
self.assertTrue(
(changed_params["linear"]["b"] != ema_params["linear"]["b"]).all())
# ... but w (matched by ignore_regex) should be the same!
self.assertTrue(
(changed_params["linear"]["w"] == ema_params["linear"]["w"]).all())
def test_tree_update_stats(self):
def f():
return basic.Linear(output_size=2, b_init=jnp.ones)(jnp.zeros([6]))
init_fn, _ = transform.transform(f)
params = init_fn(random.PRNGKey(428))
def g(x):
"""This should never update internal stats."""
return moving_averages.EMAParamsTree(0.2)(x, update_stats=False)
init_fn, apply_fn_g = multi_transform.without_apply_rng(
transform.transform_with_state(g))
_, params_state = init_fn(None, params)
# Let's modify our params.
changed_params = tree.map_structure(lambda t: 2. * t, params)
ema_params, params_state = apply_fn_g(None, params_state, changed_params)
ema_params2, params_state = apply_fn_g(None, params_state, changed_params)
# ema_params should be the same as ema_params2 with update_stats=False!
for p1, p2 in zip(tree.flatten(ema_params2), tree.flatten(ema_params)):
self.assertEqual(p1.shape, p2.shape)
np.testing.assert_allclose(p1, p2)
def h(x):
"""This will behave like normal."""
return moving_averages.EMAParamsTree(0.2)(x, update_stats=True)
init_fn, apply_fn_h = multi_transform.without_apply_rng(
transform.transform_with_state(h))
_, params_state = init_fn(None, params)
params, params_state = apply_fn_h(None, params_state, params)
# Let's modify our params.
changed_params = tree.map_structure(lambda t: 2. * t, params)
ema_params, params_state = apply_fn_h(None, params_state, changed_params)
ema_params2, params_state = apply_fn_h(None, params_state, changed_params)
# ema_params should be different from ema_params2 with update_stats=True!
for p1, p2 in zip(tree.flatten(ema_params2), tree.flatten(ema_params)):
self.assertEqual(p1.shape, p2.shape)
with self.assertRaisesRegex(AssertionError, "Not equal to tolerance"):
np.testing.assert_allclose(p1, p2, atol=1e-6)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/moving_averages_test.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Automatic Mixed Precision (AMP) utilities."""
import collections
import contextlib
import threading
from typing import TypeVar, Optional, Union
from haiku._src import base
from haiku._src import data_structures
from haiku._src import module
import jmp
T = TypeVar('T')
# If you are forking replace this block with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
custom_getter = base.custom_getter
custom_creator = base.custom_creator
MethodContext = module.MethodContext
Module = module.Module
# pylint: enable=invalid-name
# TODO(slebedev): This makes the module non-forkable.
Stack = data_structures.Stack[T]
del data_structures
ClassInfo = collections.namedtuple('ClassInfo', 'module,qualname')
ClassInfoOrType = Union[ClassInfo, type[hk.Module]]
def key_for_module(cls: type[hk.Module]) -> ClassInfoOrType:
"""Returns a suitable key for the given module class."""
if '<locals>' in cls.__qualname__:
# Some APIs (e.g. `hk.to_module`) are factory functions that create modules.
# It is not desirable for us to use the qualname in this case since that
# would associate all class instances created by the factory with a single
# policy. Instead we use the class object itself, with the assumption that
# these types are less likely to be created as a side effect of force
# reloading modules.
return cls
else:
return ClassInfo(cls.__module__, cls.__qualname__)
class _ThreadState(threading.local):
"""Holds per-thread state on mixed precision policies."""
def __init__(self):
super().__init__()
self._installed_interceptor = False
self._cls_policy: dict[ClassInfoOrType, jmp.Policy] = {}
self._current_policy = Stack[jmp.Policy]()
def push_current_policy(self, policy: jmp.Policy):
return self._current_policy(policy)
@property
def has_current_policy(self):
return bool(self._current_policy)
@property
def current_policy(self) -> jmp.Policy:
return self._current_policy.peek()
def clear_policy(self, cls: type[hk.Module]):
key = key_for_module(cls)
if key in self._cls_policy:
del self._cls_policy[key]
def set_policy(self, cls: type[hk.Module], policy: jmp.Policy):
if not self._installed_interceptor:
module.intercept_methods_global(_mixed_precision_interceptor)
self._installed_interceptor = True
key = key_for_module(cls)
self._cls_policy[key] = policy
def get_policy(self, cls: type[hk.Module]) -> Optional[jmp.Policy]:
key = key_for_module(cls)
return self._cls_policy.get(key)
_thread_local_state = _ThreadState()
def reset_thread_local_state_for_test():
global _thread_local_state
_thread_local_state = _ThreadState()
def current_policy() -> Optional[jmp.Policy]:
"""Retrieves the currently active policy in the current context.
Returns:
The currently active mixed precision policy, or ``None``.
See also:
- :func:`clear_policy`: Clears any policies associated with a class.
- :func:`get_policy`: Gets the policy for a given class.
- :func:`set_policy`: Sets a policy for a given class.
- :func:`push_policy`: Context manager for setting policies.
"""
tls = _thread_local_state
return tls.current_policy if tls.has_current_policy else None
def get_policy(cls: type[hk.Module]) -> Optional[jmp.Policy]:
"""Retrieves the currently active policy for the given class.
Note that policies applied explicitly to a top level class (e.g. ``ResNet``)
will be applied implicitly to all child modules (e.g. ``ConvND``) called from
the parent. This function only returns policies that have been applied
explicitly (e.g. via :func:`set_policy`).
Args:
cls: A Haiku module class.
Returns:
A JMP policy that is used for the given class, or ``None`` if one is not
active.
See also:
- :func:`current_policy`: Retrieves the currently active policy (if any).
- :func:`clear_policy`: Clears any policies associated with a class.
- :func:`set_policy`: Sets a policy for a given class.
- :func:`push_policy`: Context manager for setting policies.
"""
return _thread_local_state.get_policy(cls)
def set_policy(cls: type[hk.Module], policy: jmp.Policy):
"""Uses the given policy for all instances of the module class.
NOTE: Policies are only applied to modules created in the current thread.
A mixed precision policy describes how inputs, module parameters and module
outputs should be cast at runtime. By applying a policy to a given type of
module, you can control how all instances of that module behave in your
program.
For example, you might want to try running a ResNet50 model in a mixture of
``float16`` and ``float32`` on GPU to get higher throughput. To do so you can
apply a mixed precision policy to the ResNet50 type that will create
parameters in ``float32``, but cast them to ``float16`` before use, along with
all module inputs:
>>> policy = jmp.get_policy('params=float32,compute=float16,output=float32')
>>> hk.mixed_precision.set_policy(hk.nets.ResNet50, policy)
>>> net = hk.nets.ResNet50(4)
>>> x = jnp.ones([4, 224, 224, 3])
>>> print(net(x, is_training=True))
[[nan nan nan nan]
[nan nan nan nan]
[nan nan nan nan]
[nan nan nan nan]]
Oh no, nan! This is because modules like batch norm are not numerically stable
in ``float16``. To address this, we apply a second policy to our batch norm
modules to keep them in full precision. We are careful to return a ``float16``
output from the module such that subsequent modules receive ``float16`` input:
>>> policy = jmp.get_policy('params=float32,compute=float32,output=float16')
>>> hk.mixed_precision.set_policy(hk.BatchNorm, policy)
>>> print(net(x, is_training=True))
[[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]]
For a fully worked mixed precision example see the imagenet example in Haiku's
examples directory. This example shows mixed precision on GPU offering a 2x
speedup in training time with only a small impact on final top-1 accuracy.
>>> hk.mixed_precision.clear_policy(hk.nets.ResNet50)
>>> hk.mixed_precision.clear_policy(hk.BatchNorm)
Args:
cls: A Haiku module class.
policy: A JMP policy to apply to the module.
See also:
- :func:`push_policy`: Context manager for setting policies.
- :func:`current_policy`: Retrieves the currently active policy (if any).
- :func:`clear_policy`: Clears any policies associated with a class.
- :func:`get_policy`: Gets the policy for a given class.
"""
assert policy is not None, 'To unset policies use clear_policy.'
_thread_local_state.set_policy(cls, policy)
@contextlib.contextmanager
def push_policy(cls: type[hk.Module], policy: jmp.Policy):
"""Sets the given policy for the given class while the context is active.
Args:
cls: A Haiku module class.
policy: A JMP policy to apply to the module.
Yields:
``None``.
See also:
- :func:`clear_policy`: Clears any policies associated with a class.
- :func:`get_policy`: Gets the policy for a given class.
- :func:`set_policy`: Sets a policy for a given class.
- :func:`current_policy`: Retrieves the currently active policy (if any).
"""
assert policy is not None, 'To unset policies use clear_policy.'
# Check for trying to push a new policy inside a module method. In theory it
# is safe to do this when varying the parameter dtype, but we are defensive
# and ask users to set policies before calling module methods to avoid
# confusion.
current_module = base.inside_transform() and base.current_module()
if (current_module and
key_for_module(type(current_module)) == key_for_module(cls)):
raise ValueError(
'Pushing a policy inside a method on the same class is not supported.')
old_policy = get_policy(cls)
set_policy(cls, policy)
try:
yield
finally:
if old_policy is not None:
set_policy(cls, old_policy)
else:
clear_policy(cls)
def clear_policy(cls: type[hk.Module]):
"""Clears any policy assocated with the given class.
Args:
cls: A Haiku module class.
See also:
- :func:`current_policy`: Retrieves the currently active policy (if any).
- :func:`get_policy`: Gets the policy for a given class.
- :func:`set_policy`: Sets a policy for a given class.
- :func:`push_policy`: Context manager for setting policies.
"""
_thread_local_state.clear_policy(cls)
def _mixed_precision_creator(next_creator, shape, dtype, init, context):
del context
dtype = _thread_local_state.current_policy.param_dtype
return next_creator(shape, dtype, init)
def _mixed_precision_getter(next_getter, value, context):
del context
value = _thread_local_state.current_policy.cast_to_compute(value)
return next_getter(value)
def _mixed_precision_interceptor(next_f, args, kwargs,
context: hk.MethodContext):
"""Method interceptor used to apply mixed precision policies to classes."""
policy = get_policy(type(context.module))
if policy is None:
return next_f(*args, **kwargs)
ctx = contextlib.ExitStack()
with ctx:
if not _thread_local_state.has_current_policy:
ctx.enter_context(hk.custom_creator(_mixed_precision_creator))
ctx.enter_context(hk.custom_getter(_mixed_precision_getter, state=True))
ctx.enter_context(_thread_local_state.push_current_policy(policy))
args, kwargs = policy.cast_to_compute((args, kwargs))
out = next_f(*args, **kwargs)
return policy.cast_to_output(out)
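# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: applying a policy to
# a single module class via the public wrappers documented above
# (`hk.mixed_precision.set_policy` / `clear_policy`). The module, input sizes
# and policy string are arbitrary choices for illustration.
def _example_set_policy_on_linear():
  import haiku as hk  # real haiku; shadows the minimal shim above locally
  import jax
  import jax.numpy as jnp

  def forward(x):
    return hk.Linear(8)(x)

  forward_t = hk.transform(forward)
  x = jnp.ones([2, 4])
  policy = jmp.get_policy('params=float32,compute=float16,output=float32')
  # While the policy is set, hk.Linear creates float32 parameters, casts them
  # and its inputs to float16 for the matmul, and returns a float32 output.
  hk.mixed_precision.set_policy(hk.Linear, policy)
  try:
    params = forward_t.init(jax.random.PRNGKey(0), x)
    out = forward_t.apply(params, None, x)
  finally:
    hk.mixed_precision.clear_policy(hk.Linear)
  return params, out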
|
dm-haiku-main
|
haiku/_src/mixed_precision.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Function to stack repeats of a layer function without shared parameters."""
import collections
import functools
import inspect
from typing import Any, Callable, Optional, Protocol, Union
from haiku._src import base
from haiku._src import lift
from haiku._src import module
from haiku._src import transform
import jax
import jax.numpy as jnp
class LayerStackStateError(Exception):
"""Raise if trying to use layer_stack with Haiku state."""
LayerStackCarry = collections.namedtuple("LayerStackCarry", ["x"])
LayerStackScanned = collections.namedtuple("LayerStackScanned",
["params", "rng", "args_ys"])
# WrappedFn should take in arbitrarily nested `jax.Array`, and return the
# exact same type. We cannot express this with `typing`. So we just use it
# to inform the user. In reality, the typing below will accept anything.
NestedArray = Any
WrappedFn = Callable[..., Union[NestedArray, tuple[NestedArray]]]
def _check_no_varargs(f):
if list(inspect.signature(
f).parameters.values())[0].kind == inspect.Parameter.VAR_POSITIONAL:
raise ValueError(
"The function `f` should not have any `varargs` (that is *args) "
"argument. Instead, it should only use explicit positional"
"arguments")
def _get_rng_stack(count: int) -> Optional[jax.Array]:
rng = base.maybe_next_rng_key()
if rng is None:
return None
return jax.random.split(rng, count)
class LayerStackTransparencyMapping(Protocol):
"""Module name mapping for transparent layer_stack."""
def stacked_to_flat(self, stacked_module_name: str, scan_idx: int) -> str:
"""Creates flat module name from stacked name and index during scan."""
...
def flat_to_stacked(
self, unstacked_module_name: str
) -> Optional[tuple[str, int]]:
"""Creates stacked module name and scan index from flat name.
Returns None when the module is not a part of layer_stack. This happens
when the caller module that transparently calls layer_stack has its own
parameters. This function is essentially the inverse of `stacked_to_flat`.
Args:
unstacked_module_name: Name of the module to be converted to stacked.
Returns:
Name and layer index of the module when stacked. None if the module is
not part of the stack.
"""
...
def _split_params(
stacked_params: base.Params,
num_layers: int,
name_map: LayerStackTransparencyMapping,
) -> base.Params:
"""Splits the stacked parameters."""
def _split(x):
return [jnp.squeeze(s, axis=0) for s in jnp.split(x, x.shape[0], axis=0)]
params = {}
for mod_name, mod_params in stacked_params.items():
split_mod_params = {k: _split(v) for k, v in mod_params.items()}
for i in range(num_layers):
new_mod_name = name_map.stacked_to_flat(mod_name, i)
if new_mod_name in params:
raise ValueError(
f"Found conflicting unstacked module name for {mod_name} at"
f" {new_mod_name}."
)
params[new_mod_name] = {k: v[i] for k, v in split_mod_params.items()}
return params
def _stack_params(
split_params: base.Params,
num_layers: int,
name_map: LayerStackTransparencyMapping,
) -> base.Params:
"""Stacks the split parameters."""
params = {}
make_empty_param_stack = lambda: ([None] * num_layers)
for mod_name, mod_params in split_params.items():
stacked_name_idx = name_map.flat_to_stacked(mod_name)
if stacked_name_idx is None:
continue
stacked_mod_name, idx = stacked_name_idx
if stacked_mod_name not in params:
params[stacked_mod_name] = collections.defaultdict(make_empty_param_stack)
for k, v in mod_params.items():
if params[stacked_mod_name][k][idx] is not None:
raise ValueError(
f"Found conflicting values for param {stacked_mod_name}/{k} at"
f" index {idx}."
)
params[stacked_mod_name][k][idx] = v
for mod_name, mod_params in params.items():
for k, v in mod_params.items():
if None in v:
raise ValueError(f"Couldn't find all params for {mod_name}/{k}: {v}")
mod_params[k] = jnp.stack(v, axis=0)
return params
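# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: a toy round trip
# through `_split_params` / `_stack_params`. The name mapping below is
# hypothetical (it assumes flat module names of the form "block_<i>") and
# mirrors the TransparencyMap used in layer_stack_test.py.
class _ExampleNameMap:
  """Hypothetical LayerStackTransparencyMapping for names like 'block_<i>'."""

  def stacked_to_flat(self, stacked_module_name: str, scan_idx: int) -> str:
    return stacked_module_name.replace("0", str(scan_idx))

  def flat_to_stacked(self, unstacked_module_name: str):
    prefix, idx = unstacked_module_name.rsplit("_", 1)
    return f"{prefix}_0", int(idx)


def _example_split_and_stack_round_trip():
  num_layers = 3
  stacked = {"block_0": {"w": jnp.ones([num_layers, 2, 2])}}
  name_map = _ExampleNameMap()
  # Split: {"block_0": (2, 2), "block_1": (2, 2), "block_2": (2, 2)}.
  flat = _split_params(stacked, num_layers, name_map)
  # Stack: recovers a single "block_0" entry with a (3, 2, 2) leading axis.
  restacked = _stack_params(flat, num_layers, name_map)
  return flat, restacked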
class _LayerStack:
"""Module to compose parameterized functions, implemented as a scan."""
def __init__(
self,
count: int,
unroll: int,
pass_reverse_to_layer_fn: bool = False,
transparency_map: Optional[LayerStackTransparencyMapping] = None,
name: str = "",
):
"""Iterate f count times, with non-shared parameters."""
self._name = name
self._count = count
self._unroll = unroll
self._pass_reverse_to_layer_fn = pass_reverse_to_layer_fn
self._transparency_map = transparency_map
def __call__(self, x, *args_ys, reverse=False):
count = self._count
init_fn, apply_fn = transform.transform(self._call_wrapped)
def per_layer_init_fn(c, a):
c, rng = c
if rng is not None:
rng, next_rng, apply_rng = jax.random.split(rng, 3)
else:
rng, next_rng, apply_rng = None, None, None
params = init_fn(rng, c, *a)
c, _ = apply_fn(params, apply_rng, c, *a)
return (c, next_rng), params
def scanned_init_fn(x, rng):
_, params = jax.lax.scan(per_layer_init_fn, (x, rng), args_ys,
length=self._count)
if self._transparency_map is not None:
return _split_params(params, self._count, self._transparency_map)
else:
return params
rng = base.maybe_next_rng_key()
try:
if self._transparency_map is not None:
lifted_init_fn = lift.transparent_lift(
scanned_init_fn, allow_reuse=True
)
else:
lifted_init_fn = lift.lift(
scanned_init_fn, allow_reuse=True, name=self._name
)
params = lifted_init_fn(x, rng)
except base.NonEmptyStateError as e:
raise LayerStackStateError("LayerStack can only be used on Haiku "
"functions which do not make use of Haiku "
"state.") from e
# Use scan during apply, threading through random seed so that it's
# unique for each layer.
def layer(
carry: LayerStackCarry, scanned: LayerStackScanned
) -> tuple[LayerStackCarry, Any]:
rng = scanned.rng
params = scanned.params
kwargs = {}
if self._pass_reverse_to_layer_fn:
kwargs["reverse"] = reverse
out_x, z = apply_fn(params, rng, carry.x, *scanned.args_ys, **kwargs)
return LayerStackCarry(x=out_x), z
rng = _get_rng_stack(count)
if self._transparency_map is not None:
params = _stack_params(params, self._count, self._transparency_map)
carry = LayerStackCarry(x=x)
scanned = LayerStackScanned(params=params,
rng=rng,
args_ys=args_ys)
carry, zs = jax.lax.scan(
layer, carry, scanned, length=count, unroll=self._unroll,
reverse=reverse)
return carry.x, zs
def _call_wrapped(
self,
x: jax.Array,
*args,
) -> tuple[jax.Array, Optional[jax.Array]]:
raise NotImplementedError()
class _LayerStackNoPerLayer(_LayerStack):
"""_LayerStack impl with no per-layer inputs provided to the function."""
def __init__(
self,
f: WrappedFn,
count: int,
unroll: int,
pass_reverse_to_layer_fn: bool = False,
transparency_map: Optional[LayerStackTransparencyMapping] = None,
name: str = "",
):
super().__init__(
count=count,
unroll=unroll,
pass_reverse_to_layer_fn=pass_reverse_to_layer_fn,
transparency_map=transparency_map,
name=name,
)
_check_no_varargs(f)
self._f = f
@module.transparent
def _call_wrapped(self, x, **kwargs):
ret = self._f(*x, **kwargs)
if len(x) == 1:
# If the function takes a single argument, the wrapped function receives
# a tuple of length 1, and therefore it must return a tuple of length 1.
ret = (ret,)
return ret, None
class _LayerStackWithPerLayer(_LayerStack):
"""_LayerStack impl with per-layer inputs provided to the function."""
def __init__(
self,
f: WrappedFn,
count: int,
unroll: int,
pass_reverse_to_layer_fn: bool = False,
transparency_map: Optional[LayerStackTransparencyMapping] = None,
name: str = "",
):
super().__init__(
count=count,
unroll=unroll,
pass_reverse_to_layer_fn=pass_reverse_to_layer_fn,
transparency_map=transparency_map,
name=name,
)
self._f = f
@module.transparent
def _call_wrapped(self, x, *args, **kwargs):
return self._f(x, *args, **kwargs)
def layer_stack(
num_layers: int,
with_per_layer_inputs=False,
unroll: int = 1,
pass_reverse_to_layer_fn: bool = False,
transparent: bool = False,
transparency_map: Optional[LayerStackTransparencyMapping] = None,
name: Optional[str] = None,
):
"""Utility to wrap a Haiku function and recursively apply it to an input.
This can be used to improve model compile times.
A function is valid if it uses only explicit positional parameters, and
its return type matches its input type. The positional parameters can be
arbitrarily nested structures with ``jax.Array`` at the leaf nodes. Note
that kwargs are not supported, and neither are functions with a variable
number of parameters (specified by ``*args``).
Note that `layer_stack` cannot currently be used with functions that build
Haiku modules with state.
If ``with_per_layer_inputs=False`` then the new, wrapped function can be
understood as performing the following:
>>> f = lambda x: x+1
>>> num_layers = 4
>>> x = 0
>>> for i in range(num_layers):
... x = f(x)
>>> x
4
And if ``with_per_layer_inputs=True``, assuming ``f`` takes two arguments on
top of ``x``:
>>> f = lambda x, y0, y1: (x+1, y0+y1)
>>> num_layers = 4
>>> x = 0
>>> ys_0 = [1, 2, 3, 4]
>>> ys_1 = [5, 6, 7, 8]
>>> zs = []
>>> for i in range(num_layers):
... x, z = f(x, ys_0[i], ys_1[i])
... zs.append(z)
>>> x, zs
(4, [6, 8, 10, 12])
The code using ``layer_stack`` for the above function would be:
>>> f = lambda x, y0, y1: (x+1, y0+y1)
>>> num_layers = 4
>>> x = 0
>>> ys_0 = jnp.array([1, 2, 3, 4])
>>> ys_1 = jnp.array([5, 6, 7, 8])
>>> stack = hk.layer_stack(num_layers, with_per_layer_inputs=True)
>>> x, zs = stack(f)(x, ys_0, ys_1)
>>> print(x, zs)
4 [ 6 8 10 12]
Check the tests in ``layer_stack_test.py`` for further examples.
Crucially, any parameters created inside ``f`` will not be shared across
iterations.
Args:
num_layers: The number of times to iterate the wrapped function.
with_per_layer_inputs: Whether or not to pass per-layer inputs to the
wrapped function.
unroll: the unroll used by ``scan``.
pass_reverse_to_layer_fn: Whether or not to pass the ``reverse`` keyword to
the function ``f``, so that it is aware if the layer stack is being run
forward or in reverse (and the underlying ``scan``). To run the layer
stack in reverse you need to pass in ``reverse=True`` to the call to the
layer stack.
transparent: Whether to apply layer_stack transparently. When this is True,
and a correct transparency_map is provided, the parameters are generated
in such a way that layer_stack can be replaced by a regular for loop
without changing the parameter tree.
transparency_map: How to map stacked module names to flat names and reverse.
See ``LayerStackTransparencyMapping`` and ``layer_stack_test.py`` for an
example.
name: name of the Haiku context.
Returns:
Callable that will produce a layer stack when called with a valid function.
"""
if transparent and transparency_map is None:
raise ValueError("transparency_map must be provided with transparent=True.")
if not name:
if with_per_layer_inputs:
name = "__layer_stack_with_per_layer"
else:
name = "__layer_stack_no_per_layer"
def iterate(f):
if with_per_layer_inputs:
@functools.wraps(f)
def wrapped(x, *args, **kwargs):
for ys in jax.tree_util.tree_leaves(args):
assert ys.shape[0] == num_layers, f"{ys.shape[0]} != {num_layers}"
mod = _LayerStackWithPerLayer(
f,
num_layers,
unroll=unroll,
pass_reverse_to_layer_fn=pass_reverse_to_layer_fn,
transparency_map=transparency_map,
name=name,
)
return mod(x, *args, **kwargs)
else:
_check_no_varargs(f)
@functools.wraps(f)
def wrapped(*args, **kwargs):
mod = _LayerStackNoPerLayer(
f,
num_layers,
unroll=unroll,
pass_reverse_to_layer_fn=pass_reverse_to_layer_fn,
transparency_map=transparency_map,
name=name,
)
ret = mod(x=args, **kwargs)[0]
if len(args) == 1:
# If the function takes a single argument, we must also return a
# single value, and not a tuple of length 1.
ret = ret[0]
return ret
return wrapped
return iterate
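# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: parameters created
# inside the wrapped function gain a leading `num_layers` axis, since `init`
# runs under `jax.lax.scan` and the per-layer parameters are stacked along
# the scan axis. Uses the public `hk.layer_stack` / `hk.Linear` API shown in
# the docstring above; sizes are arbitrary choices for illustration.
def _example_stacked_param_shapes():
  import haiku as hk  # local import to keep this sketch self-contained

  def forward(x):
    block = lambda y: jax.nn.relu(hk.Linear(16)(y))
    return hk.layer_stack(4)(block)(x)

  forward_t = hk.transform(forward)
  params = forward_t.init(jax.random.PRNGKey(0), jnp.ones([2, 16]))
  # Each Linear's weight is stacked across the 4 layers, e.g. w: (4, 16, 16).
  return jax.tree_util.tree_map(lambda p: p.shape, params)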
|
dm-haiku-main
|
haiku/_src/layer_stack.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Modules for performing embedding lookups in Haiku."""
from collections.abc import Sequence
import enum
from typing import Optional, Union
from haiku._src import base
from haiku._src import initializers
from haiku._src import module
import jax
import jax.numpy as jnp
import numpy as np
# If you are forking replace this with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
get_parameter = base.get_parameter
Module = module.Module
initializers = initializers
# pylint: enable=invalid-name
del base, module, initializers
class EmbedLookupStyle(enum.Enum):
"""How to return the embedding matrices given IDs."""
# Looks up embeddings using a gather `embeddings[ids]`. This is significantly
# faster on GPU, and faster on TPU for large values of `vocab_size` (> 1k at
# HIGHEST precision, > 2-3k at DEFAULT precision).
ARRAY_INDEX = 1
# Looks up embeddings using `dot(embeddings, one_hot(ids))`. This is usually
# faster on TPU for smaller values of `vocab_size` (< 1k at HIGHEST precision,
# < 2-3k at DEFAULT precision).
ONE_HOT = 2
# Needed for members to show in sphinx docs.
for style in EmbedLookupStyle:
style.__doc__ = style.name
hk.EmbedLookupStyle = EmbedLookupStyle
class Embed(hk.Module):
"""Module for embedding tokens in a low-dimensional space."""
def __init__(
self,
vocab_size: Optional[int] = None,
embed_dim: Optional[int] = None,
embedding_matrix: Optional[Union[np.ndarray, jax.Array]] = None,
w_init: Optional[hk.initializers.Initializer] = None,
lookup_style: Union[str, EmbedLookupStyle] = "ARRAY_INDEX",
name: Optional[str] = None,
precision: jax.lax.Precision = jax.lax.Precision.HIGHEST,
):
"""Constructs an Embed module.
Args:
vocab_size: The number of unique tokens to embed. If not provided, an
existing vocabulary matrix from which ``vocab_size`` can be inferred
must be provided as ``embedding_matrix``.
embed_dim: Number of dimensions to assign to each embedding. If an
existing vocabulary matrix initializes the module, this should not be
provided as it will be inferred.
embedding_matrix: A matrix-like object equivalent in size to
``[vocab_size, embed_dim]``. If given, it is used as the initial value
for the embedding matrix and neither ``vocab_size`` nor ``embed_dim``
need be given. If they are given, their values are checked to be
consistent with the dimensions of ``embedding_matrix``.
w_init: An initializer for the embeddings matrix. As a default, embeddings
are initialized via a truncated normal distribution.
lookup_style: One of the enum values of :class:`EmbedLookupStyle`
determining how to access the value of the embeddings given an ID.
Regardless, the input should be a dense array of integer values
representing ids. This setting changes how this module internally maps
those ids to embeddings. The result is the same, but the speed and
memory tradeoffs are different. It defaults to using NumPy-style array
indexing. This value is only the default for the module, and at any
given invocation can be overridden in :meth:`__call__`.
name: Optional name for this module.
precision: Only used when lookup_style is ONE_HOT. The precision to use
for the dot-product between the one-hot-encoded inputs and the embedding
vectors. It is possible to attain a ~2x speedup on TPU using
`jax.lax.Precision.DEFAULT` at the cost of a slightly lower precision.
Raises:
ValueError: If none of ``embed_dim``, ``embedding_matrix`` and
``vocab_size`` are supplied, or if ``embedding_matrix`` is supplied
and ``embed_dim`` or ``vocab_size`` is not consistent with the
supplied matrix.
"""
super().__init__(name=name)
if embedding_matrix is None and not (vocab_size and embed_dim):
raise ValueError(
"hk.Embed must be supplied either with an initial `embedding_matrix` "
"or with `embed_dim` and `vocab_size`.")
if embedding_matrix is not None:
embedding_matrix = jnp.asarray(embedding_matrix)
if vocab_size and embedding_matrix.shape[0] != vocab_size:
raise ValueError(
"An `embedding_matrix` was supplied but the `vocab_size` of "
f"{vocab_size} was not consistent with its shape "
f"{embedding_matrix.shape}.")
if embed_dim and embedding_matrix.shape[1] != embed_dim:
raise ValueError(
"An `embedding_matrix` was supplied but the `embed_dim` of "
f"{embed_dim} was not consistent with its shape "
f"{embedding_matrix.shape}.")
w_init = lambda _, __: embedding_matrix
vocab_size = embedding_matrix.shape[0]
embed_dim = embedding_matrix.shape[1]
self.vocab_size = vocab_size
self.embed_dim = embed_dim
self.lookup_style = lookup_style
self.precision = precision
self.w_init = w_init or hk.initializers.TruncatedNormal()
@property
def embeddings(self):
return hk.get_parameter("embeddings", [self.vocab_size, self.embed_dim],
init=self.w_init)
def __call__(
self,
ids: Union[jax.Array, Sequence[int]],
lookup_style: Optional[Union[str, hk.EmbedLookupStyle]] = None,
precision: Optional[jax.lax.Precision] = None,
) -> jax.Array:
r"""Lookup embeddings.
Looks up an embedding vector for each value in ``ids``. All ids must be
within ``[0, vocab_size)`` to prevent ``NaN``\ s from propagating.
Args:
ids: integer array.
lookup_style: Overrides the ``lookup_style`` given in the constructor.
precision: Overrides the ``precision`` given in the constructor.
Returns:
Tensor of ``ids.shape + [embedding_dim]``.
Raises:
AttributeError: If ``lookup_style`` is not valid.
ValueError: If ``ids`` is not an integer array.
"""
# TODO(tomhennigan) Consider removing asarray here.
ids = jnp.asarray(ids)
if not jnp.issubdtype(ids.dtype, jnp.integer):
raise ValueError("hk.Embed's __call__ method must take an array of "
"integer dtype but was called with an array of "
f"{ids.dtype}")
lookup_style = lookup_style or self.lookup_style
if isinstance(lookup_style, str):
lookup_style = getattr(hk.EmbedLookupStyle, lookup_style.upper())
if lookup_style == hk.EmbedLookupStyle.ARRAY_INDEX:
# If you don't wrap ids in a singleton tuple then JAX will try to unpack
# it along the row dimension and treat each row as a separate index into
# one of the dimensions of the array. The error only surfaces when
# indexing with DeviceArray, while indexing with numpy.ndarray works fine.
# See https://github.com/google/jax/issues/620 for more details.
      # Cast to a jnp array in case `ids` is a tracer (e.g. in a dynamic_unroll).
return jnp.asarray(self.embeddings)[(ids,)]
elif lookup_style == hk.EmbedLookupStyle.ONE_HOT:
one_hot_ids = jax.nn.one_hot(ids, self.vocab_size)
precision = self.precision if precision is None else precision
return jnp.dot(one_hot_ids, self.embeddings, precision=precision)
else:
raise NotImplementedError(f"{lookup_style} is not supported by hk.Embed.")
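# -----------------------------------------------------------------------------
# Illustrative usage sketch (added for exposition; not part of the original
# file). A minimal example, assuming the installed public package re-exports
# the module above as `haiku.Embed`: both lookup styles read the same
# `embeddings` parameter and return the same values, differing only in their
# speed/memory trade-offs.
if __name__ == "__main__":
  import haiku
  import jax
  import jax.numpy as jnp
  import numpy as np
  def embed_fn(ids, lookup_style):
    return haiku.Embed(vocab_size=100, embed_dim=16)(ids, lookup_style)
  fwd = haiku.transform(embed_fn)
  ids = jnp.array([3, 1, 4])
  params = fwd.init(jax.random.PRNGKey(0), ids, "ARRAY_INDEX")
  via_index = fwd.apply(params, None, ids, "ARRAY_INDEX")
  via_one_hot = fwd.apply(params, None, ids, "ONE_HOT")
  assert via_index.shape == (3, 16)  # ids.shape + [embed_dim]
  np.testing.assert_allclose(via_index, via_one_hot, rtol=1e-5)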
|
dm-haiku-main
|
haiku/_src/embed.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.depthwise_conv."""
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import depthwise_conv
from haiku._src import initializers
from haiku._src import transform
from jax import random
import jax.numpy as jnp
import numpy as np
def create_constant_initializers(w, b, with_bias):
if with_bias:
return {
"w_init": initializers.Constant(w),
"b_init": initializers.Constant(b)
}
else:
return {"w_init": initializers.Constant(w)}
class DepthwiseConvNDTest(parameterized.TestCase):
@parameterized.parameters(True, False)
def test_convolution_1d(self, with_bias):
def f():
data = np.ones([1, 10, 3])
data[0, :, 1] += 1
data[0, :, 2] += 2
data = jnp.array(data)
net = depthwise_conv.DepthwiseConv1D(
channel_multiplier=1,
kernel_shape=3,
stride=1,
padding="VALID",
with_bias=with_bias,
data_format="channels_last",
**create_constant_initializers(1.0, 1.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
self.assertEqual(out.shape, (1, 8, 3))
self.assertLen(np.unique(out[0, :, 0]), 1)
self.assertLen(np.unique(out[0, :, 1]), 1)
self.assertLen(np.unique(out[0, :, 2]), 1)
if with_bias:
self.assertEqual(np.unique(out[0, :, 0])[0], 1 * 3.0 + 1)
self.assertEqual(np.unique(out[0, :, 1])[0], 2 * 3.0 + 1)
self.assertEqual(np.unique(out[0, :, 2])[0], 3 * 3.0 + 1)
else:
self.assertEqual(np.unique(out[0, :, 0])[0], 1 * 3.0)
self.assertEqual(np.unique(out[0, :, 1])[0], 2 * 3.0)
self.assertEqual(np.unique(out[0, :, 2])[0], 3 * 3.0)
@parameterized.parameters(True, False)
def test_convolution_3d(self, with_bias):
def f():
data = np.ones([1, 10, 10, 10, 3])
data[0, :, :, :, 1] += 1
data[0, :, :, :, 2] += 2
data = jnp.array(data)
net = depthwise_conv.DepthwiseConv3D(
channel_multiplier=1,
kernel_shape=3,
stride=1,
padding="VALID",
with_bias=with_bias,
data_format="channels_last",
**create_constant_initializers(1.0, 1.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
self.assertEqual(out.shape, (1, 8, 8, 8, 3))
self.assertLen(np.unique(out[0, :, :, :, 0]), 1)
self.assertLen(np.unique(out[0, :, :, :, 1]), 1)
self.assertLen(np.unique(out[0, :, :, :, 2]), 1)
if with_bias:
self.assertEqual(np.unique(out[0, :, :, :, 0])[0], 1 * 3.0**3 + 1)
self.assertEqual(np.unique(out[0, :, :, :, 1])[0], 2 * 3.0**3 + 1)
self.assertEqual(np.unique(out[0, :, :, :, 2])[0], 3 * 3.0**3 + 1)
else:
self.assertEqual(np.unique(out[0, :, :, :, 0])[0], 1 * 3.0**3)
self.assertEqual(np.unique(out[0, :, :, :, 1])[0], 2 * 3.0**3)
self.assertEqual(np.unique(out[0, :, :, :, 2])[0], 3 * 3.0**3)
class DepthwiseConv2DTest(parameterized.TestCase):
@parameterized.parameters(True, False)
def test_convolution(self, with_bias):
def f():
data = np.ones([1, 10, 10, 3])
data[0, :, :, 1] += 1
data[0, :, :, 2] += 2
data = jnp.array(data)
net = depthwise_conv.DepthwiseConv2D(
channel_multiplier=1,
kernel_shape=3,
stride=1,
padding="VALID",
with_bias=with_bias,
data_format="channels_last",
**create_constant_initializers(1.0, 1.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
self.assertEqual(out.shape, (1, 8, 8, 3))
self.assertLen(np.unique(out[0, :, :, 0]), 1)
self.assertLen(np.unique(out[0, :, :, 1]), 1)
self.assertLen(np.unique(out[0, :, :, 2]), 1)
if with_bias:
      self.assertEqual(np.unique(out[0, :, :, 0])[0], 1 * 3.0 * 3.0 + 1)
      self.assertEqual(np.unique(out[0, :, :, 1])[0], 2 * 3.0 * 3.0 + 1)
      self.assertEqual(np.unique(out[0, :, :, 2])[0], 3 * 3.0 * 3.0 + 1)
    else:
      self.assertEqual(np.unique(out[0, :, :, 0])[0], 1 * 3.0 * 3.0)
      self.assertEqual(np.unique(out[0, :, :, 1])[0], 2 * 3.0 * 3.0)
      self.assertEqual(np.unique(out[0, :, :, 2])[0], 3 * 3.0 * 3.0)
@parameterized.parameters(True, False)
def test_padding(self, with_bias):
def f():
data = np.ones([1, 10, 10, 3])
data[0, :, :, 1] += 1
data[0, :, :, 2] += 2
data = jnp.array(data)
net = depthwise_conv.DepthwiseConv2D(
channel_multiplier=1,
kernel_shape=3,
stride=1,
padding="SAME",
with_bias=with_bias,
data_format="channels_last",
**create_constant_initializers(1.0, 0.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
self.assertEqual(out.shape, (1, 10, 10, 3))
@parameterized.parameters(True, False)
def test_channel_multiplier(self, with_bias):
def f():
data = np.ones([1, 10, 10, 3])
data[0, :, :, 1] += 1
data[0, :, :, 2] += 2
data = jnp.array(data)
net = depthwise_conv.DepthwiseConv2D(
channel_multiplier=3,
kernel_shape=3,
stride=1,
padding="VALID",
with_bias=with_bias,
data_format="channels_last",
**create_constant_initializers(1.0, 0.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
self.assertEqual(out.shape, (1, 8, 8, 9))
@parameterized.parameters(True, False)
def test_channels_first(self, with_bias):
def f():
data = np.ones([1, 3, 10, 10])
data[0, 1, :, :] += 1
data[0, 2, :, :] += 2
data = jnp.array(data)
net = depthwise_conv.DepthwiseConv2D(
channel_multiplier=1,
kernel_shape=3,
stride=1,
padding="VALID",
with_bias=with_bias,
data_format="channels_first",
**create_constant_initializers(1.0, 1.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
self.assertEqual(out.shape, (1, 3, 8, 8))
if with_bias:
self.assertEqual(np.unique(out[0, 0, :, :])[0], 1*3.0*3.0+1)
self.assertEqual(np.unique(out[0, 1, :, :])[0], 2*3.0*3.0+1)
self.assertEqual(np.unique(out[0, 2, :, :])[0], 3*3.0*3.0+1)
else:
self.assertEqual(np.unique(out[0, 0, :, :])[0], 1*3.0*3.0)
self.assertEqual(np.unique(out[0, 1, :, :])[0], 2*3.0*3.0)
self.assertEqual(np.unique(out[0, 2, :, :])[0], 3*3.0*3.0)
@parameterized.parameters(True, False)
def test_channels_first_mult(self, with_bias):
def f():
data = np.ones([1, 3, 10, 10])
data[0, 1, :, :] += 1
data[0, 2, :, :] += 2
data = jnp.array(data)
net = depthwise_conv.DepthwiseConv2D(
channel_multiplier=9,
kernel_shape=3,
stride=1,
padding="VALID",
with_bias=with_bias,
data_format="channels_first",
**create_constant_initializers(1.0, 0.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
self.assertEqual(out.shape, (1, 27, 8, 8))
class SeparableDepthwiseConv2DTest(parameterized.TestCase):
@parameterized.parameters(True, False)
def test_convolution(self, with_bias):
def f():
data = np.ones([1, 10, 10, 3])
data[0, :, :, 1] += 1
data[0, :, :, 2] += 2
data = jnp.array(data)
net = depthwise_conv.SeparableDepthwiseConv2D(
channel_multiplier=1,
kernel_shape=3,
stride=1,
padding="VALID",
with_bias=with_bias,
data_format="channels_last",
**create_constant_initializers(1.0, 1.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
self.assertEqual(out.shape, (1, 8, 8, 3))
self.assertLen(np.unique(out[0, :, :, 0]), 1)
self.assertLen(np.unique(out[0, :, :, 1]), 1)
self.assertLen(np.unique(out[0, :, :, 2]), 1)
if with_bias:
      self.assertEqual(np.unique(out[0, :, :, 0])[0], 1 * 3.0 * 3.0 + 1)
      self.assertEqual(np.unique(out[0, :, :, 1])[0], 2 * 3.0 * 3.0 + 1)
      self.assertEqual(np.unique(out[0, :, :, 2])[0], 3 * 3.0 * 3.0 + 1)
    else:
      self.assertEqual(np.unique(out[0, :, :, 0])[0], 1 * 3.0 * 3.0)
      self.assertEqual(np.unique(out[0, :, :, 1])[0], 2 * 3.0 * 3.0)
      self.assertEqual(np.unique(out[0, :, :, 2])[0], 3 * 3.0 * 3.0)
@parameterized.parameters(True, False)
def test_padding(self, with_bias):
def f():
data = np.ones([1, 10, 10, 3])
data[0, :, :, 1] += 1
data[0, :, :, 2] += 2
data = jnp.array(data)
net = depthwise_conv.SeparableDepthwiseConv2D(
channel_multiplier=1,
kernel_shape=3,
stride=1,
padding="SAME",
with_bias=with_bias,
data_format="channels_last",
**create_constant_initializers(1.0, 0.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
self.assertEqual(out.shape, (1, 10, 10, 3))
@parameterized.parameters(True, False)
def test_channel_multiplier(self, with_bias):
def f():
data = np.ones([1, 10, 10, 3])
data[0, :, :, 1] += 1
data[0, :, :, 2] += 2
data = jnp.array(data)
net = depthwise_conv.SeparableDepthwiseConv2D(
channel_multiplier=3,
kernel_shape=3,
stride=1,
padding="VALID",
with_bias=with_bias,
data_format="channels_last",
**create_constant_initializers(1.0, 0.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
self.assertEqual(out.shape, (1, 8, 8, 9))
@parameterized.parameters(True, False)
def test_channels_first(self, with_bias):
def f():
data = np.ones([1, 3, 10, 10])
data[0, 1, :, :] += 1
data[0, 2, :, :] += 2
data = jnp.array(data)
net = depthwise_conv.SeparableDepthwiseConv2D(
channel_multiplier=1,
kernel_shape=3,
stride=1,
padding="VALID",
with_bias=with_bias,
data_format="channels_first",
**create_constant_initializers(1.0, 1.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
self.assertEqual(out.shape, (1, 3, 8, 8))
if with_bias:
self.assertEqual(np.unique(out[0, 0, :, :])[0], 1*3.0*3.0+1)
self.assertEqual(np.unique(out[0, 1, :, :])[0], 2*3.0*3.0+1)
self.assertEqual(np.unique(out[0, 2, :, :])[0], 3*3.0*3.0+1)
else:
self.assertEqual(np.unique(out[0, 0, :, :])[0], 1*3.0*3.0)
self.assertEqual(np.unique(out[0, 1, :, :])[0], 2*3.0*3.0)
self.assertEqual(np.unique(out[0, 2, :, :])[0], 3*3.0*3.0)
@parameterized.parameters(True, False)
def test_channels_first_mult(self, with_bias):
def f():
data = np.ones([1, 3, 10, 10])
data[0, 1, :, :] += 1
data[0, 2, :, :] += 2
data = jnp.array(data)
net = depthwise_conv.SeparableDepthwiseConv2D(
channel_multiplier=9,
kernel_shape=3,
stride=1,
padding="VALID",
with_bias=with_bias,
data_format="channels_first",
**create_constant_initializers(1.0, 0.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
self.assertEqual(out.shape, (1, 27, 8, 8))
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/depthwise_conv_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lifting parameters in Haiku."""
from collections.abc import Mapping, MutableMapping
import functools
from typing import Any, Callable, Optional, TypeVar
from haiku._src import base
from haiku._src import data_structures
from haiku._src import module
from haiku._src import transform
from haiku._src.typing import LiftingModuleType
# If you are forking replace this with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
Params = base.Params
State = base.State
Module = module.Module
running_init = transform.running_init
data_structures = data_structures
# pylint: enable=invalid-name
del module, data_structures, transform
T = TypeVar("T")
MutableBundle = MutableMapping[str, MutableMapping[str, Any]]
def pack_into_dict(src: hk.Params,
dst: MutableMapping[str, Any],
prefix: str,
state: bool = False,
check_param_reuse: bool = True):
"""Puts items from src into dst, with an added prefix."""
for key, value in src.items():
new_key = f"{prefix}/{key}" if prefix else key
if check_param_reuse and new_key in dst:
raise ValueError(
f"Key '{new_key}' already exists in the destination params. To "
"prevent accidental parameter re-use during lift, you can't re-use a "
"parameter already defined in the outer scope.")
value = dict(value)
if state:
value = {k: base.StatePair(v, v) for k, v in value.items()}
dst[new_key] = value
def unpack_from_dict(src: hk.Params, prefix: str) -> MutableBundle:
"""Returns pairs from src where key begins with prefix, cutting off prefix."""
if not prefix:
return {k: dict(v) for k, v in src.items()}
l = len(prefix)
return {k[l:]: dict(v) for k, v in src.items() if k.startswith(prefix)}
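# For example (illustrative note added for exposition; not part of the
# original file): with a prefix of "lifted",
#   pack_into_dict({"linear": {"w": w}}, dst, "lifted")
# stores the value under dst["lifted/linear"]["w"], and
#   unpack_from_dict({"lifted/linear": {"w": w}}, "lifted/")
# returns {"linear": {"w": w}}. Note the trailing "/" in the prefix passed to
# `unpack_from_dict`, matching how `LiftingModule` calls it below.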
def add_state_to_init_fn(
init_fn: Callable[..., hk.Params],
) -> Callable[..., tuple[hk.Params, hk.State]]:
def wrapped_init_fn(*a, **k):
params = init_fn(*a, **k)
if not isinstance(params, Mapping):
raise base.NonEmptyStateError("For stateful lifted functions use "
"`hk.lift_with_state`")
return params, {}
return wrapped_init_fn
# TODO(tycai): Make sure transformed functions have better names.
class LiftingModule(hk.Module, LiftingModuleType):
"""See :func:`lift` or :func:`lift_with_state`."""
def __init__(self,
init_fn,
transparent=False,
allow_reuse=False,
name="lifted"):
super().__init__(name=name)
self._init_fn = init_fn
if transparent:
self.prefix_name = "/".join(self.module_name.split("/")[:-1])
else:
self.prefix_name = self.module_name
self._allow_reuse = allow_reuse
def __call__(self, *args, **kwargs):
frame = base.current_frame()
outer_params = frame.params
outer_state = frame.state
if hk.running_init():
inner_params, inner_state = self._init_fn(*args, **kwargs)
# Lift parameters into this transform's params_dict.
check_param_reuse = not self._allow_reuse
pack_into_dict(
inner_params,
outer_params,
self.prefix_name,
check_param_reuse=check_param_reuse)
pack_into_dict(
inner_state,
outer_state,
self.prefix_name,
state=True,
check_param_reuse=check_param_reuse)
return inner_params, inner_state
else:
if self.prefix_name:
prefix = f"{self.prefix_name}/"
else:
prefix = ""
inner_params = unpack_from_dict(outer_params, prefix)
inner_state = unpack_from_dict(outer_state, prefix)
inner_state = base.extract_state(inner_state, initial=False)
inner_params = hk.data_structures.to_haiku_dict(inner_params)
return inner_params, inner_state
def lift(
init_fn: Callable[..., hk.Params],
*,
allow_reuse: bool = False,
name: str = "lifted",
) -> Callable[..., hk.Params]:
r"""Registers parameters from an inner init function in an outer transform.
HINT: :func:`lift` is for when you want to make non-trivial use of JAX
transforms (e.g. ``jax.vmap``) **inside** of a :func:`transform` or
:func:`transform_with_state`. We generally recommend trying to use JAX
transforms on the pure functions returned by :func:`transform`, in which case
you do not need :func:`lift`.
Use :func:`lift`\ when nesting Haiku transforms to register the parameters of
the inner transform in any outer transform. This is mainly useful when using
  JAX functions inside of a Haiku module (e.g. using ``jax.vmap`` on a layer).
See
https://dm-haiku.readthedocs.io/en/latest/notebooks/transforms.html#Using-hk.lift
for more explanation of when to use :func:`lift`\ . (If you're not using JAX
functions inside of a module or don't need access to your parameters inside of
a transform, you probably don't need to use :func:`lift`\ )
Must be called inside :func:`transform`\ , and be passed the ``init``
member of a :class:`Transformed`\ .
During init, the returned callable will run the given ``init_fn``, and include
the resulting params in the outer transform's dictionaries.
During ``apply``, the returned callable will instead pull the relevant
parameters from the outer transform's dictionaries.
By default, users must ensure that the given ``init`` does not accidentally
catch modules from an outer :func:`transform` via functional
closure. If this behavior is desirable, set ``allow_reuse`` to ``True``.
Example:
A common usage of :func:`lift` is to use JAX transformations like ``vmap`` in
non-trivial ways, inside a :func:`transform`. For example, we can use
:func:`lift` and ``jax.vmap`` to create an ensemble.
First we'll create a helper function that uses :func:`lift` to apply ``vmap``
to our model. As you can see from the comments, we are using ``vmap`` to
change how parameters should be created (in this case we create a unique set
of parameters for each member of the ensemble) and we change how apply works
(we "map" the parameters meaning JAX will compute the forward pass separately,
in parallel, for each member of the ensemble):
>>> def create_ensemble(model, size: int):
... init_rng = hk.next_rng_keys(size) if hk.running_init() else None
... model = hk.transform(model)
... # in_axes: rng is mapped, data is not.
... init_model = jax.vmap(model.init, in_axes=(0, None))
... # Use hk.lift to "lift" parameters created by `init_model` into the
... # outer transform.
... init_model = hk.lift(init_model, name="ensemble")
... def ensemble(x):
... params = init_model(init_rng, x)
... # in_axes: params are mapped, rng/data are not.
... return jax.vmap(model.apply, in_axes=(0, None, None))(params, None, x)
... return ensemble
We can now use this function to ensemble any Haiku module(s), inside of a
transform. First we define a function for each member of the ensemble:
>>> def member_fn(x):
... return hk.nets.MLP([300, 100, 10])(x)
Secondly we can combine our two functions, inside a :func:`transform` to
create an ensemble:
>>> def f(x):
... ensemble = create_ensemble(member_fn, size=4)
... x = ensemble(x)
... # You could create other modules here which were not ensembled.
... return x
>>> f = hk.transform(f)
When we initialize the network, our ensemble member's parameters have a
leading dimension the size of the ensemble:
>>> rng = jax.random.PRNGKey(777)
>>> x = jnp.ones([32, 128])
>>> params = f.init(rng, x)
>>> jax.tree_util.tree_map(lambda x: x.shape, params)
{'ensemble/mlp/~/linear_0': {'b': (4, 300), 'w': (4, 128, 300)},
'ensemble/mlp/~/linear_1': {'b': (4, 100), 'w': (4, 300, 100)},
'ensemble/mlp/~/linear_2': {'b': (4, 10), 'w': (4, 100, 10)}}
When we apply the network, we get an output for each member of the ensemble
for the entire batch:
>>> y = f.apply(params, None, x)
>>> y.shape
(4, 32, 10)
Args:
init_fn: The ``init`` function from an :class:`Transformed`\ .
allow_reuse: Allows lifted parameters and state to be reused from the
outer :func:`transform`. This can be desirable when using ``lift`` within
control flow (e.g. ``hk.scan``).
name: A string name to prefix parameters with.
Returns:
A callable that during ``init`` injects parameter values into the outer
context and during ``apply`` retrieves parameters from the outer context. In
both cases returns parameter values to be used with an ``apply`` function.
See also:
- :func:`lift_with_state`: Register params and state with an outer
transform.
- :func:`transparent_lift`: Register params with an outer transform
without a namespace.
- :func:`transparent_lift_with_state`: Register params and state with
an outer transform without a namespace.
"""
base.assert_context("lift")
init_fn = add_state_to_init_fn(init_fn)
params_and_state_fn, updater = lift_with_state(
init_fn, allow_reuse=allow_reuse, name=name)
updater.ignore_update()
return lambda *a, **k: params_and_state_fn(*a, **k)[0]
def transparent_lift(
init_fn: Callable[..., hk.Params],
*,
allow_reuse: bool = False) -> Callable[..., hk.Params]:
r"""Registers parameters in an outer transform without adding a name scope.
  Functionally this is equivalent to :func:`lift`\ , but without automatically
  adding an additional variable scope. Note that closing over a module from
  an outer scope is disallowed.
See :func:`lift`\ for more context on when to use ``lift``.
Args:
init_fn: The ``init`` function from an :class:`Transformed`\ .
allow_reuse: Allows lifted parameters to be reused from the outer
      :func:`transform_with_state`\ . This can be desirable when lifting
      within control flow (e.g. ``hk.scan``).
Returns:
A callable that during ``init`` injects parameter values into the outer
context and during ``apply`` reuses parameters from the outer context. In
both cases returns parameter values to be used with an ``apply`` function.
See also:
- :func:`lift`: Register params with an outer transform.
- :func:`lift_with_state`: Register params and state with an outer
transform.
- :func:`transparent_lift_with_state`: Register params and state with an
outer transform without a namespace.
"""
base.assert_context("transparent_lift")
init_fn = add_state_to_init_fn(init_fn)
lifted = LiftingModule(
init_fn, transparent=True, allow_reuse=allow_reuse)
def fn(*a, **k):
with base.closure_boundary_stack(base.current_frame().frame_id+1):
return lifted(*a, **k)[0]
return fn
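# Illustrative note (added for exposition; not part of the original file):
# both `lift` and `transparent_lift` return the *inner* parameter structure
# (suitable for the inner ``apply``), but they differ in how those parameters
# are registered in the outer transform. With an inner function containing a
# single `hk.Linear(4)`, the outer params would look roughly like:
#   hk.lift(inner.init, name="lifted")  -> {"lifted/linear": {"w": ..., "b": ...}}
#   hk.transparent_lift(inner.init)     -> {"linear": {"w": ..., "b": ...}}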
def with_assert_not_used(f):
"""Validates that an updater method is called correctly."""
  # NOTE: defined outside LiftWithStateUpdater to avoid adding this to the
  # public API and letting users call it directly.
@functools.wraps(f)
def wrapper(self, *a, **k):
if self._used: # pylint: disable=protected-access
raise ValueError("State updater must only be used once.")
if not base.inside_transform():
raise ValueError(
"State updater must be used inside hk.transform_with_state.")
if self._context_id != id(base.current_context()): # pylint: disable=protected-access
raise ValueError(
"State updater must be used within the same call to init/apply.")
self._used = True # pylint: disable=protected-access
return f(self, *a, **k)
return wrapper
class LiftWithStateUpdater:
"""Handles updating the state for a `lift_with_state` computation."""
__slots__ = ("_used", "_name", "_context_id")
def __init__(self, name: Optional[str]):
self._used = False
self._name = name
ctx = base.current_context()
# Note: using ID is safe here because we know the lifetime of the context
# instance will outlive the updater thanks to the callback.
self._context_id = id(ctx)
ctx.add_teardown_callback(self.assert_used)
def assert_used(self):
if not self._used:
raise ValueError("LiftWithStateUpdater (from `lift_with_state`) must be "
"used, call `.update(..)` or `.ignore_update()` before "
"it goes out of scope.")
@with_assert_not_used
def ignore_update(self):
"""Notifies the updater that state does not need to be updated."""
pass
@with_assert_not_used
def update(self, state: hk.State):
"""Updates Haiku's internal state to the given state."""
frame = base.current_frame()
for mod_name, bundle in state.items():
if self._name is not None:
mod_name = f"{self._name}/{mod_name}"
for name, value in bundle.items():
initial_pair = base.StatePair(value, value)
initial = frame.state[mod_name].get(name, initial_pair).initial
frame.state[mod_name][name] = base.StatePair(initial, value)
def _to_callable(f: Callable[..., T]) -> Callable[..., T]:
"""Enapsulates the given callable inside a lambda."""
# Prevents us from leaking methods other than __call__ on `f`.
return lambda *a, **k: f(*a, **k) # pylint: disable=unnecessary-lambda
def lift_with_state(
init_fn: Callable[..., tuple[hk.Params, hk.State]],
*,
allow_reuse: bool = False,
name: str = "lifted",
) -> tuple[Callable[..., tuple[hk.Params, hk.State]], LiftWithStateUpdater]:
r"""Registers params and state from an init function in an outer transform.
See :func:`lift`\ for more context on when to use ``lift``.
  This function returns two objects. The first is a callable that runs your
  init function with slightly different behaviour depending on whether it is
  run during init or apply. The second is an updater that can be used to pass
  updated state values that result from running your apply function. See later
  in the docs for a worked example.
During init, the returned callable will run the given ``init_fn``, and include
the resulting params/state in the outer transform's dictionaries. During
``apply``, the returned callable will instead pull the relevant params/state
from the outer transform's dictionaries.
Must be called inside :func:`transform_with_state`\ , and be passed the
``init`` member of a :class:`TransformedWithState`\ .
By default, users must ensure that the given ``init`` does not accidentally
catch modules from an outer :func:`transform_with_state` via functional
closure. If this behavior is desirable, set ``allow_reuse`` to ``True``.
Example:
>>> def g(x):
... return hk.nets.ResNet50(1)(x, True)
>>> g = hk.transform_with_state(g)
>>> params_and_state_fn, updater = (
... hk.lift_with_state(g.init, name='f_lift'))
>>> init_rng = hk.next_rng_key() if hk.running_init() else None
>>> x = jnp.ones([1, 224, 224, 3])
>>> params, state = params_and_state_fn(init_rng, x)
>>> out, state = g.apply(params, state, None, x)
>>> updater.update(state)
Args:
init_fn: The ``init`` function from an :class:`TransformedWithState`\ .
allow_reuse: Allows lifted parameters and state to be reused from the
outer :func:`transform_with_state`. This can be desirable when using
``lift_with_state`` within control flow (e.g. ``hk.scan``).
name: A string name to prefix parameters with.
Returns:
A callable that during ``init`` injects parameter values into the outer
context and during ``apply`` reuses parameters from the outer context. In
both cases returns parameter values to be used with an ``apply`` function.
The ``init`` function additionally returns an object used to update the
outer context with new state after ``apply`` is called.
See also:
- :func:`lift`: Register parameters with an outer transform.
- :func:`transparent_lift`: Register parameters with an outer transform
without a namespace.
- :func:`transparent_lift_with_state`: Register parameters and state with an
outer transform without a namespace.
"""
base.assert_context("lift_with_state")
params_and_state_fn = _to_callable(
LiftingModule(init_fn, allow_reuse=allow_reuse, name=name))
if base.current_module():
name = f"{base.current_name()}/{name}"
updater = LiftWithStateUpdater(name)
return params_and_state_fn, updater
def transparent_lift_with_state(
init_fn: Callable[..., tuple[hk.Params, hk.State]],
*,
allow_reuse: bool = False,
) -> tuple[Callable[..., tuple[hk.Params, hk.State]], LiftWithStateUpdater]:
r"""Registers params and state in an outer transform without adding scope.
  Functionally this is equivalent to :func:`lift_with_state`\ , but without
  automatically adding an additional variable scope.
See :func:`lift_with_state`\ for more context on when to use
``lift_with_state``.
Args:
init_fn: The ``init`` function from an :class:`TransformedWithState`\.
allow_reuse: Allows lifted parameters and state to be reused from the outer
      :func:`transform_with_state`\ . This can be desirable when lifting
      within control flow (e.g. ``hk.scan``).
Returns:
A callable that during ``init`` injects parameter values into the outer
context and during ``apply`` reuses parameters from the outer context. In
both cases returns parameter values to be used with an ``apply`` function.
The ``init`` function additionally returns an object used to update the
outer context with new state after ``apply`` is called.
See also:
- :func:`lift`: Register params with an outer transform.
- :func:`lift_with_state`: Register params and state with an outer
transform.
- :func:`transparent_lift`: Register params with an outer transform
without a namespace.
"""
base.assert_context("lift_with_state")
params_and_state_fn = _to_callable(
LiftingModule(init_fn, transparent=True, allow_reuse=allow_reuse))
name = base.current_name() if base.current_module() else None
updater = LiftWithStateUpdater(name)
return params_and_state_fn, updater
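# -----------------------------------------------------------------------------
# Illustrative usage sketch (added for exposition; not part of the original
# file). A minimal, runnable example of `lift`, assuming the installed public
# `haiku` package; it shows the inner parameters being registered under the
# "lifted/" namespace of the outer transform (with `transparent_lift` the
# prefix would be absent).
if __name__ == "__main__":
  import haiku
  import jax
  import jax.numpy as jnp
  def inner(x):
    return haiku.Linear(4)(x)
  inner_t = haiku.transform(inner)
  def outer(x):
    init_rng = haiku.next_rng_key() if haiku.running_init() else None
    # During init this runs `inner_t.init` and registers its params under
    # "lifted/"; during apply it pulls them back out of the outer params.
    params = haiku.lift(inner_t.init, name="lifted")(init_rng, x)
    return inner_t.apply(params, None, x)
  outer_t = haiku.transform(outer)
  x = jnp.ones([2, 3])
  params = outer_t.init(jax.random.PRNGKey(0), x)
  print(jax.tree_util.tree_map(lambda p: p.shape, params))
  # e.g. -> {'lifted/linear': {'b': (4,), 'w': (3, 4)}}
  out = outer_t.apply(params, None, x)
  assert out.shape == (2, 4)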
|
dm-haiku-main
|
haiku/_src/lift.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.transform."""
from collections.abc import Mapping
import inspect
from typing import Optional, Union
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import base
from haiku._src import multi_transform
from haiku._src import test_utils
from haiku._src import transform
from haiku._src import typing
import jax
import jax.numpy as jnp
import tensorflow as tf
PRNGKey = typing.PRNGKey
State = typing.State
Params = typing.Params
# TODO(tomhennigan) Improve test coverage.
class TransformTest(parameterized.TestCase):
@test_utils.transform_and_run
def test_parameter_reuse(self):
w1 = base.get_parameter("w", [], init=jnp.zeros)
w2 = base.get_parameter("w", [], init=jnp.zeros)
self.assertIs(w1, w2)
def test_params(self):
def f():
w = base.get_parameter("w", [], init=jnp.zeros)
return w
init_fn, _ = transform.transform(f)
params = init_fn(None)
self.assertEqual(params, {"~": {"w": jnp.zeros([])}})
@test_utils.transform_and_run
def test_naked_get_parameter(self):
w1 = base.get_parameter("w", [], init=jnp.zeros)
w2 = base.get_parameter("w", [], init=jnp.zeros)
self.assertIs(w1, w2)
def test_naked_parameter_in_tilde_collection(self):
def net():
w1 = base.get_parameter("w1", [], init=jnp.zeros)
w2 = base.get_parameter("w2", [], init=jnp.ones)
self.assertIsNot(w1, w2)
init_fn, _ = transform.transform(net)
params = init_fn(None)
self.assertEqual(params,
{"~": {"w1": jnp.zeros([]), "w2": jnp.ones([])}})
@parameterized.parameters((None,), ({},), ({"~": {}},))
def test_parameter_in_apply(self, params):
_, apply_fn = transform.transform(
lambda: base.get_parameter("w", [], init=jnp.zeros))
with self.assertRaisesRegex(
ValueError, "parameters must be created as part of `init`"):
apply_fn(params, None)
def test_missing_parameter_without_apply_rng(self):
# Test we're getting a "missing parameter" error, not a "missing RNG" error.
initializer_with_rng = lambda s, d: base.next_rng_key()
_, apply_fn = multi_transform.without_apply_rng(
transform.transform(
lambda: base.get_parameter("w", [], init=initializer_with_rng)))
with self.assertRaisesRegex(ValueError,
"parameters must be created as part of `init`"):
apply_fn({})
@test_utils.transform_and_run(seed=None)
def test_no_rng(self):
with self.assertRaisesRegex(base.MissingRNGError,
"must pass a non-None PRNGKey"):
base.next_rng_key()
def test_invalid_rng(self):
f = transform.transform(lambda: None)
with self.assertRaisesRegex(
ValueError, "Init must be called with an RNG as the first argument"):
f.init("nonsense")
with self.assertRaisesRegex(
ValueError, "Apply must be called with an RNG as the second argument"):
f.apply({}, "nonsense")
def test_invalid_rng_state(self):
f = transform.transform_with_state(lambda: None)
with self.assertRaisesRegex(
ValueError, "Init must be called with an RNG as the first argument"):
f.init("nonsense")
with self.assertRaisesRegex(
ValueError, "Apply must be called with an RNG as the third argument"):
f.apply({}, {"x": {}}, "nonsense")
def test_invalid_rng_empty_state(self):
f = transform.transform_with_state(lambda: None)
with self.assertRaisesRegex(
ValueError, "Init must be called with an RNG as the first argument"):
f.init("nonsense")
with self.assertRaisesRegex(
ValueError, "Apply must be called with an RNG as the third argument"):
f.apply({}, {}, "nonsense")
@parameterized.parameters(transform.transform,
transform.transform_with_state)
def test_invalid_rng_none_ignored(self, transform_fn):
f = transform_fn(lambda: None)
args = f.init(None)
if not isinstance(args, tuple):
args = (args,)
f.apply(*args, None)
def test_invalid_params(self):
f = transform.transform_with_state(lambda: None)
with self.assertRaisesRegex(TypeError,
"params argument does not appear valid"):
f.apply("z", {}, None)
def test_invalid_state(self):
f = transform.transform_with_state(lambda: None)
with self.assertRaisesRegex(TypeError,
"state argument does not appear valid"):
f.apply({}, "z", None)
def test_maybe_rng_no_transform(self):
with self.assertRaisesRegex(ValueError,
"must be used as part of an `hk.transform`"):
base.maybe_next_rng_key()
@test_utils.transform_and_run(seed=None)
def test_maybe_no_rng(self):
self.assertIsNone(base.maybe_next_rng_key())
def test_maybe_rng_vs_not(self):
"""If we have an rng, then next_rng_key() == maybe_next_rng_key()."""
rngs = []
maybes = []
@test_utils.transform_and_run
def three():
for _ in range(3):
rngs.append(base.next_rng_key())
@test_utils.transform_and_run
def maybe_three():
for _ in range(3):
maybes.append(base.maybe_next_rng_key())
three()
maybe_three()
self.assertLen(rngs, 6)
self.assertTrue(jnp.all(jnp.array(rngs) == jnp.array(maybes)))
def test_init_custom_creator(self):
def zeros_creator(next_creator, shape, dtype, init, context):
self.assertEqual(context.full_name, "~/w")
self.assertEqual(shape, [])
self.assertEqual(dtype, jnp.float32)
self.assertEqual(init, jnp.ones)
return next_creator(shape, dtype, jnp.zeros)
def f():
with base.custom_creator(zeros_creator):
return base.get_parameter("w", [], init=jnp.ones)
params = transform.transform(f).init(None)
self.assertEqual(params, {"~": {"w": jnp.zeros([])}})
def test_not_triggered_in_apply(self):
log = []
def counting_creator(next_creator, shape, dtype, init, context):
log.append(context.full_name)
return next_creator(shape, dtype, init)
def net():
with base.custom_creator(counting_creator):
for i in range(4):
base.get_parameter(f"w{i}", [], init=jnp.zeros)
init_fn, apply_fn = transform.transform(net)
params = init_fn(None)
self.assertEqual(log, ["~/w0", "~/w1", "~/w2", "~/w3"])
del log[:]
apply_fn(params, None)
self.assertEmpty(log)
def test_nested_creators(self):
log = []
def logging_creator(log_msg):
def _logging_creator(next_creator, shape, dtype, init, context):
del context
log.append(log_msg)
return next_creator(shape, dtype, init)
return _logging_creator
def f():
a, b, c = map(logging_creator, ["a", "b", "c"])
with base.custom_creator(a), \
base.custom_creator(b), \
base.custom_creator(c):
return base.get_parameter("w", [], init=jnp.ones)
transform.transform(f).init(None)
self.assertEqual(log, ["a", "b", "c"])
def test_argspec(self):
init_fn, apply_fn = transform.transform_with_state(lambda: None)
init_fn_spec = inspect.getfullargspec(init_fn)
apply_fn_spec = inspect.getfullargspec(apply_fn)
self.assertEqual(init_fn_spec.args, ["rng"])
self.assertEqual(apply_fn_spec.args, ["params", "state", "rng"])
def test_get_state_no_init_raises(self):
init_fn, apply_fn = transform.transform_with_state(
lambda: base.get_state("i"))
with self.assertRaisesRegex(ValueError, "set an init function"):
init_fn(None)
state = params = {"~": {}}
with self.assertRaisesRegex(ValueError, "set an init function"):
apply_fn(params, state, None)
def test_get_state_no_shape_raises(self):
init_fn, apply_fn = transform.transform_with_state(
lambda: base.get_state("i", init=jnp.zeros))
with self.assertRaisesRegex(ValueError, "provide shape and dtype"):
init_fn(None)
state = params = {"~": {}}
with self.assertRaisesRegex(ValueError, "provide shape and dtype"):
apply_fn(params, state, None)
def test_get_state_no_init(self):
_, apply_fn = transform.transform_with_state(lambda: base.get_state("i"))
for i in range(10):
state_in = {"~": {"i": i}}
_, state_out = apply_fn({}, state_in, None)
self.assertEqual(state_in, state_out)
def test_set_then_get(self):
def net():
base.set_state("i", 1)
return base.get_state("i")
init_fn, apply_fn = transform.transform_with_state(net)
params, state = init_fn(None)
self.assertEqual(state, {"~": {"i": 1}})
for i in range(10):
state_in = {"~": {"i": i}}
y, state_out = apply_fn(params, state_in, None)
self.assertEqual(y, 1)
self.assertEqual(state_out, {"~": {"i": 1}})
def test_stateful(self):
def f():
for _ in range(10):
count = base.get_state("count", (), jnp.int32, jnp.zeros)
base.set_state("count", count + 1)
return count
init_fn, apply_fn = transform.transform_with_state(f)
params, state = init_fn(None)
self.assertEqual(state, {"~": {"count": 0}})
_, state = apply_fn(params, state, None)
self.assertEqual(state, {"~": {"count": 10}})
def test_without_state(self):
def f():
w = base.get_parameter("w", [], init=jnp.zeros)
return w
init_fn, apply_fn = transform.without_state(
transform.transform_with_state(f))
params = init_fn(None)
out = apply_fn(params, None)
self.assertEqual(out, 0)
def test_without_state_raises_if_state_used(self):
def f():
for _ in range(10):
count = base.get_state("count", (), jnp.int32, jnp.zeros)
base.set_state("count", count + 1)
return count
init_fn, _ = transform.without_state(transform.transform_with_state(f))
with self.assertRaisesRegex(base.NonEmptyStateError,
"use.*transform_with_state"):
init_fn(None)
def test_with_empty_state(self):
def f():
w = base.get_parameter("w", [], init=jnp.zeros)
return w
init_fn, apply_fn = transform.with_empty_state(
transform.transform(f))
params, state = init_fn(None)
self.assertEmpty(state)
out, state = apply_fn(params, state, None)
self.assertEqual(out, 0)
self.assertEmpty(state)
def test_inline_use(self):
def f():
w = base.get_parameter("w", [], init=jnp.zeros)
return w
f = transform.transform(f)
rng = jax.random.PRNGKey(42)
params = f.init(rng)
w = f.apply(params, None)
self.assertEqual(w, 0)
def test_method(self):
obj = ObjectWithTransform()
x = jnp.ones([])
params = obj.forward.init(None, x)
obj_out, y = obj.forward.apply(params, None, x)
self.assertEqual(y, 1)
self.assertIs(obj, obj_out)
params = jax.tree_util.tree_map(lambda v: v + 1, params)
obj_out, y = obj.forward.apply(params, None, x)
self.assertEqual(y, 2)
self.assertIs(obj, obj_out)
def test_trampoline(self):
obj = ObjectWithTransform()
x = jnp.ones([])
params = obj.trampoline.init(None, x)
obj_out, y = obj.trampoline.apply(params, None, x)
self.assertEqual(y, 1)
self.assertIs(obj, obj_out)
@parameterized.parameters((42, True), (42, False),
(28, True), (28, False))
def test_prng_sequence(self, seed, wrap_seed):
# Values using our sequence.
key_or_seed = jax.random.PRNGKey(seed) if wrap_seed else seed
key_seq = base.PRNGSequence(key_or_seed)
seq_v1 = jax.random.normal(next(key_seq), [])
seq_v2 = jax.random.normal(next(key_seq), [])
# Generate values using manual splitting.
key = jax.random.PRNGKey(seed)
key, temp_key = jax.random.split(key)
raw_v1 = jax.random.normal(temp_key, [])
_, temp_key = jax.random.split(key)
raw_v2 = jax.random.normal(temp_key, [])
self.assertEqual(raw_v1, seq_v1)
self.assertEqual(raw_v2, seq_v2)
def test_prng_sequence_invalid_input(self):
with self.assertRaisesRegex(ValueError, "not a JAX PRNGKey"):
base.PRNGSequence("nonsense") # type: ignore
def test_prng_sequence_wrong_shape(self):
with self.assertRaisesRegex(ValueError,
"key did not have expected shape and/or dtype"):
base.PRNGSequence(jax.random.split(jax.random.PRNGKey(42), 2))
@parameterized.parameters(42, 28)
def test_with_rng(self, seed):
key = jax.random.PRNGKey(seed)
unrelated_key = jax.random.PRNGKey(seed * 2 + 1)
_, next_key = jax.random.split(key)
expected_output = jax.random.uniform(next_key, ())
def without_decorator():
return jax.random.uniform(base.next_rng_key(), ())
without_decorator = transform.transform(without_decorator)
without_decorator_out = without_decorator.apply(None, unrelated_key).item()
def with_decorator():
with base.with_rng(key):
return jax.random.uniform(base.next_rng_key(), ())
with_decorator = transform.transform(with_decorator)
with_decorator_out = with_decorator.apply(None, unrelated_key).item()
self.assertNotEqual(without_decorator_out, expected_output)
self.assertEqual(with_decorator_out, expected_output)
def test_without_apply_rng_output_type(self):
def f():
w = base.get_parameter("w", [], init=jnp.zeros)
return w
f_t = multi_transform.without_apply_rng(transform.transform_with_state(f))
self.assertIsInstance(f_t, transform.TransformedWithState)
f_t = multi_transform.without_apply_rng(transform.transform(f))
self.assertIsInstance(f_t, transform.Transformed)
def test_new_context(self):
with base.new_context() as ctx:
pass
self.assertEmpty(ctx.collect_params())
self.assertEmpty(ctx.collect_initial_state())
self.assertEmpty(ctx.collect_state())
def test_context_copies_input(self):
before = {"~": {"w": jnp.array(1.)}}
with base.new_context(params=before, state=before) as ctx:
base.get_parameter("w", [], init=jnp.ones)
base.set_state("w", jnp.array(2.))
self.assertEqual(ctx.collect_params(), {"~": {"w": jnp.array(1.)}})
self.assertIsNot(ctx.collect_initial_state(), before)
self.assertEqual(ctx.collect_initial_state(), before)
self.assertEqual(ctx.collect_state(), {"~": {"w": jnp.array(2.)}})
self.assertEqual(before, {"~": {"w": jnp.array(1.)}})
def test_without_state_raises_if_state_used_on_apply(self):
f = lambda: base.set_state("~", 1)
f = transform.without_state(transform.transform_with_state(f))
rng = jax.random.PRNGKey(42)
with self.assertRaisesRegex(base.NonEmptyStateError,
"use.*transform_with_state"):
params = f.init(rng)
f.apply(params, rng)
def test_running_init(self):
l = []
f = transform.transform(lambda: l.append(transform.running_init()))
f.init(None)
f.apply({}, None)
init_value, apply_value = l # pylint: disable=unbalanced-tuple-unpacking # pytype: disable=bad-unpacking
self.assertEqual(init_value, True)
self.assertEqual(apply_value, False)
def test_running_init_outside_transform(self):
with self.assertRaisesRegex(ValueError,
"running_init.*used as part of.*transform"):
transform.running_init()
@parameterized.parameters(
None,
multi_transform.without_apply_rng,
transform.with_empty_state)
def test_persists_original_fn(self, without):
orig_f = lambda: None
f = transform.transform(orig_f)
if without is not None:
f = without(f)
self.assertPersistsOriginal(f, orig_f)
@parameterized.parameters(
None,
transform.without_state,
multi_transform.without_apply_rng,
lambda f: transform.without_state(multi_transform.without_apply_rng(f)),
lambda f: transform.with_empty_state(transform.without_state(f)))
def test_persists_original_fn_transform_with_state(self, without):
orig_f = lambda: None
f = transform.transform_with_state(orig_f)
if without is not None:
f = without(f)
self.assertPersistsOriginal(f, orig_f)
def assertPersistsOriginal(self, f, orig_f):
self.assertIs(transform.get_original_fn(f), orig_f)
self.assertIs(transform.get_original_fn(f.init), orig_f)
self.assertIs(transform.get_original_fn(f.apply), orig_f)
@parameterized.parameters(
transform.transform,
lambda f: transform.without_state(transform.transform_with_state(f)))
def test_calling_with_duplicate_state_kwarg(self, transform_fn):
def f(state):
del state
self.assert_raises_by_name_error(transform_fn(f))
@parameterized.parameters(transform.transform, transform.transform_with_state)
def test_calling_with_duplicate_rng_kwarg(self, transform_fn):
def f(rng):
del rng
self.assert_raises_by_name_error(
multi_transform.without_apply_rng(transform_fn(f)))
def assert_raises_by_name_error(self, f):
with self.assertRaisesRegex(TypeError, "pass them positionally"):
f.apply(params=None, state=None, rng=None)
def test_output_type_default(self):
def f():
base.get_parameter("w", [], init=jnp.zeros)
base.get_state("w", [], init=jnp.zeros)
init, apply = transform.transform_with_state(f)
params, state_in = init(None)
_, state_out = apply(params, state_in, None)
self.assertLen(params, 1)
self.assertLen(state_in, 1)
self.assertLen(state_out, 1)
self.assertEqual(type(params), dict)
self.assertEqual(type(params["~"]), dict)
self.assertEqual(type(state_in["~"]), dict)
self.assertEqual(type(state_out["~"]), dict)
def test_unexpected_tracer_error_hint(self):
def leaks_and_uses_tracer():
jax.jit(base.next_rng_key)()
base.next_rng_key()
init, _ = transform.transform(leaks_and_uses_tracer)
with self.assertRaisesRegex(jax.errors.UnexpectedTracerError,
"want to use the Haiku version"):
init(jax.random.PRNGKey(42))
@test_utils.combined_named_parameters(
(("jit", jax.jit),
("pmap", jax.pmap)),
(("transform", transform.transform),
("transform_with_state", transform.transform_with_state)))
def test_passing_function_to_transform(self, jax_transform, hk_transform):
f = jax_transform(lambda: 0)
with self.assertRaisesRegex(
ValueError,
r"instead .*jax.jit\(hk.transform\(f\).apply\)"):
hk_transform(f)
def test_with_tensorflow_dict_wrapper(self):
class Mod(tf.Module):
def __init__(self):
super().__init__()
self.x = {}
m = Mod().x
self.assertIsInstance(m, Mapping)
self.assertIs(transform.check_mapping("params", m), m)
def test_do_not_store(self):
def my_creator(next_creator, shape, dtype, init, context):
del next_creator, shape, dtype, init, context
return base.DO_NOT_STORE
def my_getter(next_getter, value, context):
assert value is base.DO_NOT_STORE
return next_getter(
context.original_init(context.original_shape, context.original_dtype))
def my_setter(next_setter, value, context):
del next_setter, value, context
return base.DO_NOT_STORE
def f():
with base.custom_creator(my_creator, state=True), \
base.custom_getter(my_getter, state=True), \
base.custom_setter(my_setter):
self.assertEqual(base.get_parameter("w", [], init=jnp.ones), 1)
self.assertEqual(base.get_state("s1", [], init=jnp.ones), 1)
base.set_state("s2", jnp.ones([]))
f = transform.transform_with_state(f)
params, state = f.init(None)
self.assertEmpty(params)
self.assertEmpty(state)
_, state = f.apply({}, {}, None)
self.assertEmpty(state)
def test_signature_transform_with_state(self):
@transform.transform_with_state
def f(pos, key=37) -> int:
del pos, key
return 2
def expected_f_init(
rng: Optional[Union[PRNGKey, int]], pos, key=37
) -> tuple[Params, State]:
del rng, pos, key
raise NotImplementedError
def expected_f_apply(
params: Optional[Params],
state: Optional[State],
rng: Optional[Union[PRNGKey, int]],
pos,
key=37,
) -> tuple[int, State]:
del params, state, rng, pos, key
raise NotImplementedError
self.assertEqual(
inspect.signature(f.init), inspect.signature(expected_f_init))
self.assertEqual(
inspect.signature(f.apply), inspect.signature(expected_f_apply))
def test_signature_transform(self):
@transform.transform
def f(pos, *, key: int = 37) -> int:
del pos, key
return 2
def expected_f_init(rng: Optional[Union[PRNGKey, int]],
pos, *, key: int = 37) -> Params:
del rng, pos, key
raise NotImplementedError
def expected_f_apply(
params: Optional[Params], rng: Optional[Union[PRNGKey, int]],
pos, *, key: int = 37) -> int:
del params, rng, pos, key
raise NotImplementedError
self.assertEqual(
inspect.signature(f.init), inspect.signature(expected_f_init))
self.assertEqual(
inspect.signature(f.apply), inspect.signature(expected_f_apply))
def test_init_return_type_is_mutable(self):
init, _ = transform.transform(lambda: None)
params = init(None)
params["a"] = None # Check type-checker does not complain.
class ObjectWithTransform:
def __init__(self):
self.trampoline = transform.transform(self._trampoline)
self.forward = transform.transform(self._forward)
def _trampoline(self, x):
return self._forward(x)
def _forward(self, x):
w = base.get_parameter("w", [], init=jnp.zeros)
return self, x + w
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/transform_test.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.batch_norm."""
import os
from absl.testing import absltest
from haiku._src import base
from haiku._src import batch_norm
from haiku._src import test_utils
from haiku._src import transform
import jax
import jax.numpy as jnp
import numpy as np
class BatchNormTest(absltest.TestCase):
@test_utils.transform_and_run
def test_basic(self):
data = jnp.arange(2 * 3 * 4, dtype=jnp.float32).reshape([2, 3, 4])
norm = batch_norm.BatchNorm(True, True, 0.9)
result = norm(data, is_training=True)
result_0_replicated = jnp.broadcast_to(result[:, :, :1], result.shape)
    # The input has the same per-channel variance, so every channel
    # normalizes to the same values.
np.testing.assert_allclose(result, result_0_replicated)
    # Running through again in test mode produces the same output.
np.testing.assert_allclose(norm(data, is_training=False), result, rtol=2e-2)
@test_utils.transform_and_run
def test_simple_training(self):
layer = batch_norm.BatchNorm(
create_scale=False, create_offset=False, decay_rate=0.9)
inputs = np.ones([2, 3, 3, 5])
scale = np.full((5,), 0.5)
offset = np.full((5,), 2.0)
result = layer(inputs, True, scale=scale, offset=offset)
np.testing.assert_equal(result, np.full(inputs.shape, 2.0))
@test_utils.transform_and_run
def test_simple_training_nchw(self):
layer = batch_norm.BatchNorm(
create_scale=False,
create_offset=False,
decay_rate=0.9,
data_format="NCHW")
inputs = np.ones([2, 5, 3, 3])
scale = np.full((5, 1, 1), 0.5)
offset = np.full((5, 1, 1), 2.0)
result = layer(inputs, True, scale=scale, offset=offset)
np.testing.assert_equal(result, np.full(inputs.shape, 2.0))
@test_utils.transform_and_run
def test_simple_training_normalized_axes(self):
layer = batch_norm.BatchNorm(
create_scale=False,
create_offset=False,
decay_rate=0.9,
axis=[0, 2, 3]) # Not the second axis.
# This differs only in the second axis.
inputs = np.stack([2.0 * np.ones([5, 3, 3]), np.ones([5, 3, 3])], 1)
result = layer(inputs, True)
# Despite not all values being identical, treating slices from the first
# axis separately leads to a fully normalized = equal array.
np.testing.assert_equal(result, np.zeros(inputs.shape))
def test_simple_training_cross_replica_axis(self):
ldc = jax.local_device_count()
def f(x, is_training=True):
return batch_norm.BatchNorm(
create_scale=False,
create_offset=False,
decay_rate=0.9,
cross_replica_axis="i",
)(x, is_training=is_training)
f = transform.transform_with_state(f)
inputs = np.arange(ldc * 4).reshape(ldc, 4)
key = jax.random.PRNGKey(42)
key = jnp.broadcast_to(key, (ldc, *key.shape))
params, state = jax.pmap(f.init, axis_name="i")(key, inputs)
result, _ = jax.pmap(f.apply, axis_name="i")(params, state, key, inputs)
mean = np.mean(inputs, axis=0)
std = np.std(inputs, axis=0) + 1e-10
expected = (inputs - mean) / std
np.testing.assert_array_almost_equal(result, expected)
def test_simple_training_cross_replica_axis_index_groups(self):
ldc = jax.local_device_count()
if ldc < 2:
self.skipTest("Cross-replica test requires at least 2 devices.")
num_groups = ldc // 2
num_group_devices = ldc // num_groups
# for 8 devices this produces [[0, 1], [2, 3], [4, 5], [6, 7]] groups.
groups = np.arange(ldc).reshape(num_groups, num_group_devices).tolist()
def f(x, is_training=True):
return batch_norm.BatchNorm(
create_scale=False,
create_offset=False,
decay_rate=0.9,
cross_replica_axis="i",
cross_replica_axis_index_groups=groups,
)(x, is_training=is_training)
f = transform.transform_with_state(f)
inputs = np.arange(ldc * 4).reshape(ldc, 4).astype(np.float32)
key = jax.random.PRNGKey(42)
key = jnp.broadcast_to(key, (ldc, *key.shape))
params, state = jax.pmap(f.init, axis_name="i")(key, inputs)
result, _ = jax.pmap(f.apply, axis_name="i")(params, state, key, inputs)
expected = np.empty_like(inputs)
for g in range(num_groups):
group_inputs = inputs[num_group_devices*g:num_group_devices*(g + 1)]
group_mean = np.mean(group_inputs, axis=0)
group_std = np.std(group_inputs, axis=0) + 1e-10
group_inputs = (group_inputs - group_mean) / group_std
expected[num_group_devices*g:num_group_devices*(g + 1)] = group_inputs
np.testing.assert_array_almost_equal(result, expected)
@test_utils.transform_and_run
def test_no_scale_and_offset(self):
layer = batch_norm.BatchNorm(
create_scale=False, create_offset=False, decay_rate=0.9)
inputs = jnp.ones([2, 5, 3, 3, 3])
result = layer(inputs, True)
np.testing.assert_equal(result, np.zeros_like(inputs))
@test_utils.transform_and_run
def test_no_scale_and_init_provided(self):
with self.assertRaisesRegex(
ValueError, "Cannot set `scale_init` if `create_scale=False`"):
batch_norm.BatchNorm(
create_scale=False,
create_offset=True,
decay_rate=0.9,
scale_init=jnp.ones)
@test_utils.transform_and_run
def test_no_offset_beta_init_provided(self):
with self.assertRaisesRegex(
ValueError, "Cannot set `offset_init` if `create_offset=False`"):
batch_norm.BatchNorm(
create_scale=True,
create_offset=False,
decay_rate=0.9,
offset_init=jnp.zeros)
def test_eps_cast_to_var_dtype(self):
# See https://github.com/google/jax/issues/4718 for more info. In the
# context of this test we need to assert NumPy bf16 params/state and a
# Python float for eps preserve bf16 output.
def f(x, is_training):
return batch_norm.BatchNorm(True, True, 0.9, eps=0.1)(x, is_training)
f = transform.transform_with_state(f)
x = np.ones([], jnp.bfloat16)
key = jax.random.PRNGKey(42)
params, state = jax.device_get(f.init(key, x, True))
y, _ = f.apply(params, state, None, x, False)
self.assertEqual(y.dtype, jnp.bfloat16)
def test_no_type_promotion(self):
def get_batch_norm():
return batch_norm.BatchNorm(
create_scale=True, create_offset=True, decay_rate=0.99)
# Initialize the model and "train" it with float32, and check that
# everything is float32.
@transform.transform_with_state
def forward_training(x):
return get_batch_norm()(x, is_training=True)
input_float32 = np.random.normal(size=[100, 5]).astype(np.float32)
rng = jax.random.PRNGKey(0)
params, state = forward_training.init(rng, input_float32)
output, state = forward_training.apply(params, state, rng, input_float32)
self.assertEqual(output.dtype, jnp.float32)
    # Now for eval we want to run with bfloat16. We use a custom getter that
    # ensures that every time we ask for a variable with dtype bfloat16 (the
    # requested dtype is usually determined by the dtype of the inputs), we
    # cast what used to be a float32 into a bfloat16.
def _bfloat16_getter(next_getter, value, context):
if context.original_dtype == jnp.bfloat16:
assert value.dtype == jnp.float32
value = value.astype(jnp.bfloat16)
return next_getter(value)
@transform.transform_with_state
def forward_eval_bfloat16(x):
with base.custom_getter(_bfloat16_getter, state=True):
return get_batch_norm()(x, is_training=False)
    # Run it with bfloat16 inputs and check that the output is still bfloat16.
input_bfloat16 = input_float32.astype(jnp.bfloat16)
output, _ = forward_eval_bfloat16.apply(params, state, rng, input_bfloat16)
self.assertEqual(output.dtype, jnp.bfloat16)
if __name__ == "__main__":
_xla_flags = os.environ.get("XLA_FLAGS", "")
os.environ["XLA_FLAGS"] = (_xla_flags +
" --xla_force_host_platform_device_count=8")
absltest.main()
os.environ["XLA_FLAGS"] = _xla_flags
|
dm-haiku-main
|
haiku/_src/batch_norm_test.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Haiku recurrent core."""
import abc
from collections.abc import Sequence
from typing import Any, NamedTuple, Optional, Union
from haiku._src import base
from haiku._src import basic
from haiku._src import conv
from haiku._src import initializers
from haiku._src import module
from haiku._src import stateful
import jax
import jax.nn
import jax.numpy as jnp
# If you are forking replace this with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
initializers = initializers
Linear = basic.Linear
ConvND = conv.ConvND
get_parameter = base.get_parameter
Module = module.Module
scan = stateful.scan
# pylint: enable=invalid-name
# TODO(slebedev): This makes the module non-forkable.
inside_transform = base.inside_transform
del base, basic, conv, initializers, module
class RNNCore(abc.ABC, hk.Module):
"""Base class for RNN cores.
This class defines the basic functionality that every core should
implement: :meth:`initial_state`, used to construct an example of the
core state; and :meth:`__call__` which applies the core parameterized
by a previous state to an input.
Cores may be used with :func:`dynamic_unroll` and :func:`static_unroll` to
iteratively construct an output sequence from the given input sequence.
"""
@abc.abstractmethod
def __call__(self, inputs, prev_state) -> tuple[Any, Any]:
"""Run one step of the RNN.
Args:
inputs: An arbitrarily nested structure.
prev_state: Previous core state.
Returns:
A tuple with two elements ``output, next_state``. ``output`` is an
arbitrarily nested structure. ``next_state`` is the next core state, this
must be the same shape as ``prev_state``.
"""
@abc.abstractmethod
def initial_state(self, batch_size: Optional[int]):
"""Constructs an initial state for this core.
Args:
batch_size: Optional int or an integral scalar tensor representing
batch size. If None, the core may either fail or (experimentally)
return an initial state without a batch dimension.
Returns:
Arbitrarily nested initial state for this core.
"""
def static_unroll(core, input_sequence, initial_state, time_major=True):
"""Performs a static unroll of an RNN.
An *unroll* corresponds to calling the core on each element of the
input sequence in a loop, carrying the state through::
state = initial_state
for t in range(len(input_sequence)):
outputs, state = core(input_sequence[t], state)
A *static* unroll replaces a loop with its body repeated multiple
times when executed inside :func:`jax.jit`::
state = initial_state
outputs0, state = core(input_sequence[0], state)
outputs1, state = core(input_sequence[1], state)
outputs2, state = core(input_sequence[2], state)
...
See :func:`dynamic_unroll` for a loop-preserving unroll function.
Args:
core: An :class:`RNNCore` to unroll.
input_sequence: An arbitrarily nested structure of tensors of shape
      ``[T, ...]`` if time_major=True, or ``[B, T, ...]`` if time_major=False,
where ``T`` is the number of time steps.
initial_state: An initial state of the given core.
time_major: If True, inputs are expected time-major, otherwise they are
expected batch-major.
Returns:
A tuple with two elements:
* **output_sequence** - An arbitrarily nested structure of tensors
of shape ``[T, ...]`` if time-major, otherwise ``[B, T, ...]``.
* **final_state** - Core state at time step ``T``.
"""
output_sequence = []
time_axis = 0 if time_major else 1
num_steps = jax.tree_util.tree_leaves(input_sequence)[0].shape[time_axis]
state = initial_state
for t in range(num_steps):
if time_major:
inputs = jax.tree_util.tree_map(lambda x, _t=t: x[_t], input_sequence)
else:
inputs = jax.tree_util.tree_map(lambda x, _t=t: x[:, _t], input_sequence)
outputs, state = core(inputs, state)
output_sequence.append(outputs)
# Stack outputs along the time axis.
output_sequence = jax.tree_util.tree_map(
lambda *args: jnp.stack(args, axis=time_axis), *output_sequence)
return output_sequence, state
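# --- Editor-added illustrative sketch; not part of the original Haiku source. ---
# Unrolling an LSTM (defined later in this module) over a time-major sequence
# with static_unroll. `transform` is imported lazily from haiku._src inside the
# sketch, mirroring how the test files in this repository obtain it.
def _example_static_unroll():
  from haiku._src import transform  # Deferred import; sketch only.
  def unroll(seq):
    core = LSTM(hidden_size=4)
    initial = core.initial_state(batch_size=seq.shape[1])
    return static_unroll(core, seq, initial)
  unroll = transform.transform(unroll)
  seq = jnp.ones([5, 2, 3])  # [T, B, F]
  params = unroll.init(jax.random.PRNGKey(0), seq)
  outputs, final_state = unroll.apply(params, None, seq)
  # outputs.shape == (5, 2, 4); final_state.hidden.shape == (2, 4)
  return outputs, final_state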
def _swap_batch_time(inputs):
"""Swaps batch and time axes, assumed to be the first two axes."""
return jax.tree_util.tree_map(lambda x: jnp.swapaxes(x, 0, 1), inputs)
def dynamic_unroll(core,
input_sequence,
initial_state,
time_major=True,
reverse=False,
return_all_states=False,
unroll=1):
"""Performs a dynamic unroll of an RNN.
An *unroll* corresponds to calling the core on each element of the
input sequence in a loop, carrying the state through::
state = initial_state
for t in range(len(input_sequence)):
outputs, state = core(input_sequence[t], state)
A *dynamic* unroll preserves the loop structure when executed inside
:func:`jax.jit`. See :func:`static_unroll` for an unroll function which
replaces a loop with its body repeated multiple times.
Args:
core: An :class:`RNNCore` to unroll.
input_sequence: An arbitrarily nested structure of tensors of shape
      ``[T, ...]`` if time_major=True, or ``[B, T, ...]`` if time_major=False,
where ``T`` is the number of time steps.
initial_state: An initial state of the given core.
time_major: If True, inputs are expected time-major, otherwise they are
expected batch-major.
reverse: If True, inputs are scanned in the reversed order. Equivalent to
reversing the time dimension in both inputs and outputs. See
https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.scan.html for
more details.
return_all_states: If True, all intermediate states are returned rather than
only the last one in time.
unroll: How many scan iterations to unroll within a single iteration
of a loop.
Returns:
A tuple with two elements:
* **output_sequence** - An arbitrarily nested structure of tensors
of shape ``[T, ...]`` if time-major, otherwise ``[B, T, ...]``.
* **state_sequence** - If return_all_states is True, returns the sequence
of core states. Otherwise, core state at time step ``T``.
"""
scan = hk.scan if inside_transform() else jax.lax.scan
# Swap the input and output of core.
def scan_f(prev_state, inputs):
outputs, next_state = core(inputs, prev_state)
if return_all_states:
return next_state, (outputs, next_state)
return next_state, outputs
# TODO(hamzamerzic): Remove axis swapping once scan supports time axis arg.
if not time_major:
input_sequence = _swap_batch_time(input_sequence)
scan_result = scan(
scan_f, initial_state, input_sequence, reverse=reverse, unroll=unroll)
if return_all_states:
_, (output_sequence, state_sequence) = scan_result
else:
last_state, output_sequence = scan_result
if not time_major:
output_sequence = _swap_batch_time(output_sequence)
if return_all_states:
state_sequence = _swap_batch_time(state_sequence)
if return_all_states:
return output_sequence, state_sequence
return output_sequence, last_state
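# --- Editor-added illustrative sketch; not part of the original Haiku source. ---
# The same unroll as in the static sketch above, but with dynamic_unroll, which
# keeps the scan loop intact under jax.jit. Inside a Haiku transform it
# dispatches to hk.scan.
def _example_dynamic_unroll():
  from haiku._src import transform  # Deferred import; sketch only.
  def unroll(seq):
    core = LSTM(hidden_size=4)
    initial = core.initial_state(batch_size=seq.shape[1])
    return dynamic_unroll(core, seq, initial)
  unroll = transform.transform(unroll)
  seq = jnp.ones([7, 2, 3])  # [T, B, F]
  params = unroll.init(jax.random.PRNGKey(0), seq)
  outputs, final_state = unroll.apply(params, None, seq)
  # outputs.shape == (7, 2, 4); final_state.hidden.shape == (2, 4)
  return outputs, final_state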
def add_batch(nest, batch_size: Optional[int]):
"""Adds a batch dimension at axis 0 to the leaves of a nested structure."""
broadcast = lambda x: jnp.broadcast_to(x, (batch_size,) + x.shape)
return jax.tree_util.tree_map(broadcast, nest)
class VanillaRNN(RNNCore):
r"""Basic fully-connected RNN core.
Given :math:`x_t` and the previous hidden state :math:`h_{t-1}` the
core computes
.. math::
h_t = \operatorname{ReLU}(w_i x_t + b_i + w_h h_{t-1} + b_h)
The output is equal to the new state, :math:`h_t`.
"""
def __init__(
self,
hidden_size: int,
double_bias: bool = True,
name: Optional[str] = None
):
"""Constructs a vanilla RNN core.
Args:
hidden_size: Hidden layer size.
      double_bias: Whether to use a bias in the two linear layers. Using two
        biases does not change what the cell can compute; it only creates two
        sets of bias parameters rather than one.
name: Name of the module.
"""
super().__init__(name=name)
self.hidden_size = hidden_size
self.double_bias = double_bias
def __call__(self, inputs, prev_state):
input_to_hidden = hk.Linear(self.hidden_size)
# TODO(b/173771088): Consider changing default to double_bias=False.
hidden_to_hidden = hk.Linear(self.hidden_size, with_bias=self.double_bias)
out = jax.nn.relu(input_to_hidden(inputs) + hidden_to_hidden(prev_state))
return out, out
def initial_state(self, batch_size: Optional[int]):
state = jnp.zeros([self.hidden_size])
if batch_size is not None:
state = add_batch(state, batch_size)
return state
class LSTMState(NamedTuple):
"""An LSTM core state consists of hidden and cell vectors.
Attributes:
hidden: Hidden state.
cell: Cell state.
"""
hidden: jax.Array
cell: jax.Array
class LSTM(RNNCore):
r"""Long short-term memory (LSTM) RNN core.
The implementation is based on :cite:`zaremba2014recurrent`. Given
:math:`x_t` and the previous state :math:`(h_{t-1}, c_{t-1})` the core
computes
.. math::
\begin{array}{ll}
i_t = \sigma(W_{ii} x_t + W_{hi} h_{t-1} + b_i) \\
f_t = \sigma(W_{if} x_t + W_{hf} h_{t-1} + b_f) \\
g_t = \tanh(W_{ig} x_t + W_{hg} h_{t-1} + b_g) \\
o_t = \sigma(W_{io} x_t + W_{ho} h_{t-1} + b_o) \\
c_t = f_t c_{t-1} + i_t g_t \\
h_t = o_t \tanh(c_t)
\end{array}
where :math:`i_t`, :math:`f_t`, :math:`o_t` are input, forget and
output gate activations, and :math:`g_t` is a vector of cell updates.
The output is equal to the new hidden, :math:`h_t`.
Notes:
Forget gate initialization:
Following :cite:`jozefowicz2015empirical` we add 1.0 to :math:`b_f`
after initialization in order to reduce the scale of forgetting in
the beginning of the training.
"""
def __init__(self, hidden_size: int, name: Optional[str] = None):
"""Constructs an LSTM.
Args:
hidden_size: Hidden layer size.
name: Name of the module.
"""
super().__init__(name=name)
self.hidden_size = hidden_size
def __call__(
self,
inputs: jax.Array,
prev_state: LSTMState,
) -> tuple[jax.Array, LSTMState]:
if len(inputs.shape) > 2 or not inputs.shape:
raise ValueError("LSTM input must be rank-1 or rank-2.")
x_and_h = jnp.concatenate([inputs, prev_state.hidden], axis=-1)
gated = hk.Linear(4 * self.hidden_size)(x_and_h)
# TODO(slebedev): Consider aligning the order of gates with Sonnet.
# i = input, g = cell_gate, f = forget_gate, o = output_gate
i, g, f, o = jnp.split(gated, indices_or_sections=4, axis=-1)
f = jax.nn.sigmoid(f + 1) # Forget bias, as in sonnet.
c = f * prev_state.cell + jax.nn.sigmoid(i) * jnp.tanh(g)
h = jax.nn.sigmoid(o) * jnp.tanh(c)
return h, LSTMState(h, c)
def initial_state(self, batch_size: Optional[int]) -> LSTMState:
state = LSTMState(hidden=jnp.zeros([self.hidden_size]),
cell=jnp.zeros([self.hidden_size]))
if batch_size is not None:
state = add_batch(state, batch_size)
return state
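# --- Editor-added illustrative sketch; not part of the original Haiku source. ---
# A single LSTM step on a rank-2 (batched) input. The hidden size and shapes
# are arbitrary example values.
def _example_lstm_step():
  from haiku._src import transform  # Deferred import; sketch only.
  def forward(x):
    core = LSTM(hidden_size=4)
    state = core.initial_state(batch_size=x.shape[0])
    return core(x, state)
  forward = transform.transform(forward)
  x = jnp.ones([2, 3])  # [batch, features]
  params = forward.init(jax.random.PRNGKey(0), x)
  out, next_state = forward.apply(params, None, x)
  # out.shape == (2, 4) and out is identical to next_state.hidden.
  return out, next_state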
class ConvNDLSTM(RNNCore):
r"""``num_spatial_dims``-D convolutional LSTM.
The implementation is based on :cite:`xingjian2015convolutional`.
Given :math:`x_t` and the previous state :math:`(h_{t-1}, c_{t-1})`
the core computes
.. math::
\begin{array}{ll}
i_t = \sigma(W_{ii} * x_t + W_{hi} * h_{t-1} + b_i) \\
f_t = \sigma(W_{if} * x_t + W_{hf} * h_{t-1} + b_f) \\
g_t = \tanh(W_{ig} * x_t + W_{hg} * h_{t-1} + b_g) \\
o_t = \sigma(W_{io} * x_t + W_{ho} * h_{t-1} + b_o) \\
c_t = f_t c_{t-1} + i_t g_t \\
h_t = o_t \tanh(c_t)
\end{array}
where :math:`*` denotes the convolution operator; :math:`i_t`,
:math:`f_t`, :math:`o_t` are input, forget and output gate activations,
and :math:`g_t` is a vector of cell updates.
The output is equal to the new hidden state, :math:`h_t`.
Notes:
Forget gate initialization:
Following :cite:`jozefowicz2015empirical` we add 1.0 to :math:`b_f`
after initialization in order to reduce the scale of forgetting in
the beginning of the training.
"""
def __init__(
self,
num_spatial_dims: int,
input_shape: Sequence[int],
output_channels: int,
kernel_shape: Union[int, Sequence[int]],
name: Optional[str] = None,
):
"""Constructs a convolutional LSTM.
Args:
num_spatial_dims: Number of spatial dimensions of the input.
input_shape: Shape of the inputs excluding batch size.
output_channels: Number of output channels.
kernel_shape: Sequence of kernel sizes (of length ``num_spatial_dims``),
or an int. ``kernel_shape`` will be expanded to define a kernel size in
all dimensions.
name: Name of the module.
"""
super().__init__(name=name)
self.num_spatial_dims = num_spatial_dims
self.input_shape = tuple(input_shape)
self.output_channels = output_channels
self.kernel_shape = kernel_shape
def __call__(
self,
inputs,
state: LSTMState,
) -> tuple[jax.Array, LSTMState]:
input_to_hidden = hk.ConvND(
num_spatial_dims=self.num_spatial_dims,
output_channels=4 * self.output_channels,
kernel_shape=self.kernel_shape,
name="input_to_hidden")
hidden_to_hidden = hk.ConvND(
num_spatial_dims=self.num_spatial_dims,
output_channels=4 * self.output_channels,
kernel_shape=self.kernel_shape,
name="hidden_to_hidden")
gates = input_to_hidden(inputs) + hidden_to_hidden(state.hidden)
i, g, f, o = jnp.split(gates, indices_or_sections=4, axis=-1)
f = jax.nn.sigmoid(f + 1)
c = f * state.cell + jax.nn.sigmoid(i) * jnp.tanh(g)
h = jax.nn.sigmoid(o) * jnp.tanh(c)
return h, LSTMState(h, c)
def initial_state(self, batch_size: Optional[int]) -> LSTMState:
shape = self.input_shape + (self.output_channels,)
state = LSTMState(jnp.zeros(shape), jnp.zeros(shape))
if batch_size is not None:
state = add_batch(state, batch_size)
return state
class Conv1DLSTM(ConvNDLSTM): # pylint: disable=empty-docstring
__doc__ = ConvNDLSTM.__doc__.replace("``num_spatial_dims``", "1")
def __init__(
self,
input_shape: Sequence[int],
output_channels: int,
kernel_shape: Union[int, Sequence[int]],
name: Optional[str] = None,
):
"""Constructs a 1-D convolutional LSTM.
Args:
input_shape: Shape of the inputs excluding batch size.
output_channels: Number of output channels.
kernel_shape: Sequence of kernel sizes (of length 1), or an int.
``kernel_shape`` will be expanded to define a kernel size in all
dimensions.
name: Name of the module.
"""
super().__init__(
num_spatial_dims=1,
input_shape=input_shape,
output_channels=output_channels,
kernel_shape=kernel_shape,
name=name)
class Conv2DLSTM(ConvNDLSTM): # pylint: disable=empty-docstring
__doc__ = ConvNDLSTM.__doc__.replace("``num_spatial_dims``", "2")
def __init__(
self,
input_shape: Sequence[int],
output_channels: int,
kernel_shape: Union[int, Sequence[int]],
name: Optional[str] = None,
):
"""Constructs a 2-D convolutional LSTM.
Args:
input_shape: Shape of the inputs excluding batch size.
output_channels: Number of output channels.
kernel_shape: Sequence of kernel sizes (of length 2), or an int.
``kernel_shape`` will be expanded to define a kernel size in all
dimensions.
name: Name of the module.
"""
super().__init__(
num_spatial_dims=2,
input_shape=input_shape,
output_channels=output_channels,
kernel_shape=kernel_shape,
name=name)
class Conv3DLSTM(ConvNDLSTM): # pylint: disable=empty-docstring
__doc__ = ConvNDLSTM.__doc__.replace("``num_spatial_dims``", "3")
def __init__(
self,
input_shape: Sequence[int],
output_channels: int,
kernel_shape: Union[int, Sequence[int]],
name: Optional[str] = None,
):
"""Constructs a 3-D convolutional LSTM.
Args:
input_shape: Shape of the inputs excluding batch size.
output_channels: Number of output channels.
kernel_shape: Sequence of kernel sizes (of length 3), or an int.
``kernel_shape`` will be expanded to define a kernel size in all
dimensions.
name: Name of the module.
"""
super().__init__(
num_spatial_dims=3,
input_shape=input_shape,
output_channels=output_channels,
kernel_shape=kernel_shape,
name=name)
class GRU(RNNCore):
r"""Gated Recurrent Unit.
The implementation is based on: https://arxiv.org/pdf/1412.3555v1.pdf with
biases.
Given :math:`x_t` and the previous state :math:`h_{t-1}` the core computes
.. math::
\begin{array}{ll}
z_t &= \sigma(W_{iz} x_t + W_{hz} h_{t-1} + b_z) \\
r_t &= \sigma(W_{ir} x_t + W_{hr} h_{t-1} + b_r) \\
a_t &= \tanh(W_{ia} x_t + W_{ha} (r_t \bigodot h_{t-1}) + b_a) \\
h_t &= (1 - z_t) \bigodot h_{t-1} + z_t \bigodot a_t
\end{array}
  where :math:`z_t` and :math:`r_t` are the update and reset gates.
The output is equal to the new hidden state, :math:`h_t`.
"""
def __init__(
self,
hidden_size: int,
w_i_init: Optional[hk.initializers.Initializer] = None,
w_h_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None,
name: Optional[str] = None,
):
super().__init__(name=name)
self.hidden_size = hidden_size
self.w_i_init = w_i_init or hk.initializers.VarianceScaling()
self.w_h_init = w_h_init or hk.initializers.VarianceScaling()
self.b_init = b_init or jnp.zeros
def __call__(self, inputs, state):
if inputs.ndim not in (1, 2):
raise ValueError("GRU input must be rank-1 or rank-2.")
input_size = inputs.shape[-1]
hidden_size = self.hidden_size
w_i = hk.get_parameter("w_i", [input_size, 3 * hidden_size], inputs.dtype,
init=self.w_i_init)
w_h = hk.get_parameter("w_h", [hidden_size, 3 * hidden_size], inputs.dtype,
init=self.w_h_init)
b = hk.get_parameter("b", [3 * hidden_size], inputs.dtype, init=self.b_init)
w_h_z, w_h_a = jnp.split(w_h, indices_or_sections=[2 * hidden_size], axis=1)
b_z, b_a = jnp.split(b, indices_or_sections=[2 * hidden_size], axis=0)
gates_x = jnp.matmul(inputs, w_i)
zr_x, a_x = jnp.split(
gates_x, indices_or_sections=[2 * hidden_size], axis=-1)
zr_h = jnp.matmul(state, w_h_z)
zr = zr_x + zr_h + jnp.broadcast_to(b_z, zr_h.shape)
z, r = jnp.split(jax.nn.sigmoid(zr), indices_or_sections=2, axis=-1)
a_h = jnp.matmul(r * state, w_h_a)
a = jnp.tanh(a_x + a_h + jnp.broadcast_to(b_a, a_h.shape))
next_state = (1 - z) * state + z * a
return next_state, next_state
def initial_state(self, batch_size: Optional[int]):
state = jnp.zeros([self.hidden_size])
if batch_size is not None:
state = add_batch(state, batch_size)
return state
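# --- Editor-added illustrative sketch; not part of the original Haiku source. ---
# A single GRU step, highlighting how the three gates are packed into single
# parameter matrices of width 3 * hidden_size.
def _example_gru_step():
  from haiku._src import transform  # Deferred import; sketch only.
  def forward(x):
    core = GRU(hidden_size=4)
    state = core.initial_state(batch_size=x.shape[0])
    return core(x, state)
  forward = transform.transform(forward)
  x = jnp.ones([2, 3])  # [batch, features]
  params = forward.init(jax.random.PRNGKey(0), x)
  # params["gru"]["w_i"].shape == (3, 12)  # [input_size, 3 * hidden_size]
  # params["gru"]["w_h"].shape == (4, 12)  # [hidden_size, 3 * hidden_size]
  # params["gru"]["b"].shape == (12,)
  out, next_state = forward.apply(params, None, x)
  return out, next_state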
class IdentityCore(RNNCore):
"""A recurrent core that forwards the inputs and an empty state.
This is commonly used when switching between recurrent and feedforward
versions of a model while preserving the same interface.
"""
def __call__(self, inputs, state):
return inputs, state
def initial_state(self, batch_size: Optional[int]):
return ()
def _validate_and_conform(should_reset, state):
"""Ensures that should_reset is compatible with state."""
if should_reset.shape == state.shape[:should_reset.ndim]:
broadcast_shape = should_reset.shape + (1,)*(state.ndim - should_reset.ndim)
return jnp.reshape(should_reset, broadcast_shape)
raise ValueError(
"should_reset signal shape {} is not compatible with "
"state shape {}".format(should_reset.shape, state.shape))
class ResetCore(RNNCore):
"""A wrapper for managing state resets during unrolls.
When unrolling an :class:`RNNCore` on a batch of inputs sequences it may be
necessary to reset the core's state at different timesteps for different
elements of the batch. The :class:`ResetCore` class enables this by taking a
batch of ``should_reset`` booleans in addition to the batch of inputs, and
conditionally resetting the core's state for individual elements of the batch.
You may also reset individual entries of the state by passing a
``should_reset`` nest compatible with the state structure.
"""
def __init__(self, core: RNNCore, name: Optional[str] = None):
super().__init__(name=name)
self.core = core
def __call__(self, inputs, state):
"""Run one step of the wrapped core, handling state reset.
Args:
inputs: Tuple with two elements, ``inputs, should_reset``, where
``should_reset`` is the signal used to reset the wrapped core's state.
        ``should_reset`` can be either a tensor or a nest. If a nest,
        ``should_reset`` must match the state structure, and the shape of each
        of its components must be a prefix of the shape of the corresponding
        entry in the state nest. If a tensor, supported shapes are all common
        shape prefixes of the state component tensors, e.g. ``[batch_size]``.
state: Previous wrapped core state.
Returns:
Tuple of the wrapped core's ``output, next_state``.
"""
inputs, should_reset = inputs
if jax.tree_util.treedef_is_leaf(
jax.tree_util.tree_structure(should_reset)):
# Equivalent to not tree.is_nested, but with support for Jax extensible
# pytrees.
should_reset = jax.tree_util.tree_map(lambda _: should_reset, state)
# We now need to manually pad 'on the right' to ensure broadcasting operates
# correctly.
# Automatic broadcasting would in fact implicitly pad 'on the left',
# resulting in the signal to trigger resets for parts of the state
# across batch entries. For example:
#
# import jax
# import jax.numpy as jnp
#
# shape = (2, 2, 2)
# x = jnp.zeros(shape)
# y = jnp.ones(shape)
# should_reset = jnp.array([False, True])
# v = jnp.where(should_reset, x, y)
# for batch_entry in range(shape[0]):
# print("batch_entry {}:\n".format(batch_entry), v[batch_entry])
#
# >> batch_entry 0:
# >> [[1. 0.]
# >> [1. 0.]]
# >> batch_entry 1:
# >> [[1. 0.]
# >> [1. 0.]]
#
# Note how manually padding the should_reset tensor yields the desired
# behavior.
#
# import jax
# import jax.numpy as jnp
#
# shape = (2, 2, 2)
# x = jnp.zeros(shape)
# y = jnp.ones(shape)
# should_reset = jnp.array([False, True])
# dims_to_add = x.ndim - should_reset.ndim
# should_reset = should_reset.reshape(should_reset.shape + (1,)*dims_to_add)
# v = jnp.where(should_reset, x, y)
# for batch_entry in range(shape[0]):
# print("batch_entry {}:\n".format(batch_entry), v[batch_entry])
#
# >> batch_entry 0:
# >> [[1. 1.]
# >> [1. 1.]]
# >> batch_entry 1:
# >> [[0. 0.]
# >> [0. 0.]]
should_reset = jax.tree_util.tree_map(
_validate_and_conform, should_reset, state)
if self._is_batched(state):
batch_size = jax.tree_util.tree_leaves(inputs)[0].shape[0]
else:
batch_size = None
initial_state = jax.tree_util.tree_map(
lambda s, i: i.astype(s.dtype), state, self.initial_state(batch_size))
state = jax.tree_util.tree_map(
jnp.where, should_reset, initial_state, state)
return self.core(inputs, state)
def initial_state(self, batch_size: Optional[int]):
return self.core.initial_state(batch_size)
def _is_batched(self, state):
state = jax.tree_util.tree_leaves(state)
if not state: # Empty state is treated as unbatched.
return False
batched = jax.tree_util.tree_leaves(self.initial_state(batch_size=1))
return all(b.shape[1:] == s.shape[1:] for b, s in zip(batched, state))
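# --- Editor-added illustrative sketch; not part of the original Haiku source. ---
# Resetting the state of one batch entry: ResetCore takes `(inputs,
# should_reset)` and replaces the wrapped core's state with its initial state
# wherever `should_reset` is True.
def _example_reset_core():
  from haiku._src import transform  # Deferred import; sketch only.
  def forward(x, should_reset):
    core = ResetCore(LSTM(hidden_size=4))
    state = core.initial_state(batch_size=x.shape[0])
    return core((x, should_reset), state)
  forward = transform.transform(forward)
  x = jnp.ones([2, 3])
  should_reset = jnp.array([False, True])  # Reset only the second batch entry.
  params = forward.init(jax.random.PRNGKey(0), x, should_reset)
  out, next_state = forward.apply(params, None, x, should_reset)
  return out, next_state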
class _DeepRNN(RNNCore):
"""Underlying implementation of DeepRNN with skip connections."""
def __init__(
self,
layers: Sequence[Any],
skip_connections: bool,
name: Optional[str] = None
):
super().__init__(name=name)
self.layers = layers
self.skip_connections = skip_connections
if skip_connections:
for layer in layers:
if not isinstance(layer, RNNCore):
raise ValueError("skip_connections requires for all layers to be "
"`hk.RNNCore`s. Layers is: {}".format(layers))
def __call__(self, inputs, state):
current_inputs = inputs
next_states = []
outputs = []
state_idx = 0
concat = lambda *args: jnp.concatenate(args, axis=-1)
for idx, layer in enumerate(self.layers):
if self.skip_connections and idx > 0:
current_inputs = jax.tree_util.tree_map(concat, inputs, current_inputs)
if isinstance(layer, RNNCore):
current_inputs, next_state = layer(current_inputs, state[state_idx])
outputs.append(current_inputs)
next_states.append(next_state)
state_idx += 1
else:
current_inputs = layer(current_inputs)
if self.skip_connections:
out = jax.tree_util.tree_map(concat, *outputs)
else:
out = current_inputs
return out, tuple(next_states)
def initial_state(self, batch_size: Optional[int]):
return tuple(
layer.initial_state(batch_size)
for layer in self.layers
if isinstance(layer, RNNCore))
class DeepRNN(_DeepRNN):
r"""Wraps a sequence of cores and callables as a single core.
>>> deep_rnn = hk.DeepRNN([
... hk.LSTM(hidden_size=4),
... jax.nn.relu,
... hk.LSTM(hidden_size=2),
... ])
The state of a :class:`DeepRNN` is a tuple with one element per
:class:`RNNCore`. If no layers are :class:`RNNCore`\ s, the state is an empty
tuple.
"""
def __init__(self, layers: Sequence[Any], name: Optional[str] = None):
super().__init__(layers, skip_connections=False, name=name)
def deep_rnn_with_skip_connections(layers: Sequence[RNNCore],
name: Optional[str] = None) -> RNNCore:
r"""Constructs a :class:`DeepRNN` with skip connections.
Skip connections alter the dependency structure within a :class:`DeepRNN`.
Specifically, input to the i-th layer (i > 0) is given by a
concatenation of the core's inputs and the outputs of the (i-1)-th layer.
The output of the :class:`DeepRNN` is the concatenation of the outputs of all
cores.
.. code-block:: python
outputs0, ... = layers[0](inputs, ...)
     outputs1, ... = layers[1](jnp.concatenate([inputs, outputs0], axis=-1), ...)
     outputs2, ... = layers[2](jnp.concatenate([inputs, outputs1], axis=-1), ...)
...
Args:
layers: List of :class:`RNNCore`\ s.
name: Name of the module.
Returns:
A :class:`_DeepRNN` with skip connections.
Raises:
ValueError: If any of the layers is not an :class:`RNNCore`.
"""
return _DeepRNN(layers, skip_connections=True, name=name)
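# --- Editor-added illustrative sketch; not part of the original Haiku source. ---
# With skip connections the second core sees [inputs, outputs0] concatenated on
# the feature axis, and the overall output concatenates both cores' outputs.
def _example_deep_rnn_with_skips():
  from haiku._src import transform  # Deferred import; sketch only.
  def forward(x):
    core = deep_rnn_with_skip_connections([
        LSTM(hidden_size=4),
        LSTM(hidden_size=2),
    ])
    state = core.initial_state(batch_size=x.shape[0])
    return core(x, state)
  forward = transform.transform(forward)
  x = jnp.ones([3, 5])
  params = forward.init(jax.random.PRNGKey(0), x)
  out, state = forward.apply(params, None, x)
  # out.shape == (3, 4 + 2): the outputs of both LSTMs concatenated.
  return out, state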
|
dm-haiku-main
|
haiku/_src/recurrent.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for filtering parameters and state in Haiku."""
import collections
from collections.abc import Generator, Mapping, MutableMapping
from typing import (Any, Callable, TypeVar)
from haiku._src import data_structures
from haiku._src import utils
import jax
T = TypeVar("T")
InT = TypeVar("InT")
OutT = TypeVar("OutT")
def traverse(
structure: Mapping[str, Mapping[str, T]],
) -> Generator[tuple[str, str, T], None, None]:
"""Iterates over a structure yielding module names, names and values.
NOTE: Items are iterated in key sorted order.
Args:
structure: The structure to traverse.
Yields:
Tuples of the module name, name and value from the given structure.
"""
for module_name in sorted(structure):
bundle = structure[module_name]
for name in sorted(bundle):
value = bundle[name]
yield module_name, name, value
def partition(
predicate: Callable[[str, str, jax.Array], bool],
structure: Mapping[str, Mapping[str, T]],
) -> tuple[Mapping[str, Mapping[str, T]], Mapping[str, Mapping[str, T]]]:
"""Partitions the input structure in two according to a given predicate.
For a given set of parameters, you can use :func:`partition` to split them:
>>> params = {'linear': {'w': None, 'b': None}}
>>> predicate = lambda module_name, name, value: name == 'w'
>>> weights, biases = hk.data_structures.partition(predicate, params)
>>> weights
{'linear': {'w': None}}
>>> biases
{'linear': {'b': None}}
Note: returns new structures not a view.
Args:
predicate: criterion to be used to partition the input data.
The ``predicate`` argument is expected to be a boolean function taking as
inputs the name of the module, the name of a given entry in the module
data bundle (e.g. parameter name) and the corresponding data.
structure: Haiku params or state data structure to be partitioned.
Returns:
A tuple containing all the params or state as partitioned by the input
predicate. Entries matching the predicate will be in the first structure,
and the rest will be in the second.
"""
f = lambda m, n, v: int(not predicate(m, n, v))
return partition_n(f, structure, 2)
def partition_n(
fn: Callable[[str, str, T], int],
structure: Mapping[str, Mapping[str, T]],
n: int,
) -> tuple[Mapping[str, Mapping[str, T]], ...]:
"""Partitions a structure into `n` structures.
For a given set of parameters, you can use :func:`partition_n` to split them
into ``n`` groups. For example, to split your parameters/gradients by module
name:
>>> def partition_by_module(structure):
... cnt = itertools.count()
... d = collections.defaultdict(lambda: next(cnt))
... fn = lambda m, n, v: d[m]
... return hk.data_structures.partition_n(fn, structure, len(structure))
>>> structure = {f'layer_{i}': {'w': None, 'b': None} for i in range(3)}
>>> for substructure in partition_by_module(structure):
... print(substructure)
{'layer_0': {'b': None, 'w': None}}
{'layer_1': {'b': None, 'w': None}}
{'layer_2': {'b': None, 'w': None}}
Args:
fn: Callable returning which bucket in ``[0, n)`` the given element should
be output.
structure: Haiku params or state data structure to be partitioned.
n: The total number of buckets.
Returns:
A tuple of size ``n``, where each element will contain the values for which
the function returned the current index.
"""
out = [collections.defaultdict(dict) for _ in range(n)]
for module_name, name, value in traverse(structure):
i = fn(module_name, name, value)
assert 0 <= i < n, f"{i} must be in range [0, {n})"
out[i][module_name][name] = value
return tuple(data_structures.to_haiku_dict(o) for o in out)
def filter( # pylint: disable=redefined-builtin
predicate: Callable[[str, str, T], bool],
structure: Mapping[str, Mapping[str, T]],
) -> Mapping[str, Mapping[str, T]]:
"""Filters an input structure according to a user specified predicate.
>>> params = {'linear': {'w': None, 'b': None}}
>>> predicate = lambda module_name, name, value: name == 'w'
>>> hk.data_structures.filter(predicate, params)
{'linear': {'w': None}}
Note: returns a new structure not a view.
Args:
predicate: criterion to be used to partition the input data.
The ``predicate`` argument is expected to be a boolean function taking as
inputs the name of the module, the name of a given entry in the module
data bundle (e.g. parameter name) and the corresponding data.
structure: Haiku params or state data structure to be filtered.
Returns:
All the input parameters or state as selected by the input predicate.
"""
out = collections.defaultdict(dict)
for module_name, name, value in traverse(structure):
if predicate(module_name, name, value):
out[module_name][name] = value
return data_structures.to_haiku_dict(out)
def map( # pylint: disable=redefined-builtin
fn: Callable[[str, str, InT], OutT],
structure: Mapping[str, Mapping[str, InT]],
) -> Mapping[str, Mapping[str, OutT]]:
"""Maps a function to an input structure accordingly.
>>> params = {'linear': {'w': 1.0, 'b': 2.0}}
>>> fn = lambda module_name, name, value: 2 * value if name == 'w' else value
>>> hk.data_structures.map(fn, params)
{'linear': {'b': 2.0, 'w': 2.0}}
Note: returns a new structure not a view.
Args:
    fn: function to apply to each entry of the input structure.
The ``fn`` argument is expected to be a function taking as inputs the
name of the module, the name of a given entry in the module data bundle
(e.g. parameter name) and the corresponding data, and returning a new
value.
structure: Haiku params or state data structure to be mapped.
Returns:
All the input parameters or state as mapped by the input fn.
"""
out = collections.defaultdict(dict)
for module_name, name, value in traverse(structure):
out[module_name][name] = fn(module_name, name, value)
return data_structures.to_haiku_dict(out)
def merge(
*structures: Mapping[str, Mapping[str, Any]],
check_duplicates: bool = False,
) -> MutableMapping[str, MutableMapping[str, Any]]:
"""Merges multiple input structures.
>>> weights = {'linear': {'w': None}}
>>> biases = {'linear': {'b': None}}
>>> hk.data_structures.merge(weights, biases)
{'linear': {'w': None, 'b': None}}
When structures are not disjoint the output will contain the value from the
last structure for each path:
>>> weights1 = {'linear': {'w': 1}}
>>> weights2 = {'linear': {'w': 2}}
>>> hk.data_structures.merge(weights1, weights2)
{'linear': {'w': 2}}
Note: returns a new structure not a view.
Args:
*structures: One or more structures to merge.
check_duplicates: If True, a ValueError will be thrown if an array is
      found in multiple structures but with a different shape or dtype.
Returns:
A single structure with an entry for each path in the input structures.
"""
array_like = lambda o: hasattr(o, "shape") and hasattr(o, "dtype")
shaped = lambda a: (a.shape, a.dtype) if array_like(a) else None
fmt = lambda a: utils.format_array(a) if array_like(a) else repr(a)
out = collections.defaultdict(dict)
for structure in structures:
for module_name, name, value in traverse(structure):
if check_duplicates and (name in out[module_name]):
previous = out[module_name][name]
if shaped(previous) != shaped(value):
raise ValueError(
"Duplicate array found with different shape/dtype for "
f"{module_name}.{name}: {fmt(previous)} vs {fmt(value)}.")
out[module_name][name] = value
return data_structures.to_haiku_dict(out)
def is_subset(
*,
subset: Mapping[str, Mapping[str, Any]],
superset: Mapping[str, Mapping[str, Any]],
) -> bool:
"""Checks whether the leaves of subset appear in superset.
Note that this is vacuously true in the case that both structures have no
leaves:
>>> hk.data_structures.is_subset(subset={'a': {}}, superset={})
True
Args:
subset: The subset to check.
superset: The superset to check.
Returns:
A boolean indicating whether all elements in subset are contained in
superset.
"""
subset = {(m, n) for m, n, _ in traverse(subset)}
superset = {(m, n) for m, n, _ in traverse(superset)}
return subset.issubset(superset)
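# --- Editor-added illustrative sketch; not part of the original Haiku source. ---
# partition/map/merge compose naturally: split off the "w" entries of a
# params-like structure, rescale them, and stitch everything back together.
# The toy values below are arbitrary.
def _example_selective_scaling():
  params = {"linear": {"w": 1.0, "b": 2.0}}
  weights, rest = partition(lambda m, n, v: n == "w", params)
  scaled = map(lambda m, n, v: 0.99 * v, weights)  # The module-level `map`.
  return merge(scaled, rest)  # {'linear': {'w': 0.99, 'b': 2.0}}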
|
dm-haiku-main
|
haiku/_src/filtering.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.pad."""
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import pad
class PadTest(parameterized.TestCase):
def test_padding_2d(self):
a = pad.create_from_padfn((pad.causal, pad.full), (3), (1, 1), 2)
self.assertEqual(a, ((2, 0), (2, 2)))
def test_padding_1d(self):
a = pad.create_from_padfn(pad.full, 3, 1, 1)
self.assertEqual(a, ((2, 2),))
def test_padding_3d(self):
a = pad.create_from_padfn((pad.causal, pad.full, pad.full), (3, 2, 3), (1),
3)
self.assertEqual(a, ((2, 0), (1, 1), (2, 2)))
@parameterized.parameters((2, (2, 2)), (3, (4, 4, 4, 4)), ((2, 2), 3),
((4, 4, 4, 4), 3))
def test_padding_incorrect_input(self, kernel_size, rate):
with self.assertRaisesRegex(
TypeError,
r"must be a scalar or sequence of length 1 or sequence of length 3."):
pad.create_from_padfn(pad.full, kernel_size, rate, 3)
def test_padding_valid(self):
a = pad.create_from_padfn(pad.valid, 4, 3, 2)
self.assertEqual(a, ((0, 0), (0, 0)))
def test_padding_same(self):
a = pad.create_from_padfn(pad.same, 4, 3, 2)
self.assertEqual(a, ((4, 5), (4, 5)))
def test_padding_full(self):
a = pad.create_from_padfn(pad.full, 4, 3, 2)
self.assertEqual(a, ((9, 9), (9, 9)))
def test_padding_causal(self):
a = pad.create_from_padfn(pad.causal, 4, 3, 2)
self.assertEqual(a, ((9, 0), (9, 0)))
def test_padding_reverse_causal(self):
a = pad.create_from_padfn(pad.reverse_causal, 4, 3, 2)
self.assertEqual(a, ((0, 9), (0, 9)))
def test_pad_tuple(self):
a = pad.create_from_tuple((1, 1), 2)
self.assertEqual(a, ((1, 1), (1, 1)))
def test_pad_sequence_tuple(self):
a = pad.create_from_tuple([(1, 1), (2, 20)], 2)
self.assertEqual(a, ((1, 1), (2, 20)))
def test_pad_sequence_tuple_wrong_length(self):
with self.assertRaisesRegex(
TypeError, r"must be a Tuple\[int, int\] or sequence of length 1"):
pad.create_from_tuple([(1, 1), (2, 20)], 3)
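# --- Editor-added note; not part of the original test file. ---
# The expected values above follow from the dilated ("effective") kernel size,
# effective_k = (kernel - 1) * rate + 1. For kernel=4, rate=3, effective_k = 10:
#   pad.full           -> effective_k - 1 = 9 on both sides   -> (9, 9)
#   pad.causal         -> 9 on the left only                  -> (9, 0)
#   pad.reverse_causal -> 9 on the right only                 -> (0, 9)
#   pad.same           -> 9 in total, split as (4, 5)         -> (4, 5)
#   pad.valid          -> no padding                          -> (0, 0)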
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/pad_test.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reshaping Haiku modules."""
from collections.abc import Sequence
from typing import Optional
from haiku._src import module
import jax.numpy as jnp
import numpy as np
# If you are forking replace this with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
Module = module.Module
# pylint: enable=invalid-name
del module
def _infer_shape(output_shape, dimensions):
"""Replaces the -1 wildcard in the output shape vector.
This function infers the correct output shape given the input dimensions.
Args:
output_shape: Output shape.
dimensions: List of input non-batch dimensions.
Returns:
Tuple of non-batch output dimensions.
"""
# Size of input.
n = np.prod(dimensions)
# Size of output where defined.
v = np.array(output_shape)
m = abs(np.prod(v))
# Replace wildcard.
v = [n // m if k == -1 else k for k in v]
return tuple(v)
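# --- Editor-added note; not part of the original Haiku source. ---
# Worked example of the wildcard inference above: with non-batch dimensions
# (H, W, C, D) = (2, 3, 4, 5) and output_shape = (-1, 5), n = 2*3*4*5 = 120 and
# m = |(-1) * 5| = 5, so -1 is replaced by 120 // 5 = 24, giving (24, 5).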
class Reshape(hk.Module):
"""Reshapes input Tensor, preserving the batch dimension.
For example, given an input tensor with shape ``[B, H, W, C, D]``::
>>> B, H, W, C, D = range(1, 6)
>>> x = jnp.ones([B, H, W, C, D])
The default behavior when ``output_shape`` is ``(-1, D)`` is to flatten
all dimensions between ``B`` and ``D``::
>>> mod = hk.Reshape(output_shape=(-1, D))
>>> assert mod(x).shape == (B, H*W*C, D)
You can change the number of preserved leading dimensions via
``preserve_dims``::
>>> mod = hk.Reshape(output_shape=(-1, D), preserve_dims=2)
>>> assert mod(x).shape == (B, H, W*C, D)
>>> mod = hk.Reshape(output_shape=(-1, D), preserve_dims=3)
>>> assert mod(x).shape == (B, H, W, C, D)
>>> mod = hk.Reshape(output_shape=(-1, D), preserve_dims=4)
>>> assert mod(x).shape == (B, H, W, C, 1, D)
Alternatively, a negative value of ``preserve_dims`` specifies
the number of trailing dimensions to replace with ``output_shape``::
>>> mod = hk.Reshape(output_shape=(-1, D), preserve_dims=-3)
>>> assert mod(x).shape == (B, H, W*C, D)
This is useful in the case of applying the same module to batched
  and unbatched inputs::
>>> mod = hk.Reshape(output_shape=(-1, D), preserve_dims=-3)
>>> assert mod(x[0]).shape == (H, W*C, D)
"""
def __init__(
self,
output_shape: Sequence[int],
preserve_dims: int = 1,
name: Optional[str] = None,
):
"""Constructs a :class:`Reshape` module.
Args:
output_shape: Shape to reshape the input tensor to while preserving its
first ``preserve_dims`` dimensions. When the special value ``-1``
appears in ``output_shape`` the corresponding size is automatically
inferred. Note that ``-1`` can only appear once in ``output_shape``.
To flatten all non-batch dimensions use :class:`Flatten`.
preserve_dims: Number of leading dimensions that will not be reshaped.
If negative, this is interpreted instead as the number of trailing
dimensions to replace with the new shape.
name: Name of the module.
Raises:
ValueError: If ``preserve_dims`` is zero.
"""
super().__init__(name=name)
if preserve_dims == 0:
raise ValueError("Argument preserve_dims should be non-zero.")
if output_shape.count(-1) > 1:
raise ValueError("-1 can only occur once in `output_shape`.")
self.output_shape = tuple(output_shape)
self.preserve_dims = preserve_dims
def __call__(self, inputs):
if inputs.ndim <= self.preserve_dims:
return inputs
if -1 in self.output_shape:
reshaped_shape = _infer_shape(self.output_shape,
inputs.shape[self.preserve_dims:])
else:
reshaped_shape = self.output_shape
shape = inputs.shape[:self.preserve_dims] + reshaped_shape
return jnp.reshape(inputs, shape)
class Flatten(Reshape):
"""Flattens the input, preserving the batch dimension(s).
By default, :class:`Flatten` combines all dimensions except the first.
Additional leading dimensions can be preserved by setting ``preserve_dims``.
>>> x = jnp.ones([3, 2, 4])
>>> flat = hk.Flatten()
>>> flat(x).shape
(3, 8)
  When the input to flatten has ``preserve_dims`` or fewer dimensions it is
  returned unchanged:
>>> x = jnp.ones([3])
>>> flat(x).shape
(3,)
  Alternatively, a negative value of ``preserve_dims`` specifies the number of
  trailing dimensions flattened:
>>> x = jnp.ones([3, 2, 4])
>>> negative_flat = hk.Flatten(preserve_dims=-2)
>>> negative_flat(x).shape
(3, 8)
This allows the same module to be seamlessly applied to a single element or a
batch of elements with the same element shape:
>> negative_flat(x[0]).shape
(8,)
"""
def __init__(
self,
preserve_dims: int = 1,
name: Optional[str] = None,
):
super().__init__(
output_shape=(-1,),
preserve_dims=preserve_dims,
name=name)
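# --- Editor-added illustrative sketch; not part of the original Haiku source. ---
# Flatten has no parameters, but like any hk.Module it must be built inside a
# Haiku transform. `jax` and `transform` are imported lazily here because this
# module only imports `jax.numpy`.
def _example_flatten():
  import jax  # Deferred imports; sketch only.
  from haiku._src import transform
  def forward(x):
    return Flatten(preserve_dims=1)(x)
  forward = transform.transform(forward)
  x = jnp.ones([3, 2, 4])
  params = forward.init(jax.random.PRNGKey(0), x)  # No parameters are created.
  y = forward.apply(params, None, x)
  # y.shape == (3, 8)
  return y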
|
dm-haiku-main
|
haiku/_src/reshape.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.conv."""
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import conv
from haiku._src import initializers
from haiku._src import test_utils
from haiku._src import transform
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
def create_constant_initializers(w, b, with_bias):
if with_bias:
return {
"w_init": initializers.Constant(w),
"b_init": initializers.Constant(b)
}
else:
return {"w_init": initializers.Constant(w)}
class ConvTest(parameterized.TestCase):
@parameterized.parameters(0, -2)
def testIncorrectN(self, n):
init_fn, _ = transform.transform(
lambda: conv.ConvND(n, output_channels=1, kernel_shape=3))
with self.assertRaisesRegex(
ValueError,
"convolution operations for `num_spatial_dims` greater than 0"):
init_fn(None)
@parameterized.parameters(1, 2, 3)
def test_connect_conv_same(self, n):
input_shape = [2] + [16]*n + [4]
def f():
data = jnp.zeros(input_shape)
net = conv.ConvND(n, output_channels=3, kernel_shape=3,
padding="SAME")
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
expected_output_shape = (2,) + (16,)*n + (3,)
self.assertEqual(out.shape, expected_output_shape)
@parameterized.parameters(1, 2, 3)
def test_connect_conv_valid(self, n):
input_shape = [2] + [16]*n + [4]
def f():
data = jnp.zeros(input_shape)
net = conv.ConvND(n, output_channels=3, kernel_shape=3,
padding="VALID")
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
expected_output_shape = (2,) + (14,)*n + (3,)
self.assertEqual(out.shape, expected_output_shape)
@parameterized.parameters(1, 2, 3)
def test_strided_conv(self, n):
input_shape = [2] + [16]*n + [4]
def f():
data = jnp.zeros(input_shape)
net = conv.ConvND(n, output_channels=3, kernel_shape=3, stride=3)
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
expected_output_shape = (2,) + (6,)*n + (3,)
self.assertEqual(out.shape, expected_output_shape)
@parameterized.parameters(1, 2, 3)
def test_diluted_conv(self, n):
input_shape = [2] + [16]*n + [4]
def f():
data = jnp.zeros(input_shape)
net = conv.ConvND(n, output_channels=3, kernel_shape=3, rate=3)
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
expected_output_shape = (2,) + (16,)*n + (3,)
self.assertEqual(out.shape, expected_output_shape)
@parameterized.parameters(1, 2, 3)
def test_connect_conv_channels_first(self, n):
input_shape = [2, 4] + [16]*n
def f():
data = jnp.zeros(input_shape)
net = conv.ConvND(n, output_channels=3, kernel_shape=3,
data_format="channels_first")
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
expected_output_shape = (2, 3) + (16,)*n
self.assertEqual(out.shape, expected_output_shape)
@parameterized.parameters(1, 2, 3)
def test_unbatched(self, n):
    input_shape = [16]*n + [4]
def f():
data = jnp.zeros(input_shape)
net = conv.ConvND(n, output_channels=3, kernel_shape=3)
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
    expected_output_shape = (16,)*n + (3,)
self.assertEqual(out.shape, expected_output_shape)
@parameterized.parameters(1, 2, 3)
def test_connect_conv_padding_function_valid(self, n):
reached = [0]
def foo(ks): # pylint: disable=unused-argument
reached[0] += 1
return (0, 0)
input_shape = [2] + [16]*n + [4]
def f():
data = jnp.zeros(input_shape)
net = conv.ConvND(n, output_channels=3, kernel_shape=3,
padding=foo)
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
expected_output_shape = (2,) + (14,)*n + (3,)
self.assertEqual(out.shape, expected_output_shape)
self.assertEqual(reached[0], n*2)
@parameterized.parameters(1, 2, 3)
def test_connect_conv_padding_function_same(self, n):
reached = [0]
def foo(ks):
reached[0] += 1
return ((ks-1)//2, ks//2)
input_shape = [2] + [16]*n + [4]
def f():
data = jnp.zeros(input_shape)
net = conv.ConvND(n, output_channels=3, kernel_shape=3,
padding=foo)
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
expected_output_shape = (2,) + (16,)*n + (3,)
self.assertEqual(out.shape, expected_output_shape)
self.assertEqual(reached[0], n*2)
@test_utils.transform_and_run(run_apply=False)
def test_invalid_input_shape(self):
n = 1
input_shape = [2, 4] + [16]*n
with self.assertRaisesRegex(
ValueError, r"Input to ConvND needs to have rank in \[2, 3\]"):
data = jnp.zeros(input_shape * 2)
net = conv.ConvND(n, output_channels=3, kernel_shape=3,
data_format="channels_first")
net(data)
@test_utils.transform_and_run(run_apply=False)
def test_invalid_mask_shape(self):
n = 1
input_shape = [2, 4] + [16]*n
with self.assertRaisesRegex(ValueError, "Mask needs to have the same "
"shape as weights. Shapes are:"):
data = jnp.zeros(input_shape)
net = conv.ConvND(n, output_channels=3, kernel_shape=3,
data_format="channels_first", mask=jnp.ones([1, 5, 1]))
net(data)
@test_utils.transform_and_run
def test_valid_mask_shape(self):
n = 2
input_shape = [2, 4] + [16]*n
data = jnp.zeros(input_shape)
net = conv.ConvND(n, output_channels=3, kernel_shape=3,
data_format="channels_first",
mask=jnp.ones([3, 3, 4, 3]))
out = net(data)
expected_output_shape = (2, 3) + (16,)*n
self.assertEqual(out.shape, expected_output_shape)
@test_utils.transform_and_run
def test_group_conv(self):
batch_size = 3
seqlen = 12
hidden = 32
hidden_out = 64
feature_group_count = 2
inputs = np.zeros((batch_size, seqlen, hidden))
inputs[0, 0, :2] = 1.0
inputs[0, 5, 24] = 1.0
inputs[0, 7, 28:32] = 1.0
data = jnp.asarray(inputs)
net = conv.Conv1D(
output_channels=hidden_out,
kernel_shape=1,
with_bias=False,
feature_group_count=feature_group_count)
out = net(data)
expected_output_shape = (batch_size, seqlen, hidden_out)
self.assertEqual(out.shape, expected_output_shape)
    # Make sure that changing the first half of the inputs at time step 0
    # affected exactly the first half of the output channels:
self.assertTrue((out[0, 0, :hidden_out//feature_group_count] != 0).all())
self.assertTrue((out[0, 0, hidden_out//feature_group_count:-1] == 0).all())
    # Make sure that at time steps 5 and 7 only the second half is affected.
self.assertTrue((out[0, 5, :hidden_out//feature_group_count] == 0).all())
self.assertTrue((out[0, 7, hidden_out//feature_group_count:-1] != 0).all())
class Conv1DTest(parameterized.TestCase):
@parameterized.parameters(True, False)
def test_computation_padding_same(self, with_bias):
expected_out = [2, 3, 3, 3, 2]
def f():
data = jnp.ones([1, 5, 1])
net = conv.Conv1D(
output_channels=1,
kernel_shape=3,
stride=1,
padding="SAME",
with_bias=with_bias,
**create_constant_initializers(1.0, 1.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
self.assertEqual(out.shape, (1, 5, 1))
out = jnp.squeeze(out, axis=(0, 2))
expected_out = np.asarray(expected_out, dtype=float)
if with_bias:
expected_out += 1
np.testing.assert_allclose(out, expected_out, rtol=1e-5)
@parameterized.parameters(True, False)
def test_computation_padding_valid(self, with_bias):
expected_out = [3, 3, 3]
def f():
data = jnp.ones([1, 5, 1])
net = conv.Conv1D(
output_channels=1,
kernel_shape=3,
stride=1,
padding="VALID",
with_bias=with_bias,
**create_constant_initializers(1.0, 1.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
self.assertEqual(out.shape, (1, 3, 1))
out = np.squeeze(out, axis=(0, 2))
expected_out = np.asarray(expected_out, dtype=float)
if with_bias:
expected_out += 1
np.testing.assert_allclose(out, expected_out, rtol=1e-5)
class Conv2DTest(parameterized.TestCase):
@parameterized.parameters(True, False)
def test_computation_padding_same(self, with_bias):
expected_out = [[4, 6, 6, 6, 4], [6, 9, 9, 9, 6], [6, 9, 9, 9, 6],
[6, 9, 9, 9, 6], [4, 6, 6, 6, 4]]
def f():
data = jnp.ones([1, 5, 5, 1])
net = conv.Conv2D(
output_channels=1,
kernel_shape=3,
stride=1,
padding="SAME",
with_bias=with_bias,
**create_constant_initializers(1.0, 1.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
self.assertEqual(out.shape, (1, 5, 5, 1))
out = np.squeeze(out, axis=(0, 3))
expected_out = np.asarray(expected_out, dtype=float)
if with_bias:
expected_out += 1
np.testing.assert_allclose(out, expected_out, rtol=1e-5)
@parameterized.parameters(True, False)
def test_computation_padding_valid(self, with_bias):
expected_out = [[9, 9, 9], [9, 9, 9], [9, 9, 9]]
def f():
data = jnp.ones([1, 5, 5, 1])
net = conv.Conv2D(
output_channels=1,
kernel_shape=3,
stride=1,
padding="VALID",
with_bias=with_bias,
**create_constant_initializers(1.0, 1.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
self.assertEqual(out.shape, (1, 3, 3, 1))
out = np.squeeze(out, axis=(0, 3))
expected_out = np.asarray(expected_out, dtype=float)
if with_bias:
expected_out += 1
np.testing.assert_allclose(out, expected_out, rtol=1e-5)
class Conv3DTest(parameterized.TestCase):
@parameterized.parameters(True, False)
def test_computation_padding_same(self, with_bias):
expected_out = np.asarray([
9, 13, 13, 13, 9, 13, 19, 19, 19, 13, 13, 19, 19, 19, 13, 13, 19, 19,
19, 13, 9, 13, 13, 13, 9, 13, 19, 19, 19, 13, 19, 28, 28, 28, 19, 19,
28, 28, 28, 19, 19, 28, 28, 28, 19, 13, 19, 19, 19, 13, 13, 19, 19, 19,
13, 19, 28, 28, 28, 19, 19, 28, 28, 28, 19, 19, 28, 28, 28, 19, 13, 19,
19, 19, 13, 13, 19, 19, 19, 13, 19, 28, 28, 28, 19, 19, 28, 28, 28, 19,
19, 28, 28, 28, 19, 13, 19, 19, 19, 13, 9, 13, 13, 13, 9, 13, 19, 19,
19, 13, 13, 19, 19, 19, 13, 13, 19, 19, 19, 13, 9, 13, 13, 13, 9
],
dtype=float).reshape((5, 5, 5))
if not with_bias:
expected_out -= 1
def f():
data = jnp.ones([1, 5, 5, 5, 1])
net = conv.Conv3D(
output_channels=1,
kernel_shape=3,
stride=1,
padding="SAME",
with_bias=with_bias,
**create_constant_initializers(1.0, 1.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
self.assertEqual(out.shape, (1, 5, 5, 5, 1))
out = np.squeeze(out, axis=(0, 4))
np.testing.assert_allclose(out, expected_out, rtol=1e-5)
@parameterized.parameters(True, False)
def test_computation_padding_valid(self, with_bias):
expected_out = np.asarray([
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28
],
dtype=float).reshape((3, 3, 3))
if not with_bias:
expected_out -= 1
def f():
data = jnp.ones([1, 5, 5, 5, 1])
net = conv.Conv3D(
output_channels=1,
kernel_shape=3,
stride=1,
padding="VALID",
with_bias=with_bias,
**create_constant_initializers(1.0, 1.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
self.assertEqual(out.shape, (1, 3, 3, 3, 1))
out = np.squeeze(out, axis=(0, 4))
np.testing.assert_allclose(out, expected_out, rtol=1e-5)
@test_utils.transform_and_run(run_apply=False)
def test_invalid_input_shape(self):
with_bias = True
with self.assertRaisesRegex(
ValueError,
r"Input to ConvND needs to have rank in \[4, 5\], but input has shape"):
data = jnp.ones([1, 5, 5, 5, 1, 9, 9])
net = conv.Conv3D(
output_channels=1,
kernel_shape=3,
stride=1,
padding="VALID",
with_bias=with_bias,
**create_constant_initializers(1.0, 1.0, with_bias))
net(data)
def default_output_shape(input_shape, kernel, stride, padding):
if padding == "SAME":
return input_shape * stride
elif padding == "VALID":
return (input_shape - 1) * stride + kernel
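# --- Editor-added note; not part of the original test file. ---
# These are the standard transposed-convolution output-size rules used by the
# tests above: e.g. input=8, kernel=3, stride=3 with SAME padding gives
# 8 * 3 = 24, and input=16, kernel=3, stride=1 with VALID padding gives
# (16 - 1) * 1 + 3 = 18.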
class ConvTransposeTest(parameterized.TestCase):
@parameterized.parameters(0, -2)
def testIncorrectN(self, n):
init_fn, _ = transform.transform(
lambda: conv.ConvNDTranspose(n, output_channels=1, kernel_shape=3))
with self.assertRaisesRegex(
ValueError,
"convolution operations for `num_spatial_dims` greater than 0"):
init_fn(None)
@parameterized.parameters(1, 2, 3)
def test_connect_conv_transpose_same(self, n):
def f():
input_shape = [2] + [16]*n + [4]
data = jnp.zeros(input_shape)
net = conv.ConvNDTranspose(
n, output_channels=3, kernel_shape=3, padding="SAME")
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
expected_output_shape = (2,) + (16,)*n + (3,)
self.assertEqual(out.shape, expected_output_shape)
@parameterized.parameters(1, 2, 3)
def test_connect_conv_transpose_valid(self, n):
def f():
input_shape = [2] + [16]*n + [4]
data = jnp.zeros(input_shape)
net = conv.ConvNDTranspose(
n, output_channels=3, kernel_shape=3, padding="VALID")
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
expected_output_shape = (2,) + (18,)*n + (3,)
self.assertEqual(out.shape, expected_output_shape)
@parameterized.parameters(1, 2, 3)
def test_connect_conv_transpose_strided(self, n):
def f():
input_shape = [2] + [8]*n + [4]
data = jnp.zeros(input_shape)
net = conv.ConvNDTranspose(
n, output_channels=3, kernel_shape=3, stride=3)
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
expected_output_shape = (2,) + (24,)*n + (3,)
self.assertEqual(out.shape, expected_output_shape)
@parameterized.parameters(1, 2, 3)
def test_unbatched(self, n):
def f():
input_shape = [8]*n + [4]
data = jnp.zeros(input_shape)
net = conv.ConvNDTranspose(
n, output_channels=3, kernel_shape=3, stride=3)
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
expected_output_shape = (24,)*n + (3,)
self.assertEqual(out.shape, expected_output_shape)
@parameterized.parameters(1, 2, 3)
def test_connect_conv_transpose_channels_first(self, n):
def f():
input_shape = [2, 4] + [16]*n
data = jnp.zeros(input_shape)
net = conv.ConvNDTranspose(
n, output_channels=3, kernel_shape=3, data_format="channels_first")
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
expected_output_shape = (2, 3) + (16,)*n
self.assertEqual(out.shape, expected_output_shape)
@test_utils.transform_and_run(run_apply=False)
def test_invalid_input_shape(self):
n = 1
with self.assertRaisesRegex(ValueError,
"Input to ConvNDTranspose needs to have rank"):
input_shape = [2, 4] + [16]*n
data = jnp.zeros(input_shape*2)
net = conv.ConvNDTranspose(
n, output_channels=3, kernel_shape=3, data_format="channels_first")
return net(data)
@test_utils.transform_and_run(run_apply=False)
def test_invalid_input_mask(self):
n = 2
with self.assertRaisesRegex(ValueError, "Mask needs to have the same "
"shape as weights. Shapes are:"):
input_shape = [2, 4] + [16]*n
data = jnp.zeros(input_shape)
net = conv.ConvNDTranspose(
n, output_channels=3, kernel_shape=3,
data_format="channels_first",
mask=jnp.zeros([1, 2, 3]))
net(data)
@test_utils.transform_and_run
def test_valid_input_mask(self):
n = 2
input_shape = [2, 4] + [16]*n
data = jnp.zeros(input_shape)
net = conv.ConvNDTranspose(
n, output_channels=3, kernel_shape=3,
data_format="channels_first",
mask=jnp.zeros([3, 3, 3, 4]))
out = net(data)
expected_output_shape = (2, 3, 16, 16)
self.assertEqual(out.shape, expected_output_shape)
@parameterized.parameters(
(1, (3,), 128, 5, "NWC"),
(2, (4, 4), 64, 3, "NHWC"),
(3, (4, 4, 4), 64, 3, "NDHWC"))
@test_utils.transform_and_run
def test_initializer_variance(self, num_spatial_dims, kernel_shape,
in_channels, output_channels, data_format):
c = conv.ConvNDTranspose(
num_spatial_dims=num_spatial_dims,
kernel_shape=kernel_shape,
output_channels=output_channels,
data_format=data_format)
inputs = jnp.ones([16] + ([32] * num_spatial_dims) + [in_channels])
c(inputs)
w = c.params_dict()["conv_nd_transpose/w"]
actual_std = w.std()
expected_std = 1 / (np.sqrt(np.prod(kernel_shape + (in_channels,))))
    # The relative error compared to the expected std is typically around 0.15.
    # We check it is not > 0.5, as that would indicate something seriously
    # wrong (i.e. the previous buggy initialization).
rel_diff = np.abs(actual_std - expected_std) / expected_std
self.assertLess(rel_diff, 0.5)
@parameterized.parameters(
(10, 20, 5, 2, "SAME", (3, 2)),
(11, 77, 4, 7, "SAME", (3, 6)), # Tests max(0, padding_needed) branch.
(10, 23, 5, 2, "VALID", (4, 4)),
)
@test_utils.transform_and_run
def test_compute_adjusted_padding(self, input_size, output_size, kernel,
stride, padding, expected_adjusted_padding):
self.assertEqual(
conv.compute_adjusted_padding(
input_size=input_size,
output_size=output_size,
kernel_size=kernel,
stride=stride,
padding=padding), expected_adjusted_padding)
@parameterized.parameters(
([7, 9], None, 5, 3, "SAME", "channels_first"),
([7, 9, 16], None, 5, 2, "VALID", "channels_first"),
([9, 13], None, 5, 4, "VALID", "channels_last"),
([7, 9, 13], None, 5, 3, "VALID", "channels_last"),
# Default is: 21, 27, 48
([7, 9, 16], [19, 25, 48], 5, 3, "SAME", "channels_first"),
# Default is: 23, 41, 50
([7, 13, 16], [25, 42, 50], 5, 3, "VALID", "channels_first"),
# Default is: 45, 65, 80
([9, 13, 16], [43, 64, 80], 6, 5, "SAME", "channels_last"),
# Default is: 36, 46, 66
([7, 9, 13], [38, 48, 67], 6, 5, "VALID", "channels_last"),
)
@test_utils.transform_and_run
def test_output_sizes(self, input_shape, output_shape, kernel, stride,
padding, data_format):
batch_dim = 2
num_channels = 3
if data_format == "channels_first":
data = jnp.zeros([batch_dim, num_channels] + input_shape)
if data_format == "channels_last":
data = jnp.zeros([batch_dim] + input_shape + [num_channels])
net = conv.ConvNDTranspose(
num_spatial_dims=len(input_shape),
output_channels=3,
kernel_shape=kernel,
output_shape=output_shape,
stride=stride,
padding=padding,
data_format=data_format)
out = net(data)
if output_shape is None:
output_shape = [
default_output_shape(in_shape, kernel, stride, padding)
for in_shape in input_shape
]
if data_format == "channels_first":
expected_shape = tuple([batch_dim, num_channels] + output_shape)
if data_format == "channels_last":
expected_shape = tuple([batch_dim] + output_shape + [num_channels])
self.assertEqual(out.shape, expected_shape)
class Conv1DTransposeTest(parameterized.TestCase):
@parameterized.parameters(True, False)
def test_computation_padding_same(self, with_bias):
expected_out = [2, 3, 2]
def f():
data = jnp.ones([1, 3, 1])
net = conv.Conv1DTranspose(
output_channels=1,
kernel_shape=3,
stride=1,
padding="SAME",
with_bias=with_bias,
**create_constant_initializers(1.0, 1.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
self.assertEqual(out.shape, (1, 3, 1))
out = np.squeeze(out, axis=(0, 2))
expected_out = np.asarray(expected_out, dtype=float)
if with_bias:
expected_out += 1
np.testing.assert_allclose(out, expected_out, rtol=1e-5)
@parameterized.parameters(True, False)
def test_computation_padding_valid(self, with_bias):
expected_out = [1, 2, 3, 2, 1]
def f():
data = jnp.ones([1, 3, 1])
net = conv.Conv1DTranspose(
output_channels=1,
kernel_shape=3,
stride=1,
padding="VALID",
with_bias=with_bias,
**create_constant_initializers(1.0, 1.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
self.assertEqual(out.shape, (1, 5, 1))
out = np.squeeze(out, axis=(0, 2))
expected_out = np.asarray(expected_out, dtype=float)
if with_bias:
expected_out += 1
np.testing.assert_allclose(out, expected_out, rtol=1e-5)
class Conv2TransposeTest(parameterized.TestCase):
@parameterized.parameters(True, False)
def test_computation_padding_same(self, with_bias):
def f():
data = np.ones([1, 3, 3, 1])
net = conv.Conv2DTranspose(
output_channels=1,
kernel_shape=3,
padding="SAME",
with_bias=with_bias,
**create_constant_initializers(1.0, 1.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
expected_out = np.array([[4, 6, 4], [6, 9, 6], [4, 6, 4]])
if with_bias:
expected_out += 1
expected_out = np.expand_dims(np.atleast_3d(expected_out), axis=0)
np.testing.assert_allclose(out, expected_out, rtol=1e-5)
@parameterized.parameters(True, False)
def test_computation_padding_valid(self, with_bias):
"""Example taken from Figure 5 of https://link.medium.com/suSvMCsDv1 ."""
def f():
data = np.ones([1, 4, 4, 1])
net = conv.Conv2DTranspose(
output_channels=1,
kernel_shape=3,
padding="VALID",
with_bias=with_bias,
**create_constant_initializers(1.0, 1.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
expected_out = np.array([[1, 2, 3, 3, 2, 1],
[2, 4, 6, 6, 4, 2],
[3, 6, 9, 9, 6, 3],
[3, 6, 9, 9, 6, 3],
[2, 4, 6, 6, 4, 2],
[1, 2, 3, 3, 2, 1]])
if with_bias:
expected_out += 1
expected_out = np.expand_dims(np.atleast_3d(expected_out), axis=0)
np.testing.assert_allclose(out, expected_out, rtol=1e-5)
class Conv3DTransposeTest(parameterized.TestCase):
@parameterized.parameters(True, False)
def test_computation_padding_same(self, with_bias):
expected_out = np.asarray([
8, 12, 8, 12, 18, 12, 8, 12, 8, 12, 18, 12, 18, 27, 18, 12, 18, 12, 8,
12, 8, 12, 18, 12, 8, 12, 8
]).reshape((3, 3, 3))
if with_bias:
expected_out += 1
def f():
data = jnp.ones([1, 3, 3, 3, 1])
net = conv.Conv3DTranspose(
output_channels=1,
kernel_shape=3,
stride=1,
padding="SAME",
with_bias=with_bias,
**create_constant_initializers(1.0, 1.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
self.assertEqual(out.shape, (1, 3, 3, 3, 1))
out = np.squeeze(out, axis=(0, 4))
np.testing.assert_allclose(out, expected_out, rtol=1e-5)
@parameterized.parameters(True, False)
def test_computation_padding_valid(self, with_bias):
expected_out = np.asarray([
1, 2, 3, 2, 1, 2, 4, 6, 4, 2, 3, 6, 9, 6, 3, 2, 4, 6, 4, 2, 1, 2, 3, 2,
1, 2, 4, 6, 4, 2, 4, 8, 12, 8, 4, 6, 12, 18, 12, 6, 4, 8, 12, 8, 4, 2,
4, 6, 4, 2, 3, 6, 9, 6, 3, 6, 12, 18, 12, 6, 9, 18, 27, 18, 9, 6, 12,
18, 12, 6, 3, 6, 9, 6, 3, 2, 4, 6, 4, 2, 4, 8, 12, 8, 4, 6, 12, 18, 12,
6, 4, 8, 12, 8, 4, 2, 4, 6, 4, 2, 1, 2, 3, 2, 1, 2, 4, 6, 4, 2, 3, 6, 9,
6, 3, 2, 4, 6, 4, 2, 1, 2, 3, 2, 1.
]).reshape((5, 5, 5))
if with_bias:
expected_out += 1
def f():
data = jnp.ones([1, 3, 3, 3, 1])
net = conv.Conv3DTranspose(
output_channels=1,
kernel_shape=3,
stride=1,
padding="VALID",
with_bias=with_bias,
**create_constant_initializers(1.0, 1.0, with_bias))
return net(data)
init_fn, apply_fn = transform.transform(f)
out = apply_fn(init_fn(random.PRNGKey(428)), None)
self.assertEqual(out.shape, (1, 5, 5, 5, 1))
out = np.squeeze(out, axis=(0, 4))
np.testing.assert_allclose(out, expected_out, rtol=1e-5)
PRECISIONS = (None, jax.lax.Precision.DEFAULT, jax.lax.Precision.HIGH,
jax.lax.Precision.HIGHEST)
NAMED_PRECISIONS = ((str(p), p) for p in PRECISIONS)
class PrecisionTest(parameterized.TestCase):
@test_utils.combined_named_parameters(
NAMED_PRECISIONS,
(("ConvND", conv.ConvND), ("ConvNDTranspose", conv.ConvNDTranspose)))
def test_precision(self, precision, cls):
def f(x):
net = cls(2, output_channels=3, kernel_shape=3, padding="VALID")
return net(x, precision=precision)
f = transform.transform(f)
rng = jax.random.PRNGKey(42)
x = jnp.zeros([2, 16, 16, 4])
params = f.init(rng, x)
c = (
jax.jit(lambda x: f.apply(params, None, x))
.lower(x)
.compiler_ir(dialect="hlo")
)
hlo = c.as_hlo_text()
op_line = next(l for l in hlo.split("\n") if "convolution(" in l)
if precision is not None and precision != jax.lax.Precision.DEFAULT:
name = str(precision).lower()
self.assertRegex(op_line, f"operand_precision={{{name},{name}}}")
else:
self.assertNotIn("operand_precision", op_line)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/conv_test.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to visualize JAX expressions and Haiku modules.
The main entry point to this module is `make_model_info`, which captures JAX
expression and Haiku module usage information about the provided callable.
The resulting nested `Module` information can either be consumed directly, or
formatted in a textual representation using `format_module` or in an interactive
HTML tree visualization using `as_html` or `as_html_page`.
Example usage:
mod = jaxpr_info.make_model_info(my_callable)(some, arguments)
print(jaxpr_info.format_module(mod))
"""
from collections.abc import Mapping, Sequence
import dataclasses
import itertools
import logging
import os
import sys
from typing import Any, Callable, NamedTuple, Optional
from haiku._src import summarise
import jax
import jax.core
@dataclasses.dataclass
class Module:
"""Information about a Haiku module."""
# Name of the module, e.g. the Haiku module name.
name: str
# How many flops it takes to compute this module, including all operations
# contained by sub-modules.
# Only populated if `compute_flops` was passed to `make_model_info`.
flops: Optional[int] = None
# Expressions that are directly part of this module, e.g. in the __call__
# function.
expressions: list['Expression'] = dataclasses.field(default_factory=list)
# How many parameters are used by this module, including all sub-modules,
# and shape information for each parameter. Inferred from haiku module
# information.
total_param_size: int = 0
param_info: dict[str, str] = dataclasses.field(default_factory=dict)
# Same as above, but for haiku state.
total_state_size: int = 0
state_info: dict[str, str] = dataclasses.field(default_factory=dict)
@dataclasses.dataclass
class Expression:
"""Information about a single JAX expression."""
# Type of the expression, e.g. 'add' or 'conv_general_dilated'.
primitive: str
# Space separated lists of input and output variables.
invars: str
outvars: str
# Estimated number of flops required to compute this expression.
# Only populated if `compute_flops` was passed to `make_model_info`.
flops: Optional[int] = None
# Additional details, e.g. input/output shapes.
details: str = ''
# Some expressions take extra parameters, such as input/output dtypes for
# casts.
params: dict[str, str] = dataclasses.field(default_factory=dict)
# Some expressions, such as named_call, contain a whole subtree of modules
# and expressions.
submodule: Optional[Module] = None
# For internal use only, the first variable in outvars.
first_outvar: str = ''
# For internal use, the name scope stack of this expression.
name_stack: Sequence[str] = dataclasses.field(default_factory=list)
ComputeFlopsFn = Callable[[jax.core.JaxprEqn, Expression], int]
def make_model_info(
f: Callable[..., Any],
name: Optional[str] = None,
include_module_info: bool = True,
compute_flops: Optional[ComputeFlopsFn] = None,
axis_env: Optional[Sequence[tuple[Any, int]]] = None,
) -> Callable[..., Module]:
"""Creates a function that computes flop, param and state information.
Args:
f: The function for which to compute information. Haiku modules and
jax.named_call expressions will be represented as nested Modules in the
result.
name: Optional, the name of the root expression.
include_module_info: Whether to include parameter and state count
information for haiku modules. Can be slow for very large computations.
compute_flops: Optional, a function that returns an estimate of the number
of flops required to execute an equation.
axis_env: Sizes of pmapped axes. See docs of jax.make_jaxpr for details.
Returns:
A wrapped version of `f` that when applied to example arguments returns a
`Module` representation of `f` for those arguments.
`Module` and `Expression` contain high level information about JAX operations
(jaxprs) and can be visualized in concise and interactive formats; see
`format_module`, `as_html_page` or `as_html`.
"""
if not name:
name = f.__name__
make_jaxpr = jax.make_jaxpr(f, axis_env=axis_env)
if include_module_info:
# Wrap f in a lambda so eval_summary doesn't try to un-transform it.
# TODO(tomhennigan): remove lambda trick
make_module_info = summarise.eval_summary(lambda *a, **k: f(*a, **k)) # pylint: disable=unnecessary-lambda
def make_module(*args, **kwargs):
old_limit = sys.getrecursionlimit()
try:
# Increase recursion limit as graphs may be very deep
sys.setrecursionlimit(int(10e3))
jaxpr = make_jaxpr(*args, **kwargs).jaxpr
# Compute flops for all expressions.
module = Module(name=name)
_process_jaxpr(
jaxpr,
compute_flops,
scope=_ModuleScope(named_call_id='0'),
seen=set(),
module=module)
_name_scopes_to_modules(module)
if include_module_info:
# Add haiku param and state counts for all haiku modules.
module_infos = make_module_info(*args, **kwargs)
by_name = {i.module_details.module.module_name: i for i in module_infos}
by_name = {k.replace('/~/', '/'): v for k, v in by_name.items()}
for expr in module.expressions:
submodule = expr.submodule
if submodule:
_add_param_counts(submodule, submodule.name, by_name)
finally:
sys.setrecursionlimit(old_limit)
return module
return make_module
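# Example usage sketch (illustrative only; `my_net`, `x` and the cost model are
# hypothetical, and the cost model assumes `import numpy as np`):
#
#   def count_flops(eqn, expression):
#     # A trivial cost model: charge one flop per output element.
#     return sum(int(np.prod(v.aval.shape)) for v in eqn.outvars)
#
#   info = make_model_info(my_net, compute_flops=count_flops)(x)
#   print(format_module(info))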
def _name_scopes_to_modules(module: Module):
"""Converts name scopes to nested Modules, as if a call jaxpr were present."""
expressions = list(module.expressions)
del module.expressions[:]
if module.flops:
# Flops are recomputed below.
module.flops = 0
# The nested set of Modules corresponding to the current name scope stack.
module_stack = [module]
for e in expressions:
if e.submodule is not None:
_name_scopes_to_modules(e.submodule)
# Close open scopes.
while len(module_stack) > len(e.name_stack) + 1:
module_stack.pop()
for i, n in enumerate(e.name_stack):
i = i + 1 # Offset to account for the outermost module.
if i < len(module_stack) and n != module_stack[i].name:
# Entering a different name scope.
while len(module_stack) > i:
module_stack.pop()
if i == len(module_stack):
# Represent the scope as a dummy module.
scope = Expression(primitive='name_scope', invars='', outvars='')
scope.submodule = Module(name=n)
module_stack[-1].expressions.append(scope)
module_stack.append(scope.submodule)
# Expressions belong to the corresponding innermost module.
module_stack[-1].expressions.append(e)
if e.flops:
for m in module_stack:
m.flops = (m.flops or 0) + e.flops
class _ModuleScope(NamedTuple):
"""Helper object to track the nesting of named_calls and module scopes."""
# The concatenation of all outer named_call jaxprs. This is used to uniquely
# identify which computations we've already seen.
named_call_id: str
def join(self, eqn: jax.core.JaxprEqn) -> '_ModuleScope':
return _ModuleScope(
named_call_id=self.named_call_id + '/' + str(id(eqn)))
def _format_shape(var):
return var.aval.str_short().replace('float', 'f')
def _mark_seen(
binder_idx: dict[jax.core.Var, int],
seen: set[str],
var: jax.core.Var,
scope: _ModuleScope,
) -> bool:
"""Marks a variable as seen. Returns True if it was not previously seen."""
key = scope.named_call_id + '/' + _var_to_str(binder_idx, var)
if key in seen:
return False
seen.add(key)
return True
def _var_sort_key(s: str):
"""Sorts variables in order of use by JAX, short names first and `_` last."""
return (1000 if s == '_' else len(s), s)
def _var_to_str(
binder_idx: dict[jax.core.Var, int], atom: jax.core.Atom
) -> str:
"""Returns an atom name based on var binding order in its containing jaxpr."""
if isinstance(atom, jax.core.DropVar):
return '_'
if isinstance(atom, jax.core.Literal):
return str(atom)
assert isinstance(atom, jax.core.Var)
n = binder_idx[atom]
s = ''
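  # Encode the 0-based binder index with lowercase letters, e.g. 0 -> 'a',
  # 25 -> 'z', 26 -> 'ba', 27 -> 'bb'.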
while not s or n:
n, i = n // 26, n % 26
s = chr(97 + i % 26) + s
return s
def _process_eqn(
eqn: jax.core.JaxprEqn,
seen: set[str],
eqns_by_output: Mapping[str, jax.core.JaxprEqn],
compute_flops: Optional[ComputeFlopsFn],
scope: _ModuleScope,
module: Module,
binder_idx: dict[jax.core.Var, int],
) -> Optional[int]:
"""Recursive walks the JaxprEqn to compute the flops it takes."""
for out_var in eqn.outvars:
_mark_seen(binder_idx, seen, out_var, scope)
outvars = sorted([_var_to_str(binder_idx, e) for e in eqn.outvars],
key=_var_sort_key)
name_stack = str(eqn.source_info.name_stack)
expression = Expression(
primitive=eqn.primitive.name,
invars=' '.join(_var_to_str(binder_idx, v) for v in eqn.invars)
if len(eqn.invars) < 10 else f'{len(eqn.invars)} inputs',
outvars=' '.join(outvars) if len(outvars) < 10 else
f'{outvars[0]}, {len(outvars) - 1} more outputs',
first_outvar=outvars[0],
name_stack=name_stack.split('/') if name_stack else [])
if eqn.primitive.name in [
'named_call', 'custom_jvp_call_jaxpr', 'custom_vjp_call_jaxpr',
'custom_jvp_call', 'custom_vjp_call', 'remat_call', 'scan', 'while',
'xla_call', 'xla_pmap', 'pjit', 'remat2'
]:
flops_multiplier = 1
if eqn.primitive.name in ['named_call', 'xla_call']:
# Haiku module or named scope.
name = eqn.params['name']
jaxpr = eqn.params['call_jaxpr']
elif eqn.primitive.name in ['remat_call', 'xla_pmap']:
name = eqn.primitive.name + ' ' + eqn.params['name']
jaxpr = eqn.params['call_jaxpr']
elif eqn.primitive.name == 'remat2':
name = 'remat'
jaxpr = eqn.params['jaxpr']
elif eqn.primitive.name == 'while':
name = 'while'
jaxpr = eqn.params['body_jaxpr'].jaxpr
# The loop also has a `cond_jaxpr` that we don't display for now.
elif eqn.primitive.name == 'scan':
name = f'scan length={eqn.params["length"]} unroll={eqn.params["unroll"]}'
jaxpr = eqn.params['jaxpr'].jaxpr
flops_multiplier = eqn.params['length'] // eqn.params['unroll']
elif eqn.primitive.name == 'pjit':
name = eqn.params['name']
jaxpr = eqn.params['jaxpr'].jaxpr
elif eqn.primitive.name in ['custom_jvp_call', 'custom_vjp_call']:
name = eqn.primitive.name.replace('_call', '')
jaxpr = eqn.params['call_jaxpr']
elif eqn.primitive.name in [
'custom_jvp_call_jaxpr', 'custom_vjp_call_jaxpr'
]:
# Custom gradient.
name = eqn.primitive.name.replace('_call_jaxpr', '')
jaxpr = eqn.params['fun_jaxpr'].jaxpr
else:
raise ValueError(f'unmatched eqn {eqn.primitive.name}')
expression.submodule = Module(name=name)
flops = _process_jaxpr(jaxpr, compute_flops, scope.join(eqn), seen,
expression.submodule)
if compute_flops is not None:
flops *= flops_multiplier
expression.submodule.flops = flops
expression.flops = flops
if compute_flops is None or flops > 0:
module.expressions.append(expression)
else:
details = []
if eqn.invars:
details.append('in ' + ', '.join(_format_shape(v) for v in eqn.invars))
if eqn.outvars:
details.append('out ' + ', '.join(_format_shape(v) for v in eqn.outvars))
expression.details = ', '.join(details)
expression.params.update({k: str(v) for k, v in eqn.params.items()})
flops = None if compute_flops is None else compute_flops(eqn, expression)
expression.flops = flops
module.expressions.append(expression)
for var in eqn.invars:
if isinstance(var, jax.core.Literal):
continue
key = _var_to_str(binder_idx, var)
if key == '*':
continue
if not _mark_seen(binder_idx, seen, var, scope):
continue
if key not in eqns_by_output:
logging.warning('missing var %s = %s', key, type(var))
continue
f = _process_eqn(eqns_by_output[key], seen, eqns_by_output, compute_flops,
scope, module, binder_idx)
if compute_flops is not None:
flops += f
return flops
def _process_jaxpr(
jaxpr: jax.core.Jaxpr,
compute_flops: Optional[ComputeFlopsFn],
scope: _ModuleScope,
seen: set[str],
module: Module,
) -> Optional[int]:
"""Computes the flops used for a JAX expression, tracking module scope."""
if isinstance(jaxpr, jax.core.ClosedJaxpr):
return _process_jaxpr(jaxpr.jaxpr, compute_flops, scope, seen, module)
# Label variables by the order in which they're introduced.
lam_binders = itertools.chain(jaxpr.constvars, jaxpr.invars)
let_binders = itertools.chain.from_iterable(e.outvars for e in jaxpr.eqns)
all_binders = itertools.chain(lam_binders, let_binders)
binder_idx = dict(zip(all_binders, itertools.count()))
# Build a map that for each output contains the equation to compute it.
eqns_by_output = {}
for eqn in jaxpr.eqns:
for var in eqn.outvars:
eqns_by_output[_var_to_str(binder_idx, var)] = eqn
# Seed the set of variables that have been seen with the inputs and constants.
for var in jaxpr.invars + jaxpr.constvars:
_mark_seen(binder_idx, seen, var, scope)
# Recursively walk the computation graph.
flops = None if compute_flops is None else 0
for var in jaxpr.outvars:
if (isinstance(var, jax.core.Var) and
_mark_seen(binder_idx, seen, var, scope)):
f = _process_eqn(eqns_by_output[_var_to_str(binder_idx, var)], seen,
eqns_by_output, compute_flops, scope, module, binder_idx)
if compute_flops is not None:
flops += f
module.expressions.sort(key=lambda e: _var_sort_key(e.first_outvar))
module.flops = flops
return flops
def _add_param_counts(model_info: Module, prefix: str,
invocations: Mapping[str, summarise.MethodInvocation]):
"""Adds parameter count to the module info, if present."""
if prefix in invocations:
invocation = invocations[prefix]
model_info.total_param_size = sum(
spec.size for spec in invocation.module_details.params.values())
model_info.total_state_size = sum(
spec.size for spec in invocation.module_details.state.values())
for k, s in invocation.module_details.params.items():
model_info.param_info[k.replace('/~/', '/').replace(prefix, '.')] = str(s)
for k, s in invocation.module_details.state.items():
model_info.state_info[k.replace('/~/', '/').replace(prefix, '.')] = str(s)
for expr in model_info.expressions:
submodule = expr.submodule
if submodule:
_add_param_counts(
submodule, os.path.join(prefix, submodule.name), invocations
)
def _decimal_prefix(n: int, unit: str, precision: int = 4):
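  """Formats `n` with a decimal prefix, e.g. 1234567 -> '1.235 Mflops'."""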
prefixes = [('E', 18), ('P', 15), ('T', 12), ('G', 9), ('M', 6), ('k', 3)]
for prefix, exponent in prefixes:
scale = 10**exponent
if n > scale:
return f'%.{precision}g {prefix}{unit}' % (n / scale)
return f'{n} {unit}'
def format_module(module: Module, depth: int = 0) -> str:
"""Recursively formats module information as a human readable string."""
s = ' ' * depth + module.name
if module.flops is not None:
s += ' ' + _decimal_prefix(module.flops, 'flops')
if module.total_param_size > 0:
s += ' ' + _decimal_prefix(module.total_param_size, 'params')
if module.total_state_size > 0:
s += ' ' + _decimal_prefix(module.total_state_size, 'state')
s += '\n'
for exp in module.expressions:
s += format_expression(exp, depth + 1)
return s
def format_expression(exp: Expression, depth: int) -> str:
if exp.submodule:
return format_module(exp.submodule, depth)
s = ' ' * depth + exp.primitive
if exp.flops is not None:
s += ' ' + _decimal_prefix(exp.flops, 'flops')
s += ' ' + exp.details
return s + '\n'
def as_html_page(module: Module, min_flop: int = 1000) -> str:
"""Formats the output of `make_model_info` as an interactive HTML page."""
return f"""<html>
<head>
<style>{css()}</style>
<script>{js()}</script>
</head>
<body>{as_html(module, min_flop=min_flop)}</body>
</html>"""
def css() -> str:
"""The CSS for HTML visualization of a `Module`."""
return """.node {
list-style-type:none;
}
.node ol {
padding-left: 0;
margin-left: 30px;
}
li.node {
border-left: 1px solid black;
}
li.node:last-child {
border-left: none;
}
/* Correctly align the elbow connector for the last child node. */
li.node:last-child > span:first-child {
margin-left: -5px;
}
.expander {
cursor: pointer;
font-family: monospace;
}
.expression {
padding-left: 25px;
}
.tooltip-container .tooltip {
visibility: hidden;
background-color: #e3eaff;
padding: 8px;
margin-top: 1.4em;
border-radius: 6px;
position: absolute;
z-index: 1;
}
.tooltip-container:hover .tooltip {
visibility: visible;
}
.model-info {
font-family: monospace;
}
.flops {
color: grey;
}
.primitive {
color: #3f48b0;
}
"""
def js() -> str:
"""The JavaScript for HTML visualization of a `Module`."""
return """
function expand(el) {
console.log(el);
const siblings = el.parentElement.children;
siblings[0].onclick = () => collapse(el);
// Expand list of children.
siblings[siblings.length - 1].style.display = 'block';
// Hide expander link.
siblings[siblings.length - 2].style.display = 'none';
}
function collapse(el) {
console.log(el);
const siblings = el.parentElement.children;
siblings[0].onclick = () => expand(el);
// Hide list of children.
siblings[siblings.length - 1].style.display = 'none';
// Show expander link.
siblings[siblings.length - 2].style.display = 'inline';
}
"""
def as_html(module: Module,
min_flop: int = 1000,
outvars: str = '',
last: bool = False) -> str:
"""Formats a `Module` as a tree of interactive HTML elements.
When embedding this in a page, the outputs of `css` and `js` must be embedded
too for the visualization to work.
To only visualize a single module directly, see `as_html_page`.
Args:
module: The module to visualize, as an interactive HTML tree.
min_flop: Minimum number of flops for an operation to be shown.
outvars: For internal use, the outputs of this module.
last: For internal use, whether this module is the last of its siblings.
Returns:
HTML representation of `module`.
"""
s = '<span>'
if last:
s += """ <span class = "expander"
onclick = "javascript:expand(this)"
style = "padding-right: 6px" >
└─
</span>"""
else:
s += """<span class="expander"
onclick="javascript:expand(this)">
──
</span>"""
s += f"""<span class="tooltip-container"> {outvars} = <b>{module.name}</b>"""
if module.flops is not None:
s += f"<span class='flops'>{_decimal_prefix(module.flops, 'flop')}</span>"
if module.total_param_size > 0:
s += f"<span class='flops'>{_decimal_prefix(module.total_param_size, 'param')}</span>"
if module.total_state_size > 0:
s += f"<span class='flops'>{_decimal_prefix(module.total_state_size, 'state')}</span>"
# Tooltip with detailed param and state information.
s += """<span class="tooltip">
<table>
<tr><th colspan=2>Param</th></tr>"""
for key, val in module.param_info.items():
s += f'<tr><td>{key}</td><td>{val}</td></tr>'
s += '<tr><th colspan=2>State</th></tr>'
for key, val in module.state_info.items():
s += f'<tr><td>{key}</td><td>{val}</td></tr>'
s += """
</table>
</span>
</span>"""
expressions = [
e for e in module.expressions if e.flops is None or e.flops >= min_flop
]
if expressions:
s += """
<span class="expander" onclick="javascript:expand(this)">
[expand]
</span>
<ol style="display:none">
"""
for i, exp in enumerate(expressions):
last = i + 1 == len(expressions)
s += "<li class='node'>"
if exp.submodule:
s += as_html(
exp.submodule, min_flop=min_flop, outvars=exp.outvars, last=last)
else:
s += f"""<div class='expression tooltip-container'>
<span class='tooltip'>{exp.details}"""
for key, val in exp.params.items():
s += f'<div>{key}: {val}</div>'
s += f"""</span>
{exp.outvars} = <span class='primitive'>{exp.primitive}</span>
{exp.invars}
<span class='flops'>{_decimal_prefix((exp.flops or 0), 'flops')}</span>
</div>"""
s += '</li>'
s += '</ol>'
return s + '</span>'
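# Example usage sketch (illustrative; `my_net`, `x` and the output path are
# hypothetical):
#
#   info = make_model_info(my_net)(x)
#   with open('/tmp/model_info.html', 'w') as f:
#     f.write(as_html_page(info))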
|
dm-haiku-main
|
haiku/_src/jaxpr_info.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.initializers."""
import itertools as it
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import initializers
from haiku._src import test_utils
import jax
from jax.config import config
import jax.numpy as jnp
import numpy as np
class InitializersTest(parameterized.TestCase):
@test_utils.transform_and_run
def test_initializers(self):
as_np_f64 = lambda t: np.array(t, dtype=np.float64)
    # This just makes sure we can call the initializers in accordance with the
# API and get the right shapes and dtypes out.
inits = [
initializers.Constant(42.0),
initializers.Constant(as_np_f64(42.0)),
initializers.RandomNormal(),
initializers.RandomNormal(2.0),
initializers.RandomNormal(as_np_f64(2.0)),
initializers.RandomUniform(),
initializers.RandomUniform(3.0),
initializers.RandomUniform(as_np_f64(3.0)),
initializers.VarianceScaling(),
initializers.VarianceScaling(2.0),
initializers.VarianceScaling(as_np_f64(2.0)),
initializers.VarianceScaling(2.0, mode="fan_in"),
initializers.VarianceScaling(as_np_f64(2.0), mode="fan_in",
fan_in_axes=[0]),
initializers.VarianceScaling(2.0, mode="fan_in", fan_in_axes=[0]),
initializers.VarianceScaling(as_np_f64(2.0), mode="fan_in"),
initializers.VarianceScaling(2.0, mode="fan_out"),
initializers.VarianceScaling(as_np_f64(2.0), mode="fan_out"),
initializers.VarianceScaling(2.0, mode="fan_avg"),
initializers.VarianceScaling(as_np_f64(2.0), mode="fan_avg"),
initializers.VarianceScaling(2.0, distribution="truncated_normal"),
initializers.VarianceScaling(
as_np_f64(2.0), distribution="truncated_normal"),
initializers.VarianceScaling(2.0, distribution="normal"),
initializers.VarianceScaling(as_np_f64(2.0), distribution="normal"),
initializers.VarianceScaling(2.0, distribution="uniform"),
initializers.VarianceScaling(as_np_f64(2.0), distribution="uniform"),
initializers.UniformScaling(),
initializers.UniformScaling(2.0),
initializers.UniformScaling(as_np_f64(2.0)),
initializers.TruncatedNormal(),
initializers.Orthogonal(),
initializers.Identity(),
initializers.Identity(as_np_f64(2.0)),
# Users are supposed to be able to use these.
jnp.zeros,
jnp.ones,
]
# TODO(ibab): Test other shapes as well.
shape = (20, 42)
dtype = jnp.float32
for init in inits:
generated = init(shape, dtype)
self.assertEqual(generated.shape, shape)
self.assertEqual(generated.dtype, dtype)
@test_utils.transform_and_run
def test_invalid_variance_scale(self):
with self.assertRaisesRegex(ValueError, "scale.*must be a positive float"):
initializers.VarianceScaling(scale=-1.0)
with self.assertRaisesRegex(ValueError, "Invalid `mode` argument*"):
initializers.VarianceScaling(mode="foo")
with self.assertRaisesRegex(ValueError, "Invalid `distribution` argument*"):
initializers.VarianceScaling(distribution="bar")
@test_utils.transform_and_run
def test_compute_fans(self):
fan_in_out1 = initializers._compute_fans([])
self.assertEqual(fan_in_out1, (1, 1))
fan_in_out2 = initializers._compute_fans([2])
self.assertEqual(fan_in_out2, (2, 2))
fan_in_out3 = initializers._compute_fans([3, 4])
self.assertEqual(fan_in_out3, (3, 4))
fan_in_out4 = initializers._compute_fans([1, 2, 3, 4])
self.assertEqual(fan_in_out4, (6, 8))
fan_in_out5 = initializers._compute_fans([3, 5, 9], fan_in_axes=[0])
self.assertEqual(fan_in_out5, (3, 45))
fan_in_out6 = initializers._compute_fans([3, 5, 7, 4], fan_in_axes=[0, 1])
self.assertEqual(fan_in_out6, (15, 28))
@test_utils.transform_and_run
def test_orthogonal_invalid_shape(self):
init = initializers.Orthogonal()
shape = (20,)
with self.assertRaisesRegex(
ValueError, "Orthogonal initializer requires at least a 2D shape."):
init(shape, jnp.float32)
@test_utils.transform_and_run
def test_orthogonal_orthogonal(self):
init = initializers.Orthogonal()
shape = (42, 20)
generated = init(shape, jnp.float32)
self.assertEqual(generated.shape, shape)
self.assertEqual(generated.dtype, jnp.float32)
@test_utils.transform_and_run
def test_identity_identity(self):
init = initializers.Identity()
shape = (42, 20)
generated = init(shape, jnp.float32)
self.assertEqual(generated.shape, shape)
self.assertEqual(generated.dtype, jnp.float32)
key = jax.random.PRNGKey(42)
some_matrix = jax.random.normal(key, (62, 42), jnp.float32)
np.testing.assert_allclose(some_matrix @ generated, some_matrix[:, :20],
rtol=1e-2)
@test_utils.transform_and_run
def test_identity_invalid_shape(self):
init = initializers.Identity()
shape = (20,)
with self.assertRaisesRegex(ValueError, "requires at least a 2D shape."):
init(shape, jnp.float32)
@parameterized.parameters(
*it.product([(4, 5), (3, 3), (3, 4, 5), (6, 2, 3, 3)],
[3, 1],
[jnp.float32, jnp.int32]))
def testRange(self, shape, gain, dtype):
init = initializers.Identity(gain)
value = init(shape, dtype)
self.assertEqual(value.shape, shape)
np.testing.assert_almost_equal(value.mean(), gain / shape[-1], decimal=4)
np.testing.assert_almost_equal(value.max(), gain, decimal=4)
@test_utils.transform_and_run
def test_complex_dtype(self):
if jax.local_devices()[0].platform == "tpu":
self.skipTest("Complex dtype not supported by TPU")
    # This just makes sure we can call the initializers in accordance with the
# API and get the right shapes and dtypes out.
inits = [
initializers.Constant(42. + 1j * 1729.),
initializers.RandomNormal(),
initializers.RandomNormal(2.0),
initializers.RandomNormal(2. - 3j),
initializers.TruncatedNormal(),
initializers.TruncatedNormal(2.),
initializers.TruncatedNormal(2., 1. - 1j),
# Users are supposed to be able to use these.
jnp.zeros,
jnp.ones,
]
shape = (5, 13, 17)
dtype = jnp.complex64
for init in inits:
generated = init(shape, dtype)
self.assertEqual(generated.shape, shape)
self.assertEqual(generated.dtype, dtype)
@parameterized.parameters(
*it.product([[1], [1, 2, 3], [1, 2, 3, 4]],
[jnp.int32, jnp.float32],
[True, False]))
def test_constant_with_list(self, k, dtype, broadcast):
init = initializers.Constant(k)
shape = (1, 1, len(k)) if broadcast else (len(k),)
actual = init(shape, dtype)
expected = jnp.broadcast_to(jnp.asarray(k).astype(dtype), shape)
np.testing.assert_array_equal(actual, expected)
self.assertEqual(actual.shape, shape)
self.assertEqual(actual.dtype, dtype)
if __name__ == "__main__":
config.update("jax_enable_x64", True)
absltest.main()
|
dm-haiku-main
|
haiku/_src/initializers_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Group normalization implementation for Haiku."""
import collections
from collections.abc import Sequence
from typing import Optional, Union
from haiku._src import base
from haiku._src import initializers
from haiku._src import module
from haiku._src import utils
import jax
import jax.numpy as jnp
# If you are forking replace this block with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
get_parameter = base.get_parameter
initializers = initializers
Module = module.Module
get_channel_index = utils.get_channel_index
# pylint: enable=invalid-name
del base, initializers, module, utils
class GroupNorm(hk.Module):
r"""Group normalization module.
  This applies group normalization to the input ``x``: the channels are split
  into groups before the mean and variance are calculated. The default
  behaviour is to compute the mean and variance over the spatial dimensions and
  the grouped channels. The mean and variance are never computed over the
  created groups axis.
It transforms the input ``x`` into:
.. math::
      \text{outputs} = \text{scale} \dfrac{x - \mu}{\sigma + \epsilon} + \text{offset}
Where :math:`\mu` and :math:`\sigma` are respectively the mean and standard
deviation of ``x``.
There are many different variations for how users want to manage scale and
offset if they require them at all. These are:
- No ``scale``/``offset`` in which case ``create_*`` should be set to
``False`` and ``scale``/``offset`` aren't passed when the module is
called.
  - Trainable ``scale``/``offset`` in which case ``create_*`` should be set to
    ``True`` and again ``scale``/``offset`` aren't passed when the module is
    called. In this case this module creates and owns the ``scale``/``offset``
    parameters.
- Externally generated ``scale``/``offset``, such as for conditional
normalization, in which case ``create_*`` should be set to ``False`` and
then the values fed in at call time.
"""
def __init__(
self,
groups: int,
axis: Union[int, slice, Sequence[int]] = slice(1, None),
create_scale: bool = True,
create_offset: bool = True,
eps: float = 1e-5,
scale_init: Optional[hk.initializers.Initializer] = None,
offset_init: Optional[hk.initializers.Initializer] = None,
data_format: str = "channels_last",
name: Optional[str] = None,
):
"""Constructs a ``GroupNorm`` module.
Args:
groups: number of groups to divide the channels by. The number of channels
must be divisible by this.
axis: ``int``, ``slice`` or sequence of ints representing the axes which
should be normalized across. By default this is all but the first
      dimension. For time series data use ``slice(2, None)`` to average over
      all dimensions except Batch and Time.
create_scale: whether to create a trainable scale per channel applied
after the normalization.
create_offset: whether to create a trainable offset per channel applied
after normalization and scaling.
eps: Small epsilon to add to the variance to avoid division by zero.
Defaults to ``1e-5``.
scale_init: Optional initializer for the scale parameter. Can only be set
if ``create_scale=True``. By default scale is initialized to ``1``.
offset_init: Optional initializer for the offset parameter. Can only be
set if ``create_offset=True``. By default offset is initialized to
``0``.
data_format: The data format of the input. Can be either
``channels_first``, ``channels_last``, ``N...C`` or ``NC...``. By
default it is ``channels_last``. See :func:`get_channel_index`.
name: Name of the module.
"""
super().__init__(name=name)
if isinstance(axis, slice):
self.axis = axis
elif isinstance(axis, int):
self.axis = (axis,)
elif (isinstance(axis, collections.abc.Iterable) and
all(isinstance(ax, int) for ax in axis)):
self.axis = axis
else:
raise ValueError("`axis` should be an int, slice or iterable of ints.")
self.groups = groups
self.eps = eps
self.data_format = data_format
self.channel_index = hk.get_channel_index(data_format)
self.create_scale = create_scale
self.create_offset = create_offset
self.rank = None
if self.create_scale:
if scale_init is None:
scale_init = jnp.ones
self.scale_init = scale_init
elif scale_init is not None:
raise ValueError("Cannot set `scale_init` if `create_scale=False`.")
if self.create_offset:
if offset_init is None:
offset_init = jnp.zeros
self.offset_init = offset_init
elif offset_init is not None:
raise ValueError("Cannot set `offset_init` if `create_offset=False`.")
def __call__(
self,
x: jax.Array,
scale: Optional[jax.Array] = None,
offset: Optional[jax.Array] = None,
) -> jax.Array:
"""Returns normalized inputs.
Args:
x: An n-D tensor of the ``data_format`` specified in the constructor
on which the transformation is performed.
scale: A tensor up to n-D. The shape of this tensor must be broadcastable
to the shape of ``x``. This is the scale applied to the normalized
x. This cannot be passed in if the module was constructed with
``create_scale=True``.
offset: A tensor up to n-D. The shape of this tensor must be broadcastable
to the shape of ``x``. This is the offset applied to the normalized
``x``. This cannot be passed in if the module was constructed with
``create_offset=True``.
Returns:
An n-d tensor of the same shape as x that has been normalized.
"""
if self.rank is not None and x.ndim != self.rank:
raise ValueError(
"The rank of the inputs cannot change between calls, the"
f" original call was rank={self.rank} but this call was "
f"rank={x.ndim}.")
if self.create_scale and scale is not None:
raise ValueError(
"Cannot pass `scale` at call time if `create_scale=True`.")
if self.create_offset and offset is not None:
raise ValueError(
"Cannot pass `offset` at call time if `create_offset=True`.")
channels = x.shape[self.channel_index]
if channels % self.groups != 0:
raise ValueError(
"The number of channels must be divisible by the number of groups, "
f"was channels={channels}, groups={self.groups}")
if self.rank is None:
self._initialize(x, channels)
dtype = x.dtype
if self.channel_index == -1:
params_shape = (x.shape[-1],)
else:
assert self.channel_index == 1
params_shape = (x.shape[1],) + (1,) * (self.rank - 2)
if self.create_scale:
scale = hk.get_parameter("scale", params_shape, dtype, self.scale_init)
if self.create_offset:
offset = hk.get_parameter("offset", params_shape, dtype, self.offset_init)
x = x.reshape(self.group_shape)
mean = jnp.mean(x, self.axis, keepdims=True)
# TODO(tycai): Consider faster but less precise variance formulation.
var = jnp.var(x, self.axis, keepdims=True)
x = (x - mean) * jax.lax.rsqrt(var + self.eps)
x = x.reshape(self.first_input_shape)
if scale is not None:
scale = jax.lax.broadcast_to_rank(scale, x.ndim)
x = x * scale
if offset is not None:
offset = jax.lax.broadcast_to_rank(offset, x.ndim)
x = x + offset
return x
def _initialize(self, x: jax.Array, channels: int):
assert self.rank is None
self.rank = x.ndim
    # Turn the slice specification into an explicit tuple of axes.
if isinstance(self.axis, slice):
axes = tuple(range(self.rank))
self.axis = axes[self.axis]
if self.channel_index == -1:
self.axis = tuple(a if a != self.rank - 1 else a + 1 for a in self.axis)
self.group_shape = (
(-1,) + x.shape[1:-1] + (self.groups, channels // self.groups))
else:
assert self.channel_index == 1
self.axis = tuple(a if a == 0 else a + 1 for a in self.axis)
self.group_shape = (
(-1, self.groups, channels // self.groups) + x.shape[2:])
self.first_input_shape = (-1,) + x.shape[1:]
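# Example usage sketch (illustrative shapes; uses the public `haiku as hk`,
# `jax` and `jax.numpy as jnp` APIs rather than the aliases defined above):
#
#   def forward(x):  # x: [N, H, W, C] with C divisible by `groups`.
#     return hk.GroupNorm(groups=4)(x)
#
#   forward = hk.transform(forward)
#   x = jnp.ones([2, 8, 8, 16])
#   params = forward.init(jax.random.PRNGKey(0), x)
#   y = forward.apply(params, None, x)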
|
dm-haiku-main
|
haiku/_src/group_norm.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
|
dm-haiku-main
|
haiku/_src/__init__.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.spectral_norm."""
from absl.testing import absltest
from haiku._src import basic
from haiku._src import spectral_norm
from haiku._src import test_utils
from haiku._src import transform
import jax.numpy as jnp
import jax.random as random
import numpy as np
class SpectralNormTest(absltest.TestCase):
@test_utils.transform_and_run
def test_scalar(self):
sn = spectral_norm.SpectralNorm()
with self.assertRaisesRegex(ValueError, "not well defined"):
sn(1.0)
@test_utils.transform_and_run
def test_vector(self):
sn = spectral_norm.SpectralNorm()
with self.assertRaisesRegex(ValueError, "not well defined"):
sn(jnp.ones(shape=[5]))
@test_utils.transform_and_run
def test_3d_tensor(self):
sn = spectral_norm.SpectralNorm()
input_3d = (4.0 * jnp.eye(8, 8))[None, :, :]
sn(input_3d)
with self.assertRaisesRegex(ValueError, "Input is 3D but"):
sn(input_3d, error_on_non_matrix=True)
@test_utils.transform_and_run
def test_matrix(self):
sn = spectral_norm.SpectralNorm()
# We can easily calculate the first singular value for this matrix.
input_ = 4.0 * jnp.eye(8, 8)
sn(input_)
np.testing.assert_allclose(sn.sigma, 4.0, atol=1e-3)
@test_utils.transform_and_run
def test_matrix_multiple_steps(self):
sn = spectral_norm.SpectralNorm(n_steps=3)
# We can easily calculate the first singular value for this matrix.
input_ = 4.0 * jnp.eye(8, 8)
sn(input_)
np.testing.assert_allclose(sn.sigma, 4.0, atol=1e-3)
@test_utils.transform_and_run
def test_matrix_no_stats(self):
sn = spectral_norm.SpectralNorm()
# We can easily calculate the first singular value for this matrix.
input_ = 4.0 * jnp.eye(8, 8)
sn(input_, update_stats=False)
np.testing.assert_allclose(sn.sigma, 1.0)
class SNParamsTreeTest(absltest.TestCase):
def test_sn_naming_scheme(self):
sn_name = "this_is_a_wacky_but_valid_name"
linear_name = "so_is_this"
def f():
return basic.Linear(output_size=2, name=linear_name)(jnp.zeros([6, 6]))
init_fn, _ = transform.transform(f)
params = init_fn(random.PRNGKey(428))
def g(x):
return spectral_norm.SNParamsTree(ignore_regex=".*b", name=sn_name)(x)
init_fn, _ = transform.transform_with_state(g)
_, params_state = init_fn(random.PRNGKey(428), params)
expected_sn_states = [f"{sn_name}/{linear_name}__{s}" for s in ["w"]]
self.assertSameElements(expected_sn_states, params_state.keys())
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/spectral_norm_test.py
|
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.eval_shape."""
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import basic
from haiku._src import eval_shape
from haiku._src import stateful
from haiku._src import test_utils
from haiku._src import transform
import jax
import jax.numpy as jnp
class EvalShapeTest(parameterized.TestCase):
@test_utils.transform_and_run
def test_fast_eval_shape_dropout(self):
f = lambda rng, x: basic.dropout(rng, 0.5, x)
rng = jax.random.PRNGKey(0)
x = jnp.ones([1])
y_slow = jax.eval_shape(f, rng, x)
y_fast = eval_shape.fast_eval_shape(f, rng, x)
self.assertEqual(y_slow, y_fast)
def test_fast_eval_shape_fold_in(self):
f = lambda rng, x: jax.random.fold_in(rng, 1)
rng = jax.random.PRNGKey(0)
x = jnp.ones([1])
y_slow = jax.eval_shape(f, rng, x)
y_fast = eval_shape.fast_eval_shape(f, rng, x)
self.assertEqual(y_slow, y_fast)
def test_fast_eval_shape_already_transformed(self):
f = transform.transform(lambda x: basic.Linear(20)(x)) # pylint: disable=unnecessary-lambda
rng = jax.random.PRNGKey(0)
x = jnp.ones([1, 12])
# init_fn
y_slow = jax.eval_shape(f.init, rng, x)
y_fast = eval_shape.fast_eval_shape(f.init, rng, x)
self.assertEqual(y_slow, y_fast)
self.assertEqual(
y_slow, {'linear': {'w': jax.ShapeDtypeStruct((12, 20), jnp.float32),
'b': jax.ShapeDtypeStruct((20,), jnp.float32)}})
# apply_fn
y_slow = jax.eval_shape(f.apply, y_slow, rng, x)
y_fast = eval_shape.fast_eval_shape(f.apply, y_fast, rng, x)
self.assertEqual(y_slow, y_fast)
def test_fast_eval_shape_within_transform(self):
def f(x):
m = basic.Linear(20)
y_slow = stateful.eval_shape(m, x)
y_fast = eval_shape.fast_eval_shape(m, x)
self.assertEqual(y_slow, y_fast)
return m(x)
f = transform.transform(f)
rng = jax.random.PRNGKey(0)
x = jnp.ones([1, 12])
params = jax.eval_shape(f.init, rng, x)
self.assertEqual(
params, {'linear': {'w': jax.ShapeDtypeStruct((12, 20), jnp.float32),
'b': jax.ShapeDtypeStruct((20,), jnp.float32)}})
jax.eval_shape(f.apply, params, rng, x)
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
haiku/_src/eval_shape_test.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.mixed_precision."""
import importlib
from typing import Optional
from absl.testing import absltest
from haiku._src import base
from haiku._src import conv
from haiku._src import mixed_precision
from haiku._src import module
from haiku._src import test_utils
from haiku._src import transform
import jax
import jax.numpy as jnp
import jmp
def with_policy(cls: type[module.Module], policy: Optional[jmp.Policy]):
def decorator(f):
def wrapper(*args, **kwargs):
with mixed_precision.push_policy(cls, policy):
return f(*args, **kwargs)
return wrapper
return decorator
class OuterModule(module.Module):
def __call__(self):
self.w = base.get_parameter('w', [], jnp.bfloat16, init=jnp.ones)
self.inner = InnerModule()
self.inner_ret = self.inner()
return jnp.ones([], dtype=jnp.bfloat16)
class InnerModule(module.Module):
def __call__(self):
self.w = base.get_parameter('w', [], jnp.bfloat16, init=jnp.ones)
return jnp.ones([], dtype=jnp.bfloat16)
class InnerInnerModule(module.Module):
def __call__(self):
self.w = base.get_parameter('w', [], jnp.bfloat16, init=jnp.ones)
return jnp.ones([], dtype=jnp.bfloat16)
def transform_and_run_once(f, *args, **kwargs):
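  # Returns the dtypes of the transformed params and outputs of `f`, computed
  # abstractly via jax.eval_shape (no real arrays are materialized).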
f = transform.transform(f)
def g(*args, **kwargs):
rng = jax.random.PRNGKey(28)
params = f.init(rng, *args, **kwargs)
out = f.apply(params, None, *args, **kwargs)
return params, out
return jax.tree_util.tree_map(
lambda x: x.dtype, jax.eval_shape(g, *args, **kwargs))
class MixedPrecisionTest(absltest.TestCase):
def test_get_policy(self):
self.assertIsNone(mixed_precision.get_policy(InnerModule))
policy = jmp.get_policy('p=f16,c=f32,o=f16')
mixed_precision.set_policy(InnerModule, policy)
self.assertEqual(mixed_precision.get_policy(InnerModule), policy)
mixed_precision.clear_policy(InnerModule)
self.assertIsNone(mixed_precision.get_policy(InnerModule))
@test_utils.transform_and_run
def test_current_policy(self):
policy = jmp.get_policy('p=f16,c=f32,o=f16')
test = self
class Foo(module.Module):
def __call__(self):
test.assertEqual(mixed_precision.current_policy(), policy)
class Bar(module.Module):
def __call__(self):
test.assertEqual(mixed_precision.current_policy(), policy)
Foo()()
test.assertEqual(mixed_precision.current_policy(), policy)
class Baz(module.Module):
def __call__(self):
test.assertIsNone(mixed_precision.current_policy())
Bar()()
test.assertIsNone(mixed_precision.current_policy())
mixed_precision.set_policy(Bar, policy)
Baz()()
def test_set_global_policy(self):
self.assertGlobalPolicy(InnerModule)
def test_set_global_policy_inner_class(self):
self.assertGlobalPolicy(InnerModule.InnerInnerModule)
def test_set_global_policy_local_class(self):
class LocalModule(InnerModule):
pass
self.assertGlobalPolicy(LocalModule)
def assertGlobalPolicy(self, cls):
policy = jmp.get_policy('p=f16,c=f32,o=f16')
with_policy(cls, policy)(self.assertGlobalPolicy_inner)(cls)
def assertGlobalPolicy_inner(self, cls):
def f():
mod = cls(name='inner_module')
return mod(), mod.w
params, (ret, w) = transform_and_run_once(f)
self.assertEqual(ret, jnp.float16)
self.assertEqual(w, jnp.float32)
self.assertEqual(params['inner_module'], {'w': jnp.float16})
@test_utils.transform_and_run
def test_set_policy_factory(self):
def factory():
class MyModule(module.Module):
def __call__(self, x):
return x
return MyModule
cls1 = factory()
cls2 = factory()
mixed_precision.set_policy(cls1, jmp.get_policy('o=f16'))
mixed_precision.set_policy(cls2, jmp.get_policy('o=bf16'))
x = jnp.ones([])
self.assertEqual(cls1()(x).dtype, jnp.float16)
self.assertEqual(cls2()(x).dtype, jnp.bfloat16)
@test_utils.transform_and_run
def test_push_policy(self):
policy = jmp.get_policy('o=f16')
test = self
class FooModule(module.Module):
def __call__(self):
test.assertEqual(mixed_precision.current_policy(), policy)
mod = FooModule()
with mixed_precision.push_policy(FooModule, policy):
self.assertEqual(mixed_precision.get_policy(FooModule), policy)
mod()
self.assertIsNone(mixed_precision.get_policy(FooModule))
@test_utils.transform_and_run
def test_push_policy_maintains_old_policy(self):
old_policy = jmp.get_policy('o=f16')
new_policy = jmp.get_policy('o=f64')
self.assertIsNone(mixed_precision.get_policy(InnerModule))
mixed_precision.set_policy(InnerModule, old_policy)
with mixed_precision.push_policy(InnerModule, new_policy):
self.assertEqual(mixed_precision.get_policy(InnerModule), new_policy)
self.assertEqual(mixed_precision.get_policy(InnerModule), old_policy)
mixed_precision.clear_policy(InnerModule)
@test_utils.transform_and_run
def test_push_policy_not_allowed_in_method_of_same_class(self):
any_policy = jmp.get_policy('o=f16')
class PushesInMethod(module.Module):
def __call__(self):
with mixed_precision.push_policy(PushesInMethod, any_policy):
pass
mod = PushesInMethod()
with self.assertRaisesRegex(ValueError, 'same class is not supported'):
mod()
@with_policy(InnerModule, jmp.get_policy('p=f16,c=f32,o=f16'))
def test_clear_global_policy(self):
def f():
mod = InnerModule()
return mod(), mod.w
mixed_precision.clear_policy(InnerModule)
params, (ret, w) = transform_and_run_once(f)
self.assertEqual(ret, jnp.bfloat16)
self.assertEqual(w, jnp.bfloat16)
self.assertEqual(params['inner_module'], {'w': jnp.bfloat16})
@with_policy(OuterModule, jmp.get_policy('p=f32,c=f16,o=f32'))
@with_policy(InnerModule, jmp.get_policy('p=f16,c=f32,o=f32'))
def test_set_global_policy_nested(self):
def f():
outer = OuterModule()
outer_ret = outer()
return outer_ret, outer.inner_ret, outer.w, outer.inner.w
params, (outer_ret, inner_ret, outer_w, inner_w) = transform_and_run_once(f)
# The return type of the modules should use the output type of the module.
self.assertEqual(outer_ret, jnp.float32)
self.assertEqual(inner_ret, jnp.float32)
# Inside the module we should use the compute type of the policy.
self.assertEqual(outer_w, jnp.float16)
self.assertEqual(inner_w, jnp.float32)
# The parameters returned from init should use the param type of the policy.
self.assertEqual(params['outer_module'], {'w': jnp.float32})
self.assertEqual(params['outer_module/inner_module'], {'w': jnp.float16})
def test_policy_for_reloaded_class(self):
conv_local = conv
policy = jmp.get_policy('p=f16,c=f32,o=f16')
mixed_precision.set_policy(conv_local.ConvND, policy)
conv_local = importlib.reload(conv)
params, y = transform_and_run_once(
lambda: conv_local.ConvND(2, 1, 1)(jnp.ones([1, 1, 1, 1])))
jax.tree_util.tree_map(lambda p: self.assertEqual(p, jnp.float16), params)
self.assertEqual(y, jnp.float16)
@test_utils.transform_and_run
def test_policy_with_interceptor(self):
sidechannel = []
def my_interceptor(next_f, args, kwargs, context):
sidechannel.append(context)
return next_f(*args, **kwargs)
# We need this to make sure that the mixed precision interceptor is
# installed when we call set_policy (this only happens on the first call).
mixed_precision.reset_thread_local_state_for_test()
policy = jmp.get_policy('p=f16,c=f32,o=f16')
with module.intercept_methods(my_interceptor):
mixed_precision.set_policy(OuterModule, policy)
x = OuterModule()()
self.assertEqual(x.dtype, jnp.float16)
# Outer.init, Outer.call, Inner.init, Inner.call
self.assertLen(sidechannel, 4)
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
haiku/_src/mixed_precision_test.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.stateful."""
import itertools as it
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import base
from haiku._src import base_test
from haiku._src import config
from haiku._src import initializers
from haiku._src import module
from haiku._src import stateful
from haiku._src import test_utils
from haiku._src import transform
import jax
import jax.numpy as jnp
import numpy as np
# Runs the apply-time function `a` once parameters are frozen (i.e. during
# `apply`) and the init-time function `i` otherwise (i.e. during `init`).
toggle = lambda i, a: lambda x: a(x) if base.params_frozen() else i(x)
# JAX transforms and control flow that need to be aware of Haiku internal
# state to operate unsurprisingly.
# pylint: disable=g-long-lambda
HK_OVERLOADED_JAX_PURE_EXPECTING_FNS = (
# Just-in-time compilation.
("jit", stateful.jit),
# ("make_jaxpr", stateful.make_jaxpr),
("eval_shape", lambda f: (lambda x: [f(x), stateful.eval_shape(f, x)])),
# Parallelization.
# TODO(tomhennigan): Add missing features (e.g. pjit,xmap).
# ("pmap", lambda f: stateful.pmap(f, "i")),
# Vectorization.
("vmap", lambda f: stateful.vmap(f, split_rng=False)),
# Control flow.
# TODO(tomhennigan): Enable for associative_scan.
# ("associative_scan", lambda f:
# (lambda x: jax.lax.associative_scan(f, x))),
("cond", lambda f: (lambda x: stateful.cond(True, f, f, x))),
("fori_loop", lambda f:
(lambda x: stateful.fori_loop(0, 1, base_test.ignore_index(f), x))),
# ("map", lambda f: (lambda x: stateful.map(f, x))),
("scan", lambda f:
(lambda x: stateful.scan(base_test.identity_carry(f), None, x))),
("switch", lambda f: (lambda x: stateful.switch(0, [f, f], x))),
("while_loop", lambda f: toggle(
f, lambda x: stateful.while_loop(lambda xs: xs[0] == 0,
lambda xs: (1, f(xs[1])),
(0, x)))),
# Automatic differentiation.
# TODO(tomhennigan): Add missing features (e.g. custom_vjp, custom_jvp).
("grad", lambda f: stateful.grad(lambda x: f(x).sum())),
("value_and_grad", lambda f: stateful.value_and_grad(lambda x: f(x).sum())),
("checkpoint", stateful.remat),
)
# pylint: enable=g-long-lambda
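# Illustrative note (a sketch for clarity, not part of the original tests):
# each ("name", hk_jax_fn) pair above is combined with
# base_test.SIDE_EFFECTING_FUNCTIONS in `test_safe_use_of_jax` below, e.g.
#
#   name, hk_jax_fn = HK_OVERLOADED_JAX_PURE_EXPECTING_FNS[0]  # ("jit", stateful.jit)
#   f = hk_jax_fn(lambda x: x)  # Haiku-aware jit of an identity function.
#   f(jnp.ones([1]))            # Safe to call inside hk.transform.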
def with_rng_reserve_size(f):
"""Run test with rng_reserve_size of 7."""
def wrapper(*a, **kw):
with config.context(rng_reserve_size=7):
return f(*a, **kw)
return wrapper
class StatefulTest(parameterized.TestCase):
@test_utils.transform_and_run
def test_grad(self):
x = jnp.array(3.)
g = stateful.grad(SquareModule())(x)
np.testing.assert_allclose(g, 2 * x, rtol=1e-4)
def test_grad_no_transform(self):
x = jnp.array(3.)
with self.assertRaises(ValueError, msg="Use jax.grad() instead"):
stateful.grad(jnp.square)(x)
@test_utils.transform_and_run
def test_value_and_grad(self):
x = jnp.array(2.)
y, g = stateful.value_and_grad(SquareModule())(x)
self.assertEqual(y, x ** 2)
np.testing.assert_allclose(g, 2 * x, rtol=1e-4)
def test_value_and_grad_no_transform(self):
x = jnp.array(3.)
with self.assertRaises(ValueError, msg="Use jax.grad() instead"):
stateful.value_and_grad(jnp.square)(x)
@test_utils.transform_and_run
def test_grad_aux(self):
o = object()
def f(x):
m = SquareModule()
return m(x), o
x = jnp.array(3.)
g, aux = stateful.grad(f, has_aux=True)(x)
np.testing.assert_allclose(g, 2 * x, rtol=1e-4)
self.assertIs(aux, o)
@test_utils.transform_and_run
def test_value_and_grad_aux(self):
o = object()
def f(x):
m = SquareModule()
return m(x), o
x = jnp.array(3.)
(y, aux), g = stateful.value_and_grad(f, has_aux=True)(x)
self.assertEqual(y, jnp.power(x, 2))
np.testing.assert_allclose(g, 2 * x, rtol=1e-4)
self.assertIs(aux, o)
def test_grad_and_jit(self):
def f(x):
g = stateful.grad(SquareModule())(x)
return g
x = jnp.array(3.)
f = transform.transform_with_state(f)
params, state = jax.jit(f.init)(None, x)
g, state = jax.jit(f.apply)(params, state, None, x)
np.testing.assert_allclose(g, 2 * x, rtol=1e-3)
def test_value_and_grad_and_jit(self):
def f(x):
y, g = stateful.value_and_grad(SquareModule())(x)
return y, g
x = jnp.array(3.)
f = transform.transform_with_state(f)
params, state = jax.jit(f.init)(None, x)
(y, g), state = jax.jit(f.apply)(params, state, None, x)
np.testing.assert_allclose(y, x ** 2, rtol=1e-3)
np.testing.assert_allclose(g, 2 * x, rtol=1e-3)
@test_utils.transform_and_run
def test_jit(self):
mod = SquareModule()
x = jnp.array(2)
y = stateful.jit(mod)(x)
self.assertEqual(y, x ** 2)
def test_jit_no_transform(self):
x = jnp.array(2)
with self.assertRaises(ValueError, msg="Use jax.jit() instead"):
stateful.jit(jnp.square)(x)
@test_utils.transform_and_run
def test_remat(self):
forward, backward = [], []
callback = _callback_prim(lambda: forward.append(None),
lambda: backward.append(None))
def test(remat):
x = jnp.array(3.)
mod = CountingModule()
self.assertEqual(mod.count, 0)
f = lambda x: callback(mod(x))
if remat:
f = stateful.remat(f)
y, g = stateful.value_and_grad(f)(x)
np.testing.assert_allclose(y, x ** 2, rtol=1e-3)
np.testing.assert_allclose(g, 2 * x, rtol=1e-3)
self.assertEqual(mod.count, 1)
num_forward = len(forward)
num_backward = len(backward)
del forward[:], backward[:]
return num_forward, num_backward
# Sanity check.
self.assertEqual(test(remat=True), test(remat=True))
self.assertEqual(test(remat=False), test(remat=False))
# NOTE: JAX does not guarantee to execute primitives once and only once for
# a given function (we observe f=2,b=1 without remat and f=5,b=1 with
# remat), but we do expect that JAX will execute our primitive forward at
# least one more time with remat than without it.
num_forward_remat, num_backward_remat = test(remat=True)
num_forward_no_remat, num_backward_no_remat = test(remat=False)
self.assertGreater(num_forward_remat, num_forward_no_remat)
self.assertEqual(num_backward_remat, num_backward_no_remat)
def test_remat_no_transform(self):
x = jnp.array(3.)
with self.assertRaises(ValueError, msg="Use jax.remat() instead"):
stateful.remat(jnp.square)(x)
@test_utils.combined_named_parameters(
test_utils.named_bools("jax_remat"),
test_utils.named_bools("inline_hk_remat"))
def test_create_module_inside_remat(self, jax_remat, inline_hk_remat):
log = []
def forward(x):
def create_and_use_layer(x):
m = SquareModule(name="layer")
log.append(m.module_name)
return m(x)
if not inline_hk_remat:
create_and_use_layer = stateful.remat(create_and_use_layer)
for _ in range(2):
if inline_hk_remat:
x = stateful.remat(create_and_use_layer)(x)
else:
x = create_and_use_layer(x)
return x
def reset():
del log[:]
self.assertEmpty(log)
# Test forward.
x = jnp.float32(3)
forward = transform.transform_with_state(forward)
params, state = forward.init(None, x)
self.assertEqual(log, ["layer", "layer_1"])
reset()
# Test backward.
for _ in range(3):
grad_fn = jax.grad(lambda x: forward.apply(params, state, None, x)[0])
if jax_remat:
grad_fn = jax.remat(grad_fn)
self.assertEqual(int(grad_fn(x)), int(4 * (x ** 3)))
self.assertEqual(log, ["layer", "layer_1"])
reset()
@parameterized.parameters(True, False)
def test_cond(self, single_arg):
def f(x):
mod = SquareModule()
if single_arg:
return stateful.cond(x == 2, mod, lambda x: mod(x + 1), x)
else:
return stateful.cond(x == 2, x, mod, x, lambda x: mod(x + 1))
f = transform.transform_with_state(f)
for x, y in ((1, 4), (2, 4), (3, 16)):
x, y = map(jnp.array, (x, y))
params, state = f.init(None, x)
out, state = f.apply(params, state, None, x)
self.assertEqual(state, {"square_module": {"y": y}})
self.assertEqual(out, y)
@test_utils.transform_and_run
def test_cond_traces_branches_with_same_id_once(self):
witness = []
def f(x):
witness.append(None)
return x ** 2
stateful.cond(False, f, f, 0)
hk_call_count = len(witness)
self.assertEqual(hk_call_count, 1)
# Ensure we are in sync with JAX.
del witness[:]
jax.lax.cond(False, f, f, 0)
jax_call_count = len(witness)
self.assertEqual(hk_call_count, jax_call_count)
@test_utils.transform_and_run
def test_cond_no_args(self):
x = stateful.cond(True, lambda: 5, lambda: 4)
self.assertEqual(x, 5)
@test_utils.transform_and_run
def test_cond_operand_kwarg(self):
x = stateful.cond(True, lambda x: x + 5, lambda x: x + 4, operand=1)
self.assertEqual(x, 6)
@test_utils.transform_and_run
def test_cond_operand_kwarg_and_operands(self):
with self.assertRaisesRegex(ValueError, "cannot.*pass.*positionally"):
stateful.cond(True, lambda x: x + 5, lambda x: x + 4, 1, operand=1)
@test_utils.transform_and_run
def test_cond_two_args(self):
a, b = stateful.cond(True,
lambda a, b: (b, a),
lambda a, b: (a, b),
2, 1)
self.assertEqual(a, 1)
self.assertEqual(b, 2)
@test_utils.transform_and_run
def test_cond_three_args(self):
a, b, c = stateful.cond(True,
lambda a, b, c: (c, b, a),
lambda a, b, c: (a, b, c),
3, 2, 1)
self.assertEqual(a, 1)
self.assertEqual(b, 2)
self.assertEqual(c, 3)
def test_cond_no_transform(self):
x = jnp.array(3.)
with self.assertRaises(ValueError, msg="Use jax.cond() instead"):
stateful.cond(x == 2, x, jnp.square, x, lambda x: jnp.square(x + 1))
def test_switch(self):
def f(i, x):
mod = SquareModule()
branches = [mod, lambda x: mod(x + 1), lambda x: mod(x + 2)]
return stateful.switch(i, branches, x)
f = transform.transform_with_state(f)
for i, x, y in ((0, 1, 1), (1, 2, 9), (2, 3, 25)):
i, x, y = map(jnp.array, (i, x, y))
params, state = f.init(None, i, x)
out, state = f.apply(params, state, None, i, x)
self.assertEqual(state, {"square_module": {"y": y}})
self.assertEqual(out, y)
def test_switch_multiple_operands(self):
def f(i, x, y, z):
mod = SquareModule()
branches = [lambda x, y, z: mod(x),
lambda y, x, z: mod(x),
lambda y, z, x: mod(x),
]
return stateful.switch(i, branches, x, y, z)
f = transform.transform_with_state(f)
xyz = (1, 3, 5)
for i in range(3):
params, state = f.init(None, i, *xyz)
out, state = f.apply(params, state, None, i, *xyz)
expected_out = xyz[i]**2
self.assertEqual(state, {"square_module": {"y": expected_out}})
self.assertEqual(out, expected_out)
@test_utils.transform_and_run(run_apply=False)
def test_cond_branch_structure_error(self):
true_fn = lambda x: base.get_parameter("w", x.shape, x.dtype, init=jnp.ones)
false_fn = lambda x: x
with self.assertRaisesRegex(TypeError, "Hint: A common mistake"):
stateful.cond(False, true_fn, false_fn, 0)
@test_utils.transform_and_run(run_apply=False)
def test_switch_branch_structure_error(self):
branches = [
lambda x: base.get_parameter("w", x.shape, x.dtype, init=jnp.ones),
lambda x: x,
]
with self.assertRaisesRegex(TypeError, "Hint: A common mistake"):
stateful.switch(0, branches, 0)
@parameterized.parameters(1, 2, 4, 8)
@test_utils.transform_and_run
def test_switch_traces_cases_with_same_id_once(self, n):
f_witness = []
g_witness = []
def f(x):
f_witness.append(None)
return x ** 2
def g(x):
g_witness.append(None)
return x ** 2
stateful.switch(0, [f, g] * n, 2)
f_hk_call_count = len(f_witness)
g_hk_call_count = len(g_witness)
self.assertEqual(f_hk_call_count, 1)
self.assertEqual(g_hk_call_count, 1)
# Ensure we are in sync with JAX.
del f_witness[:], g_witness[:]
jax.lax.switch(0, [f, g] * n, 2)
f_jax_call_count = len(f_witness)
g_jax_call_count = len(g_witness)
self.assertEqual(f_hk_call_count, f_jax_call_count)
self.assertEqual(f_hk_call_count, g_jax_call_count)
def test_switch_no_transform(self):
i = jnp.array(2)
x = jnp.array(42.)
with self.assertRaises(ValueError, msg="Use jax.switch() instead"):
stateful.switch(i, [jnp.square] * 3, x)
@test_utils.transform_and_run
def test_difference_empty(self):
before = stateful.internal_state()
after = stateful.internal_state()
self.assertEmpty(
jax.tree_util.tree_leaves(stateful.difference(before, after)))
@parameterized.parameters(base.get_parameter, base.get_state)
@test_utils.transform_and_run(run_apply=False)
def test_difference_new(self, get_x):
get_x("a", [], init=jnp.zeros)
before = stateful.internal_state()
b = get_x("b", [], init=jnp.zeros)
after = stateful.internal_state()
diff = stateful.difference(before, after)
if get_x == base.get_state:
self.assertEmpty(diff.params)
self.assertEqual(diff.state, {"~": {"a": None,
"b": base.StatePair(b, b)}})
else:
self.assertEqual(diff.params, {"~": {"a": None, "b": b}})
self.assertEmpty(diff.state)
self.assertIsNone(diff.rng)
@test_utils.transform_and_run(run_apply=False)
def test_difference_update_state(self):
base.get_state("a", [], init=jnp.zeros)
base.get_state("b", [], init=jnp.zeros)
before = stateful.internal_state()
base.set_state("b", jnp.ones([]))
after = stateful.internal_state()
diff = stateful.difference(before, after)
self.assertEmpty(diff.params)
self.assertEqual(diff.state, {"~": {"a": None,
"b": base.StatePair(0., 1.)}})
self.assertIsNone(diff.rng)
@test_utils.transform_and_run(run_apply=False)
def test_difference_rng(self):
before = stateful.internal_state()
base.next_rng_key()
after = stateful.internal_state()
diff = stateful.difference(before, after)
self.assertEmpty(diff.params)
self.assertEmpty(diff.state)
self.assertIsNotNone(diff.rng)
def test_scan_no_transform(self):
xs = jnp.arange(3)
with self.assertRaises(ValueError, msg="Use jax.lax.scan() instead"):
stateful.scan(lambda c, x: (c, x), (), xs)
@parameterized.parameters(0, 1, 2, 4, 8)
def test_scan_with_state(self, unroll_length):
def f(xs):
m = CountingModule()
def sf(c, x):
self.assertEqual(c, ())
return c, m(x)
_, ys = stateful.scan(sf, (), xs)
return ys
f = transform.transform_with_state(f)
key = jax.random.PRNGKey(42)
init_key, apply_key = jax.random.split(key)
xs = jnp.arange(unroll_length)
params, state = f.init(init_key, xs)
self.assertEqual(list(state), ["counting_module"])
self.assertEqual(list(state["counting_module"]), ["count"])
np.testing.assert_allclose(state["counting_module"]["count"], 0, rtol=1e-4)
ys, state = f.apply(params, state, apply_key, xs)
np.testing.assert_allclose(state["counting_module"]["count"], unroll_length,
rtol=1e-4)
np.testing.assert_allclose(ys, xs ** 2, rtol=1e-4)
@parameterized.parameters(0, 1, 2, 8)
@test_utils.transform_and_run
@with_rng_reserve_size
def test_stateful_scan_with_rng_use(self, iteration_count):
def body_fun(c, x):
for _ in range(10):
_ = base.next_rng_key()
return c, x
base.reserve_rng_keys(5)
_ = stateful.scan(body_fun, (), (), length=iteration_count)
@parameterized.parameters(0, 1, 2, 8)
@test_utils.transform_and_run
@with_rng_reserve_size
def test_stateful_fori_with_rng_use(self, iteration_count):
def body_fun(_, x):
for _ in range(10):
_ = base.next_rng_key()
return x
base.reserve_rng_keys(5)
_ = stateful.fori_loop(0, iteration_count, body_fun, 1)
@test_utils.transform_and_run
@with_rng_reserve_size
def test_stateful_cond_with_rng_use(self):
# Using a different number of keys in different branches should not
# result in an error.
def true_branch(x):
_ = base.next_rng_key()
return x
def false_branch(x):
_ = base.next_rng_key()
_ = base.next_rng_key()
return x
base.reserve_rng_keys(5)
_ = stateful.cond(True, true_branch, false_branch, 0)
_ = stateful.cond(False, true_branch, false_branch, 0)
@test_utils.transform_and_run
@with_rng_reserve_size
def test_stateful_switch_with_rng_use(self):
# Using a different number of keys in different branches should not
# result in an error.
def branch_f(i):
for _ in range(i):
_ = base.next_rng_key()
return i
base.reserve_rng_keys(5)
branches = [lambda _, i=i: branch_f(i) for i in range(5)]
self.assertEqual(stateful.switch(3, branches, None), 3)
self.assertEqual(stateful.switch(0, branches, None), 0)
@test_utils.transform_and_run
def test_stateful_while_loop_with_rng_use(self):
def body_fun(i):
_ = base.next_rng_key()
_ = base.next_rng_key()
return i+1
base.reserve_rng_keys(5)
if transform.running_init():
body_fun(0)
else:
stateful.while_loop(lambda i: i < 7, body_fun, 0) # does not crash.
@parameterized.parameters(*it.product((0, 1, 2, 4, 8), (1, 2, 3)))
@test_utils.transform_and_run
def test_fori(self, lower, n):
upper = lower + n
m = CountingModule()
y = stateful.fori_loop(lower, upper, lambda i, x: m(i), 2)
self.assertEqual(y, jnp.square(upper - 1))
self.assertEqual(m.count, upper - lower)
@test_utils.transform_and_run
def test_fori_traced_length(self):
m = CountingModule()
def f(lower, upper):
y = stateful.fori_loop(lower, upper, lambda i, x: m(i), 2)
return y
# Because of the jit, lower and upper will be tracers.
out = stateful.jit(f)(0, 3)
self.assertEqual(out, 4)
self.assertEqual(m.count, 3)
@test_utils.transform_and_run
def test_map(self):
x = np.zeros((10, 10), dtype=np.float32)
def f(x):
self.assertLen(x.shape, 1)
return x + jax.random.uniform(base.next_rng_key())
if transform.running_init():
f(x[0])
else:
stateful.map(f, x)
def test_vmap(self):
def g(x):
return CountingModule()(x)
def f(x):
return stateful.vmap(g, split_rng=False)(x)
f = transform.transform_with_state(f)
x = jnp.ones([4]) + 1
params, state = f.init(None, x)
# State should not be mapped.
self.assertEmpty(params)
cnt, = jax.tree_util.tree_leaves(state)
self.assertEqual(cnt.ndim, 0)
self.assertEqual(cnt, 0)
# The output should be mapped but state should not be.
y, state = f.apply(params, state, None, x)
self.assertEqual(y.shape, (4,))
np.testing.assert_allclose(y, x ** 2)
cnt, = jax.tree_util.tree_leaves(state)
self.assertEqual(cnt.ndim, 0)
self.assertEqual(cnt, 1)
def test_vmap_must_be_called_in_transform(self):
f = stateful.vmap(lambda x: x, split_rng=False)
with self.assertRaisesRegex(ValueError,
"must be used as part of an.*hk.transform"):
f(0)
@test_utils.transform_and_run
def test_vmap_no_in_axes(self):
def fn_name(_):
pass
with self.assertRaisesRegex(
ValueError, "fn_name must have at least one non-None value in in_axes"):
stateful.vmap(fn_name, in_axes=None, split_rng=False)
@test_utils.transform_and_run
def test_vmap_in_axes_different_size(self):
x = jnp.ones([1, 2])
with self.assertRaisesRegex(
ValueError, "vmap got inconsistent sizes for array axes to be mapped"):
stateful.vmap(lambda a, b: None, in_axes=(0, 1), split_rng=False)(x, x)
@test_utils.transform_and_run
def test_vmap_in_axes_supports_list(self):
a = jnp.ones([4])
b = stateful.vmap(lambda a: a * 2, in_axes=[0], split_rng=False)(a)
np.testing.assert_array_equal(b, a * 2)
@test_utils.transform_and_run
def test_vmap_no_split_rng(self):
key_before = base.next_rng_key()
f = stateful.vmap(lambda _: base.next_rng_key(), split_rng=False)
x = jnp.arange(4)
k1, k2, k3, k4 = f(x)
key_after = base.next_rng_key()
np.testing.assert_array_equal(k1, k2)
np.testing.assert_array_equal(k2, k3)
np.testing.assert_array_equal(k3, k4)
self.assertFalse(np.array_equal(key_before, k1))
self.assertFalse(np.array_equal(key_after, k1))
self.assertFalse(np.array_equal(key_before, key_after))
@test_utils.transform_and_run
def test_vmap_split_rng(self):
key_before = base.next_rng_key()
f = stateful.vmap(lambda _: base.next_rng_key(), split_rng=True)
x = jnp.arange(4)
k1, k2, k3, k4 = f(x)
key_after = base.next_rng_key()
# Test that none of the keys are equal.
named_keys = (("k1", k1), ("k2", k2), ("k3", k3), ("k4", k4),
("key_before", key_before), ("key_after", key_after))
for (a_name, a), (b_name, b) in it.combinations(named_keys, 2):
self.assertFalse(
np.array_equal(a, b),
msg=f"Keys should not be equal, but {a_name} == {b_name}")
@test_utils.transform_and_run(run_apply=False)
def test_vmap_split_rng_better_out_axes_error(self):
def creates_params(_):
base.get_parameter("mapped",
(), jnp.float32,
init=initializers.TruncatedNormal())
f = stateful.vmap(creates_params, split_rng=True)
x = jnp.arange(4)
with self.assertRaisesRegex(ValueError,
"split_rng to True during initialization"):
f(x)
@test_utils.transform_and_run(run_apply=False)
def test_vmap_split_rng_out_axes_error_no_split_rng(self):
f = stateful.vmap(lambda x: x, split_rng=False, out_axes=None)
x = jnp.arange(4)
with self.assertRaisesRegex(ValueError,
"vmap has mapped output but out_axes is None"):
# Test that our split_rng error does not clobber the JAX error message.
f(x)
def test_vmap_split_rng_out_axes_error_no_init(self):
@transform.transform
def g(x):
f = stateful.vmap(lambda x: x, split_rng=True, out_axes=None)
f(x)
x = jnp.arange(4)
with self.assertRaisesRegex(ValueError,
"vmap has mapped output but out_axes is None"):
# Test that our split_rng error does not clobber the JAX error message.
g.apply({}, jax.random.PRNGKey(42), x)
def test_while_loop_rejected_in_init(self):
def f():
stateful.while_loop(lambda x: x.all(), lambda x: not x, 1)
f = transform.transform(f)
with self.assertRaisesRegex(
ValueError, "hk.while_loop does not support initialization"):
f.init(None)
def test_updating_state_in_cond_fails(self):
def f(x):
m = CountingModule(op=lambda x: x + 1)
if not base.params_frozen():
return m(x)
else:
stateful.while_loop(m, lambda x: x, x)
f = transform.transform_with_state(f)
x = jnp.zeros([])
params, state = f.init(None, x)
with self.assertRaisesRegex(
ValueError,
"does not support.*set_state.*next_rng_key.*in.*cond_fun`"):
f.apply(params, state, None, x)
def test_rng_in_cond_fails(self):
def f(x):
m = CountingModule(op=lambda x: x + 1)
if not base.params_frozen():
return m(x)
else:
stateful.while_loop(lambda _: base.next_rng_key(), lambda x: x, x)
f = transform.transform_with_state(f)
x = jnp.zeros([])
params, state = f.init(None, x)
with self.assertRaisesRegex(
ValueError,
"does not support.*set_state.*next_rng_key.*in.*cond_fun`"):
f.apply(params, state, jax.random.PRNGKey(42), x)
@parameterized.parameters(0, 1, 2, 4, 8)
def test_while_loop_with_state(self, iters):
def f(x):
m = CountingModule(op=lambda x: x + 1)
if not base.params_frozen():
return m(x)
else:
_, y = stateful.while_loop(lambda a: a[0] < iters,
lambda a: (a[0] + 1, m(a[1])),
(0, x))
return y
f = transform.transform_with_state(f)
x = jnp.zeros([])
params, state = f.init(None, x)
self.assertEqual(list(state), ["counting_module"])
self.assertEqual(list(state["counting_module"]), ["count"])
np.testing.assert_allclose(state["counting_module"]["count"], x, rtol=1e-4)
y, state = f.apply(params, state, None, x)
np.testing.assert_allclose(state["counting_module"]["count"], iters,
rtol=1e-4)
np.testing.assert_allclose(y, iters, rtol=1e-4)
def test_eval_shape(self):
def some_shape_changing_fun(x):
return x[0, :]
def f(x):
m = CountingModule(op=some_shape_changing_fun)
# state is not changed in this call
out_shape_struct = stateful.eval_shape(m, x)
return m(x), out_shape_struct
f = transform.transform_with_state(f)
key = jax.random.PRNGKey(42)
in_shape = (10, 10)
x = jnp.ones(in_shape)
params, state = f.init(key, x)
self.assertEqual(list(state), ["counting_module"])
self.assertEqual(list(state["counting_module"]), ["count"])
np.testing.assert_allclose(state["counting_module"]["count"], 0, rtol=1e-4)
(out, shape_struct), state = f.apply(params, state, key, x)
# Count is only advanced once
np.testing.assert_allclose(state["counting_module"]["count"], 1, rtol=1e-4)
np.testing.assert_allclose(out, some_shape_changing_fun(x), rtol=1e-4)
self.assertEqual(shape_struct.shape, (in_shape[1],))
def test_eval_shape_no_transform(self):
x = jnp.array(3.)
with self.assertRaises(ValueError, msg="Use jax.eval_shape() instead"):
stateful.eval_shape(jnp.square)(x)
@test_utils.transform_and_run
def test_temporary_state_resets_names(self):
with stateful.temporary_internal_state(stateful.internal_state()):
mod1 = module.Module(name="foo")
mod2 = module.Module(name="foo")
self.assertEqual(mod1.module_name, "foo")
self.assertEqual(mod2.module_name, "foo")
@test_utils.transform_and_run(run_apply=False)
def test_eval_shape_no_leaked_tracers_under_leak_checker(self):
with jax.checking_leaks():
stateful.eval_shape(SquareModule(), jnp.ones(())) # does not crash
@test_utils.combined_named_parameters(base_test.SIDE_EFFECTING_FUNCTIONS,
HK_OVERLOADED_JAX_PURE_EXPECTING_FNS)
@test_utils.transform_and_run
@test_utils.with_guardrails
def test_safe_use_of_jax(self, haiku_side_effect_fn, hk_jax_fn):
if "reserve_rng_keys_while_loop" in self._testMethodName:
self.skipTest("Expected not to work.")
# Make `f` an identity function that also invokes the side-effecting function.
f = hk_jax_fn(lambda x: [haiku_side_effect_fn(), x][1])
x = jnp.ones([1])
# These functions should not trigger exceptions from our guardrails.
f(x)
@test_utils.transform_and_run
def test_vmap_split_rng_with_default(self):
with self.assertRaisesRegex(TypeError,
"hk.vmap.require_split_rng = False"):
# Intentionally missing split_rng arg.
stateful.vmap(lambda: None)
with self.subTest("require_split_rng=0"):
stateful.vmap.require_split_rng = False
try:
# This call should not trigger an error, even though we are missing the
# split_rng argument which appears required (if you look at the function
# signature). It only works because require_split_rng is
# propagated to vmap via a sneaky decorator. This only exists to support
# users who import code that they cannot edit (e.g. from a read only
# file system) that is not passing the argument.
f = stateful.vmap(base.next_rng_key, axis_size=2)
finally:
stateful.vmap.require_split_rng = True
# Check that split_rng=False was implied.
k1, k2 = f()
self.assertTrue((k1 == k2).all())
@parameterized.parameters(True, False)
@test_utils.transform_and_run
def test_vmap_split_rng_without_default(self, require_split_rng):
# Tests that when split_rng is passed explicitly the value of
# require_split_rng has no impact.
x = jnp.arange(2)
stateful.vmap.require_split_rng = require_split_rng
k1, k2 = stateful.vmap(lambda x: base.next_rng_key(), split_rng=True)(x)
self.assertTrue((k1 != k2).all())
k1, k2 = stateful.vmap(lambda x: base.next_rng_key(), split_rng=False)(x)
self.assertTrue((k1 == k2).all())
stateful.vmap.require_split_rng = True
def _callback_prim(forward, backward):
def f_impl(x):
forward()
return x
def b_impl(x):
backward()
return (x,)
prim = jax.core.Primitive("hk_callback")
prim.def_impl(f_impl)
prim.def_abstract_eval(f_impl)
jax.interpreters.ad.deflinear(prim, b_impl)
return prim.bind
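# Illustrative usage (a sketch for clarity, not part of the original tests):
# `_callback_prim` registers a JAX primitive whose (abstract and concrete)
# forward evaluation calls `forward()` and whose linear transpose calls
# `backward()`, letting `test_remat` count how often JAX re-runs the forward
# computation. Inside a transformed function (as in test_remat):
#
#   fwd, bwd = [], []
#   callback = _callback_prim(lambda: fwd.append(None), lambda: bwd.append(None))
#   y, g = stateful.value_and_grad(lambda x: callback(x ** 2))(jnp.array(3.))
#   # len(fwd) and len(bwd) now record forward/backward executions.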
class CountingModule(module.Module):
def __init__(self, op=jnp.square, name=None):
super().__init__(name=name)
self.op = op
@property
def count(self):
return base.get_state("count", [], init=jnp.zeros)
def __call__(self, x):
y = self.op(x)
base.set_state("count", self.count + 1)
return y
class SquareModule(module.Module):
def __call__(self, x):
assert x.ndim == 0
p = base.get_parameter("p", [], jnp.int32, init=lambda *_: jnp.array(2))
y = x ** p
base.set_state("y", y)
return y
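# Note on the helper modules above (illustrative, not part of the original
# tests): `SquareModule` computes x ** p with p initialised to 2 and stores
# the result in state, so a single module has gradient 2 * x (see test_grad),
# while two chained copies compute (x ** 2) ** 2 = x ** 4 with gradient
# 4 * x ** 3, which is the value checked in test_create_module_inside_remat.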
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/stateful_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for working with random numbers."""
import contextlib
import functools
from haiku._src import base
from haiku._src import data_structures
import jax
@contextlib.contextmanager
def count_hk_rngs_requested():
"""Context manager counting calls to next_rng_key."""
# TODO(tomhennigan): Catch keys from `next_rng_keys`, `maybe_next_rng_key`, ..
# TODO(tomhennigan): Don't include keys returned under a `with_rng` context.
# TODO(tomhennigan): Optimize use of keys within a `with_rng` block as well.
count = [0]
orig_next_rng_key = base.next_rng_key_internal
def counting_next_rng_key(*a, **k):
count[0] += 1
return orig_next_rng_key(*a, **k)
try:
base.next_rng_key_internal = counting_next_rng_key
yield lambda: count[0]
finally:
base.next_rng_key_internal = orig_next_rng_key
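# Minimal usage sketch (illustrative, mirroring `optimize_rng_use` below):
#
#   with count_hk_rngs_requested() as rng_count_f:
#     jax.eval_shape(pure_fun, params, state, rng, *args, **kwargs)
#     rng_count = rng_count_f()  # Number of next_rng_key calls observed.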
def optimize_rng_use(fun):
"""Optimizes a RNG key splitting in ``fun``.
Our strategy here is to use abstract interpretation to run your function
twice, the first time we use :func:`jax.eval_shape` to avoid spending any
flops and simply observe how many times you call :func:`~haiku.next_rng_key`.
We then run your function again, but this time we reserve enough RNG keys
ahead of time such that we only need to call :func:`jax.random.split` once.
In the following example, we need three random samples for our weight
matrices in our 3-layer MLP. To draw these samples we use
:func:`~haiku.next_rng_key` which will split a new key for each sample. By
using :func:`optimize_rng_use` Haiku will pre-allocate exactly enough RNGs for
``f`` to be evaluated by splitting the input key once and only once. For large
models (unlike this example) this can lead to a reduction in compilation time
of both ``init`` and ``apply``, with ``init`` seeing a larger expected speedup
as it performs more RNG key splitting in general.
>>> def f(x):
... net = hk.nets.MLP([300, 100, 10])
... return net(x)
>>> f = hk.experimental.optimize_rng_use(f)
>>> f = hk.transform(f)
>>> params = f.init(jax.random.PRNGKey(42), jnp.ones([1, 1]))
Args:
fun: A function to wrap.
Returns:
A function that applies ``fun`` but only requires one call to
:func:`jax.random.split` by Haiku.
"""
@functools.wraps(fun)
def wrapper(*args, **kwargs):
base.assert_context("optimize_rng_use")
# Extract all current state.
frame = base.current_frame()
params = frame.params or None
if params is not None:
params = data_structures.to_haiku_dict(params)
state = frame.state or None
if state is not None:
state = base.extract_state(state, initial=True)
rng = frame.rng_stack.peek()
if rng is not None:
rng = rng.internal_state
def pure_fun(params, state, rng, *args, **kwargs):
with base.new_context(params=params, state=state, rng=rng):
return fun(*args, **kwargs)
with count_hk_rngs_requested() as rng_count_f:
jax.eval_shape(pure_fun, params, state, rng, *args, **kwargs)
rng_count = rng_count_f()
if rng_count:
base.current_frame().rng_stack.peek().reserve(rng_count)
return fun(*args, **kwargs)
return wrapper
|
dm-haiku-main
|
haiku/_src/random.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.summarise."""
# pylint: disable=unnecessary-lambda
from collections.abc import Sequence
import typing
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import base
from haiku._src import basic
from haiku._src import module as module_lib
from haiku._src import summarise
from haiku._src import test_utils
from haiku._src import transform
import jax
import jax.numpy as jnp
def tabulate_to_list(
f,
*args,
columns=None,
filters=None,
) -> Sequence[Sequence[str]]:
old_tabulate = summarise.tabulate_lib.tabulate
summarise.tabulate_lib.tabulate = lambda rows, **_: rows
try:
out = summarise.tabulate(f, columns=columns, filters=filters)(*args)
finally:
summarise.tabulate_lib.tabulate = old_tabulate
if out == "No modules matching filters.":
return []
else:
out = typing.cast(Sequence[Sequence[str]], out)
return out
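# Illustrative behaviour (a sketch for clarity, not part of the original
# tests): `tabulate_to_list` swaps the real tabulate function for one that
# returns the raw rows, so tests can assert on data rather than formatting,
# e.g. per `test_module_column` below:
#
#   rows = tabulate_to_list(f, columns=("module",))
#   # -> [["foo (IdentityModule)"], ["bar (IdentityModule)"]]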
def get_summary(f, *args):
return summarise.eval_summary(f)(*args)
class SummariseTest(parameterized.TestCase):
def test_empty(self):
self.assertEmpty(get_summary(lambda: None))
def test_filters_ctor_only(self):
f = lambda: IdentityModule() # NOTE: Just calling ctor.
self.assertEmpty(get_summary(f))
@parameterized.parameters(*range(1, 5))
def test_one_row_per_method_call(self, num_calls):
def f():
m = IdentityModule()
for _ in range(num_calls):
m(x)
x = jnp.ones([])
invocations = get_summary(f)
self.assertLen(invocations, num_calls)
for invocation in invocations[1:]:
self.assertEqual(
invocations[0].context.method_name, invocation.context.method_name
)
@test_utils.combined_named_parameters(
test_utils.named_bools("params"), test_utils.named_range("num_elems", 8)
)
def test_params_or_state(self, params, num_elems):
def cls():
for i in range(num_elems):
g = base.get_parameter if params else base.get_state
g(f"x{i}", [], init=jnp.zeros)
f = lambda: basic.to_module(cls)(name="foo")()
invocations = get_summary(f)
(invocation,) = invocations
details = invocation.module_details
d = details.params if params else details.state
self.assertEqual(list(d), [f"foo/x{i}" for i in range(num_elems)])
def test_jitted_f(self):
witness = []
def f(x):
witness.append(None)
return basic.Linear(1)(x)
f = transform.transform(f)
rng = jax.random.PRNGKey(42)
x = jnp.zeros([1, 1])
params = f.init(rng, x)
del witness[:]
# This layer of indirection (`g`) means summarise cannot unpack `f` and
# strip our jit.
jit_apply = jax.jit(f.apply)
g = lambda params, x: jit_apply(params, None, x)
for _ in range(2):
g(params, x) # Warm up JIT.
self.assertLen(witness, 1)
summary = get_summary(g, params, x)
self.assertLen(summary, 1)
class TabulateTest(parameterized.TestCase):
def test_filters_no_output(self):
f = lambda: NoOutputModule()()
self.assertEmpty(tabulate_to_list(f))
def test_includes_no_param_modules(self):
dropout_cls = basic.to_module(
lambda x: basic.dropout(base.next_rng_key(), 0.5, x)
)
x = jnp.ones([4])
f = lambda: dropout_cls(name="dropout")(x)
rows = tabulate_to_list(f, columns=("module",))
expected = [["dropout (ToModuleWrapper)"]]
self.assertEqual(rows, expected)
def test_module_column(self):
def f():
IdentityModule(name="foo")(1)
IdentityModule(name="bar")(1)
rows = tabulate_to_list(f, columns=("module",))
expected = [["foo (IdentityModule)"], ["bar (IdentityModule)"]]
self.assertEqual(rows, expected)
def test_config_column(self):
def f():
IdentityModule(name="foo")(1)
IdentityModule(name="bar")(1)
rows = tabulate_to_list(f, columns=("config",))
expected = [["IdentityModule(name='foo')"], ["IdentityModule(name='bar')"]]
self.assertEqual(rows, expected)
def test_owned_params_column(self):
f = lambda: CallsOtherModule(MultipleParametersModule())()
rows = tabulate_to_list(f, columns=("owned_params",))
expected = [[""], ["b: f32[40,50,60]\nw: f32[10,20,30]"]]
self.assertEqual(rows, expected)
def test_owned_params_sorted_by_size_then_name(self):
def f():
base.get_parameter("a", [1], init=jnp.zeros)
base.get_parameter("b", [2], init=jnp.zeros)
base.get_parameter("c", [2], init=jnp.zeros)
base.get_parameter("d", [3], init=jnp.zeros)
return 0
f = lambda f=f: basic.to_module(f)()()
rows = tabulate_to_list(f, columns=("owned_params",))
expected = [["d: f32[3]\nb: f32[2]\nc: f32[2]\na: f32[1]"]]
self.assertEqual(rows, expected)
def test_output_column_structured(self):
f = lambda: IdentityModule()( # pylint: disable=g-long-lambda
{
"a": jnp.ones([32, 32]),
"b": [jnp.zeros([1]), jnp.zeros([], jnp.int16)],
}
)
rows = tabulate_to_list(f, columns=("output",))
expected = [["{'a': f32[32,32], 'b': [f32[1], s16[]]}"]]
self.assertEqual(rows, expected)
def test_params_size_column(self):
f = lambda: CallsOtherModule(MultipleParametersModule())()
rows = tabulate_to_list(f, columns=("params_size",))
size = "126,000"
expected = [[size], [size]]
self.assertEqual(rows, expected)
def test_params_bytes_column(self):
f = lambda: CallsOtherModule(MultipleParametersModule())()
rows = tabulate_to_list(f, columns=("params_bytes",))
size = "504.00 KB"
expected = [[size], [size]]
self.assertEqual(rows, expected)
def test_invalid_column(self):
with self.assertRaisesRegex(ValueError, "Invalid column.*nonsense"):
tabulate_to_list(lambda: None, columns=("nonsense",))
def test_invalid_filter(self):
with self.assertRaisesRegex(ValueError, "Invalid filter.*nonsense"):
tabulate_to_list(lambda: None, filters=("nonsense",))
def test_f_accepts_tabulate_kwargs(self):
tabulate_kwargs = {"tablefmt": "html"}
f = lambda: CallsOtherModule(MultipleParametersModule())()
output = summarise.tabulate(f, tabulate_kwargs=tabulate_kwargs)()
self.assertIn("<table>", output)
@parameterized.parameters(lambda f: f, jax.jit, jax.pmap)
def test_jax_transformed_wrapper(self, jax_transform):
# Happens in practice if someone asks for a `summary(pmap(train_step))`
f = lambda: CallsOtherModule(MultipleParametersModule())()
f = transform.transform(f)
rng = jax.random.PRNGKey(42)
if jax_transform == jax.pmap:
rng = jnp.broadcast_to(rng, (1, *rng.shape))
params = jax_transform(f.init)(rng)
g = jax_transform(lambda params, rng: f.apply(params, rng))
rows = tabulate_to_list(g, params, rng)
self.assertNotEmpty(rows)
@parameterized.parameters(lambda f: f, jax.jit, jax.pmap)
def test_equivalent_when_passing_transformed_fn(self, jax_transform):
f = lambda: CallsOtherModule(MultipleParametersModule())()
f_transform = transform.transform(f)
rows = tabulate_to_list(f)
self.assertNotEmpty(rows)
self.assertEqual(rows, tabulate_to_list(f_transform))
self.assertEqual(rows, tabulate_to_list(jax_transform(f_transform.init)))
self.assertEqual(rows, tabulate_to_list(jax_transform(f_transform.apply)))
class MultipleParametersModule(module_lib.Module):
def __call__(self):
base.get_parameter("w", [10, 20, 30], init=jnp.zeros)
base.get_parameter("b", [40, 50, 60], init=jnp.zeros)
return 1
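# Illustrative arithmetic (not part of the original tests): the module above
# owns w with 10 * 20 * 30 = 6,000 elements and b with 40 * 50 * 60 = 120,000
# elements, i.e. 126,000 parameters in total; at 4 bytes per f32 that is
# 504,000 bytes, which is why the tabulate tests expect "126,000" and
# "504.00 KB" (the row appears twice because CallsOtherModule wraps it).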
class IdentityModule(module_lib.Module):
def __call__(self, x):
base.get_parameter("w", [], init=jnp.zeros)
return x
class NoOutputModule(module_lib.Module):
def __call__(self):
base.get_parameter("w", [], init=jnp.zeros)
class CallsOtherModule(module_lib.Module):
def __init__(self, other, name=None):
super().__init__(name=name)
self.other = other
def __call__(self, *args):
return self.other(*args)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/summarise_test.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Padding module for Haiku."""
from collections import abc
from collections.abc import Sequence
import typing
from typing import Any, Callable, Union
from haiku._src import utils
PadFn = Callable[[int], tuple[int, int]]
# If you are forking replace this block with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
class pad:
PadFn = PadFn
# pylint: enable=invalid-name
def valid(effective_kernel_size: int) -> tuple[int, int]:
"""No padding."""
del effective_kernel_size
return (0, 0)
def same(effective_kernel_size: int) -> tuple[int, int]:
"""Pads such that the output size matches input size for stride=1."""
return ((effective_kernel_size - 1) // 2, effective_kernel_size // 2)
def full(effective_kernel_size: int) -> tuple[int, int]:
"""Maximal padding whilst not convolving over just padded elements."""
return (effective_kernel_size - 1, effective_kernel_size - 1)
def causal(effective_kernel_size: int) -> tuple[int, int]:
"""Pre-padding such that output has no dependence on the future."""
return (effective_kernel_size - 1, 0)
def reverse_causal(effective_kernel_size: int) -> tuple[int, int]:
"""Post-padding such that output has no dependence on the past."""
return (0, effective_kernel_size - 1)
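# Illustrative values for an effective kernel size of 3 (a sketch for
# clarity, not part of the original module):
#
#   valid(3)           -> (0, 0)
#   same(3)            -> (1, 1)
#   full(3)            -> (2, 2)
#   causal(3)          -> (2, 0)
#   reverse_causal(3)  -> (0, 2)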
def create_from_padfn(
padding: Union[hk.pad.PadFn, Sequence[hk.pad.PadFn]],
kernel: Union[int, Sequence[int]],
rate: Union[int, Sequence[int]],
n: int,
) -> Sequence[tuple[int, int]]:
"""Generates the padding required for a given padding algorithm.
Args:
padding: callable/tuple or a sequence of callables/tuples. The callables
take an integer representing the effective kernel size (kernel size when
the rate is 1) and return a sequence of two integers representing the
padding before and padding after for that dimension. The tuples are
defined with two elements, padding before and after. If `padding` is a
sequence it must be of length 1 or `n`.
kernel: int or sequence of ints of length ``n``. The size of the kernel for
each dimension. If it is an int it will be replicated for the non channel
and batch dimensions.
rate: int or sequence of ints of length ``n``. The dilation rate for each
dimension. If it is an int it will be replicated for the non channel and
batch dimensions.
n: the number of spatial dimensions.
Returns:
A sequence of length n containing the padding for each element. These are of
the form ``[pad_before, pad_after]``.
"""
# The effective kernel size includes any holes/gaps introduced by the
# dilation rate. It's equal to kernel_size when rate == 1.
effective_kernel_size = map(
lambda kernel, rate: (kernel - 1) * rate + 1,
utils.replicate(kernel, n, "kernel"), utils.replicate(rate, n, "rate"))
paddings = map(
lambda x, y: x(y), utils.replicate(padding, n, "padding"),
effective_kernel_size)
return tuple(paddings)
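# Worked example (illustrative, not part of the original module): with
# `padding=same`, `kernel=3`, `rate=2` and `n=2`, the effective kernel size
# is (3 - 1) * 2 + 1 = 5 per dimension, so
#
#   create_from_padfn(same, 3, 2, 2)  ->  ((2, 2), (2, 2))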
def create_from_tuple(
padding: Union[tuple[int, int], Sequence[tuple[int, int]]],
n: int,
) -> Sequence[tuple[int, int]]:
"""Create a padding tuple using partially specified padding tuple."""
assert padding, "Padding must not be empty."
if isinstance(padding[0], int):
padding = (padding,) * n
elif len(padding) == 1:
padding = tuple(padding) * n
elif len(padding) != n:
raise TypeError(
f"Padding {padding} must be a Tuple[int, int] or sequence of length 1"
f" or sequence of length {n}.")
padding = typing.cast(Sequence[tuple[int, int]], tuple(padding))
return padding
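# Illustrative behaviour (not part of the original module):
#
#   create_from_tuple((1, 2), 3)                    -> ((1, 2), (1, 2), (1, 2))
#   create_from_tuple([(1, 2)], 3)                  -> ((1, 2), (1, 2), (1, 2))
#   create_from_tuple([(1, 2), (3, 4), (5, 6)], 3)  -> ((1, 2), (3, 4), (5, 6))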
def is_padfn(padding: Union[hk.pad.PadFn, Sequence[hk.pad.PadFn], Any]) -> bool:
"""Tests whether the given argument is a single or sequence of PadFns."""
if isinstance(padding, abc.Sequence):
padding = padding[0]
return callable(padding)
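# Illustrative behaviour (not part of the original module):
#
#   is_padfn(same)           -> True   (a single PadFn)
#   is_padfn([same, valid])  -> True   (checks the first element)
#   is_padfn((1, 2))         -> False  (an explicit padding tuple)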
|
dm-haiku-main
|
haiku/_src/pad.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.typing."""
from typing import Protocol
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import module
from haiku._src import test_utils
from haiku._src import typing
class CallableModule(module.Module):
def __call__(self, a):
return a
HAIKU_PROTOCOLS = (
typing.ModuleProtocol,
typing.SupportsCall,
)
class TypingTest(parameterized.TestCase):
@test_utils.transform_and_run
def test_module_protocol(self):
self.assertNotIsInstance(object(), typing.ModuleProtocol)
self.assertIsInstance(module.Module(), typing.ModuleProtocol)
self.assertIsInstance(CallableModule(), typing.ModuleProtocol)
@test_utils.transform_and_run
def test_supports_call(self):
self.assertIsInstance(CallableModule(), typing.SupportsCall)
self.assertNotIsInstance(module.Module(), typing.SupportsCall)
@parameterized.parameters(*HAIKU_PROTOCOLS)
def test_no_subclassing(self, cls):
msg = ('ExtendsProtocol is a Protocol.*should not be subclassed.*'
f'`class ExtendsProtocol[(]{cls.__name__}, Protocol[)]`')
with self.assertRaisesRegex(TypeError, msg):
class ExtendsProtocol(cls): # pylint: disable=unused-variable
pass
@parameterized.parameters(*HAIKU_PROTOCOLS)
def test_can_subclass_for_new_protocol(self, cls):
# This test ensures that we don't reject creating a new protocol derived
# from our protocols.
class CustomProtocol(cls, Protocol): # pylint: disable=unused-variable,g-wrong-blank-lines
pass
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
haiku/_src/typing_test.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Haiku initializers."""
from collections.abc import Sequence
from typing import Any, Union
from haiku._src import base
from haiku._src.typing import Initializer
import jax
import jax.numpy as jnp
import numpy as np
# If you are forking replace this block with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
next_rng_key = base.next_rng_key
class initializers:
Initializer = Initializer
# pylint: enable=invalid-name
del base
def _compute_fans(shape, fan_in_axes=None):
"""Computes the number of input and output units for a weight shape."""
if len(shape) < 1:
fan_in = fan_out = 1
elif len(shape) == 1:
fan_in = fan_out = shape[0]
elif len(shape) == 2:
fan_in, fan_out = shape
else:
if fan_in_axes is not None:
# Compute fan-in using user-specified fan-in axes.
fan_in = np.prod([shape[i] for i in fan_in_axes])
fan_out = np.prod([s for i, s in enumerate(shape)
if i not in fan_in_axes])
else:
# If no axes are specified, assume convolution kernels (2D, 3D, or more).
# kernel_shape: (..., input_depth, depth)
receptive_field_size = np.prod(shape[:-2])
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
return fan_in, fan_out
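# Worked examples (illustrative, not part of the original module):
#
#   _compute_fans((128, 256))      -> (128, 256)
#   _compute_fans((3, 3, 16, 32))  -> (144, 288)  # 9 * 16 and 9 * 32 for a
#                                                 # 3x3 conv kernel.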
class Constant(hk.initializers.Initializer):
"""Initializes with a constant."""
def __init__(
self, constant: Union[float, int, complex, np.ndarray, jax.Array]
):
"""Constructs a Constant initializer.
Args:
constant: Constant to initialize with.
"""
self.constant = constant
def __call__(self, shape: Sequence[int], dtype: Any) -> jax.Array:
return jnp.broadcast_to(jnp.asarray(self.constant), shape).astype(dtype)
class RandomNormal(hk.initializers.Initializer):
"""Initializes by sampling from a normal distribution."""
def __init__(self, stddev=1., mean=0.):
"""Constructs a :class:`RandomNormal` initializer.
Args:
stddev: The standard deviation of the normal distribution to sample from.
mean: The mean of the normal distribution to sample from.
"""
self.stddev = stddev
self.mean = mean
def __call__(self, shape: Sequence[int], dtype) -> jax.Array:
m = jax.lax.convert_element_type(self.mean, dtype)
s = jax.lax.convert_element_type(self.stddev, dtype)
return m + s * jax.random.normal(hk.next_rng_key(), shape, dtype)
class TruncatedNormal(hk.initializers.Initializer):
"""Initializes by sampling from a truncated normal distribution."""
def __init__(self,
stddev: Union[float, jax.Array] = 1.,
mean: Union[float, complex, jax.Array] = 0.0,
lower: Union[float, jax.Array] = -2.0,
upper: Union[float, jax.Array] = 2.0,
):
"""Constructs a :class:`TruncatedNormal` initializer.
Args:
stddev: The standard deviation parameter of the truncated
normal distribution.
mean: The mean of the truncated normal distribution.
lower: Float or array representing the lower bound for truncation.
upper: Float or array representing the upper bound for truncation.
"""
self.stddev = stddev
self.mean = mean
self.lower = lower
self.upper = upper
def __call__(self, shape: Sequence[int], dtype: Any) -> jax.Array:
real_dtype = jnp.finfo(dtype).dtype
m = jax.lax.convert_element_type(self.mean, dtype)
s = jax.lax.convert_element_type(self.stddev, real_dtype)
is_complex = jnp.issubdtype(dtype, jnp.complexfloating)
if is_complex:
shape = [2, *shape]
unscaled = jax.random.truncated_normal(
hk.next_rng_key(), self.lower, self.upper, shape, real_dtype)
if is_complex:
unscaled = unscaled[0] + 1j * unscaled[1]
return s * unscaled + m
class RandomUniform(hk.initializers.Initializer):
"""Initializes by sampling from a uniform distribution."""
def __init__(self, minval=0., maxval=1.):
"""Constructs a :class:`RandomUniform` initializer.
Args:
minval: The lower limit of the uniform distribution.
maxval: The upper limit of the uniform distribution.
"""
self.minval = minval
self.maxval = maxval
def __call__(self, shape: Sequence[int], dtype: Any) -> jax.Array:
return jax.random.uniform(hk.next_rng_key(), shape, dtype, self.minval,
self.maxval)
class VarianceScaling(hk.initializers.Initializer):
"""Initializer which adapts its scale to the shape of the initialized array.
The initializer first computes the scaling factor ``s = scale / n``, where n
is:
- Number of input units in the weight tensor, if ``mode = fan_in``.
- Number of output units, if ``mode = fan_out``.
- Average of the numbers of input and output units, if ``mode = fan_avg``.
Then, with ``distribution="truncated_normal"`` or ``"normal"``,
samples are drawn from a distribution with a mean of zero and a standard
deviation (after truncation, if used) ``stddev = sqrt(s)``.
With ``distribution=uniform``, samples are drawn from a uniform distribution
within ``[-limit, limit]``, with ``limit = sqrt(3 * s)``.
The variance scaling initializer can be configured to generate other standard
initializers using the scale, mode and distribution arguments. Here are some
example configurations:
============== ==============================================================
Name Parameters
============== ==============================================================
glorot_uniform VarianceScaling(1.0, "fan_avg", "uniform")
glorot_normal VarianceScaling(1.0, "fan_avg", "truncated_normal")
lecun_uniform VarianceScaling(1.0, "fan_in", "uniform")
lecun_normal VarianceScaling(1.0, "fan_in", "truncated_normal")
he_uniform VarianceScaling(2.0, "fan_in", "uniform")
he_normal VarianceScaling(2.0, "fan_in", "truncated_normal")
============== ==============================================================
"""
def __init__(self, scale=1.0, mode='fan_in', distribution='truncated_normal',
fan_in_axes=None):
"""Constructs the :class:`VarianceScaling` initializer.
Args:
scale: Scale to multiply the variance by.
mode: One of ``fan_in``, ``fan_out``, ``fan_avg``
distribution: Random distribution to use. One of ``truncated_normal``,
``normal`` or ``uniform``.
fan_in_axes: Optional sequence of int specifying which axes of the shape
are part of the fan-in. If none provided, then the weight is assumed
to be like a convolution kernel, where all leading dimensions are part
of the fan-in, and only the trailing dimension is part of the fan-out.
Useful if instantiating multi-headed attention weights.
"""
if scale < 0.0:
raise ValueError('`scale` must be a positive float.')
if mode not in {'fan_in', 'fan_out', 'fan_avg'}:
raise ValueError('Invalid `mode` argument:', mode)
distribution = distribution.lower()
if distribution not in {'normal', 'truncated_normal', 'uniform'}:
raise ValueError('Invalid `distribution` argument:', distribution)
self.scale = scale
self.mode = mode
self.distribution = distribution
self.fan_in_axes = fan_in_axes
def __call__(self, shape: Sequence[int], dtype: Any) -> jax.Array:
scale = self.scale
fan_in, fan_out = _compute_fans(shape, self.fan_in_axes)
if self.mode == 'fan_in':
scale /= max(1.0, fan_in)
elif self.mode == 'fan_out':
scale /= max(1.0, fan_out)
else:
scale /= max(1.0, (fan_in + fan_out) / 2.0)
if self.distribution == 'truncated_normal':
stddev = np.sqrt(scale)
# Adjust stddev for truncation.
# Constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
distribution_stddev = np.asarray(.87962566103423978, dtype=dtype)
stddev = stddev / distribution_stddev
return TruncatedNormal(stddev=stddev)(shape, dtype)
elif self.distribution == 'normal':
stddev = np.sqrt(scale)
return RandomNormal(stddev=stddev)(shape, dtype)
else:
limit = np.sqrt(3.0 * scale)
return RandomUniform(minval=-limit, maxval=limit)(shape, dtype)
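# Worked example (illustrative, not part of the original module): for a
# glorot_uniform-style VarianceScaling(1.0, 'fan_avg', 'uniform') and a
# weight of shape (128, 256), n = (128 + 256) / 2 = 192, s = 1 / 192 and the
# uniform limit is sqrt(3 * s) = sqrt(3 / 192) = 0.125, so samples are drawn
# from U(-0.125, 0.125).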
class UniformScaling(hk.initializers.Initializer):
"""Uniform scaling initializer.
Initializes by sampling from a uniform distribution, but with the variance
scaled by the inverse square root of the number of input units, multiplied by
the scale.
"""
def __init__(self, scale=1.0):
"""Constructs the :class:`UniformScaling` initializer.
Args:
scale: Scale to multiply the upper limit of the uniform distribution by.
"""
self.scale = scale
def __call__(self, shape: Sequence[int], dtype: Any) -> jax.Array:
input_size = np.prod(shape[:-1])
max_val = np.sqrt(3 / input_size) * self.scale
return RandomUniform(-max_val, max_val)(shape, dtype)
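# Worked example (illustrative, not part of the original module): for a
# weight of shape (100, 50), input_size = 100, so with scale=1.0 the samples
# are drawn from U(-max_val, max_val) with max_val = sqrt(3 / 100) ~= 0.173.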
class Orthogonal(hk.initializers.Initializer):
"""Uniform scaling initializer."""
def __init__(self, scale=1.0, axis=-1):
"""Construct an initializer for uniformly distributed orthogonal matrices.
These matrices will be row-orthonormal along the axis specified by
``axis``. If the rank of the weight is greater than 2, the shape will be
flattened in all other dimensions and then will be row-orthonormal along the
final dimension. Note that this only works if the ``axis`` dimension is
larger, otherwise the matrix will be transposed (equivalently, it will be
column orthonormal instead of row orthonormal).
If the shape is not square, the matrices will have orthonormal rows or
columns depending on which side is smaller.
Args:
scale: Scale factor.
axis: Which axis corresponds to the "output dimension" of the tensor.
Returns:
An orthogonally initialized parameter.
"""
self.scale = scale
self.axis = axis
def __call__(self, shape: Sequence[int], dtype: Any) -> jax.Array:
if len(shape) < 2:
raise ValueError('Orthogonal initializer requires at least a 2D shape.')
n_rows = shape[self.axis]
n_cols = np.prod(shape) // n_rows
matrix_shape = (n_rows, n_cols) if n_rows > n_cols else (n_cols, n_rows)
norm_dst = jax.random.normal(hk.next_rng_key(), matrix_shape, dtype)
q_mat, r_mat = jnp.linalg.qr(norm_dst)
# Enforce Q is uniformly distributed
q_mat *= jnp.sign(jnp.diag(r_mat))
if n_rows < n_cols:
q_mat = q_mat.T
q_mat = jnp.reshape(q_mat, (n_rows,) + tuple(np.delete(shape, self.axis)))
q_mat = jnp.moveaxis(q_mat, 0, self.axis)
return jax.lax.convert_element_type(self.scale, dtype) * q_mat
class Identity(hk.initializers.Initializer):
"""Initializer that generates the identity matrix.
Constructs a 2D identity matrix or batches of these.
"""
def __init__(self, gain: Union[float, np.ndarray, jax.Array] = 1.0):
"""Constructs an :class:`Identity` initializer.
Args:
gain: Multiplicative factor to apply to the identity matrix.
"""
self.gain = gain
def __call__(self, shape: Sequence[int], dtype: Any) -> jax.Array:
shape = tuple(shape)
if len(shape) < 2:
raise ValueError('Identity initializer requires at least a 2D shape.')
eye = jnp.eye(shape[-2], shape[-1], dtype=dtype)
if eye.shape != shape:
eye = jnp.broadcast_to(eye, shape)
gain = jax.lax.convert_element_type(self.gain, dtype)
return gain * eye
|
dm-haiku-main
|
haiku/_src/initializers.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.reshape."""
import unittest
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import reshape
from haiku._src import test_utils
from haiku._src import transform
import jax
from jax.experimental import jax2tf
import jax.numpy as jnp
import numpy as np
B, H, W, C, D = 2, 3, 4, 5, 6
class ReshapeTest(parameterized.TestCase):
@parameterized.parameters(
(1, (B, H * W * C, D)),
(-4, (B, H * W * C, D)),
(2, (B, H, W * C, D)),
(-3, (B, H, W * C, D)),
(3, (B, H, W, C, D)),
(-2, (B, H, W, C, D)),
(4, (B, H, W, C, 1, D)),
(-1, (B, H, W, C, 1, D)),
)
def test_reshape(self, preserve_dims, expected_output_shape):
def f(inputs):
return reshape.Reshape(output_shape=(-1, D),
preserve_dims=preserve_dims)(inputs)
init_fn, apply_fn = transform.transform(f)
params = init_fn(None, jnp.ones([B, H, W, C, D]))
outputs = apply_fn(params, None, np.ones([B, H, W, C, D]))
self.assertEqual(outputs.shape, expected_output_shape)
def test_invalid_multiple_wildcard(self):
def f():
mod = reshape.Reshape(output_shape=[-1, -1])
return mod(np.ones([1, 2, 3]))
init_fn, _ = transform.transform(f)
with self.assertRaises(ValueError):
init_fn(None)
def test_invalid_type(self):
def f():
mod = reshape.Reshape(output_shape=[7, "string"])
return mod(np.ones([1, 2, 3]))
init_fn, _ = transform.transform(f)
with self.assertRaises(TypeError):
init_fn(None)
def test_reshape_convert(self):
if jax.default_backend() in {"tpu"}:
raise unittest.SkipTest(
"Jax2tf native_serialization eager mode is not support in TPU"
)
    # A function containing an hk.Reshape over a polymorphic dimension. We want
    # to make sure we can convert this function using `jax2tf`.
def f(inputs):
mod = reshape.Reshape(output_shape=[1, -1])
return mod(inputs)
init_fn, apply_fn = transform.transform(f)
x1 = jnp.ones([1, 2, 3])
params = init_fn(None, x1)
# We convert `f` using `jax2tf` with undefined shape
converted_f = jax2tf.convert(
apply_fn,
polymorphic_shapes=[None, None, jax2tf.PolyShape("_", "T", ...)], # pytype: disable=wrong-arg-count
with_gradient=True,
)
# Test equality for different inputs shapes.
original_output1 = apply_fn(params, None, x1)
converted_output1 = converted_f(params, None, x1)
self.assertTrue(np.allclose(original_output1, converted_output1))
x2 = jnp.ones([1, 4, 3])
converted_output2 = converted_f(params, None, x2)
original_output2 = apply_fn(params, None, x2)
self.assertTrue(np.allclose(original_output2, converted_output2))
def test_flatten(self):
def f():
return reshape.Flatten(preserve_dims=2)(jnp.zeros([2, 3, 4, 5]))
init_fn, apply_fn = transform.transform(f)
params = init_fn(None)
self.assertEqual(apply_fn(params, None).shape, (2, 3, 20))
@test_utils.transform_and_run
def test_flatten_1d(self):
mod = reshape.Flatten()
x = jnp.zeros([10])
y = mod(x)
self.assertEqual(x.shape, y.shape)
@test_utils.transform_and_run
def test_flatten_nd(self):
mod = reshape.Flatten(preserve_dims=2)
x = jnp.zeros([2, 3])
y = mod(x)
self.assertEqual(x.shape, y.shape)
@test_utils.transform_and_run
def test_flatten_1d_out_negative(self):
mod = reshape.Flatten(preserve_dims=-2)
x = jnp.zeros([2, 3])
y = mod(x)
self.assertEqual(y.shape, (6,))
@test_utils.transform_and_run
def test_flatten_nd_out_negative(self):
mod = reshape.Flatten(preserve_dims=-2)
x = jnp.zeros([5, 2, 3])
y = mod(x)
self.assertEqual(y.shape, (5, 6))
@test_utils.transform_and_run
def test_flatten_invalid_preserve_dims(self):
with self.assertRaisesRegex(ValueError,
"Argument preserve_dims should be non-zero."):
reshape.Flatten(preserve_dims=0)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/reshape_test.py
|
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Enables module construction to be deferred."""
from collections.abc import Sequence
from typing import Callable, Generic, TypeVar
from haiku._src import base
from haiku._src import module
# If you are forking replace this with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
Module = module.Module
# pylint: enable=invalid-name
del module
# TODO(tomhennigan): Should be CallableModule.
T = TypeVar("T", bound=hk.Module)
class Deferred(Generic[T]):
"""Defers the construction of another module until the first call.
Deferred can be used to declare modules that depend on computed properties of
other modules before those modules are defined. This allows users to separate
the declaration and use of modules. For example at the start of your program
you can declare two modules which are coupled:
>>> encoder = hk.Linear(64)
>>> decoder = hk.Deferred(lambda: hk.Linear(encoder.input_size))
  Later you can use these naturally (note that using `decoder` first would
  raise an error, since `encoder.input_size` is only defined after `encoder`
  has been called):
>>> x = jnp.ones([8, 32])
>>> y = encoder(x)
>>> z = decoder(y) # Constructs the Linear encoder by calling the lambda.
The result will satisfy the following conditions:
>>> assert x.shape == z.shape
>>> assert y.shape == (8, 64)
>>> assert decoder.input_size == encoder.output_size
>>> assert decoder.output_size == encoder.input_size
"""
def __init__(
self,
factory: Callable[[], T],
call_methods: Sequence[str] = ("__call__",),
):
"""Initializes the :class:`Deferred` module.
Args:
      factory: A no-argument callable which constructs the module to defer
        to. The first time one of the `call_methods` is called, the factory
        will be run and the constructed module will then be called with the
        same method and arguments as the deferred module.
      call_methods: Methods which should trigger construction of the target
        module. The default value configures this module to construct the first
        time `__call__` is run. If you want methods other than `__call__` to
        trigger construction, pass them explicitly (optionally including
        `__call__`), for example
        `call_methods=("__call__", "encode", "decode")`.
"""
self._factory = factory
self._target = None
self.__constructor_state = base.current_module_state()
for call_method in call_methods:
if call_method == "__call__":
# Has to be handled separately because __call__ cannot be overridden at
# the instance level.
# See: https://docs.python.org/3/reference/datamodel.html#special-lookup
continue
setattr(self, call_method, _materialize_then_call(self, call_method))
@property
def target(self) -> T:
"""Returns the target module.
If the factory has not already run this will trigger construction.
Subsequent calls to `target` will return the same instance.
Returns:
A :class:`Module` instance as created by the factory function passed into
the constructor.
"""
if self._target is None:
with base.maybe_push_module_state(self.__constructor_state):
self._target = self._factory()
self._factory = None
return self._target
def __call__(self, *args, **kwargs):
# pytype: disable=not-callable
# pylint: disable=not-callable
return self.target(*args, **kwargs)
# pytype: enable=not-callable
# pylint: enable=not-callable
def __str__(self):
return f"Deferred({self.target})"
def __repr__(self):
return f"Deferred({self.target!r})"
def __getattr__(self, name):
if name != "_target" and hasattr(self, "_target"):
if self._target is not None:
return getattr(self._target, name)
raise AttributeError(
f"'{self.__class__.__name__}' object has no attribute '{name}'")
def __setattr__(self, name, value):
if name != "_target" and hasattr(self, "_target"):
if self._target is not None:
setattr(self._target, name, value)
return
super().__setattr__(name, value)
def __delattr__(self, name):
if name != "_target" and hasattr(self, "_target"):
if self._target is not None:
return delattr(self._target, name)
super().__delattr__(name)
def _materialize_then_call(deferred: Deferred, method_name: str):
def wrapped(*args, **kwargs):
return getattr(deferred.target, method_name)(*args, **kwargs)
return wrapped
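# A self-contained usage sketch (not library code): `Deferred` ties a
# decoder's width to an encoder property that only exists once the encoder has
# been called. The helper name `_example_deferred_usage` is hypothetical;
# `haiku`, `jax` and `jax.numpy` are imported locally because this file
# deliberately only depends on `haiku._src.base` and `haiku._src.module`.
def _example_deferred_usage():
  import haiku as hk
  import jax
  import jax.numpy as jnp

  def autoencoder(x):
    encoder = hk.Linear(8)
    # Construction of the decoder is deferred until its first call, by which
    # point `encoder.input_size` has been populated.
    decoder = hk.Deferred(lambda: hk.Linear(encoder.input_size))
    return decoder(encoder(x))

  init_fn, apply_fn = hk.transform(autoencoder)
  x = jnp.ones([4, 16])
  params = init_fn(jax.random.PRNGKey(0), x)
  out = apply_fn(params, None, x)
  return out.shape  # (4, 16): the decoder maps back to the input size.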
|
dm-haiku-main
|
haiku/_src/deferred.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.multi_transform."""
import inspect
from typing import Optional, Union
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import base
from haiku._src import multi_transform
from haiku._src import transform
from haiku._src import typing
import jax
import jax.numpy as jnp
import numpy as np
PRNGKey = typing.PRNGKey
State = typing.State
Params = typing.Params
def _assert_arrays_equal(x: jax.Array, y: jax.Array) -> None:
np.testing.assert_almost_equal(np.array(x), np.array(y), decimal=5)
class MultiTransformTest(parameterized.TestCase):
@parameterized.parameters(multi_transform.multi_transform,
multi_transform.multi_transform_with_state)
def test_multi_transform_empty(self, mt):
for empty_tree in ({}, [], (), AttrMap()):
with self.subTest(type(empty_tree).__name__):
f = mt(lambda: (lambda: None, empty_tree)) # pylint: disable=cell-var-from-loop
f.init(None)
self.assertEqual(f.apply, empty_tree)
def test_custom_pytree(self):
def f():
init = lambda: None
foo = lambda: 'foo'
bar = lambda: 'bar'
return init, AttrMap(foo=foo, bar=bar)
f = multi_transform.multi_transform(f)
self.assertEqual('foo', f.apply.foo({}, None))
self.assertEqual('bar', f.apply.bar({}, None))
def test_parameter_in_init(self):
def f():
w = base.get_parameter('w', [], init=jnp.zeros)
s = base.get_state('s', [], init=jnp.zeros)
init = lambda: None
def add():
s_add = base.get_state('s', [], init=jnp.zeros)
w_add = base.get_parameter('w', [], init=jnp.zeros)
return w, w_add, s, s_add
def sub():
s_sub = base.get_state('s', [], init=jnp.zeros)
w_sub = base.get_parameter('w', [], init=jnp.zeros)
return w, w_sub, s, s_sub
return init, (add, sub)
f = multi_transform.multi_transform_with_state(f)
params, state = f.init(None)
self.assertLen(f.apply, 2)
for apply_fn in f.apply:
# Check parameter and state reuse inside the transformed function.
(w, w_apply, s, s_apply), _ = apply_fn(params, state, None)
self.assertIs(w, w_apply)
self.assertIs(s, s_apply)
def test_state(self):
def f():
def init():
s = base.get_state('s', [], init=jnp.zeros)
base.set_state('s', s + 1)
def apply():
s = base.get_state('s')
base.set_state('s', s + 1)
return init, apply
f = multi_transform.multi_transform_with_state(f)
_, state_in = f.init(None)
self.assertEqual(state_in, {'~': {'s': 0}})
_, state_out = f.apply({}, state_in, None)
self.assertEqual(state_out, {'~': {'s': 1}})
def test_without_apply_rng_multi_transform(self):
def net(name):
def f(x):
p = base.get_parameter(name, [], init=jnp.zeros)
return p+x
return f
def mod():
one = net(name='one')
two = net(name='two')
def init(x):
z = one(x)
return two(z)
return init, (one, two)
f = multi_transform.without_apply_rng(
multi_transform.multi_transform_with_state(mod))
self.assertIsInstance(f, multi_transform.MultiTransformedWithState)
params, state = f.init(None, jnp.ones(()))
f.apply[0](params, state, jnp.ones(()))
f.apply[1](params, state, jnp.ones(()))
f = multi_transform.without_apply_rng(multi_transform.multi_transform(mod))
self.assertIsInstance(f, multi_transform.MultiTransformed)
params = f.init(None, jnp.ones(()))
f.apply[0](params, jnp.ones(()))
f.apply[1](params, jnp.ones(()))
def test_signature_without_apply_rng_transform_with_state(self):
@multi_transform.without_apply_rng
@transform.transform_with_state
def f(pos, key=37) -> int:
del pos, key
return 2
def expected_f_init(
rng: Optional[Union[PRNGKey, int]], pos, key=37
) -> tuple[Params, State]:
del rng, pos, key
raise NotImplementedError
def expected_f_apply(
params: Optional[Params], state: Optional[State], pos, key=37
) -> tuple[int, State]:
del params, state, pos, key
raise NotImplementedError
self.assertEqual(
inspect.signature(f.init), inspect.signature(expected_f_init))
self.assertEqual(
inspect.signature(f.apply), inspect.signature(expected_f_apply))
def test_signature_without_apply_rng_transform(self):
@multi_transform.without_apply_rng
@transform.transform
def f(pos, *, key: int = 37) -> int:
del pos, key
return 2
def expected_f_init(rng: Optional[Union[PRNGKey, int]],
pos, *, key: int = 37) -> Params:
del rng, pos, key
raise NotImplementedError
def expected_f_apply(
params: Optional[Params], pos, *, key: int = 37) -> int:
del params, pos, key
raise NotImplementedError
self.assertEqual(
inspect.signature(f.init), inspect.signature(expected_f_init))
self.assertEqual(
inspect.signature(f.apply), inspect.signature(expected_f_apply))
# Example custom pytree (a dict where `x.a` behaves like `x['a']`).
class AttrMap(dict):
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
jax.tree_util.register_pytree_node(AttrMap,
lambda d: (list(d.values()), d.keys()),
lambda k, v: AttrMap(zip(k, v)))
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
haiku/_src/multi_transform_test.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic Haiku modules and functions."""
from collections.abc import Iterable, Sequence
import functools
from typing import Any, Callable, Optional
from haiku._src import base
from haiku._src import initializers
from haiku._src import module
from haiku._src import typing
from haiku._src.typing import PRNGKey
import jax
from jax import lax
import jax.numpy as jnp
import numpy as np
# If you are forking replace this block with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
get_parameter = base.get_parameter
initializers = initializers
Module = module.Module
SupportsCall = typing.SupportsCall
# pylint: enable=invalid-name
del base, module, initializers, typing
# Utility and activation functions.
def one_hot(x, num_classes, dtype=jnp.float32):
"""Returns a one-hot version of indices.
DEPRECATED: Use ``jax.nn.one_hot(x, num_classes).astype(dtype)`` instead.
Args:
x: A tensor of indices.
num_classes: Number of classes in the one-hot dimension.
dtype: The dtype.
Returns:
The one-hot tensor. If indices' shape is [A, B, ...], shape is
[A, B, ... num_classes].
"""
return jax.nn.one_hot(x, num_classes).astype(dtype)
def multinomial(rng, logits, num_samples):
"""Draws samples from a multinomial distribution.
DEPRECATED: Use ``jax.random.categorical`` instead.
Args:
rng: A JAX PRNGKey.
logits: Unnormalized log-probabilities, where last dimension is categories.
num_samples: Number of samples to draw.
Returns:
Chosen categories, of shape ``logits.shape[:-1] + (num_samples,)``.
"""
if num_samples != 1:
shape = (num_samples,) + logits.shape[:-1]
else:
shape = None
samples = jax.random.categorical(rng, logits, shape=shape)
# Return expected shape.
if num_samples != 1:
return jnp.moveaxis(samples, 0, -1)
else:
return samples[..., None]
# Common modules.
class Sequential(hk.Module):
"""Sequentially calls the given list of layers.
Note that :class:`Sequential` is limited in the range of possible
architectures it can handle. This is a deliberate design decision;
:class:`Sequential` is only meant to be used for the simple case of fusing
together modules/ops where the input of a particular module/op is the output
of the previous one.
Another restriction is that it is not possible to have extra arguments in the
:meth:`__call__` method that are passed to the constituents of the module -
for example, if there is a :class:`BatchNorm` module in :class:`Sequential`
and the user wishes to switch the ``is_training`` flag. If this is the desired
use case, the recommended solution is to subclass :class:`Module` and
implement ``__call__``:
>>> class CustomModule(hk.Module):
... def __call__(self, x, is_training):
... x = hk.Conv2D(32, 4, 2)(x)
... x = hk.BatchNorm(True, True, 0.9)(x, is_training)
... x = jax.nn.relu(x)
... return x
"""
def __init__(
self,
layers: Iterable[Callable[..., Any]],
name: Optional[str] = None,
):
super().__init__(name=name)
self.layers = tuple(layers)
def __call__(self, inputs, *args, **kwargs):
"""Calls all layers sequentially."""
out = inputs
for i, layer in enumerate(self.layers):
if i == 0:
out = layer(out, *args, **kwargs)
else:
out = layer(out)
return out
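# A minimal usage sketch (not library code) for Sequential. As implemented
# above, extra call-time arguments only reach the *first* layer. The helper
# name `_example_sequential_usage` is hypothetical; `Linear` is defined just
# below in this file and is resolved at call time, and the public `haiku`
# package is imported locally for `hk.transform`.
def _example_sequential_usage():
  import haiku as hk  # Local import: only needed for this sketch.

  def f(x):
    mlp = Sequential([
        Linear(32),    # Receives `x` (and would receive any extra args).
        jax.nn.relu,   # Plain functions work as layers too.
        Linear(10),
    ])
    return mlp(x)

  init_fn, apply_fn = hk.transform(f)
  x = jnp.ones([8, 128])
  params = init_fn(jax.random.PRNGKey(0), x)
  return apply_fn(params, None, x).shape  # (8, 10)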
class Linear(hk.Module):
"""Linear module."""
def __init__(
self,
output_size: int,
with_bias: bool = True,
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None,
name: Optional[str] = None,
):
"""Constructs the Linear module.
Args:
output_size: Output dimensionality.
with_bias: Whether to add a bias to the output.
w_init: Optional initializer for weights. By default, uses random values
from truncated normal, with stddev ``1 / sqrt(fan_in)``. See
https://arxiv.org/abs/1502.03167v3.
b_init: Optional initializer for bias. By default, zero.
name: Name of the module.
"""
super().__init__(name=name)
self.input_size = None
self.output_size = output_size
self.with_bias = with_bias
self.w_init = w_init
self.b_init = b_init or jnp.zeros
def __call__(
self,
inputs: jax.Array,
*,
precision: Optional[lax.Precision] = None,
) -> jax.Array:
"""Computes a linear transform of the input."""
if not inputs.shape:
raise ValueError("Input must not be scalar.")
input_size = self.input_size = inputs.shape[-1]
output_size = self.output_size
dtype = inputs.dtype
w_init = self.w_init
if w_init is None:
stddev = 1. / np.sqrt(self.input_size)
w_init = hk.initializers.TruncatedNormal(stddev=stddev)
w = hk.get_parameter("w", [input_size, output_size], dtype, init=w_init)
out = jnp.dot(inputs, w, precision=precision)
if self.with_bias:
b = hk.get_parameter("b", [self.output_size], dtype, init=self.b_init)
b = jnp.broadcast_to(b, out.shape)
out = out + b
return out
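# A short sketch (not library code) showing the default weight initialization
# of Linear: a truncated normal with stddev 1 / sqrt(fan_in), as computed
# above. The helper name `_example_linear_default_init` is hypothetical; the
# public `haiku` package is imported locally for `hk.transform`.
def _example_linear_default_init():
  import haiku as hk  # Local import: only needed for this sketch.

  init_fn, _ = hk.transform(lambda x: Linear(4)(x))
  params = init_fn(jax.random.PRNGKey(0), jnp.ones([2, 100]))
  w = params['linear']['w']
  # w has shape (100, 4). Its empirical stddev is roughly 1 / sqrt(100) = 0.1,
  # shrunk by the ~0.88 truncation factor that TruncatedNormal leaves
  # uncorrected (see VarianceScaling in initializers.py).
  return w.shape, float(jnp.std(w))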
def ndim_at_least(x, num_dims):
if not (isinstance(x, jax.Array) or isinstance(x, np.ndarray)):
x = jnp.asarray(x)
return x.ndim >= num_dims
def arbitrary_mergeable_leaf(min_num_dims, args, kwargs):
for a in jax.tree_util.tree_leaves(args):
if ndim_at_least(a, min_num_dims):
return a
for k in jax.tree_util.tree_leaves(kwargs):
if ndim_at_least(k, min_num_dims):
return k
# Couldn't find a satisfactory leaf.
return None
def merge_leading_dims(x, num_dims):
"""Merge leading dimensions."""
# Don't merge if there aren't dimensions to merge.
if not ndim_at_least(x, num_dims):
return x
# TODO(tomhennigan) Pass dtype here to account for empty slices.
new_shape = (np.prod(x.shape[:num_dims]),) + x.shape[num_dims:]
return x.reshape(new_shape)
def split_leading_dim(x, to_dim):
new_shape = to_dim + x.shape[1:]
return x.reshape(new_shape)
class BatchApply:
r"""Temporarily merges leading dimensions of input tensors.
Merges the leading dimensions of a tensor into a single dimension, runs the
given callable, then splits the leading dimension of the result to match the
input.
Input arrays whose rank is smaller than the number of dimensions to collapse
are passed unmodified.
This may be useful for applying a module to each timestep of e.g. a
``[Time, Batch, ...]`` array.
For some ``f``\ s and platforms, this may be more efficient than
:func:`jax.vmap`, especially when combined with other transformations like
:func:`jax.grad`.
"""
def __init__(self, f, num_dims=2):
"""Constructs a :class:`BatchApply` module.
Args:
f: The callable to be applied to the reshaped array.
num_dims: The number of dimensions to merge.
"""
self._f = f
self.num_dims = num_dims
def __call__(self, *args, **kwargs):
example = arbitrary_mergeable_leaf(self.num_dims, args, kwargs)
if example is None:
raise ValueError(
"BatchApply requires at least one input with ndim >= "
f"{self.num_dims}.")
merge = lambda x: merge_leading_dims(x, self.num_dims)
split = lambda x: split_leading_dim(x, example.shape[:self.num_dims])
args = jax.tree_util.tree_map(merge, args)
kwargs = jax.tree_util.tree_map(merge, kwargs)
outputs = self._f(*args, **kwargs)
return jax.tree_util.tree_map(split, outputs)
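# An illustrative sketch (not library code): BatchApply folds the leading
# [T, B] dimensions into one, applies `f`, then unfolds the result. The helper
# name `_example_batch_apply` is hypothetical; no parameters are involved, so
# no `hk.transform` is needed.
def _example_batch_apply():
  def sum_features(x):  # Expects a rank-2 input: [batch, features].
    return jnp.sum(x, axis=-1)

  x = jnp.arange(2 * 3 * 4, dtype=jnp.float32).reshape([2, 3, 4])  # [T, B, F]
  y = BatchApply(sum_features)(x)  # Internally sees shape [T * B, F].
  return y.shape  # (2, 3): the leading [T, B] dimensions are restored.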
def expand_apply(f, axis=0):
"""Wraps f to temporarily add a size-1 axis to its inputs.
Syntactic sugar for::
ins = jax.tree_util.tree_map(lambda t: np.expand_dims(t, axis=axis), ins)
out = f(ins)
out = jax.tree_util.tree_map(lambda t: np.squeeze(t, axis=axis), out)
This may be useful for applying a function built for ``[Time, Batch, ...]``
arrays to a single timestep.
Args:
f: The callable to be applied to the expanded inputs.
axis: Where to add the extra axis.
Returns:
f, wrapped as described above.
"""
if axis not in [0, -1]:
raise ValueError("expand_apply currently only supports axis=0 or axis=-1.")
@functools.wraps(f)
def wrapper(*args, **kwargs):
expand = lambda t: jnp.expand_dims(t, axis=axis)
args = jax.tree_util.tree_map(expand, args)
kwargs = jax.tree_util.tree_map(expand, kwargs)
outputs = f(*args, **kwargs)
return jax.tree_util.tree_map(lambda t: jnp.squeeze(t, axis=axis), outputs)
return wrapper
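# An illustrative sketch (not library code): expand_apply lets a function
# written for batched inputs (and batched outputs) be applied to a single
# example. The helper name `_example_expand_apply` is hypothetical.
def _example_expand_apply():
  def normalize(x):  # Expects and returns [batch, features].
    return x - jnp.mean(x, axis=-1, keepdims=True)

  single_example = jnp.arange(4.0)  # No batch axis.
  out = expand_apply(normalize)(single_example)
  # A size-1 axis 0 is added before the call, then squeezed away afterwards.
  return out.shape  # (4,)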
def dropout(
rng: PRNGKey, rate: float, x: jax.Array, broadcast_dims: Sequence[int] = ()
) -> jax.Array:
"""Randomly drop units in the input at a given rate.
See: http://www.cs.toronto.edu/~hinton/absps/dropout.pdf
Args:
rng: A JAX random key.
rate: Probability that each element of ``x`` is discarded. Must be a scalar
in the range ``[0, 1)``.
x: The value to be dropped out.
broadcast_dims: specifies dimensions that will share the same dropout mask.
Returns:
x, but dropped out and scaled by ``1 / (1 - rate)``.
Note:
This involves generating `x.size` pseudo-random samples from U([0, 1))
computed with the full precision required to compare them with `rate`. When
`rate` is a Python float, this is typically 32 bits, which is often more
than what applications require. A work-around is to pass `rate` with a lower
precision, e.g. using `np.float16(rate)`.
"""
return dropout_impl(rng, rate, x, broadcast_dims=broadcast_dims)
# Separated out to support monkey patching.
def dropout_impl(
rng: PRNGKey, rate: float, x: jax.Array, broadcast_dims: Sequence[int] = ()
) -> jax.Array:
"""See dropout."""
try:
if rate < 0 or rate >= 1:
raise ValueError("rate must be in [0, 1).")
if rate == 0.0:
return x
except jax.errors.ConcretizationTypeError:
pass
broadcast_shape = list(x.shape)
for dim in broadcast_dims:
    if dim >= len(broadcast_shape):
raise ValueError("Broadcast dimension does not exist. Got dimension "
f"{dim} for shape {broadcast_shape}.")
broadcast_shape[dim] = 1
keep_rate = 1.0 - rate
keep = jax.random.bernoulli(rng, keep_rate, shape=broadcast_shape)
keep = jnp.broadcast_to(keep, x.shape)
return keep * x / keep_rate
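# An illustrative sketch (not library code): `broadcast_dims` shares one
# dropout mask along the given axes, here the feature axis. The helper name
# `_example_dropout_broadcast` is hypothetical.
def _example_dropout_broadcast():
  rng = jax.random.PRNGKey(0)
  x = jnp.ones([4, 8])
  y = dropout(rng, 0.5, x, broadcast_dims=(1,))
  # The mask has shape [4, 1] and is broadcast across features, so each row
  # of `y` is either all zeros or all 2.0 (kept values are rescaled by
  # 1 / keep_rate = 2).
  return y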
# TODO(tomhennigan): Fix internal tests and replace with `hk.SupportsCall`.
class CallableModule(hk.Module):
def __call__(self, *args, **kwargs) -> Any:
raise NotImplementedError
def to_module(f: Callable[..., Any]) -> type[CallableModule]:
"""Converts a function into a callable module class.
Sample usage:
>>> def bias_fn(x):
... b = hk.get_parameter("b", [], init=hk.initializers.RandomNormal())
... return x + b
>>> Bias = hk.to_module(bias_fn)
>>> def net(x, y):
... b = Bias(name="my_bias")
... # Bias x and y by the same amount.
... return b(x) * b(y)
Args:
f: The function to convert.
Returns:
A module class which runs ``f`` when called.
"""
class ToModuleWrapper(CallableModule):
"""Module produced by `hk.to_module`."""
def __init__(self, name=None):
if name is None:
name = f.__name__
elif not isinstance(name, str):
raise TypeError("Expected a string name as the first argument to the "
f"module constructor, got: {name}. Note that "
"`hk.to_module` returns a class not an object, so to "
"use your module you need to instantiate it first: "
"`cls = hk.to_module(fn); mod = cls(); out = mod(x)`.")
super().__init__(name=name)
def __call__(self, *a, **k):
return f(*a, **k)
if hasattr(f, "__doc__") and f.__doc__:
ToModuleWrapper.__doc__ = f.__doc__
functools.update_wrapper(ToModuleWrapper.__call__, f)
return ToModuleWrapper
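# A short sketch (not library code): `to_module` turns a parameter-creating
# function into a module class, so two call sites can share parameters by
# sharing one instance. The helper name `_example_to_module_sharing` is
# hypothetical; the public `haiku` package is imported locally for
# `hk.transform`.
def _example_to_module_sharing():
  import haiku as hk  # Local import: only needed for this sketch.

  def bias_fn(x):
    b = hk.get_parameter("b", [], init=jnp.zeros)
    return x + b

  def net(x, y):
    bias = to_module(bias_fn)(name="shared_bias")
    return bias(x), bias(y)  # Both calls read the same parameter "b".

  init_fn, _ = hk.transform(net)
  params = init_fn(jax.random.PRNGKey(0), jnp.ones([3]), jnp.zeros([3]))
  return list(params)  # ['shared_bias']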
|
dm-haiku-main
|
haiku/_src/basic.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Misc utility functions."""
import collections.abc
from collections.abc import Sequence
import decimal
import inspect
import pprint
import re
from typing import Any, TypeVar, Union
import jax
T = TypeVar("T")
def compare_or_false(a, b) -> bool:
try:
return bool(a == b)
except: # pylint: disable=bare-except
# Some equality checks might be buggy (e.g. `tf.Tensor == None`), in those
# cases be defensive and assume `a != b`. Note that an exception is also
# thrown when a and b are ndarrays of >1 element.
# TODO(tomhennigan) We could be smarter about comparing ndarrays.
return False
def auto_repr(cls: type[Any], *args, **kwargs) -> str:
"""Derives a `__repr__` from constructor arguments of a given class.
>>> class Foo:
... def __init__(self, x=None, y=42):
... pass
...
>>> auto_repr(Foo, "x")
"Foo(x='x')"
>>> auto_repr(Foo, "x", y=21)
"Foo(x='x', y=21)"
>>> auto_repr(Foo, None, 42)
  'Foo()'
Args:
cls: a class to derive `__repr__` for.
*args: positional arguments.
**kwargs: keyword arguments.
Returns:
A string representing a call equivalent to `cls(*args, **kwargs)`.
"""
argspec = inspect.getfullargspec(cls.__init__)
arg_names = argspec.args
# Keep used positionals minus self.
arg_names = arg_names[1:(len(args) + 1)]
# Keep used kwargs in the order they appear in argspec.
arg_names.extend(n for n in argspec.args if n in kwargs)
arg_values = inspect.getcallargs(cls.__init__, None, *args, **kwargs) # pylint: disable=deprecated-method
# Extract default parameter values.
defaults = argspec.defaults or ()
defaults = dict(zip(argspec.args[-len(defaults):], defaults))
is_default = lambda n, v: (n in defaults and compare_or_false(v, defaults[n]))
names_and_values = [(name + "=", arg_values[name]) for name in arg_names
if not is_default(name, arg_values[name])]
# Add varargs.
names_and_values.extend(("", arg) for arg in args[len(argspec.args) - 1:])
# Add varkwargs.
names_and_values.extend(
(name + "=", kwargs[name]) for name in kwargs if name not in argspec.args)
single_line = cls.__name__ + "({})".format(", ".join(
name + repr(value) for name, value in names_and_values))
if len(single_line) <= 80:
return single_line
else:
return "{}(\n{},\n)".format(
cls.__name__,
indent(4, ",\n".join(fancy_repr(n, v) for n, v in names_and_values)))
def fancy_repr(name: str, value: Any) -> str:
try:
repr_value = pprint.pformat(value)
# C++ objects by way of pybind11 may not pprint correctly, but do have repr.
except TypeError:
repr_value = repr(value)
if name:
repr_value = indent(len(name), repr_value).strip()
return name + repr_value
def indent(amount: int, s: str) -> str:
"""Indents `s` with `amount` spaces."""
prefix = amount * " "
return "\n".join(prefix + line for line in s.splitlines())
def replicate(
element: Union[T, Sequence[T]],
num_times: int,
name: str,
) -> tuple[T, ...]:
"""Replicates entry in `element` `num_times` if needed."""
if (isinstance(element, (str, bytes)) or
not isinstance(element, collections.abc.Sequence)):
return (element,) * num_times
elif len(element) == 1:
return tuple(element * num_times)
elif len(element) == num_times:
return tuple(element)
raise TypeError(
f"{name} must be a scalar or sequence of length 1 or sequence of "
f"length {num_times}."
)
_SPATIAL_CHANNELS_FIRST = re.compile("^NC[^C]*$")
_SPATIAL_CHANNELS_LAST = re.compile("^N[^C]*C$")
_SEQUENTIAL = re.compile("^((BT)|(TB))[^D]*D$")
def get_channel_index(data_format: str) -> int:
"""Returns the channel index when given a valid data format.
>>> hk.get_channel_index('channels_last')
-1
>>> hk.get_channel_index('channels_first')
1
>>> hk.get_channel_index('N...C')
-1
>>> hk.get_channel_index('NCHW')
1
Args:
data_format: String, the data format to get the channel index from. Valid
      data formats are spatial (e.g. ``NCHW``), sequential (e.g. ``BTHWD``),
      ``channels_first`` and ``channels_last``.
Returns:
The channel index as an int, either ``1`` or ``-1``.
Raises:
ValueError: If the data format is unrecognised.
"""
if data_format == "channels_first":
return 1
elif data_format == "channels_last":
return -1
elif _SPATIAL_CHANNELS_FIRST.match(data_format):
return 1
elif _SPATIAL_CHANNELS_LAST.match(data_format):
return -1
elif _SEQUENTIAL.match(data_format):
return -1
else:
raise ValueError(
f"Unable to extract channel information from {data_format!r}. Valid "
"data formats are spatial (e.g.`NCHW`), sequential (e.g. `BTHWD`), "
"`channels_first` and `channels_last`).")
def assert_minimum_rank(inputs, rank: int):
"""Asserts the rank of the input is at least `rank`."""
if inputs.ndim < rank:
raise ValueError("Input %r must have rank >= %d" % (inputs, rank))
def tree_size(tree) -> int:
"""Sums the sizes of all arrays in a pytree.
For example given a ResNet50 model:
>>> f = hk.transform_with_state(lambda x: hk.nets.ResNet50(1000)(x, True))
>>> rng = jax.random.PRNGKey(42)
>>> x = jnp.ones([128, 224, 224, 3])
>>> params, state = f.init(rng, x)
We can count the number of parameters and their size at f32:
>>> num_params = hk.data_structures.tree_size(params)
>>> byte_size = hk.data_structures.tree_bytes(params)
>>> print(f'{num_params} params, size: {byte_size / 1e6:.2f}MB')
25557032 params, size: 102.23MB
And compare that with casting our parameters to bf16:
>>> params = jax.tree_util.tree_map(lambda x: x.astype(jnp.bfloat16), params)
>>> num_params = hk.data_structures.tree_size(params)
>>> byte_size = hk.data_structures.tree_bytes(params)
>>> print(f'{num_params} params, size: {byte_size / 1e6:.2f}MB')
25557032 params, size: 51.11MB
Args:
tree: A tree of jax.Arrays.
Returns:
The total size (number of elements) of the array(s) in the input.
"""
return sum(x.size for x in jax.tree_util.tree_leaves(tree))
def tree_bytes(tree) -> int:
"""Sums the size in bytes of all arrays in a pytree.
Note that this is the minimum size of the array (e.g. for a float32 we need
at least 4 bytes) however on some accelerators buffers may occupy more memory
due to padding/alignment constraints.
For example given a ResNet50 model:
>>> f = hk.transform_with_state(lambda x: hk.nets.ResNet50(1000)(x, True))
>>> rng = jax.random.PRNGKey(42)
>>> x = jnp.ones([128, 224, 224, 3])
>>> params, state = f.init(rng, x)
We can count the number of parameters and their size at f32:
>>> num_params = hk.data_structures.tree_size(params)
>>> byte_size = hk.data_structures.tree_bytes(params)
>>> print(f'{num_params} params, size: {byte_size / 1e6:.2f}MB')
25557032 params, size: 102.23MB
And compare that with casting our parameters to bf16:
>>> params = jax.tree_util.tree_map(lambda x: x.astype(jnp.bfloat16), params)
>>> num_params = hk.data_structures.tree_size(params)
>>> byte_size = hk.data_structures.tree_bytes(params)
>>> print(f'{num_params} params, size: {byte_size / 1e6:.2f}MB')
25557032 params, size: 51.11MB
Args:
tree: A tree of jax.Arrays.
Returns:
The total size in bytes of the array(s) in the input.
"""
return sum(x.size * x.dtype.itemsize for x in jax.tree_util.tree_leaves(tree))
_CAMEL_TO_SNAKE_R = re.compile(r"((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))")
camel_to_snake = lambda value: _CAMEL_TO_SNAKE_R.sub(r"_\1", value).lower()
def simple_dtype(dtype) -> str:
if isinstance(dtype, type):
dtype = dtype(0).dtype
dtype = dtype.name
dtype = dtype.replace("complex", "c")
dtype = dtype.replace("double", "d")
dtype = dtype.replace("float", "f")
dtype = dtype.replace("uint", "u")
dtype = dtype.replace("int", "s")
return dtype
def format_array(x: Any) -> str:
"""Formats the given array showing dtype and shape info."""
return simple_dtype(x.dtype) + "[" + ",".join(map(str, x.shape)) + "]"
def format_bytes(num_bytes) -> str:
suffix = "B"
suffixes = ["KB", "MB", "GB", "TB"]
num_bytes = decimal.Decimal(num_bytes)
one_thousand = decimal.Decimal(1000)
while suffixes and num_bytes >= one_thousand:
num_bytes /= one_thousand
suffix = suffixes.pop(0)
return f"{num_bytes:.2f} {suffix}"
|
dm-haiku-main
|
haiku/_src/utils.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.module."""
import abc
from collections.abc import Sequence
import contextlib
import dataclasses
import inspect
from typing import Callable, Optional, Protocol, TypeVar, runtime_checkable
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import base
from haiku._src import config
from haiku._src import module
from haiku._src import test_utils
from haiku._src import transform
import jax
import jax.numpy as jnp
ModuleT = TypeVar("ModuleT", bound=module.Module)
# TODO(tomhennigan) Improve test coverage.
class ModuleTest(parameterized.TestCase):
@test_utils.transform_and_run
def test_module_naming_default(self):
mod1 = EmptyModule()
mod2 = EmptyModule()
self.assertEqual(mod1.module_name, "empty_module")
self.assertEqual(mod2.module_name, "empty_module_1")
@test_utils.transform_and_run
def test_module_naming_custom(self):
mod1 = EmptyModule(name="custom_name")
mod2 = EmptyModule(name="custom_name")
self.assertEqual(mod1.module_name, "custom_name")
self.assertEqual(mod2.module_name, "custom_name_1")
@test_utils.transform_and_run
def test_supports_arg_named_module(self):
class MyModule(module.Module):
def __init__(self, module): # pylint: disable=redefined-outer-name
del module
super().__init__()
self.assertIsNotNone(MyModule(module=None))
@parameterized.parameters(1, 2, 3)
@test_utils.transform_and_run
def test_module_naming_explicit_numbering(self, step):
for n in range(0, step * 10, step):
module_name = f"custom_name_{n}"
self.assertEqual(EmptyModule(name=module_name).module_name, module_name)
@parameterized.parameters(1, 2, 3)
@test_utils.transform_and_run
def test_module_naming_explicit_reverse_numbering(self, step):
total = step * 10
for n in range(0, total, step):
n = total - n
module_name = f"custom_name_{n}"
self.assertEqual(EmptyModule(name=module_name).module_name, module_name)
self.assertEqual(EmptyModule(name="custom_name").module_name,
f"custom_name_{total + 1}")
@test_utils.transform_and_run
def test_module_naming_explicit_numbering_collision(self):
self.assertEqual(EmptyModule(name="custom_name").module_name, "custom_name")
self.assertEqual(EmptyModule(name="custom_name").module_name,
"custom_name_1")
with self.assertRaisesRegex(
ValueError, "Module name 'custom_name_1' is not unique"):
EmptyModule(name="custom_name_1")
@test_utils.transform_and_run
def test_module_naming_explicit_numbering_out_of_order(self):
for n in (1, 3, 2, 4):
self.assertEqual(
EmptyModule(name=f"custom_name_{n}").module_name, f"custom_name_{n}")
with self.assertRaisesRegex(
ValueError, "Module name 'custom_name_4' is not unique"):
EmptyModule(name="custom_name_4")
@test_utils.transform_and_run
def test_module_naming_explicit_numbering_zero_padded(self):
self.assertEqual(
EmptyModule(name="custom_name_000").module_name, "custom_name_000")
self.assertEqual(
EmptyModule(name="custom_name_001").module_name, "custom_name_001")
self.assertEqual(
EmptyModule(name="custom_name_002").module_name, "custom_name_002")
self.assertEqual(
EmptyModule(name="custom_name_007").module_name, "custom_name_007")
@test_utils.transform_and_run
def test_module_naming_explicit_numbering_zero_padded_reuse(self):
self.assertEqual(
EmptyModule(name="custom_name_007").module_name, "custom_name_007")
self.assertEqual(
EmptyModule(name="custom_name_007").module_name, "custom_name_007_1")
@test_utils.transform_and_run
def test_module_naming_explicit_numbering_zero_padded_vs_no_pad(self):
m1 = ScalarModule(name="scalar_module_1")
self.assertEqual(m1.module_name, "scalar_module_1")
m2 = ScalarModule(name="scalar_module_001")
self.assertEqual(m2.module_name, "scalar_module_001")
self.assertIsNot(m1(), m2()) # No parameter sharing.
@test_utils.transform_and_run
def test_flatten_invalid_name(self):
with self.assertRaisesRegex(ValueError, "is not a valid module name"):
EmptyModule(name="1bad-name")
@test_utils.transform_and_run
def test_parameter_reuse(self):
mod = ScalarModule()
w1 = mod()
w2 = mod()
self.assertIs(w1, w2)
@test_utils.transform_and_run
def test_multiple_forward_methods(self):
mod = MultipleForwardMethods(name="outer")
mod()
self.assertEqual(mod.ctor_mod.module_name, "outer/~/scalar_module")
self.assertEqual(mod.call_mod.module_name, "outer/scalar_module")
self.assertEqual(mod.encode_mod.module_name, "outer/~encode/scalar_module")
self.assertEqual(mod.decode_mod.module_name, "outer/~decode/scalar_module")
@test_utils.transform_and_run
def test_nesting(self):
mod = ParentModule()
self.assertEqual(mod.module_name, "parent_module")
self.assertEqual(mod.child1.module_name, "parent_module/~/child_module")
self.assertEqual(mod.child2.module_name, "parent_module/~/child_module_1")
def test_outside_transform_exception(self):
with self.assertRaisesRegex(ValueError,
"initialized inside an `hk.transform`"):
EmptyModule()
def test_params(self):
init_fn, _ = transform.transform(lambda: ScalarModule()()) # pylint: disable=unnecessary-lambda
params = init_fn(None)
self.assertEqual(params, {"scalar_module": {"w": jnp.zeros([])}})
def test_params_nested(self):
init_fn, _ = transform.transform(
lambda: MultipleForwardMethods(name="outer")()) # pylint: disable=unnecessary-lambda
params = init_fn(None)
self.assertEqual(params,
{"outer/~/scalar_module": {"w": jnp.zeros([])},
"outer/scalar_module": {"w": jnp.zeros([])},
"outer/~encode/scalar_module": {"w": jnp.zeros([])},
"outer/~decode/scalar_module": {"w": jnp.zeros([])}})
def test_used_inside_transform(self):
name_log = []
module_log = []
def counting_creator(next_creator, shape, dtype, init, context):
name_log.append(context.full_name)
mod = context.module
module_log.append((type(mod), mod.module_name))
return next_creator(shape, dtype, init)
def net():
with base.custom_creator(counting_creator):
return MultipleForwardMethods()()
init_fn, apply_fn = transform.transform(net)
params = init_fn(None)
self.assertEqual(name_log, [
"multiple_forward_methods/~/scalar_module/w", # __init__
"multiple_forward_methods/scalar_module/w", # __call__
"multiple_forward_methods/~encode/scalar_module/w", # encode
"multiple_forward_methods/~decode/scalar_module/w", # decode
])
self.assertEqual(module_log, [
(ScalarModule, "multiple_forward_methods/~/scalar_module"),
(ScalarModule, "multiple_forward_methods/scalar_module"),
(ScalarModule, "multiple_forward_methods/~encode/scalar_module"),
(ScalarModule, "multiple_forward_methods/~decode/scalar_module"),
])
del name_log[:]
apply_fn(params, None)
self.assertEmpty(name_log)
def test_stateful_module(self):
init_fn, apply_fn = transform.transform_with_state(
lambda: CountingModule()()) # pylint: disable=unnecessary-lambda
params, state = init_fn(None)
self.assertEqual(state, {"counting_module": {"count": 0}})
_, state = apply_fn(params, state, None)
self.assertEqual(state, {"counting_module": {"count": 10}})
def test_without_state(self):
init_fn, apply_fn = transform.without_state(
transform.transform_with_state(lambda: ScalarModule()())) # pylint: disable=unnecessary-lambda
params = init_fn(None)
out = apply_fn(params, None)
self.assertEqual(out, 0)
def test_without_state_raises_if_state_used(self):
init_fn, _ = transform.without_state(
transform.transform_with_state(lambda: CountingModule()())) # pylint: disable=unnecessary-lambda
with self.assertRaisesRegex(ValueError, "use.*transform_with_state"):
init_fn(None)
@test_utils.transform_and_run
def test_params_dict(self):
mods = [ScalarModule() for _ in range(5)]
for i, mod in enumerate(mods):
w = mod()
if i:
self.assertEqual(mod.params_dict(), {f"scalar_module_{i}/w": w})
else:
self.assertEqual(mod.params_dict(), {"scalar_module/w": w})
@test_utils.transform_and_run
def test_params_dict_captured(self):
mod = CapturesModule(ScalarModule())
w = mod()
self.assertEqual(mod.params_dict(), {"scalar_module/w": w})
@test_utils.transform_and_run
def test_params_dict_captured_lambda(self):
mod = CapturesModule(lambda: ScalarModule()()) # pylint: disable=unnecessary-lambda
w = mod()
self.assertIs(w, mod())
self.assertEqual(mod.params_dict(), {"captures_module/scalar_module/w": w})
@test_utils.transform_and_run
def test_state_dict(self):
mods = [ScalarStateModule() for _ in range(5)]
for i, mod in enumerate(mods):
w = mod()
if i:
self.assertEqual(mod.state_dict(), {f"scalar_state_module_{i}/w": w})
else:
self.assertEqual(mod.state_dict(), {"scalar_state_module/w": w})
@test_utils.transform_and_run
def test_state_dict_captured(self):
mod = CapturesModule(ScalarStateModule())
w = mod()
self.assertEqual(mod.state_dict(), {"scalar_state_module/w": w})
@test_utils.transform_and_run
def test_state_dict_captured_lambda(self):
mod = CapturesModule(lambda: ScalarStateModule()()) # pylint: disable=unnecessary-lambda
w = mod()
self.assertIs(w, mod())
self.assertEqual(mod.state_dict(),
{"captures_module/scalar_state_module/w": w})
def test_inline_use(self):
def f():
return ScalarModule()()
f = transform.transform(f)
rng = jax.random.PRNGKey(42)
params = f.init(rng)
w = f.apply(params, None)
self.assertEqual(w, 0)
def test_transparent(self):
init_fn, _ = transform.transform(lambda: TransparentModule()()) # pylint: disable=unnecessary-lambda
params = init_fn(None)
self.assertEqual(params, {"scalar_module": {"w": jnp.zeros([])}})
@test_utils.transform_and_run
def test_method_hook(self):
events = []
@contextlib.contextmanager
def method_hook(mod, method_name):
events.append(("enter", method_name, getattr(mod, "module_name", None)))
yield
events.append(("exit", method_name, mod.module_name))
# Test __init__.
with module.hook_methods(method_hook):
m = EmptyModule()
self.assertIsNotNone(m)
self.assertEqual(events, [("enter", "__init__", None),
("exit", "__init__", "empty_module")])
# Test __call__.
del events[:]
m = CapturesModule(ScalarModule())
with module.hook_methods(method_hook):
m()
self.assertEqual(events, [("enter", "__call__", "captures_module"),
("enter", "__call__", "scalar_module"),
("exit", "__call__", "scalar_module"),
("exit", "__call__", "captures_module")])
@test_utils.transform_and_run
def test_callback_runs_after_submodules_updated(self):
params = []
@contextlib.contextmanager
def method_hook(mod, method_name):
yield
if method_name != "params_dict":
params.append((mod.module_name, method_name, tuple(mod.params_dict())))
m = CapturesModule(ScalarModule())
with module.hook_methods(method_hook):
m()
self.assertEqual(params,
[("scalar_module", "__call__", ("scalar_module/w",)),
("captures_module", "__call__", ("scalar_module/w",))])
@test_utils.transform_and_run
def test_submodules_in_ctor_tracked(self):
m = CreatesSubmoduleWithCtorParam(name="parent")
self.assertEqual(m._submodules, {m.child.module_name})
def test_context_reuse_same_instance(self):
params = {"parent_module/~/child_module": {"w": jnp.array(2.)},
"parent_module/~/child_module_1": {"w": jnp.array(3.)},
"parent_module_1/~/child_module": {"w": jnp.array(4.)},
"parent_module_1/~/child_module_1": {"w": jnp.array(5.)}}
with base.new_context(params=params) as ctx:
mod1 = ParentModule()
mod2 = ParentModule()
self.assertEqual(mod1.module_name, "parent_module")
self.assertEqual(mod2.module_name, "parent_module_1")
for parent, (c1, c2) in ((mod1, (2., 3.)), (mod2, (4., 5.))):
self.assertEqual(parent.child1(), c1)
self.assertEqual(parent.child2(), c2)
with ctx:
for parent, (c1, c2) in ((mod1, (2., 3.)), (mod2, (4., 5.))):
self.assertEqual(parent.child1(), c1)
self.assertEqual(parent.child2(), c2)
# Creating a new context should not be a problem.
with base.new_context(params=ctx.collect_params()) as ctx:
mod1 = ParentModule()
mod2 = ParentModule()
self.assertEqual(mod1.module_name, "parent_module")
self.assertEqual(mod2.module_name, "parent_module_1")
for parent, (c1, c2) in ((mod1, (2., 3.)), (mod2, (4., 5.))):
self.assertEqual(parent.child1(), c1)
self.assertEqual(parent.child2(), c2)
@parameterized.parameters(None, "mlp")
def test_dataclass(self, name):
with base.new_context() as ctx:
output_sizes = [300, 100, 10]
if name is None:
mlp = DataMLP(output_sizes)
else:
mlp = DataMLP(output_sizes, name="mlp")
mlp(jnp.ones([1, 28 * 28]))
params = ctx.collect_params()
if name is None:
module_names = ["data_mlp/linear", "data_mlp/linear_1",
"data_mlp/linear_2"]
else:
module_names = ["mlp/linear", "mlp/linear_1", "mlp/linear_2"]
self.assertEqual(list(params.keys()), module_names)
for module_name, output_size in zip(module_names, output_sizes):
self.assertEqual(params[module_name]["w"].shape[-1], output_size)
self.assertEqual(params[module_name]["b"].shape[-1], output_size)
@test_utils.transform_and_run
def test_intercept_method(self):
mod = IdentityModule()
x = jnp.ones([])
call_count = []
def add_one_interceptor(f, args, kwargs, context):
call_count.append(None)
self.assertLen(context, 4)
self.assertIs(context.module, mod)
self.assertEqual(context.method_name, "__call__")
self.assertEqual(context.orig_method(2), 2)
self.assertEqual(args, (x,))
self.assertEmpty(kwargs)
y = f(*args, **kwargs)
return y + 1
y1 = mod(x)
with module.intercept_methods(add_one_interceptor):
y2 = mod(x)
y3 = mod(x)
self.assertLen(call_count, 1)
self.assertEqual(y1, 1)
self.assertEqual(y2, 2)
self.assertEqual(y3, 1)
@test_utils.transform_and_run
def test_intercept_methods_calling_underlying_optional(self):
def do_nothing_interceptor(f, args, kwargs, context):
del f, context
self.assertEmpty(args)
self.assertEmpty(kwargs)
m = RaisesModule()
with module.intercept_methods(do_nothing_interceptor):
m()
with self.assertRaises(AssertionError):
m() # Without the interceptor we expect an error.
# The previous error should not stop us from re-applying.
with module.intercept_methods(do_nothing_interceptor):
m()
@test_utils.transform_and_run
def test_intercept_methods_run_in_lifo_order(self):
def op_interceptor(op):
def _interceptor(f, args, kwargs, context):
del context
y = f(*args, **kwargs)
return op(y)
return _interceptor
mod = IdentityModule()
x = 7
with module.intercept_methods(op_interceptor(lambda a: a + 1)), \
module.intercept_methods(op_interceptor(lambda a: a ** 2)):
y = mod(x)
self.assertEqual(y, (x ** 2) + 1)
with module.intercept_methods(op_interceptor(lambda a: a ** 2)), \
module.intercept_methods(op_interceptor(lambda a: a + 1)):
y = mod(x)
self.assertEqual(y, (x + 1) ** 2)
@test_utils.transform_and_run
def test_intercept_methods_orig_class(self):
class A(module.Module):
def __call__(self):
pass
class B(A):
def __call__(self): # pylint: disable=useless-parent-delegation
return super().__call__()
class C(B):
def __init__(self, name=None):
super().__init__(name=name)
log = []
def log_orig_class(f, args, kwargs, context):
log.append(
(type(context.module), context.orig_class, context.method_name))
return f(*args, **kwargs)
with module.intercept_methods(log_orig_class):
B()()
C()()
self.assertEqual(log, [
# b = B()
(B, B, "__init__"),
# b()
(B, B, "__call__"), (B, A, "__call__"),
# c = C()
(C, C, "__init__"), # NOTE: No entry for `(module.Module, __init__)`.
# c()
(C, B, "__call__"), (C, A, "__call__")])
@test_utils.transform_and_run
def test_name_scope_trivial(self):
with module.name_scope("foo"):
mod1 = module.Module(name="bar")
mod2 = module.Module(name="bar")
self.assertEqual(mod1.module_name, "foo/bar")
self.assertEqual(mod2.module_name, "foo/bar_1")
@test_utils.transform_and_run
def test_name_scope_inside_module(self):
mod = NameScopeModule(name="module")
w, w_foo = mod()
self.assertIsNot(w, w_foo)
params = mod.params_dict()
self.assertLen(params, 2)
self.assertIs(params["module/w"], w)
self.assertIs(params["module/foo/w"], w_foo)
@test_utils.transform_and_run
def test_name_scope_slash_delimited(self):
with module.name_scope("foo/bar"):
mod = module.Module(name="baz")
self.assertEqual(mod.module_name, "foo/bar/baz")
@test_utils.transform_and_run
def test_name_scope_nesting(self):
with module.name_scope("foo"):
with module.name_scope("bar"):
mod = module.Module(name="baz")
self.assertEqual(mod.module_name, "foo/bar/baz")
@test_utils.transform_and_run
def test_name_scope_duplicate_name(self):
with module.name_scope("foo"):
mod1 = module.Module(name="bar")
with module.name_scope("foo"):
mod2 = module.Module(name="bar")
self.assertEqual(mod1.module_name, "foo/bar")
self.assertEqual(mod2.module_name, "foo_1/bar")
@test_utils.transform_and_run
def test_name_scope_reuse(self):
# NOTE: If you are considering lifting this restriction, please think
# carefully about the following case:
#
# def f(x):
# foo_scope = name_scope("foo")
# with foo_scope: x = BarModule()(x) # name: foo/bar_module
# with foo_scope: x = BarModule()(x) # name: foo/bar_module
# return x
#
# We believe that the name reuse (when the scope is reused) will surprise
# users and lead to bugs. This behaviour does match what would happen if you
# put the body of the context manager into a method and called that method
# twice.
scope = module.name_scope("foo")
with scope:
pass
with self.assertRaisesRegex(ValueError, "name_scope is not reusable"):
with scope:
pass
@test_utils.transform_and_run
def test_name_scope_reuse_after_error(self):
scope = module.name_scope("foo")
with self.assertRaisesRegex(AssertionError, "expected"):
with scope:
assert False, "expected"
with self.assertRaisesRegex(ValueError, "name_scope is not reusable"):
with scope:
pass
@test_utils.transform_and_run
def test_name_scope_leading_slash(self):
with self.assertRaisesRegex(ValueError,
"Name scopes must not start with /"):
module.name_scope("/foo")
def test_name_scope_outside_transform(self):
with self.assertRaisesRegex(
ValueError, "name_scope.*must be used as part of an `hk.transform`"):
module.name_scope("foo")
@test_utils.transform_and_run
def test_name_scope_method_name(self):
with module.name_scope("a", method_name="bar"):
self.assertEqual(module.Module().module_name, "a/~bar/module")
with module.name_scope("b", method_name="__init__"):
self.assertEqual(module.Module().module_name, "b/~/module")
with module.name_scope("c", method_name="__call__"):
self.assertEqual(module.Module().module_name, "c/module")
@test_utils.transform_and_run
def test_is_protocol(self):
self.assertFalse(getattr(module.Module, "_is_protocol"))
self.assertFalse(getattr(ConcreteProtocolModule, "_is_protocol"))
    # NOTE: Technically this bit is set wrong, since ProtocolModule is a
    # protocol.
self.assertFalse(getattr(ProtocolModule, "_is_protocol"))
@test_utils.transform_and_run
def test_instance_checks(self):
self.assertIsInstance(ConcreteProtocolModule(), module.Module)
self.assertIsInstance(ConcreteProtocolModule(), SupportsFoo)
self.assertIsInstance(ConcreteProtocolModule(), ProtocolModule)
self.assertNotIsInstance(module.Module(), SupportsFoo)
self.assertNotIsInstance(module.Module(), ProtocolModule)
@test_utils.transform_and_run
def test_name_like(self):
m = ModuleWithCustomName(name="parent")
m.foo() # foo pretends to be __call__.
m.bar() # bar pretends to be baz.
# baz and call are happy to be themselves.
m.baz()
m()
self.assertEqual(m.init_module.module_name, "parent/~/child")
self.assertEqual(m.foo_module.module_name, "parent/child")
self.assertEqual(m.bar_module.module_name, "parent/~baz/child")
self.assertEqual(m.baz_module.module_name, "parent/~baz/child")
self.assertEqual(m.call_module.module_name, "parent/child")
@test_utils.transform_and_run
def test_name_like_aliasing(self):
m = ModuleWithDoubleCall(name="parent")
m()
self.assertEqual(m.foo_module.module_name, "parent/child") # pytype: disable=attribute-error
self.assertEqual(m.call_module.module_name, "parent/child")
@test_utils.transform_and_run
def test_name_like_on_call(self):
m = ModuleWithCustomNameOnCall(name="parent")
m.foo()
m() # Call pretends to be foo.
self.assertEqual(m.init_module.module_name, "parent/~/child")
self.assertEqual(m.foo_module.module_name, "parent/~foo/child")
self.assertEqual(m.call_module.module_name, "parent/~foo/child")
@test_utils.transform_and_run
def test_name_like_on_init(self):
m = ModuleWithCustomNameOnInit(name="parent") # init pretends to be call.
m()
self.assertEqual(m.init_module.module_name, "parent/child")
self.assertEqual(m.call_module.module_name, "parent/child")
@test_utils.transform_and_run
def test_name_like_interceptor_method_names_unchanged(self):
log = []
def log_parent_methods(f, args, kwargs, context: module.MethodContext):
if isinstance(context.module, ModuleWithCustomName):
log.append(context.method_name)
return f(*args, **kwargs)
with module.intercept_methods(log_parent_methods):
m = ModuleWithCustomName(name="parent")
m.foo() # foo pretends to be __call__.
m.bar() # bar pretends to be baz.
# baz and call are happy to be themselves.
m.baz()
m()
self.assertEqual(log, ["__init__", "foo", "bar", "baz", "__call__"])
@test_utils.transform_and_run
def test_auto_repr(self):
m = IdentityModule()
self.assertEqual(str(m), "IdentityModule()")
def test_signature(self):
captures_expected = inspect.Signature(
parameters=(
inspect.Parameter(
name="mod", kind=inspect.Parameter.POSITIONAL_OR_KEYWORD
),
)
)
self.assertEqual(inspect.signature(CapturesModule), captures_expected)
datalinear_expected = inspect.Signature(
parameters=(
inspect.Parameter(
name="output_size",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=int,
),
inspect.Parameter(
name="name",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
default=None,
annotation=Optional[str],
),
),
return_annotation=None,
)
self.assertEqual(inspect.signature(DataLinear), datalinear_expected)
@test_utils.transform_and_run
@config.with_config(module_auto_repr=False)
def test_config_disable_auto_repr(self):
self.assertRegex(str(IdentityModule()),
"<.*.IdentityModule object at .*>")
@test_utils.transform_and_run
def test_attr_disable_auto_repr(self):
self.assertTrue(config.get_config().module_auto_repr)
self.assertRegex(str(NoAutoReprModule()),
"<.*.NoAutoReprModule object at .*>")
@parameterized.parameters("foo", "foo/bar")
@test_utils.transform_and_run
def test_force_name_naming(self, name):
m0 = create_module_from_qualified_name(name)
m1 = module.Module(name=module.force_name(name))
m2 = module.Module(name=module.force_name(name))
self.assertEqual(m0.name, m1.name)
self.assertEqual(m0.module_name, m1.module_name)
self.assertEqual(m1.name, m2.name)
self.assertEqual(m1.module_name, m2.module_name)
@test_utils.transform_and_run
def test_force_name_reserves_name(self):
m0 = module.Module(name=module.force_name("foo"))
m1 = module.Module(name="foo")
self.assertEqual(m0.module_name, "foo")
self.assertEqual(m1.module_name, "foo_1")
@parameterized.parameters("foo", "foo/bar")
@test_utils.transform_and_run
def test_force_name_inside_module(self, name):
class CreatesInnerModule(module.Module):
def __call__(self):
return module.Module(name=module.force_name(name))
m0 = create_module_from_qualified_name(name)
m1 = CreatesInnerModule()()
m2 = module.Module(name=module.force_name(name))
self.assertEqual(m0.module_name, m1.module_name)
self.assertEqual(m1.module_name, m2.module_name)
@test_utils.transform_and_run
def test_force_name_inside_name_scope(self):
m0 = module.Module(name="foo")
with module.name_scope("bar"):
m1 = module.Module(name=module.force_name("foo"))
m2 = module.Module(name=module.force_name("foo"))
self.assertEqual(m0.module_name, m1.module_name)
self.assertEqual(m1.module_name, m2.module_name)
@parameterized.parameters("foo", "foo/bar")
@test_utils.transform_and_run
def test_force_name_parameter_reuse(self, name):
m0 = create_module_from_qualified_name(name=name, cls=ScalarModule)
m1 = ScalarModule(name=module.force_name(name))
self.assertIs(m0(), m1())
@test_utils.transform_and_run
def test_force_name_parameter_reuse_name_scope(self):
m0 = create_module_from_qualified_name(name="foo/bar/baz", cls=ScalarModule)
w0 = m0()
with module.name_scope(module.force_name("foo/bar/baz")):
w1 = base.get_parameter("w", [], init=jnp.zeros)
self.assertIs(w0, w1)
@test_utils.transform_and_run
def test_force_name_intercept_methods(self):
def change_prefix(old, new):
def my_interceptor(next_f, args, kwargs, context: module.MethodContext):
if type(context.module).__name__ == "NameScopeModule":
# Avoid infinite recursion for modules introduced by name_scope.
return next_f(*args, **kwargs)
name = context.module.module_name
# We expect all usages in the test to have this prefix. If you are
# forking this code you can probably remove this line.
self.assertStartsWith(name, old)
if name.startswith(old):
name = name.replace(old, new, 1)
with module.name_scope(module.force_name(name),
method_name=context.method_name):
return next_f(*args, **kwargs)
return module.intercept_methods(my_interceptor)
with module.name_scope("outer"):
m1 = ParentModule()
with module.name_scope("inner"):
m2 = ParentModule()
m1()
with change_prefix("inner", "outer"):
m2()
self.assertIs(m1.child1.w, m2.child1.w)
self.assertIs(m1.child2.w, m2.child2.w)
class NoAutoReprModule(module.Module):
AUTO_REPR = False
class IdentityModule(module.Module):
def __call__(self, x):
return x
class RaisesModule(module.Module):
def __call__(self):
assert False
class CapturesModule(module.Module):
def __init__(self, mod):
super().__init__()
self._mod = mod
def __call__(self):
return self._mod()
class CreatesSubmoduleWithCtorParam(module.Module):
def __init__(self, name=None):
super().__init__(name=name)
self.child = HasConstructorParam(name="child")
class HasConstructorParam(module.Module):
def __init__(self, name=None):
super().__init__(name=name)
self.w = base.get_parameter("w", [], init=jnp.zeros)
class EmptyModule(module.Module):
pass
class ScalarModule(module.Module):
def __call__(self):
self.w = base.get_parameter("w", [], init=jnp.zeros)
return self.w
class ScalarStateModule(module.Module):
def __call__(self):
return base.get_state("w", [], init=jnp.zeros)
class ParentModule(module.Module):
def __init__(self):
super().__init__()
self.child1 = ScalarModule(name="child_module")
self.child2 = ScalarModule(name="child_module")
def __call__(self):
self.child1()
self.child2()
class MultipleForwardMethods(module.Module):
def __init__(self, name=None):
super().__init__(name=name)
s = ScalarModule()
s()
self.ctor_mod = s
def __call__(self):
s = ScalarModule()
self.call_mod = s
x = s()
x += self.autoencode()
return x
def autoencode(self):
x = self.encode()
x += self.decode()
return x
def encode(self):
s = ScalarModule()
self.encode_mod = s
return s()
def decode(self):
s = ScalarModule()
self.decode_mod = s
return s()
class CountingModule(module.Module):
def __call__(self):
for _ in range(10):
count = base.get_state("count", (), jnp.int32, jnp.zeros)
base.set_state("count", count + 1)
return count
class TransparentModule(module.Module):
@module.transparent
def __call__(self):
return ScalarModule()()
@dataclasses.dataclass
class DataLinear(module.Module):
output_size: int
name: Optional[str] = None
def __call__(self, x):
j, k = x.shape[-1], self.output_size
w = base.get_parameter("w", [j, k], init=jnp.ones)
b = base.get_parameter("b", [k], init=jnp.zeros)
return x @ w + b
@dataclasses.dataclass
class DataMLP(module.Module):
output_sizes: Sequence[int]
activation: Callable[[jax.Array], jax.Array] = jax.nn.relu
name: Optional[str] = None
def __call__(self, x):
for i, output_size in enumerate(self.output_sizes):
if i > 0:
x = self.activation(x)
x = DataLinear(output_size, name="linear")(x)
return x
class NameScopeModule(module.Module):
def __call__(self):
w = base.get_parameter("w", [], init=jnp.zeros)
with module.name_scope("foo"):
w_foo = base.get_parameter("w", [], init=jnp.zeros)
return w, w_foo
@runtime_checkable
class SupportsFoo(Protocol):
@abc.abstractmethod
def foo(self) -> int:
...
# Check that we can declare a module that also inherits from a Protocol without
# encountering a metaclass conflict.
class ProtocolModule(module.Module, SupportsFoo):
# We should also be able to add new abstractmethods to the derived class,
# since its metaclass is a subclass of ABCMeta.
@abc.abstractmethod
def bar(self) -> str:
...
class ConcreteProtocolModule(ProtocolModule):
def foo(self):
return 0
def bar(self):
return ""
class ModuleWithCustomName(module.Module):
def __init__(self, name=None):
super().__init__(name=name)
self.init_module = module.Module(name="child")
@module.name_like("__call__")
def foo(self):
self.foo_module = module.Module(name="child")
@module.name_like("baz")
def bar(self):
self.bar_module = module.Module(name="child")
def baz(self):
self.baz_module = module.Module(name="child")
def __call__(self):
self.call_module = module.Module(name="child")
class ModuleWithCustomNameOnCall(module.Module):
def __init__(self, name=None):
super().__init__(name=name)
self.init_module = module.Module(name="child")
def foo(self):
self.foo_module = module.Module(name="child")
@module.name_like("foo")
def __call__(self):
self.call_module = module.Module(name="child")
class ModuleWithCustomNameOnInit(module.Module):
@module.name_like("__call__")
def __init__(self, name=None):
super().__init__(name=name)
self.init_module = module.Module(name="child")
def __call__(self):
self.call_module = module.Module(name="child")
class ModuleWithDoubleCall(module.Module):
@module.name_like("__call__")
def foo(self):
self.foo_module = module.Module(name="child")
def __call__(self):
self.foo()
self.call_module = module.Module(name="child")
def create_module_from_qualified_name(
name: str,
*,
cls: type[ModuleT] = module.Module,
) -> ModuleT:
if "/" in name:
prefix, suffix = name.rsplit("/", 1)
with module.name_scope(prefix):
return cls(name=suffix)
else:
return cls(name=name)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/module_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Haiku module."""
from collections.abc import Mapping
import inspect
from typing import Any, Callable, NamedTuple, Optional, TypeVar, Union
from haiku._src import analytics
from haiku._src import base
from haiku._src import data_structures
from haiku._src import typing
import jax
# If you are forking replace this with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
PRNGSequence = base.PRNGSequence
Params = typing.Params
State = typing.State
MutableParams = typing.MutableParams
MutableState = typing.MutableState
# pylint: enable=invalid-name
# TODO(slebedev): This makes the module non-forkable.
PRNGKey = typing.PRNGKey
del typing
T = TypeVar("T")
# TODO(b/161684853): Use protocols for transform if/when PEP-612 is implemented.
# https://www.python.org/dev/peps/pep-0612/
def sig_replace_leading_parameters(
s: inspect.Signature, n: int, new_params: list[inspect.Parameter]
) -> inspect.Signature:
"""Replace the first n positional parameters of a signature."""
p = list(s.parameters.values())
for i in range(n):
if i >= len(p) or p[i].kind not in {
inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.POSITIONAL_OR_KEYWORD
}:
break # not enough arguments (or args in VARARGS that can't be counted)
else:
i = n
return inspect.Signature(
parameters=new_params + p[i:], return_annotation=s.return_annotation,
__validate_parameters__=False)
def sig_remove_state(s: inspect.Signature) -> inspect.Signature:
"""Remove hk.State from the return type of a signature."""
ret = s.return_annotation
# Extract the tuple element types from `typing._GenericAlias` or
# `types.GenericAlias`.
ret_generic = getattr(ret, "__origin__", None)
ret_type_args = getattr(ret, "__args__", ())
if ret_generic is tuple and len(ret_type_args) == 2:
ret = ret_type_args[0]
else:
ret = Any
return inspect.Signature(
parameters=list(s.parameters.values()), return_annotation=ret,
__validate_parameters__=False)
def sig_add_state(s: inspect.Signature) -> inspect.Signature:
"""Add hk.State to the return type of a signature."""
if s.return_annotation is inspect.Parameter.empty:
ret = Any
else:
ret = s.return_annotation
return inspect.Signature(
parameters=list(s.parameters.values()),
return_annotation=tuple[ret, hk.State],
__validate_parameters__=False)
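# Illustrative sketch (hypothetical helper, not referenced elsewhere): shows how
# the sig_* helpers above rewrite user-facing signatures. `fake_apply` and
# `forward` are made-up functions for the example.
def _example_signature_helpers() -> None:
  def fake_apply(params, state, rng, x):  # Shaped like a stateful apply fn.
    del params, state, rng
    return x

  # Drop the leading (params, state) pair and put a single `params` back,
  # mirroring how the wrappers below rebuild apply signatures.
  new_sig = sig_replace_leading_parameters(
      inspect.signature(fake_apply), 2,
      [inspect.Parameter("params", inspect.Parameter.POSITIONAL_OR_KEYWORD)])
  assert list(new_sig.parameters) == ["params", "rng", "x"]

  def forward(x: int) -> int:
    return x

  # Wrap the return annotation in `tuple[..., hk.State]` and strip it again.
  with_state = sig_add_state(inspect.signature(forward))
  assert with_state.return_annotation == tuple[int, hk.State]
  assert sig_remove_state(with_state).return_annotation == int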
class Transformed(NamedTuple):
"""Holds a pair of pure functions.
Attributes:
init: A pure function: ``params = init(rng, *a, **k)``
apply: A pure function: ``out = apply(params, rng, *a, **k)``
"""
# Args: [Optional[PRNGKey], ...]
init: Callable[..., hk.MutableParams]
# Args: [Params, Optional[PRNGKey], ...]
apply: Callable[..., Any]
class TransformedWithState(NamedTuple):
"""Holds a pair of pure functions.
Attributes:
init: A pure function: ``params, state = init(rng, *a, **k)``
apply: A pure function: ``out, state = apply(params, state, rng, *a, **k)``
"""
# Args: [Optional[PRNGKey], ...]
init: Callable[..., tuple[hk.MutableParams, hk.MutableState]]
# Args: [hk.Params, hk.State, Optional[PRNGKey], ...]
apply: Callable[..., tuple[Any, hk.MutableState]]
def to_prng_sequence(rng, err_msg) -> Optional[hk.PRNGSequence]:
if rng is not None:
try:
rng = hk.PRNGSequence(rng)
except Exception as e:
raise ValueError(
f"{err_msg}. The object was of type {type(rng)}: {rng}") from e
return rng
RNG_ERROR_TPL = ("{f} must be called with an RNG as the {position} argument, "
"the required signature is: `{signature}`")
INIT_RNG_ERROR = RNG_ERROR_TPL.format(
f="Init", position="first", signature="init(rng, *a, **k)")
APPLY_RNG_ERROR = RNG_ERROR_TPL.format(
f="Apply", position="second", signature="apply(params, rng, *a, **k)")
APPLY_RNG_STATE_ERROR = RNG_ERROR_TPL.format(
f="Apply", position="third", signature="apply(params, state, rng, *a, **k)")
def without_state(f: TransformedWithState) -> Transformed:
"""Wraps a transformed tuple and ignores state in/out.
The example below is equivalent to ``f = hk.transform(f)``:
>>> def f(x):
... mod = hk.Linear(10)
... return mod(x)
>>> f = hk.without_state(hk.transform_with_state(f))
>>> rng = jax.random.PRNGKey(42)
>>> x = jnp.zeros([1, 1])
>>> params = f.init(rng, x)
>>> print(f.apply(params, rng, x))
[[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
Args:
f: A transformed function.
Returns:
A transformed function that does not take or return state.
"""
def init_fn(*args, **kwargs) -> hk.MutableParams:
params, state = f.init(*args, **kwargs)
if state:
raise base.NonEmptyStateError(
"If your transformed function uses `hk.{get,set}_state` then use "
"`hk.transform_with_state`.")
return params
init_fn.__signature__ = sig_remove_state(inspect.signature(f.init))
def apply_fn(params, *args, **kwargs):
if "state" in kwargs:
raise TypeError(
"Haiku transform adds three arguments (params, state, rng) to apply. "
"If the functions you are transforming use the same names you must "
"pass them positionally (e.g. `f.apply(.., my_state)` and not by "
"name (e.g. `f.apply(.., state=my_state)`)")
out, state = f.apply(params, None, *args, **kwargs)
if state:
raise base.NonEmptyStateError(
"If your transformed function uses `hk.{get,set}_state` then use "
"`hk.transform_with_state`.")
return out
apply_fn.__signature__ = sig_remove_state(
sig_replace_leading_parameters(
inspect.signature(f.apply), 2, [
inspect.Parameter(
"params",
inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=Optional[hk.Params])
]))
tie_in_original_fn(f, init_fn, apply_fn)
return Transformed(init=init_fn, apply=apply_fn)
def with_empty_state(f: Transformed) -> TransformedWithState:
"""Wraps a transformed tuple and passes empty state in/out.
The example below is equivalent to ``f = hk.transform_with_state(f)``:
>>> def f(x):
... mod = hk.Linear(10)
... return mod(x)
>>> f = hk.with_empty_state(hk.transform(f))
>>> rng = jax.random.PRNGKey(42)
>>> x = jnp.zeros([1, 1])
>>> params, state = f.init(rng, x)
>>> state
{}
>>> out, state = f.apply(params, state, rng, x)
>>> print(out)
[[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
>>> state
{}
Args:
f: A transformed function.
Returns:
    A transformed function that accepts and returns state.
"""
def init_fn(*args, **kwargs) -> tuple[hk.MutableParams, hk.MutableState]:
params = f.init(*args, **kwargs)
state = data_structures.to_haiku_dict({})
return params, state
init_fn.__signature__ = sig_add_state(inspect.signature(f.init))
def apply_fn(
params: hk.Params, state: Optional[hk.State], *args, **kwargs
) -> tuple[Any, hk.MutableState]:
del state
out = f.apply(params, *args, **kwargs)
state = data_structures.to_haiku_dict({})
return out, state
apply_fn.__signature__ = sig_add_state(sig_replace_leading_parameters(
inspect.signature(f.apply), 1, [
inspect.Parameter(
"param",
inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=hk.Params),
inspect.Parameter(
"state",
inspect.Parameter.POSITIONAL_OR_KEYWORD,
              annotation=hk.State)
]))
tie_in_original_fn(f, init_fn, apply_fn)
return TransformedWithState(init=init_fn, apply=apply_fn)
# TODO(tomhennigan) Remove apply_rng.
def transform(f, *, apply_rng=True) -> Transformed:
"""Transforms a function using Haiku modules into a pair of pure functions.
For a function ``out = f(*a, **k)`` this function returns a pair of two pure
functions that call ``f(*a, **k)`` explicitly collecting and injecting
parameter values::
params = init(rng, *a, **k)
out = apply(params, rng, *a, **k)
Note that the ``rng`` argument is typically not required for ``apply`` and
passing ``None`` is accepted.
The first thing to do is to define a :class:`Module`. A module encapsulates
some parameters and a computation on those parameters:
>>> class MyModule(hk.Module):
... def __call__(self, x):
... w = hk.get_parameter("w", [], init=jnp.zeros)
... return x + w
Next, define some function that creates and applies modules. We use
:func:`transform` to transform that function into a pair of functions that
allow us to lift all the parameters out of the function (``f.init``) and
apply the function with a given set of parameters (``f.apply``):
>>> def f(x):
... a = MyModule()
... b = MyModule()
... return a(x) + b(x)
>>> f = hk.transform(f)
To get the initial state of the module call ``init`` with an example input:
>>> params = f.init(None, 1)
>>> params
{'my_module': {'w': ...Array(0., dtype=float32)},
'my_module_1': {'w': ...Array(0., dtype=float32)}}
You can then apply the function with the given parameters by calling
``apply`` (note that since we don't use Haiku's random number APIs to apply
our network we pass ``None`` as an RNG key):
>>> print(f.apply(params, None, 1))
2.0
It is expected that your program will at some point produce updated parameters
and you will want to re-apply ``apply``. You can do this by calling ``apply``
with different parameters:
>>> new_params = {"my_module": {"w": jnp.array(2.)},
... "my_module_1": {"w": jnp.array(3.)}}
>>> print(f.apply(new_params, None, 2))
9.0
If your transformed function needs to maintain internal state (e.g. moving
averages in batch norm) then see :func:`transform_with_state`.
Args:
f: A function closing over :class:`Module` instances.
    apply_rng: In the process of being removed. Can only be ``True``.
Returns:
A :class:`Transformed` tuple with ``init`` and ``apply`` pure functions.
"""
analytics.log_once("transform")
if not apply_rng:
raise ValueError(
"The apply_rng argument has been removed and hk.transform "
"now *always* applies an rng.\n"
"Replace hk.transform(..., apply_rng=False) with "
"hk.without_apply_rng(hk.transform(...)).\n"
"Replace hk.transform(..., apply_rng=True) with hk.transform(...).")
return without_state(transform_with_state(f))
COMPILED_FN_TYPES = (jax.interpreters.xla.xe.PjitFunction,
jax.interpreters.xla.xe.PmapFunction) # pytype: disable=name-error
def check_not_jax_transformed(f):
# TODO(tomhennigan): Consider `CompiledFunction = type(jax.jit(lambda: 0))`.
if isinstance(f, COMPILED_FN_TYPES):
raise ValueError("A common error with Haiku is to pass an already jit "
"(or pmap) decorated function into hk.transform (e.g. "
"`hk.transform(jax.jit(f)))`. You should instead jit/pmap "
"the init or apply function you get back from Haiku (e.g. "
"`jax.jit(hk.transform(f).apply)`).\n\n"
"This is because the function you pass into hk.transform "
"is not a pure function (because you don't explicitly "
"pass in/out params/rng). jit and pmap require you to "
"pass in a pure function (such as the init or apply "
"functions Haiku gives you back from hk.transform).")
def transform_with_state(f) -> TransformedWithState:
"""Transforms a function using Haiku modules into a pair of pure functions.
See :func:`transform` for general details on Haiku transformations.
For a function ``out = f(*a, **k)`` this function returns a pair of two pure
functions that call ``f(*a, **k)`` explicitly collecting and injecting
parameter values and state::
params, state = init(rng, *a, **k)
out, state = apply(params, state, rng, *a, **k)
Note that the ``rng`` argument is typically not required for ``apply`` and
passing ``None`` is accepted.
This function is equivalent to :func:`transform`, however it allows you to
maintain and update internal state (e.g. :class:`ExponentialMovingAverage` in
:class:`BatchNorm`) via :func:`get_state` and :func:`set_state`:
>>> def f():
... counter = hk.get_state("counter", shape=[], dtype=jnp.int32,
... init=jnp.zeros)
... hk.set_state("counter", counter + 1)
... return counter
>>> f = hk.transform_with_state(f)
>>> params, state = f.init(None)
>>> for _ in range(10):
... counter, state = f.apply(params, state, None)
>>> print(counter)
9
Args:
f: A function closing over :class:`Module` instances.
Returns:
A :class:`TransformedWithState` tuple with ``init`` and ``apply`` pure
functions.
"""
analytics.log_once("transform_with_state")
check_not_jax_transformed(f)
unexpected_tracer_hint = (
"An UnexpectedTracerError was raised while inside a Haiku transformed "
"function (see error above).\n"
"Hint: are you using a JAX transform or JAX control-flow function "
"(jax.vmap/jax.lax.scan/...) inside a Haiku transform? You might want to use "
"the Haiku version of the transform instead (hk.vmap/hk.scan/...).\n"
"See https://dm-haiku.readthedocs.io/en/latest/notebooks/transforms.html "
"on why you can't use JAX transforms inside a Haiku module.")
f_sig = inspect.signature(f)
def init_fn(
rng: Optional[Union[PRNGKey, int]],
*args,
**kwargs,
) -> tuple[hk.MutableParams, hk.MutableState]:
"""Initializes your function collecting parameters and state."""
rng = to_prng_sequence(rng, err_msg=INIT_RNG_ERROR)
with base.new_context(rng=rng) as ctx:
try:
f(*args, **kwargs)
except jax.errors.UnexpectedTracerError as e:
raise jax.errors.UnexpectedTracerError(unexpected_tracer_hint) from e
return ctx.collect_params(), ctx.collect_initial_state()
init_fn.__signature__ = inspect.Signature(
parameters=[
inspect.Parameter(
"rng",
inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=Optional[Union[PRNGKey, int]],
),
]
+ list(f_sig.parameters.values()),
return_annotation=tuple[hk.Params, hk.State],
__validate_parameters__=False,
)
def apply_fn(
params: Optional[hk.Params],
state: Optional[hk.State],
rng: Optional[Union[PRNGKey, int]],
*args,
**kwargs,
) -> tuple[Any, hk.MutableState]:
"""Applies your function injecting parameters and state."""
uses_state = state is not None
params = check_mapping("params", params)
state = check_mapping("state", state)
rng = to_prng_sequence(
rng,
err_msg=(APPLY_RNG_STATE_ERROR if uses_state else APPLY_RNG_ERROR))
with base.new_context(params=params, state=state, rng=rng) as ctx:
try:
out = f(*args, **kwargs)
except jax.errors.UnexpectedTracerError as e:
raise jax.errors.UnexpectedTracerError(unexpected_tracer_hint) from e
return out, ctx.collect_state()
apply_fn.__signature__ = sig_add_state(inspect.Signature(
parameters=[
inspect.Parameter("params", inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=Optional[hk.Params]),
inspect.Parameter("state", inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=Optional[hk.State]),
inspect.Parameter("rng", inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=Optional[Union[PRNGKey, int]]),
] + list(f_sig.parameters.values()),
return_annotation=f_sig.return_annotation,
__validate_parameters__=False
))
tie_in_original_fn(f, init_fn, apply_fn)
return TransformedWithState(init_fn, apply_fn)
def tie_in_original_fn(f, init_fn, apply_fn):
# EXPERIMENTAL: Expose the original function as a private attribute.
if isinstance(f, (Transformed, TransformedWithState)):
f = getattr(f.init, "_original_fn")
init_fn._original_fn = f # pylint: disable=protected-access
apply_fn._original_fn = f # pylint: disable=protected-access
def get_original_fn(f: Union[Transformed, TransformedWithState, Callable[...,
Any]]):
if isinstance(f, (Transformed, TransformedWithState)):
f = f.init
return getattr(f, "_original_fn")
def check_mapping(name: str, mapping: Optional[T]) -> T:
"""Cleans inputs to apply_fn, providing better errors."""
if mapping is None:
# Convert None to empty dict.
mapping = dict()
if not isinstance(mapping, Mapping):
if type(mapping).__name__ == "_DictWrapper":
# TensorFlow's checkpointing infrastructure replaces `dict` instances on
# `tf.Module`s with a type that is not a `Mapping` instance.
return mapping
raise TypeError(f"{name} argument does not appear valid. It should be a "
f"mapping but is of type {type(mapping)}. "
"For reference the parameters for apply are "
"`apply(params, rng, ...)`` for `hk.transform` and "
"`apply(params, state, rng, ...)` for "
"`hk.transform_with_state`.\n"
f"The argument was: {mapping!r}.")
return mapping
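# Illustrative sketch (not referenced elsewhere): check_mapping accepts
# mappings, converts None to an empty dict, and rejects anything else with a
# descriptive error.
def _example_check_mapping() -> None:
  assert check_mapping("params", None) == {}
  params = {"linear": {"w": 1.0}}
  assert check_mapping("params", params) is params
  try:
    check_mapping("params", [1.0])  # A list is not a mapping.
  except TypeError:
    pass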
def running_init() -> bool:
"""Return True if running the ``init`` function of a Haiku transform.
In general you should not need to gate behaviour of your module based on
whether you are running ``init`` or ``apply``, but sometimes (e.g. when making
use of JAX control flow) this is required.
For example, if you want to use :func:`switch` to pick between experts, when
we run your init function we need to ensure that params/state for all experts
are created (unconditionally) but during apply we want to conditionally apply
  (and perhaps update the internal state of) only one of our experts:
>>> experts = [hk.nets.ResNet50(10) for _ in range(5)]
>>> x = jnp.ones([1, 224, 224, 3])
>>> if hk.running_init():
... # During init unconditionally create params/state for all experts.
... for expert in experts:
... out = expert(x, is_training=True)
... else:
... # During apply conditionally apply (and update) only one expert.
... index = jax.random.randint(hk.next_rng_key(), [], 0, len(experts) - 1)
... out = hk.switch(index, experts, x)
Returns:
True if running ``init`` otherwise False.
"""
base.assert_context("running_init")
return not base.params_frozen()
|
dm-haiku-main
|
haiku/_src/transform.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Summarises Haiku modules."""
from collections.abc import Mapping, Sequence
import dataclasses
import functools
import pprint
from typing import Any, Callable, Optional, TypeVar, Union
from haiku._src import base
from haiku._src import data_structures
from haiku._src import module as module_lib
from haiku._src import transform
from haiku._src import utils
import jax
import jax.numpy as jnp
import numpy as np
import tabulate as tabulate_lib
# If you are forking replace this block with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
Module = module_lib.Module
MethodContext = module_lib.MethodContext
intercept_methods = module_lib.intercept_methods
transform_with_state = transform.transform_with_state
Transformed = transform.Transformed
TransformedWithState = transform.TransformedWithState
# pylint: enable=invalid-name
del module_lib
T = TypeVar("T")
@dataclasses.dataclass(frozen=True)
class ArraySpec:
"""Shaped and sized specification of an array.
Attributes:
shape: Shape of the array.
dtype: DType of the array.
"""
shape: Sequence[int]
dtype: jnp.dtype
# Used so pformat(..) formats spec as: f32[1,2,3]
__repr__ = __str__ = utils.format_array
@property
def size(self): # pylint: disable=g-missing-from-attributes
return int(np.prod(self.shape))
@classmethod
def from_array(cls, array) -> "ArraySpec":
return ArraySpec(array.shape, array.dtype)
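# Illustrative sketch (hypothetical values, not referenced elsewhere): ArraySpec
# records only shape and dtype, so summaries never hold on to array data.
def _example_array_spec() -> None:
  spec = ArraySpec.from_array(jnp.ones([8, 28 * 28], dtype=jnp.float32))
  assert spec.shape == (8, 784)
  assert spec.size == 8 * 784
  assert str(spec) == "f32[8,784]"  # Rendering comes from utils.format_array.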
@dataclasses.dataclass(frozen=True)
class ModuleDetails:
"""Module and method related information.
Attributes:
module: A :class:`~haiku.Module` instance.
method_name: The method name that was invoked on the module.
    params: The module's params dict with arrays converted to :class:`ArraySpec`.
    state: The module's state dict with arrays converted to :class:`ArraySpec`.
"""
module: hk.Module
method_name: str
params: Mapping[str, ArraySpec]
state: Mapping[str, ArraySpec]
@classmethod
def of(cls, module: hk.Module, method_name: str) -> "ModuleDetails":
params = jax.tree_util.tree_map(ArraySpec.from_array, module.params_dict())
state = jax.tree_util.tree_map(ArraySpec.from_array, module.state_dict())
return ModuleDetails(module=module, method_name=method_name, params=params,
state=state)
@dataclasses.dataclass(frozen=True)
class MethodInvocation:
"""Record of a method being invoked on a given module.
Attributes:
module_details: Details about which module and method were invoked.
args_spec: Positional arguments to the method invocation with arrays
replaced by :class:`ArraySpec`.
kwargs_spec: Keyword arguments to the method invocation with arrays
replaced by :class:`ArraySpec`.
output_spec: Output of the method invocation with arrays replaced by
:class:`ArraySpec`.
context: Additional context information for the method call as provided
by :func:`~haiku.experimental.intercept_methods`.
call_stack: Stack of modules currently active while calling this module
method. For example if ``A`` calls ``B`` which calls ``C`` then the call
stack for ``C`` will be ``[B_DETAILS, A_DETAILS]``.
"""
module_details: ModuleDetails
args_spec: tuple[Any, ...]
kwargs_spec: dict[str, Any]
output_spec: Any # Actual: PyTree[Union[Any, ArraySpec]]
context: hk.MethodContext
call_stack: Sequence[ModuleDetails]
def get_call_stack() -> Sequence[ModuleDetails]:
frame = base.current_frame()
return tuple(map(
lambda s: ModuleDetails.of(s.module, s.method_name),
list(frame.module_stack)))
def to_spec(tree):
return jax.tree_util.tree_map(
lambda x: ArraySpec.from_array(x) if isinstance(x, jax.Array) else x,
tree)
IGNORED_METHODS = ("__init__", "params_dict", "state_dict")
def log_used_modules(
used_modules: list[MethodInvocation],
next_f: Callable[..., T],
args: tuple[Any, ...],
kwargs: dict[str, Any],
context: hk.MethodContext,
) -> T:
"""Method interceptor that logs used modules to the given list."""
if context.method_name in IGNORED_METHODS:
return next_f(*args, **kwargs)
idx = len(used_modules)
used_modules.append(None) # pytype: disable=container-type-mismatch
out = next_f(*args, **kwargs)
used_modules[idx] = MethodInvocation(
module_details=ModuleDetails.of(context.module, context.method_name),
args_spec=to_spec(args),
kwargs_spec=to_spec(kwargs),
output_spec=to_spec(out),
context=context,
call_stack=get_call_stack())
return out
def make_hk_transform_ignore_jax_transforms(f):
"""Wraps f such that if it was jit/pmap-ed Haiku won't realise."""
return lambda *a, **k: f(*a, **k) # pylint: disable=unnecessary-lambda
def eval_summary(
f: Union[Callable[..., Any], hk.Transformed, hk.TransformedWithState],
) -> Callable[..., Sequence[MethodInvocation]]:
"""Records module method calls performed by ``f``.
>>> f = lambda x: hk.nets.MLP([300, 100, 10])(x)
>>> x = jnp.ones([8, 28 * 28])
>>> for i in hk.experimental.eval_summary(f)(x):
... print("mod := {:14} | in := {} out := {}".format(
... i.module_details.module.module_name, i.args_spec[0], i.output_spec))
mod := mlp | in := f32[8,784] out := f32[8,10]
mod := mlp/~/linear_0 | in := f32[8,784] out := f32[8,300]
mod := mlp/~/linear_1 | in := f32[8,300] out := f32[8,100]
mod := mlp/~/linear_2 | in := f32[8,100] out := f32[8,10]
Args:
f: A function or transformed function to trace.
Returns:
A callable taking the same arguments as the provided function, but returning
a sequence of :class:`MethodInvocation` instances revealing the methods
called on each module when applying ``f``.
See Also:
:func:`tabulate`: Pretty prints a summary of the execution of a function.
"""
sidechannel = data_structures.ThreadLocalStack()
try:
f = transform.get_original_fn(f)
except AttributeError:
pass
def f_logged(*args, **kwargs):
used_modules = sidechannel.peek()
logging_interceptor = functools.partial(log_used_modules, used_modules)
with hk.intercept_methods(logging_interceptor):
f(*args, **kwargs)
# We know that we will only evaluate this function once and that inside
# eval_shape we will re-trace any jitted/pmap-ed code. This allows users to
# pass in jit/pmap decorated apply functions (e.g. train_step).
f = make_hk_transform_ignore_jax_transforms(f)
f_orig = hk.transform_with_state(f)
f_logged = hk.transform_with_state(f_logged)
def init_apply(*args, **kwargs):
init_rng, apply_rng = jax.random.split(jax.random.PRNGKey(42))
params, state = f_orig.init(init_rng, *args, **kwargs)
f_logged.apply(params, state, apply_rng, *args, **kwargs)
def wrapper(*args, **kwargs) -> Sequence[MethodInvocation]:
used_modules = []
with sidechannel(used_modules), jax.disable_jit():
jax.eval_shape(init_apply, *args, **kwargs)
return used_modules
return wrapper
# Support for writing out tables.
@dataclasses.dataclass(frozen=True)
class Filter:
f: Callable[[MethodInvocation], bool]
@dataclasses.dataclass(frozen=True)
class Column:
header: str
f: Callable[[MethodInvocation], str]
align: str = "left"
def owned_params(module: ModuleDetails) -> Mapping[str, ArraySpec]:
out = {}
for fq_name, param in module.params.items():
module_name, param_name = fq_name.rsplit("/", 1)
if module_name == module.module.module_name:
out[param_name] = param
return out
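# Illustrative sketch (hypothetical names, not referenced elsewhere): mirrors
# the filtering above on plain strings, showing how fully qualified parameter
# names split into (module_name, param_name) and match against the owner.
def _example_owned_param_filtering() -> None:
  fq_params = {
      "mlp/~/linear_0/w": "f32[784,300]",
      "mlp/~/linear_0/b": "f32[300]",
      "mlp/~/linear_1/w": "f32[300,100]",
  }
  owner = "mlp/~/linear_0"
  owned = {}
  for fq_name, spec in fq_params.items():
    module_name, param_name = fq_name.rsplit("/", 1)
    if module_name == owner:
      owned[param_name] = spec
  assert sorted(owned) == ["b", "w"]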
def format_owned_params(invocation: MethodInvocation) -> str:
"""Newline separated params sorted by size (desc) then name (asc)."""
params = owned_params(invocation.module_details)
size_desc_then_name = lambda i: (-np.prod(i[1].shape), i[0])
return "\n".join(f"{k}: {utils.format_array(v)}"
for k, v in sorted(params.items(), key=size_desc_then_name))
def format_input(invocation: MethodInvocation) -> str:
out = ", ".join(map(pprint.pformat, invocation.args_spec))
kwargs = invocation.kwargs_spec
if kwargs:
out += "\n"
out += ", ".join(f"{k}={pprint.pformat(v)}" for k, v in kwargs.items())
return out
def format_output(invocation: MethodInvocation) -> str:
return pprint.pformat(invocation.output_spec)
def format_call_stack(invocation: MethodInvocation) -> str:
"""Formats the name and call_stack of a given module as a string."""
def format_entry(state: ModuleDetails) -> str:
class_name = type(state.module).__name__
module_name = state.module.module_name
method_name = state.method_name
if method_name == "__call__":
return f"{module_name} ({class_name})"
else:
return f"{module_name} ({class_name}.{method_name})"
return "\n └ ".join(map(format_entry, invocation.call_stack))
all_filters = {
"has_output": Filter(lambda r: r.output_spec is not None),
"has_params": Filter(lambda r: bool(r.module_details.params)),
}
all_columns = {
"module": Column("Module", format_call_stack),
"config": Column("Config", lambda r: repr(r.module_details.module)),
"owned_params": Column("Module params", format_owned_params),
"input": Column("Input", format_input),
"output": Column("Output", format_output),
"params_size": Column(
"Param count",
lambda r: f"{utils.tree_size(r.module_details.params):,}",
"right",
),
"params_bytes": Column(
"Param bytes",
lambda r: utils.format_bytes(utils.tree_bytes(r.module_details.params)),
"right",
),
}
DEFAULT_COLUMNS = ("module", "config", "owned_params", "input", "output",
"params_size", "params_bytes")
DEFAULT_FILTERS = ("has_output",)
def tabulate(
f: Union[Callable[..., Any], hk.Transformed, hk.TransformedWithState],
*,
columns: Optional[Sequence[str]] = DEFAULT_COLUMNS,
filters: Optional[Sequence[str]] = DEFAULT_FILTERS,
tabulate_kwargs={"tablefmt": "grid"},
) -> Callable[..., str]:
# pylint: disable=line-too-long
"""Produces a summarised view of the execution of ``f``.
>>> def f(x):
... return hk.nets.MLP([300, 100, 10])(x)
>>> x = jnp.ones([8, 28 * 28])
>>> f = hk.transform(f)
>>> print(hk.experimental.tabulate(f)(x))
+-------------------------+------------------------------------------+-----------------+------------+------------+---------------+---------------+
| Module | Config | Module params | Input | Output | Param count | Param bytes |
+=========================+==========================================+=================+============+============+===============+===============+
| mlp (MLP) | MLP(output_sizes=[300, 100, 10]) | | f32[8,784] | f32[8,10] | 266,610 | 1.07 MB |
+-------------------------+------------------------------------------+-----------------+------------+------------+---------------+---------------+
| mlp/~/linear_0 (Linear) | Linear(output_size=300, name='linear_0') | w: f32[784,300] | f32[8,784] | f32[8,300] | 235,500 | 942.00 KB |
| └ mlp (MLP) | | b: f32[300] | | | | |
+-------------------------+------------------------------------------+-----------------+------------+------------+---------------+---------------+
| mlp/~/linear_1 (Linear) | Linear(output_size=100, name='linear_1') | w: f32[300,100] | f32[8,300] | f32[8,100] | 30,100 | 120.40 KB |
| └ mlp (MLP) | | b: f32[100] | | | | |
+-------------------------+------------------------------------------+-----------------+------------+------------+---------------+---------------+
| mlp/~/linear_2 (Linear) | Linear(output_size=10, name='linear_2') | w: f32[100,10] | f32[8,100] | f32[8,10] | 1,010 | 4.04 KB |
| └ mlp (MLP) | | b: f32[10] | | | | |
+-------------------------+------------------------------------------+-----------------+------------+------------+---------------+---------------+
Possible values for ``columns``:
* ``module``: Displays module and method name.
* ``config``: Displays the constructor arguments used for the module.
* ``owned_params``: Displays parameters directly owned by this module.
* ``input``: Displays module inputs.
* ``output``: Displays module output.
  * ``params_size``: Displays the number of parameters.
* ``params_bytes``: Displays parameter size in bytes.
Possible values for ``filters``:
* ``has_output``: Only include methods returning a value other than ``None``.
* ``has_params``: Removes methods from modules that do not have parameters.
Args:
f: A function to transform OR one of the init/apply functions from Haiku
or the result of :func:`transform` or :func:`transform_with_state`.
columns: A list of column names to enable.
filters: A list of filters to apply to remove certain module methods.
tabulate_kwargs: Keyword arguments to pass to ``tabulate.tabulate(..)``.
Returns:
A callable that takes the same arguments as ``f`` but returns a string
summarising the modules used during the execution of ``f``.
See Also:
:func:`eval_summary`: Raw data used to generate this table.
"""
# pylint: enable=line-too-long
f = eval_summary(f)
if columns is None:
columns = DEFAULT_COLUMNS
else:
invalid = [c for c in columns if c not in all_columns]
if invalid:
raise ValueError(
f"Invalid column(s) {invalid}, valid columns {list(all_columns)}")
if filters is None:
filters = DEFAULT_FILTERS
else:
invalid = [f for f in filters if f not in all_filters]
if invalid:
raise ValueError(
f"Invalid filter(s) {invalid}, valid filters {list(all_filters)}")
columns = [all_columns[c] for c in columns]
filters = [all_filters[f] for f in filters]
def generate_summary(*args, **kwargs) -> str:
"""Generates a string summary of the given Haiku function."""
rows = []
for method_invocation in f(*args, **kwargs):
if not all(filter.f(method_invocation) for filter in filters):
continue
row = [column.f(method_invocation) for column in columns]
rows.append(row)
if rows:
headers = [col.header for col in columns]
colalign = [col.align for col in columns]
return tabulate_lib.tabulate(
rows, headers=headers, colalign=colalign, **tabulate_kwargs)
else:
return "No modules matching filters."
return generate_summary
|
dm-haiku-main
|
haiku/_src/summarise.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data structures used by Haiku."""
# pylint: disable=unidiomatic-typecheck
# `isinstance(x, Mapping)` is super expensive, so we avoid it where possible
# since we expect constructing some of these types to be on the critical path
# for users.
import collections
from collections.abc import Iterator, Mapping, MutableMapping, Sequence
import contextlib
import pprint
import threading
from typing import Any, Callable, Deque, Generic, NamedTuple, Optional, TypeVar, Union
from haiku._src import config
from haiku._src import utils
import jax
K = TypeVar("K")
V = TypeVar("V")
T = TypeVar("T")
U = TypeVar("U")
class Stack(Generic[T]):
"""Stack supporting push/pop/peek."""
def __init__(self):
self._storage: Deque[T] = collections.deque()
def __len__(self) -> int:
return len(self._storage)
def __iter__(self) -> Iterator[T]:
return iter(reversed(self._storage))
def clone(self):
return self.map(lambda v: v)
def map(self, fn: Callable[[T], U]) -> "Stack[U]":
s = type(self)()
for item in self._storage:
s.push(fn(item))
return s
def pushleft(self, elem: T):
self._storage.appendleft(elem)
def push(self, elem: T):
self._storage.append(elem)
def popleft(self) -> T:
return self._storage.popleft()
def pop(self) -> T:
return self._storage.pop()
def peek(self, depth=-1) -> T:
return self._storage[depth]
@contextlib.contextmanager
def __call__(self, elem: T) -> Iterator[None]: # pytype: disable=invalid-annotation
self.push(elem)
try:
yield
finally:
assert self.pop() is elem
class ThreadLocalStack(Stack[T], threading.local):
"""Thread-local stack."""
class KeysOnlyKeysView(collections.abc.KeysView):
"""KeysView that does not print values when repr'ing."""
def __init__(self, mapping):
super().__init__(mapping) # pytype: disable=wrong-arg-count
self._mapping = mapping
def __repr__(self):
return f"{type(self).__name__}({list(self._mapping)!r})"
__str__ = __repr__
def to_immutable_dict(mapping: Mapping[K, V]) -> Mapping[K, V]:
"""Returns an immutable copy of the given mapping."""
if type(mapping) is FlatMap:
return mapping
items = []
for key, value in mapping.items():
value_type = type(value)
if value_type is dict:
value = to_immutable_dict(value)
items.append((key, value))
return FlatMap(items)
# TODO(tomhennigan) Better types here (Mapping[K, V]) -> MutableMapping[K, V]?
def to_mutable_dict(mapping):
"""Turns an immutable FlatMapping into a mutable dict."""
out = {}
for key, value in mapping.items():
value_type = type(value)
if value_type is FlatMap:
value = to_mutable_dict(value)
out[key] = value
return out
def to_haiku_dict(structure: Mapping[K, V]) -> MutableMapping[K, V]:
"""Returns a copy of the given two level structure.
Uses the same mapping type as Haiku will return from ``init`` or ``apply``
functions.
Args:
structure: A two level mapping to copy.
Returns:
A new two level mapping with the same contents as the input.
"""
return to_dict(structure)
def _copy_structure(tree):
"""Returns a copy of the given structure."""
leaves, treedef = jax.tree_util.tree_flatten(tree)
return jax.tree_util.tree_unflatten(treedef, leaves)
def _to_dict_recurse(value: Any):
if isinstance(value, Mapping):
return {k: _to_dict_recurse(v) for k, v in value.items()}
else:
return _copy_structure(value)
def to_dict(mapping: Mapping[str, Mapping[str, T]]) -> dict[str, dict[str, T]]:
"""Returns a ``dict`` copy of the given two level structure.
This method is guaranteed to return a copy of the input structure (e.g. even
if the input is already a ``dict``).
Args:
mapping: A two level mapping as returned by ``init`` functions of Haiku
transforms.
Returns:
A new two level mapping with the same contents as the input.
"""
return _to_dict_recurse(mapping)
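# Illustrative sketch (hypothetical values, not referenced elsewhere): to_dict
# returns a plain, mutable nested dict and never aliases the input structure.
def _example_to_dict() -> None:
  params = {"linear": {"w": 1.0, "b": 0.0}}
  mutable = to_dict(params)
  mutable["linear"]["w"] = 2.0         # The copy is freely mutable ...
  assert params["linear"]["w"] == 1.0  # ... and the original is untouched.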
def _repr_item(k, v):
k = repr(k) + ": "
v = pprint.pformat(v)
return k + utils.indent(len(k), v).lstrip()
class FlatComponents(NamedTuple):
leaves: Sequence[Any]
structure: jax.tree_util.PyTreeDef
class FlatMap(Mapping[K, V]):
"""Immutable mapping with O(1) flatten and O(n) unflatten operation.
Warning: this type is only efficient when used with ``jax.tree_util.tree_*``.
When used with ``tree.*`` it has similar performance to ``dict``.
Note that to prevent common errors immutable shims are returned for any
nested mappings.
"""
__slots__ = ("_structure", "_leaves", "_mapping")
def __init__(self, *args, **kwargs):
"""Accepts FlatComponents or the same arguments as `dict`."""
if not kwargs and len(args) == 1 and type(args[0]) is FlatComponents:
leaves, structure = args[0]
mapping = None
# When unflattening we cannot assume that the leaves are not pytrees (for
# example: `jax.tree_util.tree_map(list, my_map)` would pass a list of
# lists in as leaves).
if not jax.tree_util.all_leaves(leaves):
mapping = jax.tree_util.tree_unflatten(structure, leaves)
leaves, structure = jax.tree_util.tree_flatten(mapping)
else:
mapping = dict(*args, **kwargs)
leaves, structure = jax.tree_util.tree_flatten(mapping)
self._structure = structure
self._leaves = tuple(leaves)
self._mapping = mapping
def _to_mapping(self) -> Mapping[K, V]:
if self._mapping is None:
self._mapping = jax.tree_util.tree_unflatten(
self._structure, self._leaves)
return self._mapping
def keys(self):
return KeysOnlyKeysView(self._to_mapping())
def values(self):
return self._to_mapping().values()
def items(self):
return self._to_mapping().items()
def __eq__(self, other):
if other is None:
return False
t = type(other)
if t is FlatMap:
other = other._to_mapping()
return self._to_mapping() == other
def __hash__(self):
return hash((self._structure, self._leaves))
def __getitem__(self, key: K) -> V:
return self._to_mapping()[key]
def __getattr__(self, key):
raise AttributeError(
f"`x.{key}` is not supported on FlatMapping, use `x['{key}']` instead.")
def __iter__(self):
return iter(self.keys())
def __len__(self):
return len(self._to_mapping())
def __str__(self):
single_line = "{}({{{}}})".format(
type(self).__name__, ", ".join(f"{k!r}: {v!r}" for k, v in self.items())
)
if len(single_line) <= 80:
return single_line
return "{}({{\n{},\n}})".format(
type(self).__name__,
utils.indent(2, ",\n".join(_repr_item(k, v) for k, v in self.items())))
__repr__ = __str__
def __reduce__(self):
# NOTE: Using FlatMapping (not FlatMap) here for backwards compatibility
# with old pickles.
return FlatMapping, (self._to_mapping(),)
# Workaround for https://github.com/python/typing/issues/498.
__copy__ = None
jax.tree_util.register_pytree_node(
FlatMap,
lambda s: (s._leaves, s._structure), # pylint: disable=protected-access
lambda treedef, leaves: FlatMap(FlatComponents(leaves, treedef)))
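# Illustrative sketch (hypothetical values, not referenced elsewhere): because
# FlatMap is registered as a pytree node above, jax.tree_util functions map
# over it and return another FlatMap with the same structure.
def _example_flatmap_as_pytree() -> None:
  fm = FlatMap({"a": 1, "b": {"c": 2}})
  doubled = jax.tree_util.tree_map(lambda x: x * 2, fm)
  assert type(doubled) is FlatMap
  assert doubled == {"a": 2, "b": {"c": 4}}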
# This is only needed because some naughty people reach into Haiku internals
# and use `isinstance(x, haiku._src.data_structures.FlatMapping)` (which was
# renamed to FlatMap).
# TODO(tomhennigan): If to_immutable_dict is removed this metaclass can go too.
class FlatMappingMeta(type(FlatMap)):
def __instancecheck__(cls, instance) -> bool:
return isinstance(instance, FlatMap)
class FlatMapping(FlatMap, metaclass=FlatMappingMeta):
"""Only called from old checkpoints."""
def __new__(cls, data):
if config.get_config().restore_flatmap:
return to_immutable_dict(data)
else:
return to_haiku_dict(data)
def __init__(self, *args, **kwargs): # pylint: disable=super-init-not-called
del args, kwargs
assert False, "This should never happen."
# _ _ _
# __| | ___ _ __ _ __ ___ ___ __ _| |_ ___ __| |
# / _` |/ _ \ '_ \| '__/ _ \/ __/ _` | __/ _ \/ _` |
# | (_| | __/ |_) | | | __/ (_| (_| | || __/ (_| |
# \__,_|\___| .__/|_| \___|\___\__,_|\__\___|\__,_|
# |_|
# The classes below are untested and maintained for backwards compatibility with
# old checkpoints.
class frozendict(Mapping[K, V]): # pylint: disable=invalid-name
"""Immutable mapping from keys to values."""
__slots__ = ("_storage", "_keys", "_hash")
def __init__(self, *args, **kwargs):
self._storage = dict(*args, **kwargs)
self._keys = tuple(sorted(self._storage))
# Dict values aren't necessarily hashable so we just use the keys.
self._hash = hash(self._keys)
def keys(self):
return KeysOnlyKeysView(self)
def __iter__(self):
return iter(self._keys)
def __len__(self):
return len(self._storage)
def __getattr__(self, key):
raise AttributeError(
f"x.{key} is not supported on frozendict, use x['{key}'] instead.")
def get(self, key: K, default: Optional[T] = None) -> Union[V, Optional[T]]:
return self._storage.get(key, default)
def __getitem__(self, key: K) -> V:
return self._storage[key]
def __repr__(self):
single_line = "{}({{{}}})".format(
type(self).__name__,
", ".join(f"{k!r}: {self._storage[k]!r}" for k in self._keys),
)
if len(single_line) <= 80:
return single_line
return "{}({{\n{},\n}})".format(
type(self).__name__,
utils.indent(
2, ",\n".join(_repr_item(k, self._storage[k]) for k in self._keys)))
__str__ = __repr__
def __eq__(self, other):
if isinstance(other, frozendict):
return self._storage == other._storage # pylint: disable=protected-access
elif isinstance(other, dict):
# dict is not generally hashable so this comparison is safe.
return self._storage == other
else:
return False
def __hash__(self):
return self._hash
def __reduce__(self):
return (frozendict, (self._storage,))
jax.tree_util.register_pytree_node(
frozendict,
lambda s: (tuple(s.values()), tuple(s.keys())),
lambda k, xs: frozendict(zip(k, xs)))
|
dm-haiku-main
|
haiku/_src/data_structures.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.data_structures."""
import collections
import copy
import pickle
import threading
from absl.testing import absltest
from absl.testing import parameterized
import cloudpickle
import dill
from haiku._src import data_structures
import jax
import tree
frozendict = data_structures.frozendict
FlatMap = data_structures.FlatMap
all_picklers = parameterized.parameters(cloudpickle, dill, pickle)
class StackTest(absltest.TestCase):
cls = data_structures.Stack
def test_len(self):
s = self.cls()
self.assertEmpty(s)
for i in range(10):
self.assertLen(s, i)
s.push(None)
for i in range(10):
self.assertLen(s, 10 - i)
s.pop()
self.assertEmpty(s)
def test_push_peek_pop(self):
s = self.cls()
for i in range(3):
s.push(i)
self.assertEqual(s.peek(), 2)
self.assertEqual(s.peek(-2), 1)
self.assertEqual(s.peek(-3), 0)
for i in range(3):
self.assertEqual(s.peek(), 2 - i)
self.assertEqual(s.pop(), 2 - i)
self.assertEmpty(s)
def test_pushleft_peek_pop(self):
s = self.cls()
for i in range(3):
s.pushleft(i)
self.assertEqual(s.peek(), 0)
self.assertEqual(s.peek(-2), 1)
self.assertEqual(s.peek(-3), 2)
for i in range(3):
self.assertEqual(s.peek(), i)
self.assertEqual(s.pop(), i)
self.assertEmpty(s)
def test_popleft(self):
s = self.cls()
s.push(1)
s.push(2)
s.push(3)
s.push(4)
self.assertEqual(s.popleft(), 1)
self.assertEqual(s.pop(), 4)
self.assertEqual(s.popleft(), 2)
self.assertEqual(s.pop(), 3)
def test_call(self):
s = self.cls()
with s(0):
self.assertEqual(s.peek(), 0)
with s(1):
self.assertEqual(s.peek(), 1)
self.assertEqual(s.peek(), 0)
self.assertEmpty(s)
def test_map(self):
s1 = self.cls()
s1.push(1)
s1.push(2)
s2 = s1.map(lambda x: x + 2)
self.assertIsNot(s1, s2)
self.assertEqual(s1.pop(), 2)
self.assertEqual(s1.pop(), 1)
self.assertEqual(s2.pop(), 4)
self.assertEqual(s2.pop(), 3)
def test_clone(self):
s1 = self.cls()
for i in range(5):
s1.push(i)
s2 = s1.clone()
assert s1 is not s2
self.assertEqual([s2.pop() for _ in range(len(s2))], [4, 3, 2, 1, 0])
self.assertEmpty(s2)
self.assertEqual([s1.pop() for _ in range(len(s1))], [4, 3, 2, 1, 0])
    self.assertEmpty(s1)
def test_exception_safe(self):
s = self.cls()
o1 = object()
o2 = object()
with s(o1):
with self.assertRaisesRegex(ValueError, "expected"):
with s(o2):
raise ValueError("expected")
self.assertIs(s.peek(), o1)
self.assertEmpty(s)
class ThreadLocalStackTest(StackTest):
cls = data_structures.ThreadLocalStack
def test_stack_per_thread(self):
s = self.cls()
self.assertEmpty(s)
s.push(42)
s_len_second_thread = [None]
def second_thread():
self.assertEmpty(s)
s.push(666)
s.push(777)
s_len_second_thread[0] = len(s)
t = threading.Thread(target=second_thread)
t.start()
t.join()
self.assertEqual(s_len_second_thread[0], 2)
self.assertEqual(s.pop(), 42)
self.assertEmpty(s)
class FlatMappingTest(parameterized.TestCase):
def test_init_from_dict(self):
o = dict(a=1, b=2)
f = FlatMap(o)
self.assertEqual(o, f)
o["a"] = 2
self.assertEqual(f["a"], 1)
self.assertNotEqual(o, f)
def test_getattr(self):
f = FlatMap(dict(a=1, b=2))
with self.assertRaisesRegex(AttributeError, "not supported"):
_ = f.a
def test_setattr(self):
f = FlatMap(dict(a=1))
with self.assertRaises(AttributeError):
# Existing attr.
f.a = 4 # pytype: disable=not-writable
with self.assertRaises(AttributeError):
# New attr.
f.c = 4 # pytype: disable=not-writable
def test_getitem(self):
f = FlatMap(dict(a=1, b=2))
self.assertEqual(f["a"], 1)
self.assertEqual(f["b"], 2)
def test_getitem_missing(self):
f = FlatMap({})
with self.assertRaises(KeyError):
f["~"] # pylint: disable=pointless-statement
def test_getitem_missing_nested(self):
f = FlatMap({"~": {}})
with self.assertRaises(KeyError):
f["~"]["missing"] # pylint: disable=pointless-statement
def test_getitem_nested_immutable(self):
f = data_structures.to_immutable_dict({"a": {"b": "c"}})
with self.assertRaisesRegex(TypeError, "does not support item assignment"):
f["a"]["b"] = "d"
def test_get(self):
f = FlatMap(dict(a=1, b=2))
self.assertEqual(f.get("a"), 1)
self.assertEqual(f.get("b"), 2)
self.assertIsNone(f.get("c"))
self.assertEqual(f.get("d", f), f)
@parameterized.parameters(jax.tree_util.tree_map, tree.map_structure)
def test_tree_map(self, tree_map):
f = FlatMap(dict(a=1, b=dict(c=2)))
p = tree_map("v: {}".format, f)
self.assertEqual(type(p), FlatMap)
self.assertEqual(p._to_mapping(), {"a": "v: 1", "b": {"c": "v: 2"}})
def test_eq_hash(self):
a = FlatMap(dict(a=1, b=2))
b = FlatMap(dict(a=1, b=2))
self.assertEqual(a, b)
self.assertEqual(hash(a), hash(b))
@parameterized.named_parameters(
("copy", copy.copy),
("deepcopy", copy.deepcopy),
("pickle", lambda v: pickle.loads(pickle.dumps(v)),),
("cloudpickle", lambda v: cloudpickle.loads(cloudpickle.dumps(v)),),
("dill", lambda v: dill.loads(dill.dumps(v)),),
)
def test_copy(self, clone):
before = data_structures.to_immutable_dict(dict(a=dict(b=1, c=2)))
after = clone(before)
self.assertIsNot(before, after)
self.assertEqual(before, after)
self.assertEqual(after, {"a": {"b": 1, "c": 2}})
before_dict = data_structures.to_haiku_dict(before)
jax.tree_util.tree_map(self.assertEqual, before_dict, after)
@all_picklers
def test_pickle_roundtrip(self, pickler):
x = FlatMap({})
y = pickler.loads(pickler.dumps(x))
self.assertType(y, dict)
def test_golden_pickle_load(self):
loaded = self._pickle_load_golden("empty")
self.assertType(loaded, dict)
self.assertEmpty(loaded)
def assertType(self, obj, cls):
self.assertEqual(type(obj), cls)
def _pickle_load_golden(self, file):
with open(f"haiku/_src/testdata/{file}.pkl", "rb") as fp:
return pickle.load(fp)
def test_golden_pickle_nested(self):
loaded = self._pickle_load_golden("nested")
self.assertType(loaded, dict)
self.assertType(loaded["a"], dict)
self.assertType(loaded["a"]["b"], dict)
self.assertEqual(loaded, {"a": {"b": {"c": 1}}})
def test_flatmapping_isinstance(self):
# Note: This should not work (FlatMapping extends FlatMap not the other way
# around) however it is needed to support some naughty users who reached in
# to Haiku internals and depend on the name `data_structures.FlatMapping`.
o = FlatMap({})
self.assertIsInstance(o, data_structures.FlatMapping)
def test_flatmapping_init(self):
# NOTE: FlatMapping is a shim only for `pickle.loads`. The actual immutable
# map type in Haiku is FlatMap.
o = data_structures.FlatMapping({})
self.assertEqual(type(o), dict)
def test_deepcopy_still_immutable(self):
before = FlatMap(dict(a=[1, 2, 3]))
after = copy.deepcopy(before)
with self.assertRaises(TypeError):
before["a"] = [3, 2, 1] # pytype: disable=unsupported-operands
self.assertEqual(before["a"], [1, 2, 3])
self.assertEqual(after["a"], [1, 2, 3])
def test_keys(self):
d = FlatMap({"key1": "value", "key2": "value2"})
self.assertEqual(str(d.keys()), "KeysOnlyKeysView(['key1', 'key2'])")
self.assertEqual(repr(d.keys()), "KeysOnlyKeysView(['key1', 'key2'])")
def test_init(self):
# Init from dict
d = {"foo": {"a": 1}, "bar": 2}
f = FlatMap(d)
self.assertEqual(f, d)
# Init from FlatMap
f2 = FlatMap(f)
self.assertEqual(f, f2)
# Init from dict with nested FlatMap
inner = FlatMap({"a": 1})
outer = {"foo": inner, "bar": 2}
nested_flatmapping = FlatMap(outer)
self.assertEqual(outer, nested_flatmapping)
# Init from flat structures
values, treedef = jax.tree_util.tree_flatten(f)
self.assertEqual(
FlatMap(data_structures.FlatComponents(values, treedef)), f)
def test_get_item(self):
f_map = FlatMap(
{"foo": {"b": [1], "d": {"e": 2}}, "bar": (1,)})
self.assertEqual(f_map["foo"], {"b": [1], "d": {"e": 2}})
self.assertEqual(f_map["bar"], (1,))
with self.assertRaises(KeyError):
_ = f_map["b"]
def test_items(self):
f_map = FlatMap(
{"foo": {"b": {"c": 1}, "d": {"e": 2}}, "bar": {"c": 1}})
items = list(f_map.items())
self.assertEqual(items[0], ("foo", {"b": {"c": 1}, "d": {"e": 2}}))
self.assertEqual(items[1], ("bar", {"c": 1}))
self.assertEqual(items, list(zip(f_map.keys(), f_map.values())))
def test_tree_functions(self):
f = FlatMap(
{"foo": {"b": {"c": 1}, "d": 2}, "bar": {"c": 1}})
m = jax.tree_util.tree_map(lambda x: x + 1, f)
self.assertEqual(type(m), FlatMap)
self.assertEqual(m, {"foo": {"b": {"c": 2}, "d": 3}, "bar": {"c": 2}})
mm = jax.tree_util.tree_map(lambda x, y: x + y, f, f)
self.assertEqual(type(mm), FlatMap)
self.assertEqual(mm, {"foo": {"b": {"c": 2}, "d": 4}, "bar": {"c": 2}})
leaves, treedef = jax.tree_util.tree_flatten(f)
self.assertEqual(leaves, [1, 1, 2])
uf = jax.tree_util.tree_unflatten(treedef, leaves)
self.assertEqual(type(f), FlatMap)
self.assertEqual(f, uf)
def test_flatten_nested_struct(self):
d = {"foo": {"bar": [1, 2, 3]},
"baz": {"bat": [4, 5, 6],
"qux": [7, [8, 9]]}}
f = FlatMap(d)
leaves, treedef = jax.tree_util.tree_flatten(f)
self.assertEqual([4, 5, 6, 7, 8, 9, 1, 2, 3], leaves)
g = jax.tree_util.tree_unflatten(treedef, leaves)
self.assertEqual(g, f)
self.assertEqual(g, d)
def test_nested_sequence(self):
f_map = FlatMap(
{"foo": [1, 2], "bar": [{"a": 1}, 2]})
leaves, _ = jax.tree_util.tree_flatten(f_map)
self.assertEqual(leaves, [1, 2, 1, 2])
self.assertEqual(f_map["foo"][0], 1)
@parameterized.named_parameters(("tuple", tuple), ("list", list),)
def test_different_sequence_types(self, type_of_sequence):
f_map = FlatMap(
{"foo": type_of_sequence((1, 2)),
"bar": type_of_sequence((3, {"b": 4}))})
leaves, _ = jax.tree_util.tree_flatten(f_map)
self.assertEqual(leaves, [3, 4, 1, 2])
self.assertEqual(f_map["foo"][0], 1)
self.assertEqual(f_map["bar"][1]["b"], 4)
def test_replace_leaves_with_nodes_in_map(self):
f = FlatMap({"foo": 1, "bar": 2})
f_nested = jax.tree_util.tree_map(lambda x: {"a": (x, x)}, f)
leaves, _ = jax.tree_util.tree_flatten(f_nested)
self.assertEqual(leaves, [2, 2, 1, 1])
def test_frozen_builtins_jax_compatibility(self):
f = FlatMap({"foo": [3, 2], "bar": {"a": 3}})
mapped_frozen_list = jax.tree_util.tree_map(lambda x: x+1, f["foo"])
self.assertEqual(mapped_frozen_list[0], 4)
mapped_frozen_dict = jax.tree_util.tree_map(lambda x: x+1, f["bar"])
self.assertEqual(mapped_frozen_dict["a"], 4)
def test_tree_transpose(self):
outerdef = jax.tree_util.tree_structure(FlatMap({"a": 1, "b": 2}))
innerdef = jax.tree_util.tree_structure([1, 2])
self.assertEqual(
[FlatMap({"a": 3, "b": 5}), FlatMap({"a": 4, "b": 6})],
jax.tree_util.tree_transpose(
outerdef, innerdef, FlatMap({"a": [3, 4], "b": [5, 6]})))
class DataStructuresTest(parameterized.TestCase):
@parameterized.parameters(dict, frozendict, FlatMap,
lambda x: collections.defaultdict(object, x))
def test_to_dict(self, cls):
mapping_in = cls(
{f"a{i}": cls({f"b{j}": 0 for j in range(2)}) for i in range(10)})
mapping_out = data_structures.to_dict(mapping_in)
self.assertEqual(mapping_in, mapping_out)
self.assertIs(type(mapping_out), dict)
self.assertIsNot(mapping_in, mapping_out)
for key in mapping_in:
self.assertIs(type(mapping_out[key]), dict)
self.assertIsNot(mapping_in[key], mapping_out[key])
def test_to_dict_copies_value_structure(self):
v = [1, 2, 3]
mapping_in = {"m": {"w": v}}
mapping_out = data_structures.to_dict(mapping_in)
self.assertEqual(mapping_in, mapping_out)
self.assertIsNot(mapping_in["m"]["w"], mapping_out["m"]["w"])
v.append(4)
self.assertNotEqual(mapping_in, mapping_out)
def test_to_dict_recursively_changes_leaf_types(self):
mapping_in = {"m": {"w": FlatMap(a=FlatMap(b=0))}}
mapping_out = data_structures.to_dict(mapping_in)
self.assertEqual(type(mapping_out["m"]["w"]), dict)
self.assertEqual(type(mapping_out["m"]["w"]["a"]), dict)
def test_to_immutable_dict(self):
before = {"a": {"b": 1, "c": 2}}
after = data_structures.to_immutable_dict(before)
self.assertEqual(before, after)
self.assertEqual(type(after), FlatMap)
self.assertEqual(type(after["a"]), FlatMap)
def test_to_mutable_dict(self):
before = FlatMap({"a": {"b": 1, "c": 2}})
after = data_structures.to_mutable_dict(before)
self.assertEqual(before, after)
self.assertEqual(type(after), dict)
self.assertEqual(type(after["a"]), dict)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/data_structures_test.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.rms_norm."""
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import initializers
from haiku._src import rms_norm
from haiku._src import test_utils
from haiku._src import transform
import jax
import jax.numpy as jnp
import numpy as np
class RMSNormTest(parameterized.TestCase):
@test_utils.transform_and_run
def test_connection(self):
data = jnp.zeros([2, 3, 4, 5])
norms = []
for axis in range(4):
norms.append(rms_norm.RMSNorm(axis=axis)(data))
norms.append(rms_norm.RMSNorm(axis=slice(1, None))(data))
norms.append(rms_norm.RMSNorm(axis=slice(2, None))(data))
norms.append(rms_norm.RMSNorm(axis=slice(1, -1))(data))
norms.append(rms_norm.RMSNorm(axis=-1, param_axis=(-1,))(data))
norms.append(rms_norm.RMSNorm(axis=-1, param_axis=(-2, -1))(data))
norms.append(rms_norm.RMSNorm(axis=-1, param_axis=(0, 1))(data))
norms.append(rms_norm.RMSNorm(axis=-1, param_axis=(0, 1, 2, 3))(data))
return norms
def test_bf16(self):
"""For all configurations, ensure bf16 outputs from bf16 inputs."""
def f(x):
ln = rms_norm.RMSNorm(axis=-1)
return ln(x)
fwd = transform.transform(f)
data = jnp.zeros([2, 3, 4, 5], dtype=jnp.bfloat16)
params = fwd.init(jax.random.PRNGKey(428), data)
bf16_params = jax.tree_util.tree_map(
lambda t: t.astype(jnp.bfloat16), params)
self.assertEqual(fwd.apply(bf16_params, None, data).dtype, jnp.bfloat16)
@test_utils.transform_and_run
def test_simple_case(self):
layer = rms_norm.RMSNorm([1, 2], eps=0.0)
inputs = np.full(shape=[2, 3, 3, 5], fill_value=2.0)
outputs = layer(inputs)
for x in np.nditer(outputs):
self.assertEqual(x, 1.0)
@test_utils.transform_and_run
def test_simple_case_with_scale(self):
layer = rms_norm.RMSNorm(
axis=[1, 2], eps=0.0, scale_init=initializers.Constant(0.5))
inputs = np.full(shape=[2, 3, 3, 5], fill_value=2.0)
outputs = layer(inputs)
for x in np.nditer(outputs):
self.assertEqual(x, 0.5)
@test_utils.transform_and_run
def test_zero_inputs(self):
layer = rms_norm.RMSNorm([1, 2])
inputs = np.zeros([2, 3, 3, 5])
outputs = layer(inputs)
for x in np.nditer(outputs):
self.assertEqual(x, 0.0)
@parameterized.named_parameters(("String", "foo"), ("ListString", ["foo"]))
@test_utils.transform_and_run
def test_invalid_axis(self, axis):
with self.assertRaisesRegex(
ValueError, "`axis` should be an int, slice or iterable of ints."):
rms_norm.RMSNorm(axis)
@test_utils.transform_and_run
def test_slice_axis(self):
slice_layer = rms_norm.RMSNorm(slice(1, -1))
axis_layer = rms_norm.RMSNorm((1, 2))
inputs = np.random.uniform(size=[3, 4, 4, 5], low=0, high=10)
slice_outputs = slice_layer(inputs)
axis_outputs = axis_layer(inputs)
np.testing.assert_array_equal(slice_outputs, axis_outputs)
@test_utils.transform_and_run
def test_simple_case_without_scale(self):
layer = rms_norm.RMSNorm(
axis=[1, 2], eps=0.0, create_scale=False)
inputs = np.full(shape=[2, 3, 3, 5], fill_value=2.0)
_ = layer(inputs)
assert "scale" not in layer.params_dict()
@parameterized.parameters(
(None, (6,)),
(-1, (6,)),
(-2, (1, 1, 5, 1)),
(-3, (1, 4, 1, 1)),
(-4, (3, 1, 1, 1)),
(0, (3, 1, 1, 1)),
(1, (1, 4, 1, 1)),
(2, (1, 1, 5, 1)),
(3, (6,)),
(slice(1, 3), (1, 4, 5, 1)),
(slice(0, 3, 2), (3, 1, 5, 1)),
(slice(-1, 0, -1), (1, 4, 5, 6)),
)
@test_utils.transform_and_run
def test_param_axis_sets_param_shape(self, param_axis, param_shape):
ln = rms_norm.RMSNorm(axis=-1, param_axis=param_axis)
ln(jnp.ones([3, 4, 5, 6]))
self.assertEqual(ln.params_dict()["rms_norm/scale"].shape, param_shape)
@parameterized.parameters(
((0, 1, 2), (3, 4, 5, 1)),
((-4, -2, -3), (3, 4, 5, 1)),
((0, 1), (3, 4, 1, 1)),
((0, 3), (3, 1, 1, 6)),
((-4, -1), (3, 1, 1, 6)),
((-1, -4), (3, 1, 1, 6)),
)
@test_utils.transform_and_run
def test_multiple_param_axis(self, param_axis, param_shape):
ln = rms_norm.RMSNorm(axis=-1, param_axis=param_axis)
ln(jnp.ones([3, 4, 5, 6]))
self.assertEqual(ln.params_dict()["rms_norm/scale"].shape, param_shape)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/rms_norm_test.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logs usage of certain functions in Haiku."""
def log_once(event: str):
# NOTE: We only log events when running inside DeepMind.
del event
|
dm-haiku-main
|
haiku/_src/analytics.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.filtering."""
import collections
from collections.abc import Sequence
import itertools
import re
import types
from typing import Any, Callable
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import basic
from haiku._src import data_structures
from haiku._src import filtering
from haiku._src import transform
import jax
import jax.numpy as jnp
def jax_fn_with_filter(
jax_fn: Callable[..., Any],
f: Callable[..., Any],
predicate: Callable[[str, str, jax.Array], bool],
**jax_fn_kwargs) -> Callable[..., Any]:
"""Applies a jax functionn to a given function after modifying its signature.
`jax_fn_with_filter` operates in two steps:
1 it wraps the input function `f`, which is expect to take as first
argument a `Params` data structure, with a function taking as first two
inputs a bipartition of the orginal parameters
2 the resulting function is transformed with `jax_fn` and wrapped
by a function supporting `f`'s signature and taking care of partitioning
the `f`'s `Params` input using `predicate`.
Args:
jax_fn: jax function, e.g. `jax.grad` or `jax.jacobian`.
f: callable to be transformed.
predicate: predicate to be used to partition `f`'s input parameters.
**jax_fn_kwargs: kwargs to be forwarded to `jax_fn`.
Returns:
Function calling the input jax function on the wrapped `f`.
"""
def wrapper(p1, p2, *args, **kwargs):
return f(filtering.merge(p1, p2), *args, **kwargs)
jaxed_fn = jax_fn(wrapper, **jax_fn_kwargs)
def fn_with_filter(p, *args, **kwargs):
p1, p2 = filtering.partition(predicate, p)
return jaxed_fn(p1, p2, *args, **kwargs)
return fn_with_filter
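# Illustrative usage sketch (assumptions: `apply_fn` comes from
# `transform.transform(...)` and takes `(params, rng, x)`); this mirrors how
# the helper is exercised in the tests below:
#
#   grad_w_fn = jax_fn_with_filter(
#       jax_fn=jax.grad,
#       f=apply_fn,
#       predicate=lambda module_name, name, value: name == "w")
#   grads_w = grad_w_fn(params, None, x)  # gradients only for ".../w" entries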
def get_net(x):
def init(v):
return dict(
w_init=lambda *args: v * jnp.ones((1, 1)),
b_init=lambda *args: v * 1.5 * jnp.ones((1,)))
h = basic.Linear(output_size=1, name="first_layer", **init(1.0))(x)
h = basic.Linear(output_size=1, name="second_layer", **init(3.0))(h)
return jnp.mean(h)
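# (Illustrative) After `transform.transform(get_net).init(...)` the params are:
#   {"first_layer": {"w": [[1.0]], "b": [1.5]},
#    "second_layer": {"w": [[3.0]], "b": [4.5]}}
# The tests below partition, filter and map over this structure by module name
# and parameter name.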
def get_names(params) -> set[str]:
names = set()
for path, module in params.items():
for name in module.keys():
names.add("/".join([path, name]))
return names
def to_set(params) -> set[tuple[str, Sequence[float]]]:
entries = set()
for path, module in params.items():
for key, value in module.items():
entries.add(
("/".join([path, key]), tuple(jax.device_get(value).flatten())))
return entries
def compile_regex(regex):
if not isinstance(regex, str):
regex = "|".join(["(" + r + ")" for r in regex])
return re.compile(regex)
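# (Illustrative) compile_regex(["first_layer.*", ".*w"]) compiles the pattern
# "(first_layer.*)|(.*w)", i.e. it matches either alternative.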
class FilteringTest(parameterized.TestCase):
def test_partition(self):
init_fn, _ = transform.transform(get_net)
params = init_fn(jax.random.PRNGKey(428), jnp.ones((1, 1)))
# parse by layer
first_layer_params, second_layer_params = filtering.partition(
lambda module_name, *_: module_name == "first_layer",
params)
self.assertEqual(
get_names(first_layer_params), {"first_layer/w", "first_layer/b"}
)
self.assertEqual(
get_names(second_layer_params), {"second_layer/w", "second_layer/b"}
)
# parse by variable type
weights, biases = filtering.partition(
lambda module_name, name, _: name == "w",
params) # pytype: disable=wrong-arg-types
self.assertEqual(get_names(weights), {"first_layer/w", "second_layer/w"})
self.assertEqual(get_names(biases), {"first_layer/b", "second_layer/b"})
# Compose regexes
regex = compile_regex(["first_layer.*", ".*w"])
matching, not_matching = filtering.partition(
lambda module_name, name, _: regex.match(f"{module_name}/{name}"),
params)
self.assertEqual(
get_names(matching),
{"first_layer/w", "first_layer/b", "second_layer/w"},
)
self.assertEqual(get_names(not_matching), {"second_layer/b"})
matching, not_matching = filtering.partition(
lambda mod_name, name, _: mod_name == "first_layer" and name != "w",
params)
self.assertEqual(get_names(matching), {"first_layer/b"})
self.assertEqual(
get_names(not_matching),
{"first_layer/w", "second_layer/w", "second_layer/b"},
)
@parameterized.parameters(*range(1, 8))
def test_partition_n(self, n):
cnt = itertools.count()
fn = lambda m, n, v: next(cnt)
structure = {f"layer_{i}": {"w": None} for i in range(n)}
structures = filtering.partition_n(fn, structure, n)
self.assertLen(structures, n)
self.assertEqual(filtering.merge(*structures), structure)
for i, substructure in enumerate(structures):
expected = {f"layer_{i}": {"w": None}}
self.assertEqual(substructure, expected)
def test_partition_n_nested(self):
nested_structure = {
"layer": {"a": [1, 2, 3], "b": {object()}, "c": {"a": "b"}}
}
cnt = itertools.count()
fn = lambda m, n, v: next(cnt)
out1, out2, out3 = filtering.partition_n(fn, nested_structure, 3)
self.assertEqual(out1, {"layer": {"a": nested_structure["layer"]["a"]}})
self.assertEqual(out2, {"layer": {"b": nested_structure["layer"]["b"]}})
self.assertEqual(out3, {"layer": {"c": nested_structure["layer"]["c"]}})
@parameterized.parameters(*range(1, 8))
def test_partition_n_merge_isomorphism(self, n):
cnt = itertools.count()
fn = lambda m, n, v: next(cnt)
input_structure = {f"layer_{i}": {"w": None} for i in range(n)}
structures = filtering.partition_n(fn, input_structure, n)
merged_structure = filtering.merge(*structures)
self.assertEqual(merged_structure, input_structure)
@parameterized.parameters(*range(1, 8))
def test_traverse(self, n):
structure = {f"layer_{i}": {"w": "wv", "b": "bv"}
for i in reversed(range(n))}
expected = []
for i in range(n):
expected.append((f"layer_{i}", "b", "bv"))
expected.append((f"layer_{i}", "w", "wv"))
actual = list(filtering.traverse(structure))
self.assertEqual(expected, actual)
def test_traverse_nested(self):
nested_structure = {
"layer": {"a": [1, 2, 3], "b": {object()}, "c": {"a": "b"}}
}
expected = [
("layer", x, nested_structure["layer"][x]) for x in ("a", "b", "c")
]
actual = list(filtering.traverse(nested_structure))
self.assertEqual(expected, actual)
@parameterized.parameters(({}, {}, True),
({"a": {}}, {}, True),
({}, {"a": {}}, True),
({"a": {}}, {"a": {}}, True),
({"a": {"b": 1}}, {"a": {}}, False))
def test_is_subset(self, structure1, structure2, is_subset):
if is_subset:
self.assertTrue(
filtering.is_subset(subset=structure1, superset=structure2))
else:
self.assertFalse(
filtering.is_subset(subset=structure1, superset=structure2))
@parameterized.parameters(*range(1, 4))
def test_is_subset_layers(self, n):
structure_small = {f"layer_{i}": {"w": "wv", "b": "bv"}
for i in reversed(range(n - 1))}
structure_large = {f"layer_{i}": {"w": "wv", "b": "bv"}
for i in reversed(range(n))}
self.assertTrue(
filtering.is_subset(subset=structure_small, superset=structure_large))
self.assertFalse(
filtering.is_subset(subset=structure_large, superset=structure_small))
def test_filter(self):
init_fn, _ = transform.transform(get_net)
params = init_fn(jax.random.PRNGKey(428), jnp.ones((1, 1)))
second_layer_params = filtering.filter(
lambda module_name, *_: module_name == "second_layer",
params)
self.assertEqual(
get_names(second_layer_params), {"second_layer/w", "second_layer/b"}
)
biases = filtering.filter(
lambda module_name, name, _: name == "b",
params) # pytype: disable=wrong-arg-types
self.assertEqual(get_names(biases), {"first_layer/b", "second_layer/b"})
def test_transforms_with_filter(self):
# Note to make sense of test:
#
# out = (w0 + b0) * w1 + b1
# = w0 * w1 + b0 * w1 + b1
# doutdw0 = w1
# doutdw1 = w0 + b0
# with w0 = 1.0, b0 = 1.5, w1 = 3.0, b1 = 4.5
init_fn, apply_fn = transform.transform(get_net)
inputs = jnp.ones((1, 1))
params = init_fn(jax.random.PRNGKey(428), inputs)
df_fn = jax_fn_with_filter(
jax_fn=jax.grad,
f=apply_fn,
predicate=lambda module_name, name, _: name == "w")
df = df_fn(params, None, inputs)
self.assertEqual(
to_set(df), {("first_layer/w", (3.0,)), ("second_layer/w", (2.5,))}
)
fn = jax_fn_with_filter(
jax_fn=jax.value_and_grad,
f=apply_fn,
predicate=lambda module_name, name, _: name == "w")
v = fn(params, None, inputs)
self.assertEqual(v[0], jnp.array([12.0]))
self.assertEqual(to_set(df), to_set(v[1]))
def get_stacked_net(x):
y = get_net(x)
return jnp.stack([y, 2.0 * y])
_, apply_fn = transform.transform(get_stacked_net)
jf_fn = jax_fn_with_filter(
jax_fn=jax.jacobian,
f=apply_fn,
predicate=lambda module_name, name, _: name == "w")
jf = jf_fn(params, None, inputs)
self.assertEqual(
to_set(jf),
{("first_layer/w", (3.0, 6.0)), ("second_layer/w", (2.5, 5.0))},
)
def test_map(self):
init_fn, _ = transform.transform(get_net)
params = init_fn(jax.random.PRNGKey(428), jnp.ones((1, 1)))
# parse by layer
def map_fn(module_name, name, v):
del name
if "first_layer" in module_name:
return v
else:
return 2. * v
new_params = filtering.map(map_fn, params)
self.assertLen(jax.tree_util.tree_leaves(new_params), 4)
first_layer_params, second_layer_params = filtering.partition(
lambda module_name, *_: module_name == "first_layer",
params)
for mn in first_layer_params:
for n in first_layer_params[mn]:
self.assertEqual(params[mn][n], new_params[mn][n])
for mn in second_layer_params:
for n in second_layer_params[mn]:
self.assertEqual(2. * params[mn][n], new_params[mn][n])
def test_output_type_default(self):
self.assert_output_type(dict)
def test_merge_different_mappings(self):
a = collections.defaultdict(dict)
a["foo"]["bar"] = 1
b = {"foo": {"baz": 2}}
c = types.MappingProxyType({"foo": {"bat": 3}})
d = filtering.merge(a, b, c)
self.assertEqual(d, {"foo": {"bar": 1, "baz": 2, "bat": 3}})
def test_merge_nested(self):
a = {"layer": {"a": [1, 2, 3]}}
b = {"layer": {"b": {object()}}}
c = {"layer": {"c": {"a": "b"}}}
actual = filtering.merge(a, b, c)
expected = {"layer": {"a": a["layer"]["a"],
"b": b["layer"]["b"],
"c": c["layer"]["c"]}}
self.assertEqual(expected, actual)
def test_check_duplicates(self):
err = "Duplicate array found"
a = {"a": {"b": jnp.array([0.0, 0.1], dtype=jnp.float32)}}
b = {"a": {"b": jnp.array([0.0, 0.1], dtype=jnp.bfloat16)}}
c = {"a": {"b": jnp.array([0.0, 0.1, 0.2], dtype=jnp.float32)}}
d = {"a": {"b": "foo"}}
with self.subTest("dtype_mismatch"):
with self.assertRaisesRegex(ValueError, fr"{err}.*f32\[2\] vs bf16\[2\]"):
filtering.merge(a, b, check_duplicates=True)
with self.subTest("shape_mismatch"):
with self.assertRaisesRegex(ValueError, fr"{err}.*f32\[2\] vs f32\[3\]"):
filtering.merge(a, c, check_duplicates=True)
with self.subTest("multiple_mismatch"):
with self.assertRaisesRegex(ValueError, fr"{err}.*f32\[2\] vs bf16\[2\]"):
filtering.merge(a, b, c, check_duplicates=True)
with self.subTest("object_mismatch"):
with self.assertRaisesRegex(ValueError, fr"{err}.*f32\[2\] vs 'foo'"):
filtering.merge(a, d, check_duplicates=True)
def assert_output_type(self, out_cls):
def assert_type_recursive(s):
self.assertEqual(type(s), out_cls)
for in_cls in (dict, data_structures.FlatMap):
with self.subTest(str(in_cls)):
structure_a = in_cls({"m1": in_cls({"w": None})})
structure_b = in_cls({"m2": in_cls({"w": None})})
structure_c = in_cls({f"{i}": in_cls({"w": None}) for i in range(5)})
assert_type_recursive(
filtering.filter(lambda m, n, v: True, structure_a))
assert_type_recursive(filtering.map(lambda m, n, v: v, structure_a))
assert_type_recursive(filtering.merge(structure_a, structure_b))
parts = filtering.partition(lambda m, n, v: int(m) > 1, structure_c)
for part in parts:
assert_type_recursive(part)
parts = filtering.partition_n(lambda m, n, v: int(m), structure_c, 5)
for part in parts:
assert_type_recursive(part)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/filtering_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.group_norm."""
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import group_norm
from haiku._src import initializers
from haiku._src import test_utils
import jax.numpy as jnp
import numpy as np
def constant(fill_value, *, shape):
return np.full(shape, fill_value, np.float32)
class GroupNormTest(parameterized.TestCase):
@test_utils.transform_and_run
def test_simple_case(self):
layer = group_norm.GroupNorm(
groups=5, create_scale=False, create_offset=False)
inputs = jnp.ones([2, 3, 3, 10])
outputs = layer(inputs)
for x in np.nditer(outputs):
self.assertEqual(x, 0.0)
@test_utils.transform_and_run
def test_simple_case_var(self):
layer = group_norm.GroupNorm(
groups=5,
create_scale=True,
create_offset=True,
scale_init=initializers.Constant(0.5),
offset_init=initializers.Constant(2.0))
inputs = jnp.ones([2, 3, 3, 10])
outputs = layer(inputs)
for x in np.nditer(outputs):
self.assertEqual(x, 2.0)
@test_utils.transform_and_run
def test_simple_case_nchwvar(self):
layer = group_norm.GroupNorm(
groups=5,
create_scale=True,
create_offset=True,
scale_init=initializers.Constant(0.5),
offset_init=initializers.Constant(2.0),
data_format="NCHW")
inputs = jnp.ones([2, 10, 3, 3])
outputs = layer(inputs)
for x in np.nditer(outputs):
self.assertEqual(x, 2.0)
@test_utils.transform_and_run
def test_data_format_agnostic_var(self):
c_last_layer = group_norm.GroupNorm(
groups=5, create_scale=True, create_offset=True)
c_first_layer = group_norm.GroupNorm(
groups=5, create_scale=True, create_offset=True, data_format="NCHW")
inputs = np.random.uniform(0, 10, [3, 4, 4, 10]).astype(np.float32)
c_last_output = c_last_layer(inputs)
inputs = jnp.transpose(inputs, [0, 3, 1, 2])
c_first_output = c_first_layer(inputs)
c_first_output = jnp.transpose(c_first_output, [0, 2, 3, 1])
self.assertAllClose(c_last_output, c_first_output)
@test_utils.transform_and_run
def test_simple_case_tensor(self):
layer = group_norm.GroupNorm(
groups=5, create_scale=False, create_offset=False)
inputs = jnp.ones([2, 3, 3, 10])
scale = constant(0.5, shape=(10,))
offset = constant(2.0, shape=(10,))
outputs = layer(inputs, scale, offset)
for x in np.nditer(outputs):
self.assertEqual(x, 2.0)
@test_utils.transform_and_run
def test_simple_case_nchwtensor(self):
layer = group_norm.GroupNorm(
groups=5, data_format="NCHW", create_scale=False, create_offset=False)
inputs = jnp.ones([2, 10, 3, 3])
scale = constant(0.5, shape=(10, 1, 1))
offset = constant(2.0, shape=(10, 1, 1))
outputs = layer(inputs, scale, offset)
for x in np.nditer(outputs):
self.assertEqual(x, 2.0)
@test_utils.transform_and_run
def test_data_format_agnostic_tensor(self):
c_last = group_norm.GroupNorm(
groups=5, create_scale=False, create_offset=False)
c_first = group_norm.GroupNorm(
groups=5, data_format="NCHW", create_scale=False, create_offset=False)
inputs = np.random.uniform(0, 10, [3, 4, 4, 10]).astype(np.float32)
scale = np.random.normal(size=(10,), loc=1.0)
offset = np.random.normal(size=(10,))
c_last_output = c_last(inputs, scale, offset)
inputs = jnp.transpose(inputs, [0, 3, 1, 2])
scale = jnp.reshape(scale, (10, 1, 1))
offset = jnp.reshape(offset, (10, 1, 1))
c_first_output = c_first(inputs, scale, offset)
c_first_output = jnp.transpose(c_first_output, [0, 2, 3, 1])
self.assertAllClose(c_last_output, c_first_output, rtol=1e-5)
@parameterized.parameters("NHW", "HWC", "channel_last")
@test_utils.transform_and_run
def test_invalid_data_format(self, data_format):
with self.assertRaisesRegex(
ValueError,
f"Unable to extract channel information from '{data_format}'.",
):
group_norm.GroupNorm(
groups=5,
data_format=data_format,
create_scale=False,
create_offset=False)
@parameterized.parameters("NCHW", "NCW", "channels_first")
@test_utils.transform_and_run
def test_valid_data_format_channels_first(self, data_format):
test = group_norm.GroupNorm(
groups=5,
data_format=data_format,
create_scale=False,
create_offset=False)
self.assertEqual(test.channel_index, 1)
@parameterized.parameters("NHWC", "NWC", "channels_last")
@test_utils.transform_and_run
def test_valid_data_format_channels_last(self, data_format):
test = group_norm.GroupNorm(
groups=5,
data_format=data_format,
create_scale=False,
create_offset=False)
self.assertEqual(test.channel_index, -1)
@parameterized.named_parameters(("String", "foo"), ("ListString", ["foo"]))
@test_utils.transform_and_run
def test_invalid_axis(self, axis):
with self.assertRaisesRegex(
ValueError, "`axis` should be an int, slice or iterable of ints."):
group_norm.GroupNorm(
groups=5, axis=axis, create_scale=False, create_offset=False)
@test_utils.transform_and_run
def test_no_scale_and_init_provided(self):
with self.assertRaisesRegex(
ValueError, "Cannot set `scale_init` if `create_scale=False`."):
group_norm.GroupNorm(
groups=5,
create_scale=False,
create_offset=True,
scale_init=jnp.ones)
@test_utils.transform_and_run
def test_no_offset_beta_init_provided(self):
with self.assertRaisesRegex(
ValueError, "Cannot set `offset_init` if `create_offset=False`."):
group_norm.GroupNorm(
groups=5,
create_scale=True,
create_offset=False,
offset_init=jnp.zeros)
@test_utils.transform_and_run
def test_create_scale_and_scale_provided(self):
layer = group_norm.GroupNorm(
groups=5, create_scale=True, create_offset=False)
with self.assertRaisesRegex(
ValueError, "Cannot pass `scale` at call time if `create_scale=True`."):
layer(jnp.ones([2, 3, 5]), scale=jnp.ones([4]))
@test_utils.transform_and_run
def test_create_offset_and_offset_provided(self):
layer = group_norm.GroupNorm(
groups=5, create_offset=True, create_scale=False)
with self.assertRaisesRegex(
ValueError,
"Cannot pass `offset` at call time if `create_offset=True`."):
layer(jnp.ones([2, 3, 5]), offset=jnp.ones([4]))
@test_utils.transform_and_run
def test_slice_axis(self):
slice_layer = group_norm.GroupNorm(
groups=5, create_scale=False, create_offset=False)
axis_layer = group_norm.GroupNorm(
groups=5, create_scale=False, create_offset=False)
inputs = np.random.uniform(0, 10, [3, 4, 4, 5]).astype(np.float32)
scale = np.random.normal(size=(5,), loc=1.0)
offset = np.random.normal(size=(5,))
slice_outputs = slice_layer(inputs, scale, offset)
axis_outputs = axis_layer(inputs, scale, offset)
self.assertAllClose(slice_outputs, axis_outputs)
@test_utils.transform_and_run
def test_rank_changes(self):
layer = group_norm.GroupNorm(
groups=5, create_scale=False, create_offset=False)
inputs = jnp.ones([2, 3, 3, 5])
scale = constant(0.5, shape=(5,))
offset = constant(2.0, shape=(5,))
layer(inputs, scale, offset)
with self.assertRaisesRegex(
ValueError,
"The rank of the inputs cannot change between calls, the original"):
layer(jnp.ones([2, 3, 3, 4, 5]), scale, offset)
@parameterized.named_parameters(("Small", (2, 4, 4)), ("Bigger", (2, 3, 8)))
@test_utils.transform_and_run
def test_incompatible_groups_and_tensor(self, shape):
layer = group_norm.GroupNorm(
groups=5, create_scale=False, create_offset=False)
inputs = jnp.ones(shape)
with self.assertRaisesRegex(
ValueError,
"The number of channels must be divisible by the number of groups"):
layer(inputs)
@test_utils.transform_and_run
def test5ddata_format_agnostic(self):
c_last_layer = group_norm.GroupNorm(
groups=5, create_scale=False, create_offset=False)
c_first_layer = group_norm.GroupNorm(
groups=5, create_scale=False, create_offset=False, data_format="NCDHW")
inputs = np.random.uniform(0, 10, [3, 4, 4, 4, 10]).astype(np.float32)
scale = np.random.normal(size=(10,), loc=1.0)
offset = np.random.normal(size=(10,))
c_last_output = c_last_layer(inputs, scale, offset)
inputs = jnp.transpose(inputs, [0, 4, 1, 2, 3])
scale = jnp.reshape(scale, [-1, 1, 1, 1])
offset = jnp.reshape(offset, [-1, 1, 1, 1])
c_first_output = c_first_layer(inputs, scale, offset)
c_first_output = jnp.transpose(c_first_output, [0, 2, 3, 4, 1])
self.assertAllClose(
c_last_output, c_first_output, atol=1e-5, rtol=1e-5)
@test_utils.transform_and_run
def test3ddata_format_agnostic(self):
c_last_layer = group_norm.GroupNorm(
groups=5, create_scale=False, create_offset=False)
c_first_layer = group_norm.GroupNorm(
groups=5, create_scale=False, create_offset=False, data_format="NCW")
inputs = np.random.uniform(0, 10, [3, 4, 10]).astype(np.float32)
scale = np.random.normal(size=(10,), loc=1.0)
offset = np.random.normal(size=(10,))
c_last_output = c_last_layer(inputs, scale, offset)
inputs = jnp.transpose(inputs, [0, 2, 1])
scale = jnp.reshape(scale, [-1, 1])
offset = jnp.reshape(offset, [-1, 1])
c_first_output = c_first_layer(inputs, scale, offset)
c_first_output = jnp.transpose(c_first_output, [0, 2, 1])
self.assertAllClose(
c_last_output, c_first_output, atol=1e-5, rtol=1e-5)
def assertAllClose(self, actual, desired, atol=1e-5, rtol=1e-5):
np.testing.assert_allclose(actual, desired, atol=atol, rtol=rtol)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/group_norm_test.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Haiku module."""
from collections.abc import Mapping
import contextlib
import functools
import inspect
import re
from typing import Any, Callable, ContextManager, NamedTuple, Optional, Protocol, TypeVar
from haiku._src import base
from haiku._src import config
from haiku._src import data_structures
from haiku._src import utils
import jax
import jax.numpy as jnp
T = TypeVar("T")
ThreadLocalStack = data_structures.ThreadLocalStack[T]
_APPLY_NAME_SCOPE = "__haiku_name_scope"
_CUSTOM_NAME = "__haiku_custom_name"
class Future:
"""Represents a value that will be produced eventually."""
def __init__(self):
self._result_set = False
self._result = None
def set_result(self, result: type["Module"]):
if self._result_set:
raise ValueError("Result already set.")
self._result_set = True
self._result = result
def result(self) -> type["Module"]:
if not self._result_set:
raise ValueError("Result not set.")
return self._result
# We subclass `type(Protocol)` in order to avoid metaclass conflicts when
# defining modules that also inherit from `Protocol`. Note that `type(Protocol)`
# already inherits from `abc.ABCMeta`.
class ModuleMetaclass(type(Protocol)):
"""Metaclass for `Module`."""
def __new__( # pylint: disable=bad-classmethod-argument
mcs: type[type[T]],
name: str,
bases: tuple[type[Any], ...],
clsdict: dict[str, Any],
) -> type[T]:
method_names = []
cls_fut = Future()
for key, value in clsdict.items():
if key == "module_name":
# Don't patch `module_name` in case the user implemented it as a
# computed property.
continue
elif key.startswith("__") and key != "__call__":
# Don't patch methods like `__getattr__` or `__del__`.
continue
elif isinstance(value, property):
# TODO(tomhennigan) Preserve the type of property subclasses.
p = value
clsdict[key] = property(
p.fget if not p.fget else wrap_method(key, p.fget, cls_fut.result),
p.fset if not p.fset else wrap_method(key, p.fset, cls_fut.result),
p.fdel if not p.fdel else wrap_method(key, p.fdel, cls_fut.result),
doc=value.__doc__)
elif inspect.isfunction(value):
# We defer patching methods until after the type is created such that we
# can trigger the descriptor binding them to the class.
method_names.append(key)
# Without this any classes with @abc.abstract* elements in their dict fail
# isinstance checks.
# TODO(b/177339347): Remove this workaround once the underlying bug in
# `_ProtocolMeta.__instancecheck__` has been fixed.
clsdict.setdefault("_is_protocol", False)
clsdict.setdefault(
"__repr__",
lambda module: module._auto_repr) # pylint: disable=protected-access
cls = super().__new__(mcs, name, bases, clsdict)
# Provides access to the class object to method interceptors.
cls_fut.set_result(cls)
for method_name in method_names:
# Note: the code below is subtle, we need to ensure that we're wrapping
# the method bound to the class. In some cases (e.g. `wrapt`) this is
# important since the method can trigger different behavior when it is
# bound (e.g. in wrapt `FunctionWrapper.__get__(None, cls)` produces a
# `BoundFunctionWrapper` which in turn populates the `instance` argument
# to decorator functions using args[0]).
# Equivalent to: `cls.__dict__[method_name].__get__(None, cls)`
method = getattr(cls, method_name)
method = wrap_method(method_name, method, cls_fut.result)
setattr(cls, method_name, method)
return cls
def __call__(cls, *args, **kwargs) -> Any: # pylint: disable=no-self-argument
# Call new such that we have an un-initialized module instance that we can
# still reference even if there is an exception during __init__. This is
# needed such that we can make sure the name_scope constructed in __init__
# is closed even if there is an exception.
# NOTE: We disable pytype since (somewhat surprisingly) this method is bound
# with the new class and not the metaclass.
module = cls.__new__(cls, *args, **kwargs) # pytype: disable=wrong-arg-types
# Now attempt to initialize the object.
init = wrap_method("__init__", cls.__init__, lambda: cls)
init(module, *args, **kwargs)
if (config.get_config().module_auto_repr and
getattr(module, "AUTO_REPR", True)):
module._auto_repr = utils.auto_repr(cls, *args, **kwargs) # pylint: disable=protected-access
else:
module._auto_repr = object.__repr__(module)
ran_super_ctor = hasattr(module, "module_name")
if not ran_super_ctor:
raise ValueError(
"Constructing an hk.Module without calling the super constructor "
"is not supported. Add the following as the first line in your "
"__init__ method:\n\nsuper(%s, self).__init__()" % cls.__name__)
return module
@property
def __signature__(cls): # pylint: disable=no-self-argument
signature = inspect.signature(cls.__init__)
params = tuple(signature.parameters.values())
return signature.replace(parameters=params[1:])
class MethodContext(NamedTuple):
r"""Read only state showing the calling context for a method.
For example, let's define two interceptors and print the values in the
context. Additionally, we will make the first interceptor conditionally short
circuit. Since interceptors stack and are run in order, an earlier interceptor
can decide to call the next interceptor, or to short circuit and call the
underlying method directly:
>>> module = hk.Linear(1, name="method_context_example")
>>> short_circuit = False
>>> def my_interceptor_1(next_fun, args, kwargs, context):
... print('running my_interceptor_1')
... print('- module.name: ', context.module.name)
... print('- method_name: ', context.method_name)
... if short_circuit:
... return context.orig_method(*args, **kwargs)
... else:
... return next_fun(*args, **kwargs)
>>> def my_interceptor_2(next_fun, args, kwargs, context):
... print('running my_interceptor_2')
... print('- module.name: ', context.module.name)
... print('- method_name: ', context.method_name)
... return next_fun(*args, **kwargs)
When ``short_circuit=False`` the two interceptors will run in order:
>>> with hk.intercept_methods(my_interceptor_1), \
... hk.intercept_methods(my_interceptor_2):
... _ = module(jnp.ones([1, 1]))
running my_interceptor_1
- module.name: method_context_example
- method_name: __call__
running my_interceptor_2
- module.name: method_context_example
- method_name: __call__
Setting ``short_circuit=True`` will cause the first interceptor to call the
original method (rather than ``next_fun`` which will trigger the next
interceptor):
>>> short_circuit = True
>>> with hk.intercept_methods(my_interceptor_1), \
... hk.intercept_methods(my_interceptor_2):
... _ = module(jnp.ones([1, 1]))
running my_interceptor_1
- module.name: method_context_example
- method_name: __call__
Attributes:
module: A :class:`~haiku.Module` instance whose method is being called.
method_name: The name of the method being called on the module.
orig_method: The underlying method on the module which when called will
*not* trigger interceptors. You should only call this if you want to
short circuit all the other interceptors, in general you should prefer to
call the ``next_fun`` passed to your interceptor which will run
``orig_method`` after running all other interceptors.
orig_class: The class which defined `orig_method`. Note that when
using inheritance this is not necessarily the same as `type(module)`.
"""
module: "Module"
method_name: str
orig_method: Callable[..., Any]
orig_class: type["Module"]
Args = tuple[Any]
Kwargs = dict[str, Any]
NextGetter = Callable[..., Any]
MethodGetter = Callable[[NextGetter, Args, Kwargs, MethodContext], Any]
interceptor_stack: ThreadLocalStack[MethodGetter] = ThreadLocalStack()
def intercept_methods(interceptor: MethodGetter):
"""Register a new method interceptor.
Method interceptors allow you to (at a distance) intercept method calls to
modules and modify args/kwargs before calling the underlying method. After the
underlying method is called you can modify its result before it is passed back
to the user.
For example you could intercept method calls to :class:`~haiku.BatchNorm` and
ensure it is always computed in full precision:
>>> def my_interceptor(next_f, args, kwargs, context):
... if (type(context.module) is not hk.BatchNorm
... or context.method_name != "__call__"):
... # We ignore methods other than BatchNorm.__call__.
... return next_f(*args, **kwargs)
...
... def cast_if_array(x):
... if isinstance(x, jax.Array):
... x = x.astype(jnp.float32)
... return x
...
... args, kwargs = jax.tree_util.tree_map(cast_if_array, (args, kwargs))
... out = next_f(*args, **kwargs)
... return out
We can create and use our module in the usual way, we just need to wrap any
method calls we want to intercept in the context manager:
>>> mod = hk.BatchNorm(decay_rate=0.9, create_scale=True, create_offset=True)
>>> x = jnp.ones([], jnp.bfloat16)
>>> with hk.intercept_methods(my_interceptor):
... out = mod(x, is_training=True)
>>> assert out.dtype == jnp.float32
Without the interceptor, BatchNorm would compute in bf16; however, since we
cast `x` before the underlying method is called, we compute in f32.
Args:
interceptor: A method interceptor.
Returns:
Context manager under which the interceptor is active.
"""
return interceptor_stack(interceptor)
def intercept_methods_global(interceptor: MethodGetter):
interceptor_stack.pushleft(interceptor)
def run_interceptors( # pylint: disable=invalid-name
bound_method: Callable[..., Any],
method_name: str,
self: "Module",
orig_class: type["Module"],
*args: Args,
**kwargs: Kwargs,
) -> Any:
"""Runs any method interceptors or the original method."""
if not interceptor_stack:
return bound_method(*args, **kwargs)
ctx = MethodContext(module=self,
method_name=method_name,
orig_method=bound_method,
orig_class=orig_class)
interceptor_stack_copy = interceptor_stack.clone()
def next_fun(*args, **kwargs):
if interceptor_stack_copy:
# NOTE: The `interceptor_fun` may call `next_fun` to trigger the next
# interceptor (and so on) allowing interceptors to be run in turn.
interceptor_fun = interceptor_stack_copy.popleft()
return interceptor_fun(next_fun, args, kwargs, ctx)
else:
return bound_method(*args, **kwargs)
return next_fun(*args, **kwargs)
def simulate_module_method(module, method_name):
frame = base.current_frame()
state = base.ModuleState(module=module, method_name=method_name)
return frame.module(state)
class NameScope:
"""Context manager that when active adds a new name in the hierarcy."""
def __init__(self, name: str, method_name: str):
if not name or name[0] == "/":
raise ValueError("Name scopes must not start with /")
parts = [name] if name.startswith(OVERRIDE_PREFIX) else name.split("/")
module = None
with contextlib.ExitStack() as stack:
for subname in parts:
module = NameScopeModule(name=subname)
stack.enter_context(simulate_module_method(module, method_name))
self.__entered = False
self.__module = module
self.__method = method_name
self.__stack = contextlib.ExitStack()
def __enter__(self):
if self.__stack is None:
raise ValueError("name_scope is not reusable")
if self.__entered:
raise ValueError("name_scope is not reentrant")
self.__entered = True
self.__stack.enter_context(simulate_module_method(self.__module,
self.__method))
def __exit__(self, exc_type, exc_value, traceback):
try:
return self.__stack.__exit__(exc_type, exc_value, traceback)
finally:
self.__entered = False
self.__stack = None
def name_scope(
name: str,
*,
method_name: str = "__call__",
) -> ContextManager[None]:
"""Context manager which adds a prefix to all new modules, params or state.
>>> with hk.name_scope("my_name_scope"):
... net = hk.Linear(1, name="my_linear")
>>> net.module_name
'my_name_scope/my_linear'
When used inside a module, any submodules, parameters or state created inside
the name scope will have a prefix added to their names:
>>> class MyModule(hk.Module):
... def __call__(self, x):
... with hk.name_scope("my_name_scope"):
... submodule = hk.Linear(1, name="submodule")
... w = hk.get_parameter("w", [], init=jnp.ones)
... return submodule(x) + w
>>> f = hk.transform(lambda x: MyModule()(x))
>>> params = f.init(jax.random.PRNGKey(42), jnp.ones([1, 1]))
>>> jax.tree_util.tree_map(jnp.shape, params)
{'my_module/my_name_scope': {'w': ()},
'my_module/my_name_scope/submodule': {'b': (1,), 'w': (1, 1)}}
Name scopes are very similar to putting all of the code inside the context
manager inside a method on a :class:`Module` with the name you provide. Behind
the scenes this is precisely how name scopes are implemented.
If you are familiar with TensorFlow then Haiku's :func:`name_scope` is similar
to ``tf.variable_scope(..)`` in TensorFlow 1 and ``tf.name_scope(..)`` in
TensorFlow 1 and 2 in that it changes the names associated with modules,
parameters and state.
Args:
name: The name scope to use (e.g. ``"foo"`` or ``"foo/bar"``).
method_name: (Advanced uses only). Since name scopes are equivalent to
calling methods on modules the method name attribute allows you to specify
which method name you want to simulate. Most users should leave this as
the default value (`"__call__"`).
Returns:
A single use context manager that when active prefixes new modules,
parameters or state with the given name.
"""
base.assert_context("name_scope")
return NameScope(name, method_name)
def wrap_method(method_name, unbound_method, cls_resolver):
"""Wraps `method` such that it enters name stack and runs method interceptors.
Args:
method_name: The name of the method (e.g. "__call__").
unbound_method: An unbound method to wrap.
cls_resolver: A callable that returns the Module subclass which defined this
method.
Returns:
A function that runs the original method but in a context where parameters
are reused and modules can be created.
"""
if not getattr(unbound_method, _APPLY_NAME_SCOPE, True):
return unbound_method
@functools.wraps(unbound_method)
def wrapped(self, *args, **kwargs):
"""Calls the original method with a group name set before and after."""
if not base.frame_stack:
raise ValueError(
"All `hk.Module`s must be initialized inside an `hk.transform`.")
# Submodules are associated with this method. We allow users to associate
# submodules with a different method than the one being called via
# `@name_like("other_method")`. Interceptors and custom getters are still
# provided the actual method name (e.g. "submodule_method_name" is only used
# for naming submodules).
submodule_method_name = getattr(unbound_method, _CUSTOM_NAME, method_name)
frame = base.current_frame()
state = base.ModuleState(module=self, method_name=submodule_method_name)
with frame.module(state), _module_method_call(self, method_name):
# hk.Module enters the module name scope for all methods.
module_name = getattr(self, "module_name", None)
orig_class = cls_resolver()
f = functools.partial(unbound_method, self)
f = functools.partial(run_interceptors, f, method_name, self,
orig_class)
if module_name:
local_module_name = module_name.split("/")[-1]
f = jax.named_call(f, name=local_module_name)
if method_name != "__call__":
f = jax.named_call(f, name=method_name)
out = f(*args, **kwargs)
# Module names are set in the constructor. If `f` is the constructor then
# its name will only be set **after** `f` has run. For methods other
# than `__init__` we need the name before running in order to wrap their
# execution with `named_call`.
if module_name is None:
module_name = getattr(self, "module_name", None)
# Notify parent modules about our existence.
if module_name is not None:
for module_state in frame.module_stack:
if module_state.module is not self:
module_state.module._submodules.add(module_name) # pylint: disable=protected-access
return out
return wrapped
_VALID_IDENTIFIER_R = re.compile(r"^[a-zA-Z_]([a-zA-Z0-9_])*$")
valid_identifier = lambda name: bool(_VALID_IDENTIFIER_R.match(name))
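# (Illustrative) valid_identifier("foo_1") is True, while valid_identifier("1foo")
# and valid_identifier("foo-bar") are False: name components must be valid
# Python identifiers (ASCII letters, digits and underscores, not starting with
# a digit).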
def name_and_number(name: str) -> tuple[str, Optional[int]]:
splits = re.split(r"_(0|[1-9]\d*)$", name, 3)
if len(splits) > 1:
return splits[0], int(splits[1])
else:
return name, None
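# (Illustrative) name_and_number("linear_3") returns ("linear", 3) and
# name_and_number("linear") returns ("linear", None). A suffix with a leading
# zero such as "linear_03" is not treated as a number, so the full string is
# returned with None.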
def unique_and_canonical_name(name: str) -> str:
"""Returns a canonical name for the given name."""
frame = base.current_frame()
# If we are outside init/call then prefix the name with the method name.
if len(frame.module_stack) > 1:
# -2 since we are inside the ctor and want to look at the caller state.
module_state = frame.module_stack.peek(-2)
# Make sure to include the method name if appropriate.
method_name = module_state.method_name
if method_name == "__init__":
name = "~/" + name
elif method_name != "__call__":
name = "~" + method_name + "/" + name
# Include the parent name.
parent_module = module_state.module
parent_name = base.safe_get_module_name(parent_module)
name = parent_name + "/" + name
# Test if the user has explicitly numbered this module.
name, n = name_and_number(name)
explicit_n = n is not None
# Determine a unique name for this module within the current context.
if n is None:
n = next_module_number(name)
name = f"{name}_{n}" if explicit_n or n else name
# Final sanity check that this name has not been used before.
reserve_module_name(name, check_unique=True)
return name
def reserve_module_name(name: str, *, check_unique: bool):
"""Reserves the given module name."""
frame = base.current_frame()
used_names = frame.used_names_stack.peek(-2)
if check_unique and name in used_names:
raise ValueError(f"Module name '{name}' is not unique.")
used_names.add(name)
name, number = name_and_number(name)
if number is None:
number = 0
counters = frame.counter_stack.peek(-2)
counters[name] = max(counters[name], number + 1)
def next_module_number(name: str) -> int:
frame = base.current_frame()
counters = frame.counter_stack.peek(-2)
return counters[name]
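# (Illustrative) Effect of the counters above: creating three unnamed
# `hk.Linear` modules inside the same method yields the names "linear",
# "linear_1" and "linear_2". Explicitly passing name="linear_7" reserves that
# number, so the next unnamed Linear in the same context becomes "linear_8".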
# NOTE: Since `:` is not a valid symbol in a module name (it has been rejected
# by check_name since the first version of Haiku) we know that no existing users
# have this name so it is a safe token.
OVERRIDE_PREFIX = "FORCE:"
def force_name(name: str) -> str:
"""Forces Haiku to use this name, ignoring all context information.
Haiku names modules according to where they are created (e.g. the stack of
modules that created them, or the current :func:`~haiku.name_scope`). This
function allows you to create modules that ignore all of this and have
precisely the name you provide.
This might be useful in the case that you have two modules and you want to
force them to share parameters:
>>> mod0 = hk.Linear(1)
>>> some_hyperparameter = True
>>> if some_hyperparameter:
... # Force mod1 and mod0 to have shared weights.
... mod1 = hk.Linear(1, name=hk.force_name(mod0.module_name))
... else:
... # mod0 and mod1 are independent.
... mod1 = hk.Linear(1)
(A simpler version of this snippet would do `mod1 = mod0` instead of using
force_name; however, in real examples it can be simpler to use force_name,
especially in cases where you may not have access to the module instance
without lots of plumbing, but getting the module name is easy [e.g. it is a
hyperparameter]).
Args:
name: String name for the module. For example ``"foo"`` or ``"foo/bar"``.
Returns:
A value suitable to pass into the ``name`` argument of any Haiku module
constructor.
"""
return f"{OVERRIDE_PREFIX}{name}"
def check_name(component: str, name: str, allow_leading_tilde: bool = False):
if allow_leading_tilde and component.startswith("~"):
component = component[1:]
if not component:
# "~" is a valid component name (e.g. "foo/~/bar" is a valid name).
return
if not valid_identifier(component):
raise ValueError(f"'{name}' is not a valid module name (must be a "
"valid Python identifier)")
class Module(metaclass=ModuleMetaclass):
"""Base class for Haiku modules.
A Haiku module is a lightweight container for variables and other modules.
Modules typically define one or more "forward" methods (e.g. ``__call__``)
which apply operations combining user input and module parameters.
Modules must be initialized inside a :func:`transform` call.
For example:
>>> class AddModule(hk.Module):
... def __call__(self, x):
... w = hk.get_parameter("w", [], init=jnp.ones)
... return x + w
>>> def forward_fn(x):
... mod = AddModule()
... return mod(x)
>>> forward = hk.transform(forward_fn)
>>> x = 1.
>>> rng = None
>>> params = forward.init(rng, x)
>>> print(forward.apply(params, None, x))
2.0
"""
def __init__(self, name: Optional[str] = None):
"""Initializes the current module with the given name.
Subclasses should call this constructor before creating other modules or
variables such that those modules are named correctly.
Args:
name: An optional string name for the class. Must be a valid Python
identifier. If ``name`` is not provided then the class name for the
current instance is converted to ``lower_snake_case`` and used instead.
"""
if name is None:
if hasattr(self, "name") and self.name is not None:
# Attribute assigned by @dataclass constructor.
name = self.name
else:
name = utils.camel_to_snake(type(self).__name__)
if name.startswith(OVERRIDE_PREFIX):
name = name[len(OVERRIDE_PREFIX):]
for component in name.split("/"):
check_name(component, name, allow_leading_tilde=True)
reserve_module_name(name, check_unique=False)
else:
check_name(name, name)
name = unique_and_canonical_name(name)
self._submodules: set[str] = set()
self.module_name = name
self.name = self.module_name.split("/")[-1]
self._creation_frame_id = base.current_frame().frame_id
# Support @dataclass annotated modules.
__post_init__ = __init__
def params_dict(self) -> Mapping[str, jnp.ndarray]:
"""Returns parameters keyed by name for this module and submodules."""
if not base.frame_stack:
raise ValueError(
"`module.params_dict()` must be used as part of an `hk.transform`.")
return params_or_state_dict(self.module_name, self._submodules, "params")
def state_dict(self) -> Mapping[str, jnp.ndarray]:
"""Returns state keyed by name for this module and submodules."""
if not base.frame_stack:
raise ValueError(
"`module.state_dict()` must be used as part of an `hk.transform`.")
return params_or_state_dict(self.module_name, self._submodules, "state")
def params_or_state_dict(
module_name: str,
submodules: set[str],
which: str,
) -> Mapping[str, jnp.ndarray]:
"""Returns module parameters or state for the given module or submodules."""
assert which in ("params", "state")
out = {}
frame = base.current_frame()
for their_module_name, bundle in getattr(frame, which).items():
if (their_module_name == module_name
or their_module_name.startswith(module_name + "/")
or their_module_name in submodules):
for name, value in bundle.items():
fq_name = their_module_name + "/" + name
out[fq_name] = value.current if which == "state" else value
return out
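# (Illustrative) If the current frame holds params for "mlp/linear" and
# "mlp/linear_1", then params_or_state_dict("mlp", set(), "params") returns a
# flat mapping keyed by fully qualified names, e.g.
#   {"mlp/linear/w": ..., "mlp/linear/b": ..., "mlp/linear_1/w": ..., ...}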
def transparent(method: T) -> T:
"""Decorator to wrap a method, preventing automatic variable scope wrapping.
By default, all variables and modules created in a method are scoped by the
module and method names. This is undesirable in some cases. Any method
decorated with :func:`transparent` will create variables and modules in the
scope in which it was called.
Args:
method: the method to wrap.
Returns:
The method, with a flag indicating no name scope wrapping should occur.
"""
setattr(method, _APPLY_NAME_SCOPE, False)
return method
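# Minimal sketch of the effect of @transparent (the module and parameter names
# shown are assumptions for illustration only):
#
#   class Outer(hk.Module):
#     @transparent
#     def _helper(self, x):
#       # Created in the caller's scope: "outer/w", not "outer/~_helper/w".
#       return x * hk.get_parameter("w", [], init=jnp.ones)
#
#     def __call__(self, x):
#       return self._helper(x)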
def name_like(method_name: str) -> Callable[[T], T]:
"""Allows a method to be named like some other method.
In Haiku submodules are named based on the name of their parent module and the
method in which they are created. When refactoring code it may be desirable to
maintain previous names in order to keep checkpoint compatibility; this can be
achieved using :func:`name_like`.
As an example, consider the following toy autoencoder:
>>> class Autoencoder(hk.Module):
... def __call__(self, x):
... z = hk.Linear(10, name="enc")(x) # name: autoencoder/enc
... y = hk.Linear(10, name="dec")(z) # name: autoencoder/dec
... return y
If we want to refactor this such that users can encode or decode, we would
create two methods (encode, decode) which would create and apply our modules.
In order to retain checkpoint compatibility with the original module we can
use :func:`name_like` to name those submodules as if they were created inside
``__call__``:
>>> class Autoencoder(hk.Module):
... @hk.name_like("__call__")
... def encode(self, x):
... return hk.Linear(10, name="enc")(x) # name: autoencoder/enc
...
... @hk.name_like("__call__")
... def decode(self, z):
... return hk.Linear(10, name="dec")(z) # name: autoencoder/dec
...
... def __call__(self, x):
... return self.decode(self.encode(x))
One sharp edge is if users rely on Haiku's numbering to take care of giving
unique names and refactor using :func:`name_like`. For example when
refactoring the following:
>>> class Autoencoder(hk.Module):
... def __call__(self, x):
  ...     z = hk.Linear(10)(x)  # name: autoencoder/linear
  ...     y = hk.Linear(10)(z)  # name: autoencoder/linear_1
  ...     return y
  If the code above is refactored with :func:`name_like`, the unnamed linear
  modules in ``encode``/``decode`` will end up with the same name (both
  ``autoencoder/linear``) because module numbering is only applied within a
  method:
>>> class Autoencoder(hk.Module):
... @hk.name_like("__call__")
... def encode(self, x):
... return hk.Linear(10)(x) # name: autoencoder/linear
...
... @hk.name_like("__call__")
... def decode(self, z):
... return hk.Linear(10)(z) # name: autoencoder/linear <-- NOT INTENDED
To fix this case you need to explicitly name the modules within the method
with their former name:
>>> class Autoencoder(hk.Module):
... @hk.name_like("__call__")
... def encode(self, x):
... return hk.Linear(10, name="linear")(x) # name: autoencoder/linear
...
... @hk.name_like("__call__")
... def decode(self, z):
... return hk.Linear(10, name="linear_1")(z) # name: autoencoder/linear_1
Args:
method_name: The name of a method whose name we should adopt. This method
does not actually have to be defined on the class.
Returns:
A decorator that when applied to a method marks it as having a different
name.
"""
def decorator(method: T) -> T:
setattr(method, _CUSTOM_NAME, method_name)
return method
return decorator
MethodHook = Callable[[Module, str], ContextManager[None]]
method_hook_stack: ThreadLocalStack[MethodHook] = ThreadLocalStack()
def hook_methods(method_hook: MethodHook) -> ContextManager[None]:
"""Context manager that registers a given module method_hook."""
return method_hook_stack(method_hook)
@contextlib.contextmanager
def _module_method_call(module: Module, method_name: str):
"""Context manager that wraps a method being called on a module."""
with contextlib.ExitStack() as stack:
for method_hook in method_hook_stack:
stack.enter_context(method_hook(module, method_name))
yield
class NameScopeModule(Module):
pass
| dm-haiku-main | haiku/_src/module.py |
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts Haiku functions to dot."""
import collections
import contextlib
import functools
import html
from typing import Any, Callable, NamedTuple, Optional
from haiku._src import data_structures
from haiku._src import module
from haiku._src import utils
import jax
import jax.core
from jax.experimental import pjit
# Import tree if available, but only throw error at runtime.
# Permits us to drop dm-tree from deps.
try:
import tree # pylint: disable=g-import-not-at-top
except ImportError:
tree = None
graph_stack = data_structures.ThreadLocalStack['Graph']()
Node = collections.namedtuple('Node', 'id,title,outputs')
Edge = collections.namedtuple('Edge', 'a,b')
class Graph(NamedTuple):
  """Represents a graphviz digraph/subgraph."""
title: str
nodes: list[Node]
edges: list[Edge]
subgraphs: list['Graph']
@classmethod
def create(cls, title: Optional[str] = None):
return Graph(title=title, nodes=[], edges=[], subgraphs=[])
def evolve(self, **kwargs) -> 'Graph':
return Graph(**{**self._asdict(), **kwargs})
def to_dot(fun: Callable[..., Any]) -> Callable[..., str]:
"""Converts a function using Haiku modules to a dot graph.
To view the resulting graph in Google Colab or an iPython notebook use the
``graphviz`` package:
.. code-block::
dot = hk.to_dot(f)(x)
import graphviz
graphviz.Source(dot)
Args:
fun: A function using Haiku modules.
Returns:
A function that returns the source code string to a graphviz graph
describing the operations executed by the given function clustered by Haiku
module.
See Also:
:func:`abstract_to_dot`: Generates a graphviz graph using abstract inputs.
"""
graph_fun = to_graph(fun)
@functools.wraps(fun)
def wrapped_fun(*args) -> str:
return _graph_to_dot(*graph_fun(*args))
return wrapped_fun
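# Illustrative sketch (hypothetical `_example_*` helper; assumes the public
# `haiku` API): rendering a transformed function. The returned string can be
# fed to `graphviz.Source` as shown in the docstring above.
def _example_to_dot():
  import haiku as hk
  import jax.numpy as jnp

  def f(x):
    return hk.nets.MLP([300, 100, 10])(x)

  f = hk.transform(f)
  x = jnp.ones([8, 28 * 28])
  params = f.init(jax.random.PRNGKey(0), x)
  return to_dot(f.apply)(params, None, x)  # Graphviz source as a string.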
def abstract_to_dot(fun: Callable[..., Any]) -> Callable[..., str]:
"""Converts a function using Haiku modules to a dot graph.
Same as :func:`to_dot` but uses JAX's abstract interpretation
machinery to evaluate the function without requiring concrete inputs.
Valid inputs for the wrapped function include
:class:`jax.ShapeDtypeStruct`.
:func:`abstract_to_dot` does not support data-dependent
control-flow, because no concrete values are provided to the function.
Args:
fun: A function using Haiku modules.
Returns:
A function that returns the source code string to a graphviz graph
describing the operations executed by the given function clustered by Haiku
module.
See Also:
:func:`to_dot`: Generates a graphviz graph using concrete inputs.
"""
@functools.wraps(fun)
def wrapped_fun(*args) -> str:
dot_out = ''
    # eval_shape cannot evaluate functions which return str, as str is not a
    # valid JAX type.
# The following function extracts the created dot string during the
# abstract evaluation.
def dot_extractor_fn(*inner_args):
nonlocal dot_out
dot_out = to_dot(fun)(*inner_args)
jax.eval_shape(dot_extractor_fn, *args)
assert dot_out, 'Failed to extract dot graph from abstract evaluation'
return dot_out
return wrapped_fun
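# Illustrative sketch (hypothetical helper): because evaluation is abstract,
# only shapes and dtypes are needed, e.g. via `jax.ShapeDtypeStruct`.
def _example_abstract_to_dot():
  import jax.numpy as jnp

  def f(x):
    return jnp.sum(jnp.tanh(x) ** 2)

  x_spec = jax.ShapeDtypeStruct((8, 16), jnp.float32)
  return abstract_to_dot(f)(x_spec)  # No concrete `x` required.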
def name_or_str(o):
return getattr(o, '__name__', str(o))
def to_graph(fun):
  """Converts a Haiku function into a graph IR (extracted for testing)."""
@functools.wraps(fun)
def wrapped_fun(*args):
"""See `fun`."""
f = jax.linear_util.wrap_init(fun)
args_flat, in_tree = jax.tree_util.tree_flatten((args, {}))
flat_fun, out_tree = jax.api_util.flatten_fun(f, in_tree)
graph = Graph.create(title=name_or_str(fun))
@contextlib.contextmanager
def method_hook(mod: module.Module, method_name: str):
subg = Graph.create()
with graph_stack(subg):
yield
title = mod.module_name
if method_name != '__call__':
title += f' ({method_name})'
graph_stack.peek().subgraphs.append(subg.evolve(title=title))
with graph_stack(graph), \
module.hook_methods(method_hook), \
jax.core.new_main(DotTrace) as main:
out_flat = _interpret_subtrace(flat_fun, main).call_wrapped(*args_flat)
out = jax.tree_util.tree_unflatten(out_tree(), out_flat)
return graph, args, out
return wrapped_fun
@jax.linear_util.transformation
def _interpret_subtrace(main, *in_vals):
trace = DotTrace(main, jax.core.cur_sublevel())
in_tracers = [DotTracer(trace, val) for val in in_vals]
outs = yield in_tracers, {}
out_tracers = map(trace.full_raise, outs)
out_vals = [t.val for t in out_tracers]
yield out_vals
class DotTracer(jax.core.Tracer):
"""JAX tracer used in DotTrace."""
def __init__(self, trace, val):
super().__init__(trace)
self.val = val
@property
def aval(self):
return jax.core.get_aval(self.val)
def full_lower(self):
return self
class DotTrace(jax.core.Trace):
"""Traces a JAX function to dot."""
def pure(self, val):
return DotTracer(self, val)
def lift(self, val):
return DotTracer(self, val)
def sublift(self, val):
return DotTracer(self, val.val)
def process_primitive(self, primitive, tracers, params):
val_out = primitive.bind(*[t.val for t in tracers], **params)
if primitive is pjit.pjit_p:
f = jax.core.jaxpr_as_fun(params['jaxpr'])
f.__name__ = params['name']
fun = jax.linear_util.wrap_init(f)
return self.process_call(primitive, fun, tracers, params)
inputs = [t.val for t in tracers]
outputs = list(jax.tree_util.tree_leaves(val_out))
graph = graph_stack.peek()
node = Node(id=outputs[0], title=str(primitive), outputs=outputs)
graph.nodes.append(node)
graph.edges.extend([(i, outputs[0]) for i in inputs])
return jax.tree_util.tree_map(lambda v: DotTracer(self, v), val_out)
def process_call(self, call_primitive, f, tracers, params):
assert call_primitive.multiple_results
if (call_primitive in (pjit.pjit_p,) and
params.get('inline', False)):
f = _interpret_subtrace(f, self.main)
vals_out = f.call_wrapped(*[t.val for t in tracers])
return [DotTracer(self, v) for v in vals_out]
graph = Graph.create(title=f'{call_primitive} ({name_or_str(f.f)})')
graph_stack.peek().subgraphs.append(graph)
with graph_stack(graph):
f = _interpret_subtrace(f, self.main)
vals_out = f.call_wrapped(*[t.val for t in tracers])
return [DotTracer(self, v) for v in vals_out]
process_map = process_call
def process_custom_jvp_call(self, primitive, fun, jvp, tracers, *,
symbolic_zeros):
# Drop the custom differentiation rule.
del primitive, jvp, symbolic_zeros # Unused.
return fun.call_wrapped(*tracers)
def process_custom_vjp_call(self, primitive, fun, fwd, bwd, tracers,
out_trees, symbolic_zeros):
# Drop the custom differentiation rule.
del primitive, fwd, bwd, out_trees, symbolic_zeros # Unused.
return fun.call_wrapped(*tracers)
def _format_val(val):
if not hasattr(val, 'shape'):
return repr(val)
shape = ','.join(map(str, val.shape))
dtype = utils.simple_dtype(val.dtype)
return f'{dtype}[{shape}]'
def escape(value):
return html.escape(str(value))
# Determine maximum nesting depth to appropriately scale subgraph labels.
def _max_depth(g: Graph) -> int:
if g.subgraphs:
return 1 + max(0, *[_max_depth(s) for s in g.subgraphs])
else:
return 1
def _scaled_font_size(depth: int) -> int:
return int(1.4**depth * 14)
def _graph_to_dot(graph: Graph, args, outputs) -> str:
"""Converts from an internal graph IR to 'dot' format."""
if tree is None:
raise ImportError('hk.to_dot requires dm-tree>=0.1.1.')
def format_path(path):
if isinstance(outputs, tuple):
out = f'output[{path[0]}]'
if len(path) > 1:
out += ': ' + '/'.join(map(str, path[1:]))
else:
out = 'output'
if path:
out += ': ' + '/'.join(map(str, path))
return out
lines = []
used_argids = set()
argid_usecount = collections.Counter()
op_outids = set()
captures = []
argids = {id(v) for v in jax.tree_util.tree_leaves(args)}
outids = {id(v) for v in jax.tree_util.tree_leaves(outputs)}
outname = {id(v): format_path(p) for p, v in tree.flatten_with_path(outputs)}
def render_graph(g: Graph, parent: Optional[Graph] = None, depth: int = 0):
"""Renders a given graph by appending 'dot' format lines."""
if parent:
lines.extend([
f'subgraph cluster_{id(g)} {{',
' style="rounded,filled";',
' fillcolor="#F0F5F5";',
' color="#14234B;";',
' pad=0.1;',
f' fontsize={_scaled_font_size(depth)};',
f' label = <<b>{escape(g.title)}</b>>;',
' labelloc = t;',
])
for node in g.nodes:
label = f'<b>{escape(node.title)}</b>'
for o in node.outputs:
label += '<br/>' + _format_val(o)
op_outids.add(id(o))
node_id = id(node.id)
if node_id in outids:
label = f'<b>{escape(outname[node_id])}</b><br/>' + label
color = '#0053D6'
fillcolor = '#AABFFF'
style = 'filled,bold'
else:
color = '#FFDB13'
fillcolor = '#FFF26E'
style = 'filled'
lines.append(f'{node_id} [label=<{label}>, '
f' id="node{node_id}",'
' shape=rect,'
f' style="{style}",'
' tooltip=" ",'
' fontcolor="black",'
f' color="{color}",'
f' fillcolor="{fillcolor}"];')
for s in g.subgraphs:
render_graph(s, parent=g, depth=depth - 1)
if parent:
lines.append(f'}} // subgraph cluster_{id(g)}')
for a, b in g.edges:
if id(a) not in argids and id(a) not in op_outids:
captures.append(a)
a, b = map(id, (a, b))
if a in argids:
i = argid_usecount[a]
argid_usecount[a] += 1
lines.append(f'{a}{i} -> {b};')
else:
lines.append(f'{a} -> {b};')
used_argids.add(a)
graph_depth = _max_depth(graph)
render_graph(graph, parent=None, depth=graph_depth)
# Process inputs and label them in the graph.
for path, value in tree.flatten_with_path(args):
if value is None:
continue
node_id = id(value)
if node_id not in used_argids:
continue
for i in range(argid_usecount[node_id]):
label = f'<b>args[{escape(path[0])}]'
if len(path) > 1:
label += ': ' + '/'.join(map(str, path[1:]))
label += '</b>'
if hasattr(value, 'shape') and hasattr(value, 'dtype'):
label += f'<br/>{escape(_format_val(value))}'
fillcolor = '#FFDEAF'
fontcolor = 'black'
if i > 0:
label = '<b>(reuse)</b><br/>' + label
fillcolor = '#FFEACC'
fontcolor = '#565858'
lines.append(f'{node_id}{i} [label=<{label}>'
f' id="node{node_id}{i}",'
' shape=rect,'
' style="filled",'
f' fontcolor="{fontcolor}",'
' color="#FF8A4F",'
f' fillcolor="{fillcolor}"];')
for value in captures:
node_id = id(value)
if (not hasattr(value, 'aval') and
hasattr(value, 'size') and
value.size == 1):
label = f'<b>{value.item()}</b>'
else:
label = f'<b>{escape(_format_val(value))}</b>'
lines.append(f'{node_id} [label=<{label}>'
' shape=rect,'
' style="filled",'
' fontcolor="black",'
' color="#A261FF",'
' fillcolor="#E6D6FF"];')
head = [
'digraph G {',
'rankdir = TD;',
'compound = true;',
f'label = <<b>{escape(graph.title)}</b>>;',
f'fontsize={_scaled_font_size(graph_depth)};',
'labelloc = t;',
'stylesheet = <',
' data:text/css,',
' @import url(https://fonts.googleapis.com/css?family=Roboto:400,700);',
' svg text {',
' font-family: \'Roboto\';',
' }',
' .node text {',
' font-size: 12px;',
' }',
]
for node_id, use_count in argid_usecount.items():
if use_count == 1:
continue
# Add hover animation for reused args.
for a in range(use_count):
for b in range(use_count):
if a == b:
head.append(f'%23node{node_id}{a}:hover '
'{ stroke-width: 0.2em; }')
else:
head.append(
f'%23node{node_id}{a}:hover ~ %23node{node_id}{b} '
'{ stroke-width: 0.2em; }')
head.append('>')
lines.append('} // digraph G')
return '\n'.join(head + lines) + '\n'
| dm-haiku-main | haiku/_src/dot.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convolutional Haiku modules."""
from collections.abc import Sequence
from typing import Optional, Union
from haiku._src import base
from haiku._src import initializers
from haiku._src import module
from haiku._src import pad
from haiku._src import utils
import jax
from jax import lax
import jax.numpy as jnp
import numpy as np
# If you are forking replace this with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
initializers = initializers
pad = pad
get_parameter = base.get_parameter
Module = module.Module
get_channel_index = utils.get_channel_index
# pylint: enable=invalid-name
del base, module, initializers, pad
def to_dimension_numbers(
num_spatial_dims: int,
channels_last: bool,
transpose: bool,
) -> lax.ConvDimensionNumbers:
"""Create a `lax.ConvDimensionNumbers` for the given inputs."""
num_dims = num_spatial_dims + 2
if channels_last:
spatial_dims = tuple(range(1, num_dims - 1))
image_dn = (0, num_dims - 1) + spatial_dims
else:
spatial_dims = tuple(range(2, num_dims))
image_dn = (0, 1) + spatial_dims
if transpose:
kernel_dn = (num_dims - 2, num_dims - 1) + tuple(range(num_dims - 2))
else:
kernel_dn = (num_dims - 1, num_dims - 2) + tuple(range(num_dims - 2))
return lax.ConvDimensionNumbers(lhs_spec=image_dn, rhs_spec=kernel_dn,
out_spec=image_dn)
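# Worked example (values follow directly from the branches above; the helper
# itself is illustrative): a 2D, channels-last, non-transposed convolution
# maps to NHWC/HWIO-style dimension numbers.
def _example_dimension_numbers() -> lax.ConvDimensionNumbers:
  # lhs_spec=(0, 3, 1, 2), rhs_spec=(3, 2, 0, 1), out_spec=(0, 3, 1, 2).
  return to_dimension_numbers(2, channels_last=True, transpose=False)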
class ConvND(hk.Module):
  """General N-dimensional convolution."""
def __init__(
self,
num_spatial_dims: int,
output_channels: int,
kernel_shape: Union[int, Sequence[int]],
stride: Union[int, Sequence[int]] = 1,
rate: Union[int, Sequence[int]] = 1,
padding: Union[
str, Sequence[tuple[int, int]], hk.pad.PadFn, Sequence[hk.pad.PadFn]
] = "SAME",
with_bias: bool = True,
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None,
data_format: str = "channels_last",
mask: Optional[jax.Array] = None,
feature_group_count: int = 1,
name: Optional[str] = None,
):
"""Initializes the module.
Args:
num_spatial_dims: The number of spatial dimensions of the input.
output_channels: Number of output channels.
kernel_shape: The shape of the kernel. Either an integer or a sequence of
length ``num_spatial_dims``.
stride: Optional stride for the kernel. Either an integer or a sequence of
length ``num_spatial_dims``. Defaults to 1.
rate: Optional kernel dilation rate. Either an integer or a sequence of
length ``num_spatial_dims``. 1 corresponds to standard ND convolution,
``rate > 1`` corresponds to dilated convolution. Defaults to 1.
padding: Optional padding algorithm. Either ``VALID`` or ``SAME`` or a
sequence of n ``(low, high)`` integer pairs that give the padding to
        apply before and after each spatial dimension, or a callable or sequence
        of callables of length ``num_spatial_dims``. Any callables must take a
single integer argument equal to the effective kernel size and return a
sequence of two integers representing the padding before and after. See
``haiku.pad.*`` for more details and example functions. Defaults to
``SAME``. See:
https://www.tensorflow.org/xla/operation_semantics#conv_convolution.
with_bias: Whether to add a bias. By default, true.
w_init: Optional weight initialization. By default, truncated normal.
b_init: Optional bias initialization. By default, zeros.
data_format: The data format of the input. Can be either
``channels_first``, ``channels_last``, ``N...C`` or ``NC...``. By
default, ``channels_last``. See :func:`get_channel_index`.
mask: Optional mask of the weights.
feature_group_count: Optional number of groups in group convolution.
Default value of 1 corresponds to normal dense convolution. If a higher
value is used, convolutions are applied separately to that many groups,
then stacked together. This reduces the number of parameters
and possibly the compute for a given ``output_channels``. See:
https://www.tensorflow.org/xla/operation_semantics#conv_convolution.
name: The name of the module.
"""
super().__init__(name=name)
if num_spatial_dims <= 0:
raise ValueError(
"We only support convolution operations for `num_spatial_dims` "
f"greater than 0, received num_spatial_dims={num_spatial_dims}.")
self.num_spatial_dims = num_spatial_dims
self.output_channels = output_channels
self.kernel_shape = (
utils.replicate(kernel_shape, num_spatial_dims, "kernel_shape"))
self.with_bias = with_bias
self.stride = utils.replicate(stride, num_spatial_dims, "strides")
self.w_init = w_init
self.b_init = b_init or jnp.zeros
self.mask = mask
self.feature_group_count = feature_group_count
self.lhs_dilation = utils.replicate(1, num_spatial_dims, "lhs_dilation")
self.kernel_dilation = (
utils.replicate(rate, num_spatial_dims, "kernel_dilation"))
self.data_format = data_format
self.channel_index = hk.get_channel_index(data_format)
self.dimension_numbers = to_dimension_numbers(
num_spatial_dims, channels_last=(self.channel_index == -1),
transpose=False)
if isinstance(padding, str):
self.padding = padding.upper()
elif hk.pad.is_padfn(padding):
self.padding = hk.pad.create_from_padfn(padding=padding,
kernel=self.kernel_shape,
rate=self.kernel_dilation,
n=self.num_spatial_dims)
else:
self.padding = hk.pad.create_from_tuple(padding, self.num_spatial_dims)
def __call__(
self,
inputs: jax.Array,
*,
precision: Optional[lax.Precision] = None,
) -> jax.Array:
"""Connects ``ConvND`` layer.
Args:
inputs: An array of shape ``[spatial_dims, C]`` and rank-N+1 if unbatched,
or an array of shape ``[N, spatial_dims, C]`` and rank-N+2 if batched.
precision: Optional :class:`jax.lax.Precision` to pass to
:func:`jax.lax.conv_general_dilated`.
Returns:
An array of shape ``[spatial_dims, output_channels]`` and rank-N+1 if
unbatched, or an array of shape ``[N, spatial_dims, output_channels]``
and rank-N+2 if batched.
"""
unbatched_rank = self.num_spatial_dims + 1
allowed_ranks = [unbatched_rank, unbatched_rank + 1]
if inputs.ndim not in allowed_ranks:
raise ValueError(f"Input to ConvND needs to have rank in {allowed_ranks},"
f" but input has shape {inputs.shape}.")
unbatched = inputs.ndim == unbatched_rank
if unbatched:
inputs = jnp.expand_dims(inputs, axis=0)
if inputs.shape[self.channel_index] % self.feature_group_count != 0:
raise ValueError(f"Inputs channels {inputs.shape[self.channel_index]} "
f"should be a multiple of feature_group_count "
f"{self.feature_group_count}")
w_shape = self.kernel_shape + (
inputs.shape[self.channel_index] // self.feature_group_count,
self.output_channels)
if self.mask is not None and self.mask.shape != w_shape:
raise ValueError("Mask needs to have the same shape as weights. "
f"Shapes are: {self.mask.shape}, {w_shape}")
w_init = self.w_init
if w_init is None:
fan_in_shape = np.prod(w_shape[:-1])
stddev = 1. / np.sqrt(fan_in_shape)
w_init = hk.initializers.TruncatedNormal(stddev=stddev)
w = hk.get_parameter("w", w_shape, inputs.dtype, init=w_init)
if self.mask is not None:
w *= self.mask
out = lax.conv_general_dilated(inputs,
w,
window_strides=self.stride,
padding=self.padding,
lhs_dilation=self.lhs_dilation,
rhs_dilation=self.kernel_dilation,
dimension_numbers=self.dimension_numbers,
feature_group_count=self.feature_group_count,
precision=precision)
if self.with_bias:
if self.channel_index == -1:
bias_shape = (self.output_channels,)
else:
bias_shape = (self.output_channels,) + (1,) * self.num_spatial_dims
b = hk.get_parameter("b", bias_shape, inputs.dtype, init=self.b_init)
b = jnp.broadcast_to(b, out.shape)
out = out + b
if unbatched:
out = jnp.squeeze(out, axis=0)
return out
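# Illustrative usage sketch (hypothetical helper; assumes the public `haiku`
# API, which is deliberately not imported at module scope in this file): a
# strided 2D ConvND applied to a batched NHWC input.
def _example_convnd():
  import haiku  # Deferred import; this file only defines a minimal `hk` shim.

  def fn(x):
    conv = ConvND(num_spatial_dims=2, output_channels=8, kernel_shape=3,
                  stride=2, padding="SAME")
    return conv(x)

  fwd = haiku.transform(fn)
  x = jnp.ones([4, 32, 32, 3])  # NHWC
  params = fwd.init(jax.random.PRNGKey(42), x)
  return fwd.apply(params, None, x).shape  # (4, 16, 16, 8): stride 2 halves H, W.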
class Conv1D(ConvND):
"""One dimensional convolution."""
def __init__(
self,
output_channels: int,
kernel_shape: Union[int, Sequence[int]],
stride: Union[int, Sequence[int]] = 1,
rate: Union[int, Sequence[int]] = 1,
padding: Union[
str, Sequence[tuple[int, int]], hk.pad.PadFn, Sequence[hk.pad.PadFn]
] = "SAME",
with_bias: bool = True,
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None,
data_format: str = "NWC",
mask: Optional[jax.Array] = None,
feature_group_count: int = 1,
name: Optional[str] = None,
):
"""Initializes the module.
Args:
output_channels: Number of output channels.
kernel_shape: The shape of the kernel. Either an integer or a sequence of
length 1.
stride: Optional stride for the kernel. Either an integer or a sequence of
length 1. Defaults to 1.
rate: Optional kernel dilation rate. Either an integer or a sequence of
length 1. 1 corresponds to standard ND convolution,
``rate > 1`` corresponds to dilated convolution. Defaults to 1.
padding: Optional padding algorithm. Either ``VALID`` or ``SAME`` or
a callable or sequence of callables of length 1. Any callables must
take a single integer argument equal to the effective kernel size and
return a list of two integers representing the padding before and after.
See haiku.pad.* for more details and example functions.
Defaults to ``SAME``. See:
https://www.tensorflow.org/xla/operation_semantics#conv_convolution.
with_bias: Whether to add a bias. By default, true.
w_init: Optional weight initialization. By default, truncated normal.
b_init: Optional bias initialization. By default, zeros.
data_format: The data format of the input. Either ``NWC`` or ``NCW``. By
default, ``NWC``.
mask: Optional mask of the weights.
feature_group_count: Optional number of groups in group convolution.
Default value of 1 corresponds to normal dense convolution. If a higher
value is used, convolutions are applied separately to that many groups,
then stacked together. This reduces the number of parameters
and possibly the compute for a given ``output_channels``. See:
https://www.tensorflow.org/xla/operation_semantics#conv_convolution.
name: The name of the module.
"""
super().__init__(
num_spatial_dims=1,
output_channels=output_channels,
kernel_shape=kernel_shape,
stride=stride,
rate=rate,
padding=padding,
with_bias=with_bias,
w_init=w_init,
b_init=b_init,
data_format=data_format,
mask=mask,
feature_group_count=feature_group_count,
name=name)
class Conv2D(ConvND):
"""Two dimensional convolution."""
def __init__(
self,
output_channels: int,
kernel_shape: Union[int, Sequence[int]],
stride: Union[int, Sequence[int]] = 1,
rate: Union[int, Sequence[int]] = 1,
padding: Union[
str, Sequence[tuple[int, int]], hk.pad.PadFn, Sequence[hk.pad.PadFn]
] = "SAME",
with_bias: bool = True,
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None,
data_format: str = "NHWC",
mask: Optional[jax.Array] = None,
feature_group_count: int = 1,
name: Optional[str] = None,
):
"""Initializes the module.
Args:
output_channels: Number of output channels.
kernel_shape: The shape of the kernel. Either an integer or a sequence of
length 2.
stride: Optional stride for the kernel. Either an integer or a sequence of
length 2. Defaults to 1.
rate: Optional kernel dilation rate. Either an integer or a sequence of
length 2. 1 corresponds to standard ND convolution,
``rate > 1`` corresponds to dilated convolution. Defaults to 1.
padding: Optional padding algorithm. Either ``VALID`` or ``SAME`` or
a callable or sequence of callables of length 2. Any callables must
take a single integer argument equal to the effective kernel size and
return a list of two integers representing the padding before and after.
See haiku.pad.* for more details and example functions.
Defaults to ``SAME``. See:
https://www.tensorflow.org/xla/operation_semantics#conv_convolution.
with_bias: Whether to add a bias. By default, true.
w_init: Optional weight initialization. By default, truncated normal.
b_init: Optional bias initialization. By default, zeros.
data_format: The data format of the input. Either ``NHWC`` or ``NCHW``. By
default, ``NHWC``.
mask: Optional mask of the weights.
feature_group_count: Optional number of groups in group convolution.
Default value of 1 corresponds to normal dense convolution. If a higher
value is used, convolutions are applied separately to that many groups,
then stacked together. This reduces the number of parameters
and possibly the compute for a given ``output_channels``. See:
https://www.tensorflow.org/xla/operation_semantics#conv_convolution.
name: The name of the module.
"""
super().__init__(
num_spatial_dims=2,
output_channels=output_channels,
kernel_shape=kernel_shape,
stride=stride,
rate=rate,
padding=padding,
with_bias=with_bias,
w_init=w_init,
b_init=b_init,
data_format=data_format,
mask=mask,
feature_group_count=feature_group_count,
name=name)
class Conv3D(ConvND):
"""Three dimensional convolution."""
def __init__(
self,
output_channels: int,
kernel_shape: Union[int, Sequence[int]],
stride: Union[int, Sequence[int]] = 1,
rate: Union[int, Sequence[int]] = 1,
padding: Union[
str, Sequence[tuple[int, int]], hk.pad.PadFn, Sequence[hk.pad.PadFn]
] = "SAME",
with_bias: bool = True,
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None,
data_format: str = "NDHWC",
mask: Optional[jax.Array] = None,
feature_group_count: int = 1,
name: Optional[str] = None,
):
"""Initializes the module.
Args:
output_channels: Number of output channels.
kernel_shape: The shape of the kernel. Either an integer or a sequence of
length 3.
stride: Optional stride for the kernel. Either an integer or a sequence of
length 3. Defaults to 1.
rate: Optional kernel dilation rate. Either an integer or a sequence of
length 3. 1 corresponds to standard ND convolution,
`rate > 1` corresponds to dilated convolution. Defaults to 1.
padding: Optional padding algorithm. Either ``VALID`` or ``SAME`` or
a callable or sequence of callables of length 3. Any callables must
take a single integer argument equal to the effective kernel size and
return a list of two integers representing the padding before and after.
See haiku.pad.* for more details and example functions.
Defaults to ``SAME``. See:
https://www.tensorflow.org/xla/operation_semantics#conv_convolution.
with_bias: Whether to add a bias. By default, true.
w_init: Optional weight initialization. By default, truncated normal.
b_init: Optional bias initialization. By default, zeros.
data_format: The data format of the input. Either ``NDHWC`` or ``NCDHW``.
By default, ``NDHWC``.
mask: Optional mask of the weights.
feature_group_count: Optional number of groups in group convolution.
Default value of 1 corresponds to normal dense convolution. If a higher
value is used, convolutions are applied separately to that many groups,
then stacked together. This reduces the number of parameters
and possibly the compute for a given ``output_channels``. See:
https://www.tensorflow.org/xla/operation_semantics#conv_convolution.
name: The name of the module.
"""
super().__init__(
num_spatial_dims=3,
output_channels=output_channels,
kernel_shape=kernel_shape,
stride=stride,
rate=rate,
padding=padding,
with_bias=with_bias,
w_init=w_init,
b_init=b_init,
data_format=data_format,
mask=mask,
feature_group_count=feature_group_count,
name=name)
def compute_adjusted_padding(
input_size: int,
output_size: int,
kernel_size: int,
stride: int,
padding: str,
dilation: int = 1,
) -> tuple[int, int]:
"""Computes adjusted padding for desired ConvTranspose `output_size`."""
kernel_size = (kernel_size - 1) * dilation + 1
if padding == "VALID":
expected_input_size = (output_size - kernel_size + stride) // stride
if input_size != expected_input_size:
raise ValueError(f"The expected input size with the current set of input "
f"parameters is {expected_input_size} which doesn't "
f"match the actual input size {input_size}.")
padding_before = 0
elif padding == "SAME":
expected_input_size = (output_size + stride - 1) // stride
if input_size != expected_input_size:
raise ValueError(f"The expected input size with the current set of input "
f"parameters is {expected_input_size} which doesn't "
f"match the actual input size {input_size}.")
padding_needed = max(0,
(input_size - 1) * stride + kernel_size - output_size)
padding_before = padding_needed // 2
else:
raise ValueError(f"`padding` must be 'VALID' or 'SAME'. Passed: {padding}.")
expanded_input_size = (input_size - 1) * stride + 1
padded_out_size = output_size + kernel_size - 1
pad_before = kernel_size - 1 - padding_before
pad_after = padded_out_size - expanded_input_size - pad_before
return (pad_before, pad_after)
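# Worked example (numbers follow from the arithmetic above; the helper is
# illustrative): upsampling an 8-wide input to 16 with a 3-tap kernel,
# stride 2 and "SAME" padding yields explicit (before, after) padding (2, 1).
def _example_adjusted_padding() -> tuple[int, int]:
  return compute_adjusted_padding(input_size=8, output_size=16, kernel_size=3,
                                  stride=2, padding="SAME")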
class ConvNDTranspose(hk.Module):
"""General n-dimensional transposed convolution (aka. deconvolution)."""
def __init__(
self,
num_spatial_dims: int,
output_channels: int,
kernel_shape: Union[int, Sequence[int]],
stride: Union[int, Sequence[int]] = 1,
output_shape: Optional[Union[int, Sequence[int]]] = None,
padding: Union[str, Sequence[tuple[int, int]]] = "SAME",
with_bias: bool = True,
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None,
data_format: str = "channels_last",
mask: Optional[jax.Array] = None,
name: Optional[str] = None,
):
"""Initializes the module.
Args:
num_spatial_dims: The number of spatial dimensions of the input.
output_channels: Number of output channels.
kernel_shape: The shape of the kernel. Either an integer or a sequence of
length ``num_spatial_dims``.
stride: Optional stride for the kernel. Either an integer or a sequence of
length ``num_spatial_dims``. Defaults to 1.
output_shape: Output shape of the spatial dimensions of a transpose
convolution. Can be either an integer or an iterable of integers. If a
`None` value is given, a default shape is automatically calculated.
padding: Optional padding algorithm. Either "VALID" or "SAME".
Defaults to "SAME". See:
https://www.tensorflow.org/xla/operation_semantics#conv_convolution.
with_bias: Whether to add a bias. By default, true.
w_init: Optional weight initialization. By default, truncated normal.
b_init: Optional bias initialization. By default, zeros.
data_format: The data format of the input. Can be either
``channels_first``, ``channels_last``, ``N...C`` or ``NC...``. By
default, ``channels_last``.
mask: Optional mask of the weights.
name: The name of the module.
"""
super().__init__(name=name)
if num_spatial_dims <= 0:
raise ValueError(
"We only support convolution operations for `num_spatial_dims` "
f"greater than 0, received num_spatial_dims={num_spatial_dims}.")
self.num_spatial_dims = num_spatial_dims
self.output_channels = output_channels
self.kernel_shape = (
utils.replicate(kernel_shape, num_spatial_dims, "kernel_shape"))
self.output_shape = output_shape
if self.output_shape is not None:
self.output_shape = (
utils.replicate(output_shape, num_spatial_dims, "output_shape"))
if not isinstance(padding, str):
        raise ValueError("When specifying `output_shape`, ensure that padding "
                         "is 'VALID' or 'SAME'.")
self.with_bias = with_bias
self.stride = utils.replicate(stride, num_spatial_dims, "strides")
self.w_init = w_init
self.b_init = b_init or jnp.zeros
self.mask = mask
# TODO(tomhennigan) Make use of hk.pad.create_from_tuple here?
self.padding = padding
self.data_format = data_format
self.channel_index = hk.get_channel_index(data_format)
self.dimension_numbers = to_dimension_numbers(
num_spatial_dims, channels_last=(self.channel_index == -1),
transpose=True)
def __call__(
self,
inputs: jax.Array,
*,
precision: Optional[lax.Precision] = None,
) -> jax.Array:
"""Computes the transposed convolution of the input.
Args:
inputs: An array of shape ``[spatial_dims, C]`` and rank-N+1 if unbatched,
or an array of shape ``[N, spatial_dims, C]`` and rank-N+2 if batched.
precision: Optional :class:`jax.lax.Precision` to pass to
:func:`jax.lax.conv_transpose`.
Returns:
An array of shape ``[spatial_dims, output_channels]`` and rank-N+1 if
unbatched, or an array of shape ``[N, spatial_dims, output_channels]``
and rank-N+2 if batched.
"""
unbatched_rank = self.num_spatial_dims + 1
allowed_ranks = [unbatched_rank, unbatched_rank + 1]
if inputs.ndim not in allowed_ranks:
raise ValueError(f"Input to ConvNDTranspose needs to have rank in "
f"{allowed_ranks}, but input has shape {inputs.shape}.")
unbatched = inputs.ndim == unbatched_rank
if unbatched:
inputs = jnp.expand_dims(inputs, axis=0)
input_channels = inputs.shape[self.channel_index]
w_shape = self.kernel_shape + (self.output_channels, input_channels)
if self.mask is not None and self.mask.shape != w_shape:
raise ValueError("Mask needs to have the same shape as weights. "
f"Shapes are: {self.mask.shape}, {w_shape}")
w_init = self.w_init
if w_init is None:
fan_in_shape = self.kernel_shape + (input_channels,)
stddev = 1. / np.sqrt(np.prod(fan_in_shape))
w_init = hk.initializers.TruncatedNormal(stddev=stddev)
w = hk.get_parameter("w", w_shape, inputs.dtype, init=w_init)
if self.mask is not None:
w = w * self.mask
padding = self.padding
if self.output_shape is not None:
input_shape = (
inputs.shape[2:] if self.channel_index == 1 else inputs.shape[1:-1])
padding = tuple(map(
lambda i, o, k, s: compute_adjusted_padding(i, o, k, s, self.padding),
input_shape, self.output_shape, self.kernel_shape, self.stride))
out = lax.conv_transpose(inputs,
w,
strides=self.stride,
padding=padding,
dimension_numbers=self.dimension_numbers,
precision=precision)
if self.with_bias:
if self.channel_index == -1:
bias_shape = (self.output_channels,)
else:
bias_shape = (self.output_channels,) + (1,) * self.num_spatial_dims
b = hk.get_parameter("b", bias_shape, inputs.dtype, init=self.b_init)
b = jnp.broadcast_to(b, out.shape)
out = out + b
if unbatched:
out = jnp.squeeze(out, axis=0)
return out
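# Illustrative usage sketch (hypothetical helper; assumes the public `haiku`
# API): requesting an explicit spatial `output_shape` from a strided
# transposed convolution.
def _example_conv_transpose():
  import haiku  # Deferred import; this file only defines a minimal `hk` shim.

  def fn(x):
    deconv = ConvNDTranspose(num_spatial_dims=2, output_channels=16,
                             kernel_shape=3, stride=2, output_shape=(64, 64))
    return deconv(x)

  fwd = haiku.transform(fn)
  x = jnp.ones([2, 32, 32, 8])  # NHWC
  params = fwd.init(jax.random.PRNGKey(0), x)
  return fwd.apply(params, None, x).shape  # (2, 64, 64, 16)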
class Conv1DTranspose(ConvNDTranspose):
"""One dimensional transposed convolution (aka. deconvolution)."""
def __init__(
self,
output_channels: int,
kernel_shape: Union[int, Sequence[int]],
stride: Union[int, Sequence[int]] = 1,
output_shape: Optional[Union[int, Sequence[int]]] = None,
padding: Union[str, Sequence[tuple[int, int]]] = "SAME",
with_bias: bool = True,
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None,
data_format: str = "NWC",
mask: Optional[jax.Array] = None,
name: Optional[str] = None,
):
"""Initializes the module.
Args:
output_channels: Number of output channels.
kernel_shape: The shape of the kernel. Either an integer or a sequence of
length 1.
stride: Optional stride for the kernel. Either an integer or a sequence of
length 1. Defaults to 1.
output_shape: Output shape of the spatial dimensions of a transpose
convolution. Can be either an integer or an iterable of integers. If a
`None` value is given, a default shape is automatically calculated.
padding: Optional padding algorithm. Either ``VALID`` or ``SAME``.
Defaults to ``SAME``. See:
https://www.tensorflow.org/xla/operation_semantics#conv_convolution.
with_bias: Whether to add a bias. By default, true.
w_init: Optional weight initialization. By default, truncated normal.
b_init: Optional bias initialization. By default, zeros.
data_format: The data format of the input. Either ``NWC`` or ``NCW``. By
default, ``NWC``.
mask: Optional mask of the weights.
name: The name of the module.
"""
super().__init__(
num_spatial_dims=1,
output_channels=output_channels,
kernel_shape=kernel_shape,
output_shape=output_shape,
stride=stride,
padding=padding,
with_bias=with_bias,
w_init=w_init,
b_init=b_init,
data_format=data_format,
mask=mask,
name=name)
class Conv2DTranspose(ConvNDTranspose):
"""Two dimensional transposed convolution (aka. deconvolution)."""
def __init__(
self,
output_channels: int,
kernel_shape: Union[int, Sequence[int]],
stride: Union[int, Sequence[int]] = 1,
output_shape: Optional[Union[int, Sequence[int]]] = None,
padding: Union[str, Sequence[tuple[int, int]]] = "SAME",
with_bias: bool = True,
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None,
data_format: str = "NHWC",
mask: Optional[jax.Array] = None,
name: Optional[str] = None,
):
"""Initializes the module.
Args:
output_channels: Number of output channels.
kernel_shape: The shape of the kernel. Either an integer or a sequence of
length 2.
stride: Optional stride for the kernel. Either an integer or a sequence of
length 2. Defaults to 1.
output_shape: Output shape of the spatial dimensions of a transpose
convolution. Can be either an integer or an iterable of integers. If a
`None` value is given, a default shape is automatically calculated.
padding: Optional padding algorithm. Either ``VALID`` or ``SAME``.
Defaults to ``SAME``. See:
https://www.tensorflow.org/xla/operation_semantics#conv_convolution.
with_bias: Whether to add a bias. By default, true.
w_init: Optional weight initialization. By default, truncated normal.
b_init: Optional bias initialization. By default, zeros.
data_format: The data format of the input. Either ``NHWC`` or ``NCHW``. By
default, ``NHWC``.
mask: Optional mask of the weights.
name: The name of the module.
"""
super().__init__(
num_spatial_dims=2,
output_channels=output_channels,
kernel_shape=kernel_shape,
stride=stride,
output_shape=output_shape,
padding=padding,
with_bias=with_bias,
w_init=w_init,
b_init=b_init,
data_format=data_format,
mask=mask,
name=name)
class Conv3DTranspose(ConvNDTranspose):
"""Three dimensional transposed convolution (aka. deconvolution)."""
def __init__(
self,
output_channels: int,
kernel_shape: Union[int, Sequence[int]],
stride: Union[int, Sequence[int]] = 1,
output_shape: Optional[Union[int, Sequence[int]]] = None,
padding: Union[str, Sequence[tuple[int, int]]] = "SAME",
with_bias: bool = True,
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None,
data_format: str = "NDHWC",
mask: Optional[jax.Array] = None,
name: Optional[str] = None,
):
"""Initializes the module.
Args:
output_channels: Number of output channels.
kernel_shape: The shape of the kernel. Either an integer or a sequence of
length 3.
stride: Optional stride for the kernel. Either an integer or a sequence of
length 3. Defaults to 1.
output_shape: Output shape of the spatial dimensions of a transpose
convolution. Can be either an integer or an iterable of integers. If a
`None` value is given, a default shape is automatically calculated.
padding: Optional padding algorithm. Either ``VALID`` or ``SAME``.
Defaults to ``SAME``. See:
https://www.tensorflow.org/xla/operation_semantics#conv_convolution.
with_bias: Whether to add a bias. By default, true.
w_init: Optional weight initialization. By default, truncated normal.
b_init: Optional bias initialization. By default, zeros.
data_format: The data format of the input. Either ``NDHWC`` or ``NCDHW``.
By default, ``NDHWC``.
mask: Optional mask of the weights.
name: The name of the module.
"""
super().__init__(
num_spatial_dims=3,
output_channels=output_channels,
kernel_shape=kernel_shape,
stride=stride,
output_shape=output_shape,
padding=padding,
with_bias=with_bias,
w_init=w_init,
b_init=b_init,
data_format=data_format,
mask=mask,
name=name)
| dm-haiku-main | haiku/_src/conv.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Haiku types."""
import abc
from collections.abc import Mapping, MutableMapping, Sequence
import typing
from typing import Any, Callable, Protocol, runtime_checkable
import jax
# pytype: disable=module-attr
try:
# Using PyType's experimental support for forward references.
Module = typing._ForwardRef("haiku.Module") # pylint: disable=protected-access
except AttributeError:
Module = Any
# pytype: enable=module-attr
Initializer = Callable[[Sequence[int], Any], jax.Array]
Params = Mapping[str, Mapping[str, jax.Array]]
MutableParams = MutableMapping[str, MutableMapping[str, jax.Array]]
State = Mapping[str, Mapping[str, jax.Array]]
MutableState = MutableMapping[str, MutableMapping[str, jax.Array]]
# Missing JAX types.
PRNGKey = jax.Array # pylint: disable=invalid-name
class LiftingModuleType:
"""Parent type of lift.LiftingModule, added here to solve circular dependency."""
class StrictProtocol(Protocol):
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
if Protocol not in cls.__bases__:
base_names = ", ".join(b.__name__ for b in cls.__bases__)
raise TypeError(
f"{cls.__name__} is a Protocol and should not be subclassed by "
"a non-Protocol type. If you intended your subclass to be a "
"protocol then you need to explicitly additionally extend "
f"Protocol: `class {cls.__name__}({base_names}, Protocol)`.")
@runtime_checkable
class ModuleProtocol(StrictProtocol, Protocol):
"""Protocol for Module like types."""
name: str
module_name: str
@abc.abstractmethod
def params_dict(self) -> Mapping[str, jax.Array]:
raise NotImplementedError
@abc.abstractmethod
def state_dict(self) -> Mapping[str, jax.Array]:
raise NotImplementedError
@runtime_checkable
class SupportsCall(ModuleProtocol, Protocol):
"""Protocol for Module like types that are Callable.
Being a protocol means you don't need to explicitly extend this type in order
to support instance checks with it. For example, :class:`Linear` only extends
:class:`Module`, however since it conforms (e.g. implements ``__call__``) to
this protocol you can instance check using it::
>>> assert isinstance(hk.Linear(1), hk.SupportsCall)
"""
@abc.abstractmethod
def __call__(self, *args, **kwargs):
raise NotImplementedError
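# Illustrative sketch (hypothetical helper): the `Params`/`State` aliases
# above are two-level mappings from module name to a bundle of named arrays.
def _example_params() -> Params:
  import jax.numpy as jnp
  return {"linear": {"w": jnp.zeros([3, 2]), "b": jnp.zeros([2])}}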
| dm-haiku-main | haiku/_src/typing.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Pooling Haiku modules."""
from collections.abc import Sequence
from typing import Optional, Union
import warnings
from haiku._src import module
import jax
from jax import lax
import jax.numpy as jnp
import numpy as np
# If you are forking replace this block with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
Module = module.Module
# pylint: enable=invalid-name
del module
def _infer_shape(
x: jax.Array,
size: Union[int, Sequence[int]],
channel_axis: Optional[int] = -1,
) -> tuple[int, ...]:
"""Infer shape for pooling window or strides."""
if isinstance(size, int):
if channel_axis and not 0 <= abs(channel_axis) < x.ndim:
raise ValueError(f"Invalid channel axis {channel_axis} for {x.shape}")
if channel_axis and channel_axis < 0:
channel_axis = x.ndim + channel_axis
return (1,) + tuple(size if d != channel_axis else 1
for d in range(1, x.ndim))
elif len(size) < x.ndim:
# Assume additional dimensions are batch dimensions.
return (1,) * (x.ndim - len(size)) + tuple(size)
else:
assert x.ndim == len(size)
return tuple(size)
_VMAP_SHAPE_INFERENCE_WARNING = (
"When running under vmap, passing an `int` (except for `1`) for "
"`window_shape` or `strides` will result in the wrong shape being inferred "
"because the batch dimension is not visible to Haiku. Please update your "
"code to specify a full unbatched size.\n"
"For example if you had `pool(x, window_shape=3, strides=1)` before, you "
"should now pass `pool(x, window_shape=(3, 3, 1), strides=1)`. \n"
"Haiku will assume that any additional dimensions in your input are "
"batch dimensions, and will pad `window_shape` and `strides` accordingly "
"making your module support both batched and per-example inputs."
)
def _warn_if_unsafe(window_shape, strides):
unsafe = lambda size: isinstance(size, int) and size != 1
if unsafe(window_shape) or unsafe(strides):
warnings.warn(_VMAP_SHAPE_INFERENCE_WARNING, DeprecationWarning)
def max_pool(
value: jax.Array,
window_shape: Union[int, Sequence[int]],
strides: Union[int, Sequence[int]],
padding: str,
channel_axis: Optional[int] = -1,
) -> jax.Array:
"""Max pool.
Args:
value: Value to pool.
window_shape: Shape of the pooling window, same rank as value.
strides: Strides of the pooling window, same rank as value.
padding: Padding algorithm. Either ``VALID`` or ``SAME``.
channel_axis: Axis of the spatial channels for which pooling is skipped.
Returns:
Pooled result. Same rank as value.
"""
if padding not in ("SAME", "VALID"):
raise ValueError(f"Invalid padding '{padding}', must be 'SAME' or 'VALID'.")
_warn_if_unsafe(window_shape, strides)
window_shape = _infer_shape(value, window_shape, channel_axis)
strides = _infer_shape(value, strides, channel_axis)
return lax.reduce_window(value, -jnp.inf, lax.max, window_shape, strides,
padding)
def avg_pool(
value: jax.Array,
window_shape: Union[int, Sequence[int]],
strides: Union[int, Sequence[int]],
padding: str,
channel_axis: Optional[int] = -1,
) -> jax.Array:
"""Average pool.
Args:
value: Value to pool.
window_shape: Shape of the pooling window, same rank as value.
strides: Strides of the pooling window, same rank as value.
padding: Padding algorithm. Either ``VALID`` or ``SAME``.
channel_axis: Axis of the spatial channels for which pooling is skipped.
Returns:
Pooled result. Same rank as value.
Raises:
ValueError: If the padding is not valid.
"""
if padding not in ("SAME", "VALID"):
raise ValueError(f"Invalid padding '{padding}', must be 'SAME' or 'VALID'.")
_warn_if_unsafe(window_shape, strides)
window_shape = _infer_shape(value, window_shape, channel_axis)
strides = _infer_shape(value, strides, channel_axis)
reduce_window_args = (0., lax.add, window_shape, strides, padding)
pooled = lax.reduce_window(value, *reduce_window_args)
if padding == "VALID":
# Avoid the extra reduce_window.
return pooled / np.prod(window_shape)
else:
# Count the number of valid entries at each input point, then use that for
# computing average. Assumes that any two arrays of same shape will be
# padded the same. Avoid broadcasting on axis where pooling is skipped.
shape = [(v if w != 1 else 1) for (v, w) in zip(value.shape, window_shape)]
window_counts = lax.reduce_window(
jnp.ones(shape, value.dtype), *reduce_window_args)
return pooled / window_counts
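# Illustrative usage sketch (hypothetical helper): following the warning
# above, the window and strides are given at the full unbatched rank (HWC for
# an NHWC input), so the extra leading dimension is treated as a batch.
def _example_avg_pool():
  x = jnp.arange(16.0).reshape(1, 4, 4, 1)  # NHWC
  y = avg_pool(x, window_shape=(2, 2, 1), strides=(2, 2, 1), padding="VALID")
  return y.shape  # (1, 2, 2, 1)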
class MaxPool(hk.Module):
"""Max pool.
Equivalent to partial application of :func:`max_pool`.
"""
def __init__(
self,
window_shape: Union[int, Sequence[int]],
strides: Union[int, Sequence[int]],
padding: str,
channel_axis: Optional[int] = -1,
name: Optional[str] = None,
):
"""Max pool.
Args:
window_shape: Shape of the pooling window, same rank as value.
strides: Strides of the pooling window, same rank as value.
padding: Padding algorithm. Either ``VALID`` or ``SAME``.
channel_axis: Axis of the spatial channels for which pooling is skipped.
name: String name for the module.
"""
super().__init__(name=name)
self.window_shape = window_shape
self.strides = strides
self.padding = padding
self.channel_axis = channel_axis
def __call__(self, value: jax.Array) -> jax.Array:
return max_pool(value, self.window_shape, self.strides,
self.padding, self.channel_axis)
class AvgPool(hk.Module):
"""Average pool.
Equivalent to partial application of :func:`avg_pool`.
"""
def __init__(
self,
window_shape: Union[int, Sequence[int]],
strides: Union[int, Sequence[int]],
padding: str,
channel_axis: Optional[int] = -1,
name: Optional[str] = None,
):
"""Average pool.
Args:
window_shape: Shape of the pooling window, same rank as value.
strides: Strides of the pooling window, same rank as value.
padding: Padding algorithm. Either ``VALID`` or ``SAME``.
channel_axis: Axis of the spatial channels for which pooling is skipped.
name: String name for the module.
"""
super().__init__(name=name)
self.window_shape = window_shape
self.strides = strides
self.padding = padding
self.channel_axis = channel_axis
def __call__(self, value: jax.Array) -> jax.Array:
return avg_pool(value, self.window_shape, self.strides,
self.padding, self.channel_axis)
| dm-haiku-main | haiku/_src/pool.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Batch Norm."""
from collections.abc import Sequence
from typing import Optional, Union
from haiku._src import base
from haiku._src import initializers
from haiku._src import module
from haiku._src import moving_averages
from haiku._src import utils
import jax
import jax.numpy as jnp
import numpy as np
# If you are forking replace this with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
get_parameter = base.get_parameter
initializers = initializers
Module = module.Module
ExponentialMovingAverage = moving_averages.ExponentialMovingAverage
get_channel_index = utils.get_channel_index
# pylint: enable=invalid-name
del base, initializers, module, moving_averages, utils
class BatchNorm(hk.Module):
"""Normalizes inputs to maintain a mean of ~0 and stddev of ~1.
See: https://arxiv.org/abs/1502.03167.
There are many different variations for how users want to manage scale and
offset if they require them at all. These are:
- No scale/offset in which case ``create_*`` should be set to ``False`` and
``scale``/``offset`` aren't passed when the module is called.
- Trainable scale/offset in which case ``create_*`` should be set to
``True`` and again ``scale``/``offset`` aren't passed when the module is
called. In this case this module creates and owns the ``scale``/``offset``
variables.
- Externally generated ``scale``/``offset``, such as for conditional
normalization, in which case ``create_*`` should be set to ``False`` and
then the values fed in at call time.
NOTE: ``jax.vmap(hk.transform(BatchNorm))`` will update summary statistics and
normalize values on a per-batch basis; we currently do *not* support
normalizing across a batch axis introduced by vmap.
"""
def __init__(
self,
create_scale: bool,
create_offset: bool,
decay_rate: float,
eps: float = 1e-5,
scale_init: Optional[hk.initializers.Initializer] = None,
offset_init: Optional[hk.initializers.Initializer] = None,
axis: Optional[Sequence[int]] = None,
cross_replica_axis: Optional[Union[str, Sequence[str]]] = None,
cross_replica_axis_index_groups: Optional[Sequence[Sequence[int]]] = None,
data_format: str = "channels_last",
name: Optional[str] = None,
):
"""Constructs a BatchNorm module.
Args:
create_scale: Whether to include a trainable scaling factor.
create_offset: Whether to include a trainable offset.
decay_rate: Decay rate for EMA.
eps: Small epsilon to avoid division by zero variance. Defaults ``1e-5``,
as in the paper and Sonnet.
scale_init: Optional initializer for gain (aka scale). Can only be set
if ``create_scale=True``. By default, ``1``.
offset_init: Optional initializer for bias (aka offset). Can only be set
if ``create_offset=True``. By default, ``0``.
axis: Which axes to reduce over. The default (``None``) signifies that all
but the channel axis should be normalized. Otherwise this is a list of
axis indices which will have normalization statistics calculated.
cross_replica_axis: If not ``None``, it should be a string (or sequence of
strings) representing the axis name(s) over which this module is being
run within a jax map (e.g. ``jax.pmap`` or ``jax.vmap``). Supplying this
argument means that batch statistics are calculated across all replicas
on the named axes.
cross_replica_axis_index_groups: Specifies how devices are grouped. Valid
only within ``jax.pmap`` collectives.
data_format: The data format of the input. Can be either
``channels_first``, ``channels_last``, ``N...C`` or ``NC...``. By
default it is ``channels_last``. See :func:`get_channel_index`.
name: The module name.
"""
super().__init__(name=name)
if not create_scale and scale_init is not None:
raise ValueError("Cannot set `scale_init` if `create_scale=False`")
if not create_offset and offset_init is not None:
raise ValueError("Cannot set `offset_init` if `create_offset=False`")
if (cross_replica_axis is None and
cross_replica_axis_index_groups is not None):
      raise ValueError("`cross_replica_axis` name must be specified "
                       "if `cross_replica_axis_index_groups` are used.")
self.create_scale = create_scale
self.create_offset = create_offset
self.eps = eps
self.scale_init = scale_init or jnp.ones
self.offset_init = offset_init or jnp.zeros
self.axis = axis
self.cross_replica_axis = cross_replica_axis
self.cross_replica_axis_index_groups = cross_replica_axis_index_groups
self.channel_index = hk.get_channel_index(data_format)
self.mean_ema = hk.ExponentialMovingAverage(decay_rate, name="mean_ema")
self.var_ema = hk.ExponentialMovingAverage(decay_rate, name="var_ema")
def __call__(
self,
inputs: jax.Array,
is_training: bool,
test_local_stats: bool = False,
scale: Optional[jax.Array] = None,
offset: Optional[jax.Array] = None,
) -> jax.Array:
"""Computes the normalized version of the input.
Args:
inputs: An array, where the data format is ``[..., C]``.
is_training: Whether this is during training.
test_local_stats: Whether local stats are used when is_training=False.
scale: An array up to n-D. The shape of this tensor must be broadcastable
to the shape of ``inputs``. This is the scale applied to the normalized
inputs. This cannot be passed in if the module was constructed with
``create_scale=True``.
offset: An array up to n-D. The shape of this tensor must be broadcastable
to the shape of ``inputs``. This is the offset applied to the normalized
inputs. This cannot be passed in if the module was constructed with
``create_offset=True``.
Returns:
      The array, normalized over the reduction axes (by default, all axes
      except the channel axis).
"""
if self.create_scale and scale is not None:
raise ValueError(
"Cannot pass `scale` at call time if `create_scale=True`.")
if self.create_offset and offset is not None:
raise ValueError(
"Cannot pass `offset` at call time if `create_offset=True`.")
channel_index = self.channel_index
if channel_index < 0:
channel_index += inputs.ndim
if self.axis is not None:
axis = self.axis
else:
axis = [i for i in range(inputs.ndim) if i != channel_index]
if is_training or test_local_stats:
mean = jnp.mean(inputs, axis, keepdims=True)
mean_of_squares = jnp.mean(jnp.square(inputs), axis, keepdims=True)
if self.cross_replica_axis:
mean = jax.lax.pmean(
mean,
axis_name=self.cross_replica_axis,
axis_index_groups=self.cross_replica_axis_index_groups)
mean_of_squares = jax.lax.pmean(
mean_of_squares,
axis_name=self.cross_replica_axis,
axis_index_groups=self.cross_replica_axis_index_groups)
var = mean_of_squares - jnp.square(mean)
else:
mean = self.mean_ema.average.astype(inputs.dtype)
var = self.var_ema.average.astype(inputs.dtype)
if is_training:
self.mean_ema(mean)
self.var_ema(var)
w_shape = [1 if i in axis else inputs.shape[i] for i in range(inputs.ndim)]
w_dtype = inputs.dtype
if self.create_scale:
scale = hk.get_parameter("scale", w_shape, w_dtype, self.scale_init)
elif scale is None:
scale = np.ones([], dtype=w_dtype)
if self.create_offset:
offset = hk.get_parameter("offset", w_shape, w_dtype, self.offset_init)
elif offset is None:
offset = np.zeros([], dtype=w_dtype)
eps = jax.lax.convert_element_type(self.eps, var.dtype)
inv = scale * jax.lax.rsqrt(var + eps)
return (inputs - mean) * inv + offset
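

# -----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the library source). Because
# ``BatchNorm`` keeps its moving averages in Haiku state, it has to be used
# under ``hk.transform_with_state`` with an ``is_training`` flag threaded
# through ``apply``. The function name, shapes and hyperparameters below are
# example choices only. (For multi-device training, ``cross_replica_axis`` can
# name a ``jax.pmap`` axis so statistics are averaged across replicas.)
# -----------------------------------------------------------------------------
if __name__ == "__main__":
  import haiku as hk  # pylint: disable=g-import-not-at-top
  import jax
  import jax.numpy as jnp

  def forward(x, is_training):
    # Normalize over the batch axis, with a learned scale and offset.
    bn = hk.BatchNorm(create_scale=True, create_offset=True, decay_rate=0.99)
    return bn(x, is_training=is_training)

  forward = hk.transform_with_state(forward)
  x = jnp.ones([8, 4])
  params, state = forward.init(jax.random.PRNGKey(0), x, is_training=True)
  # Training step: local batch statistics are used and the EMAs are updated.
  y, state = forward.apply(params, state, None, x, is_training=True)
  # Evaluation: the accumulated moving averages are used instead.
  y_eval, _ = forward.apply(params, state, None, x, is_training=False)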
|
dm-haiku-main
|
haiku/_src/batch_norm.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.attention."""
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import attention
from haiku._src import initializers
from haiku._src import test_utils
from haiku._src import transform
import jax
import jax.numpy as jnp
class MultiHeadAttentionTest(parameterized.TestCase):
@parameterized.named_parameters(
("batch = 1 & seq len = 1", 1, 1, 3, 5, 7, 11, 13),
("batch = 1 & seq len > 1", 1, 2, 3, 5, 7, 11, 13),
("batch > 1 & seq len > 1", 2, 3, 5, 7, 11, 13, 17),
)
@test_utils.transform_and_run
def test_shapes_batch(
self, batch_size, seq_len, embed_size, d_key, num_heads, d_value, d_out):
query = key = value = jnp.zeros((batch_size, seq_len, embed_size))
mha = attention.MultiHeadAttention(
key_size=d_key, num_heads=num_heads, value_size=d_value,
model_size=d_out, w_init_scale=1.0)(query, key, value)
self.assertEqual(mha.shape, (batch_size, seq_len, d_out))
@parameterized.named_parameters(
("seq len = 1", 1, 2, 3, 5, 7, 11),
("seq len > 1", 2, 3, 5, 7, 11, 13),
)
@test_utils.transform_and_run
def test_shapes_single(
self, seq_len, embed_size, d_key, num_heads, d_value, d_out):
query = key = value = jnp.zeros((seq_len, embed_size))
mha = attention.MultiHeadAttention(
key_size=d_key, num_heads=num_heads, value_size=d_value,
model_size=d_out, w_init_scale=1.0)(query, key, value)
self.assertEqual(mha.shape, (seq_len, d_out))
@test_utils.transform_and_run
def test_mask_arg(self):
seq_len = 3
embed_size = 2
model_size = 15
query = key = value = jnp.zeros((seq_len, embed_size))
causal_mask = jnp.tril(jnp.ones((seq_len, seq_len)))
causal_mask = causal_mask[None, :, :]
mha = attention.MultiHeadAttention(
key_size=7, num_heads=11, value_size=13,
model_size=model_size, w_init_scale=1.0)(
query, key, value, mask=causal_mask)
self.assertEqual(mha.shape, (seq_len, model_size))
@test_utils.transform_and_run
def test_different_seq_lengths(self):
query = jnp.zeros((2, 3))
key = value = jnp.zeros((5, 3))
mha = attention.MultiHeadAttention(
key_size=7, num_heads=11, value_size=13,
model_size=15, w_init_scale=1.0)(query, key, value)
self.assertEqual(mha.shape, (2, 15))
@test_utils.transform_and_run
def test_default_sizes(self):
mha = attention.MultiHeadAttention(
key_size=3, num_heads=5, w_init_scale=1.0)
self.assertEqual(mha.value_size, mha.key_size)
self.assertEqual(mha.model_size, mha.key_size * mha.num_heads)
def test_vmap(self):
def f(query, key, value):
return attention.MultiHeadAttention(
key_size=3, num_heads=5, w_init_scale=1.0)(query, key, value)
rng = jax.random.PRNGKey(42)
init_rng, apply_rng, vmap_rng = jax.random.split(rng, num=3)
init, apply = transform.transform(f)
# Transform as single-instance function:
query = key = value = jnp.zeros((7, 11))
params = init(init_rng, query, key, value)
y = apply(params, apply_rng, query, key, value)
self.assertEqual(y.shape, (7, 15,))
# Use vmap to get batched function:
vapply = jax.vmap(apply, in_axes=(None, 0, 0, 0, 0), out_axes=0)
query = key = value = jnp.zeros((13, 7, 11)) # prepend batch axis
rngs = jax.random.split(vmap_rng, 13) # give each instance its own rng
y = vapply(params, rngs, query, key, value)
self.assertEqual(y.shape, (13, 7, 15))
@test_utils.transform_and_run
def test_w_init(self):
with self.assertRaisesRegex(ValueError, "provide a weight initializer"):
attention.MultiHeadAttention(2, 3)
with self.assertRaisesRegex(ValueError, "provide only `w_init`"):
attention.MultiHeadAttention(
2, 3, w_init_scale=5, w_init=initializers.Constant(0))
w_init = initializers.Constant(3)
mha1 = attention.MultiHeadAttention(2, 3, w_init=w_init)
self.assertIs(mha1.w_init, w_init)
mha2 = attention.MultiHeadAttention(2, 3, w_init_scale=5)
self.assertIsInstance(mha2.w_init, initializers.VarianceScaling)
@test_utils.transform_and_run
def test_b_init(self):
w_init = initializers.Constant(3)
b_init = initializers.Constant(4)
mha1 = attention.MultiHeadAttention(2, 3, w_init=w_init, b_init=b_init)
self.assertIs(mha1.b_init, b_init)
@parameterized.named_parameters(
("with_bias_true", True, 2),
("with_bias_false", False, 1),
)
def test_with_bias(self, with_bias, expected_params):
def f(key, query, value):
w_init = initializers.Constant(3)
mha1 = attention.MultiHeadAttention(2, 3, w_init=w_init,
with_bias=with_bias)
return mha1(key, query, value)
rng = jax.random.PRNGKey(42)
init, _ = transform.transform(f)
query = key = jnp.zeros((5, 3))
value = jnp.zeros((5, 10))
params = init(rng, key, query, value)
for module_params in params.values():
self.assertLen(module_params, expected_params)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/attention_test.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transform a set of Haiku-using functions which use overlapping params."""
# pylint: disable=unnecessary-lambda
import dataclasses
import functools
import inspect
from typing import Any, Callable, NamedTuple, Optional, TypeVar
from haiku._src import analytics
from haiku._src import transform
from haiku._src import typing
import jax
# If you are forking replace this block with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
transform_with_state = transform.transform_with_state
Transformed = transform.Transformed
TransformedWithState = transform.TransformedWithState
Params = typing.Params
State = typing.State
MutableParams = typing.MutableParams
MutableState = typing.MutableState
# pylint: enable=invalid-name
# TODO(slebedev): This makes the module non-forkable.
_transform = transform
del transform, typing
TemplateFn = Callable[..., Any]
TreeOfApplyFns = Any
class MultiTransformed(NamedTuple):
"""Holds a collection of pure functions.
Attributes:
init: A pure function: ``params = init(rng, *a, **k)``
apply: A JAX tree of pure functions each with the signature:
``out = apply(params, rng, *a, **k)``.
See also:
- :class:`Transformed`: Single apply variant of multi-transform.
- :class:`MultiTransformedWithState`: Multi apply with state variant.
"""
# Args: [Optional[PRNGKey], ...]
init: Callable[..., hk.MutableParams]
# PyTree[Callable[[hk.Params, Optional[PRNGKey], ...], Any]]
apply: Any
class MultiTransformedWithState(NamedTuple):
"""Holds a collection of pure functions.
Attributes:
init: A pure function: ``params, state = init(rng, *a, **k)``
apply: A JAX tree of pure functions each with the signature:
``out, state = apply(params, state, rng, *a, **k)``.
See also:
- :class:`TransformedWithState`: Single apply variant of multi-transform.
    - :class:`MultiTransformed`: Multi apply variant without state.
"""
# Args: [Optional[PRNGKey], ...]
init: Callable[..., tuple[hk.MutableParams, hk.MutableState]]
# PyTree[Callable[[hk.Params, hk.State, Optional[PRNGKey], ...],
# Tuple[Any, hk.MutableState]]]
apply: Any
@dataclasses.dataclass
class Box:
"""Holds a Python value and has no leaves."""
python_value: Any
jax.tree_util.register_pytree_node(
Box, lambda b: ([], b.python_value), lambda v, _: Box(v))
def multi_transform_with_state(
f: Callable[[], tuple[TemplateFn, TreeOfApplyFns]],
) -> MultiTransformedWithState:
"""Transforms a collection of functions using Haiku into pure functions.
See :func:`multi_transform` for more details.
Example:
>>> def f():
... encoder = hk.Linear(1, name="encoder")
... decoder = hk.Linear(1, name="decoder")
...
... def init(x):
... z = encoder(x)
... return decoder(z)
...
... return init, (encoder, decoder)
>>> f = hk.multi_transform_with_state(f)
>>> rng = jax.random.PRNGKey(42)
>>> x = jnp.ones([1, 1])
>>> params, state = f.init(rng, x)
>>> jax.tree_util.tree_map(jnp.shape, params)
{'decoder': {'b': (1,), 'w': (1, 1)},
'encoder': {'b': (1,), 'w': (1, 1)}}
>>> encode, decode = f.apply
>>> z, state = encode(params, state, None, x)
>>> y, state = decode(params, state, None, z)
Args:
f: Function returning a "template" function and an arbitrary
tree of functions using modules connected in the template function.
Returns:
An ``init`` function and a tree of pure ``apply`` functions.
See also:
- :func:`transform_with_state`: Transform a single apply function.
- :func:`multi_transform`: Transform multiple apply functions without state.
"""
analytics.log_once('multi_transform_with_state')
def init_fn(*args, **kwargs) -> tuple[hk.MutableParams, hk.MutableState]:
"""Returns initial state for the transformed functions."""
return f()[0](*args, **kwargs)
init_fn = hk.transform_with_state(init_fn).init
def apply_fn_i(i):
def apply_fn(*args, **kwargs):
"""Applies the transformed function at the given inputs."""
return jax.tree_util.tree_leaves(f()[1])[i](*args, **kwargs)
return apply_fn
# We need to find out the structure of f()[1], including how many
# functions there are, so that we can transform them individually and repack
# into the same tree structure. It's valid for modules to declare parameters
# in their constructor, so we need to create something that looks like
# hk.Params in order to do this. `jax.eval_shape` interprets the function
# abstractly, ie no real params are created, and we don't need to touch the
# accelerator. This means hardcoding the RNG below is fine.
def get_output_treedef() -> Box:
rng = jax.random.PRNGKey(42) # This is fine, see above
fns = hk.transform_with_state(lambda: f()[1])
apply_fns, _ = fns.apply(*fns.init(rng), rng)
return Box(jax.tree_util.tree_structure(apply_fns))
output_treedef = jax.eval_shape(get_output_treedef).python_value
apply_fns = make_tree(lambda i: hk.transform_with_state(apply_fn_i(i)).apply,
output_treedef)
return MultiTransformedWithState(init_fn, apply_fns)
def multi_transform(
f: Callable[[], tuple[TemplateFn, TreeOfApplyFns]],
) -> MultiTransformed:
"""Transforms a collection of functions using Haiku into pure functions.
In many scenarios we have several modules which are used either as primitives
for several Haiku modules/functions, or whose pure versions are to be reused
in downstream code. This utility enables this by applying
:func:`transform` to an arbitrary tree of Haiku functions which share modules
and have a common ``init`` function.
``f`` is expected to return a tuple of two elements. First is a ``template``
Haiku function which provides an example of how all internal Haiku modules are
connected. This function is used to create a common ``init`` function (with
your parameters).
The second object is an arbitrary tree of Haiku functions all of which reuse
the modules connected in the ``template`` function. These functions are
transformed to pure ``apply`` functions.
Example:
>>> def f():
... encoder = hk.Linear(1, name="encoder")
... decoder = hk.Linear(1, name="decoder")
...
... def init(x):
... z = encoder(x)
... return decoder(z)
...
... return init, (encoder, decoder)
>>> f = hk.multi_transform(f)
>>> rng = jax.random.PRNGKey(42)
>>> x = jnp.ones([1, 1])
>>> params = f.init(rng, x)
>>> jax.tree_util.tree_map(jnp.shape, params)
{'decoder': {'b': (1,), 'w': (1, 1)},
'encoder': {'b': (1,), 'w': (1, 1)}}
>>> encode, decode = f.apply
>>> z = encode(params, None, x)
>>> y = decode(params, None, z)
Args:
f: A factory function that returns two functions, firstly a common init
function that creates all modules, and secondly a pytree of apply
functions which make use of those modules.
Returns:
A :class:`MultiTransformed` instance which contains a pure init function
that creates all parameters, and a pytree of pure apply functions that
given the params apply the given function.
See also:
:func:`multi_transform_with_state`: Equivalent for modules using state.
"""
analytics.log_once('multi_transform')
f = multi_transform_with_state(f)
f = without_state(f)
return f
def without_state(f: MultiTransformedWithState) -> MultiTransformed:
"""Converts ``MultiTransformedWithState`` to ``MultiTransformed``."""
def init_fn(rng, *args, **kwargs) -> hk.MutableParams:
params, state = f.init(rng, *args, **kwargs)
if state:
raise ValueError(
'If your transformed function uses `hk.{get,set}_state` then use '
'`hk.multi_transform_with_state`.')
return params
def apply_without_state(orig_apply_fn):
def apply_fn(params: hk.Params, rng, *args, **kwargs):
out, state = orig_apply_fn(params, None, rng, *args, **kwargs)
if state:
raise ValueError(
'If your transformed function uses `hk.{get,set}_state` then use '
'`hk.multi_transform_with_state`.')
return out
return apply_fn
apply_fns = jax.tree_util.tree_map(apply_without_state, f.apply)
return MultiTransformed(init_fn, apply_fns)
TransformedT = TypeVar('TransformedT',
hk.Transformed,
hk.TransformedWithState,
MultiTransformed,
MultiTransformedWithState)
def without_apply_rng(f: TransformedT) -> TransformedT:
"""Removes the rng argument from the apply function.
This is a convenience wrapper that makes the ``rng`` argument to
``f.apply`` default to ``None``. This is useful when ``f`` doesn't actually
use random numbers as part of its computation, such that the ``rng`` argument
wouldn't be used. Note that if ``f`` `does` use random numbers, this will
cause an error to be thrown complaining that ``f`` needs a non-None PRNGKey.
Args:
f: A transformed function.
Returns:
The same transformed function, with a modified ``apply``.
"""
def check_rng_kwarg(kwargs):
if 'rng' in kwargs:
raise TypeError(
'Haiku transform adds three arguments (params, state, rng) to apply. '
'If the functions you are transforming use the same names you must '
          'pass them positionally (e.g. `f.apply(.., my_rng)`) and not by '
          'name (e.g. `f.apply(.., rng=my_rng)`).')
if isinstance(f, hk.TransformedWithState):
def apply_fn(params, state, *args, **kwargs):
check_rng_kwarg(kwargs)
return f.apply(params, state, None, *args, **kwargs)
apply_fn.__signature__ = _transform.sig_replace_leading_parameters(
inspect.signature(f.apply), 3, [
inspect.Parameter(
'params',
inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=Optional[hk.Params]),
inspect.Parameter(
'state',
inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=Optional[hk.State]),
])
f_new = hk.TransformedWithState(init=f.init, apply=apply_fn)
_transform.tie_in_original_fn(f, f_new.init, f_new.apply)
elif isinstance(f, hk.Transformed):
def apply_fn(params, *args, **kwargs):
check_rng_kwarg(kwargs)
return f.apply(params, None, *args, **kwargs)
apply_fn.__signature__ = _transform.sig_replace_leading_parameters(
inspect.signature(f.apply), 2, [
inspect.Parameter(
'params',
inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=Optional[hk.Params]),
])
f_new = hk.Transformed(init=f.init, apply=apply_fn)
_transform.tie_in_original_fn(f, f_new.init, f_new.apply)
elif isinstance(f, MultiTransformedWithState):
def make_new_apply_fn(apply_fn, params, state, *args, **kwargs):
check_rng_kwarg(kwargs)
return apply_fn(params, state, None, *args, **kwargs)
apply_fn = jax.tree_util.tree_map(
lambda fn: functools.partial(make_new_apply_fn, fn), f.apply)
f_new = MultiTransformedWithState(init=f.init, apply=apply_fn)
elif isinstance(f, MultiTransformed):
def make_new_apply_fn(apply_fn, params, *args, **kwargs):
check_rng_kwarg(kwargs)
return apply_fn(params, None, *args, **kwargs)
apply_fn = jax.tree_util.tree_map(
lambda fn: functools.partial(make_new_apply_fn, fn), f.apply)
f_new = MultiTransformed(init=f.init, apply=apply_fn)
else:
raise ValueError('Must be called with the result of `hk.transformed`, '
'`hk.multi_transform`, `hk.transformed_with_state` or '
'`hk.multi_transform_with_state`, '
f'actually called with {type(f)}')
return f_new
def make_tree(f: Callable[[int], Any], treedef: jax.tree_util.PyTreeDef):
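  """Builds a tree matching ``treedef`` by applying ``f`` to each leaf index."""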
leaves = list(map(f, range(treedef.num_leaves)))
return jax.tree_util.tree_unflatten(treedef, leaves)
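

# -----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the library source). For a network
# that draws no random numbers at apply time, ``without_apply_rng`` lets the
# caller drop the ``rng`` argument instead of passing ``None`` explicitly.
# The network, shapes and seed below are arbitrary example choices.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
  import haiku as hk  # pylint: disable=g-import-not-at-top
  import jax
  import jax.numpy as jnp

  def net(x):
    return hk.Linear(2)(x)

  net = hk.without_apply_rng(hk.transform(net))
  x = jnp.ones([1, 3])
  params = net.init(jax.random.PRNGKey(0), x)  # init still needs an rng.
  y = net.apply(params, x)  # No rng argument needed at apply time.
  assert y.shape == (1, 2)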
|
dm-haiku-main
|
haiku/_src/multi_transform.py
|
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.config."""
from concurrent import futures
import inspect
import threading
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import config
class ConfigTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self._before = config.main_thread_config
config.tls.config = config.main_thread_config = config.Config.default()
def tearDown(self):
super().tearDown()
config.tls.config = config.main_thread_config = self._before
del self._before
def test_check_jax_usage(self):
cfg = config.get_config()
config.check_jax_usage()
self.assertTrue(cfg.check_jax_usage)
config.check_jax_usage(False)
self.assertFalse(cfg.check_jax_usage)
config.check_jax_usage(True)
self.assertTrue(cfg.check_jax_usage)
@parameterized.parameters(True, False)
def test_inherits_default_from_main_thread(self, default):
e1 = threading.Event()
e2 = threading.Event()
config.get_config().check_jax_usage = default
def f():
self.assertEqual(config.get_config().check_jax_usage, default)
config.get_config().check_jax_usage = True
e1.set()
e2.wait()
self.assertTrue(config.get_config().check_jax_usage)
def g():
e1.wait()
self.assertEqual(config.get_config().check_jax_usage, default)
config.get_config().check_jax_usage = False
e2.set()
self.assertFalse(config.get_config().check_jax_usage)
with futures.ThreadPoolExecutor() as tpe:
f1 = tpe.submit(g)
f2 = tpe.submit(f)
f2.result()
f1.result()
self.assertEqual(config.get_config().check_jax_usage, default)
def test_with_config(self):
ran_f = [False]
@config.with_config(check_jax_usage=False)
def f():
ran_f[0] = True
return config.get_config().check_jax_usage
cfg = config.get_config()
cfg.check_jax_usage = True
self.assertFalse(f())
self.assertTrue(ran_f[0])
self.assertTrue(cfg.check_jax_usage)
def test_assign(self):
cfg = config.get_config()
cfg.check_jax_usage = False
with config.assign(check_jax_usage=True):
self.assertTrue(cfg.check_jax_usage)
self.assertFalse(cfg.check_jax_usage)
def test_assign_with_error(self):
cfg = config.get_config()
cfg.check_jax_usage = False
try:
with config.assign(check_jax_usage=True):
self.assertTrue(cfg.check_jax_usage)
# Raise an exception to test that config is reset on error.
raise ValueError("expected")
except ValueError:
pass
self.assertFalse(cfg.check_jax_usage)
def test_context_matches_set(self):
context_sig = inspect.signature(config.context)
set_sig = inspect.signature(config.set)
self.assertEqual(context_sig.parameters, set_sig.parameters)
def test_context(self):
cfg = config.get_config()
cfg.check_jax_usage = False
with config.context(check_jax_usage=True):
self.assertTrue(cfg.check_jax_usage)
self.assertFalse(cfg.check_jax_usage)
def test_set(self):
cfg = config.get_config()
cfg.check_jax_usage = False
config.set(check_jax_usage=True)
self.assertTrue(cfg.check_jax_usage)
config.set(check_jax_usage=False)
self.assertFalse(cfg.check_jax_usage)
def test_rng_reserve_size(self):
cfg = config.get_config()
prev = config.rng_reserve_size(3)
self.assertEqual(cfg.rng_reserve_size, 3)
self.assertEqual(prev, 1)
prev = config.rng_reserve_size(10)
self.assertEqual(cfg.rng_reserve_size, 10)
self.assertEqual(prev, 3)
def test_rng_reserve_size_error(self):
with self.assertRaisesRegex(ValueError, "RNG reserve size"):
config.rng_reserve_size(0)
with self.assertRaisesRegex(ValueError, "RNG reserve size"):
config.rng_reserve_size(-1)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/config_test.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.bias."""
from absl.testing import absltest
from haiku._src import bias
from haiku._src import test_utils
from haiku._src import transform
import jax.numpy as jnp
import numpy as np
class BiasTest(absltest.TestCase):
@test_utils.transform_and_run
def test_output_shape(self):
mod = bias.Bias(output_size=(2 * 2,))
with self.assertRaisesRegex(ValueError, "Input shape must be [(]-1, 4[)]"):
mod(jnp.ones([2, 2, 2]))
@test_utils.transform_and_run
def test_output_size_valid(self):
mod = bias.Bias(output_size=(2 * 2,))
mod(jnp.ones([2, 2 * 2]))
def test_bias_dims_scalar(self):
def f():
mod = bias.Bias(bias_dims=())
return mod(jnp.ones([1, 2, 3, 4]))
params = transform.transform(f).init(None)
self.assertEmpty(params["bias"]["b"].shape)
def test_bias_dims_custom(self):
b, d1, d2, d3 = range(1, 5)
def f():
mod = bias.Bias(bias_dims=[1, 3])
out = mod(jnp.ones([b, d1, d2, d3]))
self.assertEqual(mod.bias_shape, (d1, 1, d3))
return out
f = transform.transform(f)
params = f.init(None)
out = f.apply(params, None)
self.assertEqual(params["bias"]["b"].shape, (d1, 1, d3))
self.assertEqual(out.shape, (b, d1, d2, d3))
def test_bias_dims_negative_out_of_order(self):
def f():
mod = bias.Bias(bias_dims=[-1, -2])
mod(jnp.ones([1, 2, 3]))
self.assertEqual(mod.bias_shape, (2, 3))
params = transform.transform(f).init(None)
self.assertEqual(params["bias"]["b"].shape, (2, 3))
@test_utils.transform_and_run
def test_bias_dims_invalid(self):
mod = bias.Bias(bias_dims=[1, 5])
with self.assertRaisesRegex(ValueError,
"5 .* out of range for input of rank 3"):
mod(jnp.ones([1, 2, 3]))
@test_utils.transform_and_run
def test_b_init_defaults_to_zeros(self):
mod = bias.Bias()
x = jnp.ones([1, 1])
y = mod(x)
np.testing.assert_allclose(y, x)
@test_utils.transform_and_run
def test_b_init_custom(self):
mod = bias.Bias(b_init=jnp.ones)
x = jnp.ones([1, 1])
y = mod(x)
np.testing.assert_allclose(y, x + 1)
@test_utils.transform_and_run
def test_name(self):
mod = bias.Bias(name="foo")
self.assertEqual(mod.name, "foo")
@test_utils.transform_and_run
def test_multiplier(self):
mod = bias.Bias(b_init=jnp.ones)
y = mod(jnp.ones([1, 1]), multiplier=-1)
np.testing.assert_allclose(jnp.sum(y), 0)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/bias_test.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Haiku module."""
import collections
from collections.abc import Iterable, Iterator, Mapping, Sequence
import contextlib
import functools
import itertools as it
from typing import Any, Callable, NamedTuple, Optional, TypeVar, Union
import warnings
from haiku._src import config
from haiku._src import data_structures
from haiku._src.typing import ( # pylint: disable=g-multiple-import
Initializer,
LiftingModuleType,
Module,
MutableParams,
MutableState,
PRNGKey,
Params,
State,
)
import jax
from jax import config as jax_config
from jax import core as jax_core
import jax.numpy as jnp
try:
from typing import final # pylint: disable=g-import-not-at-top
except ImportError:
# Pre Python 3.8.
final = lambda cls: cls
T = TypeVar("T")
Stack = data_structures.Stack[T]
ThreadLocalStack = data_structures.ThreadLocalStack[T]
ModuleState = collections.namedtuple("ModuleState", ("module", "method_name"))
StatePair = collections.namedtuple("StatePair", ("initial", "current"))
# TODO(tomhennigan) Should creator_stack be part of frame?
frame_stack: ThreadLocalStack["Frame"] = ThreadLocalStack()
context_stack: ThreadLocalStack["HaikuContext"] = ThreadLocalStack()
param_creator_stack: ThreadLocalStack["Creator"] = ThreadLocalStack()
state_creator_stack: ThreadLocalStack["Creator"] = ThreadLocalStack()
param_getter_stack: ThreadLocalStack["Getter"] = ThreadLocalStack()
state_getter_stack: ThreadLocalStack["Getter"] = ThreadLocalStack()
state_setter_stack: ThreadLocalStack["Setter"] = ThreadLocalStack()
closure_boundary_stack: ThreadLocalStack[int] = ThreadLocalStack()
class JaxTraceLevel(NamedTuple):
"""Comparable object capturing trace state in JAX."""
opaque: Any
@classmethod
def current(cls):
# TODO(tomhennigan): Remove once a version of JAX is released incl PR#9423.
trace_stack = jax_core.thread_local_state.trace_state.trace_stack.stack
top_type = trace_stack[0].trace_type
level = trace_stack[-1].level
sublevel = jax_core.cur_sublevel()
return JaxTraceLevel(opaque=(top_type, level, sublevel))
frame_ids = it.count()
class Frame(NamedTuple):
"""A frame represents all of the per-transform values in Haiku."""
# JAX values.
params: MutableParams
state: Optional[MutableState]
rng_stack: Stack[Optional["PRNGSequence"]]
# Pure python values.
freeze_params: bool
module_stack: Stack[ModuleState]
counter_stack: Stack[collections.Counter]
used_names_stack: Stack[set[str]]
jax_trace_stack: Stack[JaxTraceLevel]
frame_id: int
@property
def params_frozen(self):
return self.freeze_params
@classmethod
def create(cls, params, state, rng: Optional["PRNGSequence"],
freeze_params: bool) -> "Frame":
"""Creates a new frame."""
frame = Frame(params=params,
state=state,
rng_stack=Stack(),
freeze_params=freeze_params,
module_stack=Stack(),
counter_stack=Stack(),
used_names_stack=Stack(),
jax_trace_stack=Stack(),
frame_id=next(frame_ids))
frame.rng_stack.push(rng)
frame.counter_stack.push(collections.Counter())
frame.used_names_stack.push(set())
frame.jax_trace_stack.push(JaxTraceLevel.current())
return frame
def evolve(self, params, state, rng, *, decoupled=True) -> "Frame":
"""Creates a new frame with JAX state as passed in."""
rng_stack = self.rng_stack.clone()
rng_stack.push(rng)
if decoupled:
module_stack = self.module_stack.clone()
counter_stack = self.counter_stack.map(collections.Counter)
used_names_stack = self.used_names_stack.map(set)
jax_trace_stack = self.jax_trace_stack.clone()
else:
module_stack = self.module_stack
counter_stack = self.counter_stack
used_names_stack = self.used_names_stack
jax_trace_stack = self.jax_trace_stack
return Frame(params=params,
state=state,
rng_stack=rng_stack,
freeze_params=self.freeze_params,
module_stack=module_stack,
counter_stack=counter_stack,
used_names_stack=used_names_stack,
jax_trace_stack=jax_trace_stack,
frame_id=next(frame_ids))
@contextlib.contextmanager
def module(self, module_state: ModuleState):
with self.module_stack(module_state), \
self.counter_stack(collections.Counter()), \
self.used_names_stack(set()):
yield
current_frame = frame_stack.peek
current_context = context_stack.peek
class HaikuContext:
"""Collects and injects values for computations."""
__slots__ = ("__params", "__state", "__rng", "__freeze_params",
"__expected_stack", "__names", "__counter",
"__teardown_callbacks")
def __init__(
self,
params: MutableParams,
state: MutableState,
rng: Optional["PRNGSequence"],
freeze_params: bool,
):
# NOTE: Using __ vs. _ since these are "really" private (as in using these
# properties directly could result in broken behaviour).
self.__params = params
self.__state = state
self.__rng = rng
self.__freeze_params = freeze_params
self.__expected_stack = ThreadLocalStack()
self.__names = set()
self.__counter = collections.Counter()
self.__teardown_callbacks = []
def collect_params(self) -> MutableParams:
return data_structures.to_haiku_dict(self.__params)
def collect_initial_state(self) -> MutableState:
return extract_state(self.__state, initial=True)
def collect_state(self) -> MutableState:
return extract_state(self.__state, initial=False)
def add_teardown_callback(self, f):
self.__teardown_callbacks.append(f)
def __enter__(self):
frame = Frame.create(params=self.__params,
state=self.__state,
rng=self.__rng,
freeze_params=self.__freeze_params)
frame.used_names_stack.push(self.__names)
frame.counter_stack.push(self.__counter)
self.__expected_stack.push(frame)
frame_stack.push(frame)
context_stack.push(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
assert frame_stack.pop() is self.__expected_stack.pop()
assert context_stack.pop() is self
if exc_type is None:
for callback in self.__teardown_callbacks:
callback()
def new_context(
*,
params: Optional[Params] = None,
state: Optional[State] = None,
rng: Optional[Union[PRNGKey, int]] = None,
) -> HaikuContext:
"""Collects the results of hk.{get,set}_{parameter,state} calls.
>>> with new_context(rng=jax.random.PRNGKey(42)) as ctx:
... mod = hk.nets.MLP([300, 100, 10])
... y1 = mod(jnp.ones([1, 1]))
>>> assert len(jax.tree_util.tree_leaves(ctx.collect_params())) == 6
>>> with ctx:
... y2 = mod(jnp.ones([1, 1]))
The same module instance in the same context will produce the same value:
>>> assert (y1 == y2).all()
Args:
params: Optional parameter values to inject.
state: Optional state values to inject.
rng: Optional rng to inject.
Returns:
Context manager which closes over mutable Haiku internal state.
"""
ddict = functools.partial(collections.defaultdict, dict)
if params is None:
params = ddict()
freeze_params = False
else:
params = data_structures.to_haiku_dict(params)
freeze_params = True
if state is None:
state = ddict()
else:
state = ddict({m: ddict({k: StatePair(v, v) for k, v in p.items()})
for m, p in state.items()})
if rng is not None and not isinstance(rng, PRNGSequence):
rng = PRNGSequence(rng)
return HaikuContext(params, state, rng, freeze_params)
def inside_transform():
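  """Returns whether we are currently inside a Haiku transform."""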
return bool(frame_stack)
def safe_get_module_name(module: Module) -> str:
"""Checks if parameters/state can be safely created before returning the module name."""
# TODO(tomhennigan) Module specific code should be part of `module.py`.
if not hasattr(module, "module_name"):
raise ValueError("The super constructor must be called before you create "
"parameters or submodules.")
if (closure_boundary_stack
and module._creation_frame_id < closure_boundary_stack.peek()): # pylint: disable=protected-access
raise ValueError("You can't functionally close over a module which has "
"been intitialized outside of the function wrapped in "
"hk.transparent_lift or hk.layer_stack.\n"
"The module you closed over is called "
f"'{module.module_name}'.")
return module.module_name
def current_module_state() -> Optional[ModuleState]:
frame = current_frame()
if frame.module_stack:
return frame.module_stack.peek()
else:
return None
@contextlib.contextmanager
def maybe_push_module_state(module_state: Optional[ModuleState]):
if module_state is not None:
frame = current_frame()
with frame.module_stack(module_state):
yield
else:
yield
def current_module() -> Optional[Module]:
module_state = current_module_state()
return module_state.module if module_state is not None else None
def current_name() -> str:
"""Returns the currently active module name.
Outside of a Haiku module (but inside a Haiku transform) this will return
``~`` which matches the key in the params/state dict where top level values
are stored.
>>> hk.current_name()
'~'
Inside a module this returns the current module name:
>>> class ExampleModule(hk.Module):
... def __call__(self):
... return hk.current_name()
>>> ExampleModule()()
'example_module'
Inside a name scope this returns the current name scope:
>>> with hk.name_scope('example_name_scope'):
... print(hk.current_name())
example_name_scope
Returns:
    The currently active module or name scope name. If no modules or name
    scopes are in use, this returns ``~``.
"""
assert_context("current_name")
module = current_module()
if module is not None:
return safe_get_module_name(module)
else:
# Any parameters defined outside an `hk.Module` are put in the same group.
return "~"
def get_lift_prefix() -> Optional[str]:
"""Get full lifted prefix from frame_stack if in lifted init."""
if params_frozen():
return None
prefixes = []
for frame in frame_stack:
if frame.module_stack:
module = frame.module_stack.peek().module
if isinstance(module, LiftingModuleType) and module.prefix_name:
prefixes.append(module.prefix_name)
prefix = "/".join(prefixes[::-1])
return f"{prefix}/" if prefix else None
def assert_context(public_symbol_name):
if not frame_stack:
raise ValueError(
"`hk.{}` must be used as part of an `hk.transform`".format(
public_symbol_name))
def params_frozen():
"""Returns true at apply time, false at init time."""
assert_context("params_frozen")
return current_frame().params_frozen
def get_params() -> Params:
"""Returns the parameters for the current :func:`transform`.
>>> def report(when):
... shapes = jax.tree_util.tree_map(jnp.shape, hk.get_params())
... print(f'{when}: {shapes}')
>>> def f(x):
... report('Before call')
... x = hk.Linear(1)(x)
... report('After call')
... return x
>>> f = hk.transform(f)
>>> rng = jax.random.PRNGKey(42)
>>> x = jnp.ones([1, 1])
During ``init`` the parameters dictionary will get populated as modules are
called:
>>> params = f.init(rng, x)
Before call: {}
After call: {'linear': {'b': (1,), 'w': (1, 1)}}
During ``apply`` the parameters dictionary will remain unchanged:
>>> _ = f.apply(params, None, x)
Before call: {'linear': {'b': (1,), 'w': (1, 1)}}
After call: {'linear': {'b': (1,), 'w': (1, 1)}}
NOTE: Does not run :func:`custom_getters` or parameter initializers.
Returns:
A copy of the parameters dictionary. During ``init`` this dictionary will
be populated with any parameters that have been created so far. During
``apply`` this will contain all parameters of all modules (the params dict
does not change during apply).
See also:
- :func:`get_initial_state`: The initial state for the function.
- :func:`get_current_state`: The current state for the function.
"""
assert_context("get_params")
return current_context().collect_params()
def get_initial_state() -> State:
"""Returns the initial state for the current :func:`transform_with_state`.
Example:
>>> def report(when):
... state = jax.tree_util.tree_map(int, hk.get_initial_state())
... print(f'{when}: {state}')
>>> def f():
... report('Before get_state')
... x = hk.get_state('x', [], init=jnp.zeros)
... report('After get_state')
... hk.set_state('x', x + 1)
... report('After set_state')
>>> f = hk.transform_with_state(f)
During ``init``, the first set value (either directly via :func:`set_state` or
via the ``init`` argument to :func:`get_state`) will be returned:
>>> _, state = f.init(None)
Before get_state: {}
After get_state: {'~': {'x': 0}}
After set_state: {'~': {'x': 0}}
During ``apply`` the value passed into the ``apply`` function will be used:
>>> state = {'~': {'x': 10}}
>>> _ = f.apply({}, state, None)
Before get_state: {'~': {'x': 10}}
After get_state: {'~': {'x': 10}}
After set_state: {'~': {'x': 10}}
NOTE: Does not run :func:`custom_getters` or state initializers.
Returns:
A copy of the state dictionary that would be returned from ``init`` or
passed into ``apply``.
See also:
- :func:`get_params`: The current parameters for the function.
- :func:`get_current_state`: The current state for the function.
"""
assert_context("get_initial_state")
return current_context().collect_initial_state()
def get_current_state() -> State:
"""Returns the current state for the current :func:`transform_with_state`.
Example:
>>> def report(when):
... state = jax.tree_util.tree_map(int, hk.get_current_state())
... print(f'{when}: {state}')
>>> def f():
... report('Before get_state')
... x = hk.get_state('x', [], init=jnp.zeros)
... report('After get_state')
... hk.set_state('x', x + 1)
... report('After set_state')
>>> f = hk.transform_with_state(f)
During ``init``, the most recently set value (either directly via
:func:`set_state` or via the ``init`` argument to :func:`get_state`) will be
returned:
>>> _, state = f.init(None)
Before get_state: {}
After get_state: {'~': {'x': 0}}
After set_state: {'~': {'x': 1}}
During ``apply`` the most recently set value will be used, if no value has
been set then the value that is passed into ``apply`` will be used:
>>> state = {'~': {'x': 10}}
>>> _ = f.apply({}, state, None)
Before get_state: {'~': {'x': 10}}
After get_state: {'~': {'x': 10}}
After set_state: {'~': {'x': 11}}
NOTE: Does not run :func:`custom_getters` or state initializers.
Returns:
A copy of the state dictionary that would be returned from ``init`` or
``apply``.
See also:
- :func:`get_params`: The current parameters for the function.
- :func:`get_initial_state`: The initial state for the function.
"""
assert_context("get_current_state")
return current_context().collect_state()
@final
class DoNotStore:
r"""Causes a parameter or state value to not be stored.
By default, Haiku will put the value returned from
:func:`~haiku.get_parameter`, :func:`~haiku.get_state` and
:func:`~haiku.set_state` into the dictionaries returned by ``init``. This is
not always desirable.
For example, a user may want to have part of their network come from a
pretrained checkpoint, and they may want to freeze those values (aka. have
them not appear in the params dict passed later to ``grad``). You can achieve
this by manipulating the params dict, however sometimes it is more convenient
to do this using custom creators/getters/setters.
Consider the following function:
>>> def f(x):
... x = hk.Linear(300, name='torso')(x)
... x = hk.Linear(10, name='tail')(x)
... return x
Imagine you have a pre-trained set of weights for the torso:
>>> pretrained = {'torso': {'w': jnp.ones([28 * 28, 300]),
... 'b': jnp.ones([300])}}
First we define a creator, that tells Haiku to not store any parameters that
are part of the pretrained dict:
>>> def my_creator(next_creator, shape, dtype, init, context):
... if context.module_name in pretrained:
... return hk.DO_NOT_STORE
... return next_creator(shape, dtype, init)
Then we need a getter that provides the parameter value from the pretrained
dict:
>>> def my_getter(next_getter, value, context):
... if context.module_name in pretrained:
... assert value is hk.DO_NOT_STORE
... value = pretrained[context.module_name][context.name]
... return next_getter(value)
Finally we'll wrap our function in context managers activating our creator and
getter:
>>> def f_with_pretrained_torso(x):
... with hk.custom_creator(my_creator), \
... hk.custom_getter(my_getter):
... return f(x)
You can see that when we run our function we only get parameters from modules
that were not in the pretrained dict:
>>> f_with_pretrained_torso = hk.transform(f_with_pretrained_torso)
>>> rng = jax.random.PRNGKey(42)
>>> x = jnp.ones([1, 28 * 28])
>>> params = f_with_pretrained_torso.init(rng, x)
>>> assert list(params) == ['tail']
This value can be used in initialisers, :func:`~haiku.custom_creator` or
:func:`~haiku.custom_setter`.
"""
@property
def shape(self):
raise ValueError("DO_NOT_STORE does not have a shape.")
@property
def dtype(self):
raise ValueError("DO_NOT_STORE does not have a dtype.")
DO_NOT_STORE = DoNotStore()
def check_not_none(value: Optional[T], msg: str) -> T:
if value is None:
raise ValueError(msg)
return value
def throw_if_run(shape, dtype):
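  """Placeholder init that raises; used when no initializer was provided."""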
del shape, dtype
raise ValueError("Initializer must be specified.")
def get_parameter(
name: str,
shape: Sequence[int],
dtype: Any = jnp.float32,
init: Optional[Initializer] = None,
) -> jax.Array:
"""Creates or reuses a parameter for the given transformed function.
>>> print(hk.get_parameter("w", [], init=jnp.ones))
1.0
Parameters within the same :func:`transform` and/or :class:`Module` with the
same name have the same value:
>>> w1 = hk.get_parameter("w", [], init=jnp.zeros)
>>> w2 = hk.get_parameter("w", [], init=jnp.zeros)
>>> assert w1 is w2
Args:
name: A name for the parameter.
shape: The shape of the parameter.
dtype: The dtype of the parameter.
init: A callable of shape, dtype to generate an initial value for the
parameter.
Returns:
A jax.Array with the parameter of the given shape.
"""
assert_context("get_parameter")
assert_jax_usage("get_parameter")
if init is None:
init = throw_if_run
bundle_name = current_name()
frame = current_frame()
fq_name = bundle_name + "/" + name
context = GetterContext(full_name=fq_name,
module=current_module(),
original_dtype=dtype, original_shape=shape,
original_init=init,
lifted_prefix_name=get_lift_prefix())
if bundle_name not in frame.params:
param = None
else:
param = frame.params[bundle_name].get(name)
if param is None:
param_missing_error = ValueError(
f"Unable to retrieve parameter {name!r} for module "
f"{bundle_name!r} All parameters must be created as part of `init`.")
try:
if param_creator_stack:
param = run_creators(param_creator_stack, context, shape, dtype, init)
else:
param = init(shape, dtype)
except MissingRNGError as e:
if frame.params_frozen:
raise param_missing_error from e
else:
raise e
if param is DO_NOT_STORE:
# Initializers or custom creators that return `DO_NOT_STORE` are required
# to produce a value for the parameter via a custom getter.
remove_if_empty(frame.params, bundle_name)
else:
if frame.params_frozen:
# Throw if we needed to re-init the parameter during apply.
raise param_missing_error
param = check_not_none(param, "Parameters cannot be `None`.")
frame.params[bundle_name][name] = param
# Custom getters allow a hook for users to customize the value returned by
# get_parameter. For example casting values to some dtype.
if param_getter_stack:
param = run_getters(param_getter_stack, context, param)
param = check_not_none(param, "Parameters cannot be `None`.")
if param.shape != tuple(shape):
raise ValueError(
f"{fq_name!r} with retrieved shape {param.shape!r} does not match "
f"shape={shape!r} dtype={dtype!r}")
return param
def remove_if_empty(bundle, key):
if key in bundle and not bundle[key]:
del bundle[key]
class GetterContext(NamedTuple):
"""Context about where parameters are being created.
Attributes:
full_name: The full name of the given parameter (e.g. ``mlp/~/linear_0/w``).
module: The module that owns the current parameter, ``None`` if this
parameter exists outside any module.
original_dtype: The dtype that :func:`~haiku.get_parameter` or
:func:`~haiku.get_state` was originally called with.
original_shape: The shape that :func:`~haiku.get_parameter` or
:func:`~haiku.get_state` was originally called with.
original_init: The initializer that :func:`~haiku.get_parameter` or
:func:`~haiku.get_state` was originally called with.
lifted_prefix_name: The module names of all enclosing lifted modules (see
      :func:`~haiku.lift` for more context). Prefixing ``full_name`` with this
      string gives the final parameter name in the outer transform's parameter
      dictionary.
NOTE: When :func:`~haiku.get_parameter` or :func:`~haiku.get_state` is
called in an `apply` context, this name will always be None because only
`init` functions are lifted.
module_name: The full name of enclosing modules.
name: The name of this parameter.
"""
full_name: str
module: Optional[Module]
original_dtype: Any
original_shape: Sequence[int]
original_init: Optional[Initializer]
lifted_prefix_name: Optional[str]
@property
def module_name(self):
module_name, _ = self.full_name.rsplit("/", 1)
return module_name
@property
def name(self):
_, name = self.full_name.rsplit("/", 1)
return name
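# For example, a parameter created by the first ``hk.Linear`` inside an
# ``hk.nets.MLP`` has ``full_name == "mlp/~/linear_0/w"``, which splits into
# ``module_name == "mlp/~/linear_0"`` and ``name == "w"``.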
NextCreator = Callable[[Sequence[int], Any, Initializer], jax.Array]
Creator = Callable[
[NextCreator, Sequence[int], Any, Initializer, GetterContext], jax.Array]
def run_creators(
stack: ThreadLocalStack[Creator],
context: GetterContext,
shape: Sequence[int],
dtype: Any = jnp.float32,
init: Optional[Initializer] = None,
) -> jax.Array:
"""See :func:`custom_creator` for usage."""
assert stack
stack = stack.clone()
def next_creator(shape, dtype, init):
if not stack:
return init(shape, dtype)
return stack.popleft()(next_creator, shape, dtype, init, context)
return next_creator(shape, dtype, init)
def custom_creator(
creator: Creator,
*,
params: bool = True,
state: bool = False,
) -> contextlib.AbstractContextManager:
"""Registers a custom parameter and/or state creator.
When new parameters are created via :func:`get_parameter` we first run custom
creators passing user defined values through. For example:
>>> def zeros_creator(next_creator, shape, dtype, init, context):
... init = jnp.zeros
... return next_creator(shape, dtype, init)
>>> with hk.custom_creator(zeros_creator):
... z = hk.get_parameter("z", [], jnp.float32, jnp.ones)
>>> print(z)
0.0
If ``state=True`` then your creator will additionally run on calls to
:func:`get_state`:
>>> with hk.custom_creator(zeros_creator, state=True):
... z = hk.get_state("z", [], jnp.float32, jnp.ones)
>>> print(z)
0.0
Args:
creator: A parameter creator.
params: Whether to intercept parameter creation, defaults to ``True``.
state: Whether to intercept state creation, defaults to ``False``.
Returns:
Context manager under which the creator is active.
"""
assert_context("custom_creator")
return custom_creator_unsafe(creator, params=params, state=state)
def custom_creator_unsafe(
creator: Creator,
*,
params: bool = True,
state: bool = False,
) -> contextlib.AbstractContextManager: # pylint: disable=g-bare-generic
"""See custom_creator."""
stack = contextlib.ExitStack()
if params:
stack.enter_context(param_creator_stack(creator))
if state:
stack.enter_context(state_creator_stack(creator))
return stack
NextGetter = Callable[[jax.Array], jax.Array]
Getter = Callable[[NextGetter, jax.Array, GetterContext], jax.Array]
def run_getters(
stack: Stack[Getter],
context: GetterContext,
value: jax.Array,
) -> jax.Array:
"""See :func:`custom_getter` for usage."""
assert stack
stack = stack.clone()
def next_getter(value):
if not stack:
return value
return stack.popleft()(next_getter, value, context)
return next_getter(value)
def custom_getter(
getter: Getter,
*,
params: bool = True,
state: bool = False,
) -> contextlib.AbstractContextManager:
"""Registers a custom parameter or state getter.
When parameters are retrieved using :func:`get_parameter` we always run all
custom getters before returning a value to the user.
>>> def bf16_getter(next_getter, value, context):
... value = value.astype(jnp.bfloat16)
... return next_getter(value)
>>> with hk.custom_getter(bf16_getter):
... w = hk.get_parameter("w", [], jnp.float32, jnp.ones)
>>> w.dtype
dtype(bfloat16)
If ``state=True`` the getter will additionally run for calls to
:func:`get_state`:
>>> with hk.custom_getter(bf16_getter, state=True):
... c = hk.get_state("c", [], jnp.float32, jnp.ones)
>>> c.dtype
dtype(bfloat16)
Args:
getter: A parameter getter.
params: Whether the getter should run on :func:`get_parameter`
state: Whether the getter should run on :func:`get_state`.
Returns:
Context manager under which the getter is active.
"""
assert_context("custom_getter")
stack = contextlib.ExitStack()
if params:
stack.enter_context(param_getter_stack(getter))
if state:
stack.enter_context(state_getter_stack(getter))
return stack
T = TypeVar("T")
U = TypeVar("U")
NextSetter = Callable[[str, T], U]
Setter = Callable[[NextSetter, T, "SetterContext"], U]
def run_setters(
stack: Stack[Setter],
context: "SetterContext",
value: T,
) -> U:
"""See :func:`custom_setter` for usage."""
assert stack
stack_copy = stack.clone()
def next_setter(value):
if not stack_copy:
return value
return stack_copy.popleft()(next_setter, value, context)
return next_setter(value)
class SetterContext(NamedTuple):
"""Context about where state is being set.
Attributes:
full_name: The full name of the given state (e.g. ``mlp/~/linear_0/w``).
module: The module that owns the current state, ``None`` if this
state exists outside any module.
original_dtype: The dtype that :func:`~haiku.set_state` was originally
called with.
original_shape: The shape that :func:`~haiku.set_state` or
:func:`~haiku.get_state` was originally called with.
lifted_prefix_name: The module names of all enclosing lifted modules (see
      :func:`~haiku.lift` for more context). Prefixing ``full_name`` with this
      string gives the final parameter name in the outer transform's parameter
      dictionary.
NOTE: When :func:`~haiku.get_parameter` or :func:`~haiku.get_state` is
called in an `apply` context, this name will always be None because only
`init` functions are lifted.
module_name: The full name of enclosing modules.
name: The name of this state.
"""
full_name: str
module: Optional[Module]
original_dtype: Any
original_shape: Sequence[int]
lifted_prefix_name: Optional[str]
@property
def module_name(self):
module_name, _ = self.full_name.rsplit("/", 1)
return module_name
@property
def name(self):
_, name = self.full_name.rsplit("/", 1)
return name
def custom_setter(setter: Setter) -> contextlib.AbstractContextManager:
"""Registers a custom state setter.
When state is set using :func:`set_state` we always run all custom setters
before saving the value.
>>> def zero_during_init(next_setter, value, context):
... if hk.running_init():
... value = jnp.zeros_like(value)
... return next_setter(value)
>>> with hk.custom_setter(zero_during_init):
... hk.set_state("x", jnp.ones([2]))
... x = hk.get_state("x")
>>> print(x)
[0. 0.]
Args:
setter: A state setter.
Returns:
Context manager under which the setter is active.
"""
assert_context("custom_setter")
return state_setter_stack(setter)
def assert_is_prng_key(key: PRNGKey):
"""Asserts that the given input looks like a `jax.random.PRNGKey`."""
# The error message has to be constructed lazily to avoid an extra
# device-to-host copy.
make_error = lambda: ValueError( # pylint: disable=g-long-lambda
f"The provided key is not a JAX PRNGKey but a {type(key)}:\n{key}")
if not hasattr(key, "shape") or not hasattr(key, "dtype"):
raise make_error()
if hasattr(jax.dtypes, "prng_key"): # JAX 0.4.14 or newer
is_typed_prng = jax.dtypes.issubdtype(key.dtype, jax.dtypes.prng_key)
elif hasattr(jax.random, "PRNGKeyArray"): # Older JAX versions
is_typed_prng = isinstance(key, jax.random.PRNGKeyArray)
else: # Shouldn't get here, but just in case...
is_typed_prng = False
if is_typed_prng:
if key.shape:
raise ValueError(
"Provided key did not have expected shape and/or dtype: "
f"expected=(shape=(), dtype={key.dtype}), "
f"actual=(shape={key.shape}, dtype={key.dtype})")
else:
config_hint = ""
default_impl = jax.random.default_prng_impl()
expected_shape = default_impl.key_shape
if default_impl.key_shape != (2,):
# Default PRNG impl is set to something different from threefry.
config_hint = ("\nHint: jax_default_prng_impl has been set to "
f"'{jax_config.jax_default_prng_impl}', the shape "
"mismatch might be because a jax.random.PRNGKey was "
"created before this flag was set.")
if key.shape != expected_shape or key.dtype != jnp.uint32:
raise ValueError(
"Provided key did not have expected shape and/or dtype: "
f"expected=(shape={expected_shape}, dtype=uint32), "
f"actual=(shape={key.shape}, dtype={key.dtype}){config_hint}")
PRNGSequenceState = tuple[PRNGKey, Iterable[PRNGKey]]
class PRNGSequence(Iterator[PRNGKey]):
"""Iterator of JAX random keys.
>>> seq = hk.PRNGSequence(42) # OR pass a jax.random.PRNGKey
>>> key1 = next(seq)
>>> key2 = next(seq)
>>> assert key1 is not key2
If you know how many keys you will want then you can use :meth:`reserve` to
more efficiently split the keys you need::
>>> seq.reserve(4)
>>> keys = [next(seq) for _ in range(4)]
"""
__slots__ = ("_key", "_subkeys")
def __init__(self, key_or_seed: Union[PRNGKey, int, PRNGSequenceState]):
"""Creates a new :class:`PRNGSequence`."""
if isinstance(key_or_seed, tuple):
key, subkeys = key_or_seed
assert_is_prng_key(key)
for subkey in subkeys:
assert_is_prng_key(subkey)
self._key = key
self._subkeys = collections.deque(subkeys)
else:
if isinstance(key_or_seed, int):
key_or_seed = jax.random.PRNGKey(key_or_seed)
# A seed value may also be passed as an int32-typed scalar ndarray.
elif (hasattr(key_or_seed, "shape") and (not key_or_seed.shape) and
hasattr(key_or_seed, "dtype") and key_or_seed.dtype == jnp.int32):
key_or_seed = jax.random.PRNGKey(key_or_seed)
else:
assert_is_prng_key(key_or_seed)
self._key = key_or_seed
self._subkeys = collections.deque()
def reserve(self, num):
"""Splits additional ``num`` keys for later use."""
if num > 0:
# When storing keys we adopt a pattern of key0 being reserved for future
# splitting and all other keys being provided to the user in linear order.
# In terms of jax.random.split this looks like:
#
# key, subkey1, subkey2 = jax.random.split(key, 3) # reserve(2)
# key, subkey3, subkey4 = jax.random.split(key, 3) # reserve(2)
#
# Where subkey1->subkey4 are provided to the user in order when requested.
new_keys = tuple(jax.random.split(self._key, num + 1))
self._key = new_keys[0]
self._subkeys.extend(new_keys[1:])
def reserve_up_to_full(self):
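    """Resizes the reserved subkeys to exactly ``rng_reserve_size``."""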
num = config.get_config().rng_reserve_size
diffnum = num - len(self._subkeys)
if diffnum > 0:
self.reserve(diffnum)
elif diffnum < 0:
sliced_subkeys = list(self._subkeys)[:num]
self._subkeys = collections.deque(sliced_subkeys)
@property
def internal_state(self) -> PRNGSequenceState:
return self._key, tuple(self._subkeys)
def replace_internal_state(self, state: PRNGSequenceState):
key, subkeys = state
assert_is_prng_key(key)
for subkey in subkeys:
assert_is_prng_key(subkey)
self._key = key
self._subkeys = collections.deque(subkeys)
def __next__(self) -> PRNGKey:
if not self._subkeys:
self.reserve(config.get_config().rng_reserve_size)
return self._subkeys.popleft()
next = __next__
def take(self, num) -> tuple[PRNGKey, ...]:
self.reserve(max(num - len(self._subkeys), 0))
return tuple(next(self) for _ in range(num))
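# --- Illustrative sketch (editor's addition, not part of the Haiku API). ---
# A minimal PRNGSequence lifecycle: seed it, optionally reserve a batch of keys
# in a single split, then draw keys via ``next``/``take``. Assumes only ``jax``
# as already imported in this module.
def _example_prng_sequence_usage():
  seq = PRNGSequence(42)           # an int seed or a jax.random.PRNGKey works.
  seq.reserve(4)                   # split four keys up front in one call.
  k1, k2 = next(seq), next(seq)    # consume two of the reserved keys.
  k3, k4 = seq.take(2)             # consume the remaining two.
  return [jax.random.uniform(k, []) for k in (k1, k2, k3, k4)]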
class MissingRNGError(ValueError):
pass
def rng_seq_or_fail() -> PRNGSequence:
rng_seq = current_frame().rng_stack.peek()
if rng_seq is None:
raise MissingRNGError("You must pass a non-None PRNGKey to init and/or "
"apply if you make use of random numbers.")
return rng_seq
def reserve_rng_keys(num: int):
"""Pre-allocate some number of JAX RNG keys.
See :func:`next_rng_key`.
This API offers a way to micro-optimize how RNG keys are split when using
Haiku. It is unlikely that you need it unless you find compilation time of
your ``init`` function to be a problem, or you sample a lot of random numbers
in ``apply``.
>>> hk.reserve_rng_keys(2) # Pre-allocate 2 keys for us to consume.
>>> _ = hk.next_rng_key() # Takes the first pre-allocated key.
>>> _ = hk.next_rng_key() # Takes the second pre-allocated key.
>>> _ = hk.next_rng_key() # Splits a new key.
Args:
num: The number of JAX rng keys to allocate.
"""
assert_context("reserve_rng_keys")
assert_jax_usage("reserve_rng_keys")
rng_seq = rng_seq_or_fail()
rng_seq.reserve(num)
def next_rng_key() -> PRNGKey:
"""Returns a unique JAX random key split from the current global key.
>>> key = hk.next_rng_key()
>>> _ = jax.random.uniform(key, [])
Returns:
A unique (within a call to ``init`` or ``apply``) JAX rng key that can be
used with APIs such as :func:`jax.random.uniform`.
"""
assert_context("next_rng_key")
assert_jax_usage("next_rng_key")
return next_rng_key_internal()
class JaxUsageError(ValueError):
pass
JaxUsageError.__module__ = "haiku"
def push_jax_trace_level():
return current_frame().jax_trace_stack(JaxTraceLevel.current())
def assert_jax_usage(public_symbol_name: str):
if not config.get_config().check_jax_usage:
return
expected_level = current_frame().jax_trace_stack.peek()
trace_level = JaxTraceLevel.current()
if trace_level != expected_level:
raise JaxUsageError(
"tl;dr - You need to use a Haiku overloaded transform (e.g. `hk.vmap`) "
"or control flow operator (e.g. `hk.cond`) instead of the `jax.*` "
"equivalent for untransformed functions using Haiku APIs."
"\n\n"
"Some APIs in JAX (e.g. `jit`, `vmap`, `cond`, `switch`) take "
f"functions that are expected to be pure. `hk.{public_symbol_name}` "
"has a side effect, and using it inside a function (without also using "
"`hk.transform`) makes that function 'impure' (the function has a side "
"effect)."
"\n\n"
"Haiku includes drop-in replacements for these JAX APIs (e.g. "
"`hk.vmap`) that carefully turn your function into a pure function and "
"then call the underlying JAX function.")
# NOTE: Split for monkey patching in random.py.
def next_rng_key_internal() -> PRNGKey:
rng_seq = rng_seq_or_fail()
return next(rng_seq)
def next_rng_keys(num: int) -> jax.Array:
"""Returns one or more JAX random keys split from the current global key.
>>> k1, k2 = hk.next_rng_keys(2)
>>> assert (k1 != k2).all()
>>> a = jax.random.uniform(k1, [])
>>> b = jax.random.uniform(k2, [])
>>> assert a != b
Args:
num: The number of keys to split.
Returns:
An array of shape ``[num, 2]`` unique (within a transformed function) JAX
rng keys that can be used with APIs such as :func:`jax.random.uniform`.
"""
assert_context("next_rng_keys")
assert_jax_usage("next_rng_keys")
rng_seq = rng_seq_or_fail()
return jnp.stack(rng_seq.take(num))
def maybe_next_rng_key() -> Optional[PRNGKey]:
""":func:`next_rng_key` if random numbers are available, else ``None``."""
assert_context("maybe_next_rng_key")
rng_seq = current_frame().rng_stack.peek()
if rng_seq is not None:
assert_jax_usage("maybe_next_rng_key")
return next(rng_seq)
else:
return None
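# --- Illustrative sketch (editor's addition, not part of the Haiku API). ---
# ``maybe_next_rng_key`` supports "optionally stochastic" code: the same
# function works whether or not an RNG key was passed to init/apply. The
# dropout-style body below is hypothetical and must run inside a transform.
def _example_maybe_next_rng_key(x, rate=0.1):
  key = maybe_next_rng_key()
  if key is None:                  # no RNG available: run deterministically.
    return x
  keep = jax.random.bernoulli(key, 1.0 - rate, x.shape)
  return jnp.where(keep, x / (1.0 - rate), 0.0)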
def maybe_get_rng_sequence_state() -> Optional[PRNGSequenceState]:
"""Returns the internal state of the PRNG sequence.
Returns:
The internal state if random numbers are available, else ``None``.
"""
assert_context("maybe_get_rng_sequence_state")
rng_seq = current_frame().rng_stack.peek()
if rng_seq is not None:
return rng_seq.internal_state
else:
return None
def replace_rng_sequence_state(state: PRNGSequenceState):
"""Replaces the internal state of the PRNG sequence with the given state.
Args:
    state: The new internal state.
Raises:
MissingRNGError: If random numbers aren't available.
"""
assert_context("replace_rng_sequence_state")
rng_seq = current_frame().rng_stack.peek()
if rng_seq is None:
raise MissingRNGError(
"replace_rng_sequence_state requires an RNG to be passed into the"
" transformed function"
)
assert_jax_usage("replace_rng_sequence_state")
rng_seq.replace_internal_state(state)
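# --- Illustrative sketch (editor's addition, not part of the Haiku API). ---
# The get/replace pair above can snapshot and rewind the RNG stream, e.g. to
# re-draw exactly the same random sample. The function name is hypothetical and
# it must run inside a transformed function.
def _example_rng_state_round_trip(x):
  snapshot = maybe_get_rng_sequence_state()
  y = jax.random.uniform(next_rng_key(), x.shape)
  if snapshot is None:             # no RNG was passed in.
    return y, y
  replace_rng_sequence_state(snapshot)                    # rewind the stream.
  y_again = jax.random.uniform(next_rng_key(), x.shape)   # same sample as y.
  return y, y_again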
def extract_state(state: State, *, initial) -> MutableState:
state = {m: {k: (v.initial if initial else v.current) for k, v in p.items()}
for m, p in state.items()}
state = data_structures.to_haiku_dict(state)
return state
def get_state(
name: str,
shape: Optional[Sequence[int]] = None,
dtype: Any = jnp.float32,
init: Optional[Initializer] = None,
) -> jax.Array:
"""Gets the current value for state with an optional initializer.
"State" can be used to represent mutable state in your network. The most
common usage of state is to represent the moving averages used in batch
normalization (see :class:`ExponentialMovingAverage`). If your network uses
"state" then you are required to use :func:`transform_with_state` and pass
state into and out of the apply function.
>>> print(hk.get_state("counter", [], init=jnp.zeros))
0.0
If the value for the given state is already defined (e.g. using
:func:`set_state`) then you can call with just the name:
>>> print(hk.get_state("counter"))
0.0
NOTE: state within the same :func:`transform` and/or :class:`Module` with the
same name have the same value:
>>> c1 = hk.get_state("counter")
>>> c2 = hk.get_state("counter")
>>> assert c1 is c2
Args:
name: A name for the state.
shape: The shape of the state.
dtype: The dtype of the state.
init: A callable ``f(shape, dtype)`` that returns an initial value for the
state.
Returns:
A jax.Array with the state of the given shape.
"""
assert_context("get_state")
assert_jax_usage("get_state")
bundle_name = current_name()
state = current_frame().state[bundle_name]
fq_name = f"{bundle_name}/{name}"
context = GetterContext(fq_name, current_module(),
dtype, shape, init, get_lift_prefix())
value = state.get(name, None)
if value is None:
if init is None:
raise ValueError(f"No value for {name!r} in {bundle_name!r}, perhaps "
"set an init function?")
if shape is None or dtype is None:
raise ValueError(f"Must provide shape and dtype to initialize {name!r} "
f"in {bundle_name!r}.")
if state_creator_stack:
value = run_creators(state_creator_stack, context, shape, dtype, init)
else:
value = init(shape, dtype)
if value is not DO_NOT_STORE:
state[name] = StatePair(value, value)
else:
value = value.current
# Custom getters allow a hook for users to customize the value returned by
# get_state. For example casting values to some dtype.
if state_getter_stack:
value = run_getters(state_getter_stack, context, value)
return value
maybe_shape = lambda x: getattr(x, "shape", None)
maybe_dtype = lambda x: getattr(x, "dtype", None)
def set_state(name: str, value):
"""Sets the current value for some state.
See :func:`get_state`.
"State" can be used to represent mutable state in your network. The most
common usage of state is to represent the moving averages used in batch
normalization (see :class:`ExponentialMovingAverage`). If your network uses
"state" then you are required to use :func:`transform_with_state` and pass
state into and out of the apply function.
>>> hk.set_state("counter", jnp.zeros([]))
>>> print(hk.get_state("counter"))
0.0
NOTE: state within the same :func:`transform` and/or :class:`Module` with the
same name have the same value:
>>> w1 = hk.get_state("counter")
>>> w2 = hk.get_state("counter")
>>> assert w1 is w2
Args:
name: A name for the state.
value: A value to set.
"""
assert_context("set_state")
assert_jax_usage("set_state")
frame = current_frame()
bundle_name = current_name()
state = frame.state[bundle_name]
if state_setter_stack:
shape = jax.tree_util.tree_map(maybe_shape, value)
dtype = jax.tree_util.tree_map(maybe_dtype, value)
fq_name = bundle_name + "/" + name
context = SetterContext(full_name=fq_name,
module=current_module(), original_dtype=dtype,
original_shape=shape,
lifted_prefix_name=get_lift_prefix())
value = run_setters(state_setter_stack, context, value)
if value is DO_NOT_STORE:
remove_if_empty(frame.state, bundle_name)
return
if name in state:
initial, _ = state[name]
current = value
else:
initial = current = value
state[name] = StatePair(initial, current)
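# --- Illustrative sketch (editor's addition, not part of the Haiku API). ---
# ``get_state``/``set_state`` give a read-modify-write pattern; the canonical
# example is a step counter threaded through ``hk.transform_with_state``.
def _example_counter_state():
  count = get_state("count", shape=[], dtype=jnp.int32, init=jnp.zeros)
  set_state("count", count + 1)    # the updated value is returned from apply.
  return count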
def with_rng(key: PRNGKey):
"""Provides a new sequence for :func:`next_rng_key` to draw from.
When :func:`next_rng_key` is called, it draws a new key from the
:class:`PRNGSequence` defined by the input key to the transformed function.
This context manager overrides the sequence for the duration of the scope.
>>> with hk.with_rng(jax.random.PRNGKey(428)):
... s = jax.random.uniform(hk.next_rng_key(), ())
>>> print("{:.1f}".format(s))
0.5
Args:
key: The key to seed the sequence with.
Returns:
Context manager under which the given sequence is active.
"""
assert_context("with_rng")
assert_jax_usage("with_rng")
return current_frame().rng_stack(PRNGSequence(key))
def param_names() -> frozenset[tuple[str, str]]:
"""Returns all module and parameter names as a set of pairs."""
out = []
params = current_frame().params
for mod_name, bundle in params.items():
if not isinstance(bundle, Mapping):
# TODO(tomhennigan) Fix broken user code and remove this warning.
warnings.warn(f"Invalid entry {mod_name!r} in params {params}")
continue
for name in bundle:
out.append((mod_name, name))
return frozenset(out)
@contextlib.contextmanager
def assert_no_new_parameters():
before = param_names()
yield
diff = param_names() - before
if diff:
raise AssertionError(f"New parameters were created: {list(sorted(diff))}")
def _get_ids(collection_name: str) -> frozenset[int]:
"""Returns the identity for all state in the current context."""
out = []
collection = getattr(current_frame(), collection_name)
for mod_name, bundle in collection.items():
if not isinstance(bundle, Mapping):
# TODO(tomhennigan) Fix broken user code and remove this warning.
warnings.warn(f"Invalid entry {mod_name!r} in collection {collection}")
continue
out.extend(map(id, bundle.values()))
return frozenset(out)
def _all_state():
params = _get_ids("params")
state = _get_ids("state")
rng = current_frame().rng_stack.peek()
if rng is not None:
key, subkeys = rng.internal_state
rng = frozenset(map(id, [key] + list(subkeys)))
else:
rng = frozenset()
return params, state, rng
@contextlib.contextmanager
def assert_state_unchanged():
"""Asserts that in the given block params, state and rng are unchanged."""
params_before, state_before, rng_before = _all_state()
yield
params_after, state_after, rng_after = _all_state()
params_diff = params_after - params_before
state_diff = state_after - state_before
# Amount of rng keys can increase (reserve_rng_keys) or decrease
# (next_rng_key), so need symmetric set difference (^).
rng_diff = rng_after ^ rng_before
if params_diff or state_diff or rng_diff:
raise StateChangedError("Within this code block you are not able to modify "
"Haiku managed state (e.g. via `next_rng_key` or "
"`set_state`).")
class StateChangedError(AssertionError):
pass
class NonEmptyStateError(ValueError):
"""Error thrown when state is used without using `transform_with_state`."""
|
dm-haiku-main
|
haiku/_src/base.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for jaxpr_info."""
from typing import Optional
from absl import logging
from absl.testing import absltest
from haiku._src import conv
from haiku._src import jaxpr_info
from haiku._src import module
from haiku._src import transform
import jax
import jax.numpy as jnp
import numpy as np
class MyModel(module.Module):
def __init__(self, name: Optional[str] = None):
super().__init__(name=name)
def __call__(self, x: jax.Array):
return conv.Conv2D(16, 3)(x)
class JaxprInfoTest(absltest.TestCase):
def test_simple_expression(self):
def add(x, y):
return jnp.sign(x) + jnp.cos(y)
a = jnp.zeros((12, 7))
mod = jaxpr_info.make_model_info(add)(a, a)
self.assertContentsEqual(
jaxpr_info.format_module(mod), """
add
sign
sign in f32[12,7], out f32[12,7]
cos in f32[12,7], out f32[12,7]
add in f32[12,7], f32[12,7], out f32[12,7]
""")
def test_compute_flops(self):
def _compute_flops(eqn: jax.core.JaxprEqn,
expression: jaxpr_info.Expression) -> int:
del expression
return max(np.prod(var.aval.shape) for var in eqn.invars) # pytype: disable=attribute-error
def add(x, y):
return jnp.sign(x) + jnp.cos(y)
a = jnp.zeros((12, 7))
mod = jaxpr_info.make_model_info(add, compute_flops=_compute_flops)(a, a)
self.assertContentsEqual(
jaxpr_info.format_module(mod), """
add 252 flops
sign 84 flops
sign 84 flops in f32[12,7], out f32[12,7]
cos 84 flops in f32[12,7], out f32[12,7]
add 84 flops in f32[12,7], f32[12,7], out f32[12,7]
""")
def test_haiku_module(self):
def forward(x):
return MyModel()(x)
forward_t = transform.transform_with_state(forward)
rng = jax.random.PRNGKey(42)
x = jnp.zeros((16, 8, 8, 32))
params, state = forward_t.init(rng, x)
mod = jaxpr_info.make_model_info(forward_t.apply)(params, state, rng, x)
self.assertContentsEqual(
jaxpr_info.format_module(mod), """
apply_fn
my_model 4.624 kparams
conv2_d 4.624 kparams
conv_general_dilated in f32[16,8,8,32], f32[3,3,32,16], out f32[16,8,8,16]
broadcast_in_dim in f32[16], out f32[16,8,8,16]
add in f32[16,8,8,16], f32[16,8,8,16], out f32[16,8,8,16]
""")
def test_haiku_module_loss(self):
def forward(x):
return MyModel()(x)
forward_t = transform.transform_with_state(forward)
rng = jax.random.PRNGKey(42)
x = jnp.zeros((16, 8, 8, 32))
params, state = forward_t.init(rng, x)
def loss(params, state, rng, x):
loss = jnp.sum(forward_t.apply(params, state, rng, x)[0])
return loss, loss
grad = jax.grad(loss, has_aux=True)
mod = jaxpr_info.make_model_info(grad)(params, state, rng, x)
formatted_mod = jaxpr_info.format_module(mod)
# Support old JAX versions on GitHub presubmits.
formatted_mod = formatted_mod.replace("transpose(jvp(conv2_d))",
"conv2_d").replace(
"jvp(conv2_d)", "conv2_d")
self.assertContentsEqual(
formatted_mod, """
loss
jvp(my_model)
conv2_d
conv_general_dilated in f32[16,8,8,32], f32[3,3,32,16], out f32[16,8,8,16]
broadcast_in_dim in f32[16], out f32[16,8,8,16]
add in f32[16,8,8,16], f32[16,8,8,16], out f32[16,8,8,16]
reduce_sum in f32[16,8,8,16], out f32[]
broadcast_in_dim in f32[], out f32[16,8,8,16]
transpose(jvp(my_model))
conv2_d
reduce_sum in f32[16,8,8,16], out f32[16]
conv_general_dilated in f32[16,8,8,32], f32[16,8,8,16], out f32[3,3,32,16]
""".strip())
def assertContentsEqual(self, a: str, b: str):
a, b = a.strip(), b.strip()
logging.info("a:\n%s", a)
logging.info("b:\n%s", b)
self.assertEqual(a, b)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/jaxpr_info_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lifting parameters in Haiku."""
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import base
from haiku._src import config
from haiku._src import lift
from haiku._src import module
from haiku._src import multi_transform
from haiku._src import stateful
from haiku._src import test_utils
from haiku._src import transform
import jax
import jax.numpy as jnp
import numpy as np
IGNORE = lambda u: u.ignore_update()
UPDATE = lambda u: u.update({})
class Bias(module.Module):
def __call__(self, x):
b = base.get_parameter("b", (), init=jnp.ones)
return x + b
def with_lift(f, *, name="inner"):
def wrapped(*a, **k):
init, apply = transform.transform(f)
params = lift.lift(init, name=name)(None, *a, **k)
return apply(params, None, *a, **k)
return wrapped
def with_transparent_lift(f):
def wrapped(*a, **k):
init, apply = transform.transform(f)
params = lift.transparent_lift(init)(None, *a, **k)
return apply(params, None, *a, **k)
return wrapped
def top_level(x):
x = Bias(name="top_level")(x)
return Bias(name="top_level")(x)
def nested(x):
class OuterModule(module.Module):
def __call__(self, x):
return Bias(name="inner")(x)
return OuterModule(name="outer")(x)
def expected_duplicate_name(x):
class ExtraOuter(module.Module):
def __call__(self, x):
return Bias("inner")(x)
class OuterModule(module.Module):
def __call__(self, x):
x = ExtraOuter(name="outer")(x)
return Bias(name="outer")(x)
return OuterModule(name="outer")(x)
class LiftTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self._prev_check_jax_usage = config.check_jax_usage(True)
def tearDown(self):
super().tearDown()
config.check_jax_usage(self._prev_check_jax_usage)
def test_lift_with_vmap(self):
def inner_fn(x):
assert x.ndim == 1
return Bias()(x)
def outer_fn(x):
assert x.ndim == 2
x = Bias()(x)
inner = multi_transform.without_apply_rng(transform.transform(inner_fn))
inner_p = lift.lift(inner.init)(base.next_rng_key(), x[0])
vmap_inner = jax.vmap(inner.apply, in_axes=(None, 0))
return vmap_inner(inner_p, x)
key = jax.random.PRNGKey(428)
init_key, apply_key = jax.random.split(key)
data = np.zeros((3, 2))
outer = transform.transform(outer_fn)
outer_params = outer.init(init_key, data)
self.assertEqual(outer_params, {
"bias": {"b": np.ones(())},
"lifted/bias": {"b": np.ones(())},
})
out = outer.apply(outer_params, apply_key, data)
np.testing.assert_equal(out, 2 * np.ones((3, 2)))
def test_lift_with_scan(self):
def inner_fn(x):
x *= base.get_parameter("w", shape=x.shape, init=jnp.zeros)
return x
class Outer(module.Module):
def __init__(self, allow_reuse):
super().__init__()
self._allow_reuse = allow_reuse
def __call__(self, carry, x):
x += base.get_parameter("w", shape=[], init=jnp.zeros)
inner = transform.transform(inner_fn)
keys = base.next_rng_key() if transform.running_init() else None
params = lift.lift(
inner.init, allow_reuse=self._allow_reuse)(keys, x)
return carry, inner.apply(params, None, x)
def model(x, *, allow_reuse):
return stateful.scan(Outer(allow_reuse), (), x)
rng = jax.random.PRNGKey(42)
data = np.zeros((4, 3, 2))
with self.subTest(name="allow_reuse"):
init, apply = transform.transform(
lambda x: model(x, allow_reuse=True))
params = init(rng, data)
_, out = apply(params, None, data)
np.testing.assert_equal(out, np.zeros_like(data))
with self.subTest(name="disallow_reuse"):
init, _ = transform.transform(lambda x: model(x, allow_reuse=False))
with self.assertRaisesRegex(ValueError, "Key '.*' already exists"):
_ = init(rng, data)
@parameterized.parameters((lift.lift, lambda: None),
(lift.lift_with_state, lambda: (None, None)))
def test_inside_transform(self, lift_fn, init_fn):
with self.assertRaisesRegex(ValueError, "must be .* part of .*transform"):
lift_fn(init_fn)
@test_utils.transform_and_run
def test_empty_lift(self):
f = transform.transform(lambda: None)
self.assertEmpty(lift.lift(f.init)(None))
@parameterized.parameters(True, False)
@test_utils.transform_and_run
def test_empty_lift_with_state(self, ignore_update):
f = transform.transform_with_state(lambda: None)
init_fn, updater = lift.lift_with_state(f.init)
params, state = init_fn(None)
self.assertEmpty(params)
self.assertEmpty(state)
if ignore_update:
updater.ignore_update()
else:
updater.update({})
def test_unused_updater(self):
def f() -> lift.LiftWithStateUpdater:
f = transform.transform_with_state(lambda: None)
return lift.lift_with_state(f.init)[1]
f = transform.transform_with_state(f)
with self.assertRaisesRegex(ValueError, "StateUpdater.*must be used"):
f.init(None)
@parameterized.named_parameters(("ignore then ignore", IGNORE, IGNORE),
("update then update", UPDATE, UPDATE),
("ignore then update", IGNORE, UPDATE),
("update then ignore", UPDATE, IGNORE))
@test_utils.transform_and_run
def test_used_multiple_times(self, update_fn1, update_fn2):
f = transform.transform_with_state(lambda: None)
updater = lift.lift_with_state(f.init)[1]
update_fn1(updater)
with self.assertRaisesRegex(ValueError, "must only be used once"):
update_fn2(updater)
@test_utils.transform_and_run(run_apply=False)
def test_lift_raises_with_state(self):
f = transform.transform_with_state(
lambda: base.get_state("w", [], init=jnp.zeros))
lifted = lift.lift(f.init) # pytype: disable=wrong-arg-types
with self.assertRaisesRegex(base.NonEmptyStateError,
"use.*lift_with_state"):
lifted(None)
def test_lift_with_state(self):
@transform.transform_with_state
def inner():
w = base.get_state("w", [], init=jnp.zeros)
w += 1
base.set_state("w", w)
return w
def outer():
lifted, updater = lift.lift_with_state(inner.init)
params, state = lifted(None)
self.assertEmpty(params)
out, state = inner.apply(params, state, None)
updater.update(state)
return out, state
outer = transform.transform_with_state(outer)
params, state = outer.init(None)
self.assertEmpty(params)
self.assertEqual(jax.tree_util.tree_map(int, state), {"lifted/~": {"w": 0}})
for expected in (1, 2, 3):
(w, inner_state), state = outer.apply(params, state, None)
self.assertEqual(
jax.tree_util.tree_map(int, inner_state), {"~": {"w": expected}})
self.assertEqual(w, expected)
self.assertEmpty(params)
self.assertEqual(state, {"lifted/~": {"w": expected}})
def test_lift_with_state_nested(self):
@transform.transform_with_state
def inner():
w = base.get_state("w", [], init=jnp.zeros)
w += 1
base.set_state("w", w)
return w
class Outer(module.Module):
def __call__(self):
lifted, updater = lift.lift_with_state(inner.init)
params, state = lifted(None)
out, state = inner.apply(params, state, None)
updater.update(state)
return out, state
outer = transform.transform_with_state(lambda: Outer()()) # pylint: disable=unnecessary-lambda
params, state = outer.init(None)
self.assertEmpty(params)
self.assertEqual(
jax.tree_util.tree_map(int, state), {"outer/lifted/~": {"w": 0}})
for expected in (1, 2, 3):
(w, inner_state), state = outer.apply(params, state, None)
self.assertEqual(
jax.tree_util.tree_map(int, inner_state), {"~": {"w": expected}})
self.assertEqual(w, expected)
self.assertEmpty(params)
self.assertEqual(state, {"outer/lifted/~": {"w": expected}})
@parameterized.parameters(IGNORE, UPDATE)
def test_updater_used_in_different_inner_transform(self, updater_fn):
def f():
g = transform.transform_with_state(lambda: None)
_, updater = lift.lift_with_state(g.init)
transform.transform_with_state(lambda: updater_fn(updater)).init(None)
f = transform.transform_with_state(f)
with self.assertRaisesRegex(
ValueError, "must be used within the same call to init/apply"):
f.init(None)
def test_transparent_lift_with_state(self):
@transform.transform_with_state
def inner():
w = base.get_state("w", [], init=jnp.zeros)
w += 1
base.set_state("w", w)
return w
@transform.transform_with_state
def outer():
lifted, updater = lift.transparent_lift_with_state(inner.init)
params, state = lifted(None)
out, state = inner.apply(params, state, None)
updater.update(state)
return out, state
params, state = outer.init(None)
self.assertEmpty(params)
self.assertEqual(jax.tree_util.tree_map(int, state), {"~": {"w": 0}})
for expected in (1, 2, 3):
(w, inner_state), state = outer.apply(params, state, None)
self.assertEqual(
jax.tree_util.tree_map(int, inner_state), {"~": {"w": expected}})
self.assertEqual(w, expected)
self.assertEmpty(params)
self.assertEqual(state, inner_state)
def test_transparent_lift_with_state_nested(self):
@transform.transform_with_state
def inner():
w = base.get_state("w", [], init=jnp.zeros)
w += 1
base.set_state("w", w)
return w
class Outer(module.Module):
def __call__(self):
lifted, updater = lift.transparent_lift_with_state(inner.init)
params, state = lifted(None)
out, state = inner.apply(params, state, None)
updater.update(state)
return out, state
outer = transform.transform_with_state(lambda: Outer()()) # pylint: disable=unnecessary-lambda
params, state = outer.init(None)
self.assertEmpty(params)
self.assertEqual(jax.tree_util.tree_map(int, state), {"outer/~": {"w": 0}})
for expected in (1, 2, 3):
(w, inner_state), state = outer.apply(params, state, None)
self.assertEqual(
jax.tree_util.tree_map(int, inner_state), {"~": {"w": expected}})
self.assertEqual(w, expected)
self.assertEmpty(params)
self.assertEqual(state, {"outer/~": {"w": expected}})
def test_transparent_lift(self):
class OuterModule(module.Module):
def __call__(self, x):
x += base.get_parameter("a", shape=[10, 10], init=jnp.zeros)
def inner_fn(x):
return InnerModule(name="inner")(x)
inner_transformed = transform.transform(inner_fn)
inner_params = lift.transparent_lift(inner_transformed.init)(
base.next_rng_key(), x)
x = inner_transformed.apply(inner_params, base.next_rng_key(), x)
return x
class InnerModule(module.Module):
def __call__(self, x):
x += base.get_parameter("b", shape=[10, 10], init=jnp.zeros)
return x
@transform.transform
def fn(x):
return OuterModule(name="outer")(x)
correct_weight_names = ["outer/inner", "outer"]
rng = jax.random.PRNGKey(0)
params = fn.init(rng, jnp.ones([10, 10]))
self.assertCountEqual(list(params.keys()), correct_weight_names)
def test_transparent_lift_top_level(self):
class MyModule(module.Module):
def __call__(self, x):
x += base.get_parameter("b", shape=[10, 10], init=jnp.zeros)
return x
@transform.transform
def fn(x):
def inner_fn(x):
x = MyModule(name="top_level")(x)
return MyModule(name="top_level")(x)
inner_transformed = transform.transform(inner_fn)
inner_params = lift.transparent_lift(inner_transformed.init)(None, x)
return inner_transformed.apply(inner_params, None, x)
correct_weight_names = ["top_level", "top_level_1"]
params = fn.init(None, jnp.ones([10, 10]))
self.assertCountEqual(list(params.keys()), correct_weight_names)
fn.apply(params, None, jnp.ones([10, 10]))
def test_transparent_lift_existing_params_error(self):
class MyModule(module.Module):
def __call__(self, x):
x += base.get_parameter("b", shape=[3, 7], init=jnp.zeros)
return x
@transform.transform
def fn(x):
@transform.transform
def inner_fn(x):
return MyModule()(x)
x = MyModule()(x)
inner_params = lift.transparent_lift(inner_fn.init)(None, x)
return inner_fn.apply(inner_params, None, x)
with self.assertRaisesRegex(
ValueError, "Key 'my_module' already exists in the destination params"):
_ = fn.init(None, jnp.ones([3, 7]))
@parameterized.named_parameters([(fn.__name__, fn) # pylint: disable=undefined-variable
for fn in [top_level, nested,
expected_duplicate_name]])
def test_lift_naming_semantics(self, inner_module):
@transform.transform
def fn(x):
return with_transparent_lift(inner_module)(x)
x = jnp.ones([10, 10])
params_with_lift = fn.init(None, x)
params_without_lift = transform.transform(inner_module).init(None, x)
jax.tree_util.tree_map(
self.assertAlmostEqual, params_with_lift, params_without_lift)
fn.apply(params_with_lift, None, x)
def test_transparent_lift_closed_over_errors(self):
@transform.transform
def fn(x):
outer_defined = Bias(name="inner")
def inner_fn(x):
# transparent_lift closes over outer_defined
x = outer_defined(x)
return Bias(name="inner")(x)
return with_transparent_lift(inner_fn)(x)
with self.assertRaisesRegex(
ValueError, "close over a module.*transparent_lift"):
fn.init(None, jnp.ones((10, 10)))
def test_transparent_lift_closed_over_nested_errors(self):
class OuterModule(module.Module):
def __call__(self, x):
outer_defined = Bias(name="inner")
def inner_fn(x):
# transparent_lift closes over outer_defined nested in another module.
x = outer_defined(x)
return Bias(name="inner")(x)
return with_transparent_lift(inner_fn)(x)
@transform.transform
def fn(x):
return OuterModule(name="outer")(x)
with self.assertRaisesRegex(
ValueError, "close over a module.*transparent_lift"):
fn.init(None, jnp.ones((10, 10)))
def test_same_name_across_transforms_no_closed_error(self):
init1, _ = transform.transform(lambda x: Bias()(x)) # pylint: disable=unnecessary-lambda
init2, _ = transform.transform(lambda x: Bias()(x)) # pylint: disable=unnecessary-lambda
params1 = init1(None, 1.)
params2 = init2(None, 1.) # does not fail
jax.tree_util.tree_map(self.assertAlmostEqual, params1, params2)
def test_closed_over_within_transparent_lift_no_closed_error(self):
# You can close over modules within the boundary of the transparent_lift.
@transform.transform
def transformed_fn(x):
def lifted_fn(x):
outer_defined = Bias(name="inner")
def closing_over_fn(x):
return outer_defined(x)
x = stateful.vmap(closing_over_fn, split_rng=False)(x)
return Bias(name="inner")(x)
return with_transparent_lift(lifted_fn)(x)
transformed_fn.init(None, jnp.ones((10, 10))) # does not crash.
@parameterized.named_parameters(
("lift", with_lift, "outer/inner/"),
("transparent_lift", with_transparent_lift, "outer/"),
("nested_lift", lambda f: with_lift(with_lift(f)), "outer/inner/inner/"),
("nested_transparent_lift",
lambda f: with_transparent_lift(with_transparent_lift(f)),
"outer/"),
)
def test_custom_full_lift_prefix(self, lift_fn, expected_name):
def my_creator(next_creator, shape, dtype, init, context):
self.assertEqual(context.lifted_prefix_name, expected_name)
return next_creator(shape, dtype, init)
def my_getter(next_getter, value, context):
if transform.running_init():
self.assertEqual(context.lifted_prefix_name, expected_name)
return next_getter(value)
class Outer(module.Module):
def __call__(self, x):
with base.custom_getter(my_getter), base.custom_creator(my_creator):
return lift_fn(lambda x: Bias()(x))(x) # pylint: disable=unnecessary-lambda
@transform.transform
def fn(x):
return Outer()(x)
x = jnp.ones([10, 10])
params_with_lift = fn.init(None, x)
fn.apply(params_with_lift, None, x)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/lift_test.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Root Mean Square Layer Normalization.
Reference: https://arxiv.org/abs/1910.07467
"""
from collections import abc
from collections.abc import Sequence
from typing import Optional, Union
from haiku._src import base
from haiku._src import initializers
from haiku._src import layer_norm
from haiku._src import module
import jax
import jax.numpy as jnp
# If you are forking replace this with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
get_parameter = base.get_parameter
initializers = initializers
Module = module.Module
# pylint: enable=invalid-name
del base, module, initializers
AxisOrAxes = Union[int, Sequence[int], slice]
class RMSNorm(hk.Module):
"""RMSNorm module.
RMSNorm provides an alternative that can be both faster and more stable than
  LayerNorm. The inputs are normalized by their root mean square (RMS) and scaled
by a learned parameter, but they are not recentered around their mean.
See https://arxiv.org/pdf/1910.07467.pdf
"""
def __init__(
self,
axis: AxisOrAxes,
eps: float = 1e-5,
scale_init: Optional[hk.initializers.Initializer] = None,
name: Optional[str] = None,
create_scale: bool = True,
*,
param_axis: Optional[AxisOrAxes] = None,
):
"""Constructs a RMSNorm module.
Args:
axis: Integer, list of integers, or slice indicating which axes to
normalize over.
eps: Small epsilon to avoid division by zero variance. Defaults to 1e-5.
scale_init: Optional initializer for gain (aka scale). By default, one.
name: The module name.
create_scale: Bool, defines whether to create a trainable scale
per channel applied after the normalization.
      param_axis: Axis used to determine the parameter shape of the learnable
        scale. Sonnet sets this to the channel/feature axis (e.g. to
``-1`` for ``NHWC``). Other libraries set this to the same as the
reduction axis (e.g. ``axis=param_axis``). `None` defaults to (-1,).
"""
super().__init__(name=name)
if not create_scale and scale_init is not None:
raise ValueError("Cannot set `scale_init` if `create_scale=False`.")
if isinstance(axis, slice):
self.axis = axis
elif isinstance(axis, int):
self.axis = (axis,)
elif (isinstance(axis, abc.Iterable) and
all(isinstance(ax, int) for ax in axis)):
self.axis = tuple(axis)
else:
raise ValueError("`axis` should be an int, slice or iterable of ints.")
self.eps = eps
self.create_scale = create_scale
self.scale_init = scale_init or jnp.ones
if param_axis is None:
self.param_axis = (-1,)
else:
self.param_axis = layer_norm.to_axes_or_slice(param_axis)
def __call__(self, inputs: jax.Array):
"""Connects the layer norm.
Args:
inputs: An array, where the data format is ``[N, ..., C]``.
Returns:
The normalized array, of the same shape as the inputs.
"""
axis = self.axis
if isinstance(axis, slice):
axis = tuple(range(inputs.ndim)[axis])
param_axis = layer_norm.to_abs_axes(self.param_axis, inputs.ndim)
if param_axis == (inputs.ndim - 1,):
# For param_axis=-1 we store non-broadcast param shape for compatibility
# with older checkpoints.
param_shape = inputs.shape[-1:]
else:
param_shape = tuple(
(inputs.shape[i] if i in param_axis else 1)
for i in range(inputs.ndim))
if self.create_scale:
scale = hk.get_parameter(
"scale", param_shape, inputs.dtype, init=self.scale_init)
scale = jnp.broadcast_to(scale, inputs.shape)
else:
scale = 1.
mean_squared = jnp.mean(jnp.square(inputs), axis=axis, keepdims=True)
mean_squared = jnp.broadcast_to(mean_squared, inputs.shape)
return inputs * scale * jax.lax.rsqrt(mean_squared + self.eps)
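# --- Illustrative usage sketch (editor's addition, not part of the module). ---
# RMSNorm is typically applied over the trailing feature axis inside a
# transformed function. ``haiku`` is imported lazily here because the ``hk``
# name above is only the intra-package shim.
def _example_rms_norm_usage():
  import haiku
  def forward(x):
    return RMSNorm(axis=-1)(x)
  forward_t = haiku.transform(forward)
  x = jnp.ones([2, 8])
  params = forward_t.init(jax.random.PRNGKey(0), x)
  return forward_t.apply(params, None, x)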
|
dm-haiku-main
|
haiku/_src/rms_norm.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.embed."""
import itertools
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import embed
from haiku._src import test_utils
import jax.numpy as jnp
import numpy as np
_EMBEDDING_MATRIX = np.asarray([
[0.0, 0.0, 0.0, 0.0],
[0.5, 0.5, 0.5, 0.5],
[0.1, 0.2, 0.3, 0.4]
])
_1D_IDS = [0, 2] # pylint: disable=invalid-name
_2D_IDS = [[0, 2], [2, 2]] # pylint: disable=invalid-name
_3D_IDS = [[[0, 2], [2, 2]], [[1, 1], [0, 2]]] # pylint: disable=invalid-name
class EmbedTest(parameterized.TestCase):
@parameterized.parameters(
itertools.product(["ARRAY_INDEX", "ONE_HOT"],
[_1D_IDS, _2D_IDS, _3D_IDS]))
@test_utils.transform_and_run
def test_lookup(self, lookup_style, inp_ids):
emb = embed.Embed(embedding_matrix=_EMBEDDING_MATRIX,
lookup_style=lookup_style)
np.testing.assert_allclose(
emb(inp_ids),
jnp.asarray(_EMBEDDING_MATRIX)[jnp.asarray(inp_ids)])
self.assertEqual(
list(emb(inp_ids).shape),
list(jnp.asarray(_EMBEDDING_MATRIX)[jnp.asarray(inp_ids)].shape))
@parameterized.parameters("ARRAY_INDEX", "ONE_HOT")
@test_utils.transform_and_run
def test_default_creation(self, lookup_style):
emb = embed.Embed(vocab_size=6, embed_dim=12, lookup_style=lookup_style)
self.assertEqual(emb(_1D_IDS).shape, (2, 12))
@test_utils.transform_and_run
def test_no_creation_args(self):
with self.assertRaisesRegex(ValueError, "must be supplied either with an"):
embed.Embed()
@test_utils.transform_and_run
def test_inconsistent_creation_args(self):
with self.assertRaisesRegex(ValueError, "supplied but the `vocab_size`"):
embed.Embed(embedding_matrix=_EMBEDDING_MATRIX, vocab_size=4)
with self.assertRaisesRegex(ValueError, "supplied but the `embed_dim`"):
embed.Embed(embedding_matrix=_EMBEDDING_MATRIX, embed_dim=5)
@test_utils.transform_and_run
def test_embed_dtype_check(self):
emb = embed.Embed(
embedding_matrix=_EMBEDDING_MATRIX, lookup_style="ARRAY_INDEX")
with self.assertRaisesRegex(
ValueError,
"hk.Embed's __call__ method must take an array of integer dtype but "
"was called with an array of float32"):
emb([1.0, 2.0]) # type: ignore
@test_utils.transform_and_run
def test_embed_invalid_lookup(self):
lookup_style = "FOO"
emb = embed.Embed(embedding_matrix=_EMBEDDING_MATRIX, lookup_style="FOO")
with self.assertRaisesRegex(AttributeError, lookup_style):
emb(_1D_IDS)
@test_utils.transform_and_run
def test_embed_property_check(self):
lookup_style = "ONE_HOT"
emb = embed.Embed(
embedding_matrix=_EMBEDDING_MATRIX, lookup_style=lookup_style)
self.assertEqual(emb.vocab_size, 3)
self.assertEqual(emb.embed_dim, 4)
np.testing.assert_allclose(
emb.embeddings,
jnp.asarray([[0., 0., 0., 0.], [0.5, 0.5, 0.5, 0.5],
[0.1, 0.2, 0.3, 0.4]]))
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/embed_test.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for JAX transformations that respect Haiku internal state."""
import collections
import collections.abc
from collections.abc import Mapping, MutableMapping
import functools
import inspect
from typing import Any, Callable, Optional, TypeVar
from haiku._src import base
import jax
import jax.numpy as jnp
InternalState = collections.namedtuple("InternalState", "params,state,rng")
Bundle = Mapping[str, Mapping[str, Any]]
T = TypeVar("T")
python_map = map # pylint: disable=invalid-name
def copy_structure(bundle: T) -> T:
return jax.tree_util.tree_map(lambda x: x, bundle)
def internal_state(*, params=True) -> InternalState:
frame = base.current_frame()
rng = frame.rng_stack.peek()
if rng is not None:
rng = rng.internal_state
return InternalState(
params=(copy_structure(frame.params) if params else None),
state=copy_structure(frame.state),
rng=copy_structure(rng))
def update_recursive(dst: MutableMapping[Any, Any], src: Mapping[Any, Any]):
for k, v in src.items():
if isinstance(v, collections.abc.Mapping):
dst.setdefault(k, {})
update_recursive(dst[k], v)
else:
if v is not None:
# NOTE: We only expect `None` values thanks to `difference`.
dst[k] = v
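# Illustrative worked example (editor's addition): ``update_recursive`` merges
# nested mappings in place and skips ``None`` leaves, which ``difference``
# (below) uses to mark unchanged entries. For instance:
#
#   dst = {"mlp": {"w": 1, "b": 2}}
#   update_recursive(dst, {"mlp": {"w": 10, "b": None}})
#   # dst is now {"mlp": {"w": 10, "b": 2}} -- the `None` leaf left `b` alone.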
def update_internal_state(state: InternalState):
frame = base.current_frame()
if not frame.params_frozen and state.params is not None:
update_recursive(frame.params, state.params)
update_recursive(frame.state, state.state)
rng = state.rng
if rng is not None:
frame.rng_stack.peek().replace_internal_state(rng)
def temporary_internal_state(state: InternalState, *, share_python_state=False):
"""Pushes a temporary copy of the internal state."""
state = copy_structure(state)
rng = state.rng
if rng is not None:
rng = base.PRNGSequence(rng)
current_state = internal_state()
params = state.params
if params is None:
params = current_state.params
state = state.state
if state is None:
state = current_state.state
frame = base.current_frame()
frame = frame.evolve(params=params, state=state, rng=rng,
decoupled=(not share_python_state))
return base.frame_stack(frame)
def reserve_up_to_full_rng_block():
"""If RNG is active in the current frame, reserve up to the default block."""
# TODO(lenamartens): Fix needing full block reservation in stateful
# control-flow by keeping track of current key with index and keeping a full
# block in PRNGSequence at all time.
rng_seq = base.current_frame().rng_stack.peek()
if rng_seq:
rng_seq.reserve_up_to_full()
def grad(fun, argnums=0, has_aux=False, holomorphic=False):
r"""Creates a function which evaluates the gradient of ``fun``.
NOTE: You only need this in a very specific case that you want to take a
gradient **inside** a :func:`transform`\ ed function and the function you are
differentiating uses :func:`set_state`. For example:
>>> class MyModule(hk.Module):
... def __call__(self, x):
... hk.set_state("last", x ** 2)
... return x ** 2
>>> def f(x):
... m = MyModule()
... g = hk.grad(m)(x)
... return g
>>> f = hk.transform_with_state(f)
>>> x = jnp.array(2.)
>>> params, state = jax.jit(f.init)(None, x)
>>> print(state["my_module"]["last"])
4.0
Args:
fun: Function to be differentiated. Its arguments at positions specified by
``argnums`` should be arrays, scalars, or standard Python containers. It
should return a scalar (which includes arrays with shape ``()`` but not
arrays with shape ``(1,)`` etc.)
argnums: Optional, integer or tuple of integers. Specifies which positional
argument(s) to differentiate with respect to (default 0).
has_aux: Optional, bool. Indicates whether ``fun`` returns a pair where the
first element is considered the output of the mathematical function to be
differentiated and the second element is auxiliary data. Default False.
holomorphic: Optional, bool. Indicates whether ``fun`` is promised to be
holomorphic. Default False.
Returns:
A function with the same arguments as ``fun``, that evaluates the gradient
of ``fun``. If `argnums` is an integer then the gradient has the same shape
and type as the positional argument indicated by that integer. If argnums is
a tuple of integers, the gradient is a tuple of values with the same shapes
and types as the corresponding arguments. If ``has_aux`` is True then a pair
of ``gradient, auxiliary_data`` is returned.
For example:
>>> grad_tanh = jax.grad(jax.numpy.tanh)
>>> print(grad_tanh(0.2))
0.96...
"""
value_and_grad_fun = value_and_grad(fun, argnums=argnums, has_aux=has_aux,
holomorphic=holomorphic)
@functools.wraps(fun)
def grad_fn(*args, **kwargs):
value, grads = value_and_grad_fun(*args, **kwargs)
if has_aux:
value, aux = value
return grads, aux
else:
return grads
return grad_fn
def value_and_grad(fun, argnums=0, has_aux=False, holomorphic=False):
r"""Creates a function which evaluates both ``fun`` and the grad of ``fun``.
NOTE: You only need this in a very specific case that you want to take a
gradient **inside** a :func:`transform`\ ed function and the function you are
differentiating uses :func:`set_state`. For example:
>>> class MyModule(hk.Module):
... def __call__(self, x):
... hk.set_state("last", jnp.sum(x))
... return x ** 2
>>> def f(x):
... m = MyModule()
... y, g = hk.value_and_grad(m)(x)
... return y, g
>>> f = hk.transform_with_state(f)
>>> x = jnp.array(2.)
>>> _ = jax.jit(f.init)(None, x)
Args:
fun: Function to be differentiated. Its arguments at positions specified by
``argnums`` should be arrays, scalars, or standard Python containers. It
should return a scalar (which includes arrays with shape ``()`` but not
arrays with shape ``(1,)`` etc.)
argnums: Optional, integer or tuple of integers. Specifies which positional
argument(s) to differentiate with respect to (default 0).
has_aux: Optional, bool. Indicates whether ``fun`` returns a pair where the
first element is considered the output of the mathematical function to be
differentiated and the second element is auxiliary data. Default False.
holomorphic: Optional, bool. Indicates whether ``fun`` is promised to be
holomorphic. Default False.
Returns:
A function with the same arguments as ``fun`` that evaluates both ``fun``
and the gradient of ``fun`` and returns them as a pair (a two-element
tuple). If ``argnums`` is an integer then the gradient has the same shape
and type as the positional argument indicated by that integer. If argnums is
a tuple of integers, the gradient is a tuple of values with the same shapes
and types as the corresponding arguments.
"""
if not base.inside_transform():
raise ValueError("hk.grad() should not be used outside of hk.transform(). "
"Use jax.grad() instead.")
@functools.wraps(fun)
def stateful_fun(*args, **kwargs):
state_in = kwargs.pop("hk_state")
with temporary_internal_state(state_in), \
base.push_jax_trace_level():
out = fun(*args, **kwargs)
out, aux = (out if has_aux else (out, None))
state_out = difference(state_in, internal_state())
return out, (aux, state_out)
grad_fun = jax.value_and_grad(stateful_fun, argnums=argnums,
has_aux=True, holomorphic=holomorphic)
@functools.wraps(grad_fun)
def wrapper(*args, **kwargs):
kwargs["hk_state"] = internal_state()
(value, (aux, hk_state)), grads = grad_fun(*args, **kwargs)
update_internal_state(hk_state)
if has_aux:
return (value, aux), grads
else:
return value, grads
return wrapper
class Box:
"""A pytree leaf that acts as a box."""
def __init__(self, value):
self.value = value
TwoLevelMapping = Mapping[Any, Mapping[Any, Any]]
TwoLevelMappingToBox = Mapping[Any, Mapping[Any, Box]]
def box_and_fill_missing(
a: TwoLevelMapping,
b: TwoLevelMapping,
) -> tuple[TwoLevelMappingToBox, TwoLevelMappingToBox]:
"""Returns boxed two level mappings with the same structure.
It is assumed that ``a`` is a subset of ``b``.
Args:
a: A two level mapping (e.g. Haiku parameters or state).
b: A two level mapping (e.g. Haiku parameters or state).
Returns:
A pair of two level mappings with ``Box`` wrapped leaves (suitable for use
with ``jax.tree_util.tree_*``). The mappings have the contents of ``a`` and
``b`` respectively. Both mappings have the structure from ``b``. Any missing
elements are set to ``Box(None)``.
"""
out_a = {k: {} for k in b}
out_b = {k: {} for k in b}
for k1, v1 in b.items():
for k2 in v1:
out_b[k1][k2] = Box(b[k1][k2])
if k1 in a and k2 in a[k1]:
out_a[k1][k2] = Box(a[k1][k2])
else:
out_a[k1][k2] = Box(None)
return out_a, out_b
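# Illustrative worked example (editor's addition): ``box_and_fill_missing``
# aligns two two-level mappings so they can be compared leaf-wise with
# ``jax.tree_util`` (``Box`` keeps ``None`` from being treated as an empty
# subtree). For instance, with ``a = {"m": {"w": 1}}`` and
# ``b = {"m": {"w": 1, "b": 2}}``:
#
#   boxed_a, boxed_b = box_and_fill_missing(a, b)
#   # boxed_a has the structure {"m": {"w": Box(1), "b": Box(None)}}
#   # boxed_b has the structure {"m": {"w": Box(1), "b": Box(2)}}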
def difference(before: InternalState, after: InternalState) -> InternalState:
"""Returns an InternalState object with unchanged items set to ``None``.
Note that to determine what values have changed we compare them by identity
not by value. This is only reasonable to do if `difference` is used to compare
state *inside* a JAX transform (e.g. comparing the arguments passed into JIT
with the values that you are about to return from it).
  This function never produces false negatives (e.g. we will never incorrectly
  say that a piece of state is unchanged when it has changed), however it may
  produce false positives. One well known case is if a value is traced by an
  inner JAX
transform but unchanged, the identity of the Python object will differ from
the value passed into the outer function, but the value will not have changed.
In this case `difference` will say that the value has changed. For example if
the following change happened inside a function whose state was being diffed
we would defensively say that ``u`` had changed value even though it had only
changed Python identity:
>>> u = hk.get_state("u", [], init=jnp.ones)
  >>> u, _ = jax.jit(lambda a: (a, a ** 2))(u)
>>> hk.set_state("u", u)
Args:
before: state before.
after: state after.
Returns:
The difference between before and after, with any values that have the same
identity before and after set to `None`.
"""
def if_changed(is_new, box_a, box_b):
if box_a.value is None or is_new(box_a.value, box_b.value):
return box_b.value
else:
return None
# params
is_new_param = lambda a, b: a is not b
params_before, params_after = box_and_fill_missing(before.params,
after.params)
params_after = jax.tree_util.tree_map(
functools.partial(if_changed, is_new_param), params_before, params_after)
# state
def is_new_state(a: base.StatePair, b: base.StatePair):
return a.initial is not b.initial or a.current is not b.current
state_before, state_after = box_and_fill_missing(before.state, after.state)
state_after = jax.tree_util.tree_map(
functools.partial(if_changed, is_new_state), state_before, state_after)
# rng
def is_new_rng(a: Optional[base.PRNGSequenceState],
b: Optional[base.PRNGSequenceState]):
if a is None:
return True
assert len(a) == 2 and len(b) == 2
return a[0] is not b[0] or a[1] is not b[1]
rng = after.rng if is_new_rng(before.rng, after.rng) else None
return InternalState(params_after, state_after, rng)
def thread_hk_state_in_kwargs(dec_fun):
"""Equivalent to jax.{} but passing Haiku state.""".format(dec_fun.__name__)
@functools.wraps(dec_fun)
def wrapped_dec_fun(fun, *dec_args, **dec_kwargs):
"""Decorates a modified version of ``fun`` that passes Haiku state."""
if not base.inside_transform():
raise ValueError(
"hk.{0}() should not be used outside of hk.transform. "
"Use jax.{0}() instead.".format(dec_fun.__name__))
@functools.wraps(fun)
def stateful_fun(*args, **kwargs):
state_in = kwargs.pop("hk_state")
with temporary_internal_state(state_in, share_python_state=True), \
base.push_jax_trace_level():
out = fun(*args, **kwargs)
return out, difference(state_in, internal_state())
dec_stateful_fun = dec_fun(stateful_fun, *dec_args, **dec_kwargs)
@functools.wraps(dec_stateful_fun)
def wrapper(*args, **kwargs):
kwargs["hk_state"] = internal_state()
out, state = dec_stateful_fun(*args, **kwargs)
update_internal_state(state)
return out
return wrapper
wrapped_dec_fun.__doc__ = f"Equivalent to jax.{dec_fun.__name__} but passing Haiku state."
return wrapped_dec_fun
jit = thread_hk_state_in_kwargs(jax.jit)
# pylint: disable=unnecessary-lambda
# pylint: disable=g-long-lambda
# Calls factory function to create a function then applies it to arguments.
_factory = lambda f: functools.wraps(f)(lambda *a, **k: f()(*a, **k))
# Wraps a function inside a lambda to hide its identity.
_lambda_wrap = lambda f: functools.wraps(f)(lambda *a, **k: f(*a, **k))
# Re-applies jax.remat on every call so nothing is reused via the function identity cache.
_remat_no_cache = functools.wraps(jax.remat)(
lambda f, *args, **kwargs: functools.wraps(f)(
_factory(lambda: jax.remat(_lambda_wrap(f), *args, **kwargs))))
# Haiku version of jax.remat.
remat = thread_hk_state_in_kwargs(_remat_no_cache)
# pylint: enable=unnecessary-lambda
# pylint: enable=g-long-lambda
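# --- Illustrative sketch (editor's addition, not part of the Haiku source). ---
# The Haiku-aware ``jit`` above threads params/state/rng through jax.jit so
# that state-using code keeps working inside a transform. The forward function
# below is hypothetical; ``haiku`` is imported lazily to avoid a circular
# import at module load time.
def _example_stateful_jit():
  import haiku
  def forward(x):
    def body(y):
      n = base.get_state("n", [], init=jnp.zeros)
      base.set_state("n", n + 1)
      return y * 2
    return jit(body)(x)            # `jit` is the wrapper defined above.
  forward_t = haiku.transform_with_state(forward)
  params, state = forward_t.init(None, jnp.ones([]))
  out, state = forward_t.apply(params, state, None, jnp.ones([]))
  return out, state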
def stateful_branch(branch_fun):
"""Calls branch_fun passing internal state in and out."""
@functools.wraps(branch_fun)
def new_branch_fun(operand):
state, operand = operand
with temporary_internal_state(state), \
base.push_jax_trace_level():
out = branch_fun(*operand)
reserve_up_to_full_rng_block()
# TODO(tomhennigan) Return difference of state in/out here.
return out, internal_state()
return new_branch_fun
SENTINEL = object()
def _new_cond(pred, true_fun, false_fun, *operands, operand=SENTINEL):
del pred, true_fun, false_fun, operands, operand
def _old_cond(pred, true_operand, true_fun, false_operand, false_fun):
del pred, true_operand, true_fun, false_operand, false_fun
def _memoize_by_id(f):
"""Memoizes the result of a higher order function on input function id."""
cache = {}
@functools.wraps(f)
def wrapper(g):
i = id(g)
try:
res = cache[i]
except KeyError:
res = cache[i] = f(g)
return res
return wrapper
RUNNING_INIT_HINT = """
Hint: A common mistake is to use `hk.cond(..)` or `hk.switch(..)` at init time and
create module parameters in one of the branches. At init time you should
unconditionally create the parameters of all modules you might want to use
at apply.
For hk.cond():
if hk.running_init():
    # At init time unconditionally create parameters in both modules.
my_other_module(x)
out = my_module(x)
else:
    out = hk.cond(pred, my_module, my_other_module, x)
For hk.switch():
branches = [my_module, lambda x: x]
if hk.running_init():
# At init time unconditionally create parameters in all branches.
for branch in branches:
        out = branch(x)
else:
out = hk.switch(idx, branches, x)
""".strip()
def with_output_structure_hint(f):
"""Adds a helpful hint to branch structure errors."""
@functools.wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except TypeError as e:
if not base.params_frozen() and "must have same type structure" in str(e):
raise TypeError(RUNNING_INIT_HINT) from e
else:
raise e
return wrapper
# pylint: disable=g-doc-args
@functools.wraps(jax.lax.cond)
@with_output_structure_hint
def cond(*args, **kwargs):
"""Equivalent to :func:`jax.lax.cond` but with Haiku state passed in/out.
>>> true_fn = hk.nets.ResNet50(10)
>>> false_fn = hk.Sequential([hk.Flatten(), hk.nets.MLP([300, 100, 10])])
>>> x = jnp.ones([1, 224, 224, 3])
>>> if hk.running_init():
... # At `init` run both branches to create parameters everywhere.
... true_fn(x)
... out = false_fn(x)
... else:
... # At `apply` conditionally call one of the modules.
... i = jax.random.randint(hk.next_rng_key(), [], 0, 100)
... out = hk.cond(i > 50, true_fn, false_fn, x)
Args:
pred: Boolean scalar type.
true_fun: Function (A -> B), to be applied if ``pred`` is ``True``.
false_fun: Function (A -> B), to be applied if ``pred`` is ``False``.
operands: Operands (A) input to either branch depending on ``pred``. The
type can be a scalar, array, or any pytree (nested Python tuple/list/dict)
thereof.
Returns:
Value (B) of either ``true_fun(*operands)`` or ``false_fun(*operands)``,
depending on the value of ``pred``. The type can be a scalar, array, or any
pytree (nested Python tuple/list/dict) thereof.
"""
# pylint: enable=g-doc-args
if not base.inside_transform():
raise ValueError("hk.cond() should not be used outside of hk.transform(). "
"Use jax.cond() instead.")
try:
bound_args = inspect.signature(_old_cond).bind(*args, **kwargs)
pred, true_operand, true_fun, false_operand, false_fun = bound_args.args
if not callable(true_fun) or not callable(false_fun):
# Two operand new cond case: cond(pred, tf, ff, 1, 2)
raise TypeError
except TypeError:
bound_args = inspect.signature(_new_cond).bind(*args, **kwargs)
bound_args.apply_defaults()
pred, true_fun, false_fun, *operands = bound_args.args
operand = bound_args.kwargs["operand"]
if operand is not SENTINEL:
if operands:
raise ValueError("When the operand keyword argument is used you cannot " # pylint: disable=raise-missing-from
"also pass operands positionally. Got "
f"operand={operand} and *operands={tuple(operands)}")
operands = (operand,)
del operand
else:
true_fun = lambda op, f=true_fun: f(op[0])
false_fun = lambda op, f=false_fun: f(op[1])
operands = ((true_operand, false_operand),)
reserve_up_to_full_rng_block()
stateful_branch_mem = _memoize_by_id(stateful_branch)
state = internal_state()
out, state = jax.lax.cond(pred,
true_fun=stateful_branch_mem(true_fun),
false_fun=stateful_branch_mem(false_fun),
operand=(state, operands))
update_internal_state(state)
return out
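# Illustrative sketch (editor's addition, not part of the Haiku source): both
# calling conventions accepted above dispatch to the same jax.lax.cond call.
# Assuming `x`, `y` and a boolean `pred` inside a transformed function:
#
#   out = cond(pred, lambda a, b: a + b, lambda a, b: a - b, x, y)  # new style
#   out = cond(pred, x, lambda a: a * 2, y, lambda a: a / 2)        # old style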
@with_output_structure_hint
def switch(index, branches, *operands):
r"""Equivalent to :func:`jax.lax.switch` but with Haiku state passed in/out.
Note that creating parameters inside a switch branch is not supported, as such
at init time we recommend you unconditionally evaluate all branches of your
switch and only use the switch at apply. For example:
>>> experts = [hk.nets.MLP([300, 100, 10]) for _ in range(5)]
>>> x = jnp.ones([1, 28 * 28])
>>> if hk.running_init():
... # During init unconditionally create params/state for all experts.
... for expert in experts:
... out = expert(x)
... else:
... # During apply conditionally apply (and update) only one expert.
... index = jax.random.randint(hk.next_rng_key(), [], 0, len(experts) - 1)
... out = hk.switch(index, experts, x)
Args:
index: Integer scalar type, indicating which branch function to apply.
branches: Sequence of functions (A -> B) to be applied based on index.
operands: Operands (A) input to whichever branch is applied.
Returns:
Value (B) of branch(\*operands) for the branch that was selected based on
index.
"""
if not base.inside_transform():
raise ValueError(
"hk.switch() should not be used outside of hk.transform(). "
"Use jax.switch() instead.")
reserve_up_to_full_rng_block()
stateful_branch_mem = _memoize_by_id(stateful_branch)
state = internal_state()
out, state = jax.lax.switch(
index, tuple(python_map(stateful_branch_mem, branches)), (state, operands)
)
update_internal_state(state)
return out
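# A small sketch of hk.switch following the init/apply pattern from the
# docstring above; the expert sizes and input shape are illustrative
# assumptions and the helper name is hypothetical.
def _example_switch_usage():
  import haiku as hk
  import jax
  import jax.numpy as jnp
  def forward(x):
    experts = [hk.nets.MLP([16, 4]) for _ in range(3)]
    if hk.running_init():
      # At init, connect every expert so all parameters are created.
      for expert in experts:
        out = expert(x)
    else:
      # At apply, route the batch to a single randomly chosen expert.
      index = jax.random.randint(hk.next_rng_key(), [], 0, len(experts))
      out = hk.switch(index, experts, x)
    return out
  f = hk.transform(forward)
  x = jnp.ones([2, 8])
  params = f.init(jax.random.PRNGKey(0), x)
  return f.apply(params, jax.random.PRNGKey(1), x)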
def scan(f, init, xs, length=None, reverse=False, unroll=1):
"""Equivalent to :func:`jax.lax.scan` but with Haiku state passed in/out."""
if not base.inside_transform():
raise ValueError("hk.scan() should not be used outside of hk.transform(). "
"Use jax.lax.scan() instead.")
if length is None:
length = jax.tree_util.tree_leaves(xs)[0].shape[0]
running_init_fn = not base.params_frozen()
if running_init_fn:
# During `init` we need to unroll one step of the scan, this is because our
# carry contains the Haiku state and during `init` this may change structure
# (e.g. as state is created).
if not length:
x0 = jax.tree_util.tree_map(lambda x: jnp.zeros(x.shape[1:], x.dtype), xs)
_, y0 = f(init, x0)
y0 = jax.tree_util.tree_map(
lambda y: jnp.zeros((0,) + y.shape, y.dtype), y0)
return init, y0
if reverse:
x0 = jax.tree_util.tree_map(lambda x: x[-1], xs)
xs = jax.tree_util.tree_map(lambda x: x[:-1], xs)
else:
x0 = jax.tree_util.tree_map(lambda x: x[0], xs)
xs = jax.tree_util.tree_map(lambda x: x[1:], xs)
init, y0 = f(init, x0)
y0 = jax.tree_util.tree_map(lambda y: jnp.expand_dims(y, 0), y0)
length -= 1
if not length:
return init, y0
@functools.wraps(f)
def stateful_fun(carry, x):
carry, state = carry
with temporary_internal_state(state):
with base.assert_no_new_parameters(), \
base.push_jax_trace_level():
carry, out = f(carry, x)
reserve_up_to_full_rng_block()
carry = (carry, internal_state(params=False))
return carry, out
# Before pulling out the internal state, reserve a full block of RNG keys.
  # This is to make sure we're always passing in the same number of subkeys in
# and out of the scan carry (scan requires equal length lists).
# After every scan iteration we reserve back up to the full block.
reserve_up_to_full_rng_block()
# We know that we don't need to thread params in and out, since for init we
# have already created them (given that above we unroll one step of the scan)
# and for apply we know they are immutable. As such we only need to thread the
# state and rng in and out.
init = (init, internal_state(params=False))
(carry, state), ys = jax.lax.scan(
stateful_fun, init, xs, length, reverse, unroll=unroll)
update_internal_state(state)
if running_init_fn:
if reverse:
ys = jax.tree_util.tree_map(
lambda y0, ys: jnp.concatenate([ys, y0]), y0, ys)
else:
ys = jax.tree_util.tree_map(
lambda y0, ys: jnp.concatenate([y0, ys]), y0, ys)
return carry, ys
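# A minimal sketch of hk.scan threading Haiku state through a loop; the
# running-sum step and counter state used here are illustrative assumptions.
def _example_scan_usage():
  import haiku as hk
  import jax
  import jax.numpy as jnp
  def forward(xs):
    def step(carry, x):
      # Count how many elements have been consumed, as mutable Haiku state.
      counter = hk.get_state("counter", [], init=jnp.zeros)
      hk.set_state("counter", counter + 1)
      carry = carry + x
      return carry, carry
    return hk.scan(step, jnp.zeros([]), xs)
  f = hk.transform_with_state(forward)
  xs = jnp.arange(5.0)
  params, state = f.init(jax.random.PRNGKey(0), xs)
  (carry, ys), state = f.apply(params, state, None, xs)
  return carry, ys, state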
def map(f, xs): # pylint: disable=redefined-builtin
"""Equivalent to :func:`jax.lax.map` but with Haiku state passed in/out."""
g = lambda _, x: ((), f(x))
_, ys = scan(g, (), xs)
return ys
def fori_loop(lower, upper, body_fun, init_val):
"""Equivalent to :func:`jax.lax.fori_loop` with Haiku state passed in/out."""
if not base.inside_transform():
raise ValueError(
"hk.fori_loop() should not be used outside of hk.transform(). "
"Use jax.lax.fori_loop() instead.")
@functools.wraps(body_fun)
def pure_body_fun(i, val):
state, val = val
with temporary_internal_state(state), \
base.push_jax_trace_level():
val = body_fun(i, val)
reserve_up_to_full_rng_block()
state = internal_state()
return state, val
if not base.params_frozen():
# During init we need to unwind one step of the loop to ensure the Haiku
# state before and after the body has the same structure.
init_val = body_fun(lower, init_val)
lower += 1
try:
if upper - lower == 0:
return init_val
except jax.errors.ConcretizationTypeError:
# upper or lower might be tracers, which jax.lax.fori_loop can handle.
pass
reserve_up_to_full_rng_block()
state = internal_state()
init_val = state, init_val
state, val = jax.lax.fori_loop(lower, upper, pure_body_fun, init_val)
update_internal_state(state)
return val
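# A minimal sketch of hk.fori_loop applying the same module a fixed number of
# times; the Linear body and loop bounds are illustrative assumptions.
def _example_fori_loop_usage():
  import haiku as hk
  import jax
  import jax.numpy as jnp
  def forward(x):
    linear = hk.Linear(x.shape[-1])
    # The body must keep the carry shape fixed, so the output size matches x.
    body = lambda i, val: linear(val)
    return hk.fori_loop(0, 3, body, x)
  f = hk.transform(forward)
  x = jnp.ones([1, 4])
  params = f.init(jax.random.PRNGKey(0), x)
  return f.apply(params, None, x)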
def maybe_get_axis(axis: int, arrays: Any) -> Optional[int]:
"""Returns `array.shape[axis]` for one of the arrays in the input."""
shapes = [a.shape for a in jax.tree_util.tree_leaves(arrays)]
sizes = {s[axis] for s in shapes}
if len(sizes) != 1:
raise ValueError("Arrays must have the same mapped axis size, found "
f"sizes {sizes} for input shapes {shapes}")
size, = sizes
return size
# Uniq but maintaining insertion order.
uniq = lambda x: tuple({k: None for k in x}.keys())
def get_mapped_axis_size(args: tuple[Any, ...], in_axes: Any) -> int:
sizes = uniq(jax.tree_util.tree_leaves(
jax.tree_util.tree_map(maybe_get_axis, in_axes, args)))
assert sizes, "hk.vmap should guarantee non-empty in_axes"
# NOTE: We use the first in_axes regardless of how many non-unique values
# there are to allow JAX to handle multiple conflicting sizes.
return sizes[0]
def add_split_rng_error(f):
"""Adds a nice error message when split_rng is missing."""
@functools.wraps(f)
def wrapper(*args, **kwargs):
if "split_rng" not in kwargs and not wrapper.require_split_rng:
kwargs["split_rng"] = False
if "split_rng" not in kwargs:
try:
return f(*args, **kwargs)
except TypeError as e:
raise TypeError("Haiku now requires the split_rng argument to be "
"passed to hk.vmap. If you have code using the old "
"API which you cannot change, you can opt-out of this "
"requirement by using "
"`hk.vmap.require_split_rng = False`.") from e
return f(*args, **kwargs)
wrapper.require_split_rng = True
return wrapper
list_to_tuple = lambda x: tuple(x) if isinstance(x, list) else x
@add_split_rng_error
def vmap(
fun: Callable[..., Any],
in_axes=0,
out_axes=0,
axis_name: Optional[str] = None,
axis_size: Optional[int] = None,
*,
split_rng: bool,
) -> Callable[..., Any]:
"""Equivalent to :func:`jax.vmap` with module parameters/state not mapped.
The behaviour of Haiku random key APIs under :func:`vmap` is controlled by the
``split_rng`` argument:
>>> x = jnp.arange(2)
>>> f = hk.vmap(lambda _: hk.next_rng_key(), split_rng=False)
>>> key1, key2 = f(x)
>>> assert (key1 == key2).all()
>>> f = hk.vmap(lambda _: hk.next_rng_key(), split_rng=True)
>>> key1, key2 = f(x)
>>> assert not (key1 == key2).all()
Random numbers in Haiku are typically used for two things, firstly for
initialising model parameters, and secondly for creating random samples as
part of the forward pass of a neural network (e.g. for dropout). If you are
using :func:`vmap` with a module that uses Haiku random keys for both (e.g.
you don't pass keys explicitly into the network), then it is quite likely that
you will want to vary the value of ``split_rng`` depending on whether we are
  initializing (e.g. creating model parameters) or applying the model. An easy
way to do this is to set ``split_rng=(not hk.running_init())``.
For more advanced use cases, such as mapping module parameters, we suggest
users instead use :func:`lift` or :func:`~haiku.experimental.transparent_lift`
in combination with :func:`jax.vmap`.
Args:
fun: See :func:`jax.vmap`.
in_axes: See :func:`jax.vmap`.
out_axes: See :func:`jax.vmap`.
axis_name: See :func:`jax.vmap`.
axis_size: See :func:`jax.vmap`.
split_rng: Controls whether random key APIs in Haiku (e.g.
:func:`next_rng_key`) return different (aka. the internal key is split
before calling your mapped function) or the same (aka. the internal key
      is broadcast before calling your mapped function) key. See the docstring
for examples.
Returns:
See :func:`jax.vmap`.
"""
if not jax.tree_util.tree_leaves(in_axes):
raise ValueError(
f"{fun.__name__} must have at least one non-None value in in_axes "
"to use with `hk.vmap`.")
params_axes = state_axes = None
rng_axes = (0 if split_rng else None)
haiku_state_axes = InternalState(params_axes, state_axes, rng_axes)
in_axes = list_to_tuple(in_axes), haiku_state_axes
out_axes = out_axes, haiku_state_axes
@functools.wraps(fun)
def pure_fun(args, state_in):
if split_rng:
      # NOTE: In the case of split_rng we receive an RNG key (rather than the
# internal state of a PRNGSequence) so we need to construct that here.
rng = base.PRNGSequence(state_in.rng).internal_state
state_in = InternalState(state_in.params, state_in.state, rng)
with temporary_internal_state(state_in), \
base.push_jax_trace_level():
out = fun(*args)
state_out = difference(state_in, internal_state())
return out, state_out
@functools.wraps(fun)
def mapped_fun(*args):
base.assert_context("vmap")
mapped_pure_fun = jax.vmap(pure_fun, in_axes=in_axes, out_axes=out_axes,
axis_name=axis_name, axis_size=axis_size)
state = internal_state()
if split_rng:
# Need to take a new key and split.
num = get_mapped_axis_size(args, in_axes[0])
rng = base.next_rng_keys(num)
state = internal_state() # Needed since we mutated internal RNG.
saved_rng = state.rng
state = InternalState(state.params, state.state, rng)
try:
out, state = mapped_pure_fun(args, state)
except ValueError as err:
if split_rng and not base.params_frozen() and "out_axes" in str(err):
# TODO(lenamartens): add error for state too.
raise ValueError("hk.vmap does not support setting split_rng to True "
"during initialization because it assumes parameters "
"are always shared along the mapped dimension. "
"Consider switching the value of `split_rng` to False "
"during initialization through "
"`split_rng=(not hk.running_init())`."
) from err
else:
raise err
if split_rng:
state = InternalState(state.params, state.state, saved_rng)
update_internal_state(state)
return out
return mapped_fun
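# A minimal sketch of hk.vmap with split_rng toggled via hk.running_init(), as
# recommended in the docstring above; the per-example network and shapes are
# illustrative assumptions.
def _example_vmap_usage():
  import haiku as hk
  import jax
  import jax.numpy as jnp
  def forward(x):
    def per_example(v):
      v = hk.Linear(4)(v)
      return hk.dropout(hk.next_rng_key(), 0.5, v)
    # Parameters are shared across the mapped axis; the RNG is only split at
    # apply time so each example gets its own dropout mask.
    return hk.vmap(per_example, split_rng=(not hk.running_init()))(x)
  f = hk.transform(forward)
  x = jnp.ones([8, 3])
  params = f.init(jax.random.PRNGKey(0), x)
  return f.apply(params, jax.random.PRNGKey(1), x)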
def while_loop(cond_fun, body_fun, init_val):
"""Equivalent to jax.lax.while_loop with Haiku state threaded in/out."""
if not base.params_frozen():
raise ValueError(
"hk.while_loop does not support initialization (since we cannot "
"statically determine if your loop will run at least once). Please "
"use `hk.running_init` to run the body unconditionally:\n"
"\n"
" if hk.running_init():\n"
" # Unconditionally connect the module at init time.\n"
" val = module(val)\n"
" else:\n"
" val = hk.while_loop(lambda val: val.mean() < 1, module, val)\n")
@functools.wraps(cond_fun)
def pure_cond_fun(val):
val, _ = val
try:
with base.assert_state_unchanged():
return cond_fun(val)
except base.StateChangedError as e:
# If we find a use case for updating state/using rng in `cond` we would
# need to make a change in JAX itself (to support aux in/out of the cond).
raise ValueError(
"`hk.while_loop` does not support `hk.set_state`, `hk.next_rng_key` "
"(et al) in `cond_fun`."
) from e
@functools.wraps(body_fun)
def pure_body_fun(val):
val, state = val
with temporary_internal_state(state), \
base.push_jax_trace_level():
val = body_fun(val)
reserve_up_to_full_rng_block()
state = internal_state()
return val, state
reserve_up_to_full_rng_block()
init_val = (init_val, internal_state())
val, state = jax.lax.while_loop(pure_cond_fun, pure_body_fun, init_val)
update_internal_state(state)
return val
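# A minimal sketch of hk.while_loop using the hk.running_init() pattern from
# the error message above; the counter-based stopping condition is an
# illustrative assumption.
def _example_while_loop_usage():
  import haiku as hk
  import jax
  import jax.numpy as jnp
  def forward(x):
    module = hk.Linear(x.shape[-1], name="loop_body")
    def body(val):
      i, v = val
      return i + 1, module(v)
    if hk.running_init():
      # Unconditionally connect the module at init time.
      return body((0, x))
    return hk.while_loop(lambda val: val[0] < 3, body, (0, x))
  f = hk.transform(forward)
  x = jnp.ones([1, 4])
  params = f.init(jax.random.PRNGKey(0), x)
  return f.apply(params, None, x)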
def eval_shape(fun, *args, **kwargs):
"""Equivalent to jax.eval_shape with any changed Haiku state discarded."""
if not base.inside_transform():
raise ValueError(
"hk.eval_shape() should not be used outside of hk.transform(). "
"Use jax.eval_shape() instead.")
@functools.wraps(fun)
def stateless_fun(state, *args, **kwargs):
with temporary_internal_state(state), \
base.push_jax_trace_level():
out = fun(*args, **kwargs)
# Don't return changed state
return out
out_shape = jax.eval_shape(stateless_fun, internal_state(), *args, **kwargs)
return out_shape
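# A minimal sketch of hk.eval_shape used to query an output shape without
# keeping any state changes; it assumes eval_shape is exposed as hk.eval_shape
# and the MLP sizes and input shape are illustrative.
def _example_eval_shape_usage():
  import haiku as hk
  import jax
  import jax.numpy as jnp
  def forward(x):
    net = hk.nets.MLP([32, 8])
    # Abstractly evaluate the network to get its output shape, then run it.
    out_struct = hk.eval_shape(net, x)
    return net(x), out_struct.shape
  f = hk.transform(forward)
  x = jnp.ones([4, 16])
  params = f.init(jax.random.PRNGKey(0), x)
  return f.apply(params, None, x)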
|
dm-haiku-main
|
haiku/_src/stateful.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layer Norm."""
import collections.abc
from collections.abc import Sequence
from typing import Optional, Union
from haiku._src import base
from haiku._src import initializers
from haiku._src import module
from haiku._src import utils
import jax
import jax.numpy as jnp
import numpy as np
# If you are forking replace this with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
get_parameter = base.get_parameter
initializers = initializers
Module = module.Module
get_channel_index = utils.get_channel_index
# pylint: enable=invalid-name
del base, module, initializers, utils
AxisOrAxes = Union[int, Sequence[int], slice]
AxesOrSlice = Union[tuple[int, ...], slice]
# TODO(tomhennigan): Update users to `param_axis=-1` and flip + remove this.
ERROR_IF_PARAM_AXIS_NOT_EXPLICIT = False
def to_axes_or_slice(axis: AxisOrAxes) -> AxesOrSlice:
if isinstance(axis, slice):
return axis
elif isinstance(axis, int):
return (axis,)
elif (isinstance(axis, collections.abc.Iterable) and
all(isinstance(ax, int) for ax in axis)):
return tuple(axis)
else:
raise ValueError(
f"`axis` should be an int, slice or iterable of ints. Got: {axis}")
def to_abs_axes(axis: AxesOrSlice, ndim: int) -> tuple[int, ...]:
if isinstance(axis, slice):
return tuple(range(ndim)[axis])
else:
return tuple(sorted({a % ndim for a in axis}))
class LayerNorm(hk.Module):
"""LayerNorm module.
See: https://arxiv.org/abs/1607.06450.
Example usage:
>>> ln = hk.LayerNorm(axis=-1, param_axis=-1,
... create_scale=True, create_offset=True)
>>> x = ln(jnp.ones([8, 224, 224, 3]))
"""
def __init__(
self,
axis: AxisOrAxes,
create_scale: bool,
create_offset: bool,
eps: float = 1e-5,
scale_init: Optional[hk.initializers.Initializer] = None,
offset_init: Optional[hk.initializers.Initializer] = None,
use_fast_variance: bool = False,
name: Optional[str] = None,
*,
param_axis: Optional[AxisOrAxes] = None,
):
"""Constructs a LayerNorm module.
Args:
axis: Integer, list of integers, or slice indicating which axes to
normalize over. Note that the shape of the scale/offset parameters are
controlled by the ``param_axis`` argument.
create_scale: Bool, defines whether to create a trainable scale
per channel applied after the normalization.
create_offset: Bool, defines whether to create a trainable offset
per channel applied after normalization and scaling.
      eps: Small epsilon to avoid division by zero variance. Defaults to ``1e-5``,
as in the paper and Sonnet.
scale_init: Optional initializer for gain (aka scale). By default, one.
offset_init: Optional initializer for bias (aka offset). By default, zero.
use_fast_variance: If true, use a faster but less numerically stable
formulation for computing variance.
name: The module name.
param_axis: Axis used to determine the parameter shape of the learnable
scale/offset. Sonnet sets this to the channel/feature axis (e.g. to
``-1`` for ``NHWC``). Other libraries set this to the same as the
reduction axis (e.g. ``axis=param_axis``).
"""
super().__init__(name=name)
if not create_scale and scale_init is not None:
raise ValueError("Cannot set `scale_init` if `create_scale=False`.")
if not create_offset and offset_init is not None:
raise ValueError("Cannot set `offset_init` if `create_offset=False`.")
self.axis = to_axes_or_slice(axis)
self.eps = eps
self.create_scale = create_scale
self.create_offset = create_offset
self.scale_init = scale_init or jnp.ones
self.offset_init = offset_init or jnp.zeros
self.use_fast_variance = use_fast_variance
self._param_axis_passed_explicitly = param_axis is not None
self.param_axis = (
(-1,) if param_axis is None else to_axes_or_slice(param_axis))
def __call__(
self,
inputs: jax.Array,
scale: Optional[jax.Array] = None,
offset: Optional[jax.Array] = None,
) -> jax.Array:
"""Connects the layer norm.
Args:
inputs: An array, where the data format is ``[N, ..., C]``.
scale: An array up to n-D. The shape of this tensor must be broadcastable
to the shape of ``inputs``. This is the scale applied to the normalized
inputs. This cannot be passed in if the module was constructed with
``create_scale=True``.
offset: An array up to n-D. The shape of this tensor must be broadcastable
to the shape of ``inputs``. This is the offset applied to the normalized
inputs. This cannot be passed in if the module was constructed with
``create_offset=True``.
Returns:
The array, normalized.
"""
if self.create_scale and scale is not None:
raise ValueError(
"Cannot pass `scale` at call time if `create_scale=True`.")
if self.create_offset and offset is not None:
raise ValueError(
"Cannot pass `offset` at call time if `create_offset=True`.")
axis = to_abs_axes(self.axis, inputs.ndim)
mean = jnp.mean(inputs, axis=axis, keepdims=True)
if self.use_fast_variance:
mean_of_squares = jnp.mean(jnp.square(inputs), axis=axis, keepdims=True)
variance = mean_of_squares - jnp.square(mean)
else:
variance = jnp.var(inputs, axis=axis, keepdims=True)
if ((self.create_scale or self.create_offset) and
not self._param_axis_passed_explicitly):
if ERROR_IF_PARAM_AXIS_NOT_EXPLICIT and axis != (inputs.ndim - 1,):
raise ValueError("When axis is not the final dimension we require "
"you to also pass `param_axis` in the ctor."
f" axis={axis} ndim={inputs.ndim}")
# Shape for the learnable scale and offset is the number of channels.
# See: https://arxiv.org/pdf/1803.08494.pdf around equation 6.
param_axis = to_abs_axes(self.param_axis, inputs.ndim)
if param_axis == (inputs.ndim - 1,):
# For param_axis=-1 we store non-broadcast param shape for compatibility
# with older checkpoints.
param_shape = (inputs.shape[-1],)
else:
param_shape = tuple((inputs.shape[i] if i in param_axis else 1)
for i in range(inputs.ndim))
if self.create_scale:
scale = hk.get_parameter("scale", param_shape, inputs.dtype,
init=self.scale_init)
elif scale is None:
scale = np.array(1., dtype=inputs.dtype)
if self.create_offset:
offset = hk.get_parameter("offset", param_shape, inputs.dtype,
init=self.offset_init)
elif offset is None:
offset = np.array(0., dtype=inputs.dtype)
if jax.config.jax_numpy_rank_promotion != "allow":
# TODO(b/234327547): Explicit bcast triggers excessive mem usage on TPU.
# We should remove the conditional (and always broadcast) when the
# referenced bug is fixed.
scale = jnp.broadcast_to(scale, inputs.shape)
offset = jnp.broadcast_to(offset, inputs.shape)
mean = jnp.broadcast_to(mean, inputs.shape)
eps = jax.lax.convert_element_type(self.eps, variance.dtype)
inv = scale * jax.lax.rsqrt(variance + eps)
return inv * (inputs - mean) + offset
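# A short usage sketch of LayerNorm matching the class docstring above; the
# input shape and transform boilerplate are illustrative assumptions.
def _example_layer_norm_usage():
  import haiku as hk
  import jax
  import jax.numpy as jnp
  def forward(x):
    ln = hk.LayerNorm(axis=-1, param_axis=-1,
                      create_scale=True, create_offset=True)
    return ln(x)
  f = hk.transform(forward)
  x = jnp.ones([8, 224, 224, 3])
  params = f.init(jax.random.PRNGKey(0), x)
  # params["layer_norm"] holds "scale" and "offset", each of shape [3].
  return f.apply(params, None, x)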
class InstanceNorm(LayerNorm):
"""Normalizes inputs along the spatial dimensions.
See :class:`LayerNorm` for more details.
"""
def __init__(
self,
create_scale: bool,
create_offset: bool,
eps: float = 1e-5,
scale_init: Optional[hk.initializers.Initializer] = None,
offset_init: Optional[hk.initializers.Initializer] = None,
data_format: str = "channels_last",
name: Optional[str] = None,
):
"""Constructs an :class:`InstanceNorm` module.
This method creates a module which normalizes over the spatial dimensions.
Args:
create_scale: ``bool`` representing whether to create a trainable scale
per channel applied after the normalization.
create_offset: ``bool`` representing whether to create a trainable offset
per channel applied after normalization and scaling.
eps: Small epsilon to avoid division by zero variance. Defaults to
``1e-5``.
scale_init: Optional initializer for the scale variable. Can only be set
if ``create_scale=True``. By default scale is initialized to ``1``.
offset_init: Optional initializer for the offset variable. Can only be set
if ``create_offset=True``. By default offset is initialized to ``0``.
data_format: The data format of the input. Can be either
``channels_first``, ``channels_last``, ``N...C`` or ``NC...``. By
default it is ``channels_last``. See :func:`get_channel_index`.
name: Name of the module.
"""
param_axis = hk.get_channel_index(data_format)
if param_axis == 1:
axis = slice(2, None)
else: # channel_index = -1
assert param_axis == -1
axis = slice(1, -1)
super().__init__(
axis=axis,
create_scale=create_scale,
create_offset=create_offset,
eps=eps,
scale_init=scale_init,
offset_init=offset_init,
param_axis=param_axis,
name=name)
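# A short usage sketch of InstanceNorm normalizing over spatial dimensions;
# the NHWC input shape is an illustrative assumption.
def _example_instance_norm_usage():
  import haiku as hk
  import jax
  import jax.numpy as jnp
  def forward(x):
    norm = hk.InstanceNorm(create_scale=True, create_offset=True,
                           data_format="channels_last")
    return norm(x)
  f = hk.transform(forward)
  x = jnp.ones([2, 32, 32, 3])
  params = f.init(jax.random.PRNGKey(0), x)
  return f.apply(params, None, x)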
|
dm-haiku-main
|
haiku/_src/layer_norm.py
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.basic."""
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import base
from haiku._src import basic
from haiku._src import test_utils
from haiku._src import transform
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
class BasicTest(parameterized.TestCase):
def test_onehot_shape(self):
indices = jnp.arange(24, dtype=jnp.float32).reshape([2, 3, 4])
num_classes = 24
out = basic.one_hot(indices, num_classes=num_classes)
self.assertEqual(out.shape, (2, 3, 4, num_classes))
@parameterized.parameters(1, 10)
def test_multinomial_r1(self, num_samples):
out = basic.multinomial(random.PRNGKey(428), jnp.ones([4]), num_samples)
self.assertEqual(out.shape, (num_samples,))
@parameterized.parameters(1, 10)
def test_multinomial_r2(self, num_samples):
out = basic.multinomial(random.PRNGKey(428), jnp.ones([3, 4]), num_samples)
self.assertEqual(out.shape, (3, num_samples))
@test_utils.transform_and_run
def test_sequential_params(self):
seq = basic.Sequential([
basic.Sequential([basic.Linear(2), basic.Linear(2)]),
basic.Sequential([lambda x: basic.Linear(2)(x * 1)])
])
for _ in range(2):
# Connect seq to ensure params are created. Connect twice to ensure that
# we see the two instances of the lambda Linear.
seq(jnp.zeros([1, 1]))
params = seq.params_dict()
self.assertCountEqual(
list(params), [
"linear/w", "linear/b", "linear_1/w", "linear_1/b",
"sequential_1/linear/w", "sequential_1/linear/b"
])
@test_utils.transform_and_run
def test_sequential(self):
seq = basic.Sequential([basic.Linear(2), jax.nn.relu])
out = seq(jnp.zeros([3, 2]))
self.assertEqual(out.shape, (3, 2))
@test_utils.transform_and_run
def test_dropout(self):
rate = 0.25
x = basic.dropout(base.next_rng_key(),
rate,
jnp.ones([1000, 1000])) # larger is less flaky test
self.assertAlmostEqual(jnp.mean(x == 0), rate, delta=0.001)
# The dropped out tensor is rescaled to preserve the same mean.
self.assertAlmostEqual(jnp.mean(x), 1.0, delta=0.01)
@test_utils.transform_and_run
def test_dropout_broadcasts(self):
x_in = jnp.ones([3, 3, 3, 3])
x_out = basic.dropout(base.next_rng_key(),
0.5,
x_in,
broadcast_dims=(1,))
np.testing.assert_allclose(x_out[:, 0], x_out[:, 1])
np.testing.assert_allclose(x_out[:, 0], x_out[:, 2])
self.assertRaises(AssertionError, np.testing.assert_allclose,
x_out[:, :, 0], x_out[:, :, 1])
self.assertRaises(AssertionError, np.testing.assert_allclose,
x_out[:, :, 0], x_out[:, :, 2])
self.assertRaises(AssertionError, np.testing.assert_allclose,
x_out[0], x_out[1])
self.assertEqual(x_in.shape, x_out.shape)
x_out = basic.dropout(base.next_rng_key(),
0.5,
x_in,
broadcast_dims=(1, 2))
np.testing.assert_allclose(x_out[:, 0], x_out[:, 1])
np.testing.assert_allclose(x_out[:, 0], x_out[:, 2])
np.testing.assert_allclose(x_out[:, :, 0], x_out[:, :, 1])
np.testing.assert_allclose(x_out[:, :, 0], x_out[:, :, 2])
self.assertRaises(AssertionError, np.testing.assert_allclose,
x_out[0], x_out[1])
self.assertEqual(x_in.shape, x_out.shape)
@test_utils.transform_and_run
def test_dropout_jit(self):
jax.jit(basic.dropout)(base.next_rng_key(), 0.25, jnp.ones([3, 3]))
@parameterized.parameters(jnp, np)
def test_merge_leading_dims_preserves_type(self, xnp):
inputs = xnp.ones(shape=(2, 3, 4))
outputs = basic.merge_leading_dims(inputs, 2)
self.assertEqual(type(inputs), type(outputs))
@parameterized.parameters(jnp, np)
def test_split_leading_dims_preserves_type(self, xnp):
inputs = xnp.ones(shape=(2 * 3, 4))
outputs = basic.split_leading_dim(inputs, (2, 3))
self.assertEqual(type(inputs), type(outputs))
def test_batchapply(self):
def raises(a, b):
if len(a.shape) != 2:
raise ValueError("a must be shape 2")
if len(b.shape) != 1:
raise ValueError("b must be shape 1")
return a + b
out = basic.BatchApply(raises)(jnp.ones([2, 3, 4]), jnp.ones([4]))
np.testing.assert_array_equal(out, 2 * jnp.ones([2, 3, 4]))
def test_batchapply_accepts_float(self):
def raises(a, b):
if len(a.shape) != 2:
raise ValueError("a must be shape 2")
return a + b
out = basic.BatchApply(raises)(jnp.ones([2, 3, 4]), 2.)
np.testing.assert_array_equal(out, 3 * jnp.ones([2, 3, 4]))
def test_batchapply_accepts_none(self):
def raises(a, b):
if a is not None:
raise ValueError("a must be None.")
if len(b.shape) != 2:
raise ValueError("b must be shape 2")
return 3 * b
out = basic.BatchApply(raises)(None, jnp.ones([2, 3, 4]))
np.testing.assert_array_equal(out, 3 * jnp.ones([2, 3, 4]))
def test_batchapply_raises(self):
with self.assertRaisesRegex(ValueError, "requires at least one input"):
basic.BatchApply(lambda: 1)()
def test_expand_apply(self):
def raises(a, b):
if len(a.shape) != 3:
raise ValueError("a must be shape 3")
if len(b.shape) != 2:
raise ValueError("b must be shape 2")
return a + b
out = basic.expand_apply(raises)(jnp.ones([3, 4]), jnp.ones([4]))
np.testing.assert_array_equal(out, 2 * jnp.ones([3, 4]))
def test_expand_apply_raises(self):
with self.assertRaisesRegex(ValueError, "only supports axis=0 or axis=-1"):
basic.expand_apply(lambda: 1, axis=1)()
@test_utils.transform_and_run
def test_to_module(self):
def bias_fn(x):
b = base.get_parameter("b", [], init=jnp.ones)
return x + b
Bias = basic.to_module(bias_fn) # pylint: disable=invalid-name
mod = Bias()
self.assertEqual(mod(jnp.ones([])), 2.)
@test_utils.transform_and_run
def test_to_module_error_invalid_name(self):
def bias_fn(x):
b = base.get_parameter("b", [], init=jnp.ones)
return x + b
cls = basic.to_module(bias_fn)
garbage = object()
with self.assertRaisesRegex(TypeError,
f"Expected a string name .* got: {garbage}"):
cls(garbage) # pytype: disable=wrong-arg-types
@test_utils.transform_and_run
def test_to_module_error_docs(self):
def documented_fn():
"""Really great docs."""
def undocumented_fn():
pass
cls = basic.to_module(documented_fn)
documented = cls()
self.assertEqual(documented.__doc__, "Really great docs.")
self.assertEqual(documented.__call__.__doc__, "Really great docs.")
cls = basic.to_module(undocumented_fn)
undocumented = cls()
self.assertEqual(undocumented.__doc__, "Module produced by `hk.to_module`.")
self.assertIsNone(undocumented.__call__.__doc__)
class LinearTest(parameterized.TestCase):
def test_linear_rank1(self):
def f():
return basic.Linear(output_size=2)(jnp.zeros([6]))
init_fn, apply_fn = transform.transform(f)
params = init_fn(random.PRNGKey(428))
self.assertEqual(params["linear"]["w"].shape, (6, 2))
self.assertEqual(params["linear"]["b"].shape, (2,))
self.assertEqual(apply_fn(params, None).shape, (2,))
def test_linear_rank2(self):
def f():
return basic.Linear(output_size=2)(jnp.zeros((5, 6)))
init_fn, apply_fn = transform.transform(f)
params = init_fn(random.PRNGKey(428))
self.assertEqual(params["linear"]["w"].shape, (6, 2))
self.assertEqual(params["linear"]["b"].shape, (2,))
self.assertEqual(apply_fn(params, None).shape, (5, 2))
def test_linear_rank3(self):
def f():
return basic.Linear(output_size=2)(jnp.zeros((2, 5, 6)))
init_fn, apply_fn = transform.transform(f)
params = init_fn(random.PRNGKey(428))
self.assertEqual(params["linear"]["w"].shape, (6, 2))
self.assertEqual(params["linear"]["b"].shape, (2,))
self.assertEqual(apply_fn(params, None).shape, (2, 5, 2))
def test_linear_without_bias_has_zero_in_null_space(self):
def f():
return basic.Linear(output_size=6, with_bias=False)(jnp.zeros((5, 6)))
init_fn, apply_fn = transform.transform(f)
params = init_fn(random.PRNGKey(428))
self.assertEqual(params["linear"]["w"].shape, (6, 6))
self.assertNotIn("b", params["linear"])
np.testing.assert_array_almost_equal(
apply_fn(params, None), jnp.zeros((5, 6)))
@parameterized.parameters(None,
jax.lax.Precision.DEFAULT,
jax.lax.Precision.HIGH,
jax.lax.Precision.HIGHEST)
def test_precision(self, precision):
def f(x):
return basic.Linear(1)(x, precision=precision)
f = transform.transform(f)
rng = jax.random.PRNGKey(42)
x = np.ones([1, 1])
params = f.init(rng, x)
c = (
jax.jit(lambda x: f.apply(params, None, x))
.lower(x)
.compiler_ir(dialect="hlo")
)
hlo = c.as_hlo_text()
op_line = next(l for l in hlo.split("\n") if "dot(" in l)
if precision is not None and precision != jax.lax.Precision.DEFAULT:
name = str(precision).lower()
self.assertRegex(op_line, f"operand_precision={{{name},{name}}}")
else:
self.assertNotIn("operand_precision", op_line)
if __name__ == "__main__":
absltest.main()
|
dm-haiku-main
|
haiku/_src/basic_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Depthwise Convolutional Haiku module."""
from collections.abc import Sequence
from typing import Optional, Union
from haiku._src import base
from haiku._src import initializers
from haiku._src import module
from haiku._src import utils
import jax
from jax import lax
import jax.numpy as jnp
import numpy as np
# If you are forking replace this block with `import haiku as hk`.
# pylint: disable=invalid-name
class hk:
initializers = initializers
get_parameter = base.get_parameter
Module = module.Module
get_channel_index = utils.get_channel_index
# pylint: enable=invalid-name
del base, module, initializers
DIMENSION_NUMBERS = {
1: ("NWC", "WIO", "NWC"),
2: ("NHWC", "HWIO", "NHWC"),
3: ("NDHWC", "DHWIO", "NDHWC")
}
DIMENSION_NUMBERS_NCSPATIAL = {
1: ("NCH", "HIO", "NCH"),
2: ("NCHW", "HWIO", "NCHW"),
3: ("NCDHW", "DHWIO", "NCDHW")
}
class DepthwiseConvND(hk.Module):
"""N-D Depthwise Convolution Module."""
def __init__(
self,
channel_multiplier: int,
kernel_shape: Union[int, Sequence[int]],
num_spatial_dims: int,
data_format: str,
stride: Union[int, Sequence[int]] = 1,
rate: Union[int, Sequence[int]] = 1,
padding: Union[str, Sequence[tuple[int, int]]] = "SAME",
with_bias: bool = True,
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None,
name: Optional[str] = None,
):
"""Construct an ND Depthwise Convolution.
Args:
channel_multiplier: Multiplicity of output channels. To keep the number of
        output channels the same as the number of input channels, set this to 1.
kernel_shape: The shape of the kernel. Either an integer or a sequence of
length ``num_spatial_dims``.
num_spatial_dims: The number of spatial dimensions of the input data.
data_format: The data format of the input. Can be either
``channels_first``, ``channels_last``, ``N...C`` or ``NC...``. By
default, ``channels_last``. See :func:`get_channel_index`.
stride: Optional stride for the kernel. Either an integer or a sequence of
length ``num_spatial_dims``. Defaults to 1.
rate: Optional kernel dilation rate. Either an integer or a sequence of
length ``num_spatial_dims``. 1 corresponds to standard ND convolution,
``rate > 1`` corresponds to dilated convolution. Defaults to 1.
padding: Optional padding algorithm. Either ``VALID``, ``SAME`` or a
sequence of ``before, after`` pairs. Defaults to ``SAME``. See:
https://www.tensorflow.org/xla/operation_semantics#conv_convolution.
with_bias: Whether to add a bias. By default, true.
w_init: Optional weight initialization. By default, truncated normal.
b_init: Optional bias initialization. By default, zeros.
name: The name of the module.
"""
super().__init__(name=name)
self.num_spatial_dims = num_spatial_dims
self.kernel_shape = utils.replicate(kernel_shape, self.num_spatial_dims,
"kernel_shape")
self.lhs_dilation = (1,) * len(self.kernel_shape)
self.rhs_dilation = utils.replicate(rate, num_spatial_dims, "rhs_dilation")
self.channel_multiplier = channel_multiplier
self.padding = padding
self.stride = utils.replicate(stride, self.num_spatial_dims, "strides")
self.data_format = data_format
self.channel_index = hk.get_channel_index(data_format)
self.with_bias = with_bias
self.w_init = w_init
self.b_init = b_init or jnp.zeros
if self.channel_index == -1:
self.dn = DIMENSION_NUMBERS[self.num_spatial_dims]
else:
self.dn = DIMENSION_NUMBERS_NCSPATIAL[self.num_spatial_dims]
def __call__(self, inputs: jax.Array) -> jax.Array:
channel_index = hk.get_channel_index(self.data_format)
w_shape = self.kernel_shape + (1, self.channel_multiplier *
inputs.shape[channel_index])
w_init = self.w_init
if w_init is None:
fan_in_shape = np.prod(w_shape[:-1])
stddev = 1. / np.sqrt(fan_in_shape)
w_init = hk.initializers.TruncatedNormal(stddev=stddev)
w = hk.get_parameter("w", w_shape, inputs.dtype, init=w_init)
out = lax.conv_general_dilated(
inputs,
w,
window_strides=self.stride,
padding=self.padding,
lhs_dilation=self.lhs_dilation,
rhs_dilation=self.rhs_dilation,
dimension_numbers=self.dn,
feature_group_count=inputs.shape[channel_index])
if self.with_bias:
if channel_index == -1:
b_shape = (self.channel_multiplier * inputs.shape[channel_index],)
else:
b_shape = (self.channel_multiplier * inputs.shape[channel_index], 1, 1)
b = hk.get_parameter("b", b_shape, inputs.dtype, init=self.b_init)
b = jnp.broadcast_to(b, out.shape)
out = out + b
return out
class SeparableDepthwiseConv2D(hk.Module):
"""Separable 2-D Depthwise Convolution Module."""
def __init__(
self,
channel_multiplier: int,
kernel_shape: Union[int, Sequence[int]],
stride: Union[int, Sequence[int]] = 1,
padding: Union[str, Sequence[tuple[int, int]]] = "SAME",
with_bias: bool = True,
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None,
data_format: str = "NHWC",
name: Optional[str] = None,
):
"""Construct a Separable 2D Depthwise Convolution module.
Args:
channel_multiplier: Multiplicity of output channels. To keep the number of
        output channels the same as the number of input channels, set this to 1.
kernel_shape: The shape of the kernel. Either an integer or a sequence of
length ``num_spatial_dims``.
stride: Optional stride for the kernel. Either an integer or a sequence of
length ``num_spatial_dims``. Defaults to 1.
padding: Optional padding algorithm. Either ``VALID``, ``SAME`` or a
sequence of ``before, after`` pairs. Defaults to ``SAME``. See:
https://www.tensorflow.org/xla/operation_semantics#conv_convolution.
with_bias: Whether to add a bias. By default, true.
w_init: Optional weight initialization. By default, truncated normal.
b_init: Optional bias initialization. By default, zeros.
data_format: The data format of the input. Can be either
``channels_first``, ``channels_last``, ``N...C`` or ``NC...``. By
default, ``channels_last``.
name: The name of the module.
"""
super().__init__(name=name)
kernel_shape = utils.replicate(kernel_shape, 2, "kernel_shape")
self._conv1 = DepthwiseConv2D(
channel_multiplier=channel_multiplier,
kernel_shape=[kernel_shape[0], 1],
stride=stride,
padding=padding,
with_bias=False,
w_init=w_init,
b_init=b_init,
data_format=data_format)
self._conv2 = DepthwiseConv2D(
channel_multiplier=1,
kernel_shape=[1, kernel_shape[1]],
stride=1,
padding=padding,
with_bias=with_bias,
w_init=w_init,
b_init=b_init,
data_format=data_format)
def __call__(self, inputs: jax.Array) -> jax.Array:
return self._conv2(self._conv1(inputs))
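# A short usage sketch of SeparableDepthwiseConv2D; the image shape, kernel
# size and channel multiplier are illustrative assumptions.
def _example_separable_depthwise_usage():
  import haiku as hk
  import jax
  import jax.numpy as jnp
  def forward(x):
    conv = hk.SeparableDepthwiseConv2D(channel_multiplier=2, kernel_shape=3)
    return conv(x)
  f = hk.transform(forward)
  x = jnp.ones([1, 32, 32, 4])
  params = f.init(jax.random.PRNGKey(0), x)
  # With channel_multiplier=2 the output has 2 * 4 = 8 channels.
  return f.apply(params, None, x)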
class DepthwiseConv1D(DepthwiseConvND):
"""One dimensional convolution."""
def __init__(
self,
channel_multiplier: int,
kernel_shape: Union[int, Sequence[int]],
stride: Union[int, Sequence[int]] = 1,
rate: Union[int, Sequence[int]] = 1,
padding: Union[str, Sequence[tuple[int, int]]] = "SAME",
with_bias: bool = True,
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None,
data_format: str = "NWC",
name: Optional[str] = None,
):
"""Construct a 1D Depthwise Convolution.
Args:
channel_multiplier: Multiplicity of output channels. To keep the number of
        output channels the same as the number of input channels, set this to 1.
kernel_shape: The shape of the kernel. Either an integer or a sequence of
length 1.
stride: Optional stride for the kernel. Either an integer or a sequence of
length 1. Defaults to 1.
rate: Optional kernel dilation rate. Either an integer or a sequence of
length 1. 1 corresponds to standard ND convolution,
``rate > 1`` corresponds to dilated convolution. Defaults to 1.
padding: Optional padding algorithm. Either ``VALID``, ``SAME`` or a
sequence of ``before, after`` pairs. Defaults to ``SAME``. See:
https://www.tensorflow.org/xla/operation_semantics#conv_convolution.
with_bias: Whether to add a bias. By default, true.
w_init: Optional weight initialization. By default, truncated normal.
b_init: Optional bias initialization. By default, zeros.
data_format: The data format of the input. Can be either
``channels_first``, ``channels_last``, ``N...C`` or ``NC...``. By
default, ``channels_last``. See :func:`get_channel_index`.
name: The name of the module.
"""
super().__init__(
num_spatial_dims=1,
data_format=data_format,
channel_multiplier=channel_multiplier,
kernel_shape=kernel_shape,
stride=stride,
rate=rate,
padding=padding,
with_bias=with_bias,
w_init=w_init,
b_init=b_init,
name=name)
class DepthwiseConv2D(DepthwiseConvND):
"""Two dimensional convolution."""
def __init__(
self,
channel_multiplier: int,
kernel_shape: Union[int, Sequence[int]],
stride: Union[int, Sequence[int]] = 1,
rate: Union[int, Sequence[int]] = 1,
padding: Union[str, Sequence[tuple[int, int]]] = "SAME",
with_bias: bool = True,
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None,
data_format: str = "NHWC",
name: Optional[str] = None,
):
"""Construct a 2D Depthwise Convolution.
Args:
channel_multiplier: Multiplicity of output channels. To keep the number of
        output channels the same as the number of input channels, set this to 1.
kernel_shape: The shape of the kernel. Either an integer or a sequence of
length 2.
stride: Optional stride for the kernel. Either an integer or a sequence of
length 2. Defaults to 1.
rate: Optional kernel dilation rate. Either an integer or a sequence of
        length 2. 1 corresponds to standard ND convolution,
``rate > 1`` corresponds to dilated convolution. Defaults to 1.
padding: Optional padding algorithm. Either ``VALID``, ``SAME`` or a
sequence of ``before, after`` pairs. Defaults to ``SAME``. See:
https://www.tensorflow.org/xla/operation_semantics#conv_convolution.
with_bias: Whether to add a bias. By default, true.
w_init: Optional weight initialization. By default, truncated normal.
b_init: Optional bias initialization. By default, zeros.
data_format: The data format of the input. Can be either
``channels_first``, ``channels_last``, ``N...C`` or ``NC...``. By
default, ``channels_last``. See :func:`get_channel_index`.
name: The name of the module.
"""
super().__init__(
num_spatial_dims=2,
data_format=data_format,
channel_multiplier=channel_multiplier,
kernel_shape=kernel_shape,
stride=stride,
rate=rate,
padding=padding,
with_bias=with_bias,
w_init=w_init,
b_init=b_init,
name=name)
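# A short usage sketch of DepthwiseConv2D; the NHWC input shape and kernel
# size are illustrative assumptions.
def _example_depthwise_conv2d_usage():
  import haiku as hk
  import jax
  import jax.numpy as jnp
  def forward(x):
    conv = hk.DepthwiseConv2D(channel_multiplier=1, kernel_shape=3,
                              stride=1, padding="SAME")
    return conv(x)
  f = hk.transform(forward)
  x = jnp.ones([1, 28, 28, 3])
  params = f.init(jax.random.PRNGKey(0), x)
  # With channel_multiplier=1 the output keeps 3 channels: [1, 28, 28, 3].
  return f.apply(params, None, x)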
class DepthwiseConv3D(DepthwiseConvND):
"""Three dimensional convolution."""
def __init__(
self,
channel_multiplier: int,
kernel_shape: Union[int, Sequence[int]],
stride: Union[int, Sequence[int]] = 1,
rate: Union[int, Sequence[int]] = 1,
padding: Union[str, Sequence[tuple[int, int]]] = "SAME",
with_bias: bool = True,
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None,
data_format: str = "NDHWC",
name: Optional[str] = None,
):
"""Construct a 3D Depthwise Convolution.
Args:
channel_multiplier: Multiplicity of output channels. To keep the number of
        output channels the same as the number of input channels, set this to 1.
kernel_shape: The shape of the kernel. Either an integer or a sequence of
length 3.
stride: Optional stride for the kernel. Either an integer or a sequence of
length 3. Defaults to 1.
rate: Optional kernel dilation rate. Either an integer or a sequence of
        length 3. 1 corresponds to standard ND convolution,
``rate > 1`` corresponds to dilated convolution. Defaults to 1.
padding: Optional padding algorithm. Either ``VALID``, ``SAME`` or a
sequence of ``before, after`` pairs. Defaults to ``SAME``. See:
https://www.tensorflow.org/xla/operation_semantics#conv_convolution.
with_bias: Whether to add a bias. By default, true.
w_init: Optional weight initialization. By default, truncated normal.
b_init: Optional bias initialization. By default, zeros.
data_format: The data format of the input. Can be either
``channels_first``, ``channels_last``, ``N...C`` or ``NC...``. By
default, ``channels_last``. See :func:`get_channel_index`.
name: The name of the module.
"""
super().__init__(
num_spatial_dims=3,
data_format=data_format,
channel_multiplier=channel_multiplier,
kernel_shape=kernel_shape,
stride=stride,
rate=rate,
padding=padding,
with_bias=with_bias,
w_init=w_init,
b_init=b_init,
name=name)
|
dm-haiku-main
|
haiku/_src/depthwise_conv.py
|
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.test_utils."""
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import test_utils
import jax
import jax.numpy as jnp
class TestUtilsTest(parameterized.TestCase):
@test_utils.transform_and_run(
jax_transform=jax.pmap,
map_rng=lambda k: jnp.broadcast_to(k, (1, *k.shape)))
def test_transform_and_run_pmap(self):
pass
@test_utils.transform_and_run(
jax_transform=lambda f: jax.pmap(f, 'i'),
map_rng=lambda k: jnp.broadcast_to(k, (1, *k.shape)))
def test_transform_and_run_pmap_with_axis(self):
pass
if __name__ == '__main__':
absltest.main()
|
dm-haiku-main
|
haiku/_src/test_utils_test.py
|