| python_code | repo_name | file_path |
|---|---|---|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utility functions for training and evaluation."""
import inspect
from typing import Any, Callable
import chex
import haiku as hk
from jax import nn as jnn
from jax import numpy as jnp
from neural_networks_chomsky_hierarchy.tasks import task
COMPUTATION_EMPTY_TOKEN = 0
OUTPUT_EMPTY_TOKEN = 1
def make_model_with_empty_targets(
model: Callable[[chex.Array], chex.Array],
generalization_task: task.GeneralizationTask,
computation_steps_mult: int = 0,
single_output: bool = False,
) -> Callable[[chex.Array], chex.Array]:
"""Returns a wrapped model that pads the inputs to match the output length.
For a given input tape `input_tape` of vocabulary size `vocab_size`, the
wrapped model will process a tape of the format
[`input_tape`, `empty_tape`], where the empty tape token is `vocab_size + 1`.
The `empty_tape` has the same length as the task output.
Args:
model: A model function that converts inputs to outputs.
generalization_task: The task that we train on.
    computation_steps_mult: The number of empty cells to append to the input
      tape. This variable is a multiplier and the actual number of cells is
      `computation_steps_mult * input_length`.
single_output: Whether to return the squeezed tensor of values.
"""
def new_model(x: chex.Array) -> chex.Array:
batch_size, input_length, input_size = x.shape
output_length = generalization_task.output_length(input_length)
extra_dims_onehot = 1 + int(computation_steps_mult > 0)
final_input_size = input_size + extra_dims_onehot
# Add trailing zeros to account for new final_input_size.
extra_zeros_x = jnp.zeros(
(batch_size, input_length, final_input_size - input_size)
)
x = jnp.concatenate([x, extra_zeros_x], axis=-1)
computation_tape = jnp.full(
(batch_size, computation_steps_mult * input_length),
fill_value=input_size + COMPUTATION_EMPTY_TOKEN)
computation_tape = jnn.one_hot(
computation_tape, num_classes=final_input_size
)
output_tokens = jnp.full(
(batch_size, output_length),
fill_value=input_size
+ OUTPUT_EMPTY_TOKEN
- int(computation_steps_mult == 0),
)
output_tokens = jnn.one_hot(output_tokens, num_classes=final_input_size)
final_input = jnp.concatenate([x, computation_tape, output_tokens], axis=1)
if 'input_length' in inspect.getfullargspec(model).args:
output = model(final_input, input_length=input_length) # pytype: disable=wrong-keyword-args
else:
output = model(final_input)
output = output[:, -output_length:]
if single_output:
output = jnp.squeeze(output, axis=1)
return output
return new_model
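# --- Illustrative usage sketch (not part of the original module). ------------
# A minimal, hedged example of the tape layout built by
# `make_model_with_empty_targets`. `_ToyTask` is a duck-typed stand-in that
# only provides `output_length`; the real tasks live in
# `neural_networks_chomsky_hierarchy.tasks`.
def _example_empty_targets_wrapper() -> chex.Array:
  """Returns the wrapped output of a toy identity model."""

  class _ToyTask:
    # Assumed toy behaviour: the output is as long as the input.

    def output_length(self, input_length: int) -> int:
      return input_length

  def _identity_model(x: chex.Array) -> chex.Array:
    # The inner model sees [inputs, computation_tape, output_tokens], one-hot
    # over `input_size + 2` classes (two extras since computation_steps_mult
    # is > 0 here).
    return x

  wrapped = make_model_with_empty_targets(
      _identity_model, _ToyTask(), computation_steps_mult=1)
  x = jnn.one_hot(jnp.zeros((2, 4), dtype=jnp.int32), num_classes=3)
  return wrapped(x)  # Shape (2, 4, 5): only the last `output_length` steps.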
def make_model_with_targets_as_input(
model: Callable[[chex.Array], chex.Array], computation_steps_mult: int = 0
) -> Callable[[chex.Array, chex.Array], chex.Array]:
"""Returns a wrapped model that takes the targets as inputs.
This function is useful for the autoregressive case where we pass the targets
as inputs to the model. The final input looks like:
[inputs, computation_tokens, output_token, targets]
Args:
model: A haiku model that takes 'x' as input.
    computation_steps_mult: The number of computation tokens to append to the
      input tape. This variable is a multiplier and the actual number of cells
      is `computation_steps_mult * input_length`.
"""
def new_model(x: chex.Array, y: chex.Array) -> chex.Array:
"""Returns an output from the inputs and targets.
Args:
x: One-hot input vectors, shape (B, T, input_size).
y: One-hot target output vectors, shape (B, T, output_size).
"""
batch_size, input_length, input_size = x.shape
_, output_length, output_size = y.shape
extra_dims_onehot = 1 + int(computation_steps_mult > 0)
final_input_size = max(input_size, output_size) + extra_dims_onehot
# Add trailing zeros to account for new final_input_size.
extra_zeros_x = jnp.zeros(
(batch_size, input_length, final_input_size - input_size)
)
x = jnp.concatenate([x, extra_zeros_x], axis=-1)
extra_zeros_y = jnp.zeros(
(batch_size, output_length, final_input_size - output_size)
)
y = jnp.concatenate([y, extra_zeros_y], axis=-1)
computation_tape = jnp.full(
(batch_size, computation_steps_mult * input_length),
fill_value=input_size + COMPUTATION_EMPTY_TOKEN,
)
computation_tape = jnn.one_hot(
computation_tape, num_classes=final_input_size
)
output_token = jnp.full(
(batch_size, 1),
fill_value=input_size
+ OUTPUT_EMPTY_TOKEN
- int(computation_steps_mult == 0),
)
output_token = jnn.one_hot(output_token, num_classes=final_input_size)
final_input = jnp.concatenate(
[x, computation_tape, output_token, y], axis=1
)
if 'input_length' in inspect.getfullargspec(model).args:
output = model(final_input, input_length=input_length) # pytype: disable=wrong-keyword-args
else:
output = model(final_input)
return output[:, -output_length - 1 : -1]
return new_model
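# --- Illustrative usage sketch (not part of the original module). ------------
# Hedged example of the autoregressive input layout
# [inputs, computation_tokens, output_token, targets]. `_identity` is a
# hypothetical inner model used only to expose the shapes.
def _example_targets_as_input_wrapper() -> chex.Array:
  """Returns an array of shape (2, 5, 5), one slice per target position."""

  def _identity(x: chex.Array) -> chex.Array:
    return x

  wrapped = make_model_with_targets_as_input(
      _identity, computation_steps_mult=0)
  x = jnn.one_hot(jnp.zeros((2, 3), dtype=jnp.int32), num_classes=4)  # (2, 3, 4)
  y = jnn.one_hot(jnp.zeros((2, 5), dtype=jnp.int32), num_classes=2)  # (2, 5, 2)
  # The concatenated tape has length 3 + 0 + 1 + 5 = 9 and one-hot depth
  # max(4, 2) + 1 = 5; the returned slice drops the last target position.
  return wrapped(x, y)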
def add_sampling_to_autoregressive_model(
model: Callable[[chex.Array, chex.Array], chex.Array],
single_output: bool = False,
) -> Callable[[chex.Array, chex.Array, bool], chex.Array]:
"""Adds a 'sample' argument to the model, to use autoregressive sampling."""
def new_model_with_sampling(
x: chex.Array,
y: chex.Array,
sample: bool,
) -> chex.Array:
"""Returns an autoregressive model if `sample == True and output_size > 1`.
Args:
x: The input sequences of shape (b, t, i), where i is the input size.
y: The target sequences of shape (b, t, o), where o is the output size.
sample: Whether to evaluate the model using autoregressive decoding.
"""
output_length = 1 if len(y.shape) == 2 else y.shape[1]
output_size = y.shape[-1]
if not sample or output_length == 1:
output = model(x, y)
else:
def evaluate_model_autoregressively(
idx: int,
predictions: chex.Array,
) -> chex.Array:
"""Iteratively evaluates the model based on the previous predictions.
Args:
idx: The index of the target sequence that should be evaluated.
predictions: The logits for the predictions up to but not including
the index `idx`.
Returns:
The `predictions` array modified only at position `idx` where the
logits for index `idx` have been inserted.
"""
one_hot_predictions = jnn.one_hot(
jnp.argmax(predictions, axis=-1),
num_classes=output_size,
)
logits = model(x, one_hot_predictions)
return predictions.at[:, idx].set(logits[:, idx])
output = hk.fori_loop(
lower=0,
upper=output_length,
body_fun=evaluate_model_autoregressively,
init_val=jnp.empty_like(y),
)
if single_output:
output = jnp.squeeze(output, axis=1)
return output
return new_model_with_sampling
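# --- Illustrative usage sketch (not part of the original module). ------------
# Hedged example of wiring the sampling wrapper into an hk.transform'd model,
# which is how it is meant to be used (hk.fori_loop needs a Haiku context).
# `_tiny_model` is a hypothetical stand-in for the real autoregressive models.
def _example_sampling_wrapper(x: chex.Array, y: chex.Array) -> chex.Array:
  """Greedy autoregressive decoding with a toy per-step linear model."""
  import jax  # Local import, for this illustrative example only.

  def _tiny_model(inputs: chex.Array, targets: chex.Array) -> chex.Array:
    del inputs  # The toy model only looks at the (one-hot) predictions.
    return hk.Linear(targets.shape[-1])(targets)

  def _forward(a: chex.Array, b: chex.Array, sample: bool) -> chex.Array:
    return add_sampling_to_autoregressive_model(_tiny_model)(a, b, sample)

  model = hk.transform(_forward)
  # Initialize through the teacher-forced path, then decode autoregressively,
  # e.g. with x of shape (2, 3, 4) and y of shape (2, 5, 6).
  params = model.init(jax.random.PRNGKey(0), x, y, sample=False)
  return model.apply(params, jax.random.PRNGKey(1), x, y, sample=True)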
def update_tree_with_new_containers(
tree: Any, update_dict: dict[str, Any]
) -> None:
"""Updates a dataclass tree in place, adding new containers.
This method is useful for the nested library to add fields to a tree, for
which containers have not been created.
For instance, if A is a dataclass with attribute architecture_params, and we
want to add the value architecture_params.rnn_model.size, we need to create
the container 'rnn_model' inside architecture_params.
Args:
tree: An object with attribute (typically a dataclass).
update_dict: A dict of nested updates. See example above.
"""
for key in update_dict:
subkeys = key.split('.')
if len(subkeys) >= 2:
# Example: architecture.params.size
for i in range(0, len(subkeys) - 2):
getattr(tree, subkeys[i])[subkeys[i + 1]] = {}
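# --- Illustrative usage sketch (not part of the original module). ------------
# Hedged example of the container-creation behaviour described above. The
# `_Config` dataclass and the 'architecture_params.rnn_model.size' key are
# hypothetical; only `update_tree_with_new_containers` is from the codebase.
def _example_update_tree() -> Any:
  """Creates the intermediate 'rnn_model' dict so the nested key can be set."""
  import dataclasses  # Local import, for this example only.

  @dataclasses.dataclass
  class _Config:
    architecture_params: dict[str, Any]

  config = _Config(architecture_params={})
  update_tree_with_new_containers(
      config, {'architecture_params.rnn_model.size': 128})
  # config.architecture_params is now {'rnn_model': {}}. The leaf value itself
  # is written later by the nested-update machinery, not by this function.
  return config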
| neural_networks_chomsky_hierarchy-main | experiments/utils.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training loop for length generalization experiments."""
import dataclasses
import functools
import random
from typing import Any, Callable, Mapping, Optional
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
import tqdm
from neural_networks_chomsky_hierarchy.experiments import curriculum as curriculum_lib
from neural_networks_chomsky_hierarchy.experiments import range_evaluation
from neural_networks_chomsky_hierarchy.tasks import task as task_lib
_LossMetrics = Optional[Mapping[str, jnp.ndarray]]
_LossFn = Callable[[chex.Array, chex.Array], tuple[float, _LossMetrics]]
_AccuracyFn = Callable[[chex.Array, chex.Array], float]
_ModelApplyFn = Callable[..., chex.Array]
_MAX_RNGS_RESERVE = 50000
@dataclasses.dataclass
class ClassicTrainingParams:
"""Parameters needed to train classical architectures."""
seed: int # Used to sample during forward pass (e.g. from final logits).
model_init_seed: int # Used to initialize model parameters.
training_steps: int
log_frequency: int
task: task_lib.GeneralizationTask
length_curriculum: curriculum_lib.Curriculum
batch_size: int
model: hk.Transformed
loss_fn: Callable[[jnp.ndarray, jnp.ndarray], tuple[float, _LossMetrics]]
learning_rate: float
test_model: Optional[hk.Transformed] = None
max_grad_norm: float = 1.
is_autoregressive: bool = False
compute_full_range_test: bool = False
range_test_total_batch_size: int = 512
range_test_sub_batch_size: int = 64
max_range_test_length: int = 100
accuracy_fn: Optional[Callable[[jnp.ndarray, jnp.ndarray],
jnp.ndarray]] = None
def _apply_loss_and_metrics_fn(
params: hk.Params,
rng_key: chex.PRNGKey,
batch: task_lib.Batch,
model_apply_fn: _ModelApplyFn,
loss_fn: _LossFn,
accuracy_fn: _AccuracyFn,
is_autoregressive: bool = False,
) -> tuple[float, tuple[_LossMetrics, float]]:
"""Computes the model output and applies the loss function.
Depending on whether a model is autoregressive or not, it will have a
different number of input parameters (i.e., autoregressive models also require
the targets as an input).
Args:
params: The model parameters.
rng_key: The prng key to use for random number generation.
batch: The data (consists of both inputs and outputs).
model_apply_fn: The model function that converts inputs into outputs.
loss_fn: A function that computes the loss for a batch of logits and labels.
accuracy_fn: A function that computes the accuracy for a batch of logits and
labels.
is_autoregressive: Whether the model is autoregressive or not.
Returns:
The loss of the model for the batch of data, extra loss metrics and the
accuracy, if accuracy_fn is not None.
"""
if is_autoregressive:
outputs = model_apply_fn(
params, rng_key, batch["input"], batch["output"], sample=False)
else:
outputs = model_apply_fn(params, rng_key, batch["input"])
loss, loss_metrics = loss_fn(outputs, batch["output"])
if accuracy_fn is not None:
accuracy = accuracy_fn(outputs, batch["output"])
else:
accuracy = None
return loss, (loss_metrics, accuracy)
@functools.partial(
jax.jit,
static_argnames=(
"model_apply_fn",
"loss_fn",
"accuracy_fn",
"optimizer",
"is_autoregressive",
),
)
def _update_parameters(
params: hk.Params,
rng_key: chex.PRNGKey,
batch: task_lib.Batch,
model_apply_fn: _ModelApplyFn,
loss_fn: _LossFn,
accuracy_fn: _AccuracyFn,
optimizer: optax.GradientTransformation,
opt_state: optax.OptState,
is_autoregressive: bool = False,
) -> tuple[hk.Params, optax.OptState, tuple[float, _LossMetrics, float]]:
"""Applies a single SGD update step to the model parameters.
Args:
params: The model parameters.
rng_key: The prng key to use for random number generation.
batch: The data (consists of both inputs and outputs).
model_apply_fn: The model function that converts inputs into outputs.
loss_fn: A function that computes the loss for a batch of logits and labels.
accuracy_fn: A function that computes the accuracy for a batch of logits and
labels.
optimizer: The optimizer that computes the updates from the gradients of the
`loss_fn` with respect to the `params` and the previous `opt_state`.
opt_state: The optimizer state, e.g., momentum for each variable when using
Adam.
is_autoregressive: Whether the model is autoregressive or not.
Returns:
The updated parameters, the new optimizer state, and the loss, loss metrics
and accuracy.
"""
(loss, (metrics, accuracy)), grads = jax.value_and_grad(
_apply_loss_and_metrics_fn,
has_aux=True)(params, rng_key, batch, model_apply_fn, loss_fn,
accuracy_fn, is_autoregressive)
updates, new_opt_state = optimizer.update(grads, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, new_opt_state, (loss, metrics, accuracy)
class TrainingWorker:
"""Training worker."""
def __init__(self,
training_params: ClassicTrainingParams,
use_tqdm: bool = False):
"""Initializes the worker.
Args:
training_params: The training parameters.
use_tqdm: Whether to add a progress bar to stdout.
"""
self._training_params = training_params
self._use_tqdm = use_tqdm
def run(
self,
) -> tuple[
list[Mapping[str, Any]], Optional[list[Mapping[str, Any]]], chex.ArrayTree
]:
"""Trains the model with the provided config.
Returns:
      Results (various training and validation metrics), the full-range
      evaluation results (None if not computed), and the final model parameters.
"""
training_params = self._training_params
rngs_reserve = min(_MAX_RNGS_RESERVE, training_params.training_steps)
random.seed(training_params.seed)
np.random.seed(training_params.seed)
rng_seq = hk.PRNGSequence(training_params.seed)
rng_seq.reserve(rngs_reserve)
results = []
model = training_params.model
task = training_params.task
length_curriculum = training_params.length_curriculum
optimizer = optax.chain(
optax.clip_by_global_norm(training_params.max_grad_norm),
optax.adam(training_params.learning_rate))
dummy_batch = task.sample_batch(
next(rng_seq), length=10, batch_size=training_params.batch_size)
model_init_rng_key = jax.random.PRNGKey(training_params.model_init_seed)
if training_params.is_autoregressive:
params = model.init(
model_init_rng_key,
dummy_batch["input"],
dummy_batch["output"],
sample=False)
else:
params = model.init(model_init_rng_key, dummy_batch["input"])
opt_state = optimizer.init(params)
self._params, self._step = params, 0
steps = range(training_params.training_steps + 1)
if self._use_tqdm:
steps = tqdm.tqdm(steps)
for step in steps:
# Randomness handled by either python.random or numpy.
length = length_curriculum.sample_sequence_length(step)
# Randomness handled by either jax, python.random or numpy.
train_batch = task.sample_batch(
next(rng_seq), length=length, batch_size=training_params.batch_size)
params, opt_state, (
train_loss, train_metrics, train_accuracy) = _update_parameters(
params=params,
rng_key=next(rng_seq),
batch=train_batch,
model_apply_fn=model.apply,
loss_fn=training_params.loss_fn,
accuracy_fn=training_params.accuracy_fn,
optimizer=optimizer,
opt_state=opt_state,
is_autoregressive=training_params.is_autoregressive)
self._params, self._step = params, step
log_freq = training_params.log_frequency
if (log_freq > 0) and (step % log_freq == 0):
log_data = {
"step": step,
"train_loss": float(train_loss),
}
if training_params.accuracy_fn is not None:
log_data["train_accuracy"] = float(train_accuracy)
for key, value in train_metrics.items():
log_data[".".join(["train_metrics", key])] = np.array(value)
results.append(log_data)
# We need to access this private attribute since the default reserve size
# can not be edited yet.
if not rng_seq._subkeys: # pylint: disable=protected-access
rng_seq.reserve(rngs_reserve)
eval_results = None
if training_params.compute_full_range_test:
eval_params = range_evaluation.EvaluationParams(
model=training_params.test_model or model,
params=params,
accuracy_fn=training_params.accuracy_fn,
sample_batch=task.sample_batch,
max_test_length=training_params.max_range_test_length,
total_batch_size=training_params.range_test_total_batch_size,
sub_batch_size=training_params.range_test_sub_batch_size,
is_autoregressive=training_params.is_autoregressive,
)
eval_results = range_evaluation.range_evaluation(
eval_params, use_tqdm=False)
return results, eval_results, params
| neural_networks_chomsky_hierarchy-main | experiments/training.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-deterministic Stack RNN core.
Following the paper from DuSell et al (2020):
https://arxiv.org/abs/2010.04674
The idea is to add a non-deterministic stack extension to a recurrent neural
network to be able to simulate a machine accepting non-deterministic
context-free languages. It can be seen as an extension to the Stack-RNN
developed by Joulin et al (2015). However, it is far more complex and hard to
understand.
The non-deterministic stack is completely differentiable.
A non-deterministic pushdown automaton (NDPDA) uses 'multiple stacks at the
same time'. The problem is that the number of possible stacks grows
exponentially with time, which makes a naive practical implementation
impossible. However, Lang et al. proved in 1969, based on ideas from
context-free parsers like CYK, that an NDPDA can be simulated using only
O(n³) memory, and not O(2^n).
The main idea is to reuse the content of the different stacks in a dynamic
programming manner. A stack with n values is a stack with n-1 values + an extra
value, so we can build a graph of possible stacks, which would reuse most of
the data.
Concretely, the graph is made of nodes (t, q, x) where t is a number (the time),
q is a state from a fixed, user-defined set and x is a symbol or value, also
from a finite, user-defined set. Then one path in this graph is exactly one
stack, which can simply be reconstructed by reading the value for each node
in the path. Each state q can be used as a 'branching' mechanism: the more
states, the more branching there can be and therefore the more stacks can be
used. The number of possible stacks is (#states * #symbols)^t.
To interact with this graph, i.e., do a push or a pop action, one uses
transitions on these nodes. For push, it is a function of the form
(q1, x1) -> (q2, x2), where q2 is the new state to transition to (i.e., whether
to branch to a new stack or keep the same one) and x2 is the value to push. For
pop, it is a function of the form (q1, x1) -> q2, which again allows the
network to choose whether to create a new stack or not; no value is pushed in
that case. The functions are modelled by transition matrices of shape
(Q, S, Q, S) where Q=#states and S=#symbols.
Once the action matrices are passed, the graph is updated. The update is done
via an internal transition matrix called gamma. This matrix is simple for the
push action (one can only push on the top of the stack, ie nodes for which t =
current timestep). It is far more complex for the pop action, as popping a value
from the current stack can completely change the structure of the graph: the
new stack after popping might be equal to a very old stack seen at the beginning
of the episode, and we must change the links accordingly. Roughly, the update
operation for gamma has a time complexity of O(Q⁴ S³ n³).
Finally, once the graph is updated via gamma, we update the probabilities of
the tops of the stacks, which gives us a tensor called alpha. From alpha we
deduce the average top of the stack to be sent to the agent.
As there are 3 actions (push/pop/replace), unrolling this over
long sequences and using big batch sizes consumes too much memory and the
accelerators fail.
Notations:
Q: number of states of the ND stack (not the number of states of the
RNN).
S: number of symbols which can be pushed on the stack.
T: Sequence length.
B: Batch size.
"""
from typing import Any, Mapping, NamedTuple, Optional
import chex
import haiku as hk
import jax
import jax.nn as jnn
import jax.numpy as jnp
_EPSILON = 0.001
class NDStack(NamedTuple):
"""The non-deterministic stack.
Note that alpha and top_stack depend on gamma.
"""
gamma: chex.Array # Shape (B, T, T, Q, S, Q, S)
alpha: chex.Array # Shape (B, T, Q, S)
top_stack: chex.Array # Shape (B, S)
def _update_stack(ndstack: NDStack,
push_actions: chex.Array,
pop_actions: chex.Array,
replace_actions: chex.Array,
timestep: int,
read_states: bool = True) -> NDStack:
"""Returns an updated NDStack.
Args:
ndstack: See above. Contains the internals needed to simulate a
non-deterministic stack.
push_actions: A tensor of shape (B, Q, S, Q, S).
pop_actions: A tensor of shape (B, Q, S, Q).
replace_actions: A tensor of shape (B, Q, S, Q, S).
timestep: The current timestep while processing the sequence.
read_states: Whether to read the state of the NPDA as well.
"""
stack_size = ndstack.gamma.shape[2]
mask = jnp.zeros((stack_size, stack_size))
mask = mask.at[timestep - 1, timestep].set(1)
new_push_gamma_t = jnp.einsum('bqxry,tT->btTqxry', push_actions,
mask)[:, :, timestep]
index_k = jnp.stack([jnp.arange(start=0, stop=stack_size)] * stack_size)
index_i = jnp.transpose(index_k)
timestep_arr = jnp.full((stack_size, stack_size), timestep)
index_mask = jnp.logical_and(index_k > index_i, index_k < timestep_arr - 1)
index_mask = jnp.einsum('tT,bqxry->btTqxry', index_mask,
jnp.ones(push_actions.shape))
new_pop_gamma_t = jnp.einsum(
'bikqxuy,bkuysz,bszr->biqxry',
index_mask * ndstack.gamma,
ndstack.gamma[:, :, timestep - 1],
pop_actions,
)
new_replace_gamma_t = jnp.einsum('biqxsz,bszry->biqxry',
ndstack.gamma[:, :,
timestep - 1], replace_actions)
new_gamma = jax.vmap(jax.vmap(lambda x, y: x.at[timestep].set(y)))(
ndstack.gamma, new_replace_gamma_t + new_pop_gamma_t + new_push_gamma_t)
alpha_t = jnp.einsum('biqx,biqxry->bry', ndstack.alpha, new_gamma[:, :,
timestep])
new_alpha = jax.vmap(lambda x, y: x.at[timestep].set(y))(ndstack.alpha,
alpha_t)
if read_states:
batch_size, states, symbols = alpha_t.shape
obs = jnp.reshape(alpha_t, (batch_size, states * symbols))
else:
obs = jnp.sum(alpha_t, axis=1)
obs = obs / (jnp.sum(obs, axis=-1, keepdims=True) + _EPSILON)
return NDStack(new_gamma, new_alpha, top_stack=obs)
# First element is the NDStack, second is the current timestep, third is the
# hidden internal state.
_NDStackRnnState = tuple[NDStack, chex.Array, chex.Array]
class NDStackRNNCore(hk.RNNCore):
"""Core for the non-deterministic stack RNN."""
def __init__(
self,
stack_symbols: int,
stack_states: int,
stack_size: int = 30,
inner_core: type[hk.RNNCore] = hk.VanillaRNN,
read_states: bool = False,
name: Optional[str] = None,
**inner_core_kwargs: Mapping[str, Any]
):
"""Initializes.
Args:
stack_symbols: The number of symbols which can be used in the stack.
stack_states: The number of states of the non-deterministic stack.
        Corresponds to the amount of branching in the graph, i.e., roughly
        n_stacks = stack_states ^ t.
stack_size: The total size of the stacks. Be careful when increasing this
value since the computation is in O(stack_size ^ 3).
inner_core: The inner RNN core builder.
      read_states: Whether to read the states of the NPDA or only the top of
        the stack.
name: See base class.
**inner_core_kwargs: The arguments to be passed to the inner RNN core
builder.
"""
super().__init__(name=name)
self._rnn_core = inner_core(**inner_core_kwargs)
self._stack_symbols = stack_symbols
self._stack_states = stack_states
self._stack_size = stack_size
self._read_states = read_states
def __call__(
self, inputs: chex.Array, prev_state: _NDStackRnnState
) -> tuple[chex.Array, _NDStackRnnState]:
"""Steps the non-deterministic stack RNN core.
See base class docstring.
Args:
inputs: An input array of shape (batch_size, input_size). The time
dimension is not included since it is an RNNCore, which is unrolled over
the time dimension.
prev_state: A _NDStackRnnState tuple, consisting of the previous nd-stack,
the previous timestep and the previous state of the inner core.
Returns:
- output: An output array of shape (batch_size, output_size).
- next_state: Same format as prev_state.
"""
ndstack, timestep, old_core_state = prev_state
# The network can always read the top of the stack.
batch_size = ndstack.gamma.shape[0]
inputs = jnp.concatenate([inputs, ndstack.top_stack], axis=-1)
new_core_output, new_core_state = self._rnn_core(inputs, old_core_state)
n_push_actions = (self._stack_states * self._stack_symbols)**2
n_pop_actions = self._stack_states**2 * self._stack_symbols
n_replace_actions = (self._stack_states * self._stack_symbols)**2
actions = hk.Linear(n_push_actions + n_pop_actions + n_replace_actions)(
new_core_output)
actions = jnn.softmax(actions, axis=-1)
push_actions = jnp.reshape(
actions[:, :n_push_actions],
(batch_size, self._stack_states, self._stack_symbols,
self._stack_states, self._stack_symbols))
pop_actions = jnp.reshape(
actions[:, n_push_actions:n_push_actions + n_pop_actions],
(batch_size, self._stack_states, self._stack_symbols,
self._stack_states))
replace_actions = jnp.reshape(
actions[:, -n_replace_actions:],
(batch_size, self._stack_states, self._stack_symbols,
self._stack_states, self._stack_symbols))
new_ndstack = _update_stack(
ndstack,
push_actions,
pop_actions,
replace_actions, (timestep + 1)[0],
read_states=self._read_states)
return new_core_output, (new_ndstack, timestep + 1, new_core_state)
def initial_state(self, batch_size: Optional[int]) -> _NDStackRnnState:
"""Returns the initial state of the core, a hidden state and an empty stack."""
core_state = self._rnn_core.initial_state(batch_size)
# Gamma, the transition matrix, is initialized to full zeros: there is no
# connection in the graph at the beginning.
gamma = jnp.zeros(
(batch_size, self._stack_size, self._stack_size, self._stack_states,
self._stack_symbols, self._stack_states, self._stack_symbols))
# Alpha is zero everywhere except for the first node, which is (0, q0, 0).
alpha = jnp.zeros(
(batch_size, self._stack_size, self._stack_states, self._stack_symbols))
alpha = jax.vmap(lambda x: x.at[0, 0, 0].set(1))(alpha)
if self._read_states:
top_stack = jnp.zeros(
(batch_size, self._stack_states * self._stack_symbols))
else:
# The top of the stack is 0 as the first node contains the symbol 0.
top_stack = jnp.zeros((batch_size, self._stack_symbols))
ndstack = NDStack(gamma, alpha, top_stack)
return ndstack, jnp.zeros((batch_size,), dtype=jnp.int32), core_state
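# --- Illustrative usage sketch (not part of the original module). ------------
# Hedged example of stepping the core once inside hk.transform. All sizes are
# arbitrary toy values (Q=2 states, S=3 symbols, stack_size=5, an inner
# VanillaRNN of width 8, and an input size of 4).
def _example_ndstack_step(inputs: chex.Array) -> chex.Array:
  """Runs a single step of NDStackRNNCore on `inputs` of shape (B, 4)."""

  def _forward(x: chex.Array) -> chex.Array:
    core = NDStackRNNCore(
        stack_symbols=3, stack_states=2, stack_size=5, hidden_size=8)
    state = core.initial_state(batch_size=x.shape[0])
    output, _ = core(x, state)
    return output  # Shape (B, 8): the inner core's output.

  model = hk.transform(_forward)
  params = model.init(jax.random.PRNGKey(0), inputs)
  return model.apply(params, jax.random.PRNGKey(0), inputs)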
| neural_networks_chomsky_hierarchy-main | models/ndstack_rnn.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stack RNN core.
Following the paper from Joulin et al (2015):
https://arxiv.org/abs/1503.01007
The idea is to add a stack extension to a recurrent neural network to be able to
simulate a machine accepting context-free languages.
The stack is completely differentiable. The actions taken are probabilities
only and therefore no RL is required. The stack and state update are just linear
combinations of the last states and these probabilities.
"""
from typing import Any, Mapping, Optional
import einshape
import haiku as hk
import jax
import jax.nn as jnn
import jax.numpy as jnp
# First element is the stacks, second is the hidden internal state.
_StackRnnState = tuple[jnp.ndarray, jnp.ndarray]
# Number of actions the stack-RNN can take, namely POP, PUSH and NO_OP.
_NUM_ACTIONS = 3
def _update_stack(stack: jnp.ndarray, actions: jnp.ndarray,
push_value: jnp.ndarray) -> jnp.ndarray:
"""Updates the stack values.
We update the stack in two steps.
In the first step, we update the top of the stack, and essentially do:
stack[0] = push_action * push_value
+ pop_action * stack[1]
+ noop_action * stack[0]
Then, in the second step, we update the rest of the stack and we move the
elements up and down, depending on the action executed:
* If push_action were 1, then we'd be purely pushing a new element
to the top of the stack, so we'd move all elements down by one.
* Likewise, if pop_action were 1, we'd be purely taking an element
off the top of the stack, so we'd move all elements up by one.
* Finally, if noop_action were 1, we'd leave elements where they were.
The update is therefore essentially:
stack[i] = push_action * stack[i-1]
+ pop_action * stack[i+1]
+ noop_action * stack[i]
Args:
stack: The current stack, shape (batch_size, stack_size, stack_cell_size).
actions: The array of probabilities of the actions, shape (batch_size, 3).
push_value: The vector to push on the stack, if the push action probability
is positive, shape (batch_size, stack_cell_size).
Returns:
The new stack, same shape as the input stack.
"""
batch_size, stack_size, stack_cell_size = stack.shape
# Tiling the actions to match the top of the stack.
# Shape (batch_size, stack_cell_size, _NUM_ACTIONS)
cell_tiled_stack_actions = einshape.jax_einshape(
'ba->bsa', actions, s=stack_cell_size)
push_action = cell_tiled_stack_actions[..., 0]
pop_action = cell_tiled_stack_actions[..., 1]
pop_value = stack[..., 1, :]
no_op_action = cell_tiled_stack_actions[..., 2]
no_op_value = stack[..., 0, :]
# Shape (batch_size, 1, stack_cell_size)
top_new_stack = (
push_action * push_value + pop_action * pop_value +
no_op_action * no_op_value)
top_new_stack = jnp.expand_dims(top_new_stack, axis=1)
# Tiling the actions to match all of the stack except the top.
  # Shape (batch_size, stack_size - 1, stack_cell_size, _NUM_ACTIONS)
stack_tiled_stack_actions = einshape.jax_einshape(
'ba->bcsa', actions, s=stack_cell_size, c=stack_size - 1)
push_action = stack_tiled_stack_actions[..., 0]
push_value = stack[..., :-1, :]
pop_action = stack_tiled_stack_actions[..., 1]
pop_extra_zeros = jnp.zeros((batch_size, 1, stack_cell_size))
pop_value = jnp.concatenate([stack[..., 2:, :], pop_extra_zeros], axis=1)
no_op_action = stack_tiled_stack_actions[..., 2]
no_op_value = stack[..., 1:, :]
# Shape (batch_size, stack_size-1, stack_cell_size)
rest_new_stack = (
push_action * push_value + pop_action * pop_value +
no_op_action * no_op_value)
# Finally concatenate the new top with the new rest of the stack.
return jnp.concatenate([top_new_stack, rest_new_stack], axis=1)
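# --- Illustrative sketch (not part of the original module). ------------------
# Hedged numeric check of the update rule described in the docstring above:
# with a probability-1 PUSH action, the pushed vector lands on top and the rest
# of the stack shifts down by one. The shapes and values are made up.
def _example_pure_push() -> jnp.ndarray:
  """Returns the stack after pushing [-1, -1] with probability one."""
  stack = jnp.arange(1 * 3 * 2, dtype=jnp.float32).reshape((1, 3, 2))
  actions = jnp.array([[1.0, 0.0, 0.0]])  # (push, pop, no-op) probabilities.
  push_value = jnp.full((1, 2), -1.0)
  new_stack = _update_stack(stack, actions, push_value)
  # new_stack[0, 0] == [-1, -1] and new_stack[0, 1:] == stack[0, :2]; the old
  # bottom element has been pushed out of the (finite) stack.
  return new_stack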
class StackRNNCore(hk.RNNCore):
"""Core for the stack RNN."""
def __init__(
self,
stack_cell_size: int,
stack_size: int = 30,
n_stacks: int = 1,
inner_core: type[hk.RNNCore] = hk.VanillaRNN,
name: Optional[str] = None,
**inner_core_kwargs: Mapping[str, Any]
):
"""Initializes.
Args:
stack_cell_size: The dimension of the vectors we put in the stack.
stack_size: The total number of vectors we can stack.
n_stacks: Number of stacks to use in the network.
inner_core: The inner RNN core builder.
name: See base class.
**inner_core_kwargs: The arguments to be passed to the inner RNN core
builder.
"""
super().__init__(name=name)
self._rnn_core = inner_core(**inner_core_kwargs)
self._stack_cell_size = stack_cell_size
self._stack_size = stack_size
self._n_stacks = n_stacks
def __call__(
self, inputs: jnp.ndarray, prev_state: _StackRnnState
) -> tuple[jnp.ndarray, _StackRnnState]:
"""Steps the stack RNN core.
See base class docstring.
Args:
inputs: An input array of shape (batch_size, input_size). The time
dimension is not included since it is an RNNCore, which is unrolled over
the time dimension.
prev_state: A _StackRnnState tuple, consisting of the previous stacks and
the previous state of the inner core. Each stack has shape (batch_size,
stack_size, stack_cell_size), such that `stack[n][0]` represents the top
of the stack for the nth batch item, and `stack[n][-1]` the bottom of
the stack. The stacks are just the concatenation of all these tensors.
Returns:
- output: An output array of shape (batch_size, output_size).
- next_state: Same format as prev_state.
"""
stacks, old_core_state = prev_state
# The network can always read the top of the stack.
batch_size = stacks.shape[0]
top_stacks = stacks[:, :, 0, :]
top_stacks = jnp.reshape(
top_stacks, (batch_size, self._n_stacks * self._stack_cell_size))
inputs = jnp.concatenate([inputs, top_stacks], axis=-1)
new_core_output, new_core_state = self._rnn_core(inputs, old_core_state)
push_values = hk.Linear(self._n_stacks * self._stack_cell_size)(
new_core_output)
push_values = jnp.reshape(
push_values, (batch_size, self._n_stacks, self._stack_cell_size))
# Shape (batch_size, _NUM_ACTIONS)
stack_actions = jnn.softmax(
hk.Linear(self._n_stacks * _NUM_ACTIONS)(new_core_output), axis=-1)
stack_actions = jnp.reshape(stack_actions,
(batch_size, self._n_stacks, _NUM_ACTIONS))
new_stacks = jax.vmap(
_update_stack, in_axes=1, out_axes=1)(stacks, stack_actions,
push_values)
return new_core_output, (new_stacks, new_core_state)
def initial_state(self, batch_size: Optional[int]) -> _StackRnnState:
"""Returns the initial state of the core, a hidden state and an empty stack."""
core_state = self._rnn_core.initial_state(batch_size)
stacks = jnp.zeros(
(batch_size, self._n_stacks, self._stack_size, self._stack_cell_size))
return stacks, core_state
| neural_networks_chomsky_hierarchy-main | models/stack_rnn.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transformer model."""
import dataclasses
from typing import Callable, Optional
import chex
import haiku as hk
import jax
import jax.nn as jnn
import jax.numpy as jnp
from neural_networks_chomsky_hierarchy.models import positional_encodings as pos_encs_lib
@chex.dataclass
class TransformerConfig:
"""Hyperparameters used in the Transformer architectures."""
# The size of the model output (i.e., the output vocabulary size).
output_size: int
# The dimension of the first embedding.
embedding_dim: int = 64
# The number of multi-head attention layers.
num_layers: int = 5
# The number of heads per layer.
num_heads: int = 8
# The number of hidden neurons per head. If None, it is set to be equal to
# `embedding_dim // num_heads`.
num_hiddens_per_head: Optional[int] = None
# The probability that each element is discarded by the dropout modules.
dropout_prob: float = 0.1
# The parameter initialization scale for the embeddings.
emb_init_scale: float = 0.02
# Whether to use the embeddings rather than raw inputs.
use_embeddings: bool = True
# Whether to share embeddings between the Encoder and the Decoder.
share_embeddings: bool = False
# The size of the sliding attention window. See MultiHeadDotProductAttention.
attention_window: Optional[int] = None
  # The positional encoding used, defaulting to sin/cos (Vaswani et al., 2017).
positional_encodings: pos_encs_lib.PositionalEncodings = dataclasses.field(
default_factory=lambda: pos_encs_lib.PositionalEncodings.SIN_COS
)
  # The maximum size of the context (used by the positional encodings).
max_time: int = 10_000
# The parameters for the positional encodings, default sin/cos.
positional_encodings_params: pos_encs_lib.PositionalEncodingsParams = (
dataclasses.field(default_factory=pos_encs_lib.SinCosParams)
)
# How much larger the hidden layer of the feedforward network should be
# compared to the `embedding_dim`.
widening_factor: int = 4
# Add mask to make causal predictions.
causal_masking: bool = False
def __post_init__(self) -> None:
"""Sets `num_hiddens_per_head` if it is `None`."""
if self.num_hiddens_per_head is None:
self.num_hiddens_per_head = self.embedding_dim // self.num_heads
def layer_norm(x: chex.Array) -> chex.Array:
return hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(x)
def shift_right(x: chex.Array, output_size: int) -> chex.Array:
"""Right-shift the one-hot encoded input by padding on the temporal axis."""
x = jnp.argmax(x, axis=-1)
# Add a time dimension for the single-output case (i.e., `ndim == 1`).
if x.ndim == 1:
x = jnp.expand_dims(x, axis=1)
padded = jnp.pad(
x, ((0, 0), (1, 0)), mode='constant', constant_values=output_size)
return jnn.one_hot(padded[:, :-1], num_classes=output_size + 1)
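# --- Illustrative sketch (not part of the original module). ------------------
# Hedged example of `shift_right`: a beginning-of-sequence token with class
# index `output_size` is prepended, the last target is dropped, and the one-hot
# depth grows by one. The toy values below are made up.
def _example_shift_right() -> chex.Array:
  """Returns shifted one-hot targets of shape (1, 3, 3)."""
  targets = jnn.one_hot(jnp.array([[0, 1, 1]]), num_classes=2)  # (1, 3, 2)
  shifted = shift_right(targets, output_size=2)
  # jnp.argmax(shifted, axis=-1) == [[2, 0, 1]]: class 2 is the new BOS token
  # and the original final token has been dropped.
  return shifted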
def compute_sliding_window_mask(sequence_length: int,
attention_window: int) -> chex.Array:
"""Returns a k-diagonal mask for a sliding window.
Args:
sequence_length: The length of the sequence, which will determine the shape
of the output.
attention_window: The size of the sliding window.
Returns:
A symmetric matrix of shape (sequence_length, sequence_length),
attention_window-diagonal, with ones on the diagonal and on all the
upper/lower diagonals up to attention_window // 2.
Raises:
ValueError if attention_window is <= 0.
"""
if attention_window <= 0:
raise ValueError(
f'The attention window should be > 0. Got {attention_window}.')
if attention_window == 1:
return jnp.eye(sequence_length, sequence_length)
attention_mask = jnp.sum(
jnp.stack([
jnp.eye(sequence_length, sequence_length, k=k, dtype=jnp.int32)
for k in range(1, attention_window // 2 + 1)
]),
axis=0)
attention_mask = attention_mask + jnp.transpose(attention_mask)
attention_mask += jnp.eye(sequence_length, sequence_length)
return attention_mask
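# --- Illustrative sketch (not part of the original module). ------------------
# Hedged example of the sliding-window mask: with sequence_length=5 and
# attention_window=3, each query may attend to itself and to one token on
# either side, giving a tri-diagonal band of ones:
#   [[1, 1, 0, 0, 0],
#    [1, 1, 1, 0, 0],
#    [0, 1, 1, 1, 0],
#    [0, 0, 1, 1, 1],
#    [0, 0, 0, 1, 1]]
def _example_sliding_window_mask() -> chex.Array:
  """Returns the (5, 5) banded attention mask shown above."""
  return compute_sliding_window_mask(sequence_length=5, attention_window=3)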
class MultiHeadDotProductAttention(hk.Module):
"""Multi-head dot-product attention (Vaswani et al., 2017)."""
def __init__(
self,
num_heads: int,
num_hiddens_per_head: int,
positional_encodings: pos_encs_lib.PositionalEncodings,
positional_encodings_params: pos_encs_lib.PositionalEncodingsParams,
attention_window: Optional[int] = None,
name: Optional[str] = None,
) -> None:
"""Initializes the attention module.
Args:
num_heads: Number of heads to use.
num_hiddens_per_head: Number of hidden neurons per head.
positional_encodings: Which positional encodings to use in the attention.
positional_encodings_params: Parameters for the positional encodings.
attention_window: Size of the attention sliding window. None means no
sliding window is used (or equivalently, window=full_attention_length).
      We attend only to attention_window tokens around a given query token. We
attend to tokens before AND after the query token. If attention_window
is even, we use the value +1.
name: Name of the module.
"""
super().__init__(name=name)
self._num_heads = num_heads
self._num_hiddens_per_head = num_hiddens_per_head
self._positional_encodings = positional_encodings
self._attention_window = attention_window
self._positional_encodings_params = positional_encodings_params
def __call__(
self,
inputs_q: chex.Array,
inputs_kv: chex.Array,
mask: Optional[chex.Array] = None,
causal: bool = False,
) -> chex.Array:
"""Returns the output of the multi-head attention."""
batch_size, sequence_length, embedding_size = inputs_q.shape
num_hiddens = self._num_hiddens_per_head * self._num_heads
q = hk.Linear(num_hiddens, with_bias=False)(inputs_q)
k = hk.Linear(num_hiddens, with_bias=False)(inputs_kv)
v = hk.Linear(num_hiddens, with_bias=False)(inputs_kv)
# The second (sequence) dimension is undefined since it can differ between
# queries and keys/values when decoding.
new_shape = (batch_size, -1, self._num_heads, self._num_hiddens_per_head)
q = jnp.reshape(q, new_shape)
k = jnp.reshape(k, new_shape)
v = jnp.reshape(v, new_shape)
# Let b=batch_size, t=seq_len, h=num_heads, and d=num_hiddens_per_head.
if self._positional_encodings == pos_encs_lib.PositionalEncodings.RELATIVE:
# We type hint the params to match the if statement, for pytype.
self._positional_encodings_params: pos_encs_lib.RelativeParams
attention = pos_encs_lib.compute_attention_with_relative_encodings(
q, k, self._positional_encodings_params.max_time, causal=causal
)
else:
if self._positional_encodings == pos_encs_lib.PositionalEncodings.ROTARY:
q = pos_encs_lib.apply_rotary_encoding(
q, position=jnp.arange(q.shape[1])[None, :]
)
k = pos_encs_lib.apply_rotary_encoding(
k, position=jnp.arange(k.shape[1])[None, :]
)
attention = jnp.einsum('bthd,bThd->bhtT', q, k)
attention *= 1.0 / jnp.sqrt(self._num_hiddens_per_head)
# ALiBi encodings are not scaled with the 1 / sqrt(d_k) factor.
if self._positional_encodings == pos_encs_lib.PositionalEncodings.ALIBI:
attention += pos_encs_lib.compute_alibi_encodings_biases(
attention.shape[1:]
)
if self._attention_window is not None:
# We compute the sliding attention by just applying a mask on the values
# that are outside our window.
attention_mask = compute_sliding_window_mask(sequence_length,
self._attention_window)
attention = jnp.where(attention_mask, attention,
jnp.finfo(jnp.float32).min)
if mask is not None:
attention = jnp.where(mask, attention, jnp.finfo(jnp.float32).min)
normalized_attention = jnn.softmax(attention)
output = jnp.einsum('bhtT,bThd->bthd', normalized_attention, v)
output = jnp.reshape(output, (batch_size, sequence_length, num_hiddens))
return hk.Linear(embedding_size, with_bias=False)(output)
class TransformerEncoder(hk.Module):
"""Transformer Encoder (Vaswani et al., 2017)."""
def __init__(
self,
config: TransformerConfig,
shared_embeddings_fn: Optional[Callable[[chex.Array], chex.Array]] = None,
name: Optional[str] = None,
) -> None:
"""Initializes the transformer encoder.
Args:
config: The hyperparameters used in Transformer architectures.
shared_embeddings_fn: Embedding function that is shared with the decoder.
name: The name of the module.
"""
super().__init__(name=name)
self._config = config
self._shared_embeddings_fn = shared_embeddings_fn
def __call__(self, x: jnp.ndarray) -> chex.Array:
"""Returns the transformer encoder output, shape [B, T, E]."""
if self._config.use_embeddings:
if self._shared_embeddings_fn is not None:
embeddings = self._shared_embeddings_fn(x)
else:
# Since `x` is one-hot encoded, using hk.Linear is equivalent to
# hk.Embed with hk.EmbedLookupStyle.ONE_HOT.
embs_init = hk.initializers.TruncatedNormal(
stddev=self._config.emb_init_scale)
embeddings = hk.Linear(
self._config.embedding_dim, with_bias=False, w_init=embs_init)(
x)
embeddings *= jnp.sqrt(self._config.embedding_dim)
else:
embeddings = x
batch_size, sequence_length, embedding_size = embeddings.shape
pos_enc_params = self._config.positional_encodings_params
if (
self._config.positional_encodings
== pos_encs_lib.PositionalEncodings.SIN_COS
):
pos_encodings = pos_encs_lib.sinusoid_position_encoding(
sequence_length=sequence_length,
hidden_size=embedding_size,
memory_length=0,
max_timescale=pos_enc_params.max_time,
min_timescale=2,
clamp_length=0,
causal=True,
)
h = embeddings + pos_encodings
h = hk.dropout(hk.next_rng_key(), self._config.dropout_prob, h)
else:
h = embeddings
# The causal mask is shared across heads.
if self._config.causal_masking:
causal_mask = jnp.tril(
jnp.ones((batch_size, 1, sequence_length, sequence_length))
)
else:
causal_mask = None
for _ in range(self._config.num_layers):
attention = MultiHeadDotProductAttention(
num_heads=self._config.num_heads,
num_hiddens_per_head=self._config.num_hiddens_per_head,
positional_encodings=self._config.positional_encodings,
positional_encodings_params=pos_enc_params,
attention_window=self._config.attention_window,
)(
inputs_q=h,
inputs_kv=h,
mask=causal_mask,
causal=self._config.causal_masking,
)
attention = hk.dropout(hk.next_rng_key(), self._config.dropout_prob,
attention)
attention = layer_norm(h + attention)
# Position-wise feedforward network.
h = hk.Linear(self._config.embedding_dim * self._config.widening_factor)(
attention)
h = jnn.relu(h)
h = hk.Linear(self._config.embedding_dim)(h)
h = hk.dropout(hk.next_rng_key(), self._config.dropout_prob, h)
h = layer_norm(h + attention)
return h
class TransformerDecoder(hk.Module):
"""Transformer Decoder (Vaswani et al., 2017)."""
def __init__(
self,
config: TransformerConfig,
shared_embeddings_fn: Optional[Callable[[chex.Array], chex.Array]] = None,
name: Optional[str] = None,
) -> None:
"""Initializes the Transformer decoder.
Args:
config: The hyperparameters used in Transformer architectures.
shared_embeddings_fn: Embedding function that is shared with the encoder.
name: The name of the module.
"""
super().__init__(name=name)
self._config = config
self._shared_embeddings_fn = shared_embeddings_fn
def __call__(self, encoded: chex.Array, targets: chex.Array) -> chex.Array:
"""Returns the transformer decoder output, shape [B, T_O, E].
Args:
encoded: The output of the encoder, shape [B, T_I, E].
      targets: The one-hot encoded target values, shape [B, T_O, output_size].
"""
targets = shift_right(targets, self._config.output_size)
if self._config.use_embeddings:
if self._shared_embeddings_fn is not None:
output_embeddings = self._shared_embeddings_fn(targets)
else:
# Since `x` is one-hot encoded, using hk.Linear is equivalent to
# hk.Embed with hk.EmbedLookupStyle.ONE_HOT.
embs_init = hk.initializers.TruncatedNormal(
stddev=self._config.emb_init_scale)
output_embeddings = hk.Linear(
self._config.embedding_dim, with_bias=False, w_init=embs_init)(
targets)
output_embeddings *= jnp.sqrt(self._config.embedding_dim)
else:
output_embeddings = targets
batch_size, output_sequence_length, embedding_size = output_embeddings.shape
if (
self._config.positional_encodings
== pos_encs_lib.PositionalEncodings.SIN_COS
):
pos_encodings = pos_encs_lib.sinusoid_position_encoding(
sequence_length=output_sequence_length,
hidden_size=embedding_size,
memory_length=0,
max_timescale=self._config.positional_encodings_params.max_time,
min_timescale=2,
clamp_length=0,
causal=True,
)
h = output_embeddings + pos_encodings
h = hk.dropout(hk.next_rng_key(), self._config.dropout_prob, h)
else:
h = output_embeddings
# The causal mask is shared across heads.
causal_mask = jnp.tril(
jnp.ones(
(batch_size, 1, output_sequence_length, output_sequence_length)))
for _ in range(self._config.num_layers):
self_attention = MultiHeadDotProductAttention(
num_heads=self._config.num_heads,
num_hiddens_per_head=self._config.num_hiddens_per_head,
positional_encodings=self._config.positional_encodings,
positional_encodings_params=self._config.positional_encodings_params,
attention_window=self._config.attention_window,
)(inputs_q=h, inputs_kv=h, mask=causal_mask, causal=True)
self_attention = hk.dropout(hk.next_rng_key(), self._config.dropout_prob,
self_attention)
self_attention = layer_norm(h + self_attention)
cross_attention = MultiHeadDotProductAttention(
num_heads=self._config.num_heads,
num_hiddens_per_head=self._config.num_hiddens_per_head,
positional_encodings=self._config.positional_encodings,
positional_encodings_params=self._config.positional_encodings_params,
attention_window=self._config.attention_window,
)(inputs_q=self_attention, inputs_kv=encoded, causal=True)
cross_attention = hk.dropout(hk.next_rng_key(), self._config.dropout_prob,
cross_attention)
cross_attention = layer_norm(self_attention + cross_attention)
# Position-wise feedforward network.
h = hk.Linear(self._config.embedding_dim * self._config.widening_factor)(
cross_attention)
h = jnn.relu(h)
h = hk.Linear(self._config.embedding_dim)(h)
h = hk.dropout(hk.next_rng_key(), self._config.dropout_prob, h)
h = layer_norm(h + cross_attention)
return h
class Transformer(hk.Module):
"""Transformer (Vaswani et al., 2017)."""
def __init__(self, config: TransformerConfig, name: Optional[str] = None):
"""Initializes the Transformer.
Args:
config: The hyperparameters used in Transformer architectures.
name: The name of the module.
"""
super().__init__(name=name)
shared_embeddings_fn = None
if config.share_embeddings:
shared_embeddings_fn = hk.Linear(
config.embedding_dim,
with_bias=False,
w_init=hk.initializers.TruncatedNormal(stddev=config.emb_init_scale),
name='shared_embeddings')
self._encoder = TransformerEncoder(config, shared_embeddings_fn)
self._decoder = TransformerDecoder(config, shared_embeddings_fn)
def __call__(self, inputs: chex.Array, targets: chex.Array) -> chex.Array:
return self._decoder(self._encoder(inputs), targets)
def make_transformer_encoder(
output_size: int,
embedding_dim: int = 64,
num_layers: int = 5,
num_heads: int = 8,
num_hiddens_per_head: Optional[int] = None,
dropout_prob: float = 0.1,
emb_init_scale: float = 0.02,
use_embeddings: bool = True,
share_embeddings: bool = False,
attention_window: Optional[int] = None,
positional_encodings: Optional[pos_encs_lib.PositionalEncodings] = None,
positional_encodings_params: Optional[
pos_encs_lib.PositionalEncodingsParams
] = None,
widening_factor: int = 4,
return_all_outputs: bool = False,
causal_masking: bool = False,
) -> Callable[[chex.Array], chex.Array]:
"""Returns a transformer encoder model."""
if positional_encodings is None:
positional_encodings = pos_encs_lib.PositionalEncodings.SIN_COS
positional_encodings_params = pos_encs_lib.SinCosParams()
elif positional_encodings_params is None:
raise ValueError('No parameters for positional encodings are passed.')
config = TransformerConfig(
output_size=output_size,
embedding_dim=embedding_dim,
num_layers=num_layers,
num_heads=num_heads,
num_hiddens_per_head=num_hiddens_per_head,
dropout_prob=dropout_prob,
emb_init_scale=emb_init_scale,
use_embeddings=use_embeddings,
share_embeddings=share_embeddings,
attention_window=attention_window,
positional_encodings=positional_encodings,
positional_encodings_params=positional_encodings_params,
widening_factor=widening_factor,
causal_masking=causal_masking,
)
def transformer_encoder(inputs: chex.Array) -> chex.Array:
output = TransformerEncoder(config)(inputs)
if not return_all_outputs:
output = output[:, -1, :]
return hk.Linear(output_size)(output)
return transformer_encoder
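# --- Illustrative usage sketch (not part of the original module). ------------
# Hedged example of building and applying the encoder-only model with arbitrary
# toy sizes. Dropout means an rng key is also required at apply time.
def _example_transformer_encoder(inputs: chex.Array) -> chex.Array:
  """Returns logits of shape (batch_size, 2) for one-hot `inputs` of depth 3."""
  forward = hk.transform(make_transformer_encoder(output_size=2, num_layers=1))
  params = forward.init(jax.random.PRNGKey(0), inputs)
  # E.g. inputs = jnn.one_hot(jnp.zeros((2, 7), dtype=jnp.int32), num_classes=3)
  # yields a (2, 2) output, since return_all_outputs defaults to False.
  return forward.apply(params, jax.random.PRNGKey(1), inputs)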
def make_transformer(
output_size: int,
embedding_dim: int = 64,
num_layers: int = 5,
num_heads: int = 8,
num_hiddens_per_head: Optional[int] = None,
dropout_prob: float = 0.1,
emb_init_scale: float = 0.02,
use_embeddings: bool = True,
share_embeddings: bool = False,
attention_window: Optional[int] = None,
positional_encodings: Optional[pos_encs_lib.PositionalEncodings] = None,
positional_encodings_params: Optional[
pos_encs_lib.PositionalEncodingsParams
] = None,
widening_factor: int = 4,
return_all_outputs: bool = False,
) -> Callable[[chex.Array, chex.Array], chex.Array]:
"""Returns a transformer model."""
if positional_encodings is None:
positional_encodings = pos_encs_lib.PositionalEncodings.SIN_COS
positional_encodings_params = pos_encs_lib.SinCosParams()
elif positional_encodings_params is None:
raise ValueError('No parameters for positional encodings are passed.')
config = TransformerConfig(
output_size=output_size,
embedding_dim=embedding_dim,
num_layers=num_layers,
num_heads=num_heads,
num_hiddens_per_head=num_hiddens_per_head,
dropout_prob=dropout_prob,
emb_init_scale=emb_init_scale,
use_embeddings=use_embeddings,
share_embeddings=share_embeddings,
attention_window=attention_window,
positional_encodings=positional_encodings,
positional_encodings_params=positional_encodings_params,
widening_factor=widening_factor,
)
def transformer(inputs: chex.Array, targets: chex.Array) -> chex.Array:
output = Transformer(config)(inputs, targets)
if not return_all_outputs:
output = output[:, -1, :]
return hk.Linear(output_size)(output)
return transformer
| neural_networks_chomsky_hierarchy-main | models/transformer.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Positional encodings, used in `transformer.py`."""
import enum
import math
from typing import Any
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
class PositionalEncodings(enum.Enum):
"""Enum for all the positional encodings implemented."""
NONE = 0
SIN_COS = 1
ALIBI = 2
RELATIVE = 3
ROTARY = 4
# General type used throughout the class for pos enc parameters.
PositionalEncodingsParams = Any
@chex.dataclass
class SinCosParams:
"""Parameters for the classical sin/cos positional encoding."""
# The maximum wavelength used.
max_time: int = 10_000
# We will use this same class for Rotary and Relative.
RotaryParams = SinCosParams
RelativeParams = SinCosParams
POS_ENC_TABLE = {
'NONE': PositionalEncodings.NONE,
'SIN_COS': PositionalEncodings.SIN_COS,
'ALIBI': PositionalEncodings.ALIBI,
'RELATIVE': PositionalEncodings.RELATIVE,
'ROTARY': PositionalEncodings.ROTARY,
}
POS_ENC_PARAMS_TABLE = {
'NONE': SinCosParams,
'SIN_COS': SinCosParams,
'ALIBI': SinCosParams,
'RELATIVE': RelativeParams,
'ROTARY': RotaryParams,
}
def sinusoid_position_encoding(
sequence_length: int,
hidden_size: int,
memory_length: int = 0,
max_timescale: float = 1e4,
min_timescale: float = 2.0,
clamp_length: int = 0,
causal: bool = False,
):
"""Creates sinusoidal encodings.
The time dimension is larger than sequence_length as we need to cover all
cases of looking in either the future or past.
Args:
sequence_length: `int` sequence length, L
hidden_size: `int` dimension of the positional encoding vectors, D
memory_length: `int` size of the memory, M
max_timescale: `int` maximum timescale for the frequency
min_timescale: `int` minimum timescale for the frequency
clamp_length: If greater than 0, any positions further apart than
`clamp_length` are clamped to this value
causal: If true then generates a smaller set (L vs 2 * L) of time-encodings
for the use-case of causal attention.
Returns:
An array of shape [L + M, D] for causal and [2 * L + M, D] otherwise.
"""
freqs = np.arange(0, hidden_size, min_timescale)
inv_freq = max_timescale ** (-freqs / hidden_size)
# Since inputs can look into the past and into the future, depending on the
# permutation mask, we need to have relative encodings for both. The furthest
# back an input can see is the final token, up to sequence_length +
# memory_length - 1. The furthest ahead an input can see is for token 0 where
# it can see up to sequence_length - 1 future tokens.
if causal:
pos_seq = np.arange(sequence_length + memory_length, 0, -1.0)
else:
pos_seq = np.arange(sequence_length + memory_length, -sequence_length, -1.0)
if clamp_length:
pos_seq = np.clip(pos_seq, a_min=-clamp_length, a_max=clamp_length)
sinusoid_inp = np.einsum('i,j->ij', pos_seq, inv_freq)
pos_emb = np.concatenate(
[np.sin(sinusoid_inp), np.cos(sinusoid_inp)], axis=-1
)
return pos_emb
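# --- Illustrative sketch (not part of the original module). ------------------
# Hedged shape check for `sinusoid_position_encoding`: with causal=True the
# table has L + M rows (one per position), otherwise 2 * L + M rows, and D
# columns. The toy sizes below are made up.
def _example_sinusoid_shapes() -> tuple[tuple[int, ...], tuple[int, ...]]:
  """Returns ((6, 8), (12, 8)) for L=6, M=0, D=8."""
  causal = sinusoid_position_encoding(
      sequence_length=6, hidden_size=8, causal=True)
  bidirectional = sinusoid_position_encoding(sequence_length=6, hidden_size=8)
  return causal.shape, bidirectional.shape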
def _rel_shift_inner(logits: chex.Array, attention_length: int) -> chex.Array:
"""Shifts the relative logits.
  This is more general than the original Transformer-XL implementation, as
inputs may also see the future. (The implementation does not rely on a
causal mask removing the upper-right triangle.)
Given attention length 3 and inputs:
[[-3, -2, -1, 0, 1, 2],
[-3, -2, -1, 0, 1, 2],
[-3, -2, -1, 0, 1, 2]]
The shifted output is:
[[0, 1, 2],
[-1, 0, 1],
[-2, -1, 0]]
Args:
logits: input tensor of shape [T_q, T_v + T_q]
attention_length: T_v `int` length of the attention, should be equal to
memory size + sequence length.
Returns:
A shifted version of the input of size [T_q, T_v]. In each row, a window of
size T_v elements is kept. The window starts at
the rightmost end, for the first row. It then shifts left by 1 for each
subsequent row.
"""
if logits.ndim != 2:
raise ValueError('`logits` needs to be an array of dimension 2.')
tq, total_len = logits.shape
assert total_len == tq + attention_length
logits = jnp.reshape(logits, [total_len, tq])
logits = jax.lax.slice(logits, (1, 0), logits.shape) # logits[1:]
logits = jnp.reshape(logits, [tq, total_len - 1])
# Equiv to logits[:, :attention_length].
logits = jax.lax.slice(logits, (0, 0), (tq, attention_length))
return logits
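# A small worked example (not part of the original module) reproducing the
# docstring of `_rel_shift_inner` with attention_length=3; it relies only on
# the module-level jnp import.
def _demo_rel_shift_inner():
  logits = jnp.array([[-3., -2., -1., 0., 1., 2.]] * 3)
  shifted = _rel_shift_inner(logits, attention_length=3)
  expected = jnp.array([[0., 1., 2.], [-1., 0., 1.], [-2., -1., 0.]])
  assert jnp.array_equal(shifted, expected)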
def _rel_shift_causal(logits: chex.Array) -> chex.Array:
"""Shifts the relative logits, assuming causal attention.
Given inputs:
[[-4, -3, -2, -1],
[-4, -3, -2, -1]]
The shifted (and, later, masked) output is:
[[-3, -2, -1, 0],
[-4, -3, -2, -1]]
Args:
logits: input tensor of shape [T_q, T_v]
Returns:
A shifted version of the input of size [T_q, T_v].
"""
t1, t2 = logits.shape
# We prepend zeros on the final time dimension.
to_pad = jnp.zeros_like(logits[..., :1])
x = jnp.concatenate((to_pad, logits), axis=-1)
# Reshape trick to shift input.
x = jnp.reshape(x, [t2 + 1, t1])
# Remove extra time dimension and re-shape.
x = jax.lax.slice(x, [1] + [0] * (x.ndim - 1), x.shape)
return jnp.reshape(x, [t1, t2])
def relative_shift(
logits: chex.Array, attention_length: int, causal: bool = False
) -> chex.Array:
if causal:
fn = _rel_shift_causal
else:
fn = lambda t: _rel_shift_inner(t, attention_length)
return jax.vmap(jax.vmap(fn))(logits)
def apply_rotary_encoding(
x: jnp.ndarray, position: jnp.ndarray, max_time: int = 10_000
) -> jnp.ndarray:
"""Applies RoPE positional encodings for the input.
Paper: https://arxiv.org/abs/2104.09864
Args:
x: The input tensor on which RoPE will be applied. Usually it is either some
queries q or some keys k.
position: The positions to use. Usually it's an arange of the maximum
length.
max_time: Constant used to scale position by in the encodings.
Returns:
A tensor with the same shape as x.
"""
# Expand dims for positions to support inputs of shapes BTC or BTHC.
freq_seq = jnp.arange(x.shape[-1] // 2, dtype=jnp.float32)
freq_seq = freq_seq / (x.shape[-1] // 2)
inv_freq = max_time**-freq_seq
inv_freq = jnp.repeat(inv_freq, 2, 0)
# Produce position inputs to periodic functions.
t = position[:, :, None, None] * inv_freq[None, None, None, :]
x_rot = jnp.einsum('bthd,dD->bthD', x, _rope_kernel(x.shape[-1], x.dtype))
return x * jnp.cos(t).astype(x.dtype) + jnp.sin(t).astype(x.dtype) * x_rot
def _rope_kernel(n: int, dtype: Any) -> np.ndarray:
"""Reorders the embedding dimension of an array, to make rotation easier."""
# We implement the equivalent of
# even_dims, odd_dims, = x[..., ::2], x[..., 1::2]
# return jnp.stack((-odd_dims, even_dims), axis=-1).reshape(x.shape)
# with a custom kernel for einsum. This allows the computation to execute
# on the MXU instead of producing a slow gather.
assert n % 2 == 0, n
kernel = np.zeros((n, n), dtype)
for i in range(n):
# Swap each neighbouring pair of values.
if i % 2 == 0:
kernel[i, i + 1] = 1
else:
kernel[i, i - 1] = -1
return kernel
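# A sanity-check sketch (not part of the original module): the einsum kernel
# above reproduces the gather-based pair rotation described in its comment.
# Uses only the module-level np import.
def _demo_rope_kernel():
  x = np.array([1., 2., 3., 4.])
  rotated = x @ _rope_kernel(4, x.dtype)
  even_dims, odd_dims = x[..., ::2], x[..., 1::2]
  expected = np.stack((-odd_dims, even_dims), axis=-1).reshape(x.shape)
  assert np.array_equal(rotated, expected)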
def compute_attention_with_relative_encodings(
queries: chex.Array,
keys: chex.Array,
max_time: int = 10_000,
causal: bool = False) -> chex.Array:
"""Returns attention with relative positional encodings.
This code strictly follows what is described in the TransformerXL paper.
https://arxiv.org/pdf/1901.02860.pdf
Args:
queries: The queries used for attention. Shape (b, t, h, d).
keys: The keys used for attention. Shape (b, T, h, d).
max_time: Constant used to scale position by in the sin/cos encodings.
causal: Whether to use causal attention when shifting the relative logits.
Returns:
The attention logits. Shape (b, h, t, T).
"""
batch_size, k_seq_len, num_heads, num_hiddens = keys.shape
hiddens = num_hiddens * num_heads
# First compute the content logits.
content_bias = hk.get_parameter(
name='relpos_contentbias',
shape=[num_heads, num_hiddens],
init=hk.initializers.RandomNormal(stddev=0.02))
content_logits = jnp.einsum('bthd,bThd->bhtT', queries + content_bias, keys)
positional_encodings = sinusoid_position_encoding(
sequence_length=k_seq_len,
hidden_size=hiddens,
memory_length=0,
max_timescale=max_time,
min_timescale=2,
clamp_length=0,
causal=causal,
)
positional_encodings = jnp.broadcast_to(positional_encodings, (batch_size,) +
positional_encodings.shape)
relative_keys = hk.Linear(hiddens, with_bias=False)(positional_encodings)
relative_keys = jnp.reshape(
relative_keys, positional_encodings.shape[:-1] + (num_heads, num_hiddens))
# Then compute the relative part.
relative_bias = hk.get_parameter(
name='relpos_relativebias',
shape=[num_heads, num_hiddens],
init=hk.initializers.RandomNormal(stddev=0.02))
relative_logits = jnp.einsum('bthd,bThd->bhtT', queries + relative_bias,
relative_keys)
# We shift the relative logits instead of the positional encoding matrix as
# described in Appendix B of the paper (https://arxiv.org/pdf/1901.02860.pdf).
relative_logits = relative_shift(
relative_logits, attention_length=content_logits.shape[-1], causal=causal
)
assert content_logits.shape == relative_logits.shape
return content_logits + relative_logits
def _get_alibi_slopes(num_heads: int) -> list[float]:
"""Returns the slopes for the different attention heads.
While this does not exactly match the description of the [ALiBi
paper](https://arxiv.org/pdf/2108.12409.pdf), it corresponds to the [official
implementation](https://github.com/ofirpress/attention_with_linear_biases/blob/a06526fbfe557f9148e414b8569dcb97c7b182ba/fairseq/models/transformer.py#L742).
Args:
num_heads: The number of attention heads to create slopes for.
"""
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n) - 3)))
ratio = start
return [start * ratio**i for i in range(n)]
if math.log2(num_heads).is_integer():
return get_slopes_power_of_2(num_heads)
else:
closest_power_of_2 = 2**math.floor(math.log2(num_heads))
return (get_slopes_power_of_2(closest_power_of_2) + _get_alibi_slopes(
2 * closest_power_of_2)[0::2][:num_heads - closest_power_of_2])
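# A worked example (not part of the original module): for a power-of-two
# number of heads the slopes form a geometric sequence, e.g.
# 1/2, 1/4, ..., 1/256 for 8 heads.
def _demo_alibi_slopes():
  slopes = _get_alibi_slopes(8)
  assert len(slopes) == 8
  assert math.isclose(slopes[0], 1 / 2)
  assert math.isclose(slopes[-1], 1 / 256)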
def compute_alibi_encodings_biases(
attention_shape: tuple[int, ...]
) -> chex.Array:
"""Returns the biases following the ALiBi method.
This code strictly follows what is described in the ALiBi paper.
https://arxiv.org/pdf/2108.12409.pdf
Args:
attention_shape: The attention logits shape, without batch size, (h, t, T).
Returns:
The alibi biases, same shape as the input logits shape.
"""
num_heads, q_seq_len, k_seq_len = attention_shape
# Since we do not use causal masking, the upper triangle of the matrix has to
# be nonzero. Therefore, we set it equal to the lower triangle, but we also
# add a constant factor of 0.5 to the lower triangle, to (arbitrarily) break
# the symmetry (otherwise, the model cannot distinguish left and right).
alibi = np.zeros((q_seq_len, k_seq_len))
alibi -= sum(np.tri(*alibi.shape, k=-i) for i in range(1, q_seq_len))
alibi -= sum(np.tri(*alibi.T.shape, k=-i).T for i in range(1, k_seq_len))
alibi += 0.5 * np.tri(*alibi.shape, k=-1)
return alibi * jnp.array(_get_alibi_slopes(num_heads))[:, None, None]
|
neural_networks_chomsky_hierarchy-main
|
models/positional_encodings.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the Tape RNN."""
import abc
import functools
from typing import Any, Optional, Sequence
import chex
import haiku as hk
import jax
from jax import nn as jnn
from jax import numpy as jnp
# The first element is the memory, the second is the hidden internal state, and
# the third is the input length, necessary for adaptive actions.
_TapeRNNState = tuple[chex.Array, chex.Array, chex.Array]
class TapeRNNCore(hk.RNNCore, abc.ABC):
"""Core for the tape RNN."""
def __init__(
self,
memory_cell_size: int,
memory_size: int = 30,
n_tapes: int = 1,
mlp_layers_size: Sequence[int] = (64, 64),
inner_core: type[hk.RNNCore] = hk.VanillaRNN,
name: Optional[str] = None,
**inner_core_kwargs: Any
):
"""Initializes.
Args:
memory_cell_size: The dimension of the vectors we put in memory.
memory_size: The size of the tape, fixed value along the episode.
n_tapes: Number of tapes to use. Default is 1.
mlp_layers_size: Sizes for the inner MLP layers. Can be empty, in which
case the MLP is a linear layer.
inner_core: The inner RNN core builder.
name: See base class.
**inner_core_kwargs: The arguments to be passed to the inner RNN core
builder.
"""
super().__init__(name=name)
self._rnn_core = inner_core(**inner_core_kwargs)
self._mlp_layers_size = mlp_layers_size
self._memory_cell_size = memory_cell_size
self._memory_size = memory_size
self._n_tapes = n_tapes
@abc.abstractmethod
def _tape_operations(
self, eye_memory: chex.Array, input_length: int
) -> list[chex.Array]:
"""Returns a set of updated memory slots.
An eye matrix is passed and corresponds to the positions of the memory
slots. This method returns a matrix with the new positions associated with
the actions. For instance, for a 'left' action, the new matrix will just be
a roll(eye_memory, shift=-1). This is general enough to allow any
permutation on the indexes.
Args:
eye_memory: An eye matrix of shape [memory_size, memory_size].
input_length: The length of the input sequence. Can be useful for some
operations.
"""
@property
@abc.abstractmethod
def num_actions(self) -> int:
"""Returns the number of actions which can be taken on the tape."""
def __call__(
self, inputs: chex.Array, prev_state: _TapeRNNState
) -> tuple[chex.Array, _TapeRNNState]:
"""Steps the tape RNN core."""
memories, old_core_state, input_length = prev_state
# The network can always read the value of the current cell.
batch_size = memories.shape[0]
current_memories = memories[:, :, 0, :]
current_memories = jnp.reshape(
current_memories, (batch_size, self._n_tapes * self._memory_cell_size))
inputs = jnp.concatenate([inputs, current_memories], axis=-1)
new_core_output, new_core_state = self._rnn_core(inputs, old_core_state)
readout_mlp = hk.nets.MLP(
list(self._mlp_layers_size) + [self._n_tapes * self._memory_cell_size])
write_values = readout_mlp(new_core_output)
write_values = jnp.reshape(
write_values, (batch_size, self._n_tapes, self._memory_cell_size))
# One action distribution per tape, each of shape (batch_size, num_actions);
# stacked below to shape (batch_size, n_tapes, num_actions).
actions = []
for _ in range(self._n_tapes):
actions.append(
jnn.softmax(hk.Linear(self.num_actions)(new_core_output), axis=-1))
actions = jnp.stack(actions, axis=1)
update_memory = functools.partial(
self._update_memory, input_length=input_length[0])
new_memories = jax.vmap(
update_memory, in_axes=1, out_axes=1)(memories, actions, write_values)
return new_core_output, (new_memories, new_core_state, input_length)
def initial_state(self, batch_size: Optional[int],
input_length: int) -> _TapeRNNState: # pytype: disable=signature-mismatch
"""Returns the initial state of the core."""
core_state = self._rnn_core.initial_state(batch_size)
memories = jnp.zeros(
(batch_size, self._n_tapes, self._memory_size, self._memory_cell_size))
return memories, core_state, jnp.array([input_length])
def _update_memory(self, memory: chex.Array, actions: chex.Array,
write_values: chex.Array, input_length: int) -> chex.Array:
"""Computes the new memory based on the `actions` and `write_values`.
Args:
memory: The current memory with shape `[batch_size, memory_size,
memory_cell_size]`.
actions: The action probabilities with shape `[batch_size, num_actions]`.
write_values: The values added to the first memory entry with shape
`[batch_size, memory_cell_size]`.
input_length: The length of the input.
Returns:
The new memory with shape `[batch_size, memory_size, memory_cell_size]`.
"""
_, memory_size, _ = memory.shape
memory_with_write = jnp.concatenate(
[jnp.expand_dims(write_values, axis=1), memory[:, 1:]], axis=1)
eye_memory = jnp.eye(memory_size)
operations = self._tape_operations(eye_memory, input_length)
apply_operation = lambda x: jnp.einsum('mM,bMc->bmc', x, memory_with_write)
memory_operations = jnp.stack(list(map(apply_operation, operations)))
return jnp.einsum('Abmc,bA->bmc', memory_operations, actions)
class TapeInputLengthJumpCore(TapeRNNCore):
"""A tape-RNN with extra jumps of the length of the input.
5 possible actions:
- write and stay
- write and move one cell left
- write and move one cell right
- write and move input_length cells left
- write and move input_length cells right
"""
@property
def num_actions(self) -> int:
"""Returns the number of actions of the tape."""
return 5
def _tape_operations(
self, eye_memory: chex.Array, input_length: int
) -> list[chex.Array]:
write_stay = eye_memory
write_left = jnp.roll(eye_memory, shift=-1, axis=0)
write_right = jnp.roll(eye_memory, shift=1, axis=0)
write_jump_left = jnp.roll(eye_memory, shift=-input_length, axis=0)
write_jump_right = jnp.roll(eye_memory, shift=input_length, axis=0)
return [
write_stay, write_left, write_right, write_jump_left, write_jump_right
]
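# A small sketch (not part of the original module) of how the permutation
# matrices above act on the memory: rolling the identity by +1 along axis 0
# and applying the einsum from `_update_memory` moves every memory slot one
# cell to the right. Uses only the module-level jnp import.
def _demo_roll_as_permutation():
  memory = jnp.arange(6.).reshape(1, 3, 2)  # [batch, memory_size, cell_size]
  write_right = jnp.roll(jnp.eye(3), shift=1, axis=0)
  shifted = jnp.einsum('mM,bMc->bmc', write_right, memory)
  assert jnp.array_equal(shifted[0, 1], memory[0, 0])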
|
neural_networks_chomsky_hierarchy-main
|
models/tape_rnn.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builders for RNN/LSTM cores."""
from typing import Any, Callable
import haiku as hk
import jax.nn as jnn
import jax.numpy as jnp
from neural_networks_chomsky_hierarchy.models import tape_rnn
def make_rnn(
output_size: int,
rnn_core: type[hk.RNNCore],
return_all_outputs: bool = False,
input_window: int = 1,
**rnn_kwargs: Any
) -> Callable[[jnp.ndarray], jnp.ndarray]:
"""Returns an RNN model, not haiku transformed.
By default, only the last output in the sequence is returned; set
`return_all_outputs=True` to keep the full sequence. A linear layer is added
to match the required output_size.
Args:
output_size: The output size of the model.
rnn_core: The haiku RNN core to use, e.g. hk.LSTM.
return_all_outputs: Whether to return the whole sequence of outputs of the
RNN, or just the last one.
input_window: The number of tokens that are fed at once to the RNN.
**rnn_kwargs: Kwargs to be passed to the RNN core.
"""
def rnn_model(x: jnp.array, input_length: int = 1) -> jnp.ndarray:
core = rnn_core(**rnn_kwargs)
if issubclass(rnn_core, tape_rnn.TapeRNNCore):
initial_state = core.initial_state(x.shape[0], input_length) # pytype: disable=wrong-arg-count
else:
initial_state = core.initial_state(x.shape[0])
batch_size, seq_length, embed_size = x.shape
if seq_length % input_window != 0:
x = jnp.pad(x, ((0, 0), (0, input_window - seq_length % input_window),
(0, 0)))
new_seq_length = x.shape[1]
x = jnp.reshape(
x,
(batch_size, new_seq_length // input_window, input_window, embed_size))
x = hk.Flatten(preserve_dims=2)(x)
output, _ = hk.dynamic_unroll(
core, x, initial_state, time_major=False, return_all_states=True)
output = jnp.reshape(output, (batch_size, new_seq_length, output.shape[-1]))
if not return_all_outputs:
output = output[:, -1, :] # (batch, time, alphabet_dim)
output = jnn.relu(output)
return hk.Linear(output_size)(output)
return rnn_model
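# A minimal usage sketch (not part of the original module): wrap the returned
# model with hk.transform and run it on a dummy batch. `hidden_size=8` is an
# illustrative kwarg forwarded to hk.LSTM via `rnn_kwargs`.
def _demo_make_rnn():
  model = make_rnn(output_size=4, rnn_core=hk.LSTM, hidden_size=8)
  forward = hk.transform(model)
  x = jnp.zeros((2, 5, 3))  # [batch_size, sequence_length, embedding_size]
  params = forward.init(next(hk.PRNGSequence(0)), x)
  out = forward.apply(params, None, x)
  assert out.shape == (2, 4)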
|
neural_networks_chomsky_hierarchy-main
|
models/rnn.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Install script for setuptools."""
import imp
import setuptools
# Additional requirements for TensorFlow baselines, excluding OpenAI & Dopamine.
# See baselines/README.md for more information.
baselines_require = [
'dm-sonnet',
'dm-tree',
'tensorflow',
'tensorflow_probability',
'trfl',
'tqdm',
]
# Additional requirements for JAX baselines.
# See baselines/README.md for more information.
baselines_jax_require = [
'dataclasses',
'dm-haiku',
'dm-tree',
'jax',
'jaxlib',
'optax',
'rlax',
'tqdm',
]
baselines_third_party_require = [
'tensorflow == 1.15',
'dopamine-rl',
'baselines',
]
testing_require = [
'gym==0.20.0',
'tensorflow_probability == 0.14.1',
'mock',
'pytest-xdist',
'pytype',
]
setuptools.setup(
name='bsuite',
description=('Core RL Behaviour Suite. '
'A collection of reinforcement learning experiments.'),
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
url='https://github.com/deepmind/bsuite',
author='DeepMind',
author_email='dm-bsuite-eng+os@google.com',
license='Apache License, Version 2.0',
version=imp.load_source('_metadata', 'bsuite/_metadata.py').__version__,
keywords='reinforcement-learning python machine-learning',
packages=setuptools.find_packages(),
install_requires=[
'absl-py',
'dm_env',
'immutabledict',
'matplotlib',
'numpy',
'pandas',
'plotnine',
'scipy',
'scikit-image',
'six',
'termcolor',
],
extras_require={
'baselines': baselines_require,
'baselines_jax': baselines_jax_require,
'baselines_third_party': baselines_third_party_require,
'testing': testing_require,
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
|
bsuite-master
|
setup.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Package metadata for bsuite.
This is kept in a separate module so that it can be imported from setup.py, at
a time when bsuite's dependencies may not have been installed yet.
"""
__version__ = '0.3.5'
|
bsuite-master
|
bsuite/_metadata.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sweep.py."""
from absl.testing import absltest
from bsuite import sweep
from bsuite.experiments.bandit import sweep as bandit_sweep
class SweepTest(absltest.TestCase):
"""Simple tests for sweeps."""
def test_sweep_contents(self):
"""Checks that all sweeps have sensible contents."""
test_bsuite_id = 'bandit/0'
test_bsuite_id_1 = 'bandit/1'
# Check `test_bsuite_id` is in BANDIT, SWEEP, and TESTING sweeps.
self.assertIn(test_bsuite_id, sweep.BANDIT)
self.assertIn(test_bsuite_id, sweep.SWEEP)
self.assertIn(test_bsuite_id, sweep.TESTING)
# `test_bsuite_id_1` should *not* be included in the testing sweep.
self.assertNotIn(test_bsuite_id_1, sweep.TESTING)
# Check all settings present in sweep.
self.assertLen(sweep.BANDIT, len(bandit_sweep.SETTINGS))
# Check `test_bsuite_id` is found in the 'basic' TAG section.
self.assertIn(test_bsuite_id, sweep.TAGS['basic'])
def test_sweep_immutable(self):
"""Checks that all exposed sweeps are immutable."""
with self.assertRaises(TypeError):
# pytype: disable=attribute-error
# pytype: disable=unsupported-operands
sweep.BANDIT[0] = 'new_bsuite_id'
sweep.SWEEP[0] = 'new_bsuite_id'
sweep.TESTING[0] = 'new_bsuite_id'
sweep.TAGS['new_tag'] = 42
# pytype: enable=unsupported-operands
# pytype: enable=attribute-error
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/sweep_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Behaviour Suite for Reinforcement Learning."""
from . import bsuite as _bsuite
from bsuite._metadata import __version__
load = _bsuite.load
load_from_id = _bsuite.load_from_id
load_and_record = _bsuite.load_and_record
load_and_record_to_sqlite = _bsuite.load_and_record_to_sqlite
load_and_record_to_csv = _bsuite.load_and_record_to_csv
|
bsuite-master
|
bsuite/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Functions to load bsuite environments."""
from typing import Any, Mapping, Tuple
from bsuite import sweep
from bsuite.environments import base
from bsuite.experiments.bandit import bandit
from bsuite.experiments.bandit_noise import bandit_noise
from bsuite.experiments.bandit_scale import bandit_scale
from bsuite.experiments.cartpole import cartpole
from bsuite.experiments.cartpole_noise import cartpole_noise
from bsuite.experiments.cartpole_scale import cartpole_scale
from bsuite.experiments.cartpole_swingup import cartpole_swingup
from bsuite.experiments.catch import catch
from bsuite.experiments.catch_noise import catch_noise
from bsuite.experiments.catch_scale import catch_scale
from bsuite.experiments.deep_sea import deep_sea
from bsuite.experiments.deep_sea_stochastic import deep_sea_stochastic
from bsuite.experiments.discounting_chain import discounting_chain
from bsuite.experiments.memory_len import memory_len
from bsuite.experiments.memory_size import memory_size
from bsuite.experiments.mnist import mnist
from bsuite.experiments.mnist_noise import mnist_noise
from bsuite.experiments.mnist_scale import mnist_scale
from bsuite.experiments.mountain_car import mountain_car
from bsuite.experiments.mountain_car_noise import mountain_car_noise
from bsuite.experiments.mountain_car_scale import mountain_car_scale
from bsuite.experiments.umbrella_distract import umbrella_distract
from bsuite.experiments.umbrella_length import umbrella_length
from bsuite.logging import csv_logging
from bsuite.logging import terminal_logging
import dm_env
import termcolor
# Internal imports.
# Mapping from experiment name to environment constructor or load function.
# Each constructor or load function accepts keyword arguments as defined in
# each experiment's sweep.py file.
EXPERIMENT_NAME_TO_ENVIRONMENT = dict(
bandit=bandit.load,
bandit_noise=bandit_noise.load,
bandit_scale=bandit_scale.load,
cartpole=cartpole.load,
cartpole_noise=cartpole_noise.load,
cartpole_scale=cartpole_scale.load,
cartpole_swingup=cartpole_swingup.CartpoleSwingup,
catch=catch.load,
catch_noise=catch_noise.load,
catch_scale=catch_scale.load,
deep_sea=deep_sea.load,
deep_sea_stochastic=deep_sea_stochastic.load,
discounting_chain=discounting_chain.load,
memory_len=memory_len.load,
memory_size=memory_size.load,
mnist=mnist.load,
mnist_noise=mnist_noise.load,
mnist_scale=mnist_scale.load,
mountain_car=mountain_car.load,
mountain_car_noise=mountain_car_noise.load,
mountain_car_scale=mountain_car_scale.load,
umbrella_distract=umbrella_distract.load,
umbrella_length=umbrella_length.load,
)
def unpack_bsuite_id(bsuite_id: str) -> Tuple[str, int]:
"""Returns the experiment name and setting index given a bsuite_id."""
parts = bsuite_id.split(sweep.SEPARATOR)
assert len(parts) == 2
experiment_name = parts[0]
setting_index = int(parts[1])
return experiment_name, setting_index
def load(
experiment_name: str,
kwargs: Mapping[str, Any],
) -> base.Environment:
"""Returns a bsuite environment given an experiment name and settings."""
return EXPERIMENT_NAME_TO_ENVIRONMENT[experiment_name](**kwargs)
def load_from_id(bsuite_id: str) -> base.Environment:
"""Returns a bsuite environment given a bsuite_id."""
kwargs = sweep.SETTINGS[bsuite_id]
experiment_name, _ = unpack_bsuite_id(bsuite_id)
env = load(experiment_name, kwargs)
termcolor.cprint(
f'Loaded bsuite_id: {bsuite_id}.', color='white', attrs=['bold'])
return env
def load_and_record(bsuite_id: str,
save_path: str,
logging_mode: str = 'csv',
overwrite: bool = False) -> dm_env.Environment:
"""Returns a bsuite environment wrapped with CSV or terminal logging."""
if logging_mode == 'csv':
return load_and_record_to_csv(bsuite_id, save_path, overwrite)
elif logging_mode == 'terminal':
return load_and_record_to_terminal(bsuite_id)
else:
raise ValueError((f'Unrecognised logging_mode "{logging_mode}". '
'Must be "csv" or "terminal".'))
def load_and_record_to_csv(bsuite_id: str,
results_dir: str,
overwrite: bool = False) -> dm_env.Environment:
"""Returns a bsuite environment that saves results to CSV.
To load the results, specify the file path in the provided notebook, or to
manually inspect the results use:
```python
from bsuite.logging import csv_load
results_df, sweep_vars = csv_load.load_bsuite(results_dir)
```
Args:
bsuite_id: The bsuite id identifying the environment to return. For example,
"catch/0" or "deep_sea/3".
results_dir: Path to the directory to store the resultant CSV files. Note
that this logger will generate a separate CSV file for each bsuite_id.
overwrite: Whether to overwrite existing CSV files if found.
Returns:
A bsuite environment determined by the bsuite_id.
"""
raw_env = load_from_id(bsuite_id)
termcolor.cprint(
f'Logging results to CSV file for each bsuite_id in {results_dir}.',
color='yellow',
attrs=['bold'])
return csv_logging.wrap_environment(
env=raw_env,
bsuite_id=bsuite_id,
results_dir=results_dir,
overwrite=overwrite,
)
def load_and_record_to_terminal(bsuite_id: str) -> dm_env.Environment:
"""Returns a bsuite environment that logs to terminal."""
raw_env = load_from_id(bsuite_id)
termcolor.cprint(
'Logging results to terminal.', color='yellow', attrs=['bold'])
return terminal_logging.wrap_environment(raw_env)
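# A usage sketch (not part of the original module): 'bandit/0' is one of the
# bsuite_ids defined in sweep.py, and each bandit episode lasts a single step,
# so the loop below terminates immediately.
def _demo_load_from_id():
  env = load_from_id('bandit/0')
  timestep = env.reset()
  while not timestep.last():
    timestep = env.step(0)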
|
bsuite-master
|
bsuite/bsuite.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""This module exposes the bsuite experiment definitions in terms of bsuite_ids.
Each bsuite_id is designed to be a human readable string in the format:
environment_name/i
where i is the index of the setting in that experiment's sweep.py file.
Each bsuite_id can be used to load an environment, via the bsuite.load*
functions.
To iterate over the bsuite_ids for all experiments, use `sweep.SWEEP`.
To iterate over the bsuite_ids for a single experiment, use
`sweep.<EXPERIMENT_NAME>`. For example, `sweep.DISCOUNTING_CHAIN`.
To iterate over the bsuite_ids for a single experiment type, use
`sweep.TAGS[<EXPERIMENT_TAG>]`. For example, `sweep.TAGS['memory']`.
"""
from typing import Any, Dict, Mapping, Tuple
from bsuite.experiments.bandit import sweep as bandit_sweep
from bsuite.experiments.bandit_noise import sweep as bandit_noise_sweep
from bsuite.experiments.bandit_scale import sweep as bandit_scale_sweep
from bsuite.experiments.cartpole import sweep as cartpole_sweep
from bsuite.experiments.cartpole_noise import sweep as cartpole_noise_sweep
from bsuite.experiments.cartpole_scale import sweep as cartpole_scale_sweep
from bsuite.experiments.cartpole_swingup import sweep as cartpole_swingup_sweep
from bsuite.experiments.catch import sweep as catch_sweep
from bsuite.experiments.catch_noise import sweep as catch_noise_sweep
from bsuite.experiments.catch_scale import sweep as catch_scale_sweep
from bsuite.experiments.deep_sea import sweep as deep_sea_sweep
from bsuite.experiments.deep_sea_stochastic import sweep as deep_sea_stochastic_sweep
from bsuite.experiments.discounting_chain import sweep as discounting_chain_sweep
from bsuite.experiments.memory_len import sweep as memory_len_sweep
from bsuite.experiments.memory_size import sweep as memory_size_sweep
from bsuite.experiments.mnist import sweep as mnist_sweep
from bsuite.experiments.mnist_noise import sweep as mnist_noise_sweep
from bsuite.experiments.mnist_scale import sweep as mnist_scale_sweep
from bsuite.experiments.mountain_car import sweep as mountain_car_sweep
from bsuite.experiments.mountain_car_noise import sweep as mountain_car_noise_sweep
from bsuite.experiments.mountain_car_scale import sweep as mountain_car_scale_sweep
from bsuite.experiments.umbrella_distract import sweep as umbrella_distract_sweep
from bsuite.experiments.umbrella_length import sweep as umbrella_length_sweep
import immutabledict
# Common types aliases.
BSuiteId = str # Experiment bsuite_ids are strings, e.g. 'deep_sea/10'.
Tag = str # Experiment tags are strings, e.g. 'exploration'.
EnvKWargs = Dict[str, Any] # Keyword arguments to environment constructors.
# bsuite_ids are strings of the form {environment_name}{SEPARATOR}{index}.
SEPARATOR = '/'
# Exclude environment names ending with the following from the testing sweep.
IGNORE_FOR_TESTING = ('_noise', '_scale')
_SETTINGS = {}
_SWEEP = []
_TAGS = {}
_TESTING = []
_EPISODES = {}
def _parse_sweep(experiment_package) -> Tuple[BSuiteId, ...]:
"""Returns the bsuite_ids for each experiment package."""
results = []
# package.__name__ is something like 'bsuite.experiments.bandit.sweep'
experiment_name = experiment_package.__name__.split('.')[-2]
eligible_for_test_sweep = not any(experiment_name.endswith(s)
for s in IGNORE_FOR_TESTING)
# Construct bsuite_ids for each setting defined by the experiment.
for i, setting in enumerate(experiment_package.SETTINGS):
bsuite_id = f'{experiment_name}{SEPARATOR}{i}'
if i == 0 and eligible_for_test_sweep:
# For each environment, add one `bsuite_id` to the TESTING sweep.
_TESTING.append(bsuite_id)
results.append(bsuite_id)
_SETTINGS[bsuite_id] = setting
_EPISODES[bsuite_id] = experiment_package.NUM_EPISODES
# Add bsuite_ids to corresponding tag sweeps.
for tag in experiment_package.TAGS:
if tag not in _TAGS:
_TAGS[tag] = []
_TAGS[tag].extend(results)
_SWEEP.extend(results)
return tuple(results)
# bsuite_ids broken down by environment.
BANDIT = _parse_sweep(bandit_sweep)
BANDIT_NOISE = _parse_sweep(bandit_noise_sweep)
BANDIT_SCALE = _parse_sweep(bandit_scale_sweep)
CARTPOLE = _parse_sweep(cartpole_sweep)
CARTPOLE_NOISE = _parse_sweep(cartpole_noise_sweep)
CARTPOLE_SCALE = _parse_sweep(cartpole_scale_sweep)
CARTPOLE_SWINGUP = _parse_sweep(cartpole_swingup_sweep)
CATCH = _parse_sweep(catch_sweep)
CATCH_NOISE = _parse_sweep(catch_noise_sweep)
CATCH_SCALE = _parse_sweep(catch_scale_sweep)
DEEP_SEA = _parse_sweep(deep_sea_sweep)
DEEP_SEA_STOCHASTIC = _parse_sweep(deep_sea_stochastic_sweep)
DISCOUNTING_CHAIN = _parse_sweep(discounting_chain_sweep)
MEMORY_LEN = _parse_sweep(memory_len_sweep)
MEMORY_SIZE = _parse_sweep(memory_size_sweep)
MNIST = _parse_sweep(mnist_sweep)
MNIST_NOISE = _parse_sweep(mnist_noise_sweep)
MNIST_SCALE = _parse_sweep(mnist_scale_sweep)
MOUNTAIN_CAR = _parse_sweep(mountain_car_sweep)
MOUNTAIN_CAR_NOISE = _parse_sweep(mountain_car_noise_sweep)
MOUNTAIN_CAR_SCALE = _parse_sweep(mountain_car_scale_sweep)
UMBRELLA_DISTRACT = _parse_sweep(umbrella_distract_sweep)
UMBRELLA_LENGTH = _parse_sweep(umbrella_length_sweep)
# Mapping from bsuite id to keyword arguments for the corresponding environment.
SETTINGS: Mapping[BSuiteId,
EnvKWargs] = immutabledict.immutabledict(**_SETTINGS)
# Tuple containing all bsuite_ids. Used for hyperparameter sweeps.
SWEEP: Tuple[BSuiteId, ...] = tuple(_SWEEP)
# Mapping from tag (e.g. 'memory') to experiment `bsuite_id`s with that tag.
# This can be used to run sweeps on all tasks only of a particular tag, by using
# e.g. TAGS['basic'] or TAGS['scale'].
TAGS: Mapping[Tag, Tuple[BSuiteId, ...]] = immutabledict.immutabledict(
**{k: tuple(v) for k, v in _TAGS.items()})
# Tuple containing a representative subset bsuite_ids used for agent tests.
TESTING: Tuple[BSuiteId, ...] = tuple(_TESTING)
# Mapping from bsuite_id to bsuite_num_episodes = how many episodes to run.
EPISODES: Mapping[BSuiteId, int] = immutabledict.immutabledict(**_EPISODES)
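# A usage sketch (not part of the original module): every bsuite_id exposed in
# SWEEP has matching entries in SETTINGS (environment kwargs) and EPISODES
# (number of episodes to run).
def _demo_sweep_lookup():
  bsuite_id = SWEEP[0]
  assert bsuite_id in SETTINGS
  assert bsuite_id in EPISODES
  assert SEPARATOR in bsuite_id  # e.g. 'bandit/0'.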
|
bsuite-master
|
bsuite/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.experiments.bandit."""
from absl.testing import absltest
from bsuite.environments import bandit
from dm_env import test_utils
import numpy as np
class InterfaceTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
return bandit.SimpleBandit(5)
def make_action_sequence(self):
valid_actions = range(11)
rng = np.random.RandomState(42)
for _ in range(100):
yield rng.choice(valid_actions)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/environments/bandit_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.experiments.discounting_chain."""
from absl.testing import absltest
from bsuite.environments import discounting_chain
from dm_env import test_utils
import numpy as np
class InterfaceTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
return discounting_chain.DiscountingChain(10)
def make_action_sequence(self):
valid_actions = [0, 1, 2, 3, 4]
rng = np.random.RandomState(42)
for _ in range(100):
yield rng.choice(valid_actions)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/environments/discounting_chain_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Simple diagnostic memory challenge.
Observation is given by num_bits + 2 pixels: (time_to_live, query, context).
Context is only nonzero in the first step, when each bit is +1 or -1 iid.
The query index is revealed on the last step. Actions take no effect until
time_to_live=0, when the agent must report the value of the queried context
bit.
"""
from typing import Optional
from bsuite.environments import base
import dm_env
from dm_env import specs
import numpy as np
class MemoryChain(base.Environment):
"""Memory Chain environment, implementing the environment API."""
def __init__(self,
memory_length: int,
num_bits: int = 1,
seed: Optional[int] = None):
"""Builds the memory chain environment."""
super(MemoryChain, self).__init__()
self._memory_length = memory_length
self._num_bits = num_bits
self._rng = np.random.RandomState(seed)
# Contextual information per episode
self._timestep = 0
self._context = self._rng.binomial(1, 0.5, num_bits)
self._query = self._rng.randint(num_bits)
# Logging info
self._total_perfect = 0
self._total_regret = 0
self._episode_mistakes = 0
# bsuite experiment length.
self.bsuite_num_episodes = 10_000 # Overridden by experiment load().
def _get_observation(self):
"""Observation of form [time, query, num_bits of context]."""
obs = np.zeros(shape=(1, self._num_bits + 2), dtype=np.float32)
# Show the time, on every step.
obs[0, 0] = 1 - self._timestep / self._memory_length
# Show the query, on the last step
if self._timestep == self._memory_length - 1:
obs[0, 1] = self._query
# Show the context, on the first step
if self._timestep == 0:
obs[0, 2:] = 2 * self._context - 1
return obs
def _step(self, action: int) -> dm_env.TimeStep:
observation = self._get_observation()
self._timestep += 1
if self._timestep - 1 < self._memory_length:
# On all but the last step provide a reward of 0.
return dm_env.transition(reward=0., observation=observation)
if self._timestep - 1 > self._memory_length:
raise RuntimeError('Invalid state.') # We shouldn't get here.
if action == self._context[self._query]:
reward = 1.
self._total_perfect += 1
else:
reward = -1.
self._total_regret += 2.
return dm_env.termination(reward=reward, observation=observation)
def _reset(self) -> dm_env.TimeStep:
self._timestep = 0
self._episode_mistakes = 0
self._context = self._rng.binomial(1, 0.5, self._num_bits)
self._query = self._rng.randint(self._num_bits)
observation = self._get_observation()
return dm_env.restart(observation)
def observation_spec(self):
return specs.Array(
shape=(1, self._num_bits + 2), dtype=np.float32, name='observation')
def action_spec(self):
return specs.DiscreteArray(2, name='action')
def _save(self, observation):
self._raw_observation = (observation * 255).astype(np.uint8)
def bsuite_info(self):
return dict(
total_perfect=self._total_perfect,
total_regret=self._total_regret)
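# A short rollout sketch (not part of the original module): with
# memory_length=3 the episode lasts four steps, and the final reward is +1 or
# -1 depending on whether the answer to the query was correct.
def _demo_memory_chain_episode():
  env = MemoryChain(memory_length=3, num_bits=2, seed=0)
  timestep = env.reset()
  while not timestep.last():
    timestep = env.step(0)
  assert timestep.reward in (-1., 1.)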
|
bsuite-master
|
bsuite/environments/memory_chain.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.experiments.catch."""
from absl.testing import absltest
from bsuite.environments import catch
from dm_env import test_utils
import numpy as np
class InterfaceTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
return catch.Catch(rows=10, columns=5)
def make_action_sequence(self):
valid_actions = [0, 1, 2]
rng = np.random.RandomState(42)
for _ in range(100):
yield rng.choice(valid_actions)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/environments/catch_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Simple diagnostic credit assigment challenge.
Observation is 3 + n_distractor pixels:
(need_umbrella, have_umbrella, time_to_live, n x distractors)
Only the first action takes any effect (pick up the umbrella or not). All
other actions take no effect; intermediate rewards are iid +1/-1 noise, and
the final reward is +1 or -1 depending on whether the umbrella matched the
need.
Distractor states are always Bernoulli sampled iid each step.
"""
from typing import Optional
from bsuite.environments import base
from bsuite.experiments.umbrella_length import sweep
import dm_env
from dm_env import specs
import numpy as np
class UmbrellaChain(base.Environment):
"""Umbrella Chain environment."""
def __init__(self,
chain_length: int,
n_distractor: int = 0,
seed: Optional[int] = None):
"""Builds the umbrella chain environment.
Args:
chain_length: Integer. Length that the agent must back up.
n_distractor: Integer. Number of distractor observations.
seed: Optional integer. Seed for numpy's random number generator (RNG).
"""
super().__init__()
self._chain_length = chain_length
self._rng = np.random.RandomState(seed)
self._n_distractor = n_distractor
self._timestep = 0
self._need_umbrella = self._rng.binomial(1, 0.5)
self._has_umbrella = 0
self._total_regret = 0
self.bsuite_num_episodes = sweep.NUM_EPISODES
def _get_observation(self):
obs = np.zeros(shape=(1, 3 + self._n_distractor), dtype=np.float32)
obs[0, 0] = self._need_umbrella
obs[0, 1] = self._has_umbrella
obs[0, 2] = 1 - self._timestep / self._chain_length
obs[0, 3:] = self._rng.binomial(1, 0.5, size=self._n_distractor)
return obs
def _step(self, action: int) -> dm_env.TimeStep:
self._timestep += 1
if self._timestep == 1: # you can only pick up umbrella t=1
self._has_umbrella = action
if self._timestep == self._chain_length: # reward only at end.
if self._has_umbrella == self._need_umbrella:
reward = 1.
else:
reward = -1.
self._total_regret += 2.
observation = self._get_observation()
return dm_env.termination(reward=reward, observation=observation)
reward = 2. * self._rng.binomial(1, 0.5) - 1.
observation = self._get_observation()
return dm_env.transition(reward=reward, observation=observation)
def _reset(self) -> dm_env.TimeStep:
self._timestep = 0
self._need_umbrella = self._rng.binomial(1, 0.5)
self._has_umbrella = self._rng.binomial(1, 0.5)
observation = self._get_observation()
return dm_env.restart(observation)
def observation_spec(self):
return specs.Array(
shape=(1, 3 + self._n_distractor), dtype=np.float32, name='observation')
def action_spec(self):
return specs.DiscreteArray(2, name='action')
def bsuite_info(self):
return dict(total_regret=self._total_regret)
def _save(self, observation):
self._raw_observation = (observation * 255).astype(np.uint8)
@property
def optimal_return(self):
# Returns the maximum total reward achievable in an episode.
return 1
@property
def context(self):
return self._context # pytype: disable=attribute-error # bind-properties
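# A rollout sketch (not part of the original module): pick up the umbrella iff
# it is needed on the first step; the final reward is then +1 regardless of
# the noisy intermediate rewards.
def _demo_umbrella_episode():
  env = UmbrellaChain(chain_length=5, seed=0)
  timestep = env.reset()
  action = int(timestep.observation[0, 0])  # The need_umbrella pixel.
  timestep = env.step(action)
  while not timestep.last():
    timestep = env.step(0)
  assert timestep.reward == 1.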
|
bsuite-master
|
bsuite/environments/umbrella_chain.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""bsuite environments package."""
from bsuite.environments.base import Environment
|
bsuite-master
|
bsuite/environments/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.experiments.deep_sea."""
from absl.testing import absltest
from bsuite.environments import deep_sea
from dm_env import test_utils
import numpy as np
class DeepSeaInterfaceTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
return deep_sea.DeepSea(10)
def make_action_sequence(self):
valid_actions = [0, 1]
rng = np.random.RandomState(42)
for _ in range(100):
yield rng.choice(valid_actions)
class StochasticDeepSeaInterfaceTest(test_utils.EnvironmentTestMixin,
absltest.TestCase):
def make_object_under_test(self):
return deep_sea.DeepSea(5, deterministic=False)
def make_action_sequence(self):
valid_actions = [0, 1]
rng = np.random.RandomState(42)
for _ in range(100):
yield rng.choice(valid_actions)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/environments/deep_sea_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Cartpole reinforcement learning environment."""
import collections
from typing import Optional
from bsuite.environments import base
from bsuite.experiments.cartpole import sweep
import dm_env
from dm_env import specs
import numpy as np
CartpoleState = collections.namedtuple(
'CartpoleState', ['x', 'x_dot', 'theta', 'theta_dot', 'time_elapsed'])
CartpoleConfig = collections.namedtuple(
'CartpoleConfig',
['mass_cart', 'mass_pole', 'length', 'force_mag', 'gravity']
)
def step_cartpole(action: int,
timescale: float,
state: CartpoleState,
config: CartpoleConfig) -> CartpoleState:
"""Helper function to step cartpole state under given config."""
# Unpack variables into "short" names for mathematical equation
force = (action - 1) * config.force_mag
cos = np.cos(state.theta)
sin = np.sin(state.theta)
pl = config.mass_pole * config.length
l = config.length
m_pole = config.mass_pole
m_total = config.mass_cart + config.mass_pole
g = config.gravity
# Compute the physical evolution
temp = (force + pl * state.theta_dot**2 * sin) / m_total
theta_acc = (g * sin - cos * temp) / (l * (4/3 - m_pole * cos**2 / m_total))
x_acc = temp - pl * theta_acc * cos / m_total
# Update states according to discrete dynamics
x = state.x + timescale * state.x_dot
x_dot = state.x_dot + timescale * x_acc
theta = np.remainder(
state.theta + timescale * state.theta_dot, 2 * np.pi)
theta_dot = state.theta_dot + timescale * theta_acc
time_elapsed = state.time_elapsed + timescale
return CartpoleState(x, x_dot, theta, theta_dot, time_elapsed)
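# A worked single-step sketch (not part of the original module): starting from
# rest, a 'right' push (action=2) gives the cart positive velocity after one
# Euler step of 0.01s. The config values mirror the defaults set in Cartpole.
def _demo_step_cartpole():
  config = CartpoleConfig(mass_cart=1., mass_pole=0.1, length=0.5,
                          force_mag=10., gravity=9.8)
  state = CartpoleState(x=0., x_dot=0., theta=0., theta_dot=0.,
                        time_elapsed=0.)
  next_state = step_cartpole(action=2, timescale=0.01, state=state,
                             config=config)
  assert next_state.x_dot > 0.
  assert next_state.time_elapsed == 0.01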
class Cartpole(base.Environment):
"""This implements a version of the classic Cart Pole task.
For more information see:
https://webdocs.cs.ualberta.ca/~sutton/papers/barto-sutton-anderson-83.pdf
The observation is a vector representing:
`(x, x_dot, sin(theta), cos(theta), theta_dot, time_elapsed)`
The actions are discrete ['left', 'stay', 'right']. Episodes start with the
pole close to upright. Episodes end when the pole falls, the cart falls off
the table, or the max_time is reached.
"""
def __init__(self,
height_threshold: float = 0.8,
x_threshold: float = 3.,
timescale: float = 0.01,
max_time: float = 10.,
init_range: float = 0.05,
seed: Optional[int] = None):
# Setup.
self._state = CartpoleState(0, 0, 0, 0, 0)
super().__init__()
self._rng = np.random.RandomState(seed)
self._init_fn = lambda: self._rng.uniform(low=-init_range, high=init_range)
# Logging info
self._raw_return = 0.
self._best_episode = 0.
self._episode_return = 0.
# Reward/episode logic
self._height_threshold = height_threshold
self._x_threshold = x_threshold
self._timescale = timescale
self._max_time = max_time
# Problem config
self._cartpole_config = CartpoleConfig(
mass_cart=1.,
mass_pole=0.1,
length=0.5,
force_mag=10.,
gravity=9.8,
)
# Public attributes.
self.bsuite_num_episodes = sweep.NUM_EPISODES
# Overrides the super method.
def reset(self):
self._reset_next_step = False
self._state = CartpoleState(
x=self._init_fn(),
x_dot=self._init_fn(),
theta=self._init_fn(),
theta_dot=self._init_fn(),
time_elapsed=0.,
)
self._episode_return = 0
return dm_env.restart(self.observation)
# Overrides the super method (we implement special auto-reset behavior here).
def step(self, action):
if self._reset_next_step:
return self.reset()
self._state = step_cartpole(
action=action,
timescale=self._timescale,
state=self._state,
config=self._cartpole_config,
)
# Rewards only when the pole is central and balanced
is_reward = (np.cos(self._state.theta) > self._height_threshold
and np.abs(self._state.x) < self._x_threshold)
reward = 1. if is_reward else 0.
self._raw_return += reward
self._episode_return += reward
if self._state.time_elapsed > self._max_time or not is_reward:
self._best_episode = max(self._episode_return, self._best_episode)
self._reset_next_step = True
return dm_env.termination(reward=reward, observation=self.observation)
return dm_env.transition(reward=reward, observation=self.observation)
def _step(self, action: int) -> dm_env.TimeStep:
raise NotImplementedError('This environment implements its own auto-reset.')
def _reset(self) -> dm_env.TimeStep:
raise NotImplementedError('This environment implements its own auto-reset.')
def action_spec(self):
return specs.DiscreteArray(dtype=int, num_values=3, name='action')
def observation_spec(self):
return specs.Array(shape=(1, 6), dtype=np.float32, name='observation')
@property
def observation(self) -> np.ndarray:
"""Approximately normalize output."""
obs = np.zeros((1, 6), dtype=np.float32)
obs[0, 0] = self._state.x / self._x_threshold
obs[0, 1] = self._state.x_dot / self._x_threshold
obs[0, 2] = np.sin(self._state.theta)
obs[0, 3] = np.cos(self._state.theta)
obs[0, 4] = self._state.theta_dot
obs[0, 5] = self._state.time_elapsed / self._max_time
return obs
def bsuite_info(self):
return dict(raw_return=self._raw_return,
best_episode=self._best_episode)
|
bsuite-master
|
bsuite/environments/cartpole.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Simple diagnostic bandit environment.
Observation is a single constant pixel - this is an independent-arm bandit
problem! Rewards are [0, 0.1, ..., 1], assigned randomly to the arms (11 by
default), and deterministic thereafter.
"""
from typing import Optional
from bsuite.environments import base
from bsuite.experiments.bandit import sweep
import dm_env
from dm_env import specs
import numpy as np
class SimpleBandit(base.Environment):
"""SimpleBandit environment."""
def __init__(self, mapping_seed: Optional[int] = None, num_actions: int = 11):
"""Builds a simple bandit environment.
Args:
mapping_seed: Optional integer. Seed for action mapping.
num_actions: number of actions available, defaults to 11.
"""
super(SimpleBandit, self).__init__()
self._rng = np.random.RandomState(mapping_seed)
self._num_actions = num_actions
action_mask = self._rng.choice(
range(self._num_actions), size=self._num_actions, replace=False)
self._rewards = np.linspace(0, 1, self._num_actions)[action_mask]
self._total_regret = 0.
self._optimal_return = 1.
self.bsuite_num_episodes = sweep.NUM_EPISODES
def _get_observation(self):
return np.ones(shape=(1, 1), dtype=np.float32)
def _reset(self) -> dm_env.TimeStep:
observation = self._get_observation()
return dm_env.restart(observation)
def _step(self, action: int) -> dm_env.TimeStep:
reward = self._rewards[action]
self._total_regret += self._optimal_return - reward
observation = self._get_observation()
return dm_env.termination(reward=reward, observation=observation)
def observation_spec(self):
return specs.Array(shape=(1, 1), dtype=np.float32, name='observation')
def action_spec(self):
return specs.DiscreteArray(self._num_actions, name='action')
def bsuite_info(self):
return dict(total_regret=self._total_regret)
|
bsuite-master
|
bsuite/environments/bandit.py
|
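As an illustrative sketch (the episode count is arbitrary), the SimpleBandit above can be exercised with a uniform-random policy and its regret read back through bsuite_info:
import numpy as np
from bsuite.environments import bandit

env = bandit.SimpleBandit(mapping_seed=0)
rng = np.random.RandomState(42)
num_pulls = 1000
for _ in range(num_pulls):
  env.reset()
  action = rng.randint(env.action_spec().num_values)
  env.step(action)  # every pull is a one-step episode that terminates
print(env.bsuite_info()['total_regret'] / num_pulls)
Because the arm rewards are evenly spaced on [0, 1] and the optimal arm pays 1, a uniform policy should show an average regret per pull of about 0.5.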
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Simple diagnostic discounting challenge.
Observation is two pixels: (context, time_to_live)
Context will only be -1 in the first step, then equal to the action selected in
the first step. For all future decisions the agent is in a "chain" for that
action. Reward of +1 comes at one of: 1, 3, 10, 30, 100.
However, depending on the seed, one of these chains has a 10% bonus.
"""
from typing import Any, Dict, Optional
from bsuite.environments import base
from bsuite.experiments.discounting_chain import sweep
import dm_env
from dm_env import specs
import numpy as np
class DiscountingChain(base.Environment):
"""Discounting Chain environment."""
def __init__(self, mapping_seed: Optional[int] = None):
"""Builds the Discounting Chain environment.
Args:
mapping_seed: Optional integer, specifies which reward is bonus.
"""
super().__init__()
self._episode_len = 100
self._reward_timestep = [1, 3, 10, 30, 100]
self._n_actions = len(self._reward_timestep)
if mapping_seed is None:
mapping_seed = np.random.randint(0, self._n_actions)
else:
mapping_seed = mapping_seed % self._n_actions
self._rewards = np.ones(self._n_actions)
self._rewards[mapping_seed] += 0.1
self._timestep = 0
self._context = -1
self.bsuite_num_episodes = sweep.NUM_EPISODES
def _get_observation(self):
obs = np.zeros(shape=(1, 2), dtype=np.float32)
obs[0, 0] = self._context
obs[0, 1] = self._timestep / self._episode_len
return obs
def _reset(self) -> dm_env.TimeStep:
self._timestep = 0
self._context = -1
observation = self._get_observation()
return dm_env.restart(observation)
def _step(self, action: int) -> dm_env.TimeStep:
if self._timestep == 0:
self._context = action
self._timestep += 1
if self._timestep == self._reward_timestep[self._context]:
reward = self._rewards[self._context]
else:
reward = 0.
observation = self._get_observation()
if self._timestep == self._episode_len:
return dm_env.termination(reward=reward, observation=observation)
return dm_env.transition(reward=reward, observation=observation)
def observation_spec(self):
return specs.Array(shape=(1, 2), dtype=np.float32, name='observation')
def action_spec(self):
return specs.DiscreteArray(self._n_actions, name='action')
def _save(self, observation):
self._raw_observation = (observation * 255).astype(np.uint8)
@property
def optimal_return(self):
# Returns the maximum total reward achievable in an episode.
return 1.1
def bsuite_info(self) -> Dict[str, Any]:
return {}
|
bsuite-master
|
bsuite/environments/discounting_chain.py
|
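A small sketch of the credit-assignment structure described in the docstring above: the first action selects a chain, and the reward for that chain arrives at its characteristic timestep. The mapping_seed here is an arbitrary choice for illustration.
from bsuite.environments import discounting_chain

env = discounting_chain.DiscountingChain(mapping_seed=0)
for first_action in range(env.action_spec().num_values):
  timestep = env.reset()
  timestep = env.step(first_action)  # locks the agent into one chain
  episode_return = timestep.reward
  while not timestep.last():
    timestep = env.step(0)  # later actions are ignored by the dynamics
    episode_return += timestep.reward
  print(first_action, episode_return)  # 1.1 for the bonus chain, 1.0 otherwise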
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.experiments.cartpole."""
from absl.testing import absltest
from bsuite.environments import cartpole
from dm_env import test_utils
import numpy as np
class InterfaceTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
return cartpole.Cartpole(seed=22)
def make_action_sequence(self):
valid_actions = [0, 1, 2]
rng = np.random.RandomState(42)
for _ in range(100):
yield rng.choice(valid_actions)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/environments/cartpole_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Python implementation of 'Mountain Car' environment.
An underpowered car must drive up a hill; to succeed, the agent must first go back and forth to build momentum.
This is a classic environment in RL research, first described by:
A Moore, Efficient Memory-Based Learning for Robot Control,
PhD thesis, University of Cambridge, 1990.
"""
from typing import Optional
from bsuite.environments import base
from bsuite.experiments.mountain_car import sweep
import dm_env
from dm_env import specs
import numpy as np
class MountainCar(base.Environment):
"""Mountain Car, an underpowered car must power up a hill."""
def __init__(self,
max_steps: int = 1000,
seed: Optional[int] = None):
"""Mountain Car, an underpowered car must power up a hill.
Args:
max_steps : maximum number of steps to perform per episode
seed : randomization seed
"""
super().__init__()
self._min_pos = -1.2
self._max_pos = 0.6
self._max_speed = 0.07
self._goal_pos = 0.5
self._force = 0.001
self._gravity = 0.0025
self._max_steps = max_steps
self._rng = np.random.RandomState(seed)
self._timestep = 0
self._raw_return = 0.
self._position = 0.
self._velocity = 0.
self.bsuite_num_episodes = sweep.NUM_EPISODES
def _get_observation(self):
obs = [self._position, self._velocity, self._timestep / self._max_steps]
return np.array([obs], dtype=np.float32)
def _reset(self) -> dm_env.TimeStep:
"""Random initialize in [-0.6, -0.4] and zero velocity."""
self._timestep = 0
self._position = self._rng.uniform(-0.6, -0.4)
self._velocity = 0
return dm_env.restart(self._get_observation())
def _step(self, action: int) -> dm_env.TimeStep:
self._timestep += 1
reward = -1.
self._raw_return += reward
# Step the environment
self._velocity += (action - 1) * self._force + np.cos(
3 * self._position) * -self._gravity
self._velocity = np.clip(self._velocity, -self._max_speed, self._max_speed)
self._position += self._velocity
self._position = np.clip(self._position, self._min_pos, self._max_pos)
if self._position == self._min_pos:
self._velocity = np.clip(self._velocity, 0, self._max_speed)
observation = self._get_observation()
if self._position >= self._goal_pos or self._timestep >= self._max_steps:
return dm_env.termination(reward=reward, observation=observation)
return dm_env.transition(reward=reward, observation=observation)
def observation_spec(self):
return specs.Array(shape=(1, 3), dtype=np.float32, name='observation')
def action_spec(self):
"""Actions [0,1,2] -> [Left, Stay, Right]."""
return specs.DiscreteArray(3, name='action')
def bsuite_info(self):
return dict(raw_return=self._raw_return)
|
bsuite-master
|
bsuite/environments/mountain_car.py
|
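For illustration only: a uniformly random policy on the MountainCar above rarely reaches the goal, so a typical episode ends at max_steps with a return near -1000. The seed values are arbitrary.
import numpy as np
from bsuite.environments import mountain_car

env = mountain_car.MountainCar(max_steps=1000, seed=0)
rng = np.random.RandomState(42)
timestep = env.reset()
while not timestep.last():
  timestep = env.step(rng.randint(3))  # 0 = left, 1 = stay, 2 = right
print(env.bsuite_info())  # {'raw_return': ...}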
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Catch reinforcement learning environment."""
from typing import Optional
from bsuite.environments import base
from bsuite.experiments.catch import sweep
import dm_env
from dm_env import specs
import numpy as np
_ACTIONS = (-1, 0, 1) # Left, no-op, right.
class Catch(base.Environment):
"""A Catch environment built on the dm_env.Environment class.
The agent must move a paddle to intercept falling balls. Falling balls only
move downwards on the column they are in.
The observation is an array shape (rows, columns), with binary values:
zero if a space is empty; 1 if it contains the paddle or a ball.
The actions are discrete, and by default there are three available:
stay, move left, and move right.
The episode terminates when the ball reaches the bottom of the screen.
"""
def __init__(self,
rows: int = 10,
columns: int = 5,
seed: Optional[int] = None):
"""Initializes a new Catch environment.
Args:
rows: number of rows.
columns: number of columns.
seed: random seed for the RNG.
"""
self._rows = rows
self._columns = columns
self._rng = np.random.RandomState(seed)
self._board = np.zeros((rows, columns), dtype=np.float32)
self._ball_x = None
self._ball_y = None
self._paddle_x = None
self._paddle_y = None
self._reset_next_step = True
self._total_regret = 0.
self.bsuite_num_episodes = sweep.NUM_EPISODES
def _reset(self) -> dm_env.TimeStep:
"""Returns the first `TimeStep` of a new episode."""
self._reset_next_step = False
self._ball_x = self._rng.randint(self._columns)
self._ball_y = 0
self._paddle_x = self._columns // 2
self._paddle_y = self._rows - 1
return dm_env.restart(self._observation())
def _step(self, action: int) -> dm_env.TimeStep:
"""Updates the environment according to the action."""
if self._reset_next_step:
return self.reset()
# Move the paddle.
dx = _ACTIONS[action]
self._paddle_x = np.clip(self._paddle_x + dx, 0, self._columns - 1)
# Drop the ball.
self._ball_y += 1
# Check for termination.
if self._ball_y == self._paddle_y:
reward = 1. if self._paddle_x == self._ball_x else -1.
self._reset_next_step = True
self._total_regret += (1. - reward)
return dm_env.termination(reward=reward, observation=self._observation())
return dm_env.transition(reward=0., observation=self._observation())
def observation_spec(self) -> specs.BoundedArray:
"""Returns the observation spec."""
return specs.BoundedArray(shape=self._board.shape, dtype=self._board.dtype,
name="observation", minimum=0, maximum=1)
def action_spec(self) -> specs.DiscreteArray:
"""Returns the action spec."""
return specs.DiscreteArray(
dtype=int, num_values=len(_ACTIONS), name="action")
def _observation(self) -> np.ndarray:
self._board.fill(0.)
self._board[self._ball_y, self._ball_x] = 1.
self._board[self._paddle_y, self._paddle_x] = 1.
return self._board.copy()
def bsuite_info(self):
return dict(total_regret=self._total_regret)
|
bsuite-master
|
bsuite/environments/catch.py
|
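A simple hand-written policy (a sketch, not part of bsuite) that reads the board returned by the Catch environment above and moves the paddle toward the ball's column; with the default 10 x 5 board the paddle always reaches the ball in time.
import numpy as np
from bsuite.environments import catch

def greedy_action(board: np.ndarray) -> int:
  """Moves the paddle (bottom row) toward the ball's column."""
  paddle_col = int(np.argmax(board[-1]))
  ball_rows, ball_cols = np.nonzero(board[:-1])
  if ball_cols.size == 0:  # ball has already reached the bottom row
    return 1  # no-op
  ball_col = int(ball_cols[0])
  if ball_col < paddle_col:
    return 0  # left
  if ball_col > paddle_col:
    return 2  # right
  return 1  # stay

env = catch.Catch(seed=7)
timestep = env.reset()
while not timestep.last():
  timestep = env.step(greedy_action(timestep.observation))
print(timestep.reward, env.bsuite_info())  # 1.0 {'total_regret': 0.0}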
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.experiments.memory_len."""
from absl.testing import absltest
from absl.testing import parameterized
from bsuite.environments import memory_chain
from dm_env import test_utils
import numpy as np
class MemoryLengthInterfaceTest(test_utils.EnvironmentTestMixin,
parameterized.TestCase):
def make_object_under_test(self):
return memory_chain.MemoryChain(memory_length=10, num_bits=1)
def make_action_sequence(self):
valid_actions = [0, 1]
rng = np.random.RandomState(42)
for _ in range(100):
yield rng.choice(valid_actions)
class MemorySizeInterfaceTest(test_utils.EnvironmentTestMixin,
parameterized.TestCase):
def make_object_under_test(self):
return memory_chain.MemoryChain(memory_length=2, num_bits=10)
def make_action_sequence(self):
valid_actions = [0, 1]
rng = np.random.RandomState(42)
for _ in range(100):
yield rng.choice(valid_actions)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/environments/memory_chain_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.experiments.mountain_car."""
from absl.testing import absltest
from bsuite.environments import mountain_car
from dm_env import test_utils
import numpy as np
class InterfaceTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
return mountain_car.MountainCar(2)
def make_action_sequence(self):
valid_actions = [0, 1, 2]
rng = np.random.RandomState(42)
for _ in range(100):
yield rng.choice(valid_actions)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/environments/mountain_car_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.experiments.umbrella_distract."""
from absl.testing import absltest
from bsuite.environments import umbrella_chain
from dm_env import test_utils
import numpy as np
class UmbrellaDistractInterfaceTest(test_utils.EnvironmentTestMixin,
absltest.TestCase):
def make_object_under_test(self):
return umbrella_chain.UmbrellaChain(chain_length=20, n_distractor=22)
def make_action_sequence(self):
valid_actions = [0, 1]
rng = np.random.RandomState(42)
for _ in range(100):
yield rng.choice(valid_actions)
class UmbrellaLengthInterfaceTest(test_utils.EnvironmentTestMixin,
absltest.TestCase):
def make_object_under_test(self):
return umbrella_chain.UmbrellaChain(chain_length=10, n_distractor=0)
def make_action_sequence(self):
valid_actions = [0, 1]
rng = np.random.RandomState(42)
for _ in range(100):
yield rng.choice(valid_actions)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/environments/umbrella_chain_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""""Base class for bsuite environments.
This inherits from the dm_env base class, with two major differences:
- Includes bsuite-specific metadata:
- `bsuite_info` returns metadata for logging, e.g. for computing regret/score.
- `bsuite_num_episodes` specifies how long the experiment should run for.
- Implements the auto-reset behavior specified by the environment API.
That is, stepping an environment after a LAST timestep should return the
first timestep of a new episode.
"""
import abc
from typing import Any, Dict
import dm_env
class Environment(dm_env.Environment, abc.ABC):
"""Base clas for bsuite environments.
A bsuite environment is a dm_env environment with extra metadata:
- bsuite_info method.
- bsuite_num_episodes attribute.
A bsuite environment also has auto-reset behavior.
This class implements the required `step()` and `reset()` methods.
It instead requires users to implement `_step()` and `_reset()`. This class
handles the reset behaviour automatically when it detects a LAST timestep.
"""
# Number of episodes that this environment should be run for.
bsuite_num_episodes: int
def __init__(self):
self._reset_next_step = True
def reset(self) -> dm_env.TimeStep:
"""Resets the environment, calling the underlying _reset() method."""
self._reset_next_step = False
return self._reset()
def step(self, action: int) -> dm_env.TimeStep:
"""Steps the environment and implements the auto-reset behavior."""
if self._reset_next_step:
return self.reset()
timestep = self._step(action)
self._reset_next_step = timestep.last()
return timestep
@abc.abstractmethod
def _reset(self) -> dm_env.TimeStep:
"""Returns a `timestep` namedtuple as per the regular `reset()` method."""
@abc.abstractmethod
def _step(self, action: int) -> dm_env.TimeStep:
"""Returns a `timestep` namedtuple as per the regular `step()` method."""
@abc.abstractmethod
def bsuite_info(self) -> Dict[str, Any]:
"""Returns metadata specific to this environment for logging/scoring."""
|
bsuite-master
|
bsuite/environments/base.py
|
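To make the contract of the base class above concrete, here is a hypothetical single-step environment (the name CoinGuess and its reward scheme are invented for illustration). It only fills in _reset, _step, the specs, and bsuite_info, and inherits the auto-reset behaviour from step().
import dm_env
from dm_env import specs
import numpy as np
from bsuite.environments import base

class CoinGuess(base.Environment):
  """Guess a fair coin flip; reward +1 for a correct guess, 0 otherwise."""

  bsuite_num_episodes = 100

  def __init__(self, seed=None):
    super().__init__()
    self._rng = np.random.RandomState(seed)
    self._total_reward = 0.

  def _observation(self) -> np.ndarray:
    return np.zeros((1, 1), dtype=np.float32)

  def _reset(self) -> dm_env.TimeStep:
    return dm_env.restart(self._observation())

  def _step(self, action: int) -> dm_env.TimeStep:
    reward = 1. if action == self._rng.randint(2) else 0.
    self._total_reward += reward
    return dm_env.termination(reward=reward, observation=self._observation())

  def observation_spec(self):
    return specs.Array(shape=(1, 1), dtype=np.float32, name='observation')

  def action_spec(self):
    return specs.DiscreteArray(2, name='action')

  def bsuite_info(self):
    return dict(total_reward=self._total_reward)
Stepping CoinGuess after a LAST timestep goes through the base step() method, which calls _reset() automatically, so no explicit reset is needed between episodes.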
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.experiments.mnist."""
from absl.testing import absltest
from bsuite.environments import mnist
from dm_env import test_utils
import numpy as np
class InterfaceTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
return mnist.MNISTBandit(seed=101)
def make_action_sequence(self):
num_actions = self.environment.action_spec().num_values
rng = np.random.RandomState(42)
for _ in range(100):
yield rng.randint(num_actions)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/environments/mnist_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Python implementation of 'Deep Sea' exploration environment.
This environment is designed as a stylized version of the 'exploration chain':
- The observation is an N x N grid, with a falling block starting in top left.
- Each timestep the agent can move 'left' or 'right', which are mapped to
discrete actions 0 and 1 on a state-dependent level.
- There is a large reward of +1 in the bottom right state, but this can be
hard for many exploration algorithms to find.
The stochastic version of this domain only transitions to the right with
probability (1 - 1/N) and adds N(0,1) noise to the 'end' states of the chain.
Logging notes 'bad episodes', which are ones where the agent deviates from the
optimal trajectory by taking a bad action. This is *almost* equivalent to the
total regret, but it ignores the (small) effect of the move_cost, which we do
not track separately since it makes little practical difference.
For more information, see papers:
[1] https://arxiv.org/abs/1703.07608
[2] https://arxiv.org/abs/1806.03335
"""
from typing import Optional
import warnings
from bsuite.environments import base
from bsuite.experiments.deep_sea import sweep
import dm_env
from dm_env import specs
import numpy as np
class DeepSea(base.Environment):
"""Deep Sea environment to test for deep exploration."""
def __init__(self,
size: int,
deterministic: bool = True,
unscaled_move_cost: float = 0.01,
randomize_actions: bool = True,
seed: Optional[int] = None,
mapping_seed: Optional[int] = None):
"""Deep sea environment to test for deep exploration.
Args:
size: The size of `N` for the N x N grid of states.
deterministic: Whether transitions are deterministic (default) or 'windy',
i.e. the `right` action fails with probability 1/N.
unscaled_move_cost: The move cost for moving right, multiplied by N. The
default (0.01) means the optimal policy gets 0.99 episode return.
randomize_actions: The definition of DeepSea environment includes random
mappings of actions: (0,1) -> (left, right) by state. For debugging
purposes, we include the option to turn this randomization off and
let 0=left, 1=right in every state.
seed: Random seed for rewards and transitions, if applicable.
mapping_seed: Random seed for action mapping, if applicable.
"""
super().__init__()
self._size = size
self._deterministic = deterministic
self._unscaled_move_cost = unscaled_move_cost
self._rng = np.random.RandomState(seed)
if randomize_actions:
self._mapping_rng = np.random.RandomState(mapping_seed)
self._action_mapping = self._mapping_rng.binomial(1, 0.5, [size, size])
else:
warnings.warn('Environment is in debug mode (randomize_actions=False). '
'Only randomize_actions=True gives the standard DeepSea environment.')
self._action_mapping = np.ones([size, size])
if not self._deterministic: # action 'right' only succeeds (1 - 1/N)
optimal_no_cost = (1 - 1 / self._size) ** (self._size - 1)
else:
optimal_no_cost = 1.
self._optimal_return = optimal_no_cost - self._unscaled_move_cost
self._column = 0
self._row = 0
self._bad_episode = False
self._total_bad_episodes = 0
self._denoised_return = 0
self._reset()
# bsuite experiment length.
self.bsuite_num_episodes = sweep.NUM_EPISODES
def _get_observation(self):
obs = np.zeros(shape=(self._size, self._size), dtype=np.float32)
if self._row >= self._size: # End of episode null observation
return obs
obs[self._row, self._column] = 1.
return obs
def _reset(self) -> dm_env.TimeStep:
self._row = 0
self._column = 0
self._bad_episode = False
return dm_env.restart(self._get_observation())
def _step(self, action: int) -> dm_env.TimeStep:
reward = 0.
action_right = action == self._action_mapping[self._row, self._column]
# Reward calculation
if self._column == self._size - 1 and action_right:
reward += 1.
self._denoised_return += 1.
if not self._deterministic: # Noisy rewards on the 'end' of chain.
if self._row == self._size - 1 and self._column in [0, self._size - 1]:
reward += self._rng.randn()
# Transition dynamics
if action_right:
if self._rng.rand() > 1 / self._size or self._deterministic:
self._column = np.clip(self._column + 1, 0, self._size - 1)
reward -= self._unscaled_move_cost / self._size
else:
if self._row == self._column: # You were on the right path and went wrong
self._bad_episode = True
self._column = np.clip(self._column - 1, 0, self._size - 1)
self._row += 1
observation = self._get_observation()
if self._row == self._size:
if self._bad_episode:
self._total_bad_episodes += 1
return dm_env.termination(reward=reward, observation=observation)
return dm_env.transition(reward=reward, observation=observation)
def observation_spec(self):
return specs.Array(
shape=(self._size, self._size), dtype=np.float32, name='observation')
def action_spec(self):
return specs.DiscreteArray(2, name='action')
def bsuite_info(self):
return dict(total_bad_episodes=self._total_bad_episodes,
denoised_return=self._denoised_return)
|
bsuite-master
|
bsuite/environments/deep_sea.py
|
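A sketch of the optimal behaviour in the deterministic DeepSea above with randomize_actions=False (debug mode, where action 1 always means 'right', and a warning is emitted); the size is an arbitrary choice. The episode return should come out to 1 minus unscaled_move_cost, i.e. 0.99 with the defaults.
from bsuite.environments import deep_sea

env = deep_sea.DeepSea(size=10, randomize_actions=False)  # emits debug warning
timestep = env.reset()
episode_return = 0.
while not timestep.last():
  timestep = env.step(1)  # always 'right' in debug mode
  episode_return += timestep.reward
print(episode_return)  # ~0.99: the +1 reward minus the accumulated move cost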
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MNIST classification as a bandit.
In this environment, we test the agent's generalization ability, and abstract
away exploration/planning/memory etc -- i.e. a bandit, with no 'state'.
"""
from typing import Optional
from bsuite.environments import base
from bsuite.experiments.mnist import sweep
from bsuite.utils import datasets
import dm_env
from dm_env import specs
import numpy as np
class MNISTBandit(base.Environment):
"""MNIST classification as a bandit environment."""
def __init__(self, fraction: float = 1., seed: Optional[int] = None):
"""Loads the MNIST training set (60K images & labels) as numpy arrays.
Args:
fraction: What fraction of the training set to keep (default is all).
seed: Optional integer. Seed for numpy's random number generator (RNG).
"""
super().__init__()
(images, labels), _ = datasets.load_mnist()
num_data = len(labels)
self._num_data = int(fraction * num_data)
self._image_shape = images.shape[1:]
self._images = images[:self._num_data]
self._labels = labels[:self._num_data]
self._rng = np.random.RandomState(seed)
self._correct_label = None
self._total_regret = 0.
self._optimal_return = 1.
self.bsuite_num_episodes = sweep.NUM_EPISODES
def _reset(self) -> dm_env.TimeStep:
"""Agent gets an MNIST image to 'classify' using its next action."""
idx = self._rng.randint(self._num_data)
image = self._images[idx].astype(np.float32) / 255
self._correct_label = self._labels[idx]
return dm_env.restart(observation=image)
def _step(self, action: int) -> dm_env.TimeStep:
"""+1/-1 for correct/incorrect guesses. This also terminates the episode."""
correct = action == self._correct_label
reward = 1. if correct else -1.
self._total_regret += self._optimal_return - reward
observation = np.zeros(shape=self._image_shape, dtype=np.float32)
return dm_env.termination(reward=reward, observation=observation)
def observation_spec(self):
return specs.Array(
shape=self._image_shape, dtype=np.float32, name='observation')
def action_spec(self):
return specs.DiscreteArray(num_values=10, name='action')
def bsuite_info(self):
return dict(total_regret=self._total_regret)
|
bsuite-master
|
bsuite/environments/mnist.py
|
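As a quick sanity check on the regret bookkeeping in MNISTBandit above (this requires the MNIST data used by bsuite.utils.datasets to be available locally or downloadable), a uniform random guesser is correct about 10% of the time, so its average per-episode regret approaches roughly 1.8, which is the baseline used by the MNIST analysis later in this dump.
import numpy as np
from bsuite.environments import mnist

env = mnist.MNISTBandit(seed=101)
rng = np.random.RandomState(42)
num_episodes = 500
for _ in range(num_episodes):
  env.reset()
  env.step(rng.randint(10))  # random guess: +1 if correct, -1 otherwise
print(env.bsuite_info()['total_regret'] / num_episodes)  # roughly 1.8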
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Plots for summary data across all experiments, e.g. the radar plot."""
from typing import Callable, Mapping, NamedTuple, Optional, Sequence, Union
from bsuite.experiments.bandit import analysis as bandit_analysis
from bsuite.experiments.bandit_noise import analysis as bandit_noise_analysis
from bsuite.experiments.bandit_scale import analysis as bandit_scale_analysis
from bsuite.experiments.cartpole import analysis as cartpole_analysis
from bsuite.experiments.cartpole_noise import analysis as cartpole_noise_analysis
from bsuite.experiments.cartpole_scale import analysis as cartpole_scale_analysis
from bsuite.experiments.cartpole_swingup import analysis as cartpole_swingup_analysis
from bsuite.experiments.catch import analysis as catch_analysis
from bsuite.experiments.catch_noise import analysis as catch_noise_analysis
from bsuite.experiments.catch_scale import analysis as catch_scale_analysis
from bsuite.experiments.deep_sea import analysis as deep_sea_analysis
from bsuite.experiments.deep_sea_stochastic import analysis as deep_sea_stochastic_analysis
from bsuite.experiments.discounting_chain import analysis as discounting_chain_analysis
from bsuite.experiments.memory_len import analysis as memory_len_analysis
from bsuite.experiments.memory_size import analysis as memory_size_analysis
from bsuite.experiments.mnist import analysis as mnist_analysis
from bsuite.experiments.mnist_noise import analysis as mnist_noise_analysis
from bsuite.experiments.mnist_scale import analysis as mnist_scale_analysis
from bsuite.experiments.mountain_car import analysis as mountain_car_analysis
from bsuite.experiments.mountain_car_noise import analysis as mountain_car_noise_analysis
from bsuite.experiments.mountain_car_scale import analysis as mountain_car_scale_analysis
from bsuite.experiments.umbrella_distract import analysis as umbrella_distract_analysis
from bsuite.experiments.umbrella_length import analysis as umbrella_length_analysis
from bsuite.utils import plotting
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotnine as gg
################################################################################
# Summarizing scores
class BSuiteSummary(NamedTuple):
"""Container for summary metadata for a given bsuite experiment."""
score: Callable[[pd.DataFrame], float]
type: str
tags: Sequence[str]
episode: int
def _parse_bsuite(package) -> BSuiteSummary:
"""Returns a Bsuite summary from a package."""
return BSuiteSummary(
score=package.score,
type=package.TAGS[0],
tags=package.TAGS,
episode=package.NUM_EPISODES,
)
BSUITE_INFO = dict(
bandit=_parse_bsuite(bandit_analysis),
bandit_noise=_parse_bsuite(bandit_noise_analysis),
bandit_scale=_parse_bsuite(bandit_scale_analysis),
cartpole=_parse_bsuite(cartpole_analysis),
cartpole_noise=_parse_bsuite(cartpole_noise_analysis),
cartpole_scale=_parse_bsuite(cartpole_scale_analysis),
cartpole_swingup=_parse_bsuite(cartpole_swingup_analysis),
catch=_parse_bsuite(catch_analysis),
catch_noise=_parse_bsuite(catch_noise_analysis),
catch_scale=_parse_bsuite(catch_scale_analysis),
deep_sea=_parse_bsuite(deep_sea_analysis),
deep_sea_stochastic=_parse_bsuite(deep_sea_stochastic_analysis),
discounting_chain=_parse_bsuite(discounting_chain_analysis),
memory_len=_parse_bsuite(memory_len_analysis),
memory_size=_parse_bsuite(memory_size_analysis),
mnist=_parse_bsuite(mnist_analysis),
mnist_noise=_parse_bsuite(mnist_noise_analysis),
mnist_scale=_parse_bsuite(mnist_scale_analysis),
mountain_car=_parse_bsuite(mountain_car_analysis),
mountain_car_noise=_parse_bsuite(mountain_car_noise_analysis),
mountain_car_scale=_parse_bsuite(mountain_car_scale_analysis),
umbrella_distract=_parse_bsuite(umbrella_distract_analysis),
umbrella_length=_parse_bsuite(umbrella_length_analysis),
)
ALL_TAGS = set()
for bsuite_summary in BSUITE_INFO.values():
ALL_TAGS = ALL_TAGS.union(set(bsuite_summary.tags))
def _is_finished(df: pd.DataFrame, n_min: int) -> bool:
"""Check to see if every bsuite id in the dataframe is finished."""
# At this point we have grouped by any additional hyperparameters.
# Check if we have run enough episodes for every id.
max_time = df.groupby('bsuite_id')['episode'].max().reset_index()
return max_time['episode'].min() >= n_min
def _bsuite_score_single(df: pd.DataFrame,
experiment_info: Mapping[str, BSuiteSummary],
verbose: bool = False) -> pd.DataFrame:
"""Score the bsuite across all domains for a single agent."""
data = []
for env_name, env_data in df.groupby('bsuite_env'):
if env_name not in experiment_info:
if verbose:
print('WARNING: {}_score not found in load.py and so is excluded.'
.format(env_name))
else:
b_summary = experiment_info[env_name]
data.append({
'bsuite_env': env_name,
'score': b_summary.score(env_data),
'type': b_summary.type,
'tags': str(b_summary.tags),
'finished': _is_finished(env_data, b_summary.episode),
})
return pd.DataFrame(data)
def bsuite_score(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> pd.DataFrame:
"""Score bsuite for each experiment across hyperparameter settings."""
score_fun = lambda x: _bsuite_score_single(x, BSUITE_INFO)
if sweep_vars:
score_df = df.groupby(sweep_vars).apply(score_fun).reset_index()
else:
score_df = score_fun(df)
# Groupby has a habit of adding meaningless columns to dataframe.
for col in df.columns:
if col in ['level_0', 'level_1', 'level_2']:
score_df.drop(col, axis=1, inplace=True)
return score_df
def _summarize_single_by_tag(score_df: pd.DataFrame,
unique_tags: Sequence[str],
tags_column: str) -> pd.DataFrame:
"""Takes in a single scored dataframe and averages score over tags."""
df = score_df.copy()
# Expand the columns of dataframe to indicate if it contains valid tag.
for tag in unique_tags:
df[tag] = df[tags_column].str.contains(tag)
data = []
for tag in unique_tags:
ave_score = df.loc[df[tag], 'score'].mean()
data.append({'tag': tag, 'score': ave_score})
return pd.DataFrame(data)
def ave_score_by_tag(score_df: pd.DataFrame,
sweep_vars: Sequence[str]) -> pd.DataFrame:
"""Takes in a bsuite scored dataframe and summarizes by tags."""
summary_fun = lambda x: _summarize_single_by_tag(x, list(ALL_TAGS), 'tags')
if sweep_vars:
summary_df = score_df.groupby(sweep_vars).apply(summary_fun).reset_index()
else:
summary_df = summary_fun(score_df)
return summary_df
################################################################################
# Summary plots
def _gen_ordered_experiments() -> Sequence[str]:
"""Provides a list of ordered experiments for bar plot."""
basics = ['bandit', 'mnist', 'catch', 'mountain_car', 'cartpole']
noise = [env + '_noise' for env in basics]
scale = [env + '_scale' for env in basics]
explore = ['deep_sea', 'deep_sea_stochastic', 'cartpole_swingup']
credit = ['umbrella_length', 'umbrella_distract', 'discounting_chain']
memory = ['memory_len', 'memory_size']
return basics + noise + scale + explore + credit + memory
_ORDERED_EXPERIMENTS = _gen_ordered_experiments()
_ORDERED_TYPES = [
'basic', 'noise', 'scale', 'exploration', 'credit_assignment', 'memory']
def _clean_bar_plot_data(
df_in: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> pd.DataFrame:
"""Clean the summary data for bar plot comparison of agents."""
df = df_in.copy()
df['env'] = pd.Categorical(
df.bsuite_env, categories=_ORDERED_EXPERIMENTS, ordered=True)
df['type'] = pd.Categorical(
df['type'], categories=_ORDERED_TYPES, ordered=True)
if sweep_vars is None:
df['agent'] = 'agent'
elif len(sweep_vars) == 1:
df['agent'] = df[sweep_vars[0]].astype(str)
else:
df['agent'] = (df[sweep_vars].astype(str)
.apply(lambda x: x.name + '=' + x, axis=0)
.apply(lambda x: '\n'.join(x), axis=1) # pylint:disable=unnecessary-lambda
)
return df
def bsuite_bar_plot(df_in: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
"""Output bar plot of bsuite data."""
df = _clean_bar_plot_data(df_in, sweep_vars)
p = (gg.ggplot(df)
+ gg.aes(x='env', y='score', colour='type', fill='type')
+ gg.geom_bar(position='dodge', stat='identity')
+ gg.geom_hline(yintercept=1., linetype='dashed', alpha=0.5)
+ gg.scale_colour_manual(plotting.CATEGORICAL_COLOURS)
+ gg.scale_fill_manual(plotting.CATEGORICAL_COLOURS)
+ gg.xlab('experiment')
+ gg.theme(axis_text_x=gg.element_text(angle=25, hjust=1))
)
if not all(df.finished): # add a layer of alpha for unfinished jobs
p += gg.aes(alpha='finished')
p += gg.scale_alpha_discrete(range=[0.3, 1.0])
# Compute the necessary size of the plot
if sweep_vars:
p += gg.facet_wrap(sweep_vars, labeller='label_both', ncol=1)
n_hypers = df[sweep_vars].drop_duplicates().shape[0]
else:
n_hypers = 1
return p + gg.theme(figure_size=(14, 3 * n_hypers + 1))
def _bar_plot_compare(df: pd.DataFrame) -> gg.ggplot:
"""Bar plot of buite score data, comparing agents on each experiment."""
p = (gg.ggplot(df)
+ gg.aes(x='agent', y='score', colour='agent', fill='agent')
+ gg.geom_bar(position='dodge', stat='identity')
+ gg.geom_hline(yintercept=1., linetype='dashed', alpha=0.5)
+ gg.theme(axis_text_x=gg.element_text(angle=25, hjust=1))
+ gg.scale_colour_manual(plotting.CATEGORICAL_COLOURS)
+ gg.scale_fill_manual(plotting.CATEGORICAL_COLOURS)
)
if not all(df.finished): # add a layer of alpha for unfinished jobs
p += gg.aes(alpha='finished')
p += gg.scale_alpha_discrete(range=[0.3, 1.0])
return p
def bsuite_bar_plot_compare(
df_in: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
"""Output bar plot of bsuite data, comparing agents on each experiment."""
df = _clean_bar_plot_data(df_in, sweep_vars)
p = _bar_plot_compare(df)
p += gg.facet_wrap('env', labeller='label_both')
p += gg.theme(figure_size=(18, 16))
return p
def plot_single_experiment(
summary_df: pd.DataFrame,
bsuite_env: str,
sweep_vars: Optional[Sequence[str]] = None) -> Union[gg.ggplot, None]:
"""Compare score for just one experiment."""
if len(summary_df) == 0: # pylint:disable=g-explicit-length-test
print('WARNING: you have no bsuite summary data, please reload.')
return
env_df = summary_df[summary_df.bsuite_env == bsuite_env]
if len(env_df) == 0: # pylint:disable=g-explicit-length-test
print('Warning, you have no data for bsuite_env={}'.format(bsuite_env))
print('Your dataframe only includes bsuite_env={}'
.format(summary_df.bsuite_env.unique()))
return
df = _clean_bar_plot_data(env_df, sweep_vars)
n_agent = len(df.agent.unique())
p = _bar_plot_compare(df)
plot_width = min(2 + n_agent, 12)
p += gg.theme(figure_size=(plot_width, 6))
p += gg.ggtitle('bsuite score for {} experiment'.format(bsuite_env))
print('tags={}'.format(df.tags.iloc[0]))
return p
def _tag_pretify(tag):
return tag.replace('_', ' ').title()
def _radar(
df: pd.DataFrame, ax: plt.Axes, label: str, all_tags: Sequence[str],
color: str, alpha: float = 0.2, edge_alpha: float = 0.85, zorder: int = 2,
edge_style: str = '-'):
"""Plot utility for generating the underlying radar plot."""
tmp = df.groupby('tag').mean().reset_index()
values = []
for curr_tag in all_tags:
score = 0.
selected = tmp[tmp['tag'] == curr_tag]
if len(selected) == 1:
score = float(selected['score'])
else:
print('{} bsuite scores found for tag {!r} with setting {!r}. '
'Replacing with zero.'.format(len(selected), curr_tag, label))
values.append(score)
values = np.maximum(values, 0.05) # don't let radar collapse to 0.
values = np.concatenate((values, [values[0]]))
angles = np.linspace(0, 2*np.pi, len(all_tags), endpoint=False)
angles = np.concatenate((angles, [angles[0]]))
ax.plot(angles, values, '-', linewidth=5, label=label,
c=color, alpha=edge_alpha, zorder=zorder, linestyle=edge_style)
ax.fill(angles, values, alpha=alpha, color=color, zorder=zorder)
# TODO(iosband): Necessary for some change in matplotlib code...
axis_angles = angles[:-1] * 180/np.pi
ax.set_thetagrids(
axis_angles, map(_tag_pretify, all_tags), fontsize=18)
# To avoid text on top of gridlines, we flip horizontalalignment
# based on label location
text_angles = np.rad2deg(angles)
for label, angle in zip(ax.get_xticklabels()[:-1], text_angles[:-1]):
if 90 <= angle <= 270:
label.set_horizontalalignment('right')
else:
label.set_horizontalalignment('left')
def bsuite_radar_plot(summary_data: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None):
"""Output a radar plot of bsuite data from bsuite_summary by tag."""
fig = plt.figure(figsize=(8, 8), facecolor='white')
ax = fig.add_subplot(111, polar=True)
try:
ax.set_axis_bgcolor('white')
except AttributeError:
ax.set_facecolor('white')
all_tags = sorted(summary_data['tag'].unique())
if sweep_vars is None:
summary_data['agent'] = 'agent'
elif len(sweep_vars) == 1:
summary_data['agent'] = summary_data[sweep_vars[0]].astype(str)
else:
summary_data['agent'] = (summary_data[sweep_vars].astype(str)
.apply(lambda x: x.name + '=' + x, axis=0)
.apply(lambda x: '\n'.join(x), axis=1) # pylint:disable=unnecessary-lambda
)
if len(summary_data.agent.unique()) > 5:
print('WARNING: We do not recommend radar plot for more than 5 agents.')
# Creating radar plot background by hand, reusing the _radar call
# it will give a slight illusion of being "3D" as inner part will be
# darker than the outer
thetas = np.linspace(0, 2*np.pi, 100)
ax.fill(thetas, [0.25,] * 100, color='k', alpha=0.05)
ax.fill(thetas, [0.5,] * 100, color='k', alpha=0.05)
ax.fill(thetas, [0.75,] * 100, color='k', alpha=0.03)
ax.fill(thetas, [1.,] * 100, color='k', alpha=0.01)
palette = lambda x: plotting.CATEGORICAL_COLOURS[x]
if sweep_vars:
sweep_data_ = summary_data.groupby('agent')
for aid, (agent, sweep_df) in enumerate(sweep_data_):
_radar(sweep_df, ax, agent, all_tags, color=palette(aid)) # pytype: disable=wrong-arg-types # pandas-drop-duplicates-overloads
if len(sweep_vars) == 1:
label = sweep_vars[0]
if label == 'experiment':
label = 'agent' # rename if actually each individual agent
legend = ax.legend(loc=(1.1, 0.), ncol=1, title=label)
ax.get_legend().get_title().set_fontsize('20')
ax.get_legend().get_title().set_fontname('serif')
ax.get_legend().get_title().set_color('k')
ax.get_legend().get_title().set_alpha(0.75)
legend._legend_box.align = 'left' # pylint:disable=protected-access
else:
legend = ax.legend(loc=(1.1, 0.), ncol=1,)
plt.setp(legend.texts, fontname='serif')
frame = legend.get_frame()
frame.set_color('white')
for text in legend.get_texts():
text.set_color('grey')
else:
_radar(summary_data, ax, '', all_tags, color=palette(0))
# Changing internal lines to be dotted and semi transparent
for line in ax.xaxis.get_gridlines():
line.set_color('grey')
line.set_alpha(0.95)
line.set_linestyle(':')
line.set_linewidth(2)
for line in ax.yaxis.get_gridlines():
line.set_color('grey')
line.set_alpha(0.95)
line.set_linestyle(':')
line.set_linewidth(2)
plt.xticks(color='grey', fontname='serif')
ax.set_rlabel_position(0)
plt.yticks(
[0, 0.25, 0.5, 0.75, 1],
['', '.25', '.5', '.75', '1'],
color='k', alpha=0.75, fontsize=16, fontname='serif')
# For some reason axis labels are behind plot by default ...
ax.set_axisbelow(False)
return fig
|
bsuite-master
|
bsuite/experiments/summary_analysis.py
|
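A sketch of how the summary functions above fit together. Here `df` stands for a results dataframe produced by bsuite's experiment logging (with columns such as bsuite_env, bsuite_id and episode, plus each experiment's metrics); it is assumed rather than constructed, so this is not a standalone script.
from bsuite.experiments import summary_analysis

score_df = summary_analysis.bsuite_score(df)            # one score per experiment
summary_df = summary_analysis.ave_score_by_tag(score_df, sweep_vars=None)
bar = summary_analysis.bsuite_bar_plot(score_df)        # plotnine bar chart
radar = summary_analysis.bsuite_radar_plot(summary_df)  # matplotlib radar figure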
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.experiments.summary_analysis."""
from absl.testing import absltest
from bsuite.experiments import summary_analysis
class SummaryAnalysisTest(absltest.TestCase):
def test_constants(self):
self.assertNotEmpty(summary_analysis.BSUITE_INFO)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/experiments/summary_analysis_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for MNIST."""
from typing import Optional, Sequence
from bsuite.experiments.mnist import sweep
from bsuite.utils import plotting
import numpy as np
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
BASE_REGRET = 1.8
TAGS = sweep.TAGS
def score(df: pd.DataFrame) -> float:
"""Output a single score = 50% regret, 50% "final accuracy"."""
regret_score = plotting.ave_regret_score(
df, baseline_regret=BASE_REGRET, episode=sweep.NUM_EPISODES)
final_df = df.copy()
final_df['ave_return'] = (
1.0 - (final_df.total_regret.diff() / final_df.episode.diff()))
final_df = final_df[final_df.episode > 0.9 * NUM_EPISODES]
# Convert (+1, -1) average return --> (+1, 0) accuracy score
acc_score = np.mean(final_df.ave_return + 1) * 0.5
return 0.5 * (regret_score + acc_score)
def plot_learning(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
"""Plots the average regret through time."""
p = plotting.plot_regret_learning(
df, sweep_vars=sweep_vars, max_episode=sweep.NUM_EPISODES)
p += gg.geom_hline(gg.aes(yintercept=BASE_REGRET),
linetype='dashed', alpha=0.4, size=1.75)
return p
def plot_seeds(df_in: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None,
colour_var: Optional[str] = None) -> gg.ggplot:
"""Plot the accuracy through time individually by run."""
df = df_in.copy()
df['average_return'] = 1.0 - (df.total_regret.diff() / df.episode.diff())
df['average_accuracy'] = (df.average_return + 1) / 2
p = plotting.plot_individual_returns(
df_in=df[df.episode >= 100],
max_episode=NUM_EPISODES,
return_column='average_accuracy',
colour_var=colour_var,
yintercept=1.,
sweep_vars=sweep_vars,
)
return p + gg.ylab('average accuracy')
|
bsuite-master
|
bsuite/experiments/mnist/analysis.py
|
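The accuracy conversion used in score() and plot_seeds() above is just an affine rescaling of the (+1, -1) reward scheme; a tiny numeric illustration:
import numpy as np

average_returns = np.array([-1.0, 0.0, 0.8, 1.0])  # average return in [-1, 1]
accuracies = 0.5 * (average_returns + 1)            # accuracy in [0, 1]
print(accuracies)  # [0.   0.5  0.9  1. ]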
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/mnist/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for mnist bandit experiment."""
NUM_EPISODES = 10000
SETTINGS = tuple({'seed': None} for n in range(20))
TAGS = ('basic', 'generalization')
|
bsuite-master
|
bsuite/experiments/mnist/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MNIST classification as a bandit.
In this environment, we test the agent's generalization ability, and abstract
away exploration/planning/memory etc -- i.e. a bandit, with no 'state'.
"""
from bsuite.environments import mnist
load = mnist.MNISTBandit
|
bsuite-master
|
bsuite/experiments/mnist/mnist.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for catch with noise."""
from typing import Optional, Sequence
from bsuite.experiments.catch import analysis as catch_analysis
from bsuite.experiments.catch_noise import sweep
from bsuite.utils import plotting
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
TAGS = sweep.TAGS
def score(df: pd.DataFrame, scaling_var='noise_scale') -> float:
"""Output a single score for experiment = mean - std over scaling_var."""
return plotting.score_by_scaling(
df=df,
score_fn=catch_analysis.score,
scaling_var=scaling_var,
)
def plot_learning(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None,
group_col: str = 'noise_scale') -> gg.ggplot:
"""Plots the average regret through time."""
p = plotting.plot_regret_learning(
df_in=df, group_col=group_col, sweep_vars=sweep_vars,
max_episode=sweep.NUM_EPISODES)
p += gg.geom_hline(gg.aes(yintercept=catch_analysis.BASE_REGRET),
linetype='dashed', alpha=0.4, size=1.75)
return p
def plot_average(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None,
group_col: str = 'noise_scale') -> gg.ggplot:
"""Plots the average regret through time by noise_scale."""
p = plotting.plot_regret_average(
df_in=df,
group_col=group_col,
episode=sweep.NUM_EPISODES,
sweep_vars=sweep_vars
)
p += gg.geom_hline(gg.aes(yintercept=catch_analysis.BASE_REGRET),
linetype='dashed', alpha=0.4, size=1.75)
return p
def plot_seeds(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
"""Plot the performance by individual work unit."""
return catch_analysis.plot_seeds(
df_in=df,
sweep_vars=sweep_vars,
colour_var='noise_scale'
) + gg.ylab('average episodic return (removing noise)')
|
bsuite-master
|
bsuite/experiments/catch_noise/analysis.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/catch_noise/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Catch environment with noisy rewards."""
from bsuite.environments import catch
from bsuite.experiments.catch_noise import sweep
from bsuite.utils import wrappers
def load(noise_scale, seed):
"""Load a catch experiment with the prescribed settings."""
env = wrappers.RewardNoise(
env=catch.Catch(seed=seed),
noise_scale=noise_scale,
seed=seed)
env.bsuite_num_episodes = sweep.NUM_EPISODES
return env
|
bsuite-master
|
bsuite/experiments/catch_noise/catch_noise.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.experiments.catch_noise."""
from absl.testing import absltest
from bsuite.experiments.catch_noise import catch_noise
from dm_env import test_utils
import numpy as np
class InterfaceTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
return catch_noise.load(1., 22)
def make_action_sequence(self):
valid_actions = [0, 1, 2]
rng = np.random.RandomState(42)
for _ in range(100):
yield rng.choice(valid_actions)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/experiments/catch_noise/catch_noise_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for catch_noise experiment."""
from bsuite.experiments.catch import sweep as catch_sweep
NUM_EPISODES = catch_sweep.NUM_EPISODES
_settings = []
for scale in [0.1, 0.3, 1.0, 3., 10.]:
for seed in range(4):
_settings.append({'noise_scale': scale, 'seed': None})
SETTINGS = tuple(_settings)
TAGS = ('noise', 'credit_assignment')
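# Editor's note: illustrative check, not part of the bsuite source. Each entry
# in SETTINGS above is a kwargs dict for one work unit of the sweep; a runner
# would call something like `catch_noise.load(**settings)` for each entry.
assert len(SETTINGS) == 20  # 5 noise scales x 4 replicas per scale.
assert SETTINGS[0] == {'noise_scale': 0.1, 'seed': None}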
|
bsuite-master
|
bsuite/experiments/catch_noise/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for bandit_noise."""
from typing import Optional, Sequence
from bsuite.experiments.bandit import analysis as bandit_analysis
from bsuite.experiments.bandit_noise import sweep
from bsuite.utils import plotting
import numpy as np
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
TAGS = sweep.TAGS
def score(df: pd.DataFrame, scaling_var='noise_scale') -> float:
"""Output a single score for experiment = mean - std over scaling_var."""
return plotting.score_by_scaling(
df=df,
score_fn=bandit_analysis.score,
scaling_var=scaling_var,
)
def plot_learning(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None,
group_col: str = 'noise_scale') -> gg.ggplot:
"""Plots the average regret through time."""
p = plotting.plot_regret_learning(
df_in=df, group_col=group_col, sweep_vars=sweep_vars,
max_episode=sweep.NUM_EPISODES)
return bandit_analysis.bandit_learning_format(p)
def plot_average(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None,
group_col: str = 'noise_scale') -> gg.ggplot:
"""Plots the average regret through time by noise_scale."""
p = plotting.plot_regret_average(
df_in=df,
group_col=group_col,
episode=sweep.NUM_EPISODES,
sweep_vars=sweep_vars
)
p += gg.scale_y_continuous(breaks=np.arange(0, 1.1, 0.1).tolist())
p += gg.theme(panel_grid_major_y=gg.element_line(size=2.5),
panel_grid_minor_y=gg.element_line(size=0),)
p += gg.geom_hline(gg.aes(yintercept=bandit_analysis.BASE_REGRET),
linetype='dashed', alpha=0.4, size=1.75)
p += gg.coord_cartesian(ylim=(0, 1))
return p
def plot_seeds(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
"""Plot the performance by individual work unit."""
return bandit_analysis.plot_seeds(
df_in=df,
sweep_vars=sweep_vars,
colour_var='noise_scale'
) + gg.ylab('average episodic return (removing noise)')
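# Editor's note: conceptual sketch, not part of the bsuite source. It mirrors
# the score docstring above ("mean - std over scaling_var"): each noise_scale
# group is scored separately, then variability across groups is penalised.
# The per-group numbers below are made up purely for illustration.
_toy_scores_by_noise_scale = np.array([0.9, 0.8, 0.6, 0.4, 0.2])
_toy_overall_score = (_toy_scores_by_noise_scale.mean()
                      - _toy_scores_by_noise_scale.std())
# A high mean with a small spread across noise scales gives the best score.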
|
bsuite-master
|
bsuite/experiments/bandit_noise/analysis.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.experiments.bandit_noise."""
from absl.testing import absltest
from bsuite.experiments.bandit_noise import bandit_noise
from dm_env import test_utils
import numpy as np
class InterfaceTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
return bandit_noise.load(1., 42, 42)
def make_action_sequence(self):
valid_actions = range(11)
rng = np.random.RandomState(42)
for _ in range(100):
yield rng.choice(valid_actions)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/experiments/bandit_noise/bandit_noise_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/bandit_noise/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Simple diagnostic bandit_noise challenge.
Observation is a single pixel of 0 - this is an independent arm bandit problem!
Rewards are np.linspace(0, 1, 11) with some level of reward noise.
"""
from bsuite.environments import bandit
from bsuite.experiments.bandit import sweep
from bsuite.utils import wrappers
def load(noise_scale, seed, mapping_seed, num_actions=11):
"""Load a bandit_noise experiment with the prescribed settings."""
env = wrappers.RewardNoise(
env=bandit.SimpleBandit(mapping_seed, num_actions=num_actions),
noise_scale=noise_scale,
seed=seed)
env.bsuite_num_episodes = sweep.NUM_EPISODES
return env
|
bsuite-master
|
bsuite/experiments/bandit_noise/bandit_noise.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for bandit_noise experiment."""
from bsuite.experiments.bandit import sweep as bandit_sweep
NUM_EPISODES = bandit_sweep.NUM_EPISODES
_settings = []
for scale in [0.1, 0.3, 1.0, 3., 10.]:
for n in range(4):
_settings.append({'noise_scale': scale, 'seed': None, 'mapping_seed': n})
SETTINGS = tuple(_settings)
TAGS = ('noise',)
|
bsuite-master
|
bsuite/experiments/bandit_noise/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MNIST bandit with noisy rewards."""
from bsuite.environments import mnist
from bsuite.experiments.mnist_noise import sweep
from bsuite.utils import wrappers
def load(noise_scale, seed):
"""Load a mnist_noise experiment with the prescribed settings."""
env = wrappers.RewardNoise(
env=mnist.MNISTBandit(seed=seed),
noise_scale=noise_scale,
seed=seed)
env.bsuite_num_episodes = sweep.NUM_EPISODES
return env
|
bsuite-master
|
bsuite/experiments/mnist_noise/mnist_noise.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for mnist_noise."""
from typing import Optional, Sequence
from bsuite.experiments.mnist import analysis as mnist_analysis
from bsuite.experiments.mnist_noise import sweep
from bsuite.utils import plotting
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
TAGS = sweep.TAGS
def score(df: pd.DataFrame, scaling_var='noise_scale') -> float:
"""Output a single score for experiment = mean - std over scaling_var."""
return plotting.score_by_scaling(
df=df,
score_fn=mnist_analysis.score,
scaling_var=scaling_var,
)
def plot_learning(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None,
group_col: str = 'noise_scale') -> gg.ggplot:
"""Plots the average regret through time."""
p = plotting.plot_regret_learning(
df_in=df, group_col=group_col, sweep_vars=sweep_vars,
max_episode=sweep.NUM_EPISODES)
p += gg.geom_hline(gg.aes(yintercept=mnist_analysis.BASE_REGRET),
linetype='dashed', alpha=0.4, size=1.75)
return p
def plot_average(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None,
group_col: str = 'noise_scale') -> gg.ggplot:
"""Plots the average regret through time by noise_scale."""
p = plotting.plot_regret_average(
df_in=df,
group_col=group_col,
episode=sweep.NUM_EPISODES,
sweep_vars=sweep_vars
)
p += gg.geom_hline(gg.aes(yintercept=mnist_analysis.BASE_REGRET),
linetype='dashed', alpha=0.4, size=1.75)
return p
def plot_seeds(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
"""Plot the performance by individual work unit."""
return mnist_analysis.plot_seeds(
df_in=df,
sweep_vars=sweep_vars,
colour_var='noise_scale'
) + gg.ylab('average accuracy (removing noise)')
|
bsuite-master
|
bsuite/experiments/mnist_noise/analysis.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/mnist_noise/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for mnist_noise experiment."""
from bsuite.experiments.mnist import sweep as mnist_sweep
NUM_EPISODES = mnist_sweep.NUM_EPISODES
_settings = []
for scale in [0.1, 0.3, 1.0, 3., 10.]:
for seed in range(4):
_settings.append({'noise_scale': scale, 'seed': None})
SETTINGS = tuple(_settings)
TAGS = ('noise', 'generalization')
|
bsuite-master
|
bsuite/experiments/mnist_noise/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.experiments.mnist."""
from absl.testing import absltest
from bsuite.experiments.mnist_noise import mnist_noise
from dm_env import test_utils
import numpy as np
class InterfaceTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
return mnist_noise.load(noise_scale=2.0, seed=101)
def make_action_sequence(self):
num_actions = self.environment.action_spec().num_values
rng = np.random.RandomState(42)
for _ in range(100):
yield rng.randint(num_actions)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/experiments/mnist_noise/mnist_noise_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis functions for mountain_car experiment."""
from typing import Optional, Sequence
from bsuite.experiments.mountain_car import sweep
from bsuite.utils import plotting
import pandas as pd
import plotnine as gg
_SOLVED_STEPS = 100
NUM_EPISODES = sweep.NUM_EPISODES
TAGS = sweep.TAGS
BASE_REGRET = 1000 # Maximum regret staying at bottom for 1000 steps
def score(df: pd.DataFrame) -> float:
"""Output a single score for mountain car."""
cp_df = mountain_car_preprocess(df_in=df)
return plotting.ave_regret_score(
cp_df, baseline_regret=BASE_REGRET, episode=sweep.NUM_EPISODES)
def mountain_car_preprocess(df_in: pd.DataFrame) -> pd.DataFrame:
"""Preprocess mountain_car data for use with regret metrics."""
df = df_in.copy()
ideal_total_return = _SOLVED_STEPS * -1 * df.episode
total_return = df.raw_return # Sum of all rewards so far.
df['total_regret'] = ideal_total_return - total_return
return df
def plot_learning(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
"""Simple learning curves for mountain_car."""
df = mountain_car_preprocess(df)
p = plotting.plot_regret_learning(
df, sweep_vars=sweep_vars, max_episode=sweep.NUM_EPISODES)
p += gg.geom_hline(gg.aes(yintercept=BASE_REGRET),
linetype='dashed', alpha=0.4, size=1.75)
return p
def plot_seeds(df_in: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None,
colour_var: Optional[str] = None) -> gg.ggplot:
"""Plot the returns through time individually by run."""
df = df_in.copy()
df['average_return'] = df.raw_return.diff() / df.episode.diff()
p = plotting.plot_individual_returns(
df_in=df,
max_episode=NUM_EPISODES,
return_column='average_return',
colour_var=colour_var,
yintercept=-_SOLVED_STEPS,
sweep_vars=sweep_vars,
)
return p + gg.ylab('average episodic return')
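# Editor's note: toy worked example, not part of the bsuite source. With
# _SOLVED_STEPS = 100, an agent that has played 5 episodes for a summed raw
# return of -800 is 300 steps behind the ideal policy:
#   ideal_total_return = 100 * -1 * 5 = -500
#   total_regret       = -500 - (-800) = 300
_toy_df = pd.DataFrame({'episode': [5], 'raw_return': [-800.0]})
assert mountain_car_preprocess(_toy_df).total_regret.iloc[0] == 300.0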
|
bsuite-master
|
bsuite/experiments/mountain_car/analysis.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/mountain_car/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Python implementation of 'Mountain Car' environment.
An underpowered car must drive up a hill; to succeed, the agent must rock back
and forth to build momentum.
This is a classic environment in RL research, first described by:
A Moore, Efficient Memory-Based Learning for Robot Control,
PhD thesis, University of Cambridge, 1990.
"""
from bsuite.environments import mountain_car
load = mountain_car.MountainCar
|
bsuite-master
|
bsuite/experiments/mountain_car/mountain_car.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for mountain_car experiment."""
NUM_EPISODES = 1000
SETTINGS = tuple({'seed': None} for n in range(20))
TAGS = ('basic', 'generalization')
|
bsuite-master
|
bsuite/experiments/mountain_car/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Mountain car scale reinforcement learning environment."""
from bsuite.environments import mountain_car
from bsuite.experiments.mountain_car_noise import sweep
from bsuite.utils import wrappers
def load(reward_scale: float, seed: int):
"""Load a mountain_car experiment with the prescribed settings."""
env = wrappers.RewardScale(
env=mountain_car.MountainCar(seed=seed),
reward_scale=reward_scale,
seed=seed)
env.bsuite_num_episodes = sweep.NUM_EPISODES
return env
|
bsuite-master
|
bsuite/experiments/mountain_car_scale/mountain_car_scale.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for mountain_car_scale experiment."""
from typing import Optional, Sequence
from bsuite.experiments.mountain_car import analysis as mc_analysis
from bsuite.experiments.mountain_car_noise import analysis as mc_noise_analysis
from bsuite.experiments.mountain_car_scale import sweep
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
TAGS = sweep.TAGS
def score(df: pd.DataFrame) -> float:
return mc_noise_analysis.score(df, scaling_var='reward_scale')
def plot_learning(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
return mc_noise_analysis.plot_learning(df, sweep_vars, 'reward_scale')
def plot_average(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
return mc_noise_analysis.plot_average(df, sweep_vars, 'reward_scale')
def plot_seeds(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
"""Plot the performance by individual work unit."""
return mc_analysis.plot_seeds(
df_in=df,
sweep_vars=sweep_vars,
colour_var='reward_scale'
) + gg.ylab('average episodic return (after rescaling)')
|
bsuite-master
|
bsuite/experiments/mountain_car_scale/analysis.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/mountain_car_scale/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for mountain_car_scale experiment."""
from bsuite.experiments.mountain_car import sweep as mountain_car_sweep
NUM_EPISODES = mountain_car_sweep.NUM_EPISODES
_settings = []
for scale in [0.001, 0.03, 1.0, 30., 1000.]:
for seed in range(4):
_settings.append({'reward_scale': scale, 'seed': None})
SETTINGS = tuple(_settings)
TAGS = ('scale', 'generalization')
|
bsuite-master
|
bsuite/experiments/mountain_car_scale/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.experiments.mountain_car_scale."""
from absl.testing import absltest
from bsuite.experiments.mountain_car_scale import mountain_car_scale
from dm_env import test_utils
import numpy as np
class InterfaceTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
return mountain_car_scale.load(10., 22)
def make_action_sequence(self):
valid_actions = [0, 1, 2]
rng = np.random.RandomState(42)
for _ in range(100):
yield rng.choice(valid_actions)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/experiments/mountain_car_scale/mountain_car_scale_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.experiments.cartpole_scale."""
from absl.testing import absltest
from bsuite.experiments.cartpole_scale import cartpole_scale
from dm_env import test_utils
import numpy as np
class InterfaceTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
return cartpole_scale.load(10., 22)
def make_action_sequence(self):
valid_actions = [0, 1, 2]
rng = np.random.RandomState(42)
for _ in range(100):
yield rng.choice(valid_actions)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/experiments/cartpole_scale/cartpole_scale_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for cartpole_scale environments."""
from typing import Optional, Sequence
from bsuite.experiments.cartpole import analysis as cartpole_analysis
from bsuite.experiments.cartpole_noise import analysis as cartpole_noise_analysis
from bsuite.experiments.cartpole_scale import sweep
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
TAGS = sweep.TAGS
def score(df: pd.DataFrame) -> float:
return cartpole_noise_analysis.score(df, scaling_var='reward_scale')
def plot_learning(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
return cartpole_noise_analysis.plot_learning(df, sweep_vars, 'reward_scale')
def plot_average(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
return cartpole_noise_analysis.plot_average(df, sweep_vars, 'reward_scale')
def plot_seeds(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
"""Plot the performance by individual work unit."""
return cartpole_analysis.plot_seeds(
df_in=df,
sweep_vars=sweep_vars,
colour_var='reward_scale'
) + gg.ylab('average episodic return (after rescaling)')
|
bsuite-master
|
bsuite/experiments/cartpole_scale/analysis.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/cartpole_scale/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Cartpole environment with scaled rewards."""
from bsuite.environments import cartpole
from bsuite.experiments.cartpole_scale import sweep
from bsuite.utils import wrappers
def load(reward_scale, seed):
"""Load a cartpole experiment with the prescribed settings."""
env = wrappers.RewardScale(
env=cartpole.Cartpole(seed=seed),
reward_scale=reward_scale,
seed=seed)
env.bsuite_num_episodes = sweep.NUM_EPISODES
return env
|
bsuite-master
|
bsuite/experiments/cartpole_scale/cartpole_scale.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for cartpole_scale experiment."""
from bsuite.experiments.cartpole import sweep as cartpole_sweep
NUM_EPISODES = cartpole_sweep.NUM_EPISODES
_settings = []
for scale in [0.001, 0.03, 1.0, 30., 1000.]:
for seed in range(4):
_settings.append({'reward_scale': scale, 'seed': None})
SETTINGS = tuple(_settings)
TAGS = ('scale', 'generalization')
|
bsuite-master
|
bsuite/experiments/cartpole_scale/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.experiments.deep_sea_stochastic."""
from absl.testing import absltest
from bsuite.experiments.deep_sea_stochastic import deep_sea_stochastic
from dm_env import test_utils
import numpy as np
class InterfaceTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
return deep_sea_stochastic.load(22)
def make_action_sequence(self):
valid_actions = [0, 1]
rng = np.random.RandomState(42)
for _ in range(100):
yield rng.choice(valid_actions)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/experiments/deep_sea_stochastic/deep_sea_stochastic_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for deep sea stochastic.
We say that a deep sea episode is 'bad' when the agent moves 'left' while it
is on the 'optimal' trajectory. However, in the stochastic case the agent can
record a few 'bad' episodes purely through the luck of the environment noise.
To make sure a solution is not down to luck, we use a more stringent threshold
and only start counting once the agent has completed at least 100 episodes.
"""
from typing import Optional, Sequence
from bsuite.experiments.deep_sea import analysis as deep_sea_analysis
from bsuite.experiments.deep_sea_stochastic import sweep
import numpy as np
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
TAGS = sweep.TAGS
plot_scaling = deep_sea_analysis.plot_scaling
plot_scaling_log = deep_sea_analysis.plot_scaling_log
plot_regret = deep_sea_analysis.plot_regret
def find_solution(df_in: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None,
num_episodes: int = NUM_EPISODES) -> pd.DataFrame:
"""Find first solution episode, with harsher thresh for stochastic domain."""
df = df_in.copy()
df = df[df.episode >= 100]
return deep_sea_analysis.find_solution(
df, sweep_vars, thresh=0.8, num_episodes=num_episodes)
def score(df: pd.DataFrame,
forgiveness: float = 100.) -> float:
"""Outputs a single score for deep sea selection."""
plt_df = find_solution(df)
beat_dither = (plt_df.solved
& (plt_df.episode < 2 ** plt_df['size'] + forgiveness))
return np.mean(beat_dither)
def plot_seeds(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None,
num_episodes: int = NUM_EPISODES) -> gg.ggplot:
"""Plot the returns through time individually by run."""
return deep_sea_analysis.plot_seeds(
df_in=df,
sweep_vars=sweep_vars,
yintercept=np.exp(-1),
num_episodes=num_episodes,
) + gg.ylab('average episodic return (excluding additive noise)')
|
bsuite-master
|
bsuite/experiments/deep_sea_stochastic/analysis.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Stochastic Deep Sea environment."""
from bsuite.environments import deep_sea
from bsuite.experiments.deep_sea_stochastic import sweep
def load(size: int, mapping_seed=0):
"""Load a deep sea experiment with the prescribed settings."""
env = deep_sea.DeepSea(
size=size,
deterministic=False,
mapping_seed=mapping_seed,
)
env.bsuite_num_episodes = sweep.NUM_EPISODES
return env
|
bsuite-master
|
bsuite/experiments/deep_sea_stochastic/deep_sea_stochastic.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/deep_sea_stochastic/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for deep_sea_stochastic."""
from bsuite.experiments.deep_sea import sweep as deep_sea_sweep
NUM_EPISODES = deep_sea_sweep.NUM_EPISODES
SETTINGS = tuple({'size': n, 'mapping_seed': 42} for n in range(10, 51, 2))
TAGS = ('exploration', 'noise')
|
bsuite-master
|
bsuite/experiments/deep_sea_stochastic/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for umbrella_distract experiment."""
from typing import Optional, Sequence
from bsuite.experiments.umbrella_distract import sweep
from bsuite.experiments.umbrella_length import analysis as umbrella_length_analysis
from bsuite.utils import plotting
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
TAGS = sweep.TAGS
def score(df: pd.DataFrame) -> float:
return umbrella_length_analysis.score_by_group(df, 'n_distractor')
def plot_learning(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
"""Plots the average regret through time."""
return plotting.plot_regret_group_nosmooth(
df_in=df,
group_col='n_distractor',
sweep_vars=sweep_vars,
max_episode=sweep.NUM_EPISODES,
)
def plot_scale(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
"""Plots the average return at end of learning investigating scaling."""
return plotting.plot_regret_ave_scaling(
df_in=df,
group_col='n_distractor',
episode=sweep.NUM_EPISODES,
regret_thresh=0.5,
sweep_vars=sweep_vars,
)
def plot_seeds(df_in: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
"""Plot the returns through time individually by run."""
return umbrella_length_analysis.plot_seeds(df_in, sweep_vars, 'n_distractor')
|
bsuite-master
|
bsuite/experiments/umbrella_distract/analysis.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/umbrella_distract/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Unbrella chain environment with varying distractor observations."""
from bsuite.environments import umbrella_chain
from bsuite.experiments.umbrella_distract import sweep
def load(n_distractor: int, seed=0):
"""Load a deep sea experiment with the prescribed settings."""
env = umbrella_chain.UmbrellaChain(
chain_length=20,
n_distractor=n_distractor,
seed=seed,
)
env.bsuite_num_episodes = sweep.NUM_EPISODES
return env
|
bsuite-master
|
bsuite/experiments/umbrella_distract/umbrella_distract.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for umbrella_distract experiment."""
from bsuite.experiments.umbrella_length import sweep as umbrella_length_sweep
NUM_EPISODES = umbrella_length_sweep.NUM_EPISODES
_log_spaced = []
_log_spaced.extend(range(1, 11))
_log_spaced.extend([12, 14, 17, 20, 25])
_log_spaced.extend(range(30, 105, 10))
SETTINGS = tuple({'n_distractor': n} for n in _log_spaced)
TAGS = ('credit_assignment', 'noise')
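# Editor's note: illustrative check, not part of the bsuite source. The
# log-spaced grid above expands to 23 distractor counts:
# 1..10, then 12, 14, 17, 20, 25, then 30, 40, ..., 100.
assert len(SETTINGS) == 23
assert [s['n_distractor'] for s in SETTINGS[:3]] == [1, 2, 3]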
|
bsuite-master
|
bsuite/experiments/umbrella_distract/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for deep_sea experiment."""
from typing import Optional, Sequence
from bsuite.experiments.deep_sea import sweep
from bsuite.utils import plotting
import numpy as np
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
TAGS = sweep.TAGS
def _check_data(df: pd.DataFrame) -> None:
"""Check that the data has the correct information logged."""
assert 'total_bad_episodes' in df.columns
assert 'episode' in df.columns
assert 'size' in df.columns
def find_solution(df_in: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None,
merge: bool = True,
thresh: float = 0.9,
num_episodes: int = NUM_EPISODES) -> pd.DataFrame:
"""Find first episode that gets below thresh regret by sweep_vars."""
# Check data has the necessary columns for deep sea
df = df_in.copy()
_check_data(df)
df = df[df.episode <= num_episodes]
# Parse the variables that you are aggregating over
if sweep_vars is None:
sweep_vars = ['size']
elif 'size' not in sweep_vars:
sweep_vars = list(sweep_vars) + ['size']
  # Find the earliest episode at which the fraction of bad episodes falls below thresh
df['avg_bad_episodes'] = df.total_bad_episodes / df.episode
plt_df = df[df.avg_bad_episodes < thresh].groupby(sweep_vars)['episode']
plt_df = plt_df.min().reset_index()
solved = plt_df.set_index(sweep_vars).episode
unsolved_ids = set(df.set_index(sweep_vars).index) - set(solved.index)
unsolved = df.groupby(sweep_vars)['episode'].max()[list(unsolved_ids)]
plt_df = solved.append(unsolved).to_frame()
plt_df.rename(columns={0: 'episode'}, inplace=True)
plt_df.loc[solved.index, 'solved'] = True
plt_df.loc[unsolved.index, 'solved'] = False
plt_df.reset_index(inplace=True)
  # Add a column indicating whether the run has finished num_episodes episodes
finish_df = (
df.groupby(sweep_vars)['episode'].max() >= num_episodes).reset_index()
finish_df.rename(columns={'episode': 'finished'}, inplace=True)
plt_df = plt_df.merge(finish_df, on=sweep_vars)
plt_df.loc[plt_df.solved, 'finished'] = True # If solved -> finished
# Optionally merge back with all the df columns.
if merge:
join_vars = sweep_vars + ['episode']
plt_df = plt_df.merge(df, on=join_vars)
return plt_df
def score(df: pd.DataFrame,
forgiveness: float = 100.) -> float:
"""Outputs a single score for deep sea selection."""
plt_df = find_solution(df)
beat_dither = (plt_df.solved
& (plt_df.episode < 2 ** plt_df['size'] + forgiveness))
return np.mean(beat_dither)
def _make_baseline(plt_df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> pd.DataFrame:
"""Generate baseline 2^N data for each combination of sweep_vars."""
x = np.arange(5, 20)
baseline = pd.DataFrame(dict(size=x, episode=2**x))
if sweep_vars:
params = plt_df.groupby(sweep_vars).size().reset_index().drop(0, axis=1)
data = []
for _, row in params.iterrows():
tmp = baseline.copy()
for col, val in row.iteritems():
tmp[col] = val
data.append(tmp)
return pd.concat(data, sort=True)
else:
return baseline
def _base_scaling(plt_df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None,
with_baseline: bool = True) -> gg.ggplot:
"""Base underlying piece of the scaling plots for deep sea."""
p = (gg.ggplot(plt_df)
+ gg.aes(x='size', y='episode')
)
if np.all(plt_df.finished):
p += gg.geom_point(gg.aes(colour='solved'), size=3, alpha=0.75)
else:
p += gg.geom_point(gg.aes(shape='finished', colour='solved'),
size=3, alpha=0.75)
p += gg.scale_shape_manual(values=['x', 'o'])
if np.all(plt_df.solved):
p += gg.scale_colour_manual(values=['#313695']) # blue
else:
p += gg.scale_colour_manual(values=['#d73027', '#313695']) # [red, blue]
if with_baseline:
baseline_df = _make_baseline(plt_df, sweep_vars)
p += gg.geom_line(data=baseline_df, colour='black',
linetype='dashed', alpha=0.4, size=1.5)
return p
def plot_scaling(plt_df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None,
with_baseline: bool = True,
num_episodes: int = NUM_EPISODES) -> gg.ggplot:
"""Plot scaling of learning time against exponential baseline."""
p = _base_scaling(plt_df, sweep_vars, with_baseline)
p += gg.xlab('deep sea problem size')
p += gg.ylab('#episodes until < 90% bad episodes')
if with_baseline:
max_steps = np.minimum(num_episodes, plt_df.episode.max())
p += gg.coord_cartesian(ylim=(0, max_steps))
return plotting.facet_sweep_plot(p, sweep_vars)
def plot_scaling_log(plt_df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None,
with_baseline=True) -> gg.ggplot:
"""Plot scaling of learning time against exponential baseline."""
p = _base_scaling(plt_df, sweep_vars, with_baseline)
p += gg.scale_x_log10(breaks=[5, 10, 20, 50])
p += gg.scale_y_log10(breaks=[100, 300, 1000, 3000, 10000, 30000])
p += gg.xlab('deep sea problem size (log scale)')
p += gg.ylab('#episodes until < 90% bad episodes (log scale)')
return plotting.facet_sweep_plot(p, sweep_vars)
def plot_regret(df_in: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None,
num_episodes: int = NUM_EPISODES) -> gg.ggplot:
"""Plot average regret of deep_sea through time by size."""
df = df_in.copy()
df = df[df['size'].isin([10, 20, 30, 40, 50])]
df['avg_bad'] = df.total_bad_episodes / df.episode
df['size'] = df['size'].astype('category')
p = (gg.ggplot(df[df.episode <= num_episodes])
+ gg.aes('episode', 'avg_bad', group='size', colour='size')
+ gg.geom_line(size=2, alpha=0.75)
+ gg.geom_hline(
gg.aes(yintercept=0.99), linetype='dashed', alpha=0.4, size=1.75)
+ gg.geom_hline(gg.aes(yintercept=0.0), alpha=0) # axis hack
+ gg.ylab('average bad episodes')
+ gg.scale_colour_manual(values=plotting.FIVE_COLOURS)
)
return plotting.facet_sweep_plot(p, sweep_vars)
def plot_seeds(df_in: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None,
yintercept: float = 0.99,
num_episodes: int = NUM_EPISODES) -> gg.ggplot:
"""Plot the returns through time individually by run."""
df = df_in.copy()
df['average_return'] = df.denoised_return.diff() / df.episode.diff()
p = plotting.plot_individual_returns(
df_in=df[df.episode > 0.01 * num_episodes], # First episodes very noisy
max_episode=num_episodes,
return_column='average_return',
colour_var='size',
yintercept=yintercept,
sweep_vars=sweep_vars,
)
return p + gg.ylab('average episodic return')
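# Editor's note: illustrative arithmetic, not part of the bsuite source. The
# score above only credits a problem size if learning beats the 2 ** size
# dithering baseline (plus the forgiveness constant): for size = 10 an agent
# must cross the 90%-bad-episodes threshold before episode
# 2 ** 10 + 100 = 1124 in order to count towards the score.
_example_size, _forgiveness = 10, 100.
assert 2 ** _example_size + _forgiveness == 1124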
|
bsuite-master
|
bsuite/experiments/deep_sea/analysis.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/deep_sea/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for deep_sea experiment."""
NUM_EPISODES = 10000
SETTINGS = tuple({'size': n, 'mapping_seed': 42} for n in range(10, 51, 2))
TAGS = ('exploration',)
|
bsuite-master
|
bsuite/experiments/deep_sea/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Python implementation of 'Deep Sea' exploration environment."""
from bsuite.environments import deep_sea
load = deep_sea.DeepSea
|
bsuite-master
|
bsuite/experiments/deep_sea/deep_sea.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for bandit."""
from typing import Optional, Sequence
from bsuite.experiments.bandit import sweep
from bsuite.utils import plotting
import numpy as np
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
BASE_REGRET = 0.5
TAGS = sweep.TAGS
def score(df: pd.DataFrame) -> float:
"""Output a single score for bandit experiment."""
return plotting.ave_regret_score(
df, baseline_regret=BASE_REGRET, episode=sweep.NUM_EPISODES)
def plot_learning(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
"""Plots the average regret through time."""
p = plotting.plot_regret_learning(
df, sweep_vars=sweep_vars, max_episode=sweep.NUM_EPISODES)
return bandit_learning_format(p)
def bandit_learning_format(plot: gg.ggplot) -> gg.ggplot:
"""Add nice bandit formatting to ggplot."""
plot += gg.scale_y_continuous(breaks=np.arange(0, 1.1, 0.1).tolist())
plot += gg.theme(panel_grid_major_y=gg.element_line(size=2.5),
panel_grid_minor_y=gg.element_line(size=0))
plot += gg.geom_hline(
gg.aes(yintercept=BASE_REGRET), linetype='dashed', alpha=0.4, size=1.75)
plot += gg.coord_cartesian(ylim=(0, 1))
return plot
def plot_seeds(df_in: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None,
colour_var: Optional[str] = None) -> gg.ggplot:
"""Plot the returns through time individually by run."""
df = df_in.copy()
df['average_return'] = 1.0 - (df.total_regret.diff() / df.episode.diff())
p = plotting.plot_individual_returns(
df_in=df,
max_episode=NUM_EPISODES,
return_column='average_return',
colour_var=colour_var,
yintercept=1.,
sweep_vars=sweep_vars,
)
return p + gg.ylab('average episodic return')
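# Worked example of the `average_return` derivation used in `plot_seeds`
# above (a toy sketch with made-up numbers): with rewards in [0, 1], the
# return averaged over a logging window is one minus the regret accrued per
# episode in that window.
if __name__ == '__main__':
  _df = pd.DataFrame({'episode': [100, 200, 300],
                      'total_regret': [60., 100., 120.]})
  _df['average_return'] = 1.0 - (_df.total_regret.diff() / _df.episode.diff())
  print(_df.average_return.tolist())  # Approximately [nan, 0.6, 0.8].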
|
bsuite-master
|
bsuite/experiments/bandit/analysis.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/bandit/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Simple diagnostic bandit environment.
Observation is a single pixel of 0 - this is an independent arm bandit problem!
Rewards are [0, 0.1, ..., 1], assigned randomly to the 11 arms, and are deterministic.
"""
from bsuite.environments import bandit
load = bandit.SimpleBandit
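# Minimal usage sketch (assumes bsuite is installed): the sweep passes only
# `mapping_seed`, which shuffles the fixed reward values across the arms.
if __name__ == '__main__':
  env = load(mapping_seed=0)
  timestep = env.reset()
  timestep = env.step(0)  # Pull arm 0; the episode is a single step.
  print('num actions:', env.action_spec().num_values)  # 11 per the docstring.
  print('reward for arm 0:', timestep.reward)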
|
bsuite-master
|
bsuite/experiments/bandit/bandit.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for bandit experiment."""
NUM_EPISODES = 10000
SETTINGS = tuple({'mapping_seed': n} for n in range(20))
TAGS = ('basic',)
|
bsuite-master
|
bsuite/experiments/bandit/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for memory_len experiment."""
from typing import Optional, Sequence
from bsuite.experiments.memory_len import analysis as memory_len_analysis
from bsuite.experiments.memory_size import sweep
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
TAGS = sweep.TAGS
def score(df: pd.DataFrame) -> float:
return memory_len_analysis.score(df, group_col='num_bits')
def plot_learning(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
return memory_len_analysis.plot_learning(df, sweep_vars, 'num_bits')
def plot_scale(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
return memory_len_analysis.plot_scale(df, sweep_vars, 'num_bits')
def plot_seeds(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
return memory_len_analysis.plot_seeds(
df_in=df[df.episode > 100],
sweep_vars=sweep_vars,
colour_var='num_bits',
)
|
bsuite-master
|
bsuite/experiments/memory_size/analysis.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/memory_size/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for memory_len experiment."""
from bsuite.experiments.memory_len import sweep as memory_len_sweep
NUM_EPISODES = memory_len_sweep.NUM_EPISODES
_log_spaced = []
_log_spaced.extend(range(1, 11))
_log_spaced.extend([12, 14, 17, 20, 25])
_log_spaced.extend(range(30, 50, 10))
SETTINGS = tuple({'num_bits': n} for n in _log_spaced)
TAGS = ('memory',)
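# Note: the log-spaced grid above expands to
# [1, 2, ..., 10, 12, 14, 17, 20, 25, 30, 40] (range(30, 50, 10) contributes
# only 30 and 40), i.e. 17 settings in total.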
|
bsuite-master
|
bsuite/experiments/memory_size/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Simple diagnostic memory challenge.
Observation is given by n+1 pixels: (context, time_to_live).
The context is nonzero only on the first step, where each component is +1 or -1
i.i.d. Actions have no effect until time_to_live=0; the agent must then repeat
the observations it saw, bit by bit.
"""
from typing import Optional
from bsuite.environments import memory_chain
from bsuite.experiments.memory_size import sweep
def load(num_bits: int, seed: Optional[int] = 0):
"""Memory Chain environment, with variable number of bits."""
env = memory_chain.MemoryChain(
memory_length=2,
num_bits=num_bits,
seed=seed,
)
env.bsuite_num_episodes = sweep.NUM_EPISODES
return env
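# Minimal usage sketch (assumes bsuite and dm_env are installed): the context
# is shown only at the start of the episode; once time_to_live reaches 0 the
# agent must reproduce it bit by bit, as described in the module docstring.
if __name__ == '__main__':
  env = load(num_bits=3, seed=0)
  timestep = env.reset()
  print('initial observation:', timestep.observation)
  while not timestep.last():
    timestep = env.step(0)  # A fixed action; only recall-phase actions matter.
  print('final reward:', timestep.reward)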
|
bsuite-master
|
bsuite/experiments/memory_size/memory_size.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.experiments.catch_scale."""
from absl.testing import absltest
from bsuite.experiments.catch_scale import catch_scale
from dm_env import test_utils
import numpy as np
class InterfaceTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
return catch_scale.load(10., 22)
def make_action_sequence(self):
valid_actions = [0, 1, 2]
rng = np.random.RandomState(42)
for _ in range(100):
yield rng.choice(valid_actions)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/experiments/catch_scale/catch_scale_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for catch scale environments."""
from typing import Optional, Sequence
from bsuite.experiments.catch import analysis as catch_analysis
from bsuite.experiments.catch_noise import analysis as catch_noise_analysis
from bsuite.experiments.catch_scale import sweep
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
TAGS = sweep.TAGS
def score(df: pd.DataFrame) -> float:
return catch_noise_analysis.score(df, scaling_var='reward_scale')
def plot_learning(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
return catch_noise_analysis.plot_learning(df, sweep_vars, 'reward_scale')
def plot_average(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
return catch_noise_analysis.plot_average(df, sweep_vars, 'reward_scale')
def plot_seeds(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
"""Plot the performance by individual work unit."""
return catch_analysis.plot_seeds(
df_in=df,
sweep_vars=sweep_vars,
colour_var='reward_scale'
) + gg.ylab('average episodic return (after rescaling)')
|
bsuite-master
|
bsuite/experiments/catch_scale/analysis.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Catch environment with scaled rewards."""
from bsuite.environments import catch
from bsuite.experiments.catch_scale import sweep
from bsuite.utils import wrappers
def load(reward_scale, seed):
"""Load a catch experiment with the prescribed settings."""
env = wrappers.RewardScale(
env=catch.Catch(seed=seed),
reward_scale=reward_scale,
seed=seed)
env.bsuite_num_episodes = sweep.NUM_EPISODES
return env
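# Minimal usage sketch (assumes bsuite and dm_env are installed): the
# RewardScale wrapper rescales Catch's terminal +1/-1 reward by
# `reward_scale`, leaving the underlying control problem unchanged.
if __name__ == '__main__':
  env = load(reward_scale=1000., seed=0)
  timestep = env.reset()
  while not timestep.last():
    timestep = env.step(1)  # Any valid Catch action; 1 is intended as 'stay'.
  print('scaled terminal reward:', timestep.reward)  # Expected to be +/-1000.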
|
bsuite-master
|
bsuite/experiments/catch_scale/catch_scale.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/catch_scale/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for catch_scale experiment."""
from bsuite.experiments.catch import sweep as catch_sweep
NUM_EPISODES = catch_sweep.NUM_EPISODES
_settings = []
for scale in [0.001, 0.03, 1.0, 30., 1000.]:
  for seed in range(4):
    _settings.append({'reward_scale': scale, 'seed': seed})
SETTINGS = tuple(_settings)
TAGS = ('scale', 'credit_assignment')
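# Note: the grid above expands to 5 reward scales x 4 seeds = 20 settings,
# e.g. SETTINGS[0] == {'reward_scale': 0.001, 'seed': 0}.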
|
bsuite-master
|
bsuite/experiments/catch_scale/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for umbrella_length experiment."""
from typing import Optional, Sequence
from bsuite.experiments.umbrella_length import sweep
from bsuite.utils import plotting
import numpy as np
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
REGRET_THRESH = 0.5
TAGS = sweep.TAGS
def score_by_group(df: pd.DataFrame, group_col: str) -> float:
"""Output a single score for umbrella_chain."""
regret_list = [] # Loop to handle partially-finished runs.
for _, sub_df in df.groupby(group_col):
max_eps = np.minimum(sub_df.episode.max(), sweep.NUM_EPISODES)
ave_regret = (
sub_df.loc[sub_df.episode == max_eps, 'total_regret'].mean() / max_eps)
regret_list.append(ave_regret)
return np.mean(np.array(regret_list) < REGRET_THRESH)
def score(df: pd.DataFrame) -> float:
return score_by_group(df, group_col='chain_length')
def plot_learning(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
"""Plots the average regret through time."""
return plotting.plot_regret_group_nosmooth(
df_in=df,
group_col='chain_length',
sweep_vars=sweep_vars,
max_episode=sweep.NUM_EPISODES,
)
def plot_scale(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
"""Plots the average return at end of learning investigating scaling."""
return plotting.plot_regret_ave_scaling(
df_in=df,
group_col='chain_length',
episode=sweep.NUM_EPISODES,
regret_thresh=0.5,
sweep_vars=sweep_vars,
)
def plot_seeds(df_in: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None,
colour_var: str = 'chain_length') -> gg.ggplot:
"""Plot the returns through time individually by run."""
df = df_in.copy()
df['average_return'] = 1.0 - (df.total_regret.diff() / df.episode.diff())
p = plotting.plot_individual_returns(
df_in=df,
max_episode=NUM_EPISODES,
return_column='average_return',
colour_var=colour_var,
yintercept=1.,
sweep_vars=sweep_vars,
)
return p + gg.ylab('average episodic return')
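# Worked example of `score_by_group` above (a toy sketch with made-up
# numbers): the score is the fraction of chain lengths whose average
# per-episode regret at the end of training falls below REGRET_THRESH.
if __name__ == '__main__':
  _df = pd.DataFrame({
      'chain_length': [1, 1, 5, 5],
      'episode': [5000, 10000, 5000, 10000],
      'total_regret': [1000., 2000., 4000., 8000.],
  })
  # chain_length=1 ends at regret 2000/10000 = 0.2 < 0.5 (solved);
  # chain_length=5 ends at 8000/10000 = 0.8 >= 0.5 (not solved).
  print(score_by_group(_df, group_col='chain_length'))  # 0.5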
|
bsuite-master
|
bsuite/experiments/umbrella_length/analysis.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Simple diagnostic credit assigment challenge.
Observation is 3 + n_distractor pixels:
(need_umbrella, have_umbrella, time_to_live, n x distractors)
Only the first action has any effect (pick up the umbrella or not).
All other actions have no effect, and the reward is +1 or -1 on the final step.
Distractor states are Bernoulli sampled i.i.d. at each step.
"""
from bsuite.environments import umbrella_chain
load = umbrella_chain.UmbrellaChain
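# Minimal usage sketch (assumes bsuite and dm_env are installed): the sweep
# keys (`chain_length`, `n_distractor`) are constructor arguments here, so a
# setting can be splatted into `load` directly.
if __name__ == '__main__':
  from bsuite.experiments.umbrella_length import sweep

  env = load(**sweep.SETTINGS[0])  # chain_length=1, n_distractor=20.
  timestep = env.reset()
  timestep = env.step(0)  # Only this first action matters.
  while not timestep.last():
    timestep = env.step(0)
  print('final reward:', timestep.reward)  # +1 or -1 per the docstring above.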
|
bsuite-master
|
bsuite/experiments/umbrella_length/umbrella_length.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/umbrella_length/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for umbrella_length experiment."""
NUM_EPISODES = 10000
_log_spaced = []
_log_spaced.extend(range(1, 11))
_log_spaced.extend([12, 14, 17, 20, 25])
_log_spaced.extend(range(30, 105, 10))
SETTINGS = tuple({'chain_length': n, 'n_distractor': 20} for n in _log_spaced)
TAGS = ('credit_assignment', 'noise')
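# Note: the log-spaced grid above expands to chain lengths
# [1, ..., 10, 12, 14, 17, 20, 25, 30, 40, ..., 100], i.e. 23 settings, each
# with n_distractor=20.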
|
bsuite-master
|
bsuite/experiments/umbrella_length/sweep.py
|