python_code
|
repo_name
|
file_path
|
|---|---|---|
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run training of one or more algorithmic tasks from CLRS."""
import functools
import os
import shutil
from typing import Any, Dict, List
from absl import app
from absl import flags
from absl import logging
import clrs
import jax
import numpy as np
import requests
import tensorflow as tf
flags.DEFINE_list('algorithms', ['bfs'], 'Which algorithms to run.')
flags.DEFINE_list('train_lengths', ['4', '7', '11', '13', '16'],
'Which training sizes to use. A size of -1 means '
'use the benchmark dataset.')
flags.DEFINE_integer('length_needle', -8,
'Length of needle for training and validation '
'(not testing) in string matching algorithms. '
'A negative value randomizes the length for each sample '
'between 1 and the absolute value of this flag. '
'A value of 0 means always use 1/4 of the length of '
'the haystack (the default sampler behavior).')
flags.DEFINE_integer('seed', 42, 'Random seed to set')
flags.DEFINE_boolean('random_pos', True,
'Randomize the pos input common to all algos.')
flags.DEFINE_boolean('enforce_permutations', True,
'Whether to enforce permutation-type node pointers.')
flags.DEFINE_boolean('enforce_pred_as_input', True,
'Whether to change pred_h hints into pred inputs.')
flags.DEFINE_integer('batch_size', 32, 'Batch size used for training.')
flags.DEFINE_boolean('chunked_training', False,
'Whether to use chunking for training.')
flags.DEFINE_integer('chunk_length', 16,
'Time chunk length used for training (if '
'`chunked_training` is True).')
flags.DEFINE_integer('train_steps', 10000, 'Number of training iterations.')
flags.DEFINE_integer('eval_every', 50, 'Evaluation frequency (in steps).')
flags.DEFINE_integer('test_every', 500, 'Test evaluation frequency (in steps).')
flags.DEFINE_integer('hidden_size', 128,
'Number of hidden units of the model.')
flags.DEFINE_integer('nb_heads', 1, 'Number of heads for GAT processors')
flags.DEFINE_integer('nb_msg_passing_steps', 1,
'Number of message passing steps to run per hint.')
flags.DEFINE_float('learning_rate', 0.001, 'Learning rate to use.')
flags.DEFINE_float('grad_clip_max_norm', 1.0,
'Gradient clipping by norm. 0.0 disables grad clipping')
flags.DEFINE_float('dropout_prob', 0.0, 'Dropout rate to use.')
flags.DEFINE_float('hint_teacher_forcing', 0.0,
'Probability that ground-truth teacher hints are encoded '
'during training instead of predicted hints. Only '
'pertinent in encoded_decoded modes.')
flags.DEFINE_enum('hint_mode', 'encoded_decoded',
['encoded_decoded', 'decoded_only', 'none'],
'How should hints be used? Note, each mode defines a '
'separate task, with various difficulties. `encoded_decoded` '
'requires the model to explicitly materialise hint sequences '
'and therefore is hardest, but also most aligned to the '
'underlying algorithmic rule. Hence, `encoded_decoded` '
'should be treated as the default mode for our benchmark. '
'In `decoded_only`, hints are only used for defining '
'reconstruction losses. Often, this will perform well, but '
'note that we currently do not make any efforts to '
'counterbalance the various hint losses. Hence, for certain '
'tasks, the best performance will now be achievable with no '
'hint usage at all (`none`).')
flags.DEFINE_enum('hint_repred_mode', 'soft', ['soft', 'hard', 'hard_on_eval'],
'How to process predicted hints when fed back as inputs. '
'In soft mode, we use softmaxes for categoricals, pointers '
'and mask_one, and sigmoids for masks. '
'In hard mode, we use argmax instead of softmax, and hard '
'thresholding of masks. '
'In hard_on_eval mode, soft mode is '
'used for training and hard mode is used for evaluation.')
flags.DEFINE_boolean('use_ln', True,
'Whether to use layer normalisation in the processor.')
flags.DEFINE_boolean('use_lstm', False,
'Whether to insert an LSTM after message passing.')
flags.DEFINE_integer('nb_triplet_fts', 8,
'How many triplet features to compute?')
flags.DEFINE_enum('encoder_init', 'xavier_on_scalars',
['default', 'xavier_on_scalars'],
'Initialiser to use for the encoders.')
flags.DEFINE_enum('processor_type', 'triplet_gmpnn',
['deepsets', 'mpnn', 'pgn', 'pgn_mask',
'triplet_mpnn', 'triplet_pgn', 'triplet_pgn_mask',
'gat', 'gatv2', 'gat_full', 'gatv2_full',
'gpgn', 'gpgn_mask', 'gmpnn',
'triplet_gpgn', 'triplet_gpgn_mask', 'triplet_gmpnn'],
'Processor type to use as the network P.')
flags.DEFINE_string('checkpoint_path', '/tmp/CLRS30',
'Path in which checkpoints are saved.')
flags.DEFINE_string('dataset_path', '/tmp/CLRS30',
'Path in which dataset is stored.')
flags.DEFINE_boolean('freeze_processor', False,
'Whether to freeze the processor of the model.')
FLAGS = flags.FLAGS
PRED_AS_INPUT_ALGOS = [
'binary_search',
'minimum',
'find_maximum_subarray',
'find_maximum_subarray_kadane',
'matrix_chain_order',
'lcs_length',
'optimal_bst',
'activity_selector',
'task_scheduling',
'naive_string_matcher',
'kmp_matcher',
'jarvis_march']
def unpack(v):
try:
return v.item() # DeviceArray
except (AttributeError, ValueError):
return v
def _iterate_sampler(sampler, batch_size):
while True:
yield sampler.next(batch_size)
def _maybe_download_dataset(dataset_path):
"""Download CLRS30 dataset if needed."""
dataset_folder = os.path.join(dataset_path, clrs.get_clrs_folder())
if os.path.isdir(dataset_folder):
logging.info('Dataset found at %s. Skipping download.', dataset_folder)
return dataset_folder
logging.info('Dataset not found in %s. Downloading...', dataset_folder)
clrs_url = clrs.get_dataset_gcp_url()
request = requests.get(clrs_url, allow_redirects=True)
clrs_file = os.path.join(dataset_path, os.path.basename(clrs_url))
os.makedirs(dataset_folder)
with open(clrs_file, 'wb') as clrs_archive:
  clrs_archive.write(request.content)
shutil.unpack_archive(clrs_file, extract_dir=dataset_folder)
os.remove(clrs_file)
return dataset_folder
def make_sampler(length: int,
rng: Any,
algorithm: str,
split: str,
batch_size: int,
multiplier: int,
randomize_pos: bool,
enforce_pred_as_input: bool,
enforce_permutations: bool,
chunked: bool,
chunk_length: int,
sampler_kwargs: Dict[str, Any]):
"""Create a sampler with given options.
Args:
length: Size of samples (i.e., number of nodes in the graph).
A length of -1 will mean that the benchmark
dataset (for the given split) is used. Positive sizes will instantiate
samplers of the corresponding size.
rng: Numpy random state.
algorithm: The name of the algorithm to sample from.
split: 'train', 'val' or 'test'.
batch_size: Samples per batch.
multiplier: Integer multiplier for the number of samples in the dataset,
only used for positive sizes. Negative multiplier means infinite samples.
randomize_pos: Whether to randomize the `pos` input.
enforce_pred_as_input: Whether to convert fixed pred_h hints to inputs.
enforce_permutations: Whether to enforce permutation pointers.
chunked: Whether to chunk the dataset.
chunk_length: Unroll length of chunks, if `chunked` is True.
sampler_kwargs: Extra args passed to the sampler.
Returns:
A sampler (iterator), the number of samples in the iterator (negative
if infinite samples), and the spec.
"""
if length < 0: # load from file
dataset_folder = _maybe_download_dataset(FLAGS.dataset_path)
sampler, num_samples, spec = clrs.create_dataset(folder=dataset_folder,
algorithm=algorithm,
batch_size=batch_size,
split=split)
sampler = sampler.as_numpy_iterator()
else:
num_samples = clrs.CLRS30[split]['num_samples'] * multiplier
sampler, spec = clrs.build_sampler(
algorithm,
seed=rng.randint(2**32),
num_samples=num_samples,
length=length,
**sampler_kwargs,
)
sampler = _iterate_sampler(sampler, batch_size)
if randomize_pos:
sampler = clrs.process_random_pos(sampler, rng)
if enforce_pred_as_input and algorithm in PRED_AS_INPUT_ALGOS:
spec, sampler = clrs.process_pred_as_input(spec, sampler)
spec, sampler = clrs.process_permutations(spec, sampler, enforce_permutations)
if chunked:
sampler = clrs.chunkify(sampler, chunk_length)
return sampler, num_samples, spec
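# Minimal usage sketch (never called by the pipeline): building a small
# training sampler directly with `make_sampler`. The length, batch size and
# connection probability below are illustrative assumptions, not values used
# by this script.
def _example_make_sampler_usage():
  example_rng = np.random.RandomState(0)
  sampler, num_samples, spec = make_sampler(
      length=8,
      rng=example_rng,
      algorithm='bfs',
      split='train',
      batch_size=4,
      multiplier=-1,  # negative multiplier -> unlimited samples
      randomize_pos=True,
      enforce_pred_as_input=True,
      enforce_permutations=True,
      chunked=False,
      chunk_length=16,
      sampler_kwargs={'p': (0.5,)},
  )
  return next(sampler), num_samples, spec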
def make_multi_sampler(sizes, rng, **kwargs):
"""Create a sampler with cycling sample sizes."""
ss = []
tot_samples = 0
for length in sizes:
sampler, num_samples, spec = make_sampler(length, rng, **kwargs)
ss.append(sampler)
tot_samples += num_samples
def cycle_samplers():
while True:
for s in ss:
yield next(s)
return cycle_samplers(), tot_samples, spec
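# `make_multi_sampler` cycles round-robin over one sampler per requested size;
# the returned count is the sum across sizes (negative when samples are
# unlimited, as with the training multiplier of -1 used below).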
def _concat(dps, axis):
return jax.tree_util.tree_map(lambda *x: np.concatenate(x, axis), *dps)
def collect_and_eval(sampler, predict_fn, sample_count, rng_key, extras):
"""Collect batches of output and hint preds and evaluate them."""
processed_samples = 0
preds = []
outputs = []
while processed_samples < sample_count:
feedback = next(sampler)
batch_size = feedback.outputs[0].data.shape[0]
outputs.append(feedback.outputs)
new_rng_key, rng_key = jax.random.split(rng_key)
cur_preds, _ = predict_fn(new_rng_key, feedback.features)
preds.append(cur_preds)
processed_samples += batch_size
outputs = _concat(outputs, axis=0)
preds = _concat(preds, axis=0)
out = clrs.evaluate(outputs, preds)
if extras:
out.update(extras)
return {k: unpack(v) for k, v in out.items()}
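# For reference, the loop in `main` below evaluates each algorithm with calls
# of this shape (a sketch of the actual call sites, not additional logic):
#
#   val_stats = collect_and_eval(
#       val_samplers[algo_idx],
#       functools.partial(eval_model.predict, algorithm_index=algo_idx),
#       val_sample_counts[algo_idx],
#       rng_key,
#       extras=common_extras)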
def create_samplers(rng, train_lengths: List[int]):
"""Create all the samplers."""
train_samplers = []
val_samplers = []
val_sample_counts = []
test_samplers = []
test_sample_counts = []
spec_list = []
for algo_idx, algorithm in enumerate(FLAGS.algorithms):
# Make full dataset pipeline run on CPU (including prefetching).
with tf.device('/cpu:0'):
if algorithm in ['naive_string_matcher', 'kmp_matcher']:
# Fixed haystack + needle; variability will be in needle
# Still, for chunked training, we maintain as many samplers
# as train lengths, since, for each length there is a separate state,
# and we must keep the 1:1 relationship between states and samplers.
max_length = max(train_lengths)
if max_length > 0: # if < 0, we are using the benchmark data
max_length = (max_length * 5) // 4
train_lengths = [max_length]
if FLAGS.chunked_training:
train_lengths = train_lengths * len(train_lengths)
logging.info('Creating samplers for algo %s', algorithm)
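# Edge connection probabilities for the graph samplers; `p` is forwarded to
# `clrs.build_sampler` through `sampler_kwargs` below.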
p = tuple([0.1 + 0.1 * i for i in range(9)])
if p and algorithm in ['articulation_points', 'bridges',
'mst_kruskal', 'bipartite_matching']:
# Choose a lower connection probability for the above algorithms,
# otherwise trajectories are very long
p = tuple(np.array(p) / 2)
length_needle = FLAGS.length_needle
sampler_kwargs = dict(p=p, length_needle=length_needle)
if length_needle == 0:
sampler_kwargs.pop('length_needle')
common_sampler_args = dict(
algorithm=FLAGS.algorithms[algo_idx],
rng=rng,
enforce_pred_as_input=FLAGS.enforce_pred_as_input,
enforce_permutations=FLAGS.enforce_permutations,
chunk_length=FLAGS.chunk_length,
)
train_args = dict(sizes=train_lengths,
split='train',
batch_size=FLAGS.batch_size,
multiplier=-1,
randomize_pos=FLAGS.random_pos,
chunked=FLAGS.chunked_training,
sampler_kwargs=sampler_kwargs,
**common_sampler_args)
train_sampler, _, spec = make_multi_sampler(**train_args)
mult = clrs.CLRS_30_ALGS_SETTINGS[algorithm]['num_samples_multiplier']
val_args = dict(sizes=[np.amax(train_lengths)],
split='val',
batch_size=32,
multiplier=2 * mult,
randomize_pos=FLAGS.random_pos,
chunked=False,
sampler_kwargs=sampler_kwargs,
**common_sampler_args)
val_sampler, val_samples, spec = make_multi_sampler(**val_args)
test_args = dict(sizes=[-1],
split='test',
batch_size=32,
multiplier=2 * mult,
randomize_pos=False,
chunked=False,
sampler_kwargs={},
**common_sampler_args)
test_sampler, test_samples, spec = make_multi_sampler(**test_args)
spec_list.append(spec)
train_samplers.append(train_sampler)
val_samplers.append(val_sampler)
val_sample_counts.append(val_samples)
test_samplers.append(test_sampler)
test_sample_counts.append(test_samples)
return (train_samplers,
val_samplers, val_sample_counts,
test_samplers, test_sample_counts,
spec_list)
def main(unused_argv):
if FLAGS.hint_mode == 'encoded_decoded':
encode_hints = True
decode_hints = True
elif FLAGS.hint_mode == 'decoded_only':
encode_hints = False
decode_hints = True
elif FLAGS.hint_mode == 'none':
encode_hints = False
decode_hints = False
else:
raise ValueError('Hint mode not in {encoded_decoded, decoded_only, none}.')
train_lengths = [int(x) for x in FLAGS.train_lengths]
rng = np.random.RandomState(FLAGS.seed)
rng_key = jax.random.PRNGKey(rng.randint(2**32))
# Create samplers
(train_samplers,
val_samplers, val_sample_counts,
test_samplers, test_sample_counts,
spec_list) = create_samplers(rng, train_lengths)
processor_factory = clrs.get_processor_factory(
FLAGS.processor_type,
use_ln=FLAGS.use_ln,
nb_triplet_fts=FLAGS.nb_triplet_fts,
nb_heads=FLAGS.nb_heads
)
model_params = dict(
processor_factory=processor_factory,
hidden_dim=FLAGS.hidden_size,
encode_hints=encode_hints,
decode_hints=decode_hints,
encoder_init=FLAGS.encoder_init,
use_lstm=FLAGS.use_lstm,
learning_rate=FLAGS.learning_rate,
grad_clip_max_norm=FLAGS.grad_clip_max_norm,
checkpoint_path=FLAGS.checkpoint_path,
freeze_processor=FLAGS.freeze_processor,
dropout_prob=FLAGS.dropout_prob,
hint_teacher_forcing=FLAGS.hint_teacher_forcing,
hint_repred_mode=FLAGS.hint_repred_mode,
nb_msg_passing_steps=FLAGS.nb_msg_passing_steps,
)
eval_model = clrs.models.BaselineModel(
spec=spec_list,
dummy_trajectory=[next(t) for t in val_samplers],
**model_params
)
if FLAGS.chunked_training:
train_model = clrs.models.BaselineModelChunked(
spec=spec_list,
dummy_trajectory=[next(t) for t in train_samplers],
**model_params
)
else:
train_model = eval_model
# Training loop.
best_score = -1.0
current_train_items = [0] * len(FLAGS.algorithms)
step = 0
next_eval = 0
# Make sure scores improve on the first step, but do not overtake the best
# score until all algos have had at least one evaluation.
val_scores = [-99999.9] * len(FLAGS.algorithms)
length_idx = 0
while step < FLAGS.train_steps:
feedback_list = [next(t) for t in train_samplers]
# Initialize model.
if step == 0:
all_features = [f.features for f in feedback_list]
if FLAGS.chunked_training:
# We need to initialize the model with samples of all lengths for
# all algorithms. Also, we need to make sure that the order of these
# sample sizes is the same as the order of the actual training sizes.
all_length_features = [all_features] + [
[next(t).features for t in train_samplers]
for _ in range(len(train_lengths))]
train_model.init(all_length_features[:-1], FLAGS.seed + 1)
else:
train_model.init(all_features, FLAGS.seed + 1)
# Training step.
for algo_idx in range(len(train_samplers)):
feedback = feedback_list[algo_idx]
rng_key, new_rng_key = jax.random.split(rng_key)
if FLAGS.chunked_training:
# In chunked training, we must indicate which training length we are
# using, so the model uses the correct state.
length_and_algo_idx = (length_idx, algo_idx)
else:
# In non-chunked training, all training lengths can be treated equally,
# since there is no state to maintain between batches.
length_and_algo_idx = algo_idx
cur_loss = train_model.feedback(rng_key, feedback, length_and_algo_idx)
rng_key = new_rng_key
if FLAGS.chunked_training:
examples_in_chunk = np.sum(feedback.features.is_last).item()
else:
examples_in_chunk = len(feedback.features.lengths)
current_train_items[algo_idx] += examples_in_chunk
logging.info('Algo %s step %i current loss %f, current_train_items %i.',
FLAGS.algorithms[algo_idx], step,
cur_loss, current_train_items[algo_idx])
# Periodically evaluate model
if step >= next_eval:
eval_model.params = train_model.params
for algo_idx in range(len(train_samplers)):
common_extras = {'examples_seen': current_train_items[algo_idx],
'step': step,
'algorithm': FLAGS.algorithms[algo_idx]}
# Validation info.
new_rng_key, rng_key = jax.random.split(rng_key)
val_stats = collect_and_eval(
val_samplers[algo_idx],
functools.partial(eval_model.predict, algorithm_index=algo_idx),
val_sample_counts[algo_idx],
new_rng_key,
extras=common_extras)
logging.info('(val) algo %s step %d: %s',
FLAGS.algorithms[algo_idx], step, val_stats)
val_scores[algo_idx] = val_stats['score']
next_eval += FLAGS.eval_every
# If best total score, update best checkpoint.
# Also save a best checkpoint on the first step.
msg = (f'best avg val score was '
f'{best_score/len(FLAGS.algorithms):.3f}, '
f'current avg val score is {np.mean(val_scores):.3f}, '
f'val scores are: ')
msg += ', '.join(
['%s: %.3f' % (x, y) for (x, y) in zip(FLAGS.algorithms, val_scores)])
if (sum(val_scores) > best_score) or step == 0:
best_score = sum(val_scores)
logging.info('Checkpointing best model, %s', msg)
train_model.save_model('best.pkl')
else:
logging.info('Not saving new best model, %s', msg)
step += 1
length_idx = (length_idx + 1) % len(train_lengths)
logging.info('Restoring best model from checkpoint...')
eval_model.restore_model('best.pkl', only_load_processor=False)
for algo_idx in range(len(train_samplers)):
common_extras = {'examples_seen': current_train_items[algo_idx],
'step': step,
'algorithm': FLAGS.algorithms[algo_idx]}
new_rng_key, rng_key = jax.random.split(rng_key)
test_stats = collect_and_eval(
test_samplers[algo_idx],
functools.partial(eval_model.predict, algorithm_index=algo_idx),
test_sample_counts[algo_idx],
new_rng_key,
extras=common_extras)
logging.info('(test) algo %s : %s', FLAGS.algorithms[algo_idx], test_stats)
logging.info('Done!')
if __name__ == '__main__':
app.run(main)
|
clrs-master
|
clrs/examples/run.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import tempfile
from pushworld.config import PUZZLE_EXTENSION
from pushworld.puzzle import Actions, PushWorldPuzzle
from pushworld.transform import get_puzzle_transforms
def test_transformation():
"""Tests `get_puzzle_transforms` by checking that the transformed solutions solve
the transformed puzzles."""
test_puzzle = os.path.join(
os.path.split(__file__)[0], "puzzles", "shortest_path_tool.pwp"
)
tempdir = tempfile.gettempdir()
plan = [
Actions.LEFT,
Actions.UP,
Actions.UP,
Actions.UP,
Actions.RIGHT,
Actions.DOWN,
Actions.DOWN,
Actions.DOWN,
Actions.DOWN,
]
puzzle = PushWorldPuzzle(test_puzzle)
assert puzzle.is_valid_plan(plan) # sanity check
actions_rot90_map = {
Actions.UP: Actions.RIGHT,
Actions.RIGHT: Actions.DOWN,
Actions.DOWN: Actions.LEFT,
Actions.LEFT: Actions.UP,
}
actions_flipped_map = {
Actions.UP: Actions.DOWN,
Actions.DOWN: Actions.UP,
Actions.RIGHT: Actions.RIGHT,
Actions.LEFT: Actions.LEFT,
}
with open(test_puzzle, "r") as fp:
puzzle_string = fp.read()
created_puzzles = get_puzzle_transforms(puzzle_string)
assert len(created_puzzles) == 8
for transform_name, puzzle_string in created_puzzles.items():
# only one number in the transform name
rotation_angle = int(re.findall(r"\d+", transform_name)[0])
puzzle_file_path = os.path.join(tempdir, transform_name + PUZZLE_EXTENSION)
with open(puzzle_file_path, "w") as puzzle_file:
puzzle_file.write(puzzle_string)
puzzle = PushWorldPuzzle(puzzle_file_path)
transformed_plan = plan
if "flipped" in transform_name:
transformed_plan = [actions_flipped_map[i] for i in transformed_plan]
for _ in range(rotation_angle // 90):
transformed_plan = [actions_rot90_map[i] for i in transformed_plan]
assert puzzle.is_valid_plan(transformed_plan)
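# Minimal sketch (not exercised by the test above): remapping a plan under a
# single 90-degree rotation, using the same action mapping as in
# `test_transformation`. The `plan` argument is whatever list of actions the
# caller supplies.
def _example_rotate_plan_90(plan):
    rot90 = {
        Actions.UP: Actions.RIGHT,
        Actions.RIGHT: Actions.DOWN,
        Actions.DOWN: Actions.LEFT,
        Actions.LEFT: Actions.UP,
    }
    return [rot90[action] for action in plan]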
|
pushworld-main
|
python3/test/test_transform.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import pytest
from typing import Any, Dict
import yaml
from pushworld.benchmark_rgd import benchmark_rgd_planner
from pushworld.config import (
BENCHMARK_PUZZLES_PATH, PUZZLE_EXTENSION, RGD_PLANNER_PATH
)
from pushworld.puzzle import Actions, PushWorldPuzzle
MISSING_PLANNER_EXECUTABLE = not os.path.exists(RGD_PLANNER_PATH)
SKIP_TEST_REASON = (
"The RGD planner executable was not found. "
"You may need to update `RGD_PLANNER_PATH` in `src/pushworld/config.py`."
)
def _benchmark_puzzle(puzzle_file_path: str, **kwargs) -> Dict[str, Any]:
"""Runs `benchmark_rgd_planner` on the given puzzle.
Args:
puzzle_file_path: The path of the PushWorld puzzle file to benchmark.
**kwargs: Forwarded to `benchmark_rgd_planner`.
Returns:
A dictionary containing the planning result. See `benchmark_rgd_planner`
for the contents of this result.
"""
tempdir = tempfile.gettempdir()
benchmark_rgd_planner(
results_path=tempdir,
puzzles_path=puzzle_file_path,
heuristic="N+RGD",
**kwargs,
)
puzzle_filename = os.path.split(puzzle_file_path)[1]
planning_result_filename = os.path.splitext(puzzle_filename)[0] + ".yaml"
planning_result_file_path = os.path.join(tempdir, planning_result_filename)
with open(planning_result_file_path, "r") as planning_result_file:
return yaml.safe_load(planning_result_file)
@pytest.mark.skipif(MISSING_PLANNER_EXECUTABLE, reason=SKIP_TEST_REASON)
def test_success():
"""Verifies that a plan is returned when a puzzle is solved."""
puzzle_name = "Pull Dont Push"
puzzle_file_path = os.path.join(
BENCHMARK_PUZZLES_PATH, "level2", puzzle_name + PUZZLE_EXTENSION
)
result = _benchmark_puzzle(
puzzle_file_path, time_limit=None, memory_limit=None
)
puzzle = PushWorldPuzzle(puzzle_file_path)
plan = [Actions.FROM_CHAR[s] for s in result["plan"].upper()]
assert puzzle.is_valid_plan(plan)
assert result["puzzle"] == puzzle_name
assert result["planner"] == "Novelty+RGD"
assert result["planning_time"] > 0
assert result.get("failure_reason", None) is None
@pytest.mark.skipif(MISSING_PLANNER_EXECUTABLE, reason=SKIP_TEST_REASON)
def test_time_limit():
"""Verifies that `benchmark_rgd_planner` detects when a puzzle reaches the
time limit.
"""
time_limit = 1 # seconds
result = _benchmark_puzzle(
os.path.join(
BENCHMARK_PUZZLES_PATH, "level4", "Four Pistons" + PUZZLE_EXTENSION
),
time_limit=time_limit,
memory_limit=None,
)
assert result["plan"] is None
assert result["failure_reason"] == "time limit reached"
assert result["planning_time"] == time_limit
@pytest.mark.skipif(MISSING_PLANNER_EXECUTABLE, reason=SKIP_TEST_REASON)
def test_memory_limit():
"""Verifies that `benchmark_rgd_planner` detects when a puzzle reaches the
memory limit.
"""
result = _benchmark_puzzle(
os.path.join(
BENCHMARK_PUZZLES_PATH, "level2", "Pull Dont Push" + PUZZLE_EXTENSION
),
time_limit=None,
memory_limit=0.001, # gigabytes
)
assert result["plan"] is None
assert result["failure_reason"] == "memory error"
|
pushworld-main
|
python3/test/test_benchmark_rgd.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import subprocess
import tempfile
from typing import List
from pushworld.config import (
DOMAIN_SUFFIX,
FAST_DOWNWARD_PATH,
PROBLEM_SUFFIX,
PUZZLE_EXTENSION,
)
from pushworld.pddl import puzzle_to_pddl
from pushworld.puzzle import Actions, PushWorldPuzzle
MISSING_PLANNER_EXECUTABLE = not os.path.exists(FAST_DOWNWARD_PATH)
SKIP_TEST_REASON = (
"The Fast Downward executable was not found. "
"You may need to update `FAST_DOWNWARD_PATH` in `src/pushworld/config.py`."
)
def run_fast_downward(
domain_file_path: str, problem_file_path: str
) -> List[str] | None:
"""Runs the Fast Downward planner on the given PDDL domain + problem.
Args:
domain_file_path: The path of the PDDL domain file.
problem_file_path: The path of the PDDL problem file.
Returns:
The resulting plan, stored as a list of PDDL actions, or None if a plan was
not found.
Raises:
AssertionError: If Fast Downward detects that the plan does not achieve
the problem goal.
"""
plan_file_path = f"{tempfile.gettempdir()}/plan"
proc = subprocess.Popen(
[
FAST_DOWNWARD_PATH,
"--alias",
"seq-sat-lama-2011",
"--overall-time-limit",
"100s",
"--overall-memory-limit",
"4G",
"--plan-file",
plan_file_path,
domain_file_path,
problem_file_path,
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
stdout = proc.communicate()[0].decode("utf-8")
if "Search stopped without finding a solution." in stdout:
return None
assert "Solution found." in stdout
plan = []
with open(f"{plan_file_path}.1", "r") as plan_file:
for line in plan_file.readlines():
if not line.startswith(";"):
plan.append(line.strip())
return plan
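# Hypothetical usage sketch for `run_fast_downward`; the file paths below are
# placeholders, not files shipped with this repository:
#
#   plan = run_fast_downward("/tmp/example_domain.pddl", "/tmp/example_problem.pddl")
#   if plan is not None:
#       assert all(action.startswith("(") for action in plan)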
def check_pddl(puzzle_name: str, for_bfws: bool, solution_exists: bool) -> None:
"""Verifies that a planner is able to find a solution in the given puzzle whenever
a solution exists.
Args:
puzzle_name: The name of a puzzle in the `puzzles` directory that is in the
same directory as this file.
for_bfws: Forwarded to `puzzle_to_pddl`.
solution_exists: True iff the puzzle has at least one solution.
Raises:
AssertionError: If
- a plan was found when a solution does not exist.
- a plan was not found when a solution exists.
- a discovered plan does not achieve the goal.
"""
puzzle = PushWorldPuzzle(
os.path.join(
os.path.dirname(__file__), "puzzles", puzzle_name + PUZZLE_EXTENSION
)
)
domain, problem = puzzle_to_pddl(puzzle_name, puzzle, for_bfws=for_bfws)
tempdir = tempfile.gettempdir()
domain_file_path = f"{tempdir}/{puzzle_name}{DOMAIN_SUFFIX}"
problem_file_path = f"{tempdir}/{puzzle_name}{PROBLEM_SUFFIX}"
with open(domain_file_path, "w") as domain_file:
domain_file.write(domain)
with open(problem_file_path, "w") as problem_file:
problem_file.write(problem)
plan = run_fast_downward(domain_file_path, problem_file_path)
# A map from PDDL agent-movement actions to PushWorld actions.
agent_action_map = {
"(move-agent left)": Actions.LEFT,
"(move-agent right)": Actions.RIGHT,
"(move-agent up)": Actions.UP,
"(move-agent down)": Actions.DOWN,
}
if solution_exists:
assert plan is not None
# Verify that the solution achieves the goal.
state = puzzle.initial_state
while len(plan) > 0:
# This action should always be "move-agent".
next_state = puzzle.get_next_state(state, agent_action_map[plan.pop(0)])
# The next action should always be "push agent".
assert plan.pop(0).startswith("(push agent")
# The remaining actions should push the correct number of objects.
num_moved_objects = sum(a != b for a, b in zip(state, next_state))
for _ in range(num_moved_objects - 1):
assert plan.pop(0).startswith("(push m")
state = next_state
assert puzzle.is_goal_state(state)
else:
assert plan is None
@pytest.mark.skipif(MISSING_PLANNER_EXECUTABLE, reason=SKIP_TEST_REASON)
def test_conversion() -> None:
"""Checks that `puzzle_to_pddl` generates a PDDL description that correctly
represents the dynamics of the PushWorld environment.
This test uses Fast Downward (https://www.fast-downward.org/) to find and
validate plans on the generated PDDL descriptions.
"""
puzzles_with_solutions = [
"trivial",
"trivial_tool",
"multiple_goals",
"trivial_obstacle",
"transitive_pushing",
]
puzzles_without_solutions = ["is_goal_state"]
for for_bfws in [True, False]:
for puzzle_name in puzzles_with_solutions:
check_pddl(puzzle_name, for_bfws, solution_exists=True)
for puzzle_name in puzzles_without_solutions:
check_pddl(puzzle_name, for_bfws, solution_exists=False)
|
pushworld-main
|
python3/test/test_pddl.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import yaml
from pushworld.config import BENCHMARK_PUZZLES_PATH, BENCHMARK_SOLUTIONS_PATH
from pushworld.puzzle import Actions, PushWorldPuzzle
from pushworld.utils.filesystem import get_puzzle_file_paths, iter_files_with_extension
def test_dataset() -> None:
"""Checks that every puzzle in the dataset has a solution."""
puzzle_file_paths = get_puzzle_file_paths(BENCHMARK_PUZZLES_PATH)
visited_puzzles = set()
errors = []
for result_file_path in iter_files_with_extension(
BENCHMARK_SOLUTIONS_PATH, ".yaml"
):
with open(result_file_path) as file:
planning_result = yaml.safe_load(file)
puzzle_name = planning_result["puzzle"]
if puzzle_name not in puzzle_file_paths:
errors.append(
f'No puzzle is named "{puzzle_name}" in the directory: '
f"{BENCHMARK_PUZZLES_PATH}. Referenced from: {result_file_path}"
)
continue
puzzle = PushWorldPuzzle(puzzle_file_paths[puzzle_name])
plan_string = planning_result["plan"]
plan = [Actions.FROM_CHAR[s] for s in plan_string.upper()]
if puzzle.is_valid_plan(plan):
visited_puzzles.add(puzzle_name)
else:
errors.append(f"Detected invalid plan in {result_file_path}")
for puzzle_name, file_path in puzzle_file_paths.items():
if puzzle_name not in visited_puzzles:
errors.append(f"No solution found for puzzle: {puzzle_name} ({file_path})")
# Raise all errors in batch, which is easier to debug.
if len(errors) > 0:
raise ValueError("\n".join(errors))
|
pushworld-main
|
python3/test/test_dataset.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import dm_env
import numpy as np
from pushworld.dm_env import PushWorldEnv
from pushworld.puzzle import Actions
import pytest
TEST_PUZZLES_PATH = os.path.join(os.path.split(__file__)[0], "puzzles")
def test_observations_and_renderings():
"""Checks that `PushWorldEnv.reset` and `step` return correct observations,
and checks that these observations are consistent with `render`."""
puzzle_file_path = os.path.join(TEST_PUZZLES_PATH, "trivial.pwp")
env = PushWorldEnv(puzzle_file_path)
observation = env.reset().observation
env.observation_spec().validate(observation)
image = env.current_puzzle.render(env.current_state)
assert (image == observation * 255).all()
assert (image == env.render() * 255).all()
observation = env.step(Actions.RIGHT).observation
env.observation_spec().validate(observation)
image = env.current_puzzle.render(env.current_state)
assert (image == observation * 255).all()
assert (image == env.render() * 255).all()
def test_standard_padding():
"""Checks setting `standard_padding = True` in a `PushWorldEnv`."""
puzzle_file_path = os.path.join(TEST_PUZZLES_PATH, "trivial.pwp")
env = PushWorldEnv(puzzle_file_path, standard_padding=False)
o1 = env.reset().observation
env.observation_spec().validate(o1)
o1 = o1.sum(axis=0)
nz1 = np.count_nonzero(o1)
env = PushWorldEnv(puzzle_file_path, standard_padding=True)
o2 = env.reset().observation
env.observation_spec().validate(o2)
o2 = o2.sum(axis=0)
nz2 = np.count_nonzero(o2)
assert o1.shape != o2.shape
assert nz1 == nz2
def test_reward():
"""Checks that `PushWorldEnv.step` returns expected rewards."""
puzzle_file_path = os.path.join(TEST_PUZZLES_PATH, "multiple_goals.pwp")
cost_per_step = -0.01
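# As the assertions below exercise: every step incurs `cost_per_step`, reaching
# a goal adds +1, moving an object off a goal subtracts 1, and completing all
# goals yields a reward of 10.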
env = PushWorldEnv(puzzle_file_path)
env.reset()
assert env.step(Actions.RIGHT)[1] == cost_per_step
assert env.step(Actions.RIGHT)[1] == 1 + cost_per_step
assert env.step(Actions.RIGHT)[1] == -1 + cost_per_step
env.reset()
env.step(Actions.RIGHT)
env.step(Actions.RIGHT)
env.step(Actions.LEFT)
env.step(Actions.LEFT)
env.step(Actions.LEFT)
assert env.step(Actions.LEFT)[1] == 10
@pytest.mark.parametrize("standard_padding", [True, False])
def test_all_goals_achieved(standard_padding: bool):
"""Checks that `PushWorldEnv.step` returns expected values when all goals in the
puzzle are achieved."""
puzzle_file_path = os.path.join(TEST_PUZZLES_PATH, "trivial.pwp")
env = PushWorldEnv(puzzle_file_path, standard_padding=standard_padding)
env.reset()
env.step(Actions.RIGHT)
env.step(Actions.DOWN)
env.step(Actions.RIGHT)
timestep = env.step(Actions.UP)
env.observation_spec().validate(timestep.observation)
assert timestep.reward == 10.0
assert timestep.step_type == dm_env.StepType.LAST
def test_truncation():
"""Checks that `PushWorldEnv.step` correctly returns whether an episode is
truncated by reaching the `max_steps` limit."""
puzzle_file_path = os.path.join(TEST_PUZZLES_PATH, "transitive_pushing.pwp")
env = PushWorldEnv(puzzle_file_path, max_steps=3)
assert env.reset().step_type == dm_env.StepType.FIRST
assert env.step(Actions.LEFT).step_type == dm_env.StepType.MID
assert env.step(Actions.LEFT).step_type == dm_env.StepType.MID
timestep = env.step(Actions.LEFT)
assert timestep.step_type == dm_env.StepType.LAST
assert timestep.reward != 10.0
def test_termination():
"""Checks that `PushWorldEnv.step` currectly returns whether an episode is
terminated by achieving all goals in the puzzle."""
puzzle_file_path = os.path.join(TEST_PUZZLES_PATH, "transitive_pushing.pwp")
env = PushWorldEnv(puzzle_file_path)
env.reset()
env.step(Actions.RIGHT)
env.step(Actions.RIGHT)
timestep = env.step(Actions.RIGHT)
assert timestep.reward == 10.0
assert timestep.step_type == dm_env.StepType.LAST
puzzle_file_path = os.path.join(TEST_PUZZLES_PATH, "multiple_goals.pwp")
env = PushWorldEnv(puzzle_file_path)
for action in [Actions.LEFT, Actions.RIGHT]:
env.reset()
env.step(action)
timestep = env.step(action)
# Goal achieved, but not terminated because another goal remains.
assert timestep.reward > 0
assert timestep.step_type == dm_env.StepType.MID
env.reset()
env.step(Actions.RIGHT)
env.step(Actions.RIGHT)
env.step(Actions.LEFT)
env.step(Actions.LEFT)
env.step(Actions.LEFT)
timestep = env.step(Actions.LEFT)
# Both goals achieved.
assert timestep.reward == 10.0
assert timestep.step_type == dm_env.StepType.LAST
def test_reset():
"""Checks `PushWorldEnv.reset`."""
puzzle_file_path = os.path.join(TEST_PUZZLES_PATH, "trivial.pwp")
env = PushWorldEnv(puzzle_file_path)
o1 = env.reset().observation
o2 = env.step(Actions.RIGHT).observation
o3 = env.reset().observation
assert not (o1 == o2).all()
assert (o1 == o3).all()
env.observation_spec().validate(o1)
env = PushWorldEnv(TEST_PUZZLES_PATH)
initial_states = set(tuple(env.reset().observation.flat) for _ in range(100))
assert len(initial_states) > 1
|
pushworld-main
|
python3/test/test_dm_env.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pushworld.config import PUZZLE_EXTENSION
from pushworld.puzzle import AGENT_IDX, Actions, PushWorldPuzzle
def _get_test_puzzle_file_path(puzzle_name: str) -> str:
"""Returns the file path of the puzzle with the given name."""
return os.path.join(
os.path.split(__file__)[0], "puzzles", puzzle_name + PUZZLE_EXTENSION
)
def test_agent_movement() -> None:
"""Tests that the agent object moves correctly in free space and near walls."""
puzzle = PushWorldPuzzle(_get_test_puzzle_file_path("agent_movement"))
next_state = puzzle.get_next_state(puzzle.initial_state, Actions.LEFT)
assert next_state[0] == (1, 2)
next_state = puzzle.get_next_state(puzzle.initial_state, Actions.RIGHT)
assert next_state[0] == (3, 2)
next_state = puzzle.get_next_state(puzzle.initial_state, Actions.UP)
assert next_state[0] == (2, 1)
next_state = puzzle.get_next_state(puzzle.initial_state, Actions.DOWN)
assert next_state[0] == (2, 3)
# Add a left agent wall
puzzle._agent_collision_map[Actions.LEFT].add((2, 2))
next_state = puzzle.get_next_state(puzzle.initial_state, Actions.LEFT)
assert next_state[0] == (2, 2)
next_state = puzzle.get_next_state(puzzle.initial_state, Actions.RIGHT)
assert next_state[0] == (3, 2)
# Add a right agent wall
puzzle._agent_collision_map[Actions.RIGHT].add((2, 2))
next_state = puzzle.get_next_state(puzzle.initial_state, Actions.RIGHT)
assert next_state[0] == (2, 2)
# Add a top agent wall
puzzle._agent_collision_map[Actions.UP].add((2, 2))
next_state = puzzle.get_next_state(puzzle.initial_state, Actions.UP)
assert next_state[0] == (2, 2)
# Add a bottom agent wall
puzzle._agent_collision_map[Actions.DOWN].add((2, 2))
next_state = puzzle.get_next_state(puzzle.initial_state, Actions.DOWN)
assert next_state[0] == (2, 2)
def test_pushing() -> None:
"""Tests directly pushing an object in contact with the agent."""
puzzle = PushWorldPuzzle(_get_test_puzzle_file_path("pushing"))
next_state = puzzle.get_next_state(puzzle.initial_state, Actions.DOWN)
assert next_state == ((1, 2), (2, 1))
next_state = puzzle.get_next_state(puzzle.initial_state, Actions.RIGHT)
assert next_state == ((2, 1), (3, 1))
next_state = puzzle.get_next_state(next_state, Actions.RIGHT)
assert next_state == ((3, 1), (4, 1))
next_state = puzzle.get_next_state(next_state, Actions.RIGHT)
assert next_state == ((3, 1), (4, 1)) # transitive stopping
def test_transitive_pushing() -> None:
"""Tests indirectly pushing an object using another object as a tool."""
puzzle = PushWorldPuzzle(_get_test_puzzle_file_path("transitive_pushing"))
next_state = puzzle.get_next_state(puzzle.initial_state, Actions.DOWN)
assert next_state == ((1, 2), (5, 1), (3, 1))
next_state = puzzle.get_next_state(puzzle.initial_state, Actions.RIGHT)
assert next_state == ((2, 1), (5, 1), (3, 1))
next_state = puzzle.get_next_state(next_state, Actions.RIGHT)
assert next_state == ((3, 1), (5, 1), (4, 1))
next_state = puzzle.get_next_state(next_state, Actions.RIGHT)
assert next_state == ((4, 1), (6, 1), (5, 1))
next_state = puzzle.get_next_state(next_state, Actions.RIGHT) # transitive stopping
assert next_state == ((4, 1), (6, 1), (5, 1))
next_state = puzzle.get_next_state(next_state, Actions.DOWN)
assert next_state == ((4, 2), (6, 1), (5, 1))
def test_goal_states() -> None:
"""Checks `PushWorldPuzzle.is_goal_state` and `count_achieved_goals`."""
puzzle = PushWorldPuzzle(_get_test_puzzle_file_path("is_goal_state"))
state = ((5, 1), (3, 6), (2, 5))
assert puzzle.is_goal_state(state)
assert puzzle.count_achieved_goals(state) == 2
state = ((2, 8), (3, 6), (2, 5))
assert puzzle.is_goal_state(state)
assert puzzle.count_achieved_goals(state) == 2
state = ((1, 1), (3, 3), (2, 5))
assert not puzzle.is_goal_state(state)
assert puzzle.count_achieved_goals(state) == 1
state = ((1, 1), (3, 6), (2, 2))
assert not puzzle.is_goal_state(state)
assert puzzle.count_achieved_goals(state) == 1
state = ((1, 1), (3, 4), (1, 5))
assert not puzzle.is_goal_state(state)
assert puzzle.count_achieved_goals(state) == 0
def test_trivial_file_loading() -> None:
"""Verifies that a trivial puzzle can step through actions and detect goal states."""
puzzle = PushWorldPuzzle(_get_test_puzzle_file_path("trivial"))
assert puzzle.goal_state == ((3, 1),)
assert puzzle.initial_state == ((1, 2), (2, 2))
# Verify the collision maps
assert len(puzzle._agent_collision_map[Actions.LEFT]) == 3
assert len(puzzle._agent_collision_map[Actions.RIGHT]) == 3
assert len(puzzle._agent_collision_map[Actions.UP]) == 3
assert len(puzzle._agent_collision_map[Actions.DOWN]) == 3
assert puzzle._agent_collision_map[Actions.LEFT] == set([(2, 1), (1, 2), (2, 3)])
assert puzzle._agent_collision_map[Actions.UP] == set([(1, 2), (2, 1), (3, 1)])
assert puzzle._agent_collision_map[Actions.RIGHT] == set([(3, 1), (3, 2), (3, 3)])
assert puzzle._agent_collision_map[Actions.DOWN] == set([(1, 2), (2, 3), (3, 3)])
# Verify the solution to the puzzle
state = puzzle.initial_state
assert not puzzle.is_goal_state(state)
state = puzzle.get_next_state(state, Actions.LEFT) # Push into a wall. No change.
assert state == ((1, 2), (2, 2))
assert not puzzle.is_goal_state(state)
state = puzzle.get_next_state(state, Actions.UP) # Push into a wall. No change.
assert state == ((1, 2), (2, 2))
assert not puzzle.is_goal_state(state)
state = puzzle.get_next_state(
state, Actions.DOWN
) # Push into an agent wall. No change.
assert state == ((1, 2), (2, 2))
assert not puzzle.is_goal_state(state)
state = puzzle.get_next_state(state, Actions.RIGHT)
assert state == ((2, 2), (3, 2))
assert not puzzle.is_goal_state(state)
state = puzzle.get_next_state(
state, Actions.RIGHT
) # Transitive stopping. No change.
assert state == ((2, 2), (3, 2))
assert not puzzle.is_goal_state(state)
state = puzzle.get_next_state(state, Actions.DOWN)
assert state == ((2, 3), (3, 2))
assert not puzzle.is_goal_state(state)
state = puzzle.get_next_state(state, Actions.DOWN) # Push into a wall. No change.
assert state == ((2, 3), (3, 2))
assert not puzzle.is_goal_state(state)
state = puzzle.get_next_state(state, Actions.RIGHT)
assert state == ((3, 3), (3, 2))
assert not puzzle.is_goal_state(state)
state = puzzle.get_next_state(state, Actions.RIGHT) # Push into a wall. No change.
assert state == ((3, 3), (3, 2))
assert not puzzle.is_goal_state(state)
state = puzzle.get_next_state(state, Actions.UP)
assert state == ((3, 2), (3, 1))
assert puzzle.is_goal_state(state)
state = puzzle.get_next_state(state, Actions.UP) # Transitive stopping. No change.
assert state == ((3, 2), (3, 1))
assert puzzle.is_goal_state(state)
def test_file_parsing() -> None:
"""Tests that all properties of a puzzle are loaded correctly from a file,
including computed collision maps."""
puzzle = PushWorldPuzzle(_get_test_puzzle_file_path("file_parsing"))
assert puzzle.dimensions == (12, 18)
assert puzzle.goal_state == ((6, 5), (3, 4))
assert puzzle.initial_state == (
(1, 12), # S
(6, 14), # M4
(1, 3), # M1
(4, 1), # M0
(2, 7), # M2
(3, 8), # M3
)
# Object indices in the state (the agent occupies index 0)
M4, M1, M0, M2, M3 = range(1, 6)
assert len(puzzle._agent_collision_map[Actions.LEFT]) == 16
assert len(puzzle._wall_collision_map[Actions.LEFT][M0]) == 15
assert len(puzzle._wall_collision_map[Actions.LEFT][M1]) == 16
assert len(puzzle._wall_collision_map[Actions.LEFT][M2]) == 14
assert len(puzzle._wall_collision_map[Actions.LEFT][M3]) == 16
assert len(puzzle._wall_collision_map[Actions.LEFT][M4]) == 15
assert len(puzzle._agent_collision_map[Actions.RIGHT]) == 16
assert len(puzzle._wall_collision_map[Actions.RIGHT][M0]) == 15
assert len(puzzle._wall_collision_map[Actions.RIGHT][M1]) == 16
assert len(puzzle._wall_collision_map[Actions.RIGHT][M2]) == 14
assert len(puzzle._wall_collision_map[Actions.RIGHT][M3]) == 16
assert len(puzzle._wall_collision_map[Actions.RIGHT][M4]) == 15
assert len(puzzle._agent_collision_map[Actions.UP]) == 9
assert len(puzzle._wall_collision_map[Actions.UP][M0]) == 9
assert len(puzzle._wall_collision_map[Actions.UP][M1]) == 10
assert len(puzzle._wall_collision_map[Actions.UP][M2]) == 8
assert len(puzzle._wall_collision_map[Actions.UP][M3]) == 10
assert len(puzzle._wall_collision_map[Actions.UP][M4]) == 9
assert len(puzzle._movable_collision_map[Actions.DOWN][AGENT_IDX][M4]) == 4
assert len(puzzle._movable_collision_map[Actions.DOWN][AGENT_IDX][M3]) == 3
assert len(puzzle._movable_collision_map[Actions.DOWN][M1][M4]) == 2
assert len(puzzle._movable_collision_map[Actions.DOWN][M1][M2]) == 4
assert len(puzzle._movable_collision_map[Actions.LEFT][M1][M4]) == 2
assert len(puzzle._movable_collision_map[Actions.LEFT][M1][M2]) == 4
assert len(puzzle._movable_collision_map[Actions.RIGHT][M1][M4]) == 2
assert len(puzzle._movable_collision_map[Actions.RIGHT][M1][M2]) == 4
assert len(puzzle._movable_collision_map[Actions.UP][M1][M4]) == 2
assert len(puzzle._movable_collision_map[Actions.UP][M1][M2]) == 4
def test_rendering() -> None:
"""Tests `PushWorldPuzzle.render` and `render_plan`."""
puzzle = PushWorldPuzzle(_get_test_puzzle_file_path("trivial"))
state = puzzle.initial_state
initial_image = puzzle.render(state)
assert initial_image.shape == (100, 100, 3)
plan = [Actions.RIGHT, Actions.DOWN, Actions.RIGHT, Actions.UP]
plan_images = puzzle.render_plan(plan)
assert (initial_image == plan_images[0]).all()
# Compare hashes of the images so that this file doesn't store all of the image
# arrays.
image_hashes = [
8141256401900123811,
1770142108181252064,
4744825492003518882,
-7463149466192975143,
-8235536721686713717,
]
assert image_hashes == [hash(tuple(image.flat)) for image in plan_images]
|
pushworld-main
|
python3/test/test_puzzle.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The following assumes the gym API with an (observation, info) reset and a
# 5-tuple step return (gym >= 0.26).
import os
import numpy as np
import pytest
from pushworld.gym_env import PushWorldEnv
from pushworld.puzzle import Actions
TEST_PUZZLES_PATH = os.path.join(os.path.split(__file__)[0], "puzzles")
def test_observations_and_renderings():
"""Checks that `PushWorldEnv.reset` and `step` return correct observations,
and checks that these observations are consistent with `render`."""
puzzle_file_path = os.path.join(TEST_PUZZLES_PATH, "trivial.pwp")
env = PushWorldEnv(puzzle_file_path)
observation, info = env.reset()
assert observation in env.observation_space
image = env.current_puzzle.render(info["puzzle_state"])
assert (image == observation * 255).all()
assert (image == env.render()).all()
observation, _, _, _, info = env.step(Actions.RIGHT)
assert observation in env.observation_space
image = env.current_puzzle.render(info["puzzle_state"])
assert (image == observation * 255).all()
assert (image == env.render()).all()
def test_standard_padding():
"""Checks setting `standard_padding = True` in a `PushWorldEnv`."""
puzzle_file_path = os.path.join(TEST_PUZZLES_PATH, "trivial.pwp")
env = PushWorldEnv(puzzle_file_path, standard_padding=False)
o1, _ = env.reset()
assert o1 in env.observation_space
o1 = o1.sum(axis=0)
nz1 = np.count_nonzero(o1)
env = PushWorldEnv(puzzle_file_path, standard_padding=True)
o2, _ = env.reset()
assert o2 in env.observation_space
o2 = o2.sum(axis=0)
nz2 = np.count_nonzero(o2)
assert o1.shape != o2.shape
assert nz1 == nz2
def test_reward():
"""Checks that `PushWorldEnv.step` returns expected rewards."""
puzzle_file_path = os.path.join(TEST_PUZZLES_PATH, "multiple_goals.pwp")
cost_per_step = -0.01
env = PushWorldEnv(puzzle_file_path)
env.reset()
assert env.step(Actions.RIGHT)[1] == cost_per_step
assert env.step(Actions.RIGHT)[1] == 1 + cost_per_step
assert env.step(Actions.RIGHT)[1] == -1 + cost_per_step
env.reset()
env.step(Actions.RIGHT)
env.step(Actions.RIGHT)
env.step(Actions.LEFT)
env.step(Actions.LEFT)
env.step(Actions.LEFT)
assert env.step(Actions.LEFT)[1] == 10
@pytest.mark.parametrize("standard_padding", [True, False])
def test_all_goals_achieved(standard_padding: bool):
"""Checks that `PushWorldEnv.step` returns expected values when all goals in the
puzzle are achieved."""
puzzle_file_path = os.path.join(TEST_PUZZLES_PATH, "trivial.pwp")
env = PushWorldEnv(puzzle_file_path, standard_padding=standard_padding)
env.reset()
env.step(Actions.RIGHT)
env.step(Actions.DOWN)
env.step(Actions.RIGHT)
obs, reward, terminated, truncated, _ = env.step(Actions.UP)
assert obs in env.observation_space
assert reward == 10
assert terminated
assert not truncated
def test_truncation():
"""Checks that `PushWorldEnv.step` correctly returns whether an episode is
truncated by reaching the `max_steps` limit."""
puzzle_file_path = os.path.join(TEST_PUZZLES_PATH, "transitive_pushing.pwp")
env = PushWorldEnv(puzzle_file_path, max_steps=3)
env.reset()
assert not env.step(Actions.LEFT)[3]
assert not env.step(Actions.LEFT)[3]
assert env.step(Actions.LEFT)[3]
def test_termination():
"""Checks that `PushWorldEnv.step` currectly returns whether an episode is
terminated by achieving all goals in the puzzle."""
puzzle_file_path = os.path.join(TEST_PUZZLES_PATH, "transitive_pushing.pwp")
env = PushWorldEnv(puzzle_file_path)
env.reset()
env.step(Actions.RIGHT)
env.step(Actions.RIGHT)
_, reward, terminated, truncated, _ = env.step(Actions.RIGHT)
assert reward == 10
assert terminated
assert not truncated
puzzle_file_path = os.path.join(TEST_PUZZLES_PATH, "multiple_goals.pwp")
env = PushWorldEnv(puzzle_file_path)
for action in [Actions.LEFT, Actions.RIGHT]:
env.reset()
env.step(action)
_, reward, terminated, _, _ = env.step(action)
# Goal achieved, but not terminated because another goal remains.
assert reward > 0
assert not terminated
env.reset()
env.step(Actions.RIGHT)
env.step(Actions.RIGHT)
env.step(Actions.LEFT)
env.step(Actions.LEFT)
env.step(Actions.LEFT)
_, reward, terminated, _, _ = env.step(Actions.LEFT)
# Both goals achieved.
assert reward == 10
assert terminated
def test_reset():
"""Checks `PushWorldEnv.reset`."""
puzzle_file_path = os.path.join(TEST_PUZZLES_PATH, "trivial.pwp")
env = PushWorldEnv(puzzle_file_path)
o1, _ = env.reset()
o2 = env.step(Actions.RIGHT)[0]
o3, _ = env.reset()
assert not (o1 == o2).all()
assert (o1 == o3).all()
assert o1 in env.observation_space
env = PushWorldEnv(TEST_PUZZLES_PATH)
initial_states = set(tuple(env.reset()[0].flat) for _ in range(100))
assert len(initial_states) > 1
|
pushworld-main
|
python3/test/test_gym_env.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
import fire
from pushworld.benchmark_rgd import benchmark_rgd_planner
if __name__ == "__main__":
fire.Fire(benchmark_rgd_planner)
|
pushworld-main
|
python3/scripts/benchmark_rgd.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
import fire
from pushworld.pddl import convert_all_puzzles_to_pddl
if __name__ == "__main__":
fire.Fire(convert_all_puzzles_to_pddl)
|
pushworld-main
|
python3/scripts/convert_to_pddl.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
import fire
from pushworld.visualization import render_puzzle_previews
if __name__ == "__main__":
fire.Fire(render_puzzle_previews)
|
pushworld-main
|
python3/scripts/render_puzzle_previews.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
import fire
from pushworld.plot import plot_puzzles_solved_vs_time
if __name__ == "__main__":
fire.Fire(plot_puzzles_solved_vs_time)
|
pushworld-main
|
python3/scripts/plot_puzzles_solved_vs_time.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
import fire
from pushworld.sas import convert_all_pddls_to_sas
if __name__ == "__main__":
fire.Fire(convert_all_pddls_to_sas)
|
pushworld-main
|
python3/scripts/convert_to_sas.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
import fire
from pushworld.visualization import render_plans
if __name__ == "__main__":
fire.Fire(render_plans)
|
pushworld-main
|
python3/scripts/render_plans.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from pushworld.puzzle import NUM_ACTIONS
from pushworld.gym_env import PushWorldEnv
from absl import app
from absl import flags
_PATH = flags.DEFINE_string('path', '', 'Puzzle file path.')
matplotlib.use('TkAgg')
def main(argv):
# Create PushWorldEnv
env = PushWorldEnv(_PATH.value)
# Reset the environment and show observation
image, info = env.reset()
plt.figure(figsize=(5, 5))
plt.imshow(image)
plt.ion()
plt.show()
# Randomly take 10 actions and show observation
for _ in range(10):
rets = env.step(np.random.randint(NUM_ACTIONS))
image = rets[0]
plt.imshow(image)
plt.draw()
plt.pause(0.5)
if __name__ == '__main__':
app.run(main)
|
pushworld-main
|
python3/scripts/demo_gym_env.py
|
#!/usr/bin/env python3
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fire
from pushworld.generate import generate_level0_puzzles
if __name__ == "__main__":
fire.Fire(generate_level0_puzzles)
|
pushworld-main
|
python3/scripts/generate_level0_puzzles.py
|
#!/usr/bin/env python3
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fire
from pushworld.transform import create_transformed_puzzles
if __name__ == "__main__":
fire.Fire(create_transformed_puzzles)
|
pushworld-main
|
python3/scripts/create_transformed_puzzles.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import app
from absl import flags
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from pushworld.dm_env import PushWorldEnv
from pushworld.puzzle import NUM_ACTIONS
_PATH = flags.DEFINE_string('path', '', 'Puzzle file path.')
matplotlib.use('TkAgg')
def main(argv):
# Create PushWorldEnv
env = PushWorldEnv(_PATH.value)
# Reset the environment and show observation
timestep = env.reset()
plt.figure(figsize=(5, 5))
plt.imshow(timestep.observation)
plt.ion()
plt.show()
# Randomly take 10 actions and show observation
for _ in range(10):
timestep = env.step(np.random.randint(NUM_ACTIONS))
plt.imshow(timestep.observation)
plt.draw()
plt.pause(0.5)
if __name__ == '__main__':
app.run(main)
|
pushworld-main
|
python3/scripts/demo_dm_env.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
from pushworld.config import DOMAIN_SUFFIX, FAST_DOWNWARD_PATH, PROBLEM_SUFFIX
def pddl_to_sas(
domain_file_path: str,
problem_file_path: str,
sas_file_path: str,
fast_downward_executable: str = FAST_DOWNWARD_PATH,
) -> None:
"""Runs the Fast Downward (https://www.fast-downward.org/) translator on
the given PDDL domain and problem files to convert them into an SAS file.
Args:
domain_file_path: The path of the PDDL domain file.
problem_file_path: The path of the PDDL problem file.
sas_file_path: The path of the written SAS file.
fast_downward_executable: The path of the `fast-downward.py` executable.
"""
proc = subprocess.Popen(
[fast_downward_executable, "--translate"]
+ ["--sas-file", sas_file_path]
+ [domain_file_path, problem_file_path],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
out = proc.communicate()[0].decode("utf-8")
if "translate exit code: 0" not in out:
raise RuntimeError(
f"Failed to convert {domain_file_path} and {problem_file_path} "
"into SAS format."
)
def convert_all_pddls_to_sas(
pddl_path: str,
fast_downward_executable: str = FAST_DOWNWARD_PATH,
domain_suffix: str = DOMAIN_SUFFIX,
problem_suffix: str = PROBLEM_SUFFIX,
) -> None:
"""Iterates through the directory of PDDL files and converts them into SAS files.
This function uses the Fast Downward (https://www.fast-downward.org/) translator
to convert PDDL into SAS format.
Args:
pddl_path: The path of a directory that contains pairs of PDDL domains
and problems. PDDL files can be nested in subdirectories, but pairs must
occur in the same subdirectory. For each detected pair, a SAS file is
written in the same directory.
fast_downward_executable: The path of the `fast-downward.py` executable.
domain_suffix: The suffix of PDDL domain files.
problem_suffix: The suffix of PDDL problem files.
"""
for pddl_subdir, _, filenames in os.walk(pddl_path):
for filename in filenames:
if filename.endswith(domain_suffix):
domain_file_path = os.path.join(pddl_subdir, filename)
puzzle_name = domain_file_path[: -len(domain_suffix)]
problem_file_path = puzzle_name + problem_suffix
sas_file_path = puzzle_name + ".sas"
pddl_to_sas(
domain_file_path=domain_file_path,
problem_file_path=problem_file_path,
sas_file_path=sas_file_path,
fast_downward_executable=fast_downward_executable,
)
|
pushworld-main
|
python3/src/pushworld/sas.py
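A minimal usage sketch for the module above, assuming a local directory of PDDL domain/problem pairs and a Fast Downward checkout; both paths below are illustrative, not part of the repository.

```python
# Hypothetical paths; adjust to a real PDDL directory and Fast Downward checkout.
from pushworld.sas import convert_all_pddls_to_sas

convert_all_pddls_to_sas(
    pddl_path="./pddl_puzzles",  # pairs of *-domain.pddl / *-problem.pddl files
    fast_downward_executable="./downward/fast-downward.py",
)
# For each detected pair, a .sas file is written next to the PDDL files.
```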
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Optional
import tqdm
import yaml
from pushworld.config import BENCHMARK_PUZZLES_PATH, PUZZLE_EXTENSION, RGD_PLANNER_PATH
from pushworld.puzzle import Actions, PushWorldPuzzle
from pushworld.utils.filesystem import map_files_with_extension
from pushworld.utils.process import GIGABYTE, run_process
def benchmark_rgd_planner(
results_path: str = "nrgd_results",
puzzles_path: str = BENCHMARK_PUZZLES_PATH,
planner_path: str = RGD_PLANNER_PATH,
heuristic: str = "N+RGD",
time_limit: Optional[int] = 60 * 30,
memory_limit: Optional[float] = 30,
) -> None:
"""Benchmarks a greedy best-first search using the recursive graph distance
heuristic on a collection of PushWorld puzzles.
Args:
results_path: The path of the directory in which to save the benchmark results,
which include one YAML file per puzzle. These YAMLs contain the following
top-level key-value pairs:
planner: <name of the planner>
puzzle: <name of the puzzle that the planner attempted to solve>
plan: <a string of 'UDLR' characters, or `null` if no plan found>
planning_time: <the time the planner spent searching for a plan>
failure_reason: <if a plan was not found, this summarizes why>
puzzles_path: The path of the directory from which to load PushWorld puzzles
to benchmark. Can also be the path of a single PushWorld puzzle file.
planner_path: The path of the RGD planner executable.
heuristic: The heuristic mode of the planner. See the RGD planner executable
for available options.
time_limit: In seconds, the maximum CPU time for which the planner is allowed
to run to solve a single puzzle. If None, there is no limit.
memory_limit: In gigabytes, the maximum memory that the planner is allowed
to use to solve a single puzzle. If None, there is no limit.
"""
heuristic_to_planner_name = {
"N+RGD": "Novelty+RGD",
"RGD": "RGD",
}
if heuristic not in heuristic_to_planner_name:
raise ValueError(
f'Unknown heuristic: "{heuristic}". '
f"Supported values are {list(heuristic_to_planner_name.keys())}"
)
planner_name = heuristic_to_planner_name[heuristic]
for puzzle_file_path, planning_result_file_path in tqdm.tqdm(
map_files_with_extension(
input_file_or_directory_path=puzzles_path,
input_extension=PUZZLE_EXTENSION,
output_directory_path=results_path,
output_extension=".yaml",
)
):
out, _, planning_time = run_process(
            command=[planner_path, heuristic, puzzle_file_path],
time_limit=time_limit,
memory_limit=(
None if memory_limit is None else int(memory_limit * GIGABYTE)
),
)
puzzle_name = os.path.splitext(os.path.split(puzzle_file_path)[1])[0]
planning_result = {
"planner": planner_name,
"puzzle": puzzle_name,
"planning_time": planning_time,
}
if out == "":
planning_result["failure_reason"] = "time limit reached"
planning_result["plan"] = None
planning_result["planning_time"] = time_limit
elif out == "NO SOLUTION":
planning_result["failure_reason"] = "no solution exists"
planning_result["plan"] = None
elif "std::bad_alloc" in out or "failed to map segment" in out:
planning_result["failure_reason"] = "memory error"
planning_result["plan"] = None
elif set(out).issubset("UDLR"):
puzzle = PushWorldPuzzle(puzzle_file_path)
if puzzle.is_valid_plan([Actions.FROM_CHAR[ch] for ch in out]):
planning_result["plan"] = out
else:
planning_result["failure_reason"] = "invalid plan"
planning_result["plan"] = None
else:
planning_result["failure_reason"] = "unknown"
planning_result["plan"] = None
with open(planning_result_file_path, "w") as planning_result_file:
yaml.dump(planning_result, planning_result_file)
|
pushworld-main
|
python3/src/pushworld/benchmark_rgd.py
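A minimal usage sketch for the benchmarking entry point above, assuming the C++ RGD planner has been built at `RGD_PLANNER_PATH`; the puzzle directory and the limits below are illustrative.

```python
# Hypothetical invocation; the puzzle directory and limits are placeholders.
from pushworld.benchmark_rgd import benchmark_rgd_planner

benchmark_rgd_planner(
    results_path="nrgd_results",  # one YAML result file is written per puzzle
    puzzles_path="./my_puzzles",  # a directory of .pwp files (or a single file)
    heuristic="N+RGD",            # or "RGD"
    time_limit=60,                # seconds of CPU time per puzzle
    memory_limit=4,               # gigabytes per puzzle
)
```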
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import yaml
from pushworld.utils.filesystem import iter_files_with_extension
def plot_puzzles_solved_vs_time(
planner_results_path: str,
output_file_path: str = "puzzles_solved_vs_time.png",
planning_timeout: int = 60 * 30,
) -> None:
"""Creates a plot of number of puzzles solved vs. planning time for each planner.
The plot's right y-axis shows the percentage of puzzles solved.
Args:
planner_results_path: The path of a directory containing planner result files
in `.yaml` format. Each file must contain the following top-level key-value
pairs:
planner: <name of the planner>
puzzle: <name of the puzzle that the planner attempted to solve>
plan: <a string of 'UDLR' characters, or `null` if no plan found>
planning_time: <the time the planner spent searching for a plan>
output_file_path: The path of the file in which to save the plot as a PNG image.
planning_timeout: In seconds, the maximum time all planners were allowed to
solve each puzzle. The plot is extended to include this as the last time on
the x-axis.
Raises:
ValueError: If planners did not attempt to solve all of the same puzzles, or
if there are multiple results for a planner on the same puzzle.
"""
_, ax1 = plt.subplots()
ax2 = ax1.twinx()
# A map from planner names to lists of the times taken to solve puzzles.
planning_times_per_planner = defaultdict(list)
# A map from planner names to the puzzles they attempted to solve.
puzzles_tried_per_planner = defaultdict(set)
for result_file_path in iter_files_with_extension(planner_results_path, ".yaml"):
with open(result_file_path, "r") as result_file:
planning_result = yaml.safe_load(result_file)
planner_name = planning_result["planner"]
puzzle_name = planning_result["puzzle"]
if puzzle_name in puzzles_tried_per_planner[planner_name]:
raise ValueError(
f'Planner "{planner_name}" has multiple results for the '
f'"{puzzle_name}" puzzle'
)
puzzles_tried_per_planner[planner_name].add(puzzle_name)
if planning_result["plan"] is not None:
planning_times_per_planner[planning_result["planner"]].append(
planning_result["planning_time"]
)
# Verify that all planners solved the same puzzles
puzzles_tried_per_planner = list(puzzles_tried_per_planner.items())
for planner_name, puzzles_tried in puzzles_tried_per_planner[1:]:
if puzzles_tried != puzzles_tried_per_planner[0][1]:
raise ValueError(
f'Planners "{planner_name}" and "{puzzles_tried_per_planner[0][0]}" '
"did not attempt the same puzzles."
)
for planner_name, puzzle_solve_times in sorted(planning_times_per_planner.items()):
# X and Y values to plot
x = sorted(puzzle_solve_times)
y = list(range(1, 1 + len(x)))
# Replace all 0s with the minimum reported time.
for i, t in enumerate(x):
if t > 0:
break
for j in range(i):
x[j] = t
# Extend all plots to the timeout
x.append(planning_timeout)
y.append(y[-1])
ax1.plot(x, y, label=planner_name)
num_puzzles = len(puzzles_tried_per_planner[0][1])
max_solved_puzzles = max(map(len, planning_times_per_planner.values()))
ax1.set_xscale("log")
ax1.set_xlabel("Planning Time (seconds)")
ax1.set_ylabel("Number of Puzzles Solved")
mn, mx = ax1.set_ylim(0, max_solved_puzzles * 1.05)
ax2.set_ylabel("% of Puzzles Solved")
ax2.set_ylim(mn * 100 / num_puzzles, mx * 100 / num_puzzles)
ax1.legend()
plt.tight_layout()
plt.savefig(output_file_path)
|
pushworld-main
|
python3/src/pushworld/plot.py
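A minimal usage sketch for the plotting function above, assuming a directory of planner result YAMLs such as those written by `benchmark_rgd_planner`; the paths and timeout below are illustrative.

```python
# Hypothetical paths; planning_timeout should match the limit used when benchmarking.
from pushworld.plot import plot_puzzles_solved_vs_time

plot_puzzles_solved_vs_time(
    planner_results_path="nrgd_results",
    output_file_path="puzzles_solved_vs_time.png",
    planning_timeout=60,
)
```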
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains configurable file extensions, data locations, and executable paths.
"""
import os
MODULE_PATH = os.path.split(__file__)[0]
DOMAIN_SUFFIX = "-domain.pddl"
PROBLEM_SUFFIX = "-problem.pddl"
PUZZLE_EXTENSION = ".pwp"
# Data paths
BENCHMARK_PATH = os.path.join(MODULE_PATH, "../../../benchmark")
BENCHMARK_PUZZLES_PATH = os.path.join(BENCHMARK_PATH, "puzzles")
BENCHMARK_SOLUTIONS_PATH = os.path.join(BENCHMARK_PATH, "solutions")
# Paths to planner executables
RGD_PLANNER_PATH = os.path.join(MODULE_PATH, "../../../cpp/build/bin/run_planner")
FAST_DOWNWARD_PATH = os.path.join(MODULE_PATH, "../../../../downward/fast-downward.py")
|
pushworld-main
|
python3/src/pushworld/config.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import Optional
import dm_env
from dm_env import specs
import numpy as np
from pushworld.config import PUZZLE_EXTENSION
from pushworld.puzzle import (
DEFAULT_BORDER_WIDTH,
DEFAULT_PIXELS_PER_CELL,
NUM_ACTIONS,
PushWorldPuzzle,
State,
)
from pushworld.utils.env_utils import get_max_puzzle_dimensions, render_observation_padded
from pushworld.utils.filesystem import iter_files_with_extension
class PushWorldEnv(dm_env.Environment):
"""A dm_env environment for PushWorld puzzles.
Rewards are calculated according to Appendix D of
https://arxiv.org/pdf/1707.06203.pdf with one change: The negative reward
per step is reduced to 0.01, since PushWorld puzzles tend to have longer
solutions than Sokoban puzzles.
Args:
puzzle_path: The path of a PushWorld puzzle file or of a directory that
contains puzzle files, possibly nested in subdirectories. All discovered
puzzles are loaded, and the `reset` method randomly selects a new puzzle
each time it is called.
max_steps: If not None, the `step` method will return step type dm_env.StepType.LAST
after calling it `max_steps` times since the most recent call of `reset`.
border_width: The pixel width of the border drawn to indicate object
boundaries. Must be >= 1.
pixels_per_cell: The pixel width and height of a discrete position in the
environment. Must be >= 1 + 2 * border_width.
standard_padding: If True, all puzzles are padded to the maximum width and
height of the puzzles in the `pushworld.config.BENCHMARK_PUZZLES_PATH`
directory. If False, puzzles are padded to the maximum dimensions of all
puzzles found in the `puzzle_path`.
"""
def __init__(
self,
puzzle_path: str,
max_steps: Optional[int] = None,
border_width: int = DEFAULT_BORDER_WIDTH,
pixels_per_cell: int = DEFAULT_PIXELS_PER_CELL,
standard_padding: bool = False,
) -> None:
self._puzzles = []
for puzzle_file_path in iter_files_with_extension(
puzzle_path, PUZZLE_EXTENSION
):
self._puzzles.append(PushWorldPuzzle(puzzle_file_path))
if len(self._puzzles) == 0:
raise ValueError(f"No PushWorld puzzles found in: {puzzle_path}")
if border_width < 1:
raise ValueError("border_width must be >= 1")
if pixels_per_cell < 3:
raise ValueError("pixels_per_cell must be >= 3")
self._max_steps = max_steps
self._pixels_per_cell = pixels_per_cell
self._border_width = border_width
widths, heights = zip(*[puzzle.dimensions for puzzle in self._puzzles])
self._max_cell_width = max(widths)
self._max_cell_height = max(heights)
if standard_padding:
standard_cell_height, standard_cell_width = get_max_puzzle_dimensions()
if standard_cell_height < self._max_cell_height:
raise ValueError(
"`standard_padding` is True, but the maximum puzzle height in "
"BENCHMARK_PUZZLES_PATH is less than the height of the puzzle(s) "
"in the given `puzzle_path`."
)
else:
self._max_cell_height = standard_cell_height
if standard_cell_width < self._max_cell_width:
raise ValueError(
"`standard_padding` is True, but the maximum puzzle width in "
"BENCHMARK_PUZZLES_PATH is less than the width of the puzzle(s) "
"in the given `puzzle_path`."
)
else:
self._max_cell_width = standard_cell_width
# Use a fixed arbitrary seed for reproducibility of results and for
# deterministic tests.
self._random_generator = random.Random(123)
self._current_puzzle = None
self._current_state = None
self._action_space = specs.DiscreteArray(
num_values=NUM_ACTIONS, dtype=int, name="action"
)
self._observation_space = specs.BoundedArray(
shape=render_observation_padded(
                self._puzzles[0],
                self._puzzles[0].initial_state,
                self._max_cell_height,
                self._max_cell_width,
                self._pixels_per_cell,
                self._border_width,
).shape,
dtype=np.float32,
name="board",
minimum=0.0,
maximum=1.0,
)
def observation_spec(self) -> specs.BoundedArray:
"""Implements `dm_env.Environment.observation_spec`."""
return self._observation_space
def action_spec(self) -> specs.DiscreteArray:
"""Implements `dm_env.Environment.action_spec`."""
return self._action_space
@property
    def current_puzzle(self) -> Optional[PushWorldPuzzle]:
"""The current puzzle, or `None` if `reset` has not yet been called."""
return self._current_puzzle
@property
    def current_state(self) -> Optional[State]:
"""The current state, or `None` if the environment has just been initialized.
"""
return self._current_state
def reset(
self,
seed: Optional[int] = None,
) -> dm_env.TimeStep:
"""Implements `dm_env.Environment.reset`.
This function randomly selects a puzzle from those provided to the
constructor and resets the environment to the initial state of the puzzle.
Args:
seed: If not None, the random number generator in this environment is
reset with this seed.
        Returns:
          A `dm_env.TimeStep` with step type FIRST. Its observation is the
          initial observation of the environment after the reset, formatted as
          an RGB image with shape (height, width, 3), `float32` type, and
          values in the range [0, 1].
"""
if seed is not None:
self._random_generator = random.Random(seed)
self._current_puzzle = self._random_generator.choice(self._puzzles)
self._current_state = self._current_puzzle.initial_state
self._current_achieved_goals = self._current_puzzle.count_achieved_goals(
self._current_state
)
self._steps = 0
observation = render_observation_padded(
            self._current_puzzle,
            self._current_state,
            self._max_cell_height,
            self._max_cell_width,
            self._pixels_per_cell,
            self._border_width,
)
return dm_env.restart(observation)
def step(self, action: int) -> dm_env.TimeStep:
"""Implements `dm_env.Environment.step`.
Args:
action: Index corresponding to the categorical action
Returns:
          TimeStep object bundling the reward, discount and observation. The
          returned observation is an RGB image of the new state of the
          environment, formatted as a `float32` array with shape
          (height, width, 3) and values ranging from [0, 1].
"""
try:
self._action_space.validate(action)
except ValueError:
raise ValueError("The provided action is not in the action space.")
if self._current_state is None:
raise RuntimeError("reset() must be called before step() can be called.")
self._steps += 1
previous_state = self._current_state
self._current_state = self._current_puzzle.get_next_state(
self._current_state, action
)
observation = render_observation_padded(
            self._current_puzzle,
            self._current_state,
            self._max_cell_height,
            self._max_cell_width,
            self._pixels_per_cell,
            self._border_width,
)
terminated = self._current_puzzle.is_goal_state(self._current_state)
if terminated:
reward = 10.0
else:
previous_achieved_goals = self._current_puzzle.count_achieved_goals(
previous_state
)
current_achieved_goals = self._current_puzzle.count_achieved_goals(
self._current_state
)
reward = current_achieved_goals - previous_achieved_goals - 0.01
        truncated = (
            False if self._max_steps is None else self._steps >= self._max_steps
        )
        if terminated:
            return dm_env.termination(reward, observation)
        elif truncated:
            # Reaching the step limit ends the episode without marking the state
            # terminal, so the discount is not forced to zero.
            return dm_env.truncation(reward, observation)
        else:
            return dm_env.transition(reward, observation)
def render(self, mode="rgb_array") -> np.ndarray:
"""Render an observation from the environment state.
Returns:
An RGB image of the current state of the environment, formatted as a
`np.float32` array with shape (height, width, 3).
"""
assert mode == "rgb_array", "mode must be rgb_array."
return (
self._current_puzzle.render(
self._current_state,
border_width=self._border_width,
pixels_per_cell=self._pixels_per_cell,
).astype(np.float32)
/ 255
)
|
pushworld-main
|
python3/src/pushworld/dm_env.py
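A minimal interaction-loop sketch for the dm_env wrapper above, using a random policy as a stand-in for an agent; the puzzle path is illustrative.

```python
# Hypothetical puzzle directory; any .pwp file or directory of .pwp files works.
import numpy as np
from pushworld.dm_env import PushWorldEnv
from pushworld.puzzle import NUM_ACTIONS

env = PushWorldEnv("./my_puzzles", max_steps=100)
timestep = env.reset(seed=0)
episode_return = 0.0
while not timestep.last():
    action = np.random.randint(NUM_ACTIONS)  # stand-in for an agent's policy
    timestep = env.step(action)
    episode_return += timestep.reward
print("episode return:", episode_return)
```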
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
from pushworld.benchmark_rgd import benchmark_rgd_planner
import tqdm
import yaml
class FailedToGenerateError(Exception):
"""Raised if puzzle generation can't be completed in current state."""
pass
def place_object(
puzzle: list[list[str]],
object_symbol: str,
shape: list[tuple[int, int]],
):
"""Draws the object into the given image.
Args:
puzzle: The partially generated puzzle to place an object in.
object_symbol: The string to represent the object. e.g. "A", "W".
shape: Offset indicies describing the object's shape.
Raises:
FailedToGenerateError: if the puzzle can't be completed
"""
puzzle_height = len(puzzle)
puzzle_width = len(puzzle[0])
shape_height = 1 + max([y for y, x in shape])
shape_width = 1 + max([x for y, x in shape])
attempts = 0
while True:
attempts += 1
if attempts > 100:
raise FailedToGenerateError()
# get a random location to attempt to place the object
x = random.choice(range(puzzle_width + 1 - shape_width))
y = random.choice(range(puzzle_height + 1 - shape_height))
# check that that location is clear for the shape
clear = True
for offset in shape:
y_offset, x_offset = offset
if puzzle[y + y_offset][x + x_offset] != ".":
clear = False
# if the location is clear, place the object
if clear:
for offset in shape:
y_offset, x_offset = offset
puzzle[y + y_offset][x + x_offset] = object_symbol
break
def generate_puzzle(
puzzle_width: int,
puzzle_height: int,
num_walls: int,
num_obstacles: int,
num_goal_objects: int,
possible_object_shapes: list[list[tuple[int, int]]],
):
"""Attempts to generate a single puzzle.
Args:
puzzle_width: The width of the puzzle.
puzzle_height: The height of the puzzle.
num_walls: The number of single-pixel walls to place in the puzzle.
num_obstacles: The number of obstacle objects to place.
num_goal_objects: The number of goal objects (and goals) to place.
possible_object_shapes: A list of possible shapes to use for the agent,
goals, and obstacles.
Returns:
A string representation of a puzzle.
Raises:
FailedToGenerateError: if the puzzle can't be completed
"""
assert (
len(possible_object_shapes) >= num_goal_objects
), "need a distinct shape for each goal object"
puzzle = [["." for _ in range(puzzle_width)] for _ in range(puzzle_height)]
goal_object_1_shape = random.choice(possible_object_shapes)
place_object(puzzle, "M1", shape=goal_object_1_shape)
place_object(puzzle, "G1", shape=goal_object_1_shape)
if num_goal_objects == 2:
goal_object_2_shape = None
while (
goal_object_2_shape is None
or goal_object_2_shape == goal_object_1_shape
):
goal_object_2_shape = random.choice(possible_object_shapes)
place_object(puzzle, "M2", shape=goal_object_2_shape)
place_object(puzzle, "G2", shape=goal_object_2_shape)
place_object(
puzzle, "A", shape=random.choice(possible_object_shapes)
)
for i in range(num_obstacles):
place_object(
puzzle,
f"M{1 + i + num_goal_objects}",
shape=random.choice(possible_object_shapes),
)
for _ in range(num_walls):
place_object(puzzle, "W", shape=[(0, 0)])
return "\n".join([" ".join(l) for l in puzzle])
def generate_level0_puzzles(
save_location_path: str,
num_puzzles: int = 5,
random_seed: int = 0,
filter_puzzles: bool = True,
time_limit: int = 2,
min_puzzle_size: int = 8,
max_puzzle_size: int = 12,
min_num_walls: int = 2,
max_num_walls: int = 4,
min_num_obstacles: int = 1,
max_num_obstacles: int = 2,
min_num_goal_objects: int = 1,
max_num_goal_objects: int = 1,
object_shapes: str = "complex",
):
"""Generates puzzles matching given constraints.
    Generation is naive, so if the constraints are too tight (for example, a
    puzzle that is too small to fit the requested number of shapes), generation
    may be slow or may never complete.
Args:
save_location_path: Name for directory to store generated puzzles. Must be
empty or not yet exist.
num_puzzles: The number of puzzles to generate. If puzzles are filtered
for solvability the number remaining may be lower.
random_seed: The seed to use for randomization.
filter_puzzles: If True, run a solver to filter out unsolvable puzzles.
time_limit: The number of seconds to run the solver on each level, if
filter_puzzles is True.
min_puzzle_size: The minimum height and width of puzzles.
max_puzzle_size: The maximum height and width of puzzles.
min_num_walls: The minimum number of single-pixel walls to place inside
puzzles.
max_num_walls: The maximum number of single-pixel walls to place inside
puzzles.
min_num_obstacles: The minimum number of obstacle shapes to add.
max_num_obstacles: The maximum number of obstacle shapes to add.
      min_num_goal_objects: The minimum number of goal objects (and goals) to add.
      max_num_goal_objects: The maximum number of goal objects (and goals) to add.
      object_shapes: Can be either "simple" or "complex". If "simple", the agent,
        goals, and obstacle shapes will all be 1x1. If "complex", they will be
random trominos.
"""
random.seed(random_seed)
if not os.path.exists(save_location_path):
os.makedirs(save_location_path)
if os.listdir(save_location_path):
raise ValueError(f"{save_location_path} is not empty!")
if num_puzzles < 1:
raise ValueError("num_puzzles must be at least 1")
if min_puzzle_size < 2 or min_puzzle_size > max_puzzle_size:
raise ValueError(
"min_puzzle_size must be >1 and no bigger than max_puzzle_size"
)
if min_num_walls < 0 or min_num_walls > max_num_walls:
raise ValueError(
"min_num_walls must be >=0 and no bigger than max_num_walls"
)
if min_num_obstacles < 0 or min_num_obstacles > max_num_obstacles:
raise ValueError(
"min_num_obstacles must be >=0 and no bigger than max_num_obstacles"
)
if (
min_num_goal_objects < 1
or max_num_goal_objects > 2
or min_num_goal_objects > max_num_goal_objects
):
raise ValueError(
"min_num_goal_objects must be >0, max_num_goal_objects must be <3, and"
" min_num_goal_objects must be no bigger than max_num_goal_objects"
)
if object_shapes == "simple":
possible_shapes = [
[(0, 0)],
]
elif object_shapes == "complex":
possible_shapes = [
[(0, 0)],
[(0, 0), (0, 1)],
[(0, 0), (1, 0)],
[(0, 0), (1, 0), (1, 1)],
[(0, 0), (0, 1), (1, 1)],
[(0, 0), (0, 1), (1, 0)],
[(1, 0), (0, 1), (1, 1)],
[(0, 0), (0, 1), (0, 2)],
[(0, 0), (1, 0), (2, 0)],
]
else:
raise ValueError("object_shapes must be either 'simple' or 'complex'")
for i in tqdm.tqdm(range(num_puzzles)):
while True:
try:
p_string = generate_puzzle(
puzzle_width=random.choice(
range(min_puzzle_size, max_puzzle_size + 1)
),
puzzle_height=random.choice(
range(min_puzzle_size, max_puzzle_size + 1)
),
num_walls=random.choice(range(min_num_walls, max_num_walls + 1)),
num_obstacles=random.choice(
range(min_num_obstacles, max_num_obstacles + 1)
),
num_goal_objects=random.choice(
range(min_num_goal_objects, max_num_goal_objects + 1)
),
possible_object_shapes=possible_shapes,
)
break
except FailedToGenerateError:
pass
fname = f"{save_location_path}/puzzle_{i}.pwp"
with open(fname, "w+") as f:
f.write(p_string)
if filter_puzzles:
filter_puzzles_by_solvability(save_location_path, time_limit, num_puzzles)
def filter_puzzles_by_solvability(
path: str, time_limit: int, num_puzzles: int
):
"""Filters out unsolvable puzzles.
Args:
path: The location of the puzzles to filter.
time_limit: The number of seconds to run the solver on each puzzle.
num_puzzles: The number of puzzles to expect.
"""
print(
"Running planner to filter puzzles for solvability, with time_limit:"
f" {time_limit}"
)
benchmark_rgd_planner(
puzzles_path=path, time_limit=time_limit, results_path=path
)
def puzzle_path(index, extension):
return f"{path}/puzzle_{index}.{extension}"
solved_map = dict()
for i in range(num_puzzles):
with open(puzzle_path(i, "yaml")) as file:
results = yaml.safe_load(file)
if results["plan"] is not None:
solved_map[i] = len(solved_map)
print(f"{len(solved_map)}/{num_puzzles} were solvable")
for i in range(num_puzzles):
if i in solved_map:
os.rename(puzzle_path(i, "pwp"), puzzle_path(solved_map[i], "pwp"))
else:
os.remove(puzzle_path(i, "pwp"))
os.remove(puzzle_path(i, "yaml"))
|
pushworld-main
|
python3/src/pushworld/generate.py
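A minimal usage sketch for the puzzle generator above; the output directory is illustrative, and `filter_puzzles=True` additionally assumes the RGD planner executable is available, since filtering runs `benchmark_rgd_planner`.

```python
# Hypothetical output directory; it must be empty or not yet exist.
from pushworld.generate import generate_level0_puzzles

generate_level0_puzzles(
    save_location_path="./generated_level0",
    num_puzzles=20,
    random_seed=7,
    filter_puzzles=True,   # runs the RGD planner to drop unsolvable puzzles
    time_limit=2,          # seconds of solver time per puzzle when filtering
    object_shapes="complex",
)
```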
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from dataclasses import dataclass
from typing import Iterable, List, Set, Tuple
import numpy as np
# The default pixel width of the border drawn to indicate object boundaries.
DEFAULT_BORDER_WIDTH = 2
# The default pixel width and height of a discrete position (i.e. cell) in a
# PushWorld environment.
DEFAULT_PIXELS_PER_CELL = 20
NUM_ACTIONS = 4
AGENT_IDX = 0
class Actions:
"""An enumeration of available actions in the PushWorld environment."""
LEFT, RIGHT, UP, DOWN = range(NUM_ACTIONS)
FROM_CHAR = {
"L": LEFT,
"R": RIGHT,
"U": UP,
"D": DOWN,
}
DISPLACEMENTS = np.array(
[
(-1, 0), # LEFT
(1, 0), # RIGHT
(0, -1), # UP
(0, 1), # DOWN
]
)
# Type aliases
Point = Tuple[int, int]
State = Tuple[Point, ...]
Color = Tuple[int, int, int] # (red, green, blue) with range 0 - 255
def hex_to_rgb(hex_string: str) -> Color:
"""Converts a standard 6-digit hex color into a tuple of decimal
(red, green, blue) values."""
return tuple(int(hex_string[i : i + 2], 16) for i in (0, 2, 4))
class Colors:
"""An enumeration of all colors involved in rendering a PushWorld puzzle."""
AGENT = hex_to_rgb("00DC00")
AGENT_BORDER = hex_to_rgb("006E00")
AGENT_WALL = hex_to_rgb("FAC71E")
AGENT_WALL_BORDER = hex_to_rgb("7D640F")
GOAL = None # transparent
GOAL_BORDER = hex_to_rgb("B90000")
GOAL_OBJECT = hex_to_rgb("DC0000")
GOAL_OBJECT_BORDER = hex_to_rgb("6E0000")
MOVABLE = hex_to_rgb("469BFF")
MOVABLE_BORDER = hex_to_rgb("23487F")
WALL = hex_to_rgb("0A0A0A")
WALL_BORDER = hex_to_rgb("050505")
@dataclass(frozen=True)
class PushWorldObject:
"""An object in the the PushWorld environment.
Attributes:
position: The absolute position of the object.
fill_color: The color with which to fill the object's area when rendered.
border_color: The color of the object's border when rendered.
cells: The discrete positions that this object occupies, defined relative to
the object's frame.
"""
position: Point
fill_color: Color
border_color: Color
cells: Set[Point]
class PushWorldPuzzle:
"""A puzzle in the PushWorld environment.
Args:
file_path: The path to a `.pwp` file that defines the puzzle.
Attributes:
initial_state: The initial state from which a plan must be found to achieve the
goal.
goal_state: Defines the goal to achieve from the initial state.
dimensions: A (width, height) tuple of the number of discrete positions in
the puzzle.
wall_positions: The discrete positions of all walls.
agent_wall_positions: The discrete positions of all walls that only block the
movement of the agent object.
movable_objects: A list of all movable objects, including their shapes.
Methods:
get_next_state: Returns the state that results from performing an action from a
given state.
count_achieved_goals: Returns the number of objects that are in their goal
positions in a given state.
is_goal_state: Returns whether the given state satisfies the goal of this
puzzle.
is_valid_plan: Returns whether the sequence of actions in the plan achieves the
goal, starting from the initial state.
render: Creates an image of a given state.
render_plan: Creates a video of a given plan, starting from the initial state.
"""
def __init__(self, file_path: str) -> None:
obj_pixels = defaultdict(set)
with open(file_path, "r") as fi:
elems_per_row = -1
for line_idx, line in enumerate(fi):
y = line_idx + 1
line_elems = line.split()
if y == 1:
elems_per_row = len(line_elems)
else:
if elems_per_row != len(line_elems):
raise ValueError(
f"Row {y} does not have the same number of elements as "
"the first row."
)
for x in range(1, len(line_elems) + 1):
cell_elems = line_elems[x - 1].split("+")
for elem_id in cell_elems:
elem_id = elem_id.lower()
if elem_id != ".":
obj_pixels[elem_id].add((x, y))
if "a" not in obj_pixels:
raise ValueError(
"Every puzzle must have an agent object, indicated by 'a'."
)
# Add walls at the boundaries of the puzzle
width = self._width = x + 2
height = self._height = y + 2
for xx in range(width):
obj_pixels["w"].add((xx, 0))
obj_pixels["w"].add((xx, height - 1))
for yy in range(height):
obj_pixels["w"].add((0, yy))
obj_pixels["w"].add((width - 1, yy))
movables = ["a"]
self._goal_state = ()
object_positions = {}
self._movable_objects = []
self._goals = []
self._agent_walls = None
# Put the agent in front of all other movables
sorted_elem_ids = list(obj_pixels.keys())
sorted_elem_ids.sort(reverse=True)
for elem_id in sorted_elem_ids:
pixels = obj_pixels[elem_id]
if elem_id == "w" or elem_id == "aw":
position = (0, 0)
else:
xx, yy = zip(*pixels)
position = (min(xx), min(yy))
pixels = subtract_from_points(pixels, position)
object_positions[elem_id] = position
obj_pixels[elem_id] = pixels
if elem_id == "w":
self._walls = PushWorldObject(
position=position,
fill_color=Colors.WALL,
border_color=Colors.WALL_BORDER,
cells=pixels,
)
elif elem_id == "aw":
self._agent_walls = PushWorldObject(
position=position,
fill_color=Colors.AGENT_WALL,
border_color=Colors.AGENT_WALL_BORDER,
cells=pixels,
)
elif elem_id == "a":
self._movable_objects.append(
PushWorldObject(
position=position,
fill_color=Colors.AGENT,
border_color=Colors.AGENT_BORDER,
cells=pixels,
)
)
elif elem_id[0] == "g":
self._goals.append(
PushWorldObject(
position=position,
fill_color=Colors.GOAL,
border_color=Colors.GOAL_BORDER,
cells=pixels,
)
)
if elem_id[0] == "g":
self._goal_state += (object_positions[elem_id], )
movable_id = "m" + elem_id[1:]
assert (
movable_id in obj_pixels
), f"Goal has no associated movable object: {movable_id}"
movables.append(movable_id)
for elem_id in obj_pixels:
if elem_id[0] == "m" and elem_id not in movables:
movables.append(elem_id)
for i, elem_id in enumerate(movables[1:]):
self._movable_objects.append(
PushWorldObject(
position=object_positions[elem_id],
fill_color=Colors.MOVABLE
if i >= len(self._goal_state)
else Colors.GOAL_OBJECT,
border_color=Colors.MOVABLE_BORDER
if i >= len(self._goal_state)
else Colors.GOAL_OBJECT_BORDER,
cells=obj_pixels[elem_id],
)
)
self._agent_wall_positions = obj_pixels["aw"]
self._wall_positions = obj_pixels["w"]
self._goal_state = tuple(self._goal_state)
self._initial_state = tuple(object_positions[elem_id] for elem_id in movables)
# Create all collision data structures
num_movables = self.num_movables = len(movables)
self._agent_collision_map = [set() for i in range(NUM_ACTIONS)]
self._wall_collision_map = [
[set() for i in range(num_movables)] for a in range(NUM_ACTIONS)
]
self._movable_collision_map = [
[[set() for i in range(num_movables)] for j in range(num_movables)]
for a in range(NUM_ACTIONS)
]
# Populate the actor collisions
for a in range(NUM_ACTIONS):
obj_pixels["aw"].update(obj_pixels["w"])
_populate_static_collisions(
collision_positions=self._agent_collision_map[a],
action=a,
object_pixels=obj_pixels["a"],
static_obstacle_pixels=obj_pixels["aw"],
width=width,
height=height,
)
# Populate the wall collisions of all movables other than the agent
for m in range(1, num_movables):
for a in range(NUM_ACTIONS):
_populate_static_collisions(
collision_positions=self._wall_collision_map[a][m],
action=a,
object_pixels=obj_pixels[movables[m]],
static_obstacle_pixels=obj_pixels["w"],
width=width,
height=height,
)
# Populate the collisions between all movables. There is no need to store
# collisions caused by movables pushing the agent, since the agent is the
# cause of all movement.
for pusher in range(num_movables):
for pushee in range(1, num_movables):
for a in range(NUM_ACTIONS):
_populate_dynamic_collisions(
collision_positions=(
self._movable_collision_map[a][pusher][pushee]
),
action=a,
pusher_pixels=obj_pixels[movables[pusher]],
pushee_pixels=obj_pixels[movables[pushee]],
)
self._pushed_objects = np.zeros((num_movables,), bool)
self._pushed_objects[AGENT_IDX] = True
@property
def initial_state(self) -> State:
"""The initial state from which a plan must be found to achieve the goal."""
return self._initial_state
@property
    def goal_state(self) -> Tuple[Point, ...]:
"""Defines the goal to achieve from the initial state.
The kth element in the goal state defines the goal position of the (k+1)th
element in each `State`.
"""
return self._goal_state
@property
def dimensions(self) -> Tuple[int, int]:
"""A (width, height) tuple of the number of discrete positions in the puzzle."""
return (self._width, self._height)
@property
def wall_positions(self) -> Set[Point]:
"""The discrete positions of all walls."""
return self._wall_positions
@property
def agent_wall_positions(self) -> Set[Point]:
"""The discrete positions of all walls that only block the movement of the
agent object."""
return self._agent_wall_positions
@property
def movable_objects(self) -> List[PushWorldObject]:
"""A list of all movable objects, including their shapes."""
return self._movable_objects
def get_next_state(self, state: State, action: int) -> State:
"""Returns the state that results from performing the `action` in the given
`state`."""
agent_pos = state[AGENT_IDX]
if agent_pos in self._agent_collision_map[action]:
return state # the actor cannot move
walls = self._wall_collision_map[action]
frontier = [AGENT_IDX]
while frontier:
movable_idx = frontier.pop()
movable_pos = state[movable_idx]
movable_collisions = self._movable_collision_map[action][movable_idx]
for obstacle_idx in range(1, self.num_movables):
if self._pushed_objects[obstacle_idx]:
continue # already pushed
# Is obstacle_idx pushed by movable_idx?
obstacle_pos = state[obstacle_idx]
relative_pos = tuple(np.subtract(movable_pos, obstacle_pos))
if relative_pos not in movable_collisions[obstacle_idx]:
continue # obstacle_idx is not pushed by movable_idx
# obstacle_idx is being pushed by movable_idx
if obstacle_pos in walls[obstacle_idx]:
# transitive stopping; nothing can move.
self._pushed_objects[1:] = False
return state
self._pushed_objects[obstacle_idx] = True
frontier.append(obstacle_idx)
next_state = list(state)
displacement = Actions.DISPLACEMENTS[action]
next_state[0] = tuple(displacement + state[0])
for i in range(1, self.num_movables):
if self._pushed_objects[i]:
next_state[i] = tuple(displacement + state[i])
self._pushed_objects[i] = False
else:
next_state[i] = state[i]
return tuple(next_state)
def count_achieved_goals(self, state: State) -> int:
"""Returns the number of objects that are in their goal positions in a
given state."""
count = 0
for entity, goal_entity in zip(
state[1 : 1 + len(self._goal_state)], self._goal_state
):
if entity == goal_entity:
count += 1
return count
def is_goal_state(self, state: State) -> bool:
"""Returns whether the given state satisfies the goal of this puzzle."""
return state[1 : 1 + len(self._goal_state)] == self._goal_state
def is_valid_plan(self, plan: Iterable[int]) -> bool:
"""Returns whether the sequence of actions in the plan achieves the goal,
starting from the initial state."""
state = self._initial_state
for action in plan:
if self.is_goal_state(state):
# goal was achieved before the plan ended
return False
state = self.get_next_state(state, action)
return self.is_goal_state(state)
def render(
self,
state: State,
border_width: int = DEFAULT_BORDER_WIDTH,
pixels_per_cell: int = DEFAULT_PIXELS_PER_CELL,
) -> np.ndarray:
"""Creates an image of the given state.
Args:
state: The state to render.
border_width: The pixel width of the border drawn to indicate object
boundaries. Must be >= 1.
pixels_per_cell: The pixel width and height of a discrete position in the
environment. Must be >= 1 + 2 * border_width.
Returns:
An RGB image with shape (height, width, 3) and type `uint8`.
"""
if border_width < 1:
raise ValueError("border_width must be >= 1")
if pixels_per_cell < 1 + 2 * border_width:
raise ValueError("pixels_per_cell must be >= 1 + 2*border_width")
image_shape = (self._height * pixels_per_cell, self._width * pixels_per_cell, 3)
image = np.ones(image_shape, np.uint8) * 255
objects = [(self._walls, self._walls.position)]
if self._agent_walls is not None:
objects.insert(0, (self._agent_walls, self._agent_walls.position))
objects += zip(self._movable_objects, state)
objects += [(g, g.position) for g in self._goals]
for obj, pos in objects:
_draw_object(
obj=obj,
position=pos,
image=image,
pixels_per_cell=pixels_per_cell,
border_width=border_width,
)
return image
def render_plan(
self,
plan: Iterable[int],
border_width: int = DEFAULT_BORDER_WIDTH,
pixels_per_cell: int = DEFAULT_PIXELS_PER_CELL,
) -> List[np.ndarray]:
"""Creates a video of the given plan, starting from the initial state.
Args:
plan: A sequence of actions.
border_width: The pixel width of the border drawn to indicate object
boundaries. Must be >= 1.
pixels_per_cell: The pixel width and height of a discrete position in the
environment. Must be >= 1 + 2 * border_width.
Returns:
A list of RGB images with shape (height, width, 3) and type `uint8`.
"""
state = self._initial_state
image = self.render(
state=state,
border_width=border_width,
pixels_per_cell=pixels_per_cell,
)
images = [image]
for action in plan:
state = self.get_next_state(state, action)
image = self.render(
state=state,
border_width=border_width,
pixels_per_cell=pixels_per_cell,
)
images.append(image)
return images
def points_overlap(s1: Set[Point], s2: Set[Point], offset: Point) -> bool:
"""Returns whether there exists a pair of points (p1, p2) in the sets (s1, s2) such
that p1 + offset == p2."""
offset_s2 = subtract_from_points(s2, offset)
return bool(s1.intersection(offset_s2))
def subtract_from_points(points: Set[Point], offset: Point) -> Set[Point]:
"""Returns the set {p - offset} for all `p` in `points`."""
dx, dy = offset
return set((x - dx, y - dy) for x, y in points)
def _populate_static_collisions(
collision_positions: Set[Point],
action: int,
object_pixels: Set[Point],
static_obstacle_pixels: Set[Point],
width: int,
height: int,
) -> None:
"""Computes the positions in which an object moves into collision with a static
obstacle when moving in the direction of the given `action`.
Args:
collision_positions: Modified in place. This function adds all positions of the
object in which moving the object in the direction of the given `action`
results in a collision with a static obstacle.
action: The direction of the movement.
object_pixels: The pixel positions of the object, measured in the
object's reference frame.
static_obstacle_pixels: The pixel positions of static obstacles, measured
in the global frame.
        width: The width of the puzzle, in discrete positions.
        height: The height of the puzzle, in discrete positions.
"""
dis_x, dis_y = Actions.DISPLACEMENTS[action]
xx, yy = zip(*object_pixels)
object_size = (max(xx) - min(xx) + 1, max(yy) - min(yy) + 1)
width -= object_size[0]
height -= object_size[1]
for object_x, object_y in object_pixels:
for obstacle_x, obstacle_y in static_obstacle_pixels:
dx = -dis_x + obstacle_x - object_x
dy = -dis_y + obstacle_y - object_y
if (
dx >= 0
and dy >= 0
and dx <= width
and dy <= height
and not points_overlap(object_pixels, static_obstacle_pixels, (dx, dy))
):
collision_positions.add((dx, dy))
def _populate_dynamic_collisions(
collision_positions: Set[Point],
action: int,
pusher_pixels: Set[Point],
pushee_pixels: Set[Point],
) -> None:
"""Computes the relative positions between two objects in which one object can
push the other when it moves in the direction of the given `action`.
Args:
collision_positions: Modified in place. This function adds all positions of the
pusher relative to the pushee in which moving the pusher in the direction
of the given `action` results in a collision with the pushee.
action: The direction of the pushing movement.
pusher_pixels: The pixel positions of the pusher object, measured in the
object's reference frame.
pushee_pixels: The pixel positions of the pushee object, measured in the
object's reference frame.
"""
dis_x, dis_y = Actions.DISPLACEMENTS[action]
for pusher_x, pusher_y in pusher_pixels:
for pushee_x, pushee_y in pushee_pixels:
dx = -dis_x + pushee_x - pusher_x
dy = -dis_y + pushee_y - pusher_y
if not points_overlap(pusher_pixels, pushee_pixels, (dx, dy)):
collision_positions.add((dx, dy))
def _draw_object(
obj: PushWorldObject,
position: Point,
image: np.ndarray,
pixels_per_cell: int,
border_width: int,
) -> None:
"""Draws the object into the given image.
Args:
obj: The object to draw.
position: The (column, row) position of the object.
image: The image in which to draw the object. Modified in place.
Must have shape (height, width, 3) and type `uint8`.
pixels_per_cell: The pixel width and height of a discrete position.
border_width: The pixel width of the border that highlights object boundaries.
"""
position = np.array(position)
border_offsets = [
(-1, 0),
(1, 0),
(0, -1),
(0, 1),
(-1, -1),
(-1, 1),
(1, -1),
(1, 1),
]
# Draw the cells and borders
for cell in obj.cells:
c, r = (position + cell) * pixels_per_cell
if obj.fill_color is not None:
image[r : r + pixels_per_cell, c : c + pixels_per_cell] = obj.fill_color
for dr, dc in border_offsets:
if (cell[0] + dc, cell[1] + dr) not in obj.cells:
# The adjacent cell is empty, so draw a border
r1 = r + max(0, dr) * (pixels_per_cell - border_width)
r2 = (r1 + pixels_per_cell) if dr == 0 else (r1 + border_width)
c1 = c + max(0, dc) * (pixels_per_cell - border_width)
c2 = (c1 + pixels_per_cell) if dc == 0 else (c1 + border_width)
image[r1:r2, c1:c2] = obj.border_color
|
pushworld-main
|
python3/src/pushworld/puzzle.py
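A minimal sketch of the core `PushWorldPuzzle` API defined above; the puzzle file path, the action sequence, and the 'UDLR' plan are illustrative.

```python
# Hypothetical puzzle file; the actions and plan string are placeholders.
from pushworld.puzzle import Actions, PushWorldPuzzle

puzzle = PushWorldPuzzle("./my_puzzles/puzzle_0.pwp")
state = puzzle.initial_state
for action in (Actions.RIGHT, Actions.DOWN, Actions.DOWN):
    state = puzzle.get_next_state(state, action)
    print(puzzle.count_achieved_goals(state), "goal(s) achieved")
print("solved:", puzzle.is_goal_state(state))

# A plan expressed as 'UDLR' characters can be validated against the initial state.
print("valid plan:", puzzle.is_valid_plan([Actions.FROM_CHAR[c] for c in "RDD"]))
```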
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
pushworld-main
|
python3/src/pushworld/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import yaml
from PIL import Image
from pushworld.config import BENCHMARK_PUZZLES_PATH, PUZZLE_EXTENSION
from pushworld.puzzle import Actions, PushWorldPuzzle
from pushworld.utils.filesystem import get_puzzle_file_paths, map_files_with_extension
from pushworld.utils.images2mp4 import images2mp4
def render_puzzle_previews(
image_path: str,
puzzle_path: str = BENCHMARK_PUZZLES_PATH,
image_extension: str = ".png",
) -> None:
"""Iterates over all PushWorld puzzles found in the given `puzzle_path` and
saves them in the `image_path` directory.
This function recursively descends into all subdirectories in the puzzle path and
recreates the subdirectory structure in the directory of saved images.
Args:
image_path: The path to a directory in which to save the resulting puzzle
preview images. This directory is created if it does not exist.
puzzle_path: The path to a PushWorld puzzle file (.pwp) or to a directory
that contains puzzle files, possibly nested in subdirectories.
image_extension: The file extension of the saved images. This determines the
image format.
"""
for puzzle_file_path, image_file_path in map_files_with_extension(
input_file_or_directory_path=puzzle_path,
input_extension=PUZZLE_EXTENSION,
output_directory_path=image_path,
output_extension=image_extension,
):
puzzle = PushWorldPuzzle(puzzle_file_path)
image = puzzle.render(puzzle.initial_state)
Image.fromarray(image).save(image_file_path)
def render_plans(
planning_results_path: str,
video_path: str,
puzzle_path: str = BENCHMARK_PUZZLES_PATH,
fps: float = 6.0,
) -> None:
"""Iterates over all planning result YAML files contained within the
`planning_results_path` directory, and then renders MP4 videos of all plans
found within the result files.
Each planning result YAML file must have the following structure:
```
planner: <name of the planner>
puzzle: <name of the puzzle that the planner attempted to solve>
plan: <a string of 'UDLR' characters, or `null` if no plan found>
```
Args:
planning_results_path: The path of a directory that contains planning result
YAML files.
video_path: The path of a directory in which videos of the plans are saved.
puzzle_path: The path of a directory that contains all puzzles that are
mentioned in the planning result files.
fps: Frames Per Second in the generated mp4 video files.
"""
puzzle_file_paths = get_puzzle_file_paths(puzzle_path)
for planning_result_file_path, video_file_path in map_files_with_extension(
input_file_or_directory_path=planning_results_path,
input_extension=".yaml",
output_directory_path=video_path,
output_extension=".mp4",
):
with open(planning_result_file_path) as result_file:
planning_result = yaml.safe_load(result_file)
plan = planning_result["plan"]
if plan is None:
continue
puzzle_name = planning_result["puzzle"]
if puzzle_name not in puzzle_file_paths:
raise ValueError(
f'No puzzle is named "{puzzle_name}" in the directory: {puzzle_path}'
)
puzzle = PushWorldPuzzle(puzzle_file_paths[puzzle_name])
images = puzzle.render_plan([Actions.FROM_CHAR[ch] for ch in plan.upper()])
images2mp4(video_file_path, images=images, fps=fps)
|
pushworld-main
|
python3/src/pushworld/visualization.py
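A minimal usage sketch for the two visualization entry points above; all paths below are illustrative, and `render_plans` assumes planner result YAMLs like those produced by `benchmark_rgd_planner`.

```python
# Hypothetical paths; adjust to real puzzle and result directories.
from pushworld.visualization import render_plans, render_puzzle_previews

# One PNG preview of the initial state is saved per puzzle found under puzzle_path.
render_puzzle_previews(image_path="./previews", puzzle_path="./my_puzzles")

# One MP4 per solved puzzle, replaying the plan stored in each result YAML.
render_plans(
    planning_results_path="nrgd_results",
    video_path="./plan_videos",
    puzzle_path="./my_puzzles",
    fps=6.0,
)
```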
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains functions to convert PushWorld puzzles into PDDL problems and
domains. This PDDL representation involves the following design choices:
1) Optimizations for object movements and collision checking
To minimize the computational cost of advancing the state during a search process,
the state only contains a single position for each object. If instead the state
stored all occupied positions of each object (since objects can have shapes that
occupy multiple positions), moving an object would require updating all of its
occupied positions, which would be slower.
To support this optimization, all colliding positions between pairs of objects
are precomputed and stored in the PDDL problem. Because colliding positions are
precomputed, planners can detect collisions between a pair of objects in O(1) time.
2) Decomposing simultaneous object movements into multiple actions
Any number of objects can push each other in a chain. For example, objects A, B,
and C could be in contact such that when the agent pushes A, then A simultaneously
pushes B, then B simultaneously pushes C, etc. To move object chains of arbitrary
length with a single action would require derived predicates to label all objects
that should be pushed, but derived predicates are not widely supported by existing
classical planners (as of September 2022). To maximize compatibility with existing
planners, the PDDL representation generated by this file uses one `push` action to
move one object at a time. Pushing multiple objects simultaneously (i.e. due to a
single movement of the agent) therefore requires multiple PDDL actions. This
approach has the same big-O complexity as derived predicates for checking
collisions to determine which objects to push.
3) Memory optimizations for Best-First Width Search
The Best-First Width Search (BFWS) planner from
https://github.com/nirlipo/BFWS-public contains two versions: One that uses
`libff` (referring to the Fast Forward heuristic) from LAPKT to preprocess the PDDL
representation, and another that uses Python code from Fast Downward (FD).
On PushWorld puzzles, the FD preprocessing is orders of magnitude slower than the
FF preprocessing, so benchmarks use the FF version. However, `libff` is written
so that it allocates memory for (total number of constants)^(max predicate arity),
which exceeds 30 GB in typical puzzles. To reduce memory usage, a `for_bfws` mode
is available to reduce the maximum predicate arity from 4 to 3, at the expense of
more conditions in predicates.
"""
import itertools
import os
from typing import Tuple
import numpy as np
from pushworld.config import (
BENCHMARK_PUZZLES_PATH,
DOMAIN_SUFFIX,
PROBLEM_SUFFIX,
PUZZLE_EXTENSION,
)
from pushworld.puzzle import AGENT_IDX, PushWorldPuzzle, points_overlap
from pushworld.utils.filesystem import map_files_with_extension
domain_template = """(define
(domain {problem_name})
(:requirements :typing :strips :conditional-effects :negative-preconditions)
(:types
position - object
direction - object
; Any object that can move and push other objects
moveable-object - object
; The object that can be directly controlled
agent-object - moveable-object
; A pair of objects, only used for BFWS. This is an auxiliary type to reduce
; the arity of the `in-collision` predicate. BFWS uses the `libff` library from
; LAPKT, and `libff` allocates memory for (num constants)^(predicate arity),
; which can exceed 100 GB in some problems when `in-collision` has arity=4.
object-pair - object
)
(:constants
agent - agent-object
up down left right - direction
{moveable_objects} - moveable-object
{object_pair_names}
)
(:predicates
(should-move ?obj - moveable-object ?dir - direction)
(has-moved ?obj - moveable-object)
(at ?obj - moveable-object ?pos - position)
(connected ?from - position ?to - position ?dir - direction)
(wall-collision ?obj - moveable-object ?next-pos - position)
{in_collision_predicate}
{is_pair_predicate}
)
(:action move-agent
:parameters (?dir - direction)
:precondition (and
{no_objects_should_move}
)
:effect (and
(should-move agent ?dir)
(forall
(?obj - moveable-object)
(not (has-moved ?obj)))
)
)
(:action push
:parameters ( ?obj - moveable-object ?dir - direction ?pos - position ?next-pos - position
)
:precondition (and
(should-move ?obj ?dir)
(not (has-moved ?obj))
(at ?obj ?pos)
(connected ?pos ?next-pos ?dir)
(not (wall-collision ?obj ?next-pos))
)
:effect (and
(not (at ?obj ?pos))
(at ?obj ?next-pos)
(has-moved ?obj)
(not (should-move ?obj ?dir))
(forall (?other-obj - moveable-object)
(when
(and
(not (has-moved ?other-obj)){push_condition}
)
(should-move ?other-obj ?dir)
)
)
)
)
)"""
push_condition = """
(exists (?other-pos - position)
(and
(at ?other-obj ?other-pos)
(in-collision ?obj ?next-pos ?other-obj ?other-pos)
)
)
"""
bfws_push_condition = """
(exists (?pair - object-pair ?other-pos - position)
(and
(at ?other-obj ?other-pos)
(is-pair ?pair ?obj ?other-obj)
(in-collision ?pair ?next-pos ?other-pos)
)
)
"""
is_pair_predicate = """
; Only used for BFWS
(is-pair
?pair - object-pair
?obj - moveable-object
?other-obj - moveable-object
)
"""
in_collision_predicate = """
(in-collision
?obj - moveable-object
?pos - position
?other-obj - moveable-object
?other-pos - position
)
"""
bfws_in_collision_predicate = """
(in-collision
?pair - object-pair
?pos - position
?other-pos - position
)
"""
problem_template = """(define
(problem {problem_name})
(:domain {problem_name})
(:objects
{positions_decl}
)
(:init
{initial_object_positions}
{wall_collisions}
; Define the grid of positions. (0, 0) is the top-left.
{position_connections}
; Enumerate all collisions between all pairs of objects in all positions,
; skipping positions that are already forbidden by walls. The "agent" object
; never occurs on the right because it is always the first pusher.
{object_collisions}
{object_pairs}
)
(:goal
(and
{position_goals}
{no_objects_should_move}
)
)
)"""
def puzzle_to_pddl(
name: str, puzzle: PushWorldPuzzle, for_bfws: bool = False
) -> Tuple[str, str]:
"""Converts the given puzzle into a PDDL description.
Args:
name: The name of the puzzle.
puzzle: The puzzle to convert to PDDL.
for_bfws: If True, the PDDL is adjusted to minimize the memory usage of the
`ff-version` of the Best-First Width Search planner from:
https://github.com/nirlipo/BFWS-public
Returns:
A (PDDL domain, PDDL problem) tuple.
"""
moveable_objects = " ".join(
f"m{i}" for i in range(len(puzzle.initial_state)) if i != AGENT_IDX
)
width, height = puzzle.dimensions
positions_decl = "\n".join(
(" " * 8 + " ".join([f"pos{x}-{y}" for x in range(width - 2)]) + " - position")
for y in range(height - 2)
)
initial_object_positions = ""
object_names = []
object_sizes = []
for i, obj in enumerate(puzzle.movable_objects):
obj_name = "agent" if i == AGENT_IDX else f"m{i}"
object_names.append(obj_name)
object_sizes.append(np.max(list(obj.cells), axis=0) + 1)
pos = obj.position
initial_object_positions += (
f" (at {obj_name} pos{pos[0]-1}-{pos[1]-1})\n"
)
wall_collisions = ""
collision_free_positions = []
for i, obj in enumerate(puzzle.movable_objects):
size = object_sizes[i]
name = object_names[i]
wall_positions = (
puzzle.agent_wall_positions if i == AGENT_IDX else puzzle.wall_positions
)
obj_free_positions = []
collision_free_positions.append(obj_free_positions)
for x, y in itertools.product(
range(width - 1 - size[0]),
range(height - 1 - size[1]),
):
if points_overlap(obj.cells, wall_positions, (x + 1, y + 1)):
wall_collisions += " " * 8 + f"(wall-collision {name} pos{x}-{y})\n"
else:
obj_free_positions.append((x, y))
position_connections = ""
for y in range(height - 2):
for x in range(width - 2):
if x > 0:
position_connections += (
f" (connected pos{x}-{y} pos{x-1}-{y} left)\n"
)
if x + 1 < width - 2:
position_connections += (
f" (connected pos{x}-{y} pos{x+1}-{y} right)\n"
)
if y > 0:
position_connections += (
f" (connected pos{x}-{y} pos{x}-{y-1} up)\n"
)
if y + 1 < height - 2:
position_connections += (
f" (connected pos{x}-{y} pos{x}-{y+1} down)\n"
)
object_collisions_list = []
for i, obj_a in enumerate(puzzle.movable_objects):
name_a = object_names[i]
for j, obj_b in enumerate(puzzle.movable_objects[i + 1 :], start=i + 1):
name_b = object_names[j]
offsets_to_collisions = {}
for (x_a, y_a), (x_b, y_b) in itertools.product(
collision_free_positions[i], collision_free_positions[j]
):
offset = (x_a - x_b, y_a - y_b)
if offset in offsets_to_collisions:
in_collision = offsets_to_collisions[offset]
else:
in_collision = points_overlap(obj_a.cells, obj_b.cells, offset)
offsets_to_collisions[offset] = in_collision
if in_collision:
if for_bfws:
a_to_b_collision = (
f"(in-collision {name_a}-{name_b} pos{x_a}-{y_a} "
f"pos{x_b}-{y_b})"
)
else:
a_to_b_collision = (
f"(in-collision {name_a} pos{x_a}-{y_a} {name_b} "
f"pos{x_b}-{y_b})"
)
object_collisions_list.append(a_to_b_collision)
if i != AGENT_IDX:
# Also consider the swapped order
if for_bfws:
b_to_a_collision = (
f"(in-collision {name_b}-{name_a} pos{x_b}-{y_b} "
f"pos{x_a}-{y_a})"
)
else:
b_to_a_collision = (
f"(in-collision {name_b} pos{x_b}-{y_b} {name_a} "
f"pos{x_a}-{y_a})"
)
object_collisions_list.append(b_to_a_collision)
object_collisions = ("\n" + " " * 8).join(object_collisions_list)
position_goals = ""
for obj_name, (x, y) in zip(object_names[1:], puzzle.goal_state):
position_goals += f" (at {obj_name} pos{x-1}-{y-1})\n"
no_objects_should_move = []
for obj in ["agent"] + moveable_objects.split():
for direction in ["left", "right", "up", "down"]:
no_objects_should_move.append(
" " * 12 + f"(not (should-move {obj} {direction}))"
)
no_objects_should_move = "\n".join(no_objects_should_move)
if for_bfws:
object_pairs_list = []
object_pair_names_list = []
for name_a in object_names:
for name_b in object_names:
if name_a != name_b:
pair_name = f"{name_a}-{name_b}"
object_pair_names_list.append(pair_name)
object_pairs_list.append(f"(is-pair {pair_name} {name_a} {name_b})")
object_pairs = "\n ".join(object_pairs_list)
object_pair_names = " ".join(object_pair_names_list) + " - object-pair"
else:
object_pairs = ""
object_pair_names = ""
problem_name = name.replace(" ", "_")
return (
domain_template.format(
problem_name=problem_name,
moveable_objects=moveable_objects,
is_pair_predicate=is_pair_predicate if for_bfws else "",
in_collision_predicate=(
bfws_in_collision_predicate if for_bfws else in_collision_predicate
),
object_pair_names=object_pair_names,
no_objects_should_move=no_objects_should_move,
push_condition=bfws_push_condition if for_bfws else push_condition,
),
problem_template.format(
problem_name=problem_name,
positions_decl=positions_decl,
initial_object_positions=initial_object_positions,
wall_collisions=wall_collisions,
position_connections=position_connections,
object_collisions=object_collisions,
object_pairs=object_pairs,
position_goals=position_goals,
no_objects_should_move=no_objects_should_move,
),
)
def convert_all_puzzles_to_pddl(
pddl_path: str, puzzle_path: str = BENCHMARK_PUZZLES_PATH, for_bfws: bool = False
) -> None:
"""Iterates over all PushWorld puzzles found in the given `puzzle_path` directory
and converts them into pairs of PDDL domain and problem files.
This function recursively descends into all subdirectories in the puzzle path and
recreates the subdirectory structure in the directory of saved PDDL files.
Args:
pddl_path: The path to a directory in which to save the resulting PDDL files.
This directory is created if it does not exist.
puzzle_path: The path to a PushWorld puzzle file or to a directory
that contains puzzle files, possibly nested in subdirectories.
for_bfws: See `puzzle_to_pddl`.
"""
for puzzle_file_path, pddl_file_path in map_files_with_extension(
input_file_or_directory_path=puzzle_path,
input_extension=PUZZLE_EXTENSION,
output_directory_path=pddl_path,
):
puzzle = PushWorldPuzzle(puzzle_file_path)
puzzle_name = os.path.splitext(os.path.split(puzzle_file_path)[1])[0]
pddl_domain, pddl_problem = puzzle_to_pddl(puzzle_name, puzzle, for_bfws)
with open(pddl_file_path + DOMAIN_SUFFIX, "w") as domain_file:
domain_file.write(pddl_domain)
with open(pddl_file_path + PROBLEM_SUFFIX, "w") as problem_file:
problem_file.write(pddl_problem)
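# Hypothetical usage sketch (not part of the original module): convert every
# benchmark puzzle into PDDL domain/problem files, using the reduced-arity
# encoding for the BFWS planner. The output directory below is a placeholder.
if __name__ == "__main__":
    convert_all_puzzles_to_pddl(pddl_path="pddl_output", for_bfws=True)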
|
pushworld-main
|
python3/src/pushworld/pddl.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict
import numpy as np
def get_puzzle_transforms(puzzle_string: str) -> Dict[str, str]:
"""Computes all 8 possible combinations of 90 degree rotations and horizontal and
vertical flips of the given PushWorld puzzle.
Args:
puzzle_string: A human-readable representation of a PushWorld puzzle.
This string uses the format of a `.pwp` file.
Returns:
A map from transform names to their associated transformed puzzle strings.
Always contains 8 elements.
"""
lines = [l.split() for l in puzzle_string.splitlines()]
transformed_puzzles = {}
for flipped in [False, True]:
for rotation in range(0, 360, 90):
transformed_puzzle_string = "\n".join([" ".join(l) for l in lines])
transformed_puzzle_name = f"r{rotation}{'_flipped' if flipped else ''}"
transformed_puzzles[transformed_puzzle_name] = transformed_puzzle_string
lines = np.rot90(lines, axes=(1, 0))
# flip top to bottom
lines = lines[::-1]
return transformed_puzzles
def create_transformed_puzzles(puzzle_path: str, output_path: str) -> None:
"""For every puzzle found in the `puzzle_path`, this function rotates and flips
the puzzle into all 8 possible transformations and saves the resulting transformed
puzzles into the `output_path`.
Args:
puzzle_path: The path of the directory that contains the PushWorld puzzles
to transform.
output_path: The path of the directory in which all transformed puzzles
are written.
"""
puzzle_path = puzzle_path.rstrip(os.path.sep)
for subdir, _, filenames in os.walk(puzzle_path):
for filename in filenames:
if filename.endswith(".pwp"):
puzzle_file_path = os.path.join(subdir, filename)
with open(puzzle_file_path, "r") as puzzle_file:
puzzle_string = puzzle_file.read()
aug_puzzles = get_puzzle_transforms(puzzle_string)
for puzzle_name, puzzle_string in aug_puzzles.items():
file_prefix, _ = os.path.splitext(
puzzle_file_path[len(puzzle_path) + 1 :]
)
transformed_file_path = os.path.join(
output_path, f"{file_prefix}_{puzzle_name}.pwp"
)
parent_directory, _ = os.path.split(transformed_file_path)
os.makedirs(parent_directory, exist_ok=True)
with open(transformed_file_path, "w") as fp:
fp.write(puzzle_string)
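# Hypothetical usage sketch (not part of the original module): expand a puzzle
# directory into all 8 rotated/flipped variants. Paths are placeholders.
if __name__ == "__main__":
    create_transformed_puzzles(
        puzzle_path="puzzles/original",
        output_path="puzzles/transformed",
    )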
|
pushworld-main
|
python3/src/pushworld/transform.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import Any, Dict, Optional, Tuple, Union
import gym
import numpy as np
from pushworld.config import PUZZLE_EXTENSION
from pushworld.puzzle import (
DEFAULT_BORDER_WIDTH,
DEFAULT_PIXELS_PER_CELL,
NUM_ACTIONS,
PushWorldPuzzle,
)
from pushworld.utils.env_utils import get_max_puzzle_dimensions, render_observation_padded
from pushworld.utils.filesystem import iter_files_with_extension
class PushWorldEnv(gym.Env):
"""An OpenAI Gym environment for PushWorld puzzles.
Rewards are calculated according to Appendix D of
https://arxiv.org/pdf/1707.06203.pdf with one change: The negative reward per step
is reduced to 0.01, since PushWorld puzzles tend to have longer solutions than
Sokoban puzzles.
Args:
puzzle_path: The path of a PushWorld puzzle file or of a directory that
contains puzzle files, possibly nested in subdirectories. All discovered
puzzles are loaded, and the `reset` method randomly selects a new puzzle
each time it is called.
max_steps: If not None, the `step` method will return `truncated = True` after
calling it `max_steps` times since the most recent call of `reset`.
border_width: The pixel width of the border drawn to indicate object
boundaries. Must be >= 1.
pixels_per_cell: The pixel width and height of a discrete position in the
environment. Must be >= 1 + 2 * border_width.
standard_padding: If True, all puzzles are padded to the maximum width and
height of the puzzles in the `pushworld.config.BENCHMARK_PUZZLES_PATH`
directory. If False, puzzles are padded to the maximum dimensions of
all puzzles found in the `puzzle_path`.
"""
def __init__(
self,
puzzle_path: str,
max_steps: Optional[int] = None,
border_width: int = DEFAULT_BORDER_WIDTH,
pixels_per_cell: int = DEFAULT_PIXELS_PER_CELL,
standard_padding: bool = False,
) -> None:
self._puzzles = []
for puzzle_file_path in iter_files_with_extension(
puzzle_path, PUZZLE_EXTENSION
):
self._puzzles.append(PushWorldPuzzle(puzzle_file_path))
if len(self._puzzles) == 0:
raise ValueError(f"No PushWorld puzzles found in: {puzzle_path}")
if border_width < 1:
raise ValueError("border_width must be >= 1")
if pixels_per_cell < 3:
raise ValueError("pixels_per_cell must be >= 3")
self._max_steps = max_steps
self._pixels_per_cell = pixels_per_cell
self._border_width = border_width
widths, heights = zip(*[puzzle.dimensions for puzzle in self._puzzles])
self._max_cell_width = max(widths)
self._max_cell_height = max(heights)
if standard_padding:
standard_cell_height, standard_cell_width = get_max_puzzle_dimensions()
if standard_cell_height < self._max_cell_height:
raise ValueError(
"`standard_padding` is True, but the maximum puzzle height in "
"BENCHMARK_PUZZLES_PATH is less than the height of the puzzle(s) "
"in the given `puzzle_path`."
)
else:
self._max_cell_height = standard_cell_height
if standard_cell_width < self._max_cell_width:
raise ValueError(
"`standard_padding` is True, but the maximum puzzle width in "
"BENCHMARK_PUZZLES_PATH is less than the width of the puzzle(s) "
"in the given `puzzle_path`."
)
else:
self._max_cell_width = standard_cell_width
# Use a fixed arbitrary seed for reproducibility of results and for
# deterministic tests.
self._random_generator = random.Random(123)
self._current_puzzle = None
self._current_state = None
self._action_space = gym.spaces.Discrete(NUM_ACTIONS)
self._observation_space = gym.spaces.Box(
low=0.0,
high=1.0,
shape=render_observation_padded(
self._puzzles[0],
self._puzzles[0].initial_state,
self._max_cell_height,
self._max_cell_width,
self._pixels_per_cell,
self._border_width,
).shape,
dtype=np.float32,
)
@property
def action_space(self) -> gym.spaces.Space:
"""Implements `gym.Env.action_space`."""
return self._action_space
@property
def observation_space(self) -> gym.spaces.Space:
"""Implements `gym.Env.observation_space`."""
return self._observation_space
@property
def metadata(self) -> Dict[str, Any]:
"""Implements `gym.Env.metadata`."""
return {"render_modes": ["rgb_array"]}
@property
def render_mode(self) -> str:
"""Implements `gym.Env.render_mode`. Always contains "rgb_array"."""
return "rgb_array"
@property
def current_puzzle(self) -> Optional[PushWorldPuzzle]:
"""The current puzzle, or `None` if `reset` has not yet been called."""
return self._current_puzzle
def reset(
self,
seed: Optional[int] = None,
options: Optional[dict] = None,
) -> Tuple[np.ndarray, dict]:
"""Implements `gym.Env.reset`.
This function randomly selects a puzzle from those provided to the constructor
and resets the environment to the initial state of the puzzle.
Args:
seed: If not None, the random number generator in this environment is reset
with this seed.
options: Unused. Required by the `gym.Env.reset` interface.
Returns:
A tuple of (observation, info). The observation contains the initial
observation of the environment after the reset, and it is formatted as an
RGB image with shape (height, width, 3) with `float32` type and values
ranging from [0, 1]. The info dictionary is unused.
"""
if seed is not None:
self._random_generator = random.Random(seed)
self._current_puzzle = self._random_generator.choice(self._puzzles)
self._current_state = self._current_puzzle.initial_state
self._current_achieved_goals = self._current_puzzle.count_achieved_goals(
self._current_state
)
self._steps = 0
observation = render_observation_padded(
self._current_puzzle,
self._current_state,
self._max_cell_height,
self._max_cell_width,
self._pixels_per_cell,
self._border_width,
)
info = {"puzzle_state": self._current_state}
return observation, info
def step(
self, action: int
) -> Union[
Tuple[np.ndarray, float, bool, dict],
Tuple[np.ndarray, float, bool, bool, dict],
]:
"""Implements `gym.Env.step`.
The returned observation is an RGB image of the new state of the environment,
formatted as a `float32` array with shape (height, width, 3) and values ranging
from [0, 1].
"""
if not self._action_space.contains(action):
raise ValueError("The provided action is not in the action space.")
if self._current_state is None:
raise RuntimeError("reset() must be called before step() can be called.")
self._steps += 1
previous_state = self._current_state
self._current_state = self._current_puzzle.get_next_state(
self._current_state, action
)
observation = render_observation_padded(
self._current_puzzle,
self._current_state,
self._max_cell_height,
self._max_cell_width,
self._pixels_per_cell,
self._border_width,
)
terminated = self._current_puzzle.is_goal_state(self._current_state)
if terminated:
reward = 10.0
else:
previous_achieved_goals = self._current_puzzle.count_achieved_goals(
previous_state
)
current_achieved_goals = self._current_puzzle.count_achieved_goals(
self._current_state
)
reward = current_achieved_goals - previous_achieved_goals - 0.01
truncated = False if self._max_steps is None else self._steps >= self._max_steps
info = {"puzzle_state": self._current_state}
return observation, reward, terminated, truncated, info
def render(self, mode='rgb_array') -> np.ndarray:
"""Implements `gym.Env.render`.
Returns:
An RGB image of the current state of the environment, formatted as a
`uint8` array with shape (height, width, 3).
"""
assert mode == 'rgb_array', 'mode must be rgb_array.'
return self._current_puzzle.render(
self._current_state,
border_width=self._border_width,
pixels_per_cell=self._pixels_per_cell,
)
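# Hypothetical usage sketch (not part of the original module): run one short
# episode with random actions. The puzzle path is a placeholder.
if __name__ == "__main__":
    env = PushWorldEnv(puzzle_path="puzzles/level0", max_steps=100)
    observation, info = env.reset(seed=0)
    done = False
    while not done:
        action = env.action_space.sample()
        observation, reward, terminated, truncated, info = env.step(action)
        done = terminated or truncated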
|
pushworld-main
|
python3/src/pushworld/gym_env.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
from typing import List
import numpy as np
def images2mp4(
video_file_path: str,
images: List[np.ndarray],
color_axis: int = 2,
fps: float = 30.0,
min_video_size: int = 100,
) -> None:
"""Converts a list of images into an mp4 video.
Args:
video_file_path: The path of the .mp4 file where the video is saved.
images: The list of RGB images to convert into a video. Must all have the same
shape, which can either be (height, width, 3) or (3, height, width).
This list must contain at least two images.
color_axis: If 0, images must have shape (3, height, width). If 2, images
must have shape (height, width, 3).
fps: Frames per second in the generated video.
min_video_size: In pixels, the minimum width or height of the generated
video. Images are upsampled by an integer factor chosen so that their
dimensions exceed this value.
"""
if color_axis not in [0, 2]:
raise ValueError("color_axis must either be 0 or 2")
if video_file_path[-4:].lower() != ".mp4":
video_file_path += ".mp4"
if len(images) < 2:
raise ValueError("Cannot save a video with only %i frames" % len(images))
if color_axis == 2:
h, w, c = images[0].shape
elif color_axis == 0:
c, h, w = images[0].shape
upsample = 1 + max(min_video_size // h, min_video_size // w)
# Make sure the dimensions are divisible by 2.
if (w % 2 == 1 or h % 2 == 1) and upsample % 2 == 1:
upsample += 1
w *= upsample
h *= upsample
size = (w, h)
ffmpeg_command = (
"ffmpeg",
"-nostats",
"-loglevel",
"error", # suppress warnings
"-y",
"-r",
"%d" % fps,
# input
"-f",
"rawvideo",
"-s:v",
"{}x{}".format(*size),
"-pix_fmt",
"rgb24",
"-i",
"-",
# output
"-vcodec",
"libx264",
"-pix_fmt",
"yuv420p",
video_file_path,
)
try:
proc = subprocess.Popen(
ffmpeg_command, stdin=subprocess.PIPE, preexec_fn=os.setsid
)
except FileNotFoundError as error:
if error.filename == "ffmpeg":
raise RuntimeError(
"Converting images into an mp4 video requires `ffmpeg` to be "
"installed. See https://ffmpeg.org/"
) from error
raise  # Re-raise any other FileNotFoundError unchanged.
for frame in images:
if color_axis == 0:
# Transpose the color axis to the correct position
frame = np.transpose(frame, [1, 2, 0])
if frame.shape[2] != 3:
raise ValueError(
f"Expected 3 color channels, but an image has {frame.shape[2]} color "
"channels."
)
frame = frame.repeat(upsample, axis=0).repeat(upsample, axis=1)
proc.stdin.write(frame[:h, :w].tobytes())
proc.stdin.close()
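# Hypothetical usage sketch (not part of the original module): write a short
# test video from synthetic frames. Requires `ffmpeg` on the PATH; the output
# file name is a placeholder.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    frames = [
        rng.integers(0, 256, size=(32, 32, 3), dtype=np.uint8) for _ in range(12)
    ]
    images2mp4("random_noise.mp4", images=frames, fps=6.0)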
|
pushworld-main
|
python3/src/pushworld/utils/images2mp4.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict, Generator, Optional, Tuple
from pushworld.config import BENCHMARK_PUZZLES_PATH, PUZZLE_EXTENSION
def iter_files_with_extension(
file_or_directory_path: str, extension: str
) -> Generator[str, None, None]:
"""Yields the paths of all files nested in a given directory that have the given
extension.
Args:
file_or_directory_path: A path to a file or a directory in which to search for
files with the given `extension`. If this is a path to a file, then it must
have the correct extension.
extension: The extension to search for. Case insensitive.
Yields:
If `file_or_directory_path` is a file with the given extension, this generator
only yields the path of this file. Otherwise this generator yields the path
to every file in `file_or_directory_path` or any of its subdirectories that
has the given extension.
Raises:
ValueError: If `file_or_directory_path` refers to a file but has the wrong
extension.
"""
extension = extension.lower()
file_or_directory_path = file_or_directory_path.rstrip(os.path.sep)
if os.path.isfile(file_or_directory_path):
if file_or_directory_path.lower().endswith(extension):
yield file_or_directory_path
else:
raise ValueError(
"The given file does not have the expected "
f"extension ({extension}): {file_or_directory_path}"
)
else:
# Not a file, so must be a directory.
for parent_directory_path, _, filenames in os.walk(file_or_directory_path):
for filename in filenames:
if filename.lower().endswith(extension):
file_path = os.path.join(parent_directory_path, filename)
yield file_path
def map_files_with_extension(
input_file_or_directory_path: str,
input_extension: str,
output_directory_path: str,
output_extension: Optional[str] = None,
) -> Generator[Tuple[str, str], None, None]:
"""Maps files from one directory to another while replacing their extensions and
maintaining the structure of subdirectories.
For example, if an input directory contains these files:
input_directory/
sub_directory/
foo.yaml
another_sub_directory/
bar.png
baz.yaml
Then when `input_extension` is ".yaml" and `output_extension` is ".gif", this
function will yield the paths to the following files and create all necessary
subdirectories to match the structure of `input_directory`:
output_directory/ <-- this directory is created
sub_directory/ <-- this directory is created
foo.gif <-- this file path is yielded
baz.gif <-- this file path is yielded
Args:
input_file_or_directory_path: A path to a file or a directory in which to search
for files with the given `extension`. If this is a path to a file, then it
must have the correct extension.
input_extension: The extension to search for in the
`input_file_or_directory_path`. Case insensitive.
output_directory_path: The path of the directory where output files are stored.
This directory is created if it does not already exist, as well as all
subdirectories needed to match the structure of the input directory.
output_extension: The extension of output files.
Yields:
(input file path, output file path) pairs. The parent directory of the output
file is guaranteed to exist.
"""
if isinstance(output_extension, str) and not output_extension.startswith("."):
output_extension = "." + output_extension
for input_file_path in iter_files_with_extension(
input_file_or_directory_path, input_extension
):
input_parent_directory_path, filename = os.path.split(input_file_path)
output_filename = os.path.splitext(filename)[0]
if output_extension is not None:
output_filename += output_extension
if input_file_path == input_file_or_directory_path:
output_parent_directory_path = output_directory_path
else:
output_parent_directory_path = os.path.join(
output_directory_path,
input_parent_directory_path[len(input_file_or_directory_path) + 1 :],
)
# Create the output (sub)directory if it does not already exist.
os.makedirs(output_parent_directory_path, exist_ok=True)
output_file_path = os.path.join(output_parent_directory_path, output_filename)
yield input_file_path, output_file_path
def get_puzzle_file_paths(puzzle_path: str = BENCHMARK_PUZZLES_PATH) -> Dict[str, str]:
"""Returns the paths to all PushWorld puzzles in a directory, including
nested subdirectories.
Args:
puzzle_path: The path to the directory containing PushWorld puzzle files with
the `PUZZLE_EXTENSION` extension.
Returns:
A map from puzzle names to puzzle file paths.
Raises:
ValueError: If two puzzles have the same name.
"""
puzzles = {}
for puzzle_file_path in iter_files_with_extension(puzzle_path, PUZZLE_EXTENSION):
puzzle_name = os.path.splitext(os.path.split(puzzle_file_path)[1])[0]
if puzzle_name in puzzles:
raise ValueError(
f'Two puzzles have the same name "{puzzle_name}": '
f"{puzzle_file_path} and {puzzles[puzzle_name]}"
)
puzzles[puzzle_name] = puzzle_file_path
return puzzles
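# Hypothetical usage sketch (not part of the original module): map every .yaml
# file in one directory tree to a .mp4 path in another, creating output
# subdirectories as needed. Paths are placeholders.
if __name__ == "__main__":
    for input_path, output_path in map_files_with_extension(
        input_file_or_directory_path="results",
        input_extension=".yaml",
        output_directory_path="videos",
        output_extension=".mp4",
    ):
        print(input_path, "->", output_path)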
|
pushworld-main
|
python3/src/pushworld/utils/filesystem.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
pushworld-main
|
python3/src/pushworld/utils/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Generator, Optional, Tuple
import numpy as np
from pushworld.config import BENCHMARK_PUZZLES_PATH, PUZZLE_EXTENSION
from pushworld.puzzle import (
PushWorldPuzzle,
State,
)
from pushworld.utils.filesystem import iter_files_with_extension
def get_max_puzzle_dimensions() -> Tuple[int, int]:
"""Returns the (max height, max width) of PushWorld puzzles in the
`pushworld.config.BENCHMARK_PUZZLES_PATH` directory."""
max_height = 0
max_width = 0
for puzzle_file_path in iter_files_with_extension(
BENCHMARK_PUZZLES_PATH, PUZZLE_EXTENSION
):
with open(puzzle_file_path, "r") as puzzle_file:
lines = puzzle_file.readlines()
# Add 2 for the outer walls
max_height = max(max_height, len(lines) + 2)
max_width = max(max_width, len(lines[0].strip().split()) + 2)
return max_height, max_width
def render_observation_padded(
puzzle: PushWorldPuzzle,
state: State,
max_cell_height: int,
max_cell_width: int,
pixels_per_cell: int,
border_width: int,
) -> np.ndarray:
"""Renders an observation of the state of a puzzle.
Args:
puzzle: The puzzle to observe.
state: The state of the puzzle.
Returns:
An observation of the PushWorld environment. This observation is
formatted
as an RGB image with shape (height, width, 3) with `float32` type and
values
ranging from [0, 1].
"""
image = (
puzzle.render(
state,
border_width=border_width,
pixels_per_cell=pixels_per_cell,
).astype(np.float32)
/ 255
)
# Pad the image to the correct size.
height_padding = (
max_cell_height * pixels_per_cell - image.shape[0]
)
width_padding = (
max_cell_width * pixels_per_cell - image.shape[1]
)
half_height_padding = height_padding // 2
half_width_padding = width_padding // 2
return np.pad(
image,
pad_width=[
(half_height_padding, height_padding - half_height_padding),
(half_width_padding, width_padding - half_width_padding),
(0, 0),
],
)
|
pushworld-main
|
python3/src/pushworld/utils/env_utils.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import resource
import subprocess
import sys
from typing import Callable, Optional, Tuple
# These definitions are standard since 1998. (https://en.wikipedia.org/wiki/Gigabyte)
KILOBYTE = 1000
MEGABYTE = 1000 * KILOBYTE
GIGABYTE = 1000 * MEGABYTE
def get_child_process_cpu_time() -> float:
"""Returns the total CPU time in seconds used by child processes of this process."""
child_times = resource.getrusage(resource.RUSAGE_CHILDREN)
return child_times.ru_utime + child_times.ru_stime
def run_process(
command: list[str],
time_limit: Optional[int] = None,
memory_limit: Optional[int] = None,
) -> Tuple[str, int, float]:
"""Runs a child process specified by the given command.
Args:
command: A list of strings of command-line arguments.
time_limit: In seconds, the maximum time the process is allowed to run.
If `None`, no time limit is enforced.
memory_limit: In bytes, the maximum memory the process is allowed to use.
If `None`, no memory limit is enforced.
Returns:
A tuple of (stdout, return code, CPU run time) of the child process.
The measurement of CPU time assumes that no other child processes are
running in parallel.
"""
if not isinstance(time_limit, (int, type(None))):
raise TypeError("time_limit must be an integer or None")
if time_limit is not None and time_limit <= 0:
raise ValueError("time_limit must be a positive integer")
if not isinstance(memory_limit, (int, type(None))):
raise TypeError("memory_limit must be an integer or None")
if memory_limit is not None and memory_limit <= 0:
raise ValueError("memory_limit must be a positive integer")
def preexec_fn():
if time_limit is not None:
# See `https://github.com/aibasel/downward/blob/main/driver/limits.py` for
# an explanation of this `try..except`.
try:
resource.setrlimit(resource.RLIMIT_CPU, (time_limit, time_limit + 1))
except ValueError:
resource.setrlimit(resource.RLIMIT_CPU, (time_limit, time_limit))
if memory_limit is not None:
resource.setrlimit(resource.RLIMIT_AS, (memory_limit, memory_limit))
start_cpu_time = get_child_process_cpu_time()
proc = subprocess.Popen(
command,
preexec_fn=preexec_fn,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
out = proc.communicate()[0] # this waits for the process to terminate
cpu_run_time = get_child_process_cpu_time() - start_cpu_time
out = out.strip().decode("utf-8")
return out, proc.returncode, cpu_run_time
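# Hypothetical usage sketch (not part of the original module): run a command
# with a 10-second CPU-time limit and a 1 GB address-space limit.
if __name__ == "__main__":
    stdout, return_code, cpu_time = run_process(
        ["echo", "hello"], time_limit=10, memory_limit=GIGABYTE
    )
    print(stdout, return_code, cpu_time)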
|
pushworld-main
|
python3/src/pushworld/utils/process.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Install script for setuptools."""
import imp # pylint: disable=deprecated-module
import setuptools
# Additional requirements for testing.
testing_require = [
'mock',
'pytest-xdist',
'pytype==2021.8.11', # to be compatible with dm-acme
]
setuptools.setup(
name='neural_testbed',
description=(
'Neural testbed. '
'A library for evaluating probabilistic inference in neural networks.'
),
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
url='https://github.com/deepmind/neural_testbed',
author='DeepMind',
author_email='neural-testbed-eng+os@google.com',
license='Apache License, Version 2.0',
version=imp.load_source(
'_metadata', 'neural_testbed/_metadata.py'
).__version__,
keywords='probabilistic-inference python machine-learning',
packages=setuptools.find_packages(),
install_requires=[
'absl-py',
'chex',
'dm-haiku',
'enn @ git+https://git@github.com/deepmind/enn',
'jax',
'jaxlib',
'ml_collections',
'neural-tangents',
'numpy',
'pandas',
'plotnine',
'tensorflow==2.8.0', # to be compatible with dm-acme
'tensorflow-datasets==4.6.0', # to be compatible with dm-acme
'tensorflow_probability==0.15.0', # to be compatible with dm-acme
'typing-extensions',
'protobuf==3.20.0',  # to avoid TypeError: descriptors cannot not be
# created directly
],
extras_require={
'testing': testing_require,
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
|
neural_testbed-master
|
setup.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Package metadata for neural_testbed.
This is kept in a separate module so that it can be imported from setup.py, at
a time when neural_testbed's dependencies may not have been installed yet.
"""
__version__ = '0.1.0'
|
neural_testbed-master
|
neural_testbed/_metadata.py
|
# python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Neural testbed library for uncertainty evaluation."""
|
neural_testbed-master
|
neural_testbed/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base classes for Neural testbed."""
import abc
import dataclasses
from typing import Any, Dict, NamedTuple, Optional
import chex
import typing_extensions
# Maybe this Data class needs to be a tf.Dataset
class Data(NamedTuple):
x: chex.Array # Always includes a batch index
y: chex.Array # Always includes a batch index
@dataclasses.dataclass(frozen=True)
class PriorKnowledge:
"""What an agent knows a priori about the problem."""
input_dim: int
num_train: int
tau: int
num_classes: int = 1
layers: Optional[int] = None
noise_std: Optional[float] = None
temperature: Optional[float] = None
extra: Optional[Dict[str, Any]] = None
@dataclasses.dataclass
class ENNQuality:
kl_estimate: float
extra: Optional[Dict[str, Any]] = None
class EpistemicSampler(typing_extensions.Protocol):
"""Interface for drawing posterior samples from distribution.
For classification this should represent the class *logits*.
For regression this is the posterior sample of the function f(x).
Assumes a batched input x.
"""
def __call__(self, x: chex.Array, key: chex.PRNGKey) -> chex.Array:
"""Generate a random sample from approximate posterior distribution."""
class TestbedAgent(typing_extensions.Protocol):
"""An interface for specifying a testbed agent."""
def __call__(self, data: Data, prior: PriorKnowledge) -> EpistemicSampler:
"""Sets up a training procedure given ENN prior knowledge."""
class TestbedProblem(abc.ABC):
"""An interface for specifying a generative GP model of data."""
@abc.abstractproperty
def train_data(self) -> Data:
"""Access training data from the GP for ENN training."""
@abc.abstractproperty
def prior_knowledge(self) -> PriorKnowledge:
"""Information describing the problem instance."""
@abc.abstractmethod
def evaluate_quality(self, enn_sampler: EpistemicSampler) -> ENNQuality:
"""Evaluate the quality of a posterior sampler."""
# See experiments/experiment.py for framework to run agent on a problem.
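# Hypothetical example (not part of the original module): a minimal agent that
# satisfies the `TestbedAgent` protocol. It ignores the training data and
# returns constant zero logits for every input, so it only illustrates the
# expected call signatures, not a useful posterior.
import jax.numpy as jnp


def constant_logit_agent(data: Data, prior: PriorKnowledge) -> EpistemicSampler:
  del data  # This trivial agent does no training.

  def sampler(x: chex.Array, key: chex.PRNGKey) -> chex.Array:
    del key  # The returned "posterior sample" is deterministic.
    return jnp.zeros((x.shape[0], prior.num_classes))

  return sampler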
|
neural_testbed-master
|
neural_testbed/base.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example running an ENN on active learning task."""
from absl import app
from absl import flags
from acme.utils import loggers
from enn import active_learning
from neural_testbed.active_learning import experiment
from neural_testbed.agents.factories.sweeps import testbed_2d as factories
from neural_testbed.bandit import agents
# ENN training
_AGENT_ID = flags.DEFINE_string('agent_id', 'mlp',
'Which benchmark agent to run.')
# Action priority
_PRIORITY = flags.DEFINE_string('priority', 'entropy', 'How to prioritize data')
# Active learning
_INPUT_DIM = flags.DEFINE_integer('input_dim', 10, 'Input dimension')
_TEMPERATURE = flags.DEFINE_float('temperature', 0.1, 'Temperature')
_NUM_ACTIONS = flags.DEFINE_integer('num_actions', 20, 'Number of actions')
_NUM_STEPS = flags.DEFINE_integer('num_steps', 10_000, 'Number of timesteps')
_SEED = flags.DEFINE_integer('seed', 0, 'Bandit seed')
_BATCH_SIZE = flags.DEFINE_integer('batch_size', 128, 'batch size in training')
_STEPS_PER_OBS = flags.DEFINE_integer('steps_per_obs', 1,
'sgds per observation')
def main(_):
# Override this config for different ENNs... must be a VanillaEnnAgent
paper_agent = factories.get_paper_agent(_AGENT_ID.value)
# Convert testbed agent to an active learning agent
config, l2_weight_decay = agents.make_config_l2_for_bandit(
paper_agent=paper_agent,
temperature=_TEMPERATURE.value,
seed=_SEED.value,
)
# Run the active learning experiment with appropriate logging
trainer = experiment.ActiveLearning(
enn_config=config,
priority_fn_ctor=active_learning.get_priority_fn_ctor(_PRIORITY.value),
input_dim=_INPUT_DIM.value,
num_actions=_NUM_ACTIONS.value * _INPUT_DIM.value,
temperature=_TEMPERATURE.value,
seed=_SEED.value,
steps_per_obs=_STEPS_PER_OBS.value,
logger=loggers.make_default_logger('results', time_delta=0),
should_log=lambda x: x % 10 == 0,
batch_size=_BATCH_SIZE.value,
l2_weight_decay=l2_weight_decay,
)
trainer.run(_NUM_STEPS.value)
if __name__ == '__main__':
app.run(main)
|
neural_testbed-master
|
neural_testbed/active_learning/run.py
|
# pylint: disable=g-bad-file-header
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
neural_testbed-master
|
neural_testbed/active_learning/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Active learning evaluation of ENN agent on testbed problem."""
import functools
import typing as tp
from acme.utils import loggers
import chex
from enn import active_learning
from enn import base as enn_base
from enn import datasets
from enn import losses
from enn import networks
import haiku as hk
import jax
import jax.numpy as jnp
from neural_testbed import agents
from neural_testbed import base as testbed_base
from neural_testbed import generative
from neural_testbed import likelihood
from neural_testbed.bandit import replay
from neural_testbed.leaderboard import sweep
import optax
class ActiveLearning:
"""Active learning experiment."""
def __init__(
self,
enn_config: agents.VanillaEnnConfig,
priority_fn_ctor: active_learning.PriorityFnCtor,
input_dim: int,
num_actions: int,
logit_ctor: tp.Optional[sweep.LogitCtor] = None,
temperature: float = 1,
steps_per_obs: int = 1,
logger: tp.Optional[loggers.Logger] = None,
should_log: tp.Callable[[int], bool] = lambda x: True,
batch_size: int = 16,
l2_weight_decay: float = 1,
replay_capacity: int = 10_000,
learning_rate: float = 1e-3,
seed: int = 0,
):
"""Initializes an active learning experiment."""
# Initializing the agent internals
prior = testbed_base.PriorKnowledge(
input_dim=input_dim,
num_train=100,
num_classes=2,
tau=1,
layers=2,
temperature=temperature,
)
self.observed_actions = set()
self.enn = enn_config.enn_ctor(prior)
batch_fwd = networks.make_batch_fwd(self.enn)
self.priority_fn = jax.jit(priority_fn_ctor(batch_fwd))
loss_fn = enn_config.loss_ctor(prior, self.enn)
loss_fn = functools.partial(loss_fn, self.enn)
def predicate(module_name: str, name: str, value) -> bool:
del name, value
return 'prior' not in module_name
def loss_with_decay(
params: hk.Params,
state: hk.State,
batch: datasets.ArrayBatch,
key: chex.PRNGKey) -> enn_base.LossOutput:
# Adding annealing l2 weight decay manually
data_loss, (state, metrics) = loss_fn(params, state, batch, key)
l2_weight = losses.l2_weights_with_predicate(params, predicate)
metrics['l2_weight'] = l2_weight
decay_loss = l2_weight_decay * l2_weight / batch.extra['num_steps']
return data_loss + decay_loss, (state, metrics)
self._loss_with_decay = jax.jit(loss_with_decay)
optimizer = optax.adam(learning_rate)
# Perform an SGD step on a batch of data
def sgd_step(
params: hk.Params,
opt_state: optax.OptState,
batch: datasets.ArrayBatch,
key: chex.PRNGKey,
) -> tp.Tuple[hk.Params, optax.OptState]:
unused_state = {}
grads, _ = jax.grad(
loss_with_decay, has_aux=True)(params, unused_state, batch, key)
updates, new_opt_state = optimizer.update(grads, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, new_opt_state
self._sgd_step = jax.jit(sgd_step)
# Generating the underlying function
self.rng = hk.PRNGSequence(seed)
self.actions = jax.random.normal(next(self.rng), [num_actions, input_dim])
# Create the logit_fn
if logit_ctor is None:
logit_fn = generative.make_2layer_mlp_logit_fn(
input_dim=input_dim,
temperature=temperature,
hidden=50,
num_classes=2,
key=next(self.rng),
)
else:
logit_fn = logit_ctor(next(self.rng))
logits = logit_fn(self.actions)
# Creating the testing environment
self.test_problem = _make_test_problem(
logit_fn=logit_fn, prior=prior, input_dim=input_dim, key=next(self.rng))
# Vector of probabilities of rewards for each action
self.probs = jax.nn.softmax(logits)[:, 1]
chex.assert_shape(self.probs, [num_actions])
self.max_prob = jnp.max(self.probs)
# Initializing the network
index = self.enn.indexer(next(self.rng))
self.params, self.network_state = self.enn.init(
next(self.rng), self.actions, index)
self.opt_state = optimizer.init(self.params)
self._steps_per_obs = steps_per_obs
self._temperature = temperature
self._batch_size = batch_size
self.l2_weight_decay = l2_weight_decay
self.replay = replay.Replay(capacity=replay_capacity)
self.logger = (
logger or loggers.make_default_logger('experiment', time_delta=0))
self.should_log = should_log
self.num_steps = 0
def select_action(params: hk.Params,
key: chex.PRNGKey) -> tp.Dict[str, chex.Array]:
priority_key, noise_key, selection_key = jax.random.split(key, 3)
# Randomly generate rewards for each action.
rewards = jax.random.bernoulli(noise_key, self.probs)
# Get the priority score for each action.
batch = datasets.ArrayBatch(x=self.actions, y=rewards)
dummy_state = {}
priorities, _ = self.priority_fn(params, dummy_state, batch, priority_key)
# Pick an action with the highest priority (with additional random noise).
action = _random_argmax(priorities, selection_key)
# Calculate the regret.
chosen_prob = self.probs[action]
reward = rewards[action]
regret = self.max_prob - chosen_prob
return { # pytype: disable=bad-return-type # numpy-scalars
'action': action,
'reward': reward,
'regret': regret,
'chosen_prob': chosen_prob, # for debugging
}
self._select_action = jax.jit(select_action)
def run(self, num_steps: int):
"""Runs a TS experiment for num_steps."""
for _ in range(num_steps):
self.num_steps += 1
action = self.step()
self.observed_actions.add(action)
if self.should_log(self.num_steps):
# Evaluate the ENN on test data
def enn_sampler(x: chex.Array, key: chex.PRNGKey) -> chex.Array:
index = self.enn.indexer(key)
net_out, unused_state = self.enn.apply(self.params, {}, x, index)
return networks.parse_net_output(net_out)
enn_quality = self.test_problem.evaluate_quality(jax.jit(enn_sampler))
results = {
'kl_estimate': float(enn_quality.kl_estimate),
'num_steps': self.num_steps,
'num_data': len(self.observed_actions),
}
results.update(_clean_results(enn_quality.extra))
self.logger.write(results)
for _ in range(self._steps_per_obs):
if self.num_steps >= 1:
self.params, self.opt_state = self._sgd_step(
self.params, self.opt_state, self._get_batch(), next(self.rng))
def step(self) -> int:
"""Selects action, update replay and return the selected action."""
results = self._select_action(self.params, next(self.rng))
self.replay.add([
self.actions[results['action']],
jnp.ones([1]) * results['reward'],
jnp.ones([1], dtype=jnp.int64) * self.num_steps,
])
return int(results['action'])
def _get_batch(self) -> datasets.ArrayBatch:
"""Samples a batch from the replay."""
actions, rewards, indices = self.replay.sample(self._batch_size)
return datasets.ArrayBatch( # pytype: disable=wrong-arg-types # numpy-scalars
x=actions,
y=rewards,
data_index=indices,
extra={'num_steps': self.num_steps},
)
def _make_test_problem(
logit_fn: generative.LogitFn,
prior: testbed_base.PriorKnowledge,
input_dim: int,
key: chex.PRNGKey,
num_classes: int = 2,
) -> likelihood.SampleBasedTestbed:
"""Makes the test environment."""
sampler_key, kl_key = jax.random.split(key)
# Defining dummy values for x_train_generator and num_train. These values are
# not used as we only use data_sampler to make test data.
dummy_x_train_generator = generative.make_gaussian_sampler(input_dim)
dummy_num_train = 10
data_sampler = generative.ClassificationEnvLikelihood(
logit_fn=logit_fn,
x_train_generator=dummy_x_train_generator, # UNUSED
x_test_generator=generative.make_gaussian_sampler(input_dim),
num_train=dummy_num_train, # UNUSED
key=sampler_key,
tau=1,
)
sample_based_kl = likelihood.CategoricalKLSampledXSampledY(
num_test_seeds=1000,
num_enn_samples=1000,
key=kl_key,
num_classes=num_classes,
)
sample_based_kl = likelihood.add_classification_accuracy_ece(
sample_based_kl,
num_test_seeds=1000,
num_enn_samples=100,
num_classes=num_classes,
)
return likelihood.SampleBasedTestbed(
data_sampler, sample_based_kl, prior)
def _random_argmax(vals: chex.Array,
key: chex.PRNGKey,
scale: float = 1e-5) -> int:
"""Selects argmax with additional random noise."""
noise = jax.random.normal(key, vals.shape)
return jnp.argmax(vals + scale * noise, axis=0)
def _clean_results(results: tp.Dict[str, tp.Any]) -> tp.Dict[str, tp.Any]:
"""Cleans the results for logging (can't log jax arrays)."""
def clean_result(value: tp.Any) -> tp.Any:
value = loggers.to_numpy(value)
if isinstance(value, chex.ArrayNumpy) and value.size == 1:
value = float(value)
return value
for key, value in results.items():
results[key] = clean_result(value)
return results
|
neural_testbed-master
|
neural_testbed/active_learning/experiment.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility functions for calculating likelihood."""
import dataclasses
import chex
from enn.metrics import base as metrics_base
import jax
import jax.numpy as jnp
from neural_testbed import base as testbed_base
from neural_testbed.likelihood import base as likelihood_base
def gaussian_log_likelihood(err: chex.Array,
cov: chex.Array) -> float:
"""Calculates the Gaussian log likelihood of a multivariate normal."""
first_term = len(err) * jnp.log(2 * jnp.pi)
_, second_term = jnp.linalg.slogdet(cov)
third_term = jnp.einsum('ai,ab,bi->i', err, jnp.linalg.pinv(cov), err)
return -0.5 * (first_term + second_term + third_term) # pytype: disable=bad-return-type # jax-types
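# Worked example (illustrative, not in the original source): for residuals err
# of shape [tau, 1] and an isotropic covariance cov = sigma**2 * jnp.eye(tau),
# the expression above reduces to
#   -0.5 * (tau * jnp.log(2 * jnp.pi * sigma**2) + jnp.sum(err**2) / sigma**2),
# which is the form checked by the diagonal-covariance likelihood test below.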
def optimized_gaussian_ll(err: chex.Array) -> float:
"""Computes the Gaussian LL based on optimized residual MSE."""
optimized_cov = jnp.mean(err ** 2) * jnp.eye(len(err))
return gaussian_log_likelihood(err, optimized_cov)
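# Note (added for clarity): jnp.mean(err ** 2) is the maximum-likelihood
# estimate of the noise variance for zero-mean Gaussian residuals, so this
# evaluates the Gaussian log likelihood at its best-fitting isotropic covariance.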
@dataclasses.dataclass
class GaussianSampleKL(likelihood_base.SampleBasedKL):
"""Evaluates KL according to optimized Gaussian residual model."""
num_test_seeds: int
num_enn_samples: int
enn_sigma: float
key: chex.PRNGKey
def __call__(
self,
enn_sampler: testbed_base.EpistemicSampler,
data_sampler: likelihood_base.GenerativeDataSampler,
) -> testbed_base.ENNQuality:
"""Evaluates KL according to optimized Gaussian residual model."""
batched_sampler = jax.vmap(enn_sampler, in_axes=[None, 0])
batched_ll = jax.vmap(gaussian_log_likelihood, in_axes=[0, None])
def kl_estimate(key: chex.PRNGKey) -> float:
"""Computes KL estimate on a single instance of test data."""
data, true_ll = data_sampler.test_data(key)
tau = data.x.shape[0]
data_keys = jax.random.split(key, self.num_enn_samples)
samples = batched_sampler(data.x, data_keys)
batched_err = samples - jnp.expand_dims(data.y, 0)
chex.assert_shape(batched_err, [self.num_enn_samples, tau, 1])
# ENN uses the enn_sigma to compute likelihood of sampled data
enn_cov = self.enn_sigma ** 2 * jnp.eye(tau)
sampled_ll = batched_ll(batched_err, enn_cov)
chex.assert_shape(sampled_ll, [self.num_enn_samples, 1])
# TODO(author2): Make sure of our KL computation.
ave_ll = metrics_base.average_sampled_log_likelihood(sampled_ll) # pytype: disable=wrong-arg-types # numpy-scalars
return true_ll - ave_ll
batched_kl = jax.jit(jax.vmap(kl_estimate))
kl_keys = jax.random.split(self.key, self.num_test_seeds)
sampled_kl = batched_kl(kl_keys)
return testbed_base.ENNQuality(kl_estimate=jnp.mean(sampled_kl))
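# Note (added for clarity): each kl_estimate term is the log likelihood of the
# sampled y under the true posterior minus the log of the ENN's average sample
# likelihood, so the mean over test seeds is a Monte Carlo estimate of the KL
# divergence between the true predictive and the ENN's mixture predictive.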
@dataclasses.dataclass
class GaussianSmoothedSampleKL(likelihood_base.SampleBasedKL):
"""Evaluates KL according to optimized Gaussian residual model."""
num_test_seeds: int
num_enn_samples: int
enn_sigma: float
key: chex.PRNGKey
cov_ridge: float = 1e-6 # To smooth out the covariance estimate
def __call__(
self,
enn_sampler: testbed_base.EpistemicSampler,
data_sampler: likelihood_base.GenerativeDataSampler,
) -> testbed_base.ENNQuality:
"""Evaluates KL according to optimized Gaussian residual model."""
batched_sampler = jax.vmap(enn_sampler, in_axes=[None, 0])
def kl_estimate(key: chex.PRNGKey) -> float:
"""Computes KL estimate on a single instance of test data."""
data_key, enn_key = jax.random.split(key)
data, true_ll = data_sampler.test_data(data_key)
tau = data.x.shape[0]
# Forward the ENN at many samples and form smoothed Gaussian approximation
enn_keys = jax.random.split(enn_key, self.num_enn_samples)
enn_samples = batched_sampler(data.x, enn_keys)
enn_mean = jnp.mean(enn_samples, axis=0)
chex.assert_shape(enn_mean, [tau, 1])
# Estimates the covariance matrix with bias (simple variance in tau=1).
enn_cov = jnp.cov(enn_samples[:, :, 0], rowvar=False, bias=True)
if tau == 1:
enn_cov = enn_cov[None, None]
enn_cov += self.cov_ridge * jnp.eye(tau)
chex.assert_shape(enn_cov, [tau, tau])
# Estimate KL based on combined distribution
err = data.y - enn_mean
cov = enn_cov + self.enn_sigma ** 2 * jnp.eye(tau)
unnormalized_kl = true_ll - gaussian_log_likelihood(err, cov)
return unnormalized_kl
batched_kl = jax.jit(jax.vmap(kl_estimate))
kl_keys = jax.random.split(self.key, self.num_test_seeds)
sampled_kl = batched_kl(kl_keys)
return testbed_base.ENNQuality(kl_estimate=jnp.mean(sampled_kl))
|
neural_testbed-master
|
neural_testbed/likelihood/regression.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for neural_testbed.likelihood."""
import dataclasses
from typing import Tuple
from absl.testing import absltest
from absl.testing import parameterized
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from neural_testbed import base as testbed_base
from neural_testbed import generative
from neural_testbed import leaderboard
from neural_testbed import likelihood
class BernoulliDataSampler(likelihood.GenerativeDataSampler):
"""Generates data sampled from a fixed Bernoulli(p)."""
def __init__(self, prob: float):
self.probs = jnp.array([1 - prob, prob])
self.x = jnp.ones([1, 1])
@property
def train_data(self) -> testbed_base.Data:
raise ValueError('This problem should not be used for training.')
def test_data(self, key: chex.PRNGKey) -> Tuple[testbed_base.Data, float]:
"""Generates a random sample of test data with posterior log-likelihood."""
bool_sample = jax.random.bernoulli(key, self.probs[1])
y = jnp.expand_dims(jnp.array(bool_sample, dtype=jnp.int32), 0)
log_likelihood = jnp.log(self.probs[y])
y = y[:, None]
chex.assert_shape(y, [1, 1])
return testbed_base.Data(self.x, y), log_likelihood
@dataclasses.dataclass
class BernoulliEpistemicSampler(testbed_base.EpistemicSampler):
"""ENN samples [0, logit_scale] with prob=p and [logit_scale, 0] with prob=1-p."""
prob: float = 0.5
logit_scale: float = 1e6
def __call__(self, x: chex.Array, key: chex.PRNGKey) -> chex.Array:
num_data = x.shape[0]
bool_sample = jax.random.bernoulli(key, self.prob, shape=[num_data])
y = jnp.array(bool_sample, dtype=jnp.int32)
logits = jax.nn.one_hot(y, num_classes=2) * self.logit_scale
chex.assert_shape(logits, [num_data, 2])
return logits
class DummyENN(testbed_base.EpistemicSampler):
"""A dummy ENN for classification."""
def __init__(self, logits: chex.Array, dummy_posterior: chex.Array,
x_test: chex.Array):
assert len(logits) == len(dummy_posterior)
self._logits = logits
self._posterior = dummy_posterior
self._num_models = len(dummy_posterior)
self._x_test = x_test
def __call__(self, x: chex.Array, seed: int = 0) -> chex.Array:
key = jax.random.PRNGKey(seed)
fn_index = jax.random.choice(key, a=self._num_models, p=self._posterior)
def get_index(x):
"""Returns the index for data x."""
return jnp.argmin(jnp.linalg.norm(x-self._x_test, axis=1))
data_index = get_index(x)
logits = self._logits[fn_index, data_index, :]
logits = jnp.expand_dims(logits, axis=0)
chex.assert_shape(logits, [1, 2])
return logits
class DummyRegressionENN(testbed_base.EpistemicSampler):
"""A dummy ENN for regression."""
def __init__(self, true_posterior_mean: chex.Array,
true_posterior_cov: chex.Array,
x_test: chex.Array):
assert len(x_test) == len(true_posterior_mean)
self._posterior_mean = true_posterior_mean
self._posterior_cov = true_posterior_cov
self._x_test = x_test
def __call__(self, x: chex.Array, key: chex.PRNGKey) -> chex.Array:
def get_index(x):
"""Returns the index for a single test data x."""
return jnp.argmin(jnp.linalg.norm(x-self._x_test, axis=1))
batched_get_index = jax.vmap(get_index)
# Finds the indices for all tau test data x.
test_x_indices = batched_get_index(x)
tau, unused_input_dim = x.shape
assert len(test_x_indices) == tau
# Sample the true function from the posterior mean
nngp_mean = self._posterior_mean[test_x_indices, 0]
chex.assert_shape(nngp_mean, [tau])
nngp_cov = self._posterior_cov[jnp.ix_(test_x_indices, test_x_indices)]
chex.assert_shape(nngp_cov, [tau, tau])
sampled_fn = jax.random.multivariate_normal(key, nngp_mean, nngp_cov)
enn_outputs = sampled_fn[:, None]
chex.assert_shape(enn_outputs, [tau, 1])
return enn_outputs
class UtilTest(parameterized.TestCase):
@parameterized.product(
err_val=[0, 1., 1e3],
cov_val=[1, 1e-3, 1e3])
def test_gaussian_log_likelihood_diagonal_cov(
self, err_val: float, cov_val: float):
"""Test the computed log likelihood in the simple case of diagonal cov."""
num_sample = 4
err = err_val * jnp.ones(shape=(num_sample, 1))
cov = cov_val * jnp.eye(num_sample)
log_likelihood = likelihood.gaussian_log_likelihood(err, cov)
expected_log_likelihood = -0.5 * (
num_sample * jnp.log(2 * jnp.pi * cov_val)
+ (jnp.linalg.norm(err) ** 2) / cov_val)
self.assertAlmostEqual(
log_likelihood, expected_log_likelihood,
msg=(f'Expected log likelihood to be {expected_log_likelihood} '
f'but received {log_likelihood}'),
delta=1e-5)
@parameterized.product(
true_prob=[0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5],
enn_err=[-0.1, -0.05, 0, 0.05, 0.1])
def test_bernoulli_sample_based_kl(self, true_prob: float, enn_err: float):
"""Tests the estimated sample-based KL is close to the analytic KL.
Compares the Bernoulli sample-based estimate against the analytic KL.
Checks that the absolute error is less than 0.05.
Args:
true_prob: true probability of class 1 in generative model.
enn_err: error in enn probability estimate (clipped to 0, 1).
"""
key = jax.random.PRNGKey(0)
enn_prob = jnp.clip(true_prob + enn_err, 0, 1)
# We test only when enn_prob is in (0, 1)
if 0 < enn_prob < 1:
true_kl = (true_prob * jnp.log(true_prob / enn_prob)
+ (1 - true_prob) * jnp.log((1- true_prob) / (1 - enn_prob)))
kl_estimator = likelihood.CategoricalKLSampledXSampledY(
num_test_seeds=1000,
num_enn_samples=1000,
key=key)
sample_kl = kl_estimator(
BernoulliEpistemicSampler(enn_prob), BernoulliDataSampler(true_prob))
self.assertAlmostEqual(
true_kl, sample_kl.kl_estimate,
msg=f'Expected KL={true_kl} but received {sample_kl}',
delta=5e-2,
)
@parameterized.product(
base_seed=[1, 1000],
input_dim=[1, 10, 100],
data_ratio=[1, 10],
num_test_x=[1000],
num_enn_samples=[100],
noise_std=[0.01, 0.1, 1],
tau=[1])
def test_perfect_regression_agent_zero_kl(self, base_seed: int,
input_dim: int,
data_ratio: int,
num_test_x: int,
num_enn_samples: int,
noise_std: float,
tau: int):
"""Tests the estimated KL for a perfect regression agent is very close to 0."""
num_train = int(data_ratio * input_dim)
num_test_seeds = num_test_x
rng = hk.PRNGSequence(base_seed)
x_train, x_test = leaderboard.gaussian_data(
next(rng), num_train, input_dim, num_test_x)
# Build the data sampler
data_sampler = generative.GPRegression(
kernel_fn=generative.make_benchmark_kernel(input_dim),
x_train=x_train,
x_test=x_test,
key=next(rng),
tau=tau,
noise_std=noise_std,
)
# Build a perfect dummy ENN agent
dummy_enn = DummyRegressionENN(data_sampler._test_mean,
data_sampler._test_cov, x_test)
# Calculate KL
sample_kl_estimator = likelihood.GaussianSmoothedSampleKL(
num_test_seeds=num_test_seeds,
num_enn_samples=num_enn_samples,
enn_sigma=noise_std,
key=next(rng))
sample_kl = sample_kl_estimator(dummy_enn, data_sampler)
self.assertAlmostEqual(
sample_kl.kl_estimate, 0.,
msg=f'sample kl={sample_kl} not close enough to 0',
delta=5e-2,
)
if __name__ == '__main__':
absltest.main()
|
neural_testbed-master
|
neural_testbed/likelihood/likelihood_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for neural_testbed.likelihood."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
import jax
import jax.numpy as jnp
from neural_testbed.likelihood import utils
class LogSumProdTest(parameterized.TestCase):
@parameterized.product(
num_centroids=[10, 100],
tau=[100, 1000],
magnitude=[-12, -10, -8],
)
def test_not_nan(self, num_centroids: int, tau: int, magnitude: float):
"""Check that we don't get Inf."""
def compute_ll(key: chex.PRNGKey) -> float:
num_obs = jax.random.poisson(key, 1, [num_centroids])
nu = num_obs / jnp.sum(num_obs)
q_hat = jnp.ones([num_centroids, tau]) * (10 ** magnitude)
q_hat += jnp.expand_dims(nu == 0, 1).astype(jnp.float32)
q_hat = jnp.clip(q_hat, 0, 1)
return utils.log_sum_prod(nu, q_hat)
keys = jax.random.split(jax.random.PRNGKey(0), 10)
log_likelihoods = jax.jit(jax.vmap(compute_ll))(keys)
assert jnp.all(jnp.isfinite(log_likelihoods))
if __name__ == '__main__':
absltest.main()
|
neural_testbed-master
|
neural_testbed/likelihood/utils_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility functions for calculating likelihood."""
import dataclasses
import functools
from typing import Dict, Optional, Tuple
import chex
from enn import metrics
import jax
import jax.numpy as jnp
from neural_testbed import base as testbed_base
from neural_testbed.likelihood import base as likelihood_base
from neural_testbed.likelihood import utils
def compute_discrete_kl(p: chex.Array, q: chex.Array) -> float:
"""KL-divergence between two discrete distributions with the same support."""
# squeeze p and q if needed
p = jnp.squeeze(p)
q = jnp.squeeze(q)
assert jnp.shape(p) == jnp.shape(q)
return jnp.nansum(jnp.multiply(p, jnp.log(p) - jnp.log(q)))
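# Illustrative example (not in the original source): for two Bernoulli
# distributions over the same support,
#   compute_discrete_kl(jnp.array([0.2, 0.8]), jnp.array([0.5, 0.5]))
# evaluates 0.2 * log(0.2 / 0.5) + 0.8 * log(0.8 / 0.5) ~= 0.193.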
@dataclasses.dataclass
class CategoricalKLSampledXSampledY(likelihood_base.SampleBasedKL):
"""Evaluates KL according to categorical model, sampling X and output Y.
This approach samples an (x, y) output from the enn and data sampler and uses
this to estimate the KL divergence.
"""
num_test_seeds: int
num_enn_samples: int
key: chex.PRNGKey
num_classes: Optional[int] = None # Purely for shape checking.
def __call__(
self,
enn_sampler: testbed_base.EpistemicSampler,
data_sampler: likelihood_base.GenerativeDataSampler,
) -> testbed_base.ENNQuality:
"""Evaluates KL according to categorical model."""
kl_key, enn_key = jax.random.split(self.key, 2)
test_data_fn = jax.jit(data_sampler.test_data)
def get_logits(x: chex.Array) -> chex.Array:
"""Returns logits for input x."""
sample_logits = functools.partial(enn_sampler, x)
enn_keys = jax.random.split(enn_key, self.num_enn_samples)
try:
logits = jax.lax.map(sample_logits, enn_keys)
except (jax.errors.JAXTypeError, jax.errors.JAXIndexError) as e:
# TODO(author1): replace with proper logging.
print(f'Was not able to run enn_sampler inside jit due to: {e}')
logits = jnp.array([sample_logits(k) for k in enn_keys])
return logits
def kl_estimator(key: chex.PRNGKey) -> float:
"""Computes KL estimate on a single instance of test data."""
data, true_ll = test_data_fn(key)
logits = get_logits(data.x)
return true_ll - metrics.calculate_joint_ll(logits, data.y)
kl_keys = jax.random.split(kl_key, self.num_test_seeds)
# Attempt to use fully-jitted code, but if the enn_sampler is not able to
# jax.jit then we fall back on another implementation.
try:
kl_estimates = jax.lax.map(kl_estimator, kl_keys)
except (jax.errors.JAXTypeError, jax.errors.JAXIndexError) as e:
# TODO(author1): replace with proper logging.
print(f'Was not able to run enn_sampler inside jit due to: {e}')
kl_estimates = jnp.array([kl_estimator(k) for k in kl_keys])
return utils.parse_kl_estimates(kl_estimates)
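# Usage sketch (illustrative only; the sizes and key are arbitrary):
#   kl_metric = CategoricalKLSampledXSampledY(
#       num_test_seeds=1000, num_enn_samples=1000,
#       key=jax.random.PRNGKey(0), num_classes=2)
#   quality = kl_metric(enn_sampler, data_sampler)  # -> testbed_base.ENNQuality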
@dataclasses.dataclass
class ClassificationSampleAccEce:
"""Evaluates accuracy and expected calibration error (ece)."""
num_test_seeds: int
num_enn_samples: int
key: chex.PRNGKey
num_bins: int = 10 # Number of bins used in calculating ECE
num_classes: Optional[int] = None # Purely for shape checking.
def __call__(
self,
enn_sampler: testbed_base.EpistemicSampler,
data_sampler: likelihood_base.GenerativeDataSampler,
) -> Dict[str, float]:
"""Evaluates accuracy and expected calibration error (ece)."""
data_key, enn_key = jax.random.split(self.key)
calculate_accuracy = metrics.make_accuracy_calculator()
calculate_ece = metrics.SingleBatchECE(self.num_bins)
def get_logits(x: chex.Array) -> chex.Array:
"""Returns logits for input x."""
sample_logits = functools.partial(enn_sampler, x)
enn_keys = jax.random.split(enn_key, self.num_enn_samples)
try:
logits = jax.lax.map(sample_logits, enn_keys)
except (jax.errors.JAXTypeError, jax.errors.JAXIndexError) as e:
# TODO(author1): replace with proper logging.
print(f'Was not able to run enn_sampler inside jit due to: {e}')
logits = jnp.array([sample_logits(k) for k in enn_keys])
return logits
# We need all train data. We can get all the data by calling
# data_sampler.train_data
train_data = data_sampler.train_data
train_data_logits = get_logits(train_data.x)
train_acc = calculate_accuracy(train_data_logits, train_data.y)
train_ece = calculate_ece(train_data_logits, train_data.y)
# We need all test data. We can get a batch of the data by calling
# data_sampler.test_data(key)
# Keys for generating `self.num_test_seeds` batches of test data
test_keys = jax.random.split(data_key, self.num_test_seeds)
def test_x_y(key: chex.PRNGKey) -> Tuple[chex.Array, chex.Array]:
data, _ = data_sampler.test_data(key)
return (data.x, data.y)
test_x, test_y = jax.lax.map(test_x_y, test_keys)
# test_x has the shape of [num_test_seeds, tau] + single_data_shape. We
# reshape it to [num_test_seeds * tau, ] + single_data_shape.
test_x = jnp.reshape(test_x, (test_x.shape[0] * test_x.shape[1],) +
test_x.shape[2:])
# test_y has the shape of [num_test_seeds, tau, 1]. We reshape it to
# [num_test_seeds * tau, 1].
test_y = jnp.reshape(test_y, (test_y.shape[0] * test_y.shape[1],) +
test_y.shape[2:])
test_data = testbed_base.Data(x=test_x, y=test_y)
test_data_logits = get_logits(test_data.x)
test_acc = calculate_accuracy(test_data_logits, test_data.y)
test_ece = calculate_ece(test_data_logits, test_data.y)
return {
'train_acc': train_acc,
'test_acc': test_acc,
'train_ece': train_ece,
'test_ece': test_ece
}
def add_classification_accuracy_ece(
sample_based_kl: likelihood_base.SampleBasedKL,
num_test_seeds: int,
num_enn_samples: int,
num_bins: int = 10,
num_classes: Optional[int] = None,
**kwargs) -> likelihood_base.SampleBasedKL:
"""Adds classification accuracy to the metric evaluated by sample_based_kl."""
del kwargs
sample_based_acc_ece = ClassificationSampleAccEce(
num_test_seeds=num_test_seeds,
num_enn_samples=num_enn_samples,
num_classes=num_classes,
num_bins=num_bins,
key=jax.random.PRNGKey(0)
)
def evaluate_quality(
enn_sampler: testbed_base.EpistemicSampler,
data_sampler: likelihood_base.GenerativeDataSampler,
) -> testbed_base.ENNQuality:
"""Returns KL estimate and classification accuracy as ENN quality metrics."""
enn_quality = sample_based_kl(enn_sampler, data_sampler)
# Attempt to use jitted code, but if the enn_sampler is not able to
# jax.jit then skip adding accuracy.
try:
eval_stats = sample_based_acc_ece(enn_sampler, data_sampler)
enn_quality.extra.update(eval_stats)
except (jax.errors.JAXTypeError, jax.errors.JAXIndexError) as e:
print(f'Skipping accuracy. The enn_sampler is not jittable due to:\n{e}')
return testbed_base.ENNQuality(
kl_estimate=enn_quality.kl_estimate, extra=enn_quality.extra)
return evaluate_quality
|
neural_testbed-master
|
neural_testbed/likelihood/classification.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Exposing the public methods of likelihood."""
# TODO(author2): Work on reducing this public scope
# Base classes
from neural_testbed.likelihood.base import GenerativeDataSampler
from neural_testbed.likelihood.base import SampleBasedKL
from neural_testbed.likelihood.base import SampleBasedTestbed
# Classification
from neural_testbed.likelihood.classification import add_classification_accuracy_ece
from neural_testbed.likelihood.classification import CategoricalKLSampledXSampledY
from neural_testbed.likelihood.classification import ClassificationSampleAccEce
from neural_testbed.likelihood.classification import compute_discrete_kl
# Classification Projection
from neural_testbed.likelihood.classification_projection import CategoricalClusterKL
from neural_testbed.likelihood.classification_projection import JointLLCalculatorProjection
from neural_testbed.likelihood.classification_projection import KmeansCluster
from neural_testbed.likelihood.classification_projection import RandomProjection
# Regression
from neural_testbed.likelihood.regression import gaussian_log_likelihood
from neural_testbed.likelihood.regression import GaussianSampleKL
from neural_testbed.likelihood.regression import GaussianSmoothedSampleKL
from neural_testbed.likelihood.regression import optimized_gaussian_ll
|
neural_testbed-master
|
neural_testbed/likelihood/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility functions for likelihood code."""
import itertools
import chex
import jax
import jax.numpy as jnp
import jax.scipy.special as jsp
from neural_testbed import base as testbed_base
def enumerate_all_features(dim: int, num_values: int) -> chex.Array:
"""Helper function to create all categorical features."""
features = jnp.array(list(itertools.product(range(num_values), repeat=dim)))
chex.assert_shape(features, [num_values ** dim, dim])
return features.astype(jnp.int32)
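# Illustrative example (not in the original source):
#   enumerate_all_features(dim=2, num_values=2)
#   # -> [[0, 0], [0, 1], [1, 0], [1, 1]] with shape [4, 2]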
def log_sum_prod(nu: chex.Array, q_hat: chex.Array) -> float:
"""Efficiently computes log(sum_k nu_k prod_t q_{k,t})."""
chex.assert_rank([nu, q_hat], [1, 2])
chex.assert_equal(nu.shape[0], q_hat.shape[0])
# Numerical issue: the entries of q_hat whose corresponding nu is zero can be
# 0 or 1. Since these terms ultimately contribute nothing to the sum, we can
# replace them with anything; for numerical stability we set them to 1e-9.
zero_nus = jnp.expand_dims(nu == 0, axis=1)
amended_q_hat = q_hat * (1 - zero_nus) + (1e-9 * zero_nus)
log_prod_qs = jnp.sum(jnp.log(amended_q_hat), axis=1)
chex.assert_equal_shape([nu, log_prod_qs])
# Use rebasing trick to maintain numerical stability
base = jnp.max(log_prod_qs)
ll = jsp.logsumexp(log_prod_qs - base, b=nu) + base
return ll
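# Note (added for clarity): this is a numerically stable version of
#   jnp.log(jnp.sum(nu * jnp.prod(q_hat, axis=1))),
# which would underflow whenever prod_t q_{k,t} is extremely small.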
def parse_kl_estimates(kl_estimates: chex.Array) -> testbed_base.ENNQuality:
"""Parse the finite elements of KL estimates."""
# TODO(author5): This section of the code is designed to clip errant inf.
# We don't exactly know why this is happening but decide to clip infinite
# estimate by the maximum finite KL and record the number of inf estimates.
kl_estimates_finite = kl_estimates[jnp.isfinite(kl_estimates)]
pct_finite_kl = len(kl_estimates_finite) / len(kl_estimates)
kl_estimate_max = jax.lax.cond(
len(kl_estimates_finite) >= 1,
lambda _: jnp.max(_, initial=-jnp.inf),
lambda _: jnp.inf,
kl_estimates_finite,
)
clipped_estimates = jnp.minimum(kl_estimates, kl_estimate_max)
kl_estimate = jnp.mean(clipped_estimates)
extra = {
'kl_estimate_std': float(jnp.std(kl_estimates)),
'train_acc': None,
'test_acc': None,
'train_ece': None,
'test_ece': None,
'pct_finite_kl': float(pct_finite_kl)
}
return testbed_base.ENNQuality(kl_estimate=kl_estimate, extra=extra)
|
neural_testbed-master
|
neural_testbed/likelihood/utils.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for classification_projection."""
from typing import NamedTuple
from absl.testing import absltest
from absl.testing import parameterized
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import jax.scipy.special as jsp
from neural_testbed.likelihood import classification_projection
from neural_testbed.likelihood import utils
class _ProblemConfig(NamedTuple):
probits: chex.Array
y: chex.Array
brute_ll: chex.Array
def make_naive_ensemble_problem(num_enn_samples: int,
tau: int,
num_classes: int,
key: chex.PRNGKey,
temperature: float = 1.) -> _ProblemConfig:
"""Create a naive ensemble problem."""
logit_key, y_key = jax.random.split(key)
# sample logits, probs, and probits
logits = jax.random.normal(
logit_key, shape=[num_enn_samples, tau, num_classes]) / temperature
probs = jax.nn.softmax(logits)
probits = jsp.ndtri(probs)
chex.assert_shape(probits, [num_enn_samples, tau, num_classes])
# sample random labels
y = jax.random.categorical(y_key, logits[0, :, :])
chex.assert_shape(y, [tau])
# compute the log-likelihood in a brute-force way
log_probs_correct = jnp.log(probs[:, jnp.arange(tau), y])
chex.assert_shape(log_probs_correct, [num_enn_samples, tau])
log_probs_model = jnp.sum(log_probs_correct, axis=1)
chex.assert_shape(log_probs_model, [num_enn_samples])
brute_ll = jsp.logsumexp(log_probs_model) - jnp.log(num_enn_samples)
return _ProblemConfig(probits, y[:, None], brute_ll)
def make_repeated_ensemble_problem(num_enn_samples: int,
tau: int,
num_classes: int,
key: chex.PRNGKey,
temperature: float = 1.) -> _ProblemConfig:
"""Create a naive ensemble problem."""
logit_key, y_key = jax.random.split(key)
assert num_enn_samples % 2 == 0
# sample logits, probs, and probits
logits = jax.random.normal(
logit_key, shape=[int(num_enn_samples / 2), tau, num_classes
]) / temperature
logits = jnp.concatenate([logits, logits], axis=0)
probs = jax.nn.softmax(logits)
probits = jsp.ndtri(probs)
chex.assert_shape(probits, [num_enn_samples, tau, num_classes])
# sample random labels
y = jax.random.categorical(y_key, logits[0, :, :])
chex.assert_shape(y, [tau])
# compute the log-likelihood in a brute-force way
log_probs_correct = jnp.log(probs[:, jnp.arange(tau), y])
chex.assert_shape(log_probs_correct, [num_enn_samples, tau])
log_probs_model = jnp.sum(log_probs_correct, axis=1)
chex.assert_shape(log_probs_model, [num_enn_samples])
brute_ll = jsp.logsumexp(log_probs_model) - jnp.log(num_enn_samples)
return _ProblemConfig(probits, y[:, None], brute_ll)
def make_ll_estimate(cluster_alg: classification_projection.ClusterAlg):
def ll_estimate(
probits: chex.Array, y: chex.Array, key: chex.PRNGKey) -> chex.Array:
# Perform appropriate clustering
num_enn_samples = probits.shape[0]
counts, centers = cluster_alg(probits, y, key)
# Compute the model log likelihood
model_ll = jax.jit(utils.log_sum_prod)(counts / num_enn_samples, centers)
return model_ll
return jax.jit(ll_estimate)
class ClassificationParityTest(parameterized.TestCase):
@parameterized.product(
num_enn_samples=[1000, 10000],
tau=[10, 30],
num_classes=[2, 4, 10],
cluster_only_correct_class=[True, False],
)
def test_random_projection(
self, num_enn_samples: int, tau: int, num_classes: int,
cluster_only_correct_class: bool):
rng = hk.PRNGSequence(999)
probits, y, brute_ll = make_naive_ensemble_problem(
num_enn_samples, tau, num_classes, key=next(rng))
cluster_alg = classification_projection.RandomProjection(
7, cluster_only_correct_class)
ll_estimate = make_ll_estimate(cluster_alg)
model_ll = ll_estimate(probits, y, next(rng))
# Check the relative error is not too high
rel_err = float(jnp.abs(model_ll - brute_ll)) / float(jnp.abs(brute_ll))
assert rel_err < 0.25, f'relative error is {rel_err}'
@parameterized.product(
num_enn_samples=[1000, 10000],
tau=[10, 30],
num_classes=[2, 4, 10],
cluster_only_correct_class=[True, False],
)
def test_random_projection_repeated(
self, num_enn_samples: int, tau: int, num_classes: int,
cluster_only_correct_class: bool):
rng = hk.PRNGSequence(999)
probits, y, brute_ll = make_repeated_ensemble_problem(
num_enn_samples, tau, num_classes, key=next(rng))
cluster_alg = classification_projection.RandomProjection(
7, cluster_only_correct_class)
ll_estimate = make_ll_estimate(cluster_alg)
model_ll = ll_estimate(probits, y, next(rng))
# Check the relative error is not too high
rel_err = float(jnp.abs(model_ll - brute_ll)) / float(jnp.abs(brute_ll))
assert rel_err < 0.30, f'relative error is {rel_err}'
@parameterized.product(
num_enn_samples=[100, 1000],
tau=[10, 30],
num_classes=[2, 4, 10],
cluster_only_correct_class=[True, False],
)
def test_kmeans_cluster(
self, num_enn_samples: int, tau: int, num_classes: int,
cluster_only_correct_class: bool):
rng = hk.PRNGSequence(999)
probits, y, brute_ll = make_naive_ensemble_problem(
num_enn_samples, tau, num_classes, key=next(rng))
cluster_alg = classification_projection.KmeansCluster(
num_enn_samples, max_iter=10,
cluster_only_correct_class=cluster_only_correct_class)
ll_estimate = make_ll_estimate(cluster_alg)
model_ll = ll_estimate(probits, y, next(rng))
# TODO(author2): Push this relative error factor lower
# Check the relative error is not too high
rel_err = float(jnp.abs(model_ll - brute_ll)) / float(jnp.abs(brute_ll))
assert rel_err < 0.01, f'relative error is {rel_err}'
if __name__ == '__main__':
absltest.main()
|
neural_testbed-master
|
neural_testbed/likelihood/classification_projection_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Calculating classification likelihood based on random projection.
WARNING: THIS IS EXPERIMENTAL CODE AND NOT YET AT GOLD QUALITY.
"""
# TODO(author2): sort out the code quality here.
import dataclasses
import functools
from typing import Tuple
import chex
from enn.extra import kmeans
from enn.metrics import base as metrics_base
import jax
import jax.numpy as jnp
import jax.scipy.special as jsp
from neural_testbed import base as testbed_base
from neural_testbed.likelihood import base as likelihood_base
from neural_testbed.likelihood import utils
import typing_extensions
_Counts = chex.Array # Number of elements in each cluster: [num_clusters]
_Centers = chex.Array  # The center of each cluster: [num_clusters, tau]
class ClusterAlg(typing_extensions.Protocol):
def __call__(self,
probits: chex.Array,
y: chex.Array,
key: chex.PRNGKey) -> Tuple[_Counts, _Centers]:
"""Uses probits and y labels to compute counts and centers."""
@dataclasses.dataclass
class JointLLCalculatorProjection(metrics_base.MetricCalculator):
"""Computes joint ll aggregated over enn samples using projection method.
Depending on data batch_size (can be inferred from logits and labels), this
function computes joint ll for tau=batch_size aggregated over enn samples. If
data batch_size is one, this function computes marginal ll.
"""
cluster_alg: ClusterAlg # Algorithm for clustering
clip_probits: float # Clip absolute value of probits at this level
cluster_key: chex.PRNGKey # An RNG key
def __call__(self, logits: chex.Array, labels: chex.Array) -> float:
"""Computes joint ll aggregated over enn samples using projection method."""
num_enn_samples, tau, num_classes = logits.shape
chex.assert_shape(labels, [tau, 1])
def logits_to_probits(logits: chex.Array) -> chex.Array:
probs = jax.nn.softmax(logits)
probits = jsp.ndtri(probs)
probits = jnp.clip(probits, -self.clip_probits, self.clip_probits)
chex.assert_shape(probits, [tau, num_classes])
return probits
# Convert logits to probits
probits = jax.lax.map(logits_to_probits, logits)
# Perform appropriate clustering
counts, centers = self.cluster_alg(probits, labels, self.cluster_key)
chex.assert_rank([counts, centers], [1, 2])
chex.assert_shape(centers, [counts.shape[0], tau])
# Compute the model log likelihood
log_sum_prod = jax.jit(utils.log_sum_prod)
avg_ll = log_sum_prod(counts / num_enn_samples, centers)
return avg_ll
@dataclasses.dataclass
class CategoricalClusterKL(likelihood_base.SampleBasedKL):
"""Evaluates KL according to random clustering of ENN samples."""
cluster_alg: ClusterAlg # Algorithm for clustering
num_enn_samples: int # Number of samples from ENN environment model
num_test_seeds: int # Number of testing seeds for the data generation
key: chex.PRNGKey # RNG key
clip_probits: float = 5 # Clip absolute value of probits at this level
def __call__(
self,
enn_sampler: testbed_base.EpistemicSampler,
data_sampler: likelihood_base.GenerativeDataSampler,
) -> testbed_base.ENNQuality:
"""Evaluates KL according to categorical model."""
kl_key, enn_key, cluster_key = jax.random.split(self.key, 3)
joint_ll_calculator = JointLLCalculatorProjection(
self.cluster_alg,
self.clip_probits,
cluster_key)
test_data_fn = jax.jit(data_sampler.test_data)
def get_logits(x: chex.Array) -> chex.Array:
"""Returns logits for input x."""
sample_logits = functools.partial(enn_sampler, x)
enn_keys = jax.random.split(enn_key, self.num_enn_samples)
try:
logits = jax.lax.map(sample_logits, enn_keys)
except (jax.errors.JAXTypeError, jax.errors.JAXIndexError) as e:
# TODO(author1): replace with proper logging.
print(f'Was not able to run enn_sampler inside jit due to: {e}')
logits = jnp.array([sample_logits(k) for k in enn_keys])
return logits
def kl_estimate(key: chex.PRNGKey) -> chex.Array:
"""Estimates the KL for one realization of the data."""
data, true_ll = test_data_fn(key)
logits = get_logits(data.x)
return true_ll - joint_ll_calculator(logits, data.y)
kl_keys = jax.random.split(kl_key, self.num_test_seeds)
try:
kl_estimates = jax.lax.map(kl_estimate, kl_keys)
except (jax.errors.JAXTypeError, jax.errors.JAXIndexError) as e:
# TODO(author1): replace with proper logging.
print(f'Was not able to run kl_estimate inside jit due to: {e}')
kl_estimates = jnp.array([kl_estimate(k) for k in kl_keys])
return utils.parse_kl_estimates(kl_estimates)
@dataclasses.dataclass
class KmeansCluster(ClusterAlg):
"""Clusters probits based on K-Means algorithm."""
num_centroids: int # Number of KMeans centroids
max_iter: int = 10 # Number of iterations of KMeans
cluster_only_correct_class: bool = False # Only cluster on correct class
def __call__(self,
probits: chex.Array,
y: chex.Array,
key: chex.PRNGKey) -> Tuple[_Counts, _Centers]:
"""Cluster ENN probits according to K-Means."""
chex.assert_rank(probits, 3)
num_enn_samples, tau, num_classes = probits.shape
if self.cluster_only_correct_class:
flat_probits = probits[:, jnp.arange(tau), y.ravel()]
chex.assert_shape(flat_probits, [num_enn_samples, tau])
else:
flat_probits = jnp.reshape(probits, [num_enn_samples, tau * num_classes])
# Fit the KMeans clustering algorithm
kmeans_cluster = kmeans.KMeansCluster(
self.num_centroids, self.max_iter, key)
output = kmeans_cluster.fit(flat_probits)
# Parse the output and reshape.
counts = output.counts_per_centroid
chex.assert_shape(counts, [self.num_centroids])
# obtain classes from the output and convert to one-hot encoding
classes = jax.nn.one_hot(output.classes, self.num_centroids)
chex.assert_shape(classes, [num_enn_samples, self.num_centroids])
# get probs_correct
if self.cluster_only_correct_class:
probits_correct = flat_probits
else:
probits_correct = probits[:, jnp.arange(tau), y.ravel()]
chex.assert_shape(probits_correct, [num_enn_samples, tau])
probs_correct = jsp.ndtr(probits_correct)
# unnormalized prob_correct_centers
probs_correct_centers = jnp.einsum('ij,ik->jk', classes, probs_correct)
chex.assert_shape(probs_correct_centers, [self.num_centroids, tau])
# normalize prob_correct_centers
counts_denominator = counts[:, None]
chex.assert_shape(counts_denominator, [self.num_centroids, 1])
probs_correct_centers = jnp.divide(probs_correct_centers + 5e-10,
counts_denominator + 1e-9)
chex.assert_shape(probs_correct_centers, [self.num_centroids, tau])
return counts, probs_correct_centers
@dataclasses.dataclass
class RandomProjection(ClusterAlg):
"""Cluster ENN probits according to random projections."""
dimension: int
cluster_only_correct_class: bool = False # Only cluster on correct class
normalize: bool = False # Whether to apply per-class normalization
def __call__(self,
probits: chex.Array,
y: chex.Array,
key: chex.PRNGKey) -> Tuple[_Counts, _Centers]:
def cluster_fn(probits: chex.Array,
y: chex.Array,
key: chex.PRNGKey) -> Tuple[_Counts, _Centers]:
chex.assert_rank(probits, 3)
num_enn_samples, tau, num_classes = probits.shape
if self.cluster_only_correct_class:
flat_probits = probits[:, jnp.arange(tau), y.ravel()]
target_shape = [num_enn_samples, tau]
else:
flat_probits = jnp.reshape(
probits, [num_enn_samples, tau * num_classes])
target_shape = [num_enn_samples, tau * num_classes]
if self.normalize:
flat_probits -= jnp.mean(flat_probits, axis=0, keepdims=True)
svd_u, _, svd_v = jnp.linalg.svd(flat_probits, full_matrices=False)
flat_probits = jnp.dot(svd_u, svd_v)
a_key, b_key = jax.random.split(key)
chex.assert_shape(flat_probits, target_shape)
# Compute the correct label probabilities
probs = jsp.ndtr(probits)
select_label = jax.jit(jax.vmap(_select_label, in_axes=[0, None]))
probs_correct = select_label(probs, y)
chex.assert_shape(probs_correct, [num_enn_samples, tau])
# Perform the projection
if self.cluster_only_correct_class:
a = jax.random.normal(a_key, shape=[self.dimension, tau])
else:
a = jax.random.normal(a_key, shape=[self.dimension, tau * num_classes])
b = jax.random.normal(b_key, shape=[self.dimension])
def compute_projection(flat_probit: chex.Array) -> chex.Array:
projection = jnp.dot(a, flat_probit) + b
return (projection > 0).astype(jnp.int32)
projections = jax.vmap(compute_projection)(flat_probits)
chex.assert_shape(projections, [num_enn_samples, self.dimension])
batched_equal = jax.vmap(jnp.array_equal, in_axes=(0, None))
num_features = min(num_enn_samples, 2**self.dimension)
# Choose features as the unique projections, and also get counts
features, counts = jnp.unique(projections, axis=0,
size=num_features,
return_counts=True)
chex.assert_shape(features, [num_features, self.dimension])
chex.assert_shape(counts, [num_features])
# TODO(author2): Consider simplifying this step
# Compute the average for each feature
def single_center_per_t(single_f: chex.Array, t: int) -> chex.Array:
# Check for the rows where samples match f
valid_masks = batched_equal(
projections, single_f).astype(jnp.float32)
chex.assert_shape(valid_masks, [num_enn_samples])
numerator = jnp.sum(valid_masks * probs_correct[:, t])
denominator = jnp.sum(valid_masks)
return (numerator + 1e-6) / (denominator + 1e-6)
single_center = jax.vmap(single_center_per_t, in_axes=[None, 0])
map_fn = lambda x: single_center(x, jnp.arange(tau))
all_centers = jax.lax.map(map_fn, features)
chex.assert_shape(all_centers, [num_features, tau])
return counts, all_centers
return jax.jit(cluster_fn)(probits, y, key)
def _select_label(probs: chex.Array, y: chex.Array) -> chex.Array:
chex.assert_rank(probs, 2)
labels = jnp.squeeze(y, axis=1)
chex.assert_rank(labels, 1)
return probs[jnp.arange(probs.shape[0]), labels]
|
neural_testbed-master
|
neural_testbed/likelihood/classification_projection.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility functions for calculating likelihood."""
import abc
from typing import Tuple
import chex
from neural_testbed import base as testbed_base
import typing_extensions
class GenerativeDataSampler(abc.ABC):
"""An interface for generative test/train data."""
@property
@abc.abstractmethod
def train_data(self) -> testbed_base.Data:
"""Access training data from the GP for ENN training."""
@abc.abstractmethod
def test_data(self, key: chex.PRNGKey) -> Tuple[testbed_base.Data, float]:
"""Generates a random sample of test data with posterior log-likelihood.
WARNING: This method should be pure, for use in jax.jit.
Args:
key: random number generator key.
"""
class SampleBasedKL(typing_extensions.Protocol):
"""An interface for estimating KL divergence from data."""
def __call__(self,
enn_sampler: testbed_base.EpistemicSampler,
data_sampler: GenerativeDataSampler) -> testbed_base.ENNQuality:
"""Uses ENN and data samples to estimate the KL divergence."""
class SampleBasedTestbed(testbed_base.TestbedProblem):
"""A simpler wrapper to make a testbed instance out of data sampler."""
def __init__(self,
data_sampler: GenerativeDataSampler,
sample_based_kl: SampleBasedKL,
prior_knowledge: testbed_base.PriorKnowledge):
self.data_sampler = data_sampler
self.sample_based_kl = sample_based_kl
self._prior_knowledge = prior_knowledge
@property
def train_data(self) -> testbed_base.Data:
return self.data_sampler.train_data
def evaluate_quality(
self,
enn_sampler: testbed_base.EpistemicSampler) -> testbed_base.ENNQuality:
"""Evaluate the quality of a posterior sampler."""
return self.sample_based_kl(enn_sampler, self.data_sampler)
@property
def prior_knowledge(self) -> testbed_base.PriorKnowledge:
return self._prior_knowledge
|
neural_testbed-master
|
neural_testbed/likelihood/base.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for neural_testbed.real_data.data_sampler."""
from absl.testing import absltest
from absl.testing import parameterized
from neural_testbed import base as testbed_base
from neural_testbed.real_data import data_sampler as real_data_sampler
import numpy as np
class RealDataClassificationTest(parameterized.TestCase):
@parameterized.product(
data_size=[1, 10],
input_dim=[1, 10],
num_classes=[1, 2, 10],
tau=[1, 10])
def test_data(self,
data_size: int,
input_dim: int,
num_classes: int,
tau: int):
"""Test returns for the train_data and test_data methods."""
x = np.random.rand(data_size, input_dim)
y = np.random.randint(num_classes, size=(data_size, 1))
data = testbed_base.Data(x=x, y=y)
train_data, test_data = data, data
data_sampler = real_data_sampler.RealDataSampler(
train_data=train_data,
test_sampler=real_data_sampler.make_global_sampler(test_data),
tau=tau,
)
train_data = data_sampler.train_data
np.testing.assert_allclose(train_data.x, x, rtol=1e-6, atol=0)
np.testing.assert_allclose(train_data.y, y, rtol=1e-6, atol=0)
if __name__ == '__main__':
absltest.main()
|
neural_testbed-master
|
neural_testbed/real_data/data_sampler_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Storing the name of supported datasets and their information."""
import dataclasses
import os
from typing import List
@dataclasses.dataclass
class DatasetInfo:
dataset_name: str
num_classes: int
input_dim: int
num_train: int
num_test: int
def regression_datasets():
"""Returns a dictionary of regression datasets currently supported."""
datasets = {
'boston_housing':
DatasetInfo(
dataset_name='boston_housing',
num_classes=1,
input_dim=13,
num_train=202,
num_test=51),
'concrete_strength':
DatasetInfo(
dataset_name='concrete_strength',
num_classes=1,
input_dim=8,
num_train=824,
num_test=206),
'energy_efficiency':
DatasetInfo(
dataset_name='energy_efficiency',
num_classes=1,
input_dim=8,
num_train=614,
num_test=154),
'naval_propulsion':
DatasetInfo(
dataset_name='naval_propulsion',
num_classes=1,
input_dim=16,
num_train=9547,
num_test=2387),
'kin8nm':
DatasetInfo(
dataset_name='kin8nm',
num_classes=1,
input_dim=8,
num_train=6553,
num_test=1639),
'power_plant':
DatasetInfo(
dataset_name='power_plant',
num_classes=1,
input_dim=4,
num_train=7654,
num_test=1914),
'protein_structure':
DatasetInfo(
dataset_name='protein_structure',
num_classes=1,
input_dim=9,
num_train=36584,
num_test=9146),
'wine':
DatasetInfo(
dataset_name='wine',
num_classes=1,
input_dim=11,
num_train=1279,
num_test=320),
'yacht_hydrodynamics':
DatasetInfo(
dataset_name='yacht_hydrodynamics',
num_classes=1,
input_dim=7,
num_train=246,
num_test=62)
}
return datasets
def structured_datasets():
"""Returns a dictionary of structured datasets currently supported."""
datasets = {
'iris':
DatasetInfo(
dataset_name='iris',
num_classes=3,
input_dim=4,
num_train=120,
num_test=30),
'wine_quality':
DatasetInfo(
dataset_name='wine_quality',
num_classes=11,
input_dim=11,
num_train=3918,
num_test=980),
'german_credit_numeric':
DatasetInfo(
dataset_name='german_credit_numeric',
num_classes=2,
input_dim=24,
num_train=800,
num_test=200),
}
return datasets
def image_datasets():
"""Returns a dictionary of image datasets currently supported."""
dataset = {
'cmaterdb':
DatasetInfo(
dataset_name='cmaterdb',
num_classes=10,
input_dim=3_072,
num_train=5_000,
num_test=1_000),
'mnist':
DatasetInfo(
dataset_name='mnist',
num_classes=10,
input_dim=784,
num_train=60_000,
num_test=10_000),
'emnist/digits':
DatasetInfo(
dataset_name='emnist/digits',
num_classes=10,
input_dim=784,
num_train=240_000,
num_test=40_000),
'emnist/letters':
DatasetInfo(
dataset_name='emnist/letters',
num_classes=37,
input_dim=784,
num_train=88_800,
num_test=14_800),
'fashion_mnist':
DatasetInfo(
dataset_name='fashion_mnist',
num_classes=10,
input_dim=784,
num_train=60_000,
num_test=10_000),
'mnist_corrupted/shot_noise':
DatasetInfo(
dataset_name='mnist_corrupted/shot_noise',
num_classes=10,
input_dim=784,
num_train=60_000,
num_test=10_000),
'cifar10':
DatasetInfo(
dataset_name='cifar10',
num_classes=10,
input_dim=3_072,
num_train=50_000,
num_test=10_000),
}
return dataset
@dataclasses.dataclass
class UCIDataSpec:
path: str
desc: str
label: str
excluded: List[str]
# TODO(author3): Avoid hard-coding directory string so it's user-specified.
UCI_BASE_DIR = '/tmp/uci_datasets'
DATA_SPECS = {
'boston_housing': UCIDataSpec(
path=os.path.join(UCI_BASE_DIR, 'boston_housing.csv'),
desc=('The Boston housing data was collected in 1978 and each of the '
'506 entries represent aggregated data about 14 features for '
'homes from various suburbs in Boston, Massachusetts.'),
label='MEDV',
excluded=[]),
'concrete_strength': UCIDataSpec(
path=os.path.join(UCI_BASE_DIR, 'concrete_strength.csv'),
desc=('The concrete compressive strength data relates the compressive '
'strength of concrete to its age and mixture ingredients such as '
'cement, water, aggregates and admixtures.'),
label='concrete_compressive_strength',
excluded=[]),
'energy_efficiency': UCIDataSpec(
path=os.path.join(UCI_BASE_DIR, 'energy_efficiency.csv'),
desc=('This study looked into assessing the heating load and cooling '
'load requirements of buildings (that is, energy efficiency) as '
'a function of building parameters. **Heating load only**.'),
label='Y1',
excluded=['Y2']),
'naval_propulsion': UCIDataSpec(
path=os.path.join(UCI_BASE_DIR, 'naval_propulsion.csv'),
desc=('Data have been generated from a sophisticated simulator of a '
'Gas Turbine (GT), mounted on a Frigate characterized by a '
'Combined Diesel eLectric And Gas (CODLAG) propulsion plant '
'type. **GT Turbine decay state coefficient only**'),
label='GT Turbine decay state coefficient',
excluded=['GT Compressor decay state coefficient']),
'kin8nm': UCIDataSpec(
path=os.path.join(UCI_BASE_DIR, 'kin8nm.csv'),
desc=('This data set is concerned with the forward kinematics of '
'an 8 link robot arm. Among the existing variants of this data '
'set we have used the variant 8nm, which is known to be highly '
'non-linear and medium noisy.'),
label='y',
excluded=[]),
'power_plant': UCIDataSpec(
path=os.path.join(UCI_BASE_DIR, 'power_plant.csv'),
desc=('The combined cycle power plant data contains hourly ambient '
'variables (temperature, pressure, humidity and exhaust vacuum) '
'used to predict the net hourly electrical energy output.'),
label='PE',
excluded=[]),
'protein_structure': UCIDataSpec(
path=os.path.join(UCI_BASE_DIR, 'protein_structure.csv'),
desc=('This is a data set of Physicochemical Properties of Protein '
'Tertiary Structure. The data set is taken from CASP 5-9. There '
'are 45730 decoys with sizes varying from 0 to 21 angstroms.'),
label='RMSD',
excluded=[]),
'wine': UCIDataSpec(
path=os.path.join(UCI_BASE_DIR, 'wine.csv'),
desc=('The dataset is related to the red variant of the Portuguese '
'"Vinho Verde" wine. **NB contains red wine examples only**'),
label='quality',
excluded=[]),
'yacht_hydrodynamics': UCIDataSpec(
path=os.path.join(UCI_BASE_DIR, 'yacht_hydrodynamics.csv'),
desc=('Delft data set, used to predict the hydrodynamic performance of '
'sailing yachts from dimensions and velocity.'),
label='Residuary resistance per unit weight of displacement',
excluded=[])
}
STRUCTURED_DATASETS = tuple(structured_datasets().keys())
IMAGE_DATASETS = tuple(image_datasets().keys())
CLASSIFICATION_DATASETS = STRUCTURED_DATASETS + IMAGE_DATASETS
REGRESSION_DATASETS = tuple(regression_datasets().keys())
DATASETS_SETTINGS = {
**image_datasets(),
**structured_datasets(),
**regression_datasets(),
}
DATASETS = CLASSIFICATION_DATASETS + REGRESSION_DATASETS
|
neural_testbed-master
|
neural_testbed/real_data/datasets.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Exposing the public methods of real data."""
# Realdata data sampler
from neural_testbed.real_data.data_sampler import make_global_sampler
from neural_testbed.real_data.data_sampler import make_local_sampler
from neural_testbed.real_data.data_sampler import RealDataSampler
# Realdata datasets
from neural_testbed.real_data.datasets import CLASSIFICATION_DATASETS
from neural_testbed.real_data.datasets import DatasetInfo
from neural_testbed.real_data.datasets import DATASETS
from neural_testbed.real_data.datasets import DATASETS_SETTINGS
from neural_testbed.real_data.datasets import REGRESSION_DATASETS
# Realdata loading of testbed problem
from neural_testbed.real_data.load import problem_from_config
from neural_testbed.real_data.load import problem_from_id
# Realdata sweep of testbed problems
from neural_testbed.real_data.sweep import CLASSIFICATION
from neural_testbed.real_data.sweep import CLASSIFICATION_NUM_DATA
from neural_testbed.real_data.sweep import ProblemConfig
from neural_testbed.real_data.sweep import REGRESSION
from neural_testbed.real_data.sweep import SETTINGS
# Realdata utils
from neural_testbed.real_data.utils import config_from_dataset_name
from neural_testbed.real_data.utils import load_classification_dataset
from neural_testbed.real_data.utils import load_regression_dataset
|
neural_testbed-master
|
neural_testbed/real_data/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utils for real data testbed."""
from typing import Dict, Tuple, Union, Optional
import chex
from neural_testbed import base as testbed_base
from neural_testbed.real_data import datasets
from neural_testbed.real_data import sweep
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_datasets as tfds
_DictFeat = Dict[str, Union[int, float]]
Features = Union[_DictFeat, chex.Array]
def _standardize_data(x: chex.Array,
axes: int = 0,
epsilon: float = 1e-8) -> chex.Array:
"""Returns standardized input."""
mean, variance = tf.nn.moments(x, axes=axes)
x_standardized = (x - mean) / tf.sqrt(variance + epsilon)
return x_standardized
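# Worked example (comment added for exposition): applied to
# tf.constant([1., 2., 3.]), _standardize_data sees mean 2 and population
# variance 2/3, so it returns approximately [-1.2247, 0., 1.2247]
# (zero mean, unit variance up to epsilon).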
def _preprocess_structured_data(
features: Features, label: int) -> testbed_base.Data:
"""Preprocess structured data into testbed standardized dictionary format."""
# features can be a dict of numeric features or a single numeric feature
  if isinstance(features, dict):
features = tf.concat(
[tf.cast(tf.expand_dims(x, -1), tf.float64) for x in features.values()],
axis=0)
else:
features = tf.cast(features, tf.float64)
features = _standardize_data(features)
chex.assert_shape(features, (features.shape[0],))
chex.assert_shape(label, ())
label = tf.expand_dims(label, -1)
return testbed_base.Data(x=features, y=label)
def _preprocess_image_data(dataset: Dict[str, chex.Array]) -> testbed_base.Data:
"""Preprocess image data into testbed standardized dictionary format."""
assert 'image' in dataset
assert 'label' in dataset
image, label = dataset['image'], dataset['label']
image = tf.image.convert_image_dtype(image, tf.float64)
image = tf.reshape(image, [-1])
image = _standardize_data(image)
chex.assert_shape(image, (image.shape[0],))
chex.assert_shape(label, ())
label = tf.expand_dims(label, -1)
return testbed_base.Data(x=image, y=label)
def _load_structured_dataset(
dataset_info: datasets.DatasetInfo, split: str) -> testbed_base.Data:
"""Loads a structured dataset."""
if split == 'train':
data_split = f'train[:{dataset_info.num_train}]'
batch_size = dataset_info.num_train
else:
data_split = f'train[-{dataset_info.num_test}:]'
batch_size = dataset_info.num_test
ds = tfds.load(
name=dataset_info.dataset_name, split=data_split, as_supervised=True)
ds = ds.map(_preprocess_structured_data)
ds = ds.batch(batch_size)
data = next(iter(tfds.as_numpy(ds)))
return data
def _load_image_dataset(
dataset_info: datasets.DatasetInfo, split: str) -> testbed_base.Data:
"""Loads an image dataset."""
if split == 'train':
data_split = f'train[:{dataset_info.num_train}]'
batch_size = dataset_info.num_train
else:
data_split = split
batch_size = dataset_info.num_test
ds = tfds.load(
name=dataset_info.dataset_name, split=data_split, with_info=False)
ds = ds.map(_preprocess_image_data)
ds = ds.batch(batch_size)
data = next(iter(tfds.as_numpy(ds)))
return data
def load_classification_dataset(
dataset_info: datasets.DatasetInfo, split: str) -> testbed_base.Data:
"""Returns dataset data based on problem_config and split."""
dataset_name = dataset_info.dataset_name
if dataset_name not in datasets.CLASSIFICATION_DATASETS:
raise ValueError(f'dataset {dataset_name} is not supported yet.')
if dataset_name in datasets.STRUCTURED_DATASETS:
return _load_structured_dataset(dataset_info=dataset_info, split=split)
else:
return _load_image_dataset(dataset_info=dataset_info, split=split)
def get_uci_data(name: str) -> Tuple[chex.Array, chex.Array]:
"""Returns an array of features and an array of labels for dataset `name`."""
spec = datasets.DATA_SPECS.get(name)
if spec is None:
raise ValueError('Unknown dataset: {}. Available datasets:\n{}'.format(
name, '\n'.join(datasets.DATA_SPECS.keys())))
with tf.io.gfile.GFile(spec.path) as f:
df = pd.read_csv(f)
labels = df.pop(spec.label).to_numpy().astype(np.float32)
for ex in spec.excluded:
_ = df.pop(ex)
features = df.to_numpy().astype(np.float32)
return features, labels
def load_regression_dataset(
dataset_name: str) -> Tuple[testbed_base.Data, testbed_base.Data]:
"""Returns dataset data from dataset_name."""
if dataset_name not in datasets.REGRESSION_DATASETS:
raise ValueError(f'dataset {dataset_name} is not supported yet.')
x, y = get_uci_data(dataset_name)
if len(y.shape) == 1:
y = y[:, None]
train_test_split = 0.8
random_permutation = np.random.permutation(x.shape[0])
n_train = int(x.shape[0] * train_test_split)
train_ind = random_permutation[:n_train]
test_ind = random_permutation[n_train:]
x_train, y_train = x[train_ind, :], y[train_ind, :]
x_test, y_test = x[test_ind, :], y[test_ind, :]
x_mean, x_std = np.mean(x_train, axis=0), np.std(x_train, axis=0)
y_mean = np.mean(y_train, axis=0)
epsilon = tf.keras.backend.epsilon()
x_train = (x_train - x_mean) / (x_std + epsilon)
x_test = (x_test - x_mean) / (x_std + epsilon)
y_train, y_test = y_train - y_mean, y_test - y_mean
train_data = testbed_base.Data(x=x_train, y=y_train)
test_data = testbed_base.Data(x=x_test, y=y_test)
return train_data, test_data
def config_from_dataset_name(
dataset_name: str,
tau: int = 1,
seed: int = 0,
temperature: float = 0.01,
noise_std: float = 1.,
num_train: Optional[int] = None,
) -> sweep.ProblemConfig:
"""Returns a testbed problem based on a dataset name and tau.
Args:
dataset_name: name of the dataset.
tau: value of tau for joint prediction.
seed: random seed.
temperature: temperature to be used in prior_knowledge. It does not affect
the data.
noise_std: noise_std to be used in prior_knowledge. It does not affect
the data.
num_train: optional, it can be used to limit the number of training data.
Returns:
A problem config for real data.
"""
dataset_info = datasets.DATASETS_SETTINGS[dataset_name]
if num_train is None:
num_train = dataset_info.num_train
else:
num_train = min(num_train, dataset_info.num_train)
prior_knowledge = testbed_base.PriorKnowledge(
input_dim=dataset_info.input_dim,
num_train=num_train,
num_classes=dataset_info.num_classes,
tau=tau,
temperature=temperature,
noise_std=noise_std,
)
problem_config = sweep.ProblemConfig(
prior_knowledge=prior_knowledge,
seed=seed,
dataset_name=dataset_name,
)
return problem_config
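# Illustrative sketch (added for exposition, not part of the library API). It
# shows how the helpers above compose, using the first registered regression
# dataset; loading requires the underlying data files to be available.
if __name__ == '__main__':
  example_name = next(iter(datasets.REGRESSION_DATASETS))
  example_config = config_from_dataset_name(example_name, tau=10, seed=1)
  print(example_config.prior_knowledge)
  train_data, test_data = load_regression_dataset(example_name)
  print(train_data.x.shape, test_data.x.shape)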
|
neural_testbed-master
|
neural_testbed/real_data/utils.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Defines the leaderboard sweep for RealData testbed."""
import dataclasses
from typing import Dict
from neural_testbed import base
from neural_testbed.real_data import data_sampler
from neural_testbed.real_data import datasets
# problem_ids are strings of the form {sweep_name}{SEPARATOR}{index}.
SEPARATOR = '/'
@dataclasses.dataclass(frozen=True)
class ProblemConfig:
"""Problem configuration including prior knowledge and some hyperparams."""
# Agent's a priori knowledge about the problem.
prior_knowledge: base.PriorKnowledge
# Random seed controlling all the randomness in the problem.
seed: int
# Name of the dataset
dataset_name: str
# Test sampler constructor
test_sampler_ctor: data_sampler.SamplerCtor = data_sampler.make_local_sampler
# Number of inputs (X's) used for evaluation.
num_test_seeds: int = 1000
# Number of samples generated from ENN during evaluation.
num_enn_samples: int = 1000
@property
def meta_data(self):
meta = dataclasses.asdict(self)
meta.pop('prior_knowledge')
meta.update(dataclasses.asdict(self.prior_knowledge))
return meta
def classification_sweep(num_seed: int = 5,
initial_seed: int = 0,
temperature: float = 0.01) -> Dict[str, ProblemConfig]:
"""Generate hyperparameter sweep for classification problems.
Args:
num_seed: number of seeds per configuration of other hyperparameters.
initial_seed: initial value of the seed.
temperature: temperature to be used in prior_knowledge. It does not affect
the data.
Returns:
    Mapping problem_id: ProblemConfig (for use in real_data.load).
"""
configs = []
for tau in [1, 10]:
for dataset_name in datasets.CLASSIFICATION_DATASETS:
seed = initial_seed
for unused_seed_inc in range(num_seed):
seed += 1
dataset_info = datasets.DATASETS_SETTINGS[dataset_name]
prior_knowledge = base.PriorKnowledge(
input_dim=dataset_info.input_dim,
num_train=dataset_info.num_train,
num_classes=dataset_info.num_classes,
tau=tau,
temperature=temperature,
)
configs.append(
ProblemConfig(
prior_knowledge=prior_knowledge,
seed=seed,
dataset_name=dataset_name,
))
return {f'classification{SEPARATOR}{i}': v for i, v in enumerate(configs)}
def classification_num_data_sweep(
num_seed: int = 5,
initial_seed: int = 0,
temperature: float = 0.01,
) -> Dict[str, ProblemConfig]:
"""Generate hyperparameter sweep for classification problems with different number of training data.
Args:
num_seed: number of seeds per configuration of other hyperparameters.
initial_seed: initial value of the seed.
temperature: temperature to be used in prior_knowledge. It does not affect
the data.
Returns:
    Mapping problem_id: ProblemConfig (for use in real_data.load).
"""
configs = []
for tau in [1, 10]:
for dataset_name in datasets.CLASSIFICATION_DATASETS:
seed = initial_seed
for num_train in [1, 10, 100, 1000, 10_000, 100_000]:
for unused_seed_inc in range(num_seed):
seed += 1
dataset_info = datasets.DATASETS_SETTINGS[dataset_name]
# Update num_train of the dataset
num_train = min(num_train, dataset_info.num_train)
prior_knowledge = base.PriorKnowledge(
input_dim=dataset_info.input_dim,
num_train=num_train,
num_classes=dataset_info.num_classes,
tau=tau,
temperature=temperature,
)
configs.append(
ProblemConfig(
prior_knowledge=prior_knowledge,
seed=seed,
dataset_name=dataset_name,
))
return {
f'classification_variant_data{SEPARATOR}{i}': v
for i, v in enumerate(configs)
}
def regression_sweep(num_seed: int = 5,
initial_seed: int = 0,
noise_std: float = 1.) -> Dict[str, ProblemConfig]:
"""Generate hyperparameter sweep for regression problems.
Args:
num_seed: number of seeds per configuration of other hyperparameters.
initial_seed: initial value of the seed.
noise_std: noise_std to be used in prior_knowledge. It does not affect
the data.
Returns:
    Mapping problem_id: ProblemConfig (for use in real_data.load).
"""
configs = []
for dataset_name in datasets.REGRESSION_DATASETS:
seed = initial_seed
for unused_seed_inc in range(num_seed):
seed += 1
dataset_info = datasets.DATASETS_SETTINGS[dataset_name]
prior_knowledge = base.PriorKnowledge(
input_dim=dataset_info.input_dim,
num_train=dataset_info.num_train,
num_classes=dataset_info.num_classes,
tau=1,
noise_std=noise_std,
)
configs.append(
ProblemConfig(
prior_knowledge=prior_knowledge,
seed=seed,
dataset_name=dataset_name,
))
return {f'regression{SEPARATOR}{i}': v for i, v in enumerate(configs)}
SETTINGS = {**regression_sweep(),
**classification_sweep(),
**classification_num_data_sweep(),}
REGRESSION = tuple(regression_sweep().keys())
CLASSIFICATION = tuple(classification_sweep().keys())
CLASSIFICATION_NUM_DATA = tuple(classification_num_data_sweep().keys())
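# Illustrative sketch (added for exposition): each problem_id in SETTINGS maps
# to a ProblemConfig whose meta_data property flattens the prior knowledge for
# logging.
if __name__ == '__main__':
  for problem_id in list(SETTINGS)[:3]:
    config = SETTINGS[problem_id]
    print(problem_id, config.dataset_name, config.meta_data['num_train'])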
|
neural_testbed-master
|
neural_testbed/real_data/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Loading a realdata problem for the testbed."""
from absl import logging
import haiku as hk
from neural_testbed import base as testbed_base
from neural_testbed import likelihood
from neural_testbed.real_data import data_sampler
from neural_testbed.real_data import datasets
from neural_testbed.real_data import sweep
from neural_testbed.real_data import utils
def problem_from_id(problem_id: str) -> testbed_base.TestbedProblem:
"""Factory method to load realdata problem from problem_id.
  This is a user-facing function and its only job is to translate problem_id
  to prior knowledge.
Args:
problem_id: a string representing a standard problem in the leaderboard.
Returns:
A testbed problem.
"""
logging.info('Loading problem_id: %s', problem_id)
try:
problem_config = sweep.SETTINGS[problem_id]
  except KeyError as key_error:
    raise ValueError(f'Unrecognised problem_id={problem_id}') from key_error
return problem_from_config(problem_config)
def problem_from_config(
problem_config: sweep.ProblemConfig) -> testbed_base.TestbedProblem:
"""Returns a testbed problem given a problem config."""
assert problem_config.prior_knowledge.num_classes > 0
if problem_config.prior_knowledge.num_classes > 1:
return _load_classification(problem_config)
else:
return _load_regression(problem_config)
def _load_classification(
problem_config: sweep.ProblemConfig) -> testbed_base.TestbedProblem:
"""Load a classification problem from problem_config."""
rng = hk.PRNGSequence(problem_config.seed)
prior_knowledge = problem_config.prior_knowledge
dataset_info = datasets.DATASETS_SETTINGS[problem_config.dataset_name]
# Update num_train of the dataset
dataset_info.num_train = problem_config.prior_knowledge.num_train
train_data = utils.load_classification_dataset(
dataset_info=dataset_info, split='train',)
test_data = utils.load_classification_dataset(
dataset_info=dataset_info, split='test',)
realdata_sampler = data_sampler.RealDataSampler(
train_data=train_data,
test_sampler=data_sampler.make_local_sampler(test_data),
tau=prior_knowledge.tau,
)
sample_based_kl = likelihood.CategoricalKLSampledXSampledY(
num_test_seeds=problem_config.num_test_seeds,
num_enn_samples=problem_config.num_enn_samples,
key=next(rng),
num_classes=prior_knowledge.num_classes,
)
sample_based_kl = likelihood.add_classification_accuracy_ece(
sample_based_kl,
num_test_seeds=int(1_000 / prior_knowledge.tau) + 1,
num_enn_samples=100,
num_classes=prior_knowledge.num_classes,
)
return likelihood.SampleBasedTestbed(
data_sampler=realdata_sampler,
sample_based_kl=sample_based_kl,
prior_knowledge=prior_knowledge,
)
def _load_regression(
problem_config: sweep.ProblemConfig) -> testbed_base.TestbedProblem:
"""Load a regression problem from problem_config."""
rng = hk.PRNGSequence(problem_config.seed)
prior_knowledge = problem_config.prior_knowledge
train_data, test_data = utils.load_regression_dataset(
dataset_name=problem_config.dataset_name)
realdata_sampler = data_sampler.RealDataSampler(
train_data=train_data,
test_sampler=data_sampler.make_global_sampler(test_data),
tau=prior_knowledge.tau,
)
sample_based_kl = likelihood.GaussianSampleKL(
# This KL estimator cannot handle very large num_test_seed * tau
num_test_seeds=int(problem_config.num_test_seeds
/ prior_knowledge.tau) + 1,
num_enn_samples=problem_config.num_enn_samples,
enn_sigma=prior_knowledge.noise_std,
key=next(rng),
)
return likelihood.SampleBasedTestbed(
data_sampler=realdata_sampler,
sample_based_kl=sample_based_kl,
prior_knowledge=prior_knowledge,
)
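# Illustrative sketch (added for exposition): loading a problem by id. The id
# below follows the naming convention of the sweeps ('regression/0'); building
# the problem loads and preprocesses the underlying real dataset.
if __name__ == '__main__':
  problem = problem_from_id(f'regression{sweep.SEPARATOR}0')
  print(problem.prior_knowledge)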
|
neural_testbed-master
|
neural_testbed/real_data/load.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A fake data sampler for classification based on real data."""
from typing import Tuple, Callable
import chex
import jax
import jax.numpy as jnp
from neural_testbed import base as testbed_base
from neural_testbed import likelihood
# key, num_samples -> Data
Sampler = Callable[[chex.PRNGKey, int], testbed_base.Data]
SamplerCtor = Callable[[testbed_base.Data], Sampler]
class RealDataSampler(likelihood.GenerativeDataSampler):
"""A fake data sampler for classification/regression based on real data."""
def __init__(self,
train_data: testbed_base.Data,
test_sampler: Sampler,
tau: int = 1):
self._train_data = train_data
self._tau = tau
self._test_sampler = test_sampler
@property
def train_data(self) -> testbed_base.Data:
"""Returns train data."""
return self._train_data
def test_data(self, key: chex.PRNGKey) -> Tuple[testbed_base.Data, float]:
"""Returns a batch of test data.
Args:
key: A key for generating random numbers.
Returns:
      A tuple of test data (a testbed_base.Data) and a float which is always 0.
      The testbed pipeline expects this method to return test data together
      with its log-likelihood under the posterior, but no such posterior is
      available for real data. Returning 0 lets the testbed reuse the same
      formula when computing the KL estimate.
"""
test_data = self._test_sampler(key, self._tau)
return test_data, 0.
def make_local_sampler(data: testbed_base.Data, kappa: int = 2) -> Sampler:
"""Returns a sampler which samples based on kappa anchor points.
To make this work in jax we actually implement this by first sampling kappa
  anchor points, then randomly sampling the tau batch points from these
  kappa anchors (with replacement).
Args:
data: test data.
kappa: number of anchor reference points. If tau is less than kappa we
default to sampling tau points.
Returns:
Local sampler of data indices.
"""
x_test = jnp.array(data.x)
y_test = jnp.array(data.y)
num_data = y_test.shape[0]
def local_sampler(key: chex.PRNGKey, tau: int) -> testbed_base.Data:
anchor_key, sample_key = jax.random.split(key, 2)
# Sample anchor data indices
anchor_idx = jax.random.randint(anchor_key, [kappa], 0, num_data)
# Index into these anchor indices
sample_idx = jax.random.randint(sample_key, [tau], 0, kappa)
repeat_idx = anchor_idx[sample_idx]
chex.assert_shape(repeat_idx, [tau])
return testbed_base.Data(x=x_test[repeat_idx, :], y=y_test[repeat_idx, :])
return local_sampler
def make_global_sampler(data: testbed_base.Data) -> Sampler:
"""Returns a sampler which samples uniformly from data points."""
x_test = jnp.array(data.x)
y_test = jnp.array(data.y)
num_data = y_test.shape[0]
def global_sampler(key: chex.PRNGKey, tau: int) -> testbed_base.Data:
sample_idx = jax.random.randint(key, [tau], 0, num_data)
chex.assert_shape(sample_idx, [tau])
return testbed_base.Data(x=x_test[sample_idx, :], y=y_test[sample_idx, :])
return global_sampler
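# Illustrative sketch (added for exposition): exercising the samplers above on
# small synthetic arrays; the data values are placeholders.
if __name__ == '__main__':
  fake = testbed_base.Data(
      x=jnp.arange(20.).reshape(10, 2), y=jnp.arange(10.).reshape(10, 1))
  sampler = RealDataSampler(
      train_data=fake, test_sampler=make_local_sampler(fake, kappa=2), tau=5)
  batch, unused_ll = sampler.test_data(jax.random.PRNGKey(0))
  print(batch.x.shape, batch.y.shape)  # Expected: (5, 2) and (5, 1).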
|
neural_testbed-master
|
neural_testbed/real_data/data_sampler.py
|
# python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple example running an ENN on the testbed."""
from absl import app
from absl import flags
from neural_testbed import leaderboard
from neural_testbed.agents.factories.sweeps import testbed as factories
from neural_testbed.experiments import experiment
from neural_testbed.experiments import pool
from neural_testbed.leaderboard import sweep
# Option to configure the leaderboard problem instance.
# To do a *sweep* over all problem_ids pass --problem_id=SWEEP
flags.DEFINE_string('problem_id', 'classification_2d/0',
'ID for leaderboard GP.')
# Options for logging results to csv for evaluation later.
flags.DEFINE_string(
'results_dir', '/tmp/neural_testbed', 'Where to store results as csv.')
flags.DEFINE_bool('overwrite_csv', True, 'Whether to overwrite existing csv.')
# Load the agent from the factories package; sweep_id selects a config within
# the agent_name sweep.
flags.DEFINE_string('agent_name', 'mlp', 'Agent to load')
flags.DEFINE_integer('sweep_id', -1, 'Agent within sweep, <0 gives default.')
flags.DEFINE_integer(
'num_batches', -1,
    'Override the number of training batches. Mostly used for testing.')
FLAGS = flags.FLAGS
def run_single_problem(problem_id: str) -> str:
"""Evaluates the agent on a single problem instance."""
# Load the problem via problem_id.
problem = leaderboard.problem_from_id_csv(
problem_id, FLAGS.results_dir, FLAGS.overwrite_csv)
# Define the agent. Here we are constructing one of the benchmark agents
# implemented in the factories package.
paper_agent = factories.get_paper_agent(FLAGS.agent_name)
if FLAGS.sweep_id < 0:
config = paper_agent.default
if FLAGS.num_batches > 0 and hasattr(config, 'num_batches'):
config.num_batches = FLAGS.num_batches
else:
config = paper_agent.sweep()[FLAGS.sweep_id]
agent = paper_agent.ctor(config)
# Run the experiment and output the KL score.
kl_quality = experiment.run(agent, problem)
print(f'kl_quality={kl_quality}, write csv to {FLAGS.results_dir}')
return problem_id
def main(_):
if FLAGS.problem_id == 'SWEEP':
# Perform a sweep over all the relevant problem_id for full evaluation.
pool.map_mpi(run_single_problem, sweep.CLASSIFICATION_2D)
else:
# Run just a single problem_id.
run_single_problem(FLAGS.problem_id)
if __name__ == '__main__':
app.run(main)
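# Example invocations (illustrative; the flag values are assumptions):
#   python -m neural_testbed.experiments.run --problem_id=classification_2d/0 \
#       --agent_name=mlp --num_batches=10
#   python -m neural_testbed.experiments.run --problem_id=SWEEP --agent_name=mlp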
|
neural_testbed-master
|
neural_testbed/experiments/run.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
neural_testbed-master
|
neural_testbed/experiments/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Common experiment loop for submitting to the testbed."""
from neural_testbed import base as testbed_base
def run(agent: testbed_base.TestbedAgent,
problem: testbed_base.TestbedProblem) -> testbed_base.ENNQuality:
"""Run an agent on a given testbed problem."""
enn_sampler = agent(problem.train_data, problem.prior_knowledge)
return problem.evaluate_quality(enn_sampler)
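# Illustrative sketch (comment-only, added for exposition): run() only needs an
# agent mapping (train_data, prior_knowledge) to an EpistemicSampler and a
# problem exposing train_data, prior_knowledge and evaluate_quality, e.g.
#   agent = paper_agent.ctor(paper_agent.default)   # from agents.factories
#   kl_quality = run(agent, problem)                # problem built elsewhere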
|
neural_testbed-master
|
neural_testbed/experiments/experiment.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for neural_testbed.experiments.run."""
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
from neural_testbed import leaderboard
from neural_testbed.experiments import run
FLAGS = flags.FLAGS
FLAGS.mark_as_parsed()
class RunTest(parameterized.TestCase):
@parameterized.parameters([[x] for x in leaderboard.CLASSIFICATION_2D_TEST])
def test_neural_testbed(self, problem_id: str):
FLAGS.problem_id = problem_id
FLAGS.num_batches = 2
run.main(None)
if __name__ == '__main__':
absltest.main()
|
neural_testbed-master
|
neural_testbed/experiments/run_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Code for generating a full set of results using multiprocessing."""
from concurrent import futures
import multiprocessing
from typing import Callable, Optional, Sequence
import termcolor
import tqdm
ProblemId = str
def map_mpi(
run_fn: Callable[[ProblemId], ProblemId],
problem_ids: Sequence[ProblemId],
num_processes: Optional[int] = None,
):
"""Maps `run_fn` over `problem_ids`, using `num_processes` in parallel."""
num_processes = num_processes or multiprocessing.cpu_count()
num_experiments = len(problem_ids)
message = """
Experiment info
---------------
Num experiments: {num_experiments}
Num worker processes: {num_processes}
""".format(
num_processes=num_processes, num_experiments=num_experiments)
termcolor.cprint(message, color='blue', attrs=['bold'])
# Create a pool of processes, dispatch the experiments to them, show progress.
pool = futures.ProcessPoolExecutor(num_processes)
progress_bar = tqdm.tqdm(total=num_experiments)
for problem_id in pool.map(run_fn, problem_ids):
description = '[Last finished: {}]'.format(problem_id)
progress_bar.set_description(termcolor.colored(description, color='green'))
progress_bar.update()
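# Illustrative usage (comment-only sketch): given a top-level function such as
# run_single_problem(problem_id) -> problem_id (see experiments/run.py), a full
# sweep can be dispatched across CPU cores with e.g.
#   map_mpi(run_single_problem, problem_ids, num_processes=8)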
|
neural_testbed-master
|
neural_testbed/experiments/pool.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A minimalist wrapper around ENN experiment for testbed submission."""
import dataclasses
from typing import Callable, Dict, Optional, Union
from acme.utils import loggers
import chex
from enn import datasets
from enn import networks
from enn import supervised
from enn import utils
import jax
from neural_testbed import base as testbed_base
from neural_testbed import likelihood
from neural_testbed.agents import enn_losses
import optax
# Allow num_batches to be either an integer or a callable of prior knowledge.
_BatchStrategy = Union[int, Callable[[testbed_base.PriorKnowledge], int]]
@dataclasses.dataclass
class VanillaEnnConfig:
"""Configuration options for the VanillaEnnAgent."""
enn_ctor: enn_losses.EnnCtor
loss_ctor: enn_losses.LossCtor = enn_losses.default_enn_loss()
optimizer: optax.GradientTransformation = optax.adam(1e-3)
num_batches: _BatchStrategy = 1000
# TODO(author2): Complete prior loss refactor --> MultilossExperiment
prior_loss_ctor: Optional[enn_losses.LossCtor] = None
prior_loss_freq: int = 10
seed: int = 0
batch_size: int = 100
center_train_data: bool = False
eval_batch_size: Optional[int] = None
logger: Optional[loggers.Logger] = None
train_log_freq: Optional[int] = None
eval_log_freq: Optional[int] = None
def extract_enn_sampler(
experiment: supervised.BaseExperiment) -> testbed_base.EpistemicSampler:
def enn_sampler(x: chex.Array, key: chex.PRNGKey) -> chex.Array:
"""Generate a random sample from posterior distribution at x."""
net_out = experiment.predict(x, key)
return networks.parse_net_output(net_out)
return jax.jit(enn_sampler)
@dataclasses.dataclass
class VanillaEnnAgent(testbed_base.TestbedAgent):
"""Wraps an ENN as a testbed agent, using sensible loss/bootstrapping."""
config: VanillaEnnConfig
eval_datasets: Optional[Dict[str, datasets.ArrayBatchIterator]] = None
experiment: Optional[supervised.BaseExperiment] = None
def __call__(
self,
data: testbed_base.Data,
prior: testbed_base.PriorKnowledge,
) -> testbed_base.EpistemicSampler:
"""Wraps an ENN as a testbed agent, using sensible loss/bootstrapping."""
enn = self.config.enn_ctor(prior)
if self.config.center_train_data:
enn = networks.make_centered_enn(enn, data.x)
enn_data = datasets.ArrayBatch(x=data.x, y=data.y)
dataset = utils.make_batch_iterator(
enn_data, self.config.batch_size, self.config.seed)
# TODO(author2): Complete prior loss refactor --> MultilossExperiment
trainers = []
trainers.append(supervised.MultilossTrainer(
loss_fn=self.config.loss_ctor(prior, enn),
dataset=dataset,
))
if self.config.prior_loss_ctor is not None:
trainers.append(supervised.MultilossTrainer(
loss_fn=self.config.prior_loss_ctor(prior, enn),
dataset=dataset,
should_train=lambda step: step % self.config.prior_loss_freq == 0,
name='prior_loss',
))
# Parse number of training batches from config _BatchStrategy
if isinstance(self.config.num_batches, int):
num_batches = self.config.num_batches
else:
num_batches = self.config.num_batches(prior)
self.experiment = supervised.MultilossExperiment(
enn=enn,
trainers=trainers,
optimizer=self.config.optimizer,
seed=self.config.seed,
logger=self.config.logger,
train_log_freq=logging_freq(
num_batches, log_freq=self.config.train_log_freq),
eval_datasets=self.eval_datasets,
eval_log_freq=logging_freq(
num_batches, log_freq=self.config.eval_log_freq),
)
# Train agent and return the ENN
self.experiment.train(num_batches)
return extract_enn_sampler(self.experiment)
def make_learning_curve_enn_agent(
config: VanillaEnnConfig,
problem: testbed_base.TestbedProblem,
num_test: int = 1000,
seed: int = 0
) -> VanillaEnnAgent:
"""Constructs an agent with privileged access to testing data.
This constructor will look inside the testbed problem and try to extract the
testing data, for periodic evaluation within the *experiment* dataframe of
the agent. This should allow us to produce learning curves on train/test.
However, it is slightly *hacky*... so use at your own risk!
Args:
config: options for the vanilla ENN agent.
problem: problem instance, ideally it should contain a SampleBasedTestbed.
num_test: number of testing datapoints for the "test" dataset.
seed: an integer seed.
Returns:
VanillaEnnAgent with internal logging of train/test.
"""
problem = getattr(problem, 'problem', problem)
if isinstance(problem, likelihood.SampleBasedTestbed):
# Convert the data to enn batch format
train_data = datasets.ArrayBatch(
x=problem.train_data.x, y=problem.train_data.y
)
# Generate a sample-based test dataset with num_test samples.
def gen_test(key: chex.PRNGKey) -> testbed_base.Data:
data, _ = problem.data_sampler.test_data(key)
return testbed_base.Data(x=data.x[0, :], y=data.y[0, :])
test_keys = jax.random.split(jax.random.PRNGKey(seed), num_test)
test_data = jax.lax.map(gen_test, test_keys)
test_data = datasets.ArrayBatch(x=test_data.x, y=test_data.y)
# Pass out eval_datasets to experiment.
eval_datasets = {
'train': utils.make_batch_iterator(train_data, config.eval_batch_size),
'test': utils.make_batch_iterator(test_data, config.eval_batch_size),
}
else:
print(f'WARNING: problem={problem} is not SampleBasedTestbed.')
eval_datasets = None
return VanillaEnnAgent(config, eval_datasets)
def _round_to_integer(x: float) -> int:
"""Utility function to round a float to integer, or 1 if it would be 0."""
x = int(x)
if x == 0:
return 1
else:
return x
def logging_freq(num_batches: int,
num_points: int = 30,
log_freq: Optional[int] = None) -> int:
"""Computes a logging frequency from num_batches, optionally log_freq."""
if log_freq is None:
log_freq = _round_to_integer(num_batches / num_points)
return log_freq
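# Illustrative sketch (added for exposition): num_batches may be a plain int or
# a callable of PriorKnowledge (_BatchStrategy), and logging_freq spaces logs
# to roughly `num_points` entries, never returning zero.
if __name__ == '__main__':
  def adaptive_batches(prior: testbed_base.PriorKnowledge) -> int:
    # Placeholder rule: more SGD steps when there is more data per input dim.
    return 2_000 if prior.num_train > 100 * prior.input_dim else 500
  print(logging_freq(num_batches=1_000))               # -> 33
  print(logging_freq(num_batches=10, num_points=30))   # -> 1
  # `adaptive_batches` could be passed as VanillaEnnConfig(num_batches=...).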
|
neural_testbed-master
|
neural_testbed/agents/enn_agent.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Exposing public methods for approximate posterior training."""
# ENN SGD
from neural_testbed.agents.enn_agent import extract_enn_sampler
from neural_testbed.agents.enn_agent import make_learning_curve_enn_agent
from neural_testbed.agents.enn_agent import VanillaEnnAgent
from neural_testbed.agents.enn_agent import VanillaEnnConfig
# ENN losses
from neural_testbed.agents.enn_losses import combine_loss_prior_loss
from neural_testbed.agents.enn_losses import default_enn_loss
from neural_testbed.agents.enn_losses import default_enn_prior_loss
from neural_testbed.agents.enn_losses import gaussian_regression_loss
from neural_testbed.agents.enn_losses import LossCtor
from neural_testbed.agents.enn_losses import regularized_dropout_loss
|
neural_testbed-master
|
neural_testbed/agents/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Helpful losses for the ENN agent."""
from typing import Callable, Optional
import chex
from enn import base as enn_base
from enn import data_noise
from enn import datasets
from enn import losses
from enn import networks
from neural_testbed import base as testbed_base
EnnCtor = Callable[[testbed_base.PriorKnowledge], networks.EnnArray]
LossCtor = Callable[
[testbed_base.PriorKnowledge, networks.EnnArray], losses.LossFnArray]
def default_enn_prior_loss(num_index_samples: int = 10) -> LossCtor:
def prior_loss_ctor(prior: testbed_base.PriorKnowledge,
enn: networks.EnnArray) -> losses.LossFnArray:
del enn
if prior.num_classes > 1:
return losses.ClassificationPriorLoss(num_index_samples)
else:
return losses.RegressionPriorLoss(num_index_samples)
return prior_loss_ctor
def default_enn_loss(num_index_samples: int = 10,
distribution: str = 'none',
seed: int = 0,
weight_reg_scale: Optional[float] = None) -> LossCtor:
"""Constructs a default loss suitable for classification or regression."""
def loss_ctor(prior: testbed_base.PriorKnowledge,
enn: networks.EnnArray) -> losses.LossFnArray:
# Construct L2 or Xent loss based on regression/classification.
if prior.num_classes > 1:
single_loss = losses.combine_single_index_losses_as_metric(
train_loss=losses.XentLoss(prior.num_classes),
extra_losses={
'acc': losses.AccuracyErrorLoss(prior.num_classes)
},
)
else:
single_loss = losses.L2Loss()
# Add bootstrapping
boot_fn = data_noise.BootstrapNoise(enn, distribution, seed)
single_loss = losses.add_data_noise(single_loss, boot_fn)
loss_fn = losses.average_single_index_loss(single_loss, num_index_samples)
# Add L2 weight decay
if weight_reg_scale:
scale = (weight_reg_scale ** 2) / (2. * prior.num_train)
loss_fn = losses.add_l2_weight_decay(loss_fn, scale=scale)
return loss_fn
return loss_ctor
def gaussian_regression_loss(num_index_samples: int,
noise_scale: float = 1,
l2_weight_decay: float = 0,
exclude_bias_l2: bool = True) -> LossCtor:
"""Add a matching Gaussian noise to the target y."""
def loss_ctor(prior: testbed_base.PriorKnowledge,
enn: networks.EnnArray) -> losses.LossFnArray:
"""Add a matching Gaussian noise to the target y."""
noise_std = noise_scale * prior.noise_std
noise_fn = data_noise.GaussianTargetNoise(enn, noise_std)
single_loss = losses.add_data_noise(losses.L2Loss(), noise_fn)
loss_fn = losses.average_single_index_loss(single_loss, num_index_samples)
if l2_weight_decay != 0:
if exclude_bias_l2:
predicate = lambda module, name, value: name != 'b'
else:
predicate = lambda module, name, value: True
loss_fn = losses.add_l2_weight_decay(loss_fn, l2_weight_decay, predicate)
return loss_fn
return loss_ctor
def regularized_dropout_loss(num_index_samples: int = 10,
dropout_rate: float = 0.05,
scale: float = 1e-2,
tau: float = 1.0) -> LossCtor:
"""Constructs the special regularized loss of the paper "Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning" (2015)."""
def loss_ctor(prior: testbed_base.PriorKnowledge,
enn: networks.EnnArray) -> losses.LossFnArray:
del enn # Unused
# Construct L2 or Xent loss based on regression/classification.
if prior.num_classes > 1:
single_loss = losses.combine_single_index_losses_as_metric(
train_loss=losses.XentLoss(prior.num_classes),
extra_losses={
'acc': losses.AccuracyErrorLoss(prior.num_classes)
},
)
else:
single_loss = losses.L2Loss()
reg = (scale**2) * (1 - dropout_rate) / (2. * prior.num_train * tau)
loss_fn = losses.average_single_index_loss(single_loss, num_index_samples)
return losses.add_l2_weight_decay(loss_fn, scale=reg)
return loss_ctor
# Module specialized to work only with Array inputs and Batch data.
_LossConfig = losses.CombineLossConfig[chex.Array, enn_base.Output,
datasets.ArrayBatch]
def combine_loss_prior_loss(loss_fn: losses.LossFnArray,
prior_loss_fn: Optional[
losses.LossFnArray] = None,
weight: float = 1.) -> losses.LossFnArray:
"""Compatibility wrapper for deprecated prior_loss_fn interface."""
if prior_loss_fn is None:
return loss_fn
else:
return losses.combine_losses([
_LossConfig(loss_fn, 'loss'),
_LossConfig(prior_loss_fn, 'prior', weight),
])
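# Illustrative sketch (comment-only, added for exposition): each constructor
# above returns a LossCtor, i.e. a function of (prior_knowledge, enn) that
# builds the actual loss, so configs can hold one and apply it later:
#   loss_ctor = default_enn_loss(num_index_samples=8, weight_reg_scale=1.0)
#   loss_fn = loss_ctor(prior_knowledge, enn)  # prior/enn built elsewhere.
# combine_loss_prior_loss(loss_fn) is a no-op when no prior loss is supplied.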
|
neural_testbed-master
|
neural_testbed/agents/enn_losses.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Factory methods for baseline agents."""
import dataclasses
from typing import Sequence
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from neural_testbed import base as testbed_base
from neural_testbed.agents import enn_agent
from neural_testbed.agents.factories import base as factories_base
from neural_testbed.agents.factories import ensemble
@dataclasses.dataclass
class MLPConfig:
adaptive_weight_scale: bool = True # Whether to scale with prior
l2_weight_decay: float = 1. # Weight decay
hidden_sizes: Sequence[int] = (50, 50) # Hidden sizes for the neural network
num_batches: int = 1000 # Number of SGD steps
batch_strategy: bool = False # Whether to scale num_batches with data ratio
seed: int = 0 # Initialization seed
def make_mlp_agent(config: MLPConfig) -> enn_agent.VanillaEnnAgent:
"""Factory method to create a baseline MLP agent."""
config = ensemble.VanillaEnsembleConfig(
num_ensemble=1,
l2_weight_decay=config.l2_weight_decay,
adaptive_weight_scale=config.adaptive_weight_scale,
hidden_sizes=config.hidden_sizes,
num_batches=config.num_batches,
seed=config.seed)
return ensemble.make_agent(config)
@dataclasses.dataclass
class LogisticRegressionConfig:
adaptive_weight_scale: bool = True # Whether to scale with prior
l2_weight_decay: float = 1. # Weight decay
num_batches: int = 1000 # Number of SGD steps
seed: int = 0 # Initialization seed
def make_logistic_regression_agent(
config: LogisticRegressionConfig) -> enn_agent.VanillaEnnAgent:
"""Factory method to create a baseline logistic regression agent."""
config = ensemble.VanillaEnsembleConfig(
num_ensemble=1,
l2_weight_decay=config.l2_weight_decay,
adaptive_weight_scale=config.adaptive_weight_scale,
hidden_sizes=(),
num_batches=config.num_batches,
seed=config.seed)
return ensemble.make_agent(config)
def logistic_regression_sweep() -> Sequence[LogisticRegressionConfig]:
sweep = []
for adaptive_weight_scale in [True, False]:
for l2_weight_decay in [1e-4, 1e-3, 1e-2, 1e-1, 1, 10, 100]:
sweep.append(LogisticRegressionConfig(
l2_weight_decay=l2_weight_decay,
adaptive_weight_scale=adaptive_weight_scale,
))
return tuple(sweep)
def logistic_regression_paper_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=LogisticRegressionConfig(),
ctor=make_logistic_regression_agent,
sweep=logistic_regression_sweep)
@dataclasses.dataclass
class DummyConfig:
seed: int = 0 # Initialization seed
def dummy_sweep() -> Sequence[DummyConfig]:
return tuple([DummyConfig()])
def make_uniform_class_probs_agent(
config: DummyConfig) -> testbed_base.TestbedAgent:
"""Factory method to create a baseline uniform class probability agent."""
del config
def make_agent(
data: testbed_base.Data,
prior: testbed_base.PriorKnowledge,
) -> testbed_base.EpistemicSampler:
"""Ignores the input and always outputs equal logits for all classes."""
del data # data does not affect the baseline agent.
def enn_sampler(x: chex.Array, key: chex.PRNGKey) -> chex.Array:
del key # key does not affect the baseline agent.
return jnp.ones([x.shape[0], prior.num_classes]) / prior.num_classes
return enn_sampler
return make_agent
def uniform_class_probs_paper_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=DummyConfig(),
ctor=make_uniform_class_probs_agent,
sweep=dummy_sweep)
def make_average_class_probs_agent(
config: DummyConfig) -> testbed_base.TestbedAgent:
"""Factory method to create a baseline average class probability agent."""
del config
def make_agent(
data: testbed_base.Data,
prior: testbed_base.PriorKnowledge) -> testbed_base.EpistemicSampler:
"""Calculates the frequency of each class and outputs the class frequency."""
counts = jnp.array([
jnp.count_nonzero(data.y == label) for label in range(prior.num_classes)
])
average_probs = counts / prior.num_train
def enn_sampler(x: chex.Array, key: chex.PRNGKey) -> chex.Array:
del key # key does not affect the baseline agent.
return jnp.repeat(average_probs[None, :], x.shape[0], axis=0)
return enn_sampler
return make_agent
def average_class_probs_paper_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=DummyConfig(),
ctor=make_average_class_probs_agent,
sweep=dummy_sweep)
def make_prior_agent(
config: DummyConfig) -> testbed_base.TestbedAgent:
"""Factory method to create an agent that uses prior knowledge but ignores data."""
del config
def make_agent(
data: testbed_base.Data,
prior: testbed_base.PriorKnowledge) -> testbed_base.EpistemicSampler:
"""Samples an MLP according to the generative model."""
del data
hidden = 50
def net_fn(x: chex.Array) -> chex.Array:
"""Defining the generative model MLP."""
y = hk.Linear(
output_size=hidden,
b_init=hk.initializers.RandomNormal(1./jnp.sqrt(prior.input_dim)),
)(x)
y = jax.nn.relu(y)
y = hk.Linear(hidden)(y)
y = jax.nn.relu(y)
return hk.Linear(prior.num_classes)(y)
transformed = hk.without_apply_rng(hk.transform(net_fn))
def sampler(x: chex.Array, key: chex.PRNGKey) -> chex.Array:
params = transformed.init(key, x)
return transformed.apply(params, x) / prior.temperature
return jax.jit(sampler)
return make_agent
def prior_paper_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=DummyConfig(),
ctor=make_prior_agent,
sweep=dummy_sweep)
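# Illustrative sketch (added for exposition): a PaperAgent bundles a default
# config, a constructor and a sweep, mirroring how experiments/run.py uses it.
if __name__ == '__main__':
  paper_agent = logistic_regression_paper_agent()
  agent = paper_agent.ctor(paper_agent.default)  # A VanillaEnnAgent.
  print(type(agent).__name__, len(paper_agent.sweep()))  # 2 x 7 = 14 configs.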
|
neural_testbed-master
|
neural_testbed/agents/factories/baselines.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Factory methods for hypermodel agent."""
import dataclasses
from typing import Sequence
from enn import data_noise
from enn import losses
from enn import networks
import jax.numpy as jnp
from neural_testbed import base as testbed_base
from neural_testbed.agents import enn_agent
import numpy as np
import optax
@dataclasses.dataclass
class HypermodelConfig:
"""Configuration for hypermodel agent."""
index_dim: int = 6 # Index dimension
num_index_samples: int = 16 # Number of index samples to average over
prior_scale: float = 1 # Scale for additive prior function
l2_weight_decay: float = 1. # Weight decay
adaptive_weight_scale: bool = True # Whether to scale with prior knowledge
temp_scale_prior: str = 'sqrt' # How to scale prior with temperature
  distribution: str = 'none'  # Bootstrapping distribution
hidden_sizes: Sequence[int] = (50, 50) # Hidden sizes for the neural network
prior_hidden_sizes: Sequence[int] = (10,) # Hidden sizes for prior network
num_batches: int = 2000 # Number of SGD steps
batch_strategy: bool = False # Whether to scale num_batches with data ratio
learning_rate: float = 1e-3 # Learning rate for adam optimizer
seed: int = 0 # Initialization seed
scale: bool = False # Whether to scale the params or not
def make_hypermodel_agent(
config: HypermodelConfig) -> enn_agent.VanillaEnnAgent:
"""Factory method to create a hypermodel."""
def make_enn(prior: testbed_base.PriorKnowledge) -> networks.EnnArray:
prior_scale = config.prior_scale
if config.temp_scale_prior == 'lin':
prior_scale /= prior.temperature
elif config.temp_scale_prior == 'sqrt':
prior_scale /= float(jnp.sqrt(prior.temperature))
else:
pass
enn = networks.MLPHypermodelPriorIndependentLayers(
base_output_sizes=list(config.hidden_sizes) + [prior.num_classes],
prior_scale=prior_scale,
dummy_input=jnp.ones([100, prior.input_dim]),
indexer=networks.ScaledGaussianIndexer(config.index_dim),
prior_base_output_sizes=list(config.prior_hidden_sizes) +
[prior.num_classes],
hyper_hidden_sizes=[],
seed=config.seed,
scale=config.scale,
)
return enn
def make_loss(prior: testbed_base.PriorKnowledge,
enn: networks.EnnArray) -> losses.LossFnArray:
single_loss = losses.combine_single_index_losses_as_metric(
# This is the loss you are training on.
train_loss=losses.XentLoss(prior.num_classes),
# We will also log the accuracy in classification.
extra_losses={
'acc': losses.AccuracyErrorLoss(prior.num_classes)
},
)
# Adding bootstrapping
boot_fn = data_noise.BootstrapNoise(enn, config.distribution, config.seed)
single_loss = losses.add_data_noise(single_loss, boot_fn)
# Averaging over index
loss_fn = losses.average_single_index_loss(
single_loss, config.num_index_samples)
# Adding weight decay
scale = config.l2_weight_decay
scale /= prior.num_train
if config.adaptive_weight_scale:
scale *= np.sqrt(prior.temperature) * prior.input_dim
loss_fn = losses.add_l2_weight_decay(loss_fn, scale=scale)
return loss_fn
def batch_strategy(prior: testbed_base.PriorKnowledge) -> int:
if not config.batch_strategy:
return config.num_batches
data_ratio = prior.num_train / prior.input_dim
if data_ratio > 500: # high data regime
return config.num_batches * 5
elif data_ratio < 5: # low data regime
return config.num_batches // 5
else:
return config.num_batches
agent_config = enn_agent.VanillaEnnConfig(
enn_ctor=make_enn,
loss_ctor=make_loss,
optimizer=optax.adam(config.learning_rate),
num_batches=batch_strategy,
seed=config.seed,)
return enn_agent.VanillaEnnAgent(agent_config)
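# Illustrative sketch (added for exposition): constructing the agent is cheap;
# the ENN itself is only built when the agent is called on a problem's
# (train_data, prior_knowledge), as in neural_testbed/experiments/experiment.py.
if __name__ == '__main__':
  agent = make_hypermodel_agent(HypermodelConfig(index_dim=4, num_batches=100))
  print(agent.config.num_batches)  # The batch-strategy callable, not an int.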
|
neural_testbed-master
|
neural_testbed/agents/factories/hypermodel.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Factory methods for epinet agent."""
import dataclasses
from typing import Optional, Sequence
from enn import data_noise
from enn import losses
from enn import networks
from enn.networks import epinet
import jax.numpy as jnp
from neural_testbed import agents
from neural_testbed import base as testbed_base
from neural_testbed.agents import enn_agent
@dataclasses.dataclass
class EpinetConfig:
"""Config for ensemble with prior functions."""
index_dim: int = 8 # Index dimension
l2_weight_decay: float = 0.2 # Weight decay
distribution: str = 'none' # Bootstrap distribution
prior_scale: float = 0.3 # Scale of the additive prior function
prior_scale_epi: float = 0. # Scale of the epinet prior function
prior_loss_freq: int = 100_000 # Prior loss frequency
hidden_sizes: Sequence[int] = (50, 50) # Hidden sizes for the neural network
num_batches: int = 1_000 # Number of SGD steps
epi_hiddens: Sequence[int] = (15, 15) # Hidden sizes in epinet
add_hiddens: Sequence[int] = (5, 5) # Hidden sizes in additive prior
seed: int = 0 # Initialization seed
override_index_samples: Optional[int] = None # Set SGD training index samples
def make_agent(config: EpinetConfig) -> enn_agent.VanillaEnnAgent:
"""Factory method to create an epinet agent with ensemble prior."""
num_index_samples = config.override_index_samples or config.index_dim
def make_enn(prior: testbed_base.PriorKnowledge) -> networks.EnnArray:
prior_scale = config.prior_scale / prior.temperature
    # We only want to expose the final hidden layer, so we set the flag to
    # False for the earlier hidden layers and the final output layer, and to
    # True for the final hidden layer.
if config.hidden_sizes:
expose_layers = [False,] * (len(config.hidden_sizes) - 1) + [True, False]
else:
expose_layers = [False]
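    # E.g. hidden_sizes=(50, 50) gives expose_layers=[False, True, False]:
    # only the last hidden layer is exposed to the epinet.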
enn = epinet.make_mlp_epinet(
output_sizes=list(config.hidden_sizes) + [prior.num_classes],
epinet_hiddens=config.epi_hiddens,
index_dim=config.index_dim,
expose_layers=expose_layers,
prior_scale=config.prior_scale_epi,
)
# Adding a linear combination of networks as prior function
mlp_prior_fns = networks.make_mlp_ensemble_prior_fns(
output_sizes=list(config.add_hiddens) + [prior.num_classes,],
dummy_input=jnp.ones([100, prior.input_dim]),
num_ensemble=config.index_dim,
seed=config.seed,
)
mlp_prior_fn = networks.combine_functions_linear_in_index(mlp_prior_fns)
return networks.EnnWithAdditivePrior(enn, mlp_prior_fn, prior_scale)
def make_loss(prior: testbed_base.PriorKnowledge,
enn: networks.EnnArray) -> losses.LossFnArray:
"""You can override this function to try different loss functions."""
single_loss = losses.combine_single_index_losses_as_metric(
train_loss=losses.XentLoss(prior.num_classes),
extra_losses={
'acc': losses.AccuracyErrorLoss(prior.num_classes)
},
)
# Adding bootstrapping
boot_fn = data_noise.BootstrapNoise(enn, config.distribution, config.seed)
single_loss = losses.add_data_noise(single_loss, boot_fn)
# Averaging over index
loss_fn = losses.average_single_index_loss(single_loss, num_index_samples)
# Adding weight decay
scale = config.l2_weight_decay
scale *= (prior.input_dim / prior.num_train) ** 0.7
def predicate(module_name: str, name: str, value) -> bool:
del name, value
return 'prior' not in module_name
loss_fn = losses.add_l2_weight_decay(loss_fn, scale, predicate)
return loss_fn
def num_batches(prior: testbed_base.PriorKnowledge) -> int:
if (prior.num_train / prior.input_dim) > 500:
return config.num_batches * 10
else:
return config.num_batches
agent_config = enn_agent.VanillaEnnConfig(
enn_ctor=make_enn,
loss_ctor=make_loss,
num_batches=num_batches,
prior_loss_ctor=agents.default_enn_prior_loss(num_index_samples),
prior_loss_freq=config.prior_loss_freq,
seed=config.seed,
)
return enn_agent.VanillaEnnAgent(agent_config)
|
neural_testbed-master
|
neural_testbed/agents/factories/epinet.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Factory methods for Bayes by Backprop agent."""
import dataclasses
from typing import Sequence
from enn import losses
from enn import networks
import jax.numpy as jnp
from neural_testbed import base as testbed_base
from neural_testbed.agents import enn_agent
import optax
@dataclasses.dataclass
class BBBConfig:
"""Configuration for bbb agent."""
hidden_sizes: Sequence[int] = (50, 50) # Hidden sizes for the neural network
num_batches: int = 1000 # Number of SGD steps
learning_rate: float = 3e-3 # Learning rate for adam optimizer
seed: int = 0 # Initialization seed
sigma_1: float = 1.0 # Standard deviation of the first Gaussian prior
sigma_2: float = 0.75 # Standard deviation of the second Gaussian prior
  mixture_scale: float = 1.  # Scale for mixture of two Gaussian densities
num_index_samples: int = 8 # Number of index samples to average over
kl_method: str = 'analytical' # How to find KL of prior and vi posterior
adaptive_scale: bool = True # Whether to scale prior KL with temp
output_scale: bool = False # Whether to scale output with temperature
batch_strategy: bool = False # Whether to scale num_batches with data ratio
def make_agent(config: BBBConfig) -> enn_agent.VanillaEnnAgent:
"""Factory method to create a BBB agent."""
def make_enn(prior: testbed_base.PriorKnowledge) -> networks.EnnArray:
"""Makes ENN."""
temperature = 1.
if config.output_scale:
temperature = prior.temperature
enn = networks.make_bbb_enn(
base_output_sizes=list(config.hidden_sizes) + [prior.num_classes],
dummy_input=jnp.zeros(shape=(prior.input_dim,)),
temperature=temperature)
return enn
def make_loss(prior: testbed_base.PriorKnowledge,
enn: networks.EnnArray) -> losses.LossFnArray:
"""Define the ENN architecture from the prior."""
del enn
# Loss assuming a classification task.
assert prior.num_classes > 1
log_likelihood_fn = losses.get_categorical_loglike_fn(
num_classes=prior.num_classes)
if config.kl_method == 'analytical':
model_prior_kl_fn = losses.get_analytical_diagonal_linear_model_prior_kl_fn(
prior.num_train, config.sigma_1)
elif config.kl_method == 'sample_based':
model_prior_kl_fn = losses.get_sample_based_model_prior_kl_fn(
prior.num_train, config.sigma_1, config.sigma_2,
config.mixture_scale)
else:
      raise ValueError(f'Invalid kl_method={config.kl_method}')
if config.adaptive_scale:
single_loss = losses.ElboLoss(log_likelihood_fn, model_prior_kl_fn,
prior.temperature, prior.input_dim)
else:
single_loss = losses.ElboLoss(log_likelihood_fn, model_prior_kl_fn)
loss_fn = losses.average_single_index_loss(
single_loss, num_index_samples=config.num_index_samples)
return loss_fn
def batch_strategy(prior: testbed_base.PriorKnowledge) -> int:
if not config.batch_strategy:
return config.num_batches
data_ratio = prior.num_train / prior.input_dim
if data_ratio > 500: # high data regime
return config.num_batches * 5
elif data_ratio < 5: # low data regime
return config.num_batches // 5
else:
return config.num_batches
agent_config = enn_agent.VanillaEnnConfig(
enn_ctor=make_enn,
loss_ctor=make_loss,
num_batches=batch_strategy,
optimizer=optax.adam(config.learning_rate),
seed=config.seed,
)
return enn_agent.VanillaEnnAgent(agent_config)
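# A minimal usage sketch (editor's addition, not part of the original file):
# constructing the BBB agent is cheap; evaluating it needs (data, prior) from a
# neural_testbed problem, which is outside the scope of this module.
if __name__ == '__main__':
  _bbb_agent = make_agent(BBBConfig(learning_rate=3e-3, num_batches=100))
  # _sampler = _bbb_agent(data, prior)  # data/prior come from the testbed
  # _logits = _sampler(x_test, key)     # EpistemicSampler call, per testbed_base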
|
neural_testbed-master
|
neural_testbed/agents/factories/bbb.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Preconditioner for SGMCMC optimizers.
Code is based on
https://github.com/google-research/google-research/tree/master/bnn_hmc
"""
import dataclasses
from typing import Any, NamedTuple
import jax
from jax import numpy as jnp
from optax import Params
Momentum = Any # An arbitrary pytree of `jnp.ndarrays`
GradMomentEstimates = Params # Same type as parameters
PreconditionerState = NamedTuple # State of a preconditioner
@dataclasses.dataclass
class Preconditioner:
"""Preconditioner transformation."""
init: Any
update_preconditioner: Any
multiply_by_m_sqrt: Any
multiply_by_m_inv: Any
multiply_by_m_sqrt_inv: Any
class RMSPropPreconditionerState(PreconditionerState):
grad_moment_estimates: GradMomentEstimates
def get_rmsprop_preconditioner(running_average_factor=0.99, eps=1e-7):
"""Define RMSProp Preconditioner."""
def init_fn(params):
return RMSPropPreconditionerState(
grad_moment_estimates=jax.tree_map(jnp.zeros_like, params))
def update_preconditioner_fn(gradient, preconditioner_state):
r = running_average_factor
grad_moment_estimates = jax.tree_map(
        lambda e, g: e * r + g**2 * (1 - r),
preconditioner_state.grad_moment_estimates, gradient)
return RMSPropPreconditionerState(
grad_moment_estimates=grad_moment_estimates)
def multiply_by_m_inv_fn(vec, preconditioner_state):
return jax.tree_map(lambda e, v: v / (eps + jnp.sqrt(e)),
preconditioner_state.grad_moment_estimates, vec)
def multiply_by_m_sqrt_fn(vec, preconditioner_state):
return jax.tree_map(lambda e, v: v * jnp.sqrt(eps + jnp.sqrt(e)),
preconditioner_state.grad_moment_estimates, vec)
def multiply_by_m_sqrt_inv_fn(vec, preconditioner_state):
return jax.tree_map(lambda e, v: v / jnp.sqrt(eps + jnp.sqrt(e)),
preconditioner_state.grad_moment_estimates, vec)
return Preconditioner(
init=init_fn,
update_preconditioner=update_preconditioner_fn,
multiply_by_m_inv=multiply_by_m_inv_fn,
multiply_by_m_sqrt=multiply_by_m_sqrt_fn,
multiply_by_m_sqrt_inv=multiply_by_m_sqrt_inv_fn)
class IdentityPreconditionerState(PreconditionerState):
"""Identity preconditioner is stateless."""
def get_identity_preconditioner():
"""Define Identity Preconditioner."""
def init_fn(_):
return IdentityPreconditionerState()
def update_preconditioner_fn(gradient, preconditioner_state):
del gradient, preconditioner_state
return IdentityPreconditionerState()
def multiply_by_m_inv_fn(vec, _):
return vec
def multiply_by_m_sqrt_fn(vec, _):
return vec
def multiply_by_m_sqrt_inv_fn(vec, _):
return vec
return Preconditioner(
init=init_fn,
update_preconditioner=update_preconditioner_fn,
multiply_by_m_inv=multiply_by_m_inv_fn,
multiply_by_m_sqrt=multiply_by_m_sqrt_fn,
multiply_by_m_sqrt_inv=multiply_by_m_sqrt_inv_fn)
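# A minimal usage sketch (editor's addition, not part of the original file):
# the preconditioner is a bundle of pure functions over parameter pytrees, so it
# can be exercised on a toy pytree without any model.
if __name__ == '__main__':
  _params = {'w': jnp.ones([3]), 'b': jnp.zeros([2])}
  _grads = jax.tree_map(lambda p: 0.1 * jnp.ones_like(p), _params)
  _pre = get_rmsprop_preconditioner()
  _state = _pre.init(_params)
  _state = _pre.update_preconditioner(_grads, _state)
  _scaled_grads = _pre.multiply_by_m_inv(_grads, _state)
  print(jax.tree_map(jnp.shape, _scaled_grads))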
|
neural_testbed-master
|
neural_testbed/agents/factories/preconditioner.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Exposing public methods for approximate posterior training."""
from neural_testbed.agents.factories import base as factories_base
from neural_testbed.agents.factories import baselines
from neural_testbed.agents.factories import bbb
from neural_testbed.agents.factories import deep_kernel
from neural_testbed.agents.factories import dropout
from neural_testbed.agents.factories import ensemble
from neural_testbed.agents.factories import ensemble_plus
from neural_testbed.agents.factories import epinet
from neural_testbed.agents.factories import hypermodel
from neural_testbed.agents.factories import knn
from neural_testbed.agents.factories import random_forest
from neural_testbed.agents.factories import sgmcmc
|
neural_testbed-master
|
neural_testbed/agents/factories/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Factory methods for ensemble_plus agent."""
import dataclasses
from typing import Optional, Sequence
from enn import data_noise
from enn import losses
from enn import networks
import jax.numpy as jnp
from neural_testbed import base as testbed_base
from neural_testbed.agents import enn_agent
import numpy as np
@dataclasses.dataclass
class EnsembleConfig:
"""Config for ensemble with prior functions."""
num_ensemble: int = 100 # Size of ensemble
l2_weight_decay: float = 1. # Weight decay
adaptive_weight_scale: bool = True # Whether to scale with prior
  distribution: str = 'none'  # Bootstrap distribution
prior_scale: float = 3. # Scale of prior function
temp_scale_prior: str = 'sqrt' # How to scale prior with temperature
hidden_sizes: Sequence[int] = (50, 50) # Hidden sizes for the neural network
num_batches: int = 1_000 # Number of SGD steps
batch_strategy: bool = False # Whether to scale num_batches with data ratio
seed: int = 0 # Initialization seed
override_index_samples: Optional[int] = None # Set SGD training index samples
def make_agent(config: EnsembleConfig) -> testbed_base.TestbedAgent:
"""Factory method to create a ensemble with prior."""
num_index_samples = config.override_index_samples or config.num_ensemble
def make_enn(prior: testbed_base.PriorKnowledge) -> networks.EnnArray:
prior_scale = config.prior_scale
if config.temp_scale_prior == 'linear':
prior_scale /= prior.temperature
elif config.temp_scale_prior == 'sqrt':
prior_scale /= float(jnp.sqrt(prior.temperature))
else:
pass
enn = networks.make_ensemble_mlp_with_prior_enn(
output_sizes=list(config.hidden_sizes) + [prior.num_classes],
dummy_input=jnp.ones([100, prior.input_dim]),
num_ensemble=config.num_ensemble,
prior_scale=prior_scale,
seed=config.seed + 999,
)
return enn
def make_loss(prior: testbed_base.PriorKnowledge,
enn: networks.EnnArray) -> losses.LossFnArray:
"""You can override this function to try different loss functions."""
single_loss = losses.combine_single_index_losses_as_metric(
train_loss=losses.XentLoss(prior.num_classes),
extra_losses={
'acc': losses.AccuracyErrorLoss(prior.num_classes)
},
)
# Adding bootstrapping
boot_fn = data_noise.BootstrapNoise(enn, config.distribution, config.seed)
single_loss = losses.add_data_noise(single_loss, boot_fn)
# Averaging over index
loss_fn = losses.average_single_index_loss(single_loss,
num_index_samples)
# Adding weight decay
scale = config.l2_weight_decay / config.num_ensemble
scale /= prior.num_train
if config.adaptive_weight_scale:
scale *= np.sqrt(prior.temperature) * prior.input_dim
loss_fn = losses.add_l2_weight_decay(loss_fn, scale=scale)
return loss_fn
def batch_strategy(prior: testbed_base.PriorKnowledge) -> int:
if not config.batch_strategy:
return config.num_batches
data_ratio = prior.num_train / prior.input_dim
if data_ratio > 500: # high data regime
return config.num_batches * 5
elif data_ratio < 5: # low data regime
return config.num_batches // 5
else:
return config.num_batches
agent_config = enn_agent.VanillaEnnConfig(
enn_ctor=make_enn,
loss_ctor=make_loss,
num_batches=batch_strategy,
seed=config.seed,)
return enn_agent.VanillaEnnAgent(agent_config)
|
neural_testbed-master
|
neural_testbed/agents/factories/ensemble_plus.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Factory methods for sgmcmc agent."""
import dataclasses
from absl import logging
import chex
from enn import losses
from enn import networks
from enn import supervised
import jax
import jax.numpy as jnp
from neural_testbed import base as testbed_base
from neural_testbed.agents.factories import preconditioner as pre
from neural_testbed.agents.factories import sgld_optimizer
from neural_testbed.agents.factories import utils
@dataclasses.dataclass
class SGMCMCConfig:
"""Config Class for SGMCMC."""
learning_rate: float = 0.0001 # Learning rate for optimizers
prior_variance: float = 0.1 # Variance of Gaussian prior
alg_temperature: float = 1 # Temperature parameter for SGLD
momentum_decay: float = 0.9 # Momentum decay parameter for SGLD
preconditioner: str = 'None' # Choice of preconditioner; None or RMSprop
num_hidden: int = 50 # Hidden units in network
num_batches: int = 500000 # Number of SGD steps
burn_in_time: int = 100000 # Burn in time for MCMC sampling
model_saving_frequency: int = 1000 # Frequency of saving models
adaptive_prior_variance: bool = False # Scale prior_variance with dimension
seed: int = 0 # Initialization seed
# Choice of using preconditioner
def get_preconditioner(config: SGMCMCConfig):
if config.preconditioner == 'None':
preconditioner = None
else:
preconditioner = pre.get_rmsprop_preconditioner()
return preconditioner
# ENN sampler for MCMC
def extract_enn_sampler(enn: networks.EnnArray,
params_list) -> testbed_base.EpistemicSampler:
"""ENN sampler for MCMC."""
def enn_sampler(x: chex.Array, key: chex.PRNGKey) -> chex.Array:
"""Generate a random sample from posterior distribution at x."""
# pylint: disable=cell-var-from-loop
param_index = jax.random.randint(key, [], 0, len(params_list))
fns = [lambda x, w=p: enn.apply(w, x, 0) for p in params_list] # pytype: disable=missing-parameter
out = jax.lax.switch(param_index, fns, x)
return networks.parse_net_output(out)
return jax.jit(enn_sampler)
def make_agent(config: SGMCMCConfig):
"""Factory method to create a sgmcmc agent."""
def make_enn(prior: testbed_base.PriorKnowledge) -> networks.EnnArray:
enn = networks.make_einsum_ensemble_mlp_enn(
output_sizes=[config.num_hidden, config.num_hidden, prior.num_classes],
num_ensemble=1,
nonzero_bias=False,
)
return enn
def make_loss(prior: testbed_base.PriorKnowledge) -> losses.LossFnArray:
single_loss = losses.combine_single_index_losses_as_metric(
train_loss=losses.XentLoss(prior.num_classes),
extra_losses={
'acc': losses.AccuracyErrorLoss(prior.num_classes)
},
)
loss_fn = losses.average_single_index_loss(single_loss, 1)
# Gaussian prior can be interpreted as a L2-weight decay.
prior_variance = config.prior_variance
# Scale prior_variance for large input_dim
if config.adaptive_prior_variance and prior.input_dim >= 100:
prior_variance *= 2
scale = (1 / prior_variance) * jnp.sqrt(
prior.temperature) * prior.input_dim / prior.num_train
loss_fn = losses.add_l2_weight_decay(loss_fn, scale=scale) # pytype: disable=wrong-arg-types # jax-types
return loss_fn
log_freq = int(config.num_batches / 50) or 1
def sgd_agent(
data: testbed_base.Data,
prior: testbed_base.PriorKnowledge,
) -> testbed_base.EpistemicSampler:
"""Train a MLP via SGMCMC."""
preconditioner = get_preconditioner(config)
optimizer_sgld = sgld_optimizer.sgld_gradient_update(
config.learning_rate,
momentum_decay=config.momentum_decay,
seed=0,
preconditioner=preconditioner,
temperature=config.alg_temperature/prior.num_train)
# Define the experiment
sgd_experiment = supervised.Experiment(
enn=make_enn(prior),
loss_fn=make_loss(prior),
optimizer=optimizer_sgld,
dataset=utils.make_iterator(data, batch_size=100),
train_log_freq=log_freq,
)
# Train the agent
params_list = []
step = 0
for _ in range(config.num_batches):
step += 1
sgd_experiment.train(1)
# Save the model every model_saving_frequency steps
if step >= config.burn_in_time and ((step - config.burn_in_time) %
config.model_saving_frequency == 0):
params_list.append(sgd_experiment.state.params)
logging.info('Saving params at step %d.', step)
return extract_enn_sampler(make_enn(prior), params_list)
return sgd_agent
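# A minimal usage sketch (editor's addition, not part of the original file):
# the factory returns a plain function mapping (data, prior) to an
# EpistemicSampler; the short-budget config below is purely illustrative.
if __name__ == '__main__':
  _sgmcmc_agent = make_agent(SGMCMCConfig(num_batches=1_000, burn_in_time=100))
  # _sampler = _sgmcmc_agent(data, prior)  # data/prior come from the testbed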
|
neural_testbed-master
|
neural_testbed/agents/factories/sgmcmc.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for agent factories."""
from typing import Optional
import chex
from enn import datasets
from enn import networks
from enn import supervised
from enn import utils
import jax
from neural_testbed import base as testbed_base
def extract_enn_sampler(
experiment: supervised.BaseExperiment,
) -> testbed_base.EpistemicSampler:
def enn_sampler(x: chex.Array, key: chex.PRNGKey) -> chex.Array:
"""Generate a random sample from posterior distribution at x."""
net_out = experiment.predict(x, key)
return networks.parse_net_output(net_out)
return jax.jit(enn_sampler)
def make_iterator(
data: testbed_base.Data, batch_size: Optional[int] = None
) -> datasets.ArrayBatchIterator:
batch = datasets.ArrayBatch(x=data.x, y=data.y)
return utils.make_batch_iterator(batch, batch_size)
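# A minimal usage sketch (editor's addition, not part of the original file):
# make_iterator simply wraps the testbed Data tuple in an enn batch iterator;
# the toy numpy arrays below are illustrative only.
if __name__ == '__main__':
  import numpy as np
  _data = testbed_base.Data(x=np.ones([8, 2]), y=np.zeros([8, 1]))
  _iterator = make_iterator(_data, batch_size=4)
  _batch = next(_iterator)  # datasets.ArrayBatch with fields x and y
  print(_batch.x.shape, _batch.y.shape)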
|
neural_testbed-master
|
neural_testbed/agents/factories/utils.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Factory methods for deep kernel agent.
This works as follows:
1) Train a deep neural network on the data as normal (with regularization etc
if required).
2) Treat the learned mapping in the network as a *deep kernel* that is taking
the input data and mapping it to a new space where the examples are
linearly separable.
3) During inference we treat the learned deep kernel as the kernel in a
Gaussian process.
4) We do some clever linear algebra to keep the inference (relatively)
tractable as the problem size and number of data increases.
"""
import dataclasses
import functools
from typing import Callable, Iterable, NamedTuple, Optional, Sequence
import chex
from enn import datasets
from enn import losses
from enn import utils
import haiku as hk
import jax
import jax.numpy as jnp
from neural_testbed import base as testbed_base
import optax
@dataclasses.dataclass
class Normalization:
"""Enum categorizing how we normalize the activations in the last layer."""
during_training = 1
only_output = 2
no_normalization = 3
@dataclasses.dataclass
class DeepKernelConfig:
"""Deep kernel config."""
num_train_steps: int = 1_000 # number of training steps
batch_size: int = 100 # batch size to train with
learning_rate: float = 1e-3 # training learning rate
weight_decay: float = 1.0 # l2 weight decay
hidden_sizes: Sequence[int] = (50, 50) # num_features is hidden_sizes[-1]
scale_factor: float = 2.0 # sampling scale factor
num_inference_samples: int = 32_768 # max number of train data to use
sigma_squared_factor: float = 4.0 # noise factor
seed: int = 0 # initialization seed
normalization: int = Normalization.only_output # how to normalize last layer
class TrainingState(NamedTuple):
params: hk.Params
opt_state: optax.OptState
class MlpWithActivations(hk.Module):
"""A multi-layer perceptron module. Mostly copy-pasted from hk.nets.Mlp."""
def __init__(
self,
output_sizes: Iterable[int],
activation: Callable[[jnp.ndarray], jnp.ndarray] = jax.nn.relu,
normalize_final: int = Normalization.during_training,
name: Optional[str] = None,
):
"""Constructs an MLP where the last layer activation is available.
Args:
output_sizes: Sequence of layer sizes.
activation: Activation function to apply between :class:`~haiku.Linear`
layers. Defaults to ReLU.
normalize_final: How to normalize the activations of the penultimate
layer.
name: Optional name for this module.
    Raises:
      ValueError: If ``normalize_final`` is not a valid ``Normalization``
        setting (raised when the module is called).
"""
super().__init__(name=name)
self.activation = activation
self.normalize_final = normalize_final
layers = []
for index, output_size in enumerate(output_sizes):
layers.append(
hk.Linear(output_size=output_size, name='linear_%d' % index))
self.layers = tuple(layers)
def __call__(
self,
inputs: jnp.ndarray,
) -> jnp.ndarray:
"""Connects the module to some inputs.
Args:
inputs: A Tensor of shape ``[batch_size, input_size]``.
Returns:
The output of the model of size ``[batch_size, output_size]``.
"""
num_layers = len(self.layers)
out = hk.Flatten()(inputs)
for i, layer in enumerate(self.layers):
if i == (num_layers - 1): # this is the final layer, apply normalization:
if self.normalize_final == Normalization.during_training:
out /= (1e-6 + jnp.expand_dims(jnp.linalg.norm(out, axis=1), 1))
penultimate_out = out
elif self.normalize_final == Normalization.only_output:
penultimate_out = out / (
1e-6 + jnp.expand_dims(jnp.linalg.norm(out, axis=1), 1))
elif self.normalize_final == Normalization.no_normalization:
penultimate_out = out
else:
raise ValueError('Invalid normalize_final setting')
out = layer(out)
if i < (num_layers - 1): # don't activate final layer
out = self.activation(out)
return (out, penultimate_out) # pytype: disable=bad-return-type # jax-ndarray
def make_agent(config: DeepKernelConfig) -> testbed_base.TestbedAgent:
"""Factory method to create a deep kernel agent."""
def deep_kernel_agent(
data: testbed_base.Data,
prior: testbed_base.PriorKnowledge,
) -> testbed_base.EpistemicSampler:
"""Output uniform class probabilities."""
rng = hk.PRNGSequence(config.seed)
enn_data = datasets.ArrayBatch(x=data.x, y=data.y)
dataset = utils.make_batch_iterator(enn_data, config.batch_size,
config.seed)
def predict_fn(x):
model = MlpWithActivations(
output_sizes=list(config.hidden_sizes) + [prior.num_classes],
normalize_final=config.normalization)
logits, final_layer_activations = model(x)
return (logits, final_layer_activations)
predict_fn_t = hk.without_apply_rng(hk.transform(predict_fn))
params = predict_fn_t.init(next(rng), next(dataset).x)
# helper function to conform to testbed api
def net(params, x, index):
del index
logits, _ = predict_fn_t.apply(params, x)
return logits
# use the same weight_decay heuristic as other agents
weight_decay = (
config.weight_decay * jnp.sqrt(prior.temperature) * prior.input_dim /
prior.num_train)
single_loss = losses.combine_single_index_losses_as_metric(
# This is the loss you are training on.
train_loss=losses.XentLoss(prior.num_classes),
# We will also log the accuracy in classification.
extra_losses={
'acc': losses.AccuracyErrorLoss(prior.num_classes)
},
)
single_loss = losses.wrap_single_loss_as_single_loss_no_state(single_loss)
loss_fn = losses.average_single_index_loss_no_state(single_loss,)
loss_fn = losses.add_l2_weight_decay_no_state(loss_fn, scale=weight_decay) # pytype: disable=wrong-arg-types # jax-types
loss_fn = jax.jit(functools.partial(loss_fn, net))
optimizer = optax.adam(config.learning_rate)
opt_state = optimizer.init(params)
def train_step(state, batch):
_, grads = jax.value_and_grad(
loss_fn, has_aux=True)(state.params, batch, None)
updates, new_opt_state = optimizer.update(grads, state.opt_state)
new_params = optax.apply_updates(state.params, updates)
return TrainingState(new_params, new_opt_state)
state = TrainingState(params, opt_state)
for _ in range(config.num_train_steps):
batch = next(dataset)
state = train_step(state, batch)
##### prepare Cholesky factor #####
# num_inference_samples controls how much training data to use for the
# inference step, might run into memory issues if using all data
num_inference_samples = min(config.num_inference_samples, data.x.shape[0])
# B_train -> num_inference_samples
d = utils.make_batch_iterator(enn_data, num_inference_samples, config.seed)
# phi_x [B_train, num_features] (training data)
_, phi_x = predict_fn_t.apply(state.params, next(d).x)
# at high temperature there is higher sampling noise
sigma_squared = config.sigma_squared_factor * prior.temperature
# [num_features, num_features]
m = sigma_squared * jnp.eye(phi_x.shape[1]) + phi_x.T @ phi_x
m_half = jax.scipy.linalg.cholesky(m, lower=True, overwrite_a=True)
##### Cholesky factor ready #####
def enn_sampler(x: chex.Array, key: chex.PRNGKey) -> chex.Array:
# phi_s [B_test, num_features] (test data)
rng = hk.PRNGSequence(key)
mean_s, phi_s = predict_fn_t.apply(state.params, x)
# [num_features, num_classes]
sample = jax.random.normal(
next(rng), shape=(config.hidden_sizes[-1], prior.num_classes))
# [num_features, num_classes]
sample = jax.scipy.linalg.solve_triangular(
m_half, sample, lower=True, trans=True, overwrite_b=True)
scale = (
config.scale_factor * jnp.sqrt(sigma_squared) /
jnp.sqrt(prior.num_train) / jnp.sqrt(prior.temperature))
# [B_test, num_classes]
return mean_s + scale * phi_s @ sample # sampled logit from posterior
return enn_sampler
return deep_kernel_agent
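# A minimal usage sketch (editor's addition, not part of the original file):
# the returned agent follows the four steps in the module docstring (train an
# MLP, reuse its last-layer features as a kernel, GP-style inference, sample
# logits); actually running it needs (data, prior) from a testbed problem.
if __name__ == '__main__':
  _dk_agent = make_agent(DeepKernelConfig(num_train_steps=100))
  # _sampler = _dk_agent(data, prior)  # data/prior come from the testbed
  # _logits = _sampler(x_test, jax.random.PRNGKey(0))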
|
neural_testbed-master
|
neural_testbed/agents/factories/deep_kernel.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Factory methods for ensemble agent."""
import dataclasses
from typing import Sequence
from enn import losses
from enn import networks
from neural_testbed import base as testbed_base
from neural_testbed.agents import enn_agent
import numpy as np
@dataclasses.dataclass
class VanillaEnsembleConfig:
num_ensemble: int = 100 # Size of ensemble
l2_weight_decay: float = 1. # Weight decay
adaptive_weight_scale: bool = True # Whether to scale with prior
hidden_sizes: Sequence[int] = (50, 50) # Hidden sizes for the neural network
num_batches: int = 1000 # Number of SGD steps
batch_strategy: bool = False # Whether to scale num_batches with data ratio
seed: int = 0 # Initialization seed
def make_agent(config: VanillaEnsembleConfig) -> enn_agent.VanillaEnnAgent:
"""Factory method to create a vanilla ensemble."""
def make_enn(prior: testbed_base.PriorKnowledge) -> networks.EnnArray:
return networks.make_einsum_ensemble_mlp_enn(
output_sizes=list(config.hidden_sizes) + [prior.num_classes],
num_ensemble=config.num_ensemble,
nonzero_bias=False,
)
def make_loss(prior: testbed_base.PriorKnowledge,
enn: networks.EnnArray) -> losses.LossFnArray:
del enn
single_loss = losses.combine_single_index_losses_as_metric(
train_loss=losses.XentLoss(prior.num_classes),
extra_losses={
'acc': losses.AccuracyErrorLoss(prior.num_classes)
},
)
# Averaging over index
loss_fn = losses.average_single_index_loss(single_loss, config.num_ensemble)
# Adding weight decay
scale = config.l2_weight_decay / config.num_ensemble
scale /= prior.num_train
if config.adaptive_weight_scale:
scale *= np.sqrt(prior.temperature) * prior.input_dim
loss_fn = losses.add_l2_weight_decay(loss_fn, scale=scale)
return loss_fn
def batch_strategy(prior: testbed_base.PriorKnowledge) -> int:
if not config.batch_strategy:
return config.num_batches
data_ratio = prior.num_train / prior.input_dim
if data_ratio > 500: # high data regime
return config.num_batches * 5
elif data_ratio < 5: # low data regime
return config.num_batches // 5
else:
return config.num_batches
agent_config = enn_agent.VanillaEnnConfig(
enn_ctor=make_enn,
loss_ctor=make_loss,
num_batches=batch_strategy,
seed=config.seed,
)
return enn_agent.VanillaEnnAgent(agent_config)
|
neural_testbed-master
|
neural_testbed/agents/factories/ensemble.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Optax implementations of SGMCMC optimizers.
Code is based on
https://github.com/google-research/google-research/tree/master/bnn_hmc
"""
from typing import Any, NamedTuple
import jax
from jax import numpy as jnp
from neural_testbed.agents.factories import preconditioner as pre
from optax import GradientTransformation
from optax import Params
Momentum = Any # An arbitrary pytree of `jnp.ndarrays`
GradMomentEstimates = Params # Same type as parameters
PreconditionerState = NamedTuple # State of a preconditioner
def normal_like_tree(a, key):
"""Generate Gaussian noises."""
treedef = jax.tree_structure(a)
num_vars = len(jax.tree_leaves(a))
all_keys = jax.random.split(key, num=(num_vars + 1))
noise = jax.tree_map(lambda p, k: jax.random.normal(k, shape=p.shape), a,
jax.tree_unflatten(treedef, all_keys[1:]))
return noise, all_keys[0]
class OptaxSGLDState(NamedTuple):
"""Optax state for the SGLD optimizer."""
count: jnp.ndarray
rng_key: jnp.ndarray
momentum: Momentum
preconditioner_state: PreconditionerState
def sgld_gradient_update(step_size,
seed,
momentum_decay=0.,
preconditioner=None,
temperature=0.1):
"""Optax implementation of the SGLD optimizer.
  If momentum_decay is not zero, this implements underdamped SGLD (SGHMC), as in
  "Stochastic Gradient Hamiltonian Monte Carlo", Tianqi Chen, Emily B. Fox,
  Carlos Guestrin; ICML 2014.
Args:
step_size: learning rate
seed: int, random seed.
momentum_decay: float, momentum decay parameter (default: 0).
preconditioner: Preconditioner, an object representing the preconditioner
or None; if None, identity preconditioner is used (default: None).
temperature: algorithm temperature. If temperature<1, return a cold
posterior.
Returns:
Optax.GradientTransformation.
"""
if preconditioner is None:
preconditioner = pre.get_identity_preconditioner()
def init_fn(params):
return OptaxSGLDState(
count=jnp.zeros([], jnp.int32),
rng_key=jax.random.PRNGKey(seed),
momentum=jax.tree_map(jnp.zeros_like, params),
preconditioner_state=preconditioner.init(params))
def update_fn(gradient, state, params=None):
del params
noise_std = jnp.sqrt(2 * (1 - momentum_decay) * temperature)
preconditioner_state = preconditioner.update_preconditioner(
gradient, state.preconditioner_state)
noise, new_key = normal_like_tree(gradient, state.rng_key)
noise = preconditioner.multiply_by_m_sqrt(noise, preconditioner_state)
def update_momentum(m, g, n):
return momentum_decay * m + g * jnp.sqrt(step_size) - n * noise_std
momentum = jax.tree_map(update_momentum, state.momentum, gradient,
noise)
updates = preconditioner.multiply_by_m_inv(momentum, preconditioner_state)
updates = jax.tree_map(lambda m: -m * jnp.sqrt(step_size), updates)
return updates, OptaxSGLDState(
count=state.count + 1,
rng_key=new_key,
momentum=momentum,
preconditioner_state=preconditioner_state)
return GradientTransformation(init_fn, update_fn)
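# A minimal usage sketch (editor's addition, not part of the original file):
# the transformation follows the standard optax interface, so one SGLD step on a
# toy pytree looks like any other optax update.
if __name__ == '__main__':
  import optax
  _params = {'w': jnp.ones([4])}
  _grads = {'w': 0.5 * jnp.ones([4])}
  _opt = sgld_gradient_update(step_size=1e-3, seed=0, momentum_decay=0.9)
  _opt_state = _opt.init(_params)
  _updates, _opt_state = _opt.update(_grads, _opt_state)
  _params = optax.apply_updates(_params, _updates)
  print(_params['w'])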
|
neural_testbed-master
|
neural_testbed/agents/factories/sgld_optimizer.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Factory methods for MC Dropout agent."""
import dataclasses
from typing import Sequence
from enn import losses
from enn import networks
from neural_testbed import agents
from neural_testbed import base as testbed_base
import numpy as np
import optax
@dataclasses.dataclass
class McDropoutConfig:
"""Configuration for mc dropout agent."""
dropout_rate: float = 0.1 # Drop probability for each hidden unit
length_scale: float = 1. # Length scale used for weight regularization
regularization_tau: float = 1. # tau for scaling the weight regularizer
dropout_input: bool = False # Whether to have dropout for the input layer
exclude_bias_l2: bool = False # Whether to exclude bias from regularization
adaptive_weight_scale: bool = True # Whether to scale with prior
hidden_sizes: Sequence[int] = (100, 100) # Hidden sizes for neural network
num_batches: int = 1000 # Number of SGD steps
batch_strategy: bool = False # Whether to scale num_batches with data ratio
learning_rate: float = 1e-3 # Learning rate for adam optimizer
seed: int = 0 # Initialization seed
def make_mc_dropout_agent(
config: McDropoutConfig) -> agents.VanillaEnnAgent:
"""Factory method to create MC dropout agent."""
def make_enn(prior: testbed_base.PriorKnowledge) -> networks.EnnArray:
return networks.MLPDropoutENN(
output_sizes=list(config.hidden_sizes) + [prior.num_classes],
dropout_rate=config.dropout_rate,
dropout_input=config.dropout_input,
seed=config.seed,
)
def make_loss(prior: testbed_base.PriorKnowledge,
enn: networks.EnnArray) -> losses.LossFnArray:
del enn
single_loss = losses.combine_single_index_losses_as_metric(
train_loss=losses.XentLoss(prior.num_classes),
extra_losses={
'acc': losses.AccuracyErrorLoss(prior.num_classes)
},
)
# Averaging over index
loss_fn = losses.average_single_index_loss(single_loss, num_index_samples=1)
# Adding a special weight regularization based on paper "Dropout as a
# Bayesian Approximation: Representing Model Uncertainty in Deep Learning",
# https://github.com/yaringal/DropoutUncertaintyExps/blob/master/net/net.py#L72
scale = (config.length_scale**2) * (1 - config.dropout_rate) / (
2. * prior.num_train * config.regularization_tau)
if config.adaptive_weight_scale:
scale = config.length_scale * np.sqrt(
prior.temperature) * prior.input_dim / prior.num_train
if config.exclude_bias_l2:
predicate = lambda module, name, value: name != 'b'
else:
predicate = lambda module, name, value: True
loss_fn = losses.add_l2_weight_decay(loss_fn, scale, predicate)
return loss_fn
def batch_strategy(prior: testbed_base.PriorKnowledge) -> int:
if not config.batch_strategy:
return config.num_batches
data_ratio = prior.num_train / prior.input_dim
if data_ratio > 500: # high data regime
return config.num_batches * 5
elif data_ratio < 5: # low data regime
return config.num_batches // 5
else:
return config.num_batches
agent_config = agents.VanillaEnnConfig(
enn_ctor=make_enn,
loss_ctor=make_loss,
optimizer=optax.adam(config.learning_rate),
num_batches=batch_strategy,
seed=config.seed,
)
return agents.VanillaEnnAgent(agent_config)
|
neural_testbed-master
|
neural_testbed/agents/factories/dropout.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Factory methods for Random Forest baseline agent."""
import dataclasses
import chex
from neural_testbed import base as testbed_base
import numpy as np
from sklearn import ensemble
@dataclasses.dataclass
class RandomForestConfig:
n_estimators: int = 100 # Number of elements in random forest
criterion: str = 'gini' # Splitting criterion 'gini' or 'entropy'
def make_agent(config: RandomForestConfig) -> testbed_base.TestbedAgent:
"""Factory method to create a random forest agent."""
def random_forest_agent(
data: testbed_base.Data,
prior: testbed_base.PriorKnowledge,
) -> testbed_base.EpistemicSampler:
# sklearn cannot handle instances with no samples of that class
# so we add a fake sample for every class here as a hack
new_x = np.zeros((prior.num_classes, data.x.shape[1]))
new_y = np.expand_dims(np.arange(prior.num_classes), axis=1)
data = testbed_base.Data(
np.concatenate((data.x, new_x), axis=0),
np.concatenate((data.y, new_y), axis=0))
random_forest = ensemble.RandomForestClassifier(
n_estimators=config.n_estimators, criterion=config.criterion)
random_forest.fit(data.x, np.ravel(data.y))
# Ensure that the number of classes is correct
# (this will fail if the fake data isn't added above)
assert len(random_forest.classes_) == prior.num_classes
def enn_sampler(x: chex.Array, seed: int = 0) -> chex.Array:
del seed # seed does not affect the random_forest agent.
probs = random_forest.predict_proba(x)
# threshold the probabilities, otherwise get nans in the KL calculation
probs = np.minimum(np.maximum(probs, 0.01), 0.99)
return np.log(probs) # return logits
return enn_sampler
return random_forest_agent
|
neural_testbed-master
|
neural_testbed/agents/factories/random_forest.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""This folder is designed to collect agent definitions for neural testbed.
Each file should contain:
- **AgentConfig** = dataclass describing the config of the agent. This should
be initialized with the chosen parameters as chosen by the experimental
sweep on the testbed.
- **AgentCtor**: AgentConfig -> TestbedAgent = constructor of the agent.
- **AgentSweep**: None -> Sequence[AgentConfig] = the sequence of parameters
necessary to run the sweep for the paper.
We might be able to express this structure through TypeVar.
"""
import dataclasses
from typing import Callable, Generic, Sequence, TypeVar
from neural_testbed import base as testbed_base
# TypeVar parameterizes in terms of this input
AgentConfig = TypeVar('AgentConfig')
@dataclasses.dataclass
class PaperAgent(Generic[AgentConfig]):
default: AgentConfig
ctor: Callable[[AgentConfig], testbed_base.TestbedAgent]
sweep: Callable[[], Sequence[AgentConfig]]
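# A minimal usage sketch (editor's addition, not part of the original file):
# how a factory module is expected to plug into PaperAgent; the toy config and
# agent below are illustrative only.
if __name__ == '__main__':
  @dataclasses.dataclass
  class _ToyConfig:
    scale: float = 1.0
  def _make_toy_agent(config: _ToyConfig) -> testbed_base.TestbedAgent:
    def _agent(data, prior):
      del data, prior, config
      return lambda x, key: x  # trivially echoes inputs as "logits"
    return _agent
  _paper_agent = PaperAgent(
      default=_ToyConfig(),
      ctor=_make_toy_agent,
      sweep=lambda: (_ToyConfig(scale=1.0), _ToyConfig(scale=2.0)),
  )
  print(len(_paper_agent.sweep()))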
|
neural_testbed-master
|
neural_testbed/agents/factories/base.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Factory methods for K-nearest neighbors baseline agent."""
import dataclasses
import chex
from neural_testbed import base as testbed_base
import numpy as np
from sklearn import neighbors
@dataclasses.dataclass
class KnnConfig:
num_neighbors: int = 3 # Number of nearest-neighbors
weighting: str = 'uniform' # Weighting type to use ('distance', 'uniform')
def make_agent(config: KnnConfig) -> testbed_base.TestbedAgent:
"""Factory method to create a K-nearst neighbors agent."""
def knn_agent(
data: testbed_base.Data,
prior: testbed_base.PriorKnowledge,
) -> testbed_base.EpistemicSampler:
# sklearn cannot handle instances with no samples of that class
# so we add a fake sample for every class here as a hack
new_x = np.zeros((prior.num_classes, data.x.shape[1]))
new_y = np.expand_dims(np.arange(prior.num_classes), axis=1)
data = testbed_base.Data(
np.concatenate((data.x, new_x), axis=0),
np.concatenate((data.y, new_y), axis=0))
knn = neighbors.KNeighborsClassifier(
n_neighbors=min(data.x.shape[0], config.num_neighbors),
weights=config.weighting)
knn.fit(data.x, np.ravel(data.y))
# Ensure that the number of classes is correct
# (this will fail if the fake data isn't added above)
assert len(knn.classes_) == prior.num_classes
def enn_sampler(x: chex.Array, seed: int = 0) -> chex.Array:
del seed # seed does not affect the knn agent.
probs = knn.predict_proba(x)
# threshold the probabilities, otherwise get nans in the KL calculation
probs = np.minimum(np.maximum(probs, 0.01), 0.99)
return np.log(probs) # return logits
return enn_sampler
return knn_agent
|
neural_testbed-master
|
neural_testbed/agents/factories/knn.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
neural_testbed-master
|
neural_testbed/agents/factories/sweeps/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweeps for hypermodel agent."""
from typing import Sequence
from neural_testbed.agents.factories import base as factories_base
from neural_testbed.agents.factories import hypermodel
def l2reg_sweep() -> Sequence[hypermodel.HypermodelConfig]:
"""Generates the hypermodel sweep over l2 regularization parameters for paper."""
sweep = []
for l2_weight_decay in [0.1, 0.3, 1, 3, 10]:
sweep.append(hypermodel.HypermodelConfig(l2_weight_decay=l2_weight_decay,
num_batches=1000,
batch_strategy=True,))
return tuple(sweep)
def index_sweep() -> Sequence[hypermodel.HypermodelConfig]:
"""Generates the hypermodel sweep over basic parameters for paper."""
sweep = []
for index_dim in [1, 3, 5, 7]:
sweep.append(
hypermodel.HypermodelConfig(
index_dim=index_dim,
num_batches=1000,
batch_strategy=True,
))
return tuple(sweep)
def boot_sweep() -> Sequence[hypermodel.HypermodelConfig]:
"""Basic sweep over hyperparams."""
sweep = []
for distribution in ['none', 'bernoulli', 'exponential']:
sweep.append(
hypermodel.HypermodelConfig(
distribution=distribution,
num_batches=1000,
batch_strategy=True,
))
return tuple(sweep)
def prior_sweep() -> Sequence[hypermodel.HypermodelConfig]:
"""Generates the hypermodel sweep over prior function parameters for paper."""
sweep = []
for prior_hidden_sizes in [(10,), (10, 10)]:
for prior_scale in [1, 3]:
for temp_scale_prior in ['lin', 'sqrt']:
sweep.append(
hypermodel.HypermodelConfig(
prior_hidden_sizes=prior_hidden_sizes,
prior_scale=prior_scale,
temp_scale_prior=temp_scale_prior,
num_batches=1000,
batch_strategy=True,))
return tuple(sweep)
def batch_sweep() -> Sequence[hypermodel.HypermodelConfig]:
"""Basic sweep over hyperparams."""
sweep = []
for batch_strategy in [True, False]:
for num_batches in [500, 1000]:
sweep.append(
hypermodel.HypermodelConfig(
batch_strategy=batch_strategy,
num_batches=num_batches))
return tuple(sweep)
def combined_sweep() -> Sequence[hypermodel.HypermodelConfig]:
return tuple(prior_sweep()) + tuple(index_sweep()) + tuple(
l2reg_sweep()) + tuple(boot_sweep()) + tuple(batch_sweep())
def paper_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=hypermodel.HypermodelConfig(),
ctor=hypermodel.make_hypermodel_agent,
sweep=combined_sweep,
)
|
neural_testbed-master
|
neural_testbed/agents/factories/sweeps/testbed/hypermodel.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweeps for Bayes by Backprop agent."""
from typing import Sequence
from neural_testbed.agents.factories import base as factories_base
from neural_testbed.agents.factories import bbb
def base_sweep() -> Sequence[bbb.BBBConfig]:
"""Basic sweep over hyperparams."""
sweep = []
for learning_rate in [1e-3, 3e-3]:
for adaptive_scale in [True, False]:
sweep.append(
bbb.BBBConfig(
learning_rate=learning_rate,
adaptive_scale=adaptive_scale))
return tuple(sweep)
def prior_sweep() -> Sequence[bbb.BBBConfig]:
""""Basic sweep over hyperparams."""
sweep = []
for sigma_1 in [1, 2, 4]:
for sigma_2 in [0.25, 0.5, 0.75]:
for mixture_scale in [0, 0.5, 1]:
sweep.append(
bbb.BBBConfig(
sigma_1=sigma_1, sigma_2=sigma_2, mixture_scale=mixture_scale))
return tuple(sweep)
def network_sweep() -> Sequence[bbb.BBBConfig]:
"""Basic sweep over hyperparams."""
sweep = []
for hidden_sizes in [(50, 50), (100, 100),]:
sweep.append(bbb.BBBConfig(hidden_sizes=hidden_sizes))
return tuple(sweep)
def batch_sweep() -> Sequence[bbb.BBBConfig]:
"""Basic sweep over hyperparams."""
sweep = []
for batch_strategy in [True, False]:
for num_batches in [500, 1000]:
sweep.append(
bbb.BBBConfig(batch_strategy=batch_strategy, num_batches=num_batches))
return tuple(sweep)
def combined_sweep() -> Sequence[bbb.BBBConfig]:
return tuple(base_sweep()) + tuple(prior_sweep()) + tuple(
network_sweep()) + tuple(batch_sweep())
def paper_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=bbb.BBBConfig(),
ctor=bbb.make_agent,
sweep=combined_sweep,
)
|
neural_testbed-master
|
neural_testbed/agents/factories/sweeps/testbed/bbb.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Exposing agent sweeps."""
import dataclasses
from typing import Sequence
import ml_collections
from neural_testbed.agents.factories import base as factories_base
from neural_testbed.agents.factories import baselines
from neural_testbed.agents.factories.sweeps.testbed import bbb
from neural_testbed.agents.factories.sweeps.testbed import dropout
from neural_testbed.agents.factories.sweeps.testbed import ensemble
from neural_testbed.agents.factories.sweeps.testbed import ensemble_plus
from neural_testbed.agents.factories.sweeps.testbed import hypermodel
from neural_testbed.agents.factories.sweeps.testbed import mlp
from neural_testbed.agents.factories.sweeps.testbed import sgmcmc
# Populate a global dictionary that makes these agents easy to access.
_AGENTS = {
'baseline:uniform_class_probs': baselines.uniform_class_probs_paper_agent(),
'mlp': mlp.mlp_paper_agent(),
'bbb': bbb.paper_agent(),
'ensemble': ensemble.paper_agent(),
'ensemble+': ensemble_plus.paper_agent(),
'hypermodel': hypermodel.paper_agent(),
'dropout': dropout.paper_agent(),
'sgmcmc': sgmcmc.paper_agent(),
}
def get_implemented_agents() -> Sequence[str]:
return list(_AGENTS.keys())
def get_paper_agent(agent: str) -> factories_base.PaperAgent:
assert agent in get_implemented_agents()
return _AGENTS[agent]
def dummy_config() -> ml_collections.ConfigDict:
"""Creates a dummy config with all possible components."""
global_dict = {}
for agent in get_implemented_agents():
paper_agent = get_paper_agent(agent)
global_dict.update(dataclasses.asdict(paper_agent.default))
return ml_collections.ConfigDict(global_dict, type_safe=False)
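# A minimal usage sketch (editor's addition, not part of the original file):
# looking up an agent in the registry and instantiating it from its default,
# paper-tuned configuration.
if __name__ == '__main__':
  _paper_agent = get_paper_agent('ensemble')
  _agent = _paper_agent.ctor(_paper_agent.default)
  print(get_implemented_agents(), len(_paper_agent.sweep()))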
|
neural_testbed-master
|
neural_testbed/agents/factories/sweeps/testbed/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweeps for ensemble_plus agent."""
from typing import Sequence
from neural_testbed.agents.factories import base as factories_base
from neural_testbed.agents.factories import ensemble_plus
def basic_sweep() -> Sequence[ensemble_plus.EnsembleConfig]:
"""Basic sweep over hyperparams."""
sweep = []
for num_ensemble in [1, 3, 10, 30, 100]:
sweep.append(ensemble_plus.EnsembleConfig(
num_ensemble=num_ensemble,
batch_strategy=True,
))
return tuple(sweep)
def boot_sweep() -> Sequence[ensemble_plus.EnsembleConfig]:
"""Basic sweep over hyperparams."""
sweep = []
for distribution in ['none', 'bernoulli', 'exponential']:
sweep.append(ensemble_plus.EnsembleConfig(
distribution=distribution,
batch_strategy=True,
))
return tuple(sweep)
def weight_decay_sweep() -> Sequence[ensemble_plus.EnsembleConfig]:
"""Basic sweep over hyperparams."""
sweep = []
for l2_weight_decay in [0.1, 0.3, 1, 3, 10]:
sweep.append(ensemble_plus.EnsembleConfig(
l2_weight_decay=l2_weight_decay,
batch_strategy=True,
))
return tuple(sweep)
def prior_scale_sweep() -> Sequence[ensemble_plus.EnsembleConfig]:
"""Basic sweep over hyperparams."""
sweep = []
for temp_scale_prior in ['lin', 'sqrt']:
for prior_scale in [1, 3]:
sweep.append(ensemble_plus.EnsembleConfig(
temp_scale_prior=temp_scale_prior,
prior_scale=prior_scale,
batch_strategy=True,
))
return tuple(sweep)
def batch_sweep() -> Sequence[ensemble_plus.EnsembleConfig]:
"""Basic sweep over hyperparams."""
sweep = []
for batch_strategy in [True, False]:
for num_batches in [500, 1000]:
sweep.append(
ensemble_plus.EnsembleConfig(
batch_strategy=batch_strategy,
num_batches=num_batches))
return tuple(sweep)
def combined_sweep() -> Sequence[ensemble_plus.EnsembleConfig]:
return tuple(basic_sweep()) + tuple(boot_sweep()) + tuple(
weight_decay_sweep()) + tuple(prior_scale_sweep()) + tuple(batch_sweep())
def paper_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=ensemble_plus.EnsembleConfig(),
ctor=ensemble_plus.make_agent,
sweep=combined_sweep,
)
|
neural_testbed-master
|
neural_testbed/agents/factories/sweeps/testbed/ensemble_plus.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweeps for sgmcmc agent."""
from typing import Sequence
from neural_testbed.agents.factories import base as factories_base
from neural_testbed.agents.factories import sgmcmc
def sgld_sweep() -> Sequence[sgmcmc.SGMCMCConfig]:
"""sweep for vanilla sgld."""
sweep = []
for learning_rate in [5e-4, 1e-3, 5e-3]:
for prior_variance in [0.05, 0.1, 0.5, 1, 2, 10]:
sweep.append(
sgmcmc.SGMCMCConfig(
learning_rate=learning_rate,
prior_variance=prior_variance,
adaptive_prior_variance=True,))
return tuple(sweep)
def momentum_sgld_sweep() -> Sequence[sgmcmc.SGMCMCConfig]:
"""sweep for momentum sgld."""
sweep = []
for learning_rate in [5e-4, 1e-3, 5e-3]:
for prior_variance in [0.05, 0.1, 0.5, 1, 2, 10]:
sweep.append(
sgmcmc.SGMCMCConfig(
learning_rate=learning_rate,
prior_variance=prior_variance,
adaptive_prior_variance=True,
alg_temperature=1,
momentum_decay=0.9))
return tuple(sweep)
def combined_sweep() -> Sequence[sgmcmc.SGMCMCConfig]:
return tuple(sgld_sweep()) + tuple(momentum_sgld_sweep())
def paper_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=sgmcmc.SGMCMCConfig(),
ctor=sgmcmc.make_agent,
sweep=combined_sweep,
)
|
neural_testbed-master
|
neural_testbed/agents/factories/sweeps/testbed/sgmcmc.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweeps for mlp agent."""
from typing import Sequence
from neural_testbed.agents.factories import base as factories_base
from neural_testbed.agents.factories import baselines
def mlp_sweep() -> Sequence[baselines.MLPConfig]:
sweep = []
for adaptive_weight_scale in [True, False]:
for l2_weight_decay in [1e-4, 1e-3, 1e-2, 1e-1, 1, 10, 100]:
sweep.append(baselines.MLPConfig(
l2_weight_decay=l2_weight_decay,
adaptive_weight_scale=adaptive_weight_scale,
batch_strategy=True,
))
return tuple(sweep)
def mlp_batch_sweep() -> Sequence[baselines.MLPConfig]:
"""Basic sweep over hyperparams."""
sweep = []
for batch_strategy in [True, False]:
for num_batches in [500, 1000]:
sweep.append(
baselines.MLPConfig(
batch_strategy=batch_strategy,
num_batches=num_batches))
return tuple(sweep)
def mlp_combined_sweep() -> Sequence[baselines.MLPConfig]:
return tuple(mlp_sweep()) + tuple(mlp_batch_sweep())
def mlp_paper_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=baselines.MLPConfig(),
ctor=baselines.make_mlp_agent,
sweep=mlp_combined_sweep)
|
neural_testbed-master
|
neural_testbed/agents/factories/sweeps/testbed/mlp.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweeps for ensemble agent."""
from typing import Sequence
from neural_testbed.agents.factories import base as factories_base
from neural_testbed.agents.factories import ensemble
def vanilla_sweep() -> Sequence[ensemble.VanillaEnsembleConfig]:
sweep = []
for num_ensemble in [1, 3, 10, 30, 100]:
sweep.append(ensemble.VanillaEnsembleConfig(num_ensemble=num_ensemble,
batch_strategy=True,))
return tuple(sweep)
def weight_sweep() -> Sequence[ensemble.VanillaEnsembleConfig]:
sweep = []
for adaptive_weight_scale in [True, False]:
for l2_weight_decay in [1e-4, 1e-3, 1e-2, 1e-1, 1, 10, 100]:
sweep.append(ensemble.VanillaEnsembleConfig(
l2_weight_decay=l2_weight_decay,
adaptive_weight_scale=adaptive_weight_scale,
batch_strategy=True,
))
return tuple(sweep)
def batch_sweep() -> Sequence[ensemble.VanillaEnsembleConfig]:
"""Basic sweep over hyperparams."""
sweep = []
for batch_strategy in [True, False]:
for num_batches in [500, 1000]:
sweep.append(
ensemble.VanillaEnsembleConfig(
batch_strategy=batch_strategy,
num_batches=num_batches))
return tuple(sweep)
def combined_sweep() -> Sequence[ensemble.VanillaEnsembleConfig]:
return tuple(vanilla_sweep()) + tuple(weight_sweep()) + tuple(batch_sweep())
def paper_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=ensemble.VanillaEnsembleConfig(),
ctor=ensemble.make_agent,
sweep=combined_sweep,
)
|
neural_testbed-master
|
neural_testbed/agents/factories/sweeps/testbed/ensemble.py
|
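As a quick worked count, the combined ensemble sweep above concatenates 5 vanilla configs, 2 x 7 = 14 weight configs and 2 x 2 = 4 batch configs, 23 in total. A hypothetical check, with the import path inferred from the file_path above:
# Hypothetical sanity check of the sweep sizes defined above.
from neural_testbed.agents.factories.sweeps.testbed import ensemble as ensemble_sweeps

assert len(ensemble_sweeps.vanilla_sweep()) == 5      # num_ensemble in [1, 3, 10, 30, 100]
assert len(ensemble_sweeps.weight_sweep()) == 2 * 7   # adaptive_weight_scale x l2_weight_decay
assert len(ensemble_sweeps.batch_sweep()) == 2 * 2    # batch_strategy x num_batches
assert len(ensemble_sweeps.combined_sweep()) == 23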
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweeps for MC Dropout agent."""
from typing import Sequence
from neural_testbed.agents.factories import base as factories_base
from neural_testbed.agents.factories import dropout
def droprate_sweep() -> Sequence[dropout.McDropoutConfig]:
"""Generates the dropout sweep over dropping parameters for paper."""
sweep = []
for dropout_rate in [0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
for dropout_input in [True, False]:
sweep.append(
dropout.McDropoutConfig(
dropout_rate=dropout_rate,
dropout_input=dropout_input,
batch_strategy=True,
hidden_sizes=(50, 50)))
return tuple(sweep)
def l2reg_sweep() -> Sequence[dropout.McDropoutConfig]:
"""Generates the dropout sweep over l2 regularization parameters for paper."""
sweep = []
for adaptive_weight_scale in [True, False]:
for length_scale in [0.01, 0.1, 0.3, 1, 3, 10]:
sweep.append(
dropout.McDropoutConfig(
adaptive_weight_scale=adaptive_weight_scale,
length_scale=length_scale,
hidden_sizes=(50, 50)))
return tuple(sweep)
def network_sweep() -> Sequence[dropout.McDropoutConfig]:
"""Generates the dropout sweep over dropping parameters for paper."""
sweep = []
for hidden_sizes in [(50, 50), (100, 100)]:
sweep.append(
dropout.McDropoutConfig(
hidden_sizes=hidden_sizes,
batch_strategy=True,
))
return tuple(sweep)
def batch_sweep() -> Sequence[dropout.McDropoutConfig]:
"""Basic sweep over hyperparams."""
sweep = []
for batch_strategy in [True, False]:
for num_batches in [500, 1000]:
sweep.append(
dropout.McDropoutConfig(
batch_strategy=batch_strategy,
num_batches=num_batches,
hidden_sizes=(50, 50)))
return tuple(sweep)
def combined_sweep() -> Sequence[dropout.McDropoutConfig]:
return tuple(droprate_sweep()) + tuple(l2reg_sweep()) + tuple(
network_sweep()) + tuple(batch_sweep())
def paper_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=dropout.McDropoutConfig(),
ctor=dropout.make_mc_dropout_agent,
sweep=combined_sweep,
)
|
neural_testbed-master
|
neural_testbed/agents/factories/sweeps/testbed/dropout.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweeps for agents, specialized to real datasets."""
import dataclasses
from typing import Sequence
import ml_collections
from neural_testbed.agents.factories import base as factories_base
from neural_testbed.agents.factories import baselines
from neural_testbed.agents.factories import bbb
from neural_testbed.agents.factories import dropout
from neural_testbed.agents.factories import ensemble
from neural_testbed.agents.factories import ensemble_plus
from neural_testbed.agents.factories import hypermodel
from neural_testbed.agents.factories import sgmcmc
def mlp_sweep() -> Sequence[baselines.MLPConfig]:
"""Sweep over hyperparams."""
sweep = []
for num_batches in [1000, 5000, 10_000]:
for l2_weight_decay in [1e-2, 1e-1, 1, 10, 100]:
sweep.append(baselines.MLPConfig(
l2_weight_decay=l2_weight_decay,
num_batches=num_batches,
batch_strategy=False,
adaptive_weight_scale=False,
))
return tuple(sweep)
def mlp_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=baselines.MLPConfig(),
ctor=baselines.make_mlp_agent,
sweep=mlp_sweep,
)
def dropout_sweep() -> Sequence[dropout.McDropoutConfig]:
"""Sweep over hyperparams."""
sweep = []
for num_batches in [1000, 5000, 10_000]:
for length_scale in [1e-4, 1e-3, 1e-2, 1e-1, 1, 10, 100]:
for dropout_rate in [0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6]:
sweep.append(dropout.McDropoutConfig(
length_scale=length_scale,
num_batches=num_batches,
dropout_rate=dropout_rate,
batch_strategy=False,
adaptive_weight_scale=False,
))
return tuple(sweep)
def dropout_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=dropout.McDropoutConfig(),
ctor=dropout.make_mc_dropout_agent,
sweep=dropout_sweep,
)
def ensemble_sweep() -> Sequence[ensemble.VanillaEnsembleConfig]:
"""Sweep over hyperparams."""
sweep = []
for num_batches in [1000, 5000, 10_000]:
for l2_weight_decay in [1e-2, 1e-1, 1, 10, 100, 1000]:
sweep.append(ensemble.VanillaEnsembleConfig(
l2_weight_decay=l2_weight_decay,
num_batches=num_batches,
num_ensemble=100,
batch_strategy=False,
adaptive_weight_scale=False,
))
return tuple(sweep)
def ensemble_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=ensemble.VanillaEnsembleConfig(),
ctor=ensemble.make_agent,
sweep=ensemble_sweep,
)
def ensemble_plus_sweep() -> Sequence[ensemble_plus.EnsembleConfig]:
"""Sweep over hyperparams."""
sweep = []
for num_batches in [1000, 5000, 10_000]:
for l2_weight_decay in [1e-2, 1e-1, 1, 10, 100]:
for prior_scale in [1, 3, 10, 30, 100]:
sweep.append(ensemble_plus.EnsembleConfig(
l2_weight_decay=l2_weight_decay,
num_batches=num_batches,
num_ensemble=100,
prior_scale=prior_scale,
batch_strategy=False,
adaptive_weight_scale=False,
temp_scale_prior='none',
))
return tuple(sweep)
def ensemble_plus_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=ensemble_plus.EnsembleConfig(),
ctor=ensemble_plus.make_agent,
sweep=ensemble_plus_sweep,
)
def hypermodel_sweep() -> Sequence[hypermodel.HypermodelConfig]:
"""Sweep over hyperparams."""
sweep = []
for num_batches in [1000, 5000, 10_000]:
for l2_weight_decay in [1e-2, 1e-1, 1, 10, 100]:
for prior_scale in [1, 3, 10, 30, 100]:
sweep.append(hypermodel.HypermodelConfig(
l2_weight_decay=l2_weight_decay,
num_batches=num_batches,
prior_hidden_sizes=(10, 10),
prior_scale=prior_scale,
index_dim=5,
batch_strategy=False,
adaptive_weight_scale=False,
temp_scale_prior='none',
))
return tuple(sweep)
def hypermodel_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=hypermodel.HypermodelConfig(),
ctor=hypermodel.make_hypermodel_agent,
sweep=hypermodel_sweep,
)
def sgmcmc_sweep() -> Sequence[sgmcmc.SGMCMCConfig]:
"""Sweep over hyperparams."""
sweep = []
for learning_rate in [1e-4, 5e-4, 1e-3, 5e-3, 1e-2]:
for prior_variance in [1, 3, 10, 30, 100, 300,]:
for momentum_decay in [0, 0.9]:
sweep.append(
sgmcmc.SGMCMCConfig(
learning_rate=learning_rate,
prior_variance=prior_variance,
momentum_decay=momentum_decay,
adaptive_prior_variance=False))
return tuple(sweep)
def sgmcmc_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=sgmcmc.SGMCMCConfig(),
ctor=sgmcmc.make_agent,
sweep=sgmcmc_sweep,
)
def bbb_sweep() -> Sequence[bbb.BBBConfig]:
"""Sweep over hyperparams."""
sweep = []
for sigma_1 in [1, 2, 4]:
for sigma_2 in [0.1, 0.3, 0.5, 0.7, 0.9]:
for mixture_scale in [0, 0.5, 1]:
for num_batches in [1000, 5000, 10_000]:
sweep.append(
bbb.BBBConfig(
sigma_1=sigma_1,
sigma_2=sigma_2,
mixture_scale=mixture_scale,
num_batches=num_batches,
learning_rate=1e-3,
adaptive_scale=False
))
return tuple(sweep)
def bbb_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=bbb.BBBConfig(),
ctor=bbb.make_agent,
sweep=bbb_sweep,
)
# Populate a global dictionary that makes these agents easy to access.
_AGENTS = {
'baseline:uniform_class_probs': baselines.uniform_class_probs_paper_agent(),
'mlp': mlp_agent(),
'bbb': bbb_agent(),
'ensemble': ensemble_agent(),
'ensemble+': ensemble_plus_agent(),
'hypermodel': hypermodel_agent(),
'dropout': dropout_agent(),
'sgmcmc': sgmcmc_agent(),
}
def get_implemented_agents() -> Sequence[str]:
return list(_AGENTS.keys())
def get_paper_agent(agent: str) -> factories_base.PaperAgent:
assert agent in get_implemented_agents()
return _AGENTS[agent]
def dummy_config() -> ml_collections.ConfigDict:
"""Creates a dummy config with all possible components."""
global_dict = {}
for agent in get_implemented_agents():
paper_agent = get_paper_agent(agent)
global_dict.update(dataclasses.asdict(paper_agent.default))
return ml_collections.ConfigDict(global_dict, type_safe=False)
|
neural_testbed-master
|
neural_testbed/agents/factories/sweeps/real_data/__init__.py
|
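A short sketch of how the registry above might be queried, assuming the import path follows from the file_path column and that PaperAgent fields mirror its constructor arguments:
# Hypothetical example: enumerate the real-data agents registered above.
from neural_testbed.agents.factories.sweeps import real_data

for name in real_data.get_implemented_agents():
  paper_agent = real_data.get_paper_agent(name)
  print(name, len(paper_agent.sweep()))  # number of sweep configurations per agent
flat_config = real_data.dummy_config()   # ml_collections.ConfigDict of merged defaults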
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweeps for hypermodel agent."""
from typing import Sequence
from neural_testbed.agents.factories import base as factories_base
from neural_testbed.agents.factories import hypermodel
def l2reg_sweep() -> Sequence[hypermodel.HypermodelConfig]:
"""Generates the hypermodel sweep over l2 regularization parameters for paper."""
sweep = []
for l2_weight_decay in [0.1, 0.3, 1, 3, 10]:
sweep.append(hypermodel.HypermodelConfig(l2_weight_decay=l2_weight_decay))
return tuple(sweep)
def index_sweep() -> Sequence[hypermodel.HypermodelConfig]:
"""Generates the hypermodel sweep over basic parameters for paper."""
sweep = []
for index_dim in [1, 3, 5, 7]:
sweep.append(hypermodel.HypermodelConfig(index_dim=index_dim))
return tuple(sweep)
def boot_sweep() -> Sequence[hypermodel.HypermodelConfig]:
"""Basic sweep over hyperparams."""
sweep = []
for distribution in ['none', 'bernoulli', 'exponential']:
sweep.append(hypermodel.HypermodelConfig(distribution=distribution,))
return tuple(sweep)
def prior_sweep() -> Sequence[hypermodel.HypermodelConfig]:
"""Generates the hypermodel sweep over prior function parameters for paper."""
sweep = []
for prior_scale in [0, 1]:
for prior_hidden_sizes in [(10,), (10, 10)]:
for l2_weight_decay in [1, 2]:
for temp_scale_prior in ['lin', 'sqrt']:
sweep.append(
hypermodel.HypermodelConfig(
prior_scale=prior_scale,
prior_hidden_sizes=prior_hidden_sizes,
l2_weight_decay=l2_weight_decay,
temp_scale_prior=temp_scale_prior))
return tuple(sweep)
def combined_sweep() -> Sequence[hypermodel.HypermodelConfig]:
return tuple(prior_sweep()) + tuple(index_sweep()) + tuple(
l2reg_sweep()) + tuple(boot_sweep())
def paper_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=hypermodel.HypermodelConfig(),
ctor=hypermodel.make_hypermodel_agent,
sweep=combined_sweep,
)
|
neural_testbed-master
|
neural_testbed/agents/factories/sweeps/testbed_2d/hypermodel.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweeps for Bayes by Backprop agent."""
from typing import Sequence
from neural_testbed.agents.factories import base as factories_base
from neural_testbed.agents.factories import bbb
def base_sweep() -> Sequence[bbb.BBBConfig]:
"""Generates the bbb sweep over network parameters."""
sweep = []
for learning_rate in [1e-3, 3e-3]:
for sigma_1 in [0.3, 0.5, 0.7, 1, 2]:
for num_batches in [1000, 2000]:
sweep.append(
bbb.BBBConfig(
learning_rate=learning_rate,
sigma_1=sigma_1,
num_batches=num_batches))
return tuple(sweep)
def prior_sweep() -> Sequence[bbb.BBBConfig]:
"""Generates the bbb sweep over prior parameters."""
sweep = []
for sigma_1 in [1, 2, 4]:
for sigma_2 in [0.3, 0.5, 0.7]:
for mixture_scale in [0.5]:
sweep.append(
bbb.BBBConfig(
sigma_1=sigma_1, sigma_2=sigma_2, mixture_scale=mixture_scale))
return tuple(sweep)
def network_sweep() -> Sequence[bbb.BBBConfig]:
"""Generates the bbb sweep over network architecture for paper."""
sweep = []
for hidden_sizes in [(50, 50), (100, 100), (50, 50, 50)]:
sweep.append(bbb.BBBConfig(hidden_sizes=hidden_sizes))
return tuple(sweep)
def combined_sweep() -> Sequence[bbb.BBBConfig]:
return tuple(base_sweep()) + tuple(prior_sweep()) + tuple(network_sweep())
def paper_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=bbb.BBBConfig(),
ctor=bbb.make_agent,
sweep=combined_sweep,
)
|
neural_testbed-master
|
neural_testbed/agents/factories/sweeps/testbed_2d/bbb.py
|
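The BBBConfig fields exercised by the sweeps above (learning_rate, sigma_1, sigma_2, mixture_scale, num_batches, hidden_sizes) can presumably also be set one-off; a hypothetical configuration using only keyword arguments that appear in this file:
# Hypothetical single configuration outside the predefined sweeps.
from neural_testbed.agents.factories import bbb

custom = bbb.BBBConfig(
    learning_rate=3e-3,     # as in base_sweep above
    sigma_1=2,              # prior mixture parameters, as in prior_sweep
    sigma_2=0.5,
    mixture_scale=0.5,
    hidden_sizes=(50, 50),  # as in network_sweep
)
agent = bbb.make_agent(custom)  # same ctor as paper_agent; assumed to take a single config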
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Exposing agent sweeps."""
import dataclasses
from typing import Sequence
import ml_collections
from neural_testbed.agents.factories import base as factories_base
from neural_testbed.agents.factories import baselines
from neural_testbed.agents.factories.sweeps.testbed_2d import bbb
from neural_testbed.agents.factories.sweeps.testbed_2d import deep_kernel
from neural_testbed.agents.factories.sweeps.testbed_2d import dropout
from neural_testbed.agents.factories.sweeps.testbed_2d import ensemble
from neural_testbed.agents.factories.sweeps.testbed_2d import ensemble_plus
from neural_testbed.agents.factories.sweeps.testbed_2d import hypermodel
from neural_testbed.agents.factories.sweeps.testbed_2d import knn
from neural_testbed.agents.factories.sweeps.testbed_2d import mlp
from neural_testbed.agents.factories.sweeps.testbed_2d import random_forest
from neural_testbed.agents.factories.sweeps.testbed_2d import sgmcmc
# Populate a global dictionary that makes these agents easy to access.
_AGENTS = {
'baseline:uniform_class_probs': baselines.uniform_class_probs_paper_agent(),
'baseline:average_class_probs': baselines.average_class_probs_paper_agent(),
'baseline:prior': baselines.prior_paper_agent(),
'logistic_regression': baselines.logistic_regression_paper_agent(),
'mlp': mlp.mlp_paper_agent(),
'bbb': bbb.paper_agent(),
'ensemble': ensemble.paper_agent(),
'ensemble+': ensemble_plus.paper_agent(),
'hypermodel': hypermodel.paper_agent(),
'dropout': dropout.paper_agent(),
'sgmcmc': sgmcmc.paper_agent(),
'deep_kernel': deep_kernel.paper_agent(),
'knn': knn.paper_agent(),
'random_forest': random_forest.paper_agent(),
}
def get_implemented_agents() -> Sequence[str]:
return list(_AGENTS.keys())
def get_paper_agent(agent: str) -> factories_base.PaperAgent:
assert agent in get_implemented_agents()
return _AGENTS[agent]
def dummy_config() -> ml_collections.ConfigDict:
"""Creates a dummy config with all possible components."""
global_dict = {}
for agent in get_implemented_agents():
paper_agent = get_paper_agent(agent)
global_dict.update(dataclasses.asdict(paper_agent.default))
return ml_collections.ConfigDict(global_dict, type_safe=False)
|
neural_testbed-master
|
neural_testbed/agents/factories/sweeps/testbed_2d/__init__.py
|
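A sketch of the merged default config built by dummy_config() above, with the import path inferred from the file_path column:
# Hypothetical example: flatten every registered agent's default hyperparameters.
from neural_testbed.agents.factories.sweeps import testbed_2d

config = testbed_2d.dummy_config()  # ml_collections.ConfigDict(..., type_safe=False)
print(len(config.keys()), 'merged hyperparameter fields')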
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweeps for ensemble_plus agent."""
from typing import Sequence
from neural_testbed.agents.factories import base as factories_base
from neural_testbed.agents.factories import ensemble_plus
def basic_sweep() -> Sequence[ensemble_plus.EnsembleConfig]:
"""Basic sweep over hyperparams."""
sweep = []
for num_ensemble in [1, 3, 10, 30, 100]:
sweep.append(ensemble_plus.EnsembleConfig(
num_ensemble=num_ensemble,
))
return tuple(sweep)
def boot_sweep() -> Sequence[ensemble_plus.EnsembleConfig]:
"""Basic sweep over hyperparams."""
sweep = []
for distribution in ['none', 'bernoulli', 'exponential']:
sweep.append(ensemble_plus.EnsembleConfig(
distribution=distribution,
))
return tuple(sweep)
def weight_decay_sweep() -> Sequence[ensemble_plus.EnsembleConfig]:
"""Basic sweep over hyperparams."""
sweep = []
for l2_weight_decay in [0.1, 0.3, 1, 3, 10]:
sweep.append(ensemble_plus.EnsembleConfig(
l2_weight_decay=l2_weight_decay,
))
return tuple(sweep)
def combined_sweep() -> Sequence[ensemble_plus.EnsembleConfig]:
return tuple(basic_sweep()) + tuple(boot_sweep()) + tuple(
weight_decay_sweep())
def paper_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=ensemble_plus.EnsembleConfig(),
ctor=ensemble_plus.make_agent,
sweep=combined_sweep,
)
|
neural_testbed-master
|
neural_testbed/agents/factories/sweeps/testbed_2d/ensemble_plus.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweeps for sgmcmc agent."""
from typing import Sequence
from neural_testbed.agents.factories import base as factories_base
from neural_testbed.agents.factories import sgmcmc
def sgld_sweep() -> Sequence[sgmcmc.SGMCMCConfig]:
"""sweep for vanilla sgld."""
sweep = []
for learning_rate in [1e-4, 5e-4, 1e-3, 5e-3]:
for prior_variance in [0.05, 0.1, 0.2]:
sweep.append(sgmcmc.SGMCMCConfig(learning_rate, prior_variance))
return tuple(sweep)
def momentum_sgld_sweep() -> Sequence[sgmcmc.SGMCMCConfig]:
"""sweep for momentum sgld."""
sweep = []
for learning_rate in [1e-4, 5e-4, 1e-3, 5e-3]:
for prior_variance in [0.05, 0.1, 0.2]:
sweep.append(
sgmcmc.SGMCMCConfig(
learning_rate,
prior_variance,
alg_temperature=1,
momentum_decay=0.9))
return tuple(sweep)
def precondition_momentum_sgld_sweep() -> Sequence[sgmcmc.SGMCMCConfig]:
"""sweep for momentum sgld with preconditioner."""
sweep = []
for learning_rate in [1e-4, 5e-4, 1e-3, 5e-3]:
for prior_variance in [0.05, 0.1, 0.2]:
sweep.append(
sgmcmc.SGMCMCConfig(
learning_rate,
prior_variance,
alg_temperature=1,
momentum_decay=0.9,
preconditioner='RMSprop'))
return tuple(sweep)
def mlp_sgd_sweep() -> Sequence[sgmcmc.SGMCMCConfig]:
"""sweep for a single mlp with sgd optimizer."""
sweep = []
for learning_rate in [1e-4, 5e-4, 1e-3, 5e-3]:
for prior_variance in [0.05, 0.1, 0.2]:
sweep.append(
sgmcmc.SGMCMCConfig(
learning_rate,
prior_variance,
alg_temperature=0,
burn_in_time=100000))
return tuple(sweep)
def combined_sweep() -> Sequence[sgmcmc.SGMCMCConfig]:
return tuple(sgld_sweep()) + tuple(momentum_sgld_sweep())
def paper_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=sgmcmc.SGMCMCConfig(),
ctor=sgmcmc.make_agent,
sweep=combined_sweep,
)
|
neural_testbed-master
|
neural_testbed/agents/factories/sweeps/testbed_2d/sgmcmc.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweeps for mlp agent."""
from typing import Sequence
from neural_testbed.agents.factories import base as factories_base
from neural_testbed.agents.factories import baselines
def mlp_sweep() -> Sequence[baselines.MLPConfig]:
sweep = []
for adaptive_weight_scale in [True, False]:
for l2_weight_decay in [1e-4, 1e-3, 1e-2, 1e-1, 1, 10, 100]:
sweep.append(baselines.MLPConfig(
l2_weight_decay=l2_weight_decay,
adaptive_weight_scale=adaptive_weight_scale,
))
return tuple(sweep)
def mlp_paper_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=baselines.MLPConfig(),
ctor=baselines.make_mlp_agent,
sweep=mlp_sweep)
|
neural_testbed-master
|
neural_testbed/agents/factories/sweeps/testbed_2d/mlp.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweeps for deep kernel agent."""
from typing import Sequence
from neural_testbed.agents.factories import base as factories_base
from neural_testbed.agents.factories import deep_kernel
def deep_kernel_sweep() -> Sequence[deep_kernel.DeepKernelConfig]:
"""Basic sweep over hyperparams."""
sweep = []
for scale_factor in [1., 2., 3., 4., 5., 6.]:
for sigma_squared_factor in [0.5, 1., 2., 3., 4.]:
sweep.append(
deep_kernel.DeepKernelConfig(
scale_factor=scale_factor,
sigma_squared_factor=sigma_squared_factor))
return tuple(sweep)
def paper_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=deep_kernel.DeepKernelConfig(),
ctor=deep_kernel.make_agent,
sweep=deep_kernel_sweep,
)
|
neural_testbed-master
|
neural_testbed/agents/factories/sweeps/testbed_2d/deep_kernel.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweeps for ensemble agent."""
from typing import Sequence
from neural_testbed.agents.factories import base as factories_base
from neural_testbed.agents.factories import ensemble
def vanilla_sweep() -> Sequence[ensemble.VanillaEnsembleConfig]:
sweep = []
for num_ensemble in [1, 3, 10, 30, 100]:
sweep.append(ensemble.VanillaEnsembleConfig(num_ensemble))
return tuple(sweep)
def weight_sweep() -> Sequence[ensemble.VanillaEnsembleConfig]:
sweep = []
for adaptive_weight_scale in [True, False]:
for l2_weight_decay in [1e-4, 1e-3, 1e-2, 1e-1, 1, 10, 100]:
sweep.append(ensemble.VanillaEnsembleConfig(
num_ensemble=30,
l2_weight_decay=l2_weight_decay,
adaptive_weight_scale=adaptive_weight_scale,
))
return tuple(sweep)
def combined_sweep() -> Sequence[ensemble.VanillaEnsembleConfig]:
return tuple(vanilla_sweep()) + tuple(weight_sweep())
def paper_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=ensemble.VanillaEnsembleConfig(),
ctor=ensemble.make_agent,
sweep=combined_sweep,
)
|
neural_testbed-master
|
neural_testbed/agents/factories/sweeps/testbed_2d/ensemble.py
|